.. | .. |
38 | 38 | #include "nfsd.h" |
39 | 39 | #include "state.h" |
40 | 40 | #include "netns.h" |
| 41 | +#include "trace.h" |
41 | 42 | #include "xdr4cb.h" |
| 43 | +#include "xdr4.h" |
42 | 44 | |
43 | 45 | #define NFSDDBG_FACILITY NFSDDBG_PROC |
44 | 46 | |
.. | .. |
58 | 60 | /* res */ |
59 | 61 | int status; |
60 | 62 | }; |
61 | | - |
62 | | -/* |
63 | | - * Handle decode buffer overflows out-of-line. |
64 | | - */ |
65 | | -static void print_overflow_msg(const char *func, const struct xdr_stream *xdr) |
66 | | -{ |
67 | | - dprintk("NFS: %s prematurely hit the end of our receive buffer. " |
68 | | - "Remaining buffer length is %tu words.\n", |
69 | | - func, xdr->end - xdr->p); |
70 | | -} |
71 | 63 | |
72 | 64 | static __be32 *xdr_encode_empty_array(__be32 *p) |
73 | 65 | { |
.. | .. |
105 | 97 | OP_CB_WANTS_CANCELLED = 12, |
106 | 98 | OP_CB_NOTIFY_LOCK = 13, |
107 | 99 | OP_CB_NOTIFY_DEVICEID = 14, |
| 100 | + OP_CB_OFFLOAD = 15, |
108 | 101 | OP_CB_ILLEGAL = 10044 |
109 | 102 | }; |
110 | 103 | |
.. | .. |
238 | 231 | *status = nfs_cb_stat_to_errno(be32_to_cpup(p)); |
239 | 232 | return 0; |
240 | 233 | out_overflow: |
241 | | - print_overflow_msg(__func__, xdr); |
242 | 234 | return -EIO; |
243 | 235 | out_unexpected: |
244 | 236 | dprintk("NFSD: Callback server returned operation %d but " |
.. | .. |
307 | 299 | hdr->nops = be32_to_cpup(p); |
308 | 300 | return 0; |
309 | 301 | out_overflow: |
310 | | - print_overflow_msg(__func__, xdr); |
311 | 302 | return -EIO; |
312 | 303 | } |
313 | 304 | |
.. | .. |
435 | 426 | cb->cb_seq_status = status; |
436 | 427 | return status; |
437 | 428 | out_overflow: |
438 | | - print_overflow_msg(__func__, xdr); |
439 | 429 | status = -EIO; |
440 | 430 | goto out; |
441 | 431 | } |
.. | .. |
523 | 513 | if (unlikely(status)) |
524 | 514 | return status; |
525 | 515 | |
526 | | - if (cb != NULL) { |
527 | | - status = decode_cb_sequence4res(xdr, cb); |
528 | | - if (unlikely(status || cb->cb_seq_status)) |
529 | | - return status; |
530 | | - } |
| 516 | + status = decode_cb_sequence4res(xdr, cb); |
| 517 | + if (unlikely(status || cb->cb_seq_status)) |
| 518 | + return status; |
531 | 519 | |
532 | 520 | return decode_cb_op_status(xdr, OP_CB_RECALL, &cb->cb_status); |
533 | 521 | } |
.. | .. |
615 | 603 | if (unlikely(status)) |
616 | 604 | return status; |
617 | 605 | |
618 | | - if (cb) { |
619 | | - status = decode_cb_sequence4res(xdr, cb); |
620 | | - if (unlikely(status || cb->cb_seq_status)) |
621 | | - return status; |
622 | | - } |
| 606 | + status = decode_cb_sequence4res(xdr, cb); |
| 607 | + if (unlikely(status || cb->cb_seq_status)) |
| 608 | + return status; |
| 609 | + |
623 | 610 | return decode_cb_op_status(xdr, OP_CB_LAYOUTRECALL, &cb->cb_status); |
624 | 611 | } |
625 | 612 | #endif /* CONFIG_NFSD_PNFS */ |
.. | .. |
674 | 661 | if (unlikely(status)) |
675 | 662 | return status; |
676 | 663 | |
677 | | - if (cb) { |
678 | | - status = decode_cb_sequence4res(xdr, cb); |
679 | | - if (unlikely(status || cb->cb_seq_status)) |
680 | | - return status; |
681 | | - } |
| 664 | + status = decode_cb_sequence4res(xdr, cb); |
| 665 | + if (unlikely(status || cb->cb_seq_status)) |
| 666 | + return status; |
| 667 | + |
682 | 668 | return decode_cb_op_status(xdr, OP_CB_NOTIFY_LOCK, &cb->cb_status); |
683 | 669 | } |
684 | 670 | |
| 671 | +/* |
| 672 | + * struct write_response4 { |
| 673 | + * stateid4 wr_callback_id<1>; |
| 674 | + * length4 wr_count; |
| 675 | + * stable_how4 wr_committed; |
| 676 | + * verifier4 wr_writeverf; |
| 677 | + * }; |
| 678 | + * union offload_info4 switch (nfsstat4 coa_status) { |
| 679 | + * case NFS4_OK: |
| 680 | + * write_response4 coa_resok4; |
| 681 | + * default: |
| 682 | + * length4 coa_bytes_copied; |
| 683 | + * }; |
| 684 | + * struct CB_OFFLOAD4args { |
| 685 | + * nfs_fh4 coa_fh; |
| 686 | + * stateid4 coa_stateid; |
| 687 | + * offload_info4 coa_offload_info; |
| 688 | + * }; |
| 689 | + */ |
| 690 | +static void encode_offload_info4(struct xdr_stream *xdr, |
| 691 | + __be32 nfserr, |
| 692 | + const struct nfsd4_copy *cp) |
| 693 | +{ |
| 694 | + __be32 *p; |
| 695 | + |
| 696 | + p = xdr_reserve_space(xdr, 4); |
| 697 | + *p++ = nfserr; |
| 698 | + if (!nfserr) { |
| 699 | + p = xdr_reserve_space(xdr, 4 + 8 + 4 + NFS4_VERIFIER_SIZE); |
| 700 | + p = xdr_encode_empty_array(p); |
| 701 | + p = xdr_encode_hyper(p, cp->cp_res.wr_bytes_written); |
| 702 | + *p++ = cpu_to_be32(cp->cp_res.wr_stable_how); |
| 703 | + p = xdr_encode_opaque_fixed(p, cp->cp_res.wr_verifier.data, |
| 704 | + NFS4_VERIFIER_SIZE); |
| 705 | + } else { |
| 706 | + p = xdr_reserve_space(xdr, 8); |
| 707 | + /* We always return success if bytes were written */ |
| 708 | + p = xdr_encode_hyper(p, 0); |
| 709 | + } |
| 710 | +} |
| 711 | + |
| 712 | +static void encode_cb_offload4args(struct xdr_stream *xdr, |
| 713 | + __be32 nfserr, |
| 714 | + const struct knfsd_fh *fh, |
| 715 | + const struct nfsd4_copy *cp, |
| 716 | + struct nfs4_cb_compound_hdr *hdr) |
| 717 | +{ |
| 718 | + __be32 *p; |
| 719 | + |
| 720 | + p = xdr_reserve_space(xdr, 4); |
| 721 | + *p++ = cpu_to_be32(OP_CB_OFFLOAD); |
| 722 | + encode_nfs_fh4(xdr, fh); |
| 723 | + encode_stateid4(xdr, &cp->cp_res.cb_stateid); |
| 724 | + encode_offload_info4(xdr, nfserr, cp); |
| 725 | + |
| 726 | + hdr->nops++; |
| 727 | +} |
| 728 | + |
| 729 | +static void nfs4_xdr_enc_cb_offload(struct rpc_rqst *req, |
| 730 | + struct xdr_stream *xdr, |
| 731 | + const void *data) |
| 732 | +{ |
| 733 | + const struct nfsd4_callback *cb = data; |
| 734 | + const struct nfsd4_copy *cp = |
| 735 | + container_of(cb, struct nfsd4_copy, cp_cb); |
| 736 | + struct nfs4_cb_compound_hdr hdr = { |
| 737 | + .ident = 0, |
| 738 | + .minorversion = cb->cb_clp->cl_minorversion, |
| 739 | + }; |
| 740 | + |
| 741 | + encode_cb_compound4args(xdr, &hdr); |
| 742 | + encode_cb_sequence4args(xdr, cb, &hdr); |
| 743 | + encode_cb_offload4args(xdr, cp->nfserr, &cp->fh, cp, &hdr); |
| 744 | + encode_cb_nops(&hdr); |
| 745 | +} |
| 746 | + |
| 747 | +static int nfs4_xdr_dec_cb_offload(struct rpc_rqst *rqstp, |
| 748 | + struct xdr_stream *xdr, |
| 749 | + void *data) |
| 750 | +{ |
| 751 | + struct nfsd4_callback *cb = data; |
| 752 | + struct nfs4_cb_compound_hdr hdr; |
| 753 | + int status; |
| 754 | + |
| 755 | + status = decode_cb_compound4res(xdr, &hdr); |
| 756 | + if (unlikely(status)) |
| 757 | + return status; |
| 758 | + |
| 759 | + status = decode_cb_sequence4res(xdr, cb); |
| 760 | + if (unlikely(status || cb->cb_seq_status)) |
| 761 | + return status; |
| 762 | + |
| 763 | + return decode_cb_op_status(xdr, OP_CB_OFFLOAD, &cb->cb_status); |
| 764 | +} |
685 | 765 | /* |
686 | 766 | * RPC procedure tables |
687 | 767 | */ |
.. | .. |
703 | 783 | PROC(CB_LAYOUT, COMPOUND, cb_layout, cb_layout), |
704 | 784 | #endif |
705 | 785 | PROC(CB_NOTIFY_LOCK, COMPOUND, cb_notify_lock, cb_notify_lock), |
| 786 | + PROC(CB_OFFLOAD, COMPOUND, cb_offload, cb_offload), |
706 | 787 | }; |
707 | 788 | |
708 | 789 | static unsigned int nfs4_cb_counts[ARRAY_SIZE(nfs4_cb_procedures)]; |
.. | .. |
743 | 824 | static int max_cb_time(struct net *net) |
744 | 825 | { |
745 | 826 | struct nfsd_net *nn = net_generic(net, nfsd_net_id); |
746 | | - return max(nn->nfsd4_lease/10, (time_t)1) * HZ; |
| 827 | + |
| 828 | + /* |
| 829 | + * nfsd4_lease is set to at most one hour in __nfsd4_write_time, |
| 830 | + * so we can use 32-bit math on it. Warn if that assumption |
| 831 | + * ever stops being true. |
| 832 | + */ |
| 833 | + if (WARN_ON_ONCE(nn->nfsd4_lease > 3600)) |
| 834 | + return 360 * HZ; |
| 835 | + |
| 836 | + return max(((u32)nn->nfsd4_lease)/10, 1u) * HZ; |
747 | 837 | } |
748 | 838 | |
749 | | -static struct rpc_cred *get_backchannel_cred(struct nfs4_client *clp, struct rpc_clnt *client, struct nfsd4_session *ses) |
| 839 | +static struct workqueue_struct *callback_wq; |
| 840 | + |
| 841 | +static bool nfsd4_queue_cb(struct nfsd4_callback *cb) |
| 842 | +{ |
| 843 | + return queue_work(callback_wq, &cb->cb_work); |
| 844 | +} |
| 845 | + |
| 846 | +static void nfsd41_cb_inflight_begin(struct nfs4_client *clp) |
| 847 | +{ |
| 848 | + atomic_inc(&clp->cl_cb_inflight); |
| 849 | +} |
| 850 | + |
| 851 | +static void nfsd41_cb_inflight_end(struct nfs4_client *clp) |
| 852 | +{ |
| 853 | + |
| 854 | + if (atomic_dec_and_test(&clp->cl_cb_inflight)) |
| 855 | + wake_up_var(&clp->cl_cb_inflight); |
| 856 | +} |
| 857 | + |
| 858 | +static void nfsd41_cb_inflight_wait_complete(struct nfs4_client *clp) |
| 859 | +{ |
| 860 | + wait_var_event(&clp->cl_cb_inflight, |
| 861 | + !atomic_read(&clp->cl_cb_inflight)); |
| 862 | +} |
| 863 | + |
| 864 | +static const struct cred *get_backchannel_cred(struct nfs4_client *clp, struct rpc_clnt *client, struct nfsd4_session *ses) |
750 | 865 | { |
751 | 866 | if (clp->cl_minorversion == 0) { |
752 | | - char *principal = clp->cl_cred.cr_targ_princ ? |
753 | | - clp->cl_cred.cr_targ_princ : "nfs"; |
754 | | - struct rpc_cred *cred; |
| 867 | + client->cl_principal = clp->cl_cred.cr_targ_princ ? |
| 868 | + clp->cl_cred.cr_targ_princ : "nfs"; |
755 | 869 | |
756 | | - cred = rpc_lookup_machine_cred(principal); |
757 | | - if (!IS_ERR(cred)) |
758 | | - get_rpccred(cred); |
759 | | - return cred; |
| 870 | + return get_cred(rpc_machine_cred()); |
760 | 871 | } else { |
761 | | - struct rpc_auth *auth = client->cl_auth; |
762 | | - struct auth_cred acred = {}; |
| 872 | + struct cred *kcred; |
763 | 873 | |
764 | | - acred.uid = ses->se_cb_sec.uid; |
765 | | - acred.gid = ses->se_cb_sec.gid; |
766 | | - return auth->au_ops->lookup_cred(client->cl_auth, &acred, 0); |
| 874 | + kcred = prepare_kernel_cred(NULL); |
| 875 | + if (!kcred) |
| 876 | + return NULL; |
| 877 | + |
| 878 | + kcred->fsuid = ses->se_cb_sec.uid; |
| 879 | + kcred->fsgid = ses->se_cb_sec.gid; |
| 880 | + return kcred; |
767 | 881 | } |
768 | 882 | } |
769 | 883 | |
.. | .. |
784 | 898 | .program = &cb_program, |
785 | 899 | .version = 1, |
786 | 900 | .flags = (RPC_CLNT_CREATE_NOPING | RPC_CLNT_CREATE_QUIET), |
| 901 | + .cred = current_cred(), |
787 | 902 | }; |
788 | 903 | struct rpc_clnt *client; |
789 | | - struct rpc_cred *cred; |
| 904 | + const struct cred *cred; |
790 | 905 | |
791 | 906 | if (clp->cl_minorversion == 0) { |
792 | 907 | if (!clp->cl_cred.cr_principal && |
793 | | - (clp->cl_cred.cr_flavor >= RPC_AUTH_GSS_KRB5)) |
| 908 | + (clp->cl_cred.cr_flavor >= RPC_AUTH_GSS_KRB5)) { |
| 909 | + trace_nfsd_cb_setup_err(clp, -EINVAL); |
794 | 910 | return -EINVAL; |
| 911 | + } |
795 | 912 | args.client_name = clp->cl_cred.cr_principal; |
796 | 913 | args.prognumber = conn->cb_prog; |
797 | 914 | args.protocol = XPRT_TRANSPORT_TCP; |
.. | .. |
800 | 917 | } else { |
801 | 918 | if (!conn->cb_xprt) |
802 | 919 | return -EINVAL; |
803 | | - clp->cl_cb_conn.cb_xprt = conn->cb_xprt; |
804 | 920 | clp->cl_cb_session = ses; |
805 | 921 | args.bc_xprt = conn->cb_xprt; |
806 | 922 | args.prognumber = clp->cl_cb_session->se_cb_prog; |
.. | .. |
811 | 927 | /* Create RPC client */ |
812 | 928 | client = rpc_create(&args); |
813 | 929 | if (IS_ERR(client)) { |
814 | | - dprintk("NFSD: couldn't create callback client: %ld\n", |
815 | | - PTR_ERR(client)); |
| 930 | + trace_nfsd_cb_setup_err(clp, PTR_ERR(client)); |
816 | 931 | return PTR_ERR(client); |
817 | 932 | } |
818 | 933 | cred = get_backchannel_cred(clp, client, ses); |
819 | | - if (IS_ERR(cred)) { |
| 934 | + if (!cred) { |
| 935 | + trace_nfsd_cb_setup_err(clp, -ENOMEM); |
820 | 936 | rpc_shutdown_client(client); |
821 | | - return PTR_ERR(cred); |
| 937 | + return -ENOMEM; |
822 | 938 | } |
| 939 | + |
| 940 | + if (clp->cl_minorversion != 0) |
| 941 | + clp->cl_cb_conn.cb_xprt = conn->cb_xprt; |
823 | 942 | clp->cl_cb_client = client; |
824 | 943 | clp->cl_cb_cred = cred; |
| 944 | + trace_nfsd_cb_setup(clp); |
825 | 945 | return 0; |
826 | | -} |
827 | | - |
828 | | -static void warn_no_callback_path(struct nfs4_client *clp, int reason) |
829 | | -{ |
830 | | - dprintk("NFSD: warning: no callback path to client %.*s: error %d\n", |
831 | | - (int)clp->cl_name.len, clp->cl_name.data, reason); |
832 | 946 | } |
833 | 947 | |
834 | 948 | static void nfsd4_mark_cb_down(struct nfs4_client *clp, int reason) |
.. | .. |
836 | 950 | if (test_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_flags)) |
837 | 951 | return; |
838 | 952 | clp->cl_cb_state = NFSD4_CB_DOWN; |
839 | | - warn_no_callback_path(clp, reason); |
| 953 | + trace_nfsd_cb_state(clp); |
840 | 954 | } |
841 | 955 | |
842 | 956 | static void nfsd4_mark_cb_fault(struct nfs4_client *clp, int reason) |
.. | .. |
844 | 958 | if (test_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_flags)) |
845 | 959 | return; |
846 | 960 | clp->cl_cb_state = NFSD4_CB_FAULT; |
847 | | - warn_no_callback_path(clp, reason); |
| 961 | + trace_nfsd_cb_state(clp); |
848 | 962 | } |
849 | 963 | |
850 | 964 | static void nfsd4_cb_probe_done(struct rpc_task *task, void *calldata) |
851 | 965 | { |
852 | 966 | struct nfs4_client *clp = container_of(calldata, struct nfs4_client, cl_cb_null); |
853 | 967 | |
| 968 | + trace_nfsd_cb_done(clp, task->tk_status); |
854 | 969 | if (task->tk_status) |
855 | 970 | nfsd4_mark_cb_down(clp, task->tk_status); |
856 | | - else |
| 971 | + else { |
857 | 972 | clp->cl_cb_state = NFSD4_CB_UP; |
| 973 | + trace_nfsd_cb_state(clp); |
| 974 | + } |
| 975 | +} |
| 976 | + |
| 977 | +static void nfsd4_cb_probe_release(void *calldata) |
| 978 | +{ |
| 979 | + struct nfs4_client *clp = container_of(calldata, struct nfs4_client, cl_cb_null); |
| 980 | + |
| 981 | + nfsd41_cb_inflight_end(clp); |
| 982 | + |
858 | 983 | } |
859 | 984 | |
860 | 985 | static const struct rpc_call_ops nfsd4_cb_probe_ops = { |
861 | 986 | /* XXX: release method to ensure we set the cb channel down if |
862 | 987 | * necessary on early failure? */ |
863 | 988 | .rpc_call_done = nfsd4_cb_probe_done, |
| 989 | + .rpc_release = nfsd4_cb_probe_release, |
864 | 990 | }; |
865 | | - |
866 | | -static struct workqueue_struct *callback_wq; |
867 | 991 | |
868 | 992 | /* |
869 | 993 | * Poke the callback thread to process any updates to the callback |
.. | .. |
872 | 996 | void nfsd4_probe_callback(struct nfs4_client *clp) |
873 | 997 | { |
874 | 998 | clp->cl_cb_state = NFSD4_CB_UNKNOWN; |
| 999 | + trace_nfsd_cb_state(clp); |
875 | 1000 | set_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_flags); |
876 | 1001 | nfsd4_run_cb(&clp->cl_cb_null); |
877 | 1002 | } |
.. | .. |
888 | 1013 | spin_lock(&clp->cl_lock); |
889 | 1014 | memcpy(&clp->cl_cb_conn, conn, sizeof(struct nfs4_cb_conn)); |
890 | 1015 | spin_unlock(&clp->cl_lock); |
| 1016 | + trace_nfsd_cb_state(clp); |
891 | 1017 | } |
892 | 1018 | |
893 | 1019 | /* |
.. | .. |
895 | 1021 | * If the slot is available, then mark it busy. Otherwise, set the |
896 | 1022 | * thread for sleeping on the callback RPC wait queue. |
897 | 1023 | */ |
898 | | -static bool nfsd41_cb_get_slot(struct nfs4_client *clp, struct rpc_task *task) |
| 1024 | +static bool nfsd41_cb_get_slot(struct nfsd4_callback *cb, struct rpc_task *task) |
899 | 1025 | { |
900 | | - if (test_and_set_bit(0, &clp->cl_cb_slot_busy) != 0) { |
| 1026 | + struct nfs4_client *clp = cb->cb_clp; |
| 1027 | + |
| 1028 | + if (!cb->cb_holds_slot && |
| 1029 | + test_and_set_bit(0, &clp->cl_cb_slot_busy) != 0) { |
901 | 1030 | rpc_sleep_on(&clp->cl_cb_waitq, task, NULL); |
902 | 1031 | /* Race breaker */ |
903 | 1032 | if (test_and_set_bit(0, &clp->cl_cb_slot_busy) != 0) { |
.. | .. |
906 | 1035 | } |
907 | 1036 | rpc_wake_up_queued_task(&clp->cl_cb_waitq, task); |
908 | 1037 | } |
| 1038 | + cb->cb_holds_slot = true; |
909 | 1039 | return true; |
| 1040 | +} |
| 1041 | + |
| 1042 | +static void nfsd41_cb_release_slot(struct nfsd4_callback *cb) |
| 1043 | +{ |
| 1044 | + struct nfs4_client *clp = cb->cb_clp; |
| 1045 | + |
| 1046 | + if (cb->cb_holds_slot) { |
| 1047 | + cb->cb_holds_slot = false; |
| 1048 | + clear_bit(0, &clp->cl_cb_slot_busy); |
| 1049 | + rpc_wake_up_next(&clp->cl_cb_waitq); |
| 1050 | + } |
| 1051 | +} |
| 1052 | + |
| 1053 | +static void nfsd41_destroy_cb(struct nfsd4_callback *cb) |
| 1054 | +{ |
| 1055 | + struct nfs4_client *clp = cb->cb_clp; |
| 1056 | + |
| 1057 | + nfsd41_cb_release_slot(cb); |
| 1058 | + if (cb->cb_ops && cb->cb_ops->release) |
| 1059 | + cb->cb_ops->release(cb); |
| 1060 | + nfsd41_cb_inflight_end(clp); |
910 | 1061 | } |
911 | 1062 | |
912 | 1063 | /* |
.. | .. |
925 | 1076 | */ |
926 | 1077 | cb->cb_seq_status = 1; |
927 | 1078 | cb->cb_status = 0; |
928 | | - if (minorversion) { |
929 | | - if (!cb->cb_holds_slot && !nfsd41_cb_get_slot(clp, task)) |
930 | | - return; |
931 | | - cb->cb_holds_slot = true; |
932 | | - } |
| 1079 | + if (minorversion && !nfsd41_cb_get_slot(cb, task)) |
| 1080 | + return; |
933 | 1081 | rpc_call_start(task); |
934 | 1082 | } |
935 | 1083 | |
.. | .. |
949 | 1097 | * the submission code will error out, so we don't need to |
950 | 1098 | * handle that case here. |
951 | 1099 | */ |
952 | | - if (task->tk_flags & RPC_TASK_KILLED) |
| 1100 | + if (RPC_SIGNALLED(task)) |
953 | 1101 | goto need_restart; |
954 | 1102 | |
955 | 1103 | return true; |
.. | .. |
971 | 1119 | break; |
972 | 1120 | case -ESERVERFAULT: |
973 | 1121 | ++session->se_cb_seq_nr; |
974 | | - /* Fall through */ |
| 1122 | + fallthrough; |
975 | 1123 | case 1: |
976 | 1124 | case -NFS4ERR_BADSESSION: |
977 | 1125 | nfsd4_mark_cb_fault(cb->cb_clp, cb->cb_seq_status); |
.. | .. |
992 | 1140 | } |
993 | 1141 | break; |
994 | 1142 | default: |
| 1143 | + nfsd4_mark_cb_fault(cb->cb_clp, cb->cb_seq_status); |
995 | 1144 | dprintk("%s: unprocessed error %d\n", __func__, |
996 | 1145 | cb->cb_seq_status); |
997 | 1146 | } |
998 | 1147 | |
999 | | - cb->cb_holds_slot = false; |
1000 | | - clear_bit(0, &clp->cl_cb_slot_busy); |
1001 | | - rpc_wake_up_next(&clp->cl_cb_waitq); |
| 1148 | + nfsd41_cb_release_slot(cb); |
1002 | 1149 | dprintk("%s: freed slot, new seqid=%d\n", __func__, |
1003 | 1150 | clp->cl_cb_session->se_cb_seq_nr); |
1004 | 1151 | |
1005 | | - if (task->tk_flags & RPC_TASK_KILLED) |
| 1152 | + if (RPC_SIGNALLED(task)) |
1006 | 1153 | goto need_restart; |
1007 | 1154 | out: |
1008 | 1155 | return ret; |
.. | .. |
1011 | 1158 | ret = false; |
1012 | 1159 | goto out; |
1013 | 1160 | need_restart: |
1014 | | - task->tk_status = 0; |
1015 | | - cb->cb_need_restart = true; |
| 1161 | + if (!test_bit(NFSD4_CLIENT_CB_KILL, &clp->cl_flags)) { |
| 1162 | + task->tk_status = 0; |
| 1163 | + cb->cb_need_restart = true; |
| 1164 | + } |
1016 | 1165 | return false; |
1017 | 1166 | } |
1018 | 1167 | |
.. | .. |
1021 | 1170 | struct nfsd4_callback *cb = calldata; |
1022 | 1171 | struct nfs4_client *clp = cb->cb_clp; |
1023 | 1172 | |
1024 | | - dprintk("%s: minorversion=%d\n", __func__, |
1025 | | - clp->cl_minorversion); |
| 1173 | + trace_nfsd_cb_done(clp, task->tk_status); |
1026 | 1174 | |
1027 | 1175 | if (!nfsd4_cb_sequence_done(task, cb)) |
1028 | 1176 | return; |
.. | .. |
1038 | 1186 | rpc_restart_call_prepare(task); |
1039 | 1187 | return; |
1040 | 1188 | case 1: |
1041 | | - break; |
1042 | | - case -1: |
1043 | | - /* Network partition? */ |
1044 | | - nfsd4_mark_cb_down(clp, task->tk_status); |
| 1189 | + switch (task->tk_status) { |
| 1190 | + case -EIO: |
| 1191 | + case -ETIMEDOUT: |
| 1192 | + case -EACCES: |
| 1193 | + nfsd4_mark_cb_down(clp, task->tk_status); |
| 1194 | + } |
1045 | 1195 | break; |
1046 | 1196 | default: |
1047 | 1197 | BUG(); |
.. | .. |
1053 | 1203 | struct nfsd4_callback *cb = calldata; |
1054 | 1204 | |
1055 | 1205 | if (cb->cb_need_restart) |
1056 | | - nfsd4_run_cb(cb); |
| 1206 | + nfsd4_queue_cb(cb); |
1057 | 1207 | else |
1058 | | - cb->cb_ops->release(cb); |
| 1208 | + nfsd41_destroy_cb(cb); |
1059 | 1209 | |
1060 | 1210 | } |
1061 | 1211 | |
.. | .. |
1089 | 1239 | */ |
1090 | 1240 | nfsd4_run_cb(&clp->cl_cb_null); |
1091 | 1241 | flush_workqueue(callback_wq); |
| 1242 | + nfsd41_cb_inflight_wait_complete(clp); |
1092 | 1243 | } |
1093 | 1244 | |
1094 | 1245 | /* requires cl_lock: */ |
.. | .. |
1106 | 1257 | return NULL; |
1107 | 1258 | } |
1108 | 1259 | |
| 1260 | +/* |
| 1261 | + * Note there isn't a lot of locking in this code; instead we depend on |
| 1262 | + * the fact that it is run from the callback_wq, which won't run two |
| 1263 | + * work items at once. So, for example, callback_wq handles all access |
| 1264 | + * of cl_cb_client and all calls to rpc_create or rpc_shutdown_client. |
| 1265 | + */ |
1109 | 1266 | static void nfsd4_process_cb_update(struct nfsd4_callback *cb) |
1110 | 1267 | { |
1111 | 1268 | struct nfs4_cb_conn conn; |
.. | .. |
1119 | 1276 | * kill the old client: |
1120 | 1277 | */ |
1121 | 1278 | if (clp->cl_cb_client) { |
| 1279 | + trace_nfsd_cb_shutdown(clp); |
1122 | 1280 | rpc_shutdown_client(clp->cl_cb_client); |
1123 | 1281 | clp->cl_cb_client = NULL; |
1124 | | - put_rpccred(clp->cl_cb_cred); |
| 1282 | + put_cred(clp->cl_cb_cred); |
1125 | 1283 | clp->cl_cb_cred = NULL; |
1126 | 1284 | } |
1127 | 1285 | if (clp->cl_cb_conn.cb_xprt) { |
.. | .. |
1162 | 1320 | container_of(work, struct nfsd4_callback, cb_work); |
1163 | 1321 | struct nfs4_client *clp = cb->cb_clp; |
1164 | 1322 | struct rpc_clnt *clnt; |
| 1323 | + int flags; |
| 1324 | + |
| 1325 | + trace_nfsd_cb_work(clp, cb->cb_msg.rpc_proc->p_name); |
1165 | 1326 | |
1166 | 1327 | if (cb->cb_need_restart) { |
1167 | 1328 | cb->cb_need_restart = false; |
.. | .. |
1176 | 1337 | clnt = clp->cl_cb_client; |
1177 | 1338 | if (!clnt) { |
1178 | 1339 | /* Callback channel broken, or client killed; give up: */ |
1179 | | - if (cb->cb_ops && cb->cb_ops->release) |
1180 | | - cb->cb_ops->release(cb); |
| 1340 | + nfsd41_destroy_cb(cb); |
1181 | 1341 | return; |
1182 | 1342 | } |
1183 | 1343 | |
.. | .. |
1186 | 1346 | */ |
1187 | 1347 | if (!cb->cb_ops && clp->cl_minorversion) { |
1188 | 1348 | clp->cl_cb_state = NFSD4_CB_UP; |
| 1349 | + nfsd41_destroy_cb(cb); |
1189 | 1350 | return; |
1190 | 1351 | } |
1191 | 1352 | |
1192 | 1353 | cb->cb_msg.rpc_cred = clp->cl_cb_cred; |
1193 | | - rpc_call_async(clnt, &cb->cb_msg, RPC_TASK_SOFT | RPC_TASK_SOFTCONN, |
| 1354 | + flags = clp->cl_minorversion ? RPC_TASK_NOCONNECT : RPC_TASK_SOFTCONN; |
| 1355 | + rpc_call_async(clnt, &cb->cb_msg, RPC_TASK_SOFT | flags, |
1194 | 1356 | cb->cb_ops ? &nfsd4_cb_ops : &nfsd4_cb_probe_ops, cb); |
1195 | 1357 | } |
1196 | 1358 | |
.. | .. |
1211 | 1373 | |
1212 | 1374 | void nfsd4_run_cb(struct nfsd4_callback *cb) |
1213 | 1375 | { |
1214 | | - queue_work(callback_wq, &cb->cb_work); |
| 1376 | + struct nfs4_client *clp = cb->cb_clp; |
| 1377 | + |
| 1378 | + nfsd41_cb_inflight_begin(clp); |
| 1379 | + if (!nfsd4_queue_cb(cb)) |
| 1380 | + nfsd41_cb_inflight_end(clp); |
1215 | 1381 | } |
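
For reference, a minimal kernel-style C sketch (not part of the patch; the struct and function names below are illustrative) of the in-flight callback accounting the hunks above introduce: each submission bumps a per-client counter, each completion path drops it, and the shutdown path waits for the counter to reach zero even after flushing callback_wq.

/* Illustrative only: mirrors the nfsd41_cb_inflight_{begin,end,wait_complete} pattern. */
#include <linux/atomic.h>
#include <linux/wait_bit.h>

struct demo_cb_client {
	atomic_t cb_inflight;		/* callbacks queued or still running */
};

static void demo_cb_inflight_begin(struct demo_cb_client *clp)
{
	atomic_inc(&clp->cb_inflight);
}

static void demo_cb_inflight_end(struct demo_cb_client *clp)
{
	/* the last completion wakes any waiter in demo_cb_inflight_drain() */
	if (atomic_dec_and_test(&clp->cb_inflight))
		wake_up_var(&clp->cb_inflight);
}

static void demo_cb_inflight_drain(struct demo_cb_client *clp)
{
	/* sleep until every begin() has been matched by an end() */
	wait_var_event(&clp->cb_inflight, !atomic_read(&clp->cb_inflight));
}

In the patch, the counter is decremented from both the probe release path and nfsd41_destroy_cb(), so every path that can complete a queued callback is covered before the drain returns.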