2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
kernel/fs/nfsd/nfs4callback.c
@@ -38,7 +38,9 @@
 #include "nfsd.h"
 #include "state.h"
 #include "netns.h"
+#include "trace.h"
 #include "xdr4cb.h"
+#include "xdr4.h"
 
 #define NFSDDBG_FACILITY NFSDDBG_PROC
 
@@ -58,16 +60,6 @@
 	/* res */
 	int status;
 };
-
-/*
- * Handle decode buffer overflows out-of-line.
- */
-static void print_overflow_msg(const char *func, const struct xdr_stream *xdr)
-{
-	dprintk("NFS: %s prematurely hit the end of our receive buffer. "
-		"Remaining buffer length is %tu words.\n",
-		func, xdr->end - xdr->p);
-}
 
 static __be32 *xdr_encode_empty_array(__be32 *p)
 {
@@ -105,6 +97,7 @@
 	OP_CB_WANTS_CANCELLED = 12,
 	OP_CB_NOTIFY_LOCK = 13,
 	OP_CB_NOTIFY_DEVICEID = 14,
+	OP_CB_OFFLOAD = 15,
 	OP_CB_ILLEGAL = 10044
 };
 
@@ -238,7 +231,6 @@
 	*status = nfs_cb_stat_to_errno(be32_to_cpup(p));
 	return 0;
 out_overflow:
-	print_overflow_msg(__func__, xdr);
 	return -EIO;
 out_unexpected:
 	dprintk("NFSD: Callback server returned operation %d but "
@@ -307,7 +299,6 @@
 	hdr->nops = be32_to_cpup(p);
 	return 0;
 out_overflow:
-	print_overflow_msg(__func__, xdr);
 	return -EIO;
 }
 
@@ -435,7 +426,6 @@
 	cb->cb_seq_status = status;
 	return status;
 out_overflow:
-	print_overflow_msg(__func__, xdr);
 	status = -EIO;
 	goto out;
 }
@@ -523,11 +513,9 @@
 	if (unlikely(status))
 		return status;
 
-	if (cb != NULL) {
-		status = decode_cb_sequence4res(xdr, cb);
-		if (unlikely(status || cb->cb_seq_status))
-			return status;
-	}
+	status = decode_cb_sequence4res(xdr, cb);
+	if (unlikely(status || cb->cb_seq_status))
+		return status;
 
 	return decode_cb_op_status(xdr, OP_CB_RECALL, &cb->cb_status);
 }
@@ -615,11 +603,10 @@
 	if (unlikely(status))
 		return status;
 
-	if (cb) {
-		status = decode_cb_sequence4res(xdr, cb);
-		if (unlikely(status || cb->cb_seq_status))
-			return status;
-	}
+	status = decode_cb_sequence4res(xdr, cb);
+	if (unlikely(status || cb->cb_seq_status))
+		return status;
+
 	return decode_cb_op_status(xdr, OP_CB_LAYOUTRECALL, &cb->cb_status);
 }
 #endif /* CONFIG_NFSD_PNFS */
@@ -674,14 +661,107 @@
 	if (unlikely(status))
 		return status;
 
-	if (cb) {
-		status = decode_cb_sequence4res(xdr, cb);
-		if (unlikely(status || cb->cb_seq_status))
-			return status;
-	}
+	status = decode_cb_sequence4res(xdr, cb);
+	if (unlikely(status || cb->cb_seq_status))
+		return status;
+
 	return decode_cb_op_status(xdr, OP_CB_NOTIFY_LOCK, &cb->cb_status);
 }
 
+/*
+ * struct write_response4 {
+ *	stateid4	wr_callback_id<1>;
+ *	length4		wr_count;
+ *	stable_how4	wr_committed;
+ *	verifier4	wr_writeverf;
+ * };
+ * union offload_info4 switch (nfsstat4 coa_status) {
+ *	case NFS4_OK:
+ *		write_response4	coa_resok4;
+ *	default:
+ *		length4		coa_bytes_copied;
+ * };
+ * struct CB_OFFLOAD4args {
+ *	nfs_fh4		coa_fh;
+ *	stateid4	coa_stateid;
+ *	offload_info4	coa_offload_info;
+ * };
+ */
+static void encode_offload_info4(struct xdr_stream *xdr,
+				 __be32 nfserr,
+				 const struct nfsd4_copy *cp)
+{
+	__be32 *p;
+
+	p = xdr_reserve_space(xdr, 4);
+	*p++ = nfserr;
+	if (!nfserr) {
+		p = xdr_reserve_space(xdr, 4 + 8 + 4 + NFS4_VERIFIER_SIZE);
+		p = xdr_encode_empty_array(p);
+		p = xdr_encode_hyper(p, cp->cp_res.wr_bytes_written);
+		*p++ = cpu_to_be32(cp->cp_res.wr_stable_how);
+		p = xdr_encode_opaque_fixed(p, cp->cp_res.wr_verifier.data,
+					    NFS4_VERIFIER_SIZE);
+	} else {
+		p = xdr_reserve_space(xdr, 8);
+		/* We always return success if bytes were written */
+		p = xdr_encode_hyper(p, 0);
+	}
+}
+
+static void encode_cb_offload4args(struct xdr_stream *xdr,
+				   __be32 nfserr,
+				   const struct knfsd_fh *fh,
+				   const struct nfsd4_copy *cp,
+				   struct nfs4_cb_compound_hdr *hdr)
+{
+	__be32 *p;
+
+	p = xdr_reserve_space(xdr, 4);
+	*p++ = cpu_to_be32(OP_CB_OFFLOAD);
+	encode_nfs_fh4(xdr, fh);
+	encode_stateid4(xdr, &cp->cp_res.cb_stateid);
+	encode_offload_info4(xdr, nfserr, cp);
+
+	hdr->nops++;
+}
+
+static void nfs4_xdr_enc_cb_offload(struct rpc_rqst *req,
+				    struct xdr_stream *xdr,
+				    const void *data)
+{
+	const struct nfsd4_callback *cb = data;
+	const struct nfsd4_copy *cp =
+		container_of(cb, struct nfsd4_copy, cp_cb);
+	struct nfs4_cb_compound_hdr hdr = {
+		.ident = 0,
+		.minorversion = cb->cb_clp->cl_minorversion,
+	};
+
+	encode_cb_compound4args(xdr, &hdr);
+	encode_cb_sequence4args(xdr, cb, &hdr);
+	encode_cb_offload4args(xdr, cp->nfserr, &cp->fh, cp, &hdr);
+	encode_cb_nops(&hdr);
+}
+
+static int nfs4_xdr_dec_cb_offload(struct rpc_rqst *rqstp,
+				   struct xdr_stream *xdr,
+				   void *data)
+{
+	struct nfsd4_callback *cb = data;
+	struct nfs4_cb_compound_hdr hdr;
+	int status;
+
+	status = decode_cb_compound4res(xdr, &hdr);
+	if (unlikely(status))
+		return status;
+
+	status = decode_cb_sequence4res(xdr, cb);
+	if (unlikely(status || cb->cb_seq_status))
+		return status;
+
+	return decode_cb_op_status(xdr, OP_CB_OFFLOAD, &cb->cb_status);
+}
 /*
  * RPC procedure tables
 */
@@ -703,6 +783,7 @@
 	PROC(CB_LAYOUT, COMPOUND, cb_layout, cb_layout),
 #endif
 	PROC(CB_NOTIFY_LOCK, COMPOUND, cb_notify_lock, cb_notify_lock),
+	PROC(CB_OFFLOAD, COMPOUND, cb_offload, cb_offload),
 };
 
 static unsigned int nfs4_cb_counts[ARRAY_SIZE(nfs4_cb_procedures)];
@@ -743,27 +824,60 @@
 static int max_cb_time(struct net *net)
 {
 	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
-	return max(nn->nfsd4_lease/10, (time_t)1) * HZ;
+
+	/*
+	 * nfsd4_lease is set to at most one hour in __nfsd4_write_time,
+	 * so we can use 32-bit math on it. Warn if that assumption
+	 * ever stops being true.
+	 */
+	if (WARN_ON_ONCE(nn->nfsd4_lease > 3600))
+		return 360 * HZ;
+
+	return max(((u32)nn->nfsd4_lease)/10, 1u) * HZ;
 }
 
-static struct rpc_cred *get_backchannel_cred(struct nfs4_client *clp, struct rpc_clnt *client, struct nfsd4_session *ses)
+static struct workqueue_struct *callback_wq;
+
+static bool nfsd4_queue_cb(struct nfsd4_callback *cb)
+{
+	return queue_work(callback_wq, &cb->cb_work);
+}
+
+static void nfsd41_cb_inflight_begin(struct nfs4_client *clp)
+{
+	atomic_inc(&clp->cl_cb_inflight);
+}
+
+static void nfsd41_cb_inflight_end(struct nfs4_client *clp)
+{
+
+	if (atomic_dec_and_test(&clp->cl_cb_inflight))
+		wake_up_var(&clp->cl_cb_inflight);
+}
+
+static void nfsd41_cb_inflight_wait_complete(struct nfs4_client *clp)
+{
+	wait_var_event(&clp->cl_cb_inflight,
+			!atomic_read(&clp->cl_cb_inflight));
+}
+
+static const struct cred *get_backchannel_cred(struct nfs4_client *clp, struct rpc_clnt *client, struct nfsd4_session *ses)
 {
 	if (clp->cl_minorversion == 0) {
-		char *principal = clp->cl_cred.cr_targ_princ ?
-			clp->cl_cred.cr_targ_princ : "nfs";
-		struct rpc_cred *cred;
+		client->cl_principal = clp->cl_cred.cr_targ_princ ?
+			clp->cl_cred.cr_targ_princ : "nfs";
 
-		cred = rpc_lookup_machine_cred(principal);
-		if (!IS_ERR(cred))
-			get_rpccred(cred);
-		return cred;
+		return get_cred(rpc_machine_cred());
 	} else {
-		struct rpc_auth *auth = client->cl_auth;
-		struct auth_cred acred = {};
+		struct cred *kcred;
 
-		acred.uid = ses->se_cb_sec.uid;
-		acred.gid = ses->se_cb_sec.gid;
-		return auth->au_ops->lookup_cred(client->cl_auth, &acred, 0);
+		kcred = prepare_kernel_cred(NULL);
+		if (!kcred)
+			return NULL;
+
+		kcred->fsuid = ses->se_cb_sec.uid;
+		kcred->fsgid = ses->se_cb_sec.gid;
+		return kcred;
 	}
 }
 
@@ -784,14 +898,17 @@
 		.program = &cb_program,
 		.version = 1,
 		.flags = (RPC_CLNT_CREATE_NOPING | RPC_CLNT_CREATE_QUIET),
+		.cred = current_cred(),
 	};
 	struct rpc_clnt *client;
-	struct rpc_cred *cred;
+	const struct cred *cred;
 
 	if (clp->cl_minorversion == 0) {
 		if (!clp->cl_cred.cr_principal &&
-		    (clp->cl_cred.cr_flavor >= RPC_AUTH_GSS_KRB5))
+		    (clp->cl_cred.cr_flavor >= RPC_AUTH_GSS_KRB5)) {
+			trace_nfsd_cb_setup_err(clp, -EINVAL);
 			return -EINVAL;
+		}
 		args.client_name = clp->cl_cred.cr_principal;
 		args.prognumber = conn->cb_prog;
 		args.protocol = XPRT_TRANSPORT_TCP;
@@ -800,7 +917,6 @@
 	} else {
 		if (!conn->cb_xprt)
 			return -EINVAL;
-		clp->cl_cb_conn.cb_xprt = conn->cb_xprt;
 		clp->cl_cb_session = ses;
 		args.bc_xprt = conn->cb_xprt;
 		args.prognumber = clp->cl_cb_session->se_cb_prog;
@@ -811,24 +927,22 @@
 	/* Create RPC client */
 	client = rpc_create(&args);
 	if (IS_ERR(client)) {
-		dprintk("NFSD: couldn't create callback client: %ld\n",
-			PTR_ERR(client));
+		trace_nfsd_cb_setup_err(clp, PTR_ERR(client));
 		return PTR_ERR(client);
 	}
 	cred = get_backchannel_cred(clp, client, ses);
-	if (IS_ERR(cred)) {
+	if (!cred) {
+		trace_nfsd_cb_setup_err(clp, -ENOMEM);
 		rpc_shutdown_client(client);
-		return PTR_ERR(cred);
+		return -ENOMEM;
 	}
+
+	if (clp->cl_minorversion != 0)
+		clp->cl_cb_conn.cb_xprt = conn->cb_xprt;
 	clp->cl_cb_client = client;
 	clp->cl_cb_cred = cred;
+	trace_nfsd_cb_setup(clp);
 	return 0;
-}
-
-static void warn_no_callback_path(struct nfs4_client *clp, int reason)
-{
-	dprintk("NFSD: warning: no callback path to client %.*s: error %d\n",
-		(int)clp->cl_name.len, clp->cl_name.data, reason);
 }
 
 static void nfsd4_mark_cb_down(struct nfs4_client *clp, int reason)
@@ -836,7 +950,7 @@
 	if (test_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_flags))
 		return;
 	clp->cl_cb_state = NFSD4_CB_DOWN;
-	warn_no_callback_path(clp, reason);
+	trace_nfsd_cb_state(clp);
 }
 
 static void nfsd4_mark_cb_fault(struct nfs4_client *clp, int reason)
@@ -844,26 +958,36 @@
 	if (test_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_flags))
 		return;
 	clp->cl_cb_state = NFSD4_CB_FAULT;
-	warn_no_callback_path(clp, reason);
+	trace_nfsd_cb_state(clp);
 }
 
 static void nfsd4_cb_probe_done(struct rpc_task *task, void *calldata)
 {
 	struct nfs4_client *clp = container_of(calldata, struct nfs4_client, cl_cb_null);
 
+	trace_nfsd_cb_done(clp, task->tk_status);
 	if (task->tk_status)
 		nfsd4_mark_cb_down(clp, task->tk_status);
-	else
+	else {
 		clp->cl_cb_state = NFSD4_CB_UP;
+		trace_nfsd_cb_state(clp);
+	}
+}
+
+static void nfsd4_cb_probe_release(void *calldata)
+{
+	struct nfs4_client *clp = container_of(calldata, struct nfs4_client, cl_cb_null);
+
+	nfsd41_cb_inflight_end(clp);
+
 }
 
 static const struct rpc_call_ops nfsd4_cb_probe_ops = {
 	/* XXX: release method to ensure we set the cb channel down if
 	 * necessary on early failure? */
 	.rpc_call_done = nfsd4_cb_probe_done,
+	.rpc_release = nfsd4_cb_probe_release,
 };
-
-static struct workqueue_struct *callback_wq;
 
 /*
  * Poke the callback thread to process any updates to the callback
@@ -872,6 +996,7 @@
 void nfsd4_probe_callback(struct nfs4_client *clp)
 {
 	clp->cl_cb_state = NFSD4_CB_UNKNOWN;
+	trace_nfsd_cb_state(clp);
 	set_bit(NFSD4_CLIENT_CB_UPDATE, &clp->cl_flags);
 	nfsd4_run_cb(&clp->cl_cb_null);
 }
@@ -888,6 +1013,7 @@
 	spin_lock(&clp->cl_lock);
 	memcpy(&clp->cl_cb_conn, conn, sizeof(struct nfs4_cb_conn));
 	spin_unlock(&clp->cl_lock);
+	trace_nfsd_cb_state(clp);
 }
 
 /*
@@ -895,9 +1021,12 @@
  * If the slot is available, then mark it busy. Otherwise, set the
  * thread for sleeping on the callback RPC wait queue.
 */
-static bool nfsd41_cb_get_slot(struct nfs4_client *clp, struct rpc_task *task)
+static bool nfsd41_cb_get_slot(struct nfsd4_callback *cb, struct rpc_task *task)
 {
-	if (test_and_set_bit(0, &clp->cl_cb_slot_busy) != 0) {
+	struct nfs4_client *clp = cb->cb_clp;
+
+	if (!cb->cb_holds_slot &&
+	    test_and_set_bit(0, &clp->cl_cb_slot_busy) != 0) {
 		rpc_sleep_on(&clp->cl_cb_waitq, task, NULL);
 		/* Race breaker */
 		if (test_and_set_bit(0, &clp->cl_cb_slot_busy) != 0) {
@@ -906,7 +1035,29 @@
 		}
 		rpc_wake_up_queued_task(&clp->cl_cb_waitq, task);
 	}
+	cb->cb_holds_slot = true;
 	return true;
+}
+
+static void nfsd41_cb_release_slot(struct nfsd4_callback *cb)
+{
+	struct nfs4_client *clp = cb->cb_clp;
+
+	if (cb->cb_holds_slot) {
+		cb->cb_holds_slot = false;
+		clear_bit(0, &clp->cl_cb_slot_busy);
+		rpc_wake_up_next(&clp->cl_cb_waitq);
+	}
+}
+
+static void nfsd41_destroy_cb(struct nfsd4_callback *cb)
+{
+	struct nfs4_client *clp = cb->cb_clp;
+
+	nfsd41_cb_release_slot(cb);
+	if (cb->cb_ops && cb->cb_ops->release)
+		cb->cb_ops->release(cb);
+	nfsd41_cb_inflight_end(clp);
 }
 
 /*
@@ -925,11 +1076,8 @@
 	 */
 	cb->cb_seq_status = 1;
 	cb->cb_status = 0;
-	if (minorversion) {
-		if (!cb->cb_holds_slot && !nfsd41_cb_get_slot(clp, task))
-			return;
-		cb->cb_holds_slot = true;
-	}
+	if (minorversion && !nfsd41_cb_get_slot(cb, task))
+		return;
 	rpc_call_start(task);
 }
 
@@ -949,7 +1097,7 @@
 	 * the submission code will error out, so we don't need to
 	 * handle that case here.
 	 */
-	if (task->tk_flags & RPC_TASK_KILLED)
+	if (RPC_SIGNALLED(task))
 		goto need_restart;
 
 	return true;
@@ -971,7 +1119,7 @@
 		break;
 	case -ESERVERFAULT:
 		++session->se_cb_seq_nr;
-		/* Fall through */
+		fallthrough;
 	case 1:
 	case -NFS4ERR_BADSESSION:
 		nfsd4_mark_cb_fault(cb->cb_clp, cb->cb_seq_status);
@@ -992,17 +1140,16 @@
 		}
 		break;
 	default:
+		nfsd4_mark_cb_fault(cb->cb_clp, cb->cb_seq_status);
 		dprintk("%s: unprocessed error %d\n", __func__,
 			cb->cb_seq_status);
 	}
 
-	cb->cb_holds_slot = false;
-	clear_bit(0, &clp->cl_cb_slot_busy);
-	rpc_wake_up_next(&clp->cl_cb_waitq);
+	nfsd41_cb_release_slot(cb);
 	dprintk("%s: freed slot, new seqid=%d\n", __func__,
 		clp->cl_cb_session->se_cb_seq_nr);
 
-	if (task->tk_flags & RPC_TASK_KILLED)
+	if (RPC_SIGNALLED(task))
 		goto need_restart;
 out:
 	return ret;
@@ -1011,8 +1158,10 @@
 	ret = false;
 	goto out;
 need_restart:
-	task->tk_status = 0;
-	cb->cb_need_restart = true;
+	if (!test_bit(NFSD4_CLIENT_CB_KILL, &clp->cl_flags)) {
+		task->tk_status = 0;
+		cb->cb_need_restart = true;
+	}
 	return false;
 }
 
@@ -1021,8 +1170,7 @@
 	struct nfsd4_callback *cb = calldata;
 	struct nfs4_client *clp = cb->cb_clp;
 
-	dprintk("%s: minorversion=%d\n", __func__,
-		clp->cl_minorversion);
+	trace_nfsd_cb_done(clp, task->tk_status);
 
 	if (!nfsd4_cb_sequence_done(task, cb))
 		return;
@@ -1038,10 +1186,12 @@
 		rpc_restart_call_prepare(task);
 		return;
 	case 1:
-		break;
-	case -1:
-		/* Network partition? */
-		nfsd4_mark_cb_down(clp, task->tk_status);
+		switch (task->tk_status) {
+		case -EIO:
+		case -ETIMEDOUT:
+		case -EACCES:
+			nfsd4_mark_cb_down(clp, task->tk_status);
+		}
 		break;
 	default:
 		BUG();
@@ -1053,9 +1203,9 @@
 	struct nfsd4_callback *cb = calldata;
 
 	if (cb->cb_need_restart)
-		nfsd4_run_cb(cb);
+		nfsd4_queue_cb(cb);
 	else
-		cb->cb_ops->release(cb);
+		nfsd41_destroy_cb(cb);
 
 }
 
@@ -1089,6 +1239,7 @@
 	 */
 	nfsd4_run_cb(&clp->cl_cb_null);
 	flush_workqueue(callback_wq);
+	nfsd41_cb_inflight_wait_complete(clp);
 }
 
 /* requires cl_lock: */
@@ -1106,6 +1257,12 @@
 	return NULL;
 }
 
+/*
+ * Note there isn't a lot of locking in this code; instead we depend on
+ * the fact that it is run from the callback_wq, which won't run two
+ * work items at once. So, for example, callback_wq handles all access
+ * of cl_cb_client and all calls to rpc_create or rpc_shutdown_client.
+ */
 static void nfsd4_process_cb_update(struct nfsd4_callback *cb)
 {
 	struct nfs4_cb_conn conn;
@@ -1119,9 +1276,10 @@
 	 * kill the old client:
 	 */
 	if (clp->cl_cb_client) {
+		trace_nfsd_cb_shutdown(clp);
 		rpc_shutdown_client(clp->cl_cb_client);
 		clp->cl_cb_client = NULL;
-		put_rpccred(clp->cl_cb_cred);
+		put_cred(clp->cl_cb_cred);
 		clp->cl_cb_cred = NULL;
 	}
 	if (clp->cl_cb_conn.cb_xprt) {
@@ -1162,6 +1320,9 @@
 		container_of(work, struct nfsd4_callback, cb_work);
 	struct nfs4_client *clp = cb->cb_clp;
 	struct rpc_clnt *clnt;
+	int flags;
+
+	trace_nfsd_cb_work(clp, cb->cb_msg.rpc_proc->p_name);
 
 	if (cb->cb_need_restart) {
 		cb->cb_need_restart = false;
@@ -1176,8 +1337,7 @@
 	clnt = clp->cl_cb_client;
 	if (!clnt) {
 		/* Callback channel broken, or client killed; give up: */
-		if (cb->cb_ops && cb->cb_ops->release)
-			cb->cb_ops->release(cb);
+		nfsd41_destroy_cb(cb);
 		return;
 	}
 
@@ -1186,11 +1346,13 @@
 	 */
 	if (!cb->cb_ops && clp->cl_minorversion) {
 		clp->cl_cb_state = NFSD4_CB_UP;
+		nfsd41_destroy_cb(cb);
 		return;
 	}
 
 	cb->cb_msg.rpc_cred = clp->cl_cb_cred;
-	rpc_call_async(clnt, &cb->cb_msg, RPC_TASK_SOFT | RPC_TASK_SOFTCONN,
+	flags = clp->cl_minorversion ? RPC_TASK_NOCONNECT : RPC_TASK_SOFTCONN;
+	rpc_call_async(clnt, &cb->cb_msg, RPC_TASK_SOFT | flags,
 		cb->cb_ops ? &nfsd4_cb_ops : &nfsd4_cb_probe_ops, cb);
 }
 
@@ -1211,5 +1373,9 @@
 
 void nfsd4_run_cb(struct nfsd4_callback *cb)
 {
-	queue_work(callback_wq, &cb->cb_work);
+	struct nfs4_client *clp = cb->cb_clp;
+
+	nfsd41_cb_inflight_begin(clp);
+	if (!nfsd4_queue_cb(cb))
+		nfsd41_cb_inflight_end(clp);
 }