2024-12-19 9370bb92b2d16684ee45cf24e879c93c509162da
kernel/fs/nfs/pnfs.c
@@ -92,6 +92,17 @@
 	return local;
 }
 
+const struct pnfs_layoutdriver_type *pnfs_find_layoutdriver(u32 id)
+{
+	return find_pnfs_driver(id);
+}
+
+void pnfs_put_layoutdriver(const struct pnfs_layoutdriver_type *ld)
+{
+	if (ld)
+		module_put(ld->owner);
+}
+
 void
 unset_pnfs_layoutdriver(struct nfs_server *nfss)
 {
@@ -268,14 +279,14 @@
 	struct nfs_server *server = NFS_SERVER(lo->plh_inode);
 	struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;
 
-	if (!list_empty(&lo->plh_layouts)) {
+	if (test_and_clear_bit(NFS_LAYOUT_HASHED, &lo->plh_flags)) {
 		struct nfs_client *clp = server->nfs_client;
 
 		spin_lock(&clp->cl_lock);
-		list_del_init(&lo->plh_layouts);
+		list_del_rcu(&lo->plh_layouts);
 		spin_unlock(&clp->cl_lock);
 	}
-	put_rpccred(lo->plh_lc_cred);
+	put_cred(lo->plh_lc_cred);
 	return ld->free_layout_hdr(lo);
 }
 
@@ -314,6 +325,31 @@
 	}
 }
 
+static struct inode *
+pnfs_grab_inode_layout_hdr(struct pnfs_layout_hdr *lo)
+{
+	struct inode *inode = igrab(lo->plh_inode);
+	if (inode)
+		return inode;
+	set_bit(NFS_LAYOUT_INODE_FREEING, &lo->plh_flags);
+	return NULL;
+}
+
+/*
+ * Compare 2 layout stateid sequence ids, to see which is newer,
+ * taking into account wraparound issues.
+ */
+static bool pnfs_seqid_is_newer(u32 s1, u32 s2)
+{
+	return (s32)(s1 - s2) > 0;
+}
+
+static void pnfs_barrier_update(struct pnfs_layout_hdr *lo, u32 newseq)
+{
+	if (pnfs_seqid_is_newer(newseq, lo->plh_barrier) || !lo->plh_barrier)
+		lo->plh_barrier = newseq;
+}
+
 static void
 pnfs_set_plh_return_info(struct pnfs_layout_hdr *lo, enum pnfs_iomode iomode,
 		u32 seq)
@@ -322,10 +358,15 @@
 		iomode = IOMODE_ANY;
 	lo->plh_return_iomode = iomode;
 	set_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags);
-	if (seq != 0) {
-		WARN_ON_ONCE(lo->plh_return_seq != 0 && lo->plh_return_seq != seq);
+	/*
+	 * We must set lo->plh_return_seq to avoid livelocks with
+	 * pnfs_layout_need_return()
+	 */
+	if (seq == 0)
+		seq = be32_to_cpu(lo->plh_stateid.seqid);
+	if (!lo->plh_return_seq || pnfs_seqid_is_newer(seq, lo->plh_return_seq))
 		lo->plh_return_seq = seq;
-	}
+	pnfs_barrier_update(lo, seq);
 }
 
 static void
@@ -364,9 +405,10 @@
 }
 
 /*
- * Update the seqid of a layout stateid
+ * Update the seqid of a layout stateid after receiving
+ * NFS4ERR_OLD_STATEID
  */
-bool nfs4_layoutreturn_refresh_stateid(nfs4_stateid *dst,
+bool nfs4_layout_refresh_old_stateid(nfs4_stateid *dst,
 		struct pnfs_layout_range *dst_range,
 		struct inode *inode)
 {
@@ -382,7 +424,15 @@
 
 	spin_lock(&inode->i_lock);
 	lo = NFS_I(inode)->layout;
-	if (lo && nfs4_stateid_match_other(dst, &lo->plh_stateid)) {
+	if (lo && pnfs_layout_is_valid(lo) &&
+	    nfs4_stateid_match_other(dst, &lo->plh_stateid)) {
+		/* Is our call using the most recent seqid? If so, bump it */
+		if (!nfs4_stateid_is_newer(&lo->plh_stateid, dst)) {
+			nfs4_stateid_seqid_inc(dst);
+			ret = true;
+			goto out;
+		}
+		/* Try to update the seqid to the most recent */
 		err = pnfs_mark_matching_lsegs_return(lo, &head, &range, 0);
 		if (err != -EBUSY) {
 			dst->seqid = lo->plh_stateid.seqid;
@@ -390,6 +440,7 @@
 			ret = true;
 		}
 	}
+out:
 	spin_unlock(&inode->i_lock);
 	pnfs_free_lseg_list(&head);
 	return ret;
@@ -418,6 +469,7 @@
 		pnfs_clear_lseg_state(lseg, lseg_list);
 	pnfs_clear_layoutreturn_info(lo);
 	pnfs_free_returned_lsegs(lo, lseg_list, &range, 0);
+	set_bit(NFS_LAYOUT_DRAIN, &lo->plh_flags);
 	if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags) &&
 	    !test_and_set_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags))
 		pnfs_clear_layoutreturn_waitbit(lo);
@@ -491,6 +543,7 @@
 {
 	INIT_LIST_HEAD(&lseg->pls_list);
 	INIT_LIST_HEAD(&lseg->pls_lc_list);
+	INIT_LIST_HEAD(&lseg->pls_commits);
 	refcount_set(&lseg->pls_refcount, 1);
 	set_bit(NFS_LSEG_VALID, &lseg->pls_flags);
 	lseg->pls_layout = lo;
@@ -616,15 +669,6 @@
 			rv = 1;
 	}
 	return rv;
-}
-
-/*
- * Compare 2 layout stateid sequence ids, to see which is newer,
- * taking into account wraparound issues.
- */
-static bool pnfs_seqid_is_newer(u32 s1, u32 s2)
-{
-	return (s32)(s1 - s2) > 0;
 }
 
 static bool
@@ -801,9 +845,10 @@
 		/* If the sb is being destroyed, just bail */
 		if (!nfs_sb_active(server->super))
 			break;
-		inode = igrab(lo->plh_inode);
+		inode = pnfs_grab_inode_layout_hdr(lo);
 		if (inode != NULL) {
-			list_del_init(&lo->plh_layouts);
+			if (test_and_clear_bit(NFS_LAYOUT_HASHED, &lo->plh_flags))
+				list_del_rcu(&lo->plh_layouts);
 			if (pnfs_layout_add_bulk_destroy_list(inode,
 						layout_list))
 				continue;
@@ -813,7 +858,6 @@
 		} else {
 			rcu_read_unlock();
 			spin_unlock(&clp->cl_lock);
-			set_bit(NFS_LAYOUT_INODE_FREEING, &lo->plh_flags);
 		}
 		nfs_sb_deactive(server->super);
 		spin_lock(&clp->cl_lock);
@@ -910,7 +954,7 @@
 }
 
 /*
- * Called by the state manger to remove all layouts established under an
+ * Called by the state manager to remove all layouts established under an
  * expired lease.
  */
 void
@@ -922,37 +966,48 @@
 	pnfs_destroy_layouts_byclid(clp, false);
 }
 
+static void
+pnfs_set_layout_cred(struct pnfs_layout_hdr *lo, const struct cred *cred)
+{
+	const struct cred *old;
+
+	if (cred && cred_fscmp(lo->plh_lc_cred, cred) != 0) {
+		old = xchg(&lo->plh_lc_cred, get_cred(cred));
+		put_cred(old);
+	}
+}
+
 /* update lo->plh_stateid with new if is more recent */
 void
 pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new,
-			bool update_barrier)
+			const struct cred *cred, bool update_barrier)
 {
-	u32 oldseq, newseq, new_barrier = 0;
-
-	oldseq = be32_to_cpu(lo->plh_stateid.seqid);
-	newseq = be32_to_cpu(new->seqid);
+	u32 oldseq = be32_to_cpu(lo->plh_stateid.seqid);
+	u32 newseq = be32_to_cpu(new->seqid);
 
 	if (!pnfs_layout_is_valid(lo)) {
+		pnfs_set_layout_cred(lo, cred);
 		nfs4_stateid_copy(&lo->plh_stateid, new);
 		lo->plh_barrier = newseq;
 		pnfs_clear_layoutreturn_info(lo);
 		clear_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
 		return;
 	}
-	if (pnfs_seqid_is_newer(newseq, oldseq)) {
+
+	if (pnfs_seqid_is_newer(newseq, oldseq))
 		nfs4_stateid_copy(&lo->plh_stateid, new);
-		/*
-		 * Because of wraparound, we want to keep the barrier
-		 * "close" to the current seqids.
-		 */
-		new_barrier = newseq - atomic_read(&lo->plh_outstanding);
-	}
-	if (update_barrier)
-		new_barrier = be32_to_cpu(new->seqid);
-	else if (new_barrier == 0)
+
+	if (update_barrier) {
+		pnfs_barrier_update(lo, newseq);
 		return;
-	if (pnfs_seqid_is_newer(new_barrier, lo->plh_barrier))
-		lo->plh_barrier = new_barrier;
+	}
+	/*
+	 * Because of wraparound, we want to keep the barrier
+	 * "close" to the current seqids. We really only want to
+	 * get here from a layoutget call.
+	 */
+	if (atomic_read(&lo->plh_outstanding) == 1)
+		pnfs_barrier_update(lo, be32_to_cpu(lo->plh_stateid.seqid));
 }
 
 static bool
@@ -961,7 +1016,7 @@
 {
 	u32 seqid = be32_to_cpu(stateid->seqid);
 
-	return !pnfs_seqid_is_newer(seqid, lo->plh_barrier);
+	return lo->plh_barrier && pnfs_seqid_is_newer(lo->plh_barrier, seqid);
 }
 
 /* lget is set to 1 if called from inside send_layoutget call chain */
@@ -1007,7 +1062,7 @@
 	struct page **pages;
 	int i;
 
-	pages = kcalloc(size, sizeof(struct page *), gfp_flags);
+	pages = kmalloc_array(size, sizeof(struct page *), gfp_flags);
 	if (!pages) {
 		dprintk("%s: can't alloc array of %zu pages\n", __func__, size);
 		return NULL;
@@ -1017,7 +1072,7 @@
 		pages[i] = alloc_page(gfp_flags);
 		if (!pages[i]) {
 			dprintk("%s: failed to allocate page\n", __func__);
-			nfs4_free_pages(pages, size);
+			nfs4_free_pages(pages, i);
 			return NULL;
 		}
 	}
@@ -1033,6 +1088,7 @@
 	   gfp_t gfp_flags)
 {
 	struct nfs_server *server = pnfs_find_server(ino, ctx);
+	size_t max_reply_sz = server->pnfs_curr_ld->max_layoutget_response;
 	size_t max_pages = max_response_pages(server);
 	struct nfs4_layoutget *lgp;
 
@@ -1041,6 +1097,12 @@
 	lgp = kzalloc(sizeof(*lgp), gfp_flags);
 	if (lgp == NULL)
 		return NULL;
+
+	if (max_reply_sz) {
+		size_t npages = (max_reply_sz + PAGE_SIZE - 1) >> PAGE_SHIFT;
+		if (npages < max_pages)
+			max_pages = npages;
+	}
 
 	lgp->args.layout.pages = nfs4_alloc_pages(max_pages, gfp_flags);
 	if (!lgp->args.layout.pages) {
@@ -1073,7 +1135,7 @@
 	lgp->args.ctx = get_nfs_open_context(ctx);
 	nfs4_stateid_copy(&lgp->args.stateid, stateid);
 	lgp->gfp_flags = gfp_flags;
-	lgp->cred = get_rpccred(ctx->cred);
+	lgp->cred = ctx->cred;
 	return lgp;
 }
 
@@ -1084,7 +1146,6 @@
 	nfs4_free_pages(lgp->args.layout.pages, max_pages);
 	if (lgp->args.inode)
 		pnfs_put_layout_hdr(NFS_I(lgp->args.inode)->layout);
-	put_rpccred(lgp->cred);
 	put_nfs_open_context(lgp->args.ctx);
 	kfree(lgp);
 }
@@ -1121,7 +1182,7 @@
 
 		pnfs_mark_matching_lsegs_invalid(lo, &freeme, range, seq);
 		pnfs_free_returned_lsegs(lo, &freeme, range, seq);
-		pnfs_set_layout_stateid(lo, stateid, true);
+		pnfs_set_layout_stateid(lo, stateid, NULL, true);
 	} else
 		pnfs_mark_layout_stateid_invalid(lo, &freeme);
 out_unlock:
@@ -1134,6 +1195,7 @@
 static bool
 pnfs_prepare_layoutreturn(struct pnfs_layout_hdr *lo,
 		nfs4_stateid *stateid,
+		const struct cred **cred,
 		enum pnfs_iomode *iomode)
 {
 	/* Serialise LAYOUTGET/LAYOUTRETURN */
@@ -1143,21 +1205,17 @@
 		return false;
 	set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags);
 	pnfs_get_layout_hdr(lo);
+	nfs4_stateid_copy(stateid, &lo->plh_stateid);
+	*cred = get_cred(lo->plh_lc_cred);
 	if (test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags)) {
-		if (stateid != NULL) {
-			nfs4_stateid_copy(stateid, &lo->plh_stateid);
-			if (lo->plh_return_seq != 0)
-				stateid->seqid = cpu_to_be32(lo->plh_return_seq);
-		}
+		if (lo->plh_return_seq != 0)
+			stateid->seqid = cpu_to_be32(lo->plh_return_seq);
 		if (iomode != NULL)
 			*iomode = lo->plh_return_iomode;
 		pnfs_clear_layoutreturn_info(lo);
-		return true;
-	}
-	if (stateid != NULL)
-		nfs4_stateid_copy(stateid, &lo->plh_stateid);
-	if (iomode != NULL)
+	} else if (iomode != NULL)
 		*iomode = IOMODE_ANY;
+	pnfs_barrier_update(lo, be32_to_cpu(stateid->seqid));
 	return true;
 }
 
@@ -1179,20 +1237,26 @@
 }
 
 static int
-pnfs_send_layoutreturn(struct pnfs_layout_hdr *lo, const nfs4_stateid *stateid,
-		enum pnfs_iomode iomode, bool sync)
+pnfs_send_layoutreturn(struct pnfs_layout_hdr *lo,
+		const nfs4_stateid *stateid,
+		const struct cred **pcred,
+		enum pnfs_iomode iomode,
+		bool sync)
 {
 	struct inode *ino = lo->plh_inode;
 	struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;
 	struct nfs4_layoutreturn *lrp;
+	const struct cred *cred = *pcred;
 	int status = 0;
 
+	*pcred = NULL;
 	lrp = kzalloc(sizeof(*lrp), GFP_NOFS);
 	if (unlikely(lrp == NULL)) {
 		status = -ENOMEM;
 		spin_lock(&ino->i_lock);
 		pnfs_clear_layoutreturn_waitbit(lo);
 		spin_unlock(&ino->i_lock);
+		put_cred(cred);
 		pnfs_put_layout_hdr(lo);
 		goto out;
 	}
@@ -1200,7 +1264,7 @@
 	pnfs_init_layoutreturn_args(&lrp->args, lo, stateid, iomode);
 	lrp->args.ld_private = &lrp->ld_private;
 	lrp->clp = NFS_SERVER(ino)->nfs_client;
-	lrp->cred = lo->plh_lc_cred;
+	lrp->cred = cred;
 	if (ld->prepare_layoutreturn)
 		ld->prepare_layoutreturn(&lrp->args);
 
@@ -1241,15 +1305,16 @@
 		return;
 	spin_lock(&inode->i_lock);
 	if (pnfs_layout_need_return(lo)) {
+		const struct cred *cred;
 		nfs4_stateid stateid;
 		enum pnfs_iomode iomode;
 		bool send;
 
-		send = pnfs_prepare_layoutreturn(lo, &stateid, &iomode);
+		send = pnfs_prepare_layoutreturn(lo, &stateid, &cred, &iomode);
 		spin_unlock(&inode->i_lock);
 		if (send) {
 			/* Send an async layoutreturn so we dont deadlock */
-			pnfs_send_layoutreturn(lo, &stateid, iomode, false);
+			pnfs_send_layoutreturn(lo, &stateid, &cred, iomode, false);
 		}
 	} else
 		spin_unlock(&inode->i_lock);
@@ -1274,6 +1339,7 @@
 		.length = NFS4_MAX_UINT64,
 	};
 	LIST_HEAD(tmp_list);
+	const struct cred *cred;
 	nfs4_stateid stateid;
 	int status = 0;
 	bool send, valid_layout;
@@ -1309,13 +1375,15 @@
 	    !valid_layout) {
 		spin_unlock(&ino->i_lock);
 		dprintk("NFS: %s no layout segments to return\n", __func__);
-		goto out_put_layout_hdr;
+		goto out_wait_layoutreturn;
 	}
 
-	send = pnfs_prepare_layoutreturn(lo, &stateid, NULL);
+	send = pnfs_prepare_layoutreturn(lo, &stateid, &cred, NULL);
 	spin_unlock(&ino->i_lock);
 	if (send)
-		status = pnfs_send_layoutreturn(lo, &stateid, IOMODE_ANY, true);
+		status = pnfs_send_layoutreturn(lo, &stateid, &cred, IOMODE_ANY, true);
+out_wait_layoutreturn:
+	wait_on_bit(&lo->plh_flags, NFS_LAYOUT_RETURN, TASK_UNINTERRUPTIBLE);
 out_put_layout_hdr:
 	pnfs_free_lseg_list(&tmp_list);
 	pnfs_put_layout_hdr(lo);
@@ -1354,13 +1422,14 @@
 bool pnfs_roc(struct inode *ino,
 		struct nfs4_layoutreturn_args *args,
 		struct nfs4_layoutreturn_res *res,
-		const struct rpc_cred *cred)
+		const struct cred *cred)
 {
 	struct nfs_inode *nfsi = NFS_I(ino);
 	struct nfs_open_context *ctx;
 	struct nfs4_state *state;
 	struct pnfs_layout_hdr *lo;
 	struct pnfs_layout_segment *lseg, *next;
+	const struct cred *lc_cred;
 	nfs4_stateid stateid;
 	enum pnfs_iomode iomode = 0;
 	bool layoutreturn = false, roc = false;
@@ -1369,6 +1438,7 @@
 	if (!nfs_have_layout(ino))
 		return false;
 retry:
+	rcu_read_lock();
 	spin_lock(&ino->i_lock);
 	lo = nfsi->layout;
 	if (!lo || !pnfs_layout_is_valid(lo) ||
@@ -1379,6 +1449,7 @@
 	pnfs_get_layout_hdr(lo);
 	if (test_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags)) {
 		spin_unlock(&ino->i_lock);
+		rcu_read_unlock();
 		wait_on_bit(&lo->plh_flags, NFS_LAYOUT_RETURN,
 				TASK_UNINTERRUPTIBLE);
 		pnfs_put_layout_hdr(lo);
@@ -1392,7 +1463,7 @@
 			skip_read = true;
 	}
 
-	list_for_each_entry(ctx, &nfsi->open_files, list) {
+	list_for_each_entry_rcu(ctx, &nfsi->open_files, list) {
 		state = ctx->state;
 		if (state == NULL)
 			continue;
@@ -1428,18 +1499,20 @@
 	 * 2. we don't send layoutreturn
 	 */
 	/* lo ref dropped in pnfs_roc_release() */
-	layoutreturn = pnfs_prepare_layoutreturn(lo, &stateid, &iomode);
+	layoutreturn = pnfs_prepare_layoutreturn(lo, &stateid, &lc_cred, &iomode);
 	/* If the creds don't match, we can't compound the layoutreturn */
-	if (!layoutreturn || cred != lo->plh_lc_cred)
+	if (!layoutreturn || cred_fscmp(cred, lc_cred) != 0)
 		goto out_noroc;
 
 	roc = layoutreturn;
 	pnfs_init_layoutreturn_args(args, lo, &stateid, iomode);
 	res->lrs_present = 0;
 	layoutreturn = false;
+	put_cred(lc_cred);
 
 out_noroc:
 	spin_unlock(&ino->i_lock);
+	rcu_read_unlock();
 	pnfs_layoutcommit_inode(ino, true);
 	if (roc) {
 		struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;
@@ -1449,9 +1522,53 @@
 		return true;
 	}
 	if (layoutreturn)
-		pnfs_send_layoutreturn(lo, &stateid, iomode, true);
+		pnfs_send_layoutreturn(lo, &stateid, &lc_cred, iomode, true);
 	pnfs_put_layout_hdr(lo);
 	return false;
+}
+
+int pnfs_roc_done(struct rpc_task *task, struct nfs4_layoutreturn_args **argpp,
+		struct nfs4_layoutreturn_res **respp, int *ret)
+{
+	struct nfs4_layoutreturn_args *arg = *argpp;
+	int retval = -EAGAIN;
+
+	if (!arg)
+		return 0;
+	/* Handle Layoutreturn errors */
+	switch (*ret) {
+	case 0:
+		retval = 0;
+		break;
+	case -NFS4ERR_NOMATCHING_LAYOUT:
+		/* Was there an RPC level error? If not, retry */
+		if (task->tk_rpc_status == 0)
+			break;
+		/* If the call was not sent, let caller handle it */
+		if (!RPC_WAS_SENT(task))
+			return 0;
+		/*
+		 * Otherwise, assume the call succeeded and
+		 * that we need to release the layout
+		 */
+		*ret = 0;
+		(*respp)->lrs_present = 0;
+		retval = 0;
+		break;
+	case -NFS4ERR_DELAY:
+		/* Let the caller handle the retry */
+		*ret = -NFS4ERR_NOMATCHING_LAYOUT;
+		return 0;
+	case -NFS4ERR_OLD_STATEID:
+		if (!nfs4_layout_refresh_old_stateid(&arg->stateid,
+					&arg->range, arg->inode))
+			break;
+		*ret = -NFS4ERR_NOMATCHING_LAYOUT;
+		return -EAGAIN;
+	}
+	*argpp = NULL;
+	*respp = NULL;
+	return retval;
 }
 
 void pnfs_roc_release(struct nfs4_layoutreturn_args *args,
@@ -1475,16 +1592,16 @@
 	case 0:
 		if (res->lrs_present)
 			res_stateid = &res->stateid;
-		/* Fallthrough */
+		fallthrough;
 	default:
 		arg_stateid = &args->stateid;
 	}
+	trace_nfs4_layoutreturn_on_close(args->inode, &args->stateid, ret);
 	pnfs_layoutreturn_free_lsegs(lo, arg_stateid, &args->range,
 			res_stateid);
 	if (ld_private && ld_private->ops && ld_private->ops->free)
 		ld_private->ops->free(ld_private);
 	pnfs_put_layout_hdr(lo);
-	trace_nfs4_layoutreturn_on_close(args->inode, 0);
 }
 
 bool pnfs_wait_on_layoutreturn(struct inode *ino, struct rpc_task *task)
@@ -1621,7 +1738,7 @@
 	INIT_LIST_HEAD(&lo->plh_return_segs);
 	INIT_LIST_HEAD(&lo->plh_bulk_destroy);
 	lo->plh_inode = ino;
-	lo->plh_lc_cred = get_rpccred(ctx->cred);
+	lo->plh_lc_cred = get_cred(ctx->cred);
 	lo->plh_flags |= 1 << NFS_LAYOUT_INVALID_STID;
 	return lo;
 }
@@ -1807,8 +1924,14 @@
 
 static void nfs_layoutget_end(struct pnfs_layout_hdr *lo)
 {
-	if (atomic_dec_and_test(&lo->plh_outstanding))
-		wake_up_var(&lo->plh_outstanding);
+	if (atomic_dec_and_test(&lo->plh_outstanding) &&
+	    test_and_clear_bit(NFS_LAYOUT_DRAIN, &lo->plh_flags))
+		wake_up_bit(&lo->plh_flags, NFS_LAYOUT_DRAIN);
+}
+
+static bool pnfs_is_first_layoutget(struct pnfs_layout_hdr *lo)
+{
+	return test_bit(NFS_LAYOUT_FIRST_LAYOUTGET, &lo->plh_flags);
 }
 
 static void pnfs_clear_first_layoutget(struct pnfs_layout_hdr *lo)
@@ -1823,15 +1946,14 @@
 static void _add_to_server_list(struct pnfs_layout_hdr *lo,
 		struct nfs_server *server)
 {
-	if (list_empty(&lo->plh_layouts)) {
+	if (!test_and_set_bit(NFS_LAYOUT_HASHED, &lo->plh_flags)) {
 		struct nfs_client *clp = server->nfs_client;
 
 		/* The lo must be on the clp list if there is any
 		 * chance of a CB_LAYOUTRECALL(FILE) coming in.
 		 */
 		spin_lock(&clp->cl_lock);
-		if (list_empty(&lo->plh_layouts))
-			list_add_tail(&lo->plh_layouts, &server->layouts);
+		list_add_tail_rcu(&lo->plh_layouts, &server->layouts);
 		spin_unlock(&clp->cl_lock);
 	}
 }
@@ -1886,6 +2008,7 @@
 	lo = pnfs_find_alloc_layout(ino, ctx, gfp_flags);
 	if (lo == NULL) {
 		spin_unlock(&ino->i_lock);
+		lseg = ERR_PTR(-ENOMEM);
 		trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
 				PNFS_UPDATE_LAYOUT_NOMEM);
 		goto out;
@@ -1910,15 +2033,36 @@
 	 * If the layout segment list is empty, but there are outstanding
 	 * layoutget calls, then they might be subject to a layoutrecall.
 	 */
-	if (list_empty(&lo->plh_segs) &&
+	if (test_bit(NFS_LAYOUT_DRAIN, &lo->plh_flags) &&
 	    atomic_read(&lo->plh_outstanding) != 0) {
 		spin_unlock(&ino->i_lock);
-		lseg = ERR_PTR(wait_var_event_killable(&lo->plh_outstanding,
-					!atomic_read(&lo->plh_outstanding)));
+		lseg = ERR_PTR(wait_on_bit(&lo->plh_flags, NFS_LAYOUT_DRAIN,
+					TASK_KILLABLE));
 		if (IS_ERR(lseg))
 			goto out_put_layout_hdr;
 		pnfs_put_layout_hdr(lo);
 		goto lookup_again;
+	}
+
+	/*
+	 * Because we free lsegs when sending LAYOUTRETURN, we need to wait
+	 * for LAYOUTRETURN.
+	 */
+	if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) {
+		spin_unlock(&ino->i_lock);
+		dprintk("%s wait for layoutreturn\n", __func__);
+		lseg = ERR_PTR(pnfs_prepare_to_retry_layoutget(lo));
+		if (!IS_ERR(lseg)) {
+			pnfs_put_layout_hdr(lo);
+			dprintk("%s retrying\n", __func__);
+			trace_pnfs_update_layout(ino, pos, count, iomode, lo,
+					lseg,
+					PNFS_UPDATE_LAYOUT_RETRY);
+			goto lookup_again;
+		}
+		trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
+				PNFS_UPDATE_LAYOUT_RETURN);
+		goto out_put_layout_hdr;
 	}
 
 	lseg = pnfs_find_lseg(lo, &arg, strict_iomode);
@@ -1928,18 +2072,13 @@
 		goto out_unlock;
 	}
 
-	if (!nfs4_valid_open_stateid(ctx->state)) {
-		trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
-				PNFS_UPDATE_LAYOUT_INVALID_OPEN);
-		goto out_unlock;
-	}
-
 	/*
 	 * Choose a stateid for the LAYOUTGET. If we don't have a layout
 	 * stateid, or it has been invalidated, then we must use the open
 	 * stateid.
 	 */
 	if (test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags)) {
+		int status;
 
 		/*
 		 * The first layoutget for the file. Need to serialize per
@@ -1958,39 +2097,24 @@
 			goto lookup_again;
 		}
 
+		spin_unlock(&ino->i_lock);
 		first = true;
-		if (nfs4_select_rw_stateid(ctx->state,
+		status = nfs4_select_rw_stateid(ctx->state,
 			iomode == IOMODE_RW ? FMODE_WRITE : FMODE_READ,
-			NULL, &stateid, NULL) != 0) {
+			NULL, &stateid, NULL);
+		if (status != 0) {
+			lseg = ERR_PTR(status);
 			trace_pnfs_update_layout(ino, pos, count,
 					iomode, lo, lseg,
 					PNFS_UPDATE_LAYOUT_INVALID_OPEN);
-			goto out_unlock;
-		}
-	} else {
-		nfs4_stateid_copy(&stateid, &lo->plh_stateid);
-	}
-
-	/*
-	 * Because we free lsegs before sending LAYOUTRETURN, we need to wait
-	 * for LAYOUTRETURN even if first is true.
-	 */
-	if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) {
-		spin_unlock(&ino->i_lock);
-		dprintk("%s wait for layoutreturn\n", __func__);
-		lseg = ERR_PTR(pnfs_prepare_to_retry_layoutget(lo));
-		if (!IS_ERR(lseg)) {
-			if (first)
-				pnfs_clear_first_layoutget(lo);
+			nfs4_schedule_stateid_recovery(server, ctx->state);
+			pnfs_clear_first_layoutget(lo);
 			pnfs_put_layout_hdr(lo);
-			dprintk("%s retrying\n", __func__);
-			trace_pnfs_update_layout(ino, pos, count, iomode, lo,
-					lseg, PNFS_UPDATE_LAYOUT_RETRY);
 			goto lookup_again;
 		}
-		trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
-				PNFS_UPDATE_LAYOUT_RETURN);
-		goto out_put_layout_hdr;
+		spin_lock(&ino->i_lock);
+	} else {
+		nfs4_stateid_copy(&stateid, &lo->plh_stateid);
 	}
 
 	if (pnfs_layoutgets_blocked(lo)) {
@@ -2013,6 +2137,7 @@
 
 	lgp = pnfs_alloc_init_layoutget_args(ino, ctx, &stateid, &arg, gfp_flags);
 	if (!lgp) {
+		lseg = ERR_PTR(-ENOMEM);
 		trace_pnfs_update_layout(ino, pos, count, iomode, lo, NULL,
 				PNFS_UPDATE_LAYOUT_NOMEM);
 		nfs_layoutget_end(lo);
@@ -2032,6 +2157,12 @@
 		case -ERECALLCONFLICT:
 		case -EAGAIN:
 			break;
+		case -ENODATA:
+			/* The server returned NFS4ERR_LAYOUTUNAVAILABLE */
+			pnfs_layout_set_fail_bit(
+					lo, pnfs_iomode_to_fail_bit(iomode));
+			lseg = NULL;
+			goto out_put_layout_hdr;
 		default:
 			if (!nfs_error_is_fatal(PTR_ERR(lseg))) {
 				pnfs_layout_clear_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
@@ -2054,6 +2185,8 @@
 out_put_layout_hdr:
 	if (first)
 		pnfs_clear_first_layoutget(lo);
+	trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
+			PNFS_UPDATE_LAYOUT_EXIT);
 	pnfs_put_layout_hdr(lo);
 out:
 	dprintk("%s: inode %s/%llu pNFS layout segment %s for "
@@ -2118,8 +2251,6 @@
 	pnfs_put_layout_hdr(lo);
 	return NULL;
 }
-
-extern const nfs4_stateid current_stateid;
 
 static void _lgopen_prepare_attached(struct nfs4_opendata *data,
 		struct nfs_open_context *ctx)
@@ -2285,17 +2416,20 @@
 		goto out_forget;
 	}
 
-	if (!pnfs_layout_is_valid(lo)) {
-		/* We have a completely new layout */
-		pnfs_set_layout_stateid(lo, &res->stateid, true);
-	} else if (nfs4_stateid_match_other(&lo->plh_stateid, &res->stateid)) {
+	if (test_bit(NFS_LAYOUT_DRAIN, &lo->plh_flags) &&
+	    !pnfs_is_first_layoutget(lo))
+		goto out_forget;
+
+	if (nfs4_stateid_match_other(&lo->plh_stateid, &res->stateid)) {
 		/* existing state ID, make sure the sequence number matches. */
 		if (pnfs_layout_stateid_blocked(lo, &res->stateid)) {
+			if (!pnfs_layout_is_valid(lo))
+				lo->plh_barrier = 0;
 			dprintk("%s forget reply due to sequence\n", __func__);
 			goto out_forget;
 		}
-		pnfs_set_layout_stateid(lo, &res->stateid, false);
-	} else {
+		pnfs_set_layout_stateid(lo, &res->stateid, lgp->cred, false);
+	} else if (pnfs_layout_is_valid(lo)) {
 		/*
 		 * We got an entirely new state ID. Mark all segments for the
 		 * inode invalid, and retry the layoutget
@@ -2308,6 +2442,9 @@
 		pnfs_mark_matching_lsegs_return(lo, &lo->plh_return_segs,
 				&range, 0);
 		goto out_forget;
+	} else {
+		/* We have a completely new layout */
+		pnfs_set_layout_stateid(lo, &res->stateid, lgp->cred, true);
 	}
 
 	pnfs_get_lseg(lseg);
@@ -2389,42 +2526,158 @@
 	return -ENOENT;
 }
 
-void pnfs_error_mark_layout_for_return(struct inode *inode,
-		struct pnfs_layout_segment *lseg)
+static void
+pnfs_mark_layout_for_return(struct inode *inode,
+		const struct pnfs_layout_range *range)
 {
-	struct pnfs_layout_hdr *lo = NFS_I(inode)->layout;
-	struct pnfs_layout_range range = {
-		.iomode = lseg->pls_range.iomode,
-		.offset = 0,
-		.length = NFS4_MAX_UINT64,
-	};
+	struct pnfs_layout_hdr *lo;
 	bool return_now = false;
 
 	spin_lock(&inode->i_lock);
+	lo = NFS_I(inode)->layout;
 	if (!pnfs_layout_is_valid(lo)) {
 		spin_unlock(&inode->i_lock);
 		return;
 	}
-	pnfs_set_plh_return_info(lo, range.iomode, 0);
+	pnfs_set_plh_return_info(lo, range->iomode, 0);
 	/*
 	 * mark all matching lsegs so that we are sure to have no live
 	 * segments at hand when sending layoutreturn. See pnfs_put_lseg()
 	 * for how it works.
 	 */
-	if (pnfs_mark_matching_lsegs_return(lo, &lo->plh_return_segs, &range, 0) != -EBUSY) {
+	if (pnfs_mark_matching_lsegs_return(lo, &lo->plh_return_segs, range, 0) != -EBUSY) {
+		const struct cred *cred;
 		nfs4_stateid stateid;
 		enum pnfs_iomode iomode;
 
-		return_now = pnfs_prepare_layoutreturn(lo, &stateid, &iomode);
+		return_now = pnfs_prepare_layoutreturn(lo, &stateid, &cred, &iomode);
 		spin_unlock(&inode->i_lock);
 		if (return_now)
-			pnfs_send_layoutreturn(lo, &stateid, iomode, false);
+			pnfs_send_layoutreturn(lo, &stateid, &cred, iomode, false);
 	} else {
 		spin_unlock(&inode->i_lock);
 		nfs_commit_inode(inode, 0);
 	}
 }
+
+void pnfs_error_mark_layout_for_return(struct inode *inode,
+		struct pnfs_layout_segment *lseg)
+{
+	struct pnfs_layout_range range = {
+		.iomode = lseg->pls_range.iomode,
+		.offset = 0,
+		.length = NFS4_MAX_UINT64,
+	};
+
+	pnfs_mark_layout_for_return(inode, &range);
+}
 EXPORT_SYMBOL_GPL(pnfs_error_mark_layout_for_return);
+
+static bool
+pnfs_layout_can_be_returned(struct pnfs_layout_hdr *lo)
+{
+	return pnfs_layout_is_valid(lo) &&
+		!test_bit(NFS_LAYOUT_INODE_FREEING, &lo->plh_flags) &&
+		!test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags);
+}
+
+static struct pnfs_layout_segment *
+pnfs_find_first_lseg(struct pnfs_layout_hdr *lo,
+		const struct pnfs_layout_range *range,
+		enum pnfs_iomode iomode)
+{
+	struct pnfs_layout_segment *lseg;
+
+	list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
+		if (!test_bit(NFS_LSEG_VALID, &lseg->pls_flags))
+			continue;
+		if (test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags))
+			continue;
+		if (lseg->pls_range.iomode != iomode && iomode != IOMODE_ANY)
+			continue;
+		if (pnfs_lseg_range_intersecting(&lseg->pls_range, range))
+			return lseg;
+	}
+	return NULL;
+}
+
+/* Find open file states whose mode matches that of the range */
+static bool
+pnfs_should_return_unused_layout(struct pnfs_layout_hdr *lo,
+		const struct pnfs_layout_range *range)
+{
+	struct list_head *head;
+	struct nfs_open_context *ctx;
+	fmode_t mode = 0;
+
+	if (!pnfs_layout_can_be_returned(lo) ||
+	    !pnfs_find_first_lseg(lo, range, range->iomode))
+		return false;
+
+	head = &NFS_I(lo->plh_inode)->open_files;
+	list_for_each_entry_rcu(ctx, head, list) {
+		if (ctx->state)
+			mode |= ctx->state->state & (FMODE_READ|FMODE_WRITE);
+	}
+
+	switch (range->iomode) {
+	default:
+		break;
+	case IOMODE_READ:
+		mode &= ~FMODE_WRITE;
+		break;
+	case IOMODE_RW:
+		if (pnfs_find_first_lseg(lo, range, IOMODE_READ))
+			mode &= ~FMODE_READ;
+	}
+	return mode == 0;
+}
+
+static int
+pnfs_layout_return_unused_byserver(struct nfs_server *server, void *data)
+{
+	const struct pnfs_layout_range *range = data;
+	struct pnfs_layout_hdr *lo;
+	struct inode *inode;
+restart:
+	rcu_read_lock();
+	list_for_each_entry_rcu(lo, &server->layouts, plh_layouts) {
+		if (!pnfs_layout_can_be_returned(lo) ||
+		    test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags))
+			continue;
+		inode = lo->plh_inode;
+		spin_lock(&inode->i_lock);
+		if (!pnfs_should_return_unused_layout(lo, range)) {
+			spin_unlock(&inode->i_lock);
+			continue;
+		}
+		spin_unlock(&inode->i_lock);
+		inode = pnfs_grab_inode_layout_hdr(lo);
+		if (!inode)
+			continue;
+		rcu_read_unlock();
+		pnfs_mark_layout_for_return(inode, range);
+		iput(inode);
+		cond_resched();
+		goto restart;
+	}
+	rcu_read_unlock();
+	return 0;
+}
+
+void
+pnfs_layout_return_unused_byclid(struct nfs_client *clp,
+		enum pnfs_iomode iomode)
+{
+	struct pnfs_layout_range range = {
+		.iomode = iomode,
+		.offset = 0,
+		.length = NFS4_MAX_UINT64,
+	};
+
+	nfs_client_for_each_server(clp, pnfs_layout_return_unused_byserver,
+			&range);
+}
 
 void
 pnfs_generic_pg_check_layout(struct nfs_pageio_descriptor *pgio)
@@ -2441,7 +2694,7 @@
  * Check for any intersection between the request and the pgio->pg_lseg,
  * and if none, put this pgio->pg_lseg away.
  */
-static void
+void
 pnfs_generic_pg_check_range(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
 {
 	if (pgio->pg_lseg && !pnfs_lseg_request_intersecting(pgio->pg_lseg, req)) {
@@ -2449,6 +2702,7 @@
 		pgio->pg_lseg = NULL;
 	}
 }
+EXPORT_SYMBOL_GPL(pnfs_generic_pg_check_range);
 
 void
 pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
@@ -2464,7 +2718,7 @@
 			rd_size = nfs_dreq_bytes_left(pgio->pg_dreq);
 
 		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
-				req->wb_context,
+				nfs_req_openctx(req),
 				req_offset(req),
 				rd_size,
 				IOMODE_READ,
@@ -2491,12 +2745,12 @@
 	pnfs_generic_pg_check_range(pgio, req);
 	if (pgio->pg_lseg == NULL) {
 		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
-				req->wb_context,
+				nfs_req_openctx(req),
 				req_offset(req),
 				wb_size,
 				IOMODE_RW,
 				false,
-				GFP_NOFS);
+				GFP_KERNEL);
 		if (IS_ERR(pgio->pg_lseg)) {
 			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
 			pgio->pg_lseg = NULL;
@@ -2768,7 +3022,8 @@
 }
 
 /* Resend all requests through pnfs. */
-void pnfs_read_resend_pnfs(struct nfs_pgio_header *hdr)
+void pnfs_read_resend_pnfs(struct nfs_pgio_header *hdr,
+		unsigned int mirror_idx)
 {
 	struct nfs_pageio_descriptor pgio;
 
@@ -2779,6 +3034,7 @@
 
 		nfs_pageio_init_read(&pgio, hdr->inode, false,
 				hdr->completion_ops);
+		pgio.pg_mirror_idx = mirror_idx;
 		hdr->task.tk_status = nfs_pageio_resend(&pgio, hdr);
 	}
 }
@@ -2966,10 +3222,10 @@
 	end_pos = nfsi->layout->plh_lwb;
 
 	nfs4_stateid_copy(&data->args.stateid, &nfsi->layout->plh_stateid);
+	data->cred = get_cred(nfsi->layout->plh_lc_cred);
 	spin_unlock(&inode->i_lock);
 
 	data->args.inode = inode;
-	data->cred = get_rpccred(nfsi->layout->plh_lc_cred);
 	nfs_fattr_init(&data->fattr);
 	data->args.bitmask = NFS_SERVER(inode)->cache_consistency_bitmask;
 	data->res.fattr = &data->fattr;
@@ -2982,7 +3238,7 @@
 	if (ld->prepare_layoutcommit) {
 		status = ld->prepare_layoutcommit(&data->args);
 		if (status) {
-			put_rpccred(data->cred);
+			put_cred(data->cred);
 			spin_lock(&inode->i_lock);
 			set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags);
 			if (end_pos > nfsi->layout->plh_lwb