forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/fs/nfs/direct.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * linux/fs/nfs/direct.c
  *
@@ -63,13 +64,6 @@

 static struct kmem_cache *nfs_direct_cachep;

-/*
- * This represents a set of asynchronous requests that we're waiting on
- */
-struct nfs_direct_mirror {
-        ssize_t count;
-};
-
 struct nfs_direct_req {
         struct kref kref;               /* release manager */

@@ -82,9 +76,6 @@
         /* completion state */
         atomic_t io_count;              /* i/os we're waiting for */
         spinlock_t lock;                /* protect completion state */
-
-        struct nfs_direct_mirror mirrors[NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX];
-        int mirror_count;

         loff_t io_start;                /* Start offset for I/O */
         ssize_t count,                  /* bytes actually processed */
@@ -103,7 +94,7 @@
 #define NFS_ODIRECT_RESCHED_WRITES      (2)     /* write verification failed */
 /* for read */
 #define NFS_ODIRECT_SHOULD_DIRTY        (3)     /* dirty user-space page after read */
-        struct nfs_writeverf verf;              /* unstable write verifier */
+#define NFS_ODIRECT_DONE                INT_MAX /* write verification failed */
 };

 static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops;
@@ -126,8 +117,6 @@
                             const struct nfs_pgio_header *hdr,
                             ssize_t dreq_len)
 {
-        struct nfs_direct_mirror *mirror = &dreq->mirrors[hdr->pgio_mirror_idx];
-
         if (!(test_bit(NFS_IOHDR_ERROR, &hdr->flags) ||
               test_bit(NFS_IOHDR_EOF, &hdr->flags)))
                 return;
@@ -141,15 +130,12 @@
                 else /* Clear outstanding error if this is EOF */
                         dreq->error = 0;
         }
-        if (mirror->count > dreq_len)
-                mirror->count = dreq_len;
 }

 static void
 nfs_direct_count_bytes(struct nfs_direct_req *dreq,
                        const struct nfs_pgio_header *hdr)
 {
-        struct nfs_direct_mirror *mirror = &dreq->mirrors[hdr->pgio_mirror_idx];
         loff_t hdr_end = hdr->io_start + hdr->good_bytes;
         ssize_t dreq_len = 0;

@@ -161,110 +147,8 @@
         if (dreq_len > dreq->max_count)
                 dreq_len = dreq->max_count;

-        if (mirror->count < dreq_len)
-                mirror->count = dreq_len;
         if (dreq->count < dreq_len)
                 dreq->count = dreq_len;
-}
-
-/*
- * nfs_direct_select_verf - select the right verifier
- * @dreq - direct request possibly spanning multiple servers
- * @ds_clp - nfs_client of data server or NULL if MDS / non-pnfs
- * @commit_idx - commit bucket index for the DS
- *
- * returns the correct verifier to use given the role of the server
- */
-static struct nfs_writeverf *
-nfs_direct_select_verf(struct nfs_direct_req *dreq,
-                       struct nfs_client *ds_clp,
-                       int commit_idx)
-{
-        struct nfs_writeverf *verfp = &dreq->verf;
-
-#ifdef CONFIG_NFS_V4_1
-        /*
-         * pNFS is in use, use the DS verf except commit_through_mds is set
-         * for layout segment where nbuckets is zero.
-         */
-        if (ds_clp && dreq->ds_cinfo.nbuckets > 0) {
-                if (commit_idx >= 0 && commit_idx < dreq->ds_cinfo.nbuckets)
-                        verfp = &dreq->ds_cinfo.buckets[commit_idx].direct_verf;
-                else
-                        WARN_ON_ONCE(1);
-        }
-#endif
-        return verfp;
-}
-
-
-/*
- * nfs_direct_set_hdr_verf - set the write/commit verifier
- * @dreq - direct request possibly spanning multiple servers
- * @hdr - pageio header to validate against previously seen verfs
- *
- * Set the server's (MDS or DS) "seen" verifier
- */
-static void nfs_direct_set_hdr_verf(struct nfs_direct_req *dreq,
-                                    struct nfs_pgio_header *hdr)
-{
-        struct nfs_writeverf *verfp;
-
-        verfp = nfs_direct_select_verf(dreq, hdr->ds_clp, hdr->ds_commit_idx);
-        WARN_ON_ONCE(verfp->committed >= 0);
-        memcpy(verfp, &hdr->verf, sizeof(struct nfs_writeverf));
-        WARN_ON_ONCE(verfp->committed < 0);
-}
-
-static int nfs_direct_cmp_verf(const struct nfs_writeverf *v1,
-                               const struct nfs_writeverf *v2)
-{
-        return nfs_write_verifier_cmp(&v1->verifier, &v2->verifier);
-}
-
-/*
- * nfs_direct_cmp_hdr_verf - compare verifier for pgio header
- * @dreq - direct request possibly spanning multiple servers
- * @hdr - pageio header to validate against previously seen verf
- *
- * set the server's "seen" verf if not initialized.
- * returns result of comparison between @hdr->verf and the "seen"
- * verf of the server used by @hdr (DS or MDS)
- */
-static int nfs_direct_set_or_cmp_hdr_verf(struct nfs_direct_req *dreq,
-                                          struct nfs_pgio_header *hdr)
-{
-        struct nfs_writeverf *verfp;
-
-        verfp = nfs_direct_select_verf(dreq, hdr->ds_clp, hdr->ds_commit_idx);
-        if (verfp->committed < 0) {
-                nfs_direct_set_hdr_verf(dreq, hdr);
-                return 0;
-        }
-        return nfs_direct_cmp_verf(verfp, &hdr->verf);
-}
-
-/*
- * nfs_direct_cmp_commit_data_verf - compare verifier for commit data
- * @dreq - direct request possibly spanning multiple servers
- * @data - commit data to validate against previously seen verf
- *
- * returns result of comparison between @data->verf and the verf of
- * the server used by @data (DS or MDS)
- */
-static int nfs_direct_cmp_commit_data_verf(struct nfs_direct_req *dreq,
-                                           struct nfs_commit_data *data)
-{
-        struct nfs_writeverf *verfp;
-
-        verfp = nfs_direct_select_verf(dreq, data->ds_clp,
-                                       data->ds_commit_index);
-
-        /* verifier not set so always fail */
-        if (verfp->committed < 0 || data->res.verf->committed <= NFS_UNSTABLE)
-                return 1;
-
-        return nfs_direct_cmp_verf(verfp, data->res.verf);
 }

 /**
@@ -288,8 +172,8 @@
         VM_BUG_ON(iov_iter_count(iter) != PAGE_SIZE);

         if (iov_iter_rw(iter) == READ)
-                return nfs_file_direct_read(iocb, iter);
-        return nfs_file_direct_write(iocb, iter);
+                return nfs_file_direct_read(iocb, iter, true);
+        return nfs_file_direct_write(iocb, iter, true);
 }

 static void nfs_direct_release_pages(struct page **pages, unsigned int npages)
@@ -309,18 +193,6 @@
         cinfo->completion_ops = &nfs_direct_commit_completion_ops;
 }

-static inline void nfs_direct_setup_mirroring(struct nfs_direct_req *dreq,
-                                              struct nfs_pageio_descriptor *pgio,
-                                              struct nfs_page *req)
-{
-        int mirror_count = 1;
-
-        if (pgio->pg_ops->pg_get_mirror_count)
-                mirror_count = pgio->pg_ops->pg_get_mirror_count(pgio, req);
-
-        dreq->mirror_count = mirror_count;
-}
-
 static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
 {
         struct nfs_direct_req *dreq;
@@ -333,9 +205,8 @@
         kref_get(&dreq->kref);
         init_completion(&dreq->completion);
         INIT_LIST_HEAD(&dreq->mds_cinfo.list);
-        dreq->verf.committed = NFS_INVALID_STABLE_HOW;  /* not set yet */
+        pnfs_init_ds_commit_info(&dreq->ds_cinfo);
         INIT_WORK(&dreq->work, nfs_direct_write_schedule_work);
-        dreq->mirror_count = 1;
         spin_lock_init(&dreq->lock);

         return dreq;
@@ -345,7 +216,7 @@
 {
         struct nfs_direct_req *dreq = container_of(kref, struct nfs_direct_req, kref);

-        nfs_free_pnfs_ds_cinfo(&dreq->ds_cinfo);
+        pnfs_release_ds_info(&dreq->ds_cinfo, dreq->inode);
         if (dreq->l_ctx != NULL)
                 nfs_put_lock_context(dreq->l_ctx);
         if (dreq->ctx != NULL)
@@ -507,7 +378,7 @@
                         struct nfs_page *req;
                         unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);
                         /* XXX do we need to do the eof zeroing found in async_filler? */
-                        req = nfs_create_request(dreq->ctx, pagevec[i], NULL,
+                        req = nfs_create_request(dreq->ctx, pagevec[i],
                                                  pgbase, req_len);
                         if (IS_ERR(req)) {
                                 result = PTR_ERR(req);
@@ -553,6 +424,7 @@
  * nfs_file_direct_read - file direct read operation for NFS files
  * @iocb: target I/O control block
  * @iter: vector of user buffers into which to read data
+ * @swap: flag indicating this is swap IO, not O_DIRECT IO
  *
  * We use this function for direct reads instead of calling
  * generic_file_aio_read() in order to avoid gfar's check to see if
@@ -568,14 +440,15 @@
  * client must read the updated atime from the server back into its
  * cache.
  */
-ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter)
+ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter,
+                             bool swap)
 {
         struct file *file = iocb->ki_filp;
         struct address_space *mapping = file->f_mapping;
         struct inode *inode = mapping->host;
         struct nfs_direct_req *dreq;
         struct nfs_lock_context *l_ctx;
-        ssize_t result = -EINVAL, requested;
+        ssize_t result, requested;
         size_t count = iov_iter_count(iter);
         nfs_add_stats(mapping->host, NFSIOS_DIRECTREADBYTES, count);

@@ -610,12 +483,14 @@
         if (iter_is_iovec(iter))
                 dreq->flags = NFS_ODIRECT_SHOULD_DIRTY;

-        nfs_start_io_direct(inode);
+        if (!swap)
+                nfs_start_io_direct(inode);

         NFS_I(inode)->read_io += count;
         requested = nfs_direct_read_schedule_iovec(dreq, iter, iocb->ki_pos);

-        nfs_end_io_direct(inode);
+        if (!swap)
+                nfs_end_io_direct(inode);

         if (requested > 0) {
                 result = nfs_direct_wait(dreq);
@@ -635,15 +510,30 @@
 }

 static void
+nfs_direct_join_group(struct list_head *list, struct inode *inode)
+{
+        struct nfs_page *req, *next;
+
+        list_for_each_entry(req, list, wb_list) {
+                if (req->wb_head != req || req->wb_this_page == req)
+                        continue;
+                for (next = req->wb_this_page;
+                     next != req->wb_head;
+                     next = next->wb_this_page) {
+                        nfs_list_remove_request(next);
+                        nfs_release_request(next);
+                }
+                nfs_join_page_group(req, inode);
+        }
+}
+
+static void
 nfs_direct_write_scan_commit_list(struct inode *inode,
                                   struct list_head *list,
                                   struct nfs_commit_info *cinfo)
 {
         mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
-#ifdef CONFIG_NFS_V4_1
-        if (cinfo->ds != NULL && cinfo->ds->nwritten != 0)
-                NFS_SERVER(inode)->pnfs_curr_ld->recover_commit_reqs(list, cinfo);
-#endif
+        pnfs_recover_commit_reqs(list, cinfo);
         nfs_scan_commit_list(&cinfo->mds->list, list, cinfo, 0);
         mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
 }
@@ -655,33 +545,26 @@
         LIST_HEAD(reqs);
         struct nfs_commit_info cinfo;
         LIST_HEAD(failed);
-        int i;

         nfs_init_cinfo_from_dreq(&cinfo, dreq);
         nfs_direct_write_scan_commit_list(dreq->inode, &reqs, &cinfo);
+
+        nfs_direct_join_group(&reqs, dreq->inode);

         dreq->count = 0;
         dreq->max_count = 0;
         list_for_each_entry(req, &reqs, wb_list)
                 dreq->max_count += req->wb_bytes;
-        dreq->verf.committed = NFS_INVALID_STABLE_HOW;
         nfs_clear_pnfs_ds_commit_verifiers(&dreq->ds_cinfo);
-        for (i = 0; i < dreq->mirror_count; i++)
-                dreq->mirrors[i].count = 0;
         get_dreq(dreq);

         nfs_pageio_init_write(&desc, dreq->inode, FLUSH_STABLE, false,
                               &nfs_direct_write_completion_ops);
         desc.pg_dreq = dreq;

-        req = nfs_list_entry(reqs.next);
-        nfs_direct_setup_mirroring(dreq, &desc, req);
-        if (desc.pg_error < 0) {
-                list_splice_init(&reqs, &failed);
-                goto out_failed;
-        }
-
         list_for_each_entry_safe(req, tmp, &reqs, wb_list) {
+                /* Bump the transmission count */
+                req->wb_nio++;
                 if (!nfs_pageio_add_request(&desc, req)) {
                         nfs_list_move_request(req, &failed);
                         spin_lock(&cinfo.inode->i_lock);
@@ -696,7 +579,6 @@
         }
         nfs_pageio_complete(&desc);

-out_failed:
         while (!list_empty(&failed)) {
                 req = nfs_list_entry(failed.next);
                 nfs_list_remove_request(req);
@@ -709,27 +591,40 @@

 static void nfs_direct_commit_complete(struct nfs_commit_data *data)
 {
+        const struct nfs_writeverf *verf = data->res.verf;
         struct nfs_direct_req *dreq = data->dreq;
         struct nfs_commit_info cinfo;
         struct nfs_page *req;
         int status = data->task.tk_status;

+        if (status < 0) {
+                /* Errors in commit are fatal */
+                dreq->error = status;
+                dreq->max_count = 0;
+                dreq->count = 0;
+                dreq->flags = NFS_ODIRECT_DONE;
+        } else if (dreq->flags == NFS_ODIRECT_DONE)
+                status = dreq->error;
+
         nfs_init_cinfo_from_dreq(&cinfo, dreq);
-        if (status < 0 || nfs_direct_cmp_commit_data_verf(dreq, data))
-                dreq->flags = NFS_ODIRECT_RESCHED_WRITES;

         while (!list_empty(&data->pages)) {
                 req = nfs_list_entry(data->pages.next);
                 nfs_list_remove_request(req);
-                if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES) {
-                        /* Note the rewrite will go through mds */
+                if (status >= 0 && !nfs_write_match_verf(verf, req)) {
+                        dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
+                        /*
+                         * Despite the reboot, the write was successful,
+                         * so reset wb_nio.
+                         */
+                        req->wb_nio = 0;
                         nfs_mark_request_commit(req, NULL, &cinfo, 0);
-                } else
+                } else /* Error or match */
                         nfs_release_request(req);
                 nfs_unlock_and_release_request(req);
         }

-        if (atomic_dec_and_test(&cinfo.mds->rpcs_out))
+        if (nfs_commit_end(cinfo.mds))
                 nfs_direct_write_complete(dreq);
 }

@@ -739,7 +634,8 @@
         struct nfs_direct_req *dreq = cinfo->dreq;

         spin_lock(&dreq->lock);
-        dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
+        if (dreq->flags != NFS_ODIRECT_DONE)
+                dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
         spin_unlock(&dreq->lock);
         nfs_mark_request_commit(req, NULL, cinfo, 0);
 }
@@ -762,6 +658,23 @@
                 nfs_direct_write_reschedule(dreq);
 }

+static void nfs_direct_write_clear_reqs(struct nfs_direct_req *dreq)
+{
+        struct nfs_commit_info cinfo;
+        struct nfs_page *req;
+        LIST_HEAD(reqs);
+
+        nfs_init_cinfo_from_dreq(&cinfo, dreq);
+        nfs_direct_write_scan_commit_list(dreq->inode, &reqs, &cinfo);
+
+        while (!list_empty(&reqs)) {
+                req = nfs_list_entry(reqs.next);
+                nfs_list_remove_request(req);
+                nfs_release_request(req);
+                nfs_unlock_and_release_request(req);
+        }
+}
+
 static void nfs_direct_write_schedule_work(struct work_struct *work)
 {
         struct nfs_direct_req *dreq = container_of(work, struct nfs_direct_req, work);
@@ -776,6 +689,7 @@
                         nfs_direct_write_reschedule(dreq);
                         break;
                 default:
+                        nfs_direct_write_clear_reqs(dreq);
                         nfs_zap_mapping(dreq->inode, dreq->inode->i_mapping);
                         nfs_direct_complete(dreq);
         }
@@ -790,8 +704,8 @@
 {
         struct nfs_direct_req *dreq = hdr->dreq;
         struct nfs_commit_info cinfo;
-        bool request_commit = false;
         struct nfs_page *req = nfs_list_entry(hdr->pages.next);
+        int flags = NFS_ODIRECT_DONE;

         nfs_init_cinfo_from_dreq(&cinfo, dreq);

@@ -802,21 +716,10 @@
         }

         nfs_direct_count_bytes(dreq, hdr);
-        if (hdr->good_bytes != 0) {
-                if (nfs_write_need_commit(hdr)) {
-                        if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES)
-                                request_commit = true;
-                        else if (dreq->flags == 0) {
-                                nfs_direct_set_hdr_verf(dreq, hdr);
-                                request_commit = true;
-                                dreq->flags = NFS_ODIRECT_DO_COMMIT;
-                        } else if (dreq->flags == NFS_ODIRECT_DO_COMMIT) {
-                                request_commit = true;
-                                if (nfs_direct_set_or_cmp_hdr_verf(dreq, hdr))
-                                        dreq->flags =
-                                                NFS_ODIRECT_RESCHED_WRITES;
-                        }
-                }
+        if (hdr->good_bytes != 0 && nfs_write_need_commit(hdr)) {
+                if (!dreq->flags)
+                        dreq->flags = NFS_ODIRECT_DO_COMMIT;
+                flags = dreq->flags;
         }
         spin_unlock(&dreq->lock);

@@ -824,10 +727,15 @@

                 req = nfs_list_entry(hdr->pages.next);
                 nfs_list_remove_request(req);
-                if (request_commit) {
+                if (flags == NFS_ODIRECT_DO_COMMIT) {
                         kref_get(&req->wb_kref);
+                        memcpy(&req->wb_verf, &hdr->verf.verifier,
+                               sizeof(req->wb_verf));
                         nfs_mark_request_commit(req, hdr->lseg, &cinfo,
                                                 hdr->ds_commit_idx);
+                } else if (flags == NFS_ODIRECT_RESCHED_WRITES) {
+                        kref_get(&req->wb_kref);
+                        nfs_mark_request_commit(req, NULL, &cinfo, 0);
                 }
                 nfs_unlock_and_release_request(req);
         }
@@ -858,7 +766,8 @@
                 dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
                 /* fake unstable write to let common nfs resend pages */
                 hdr->verf.committed = NFS_UNSTABLE;
-                hdr->good_bytes = hdr->args.count;
+                hdr->good_bytes = hdr->args.offset + hdr->args.count -
+                        hdr->io_start;
         }
         spin_unlock(&dreq->lock);
 }
@@ -884,7 +793,7 @@
  */
 static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
                                                struct iov_iter *iter,
-                                               loff_t pos)
+                                               loff_t pos, int ioflags)
 {
         struct nfs_pageio_descriptor desc;
         struct inode *inode = dreq->inode;
@@ -892,7 +801,7 @@
         size_t requested_bytes = 0;
         size_t wsize = max_t(size_t, NFS_SERVER(inode)->wsize, PAGE_SIZE);

-        nfs_pageio_init_write(&desc, inode, FLUSH_COND_STABLE, false,
+        nfs_pageio_init_write(&desc, inode, ioflags, false,
                               &nfs_direct_write_completion_ops);
         desc.pg_dreq = dreq;
         get_dreq(dreq);
@@ -917,14 +826,13 @@
                         struct nfs_page *req;
                         unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);

-                        req = nfs_create_request(dreq->ctx, pagevec[i], NULL,
+                        req = nfs_create_request(dreq->ctx, pagevec[i],
                                                  pgbase, req_len);
                         if (IS_ERR(req)) {
                                 result = PTR_ERR(req);
                                 break;
                         }

-                        nfs_direct_setup_mirroring(dreq, &desc, req);
                         if (desc.pg_error < 0) {
                                 nfs_free_request(req);
                                 result = desc.pg_error;
@@ -971,6 +879,7 @@
  * nfs_file_direct_write - file direct write operation for NFS files
  * @iocb: target I/O control block
  * @iter: vector of user buffers from which to write data
+ * @swap: flag indicating this is swap IO, not O_DIRECT IO
  *
  * We use this function for direct writes instead of calling
  * generic_file_aio_write() in order to avoid taking the inode
@@ -987,9 +896,10 @@
  * Note that O_APPEND is not supported for NFS direct writes, as there
  * is no atomic O_APPEND write facility in the NFS protocol.
  */
-ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter)
+ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter,
+                              bool swap)
 {
-        ssize_t result = -EINVAL, requested;
+        ssize_t result, requested;
         size_t count;
         struct file *file = iocb->ki_filp;
         struct address_space *mapping = file->f_mapping;
@@ -1001,7 +911,11 @@
         dfprintk(FILE, "NFS: direct write(%pD2, %zd@%Ld)\n",
                  file, iov_iter_count(iter), (long long) iocb->ki_pos);

-        result = generic_write_checks(iocb, iter);
+        if (swap)
+                /* bypass generic checks */
+                result = iov_iter_count(iter);
+        else
+                result = generic_write_checks(iocb, iter);
         if (result <= 0)
                 return result;
         count = result;
@@ -1030,17 +944,24 @@
         dreq->l_ctx = l_ctx;
         if (!is_sync_kiocb(iocb))
                 dreq->iocb = iocb;
+        pnfs_init_ds_commit_info_ops(&dreq->ds_cinfo, inode);

-        nfs_start_io_direct(inode);
+        if (swap) {
+                requested = nfs_direct_write_schedule_iovec(dreq, iter, pos,
+                                                            FLUSH_STABLE);
+        } else {
+                nfs_start_io_direct(inode);

-        requested = nfs_direct_write_schedule_iovec(dreq, iter, pos);
+                requested = nfs_direct_write_schedule_iovec(dreq, iter, pos,
+                                                            FLUSH_COND_STABLE);

-        if (mapping->nrpages) {
-                invalidate_inode_pages2_range(mapping,
-                                              pos >> PAGE_SHIFT, end);
+                if (mapping->nrpages) {
+                        invalidate_inode_pages2_range(mapping,
+                                                      pos >> PAGE_SHIFT, end);
+                }
+
+                nfs_end_io_direct(inode);
         }
-
-        nfs_end_io_direct(inode);

         if (requested > 0) {
                 result = nfs_direct_wait(dreq);