2024-12-19 9370bb92b2d16684ee45cf24e879c93c509162da
kernel/fs/afs/file.c
@@ -1,12 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /* AFS filesystem file handling
  *
  * Copyright (C) 2002, 2007 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
  */
 
 #include <linux/kernel.h>
@@ -17,6 +13,7 @@
 #include <linux/writeback.h>
 #include <linux/gfp.h>
 #include <linux/task_io_accounting_ops.h>
+#include <linux/mm.h>
 #include "internal.h"
 
 static int afs_file_mmap(struct file *file, struct vm_area_struct *vma);
@@ -36,6 +33,7 @@
 	.write_iter = afs_file_write,
 	.mmap = afs_file_mmap,
 	.splice_read = generic_file_splice_read,
+	.splice_write = iter_file_splice_write,
 	.fsync = afs_fsync,
 	.lock = afs_lock,
 	.flock = afs_flock,
@@ -45,7 +43,6 @@
 	.getattr = afs_getattr,
 	.setattr = afs_setattr,
 	.permission = afs_permission,
-	.listxattr = afs_listxattr,
 };
 
 const struct address_space_operations afs_fs_aops = {
@@ -72,7 +69,7 @@
  */
 void afs_put_wb_key(struct afs_wb_key *wbk)
 {
-	if (refcount_dec_and_test(&wbk->usage)) {
+	if (wbk && refcount_dec_and_test(&wbk->usage)) {
 		key_put(wbk->key);
 		kfree(wbk);
 	}
@@ -121,7 +118,7 @@
 	struct key *key;
 	int ret;
 
-	_enter("{%x:%u},", vnode->fid.vid, vnode->fid.vnode);
+	_enter("{%llx:%llu},", vnode->fid.vid, vnode->fid.vnode);
 
 	key = afs_request_key(vnode->volume->cell);
 	if (IS_ERR(key)) {
@@ -171,7 +168,7 @@
 	struct afs_file *af = file->private_data;
 	int ret = 0;
 
-	_enter("{%x:%u},", vnode->fid.vid, vnode->fid.vnode);
+	_enter("{%llx:%llu},", vnode->fid.vid, vnode->fid.vnode);
 
 	if ((file->f_mode & FMODE_WRITE))
 		ret = vfs_fsync(file, 0);
@@ -223,41 +220,52 @@
 }
 #endif
 
+static void afs_fetch_data_success(struct afs_operation *op)
+{
+	struct afs_vnode *vnode = op->file[0].vnode;
+
+	_enter("op=%08x", op->debug_id);
+	afs_vnode_commit_status(op, &op->file[0]);
+	afs_stat_v(vnode, n_fetches);
+	atomic_long_add(op->fetch.req->actual_len, &op->net->n_fetch_bytes);
+}
+
+static void afs_fetch_data_put(struct afs_operation *op)
+{
+	afs_put_read(op->fetch.req);
+}
+
+static const struct afs_operation_ops afs_fetch_data_operation = {
+	.issue_afs_rpc = afs_fs_fetch_data,
+	.issue_yfs_rpc = yfs_fs_fetch_data,
+	.success = afs_fetch_data_success,
+	.aborted = afs_check_for_remote_deletion,
+	.put = afs_fetch_data_put,
+};
+
 /*
  * Fetch file data from the volume.
  */
-int afs_fetch_data(struct afs_vnode *vnode, struct key *key, struct afs_read *desc)
+int afs_fetch_data(struct afs_vnode *vnode, struct key *key, struct afs_read *req)
 {
-	struct afs_fs_cursor fc;
-	int ret;
+	struct afs_operation *op;
 
-	_enter("%s{%x:%u.%u},%x,,,",
+	_enter("%s{%llx:%llu.%u},%x,,,",
 	       vnode->volume->name,
 	       vnode->fid.vid,
 	       vnode->fid.vnode,
 	       vnode->fid.unique,
 	       key_serial(key));
 
-	ret = -ERESTARTSYS;
-	if (afs_begin_vnode_operation(&fc, vnode, key)) {
-		while (afs_select_fileserver(&fc)) {
-			fc.cb_break = afs_calc_vnode_cb_break(vnode);
-			afs_fs_fetch_data(&fc, desc);
-		}
+	op = afs_alloc_operation(key, vnode->volume);
+	if (IS_ERR(op))
+		return PTR_ERR(op);
 
-		afs_check_for_remote_deletion(&fc, fc.vnode);
-		afs_vnode_commit_status(&fc, vnode, fc.cb_break);
-		ret = afs_end_vnode_operation(&fc);
-	}
+	afs_op_set_vnode(op, 0, vnode);
 
-	if (ret == 0) {
-		afs_stat_v(vnode, n_fetches);
-		atomic_long_add(desc->actual_len,
-				&afs_v2net(vnode)->n_fetch_bytes);
-	}
-
-	_leave(" = %d", ret);
-	return ret;
+	op->fetch.req = afs_get_read(req);
+	op->ops = &afs_fetch_data_operation;
+	return afs_do_sync_operation(op);
 }
 
 /*
@@ -302,10 +310,11 @@
 		/* page will not be cached */
 	case -ENOBUFS:
 		_debug("cache said ENOBUFS");
+
+		fallthrough;
 	default:
 	go_on:
-		req = kzalloc(sizeof(struct afs_read) + sizeof(struct page *),
-			      GFP_KERNEL);
+		req = kzalloc(struct_size(req, array, 1), GFP_KERNEL);
 		if (!req)
 			goto enomem;
 
@@ -404,10 +413,10 @@
 /*
  * Make pages available as they're filled.
  */
-static void afs_readpages_page_done(struct afs_call *call, struct afs_read *req)
+static void afs_readpages_page_done(struct afs_read *req)
 {
 #ifdef CONFIG_AFS_FSCACHE
-	struct afs_vnode *vnode = call->reply[0];
+	struct afs_vnode *vnode = req->vnode;
 #endif
 	struct page *page = req->pages[req->index];
 
@@ -444,7 +453,7 @@
 	/* Count the number of contiguous pages at the front of the list. Note
 	 * that the list goes prev-wards rather than next-wards.
 	 */
-	first = list_entry(pages->prev, struct page, lru);
+	first = lru_to_page(pages);
 	index = first->index + 1;
 	n = 1;
 	for (p = first->lru.prev; p != pages; p = p->prev) {
@@ -455,12 +464,12 @@
 		n++;
 	}
 
-	req = kzalloc(sizeof(struct afs_read) + sizeof(struct page *) * n,
-		      GFP_NOFS);
+	req = kzalloc(struct_size(req, array, n), GFP_NOFS);
 	if (!req)
 		return -ENOMEM;
 
 	refcount_set(&req->usage, 1);
+	req->vnode = vnode;
 	req->page_done = afs_readpages_page_done;
 	req->pos = first->index;
 	req->pos <<= PAGE_SHIFT;
@@ -476,7 +485,7 @@
 	 * page at the end of the file.
 	 */
 	do {
-		page = list_entry(pages->prev, struct page, lru);
+		page = lru_to_page(pages);
 		list_del(&page->lru);
 		index = page->index;
 		if (add_to_page_cache_lru(page, mapping, index,
@@ -592,6 +601,63 @@
 }
 
 /*
+ * Adjust the dirty region of the page on truncation or full invalidation,
+ * getting rid of the markers altogether if the region is entirely invalidated.
+ */
+static void afs_invalidate_dirty(struct page *page, unsigned int offset,
+				 unsigned int length)
+{
+	struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
+	unsigned long priv;
+	unsigned int f, t, end = offset + length;
+
+	priv = page_private(page);
+
+	/* we clean up only if the entire page is being invalidated */
+	if (offset == 0 && length == thp_size(page))
+		goto full_invalidate;
+
+	/* If the page was dirtied by page_mkwrite(), the PTE stays writable
+	 * and we don't get another notification to tell us to expand it
+	 * again.
+	 */
+	if (afs_is_page_dirty_mmapped(priv))
+		return;
+
+	/* We may need to shorten the dirty region */
+	f = afs_page_dirty_from(priv);
+	t = afs_page_dirty_to(priv);
+
+	if (t <= offset || f >= end)
+		return; /* Doesn't overlap */
+
+	if (f < offset && t > end)
+		return; /* Splits the dirty region - just absorb it */
+
+	if (f >= offset && t <= end)
+		goto undirty;
+
+	if (f < offset)
+		t = offset;
+	else
+		f = end;
+	if (f == t)
+		goto undirty;
+
+	priv = afs_page_dirty(f, t);
+	set_page_private(page, priv);
+	trace_afs_page_dirty(vnode, tracepoint_string("trunc"), page->index, priv);
+	return;
+
+undirty:
+	trace_afs_page_dirty(vnode, tracepoint_string("undirty"), page->index, priv);
+	clear_page_dirty_for_io(page);
+full_invalidate:
+	priv = (unsigned long)detach_page_private(page);
+	trace_afs_page_dirty(vnode, tracepoint_string("inval"), page->index, priv);
+}
+
+/*
  * invalidate part or all of a page
  * - release a page and clean up its private data if offset is 0 (indicating
  *   the entire page)
@@ -599,31 +665,23 @@
 static void afs_invalidatepage(struct page *page, unsigned int offset,
 			       unsigned int length)
 {
-	struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
-	unsigned long priv;
-
 	_enter("{%lu},%u,%u", page->index, offset, length);
 
 	BUG_ON(!PageLocked(page));
 
+#ifdef CONFIG_AFS_FSCACHE
 	/* we clean up only if the entire page is being invalidated */
 	if (offset == 0 && length == PAGE_SIZE) {
-#ifdef CONFIG_AFS_FSCACHE
 		if (PageFsCache(page)) {
 			struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
 			fscache_wait_on_page_write(vnode->cache, page);
 			fscache_uncache_page(vnode->cache, page);
 		}
+	}
 #endif
 
-		if (PagePrivate(page)) {
-			priv = page_private(page);
-			trace_afs_page_dirty(vnode, tracepoint_string("inval"),
-					     page->index, priv);
-			set_page_private(page, 0);
-			ClearPagePrivate(page);
-		}
-	}
+	if (PagePrivate(page))
+		afs_invalidate_dirty(page, offset, length);
 
 	_leave("");
 }
@@ -637,7 +695,7 @@
 	struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
 	unsigned long priv;
 
-	_enter("{{%x:%u}[%lu],%lx},%x",
+	_enter("{{%llx:%llu}[%lu],%lx},%x",
 	       vnode->fid.vid, vnode->fid.vnode, page->index, page->flags,
 	       gfp_flags);
 
@@ -651,11 +709,9 @@
 #endif
 
 	if (PagePrivate(page)) {
-		priv = page_private(page);
+		priv = (unsigned long)detach_page_private(page);
 		trace_afs_page_dirty(vnode, tracepoint_string("rel"),
 				     page->index, priv);
-		set_page_private(page, 0);
-		ClearPagePrivate(page);
 	}
 
 	/* indicate that the page can be released */