forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-08 01573e231f18eb2d99162747186f59511f56b64d
kernel/fs/gfs2/aops.c
@@ -1,10 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
  * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
- *
- * This copyrighted material is made available to anyone wishing to use,
- * modify, copy, or redistribute it subject to the terms and conditions
- * of the GNU General Public License version 2.
  */
 
 #include <linux/sched.h>
@@ -80,39 +77,28 @@
 	if (error)
 		return error;
 	if (!buffer_mapped(bh_result))
-		return -EIO;
+		return -ENODATA;
 	return 0;
 }
 
 /**
- * gfs2_writepage_common - Common bits of writepage
- * @page: The page to be written
+ * gfs2_writepage - Write page for writeback mappings
+ * @page: The page
  * @wbc: The writeback control
- *
- * Returns: 1 if writepage is ok, otherwise an error code or zero if no error.
  */
-
-static int gfs2_writepage_common(struct page *page,
-				 struct writeback_control *wbc)
+static int gfs2_writepage(struct page *page, struct writeback_control *wbc)
 {
 	struct inode *inode = page->mapping->host;
 	struct gfs2_inode *ip = GFS2_I(inode);
 	struct gfs2_sbd *sdp = GFS2_SB(inode);
-	loff_t i_size = i_size_read(inode);
-	pgoff_t end_index = i_size >> PAGE_SHIFT;
-	unsigned offset;
+	struct iomap_writepage_ctx wpc = { };
 
 	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
 		goto out;
 	if (current->journal_info)
 		goto redirty;
-	/* Is the page fully outside i_size? (truncate in progress) */
-	offset = i_size & (PAGE_SIZE-1);
-	if (page->index > end_index || (page->index == end_index && !offset)) {
-		page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
-		goto out;
-	}
-	return 1;
+	return iomap_writepage(page, wbc, &wpc, &gfs2_writeback_ops);
+
 redirty:
 	redirty_page_for_writepage(wbc, page);
 out:
@@ -121,28 +107,15 @@
 }
 
 /**
- * gfs2_writepage - Write page for writeback mappings
- * @page: The page
+ * gfs2_write_jdata_page - gfs2 jdata-specific version of block_write_full_page
+ * @page: The page to write
  * @wbc: The writeback control
  *
- */
-
-static int gfs2_writepage(struct page *page, struct writeback_control *wbc)
-{
-	int ret;
-
-	ret = gfs2_writepage_common(page, wbc);
-	if (ret <= 0)
-		return ret;
-
-	return nobh_writepage(page, gfs2_get_block_noalloc, wbc);
-}
-
-/* This is the same as calling block_write_full_page, but it also
+ * This is the same as calling block_write_full_page, but it also
  * writes pages outside of i_size
  */
-static int gfs2_write_full_page(struct page *page, get_block_t *get_block,
-				struct writeback_control *wbc)
+static int gfs2_write_jdata_page(struct page *page,
+				 struct writeback_control *wbc)
 {
 	struct inode * const inode = page->mapping->host;
 	loff_t i_size = i_size_read(inode);
@@ -156,11 +129,11 @@
 	 * the page size, the remaining memory is zeroed when mapped, and
 	 * writes to that region are not written out to the file."
 	 */
-	offset = i_size & (PAGE_SIZE-1);
+	offset = i_size & (PAGE_SIZE - 1);
 	if (page->index == end_index && offset)
 		zero_user_segment(page, offset, PAGE_SIZE);
 
-	return __block_write_full_page(inode, page, get_block, wbc,
+	return __block_write_full_page(inode, page, gfs2_get_block_noalloc, wbc,
 				       end_buffer_async_write);
 }
 
@@ -189,7 +162,7 @@
 		}
 		gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize);
 	}
-	return gfs2_write_full_page(page, gfs2_get_block_noalloc, wbc);
+	return gfs2_write_jdata_page(page, wbc);
 }
 
 /**
@@ -206,14 +179,12 @@
 	struct inode *inode = page->mapping->host;
 	struct gfs2_inode *ip = GFS2_I(inode);
 	struct gfs2_sbd *sdp = GFS2_SB(inode);
-	int ret;
 
 	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
 		goto out;
 	if (PageChecked(page) || current->journal_info)
 		goto out_ignore;
-	ret = __gfs2_jdata_writepage(page, wbc);
-	return ret;
+	return __gfs2_jdata_writepage(page, wbc);
 
 out_ignore:
 	redirty_page_for_writepage(wbc, page);
@@ -233,7 +204,8 @@
 			    struct writeback_control *wbc)
 {
 	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
-	int ret = mpage_writepages(mapping, wbc, gfs2_get_block_noalloc);
+	struct iomap_writepage_ctx wpc = { };
+	int ret;
 
 	/*
 	 * Even if we didn't write any pages here, we might still be holding
@@ -241,9 +213,9 @@
 	 * want balance_dirty_pages() to loop indefinitely trying to write out
 	 * pages held in the ail that it can't find.
 	 */
+	ret = iomap_writepages(mapping, wbc, &wpc, &gfs2_writeback_ops);
 	if (ret == 0)
 		set_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags);
-
 	return ret;
 }
 
@@ -266,7 +238,7 @@
 {
 	struct inode *inode = mapping->host;
 	struct gfs2_sbd *sdp = GFS2_SB(inode);
-	unsigned nrblocks = nr_pages * (PAGE_SIZE/inode->i_sb->s_blocksize);
+	unsigned nrblocks = nr_pages * (PAGE_SIZE >> inode->i_blkbits);
 	int i;
 	int ret;
 
@@ -360,13 +332,13 @@
 	int done = 0;
 	struct pagevec pvec;
 	int nr_pages;
-	pgoff_t uninitialized_var(writeback_index);
+	pgoff_t writeback_index;
 	pgoff_t index;
 	pgoff_t end;
 	pgoff_t done_index;
 	int cycled;
 	int range_whole = 0;
-	int tag;
+	xa_mark_t tag;
 
 	pagevec_init(&pvec);
 	if (wbc->range_cyclic) {
@@ -457,8 +429,7 @@
  *
  * Returns: errno
  */
-
-int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
+static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
 {
 	struct buffer_head *dibh;
 	u64 dsize = i_size_read(&ip->i_inode);
@@ -494,25 +465,15 @@
 }
 
 
-/**
- * __gfs2_readpage - readpage
- * @file: The file to read a page for
- * @page: The page to read
- *
- * This is the core of gfs2's readpage. It's used by the internal file
- * reading code as in that case we already hold the glock. Also it's
- * called by gfs2_readpage() once the required lock has been granted.
- */
-
 static int __gfs2_readpage(void *file, struct page *page)
 {
-	struct gfs2_inode *ip = GFS2_I(page->mapping->host);
-	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
-
+	struct inode *inode = page->mapping->host;
+	struct gfs2_inode *ip = GFS2_I(inode);
+	struct gfs2_sbd *sdp = GFS2_SB(inode);
 	int error;
 
-	if (i_blocksize(page->mapping->host) == PAGE_SIZE &&
-	    !page_has_buffers(page)) {
+	if (!gfs2_is_jdata(ip) ||
+	    (i_blocksize(inode) == PAGE_SIZE && !page_has_buffers(page))) {
 		error = iomap_readpage(page, &gfs2_iomap_ops);
 	} else if (gfs2_is_stuffed(ip)) {
 		error = stuffed_readpage(ip, page);
@@ -521,7 +482,7 @@
 		error = mpage_readpage(page, gfs2_block_map);
 	}
 
-	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
+	if (unlikely(gfs2_withdrawn(sdp)))
 		return -EIO;
 
 	return error;
@@ -531,36 +492,11 @@
  * gfs2_readpage - read a page of a file
  * @file: The file to read
  * @page: The page of the file
- *
- * This deals with the locking required. We have to unlock and
- * relock the page in order to get the locking in the right
- * order.
  */
 
 static int gfs2_readpage(struct file *file, struct page *page)
 {
-	struct address_space *mapping = page->mapping;
-	struct gfs2_inode *ip = GFS2_I(mapping->host);
-	struct gfs2_holder gh;
-	int error;
-
-	unlock_page(page);
-	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
-	error = gfs2_glock_nq(&gh);
-	if (unlikely(error))
-		goto out;
-	error = AOP_TRUNCATED_PAGE;
-	lock_page(page);
-	if (page->mapping == mapping && !PageUptodate(page))
-		error = __gfs2_readpage(file, page);
-	else
-		unlock_page(page);
-	gfs2_glock_dq(&gh);
-out:
-	gfs2_holder_uninit(&gh);
-	if (error && error != AOP_TRUNCATED_PAGE)
-		lock_page(page);
-	return error;
+	return __gfs2_readpage(file, page);
 }
 
 /**
@@ -576,7 +512,7 @@
                        unsigned size)
 {
 	struct address_space *mapping = ip->i_inode.i_mapping;
-	unsigned long index = *pos / PAGE_SIZE;
+	unsigned long index = *pos >> PAGE_SHIFT;
 	unsigned offset = *pos & (PAGE_SIZE - 1);
 	unsigned copied = 0;
 	unsigned amt;
@@ -603,7 +539,7 @@
 }
 
 /**
- * gfs2_readpages - Read a bunch of pages at once
+ * gfs2_readahead - Read a bunch of pages at once
  * @file: The file to read from
  * @mapping: Address space info
  * @pages: List of pages to read
@@ -616,31 +552,21 @@
  *    obviously not something we'd want to do on too regular a basis.
  *    Any I/O we ignore at this time will be done via readpage later.
  * 2. We don't handle stuffed files here we let readpage do the honours.
- * 3. mpage_readpages() does most of the heavy lifting in the common case.
+ * 3. mpage_readahead() does most of the heavy lifting in the common case.
  * 4. gfs2_block_map() is relied upon to set BH_Boundary in the right places.
  */
 
-static int gfs2_readpages(struct file *file, struct address_space *mapping,
-			  struct list_head *pages, unsigned nr_pages)
+static void gfs2_readahead(struct readahead_control *rac)
 {
-	struct inode *inode = mapping->host;
+	struct inode *inode = rac->mapping->host;
 	struct gfs2_inode *ip = GFS2_I(inode);
-	struct gfs2_sbd *sdp = GFS2_SB(inode);
-	struct gfs2_holder gh;
-	int ret;
 
-	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
-	ret = gfs2_glock_nq(&gh);
-	if (unlikely(ret))
-		goto out_uninit;
-	if (!gfs2_is_stuffed(ip))
-		ret = mpage_readpages(mapping, pages, nr_pages, gfs2_block_map);
-	gfs2_glock_dq(&gh);
-out_uninit:
-	gfs2_holder_uninit(&gh);
-	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
-		ret = -EIO;
-	return ret;
+	if (gfs2_is_stuffed(ip))
+		;
+	else if (gfs2_is_jdata(ip))
+		mpage_readahead(rac, gfs2_block_map);
+	else
+		iomap_readahead(rac, &gfs2_iomap_ops);
 }
 
 /**
@@ -649,7 +575,7 @@
  */
 void adjust_fs_space(struct inode *inode)
 {
-	struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
+	struct gfs2_sbd *sdp = GFS2_SB(inode);
 	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
 	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
 	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
@@ -657,10 +583,13 @@
 	struct buffer_head *m_bh, *l_bh;
 	u64 fs_total, new_free;
 
+	if (gfs2_trans_begin(sdp, 2 * RES_STATFS, 0) != 0)
+		return;
+
 	/* Total up the file system space, according to the latest rindex. */
 	fs_total = gfs2_ri_total(sdp);
 	if (gfs2_meta_inode_buffer(m_ip, &m_bh) != 0)
-		return;
+		goto out;
 
 	spin_lock(&sdp->sd_statfs_spin);
 	gfs2_statfs_change_in(m_sc, m_bh->b_data +
@@ -675,52 +604,14 @@
 	gfs2_statfs_change(sdp, new_free, new_free, 0);
 
 	if (gfs2_meta_inode_buffer(l_ip, &l_bh) != 0)
-		goto out;
+		goto out2;
 	update_statfs(sdp, m_bh, l_bh);
 	brelse(l_bh);
-out:
+out2:
 	brelse(m_bh);
-}
-
-/**
- * gfs2_stuffed_write_end - Write end for stuffed files
- * @inode: The inode
- * @dibh: The buffer_head containing the on-disk inode
- * @pos: The file position
- * @copied: How much was actually copied by the VFS
- * @page: The page
- *
- * This copies the data from the page into the inode block after
- * the inode data structure itself.
- *
- * Returns: copied bytes or errno
- */
-int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
-			   loff_t pos, unsigned copied,
-			   struct page *page)
-{
-	struct gfs2_inode *ip = GFS2_I(inode);
-	u64 to = pos + copied;
-	void *kaddr;
-	unsigned char *buf = dibh->b_data + sizeof(struct gfs2_dinode);
-
-	BUG_ON(pos + copied > gfs2_max_stuffed_size(ip));
-
-	kaddr = kmap_atomic(page);
-	memcpy(buf + pos, kaddr + pos, copied);
-	flush_dcache_page(page);
-	kunmap_atomic(kaddr);
-
-	WARN_ON(!PageUptodate(page));
-	unlock_page(page);
-	put_page(page);
-
-	if (copied) {
-		if (inode->i_size < to)
-			i_size_write(inode, to);
-		mark_inode_dirty(inode);
-	}
-	return copied;
+out:
+	sdp->sd_rindex_uptodate = 0;
+	gfs2_trans_end(sdp);
 }
 
 /**
@@ -732,7 +623,8 @@
 
 static int jdata_set_page_dirty(struct page *page)
 {
-	SetPageChecked(page);
+	if (current->journal_info)
+		SetPageChecked(page);
 	return __set_page_dirty_buffers(page);
 }
 
@@ -756,7 +648,7 @@
 		return 0;
 
 	if (!gfs2_is_stuffed(ip))
-		dblock = generic_block_bmap(mapping, lblock, gfs2_block_map);
+		dblock = iomap_bmap(mapping, lblock, &gfs2_iomap_ops);
 
 	gfs2_glock_dq_uninit(&i_gh);
 
@@ -774,8 +666,11 @@
 	if (bd) {
 		if (!list_empty(&bd->bd_list) && !buffer_pinned(bh))
 			list_del_init(&bd->bd_list);
-		else
+		else {
+			spin_lock(&sdp->sd_ail_lock);
 			gfs2_remove_from_journal(bh, REMOVE_JDATA);
+			spin_unlock(&sdp->sd_ail_lock);
+		}
 	}
 	bh->b_bdev = NULL;
 	clear_buffer_mapped(bh);
@@ -820,10 +715,10 @@
  * @page: the page that's being released
  * @gfp_mask: passed from Linux VFS, ignored by us
  *
- * Call try_to_free_buffers() if the buffers in this page can be
- * released.
+ * Calls try_to_free_buffers() to free the buffers and put the page if the
+ * buffers can be released.
  *
- * Returns: 0
+ * Returns: 1 if the page was put or else 0
  */
 
 int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
@@ -847,7 +742,6 @@
 	 */
 
 	gfs2_log_lock(sdp);
-	spin_lock(&sdp->sd_ail_lock);
 	head = bh = page_buffers(page);
 	do {
 		if (atomic_read(&bh->b_count))
@@ -859,18 +753,22 @@
 			goto cannot_release;
 		bh = bh->b_this_page;
 	} while(bh != head);
-	spin_unlock(&sdp->sd_ail_lock);
 
 	head = bh = page_buffers(page);
 	do {
 		bd = bh->b_private;
 		if (bd) {
 			gfs2_assert_warn(sdp, bd->bd_bh == bh);
-			if (!list_empty(&bd->bd_list))
-				list_del_init(&bd->bd_list);
 			bd->bd_bh = NULL;
 			bh->b_private = NULL;
-			kmem_cache_free(gfs2_bufdata_cachep, bd);
+			/*
+			 * The bd may still be queued as a revoke, in which
+			 * case we must not dequeue nor free it.
+			 */
+			if (!bd->bd_blkno && !list_empty(&bd->bd_list))
+				list_del_init(&bd->bd_list);
+			if (list_empty(&bd->bd_list))
+				kmem_cache_free(gfs2_bufdata_cachep, bd);
 		}
 
 		bh = bh->b_this_page;
@@ -880,37 +778,22 @@
 	return try_to_free_buffers(page);
 
 cannot_release:
-	spin_unlock(&sdp->sd_ail_lock);
 	gfs2_log_unlock(sdp);
 	return 0;
 }
 
-static const struct address_space_operations gfs2_writeback_aops = {
+static const struct address_space_operations gfs2_aops = {
 	.writepage = gfs2_writepage,
 	.writepages = gfs2_writepages,
 	.readpage = gfs2_readpage,
-	.readpages = gfs2_readpages,
+	.readahead = gfs2_readahead,
+	.set_page_dirty = iomap_set_page_dirty,
+	.releasepage = iomap_releasepage,
+	.invalidatepage = iomap_invalidatepage,
 	.bmap = gfs2_bmap,
-	.invalidatepage = gfs2_invalidatepage,
-	.releasepage = gfs2_releasepage,
 	.direct_IO = noop_direct_IO,
-	.migratepage = buffer_migrate_page,
-	.is_partially_uptodate = block_is_partially_uptodate,
-	.error_remove_page = generic_error_remove_page,
-};
-
-static const struct address_space_operations gfs2_ordered_aops = {
-	.writepage = gfs2_writepage,
-	.writepages = gfs2_writepages,
-	.readpage = gfs2_readpage,
-	.readpages = gfs2_readpages,
-	.set_page_dirty = __set_page_dirty_buffers,
-	.bmap = gfs2_bmap,
-	.invalidatepage = gfs2_invalidatepage,
-	.releasepage = gfs2_releasepage,
-	.direct_IO = noop_direct_IO,
-	.migratepage = buffer_migrate_page,
-	.is_partially_uptodate = block_is_partially_uptodate,
+	.migratepage = iomap_migrate_page,
+	.is_partially_uptodate = iomap_is_partially_uptodate,
 	.error_remove_page = generic_error_remove_page,
 };
 
@@ -918,7 +801,7 @@
 	.writepage = gfs2_jdata_writepage,
 	.writepages = gfs2_jdata_writepages,
 	.readpage = gfs2_readpage,
-	.readpages = gfs2_readpages,
+	.readahead = gfs2_readahead,
 	.set_page_dirty = jdata_set_page_dirty,
 	.bmap = gfs2_bmap,
 	.invalidatepage = gfs2_invalidatepage,
@@ -929,15 +812,8 @@
 
 void gfs2_set_aops(struct inode *inode)
 {
-	struct gfs2_inode *ip = GFS2_I(inode);
-
-	if (gfs2_is_writeback(ip))
-		inode->i_mapping->a_ops = &gfs2_writeback_aops;
-	else if (gfs2_is_ordered(ip))
-		inode->i_mapping->a_ops = &gfs2_ordered_aops;
-	else if (gfs2_is_jdata(ip))
+	if (gfs2_is_jdata(GFS2_I(inode)))
 		inode->i_mapping->a_ops = &gfs2_jdata_aops;
 	else
-		BUG();
+		inode->i_mapping->a_ops = &gfs2_aops;
 }
-