2023-12-11 6778948f9de86c3cfaf36725a7c87dcff9ba247f
kernel/fs/nilfs2/page.c
@@ -69,7 +69,6 @@
 
 /**
  * nilfs_forget_buffer - discard dirty state
- * @inode: owner inode of the buffer
  * @bh: buffer head of the buffer to be discarded
  */
 void nilfs_forget_buffer(struct buffer_head *bh)
@@ -289,7 +288,7 @@
  * @dmap: destination page cache
  * @smap: source page cache
  *
- * No pages must no be added to the cache during this process.
+ * No pages must be added to the cache during this process.
  * This must be ensured by the caller.
  */
 void nilfs_copy_back_pages(struct address_space *dmap,
@@ -298,7 +297,6 @@
 	struct pagevec pvec;
 	unsigned int i, n;
 	pgoff_t index = 0;
-	int err;
 
 	pagevec_init(&pvec);
 repeat:
....@@ -313,35 +311,34 @@
313311 lock_page(page);
314312 dpage = find_lock_page(dmap, offset);
315313 if (dpage) {
316
- /* override existing page on the destination cache */
314
+ /* overwrite existing page in the destination cache */
317315 WARN_ON(PageDirty(dpage));
318316 nilfs_copy_page(dpage, page, 0);
319317 unlock_page(dpage);
320318 put_page(dpage);
319
+ /* Do we not need to remove page from smap here? */
321320 } else {
322
- struct page *page2;
321
+ struct page *p;
323322
324323 /* move the page to the destination cache */
325324 xa_lock_irq(&smap->i_pages);
326
- page2 = radix_tree_delete(&smap->i_pages, offset);
327
- WARN_ON(page2 != page);
328
-
325
+ p = __xa_erase(&smap->i_pages, offset);
326
+ WARN_ON(page != p);
329327 smap->nrpages--;
330328 xa_unlock_irq(&smap->i_pages);
331329
332330 xa_lock_irq(&dmap->i_pages);
333
- err = radix_tree_insert(&dmap->i_pages, offset, page);
334
- if (unlikely(err < 0)) {
335
- WARN_ON(err == -EEXIST);
331
+ p = __xa_store(&dmap->i_pages, offset, page, GFP_NOFS);
332
+ if (unlikely(p)) {
333
+ /* Probably -ENOMEM */
336334 page->mapping = NULL;
337
- put_page(page); /* for cache */
335
+ put_page(page);
338336 } else {
339337 page->mapping = dmap;
340338 dmap->nrpages++;
341339 if (PageDirty(page))
342
- radix_tree_tag_set(&dmap->i_pages,
343
- offset,
344
- PAGECACHE_TAG_DIRTY);
340
+ __xa_set_mark(&dmap->i_pages, offset,
341
+ PAGECACHE_TAG_DIRTY);
345342 }
346343 xa_unlock_irq(&dmap->i_pages);
347344 }
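
For context on the hunk above: with the i_pages lock held via xa_lock_irq(), the locked __xa_*() XArray calls replace the radix tree calls one for one. __xa_erase() returns the entry it removed, and __xa_store() returns the slot's previous contents, or an xa_err()-encoded pointer if node allocation failed, which is why the separate "int err" variable can go away. The following is only a sketch restating that pattern with a hypothetical helper name and reduced error handling, not code from this commit:

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/xarray.h>

/*
 * Hypothetical helper mirroring the pattern above: move one page from
 * the source cache to the destination cache while holding each
 * mapping's i_pages lock in turn.
 */
static int move_cache_page(struct address_space *dst,
			   struct address_space *src,
			   struct page *page, pgoff_t index)
{
	void *old;

	xa_lock_irq(&src->i_pages);
	old = __xa_erase(&src->i_pages, index);	/* returns removed entry */
	WARN_ON(old != page);
	src->nrpages--;
	xa_unlock_irq(&src->i_pages);

	xa_lock_irq(&dst->i_pages);
	old = __xa_store(&dst->i_pages, index, page, GFP_NOFS);
	if (unlikely(old)) {		/* occupied slot or xa_err() value */
		xa_unlock_irq(&dst->i_pages);
		page->mapping = NULL;
		return xa_is_err(old) ? xa_err(old) : -EEXIST;
	}
	page->mapping = dst;
	dst->nrpages++;
	if (PageDirty(page))
		__xa_set_mark(&dst->i_pages, index, PAGECACHE_TAG_DIRTY);
	xa_unlock_irq(&dst->i_pages);
	return 0;
}
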
@@ -393,9 +390,8 @@
 	BUG_ON(!PageLocked(page));
 
 	if (!silent)
-		nilfs_msg(sb, KERN_WARNING,
-			  "discard dirty page: offset=%lld, ino=%lu",
-			  page_offset(page), inode->i_ino);
+		nilfs_warn(sb, "discard dirty page: offset=%lld, ino=%lu",
+			   page_offset(page), inode->i_ino);
 
 	ClearPageUptodate(page);
 	ClearPageMappedToDisk(page);
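
On the logging change in this and the next hunk: nilfs_msg(sb, KERN_WARNING, ...) call sites collapse into nilfs_warn(sb, ...), so the level no longer has to be spelled out and the format string can share a line with the call. A plausible shape for such level-specific wrappers, sketched here rather than quoted from fs/nilfs2/nilfs.h, is a thin macro layer over the nilfs_msg() helper already visible in the removed lines:

#include <linux/kern_levels.h>

/* Sketch only: per-level convenience macros over nilfs_msg(). */
#define nilfs_warn(sb, fmt, ...)					\
	nilfs_msg(sb, KERN_WARNING, fmt, ##__VA_ARGS__)
#define nilfs_err(sb, fmt, ...)						\
	nilfs_msg(sb, KERN_ERR, fmt, ##__VA_ARGS__)
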
@@ -411,9 +407,9 @@
 		do {
 			lock_buffer(bh);
 			if (!silent)
-				nilfs_msg(sb, KERN_WARNING,
-					  "discard dirty block: blocknr=%llu, size=%zu",
-					  (u64)bh->b_blocknr, bh->b_size);
+				nilfs_warn(sb,
+					   "discard dirty block: blocknr=%llu, size=%zu",
+					   (u64)bh->b_blocknr, bh->b_size);
 
 			set_mask_bits(&bh->b_state, clear_bits, 0);
 			unlock_buffer(bh);
@@ -452,10 +448,9 @@
 /*
  * NILFS2 needs clear_page_dirty() in the following two cases:
  *
- * 1) For B-tree node pages and data pages of the dat/gcdat, NILFS2 clears
- *    page dirty flags when it copies back pages from the shadow cache
- *    (gcdat->{i_mapping,i_btnode_cache}) to its original cache
- *    (dat->{i_mapping,i_btnode_cache}).
+ * 1) For B-tree node pages and data pages of DAT file, NILFS2 clears dirty
+ *    flag of pages when it copies back pages from shadow cache to the
+ *    original cache.
 *
 * 2) Some B-tree operations like insertion or deletion may dispose buffers
 *    in dirty state, and this needs to cancel the dirty state of their pages.
@@ -467,8 +462,7 @@
 	if (mapping) {
 		xa_lock_irq(&mapping->i_pages);
 		if (test_bit(PG_dirty, &page->flags)) {
-			radix_tree_tag_clear(&mapping->i_pages,
-					     page_index(page),
+			__xa_clear_mark(&mapping->i_pages, page_index(page),
 					     PAGECACHE_TAG_DIRTY);
 			xa_unlock_irq(&mapping->i_pages);
 			return clear_page_dirty_for_io(page);
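
The final hunk is the same locked-XArray substitution applied to the dirty mark: __xa_clear_mark() stands in for radix_tree_tag_clear() under xa_lock_irq(). A minimal sketch of that pattern, with a hypothetical function name and the page assumed to be locked so page->mapping is stable:

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/xarray.h>

/* Hypothetical helper restating the mark-clearing step in isolation. */
static void cancel_dirty_tag(struct page *page)
{
	struct address_space *mapping = page->mapping;

	if (!mapping)
		return;

	xa_lock_irq(&mapping->i_pages);
	__xa_clear_mark(&mapping->i_pages, page_index(page),
			PAGECACHE_TAG_DIRTY);
	xa_unlock_irq(&mapping->i_pages);
}
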