@@ -69,7 +69,6 @@
 
 /**
  * nilfs_forget_buffer - discard dirty state
- * @inode: owner inode of the buffer
  * @bh: buffer head of the buffer to be discarded
  */
 void nilfs_forget_buffer(struct buffer_head *bh)
@@ -289,7 +288,7 @@
  * @dmap: destination page cache
  * @smap: source page cache
  *
- * No pages must no be added to the cache during this process.
+ * No pages must be added to the cache during this process.
  * This must be ensured by the caller.
  */
 void nilfs_copy_back_pages(struct address_space *dmap,
@@ -298,7 +297,6 @@
 	struct pagevec pvec;
 	unsigned int i, n;
 	pgoff_t index = 0;
-	int err;
 
 	pagevec_init(&pvec);
 repeat:
@@ -313,35 +311,34 @@
 		lock_page(page);
 		dpage = find_lock_page(dmap, offset);
 		if (dpage) {
-			/* override existing page on the destination cache */
+			/* overwrite existing page in the destination cache */
 			WARN_ON(PageDirty(dpage));
 			nilfs_copy_page(dpage, page, 0);
 			unlock_page(dpage);
 			put_page(dpage);
+			/* Do we not need to remove page from smap here? */
 		} else {
-			struct page *page2;
+			struct page *p;
 
 			/* move the page to the destination cache */
 			xa_lock_irq(&smap->i_pages);
-			page2 = radix_tree_delete(&smap->i_pages, offset);
-			WARN_ON(page2 != page);
-
+			p = __xa_erase(&smap->i_pages, offset);
+			WARN_ON(page != p);
 			smap->nrpages--;
 			xa_unlock_irq(&smap->i_pages);
 
 			xa_lock_irq(&dmap->i_pages);
-			err = radix_tree_insert(&dmap->i_pages, offset, page);
-			if (unlikely(err < 0)) {
-				WARN_ON(err == -EEXIST);
+			p = __xa_store(&dmap->i_pages, offset, page, GFP_NOFS);
+			if (unlikely(p)) {
+				/* Probably -ENOMEM */
 				page->mapping = NULL;
-				put_page(page); /* for cache */
+				put_page(page);
 			} else {
 				page->mapping = dmap;
 				dmap->nrpages++;
 				if (PageDirty(page))
-					radix_tree_tag_set(&dmap->i_pages,
-							   offset,
-							   PAGECACHE_TAG_DIRTY);
+					__xa_set_mark(&dmap->i_pages, offset,
+							PAGECACHE_TAG_DIRTY);
 			}
 			xa_unlock_irq(&dmap->i_pages);
 		}
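Note (not part of the patch): the hunk above swaps the radix-tree page-cache calls for their XArray equivalents. __xa_erase() returns the entry it removed, and __xa_store() returns the entry previously stored at that index, or an xa_err()-encoded pointer if the store itself failed, which is why the failure check becomes a non-NULL test on the returned pointer instead of `err < 0`. Below is a minimal sketch of the same store-and-mark pattern on a standalone XArray; the xarray and function names are illustrative, only the xa_*/__xa_* calls are the real API:

#include <linux/gfp.h>
#include <linux/xarray.h>

static DEFINE_XARRAY(example_xa);	/* illustrative xarray, not from the patch */

/* Store @entry at @index and mark it, mirroring the locking in the hunk. */
static int example_store_and_mark(unsigned long index, void *entry)
{
	void *old;

	xa_lock_irq(&example_xa);
	/* __xa_store() requires the xa_lock to be held by the caller */
	old = __xa_store(&example_xa, index, entry, GFP_NOFS);
	if (xa_is_err(old)) {
		/* internal node allocation failed, typically -ENOMEM */
		xa_unlock_irq(&example_xa);
		return xa_err(old);
	}
	/* set a mark on the new entry, as the hunk does with PAGECACHE_TAG_DIRTY */
	__xa_set_mark(&example_xa, index, XA_MARK_0);
	xa_unlock_irq(&example_xa);
	return 0;
}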
@@ -393,9 +390,8 @@
 	BUG_ON(!PageLocked(page));
 
 	if (!silent)
-		nilfs_msg(sb, KERN_WARNING,
-			  "discard dirty page: offset=%lld, ino=%lu",
-			  page_offset(page), inode->i_ino);
+		nilfs_warn(sb, "discard dirty page: offset=%lld, ino=%lu",
+			   page_offset(page), inode->i_ino);
 
 	ClearPageUptodate(page);
 	ClearPageMappedToDisk(page);
@@ -411,9 +407,9 @@
 		do {
 			lock_buffer(bh);
 			if (!silent)
-				nilfs_msg(sb, KERN_WARNING,
-					  "discard dirty block: blocknr=%llu, size=%zu",
-					  (u64)bh->b_blocknr, bh->b_size);
+				nilfs_warn(sb,
+					   "discard dirty block: blocknr=%llu, size=%zu",
+					   (u64)bh->b_blocknr, bh->b_size);
 
 			set_mask_bits(&bh->b_state, clear_bits, 0);
 			unlock_buffer(bh);
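Note (not part of the patch): the two logging hunks replace nilfs_msg(sb, KERN_WARNING, ...) calls with the level-specific nilfs_warn(sb, ...) helper, so the severity no longer has to be spelled out at every call site. A hypothetical sketch of how such a wrapper pair can be built; example_msg() and example_warn() are made-up names and not nilfs2's actual definitions:

#include <linux/fs.h>
#include <linux/printk.h>

/* Hypothetical printf-style printer that prefixes the filesystem instance. */
static void example_msg(struct super_block *sb, const char *level,
			const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk("%sNILFS (%s): %pV\n", level, sb ? sb->s_id : "?", &vaf);
	va_end(args);
}

/* Level-specific wrapper: call sites only pass the superblock and message. */
#define example_warn(sb, fmt, ...)					\
	example_msg(sb, KERN_WARNING, fmt, ##__VA_ARGS__)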
@@ -452,10 +448,9 @@
 /*
  * NILFS2 needs clear_page_dirty() in the following two cases:
  *
- * 1) For B-tree node pages and data pages of the dat/gcdat, NILFS2 clears
- *    page dirty flags when it copies back pages from the shadow cache
- *    (gcdat->{i_mapping,i_btnode_cache}) to its original cache
- *    (dat->{i_mapping,i_btnode_cache}).
+ * 1) For B-tree node pages and data pages of DAT file, NILFS2 clears dirty
+ *    flag of pages when it copies back pages from shadow cache to the
+ *    original cache.
  *
  * 2) Some B-tree operations like insertion or deletion may dispose buffers
  *    in dirty state, and this needs to cancel the dirty state of their pages.
@@ -467,8 +462,7 @@
 	if (mapping) {
 		xa_lock_irq(&mapping->i_pages);
 		if (test_bit(PG_dirty, &page->flags)) {
-			radix_tree_tag_clear(&mapping->i_pages,
-					     page_index(page),
+			__xa_clear_mark(&mapping->i_pages, page_index(page),
 					     PAGECACHE_TAG_DIRTY);
 			xa_unlock_irq(&mapping->i_pages);
 			return clear_page_dirty_for_io(page);
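Note (not part of the patch): the last hunk is the same conversion applied to the dirty tag, with radix_tree_tag_clear() becoming __xa_clear_mark() under the xa_lock. A minimal sketch of checking and clearing a mark on a standalone XArray; the names are illustrative, only the xa_*/__xa_* calls are the real API:

#include <linux/xarray.h>

static DEFINE_XARRAY(example_marks_xa);	/* illustrative xarray, not from the patch */

/* Clear a mark on @index and report whether it had been set. */
static bool example_test_and_clear_mark(unsigned long index)
{
	bool was_set;

	xa_lock_irq(&example_marks_xa);
	was_set = xa_get_mark(&example_marks_xa, index, XA_MARK_0);
	if (was_set)
		/* __xa_clear_mark() also expects the xa_lock to be held */
		__xa_clear_mark(&example_marks_xa, index, XA_MARK_0);
	xa_unlock_irq(&example_marks_xa);

	return was_set;
}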