...
 EXPORT_TRACEPOINT_SYMBOL(android_fs_datawrite_end);
 EXPORT_TRACEPOINT_SYMBOL(android_fs_dataread_start);
 EXPORT_TRACEPOINT_SYMBOL(android_fs_dataread_end);
-EXPORT_TRACEPOINT_SYMBOL(android_fs_fsync_start);
-EXPORT_TRACEPOINT_SYMBOL(android_fs_fsync_end);

 /*
  * I/O completion handler for multipage BIOs.
...
 static void mpage_end_io(struct bio *bio)
 {
         struct bio_vec *bv;
-        int i;
+        struct bvec_iter_all iter_all;

         if (trace_android_fs_dataread_end_enabled() &&
             (bio_data_dir(bio) == READ)) {
...
                                               bio->bi_iter.bi_size);
         }

-        bio_for_each_segment_all(bv, bio, i) {
+        bio_for_each_segment_all(bv, bio, iter_all) {
                 struct page *page = bv->bv_page;
                 page_endio(page, bio_op(bio),
                            blk_status_to_errno(bio->bi_status));
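
The mpage_end_io() hunk above follows the kernel-wide change to bio_for_each_segment_all(): the macro now takes a struct bvec_iter_all cursor instead of a plain integer index. A minimal sketch of a completion handler written against the newer iterator; the handler name is illustrative and not part of this patch:

#include <linux/bio.h>
#include <linux/pagemap.h>

/* Illustrative endio handler: walk every segment of a completed bio with
 * the bvec_iter_all-based bio_for_each_segment_all() and finish each page.
 */
static void example_end_io(struct bio *bio)
{
        struct bio_vec *bv;
        struct bvec_iter_all iter_all;

        bio_for_each_segment_all(bv, bio, iter_all) {
                page_endio(bv->bv_page, bio_op(bio),
                           blk_status_to_errno(bio->bi_status));
        }
        bio_put(bio);
}
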
...
         }
         bio->bi_end_io = mpage_end_io;
         bio_set_op_attrs(bio, op, op_flags);
-        guard_bio_eod(op, bio);
+        guard_bio_eod(bio);
         submit_bio(bio);
         return NULL;
 }
...
 }

 /*
- * support function for mpage_readpages. The fs supplied get_block might
+ * support function for mpage_readahead. The fs supplied get_block might
  * return an up to date buffer. This is used to map that buffer into
  * the page, which allows readpage to avoid triggering a duplicate call
  * to get_block.
...
 }

 /**
- * mpage_readpages - populate an address space with some pages & start reads against them
- * @mapping: the address_space
- * @pages: The address of a list_head which contains the target pages. These
- *   pages have their ->index populated and are otherwise uninitialised.
- *   The page at @pages->prev has the lowest file offset, and reads should be
- *   issued in @pages->prev to @pages->next order.
- * @nr_pages: The number of pages at *@pages
+ * mpage_readahead - start reads against pages
+ * @rac: Describes which pages to read.
  * @get_block: The filesystem's block mapper function.
  *
  * This function walks the pages and the blocks within each page, building and
...
  *
  * This all causes the disk requests to be issued in the correct order.
  */
-int
-mpage_readpages(struct address_space *mapping, struct list_head *pages,
-                unsigned nr_pages, get_block_t get_block)
+void mpage_readahead(struct readahead_control *rac, get_block_t get_block)
 {
+        struct page *page;
         struct mpage_readpage_args args = {
                 .get_block = get_block,
                 .is_readahead = true,
         };
-        unsigned page_idx;

-        for (page_idx = 0; page_idx < nr_pages; page_idx++) {
-                struct page *page = lru_to_page(pages);
-
+        while ((page = readahead_page(rac))) {
                 prefetchw(&page->flags);
-                list_del(&page->lru);
-                if (!add_to_page_cache_lru(page, mapping,
-                                        page->index,
-                                        readahead_gfp_mask(mapping))) {
-                        args.page = page;
-                        args.nr_pages = nr_pages - page_idx;
-                        args.bio = do_mpage_readpage(&args);
-                }
+                args.page = page;
+                args.nr_pages = readahead_count(rac);
+                args.bio = do_mpage_readpage(&args);
                 put_page(page);
         }
-        BUG_ON(!list_empty(pages));
         if (args.bio)
                 mpage_bio_submit(REQ_OP_READ, REQ_RAHEAD, args.bio);
-        return 0;
 }
-EXPORT_SYMBOL(mpage_readpages);
+EXPORT_SYMBOL_NS(mpage_readahead, ANDROID_GKI_VFS_EXPORT_ONLY);
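
With the list-based ->readpages path gone here, a filesystem adopts the helper by pointing its ->readahead address_space operation at a thin wrapper around mpage_readahead(). A hedged sketch of a typical caller, assuming a filesystem-specific get_block callback; the myfs_* names are hypothetical placeholders, not part of this patch:

#include <linux/fs.h>
#include <linux/mpage.h>
#include <linux/pagemap.h>

/* Hypothetical block mapper supplied by the filesystem (defined elsewhere). */
int myfs_get_block(struct inode *inode, sector_t iblock,
                   struct buffer_head *bh_result, int create);

static int myfs_readpage(struct file *file, struct page *page)
{
        return mpage_readpage(page, myfs_get_block);
}

/* ->readahead replaces the old ->readpages hook: the pages described by
 * @rac are already locked and in the page cache, so the helper only has
 * to map blocks and issue the reads.
 */
static void myfs_readahead(struct readahead_control *rac)
{
        mpage_readahead(rac, myfs_get_block);
}

static const struct address_space_operations myfs_aops = {
        .readpage       = myfs_readpage,
        .readahead      = myfs_readahead,
};

Compared with the removed mpage_readpages() loop, the caller no longer touches add_to_page_cache_lru() or a page list at all; readahead_page() and readahead_count() hand over pages that are already locked and in the page cache.
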

 /*
  * This isn't called much at all
...
                 mpage_bio_submit(REQ_OP_READ, 0, args.bio);
         return 0;
 }
-EXPORT_SYMBOL(mpage_readpage);
+EXPORT_SYMBOL_NS(mpage_readpage, ANDROID_GKI_VFS_EXPORT_ONLY);
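
Both mpage exports now live in the ANDROID_GKI_VFS_EXPORT_ONLY symbol namespace rather than in the global namespace, so a module that calls them must import that namespace; otherwise modpost warns at build time and, depending on kernel configuration, the loader refuses to resolve the symbols. A minimal sketch of the declaration such a module would carry:

#include <linux/module.h>

/* Declares that this module may use symbols exported with
 * EXPORT_SYMBOL_NS(..., ANDROID_GKI_VFS_EXPORT_ONLY).
 */
MODULE_IMPORT_NS(ANDROID_GKI_VFS_EXPORT_ONLY);
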

 /*
  * Writing is not so simple.
...
                  * Page has buffers, but they are all unmapped. The page was
                  * created by pagein or read over a hole which was handled by
                  * block_read_full_page(). If this address_space is also
-                 * using mpage_readpages then this can rarely happen.
+                 * using mpage_readahead then this can rarely happen.
                  */
                 goto confused;
         }
...
          * the confused fail path above (OOM) will be very confused when
          * it finds all bh marked clean (i.e. it will not write anything)
          */
-        wbc_account_io(wbc, page, PAGE_SIZE);
+        wbc_account_cgroup_owner(wbc, page, PAGE_SIZE);
         length = first_unmapped << blkbits;
         if (bio_add_page(bio, page, length, 0) < length) {
                 bio = mpage_bio_submit(REQ_OP_WRITE, op_flags, bio);