.. | ..
| 1 | +// SPDX-License-Identifier: GPL-2.0-only
1 | 2 | /*
2 | 3 | * linux/fs/buffer.c
3 | 4 | *
.. | ..
47 | 48 | #include <linux/sched/mm.h>
48 | 49 | #include <trace/events/block.h>
49 | 50 | #include <linux/fscrypt.h>
| 51 | +
| 52 | +#include "internal.h"
50 | 53 |
51 | 54 | static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
52 | 55 | static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
.. | ..
120 | 123 | }
121 | 124 | EXPORT_SYMBOL(__wait_on_buffer);
122 | 125 |
123 | | -static void
124 | | -__clear_page_buffers(struct page *page)
125 | | -{
126 | | - ClearPagePrivate(page);
127 | | - set_page_private(page, 0);
128 | | - put_page(page);
129 | | -}
130 | | -
131 | 126 | static void buffer_io_error(struct buffer_head *bh, char *msg)
132 | 127 | {
133 | 128 | if (!test_bit(BH_Quiet, &bh->b_state))
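The deleted __clear_page_buffers() helper is no longer needed: later hunks switch this file to the generic page-private helpers from <linux/pagemap.h>, attach_page_private() and detach_page_private(). As a rough, simplified sketch (not copied from this patch), those helpers fold the ClearPagePrivate()/set_page_private()/put_page() sequence above into single calls:

```c
/* Simplified sketch of the generic helpers this patch switches to
 * (see include/linux/pagemap.h for the real definitions). */
static inline void attach_page_private(struct page *page, void *data)
{
	get_page(page);
	set_page_private(page, (unsigned long)data);
	SetPagePrivate(page);
}

static inline void *detach_page_private(struct page *page)
{
	void *data = (void *)page_private(page);

	if (!PagePrivate(page))
		return NULL;
	ClearPagePrivate(page);
	set_page_private(page, 0);
	put_page(page);

	return data;
}
```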
.. | ..
178 | 173 | unlock_buffer(bh);
179 | 174 | put_bh(bh);
180 | 175 | }
181 | | -EXPORT_SYMBOL(end_buffer_write_sync);
| 176 | +EXPORT_SYMBOL_NS(end_buffer_write_sync, ANDROID_GKI_VFS_EXPORT_ONLY);
182 | 177 |
183 | 178 | /*
184 | 179 | * Various filesystems appear to want __find_get_block to be non-blocking.
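This is the first of many exports in this file that move from EXPORT_SYMBOL() to EXPORT_SYMBOL_NS() in the ANDROID_GKI_VFS_EXPORT_ONLY namespace. A module that calls such symbols must import the namespace explicitly, otherwise modpost rejects the reference at build time. A minimal, hypothetical out-of-tree module showing only that requirement:

```c
// SPDX-License-Identifier: GPL-2.0-only
/* Hypothetical module, for illustration only. */
#include <linux/module.h>
#include <linux/buffer_head.h>

/* Required to link against symbols exported with EXPORT_SYMBOL_NS(). */
MODULE_IMPORT_NS(ANDROID_GKI_VFS_EXPORT_ONLY);
MODULE_LICENSE("GPL");

static int __init ns_demo_init(void)
{
	return 0;	/* calls to e.g. mark_buffer_dirty() would now resolve */
}
module_init(ns_demo_init);

static void __exit ns_demo_exit(void)
{
}
module_exit(ns_demo_exit);
```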
.. | ..
246 | 241 | return ret;
247 | 242 | }
248 | 243 |
249 | | -/*
250 | | - * I/O completion handler for block_read_full_page() - pages
251 | | - * which come unlocked at the end of I/O.
252 | | - */
253 | 244 | static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
254 | 245 | {
255 | 246 | unsigned long flags;
.. | ..
275 | 266 | * decide that the page is now completely done.
276 | 267 | */
277 | 268 | first = page_buffers(page);
278 | | - local_irq_save(flags);
279 | | - bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
| 269 | + spin_lock_irqsave(&first->b_uptodate_lock, flags);
280 | 270 | clear_buffer_async_read(bh);
281 | 271 | unlock_buffer(bh);
282 | 272 | tmp = bh;
.. | ..
289 | 279 | }
290 | 280 | tmp = tmp->b_this_page;
291 | 281 | } while (tmp != bh);
292 | | - bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
293 | | - local_irq_restore(flags);
| 282 | + spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
294 | 283 |
295 | 284 | /*
296 | 285 | * If none of the buffers had errors and they are all
.. | ..
302 | 291 | return;
303 | 292 |
304 | 293 | still_busy:
305 | | - bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
306 | | - local_irq_restore(flags);
| 294 | + spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
307 | 295 | return;
| 296 | +}
| 297 | +
| 298 | +struct decrypt_bh_ctx {
| 299 | + struct work_struct work;
| 300 | + struct buffer_head *bh;
| 301 | +};
| 302 | +
| 303 | +static void decrypt_bh(struct work_struct *work)
| 304 | +{
| 305 | + struct decrypt_bh_ctx *ctx =
| 306 | + container_of(work, struct decrypt_bh_ctx, work);
| 307 | + struct buffer_head *bh = ctx->bh;
| 308 | + int err;
| 309 | +
| 310 | + err = fscrypt_decrypt_pagecache_blocks(bh->b_page, bh->b_size,
| 311 | + bh_offset(bh));
| 312 | + end_buffer_async_read(bh, err == 0);
| 313 | + kfree(ctx);
| 314 | +}
| 315 | +
| 316 | +/*
| 317 | + * I/O completion handler for block_read_full_page() - pages
| 318 | + * which come unlocked at the end of I/O.
| 319 | + */
| 320 | +static void end_buffer_async_read_io(struct buffer_head *bh, int uptodate)
| 321 | +{
| 322 | + /* Decrypt if needed */
| 323 | + if (uptodate &&
| 324 | + fscrypt_inode_uses_fs_layer_crypto(bh->b_page->mapping->host)) {
| 325 | + struct decrypt_bh_ctx *ctx = kmalloc(sizeof(*ctx), GFP_ATOMIC);
| 326 | +
| 327 | + if (ctx) {
| 328 | + INIT_WORK(&ctx->work, decrypt_bh);
| 329 | + ctx->bh = bh;
| 330 | + fscrypt_enqueue_decrypt_work(&ctx->work);
| 331 | + return;
| 332 | + }
| 333 | + uptodate = 0;
| 334 | + }
| 335 | + end_buffer_async_read(bh, uptodate);
308 | 336 | }
309 | 337 |
310 | 338 | /*
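Two things change in the read-completion path above: the BH_Uptodate_Lock bit spinlock (taken with interrupts disabled by hand) is replaced by a real per-buffer_head spinlock, b_uptodate_lock, and completion is split so that fscrypt file-contents decryption can be deferred to a workqueue before the buffer is marked uptodate. A before/after sketch of just the locking pattern, illustrative only, with the buffer-ring walk elided:

```c
/* Before: bit spinlock stored in b_state, irqs disabled manually. */
local_irq_save(flags);
bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
/* ... clear the async flag and walk the first->b_this_page ring ... */
bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
local_irq_restore(flags);

/* After: a dedicated spinlock in struct buffer_head, initialised in
 * alloc_buffer_head() (see the final hunk of this diff). */
spin_lock_irqsave(&first->b_uptodate_lock, flags);
/* ... same walk ... */
spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
```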
.. | ..
331 | 359 | }
332 | 360 |
333 | 361 | first = page_buffers(page);
334 | | - local_irq_save(flags);
335 | | - bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
| 362 | + spin_lock_irqsave(&first->b_uptodate_lock, flags);
336 | 363 |
337 | 364 | clear_buffer_async_write(bh);
338 | 365 | unlock_buffer(bh);
.. | ..
344 | 371 | }
345 | 372 | tmp = tmp->b_this_page;
346 | 373 | }
347 | | - bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
348 | | - local_irq_restore(flags);
| 374 | + spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
349 | 375 | end_page_writeback(page);
350 | 376 | return;
351 | 377 |
352 | 378 | still_busy:
353 | | - bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
354 | | - local_irq_restore(flags);
| 379 | + spin_unlock_irqrestore(&first->b_uptodate_lock, flags);
355 | 380 | return;
356 | 381 | }
357 | 382 | EXPORT_SYMBOL(end_buffer_async_write);
.. | ..
379 | 404 | */
380 | 405 | static void mark_buffer_async_read(struct buffer_head *bh)
381 | 406 | {
382 | | - bh->b_end_io = end_buffer_async_read;
| 407 | + bh->b_end_io = end_buffer_async_read_io;
383 | 408 | set_buffer_async_read(bh);
384 | 409 | }
385 | 410 |
.. | ..
394 | 419 | {
395 | 420 | mark_buffer_async_write_endio(bh, end_buffer_async_write);
396 | 421 | }
397 | | -EXPORT_SYMBOL(mark_buffer_async_write);
| 422 | +EXPORT_SYMBOL_NS(mark_buffer_async_write, ANDROID_GKI_VFS_EXPORT_ONLY);
398 | 423 |
399 | 424 |
400 | 425 | /*
.. | ..
498 | 523 |
499 | 524 | void emergency_thaw_bdev(struct super_block *sb)
500 | 525 | {
501 | | - while (sb->s_bdev && !thaw_bdev(sb->s_bdev, sb))
| 526 | + while (sb->s_bdev && !thaw_bdev(sb->s_bdev))
502 | 527 | printk(KERN_WARNING "Emergency Thaw on %pg\n", sb->s_bdev);
503 | 528 | }
504 | 529 |
.. | ..
564 | 589 | EXPORT_SYMBOL(mark_buffer_dirty_inode);
565 | 590 |
566 | 591 | /*
567 | | - * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
| 592 | + * Mark the page dirty, and set it dirty in the page cache, and mark the inode
568 | 593 | * dirty.
569 | 594 | *
570 | 595 | * If warn is true, then emit a warning if the page is not uptodate and has
.. | ..
581 | 606 | if (page->mapping) { /* Race with truncate? */
582 | 607 | WARN_ON_ONCE(warn && !PageUptodate(page));
583 | 608 | account_page_dirtied(page, mapping);
584 | | - radix_tree_tag_set(&mapping->i_pages,
585 | | - page_index(page), PAGECACHE_TAG_DIRTY);
| 609 | + __xa_set_mark(&mapping->i_pages, page_index(page),
| 610 | + PAGECACHE_TAG_DIRTY);
586 | 611 | }
587 | 612 | xa_unlock_irqrestore(&mapping->i_pages, flags);
588 | 613 | }
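In the hunk above, the radix-tree tagging call becomes its XArray equivalent, since mapping->i_pages is an XArray in this kernel. The double-underscore variant assumes the caller already holds the xa_lock, which the surrounding xa_lock_irqsave()/xa_unlock_irqrestore() pair provides. A side-by-side sketch of the old and new calls, not taken from the patch:

```c
/* Old: i_pages addressed through the radix-tree API. */
radix_tree_tag_set(&mapping->i_pages, page_index(page), PAGECACHE_TAG_DIRTY);

/* New: i_pages is an XArray; __xa_set_mark() requires xa_lock to be held,
 * which xa_lock_irqsave(&mapping->i_pages, flags) has already taken here. */
__xa_set_mark(&mapping->i_pages, page_index(page), PAGECACHE_TAG_DIRTY);
```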
.. | ..
649 | 674 |
650 | 675 | return newly_dirty;
651 | 676 | }
652 | | -EXPORT_SYMBOL(__set_page_dirty_buffers);
| 677 | +EXPORT_SYMBOL_NS(__set_page_dirty_buffers, ANDROID_GKI_VFS_EXPORT_ONLY);
653 | 678 |
654 | 679 | /*
655 | 680 | * Write out and wait upon a list of buffers.
.. | ..
817 | 842 | struct buffer_head *bh, *head;
818 | 843 | gfp_t gfp = GFP_NOFS | __GFP_ACCOUNT;
819 | 844 | long offset;
820 | | - struct mem_cgroup *memcg;
| 845 | + struct mem_cgroup *memcg, *old_memcg;
821 | 846 |
822 | 847 | if (retry)
823 | 848 | gfp |= __GFP_NOFAIL;
824 | 849 |
825 | 850 | memcg = get_mem_cgroup_from_page(page);
826 | | - memalloc_use_memcg(memcg);
| 851 | + old_memcg = set_active_memcg(memcg);
827 | 852 |
828 | 853 | head = NULL;
829 | 854 | offset = PAGE_SIZE;
.. | ..
842 | 867 | set_bh_page(bh, page, offset);
843 | 868 | }
844 | 869 | out:
845 | | - memalloc_unuse_memcg();
| 870 | + set_active_memcg(old_memcg);
846 | 871 | mem_cgroup_put(memcg);
847 | 872 | return head;
848 | 873 | /*
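In alloc_page_buffers(), the memalloc_use_memcg()/memalloc_unuse_memcg() pair becomes set_active_memcg(), which returns the previously active memcg so the caller can restore it afterwards. The general shape of that scoped-accounting pattern, as an illustrative sketch (the helper name charged_alloc() is hypothetical):

```c
#include <linux/sched/mm.h>
#include <linux/slab.h>

/* Hypothetical helper: charge a __GFP_ACCOUNT allocation to a given memcg. */
static void *charged_alloc(struct mem_cgroup *memcg, size_t size)
{
	struct mem_cgroup *old_memcg;
	void *p;

	old_memcg = set_active_memcg(memcg);		/* returns the previous memcg */
	p = kmalloc(size, GFP_NOFS | __GFP_ACCOUNT);	/* charged to 'memcg' */
	set_active_memcg(old_memcg);			/* restore; replaces memalloc_unuse_memcg() */

	return p;
}
```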
.. | ..
872 | 897 | bh = bh->b_this_page;
873 | 898 | } while (bh);
874 | 899 | tail->b_this_page = head;
875 | | - attach_page_buffers(page, head);
| 900 | + attach_page_private(page, head);
876 | 901 | }
877 | 902 |
878 | 903 | static sector_t blkdev_max_block(struct block_device *bdev, unsigned int size)
.. | ..
933 | 958 | struct page *page;
934 | 959 | struct buffer_head *bh;
935 | 960 | sector_t end_block;
936 | | - int ret = 0; /* Will call free_more_memory() */
| 961 | + int ret = 0;
937 | 962 | gfp_t gfp_mask;
938 | 963 |
939 | 964 | gfp_mask = mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS) | gfp;
.. | ..
1052 | 1077 | * The relationship between dirty buffers and dirty pages:
1053 | 1078 | *
1054 | 1079 | * Whenever a page has any dirty buffers, the page's dirty bit is set, and
1055 | | - * the page is tagged dirty in its radix tree.
| 1080 | + * the page is tagged dirty in the page cache.
1056 | 1081 | *
1057 | 1082 | * At all times, the dirtiness of the buffers represents the dirtiness of
1058 | 1083 | * subsections of the page. If the page has buffers, the page dirty bit is
.. | ..
1075 | 1100 | * mark_buffer_dirty - mark a buffer_head as needing writeout
1076 | 1101 | * @bh: the buffer_head to mark dirty
1077 | 1102 | *
1078 | | - * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
1079 | | - * backing page dirty, then tag the page as dirty in its address_space's radix
1080 | | - * tree and then attach the address_space's inode to its superblock's dirty
| 1103 | + * mark_buffer_dirty() will set the dirty bit against the buffer, then set
| 1104 | + * its backing page dirty, then tag the page as dirty in the page cache
| 1105 | + * and then attach the address_space's inode to its superblock's dirty
1081 | 1106 | * inode list.
1082 | 1107 | *
1083 | 1108 | * mark_buffer_dirty() is atomic. It takes bh->b_page->mapping->private_lock,
.. | ..
1116 | 1141 | __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
1117 | 1142 | }
1118 | 1143 | }
1119 | | -EXPORT_SYMBOL(mark_buffer_dirty);
| 1144 | +EXPORT_SYMBOL_NS(mark_buffer_dirty, ANDROID_GKI_VFS_EXPORT_ONLY);
1120 | 1145 |
1121 | 1146 | void mark_buffer_write_io_error(struct buffer_head *bh)
1122 | 1147 | {
| 1148 | + struct super_block *sb;
| 1149 | +
1123 | 1150 | set_buffer_write_io_error(bh);
1124 | 1151 | /* FIXME: do we need to set this in both places? */
1125 | 1152 | if (bh->b_page && bh->b_page->mapping)
1126 | 1153 | mapping_set_error(bh->b_page->mapping, -EIO);
1127 | 1154 | if (bh->b_assoc_map)
1128 | 1155 | mapping_set_error(bh->b_assoc_map, -EIO);
| 1156 | + rcu_read_lock();
| 1157 | + sb = READ_ONCE(bh->b_bdev->bd_super);
| 1158 | + if (sb)
| 1159 | + errseq_set(&sb->s_wb_err, -EIO);
| 1160 | + rcu_read_unlock();
1129 | 1161 | }
1130 | | -EXPORT_SYMBOL(mark_buffer_write_io_error);
| 1162 | +EXPORT_SYMBOL_NS(mark_buffer_write_io_error, ANDROID_GKI_VFS_EXPORT_ONLY);
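mark_buffer_write_io_error() now also records the failure in the super_block's s_wb_err errseq (under RCU, since bd_super may be cleared concurrently), so that syncfs()-style callers can observe metadata writeback errors. A hedged sketch of how such an errseq value is typically consumed, using the generic errseq API rather than code from this patch:

```c
#include <linux/errseq.h>
#include <linux/fs.h>
#include <linux/printk.h>

/* Illustrative only: sample the error cursor, then check it later. */
static void wb_err_demo(struct super_block *sb)
{
	errseq_t since = errseq_sample(&sb->s_wb_err);
	int err;

	/* ... writeback runs; a failure lands via errseq_set(&sb->s_wb_err, -EIO) ... */

	err = errseq_check(&sb->s_wb_err, since);	/* 0, or the recorded error */
	if (err)
		pr_warn("writeback error since sample: %d\n", err);
}
```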
1131 | 1163 |
1132 | 1164 | /*
1133 | 1165 | * Decrement a buffer_head's reference count. If all buffers against a page
.. | ..
1144 | 1176 | }
1145 | 1177 | WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
1146 | 1178 | }
1147 | | -EXPORT_SYMBOL(__brelse);
| 1179 | +EXPORT_SYMBOL_NS(__brelse, ANDROID_GKI_VFS_EXPORT_ONLY);
1148 | 1180 |
1149 | 1181 | /*
1150 | 1182 | * bforget() is like brelse(), except it discards any
.. | ..
1163 | 1195 | }
1164 | 1196 | __brelse(bh);
1165 | 1197 | }
1166 | | -EXPORT_SYMBOL(__bforget);
| 1198 | +EXPORT_SYMBOL_NS(__bforget, ANDROID_GKI_VFS_EXPORT_ONLY);
1167 | 1199 |
1168 | 1200 | static struct buffer_head *__bread_slow(struct buffer_head *bh)
1169 | 1201 | {
.. | ..
1232 | 1264 | int i;
1233 | 1265 |
1234 | 1266 | check_irqs_on();
| 1267 | + /*
| 1268 | + * the refcount of buffer_head in bh_lru prevents dropping the
| 1269 | + * attached page(i.e., try_to_free_buffers) so it could cause
| 1270 | + * failing page migration.
| 1271 | + * Skip putting upcoming bh into bh_lru until migration is done.
| 1272 | + */
| 1273 | + if (lru_cache_disabled())
| 1274 | + return;
| 1275 | +
1235 | 1276 | bh_lru_lock();
1236 | 1277 |
1237 | 1278 | b = this_cpu_ptr(&bh_lrus);
.. | ..
1335 | 1376 | brelse(bh);
1336 | 1377 | }
1337 | 1378 | }
1338 | | -EXPORT_SYMBOL(__breadahead);
| 1379 | +EXPORT_SYMBOL_NS(__breadahead, ANDROID_GKI_VFS_EXPORT_ONLY);
1339 | 1380 |
1340 | 1381 | void __breadahead_gfp(struct block_device *bdev, sector_t block, unsigned size,
1341 | 1382 | gfp_t gfp)
.. | ..
1370 | 1411 | bh = __bread_slow(bh);
1371 | 1412 | return bh;
1372 | 1413 | }
1373 | | -EXPORT_SYMBOL(__bread_gfp);
| 1414 | +EXPORT_SYMBOL_NS(__bread_gfp, ANDROID_GKI_VFS_EXPORT_ONLY);
1374 | 1415 |
| 1416 | +static void __invalidate_bh_lrus(struct bh_lru *b)
| 1417 | +{
| 1418 | + int i;
| 1419 | +
| 1420 | + for (i = 0; i < BH_LRU_SIZE; i++) {
| 1421 | + brelse(b->bhs[i]);
| 1422 | + b->bhs[i] = NULL;
| 1423 | + }
| 1424 | +}
1375 | 1425 | /*
1376 | 1426 | * invalidate_bh_lrus() is called rarely - but not only at unmount.
1377 | 1427 | * This doesn't race because it runs in each cpu either in irq
.. | ..
1380 | 1430 | static void invalidate_bh_lru(void *arg)
1381 | 1431 | {
1382 | 1432 | struct bh_lru *b = &get_cpu_var(bh_lrus);
1383 | | - int i;
1384 | 1433 |
1385 | | - for (i = 0; i < BH_LRU_SIZE; i++) {
1386 | | - brelse(b->bhs[i]);
1387 | | - b->bhs[i] = NULL;
1388 | | - }
| 1434 | + __invalidate_bh_lrus(b);
1389 | 1435 | put_cpu_var(bh_lrus);
1390 | 1436 | }
1391 | 1437 |
1392 | | -static bool has_bh_in_lru(int cpu, void *dummy)
| 1438 | +bool has_bh_in_lru(int cpu, void *dummy)
1393 | 1439 | {
1394 | 1440 | struct bh_lru *b = per_cpu_ptr(&bh_lrus, cpu);
1395 | 1441 | int i;
1396 | 1442 |
1397 | 1443 | for (i = 0; i < BH_LRU_SIZE; i++) {
1398 | 1444 | if (b->bhs[i])
1399 | | - return 1;
| 1445 | + return true;
1400 | 1446 | }
1401 | 1447 |
1402 | | - return 0;
| 1448 | + return false;
1403 | 1449 | }
1404 | 1450 |
1405 | 1451 | void invalidate_bh_lrus(void)
1406 | 1452 | {
1407 | | - on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1, GFP_KERNEL);
| 1453 | + on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1);
1408 | 1454 | }
1409 | 1455 | EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
| 1456 | +
| 1457 | +/*
| 1458 | + * It's called from workqueue context so we need a bh_lru_lock to close
| 1459 | + * the race with preemption/irq.
| 1460 | + */
| 1461 | +void invalidate_bh_lrus_cpu(void)
| 1462 | +{
| 1463 | + struct bh_lru *b;
| 1464 | +
| 1465 | + bh_lru_lock();
| 1466 | + b = this_cpu_ptr(&bh_lrus);
| 1467 | + __invalidate_bh_lrus(b);
| 1468 | + bh_lru_unlock();
| 1469 | +}
1410 | 1470 |
1411 | 1471 | void set_bh_page(struct buffer_head *bh,
1412 | 1472 | struct page *page, unsigned long offset)
.. | ..
1513 | 1573 | out:
1514 | 1574 | return;
1515 | 1575 | }
1516 | | -EXPORT_SYMBOL(block_invalidatepage);
| 1576 | +EXPORT_SYMBOL_NS(block_invalidatepage, ANDROID_GKI_VFS_EXPORT_ONLY);
1517 | 1577 |
1518 | 1578 |
1519 | 1579 | /*
.. | ..
1546 | 1606 | bh = bh->b_this_page;
1547 | 1607 | } while (bh != head);
1548 | 1608 | }
1549 | | - attach_page_buffers(page, head);
| 1609 | + attach_page_private(page, head);
1550 | 1610 | spin_unlock(&page->mapping->private_lock);
1551 | 1611 | }
1552 | | -EXPORT_SYMBOL(create_empty_buffers);
| 1612 | +EXPORT_SYMBOL_NS(create_empty_buffers, ANDROID_GKI_VFS_EXPORT_ONLY);
1553 | 1613 |
1554 | 1614 | /**
1555 | 1615 | * clean_bdev_aliases: clean a range of buffers in block device
.. | ..
1623 | 1683 | break;
1624 | 1684 | }
1625 | 1685 | }
1626 | | -EXPORT_SYMBOL(clean_bdev_aliases);
| 1686 | +EXPORT_SYMBOL_NS(clean_bdev_aliases, ANDROID_GKI_VFS_EXPORT_ONLY);
1627 | 1687 |
1628 | 1688 | /*
1629 | 1689 | * Size is a power-of-two in the range 512..PAGE_SIZE,
.. | ..
1881 | 1941 | bh = bh->b_this_page;
1882 | 1942 | } while (bh != head);
1883 | 1943 | }
1884 | | -EXPORT_SYMBOL(page_zero_new_buffers);
| 1944 | +EXPORT_SYMBOL_NS(page_zero_new_buffers, ANDROID_GKI_VFS_EXPORT_ONLY);
1885 | 1945 |
1886 | 1946 | static void
1887 | 1947 | iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh,
.. | ..
1926 | 1986 | */
1927 | 1987 | set_buffer_new(bh);
1928 | 1988 | set_buffer_unwritten(bh);
1929 | | - /* FALLTHRU */
| 1989 | + fallthrough;
1930 | 1990 | case IOMAP_MAPPED:
1931 | 1991 | if ((iomap->flags & IOMAP_F_NEW) ||
1932 | 1992 | offset >= i_size_read(inode))
.. | ..
2097 | 2157 | }
2098 | 2158 | EXPORT_SYMBOL(block_write_begin);
2099 | 2159 |
2100 | | -int __generic_write_end(struct inode *inode, loff_t pos, unsigned copied,
2101 | | - struct page *page)
2102 | | -{
2103 | | - loff_t old_size = inode->i_size;
2104 | | - bool i_size_changed = false;
2105 | | -
2106 | | - /*
2107 | | - * No need to use i_size_read() here, the i_size cannot change under us
2108 | | - * because we hold i_rwsem.
2109 | | - *
2110 | | - * But it's important to update i_size while still holding page lock:
2111 | | - * page writeout could otherwise come in and zero beyond i_size.
2112 | | - */
2113 | | - if (pos + copied > inode->i_size) {
2114 | | - i_size_write(inode, pos + copied);
2115 | | - i_size_changed = true;
2116 | | - }
2117 | | -
2118 | | - unlock_page(page);
2119 | | - put_page(page);
2120 | | -
2121 | | - if (old_size < pos)
2122 | | - pagecache_isize_extended(inode, old_size, pos);
2123 | | - /*
2124 | | - * Don't mark the inode dirty under page lock. First, it unnecessarily
2125 | | - * makes the holding time of page lock longer. Second, it forces lock
2126 | | - * ordering of page lock and transaction start for journaling
2127 | | - * filesystems.
2128 | | - */
2129 | | - if (i_size_changed)
2130 | | - mark_inode_dirty(inode);
2131 | | - return copied;
2132 | | -}
2133 | | -
2134 | 2160 | int block_write_end(struct file *file, struct address_space *mapping,
2135 | 2161 | loff_t pos, unsigned len, unsigned copied,
2136 | 2162 | struct page *page, void *fsdata)
.. | ..
2171 | 2197 | loff_t pos, unsigned len, unsigned copied,
2172 | 2198 | struct page *page, void *fsdata)
2173 | 2199 | {
| 2200 | + struct inode *inode = mapping->host;
| 2201 | + loff_t old_size = inode->i_size;
| 2202 | + bool i_size_changed = false;
| 2203 | +
2174 | 2204 | copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
2175 | | - return __generic_write_end(mapping->host, pos, copied, page);
| 2205 | +
| 2206 | + /*
| 2207 | + * No need to use i_size_read() here, the i_size cannot change under us
| 2208 | + * because we hold i_rwsem.
| 2209 | + *
| 2210 | + * But it's important to update i_size while still holding page lock:
| 2211 | + * page writeout could otherwise come in and zero beyond i_size.
| 2212 | + */
| 2213 | + if (pos + copied > inode->i_size) {
| 2214 | + i_size_write(inode, pos + copied);
| 2215 | + i_size_changed = true;
| 2216 | + }
| 2217 | +
| 2218 | + unlock_page(page);
| 2219 | + put_page(page);
| 2220 | +
| 2221 | + if (old_size < pos)
| 2222 | + pagecache_isize_extended(inode, old_size, pos);
| 2223 | + /*
| 2224 | + * Don't mark the inode dirty under page lock. First, it unnecessarily
| 2225 | + * makes the holding time of page lock longer. Second, it forces lock
| 2226 | + * ordering of page lock and transaction start for journaling
| 2227 | + * filesystems.
| 2228 | + */
| 2229 | + if (i_size_changed)
| 2230 | + mark_inode_dirty(inode);
| 2231 | + return copied;
2176 | 2232 | }
2177 | 2233 | EXPORT_SYMBOL(generic_write_end);
2178 | 2234 |
.. | ..
2219 | 2275 |
2220 | 2276 | return ret;
2221 | 2277 | }
2222 | | -EXPORT_SYMBOL(block_is_partially_uptodate);
| 2278 | +EXPORT_SYMBOL_NS(block_is_partially_uptodate, ANDROID_GKI_VFS_EXPORT_ONLY);
2223 | 2279 |
2224 | 2280 | /*
2225 | 2281 | * Generic "read page" function for block devices that have the normal
.. | ..
2322 | 2378 | {
2323 | 2379 | struct address_space *mapping = inode->i_mapping;
2324 | 2380 | struct page *page;
2325 | | - void *fsdata;
| 2381 | + void *fsdata = NULL;
2326 | 2382 | int err;
2327 | 2383 |
2328 | 2384 | err = inode_newsize_ok(inode, size);
.. | ..
2348 | 2404 | struct inode *inode = mapping->host;
2349 | 2405 | unsigned int blocksize = i_blocksize(inode);
2350 | 2406 | struct page *page;
2351 | | - void *fsdata;
| 2407 | + void *fsdata = NULL;
2352 | 2408 | pgoff_t index, curidx;
2353 | 2409 | loff_t curpos;
2354 | 2410 | unsigned zerofrom, offset, len;
.. | ..
2379 | 2435 |
2380 | 2436 | balance_dirty_pages_ratelimited(mapping);
2381 | 2437 |
2382 | | - if (unlikely(fatal_signal_pending(current))) {
| 2438 | + if (fatal_signal_pending(current)) {
2383 | 2439 | err = -EINTR;
2384 | 2440 | goto out;
2385 | 2441 | }
.. | ..
2537 | 2593 | bh->b_this_page = head;
2538 | 2594 | bh = bh->b_this_page;
2539 | 2595 | } while (bh != head);
2540 | | - attach_page_buffers(page, head);
| 2596 | + attach_page_private(page, head);
2541 | 2597 | spin_unlock(&page->mapping->private_lock);
2542 | 2598 | }
2543 | 2599 |
.. | ..
2978 | 3034 | bio_put(bio);
2979 | 3035 | }
2980 | 3036 |
2981 | | -/*
2982 | | - * This allows us to do IO even on the odd last sectors
2983 | | - * of a device, even if the block size is some multiple
2984 | | - * of the physical sector size.
2985 | | - *
2986 | | - * We'll just truncate the bio to the size of the device,
2987 | | - * and clear the end of the buffer head manually.
2988 | | - *
2989 | | - * Truly out-of-range accesses will turn into actual IO
2990 | | - * errors, this only handles the "we need to be able to
2991 | | - * do IO at the final sector" case.
2992 | | - */
2993 | | -void guard_bio_eod(int op, struct bio *bio)
2994 | | -{
2995 | | - sector_t maxsector;
2996 | | - struct bio_vec *bvec = bio_last_bvec_all(bio);
2997 | | - unsigned truncated_bytes;
2998 | | - struct hd_struct *part;
2999 | | -
3000 | | - rcu_read_lock();
3001 | | - part = __disk_get_part(bio->bi_disk, bio->bi_partno);
3002 | | - if (part)
3003 | | - maxsector = part_nr_sects_read(part);
3004 | | - else
3005 | | - maxsector = get_capacity(bio->bi_disk);
3006 | | - rcu_read_unlock();
3007 | | -
3008 | | - if (!maxsector)
3009 | | - return;
3010 | | -
3011 | | - /*
3012 | | - * If the *whole* IO is past the end of the device,
3013 | | - * let it through, and the IO layer will turn it into
3014 | | - * an EIO.
3015 | | - */
3016 | | - if (unlikely(bio->bi_iter.bi_sector >= maxsector))
3017 | | - return;
3018 | | -
3019 | | - maxsector -= bio->bi_iter.bi_sector;
3020 | | - if (likely((bio->bi_iter.bi_size >> 9) <= maxsector))
3021 | | - return;
3022 | | -
3023 | | - /* Uhhuh. We've got a bio that straddles the device size! */
3024 | | - truncated_bytes = bio->bi_iter.bi_size - (maxsector << 9);
3025 | | -
3026 | | - /*
3027 | | - * The bio contains more than one segment which spans EOD, just return
3028 | | - * and let IO layer turn it into an EIO
3029 | | - */
3030 | | - if (truncated_bytes > bvec->bv_len)
3031 | | - return;
3032 | | -
3033 | | - /* Truncate the bio.. */
3034 | | - bio->bi_iter.bi_size -= truncated_bytes;
3035 | | - bvec->bv_len -= truncated_bytes;
3036 | | -
3037 | | - /* ..and clear the end of the buffer for reads */
3038 | | - if (op == REQ_OP_READ) {
3039 | | - zero_user(bvec->bv_page, bvec->bv_offset + bvec->bv_len,
3040 | | - truncated_bytes);
3041 | | - }
3042 | | -}
3043 | | -
3044 | 3037 | static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
3045 | 3038 | enum rw_hint write_hint, struct writeback_control *wbc)
3046 | 3039 | {
.. | ..
3058 | 3051 | if (test_set_buffer_req(bh) && (op == REQ_OP_WRITE))
3059 | 3052 | clear_buffer_write_io_error(bh);
3060 | 3053 |
3061 | | - /*
3062 | | - * from here on down, it's all bio -- do the initial mapping,
3063 | | - * submit_bio -> generic_make_request may further map this bio around
3064 | | - */
3065 | 3054 | bio = bio_alloc(GFP_NOIO, 1);
3066 | 3055 |
3067 | 3056 | fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO);
3068 | | -
3069 | | - if (wbc) {
3070 | | - wbc_init_bio(wbc, bio);
3071 | | - wbc_account_io(wbc, bh->b_page, bh->b_size);
3072 | | - }
3073 | 3057 |
3074 | 3058 | bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
3075 | 3059 | bio_set_dev(bio, bh->b_bdev);
.. | ..
3081 | 3065 | bio->bi_end_io = end_bio_bh_io_sync;
3082 | 3066 | bio->bi_private = bh;
3083 | 3067 |
3084 | | - /* Take care of bh's that straddle the end of the device */
3085 | | - guard_bio_eod(op, bio);
3086 | | -
3087 | 3068 | if (buffer_meta(bh))
3088 | 3069 | op_flags |= REQ_META;
3089 | 3070 | if (buffer_prio(bh))
3090 | 3071 | op_flags |= REQ_PRIO;
3091 | 3072 | bio_set_op_attrs(bio, op, op_flags);
| 3073 | +
| 3074 | + /* Take care of bh's that straddle the end of the device */
| 3075 | + guard_bio_eod(bio);
| 3076 | +
| 3077 | + if (wbc) {
| 3078 | + wbc_init_bio(wbc, bio);
| 3079 | + wbc_account_cgroup_owner(wbc, bh->b_page, bh->b_size);
| 3080 | + }
3092 | 3081 |
3093 | 3082 | submit_bio(bio);
3094 | 3083 | return 0;
.. | ..
3153 | 3142 | unlock_buffer(bh);
3154 | 3143 | }
3155 | 3144 | }
3156 | | -EXPORT_SYMBOL(ll_rw_block);
| 3145 | +EXPORT_SYMBOL_NS(ll_rw_block, ANDROID_GKI_VFS_EXPORT_ONLY);
3157 | 3146 |
3158 | 3147 | void write_dirty_buffer(struct buffer_head *bh, int op_flags)
3159 | 3148 | {
.. | ..
3206 | 3195 | {
3207 | 3196 | return __sync_dirty_buffer(bh, REQ_SYNC);
3208 | 3197 | }
3209 | | -EXPORT_SYMBOL(sync_dirty_buffer);
| 3198 | +EXPORT_SYMBOL_NS(sync_dirty_buffer, ANDROID_GKI_VFS_EXPORT_ONLY);
3210 | 3199 |
3211 | 3200 | /*
3212 | 3201 | * try_to_free_buffers() checks if all the buffers on this particular page
.. | ..
3255 | 3244 | bh = next;
3256 | 3245 | } while (bh != head);
3257 | 3246 | *buffers_to_free = head;
3258 | | - __clear_page_buffers(page);
| 3247 | + detach_page_private(page);
3259 | 3248 | return 1;
3260 | 3249 | failed:
3261 | 3250 | return 0;
.. | ..
3375 | 3364 | struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
3376 | 3365 | if (ret) {
3377 | 3366 | INIT_LIST_HEAD(&ret->b_assoc_buffers);
| 3367 | + spin_lock_init(&ret->b_uptodate_lock);
3378 | 3368 | preempt_disable();
3379 | 3369 | __this_cpu_inc(bh_accounting.nr);
3380 | 3370 | recalc_bh_state();
---|