+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * fs/dcache.c
  *
...
 #include <linux/export.h>
 #include <linux/security.h>
 #include <linux/seqlock.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/bit_spinlock.h>
 #include <linux/rculist_bl.h>
 #include <linux/list_lru.h>
...
 
 static DEFINE_PER_CPU(long, nr_dentry);
 static DEFINE_PER_CPU(long, nr_dentry_unused);
+static DEFINE_PER_CPU(long, nr_dentry_negative);
 
 #if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
 
...
 	return sum < 0 ? 0 : sum;
 }
 
-int proc_nr_dentry(struct ctl_table *table, int write, void __user *buffer,
+static long get_nr_dentry_negative(void)
+{
+	int i;
+	long sum = 0;
+
+	for_each_possible_cpu(i)
+		sum += per_cpu(nr_dentry_negative, i);
+	return sum < 0 ? 0 : sum;
+}
+
+int proc_nr_dentry(struct ctl_table *table, int write, void *buffer,
 		   size_t *lenp, loff_t *ppos)
 {
 	dentry_stat.nr_dentry = get_nr_dentry();
 	dentry_stat.nr_unused = get_nr_dentry_unused();
+	dentry_stat.nr_negative = get_nr_dentry_negative();
 	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
 }
 #endif
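The new counter follows the same pattern as nr_dentry and nr_dentry_unused: each CPU keeps a local delta, the deltas are summed only when /proc/sys/fs/dentry-state is read, and the sum is clamped at zero because the increment and the matching decrement for one dentry may land on different CPUs. A minimal user-space sketch of that idea (names and the fixed array are illustrative, not kernel API):

#include <stdio.h>

#define NR_CPUS 4

/* Illustrative per-CPU deltas; any single slot may go negative. */
static long nr_negative[NR_CPUS] = { 5, -2, 7, -1 };

static long get_nr_negative(void)
{
	long sum = 0;

	for (int i = 0; i < NR_CPUS; i++)
		sum += nr_negative[i];
	return sum < 0 ? 0 : sum;	/* transient negatives read as 0 */
}

int main(void)
{
	printf("negative dentries: %ld\n", get_nr_negative());
	return 0;
}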
...
 void take_dentry_name_snapshot(struct name_snapshot *name, struct dentry *dentry)
 {
 	spin_lock(&dentry->d_lock);
+	name->name = dentry->d_name;
 	if (unlikely(dname_external(dentry))) {
-		struct external_name *p = external_name(dentry);
-		atomic_inc(&p->u.count);
-		spin_unlock(&dentry->d_lock);
-		name->name = p->name;
+		atomic_inc(&external_name(dentry)->u.count);
 	} else {
 		memcpy(name->inline_name, dentry->d_iname,
 		       dentry->d_name.len + 1);
-		spin_unlock(&dentry->d_lock);
-		name->name = name->inline_name;
+		name->name.name = name->inline_name;
 	}
+	spin_unlock(&dentry->d_lock);
 }
 EXPORT_SYMBOL(take_dentry_name_snapshot);
 
 void release_dentry_name_snapshot(struct name_snapshot *name)
 {
-	if (unlikely(name->name != name->inline_name)) {
+	if (unlikely(name->name.name != name->inline_name)) {
 		struct external_name *p;
-		p = container_of(name->name, struct external_name, name[0]);
+		p = container_of(name->name.name, struct external_name, name[0]);
 		if (unlikely(atomic_dec_and_test(&p->u.count)))
 			kfree_rcu(p, u.head);
 	}
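After this change struct name_snapshot carries a full struct qstr rather than a bare string pointer: externally allocated names are pinned by bumping the external_name refcount, while short names are copied into the snapshot's inline buffer, and in both cases d_lock is dropped only once at the end. A kernel-style sketch of a caller (log_dentry_name() is hypothetical; the in-tree users are the fsnotify hooks):

/* Hypothetical helper: use a name that stays valid after d_lock drops. */
static void log_dentry_name(struct dentry *dentry)
{
	struct name_snapshot snap;

	take_dentry_name_snapshot(&snap, dentry);
	/* snap.name is a struct qstr; its string is pinned or copied */
	pr_info("dentry: %s (len %u)\n", snap.name.name, snap.name.len);
	release_dentry_name_snapshot(&snap);
}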
...
 	flags = READ_ONCE(dentry->d_flags);
 	flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
 	flags |= type_flags;
-	WRITE_ONCE(dentry->d_flags, flags);
+	smp_store_release(&dentry->d_flags, flags);
 }
 
 static inline void __d_clear_type_and_inode(struct dentry *dentry)
...
 	flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
 	WRITE_ONCE(dentry->d_flags, flags);
 	dentry->d_inode = NULL;
+	if (dentry->d_flags & DCACHE_LRU_LIST)
+		this_cpu_inc(nr_dentry_negative);
 }
 
 static void dentry_free(struct dentry *dentry)
...
  * The per-cpu "nr_dentry_unused" counters are updated with
  * the DCACHE_LRU_LIST bit.
  *
+ * The per-cpu "nr_dentry_negative" counters are only updated
+ * when deleted from or added to the per-superblock LRU list, not
+ * from/to the shrink list. That is to avoid an unneeded dec/inc
+ * pair when moving from LRU to shrink list in select_collect().
+ *
  * These helper functions make sure we always follow the
  * rules. d_lock must be held by the caller.
  */
...
 	D_FLAG_VERIFY(dentry, 0);
 	dentry->d_flags |= DCACHE_LRU_LIST;
 	this_cpu_inc(nr_dentry_unused);
+	if (d_is_negative(dentry))
+		this_cpu_inc(nr_dentry_negative);
 	WARN_ON_ONCE(!list_lru_add(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
 }
 
386 | 406 | |
---|
.. | .. |
---|
389 | 409 | D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST); |
---|
390 | 410 | dentry->d_flags &= ~DCACHE_LRU_LIST; |
---|
391 | 411 | this_cpu_dec(nr_dentry_unused); |
---|
| 412 | + if (d_is_negative(dentry)) |
---|
| 413 | + this_cpu_dec(nr_dentry_negative); |
---|
392 | 414 | WARN_ON_ONCE(!list_lru_del(&dentry->d_sb->s_dentry_lru, &dentry->d_lru)); |
---|
393 | 415 | } |
---|
394 | 416 | |
---|
.. | .. |
---|
419 | 441 | D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST); |
---|
420 | 442 | dentry->d_flags &= ~DCACHE_LRU_LIST; |
---|
421 | 443 | this_cpu_dec(nr_dentry_unused); |
---|
| 444 | + if (d_is_negative(dentry)) |
---|
| 445 | + this_cpu_dec(nr_dentry_negative); |
---|
422 | 446 | list_lru_isolate(lru, &dentry->d_lru); |
---|
423 | 447 | } |
---|
424 | 448 | |
---|
.. | .. |
---|
427 | 451 | { |
---|
428 | 452 | D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST); |
---|
429 | 453 | dentry->d_flags |= DCACHE_SHRINK_LIST; |
---|
| 454 | + if (d_is_negative(dentry)) |
---|
| 455 | + this_cpu_dec(nr_dentry_negative); |
---|
430 | 456 | list_lru_isolate_move(lru, &dentry->d_lru, list); |
---|
431 | 457 | } |
---|
432 | 458 | |
---|
.. | .. |
---|
621 | 647 | if (dentry->d_op->d_delete(dentry)) |
---|
622 | 648 | return false; |
---|
623 | 649 | } |
---|
| 650 | + |
---|
| 651 | + if (unlikely(dentry->d_flags & DCACHE_DONTCACHE)) |
---|
| 652 | + return false; |
---|
| 653 | + |
---|
624 | 654 | /* retain; LRU fodder */ |
---|
625 | 655 | dentry->d_lockref.count--; |
---|
626 | 656 | if (unlikely(!(dentry->d_flags & DCACHE_LRU_LIST))) |
---|
.. | .. |
---|
629 | 659 | dentry->d_flags |= DCACHE_REFERENCED; |
---|
630 | 660 | return true; |
---|
631 | 661 | } |
---|
| 662 | + |
---|
| 663 | +void d_mark_dontcache(struct inode *inode) |
---|
| 664 | +{ |
---|
| 665 | + struct dentry *de; |
---|
| 666 | + |
---|
| 667 | + spin_lock(&inode->i_lock); |
---|
| 668 | + hlist_for_each_entry(de, &inode->i_dentry, d_u.d_alias) { |
---|
| 669 | + spin_lock(&de->d_lock); |
---|
| 670 | + de->d_flags |= DCACHE_DONTCACHE; |
---|
| 671 | + spin_unlock(&de->d_lock); |
---|
| 672 | + } |
---|
| 673 | + inode->i_state |= I_DONTCACHE; |
---|
| 674 | + spin_unlock(&inode->i_lock); |
---|
| 675 | +} |
---|
| 676 | +EXPORT_SYMBOL(d_mark_dontcache); |
---|
632 | 677 | |
---|
633 | 678 | /* |
---|
634 | 679 | * Finish off a dentry we've decided to kill. |
---|
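d_mark_dontcache() lets a filesystem flag an inode and every dentry alias of it as not worth caching: the aliases get DCACHE_DONTCACHE, so retain_dentry() above refuses to keep them and dput() kills them on the last reference instead of parking them on the LRU. A hedged kernel-style sketch of a caller (the function name is made up; the in-tree user at the time was XFS, for inodes it prefers not to cache):

/* Hypothetical error path: stop caching dentries for an unusable inode. */
static void example_inode_gone_bad(struct inode *inode)
{
	/* Aliases get DCACHE_DONTCACHE and the inode gets I_DONTCACHE,
	 * so both are dropped as soon as the last reference goes away. */
	d_mark_dontcache(inode);
}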
...
 }
 EXPORT_SYMBOL(dput);
 
+static void __dput_to_list(struct dentry *dentry, struct list_head *list)
+__must_hold(&dentry->d_lock)
+{
+	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
+		/* let the owner of the list it's on deal with it */
+		--dentry->d_lockref.count;
+	} else {
+		if (dentry->d_flags & DCACHE_LRU_LIST)
+			d_lru_del(dentry);
+		if (!--dentry->d_lockref.count)
+			d_shrink_add(dentry, list);
+	}
+}
+
+void dput_to_list(struct dentry *dentry, struct list_head *list)
+{
+	rcu_read_lock();
+	if (likely(fast_dput(dentry))) {
+		rcu_read_unlock();
+		return;
+	}
+	rcu_read_unlock();
+	if (!retain_dentry(dentry))
+		__dput_to_list(dentry, list);
+	spin_unlock(&dentry->d_lock);
+}
 
 /* This must be called with d_lock held */
 static inline void __dget_dlock(struct dentry *dentry)
...
 	return false;
 }
 
-static void shrink_dentry_list(struct list_head *list)
+void shrink_dentry_list(struct list_head *list)
 {
 	while (!list_empty(list)) {
 		struct dentry *dentry, *parent;
...
 		rcu_read_unlock();
 		d_shrink_del(dentry);
 		parent = dentry->d_parent;
+		if (parent != dentry)
+			__dput_to_list(parent, list);
 		__dentry_kill(dentry);
-		if (parent == dentry)
-			continue;
-		/*
-		 * We need to prune ancestors too. This is necessary to prevent
-		 * quadratic behavior of shrink_dcache_parent(), but is also
-		 * expected to be beneficial in reducing dentry cache
-		 * fragmentation.
-		 */
-		dentry = parent;
-		while (dentry && !lockref_put_or_lock(&dentry->d_lockref))
-			dentry = dentry_kill(dentry);
 	}
 }
 
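This rework replaces the open-coded ancestor-pruning loop: rather than walking up and killing parents inline, shrink_dentry_list() drops the parent's reference onto the same dispose list via __dput_to_list(), so an ancestor whose last pin goes away is reaped by the very loop that is already draining the list. A minimal user-space sketch of that deferred-disposal pattern (types and names are illustrative only, not kernel API):

#include <stdio.h>
#include <stdlib.h>

/* Illustrative node with a refcount and a parent pointer. */
struct node {
	struct node *parent;
	struct node *next;	/* dispose-list linkage */
	int refcount;
	const char *name;
};

/* Drop a reference; if it was the last one, queue for disposal. */
static void put_to_list(struct node *n, struct node **list)
{
	if (n && --n->refcount == 0) {
		n->next = *list;
		*list = n;
	}
}

/* Dispose queued nodes; a parent's last ref lands on the same list. */
static void shrink_list(struct node **list)
{
	while (*list) {
		struct node *n = *list;

		*list = n->next;
		put_to_list(n->parent, list);
		printf("killing %s\n", n->name);
		free(n);
	}
}

int main(void)
{
	struct node *root = calloc(1, sizeof(*root));
	struct node *child = calloc(1, sizeof(*child));
	struct node *dispose = NULL;

	root->name = "root";   root->refcount = 1;	/* pinned by child */
	child->name = "child"; child->refcount = 1;	/* pinned by caller */
	child->parent = root;

	put_to_list(child, &dispose);	/* last ref: child is queued */
	shrink_list(&dispose);		/* kills child, then root */
	return 0;
}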
...
 
 	if (!list_empty(&dentry->d_subdirs)) {
 		spin_unlock(&this_parent->d_lock);
-		spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
+		spin_release(&dentry->d_lock.dep_map, _RET_IP_);
 		this_parent = dentry;
 		spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
 		goto repeat;
...
 
 struct select_data {
 	struct dentry *start;
+	union {
+		long found;
+		struct dentry *victim;
+	};
 	struct list_head dispose;
-	int found;
 };
...
 	return ret;
 }
 
+static enum d_walk_ret select_collect2(void *_data, struct dentry *dentry)
+{
+	struct select_data *data = _data;
+	enum d_walk_ret ret = D_WALK_CONTINUE;
+
+	if (data->start == dentry)
+		goto out;
+
+	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
+		if (!dentry->d_lockref.count) {
+			rcu_read_lock();
+			data->victim = dentry;
+			return D_WALK_QUIT;
+		}
+	} else {
+		if (dentry->d_flags & DCACHE_LRU_LIST)
+			d_lru_del(dentry);
+		if (!dentry->d_lockref.count)
+			d_shrink_add(dentry, &data->dispose);
+	}
+	/*
+	 * We can return to the caller if we have found some (this
+	 * ensures forward progress). We'll be coming back to find
+	 * the rest.
+	 */
+	if (!list_empty(&data->dispose))
+		ret = need_resched() ? D_WALK_QUIT : D_WALK_NORETRY;
+out:
+	return ret;
+}
+
 /**
  * shrink_dcache_parent - prune dcache
  * @parent: parent of entries to prune
...
 void shrink_dcache_parent(struct dentry *parent)
 {
 	for (;;) {
-		struct select_data data;
+		struct select_data data = {.start = parent};
 
 		INIT_LIST_HEAD(&data.dispose);
-		data.start = parent;
-		data.found = 0;
-
 		d_walk(parent, &data, select_collect);
 
 		if (!list_empty(&data.dispose)) {
...
 		cond_resched();
 		if (!data.found)
 			break;
+		data.victim = NULL;
+		d_walk(parent, &data, select_collect2);
+		if (data.victim) {
+			struct dentry *parent;
+			spin_lock(&data.victim->d_lock);
+			if (!shrink_lock_dentry(data.victim)) {
+				spin_unlock(&data.victim->d_lock);
+				rcu_read_unlock();
+			} else {
+				rcu_read_unlock();
+				parent = data.victim->d_parent;
+				if (parent != data.victim)
+					__dput_to_list(parent, &data.dispose);
+				__dentry_kill(data.victim);
+			}
+		}
+		if (!list_empty(&data.dispose))
+			shrink_dentry_list(&data.dispose);
 	}
 }
 EXPORT_SYMBOL(shrink_dcache_parent);
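shrink_dcache_parent() now works in passes: select_collect() gathers unreferenced descendants onto a private dispose list, and when a pass finds busy entries but nothing new to dispose, select_collect2() hunts down a single victim that is stuck on someone else's shrink list. The overall shape is a collect-then-dispose loop that retries until the subtree is stable. A minimal user-space sketch of that loop shape (illustrative names and refcounting, not the kernel walker):

#include <stdio.h>

/* Illustrative tree node: children pin their parent via `refs`. */
struct node {
	struct node *parent;
	struct node *next;	/* dispose-list link */
	int refs;
	const char *name;
};

/* Collect pass: queue every unpinned node, report how many were found. */
static int collect(struct node **all, int n, struct node **dispose)
{
	int found = 0;

	for (int i = 0; i < n; i++) {
		struct node *d = all[i];

		if (d && !d->refs) {
			d->next = *dispose;
			*dispose = d;
			all[i] = NULL;
			found++;
		}
	}
	return found;
}

/* Dispose pass: killing a node drops its parent's pin. */
static void shrink(struct node **dispose)
{
	while (*dispose) {
		struct node *d = *dispose;

		*dispose = d->next;
		if (d->parent)
			d->parent->refs--;
		printf("killed %s\n", d->name);
	}
}

int main(void)
{
	struct node root = { .name = "root", .refs = 1 };
	struct node child = { .name = "child", .parent = &root };
	struct node *all[] = { &root, &child };
	struct node *dispose = NULL;

	/* Retry until a pass finds nothing, like shrink_dcache_parent(). */
	while (collect(all, 2, &dispose))
		shrink(&dispose);
	return 0;
}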
...
  * copied and the copy passed in may be reused after this call.
  */
 
-struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
+static struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
 {
 	struct dentry *dentry;
 	char *dname;
...
 	dentry->d_lockref.count = 1;
 	dentry->d_flags = 0;
 	spin_lock_init(&dentry->d_lock);
-	seqcount_init(&dentry->d_seq);
+	seqcount_spinlock_init(&dentry->d_seq, &dentry->d_lock);
 	dentry->d_inode = NULL;
 	dentry->d_parent = dentry;
 	dentry->d_sb = sb;
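d_seq becomes a seqcount_spinlock_t here: seqcount_spinlock_init() records that d_lock serializes the write side, which lets lockdep catch writers that touch the seqcount without holding the lock and gives PREEMPT_RT the information it needs to keep readers safe. A hedged kernel-style sketch of the same pattern on a made-up structure:

#include <linux/spinlock.h>
#include <linux/seqlock.h>

/* Hypothetical structure using the seqcount_spinlock_t pattern. */
struct example_obj {
	spinlock_t lock;
	seqcount_spinlock_t seq;
	int a, b;
};

static void example_obj_init(struct example_obj *obj)
{
	spin_lock_init(&obj->lock);
	/* associate the seqcount with the lock that serializes writers */
	seqcount_spinlock_init(&obj->seq, &obj->lock);
}

static void example_obj_update(struct example_obj *obj, int a, int b)
{
	spin_lock(&obj->lock);
	write_seqcount_begin(&obj->seq);	/* lockdep-checked */
	obj->a = a;
	obj->b = b;
	write_seqcount_end(&obj->seq);
	spin_unlock(&obj->lock);
}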
...
  * never be anyone's children or parents. Unlike all other
  * dentries, these will not have RCU delay between dropping the
  * last reference and freeing them.
+ *
+ * The only user is alloc_file_pseudo() and that's what should
+ * be considered a public interface. Don't use directly.
  */
 struct dentry *d_alloc_pseudo(struct super_block *sb, const struct qstr *name)
 {
...
 	dentry->d_flags |= DCACHE_NORCU;
 	return dentry;
 }
-EXPORT_SYMBOL(d_alloc_pseudo);
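With the export removed, modules can no longer call d_alloc_pseudo() directly; the sanctioned route is alloc_file_pseudo(), which allocates the pseudo dentry internally. A hedged sketch of what a pseudo-filesystem caller might look like (the wrapper name is made up):

/* Hypothetical wrapper: get an anonymous file the supported way. */
static struct file *example_anon_file(struct inode *inode,
				      struct vfsmount *mnt,
				      const struct file_operations *fops)
{
	/* alloc_file_pseudo() calls d_alloc_pseudo() under the hood */
	return alloc_file_pseudo(inode, mnt, "[example]", O_RDWR, fops);
}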
 
 struct dentry *d_alloc_name(struct dentry *parent, const char *name)
 {
...
 	WARN_ON(d_in_lookup(dentry));
 
 	spin_lock(&dentry->d_lock);
+	/*
+	 * Decrement negative dentry count if it was in the LRU list.
+	 */
+	if (dentry->d_flags & DCACHE_LRU_LIST)
+		this_cpu_dec(nr_dentry_negative);
 	hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
 	raw_write_seqcount_begin(&dentry->d_seq);
 	__d_set_inode_and_type(dentry, inode, add_flags);
...
 {
 	return __d_obtain_alias(inode, true);
 }
-EXPORT_SYMBOL(d_obtain_alias);
+EXPORT_SYMBOL_NS(d_obtain_alias, ANDROID_GKI_VFS_EXPORT_ONLY);
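EXPORT_SYMBOL_NS() places the symbol in a named export namespace; here it is the Android GKI mechanism for confining selected VFS exports to modules that explicitly opt in. A module using any symbol from this namespace must import it once:

/* In a module that calls d_obtain_alias(), d_add_ci() or
 * d_splice_alias() from this namespace: */
MODULE_IMPORT_NS(ANDROID_GKI_VFS_EXPORT_ONLY);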
 
 /**
  * d_obtain_root - find or allocate a dentry for a given inode
...
 	}
 	return found;
 }
-EXPORT_SYMBOL(d_add_ci);
+EXPORT_SYMBOL_NS(d_add_ci, ANDROID_GKI_VFS_EXPORT_ONLY);
 
 
 static inline bool d_same_name(const struct dentry *dentry,
...
 void d_delete(struct dentry * dentry)
 {
 	struct inode *inode = dentry->d_inode;
-	int isdir = d_is_dir(dentry);
 
 	spin_lock(&inode->i_lock);
 	spin_lock(&dentry->d_lock);
...
 		spin_unlock(&dentry->d_lock);
 		spin_unlock(&inode->i_lock);
 	}
-	fsnotify_nameremove(dentry, isdir);
 }
 EXPORT_SYMBOL(d_delete);
 
...
 static inline unsigned start_dir_add(struct inode *dir)
 {
 
-	preempt_disable_rt();
 	for (;;) {
-		unsigned n = dir->__i_dir_seq;
-		if (!(n & 1) && cmpxchg(&dir->__i_dir_seq, n, n + 1) == n)
+		unsigned n = dir->i_dir_seq;
+		if (!(n & 1) && cmpxchg(&dir->i_dir_seq, n, n + 1) == n)
 			return n;
 		cpu_relax();
 	}
...
 
 static inline void end_dir_add(struct inode *dir, unsigned n)
 {
-	smp_store_release(&dir->__i_dir_seq, n + 2);
-	preempt_enable_rt();
+	smp_store_release(&dir->i_dir_seq, n + 2);
 }
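This drops the PREEMPT_RT rename of i_dir_seq and its preempt_disable_rt() bracketing, returning to the stock sequence-count protocol: a writer spins until it can cmpxchg an even value to odd (locked), and releases by storing value+2 (even again, new epoch). A user-space sketch of the same even/odd protocol with C11 atomics (illustrative, not the kernel implementation):

#include <stdatomic.h>

static _Atomic unsigned dir_seq;

/* Writer lock: bump an even count to odd, retrying on contention. */
static unsigned start_dir_add(void)
{
	for (;;) {
		unsigned n = atomic_load_explicit(&dir_seq,
						  memory_order_relaxed);
		if (!(n & 1) &&
		    atomic_compare_exchange_strong(&dir_seq, &n, n + 1))
			return n;
		/* cpu_relax() analogue: just retry */
	}
}

/* Writer unlock: the count becomes even again, one epoch later. */
static void end_dir_add(unsigned n)
{
	atomic_store_explicit(&dir_seq, n + 2, memory_order_release);
}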
 
 static void d_wait_lookup(struct dentry *dentry)
 {
-	struct swait_queue __wait;
-
-	if (!d_in_lookup(dentry))
-		return;
-
-	INIT_LIST_HEAD(&__wait.task_list);
-	do {
-		prepare_to_swait_exclusive(dentry->d_wait, &__wait, TASK_UNINTERRUPTIBLE);
-		spin_unlock(&dentry->d_lock);
-		schedule();
-		spin_lock(&dentry->d_lock);
-	} while (d_in_lookup(dentry));
-	finish_swait(dentry->d_wait, &__wait);
+	if (d_in_lookup(dentry)) {
+		DECLARE_WAITQUEUE(wait, current);
+		add_wait_queue(dentry->d_wait, &wait);
+		do {
+			set_current_state(TASK_UNINTERRUPTIBLE);
+			spin_unlock(&dentry->d_lock);
+			schedule();
+			spin_lock(&dentry->d_lock);
+		} while (d_in_lookup(dentry));
+	}
 }
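The swait variant is replaced by the stock waitqueue idiom: declare a waiter on the stack, queue it, then loop setting the task state, dropping d_lock, sleeping, and retaking the lock until the condition clears. A hedged kernel-style sketch of that idiom on a made-up condition (the helper and its parameters are hypothetical):

/* Hypothetical: sleep until *flag is cleared, dropping `lock` while
 * asleep. Same shape as d_wait_lookup() above. */
static void example_wait_for_clear(wait_queue_head_t *wq, spinlock_t *lock,
				   bool *flag)
{
	if (*flag) {
		DECLARE_WAITQUEUE(wait, current);

		add_wait_queue(wq, &wait);
		do {
			set_current_state(TASK_UNINTERRUPTIBLE);
			spin_unlock(lock);
			schedule();	/* woken by wake_up_all(wq) */
			spin_lock(lock);
		} while (*flag);
		remove_wait_queue(wq, &wait);
		__set_current_state(TASK_RUNNING);
	}
}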
 
 struct dentry *d_alloc_parallel(struct dentry *parent,
 				const struct qstr *name,
-				struct swait_queue_head *wq)
+				wait_queue_head_t *wq)
 {
 	unsigned int hash = name->hash;
 	struct hlist_bl_head *b = in_lookup_hash(parent, hash);
...
 
 retry:
 	rcu_read_lock();
-	seq = smp_load_acquire(&parent->d_inode->__i_dir_seq);
+	seq = smp_load_acquire(&parent->d_inode->i_dir_seq);
 	r_seq = read_seqbegin(&rename_lock);
 	dentry = __d_lookup_rcu(parent, name, &d_seq);
 	if (unlikely(dentry)) {
...
 	}
 
 	hlist_bl_lock(b);
-	if (unlikely(READ_ONCE(parent->d_inode->__i_dir_seq) != seq)) {
+	if (unlikely(READ_ONCE(parent->d_inode->i_dir_seq) != seq)) {
 		hlist_bl_unlock(b);
 		rcu_read_unlock();
 		goto retry;
...
 	hlist_bl_lock(b);
 	dentry->d_flags &= ~DCACHE_PAR_LOOKUP;
 	__hlist_bl_del(&dentry->d_u.d_in_lookup_hash);
-	swake_up_all(dentry->d_wait);
+	wake_up_all(dentry->d_wait);
 	dentry->d_wait = NULL;
 	hlist_bl_unlock(b);
 	INIT_HLIST_NODE(&dentry->d_u.d_alias);
...
 	__d_add(dentry, inode);
 	return NULL;
 }
-EXPORT_SYMBOL(d_splice_alias);
+EXPORT_SYMBOL_NS(d_splice_alias, ANDROID_GKI_VFS_EXPORT_ONLY);
 
 /*
  * Test whether new_dentry is a subdirectory of old_dentry.
...
 
 static void __init dcache_init_early(void)
 {
-	unsigned int loop;
-
 	/* If hashes are distributed across NUMA nodes, defer
 	 * hash allocation until vmalloc space is available.
 	 */
...
 					NULL,
 					0,
 					0);
-
-	for (loop = 0; loop < (1U << d_hash_shift); loop++)
-		INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
-
 	d_hash_shift = 32 - d_hash_shift;
 }
 
 static void __init dcache_init(void)
 {
-	unsigned int loop;
 	/*
 	 * A constructor could be added for stable state like the lists,
 	 * but it is probably not worth it because of the cache nature
...
 					NULL,
 					0,
 					0);
-
-	for (loop = 0; loop < (1U << d_hash_shift); loop++)
-		INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
-
 	d_hash_shift = 32 - d_hash_shift;
 }
 
---|