2023-11-06 e3e12f52b214121840b44c91de5b3e5af5d3eb84
kernel/fs/dcache.c
@@ -2387,9 +2387,10 @@
 static inline unsigned start_dir_add(struct inode *dir)
 {
 
+	preempt_disable_rt();
 	for (;;) {
-		unsigned n = dir->i_dir_seq;
-		if (!(n & 1) && cmpxchg(&dir->i_dir_seq, n, n + 1) == n)
+		unsigned n = dir->__i_dir_seq;
+		if (!(n & 1) && cmpxchg(&dir->__i_dir_seq, n, n + 1) == n)
 			return n;
 		cpu_relax();
 	}
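Note: the write side holds the sequence count odd for the duration of the directory update, so concurrent writers spin in cpu_relax() above and lookups retry. On PREEMPT_RT this path is preemptible, and a writer preempted mid-update would leave those spinners burning CPU, hence the preempt_disable_rt()/preempt_enable_rt() pair. A minimal sketch of how RT-patched trees define these helpers, assuming the CONFIG_PREEMPT_RT_BASE symbol used by older RT series (the exact symbol varies by series):

/* Sketch, not part of this patch: RT trees define the *_rt() helpers
 * roughly like this in <linux/preempt.h>; on non-RT configs they
 * compile down to a compiler barrier. */
#ifdef CONFIG_PREEMPT_RT_BASE
# define preempt_disable_rt()		preempt_disable()
# define preempt_enable_rt()		preempt_enable()
#else
# define preempt_disable_rt()		barrier()
# define preempt_enable_rt()		barrier()
#endif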
@@ -2397,26 +2398,30 @@
 
 static inline void end_dir_add(struct inode *dir, unsigned n)
 {
-	smp_store_release(&dir->i_dir_seq, n + 2);
+	smp_store_release(&dir->__i_dir_seq, n + 2);
+	preempt_enable_rt();
 }
 
 static void d_wait_lookup(struct dentry *dentry)
 {
-	if (d_in_lookup(dentry)) {
-		DECLARE_WAITQUEUE(wait, current);
-		add_wait_queue(dentry->d_wait, &wait);
-		do {
-			set_current_state(TASK_UNINTERRUPTIBLE);
-			spin_unlock(&dentry->d_lock);
-			schedule();
-			spin_lock(&dentry->d_lock);
-		} while (d_in_lookup(dentry));
-	}
+	struct swait_queue __wait;
+
+	if (!d_in_lookup(dentry))
+		return;
+
+	INIT_LIST_HEAD(&__wait.task_list);
+	do {
+		prepare_to_swait_exclusive(dentry->d_wait, &__wait, TASK_UNINTERRUPTIBLE);
+		spin_unlock(&dentry->d_lock);
+		schedule();
+		spin_lock(&dentry->d_lock);
+	} while (d_in_lookup(dentry));
+	finish_swait(dentry->d_wait, &__wait);
 }
 
 struct dentry *d_alloc_parallel(struct dentry *parent,
				const struct qstr *name,
-				wait_queue_head_t *wq)
+				struct swait_queue_head *wq)
 {
 	unsigned int hash = name->hash;
 	struct hlist_bl_head *b = in_lookup_hash(parent, hash);
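Note: d_wait_lookup() moves from the classic waitqueue idiom (DECLARE_WAITQUEUE() + set_current_state()) to a simple waitqueue (swait), whose head is protected by a raw_spinlock_t and therefore stays usable in contexts where RT turns an ordinary spinlock_t into a sleeping lock. A self-contained sketch of the same wait pattern; the example_* names are hypothetical stand-ins for dentry->d_wait and d_in_lookup(), while the swait calls are the real <linux/swait.h> API used by the hunk above:

#include <linux/swait.h>
#include <linux/sched.h>

static DECLARE_SWAIT_QUEUE_HEAD(example_wq);
static bool example_done;

static void example_wait(void)
{
	DECLARE_SWAITQUEUE(wait);

	for (;;) {
		/* Queue ourselves and go TASK_UNINTERRUPTIBLE, as the
		 * new d_wait_lookup() loop does. */
		prepare_to_swait_exclusive(&example_wq, &wait,
					   TASK_UNINTERRUPTIBLE);
		if (READ_ONCE(example_done))
			break;
		schedule();
	}
	/* Restore TASK_RUNNING and unlink from the queue. */
	finish_swait(&example_wq, &wait);
}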
@@ -2430,7 +2435,7 @@
 
 retry:
 	rcu_read_lock();
-	seq = smp_load_acquire(&parent->d_inode->i_dir_seq);
+	seq = smp_load_acquire(&parent->d_inode->__i_dir_seq);
 	r_seq = read_seqbegin(&rename_lock);
 	dentry = __d_lookup_rcu(parent, name, &d_seq);
 	if (unlikely(dentry)) {
@@ -2458,7 +2463,7 @@
 	}
 
 	hlist_bl_lock(b);
-	if (unlikely(READ_ONCE(parent->d_inode->i_dir_seq) != seq)) {
+	if (unlikely(READ_ONCE(parent->d_inode->__i_dir_seq) != seq)) {
 		hlist_bl_unlock(b);
 		rcu_read_unlock();
 		goto retry;
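Note: the two hunks above are pure renames on the read side (the RT series renames the field to __i_dir_seq, apparently so any remaining direct users fail to build): an acquire load of the sequence before the RCU lookup, then a recheck under the bucket lock, with a retry if the value moved. A standalone C11 sketch of the even/odd protocol these hunks and start_dir_add()/end_dir_add() implement; all names here are hypothetical:

#include <stdatomic.h>

/* Hypothetical stand-in for inode->__i_dir_seq. */
static _Atomic unsigned int dir_seq;

/* Reader: snapshot the sequence before the lockless lookup. */
static unsigned int read_begin(void)
{
	return atomic_load_explicit(&dir_seq, memory_order_acquire);
}

/* Reader: true if the sequence moved since the snapshot,
 * i.e. the lookup must be retried. */
static int read_retry(unsigned int n)
{
	return atomic_load_explicit(&dir_seq, memory_order_acquire) != n;
}

/* Writer: bump the count to odd on entry (cf. start_dir_add())... */
static unsigned int write_begin(void)
{
	unsigned int n;

	for (;;) {
		n = atomic_load_explicit(&dir_seq, memory_order_relaxed);
		if (!(n & 1) &&
		    atomic_compare_exchange_strong(&dir_seq, &n, n + 1))
			return n;
	}
}

/* ...and to the next even value on exit (cf. end_dir_add()). */
static void write_end(unsigned int n)
{
	atomic_store_explicit(&dir_seq, n + 2, memory_order_release);
}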
@@ -2531,7 +2536,7 @@
 	hlist_bl_lock(b);
 	dentry->d_flags &= ~DCACHE_PAR_LOOKUP;
 	__hlist_bl_del(&dentry->d_u.d_in_lookup_hash);
-	wake_up_all(dentry->d_wait);
+	swake_up_all(dentry->d_wait);
 	dentry->d_wait = NULL;
 	hlist_bl_unlock(b);
 	INIT_HLIST_NODE(&dentry->d_u.d_alias);
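Note: this wakeup runs while the hlist_bl lock on the hash chain is still held. A plain wake_up_all() takes the waitqueue's spinlock_t, which RT turns into a sleeping lock and which therefore must not be taken there; swake_up_all() only takes the swait head's raw_spinlock_t. Continuing the sketch above (example_* names remain hypothetical), the matching waker side:

/* Waker side of the earlier sketch: publish the condition, then wake.
 * swake_up_all() acquires only example_wq's raw_spinlock_t, so it
 * stays legal where RT forbids taking a sleeping spinlock_t. */
static void example_wake(void)
{
	WRITE_ONCE(example_done, true);
	swake_up_all(&example_wq);
}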
@@ -3044,6 +3049,8 @@
 
 static void __init dcache_init_early(void)
 {
+	unsigned int loop;
+
 	/* If hashes are distributed across NUMA nodes, defer
 	 * hash allocation until vmalloc space is available.
 	 */
@@ -3060,11 +3067,16 @@
 					NULL,
 					0,
 					0);
+
+	for (loop = 0; loop < (1U << d_hash_shift); loop++)
+		INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
+
 	d_hash_shift = 32 - d_hash_shift;
 }
 
 static void __init dcache_init(void)
 {
+	unsigned int loop;
 	/*
 	 * A constructor could be added for stable state like the lists,
 	 * but it is probably not worth it because of the cache nature
@@ -3088,6 +3100,10 @@
 					NULL,
 					0,
 					0);
+
+	for (loop = 0; loop < (1U << d_hash_shift); loop++)
+		INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
+
 	d_hash_shift = 32 - d_hash_shift;
 }
 
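Note: these hunks re-add per-bucket initialization loops that upstream dropped when zeroed hash allocation was introduced. Upstream can rely on zeroed memory because INIT_HLIST_BL_HEAD() is just a NULL store; the RT series, however, embeds a real raw spinlock in each hlist_bl_head (in place of the bit-0 bit spinlock), and a lock must be initialized, not merely zeroed. A sketch of that RT-side list_bl variant, again assuming the CONFIG_PREEMPT_RT_BASE symbol; exact layout varies by series:

/* Sketch, not part of this patch: the RT list_bl change that makes
 * the explicit INIT_HLIST_BL_HEAD() loops above necessary. */
struct hlist_bl_head {
	struct hlist_bl_node *first;
#ifdef CONFIG_PREEMPT_RT_BASE
	raw_spinlock_t lock;	/* replaces the bit spinlock on bit 0 */
#endif
};

#ifdef CONFIG_PREEMPT_RT_BASE
#define INIT_HLIST_BL_HEAD(h)			\
do {						\
	(h)->first = NULL;			\
	raw_spin_lock_init(&(h)->lock);		\
} while (0)
#else
#define INIT_HLIST_BL_HEAD(ptr)	((ptr)->first = NULL)
#endif

Note that both loops run before d_hash_shift is inverted (d_hash_shift = 32 - d_hash_shift), so (1U << d_hash_shift) is still the bucket count at that point.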