2023-12-11 6778948f9de86c3cfaf36725a7c87dcff9ba247f
kernel/mm/slub.c
@@ -431,7 +431,7 @@
 
 #ifdef CONFIG_SLUB_DEBUG
 static unsigned long object_map[BITS_TO_LONGS(MAX_OBJS_PER_PAGE)];
-static DEFINE_RAW_SPINLOCK(object_map_lock);
+static DEFINE_SPINLOCK(object_map_lock);
 
 static void __fill_map(unsigned long *obj_map, struct kmem_cache *s,
		       struct page *page)
@@ -456,7 +456,7 @@
 {
	VM_BUG_ON(!irqs_disabled());
 
-	raw_spin_lock(&object_map_lock);
+	spin_lock(&object_map_lock);
 
	__fill_map(object_map, s, page);
 
@@ -466,7 +466,7 @@
 static void put_map(unsigned long *map) __releases(&object_map_lock)
 {
	VM_BUG_ON(map != object_map);
-	raw_spin_unlock(&object_map_lock);
+	spin_unlock(&object_map_lock);
 }
 
 static inline unsigned int size_from_object(struct kmem_cache *s)
@@ -1255,7 +1255,7 @@
	unsigned long flags;
	int ret = 0;
 
-	raw_spin_lock_irqsave(&n->list_lock, flags);
+	spin_lock_irqsave(&n->list_lock, flags);
	slab_lock(page);
 
	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
@@ -1290,7 +1290,7 @@
			 bulk_cnt, cnt);
 
	slab_unlock(page);
-	raw_spin_unlock_irqrestore(&n->list_lock, flags);
+	spin_unlock_irqrestore(&n->list_lock, flags);
	if (!ret)
		slab_fix(s, "Object at 0x%p not freed", object);
	return ret;
@@ -1537,12 +1537,6 @@
	return false;
 }
 #endif /* CONFIG_SLUB_DEBUG */
-
-struct slub_free_list {
-	raw_spinlock_t lock;
-	struct list_head list;
-};
-static DEFINE_PER_CPU(struct slub_free_list, slub_free_list);
 
 /*
  * Hooks for other subsystems that check memory allocations. In a typical
@@ -1804,18 +1798,10 @@
	void *start, *p, *next;
	int idx;
	bool shuffle;
-	bool enableirqs = false;
 
	flags &= gfp_allowed_mask;
 
	if (gfpflags_allow_blocking(flags))
-		enableirqs = true;
-
-#ifdef CONFIG_PREEMPT_RT
-	if (system_state > SYSTEM_BOOTING && system_state < SYSTEM_SUSPEND)
-		enableirqs = true;
-#endif
-	if (enableirqs)
		local_irq_enable();
 
	flags |= s->allocflags;
@@ -1874,7 +1860,7 @@
	page->frozen = 1;
 
 out:
-	if (enableirqs)
+	if (gfpflags_allow_blocking(flags))
		local_irq_disable();
	if (!page)
		return NULL;
@@ -1917,16 +1903,6 @@
	__free_pages(page, order);
 }
 
-static void free_delayed(struct list_head *h)
-{
-	while (!list_empty(h)) {
-		struct page *page = list_first_entry(h, struct page, lru);
-
-		list_del(&page->lru);
-		__free_slab(page->slab_cache, page);
-	}
-}
-
 static void rcu_free_slab(struct rcu_head *h)
 {
	struct page *page = container_of(h, struct page, rcu_head);
@@ -1938,12 +1914,6 @@
 {
	if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU)) {
		call_rcu(&page->rcu_head, rcu_free_slab);
-	} else if (irqs_disabled()) {
-		struct slub_free_list *f = this_cpu_ptr(&slub_free_list);
-
-		raw_spin_lock(&f->lock);
-		list_add(&page->lru, &f->list);
-		raw_spin_unlock(&f->lock);
	} else
		__free_slab(s, page);
 }
@@ -2051,7 +2021,7 @@
	if (!n || !n->nr_partial)
		return NULL;
 
-	raw_spin_lock(&n->list_lock);
+	spin_lock(&n->list_lock);
	list_for_each_entry_safe(page, page2, &n->partial, slab_list) {
		void *t;
 
@@ -2076,7 +2046,7 @@
			break;
 
	}
-	raw_spin_unlock(&n->list_lock);
+	spin_unlock(&n->list_lock);
	return object;
 }
 
@@ -2330,7 +2300,7 @@
			 * that acquire_slab() will see a slab page that
			 * is frozen
			 */
-			raw_spin_lock(&n->list_lock);
+			spin_lock(&n->list_lock);
		}
	} else {
		m = M_FULL;
@@ -2342,7 +2312,7 @@
			 * slabs from diagnostic functions will not see
			 * any frozen slabs.
			 */
-			raw_spin_lock(&n->list_lock);
+			spin_lock(&n->list_lock);
		}
 #endif
	}
@@ -2367,7 +2337,7 @@
		goto redo;
 
	if (lock)
-		raw_spin_unlock(&n->list_lock);
+		spin_unlock(&n->list_lock);
 
	if (m == M_PARTIAL)
		stat(s, tail);
@@ -2407,10 +2377,10 @@
		n2 = get_node(s, page_to_nid(page));
		if (n != n2) {
			if (n)
-				raw_spin_unlock(&n->list_lock);
+				spin_unlock(&n->list_lock);
 
			n = n2;
-			raw_spin_lock(&n->list_lock);
+			spin_lock(&n->list_lock);
		}
 
		do {
@@ -2439,7 +2409,7 @@
	}
 
	if (n)
-		raw_spin_unlock(&n->list_lock);
+		spin_unlock(&n->list_lock);
 
	while (discard_page) {
		page = discard_page;
@@ -2476,21 +2446,14 @@
			pobjects = oldpage->pobjects;
			pages = oldpage->pages;
			if (drain && pobjects > slub_cpu_partial(s)) {
-				struct slub_free_list *f;
				unsigned long flags;
-				LIST_HEAD(tofree);
				/*
				 * partial array is full. Move the existing
				 * set to the per node partial list.
				 */
				local_irq_save(flags);
				unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
-				f = this_cpu_ptr(&slub_free_list);
-				raw_spin_lock(&f->lock);
-				list_splice_init(&f->list, &tofree);
-				raw_spin_unlock(&f->lock);
				local_irq_restore(flags);
-				free_delayed(&tofree);
				oldpage = NULL;
				pobjects = 0;
				pages = 0;
@@ -2556,19 +2519,7 @@
 
 static void flush_all(struct kmem_cache *s)
 {
-	LIST_HEAD(tofree);
-	int cpu;
-
	on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1);
-	for_each_online_cpu(cpu) {
-		struct slub_free_list *f;
-
-		f = &per_cpu(slub_free_list, cpu);
-		raw_spin_lock_irq(&f->lock);
-		list_splice_init(&f->list, &tofree);
-		raw_spin_unlock_irq(&f->lock);
-		free_delayed(&tofree);
-	}
 }
 
 /*
@@ -2623,10 +2574,10 @@
	unsigned long x = 0;
	struct page *page;
 
-	raw_spin_lock_irqsave(&n->list_lock, flags);
+	spin_lock_irqsave(&n->list_lock, flags);
	list_for_each_entry(page, &n->partial, slab_list)
		x += get_count(page);
-	raw_spin_unlock_irqrestore(&n->list_lock, flags);
+	spin_unlock_irqrestore(&n->list_lock, flags);
	return x;
 }
 #endif /* CONFIG_SLUB_DEBUG || CONFIG_SLUB_SYSFS */
@@ -2765,10 +2716,8 @@
  * already disabled (which is the case for bulk allocation).
  */
 static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
-			  unsigned long addr, struct kmem_cache_cpu *c,
-			  struct list_head *to_free)
+			  unsigned long addr, struct kmem_cache_cpu *c)
 {
-	struct slub_free_list *f;
	void *freelist;
	struct page *page;
 
@@ -2837,13 +2786,6 @@
	VM_BUG_ON(!c->page->frozen);
	c->freelist = get_freepointer(s, freelist);
	c->tid = next_tid(c->tid);
-
-out:
-	f = this_cpu_ptr(&slub_free_list);
-	raw_spin_lock(&f->lock);
-	list_splice_init(&f->list, to_free);
-	raw_spin_unlock(&f->lock);
-
	return freelist;
 
 new_slab:
@@ -2859,7 +2801,7 @@
 
	if (unlikely(!freelist)) {
		slab_out_of_memory(s, gfpflags, node);
-		goto out;
+		return NULL;
	}
 
	page = c->page;
@@ -2872,7 +2814,7 @@
		goto new_slab;	/* Slab failed checks. Next slab needed */
 
	deactivate_slab(s, page, get_freepointer(s, freelist), c);
-	goto out;
+	return freelist;
 }
 
 /*
@@ -2884,7 +2826,6 @@
 {
	void *p;
	unsigned long flags;
-	LIST_HEAD(tofree);
 
	local_irq_save(flags);
 #ifdef CONFIG_PREEMPTION
@@ -2896,9 +2837,8 @@
	c = this_cpu_ptr(s->cpu_slab);
 #endif
 
-	p = ___slab_alloc(s, gfpflags, node, addr, c, &tofree);
+	p = ___slab_alloc(s, gfpflags, node, addr, c);
	local_irq_restore(flags);
-	free_delayed(&tofree);
	return p;
 }
 
@@ -2933,10 +2873,6 @@
	unsigned long tid;
	struct obj_cgroup *objcg = NULL;
	bool init = false;
-
-	if (IS_ENABLED(CONFIG_PREEMPT_RT) && IS_ENABLED(CONFIG_DEBUG_ATOMIC_SLEEP))
-		WARN_ON_ONCE(!preemptible() &&
-			     (system_state > SYSTEM_BOOTING && system_state < SYSTEM_SUSPEND));
 
	s = slab_pre_alloc_hook(s, &objcg, 1, gfpflags);
	if (!s)
@@ -3110,7 +3046,7 @@
 
	do {
		if (unlikely(n)) {
-			raw_spin_unlock_irqrestore(&n->list_lock, flags);
+			spin_unlock_irqrestore(&n->list_lock, flags);
			n = NULL;
		}
		prior = page->freelist;
@@ -3142,7 +3078,7 @@
				 * Otherwise the list_lock will synchronize with
				 * other processors updating the list of slabs.
				 */
-				raw_spin_lock_irqsave(&n->list_lock, flags);
+				spin_lock_irqsave(&n->list_lock, flags);
 
			}
		}
@@ -3184,7 +3120,7 @@
		add_partial(n, page, DEACTIVATE_TO_TAIL);
		stat(s, FREE_ADD_PARTIAL);
	}
-	raw_spin_unlock_irqrestore(&n->list_lock, flags);
+	spin_unlock_irqrestore(&n->list_lock, flags);
	return;
 
 slab_empty:
@@ -3199,7 +3135,7 @@
		remove_full(s, n, page);
	}
 
-	raw_spin_unlock_irqrestore(&n->list_lock, flags);
+	spin_unlock_irqrestore(&n->list_lock, flags);
	stat(s, FREE_SLAB);
	discard_slab(s, page);
 }
@@ -3416,13 +3352,8 @@
			  void **p)
 {
	struct kmem_cache_cpu *c;
-	LIST_HEAD(to_free);
	int i;
	struct obj_cgroup *objcg = NULL;
-
-	if (IS_ENABLED(CONFIG_PREEMPT_RT) && IS_ENABLED(CONFIG_DEBUG_ATOMIC_SLEEP))
-		WARN_ON_ONCE(!preemptible() &&
-			     (system_state > SYSTEM_BOOTING && system_state < SYSTEM_SUSPEND));
 
	/* memcg and kmem_cache debug support */
	s = slab_pre_alloc_hook(s, &objcg, size, flags);
@@ -3460,7 +3391,7 @@
			 * of re-populating per CPU c->freelist
			 */
			p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE,
-					    _RET_IP_, c, &to_free);
+					    _RET_IP_, c);
			if (unlikely(!p[i]))
				goto error;
 
@@ -3475,7 +3406,6 @@
	}
	c->tid = next_tid(c->tid);
	local_irq_enable();
-	free_delayed(&to_free);
 
	/*
	 * memcg and kmem_cache debug support and memory initialization.
@@ -3486,7 +3416,6 @@
	return i;
 error:
	local_irq_enable();
-	free_delayed(&to_free);
	slab_post_alloc_hook(s, objcg, flags, i, p, false);
	__kmem_cache_free_bulk(s, i, p);
	return 0;
@@ -3622,7 +3551,7 @@
 init_kmem_cache_node(struct kmem_cache_node *n)
 {
	n->nr_partial = 0;
-	raw_spin_lock_init(&n->list_lock);
+	spin_lock_init(&n->list_lock);
	INIT_LIST_HEAD(&n->partial);
 #ifdef CONFIG_SLUB_DEBUG
	atomic_long_set(&n->nr_slabs, 0);
@@ -4016,7 +3945,7 @@
	struct page *page, *h;
 
	BUG_ON(irqs_disabled());
-	raw_spin_lock_irq(&n->list_lock);
+	spin_lock_irq(&n->list_lock);
	list_for_each_entry_safe(page, h, &n->partial, slab_list) {
		if (!page->inuse) {
			remove_partial(n, page);
@@ -4026,7 +3955,7 @@
			  "Objects remaining in %s on __kmem_cache_shutdown()");
		}
	}
-	raw_spin_unlock_irq(&n->list_lock);
+	spin_unlock_irq(&n->list_lock);
 
	list_for_each_entry_safe(page, h, &discard, slab_list)
		discard_slab(s, page);
@@ -4301,7 +4230,7 @@
		for (i = 0; i < SHRINK_PROMOTE_MAX; i++)
			INIT_LIST_HEAD(promote + i);
 
-		raw_spin_lock_irqsave(&n->list_lock, flags);
+		spin_lock_irqsave(&n->list_lock, flags);
 
		/*
		 * Build lists of slabs to discard or promote.
@@ -4332,7 +4261,7 @@
		for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--)
			list_splice(promote + i, &n->partial);
 
-		raw_spin_unlock_irqrestore(&n->list_lock, flags);
+		spin_unlock_irqrestore(&n->list_lock, flags);
 
		/* Release empty slabs */
		list_for_each_entry_safe(page, t, &discard, slab_list)
@@ -4507,12 +4436,6 @@
 {
	static __initdata struct kmem_cache boot_kmem_cache,
		boot_kmem_cache_node;
-	int cpu;
-
-	for_each_possible_cpu(cpu) {
-		raw_spin_lock_init(&per_cpu(slub_free_list, cpu).lock);
-		INIT_LIST_HEAD(&per_cpu(slub_free_list, cpu).list);
-	}
 
	if (debug_guardpage_minorder())
		slub_max_order = 0;
@@ -4705,7 +4628,7 @@
	struct page *page;
	unsigned long flags;
 
-	raw_spin_lock_irqsave(&n->list_lock, flags);
+	spin_lock_irqsave(&n->list_lock, flags);
 
	list_for_each_entry(page, &n->partial, slab_list) {
		validate_slab(s, page);
@@ -4727,7 +4650,7 @@
	       s->name, count, atomic_long_read(&n->nr_slabs));
 
 out:
-	raw_spin_unlock_irqrestore(&n->list_lock, flags);
+	spin_unlock_irqrestore(&n->list_lock, flags);
	return count;
 }
 
@@ -4782,9 +4705,6 @@
 {
	struct location *l;
	int order;
-
-	if (IS_ENABLED(CONFIG_PREEMPT_RT) && flags == GFP_ATOMIC)
-		return 0;
 
	order = get_order(sizeof(struct location) * max);
 
@@ -5920,12 +5840,12 @@
		if (!atomic_long_read(&n->nr_slabs))
			continue;
 
-		raw_spin_lock_irqsave(&n->list_lock, flags);
+		spin_lock_irqsave(&n->list_lock, flags);
		list_for_each_entry(page, &n->partial, slab_list)
			process_slab(t, s, page, alloc, obj_map);
		list_for_each_entry(page, &n->full, slab_list)
			process_slab(t, s, page, alloc, obj_map);
-		raw_spin_unlock_irqrestore(&n->list_lock, flags);
+		spin_unlock_irqrestore(&n->list_lock, flags);
	}
 
	bitmap_free(obj_map);