@@ -431,7 +431,7 @@
 
 #ifdef CONFIG_SLUB_DEBUG
 static unsigned long object_map[BITS_TO_LONGS(MAX_OBJS_PER_PAGE)];
-static DEFINE_RAW_SPINLOCK(object_map_lock);
+static DEFINE_SPINLOCK(object_map_lock);
 
 static void __fill_map(unsigned long *obj_map, struct kmem_cache *s,
                        struct page *page)
@@ -456,7 +456,7 @@
 {
         VM_BUG_ON(!irqs_disabled());
 
-        raw_spin_lock(&object_map_lock);
+        spin_lock(&object_map_lock);
 
         __fill_map(object_map, s, page);
 
@@ -466,7 +466,7 @@
 static void put_map(unsigned long *map) __releases(&object_map_lock)
 {
         VM_BUG_ON(map != object_map);
-        raw_spin_unlock(&object_map_lock);
+        spin_unlock(&object_map_lock);
 }
 
 static inline unsigned int size_from_object(struct kmem_cache *s)
@@ -1255,7 +1255,7 @@
         unsigned long flags;
         int ret = 0;
 
-        raw_spin_lock_irqsave(&n->list_lock, flags);
+        spin_lock_irqsave(&n->list_lock, flags);
         slab_lock(page);
 
         if (s->flags & SLAB_CONSISTENCY_CHECKS) {
@@ -1290,7 +1290,7 @@
                          bulk_cnt, cnt);
 
         slab_unlock(page);
-        raw_spin_unlock_irqrestore(&n->list_lock, flags);
+        spin_unlock_irqrestore(&n->list_lock, flags);
         if (!ret)
                 slab_fix(s, "Object at 0x%p not freed", object);
         return ret;
@@ -1537,12 +1537,6 @@
         return false;
 }
 #endif /* CONFIG_SLUB_DEBUG */
-
-struct slub_free_list {
-        raw_spinlock_t lock;
-        struct list_head list;
-};
-static DEFINE_PER_CPU(struct slub_free_list, slub_free_list);
 
 /*
  * Hooks for other subsystems that check memory allocations. In a typical
@@ -1804,18 +1798,10 @@
         void *start, *p, *next;
         int idx;
         bool shuffle;
-        bool enableirqs = false;
 
         flags &= gfp_allowed_mask;
 
         if (gfpflags_allow_blocking(flags))
-                enableirqs = true;
-
-#ifdef CONFIG_PREEMPT_RT
-        if (system_state > SYSTEM_BOOTING && system_state < SYSTEM_SUSPEND)
-                enableirqs = true;
-#endif
-        if (enableirqs)
                 local_irq_enable();
 
         flags |= s->allocflags;
@@ -1874,7 +1860,7 @@
         page->frozen = 1;
 
 out:
-        if (enableirqs)
+        if (gfpflags_allow_blocking(flags))
                 local_irq_disable();
         if (!page)
                 return NULL;
@@ -1917,16 +1903,6 @@
         __free_pages(page, order);
 }
 
-static void free_delayed(struct list_head *h)
-{
-        while (!list_empty(h)) {
-                struct page *page = list_first_entry(h, struct page, lru);
-
-                list_del(&page->lru);
-                __free_slab(page->slab_cache, page);
-        }
-}
-
 static void rcu_free_slab(struct rcu_head *h)
 {
         struct page *page = container_of(h, struct page, rcu_head);
@@ -1938,12 +1914,6 @@
 {
         if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU)) {
                 call_rcu(&page->rcu_head, rcu_free_slab);
-        } else if (irqs_disabled()) {
-                struct slub_free_list *f = this_cpu_ptr(&slub_free_list);
-
-                raw_spin_lock(&f->lock);
-                list_add(&page->lru, &f->list);
-                raw_spin_unlock(&f->lock);
         } else
                 __free_slab(s, page);
 }
@@ -2051,7 +2021,7 @@
         if (!n || !n->nr_partial)
                 return NULL;
 
-        raw_spin_lock(&n->list_lock);
+        spin_lock(&n->list_lock);
         list_for_each_entry_safe(page, page2, &n->partial, slab_list) {
                 void *t;
 
@@ -2076,7 +2046,7 @@
                         break;
 
         }
-        raw_spin_unlock(&n->list_lock);
+        spin_unlock(&n->list_lock);
         return object;
 }
 
@@ -2330,7 +2300,7 @@
                          * that acquire_slab() will see a slab page that
                          * is frozen
                          */
-                        raw_spin_lock(&n->list_lock);
+                        spin_lock(&n->list_lock);
                 }
         } else {
                 m = M_FULL;
@@ -2342,7 +2312,7 @@
                          * slabs from diagnostic functions will not see
                          * any frozen slabs.
                          */
-                        raw_spin_lock(&n->list_lock);
+                        spin_lock(&n->list_lock);
                 }
 #endif
         }
@@ -2367,7 +2337,7 @@
                 goto redo;
 
         if (lock)
-                raw_spin_unlock(&n->list_lock);
+                spin_unlock(&n->list_lock);
 
         if (m == M_PARTIAL)
                 stat(s, tail);
@@ -2407,10 +2377,10 @@
                 n2 = get_node(s, page_to_nid(page));
                 if (n != n2) {
                         if (n)
-                                raw_spin_unlock(&n->list_lock);
+                                spin_unlock(&n->list_lock);
 
                         n = n2;
-                        raw_spin_lock(&n->list_lock);
+                        spin_lock(&n->list_lock);
                 }
 
                 do {
@@ -2439,7 +2409,7 @@
         }
 
         if (n)
-                raw_spin_unlock(&n->list_lock);
+                spin_unlock(&n->list_lock);
 
         while (discard_page) {
                 page = discard_page;
@@ -2476,21 +2446,14 @@
                         pobjects = oldpage->pobjects;
                         pages = oldpage->pages;
                         if (drain && pobjects > slub_cpu_partial(s)) {
-                                struct slub_free_list *f;
                                 unsigned long flags;
-                                LIST_HEAD(tofree);
                                 /*
                                  * partial array is full. Move the existing
                                  * set to the per node partial list.
                                  */
                                 local_irq_save(flags);
                                 unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
-                                f = this_cpu_ptr(&slub_free_list);
-                                raw_spin_lock(&f->lock);
-                                list_splice_init(&f->list, &tofree);
-                                raw_spin_unlock(&f->lock);
                                 local_irq_restore(flags);
-                                free_delayed(&tofree);
                                 oldpage = NULL;
                                 pobjects = 0;
                                 pages = 0;
@@ -2556,19 +2519,7 @@
 
 static void flush_all(struct kmem_cache *s)
 {
-        LIST_HEAD(tofree);
-        int cpu;
-
         on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1);
-        for_each_online_cpu(cpu) {
-                struct slub_free_list *f;
-
-                f = &per_cpu(slub_free_list, cpu);
-                raw_spin_lock_irq(&f->lock);
-                list_splice_init(&f->list, &tofree);
-                raw_spin_unlock_irq(&f->lock);
-                free_delayed(&tofree);
-        }
 }
 
 /*
@@ -2623,10 +2574,10 @@
         unsigned long x = 0;
         struct page *page;
 
-        raw_spin_lock_irqsave(&n->list_lock, flags);
+        spin_lock_irqsave(&n->list_lock, flags);
         list_for_each_entry(page, &n->partial, slab_list)
                 x += get_count(page);
-        raw_spin_unlock_irqrestore(&n->list_lock, flags);
+        spin_unlock_irqrestore(&n->list_lock, flags);
         return x;
 }
 #endif /* CONFIG_SLUB_DEBUG || CONFIG_SLUB_SYSFS */
@@ -2765,10 +2716,8 @@
  * already disabled (which is the case for bulk allocation).
  */
 static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
-                          unsigned long addr, struct kmem_cache_cpu *c,
-                          struct list_head *to_free)
+                          unsigned long addr, struct kmem_cache_cpu *c)
 {
-        struct slub_free_list *f;
         void *freelist;
         struct page *page;
 
@@ -2837,13 +2786,6 @@
         VM_BUG_ON(!c->page->frozen);
         c->freelist = get_freepointer(s, freelist);
         c->tid = next_tid(c->tid);
-
-out:
-        f = this_cpu_ptr(&slub_free_list);
-        raw_spin_lock(&f->lock);
-        list_splice_init(&f->list, to_free);
-        raw_spin_unlock(&f->lock);
-
         return freelist;
 
 new_slab:
@@ -2859,7 +2801,7 @@
 
         if (unlikely(!freelist)) {
                 slab_out_of_memory(s, gfpflags, node);
-                goto out;
+                return NULL;
         }
 
         page = c->page;
@@ -2872,7 +2814,7 @@
                 goto new_slab;  /* Slab failed checks. Next slab needed */
 
         deactivate_slab(s, page, get_freepointer(s, freelist), c);
-        goto out;
+        return freelist;
 }
 
 /*
@@ -2884,7 +2826,6 @@
 {
         void *p;
         unsigned long flags;
-        LIST_HEAD(tofree);
 
         local_irq_save(flags);
 #ifdef CONFIG_PREEMPTION
@@ -2896,9 +2837,8 @@
         c = this_cpu_ptr(s->cpu_slab);
 #endif
 
-        p = ___slab_alloc(s, gfpflags, node, addr, c, &tofree);
+        p = ___slab_alloc(s, gfpflags, node, addr, c);
         local_irq_restore(flags);
-        free_delayed(&tofree);
         return p;
 }
 
@@ -2933,10 +2873,6 @@
         unsigned long tid;
         struct obj_cgroup *objcg = NULL;
         bool init = false;
-
-        if (IS_ENABLED(CONFIG_PREEMPT_RT) && IS_ENABLED(CONFIG_DEBUG_ATOMIC_SLEEP))
-                WARN_ON_ONCE(!preemptible() &&
-                        (system_state > SYSTEM_BOOTING && system_state < SYSTEM_SUSPEND));
 
         s = slab_pre_alloc_hook(s, &objcg, 1, gfpflags);
         if (!s)
@@ -3110,7 +3046,7 @@
 
         do {
                 if (unlikely(n)) {
-                        raw_spin_unlock_irqrestore(&n->list_lock, flags);
+                        spin_unlock_irqrestore(&n->list_lock, flags);
                         n = NULL;
                 }
                 prior = page->freelist;
@@ -3142,7 +3078,7 @@
                                  * Otherwise the list_lock will synchronize with
                                  * other processors updating the list of slabs.
                                  */
-                                raw_spin_lock_irqsave(&n->list_lock, flags);
+                                spin_lock_irqsave(&n->list_lock, flags);
 
                         }
                 }
@@ -3184,7 +3120,7 @@
                 add_partial(n, page, DEACTIVATE_TO_TAIL);
                 stat(s, FREE_ADD_PARTIAL);
         }
-        raw_spin_unlock_irqrestore(&n->list_lock, flags);
+        spin_unlock_irqrestore(&n->list_lock, flags);
         return;
 
 slab_empty:
@@ -3199,7 +3135,7 @@
                 remove_full(s, n, page);
         }
 
-        raw_spin_unlock_irqrestore(&n->list_lock, flags);
+        spin_unlock_irqrestore(&n->list_lock, flags);
         stat(s, FREE_SLAB);
         discard_slab(s, page);
 }
@@ -3416,13 +3352,8 @@
                           void **p)
 {
         struct kmem_cache_cpu *c;
-        LIST_HEAD(to_free);
         int i;
         struct obj_cgroup *objcg = NULL;
-
-        if (IS_ENABLED(CONFIG_PREEMPT_RT) && IS_ENABLED(CONFIG_DEBUG_ATOMIC_SLEEP))
-                WARN_ON_ONCE(!preemptible() &&
-                        (system_state > SYSTEM_BOOTING && system_state < SYSTEM_SUSPEND));
 
         /* memcg and kmem_cache debug support */
         s = slab_pre_alloc_hook(s, &objcg, size, flags);
@@ -3460,7 +3391,7 @@
                          * of re-populating per CPU c->freelist
                          */
                         p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE,
-                                            _RET_IP_, c, &to_free);
+                                            _RET_IP_, c);
                         if (unlikely(!p[i]))
                                 goto error;
 
@@ -3475,7 +3406,6 @@
         }
         c->tid = next_tid(c->tid);
         local_irq_enable();
-        free_delayed(&to_free);
 
         /*
          * memcg and kmem_cache debug support and memory initialization.
@@ -3486,7 +3416,6 @@
         return i;
 error:
         local_irq_enable();
-        free_delayed(&to_free);
         slab_post_alloc_hook(s, objcg, flags, i, p, false);
         __kmem_cache_free_bulk(s, i, p);
         return 0;
@@ -3622,7 +3551,7 @@
 init_kmem_cache_node(struct kmem_cache_node *n)
 {
         n->nr_partial = 0;
-        raw_spin_lock_init(&n->list_lock);
+        spin_lock_init(&n->list_lock);
         INIT_LIST_HEAD(&n->partial);
 #ifdef CONFIG_SLUB_DEBUG
         atomic_long_set(&n->nr_slabs, 0);
@@ -4016,7 +3945,7 @@
         struct page *page, *h;
 
         BUG_ON(irqs_disabled());
-        raw_spin_lock_irq(&n->list_lock);
+        spin_lock_irq(&n->list_lock);
         list_for_each_entry_safe(page, h, &n->partial, slab_list) {
                 if (!page->inuse) {
                         remove_partial(n, page);
@@ -4026,7 +3955,7 @@
                           "Objects remaining in %s on __kmem_cache_shutdown()");
                 }
         }
-        raw_spin_unlock_irq(&n->list_lock);
+        spin_unlock_irq(&n->list_lock);
 
         list_for_each_entry_safe(page, h, &discard, slab_list)
                 discard_slab(s, page);
@@ -4301,7 +4230,7 @@
                 for (i = 0; i < SHRINK_PROMOTE_MAX; i++)
                         INIT_LIST_HEAD(promote + i);
 
-                raw_spin_lock_irqsave(&n->list_lock, flags);
+                spin_lock_irqsave(&n->list_lock, flags);
 
                 /*
                  * Build lists of slabs to discard or promote.
@@ -4332,7 +4261,7 @@
                 for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--)
                         list_splice(promote + i, &n->partial);
 
-                raw_spin_unlock_irqrestore(&n->list_lock, flags);
+                spin_unlock_irqrestore(&n->list_lock, flags);
 
                 /* Release empty slabs */
                 list_for_each_entry_safe(page, t, &discard, slab_list)
@@ -4507,12 +4436,6 @@
 {
         static __initdata struct kmem_cache boot_kmem_cache,
                 boot_kmem_cache_node;
-        int cpu;
-
-        for_each_possible_cpu(cpu) {
-                raw_spin_lock_init(&per_cpu(slub_free_list, cpu).lock);
-                INIT_LIST_HEAD(&per_cpu(slub_free_list, cpu).list);
-        }
 
         if (debug_guardpage_minorder())
                 slub_max_order = 0;
@@ -4705,7 +4628,7 @@
         struct page *page;
         unsigned long flags;
 
-        raw_spin_lock_irqsave(&n->list_lock, flags);
+        spin_lock_irqsave(&n->list_lock, flags);
 
         list_for_each_entry(page, &n->partial, slab_list) {
                 validate_slab(s, page);
@@ -4727,7 +4650,7 @@
                s->name, count, atomic_long_read(&n->nr_slabs));
 
 out:
-        raw_spin_unlock_irqrestore(&n->list_lock, flags);
+        spin_unlock_irqrestore(&n->list_lock, flags);
         return count;
 }
 
@@ -4782,9 +4705,6 @@
 {
         struct location *l;
         int order;
-
-        if (IS_ENABLED(CONFIG_PREEMPT_RT) && flags == GFP_ATOMIC)
-                return 0;
 
         order = get_order(sizeof(struct location) * max);
 
@@ -5920,12 +5840,12 @@
                 if (!atomic_long_read(&n->nr_slabs))
                         continue;
 
-                raw_spin_lock_irqsave(&n->list_lock, flags);
+                spin_lock_irqsave(&n->list_lock, flags);
                 list_for_each_entry(page, &n->partial, slab_list)
                         process_slab(t, s, page, alloc, obj_map);
                 list_for_each_entry(page, &n->full, slab_list)
                         process_slab(t, s, page, alloc, obj_map);
-                raw_spin_unlock_irqrestore(&n->list_lock, flags);
+                spin_unlock_irqrestore(&n->list_lock, flags);
         }
 
         bitmap_free(obj_map);
---|