@@ -1199,7 +1199,7 @@
 	unsigned long uninitialized_var(flags);
 	int ret = 0;
 
-	spin_lock_irqsave(&n->list_lock, flags);
+	raw_spin_lock_irqsave(&n->list_lock, flags);
 	slab_lock(page);
 
 	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
@@ -1234,7 +1234,7 @@
 			 bulk_cnt, cnt);
 
 	slab_unlock(page);
-	spin_unlock_irqrestore(&n->list_lock, flags);
+	raw_spin_unlock_irqrestore(&n->list_lock, flags);
 	if (!ret)
 		slab_fix(s, "Object at 0x%p not freed", object);
 	return ret;
@@ -1372,6 +1372,12 @@
 	return false;
 }
 #endif /* CONFIG_SLUB_DEBUG */
+
+struct slub_free_list {
+	raw_spinlock_t		lock;
+	struct list_head	list;
+};
+static DEFINE_PER_CPU(struct slub_free_list, slub_free_list);
 
 /*
  * Hooks for other subsystems that check memory allocations. In a typical
@@ -1619,10 +1625,17 @@
 	void *start, *p, *next;
 	int idx, order;
 	bool shuffle;
+	bool enableirqs = false;
 
 	flags &= gfp_allowed_mask;
 
 	if (gfpflags_allow_blocking(flags))
+		enableirqs = true;
+#ifdef CONFIG_PREEMPT_RT_FULL
+	if (system_state > SYSTEM_BOOTING)
+		enableirqs = true;
+#endif
+	if (enableirqs)
 		local_irq_enable();
 
 	flags |= s->allocflags;
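
The hunk above changes when allocate_slab() runs with interrupts enabled: besides the existing case where the gfp flags allow blocking, PREEMPT_RT_FULL also re-enables interrupts once the system is past early boot, because on RT the page allocator takes sleeping locks and must not be entered with interrupts hard-disabled. A minimal restatement of that decision as a standalone helper; irqs_should_be_enabled() and its parameters are illustrative names of mine, not kernel APIs:

#include <stdbool.h>

enum boot_state { SYSTEM_BOOTING, SYSTEM_SCHEDULING, SYSTEM_RUNNING };

/* Mirrors the enableirqs logic in the hunk above (illustrative sketch). */
static bool irqs_should_be_enabled(bool gfp_allows_blocking,
				   bool preempt_rt_full,
				   enum boot_state state)
{
	if (gfp_allows_blocking)
		return true;	/* the allocation may sleep anyway */
	if (preempt_rt_full && state > SYSTEM_BOOTING)
		return true;	/* RT: page allocator locks can sleep */
	return false;
}
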
@@ -1682,7 +1695,7 @@
 	page->frozen = 1;
 
 out:
-	if (gfpflags_allow_blocking(flags))
+	if (enableirqs)
 		local_irq_disable();
 	if (!page)
 		return NULL;
@@ -1740,6 +1753,16 @@
 	__free_pages(page, order);
 }
 
+static void free_delayed(struct list_head *h)
+{
+	while (!list_empty(h)) {
+		struct page *page = list_first_entry(h, struct page, lru);
+
+		list_del(&page->lru);
+		__free_slab(page->slab_cache, page);
+	}
+}
+
 static void rcu_free_slab(struct rcu_head *h)
 {
 	struct page *page = container_of(h, struct page, rcu_head);
@@ -1751,6 +1774,12 @@
 {
 	if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU)) {
 		call_rcu(&page->rcu_head, rcu_free_slab);
+	} else if (irqs_disabled()) {
+		struct slub_free_list *f = this_cpu_ptr(&slub_free_list);
+
+		raw_spin_lock(&f->lock);
+		list_add(&page->lru, &f->list);
+		raw_spin_unlock(&f->lock);
 	} else
 		__free_slab(s, page);
 }
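
Taken together, the slub_free_list, free_delayed() and the new branch in free_slab() above form a delayed-free scheme: when free_slab() runs with interrupts disabled it cannot (on RT) call into the page allocator, so the page is parked on the per-CPU list under a raw spinlock, and a later free_delayed() call walks the spliced-off batch and releases it. Below is a userspace sketch of the same pattern; pending_free, free_or_defer() and drain_deferred() are my own names, and a pthread mutex stands in for the raw spinlock:

#include <pthread.h>
#include <stdlib.h>

struct deferred {
	struct deferred *next;
	void *obj;
};

static pthread_mutex_t pending_lock = PTHREAD_MUTEX_INITIALIZER;
static struct deferred *pending_free;	/* stands in for the per-CPU list */

/* Analogue of free_slab(): free immediately or park for later. */
static void free_or_defer(void *obj, int can_free_now)
{
	struct deferred *d;

	if (can_free_now) {
		free(obj);			/* kernel: __free_slab() */
		return;
	}
	d = malloc(sizeof(*d));
	if (!d)
		abort();			/* keep the sketch simple */
	d->obj = obj;
	pthread_mutex_lock(&pending_lock);	/* kernel: raw_spin_lock() */
	d->next = pending_free;
	pending_free = d;
	pthread_mutex_unlock(&pending_lock);
}

/* Analogue of free_delayed() on a list spliced out under the lock. */
static void drain_deferred(void)
{
	struct deferred *batch;

	pthread_mutex_lock(&pending_lock);
	batch = pending_free;			/* kernel: list_splice_init() */
	pending_free = NULL;
	pthread_mutex_unlock(&pending_lock);

	while (batch) {
		struct deferred *d = batch;

		batch = d->next;
		free(d->obj);
		free(d);
	}
}

int main(void)
{
	free_or_defer(malloc(32), 0);	/* "interrupts off": park it */
	free_or_defer(malloc(32), 1);	/* safe context: free immediately */
	drain_deferred();		/* later, with "interrupts on" again */
	return 0;
}

Splicing the whole list out under the lock and only then freeing it keeps the lock hold time short, which is the same reason the kernel code hands the batch to free_delayed() after dropping the raw spinlock.
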
@@ -1858,7 +1887,7 @@
 	if (!n || !n->nr_partial)
 		return NULL;
 
-	spin_lock(&n->list_lock);
+	raw_spin_lock(&n->list_lock);
 	list_for_each_entry_safe(page, page2, &n->partial, lru) {
 		void *t;
 
@@ -1883,7 +1912,7 @@
 			break;
 
 	}
-	spin_unlock(&n->list_lock);
+	raw_spin_unlock(&n->list_lock);
 	return object;
 }
 
@@ -2135,7 +2164,7 @@
 			 * that acquire_slab() will see a slab page that
 			 * is frozen
 			 */
-			spin_lock(&n->list_lock);
+			raw_spin_lock(&n->list_lock);
 		}
 	} else {
 		m = M_FULL;
@@ -2146,7 +2175,7 @@
 			 * slabs from diagnostic functions will not see
 			 * any frozen slabs.
 			 */
-			spin_lock(&n->list_lock);
+			raw_spin_lock(&n->list_lock);
 		}
 	}
 
@@ -2181,7 +2210,7 @@
 		goto redo;
 
 	if (lock)
-		spin_unlock(&n->list_lock);
+		raw_spin_unlock(&n->list_lock);
 
 	if (m == M_FREE) {
 		stat(s, DEACTIVATE_EMPTY);
@@ -2216,10 +2245,10 @@
 		n2 = get_node(s, page_to_nid(page));
 		if (n != n2) {
 			if (n)
-				spin_unlock(&n->list_lock);
+				raw_spin_unlock(&n->list_lock);
 
 			n = n2;
-			spin_lock(&n->list_lock);
+			raw_spin_lock(&n->list_lock);
 		}
 
 		do {
@@ -2248,7 +2277,7 @@
 	}
 
 	if (n)
-		spin_unlock(&n->list_lock);
+		raw_spin_unlock(&n->list_lock);
 
 	while (discard_page) {
 		page = discard_page;
@@ -2285,14 +2314,21 @@
 			pobjects = oldpage->pobjects;
 			pages = oldpage->pages;
 			if (drain && pobjects > s->cpu_partial) {
+				struct slub_free_list *f;
 				unsigned long flags;
+				LIST_HEAD(tofree);
 				/*
 				 * partial array is full. Move the existing
 				 * set to the per node partial list.
 				 */
 				local_irq_save(flags);
 				unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
+				f = this_cpu_ptr(&slub_free_list);
+				raw_spin_lock(&f->lock);
+				list_splice_init(&f->list, &tofree);
+				raw_spin_unlock(&f->lock);
 				local_irq_restore(flags);
+				free_delayed(&tofree);
 				oldpage = NULL;
 				pobjects = 0;
 				pages = 0;
@@ -2362,7 +2398,19 @@
 
 static void flush_all(struct kmem_cache *s)
 {
+	LIST_HEAD(tofree);
+	int cpu;
+
 	on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1, GFP_ATOMIC);
+	for_each_online_cpu(cpu) {
+		struct slub_free_list *f;
+
+		f = &per_cpu(slub_free_list, cpu);
+		raw_spin_lock_irq(&f->lock);
+		list_splice_init(&f->list, &tofree);
+		raw_spin_unlock_irq(&f->lock);
+		free_delayed(&tofree);
+	}
 }
 
 /*
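
flush_all() above is the drain-everything counterpart: after flushing the per-CPU slabs it visits every online CPU's slub_free_list, splices it out under raw_spin_lock_irq(), and frees the batch through free_delayed() with no lock held. A compact userspace model of that loop; NWORKERS, struct pending_queue and drain_all() are illustrative names, not from the patch:

#include <pthread.h>
#include <stdlib.h>

#define NWORKERS 4

struct node {
	struct node *next;
};

struct pending_queue {
	pthread_mutex_t lock;	/* kernel: raw_spinlock_t */
	struct node *head;
};

static struct pending_queue queues[NWORKERS] = {
	{ PTHREAD_MUTEX_INITIALIZER, NULL },
	{ PTHREAD_MUTEX_INITIALIZER, NULL },
	{ PTHREAD_MUTEX_INITIALIZER, NULL },
	{ PTHREAD_MUTEX_INITIALIZER, NULL },
};

/* One caller empties every queue; freeing happens with no lock held. */
static void drain_all(void)
{
	for (int i = 0; i < NWORKERS; i++) {
		struct pending_queue *q = &queues[i];
		struct node *batch;

		pthread_mutex_lock(&q->lock);	/* kernel: raw_spin_lock_irq() */
		batch = q->head;		/* kernel: list_splice_init() */
		q->head = NULL;
		pthread_mutex_unlock(&q->lock);

		while (batch) {			/* kernel: free_delayed() */
			struct node *n = batch;

			batch = n->next;
			free(n);
		}
	}
}

int main(void)
{
	/* Park one dummy node on queue 0, then drain all queues. */
	struct node *n = malloc(sizeof(*n));

	if (!n)
		return 1;
	pthread_mutex_lock(&queues[0].lock);
	n->next = queues[0].head;
	queues[0].head = n;
	pthread_mutex_unlock(&queues[0].lock);

	drain_all();
	return 0;
}
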
@@ -2417,10 +2465,10 @@
 	unsigned long x = 0;
 	struct page *page;
 
-	spin_lock_irqsave(&n->list_lock, flags);
+	raw_spin_lock_irqsave(&n->list_lock, flags);
 	list_for_each_entry(page, &n->partial, lru)
 		x += get_count(page);
-	spin_unlock_irqrestore(&n->list_lock, flags);
+	raw_spin_unlock_irqrestore(&n->list_lock, flags);
 	return x;
 }
 #endif /* CONFIG_SLUB_DEBUG || CONFIG_SLUB_SYSFS */
@@ -2560,8 +2608,10 @@
  * already disabled (which is the case for bulk allocation).
  */
 static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
-			  unsigned long addr, struct kmem_cache_cpu *c)
+			  unsigned long addr, struct kmem_cache_cpu *c,
+			  struct list_head *to_free)
 {
+	struct slub_free_list *f;
 	void *freelist;
 	struct page *page;
 
@@ -2627,6 +2677,13 @@
 	VM_BUG_ON(!c->page->frozen);
 	c->freelist = get_freepointer(s, freelist);
 	c->tid = next_tid(c->tid);
+
+out:
+	f = this_cpu_ptr(&slub_free_list);
+	raw_spin_lock(&f->lock);
+	list_splice_init(&f->list, to_free);
+	raw_spin_unlock(&f->lock);
+
 	return freelist;
 
 new_slab:
@@ -2642,7 +2699,7 @@
 
 	if (unlikely(!freelist)) {
 		slab_out_of_memory(s, gfpflags, node);
-		return NULL;
+		goto out;
 	}
 
 	page = c->page;
@@ -2655,7 +2712,7 @@
 		goto new_slab; /* Slab failed checks. Next slab needed */
 
 	deactivate_slab(s, page, get_freepointer(s, freelist), c);
-	return freelist;
+	goto out;
 }
 
 /*
@@ -2667,6 +2724,7 @@
 {
 	void *p;
 	unsigned long flags;
+	LIST_HEAD(tofree);
 
 	local_irq_save(flags);
 #ifdef CONFIG_PREEMPT
@@ -2678,8 +2736,9 @@
 	c = this_cpu_ptr(s->cpu_slab);
 #endif
 
-	p = ___slab_alloc(s, gfpflags, node, addr, c);
+	p = ___slab_alloc(s, gfpflags, node, addr, c, &tofree);
 	local_irq_restore(flags);
+	free_delayed(&tofree);
 	return p;
 }
 
@@ -2878,7 +2937,7 @@
 
 	do {
 		if (unlikely(n)) {
-			spin_unlock_irqrestore(&n->list_lock, flags);
+			raw_spin_unlock_irqrestore(&n->list_lock, flags);
 			n = NULL;
 		}
 		prior = page->freelist;
@@ -2910,7 +2969,7 @@
 				 * Otherwise the list_lock will synchronize with
 				 * other processors updating the list of slabs.
 				 */
-				spin_lock_irqsave(&n->list_lock, flags);
+				raw_spin_lock_irqsave(&n->list_lock, flags);
 
 			}
 		}
@@ -2952,7 +3011,7 @@
 		add_partial(n, page, DEACTIVATE_TO_TAIL);
 		stat(s, FREE_ADD_PARTIAL);
 	}
-	spin_unlock_irqrestore(&n->list_lock, flags);
+	raw_spin_unlock_irqrestore(&n->list_lock, flags);
 	return;
 
 slab_empty:
@@ -2967,7 +3026,7 @@
 		remove_full(s, n, page);
 	}
 
-	spin_unlock_irqrestore(&n->list_lock, flags);
+	raw_spin_unlock_irqrestore(&n->list_lock, flags);
 	stat(s, FREE_SLAB);
 	discard_slab(s, page);
 }
@@ -3172,6 +3231,7 @@
 			  void **p)
 {
 	struct kmem_cache_cpu *c;
+	LIST_HEAD(to_free);
 	int i;
 
 	/* memcg and kmem_cache debug support */
@@ -3204,7 +3264,7 @@
 			 * of re-populating per CPU c->freelist
 			 */
 			p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE,
					    _RET_IP_, c);
+					    _RET_IP_, c, &to_free);
 			if (unlikely(!p[i]))
 				goto error;
 
@@ -3219,6 +3279,7 @@
 	}
 	c->tid = next_tid(c->tid);
 	local_irq_enable();
+	free_delayed(&to_free);
 
 	/* Clear memory outside IRQ disabled fastpath loop */
 	if (unlikely(slab_want_init_on_alloc(flags, s))) {
@@ -3233,6 +3294,7 @@
 	return i;
 error:
 	local_irq_enable();
+	free_delayed(&to_free);
 	slab_post_alloc_hook(s, flags, i, p);
 	__kmem_cache_free_bulk(s, i, p);
 	return 0;
@@ -3368,7 +3430,7 @@
 init_kmem_cache_node(struct kmem_cache_node *n)
 {
 	n->nr_partial = 0;
-	spin_lock_init(&n->list_lock);
+	raw_spin_lock_init(&n->list_lock);
 	INIT_LIST_HEAD(&n->partial);
 #ifdef CONFIG_SLUB_DEBUG
 	atomic_long_set(&n->nr_slabs, 0);
@@ -3721,6 +3783,11 @@
 			      const char *text)
 {
 #ifdef CONFIG_SLUB_DEBUG
+#ifdef CONFIG_PREEMPT_RT_BASE
+	/* XXX move out of irq-off section */
+	slab_err(s, page, text, s->name);
+#else
+
 	void *addr = page_address(page);
 	void *p;
 	unsigned long *map = kcalloc(BITS_TO_LONGS(page->objects),
@@ -3742,6 +3809,7 @@
 	slab_unlock(page);
 	kfree(map);
 #endif
+#endif
 }
 
 /*
@@ -3755,7 +3823,7 @@
 	struct page *page, *h;
 
 	BUG_ON(irqs_disabled());
-	spin_lock_irq(&n->list_lock);
+	raw_spin_lock_irq(&n->list_lock);
 	list_for_each_entry_safe(page, h, &n->partial, lru) {
 		if (!page->inuse) {
 			remove_partial(n, page);
@@ -3765,7 +3833,7 @@
 			"Objects remaining in %s on __kmem_cache_shutdown()");
 		}
 	}
-	spin_unlock_irq(&n->list_lock);
+	raw_spin_unlock_irq(&n->list_lock);
 
 	list_for_each_entry_safe(page, h, &discard, lru)
 		discard_slab(s, page);
@@ -4039,7 +4107,7 @@
 		for (i = 0; i < SHRINK_PROMOTE_MAX; i++)
 			INIT_LIST_HEAD(promote + i);
 
-		spin_lock_irqsave(&n->list_lock, flags);
+		raw_spin_lock_irqsave(&n->list_lock, flags);
 
 		/*
 		 * Build lists of slabs to discard or promote.
@@ -4070,7 +4138,7 @@
 		for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--)
 			list_splice(promote + i, &n->partial);
 
-		spin_unlock_irqrestore(&n->list_lock, flags);
+		raw_spin_unlock_irqrestore(&n->list_lock, flags);
 
 		/* Release empty slabs */
 		list_for_each_entry_safe(page, t, &discard, lru)
@@ -4283,6 +4351,12 @@
 {
 	static __initdata struct kmem_cache boot_kmem_cache,
 		boot_kmem_cache_node;
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		raw_spin_lock_init(&per_cpu(slub_free_list, cpu).lock);
+		INIT_LIST_HEAD(&per_cpu(slub_free_list, cpu).list);
+	}
 
 	if (debug_guardpage_minorder())
 		slub_max_order = 0;
@@ -4486,7 +4560,7 @@
 	struct page *page;
 	unsigned long flags;
 
-	spin_lock_irqsave(&n->list_lock, flags);
+	raw_spin_lock_irqsave(&n->list_lock, flags);
 
 	list_for_each_entry(page, &n->partial, lru) {
 		validate_slab_slab(s, page, map);
@@ -4508,7 +4582,7 @@
 		       s->name, count, atomic_long_read(&n->nr_slabs));
 
 out:
-	spin_unlock_irqrestore(&n->list_lock, flags);
+	raw_spin_unlock_irqrestore(&n->list_lock, flags);
 	return count;
 }
 
@@ -4564,6 +4638,9 @@
 {
 	struct location *l;
 	int order;
+
+	if (IS_ENABLED(CONFIG_PREEMPT_RT) && flags == GFP_ATOMIC)
+		return 0;
 
 	order = get_order(sizeof(struct location) * max);
 
@@ -4698,12 +4775,12 @@
 		if (!atomic_long_read(&n->nr_slabs))
 			continue;
 
-		spin_lock_irqsave(&n->list_lock, flags);
+		raw_spin_lock_irqsave(&n->list_lock, flags);
 		list_for_each_entry(page, &n->partial, lru)
 			process_slab(&t, s, page, alloc, map);
 		list_for_each_entry(page, &n->full, lru)
 			process_slab(&t, s, page, alloc, map);
-		spin_unlock_irqrestore(&n->list_lock, flags);
+		raw_spin_unlock_irqrestore(&n->list_lock, flags);
 	}
 
 	for (i = 0; i < t.count; i++) {