```diff
@@ -233,7 +233,7 @@
 	parent->shared = NULL;
 	parent->alien = NULL;
 	parent->colour_next = 0;
-	spin_lock_init(&parent->list_lock);
+	raw_spin_lock_init(&parent->list_lock);
 	parent->free_objects = 0;
 	parent->free_touched = 0;
 }
```
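The initializer now uses raw_spin_lock_init(), which only compiles if list_lock itself is a raw_spinlock_t rather than a spinlock_t. That declaration lives outside the hunks shown here (in mm/slab.h in mainline trees), so as a rough, hypothetical sketch of what this change implies:

```c
/*
 * Sketch only: the declaration is in mm/slab.h and is not part of the
 * hunks shown in this excerpt. The point is the type change of one field.
 */
struct kmem_cache_node {
	raw_spinlock_t list_lock;	/* previously: spinlock_t list_lock; */
	/* ... remaining members unchanged ... */
};
```

Presumably the motivation is PREEMPT_RT: there a plain spinlock_t becomes a sleeping lock, while a raw_spinlock_t keeps spinning. Since the list_lock sections below run with interrupts disabled (note the check_irq_off() calls next to several of the converted lock sites), they cannot sleep, so the lock has to remain a true spinlock.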
```diff
@@ -587,9 +587,9 @@
 	page_node = page_to_nid(page);
 	n = get_node(cachep, page_node);
 
-	spin_lock(&n->list_lock);
+	raw_spin_lock(&n->list_lock);
 	free_block(cachep, &objp, 1, page_node, &list);
-	spin_unlock(&n->list_lock);
+	raw_spin_unlock(&n->list_lock);
 
 	slabs_destroy(cachep, &list);
 }
@@ -718,7 +718,7 @@
 	struct kmem_cache_node *n = get_node(cachep, node);
 
 	if (ac->avail) {
-		spin_lock(&n->list_lock);
+		raw_spin_lock(&n->list_lock);
 		/*
 		 * Stuff objects into the remote nodes shared array first.
 		 * That way we could avoid the overhead of putting the objects
@@ -729,7 +729,7 @@
 
 		free_block(cachep, ac->entry, ac->avail, node, list);
 		ac->avail = 0;
-		spin_unlock(&n->list_lock);
+		raw_spin_unlock(&n->list_lock);
 	}
 }
 
@@ -802,9 +802,9 @@
 		slabs_destroy(cachep, &list);
 	} else {
 		n = get_node(cachep, page_node);
-		spin_lock(&n->list_lock);
+		raw_spin_lock(&n->list_lock);
 		free_block(cachep, &objp, 1, page_node, &list);
-		spin_unlock(&n->list_lock);
+		raw_spin_unlock(&n->list_lock);
 		slabs_destroy(cachep, &list);
 	}
 	return 1;
@@ -845,10 +845,10 @@
 	 */
 	n = get_node(cachep, node);
 	if (n) {
-		spin_lock_irq(&n->list_lock);
+		raw_spin_lock_irq(&n->list_lock);
 		n->free_limit = (1 + nr_cpus_node(node)) * cachep->batchcount +
 				cachep->num;
-		spin_unlock_irq(&n->list_lock);
+		raw_spin_unlock_irq(&n->list_lock);
 
 		return 0;
 	}
@@ -927,7 +927,7 @@
 		goto fail;
 
 	n = get_node(cachep, node);
-	spin_lock_irq(&n->list_lock);
+	raw_spin_lock_irq(&n->list_lock);
 	if (n->shared && force_change) {
 		free_block(cachep, n->shared->entry,
 				n->shared->avail, node, &list);
@@ -945,7 +945,7 @@
 		new_alien = NULL;
 	}
 
-	spin_unlock_irq(&n->list_lock);
+	raw_spin_unlock_irq(&n->list_lock);
 	slabs_destroy(cachep, &list);
 
 	/*
@@ -984,7 +984,7 @@
 		if (!n)
 			continue;
 
-		spin_lock_irq(&n->list_lock);
+		raw_spin_lock_irq(&n->list_lock);
 
 		/* Free limit for this kmem_cache_node */
 		n->free_limit -= cachep->batchcount;
@@ -997,7 +997,7 @@
 		}
 
 		if (!cpumask_empty(mask)) {
-			spin_unlock_irq(&n->list_lock);
+			raw_spin_unlock_irq(&n->list_lock);
 			goto free_slab;
 		}
 
@@ -1011,7 +1011,7 @@
 		alien = n->alien;
 		n->alien = NULL;
 
-		spin_unlock_irq(&n->list_lock);
+		raw_spin_unlock_irq(&n->list_lock);
 
 		kfree(shared);
 		if (alien) {
@@ -1195,7 +1195,7 @@
 	/*
 	 * Do not assume that spinlocks can be initialized via memcpy:
 	 */
-	spin_lock_init(&ptr->list_lock);
+	raw_spin_lock_init(&ptr->list_lock);
 
 	MAKE_ALL_LISTS(cachep, ptr, nodeid);
 	cachep->node[nodeid] = ptr;
@@ -1366,11 +1366,11 @@
 	for_each_kmem_cache_node(cachep, node, n) {
 		unsigned long total_slabs, free_slabs, free_objs;
 
-		spin_lock_irqsave(&n->list_lock, flags);
+		raw_spin_lock_irqsave(&n->list_lock, flags);
 		total_slabs = n->total_slabs;
 		free_slabs = n->free_slabs;
 		free_objs = n->free_objects;
-		spin_unlock_irqrestore(&n->list_lock, flags);
+		raw_spin_unlock_irqrestore(&n->list_lock, flags);
 
 		pr_warn(" node %d: slabs: %ld/%ld, objs: %ld/%ld\n",
 			node, total_slabs - free_slabs, total_slabs,
```
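The _irqsave/_irqrestore variants keep the same calling convention for raw locks, so call sites like the one above only change the prefix. A minimal hypothetical helper (the function name is made up; get_node() and the counter are the ones used in the hunk above) showing the converted pattern:

```c
/* Hypothetical helper, for illustration only: snapshot a per-node counter. */
static unsigned long snapshot_free_objects(struct kmem_cache *cachep, int node)
{
	struct kmem_cache_node *n = get_node(cachep, node);
	unsigned long flags, free_objs;

	raw_spin_lock_irqsave(&n->list_lock, flags);		/* disables IRQs, saves prior state */
	free_objs = n->free_objects;
	raw_spin_unlock_irqrestore(&n->list_lock, flags);	/* restores IRQ state from flags */

	return free_objs;
}
```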
```diff
@@ -2173,7 +2173,7 @@
 {
 #ifdef CONFIG_SMP
 	check_irq_off();
-	assert_spin_locked(&get_node(cachep, numa_mem_id())->list_lock);
+	assert_raw_spin_locked(&get_node(cachep, numa_mem_id())->list_lock);
 #endif
 }
 
@@ -2181,7 +2181,7 @@
 {
 #ifdef CONFIG_SMP
 	check_irq_off();
-	assert_spin_locked(&get_node(cachep, node)->list_lock);
+	assert_raw_spin_locked(&get_node(cachep, node)->list_lock);
 #endif
 }
 
```
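These two debug helpers are why the assertions change as well: assert_spin_locked() expects a spinlock_t and no longer matches the converted field. For reference, the mainline definitions in include/linux/spinlock.h (not touched by this patch) are roughly:

```c
/* include/linux/spinlock.h (shown for reference, unchanged by this patch) */
#define assert_raw_spin_locked(x)	BUG_ON(!raw_spin_is_locked(x))
#define assert_spin_locked(lock)	assert_raw_spin_locked(&(lock)->rlock)
```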
```diff
@@ -2221,9 +2221,9 @@
 	check_irq_off();
 	ac = cpu_cache_get(cachep);
 	n = get_node(cachep, node);
-	spin_lock(&n->list_lock);
+	raw_spin_lock(&n->list_lock);
 	free_block(cachep, ac->entry, ac->avail, node, &list);
-	spin_unlock(&n->list_lock);
+	raw_spin_unlock(&n->list_lock);
 	slabs_destroy(cachep, &list);
 	ac->avail = 0;
 }
@@ -2241,9 +2241,9 @@
 			drain_alien_cache(cachep, n->alien);
 
 	for_each_kmem_cache_node(cachep, node, n) {
-		spin_lock_irq(&n->list_lock);
+		raw_spin_lock_irq(&n->list_lock);
 		drain_array_locked(cachep, n->shared, node, true, &list);
-		spin_unlock_irq(&n->list_lock);
+		raw_spin_unlock_irq(&n->list_lock);
 
 		slabs_destroy(cachep, &list);
 	}
```
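Both call sites above follow the same shape: free_block() and drain_array_locked() only move victim slabs onto a local list while the raw lock is held, and slabs_destroy() returns their pages after the lock is dropped. A condensed sketch of that pattern (the function name is invented; the helpers are the ones visible in the hunks):

```c
/*
 * Condensed sketch of the pattern used above (illustrative only, not a
 * function from this patch): collect victim slabs on a local list while
 * the raw lock is held, then free their pages after dropping it, so the
 * page allocator is never entered from inside the raw-locked section.
 */
static void example_drain_node(struct kmem_cache *cachep, int node)
{
	struct kmem_cache_node *n = get_node(cachep, node);
	struct array_cache *ac = cpu_cache_get(cachep);
	LIST_HEAD(list);			/* slabs to destroy, filled under the lock */

	check_irq_off();
	raw_spin_lock(&n->list_lock);
	free_block(cachep, ac->entry, ac->avail, node, &list);
	ac->avail = 0;
	raw_spin_unlock(&n->list_lock);

	slabs_destroy(cachep, &list);		/* frees pages; runs without the lock */
}
```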
```diff
@@ -2265,10 +2265,10 @@
 	nr_freed = 0;
 	while (nr_freed < tofree && !list_empty(&n->slabs_free)) {
 
-		spin_lock_irq(&n->list_lock);
+		raw_spin_lock_irq(&n->list_lock);
 		p = n->slabs_free.prev;
 		if (p == &n->slabs_free) {
-			spin_unlock_irq(&n->list_lock);
+			raw_spin_unlock_irq(&n->list_lock);
 			goto out;
 		}
 
@@ -2281,7 +2281,7 @@
 		 * to the cache.
 		 */
 		n->free_objects -= cache->num;
-		spin_unlock_irq(&n->list_lock);
+		raw_spin_unlock_irq(&n->list_lock);
 		slab_destroy(cache, page);
 		nr_freed++;
 	}
@@ -2735,7 +2735,7 @@
 	INIT_LIST_HEAD(&page->lru);
 	n = get_node(cachep, page_to_nid(page));
 
-	spin_lock(&n->list_lock);
+	raw_spin_lock(&n->list_lock);
 	n->total_slabs++;
 	if (!page->active) {
 		list_add_tail(&page->lru, &(n->slabs_free));
@@ -2745,7 +2745,7 @@
 
 	STATS_INC_GROWN(cachep);
 	n->free_objects += cachep->num - page->active;
-	spin_unlock(&n->list_lock);
+	raw_spin_unlock(&n->list_lock);
 
 	fixup_objfreelist_debug(cachep, &list);
 }
@@ -2913,7 +2913,7 @@
 {
 	struct page *page;
 
-	assert_spin_locked(&n->list_lock);
+	assert_raw_spin_locked(&n->list_lock);
 	page = list_first_entry_or_null(&n->slabs_partial, struct page, lru);
 	if (!page) {
 		n->free_touched = 1;
@@ -2939,10 +2939,10 @@
 	if (!gfp_pfmemalloc_allowed(flags))
 		return NULL;
 
-	spin_lock(&n->list_lock);
+	raw_spin_lock(&n->list_lock);
 	page = get_first_slab(n, true);
 	if (!page) {
-		spin_unlock(&n->list_lock);
+		raw_spin_unlock(&n->list_lock);
 		return NULL;
 	}
 
@@ -2951,7 +2951,7 @@
 
 	fixup_slab_list(cachep, n, page, &list);
 
-	spin_unlock(&n->list_lock);
+	raw_spin_unlock(&n->list_lock);
 	fixup_objfreelist_debug(cachep, &list);
 
 	return obj;
@@ -3010,7 +3010,7 @@
 	if (!n->free_objects && (!shared || !shared->avail))
 		goto direct_grow;
 
-	spin_lock(&n->list_lock);
+	raw_spin_lock(&n->list_lock);
 	shared = READ_ONCE(n->shared);
 
 	/* See if we can refill from the shared array */
@@ -3034,7 +3034,7 @@
 must_grow:
 	n->free_objects -= ac->avail;
 alloc_done:
-	spin_unlock(&n->list_lock);
+	raw_spin_unlock(&n->list_lock);
 	fixup_objfreelist_debug(cachep, &list);
 
 direct_grow:
@@ -3259,7 +3259,7 @@
 	BUG_ON(!n);
 
 	check_irq_off();
-	spin_lock(&n->list_lock);
+	raw_spin_lock(&n->list_lock);
 	page = get_first_slab(n, false);
 	if (!page)
 		goto must_grow;
@@ -3277,12 +3277,12 @@
 
 	fixup_slab_list(cachep, n, page, &list);
 
-	spin_unlock(&n->list_lock);
+	raw_spin_unlock(&n->list_lock);
 	fixup_objfreelist_debug(cachep, &list);
 	return obj;
 
 must_grow:
-	spin_unlock(&n->list_lock);
+	raw_spin_unlock(&n->list_lock);
 	page = cache_grow_begin(cachep, gfp_exact_node(flags), nodeid);
 	if (page) {
 		/* This slab isn't counted yet so don't update free_objects */
@@ -3458,7 +3458,7 @@
 
 	check_irq_off();
 	n = get_node(cachep, node);
-	spin_lock(&n->list_lock);
+	raw_spin_lock(&n->list_lock);
 	if (n->shared) {
 		struct array_cache *shared_array = n->shared;
 		int max = shared_array->limit - shared_array->avail;
@@ -3487,7 +3487,7 @@
 		STATS_SET_FREEABLE(cachep, i);
 	}
 #endif
-	spin_unlock(&n->list_lock);
+	raw_spin_unlock(&n->list_lock);
 	slabs_destroy(cachep, &list);
 	ac->avail -= batchcount;
 	memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
@@ -3897,9 +3897,9 @@
 
 		node = cpu_to_mem(cpu);
 		n = get_node(cachep, node);
-		spin_lock_irq(&n->list_lock);
+		raw_spin_lock_irq(&n->list_lock);
 		free_block(cachep, ac->entry, ac->avail, node, &list);
-		spin_unlock_irq(&n->list_lock);
+		raw_spin_unlock_irq(&n->list_lock);
 		slabs_destroy(cachep, &list);
 	}
 	free_percpu(prev);
@@ -4024,9 +4024,9 @@
 		return;
 	}
 
-	spin_lock_irq(&n->list_lock);
+	raw_spin_lock_irq(&n->list_lock);
 	drain_array_locked(cachep, ac, node, false, &list);
-	spin_unlock_irq(&n->list_lock);
+	raw_spin_unlock_irq(&n->list_lock);
 
 	slabs_destroy(cachep, &list);
 }
@@ -4110,7 +4110,7 @@
 
 	for_each_kmem_cache_node(cachep, node, n) {
 		check_irq_on();
-		spin_lock_irq(&n->list_lock);
+		raw_spin_lock_irq(&n->list_lock);
 
 		total_slabs += n->total_slabs;
 		free_slabs += n->free_slabs;
@@ -4119,7 +4119,7 @@
 		if (n->shared)
 			shared_avail += n->shared->avail;
 
-		spin_unlock_irq(&n->list_lock);
+		raw_spin_unlock_irq(&n->list_lock);
 	}
 	num_objs = total_slabs * cachep->num;
 	active_slabs = total_slabs - free_slabs;
@@ -4339,12 +4339,12 @@
 		for_each_kmem_cache_node(cachep, node, n) {
 
 			check_irq_on();
-			spin_lock_irq(&n->list_lock);
+			raw_spin_lock_irq(&n->list_lock);
 
 			list_for_each_entry(page, &n->slabs_full, lru)
 				handle_slab(x, cachep, page);
 			list_for_each_entry(page, &n->slabs_partial, lru)
 				handle_slab(x, cachep, page);
-			spin_unlock_irq(&n->list_lock);
+			raw_spin_unlock_irq(&n->list_lock);
 		}
 	} while (!is_store_user_clean(cachep));
```