```diff
@@ -234,7 +234,7 @@
 	parent->shared = NULL;
 	parent->alien = NULL;
 	parent->colour_next = 0;
-	raw_spin_lock_init(&parent->list_lock);
+	spin_lock_init(&parent->list_lock);
 	parent->free_objects = 0;
 	parent->free_touched = 0;
 }
```
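The hunk above sets the pattern for the entire patch: every operation on the per-node `list_lock` (the hunks appear to be from the SLAB allocator, mm/slab.c) moves from the `raw_spin_*` API to the plain `spin_*` API. The distinction only matters on PREEMPT_RT, where `spinlock_t` becomes a sleeping, rtmutex-backed lock whose critical sections stay preemptible, while `raw_spinlock_t` always spins with preemption disabled. The API change implies a matching type change in the struct definition, which sits outside these hunks; a minimal sketch of the assumed field change:

```c
#include <linux/list.h>
#include <linux/spinlock.h>

/*
 * Sketch only -- the real struct kmem_cache_node carries more state,
 * and the field's type change is assumed from the lock calls in this
 * patch rather than shown in these hunks.
 */
struct kmem_cache_node {
	spinlock_t list_lock;		/* was: raw_spinlock_t list_lock; */
	struct list_head slabs_partial;	/* slab lists guarded by list_lock */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long free_objects;
	unsigned int free_limit;
};
```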
```diff
@@ -559,9 +559,9 @@
 	page_node = page_to_nid(page);
 	n = get_node(cachep, page_node);
 
-	raw_spin_lock(&n->list_lock);
+	spin_lock(&n->list_lock);
 	free_block(cachep, &objp, 1, page_node, &list);
-	raw_spin_unlock(&n->list_lock);
+	spin_unlock(&n->list_lock);
 
 	slabs_destroy(cachep, &list);
 }
@@ -699,7 +699,7 @@
 	struct kmem_cache_node *n = get_node(cachep, node);
 
 	if (ac->avail) {
-		raw_spin_lock(&n->list_lock);
+		spin_lock(&n->list_lock);
 		/*
 		 * Stuff objects into the remote nodes shared array first.
 		 * That way we could avoid the overhead of putting the objects
@@ -710,7 +710,7 @@
 
 		free_block(cachep, ac->entry, ac->avail, node, list);
 		ac->avail = 0;
-		raw_spin_unlock(&n->list_lock);
+		spin_unlock(&n->list_lock);
 	}
 }
 
@@ -783,9 +783,9 @@
 		slabs_destroy(cachep, &list);
 	} else {
 		n = get_node(cachep, page_node);
-		raw_spin_lock(&n->list_lock);
+		spin_lock(&n->list_lock);
 		free_block(cachep, &objp, 1, page_node, &list);
-		raw_spin_unlock(&n->list_lock);
+		spin_unlock(&n->list_lock);
 		slabs_destroy(cachep, &list);
 	}
 	return 1;
@@ -826,10 +826,10 @@
 	 */
 	n = get_node(cachep, node);
 	if (n) {
-		raw_spin_lock_irq(&n->list_lock);
+		spin_lock_irq(&n->list_lock);
 		n->free_limit = (1 + nr_cpus_node(node)) * cachep->batchcount +
 				cachep->num;
-		raw_spin_unlock_irq(&n->list_lock);
+		spin_unlock_irq(&n->list_lock);
 
 		return 0;
 	}
@@ -908,7 +908,7 @@
 			goto fail;
 
 	n = get_node(cachep, node);
-	raw_spin_lock_irq(&n->list_lock);
+	spin_lock_irq(&n->list_lock);
 	if (n->shared && force_change) {
 		free_block(cachep, n->shared->entry,
 				n->shared->avail, node, &list);
@@ -926,7 +926,7 @@
 		new_alien = NULL;
 	}
 
-	raw_spin_unlock_irq(&n->list_lock);
+	spin_unlock_irq(&n->list_lock);
 	slabs_destroy(cachep, &list);
 
 	/*
```
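The free paths above all follow one discipline, which the patch leaves intact: hold the node's `list_lock` only around the list manipulation, and do the actual teardown after dropping it. Condensed from the hunk at line 559:

```c
	LIST_HEAD(list);

	spin_lock(&n->list_lock);
	free_block(cachep, &objp, 1, page_node, &list);	/* detach objects */
	spin_unlock(&n->list_lock);

	slabs_destroy(cachep, &list);	/* free emptied slabs outside the lock */
```

Keeping `slabs_destroy()` outside the lock keeps hold times short with either lock flavour, which is what makes a mechanical API swap like this safe.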
```diff
@@ -965,7 +965,7 @@
 		if (!n)
 			continue;
 
-		raw_spin_lock_irq(&n->list_lock);
+		spin_lock_irq(&n->list_lock);
 
 		/* Free limit for this kmem_cache_node */
 		n->free_limit -= cachep->batchcount;
@@ -976,7 +976,7 @@
 		nc->avail = 0;
 
 		if (!cpumask_empty(mask)) {
-			raw_spin_unlock_irq(&n->list_lock);
+			spin_unlock_irq(&n->list_lock);
 			goto free_slab;
 		}
 
@@ -990,7 +990,7 @@
 		alien = n->alien;
 		n->alien = NULL;
 
-		raw_spin_unlock_irq(&n->list_lock);
+		spin_unlock_irq(&n->list_lock);
 
 		kfree(shared);
 		if (alien) {
@@ -1174,7 +1174,7 @@
 	/*
 	 * Do not assume that spinlocks can be initialized via memcpy:
 	 */
-	raw_spin_lock_init(&ptr->list_lock);
+	spin_lock_init(&ptr->list_lock);
 
 	MAKE_ALL_LISTS(cachep, ptr, nodeid);
 	cachep->node[nodeid] = ptr;
```
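Note the bootstrap hunk at line 1174: the surrounding code copies a `kmem_cache_node` wholesale, and, as the in-tree comment says, spinlocks cannot be initialized via memcpy, so the copy gets a fresh `spin_lock_init()`. Lock state (including any debugging/lockdep bookkeeping) must never be duplicated byte-for-byte.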
```diff
@@ -1345,11 +1345,11 @@
 	for_each_kmem_cache_node(cachep, node, n) {
 		unsigned long total_slabs, free_slabs, free_objs;
 
-		raw_spin_lock_irqsave(&n->list_lock, flags);
+		spin_lock_irqsave(&n->list_lock, flags);
 		total_slabs = n->total_slabs;
 		free_slabs = n->free_slabs;
 		free_objs = n->free_objects;
-		raw_spin_unlock_irqrestore(&n->list_lock, flags);
+		spin_unlock_irqrestore(&n->list_lock, flags);
 
 		pr_warn("  node %d: slabs: %ld/%ld, objs: %ld/%ld\n",
 			node, total_slabs - free_slabs, total_slabs,
@@ -2106,7 +2106,7 @@
 {
 #ifdef CONFIG_SMP
 	check_irq_off();
-	assert_raw_spin_locked(&get_node(cachep, numa_mem_id())->list_lock);
+	assert_spin_locked(&get_node(cachep, numa_mem_id())->list_lock);
 #endif
 }
 
@@ -2114,7 +2114,7 @@
 {
 #ifdef CONFIG_SMP
 	check_irq_off();
-	assert_raw_spin_locked(&get_node(cachep, node)->list_lock);
+	assert_spin_locked(&get_node(cachep, node)->list_lock);
 #endif
 }
 
```
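The two `#ifdef CONFIG_SMP` hunks above (presumably the `check_spinlock_acquired()`/`check_spinlock_acquired_node()` debug helpers; the diff shows only their bodies) have to switch assertions together with the lock type, since `assert_raw_spin_locked()` and `assert_spin_locked()` inspect different lock structures. Post-patch, the node variant reads roughly:

```c
/* Signature assumed from context; the hunks show only the body. */
static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
{
#ifdef CONFIG_SMP
	check_irq_off();
	assert_spin_locked(&get_node(cachep, node)->list_lock);
#endif
}
```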
```diff
@@ -2154,9 +2154,9 @@
 	check_irq_off();
 	ac = cpu_cache_get(cachep);
 	n = get_node(cachep, node);
-	raw_spin_lock(&n->list_lock);
+	spin_lock(&n->list_lock);
 	free_block(cachep, ac->entry, ac->avail, node, &list);
-	raw_spin_unlock(&n->list_lock);
+	spin_unlock(&n->list_lock);
 	ac->avail = 0;
 	slabs_destroy(cachep, &list);
 }
@@ -2174,9 +2174,9 @@
 			drain_alien_cache(cachep, n->alien);
 
 	for_each_kmem_cache_node(cachep, node, n) {
-		raw_spin_lock_irq(&n->list_lock);
+		spin_lock_irq(&n->list_lock);
 		drain_array_locked(cachep, n->shared, node, true, &list);
-		raw_spin_unlock_irq(&n->list_lock);
+		spin_unlock_irq(&n->list_lock);
 
 		slabs_destroy(cachep, &list);
 	}
@@ -2198,10 +2198,10 @@
 	nr_freed = 0;
 	while (nr_freed < tofree && !list_empty(&n->slabs_free)) {
 
-		raw_spin_lock_irq(&n->list_lock);
+		spin_lock_irq(&n->list_lock);
 		p = n->slabs_free.prev;
 		if (p == &n->slabs_free) {
-			raw_spin_unlock_irq(&n->list_lock);
+			spin_unlock_irq(&n->list_lock);
 			goto out;
 		}
 
@@ -2214,7 +2214,7 @@
 		 * to the cache.
 		 */
 		n->free_objects -= cache->num;
-		raw_spin_unlock_irq(&n->list_lock);
+		spin_unlock_irq(&n->list_lock);
 		slab_destroy(cache, page);
 		nr_freed++;
 	}
@@ -2650,7 +2650,7 @@
 	INIT_LIST_HEAD(&page->slab_list);
 	n = get_node(cachep, page_to_nid(page));
 
-	raw_spin_lock(&n->list_lock);
+	spin_lock(&n->list_lock);
 	n->total_slabs++;
 	if (!page->active) {
 		list_add_tail(&page->slab_list, &n->slabs_free);
@@ -2660,7 +2660,7 @@
 
 	STATS_INC_GROWN(cachep);
 	n->free_objects += cachep->num - page->active;
-	raw_spin_unlock(&n->list_lock);
+	spin_unlock(&n->list_lock);
 
 	fixup_objfreelist_debug(cachep, &list);
 }
```
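The hunk at line 2198 (`drain_freelist()`, by the look of the loop) shows the finest-grained use of the lock: it is re-acquired for every free slab and released before `slab_destroy()`, so page freeing never runs under the `list_lock`. Condensed, with the elided unlink marked:

```c
	while (nr_freed < tofree && !list_empty(&n->slabs_free)) {
		spin_lock_irq(&n->list_lock);
		p = n->slabs_free.prev;
		if (p == &n->slabs_free) {	/* raced: list drained under us */
			spin_unlock_irq(&n->list_lock);
			goto out;
		}
		/* ... unlink the page (elided between the hunks above) ... */
		n->free_objects -= cache->num;
		spin_unlock_irq(&n->list_lock);
		slab_destroy(cache, page);	/* page freeing outside the lock */
		nr_freed++;
	}
```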
```diff
@@ -2826,7 +2826,7 @@
 {
 	struct page *page;
 
-	assert_raw_spin_locked(&n->list_lock);
+	assert_spin_locked(&n->list_lock);
 	page = list_first_entry_or_null(&n->slabs_partial, struct page,
 					slab_list);
 	if (!page) {
@@ -2853,10 +2853,10 @@
 	if (!gfp_pfmemalloc_allowed(flags))
 		return NULL;
 
-	raw_spin_lock(&n->list_lock);
+	spin_lock(&n->list_lock);
 	page = get_first_slab(n, true);
 	if (!page) {
-		raw_spin_unlock(&n->list_lock);
+		spin_unlock(&n->list_lock);
 		return NULL;
 	}
 
@@ -2865,7 +2865,7 @@
 
 	fixup_slab_list(cachep, n, page, &list);
 
-	raw_spin_unlock(&n->list_lock);
+	spin_unlock(&n->list_lock);
 	fixup_objfreelist_debug(cachep, &list);
 
 	return obj;
@@ -2924,7 +2924,7 @@
 	if (!n->free_objects && (!shared || !shared->avail))
 		goto direct_grow;
 
-	raw_spin_lock(&n->list_lock);
+	spin_lock(&n->list_lock);
 	shared = READ_ONCE(n->shared);
 
 	/* See if we can refill from the shared array */
@@ -2948,7 +2948,7 @@
 must_grow:
 	n->free_objects -= ac->avail;
 alloc_done:
-	raw_spin_unlock(&n->list_lock);
+	spin_unlock(&n->list_lock);
 	fixup_objfreelist_debug(cachep, &list);
 
 direct_grow:
@@ -3172,7 +3172,7 @@
 	BUG_ON(!n);
 
 	check_irq_off();
-	raw_spin_lock(&n->list_lock);
+	spin_lock(&n->list_lock);
 	page = get_first_slab(n, false);
 	if (!page)
 		goto must_grow;
@@ -3190,12 +3190,12 @@
 
 	fixup_slab_list(cachep, n, page, &list);
 
-	raw_spin_unlock(&n->list_lock);
+	spin_unlock(&n->list_lock);
 	fixup_objfreelist_debug(cachep, &list);
 	return obj;
 
 must_grow:
-	raw_spin_unlock(&n->list_lock);
+	spin_unlock(&n->list_lock);
 	page = cache_grow_begin(cachep, gfp_exact_node(flags), nodeid);
 	if (page) {
 		/* This slab isn't counted yet so don't update free_objects */
@@ -3381,7 +3381,7 @@
 
 	check_irq_off();
 	n = get_node(cachep, node);
-	raw_spin_lock(&n->list_lock);
+	spin_lock(&n->list_lock);
 	if (n->shared) {
 		struct array_cache *shared_array = n->shared;
 		int max = shared_array->limit - shared_array->avail;
@@ -3410,7 +3410,7 @@
 		STATS_SET_FREEABLE(cachep, i);
 	}
 #endif
-	raw_spin_unlock(&n->list_lock);
+	spin_unlock(&n->list_lock);
 	ac->avail -= batchcount;
 	memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
 	slabs_destroy(cachep, &list);
```
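The allocation slow paths are the mirror image of the free paths: the node lock covers only the walk of the slab lists, and is dropped before the cache is grown, i.e. before calling into the page allocator. From the hunk at line 3190:

```c
must_grow:
	spin_unlock(&n->list_lock);
	page = cache_grow_begin(cachep, gfp_exact_node(flags), nodeid);
```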
```diff
@@ -3854,9 +3854,9 @@
 
 		node = cpu_to_mem(cpu);
 		n = get_node(cachep, node);
-		raw_spin_lock_irq(&n->list_lock);
+		spin_lock_irq(&n->list_lock);
 		free_block(cachep, ac->entry, ac->avail, node, &list);
-		raw_spin_unlock_irq(&n->list_lock);
+		spin_unlock_irq(&n->list_lock);
 		slabs_destroy(cachep, &list);
 	}
 	free_percpu(prev);
@@ -3951,9 +3951,9 @@
 		return;
 	}
 
-	raw_spin_lock_irq(&n->list_lock);
+	spin_lock_irq(&n->list_lock);
 	drain_array_locked(cachep, ac, node, false, &list);
-	raw_spin_unlock_irq(&n->list_lock);
+	spin_unlock_irq(&n->list_lock);
 
 	slabs_destroy(cachep, &list);
 }
@@ -4037,7 +4037,7 @@
 
 	for_each_kmem_cache_node(cachep, node, n) {
 		check_irq_on();
-		raw_spin_lock_irq(&n->list_lock);
+		spin_lock_irq(&n->list_lock);
 
 		total_slabs += n->total_slabs;
 		free_slabs += n->free_slabs;
@@ -4046,7 +4046,7 @@
 		if (n->shared)
 			shared_avail += n->shared->avail;
 
-		raw_spin_unlock_irq(&n->list_lock);
+		spin_unlock_irq(&n->list_lock);
 	}
 	num_objs = total_slabs * cachep->num;
 	active_slabs = total_slabs - free_slabs;
```