2023-11-06 e3e12f52b214121840b44c91de5b3e5af5d3eb84
--- a/kernel/mm/slab.c
+++ b/kernel/mm/slab.c
@@ -233,7 +233,7 @@
 	parent->shared = NULL;
 	parent->alien = NULL;
 	parent->colour_next = 0;
-	spin_lock_init(&parent->list_lock);
+	raw_spin_lock_init(&parent->list_lock);
 	parent->free_objects = 0;
 	parent->free_touched = 0;
 }
@@ -587,9 +587,9 @@
 	page_node = page_to_nid(page);
 	n = get_node(cachep, page_node);
 
-	spin_lock(&n->list_lock);
+	raw_spin_lock(&n->list_lock);
 	free_block(cachep, &objp, 1, page_node, &list);
-	spin_unlock(&n->list_lock);
+	raw_spin_unlock(&n->list_lock);
 
 	slabs_destroy(cachep, &list);
 }
@@ -718,7 +718,7 @@
 	struct kmem_cache_node *n = get_node(cachep, node);
 
 	if (ac->avail) {
-		spin_lock(&n->list_lock);
+		raw_spin_lock(&n->list_lock);
 		/*
 		 * Stuff objects into the remote nodes shared array first.
 		 * That way we could avoid the overhead of putting the objects
@@ -729,7 +729,7 @@
 
 		free_block(cachep, ac->entry, ac->avail, node, list);
 		ac->avail = 0;
-		spin_unlock(&n->list_lock);
+		raw_spin_unlock(&n->list_lock);
 	}
 }
 
@@ -802,9 +802,9 @@
 		slabs_destroy(cachep, &list);
 	} else {
 		n = get_node(cachep, page_node);
-		spin_lock(&n->list_lock);
+		raw_spin_lock(&n->list_lock);
 		free_block(cachep, &objp, 1, page_node, &list);
-		spin_unlock(&n->list_lock);
+		raw_spin_unlock(&n->list_lock);
 		slabs_destroy(cachep, &list);
 	}
 	return 1;
@@ -845,10 +845,10 @@
 	 */
 	n = get_node(cachep, node);
 	if (n) {
-		spin_lock_irq(&n->list_lock);
+		raw_spin_lock_irq(&n->list_lock);
 		n->free_limit = (1 + nr_cpus_node(node)) * cachep->batchcount +
 				cachep->num;
-		spin_unlock_irq(&n->list_lock);
+		raw_spin_unlock_irq(&n->list_lock);
 
 		return 0;
 	}
@@ -927,7 +927,7 @@
 		goto fail;
 
 	n = get_node(cachep, node);
-	spin_lock_irq(&n->list_lock);
+	raw_spin_lock_irq(&n->list_lock);
 	if (n->shared && force_change) {
 		free_block(cachep, n->shared->entry,
 				n->shared->avail, node, &list);
@@ -945,7 +945,7 @@
 		new_alien = NULL;
 	}
 
-	spin_unlock_irq(&n->list_lock);
+	raw_spin_unlock_irq(&n->list_lock);
 	slabs_destroy(cachep, &list);
 
 	/*
@@ -984,7 +984,7 @@
 		if (!n)
 			continue;
 
-		spin_lock_irq(&n->list_lock);
+		raw_spin_lock_irq(&n->list_lock);
 
 		/* Free limit for this kmem_cache_node */
 		n->free_limit -= cachep->batchcount;
@@ -997,7 +997,7 @@
 		}
 
 		if (!cpumask_empty(mask)) {
-			spin_unlock_irq(&n->list_lock);
+			raw_spin_unlock_irq(&n->list_lock);
 			goto free_slab;
 		}
 
@@ -1011,7 +1011,7 @@
 		alien = n->alien;
 		n->alien = NULL;
 
-		spin_unlock_irq(&n->list_lock);
+		raw_spin_unlock_irq(&n->list_lock);
 
 		kfree(shared);
 		if (alien) {
@@ -1195,7 +1195,7 @@
 	/*
 	 * Do not assume that spinlocks can be initialized via memcpy:
 	 */
-	spin_lock_init(&ptr->list_lock);
+	raw_spin_lock_init(&ptr->list_lock);
 
 	MAKE_ALL_LISTS(cachep, ptr, nodeid);
 	cachep->node[nodeid] = ptr;
@@ -1366,11 +1366,11 @@
 	for_each_kmem_cache_node(cachep, node, n) {
 		unsigned long total_slabs, free_slabs, free_objs;
 
-		spin_lock_irqsave(&n->list_lock, flags);
+		raw_spin_lock_irqsave(&n->list_lock, flags);
 		total_slabs = n->total_slabs;
 		free_slabs = n->free_slabs;
 		free_objs = n->free_objects;
-		spin_unlock_irqrestore(&n->list_lock, flags);
+		raw_spin_unlock_irqrestore(&n->list_lock, flags);
 
 		pr_warn(" node %d: slabs: %ld/%ld, objs: %ld/%ld\n",
 			node, total_slabs - free_slabs, total_slabs,
@@ -2173,7 +2173,7 @@
 {
 #ifdef CONFIG_SMP
 	check_irq_off();
-	assert_spin_locked(&get_node(cachep, numa_mem_id())->list_lock);
+	assert_raw_spin_locked(&get_node(cachep, numa_mem_id())->list_lock);
 #endif
 }
 
@@ -2181,7 +2181,7 @@
 {
 #ifdef CONFIG_SMP
 	check_irq_off();
-	assert_spin_locked(&get_node(cachep, node)->list_lock);
+	assert_raw_spin_locked(&get_node(cachep, node)->list_lock);
 #endif
 }
 
@@ -2221,9 +2221,9 @@
 	check_irq_off();
 	ac = cpu_cache_get(cachep);
 	n = get_node(cachep, node);
-	spin_lock(&n->list_lock);
+	raw_spin_lock(&n->list_lock);
 	free_block(cachep, ac->entry, ac->avail, node, &list);
-	spin_unlock(&n->list_lock);
+	raw_spin_unlock(&n->list_lock);
 	slabs_destroy(cachep, &list);
 	ac->avail = 0;
 }
@@ -2241,9 +2241,9 @@
 			drain_alien_cache(cachep, n->alien);
 
 	for_each_kmem_cache_node(cachep, node, n) {
-		spin_lock_irq(&n->list_lock);
+		raw_spin_lock_irq(&n->list_lock);
 		drain_array_locked(cachep, n->shared, node, true, &list);
-		spin_unlock_irq(&n->list_lock);
+		raw_spin_unlock_irq(&n->list_lock);
 
 		slabs_destroy(cachep, &list);
 	}
@@ -2265,10 +2265,10 @@
 	nr_freed = 0;
 	while (nr_freed < tofree && !list_empty(&n->slabs_free)) {
 
-		spin_lock_irq(&n->list_lock);
+		raw_spin_lock_irq(&n->list_lock);
 		p = n->slabs_free.prev;
 		if (p == &n->slabs_free) {
-			spin_unlock_irq(&n->list_lock);
+			raw_spin_unlock_irq(&n->list_lock);
 			goto out;
 		}
 
@@ -2281,7 +2281,7 @@
 		 * to the cache.
 		 */
 		n->free_objects -= cache->num;
-		spin_unlock_irq(&n->list_lock);
+		raw_spin_unlock_irq(&n->list_lock);
 		slab_destroy(cache, page);
 		nr_freed++;
 	}
@@ -2735,7 +2735,7 @@
 	INIT_LIST_HEAD(&page->lru);
 	n = get_node(cachep, page_to_nid(page));
 
-	spin_lock(&n->list_lock);
+	raw_spin_lock(&n->list_lock);
 	n->total_slabs++;
 	if (!page->active) {
 		list_add_tail(&page->lru, &(n->slabs_free));
@@ -2745,7 +2745,7 @@
 
 	STATS_INC_GROWN(cachep);
 	n->free_objects += cachep->num - page->active;
-	spin_unlock(&n->list_lock);
+	raw_spin_unlock(&n->list_lock);
 
 	fixup_objfreelist_debug(cachep, &list);
 }
@@ -2913,7 +2913,7 @@
 {
 	struct page *page;
 
-	assert_spin_locked(&n->list_lock);
+	assert_raw_spin_locked(&n->list_lock);
 	page = list_first_entry_or_null(&n->slabs_partial, struct page, lru);
 	if (!page) {
 		n->free_touched = 1;
@@ -2939,10 +2939,10 @@
 	if (!gfp_pfmemalloc_allowed(flags))
 		return NULL;
 
-	spin_lock(&n->list_lock);
+	raw_spin_lock(&n->list_lock);
 	page = get_first_slab(n, true);
 	if (!page) {
-		spin_unlock(&n->list_lock);
+		raw_spin_unlock(&n->list_lock);
 		return NULL;
 	}
 
@@ -2951,7 +2951,7 @@
 
 	fixup_slab_list(cachep, n, page, &list);
 
-	spin_unlock(&n->list_lock);
+	raw_spin_unlock(&n->list_lock);
 	fixup_objfreelist_debug(cachep, &list);
 
 	return obj;
@@ -3010,7 +3010,7 @@
 	if (!n->free_objects && (!shared || !shared->avail))
 		goto direct_grow;
 
-	spin_lock(&n->list_lock);
+	raw_spin_lock(&n->list_lock);
 	shared = READ_ONCE(n->shared);
 
 	/* See if we can refill from the shared array */
@@ -3034,7 +3034,7 @@
 must_grow:
 	n->free_objects -= ac->avail;
 alloc_done:
-	spin_unlock(&n->list_lock);
+	raw_spin_unlock(&n->list_lock);
 	fixup_objfreelist_debug(cachep, &list);
 
 direct_grow:
@@ -3259,7 +3259,7 @@
 	BUG_ON(!n);
 
 	check_irq_off();
-	spin_lock(&n->list_lock);
+	raw_spin_lock(&n->list_lock);
 	page = get_first_slab(n, false);
 	if (!page)
 		goto must_grow;
@@ -3277,12 +3277,12 @@
 
 	fixup_slab_list(cachep, n, page, &list);
 
-	spin_unlock(&n->list_lock);
+	raw_spin_unlock(&n->list_lock);
 	fixup_objfreelist_debug(cachep, &list);
 	return obj;
 
 must_grow:
-	spin_unlock(&n->list_lock);
+	raw_spin_unlock(&n->list_lock);
 	page = cache_grow_begin(cachep, gfp_exact_node(flags), nodeid);
 	if (page) {
 		/* This slab isn't counted yet so don't update free_objects */
@@ -3458,7 +3458,7 @@
 
 	check_irq_off();
 	n = get_node(cachep, node);
-	spin_lock(&n->list_lock);
+	raw_spin_lock(&n->list_lock);
 	if (n->shared) {
 		struct array_cache *shared_array = n->shared;
 		int max = shared_array->limit - shared_array->avail;
@@ -3487,7 +3487,7 @@
 		STATS_SET_FREEABLE(cachep, i);
 	}
 #endif
-	spin_unlock(&n->list_lock);
+	raw_spin_unlock(&n->list_lock);
 	slabs_destroy(cachep, &list);
 	ac->avail -= batchcount;
 	memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
@@ -3897,9 +3897,9 @@
 
 		node = cpu_to_mem(cpu);
 		n = get_node(cachep, node);
-		spin_lock_irq(&n->list_lock);
+		raw_spin_lock_irq(&n->list_lock);
 		free_block(cachep, ac->entry, ac->avail, node, &list);
-		spin_unlock_irq(&n->list_lock);
+		raw_spin_unlock_irq(&n->list_lock);
 		slabs_destroy(cachep, &list);
 	}
 	free_percpu(prev);
@@ -4024,9 +4024,9 @@
 		return;
 	}
 
-	spin_lock_irq(&n->list_lock);
+	raw_spin_lock_irq(&n->list_lock);
 	drain_array_locked(cachep, ac, node, false, &list);
-	spin_unlock_irq(&n->list_lock);
+	raw_spin_unlock_irq(&n->list_lock);
 
 	slabs_destroy(cachep, &list);
 }
@@ -4110,7 +4110,7 @@
 
 	for_each_kmem_cache_node(cachep, node, n) {
 		check_irq_on();
-		spin_lock_irq(&n->list_lock);
+		raw_spin_lock_irq(&n->list_lock);
 
 		total_slabs += n->total_slabs;
 		free_slabs += n->free_slabs;
@@ -4119,7 +4119,7 @@
 		if (n->shared)
 			shared_avail += n->shared->avail;
 
-		spin_unlock_irq(&n->list_lock);
+		raw_spin_unlock_irq(&n->list_lock);
 	}
 	num_objs = total_slabs * cachep->num;
 	active_slabs = total_slabs - free_slabs;
@@ -4339,13 +4339,13 @@
 		for_each_kmem_cache_node(cachep, node, n) {
 
 			check_irq_on();
-			spin_lock_irq(&n->list_lock);
+			raw_spin_lock_irq(&n->list_lock);
 
 			list_for_each_entry(page, &n->slabs_full, lru)
 				handle_slab(x, cachep, page);
 			list_for_each_entry(page, &n->slabs_partial, lru)
 				handle_slab(x, cachep, page);
-			spin_unlock_irq(&n->list_lock);
+			raw_spin_unlock_irq(&n->list_lock);
 		}
 	} while (!is_store_user_clean(cachep));
 
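For context, the hunks above follow the usual PREEMPT_RT conversion pattern: on RT kernels a spinlock_t may sleep, so a lock that is taken with interrupts disabled or from other non-preemptible paths, such as kmem_cache_node's list_lock, has to be a raw_spinlock_t, and every lock, unlock, init, and assert call site is switched to the raw_* variant. On non-RT kernels the raw_* calls behave the same as the plain spinlock calls, so the change is behaviorally a no-op there. The member's type change itself is not shown in these hunks; the sketch below is a minimal illustration under that assumption, using a made-up example_node structure rather than the real slab data structures.

/* Illustrative sketch only: example_node stands in for kmem_cache_node. */
#include <linux/spinlock.h>

struct example_node {
	raw_spinlock_t	list_lock;	/* was: spinlock_t list_lock; */
	unsigned long	free_objects;
};

static void example_node_init(struct example_node *n)
{
	raw_spin_lock_init(&n->list_lock);	/* was: spin_lock_init() */
	n->free_objects = 0;
}

static void example_node_add(struct example_node *n, unsigned long nr)
{
	unsigned long flags;

	/* raw_spin_lock_irqsave() never sleeps, even on PREEMPT_RT */
	raw_spin_lock_irqsave(&n->list_lock, flags);
	n->free_objects += nr;
	raw_spin_unlock_irqrestore(&n->list_lock, flags);
}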