2023-12-11 6778948f9de86c3cfaf36725a7c87dcff9ba247f
--- a/kernel/mm/slab.c
+++ b/kernel/mm/slab.c
@@ -234,7 +234,7 @@
 	parent->shared = NULL;
 	parent->alien = NULL;
 	parent->colour_next = 0;
-	raw_spin_lock_init(&parent->list_lock);
+	spin_lock_init(&parent->list_lock);
 	parent->free_objects = 0;
 	parent->free_touched = 0;
 }
@@ -559,9 +559,9 @@
 	page_node = page_to_nid(page);
 	n = get_node(cachep, page_node);
 
-	raw_spin_lock(&n->list_lock);
+	spin_lock(&n->list_lock);
 	free_block(cachep, &objp, 1, page_node, &list);
-	raw_spin_unlock(&n->list_lock);
+	spin_unlock(&n->list_lock);
 
 	slabs_destroy(cachep, &list);
 }
@@ -699,7 +699,7 @@
 	struct kmem_cache_node *n = get_node(cachep, node);
 
 	if (ac->avail) {
-		raw_spin_lock(&n->list_lock);
+		spin_lock(&n->list_lock);
 		/*
 		 * Stuff objects into the remote nodes shared array first.
 		 * That way we could avoid the overhead of putting the objects
@@ -710,7 +710,7 @@
 
 		free_block(cachep, ac->entry, ac->avail, node, list);
 		ac->avail = 0;
-		raw_spin_unlock(&n->list_lock);
+		spin_unlock(&n->list_lock);
 	}
 }
 
@@ -783,9 +783,9 @@
 		slabs_destroy(cachep, &list);
 	} else {
 		n = get_node(cachep, page_node);
-		raw_spin_lock(&n->list_lock);
+		spin_lock(&n->list_lock);
 		free_block(cachep, &objp, 1, page_node, &list);
-		raw_spin_unlock(&n->list_lock);
+		spin_unlock(&n->list_lock);
 		slabs_destroy(cachep, &list);
 	}
 	return 1;
@@ -826,10 +826,10 @@
 	 */
 	n = get_node(cachep, node);
 	if (n) {
-		raw_spin_lock_irq(&n->list_lock);
+		spin_lock_irq(&n->list_lock);
 		n->free_limit = (1 + nr_cpus_node(node)) * cachep->batchcount +
 				cachep->num;
-		raw_spin_unlock_irq(&n->list_lock);
+		spin_unlock_irq(&n->list_lock);
 
 		return 0;
 	}
@@ -908,7 +908,7 @@
 		goto fail;
 
 	n = get_node(cachep, node);
-	raw_spin_lock_irq(&n->list_lock);
+	spin_lock_irq(&n->list_lock);
 	if (n->shared && force_change) {
 		free_block(cachep, n->shared->entry,
 				n->shared->avail, node, &list);
@@ -926,7 +926,7 @@
 		new_alien = NULL;
 	}
 
-	raw_spin_unlock_irq(&n->list_lock);
+	spin_unlock_irq(&n->list_lock);
 	slabs_destroy(cachep, &list);
 
 	/*
@@ -965,7 +965,7 @@
 		if (!n)
 			continue;
 
-		raw_spin_lock_irq(&n->list_lock);
+		spin_lock_irq(&n->list_lock);
 
 		/* Free limit for this kmem_cache_node */
 		n->free_limit -= cachep->batchcount;
@@ -976,7 +976,7 @@
 		nc->avail = 0;
 
 		if (!cpumask_empty(mask)) {
-			raw_spin_unlock_irq(&n->list_lock);
+			spin_unlock_irq(&n->list_lock);
 			goto free_slab;
 		}
 
@@ -990,7 +990,7 @@
 		alien = n->alien;
 		n->alien = NULL;
 
-		raw_spin_unlock_irq(&n->list_lock);
+		spin_unlock_irq(&n->list_lock);
 
 		kfree(shared);
 		if (alien) {
@@ -1174,7 +1174,7 @@
 	/*
 	 * Do not assume that spinlocks can be initialized via memcpy:
 	 */
-	raw_spin_lock_init(&ptr->list_lock);
+	spin_lock_init(&ptr->list_lock);
 
 	MAKE_ALL_LISTS(cachep, ptr, nodeid);
 	cachep->node[nodeid] = ptr;
@@ -1345,11 +1345,11 @@
 	for_each_kmem_cache_node(cachep, node, n) {
 		unsigned long total_slabs, free_slabs, free_objs;
 
-		raw_spin_lock_irqsave(&n->list_lock, flags);
+		spin_lock_irqsave(&n->list_lock, flags);
 		total_slabs = n->total_slabs;
 		free_slabs = n->free_slabs;
 		free_objs = n->free_objects;
-		raw_spin_unlock_irqrestore(&n->list_lock, flags);
+		spin_unlock_irqrestore(&n->list_lock, flags);
 
 		pr_warn("  node %d: slabs: %ld/%ld, objs: %ld/%ld\n",
 			node, total_slabs - free_slabs, total_slabs,
@@ -2106,7 +2106,7 @@
 {
 #ifdef CONFIG_SMP
 	check_irq_off();
-	assert_raw_spin_locked(&get_node(cachep, numa_mem_id())->list_lock);
+	assert_spin_locked(&get_node(cachep, numa_mem_id())->list_lock);
 #endif
 }
 
@@ -2114,7 +2114,7 @@
 {
 #ifdef CONFIG_SMP
 	check_irq_off();
-	assert_raw_spin_locked(&get_node(cachep, node)->list_lock);
+	assert_spin_locked(&get_node(cachep, node)->list_lock);
 #endif
 }
 
@@ -2154,9 +2154,9 @@
 	check_irq_off();
 	ac = cpu_cache_get(cachep);
 	n = get_node(cachep, node);
-	raw_spin_lock(&n->list_lock);
+	spin_lock(&n->list_lock);
 	free_block(cachep, ac->entry, ac->avail, node, &list);
-	raw_spin_unlock(&n->list_lock);
+	spin_unlock(&n->list_lock);
 	ac->avail = 0;
 	slabs_destroy(cachep, &list);
 }
@@ -2174,9 +2174,9 @@
 			drain_alien_cache(cachep, n->alien);
 
 	for_each_kmem_cache_node(cachep, node, n) {
-		raw_spin_lock_irq(&n->list_lock);
+		spin_lock_irq(&n->list_lock);
 		drain_array_locked(cachep, n->shared, node, true, &list);
-		raw_spin_unlock_irq(&n->list_lock);
+		spin_unlock_irq(&n->list_lock);
 
 		slabs_destroy(cachep, &list);
 	}
@@ -2198,10 +2198,10 @@
 	nr_freed = 0;
 	while (nr_freed < tofree && !list_empty(&n->slabs_free)) {
 
-		raw_spin_lock_irq(&n->list_lock);
+		spin_lock_irq(&n->list_lock);
 		p = n->slabs_free.prev;
 		if (p == &n->slabs_free) {
-			raw_spin_unlock_irq(&n->list_lock);
+			spin_unlock_irq(&n->list_lock);
 			goto out;
 		}
 
@@ -2214,7 +2214,7 @@
 		 * to the cache.
 		 */
 		n->free_objects -= cache->num;
-		raw_spin_unlock_irq(&n->list_lock);
+		spin_unlock_irq(&n->list_lock);
 		slab_destroy(cache, page);
 		nr_freed++;
 	}
@@ -2650,7 +2650,7 @@
 	INIT_LIST_HEAD(&page->slab_list);
 	n = get_node(cachep, page_to_nid(page));
 
-	raw_spin_lock(&n->list_lock);
+	spin_lock(&n->list_lock);
 	n->total_slabs++;
 	if (!page->active) {
 		list_add_tail(&page->slab_list, &n->slabs_free);
@@ -2660,7 +2660,7 @@
 
 	STATS_INC_GROWN(cachep);
 	n->free_objects += cachep->num - page->active;
-	raw_spin_unlock(&n->list_lock);
+	spin_unlock(&n->list_lock);
 
 	fixup_objfreelist_debug(cachep, &list);
 }
@@ -2826,7 +2826,7 @@
 {
 	struct page *page;
 
-	assert_raw_spin_locked(&n->list_lock);
+	assert_spin_locked(&n->list_lock);
 	page = list_first_entry_or_null(&n->slabs_partial, struct page,
 					slab_list);
 	if (!page) {
@@ -2853,10 +2853,10 @@
 	if (!gfp_pfmemalloc_allowed(flags))
 		return NULL;
 
-	raw_spin_lock(&n->list_lock);
+	spin_lock(&n->list_lock);
 	page = get_first_slab(n, true);
 	if (!page) {
-		raw_spin_unlock(&n->list_lock);
+		spin_unlock(&n->list_lock);
 		return NULL;
 	}
 
@@ -2865,7 +2865,7 @@
 
 	fixup_slab_list(cachep, n, page, &list);
 
-	raw_spin_unlock(&n->list_lock);
+	spin_unlock(&n->list_lock);
 	fixup_objfreelist_debug(cachep, &list);
 
 	return obj;
@@ -2924,7 +2924,7 @@
 	if (!n->free_objects && (!shared || !shared->avail))
 		goto direct_grow;
 
-	raw_spin_lock(&n->list_lock);
+	spin_lock(&n->list_lock);
 	shared = READ_ONCE(n->shared);
 
 	/* See if we can refill from the shared array */
@@ -2948,7 +2948,7 @@
 must_grow:
 	n->free_objects -= ac->avail;
 alloc_done:
-	raw_spin_unlock(&n->list_lock);
+	spin_unlock(&n->list_lock);
 	fixup_objfreelist_debug(cachep, &list);
 
 direct_grow:
@@ -3172,7 +3172,7 @@
 	BUG_ON(!n);
 
 	check_irq_off();
-	raw_spin_lock(&n->list_lock);
+	spin_lock(&n->list_lock);
 	page = get_first_slab(n, false);
 	if (!page)
 		goto must_grow;
@@ -3190,12 +3190,12 @@
 
 	fixup_slab_list(cachep, n, page, &list);
 
-	raw_spin_unlock(&n->list_lock);
+	spin_unlock(&n->list_lock);
 	fixup_objfreelist_debug(cachep, &list);
 	return obj;
 
 must_grow:
-	raw_spin_unlock(&n->list_lock);
+	spin_unlock(&n->list_lock);
 	page = cache_grow_begin(cachep, gfp_exact_node(flags), nodeid);
 	if (page) {
 		/* This slab isn't counted yet so don't update free_objects */
@@ -3381,7 +3381,7 @@
 
 	check_irq_off();
 	n = get_node(cachep, node);
-	raw_spin_lock(&n->list_lock);
+	spin_lock(&n->list_lock);
 	if (n->shared) {
 		struct array_cache *shared_array = n->shared;
 		int max = shared_array->limit - shared_array->avail;
@@ -3410,7 +3410,7 @@
 		STATS_SET_FREEABLE(cachep, i);
 	}
 #endif
-	raw_spin_unlock(&n->list_lock);
+	spin_unlock(&n->list_lock);
 	ac->avail -= batchcount;
 	memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
 	slabs_destroy(cachep, &list);
@@ -3854,9 +3854,9 @@
 
 		node = cpu_to_mem(cpu);
 		n = get_node(cachep, node);
-		raw_spin_lock_irq(&n->list_lock);
+		spin_lock_irq(&n->list_lock);
 		free_block(cachep, ac->entry, ac->avail, node, &list);
-		raw_spin_unlock_irq(&n->list_lock);
+		spin_unlock_irq(&n->list_lock);
 		slabs_destroy(cachep, &list);
 	}
 	free_percpu(prev);
@@ -3951,9 +3951,9 @@
 		return;
 	}
 
-	raw_spin_lock_irq(&n->list_lock);
+	spin_lock_irq(&n->list_lock);
 	drain_array_locked(cachep, ac, node, false, &list);
-	raw_spin_unlock_irq(&n->list_lock);
+	spin_unlock_irq(&n->list_lock);
 
 	slabs_destroy(cachep, &list);
 }
@@ -4037,7 +4037,7 @@
 
 	for_each_kmem_cache_node(cachep, node, n) {
 		check_irq_on();
-		raw_spin_lock_irq(&n->list_lock);
+		spin_lock_irq(&n->list_lock);
 
 		total_slabs += n->total_slabs;
 		free_slabs += n->free_slabs;
@@ -4046,7 +4046,7 @@
 		if (n->shared)
 			shared_avail += n->shared->avail;
 
-		raw_spin_unlock_irq(&n->list_lock);
+		spin_unlock_irq(&n->list_lock);
 	}
 	num_objs = total_slabs * cachep->num;
 	active_slabs = total_slabs - free_slabs;