From 6778948f9de86c3cfaf36725a7c87dcff9ba247f Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Mon, 11 Dec 2023 08:20:59 +0000
Subject: [PATCH] kernel_5.10: mm/slab: use plain spinlocks for list_lock (no rt)

Replace every raw_spin_* operation on the per-node ->list_lock in
mm/slab.c with its plain spin_* counterpart. The raw variants matter
only when PREEMPT_RT turns spinlock_t into a sleeping lock; on a kernel
built without PREEMPT_RT ("no rt"), spinlock_t is itself backed by a
raw spinlock, so the locking behaviour is unchanged.

---
 kernel/mm/slab.c |   90 ++++++++++++++++++++++----------------------
 1 file changed, 45 insertions(+), 45 deletions(-)
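
Note (after the cut, not part of the commit message): this patch only
converts the lock/unlock call sites in mm/slab.c. The ->list_lock
member of struct kmem_cache_node is declared in mm/slab.h, so its type
is assumed to be (or to be switched back to) spinlock_t elsewhere in
this tree; otherwise these hunks will not compile.

A minimal sketch of the resulting pattern, using only names that appear
in the hunks below (illustrative only, not an additional hunk):

	struct kmem_cache_node *n = get_node(cachep, node);
	LIST_HEAD(list);

	/* plain spinlock_t API; on a !PREEMPT_RT build this is backed
	 * by the same raw spinlock behaviour as before */
	spin_lock(&n->list_lock);
	free_block(cachep, ac->entry, ac->avail, node, &list);
	ac->avail = 0;
	spin_unlock(&n->list_lock);

	/* slab pages collected on "list" are destroyed outside the lock */
	slabs_destroy(cachep, &list);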

diff --git a/kernel/mm/slab.c b/kernel/mm/slab.c
index 290fafc..aa4ef18 100644
--- a/kernel/mm/slab.c
+++ b/kernel/mm/slab.c
@@ -234,7 +234,7 @@
 	parent->shared = NULL;
 	parent->alien = NULL;
 	parent->colour_next = 0;
-	raw_spin_lock_init(&parent->list_lock);
+	spin_lock_init(&parent->list_lock);
 	parent->free_objects = 0;
 	parent->free_touched = 0;
 }
@@ -559,9 +559,9 @@
 	page_node = page_to_nid(page);
 	n = get_node(cachep, page_node);
 
-	raw_spin_lock(&n->list_lock);
+	spin_lock(&n->list_lock);
 	free_block(cachep, &objp, 1, page_node, &list);
-	raw_spin_unlock(&n->list_lock);
+	spin_unlock(&n->list_lock);
 
 	slabs_destroy(cachep, &list);
 }
@@ -699,7 +699,7 @@
 	struct kmem_cache_node *n = get_node(cachep, node);
 
 	if (ac->avail) {
-		raw_spin_lock(&n->list_lock);
+		spin_lock(&n->list_lock);
 		/*
 		 * Stuff objects into the remote nodes shared array first.
 		 * That way we could avoid the overhead of putting the objects
@@ -710,7 +710,7 @@
 
 		free_block(cachep, ac->entry, ac->avail, node, list);
 		ac->avail = 0;
-		raw_spin_unlock(&n->list_lock);
+		spin_unlock(&n->list_lock);
 	}
 }
 
@@ -783,9 +783,9 @@
 		slabs_destroy(cachep, &list);
 	} else {
 		n = get_node(cachep, page_node);
-		raw_spin_lock(&n->list_lock);
+		spin_lock(&n->list_lock);
 		free_block(cachep, &objp, 1, page_node, &list);
-		raw_spin_unlock(&n->list_lock);
+		spin_unlock(&n->list_lock);
 		slabs_destroy(cachep, &list);
 	}
 	return 1;
@@ -826,10 +826,10 @@
 	 */
 	n = get_node(cachep, node);
 	if (n) {
-		raw_spin_lock_irq(&n->list_lock);
+		spin_lock_irq(&n->list_lock);
 		n->free_limit = (1 + nr_cpus_node(node)) * cachep->batchcount +
 				cachep->num;
-		raw_spin_unlock_irq(&n->list_lock);
+		spin_unlock_irq(&n->list_lock);
 
 		return 0;
 	}
@@ -908,7 +908,7 @@
 		goto fail;
 
 	n = get_node(cachep, node);
-	raw_spin_lock_irq(&n->list_lock);
+	spin_lock_irq(&n->list_lock);
 	if (n->shared && force_change) {
 		free_block(cachep, n->shared->entry,
 				n->shared->avail, node, &list);
@@ -926,7 +926,7 @@
 		new_alien = NULL;
 	}
 
-	raw_spin_unlock_irq(&n->list_lock);
+	spin_unlock_irq(&n->list_lock);
 	slabs_destroy(cachep, &list);
 
 	/*
@@ -965,7 +965,7 @@
 		if (!n)
 			continue;
 
-		raw_spin_lock_irq(&n->list_lock);
+		spin_lock_irq(&n->list_lock);
 
 		/* Free limit for this kmem_cache_node */
 		n->free_limit -= cachep->batchcount;
@@ -976,7 +976,7 @@
 		nc->avail = 0;
 
 		if (!cpumask_empty(mask)) {
-			raw_spin_unlock_irq(&n->list_lock);
+			spin_unlock_irq(&n->list_lock);
 			goto free_slab;
 		}
 
@@ -990,7 +990,7 @@
 		alien = n->alien;
 		n->alien = NULL;
 
-		raw_spin_unlock_irq(&n->list_lock);
+		spin_unlock_irq(&n->list_lock);
 
 		kfree(shared);
 		if (alien) {
@@ -1174,7 +1174,7 @@
 	/*
 	 * Do not assume that spinlocks can be initialized via memcpy:
 	 */
-	raw_spin_lock_init(&ptr->list_lock);
+	spin_lock_init(&ptr->list_lock);
 
 	MAKE_ALL_LISTS(cachep, ptr, nodeid);
 	cachep->node[nodeid] = ptr;
@@ -1345,11 +1345,11 @@
 	for_each_kmem_cache_node(cachep, node, n) {
 		unsigned long total_slabs, free_slabs, free_objs;
 
-		raw_spin_lock_irqsave(&n->list_lock, flags);
+		spin_lock_irqsave(&n->list_lock, flags);
 		total_slabs = n->total_slabs;
 		free_slabs = n->free_slabs;
 		free_objs = n->free_objects;
-		raw_spin_unlock_irqrestore(&n->list_lock, flags);
+		spin_unlock_irqrestore(&n->list_lock, flags);
 
 		pr_warn("  node %d: slabs: %ld/%ld, objs: %ld/%ld\n",
 			node, total_slabs - free_slabs, total_slabs,
@@ -2106,7 +2106,7 @@
 {
 #ifdef CONFIG_SMP
 	check_irq_off();
-	assert_raw_spin_locked(&get_node(cachep, numa_mem_id())->list_lock);
+	assert_spin_locked(&get_node(cachep, numa_mem_id())->list_lock);
 #endif
 }
 
@@ -2114,7 +2114,7 @@
 {
 #ifdef CONFIG_SMP
 	check_irq_off();
-	assert_raw_spin_locked(&get_node(cachep, node)->list_lock);
+	assert_spin_locked(&get_node(cachep, node)->list_lock);
 #endif
 }
 
@@ -2154,9 +2154,9 @@
 	check_irq_off();
 	ac = cpu_cache_get(cachep);
 	n = get_node(cachep, node);
-	raw_spin_lock(&n->list_lock);
+	spin_lock(&n->list_lock);
 	free_block(cachep, ac->entry, ac->avail, node, &list);
-	raw_spin_unlock(&n->list_lock);
+	spin_unlock(&n->list_lock);
 	ac->avail = 0;
 	slabs_destroy(cachep, &list);
 }
@@ -2174,9 +2174,9 @@
 			drain_alien_cache(cachep, n->alien);
 
 	for_each_kmem_cache_node(cachep, node, n) {
-		raw_spin_lock_irq(&n->list_lock);
+		spin_lock_irq(&n->list_lock);
 		drain_array_locked(cachep, n->shared, node, true, &list);
-		raw_spin_unlock_irq(&n->list_lock);
+		spin_unlock_irq(&n->list_lock);
 
 		slabs_destroy(cachep, &list);
 	}
@@ -2198,10 +2198,10 @@
 	nr_freed = 0;
 	while (nr_freed < tofree && !list_empty(&n->slabs_free)) {
 
-		raw_spin_lock_irq(&n->list_lock);
+		spin_lock_irq(&n->list_lock);
 		p = n->slabs_free.prev;
 		if (p == &n->slabs_free) {
-			raw_spin_unlock_irq(&n->list_lock);
+			spin_unlock_irq(&n->list_lock);
 			goto out;
 		}
 
@@ -2214,7 +2214,7 @@
 		 * to the cache.
 		 */
 		n->free_objects -= cache->num;
-		raw_spin_unlock_irq(&n->list_lock);
+		spin_unlock_irq(&n->list_lock);
 		slab_destroy(cache, page);
 		nr_freed++;
 	}
@@ -2650,7 +2650,7 @@
 	INIT_LIST_HEAD(&page->slab_list);
 	n = get_node(cachep, page_to_nid(page));
 
-	raw_spin_lock(&n->list_lock);
+	spin_lock(&n->list_lock);
 	n->total_slabs++;
 	if (!page->active) {
 		list_add_tail(&page->slab_list, &n->slabs_free);
@@ -2660,7 +2660,7 @@
 
 	STATS_INC_GROWN(cachep);
 	n->free_objects += cachep->num - page->active;
-	raw_spin_unlock(&n->list_lock);
+	spin_unlock(&n->list_lock);
 
 	fixup_objfreelist_debug(cachep, &list);
 }
@@ -2826,7 +2826,7 @@
 {
 	struct page *page;
 
-	assert_raw_spin_locked(&n->list_lock);
+	assert_spin_locked(&n->list_lock);
 	page = list_first_entry_or_null(&n->slabs_partial, struct page,
 					slab_list);
 	if (!page) {
@@ -2853,10 +2853,10 @@
 	if (!gfp_pfmemalloc_allowed(flags))
 		return NULL;
 
-	raw_spin_lock(&n->list_lock);
+	spin_lock(&n->list_lock);
 	page = get_first_slab(n, true);
 	if (!page) {
-		raw_spin_unlock(&n->list_lock);
+		spin_unlock(&n->list_lock);
 		return NULL;
 	}
 
@@ -2865,7 +2865,7 @@
 
 	fixup_slab_list(cachep, n, page, &list);
 
-	raw_spin_unlock(&n->list_lock);
+	spin_unlock(&n->list_lock);
 	fixup_objfreelist_debug(cachep, &list);
 
 	return obj;
@@ -2924,7 +2924,7 @@
 	if (!n->free_objects && (!shared || !shared->avail))
 		goto direct_grow;
 
-	raw_spin_lock(&n->list_lock);
+	spin_lock(&n->list_lock);
 	shared = READ_ONCE(n->shared);
 
 	/* See if we can refill from the shared array */
@@ -2948,7 +2948,7 @@
 must_grow:
 	n->free_objects -= ac->avail;
 alloc_done:
-	raw_spin_unlock(&n->list_lock);
+	spin_unlock(&n->list_lock);
 	fixup_objfreelist_debug(cachep, &list);
 
 direct_grow:
@@ -3172,7 +3172,7 @@
 	BUG_ON(!n);
 
 	check_irq_off();
-	raw_spin_lock(&n->list_lock);
+	spin_lock(&n->list_lock);
 	page = get_first_slab(n, false);
 	if (!page)
 		goto must_grow;
@@ -3190,12 +3190,12 @@
 
 	fixup_slab_list(cachep, n, page, &list);
 
-	raw_spin_unlock(&n->list_lock);
+	spin_unlock(&n->list_lock);
 	fixup_objfreelist_debug(cachep, &list);
 	return obj;
 
 must_grow:
-	raw_spin_unlock(&n->list_lock);
+	spin_unlock(&n->list_lock);
 	page = cache_grow_begin(cachep, gfp_exact_node(flags), nodeid);
 	if (page) {
 		/* This slab isn't counted yet so don't update free_objects */
@@ -3381,7 +3381,7 @@
 
 	check_irq_off();
 	n = get_node(cachep, node);
-	raw_spin_lock(&n->list_lock);
+	spin_lock(&n->list_lock);
 	if (n->shared) {
 		struct array_cache *shared_array = n->shared;
 		int max = shared_array->limit - shared_array->avail;
@@ -3410,7 +3410,7 @@
 		STATS_SET_FREEABLE(cachep, i);
 	}
 #endif
-	raw_spin_unlock(&n->list_lock);
+	spin_unlock(&n->list_lock);
 	ac->avail -= batchcount;
 	memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
 	slabs_destroy(cachep, &list);
@@ -3854,9 +3854,9 @@
 
 		node = cpu_to_mem(cpu);
 		n = get_node(cachep, node);
-		raw_spin_lock_irq(&n->list_lock);
+		spin_lock_irq(&n->list_lock);
 		free_block(cachep, ac->entry, ac->avail, node, &list);
-		raw_spin_unlock_irq(&n->list_lock);
+		spin_unlock_irq(&n->list_lock);
 		slabs_destroy(cachep, &list);
 	}
 	free_percpu(prev);
@@ -3951,9 +3951,9 @@
 		return;
 	}
 
-	raw_spin_lock_irq(&n->list_lock);
+	spin_lock_irq(&n->list_lock);
 	drain_array_locked(cachep, ac, node, false, &list);
-	raw_spin_unlock_irq(&n->list_lock);
+	spin_unlock_irq(&n->list_lock);
 
 	slabs_destroy(cachep, &list);
 }
@@ -4037,7 +4037,7 @@
 
 	for_each_kmem_cache_node(cachep, node, n) {
 		check_irq_on();
-		raw_spin_lock_irq(&n->list_lock);
+		spin_lock_irq(&n->list_lock);
 
 		total_slabs += n->total_slabs;
 		free_slabs += n->free_slabs;
@@ -4046,7 +4046,7 @@
 		if (n->shared)
 			shared_avail += n->shared->avail;
 
-		raw_spin_unlock_irq(&n->list_lock);
+		spin_unlock_irq(&n->list_lock);
 	}
 	num_objs = total_slabs * cachep->num;
 	active_slabs = total_slabs - free_slabs;

--
Gitblit v1.6.2