From 6778948f9de86c3cfaf36725a7c87dcff9ba247f Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Mon, 11 Dec 2023 08:20:59 +0000
Subject: [PATCH] kernel_5.10: mm/memcontrol: revert PREEMPT_RT locking

This tree is built without CONFIG_PREEMPT_RT, so the RT-specific
synchronisation in mm/memcontrol.c is unnecessary. Restore the plain
upstream code: drop the local_lock_t from struct memcg_stock_pcp and the
per-CPU event_lock, protect the per-CPU stock and event statistics with
local_irq_save()/local_irq_restore() (or local_irq_disable()/
local_irq_enable()) directly, replace get_cpu_light()/put_cpu_light()
with get_cpu()/put_cpu(), and remove the preempt_disable_rt()/
preempt_enable_rt() pair around the lruvec statistics update.

---
 kernel/mm/memcontrol.c |   66 +++++++++++---------------------
 1 file changed, 23 insertions(+), 43 deletions(-)
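
Note: on a non-RT kernel, disabling interrupts already pins a task to
the local CPU and excludes the interrupt path, which is the only other
updater of this per-CPU state; local_lock_t exists so that PREEMPT_RT
can substitute a per-CPU sleeping lock and avoid long IRQs-off
sections. A minimal sketch of the pattern this patch restores (names
mirror the consume_stock() hunk below; illustrative, not a drop-in
implementation):

	static bool sketch_consume(struct mem_cgroup *memcg,
				   unsigned int nr_pages)
	{
		struct memcg_stock_pcp *stock;
		unsigned long flags;
		bool ret = false;

		/* IRQs off: no preemption, no interrupt-path updates */
		local_irq_save(flags);

		stock = this_cpu_ptr(&memcg_stock);
		if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
			stock->nr_pages -= nr_pages;
			ret = true;
		}

		local_irq_restore(flags);
		return ret;
	}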

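The same reasoning covers the removed event_lock: with interrupts
disabled, a read-modify-write on this CPU's vmstats_percpu counters
cannot race with anything. A hypothetical helper showing the invariant
the statistics/event hunks below rely on (field name taken from the
uncharge hunk; not kernel API):

	static void sketch_account_event(struct mem_cgroup *memcg,
					 unsigned long nr_pages)
	{
		unsigned long flags;

		local_irq_save(flags);
		/* safe: the interrupt path cannot run on this CPU here */
		__this_cpu_add(memcg->vmstats_percpu->nr_page_events, nr_pages);
		local_irq_restore(flags);
	}

This is also why mem_cgroup_swapout() keeps only the bare
VM_BUG_ON(!irqs_disabled()): its callers already run with interrupts
off, so no lock needs to be taken there at all.
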
diff --git a/kernel/mm/memcontrol.c b/kernel/mm/memcontrol.c
index 7db1e5a..0501a27 100644
--- a/kernel/mm/memcontrol.c
+++ b/kernel/mm/memcontrol.c
@@ -63,7 +63,6 @@
 #include <net/sock.h>
 #include <net/ip.h>
 #include "slab.h"
-#include <linux/local_lock.h>
 
 #include <linux/uaccess.h>
 
@@ -94,13 +93,6 @@
 #ifdef CONFIG_CGROUP_WRITEBACK
 static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq);
 #endif
-
-struct event_lock {
-	local_lock_t l;
-};
-static DEFINE_PER_CPU(struct event_lock, event_lock) = {
-	.l      = INIT_LOCAL_LOCK(l),
-};
 
 /* Whether legacy memory+swap accounting is active */
 static bool do_memsw_account(void)
@@ -825,7 +817,6 @@
 	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
 	memcg = pn->memcg;
 
-	preempt_disable_rt();
 	/* Update memcg */
 	__mod_memcg_state(memcg, idx, val);
 
@@ -845,7 +836,6 @@
 		x = 0;
 	}
 	__this_cpu_write(pn->lruvec_stat_cpu->count[idx], x);
-	preempt_enable_rt();
 }
 
 /**
@@ -2243,7 +2233,6 @@
 EXPORT_SYMBOL(unlock_page_memcg);
 
 struct memcg_stock_pcp {
-	local_lock_t lock;
 	struct mem_cgroup *cached; /* this never be root cgroup */
 	unsigned int nr_pages;
 
@@ -2295,7 +2284,7 @@
 	if (nr_pages > MEMCG_CHARGE_BATCH)
 		return ret;
 
-	local_lock_irqsave(&memcg_stock.lock, flags);
+	local_irq_save(flags);
 
 	stock = this_cpu_ptr(&memcg_stock);
 	if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
@@ -2303,7 +2292,7 @@
 		ret = true;
 	}
 
-	local_unlock_irqrestore(&memcg_stock.lock, flags);
+	local_irq_restore(flags);
 
 	return ret;
 }
@@ -2338,14 +2327,14 @@
 	 * The only protection from memory hotplug vs. drain_stock races is
 	 * that we always operate on local CPU stock here with IRQ disabled
 	 */
-	local_lock_irqsave(&memcg_stock.lock, flags);
+	local_irq_save(flags);
 
 	stock = this_cpu_ptr(&memcg_stock);
 	drain_obj_stock(stock);
 	drain_stock(stock);
 	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
 
-	local_unlock_irqrestore(&memcg_stock.lock, flags);
+	local_irq_restore(flags);
 }
 
 /*
@@ -2357,7 +2346,7 @@
 	struct memcg_stock_pcp *stock;
 	unsigned long flags;
 
-	local_lock_irqsave(&memcg_stock.lock, flags);
+	local_irq_save(flags);
 
 	stock = this_cpu_ptr(&memcg_stock);
 	if (stock->cached != memcg) { /* reset if necessary */
@@ -2370,7 +2359,7 @@
 	if (stock->nr_pages > MEMCG_CHARGE_BATCH)
 		drain_stock(stock);
 
-	local_unlock_irqrestore(&memcg_stock.lock, flags);
+	local_irq_restore(flags);
 }
 
 /*
@@ -2390,7 +2379,7 @@
 	 * as well as workers from this path always operate on the local
 	 * per-cpu data. CPU up doesn't touch memcg_stock at all.
 	 */
-	curcpu = get_cpu_light();
+	curcpu = get_cpu();
 	for_each_online_cpu(cpu) {
 		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
 		struct mem_cgroup *memcg;
@@ -2413,7 +2402,7 @@
 				schedule_work_on(cpu, &stock->work);
 		}
 	}
-	put_cpu_light();
+	put_cpu();
 	mutex_unlock(&percpu_charge_mutex);
 }
 
@@ -3178,7 +3167,7 @@
 	unsigned long flags;
 	bool ret = false;
 
-	local_lock_irqsave(&memcg_stock.lock, flags);
+	local_irq_save(flags);
 
 	stock = this_cpu_ptr(&memcg_stock);
 	if (objcg == stock->cached_objcg && stock->nr_bytes >= nr_bytes) {
@@ -3186,7 +3175,7 @@
 		ret = true;
 	}
 
-	local_unlock_irqrestore(&memcg_stock.lock, flags);
+	local_irq_restore(flags);
 
 	return ret;
 }
@@ -3253,7 +3242,7 @@
 	struct memcg_stock_pcp *stock;
 	unsigned long flags;
 
-	local_lock_irqsave(&memcg_stock.lock, flags);
+	local_irq_save(flags);
 
 	stock = this_cpu_ptr(&memcg_stock);
 	if (stock->cached_objcg != objcg) { /* reset if necessary */
@@ -3267,7 +3256,7 @@
 	if (stock->nr_bytes > PAGE_SIZE)
 		drain_obj_stock(stock);
 
-	local_unlock_irqrestore(&memcg_stock.lock, flags);
+	local_irq_restore(flags);
 }
 
 int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)
@@ -5789,12 +5778,12 @@
 
 	ret = 0;
 
-	local_lock_irq(&event_lock.l);
+	local_irq_disable();
 	mem_cgroup_charge_statistics(to, page, nr_pages);
 	memcg_check_events(to, page);
 	mem_cgroup_charge_statistics(from, page, -nr_pages);
 	memcg_check_events(from, page);
-	local_unlock_irq(&event_lock.l);
+	local_irq_enable();
 out_unlock:
 	unlock_page(page);
 out:
@@ -6862,10 +6851,10 @@
 	css_get(&memcg->css);
 	commit_charge(page, memcg);
 
-	local_lock_irq(&event_lock.l);
+	local_irq_disable();
 	mem_cgroup_charge_statistics(memcg, page, nr_pages);
 	memcg_check_events(memcg, page);
-	local_unlock_irq(&event_lock.l);
+	local_irq_enable();
 
 	/*
 	 * Cgroup1's unified memory+swap counter has been charged with the
@@ -6921,11 +6910,11 @@
 		memcg_oom_recover(ug->memcg);
 	}
 
-	local_lock_irqsave(&event_lock.l, flags);
+	local_irq_save(flags);
 	__count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
 	__this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_pages);
 	memcg_check_events(ug->memcg, ug->dummy_page);
-	local_unlock_irqrestore(&event_lock.l, flags);
+	local_irq_restore(flags);
 
 	/* drop reference from uncharge_page */
 	css_put(&ug->memcg->css);
@@ -7073,10 +7062,10 @@
 	css_get(&memcg->css);
 	commit_charge(newpage, memcg);
 
-	local_lock_irqsave(&event_lock.l, flags);
+	local_irq_save(flags);
 	mem_cgroup_charge_statistics(memcg, newpage, nr_pages);
 	memcg_check_events(memcg, newpage);
-	local_unlock_irqrestore(&event_lock.l, flags);
+	local_irq_restore(flags);
 }
 
 DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
@@ -7196,13 +7185,9 @@
 	cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
 				  memcg_hotplug_cpu_dead);
 
-	for_each_possible_cpu(cpu) {
-		struct memcg_stock_pcp *stock;
-
-		stock = per_cpu_ptr(&memcg_stock, cpu);
-		INIT_WORK(&stock->work, drain_local_stock);
-		local_lock_init(&stock->lock);
-	}
+	for_each_possible_cpu(cpu)
+		INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
+			  drain_local_stock);
 
 	for_each_node(node) {
 		struct mem_cgroup_tree_per_node *rtpn;
@@ -7251,7 +7236,6 @@
 	struct mem_cgroup *memcg, *swap_memcg;
 	unsigned int nr_entries;
 	unsigned short oldid;
-	unsigned long flags;
 
 	VM_BUG_ON_PAGE(PageLRU(page), page);
 	VM_BUG_ON_PAGE(page_count(page), page);
@@ -7300,13 +7284,9 @@
 	 * important here to have the interrupts disabled because it is the
 	 * only synchronisation we have for updating the per-CPU variables.
 	 */
-	local_lock_irqsave(&event_lock.l, flags);
-#ifndef CONFIG_PREEMPT_RT
 	VM_BUG_ON(!irqs_disabled());
-#endif
 	mem_cgroup_charge_statistics(memcg, page, -nr_entries);
 	memcg_check_events(memcg, page);
-	local_unlock_irqrestore(&event_lock.l, flags);
 
 	css_put(&memcg->css);
 }

--
Gitblit v1.6.2