From 151fecfb72a0d602dfe79790602ef64b4e241574 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Mon, 19 Feb 2024 01:51:07 +0000
Subject: [PATCH] mm/memcontrol: export memcg symbols and drop local_lock usage
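
Export root_mem_cgroup, __mod_lruvec_state and mem_cgroup_update_lru_size
with EXPORT_SYMBOL_GPL, drop the local_lock/event_lock based protection of
the per-CPU memcg stock and event statistics in favour of plain
local_irq_save()/local_irq_disable(), remove the preempt_disable_rt() calls
in __mod_memcg_lruvec_state(), switch drain_all_stock() back to
get_cpu()/put_cpu(), warn once when move_charge_at_immigrate is set, and
accept swappiness values up to 200.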
---
kernel/mm/memcontrol.c | 75 +++++++++++++++----------------------
 1 file changed, 31 insertions(+), 44 deletions(-)
diff --git a/kernel/mm/memcontrol.c b/kernel/mm/memcontrol.c
index 7db1e5a..b6f6bfc 100644
--- a/kernel/mm/memcontrol.c
+++ b/kernel/mm/memcontrol.c
@@ -63,7 +63,6 @@
#include <net/sock.h>
#include <net/ip.h>
#include "slab.h"
-#include <linux/local_lock.h>
#include <linux/uaccess.h>
@@ -74,6 +73,7 @@
EXPORT_SYMBOL(memory_cgrp_subsys);
struct mem_cgroup *root_mem_cgroup __read_mostly;
+EXPORT_SYMBOL_GPL(root_mem_cgroup);
/* Active memory cgroup to use from an interrupt context */
DEFINE_PER_CPU(struct mem_cgroup *, int_active_memcg);
@@ -94,13 +94,6 @@
#ifdef CONFIG_CGROUP_WRITEBACK
static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq);
#endif
-
-struct event_lock {
- local_lock_t l;
-};
-static DEFINE_PER_CPU(struct event_lock, event_lock) = {
- .l = INIT_LOCAL_LOCK(l),
-};
/* Whether legacy memory+swap accounting is active */
static bool do_memsw_account(void)
@@ -825,7 +818,6 @@
pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
memcg = pn->memcg;
- preempt_disable_rt();
/* Update memcg */
__mod_memcg_state(memcg, idx, val);
@@ -845,7 +837,6 @@
x = 0;
}
__this_cpu_write(pn->lruvec_stat_cpu->count[idx], x);
- preempt_enable_rt();
}
/**
@@ -868,6 +859,7 @@
if (!mem_cgroup_disabled())
__mod_memcg_lruvec_state(lruvec, idx, val);
}
+EXPORT_SYMBOL_GPL(__mod_lruvec_state);
void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val)
{
@@ -1452,6 +1444,7 @@
if (nr_pages > 0)
*lru_size += nr_pages;
}
+EXPORT_SYMBOL_GPL(mem_cgroup_update_lru_size);
/**
* mem_cgroup_margin - calculate chargeable space of a memory cgroup
@@ -2243,7 +2236,6 @@
EXPORT_SYMBOL(unlock_page_memcg);
struct memcg_stock_pcp {
- local_lock_t lock;
struct mem_cgroup *cached; /* this never be root cgroup */
unsigned int nr_pages;
@@ -2295,7 +2287,7 @@
if (nr_pages > MEMCG_CHARGE_BATCH)
return ret;
- local_lock_irqsave(&memcg_stock.lock, flags);
+ local_irq_save(flags);
stock = this_cpu_ptr(&memcg_stock);
if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
@@ -2303,7 +2295,7 @@
ret = true;
}
- local_unlock_irqrestore(&memcg_stock.lock, flags);
+ local_irq_restore(flags);
return ret;
}
@@ -2338,14 +2330,14 @@
* The only protection from memory hotplug vs. drain_stock races is
* that we always operate on local CPU stock here with IRQ disabled
*/
- local_lock_irqsave(&memcg_stock.lock, flags);
+ local_irq_save(flags);
stock = this_cpu_ptr(&memcg_stock);
drain_obj_stock(stock);
drain_stock(stock);
clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
- local_unlock_irqrestore(&memcg_stock.lock, flags);
+ local_irq_restore(flags);
}
/*
@@ -2357,7 +2349,7 @@
struct memcg_stock_pcp *stock;
unsigned long flags;
- local_lock_irqsave(&memcg_stock.lock, flags);
+ local_irq_save(flags);
stock = this_cpu_ptr(&memcg_stock);
if (stock->cached != memcg) { /* reset if necessary */
@@ -2370,7 +2362,7 @@
if (stock->nr_pages > MEMCG_CHARGE_BATCH)
drain_stock(stock);
- local_unlock_irqrestore(&memcg_stock.lock, flags);
+ local_irq_restore(flags);
}
/*
@@ -2390,7 +2382,7 @@
* as well as workers from this path always operate on the local
* per-cpu data. CPU up doesn't touch memcg_stock at all.
*/
- curcpu = get_cpu_light();
+ curcpu = get_cpu();
for_each_online_cpu(cpu) {
struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
struct mem_cgroup *memcg;
@@ -2413,7 +2405,7 @@
schedule_work_on(cpu, &stock->work);
}
}
- put_cpu_light();
+ put_cpu();
mutex_unlock(&percpu_charge_mutex);
}
@@ -3178,7 +3170,7 @@
unsigned long flags;
bool ret = false;
- local_lock_irqsave(&memcg_stock.lock, flags);
+ local_irq_save(flags);
stock = this_cpu_ptr(&memcg_stock);
if (objcg == stock->cached_objcg && stock->nr_bytes >= nr_bytes) {
@@ -3186,7 +3178,7 @@
ret = true;
}
- local_unlock_irqrestore(&memcg_stock.lock, flags);
+ local_irq_restore(flags);
return ret;
}
@@ -3253,7 +3245,7 @@
struct memcg_stock_pcp *stock;
unsigned long flags;
- local_lock_irqsave(&memcg_stock.lock, flags);
+ local_irq_save(flags);
stock = this_cpu_ptr(&memcg_stock);
if (stock->cached_objcg != objcg) { /* reset if necessary */
@@ -3267,7 +3259,7 @@
if (stock->nr_bytes > PAGE_SIZE)
drain_obj_stock(stock);
- local_unlock_irqrestore(&memcg_stock.lock, flags);
+ local_irq_restore(flags);
}
int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)
@@ -3974,6 +3966,10 @@
{
struct mem_cgroup *memcg = mem_cgroup_from_css(css);
+ pr_warn_once("Cgroup memory moving (move_charge_at_immigrate) is deprecated. "
+ "Please report your usecase to linux-mm@kvack.org if you "
+ "depend on this functionality.\n");
+
if (val & ~MOVE_MASK)
return -EINVAL;
@@ -4217,7 +4213,7 @@
{
struct mem_cgroup *memcg = mem_cgroup_from_css(css);
- if (val > 100)
+ if (val > 200)
return -EINVAL;
if (css->parent)
@@ -5789,12 +5785,12 @@
ret = 0;
- local_lock_irq(&event_lock.l);
+ local_irq_disable();
mem_cgroup_charge_statistics(to, page, nr_pages);
memcg_check_events(to, page);
mem_cgroup_charge_statistics(from, page, -nr_pages);
memcg_check_events(from, page);
- local_unlock_irq(&event_lock.l);
+ local_irq_enable();
out_unlock:
unlock_page(page);
out:
@@ -6862,10 +6858,10 @@
css_get(&memcg->css);
commit_charge(page, memcg);
- local_lock_irq(&event_lock.l);
+ local_irq_disable();
mem_cgroup_charge_statistics(memcg, page, nr_pages);
memcg_check_events(memcg, page);
- local_unlock_irq(&event_lock.l);
+ local_irq_enable();
/*
* Cgroup1's unified memory+swap counter has been charged with the
@@ -6921,11 +6917,11 @@
memcg_oom_recover(ug->memcg);
}
- local_lock_irqsave(&event_lock.l, flags);
+ local_irq_save(flags);
__count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
__this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_pages);
memcg_check_events(ug->memcg, ug->dummy_page);
- local_unlock_irqrestore(&event_lock.l, flags);
+ local_irq_restore(flags);
/* drop reference from uncharge_page */
css_put(&ug->memcg->css);
@@ -7073,10 +7069,10 @@
css_get(&memcg->css);
commit_charge(newpage, memcg);
- local_lock_irqsave(&event_lock.l, flags);
+ local_irq_save(flags);
mem_cgroup_charge_statistics(memcg, newpage, nr_pages);
memcg_check_events(memcg, newpage);
- local_unlock_irqrestore(&event_lock.l, flags);
+ local_irq_restore(flags);
}
DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
@@ -7196,13 +7192,9 @@
cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
memcg_hotplug_cpu_dead);
- for_each_possible_cpu(cpu) {
- struct memcg_stock_pcp *stock;
-
- stock = per_cpu_ptr(&memcg_stock, cpu);
- INIT_WORK(&stock->work, drain_local_stock);
- local_lock_init(&stock->lock);
- }
+ for_each_possible_cpu(cpu)
+ INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
+ drain_local_stock);
for_each_node(node) {
struct mem_cgroup_tree_per_node *rtpn;
@@ -7251,7 +7243,6 @@
struct mem_cgroup *memcg, *swap_memcg;
unsigned int nr_entries;
unsigned short oldid;
- unsigned long flags;
VM_BUG_ON_PAGE(PageLRU(page), page);
VM_BUG_ON_PAGE(page_count(page), page);
@@ -7300,13 +7291,9 @@
* important here to have the interrupts disabled because it is the
* only synchronisation we have for updating the per-CPU variables.
*/
- local_lock_irqsave(&event_lock.l, flags);
-#ifndef CONFIG_PREEMPT_RT
VM_BUG_ON(!irqs_disabled());
-#endif
mem_cgroup_charge_statistics(memcg, page, -nr_entries);
memcg_check_events(memcg, page);
- local_unlock_irqrestore(&event_lock.l, flags);
css_put(&memcg->css);
}
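
Note: the hunks above replace local_lock_irqsave()/local_unlock_irqrestore()
on the per-CPU memcg stock with plain local_irq_save()/local_irq_restore()
around this_cpu_ptr() accesses. The fragment below is a minimal sketch of
that pattern; struct demo_stock and demo_consume() are made-up illustration
names, not the real memcg_stock_pcp code from this patch.

#include <linux/percpu.h>
#include <linux/irqflags.h>

struct demo_stock {
	unsigned int nr_pages;	/* pre-charged pages cached on this CPU */
};
static DEFINE_PER_CPU(struct demo_stock, demo_stock);

/* Try to satisfy a charge of nr_pages from the local CPU's cache. */
static bool demo_consume(unsigned int nr_pages)
{
	struct demo_stock *stock;
	unsigned long flags;
	bool ret = false;

	/*
	 * Disabling interrupts is the only synchronisation needed here:
	 * the data is per-CPU, so only code running on this CPU
	 * (including its interrupt handlers) can touch it.
	 */
	local_irq_save(flags);

	stock = this_cpu_ptr(&demo_stock);
	if (stock->nr_pages >= nr_pages) {
		stock->nr_pages -= nr_pages;
		ret = true;
	}

	local_irq_restore(flags);
	return ret;
}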
--
Gitblit v1.6.2