2023-11-06 e3e12f52b214121840b44c91de5b3e5af5d3eb84
kernel/mm/memcontrol.c
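
This appears to be the -rt series conversion of memcg's event accounting away from raw interrupt disabling. There are two kinds of change: every local_irq_disable()/local_irq_save() region that pairs mem_cgroup_charge_statistics() with memcg_check_events() now takes a new named per-CPU local lock, event_lock, and the stock-draining loop pins the CPU with get_cpu_light()/put_cpu_light() instead of get_cpu()/put_cpu(). On !PREEMPT_RT builds the locallock primitives compile back down to the original local_irq_* calls, so only RT behaviour changes; a sketch of both expansions follows the hunk that defines event_lock.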
@@ -69,6 +69,7 @@
 #include <net/sock.h>
 #include <net/ip.h>
 #include "slab.h"
+#include <linux/locallock.h>

 #include <linux/uaccess.h>

@@ -93,6 +94,8 @@
 #else
 #define do_swap_account 0
 #endif
+
+static DEFINE_LOCAL_IRQ_LOCK(event_lock);

 /* Whether legacy memory+swap accounting is active */
 static bool do_memsw_account(void)
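
For reference, a minimal sketch of how the locallock primitives used throughout this patch expand, reconstructed from the include/linux/locallock.h of -rt trees of this vintage (the exact spellings are an assumption, not part of this patch):

    /* !CONFIG_PREEMPT_RT_BASE: the lock variable compiles away and the
     * operations fall back to plain interrupt disabling, i.e. the
     * pre-patch behaviour is preserved. */
    #define DEFINE_LOCAL_IRQ_LOCK(lvar)		__typeof__(const int) lvar
    #define local_lock_irq(lvar)			local_irq_disable()
    #define local_unlock_irq(lvar)			local_irq_enable()
    #define local_lock_irqsave(lvar, flags)	local_irq_save(flags)
    #define local_unlock_irqrestore(lvar, flags)	local_irq_restore(flags)

    /* CONFIG_PREEMPT_RT_BASE: DEFINE_LOCAL_IRQ_LOCK() instead instantiates
     * a per-CPU struct local_irq_lock containing a spinlock, and the
     * lock/unlock operations take that lock. Interrupts stay enabled and
     * the section remains preemptible, yet all event_lock sections on a
     * given CPU still exclude one another, which is the only guarantee
     * the per-CPU statistics below actually need. */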
@@ -2084,7 +2087,7 @@
	 * as well as workers from this path always operate on the local
	 * per-cpu data. CPU up doesn't touch memcg_stock at all.
	 */
-	curcpu = get_cpu();
+	curcpu = get_cpu_light();
	for_each_online_cpu(cpu) {
		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
		struct mem_cgroup *memcg;
@@ -2104,7 +2107,7 @@
		}
		css_put(&memcg->css);
	}
-	put_cpu();
+	put_cpu_light();
	mutex_unlock(&percpu_charge_mutex);
 }

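On RT, get_cpu() is problematic here because it disables preemption across the whole for_each_online_cpu() loop, including the per-CPU flushing it triggers. All the code actually needs is a stable CPU number, and migrate_disable() provides that without turning preemption off. A sketch of the helpers, assuming the usual -rt definitions in include/linux/smp.h (an assumption, not shown by this hunk):

    #ifdef CONFIG_PREEMPT_RT_FULL
    # define get_cpu_light()	({ migrate_disable(); smp_processor_id(); })
    # define put_cpu_light()	migrate_enable()
    #else
    # define get_cpu_light()	get_cpu()	/* preempt_disable() + cpu id */
    # define put_cpu_light()	put_cpu()
    #endif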
@@ -4933,12 +4936,12 @@

	ret = 0;

-	local_irq_disable();
+	local_lock_irq(event_lock);
	mem_cgroup_charge_statistics(to, page, compound, nr_pages);
	memcg_check_events(to, page);
	mem_cgroup_charge_statistics(from, page, compound, -nr_pages);
	memcg_check_events(from, page);
-	local_irq_enable();
+	local_unlock_irq(event_lock);
 out_unlock:
	unlock_page(page);
 out:
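
The hunks that follow repeat the same mechanical substitution in the commit-charge, uncharge-batch and page-migration paths: each interrupt-disabled region pairing mem_cgroup_charge_statistics() with memcg_check_events() becomes an event_lock section, using the irqsave/irqrestore variants wherever the caller's interrupt state is not known.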
@@ -6057,10 +6060,10 @@

	commit_charge(page, memcg, lrucare);

-	local_irq_disable();
+	local_lock_irq(event_lock);
	mem_cgroup_charge_statistics(memcg, page, compound, nr_pages);
	memcg_check_events(memcg, page);
-	local_irq_enable();
+	local_unlock_irq(event_lock);

	if (do_memsw_account() && PageSwapCache(page)) {
		swp_entry_t entry = { .val = page_private(page) };
@@ -6129,7 +6132,7 @@
		memcg_oom_recover(ug->memcg);
	}

-	local_irq_save(flags);
+	local_lock_irqsave(event_lock, flags);
	__mod_memcg_state(ug->memcg, MEMCG_RSS, -ug->nr_anon);
	__mod_memcg_state(ug->memcg, MEMCG_CACHE, -ug->nr_file);
	__mod_memcg_state(ug->memcg, MEMCG_RSS_HUGE, -ug->nr_huge);
@@ -6137,7 +6140,7 @@
	__count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
	__this_cpu_add(ug->memcg->stat_cpu->nr_page_events, nr_pages);
	memcg_check_events(ug->memcg, ug->dummy_page);
-	local_irq_restore(flags);
+	local_unlock_irqrestore(event_lock, flags);

	if (!mem_cgroup_is_root(ug->memcg))
		css_put_many(&ug->memcg->css, nr_pages);
@@ -6300,10 +6303,10 @@

	commit_charge(newpage, memcg, false);

-	local_irq_save(flags);
+	local_lock_irqsave(event_lock, flags);
	mem_cgroup_charge_statistics(memcg, newpage, compound, nr_pages);
	memcg_check_events(memcg, newpage);
-	local_irq_restore(flags);
+	local_unlock_irqrestore(event_lock, flags);
 }

 DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
@@ -6485,6 +6488,7 @@
	struct mem_cgroup *memcg, *swap_memcg;
	unsigned int nr_entries;
	unsigned short oldid;
+	unsigned long flags;

	VM_BUG_ON_PAGE(PageLRU(page), page);
	VM_BUG_ON_PAGE(page_count(page), page);
@@ -6530,10 +6534,14 @@
	 * important here to have the interrupts disabled because it is the
	 * only synchronisation we have for updating the per-CPU variables.
	 */
+	local_lock_irqsave(event_lock, flags);
+#ifndef CONFIG_PREEMPT_RT_BASE
	VM_BUG_ON(!irqs_disabled());
+#endif
	mem_cgroup_charge_statistics(memcg, page, PageTransHuge(page),
				     -nr_entries);
	memcg_check_events(memcg, page);
+	local_unlock_irqrestore(event_lock, flags);

	if (!mem_cgroup_is_root(memcg))
		css_put_many(&memcg->css, nr_entries);
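
This last hunk differs from the others: mem_cgroup_swapout() previously took no protection of its own and only asserted that its caller had already disabled interrupts. The patch makes the protection explicit by taking event_lock (hence the flags variable added a few hunks up), and the assertion is compiled out under CONFIG_PREEMPT_RT_BASE because there local_lock_irqsave() deliberately leaves interrupts enabled (see the sketch above), so VM_BUG_ON(!irqs_disabled()) would otherwise fire on every swapout.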