2023-12-11 6778948f9de86c3cfaf36725a7c87dcff9ba247f
kernel/mm/memcontrol.c
@@ -63,7 +63,6 @@
 #include <net/sock.h>
 #include <net/ip.h>
 #include "slab.h"
-#include <linux/local_lock.h>
 
 #include <linux/uaccess.h>
 
@@ -94,13 +93,6 @@
 #ifdef CONFIG_CGROUP_WRITEBACK
 static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq);
 #endif
-
-struct event_lock {
-	local_lock_t l;
-};
-static DEFINE_PER_CPU(struct event_lock, event_lock) = {
-	.l = INIT_LOCAL_LOCK(l),
-};
 
 /* Whether legacy memory+swap accounting is active */
 static bool do_memsw_account(void)
@@ -825,7 +817,6 @@
 	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
 	memcg = pn->memcg;
 
-	preempt_disable_rt();
 	/* Update memcg */
 	__mod_memcg_state(memcg, idx, val);
 
@@ -845,7 +836,6 @@
 		x = 0;
 	}
 	__this_cpu_write(pn->lruvec_stat_cpu->count[idx], x);
-	preempt_enable_rt();
 }
 
 /**
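preempt_disable_rt()/preempt_enable_rt() exist only in the RT patch series, not mainline: they expand to real preempt_disable()/preempt_enable() on RT builds and to no-ops otherwise, protecting the read-modify-write of the per-node lruvec counters when IRQs-off is not guaranteed. Roughly (a sketch of the idea, not the exact RT-tree definition):

    #ifdef CONFIG_PREEMPT_RT
    # define preempt_disable_rt()	preempt_disable()
    # define preempt_enable_rt()	preempt_enable()
    #else
    # define preempt_disable_rt()	do { } while (0)
    # define preempt_enable_rt()	do { } while (0)
    #endif

With the helpers gone, the enclosing lruvec-stat update (likely __mod_memcg_lruvec_state) is back to relying on its callers running with interrupts disabled, as mainline does.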
@@ -2243,7 +2233,6 @@
 EXPORT_SYMBOL(unlock_page_memcg);
 
 struct memcg_stock_pcp {
-	local_lock_t lock;
 	struct mem_cgroup *cached; /* this never be root cgroup */
 	unsigned int nr_pages;
 
@@ -2295,7 +2284,7 @@
 	if (nr_pages > MEMCG_CHARGE_BATCH)
 		return ret;
 
-	local_lock_irqsave(&memcg_stock.lock, flags);
+	local_irq_save(flags);
 
 	stock = this_cpu_ptr(&memcg_stock);
 	if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
@@ -2303,7 +2292,7 @@
 		ret = true;
 	}
 
-	local_unlock_irqrestore(&memcg_stock.lock, flags);
+	local_irq_restore(flags);
 
 	return ret;
 }
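These two hunks (consume_stock()) set the pattern for the rest of the revert: each local_lock_irqsave(&memcg_stock.lock, flags) becomes a bare local_irq_save(flags), and struct memcg_stock_pcp loses its lock field. On !PREEMPT_RT the generated code is identical, since IRQs-off is what pins the task to its CPU and serialises access to the per-CPU stock. A condensed, self-contained sketch of the restored fast path (field names as in the diff; the nr_pages decrement is the elided hunk body):

    static bool consume_stock_sketch(struct mem_cgroup *memcg,
    				 unsigned int nr_pages)
    {
            struct memcg_stock_pcp *stock;
            unsigned long flags;
            bool ret = false;

            local_irq_save(flags);	/* no preemption, no interrupts */

            stock = this_cpu_ptr(&memcg_stock);
            if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
                    stock->nr_pages -= nr_pages;
                    ret = true;
            }

            local_irq_restore(flags);
            return ret;
    }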
@@ -2338,14 +2327,14 @@
 	 * The only protection from memory hotplug vs. drain_stock races is
 	 * that we always operate on local CPU stock here with IRQ disabled
 	 */
-	local_lock_irqsave(&memcg_stock.lock, flags);
+	local_irq_save(flags);
 
 	stock = this_cpu_ptr(&memcg_stock);
 	drain_obj_stock(stock);
 	drain_stock(stock);
 	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
 
-	local_unlock_irqrestore(&memcg_stock.lock, flags);
+	local_irq_restore(flags);
 }
 
 /*
@@ -2357,7 +2346,7 @@
 	struct memcg_stock_pcp *stock;
 	unsigned long flags;
 
-	local_lock_irqsave(&memcg_stock.lock, flags);
+	local_irq_save(flags);
 
 	stock = this_cpu_ptr(&memcg_stock);
 	if (stock->cached != memcg) { /* reset if necessary */
@@ -2370,7 +2359,7 @@
 	if (stock->nr_pages > MEMCG_CHARGE_BATCH)
 		drain_stock(stock);
 
-	local_unlock_irqrestore(&memcg_stock.lock, flags);
+	local_irq_restore(flags);
 }
 
 /*
@@ -2390,7 +2379,7 @@
 	 * as well as workers from this path always operate on the local
 	 * per-cpu data. CPU up doesn't touch memcg_stock at all.
 	 */
-	curcpu = get_cpu_light();
+	curcpu = get_cpu();
 	for_each_online_cpu(cpu) {
 		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
 		struct mem_cgroup *memcg;
@@ -2413,7 +2402,7 @@
 			schedule_work_on(cpu, &stock->work);
 		}
 	}
-	put_cpu_light();
+	put_cpu();
 	mutex_unlock(&percpu_charge_mutex);
 }
 
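get_cpu_light()/put_cpu_light() are likewise RT-tree helpers: they pin the task via migrate_disable() so the drain loop stays preemptible, whereas mainline's get_cpu()/put_cpu() disable preemption outright. Either way the loop only needs a stable CPU id while it schedules drain work. A sketch of what the restored calls amount to (get_cpu() is preempt_disable() plus smp_processor_id()):

    #include <linux/smp.h>

    static void pinned_cpu_sketch(void)
    {
            int cpu = get_cpu();	/* preemption off, cpu id stable */

            /* ... inspect per-CPU stocks, schedule_work_on(cpu, ...) ... */
            (void)cpu;

            put_cpu();		/* preemption back on */
    }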
@@ -3178,7 +3167,7 @@
 	unsigned long flags;
 	bool ret = false;
 
-	local_lock_irqsave(&memcg_stock.lock, flags);
+	local_irq_save(flags);
 
 	stock = this_cpu_ptr(&memcg_stock);
 	if (objcg == stock->cached_objcg && stock->nr_bytes >= nr_bytes) {
@@ -3186,7 +3175,7 @@
 		ret = true;
 	}
 
-	local_unlock_irqrestore(&memcg_stock.lock, flags);
+	local_irq_restore(flags);
 
 	return ret;
 }
@@ -3253,7 +3242,7 @@
 	struct memcg_stock_pcp *stock;
 	unsigned long flags;
 
-	local_lock_irqsave(&memcg_stock.lock, flags);
+	local_irq_save(flags);
 
 	stock = this_cpu_ptr(&memcg_stock);
 	if (stock->cached_objcg != objcg) { /* reset if necessary */
@@ -3267,7 +3256,7 @@
 	if (stock->nr_bytes > PAGE_SIZE)
 		drain_obj_stock(stock);
 
-	local_unlock_irqrestore(&memcg_stock.lock, flags);
+	local_irq_restore(flags);
 }
 
 int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)
@@ -5789,12 +5778,12 @@
 
 	ret = 0;
 
-	local_lock_irq(&event_lock.l);
+	local_irq_disable();
 	mem_cgroup_charge_statistics(to, page, nr_pages);
 	memcg_check_events(to, page);
 	mem_cgroup_charge_statistics(from, page, -nr_pages);
 	memcg_check_events(from, page);
-	local_unlock_irq(&event_lock.l);
+	local_irq_enable();
 out_unlock:
 	unlock_page(page);
 out:
@@ -6862,10 +6851,10 @@
 	css_get(&memcg->css);
 	commit_charge(page, memcg);
 
-	local_lock_irq(&event_lock.l);
+	local_irq_disable();
 	mem_cgroup_charge_statistics(memcg, page, nr_pages);
 	memcg_check_events(memcg, page);
-	local_unlock_irq(&event_lock.l);
+	local_irq_enable();
 
 	/*
 	 * Cgroup1's unified memory+swap counter has been charged with the
@@ -6921,11 +6910,11 @@
 		memcg_oom_recover(ug->memcg);
 	}
 
-	local_lock_irqsave(&event_lock.l, flags);
+	local_irq_save(flags);
 	__count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
 	__this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_pages);
 	memcg_check_events(ug->memcg, ug->dummy_page);
-	local_unlock_irqrestore(&event_lock.l, flags);
+	local_irq_restore(flags);
 
 	/* drop reference from uncharge_page */
 	css_put(&ug->memcg->css);
@@ -7073,10 +7062,10 @@
 	css_get(&memcg->css);
 	commit_charge(newpage, memcg);
 
-	local_lock_irqsave(&event_lock.l, flags);
+	local_irq_save(flags);
 	mem_cgroup_charge_statistics(memcg, newpage, nr_pages);
 	memcg_check_events(memcg, newpage);
-	local_unlock_irqrestore(&event_lock.l, flags);
+	local_irq_restore(flags);
 }
 
 DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
@@ -7196,13 +7185,9 @@
 	cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
 				  memcg_hotplug_cpu_dead);
 
-	for_each_possible_cpu(cpu) {
-		struct memcg_stock_pcp *stock;
-
-		stock = per_cpu_ptr(&memcg_stock, cpu);
-		INIT_WORK(&stock->work, drain_local_stock);
-		local_lock_init(&stock->lock);
-	}
+	for_each_possible_cpu(cpu)
+		INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
+			  drain_local_stock);
 
 	for_each_node(node) {
 		struct mem_cgroup_tree_per_node *rtpn;
@@ -7251,7 +7236,6 @@
 	struct mem_cgroup *memcg, *swap_memcg;
 	unsigned int nr_entries;
 	unsigned short oldid;
-	unsigned long flags;
 
 	VM_BUG_ON_PAGE(PageLRU(page), page);
 	VM_BUG_ON_PAGE(page_count(page), page);
@@ -7300,13 +7284,9 @@
 	 * important here to have the interrupts disabled because it is the
 	 * only synchronisation we have for updating the per-CPU variables.
 	 */
-	local_lock_irqsave(&event_lock.l, flags);
-#ifndef CONFIG_PREEMPT_RT
 	VM_BUG_ON(!irqs_disabled());
-#endif
 	mem_cgroup_charge_statistics(memcg, page, -nr_entries);
 	memcg_check_events(memcg, page);
-	local_unlock_irqrestore(&event_lock.l, flags);
 
 	css_put(&memcg->css);
 }
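This last hunk is the one spot where the revert swaps a lock for an assertion: mem_cgroup_swapout() is reached with interrupts already disabled by its caller (per the comment kept in the context lines), so the restored code merely asserts that instead of taking event_lock, and the #ifndef CONFIG_PREEMPT_RT guard around the VM_BUG_ON (needed because irqs_disabled() stays false under the RT local_lock) goes away with it. The tail of the function after the revert reads:

    	/* Interrupts must already be off: they are our only
    	 * synchronisation for the per-CPU statistics below.
    	 */
    	VM_BUG_ON(!irqs_disabled());
    	mem_cgroup_charge_statistics(memcg, page, -nr_entries);
    	memcg_check_events(memcg, page);

    	css_put(&memcg->css);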