2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
--- a/kernel/mm/memcontrol.c
+++ b/kernel/mm/memcontrol.c
@@ -63,7 +63,6 @@
 #include <net/sock.h>
 #include <net/ip.h>
 #include "slab.h"
-#include <linux/local_lock.h>
 
 #include <linux/uaccess.h>
 
@@ -74,6 +73,7 @@
 EXPORT_SYMBOL(memory_cgrp_subsys);
 
 struct mem_cgroup *root_mem_cgroup __read_mostly;
+EXPORT_SYMBOL_GPL(root_mem_cgroup);
 
 /* Active memory cgroup to use from an interrupt context */
 DEFINE_PER_CPU(struct mem_cgroup *, int_active_memcg);
@@ -94,13 +94,6 @@
 #ifdef CONFIG_CGROUP_WRITEBACK
 static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq);
 #endif
-
-struct event_lock {
-	local_lock_t l;
-};
-static DEFINE_PER_CPU(struct event_lock, event_lock) = {
-	.l = INIT_LOCAL_LOCK(l),
-};
 
 /* Whether legacy memory+swap accounting is active */
 static bool do_memsw_account(void)
@@ -825,7 +818,6 @@
 	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
 	memcg = pn->memcg;
 
-	preempt_disable_rt();
 	/* Update memcg */
 	__mod_memcg_state(memcg, idx, val);
 
@@ -845,7 +837,6 @@
 		x = 0;
 	}
 	__this_cpu_write(pn->lruvec_stat_cpu->count[idx], x);
-	preempt_enable_rt();
 }
 
 /**
@@ -868,6 +859,7 @@
 	if (!mem_cgroup_disabled())
 		__mod_memcg_lruvec_state(lruvec, idx, val);
 }
+EXPORT_SYMBOL_GPL(__mod_lruvec_state);
 
 void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val)
 {
@@ -1452,6 +1444,7 @@
 	if (nr_pages > 0)
 		*lru_size += nr_pages;
 }
+EXPORT_SYMBOL_GPL(mem_cgroup_update_lru_size);
 
 /**
  * mem_cgroup_margin - calculate chargeable space of a memory cgroup
@@ -2243,7 +2236,6 @@
 EXPORT_SYMBOL(unlock_page_memcg);
 
 struct memcg_stock_pcp {
-	local_lock_t lock;
 	struct mem_cgroup *cached; /* this never be root cgroup */
 	unsigned int nr_pages;
 
@@ -2295,7 +2287,7 @@
 	if (nr_pages > MEMCG_CHARGE_BATCH)
 		return ret;
 
-	local_lock_irqsave(&memcg_stock.lock, flags);
+	local_irq_save(flags);
 
 	stock = this_cpu_ptr(&memcg_stock);
 	if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
@@ -2303,7 +2295,7 @@
 		ret = true;
 	}
 
-	local_unlock_irqrestore(&memcg_stock.lock, flags);
+	local_irq_restore(flags);
 
 	return ret;
 }
@@ -2338,14 +2330,14 @@
 	 * The only protection from memory hotplug vs. drain_stock races is
 	 * that we always operate on local CPU stock here with IRQ disabled
 	 */
-	local_lock_irqsave(&memcg_stock.lock, flags);
+	local_irq_save(flags);
 
 	stock = this_cpu_ptr(&memcg_stock);
 	drain_obj_stock(stock);
 	drain_stock(stock);
 	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
 
-	local_unlock_irqrestore(&memcg_stock.lock, flags);
+	local_irq_restore(flags);
 }
 
 /*
@@ -2357,7 +2349,7 @@
 	struct memcg_stock_pcp *stock;
 	unsigned long flags;
 
-	local_lock_irqsave(&memcg_stock.lock, flags);
+	local_irq_save(flags);
 
 	stock = this_cpu_ptr(&memcg_stock);
 	if (stock->cached != memcg) { /* reset if necessary */
@@ -2370,7 +2362,7 @@
 	if (stock->nr_pages > MEMCG_CHARGE_BATCH)
 		drain_stock(stock);
 
-	local_unlock_irqrestore(&memcg_stock.lock, flags);
+	local_irq_restore(flags);
 }
 
 /*
@@ -2390,7 +2382,7 @@
 	 * as well as workers from this path always operate on the local
 	 * per-cpu data. CPU up doesn't touch memcg_stock at all.
 	 */
-	curcpu = get_cpu_light();
+	curcpu = get_cpu();
 	for_each_online_cpu(cpu) {
 		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
 		struct mem_cgroup *memcg;
@@ -2413,7 +2405,7 @@
 				schedule_work_on(cpu, &stock->work);
 		}
 	}
-	put_cpu_light();
+	put_cpu();
 	mutex_unlock(&percpu_charge_mutex);
 }
 
@@ -3178,7 +3170,7 @@
 	unsigned long flags;
 	bool ret = false;
 
-	local_lock_irqsave(&memcg_stock.lock, flags);
+	local_irq_save(flags);
 
 	stock = this_cpu_ptr(&memcg_stock);
 	if (objcg == stock->cached_objcg && stock->nr_bytes >= nr_bytes) {
@@ -3186,7 +3178,7 @@
 		ret = true;
 	}
 
-	local_unlock_irqrestore(&memcg_stock.lock, flags);
+	local_irq_restore(flags);
 
 	return ret;
 }
@@ -3253,7 +3245,7 @@
 	struct memcg_stock_pcp *stock;
 	unsigned long flags;
 
-	local_lock_irqsave(&memcg_stock.lock, flags);
+	local_irq_save(flags);
 
 	stock = this_cpu_ptr(&memcg_stock);
 	if (stock->cached_objcg != objcg) { /* reset if necessary */
@@ -3267,7 +3259,7 @@
 	if (stock->nr_bytes > PAGE_SIZE)
 		drain_obj_stock(stock);
 
-	local_unlock_irqrestore(&memcg_stock.lock, flags);
+	local_irq_restore(flags);
 }
 
 int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)
@@ -3974,6 +3966,10 @@
 {
 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
 
+	pr_warn_once("Cgroup memory moving (move_charge_at_immigrate) is deprecated. "
+		     "Please report your usecase to linux-mm@kvack.org if you "
+		     "depend on this functionality.\n");
+
 	if (val & ~MOVE_MASK)
 		return -EINVAL;
 
@@ -4217,7 +4213,7 @@
 {
 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
 
-	if (val > 100)
+	if (val > 200)
 		return -EINVAL;
 
 	if (css->parent)
@@ -5789,12 +5785,12 @@
 
 	ret = 0;
 
-	local_lock_irq(&event_lock.l);
+	local_irq_disable();
 	mem_cgroup_charge_statistics(to, page, nr_pages);
 	memcg_check_events(to, page);
 	mem_cgroup_charge_statistics(from, page, -nr_pages);
 	memcg_check_events(from, page);
-	local_unlock_irq(&event_lock.l);
+	local_irq_enable();
 out_unlock:
 	unlock_page(page);
 out:
@@ -6862,10 +6858,10 @@
 	css_get(&memcg->css);
 	commit_charge(page, memcg);
 
-	local_lock_irq(&event_lock.l);
+	local_irq_disable();
 	mem_cgroup_charge_statistics(memcg, page, nr_pages);
 	memcg_check_events(memcg, page);
-	local_unlock_irq(&event_lock.l);
+	local_irq_enable();
 
 	/*
 	 * Cgroup1's unified memory+swap counter has been charged with the
@@ -6921,11 +6917,11 @@
 		memcg_oom_recover(ug->memcg);
 	}
 
-	local_lock_irqsave(&event_lock.l, flags);
+	local_irq_save(flags);
 	__count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
 	__this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_pages);
 	memcg_check_events(ug->memcg, ug->dummy_page);
-	local_unlock_irqrestore(&event_lock.l, flags);
+	local_irq_restore(flags);
 
 	/* drop reference from uncharge_page */
 	css_put(&ug->memcg->css);
@@ -7073,10 +7069,10 @@
 	css_get(&memcg->css);
 	commit_charge(newpage, memcg);
 
-	local_lock_irqsave(&event_lock.l, flags);
+	local_irq_save(flags);
 	mem_cgroup_charge_statistics(memcg, newpage, nr_pages);
 	memcg_check_events(memcg, newpage);
-	local_unlock_irqrestore(&event_lock.l, flags);
+	local_irq_restore(flags);
 }
 
 DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
@@ -7196,13 +7192,9 @@
 	cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
 				  memcg_hotplug_cpu_dead);
 
-	for_each_possible_cpu(cpu) {
-		struct memcg_stock_pcp *stock;
-
-		stock = per_cpu_ptr(&memcg_stock, cpu);
-		INIT_WORK(&stock->work, drain_local_stock);
-		local_lock_init(&stock->lock);
-	}
+	for_each_possible_cpu(cpu)
+		INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
+			  drain_local_stock);
 
 	for_each_node(node) {
 		struct mem_cgroup_tree_per_node *rtpn;
@@ -7251,7 +7243,6 @@
 	struct mem_cgroup *memcg, *swap_memcg;
 	unsigned int nr_entries;
 	unsigned short oldid;
-	unsigned long flags;
 
 	VM_BUG_ON_PAGE(PageLRU(page), page);
 	VM_BUG_ON_PAGE(page_count(page), page);
@@ -7300,13 +7291,9 @@
 	 * important here to have the interrupts disabled because it is the
 	 * only synchronisation we have for updating the per-CPU variables.
 	 */
-	local_lock_irqsave(&event_lock.l, flags);
-#ifndef CONFIG_PREEMPT_RT
 	VM_BUG_ON(!irqs_disabled());
-#endif
 	mem_cgroup_charge_statistics(memcg, page, -nr_entries);
 	memcg_check_events(memcg, page);
-	local_unlock_irqrestore(&event_lock.l, flags);
 
 	css_put(&memcg->css);
 }
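
For reference, a minimal standalone sketch (not part of the patch) contrasting the two per-CPU protection styles this diff switches between: an explicit local_lock_t embedded in the per-CPU structure versus plain interrupt disabling. The struct my_pcp and its helper functions are hypothetical names used only for illustration.

/*
 * Illustrative sketch only -- my_pcp and the helpers below are hypothetical
 * and do not appear in memcontrol.c.
 */
#include <linux/percpu.h>
#include <linux/local_lock.h>
#include <linux/irqflags.h>

struct my_pcp {
	local_lock_t lock;	/* explicit per-CPU lock for the fields below */
	unsigned int nr;
};

static DEFINE_PER_CPU(struct my_pcp, my_pcp) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

/* Style removed by this diff: take a named local lock on the local CPU. */
static void my_pcp_add_locked(unsigned int delta)
{
	unsigned long flags;

	local_lock_irqsave(&my_pcp.lock, flags);
	this_cpu_add(my_pcp.nr, delta);
	local_unlock_irqrestore(&my_pcp.lock, flags);
}

/* Style restored by this diff: rely on disabled IRQs to serialise the update. */
static void my_pcp_add_irq(unsigned int delta)
{
	unsigned long flags;

	local_irq_save(flags);
	this_cpu_add(my_pcp.nr, delta);
	local_irq_restore(flags);
}

On a non-RT kernel the two helpers behave essentially the same, since local_lock_irqsave() falls back to disabling interrupts; under CONFIG_PREEMPT_RT the local_lock variant becomes a real per-CPU lock, which is what the RT-specific code removed here relied on.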