@@ -63,7 +63,6 @@
 #include <net/sock.h>
 #include <net/ip.h>
 #include "slab.h"
-#include <linux/local_lock.h>
 
 #include <linux/uaccess.h>
 
@@ -74,6 +73,7 @@
 EXPORT_SYMBOL(memory_cgrp_subsys);
 
 struct mem_cgroup *root_mem_cgroup __read_mostly;
+EXPORT_SYMBOL_GPL(root_mem_cgroup);
 
 /* Active memory cgroup to use from an interrupt context */
 DEFINE_PER_CPU(struct mem_cgroup *, int_active_memcg);
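
Note on the hunk above: EXPORT_SYMBOL_GPL() makes root_mem_cgroup resolvable from GPL-licensed loadable modules; without the export the symbol is only reachable from built-in code. A minimal sketch of a module-side consumer; the module itself is hypothetical and not part of this patch:

    #include <linux/module.h>
    #include <linux/memcontrol.h>

    static int __init memcg_probe_init(void)
    {
            /* This reference links only because of the new export. */
            pr_info("root memcg %s\n", root_mem_cgroup ? "present" : "absent");
            return 0;
    }
    module_init(memcg_probe_init);

    MODULE_LICENSE("GPL");  /* a _GPL export demands a GPL-compatible module */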
@@ -94,13 +94,6 @@
 #ifdef CONFIG_CGROUP_WRITEBACK
 static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq);
 #endif
-
-struct event_lock {
-        local_lock_t l;
-};
-static DEFINE_PER_CPU(struct event_lock, event_lock) = {
-        .l = INIT_LOCAL_LOCK(l),
-};
 
 /* Whether legacy memory+swap accounting is active */
 static bool do_memsw_account(void)
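
Note on the hunk above: the deleted event_lock is the PREEMPT_RT idiom of giving an otherwise implicit per-CPU critical section an explicit, lockdep-visible lock. On !PREEMPT_RT a local_lock_t compiles down to plain preempt/IRQ disabling; on PREEMPT_RT it becomes a per-CPU sleeping lock so the section stays preemptible. A minimal sketch of the pattern being removed, with illustrative names not taken from this file:

    #include <linux/local_lock.h>
    #include <linux/percpu.h>

    struct my_events {
            local_lock_t lock;
            unsigned long count;
    };
    static DEFINE_PER_CPU(struct my_events, my_events) = {
            .lock = INIT_LOCAL_LOCK(lock),
    };

    static void my_event_inc(void)
    {
            unsigned long flags;

            /* Serialises against this CPU only; other CPUs have their own. */
            local_lock_irqsave(&my_events.lock, flags);
            this_cpu_inc(my_events.count);
            local_unlock_irqrestore(&my_events.lock, flags);
    }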
@@ -825,7 +818,6 @@
         pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
         memcg = pn->memcg;
 
-        preempt_disable_rt();
         /* Update memcg */
         __mod_memcg_state(memcg, idx, val);
 
@@ -845,7 +837,6 @@
                 x = 0;
         }
         __this_cpu_write(pn->lruvec_stat_cpu->count[idx], x);
-        preempt_enable_rt();
 }
 
 /**
@@ -868,6 +859,7 @@
         if (!mem_cgroup_disabled())
                 __mod_memcg_lruvec_state(lruvec, idx, val);
 }
+EXPORT_SYMBOL_GPL(__mod_lruvec_state);
 
 void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val)
 {
@@ -1452,6 +1444,7 @@
         if (nr_pages > 0)
                 *lru_size += nr_pages;
 }
+EXPORT_SYMBOL_GPL(mem_cgroup_update_lru_size);
 
 /**
  * mem_cgroup_margin - calculate chargeable space of a memory cgroup
@@ -2243,7 +2236,6 @@
 EXPORT_SYMBOL(unlock_page_memcg);
 
 struct memcg_stock_pcp {
-        local_lock_t lock;
         struct mem_cgroup *cached; /* this never be root cgroup */
         unsigned int nr_pages;
 
@@ -2295,7 +2287,7 @@
         if (nr_pages > MEMCG_CHARGE_BATCH)
                 return ret;
 
-        local_lock_irqsave(&memcg_stock.lock, flags);
+        local_irq_save(flags);
 
         stock = this_cpu_ptr(&memcg_stock);
         if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
@@ -2303,7 +2295,7 @@
                 ret = true;
         }
 
-        local_unlock_irqrestore(&memcg_stock.lock, flags);
+        local_irq_restore(flags);
 
         return ret;
 }
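
Note on the two hunks above: the replacement is safe on mainline because of the classic invariant that, with hard interrupts off, nothing else can run on this CPU, so accesses to the per-CPU memcg_stock need no lock object at all. A simplified stand-in for the pattern, not the patch code itself:

    #include <linux/irqflags.h>
    #include <linux/percpu.h>

    static DEFINE_PER_CPU(unsigned int, my_stock);

    /* Try to satisfy a request from this CPU's cached stock. */
    static bool my_consume(unsigned int nr)
    {
            unsigned long flags;
            bool ret = false;

            local_irq_save(flags);  /* the only exclusion: IRQs + preemption */
            if (__this_cpu_read(my_stock) >= nr) {
                    __this_cpu_sub(my_stock, nr);
                    ret = true;
            }
            local_irq_restore(flags);
            return ret;
    }

On PREEMPT_RT, local_irq_save() no longer excludes other tasks, which is exactly why the RT tree had wrapped these sections in a local_lock; these hunks revert to the mainline form.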
@@ -2338,14 +2330,14 @@
          * The only protection from memory hotplug vs. drain_stock races is
          * that we always operate on local CPU stock here with IRQ disabled
          */
-        local_lock_irqsave(&memcg_stock.lock, flags);
+        local_irq_save(flags);
 
         stock = this_cpu_ptr(&memcg_stock);
         drain_obj_stock(stock);
         drain_stock(stock);
         clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
 
-        local_unlock_irqrestore(&memcg_stock.lock, flags);
+        local_irq_restore(flags);
 }
 
 /*
@@ -2357,7 +2349,7 @@
         struct memcg_stock_pcp *stock;
         unsigned long flags;
 
-        local_lock_irqsave(&memcg_stock.lock, flags);
+        local_irq_save(flags);
 
         stock = this_cpu_ptr(&memcg_stock);
         if (stock->cached != memcg) { /* reset if necessary */
@@ -2370,7 +2362,7 @@
         if (stock->nr_pages > MEMCG_CHARGE_BATCH)
                 drain_stock(stock);
 
-        local_unlock_irqrestore(&memcg_stock.lock, flags);
+        local_irq_restore(flags);
 }
 
 /*
@@ -2390,7 +2382,7 @@
          * as well as workers from this path always operate on the local
          * per-cpu data. CPU up doesn't touch memcg_stock at all.
          */
-        curcpu = get_cpu_light();
+        curcpu = get_cpu();
         for_each_online_cpu(cpu) {
                 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
                 struct mem_cgroup *memcg;
@@ -2413,7 +2405,7 @@
                         schedule_work_on(cpu, &stock->work);
                 }
         }
-        put_cpu_light();
+        put_cpu();
         mutex_unlock(&percpu_charge_mutex);
 }
 
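
Note on the hunk above: get_cpu()/put_cpu() pin the task to the current CPU by disabling preemption, which is what lets the drain loop trust curcpu; the removed get_cpu_light()/put_cpu_light() were RT variants that only disable migration so the task stays preemptible. A sketch of the mainline idiom; the per-CPU work item here is illustrative:

    #include <linux/percpu.h>
    #include <linux/smp.h>
    #include <linux/workqueue.h>

    static DEFINE_PER_CPU(struct work_struct, my_drain_work);

    static void kick_all_cpus(void)
    {
            int cpu, curcpu;

            curcpu = get_cpu();     /* disables preemption, returns CPU id */
            for_each_online_cpu(cpu) {
                    /* the current CPU would be drained inline instead */
                    if (cpu != curcpu)
                            schedule_work_on(cpu, &per_cpu(my_drain_work, cpu));
            }
            put_cpu();              /* re-enables preemption */
    }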
@@ -3178,7 +3170,7 @@
         unsigned long flags;
         bool ret = false;
 
-        local_lock_irqsave(&memcg_stock.lock, flags);
+        local_irq_save(flags);
 
         stock = this_cpu_ptr(&memcg_stock);
         if (objcg == stock->cached_objcg && stock->nr_bytes >= nr_bytes) {
@@ -3186,7 +3178,7 @@
                 ret = true;
         }
 
-        local_unlock_irqrestore(&memcg_stock.lock, flags);
+        local_irq_restore(flags);
 
         return ret;
 }
@@ -3253,7 +3245,7 @@
         struct memcg_stock_pcp *stock;
         unsigned long flags;
 
-        local_lock_irqsave(&memcg_stock.lock, flags);
+        local_irq_save(flags);
 
         stock = this_cpu_ptr(&memcg_stock);
         if (stock->cached_objcg != objcg) { /* reset if necessary */
@@ -3267,7 +3259,7 @@
         if (stock->nr_bytes > PAGE_SIZE)
                 drain_obj_stock(stock);
 
-        local_unlock_irqrestore(&memcg_stock.lock, flags);
+        local_irq_restore(flags);
 }
 
 int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)
@@ -3974,6 +3966,10 @@
 {
         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
 
+        pr_warn_once("Cgroup memory moving (move_charge_at_immigrate) is deprecated. "
+                     "Please report your usecase to linux-mm@kvack.org if you "
+                     "depend on this functionality.\n");
+
         if (val & ~MOVE_MASK)
                 return -EINVAL;
 
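
Note on the hunk above: pr_warn_once() is the stock way to deprecate a user-visible knob; it fires once per boot on first use and stays silent afterwards, so scripts that rewrite the file do not flood the log. A sketch of the idiom against a hypothetical knob and limit:

    #include <linux/printk.h>
    #include <linux/types.h>

    #define MY_KNOB_MAX 200         /* illustrative limit */

    static int my_knob_write(u64 val)
    {
            /* Emitted at most once, however often the knob is written. */
            pr_warn_once("my_knob is deprecated; report usecases upstream\n");

            if (val > MY_KNOB_MAX)
                    return -EINVAL;
            /* ... apply val ... */
            return 0;
    }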
@@ -4217,7 +4213,7 @@
 {
         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
 
-        if (val > 100)
+        if (val > 200)
                 return -EINVAL;
 
         if (css->parent)
@@ -5789,12 +5785,12 @@
 
         ret = 0;
 
-        local_lock_irq(&event_lock.l);
+        local_irq_disable();
         mem_cgroup_charge_statistics(to, page, nr_pages);
         memcg_check_events(to, page);
         mem_cgroup_charge_statistics(from, page, -nr_pages);
         memcg_check_events(from, page);
-        local_unlock_irq(&event_lock.l);
+        local_irq_enable();
 out_unlock:
         unlock_page(page);
 out:
@@ -6862,10 +6858,10 @@
         css_get(&memcg->css);
         commit_charge(page, memcg);
 
-        local_lock_irq(&event_lock.l);
+        local_irq_disable();
         mem_cgroup_charge_statistics(memcg, page, nr_pages);
         memcg_check_events(memcg, page);
-        local_unlock_irq(&event_lock.l);
+        local_irq_enable();
 
         /*
          * Cgroup1's unified memory+swap counter has been charged with the
@@ -6921,11 +6917,11 @@
                 memcg_oom_recover(ug->memcg);
         }
 
-        local_lock_irqsave(&event_lock.l, flags);
+        local_irq_save(flags);
         __count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
         __this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_pages);
         memcg_check_events(ug->memcg, ug->dummy_page);
-        local_unlock_irqrestore(&event_lock.l, flags);
+        local_irq_restore(flags);
 
         /* drop reference from uncharge_page */
         css_put(&ug->memcg->css);
@@ -7073,10 +7069,10 @@
         css_get(&memcg->css);
         commit_charge(newpage, memcg);
 
-        local_lock_irqsave(&event_lock.l, flags);
+        local_irq_save(flags);
         mem_cgroup_charge_statistics(memcg, newpage, nr_pages);
         memcg_check_events(memcg, newpage);
-        local_unlock_irqrestore(&event_lock.l, flags);
+        local_irq_restore(flags);
 }
 
 DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
@@ -7196,13 +7192,9 @@
         cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
                                   memcg_hotplug_cpu_dead);
 
-        for_each_possible_cpu(cpu) {
-                struct memcg_stock_pcp *stock;
-
-                stock = per_cpu_ptr(&memcg_stock, cpu);
-                INIT_WORK(&stock->work, drain_local_stock);
-                local_lock_init(&stock->lock);
-        }
+        for_each_possible_cpu(cpu)
+                INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
+                          drain_local_stock);
 
         for_each_node(node) {
                 struct mem_cgroup_tree_per_node *rtpn;
@@ -7251,7 +7243,6 @@
         struct mem_cgroup *memcg, *swap_memcg;
         unsigned int nr_entries;
         unsigned short oldid;
-        unsigned long flags;
 
         VM_BUG_ON_PAGE(PageLRU(page), page);
         VM_BUG_ON_PAGE(page_count(page), page);
@@ -7300,13 +7291,9 @@
          * important here to have the interrupts disabled because it is the
          * only synchronisation we have for updating the per-CPU variables.
          */
-        local_lock_irqsave(&event_lock.l, flags);
-#ifndef CONFIG_PREEMPT_RT
         VM_BUG_ON(!irqs_disabled());
-#endif
         mem_cgroup_charge_statistics(memcg, page, -nr_entries);
         memcg_check_events(memcg, page);
-        local_unlock_irqrestore(&event_lock.l, flags);
 
         css_put(&memcg->css);
 }
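
Note on the final hunk: with the RT local_lock gone, the surviving VM_BUG_ON(!irqs_disabled()) is what documents and enforces the calling convention: per the comment kept above it, the caller entering with interrupts already disabled is the only synchronisation for these per-CPU updates. A minimal sketch of asserting such a precondition:

    #include <linux/irqflags.h>
    #include <linux/mmdebug.h>

    /* Callers must enter with IRQs off, e.g. under a spin_lock_irq(). */
    static void update_stats_irqs_off(void)
    {
            VM_BUG_ON(!irqs_disabled());
            /* ... raw __this_cpu_* updates are safe here on !PREEMPT_RT ... */
    }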