@@ -63,7 +63,6 @@
 #include <net/sock.h>
 #include <net/ip.h>
 #include "slab.h"
-#include <linux/local_lock.h>
 
 #include <linux/uaccess.h>
 
@@ -94,13 +93,6 @@
 #ifdef CONFIG_CGROUP_WRITEBACK
 static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq);
 #endif
-
-struct event_lock {
-	local_lock_t l;
-};
-static DEFINE_PER_CPU(struct event_lock, event_lock) = {
-	.l = INIT_LOCAL_LOCK(l),
-};
 
 /* Whether legacy memory+swap accounting is active */
 static bool do_memsw_account(void)
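The event_lock removed here is the standard local_lock pattern: it gives per-CPU data, which vanilla kernels protect implicitly by disabling interrupts, a named lock object that PREEMPT_RT can substitute with a per-CPU sleeping lock. A minimal sketch of that pattern, with hypothetical names (my_pcpu_data, my_counter_inc) used purely for illustration:

	#include <linux/local_lock.h>
	#include <linux/percpu.h>

	struct my_pcpu_data {
		local_lock_t lock;	/* protects this CPU's counter */
		unsigned long counter;
	};
	static DEFINE_PER_CPU(struct my_pcpu_data, my_pcpu_data) = {
		.lock = INIT_LOCAL_LOCK(lock),
	};

	static void my_counter_inc(void)
	{
		/* On !PREEMPT_RT this boils down to local_irq_disable();
		 * on PREEMPT_RT it takes a per-CPU spinlock instead, so
		 * the section stays preemptible and interrupts stay on. */
		local_lock_irq(&my_pcpu_data.lock);
		this_cpu_inc(my_pcpu_data.counter);
		local_unlock_irq(&my_pcpu_data.lock);
	}

With the lock definition gone, every user below falls back to plain interrupt disabling.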
@@ -825,7 +817,6 @@
 	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
 	memcg = pn->memcg;
 
-	preempt_disable_rt();
 	/* Update memcg */
 	__mod_memcg_state(memcg, idx, val);
 
@@ -845,7 +836,6 @@
 		x = 0;
 	}
 	__this_cpu_write(pn->lruvec_stat_cpu->count[idx], x);
-	preempt_enable_rt();
 }
 
 /**
@@ -2243,7 +2233,6 @@
 EXPORT_SYMBOL(unlock_page_memcg);
 
 struct memcg_stock_pcp {
-	local_lock_t lock;
 	struct mem_cgroup *cached; /* this never be root cgroup */
 	unsigned int nr_pages;
 
@@ -2295,7 +2284,7 @@
 	if (nr_pages > MEMCG_CHARGE_BATCH)
 		return ret;
 
-	local_lock_irqsave(&memcg_stock.lock, flags);
+	local_irq_save(flags);
 
 	stock = this_cpu_ptr(&memcg_stock);
 	if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
@@ -2303,7 +2292,7 @@
 		ret = true;
 	}
 
-	local_unlock_irqrestore(&memcg_stock.lock, flags);
+	local_irq_restore(flags);
 
 	return ret;
 }
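Each consume/refill/drain site in this file has the same shape: the stock is only ever touched by code running on its own CPU, so on vanilla kernels disabling local interrupts is the entire synchronisation story. A side-by-side sketch of the two idioms this hunk converts between, reusing the hypothetical my_pcpu_data from the sketch above:

	/* Vanilla idiom restored by this diff: IRQ-off is the only
	 * protection and there is no lock object for lockdep to track. */
	static void my_update_vanilla(unsigned long n)
	{
		unsigned long flags;

		local_irq_save(flags);
		this_cpu_add(my_pcpu_data.counter, n);
		local_irq_restore(flags);
	}

	/* RT-tree idiom being removed: same critical section, but the
	 * named local_lock lets PREEMPT_RT keep interrupts enabled. */
	static void my_update_rt(unsigned long n)
	{
		unsigned long flags;

		local_lock_irqsave(&my_pcpu_data.lock, flags);
		this_cpu_add(my_pcpu_data.counter, n);
		local_unlock_irqrestore(&my_pcpu_data.lock, flags);
	}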
@@ -2338,14 +2327,14 @@
 	 * The only protection from memory hotplug vs. drain_stock races is
 	 * that we always operate on local CPU stock here with IRQ disabled
 	 */
-	local_lock_irqsave(&memcg_stock.lock, flags);
+	local_irq_save(flags);
 
 	stock = this_cpu_ptr(&memcg_stock);
 	drain_obj_stock(stock);
 	drain_stock(stock);
 	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
 
-	local_unlock_irqrestore(&memcg_stock.lock, flags);
+	local_irq_restore(flags);
 }
 
 /*
@@ -2357,7 +2346,7 @@
 	struct memcg_stock_pcp *stock;
 	unsigned long flags;
 
-	local_lock_irqsave(&memcg_stock.lock, flags);
+	local_irq_save(flags);
 
 	stock = this_cpu_ptr(&memcg_stock);
 	if (stock->cached != memcg) { /* reset if necessary */
@@ -2370,7 +2359,7 @@
 	if (stock->nr_pages > MEMCG_CHARGE_BATCH)
 		drain_stock(stock);
 
-	local_unlock_irqrestore(&memcg_stock.lock, flags);
+	local_irq_restore(flags);
 }
 
 /*
@@ -2390,7 +2379,7 @@
 	 * as well as workers from this path always operate on the local
 	 * per-cpu data. CPU up doesn't touch memcg_stock at all.
 	 */
-	curcpu = get_cpu_light();
+	curcpu = get_cpu();
 	for_each_online_cpu(cpu) {
 		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
 		struct mem_cgroup *memcg;
@@ -2413,7 +2402,7 @@
 				schedule_work_on(cpu, &stock->work);
 		}
 	}
-	put_cpu_light();
+	put_cpu();
 	mutex_unlock(&percpu_charge_mutex);
 }
 
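get_cpu() returns the current CPU id with preemption disabled, so the loop above cannot be migrated while it decides which stocks to drain inline and which to push to per-CPU workers; the RT tree's get_cpu_light()/put_cpu_light() pair disabled only migration, not preemption. A minimal sketch of the pinning pattern, with hypothetical names (my_work, my_work_fn, my_kick_all_cpus):

	#include <linux/percpu.h>
	#include <linux/smp.h>
	#include <linux/workqueue.h>

	static void my_work_fn(struct work_struct *work);
	static DEFINE_PER_CPU(struct work_struct, my_work);

	static void my_kick_all_cpus(void)
	{
		int cpu, curcpu;

		curcpu = get_cpu();	/* disable preemption, pin this CPU */
		for_each_online_cpu(cpu) {
			struct work_struct *work = &per_cpu(my_work, cpu);

			if (cpu == curcpu)
				my_work_fn(work);	/* local CPU: run inline */
			else
				schedule_work_on(cpu, work);
		}
		put_cpu();		/* re-enable preemption */
	}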
@@ -3178,7 +3167,7 @@
 	unsigned long flags;
 	bool ret = false;
 
-	local_lock_irqsave(&memcg_stock.lock, flags);
+	local_irq_save(flags);
 
 	stock = this_cpu_ptr(&memcg_stock);
 	if (objcg == stock->cached_objcg && stock->nr_bytes >= nr_bytes) {
@@ -3186,7 +3175,7 @@
 		ret = true;
 	}
 
-	local_unlock_irqrestore(&memcg_stock.lock, flags);
+	local_irq_restore(flags);
 
 	return ret;
 }
@@ -3253,7 +3242,7 @@
 	struct memcg_stock_pcp *stock;
 	unsigned long flags;
 
-	local_lock_irqsave(&memcg_stock.lock, flags);
+	local_irq_save(flags);
 
 	stock = this_cpu_ptr(&memcg_stock);
 	if (stock->cached_objcg != objcg) { /* reset if necessary */
@@ -3267,7 +3256,7 @@
 	if (stock->nr_bytes > PAGE_SIZE)
 		drain_obj_stock(stock);
 
-	local_unlock_irqrestore(&memcg_stock.lock, flags);
+	local_irq_restore(flags);
 }
 
 int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)
@@ -5789,12 +5778,12 @@
 
 	ret = 0;
 
-	local_lock_irq(&event_lock.l);
+	local_irq_disable();
 	mem_cgroup_charge_statistics(to, page, nr_pages);
 	memcg_check_events(to, page);
 	mem_cgroup_charge_statistics(from, page, -nr_pages);
 	memcg_check_events(from, page);
-	local_unlock_irq(&event_lock.l);
+	local_irq_enable();
 out_unlock:
 	unlock_page(page);
 out:
@@ -6862,10 +6851,10 @@
 	css_get(&memcg->css);
 	commit_charge(page, memcg);
 
-	local_lock_irq(&event_lock.l);
+	local_irq_disable();
 	mem_cgroup_charge_statistics(memcg, page, nr_pages);
 	memcg_check_events(memcg, page);
-	local_unlock_irq(&event_lock.l);
+	local_irq_enable();
 
 	/*
 	 * Cgroup1's unified memory+swap counter has been charged with the
@@ -6921,11 +6910,11 @@
 		memcg_oom_recover(ug->memcg);
 	}
 
-	local_lock_irqsave(&event_lock.l, flags);
+	local_irq_save(flags);
 	__count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
 	__this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_pages);
 	memcg_check_events(ug->memcg, ug->dummy_page);
-	local_unlock_irqrestore(&event_lock.l, flags);
+	local_irq_restore(flags);
 
 	/* drop reference from uncharge_page */
 	css_put(&ug->memcg->css);
@@ -7073,10 +7062,10 @@
 	css_get(&memcg->css);
 	commit_charge(newpage, memcg);
 
-	local_lock_irqsave(&event_lock.l, flags);
+	local_irq_save(flags);
 	mem_cgroup_charge_statistics(memcg, newpage, nr_pages);
 	memcg_check_events(memcg, newpage);
-	local_unlock_irqrestore(&event_lock.l, flags);
+	local_irq_restore(flags);
 }
 
 DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
@@ -7196,13 +7185,9 @@
 	cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
 				  memcg_hotplug_cpu_dead);
 
-	for_each_possible_cpu(cpu) {
-		struct memcg_stock_pcp *stock;
-
-		stock = per_cpu_ptr(&memcg_stock, cpu);
-		INIT_WORK(&stock->work, drain_local_stock);
-		local_lock_init(&stock->lock);
-	}
+	for_each_possible_cpu(cpu)
+		INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
+			  drain_local_stock);
 
 	for_each_node(node) {
 		struct mem_cgroup_tree_per_node *rtpn;
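With the local_lock gone there is nothing left in memcg_stock that INIT_WORK() alone cannot set up, so the boot-time loop collapses to a single statement. For comparison, the lock half could also have been initialised statically, mirroring the event_lock definition removed near the top of this diff (my_stock is a hypothetical name):

	#include <linux/local_lock.h>
	#include <linux/percpu.h>
	#include <linux/workqueue.h>

	struct my_stock {
		local_lock_t lock;
		struct work_struct work;
	};

	/* The lock takes a static initialiser; the work item is still
	 * hooked up at boot with INIT_WORK(), as in the hunk above. */
	static DEFINE_PER_CPU(struct my_stock, my_stock) = {
		.lock = INIT_LOCAL_LOCK(lock),
	};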
@@ -7251,7 +7236,6 @@
 	struct mem_cgroup *memcg, *swap_memcg;
 	unsigned int nr_entries;
 	unsigned short oldid;
-	unsigned long flags;
 
 	VM_BUG_ON_PAGE(PageLRU(page), page);
 	VM_BUG_ON_PAGE(page_count(page), page);
@@ -7300,13 +7284,9 @@
 	 * important here to have the interrupts disabled because it is the
 	 * only synchronisation we have for updating the per-CPU variables.
 	 */
-	local_lock_irqsave(&event_lock.l, flags);
-#ifndef CONFIG_PREEMPT_RT
 	VM_BUG_ON(!irqs_disabled());
-#endif
 	mem_cgroup_charge_statistics(memcg, page, -nr_entries);
 	memcg_check_events(memcg, page);
-	local_unlock_irqrestore(&event_lock.l, flags);
 
 	css_put(&memcg->css);
 }
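The final hunk also makes VM_BUG_ON(!irqs_disabled()) unconditional again. The RT tree had to guard it because on PREEMPT_RT local_lock_irqsave() acquires a per-CPU spinlock and deliberately leaves hard interrupts enabled, so the assertion would have fired; the vanilla path runs with interrupts genuinely off, which is also why the now-unused flags local could be dropped two hunks up. A toy illustration, reusing the hypothetical my_pcpu_data from the first sketch:

	static void my_assert_demo(void)
	{
		unsigned long flags;

		local_lock_irqsave(&my_pcpu_data.lock, flags);
		/* !PREEMPT_RT: irqs_disabled() is true here.
		 * PREEMPT_RT:  irqs_disabled() is normally false here,
		 * which is why the RT tree compiled the VM_BUG_ON() out. */
		local_unlock_irqrestore(&my_pcpu_data.lock, flags);
	}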