2023-12-09 b22da3d8526a935aa31e086e63f60ff3246cb61c
kernel/include/linux/memcontrol.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 /* memcontrol.h - Memory Controller
  *
  * Copyright IBM Corporation, 2007
@@ -5,16 +6,6 @@
  *
  * Copyright 2007 OpenVZ SWsoft Inc
  * Author: Pavel Emelianov <xemul@openvz.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
  */
 
 #ifndef _LINUX_MEMCONTROL_H
@@ -32,19 +23,16 @@
 #include <linux/page-flags.h>
 
 struct mem_cgroup;
+struct obj_cgroup;
 struct page;
 struct mm_struct;
 struct kmem_cache;
 
 /* Cgroup-specific page state, on top of universal node page state */
 enum memcg_stat_item {
-	MEMCG_CACHE = NR_VM_NODE_STAT_ITEMS,
-	MEMCG_RSS,
-	MEMCG_RSS_HUGE,
-	MEMCG_SWAP,
+	MEMCG_SWAP = NR_VM_NODE_STAT_ITEMS,
 	MEMCG_SOCK,
-	/* XXX: why are these zone and not node counters? */
-	MEMCG_KERNEL_STACK_KB,
+	MEMCG_PERCPU_B,
 	MEMCG_NR_STAT,
 };
 
@@ -54,20 +42,14 @@
 	MEMCG_MAX,
 	MEMCG_OOM,
 	MEMCG_OOM_KILL,
+	MEMCG_SWAP_HIGH,
 	MEMCG_SWAP_MAX,
 	MEMCG_SWAP_FAIL,
 	MEMCG_NR_MEMORY_EVENTS,
 };
 
-enum mem_cgroup_protection {
-	MEMCG_PROT_NONE,
-	MEMCG_PROT_LOW,
-	MEMCG_PROT_MIN,
-};
-
 struct mem_cgroup_reclaim_cookie {
 	pg_data_t *pgdat;
-	int priority;
 	unsigned int generation;
 };
 
@@ -78,24 +60,23 @@
 
 struct mem_cgroup_id {
 	int id;
-	atomic_t ref;
+	refcount_t ref;
 };
 
 /*
  * Per memcg event counter is incremented at every pagein/pageout. With THP,
- * it will be incremated by the number of pages. This counter is used for
- * for trigger some periodic events. This is straightforward and better
+ * it will be incremented by the number of pages. This counter is used
+ * to trigger some periodic events. This is straightforward and better
  * than using jiffies etc. to handle periodic memcg event.
  */
 enum mem_cgroup_events_target {
 	MEM_CGROUP_TARGET_THRESH,
 	MEM_CGROUP_TARGET_SOFTLIMIT,
-	MEM_CGROUP_TARGET_NUMAINFO,
 	MEM_CGROUP_NTARGETS,
 };
 
-struct mem_cgroup_stat_cpu {
-	long count[MEMCG_NR_STAT];
+struct memcg_vmstats_percpu {
+	long stat[MEMCG_NR_STAT];
 	unsigned long events[NR_VM_EVENT_ITEMS];
 	unsigned long nr_page_events;
 	unsigned long targets[MEM_CGROUP_NTARGETS];
@@ -117,32 +98,32 @@
  */
 struct memcg_shrinker_map {
 	struct rcu_head rcu;
-	unsigned long map[0];
+	unsigned long map[];
 };
 
 /*
- * per-zone information in memory controller.
+ * per-node information in memory controller.
  */
 struct mem_cgroup_per_node {
 	struct lruvec lruvec;
 
+	/* Legacy local VM stats */
+	struct lruvec_stat __percpu *lruvec_stat_local;
+
+	/* Subtree VM stats (batched updates) */
 	struct lruvec_stat __percpu *lruvec_stat_cpu;
 	atomic_long_t lruvec_stat[NR_VM_NODE_STAT_ITEMS];
 
 	unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];
 
-	struct mem_cgroup_reclaim_iter iter[DEF_PRIORITY + 1];
+	struct mem_cgroup_reclaim_iter iter;
 
-#ifdef CONFIG_MEMCG_KMEM
 	struct memcg_shrinker_map __rcu *shrinker_map;
-#endif
+
 	struct rb_node tree_node;	/* RB tree node */
 	unsigned long usage_in_excess;	/* Set to the value by which */
 					/* the soft limit is exceeded*/
 	bool on_tree;
-	bool congested;		/* memcg has many dirty pages */
-			/* backed by a congested BDI */
-
 	struct mem_cgroup *memcg;	/* Back pointer, we cannot */
 					/* use container_of */
 };
@@ -159,7 +140,7 @@
 	/* Size of entries[] */
 	unsigned int size;
 	/* Array of thresholds */
-	struct mem_cgroup_threshold entries[0];
+	struct mem_cgroup_threshold entries[];
 };
 
 struct mem_cgroup_thresholds {
@@ -189,6 +170,39 @@
 #endif
 
 /*
+ * Remember four most recent foreign writebacks with dirty pages in this
+ * cgroup. Inode sharing is expected to be uncommon and, even if we miss
+ * one in a given round, we're likely to catch it later if it keeps
+ * foreign-dirtying, so a fairly low count should be enough.
+ *
+ * See mem_cgroup_track_foreign_dirty_slowpath() for details.
+ */
+#define MEMCG_CGWB_FRN_CNT	4
+
+struct memcg_cgwb_frn {
+	u64 bdi_id;			/* bdi->id of the foreign inode */
+	int memcg_id;			/* memcg->css.id of foreign inode */
+	u64 at;				/* jiffies_64 at the time of dirtying */
+	struct wb_completion done;	/* tracks in-flight foreign writebacks */
+};
+
+/*
+ * Bucket for arbitrarily byte-sized objects charged to a memory
+ * cgroup. The bucket can be reparented in one piece when the cgroup
+ * is destroyed, without having to round up the individual references
+ * of all live memory objects in the wild.
+ */
+struct obj_cgroup {
+	struct percpu_ref refcnt;
+	struct mem_cgroup *memcg;
+	atomic_t nr_charged_bytes;
+	union {
+		struct list_head list;	/* protected by objcg_lock */
+		struct rcu_head rcu;
+	};
+};
+
+/*
  * The memory controller data structure. The memory controller controls both
  * page cache and RSS per cgroup. We would eventually like to provide
  * statistics based on the statistics developed by Rik Van Riel for clock-pro,
@@ -201,16 +215,16 @@
 	struct mem_cgroup_id id;
 
 	/* Accounted resources */
-	struct page_counter memory;
-	struct page_counter swap;
+	struct page_counter memory;		/* Both v1 & v2 */
+
+	union {
+		struct page_counter swap;	/* v2 only */
+		struct page_counter memsw;	/* v1 only */
+	};
 
 	/* Legacy consumer-oriented counters */
-	struct page_counter memsw;
-	struct page_counter kmem;
-	struct page_counter tcpmem;
-
-	/* Upper bound of normal memory consumption range */
-	unsigned long high;
+	struct page_counter kmem;		/* v1 only */
+	struct page_counter tcpmem;		/* v1 only */
 
 	/* Range enforcement for interrupt charges */
 	struct work_struct high_work;
@@ -238,8 +252,9 @@
 	/* OOM-Killer disable */
 	int oom_kill_disable;
 
-	/* memory.events */
+	/* memory.events and memory.events.local */
 	struct cgroup_file events_file;
+	struct cgroup_file events_local_file;
 
 	/* handle for "memory.swap.events" */
 	struct cgroup_file swap_events_file;
@@ -267,20 +282,12 @@
 
 	MEMCG_PADDING(_pad1_);
 
-	/*
-	 * set > 0 if pages under this cgroup are moving to other cgroup.
-	 */
-	atomic_t moving_account;
-	struct task_struct *move_lock_task;
+	atomic_long_t vmstats[MEMCG_NR_STAT];
+	atomic_long_t vmevents[NR_VM_EVENT_ITEMS];
 
-	/* memory.stat */
-	struct mem_cgroup_stat_cpu __percpu *stat_cpu;
-
-	MEMCG_PADDING(_pad2_);
-
-	atomic_long_t stat[MEMCG_NR_STAT];
-	atomic_long_t events[NR_VM_EVENT_ITEMS];
-	atomic_long_t memory_events[MEMCG_NR_MEMORY_EVENTS];
+	/* memory.events */
+	atomic_long_t memory_events[MEMCG_NR_MEMORY_EVENTS];
+	atomic_long_t memory_events_local[MEMCG_NR_MEMORY_EVENTS];
 
 	unsigned long socket_pressure;
 
@@ -292,25 +299,40 @@
 	/* Index in the kmem_cache->memcg_params.memcg_caches array */
 	int kmemcg_id;
 	enum memcg_kmem_state kmem_state;
-	struct list_head kmem_caches;
+	struct obj_cgroup __rcu *objcg;
+	/* list of inherited objcgs, protected by objcg_lock */
+	struct list_head objcg_list;
 #endif
 
-	int last_scanned_node;
-#if MAX_NUMNODES > 1
-	nodemask_t scan_nodes;
-	atomic_t numainfo_events;
-	atomic_t numainfo_updating;
-#endif
+	MEMCG_PADDING(_pad2_);
+
+	/*
+	 * set > 0 if pages under this cgroup are moving to other cgroup.
+	 */
+	atomic_t moving_account;
+	struct task_struct *move_lock_task;
+
+	/* Legacy local VM stats and events */
+	struct memcg_vmstats_percpu __percpu *vmstats_local;
+
+	/* Subtree VM stats and events (batched updates) */
+	struct memcg_vmstats_percpu __percpu *vmstats_percpu;
 
 #ifdef CONFIG_CGROUP_WRITEBACK
 	struct list_head cgwb_list;
 	struct wb_domain cgwb_domain;
+	struct memcg_cgwb_frn cgwb_frn[MEMCG_CGWB_FRN_CNT];
 #endif
 
 	/* List of events which userspace want to receive */
 	struct list_head event_list;
 	spinlock_t event_list_lock;
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	struct deferred_split deferred_split_queue;
+#endif
+
+	ANDROID_OEM_DATA(1);
 	struct mem_cgroup_per_node *nodeinfo[0];
 	/* WARNING: nodeinfo must be the last member here */
 };
@@ -323,6 +345,16 @@
 
 extern struct mem_cgroup *root_mem_cgroup;
 
+struct lruvec *page_to_lruvec(struct page *page, pg_data_t *pgdat);
+void do_traversal_all_lruvec(void);
+
+static __always_inline bool memcg_stat_item_in_bytes(int idx)
+{
+	if (idx == MEMCG_PERCPU_B)
+		return true;
+	return vmstat_item_in_bytes(idx);
+}
+
 static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
 {
 	return (memcg == root_mem_cgroup);
@@ -333,21 +365,112 @@
 	return !cgroup_subsys_enabled(memory_cgrp_subsys);
 }
 
-enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
-		struct mem_cgroup *memcg);
+static inline void mem_cgroup_protection(struct mem_cgroup *root,
+		struct mem_cgroup *memcg,
+		unsigned long *min,
+		unsigned long *low)
+{
+	*min = *low = 0;
 
-int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
-		gfp_t gfp_mask, struct mem_cgroup **memcgp,
-		bool compound);
-int mem_cgroup_try_charge_delay(struct page *page, struct mm_struct *mm,
-		gfp_t gfp_mask, struct mem_cgroup **memcgp,
-		bool compound);
-void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
-		bool lrucare, bool compound);
-void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
-		bool compound);
-void mem_cgroup_uncharge(struct page *page);
-void mem_cgroup_uncharge_list(struct list_head *page_list);
+	if (mem_cgroup_disabled())
+		return;
+
+	/*
+	 * There is no reclaim protection applied to a targeted reclaim.
+	 * We are special casing this specific case here because
+	 * mem_cgroup_protected calculation is not robust enough to keep
+	 * the protection invariant for calculated effective values for
+	 * parallel reclaimers with different reclaim target. This is
+	 * especially a problem for tail memcgs (as they have pages on LRU)
+	 * which would want to have effective values 0 for targeted reclaim
+	 * but a different value for external reclaim.
+	 *
+	 * Example
+	 * Let's have global and A's reclaim in parallel:
+	 *  |
+	 *  A (low=2G, usage = 3G, max = 3G, children_low_usage = 1.5G)
+	 *  |\
+	 *  | C (low = 1G, usage = 2.5G)
+	 *  B (low = 1G, usage = 0.5G)
+	 *
+	 * For the global reclaim
+	 * A.elow = A.low
+	 * B.elow = min(B.usage, B.low) because children_low_usage <= A.elow
+	 * C.elow = min(C.usage, C.low)
+	 *
+	 * With the effective values resetting we have A reclaim
+	 * A.elow = 0
+	 * B.elow = B.low
+	 * C.elow = C.low
+	 *
+	 * If the global reclaim races with A's reclaim then
+	 * B.elow = C.elow = 0 because children_low_usage > A.elow)
+	 * is possible and reclaiming B would be violating the protection.
+	 *
+	 */
+	if (root == memcg)
+		return;
+
+	*min = READ_ONCE(memcg->memory.emin);
+	*low = READ_ONCE(memcg->memory.elow);
+}
+
+void mem_cgroup_calculate_protection(struct mem_cgroup *root,
+		struct mem_cgroup *memcg);
+
+static inline bool mem_cgroup_supports_protection(struct mem_cgroup *memcg)
+{
+	/*
+	 * The root memcg doesn't account charges, and doesn't support
+	 * protection.
+	 */
+	return !mem_cgroup_disabled() && !mem_cgroup_is_root(memcg);
+
+}
+
+static inline bool mem_cgroup_below_low(struct mem_cgroup *memcg)
+{
+	if (!mem_cgroup_supports_protection(memcg))
+		return false;
+
+	return READ_ONCE(memcg->memory.elow) >=
+		page_counter_read(&memcg->memory);
+}
+
+static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg)
+{
+	if (!mem_cgroup_supports_protection(memcg))
+		return false;
+
+	return READ_ONCE(memcg->memory.emin) >=
+		page_counter_read(&memcg->memory);
+}
+
+int __mem_cgroup_charge(struct page *page, struct mm_struct *mm,
+		gfp_t gfp_mask);
+static inline int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
+		gfp_t gfp_mask)
+{
+	if (mem_cgroup_disabled())
+		return 0;
+	return __mem_cgroup_charge(page, mm, gfp_mask);
+}
+
+void __mem_cgroup_uncharge(struct page *page);
+static inline void mem_cgroup_uncharge(struct page *page)
+{
+	if (mem_cgroup_disabled())
+		return;
+	__mem_cgroup_uncharge(page);
+}
+
+void __mem_cgroup_uncharge_list(struct list_head *page_list);
+static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
+{
+	if (mem_cgroup_disabled())
+		return;
+	__mem_cgroup_uncharge_list(page_list);
+}
 
 void mem_cgroup_migrate(struct page *oldpage, struct page *newpage);
 
@@ -358,24 +481,26 @@
 }
 
 /**
- * mem_cgroup_lruvec - get the lru list vector for a node or a memcg zone
- * @node: node of the wanted lruvec
+ * mem_cgroup_lruvec - get the lru list vector for a memcg & node
 * @memcg: memcg of the wanted lruvec
 *
- * Returns the lru list vector holding pages for a given @node or a given
- * @memcg and @zone. This can be the node lruvec, if the memory controller
- * is disabled.
+ * Returns the lru list vector holding pages for a given @memcg &
+ * @node combination. This can be the node lruvec, if the memory
+ * controller is disabled.
 */
-static inline struct lruvec *mem_cgroup_lruvec(struct pglist_data *pgdat,
-		struct mem_cgroup *memcg)
+static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
+		struct pglist_data *pgdat)
 {
 	struct mem_cgroup_per_node *mz;
 	struct lruvec *lruvec;
 
 	if (mem_cgroup_disabled()) {
-		lruvec = node_lruvec(pgdat);
+		lruvec = &pgdat->__lruvec;
 		goto out;
 	}
+
+	if (!memcg)
+		memcg = root_mem_cgroup;
 
 	mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
 	lruvec = &mz->lruvec;
@@ -392,7 +517,6 @@
 
 struct lruvec *mem_cgroup_page_lruvec(struct page *, struct pglist_data *);
 
-bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg);
 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
 
 struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);
@@ -402,6 +526,33 @@
 static inline
 struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css){
 	return css ? container_of(css, struct mem_cgroup, css) : NULL;
+}
+
+static inline bool obj_cgroup_tryget(struct obj_cgroup *objcg)
+{
+	return percpu_ref_tryget(&objcg->refcnt);
+}
+
+static inline void obj_cgroup_get(struct obj_cgroup *objcg)
+{
+	percpu_ref_get(&objcg->refcnt);
+}
+
+static inline void obj_cgroup_put(struct obj_cgroup *objcg)
+{
+	percpu_ref_put(&objcg->refcnt);
+}
+
+/*
+ * After the initialization objcg->memcg is always pointing at
+ * a valid memcg, but can be atomically swapped to the parent memcg.
+ *
+ * The caller must ensure that the returned memcg won't be released:
+ * e.g. acquire the rcu_read_lock or css_set_lock.
+ */
+static inline struct mem_cgroup *obj_cgroup_memcg(struct obj_cgroup *objcg)
+{
+	return READ_ONCE(objcg->memcg);
 }
 
 static inline void mem_cgroup_put(struct mem_cgroup *memcg)
@@ -428,6 +579,11 @@
 	return memcg->id.id;
 }
 struct mem_cgroup *mem_cgroup_from_id(unsigned short id);
+
+static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
+{
+	return mem_cgroup_from_css(seq_css(m));
+}
 
 static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
 {
@@ -496,22 +652,6 @@
 void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
 		int zid, int nr_pages);
 
-unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
-		int nid, unsigned int lru_mask);
-
-static inline
-unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
-{
-	struct mem_cgroup_per_node *mz;
-	unsigned long nr_pages = 0;
-	int zid;
-
-	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
-	for (zid = 0; zid < MAX_NR_ZONES; zid++)
-		nr_pages += mz->lru_zone_size[zid][lru];
-	return nr_pages;
-}
-
 static inline
 unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
 		enum lru_list lru, int zone_idx)
@@ -519,15 +659,19 @@
 	struct mem_cgroup_per_node *mz;
 
 	mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
-	return mz->lru_zone_size[zone_idx][lru];
+	return READ_ONCE(mz->lru_zone_size[zone_idx][lru]);
 }
 
 void mem_cgroup_handle_over_high(void);
 
 unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);
 
-void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
+unsigned long mem_cgroup_size(struct mem_cgroup *memcg);
+
+void mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
 		struct task_struct *p);
+
+void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg);
 
 static inline void mem_cgroup_enter_user_fault(void)
 {
@@ -552,7 +696,7 @@
 void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);
 
 #ifdef CONFIG_MEMCG_SWAP
-extern int do_swap_account;
+extern bool cgroup_memory_noswap;
 #endif
 
 struct mem_cgroup *lock_page_memcg(struct page *page);
@@ -563,10 +707,9 @@
  * idx can be of type enum memcg_stat_item or node_stat_item.
  * Keep in sync with memcg_exact_page_state().
  */
-static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
-		int idx)
+static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
 {
-	long x = atomic_long_read(&memcg->stat[idx]);
+	long x = atomic_long_read(&memcg->vmstats[idx]);
 #ifdef CONFIG_SMP
 	if (x < 0)
 		x = 0;
@@ -574,22 +717,26 @@
 	return x;
 }
 
-/* idx can be of type enum memcg_stat_item or node_stat_item */
-static inline void __mod_memcg_state(struct mem_cgroup *memcg,
-		int idx, int val)
+/*
+ * idx can be of type enum memcg_stat_item or node_stat_item.
+ * Keep in sync with memcg_exact_page_state().
+ */
+static inline unsigned long memcg_page_state_local(struct mem_cgroup *memcg,
+		int idx)
 {
-	long x;
+	long x = 0;
+	int cpu;
 
-	if (mem_cgroup_disabled())
-		return;
-
-	x = val + __this_cpu_read(memcg->stat_cpu->count[idx]);
-	if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
-		atomic_long_add(x, &memcg->stat[idx]);
+	for_each_possible_cpu(cpu)
+		x += per_cpu(memcg->vmstats_local->stat[idx], cpu);
+#ifdef CONFIG_SMP
+	if (x < 0)
 		x = 0;
-	}
-	__this_cpu_write(memcg->stat_cpu->count[idx], x);
+#endif
+	return x;
 }
+
+void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val);
 
 /* idx can be of type enum memcg_stat_item or node_stat_item */
 static inline void mod_memcg_state(struct mem_cgroup *memcg,
@@ -651,32 +798,52 @@
 	return x;
 }
 
-static inline void __mod_lruvec_state(struct lruvec *lruvec,
-		enum node_stat_item idx, int val)
+static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
+		enum node_stat_item idx)
 {
 	struct mem_cgroup_per_node *pn;
-	long x;
-
-	/* Update node */
-	__mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
+	long x = 0;
+	int cpu;
 
 	if (mem_cgroup_disabled())
-		return;
+		return node_page_state(lruvec_pgdat(lruvec), idx);
 
 	pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
-
-	preempt_disable_rt();
-	/* Update memcg */
-	__mod_memcg_state(pn->memcg, idx, val);
-
-	/* Update lruvec */
-	x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]);
-	if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) {
-		atomic_long_add(x, &pn->lruvec_stat[idx]);
+	for_each_possible_cpu(cpu)
+		x += per_cpu(pn->lruvec_stat_local->count[idx], cpu);
+#ifdef CONFIG_SMP
+	if (x < 0)
 		x = 0;
-	}
-	__this_cpu_write(pn->lruvec_stat_cpu->count[idx], x);
-	preempt_enable_rt();
+#endif
+	return x;
+}
+
+void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
+		int val);
+void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
+		int val);
+void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val);
+
+void mod_memcg_obj_state(void *p, int idx, int val);
+
+static inline void mod_lruvec_slab_state(void *p, enum node_stat_item idx,
+		int val)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	__mod_lruvec_slab_state(p, idx, val);
+	local_irq_restore(flags);
+}
+
+static inline void mod_memcg_lruvec_state(struct lruvec *lruvec,
+		enum node_stat_item idx, int val)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	__mod_memcg_lruvec_state(lruvec, idx, val);
+	local_irq_restore(flags);
 }
 
 static inline void mod_lruvec_state(struct lruvec *lruvec,
@@ -692,16 +859,17 @@
 static inline void __mod_lruvec_page_state(struct page *page,
 		enum node_stat_item idx, int val)
 {
+	struct page *head = compound_head(page); /* rmap on tail pages */
 	pg_data_t *pgdat = page_pgdat(page);
 	struct lruvec *lruvec;
 
 	/* Untracked pages have no memcg, no lruvec. Update only the node */
-	if (!page->mem_cgroup) {
+	if (!head->mem_cgroup) {
 		__mod_node_page_state(pgdat, idx, val);
 		return;
 	}
 
-	lruvec = mem_cgroup_lruvec(pgdat, page->mem_cgroup);
+	lruvec = mem_cgroup_lruvec(head->mem_cgroup, pgdat);
 	__mod_lruvec_state(lruvec, idx, val);
 }
 
@@ -719,22 +887,8 @@
 		gfp_t gfp_mask,
 		unsigned long *total_scanned);
 
-static inline void __count_memcg_events(struct mem_cgroup *memcg,
-		enum vm_event_item idx,
-		unsigned long count)
-{
-	unsigned long x;
-
-	if (mem_cgroup_disabled())
-		return;
-
-	x = count + __this_cpu_read(memcg->stat_cpu->events[idx]);
-	if (unlikely(x > MEMCG_CHARGE_BATCH)) {
-		atomic_long_add(x, &memcg->events[idx]);
-		x = 0;
-	}
-	__this_cpu_write(memcg->stat_cpu->events[idx], x);
-}
+void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
+		unsigned long count);
 
 static inline void count_memcg_events(struct mem_cgroup *memcg,
 		enum vm_event_item idx,
@@ -772,8 +926,26 @@
 static inline void memcg_memory_event(struct mem_cgroup *memcg,
 		enum memcg_memory_event event)
 {
-	atomic_long_inc(&memcg->memory_events[event]);
-	cgroup_file_notify(&memcg->events_file);
+	bool swap_event = event == MEMCG_SWAP_HIGH || event == MEMCG_SWAP_MAX ||
+		event == MEMCG_SWAP_FAIL;
+
+	atomic_long_inc(&memcg->memory_events_local[event]);
+	if (!swap_event)
+		cgroup_file_notify(&memcg->events_local_file);
+
+	do {
+		atomic_long_inc(&memcg->memory_events[event]);
+		if (swap_event)
+			cgroup_file_notify(&memcg->swap_events_file);
+		else
+			cgroup_file_notify(&memcg->events_file);
+
+		if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
+			break;
+		if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
+			break;
+	} while ((memcg = parent_mem_cgroup(memcg)) &&
+		 !mem_cgroup_is_root(memcg));
 }
 
 static inline void memcg_memory_event_mm(struct mm_struct *mm,
@@ -791,9 +963,7 @@
 	rcu_read_unlock();
 }
 
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-void mem_cgroup_split_huge_fixup(struct page *head);
-#endif
+void split_page_memcg(struct page *head, unsigned int nr);
 
 #else /* CONFIG_MEMCG */
 
@@ -801,6 +971,15 @@
 #define MEM_CGROUP_ID_MAX 0
 
 struct mem_cgroup;
+
+static inline struct lruvec *page_to_lruvec(struct page *page, pg_data_t *pgdat)
+{
+	return NULL;
+}
+
+static inline void do_traversal_all_lruvec(void)
+{
+}
 
 static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
 {
@@ -822,41 +1001,33 @@
 {
 }
 
-static inline enum mem_cgroup_protection mem_cgroup_protected(
-		struct mem_cgroup *root, struct mem_cgroup *memcg)
+static inline void mem_cgroup_protection(struct mem_cgroup *root,
+		struct mem_cgroup *memcg,
+		unsigned long *min,
+		unsigned long *low)
 {
-	return MEMCG_PROT_NONE;
+	*min = *low = 0;
 }
 
-static inline int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
-		gfp_t gfp_mask,
-		struct mem_cgroup **memcgp,
-		bool compound)
+static inline void mem_cgroup_calculate_protection(struct mem_cgroup *root,
+		struct mem_cgroup *memcg)
 {
-	*memcgp = NULL;
+}
+
+static inline bool mem_cgroup_below_low(struct mem_cgroup *memcg)
+{
+	return false;
+}
+
+static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg)
+{
+	return false;
+}
+
+static inline int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
+		gfp_t gfp_mask)
+{
 	return 0;
-}
-
-static inline int mem_cgroup_try_charge_delay(struct page *page,
-		struct mm_struct *mm,
-		gfp_t gfp_mask,
-		struct mem_cgroup **memcgp,
-		bool compound)
-{
-	*memcgp = NULL;
-	return 0;
-}
-
-static inline void mem_cgroup_commit_charge(struct page *page,
-		struct mem_cgroup *memcg,
-		bool lrucare, bool compound)
-{
-}
-
-static inline void mem_cgroup_cancel_charge(struct page *page,
-		struct mem_cgroup *memcg,
-		bool compound)
-{
 }
 
 static inline void mem_cgroup_uncharge(struct page *page)
@@ -871,26 +1042,25 @@
 {
 }
 
-static inline struct lruvec *mem_cgroup_lruvec(struct pglist_data *pgdat,
-		struct mem_cgroup *memcg)
+static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
+		struct pglist_data *pgdat)
 {
-	return node_lruvec(pgdat);
+	return &pgdat->__lruvec;
 }
 
 static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
 		struct pglist_data *pgdat)
 {
-	return &pgdat->lruvec;
+	return &pgdat->__lruvec;
+}
+
+static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
+{
+	return NULL;
 }
 
 static inline bool mm_match_cgroup(struct mm_struct *mm,
 		struct mem_cgroup *memcg)
-{
-	return true;
-}
-
-static inline bool task_in_mem_cgroup(struct task_struct *task,
-		const struct mem_cgroup *memcg)
 {
 	return true;
 }
@@ -940,6 +1110,11 @@
 	return NULL;
 }
 
+static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m)
+{
+	return NULL;
+}
+
 static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
 {
 	return NULL;
@@ -950,21 +1125,9 @@
 	return true;
 }
 
-static inline unsigned long
-mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
-{
-	return 0;
-}
 static inline
 unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
 		enum lru_list lru, int zone_idx)
-{
-	return 0;
-}
-
-static inline unsigned long
-mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
-		int nid, unsigned int lru_mask)
 {
 	return 0;
 }
@@ -974,8 +1137,18 @@
 	return 0;
 }
 
+static inline unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
+{
+	return 0;
+}
+
 static inline void
-mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
+mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
+{
+}
+
+static inline void
+mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
 {
 }
 
@@ -1024,8 +1197,13 @@
 {
 }
 
-static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
-		int idx)
+static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
+{
+	return 0;
+}
+
+static inline unsigned long memcg_page_state_local(struct mem_cgroup *memcg,
+		int idx)
 {
 	return 0;
 }
@@ -1060,6 +1238,17 @@
 	return node_page_state(lruvec_pgdat(lruvec), idx);
 }
 
+static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
+		enum node_stat_item idx)
+{
+	return node_page_state(lruvec_pgdat(lruvec), idx);
+}
+
+static inline void __mod_memcg_lruvec_state(struct lruvec *lruvec,
+		enum node_stat_item idx, int val)
+{
+}
+
 static inline void __mod_lruvec_state(struct lruvec *lruvec,
 		enum node_stat_item idx, int val)
 {
@@ -1084,6 +1273,26 @@
 	mod_node_page_state(page_pgdat(page), idx, val);
 }
 
+static inline void __mod_lruvec_slab_state(void *p, enum node_stat_item idx,
+		int val)
+{
+	struct page *page = virt_to_head_page(p);
+
+	__mod_node_page_state(page_pgdat(page), idx, val);
+}
+
+static inline void mod_lruvec_slab_state(void *p, enum node_stat_item idx,
+		int val)
+{
+	struct page *page = virt_to_head_page(p);
+
+	mod_node_page_state(page_pgdat(page), idx, val);
+}
+
+static inline void mod_memcg_obj_state(void *p, int idx, int val)
+{
+}
+
 static inline
 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
 		gfp_t gfp_mask,
@@ -1092,13 +1301,19 @@
 	return 0;
 }
 
-static inline void mem_cgroup_split_huge_fixup(struct page *head)
+static inline void split_page_memcg(struct page *head, unsigned int nr)
 {
 }
 
 static inline void count_memcg_events(struct mem_cgroup *memcg,
 		enum vm_event_item idx,
 		unsigned long count)
+{
+}
+
+static inline void __count_memcg_events(struct mem_cgroup *memcg,
+		enum vm_event_item idx,
+		unsigned long count)
 {
 }
 
@@ -1165,6 +1380,16 @@
 	__mod_lruvec_page_state(page, idx, -1);
 }
 
+static inline void __inc_lruvec_slab_state(void *p, enum node_stat_item idx)
+{
+	__mod_lruvec_slab_state(p, idx, 1);
+}
+
+static inline void __dec_lruvec_slab_state(void *p, enum node_stat_item idx)
+{
+	__mod_lruvec_slab_state(p, idx, -1);
+}
+
 /* idx can be of type enum memcg_stat_item or node_stat_item */
 static inline void inc_memcg_state(struct mem_cgroup *memcg,
 		int idx)
@@ -1217,12 +1442,40 @@
 	mod_lruvec_page_state(page, idx, -1);
 }
 
+static inline struct lruvec *parent_lruvec(struct lruvec *lruvec)
+{
+	struct mem_cgroup *memcg;
+
+	memcg = lruvec_memcg(lruvec);
+	if (!memcg)
+		return NULL;
+	memcg = parent_mem_cgroup(memcg);
+	if (!memcg)
+		return NULL;
+	return mem_cgroup_lruvec(memcg, lruvec_pgdat(lruvec));
+}
+
 #ifdef CONFIG_CGROUP_WRITEBACK
 
 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb);
 void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
 		unsigned long *pheadroom, unsigned long *pdirty,
 		unsigned long *pwriteback);
+
+void mem_cgroup_track_foreign_dirty_slowpath(struct page *page,
+		struct bdi_writeback *wb);
+
+static inline void mem_cgroup_track_foreign_dirty(struct page *page,
+		struct bdi_writeback *wb)
+{
+	if (mem_cgroup_disabled())
+		return;
+
+	if (unlikely(&page->mem_cgroup->css != wb->memcg_css))
+		mem_cgroup_track_foreign_dirty_slowpath(page, wb);
+}
+
+void mem_cgroup_flush_foreign(struct bdi_writeback *wb);
 
 #else /* CONFIG_CGROUP_WRITEBACK */
 
@@ -1236,6 +1489,15 @@
 		unsigned long *pheadroom,
 		unsigned long *pdirty,
 		unsigned long *pwriteback)
+{
+}
+
+static inline void mem_cgroup_track_foreign_dirty(struct page *page,
+		struct bdi_writeback *wb)
+{
+}
+
+static inline void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
 {
 }
 
@@ -1259,6 +1521,11 @@
 	} while ((memcg = parent_mem_cgroup(memcg)));
 	return false;
 }
+
+extern int memcg_expand_shrinker_maps(int new_id);
+
+extern void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
+		int nid, int shrinker_id);
 #else
 #define mem_cgroup_sockets_enabled 0
 static inline void mem_cgroup_sk_alloc(struct sock *sk) { };
@@ -1267,18 +1534,26 @@
 {
 	return false;
 }
+
+static inline void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
+		int nid, int shrinker_id)
+{
+}
 #endif
 
-struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep);
-void memcg_kmem_put_cache(struct kmem_cache *cachep);
-int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
-		struct mem_cgroup *memcg);
-int memcg_kmem_charge(struct page *page, gfp_t gfp, int order);
-void memcg_kmem_uncharge(struct page *page, int order);
-
 #ifdef CONFIG_MEMCG_KMEM
+int __memcg_kmem_charge(struct mem_cgroup *memcg, gfp_t gfp,
+		unsigned int nr_pages);
+void __memcg_kmem_uncharge(struct mem_cgroup *memcg, unsigned int nr_pages);
+int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order);
+void __memcg_kmem_uncharge_page(struct page *page, int order);
+
+struct obj_cgroup *get_obj_cgroup_from_current(void);
+
+int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size);
+void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size);
+
 extern struct static_key_false memcg_kmem_enabled_key;
-extern struct workqueue_struct *memcg_kmem_cache_wq;
 
 extern int memcg_nr_cache_ids;
 void memcg_get_cache_ids(void);
@@ -1294,7 +1569,36 @@
 
 static inline bool memcg_kmem_enabled(void)
 {
-	return static_branch_unlikely(&memcg_kmem_enabled_key);
+	return static_branch_likely(&memcg_kmem_enabled_key);
+}
+
+static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
+		int order)
+{
+	if (memcg_kmem_enabled())
+		return __memcg_kmem_charge_page(page, gfp, order);
+	return 0;
+}
+
+static inline void memcg_kmem_uncharge_page(struct page *page, int order)
+{
+	if (memcg_kmem_enabled())
+		__memcg_kmem_uncharge_page(page, order);
+}
+
+static inline int memcg_kmem_charge(struct mem_cgroup *memcg, gfp_t gfp,
+		unsigned int nr_pages)
+{
+	if (memcg_kmem_enabled())
+		return __memcg_kmem_charge(memcg, gfp, nr_pages);
+	return 0;
+}
+
+static inline void memcg_kmem_uncharge(struct mem_cgroup *memcg,
+		unsigned int nr_pages)
+{
+	if (memcg_kmem_enabled())
+		__memcg_kmem_uncharge(memcg, nr_pages);
 }
 
 /*
@@ -1307,11 +1611,30 @@
 	return memcg ? memcg->kmemcg_id : -1;
 }
 
-extern int memcg_expand_shrinker_maps(int new_id);
+struct mem_cgroup *mem_cgroup_from_obj(void *p);
 
-extern void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
-		int nid, int shrinker_id);
 #else
+
+static inline int memcg_kmem_charge_page(struct page *page, gfp_t gfp,
+		int order)
+{
+	return 0;
+}
+
+static inline void memcg_kmem_uncharge_page(struct page *page, int order)
+{
+}
+
+static inline int __memcg_kmem_charge_page(struct page *page, gfp_t gfp,
+		int order)
+{
+	return 0;
+}
+
+static inline void __memcg_kmem_uncharge_page(struct page *page, int order)
+{
+}
+
 #define for_each_memcg_cache_index(_idx)	\
 	for (; NULL; )
 
@@ -1333,8 +1656,11 @@
 {
 }
 
-static inline void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
-		int nid, int shrinker_id) { }
+static inline struct mem_cgroup *mem_cgroup_from_obj(void *p)
+{
+	return NULL;
+}
+
 #endif /* CONFIG_MEMCG_KMEM */
 
 #endif /* _LINUX_MEMCONTROL_H */
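
For orientation, the charge-path hunks above replace the old mem_cgroup_try_charge()/mem_cgroup_commit_charge()/mem_cgroup_cancel_charge() sequence with a single mem_cgroup_charge() that returns 0 when the controller is disabled. The following is a minimal caller-side sketch, assuming only the post-patch declarations in this header; the function my_cache_add() and its surrounding logic are hypothetical and not part of the patch:

/* Hypothetical caller: charge a new page to @mm before publishing it. */
static int my_cache_add(struct page *page, struct mm_struct *mm, gfp_t gfp)
{
	int err;

	err = mem_cgroup_charge(page, mm, gfp);	/* no-op (returns 0) if memcg is disabled */
	if (err)
		return err;			/* limit hit and reclaim failed */

	/* ... insert the page into the data structure here ... */

	/* A later failure path would drop the charge again: */
	/* mem_cgroup_uncharge(page); */
	return 0;
}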