2024-05-10 23fa18eaa71266feff7ba8d83022d9e1cc83c65a
kernel/mm/page_counter.c
@@ -17,29 +17,24 @@
 				      unsigned long usage)
 {
 	unsigned long protected, old_protected;
+	unsigned long low, min;
 	long delta;
 
 	if (!c->parent)
 		return;
 
-	if (c->min || atomic_long_read(&c->min_usage)) {
-		if (usage <= c->min)
-			protected = usage;
-		else
-			protected = 0;
-
+	min = READ_ONCE(c->min);
+	if (min || atomic_long_read(&c->min_usage)) {
+		protected = min(usage, min);
 		old_protected = atomic_long_xchg(&c->min_usage, protected);
 		delta = protected - old_protected;
 		if (delta)
 			atomic_long_add(delta, &c->parent->children_min_usage);
 	}
 
-	if (c->low || atomic_long_read(&c->low_usage)) {
-		if (usage <= c->low)
-			protected = usage;
-		else
-			protected = 0;
-
+	low = READ_ONCE(c->low);
+	if (low || atomic_long_read(&c->low_usage)) {
+		protected = min(usage, low);
 		old_protected = atomic_long_xchg(&c->low_usage, protected);
 		delta = protected - old_protected;
 		if (delta)
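
Two things change in this hunk: the open-coded branch collapses into min() -- which also changes what gets published when usage exceeds the protection, from 0 to the clamped protection value -- and the protection setting is snapshotted with READ_ONCE(), so the zero-check and the clamp cannot observe two different values if a setter races with this path. A minimal userspace model of that single-load property (READ_ONCE() approximated with a C11 relaxed atomic load; demo_counter and protected_of are illustrative names, not kernel APIs):

/* Userspace sketch, not kernel code: the relaxed load stands in for
 * READ_ONCE(). One load yields one snapshot, so the test and the
 * clamp below agree even if another thread rewrites c->min between
 * them; a plain read could legally be reloaded twice by the compiler.
 */
#include <stdatomic.h>
#include <stdio.h>

struct demo_counter {
	_Atomic unsigned long min;	/* updated concurrently by a setter */
};

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

static unsigned long protected_of(struct demo_counter *c, unsigned long usage)
{
	unsigned long min = atomic_load_explicit(&c->min, memory_order_relaxed);

	return min ? min_ul(usage, min) : 0;
}

int main(void)
{
	struct demo_counter c = { .min = 512 };

	printf("%lu\n", protected_of(&c, 300));	/* 300: usage under min */
	printf("%lu\n", protected_of(&c, 800));	/* 512: clamped, not 0 */
	return 0;
}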
@@ -82,8 +77,8 @@
 		 * This is indeed racy, but we can live with some
 		 * inaccuracy in the watermark.
 		 */
-		if (new > c->watermark)
-			c->watermark = new;
+		if (new > READ_ONCE(c->watermark))
+			WRITE_ONCE(c->watermark, new);
 	}
 }
 
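Here, and in the identical update further down, the check-then-store on the watermark stays racy by design: READ_ONCE()/WRITE_ONCE() do not close the window, they only make each access a single untorn load or store and let KCSAN know the remaining race is tolerated. A runnable userspace model of the same pattern (C11 relaxed atomics stand in for the kernel macros; all names are illustrative):

/* Userspace model of the lockless watermark update. Each access is a
 * single untorn load or store, but the check-then-store window stays
 * open, so the recorded watermark may trail the true peak slightly.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned long usage;
static _Atomic unsigned long watermark;

static void charge(unsigned long pages)
{
	unsigned long new = atomic_fetch_add(&usage, pages) + pages;

	/* Racy check-then-store, as in the kernel: fine for statistics. */
	if (new > atomic_load_explicit(&watermark, memory_order_relaxed))
		atomic_store_explicit(&watermark, new, memory_order_relaxed);
}

static void *worker(void *arg)
{
	for (int i = 0; i < 100000; i++) {
		charge(1);
		atomic_fetch_sub(&usage, 1);	/* uncharge */
	}
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, worker, NULL);
	pthread_create(&b, NULL, worker, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	printf("usage=%lu watermark=%lu (may undershoot the true peak)\n",
	       atomic_load(&usage), atomic_load(&watermark));
	return 0;
}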
@@ -114,7 +109,7 @@
 		 *
 		 * The atomic_long_add_return() implies a full memory
 		 * barrier between incrementing the count and reading
-		 * the limit. When racing with page_counter_limit(),
+		 * the limit. When racing with page_counter_set_max(),
 		 * we either see the new limit or the setter sees the
 		 * counter has changed and retries.
 		 */
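
The comment fix points at the setter's current name. For context, a runnable userspace model of the charge-vs-set-max protocol the comment describes (seq_cst C11 atomics stand in for the full barriers of atomic_long_add_return() and xchg(); try_charge and set_max here are simplified illustrations, not the kernel functions):

/* Charge side increments first, then checks the limit; setter side
 * publishes the new limit, then re-checks usage and backs out if a
 * charge slipped in. The full barriers make the two races resolve
 * one way or the other, never both.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static _Atomic long usage;
static _Atomic unsigned long max_pages = 100;

static bool try_charge(long nr)
{
	long new = atomic_fetch_add(&usage, nr) + nr;

	if (new > (long)atomic_load(&max_pages)) {
		atomic_fetch_sub(&usage, nr);	/* cancel the charge */
		return false;
	}
	return true;
}

static int set_max(unsigned long nr)
{
	long snap = atomic_load(&usage);
	unsigned long old;

	if (snap > (long)nr)
		return -1;			/* cf. -EBUSY */

	old = atomic_exchange(&max_pages, nr);

	if (atomic_load(&usage) <= snap)
		return 0;

	atomic_store(&max_pages, old);		/* a charge raced in: undo */
	return -1;
}

int main(void)
{
	try_charge(60);
	printf("set_max(50) -> %d (usage 60 already above it)\n", set_max(50));
	printf("set_max(80) -> %d\n", set_max(80));
	printf("try_charge(30) -> %d (60 + 30 > 80)\n", try_charge(30));
	return 0;
}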
@@ -124,9 +119,10 @@
 			propagate_protected_usage(c, new);
 			/*
 			 * This is racy, but we can live with some
-			 * inaccuracy in the failcnt.
+			 * inaccuracy in the failcnt which is only used
+			 * to report stats.
 			 */
-			c->failcnt++;
+			data_race(c->failcnt++);
 			*fail = c;
 			goto failed;
 		}
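
Unlike READ_ONCE()/WRITE_ONCE(), data_race() adds no compiler or ordering guarantees at all; it only marks the expression so KCSAN treats the race as intentional. That is the right tool for failcnt, where a lost increment merely skews a statistic. A deliberately racy userspace demo of the kind of inaccuracy being accepted (plain unsynchronized increments; strictly undefined behavior in C, shown only to make the lost updates visible):

/* Two threads doing counter++ concurrently can interleave their
 * load/add/store sequences and lose updates. The kernel accepts
 * exactly this for failcnt.
 */
#include <pthread.h>
#include <stdio.h>

static unsigned long failcnt;	/* deliberately unsynchronized */

static void *worker(void *arg)
{
	for (int i = 0; i < 1000000; i++)
		failcnt++;	/* racy read-modify-write */
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, worker, NULL);
	pthread_create(&b, NULL, worker, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	/* Often prints less than 2000000: some increments were lost. */
	printf("failcnt=%lu (2000000 if race-free)\n", failcnt);
	return 0;
}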
@@ -135,8 +131,8 @@
 		 * Just like with failcnt, we can live with some
 		 * inaccuracy in the watermark.
 		 */
-		if (new > c->watermark)
-			c->watermark = new;
+		if (new > READ_ONCE(c->watermark))
+			WRITE_ONCE(c->watermark, new);
 	}
 	return true;
 
@@ -213,7 +209,7 @@
 {
 	struct page_counter *c;
 
-	counter->min = nr_pages;
+	WRITE_ONCE(counter->min, nr_pages);
 
 	for (c = counter; c; c = c->parent)
 		propagate_protected_usage(c, atomic_long_read(&c->usage));
@@ -230,7 +226,7 @@
 {
 	struct page_counter *c;
 
-	counter->low = nr_pages;
+	WRITE_ONCE(counter->low, nr_pages);
 
 	for (c = counter; c; c = c->parent)
 		propagate_protected_usage(c, atomic_long_read(&c->usage));
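
These last two hunks are the writer side of the READ_ONCE(c->min) and READ_ONCE(c->low) loads added above: WRITE_ONCE() pairs with those reads so neither side can be torn or duplicated by the compiler. The pattern in miniature (relaxed C11 atomics as stand-ins; set_low_demo/get_low_demo are illustrative names):

/* Minimal writer/reader pair on an unlocked shared scalar: one untorn
 * store, one untorn load, no ordering against surrounding memory --
 * none is needed, since only the value itself matters here.
 */
#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned long low_setting;

static void set_low_demo(unsigned long nr_pages)	/* cf. WRITE_ONCE() */
{
	atomic_store_explicit(&low_setting, nr_pages, memory_order_relaxed);
}

static unsigned long get_low_demo(void)		/* cf. READ_ONCE() */
{
	return atomic_load_explicit(&low_setting, memory_order_relaxed);
}

int main(void)
{
	set_low_demo(4096);
	printf("low=%lu\n", get_low_demo());
	return 0;
}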