2024-01-05 071106ecf68c401173c58808b1cf5f68cc50d390
kernel/lib/percpu_counter.c
@@ -17,7 +17,7 @@
 
 #ifdef CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER
 
-static struct debug_obj_descr percpu_counter_debug_descr;
+static const struct debug_obj_descr percpu_counter_debug_descr;
 
 static bool percpu_counter_fixup_free(void *addr, enum debug_obj_state state)
 {
@@ -33,7 +33,7 @@
 	}
 }
 
-static struct debug_obj_descr percpu_counter_debug_descr = {
+static const struct debug_obj_descr percpu_counter_debug_descr = {
 	.name = "percpu_counter",
 	.fixup_free = percpu_counter_fixup_free,
 };
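
Making the descriptor const works because this file only ever passes its address to the debugobjects core, which reads it and never modifies it; this assumes the debugobjects prototypes at this point already accept const struct debug_obj_descr *. A minimal sketch of such a read-only consumer, modelled on the debug helpers in this file (the helper shown here is illustrative, not part of this hunk):

/* Sketch: the descriptor is only handed to debugobjects calls that take a
 * const pointer, so the object itself can live in read-only data. */
static void debug_percpu_counter_activate(struct percpu_counter *fbc)
{
	debug_object_init(fbc, &percpu_counter_debug_descr);
	debug_object_activate(fbc, &percpu_counter_debug_descr);
}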
@@ -85,7 +85,7 @@
 
 	preempt_disable();
 	count = __this_cpu_read(*fbc->counters) + amount;
-	if (count >= batch || count <= -batch) {
+	if (abs(count) >= batch) {
 		unsigned long flags;
 		raw_spin_lock_irqsave(&fbc->lock, flags);
 		fbc->count += count;
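
The rewritten check relies on abs(count) >= batch being equivalent to the old two-sided comparison. As a sanity check, here is a minimal userspace sketch (not part of the patch; it uses llabs() because userspace abs() is int-only, whereas the kernel's abs() macro is type-generic) that exercises both forms on a few sample deltas around the batch boundary:

/* Userspace equivalence check for the old and new threshold tests. */
#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

int main(void)
{
	const int64_t samples[] = { -64, -32, -31, -1, 0, 1, 31, 32, 64 };
	const int32_t batch = 32;

	for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		int64_t count = samples[i];
		int old_check = (count >= batch || count <= -batch);
		int new_check = (llabs(count) >= batch);
		assert(old_check == new_check);
	}
	return 0;
}

Both forms fold the local delta into fbc->count exactly when its magnitude reaches the batch, so the behaviour of percpu_counter_add_batch() is unchanged.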
@@ -99,6 +99,25 @@
 EXPORT_SYMBOL(percpu_counter_add_batch);
 
 /*
+ * For percpu_counter with a big batch, the deviation of its count could
+ * be big, and there is requirement to reduce the deviation, like when the
+ * counter's batch could be runtime decreased to get a better accuracy,
+ * which can be achieved by running this sync function on each CPU.
+ */
+void percpu_counter_sync(struct percpu_counter *fbc)
+{
+	unsigned long flags;
+	s64 count;
+
+	raw_spin_lock_irqsave(&fbc->lock, flags);
+	count = __this_cpu_read(*fbc->counters);
+	fbc->count += count;
+	__this_cpu_sub(*fbc->counters, count);
+	raw_spin_unlock_irqrestore(&fbc->lock, flags);
+}
+EXPORT_SYMBOL(percpu_counter_sync);
+
+/*
 * Add up all the per-cpu counts, return the result. This is a more accurate
 * but much slower version of percpu_counter_read_positive()
 */
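
The new comment says the sync is meant to run on each CPU, but the patch itself shows no caller. Below is a hedged sketch of how a user of this API could do that with the existing schedule_on_each_cpu() workqueue helper; example_counter and both helper functions are hypothetical names for illustration, not part of this patch:

/* Hypothetical caller sketch: flush every CPU's local delta into the
 * global count, e.g. before shrinking the counter's batch at runtime. */
#include <linux/percpu_counter.h>
#include <linux/workqueue.h>

static struct percpu_counter example_counter;

/* Work handler runs on the CPU it was queued for, so percpu_counter_sync()
 * folds that CPU's local delta into example_counter.count. */
static void example_sync_work(struct work_struct *work)
{
	percpu_counter_sync(&example_counter);
}

/* schedule_on_each_cpu() queues the work on every online CPU and waits
 * for all of them to complete before returning. */
static int example_flush_deltas(void)
{
	return schedule_on_each_cpu(example_sync_work);
}

Once the work has run everywhere, fbc->count reflects all previously accumulated per-CPU deltas, so a smaller batch chosen afterwards bounds the future deviation more tightly.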