@@ -17,7 +17,7 @@
 
 #ifdef CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER
 
-static struct debug_obj_descr percpu_counter_debug_descr;
+static const struct debug_obj_descr percpu_counter_debug_descr;
 
 static bool percpu_counter_fixup_free(void *addr, enum debug_obj_state state)
 {
@@ -33,7 +33,7 @@
 	}
 }
 
-static struct debug_obj_descr percpu_counter_debug_descr = {
+static const struct debug_obj_descr percpu_counter_debug_descr = {
 	.name		= "percpu_counter",
 	.fixup_free	= percpu_counter_fixup_free,
 };
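For context, a hedged sketch of a consumer of the now-const descriptor, modeled on the counter's debug hooks elsewhere in lib/percpu_counter.c (not part of this diff): the debugobjects entry points take a const struct debug_obj_descr *, so the constified descriptor drops in without casts.

	static inline void debug_percpu_counter_activate(struct percpu_counter *fbc)
	{
		/* debug_object_init() and debug_object_activate() accept a
		 * const descriptor, which is what lets the two hunks above
		 * add the const qualifier without touching any call site. */
		debug_object_init(fbc, &percpu_counter_debug_descr);
		debug_object_activate(fbc, &percpu_counter_debug_descr);
	}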
@@ -85,7 +85,7 @@
 
 	preempt_disable();
 	count = __this_cpu_read(*fbc->counters) + amount;
-	if (count >= batch || count <= -batch) {
+	if (abs(count) >= batch) {
 		unsigned long flags;
 		raw_spin_lock_irqsave(&fbc->lock, flags);
 		fbc->count += count;
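For orientation, a hedged usage sketch of the batching this condition gates (the counter name and batch value below are made up for illustration): each CPU accumulates a signed local delta, and only once its magnitude reaches the batch is it folded into the shared fbc->count under the spinlock; the old two-sided comparison and the new abs() test are equivalent for this.

	static struct percpu_counter nr_dirty;	/* hypothetical counter */

	static void dirty_pages_inc(void)
	{
		/* With a batch of 32, up to 31 updates stay in this CPU's
		 * local counter; the 32nd trips abs(count) >= batch and is
		 * flushed to fbc->count under fbc->lock. */
		percpu_counter_add_batch(&nr_dirty, 1, 32);
	}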
@@ -99,6 +99,25 @@
 EXPORT_SYMBOL(percpu_counter_add_batch);
 
 /*
+ * For a percpu_counter with a big batch, the deviation of its count can
+ * be large, and there are cases where it must be reduced, e.g. when the
+ * counter's batch is decreased at runtime for better accuracy. That can
+ * be achieved by running this sync function on each CPU.
+ */
+void percpu_counter_sync(struct percpu_counter *fbc)
+{
+	unsigned long flags;
+	s64 count;
+
+	raw_spin_lock_irqsave(&fbc->lock, flags);
+	count = __this_cpu_read(*fbc->counters);
+	fbc->count += count;
+	__this_cpu_sub(*fbc->counters, count);
+	raw_spin_unlock_irqrestore(&fbc->lock, flags);
+}
+EXPORT_SYMBOL(percpu_counter_sync);
+
+/*
  * Add up all the per-cpu counts, return the result. This is a more accurate
  * but much slower version of percpu_counter_read_positive()
  */
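A hedged sketch of the intended caller pattern (the names below are hypothetical): since percpu_counter_sync() folds only the current CPU's delta, a caller that has just lowered its batch can run it on every CPU via schedule_on_each_cpu(), which executes the work function in process context on each online CPU.

	static struct percpu_counter example_counter;	/* hypothetical */

	static void example_sync_one(struct work_struct *dummy)
	{
		percpu_counter_sync(&example_counter);
	}

	static void example_shrink_batch(void)
	{
		/* After callers switch to a smaller batch, fold each CPU's
		 * outstanding delta so reads of fbc->count honour the
		 * tighter deviation bound. */
		schedule_on_each_cpu(example_sync_one);
	}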