.. | .. |
---|
3 | 3 | #define _LINUX_U64_STATS_SYNC_H |
---|
4 | 4 | |
---|
5 | 5 | /* |
---|
6 | | - * To properly implement 64bits network statistics on 32bit and 64bit hosts, |
---|
7 | | - * we provide a synchronization point, that is a noop on 64bit or UP kernels. |
---|
| 6 | + * Protect against 64-bit values tearing on 32-bit architectures. This is |
---|
| 7 | + * typically used for statistics read/update in different subsystems. |
---|
8 | 8 | * |
---|
9 | 9 | * Key points : |
---|
10 | | - * 1) Use a seqcount on SMP 32bits, with low overhead. |
---|
11 | | - * 2) Whole thing is a noop on 64bit arches or UP kernels. |
---|
12 | | - * 3) Write side must ensure mutual exclusion or one seqcount update could |
---|
| 10 | + * |
---|
| 11 | + * - Use a seqcount on 32-bit SMP, only disable preemption for 32-bit UP. |
---|
| 12 | + * - The whole thing is a no-op on 64-bit architectures. |
---|
| 13 | + * |
---|
| 14 | + * Usage constraints: |
---|
| 15 | + * |
---|
| 16 | + * 1) Write side must ensure mutual exclusion, or one seqcount update could |
---|
13 | 17 | * be lost, thus blocking readers forever. |
---|
14 | | - * If this synchronization point is not a mutex, but a spinlock or |
---|
15 | | - * spinlock_bh() or disable_bh() : |
---|
16 | | - * 3.1) Write side should not sleep. |
---|
17 | | - * 3.2) Write side should not allow preemption. |
---|
18 | | - * 3.3) If applicable, interrupts should be disabled. |
---|
| 18 | + * |
---|
| 19 | + * 2) Write side must disable preemption, or a seqcount reader can preempt the |
---|
| 20 | + * writer and also spin forever. |
---|
| 21 | + * |
---|
| 22 | + * 3) Write side must use the _irqsave() variant if other writers, or a reader, |
---|
| 23 | + * can be invoked from an IRQ context. |
---|
19 | 24 | * |
---|
20 | 25 | * 4) If reader fetches several counters, there is no guarantee the whole values |
---|
21 | | - * are consistent (remember point 1) : this is a noop on 64bit arches anyway) |
---|
| 26 | + * are consistent w.r.t. each other (remember point #2: seqcounts are not |
---|
| 27 | + * used for 64bit architectures). |
---|
22 | 28 | * |
---|
23 | | - * 5) readers are allowed to sleep or be preempted/interrupted : They perform |
---|
24 | | - * pure reads. But if they have to fetch many values, it's better to not allow |
---|
25 | | - * preemptions/interruptions to avoid many retries. |
---|
| 29 | + * 5) Readers are allowed to sleep or be preempted/interrupted: they perform |
---|
| 30 | + * pure reads. |
---|
26 | 31 | * |
---|
27 | | - * 6) If counter might be written by an interrupt, readers should block interrupts. |
---|
28 | | - * (On UP, there is no seqcount_t protection, a reader allowing interrupts could |
---|
29 | | - * read partial values) |
---|
30 | | - * |
---|
31 | | - * 7) For irq and softirq uses, readers can use u64_stats_fetch_begin_irq() and |
---|
32 | | - * u64_stats_fetch_retry_irq() helpers |
---|
| 32 | + * 6) Readers must use both u64_stats_fetch_{begin,retry}_irq() if the stats |
---|
| 33 | + * might be updated from a hardirq or softirq context (remember point #1: |
---|
| 34 | + * seqcounts are not used for UP kernels). 32-bit UP stat readers could read |
---|
| 35 | + * corrupted 64-bit values otherwise. |
---|
33 | 36 | * |
---|
34 | 37 | * Usage : |
---|
35 | 38 | * |
---|
.. | .. |
---|
40 | 43 | * spin_lock_bh(...) or other synchronization to get exclusive access |
---|
41 | 44 | * ... |
---|
42 | 45 | * u64_stats_update_begin(&stats->syncp); |
---|
43 | | - * stats->bytes64 += len; // non atomic operation |
---|
44 | | - * stats->packets64++; // non atomic operation |
---|
| 46 | + * u64_stats_add(&stats->bytes64, len); // non atomic operation |
---|
| 47 | + * u64_stats_inc(&stats->packets64); // non atomic operation |
---|
45 | 48 | * u64_stats_update_end(&stats->syncp); |
---|
46 | 49 | * |
---|
47 | 50 | * While a consumer (reader) should use following template to get consistent |
---|
.. | .. |
---|
52 | 55 | * |
---|
53 | 56 | * do { |
---|
54 | 57 | * start = u64_stats_fetch_begin(&stats->syncp); |
---|
55 | | - * tbytes = stats->bytes64; // non atomic operation |
---|
56 | | - * tpackets = stats->packets64; // non atomic operation |
---|
| 58 | + * tbytes = u64_stats_read(&stats->bytes64); // non atomic operation |
---|
| 59 | + * tpackets = u64_stats_read(&stats->packets64); // non atomic operation |
---|
57 | 60 | * } while (u64_stats_fetch_retry(&stats->syncp, start)); |
---|
58 | 61 | * |
---|
59 | 62 | * |
---|
.. | .. |
---|
63 | 66 | #include <linux/seqlock.h> |
---|
64 | 67 | |
---|
/*
 * Synchronization point for 64-bit statistics.
 *
 * Carries a seqcount only where 64-bit loads/stores can tear and a
 * writer can be observed mid-update: 32-bit SMP, or 32-bit PREEMPT_RT
 * (where a reader may preempt the writer). Empty — zero size — in all
 * other configurations, so embedding it in stats structures is free.
 */
struct u64_stats_sync {
#if BITS_PER_LONG==32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
	seqcount_t	seq;
#endif
};
---|
70 | 73 | |
---|
| 74 | +#if BITS_PER_LONG == 64 |
---|
| 75 | +#include <asm/local64.h> |
---|
71 | 76 | |
---|
72 | | -#if BITS_PER_LONG == 32 && defined(CONFIG_SMP) |
---|
| 77 | +typedef struct { |
---|
| 78 | + local64_t v; |
---|
| 79 | +} u64_stats_t ; |
---|
| 80 | + |
---|
| 81 | +static inline u64 u64_stats_read(const u64_stats_t *p) |
---|
| 82 | +{ |
---|
| 83 | + return local64_read(&p->v); |
---|
| 84 | +} |
---|
| 85 | + |
---|
| 86 | +static inline void u64_stats_add(u64_stats_t *p, unsigned long val) |
---|
| 87 | +{ |
---|
| 88 | + local64_add(val, &p->v); |
---|
| 89 | +} |
---|
| 90 | + |
---|
| 91 | +static inline void u64_stats_inc(u64_stats_t *p) |
---|
| 92 | +{ |
---|
| 93 | + local64_inc(&p->v); |
---|
| 94 | +} |
---|
| 95 | + |
---|
| 96 | +#else |
---|
| 97 | + |
---|
| 98 | +typedef struct { |
---|
| 99 | + u64 v; |
---|
| 100 | +} u64_stats_t; |
---|
| 101 | + |
---|
| 102 | +static inline u64 u64_stats_read(const u64_stats_t *p) |
---|
| 103 | +{ |
---|
| 104 | + return p->v; |
---|
| 105 | +} |
---|
| 106 | + |
---|
| 107 | +static inline void u64_stats_add(u64_stats_t *p, unsigned long val) |
---|
| 108 | +{ |
---|
| 109 | + p->v += val; |
---|
| 110 | +} |
---|
| 111 | + |
---|
| 112 | +static inline void u64_stats_inc(u64_stats_t *p) |
---|
| 113 | +{ |
---|
| 114 | + p->v++; |
---|
| 115 | +} |
---|
| 116 | +#endif |
---|
| 117 | + |
---|
| 118 | +#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)) |
---|
73 | 119 | #define u64_stats_init(syncp) seqcount_init(&(syncp)->seq) |
---|
74 | 120 | #else |
---|
75 | 121 | static inline void u64_stats_init(struct u64_stats_sync *syncp) |
---|
.. | .. |
---|
79 | 125 | |
---|
/*
 * u64_stats_update_begin - open a writer-side update section
 * @syncp: synchronization point guarding the counters
 *
 * Bumps the seqcount so concurrent readers detect the update and retry.
 * On PREEMPT_RT, preemption is disabled first: a seqcount reader that
 * preempted the writer would otherwise spin forever. Callers must still
 * provide mutual exclusion between writers (see the header comment).
 * Compiles to nothing when no seqcount is configured.
 */
static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		preempt_disable();
	write_seqcount_begin(&syncp->seq);
#endif
}
---|
86 | 134 | |
---|
/*
 * u64_stats_update_end - close a writer-side update section
 * @syncp: synchronization point guarding the counters
 *
 * Counterpart of u64_stats_update_begin(): ends the seqcount write
 * section, then re-enables preemption on PREEMPT_RT (in the reverse
 * order of _begin). Compiles to nothing when no seqcount is configured.
 */
static inline void u64_stats_update_end(struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
	write_seqcount_end(&syncp->seq);
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		preempt_enable();
#endif
}
---|
93 | 143 | |
---|
.. | .. |
---|
96 | 146 | { |
---|
97 | 147 | unsigned long flags = 0; |
---|
98 | 148 | |
---|
99 | | -#if BITS_PER_LONG==32 && defined(CONFIG_SMP) |
---|
100 | | - local_irq_save(flags); |
---|
| 149 | +#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)) |
---|
| 150 | + if (IS_ENABLED(CONFIG_PREEMPT_RT)) |
---|
| 151 | + preempt_disable(); |
---|
| 152 | + else |
---|
| 153 | + local_irq_save(flags); |
---|
101 | 154 | write_seqcount_begin(&syncp->seq); |
---|
102 | 155 | #endif |
---|
103 | 156 | return flags; |
---|
.. | .. |
---|
/*
 * u64_stats_update_end_irqrestore - close an update section, restoring IRQs
 * @syncp: synchronization point guarding the counters
 * @flags: value returned by the matching _begin_irqsave() call
 *
 * Counterpart of u64_stats_update_begin_irqsave(). Ends the seqcount
 * write section, then undoes the writer exclusion taken at _begin time:
 * preemption on PREEMPT_RT (IRQs were never disabled there, since IRQ
 * handlers are threaded), interrupts otherwise. @flags is unused when
 * no seqcount is configured.
 */
static inline void
u64_stats_update_end_irqrestore(struct u64_stats_sync *syncp,
				unsigned long flags)
{
#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
	write_seqcount_end(&syncp->seq);
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		preempt_enable();
	else
		local_irq_restore(flags);
#endif
}
---|
115 | 171 | |
---|
/*
 * __u64_stats_fetch_begin - raw reader-side seqcount snapshot
 * @syncp: synchronization point guarding the counters
 *
 * Returns the current seqcount to be handed back to
 * __u64_stats_fetch_retry(); a constant 0 where no seqcount exists.
 * Low-level helper: most callers want u64_stats_fetch_begin() or
 * u64_stats_fetch_begin_irq() instead.
 */
static inline unsigned int __u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
	return read_seqcount_begin(&syncp->seq);
#else
	return 0;
#endif
}
---|
.. | .. |
---|
124 | 180 | |
---|
/*
 * u64_stats_fetch_begin - start a reader-side fetch section
 * @syncp: synchronization point guarding the counters
 *
 * Returns the snapshot to pass to u64_stats_fetch_retry(). On 32-bit
 * UP without PREEMPT_RT there is no seqcount, so preemption is disabled
 * instead to keep the 64-bit reads from interleaving with an update.
 */
static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && (!defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_RT))
	preempt_disable();
#endif
	return __u64_stats_fetch_begin(syncp);
}
---|
.. | .. |
---|
133 | 189 | static inline bool __u64_stats_fetch_retry(const struct u64_stats_sync *syncp, |
---|
134 | 190 | unsigned int start) |
---|
135 | 191 | { |
---|
136 | | -#if BITS_PER_LONG==32 && defined(CONFIG_SMP) |
---|
| 192 | +#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)) |
---|
137 | 193 | return read_seqcount_retry(&syncp->seq, start); |
---|
138 | 194 | #else |
---|
139 | 195 | return false; |
---|
.. | .. |
---|
143 | 199 | static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp, |
---|
144 | 200 | unsigned int start) |
---|
145 | 201 | { |
---|
146 | | -#if BITS_PER_LONG==32 && !defined(CONFIG_SMP) |
---|
| 202 | +#if BITS_PER_LONG == 32 && (!defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_RT)) |
---|
147 | 203 | preempt_enable(); |
---|
148 | 204 | #endif |
---|
149 | 205 | return __u64_stats_fetch_retry(syncp, start); |
---|
.. | .. |
---|
157 | 213 | */ |
---|
/*
 * u64_stats_fetch_begin_irq - start a fetch section vs. IRQ-context writers
 * @syncp: synchronization point guarding the counters
 *
 * Variant of u64_stats_fetch_begin() for counters that may be updated
 * from hardirq or softirq context. On 32-bit PREEMPT_RT, disabling
 * preemption suffices (IRQ handlers are threaded there); on 32-bit UP
 * without a seqcount, interrupts must be disabled so an in-IRQ update
 * cannot interleave with the 64-bit reads.
 */
static inline unsigned int u64_stats_fetch_begin_irq(const struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_PREEMPT_RT)
	preempt_disable();
#elif BITS_PER_LONG == 32 && !defined(CONFIG_SMP)
	local_irq_disable();
#endif
	return __u64_stats_fetch_begin(syncp);
}
---|
.. | .. |
---|
166 | 224 | static inline bool u64_stats_fetch_retry_irq(const struct u64_stats_sync *syncp, |
---|
167 | 225 | unsigned int start) |
---|
168 | 226 | { |
---|
169 | | -#if BITS_PER_LONG==32 && !defined(CONFIG_SMP) |
---|
| 227 | +#if BITS_PER_LONG == 32 && defined(CONFIG_PREEMPT_RT) |
---|
| 228 | + preempt_enable(); |
---|
| 229 | +#elif BITS_PER_LONG == 32 && !defined(CONFIG_SMP) |
---|
170 | 230 | local_irq_enable(); |
---|
171 | 231 | #endif |
---|
172 | 232 | return __u64_stats_fetch_retry(syncp, start); |
---|