@@ -75,27 +75,47 @@
 	 * operation using percpu_ref_switch_to_percpu(). If initialized
 	 * with this flag, the ref will stay in atomic mode until
 	 * percpu_ref_switch_to_percpu() is invoked on it.
+	 * Implies ALLOW_REINIT.
 	 */
 	PERCPU_REF_INIT_ATOMIC = 1 << 0,
 
 	/*
 	 * Start dead w/ ref == 0 in atomic mode. Must be revived with
-	 * percpu_ref_reinit() before used. Implies INIT_ATOMIC.
+	 * percpu_ref_reinit() before used. Implies INIT_ATOMIC and
+	 * ALLOW_REINIT.
 	 */
 	PERCPU_REF_INIT_DEAD = 1 << 1,
+
+	/*
+	 * Allow switching from atomic mode to percpu mode.
+	 */
+	PERCPU_REF_ALLOW_REINIT = 1 << 2,
+};
+
+struct percpu_ref_data {
+	atomic_long_t count;
+	percpu_ref_func_t *release;
+	percpu_ref_func_t *confirm_switch;
+	bool force_atomic:1;
+	bool allow_reinit:1;
+	struct rcu_head rcu;
+	struct percpu_ref *ref;
 };
 
 struct percpu_ref {
-	atomic_long_t count;
 	/*
	 * The low bit of the pointer indicates whether the ref is in percpu
	 * mode; if set, then get/put will manipulate the atomic_t.
	 */
 	unsigned long percpu_count_ptr;
-	percpu_ref_func_t *release;
-	percpu_ref_func_t *confirm_switch;
-	bool force_atomic:1;
-	struct rcu_head rcu;
+
+	/*
+	 * 'percpu_ref' is often embedded into user structure, and only
+	 * 'percpu_count_ptr' is required in fast path, move other fields
+	 * into 'percpu_ref_data', so we can reduce memory footprint in
+	 * fast path.
+	 */
+	struct percpu_ref_data *data;
 };
 
 int __must_check percpu_ref_init(struct percpu_ref *ref,
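For context, a minimal usage sketch of the declarations above (the my_dev structure, my_dev_release() and my_dev_setup() are invented for illustration, not part of this patch): a percpu_ref is embedded in a user structure, initialized with percpu_ref_init(), and the release callback recovers its container with container_of(). Passing PERCPU_REF_ALLOW_REINIT keeps the ref reinitable after it has been switched to atomic mode, per the flag comment above.

#include <linux/percpu-refcount.h>
#include <linux/completion.h>

/* Illustrative only -- not part of this patch. */
struct my_dev {
	struct percpu_ref refcnt;
	struct completion free_done;
};

static void my_dev_release(struct percpu_ref *ref)
{
	struct my_dev *dev = container_of(ref, struct my_dev, refcnt);

	complete(&dev->free_done);	/* last reference has been dropped */
}

static int my_dev_setup(struct my_dev *dev)
{
	init_completion(&dev->free_done);

	/*
	 * Start in percpu (fast) mode; ALLOW_REINIT keeps the ref
	 * switchable back to percpu mode after a later kill.
	 */
	return percpu_ref_init(&dev->refcnt, my_dev_release,
			       PERCPU_REF_ALLOW_REINIT, GFP_KERNEL);
}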
@@ -108,7 +128,9 @@
 void percpu_ref_switch_to_percpu(struct percpu_ref *ref);
 void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
 				 percpu_ref_func_t *confirm_kill);
+void percpu_ref_resurrect(struct percpu_ref *ref);
 void percpu_ref_reinit(struct percpu_ref *ref);
+bool percpu_ref_is_zero(struct percpu_ref *ref);
 
 /**
  * percpu_ref_kill - drop the initial ref
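The two new declarations slot into a kill/revive cycle: percpu_ref_resurrect() brings back a ref that was killed, and percpu_ref_is_zero() (now out of line, see the removal at the end of this diff) reports whether it has fully drained. A hedged sketch of that pattern, reusing the hypothetical my_dev from above:

/* Illustrative freeze/unfreeze: block new users, then let them back in. */
static void my_dev_freeze(struct my_dev *dev)
{
	/* After this, percpu_ref_tryget_live() callers start failing. */
	percpu_ref_kill(&dev->refcnt);
}

static void my_dev_unfreeze(struct my_dev *dev)
{
	/*
	 * Unlike percpu_ref_reinit(), resurrect() does not require the
	 * count to have reached zero, only that the ref was killed.
	 */
	percpu_ref_resurrect(&dev->refcnt);
}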
@@ -146,7 +168,7 @@
 	 * between contaminating the pointer value, meaning that
 	 * READ_ONCE() is required when fetching it.
 	 *
-	 * The smp_read_barrier_depends() implied by READ_ONCE() pairs
+	 * The dependency ordering from the READ_ONCE() pairs
 	 * with smp_store_release() in __percpu_ref_switch_to_percpu().
 	 */
 	percpu_ptr = READ_ONCE(ref->percpu_count_ptr);
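The reworded comment drops smp_read_barrier_depends(), which is no longer a separate primitive; READ_ONCE() itself provides the address-dependency ordering that pairs with the publisher's smp_store_release(). As a standalone illustration of that pairing (generic sketch, not code from this file):

/* Generic publish/consume sketch -- not taken from percpu-refcount. */
struct item {
	int ready;
};

static struct item *shared_item;

static void publish(struct item *it)
{
	it->ready = 1;
	/* Order the initialisation before the pointer becomes visible. */
	smp_store_release(&shared_item, it);
}

static int consume(void)
{
	/* The address dependency orders this load before the dereference. */
	struct item *it = READ_ONCE(shared_item);

	return it ? it->ready : 0;
}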
@@ -177,14 +199,14 @@
 {
 	unsigned long __percpu *percpu_count;
 
-	rcu_read_lock_sched();
+	rcu_read_lock();
 
 	if (__ref_is_percpu(ref, &percpu_count))
 		this_cpu_add(*percpu_count, nr);
 	else
-		atomic_long_add(nr, &ref->count);
+		atomic_long_add(nr, &ref->data->count);
 
-	rcu_read_unlock_sched();
+	rcu_read_unlock();
 }
 
 /**
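percpu_ref_get_many() pairs with percpu_ref_put_many() later in this diff, letting a caller account for a whole batch with a single ref operation. A sketch of that usage (submit_batch()/complete_batch() and the unit counts are invented):

/* Illustrative batch accounting: one get/put per batch, not per unit. */
static void submit_batch(struct my_dev *dev, unsigned int nr_units)
{
	percpu_ref_get_many(&dev->refcnt, nr_units);
	/* ... queue nr_units work items ... */
}

static void complete_batch(struct my_dev *dev, unsigned int nr_units)
{
	/* May invoke my_dev_release() if this drops the final reference. */
	percpu_ref_put_many(&dev->refcnt, nr_units);
}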
@@ -201,6 +223,36 @@
 }
 
 /**
+ * percpu_ref_tryget_many - try to increment a percpu refcount
+ * @ref: percpu_ref to try-get
+ * @nr: number of references to get
+ *
+ * Increment a percpu refcount by @nr unless its count already reached zero.
+ * Returns %true on success; %false on failure.
+ *
+ * This function is safe to call as long as @ref is between init and exit.
+ */
+static inline bool percpu_ref_tryget_many(struct percpu_ref *ref,
+					  unsigned long nr)
+{
+	unsigned long __percpu *percpu_count;
+	bool ret;
+
+	rcu_read_lock();
+
+	if (__ref_is_percpu(ref, &percpu_count)) {
+		this_cpu_add(*percpu_count, nr);
+		ret = true;
+	} else {
+		ret = atomic_long_add_unless(&ref->data->count, nr, 0);
+	}
+
+	rcu_read_unlock();
+
+	return ret;
+}
+
+/**
  * percpu_ref_tryget - try to increment a percpu refcount
  * @ref: percpu_ref to try-get
  *
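Callers of the new percpu_ref_tryget_many() must cope with failure once the count has already hit zero; roughly (sketch, error code and names invented):

/* Illustrative: refuse new work once the refcount has drained. */
static int start_batch(struct my_dev *dev, unsigned int nr_units)
{
	if (!percpu_ref_tryget_many(&dev->refcnt, nr_units))
		return -ENODEV;	/* ref already reached zero */

	/* ... complete_batch() above drops the references again ... */
	return 0;
}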
@@ -211,21 +263,7 @@
  */
 static inline bool percpu_ref_tryget(struct percpu_ref *ref)
 {
-	unsigned long __percpu *percpu_count;
-	bool ret;
-
-	rcu_read_lock_sched();
-
-	if (__ref_is_percpu(ref, &percpu_count)) {
-		this_cpu_inc(*percpu_count);
-		ret = true;
-	} else {
-		ret = atomic_long_inc_not_zero(&ref->count);
-	}
-
-	rcu_read_unlock_sched();
-
-	return ret;
+	return percpu_ref_tryget_many(ref, 1);
 }
 
 /**
@@ -248,16 +286,16 @@
 	unsigned long __percpu *percpu_count;
 	bool ret = false;
 
-	rcu_read_lock_sched();
+	rcu_read_lock();
 
 	if (__ref_is_percpu(ref, &percpu_count)) {
 		this_cpu_inc(*percpu_count);
 		ret = true;
 	} else if (!(ref->percpu_count_ptr & __PERCPU_REF_DEAD)) {
-		ret = atomic_long_inc_not_zero(&ref->count);
+		ret = atomic_long_inc_not_zero(&ref->data->count);
 	}
 
-	rcu_read_unlock_sched();
+	rcu_read_unlock();
 
 	return ret;
 }
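The difference between percpu_ref_tryget() and percpu_ref_tryget_live() matters when the ref can be killed concurrently: tryget() still succeeds on a dying ref while the count is above zero, whereas tryget_live() fails as soon as __PERCPU_REF_DEAD is set. A hedged sketch of the usual gate (names invented):

/* Illustrative: only admit new I/O while teardown has not started. */
static int my_dev_enter(struct my_dev *dev)
{
	if (!percpu_ref_tryget_live(&dev->refcnt))
		return -EBUSY;	/* percpu_ref_kill() already ran */
	return 0;
}

static void my_dev_leave(struct my_dev *dev)
{
	percpu_ref_put(&dev->refcnt);
}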
@@ -276,14 +314,14 @@
 {
 	unsigned long __percpu *percpu_count;
 
-	rcu_read_lock_sched();
+	rcu_read_lock();
 
 	if (__ref_is_percpu(ref, &percpu_count))
 		this_cpu_sub(*percpu_count, nr);
-	else if (unlikely(atomic_long_sub_and_test(nr, &ref->count)))
-		ref->release(ref);
+	else if (unlikely(atomic_long_sub_and_test(nr, &ref->data->count)))
+		ref->data->release(ref);
 
-	rcu_read_unlock_sched();
+	rcu_read_unlock();
 }
 
 /**
@@ -312,23 +350,6 @@
 static inline bool percpu_ref_is_dying(struct percpu_ref *ref)
 {
 	return ref->percpu_count_ptr & __PERCPU_REF_DEAD;
-}
-
-/**
- * percpu_ref_is_zero - test whether a percpu refcount reached zero
- * @ref: percpu_ref to test
- *
- * Returns %true if @ref reached zero.
- *
- * This function is safe to call as long as @ref is between init and exit.
- */
-static inline bool percpu_ref_is_zero(struct percpu_ref *ref)
-{
-	unsigned long __percpu *percpu_count;
-
-	if (__ref_is_percpu(ref, &percpu_count))
-		return false;
-	return !atomic_long_read(&ref->count);
 }
 
 #endif
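With percpu_ref_is_zero() moved out of line (its declaration appears earlier in this diff), a typical teardown still reads the same; for completeness, a sketch reusing the hypothetical my_dev from above:

/* Illustrative teardown: kill, wait for the release callback, then free. */
static void my_dev_destroy(struct my_dev *dev)
{
	percpu_ref_kill(&dev->refcnt);
	wait_for_completion(&dev->free_done);	/* signalled by my_dev_release() */

	WARN_ON(!percpu_ref_is_zero(&dev->refcnt));
	percpu_ref_exit(&dev->refcnt);	/* frees the percpu counters */
}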