2024-05-10 37f49e37ab4cb5d0bc4c60eb5c6d4dd57db767bb
kernel/include/linux/percpu-refcount.h
@@ -75,27 +75,47 @@
	 * operation using percpu_ref_switch_to_percpu(). If initialized
	 * with this flag, the ref will stay in atomic mode until
	 * percpu_ref_switch_to_percpu() is invoked on it.
+	 * Implies ALLOW_REINIT.
	 */
	PERCPU_REF_INIT_ATOMIC	= 1 << 0,

	/*
	 * Start dead w/ ref == 0 in atomic mode. Must be revived with
-	 * percpu_ref_reinit() before used. Implies INIT_ATOMIC.
+	 * percpu_ref_reinit() before used. Implies INIT_ATOMIC and
+	 * ALLOW_REINIT.
	 */
	PERCPU_REF_INIT_DEAD	= 1 << 1,
+
+	/*
+	 * Allow switching from atomic mode to percpu mode.
+	 */
+	PERCPU_REF_ALLOW_REINIT	= 1 << 2,
+};
+
+struct percpu_ref_data {
+	atomic_long_t		count;
+	percpu_ref_func_t	*release;
+	percpu_ref_func_t	*confirm_switch;
+	bool			force_atomic:1;
+	bool			allow_reinit:1;
+	struct rcu_head		rcu;
+	struct percpu_ref	*ref;
 };

 struct percpu_ref {
-	atomic_long_t		count;
	/*
	 * The low bit of the pointer indicates whether the ref is in percpu
	 * mode; if set, then get/put will manipulate the atomic_t.
	 */
	unsigned long		percpu_count_ptr;
-	percpu_ref_func_t	*release;
-	percpu_ref_func_t	*confirm_switch;
-	bool			force_atomic:1;
-	struct rcu_head		rcu;
+
+	/*
+	 * 'percpu_ref' is often embedded into user structure, and only
+	 * 'percpu_count_ptr' is required in fast path, move other fields
+	 * into 'percpu_ref_data', so we can reduce memory footprint in
+	 * fast path.
+	 */
+	struct percpu_ref_data	*data;
 };

 int __must_check percpu_ref_init(struct percpu_ref *ref,
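
For context (not part of the patch): a minimal sketch of how a percpu_ref is normally embedded in a user structure, which is what makes the size of the embedded part matter. 'struct my_obj', 'my_obj_release' and 'my_obj_create' are made-up names; only percpu_ref_init(), container_of() and the PERCPU_REF_ALLOW_REINIT flag come from the real API. After this change the embedded part is just 'percpu_count_ptr' plus the 'data' pointer; everything else is allocated separately as percpu_ref_data.

#include <linux/percpu-refcount.h>
#include <linux/slab.h>

struct my_obj {
	struct percpu_ref ref;	/* fast-path state only: percpu_count_ptr + data */
	/* ... other fields ... */
};

static void my_obj_release(struct percpu_ref *ref)
{
	/* called once the last reference has been dropped */
	struct my_obj *obj = container_of(ref, struct my_obj, ref);

	kfree(obj);
}

static struct my_obj *my_obj_create(void)
{
	struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return NULL;

	/* ALLOW_REINIT keeps percpu_ref_reinit()/resurrect() usable after a kill */
	if (percpu_ref_init(&obj->ref, my_obj_release,
			    PERCPU_REF_ALLOW_REINIT, GFP_KERNEL)) {
		kfree(obj);
		return NULL;
	}
	return obj;
}
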
@@ -108,7 +128,9 @@
 void percpu_ref_switch_to_percpu(struct percpu_ref *ref);
 void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
				 percpu_ref_func_t *confirm_kill);
+void percpu_ref_resurrect(struct percpu_ref *ref);
 void percpu_ref_reinit(struct percpu_ref *ref);
+bool percpu_ref_is_zero(struct percpu_ref *ref);

 /**
  * percpu_ref_kill - drop the initial ref
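
A hedged sketch of the kill/resurrect cycle that the newly declared percpu_ref_resurrect() (and the now out-of-line percpu_ref_is_zero(), removed from this header further down) supports. 'my_obj' is the made-up structure from the sketch above and the freeze/unfreeze/drained names are invented; resurrecting is only legal while the ref has been killed but its release has not yet run.

static void my_obj_freeze(struct my_obj *obj)
{
	/* drop the initial ref; new users must now go through tryget_live() */
	percpu_ref_kill(&obj->ref);
}

static void my_obj_unfreeze(struct my_obj *obj)
{
	/*
	 * Undo the kill: re-take the initial ref and let the counter switch
	 * back to percpu mode. The caller must guarantee my_obj_release()
	 * has not been invoked in the meantime.
	 */
	percpu_ref_resurrect(&obj->ref);
}

static bool my_obj_fully_drained(struct my_obj *obj)
{
	/* true once every reference, including the initial one, is gone */
	return percpu_ref_is_zero(&obj->ref);
}
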
@@ -146,7 +168,7 @@
	 * between contaminating the pointer value, meaning that
	 * READ_ONCE() is required when fetching it.
	 *
-	 * The smp_read_barrier_depends() implied by READ_ONCE() pairs
+	 * The dependency ordering from the READ_ONCE() pairs
	 * with smp_store_release() in __percpu_ref_switch_to_percpu().
	 */
	percpu_ptr = READ_ONCE(ref->percpu_count_ptr);
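
The comment change above only reflects that READ_ONCE() now provides the required dependency ordering by itself, with no separate smp_read_barrier_depends(). A generic illustration of that publish/consume pairing, using made-up 'foo' names rather than anything from this file:

#include <linux/atomic.h>	/* smp_store_release() */
#include <linux/compiler.h>	/* READ_ONCE() */

struct foo {
	int val;
};

static struct foo *foo_ptr;

static void foo_publish(struct foo *f)
{
	f->val = 42;			/* initialise before publishing */
	smp_store_release(&foo_ptr, f);	/* orders the init before the pointer store */
}

static int foo_consume(void)
{
	struct foo *f = READ_ONCE(foo_ptr);	/* dependency-ordered load */

	/* if f is non-NULL, the read of f->val sees the value written above */
	return f ? f->val : -1;
}
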
@@ -177,14 +199,14 @@
 {
	unsigned long __percpu *percpu_count;

-	rcu_read_lock_sched();
+	rcu_read_lock();

	if (__ref_is_percpu(ref, &percpu_count))
		this_cpu_add(*percpu_count, nr);
	else
-		atomic_long_add(nr, &ref->count);
+		atomic_long_add(nr, &ref->data->count);

-	rcu_read_unlock_sched();
+	rcu_read_unlock();
 }

 /**
@@ -201,6 +223,36 @@
 }

 /**
+ * percpu_ref_tryget_many - try to increment a percpu refcount
+ * @ref: percpu_ref to try-get
+ * @nr: number of references to get
+ *
+ * Increment a percpu refcount by @nr unless its count already reached zero.
+ * Returns %true on success; %false on failure.
+ *
+ * This function is safe to call as long as @ref is between init and exit.
+ */
+static inline bool percpu_ref_tryget_many(struct percpu_ref *ref,
+					  unsigned long nr)
+{
+	unsigned long __percpu *percpu_count;
+	bool ret;
+
+	rcu_read_lock();
+
+	if (__ref_is_percpu(ref, &percpu_count)) {
+		this_cpu_add(*percpu_count, nr);
+		ret = true;
+	} else {
+		ret = atomic_long_add_unless(&ref->data->count, nr, 0);
+	}
+
+	rcu_read_unlock();
+
+	return ret;
+}
+
+/**
  * percpu_ref_tryget - try to increment a percpu refcount
  * @ref: percpu_ref to try-get
  *
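
An illustrative caller of the new percpu_ref_tryget_many() ('my_obj_submit_batch' is a made-up name): a batched submission path grabs one reference per queued item in a single operation instead of looping over percpu_ref_tryget().

#include <linux/errno.h>

static int my_obj_submit_batch(struct my_obj *obj, unsigned int nr_items)
{
	/* take nr_items refs at once; fails only if the count already hit zero */
	if (!percpu_ref_tryget_many(&obj->ref, nr_items))
		return -ENODEV;

	/* ... queue nr_items units of work, each dropping one ref on completion ... */
	return 0;
}
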
@@ -211,21 +263,7 @@
  */
 static inline bool percpu_ref_tryget(struct percpu_ref *ref)
 {
-	unsigned long __percpu *percpu_count;
-	bool ret;
-
-	rcu_read_lock_sched();
-
-	if (__ref_is_percpu(ref, &percpu_count)) {
-		this_cpu_inc(*percpu_count);
-		ret = true;
-	} else {
-		ret = atomic_long_inc_not_zero(&ref->count);
-	}
-
-	rcu_read_unlock_sched();
-
-	return ret;
+	return percpu_ref_tryget_many(ref, 1);
 }

 /**
@@ -248,16 +286,16 @@
	unsigned long __percpu *percpu_count;
	bool ret = false;

-	rcu_read_lock_sched();
+	rcu_read_lock();

	if (__ref_is_percpu(ref, &percpu_count)) {
		this_cpu_inc(*percpu_count);
		ret = true;
	} else if (!(ref->percpu_count_ptr & __PERCPU_REF_DEAD)) {
-		ret = atomic_long_inc_not_zero(&ref->count);
+		ret = atomic_long_inc_not_zero(&ref->data->count);
	}

-	rcu_read_unlock_sched();
+	rcu_read_unlock();

	return ret;
 }
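
For contrast with percpu_ref_tryget() above, a hedged sketch of where the two try-get variants typically go ('my_obj_open' and 'my_obj_get_internal' are invented names): tryget_live() refuses new references once the ref has been killed, which is what most "open a new handle" paths want, while plain tryget() still succeeds on a dying ref as long as its count has not reached zero.

#include <linux/errno.h>

static int my_obj_open(struct my_obj *obj)
{
	/* refuse new users once teardown (percpu_ref_kill) has started */
	if (!percpu_ref_tryget_live(&obj->ref))
		return -ENXIO;
	return 0;
}

static bool my_obj_get_internal(struct my_obj *obj)
{
	/* internal path: may still piggy-back on a dying ref while count > 0 */
	return percpu_ref_tryget(&obj->ref);
}
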
@@ -276,14 +314,14 @@
 {
	unsigned long __percpu *percpu_count;

-	rcu_read_lock_sched();
+	rcu_read_lock();

	if (__ref_is_percpu(ref, &percpu_count))
		this_cpu_sub(*percpu_count, nr);
-	else if (unlikely(atomic_long_sub_and_test(nr, &ref->count)))
-		ref->release(ref);
+	else if (unlikely(atomic_long_sub_and_test(nr, &ref->data->count)))
+		ref->data->release(ref);

-	rcu_read_unlock_sched();
+	rcu_read_unlock();
 }

 /**
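
A small sketch pairing percpu_ref_get_many() with percpu_ref_put_many() ('my_obj_run_batch' is made up); with the changes above, both sides charge ref->data->count in atomic mode and the final put invokes ref->data->release().

static void my_obj_run_batch(struct my_obj *obj, unsigned long nr)
{
	percpu_ref_get_many(&obj->ref, nr);	/* one reference per unit of work */

	/* ... process the nr units while the refs pin the object ... */

	percpu_ref_put_many(&obj->ref, nr);	/* drop them all in one go */
}
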
@@ -312,23 +350,6 @@
 static inline bool percpu_ref_is_dying(struct percpu_ref *ref)
 {
	return ref->percpu_count_ptr & __PERCPU_REF_DEAD;
-}
-
-/**
- * percpu_ref_is_zero - test whether a percpu refcount reached zero
- * @ref: percpu_ref to test
- *
- * Returns %true if @ref reached zero.
- *
- * This function is safe to call as long as @ref is between init and exit.
- */
-static inline bool percpu_ref_is_zero(struct percpu_ref *ref)
-{
-	unsigned long __percpu *percpu_count;
-
-	if (__ref_is_percpu(ref, &percpu_count))
-		return false;
-	return !atomic_long_read(&ref->count);
 }

 #endif