2023-12-09 b22da3d8526a935aa31e086e63f60ff3246cb61c
kernel/include/linux/rcupdate.h
@@ -1,25 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*
  * Read-Copy Update mechanism for mutual exclusion
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, you can access it online at
- * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2001
 *
 * Author: Dipankar Sarma <dipankar@in.ibm.com>
 *
- * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
+ * Based on the original work by Paul McKenney <paulmck@vnet.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
@@ -46,25 +33,19 @@
 #define ULONG_CMP_GE(a, b)	(ULONG_MAX / 2 >= (a) - (b))
 #define ULONG_CMP_LT(a, b)	(ULONG_MAX / 2 < (a) - (b))
 #define ulong2long(a)		(*(long *)(&(a)))
+#define USHORT_CMP_GE(a, b)	(USHRT_MAX / 2 >= (unsigned short)((a) - (b)))
+#define USHORT_CMP_LT(a, b)	(USHRT_MAX / 2 < (unsigned short)((a) - (b)))
 
 /* Exported common interfaces */
-
-#ifdef CONFIG_PREEMPT_RCU
 void call_rcu(struct rcu_head *head, rcu_callback_t func);
-#else /* #ifdef CONFIG_PREEMPT_RCU */
-#define call_rcu	call_rcu_sched
-#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
-
-void call_rcu_bh(struct rcu_head *head, rcu_callback_t func);
-void call_rcu_sched(struct rcu_head *head, rcu_callback_t func);
-void synchronize_sched(void);
 void rcu_barrier_tasks(void);
+void rcu_barrier_tasks_rude(void);
+void synchronize_rcu(void);
 
 #ifdef CONFIG_PREEMPT_RCU
 
 void __rcu_read_lock(void);
 void __rcu_read_unlock(void);
-void synchronize_rcu(void);
 
 /*
 * Defined as a macro as it is a very low level header included from
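
Note on the new USHORT_CMP_GE()/USHORT_CMP_LT() macros above: like ULONG_CMP_*, they compare sequence numbers modulo the type's range, so a counter that has wrapped still orders correctly. A standalone illustration (my sketch, not part of the commit):

	#include <limits.h>
	#include <stdio.h>

	#define USHORT_CMP_GE(a, b) (USHRT_MAX / 2 >= (unsigned short)((a) - (b)))
	#define USHORT_CMP_LT(a, b) (USHRT_MAX / 2 < (unsigned short)((a) - (b)))

	int main(void)
	{
		unsigned short older = 65500, newer = 10; /* newer wrapped past 2^16 */

		printf("%d\n", USHORT_CMP_GE(newer, older)); /* 1: newer >= older */
		printf("%d\n", USHORT_CMP_LT(older, newer)); /* 1: older < newer */
		return 0;
	}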
@@ -73,8 +54,19 @@
 * types of kernel builds, the rcu_read_lock() nesting depth is unknowable.
 */
 #define rcu_preempt_depth() (current->rcu_read_lock_nesting)
+#ifndef CONFIG_PREEMPT_RT
+#define sched_rcu_preempt_depth()	rcu_preempt_depth()
+#else
+static inline int sched_rcu_preempt_depth(void) { return 0; }
+#endif
 
 #else /* #ifdef CONFIG_PREEMPT_RCU */
+
+#ifdef CONFIG_TINY_RCU
+#define rcu_read_unlock_strict() do { } while (0)
+#else
+void rcu_read_unlock_strict(void);
+#endif
 
 static inline void __rcu_read_lock(void)
 {
@@ -84,11 +76,7 @@
 static inline void __rcu_read_unlock(void)
 {
 	preempt_enable();
-}
-
-static inline void synchronize_rcu(void)
-{
-	synchronize_sched();
+	rcu_read_unlock_strict();
 }
 
 static inline int rcu_preempt_depth(void)
@@ -96,16 +84,22 @@
 	return 0;
 }
 
+#define sched_rcu_preempt_depth() rcu_preempt_depth()
+
 #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
 
 /* Internal to kernel */
 void rcu_init(void);
 extern int rcu_scheduler_active __read_mostly;
-void rcu_sched_qs(void);
-void rcu_bh_qs(void);
-void rcu_check_callbacks(int user);
+void rcu_sched_clock_irq(int user);
 void rcu_report_dead(unsigned int cpu);
 void rcutree_migrate_callbacks(int cpu);
+
+#ifdef CONFIG_TASKS_RCU_GENERIC
+void rcu_init_tasks_generic(void);
+#else
+static inline void rcu_init_tasks_generic(void) { }
+#endif
 
 #ifdef CONFIG_RCU_STALL_COMMON
 void rcu_sysrq_start(void);
@@ -125,19 +119,20 @@
 
 #ifdef CONFIG_RCU_NOCB_CPU
 void rcu_init_nohz(void);
+void rcu_nocb_flush_deferred_wakeup(void);
 #else /* #ifdef CONFIG_RCU_NOCB_CPU */
 static inline void rcu_init_nohz(void) { }
+static inline void rcu_nocb_flush_deferred_wakeup(void) { }
 #endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
 
 /**
 * RCU_NONIDLE - Indicate idle-loop code that needs RCU readers
 * @a: Code that RCU needs to pay attention to.
 *
- * RCU, RCU-bh, and RCU-sched read-side critical sections are forbidden
- * in the inner idle loop, that is, between the rcu_idle_enter() and
- * the rcu_idle_exit() -- RCU will happily ignore any such read-side
- * critical sections. However, things like powertop need tracepoints
- * in the inner idle loop.
+ * RCU read-side critical sections are forbidden in the inner idle loop,
+ * that is, between the rcu_idle_enter() and the rcu_idle_exit() -- RCU
+ * will happily ignore any such read-side critical sections. However,
+ * things like powertop need tracepoints in the inner idle loop.
 *
 * This macro provides the way out: RCU_NONIDLE(do_something_with_RCU())
 * will tell RCU that it needs to pay attention, invoke its argument
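
The RCU_NONIDLE() kernel-doc above boils down to a single usage pattern; a hedged sketch (the tracepoint name is hypothetical):

	static void my_idle_body(int state)
	{
		/* Between rcu_idle_enter() and rcu_idle_exit(), RCU ignores
		 * readers, so anything that needs RCU must be wrapped: */
		RCU_NONIDLE(trace_my_idle_event(state)); /* hypothetical tracepoint */
	}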
@@ -159,40 +154,68 @@
 * Note a quasi-voluntary context switch for RCU-tasks's benefit.
 * This is a macro rather than an inline function to avoid #include hell.
 */
-#ifdef CONFIG_TASKS_RCU
-#define rcu_tasks_qs(t) \
-	do { \
-		if (READ_ONCE((t)->rcu_tasks_holdout)) \
-			WRITE_ONCE((t)->rcu_tasks_holdout, false); \
-	} while (0)
-#define rcu_note_voluntary_context_switch(t) \
-	do { \
-		rcu_all_qs(); \
-		rcu_tasks_qs(t); \
+#ifdef CONFIG_TASKS_RCU_GENERIC
+
+# ifdef CONFIG_TASKS_RCU
+# define rcu_tasks_classic_qs(t, preempt) \
+	do { \
+		if (!(preempt) && READ_ONCE((t)->rcu_tasks_holdout)) \
+			WRITE_ONCE((t)->rcu_tasks_holdout, false); \
 	} while (0)
 void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func);
 void synchronize_rcu_tasks(void);
+# else
+# define rcu_tasks_classic_qs(t, preempt) do { } while (0)
+# define call_rcu_tasks call_rcu
+# define synchronize_rcu_tasks synchronize_rcu
+# endif
+
+# ifdef CONFIG_TASKS_TRACE_RCU
+# define rcu_tasks_trace_qs(t) \
+	do { \
+		if (!likely(READ_ONCE((t)->trc_reader_checked)) && \
+		    !unlikely(READ_ONCE((t)->trc_reader_nesting))) { \
+			smp_store_release(&(t)->trc_reader_checked, true); \
+			smp_mb(); /* Readers partitioned by store. */ \
+		} \
+	} while (0)
+# else
+# define rcu_tasks_trace_qs(t) do { } while (0)
+# endif
+
+#define rcu_tasks_qs(t, preempt) \
+do { \
+	rcu_tasks_classic_qs((t), (preempt)); \
+	rcu_tasks_trace_qs((t)); \
+} while (0)
+
+# ifdef CONFIG_TASKS_RUDE_RCU
+void call_rcu_tasks_rude(struct rcu_head *head, rcu_callback_t func);
+void synchronize_rcu_tasks_rude(void);
+# endif
+
+#define rcu_note_voluntary_context_switch(t) rcu_tasks_qs(t, false)
 void exit_tasks_rcu_start(void);
 void exit_tasks_rcu_finish(void);
-#else /* #ifdef CONFIG_TASKS_RCU */
-#define rcu_tasks_qs(t) do { } while (0)
-#define rcu_note_voluntary_context_switch(t) rcu_all_qs()
-#define call_rcu_tasks call_rcu_sched
-#define synchronize_rcu_tasks synchronize_sched
+#else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
+#define rcu_tasks_qs(t, preempt) do { } while (0)
+#define rcu_note_voluntary_context_switch(t) do { } while (0)
+#define call_rcu_tasks call_rcu
+#define synchronize_rcu_tasks synchronize_rcu
 static inline void exit_tasks_rcu_start(void) { }
 static inline void exit_tasks_rcu_finish(void) { }
-#endif /* #else #ifdef CONFIG_TASKS_RCU */
+#endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */
 
 /**
 * cond_resched_tasks_rcu_qs - Report potential quiescent states to RCU
 *
 * This macro resembles cond_resched(), except that it is defined to
 * report potential quiescent states to RCU-tasks even if the cond_resched()
- * machinery were to be shut off, as some advocate for PREEMPT kernels.
+ * machinery were to be shut off, as some advocate for PREEMPTION kernels.
 */
 #define cond_resched_tasks_rcu_qs() \
 do { \
-	rcu_tasks_qs(current); \
+	rcu_tasks_qs(current, false); \
 	cond_resched(); \
 } while (0)
 
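
A usage sketch for the reworked quiescent-state reporting above (the kthread body and work function are hypothetical):

	#include <linux/kthread.h>
	#include <linux/rcupdate.h>

	static int my_worker(void *unused)	/* hypothetical kthread body */
	{
		while (!kthread_should_stop()) {
			do_one_item();		/* hypothetical work function */
			/* Report a voluntary quiescent state to both classic
			 * RCU-tasks and RCU-tasks-trace; note the new
			 * rcu_tasks_qs(current, false) folded into this macro. */
			cond_resched_tasks_rcu_qs();
		}
		return 0;
	}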
@@ -201,7 +224,7 @@
 * TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
 */
 
-#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU)
+#if defined(CONFIG_TREE_RCU)
 #include <linux/rcutree.h>
 #elif defined(CONFIG_TINY_RCU)
 #include <linux/rcutiny.h>
@@ -244,7 +267,7 @@
 
 static inline void rcu_lock_release(struct lockdep_map *map)
 {
-	lock_release(map, 1, _THIS_IP_);
+	lock_release(map, _THIS_IP_);
 }
 
 extern struct lockdep_map rcu_lock_map;
@@ -255,6 +278,7 @@
 int rcu_read_lock_held(void);
 int rcu_read_lock_bh_held(void);
 int rcu_read_lock_sched_held(void);
+int rcu_read_lock_any_held(void);
 
 #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 
@@ -275,6 +299,12 @@
 {
 	return !preemptible();
 }
+
+static inline int rcu_read_lock_any_held(void)
+{
+	return !preemptible();
+}
+
 #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 
 #ifdef CONFIG_PROVE_RCU
@@ -286,8 +316,8 @@
 */
 #define RCU_LOCKDEP_WARN(c, s) \
 	do { \
-		static bool __section(.data.unlikely) __warned; \
-		if (debug_lockdep_rcu_enabled() && !__warned && (c)) { \
+		static bool __section(".data.unlikely") __warned; \
+		if ((c) && debug_lockdep_rcu_enabled() && !__warned) { \
 			__warned = true; \
 			lockdep_rcu_suspicious(__FILE__, __LINE__, s); \
 		} \
@@ -306,7 +336,8 @@
 #define rcu_sleep_check() \
 	do { \
 		rcu_preempt_sleep_check(); \
-		RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map), \
+		if (!IS_ENABLED(CONFIG_PREEMPT_RT)) \
+			RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map), \
				 "Illegal context switch in RCU-bh read-side critical section"); \
		RCU_LOCKDEP_WARN(lock_is_held(&rcu_sched_lock_map), \
				 "Illegal context switch in RCU-sched read-side critical section"); \
@@ -323,22 +354,21 @@
 * Helper functions for rcu_dereference_check(), rcu_dereference_protected()
 * and rcu_assign_pointer(). Some of these could be folded into their
 * callers, but they are left separate in order to ease introduction of
- * multiple flavors of pointers to match the multiple flavors of RCU
- * (e.g., __rcu_bh, * __rcu_sched, and __srcu), should this make sense in
- * the future.
+ * multiple pointers markings to match different RCU implementations
+ * (e.g., __srcu), should this make sense in the future.
 */
 
 #ifdef __CHECKER__
-#define rcu_dereference_sparse(p, space) \
+#define rcu_check_sparse(p, space) \
 	((void)(((typeof(*p) space *)p) == p))
 #else /* #ifdef __CHECKER__ */
-#define rcu_dereference_sparse(p, space)
+#define rcu_check_sparse(p, space)
 #endif /* #else #ifdef __CHECKER__ */
 
 #define __rcu_access_pointer(p, space) \
 ({ \
	typeof(*p) *_________p1 = (typeof(*p) *__force)READ_ONCE(p); \
-	rcu_dereference_sparse(p, space); \
+	rcu_check_sparse(p, space); \
	((typeof(*p) __force __kernel *)(_________p1)); \
 })
 #define __rcu_dereference_check(p, c, space) \
@@ -346,13 +376,13 @@
	/* Dependency order vs. p above. */ \
	typeof(*p) *________p1 = (typeof(*p) *__force)READ_ONCE(p); \
	RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_check() usage"); \
-	rcu_dereference_sparse(p, space); \
+	rcu_check_sparse(p, space); \
	((typeof(*p) __force __kernel *)(________p1)); \
 })
 #define __rcu_dereference_protected(p, c, space) \
 ({ \
	RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_protected() usage"); \
-	rcu_dereference_sparse(p, space); \
+	rcu_check_sparse(p, space); \
	((typeof(*p) __force __kernel *)(p)); \
 })
 #define rcu_dereference_raw(p) \
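
For orientation, the usual entry point into these helpers is rcu_dereference_check(); a minimal caller sketch (gp, gp_lock, and struct foo are hypothetical):

	static struct foo __rcu *gp;	/* hypothetical RCU-protected pointer */
	static DEFINE_SPINLOCK(gp_lock);

	static struct foo *fetch_foo(void)
	{
		/* Legal under rcu_read_lock() or while holding gp_lock;
		 * sparse validates the __rcu address space through the
		 * renamed rcu_check_sparse(). */
		return rcu_dereference_check(gp, lockdep_is_held(&gp_lock));
	}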
@@ -400,31 +430,33 @@
 * other macros that it invokes.
 */
 #define rcu_assign_pointer(p, v) \
-({ \
+do { \
	uintptr_t _r_a_p__v = (uintptr_t)(v); \
+	rcu_check_sparse(p, __rcu); \
	\
	if (__builtin_constant_p(v) && (_r_a_p__v) == (uintptr_t)NULL) \
		WRITE_ONCE((p), (typeof(p))(_r_a_p__v)); \
	else \
		smp_store_release(&p, RCU_INITIALIZER((typeof(p))_r_a_p__v)); \
-	_r_a_p__v; \
-})
+} while (0)
 
 /**
- * rcu_swap_protected() - swap an RCU and a regular pointer
- * @rcu_ptr: RCU pointer
+ * rcu_replace_pointer() - replace an RCU pointer, returning its old value
+ * @rcu_ptr: RCU pointer, whose old value is returned
 * @ptr: regular pointer
- * @c: the conditions under which the dereference will take place
+ * @c: the lockdep conditions under which the dereference will take place
 *
- * Perform swap(@rcu_ptr, @ptr) where @rcu_ptr is an RCU-annotated pointer and
- * @c is the argument that is passed to the rcu_dereference_protected() call
- * used to read that pointer.
+ * Perform a replacement, where @rcu_ptr is an RCU-annotated
+ * pointer and @c is the lockdep argument that is passed to the
+ * rcu_dereference_protected() call used to read that pointer. The old
+ * value of @rcu_ptr is returned, and @rcu_ptr is set to @ptr.
 */
-#define rcu_swap_protected(rcu_ptr, ptr, c) do { \
+#define rcu_replace_pointer(rcu_ptr, ptr, c) \
+({ \
	typeof(ptr) __tmp = rcu_dereference_protected((rcu_ptr), (c)); \
	rcu_assign_pointer((rcu_ptr), (ptr)); \
-	(ptr) = __tmp; \
-} while (0)
+	__tmp; \
+})
 
 /**
 * rcu_access_pointer() - fetch RCU pointer with no dereferencing
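
Migration sketch for the rcu_swap_protected() to rcu_replace_pointer() change above (names are hypothetical); the old value is now the macro's result rather than being written back through the second argument:

	static void install_foo(struct foo *newp)
	{
		struct foo *oldp;

		spin_lock(&gp_lock);
		oldp = rcu_replace_pointer(gp, newp, lockdep_is_held(&gp_lock));
		spin_unlock(&gp_lock);
		synchronize_rcu();	/* wait out pre-existing readers */
		kfree(oldp);
	}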
@@ -511,7 +543,7 @@
 * The no-tracing version of rcu_dereference_raw() must not call
 * rcu_read_lock_held().
 */
-#define rcu_dereference_raw_notrace(p) __rcu_dereference_check((p), 1, __rcu)
+#define rcu_dereference_raw_check(p) __rcu_dereference_check((p), 1, __rcu)
 
 /**
 * rcu_dereference_protected() - fetch RCU pointer when updates prevented
@@ -608,12 +640,12 @@
 *
 * You can avoid reading and understanding the next paragraph by
 * following this rule: don't put anything in an rcu_read_lock() RCU
- * read-side critical section that would block in a !PREEMPT kernel.
+ * read-side critical section that would block in a !PREEMPTION kernel.
 * But if you want the full story, read on!
 *
- * In non-preemptible RCU implementations (TREE_RCU and TINY_RCU),
+ * In non-preemptible RCU implementations (pure TREE_RCU and TINY_RCU),
 * it is illegal to block while in an RCU read-side critical section.
- * In preemptible RCU implementations (PREEMPT_RCU) in CONFIG_PREEMPT
+ * In preemptible RCU implementations (PREEMPT_RCU) in CONFIG_PREEMPTION
 * kernel builds, RCU read-side critical sections may be preempted,
 * but explicit blocking is illegal. Finally, in preemptible RCU
 * implementations in real-time (with -rt patchset) kernel builds, RCU
@@ -684,14 +716,9 @@
 /**
 * rcu_read_lock_bh() - mark the beginning of an RCU-bh critical section
 *
- * This is equivalent of rcu_read_lock(), but to be used when updates
- * are being done using call_rcu_bh() or synchronize_rcu_bh(). Since
- * both call_rcu_bh() and synchronize_rcu_bh() consider completion of a
- * softirq handler to be a quiescent state, a process in RCU read-side
- * critical section must be protected by disabling softirqs. Read-side
- * critical sections in interrupt context can use just rcu_read_lock(),
- * though this should at least be commented to avoid confusing people
- * reading the code.
+ * This is equivalent of rcu_read_lock(), but also disables softirqs.
+ * Note that anything else that disables softirqs can also serve as
+ * an RCU read-side critical section.
 *
 * Note that rcu_read_lock_bh() and the matching rcu_read_unlock_bh()
 * must occur in the same context, for example, it is illegal to invoke
@@ -707,8 +734,8 @@
		 "rcu_read_lock_bh() used illegally while idle");
 }
 
-/*
- * rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section
+/**
+ * rcu_read_unlock_bh() - marks the end of a softirq-only RCU critical section
 *
 * See rcu_read_lock_bh() for more information.
 */
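
A reader-side sketch matching the trimmed rcu_read_lock_bh() kernel-doc (table and handler are hypothetical):

	static void inspect_bh(int key)
	{
		struct foo *p;

		rcu_read_lock_bh();			/* also disables softirqs */
		p = rcu_dereference_bh(foo_table[key]);	/* hypothetical table */
		if (p)
			handle_foo(p);			/* hypothetical; must not sleep */
		rcu_read_unlock_bh();
	}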
@@ -724,10 +751,9 @@
 /**
 * rcu_read_lock_sched() - mark the beginning of a RCU-sched critical section
 *
- * This is equivalent of rcu_read_lock(), but to be used when updates
- * are being done using call_rcu_sched() or synchronize_rcu_sched().
- * Read-side critical sections can also be introduced by anything that
- * disables preemption, including local_irq_disable() and friends.
+ * This is equivalent of rcu_read_lock(), but disables preemption.
+ * Read-side critical sections can also be introduced by anything else
+ * that disables preemption, including local_irq_disable() and friends.
 *
 * Note that rcu_read_lock_sched() and the matching rcu_read_unlock_sched()
 * must occur in the same context, for example, it is illegal to invoke
@@ -750,10 +776,10 @@
	__acquire(RCU_SCHED);
 }
 
-/*
- * rcu_read_unlock_sched - marks the end of a RCU-classic critical section
+/**
+ * rcu_read_unlock_sched() - marks the end of a RCU-classic critical section
 *
- * See rcu_read_lock_sched for more information.
+ * See rcu_read_lock_sched() for more information.
 */
 static inline void rcu_read_unlock_sched(void)
 {
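
And the corresponding sched-flavored reader, per the rewritten rcu_read_lock_sched() kernel-doc (again with hypothetical names):

	static void inspect_sched(int idx)
	{
		struct foo *p;

		rcu_read_lock_sched();				/* disables preemption */
		p = rcu_dereference_sched(foo_array[idx]);	/* hypothetical array */
		if (p)
			handle_foo(p);				/* hypothetical; must not sleep */
		rcu_read_unlock_sched();
	}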
@@ -811,7 +837,7 @@
 */
 #define RCU_INIT_POINTER(p, v) \
	do { \
-		rcu_dereference_sparse(p, __rcu); \
+		rcu_check_sparse(p, __rcu); \
		WRITE_ONCE(p, RCU_INITIALIZER(v)); \
	} while (0)
 
@@ -827,23 +853,23 @@
 
 /*
 * Does the specified offset indicate that the corresponding rcu_head
- * structure can be handled by kfree_rcu()?
+ * structure can be handled by kvfree_rcu()?
 */
-#define __is_kfree_rcu_offset(offset) ((offset) < 4096)
+#define __is_kvfree_rcu_offset(offset) ((offset) < 4096)
 
 /*
 * Helper macro for kfree_rcu() to prevent argument-expansion eyestrain.
 */
-#define __kfree_rcu(head, offset) \
+#define __kvfree_rcu(head, offset) \
	do { \
-		BUILD_BUG_ON(!__is_kfree_rcu_offset(offset)); \
-		kfree_call_rcu(head, (rcu_callback_t)(unsigned long)(offset)); \
+		BUILD_BUG_ON(!__is_kvfree_rcu_offset(offset)); \
+		kvfree_call_rcu(head, (rcu_callback_t)(unsigned long)(offset)); \
	} while (0)
 
 /**
 * kfree_rcu() - kfree an object after a grace period.
 * @ptr: pointer to kfree
- * @rcu_head: the name of the struct rcu_head within the type of @ptr.
+ * @rhf: the name of the struct rcu_head within the type of @ptr.
 *
 * Many rcu callbacks functions just call kfree() on the base structure.
 * These functions are trivial, but their size adds up, and furthermore
@@ -856,7 +882,7 @@
 * Because the functions are not allowed in the low-order 4096 bytes of
 * kernel virtual memory, offsets up to 4095 bytes can be accommodated.
 * If the offset is larger than 4095 bytes, a compile-time error will
- * be generated in __kfree_rcu(). If this error is triggered, you can
+ * be generated in __kvfree_rcu(). If this error is triggered, you can
 * either fall back to use of call_rcu() or rearrange the structure to
 * position the rcu_head structure into the first 4096 bytes.
 *
@@ -866,9 +892,52 @@
 * The BUILD_BUG_ON check must not involve any function calls, hence the
 * checks are done in macros here.
 */
-#define kfree_rcu(ptr, rcu_head) \
-	__kfree_rcu(&((ptr)->rcu_head), offsetof(typeof(*(ptr)), rcu_head))
+#define kfree_rcu(ptr, rhf) \
+do { \
+	typeof (ptr) ___p = (ptr); \
+	\
+	if (___p) \
+		__kvfree_rcu(&((___p)->rhf), offsetof(typeof(*(ptr)), rhf)); \
+} while (0)
 
+/**
+ * kvfree_rcu() - kvfree an object after a grace period.
+ *
+ * This macro consists of one or two arguments and it is
+ * based on whether an object is head-less or not. If it
+ * has a head then a semantic stays the same as it used
+ * to be before:
+ *
+ *     kvfree_rcu(ptr, rhf);
+ *
+ * where @ptr is a pointer to kvfree(), @rhf is the name
+ * of the rcu_head structure within the type of @ptr.
+ *
+ * When it comes to head-less variant, only one argument
+ * is passed and that is just a pointer which has to be
+ * freed after a grace period. Therefore the semantic is
+ *
+ *     kvfree_rcu(ptr);
+ *
+ * where @ptr is a pointer to kvfree().
+ *
+ * Please note, head-less way of freeing is permitted to
+ * use from a context that has to follow might_sleep()
+ * annotation. Otherwise, please switch and embed the
+ * rcu_head structure within the type of @ptr.
+ */
+#define kvfree_rcu(...) KVFREE_GET_MACRO(__VA_ARGS__, \
+	kvfree_rcu_arg_2, kvfree_rcu_arg_1)(__VA_ARGS__)
+
+#define KVFREE_GET_MACRO(_1, _2, NAME, ...) NAME
+#define kvfree_rcu_arg_2(ptr, rhf) kfree_rcu(ptr, rhf)
+#define kvfree_rcu_arg_1(ptr) \
+do { \
+	typeof(ptr) ___p = (ptr); \
+	\
+	if (___p) \
+		kvfree_call_rcu(NULL, (rcu_callback_t) (___p)); \
+} while (0)
 
 /*
 * Place this after a lock-acquisition primitive to guarantee that
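
Both freeing forms introduced above in one sketch (the struct and its fields are hypothetical):

	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct foo {
		int data;
		struct rcu_head rcu;	/* must sit within the first 4096 bytes */
	};

	static void release(struct foo *p, char *buf)
	{
		kfree_rcu(p, rcu);	/* two arguments: rcu_head field name */
		kvfree_rcu(buf);	/* one argument, head-less: caller may sleep */
	}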
@@ -883,4 +952,49 @@
 #endif /* #else #ifdef CONFIG_ARCH_WEAK_RELEASE_ACQUIRE */
 
 
+/* Has the specified rcu_head structure been handed to call_rcu()? */
+
+/**
+ * rcu_head_init - Initialize rcu_head for rcu_head_after_call_rcu()
+ * @rhp: The rcu_head structure to initialize.
+ *
+ * If you intend to invoke rcu_head_after_call_rcu() to test whether a
+ * given rcu_head structure has already been passed to call_rcu(), then
+ * you must also invoke this rcu_head_init() function on it just after
+ * allocating that structure. Calls to this function must not race with
+ * calls to call_rcu(), rcu_head_after_call_rcu(), or callback invocation.
+ */
+static inline void rcu_head_init(struct rcu_head *rhp)
+{
+	rhp->func = (rcu_callback_t)~0L;
+}
+
+/**
+ * rcu_head_after_call_rcu() - Has this rcu_head been passed to call_rcu()?
+ * @rhp: The rcu_head structure to test.
+ * @f: The function passed to call_rcu() along with @rhp.
+ *
+ * Returns @true if the @rhp has been passed to call_rcu() with @func,
+ * and @false otherwise. Emits a warning in any other case, including
+ * the case where @rhp has already been invoked after a grace period.
+ * Calls to this function must not race with callback invocation. One way
+ * to avoid such races is to enclose the call to rcu_head_after_call_rcu()
+ * in an RCU read-side critical section that includes a read-side fetch
+ * of the pointer to the structure containing @rhp.
+ */
+static inline bool
+rcu_head_after_call_rcu(struct rcu_head *rhp, rcu_callback_t f)
+{
+	rcu_callback_t func = READ_ONCE(rhp->func);
+
+	if (func == f)
+		return true;
+	WARN_ON_ONCE(func != (rcu_callback_t)~0L);
+	return false;
+}
+
+/* kernel/ksysfs.c definitions */
+extern int rcu_expedited;
+extern int rcu_normal;
+
 #endif /* __LINUX_RCUPDATE_H */
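
Finally, a sketch of the new rcu_head_init()/rcu_head_after_call_rcu() pairing (struct and callback are hypothetical):

	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct foo {
		int data;
		struct rcu_head rh;
	};

	static void foo_reclaim(struct rcu_head *rhp)	/* hypothetical callback */
	{
		kfree(container_of(rhp, struct foo, rh));
	}

	static struct foo *alloc_foo(void)
	{
		struct foo *p = kmalloc(sizeof(*p), GFP_KERNEL);

		if (p)
			rcu_head_init(&p->rh);	/* mark as not yet queued */
		return p;
	}

	static void free_foo_once(struct foo *p)
	{
		/* Queue the callback at most once; mind the race caveats
		 * spelled out in the kernel-doc above. */
		if (!rcu_head_after_call_rcu(&p->rh, foo_reclaim))
			call_rcu(&p->rh, foo_reclaim);
	}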