commit 6778948f9de86c3cfaf36725a7c87dcff9ba247f
Date:   2023-12-11

--- a/kernel/include/linux/rcupdate.h
+++ b/kernel/include/linux/rcupdate.h
@@ -1,25 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*
  * Read-Copy Update mechanism for mutual exclusion
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, you can access it online at
- * http://www.gnu.org/licenses/gpl-2.0.html.
  *
  * Copyright IBM Corporation, 2001
  *
  * Author: Dipankar Sarma <dipankar@in.ibm.com>
  *
- * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
+ * Based on the original work by Paul McKenney <paulmck@vnet.ibm.com>
  * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
  * Papers:
  * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
@@ -42,34 +29,23 @@
 #include <linux/lockdep.h>
 #include <asm/processor.h>
 #include <linux/cpumask.h>
-#include <linux/rcu_assign_pointer.h>
 
 #define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b))
 #define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b))
 #define ulong2long(a) (*(long *)(&(a)))
+#define USHORT_CMP_GE(a, b) (USHRT_MAX / 2 >= (unsigned short)((a) - (b)))
+#define USHORT_CMP_LT(a, b) (USHRT_MAX / 2 < (unsigned short)((a) - (b)))
 
 /* Exported common interfaces */
-
-#ifdef CONFIG_PREEMPT_RCU
 void call_rcu(struct rcu_head *head, rcu_callback_t func);
-#else /* #ifdef CONFIG_PREEMPT_RCU */
-#define call_rcu call_rcu_sched
-#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
-
-#ifdef CONFIG_PREEMPT_RT_FULL
-#define call_rcu_bh call_rcu
-#else
-void call_rcu_bh(struct rcu_head *head, rcu_callback_t func);
-#endif
-void call_rcu_sched(struct rcu_head *head, rcu_callback_t func);
-void synchronize_sched(void);
 void rcu_barrier_tasks(void);
+void rcu_barrier_tasks_rude(void);
+void synchronize_rcu(void);
 
 #ifdef CONFIG_PREEMPT_RCU
 
 void __rcu_read_lock(void);
 void __rcu_read_unlock(void);
-void synchronize_rcu(void);
 
 /*
  * Defined as a macro as it is a very low level header included from
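
With the RCU flavors consolidated, call_rcu() now covers the update-side
cases that previously needed call_rcu_bh() or call_rcu_sched(). A minimal
update-side sketch (struct foo, gbl_foo, gbl_lock, and foo_update() are
hypothetical, not part of this patch):

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct foo {
	int data;
	struct rcu_head rh;
};

static struct foo __rcu *gbl_foo;
static DEFINE_SPINLOCK(gbl_lock);

static void foo_reclaim(struct rcu_head *rhp)
{
	kfree(container_of(rhp, struct foo, rh));
}

static void foo_update(int new_data)
{
	struct foo *newp = kmalloc(sizeof(*newp), GFP_KERNEL);
	struct foo *oldp;

	if (!newp)
		return;
	newp->data = new_data;

	spin_lock(&gbl_lock);
	oldp = rcu_dereference_protected(gbl_foo,
					 lockdep_is_held(&gbl_lock));
	rcu_assign_pointer(gbl_foo, newp);
	spin_unlock(&gbl_lock);

	if (oldp)
		call_rcu(&oldp->rh, foo_reclaim);	/* one GP covers all readers */
}
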
@@ -78,13 +54,14 @@
  * types of kernel builds, the rcu_read_lock() nesting depth is unknowable.
  */
 #define rcu_preempt_depth() (current->rcu_read_lock_nesting)
-#ifndef CONFIG_PREEMPT_RT_FULL
-#define sched_rcu_preempt_depth() rcu_preempt_depth()
-#else
-static inline int sched_rcu_preempt_depth(void) { return 0; }
-#endif
 
 #else /* #ifdef CONFIG_PREEMPT_RCU */
+
+#ifdef CONFIG_TINY_RCU
+#define rcu_read_unlock_strict() do { } while (0)
+#else
+void rcu_read_unlock_strict(void);
+#endif
 
 static inline void __rcu_read_lock(void)
 {
@@ -94,11 +71,7 @@
 static inline void __rcu_read_unlock(void)
 {
 	preempt_enable();
-}
-
-static inline void synchronize_rcu(void)
-{
-	synchronize_sched();
+	rcu_read_unlock_strict();
 }
 
 static inline int rcu_preempt_depth(void)
@@ -106,18 +79,20 @@
 	return 0;
 }
 
-#define sched_rcu_preempt_depth() rcu_preempt_depth()
-
 #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
 
 /* Internal to kernel */
 void rcu_init(void);
 extern int rcu_scheduler_active __read_mostly;
-void rcu_sched_qs(void);
-void rcu_bh_qs(void);
-void rcu_check_callbacks(int user);
+void rcu_sched_clock_irq(int user);
 void rcu_report_dead(unsigned int cpu);
 void rcutree_migrate_callbacks(int cpu);
+
+#ifdef CONFIG_TASKS_RCU_GENERIC
+void rcu_init_tasks_generic(void);
+#else
+static inline void rcu_init_tasks_generic(void) { }
+#endif
 
 #ifdef CONFIG_RCU_STALL_COMMON
 void rcu_sysrq_start(void);
@@ -137,19 +112,20 @@
 
 #ifdef CONFIG_RCU_NOCB_CPU
 void rcu_init_nohz(void);
+void rcu_nocb_flush_deferred_wakeup(void);
 #else /* #ifdef CONFIG_RCU_NOCB_CPU */
 static inline void rcu_init_nohz(void) { }
+static inline void rcu_nocb_flush_deferred_wakeup(void) { }
 #endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
 
 /**
  * RCU_NONIDLE - Indicate idle-loop code that needs RCU readers
  * @a: Code that RCU needs to pay attention to.
  *
- * RCU, RCU-bh, and RCU-sched read-side critical sections are forbidden
- * in the inner idle loop, that is, between the rcu_idle_enter() and
- * the rcu_idle_exit() -- RCU will happily ignore any such read-side
- * critical sections. However, things like powertop need tracepoints
- * in the inner idle loop.
+ * RCU read-side critical sections are forbidden in the inner idle loop,
+ * that is, between the rcu_idle_enter() and the rcu_idle_exit() -- RCU
+ * will happily ignore any such read-side critical sections. However,
+ * things like powertop need tracepoints in the inner idle loop.
 *
 * This macro provides the way out: RCU_NONIDLE(do_something_with_RCU())
 * will tell RCU that it needs to pay attention, invoke its argument
@@ -171,40 +147,68 @@
  * Note a quasi-voluntary context switch for RCU-tasks's benefit.
  * This is a macro rather than an inline function to avoid #include hell.
  */
-#ifdef CONFIG_TASKS_RCU
-#define rcu_tasks_qs(t) \
-	do { \
-		if (READ_ONCE((t)->rcu_tasks_holdout)) \
-			WRITE_ONCE((t)->rcu_tasks_holdout, false); \
-	} while (0)
-#define rcu_note_voluntary_context_switch(t) \
-	do { \
-		rcu_all_qs(); \
-		rcu_tasks_qs(t); \
+#ifdef CONFIG_TASKS_RCU_GENERIC
+
+# ifdef CONFIG_TASKS_RCU
+# define rcu_tasks_classic_qs(t, preempt) \
+	do { \
+		if (!(preempt) && READ_ONCE((t)->rcu_tasks_holdout)) \
+			WRITE_ONCE((t)->rcu_tasks_holdout, false); \
 	} while (0)
 void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func);
 void synchronize_rcu_tasks(void);
+# else
+# define rcu_tasks_classic_qs(t, preempt) do { } while (0)
+# define call_rcu_tasks call_rcu
+# define synchronize_rcu_tasks synchronize_rcu
+# endif
+
+# ifdef CONFIG_TASKS_TRACE_RCU
+# define rcu_tasks_trace_qs(t) \
+	do { \
+		if (!likely(READ_ONCE((t)->trc_reader_checked)) && \
+		    !unlikely(READ_ONCE((t)->trc_reader_nesting))) { \
+			smp_store_release(&(t)->trc_reader_checked, true); \
+			smp_mb(); /* Readers partitioned by store. */ \
+		} \
+	} while (0)
+# else
+# define rcu_tasks_trace_qs(t) do { } while (0)
+# endif
+
+#define rcu_tasks_qs(t, preempt) \
+do { \
+	rcu_tasks_classic_qs((t), (preempt)); \
+	rcu_tasks_trace_qs((t)); \
+} while (0)
+
+# ifdef CONFIG_TASKS_RUDE_RCU
+void call_rcu_tasks_rude(struct rcu_head *head, rcu_callback_t func);
+void synchronize_rcu_tasks_rude(void);
+# endif
+
+#define rcu_note_voluntary_context_switch(t) rcu_tasks_qs(t, false)
 void exit_tasks_rcu_start(void);
 void exit_tasks_rcu_finish(void);
-#else /* #ifdef CONFIG_TASKS_RCU */
-#define rcu_tasks_qs(t) do { } while (0)
-#define rcu_note_voluntary_context_switch(t) rcu_all_qs()
-#define call_rcu_tasks call_rcu_sched
-#define synchronize_rcu_tasks synchronize_sched
+#else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
+#define rcu_tasks_qs(t, preempt) do { } while (0)
+#define rcu_note_voluntary_context_switch(t) do { } while (0)
+#define call_rcu_tasks call_rcu
+#define synchronize_rcu_tasks synchronize_rcu
 static inline void exit_tasks_rcu_start(void) { }
 static inline void exit_tasks_rcu_finish(void) { }
-#endif /* #else #ifdef CONFIG_TASKS_RCU */
+#endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */
 
 /**
  * cond_resched_tasks_rcu_qs - Report potential quiescent states to RCU
  *
  * This macro resembles cond_resched(), except that it is defined to
  * report potential quiescent states to RCU-tasks even if the cond_resched()
- * machinery were to be shut off, as some advocate for PREEMPT kernels.
+ * machinery were to be shut off, as some advocate for PREEMPTION kernels.
 */
 #define cond_resched_tasks_rcu_qs() \
 do { \
-	rcu_tasks_qs(current); \
+	rcu_tasks_qs(current, false); \
 	cond_resched(); \
 } while (0)
 
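A sketch (hypothetical kthread loop; do_scan_step() is made up for
illustration) of how a long-running kernel thread reports the quiescent
states that the reworked rcu_tasks_qs() plumbing tracks:

#include <linux/kthread.h>
#include <linux/rcupdate.h>

extern void do_scan_step(void);		/* hypothetical unit of work */

static int scan_thread(void *arg)
{
	while (!kthread_should_stop()) {
		do_scan_step();
		/* Voluntary QS for both normal RCU and the RCU-tasks flavors. */
		cond_resched_tasks_rcu_qs();
	}
	return 0;
}
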
@@ -213,7 +217,7 @@
  * TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
  */
 
-#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU)
+#if defined(CONFIG_TREE_RCU)
 #include <linux/rcutree.h>
 #elif defined(CONFIG_TINY_RCU)
 #include <linux/rcutiny.h>
@@ -256,7 +260,7 @@
 
 static inline void rcu_lock_release(struct lockdep_map *map)
 {
-	lock_release(map, 1, _THIS_IP_);
+	lock_release(map, _THIS_IP_);
 }
 
 extern struct lockdep_map rcu_lock_map;
@@ -265,15 +269,9 @@
 extern struct lockdep_map rcu_callback_map;
 int debug_lockdep_rcu_enabled(void);
 int rcu_read_lock_held(void);
-#ifdef CONFIG_PREEMPT_RT_FULL
-static inline int rcu_read_lock_bh_held(void)
-{
-	return rcu_read_lock_held();
-}
-#else
 int rcu_read_lock_bh_held(void);
-#endif
 int rcu_read_lock_sched_held(void);
+int rcu_read_lock_any_held(void);
 
 #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 
@@ -294,6 +292,12 @@
 {
 	return !preemptible();
 }
+
+static inline int rcu_read_lock_any_held(void)
+{
+	return !preemptible();
+}
+
 #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 
 #ifdef CONFIG_PROVE_RCU
@@ -305,8 +309,8 @@
  */
 #define RCU_LOCKDEP_WARN(c, s) \
 	do { \
-		static bool __section(.data.unlikely) __warned; \
-		if (debug_lockdep_rcu_enabled() && !__warned && (c)) { \
+		static bool __section(".data.unlikely") __warned; \
+		if ((c) && debug_lockdep_rcu_enabled() && !__warned) { \
 			__warned = true; \
 			lockdep_rcu_suspicious(__FILE__, __LINE__, s); \
 		} \
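Illustrative use only (the condition and message are hypothetical, reusing
gbl_foo/gbl_lock from the earlier sketch). Note that with the reordered
test above, the cheap condition (c) is evaluated before
debug_lockdep_rcu_enabled(), so the lockdep query runs only when something
actually looks suspicious:

	RCU_LOCKDEP_WARN(!rcu_read_lock_held() && !lockdep_is_held(&gbl_lock),
			 "accessing gbl_foo without rcu_read_lock() or gbl_lock");
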
@@ -342,22 +346,21 @@
  * Helper functions for rcu_dereference_check(), rcu_dereference_protected()
  * and rcu_assign_pointer(). Some of these could be folded into their
  * callers, but they are left separate in order to ease introduction of
- * multiple flavors of pointers to match the multiple flavors of RCU
- * (e.g., __rcu_bh, __rcu_sched, and __srcu), should this make sense in
- * the future.
+ * multiple pointers markings to match different RCU implementations
+ * (e.g., __srcu), should this make sense in the future.
 */
 
 #ifdef __CHECKER__
-#define rcu_dereference_sparse(p, space) \
+#define rcu_check_sparse(p, space) \
 	((void)(((typeof(*p) space *)p) == p))
 #else /* #ifdef __CHECKER__ */
-#define rcu_dereference_sparse(p, space)
+#define rcu_check_sparse(p, space)
 #endif /* #else #ifdef __CHECKER__ */
 
 #define __rcu_access_pointer(p, space) \
 ({ \
	typeof(*p) *_________p1 = (typeof(*p) *__force)READ_ONCE(p); \
-	rcu_dereference_sparse(p, space); \
+	rcu_check_sparse(p, space); \
	((typeof(*p) __force __kernel *)(_________p1)); \
 })
 #define __rcu_dereference_check(p, c, space) \
@@ -365,13 +368,13 @@
	/* Dependency order vs. p above. */ \
	typeof(*p) *________p1 = (typeof(*p) *__force)READ_ONCE(p); \
	RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_check() usage"); \
-	rcu_dereference_sparse(p, space); \
+	rcu_check_sparse(p, space); \
	((typeof(*p) __force __kernel *)(________p1)); \
 })
 #define __rcu_dereference_protected(p, c, space) \
 ({ \
	RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_protected() usage"); \
-	rcu_dereference_sparse(p, space); \
+	rcu_check_sparse(p, space); \
	((typeof(*p) __force __kernel *)(p)); \
 })
 #define rcu_dereference_raw(p) \
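These helpers back the public rcu_dereference_check() and friends. A usage
sketch with the hypothetical pointer and lock from earlier; the access is
legal under either rcu_read_lock() or the updater's lock, since the macro
ORs the supplied condition with rcu_read_lock_held():

static int foo_peek(void)
{
	struct foo *p;

	p = rcu_dereference_check(gbl_foo, lockdep_is_held(&gbl_lock));
	return p ? p->data : -1;
}
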
@@ -382,20 +385,70 @@
 })
 
 /**
- * rcu_swap_protected() - swap an RCU and a regular pointer
- * @rcu_ptr: RCU pointer
- * @ptr: regular pointer
- * @c: the conditions under which the dereference will take place
- *
- * Perform swap(@rcu_ptr, @ptr) where @rcu_ptr is an RCU-annotated pointer and
- * @c is the argument that is passed to the rcu_dereference_protected() call
- * used to read that pointer.
+ * RCU_INITIALIZER() - statically initialize an RCU-protected global variable
+ * @v: The value to statically initialize with.
 */
-#define rcu_swap_protected(rcu_ptr, ptr, c) do { \
+#define RCU_INITIALIZER(v) (typeof(*(v)) __force __rcu *)(v)
+
+/**
+ * rcu_assign_pointer() - assign to RCU-protected pointer
+ * @p: pointer to assign to
+ * @v: value to assign (publish)
+ *
+ * Assigns the specified value to the specified RCU-protected
+ * pointer, ensuring that any concurrent RCU readers will see
+ * any prior initialization.
+ *
+ * Inserts memory barriers on architectures that require them
+ * (which is most of them), and also prevents the compiler from
+ * reordering the code that initializes the structure after the pointer
+ * assignment. More importantly, this call documents which pointers
+ * will be dereferenced by RCU read-side code.
+ *
+ * In some special cases, you may use RCU_INIT_POINTER() instead
+ * of rcu_assign_pointer(). RCU_INIT_POINTER() is a bit faster due
+ * to the fact that it does not constrain either the CPU or the compiler.
+ * That said, using RCU_INIT_POINTER() when you should have used
+ * rcu_assign_pointer() is a very bad thing that results in
+ * impossible-to-diagnose memory corruption. So please be careful.
+ * See the RCU_INIT_POINTER() comment header for details.
+ *
+ * Note that rcu_assign_pointer() evaluates each of its arguments only
+ * once, appearances notwithstanding. One of the "extra" evaluations
+ * is in typeof() and the other visible only to sparse (__CHECKER__),
+ * neither of which actually execute the argument. As with most cpp
+ * macros, this execute-arguments-only-once property is important, so
+ * please be careful when making changes to rcu_assign_pointer() and the
+ * other macros that it invokes.
+ */
+#define rcu_assign_pointer(p, v) \
+do { \
+	uintptr_t _r_a_p__v = (uintptr_t)(v); \
+	rcu_check_sparse(p, __rcu); \
+	\
+	if (__builtin_constant_p(v) && (_r_a_p__v) == (uintptr_t)NULL) \
+		WRITE_ONCE((p), (typeof(p))(_r_a_p__v)); \
+	else \
+		smp_store_release(&p, RCU_INITIALIZER((typeof(p))_r_a_p__v)); \
+} while (0)
+
+/**
+ * rcu_replace_pointer() - replace an RCU pointer, returning its old value
+ * @rcu_ptr: RCU pointer, whose old value is returned
+ * @ptr: regular pointer
+ * @c: the lockdep conditions under which the dereference will take place
+ *
+ * Perform a replacement, where @rcu_ptr is an RCU-annotated
+ * pointer and @c is the lockdep argument that is passed to the
+ * rcu_dereference_protected() call used to read that pointer. The old
+ * value of @rcu_ptr is returned, and @rcu_ptr is set to @ptr.
+ */
+#define rcu_replace_pointer(rcu_ptr, ptr, c) \
+({ \
	typeof(ptr) __tmp = rcu_dereference_protected((rcu_ptr), (c)); \
	rcu_assign_pointer((rcu_ptr), (ptr)); \
-	(ptr) = __tmp; \
-} while (0)
+	__tmp; \
+})
 
 /**
  * rcu_access_pointer() - fetch RCU pointer with no dereferencing
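An update-side sketch of the new rcu_replace_pointer() (hypothetical
gbl_foo/gbl_lock as in the earlier sketches). Unlike the removed
rcu_swap_protected(), it returns the old pointer as an expression value
rather than writing it back through its argument:

static struct foo *foo_install(struct foo *newp)
{
	struct foo *oldp;

	spin_lock(&gbl_lock);
	oldp = rcu_replace_pointer(gbl_foo, newp,
				   lockdep_is_held(&gbl_lock));
	spin_unlock(&gbl_lock);
	return oldp;	/* caller frees it after a grace period */
}
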
@@ -482,7 +535,7 @@
  * The no-tracing version of rcu_dereference_raw() must not call
  * rcu_read_lock_held().
  */
-#define rcu_dereference_raw_notrace(p) __rcu_dereference_check((p), 1, __rcu)
+#define rcu_dereference_raw_check(p) __rcu_dereference_check((p), 1, __rcu)
 
 /**
  * rcu_dereference_protected() - fetch RCU pointer when updates prevented
@@ -579,12 +632,12 @@
  *
  * You can avoid reading and understanding the next paragraph by
  * following this rule: don't put anything in an rcu_read_lock() RCU
- * read-side critical section that would block in a !PREEMPT kernel.
+ * read-side critical section that would block in a !PREEMPTION kernel.
  * But if you want the full story, read on!
 *
- * In non-preemptible RCU implementations (TREE_RCU and TINY_RCU),
+ * In non-preemptible RCU implementations (pure TREE_RCU and TINY_RCU),
 * it is illegal to block while in an RCU read-side critical section.
- * In preemptible RCU implementations (PREEMPT_RCU) in CONFIG_PREEMPT
+ * In preemptible RCU implementations (PREEMPT_RCU) in CONFIG_PREEMPTION
 * kernel builds, RCU read-side critical sections may be preempted,
 * but explicit blocking is illegal. Finally, in preemptible RCU
 * implementations in real-time (with -rt patchset) kernel builds, RCU
@@ -655,14 +708,9 @@
 /**
  * rcu_read_lock_bh() - mark the beginning of an RCU-bh critical section
  *
- * This is equivalent of rcu_read_lock(), but to be used when updates
- * are being done using call_rcu_bh() or synchronize_rcu_bh(). Since
- * both call_rcu_bh() and synchronize_rcu_bh() consider completion of a
- * softirq handler to be a quiescent state, a process in RCU read-side
- * critical section must be protected by disabling softirqs. Read-side
- * critical sections in interrupt context can use just rcu_read_lock(),
- * though this should at least be commented to avoid confusing people
- * reading the code.
+ * This is equivalent of rcu_read_lock(), but also disables softirqs.
+ * Note that anything else that disables softirqs can also serve as
+ * an RCU read-side critical section.
 *
 * Note that rcu_read_lock_bh() and the matching rcu_read_unlock_bh()
 * must occur in the same context, for example, it is illegal to invoke
@@ -672,41 +720,32 @@
 static inline void rcu_read_lock_bh(void)
 {
	local_bh_disable();
-#ifdef CONFIG_PREEMPT_RT_FULL
-	rcu_read_lock();
-#else
	__acquire(RCU_BH);
	rcu_lock_acquire(&rcu_bh_lock_map);
	RCU_LOCKDEP_WARN(!rcu_is_watching(),
			 "rcu_read_lock_bh() used illegally while idle");
-#endif
 }
 
-/*
- * rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section
+/**
+ * rcu_read_unlock_bh() - marks the end of a softirq-only RCU critical section
 *
 * See rcu_read_lock_bh() for more information.
 */
 static inline void rcu_read_unlock_bh(void)
 {
-#ifdef CONFIG_PREEMPT_RT_FULL
-	rcu_read_unlock();
-#else
	RCU_LOCKDEP_WARN(!rcu_is_watching(),
			 "rcu_read_unlock_bh() used illegally while idle");
	rcu_lock_release(&rcu_bh_lock_map);
	__release(RCU_BH);
-#endif
	local_bh_enable();
 }
 
 /**
  * rcu_read_lock_sched() - mark the beginning of a RCU-sched critical section
  *
- * This is equivalent of rcu_read_lock(), but to be used when updates
- * are being done using call_rcu_sched() or synchronize_rcu_sched().
- * Read-side critical sections can also be introduced by anything that
- * disables preemption, including local_irq_disable() and friends.
+ * This is equivalent of rcu_read_lock(), but disables preemption.
+ * Read-side critical sections can also be introduced by anything else
+ * that disables preemption, including local_irq_disable() and friends.
 *
 * Note that rcu_read_lock_sched() and the matching rcu_read_unlock_sched()
 * must occur in the same context, for example, it is illegal to invoke
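A reader-side sketch (hypothetical stats list; entries are added elsewhere
with list_add_rcu()) matching the simplified semantics: with the flavors
consolidated, any softirq-disabled region is a legitimate RCU read-side
critical section, and rcu_read_lock_bh() simply makes that explicit:

struct stat_entry {
	struct list_head node;
	unsigned long hits;
};
static LIST_HEAD(stats_list);

static void bump_all_stats(void)
{
	struct stat_entry *e;

	rcu_read_lock_bh();
	list_for_each_entry_rcu(e, &stats_list, node)
		e->hits++;
	rcu_read_unlock_bh();
}
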
@@ -729,10 +768,10 @@
	__acquire(RCU_SCHED);
 }
 
-/*
- * rcu_read_unlock_sched - marks the end of a RCU-classic critical section
+/**
+ * rcu_read_unlock_sched() - marks the end of a RCU-classic critical section
 *
- * See rcu_read_lock_sched for more information.
+ * See rcu_read_lock_sched() for more information.
 */
 static inline void rcu_read_unlock_sched(void)
 {
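And the matching preemption-disabled flavor, sketched with the same
hypothetical gbl_foo (use_foo_nonsleeping() is made up; nothing in the
critical section may block):

static void foo_poke(void)
{
	struct foo *p;

	rcu_read_lock_sched();
	p = rcu_dereference_sched(gbl_foo);
	if (p)
		use_foo_nonsleeping(p);
	rcu_read_unlock_sched();
}
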
@@ -790,7 +829,7 @@
 */
 #define RCU_INIT_POINTER(p, v) \
	do { \
-		rcu_dereference_sparse(p, __rcu); \
+		rcu_check_sparse(p, __rcu); \
		WRITE_ONCE(p, RCU_INITIALIZER(v)); \
	} while (0)
 
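A sketch of the classic case where RCU_INIT_POINTER() suffices
(hypothetical init path): the structure is not yet reachable by readers,
so rcu_assign_pointer()'s release ordering would buy nothing:

static int __init foo_init(void)
{
	struct foo *p = kzalloc(sizeof(*p), GFP_KERNEL);

	if (!p)
		return -ENOMEM;
	RCU_INIT_POINTER(gbl_foo, p);	/* no readers can see gbl_foo yet */
	return 0;
}
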
@@ -806,23 +845,23 @@
 
 /*
  * Does the specified offset indicate that the corresponding rcu_head
- * structure can be handled by kfree_rcu()?
+ * structure can be handled by kvfree_rcu()?
 */
-#define __is_kfree_rcu_offset(offset) ((offset) < 4096)
+#define __is_kvfree_rcu_offset(offset) ((offset) < 4096)
 
 /*
  * Helper macro for kfree_rcu() to prevent argument-expansion eyestrain.
 */
-#define __kfree_rcu(head, offset) \
+#define __kvfree_rcu(head, offset) \
	do { \
-		BUILD_BUG_ON(!__is_kfree_rcu_offset(offset)); \
-		kfree_call_rcu(head, (rcu_callback_t)(unsigned long)(offset)); \
+		BUILD_BUG_ON(!__is_kvfree_rcu_offset(offset)); \
+		kvfree_call_rcu(head, (rcu_callback_t)(unsigned long)(offset)); \
	} while (0)
 
 /**
  * kfree_rcu() - kfree an object after a grace period.
  * @ptr: pointer to kfree
- * @rcu_head: the name of the struct rcu_head within the type of @ptr.
+ * @rhf: the name of the struct rcu_head within the type of @ptr.
 *
 * Many rcu callbacks functions just call kfree() on the base structure.
 * These functions are trivial, but their size adds up, and furthermore
@@ -835,7 +874,7 @@
  * Because the functions are not allowed in the low-order 4096 bytes of
  * kernel virtual memory, offsets up to 4095 bytes can be accommodated.
  * If the offset is larger than 4095 bytes, a compile-time error will
- * be generated in __kfree_rcu(). If this error is triggered, you can
+ * be generated in __kvfree_rcu(). If this error is triggered, you can
  * either fall back to use of call_rcu() or rearrange the structure to
  * position the rcu_head structure into the first 4096 bytes.
 *
@@ -845,9 +884,52 @@
  * The BUILD_BUG_ON check must not involve any function calls, hence the
  * checks are done in macros here.
  */
-#define kfree_rcu(ptr, rcu_head) \
-	__kfree_rcu(&((ptr)->rcu_head), offsetof(typeof(*(ptr)), rcu_head))
+#define kfree_rcu(ptr, rhf) \
+do { \
+	typeof (ptr) ___p = (ptr); \
+	\
+	if (___p) \
+		__kvfree_rcu(&((___p)->rhf), offsetof(typeof(*(ptr)), rhf)); \
+} while (0)
 
+/**
+ * kvfree_rcu() - kvfree an object after a grace period.
+ *
+ * This macro consists of one or two arguments and it is
+ * based on whether an object is head-less or not. If it
+ * has a head then a semantic stays the same as it used
+ * to be before:
+ *
+ *     kvfree_rcu(ptr, rhf);
+ *
+ * where @ptr is a pointer to kvfree(), @rhf is the name
+ * of the rcu_head structure within the type of @ptr.
+ *
+ * When it comes to head-less variant, only one argument
+ * is passed and that is just a pointer which has to be
+ * freed after a grace period. Therefore the semantic is
+ *
+ *     kvfree_rcu(ptr);
+ *
+ * where @ptr is a pointer to kvfree().
+ *
+ * Please note, head-less way of freeing is permitted to
+ * use from a context that has to follow might_sleep()
+ * annotation. Otherwise, please switch and embed the
+ * rcu_head structure within the type of @ptr.
+ */
+#define kvfree_rcu(...) KVFREE_GET_MACRO(__VA_ARGS__, \
+	kvfree_rcu_arg_2, kvfree_rcu_arg_1)(__VA_ARGS__)
+
+#define KVFREE_GET_MACRO(_1, _2, NAME, ...) NAME
+#define kvfree_rcu_arg_2(ptr, rhf) kfree_rcu(ptr, rhf)
+#define kvfree_rcu_arg_1(ptr) \
+do { \
+	typeof(ptr) ___p = (ptr); \
+	\
+	if (___p) \
+		kvfree_call_rcu(NULL, (rcu_callback_t) (___p)); \
+} while (0)
 
 /*
  * Place this after a lock-acquisition primitive to guarantee that
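Sketches of the two kvfree_rcu() forms added above, reusing the
hypothetical struct foo with its embedded rcu_head "rh" (buf is assumed to
come from kvmalloc()):

static void foo_retire(struct foo *oldp, void *buf)
{
	kfree_rcu(oldp, rh);	/* embedded rcu_head: never blocks */
	kvfree_rcu(buf);	/* head-less form: may block, so only
				 * legal where might_sleep() is legal */
}
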
@@ -862,4 +944,49 @@
 #endif /* #else #ifdef CONFIG_ARCH_WEAK_RELEASE_ACQUIRE */
 
 
+/* Has the specified rcu_head structure been handed to call_rcu()? */
+
+/**
+ * rcu_head_init - Initialize rcu_head for rcu_head_after_call_rcu()
+ * @rhp: The rcu_head structure to initialize.
+ *
+ * If you intend to invoke rcu_head_after_call_rcu() to test whether a
+ * given rcu_head structure has already been passed to call_rcu(), then
+ * you must also invoke this rcu_head_init() function on it just after
+ * allocating that structure. Calls to this function must not race with
+ * calls to call_rcu(), rcu_head_after_call_rcu(), or callback invocation.
+ */
+static inline void rcu_head_init(struct rcu_head *rhp)
+{
+	rhp->func = (rcu_callback_t)~0L;
+}
+
+/**
+ * rcu_head_after_call_rcu() - Has this rcu_head been passed to call_rcu()?
+ * @rhp: The rcu_head structure to test.
+ * @f: The function passed to call_rcu() along with @rhp.
+ *
+ * Returns @true if the @rhp has been passed to call_rcu() with @func,
+ * and @false otherwise. Emits a warning in any other case, including
+ * the case where @rhp has already been invoked after a grace period.
+ * Calls to this function must not race with callback invocation. One way
+ * to avoid such races is to enclose the call to rcu_head_after_call_rcu()
+ * in an RCU read-side critical section that includes a read-side fetch
+ * of the pointer to the structure containing @rhp.
+ */
+static inline bool
+rcu_head_after_call_rcu(struct rcu_head *rhp, rcu_callback_t f)
+{
+	rcu_callback_t func = READ_ONCE(rhp->func);
+
+	if (func == f)
+		return true;
+	WARN_ON_ONCE(func != (rcu_callback_t)~0L);
+	return false;
+}
+
+/* kernel/ksysfs.c definitions */
+extern int rcu_expedited;
+extern int rcu_normal;
+
 #endif /* __LINUX_RCUPDATE_H */
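Finally, a debug-oriented sketch of the new helpers, reusing foo_reclaim()
and struct foo from the first example: rcu_head_init() must run right
after allocation, after which rcu_head_after_call_rcu() can detect a
double hand-off to call_rcu():

static struct foo *foo_alloc(void)
{
	struct foo *p = kmalloc(sizeof(*p), GFP_KERNEL);

	if (p)
		rcu_head_init(&p->rh);	/* required before the check below */
	return p;
}

static void foo_retire_once(struct foo *p)
{
	if (rcu_head_after_call_rcu(&p->rh, foo_reclaim))
		return;		/* already handed to call_rcu() */
	call_rcu(&p->rh, foo_reclaim);
}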