2024-01-05 071106ecf68c401173c58808b1cf5f68cc50d390
--- a/kernel/rcu/rcu.h
+++ b/kernel/rcu/rcu.h
@@ -1,36 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*
  * Read-Copy Update definitions shared among RCU implementations.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, you can access it online at
- * http://www.gnu.org/licenses/gpl-2.0.html.
- *
  * Copyright IBM Corporation, 2011
  *
- * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+ * Author: Paul E. McKenney <paulmck@linux.ibm.com>
  */

 #ifndef __LINUX_RCU_H
 #define __LINUX_RCU_H

 #include <trace/events/rcu.h>
-#ifdef CONFIG_RCU_TRACE
-#define RCU_TRACE(stmt) stmt
-#else /* #ifdef CONFIG_RCU_TRACE */
-#define RCU_TRACE(stmt)
-#endif /* #else #ifdef CONFIG_RCU_TRACE */

-/* Offset to allow for unmatched rcu_irq_{enter,exit}(). */
+/* Offset to allow distinguishing irq vs. task-based idle entry/exit. */
 #define DYNTICK_IRQ_NONIDLE	((LONG_MAX / 2) + 1)

@@ -176,15 +158,16 @@

 /*
  * debug_rcu_head_queue()/debug_rcu_head_unqueue() are used internally
- * by call_rcu() and rcu callback execution, and are therefore not part of the
- * RCU API. Leaving in rcupdate.h because they are used by all RCU flavors.
+ * by call_rcu() and rcu callback execution, and are therefore not part
+ * of the RCU API. These are in rcupdate.h because they are used by all
+ * RCU implementations.
  */

 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
 # define STATE_RCU_HEAD_READY	0
 # define STATE_RCU_HEAD_QUEUED	1

-extern struct debug_obj_descr rcuhead_debug_descr;
+extern const struct debug_obj_descr rcuhead_debug_descr;

 static inline int debug_rcu_head_queue(struct rcu_head *head)
 {
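
Note on the constification above: the debug-objects descriptor is never written at runtime, so declaring it const lets the definition live in .rodata. For reference, the matching definition (it lives in kernel/rcu/update.c; this is a from-memory sketch, details may differ) looks roughly like:

	const struct debug_obj_descr rcuhead_debug_descr = {
		.name		  = "rcu_head",
		.is_static_object = rcuhead_is_static_object,
	};
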
@@ -215,34 +198,24 @@
 }
 #endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */

-void kfree(const void *);
+extern int rcu_cpu_stall_suppress_at_boot;

-/*
- * Reclaim the specified callback, either by invoking it (non-lazy case)
- * or freeing it directly (lazy case). Return true if lazy, false otherwise.
- */
-static inline bool __rcu_reclaim(const char *rn, struct rcu_head *head)
+static inline bool rcu_stall_is_suppressed_at_boot(void)
 {
-	unsigned long offset = (unsigned long)head->func;
-
-	rcu_lock_acquire(&rcu_callback_map);
-	if (__is_kfree_rcu_offset(offset)) {
-		RCU_TRACE(trace_rcu_invoke_kfree_callback(rn, head, offset);)
-		kfree((void *)head - offset);
-		rcu_lock_release(&rcu_callback_map);
-		return true;
-	} else {
-		RCU_TRACE(trace_rcu_invoke_callback(rn, head);)
-		head->func(head);
-		rcu_lock_release(&rcu_callback_map);
-		return false;
-	}
+	return rcu_cpu_stall_suppress_at_boot && !rcu_inkernel_boot_has_ended();
 }

 #ifdef CONFIG_RCU_STALL_COMMON

+extern int rcu_cpu_stall_ftrace_dump;
 extern int rcu_cpu_stall_suppress;
+extern int rcu_cpu_stall_timeout;
 int rcu_jiffies_till_stall_check(void);
+
+static inline bool rcu_stall_is_suppressed(void)
+{
+	return rcu_stall_is_suppressed_at_boot() || rcu_cpu_stall_suppress;
+}

 #define rcu_ftrace_dump_stall_suppress() \
 do { \
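
The stall-suppression logic is now split into a boot-time predicate (rcu_cpu_stall_suppress_at_boot holds only until rcu_inkernel_boot_has_ended() returns true) and the existing runtime knob, combined by rcu_stall_is_suppressed(). A minimal sketch of how a stall-check path is expected to consult it (function name illustrative, not the real stall-warning code):

	static void check_cpu_stall_sketch(void)
	{
		if (rcu_stall_is_suppressed())
			return;	/* suppressed at boot or via module parameter */
		/* otherwise evaluate jiffies deadlines and print the warning */
	}
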
@@ -257,6 +230,11 @@
 } while (0)

 #else /* #endif #ifdef CONFIG_RCU_STALL_COMMON */
+
+static inline bool rcu_stall_is_suppressed(void)
+{
+	return rcu_stall_is_suppressed_at_boot();
+}
 #define rcu_ftrace_dump_stall_suppress()
 #define rcu_ftrace_dump_stall_unsuppress()
 #endif /* #ifdef CONFIG_RCU_STALL_COMMON */
@@ -293,7 +271,7 @@
  */
 extern void resched_cpu(int cpu);

-#if defined(SRCU) || !defined(TINY_RCU)
+#if defined(CONFIG_SRCU) || !defined(CONFIG_TINY_RCU)

 #include <linux/rcu_node_tree.h>

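This hunk is a genuine bug fix rather than renaming churn: Kconfig emits macros with a CONFIG_ prefix, so bare SRCU and TINY_RCU are never defined and the old condition reduced to the always-true !defined(TINY_RCU). A minimal demonstration of the failure mode:

	#if defined(TINY_RCU)	/* never true: Kconfig defines CONFIG_TINY_RCU */
	#error "unreachable - the old #if always took the tree-RCU branch"
	#endif
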
@@ -311,6 +289,8 @@
 {
	int i;

+	for (i = 0; i < RCU_NUM_LVLS; i++)
+		levelspread[i] = INT_MIN;
	if (rcu_fanout_exact) {
		levelspread[rcu_num_lvls - 1] = rcu_fanout_leaf;
		for (i = rcu_num_lvls - 2; i >= 0; i--)
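
Pre-poisoning levelspread[] with INT_MIN means any entry the fanout computation fails to fill shows up as an absurd spread instead of leftover stack garbage. A self-contained userspace rendering of the exact-fanout branch (geometry values are illustrative, not a real Kconfig setting):

	#include <limits.h>
	#include <stdio.h>

	#define RCU_NUM_LVLS 4	/* compile-time maximum number of levels */

	int main(void)
	{
		int levelspread[RCU_NUM_LVLS];
		int rcu_num_lvls = 2, rcu_fanout_leaf = 16, fanout = 64;
		int i;

		for (i = 0; i < RCU_NUM_LVLS; i++)
			levelspread[i] = INT_MIN;	/* poison, as in the patch */
		levelspread[rcu_num_lvls - 1] = rcu_fanout_leaf;
		for (i = rcu_num_lvls - 2; i >= 0; i--)
			levelspread[i] = fanout;
		for (i = 0; i < RCU_NUM_LVLS; i++)
			printf("level %d: spread %d\n", i, levelspread[i]);
		return 0;	/* levels 2-3 print INT_MIN: obviously unused */
	}
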
@@ -328,46 +308,44 @@
	}
 }

-/* Returns first leaf rcu_node of the specified RCU flavor. */
-#define rcu_first_leaf_node(rsp) ((rsp)->level[rcu_num_lvls - 1])
+extern void rcu_init_geometry(void);
+
+/* Returns a pointer to the first leaf rcu_node structure. */
+#define rcu_first_leaf_node() (rcu_state.level[rcu_num_lvls - 1])

 /* Is this rcu_node a leaf? */
 #define rcu_is_leaf_node(rnp) ((rnp)->level == rcu_num_lvls - 1)

 /* Is this rcu_node the last leaf? */
-#define rcu_is_last_leaf_node(rsp, rnp) ((rnp) == &(rsp)->node[rcu_num_nodes - 1])
+#define rcu_is_last_leaf_node(rnp) ((rnp) == &rcu_state.node[rcu_num_nodes - 1])

 /*
- * Do a full breadth-first scan of the rcu_node structures for the
- * specified rcu_state structure.
+ * Do a full breadth-first scan of the {s,}rcu_node structures for the
+ * specified state structure (for SRCU) or the only rcu_state structure
+ * (for RCU).
  */
-#define rcu_for_each_node_breadth_first(rsp, rnp) \
-	for ((rnp) = &(rsp)->node[0]; \
-	     (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)
+#define srcu_for_each_node_breadth_first(sp, rnp) \
+	for ((rnp) = &(sp)->node[0]; \
+	     (rnp) < &(sp)->node[rcu_num_nodes]; (rnp)++)
+#define rcu_for_each_node_breadth_first(rnp) \
+	srcu_for_each_node_breadth_first(&rcu_state, rnp)

 /*
- * Do a breadth-first scan of the non-leaf rcu_node structures for the
- * specified rcu_state structure. Note that if there is a singleton
- * rcu_node tree with but one rcu_node structure, this loop is a no-op.
+ * Scan the leaves of the rcu_node hierarchy for the rcu_state structure.
+ * Note that if there is a singleton rcu_node tree with but one rcu_node
+ * structure, this loop -will- visit the rcu_node structure. It is still
+ * a leaf node, even if it is also the root node.
  */
-#define rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) \
-	for ((rnp) = &(rsp)->node[0]; !rcu_is_leaf_node(rsp, rnp); (rnp)++)
-
-/*
- * Scan the leaves of the rcu_node hierarchy for the specified rcu_state
- * structure. Note that if there is a singleton rcu_node tree with but
- * one rcu_node structure, this loop -will- visit the rcu_node structure.
- * It is still a leaf node, even if it is also the root node.
- */
-#define rcu_for_each_leaf_node(rsp, rnp) \
-	for ((rnp) = rcu_first_leaf_node(rsp); \
-	     (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)
+#define rcu_for_each_leaf_node(rnp) \
+	for ((rnp) = rcu_first_leaf_node(); \
+	     (rnp) < &rcu_state.node[rcu_num_nodes]; (rnp)++)

 /*
  * Iterate over all possible CPUs in a leaf RCU node.
  */
 #define for_each_leaf_node_possible_cpu(rnp, cpu) \
-	for ((cpu) = cpumask_next((rnp)->grplo - 1, cpu_possible_mask); \
+	for (WARN_ON_ONCE(!rcu_is_leaf_node(rnp)), \
+	     (cpu) = cpumask_next((rnp)->grplo - 1, cpu_possible_mask); \
	     (cpu) <= rnp->grphi; \
	     (cpu) = cpumask_next((cpu), cpu_possible_mask))

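With rcu_state now a singleton, the iterators drop their rsp argument, and the new WARN_ON_ONCE() rides in the comma expression of the for-init clause: it fires once if the macro is handed a non-leaf rcu_node, then the real initialization runs exactly as before. Typical combined usage, sketched (loop body illustrative):

	struct rcu_node *rnp;
	int cpu;

	rcu_for_each_leaf_node(rnp) {	/* visits the root too, if root is the only node */
		for_each_leaf_node_possible_cpu(rnp, cpu) {
			/* e.g., inspect this leaf's per-CPU quiescent-state state */
		}
	}
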
@@ -377,7 +355,8 @@
 #define rcu_find_next_bit(rnp, cpu, mask) \
	((rnp)->grplo + find_next_bit(&(mask), BITS_PER_LONG, (cpu)))
 #define for_each_leaf_node_cpu_mask(rnp, cpu, mask) \
-	for ((cpu) = rcu_find_next_bit((rnp), 0, (mask)); \
+	for (WARN_ON_ONCE(!rcu_is_leaf_node(rnp)), \
+	     (cpu) = rcu_find_next_bit((rnp), 0, (mask)); \
	     (cpu) <= rnp->grphi; \
	     (cpu) = rcu_find_next_bit((rnp), (cpu) + 1 - (rnp->grplo), (mask)))

@@ -433,7 +412,13 @@
 #define raw_lockdep_assert_held_rcu_node(p) \
	lockdep_assert_held(&ACCESS_PRIVATE(p, lock))

-#endif /* #if defined(SRCU) || !defined(TINY_RCU) */
+#endif /* #if defined(CONFIG_SRCU) || !defined(CONFIG_TINY_RCU) */
+
+#ifdef CONFIG_SRCU
+void srcu_init(void);
+#else /* #ifdef CONFIG_SRCU */
+static inline void srcu_init(void) { }
+#endif /* #else #ifdef CONFIG_SRCU */

 #ifdef CONFIG_TINY_RCU
 /* Tiny RCU doesn't expedite, as its purpose in life is instead to be tiny. */
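
The srcu_init() pair follows the usual config-stub idiom: a real declaration under CONFIG_SRCU, an empty static inline otherwise, so boot code can call it unconditionally. The caller's view, in a simplified sketch (not the actual boot path):

	void __init rcu_boot_sketch(void)	/* illustrative name */
	{
		/* ... other RCU boot-time setup ... */
		srcu_init();	/* compiles away when !CONFIG_SRCU */
	}
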
@@ -448,6 +433,7 @@
 void rcu_expedite_gp(void);
 void rcu_unexpedite_gp(void);
 void rcupdate_announce_bootup_oddness(void);
+void show_rcu_tasks_gp_kthreads(void);
 void rcu_request_urgent_qs_task(struct task_struct *t);
 #endif /* #else #ifdef CONFIG_TINY_RCU */

@@ -457,22 +443,23 @@

 enum rcutorture_type {
	RCU_FLAVOR,
-	RCU_BH_FLAVOR,
-	RCU_SCHED_FLAVOR,
	RCU_TASKS_FLAVOR,
+	RCU_TASKS_RUDE_FLAVOR,
+	RCU_TASKS_TRACING_FLAVOR,
+	RCU_TRIVIAL_FLAVOR,
	SRCU_FLAVOR,
	INVALID_RCU_FLAVOR
 };

-#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU)
+#if defined(CONFIG_TREE_RCU)
 void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
			    unsigned long *gp_seq);
-void rcutorture_record_progress(unsigned long vernum);
 void do_trace_rcu_torture_read(const char *rcutorturename,
			       struct rcu_head *rhp,
			       unsigned long secs,
			       unsigned long c_old,
			       unsigned long c);
+void rcu_gp_set_torture_wait(int duration);
 #else
 static inline void rcutorture_get_gp_data(enum rcutorture_type test_type,
					  int *flags, unsigned long *gp_seq)
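
The flavor enum now covers the Tasks-RCU variants (rude, tracing) and the trivial torture flavor in place of the retired RCU-bh and RCU-sched entries. Purely for illustration, a hypothetical helper mapping the enum to the torture_type strings rcutorture users pass on the command line (the real string-to-ops table lives in kernel/rcu/rcutorture.c and differs in detail):

	static const char *rcu_flavor_name(enum rcutorture_type t)
	{
		switch (t) {
		case RCU_FLAVOR:		return "rcu";
		case RCU_TASKS_FLAVOR:		return "tasks";
		case RCU_TASKS_RUDE_FLAVOR:	return "tasks-rude";
		case RCU_TASKS_TRACING_FLAVOR:	return "tasks-tracing";
		case RCU_TRIVIAL_FLAVOR:	return "trivial";
		case SRCU_FLAVOR:		return "srcu";
		default:			return "invalid";
		}
	}
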
@@ -480,7 +467,6 @@
	*flags = 0;
	*gp_seq = 0;
 }
-static inline void rcutorture_record_progress(unsigned long vernum) { }
 #ifdef CONFIG_RCU_TRACE
 void do_trace_rcu_torture_read(const char *rcutorturename,
			       struct rcu_head *rhp,
@@ -491,6 +477,11 @@
 #define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
	do { } while (0)
 #endif
+static inline void rcu_gp_set_torture_wait(int duration) { }
+#endif
+
+#if IS_ENABLED(CONFIG_RCU_TORTURE_TEST) || IS_MODULE(CONFIG_RCU_TORTURE_TEST)
+long rcutorture_sched_setaffinity(pid_t pid, const struct cpumask *in_mask);
 #endif

 #ifdef CONFIG_TINY_SRCU
@@ -514,45 +505,34 @@
 #endif

 #ifdef CONFIG_TINY_RCU
+static inline bool rcu_dynticks_zero_in_eqs(int cpu, int *vp) { return false; }
 static inline unsigned long rcu_get_gp_seq(void) { return 0; }
-static inline unsigned long rcu_bh_get_gp_seq(void) { return 0; }
-static inline unsigned long rcu_sched_get_gp_seq(void) { return 0; }
 static inline unsigned long rcu_exp_batches_completed(void) { return 0; }
-static inline unsigned long rcu_exp_batches_completed_sched(void) { return 0; }
 static inline unsigned long
 srcu_batches_completed(struct srcu_struct *sp) { return 0; }
 static inline void rcu_force_quiescent_state(void) { }
-static inline void rcu_bh_force_quiescent_state(void) { }
-static inline void rcu_sched_force_quiescent_state(void) { }
 static inline void show_rcu_gp_kthreads(void) { }
 static inline int rcu_get_gp_kthreads_prio(void) { return 0; }
+static inline void rcu_fwd_progress_check(unsigned long j) { }
 #else /* #ifdef CONFIG_TINY_RCU */
+bool rcu_dynticks_zero_in_eqs(int cpu, int *vp);
 unsigned long rcu_get_gp_seq(void);
-unsigned long rcu_sched_get_gp_seq(void);
 unsigned long rcu_exp_batches_completed(void);
-unsigned long rcu_exp_batches_completed_sched(void);
 unsigned long srcu_batches_completed(struct srcu_struct *sp);
 void show_rcu_gp_kthreads(void);
 int rcu_get_gp_kthreads_prio(void);
+void rcu_fwd_progress_check(unsigned long j);
 void rcu_force_quiescent_state(void);
-void rcu_sched_force_quiescent_state(void);
 extern struct workqueue_struct *rcu_gp_wq;
 extern struct workqueue_struct *rcu_par_gp_wq;
-
-#ifdef CONFIG_PREEMPT_RT_FULL
-#define rcu_bh_get_gp_seq rcu_get_gp_seq
-#define rcu_bh_force_quiescent_state rcu_force_quiescent_state
-#else
-unsigned long rcu_bh_get_gp_seq(void);
-void rcu_bh_force_quiescent_state(void);
-#endif
-
 #endif /* #else #ifdef CONFIG_TINY_RCU */

 #ifdef CONFIG_RCU_NOCB_CPU
 bool rcu_is_nocb_cpu(int cpu);
+void rcu_bind_current_to_nocb(void);
 #else
 static inline bool rcu_is_nocb_cpu(int cpu) { return false; }
+static inline void rcu_bind_current_to_nocb(void) { }
 #endif

 #endif /* __LINUX_RCU_H */
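
Last, rcu_bind_current_to_nocb() lets a kthread pin itself to the rcu_nocbs= CPU set (my reading of the helper; rcutorture's forward-progress kthreads are the sort of caller I'd expect), with the !CONFIG_RCU_NOCB_CPU stub keeping callers #ifdef-free. A hypothetical caller:

	static int fwd_prog_thread_sketch(void *unused)	/* illustrative */
	{
		rcu_bind_current_to_nocb();	/* no-op without CONFIG_RCU_NOCB_CPU */
		while (!kthread_should_stop())
			schedule_timeout_interruptible(HZ);
		return 0;
	}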