  .. |   .. |
     |    1 | +// SPDX-License-Identifier: GPL-2.0+
   1 |    2 | /*
   2 |    3 |  * Read-Copy Update mechanism for mutual exclusion
   3 |      | - *
   4 |      | - * This program is free software; you can redistribute it and/or modify
   5 |      | - * it under the terms of the GNU General Public License as published by
   6 |      | - * the Free Software Foundation; either version 2 of the License, or
   7 |      | - * (at your option) any later version.
   8 |      | - *
   9 |      | - * This program is distributed in the hope that it will be useful,
  10 |      | - * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11 |      | - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12 |      | - * GNU General Public License for more details.
  13 |      | - *
  14 |      | - * You should have received a copy of the GNU General Public License
  15 |      | - * along with this program; if not, you can access it online at
  16 |      | - * http://www.gnu.org/licenses/gpl-2.0.html.
  17 |    4 |  *
  18 |    5 |  * Copyright IBM Corporation, 2001
  19 |    6 |  *
  20 |    7 |  * Authors: Dipankar Sarma <dipankar@in.ibm.com>
  21 |    8 |  *          Manfred Spraul <manfred@colorfullife.com>
  22 |    9 |  *
  23 |      | - * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
     |   10 | + * Based on the original work by Paul McKenney <paulmck@linux.ibm.com>
  24 |   11 |  * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
  25 |   12 |  * Papers:
  26 |   13 |  * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
  .. |   .. |
  53 |   40 | #include <linux/rcupdate_wait.h>
  54 |   41 | #include <linux/sched/isolation.h>
  55 |   42 | #include <linux/kprobes.h>
     |   43 | +#include <linux/slab.h>
     |   44 | +#include <linux/irq_work.h>
     |   45 | +#include <linux/rcupdate_trace.h>
  56 |   46 |
  57 |   47 | #define CREATE_TRACE_POINTS
  58 |   48 |
  .. |   .. |
  64 |   54 | #define MODULE_PARAM_PREFIX "rcupdate."
  65 |   55 |
  66 |   56 | #ifndef CONFIG_TINY_RCU
  67 |      | -extern int rcu_expedited; /* from sysctl */
  68 |   57 | module_param(rcu_expedited, int, 0);
  69 |      | -extern int rcu_normal; /* from sysctl */
  70 |   58 | module_param(rcu_normal, int, 0);
  71 |      | -static int rcu_normal_after_boot = IS_ENABLED(CONFIG_PREEMPT_RT_FULL);
  72 |      | -#ifndef CONFIG_PREEMPT_RT_FULL
     |   59 | +static int rcu_normal_after_boot;
  73 |   60 | module_param(rcu_normal_after_boot, int, 0);
  74 |      | -#endif
  75 |   61 | #endif /* #ifndef CONFIG_TINY_RCU */
  76 |   62 |
  77 |   63 | #ifdef CONFIG_DEBUG_LOCK_ALLOC
  78 |   64 | /**
  79 |      | - * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
     |   65 | + * rcu_read_lock_held_common() - might we be in RCU-sched read-side critical section?
     |   66 | + * @ret: Best guess answer if lockdep cannot be relied on
  80 |   67 |  *
  81 |      | - * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an
     |   68 | + * Returns true if lockdep must be ignored, in which case ``*ret`` contains
     |   69 | + * the best guess described below. Otherwise returns false, in which
     |   70 | + * case ``*ret`` tells the caller nothing and the caller should instead
     |   71 | + * consult lockdep.
     |   72 | + *
     |   73 | + * If CONFIG_DEBUG_LOCK_ALLOC is selected, set ``*ret`` to nonzero iff in an
  82 |   74 |  * RCU-sched read-side critical section. In absence of
  83 |   75 |  * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
  84 |   76 |  * critical section unless it can prove otherwise. Note that disabling
  .. |   .. |
  90 |   82 |  * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
  91 |   83 |  * and while lockdep is disabled.
  92 |   84 |  *
  93 |      | - * Note that if the CPU is in the idle loop from an RCU point of
  94 |      | - * view (ie: that we are in the section between rcu_idle_enter() and
  95 |      | - * rcu_idle_exit()) then rcu_read_lock_held() returns false even if the CPU
  96 |      | - * did an rcu_read_lock(). The reason for this is that RCU ignores CPUs
  97 |      | - * that are in such a section, considering these as in extended quiescent
  98 |      | - * state, so such a CPU is effectively never in an RCU read-side critical
  99 |      | - * section regardless of what RCU primitives it invokes. This state of
 100 |      | - * affairs is required --- we need to keep an RCU-free window in idle
 101 |      | - * where the CPU may possibly enter into low power mode. This way we can
 102 |      | - * notice an extended quiescent state to other CPUs that started a grace
 103 |      | - * period. Otherwise we would delay any grace period as long as we run in
 104 |      | - * the idle task.
     |   85 | + * Note that if the CPU is in the idle loop from an RCU point of view (ie:
     |   86 | + * that we are in the section between rcu_idle_enter() and rcu_idle_exit())
     |   87 | + * then rcu_read_lock_held() sets ``*ret`` to false even if the CPU did an
     |   88 | + * rcu_read_lock(). The reason for this is that RCU ignores CPUs that are
     |   89 | + * in such a section, considering these as in extended quiescent state,
     |   90 | + * so such a CPU is effectively never in an RCU read-side critical section
     |   91 | + * regardless of what RCU primitives it invokes. This state of affairs is
     |   92 | + * required --- we need to keep an RCU-free window in idle where the CPU may
     |   93 | + * possibly enter into low power mode. This way we can notice an extended
     |   94 | + * quiescent state to other CPUs that started a grace period. Otherwise
     |   95 | + * we would delay any grace period as long as we run in the idle task.
 105 |   96 |  *
 106 |      | - * Similarly, we avoid claiming an SRCU read lock held if the current
     |   97 | + * Similarly, we avoid claiming an RCU read lock held if the current
 107 |   98 |  * CPU is offline.
 108 |   99 |  */
     |  100 | +static bool rcu_read_lock_held_common(bool *ret)
     |  101 | +{
     |  102 | +        if (!debug_lockdep_rcu_enabled()) {
     |  103 | +                *ret = true;
     |  104 | +                return true;
     |  105 | +        }
     |  106 | +        if (!rcu_is_watching()) {
     |  107 | +                *ret = false;
     |  108 | +                return true;
     |  109 | +        }
     |  110 | +        if (!rcu_lockdep_current_cpu_online()) {
     |  111 | +                *ret = false;
     |  112 | +                return true;
     |  113 | +        }
     |  114 | +        return false;
     |  115 | +}
     |  116 | +
 109 |  117 | int rcu_read_lock_sched_held(void)
 110 |  118 | {
 111 |      | -        int lockdep_opinion = 0;
     |  119 | +        bool ret;
 112 |  120 |
 113 |      | -        if (!debug_lockdep_rcu_enabled())
 114 |      | -                return 1;
 115 |      | -        if (!rcu_is_watching())
 116 |      | -                return 0;
 117 |      | -        if (!rcu_lockdep_current_cpu_online())
 118 |      | -                return 0;
 119 |      | -        if (debug_locks)
 120 |      | -                lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
 121 |      | -        return lockdep_opinion || !preemptible();
     |  121 | +        if (rcu_read_lock_held_common(&ret))
     |  122 | +                return ret;
     |  123 | +        return lock_is_held(&rcu_sched_lock_map) || !preemptible();
 122 |  124 | }
 123 |  125 | EXPORT_SYMBOL(rcu_read_lock_sched_held);
 124 |  126 | #endif
  .. |   .. |
 151 |  153 |  */
 152 |  154 | bool rcu_gp_is_expedited(void)
 153 |  155 | {
 154 |      | -        return rcu_expedited || atomic_read(&rcu_expedited_nesting) ||
 155 |      | -               rcu_scheduler_active == RCU_SCHEDULER_INIT;
     |  156 | +        return rcu_expedited || atomic_read(&rcu_expedited_nesting);
 156 |  157 | }
 157 |  158 | EXPORT_SYMBOL_GPL(rcu_gp_is_expedited);
 158 |  159 |
  .. |   .. |
 184 |  185 | }
 185 |  186 | EXPORT_SYMBOL_GPL(rcu_unexpedite_gp);
 186 |  187 |
     |  188 | +static bool rcu_boot_ended __read_mostly;
     |  189 | +
 187 |  190 | /*
 188 |  191 |  * Inform RCU of the end of the in-kernel boot sequence.
 189 |  192 |  */
  .. |   .. |
 192 |  195 |         rcu_unexpedite_gp();
 193 |  196 |         if (rcu_normal_after_boot)
 194 |  197 |                 WRITE_ONCE(rcu_normal, 1);
     |  198 | +        rcu_boot_ended = true;
 195 |  199 | }
     |  200 | +
     |  201 | +/*
     |  202 | + * Let rcutorture know when it is OK to turn it up to eleven.
     |  203 | + */
     |  204 | +bool rcu_inkernel_boot_has_ended(void)
     |  205 | +{
     |  206 | +        return rcu_boot_ended;
     |  207 | +}
     |  208 | +EXPORT_SYMBOL_GPL(rcu_inkernel_boot_has_ended);
 196 |  209 |
 197 |  210 | #endif /* #ifndef CONFIG_TINY_RCU */
 198 |  211 |
  .. |   .. |
 206 |  219 |         if (!IS_ENABLED(CONFIG_PROVE_RCU))
 207 |  220 |                 return;
 208 |  221 |         synchronize_rcu();
 209 |      | -        synchronize_rcu_bh();
 210 |      | -        synchronize_sched();
 211 |  222 |         synchronize_rcu_expedited();
 212 |      | -        synchronize_rcu_bh_expedited();
 213 |      | -        synchronize_sched_expedited();
 214 |  223 | }
 215 |  224 |
 216 |  225 | #if !defined(CONFIG_TINY_RCU) || defined(CONFIG_SRCU)
  .. |   .. |
 222 |  231 | {
 223 |  232 |         rcu_test_sync_prims();
 224 |  233 |         rcu_scheduler_active = RCU_SCHEDULER_RUNNING;
     |  234 | +        kfree_rcu_scheduler_running();
 225 |  235 |         rcu_test_sync_prims();
 226 |  236 |         return 0;
 227 |  237 | }
  .. |   .. |
 231 |  241 |
 232 |  242 | #ifdef CONFIG_DEBUG_LOCK_ALLOC
 233 |  243 | static struct lock_class_key rcu_lock_key;
 234 |      | -struct lockdep_map rcu_lock_map =
 235 |      | -        STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
     |  244 | +struct lockdep_map rcu_lock_map = {
     |  245 | +        .name = "rcu_read_lock",
     |  246 | +        .key = &rcu_lock_key,
     |  247 | +        .wait_type_outer = LD_WAIT_FREE,
     |  248 | +        .wait_type_inner = LD_WAIT_CONFIG, /* XXX PREEMPT_RCU ? */
     |  249 | +};
 236 |  250 | EXPORT_SYMBOL_GPL(rcu_lock_map);
 237 |  251 |
 238 |  252 | static struct lock_class_key rcu_bh_lock_key;
 239 |      | -struct lockdep_map rcu_bh_lock_map =
 240 |      | -        STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_bh", &rcu_bh_lock_key);
     |  253 | +struct lockdep_map rcu_bh_lock_map = {
     |  254 | +        .name = "rcu_read_lock_bh",
     |  255 | +        .key = &rcu_bh_lock_key,
     |  256 | +        .wait_type_outer = LD_WAIT_FREE,
     |  257 | +        .wait_type_inner = LD_WAIT_CONFIG, /* PREEMPT_LOCK also makes BH preemptible */
     |  258 | +};
 241 |  259 | EXPORT_SYMBOL_GPL(rcu_bh_lock_map);
 242 |  260 |
 243 |  261 | static struct lock_class_key rcu_sched_lock_key;
 244 |      | -struct lockdep_map rcu_sched_lock_map =
 245 |      | -        STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_sched", &rcu_sched_lock_key);
     |  262 | +struct lockdep_map rcu_sched_lock_map = {
     |  263 | +        .name = "rcu_read_lock_sched",
     |  264 | +        .key = &rcu_sched_lock_key,
     |  265 | +        .wait_type_outer = LD_WAIT_FREE,
     |  266 | +        .wait_type_inner = LD_WAIT_SPIN,
     |  267 | +};
 246 |  268 | EXPORT_SYMBOL_GPL(rcu_sched_lock_map);
 247 |  269 |
     |  270 | +// Tell lockdep when RCU callbacks are being invoked.
 248 |  271 | static struct lock_class_key rcu_callback_key;
 249 |  272 | struct lockdep_map rcu_callback_map =
 250 |  273 |         STATIC_LOCKDEP_MAP_INIT("rcu_callback", &rcu_callback_key);
 251 |  274 | EXPORT_SYMBOL_GPL(rcu_callback_map);
 252 |  275 |
 253 |      | -int notrace debug_lockdep_rcu_enabled(void)
     |  276 | +noinstr int notrace debug_lockdep_rcu_enabled(void)
 254 |  277 | {
 255 |      | -        return rcu_scheduler_active != RCU_SCHEDULER_INACTIVE && debug_locks &&
     |  278 | +        return rcu_scheduler_active != RCU_SCHEDULER_INACTIVE && READ_ONCE(debug_locks) &&
 256 |  279 |                current->lockdep_recursion == 0;
 257 |  280 | }
 258 |  281 | EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);
 259 |      | -NOKPROBE_SYMBOL(debug_lockdep_rcu_enabled);
 260 |  282 |
 261 |  283 | /**
 262 |  284 |  * rcu_read_lock_held() - might we be in RCU read-side critical section?
  .. |   .. |
 280 |  302 |  */
 281 |  303 | int rcu_read_lock_held(void)
 282 |  304 | {
 283 |      | -        if (!debug_lockdep_rcu_enabled())
 284 |      | -                return 1;
 285 |      | -        if (!rcu_is_watching())
 286 |      | -                return 0;
 287 |      | -        if (!rcu_lockdep_current_cpu_online())
 288 |      | -                return 0;
     |  305 | +        bool ret;
     |  306 | +
     |  307 | +        if (rcu_read_lock_held_common(&ret))
     |  308 | +                return ret;
 289 |  309 |         return lock_is_held(&rcu_lock_map);
 290 |  310 | }
 291 |  311 | EXPORT_SYMBOL_GPL(rcu_read_lock_held);
 292 |  312 |
 293 |      | -#ifndef CONFIG_PREEMPT_RT_FULL
 294 |  313 | /**
 295 |  314 |  * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section?
 296 |  315 |  *
  .. |   .. |
 303 |  322 |  *
 304 |  323 |  * Check debug_lockdep_rcu_enabled() to prevent false positives during boot.
 305 |  324 |  *
 306 |      | - * Note that rcu_read_lock() is disallowed if the CPU is either idle or
     |  325 | + * Note that rcu_read_lock_bh() is disallowed if the CPU is either idle or
 307 |  326 |  * offline from an RCU perspective, so check for those as well.
 308 |  327 |  */
 309 |  328 | int rcu_read_lock_bh_held(void)
 310 |  329 | {
 311 |      | -        if (!debug_lockdep_rcu_enabled())
 312 |      | -                return 1;
 313 |      | -        if (!rcu_is_watching())
 314 |      | -                return 0;
 315 |      | -        if (!rcu_lockdep_current_cpu_online())
 316 |      | -                return 0;
     |  330 | +        bool ret;
     |  331 | +
     |  332 | +        if (rcu_read_lock_held_common(&ret))
     |  333 | +                return ret;
 317 |  334 |         return in_softirq() || irqs_disabled();
 318 |  335 | }
 319 |  336 | EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);
 320 |      | -#endif
     |  337 | +
     |  338 | +int rcu_read_lock_any_held(void)
     |  339 | +{
     |  340 | +        bool ret;
     |  341 | +
     |  342 | +        if (rcu_read_lock_held_common(&ret))
     |  343 | +                return ret;
     |  344 | +        if (lock_is_held(&rcu_lock_map) ||
     |  345 | +            lock_is_held(&rcu_bh_lock_map) ||
     |  346 | +            lock_is_held(&rcu_sched_lock_map))
     |  347 | +                return 1;
     |  348 | +        return !preemptible();
     |  349 | +}
     |  350 | +EXPORT_SYMBOL_GPL(rcu_read_lock_any_held);
 321 |  351 |
 322 |  352 | #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 323 |  353 |
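For context only, not part of this diff: the lockdep predicates above are what rcu_dereference_check() and RCU_LOCKDEP_WARN() consult under CONFIG_PROVE_RCU. A minimal reader-side sketch follows; struct foo, foo_ptr, foo_lock, and read_foo_val() are made-up names used purely for illustration.

    #include <linux/rcupdate.h>
    #include <linux/spinlock.h>

    struct foo {                            /* hypothetical example structure */
            int val;
    };
    static struct foo __rcu *foo_ptr;       /* hypothetical RCU-protected pointer */
    static DEFINE_SPINLOCK(foo_lock);       /* hypothetical update-side lock */

    static int read_foo_val(void)
    {
            struct foo *p;
            int val;

            rcu_read_lock();
            /* Lockdep complains unless in an RCU reader or holding foo_lock. */
            p = rcu_dereference_check(foo_ptr, lockdep_is_held(&foo_lock));
            val = p ? p->val : -1;
            rcu_read_unlock();
            return val;
    }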
  .. |   .. |
 342 |  372 |         int i;
 343 |  373 |         int j;
 344 |  374 |
 345 |      | -        /* Initialize and register callbacks for each flavor specified. */
     |  375 | +        /* Initialize and register callbacks for each crcu_array element. */
 346 |  376 |         for (i = 0; i < n; i++) {
 347 |  377 |                 if (checktiny &&
 348 |      | -                    (crcu_array[i] == call_rcu ||
 349 |      | -                     crcu_array[i] == call_rcu_bh)) {
     |  378 | +                    (crcu_array[i] == call_rcu)) {
 350 |  379 |                         might_sleep();
 351 |  380 |                         continue;
 352 |  381 |                 }
 353 |      | -                init_rcu_head_on_stack(&rs_array[i].head);
 354 |      | -                init_completion(&rs_array[i].completion);
 355 |  382 |                 for (j = 0; j < i; j++)
 356 |  383 |                         if (crcu_array[j] == crcu_array[i])
 357 |  384 |                                 break;
 358 |      | -                if (j == i)
     |  385 | +                if (j == i) {
     |  386 | +                        init_rcu_head_on_stack(&rs_array[i].head);
     |  387 | +                        init_completion(&rs_array[i].completion);
 359 |  388 |                         (crcu_array[i])(&rs_array[i].head, wakeme_after_rcu);
     |  389 | +                }
 360 |  390 |         }
 361 |  391 |
 362 |  392 |         /* Wait for all callbacks to be invoked. */
 363 |  393 |         for (i = 0; i < n; i++) {
 364 |  394 |                 if (checktiny &&
 365 |      | -                    (crcu_array[i] == call_rcu ||
 366 |      | -                     crcu_array[i] == call_rcu_bh))
     |  395 | +                    (crcu_array[i] == call_rcu))
 367 |  396 |                         continue;
 368 |  397 |                 for (j = 0; j < i; j++)
 369 |  398 |                         if (crcu_array[j] == crcu_array[i])
 370 |  399 |                                 break;
 371 |      | -                if (j == i)
     |  400 | +                if (j == i) {
 372 |  401 |                         wait_for_completion(&rs_array[i].completion);
 373 |      | -                destroy_rcu_head_on_stack(&rs_array[i].head);
     |  402 | +                        destroy_rcu_head_on_stack(&rs_array[i].head);
     |  403 | +                }
 374 |  404 |         }
 375 |  405 | }
 376 |  406 | EXPORT_SYMBOL_GPL(__wait_rcu_gp);
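For context only, not part of this diff: __wait_rcu_gp() is normally reached through the synchronize_rcu_mult() wrapper in <linux/rcupdate_wait.h>, which waits for several grace-period types in one call; as the hunk above shows, duplicate callback functions in that list are now set up and waited on only once. A minimal sketch, assuming CONFIG_TASKS_RCU is enabled; example_wait_two_gp_types() is a hypothetical helper name.

    #include <linux/rcupdate_wait.h>

    /* Hypothetical helper: wait for a normal and a Tasks RCU grace period. */
    static void example_wait_two_gp_types(void)
    {
            synchronize_rcu_mult(call_rcu, call_rcu_tasks);
    }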
  .. |   .. |
 426 |  456 | }
 427 |  457 | EXPORT_SYMBOL_GPL(destroy_rcu_head_on_stack);
 428 |  458 |
 429 |      | -struct debug_obj_descr rcuhead_debug_descr = {
     |  459 | +const struct debug_obj_descr rcuhead_debug_descr = {
 430 |  460 |         .name = "rcu_head",
 431 |  461 |         .is_static_object = rcuhead_is_static_object,
 432 |  462 | };
 433 |  463 | EXPORT_SYMBOL_GPL(rcuhead_debug_descr);
 434 |  464 | #endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
 435 |  465 |
 436 |      | -#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU) || defined(CONFIG_RCU_TRACE)
     |  466 | +#if defined(CONFIG_TREE_RCU) || defined(CONFIG_RCU_TRACE)
 437 |  467 | void do_trace_rcu_torture_read(const char *rcutorturename, struct rcu_head *rhp,
 438 |  468 |                                unsigned long secs,
 439 |  469 |                                unsigned long c_old, unsigned long c)
  .. |   .. |
 446 |  476 |         do { } while (0)
 447 |  477 | #endif
 448 |  478 |
 449 |      | -#ifdef CONFIG_RCU_STALL_COMMON
     |  479 | +#if IS_ENABLED(CONFIG_RCU_TORTURE_TEST) || IS_MODULE(CONFIG_RCU_TORTURE_TEST)
     |  480 | +/* Get rcutorture access to sched_setaffinity(). */
     |  481 | +long rcutorture_sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
     |  482 | +{
     |  483 | +        int ret;
 450 |  484 |
 451 |      | -#ifdef CONFIG_PROVE_RCU
 452 |      | -#define RCU_STALL_DELAY_DELTA (5 * HZ)
 453 |      | -#else
 454 |      | -#define RCU_STALL_DELAY_DELTA 0
     |  485 | +        ret = sched_setaffinity(pid, in_mask);
     |  486 | +        WARN_ONCE(ret, "%s: sched_setaffinity() returned %d\n", __func__, ret);
     |  487 | +        return ret;
     |  488 | +}
     |  489 | +EXPORT_SYMBOL_GPL(rcutorture_sched_setaffinity);
 455 |  490 | #endif
 456 |  491 |
 457 |      | -int rcu_cpu_stall_suppress __read_mostly; /* 1 = suppress stall warnings. */
     |  492 | +#ifdef CONFIG_RCU_STALL_COMMON
     |  493 | +int rcu_cpu_stall_ftrace_dump __read_mostly;
     |  494 | +module_param(rcu_cpu_stall_ftrace_dump, int, 0644);
     |  495 | +int rcu_cpu_stall_suppress __read_mostly; // !0 = suppress stall warnings.
 458 |  496 | EXPORT_SYMBOL_GPL(rcu_cpu_stall_suppress);
 459 |      | -static int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;
 460 |      | -
 461 |  497 | module_param(rcu_cpu_stall_suppress, int, 0644);
     |  498 | +int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;
 462 |  499 | module_param(rcu_cpu_stall_timeout, int, 0644);
 463 |      | -
 464 |      | -int rcu_jiffies_till_stall_check(void)
 465 |      | -{
 466 |      | -        int till_stall_check = READ_ONCE(rcu_cpu_stall_timeout);
 467 |      | -
 468 |      | -        /*
 469 |      | -         * Limit check must be consistent with the Kconfig limits
 470 |      | -         * for CONFIG_RCU_CPU_STALL_TIMEOUT.
 471 |      | -         */
 472 |      | -        if (till_stall_check < 3) {
 473 |      | -                WRITE_ONCE(rcu_cpu_stall_timeout, 3);
 474 |      | -                till_stall_check = 3;
 475 |      | -        } else if (till_stall_check > 300) {
 476 |      | -                WRITE_ONCE(rcu_cpu_stall_timeout, 300);
 477 |      | -                till_stall_check = 300;
 478 |      | -        }
 479 |      | -        return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
 480 |      | -}
 481 |      | -
 482 |      | -void rcu_sysrq_start(void)
 483 |      | -{
 484 |      | -        if (!rcu_cpu_stall_suppress)
 485 |      | -                rcu_cpu_stall_suppress = 2;
 486 |      | -}
 487 |      | -
 488 |      | -void rcu_sysrq_end(void)
 489 |      | -{
 490 |      | -        if (rcu_cpu_stall_suppress == 2)
 491 |      | -                rcu_cpu_stall_suppress = 0;
 492 |      | -}
 493 |      | -
 494 |      | -static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
 495 |      | -{
 496 |      | -        rcu_cpu_stall_suppress = 1;
 497 |      | -        return NOTIFY_DONE;
 498 |      | -}
 499 |      | -
 500 |      | -static struct notifier_block rcu_panic_block = {
 501 |      | -        .notifier_call = rcu_panic,
 502 |      | -};
 503 |      | -
 504 |      | -static int __init check_cpu_stall_init(void)
 505 |      | -{
 506 |      | -        atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block);
 507 |      | -        return 0;
 508 |      | -}
 509 |      | -early_initcall(check_cpu_stall_init);
 510 |      | -
 511 |  500 | #endif /* #ifdef CONFIG_RCU_STALL_COMMON */
 512 |  501 |
 513 |      | -#ifdef CONFIG_TASKS_RCU
 514 |      | -
 515 |      | -/*
 516 |      | - * Simple variant of RCU whose quiescent states are voluntary context
 517 |      | - * switch, cond_resched_rcu_qs(), user-space execution, and idle.
 518 |      | - * As such, grace periods can take one good long time. There are no
 519 |      | - * read-side primitives similar to rcu_read_lock() and rcu_read_unlock()
 520 |      | - * because this implementation is intended to get the system into a safe
 521 |      | - * state for some of the manipulations involved in tracing and the like.
 522 |      | - * Finally, this implementation does not support high call_rcu_tasks()
 523 |      | - * rates from multiple CPUs. If this is required, per-CPU callback lists
 524 |      | - * will be needed.
 525 |      | - */
 526 |      | -
 527 |      | -/* Global list of callbacks and associated lock. */
 528 |      | -static struct rcu_head *rcu_tasks_cbs_head;
 529 |      | -static struct rcu_head **rcu_tasks_cbs_tail = &rcu_tasks_cbs_head;
 530 |      | -static DECLARE_WAIT_QUEUE_HEAD(rcu_tasks_cbs_wq);
 531 |      | -static DEFINE_RAW_SPINLOCK(rcu_tasks_cbs_lock);
 532 |      | -
 533 |      | -/* Track exiting tasks in order to allow them to be waited for. */
 534 |      | -DEFINE_STATIC_SRCU(tasks_rcu_exit_srcu);
 535 |      | -
 536 |      | -/* Control stall timeouts. Disable with <= 0, otherwise jiffies till stall. */
 537 |      | -#define RCU_TASK_STALL_TIMEOUT (HZ * 60 * 10)
 538 |      | -static int rcu_task_stall_timeout __read_mostly = RCU_TASK_STALL_TIMEOUT;
 539 |      | -module_param(rcu_task_stall_timeout, int, 0644);
 540 |      | -
 541 |      | -static struct task_struct *rcu_tasks_kthread_ptr;
 542 |      | -
 543 |      | -/**
 544 |      | - * call_rcu_tasks() - Queue an RCU for invocation task-based grace period
 545 |      | - * @rhp: structure to be used for queueing the RCU updates.
 546 |      | - * @func: actual callback function to be invoked after the grace period
 547 |      | - *
 548 |      | - * The callback function will be invoked some time after a full grace
 549 |      | - * period elapses, in other words after all currently executing RCU
 550 |      | - * read-side critical sections have completed. call_rcu_tasks() assumes
 551 |      | - * that the read-side critical sections end at a voluntary context
 552 |      | - * switch (not a preemption!), cond_resched_rcu_qs(), entry into idle,
 553 |      | - * or transition to usermode execution. As such, there are no read-side
 554 |      | - * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
 555 |      | - * this primitive is intended to determine that all tasks have passed
 556 |      | - * through a safe state, not so much for data-strcuture synchronization.
 557 |      | - *
 558 |      | - * See the description of call_rcu() for more detailed information on
 559 |      | - * memory ordering guarantees.
 560 |      | - */
 561 |      | -void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
 562 |      | -{
 563 |      | -        unsigned long flags;
 564 |      | -        bool needwake;
 565 |      | -
 566 |      | -        rhp->next = NULL;
 567 |      | -        rhp->func = func;
 568 |      | -        raw_spin_lock_irqsave(&rcu_tasks_cbs_lock, flags);
 569 |      | -        needwake = !rcu_tasks_cbs_head;
 570 |      | -        *rcu_tasks_cbs_tail = rhp;
 571 |      | -        rcu_tasks_cbs_tail = &rhp->next;
 572 |      | -        raw_spin_unlock_irqrestore(&rcu_tasks_cbs_lock, flags);
 573 |      | -        /* We can't create the thread unless interrupts are enabled. */
 574 |      | -        if (needwake && READ_ONCE(rcu_tasks_kthread_ptr))
 575 |      | -                wake_up(&rcu_tasks_cbs_wq);
 576 |      | -}
 577 |      | -EXPORT_SYMBOL_GPL(call_rcu_tasks);
 578 |      | -
 579 |      | -/**
 580 |      | - * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed.
 581 |      | - *
 582 |      | - * Control will return to the caller some time after a full rcu-tasks
 583 |      | - * grace period has elapsed, in other words after all currently
 584 |      | - * executing rcu-tasks read-side critical sections have elapsed. These
 585 |      | - * read-side critical sections are delimited by calls to schedule(),
 586 |      | - * cond_resched_tasks_rcu_qs(), idle execution, userspace execution, calls
 587 |      | - * to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched().
 588 |      | - *
 589 |      | - * This is a very specialized primitive, intended only for a few uses in
 590 |      | - * tracing and other situations requiring manipulation of function
 591 |      | - * preambles and profiling hooks. The synchronize_rcu_tasks() function
 592 |      | - * is not (yet) intended for heavy use from multiple CPUs.
 593 |      | - *
 594 |      | - * Note that this guarantee implies further memory-ordering guarantees.
 595 |      | - * On systems with more than one CPU, when synchronize_rcu_tasks() returns,
 596 |      | - * each CPU is guaranteed to have executed a full memory barrier since the
 597 |      | - * end of its last RCU-tasks read-side critical section whose beginning
 598 |      | - * preceded the call to synchronize_rcu_tasks(). In addition, each CPU
 599 |      | - * having an RCU-tasks read-side critical section that extends beyond
 600 |      | - * the return from synchronize_rcu_tasks() is guaranteed to have executed
 601 |      | - * a full memory barrier after the beginning of synchronize_rcu_tasks()
 602 |      | - * and before the beginning of that RCU-tasks read-side critical section.
 603 |      | - * Note that these guarantees include CPUs that are offline, idle, or
 604 |      | - * executing in user mode, as well as CPUs that are executing in the kernel.
 605 |      | - *
 606 |      | - * Furthermore, if CPU A invoked synchronize_rcu_tasks(), which returned
 607 |      | - * to its caller on CPU B, then both CPU A and CPU B are guaranteed
 608 |      | - * to have executed a full memory barrier during the execution of
 609 |      | - * synchronize_rcu_tasks() -- even if CPU A and CPU B are the same CPU
 610 |      | - * (but again only if the system has more than one CPU).
 611 |      | - */
 612 |      | -void synchronize_rcu_tasks(void)
 613 |      | -{
 614 |      | -        /* Complain if the scheduler has not started. */
 615 |      | -        RCU_LOCKDEP_WARN(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE,
 616 |      | -                         "synchronize_rcu_tasks called too soon");
 617 |      | -
 618 |      | -        /* Wait for the grace period. */
 619 |      | -        wait_rcu_gp(call_rcu_tasks);
 620 |      | -}
 621 |      | -EXPORT_SYMBOL_GPL(synchronize_rcu_tasks);
 622 |      | -
 623 |      | -/**
 624 |      | - * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks.
 625 |      | - *
 626 |      | - * Although the current implementation is guaranteed to wait, it is not
 627 |      | - * obligated to, for example, if there are no pending callbacks.
 628 |      | - */
 629 |      | -void rcu_barrier_tasks(void)
 630 |      | -{
 631 |      | -        /* There is only one callback queue, so this is easy. ;-) */
 632 |      | -        synchronize_rcu_tasks();
 633 |      | -}
 634 |      | -EXPORT_SYMBOL_GPL(rcu_barrier_tasks);
 635 |      | -
 636 |      | -/* See if tasks are still holding out, complain if so. */
 637 |      | -static void check_holdout_task(struct task_struct *t,
 638 |      | -                               bool needreport, bool *firstreport)
 639 |      | -{
 640 |      | -        int cpu;
 641 |      | -
 642 |      | -        if (!READ_ONCE(t->rcu_tasks_holdout) ||
 643 |      | -            t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
 644 |      | -            !READ_ONCE(t->on_rq) ||
 645 |      | -            (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
 646 |      | -             !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
 647 |      | -                WRITE_ONCE(t->rcu_tasks_holdout, false);
 648 |      | -                list_del_init(&t->rcu_tasks_holdout_list);
 649 |      | -                put_task_struct(t);
 650 |      | -                return;
 651 |      | -        }
 652 |      | -        rcu_request_urgent_qs_task(t);
 653 |      | -        if (!needreport)
 654 |      | -                return;
 655 |      | -        if (*firstreport) {
 656 |      | -                pr_err("INFO: rcu_tasks detected stalls on tasks:\n");
 657 |      | -                *firstreport = false;
 658 |      | -        }
 659 |      | -        cpu = task_cpu(t);
 660 |      | -        pr_alert("%p: %c%c nvcsw: %lu/%lu holdout: %d idle_cpu: %d/%d\n",
 661 |      | -                 t, ".I"[is_idle_task(t)],
 662 |      | -                 "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)],
 663 |      | -                 t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout,
 664 |      | -                 t->rcu_tasks_idle_cpu, cpu);
 665 |      | -        sched_show_task(t);
 666 |      | -}
 667 |      | -
 668 |      | -/* RCU-tasks kthread that detects grace periods and invokes callbacks. */
 669 |      | -static int __noreturn rcu_tasks_kthread(void *arg)
 670 |      | -{
 671 |      | -        unsigned long flags;
 672 |      | -        struct task_struct *g, *t;
 673 |      | -        unsigned long lastreport;
 674 |      | -        struct rcu_head *list;
 675 |      | -        struct rcu_head *next;
 676 |      | -        LIST_HEAD(rcu_tasks_holdouts);
 677 |      | -        int fract;
 678 |      | -
 679 |      | -        /* Run on housekeeping CPUs by default. Sysadm can move if desired. */
 680 |      | -        housekeeping_affine(current, HK_FLAG_RCU);
 681 |      | -
 682 |      | -        /*
 683 |      | -         * Each pass through the following loop makes one check for
 684 |      | -         * newly arrived callbacks, and, if there are some, waits for
 685 |      | -         * one RCU-tasks grace period and then invokes the callbacks.
 686 |      | -         * This loop is terminated by the system going down. ;-)
 687 |      | -         */
 688 |      | -        for (;;) {
 689 |      | -
 690 |      | -                /* Pick up any new callbacks. */
 691 |      | -                raw_spin_lock_irqsave(&rcu_tasks_cbs_lock, flags);
 692 |      | -                list = rcu_tasks_cbs_head;
 693 |      | -                rcu_tasks_cbs_head = NULL;
 694 |      | -                rcu_tasks_cbs_tail = &rcu_tasks_cbs_head;
 695 |      | -                raw_spin_unlock_irqrestore(&rcu_tasks_cbs_lock, flags);
 696 |      | -
 697 |      | -                /* If there were none, wait a bit and start over. */
 698 |      | -                if (!list) {
 699 |      | -                        wait_event_interruptible(rcu_tasks_cbs_wq,
 700 |      | -                                                 rcu_tasks_cbs_head);
 701 |      | -                        if (!rcu_tasks_cbs_head) {
 702 |      | -                                WARN_ON(signal_pending(current));
 703 |      | -                                schedule_timeout_interruptible(HZ/10);
 704 |      | -                        }
 705 |      | -                        continue;
 706 |      | -                }
 707 |      | -
 708 |      | -                /*
 709 |      | -                 * Wait for all pre-existing t->on_rq and t->nvcsw
 710 |      | -                 * transitions to complete. Invoking synchronize_sched()
 711 |      | -                 * suffices because all these transitions occur with
 712 |      | -                 * interrupts disabled. Without this synchronize_sched(),
 713 |      | -                 * a read-side critical section that started before the
 714 |      | -                 * grace period might be incorrectly seen as having started
 715 |      | -                 * after the grace period.
 716 |      | -                 *
 717 |      | -                 * This synchronize_sched() also dispenses with the
 718 |      | -                 * need for a memory barrier on the first store to
 719 |      | -                 * ->rcu_tasks_holdout, as it forces the store to happen
 720 |      | -                 * after the beginning of the grace period.
 721 |      | -                 */
 722 |      | -                synchronize_sched();
 723 |      | -
 724 |      | -                /*
 725 |      | -                 * There were callbacks, so we need to wait for an
 726 |      | -                 * RCU-tasks grace period. Start off by scanning
 727 |      | -                 * the task list for tasks that are not already
 728 |      | -                 * voluntarily blocked. Mark these tasks and make
 729 |      | -                 * a list of them in rcu_tasks_holdouts.
 730 |      | -                 */
 731 |      | -                rcu_read_lock();
 732 |      | -                for_each_process_thread(g, t) {
 733 |      | -                        if (t != current && READ_ONCE(t->on_rq) &&
 734 |      | -                            !is_idle_task(t)) {
 735 |      | -                                get_task_struct(t);
 736 |      | -                                t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
 737 |      | -                                WRITE_ONCE(t->rcu_tasks_holdout, true);
 738 |      | -                                list_add(&t->rcu_tasks_holdout_list,
 739 |      | -                                         &rcu_tasks_holdouts);
 740 |      | -                        }
 741 |      | -                }
 742 |      | -                rcu_read_unlock();
 743 |      | -
 744 |      | -                /*
 745 |      | -                 * Wait for tasks that are in the process of exiting.
 746 |      | -                 * This does only part of the job, ensuring that all
 747 |      | -                 * tasks that were previously exiting reach the point
 748 |      | -                 * where they have disabled preemption, allowing the
 749 |      | -                 * later synchronize_sched() to finish the job.
 750 |      | -                 */
 751 |      | -                synchronize_srcu(&tasks_rcu_exit_srcu);
 752 |      | -
 753 |      | -                /*
 754 |      | -                 * Each pass through the following loop scans the list
 755 |      | -                 * of holdout tasks, removing any that are no longer
 756 |      | -                 * holdouts. When the list is empty, we are done.
 757 |      | -                 */
 758 |      | -                lastreport = jiffies;
 759 |      | -
 760 |      | -                /* Start off with HZ/10 wait and slowly back off to 1 HZ wait*/
 761 |      | -                fract = 10;
 762 |      | -
 763 |      | -                for (;;) {
 764 |      | -                        bool firstreport;
 765 |      | -                        bool needreport;
 766 |      | -                        int rtst;
 767 |      | -                        struct task_struct *t1;
 768 |      | -
 769 |      | -                        if (list_empty(&rcu_tasks_holdouts))
 770 |      | -                                break;
 771 |      | -
 772 |      | -                        /* Slowly back off waiting for holdouts */
 773 |      | -                        schedule_timeout_interruptible(HZ/fract);
 774 |      | -
 775 |      | -                        if (fract > 1)
 776 |      | -                                fract--;
 777 |      | -
 778 |      | -                        rtst = READ_ONCE(rcu_task_stall_timeout);
 779 |      | -                        needreport = rtst > 0 &&
 780 |      | -                                     time_after(jiffies, lastreport + rtst);
 781 |      | -                        if (needreport)
 782 |      | -                                lastreport = jiffies;
 783 |      | -                        firstreport = true;
 784 |      | -                        WARN_ON(signal_pending(current));
 785 |      | -                        list_for_each_entry_safe(t, t1, &rcu_tasks_holdouts,
 786 |      | -                                                 rcu_tasks_holdout_list) {
 787 |      | -                                check_holdout_task(t, needreport, &firstreport);
 788 |      | -                                cond_resched();
 789 |      | -                        }
 790 |      | -                }
 791 |      | -
 792 |      | -                /*
 793 |      | -                 * Because ->on_rq and ->nvcsw are not guaranteed
 794 |      | -                 * to have a full memory barriers prior to them in the
 795 |      | -                 * schedule() path, memory reordering on other CPUs could
 796 |      | -                 * cause their RCU-tasks read-side critical sections to
 797 |      | -                 * extend past the end of the grace period. However,
 798 |      | -                 * because these ->nvcsw updates are carried out with
 799 |      | -                 * interrupts disabled, we can use synchronize_sched()
 800 |      | -                 * to force the needed ordering on all such CPUs.
 801 |      | -                 *
 802 |      | -                 * This synchronize_sched() also confines all
 803 |      | -                 * ->rcu_tasks_holdout accesses to be within the grace
 804 |      | -                 * period, avoiding the need for memory barriers for
 805 |      | -                 * ->rcu_tasks_holdout accesses.
 806 |      | -                 *
 807 |      | -                 * In addition, this synchronize_sched() waits for exiting
 808 |      | -                 * tasks to complete their final preempt_disable() region
 809 |      | -                 * of execution, cleaning up after the synchronize_srcu()
 810 |      | -                 * above.
 811 |      | -                 */
 812 |      | -                synchronize_sched();
 813 |      | -
 814 |      | -                /* Invoke the callbacks. */
 815 |      | -                while (list) {
 816 |      | -                        next = list->next;
 817 |      | -                        local_bh_disable();
 818 |      | -                        list->func(list);
 819 |      | -                        local_bh_enable();
 820 |      | -                        list = next;
 821 |      | -                        cond_resched();
 822 |      | -                }
 823 |      | -                /* Paranoid sleep to keep this from entering a tight loop */
 824 |      | -                schedule_timeout_uninterruptible(HZ/10);
 825 |      | -        }
 826 |      | -}
 827 |      | -
 828 |      | -/* Spawn rcu_tasks_kthread() at core_initcall() time. */
 829 |      | -static int __init rcu_spawn_tasks_kthread(void)
 830 |      | -{
 831 |      | -        struct task_struct *t;
 832 |      | -
 833 |      | -        t = kthread_run(rcu_tasks_kthread, NULL, "rcu_tasks_kthread");
 834 |      | -        BUG_ON(IS_ERR(t));
 835 |      | -        smp_mb(); /* Ensure others see full kthread. */
 836 |      | -        WRITE_ONCE(rcu_tasks_kthread_ptr, t);
 837 |      | -        return 0;
 838 |      | -}
 839 |      | -core_initcall(rcu_spawn_tasks_kthread);
 840 |      | -
 841 |      | -/* Do the srcu_read_lock() for the above synchronize_srcu(). */
 842 |      | -void exit_tasks_rcu_start(void)
 843 |      | -{
 844 |      | -        preempt_disable();
 845 |      | -        current->rcu_tasks_idx = __srcu_read_lock(&tasks_rcu_exit_srcu);
 846 |      | -        preempt_enable();
 847 |      | -}
 848 |      | -
 849 |      | -/* Do the srcu_read_unlock() for the above synchronize_srcu(). */
 850 |      | -void exit_tasks_rcu_finish(void)
 851 |      | -{
 852 |      | -        preempt_disable();
 853 |      | -        __srcu_read_unlock(&tasks_rcu_exit_srcu, current->rcu_tasks_idx);
 854 |      | -        preempt_enable();
 855 |      | -}
 856 |      | -
 857 |      | -#endif /* #ifdef CONFIG_TASKS_RCU */
 858 |      | -
 859 |      | -#ifndef CONFIG_TINY_RCU
 860 |      | -
 861 |      | -/*
 862 |      | - * Print any non-default Tasks RCU settings.
 863 |      | - */
 864 |      | -static void __init rcu_tasks_bootup_oddness(void)
 865 |      | -{
 866 |      | -#ifdef CONFIG_TASKS_RCU
 867 |      | -        if (rcu_task_stall_timeout != RCU_TASK_STALL_TIMEOUT)
 868 |      | -                pr_info("\tTasks-RCU CPU stall warnings timeout set to %d (rcu_task_stall_timeout).\n", rcu_task_stall_timeout);
 869 |      | -        else
 870 |      | -                pr_info("\tTasks RCU enabled.\n");
 871 |      | -#endif /* #ifdef CONFIG_TASKS_RCU */
 872 |      | -}
 873 |      | -
 874 |      | -#endif /* #ifndef CONFIG_TINY_RCU */
     |  502 | +// Suppress boot-time RCU CPU stall warnings and rcutorture writer stall
     |  503 | +// warnings. Also used by rcutorture even if stall warnings are excluded.
     |  504 | +int rcu_cpu_stall_suppress_at_boot __read_mostly; // !0 = suppress boot stalls.
     |  505 | +EXPORT_SYMBOL_GPL(rcu_cpu_stall_suppress_at_boot);
     |  506 | +module_param(rcu_cpu_stall_suppress_at_boot, int, 0444);
 875 |  507 |
 876 |  508 | #ifdef CONFIG_PROVE_RCU
 877 |  509 |
 878 |  510 | /*
 879 |      | - * Early boot self test parameters, one for each flavor
     |  511 | + * Early boot self test parameters.
 880 |  512 |  */
 881 |  513 | static bool rcu_self_test;
 882 |      | -static bool rcu_self_test_bh;
 883 |      | -static bool rcu_self_test_sched;
 884 |      | -
 885 |  514 | module_param(rcu_self_test, bool, 0444);
 886 |      | -module_param(rcu_self_test_bh, bool, 0444);
 887 |      | -module_param(rcu_self_test_sched, bool, 0444);
 888 |  515 |
 889 |  516 | static int rcu_self_test_counter;
 890 |  517 |
  .. |   .. |
 894 |  521 |         pr_info("RCU test callback executed %d\n", rcu_self_test_counter);
 895 |  522 | }
 896 |  523 |
     |  524 | +DEFINE_STATIC_SRCU(early_srcu);
     |  525 | +
     |  526 | +struct early_boot_kfree_rcu {
     |  527 | +        struct rcu_head rh;
     |  528 | +};
     |  529 | +
 897 |  530 | static void early_boot_test_call_rcu(void)
 898 |  531 | {
 899 |  532 |         static struct rcu_head head;
     |  533 | +        static struct rcu_head shead;
     |  534 | +        struct early_boot_kfree_rcu *rhp;
 900 |  535 |
 901 |  536 |         call_rcu(&head, test_callback);
 902 |      | -}
 903 |      | -
 904 |      | -static void early_boot_test_call_rcu_bh(void)
 905 |      | -{
 906 |      | -        static struct rcu_head head;
 907 |      | -
 908 |      | -        call_rcu_bh(&head, test_callback);
 909 |      | -}
 910 |      | -
 911 |      | -static void early_boot_test_call_rcu_sched(void)
 912 |      | -{
 913 |      | -        static struct rcu_head head;
 914 |      | -
 915 |      | -        call_rcu_sched(&head, test_callback);
     |  537 | +        if (IS_ENABLED(CONFIG_SRCU))
     |  538 | +                call_srcu(&early_srcu, &shead, test_callback);
     |  539 | +        rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);
     |  540 | +        if (!WARN_ON_ONCE(!rhp))
     |  541 | +                kfree_rcu(rhp, rh);
 916 |  542 | }
 917 |  543 |
 918 |  544 | void rcu_early_boot_tests(void)
  .. |   .. |
 921 |  547 |
 922 |  548 |         if (rcu_self_test)
 923 |  549 |                 early_boot_test_call_rcu();
 924 |      | -        if (rcu_self_test_bh)
 925 |      | -                early_boot_test_call_rcu_bh();
 926 |      | -        if (rcu_self_test_sched)
 927 |      | -                early_boot_test_call_rcu_sched();
 928 |  550 |         rcu_test_sync_prims();
 929 |  551 | }
 930 |  552 |
  .. |   .. |
 936 |  558 |         if (rcu_self_test) {
 937 |  559 |                 early_boot_test_counter++;
 938 |  560 |                 rcu_barrier();
     |  561 | +                if (IS_ENABLED(CONFIG_SRCU)) {
     |  562 | +                        early_boot_test_counter++;
     |  563 | +                        srcu_barrier(&early_srcu);
     |  564 | +                }
 939 |  565 |         }
 940 |      | -        if (rcu_self_test_bh) {
 941 |      | -                early_boot_test_counter++;
 942 |      | -                rcu_barrier_bh();
 943 |      | -        }
 944 |      | -        if (rcu_self_test_sched) {
 945 |      | -                early_boot_test_counter++;
 946 |      | -                rcu_barrier_sched();
 947 |      | -        }
 948 |      | -
 949 |  566 |         if (rcu_self_test_counter != early_boot_test_counter) {
 950 |  567 |                 WARN_ON(1);
 951 |  568 |                 ret = -1;
  .. |   .. |
 958 |  575 | void rcu_early_boot_tests(void) {}
 959 |  576 | #endif /* CONFIG_PROVE_RCU */
 960 |  577 |
     |  578 | +#include "tasks.h"
     |  579 | +
 961 |  580 | #ifndef CONFIG_TINY_RCU
 962 |  581 |
 963 |  582 | /*