.. | ..
| 1 | +// SPDX-License-Identifier: GPL-2.0+
1 | 2 | /*
2 | 3 | * Read-Copy Update module-based torture test facility
3 | 4 | *
4 | | - * This program is free software; you can redistribute it and/or modify
5 | | - * it under the terms of the GNU General Public License as published by
6 | | - * the Free Software Foundation; either version 2 of the License, or
7 | | - * (at your option) any later version.
8 | | - *
9 | | - * This program is distributed in the hope that it will be useful,
10 | | - * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 | | - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 | | - * GNU General Public License for more details.
13 | | - *
14 | | - * You should have received a copy of the GNU General Public License
15 | | - * along with this program; if not, you can access it online at
16 | | - * http://www.gnu.org/licenses/gpl-2.0.html.
17 | | - *
18 | 5 | * Copyright (C) IBM Corporation, 2005, 2006
19 | 6 | *
20 | | - * Authors: Paul E. McKenney <paulmck@us.ibm.com>
| 7 | + * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
21 | 8 | * Josh Triplett <josh@joshtriplett.org>
22 | 9 | *
23 | | - * See also: Documentation/RCU/torture.txt
| 10 | + * See also: Documentation/RCU/torture.rst
24 | 11 | */
25 | 12 |
26 | 13 | #define pr_fmt(fmt) fmt
.. | ..
33 | 20 | #include <linux/err.h>
34 | 21 | #include <linux/spinlock.h>
35 | 22 | #include <linux/smp.h>
36 | | -#include <linux/rcupdate.h>
| 23 | +#include <linux/rcupdate_wait.h>
37 | 24 | #include <linux/interrupt.h>
38 | 25 | #include <linux/sched/signal.h>
39 | 26 | #include <uapi/linux/sched/types.h>
.. | ..
56 | 43 | #include <linux/vmalloc.h>
57 | 44 | #include <linux/sched/debug.h>
58 | 45 | #include <linux/sched/sysctl.h>
| 46 | +#include <linux/oom.h>
| 47 | +#include <linux/tick.h>
| 48 | +#include <linux/rcupdate_trace.h>
59 | 49 |
60 | 50 | #include "rcu.h"
61 | 51 |
62 | 52 | MODULE_LICENSE("GPL");
63 | | -MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and Josh Triplett <josh@joshtriplett.org>");
64 | | -
| 53 | +MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com> and Josh Triplett <josh@joshtriplett.org>");
65 | 54 |
66 | 55 | /* Bits for ->extendables field, extendables param, and related definitions. */
67 | 56 | #define RCUTORTURE_RDR_SHIFT 8 /* Put SRCU index in upper bits. */
68 | 57 | #define RCUTORTURE_RDR_MASK ((1 << RCUTORTURE_RDR_SHIFT) - 1)
69 | | -#define RCUTORTURE_RDR_BH 0x1 /* Extend readers by disabling bh. */
70 | | -#define RCUTORTURE_RDR_IRQ 0x2 /* ... disabling interrupts. */
71 | | -#define RCUTORTURE_RDR_PREEMPT 0x4 /* ... disabling preemption. */
72 | | -#define RCUTORTURE_RDR_RCU 0x8 /* ... entering another RCU reader. */
73 | | -#define RCUTORTURE_RDR_NBITS 4 /* Number of bits defined above. */
74 | | -#define RCUTORTURE_MAX_EXTEND (RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | \
75 | | - RCUTORTURE_RDR_PREEMPT)
| 58 | +#define RCUTORTURE_RDR_BH 0x01 /* Extend readers by disabling bh. */
| 59 | +#define RCUTORTURE_RDR_IRQ 0x02 /* ... disabling interrupts. */
| 60 | +#define RCUTORTURE_RDR_PREEMPT 0x04 /* ... disabling preemption. */
| 61 | +#define RCUTORTURE_RDR_RBH 0x08 /* ... rcu_read_lock_bh(). */
| 62 | +#define RCUTORTURE_RDR_SCHED 0x10 /* ... rcu_read_lock_sched(). */
| 63 | +#define RCUTORTURE_RDR_RCU 0x20 /* ... entering another RCU reader. */
| 64 | +#define RCUTORTURE_RDR_NBITS 6 /* Number of bits defined above. */
| 65 | +#define RCUTORTURE_MAX_EXTEND \
| 66 | + (RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | RCUTORTURE_RDR_PREEMPT | \
| 67 | + RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_SCHED)
76 | 68 | #define RCUTORTURE_RDR_MAX_LOOPS 0x7 /* Maximum reader extensions. */
77 | 69 | /* Must be power of two minus one. */
| 70 | +#define RCUTORTURE_RDR_MAX_SEGS (RCUTORTURE_RDR_MAX_LOOPS + 3)
78 | 71 |
79 | | -torture_param(int, cbflood_inter_holdoff, HZ,
80 | | - "Holdoff between floods (jiffies)");
81 | | -torture_param(int, cbflood_intra_holdoff, 1,
82 | | - "Holdoff between bursts (jiffies)");
83 | | -torture_param(int, cbflood_n_burst, 3, "# bursts in flood, zero to disable");
84 | | -torture_param(int, cbflood_n_per_burst, 20000,
85 | | - "# callbacks per burst in flood");
86 | 72 | torture_param(int, extendables, RCUTORTURE_MAX_EXTEND,
87 | 73 | "Extend readers by disabling bh (1), irqs (2), or preempt (4)");
88 | 74 | torture_param(int, fqs_duration, 0,
89 | 75 | "Duration of fqs bursts (us), 0 to disable");
90 | 76 | torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
91 | 77 | torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)");
| 78 | +torture_param(bool, fwd_progress, 1, "Test grace-period forward progress");
| 79 | +torture_param(int, fwd_progress_div, 4, "Fraction of CPU stall to wait");
| 80 | +torture_param(int, fwd_progress_holdoff, 60,
| 81 | + "Time between forward-progress tests (s)");
| 82 | +torture_param(bool, fwd_progress_need_resched, 1,
| 83 | + "Hide cond_resched() behind need_resched()");
92 | 84 | torture_param(bool, gp_cond, false, "Use conditional/async GP wait primitives");
93 | 85 | torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
94 | 86 | torture_param(bool, gp_normal, false,
95 | 87 | "Use normal (non-expedited) GP wait primitives");
96 | 88 | torture_param(bool, gp_sync, false, "Use synchronous GP wait primitives");
97 | 89 | torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers");
| 90 | +torture_param(int, leakpointer, 0, "Leak pointer dereferences from readers");
98 | 91 | torture_param(int, n_barrier_cbs, 0,
99 | 92 | "# of callbacks/kthreads for barrier testing");
100 | 93 | torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads");
.. | ..
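The torture_param() declarations above and below expand to ordinary module parameters, so each new knob is set either at load time — for example "modprobe rcutorture fwd_progress=1 stall_cpu_block=1" — or, for built-in kernels, on the boot command line with the "rcutorture." prefix, as in "rcutorture.read_exit_burst=16". The parameter values in these examples are illustrative only.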
104 | 97 | torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
105 | 98 | torture_param(int, onoff_interval, 0,
106 | 99 | "Time between CPU hotplugs (jiffies), 0=disable");
| 100 | +torture_param(int, read_exit_delay, 13,
| 101 | + "Delay between read-then-exit episodes (s)");
| 102 | +torture_param(int, read_exit_burst, 16,
| 103 | + "# of read-then-exit bursts per episode, zero to disable");
107 | 104 | torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles");
108 | 105 | torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
109 | 106 | torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
110 | 107 | torture_param(int, stall_cpu_holdoff, 10,
111 | 108 | "Time to wait before starting stall (s).");
112 | 109 | torture_param(int, stall_cpu_irqsoff, 0, "Disable interrupts while stalling.");
| 110 | +torture_param(int, stall_cpu_block, 0, "Sleep while stalling.");
| 111 | +torture_param(int, stall_gp_kthread, 0,
| 112 | + "Grace-period kthread stall duration (s).");
113 | 113 | torture_param(int, stat_interval, 60,
114 | 114 | "Number of seconds between stats printk()s");
115 | 115 | torture_param(int, stutter, 5, "Number of seconds to run/halt test");
.. | ..
125 | 125 |
126 | 126 | static char *torture_type = "rcu";
127 | 127 | module_param(torture_type, charp, 0444);
128 | | -MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh, ...)");
| 128 | +MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, srcu, ...)");
129 | 129 |
130 | 130 | static int nrealreaders;
131 | | -static int ncbflooders;
132 | 131 | static struct task_struct *writer_task;
133 | 132 | static struct task_struct **fakewriter_tasks;
134 | 133 | static struct task_struct **reader_tasks;
135 | 134 | static struct task_struct *stats_task;
136 | | -static struct task_struct **cbflood_task;
137 | 135 | static struct task_struct *fqs_task;
138 | 136 | static struct task_struct *boost_tasks[NR_CPUS];
139 | 137 | static struct task_struct *stall_task;
| 138 | +static struct task_struct *fwd_prog_task;
140 | 139 | static struct task_struct **barrier_cbs_tasks;
141 | 140 | static struct task_struct *barrier_task;
| 141 | +static struct task_struct *read_exit_task;
142 | 142 |
143 | 143 | #define RCU_TORTURE_PIPE_LEN 10
144 | 144 |
.. | ..
170 | 170 | static atomic_long_t n_rcu_torture_timers;
171 | 171 | static long n_barrier_attempts;
172 | 172 | static long n_barrier_successes; /* did rcu_barrier test succeed? */
173 | | -static atomic_long_t n_cbfloods;
| 173 | +static unsigned long n_read_exits;
174 | 174 | static struct list_head rcu_torture_removed;
| 175 | +static unsigned long shutdown_jiffies;
| 176 | +static unsigned long start_gp_seq;
175 | 177 |
176 | 178 | static int rcu_torture_writer_state;
177 | 179 | #define RTWS_FIXED_DELAY 0
.. | ..
196 | 198 | "RTWS_STUTTER",
197 | 199 | "RTWS_STOPPING",
198 | 200 | };
| 201 | +
| 202 | +/* Record reader segment types and duration for first failing read. */
| 203 | +struct rt_read_seg {
| 204 | + int rt_readstate;
| 205 | + unsigned long rt_delay_jiffies;
| 206 | + unsigned long rt_delay_ms;
| 207 | + unsigned long rt_delay_us;
| 208 | + bool rt_preempted;
| 209 | +};
| 210 | +static int err_segs_recorded;
| 211 | +static struct rt_read_seg err_segs[RCUTORTURE_RDR_MAX_SEGS];
| 212 | +static int rt_read_nsegs;
199 | 213 |
200 | 214 | static const char *rcu_torture_writer_state_getname(void)
201 | 215 | {
.. | ..
227 | 241 | }
228 | 242 | #endif /* #else #ifdef CONFIG_RCU_TRACE */
229 | 243 |
| 244 | +/*
| 245 | + * Stop aggressive CPU-hog tests a bit before the end of the test in order
| 246 | + * to avoid interfering with test shutdown.
| 247 | + */
| 248 | +static bool shutdown_time_arrived(void)
| 249 | +{
| 250 | + return shutdown_secs && time_after(jiffies, shutdown_jiffies - 30 * HZ);
| 251 | +}
| 252 | +
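The shutdown_time_arrived() helper added above compares jiffies with time_after(), which stays correct even when the counter wraps around, and its 30 * HZ margin gives the shutdown path thirty seconds to win the race against the CPU-hog kthreads. A minimal userspace sketch of the wraparound-safe comparison (the 32-bit counter and the names below are illustrative, not kernel API):

    #include <stdio.h>
    #include <stdint.h>

    /* Same idea as the kernel's time_after(a, b): true iff a is after b. */
    static int time_after32(uint32_t a, uint32_t b)
    {
        return (int32_t)(b - a) < 0;
    }

    int main(void)
    {
        uint32_t now = 0xfffffff0u;     /* counter about to wrap */
        uint32_t deadline = now + 0x30; /* wraps past zero */

        /* A naive comparison is fooled by the wrap; the signed-difference
         * trick is not. */
        printf("naive: %d  wrap-safe: %d\n",
               now > deadline, time_after32(now, deadline));
        printf("once past the deadline: %d\n",
               time_after32(deadline + 1, deadline));
        return 0;
    }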
230 | 253 | static unsigned long boost_starttime; /* jiffies of next boost test start. */
231 | 254 | static DEFINE_MUTEX(boost_mutex); /* protect setting boost_starttime */
232 | 255 | /* and boost task create/destroy. */
.. | ..
235 | 258 | static atomic_t barrier_cbs_invoked; /* Barrier callbacks invoked. */
236 | 259 | static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */
237 | 260 | static DECLARE_WAIT_QUEUE_HEAD(barrier_wq);
| 261 | +
| 262 | +static bool rcu_fwd_cb_nodelay; /* Short rcu_torture_delay() delays. */
238 | 263 |
239 | 264 | /*
240 | 265 | * Allocate an element from the rcu_tortures pool.
.. | ..
278 | 303 | void (*init)(void);
279 | 304 | void (*cleanup)(void);
280 | 305 | int (*readlock)(void);
281 | | - void (*read_delay)(struct torture_random_state *rrsp);
| 306 | + void (*read_delay)(struct torture_random_state *rrsp,
| 307 | + struct rt_read_seg *rtrsp);
282 | 308 | void (*readunlock)(int idx);
283 | 309 | unsigned long (*get_gp_seq)(void);
284 | 310 | unsigned long (*gp_diff)(unsigned long new, unsigned long old);
.. | ..
291 | 317 | void (*cb_barrier)(void);
292 | 318 | void (*fqs)(void);
293 | 319 | void (*stats)(void);
| 320 | + int (*stall_dur)(void);
294 | 321 | int irq_capable;
295 | 322 | int can_boost;
296 | 323 | int extendables;
297 | | - int ext_irq_conflict;
| 324 | + int slow_gps;
298 | 325 | const char *name;
299 | 326 | };
300 | 327 |
.. | ..
310 | 337 | return 0;
311 | 338 | }
312 | 339 |
313 | | -static void rcu_read_delay(struct torture_random_state *rrsp)
| 340 | +static void
| 341 | +rcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
314 | 342 | {
315 | 343 | unsigned long started;
316 | 344 | unsigned long completed;
317 | 345 | const unsigned long shortdelay_us = 200;
318 | | - const unsigned long longdelay_ms = 50;
| 346 | + unsigned long longdelay_ms = 300;
319 | 347 | unsigned long long ts;
320 | 348 |
321 | 349 | /* We want a short delay sometimes to make a reader delay the grace
322 | 350 | * period, and we want a long delay occasionally to trigger
323 | 351 | * force_quiescent_state. */
324 | 352 |
325 | | - if (!(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) {
| 353 | + if (!READ_ONCE(rcu_fwd_cb_nodelay) &&
| 354 | + !(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) {
326 | 355 | started = cur_ops->get_gp_seq();
327 | 356 | ts = rcu_trace_clock_local();
| 357 | + if (preempt_count() & (SOFTIRQ_MASK | HARDIRQ_MASK))
| 358 | + longdelay_ms = 5; /* Avoid triggering BH limits. */
328 | 359 | mdelay(longdelay_ms);
| 360 | + rtrsp->rt_delay_ms = longdelay_ms;
329 | 361 | completed = cur_ops->get_gp_seq();
330 | 362 | do_trace_rcu_torture_read(cur_ops->name, NULL, ts,
331 | 363 | started, completed);
332 | 364 | }
333 | | - if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us)))
| 365 | + if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us))) {
334 | 366 | udelay(shortdelay_us);
| 367 | + rtrsp->rt_delay_us = shortdelay_us;
| 368 | + }
335 | 369 | if (!preempt_count() &&
336 | | - !(torture_random(rrsp) % (nrealreaders * 500)))
| 370 | + !(torture_random(rrsp) % (nrealreaders * 500))) {
337 | 371 | torture_preempt_schedule(); /* QS only if preemptible. */
| 372 | + rtrsp->rt_preempted = true;
| 373 | + }
338 | 374 | }
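For a sense of the new rates in rcu_read_delay(): with longdelay_ms raised to 300, the long-delay branch fires with probability 1 in nrealreaders * 2000 * 300, that is, roughly once per 600,000 * nrealreaders calls; the 200-microsecond short delay about once per 400 * nrealreaders calls; and the preemption attempt about once per 500 * nrealreaders calls when the reader is preemptible. Scaling each divisor by the reader count keeps the aggregate disturbance roughly constant as readers are added.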
339 | 375 |
340 | 376 | static void rcu_torture_read_unlock(int idx) __releases(RCU)
.. | ..
350 | 386 | {
351 | 387 | int i;
352 | 388 |
353 | | - i = rp->rtort_pipe_count;
| 389 | + i = READ_ONCE(rp->rtort_pipe_count);
354 | 390 | if (i > RCU_TORTURE_PIPE_LEN)
355 | 391 | i = RCU_TORTURE_PIPE_LEN;
356 | 392 | atomic_inc(&rcu_torture_wcount[i]);
357 | | - if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
| 393 | + WRITE_ONCE(rp->rtort_pipe_count, i + 1);
| 394 | + if (rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
358 | 395 | rp->rtort_mbtest = 0;
359 | 396 | return true;
360 | 397 | }
.. | ..
429 | 466 | .cb_barrier = rcu_barrier,
430 | 467 | .fqs = rcu_force_quiescent_state,
431 | 468 | .stats = NULL,
| 469 | + .stall_dur = rcu_jiffies_till_stall_check,
432 | 470 | .irq_capable = 1,
433 | 471 | .can_boost = rcu_can_boost(),
| 472 | + .extendables = RCUTORTURE_MAX_EXTEND,
434 | 473 | .name = "rcu"
435 | | -};
436 | | -
437 | | -/*
438 | | - * Definitions for rcu_bh torture testing.
439 | | - */
440 | | -
441 | | -static int rcu_bh_torture_read_lock(void) __acquires(RCU_BH)
442 | | -{
443 | | - rcu_read_lock_bh();
444 | | - return 0;
445 | | -}
446 | | -
447 | | -static void rcu_bh_torture_read_unlock(int idx) __releases(RCU_BH)
448 | | -{
449 | | - rcu_read_unlock_bh();
450 | | -}
451 | | -
452 | | -static void rcu_bh_torture_deferred_free(struct rcu_torture *p)
453 | | -{
454 | | - call_rcu_bh(&p->rtort_rcu, rcu_torture_cb);
455 | | -}
456 | | -
457 | | -static struct rcu_torture_ops rcu_bh_ops = {
458 | | - .ttype = RCU_BH_FLAVOR,
459 | | - .init = rcu_sync_torture_init,
460 | | - .readlock = rcu_bh_torture_read_lock,
461 | | - .read_delay = rcu_read_delay, /* just reuse rcu's version. */
462 | | - .readunlock = rcu_bh_torture_read_unlock,
463 | | - .get_gp_seq = rcu_bh_get_gp_seq,
464 | | - .gp_diff = rcu_seq_diff,
465 | | - .deferred_free = rcu_bh_torture_deferred_free,
466 | | - .sync = synchronize_rcu_bh,
467 | | - .exp_sync = synchronize_rcu_bh_expedited,
468 | | - .call = call_rcu_bh,
469 | | - .cb_barrier = rcu_barrier_bh,
470 | | - .fqs = rcu_bh_force_quiescent_state,
471 | | - .stats = NULL,
472 | | - .irq_capable = 1,
473 | | - .extendables = (RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ),
474 | | - .ext_irq_conflict = RCUTORTURE_RDR_RCU,
475 | | - .name = "rcu_bh"
476 | 474 | };
477 | 475 |
478 | 476 | /*
.. | ..
531 | 529 | return srcu_read_lock(srcu_ctlp);
532 | 530 | }
533 | 531 |
534 | | -static void srcu_read_delay(struct torture_random_state *rrsp)
| 532 | +static void
| 533 | +srcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
535 | 534 | {
536 | 535 | long delay;
537 | 536 | const long uspertick = 1000000 / HZ;
.. | ..
541 | 540 |
542 | 541 | delay = torture_random(rrsp) %
543 | 542 | (nrealreaders * 2 * longdelay * uspertick);
544 | | - if (!delay && in_task())
| 543 | + if (!delay && in_task()) {
545 | 544 | schedule_timeout_interruptible(longdelay);
546 | | - else
547 | | - rcu_read_delay(rrsp);
| 545 | + rtrsp->rt_delay_jiffies = longdelay;
| 546 | + } else {
| 547 | + rcu_read_delay(rrsp, rtrsp);
| 548 | + }
548 | 549 | }
549 | 550 |
550 | 551 | static void srcu_torture_read_unlock(int idx) __releases(srcu_ctlp)
.. | ..
614 | 615 |
615 | 616 | static void srcu_torture_cleanup(void)
616 | 617 | {
617 | | - static DEFINE_TORTURE_RANDOM(rand);
618 | | -
619 | | - if (torture_random(&rand) & 0x800)
620 | | - cleanup_srcu_struct(&srcu_ctld);
621 | | - else
622 | | - cleanup_srcu_struct_quiesced(&srcu_ctld);
| 618 | + cleanup_srcu_struct(&srcu_ctld);
623 | 619 | srcu_ctlp = &srcu_ctl; /* In case of a later rcutorture run. */
624 | 620 | }
625 | 621 |
.. | ..
663 | 659 | };
664 | 660 |
665 | 661 | /*
666 | | - * Definitions for sched torture testing.
667 | | - */
668 | | -
669 | | -static int sched_torture_read_lock(void)
670 | | -{
671 | | - preempt_disable();
672 | | - return 0;
673 | | -}
674 | | -
675 | | -static void sched_torture_read_unlock(int idx)
676 | | -{
677 | | - preempt_enable();
678 | | -}
679 | | -
680 | | -static void rcu_sched_torture_deferred_free(struct rcu_torture *p)
681 | | -{
682 | | - call_rcu_sched(&p->rtort_rcu, rcu_torture_cb);
683 | | -}
684 | | -
685 | | -static struct rcu_torture_ops sched_ops = {
686 | | - .ttype = RCU_SCHED_FLAVOR,
687 | | - .init = rcu_sync_torture_init,
688 | | - .readlock = sched_torture_read_lock,
689 | | - .read_delay = rcu_read_delay, /* just reuse rcu's version. */
690 | | - .readunlock = sched_torture_read_unlock,
691 | | - .get_gp_seq = rcu_sched_get_gp_seq,
692 | | - .gp_diff = rcu_seq_diff,
693 | | - .deferred_free = rcu_sched_torture_deferred_free,
694 | | - .sync = synchronize_sched,
695 | | - .exp_sync = synchronize_sched_expedited,
696 | | - .get_state = get_state_synchronize_sched,
697 | | - .cond_sync = cond_synchronize_sched,
698 | | - .call = call_rcu_sched,
699 | | - .cb_barrier = rcu_barrier_sched,
700 | | - .fqs = rcu_sched_force_quiescent_state,
701 | | - .stats = NULL,
702 | | - .irq_capable = 1,
703 | | - .extendables = RCUTORTURE_MAX_EXTEND,
704 | | - .name = "sched"
705 | | -};
706 | | -
707 | | -/*
708 | 662 | * Definitions for RCU-tasks torture testing.
709 | 663 | */
710 | 664 |
.. | ..
722 | 676 | call_rcu_tasks(&p->rtort_rcu, rcu_torture_cb);
723 | 677 | }
724 | 678 |
| 679 | +static void synchronize_rcu_mult_test(void)
| 680 | +{
| 681 | + synchronize_rcu_mult(call_rcu_tasks, call_rcu);
| 682 | +}
| 683 | +
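synchronize_rcu_mult(), as used by the new synchronize_rcu_mult_test(), posts one wakeup callback per listed flavor and then waits for all of them, so the flavors' grace periods elapse concurrently rather than back to back. A userspace sketch of that wait-for-all pattern, with pthreads standing in for the flavors' callback machinery (all names here are illustrative, not kernel API):

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    struct multi_wait {
        pthread_mutex_t lock;
        pthread_cond_t cv;
        int pending;
    };

    static void wakeme_after_gp(struct multi_wait *mw)
    {
        pthread_mutex_lock(&mw->lock);
        if (--mw->pending == 0)
            pthread_cond_signal(&mw->cv);
        pthread_mutex_unlock(&mw->lock);
    }

    /* Stand-in for one RCU flavor's asynchronous grace period. */
    static void *fake_flavor_gp(void *arg)
    {
        usleep(100 * 1000);       /* the "grace period" */
        wakeme_after_gp(arg);     /* the flavor's callback fires */
        return NULL;
    }

    int main(void)
    {
        struct multi_wait mw = { PTHREAD_MUTEX_INITIALIZER,
                                 PTHREAD_COND_INITIALIZER, 2 };
        pthread_t t1, t2;

        pthread_create(&t1, NULL, fake_flavor_gp, &mw); /* "call_rcu_tasks" */
        pthread_create(&t2, NULL, fake_flavor_gp, &mw); /* "call_rcu" */
        pthread_mutex_lock(&mw.lock);
        while (mw.pending)
            pthread_cond_wait(&mw.cv, &mw.lock);
        pthread_mutex_unlock(&mw.lock);
        pthread_join(t1, NULL);
        pthread_join(t2, NULL);
        puts("both grace periods elapsed");
        return 0;
    }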
725 | 684 | static struct rcu_torture_ops tasks_ops = {
726 | 685 | .ttype = RCU_TASKS_FLAVOR,
727 | 686 | .init = rcu_sync_torture_init,
.. | ..
731 | 690 | .get_gp_seq = rcu_no_completed,
732 | 691 | .deferred_free = rcu_tasks_torture_deferred_free,
733 | 692 | .sync = synchronize_rcu_tasks,
734 | | - .exp_sync = synchronize_rcu_tasks,
| 693 | + .exp_sync = synchronize_rcu_mult_test,
735 | 694 | .call = call_rcu_tasks,
736 | 695 | .cb_barrier = rcu_barrier_tasks,
737 | 696 | .fqs = NULL,
738 | 697 | .stats = NULL,
739 | 698 | .irq_capable = 1,
| 699 | + .slow_gps = 1,
740 | 700 | .name = "tasks"
| 701 | +};
| 702 | +
| 703 | +/*
| 704 | + * Definitions for trivial CONFIG_PREEMPT=n-only torture testing.
| 705 | + * This implementation does not necessarily work well with CPU hotplug.
| 706 | + */
| 707 | +
| 708 | +static void synchronize_rcu_trivial(void)
| 709 | +{
| 710 | + int cpu;
| 711 | +
| 712 | + for_each_online_cpu(cpu) {
| 713 | + rcutorture_sched_setaffinity(current->pid, cpumask_of(cpu));
| 714 | + WARN_ON_ONCE(raw_smp_processor_id() != cpu);
| 715 | + }
| 716 | +}
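synchronize_rcu_trivial() works because forcing the current task onto each CPU in turn guarantees a context switch on every CPU, and a context switch is a quiescent state for readers that merely disable preemption. A rough userspace analogue of the CPU-hopping loop using sched_setaffinity(2) (an illustrative sketch only; it carries none of the kernel's guarantees):

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        long ncpus = sysconf(_SC_NPROCESSORS_ONLN);
        cpu_set_t set;

        for (long cpu = 0; cpu < ncpus; cpu++) {
            CPU_ZERO(&set);
            CPU_SET(cpu, &set);
            if (sched_setaffinity(0, sizeof(set), &set))  /* 0 == self */
                perror("sched_setaffinity");
            else if (sched_getcpu() != cpu)
                fprintf(stderr, "not on CPU %ld yet\n", cpu);
        }
        puts("visited every online CPU");
        return 0;
    }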
| 717 | +
| 718 | +static int rcu_torture_read_lock_trivial(void) __acquires(RCU)
| 719 | +{
| 720 | + preempt_disable();
| 721 | + return 0;
| 722 | +}
| 723 | +
| 724 | +static void rcu_torture_read_unlock_trivial(int idx) __releases(RCU)
| 725 | +{
| 726 | + preempt_enable();
| 727 | +}
| 728 | +
| 729 | +static struct rcu_torture_ops trivial_ops = {
| 730 | + .ttype = RCU_TRIVIAL_FLAVOR,
| 731 | + .init = rcu_sync_torture_init,
| 732 | + .readlock = rcu_torture_read_lock_trivial,
| 733 | + .read_delay = rcu_read_delay, /* just reuse rcu's version. */
| 734 | + .readunlock = rcu_torture_read_unlock_trivial,
| 735 | + .get_gp_seq = rcu_no_completed,
| 736 | + .sync = synchronize_rcu_trivial,
| 737 | + .exp_sync = synchronize_rcu_trivial,
| 738 | + .fqs = NULL,
| 739 | + .stats = NULL,
| 740 | + .irq_capable = 1,
| 741 | + .name = "trivial"
| 742 | +};
| 743 | +
| 744 | +/*
| 745 | + * Definitions for rude RCU-tasks torture testing.
| 746 | + */
| 747 | +
| 748 | +static void rcu_tasks_rude_torture_deferred_free(struct rcu_torture *p)
| 749 | +{
| 750 | + call_rcu_tasks_rude(&p->rtort_rcu, rcu_torture_cb);
| 751 | +}
| 752 | +
| 753 | +static struct rcu_torture_ops tasks_rude_ops = {
| 754 | + .ttype = RCU_TASKS_RUDE_FLAVOR,
| 755 | + .init = rcu_sync_torture_init,
| 756 | + .readlock = rcu_torture_read_lock_trivial,
| 757 | + .read_delay = rcu_read_delay, /* just reuse rcu's version. */
| 758 | + .readunlock = rcu_torture_read_unlock_trivial,
| 759 | + .get_gp_seq = rcu_no_completed,
| 760 | + .deferred_free = rcu_tasks_rude_torture_deferred_free,
| 761 | + .sync = synchronize_rcu_tasks_rude,
| 762 | + .exp_sync = synchronize_rcu_tasks_rude,
| 763 | + .call = call_rcu_tasks_rude,
| 764 | + .cb_barrier = rcu_barrier_tasks_rude,
| 765 | + .fqs = NULL,
| 766 | + .stats = NULL,
| 767 | + .irq_capable = 1,
| 768 | + .name = "tasks-rude"
| 769 | +};
| 770 | +
| 771 | +/*
| 772 | + * Definitions for tracing RCU-tasks torture testing.
| 773 | + */
| 774 | +
| 775 | +static int tasks_tracing_torture_read_lock(void)
| 776 | +{
| 777 | + rcu_read_lock_trace();
| 778 | + return 0;
| 779 | +}
| 780 | +
| 781 | +static void tasks_tracing_torture_read_unlock(int idx)
| 782 | +{
| 783 | + rcu_read_unlock_trace();
| 784 | +}
| 785 | +
| 786 | +static void rcu_tasks_tracing_torture_deferred_free(struct rcu_torture *p)
| 787 | +{
| 788 | + call_rcu_tasks_trace(&p->rtort_rcu, rcu_torture_cb);
| 789 | +}
| 790 | +
| 791 | +static struct rcu_torture_ops tasks_tracing_ops = {
| 792 | + .ttype = RCU_TASKS_TRACING_FLAVOR,
| 793 | + .init = rcu_sync_torture_init,
| 794 | + .readlock = tasks_tracing_torture_read_lock,
| 795 | + .read_delay = srcu_read_delay, /* just reuse srcu's version. */
| 796 | + .readunlock = tasks_tracing_torture_read_unlock,
| 797 | + .get_gp_seq = rcu_no_completed,
| 798 | + .deferred_free = rcu_tasks_tracing_torture_deferred_free,
| 799 | + .sync = synchronize_rcu_tasks_trace,
| 800 | + .exp_sync = synchronize_rcu_tasks_trace,
| 801 | + .call = call_rcu_tasks_trace,
| 802 | + .cb_barrier = rcu_barrier_tasks_trace,
| 803 | + .fqs = NULL,
| 804 | + .stats = NULL,
| 805 | + .irq_capable = 1,
| 806 | + .slow_gps = 1,
| 807 | + .name = "tasks-tracing"
741 | 808 | };
742 | 809 |
743 | 810 | static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old)
.. | ..
749 | 816 |
750 | 817 | static bool __maybe_unused torturing_tasks(void)
751 | 818 | {
752 | | - return cur_ops == &tasks_ops;
| 819 | + return cur_ops == &tasks_ops || cur_ops == &tasks_rude_ops;
753 | 820 | }
754 | 821 |
755 | 822 | /*
.. | ..
817 | 884 | unsigned long endtime;
818 | 885 | unsigned long oldstarttime;
819 | 886 | struct rcu_boost_inflight rbi = { .inflight = 0 };
820 | | - struct sched_param sp;
821 | 887 |
822 | 888 | VERBOSE_TOROUT_STRING("rcu_torture_boost started");
823 | 889 |
824 | 890 | /* Set real-time priority. */
825 | | - sp.sched_priority = 1;
826 | | - if (sched_setscheduler(current, SCHED_FIFO, &sp) < 0) {
827 | | - VERBOSE_TOROUT_STRING("rcu_torture_boost RT prio failed!");
828 | | - n_rcu_torture_boost_rterror++;
829 | | - }
| 891 | + sched_set_fifo_low(current);
830 | 892 |
831 | 893 | init_rcu_head_on_stack(&rbi.rcu);
832 | 894 | /* Each pass through the following loop does one boost-test cycle. */
.. | ..
848 | 910 |
849 | 911 | /* Wait for the next test interval. */
850 | 912 | oldstarttime = boost_starttime;
851 | | - while (ULONG_CMP_LT(jiffies, oldstarttime)) {
| 913 | + while (time_before(jiffies, oldstarttime)) {
852 | 914 | schedule_timeout_interruptible(oldstarttime - jiffies);
853 | 915 | stutter_wait("rcu_torture_boost");
854 | 916 | if (torture_must_stop())
.. | ..
858 | 920 | /* Do one boost-test interval. */
859 | 921 | endtime = oldstarttime + test_boost_duration * HZ;
860 | 922 | call_rcu_time = jiffies;
861 | | - while (ULONG_CMP_LT(jiffies, endtime)) {
| 923 | + while (time_before(jiffies, endtime)) {
862 | 924 | /* If we don't have a callback in flight, post one. */
863 | 925 | if (!smp_load_acquire(&rbi.inflight)) {
864 | 926 | /* RCU core before ->inflight = 1. */
.. | ..
915 | 977 | return 0;
916 | 978 | }
917 | 979 |
918 | | -static void rcu_torture_cbflood_cb(struct rcu_head *rhp)
919 | | -{
920 | | -}
921 | | -
922 | | -/*
923 | | - * RCU torture callback-flood kthread. Repeatedly induces bursts of calls
924 | | - * to call_rcu() or analogous, increasing the probability of occurrence
925 | | - * of callback-overflow corner cases.
926 | | - */
927 | | -static int
928 | | -rcu_torture_cbflood(void *arg)
929 | | -{
930 | | - int err = 1;
931 | | - int i;
932 | | - int j;
933 | | - struct rcu_head *rhp;
934 | | -
935 | | - if (cbflood_n_per_burst > 0 &&
936 | | - cbflood_inter_holdoff > 0 &&
937 | | - cbflood_intra_holdoff > 0 &&
938 | | - cur_ops->call &&
939 | | - cur_ops->cb_barrier) {
940 | | - rhp = vmalloc(array3_size(cbflood_n_burst,
941 | | - cbflood_n_per_burst,
942 | | - sizeof(*rhp)));
943 | | - err = !rhp;
944 | | - }
945 | | - if (err) {
946 | | - VERBOSE_TOROUT_STRING("rcu_torture_cbflood disabled: Bad args or OOM");
947 | | - goto wait_for_stop;
948 | | - }
949 | | - VERBOSE_TOROUT_STRING("rcu_torture_cbflood task started");
950 | | - do {
951 | | - schedule_timeout_interruptible(cbflood_inter_holdoff);
952 | | - atomic_long_inc(&n_cbfloods);
953 | | - WARN_ON(signal_pending(current));
954 | | - for (i = 0; i < cbflood_n_burst; i++) {
955 | | - for (j = 0; j < cbflood_n_per_burst; j++) {
956 | | - cur_ops->call(&rhp[i * cbflood_n_per_burst + j],
957 | | - rcu_torture_cbflood_cb);
958 | | - }
959 | | - schedule_timeout_interruptible(cbflood_intra_holdoff);
960 | | - WARN_ON(signal_pending(current));
961 | | - }
962 | | - cur_ops->cb_barrier();
963 | | - stutter_wait("rcu_torture_cbflood");
964 | | - } while (!torture_must_stop());
965 | | - vfree(rhp);
966 | | -wait_for_stop:
967 | | - torture_kthread_stopping("rcu_torture_cbflood");
968 | | - return 0;
969 | | -}
970 | | -
971 | 980 | /*
972 | 981 | * RCU torture force-quiescent-state kthread. Repeatedly induces
973 | 982 | * bursts of calls to force_quiescent_state(), increasing the probability
.. | ..
982 | 991 | VERBOSE_TOROUT_STRING("rcu_torture_fqs task started");
983 | 992 | do {
984 | 993 | fqs_resume_time = jiffies + fqs_stutter * HZ;
985 | | - while (ULONG_CMP_LT(jiffies, fqs_resume_time) &&
| 994 | + while (time_before(jiffies, fqs_resume_time) &&
986 | 995 | !kthread_should_stop()) {
987 | 996 | schedule_timeout_interruptible(1);
988 | 997 | }
.. | ..
1084 | 1093 | if (i > RCU_TORTURE_PIPE_LEN)
1085 | 1094 | i = RCU_TORTURE_PIPE_LEN;
1086 | 1095 | atomic_inc(&rcu_torture_wcount[i]);
1087 | | - old_rp->rtort_pipe_count++;
| 1096 | + WRITE_ONCE(old_rp->rtort_pipe_count,
| 1097 | + old_rp->rtort_pipe_count + 1);
1088 | 1098 | switch (synctype[torture_random(&rand) % nsynctypes]) {
1089 | 1099 | case RTWS_DEF_FREE:
1090 | 1100 | rcu_torture_writer_state = RTWS_DEF_FREE;
.. | ..
1116 | 1126 | break;
1117 | 1127 | }
1118 | 1128 | }
1119 | | - rcu_torture_current_version++;
| 1129 | + WRITE_ONCE(rcu_torture_current_version,
| 1130 | + rcu_torture_current_version + 1);
1120 | 1131 | /* Cycle through nesting levels of rcu_expedite_gp() calls. */
1121 | 1132 | if (can_expedite &&
1122 | 1133 | !(torture_random(&rand) & 0xff & (!!expediting - 1))) {
.. | ..
1132 | 1143 | !rcu_gp_is_normal();
1133 | 1144 | }
1134 | 1145 | rcu_torture_writer_state = RTWS_STUTTER;
1135 | | - stutter_wait("rcu_torture_writer");
| 1146 | + if (stutter_wait("rcu_torture_writer") &&
| 1147 | + !READ_ONCE(rcu_fwd_cb_nodelay) &&
| 1148 | + !cur_ops->slow_gps &&
| 1149 | + !torture_must_stop() &&
| 1150 | + rcu_inkernel_boot_has_ended())
| 1151 | + for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++)
| 1152 | + if (list_empty(&rcu_tortures[i].rtort_free) &&
| 1153 | + rcu_access_pointer(rcu_torture_current) !=
| 1154 | + &rcu_tortures[i]) {
| 1155 | + rcu_ftrace_dump(DUMP_ALL);
| 1156 | + WARN(1, "%s: rtort_pipe_count: %d\n", __func__, rcu_tortures[i].rtort_pipe_count);
| 1157 | + }
1136 | 1158 | } while (!torture_must_stop());
| 1159 | + rcu_torture_current = NULL; // Let stats task know that we are done.
1137 | 1160 | /* Reset expediting back to unexpedited. */
1138 | 1161 | if (expediting > 0)
1139 | 1162 | expediting = -expediting;
.. | ..
1199 | 1222 | * change, do a ->read_delay().
1200 | 1223 | */
1201 | 1224 | static void rcutorture_one_extend(int *readstate, int newstate,
1202 | | - struct torture_random_state *trsp)
| 1225 | + struct torture_random_state *trsp,
| 1226 | + struct rt_read_seg *rtrsp)
1203 | 1227 | {
| 1228 | + unsigned long flags;
1204 | 1229 | int idxnew = -1;
1205 | 1230 | int idxold = *readstate;
1206 | 1231 | int statesnew = ~*readstate & newstate;
.. | ..
1208 | 1233 |
1209 | 1234 | WARN_ON_ONCE(idxold < 0);
1210 | 1235 | WARN_ON_ONCE((idxold >> RCUTORTURE_RDR_SHIFT) > 1);
| 1236 | + rtrsp->rt_readstate = newstate;
1211 | 1237 |
1212 | 1238 | /* First, put new protection in place to avoid critical-section gap. */
1213 | 1239 | if (statesnew & RCUTORTURE_RDR_BH)
1214 | 1240 | local_bh_disable();
| 1241 | + if (statesnew & RCUTORTURE_RDR_RBH)
| 1242 | + rcu_read_lock_bh();
1215 | 1243 | if (statesnew & RCUTORTURE_RDR_IRQ)
1216 | 1244 | local_irq_disable();
1217 | 1245 | if (statesnew & RCUTORTURE_RDR_PREEMPT)
1218 | 1246 | preempt_disable();
| 1247 | + if (statesnew & RCUTORTURE_RDR_SCHED)
| 1248 | + rcu_read_lock_sched();
1219 | 1249 | if (statesnew & RCUTORTURE_RDR_RCU)
1220 | 1250 | idxnew = cur_ops->readlock() << RCUTORTURE_RDR_SHIFT;
1221 | 1251 |
1222 | | - /* Next, remove old protection, irq first due to bh conflict. */
| 1252 | + /*
| 1253 | + * Next, remove old protection, in decreasing order of strength
| 1254 | + * to avoid unlock paths that aren't safe in the stronger
| 1255 | + * context. Namely: BH can not be enabled with disabled interrupts.
| 1256 | + * Additionally PREEMPT_RT requires that BH is enabled in preemptible
| 1257 | + * context.
| 1258 | + */
1223 | 1259 | if (statesold & RCUTORTURE_RDR_IRQ)
1224 | 1260 | local_irq_enable();
1225 | | - if (statesold & RCUTORTURE_RDR_BH)
1226 | | - local_bh_enable();
1227 | 1261 | if (statesold & RCUTORTURE_RDR_PREEMPT)
1228 | 1262 | preempt_enable();
1229 | | - if (statesold & RCUTORTURE_RDR_RCU)
| 1263 | + if (statesold & RCUTORTURE_RDR_SCHED)
| 1264 | + rcu_read_unlock_sched();
| 1265 | + if (statesold & RCUTORTURE_RDR_BH)
| 1266 | + local_bh_enable();
| 1267 | + if (statesold & RCUTORTURE_RDR_RBH)
| 1268 | + rcu_read_unlock_bh();
| 1269 | + if (statesold & RCUTORTURE_RDR_RCU) {
| 1270 | + bool lockit = !statesnew && !(torture_random(trsp) & 0xffff);
| 1271 | +
| 1272 | + if (lockit)
| 1273 | + raw_spin_lock_irqsave(&current->pi_lock, flags);
1230 | 1274 | cur_ops->readunlock(idxold >> RCUTORTURE_RDR_SHIFT);
| 1275 | + if (lockit)
| 1276 | + raw_spin_unlock_irqrestore(&current->pi_lock, flags);
| 1277 | + }
1231 | 1278 |
1232 | 1279 | /* Delay if neither beginning nor end and there was a change. */
1233 | 1280 | if ((statesnew || statesold) && *readstate && newstate)
1234 | | - cur_ops->read_delay(trsp);
| 1281 | + cur_ops->read_delay(trsp, rtrsp);
1235 | 1282 |
1236 | 1283 | /* Update the reader state. */
1237 | 1284 | if (idxnew == -1)
.. | ..
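The heart of rcutorture_one_extend() is plain bit algebra: statesnew = ~old & new selects the protections to acquire and statesold = old & ~new the protections to release, and the function acquires before it releases so the read-side critical section never has an unprotected gap. A standalone sketch of that algebra (bit names shortened; the values match the RCUTORTURE_RDR_* definitions above):

    #include <stdio.h>

    #define RDR_BH      0x01
    #define RDR_IRQ     0x02
    #define RDR_PREEMPT 0x04
    #define RDR_RBH     0x08
    #define RDR_SCHED   0x10
    #define RDR_RCU     0x20

    int main(void)
    {
        int readstate = RDR_RCU | RDR_BH;    /* currently: RCU reader + bh off */
        int newstate  = RDR_RCU | RDR_SCHED; /* target: RCU + sched reader */
        int statesnew = ~readstate & newstate; /* must acquire: RDR_SCHED */
        int statesold = readstate & ~newstate; /* must release: RDR_BH */

        printf("acquire %#x release %#x\n", statesnew, statesold);
        return 0;
    }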
1260 | 1307 | {
1261 | 1308 | int mask = rcutorture_extend_mask_max();
1262 | 1309 | unsigned long randmask1 = torture_random(trsp) >> 8;
1263 | | - unsigned long randmask2 = randmask1 >> 1;
| 1310 | + unsigned long randmask2 = randmask1 >> 3;
| 1311 | + unsigned long preempts = RCUTORTURE_RDR_PREEMPT | RCUTORTURE_RDR_SCHED;
| 1312 | + unsigned long preempts_irq = preempts | RCUTORTURE_RDR_IRQ;
| 1313 | + unsigned long bhs = RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH;
1264 | 1314 |
1265 | 1315 | WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT);
1266 | | - /* Half the time lots of bits, half the time only one bit. */
1267 | | - if (randmask1 & 0x1)
| 1316 | + /* Mostly only one bit (need preemption!), sometimes lots of bits. */
| 1317 | + if (!(randmask1 & 0x7))
1268 | 1318 | mask = mask & randmask2;
1269 | 1319 | else
1270 | 1320 | mask = mask & (1 << (randmask2 % RCUTORTURE_RDR_NBITS));
1271 | | - if ((mask & RCUTORTURE_RDR_IRQ) &&
1272 | | - !(mask & RCUTORTURE_RDR_BH) &&
1273 | | - (oldmask & RCUTORTURE_RDR_BH))
1274 | | - mask |= RCUTORTURE_RDR_BH; /* Can't enable bh w/irq disabled. */
1275 | | - if ((mask & RCUTORTURE_RDR_IRQ) &&
1276 | | - !(mask & cur_ops->ext_irq_conflict) &&
1277 | | - (oldmask & cur_ops->ext_irq_conflict))
1278 | | - mask |= cur_ops->ext_irq_conflict; /* Or if readers object. */
| 1321 | +
| 1322 | + /*
| 1323 | + * Can't enable bh w/irq disabled.
| 1324 | + */
| 1325 | + if (mask & RCUTORTURE_RDR_IRQ)
| 1326 | + mask |= oldmask & bhs;
| 1327 | +
| 1328 | + /*
| 1329 | + * Ideally these sequences would be detected in debug builds
| 1330 | + * (regardless of RT), but until then don't stop testing
| 1331 | + * them on non-RT.
| 1332 | + */
| 1333 | + if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
| 1334 | + /* Can't modify BH in atomic context */
| 1335 | + if (oldmask & preempts_irq)
| 1336 | + mask &= ~bhs;
| 1337 | + if ((oldmask | mask) & preempts_irq)
| 1338 | + mask |= oldmask & bhs;
| 1339 | + }
| 1340 | +
1279 | 1341 | return mask ?: RCUTORTURE_RDR_RCU;
1280 | 1342 | }
1281 | 1343 |
.. | ..
1283 | 1345 | * Do a randomly selected number of extensions of an existing RCU read-side
1284 | 1346 | * critical section.
1285 | 1347 | */
1286 | | -static void rcutorture_loop_extend(int *readstate,
1287 | | - struct torture_random_state *trsp)
| 1348 | +static struct rt_read_seg *
| 1349 | +rcutorture_loop_extend(int *readstate, struct torture_random_state *trsp,
| 1350 | + struct rt_read_seg *rtrsp)
1288 | 1351 | {
1289 | 1352 | int i;
| 1353 | + int j;
1290 | 1354 | int mask = rcutorture_extend_mask_max();
1291 | 1355 |
1292 | 1356 | WARN_ON_ONCE(!*readstate); /* -Existing- RCU read-side critsect! */
1293 | 1357 | if (!((mask - 1) & mask))
1294 | | - return; /* Current RCU flavor not extendable. */
1295 | | - i = (torture_random(trsp) >> 3) & RCUTORTURE_RDR_MAX_LOOPS;
1296 | | - while (i--) {
| 1358 | + return rtrsp; /* Current RCU reader not extendable. */
| 1359 | + /* Bias towards larger numbers of loops. */
| 1360 | + i = (torture_random(trsp) >> 3);
| 1361 | + i = ((i | (i >> 3)) & RCUTORTURE_RDR_MAX_LOOPS) + 1;
| 1362 | + for (j = 0; j < i; j++) {
1297 | 1363 | mask = rcutorture_extend_mask(*readstate, trsp);
1298 | | - rcutorture_one_extend(readstate, mask, trsp);
| 1364 | + rcutorture_one_extend(readstate, mask, trsp, &rtrsp[j]);
1299 | 1365 | }
| 1366 | + return &rtrsp[j];
1300 | 1367 | }
1301 | 1368 |
1302 | 1369 | /*
.. | ..
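The bias trick in rcutorture_loop_extend() above — i = ((i | (i >> 3)) & RCUTORTURE_RDR_MAX_LOOPS) + 1 — ORs two roughly independent 3-bit samples, which skews each bit toward 1 and hence the loop count toward its maximum of 8. A quick userspace histogram confirming the skew (rand() is just a stand-in for torture_random()):

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        long hist[9] = { 0 };

        for (int n = 0; n < 1000000; n++) {
            unsigned int i = rand();
            i = ((i | (i >> 3)) & 0x7) + 1; /* 0x7 == RCUTORTURE_RDR_MAX_LOOPS */
            hist[i]++;
        }
        for (int i = 1; i <= 8; i++)
            printf("%d loops: %ld\n", i, hist[i]);
        return 0;
    }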
1306 | 1373 | */
1307 | 1374 | static bool rcu_torture_one_read(struct torture_random_state *trsp)
1308 | 1375 | {
| 1376 | + int i;
1309 | 1377 | unsigned long started;
1310 | 1378 | unsigned long completed;
1311 | 1379 | int newstate;
1312 | 1380 | struct rcu_torture *p;
1313 | 1381 | int pipe_count;
1314 | 1382 | int readstate = 0;
| 1383 | + struct rt_read_seg rtseg[RCUTORTURE_RDR_MAX_SEGS] = { { 0 } };
| 1384 | + struct rt_read_seg *rtrsp = &rtseg[0];
| 1385 | + struct rt_read_seg *rtrsp1;
1315 | 1386 | unsigned long long ts;
1316 | 1387 |
| 1388 | + WARN_ON_ONCE(!rcu_is_watching());
1317 | 1389 | newstate = rcutorture_extend_mask(readstate, trsp);
1318 | | - rcutorture_one_extend(&readstate, newstate, trsp);
| 1390 | + rcutorture_one_extend(&readstate, newstate, trsp, rtrsp++);
1319 | 1391 | started = cur_ops->get_gp_seq();
1320 | 1392 | ts = rcu_trace_clock_local();
1321 | 1393 | p = rcu_dereference_check(rcu_torture_current,
1322 | 1394 | rcu_read_lock_bh_held() ||
1323 | 1395 | rcu_read_lock_sched_held() ||
1324 | 1396 | srcu_read_lock_held(srcu_ctlp) ||
| 1397 | + rcu_read_lock_trace_held() ||
1325 | 1398 | torturing_tasks());
1326 | 1399 | if (p == NULL) {
1327 | 1400 | /* Wait for rcu_torture_writer to get underway */
1328 | | - rcutorture_one_extend(&readstate, 0, trsp);
| 1401 | + rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
1329 | 1402 | return false;
1330 | 1403 | }
1331 | 1404 | if (p->rtort_mbtest == 0)
1332 | 1405 | atomic_inc(&n_rcu_torture_mberror);
1333 | | - rcutorture_loop_extend(&readstate, trsp);
| 1406 | + rtrsp = rcutorture_loop_extend(&readstate, trsp, rtrsp);
1334 | 1407 | preempt_disable();
1335 | | - pipe_count = p->rtort_pipe_count;
| 1408 | + pipe_count = READ_ONCE(p->rtort_pipe_count);
1336 | 1409 | if (pipe_count > RCU_TORTURE_PIPE_LEN) {
1337 | 1410 | /* Should not happen, but... */
1338 | 1411 | pipe_count = RCU_TORTURE_PIPE_LEN;
.. | ..
1351 | 1424 | }
1352 | 1425 | __this_cpu_inc(rcu_torture_batch[completed]);
1353 | 1426 | preempt_enable();
1354 | | - rcutorture_one_extend(&readstate, 0, trsp);
| 1427 | + rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
1355 | 1428 | WARN_ON_ONCE(readstate & RCUTORTURE_RDR_MASK);
| 1429 | + // This next splat is expected behavior if leakpointer, especially
| 1430 | + // for CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels.
| 1431 | + WARN_ON_ONCE(leakpointer && READ_ONCE(p->rtort_pipe_count) > 1);
| 1432 | +
| 1433 | + /* If error or close call, record the sequence of reader protections. */
| 1434 | + if ((pipe_count > 1 || completed > 1) && !xchg(&err_segs_recorded, 1)) {
| 1435 | + i = 0;
| 1436 | + for (rtrsp1 = &rtseg[0]; rtrsp1 < rtrsp; rtrsp1++)
| 1437 | + err_segs[i++] = *rtrsp1;
| 1438 | + rt_read_nsegs = i;
| 1439 | + }
| 1440 | +
1356 | 1441 | return true;
1357 | 1442 | }
1358 | 1443 |
.. | ..
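The err_segs recording in rcu_torture_one_read() uses xchg(&err_segs_recorded, 1) as a one-shot latch: only the first reader to fail (or have a close call) records its segment sequence, and every later failure sees the latch already set. The same idiom in portable C11 (names here are illustrative):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int err_recorded;

    static void maybe_record(int reader_id)
    {
        /* atomic_exchange() returns the old value, so exactly one
         * caller ever sees 0 and wins the right to record. */
        if (!atomic_exchange(&err_recorded, 1))
            printf("reader %d records the failing segments\n", reader_id);
        else
            printf("reader %d: already recorded, skipping\n", reader_id);
    }

    int main(void)
    {
        maybe_record(3);  /* wins */
        maybe_record(7);  /* skips */
        return 0;
    }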
1387 | 1472 | static int
1388 | 1473 | rcu_torture_reader(void *arg)
1389 | 1474 | {
| 1475 | + unsigned long lastsleep = jiffies;
| 1476 | + long myid = (long)arg;
| 1477 | + int mynumonline = myid;
1390 | 1478 | DEFINE_TORTURE_RANDOM(rand);
1391 | 1479 | struct timer_list t;
1392 | 1480 |
.. | ..
1394 | 1482 | set_user_nice(current, MAX_NICE);
1395 | 1483 | if (irqreader && cur_ops->irq_capable)
1396 | 1484 | timer_setup_on_stack(&t, rcu_torture_timer, 0);
1397 | | -
| 1485 | + tick_dep_set_task(current, TICK_DEP_BIT_RCU);
1398 | 1486 | do {
1399 | 1487 | if (irqreader && cur_ops->irq_capable) {
1400 | 1488 | if (!timer_pending(&t))
1401 | 1489 | mod_timer(&t, jiffies + 1);
1402 | 1490 | }
1403 | | - if (!rcu_torture_one_read(&rand))
| 1491 | + if (!rcu_torture_one_read(&rand) && !torture_must_stop())
1404 | 1492 | schedule_timeout_interruptible(HZ);
| 1493 | + if (time_after(jiffies, lastsleep) && !torture_must_stop()) {
| 1494 | + schedule_timeout_interruptible(1);
| 1495 | + lastsleep = jiffies + 10;
| 1496 | + }
| 1497 | + while (num_online_cpus() < mynumonline && !torture_must_stop())
| 1498 | + schedule_timeout_interruptible(HZ / 5);
1405 | 1499 | stutter_wait("rcu_torture_reader");
1406 | 1500 | } while (!torture_must_stop());
1407 | 1501 | if (irqreader && cur_ops->irq_capable) {
1408 | 1502 | del_timer_sync(&t);
1409 | 1503 | destroy_timer_on_stack(&t);
1410 | 1504 | }
| 1505 | + tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
1411 | 1506 | torture_kthread_stopping("rcu_torture_reader");
1412 | 1507 | return 0;
1413 | 1508 | }
.. | ..
1427 | 1522 | int i;
1428 | 1523 | long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
1429 | 1524 | long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
| 1525 | + struct rcu_torture *rtcp;
1430 | 1526 | static unsigned long rtcv_snap = ULONG_MAX;
1431 | 1527 | static bool splatted;
1432 | 1528 | struct task_struct *wtp;
1433 | 1529 |
1434 | 1530 | for_each_possible_cpu(cpu) {
1435 | 1531 | for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
1436 | | - pipesummary[i] += per_cpu(rcu_torture_count, cpu)[i];
1437 | | - batchsummary[i] += per_cpu(rcu_torture_batch, cpu)[i];
| 1532 | + pipesummary[i] += READ_ONCE(per_cpu(rcu_torture_count, cpu)[i]);
| 1533 | + batchsummary[i] += READ_ONCE(per_cpu(rcu_torture_batch, cpu)[i]);
1438 | 1534 | }
1439 | 1535 | }
1440 | 1536 | for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) {
.. | ..
1443 | 1539 | }
1444 | 1540 |
1445 | 1541 | pr_alert("%s%s ", torture_type, TORTURE_FLAG);
1446 | | - pr_cont("rtc: %p ver: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
1447 | | - rcu_torture_current,
| 1542 | + rtcp = rcu_access_pointer(rcu_torture_current);
| 1543 | + pr_cont("rtc: %p %s: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
| 1544 | + rtcp,
| 1545 | + rtcp && !rcu_stall_is_suppressed_at_boot() ? "ver" : "VER",
1448 | 1546 | rcu_torture_current_version,
1449 | 1547 | list_empty(&rcu_torture_freelist),
1450 | 1548 | atomic_read(&n_rcu_torture_alloc),
.. | ..
1461 | 1559 | atomic_long_read(&n_rcu_torture_timers));
1462 | 1560 | torture_onoff_stats();
1463 | 1561 | pr_cont("barrier: %ld/%ld:%ld ",
1464 | | - n_barrier_successes,
1465 | | - n_barrier_attempts,
1466 | | - n_rcu_torture_barrier_error);
1467 | | - pr_cont("cbflood: %ld\n", atomic_long_read(&n_cbfloods));
| 1562 | + data_race(n_barrier_successes),
| 1563 | + data_race(n_barrier_attempts),
| 1564 | + data_race(n_rcu_torture_barrier_error));
| 1565 | + pr_cont("read-exits: %ld\n", data_race(n_read_exits));
1468 | 1566 |
1469 | 1567 | pr_alert("%s%s ", torture_type, TORTURE_FLAG);
1470 | | - if (atomic_read(&n_rcu_torture_mberror) != 0 ||
1471 | | - n_rcu_torture_barrier_error != 0 ||
1472 | | - n_rcu_torture_boost_ktrerror != 0 ||
1473 | | - n_rcu_torture_boost_rterror != 0 ||
1474 | | - n_rcu_torture_boost_failure != 0 ||
| 1568 | + if (atomic_read(&n_rcu_torture_mberror) ||
| 1569 | + n_rcu_torture_barrier_error || n_rcu_torture_boost_ktrerror ||
| 1570 | + n_rcu_torture_boost_rterror || n_rcu_torture_boost_failure ||
1475 | 1571 | i > 1) {
1476 | 1572 | pr_cont("%s", "!!! ");
1477 | 1573 | atomic_inc(&n_rcu_torture_error);
1478 | | - WARN_ON_ONCE(1);
| 1574 | + WARN_ON_ONCE(atomic_read(&n_rcu_torture_mberror));
| 1575 | + WARN_ON_ONCE(n_rcu_torture_barrier_error); // rcu_barrier()
| 1576 | + WARN_ON_ONCE(n_rcu_torture_boost_ktrerror); // no boost kthread
| 1577 | + WARN_ON_ONCE(n_rcu_torture_boost_rterror); // can't set RT prio
| 1578 | + WARN_ON_ONCE(n_rcu_torture_boost_failure); // RCU boost failed
| 1579 | + WARN_ON_ONCE(i > 1); // Too-short grace period
1479 | 1580 | }
1480 | 1581 | pr_cont("Reader Pipe: ");
1481 | 1582 | for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
.. | ..
1498 | 1599 | if (cur_ops->stats)
1499 | 1600 | cur_ops->stats();
1500 | 1601 | if (rtcv_snap == rcu_torture_current_version &&
1501 | | - rcu_torture_current != NULL) {
| 1602 | + rcu_access_pointer(rcu_torture_current) &&
| 1603 | + !rcu_stall_is_suppressed()) {
1502 | 1604 | int __maybe_unused flags = 0;
1503 | 1605 | unsigned long __maybe_unused gp_seq = 0;
1504 | 1606 |
.. | ..
1550 | 1652 | "test_boost=%d/%d test_boost_interval=%d "
1551 | 1653 | "test_boost_duration=%d shutdown_secs=%d "
1552 | 1654 | "stall_cpu=%d stall_cpu_holdoff=%d stall_cpu_irqsoff=%d "
| 1655 | + "stall_cpu_block=%d "
1553 | 1656 | "n_barrier_cbs=%d "
1554 | | - "onoff_interval=%d onoff_holdoff=%d\n",
| 1657 | + "onoff_interval=%d onoff_holdoff=%d "
| 1658 | + "read_exit_delay=%d read_exit_burst=%d\n",
1555 | 1659 | torture_type, tag, nrealreaders, nfakewriters,
1556 | 1660 | stat_interval, verbose, test_no_idle_hz, shuffle_interval,
1557 | 1661 | stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter,
1558 | 1662 | test_boost, cur_ops->can_boost,
1559 | 1663 | test_boost_interval, test_boost_duration, shutdown_secs,
1560 | 1664 | stall_cpu, stall_cpu_holdoff, stall_cpu_irqsoff,
| 1665 | + stall_cpu_block,
1561 | 1666 | n_barrier_cbs,
1562 | | - onoff_interval, onoff_holdoff);
| 1667 | + onoff_interval, onoff_holdoff,
| 1668 | + read_exit_delay, read_exit_burst);
1563 | 1669 | }
1564 | 1670 |
1565 | 1671 | static int rcutorture_booster_cleanup(unsigned int cpu)
.. | ..
1613 | 1719 | */
1614 | 1720 | static int rcu_torture_stall(void *args)
1615 | 1721 | {
| 1722 | + int idx;
1616 | 1723 | unsigned long stop_at;
1617 | 1724 |
1618 | 1725 | VERBOSE_TOROUT_STRING("rcu_torture_stall task started");
.. | ..
1621 | 1728 | schedule_timeout_interruptible(stall_cpu_holdoff * HZ);
1622 | 1729 | VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff");
1623 | 1730 | }
1624 | | - if (!kthread_should_stop()) {
| 1731 | + if (!kthread_should_stop() && stall_gp_kthread > 0) {
| 1732 | + VERBOSE_TOROUT_STRING("rcu_torture_stall begin GP stall");
| 1733 | + rcu_gp_set_torture_wait(stall_gp_kthread * HZ);
| 1734 | + for (idx = 0; idx < stall_gp_kthread + 2; idx++) {
| 1735 | + if (kthread_should_stop())
| 1736 | + break;
| 1737 | + schedule_timeout_uninterruptible(HZ);
| 1738 | + }
| 1739 | + }
| 1740 | + if (!kthread_should_stop() && stall_cpu > 0) {
| 1741 | + VERBOSE_TOROUT_STRING("rcu_torture_stall begin CPU stall");
1625 | 1742 | stop_at = ktime_get_seconds() + stall_cpu;
1626 | 1743 | /* RCU CPU stall is expected behavior in following code. */
1627 | | - rcu_read_lock();
| 1744 | + idx = cur_ops->readlock();
1628 | 1745 | if (stall_cpu_irqsoff)
1629 | 1746 | local_irq_disable();
1630 | | - else
| 1747 | + else if (!stall_cpu_block)
1631 | 1748 | preempt_disable();
1632 | 1749 | pr_alert("rcu_torture_stall start on CPU %d.\n",
1633 | | - smp_processor_id());
| 1750 | + raw_smp_processor_id());
1634 | 1751 | while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(),
1635 | 1752 | stop_at))
1636 | | - continue; /* Induce RCU CPU stall warning. */
| 1753 | + if (stall_cpu_block)
| 1754 | + schedule_timeout_uninterruptible(HZ);
1637 | 1755 | if (stall_cpu_irqsoff)
1638 | 1756 | local_irq_enable();
1639 | | - else
| 1757 | + else if (!stall_cpu_block)
1640 | 1758 | preempt_enable();
1641 | | - rcu_read_unlock();
1642 | | - pr_alert("rcu_torture_stall end.\n");
| 1759 | + cur_ops->readunlock(idx);
1643 | 1760 | }
| 1761 | + pr_alert("rcu_torture_stall end.\n");
1644 | 1762 | torture_shutdown_absorb("rcu_torture_stall");
1645 | 1763 | while (!kthread_should_stop())
1646 | 1764 | schedule_timeout_interruptible(10 * HZ);
.. | ..
1650 | 1768 | /* Spawn CPU-stall kthread, if stall_cpu specified. */
1651 | 1769 | static int __init rcu_torture_stall_init(void)
1652 | 1770 | {
1653 | | - if (stall_cpu <= 0)
| 1771 | + if (stall_cpu <= 0 && stall_gp_kthread <= 0)
1654 | 1772 | return 0;
1655 | 1773 | return torture_create_kthread(rcu_torture_stall, NULL, stall_task);
| 1774 | +}
| 1775 | +
| 1776 | +/* State structure for forward-progress self-propagating RCU callback. */
| 1777 | +struct fwd_cb_state {
| 1778 | + struct rcu_head rh;
| 1779 | + int stop;
| 1780 | +};
| 1781 | +
| 1782 | +/*
| 1783 | + * Forward-progress self-propagating RCU callback function. Because
| 1784 | + * callbacks run from softirq, this function is an implicit RCU read-side
| 1785 | + * critical section.
| 1786 | + */
| 1787 | +static void rcu_torture_fwd_prog_cb(struct rcu_head *rhp)
| 1788 | +{
| 1789 | + struct fwd_cb_state *fcsp = container_of(rhp, struct fwd_cb_state, rh);
| 1790 | +
| 1791 | + if (READ_ONCE(fcsp->stop)) {
| 1792 | + WRITE_ONCE(fcsp->stop, 2);
| 1793 | + return;
| 1794 | + }
| 1795 | + cur_ops->call(&fcsp->rh, rcu_torture_fwd_prog_cb);
| 1796 | +}
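rcu_torture_fwd_prog_cb() is a self-propagating callback: each invocation re-posts itself via cur_ops->call(), so exactly one callback stays in flight per grace period until ->stop is set, at which point the callback acknowledges by storing 2. A userspace model of that handshake, with a loop index standing in for successive grace periods (all names are illustrative):

    #include <stdatomic.h>
    #include <stdio.h>

    struct fwd_state {
        atomic_int stop;  /* 0: keep going, 1: please stop, 2: stopped */
        long launders;
    };

    static void fwd_cb(struct fwd_state *fcsp)
    {
        if (atomic_load(&fcsp->stop)) {
            atomic_store(&fcsp->stop, 2);  /* acknowledge and terminate */
            return;
        }
        fcsp->launders++;
        /* In the kernel this is where cur_ops->call(..., fwd_cb) would
         * re-post the callback for the next grace period. */
    }

    int main(void)
    {
        struct fwd_state fcs = { 0, 0 };

        for (int gp = 0; gp < 1000; gp++) {  /* simulated grace periods */
            if (gp == 500)
                atomic_store(&fcs.stop, 1);  /* request shutdown */
            if (atomic_load(&fcs.stop) != 2)
                fwd_cb(&fcs);
        }
        printf("launders %ld, stop state %d\n",
               fcs.launders, atomic_load(&fcs.stop));
        return 0;
    }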
| 1797 | + |
---|
| 1798 | +/* State for continuous-flood RCU callbacks. */ |
---|
| 1799 | +struct rcu_fwd_cb { |
---|
| 1800 | + struct rcu_head rh; |
---|
| 1801 | + struct rcu_fwd_cb *rfc_next; |
---|
| 1802 | + struct rcu_fwd *rfc_rfp; |
---|
| 1803 | + int rfc_gps; |
---|
| 1804 | +}; |
---|
| 1805 | + |
---|
| 1806 | +#define MAX_FWD_CB_JIFFIES (8 * HZ) /* Maximum CB test duration. */ |
---|
| 1807 | +#define MIN_FWD_CB_LAUNDERS 3 /* This many CB invocations to count. */ |
---|
| 1808 | +#define MIN_FWD_CBS_LAUNDERED 100 /* Number of counted CBs. */ |
---|
| 1809 | +#define FWD_CBS_HIST_DIV 10 /* Histogram buckets/second. */ |
---|
| 1810 | +#define N_LAUNDERS_HIST (2 * MAX_FWD_CB_JIFFIES / (HZ / FWD_CBS_HIST_DIV)) |
---|
| 1811 | + |
---|
| 1812 | +struct rcu_launder_hist { |
---|
| 1813 | + long n_launders; |
---|
| 1814 | + unsigned long launder_gp_seq; |
---|
| 1815 | +}; |
---|
| 1816 | + |
---|
| 1817 | +struct rcu_fwd { |
---|
| 1818 | + spinlock_t rcu_fwd_lock; |
---|
| 1819 | + struct rcu_fwd_cb *rcu_fwd_cb_head; |
---|
| 1820 | + struct rcu_fwd_cb **rcu_fwd_cb_tail; |
---|
| 1821 | + long n_launders_cb; |
---|
| 1822 | + unsigned long rcu_fwd_startat; |
---|
| 1823 | + struct rcu_launder_hist n_launders_hist[N_LAUNDERS_HIST]; |
---|
| 1824 | + unsigned long rcu_launder_gp_seq_start; |
---|
| 1825 | +}; |
---|
| 1826 | + |
---|
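
The histogram sizing arithmetic above is easy to sanity-check in isolation. A minimal userspace sketch (not part of the patch; it assumes HZ=1000 and hypothetical jiffies values):

    #include <stdio.h>

    #define HZ 1000                         /* Assumed for this example. */
    #define MAX_FWD_CB_JIFFIES (8 * HZ)
    #define FWD_CBS_HIST_DIV 10
    #define N_LAUNDERS_HIST (2 * MAX_FWD_CB_JIFFIES / (HZ / FWD_CBS_HIST_DIV))

    int main(void)
    {
        unsigned long startat = 0, now = 1234;  /* Hypothetical jiffies. */
        int i = (now - startat) / (HZ / FWD_CBS_HIST_DIV);

        /* 160 buckets of 100 jiffies (0.1 s) each, covering twice the
         * 8-second maximum test duration; jiffy 1234 lands in bucket 12,
         * matching the index computation in rcu_torture_fwd_cb_cr(). */
        printf("buckets=%d width=%d index=%d\n",
               N_LAUNDERS_HIST, HZ / FWD_CBS_HIST_DIV, i);
        return 0;
    }
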
| 1827 | +static DEFINE_MUTEX(rcu_fwd_mutex); |
---|
| 1828 | +static struct rcu_fwd *rcu_fwds; |
---|
| 1829 | +static bool rcu_fwd_emergency_stop; |
---|
| 1830 | + |
---|
| 1831 | +static void rcu_torture_fwd_cb_hist(struct rcu_fwd *rfp) |
---|
| 1832 | +{ |
---|
| 1833 | + unsigned long gps; |
---|
| 1834 | + unsigned long gps_old; |
---|
| 1835 | + int i; |
---|
| 1836 | + int j; |
---|
| 1837 | + |
---|
| 1838 | + for (i = ARRAY_SIZE(rfp->n_launders_hist) - 1; i > 0; i--) |
---|
| 1839 | + if (rfp->n_launders_hist[i].n_launders > 0) |
---|
| 1840 | + break; |
---|
| 1841 | + pr_alert("%s: Callback-invocation histogram (duration %lu jiffies):", |
---|
| 1842 | + __func__, jiffies - rfp->rcu_fwd_startat); |
---|
| 1843 | + gps_old = rfp->rcu_launder_gp_seq_start; |
---|
| 1844 | + for (j = 0; j <= i; j++) { |
---|
| 1845 | + gps = rfp->n_launders_hist[j].launder_gp_seq; |
---|
| 1846 | + pr_cont(" %ds/%d: %ld:%ld", |
---|
| 1847 | + j + 1, FWD_CBS_HIST_DIV, |
---|
| 1848 | + rfp->n_launders_hist[j].n_launders, |
---|
| 1849 | + rcutorture_seq_diff(gps, gps_old)); |
---|
| 1850 | + gps_old = gps; |
---|
| 1851 | + } |
---|
| 1852 | + pr_cont("\n"); |
---|
| 1853 | +} |
---|
| 1854 | + |
---|
| 1855 | +/* Callback function for continuous-flood RCU callbacks. */ |
---|
| 1856 | +static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp) |
---|
| 1857 | +{ |
---|
| 1858 | + unsigned long flags; |
---|
| 1859 | + int i; |
---|
| 1860 | + struct rcu_fwd_cb *rfcp = container_of(rhp, struct rcu_fwd_cb, rh); |
---|
| 1861 | + struct rcu_fwd_cb **rfcpp; |
---|
| 1862 | + struct rcu_fwd *rfp = rfcp->rfc_rfp; |
---|
| 1863 | + |
---|
| 1864 | + rfcp->rfc_next = NULL; |
---|
| 1865 | + rfcp->rfc_gps++; |
---|
| 1866 | + spin_lock_irqsave(&rfp->rcu_fwd_lock, flags); |
---|
| 1867 | + rfcpp = rfp->rcu_fwd_cb_tail; |
---|
| 1868 | + rfp->rcu_fwd_cb_tail = &rfcp->rfc_next; |
---|
| 1869 | + WRITE_ONCE(*rfcpp, rfcp); |
---|
| 1870 | + WRITE_ONCE(rfp->n_launders_cb, rfp->n_launders_cb + 1); |
---|
| 1871 | + i = ((jiffies - rfp->rcu_fwd_startat) / (HZ / FWD_CBS_HIST_DIV)); |
---|
| 1872 | + if (i >= ARRAY_SIZE(rfp->n_launders_hist)) |
---|
| 1873 | + i = ARRAY_SIZE(rfp->n_launders_hist) - 1; |
---|
| 1874 | + rfp->n_launders_hist[i].n_launders++; |
---|
| 1875 | + rfp->n_launders_hist[i].launder_gp_seq = cur_ops->get_gp_seq(); |
---|
| 1876 | + spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags); |
---|
| 1877 | +} |
---|
| 1878 | + |
---|
| 1879 | +// Give the scheduler a chance, even on nohz_full CPUs. |
---|
| 1880 | +static void rcu_torture_fwd_prog_cond_resched(unsigned long iter) |
---|
| 1881 | +{ |
---|
| 1882 | + if (IS_ENABLED(CONFIG_PREEMPTION) && IS_ENABLED(CONFIG_NO_HZ_FULL)) { |
---|
| 1883 | + // Real call_rcu() floods hit userspace, so emulate that. |
---|
| 1884 | + if (need_resched() || (iter & 0xfff)) |
---|
| 1885 | + schedule(); |
---|
| 1886 | + return; |
---|
| 1887 | + } |
---|
| 1888 | + // No userspace emulation: CB invocation throttles call_rcu() |
---|
| 1889 | + cond_resched(); |
---|
| 1890 | +} |
---|
| 1891 | + |
---|
| 1892 | +/* |
---|
| 1893 | + * Free all callbacks on the rcu_fwd_cb_head list, either because the |
---|
| 1894 | + * test is over or because we hit an OOM event. |
---|
| 1895 | + */ |
---|
| 1896 | +static unsigned long rcu_torture_fwd_prog_cbfree(struct rcu_fwd *rfp) |
---|
| 1897 | +{ |
---|
| 1898 | + unsigned long flags; |
---|
| 1899 | + unsigned long freed = 0; |
---|
| 1900 | + struct rcu_fwd_cb *rfcp; |
---|
| 1901 | + |
---|
| 1902 | + for (;;) { |
---|
| 1903 | + spin_lock_irqsave(&rfp->rcu_fwd_lock, flags); |
---|
| 1904 | + rfcp = rfp->rcu_fwd_cb_head; |
---|
| 1905 | + if (!rfcp) { |
---|
| 1906 | + spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags); |
---|
| 1907 | + break; |
---|
| 1908 | + } |
---|
| 1909 | + rfp->rcu_fwd_cb_head = rfcp->rfc_next; |
---|
| 1910 | + if (!rfp->rcu_fwd_cb_head) |
---|
| 1911 | + rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head; |
---|
| 1912 | + spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags); |
---|
| 1913 | + kfree(rfcp); |
---|
| 1914 | + freed++; |
---|
| 1915 | + rcu_torture_fwd_prog_cond_resched(freed); |
---|
| 1916 | + if (tick_nohz_full_enabled()) { |
---|
| 1917 | + local_irq_save(flags); |
---|
| 1918 | + rcu_momentary_dyntick_idle(); |
---|
| 1919 | + local_irq_restore(flags); |
---|
| 1920 | + } |
---|
| 1921 | + } |
---|
| 1922 | + return freed; |
---|
| 1923 | +} |
---|
| 1924 | + |
---|
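
The rcu_fwd_cb_head/rcu_fwd_cb_tail pair used above is the indirect-tail queue idiom: the tail pointer addresses the last ->next field (or the head pointer itself when the list is empty), so enqueueing in rcu_torture_fwd_cb_cr() needs no empty-queue special case, and draining here resets the tail back to the head slot. A standalone userspace sketch of the idiom, with the patch's locking omitted:

    #include <stdio.h>
    #include <stdlib.h>

    struct node {
        struct node *next;
        int val;
    };

    static struct node *head;
    static struct node **tail = &head;

    static void push(struct node *n)
    {
        n->next = NULL;
        *tail = n;          /* Works whether or not the queue is empty. */
        tail = &n->next;
    }

    static struct node *pop(void)
    {
        struct node *n = head;

        if (!n)
            return NULL;
        head = n->next;
        if (!head)
            tail = &head;   /* Drained: point tail back at the head slot. */
        return n;
    }

    int main(void)
    {
        struct node *n;

        for (int i = 0; i < 3; i++) {
            n = malloc(sizeof(*n));
            n->val = i;
            push(n);
        }
        while ((n = pop())) {
            printf("%d\n", n->val);   /* Prints 0, 1, 2 in FIFO order. */
            free(n);
        }
        return 0;
    }
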
| 1925 | +/* Carry out need_resched()/cond_resched() forward-progress testing. */ |
---|
| 1926 | +static void rcu_torture_fwd_prog_nr(struct rcu_fwd *rfp, |
---|
| 1927 | + int *tested, int *tested_tries) |
---|
| 1928 | +{ |
---|
| 1929 | + unsigned long cver; |
---|
| 1930 | + unsigned long dur; |
---|
| 1931 | + struct fwd_cb_state fcs; |
---|
| 1932 | + unsigned long gps; |
---|
| 1933 | + int idx; |
---|
| 1934 | + int sd; |
---|
| 1935 | + int sd4; |
---|
| 1936 | + bool selfpropcb = false; |
---|
| 1937 | + unsigned long stopat; |
---|
| 1938 | + static DEFINE_TORTURE_RANDOM(trs); |
---|
| 1939 | + |
---|
| 1940 | + if (cur_ops->call && cur_ops->sync && cur_ops->cb_barrier) { |
---|
| 1941 | + init_rcu_head_on_stack(&fcs.rh); |
---|
| 1942 | + selfpropcb = true; |
---|
| 1943 | + } |
---|
| 1944 | + |
---|
| 1945 | + /* Tight loop containing cond_resched(). */ |
---|
| 1946 | + WRITE_ONCE(rcu_fwd_cb_nodelay, true); |
---|
| 1947 | + cur_ops->sync(); /* Later readers see above write. */ |
---|
| 1948 | + if (selfpropcb) { |
---|
| 1949 | + WRITE_ONCE(fcs.stop, 0); |
---|
| 1950 | + cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb); |
---|
| 1951 | + } |
---|
| 1952 | + cver = READ_ONCE(rcu_torture_current_version); |
---|
| 1953 | + gps = cur_ops->get_gp_seq(); |
---|
| 1954 | + sd = cur_ops->stall_dur() + 1; |
---|
| 1955 | + sd4 = (sd + fwd_progress_div - 1) / fwd_progress_div; |
---|
| 1956 | + dur = sd4 + torture_random(&trs) % (sd - sd4); |
---|
| 1957 | + WRITE_ONCE(rfp->rcu_fwd_startat, jiffies); |
---|
| 1958 | + stopat = rfp->rcu_fwd_startat + dur; |
---|
| 1959 | + while (time_before(jiffies, stopat) && |
---|
| 1960 | + !shutdown_time_arrived() && |
---|
| 1961 | + !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) { |
---|
| 1962 | + idx = cur_ops->readlock(); |
---|
| 1963 | + udelay(10); |
---|
| 1964 | + cur_ops->readunlock(idx); |
---|
| 1965 | + if (!fwd_progress_need_resched || need_resched()) |
---|
| 1966 | + cond_resched(); |
---|
| 1967 | + } |
---|
| 1968 | + (*tested_tries)++; |
---|
| 1969 | + if (!time_before(jiffies, stopat) && |
---|
| 1970 | + !shutdown_time_arrived() && |
---|
| 1971 | + !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) { |
---|
| 1972 | + (*tested)++; |
---|
| 1973 | + cver = READ_ONCE(rcu_torture_current_version) - cver; |
---|
| 1974 | + gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps); |
---|
| 1975 | + WARN_ON(!cver && gps < 2); |
---|
| 1976 | + pr_alert("%s: Duration %ld cver %ld gps %ld\n", __func__, dur, cver, gps); |
---|
| 1977 | + } |
---|
| 1978 | + if (selfpropcb) { |
---|
| 1979 | + WRITE_ONCE(fcs.stop, 1); |
---|
| 1980 | + cur_ops->sync(); /* Wait for running CB to complete. */ |
---|
| 1981 | + cur_ops->cb_barrier(); /* Wait for queued callbacks. */ |
---|
| 1982 | + } |
---|
| 1983 | + |
---|
| 1984 | + if (selfpropcb) { |
---|
| 1985 | + WARN_ON(READ_ONCE(fcs.stop) != 2); |
---|
| 1986 | + destroy_rcu_head_on_stack(&fcs.rh); |
---|
| 1987 | + } |
---|
| 1988 | + schedule_timeout_uninterruptible(HZ / 10); /* Let kthreads recover. */ |
---|
| 1989 | + WRITE_ONCE(rcu_fwd_cb_nodelay, false); |
---|
| 1990 | +} |
---|
| 1991 | + |
---|
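
The duration computation in rcu_torture_fwd_prog_nr() picks a random test length in [stall_dur()/4, stall_dur()): long enough to exercise forward progress, short enough to avoid a real stall warning. Worked numbers in a userspace sketch (assuming HZ=1000, the default 21-second stall timeout, and the default fwd_progress_div of 4; the random value is a stand-in):

    #include <stdio.h>

    int main(void)
    {
        const unsigned long hz = 1000;              /* Assumed HZ. */
        const unsigned long fwd_progress_div = 4;   /* Module-parameter default. */
        unsigned long sd = 21 * hz + 1;             /* stall_dur() + 1. */
        unsigned long sd4 = (sd + fwd_progress_div - 1) / fwd_progress_div;
        unsigned long rnd = 12345;                  /* torture_random() stand-in. */
        unsigned long dur = sd4 + rnd % (sd - sd4);

        printf("sd=%lu sd4=%lu dur=%lu\n", sd, sd4, dur);   /* 21001 5251 17596 */
        return 0;
    }
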
| 1992 | +/* Carry out call_rcu() forward-progress testing. */ |
---|
| 1993 | +static void rcu_torture_fwd_prog_cr(struct rcu_fwd *rfp) |
---|
| 1994 | +{ |
---|
| 1995 | + unsigned long cver; |
---|
| 1996 | + unsigned long flags; |
---|
| 1997 | + unsigned long gps; |
---|
| 1998 | + int i; |
---|
| 1999 | + long n_launders; |
---|
| 2000 | + long n_launders_cb_snap; |
---|
| 2001 | + long n_launders_sa; |
---|
| 2002 | + long n_max_cbs; |
---|
| 2003 | + long n_max_gps; |
---|
| 2004 | + struct rcu_fwd_cb *rfcp; |
---|
| 2005 | + struct rcu_fwd_cb *rfcpn; |
---|
| 2006 | + unsigned long stopat; |
---|
| 2007 | + unsigned long stoppedat; |
---|
| 2008 | + |
---|
| 2009 | + if (READ_ONCE(rcu_fwd_emergency_stop)) |
---|
| 2010 | + return; /* Get out of the way quickly, no GP wait! */ |
---|
| 2011 | + if (!cur_ops->call) |
---|
| 2012 | + return; /* Can't do call_rcu() fwd prog without ->call. */ |
---|
| 2013 | + |
---|
| 2014 | + /* Loop continuously posting RCU callbacks. */ |
---|
| 2015 | + WRITE_ONCE(rcu_fwd_cb_nodelay, true); |
---|
| 2016 | + cur_ops->sync(); /* Later readers see above write. */ |
---|
| 2017 | + WRITE_ONCE(rfp->rcu_fwd_startat, jiffies); |
---|
| 2018 | + stopat = rfp->rcu_fwd_startat + MAX_FWD_CB_JIFFIES; |
---|
| 2019 | + n_launders = 0; |
---|
| 2020 | + rfp->n_launders_cb = 0; // Hoist initialization for multi-kthread |
---|
| 2021 | + n_launders_sa = 0; |
---|
| 2022 | + n_max_cbs = 0; |
---|
| 2023 | + n_max_gps = 0; |
---|
| 2024 | + for (i = 0; i < ARRAY_SIZE(rfp->n_launders_hist); i++) |
---|
| 2025 | + rfp->n_launders_hist[i].n_launders = 0; |
---|
| 2026 | + cver = READ_ONCE(rcu_torture_current_version); |
---|
| 2027 | + gps = cur_ops->get_gp_seq(); |
---|
| 2028 | + rfp->rcu_launder_gp_seq_start = gps; |
---|
| 2029 | + tick_dep_set_task(current, TICK_DEP_BIT_RCU); |
---|
| 2030 | + while (time_before(jiffies, stopat) && |
---|
| 2031 | + !shutdown_time_arrived() && |
---|
| 2032 | + !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) { |
---|
| 2033 | + rfcp = READ_ONCE(rfp->rcu_fwd_cb_head); |
---|
| 2034 | + rfcpn = NULL; |
---|
| 2035 | + if (rfcp) |
---|
| 2036 | + rfcpn = READ_ONCE(rfcp->rfc_next); |
---|
| 2037 | + if (rfcpn) { |
---|
| 2038 | + if (rfcp->rfc_gps >= MIN_FWD_CB_LAUNDERS && |
---|
| 2039 | + ++n_max_gps >= MIN_FWD_CBS_LAUNDERED) |
---|
| 2040 | + break; |
---|
| 2041 | + rfp->rcu_fwd_cb_head = rfcpn; |
---|
| 2042 | + n_launders++; |
---|
| 2043 | + n_launders_sa++; |
---|
| 2044 | + } else { |
---|
| 2045 | + rfcp = kmalloc(sizeof(*rfcp), GFP_KERNEL); |
---|
| 2046 | + if (WARN_ON_ONCE(!rfcp)) { |
---|
| 2047 | + schedule_timeout_interruptible(1); |
---|
| 2048 | + continue; |
---|
| 2049 | + } |
---|
| 2050 | + n_max_cbs++; |
---|
| 2051 | + n_launders_sa = 0; |
---|
| 2052 | + rfcp->rfc_gps = 0; |
---|
| 2053 | + rfcp->rfc_rfp = rfp; |
---|
| 2054 | + } |
---|
| 2055 | + cur_ops->call(&rfcp->rh, rcu_torture_fwd_cb_cr); |
---|
| 2056 | + rcu_torture_fwd_prog_cond_resched(n_launders + n_max_cbs); |
---|
| 2057 | + if (tick_nohz_full_enabled()) { |
---|
| 2058 | + local_irq_save(flags); |
---|
| 2059 | + rcu_momentary_dyntick_idle(); |
---|
| 2060 | + local_irq_restore(flags); |
---|
| 2061 | + } |
---|
| 2062 | + } |
---|
| 2063 | + stoppedat = jiffies; |
---|
| 2064 | + n_launders_cb_snap = READ_ONCE(rfp->n_launders_cb); |
---|
| 2065 | + cver = READ_ONCE(rcu_torture_current_version) - cver; |
---|
| 2066 | + gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps); |
---|
| 2067 | + cur_ops->cb_barrier(); /* Wait for callbacks to be invoked. */ |
---|
| 2068 | + (void)rcu_torture_fwd_prog_cbfree(rfp); |
---|
| 2069 | + |
---|
| 2070 | + if (!torture_must_stop() && !READ_ONCE(rcu_fwd_emergency_stop) && |
---|
| 2071 | + !shutdown_time_arrived()) { |
---|
| 2072 | + WARN_ON(n_max_gps < MIN_FWD_CBS_LAUNDERED); |
---|
| 2073 | + pr_alert("%s Duration %lu barrier: %lu pending %ld n_launders: %ld n_launders_sa: %ld n_max_gps: %ld n_max_cbs: %ld cver %ld gps %ld\n", |
---|
| 2074 | + __func__, |
---|
| 2075 | + stoppedat - rfp->rcu_fwd_startat, jiffies - stoppedat, |
---|
| 2076 | + n_launders + n_max_cbs - n_launders_cb_snap, |
---|
| 2077 | + n_launders, n_launders_sa, |
---|
| 2078 | + n_max_gps, n_max_cbs, cver, gps); |
---|
| 2079 | + rcu_torture_fwd_cb_hist(rfp); |
---|
| 2080 | + } |
---|
| 2081 | + schedule_timeout_uninterruptible(HZ); /* Let CBs drain. */ |
---|
| 2082 | + tick_dep_clear_task(current, TICK_DEP_BIT_RCU); |
---|
| 2083 | + WRITE_ONCE(rcu_fwd_cb_nodelay, false); |
---|
| 2084 | +} |
---|
| 2085 | + |
---|
| 2086 | + |
---|
| 2087 | +/* |
---|
| 2088 | + * OOM notifier, but this only prints diagnostic information for the |
---|
| 2089 | + * current forward-progress test. |
---|
| 2090 | + */ |
---|
| 2091 | +static int rcutorture_oom_notify(struct notifier_block *self, |
---|
| 2092 | + unsigned long notused, void *nfreed) |
---|
| 2093 | +{ |
---|
| 2094 | + struct rcu_fwd *rfp; |
---|
| 2095 | + |
---|
| 2096 | + mutex_lock(&rcu_fwd_mutex); |
---|
| 2097 | + rfp = rcu_fwds; |
---|
| 2098 | + if (!rfp) { |
---|
| 2099 | + mutex_unlock(&rcu_fwd_mutex); |
---|
| 2100 | + return NOTIFY_OK; |
---|
| 2101 | + } |
---|
| 2102 | + WARN(1, "%s invoked upon OOM during forward-progress testing.\n", |
---|
| 2103 | + __func__); |
---|
| 2104 | + rcu_torture_fwd_cb_hist(rfp); |
---|
| 2105 | + rcu_fwd_progress_check(1 + (jiffies - READ_ONCE(rfp->rcu_fwd_startat)) / 2); |
---|
| 2106 | + WRITE_ONCE(rcu_fwd_emergency_stop, true); |
---|
| 2107 | + smp_mb(); /* Emergency stop before free and wait to avoid hangs. */ |
---|
| 2108 | + pr_info("%s: Freed %lu RCU callbacks.\n", |
---|
| 2109 | + __func__, rcu_torture_fwd_prog_cbfree(rfp)); |
---|
| 2110 | + rcu_barrier(); |
---|
| 2111 | + pr_info("%s: Freed %lu RCU callbacks.\n", |
---|
| 2112 | + __func__, rcu_torture_fwd_prog_cbfree(rfp)); |
---|
| 2113 | + rcu_barrier(); |
---|
| 2114 | + pr_info("%s: Freed %lu RCU callbacks.\n", |
---|
| 2115 | + __func__, rcu_torture_fwd_prog_cbfree(rfp)); |
---|
| 2116 | + smp_mb(); /* Frees before return to avoid redoing OOM. */ |
---|
| 2117 | + (*(unsigned long *)nfreed)++; /* Forward progress CBs freed! */ |
---|
| 2118 | + pr_info("%s returning after OOM processing.\n", __func__); |
---|
| 2119 | + mutex_unlock(&rcu_fwd_mutex); |
---|
| 2120 | + return NOTIFY_OK; |
---|
| 2121 | +} |
---|
| 2122 | + |
---|
| 2123 | +static struct notifier_block rcutorture_oom_nb = { |
---|
| 2124 | + .notifier_call = rcutorture_oom_notify |
---|
| 2125 | +}; |
---|
| 2126 | + |
---|
| 2127 | +/* Carry out grace-period forward-progress testing. */ |
---|
| 2128 | +static int rcu_torture_fwd_prog(void *args) |
---|
| 2129 | +{ |
---|
| 2130 | + struct rcu_fwd *rfp = args; |
---|
| 2131 | + int tested = 0; |
---|
| 2132 | + int tested_tries = 0; |
---|
| 2133 | + |
---|
| 2134 | + VERBOSE_TOROUT_STRING("rcu_torture_fwd_progress task started"); |
---|
| 2135 | + rcu_bind_current_to_nocb(); |
---|
| 2136 | + if (!IS_ENABLED(CONFIG_SMP) || !IS_ENABLED(CONFIG_RCU_BOOST)) |
---|
| 2137 | + set_user_nice(current, MAX_NICE); |
---|
| 2138 | + do { |
---|
| 2139 | + schedule_timeout_interruptible(fwd_progress_holdoff * HZ); |
---|
| 2140 | + WRITE_ONCE(rcu_fwd_emergency_stop, false); |
---|
| 2141 | + if (!IS_ENABLED(CONFIG_TINY_RCU) || |
---|
| 2142 | + rcu_inkernel_boot_has_ended()) |
---|
| 2143 | + rcu_torture_fwd_prog_nr(rfp, &tested, &tested_tries); |
---|
| 2144 | + if (rcu_inkernel_boot_has_ended()) |
---|
| 2145 | + rcu_torture_fwd_prog_cr(rfp); |
---|
| 2146 | + |
---|
| 2147 | + /* Avoid slow periods, better to test when busy. */ |
---|
| 2148 | + stutter_wait("rcu_torture_fwd_prog"); |
---|
| 2149 | + } while (!torture_must_stop()); |
---|
| 2150 | + /* Short runs might not contain a valid forward-progress attempt. */ |
---|
| 2151 | + WARN_ON(!tested && tested_tries >= 5); |
---|
| 2152 | + pr_alert("%s: tested %d tested_tries %d\n", __func__, tested, tested_tries); |
---|
| 2153 | + torture_kthread_stopping("rcu_torture_fwd_prog"); |
---|
| 2154 | + return 0; |
---|
| 2155 | +} |
---|
| 2156 | + |
---|
| 2157 | +/* If forward-progress checking is requested and feasible, spawn the thread. */ |
---|
| 2158 | +static int __init rcu_torture_fwd_prog_init(void) |
---|
| 2159 | +{ |
---|
| 2160 | + struct rcu_fwd *rfp; |
---|
| 2161 | + |
---|
| 2162 | + if (!fwd_progress) |
---|
| 2163 | + return 0; /* Not requested, so don't do it. */ |
---|
| 2164 | + if (!cur_ops->stall_dur || cur_ops->stall_dur() <= 0 || |
---|
| 2165 | + cur_ops == &rcu_busted_ops) { |
---|
| 2166 | + VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, unsupported by RCU flavor under test"); |
---|
| 2167 | + return 0; |
---|
| 2168 | + } |
---|
| 2169 | + if (stall_cpu > 0) { |
---|
| 2170 | + VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, conflicts with CPU-stall testing"); |
---|
| 2171 | + if (IS_MODULE(CONFIG_RCU_TORTURE_TEST))
---|
| 2172 | + return -EINVAL; /* In module, can fail back to user. */ |
---|
| 2173 | + WARN_ON(1); /* Make sure rcutorture notices conflict. */ |
---|
| 2174 | + return 0; |
---|
| 2175 | + } |
---|
| 2176 | + if (fwd_progress_holdoff <= 0) |
---|
| 2177 | + fwd_progress_holdoff = 1; |
---|
| 2178 | + if (fwd_progress_div <= 0) |
---|
| 2179 | + fwd_progress_div = 4; |
---|
| 2180 | + rfp = kzalloc(sizeof(*rfp), GFP_KERNEL); |
---|
| 2181 | + if (!rfp) |
---|
| 2182 | + return -ENOMEM; |
---|
| 2183 | + spin_lock_init(&rfp->rcu_fwd_lock); |
---|
| 2184 | + rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head; |
---|
| 2185 | + mutex_lock(&rcu_fwd_mutex); |
---|
| 2186 | + rcu_fwds = rfp; |
---|
| 2187 | + mutex_unlock(&rcu_fwd_mutex); |
---|
| 2188 | + register_oom_notifier(&rcutorture_oom_nb); |
---|
| 2189 | + return torture_create_kthread(rcu_torture_fwd_prog, rfp, fwd_prog_task); |
---|
| 2190 | +} |
---|
| 2191 | + |
---|
| 2192 | +static void rcu_torture_fwd_prog_cleanup(void) |
---|
| 2193 | +{ |
---|
| 2194 | + struct rcu_fwd *rfp; |
---|
| 2195 | + |
---|
| 2196 | + torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_task); |
---|
| 2197 | + rfp = rcu_fwds; |
---|
| 2198 | + mutex_lock(&rcu_fwd_mutex); |
---|
| 2199 | + rcu_fwds = NULL; |
---|
| 2200 | + mutex_unlock(&rcu_fwd_mutex); |
---|
| 2201 | + unregister_oom_notifier(&rcutorture_oom_nb); |
---|
| 2202 | + kfree(rfp); |
---|
1656 | 2203 | } |
---|
1657 | 2204 | |
---|
1658 | 2205 | /* Callback function for RCU barrier testing. */ |
---|
.. | .. |
---|
1661 | 2208 | atomic_inc(&barrier_cbs_invoked); |
---|
1662 | 2209 | } |
---|
1663 | 2210 | |
---|
| 2211 | +/* IPI handler to get callback posted on desired CPU, if online. */ |
---|
| 2212 | +static void rcu_torture_barrier1cb(void *rcu_void) |
---|
| 2213 | +{ |
---|
| 2214 | + struct rcu_head *rhp = rcu_void; |
---|
| 2215 | + |
---|
| 2216 | + cur_ops->call(rhp, rcu_torture_barrier_cbf); |
---|
| 2217 | +} |
---|
| 2218 | + |
---|
1664 | 2219 | /* kthread function to register callbacks used to test RCU barriers. */ |
---|
1665 | 2220 | static int rcu_torture_barrier_cbs(void *arg) |
---|
1666 | 2221 | { |
---|
1667 | 2222 | long myid = (long)arg; |
---|
1668 | | - bool lastphase = 0; |
---|
| 2223 | + bool lastphase = false; |
---|
1669 | 2224 | bool newphase; |
---|
1670 | 2225 | struct rcu_head rcu; |
---|
1671 | 2226 | |
---|
.. | .. |
---|
1684 | 2239 | * The above smp_load_acquire() ensures barrier_phase load |
---|
1685 | 2240 | * is ordered before the following ->call(). |
---|
1686 | 2241 | */ |
---|
1687 | | - local_irq_disable(); /* Just to test no-irq call_rcu(). */ |
---|
1688 | | - cur_ops->call(&rcu, rcu_torture_barrier_cbf); |
---|
1689 | | - local_irq_enable(); |
---|
| 2242 | + if (smp_call_function_single(myid, rcu_torture_barrier1cb, |
---|
| 2243 | + &rcu, 1)) { |
---|
| 2244 | + // IPI failed, so use direct call from current CPU. |
---|
| 2245 | + cur_ops->call(&rcu, rcu_torture_barrier_cbf); |
---|
| 2246 | + } |
---|
1690 | 2247 | if (atomic_dec_and_test(&barrier_cbs_count)) |
---|
1691 | 2248 | wake_up(&barrier_wq); |
---|
1692 | 2249 | } while (!torture_must_stop()); |
---|
.. | .. |
---|
1722 | 2279 | pr_err("barrier_cbs_invoked = %d, n_barrier_cbs = %d\n", |
---|
1723 | 2280 | atomic_read(&barrier_cbs_invoked), |
---|
1724 | 2281 | n_barrier_cbs); |
---|
1725 | | - WARN_ON_ONCE(1); |
---|
| 2282 | + WARN_ON(1); |
---|
| 2283 | + // Wait manually for the remaining callbacks |
---|
| 2284 | + i = 0; |
---|
| 2285 | + do { |
---|
| 2286 | + if (WARN_ON(i++ > HZ)) |
---|
| 2287 | + i = INT_MIN; |
---|
| 2288 | + schedule_timeout_interruptible(1); |
---|
| 2289 | + cur_ops->cb_barrier(); |
---|
| 2290 | + } while (atomic_read(&barrier_cbs_invoked) != |
---|
| 2291 | + n_barrier_cbs && |
---|
| 2292 | + !torture_must_stop()); |
---|
| 2293 | + smp_mb(); // Can't trust ordering if broken. |
---|
| 2294 | + if (!torture_must_stop()) |
---|
| 2295 | + pr_err("Recovered: barrier_cbs_invoked = %d\n", |
---|
| 2296 | + atomic_read(&barrier_cbs_invoked)); |
---|
1726 | 2297 | } else { |
---|
1727 | 2298 | n_barrier_successes++; |
---|
1728 | 2299 | } |
---|
.. | .. |
---|
1812 | 2383 | return true; |
---|
1813 | 2384 | } |
---|
1814 | 2385 | |
---|
| 2386 | +static bool read_exit_child_stop; |
---|
| 2387 | +static bool read_exit_child_stopped; |
---|
| 2388 | +static wait_queue_head_t read_exit_wq; |
---|
| 2389 | + |
---|
| 2390 | +// Child kthread which just does an rcutorture reader and exits. |
---|
| 2391 | +static int rcu_torture_read_exit_child(void *trsp_in) |
---|
| 2392 | +{ |
---|
| 2393 | + struct torture_random_state *trsp = trsp_in; |
---|
| 2394 | + |
---|
| 2395 | + set_user_nice(current, MAX_NICE); |
---|
| 2396 | + // Minimize time between reading and exiting. |
---|
| 2397 | + while (!kthread_should_stop()) |
---|
| 2398 | + schedule_timeout_uninterruptible(1); |
---|
| 2399 | + (void)rcu_torture_one_read(trsp); |
---|
| 2400 | + return 0; |
---|
| 2401 | +} |
---|
| 2402 | + |
---|
| 2403 | +// Parent kthread which creates and destroys read-exit child kthreads. |
---|
| 2404 | +static int rcu_torture_read_exit(void *unused) |
---|
| 2405 | +{ |
---|
| 2406 | + int count = 0; |
---|
| 2407 | + bool errexit = false; |
---|
| 2408 | + int i; |
---|
| 2409 | + struct task_struct *tsp; |
---|
| 2410 | + DEFINE_TORTURE_RANDOM(trs); |
---|
| 2411 | + |
---|
| 2412 | + // Allocate and initialize. |
---|
| 2413 | + set_user_nice(current, MAX_NICE); |
---|
| 2414 | + VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of test"); |
---|
| 2415 | + |
---|
| 2416 | + // Each pass through this loop does one read-exit episode. |
---|
| 2417 | + do { |
---|
| 2418 | + if (++count > read_exit_burst) { |
---|
| 2419 | + VERBOSE_TOROUT_STRING("rcu_torture_read_exit: End of episode"); |
---|
| 2420 | + rcu_barrier(); // Wait for task_struct free, avoid OOM. |
---|
| 2421 | + for (i = 0; i < read_exit_delay; i++) { |
---|
| 2422 | + schedule_timeout_uninterruptible(HZ); |
---|
| 2423 | + if (READ_ONCE(read_exit_child_stop)) |
---|
| 2424 | + break; |
---|
| 2425 | + } |
---|
| 2426 | + if (!READ_ONCE(read_exit_child_stop)) |
---|
| 2427 | + VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of episode"); |
---|
| 2428 | + count = 0; |
---|
| 2429 | + } |
---|
| 2430 | + if (READ_ONCE(read_exit_child_stop)) |
---|
| 2431 | + break; |
---|
| 2432 | + // Spawn child. |
---|
| 2433 | + tsp = kthread_run(rcu_torture_read_exit_child, |
---|
| 2434 | + &trs, "%s", |
---|
| 2435 | + "rcu_torture_read_exit_child"); |
---|
| 2436 | + if (IS_ERR(tsp)) { |
---|
| 2437 | + VERBOSE_TOROUT_ERRSTRING("out of memory"); |
---|
| 2438 | + errexit = true; |
---|
| 2439 | + tsp = NULL; |
---|
| 2440 | + break; |
---|
| 2441 | + } |
---|
| 2442 | + cond_resched(); |
---|
| 2443 | + kthread_stop(tsp); |
---|
| 2444 | + n_read_exits++;
---|
| 2445 | + stutter_wait("rcu_torture_read_exit"); |
---|
| 2446 | + } while (!errexit && !READ_ONCE(read_exit_child_stop)); |
---|
| 2447 | + |
---|
| 2448 | + // Clean up and exit. |
---|
| 2449 | + smp_store_release(&read_exit_child_stopped, true); // After reaping. |
---|
| 2450 | + smp_mb(); // Store before wakeup. |
---|
| 2451 | + wake_up(&read_exit_wq); |
---|
| 2452 | + while (!torture_must_stop()) |
---|
| 2453 | + schedule_timeout_uninterruptible(1); |
---|
| 2454 | + torture_kthread_stopping("rcu_torture_read_exit"); |
---|
| 2455 | + return 0; |
---|
| 2456 | +} |
---|
| 2457 | + |
---|
| 2458 | +static int rcu_torture_read_exit_init(void) |
---|
| 2459 | +{ |
---|
| 2460 | + if (read_exit_burst <= 0) |
---|
| 2461 | + return -EINVAL; |
---|
| 2462 | + init_waitqueue_head(&read_exit_wq); |
---|
| 2463 | + read_exit_child_stop = false; |
---|
| 2464 | + read_exit_child_stopped = false; |
---|
| 2465 | + return torture_create_kthread(rcu_torture_read_exit, NULL, |
---|
| 2466 | + read_exit_task); |
---|
| 2467 | +} |
---|
| 2468 | + |
---|
| 2469 | +static void rcu_torture_read_exit_cleanup(void) |
---|
| 2470 | +{ |
---|
| 2471 | + if (!read_exit_task) |
---|
| 2472 | + return; |
---|
| 2473 | + WRITE_ONCE(read_exit_child_stop, true); |
---|
| 2474 | + smp_mb(); // Above write before wait. |
---|
| 2475 | + wait_event(read_exit_wq, smp_load_acquire(&read_exit_child_stopped)); |
---|
| 2476 | + torture_stop_kthread(rcu_torture_read_exit, read_exit_task);
---|
| 2477 | +} |
---|
| 2478 | + |
---|
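
The shutdown handshake above pairs the parent kthread's release store of read_exit_child_stopped with the acquire load inside wait_event(), so everything the kthread did is guaranteed visible before cleanup proceeds. The same pattern can be modeled in userspace with C11 atomics (a hypothetical model, not kernel code; build with -pthread):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>
    #include <unistd.h>

    static atomic_bool stop_flag, stopped_flag;

    static void *worker(void *unused)
    {
        (void)unused;
        while (!atomic_load_explicit(&stop_flag, memory_order_relaxed))
            usleep(1000);           /* One "episode" per pass. */
        /* Release store: the worker's prior work is visible to any
         * thread that observes stopped_flag with an acquire load. */
        atomic_store_explicit(&stopped_flag, true, memory_order_release);
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        pthread_create(&t, NULL, worker, NULL);
        atomic_store_explicit(&stop_flag, true, memory_order_relaxed);
        while (!atomic_load_explicit(&stopped_flag, memory_order_acquire))
            usleep(1000);           /* Stands in for wait_event(). */
        pthread_join(t, NULL);
        puts("worker stopped cleanly");
        return 0;
    }
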
1815 | 2479 | static enum cpuhp_state rcutor_hp; |
---|
1816 | 2480 | |
---|
1817 | 2481 | static void |
---|
1818 | 2482 | rcu_torture_cleanup(void) |
---|
1819 | 2483 | { |
---|
| 2484 | + int firsttime; |
---|
1820 | 2485 | int flags = 0; |
---|
1821 | 2486 | unsigned long gp_seq = 0; |
---|
1822 | 2487 | int i; |
---|
.. | .. |
---|
1831 | 2496 | return; |
---|
1832 | 2497 | } |
---|
1833 | 2498 | |
---|
| 2499 | + show_rcu_gp_kthreads(); |
---|
| 2500 | + rcu_torture_read_exit_cleanup(); |
---|
1834 | 2501 | rcu_torture_barrier_cleanup(); |
---|
| 2502 | + rcu_torture_fwd_prog_cleanup(); |
---|
1835 | 2503 | torture_stop_kthread(rcu_torture_stall, stall_task); |
---|
1836 | 2504 | torture_stop_kthread(rcu_torture_writer, writer_task); |
---|
1837 | 2505 | |
---|
.. | .. |
---|
1841 | 2509 | reader_tasks[i]); |
---|
1842 | 2510 | kfree(reader_tasks); |
---|
1843 | 2511 | } |
---|
1844 | | - rcu_torture_current = NULL; |
---|
1845 | 2512 | |
---|
1846 | 2513 | if (fakewriter_tasks) { |
---|
1847 | 2514 | for (i = 0; i < nfakewriters; i++) { |
---|
.. | .. |
---|
1854 | 2521 | |
---|
1855 | 2522 | rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq); |
---|
1856 | 2523 | srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq); |
---|
1857 | | - pr_alert("%s: End-test grace-period state: g%lu f%#x\n", |
---|
1858 | | - cur_ops->name, gp_seq, flags); |
---|
| 2524 | + pr_alert("%s: End-test grace-period state: g%ld f%#x total-gps=%ld\n", |
---|
| 2525 | + cur_ops->name, (long)gp_seq, flags, |
---|
| 2526 | + rcutorture_seq_diff(gp_seq, start_gp_seq)); |
---|
1859 | 2527 | torture_stop_kthread(rcu_torture_stats, stats_task); |
---|
1860 | 2528 | torture_stop_kthread(rcu_torture_fqs, fqs_task); |
---|
1861 | | - for (i = 0; i < ncbflooders; i++) |
---|
1862 | | - torture_stop_kthread(rcu_torture_cbflood, cbflood_task[i]); |
---|
1863 | 2529 | if (rcu_torture_can_boost()) |
---|
1864 | 2530 | cpuhp_remove_state(rcutor_hp); |
---|
1865 | 2531 | |
---|
1866 | 2532 | /* |
---|
1867 | | - * Wait for all RCU callbacks to fire, then do flavor-specific |
---|
| 2533 | + * Wait for all RCU callbacks to fire, then do torture-type-specific |
---|
1868 | 2534 | * cleanup operations. |
---|
1869 | 2535 | */ |
---|
1870 | 2536 | if (cur_ops->cb_barrier != NULL) |
---|
.. | .. |
---|
1874 | 2540 | |
---|
1875 | 2541 | rcu_torture_stats_print(); /* -After- the stats thread is stopped! */ |
---|
1876 | 2542 | |
---|
| 2543 | + if (err_segs_recorded) { |
---|
| 2544 | + pr_alert("Failure/close-call rcutorture reader segments:\n"); |
---|
| 2545 | + if (rt_read_nsegs == 0) |
---|
| 2546 | + pr_alert("\t: No segments recorded!!!\n"); |
---|
| 2547 | + firsttime = 1; |
---|
| 2548 | + for (i = 0; i < rt_read_nsegs; i++) { |
---|
| 2549 | + pr_alert("\t%d: %#x ", i, err_segs[i].rt_readstate); |
---|
| 2550 | + if (err_segs[i].rt_delay_jiffies != 0) { |
---|
| 2551 | + pr_cont("%s%ldjiffies", firsttime ? "" : "+", |
---|
| 2552 | + err_segs[i].rt_delay_jiffies); |
---|
| 2553 | + firsttime = 0; |
---|
| 2554 | + } |
---|
| 2555 | + if (err_segs[i].rt_delay_ms != 0) { |
---|
| 2556 | + pr_cont("%s%ldms", firsttime ? "" : "+", |
---|
| 2557 | + err_segs[i].rt_delay_ms); |
---|
| 2558 | + firsttime = 0; |
---|
| 2559 | + } |
---|
| 2560 | + if (err_segs[i].rt_delay_us != 0) { |
---|
| 2561 | + pr_cont("%s%ldus", firsttime ? "" : "+", |
---|
| 2562 | + err_segs[i].rt_delay_us); |
---|
| 2563 | + firsttime = 0; |
---|
| 2564 | + } |
---|
| 2565 | + pr_cont("%s\n", |
---|
| 2566 | + err_segs[i].rt_preempted ? "preempted" : ""); |
---|
| 2567 | + |
---|
| 2568 | + } |
---|
| 2569 | + } |
---|
1877 | 2570 | if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error) |
---|
1878 | 2571 | rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE"); |
---|
1879 | 2572 | else if (torture_onoff_failures()) |
---|
.. | .. |
---|
1940 | 2633 | #endif /* #else #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */ |
---|
1941 | 2634 | } |
---|
1942 | 2635 | |
---|
| 2636 | +static void rcutorture_sync(void) |
---|
| 2637 | +{ |
---|
| 2638 | + static unsigned long n; |
---|
| 2639 | + |
---|
| 2640 | + if (cur_ops->sync && !(++n & 0xfff)) |
---|
| 2641 | + cur_ops->sync(); |
---|
| 2642 | +} |
---|
| 2643 | + |
---|
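
rcutorture_sync() above rate-limits itself to one cur_ops->sync() per 4096 invocations: incrementing n and testing the low twelve bits makes the condition true only when n is a multiple of 4096. A quick userspace check of that arithmetic:

    #include <stdio.h>

    int main(void)
    {
        static unsigned long n;
        unsigned long syncs = 0;

        for (unsigned long calls = 1; calls <= 3 * 4096; calls++)
            if (!(++n & 0xfff))
                syncs++;            /* cur_ops->sync() would run here. */
        printf("syncs=%lu\n", syncs);   /* Prints syncs=3. */
        return 0;
    }
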
1943 | 2644 | static int __init |
---|
1944 | 2645 | rcu_torture_init(void) |
---|
1945 | 2646 | { |
---|
1946 | | - int i; |
---|
| 2647 | + long i; |
---|
1947 | 2648 | int cpu; |
---|
1948 | 2649 | int firsterr = 0; |
---|
| 2650 | + int flags = 0; |
---|
| 2651 | + unsigned long gp_seq = 0; |
---|
1949 | 2652 | static struct rcu_torture_ops *torture_ops[] = { |
---|
1950 | | - &rcu_ops, &rcu_bh_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops, |
---|
1951 | | - &busted_srcud_ops, &sched_ops, &tasks_ops, |
---|
| 2653 | + &rcu_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops, |
---|
| 2654 | + &busted_srcud_ops, &tasks_ops, &tasks_rude_ops, |
---|
| 2655 | + &tasks_tracing_ops, &trivial_ops, |
---|
1952 | 2656 | }; |
---|
1953 | 2657 | |
---|
1954 | 2658 | if (!torture_init_begin(torture_type, verbose)) |
---|
.. | .. |
---|
1967 | 2671 | for (i = 0; i < ARRAY_SIZE(torture_ops); i++) |
---|
1968 | 2672 | pr_cont(" %s", torture_ops[i]->name); |
---|
1969 | 2673 | pr_cont("\n"); |
---|
| 2674 | + WARN_ON(!IS_MODULE(CONFIG_RCU_TORTURE_TEST)); |
---|
1970 | 2675 | firsterr = -EINVAL; |
---|
1971 | 2676 | cur_ops = NULL; |
---|
1972 | 2677 | goto unwind; |
---|
.. | .. |
---|
1986 | 2691 | nrealreaders = 1; |
---|
1987 | 2692 | } |
---|
1988 | 2693 | rcu_torture_print_module_parms(cur_ops, "Start of test"); |
---|
| 2694 | + rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq); |
---|
| 2695 | + srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq); |
---|
| 2696 | + start_gp_seq = gp_seq; |
---|
| 2697 | + pr_alert("%s: Start-test grace-period state: g%ld f%#x\n", |
---|
| 2698 | + cur_ops->name, (long)gp_seq, flags); |
---|
1989 | 2699 | |
---|
1990 | 2700 | /* Set up the freelist. */ |
---|
1991 | 2701 | |
---|
.. | .. |
---|
2018 | 2728 | per_cpu(rcu_torture_batch, cpu)[i] = 0; |
---|
2019 | 2729 | } |
---|
2020 | 2730 | } |
---|
| 2731 | + err_segs_recorded = 0; |
---|
| 2732 | + rt_read_nsegs = 0; |
---|
2021 | 2733 | |
---|
2022 | 2734 | /* Start up the kthreads. */ |
---|
2023 | 2735 | |
---|
.. | .. |
---|
2049 | 2761 | goto unwind; |
---|
2050 | 2762 | } |
---|
2051 | 2763 | for (i = 0; i < nrealreaders; i++) { |
---|
2052 | | - firsterr = torture_create_kthread(rcu_torture_reader, NULL, |
---|
| 2764 | + firsterr = torture_create_kthread(rcu_torture_reader, (void *)i, |
---|
2053 | 2765 | reader_tasks[i]); |
---|
2054 | 2766 | if (firsterr) |
---|
2055 | 2767 | goto unwind; |
---|
.. | .. |
---|
2068 | 2780 | if (stutter < 0) |
---|
2069 | 2781 | stutter = 0; |
---|
2070 | 2782 | if (stutter) { |
---|
2071 | | - firsterr = torture_stutter_init(stutter * HZ); |
---|
| 2783 | + int t; |
---|
| 2784 | + |
---|
| 2785 | + t = cur_ops->stall_dur ? cur_ops->stall_dur() : stutter * HZ; |
---|
| 2786 | + firsterr = torture_stutter_init(stutter * HZ, t); |
---|
2072 | 2787 | if (firsterr) |
---|
2073 | 2788 | goto unwind; |
---|
2074 | 2789 | } |
---|
.. | .. |
---|
2096 | 2811 | goto unwind; |
---|
2097 | 2812 | rcutor_hp = firsterr; |
---|
2098 | 2813 | } |
---|
| 2814 | + shutdown_jiffies = jiffies + shutdown_secs * HZ; |
---|
2099 | 2815 | firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup); |
---|
2100 | 2816 | if (firsterr) |
---|
2101 | 2817 | goto unwind; |
---|
2102 | | - firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval); |
---|
| 2818 | + firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval, |
---|
| 2819 | + rcutorture_sync); |
---|
2103 | 2820 | if (firsterr) |
---|
2104 | 2821 | goto unwind; |
---|
2105 | 2822 | firsterr = rcu_torture_stall_init(); |
---|
2106 | 2823 | if (firsterr) |
---|
2107 | 2824 | goto unwind; |
---|
| 2825 | + firsterr = rcu_torture_fwd_prog_init(); |
---|
| 2826 | + if (firsterr) |
---|
| 2827 | + goto unwind; |
---|
2108 | 2828 | firsterr = rcu_torture_barrier_init(); |
---|
| 2829 | + if (firsterr) |
---|
| 2830 | + goto unwind; |
---|
| 2831 | + firsterr = rcu_torture_read_exit_init(); |
---|
2109 | 2832 | if (firsterr) |
---|
2110 | 2833 | goto unwind; |
---|
2111 | 2834 | if (object_debug) |
---|
2112 | 2835 | rcu_test_debug_objects(); |
---|
2113 | | - if (cbflood_n_burst > 0) { |
---|
2114 | | - /* Create the cbflood threads */ |
---|
2115 | | - ncbflooders = (num_online_cpus() + 3) / 4; |
---|
2116 | | - cbflood_task = kcalloc(ncbflooders, sizeof(*cbflood_task), |
---|
2117 | | - GFP_KERNEL); |
---|
2118 | | - if (!cbflood_task) { |
---|
2119 | | - VERBOSE_TOROUT_ERRSTRING("out of memory"); |
---|
2120 | | - firsterr = -ENOMEM; |
---|
2121 | | - goto unwind; |
---|
2122 | | - } |
---|
2123 | | - for (i = 0; i < ncbflooders; i++) { |
---|
2124 | | - firsterr = torture_create_kthread(rcu_torture_cbflood, |
---|
2125 | | - NULL, |
---|
2126 | | - cbflood_task[i]); |
---|
2127 | | - if (firsterr) |
---|
2128 | | - goto unwind; |
---|
2129 | | - } |
---|
2130 | | - } |
---|
2131 | 2836 | torture_init_end(); |
---|
2132 | 2837 | return 0; |
---|
2133 | 2838 | |
---|