2023-12-09 b22da3d8526a935aa31e086e63f60ff3246cb61c
kernel/kernel/rcu/srcutree.c
@@ -1,24 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * Sleepable Read-Copy Update mechanism for mutual exclusion.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, you can access it online at
- * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright (C) IBM Corporation, 2006
 * Copyright (C) Fujitsu, 2012
 *
- * Author: Paul McKenney <paulmck@us.ibm.com>
+ * Authors: Paul McKenney <paulmck@linux.ibm.com>
 *	   Lai Jiangshan <laijs@cn.fujitsu.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
@@ -38,8 +25,6 @@
 #include <linux/delay.h>
 #include <linux/module.h>
 #include <linux/srcu.h>
-#include <linux/cpu.h>
-#include <linux/locallock.h>
 
 #include "rcu.h"
 #include "rcu_segcblist.h"
@@ -53,9 +38,14 @@
 static ulong counter_wrap_check = (ULONG_MAX >> 2);
 module_param(counter_wrap_check, ulong, 0444);
 
+/* Early-boot callback-management, so early that no lock is required! */
+static LIST_HEAD(srcu_boot_list);
+static bool __read_mostly srcu_init_done;
+
 static void srcu_invoke_callbacks(struct work_struct *work);
-static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay);
+static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay);
 static void process_srcu(struct work_struct *work);
+static void srcu_delay_timer(struct timer_list *t);
 
 /* Wrappers for lock acquisition and release, see raw_spin_lock_rcu_node(). */
 #define spin_lock_rcu_node(p)	\
@@ -90,7 +80,7 @@
 * srcu_read_unlock() running against them. So if the is_static parameter
 * is set, don't initialize ->srcu_lock_count[] and ->srcu_unlock_count[].
 */
-static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static)
+static void init_srcu_struct_nodes(struct srcu_struct *ssp, bool is_static)
 {
 	int cpu;
 	int i;
@@ -100,14 +90,17 @@
 	struct srcu_node *snp;
 	struct srcu_node *snp_first;
 
+	/* Initialize geometry if it has not already been initialized. */
+	rcu_init_geometry();
+
 	/* Work out the overall tree geometry. */
-	sp->level[0] = &sp->node[0];
+	ssp->level[0] = &ssp->node[0];
 	for (i = 1; i < rcu_num_lvls; i++)
-		sp->level[i] = sp->level[i - 1] + num_rcu_lvl[i - 1];
+		ssp->level[i] = ssp->level[i - 1] + num_rcu_lvl[i - 1];
 	rcu_init_levelspread(levelspread, num_rcu_lvl);
 
 	/* Each pass through this loop initializes one srcu_node structure. */
-	rcu_for_each_node_breadth_first(sp, snp) {
+	srcu_for_each_node_breadth_first(ssp, snp) {
 		spin_lock_init(&ACCESS_PRIVATE(snp, lock));
 		WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) !=
			     ARRAY_SIZE(snp->srcu_data_have_cbs));
@@ -118,17 +111,17 @@
 		snp->srcu_gp_seq_needed_exp = 0;
 		snp->grplo = -1;
 		snp->grphi = -1;
-		if (snp == &sp->node[0]) {
+		if (snp == &ssp->node[0]) {
 			/* Root node, special case. */
 			snp->srcu_parent = NULL;
 			continue;
 		}
 
 		/* Non-root node. */
-		if (snp == sp->level[level + 1])
+		if (snp == ssp->level[level + 1])
 			level++;
-		snp->srcu_parent = sp->level[level - 1] +
-				   (snp - sp->level[level]) /
+		snp->srcu_parent = ssp->level[level - 1] +
+				   (snp - ssp->level[level]) /
				   levelspread[level - 1];
 	}
 
@@ -139,14 +132,14 @@
 	WARN_ON_ONCE(ARRAY_SIZE(sdp->srcu_lock_count) !=
		     ARRAY_SIZE(sdp->srcu_unlock_count));
 	level = rcu_num_lvls - 1;
-	snp_first = sp->level[level];
+	snp_first = ssp->level[level];
 	for_each_possible_cpu(cpu) {
-		sdp = per_cpu_ptr(sp->sda, cpu);
+		sdp = per_cpu_ptr(ssp->sda, cpu);
 		spin_lock_init(&ACCESS_PRIVATE(sdp, lock));
 		rcu_segcblist_init(&sdp->srcu_cblist);
 		sdp->srcu_cblist_invoking = false;
-		sdp->srcu_gp_seq_needed = sp->srcu_gp_seq;
-		sdp->srcu_gp_seq_needed_exp = sp->srcu_gp_seq;
+		sdp->srcu_gp_seq_needed = ssp->srcu_gp_seq;
+		sdp->srcu_gp_seq_needed_exp = ssp->srcu_gp_seq;
 		sdp->mynode = &snp_first[cpu / levelspread[level]];
 		for (snp = sdp->mynode; snp != NULL; snp = snp->srcu_parent) {
 			if (snp->grplo < 0)
@@ -154,8 +147,9 @@
 			snp->grphi = cpu;
 		}
 		sdp->cpu = cpu;
-		INIT_DELAYED_WORK(&sdp->work, srcu_invoke_callbacks);
-		sdp->sp = sp;
+		INIT_WORK(&sdp->work, srcu_invoke_callbacks);
+		timer_setup(&sdp->delay_work, srcu_delay_timer, 0);
+		sdp->ssp = ssp;
 		sdp->grpmask = 1 << (cpu - sdp->mynode->grplo);
 		if (is_static)
 			continue;
@@ -174,35 +168,35 @@
 * parameter is passed through to init_srcu_struct_nodes(), and
 * also tells us that ->sda has already been wired up to srcu_data.
 */
-static int init_srcu_struct_fields(struct srcu_struct *sp, bool is_static)
+static int init_srcu_struct_fields(struct srcu_struct *ssp, bool is_static)
 {
-	mutex_init(&sp->srcu_cb_mutex);
-	mutex_init(&sp->srcu_gp_mutex);
-	sp->srcu_idx = 0;
-	sp->srcu_gp_seq = 0;
-	sp->srcu_barrier_seq = 0;
-	mutex_init(&sp->srcu_barrier_mutex);
-	atomic_set(&sp->srcu_barrier_cpu_cnt, 0);
-	INIT_DELAYED_WORK(&sp->work, process_srcu);
+	mutex_init(&ssp->srcu_cb_mutex);
+	mutex_init(&ssp->srcu_gp_mutex);
+	ssp->srcu_idx = 0;
+	ssp->srcu_gp_seq = 0;
+	ssp->srcu_barrier_seq = 0;
+	mutex_init(&ssp->srcu_barrier_mutex);
+	atomic_set(&ssp->srcu_barrier_cpu_cnt, 0);
+	INIT_DELAYED_WORK(&ssp->work, process_srcu);
 	if (!is_static)
-		sp->sda = alloc_percpu(struct srcu_data);
-	init_srcu_struct_nodes(sp, is_static);
-	sp->srcu_gp_seq_needed_exp = 0;
-	sp->srcu_last_gp_end = ktime_get_mono_fast_ns();
-	smp_store_release(&sp->srcu_gp_seq_needed, 0); /* Init done. */
-	return sp->sda ? 0 : -ENOMEM;
+		ssp->sda = alloc_percpu(struct srcu_data);
+	init_srcu_struct_nodes(ssp, is_static);
+	ssp->srcu_gp_seq_needed_exp = 0;
+	ssp->srcu_last_gp_end = ktime_get_mono_fast_ns();
+	smp_store_release(&ssp->srcu_gp_seq_needed, 0); /* Init done. */
+	return ssp->sda ? 0 : -ENOMEM;
 }
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 
-int __init_srcu_struct(struct srcu_struct *sp, const char *name,
+int __init_srcu_struct(struct srcu_struct *ssp, const char *name,
		       struct lock_class_key *key)
 {
 	/* Don't re-initialize a lock while it is held. */
-	debug_check_no_locks_freed((void *)sp, sizeof(*sp));
-	lockdep_init_map(&sp->dep_map, name, key, 0);
-	spin_lock_init(&ACCESS_PRIVATE(sp, lock));
-	return init_srcu_struct_fields(sp, false);
+	debug_check_no_locks_freed((void *)ssp, sizeof(*ssp));
+	lockdep_init_map(&ssp->dep_map, name, key, 0);
+	spin_lock_init(&ACCESS_PRIVATE(ssp, lock));
+	return init_srcu_struct_fields(ssp, false);
 }
 EXPORT_SYMBOL_GPL(__init_srcu_struct);
 
@@ -210,16 +204,16 @@
 
 /**
 * init_srcu_struct - initialize a sleep-RCU structure
- * @sp: structure to initialize.
+ * @ssp: structure to initialize.
 *
 * Must invoke this on a given srcu_struct before passing that srcu_struct
 * to any other function. Each srcu_struct represents a separate domain
 * of SRCU protection.
 */
-int init_srcu_struct(struct srcu_struct *sp)
+int init_srcu_struct(struct srcu_struct *ssp)
 {
-	spin_lock_init(&ACCESS_PRIVATE(sp, lock));
-	return init_srcu_struct_fields(sp, false);
+	spin_lock_init(&ACCESS_PRIVATE(ssp, lock));
+	return init_srcu_struct_fields(ssp, false);
 }
 EXPORT_SYMBOL_GPL(init_srcu_struct);
 
@@ -229,38 +223,37 @@
 * First-use initialization of statically allocated srcu_struct
 * structure. Wiring up the combining tree is more than can be
 * done with compile-time initialization, so this check is added
- * to each update-side SRCU primitive. Use sp->lock, which -is-
+ * to each update-side SRCU primitive. Use ssp->lock, which -is-
 * compile-time initialized, to resolve races involving multiple
 * CPUs trying to garner first-use privileges.
 */
-static void check_init_srcu_struct(struct srcu_struct *sp)
+static void check_init_srcu_struct(struct srcu_struct *ssp)
 {
 	unsigned long flags;
 
-	WARN_ON_ONCE(rcu_scheduler_active == RCU_SCHEDULER_INIT);
 	/* The smp_load_acquire() pairs with the smp_store_release(). */
-	if (!rcu_seq_state(smp_load_acquire(&sp->srcu_gp_seq_needed))) /*^^^*/
+	if (!rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq_needed))) /*^^^*/
 		return; /* Already initialized. */
-	spin_lock_irqsave_rcu_node(sp, flags);
-	if (!rcu_seq_state(sp->srcu_gp_seq_needed)) {
-		spin_unlock_irqrestore_rcu_node(sp, flags);
+	spin_lock_irqsave_rcu_node(ssp, flags);
+	if (!rcu_seq_state(ssp->srcu_gp_seq_needed)) {
+		spin_unlock_irqrestore_rcu_node(ssp, flags);
 		return;
 	}
-	init_srcu_struct_fields(sp, true);
-	spin_unlock_irqrestore_rcu_node(sp, flags);
+	init_srcu_struct_fields(ssp, true);
+	spin_unlock_irqrestore_rcu_node(ssp, flags);
 }
 
 /*
 * Returns approximate total of the readers' ->srcu_lock_count[] values
 * for the rank of per-CPU counters specified by idx.
 */
-static unsigned long srcu_readers_lock_idx(struct srcu_struct *sp, int idx)
+static unsigned long srcu_readers_lock_idx(struct srcu_struct *ssp, int idx)
 {
 	int cpu;
 	unsigned long sum = 0;
 
 	for_each_possible_cpu(cpu) {
-		struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu);
+		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);
 
 		sum += READ_ONCE(cpuc->srcu_lock_count[idx]);
 	}
@@ -271,13 +264,13 @@
 * Returns approximate total of the readers' ->srcu_unlock_count[] values
 * for the rank of per-CPU counters specified by idx.
 */
-static unsigned long srcu_readers_unlock_idx(struct srcu_struct *sp, int idx)
+static unsigned long srcu_readers_unlock_idx(struct srcu_struct *ssp, int idx)
 {
 	int cpu;
 	unsigned long sum = 0;
 
 	for_each_possible_cpu(cpu) {
-		struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu);
+		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);
 
 		sum += READ_ONCE(cpuc->srcu_unlock_count[idx]);
 	}
@@ -288,11 +281,11 @@
 * Return true if the number of pre-existing readers is determined to
 * be zero.
 */
-static bool srcu_readers_active_idx_check(struct srcu_struct *sp, int idx)
+static bool srcu_readers_active_idx_check(struct srcu_struct *ssp, int idx)
 {
 	unsigned long unlocks;
 
-	unlocks = srcu_readers_unlock_idx(sp, idx);
+	unlocks = srcu_readers_unlock_idx(ssp, idx);
 
 	/*
	 * Make sure that a lock is always counted if the corresponding
@@ -328,25 +321,25 @@
	 * of floor(ULONG_MAX/NR_CPUS/2), which should be sufficient,
	 * especially on 64-bit systems.
	 */
-	return srcu_readers_lock_idx(sp, idx) == unlocks;
+	return srcu_readers_lock_idx(ssp, idx) == unlocks;
 }
 
 /**
 * srcu_readers_active - returns true if there are readers. and false
 * otherwise
- * @sp: which srcu_struct to count active readers (holding srcu_read_lock).
+ * @ssp: which srcu_struct to count active readers (holding srcu_read_lock).
 *
 * Note that this is not an atomic primitive, and can therefore suffer
 * severe errors when invoked on an active srcu_struct. That said, it
 * can be useful as an error check at cleanup time.
 */
-static bool srcu_readers_active(struct srcu_struct *sp)
+static bool srcu_readers_active(struct srcu_struct *ssp)
 {
 	int cpu;
 	unsigned long sum = 0;
 
 	for_each_possible_cpu(cpu) {
-		struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu);
+		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);
 
 		sum += READ_ONCE(cpuc->srcu_lock_count[0]);
 		sum += READ_ONCE(cpuc->srcu_lock_count[1]);
@@ -362,58 +355,60 @@
 * Return grace-period delay, zero if there are expedited grace
 * periods pending, SRCU_INTERVAL otherwise.
 */
-static unsigned long srcu_get_delay(struct srcu_struct *sp)
+static unsigned long srcu_get_delay(struct srcu_struct *ssp)
 {
-	if (ULONG_CMP_LT(READ_ONCE(sp->srcu_gp_seq),
-			 READ_ONCE(sp->srcu_gp_seq_needed_exp)))
+	if (ULONG_CMP_LT(READ_ONCE(ssp->srcu_gp_seq),
+			 READ_ONCE(ssp->srcu_gp_seq_needed_exp)))
 		return 0;
 	return SRCU_INTERVAL;
 }
 
-/* Helper for cleanup_srcu_struct() and cleanup_srcu_struct_quiesced(). */
-void _cleanup_srcu_struct(struct srcu_struct *sp, bool quiesced)
+/**
+ * cleanup_srcu_struct - deconstruct a sleep-RCU structure
+ * @ssp: structure to clean up.
+ *
+ * Must invoke this after you are finished using a given srcu_struct that
+ * was initialized via init_srcu_struct(), else you leak memory.
+ */
+void cleanup_srcu_struct(struct srcu_struct *ssp)
 {
 	int cpu;
 
-	if (WARN_ON(!srcu_get_delay(sp)))
+	if (WARN_ON(!srcu_get_delay(ssp)))
 		return; /* Just leak it! */
-	if (WARN_ON(srcu_readers_active(sp)))
+	if (WARN_ON(srcu_readers_active(ssp)))
 		return; /* Just leak it! */
-	if (quiesced) {
-		if (WARN_ON(delayed_work_pending(&sp->work)))
-			return; /* Just leak it! */
-	} else {
-		flush_delayed_work(&sp->work);
+	flush_delayed_work(&ssp->work);
+	for_each_possible_cpu(cpu) {
+		struct srcu_data *sdp = per_cpu_ptr(ssp->sda, cpu);
+
+		del_timer_sync(&sdp->delay_work);
+		flush_work(&sdp->work);
+		if (WARN_ON(rcu_segcblist_n_cbs(&sdp->srcu_cblist)))
+			return; /* Forgot srcu_barrier(), so just leak it! */
 	}
-	for_each_possible_cpu(cpu)
-		if (quiesced) {
-			if (WARN_ON(delayed_work_pending(&per_cpu_ptr(sp->sda, cpu)->work)))
-				return; /* Just leak it! */
-		} else {
-			flush_delayed_work(&per_cpu_ptr(sp->sda, cpu)->work);
-		}
-	if (WARN_ON(rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) != SRCU_STATE_IDLE) ||
-	    WARN_ON(srcu_readers_active(sp))) {
+	if (WARN_ON(rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) != SRCU_STATE_IDLE) ||
+	    WARN_ON(srcu_readers_active(ssp))) {
 		pr_info("%s: Active srcu_struct %p state: %d\n",
-			__func__, sp, rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)));
+			__func__, ssp, rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)));
 		return; /* Caller forgot to stop doing call_srcu()? */
 	}
-	free_percpu(sp->sda);
-	sp->sda = NULL;
+	free_percpu(ssp->sda);
+	ssp->sda = NULL;
 }
-EXPORT_SYMBOL_GPL(_cleanup_srcu_struct);
+EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
 
 /*
 * Counts the new reader in the appropriate per-CPU element of the
 * srcu_struct.
 * Returns an index that must be passed to the matching srcu_read_unlock().
 */
-int __srcu_read_lock(struct srcu_struct *sp)
+int __srcu_read_lock(struct srcu_struct *ssp)
 {
 	int idx;
 
-	idx = READ_ONCE(sp->srcu_idx) & 0x1;
-	this_cpu_inc(sp->sda->srcu_lock_count[idx]);
+	idx = READ_ONCE(ssp->srcu_idx) & 0x1;
+	this_cpu_inc(ssp->sda->srcu_lock_count[idx]);
 	smp_mb(); /* B */ /* Avoid leaking the critical section. */
 	return idx;
 }
@@ -424,10 +419,10 @@
 * element of the srcu_struct. Note that this may well be a different
 * CPU than that which was incremented by the corresponding srcu_read_lock().
 */
-void __srcu_read_unlock(struct srcu_struct *sp, int idx)
+void __srcu_read_unlock(struct srcu_struct *ssp, int idx)
 {
 	smp_mb(); /* C */ /* Avoid leaking the critical section. */
-	this_cpu_inc(sp->sda->srcu_unlock_count[idx]);
+	this_cpu_inc(ssp->sda->srcu_unlock_count[idx]);
 }
 EXPORT_SYMBOL_GPL(__srcu_read_unlock);
 
@@ -443,43 +438,42 @@
 /*
 * Start an SRCU grace period.
 */
-static void srcu_gp_start(struct srcu_struct *sp)
+static void srcu_gp_start(struct srcu_struct *ssp)
 {
-	struct srcu_data *sdp = this_cpu_ptr(sp->sda);
+	struct srcu_data *sdp = this_cpu_ptr(ssp->sda);
 	int state;
 
-	lockdep_assert_held(&ACCESS_PRIVATE(sp, lock));
-	WARN_ON_ONCE(ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed));
+	lockdep_assert_held(&ACCESS_PRIVATE(ssp, lock));
+	WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed));
 	spin_lock_rcu_node(sdp); /* Interrupts already disabled. */
 	rcu_segcblist_advance(&sdp->srcu_cblist,
-			      rcu_seq_current(&sp->srcu_gp_seq));
+			      rcu_seq_current(&ssp->srcu_gp_seq));
 	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
-				       rcu_seq_snap(&sp->srcu_gp_seq));
+				       rcu_seq_snap(&ssp->srcu_gp_seq));
 	spin_unlock_rcu_node(sdp); /* Interrupts remain disabled. */
 	smp_mb(); /* Order prior store to ->srcu_gp_seq_needed vs. GP start. */
-	rcu_seq_start(&sp->srcu_gp_seq);
-	state = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq));
+	rcu_seq_start(&ssp->srcu_gp_seq);
+	state = rcu_seq_state(ssp->srcu_gp_seq);
 	WARN_ON_ONCE(state != SRCU_STATE_SCAN1);
 }
 
-/*
- * Place the workqueue handler on the specified CPU if online, otherwise
- * just run it whereever. This is useful for placing workqueue handlers
- * that are to invoke the specified CPU's callbacks.
- */
-static bool srcu_queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
-				       struct delayed_work *dwork,
+
+static void srcu_delay_timer(struct timer_list *t)
+{
+	struct srcu_data *sdp = container_of(t, struct srcu_data, delay_work);
+
+	queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
+}
+
+static void srcu_queue_delayed_work_on(struct srcu_data *sdp,
				       unsigned long delay)
 {
-	bool ret;
+	if (!delay) {
+		queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
+		return;
+	}
 
-	cpus_read_lock();
-	if (cpu_online(cpu))
-		ret = queue_delayed_work_on(cpu, wq, dwork, delay);
-	else
-		ret = queue_delayed_work(wq, dwork, delay);
-	cpus_read_unlock();
-	return ret;
+	timer_reduce(&sdp->delay_work, jiffies + delay);
 }
 
 /*
@@ -488,7 +482,7 @@
 */
 static void srcu_schedule_cbs_sdp(struct srcu_data *sdp, unsigned long delay)
 {
-	srcu_queue_delayed_work_on(sdp->cpu, rcu_gp_wq, &sdp->work, delay);
+	srcu_queue_delayed_work_on(sdp, delay);
 }
 
 /*
@@ -497,7 +491,7 @@
 * just-completed grace period, the one corresponding to idx. If possible,
 * schedule this invocation on the corresponding CPUs.
 */
-static void srcu_schedule_cbs_snp(struct srcu_struct *sp, struct srcu_node *snp,
+static void srcu_schedule_cbs_snp(struct srcu_struct *ssp, struct srcu_node *snp,
				  unsigned long mask, unsigned long delay)
 {
 	int cpu;
@@ -505,7 +499,7 @@
 	for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
 		if (!(mask & (1 << (cpu - snp->grplo))))
 			continue;
-		srcu_schedule_cbs_sdp(per_cpu_ptr(sp->sda, cpu), delay);
+		srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, cpu), delay);
 	}
 }
 
@@ -518,7 +512,7 @@
 * are initiating callback invocation. This allows the ->srcu_have_cbs[]
 * array to have a finite number of elements.
 */
-static void srcu_gp_end(struct srcu_struct *sp)
+static void srcu_gp_end(struct srcu_struct *ssp)
 {
 	unsigned long cbdelay;
 	bool cbs;
@@ -532,44 +526,44 @@
 	struct srcu_node *snp;
 
 	/* Prevent more than one additional grace period. */
-	mutex_lock(&sp->srcu_cb_mutex);
+	mutex_lock(&ssp->srcu_cb_mutex);
 
 	/* End the current grace period. */
-	spin_lock_irq_rcu_node(sp);
-	idx = rcu_seq_state(sp->srcu_gp_seq);
+	spin_lock_irq_rcu_node(ssp);
+	idx = rcu_seq_state(ssp->srcu_gp_seq);
 	WARN_ON_ONCE(idx != SRCU_STATE_SCAN2);
-	cbdelay = srcu_get_delay(sp);
-	sp->srcu_last_gp_end = ktime_get_mono_fast_ns();
-	rcu_seq_end(&sp->srcu_gp_seq);
-	gpseq = rcu_seq_current(&sp->srcu_gp_seq);
-	if (ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, gpseq))
-		sp->srcu_gp_seq_needed_exp = gpseq;
-	spin_unlock_irq_rcu_node(sp);
-	mutex_unlock(&sp->srcu_gp_mutex);
+	cbdelay = srcu_get_delay(ssp);
+	WRITE_ONCE(ssp->srcu_last_gp_end, ktime_get_mono_fast_ns());
+	rcu_seq_end(&ssp->srcu_gp_seq);
+	gpseq = rcu_seq_current(&ssp->srcu_gp_seq);
+	if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, gpseq))
+		WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, gpseq);
+	spin_unlock_irq_rcu_node(ssp);
+	mutex_unlock(&ssp->srcu_gp_mutex);
 	/* A new grace period can start at this point. But only one. */
 
 	/* Initiate callback invocation as needed. */
 	idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs);
-	rcu_for_each_node_breadth_first(sp, snp) {
+	srcu_for_each_node_breadth_first(ssp, snp) {
 		spin_lock_irq_rcu_node(snp);
 		cbs = false;
-		last_lvl = snp >= sp->level[rcu_num_lvls - 1];
+		last_lvl = snp >= ssp->level[rcu_num_lvls - 1];
 		if (last_lvl)
 			cbs = snp->srcu_have_cbs[idx] == gpseq;
 		snp->srcu_have_cbs[idx] = gpseq;
 		rcu_seq_set_state(&snp->srcu_have_cbs[idx], 1);
 		if (ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, gpseq))
-			snp->srcu_gp_seq_needed_exp = gpseq;
+			WRITE_ONCE(snp->srcu_gp_seq_needed_exp, gpseq);
 		mask = snp->srcu_data_have_cbs[idx];
 		snp->srcu_data_have_cbs[idx] = 0;
 		spin_unlock_irq_rcu_node(snp);
 		if (cbs)
-			srcu_schedule_cbs_snp(sp, snp, mask, cbdelay);
+			srcu_schedule_cbs_snp(ssp, snp, mask, cbdelay);
 
 		/* Occasionally prevent srcu_data counter wrap. */
 		if (!(gpseq & counter_wrap_check) && last_lvl)
 			for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
-				sdp = per_cpu_ptr(sp->sda, cpu);
+				sdp = per_cpu_ptr(ssp->sda, cpu);
				spin_lock_irqsave_rcu_node(sdp, flags);
				if (ULONG_CMP_GE(gpseq,
						 sdp->srcu_gp_seq_needed + 100))
@@ -582,18 +576,18 @@
 	}
 
 	/* Callback initiation done, allow grace periods after next. */
-	mutex_unlock(&sp->srcu_cb_mutex);
+	mutex_unlock(&ssp->srcu_cb_mutex);
 
 	/* Start a new grace period if needed. */
-	spin_lock_irq_rcu_node(sp);
-	gpseq = rcu_seq_current(&sp->srcu_gp_seq);
+	spin_lock_irq_rcu_node(ssp);
+	gpseq = rcu_seq_current(&ssp->srcu_gp_seq);
 	if (!rcu_seq_state(gpseq) &&
-	    ULONG_CMP_LT(gpseq, sp->srcu_gp_seq_needed)) {
-		srcu_gp_start(sp);
-		spin_unlock_irq_rcu_node(sp);
-		srcu_reschedule(sp, 0);
+	    ULONG_CMP_LT(gpseq, ssp->srcu_gp_seq_needed)) {
+		srcu_gp_start(ssp);
+		spin_unlock_irq_rcu_node(ssp);
+		srcu_reschedule(ssp, 0);
 	} else {
-		spin_unlock_irq_rcu_node(sp);
+		spin_unlock_irq_rcu_node(ssp);
 	}
 }
 
@@ -604,13 +598,13 @@
 * but without expediting. To start a completely new grace period,
 * whether expedited or not, use srcu_funnel_gp_start() instead.
 */
-static void srcu_funnel_exp_start(struct srcu_struct *sp, struct srcu_node *snp,
+static void srcu_funnel_exp_start(struct srcu_struct *ssp, struct srcu_node *snp,
				  unsigned long s)
 {
 	unsigned long flags;
 
 	for (; snp != NULL; snp = snp->srcu_parent) {
-		if (rcu_seq_done(&sp->srcu_gp_seq, s) ||
+		if (rcu_seq_done(&ssp->srcu_gp_seq, s) ||
		    ULONG_CMP_GE(READ_ONCE(snp->srcu_gp_seq_needed_exp), s))
 			return;
 		spin_lock_irqsave_rcu_node(snp, flags);
@@ -621,10 +615,10 @@
 		WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
 		spin_unlock_irqrestore_rcu_node(snp, flags);
 	}
-	spin_lock_irqsave_rcu_node(sp, flags);
-	if (ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, s))
-		sp->srcu_gp_seq_needed_exp = s;
-	spin_unlock_irqrestore_rcu_node(sp, flags);
+	spin_lock_irqsave_rcu_node(ssp, flags);
+	if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, s))
+		WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, s);
+	spin_unlock_irqrestore_rcu_node(ssp, flags);
 }
 
 /*
@@ -637,7 +631,7 @@
 * Note that this function also does the work of srcu_funnel_exp_start(),
 * in some cases by directly invoking it.
 */
-static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp,
+static void srcu_funnel_gp_start(struct srcu_struct *ssp, struct srcu_data *sdp,
				 unsigned long s, bool do_norm)
 {
 	unsigned long flags;
@@ -647,7 +641,7 @@
 
 	/* Each pass through the loop does one level of the srcu_node tree. */
 	for (; snp != NULL; snp = snp->srcu_parent) {
-		if (rcu_seq_done(&sp->srcu_gp_seq, s) && snp != sdp->mynode)
+		if (rcu_seq_done(&ssp->srcu_gp_seq, s) && snp != sdp->mynode)
 			return; /* GP already done and CBs recorded. */
 		spin_lock_irqsave_rcu_node(snp, flags);
 		if (ULONG_CMP_GE(snp->srcu_have_cbs[idx], s)) {
@@ -662,37 +656,41 @@
				return;
			}
			if (!do_norm)
-				srcu_funnel_exp_start(sp, snp, s);
+				srcu_funnel_exp_start(ssp, snp, s);
			return;
		}
		snp->srcu_have_cbs[idx] = s;
		if (snp == sdp->mynode)
			snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
		if (!do_norm && ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, s))
-			snp->srcu_gp_seq_needed_exp = s;
+			WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
		spin_unlock_irqrestore_rcu_node(snp, flags);
	}
 
	/* Top of tree, must ensure the grace period will be started. */
-	spin_lock_irqsave_rcu_node(sp, flags);
-	if (ULONG_CMP_LT(sp->srcu_gp_seq_needed, s)) {
+	spin_lock_irqsave_rcu_node(ssp, flags);
+	if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed, s)) {
		/*
		 * Record need for grace period s. Pair with load
		 * acquire setting up for initialization.
		 */
-		smp_store_release(&sp->srcu_gp_seq_needed, s); /*^^^*/
+		smp_store_release(&ssp->srcu_gp_seq_needed, s); /*^^^*/
	}
-	if (!do_norm && ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, s))
-		sp->srcu_gp_seq_needed_exp = s;
+	if (!do_norm && ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, s))
+		WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, s);
 
	/* If grace period not already done and none in progress, start it. */
-	if (!rcu_seq_done(&sp->srcu_gp_seq, s) &&
-	    rcu_seq_state(sp->srcu_gp_seq) == SRCU_STATE_IDLE) {
-		WARN_ON_ONCE(ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed));
-		srcu_gp_start(sp);
-		queue_delayed_work(rcu_gp_wq, &sp->work, srcu_get_delay(sp));
+	if (!rcu_seq_done(&ssp->srcu_gp_seq, s) &&
+	    rcu_seq_state(ssp->srcu_gp_seq) == SRCU_STATE_IDLE) {
+		WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed));
+		srcu_gp_start(ssp);
+		if (likely(srcu_init_done))
+			queue_delayed_work(rcu_gp_wq, &ssp->work,
+					   srcu_get_delay(ssp));
+		else if (list_empty(&ssp->work.work.entry))
+			list_add(&ssp->work.work.entry, &srcu_boot_list);
	}
-	spin_unlock_irqrestore_rcu_node(sp, flags);
+	spin_unlock_irqrestore_rcu_node(ssp, flags);
 }
 
 /*
@@ -700,12 +698,12 @@
 * loop an additional time if there is an expedited grace period pending.
 * The caller must ensure that ->srcu_idx is not changed while checking.
 */
-static bool try_check_zero(struct srcu_struct *sp, int idx, int trycount)
+static bool try_check_zero(struct srcu_struct *ssp, int idx, int trycount)
 {
	for (;;) {
-		if (srcu_readers_active_idx_check(sp, idx))
+		if (srcu_readers_active_idx_check(ssp, idx))
			return true;
-		if (--trycount + !srcu_get_delay(sp) <= 0)
+		if (--trycount + !srcu_get_delay(ssp) <= 0)
			return false;
		udelay(SRCU_RETRY_CHECK_DELAY);
	}
@@ -716,7 +714,7 @@
 * use the other rank of the ->srcu_(un)lock_count[] arrays. This allows
 * us to wait for pre-existing readers in a starvation-free manner.
 */
-static void srcu_flip(struct srcu_struct *sp)
+static void srcu_flip(struct srcu_struct *ssp)
 {
	/*
	 * Ensure that if this updater saw a given reader's increment
@@ -728,7 +726,7 @@
	 */
	smp_mb(); /* E */ /* Pairs with B and C. */
 
-	WRITE_ONCE(sp->srcu_idx, sp->srcu_idx + 1);
+	WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1);
 
	/*
	 * Ensure that if the updater misses an __srcu_read_unlock()
@@ -758,26 +756,26 @@
 * it, if this function was preempted for enough time for the counters
 * to wrap, it really doesn't matter whether or not we expedite the grace
 * period. The extra overhead of a needlessly expedited grace period is
- * negligible when amoritized over that time period, and the extra latency
+ * negligible when amortized over that time period, and the extra latency
 * of a needlessly non-expedited grace period is similarly negligible.
 */
-static DEFINE_LOCAL_IRQ_LOCK(sp_llock);
-
-static bool srcu_might_be_idle(struct srcu_struct *sp)
+static bool srcu_might_be_idle(struct srcu_struct *ssp)
 {
	unsigned long curseq;
	unsigned long flags;
	struct srcu_data *sdp;
	unsigned long t;
+	unsigned long tlast;
 
+	check_init_srcu_struct(ssp);
	/* If the local srcu_data structure has callbacks, not idle. */
-	local_lock_irqsave(sp_llock, flags);
-	sdp = this_cpu_ptr(sp->sda);
+	sdp = raw_cpu_ptr(ssp->sda);
+	spin_lock_irqsave_rcu_node(sdp, flags);
	if (rcu_segcblist_pend_cbs(&sdp->srcu_cblist)) {
-		local_unlock_irqrestore(sp_llock, flags);
+		spin_unlock_irqrestore_rcu_node(sdp, flags);
		return false; /* Callbacks already present, so not idle. */
	}
-	local_unlock_irqrestore(sp_llock, flags);
+	spin_unlock_irqrestore_rcu_node(sdp, flags);
 
	/*
	 * No local callbacks, so probabalistically probe global state.
@@ -787,18 +785,18 @@
 
	/* First, see if enough time has passed since the last GP. */
	t = ktime_get_mono_fast_ns();
+	tlast = READ_ONCE(ssp->srcu_last_gp_end);
	if (exp_holdoff == 0 ||
-	    time_in_range_open(t, sp->srcu_last_gp_end,
-			       sp->srcu_last_gp_end + exp_holdoff))
+	    time_in_range_open(t, tlast, tlast + exp_holdoff))
		return false; /* Too soon after last GP. */
 
	/* Next, check for probable idleness. */
-	curseq = rcu_seq_current(&sp->srcu_gp_seq);
+	curseq = rcu_seq_current(&ssp->srcu_gp_seq);
	smp_mb(); /* Order ->srcu_gp_seq with ->srcu_gp_seq_needed. */
-	if (ULONG_CMP_LT(curseq, READ_ONCE(sp->srcu_gp_seq_needed)))
+	if (ULONG_CMP_LT(curseq, READ_ONCE(ssp->srcu_gp_seq_needed)))
		return false; /* Grace period in progress, so not idle. */
	smp_mb(); /* Order ->srcu_gp_seq with prior access. */
-	if (curseq != rcu_seq_current(&sp->srcu_gp_seq))
+	if (curseq != rcu_seq_current(&ssp->srcu_gp_seq))
		return false; /* GP # changed, so not idle. */
	return true; /* With reasonable probability, idle! */
 }
@@ -808,6 +806,46 @@
 */
 static void srcu_leak_callback(struct rcu_head *rhp)
 {
+}
+
+/*
+ * Start an SRCU grace period, and also queue the callback if non-NULL.
+ */
+static unsigned long srcu_gp_start_if_needed(struct srcu_struct *ssp,
+					     struct rcu_head *rhp, bool do_norm)
+{
+	unsigned long flags;
+	int idx;
+	bool needexp = false;
+	bool needgp = false;
+	unsigned long s;
+	struct srcu_data *sdp;
+
+	check_init_srcu_struct(ssp);
+	idx = srcu_read_lock(ssp);
+	sdp = raw_cpu_ptr(ssp->sda);
+	spin_lock_irqsave_rcu_node(sdp, flags);
+	if (rhp)
+		rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp);
+	rcu_segcblist_advance(&sdp->srcu_cblist,
+			      rcu_seq_current(&ssp->srcu_gp_seq));
+	s = rcu_seq_snap(&ssp->srcu_gp_seq);
+	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s);
+	if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) {
+		sdp->srcu_gp_seq_needed = s;
+		needgp = true;
+	}
+	if (!do_norm && ULONG_CMP_LT(sdp->srcu_gp_seq_needed_exp, s)) {
+		sdp->srcu_gp_seq_needed_exp = s;
+		needexp = true;
+	}
+	spin_unlock_irqrestore_rcu_node(sdp, flags);
+	if (needgp)
+		srcu_funnel_gp_start(ssp, sdp, s, do_norm);
+	else if (needexp)
+		srcu_funnel_exp_start(ssp, sdp->mynode, s);
+	srcu_read_unlock(ssp, idx);
+	return s;
 }
 
 /*
@@ -838,16 +876,9 @@
 * srcu_read_lock(), and srcu_read_unlock() that are all passed the same
 * srcu_struct structure.
 */
-void __call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
-		 rcu_callback_t func, bool do_norm)
+static void __call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
+			rcu_callback_t func, bool do_norm)
 {
-	unsigned long flags;
-	bool needexp = false;
-	bool needgp = false;
-	unsigned long s;
-	struct srcu_data *sdp;
-
-	check_init_srcu_struct(sp);
	if (debug_rcu_head_queue(rhp)) {
		/* Probable double call_srcu(), so leak the callback. */
		WRITE_ONCE(rhp->func, srcu_leak_callback);
@@ -855,33 +886,12 @@
		return;
	}
	rhp->func = func;
-	local_lock_irqsave(sp_llock, flags);
-	sdp = this_cpu_ptr(sp->sda);
-	spin_lock_rcu_node(sdp);
-	rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp, false);
-	rcu_segcblist_advance(&sdp->srcu_cblist,
-			      rcu_seq_current(&sp->srcu_gp_seq));
-	s = rcu_seq_snap(&sp->srcu_gp_seq);
-	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s);
-	if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) {
-		sdp->srcu_gp_seq_needed = s;
-		needgp = true;
-	}
-	if (!do_norm && ULONG_CMP_LT(sdp->srcu_gp_seq_needed_exp, s)) {
-		sdp->srcu_gp_seq_needed_exp = s;
-		needexp = true;
-	}
-	spin_unlock_rcu_node(sdp);
-	local_unlock_irqrestore(sp_llock, flags);
-	if (needgp)
-		srcu_funnel_gp_start(sp, sdp, s, do_norm);
-	else if (needexp)
-		srcu_funnel_exp_start(sp, sdp->mynode, s);
+	(void)srcu_gp_start_if_needed(ssp, rhp, do_norm);
 }
 
 /**
 * call_srcu() - Queue a callback for invocation after an SRCU grace period
- * @sp: srcu_struct in queue the callback
+ * @ssp: srcu_struct in queue the callback
 * @rhp: structure to be used for queueing the SRCU callback.
 * @func: function to be invoked after the SRCU grace period
 *
@@ -896,21 +906,21 @@
 * The callback will be invoked from process context, but must nevertheless
 * be fast and must not block.
 */
-void call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
+void call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
	       rcu_callback_t func)
 {
-	__call_srcu(sp, rhp, func, true);
+	__call_srcu(ssp, rhp, func, true);
 }
 EXPORT_SYMBOL_GPL(call_srcu);
 
 /*
 * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
 */
-static void __synchronize_srcu(struct srcu_struct *sp, bool do_norm)
+static void __synchronize_srcu(struct srcu_struct *ssp, bool do_norm)
 {
	struct rcu_synchronize rcu;
 
-	RCU_LOCKDEP_WARN(lock_is_held(&sp->dep_map) ||
+	RCU_LOCKDEP_WARN(lock_is_held(&ssp->dep_map) ||
			 lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
@@ -919,10 +929,10 @@
	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
		return;
	might_sleep();
-	check_init_srcu_struct(sp);
+	check_init_srcu_struct(ssp);
	init_completion(&rcu.completion);
	init_rcu_head_on_stack(&rcu.head);
-	__call_srcu(sp, &rcu.head, wakeme_after_rcu, do_norm);
+	__call_srcu(ssp, &rcu.head, wakeme_after_rcu, do_norm);
	wait_for_completion(&rcu.completion);
	destroy_rcu_head_on_stack(&rcu.head);
 
@@ -938,7 +948,7 @@
 
 /**
 * synchronize_srcu_expedited - Brute-force SRCU grace period
- * @sp: srcu_struct with which to synchronize.
+ * @ssp: srcu_struct with which to synchronize.
 *
 * Wait for an SRCU grace period to elapse, but be more aggressive about
 * spinning rather than blocking when waiting.
@@ -946,15 +956,15 @@
 * Note that synchronize_srcu_expedited() has the same deadlock and
 * memory-ordering properties as does synchronize_srcu().
 */
-void synchronize_srcu_expedited(struct srcu_struct *sp)
+void synchronize_srcu_expedited(struct srcu_struct *ssp)
 {
-	__synchronize_srcu(sp, rcu_gp_is_normal());
+	__synchronize_srcu(ssp, rcu_gp_is_normal());
 }
 EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);
 
 /**
 * synchronize_srcu - wait for prior SRCU read-side critical-section completion
- * @sp: srcu_struct with which to synchronize.
+ * @ssp: srcu_struct with which to synchronize.
 *
 * Wait for the count to drain to zero of both indexes. To avoid the
 * possible starvation of synchronize_srcu(), it waits for the count of
@@ -972,7 +982,7 @@
 * There are memory-ordering constraints implied by synchronize_srcu().
 * On systems with more than one CPU, when synchronize_srcu() returns,
 * each CPU is guaranteed to have executed a full memory barrier since
- * the end of its last corresponding SRCU-sched read-side critical section
+ * the end of its last corresponding SRCU read-side critical section
 * whose beginning preceded the call to synchronize_srcu(). In addition,
 * each CPU having an SRCU read-side critical section that extends beyond
 * the return from synchronize_srcu() is guaranteed to have executed a
@@ -996,14 +1006,70 @@
 * SRCU must also provide it. Note that detecting idleness is heuristic
 * and subject to both false positives and negatives.
 */
-void synchronize_srcu(struct srcu_struct *sp)
+void synchronize_srcu(struct srcu_struct *ssp)
 {
-	if (srcu_might_be_idle(sp) || rcu_gp_is_expedited())
-		synchronize_srcu_expedited(sp);
+	if (srcu_might_be_idle(ssp) || rcu_gp_is_expedited())
+		synchronize_srcu_expedited(ssp);
	else
-		__synchronize_srcu(sp, true);
+		__synchronize_srcu(ssp, true);
 }
 EXPORT_SYMBOL_GPL(synchronize_srcu);
+
+/**
+ * get_state_synchronize_srcu - Provide an end-of-grace-period cookie
+ * @ssp: srcu_struct to provide cookie for.
+ *
+ * This function returns a cookie that can be passed to
+ * poll_state_synchronize_srcu(), which will return true if a full grace
+ * period has elapsed in the meantime. It is the caller's responsibility
+ * to make sure that grace period happens, for example, by invoking
+ * call_srcu() after return from get_state_synchronize_srcu().
+ */
+unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp)
+{
+	// Any prior manipulation of SRCU-protected data must happen
+	// before the load from ->srcu_gp_seq.
+	smp_mb();
+	return rcu_seq_snap(&ssp->srcu_gp_seq);
+}
+EXPORT_SYMBOL_GPL(get_state_synchronize_srcu);
+
+/**
+ * start_poll_synchronize_srcu - Provide cookie and start grace period
+ * @ssp: srcu_struct to provide cookie for.
+ *
+ * This function returns a cookie that can be passed to
+ * poll_state_synchronize_srcu(), which will return true if a full grace
+ * period has elapsed in the meantime. Unlike get_state_synchronize_srcu(),
+ * this function also ensures that any needed SRCU grace period will be
+ * started. This convenience does come at a cost in terms of CPU overhead.
+ */
+unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp)
+{
+	return srcu_gp_start_if_needed(ssp, NULL, true);
+}
+EXPORT_SYMBOL_GPL(start_poll_synchronize_srcu);
+
+/**
+ * poll_state_synchronize_srcu - Has cookie's grace period ended?
+ * @ssp: srcu_struct to provide cookie for.
+ * @cookie: Return value from get_state_synchronize_srcu() or start_poll_synchronize_srcu().
+ *
+ * This function takes the cookie that was returned from either
+ * get_state_synchronize_srcu() or start_poll_synchronize_srcu(), and
+ * returns @true if an SRCU grace period elapsed since the time that the
+ * cookie was created.
+ */
+bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie)
+{
+	if (!rcu_seq_done(&ssp->srcu_gp_seq, cookie))
+		return false;
+	// Ensure that the end of the SRCU grace period happens before
+	// any subsequent code that the caller might execute.
+	smp_mb(); // ^^^
+	return true;
+}
+EXPORT_SYMBOL_GPL(poll_state_synchronize_srcu);
 
 /*
 * Callback function for srcu_barrier() use.
@@ -1011,36 +1077,36 @@
 static void srcu_barrier_cb(struct rcu_head *rhp)
 {
	struct srcu_data *sdp;
-	struct srcu_struct *sp;
+	struct srcu_struct *ssp;
 
	sdp = container_of(rhp, struct srcu_data, srcu_barrier_head);
-	sp = sdp->sp;
-	if (atomic_dec_and_test(&sp->srcu_barrier_cpu_cnt))
-		complete(&sp->srcu_barrier_completion);
+	ssp = sdp->ssp;
+	if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt))
+		complete(&ssp->srcu_barrier_completion);
 }
 
 /**
 * srcu_barrier - Wait until all in-flight call_srcu() callbacks complete.
- * @sp: srcu_struct on which to wait for in-flight callbacks.
+ * @ssp: srcu_struct on which to wait for in-flight callbacks.
 */
-void srcu_barrier(struct srcu_struct *sp)
+void srcu_barrier(struct srcu_struct *ssp)
 {
	int cpu;
	struct srcu_data *sdp;
-	unsigned long s = rcu_seq_snap(&sp->srcu_barrier_seq);
+	unsigned long s = rcu_seq_snap(&ssp->srcu_barrier_seq);
 
-	check_init_srcu_struct(sp);
-	mutex_lock(&sp->srcu_barrier_mutex);
-	if (rcu_seq_done(&sp->srcu_barrier_seq, s)) {
+	check_init_srcu_struct(ssp);
+	mutex_lock(&ssp->srcu_barrier_mutex);
+	if (rcu_seq_done(&ssp->srcu_barrier_seq, s)) {
		smp_mb(); /* Force ordering following return. */
-		mutex_unlock(&sp->srcu_barrier_mutex);
+		mutex_unlock(&ssp->srcu_barrier_mutex);
		return; /* Someone else did our work for us. */
	}
-	rcu_seq_start(&sp->srcu_barrier_seq);
-	init_completion(&sp->srcu_barrier_completion);
+	rcu_seq_start(&ssp->srcu_barrier_seq);
+	init_completion(&ssp->srcu_barrier_completion);
 
	/* Initial count prevents reaching zero until all CBs are posted. */
-	atomic_set(&sp->srcu_barrier_cpu_cnt, 1);
+	atomic_set(&ssp->srcu_barrier_cpu_cnt, 1);
 
	/*
	 * Each pass through this loop enqueues a callback, but only
@@ -1051,39 +1117,39 @@
	 * grace period as the last callback already in the queue.
	 */
	for_each_possible_cpu(cpu) {
-		sdp = per_cpu_ptr(sp->sda, cpu);
+		sdp = per_cpu_ptr(ssp->sda, cpu);
		spin_lock_irq_rcu_node(sdp);
-		atomic_inc(&sp->srcu_barrier_cpu_cnt);
+		atomic_inc(&ssp->srcu_barrier_cpu_cnt);
		sdp->srcu_barrier_head.func = srcu_barrier_cb;
		debug_rcu_head_queue(&sdp->srcu_barrier_head);
		if (!rcu_segcblist_entrain(&sdp->srcu_cblist,
-					   &sdp->srcu_barrier_head, 0)) {
+					   &sdp->srcu_barrier_head)) {
			debug_rcu_head_unqueue(&sdp->srcu_barrier_head);
-			atomic_dec(&sp->srcu_barrier_cpu_cnt);
+			atomic_dec(&ssp->srcu_barrier_cpu_cnt);
		}
		spin_unlock_irq_rcu_node(sdp);
	}
 
	/* Remove the initial count, at which point reaching zero can happen. */
-	if (atomic_dec_and_test(&sp->srcu_barrier_cpu_cnt))
-		complete(&sp->srcu_barrier_completion);
-	wait_for_completion(&sp->srcu_barrier_completion);
+	if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt))
+		complete(&ssp->srcu_barrier_completion);
+	wait_for_completion(&ssp->srcu_barrier_completion);
 
-	rcu_seq_end(&sp->srcu_barrier_seq);
-	mutex_unlock(&sp->srcu_barrier_mutex);
+	rcu_seq_end(&ssp->srcu_barrier_seq);
+	mutex_unlock(&ssp->srcu_barrier_mutex);
 }
 EXPORT_SYMBOL_GPL(srcu_barrier);
 
 /**
 * srcu_batches_completed - return batches completed.
- * @sp: srcu_struct on which to report batch completion.
+ * @ssp: srcu_struct on which to report batch completion.
 *
 * Report the number of batches, correlated with, but not necessarily
 * precisely the same as, the number of grace periods that have elapsed.
 */
-unsigned long srcu_batches_completed(struct srcu_struct *sp)
+unsigned long srcu_batches_completed(struct srcu_struct *ssp)
 {
-	return sp->srcu_idx;
+	return READ_ONCE(ssp->srcu_idx);
 }
 EXPORT_SYMBOL_GPL(srcu_batches_completed);
 
@@ -1092,11 +1158,11 @@
 * to SRCU_STATE_SCAN2, and invoke srcu_gp_end() when scan has
 * completed in that state.
 */
-static void srcu_advance_state(struct srcu_struct *sp)
+static void srcu_advance_state(struct srcu_struct *ssp)
 {
	int idx;
 
-	mutex_lock(&sp->srcu_gp_mutex);
+	mutex_lock(&ssp->srcu_gp_mutex);
 
	/*
	 * Because readers might be delayed for an extended period after
@@ -1108,47 +1174,49 @@
	 * The load-acquire ensures that we see the accesses performed
	 * by the prior grace period.
	 */
-	idx = rcu_seq_state(smp_load_acquire(&sp->srcu_gp_seq)); /* ^^^ */
+	idx = rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq)); /* ^^^ */
	if (idx == SRCU_STATE_IDLE) {
-		spin_lock_irq_rcu_node(sp);
-		if (ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)) {
-			WARN_ON_ONCE(rcu_seq_state(sp->srcu_gp_seq));
-			spin_unlock_irq_rcu_node(sp);
-			mutex_unlock(&sp->srcu_gp_mutex);
+		spin_lock_irq_rcu_node(ssp);
+		if (ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)) {
+			WARN_ON_ONCE(rcu_seq_state(ssp->srcu_gp_seq));
+			spin_unlock_irq_rcu_node(ssp);
+			mutex_unlock(&ssp->srcu_gp_mutex);
			return;
		}
-		idx = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq));
+		idx = rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq));
		if (idx == SRCU_STATE_IDLE)
-			srcu_gp_start(sp);
-		spin_unlock_irq_rcu_node(sp);
+			srcu_gp_start(ssp);
+		spin_unlock_irq_rcu_node(ssp);
		if (idx != SRCU_STATE_IDLE) {
-			mutex_unlock(&sp->srcu_gp_mutex);
+			mutex_unlock(&ssp->srcu_gp_mutex);
			return; /* Someone else started the grace period. */
		}
	}
 
-	if (rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) == SRCU_STATE_SCAN1) {
-		idx = 1 ^ (sp->srcu_idx & 1);
-		if (!try_check_zero(sp, idx, 1)) {
-			mutex_unlock(&sp->srcu_gp_mutex);
+	if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) == SRCU_STATE_SCAN1) {
+		idx = 1 ^ (ssp->srcu_idx & 1);
+		if (!try_check_zero(ssp, idx, 1)) {
+			mutex_unlock(&ssp->srcu_gp_mutex);
			return; /* readers present, retry later. */
		}
-		srcu_flip(sp);
-		rcu_seq_set_state(&sp->srcu_gp_seq, SRCU_STATE_SCAN2);
+		srcu_flip(ssp);
+		spin_lock_irq_rcu_node(ssp);
+		rcu_seq_set_state(&ssp->srcu_gp_seq, SRCU_STATE_SCAN2);
+		spin_unlock_irq_rcu_node(ssp);
	}
 
-	if (rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) == SRCU_STATE_SCAN2) {
+	if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) == SRCU_STATE_SCAN2) {
 
		/*
		 * SRCU read-side critical sections are normally short,
		 * so check at least twice in quick succession after a flip.
		 */
-		idx = 1 ^ (sp->srcu_idx & 1);
-		if (!try_check_zero(sp, idx, 2)) {
-			mutex_unlock(&sp->srcu_gp_mutex);
+		idx = 1 ^ (ssp->srcu_idx & 1);
+		if (!try_check_zero(ssp, idx, 2)) {
+			mutex_unlock(&ssp->srcu_gp_mutex);
			return; /* readers present, retry later. */
		}
-		srcu_gp_end(sp); /* Releases ->srcu_gp_mutex. */
+		srcu_gp_end(ssp); /* Releases ->srcu_gp_mutex. */
	}
 }
 
@@ -1164,14 +1232,15 @@
	struct rcu_cblist ready_cbs;
	struct rcu_head *rhp;
	struct srcu_data *sdp;
-	struct srcu_struct *sp;
+	struct srcu_struct *ssp;
 
-	sdp = container_of(work, struct srcu_data, work.work);
-	sp = sdp->sp;
+	sdp = container_of(work, struct srcu_data, work);
+
+	ssp = sdp->ssp;
	rcu_cblist_init(&ready_cbs);
	spin_lock_irq_rcu_node(sdp);
	rcu_segcblist_advance(&sdp->srcu_cblist,
-			      rcu_seq_current(&sp->srcu_gp_seq));
+			      rcu_seq_current(&ssp->srcu_gp_seq));
	if (sdp->srcu_cblist_invoking ||
	    !rcu_segcblist_ready_cbs(&sdp->srcu_cblist)) {
		spin_unlock_irq_rcu_node(sdp);
@@ -1197,7 +1266,7 @@
	spin_lock_irq_rcu_node(sdp);
	rcu_segcblist_insert_count(&sdp->srcu_cblist, &ready_cbs);
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
-				       rcu_seq_snap(&sp->srcu_gp_seq));
+				       rcu_seq_snap(&ssp->srcu_gp_seq));
	sdp->srcu_cblist_invoking = false;
	more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist);
	spin_unlock_irq_rcu_node(sdp);
@@ -1209,24 +1278,24 @@
 * Finished one round of SRCU grace period. Start another if there are
 * more SRCU callbacks queued, otherwise put SRCU into not-running state.
 */
-static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay)
+static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay)
 {
	bool pushgp = true;
 
-	spin_lock_irq_rcu_node(sp);
-	if (ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)) {
-		if (!WARN_ON_ONCE(rcu_seq_state(sp->srcu_gp_seq))) {
+	spin_lock_irq_rcu_node(ssp);
+	if (ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)) {
+		if (!WARN_ON_ONCE(rcu_seq_state(ssp->srcu_gp_seq))) {
			/* All requests fulfilled, time to go idle. */
			pushgp = false;
		}
-	} else if (!rcu_seq_state(sp->srcu_gp_seq)) {
+	} else if (!rcu_seq_state(ssp->srcu_gp_seq)) {
		/* Outstanding request and no GP. Start one. */
-		srcu_gp_start(sp);
+		srcu_gp_start(ssp);
	}
-	spin_unlock_irq_rcu_node(sp);
+	spin_unlock_irq_rcu_node(ssp);
 
	if (pushgp)
-		queue_delayed_work(rcu_gp_wq, &sp->work, delay);
+		queue_delayed_work(rcu_gp_wq, &ssp->work, delay);
 }
 
 /*
@@ -1234,43 +1303,43 @@
 */
 static void process_srcu(struct work_struct *work)
 {
-	struct srcu_struct *sp;
+	struct srcu_struct *ssp;
 
-	sp = container_of(work, struct srcu_struct, work.work);
+	ssp = container_of(work, struct srcu_struct, work.work);
 
-	srcu_advance_state(sp);
-	srcu_reschedule(sp, srcu_get_delay(sp));
+	srcu_advance_state(ssp);
+	srcu_reschedule(ssp, srcu_get_delay(ssp));
 }
 
 void srcutorture_get_gp_data(enum rcutorture_type test_type,
-			     struct srcu_struct *sp, int *flags,
+			     struct srcu_struct *ssp, int *flags,
			     unsigned long *gp_seq)
 {
	if (test_type != SRCU_FLAVOR)
		return;
	*flags = 0;
-	*gp_seq = rcu_seq_current(&sp->srcu_gp_seq);
+	*gp_seq = rcu_seq_current(&ssp->srcu_gp_seq);
 }
 EXPORT_SYMBOL_GPL(srcutorture_get_gp_data);
 
-void srcu_torture_stats_print(struct srcu_struct *sp, char *tt, char *tf)
+void srcu_torture_stats_print(struct srcu_struct *ssp, char *tt, char *tf)
 {
	int cpu;
	int idx;
	unsigned long s0 = 0, s1 = 0;
 
-	idx = sp->srcu_idx & 0x1;
+	idx = ssp->srcu_idx & 0x1;
	pr_alert("%s%s Tree SRCU g%ld per-CPU(idx=%d):",
-		 tt, tf, rcu_seq_current(&sp->srcu_gp_seq), idx);
+		 tt, tf, rcu_seq_current(&ssp->srcu_gp_seq), idx);
	for_each_possible_cpu(cpu) {
		unsigned long l0, l1;
		unsigned long u0, u1;
		long c0, c1;
		struct srcu_data *sdp;
 
-		sdp = per_cpu_ptr(sp->sda, cpu);
-		u0 = sdp->srcu_unlock_count[!idx];
-		u1 = sdp->srcu_unlock_count[idx];
+		sdp = per_cpu_ptr(ssp->sda, cpu);
+		u0 = data_race(sdp->srcu_unlock_count[!idx]);
+		u1 = data_race(sdp->srcu_unlock_count[idx]);
 
		/*
		 * Make sure that a lock is always counted if the corresponding
@@ -1278,13 +1347,14 @@
		 */
		smp_rmb();
 
-		l0 = sdp->srcu_lock_count[!idx];
-		l1 = sdp->srcu_lock_count[idx];
+		l0 = data_race(sdp->srcu_lock_count[!idx]);
+		l1 = data_race(sdp->srcu_lock_count[idx]);
 
		c0 = l0 - u0;
		c1 = l1 - u1;
-		pr_cont(" %d(%ld,%ld %1p)",
-			cpu, c0, c1, rcu_segcblist_head(&sdp->srcu_cblist));
+		pr_cont(" %d(%ld,%ld %c)",
+			cpu, c0, c1,
+			"C."[rcu_segcblist_empty(&sdp->srcu_cblist)]);
		s0 += c0;
		s1 += c1;
	}
@@ -1300,3 +1370,82 @@
	return 0;
 }
 early_initcall(srcu_bootup_announce);
+
+void __init srcu_init(void)
+{
+	struct srcu_struct *ssp;
+
+	srcu_init_done = true;
+	while (!list_empty(&srcu_boot_list)) {
+		ssp = list_first_entry(&srcu_boot_list, struct srcu_struct,
+				       work.work.entry);
+		check_init_srcu_struct(ssp);
+		list_del_init(&ssp->work.work.entry);
+		queue_work(rcu_gp_wq, &ssp->work.work);
+	}
+}
+
+#ifdef CONFIG_MODULES
+
+/* Initialize any global-scope srcu_struct structures used by this module. */
+static int srcu_module_coming(struct module *mod)
+{
+	int i;
+	struct srcu_struct **sspp = mod->srcu_struct_ptrs;
+	int ret;
+
+	for (i = 0; i < mod->num_srcu_structs; i++) {
+		ret = init_srcu_struct(*(sspp++));
+		if (WARN_ON_ONCE(ret))
+			return ret;
+	}
+	return 0;
+}
+
+/* Clean up any global-scope srcu_struct structures used by this module. */
+static void srcu_module_going(struct module *mod)
+{
+	int i;
+	struct srcu_struct **sspp = mod->srcu_struct_ptrs;
+
+	for (i = 0; i < mod->num_srcu_structs; i++)
+		cleanup_srcu_struct(*(sspp++));
+}
+
+/* Handle one module, either coming or going. */
+static int srcu_module_notify(struct notifier_block *self,
+			      unsigned long val, void *data)
+{
+	struct module *mod = data;
+	int ret = 0;
+
+	switch (val) {
+	case MODULE_STATE_COMING:
+		ret = srcu_module_coming(mod);
+		break;
+	case MODULE_STATE_GOING:
+		srcu_module_going(mod);
+		break;
+	default:
+		break;
+	}
+	return ret;
+}
+
+static struct notifier_block srcu_module_nb = {
+	.notifier_call = srcu_module_notify,
+	.priority = 0,
+};
+
+static __init int init_srcu_module_notifier(void)
+{
+	int ret;
+
+	ret = register_module_notifier(&srcu_module_nb);
+	if (ret)
+		pr_warn("Failed to register srcu module notifier\n");
+	return ret;
+}
+late_initcall(init_srcu_module_notifier);
+
+#endif /* #ifdef CONFIG_MODULES */
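
Not part of the patch above: a minimal usage sketch of the grace-period polling interfaces it adds (get_state_synchronize_srcu(), start_poll_synchronize_srcu(), poll_state_synchronize_srcu()). The SRCU domain my_srcu, the foo structure, and the fallback to a blocking synchronize_srcu() are illustrative assumptions, not code from srcutree.c.

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/srcu.h>

/* Hypothetical SRCU domain and protected pointer, for illustration only. */
DEFINE_SRCU(my_srcu);

struct foo {
	int val;
};

static struct foo __rcu *global_foo;

/* Publish a new version and return a cookie covering the old version. */
static unsigned long publish_foo(struct foo *newp, struct foo **oldp)
{
	*oldp = rcu_dereference_protected(global_foo, 1);
	rcu_assign_pointer(global_foo, newp);
	/* Snapshot the grace-period state and make sure a GP gets started. */
	return start_poll_synchronize_srcu(&my_srcu);
}

/* Later, free the old version once pre-existing readers must be done. */
static void reclaim_foo(struct foo *oldp, unsigned long cookie)
{
	if (!poll_state_synchronize_srcu(&my_srcu, cookie))
		synchronize_srcu(&my_srcu); /* Fall back to a blocking wait. */
	kfree(oldp);
}

The polling form lets an updater defer or batch reclamation without blocking; when the cookie has not yet expired, the caller can either block as above or retry the poll later (for example from a workqueue).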