hc
2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
kernel/kernel/rcu/srcutree.c
....@@ -1,24 +1,11 @@
1
+// SPDX-License-Identifier: GPL-2.0+
12 /*
23 * Sleepable Read-Copy Update mechanism for mutual exclusion.
3
- *
4
- * This program is free software; you can redistribute it and/or modify
5
- * it under the terms of the GNU General Public License as published by
6
- * the Free Software Foundation; either version 2 of the License, or
7
- * (at your option) any later version.
8
- *
9
- * This program is distributed in the hope that it will be useful,
10
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
11
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12
- * GNU General Public License for more details.
13
- *
14
- * You should have received a copy of the GNU General Public License
15
- * along with this program; if not, you can access it online at
16
- * http://www.gnu.org/licenses/gpl-2.0.html.
174 *
185 * Copyright (C) IBM Corporation, 2006
196 * Copyright (C) Fujitsu, 2012
207 *
21
- * Author: Paul McKenney <paulmck@us.ibm.com>
8
+ * Authors: Paul McKenney <paulmck@linux.ibm.com>
229 * Lai Jiangshan <laijs@cn.fujitsu.com>
2310 *
2411 * For detailed explanation of Read-Copy Update mechanism see -
....@@ -51,9 +38,14 @@
5138 static ulong counter_wrap_check = (ULONG_MAX >> 2);
5239 module_param(counter_wrap_check, ulong, 0444);
5340
41
+/* Early-boot callback-management, so early that no lock is required! */
42
+static LIST_HEAD(srcu_boot_list);
43
+static bool __read_mostly srcu_init_done;
44
+
5445 static void srcu_invoke_callbacks(struct work_struct *work);
55
-static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay);
46
+static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay);
5647 static void process_srcu(struct work_struct *work);
48
+static void srcu_delay_timer(struct timer_list *t);
5749
5850 /* Wrappers for lock acquisition and release, see raw_spin_lock_rcu_node(). */
5951 #define spin_lock_rcu_node(p) \
....@@ -88,7 +80,7 @@
8880 * srcu_read_unlock() running against them. So if the is_static parameter
8981 * is set, don't initialize ->srcu_lock_count[] and ->srcu_unlock_count[].
9082 */
91
-static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static)
83
+static void init_srcu_struct_nodes(struct srcu_struct *ssp, bool is_static)
9284 {
9385 int cpu;
9486 int i;
....@@ -98,14 +90,17 @@
9890 struct srcu_node *snp;
9991 struct srcu_node *snp_first;
10092
93
+ /* Initialize geometry if it has not already been initialized. */
94
+ rcu_init_geometry();
95
+
10196 /* Work out the overall tree geometry. */
102
- sp->level[0] = &sp->node[0];
97
+ ssp->level[0] = &ssp->node[0];
10398 for (i = 1; i < rcu_num_lvls; i++)
104
- sp->level[i] = sp->level[i - 1] + num_rcu_lvl[i - 1];
99
+ ssp->level[i] = ssp->level[i - 1] + num_rcu_lvl[i - 1];
105100 rcu_init_levelspread(levelspread, num_rcu_lvl);
106101
107102 /* Each pass through this loop initializes one srcu_node structure. */
108
- rcu_for_each_node_breadth_first(sp, snp) {
103
+ srcu_for_each_node_breadth_first(ssp, snp) {
109104 spin_lock_init(&ACCESS_PRIVATE(snp, lock));
110105 WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) !=
111106 ARRAY_SIZE(snp->srcu_data_have_cbs));
....@@ -116,17 +111,17 @@
116111 snp->srcu_gp_seq_needed_exp = 0;
117112 snp->grplo = -1;
118113 snp->grphi = -1;
119
- if (snp == &sp->node[0]) {
114
+ if (snp == &ssp->node[0]) {
120115 /* Root node, special case. */
121116 snp->srcu_parent = NULL;
122117 continue;
123118 }
124119
125120 /* Non-root node. */
126
- if (snp == sp->level[level + 1])
121
+ if (snp == ssp->level[level + 1])
127122 level++;
128
- snp->srcu_parent = sp->level[level - 1] +
129
- (snp - sp->level[level]) /
123
+ snp->srcu_parent = ssp->level[level - 1] +
124
+ (snp - ssp->level[level]) /
130125 levelspread[level - 1];
131126 }
132127
....@@ -137,14 +132,14 @@
137132 WARN_ON_ONCE(ARRAY_SIZE(sdp->srcu_lock_count) !=
138133 ARRAY_SIZE(sdp->srcu_unlock_count));
139134 level = rcu_num_lvls - 1;
140
- snp_first = sp->level[level];
135
+ snp_first = ssp->level[level];
141136 for_each_possible_cpu(cpu) {
142
- sdp = per_cpu_ptr(sp->sda, cpu);
137
+ sdp = per_cpu_ptr(ssp->sda, cpu);
143138 spin_lock_init(&ACCESS_PRIVATE(sdp, lock));
144139 rcu_segcblist_init(&sdp->srcu_cblist);
145140 sdp->srcu_cblist_invoking = false;
146
- sdp->srcu_gp_seq_needed = sp->srcu_gp_seq;
147
- sdp->srcu_gp_seq_needed_exp = sp->srcu_gp_seq;
141
+ sdp->srcu_gp_seq_needed = ssp->srcu_gp_seq;
142
+ sdp->srcu_gp_seq_needed_exp = ssp->srcu_gp_seq;
148143 sdp->mynode = &snp_first[cpu / levelspread[level]];
149144 for (snp = sdp->mynode; snp != NULL; snp = snp->srcu_parent) {
150145 if (snp->grplo < 0)
....@@ -152,8 +147,9 @@
152147 snp->grphi = cpu;
153148 }
154149 sdp->cpu = cpu;
155
- INIT_DELAYED_WORK(&sdp->work, srcu_invoke_callbacks);
156
- sdp->sp = sp;
150
+ INIT_WORK(&sdp->work, srcu_invoke_callbacks);
151
+ timer_setup(&sdp->delay_work, srcu_delay_timer, 0);
152
+ sdp->ssp = ssp;
157153 sdp->grpmask = 1 << (cpu - sdp->mynode->grplo);
158154 if (is_static)
159155 continue;
....@@ -172,35 +168,35 @@
172168 * parameter is passed through to init_srcu_struct_nodes(), and
173169 * also tells us that ->sda has already been wired up to srcu_data.
174170 */
175
-static int init_srcu_struct_fields(struct srcu_struct *sp, bool is_static)
171
+static int init_srcu_struct_fields(struct srcu_struct *ssp, bool is_static)
176172 {
177
- mutex_init(&sp->srcu_cb_mutex);
178
- mutex_init(&sp->srcu_gp_mutex);
179
- sp->srcu_idx = 0;
180
- sp->srcu_gp_seq = 0;
181
- sp->srcu_barrier_seq = 0;
182
- mutex_init(&sp->srcu_barrier_mutex);
183
- atomic_set(&sp->srcu_barrier_cpu_cnt, 0);
184
- INIT_DELAYED_WORK(&sp->work, process_srcu);
173
+ mutex_init(&ssp->srcu_cb_mutex);
174
+ mutex_init(&ssp->srcu_gp_mutex);
175
+ ssp->srcu_idx = 0;
176
+ ssp->srcu_gp_seq = 0;
177
+ ssp->srcu_barrier_seq = 0;
178
+ mutex_init(&ssp->srcu_barrier_mutex);
179
+ atomic_set(&ssp->srcu_barrier_cpu_cnt, 0);
180
+ INIT_DELAYED_WORK(&ssp->work, process_srcu);
185181 if (!is_static)
186
- sp->sda = alloc_percpu(struct srcu_data);
187
- init_srcu_struct_nodes(sp, is_static);
188
- sp->srcu_gp_seq_needed_exp = 0;
189
- sp->srcu_last_gp_end = ktime_get_mono_fast_ns();
190
- smp_store_release(&sp->srcu_gp_seq_needed, 0); /* Init done. */
191
- return sp->sda ? 0 : -ENOMEM;
182
+ ssp->sda = alloc_percpu(struct srcu_data);
183
+ init_srcu_struct_nodes(ssp, is_static);
184
+ ssp->srcu_gp_seq_needed_exp = 0;
185
+ ssp->srcu_last_gp_end = ktime_get_mono_fast_ns();
186
+ smp_store_release(&ssp->srcu_gp_seq_needed, 0); /* Init done. */
187
+ return ssp->sda ? 0 : -ENOMEM;
192188 }
193189
194190 #ifdef CONFIG_DEBUG_LOCK_ALLOC
195191
196
-int __init_srcu_struct(struct srcu_struct *sp, const char *name,
192
+int __init_srcu_struct(struct srcu_struct *ssp, const char *name,
197193 struct lock_class_key *key)
198194 {
199195 /* Don't re-initialize a lock while it is held. */
200
- debug_check_no_locks_freed((void *)sp, sizeof(*sp));
201
- lockdep_init_map(&sp->dep_map, name, key, 0);
202
- spin_lock_init(&ACCESS_PRIVATE(sp, lock));
203
- return init_srcu_struct_fields(sp, false);
196
+ debug_check_no_locks_freed((void *)ssp, sizeof(*ssp));
197
+ lockdep_init_map(&ssp->dep_map, name, key, 0);
198
+ spin_lock_init(&ACCESS_PRIVATE(ssp, lock));
199
+ return init_srcu_struct_fields(ssp, false);
204200 }
205201 EXPORT_SYMBOL_GPL(__init_srcu_struct);
206202
....@@ -208,16 +204,16 @@
208204
209205 /**
210206 * init_srcu_struct - initialize a sleep-RCU structure
211
- * @sp: structure to initialize.
207
+ * @ssp: structure to initialize.
212208 *
213209 * Must invoke this on a given srcu_struct before passing that srcu_struct
214210 * to any other function. Each srcu_struct represents a separate domain
215211 * of SRCU protection.
216212 */
217
-int init_srcu_struct(struct srcu_struct *sp)
213
+int init_srcu_struct(struct srcu_struct *ssp)
218214 {
219
- spin_lock_init(&ACCESS_PRIVATE(sp, lock));
220
- return init_srcu_struct_fields(sp, false);
215
+ spin_lock_init(&ACCESS_PRIVATE(ssp, lock));
216
+ return init_srcu_struct_fields(ssp, false);
221217 }
222218 EXPORT_SYMBOL_GPL(init_srcu_struct);
223219
....@@ -227,38 +223,37 @@
227223 * First-use initialization of statically allocated srcu_struct
228224 * structure. Wiring up the combining tree is more than can be
229225 * done with compile-time initialization, so this check is added
230
- * to each update-side SRCU primitive. Use sp->lock, which -is-
226
+ * to each update-side SRCU primitive. Use ssp->lock, which -is-
231227 * compile-time initialized, to resolve races involving multiple
232228 * CPUs trying to garner first-use privileges.
233229 */
234
-static void check_init_srcu_struct(struct srcu_struct *sp)
230
+static void check_init_srcu_struct(struct srcu_struct *ssp)
235231 {
236232 unsigned long flags;
237233
238
- WARN_ON_ONCE(rcu_scheduler_active == RCU_SCHEDULER_INIT);
239234 /* The smp_load_acquire() pairs with the smp_store_release(). */
240
- if (!rcu_seq_state(smp_load_acquire(&sp->srcu_gp_seq_needed))) /*^^^*/
235
+ if (!rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq_needed))) /*^^^*/
241236 return; /* Already initialized. */
242
- spin_lock_irqsave_rcu_node(sp, flags);
243
- if (!rcu_seq_state(sp->srcu_gp_seq_needed)) {
244
- spin_unlock_irqrestore_rcu_node(sp, flags);
237
+ spin_lock_irqsave_rcu_node(ssp, flags);
238
+ if (!rcu_seq_state(ssp->srcu_gp_seq_needed)) {
239
+ spin_unlock_irqrestore_rcu_node(ssp, flags);
245240 return;
246241 }
247
- init_srcu_struct_fields(sp, true);
248
- spin_unlock_irqrestore_rcu_node(sp, flags);
242
+ init_srcu_struct_fields(ssp, true);
243
+ spin_unlock_irqrestore_rcu_node(ssp, flags);
249244 }
250245
251246 /*
252247 * Returns approximate total of the readers' ->srcu_lock_count[] values
253248 * for the rank of per-CPU counters specified by idx.
254249 */
255
-static unsigned long srcu_readers_lock_idx(struct srcu_struct *sp, int idx)
250
+static unsigned long srcu_readers_lock_idx(struct srcu_struct *ssp, int idx)
256251 {
257252 int cpu;
258253 unsigned long sum = 0;
259254
260255 for_each_possible_cpu(cpu) {
261
- struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu);
256
+ struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);
262257
263258 sum += READ_ONCE(cpuc->srcu_lock_count[idx]);
264259 }
....@@ -269,13 +264,13 @@
269264 * Returns approximate total of the readers' ->srcu_unlock_count[] values
270265 * for the rank of per-CPU counters specified by idx.
271266 */
272
-static unsigned long srcu_readers_unlock_idx(struct srcu_struct *sp, int idx)
267
+static unsigned long srcu_readers_unlock_idx(struct srcu_struct *ssp, int idx)
273268 {
274269 int cpu;
275270 unsigned long sum = 0;
276271
277272 for_each_possible_cpu(cpu) {
278
- struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu);
273
+ struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);
279274
280275 sum += READ_ONCE(cpuc->srcu_unlock_count[idx]);
281276 }
....@@ -286,11 +281,11 @@
286281 * Return true if the number of pre-existing readers is determined to
287282 * be zero.
288283 */
289
-static bool srcu_readers_active_idx_check(struct srcu_struct *sp, int idx)
284
+static bool srcu_readers_active_idx_check(struct srcu_struct *ssp, int idx)
290285 {
291286 unsigned long unlocks;
292287
293
- unlocks = srcu_readers_unlock_idx(sp, idx);
288
+ unlocks = srcu_readers_unlock_idx(ssp, idx);
294289
295290 /*
296291 * Make sure that a lock is always counted if the corresponding
....@@ -326,25 +321,25 @@
326321 * of floor(ULONG_MAX/NR_CPUS/2), which should be sufficient,
327322 * especially on 64-bit systems.
328323 */
329
- return srcu_readers_lock_idx(sp, idx) == unlocks;
324
+ return srcu_readers_lock_idx(ssp, idx) == unlocks;
330325 }
331326
332327 /**
333328 * srcu_readers_active - returns true if there are readers. and false
334329 * otherwise
335
- * @sp: which srcu_struct to count active readers (holding srcu_read_lock).
330
+ * @ssp: which srcu_struct to count active readers (holding srcu_read_lock).
336331 *
337332 * Note that this is not an atomic primitive, and can therefore suffer
338333 * severe errors when invoked on an active srcu_struct. That said, it
339334 * can be useful as an error check at cleanup time.
340335 */
341
-static bool srcu_readers_active(struct srcu_struct *sp)
336
+static bool srcu_readers_active(struct srcu_struct *ssp)
342337 {
343338 int cpu;
344339 unsigned long sum = 0;
345340
346341 for_each_possible_cpu(cpu) {
347
- struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu);
342
+ struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);
348343
349344 sum += READ_ONCE(cpuc->srcu_lock_count[0]);
350345 sum += READ_ONCE(cpuc->srcu_lock_count[1]);
....@@ -360,58 +355,60 @@
360355 * Return grace-period delay, zero if there are expedited grace
361356 * periods pending, SRCU_INTERVAL otherwise.
362357 */
363
-static unsigned long srcu_get_delay(struct srcu_struct *sp)
358
+static unsigned long srcu_get_delay(struct srcu_struct *ssp)
364359 {
365
- if (ULONG_CMP_LT(READ_ONCE(sp->srcu_gp_seq),
366
- READ_ONCE(sp->srcu_gp_seq_needed_exp)))
360
+ if (ULONG_CMP_LT(READ_ONCE(ssp->srcu_gp_seq),
361
+ READ_ONCE(ssp->srcu_gp_seq_needed_exp)))
367362 return 0;
368363 return SRCU_INTERVAL;
369364 }
370365
371
-/* Helper for cleanup_srcu_struct() and cleanup_srcu_struct_quiesced(). */
372
-void _cleanup_srcu_struct(struct srcu_struct *sp, bool quiesced)
366
+/**
367
+ * cleanup_srcu_struct - deconstruct a sleep-RCU structure
368
+ * @ssp: structure to clean up.
369
+ *
370
+ * Must invoke this after you are finished using a given srcu_struct that
371
+ * was initialized via init_srcu_struct(), else you leak memory.
372
+ */
373
+void cleanup_srcu_struct(struct srcu_struct *ssp)
373374 {
374375 int cpu;
375376
376
- if (WARN_ON(!srcu_get_delay(sp)))
377
+ if (WARN_ON(!srcu_get_delay(ssp)))
377378 return; /* Just leak it! */
378
- if (WARN_ON(srcu_readers_active(sp)))
379
+ if (WARN_ON(srcu_readers_active(ssp)))
379380 return; /* Just leak it! */
380
- if (quiesced) {
381
- if (WARN_ON(delayed_work_pending(&sp->work)))
382
- return; /* Just leak it! */
383
- } else {
384
- flush_delayed_work(&sp->work);
381
+ flush_delayed_work(&ssp->work);
382
+ for_each_possible_cpu(cpu) {
383
+ struct srcu_data *sdp = per_cpu_ptr(ssp->sda, cpu);
384
+
385
+ del_timer_sync(&sdp->delay_work);
386
+ flush_work(&sdp->work);
387
+ if (WARN_ON(rcu_segcblist_n_cbs(&sdp->srcu_cblist)))
388
+ return; /* Forgot srcu_barrier(), so just leak it! */
385389 }
386
- for_each_possible_cpu(cpu)
387
- if (quiesced) {
388
- if (WARN_ON(delayed_work_pending(&per_cpu_ptr(sp->sda, cpu)->work)))
389
- return; /* Just leak it! */
390
- } else {
391
- flush_delayed_work(&per_cpu_ptr(sp->sda, cpu)->work);
392
- }
393
- if (WARN_ON(rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) != SRCU_STATE_IDLE) ||
394
- WARN_ON(srcu_readers_active(sp))) {
390
+ if (WARN_ON(rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) != SRCU_STATE_IDLE) ||
391
+ WARN_ON(srcu_readers_active(ssp))) {
395392 pr_info("%s: Active srcu_struct %p state: %d\n",
396
- __func__, sp, rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)));
393
+ __func__, ssp, rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)));
397394 return; /* Caller forgot to stop doing call_srcu()? */
398395 }
399
- free_percpu(sp->sda);
400
- sp->sda = NULL;
396
+ free_percpu(ssp->sda);
397
+ ssp->sda = NULL;
401398 }
402
-EXPORT_SYMBOL_GPL(_cleanup_srcu_struct);
399
+EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
403400
404401 /*
405402 * Counts the new reader in the appropriate per-CPU element of the
406403 * srcu_struct.
407404 * Returns an index that must be passed to the matching srcu_read_unlock().
408405 */
409
-int __srcu_read_lock(struct srcu_struct *sp)
406
+int __srcu_read_lock(struct srcu_struct *ssp)
410407 {
411408 int idx;
412409
413
- idx = READ_ONCE(sp->srcu_idx) & 0x1;
414
- this_cpu_inc(sp->sda->srcu_lock_count[idx]);
410
+ idx = READ_ONCE(ssp->srcu_idx) & 0x1;
411
+ this_cpu_inc(ssp->sda->srcu_lock_count[idx]);
415412 smp_mb(); /* B */ /* Avoid leaking the critical section. */
416413 return idx;
417414 }
....@@ -422,10 +419,10 @@
422419 * element of the srcu_struct. Note that this may well be a different
423420 * CPU than that which was incremented by the corresponding srcu_read_lock().
424421 */
425
-void __srcu_read_unlock(struct srcu_struct *sp, int idx)
422
+void __srcu_read_unlock(struct srcu_struct *ssp, int idx)
426423 {
427424 smp_mb(); /* C */ /* Avoid leaking the critical section. */
428
- this_cpu_inc(sp->sda->srcu_unlock_count[idx]);
425
+ this_cpu_inc(ssp->sda->srcu_unlock_count[idx]);
429426 }
430427 EXPORT_SYMBOL_GPL(__srcu_read_unlock);
431428
....@@ -441,58 +438,42 @@
441438 /*
442439 * Start an SRCU grace period.
443440 */
444
-static void srcu_gp_start(struct srcu_struct *sp)
441
+static void srcu_gp_start(struct srcu_struct *ssp)
445442 {
446
- struct srcu_data *sdp = this_cpu_ptr(sp->sda);
443
+ struct srcu_data *sdp = this_cpu_ptr(ssp->sda);
447444 int state;
448445
449
- lockdep_assert_held(&ACCESS_PRIVATE(sp, lock));
450
- WARN_ON_ONCE(ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed));
446
+ lockdep_assert_held(&ACCESS_PRIVATE(ssp, lock));
447
+ WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed));
451448 spin_lock_rcu_node(sdp); /* Interrupts already disabled. */
452449 rcu_segcblist_advance(&sdp->srcu_cblist,
453
- rcu_seq_current(&sp->srcu_gp_seq));
450
+ rcu_seq_current(&ssp->srcu_gp_seq));
454451 (void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
455
- rcu_seq_snap(&sp->srcu_gp_seq));
452
+ rcu_seq_snap(&ssp->srcu_gp_seq));
456453 spin_unlock_rcu_node(sdp); /* Interrupts remain disabled. */
457454 smp_mb(); /* Order prior store to ->srcu_gp_seq_needed vs. GP start. */
458
- rcu_seq_start(&sp->srcu_gp_seq);
459
- state = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq));
455
+ rcu_seq_start(&ssp->srcu_gp_seq);
456
+ state = rcu_seq_state(ssp->srcu_gp_seq);
460457 WARN_ON_ONCE(state != SRCU_STATE_SCAN1);
461458 }
462459
463
-/*
464
- * Track online CPUs to guide callback workqueue placement.
465
- */
466
-DEFINE_PER_CPU(bool, srcu_online);
467460
468
-void srcu_online_cpu(unsigned int cpu)
461
+static void srcu_delay_timer(struct timer_list *t)
469462 {
470
- WRITE_ONCE(per_cpu(srcu_online, cpu), true);
463
+ struct srcu_data *sdp = container_of(t, struct srcu_data, delay_work);
464
+
465
+ queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
471466 }
472467
473
-void srcu_offline_cpu(unsigned int cpu)
474
-{
475
- WRITE_ONCE(per_cpu(srcu_online, cpu), false);
476
-}
477
-
478
-/*
479
- * Place the workqueue handler on the specified CPU if online, otherwise
480
- * just run it whereever. This is useful for placing workqueue handlers
481
- * that are to invoke the specified CPU's callbacks.
482
- */
483
-static bool srcu_queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
484
- struct delayed_work *dwork,
468
+static void srcu_queue_delayed_work_on(struct srcu_data *sdp,
485469 unsigned long delay)
486470 {
487
- bool ret;
471
+ if (!delay) {
472
+ queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
473
+ return;
474
+ }
488475
489
- preempt_disable();
490
- if (READ_ONCE(per_cpu(srcu_online, cpu)))
491
- ret = queue_delayed_work_on(cpu, wq, dwork, delay);
492
- else
493
- ret = queue_delayed_work(wq, dwork, delay);
494
- preempt_enable();
495
- return ret;
476
+ timer_reduce(&sdp->delay_work, jiffies + delay);
496477 }
497478
498479 /*
....@@ -501,7 +482,7 @@
501482 */
502483 static void srcu_schedule_cbs_sdp(struct srcu_data *sdp, unsigned long delay)
503484 {
504
- srcu_queue_delayed_work_on(sdp->cpu, rcu_gp_wq, &sdp->work, delay);
485
+ srcu_queue_delayed_work_on(sdp, delay);
505486 }
506487
507488 /*
....@@ -510,7 +491,7 @@
510491 * just-completed grace period, the one corresponding to idx. If possible,
511492 * schedule this invocation on the corresponding CPUs.
512493 */
513
-static void srcu_schedule_cbs_snp(struct srcu_struct *sp, struct srcu_node *snp,
494
+static void srcu_schedule_cbs_snp(struct srcu_struct *ssp, struct srcu_node *snp,
514495 unsigned long mask, unsigned long delay)
515496 {
516497 int cpu;
....@@ -518,7 +499,7 @@
518499 for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
519500 if (!(mask & (1 << (cpu - snp->grplo))))
520501 continue;
521
- srcu_schedule_cbs_sdp(per_cpu_ptr(sp->sda, cpu), delay);
502
+ srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, cpu), delay);
522503 }
523504 }
524505
....@@ -531,7 +512,7 @@
531512 * are initiating callback invocation. This allows the ->srcu_have_cbs[]
532513 * array to have a finite number of elements.
533514 */
534
-static void srcu_gp_end(struct srcu_struct *sp)
515
+static void srcu_gp_end(struct srcu_struct *ssp)
535516 {
536517 unsigned long cbdelay;
537518 bool cbs;
....@@ -545,44 +526,44 @@
545526 struct srcu_node *snp;
546527
547528 /* Prevent more than one additional grace period. */
548
- mutex_lock(&sp->srcu_cb_mutex);
529
+ mutex_lock(&ssp->srcu_cb_mutex);
549530
550531 /* End the current grace period. */
551
- spin_lock_irq_rcu_node(sp);
552
- idx = rcu_seq_state(sp->srcu_gp_seq);
532
+ spin_lock_irq_rcu_node(ssp);
533
+ idx = rcu_seq_state(ssp->srcu_gp_seq);
553534 WARN_ON_ONCE(idx != SRCU_STATE_SCAN2);
554
- cbdelay = srcu_get_delay(sp);
555
- sp->srcu_last_gp_end = ktime_get_mono_fast_ns();
556
- rcu_seq_end(&sp->srcu_gp_seq);
557
- gpseq = rcu_seq_current(&sp->srcu_gp_seq);
558
- if (ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, gpseq))
559
- sp->srcu_gp_seq_needed_exp = gpseq;
560
- spin_unlock_irq_rcu_node(sp);
561
- mutex_unlock(&sp->srcu_gp_mutex);
535
+ cbdelay = srcu_get_delay(ssp);
536
+ WRITE_ONCE(ssp->srcu_last_gp_end, ktime_get_mono_fast_ns());
537
+ rcu_seq_end(&ssp->srcu_gp_seq);
538
+ gpseq = rcu_seq_current(&ssp->srcu_gp_seq);
539
+ if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, gpseq))
540
+ WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, gpseq);
541
+ spin_unlock_irq_rcu_node(ssp);
542
+ mutex_unlock(&ssp->srcu_gp_mutex);
562543 /* A new grace period can start at this point. But only one. */
563544
564545 /* Initiate callback invocation as needed. */
565546 idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs);
566
- rcu_for_each_node_breadth_first(sp, snp) {
547
+ srcu_for_each_node_breadth_first(ssp, snp) {
567548 spin_lock_irq_rcu_node(snp);
568549 cbs = false;
569
- last_lvl = snp >= sp->level[rcu_num_lvls - 1];
550
+ last_lvl = snp >= ssp->level[rcu_num_lvls - 1];
570551 if (last_lvl)
571552 cbs = snp->srcu_have_cbs[idx] == gpseq;
572553 snp->srcu_have_cbs[idx] = gpseq;
573554 rcu_seq_set_state(&snp->srcu_have_cbs[idx], 1);
574555 if (ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, gpseq))
575
- snp->srcu_gp_seq_needed_exp = gpseq;
556
+ WRITE_ONCE(snp->srcu_gp_seq_needed_exp, gpseq);
576557 mask = snp->srcu_data_have_cbs[idx];
577558 snp->srcu_data_have_cbs[idx] = 0;
578559 spin_unlock_irq_rcu_node(snp);
579560 if (cbs)
580
- srcu_schedule_cbs_snp(sp, snp, mask, cbdelay);
561
+ srcu_schedule_cbs_snp(ssp, snp, mask, cbdelay);
581562
582563 /* Occasionally prevent srcu_data counter wrap. */
583564 if (!(gpseq & counter_wrap_check) && last_lvl)
584565 for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
585
- sdp = per_cpu_ptr(sp->sda, cpu);
566
+ sdp = per_cpu_ptr(ssp->sda, cpu);
586567 spin_lock_irqsave_rcu_node(sdp, flags);
587568 if (ULONG_CMP_GE(gpseq,
588569 sdp->srcu_gp_seq_needed + 100))
....@@ -595,18 +576,18 @@
595576 }
596577
597578 /* Callback initiation done, allow grace periods after next. */
598
- mutex_unlock(&sp->srcu_cb_mutex);
579
+ mutex_unlock(&ssp->srcu_cb_mutex);
599580
600581 /* Start a new grace period if needed. */
601
- spin_lock_irq_rcu_node(sp);
602
- gpseq = rcu_seq_current(&sp->srcu_gp_seq);
582
+ spin_lock_irq_rcu_node(ssp);
583
+ gpseq = rcu_seq_current(&ssp->srcu_gp_seq);
603584 if (!rcu_seq_state(gpseq) &&
604
- ULONG_CMP_LT(gpseq, sp->srcu_gp_seq_needed)) {
605
- srcu_gp_start(sp);
606
- spin_unlock_irq_rcu_node(sp);
607
- srcu_reschedule(sp, 0);
585
+ ULONG_CMP_LT(gpseq, ssp->srcu_gp_seq_needed)) {
586
+ srcu_gp_start(ssp);
587
+ spin_unlock_irq_rcu_node(ssp);
588
+ srcu_reschedule(ssp, 0);
608589 } else {
609
- spin_unlock_irq_rcu_node(sp);
590
+ spin_unlock_irq_rcu_node(ssp);
610591 }
611592 }
612593
....@@ -617,13 +598,13 @@
617598 * but without expediting. To start a completely new grace period,
618599 * whether expedited or not, use srcu_funnel_gp_start() instead.
619600 */
620
-static void srcu_funnel_exp_start(struct srcu_struct *sp, struct srcu_node *snp,
601
+static void srcu_funnel_exp_start(struct srcu_struct *ssp, struct srcu_node *snp,
621602 unsigned long s)
622603 {
623604 unsigned long flags;
624605
625606 for (; snp != NULL; snp = snp->srcu_parent) {
626
- if (rcu_seq_done(&sp->srcu_gp_seq, s) ||
607
+ if (rcu_seq_done(&ssp->srcu_gp_seq, s) ||
627608 ULONG_CMP_GE(READ_ONCE(snp->srcu_gp_seq_needed_exp), s))
628609 return;
629610 spin_lock_irqsave_rcu_node(snp, flags);
....@@ -634,10 +615,10 @@
634615 WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
635616 spin_unlock_irqrestore_rcu_node(snp, flags);
636617 }
637
- spin_lock_irqsave_rcu_node(sp, flags);
638
- if (ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, s))
639
- sp->srcu_gp_seq_needed_exp = s;
640
- spin_unlock_irqrestore_rcu_node(sp, flags);
618
+ spin_lock_irqsave_rcu_node(ssp, flags);
619
+ if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, s))
620
+ WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, s);
621
+ spin_unlock_irqrestore_rcu_node(ssp, flags);
641622 }
642623
643624 /*
....@@ -650,7 +631,7 @@
650631 * Note that this function also does the work of srcu_funnel_exp_start(),
651632 * in some cases by directly invoking it.
652633 */
653
-static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp,
634
+static void srcu_funnel_gp_start(struct srcu_struct *ssp, struct srcu_data *sdp,
654635 unsigned long s, bool do_norm)
655636 {
656637 unsigned long flags;
....@@ -660,7 +641,7 @@
660641
661642 /* Each pass through the loop does one level of the srcu_node tree. */
662643 for (; snp != NULL; snp = snp->srcu_parent) {
663
- if (rcu_seq_done(&sp->srcu_gp_seq, s) && snp != sdp->mynode)
644
+ if (rcu_seq_done(&ssp->srcu_gp_seq, s) && snp != sdp->mynode)
664645 return; /* GP already done and CBs recorded. */
665646 spin_lock_irqsave_rcu_node(snp, flags);
666647 if (ULONG_CMP_GE(snp->srcu_have_cbs[idx], s)) {
....@@ -675,37 +656,41 @@
675656 return;
676657 }
677658 if (!do_norm)
678
- srcu_funnel_exp_start(sp, snp, s);
659
+ srcu_funnel_exp_start(ssp, snp, s);
679660 return;
680661 }
681662 snp->srcu_have_cbs[idx] = s;
682663 if (snp == sdp->mynode)
683664 snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
684665 if (!do_norm && ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, s))
685
- snp->srcu_gp_seq_needed_exp = s;
666
+ WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
686667 spin_unlock_irqrestore_rcu_node(snp, flags);
687668 }
688669
689670 /* Top of tree, must ensure the grace period will be started. */
690
- spin_lock_irqsave_rcu_node(sp, flags);
691
- if (ULONG_CMP_LT(sp->srcu_gp_seq_needed, s)) {
671
+ spin_lock_irqsave_rcu_node(ssp, flags);
672
+ if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed, s)) {
692673 /*
693674 * Record need for grace period s. Pair with load
694675 * acquire setting up for initialization.
695676 */
696
- smp_store_release(&sp->srcu_gp_seq_needed, s); /*^^^*/
677
+ smp_store_release(&ssp->srcu_gp_seq_needed, s); /*^^^*/
697678 }
698
- if (!do_norm && ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, s))
699
- sp->srcu_gp_seq_needed_exp = s;
679
+ if (!do_norm && ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, s))
680
+ WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, s);
700681
701682 /* If grace period not already done and none in progress, start it. */
702
- if (!rcu_seq_done(&sp->srcu_gp_seq, s) &&
703
- rcu_seq_state(sp->srcu_gp_seq) == SRCU_STATE_IDLE) {
704
- WARN_ON_ONCE(ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed));
705
- srcu_gp_start(sp);
706
- queue_delayed_work(rcu_gp_wq, &sp->work, srcu_get_delay(sp));
683
+ if (!rcu_seq_done(&ssp->srcu_gp_seq, s) &&
684
+ rcu_seq_state(ssp->srcu_gp_seq) == SRCU_STATE_IDLE) {
685
+ WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed));
686
+ srcu_gp_start(ssp);
687
+ if (likely(srcu_init_done))
688
+ queue_delayed_work(rcu_gp_wq, &ssp->work,
689
+ srcu_get_delay(ssp));
690
+ else if (list_empty(&ssp->work.work.entry))
691
+ list_add(&ssp->work.work.entry, &srcu_boot_list);
707692 }
708
- spin_unlock_irqrestore_rcu_node(sp, flags);
693
+ spin_unlock_irqrestore_rcu_node(ssp, flags);
709694 }
710695
711696 /*
....@@ -713,12 +698,12 @@
713698 * loop an additional time if there is an expedited grace period pending.
714699 * The caller must ensure that ->srcu_idx is not changed while checking.
715700 */
716
-static bool try_check_zero(struct srcu_struct *sp, int idx, int trycount)
701
+static bool try_check_zero(struct srcu_struct *ssp, int idx, int trycount)
717702 {
718703 for (;;) {
719
- if (srcu_readers_active_idx_check(sp, idx))
704
+ if (srcu_readers_active_idx_check(ssp, idx))
720705 return true;
721
- if (--trycount + !srcu_get_delay(sp) <= 0)
706
+ if (--trycount + !srcu_get_delay(ssp) <= 0)
722707 return false;
723708 udelay(SRCU_RETRY_CHECK_DELAY);
724709 }
....@@ -729,7 +714,7 @@
729714 * use the other rank of the ->srcu_(un)lock_count[] arrays. This allows
730715 * us to wait for pre-existing readers in a starvation-free manner.
731716 */
732
-static void srcu_flip(struct srcu_struct *sp)
717
+static void srcu_flip(struct srcu_struct *ssp)
733718 {
734719 /*
735720 * Ensure that if this updater saw a given reader's increment
....@@ -741,7 +726,7 @@
741726 */
742727 smp_mb(); /* E */ /* Pairs with B and C. */
743728
744
- WRITE_ONCE(sp->srcu_idx, sp->srcu_idx + 1);
729
+ WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1);
745730
746731 /*
747732 * Ensure that if the updater misses an __srcu_read_unlock()
....@@ -771,24 +756,26 @@
771756 * it, if this function was preempted for enough time for the counters
772757 * to wrap, it really doesn't matter whether or not we expedite the grace
773758 * period. The extra overhead of a needlessly expedited grace period is
774
- * negligible when amoritized over that time period, and the extra latency
759
+ * negligible when amortized over that time period, and the extra latency
775760 * of a needlessly non-expedited grace period is similarly negligible.
776761 */
777
static bool srcu_might_be_idle(struct srcu_struct *ssp)
{
	unsigned long curseq;
	unsigned long flags;
	struct srcu_data *sdp;
	unsigned long t;
	unsigned long tlast;

	check_init_srcu_struct(ssp);
	/* If the local srcu_data structure has callbacks, not idle. */
	sdp = raw_cpu_ptr(ssp->sda);
	spin_lock_irqsave_rcu_node(sdp, flags);
	if (rcu_segcblist_pend_cbs(&sdp->srcu_cblist)) {
		spin_unlock_irqrestore_rcu_node(sdp, flags);
		return false; /* Callbacks already present, so not idle. */
	}
	spin_unlock_irqrestore_rcu_node(sdp, flags);

	/*
	 * No local callbacks, so probabilistically probe global state.
	 * Exact information would require acquiring locks, which would
	 * kill scalability, hence the heuristic nature of this probe:
	 * both false positives and false negatives are possible.
	 */

	/* First, see if enough time has passed since the last GP. */
	t = ktime_get_mono_fast_ns();
	tlast = READ_ONCE(ssp->srcu_last_gp_end);
	if (exp_holdoff == 0 ||
	    time_in_range_open(t, tlast, tlast + exp_holdoff))
		return false; /* Too soon after last GP. */

	/* Next, check for probable idleness. */
	curseq = rcu_seq_current(&ssp->srcu_gp_seq);
	smp_mb(); /* Order ->srcu_gp_seq against later ->srcu_gp_seq_needed. */
	if (ULONG_CMP_LT(curseq, READ_ONCE(ssp->srcu_gp_seq_needed)))
		return false; /* Grace period in progress, so not idle. */
	smp_mb(); /* Order ->srcu_gp_seq against prior access. */
	if (curseq != rcu_seq_current(&ssp->srcu_gp_seq))
		return false; /* GP # changed, so not idle. */
	return true; /* With reasonable probability, idle! */
}
....@@ -819,6 +806,46 @@
819806 */
static void srcu_leak_callback(struct rcu_head *rhp)
{
	/*
	 * Intentionally empty: installed as the ->func of a probable
	 * double call_srcu() (see __call_srcu()) so that the duplicated
	 * callback is leaked rather than being invoked (and freed) twice.
	 */
}
810
+
811
/*
 * Start an SRCU grace period if needed, and also queue the callback
 * (if non-NULL) on the current CPU's srcu_data structure.  Returns
 * the grace-period sequence number ("cookie") that the request maps
 * to.  An SRCU read-side critical section covers the funnel-locking
 * operations (note the srcu_read_lock()/srcu_read_unlock() pair).
 */
static unsigned long srcu_gp_start_if_needed(struct srcu_struct *ssp,
					     struct rcu_head *rhp, bool do_norm)
{
	unsigned long flags;
	int idx;
	bool needexp = false;
	bool needgp = false;
	unsigned long s;
	struct srcu_data *sdp;

	check_init_srcu_struct(ssp);
	idx = srcu_read_lock(ssp);
	sdp = raw_cpu_ptr(ssp->sda);
	spin_lock_irqsave_rcu_node(sdp, flags);
	if (rhp)
		rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp);
	/* Advance callbacks past the current GP, then snapshot the needed GP. */
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&ssp->srcu_gp_seq));
	s = rcu_seq_snap(&ssp->srcu_gp_seq);
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s);
	if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) {
		sdp->srcu_gp_seq_needed = s;
		needgp = true;
	}
	if (!do_norm && ULONG_CMP_LT(sdp->srcu_gp_seq_needed_exp, s)) {
		sdp->srcu_gp_seq_needed_exp = s;
		needexp = true;
	}
	spin_unlock_irqrestore_rcu_node(sdp, flags);
	/* Funnel-lock up the tree only if this CPU raised the request. */
	if (needgp)
		srcu_funnel_gp_start(ssp, sdp, s, do_norm);
	else if (needexp)
		srcu_funnel_exp_start(ssp, sdp->mynode, s);
	srcu_read_unlock(ssp, idx);
	return s;
}
823850
824851 /*
....@@ -849,16 +876,9 @@
849876 * srcu_read_lock(), and srcu_read_unlock() that are all passed the same
850877 * srcu_struct structure.
851878 */
852
static void __call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
			rcu_callback_t func, bool do_norm)
{
	if (debug_rcu_head_queue(rhp)) {
		/* Probable double call_srcu(), so leak the callback. */
		WRITE_ONCE(rhp->func, srcu_leak_callback);
		WARN_ON_ONCE(1);
		return;
	}
	rhp->func = func;
	/* Queue the callback and kick off any needed grace period. */
	(void)srcu_gp_start_if_needed(ssp, rhp, do_norm);
}
891891
892892 /**
893893 * call_srcu() - Queue a callback for invocation after an SRCU grace period
894
- * @sp: srcu_struct in queue the callback
894
+ * @ssp: srcu_struct in queue the callback
895895 * @rhp: structure to be used for queueing the SRCU callback.
896896 * @func: function to be invoked after the SRCU grace period
897897 *
....@@ -906,21 +906,21 @@
906906 * The callback will be invoked from process context, but must nevertheless
907907 * be fast and must not block.
908908 */
909
void call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
	       rcu_callback_t func)
{
	/* do_norm=true: request a normal (non-expedited) grace period. */
	__call_srcu(ssp, rhp, func, true);
}
EXPORT_SYMBOL_GPL(call_srcu);
915915
916916 /*
917917 * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
918918 */
919
-static void __synchronize_srcu(struct srcu_struct *sp, bool do_norm)
919
+static void __synchronize_srcu(struct srcu_struct *ssp, bool do_norm)
920920 {
921921 struct rcu_synchronize rcu;
922922
923
- RCU_LOCKDEP_WARN(lock_is_held(&sp->dep_map) ||
923
+ RCU_LOCKDEP_WARN(lock_is_held(&ssp->dep_map) ||
924924 lock_is_held(&rcu_bh_lock_map) ||
925925 lock_is_held(&rcu_lock_map) ||
926926 lock_is_held(&rcu_sched_lock_map),
....@@ -929,10 +929,10 @@
929929 if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
930930 return;
931931 might_sleep();
932
- check_init_srcu_struct(sp);
932
+ check_init_srcu_struct(ssp);
933933 init_completion(&rcu.completion);
934934 init_rcu_head_on_stack(&rcu.head);
935
- __call_srcu(sp, &rcu.head, wakeme_after_rcu, do_norm);
935
+ __call_srcu(ssp, &rcu.head, wakeme_after_rcu, do_norm);
936936 wait_for_completion(&rcu.completion);
937937 destroy_rcu_head_on_stack(&rcu.head);
938938
....@@ -948,7 +948,7 @@
948948
949949 /**
950950 * synchronize_srcu_expedited - Brute-force SRCU grace period
951
- * @sp: srcu_struct with which to synchronize.
951
+ * @ssp: srcu_struct with which to synchronize.
952952 *
953953 * Wait for an SRCU grace period to elapse, but be more aggressive about
954954 * spinning rather than blocking when waiting.
....@@ -956,15 +956,15 @@
956956 * Note that synchronize_srcu_expedited() has the same deadlock and
957957 * memory-ordering properties as does synchronize_srcu().
958958 */
959
void synchronize_srcu_expedited(struct srcu_struct *ssp)
{
	/* Expedite unless grace periods are globally forced to normal. */
	__synchronize_srcu(ssp, rcu_gp_is_normal());
}
EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);
964964
965965 /**
966966 * synchronize_srcu - wait for prior SRCU read-side critical-section completion
967
- * @sp: srcu_struct with which to synchronize.
967
+ * @ssp: srcu_struct with which to synchronize.
968968 *
969969 * Wait for the count to drain to zero of both indexes. To avoid the
970970 * possible starvation of synchronize_srcu(), it waits for the count of
....@@ -982,7 +982,7 @@
982982 * There are memory-ordering constraints implied by synchronize_srcu().
983983 * On systems with more than one CPU, when synchronize_srcu() returns,
984984 * each CPU is guaranteed to have executed a full memory barrier since
985
- * the end of its last corresponding SRCU-sched read-side critical section
985
+ * the end of its last corresponding SRCU read-side critical section
986986 * whose beginning preceded the call to synchronize_srcu(). In addition,
987987 * each CPU having an SRCU read-side critical section that extends beyond
988988 * the return from synchronize_srcu() is guaranteed to have executed a
....@@ -1006,14 +1006,70 @@
10061006 * SRCU must also provide it. Note that detecting idleness is heuristic
10071007 * and subject to both false positives and negatives.
10081008 */
1009
void synchronize_srcu(struct srcu_struct *ssp)
{
	/*
	 * Use the expedited path if SRCU is heuristically idle (see
	 * srcu_might_be_idle()) or if expediting is in force globally;
	 * otherwise wait for a normal grace period.
	 */
	if (srcu_might_be_idle(ssp) || rcu_gp_is_expedited())
		synchronize_srcu_expedited(ssp);
	else
		__synchronize_srcu(ssp, true);
}
EXPORT_SYMBOL_GPL(synchronize_srcu);
1017
+
1018
/**
 * get_state_synchronize_srcu - Provide an end-of-grace-period cookie
 * @ssp: srcu_struct to provide cookie for.
 *
 * This function returns a cookie that can be passed to
 * poll_state_synchronize_srcu(), which will return true if a full grace
 * period has elapsed in the meantime.  It is the caller's responsibility
 * to make sure that grace period happens, for example, by invoking
 * call_srcu() after return from get_state_synchronize_srcu().
 */
unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp)
{
	// Any prior manipulation of SRCU-protected data must happen
	// before the load from ->srcu_gp_seq.
	smp_mb();
	return rcu_seq_snap(&ssp->srcu_gp_seq);
}
EXPORT_SYMBOL_GPL(get_state_synchronize_srcu);
1036
+
1037
/**
 * start_poll_synchronize_srcu - Provide cookie and start grace period
 * @ssp: srcu_struct to provide cookie for.
 *
 * This function returns a cookie that can be passed to
 * poll_state_synchronize_srcu(), which will return true if a full grace
 * period has elapsed in the meantime.  Unlike get_state_synchronize_srcu(),
 * this function also ensures that any needed SRCU grace period will be
 * started.  This convenience does come at a cost in terms of CPU overhead.
 */
unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp)
{
	/* No callback to queue (NULL rhp); just start the GP if needed. */
	return srcu_gp_start_if_needed(ssp, NULL, true);
}
EXPORT_SYMBOL_GPL(start_poll_synchronize_srcu);
1052
+
1053
/**
 * poll_state_synchronize_srcu - Has cookie's grace period ended?
 * @ssp: srcu_struct to provide cookie for.
 * @cookie: Return value from get_state_synchronize_srcu() or start_poll_synchronize_srcu().
 *
 * This function takes the cookie that was returned from either
 * get_state_synchronize_srcu() or start_poll_synchronize_srcu(), and
 * returns @true if an SRCU grace period elapsed since the time that the
 * cookie was created.
 */
bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie)
{
	if (!rcu_seq_done(&ssp->srcu_gp_seq, cookie))
		return false;
	// Ensure that the end of the SRCU grace period happens before
	// any subsequent code that the caller might execute.
	smp_mb(); // ^^^
	return true;
}
EXPORT_SYMBOL_GPL(poll_state_synchronize_srcu);
10171073
/*
 * Callback function for srcu_barrier() use: each instance entrained by
 * srcu_barrier() counts down ->srcu_barrier_cpu_cnt, and the last one
 * to run completes ->srcu_barrier_completion to wake srcu_barrier() up.
 */
static void srcu_barrier_cb(struct rcu_head *rhp)
{
	struct srcu_data *sdp;
	struct srcu_struct *ssp;

	sdp = container_of(rhp, struct srcu_data, srcu_barrier_head);
	ssp = sdp->ssp;
	if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt))
		complete(&ssp->srcu_barrier_completion);
}
10311087
/**
 * srcu_barrier - Wait until all in-flight call_srcu() callbacks complete.
 * @ssp: srcu_struct on which to wait for in-flight callbacks.
 */
void srcu_barrier(struct srcu_struct *ssp)
{
	int cpu;
	struct srcu_data *sdp;
	unsigned long s = rcu_seq_snap(&ssp->srcu_barrier_seq);

	check_init_srcu_struct(ssp);
	mutex_lock(&ssp->srcu_barrier_mutex);
	if (rcu_seq_done(&ssp->srcu_barrier_seq, s)) {
		smp_mb(); /* Force ordering following return. */
		mutex_unlock(&ssp->srcu_barrier_mutex);
		return; /* Someone else did our work for us. */
	}
	rcu_seq_start(&ssp->srcu_barrier_seq);
	init_completion(&ssp->srcu_barrier_completion);

	/* Initial count prevents reaching zero until all CBs are posted. */
	atomic_set(&ssp->srcu_barrier_cpu_cnt, 1);

	/*
	 * Each pass through this loop enqueues a callback, but only on
	 * CPUs that already have callbacks queued: entraining behind the
	 * last callback already in a CPU's queue suffices, and a CPU with
	 * an empty queue has nothing for srcu_barrier() to wait on.
	 */
	for_each_possible_cpu(cpu) {
		sdp = per_cpu_ptr(ssp->sda, cpu);
		spin_lock_irq_rcu_node(sdp);
		atomic_inc(&ssp->srcu_barrier_cpu_cnt);
		sdp->srcu_barrier_head.func = srcu_barrier_cb;
		debug_rcu_head_queue(&sdp->srcu_barrier_head);
		if (!rcu_segcblist_entrain(&sdp->srcu_cblist,
					   &sdp->srcu_barrier_head)) {
			/* Entrain failed: no callbacks, so nothing to wait for. */
			debug_rcu_head_unqueue(&sdp->srcu_barrier_head);
			atomic_dec(&ssp->srcu_barrier_cpu_cnt);
		}
		spin_unlock_irq_rcu_node(sdp);
	}

	/* Remove the initial count, at which point reaching zero can happen. */
	if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt))
		complete(&ssp->srcu_barrier_completion);
	wait_for_completion(&ssp->srcu_barrier_completion);

	rcu_seq_end(&ssp->srcu_barrier_seq);
	mutex_unlock(&ssp->srcu_barrier_mutex);
}
EXPORT_SYMBOL_GPL(srcu_barrier);
10861142
/**
 * srcu_batches_completed - return batches completed.
 * @ssp: srcu_struct on which to report batch completion.
 *
 * Report the number of batches, correlated with, but not necessarily
 * precisely the same as, the number of grace periods that have elapsed.
 */
unsigned long srcu_batches_completed(struct srcu_struct *ssp)
{
	/* READ_ONCE(): ->srcu_idx is updated concurrently by srcu_flip(). */
	return READ_ONCE(ssp->srcu_idx);
}
EXPORT_SYMBOL_GPL(srcu_batches_completed);
10991155
....@@ -1102,11 +1158,11 @@
11021158 * to SRCU_STATE_SCAN2, and invoke srcu_gp_end() when scan has
11031159 * completed in that state.
11041160 */
1105
static void srcu_advance_state(struct srcu_struct *ssp)
{
	int idx;

	mutex_lock(&ssp->srcu_gp_mutex);

	/*
	 * Because readers might be delayed for an extended period after
	 * fetching ->srcu_idx for their index, at any point in time there
	 * might well be readers using both idx=0 and idx=1.  We therefore
	 * need to wait for readers to clear from both index values before
	 * invoking a callback.
	 *
	 * The load-acquire ensures that we see the accesses performed
	 * by the prior grace period.
	 */
	idx = rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq)); /* ^^^ */
	if (idx == SRCU_STATE_IDLE) {
		spin_lock_irq_rcu_node(ssp);
		if (ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)) {
			/* All requested grace periods have completed. */
			WARN_ON_ONCE(rcu_seq_state(ssp->srcu_gp_seq));
			spin_unlock_irq_rcu_node(ssp);
			mutex_unlock(&ssp->srcu_gp_mutex);
			return;
		}
		idx = rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq));
		if (idx == SRCU_STATE_IDLE)
			srcu_gp_start(ssp);
		spin_unlock_irq_rcu_node(ssp);
		if (idx != SRCU_STATE_IDLE) {
			mutex_unlock(&ssp->srcu_gp_mutex);
			return; /* Someone else started the grace period. */
		}
	}

	if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) == SRCU_STATE_SCAN1) {
		idx = 1 ^ (ssp->srcu_idx & 1);
		if (!try_check_zero(ssp, idx, 1)) {
			mutex_unlock(&ssp->srcu_gp_mutex);
			return; /* readers present, retry later. */
		}
		srcu_flip(ssp);
		/* Advance the state machine under the srcu_struct's lock. */
		spin_lock_irq_rcu_node(ssp);
		rcu_seq_set_state(&ssp->srcu_gp_seq, SRCU_STATE_SCAN2);
		spin_unlock_irq_rcu_node(ssp);
	}

	if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) == SRCU_STATE_SCAN2) {

		/*
		 * SRCU read-side critical sections are normally short,
		 * so check at least twice in quick succession after a flip.
		 */
		idx = 1 ^ (ssp->srcu_idx & 1);
		if (!try_check_zero(ssp, idx, 2)) {
			mutex_unlock(&ssp->srcu_gp_mutex);
			return; /* readers present, retry later. */
		}
		srcu_gp_end(ssp); /* Releases ->srcu_gp_mutex. */
	}
}
11641222
....@@ -1174,14 +1232,15 @@
11741232 struct rcu_cblist ready_cbs;
11751233 struct rcu_head *rhp;
11761234 struct srcu_data *sdp;
1177
- struct srcu_struct *sp;
1235
+ struct srcu_struct *ssp;
11781236
1179
- sdp = container_of(work, struct srcu_data, work.work);
1180
- sp = sdp->sp;
1237
+ sdp = container_of(work, struct srcu_data, work);
1238
+
1239
+ ssp = sdp->ssp;
11811240 rcu_cblist_init(&ready_cbs);
11821241 spin_lock_irq_rcu_node(sdp);
11831242 rcu_segcblist_advance(&sdp->srcu_cblist,
1184
- rcu_seq_current(&sp->srcu_gp_seq));
1243
+ rcu_seq_current(&ssp->srcu_gp_seq));
11851244 if (sdp->srcu_cblist_invoking ||
11861245 !rcu_segcblist_ready_cbs(&sdp->srcu_cblist)) {
11871246 spin_unlock_irq_rcu_node(sdp);
....@@ -1207,7 +1266,7 @@
12071266 spin_lock_irq_rcu_node(sdp);
12081267 rcu_segcblist_insert_count(&sdp->srcu_cblist, &ready_cbs);
12091268 (void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
1210
- rcu_seq_snap(&sp->srcu_gp_seq));
1269
+ rcu_seq_snap(&ssp->srcu_gp_seq));
12111270 sdp->srcu_cblist_invoking = false;
12121271 more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist);
12131272 spin_unlock_irq_rcu_node(sdp);
....@@ -1219,24 +1278,24 @@
12191278 * Finished one round of SRCU grace period. Start another if there are
12201279 * more SRCU callbacks queued, otherwise put SRCU into not-running state.
12211280 */
1222
static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay)
{
	/* Assume more grace-period work is needed unless proven otherwise. */
	bool pushgp = true;

	spin_lock_irq_rcu_node(ssp);
	if (ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)) {
		if (!WARN_ON_ONCE(rcu_seq_state(ssp->srcu_gp_seq))) {
			/* All requests fulfilled, time to go idle. */
			pushgp = false;
		}
	} else if (!rcu_seq_state(ssp->srcu_gp_seq)) {
		/* Outstanding request and no GP.  Start one. */
		srcu_gp_start(ssp);
	}
	spin_unlock_irq_rcu_node(ssp);

	/* Requeue this srcu_struct's work to keep the state machine moving. */
	if (pushgp)
		queue_delayed_work(rcu_gp_wq, &ssp->work, delay);
}
12411300
12421301 /*
....@@ -1244,43 +1303,43 @@
12441303 */
static void process_srcu(struct work_struct *work)
{
	struct srcu_struct *ssp;

	ssp = container_of(work, struct srcu_struct, work.work);

	/* Advance the grace-period state machine, then requeue as needed. */
	srcu_advance_state(ssp);
	srcu_reschedule(ssp, srcu_get_delay(ssp));
}
12541313
/* Report grace-period state to the rcutorture test infrastructure. */
void srcutorture_get_gp_data(enum rcutorture_type test_type,
			     struct srcu_struct *ssp, int *flags,
			     unsigned long *gp_seq)
{
	if (test_type != SRCU_FLAVOR)
		return;
	*flags = 0;
	*gp_seq = rcu_seq_current(&ssp->srcu_gp_seq);
}
EXPORT_SYMBOL_GPL(srcutorture_get_gp_data);
12651324
1266
-void srcu_torture_stats_print(struct srcu_struct *sp, char *tt, char *tf)
1325
+void srcu_torture_stats_print(struct srcu_struct *ssp, char *tt, char *tf)
12671326 {
12681327 int cpu;
12691328 int idx;
12701329 unsigned long s0 = 0, s1 = 0;
12711330
1272
- idx = sp->srcu_idx & 0x1;
1331
+ idx = ssp->srcu_idx & 0x1;
12731332 pr_alert("%s%s Tree SRCU g%ld per-CPU(idx=%d):",
1274
- tt, tf, rcu_seq_current(&sp->srcu_gp_seq), idx);
1333
+ tt, tf, rcu_seq_current(&ssp->srcu_gp_seq), idx);
12751334 for_each_possible_cpu(cpu) {
12761335 unsigned long l0, l1;
12771336 unsigned long u0, u1;
12781337 long c0, c1;
12791338 struct srcu_data *sdp;
12801339
1281
- sdp = per_cpu_ptr(sp->sda, cpu);
1282
- u0 = sdp->srcu_unlock_count[!idx];
1283
- u1 = sdp->srcu_unlock_count[idx];
1340
+ sdp = per_cpu_ptr(ssp->sda, cpu);
1341
+ u0 = data_race(sdp->srcu_unlock_count[!idx]);
1342
+ u1 = data_race(sdp->srcu_unlock_count[idx]);
12841343
12851344 /*
12861345 * Make sure that a lock is always counted if the corresponding
....@@ -1288,13 +1347,14 @@
12881347 */
12891348 smp_rmb();
12901349
1291
- l0 = sdp->srcu_lock_count[!idx];
1292
- l1 = sdp->srcu_lock_count[idx];
1350
+ l0 = data_race(sdp->srcu_lock_count[!idx]);
1351
+ l1 = data_race(sdp->srcu_lock_count[idx]);
12931352
12941353 c0 = l0 - u0;
12951354 c1 = l1 - u1;
1296
- pr_cont(" %d(%ld,%ld %1p)",
1297
- cpu, c0, c1, rcu_segcblist_head(&sdp->srcu_cblist));
1355
+ pr_cont(" %d(%ld,%ld %c)",
1356
+ cpu, c0, c1,
1357
+ "C."[rcu_segcblist_empty(&sdp->srcu_cblist)]);
12981358 s0 += c0;
12991359 s1 += c1;
13001360 }
....@@ -1310,3 +1370,82 @@
13101370 return 0;
13111371 }
13121372 early_initcall(srcu_bootup_announce);
1373
+
1374
/*
 * Late in boot, queue grace-period work for any srcu_struct structures
 * that had grace periods requested before workqueues were available.
 * srcu_boot_list is only manipulated this early in boot, so no locking
 * is required (see the srcu_boot_list declaration comment).
 */
void __init srcu_init(void)
{
	struct srcu_struct *ssp;

	srcu_init_done = true;
	while (!list_empty(&srcu_boot_list)) {
		ssp = list_first_entry(&srcu_boot_list, struct srcu_struct,
				       work.work.entry);
		check_init_srcu_struct(ssp);
		list_del_init(&ssp->work.work.entry);
		queue_work(rcu_gp_wq, &ssp->work.work);
	}
}
1387
+
1388
+#ifdef CONFIG_MODULES
1389
+
1390
+/* Initialize any global-scope srcu_struct structures used by this module. */
1391
+static int srcu_module_coming(struct module *mod)
1392
+{
1393
+ int i;
1394
+ struct srcu_struct **sspp = mod->srcu_struct_ptrs;
1395
+ int ret;
1396
+
1397
+ for (i = 0; i < mod->num_srcu_structs; i++) {
1398
+ ret = init_srcu_struct(*(sspp++));
1399
+ if (WARN_ON_ONCE(ret))
1400
+ return ret;
1401
+ }
1402
+ return 0;
1403
+}
1404
+
1405
+/* Clean up any global-scope srcu_struct structures used by this module. */
1406
+static void srcu_module_going(struct module *mod)
1407
+{
1408
+ int i;
1409
+ struct srcu_struct **sspp = mod->srcu_struct_ptrs;
1410
+
1411
+ for (i = 0; i < mod->num_srcu_structs; i++)
1412
+ cleanup_srcu_struct(*(sspp++));
1413
+}
1414
+
1415
+/* Handle one module, either coming or going. */
1416
+static int srcu_module_notify(struct notifier_block *self,
1417
+ unsigned long val, void *data)
1418
+{
1419
+ struct module *mod = data;
1420
+ int ret = 0;
1421
+
1422
+ switch (val) {
1423
+ case MODULE_STATE_COMING:
1424
+ ret = srcu_module_coming(mod);
1425
+ break;
1426
+ case MODULE_STATE_GOING:
1427
+ srcu_module_going(mod);
1428
+ break;
1429
+ default:
1430
+ break;
1431
+ }
1432
+ return ret;
1433
+}
1434
+
1435
/* Notifier that inits/cleans up module-global srcu_struct structures. */
static struct notifier_block srcu_module_nb = {
	.notifier_call = srcu_module_notify,
	.priority = 0,
};
1439
+
1440
/* Register the module notifier at late_initcall() time. */
static __init int init_srcu_module_notifier(void)
{
	int ret;

	ret = register_module_notifier(&srcu_module_nb);
	if (ret)
		pr_warn("Failed to register srcu module notifier\n");
	return ret;
}
late_initcall(init_srcu_module_notifier);
1450
+
1451
+#endif /* #ifdef CONFIG_MODULES */