.. | .. |
---|
| 1 | +// SPDX-License-Identifier: GPL-2.0+ |
---|
1 | 2 | /* |
---|
2 | 3 | * Sleepable Read-Copy Update mechanism for mutual exclusion. |
---|
3 | | - * |
---|
4 | | - * This program is free software; you can redistribute it and/or modify |
---|
5 | | - * it under the terms of the GNU General Public License as published by |
---|
6 | | - * the Free Software Foundation; either version 2 of the License, or |
---|
7 | | - * (at your option) any later version. |
---|
8 | | - * |
---|
9 | | - * This program is distributed in the hope that it will be useful, |
---|
10 | | - * but WITHOUT ANY WARRANTY; without even the implied warranty of |
---|
11 | | - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
---|
12 | | - * GNU General Public License for more details. |
---|
13 | | - * |
---|
14 | | - * You should have received a copy of the GNU General Public License |
---|
15 | | - * along with this program; if not, you can access it online at |
---|
16 | | - * http://www.gnu.org/licenses/gpl-2.0.html. |
---|
17 | 4 | * |
---|
18 | 5 | * Copyright (C) IBM Corporation, 2006 |
---|
19 | 6 | * Copyright (C) Fujitsu, 2012 |
---|
20 | 7 | * |
---|
21 | | - * Author: Paul McKenney <paulmck@us.ibm.com> |
---|
| 8 | + * Authors: Paul McKenney <paulmck@linux.ibm.com> |
---|
22 | 9 | * Lai Jiangshan <laijs@cn.fujitsu.com> |
---|
23 | 10 | * |
---|
24 | 11 | * For detailed explanation of Read-Copy Update mechanism see - |
---|
.. | .. |
---|
38 | 25 | #include <linux/delay.h> |
---|
39 | 26 | #include <linux/module.h> |
---|
40 | 27 | #include <linux/srcu.h> |
---|
41 | | -#include <linux/cpu.h> |
---|
42 | | -#include <linux/locallock.h> |
---|
43 | 28 | |
---|
44 | 29 | #include "rcu.h" |
---|
45 | 30 | #include "rcu_segcblist.h" |
---|
.. | .. |
---|
53 | 38 | static ulong counter_wrap_check = (ULONG_MAX >> 2); |
---|
54 | 39 | module_param(counter_wrap_check, ulong, 0444); |
---|
55 | 40 | |
---|
| 41 | +/* Early-boot callback-management, so early that no lock is required! */ |
---|
| 42 | +static LIST_HEAD(srcu_boot_list); |
---|
| 43 | +static bool __read_mostly srcu_init_done; |
---|
| 44 | + |
---|
56 | 45 | static void srcu_invoke_callbacks(struct work_struct *work); |
---|
57 | | -static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay); |
---|
| 46 | +static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay); |
---|
58 | 47 | static void process_srcu(struct work_struct *work); |
---|
| 48 | +static void srcu_delay_timer(struct timer_list *t); |
---|
59 | 49 | |
---|
60 | 50 | /* Wrappers for lock acquisition and release, see raw_spin_lock_rcu_node(). */ |
---|
61 | 51 | #define spin_lock_rcu_node(p) \ |
---|
.. | .. |
---|
90 | 80 | * srcu_read_unlock() running against them. So if the is_static parameter |
---|
91 | 81 | * is set, don't initialize ->srcu_lock_count[] and ->srcu_unlock_count[]. |
---|
92 | 82 | */ |
---|
93 | | -static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static) |
---|
| 83 | +static void init_srcu_struct_nodes(struct srcu_struct *ssp, bool is_static) |
---|
94 | 84 | { |
---|
95 | 85 | int cpu; |
---|
96 | 86 | int i; |
---|
.. | .. |
---|
100 | 90 | struct srcu_node *snp; |
---|
101 | 91 | struct srcu_node *snp_first; |
---|
102 | 92 | |
---|
| 93 | + /* Initialize geometry if it has not already been initialized. */ |
---|
| 94 | + rcu_init_geometry(); |
---|
| 95 | + |
---|
103 | 96 | /* Work out the overall tree geometry. */ |
---|
104 | | - sp->level[0] = &sp->node[0]; |
---|
| 97 | + ssp->level[0] = &ssp->node[0]; |
---|
105 | 98 | for (i = 1; i < rcu_num_lvls; i++) |
---|
106 | | - sp->level[i] = sp->level[i - 1] + num_rcu_lvl[i - 1]; |
---|
| 99 | + ssp->level[i] = ssp->level[i - 1] + num_rcu_lvl[i - 1]; |
---|
107 | 100 | rcu_init_levelspread(levelspread, num_rcu_lvl); |
---|
108 | 101 | |
---|
109 | 102 | /* Each pass through this loop initializes one srcu_node structure. */ |
---|
110 | | - rcu_for_each_node_breadth_first(sp, snp) { |
---|
| 103 | + srcu_for_each_node_breadth_first(ssp, snp) { |
---|
111 | 104 | spin_lock_init(&ACCESS_PRIVATE(snp, lock)); |
---|
112 | 105 | WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) != |
---|
113 | 106 | ARRAY_SIZE(snp->srcu_data_have_cbs)); |
---|
.. | .. |
---|
118 | 111 | snp->srcu_gp_seq_needed_exp = 0; |
---|
119 | 112 | snp->grplo = -1; |
---|
120 | 113 | snp->grphi = -1; |
---|
121 | | - if (snp == &sp->node[0]) { |
---|
| 114 | + if (snp == &ssp->node[0]) { |
---|
122 | 115 | /* Root node, special case. */ |
---|
123 | 116 | snp->srcu_parent = NULL; |
---|
124 | 117 | continue; |
---|
125 | 118 | } |
---|
126 | 119 | |
---|
127 | 120 | /* Non-root node. */ |
---|
128 | | - if (snp == sp->level[level + 1]) |
---|
| 121 | + if (snp == ssp->level[level + 1]) |
---|
129 | 122 | level++; |
---|
130 | | - snp->srcu_parent = sp->level[level - 1] + |
---|
131 | | - (snp - sp->level[level]) / |
---|
| 123 | + snp->srcu_parent = ssp->level[level - 1] + |
---|
| 124 | + (snp - ssp->level[level]) / |
---|
132 | 125 | levelspread[level - 1]; |
---|
133 | 126 | } |
---|
134 | 127 | |
---|
.. | .. |
---|
139 | 132 | WARN_ON_ONCE(ARRAY_SIZE(sdp->srcu_lock_count) != |
---|
140 | 133 | ARRAY_SIZE(sdp->srcu_unlock_count)); |
---|
141 | 134 | level = rcu_num_lvls - 1; |
---|
142 | | - snp_first = sp->level[level]; |
---|
| 135 | + snp_first = ssp->level[level]; |
---|
143 | 136 | for_each_possible_cpu(cpu) { |
---|
144 | | - sdp = per_cpu_ptr(sp->sda, cpu); |
---|
| 137 | + sdp = per_cpu_ptr(ssp->sda, cpu); |
---|
145 | 138 | spin_lock_init(&ACCESS_PRIVATE(sdp, lock)); |
---|
146 | 139 | rcu_segcblist_init(&sdp->srcu_cblist); |
---|
147 | 140 | sdp->srcu_cblist_invoking = false; |
---|
148 | | - sdp->srcu_gp_seq_needed = sp->srcu_gp_seq; |
---|
149 | | - sdp->srcu_gp_seq_needed_exp = sp->srcu_gp_seq; |
---|
| 141 | + sdp->srcu_gp_seq_needed = ssp->srcu_gp_seq; |
---|
| 142 | + sdp->srcu_gp_seq_needed_exp = ssp->srcu_gp_seq; |
---|
150 | 143 | sdp->mynode = &snp_first[cpu / levelspread[level]]; |
---|
151 | 144 | for (snp = sdp->mynode; snp != NULL; snp = snp->srcu_parent) { |
---|
152 | 145 | if (snp->grplo < 0) |
---|
.. | .. |
---|
154 | 147 | snp->grphi = cpu; |
---|
155 | 148 | } |
---|
156 | 149 | sdp->cpu = cpu; |
---|
157 | | - INIT_DELAYED_WORK(&sdp->work, srcu_invoke_callbacks); |
---|
158 | | - sdp->sp = sp; |
---|
| 150 | + INIT_WORK(&sdp->work, srcu_invoke_callbacks); |
---|
| 151 | + timer_setup(&sdp->delay_work, srcu_delay_timer, 0); |
---|
| 152 | + sdp->ssp = ssp; |
---|
159 | 153 | sdp->grpmask = 1 << (cpu - sdp->mynode->grplo); |
---|
160 | 154 | if (is_static) |
---|
161 | 155 | continue; |
---|
.. | .. |
---|
174 | 168 | * parameter is passed through to init_srcu_struct_nodes(), and |
---|
175 | 169 | * also tells us that ->sda has already been wired up to srcu_data. |
---|
176 | 170 | */ |
---|
177 | | -static int init_srcu_struct_fields(struct srcu_struct *sp, bool is_static) |
---|
| 171 | +static int init_srcu_struct_fields(struct srcu_struct *ssp, bool is_static) |
---|
178 | 172 | { |
---|
179 | | - mutex_init(&sp->srcu_cb_mutex); |
---|
180 | | - mutex_init(&sp->srcu_gp_mutex); |
---|
181 | | - sp->srcu_idx = 0; |
---|
182 | | - sp->srcu_gp_seq = 0; |
---|
183 | | - sp->srcu_barrier_seq = 0; |
---|
184 | | - mutex_init(&sp->srcu_barrier_mutex); |
---|
185 | | - atomic_set(&sp->srcu_barrier_cpu_cnt, 0); |
---|
186 | | - INIT_DELAYED_WORK(&sp->work, process_srcu); |
---|
| 173 | + mutex_init(&ssp->srcu_cb_mutex); |
---|
| 174 | + mutex_init(&ssp->srcu_gp_mutex); |
---|
| 175 | + ssp->srcu_idx = 0; |
---|
| 176 | + ssp->srcu_gp_seq = 0; |
---|
| 177 | + ssp->srcu_barrier_seq = 0; |
---|
| 178 | + mutex_init(&ssp->srcu_barrier_mutex); |
---|
| 179 | + atomic_set(&ssp->srcu_barrier_cpu_cnt, 0); |
---|
| 180 | + INIT_DELAYED_WORK(&ssp->work, process_srcu); |
---|
187 | 181 | if (!is_static) |
---|
188 | | - sp->sda = alloc_percpu(struct srcu_data); |
---|
189 | | - init_srcu_struct_nodes(sp, is_static); |
---|
190 | | - sp->srcu_gp_seq_needed_exp = 0; |
---|
191 | | - sp->srcu_last_gp_end = ktime_get_mono_fast_ns(); |
---|
192 | | - smp_store_release(&sp->srcu_gp_seq_needed, 0); /* Init done. */ |
---|
193 | | - return sp->sda ? 0 : -ENOMEM; |
---|
| 182 | + ssp->sda = alloc_percpu(struct srcu_data); |
---|
| 183 | + init_srcu_struct_nodes(ssp, is_static); |
---|
| 184 | + ssp->srcu_gp_seq_needed_exp = 0; |
---|
| 185 | + ssp->srcu_last_gp_end = ktime_get_mono_fast_ns(); |
---|
| 186 | + smp_store_release(&ssp->srcu_gp_seq_needed, 0); /* Init done. */ |
---|
| 187 | + return ssp->sda ? 0 : -ENOMEM; |
---|
194 | 188 | } |
---|
195 | 189 | |
---|
196 | 190 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
---|
197 | 191 | |
---|
198 | | -int __init_srcu_struct(struct srcu_struct *sp, const char *name, |
---|
| 192 | +int __init_srcu_struct(struct srcu_struct *ssp, const char *name, |
---|
199 | 193 | struct lock_class_key *key) |
---|
200 | 194 | { |
---|
201 | 195 | /* Don't re-initialize a lock while it is held. */ |
---|
202 | | - debug_check_no_locks_freed((void *)sp, sizeof(*sp)); |
---|
203 | | - lockdep_init_map(&sp->dep_map, name, key, 0); |
---|
204 | | - spin_lock_init(&ACCESS_PRIVATE(sp, lock)); |
---|
205 | | - return init_srcu_struct_fields(sp, false); |
---|
| 196 | + debug_check_no_locks_freed((void *)ssp, sizeof(*ssp)); |
---|
| 197 | + lockdep_init_map(&ssp->dep_map, name, key, 0); |
---|
| 198 | + spin_lock_init(&ACCESS_PRIVATE(ssp, lock)); |
---|
| 199 | + return init_srcu_struct_fields(ssp, false); |
---|
206 | 200 | } |
---|
207 | 201 | EXPORT_SYMBOL_GPL(__init_srcu_struct); |
---|
208 | 202 | |
---|
.. | .. |
---|
210 | 204 | |
---|
211 | 205 | /** |
---|
212 | 206 | * init_srcu_struct - initialize a sleep-RCU structure |
---|
213 | | - * @sp: structure to initialize. |
---|
| 207 | + * @ssp: structure to initialize. |
---|
214 | 208 | * |
---|
215 | 209 | * Must invoke this on a given srcu_struct before passing that srcu_struct |
---|
216 | 210 | * to any other function. Each srcu_struct represents a separate domain |
---|
217 | 211 | * of SRCU protection. |
---|
218 | 212 | */ |
---|
219 | | -int init_srcu_struct(struct srcu_struct *sp) |
---|
| 213 | +int init_srcu_struct(struct srcu_struct *ssp) |
---|
220 | 214 | { |
---|
221 | | - spin_lock_init(&ACCESS_PRIVATE(sp, lock)); |
---|
222 | | - return init_srcu_struct_fields(sp, false); |
---|
| 215 | + spin_lock_init(&ACCESS_PRIVATE(ssp, lock)); |
---|
| 216 | + return init_srcu_struct_fields(ssp, false); |
---|
223 | 217 | } |
---|
224 | 218 | EXPORT_SYMBOL_GPL(init_srcu_struct); |
---|
225 | 219 | |
---|
.. | .. |
---|
229 | 223 | * First-use initialization of statically allocated srcu_struct |
---|
230 | 224 | * structure. Wiring up the combining tree is more than can be |
---|
231 | 225 | * done with compile-time initialization, so this check is added |
---|
232 | | - * to each update-side SRCU primitive. Use sp->lock, which -is- |
---|
| 226 | + * to each update-side SRCU primitive. Use ssp->lock, which -is- |
---|
233 | 227 | * compile-time initialized, to resolve races involving multiple |
---|
234 | 228 | * CPUs trying to garner first-use privileges. |
---|
235 | 229 | */ |
---|
236 | | -static void check_init_srcu_struct(struct srcu_struct *sp) |
---|
| 230 | +static void check_init_srcu_struct(struct srcu_struct *ssp) |
---|
237 | 231 | { |
---|
238 | 232 | unsigned long flags; |
---|
239 | 233 | |
---|
240 | | - WARN_ON_ONCE(rcu_scheduler_active == RCU_SCHEDULER_INIT); |
---|
241 | 234 | /* The smp_load_acquire() pairs with the smp_store_release(). */ |
---|
242 | | - if (!rcu_seq_state(smp_load_acquire(&sp->srcu_gp_seq_needed))) /*^^^*/ |
---|
| 235 | + if (!rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq_needed))) /*^^^*/ |
---|
243 | 236 | return; /* Already initialized. */ |
---|
244 | | - spin_lock_irqsave_rcu_node(sp, flags); |
---|
245 | | - if (!rcu_seq_state(sp->srcu_gp_seq_needed)) { |
---|
246 | | - spin_unlock_irqrestore_rcu_node(sp, flags); |
---|
| 237 | + spin_lock_irqsave_rcu_node(ssp, flags); |
---|
| 238 | + if (!rcu_seq_state(ssp->srcu_gp_seq_needed)) { |
---|
| 239 | + spin_unlock_irqrestore_rcu_node(ssp, flags); |
---|
247 | 240 | return; |
---|
248 | 241 | } |
---|
249 | | - init_srcu_struct_fields(sp, true); |
---|
250 | | - spin_unlock_irqrestore_rcu_node(sp, flags); |
---|
| 242 | + init_srcu_struct_fields(ssp, true); |
---|
| 243 | + spin_unlock_irqrestore_rcu_node(ssp, flags); |
---|
251 | 244 | } |
---|
252 | 245 | |
---|
253 | 246 | /* |
---|
254 | 247 | * Returns approximate total of the readers' ->srcu_lock_count[] values |
---|
255 | 248 | * for the rank of per-CPU counters specified by idx. |
---|
256 | 249 | */ |
---|
257 | | -static unsigned long srcu_readers_lock_idx(struct srcu_struct *sp, int idx) |
---|
| 250 | +static unsigned long srcu_readers_lock_idx(struct srcu_struct *ssp, int idx) |
---|
258 | 251 | { |
---|
259 | 252 | int cpu; |
---|
260 | 253 | unsigned long sum = 0; |
---|
261 | 254 | |
---|
262 | 255 | for_each_possible_cpu(cpu) { |
---|
263 | | - struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu); |
---|
| 256 | + struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu); |
---|
264 | 257 | |
---|
265 | 258 | sum += READ_ONCE(cpuc->srcu_lock_count[idx]); |
---|
266 | 259 | } |
---|
.. | .. |
---|
271 | 264 | * Returns approximate total of the readers' ->srcu_unlock_count[] values |
---|
272 | 265 | * for the rank of per-CPU counters specified by idx. |
---|
273 | 266 | */ |
---|
274 | | -static unsigned long srcu_readers_unlock_idx(struct srcu_struct *sp, int idx) |
---|
| 267 | +static unsigned long srcu_readers_unlock_idx(struct srcu_struct *ssp, int idx) |
---|
275 | 268 | { |
---|
276 | 269 | int cpu; |
---|
277 | 270 | unsigned long sum = 0; |
---|
278 | 271 | |
---|
279 | 272 | for_each_possible_cpu(cpu) { |
---|
280 | | - struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu); |
---|
| 273 | + struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu); |
---|
281 | 274 | |
---|
282 | 275 | sum += READ_ONCE(cpuc->srcu_unlock_count[idx]); |
---|
283 | 276 | } |
---|
.. | .. |
---|
288 | 281 | * Return true if the number of pre-existing readers is determined to |
---|
289 | 282 | * be zero. |
---|
290 | 283 | */ |
---|
291 | | -static bool srcu_readers_active_idx_check(struct srcu_struct *sp, int idx) |
---|
| 284 | +static bool srcu_readers_active_idx_check(struct srcu_struct *ssp, int idx) |
---|
292 | 285 | { |
---|
293 | 286 | unsigned long unlocks; |
---|
294 | 287 | |
---|
295 | | - unlocks = srcu_readers_unlock_idx(sp, idx); |
---|
| 288 | + unlocks = srcu_readers_unlock_idx(ssp, idx); |
---|
296 | 289 | |
---|
297 | 290 | /* |
---|
298 | 291 | * Make sure that a lock is always counted if the corresponding |
---|
.. | .. |
---|
328 | 321 | * of floor(ULONG_MAX/NR_CPUS/2), which should be sufficient, |
---|
329 | 322 | * especially on 64-bit systems. |
---|
330 | 323 | */ |
---|
331 | | - return srcu_readers_lock_idx(sp, idx) == unlocks; |
---|
| 324 | + return srcu_readers_lock_idx(ssp, idx) == unlocks; |
---|
332 | 325 | } |
---|
333 | 326 | |
---|
334 | 327 | /** |
---|
335 | 328 | * srcu_readers_active - returns true if there are readers. and false |
---|
336 | 329 | * otherwise |
---|
337 | | - * @sp: which srcu_struct to count active readers (holding srcu_read_lock). |
---|
| 330 | + * @ssp: which srcu_struct to count active readers (holding srcu_read_lock). |
---|
338 | 331 | * |
---|
339 | 332 | * Note that this is not an atomic primitive, and can therefore suffer |
---|
340 | 333 | * severe errors when invoked on an active srcu_struct. That said, it |
---|
341 | 334 | * can be useful as an error check at cleanup time. |
---|
342 | 335 | */ |
---|
343 | | -static bool srcu_readers_active(struct srcu_struct *sp) |
---|
| 336 | +static bool srcu_readers_active(struct srcu_struct *ssp) |
---|
344 | 337 | { |
---|
345 | 338 | int cpu; |
---|
346 | 339 | unsigned long sum = 0; |
---|
347 | 340 | |
---|
348 | 341 | for_each_possible_cpu(cpu) { |
---|
349 | | - struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu); |
---|
| 342 | + struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu); |
---|
350 | 343 | |
---|
351 | 344 | sum += READ_ONCE(cpuc->srcu_lock_count[0]); |
---|
352 | 345 | sum += READ_ONCE(cpuc->srcu_lock_count[1]); |
---|
.. | .. |
---|
362 | 355 | * Return grace-period delay, zero if there are expedited grace |
---|
363 | 356 | * periods pending, SRCU_INTERVAL otherwise. |
---|
364 | 357 | */ |
---|
365 | | -static unsigned long srcu_get_delay(struct srcu_struct *sp) |
---|
| 358 | +static unsigned long srcu_get_delay(struct srcu_struct *ssp) |
---|
366 | 359 | { |
---|
367 | | - if (ULONG_CMP_LT(READ_ONCE(sp->srcu_gp_seq), |
---|
368 | | - READ_ONCE(sp->srcu_gp_seq_needed_exp))) |
---|
| 360 | + if (ULONG_CMP_LT(READ_ONCE(ssp->srcu_gp_seq), |
---|
| 361 | + READ_ONCE(ssp->srcu_gp_seq_needed_exp))) |
---|
369 | 362 | return 0; |
---|
370 | 363 | return SRCU_INTERVAL; |
---|
371 | 364 | } |
---|
372 | 365 | |
---|
373 | | -/* Helper for cleanup_srcu_struct() and cleanup_srcu_struct_quiesced(). */ |
---|
374 | | -void _cleanup_srcu_struct(struct srcu_struct *sp, bool quiesced) |
---|
| 366 | +/** |
---|
| 367 | + * cleanup_srcu_struct - deconstruct a sleep-RCU structure |
---|
| 368 | + * @ssp: structure to clean up. |
---|
| 369 | + * |
---|
| 370 | + * Must invoke this after you are finished using a given srcu_struct that |
---|
| 371 | + * was initialized via init_srcu_struct(), else you leak memory. |
---|
| 372 | + */ |
---|
| 373 | +void cleanup_srcu_struct(struct srcu_struct *ssp) |
---|
375 | 374 | { |
---|
376 | 375 | int cpu; |
---|
377 | 376 | |
---|
378 | | - if (WARN_ON(!srcu_get_delay(sp))) |
---|
| 377 | + if (WARN_ON(!srcu_get_delay(ssp))) |
---|
379 | 378 | return; /* Just leak it! */ |
---|
380 | | - if (WARN_ON(srcu_readers_active(sp))) |
---|
| 379 | + if (WARN_ON(srcu_readers_active(ssp))) |
---|
381 | 380 | return; /* Just leak it! */ |
---|
382 | | - if (quiesced) { |
---|
383 | | - if (WARN_ON(delayed_work_pending(&sp->work))) |
---|
384 | | - return; /* Just leak it! */ |
---|
385 | | - } else { |
---|
386 | | - flush_delayed_work(&sp->work); |
---|
| 381 | + flush_delayed_work(&ssp->work); |
---|
| 382 | + for_each_possible_cpu(cpu) { |
---|
| 383 | + struct srcu_data *sdp = per_cpu_ptr(ssp->sda, cpu); |
---|
| 384 | + |
---|
| 385 | + del_timer_sync(&sdp->delay_work); |
---|
| 386 | + flush_work(&sdp->work); |
---|
| 387 | + if (WARN_ON(rcu_segcblist_n_cbs(&sdp->srcu_cblist))) |
---|
| 388 | + return; /* Forgot srcu_barrier(), so just leak it! */ |
---|
387 | 389 | } |
---|
388 | | - for_each_possible_cpu(cpu) |
---|
389 | | - if (quiesced) { |
---|
390 | | - if (WARN_ON(delayed_work_pending(&per_cpu_ptr(sp->sda, cpu)->work))) |
---|
391 | | - return; /* Just leak it! */ |
---|
392 | | - } else { |
---|
393 | | - flush_delayed_work(&per_cpu_ptr(sp->sda, cpu)->work); |
---|
394 | | - } |
---|
395 | | - if (WARN_ON(rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) != SRCU_STATE_IDLE) || |
---|
396 | | - WARN_ON(srcu_readers_active(sp))) { |
---|
| 390 | + if (WARN_ON(rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) != SRCU_STATE_IDLE) || |
---|
| 391 | + WARN_ON(srcu_readers_active(ssp))) { |
---|
397 | 392 | pr_info("%s: Active srcu_struct %p state: %d\n", |
---|
398 | | - __func__, sp, rcu_seq_state(READ_ONCE(sp->srcu_gp_seq))); |
---|
| 393 | + __func__, ssp, rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq))); |
---|
399 | 394 | return; /* Caller forgot to stop doing call_srcu()? */ |
---|
400 | 395 | } |
---|
401 | | - free_percpu(sp->sda); |
---|
402 | | - sp->sda = NULL; |
---|
| 396 | + free_percpu(ssp->sda); |
---|
| 397 | + ssp->sda = NULL; |
---|
403 | 398 | } |
---|
404 | | -EXPORT_SYMBOL_GPL(_cleanup_srcu_struct); |
---|
| 399 | +EXPORT_SYMBOL_GPL(cleanup_srcu_struct); |
---|
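
For context, here is a minimal sketch (not part of this patch) of the init/cleanup lifecycle that the reworked cleanup_srcu_struct() expects. The srcu_struct name and the module hooks are illustrative assumptions, but the ordering — srcu_barrier() to drain callbacks, then cleanup_srcu_struct() — matches the WARN_ON checks above.

```c
/* Sketch only: illustrative module using a dynamically initialized
 * srcu_struct; "my_srcu" and the init/exit hooks are assumed names. */
#include <linux/module.h>
#include <linux/srcu.h>

static struct srcu_struct my_srcu;

static int __init my_mod_init(void)
{
	return init_srcu_struct(&my_srcu);	/* allocates ->sda, wires up the tree */
}

static void __exit my_mod_exit(void)
{
	srcu_barrier(&my_srcu);		/* wait for any pending call_srcu() callbacks */
	cleanup_srcu_struct(&my_srcu);	/* otherwise the per-CPU data leaks */
}

module_init(my_mod_init);
module_exit(my_mod_exit);
MODULE_LICENSE("GPL");
```
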
405 | 400 | |
---|
406 | 401 | /* |
---|
407 | 402 | * Counts the new reader in the appropriate per-CPU element of the |
---|
408 | 403 | * srcu_struct. |
---|
409 | 404 | * Returns an index that must be passed to the matching srcu_read_unlock(). |
---|
410 | 405 | */ |
---|
411 | | -int __srcu_read_lock(struct srcu_struct *sp) |
---|
| 406 | +int __srcu_read_lock(struct srcu_struct *ssp) |
---|
412 | 407 | { |
---|
413 | 408 | int idx; |
---|
414 | 409 | |
---|
415 | | - idx = READ_ONCE(sp->srcu_idx) & 0x1; |
---|
416 | | - this_cpu_inc(sp->sda->srcu_lock_count[idx]); |
---|
| 410 | + idx = READ_ONCE(ssp->srcu_idx) & 0x1; |
---|
| 411 | + this_cpu_inc(ssp->sda->srcu_lock_count[idx]); |
---|
417 | 412 | smp_mb(); /* B */ /* Avoid leaking the critical section. */ |
---|
418 | 413 | return idx; |
---|
419 | 414 | } |
---|
.. | .. |
---|
424 | 419 | * element of the srcu_struct. Note that this may well be a different |
---|
425 | 420 | * CPU than that which was incremented by the corresponding srcu_read_lock(). |
---|
426 | 421 | */ |
---|
427 | | -void __srcu_read_unlock(struct srcu_struct *sp, int idx) |
---|
| 422 | +void __srcu_read_unlock(struct srcu_struct *ssp, int idx) |
---|
428 | 423 | { |
---|
429 | 424 | smp_mb(); /* C */ /* Avoid leaking the critical section. */ |
---|
430 | | - this_cpu_inc(sp->sda->srcu_unlock_count[idx]); |
---|
| 425 | + this_cpu_inc(ssp->sda->srcu_unlock_count[idx]); |
---|
431 | 426 | } |
---|
432 | 427 | EXPORT_SYMBOL_GPL(__srcu_read_unlock); |
---|
433 | 428 | |
---|
.. | .. |
---|
443 | 438 | /* |
---|
444 | 439 | * Start an SRCU grace period. |
---|
445 | 440 | */ |
---|
446 | | -static void srcu_gp_start(struct srcu_struct *sp) |
---|
| 441 | +static void srcu_gp_start(struct srcu_struct *ssp) |
---|
447 | 442 | { |
---|
448 | | - struct srcu_data *sdp = this_cpu_ptr(sp->sda); |
---|
| 443 | + struct srcu_data *sdp = this_cpu_ptr(ssp->sda); |
---|
449 | 444 | int state; |
---|
450 | 445 | |
---|
451 | | - lockdep_assert_held(&ACCESS_PRIVATE(sp, lock)); |
---|
452 | | - WARN_ON_ONCE(ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)); |
---|
| 446 | + lockdep_assert_held(&ACCESS_PRIVATE(ssp, lock)); |
---|
| 447 | + WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)); |
---|
453 | 448 | spin_lock_rcu_node(sdp); /* Interrupts already disabled. */ |
---|
454 | 449 | rcu_segcblist_advance(&sdp->srcu_cblist, |
---|
455 | | - rcu_seq_current(&sp->srcu_gp_seq)); |
---|
| 450 | + rcu_seq_current(&ssp->srcu_gp_seq)); |
---|
456 | 451 | (void)rcu_segcblist_accelerate(&sdp->srcu_cblist, |
---|
457 | | - rcu_seq_snap(&sp->srcu_gp_seq)); |
---|
| 452 | + rcu_seq_snap(&ssp->srcu_gp_seq)); |
---|
458 | 453 | spin_unlock_rcu_node(sdp); /* Interrupts remain disabled. */ |
---|
459 | 454 | smp_mb(); /* Order prior store to ->srcu_gp_seq_needed vs. GP start. */ |
---|
460 | | - rcu_seq_start(&sp->srcu_gp_seq); |
---|
461 | | - state = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)); |
---|
| 455 | + rcu_seq_start(&ssp->srcu_gp_seq); |
---|
| 456 | + state = rcu_seq_state(ssp->srcu_gp_seq); |
---|
462 | 457 | WARN_ON_ONCE(state != SRCU_STATE_SCAN1); |
---|
463 | 458 | } |
---|
464 | 459 | |
---|
465 | | -/* |
---|
466 | | - * Place the workqueue handler on the specified CPU if online, otherwise |
---|
467 | | - * just run it whereever. This is useful for placing workqueue handlers |
---|
468 | | - * that are to invoke the specified CPU's callbacks. |
---|
469 | | - */ |
---|
470 | | -static bool srcu_queue_delayed_work_on(int cpu, struct workqueue_struct *wq, |
---|
471 | | - struct delayed_work *dwork, |
---|
| 460 | + |
---|
| 461 | +static void srcu_delay_timer(struct timer_list *t) |
---|
| 462 | +{ |
---|
| 463 | + struct srcu_data *sdp = container_of(t, struct srcu_data, delay_work); |
---|
| 464 | + |
---|
| 465 | + queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work); |
---|
| 466 | +} |
---|
| 467 | + |
---|
| 468 | +static void srcu_queue_delayed_work_on(struct srcu_data *sdp, |
---|
472 | 469 | unsigned long delay) |
---|
473 | 470 | { |
---|
474 | | - bool ret; |
---|
| 471 | + if (!delay) { |
---|
| 472 | + queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work); |
---|
| 473 | + return; |
---|
| 474 | + } |
---|
475 | 475 | |
---|
476 | | - cpus_read_lock(); |
---|
477 | | - if (cpu_online(cpu)) |
---|
478 | | - ret = queue_delayed_work_on(cpu, wq, dwork, delay); |
---|
479 | | - else |
---|
480 | | - ret = queue_delayed_work(wq, dwork, delay); |
---|
481 | | - cpus_read_unlock(); |
---|
482 | | - return ret; |
---|
| 476 | + timer_reduce(&sdp->delay_work, jiffies + delay); |
---|
483 | 477 | } |
---|
484 | 478 | |
---|
485 | 479 | /* |
---|
.. | .. |
---|
488 | 482 | */ |
---|
489 | 483 | static void srcu_schedule_cbs_sdp(struct srcu_data *sdp, unsigned long delay) |
---|
490 | 484 | { |
---|
491 | | - srcu_queue_delayed_work_on(sdp->cpu, rcu_gp_wq, &sdp->work, delay); |
---|
| 485 | + srcu_queue_delayed_work_on(sdp, delay); |
---|
492 | 486 | } |
---|
493 | 487 | |
---|
494 | 488 | /* |
---|
.. | .. |
---|
497 | 491 | * just-completed grace period, the one corresponding to idx. If possible, |
---|
498 | 492 | * schedule this invocation on the corresponding CPUs. |
---|
499 | 493 | */ |
---|
500 | | -static void srcu_schedule_cbs_snp(struct srcu_struct *sp, struct srcu_node *snp, |
---|
| 494 | +static void srcu_schedule_cbs_snp(struct srcu_struct *ssp, struct srcu_node *snp, |
---|
501 | 495 | unsigned long mask, unsigned long delay) |
---|
502 | 496 | { |
---|
503 | 497 | int cpu; |
---|
.. | .. |
---|
505 | 499 | for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) { |
---|
506 | 500 | if (!(mask & (1 << (cpu - snp->grplo)))) |
---|
507 | 501 | continue; |
---|
508 | | - srcu_schedule_cbs_sdp(per_cpu_ptr(sp->sda, cpu), delay); |
---|
| 502 | + srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, cpu), delay); |
---|
509 | 503 | } |
---|
510 | 504 | } |
---|
511 | 505 | |
---|
.. | .. |
---|
518 | 512 | * are initiating callback invocation. This allows the ->srcu_have_cbs[] |
---|
519 | 513 | * array to have a finite number of elements. |
---|
520 | 514 | */ |
---|
521 | | -static void srcu_gp_end(struct srcu_struct *sp) |
---|
| 515 | +static void srcu_gp_end(struct srcu_struct *ssp) |
---|
522 | 516 | { |
---|
523 | 517 | unsigned long cbdelay; |
---|
524 | 518 | bool cbs; |
---|
.. | .. |
---|
532 | 526 | struct srcu_node *snp; |
---|
533 | 527 | |
---|
534 | 528 | /* Prevent more than one additional grace period. */ |
---|
535 | | - mutex_lock(&sp->srcu_cb_mutex); |
---|
| 529 | + mutex_lock(&ssp->srcu_cb_mutex); |
---|
536 | 530 | |
---|
537 | 531 | /* End the current grace period. */ |
---|
538 | | - spin_lock_irq_rcu_node(sp); |
---|
539 | | - idx = rcu_seq_state(sp->srcu_gp_seq); |
---|
| 532 | + spin_lock_irq_rcu_node(ssp); |
---|
| 533 | + idx = rcu_seq_state(ssp->srcu_gp_seq); |
---|
540 | 534 | WARN_ON_ONCE(idx != SRCU_STATE_SCAN2); |
---|
541 | | - cbdelay = srcu_get_delay(sp); |
---|
542 | | - sp->srcu_last_gp_end = ktime_get_mono_fast_ns(); |
---|
543 | | - rcu_seq_end(&sp->srcu_gp_seq); |
---|
544 | | - gpseq = rcu_seq_current(&sp->srcu_gp_seq); |
---|
545 | | - if (ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, gpseq)) |
---|
546 | | - sp->srcu_gp_seq_needed_exp = gpseq; |
---|
547 | | - spin_unlock_irq_rcu_node(sp); |
---|
548 | | - mutex_unlock(&sp->srcu_gp_mutex); |
---|
| 535 | + cbdelay = srcu_get_delay(ssp); |
---|
| 536 | + WRITE_ONCE(ssp->srcu_last_gp_end, ktime_get_mono_fast_ns()); |
---|
| 537 | + rcu_seq_end(&ssp->srcu_gp_seq); |
---|
| 538 | + gpseq = rcu_seq_current(&ssp->srcu_gp_seq); |
---|
| 539 | + if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, gpseq)) |
---|
| 540 | + WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, gpseq); |
---|
| 541 | + spin_unlock_irq_rcu_node(ssp); |
---|
| 542 | + mutex_unlock(&ssp->srcu_gp_mutex); |
---|
549 | 543 | /* A new grace period can start at this point. But only one. */ |
---|
550 | 544 | |
---|
551 | 545 | /* Initiate callback invocation as needed. */ |
---|
552 | 546 | idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs); |
---|
553 | | - rcu_for_each_node_breadth_first(sp, snp) { |
---|
| 547 | + srcu_for_each_node_breadth_first(ssp, snp) { |
---|
554 | 548 | spin_lock_irq_rcu_node(snp); |
---|
555 | 549 | cbs = false; |
---|
556 | | - last_lvl = snp >= sp->level[rcu_num_lvls - 1]; |
---|
| 550 | + last_lvl = snp >= ssp->level[rcu_num_lvls - 1]; |
---|
557 | 551 | if (last_lvl) |
---|
558 | 552 | cbs = snp->srcu_have_cbs[idx] == gpseq; |
---|
559 | 553 | snp->srcu_have_cbs[idx] = gpseq; |
---|
560 | 554 | rcu_seq_set_state(&snp->srcu_have_cbs[idx], 1); |
---|
561 | 555 | if (ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, gpseq)) |
---|
562 | | - snp->srcu_gp_seq_needed_exp = gpseq; |
---|
| 556 | + WRITE_ONCE(snp->srcu_gp_seq_needed_exp, gpseq); |
---|
563 | 557 | mask = snp->srcu_data_have_cbs[idx]; |
---|
564 | 558 | snp->srcu_data_have_cbs[idx] = 0; |
---|
565 | 559 | spin_unlock_irq_rcu_node(snp); |
---|
566 | 560 | if (cbs) |
---|
567 | | - srcu_schedule_cbs_snp(sp, snp, mask, cbdelay); |
---|
| 561 | + srcu_schedule_cbs_snp(ssp, snp, mask, cbdelay); |
---|
568 | 562 | |
---|
569 | 563 | /* Occasionally prevent srcu_data counter wrap. */ |
---|
570 | 564 | if (!(gpseq & counter_wrap_check) && last_lvl) |
---|
571 | 565 | for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) { |
---|
572 | | - sdp = per_cpu_ptr(sp->sda, cpu); |
---|
| 566 | + sdp = per_cpu_ptr(ssp->sda, cpu); |
---|
573 | 567 | spin_lock_irqsave_rcu_node(sdp, flags); |
---|
574 | 568 | if (ULONG_CMP_GE(gpseq, |
---|
575 | 569 | sdp->srcu_gp_seq_needed + 100)) |
---|
.. | .. |
---|
582 | 576 | } |
---|
583 | 577 | |
---|
584 | 578 | /* Callback initiation done, allow grace periods after next. */ |
---|
585 | | - mutex_unlock(&sp->srcu_cb_mutex); |
---|
| 579 | + mutex_unlock(&ssp->srcu_cb_mutex); |
---|
586 | 580 | |
---|
587 | 581 | /* Start a new grace period if needed. */ |
---|
588 | | - spin_lock_irq_rcu_node(sp); |
---|
589 | | - gpseq = rcu_seq_current(&sp->srcu_gp_seq); |
---|
| 582 | + spin_lock_irq_rcu_node(ssp); |
---|
| 583 | + gpseq = rcu_seq_current(&ssp->srcu_gp_seq); |
---|
590 | 584 | if (!rcu_seq_state(gpseq) && |
---|
591 | | - ULONG_CMP_LT(gpseq, sp->srcu_gp_seq_needed)) { |
---|
592 | | - srcu_gp_start(sp); |
---|
593 | | - spin_unlock_irq_rcu_node(sp); |
---|
594 | | - srcu_reschedule(sp, 0); |
---|
| 585 | + ULONG_CMP_LT(gpseq, ssp->srcu_gp_seq_needed)) { |
---|
| 586 | + srcu_gp_start(ssp); |
---|
| 587 | + spin_unlock_irq_rcu_node(ssp); |
---|
| 588 | + srcu_reschedule(ssp, 0); |
---|
595 | 589 | } else { |
---|
596 | | - spin_unlock_irq_rcu_node(sp); |
---|
| 590 | + spin_unlock_irq_rcu_node(ssp); |
---|
597 | 591 | } |
---|
598 | 592 | } |
---|
599 | 593 | |
---|
.. | .. |
---|
604 | 598 | * but without expediting. To start a completely new grace period, |
---|
605 | 599 | * whether expedited or not, use srcu_funnel_gp_start() instead. |
---|
606 | 600 | */ |
---|
607 | | -static void srcu_funnel_exp_start(struct srcu_struct *sp, struct srcu_node *snp, |
---|
| 601 | +static void srcu_funnel_exp_start(struct srcu_struct *ssp, struct srcu_node *snp, |
---|
608 | 602 | unsigned long s) |
---|
609 | 603 | { |
---|
610 | 604 | unsigned long flags; |
---|
611 | 605 | |
---|
612 | 606 | for (; snp != NULL; snp = snp->srcu_parent) { |
---|
613 | | - if (rcu_seq_done(&sp->srcu_gp_seq, s) || |
---|
| 607 | + if (rcu_seq_done(&ssp->srcu_gp_seq, s) || |
---|
614 | 608 | ULONG_CMP_GE(READ_ONCE(snp->srcu_gp_seq_needed_exp), s)) |
---|
615 | 609 | return; |
---|
616 | 610 | spin_lock_irqsave_rcu_node(snp, flags); |
---|
.. | .. |
---|
621 | 615 | WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s); |
---|
622 | 616 | spin_unlock_irqrestore_rcu_node(snp, flags); |
---|
623 | 617 | } |
---|
624 | | - spin_lock_irqsave_rcu_node(sp, flags); |
---|
625 | | - if (ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, s)) |
---|
626 | | - sp->srcu_gp_seq_needed_exp = s; |
---|
627 | | - spin_unlock_irqrestore_rcu_node(sp, flags); |
---|
| 618 | + spin_lock_irqsave_rcu_node(ssp, flags); |
---|
| 619 | + if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, s)) |
---|
| 620 | + WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, s); |
---|
| 621 | + spin_unlock_irqrestore_rcu_node(ssp, flags); |
---|
628 | 622 | } |
---|
629 | 623 | |
---|
630 | 624 | /* |
---|
.. | .. |
---|
637 | 631 | * Note that this function also does the work of srcu_funnel_exp_start(), |
---|
638 | 632 | * in some cases by directly invoking it. |
---|
639 | 633 | */ |
---|
640 | | -static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp, |
---|
| 634 | +static void srcu_funnel_gp_start(struct srcu_struct *ssp, struct srcu_data *sdp, |
---|
641 | 635 | unsigned long s, bool do_norm) |
---|
642 | 636 | { |
---|
643 | 637 | unsigned long flags; |
---|
.. | .. |
---|
647 | 641 | |
---|
648 | 642 | /* Each pass through the loop does one level of the srcu_node tree. */ |
---|
649 | 643 | for (; snp != NULL; snp = snp->srcu_parent) { |
---|
650 | | - if (rcu_seq_done(&sp->srcu_gp_seq, s) && snp != sdp->mynode) |
---|
| 644 | + if (rcu_seq_done(&ssp->srcu_gp_seq, s) && snp != sdp->mynode) |
---|
651 | 645 | return; /* GP already done and CBs recorded. */ |
---|
652 | 646 | spin_lock_irqsave_rcu_node(snp, flags); |
---|
653 | 647 | if (ULONG_CMP_GE(snp->srcu_have_cbs[idx], s)) { |
---|
.. | .. |
---|
662 | 656 | return; |
---|
663 | 657 | } |
---|
664 | 658 | if (!do_norm) |
---|
665 | | - srcu_funnel_exp_start(sp, snp, s); |
---|
| 659 | + srcu_funnel_exp_start(ssp, snp, s); |
---|
666 | 660 | return; |
---|
667 | 661 | } |
---|
668 | 662 | snp->srcu_have_cbs[idx] = s; |
---|
669 | 663 | if (snp == sdp->mynode) |
---|
670 | 664 | snp->srcu_data_have_cbs[idx] |= sdp->grpmask; |
---|
671 | 665 | if (!do_norm && ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, s)) |
---|
672 | | - snp->srcu_gp_seq_needed_exp = s; |
---|
| 666 | + WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s); |
---|
673 | 667 | spin_unlock_irqrestore_rcu_node(snp, flags); |
---|
674 | 668 | } |
---|
675 | 669 | |
---|
676 | 670 | /* Top of tree, must ensure the grace period will be started. */ |
---|
677 | | - spin_lock_irqsave_rcu_node(sp, flags); |
---|
678 | | - if (ULONG_CMP_LT(sp->srcu_gp_seq_needed, s)) { |
---|
| 671 | + spin_lock_irqsave_rcu_node(ssp, flags); |
---|
| 672 | + if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed, s)) { |
---|
679 | 673 | /* |
---|
680 | 674 | * Record need for grace period s. Pair with load |
---|
681 | 675 | * acquire setting up for initialization. |
---|
682 | 676 | */ |
---|
683 | | - smp_store_release(&sp->srcu_gp_seq_needed, s); /*^^^*/ |
---|
| 677 | + smp_store_release(&ssp->srcu_gp_seq_needed, s); /*^^^*/ |
---|
684 | 678 | } |
---|
685 | | - if (!do_norm && ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, s)) |
---|
686 | | - sp->srcu_gp_seq_needed_exp = s; |
---|
| 679 | + if (!do_norm && ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, s)) |
---|
| 680 | + WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, s); |
---|
687 | 681 | |
---|
688 | 682 | /* If grace period not already done and none in progress, start it. */ |
---|
689 | | - if (!rcu_seq_done(&sp->srcu_gp_seq, s) && |
---|
690 | | - rcu_seq_state(sp->srcu_gp_seq) == SRCU_STATE_IDLE) { |
---|
691 | | - WARN_ON_ONCE(ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)); |
---|
692 | | - srcu_gp_start(sp); |
---|
693 | | - queue_delayed_work(rcu_gp_wq, &sp->work, srcu_get_delay(sp)); |
---|
| 683 | + if (!rcu_seq_done(&ssp->srcu_gp_seq, s) && |
---|
| 684 | + rcu_seq_state(ssp->srcu_gp_seq) == SRCU_STATE_IDLE) { |
---|
| 685 | + WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)); |
---|
| 686 | + srcu_gp_start(ssp); |
---|
| 687 | + if (likely(srcu_init_done)) |
---|
| 688 | + queue_delayed_work(rcu_gp_wq, &ssp->work, |
---|
| 689 | + srcu_get_delay(ssp)); |
---|
| 690 | + else if (list_empty(&ssp->work.work.entry)) |
---|
| 691 | + list_add(&ssp->work.work.entry, &srcu_boot_list); |
---|
694 | 692 | } |
---|
695 | | - spin_unlock_irqrestore_rcu_node(sp, flags); |
---|
| 693 | + spin_unlock_irqrestore_rcu_node(ssp, flags); |
---|
696 | 694 | } |
---|
697 | 695 | |
---|
698 | 696 | /* |
---|
.. | .. |
---|
700 | 698 | * loop an additional time if there is an expedited grace period pending. |
---|
701 | 699 | * The caller must ensure that ->srcu_idx is not changed while checking. |
---|
702 | 700 | */ |
---|
703 | | -static bool try_check_zero(struct srcu_struct *sp, int idx, int trycount) |
---|
| 701 | +static bool try_check_zero(struct srcu_struct *ssp, int idx, int trycount) |
---|
704 | 702 | { |
---|
705 | 703 | for (;;) { |
---|
706 | | - if (srcu_readers_active_idx_check(sp, idx)) |
---|
| 704 | + if (srcu_readers_active_idx_check(ssp, idx)) |
---|
707 | 705 | return true; |
---|
708 | | - if (--trycount + !srcu_get_delay(sp) <= 0) |
---|
| 706 | + if (--trycount + !srcu_get_delay(ssp) <= 0) |
---|
709 | 707 | return false; |
---|
710 | 708 | udelay(SRCU_RETRY_CHECK_DELAY); |
---|
711 | 709 | } |
---|
.. | .. |
---|
716 | 714 | * use the other rank of the ->srcu_(un)lock_count[] arrays. This allows |
---|
717 | 715 | * us to wait for pre-existing readers in a starvation-free manner. |
---|
718 | 716 | */ |
---|
719 | | -static void srcu_flip(struct srcu_struct *sp) |
---|
| 717 | +static void srcu_flip(struct srcu_struct *ssp) |
---|
720 | 718 | { |
---|
721 | 719 | /* |
---|
722 | 720 | * Ensure that if this updater saw a given reader's increment |
---|
.. | .. |
---|
728 | 726 | */ |
---|
729 | 727 | smp_mb(); /* E */ /* Pairs with B and C. */ |
---|
730 | 728 | |
---|
731 | | - WRITE_ONCE(sp->srcu_idx, sp->srcu_idx + 1); |
---|
| 729 | + WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1); |
---|
732 | 730 | |
---|
733 | 731 | /* |
---|
734 | 732 | * Ensure that if the updater misses an __srcu_read_unlock() |
---|
.. | .. |
---|
758 | 756 | * it, if this function was preempted for enough time for the counters |
---|
759 | 757 | * to wrap, it really doesn't matter whether or not we expedite the grace |
---|
760 | 758 | * period. The extra overhead of a needlessly expedited grace period is |
---|
761 | | - * negligible when amoritized over that time period, and the extra latency |
---|
| 759 | + * negligible when amortized over that time period, and the extra latency |
---|
762 | 760 | * of a needlessly non-expedited grace period is similarly negligible. |
---|
763 | 761 | */ |
---|
764 | | -static DEFINE_LOCAL_IRQ_LOCK(sp_llock); |
---|
765 | | - |
---|
766 | | -static bool srcu_might_be_idle(struct srcu_struct *sp) |
---|
| 762 | +static bool srcu_might_be_idle(struct srcu_struct *ssp) |
---|
767 | 763 | { |
---|
768 | 764 | unsigned long curseq; |
---|
769 | 765 | unsigned long flags; |
---|
770 | 766 | struct srcu_data *sdp; |
---|
771 | 767 | unsigned long t; |
---|
| 768 | + unsigned long tlast; |
---|
772 | 769 | |
---|
| 770 | + check_init_srcu_struct(ssp); |
---|
773 | 771 | /* If the local srcu_data structure has callbacks, not idle. */ |
---|
774 | | - local_lock_irqsave(sp_llock, flags); |
---|
775 | | - sdp = this_cpu_ptr(sp->sda); |
---|
| 772 | + sdp = raw_cpu_ptr(ssp->sda); |
---|
| 773 | + spin_lock_irqsave_rcu_node(sdp, flags); |
---|
776 | 774 | if (rcu_segcblist_pend_cbs(&sdp->srcu_cblist)) { |
---|
777 | | - local_unlock_irqrestore(sp_llock, flags); |
---|
| 775 | + spin_unlock_irqrestore_rcu_node(sdp, flags); |
---|
778 | 776 | return false; /* Callbacks already present, so not idle. */ |
---|
779 | 777 | } |
---|
780 | | - local_unlock_irqrestore(sp_llock, flags); |
---|
| 778 | + spin_unlock_irqrestore_rcu_node(sdp, flags); |
---|
781 | 779 | |
---|
782 | 780 | /* |
---|
783 | 781 | * No local callbacks, so probabalistically probe global state. |
---|
.. | .. |
---|
787 | 785 | |
---|
788 | 786 | /* First, see if enough time has passed since the last GP. */ |
---|
789 | 787 | t = ktime_get_mono_fast_ns(); |
---|
| 788 | + tlast = READ_ONCE(ssp->srcu_last_gp_end); |
---|
790 | 789 | if (exp_holdoff == 0 || |
---|
791 | | - time_in_range_open(t, sp->srcu_last_gp_end, |
---|
792 | | - sp->srcu_last_gp_end + exp_holdoff)) |
---|
| 790 | + time_in_range_open(t, tlast, tlast + exp_holdoff)) |
---|
793 | 791 | return false; /* Too soon after last GP. */ |
---|
794 | 792 | |
---|
795 | 793 | /* Next, check for probable idleness. */ |
---|
796 | | - curseq = rcu_seq_current(&sp->srcu_gp_seq); |
---|
| 794 | + curseq = rcu_seq_current(&ssp->srcu_gp_seq); |
---|
797 | 795 | smp_mb(); /* Order ->srcu_gp_seq with ->srcu_gp_seq_needed. */ |
---|
798 | | - if (ULONG_CMP_LT(curseq, READ_ONCE(sp->srcu_gp_seq_needed))) |
---|
| 796 | + if (ULONG_CMP_LT(curseq, READ_ONCE(ssp->srcu_gp_seq_needed))) |
---|
799 | 797 | return false; /* Grace period in progress, so not idle. */ |
---|
800 | 798 | smp_mb(); /* Order ->srcu_gp_seq with prior access. */ |
---|
801 | | - if (curseq != rcu_seq_current(&sp->srcu_gp_seq)) |
---|
| 799 | + if (curseq != rcu_seq_current(&ssp->srcu_gp_seq)) |
---|
802 | 800 | return false; /* GP # changed, so not idle. */ |
---|
803 | 801 | return true; /* With reasonable probability, idle! */ |
---|
804 | 802 | } |
---|
.. | .. |
---|
808 | 806 | */ |
---|
809 | 807 | static void srcu_leak_callback(struct rcu_head *rhp) |
---|
810 | 808 | { |
---|
| 809 | +} |
---|
| 810 | + |
---|
| 811 | +/* |
---|
| 812 | + * Start an SRCU grace period, and also queue the callback if non-NULL. |
---|
| 813 | + */ |
---|
| 814 | +static unsigned long srcu_gp_start_if_needed(struct srcu_struct *ssp, |
---|
| 815 | + struct rcu_head *rhp, bool do_norm) |
---|
| 816 | +{ |
---|
| 817 | + unsigned long flags; |
---|
| 818 | + int idx; |
---|
| 819 | + bool needexp = false; |
---|
| 820 | + bool needgp = false; |
---|
| 821 | + unsigned long s; |
---|
| 822 | + struct srcu_data *sdp; |
---|
| 823 | + |
---|
| 824 | + check_init_srcu_struct(ssp); |
---|
| 825 | + idx = srcu_read_lock(ssp); |
---|
| 826 | + sdp = raw_cpu_ptr(ssp->sda); |
---|
| 827 | + spin_lock_irqsave_rcu_node(sdp, flags); |
---|
| 828 | + if (rhp) |
---|
| 829 | + rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp); |
---|
| 830 | + rcu_segcblist_advance(&sdp->srcu_cblist, |
---|
| 831 | + rcu_seq_current(&ssp->srcu_gp_seq)); |
---|
| 832 | + s = rcu_seq_snap(&ssp->srcu_gp_seq); |
---|
| 833 | + (void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s); |
---|
| 834 | + if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) { |
---|
| 835 | + sdp->srcu_gp_seq_needed = s; |
---|
| 836 | + needgp = true; |
---|
| 837 | + } |
---|
| 838 | + if (!do_norm && ULONG_CMP_LT(sdp->srcu_gp_seq_needed_exp, s)) { |
---|
| 839 | + sdp->srcu_gp_seq_needed_exp = s; |
---|
| 840 | + needexp = true; |
---|
| 841 | + } |
---|
| 842 | + spin_unlock_irqrestore_rcu_node(sdp, flags); |
---|
| 843 | + if (needgp) |
---|
| 844 | + srcu_funnel_gp_start(ssp, sdp, s, do_norm); |
---|
| 845 | + else if (needexp) |
---|
| 846 | + srcu_funnel_exp_start(ssp, sdp->mynode, s); |
---|
| 847 | + srcu_read_unlock(ssp, idx); |
---|
| 848 | + return s; |
---|
811 | 849 | } |
---|
812 | 850 | |
---|
813 | 851 | /* |
---|
.. | .. |
---|
838 | 876 | * srcu_read_lock(), and srcu_read_unlock() that are all passed the same |
---|
839 | 877 | * srcu_struct structure. |
---|
840 | 878 | */ |
---|
841 | | -void __call_srcu(struct srcu_struct *sp, struct rcu_head *rhp, |
---|
842 | | - rcu_callback_t func, bool do_norm) |
---|
| 879 | +static void __call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp, |
---|
| 880 | + rcu_callback_t func, bool do_norm) |
---|
843 | 881 | { |
---|
844 | | - unsigned long flags; |
---|
845 | | - bool needexp = false; |
---|
846 | | - bool needgp = false; |
---|
847 | | - unsigned long s; |
---|
848 | | - struct srcu_data *sdp; |
---|
849 | | - |
---|
850 | | - check_init_srcu_struct(sp); |
---|
851 | 882 | if (debug_rcu_head_queue(rhp)) { |
---|
852 | 883 | /* Probable double call_srcu(), so leak the callback. */ |
---|
853 | 884 | WRITE_ONCE(rhp->func, srcu_leak_callback); |
---|
.. | .. |
---|
855 | 886 | return; |
---|
856 | 887 | } |
---|
857 | 888 | rhp->func = func; |
---|
858 | | - local_lock_irqsave(sp_llock, flags); |
---|
859 | | - sdp = this_cpu_ptr(sp->sda); |
---|
860 | | - spin_lock_rcu_node(sdp); |
---|
861 | | - rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp, false); |
---|
862 | | - rcu_segcblist_advance(&sdp->srcu_cblist, |
---|
863 | | - rcu_seq_current(&sp->srcu_gp_seq)); |
---|
864 | | - s = rcu_seq_snap(&sp->srcu_gp_seq); |
---|
865 | | - (void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s); |
---|
866 | | - if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) { |
---|
867 | | - sdp->srcu_gp_seq_needed = s; |
---|
868 | | - needgp = true; |
---|
869 | | - } |
---|
870 | | - if (!do_norm && ULONG_CMP_LT(sdp->srcu_gp_seq_needed_exp, s)) { |
---|
871 | | - sdp->srcu_gp_seq_needed_exp = s; |
---|
872 | | - needexp = true; |
---|
873 | | - } |
---|
874 | | - spin_unlock_rcu_node(sdp); |
---|
875 | | - local_unlock_irqrestore(sp_llock, flags); |
---|
876 | | - if (needgp) |
---|
877 | | - srcu_funnel_gp_start(sp, sdp, s, do_norm); |
---|
878 | | - else if (needexp) |
---|
879 | | - srcu_funnel_exp_start(sp, sdp->mynode, s); |
---|
| 889 | + (void)srcu_gp_start_if_needed(ssp, rhp, do_norm); |
---|
880 | 890 | } |
---|
881 | 891 | |
---|
882 | 892 | /** |
---|
883 | 893 | * call_srcu() - Queue a callback for invocation after an SRCU grace period |
---|
884 | | - * @sp: srcu_struct in queue the callback |
---|
| 894 | + * @ssp: srcu_struct in queue the callback |
---|
885 | 895 | * @rhp: structure to be used for queueing the SRCU callback. |
---|
886 | 896 | * @func: function to be invoked after the SRCU grace period |
---|
887 | 897 | * |
---|
.. | .. |
---|
896 | 906 | * The callback will be invoked from process context, but must nevertheless |
---|
897 | 907 | * be fast and must not block. |
---|
898 | 908 | */ |
---|
899 | | -void call_srcu(struct srcu_struct *sp, struct rcu_head *rhp, |
---|
| 909 | +void call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp, |
---|
900 | 910 | rcu_callback_t func) |
---|
901 | 911 | { |
---|
902 | | - __call_srcu(sp, rhp, func, true); |
---|
| 912 | + __call_srcu(ssp, rhp, func, true); |
---|
903 | 913 | } |
---|
904 | 914 | EXPORT_SYMBOL_GPL(call_srcu); |
---|
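
As a hedged illustration of the call_srcu() usage documented above (the struct, callback, and function names below are assumptions, not part of this patch): the rcu_head is embedded in the protected object, and the callback runs only after a full SRCU grace period for the given srcu_struct.

```c
/* Sketch only: deferred free via call_srcu(); names are illustrative. */
#include <linux/slab.h>
#include <linux/srcu.h>

struct foo {
	struct rcu_head rh;
	int val;
};

static void free_foo_cb(struct rcu_head *rhp)
{
	kfree(container_of(rhp, struct foo, rh));
}

static void retire_foo(struct srcu_struct *ssp, struct foo *p)
{
	/* Invokes free_foo_cb(&p->rh) once all readers that might see p are done. */
	call_srcu(ssp, &p->rh, free_foo_cb);
}
```
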
905 | 915 | |
---|
906 | 916 | /* |
---|
907 | 917 | * Helper function for synchronize_srcu() and synchronize_srcu_expedited(). |
---|
908 | 918 | */ |
---|
909 | | -static void __synchronize_srcu(struct srcu_struct *sp, bool do_norm) |
---|
| 919 | +static void __synchronize_srcu(struct srcu_struct *ssp, bool do_norm) |
---|
910 | 920 | { |
---|
911 | 921 | struct rcu_synchronize rcu; |
---|
912 | 922 | |
---|
913 | | - RCU_LOCKDEP_WARN(lock_is_held(&sp->dep_map) || |
---|
| 923 | + RCU_LOCKDEP_WARN(lock_is_held(&ssp->dep_map) || |
---|
914 | 924 | lock_is_held(&rcu_bh_lock_map) || |
---|
915 | 925 | lock_is_held(&rcu_lock_map) || |
---|
916 | 926 | lock_is_held(&rcu_sched_lock_map), |
---|
.. | .. |
---|
919 | 929 | if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE) |
---|
920 | 930 | return; |
---|
921 | 931 | might_sleep(); |
---|
922 | | - check_init_srcu_struct(sp); |
---|
| 932 | + check_init_srcu_struct(ssp); |
---|
923 | 933 | init_completion(&rcu.completion); |
---|
924 | 934 | init_rcu_head_on_stack(&rcu.head); |
---|
925 | | - __call_srcu(sp, &rcu.head, wakeme_after_rcu, do_norm); |
---|
| 935 | + __call_srcu(ssp, &rcu.head, wakeme_after_rcu, do_norm); |
---|
926 | 936 | wait_for_completion(&rcu.completion); |
---|
927 | 937 | destroy_rcu_head_on_stack(&rcu.head); |
---|
928 | 938 | |
---|
.. | .. |
---|
938 | 948 | |
---|
939 | 949 | /** |
---|
940 | 950 | * synchronize_srcu_expedited - Brute-force SRCU grace period |
---|
941 | | - * @sp: srcu_struct with which to synchronize. |
---|
| 951 | + * @ssp: srcu_struct with which to synchronize. |
---|
942 | 952 | * |
---|
943 | 953 | * Wait for an SRCU grace period to elapse, but be more aggressive about |
---|
944 | 954 | * spinning rather than blocking when waiting. |
---|
.. | .. |
---|
946 | 956 | * Note that synchronize_srcu_expedited() has the same deadlock and |
---|
947 | 957 | * memory-ordering properties as does synchronize_srcu(). |
---|
948 | 958 | */ |
---|
949 | | -void synchronize_srcu_expedited(struct srcu_struct *sp) |
---|
| 959 | +void synchronize_srcu_expedited(struct srcu_struct *ssp) |
---|
950 | 960 | { |
---|
951 | | - __synchronize_srcu(sp, rcu_gp_is_normal()); |
---|
| 961 | + __synchronize_srcu(ssp, rcu_gp_is_normal()); |
---|
952 | 962 | } |
---|
953 | 963 | EXPORT_SYMBOL_GPL(synchronize_srcu_expedited); |
---|
954 | 964 | |
---|
955 | 965 | /** |
---|
956 | 966 | * synchronize_srcu - wait for prior SRCU read-side critical-section completion |
---|
957 | | - * @sp: srcu_struct with which to synchronize. |
---|
| 967 | + * @ssp: srcu_struct with which to synchronize. |
---|
958 | 968 | * |
---|
959 | 969 | * Wait for the count to drain to zero of both indexes. To avoid the |
---|
960 | 970 | * possible starvation of synchronize_srcu(), it waits for the count of |
---|
.. | .. |
---|
972 | 982 | * There are memory-ordering constraints implied by synchronize_srcu(). |
---|
973 | 983 | * On systems with more than one CPU, when synchronize_srcu() returns, |
---|
974 | 984 | * each CPU is guaranteed to have executed a full memory barrier since |
---|
975 | | - * the end of its last corresponding SRCU-sched read-side critical section |
---|
| 985 | + * the end of its last corresponding SRCU read-side critical section |
---|
976 | 986 | * whose beginning preceded the call to synchronize_srcu(). In addition, |
---|
977 | 987 | * each CPU having an SRCU read-side critical section that extends beyond |
---|
978 | 988 | * the return from synchronize_srcu() is guaranteed to have executed a |
---|
.. | .. |
---|
996 | 1006 | * SRCU must also provide it. Note that detecting idleness is heuristic |
---|
997 | 1007 | * and subject to both false positives and negatives. |
---|
998 | 1008 | */ |
---|
999 | | -void synchronize_srcu(struct srcu_struct *sp) |
---|
| 1009 | +void synchronize_srcu(struct srcu_struct *ssp) |
---|
1000 | 1010 | { |
---|
1001 | | - if (srcu_might_be_idle(sp) || rcu_gp_is_expedited()) |
---|
1002 | | - synchronize_srcu_expedited(sp); |
---|
| 1011 | + if (srcu_might_be_idle(ssp) || rcu_gp_is_expedited()) |
---|
| 1012 | + synchronize_srcu_expedited(ssp); |
---|
1003 | 1013 | else |
---|
1004 | | - __synchronize_srcu(sp, true); |
---|
| 1014 | + __synchronize_srcu(ssp, true); |
---|
1005 | 1015 | } |
---|
1006 | 1016 | EXPORT_SYMBOL_GPL(synchronize_srcu); |
---|
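
A minimal reader/updater sketch (assumed names, not part of this patch) showing the pattern that the ordering guarantees above apply to: readers bracket their accesses with srcu_read_lock()/srcu_read_unlock(), while the updater publishes with rcu_assign_pointer() and waits with synchronize_srcu() before freeing the old version.

```c
/* Sketch only: classic SRCU reader/updater pattern; names are assumed. */
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/srcu.h>

struct foo { int val; };

static struct foo __rcu *gp;
static DEFINE_MUTEX(g_lock);
DEFINE_SRCU(my_srcu);

static int reader(void)
{
	struct foo *p;
	int idx, v = -1;

	idx = srcu_read_lock(&my_srcu);
	p = srcu_dereference(gp, &my_srcu);	/* valid while idx is held */
	if (p)
		v = p->val;
	srcu_read_unlock(&my_srcu, idx);
	return v;
}

static void updater(struct foo *newp)
{
	struct foo *oldp;

	mutex_lock(&g_lock);
	oldp = rcu_dereference_protected(gp, lockdep_is_held(&g_lock));
	rcu_assign_pointer(gp, newp);
	mutex_unlock(&g_lock);
	synchronize_srcu(&my_srcu);	/* wait for readers that might still see oldp */
	kfree(oldp);
}
```
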
| 1017 | + |
---|
| 1018 | +/** |
---|
| 1019 | + * get_state_synchronize_srcu - Provide an end-of-grace-period cookie |
---|
| 1020 | + * @ssp: srcu_struct to provide cookie for. |
---|
| 1021 | + * |
---|
| 1022 | + * This function returns a cookie that can be passed to |
---|
| 1023 | + * poll_state_synchronize_srcu(), which will return true if a full grace |
---|
| 1024 | + * period has elapsed in the meantime. It is the caller's responsibility |
---|
| 1025 | + * to make sure that grace period happens, for example, by invoking |
---|
| 1026 | + * call_srcu() after return from get_state_synchronize_srcu(). |
---|
| 1027 | + */ |
---|
| 1028 | +unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp) |
---|
| 1029 | +{ |
---|
| 1030 | + // Any prior manipulation of SRCU-protected data must happen |
---|
| 1031 | + // before the load from ->srcu_gp_seq. |
---|
| 1032 | + smp_mb(); |
---|
| 1033 | + return rcu_seq_snap(&ssp->srcu_gp_seq); |
---|
| 1034 | +} |
---|
| 1035 | +EXPORT_SYMBOL_GPL(get_state_synchronize_srcu); |
---|
| 1036 | + |
---|
| 1037 | +/** |
---|
| 1038 | + * start_poll_synchronize_srcu - Provide cookie and start grace period |
---|
| 1039 | + * @ssp: srcu_struct to provide cookie for. |
---|
| 1040 | + * |
---|
| 1041 | + * This function returns a cookie that can be passed to |
---|
| 1042 | + * poll_state_synchronize_srcu(), which will return true if a full grace |
---|
| 1043 | + * period has elapsed in the meantime. Unlike get_state_synchronize_srcu(), |
---|
| 1044 | + * this function also ensures that any needed SRCU grace period will be |
---|
| 1045 | + * started. This convenience does come at a cost in terms of CPU overhead. |
---|
| 1046 | + */ |
---|
| 1047 | +unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp) |
---|
| 1048 | +{ |
---|
| 1049 | + return srcu_gp_start_if_needed(ssp, NULL, true); |
---|
| 1050 | +} |
---|
| 1051 | +EXPORT_SYMBOL_GPL(start_poll_synchronize_srcu); |
---|
| 1052 | + |
---|
| 1053 | +/** |
---|
| 1054 | + * poll_state_synchronize_srcu - Has cookie's grace period ended? |
---|
| 1055 | + * @ssp: srcu_struct to provide cookie for. |
---|
| 1056 | + * @cookie: Return value from get_state_synchronize_srcu() or start_poll_synchronize_srcu(). |
---|
| 1057 | + * |
---|
| 1058 | + * This function takes the cookie that was returned from either |
---|
| 1059 | + * get_state_synchronize_srcu() or start_poll_synchronize_srcu(), and |
---|
| 1060 | + * returns @true if an SRCU grace period elapsed since the time that the |
---|
| 1061 | + * cookie was created. |
---|
| 1062 | + */ |
---|
| 1063 | +bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie) |
---|
| 1064 | +{ |
---|
| 1065 | + if (!rcu_seq_done(&ssp->srcu_gp_seq, cookie)) |
---|
| 1066 | + return false; |
---|
| 1067 | + // Ensure that the end of the SRCU grace period happens before |
---|
| 1068 | + // any subsequent code that the caller might execute. |
---|
| 1069 | + smp_mb(); // ^^^ |
---|
| 1070 | + return true; |
---|
| 1071 | +} |
---|
| 1072 | +EXPORT_SYMBOL_GPL(poll_state_synchronize_srcu); |
---|
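
A hedged sketch of how the new cookie-based polling API might be used (the srcu_struct, struct foo, and the caller's retry logic are assumptions carried over from the sketches above): take a cookie when retiring an object, and free it only once poll_state_synchronize_srcu() reports that a full grace period has elapsed.

```c
/* Sketch only: non-blocking deferred free using the polling API.
 * "my_srcu" and struct foo are assumed to exist as in the sketches above. */
static struct foo *pending_old;
static unsigned long pending_cookie;

static void stash_for_deferred_free(struct foo *oldp)
{
	pending_old = oldp;
	/* Returns a cookie and also starts a grace period if one is needed. */
	pending_cookie = start_poll_synchronize_srcu(&my_srcu);
}

static void maybe_free_pending(void)
{
	if (!pending_old)
		return;
	if (!poll_state_synchronize_srcu(&my_srcu, pending_cookie))
		return;		/* grace period not over yet; poll again later */
	kfree(pending_old);
	pending_old = NULL;
}
```
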
1007 | 1073 | |
---|
1008 | 1074 | /* |
---|
1009 | 1075 | * Callback function for srcu_barrier() use. |
---|
.. | .. |
---|
1011 | 1077 | static void srcu_barrier_cb(struct rcu_head *rhp) |
---|
1012 | 1078 | { |
---|
1013 | 1079 | struct srcu_data *sdp; |
---|
1014 | | - struct srcu_struct *sp; |
---|
| 1080 | + struct srcu_struct *ssp; |
---|
1015 | 1081 | |
---|
1016 | 1082 | sdp = container_of(rhp, struct srcu_data, srcu_barrier_head); |
---|
1017 | | - sp = sdp->sp; |
---|
1018 | | - if (atomic_dec_and_test(&sp->srcu_barrier_cpu_cnt)) |
---|
1019 | | - complete(&sp->srcu_barrier_completion); |
---|
| 1083 | + ssp = sdp->ssp; |
---|
| 1084 | + if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt)) |
---|
| 1085 | + complete(&ssp->srcu_barrier_completion); |
---|
1020 | 1086 | } |
---|
1021 | 1087 | |
---|
1022 | 1088 | /** |
---|
1023 | 1089 | * srcu_barrier - Wait until all in-flight call_srcu() callbacks complete. |
---|
1024 | | - * @sp: srcu_struct on which to wait for in-flight callbacks. |
---|
| 1090 | + * @ssp: srcu_struct on which to wait for in-flight callbacks. |
---|
1025 | 1091 | */ |
---|
1026 | | -void srcu_barrier(struct srcu_struct *sp) |
---|
| 1092 | +void srcu_barrier(struct srcu_struct *ssp) |
---|
1027 | 1093 | { |
---|
1028 | 1094 | int cpu; |
---|
1029 | 1095 | struct srcu_data *sdp; |
---|
1030 | | - unsigned long s = rcu_seq_snap(&sp->srcu_barrier_seq); |
---|
| 1096 | + unsigned long s = rcu_seq_snap(&ssp->srcu_barrier_seq); |
---|
1031 | 1097 | |
---|
1032 | | - check_init_srcu_struct(sp); |
---|
1033 | | - mutex_lock(&sp->srcu_barrier_mutex); |
---|
1034 | | - if (rcu_seq_done(&sp->srcu_barrier_seq, s)) { |
---|
| 1098 | + check_init_srcu_struct(ssp); |
---|
| 1099 | + mutex_lock(&ssp->srcu_barrier_mutex); |
---|
| 1100 | + if (rcu_seq_done(&ssp->srcu_barrier_seq, s)) { |
---|
1035 | 1101 | smp_mb(); /* Force ordering following return. */ |
---|
1036 | | - mutex_unlock(&sp->srcu_barrier_mutex); |
---|
| 1102 | + mutex_unlock(&ssp->srcu_barrier_mutex); |
---|
1037 | 1103 | return; /* Someone else did our work for us. */ |
---|
1038 | 1104 | } |
---|
1039 | | - rcu_seq_start(&sp->srcu_barrier_seq); |
---|
1040 | | - init_completion(&sp->srcu_barrier_completion); |
---|
| 1105 | + rcu_seq_start(&ssp->srcu_barrier_seq); |
---|
| 1106 | + init_completion(&ssp->srcu_barrier_completion); |
---|
1041 | 1107 | |
---|
1042 | 1108 | /* Initial count prevents reaching zero until all CBs are posted. */ |
---|
1043 | | - atomic_set(&sp->srcu_barrier_cpu_cnt, 1); |
---|
| 1109 | + atomic_set(&ssp->srcu_barrier_cpu_cnt, 1); |
---|
1044 | 1110 | |
---|
1045 | 1111 | /* |
---|
1046 | 1112 | * Each pass through this loop enqueues a callback, but only |
---|
.. | .. |
---|
1051 | 1117 | * grace period as the last callback already in the queue. |
---|
1052 | 1118 | */ |
---|
1053 | 1119 | for_each_possible_cpu(cpu) { |
---|
1054 | | - sdp = per_cpu_ptr(sp->sda, cpu); |
---|
| 1120 | + sdp = per_cpu_ptr(ssp->sda, cpu); |
---|
1055 | 1121 | spin_lock_irq_rcu_node(sdp); |
---|
1056 | | - atomic_inc(&sp->srcu_barrier_cpu_cnt); |
---|
| 1122 | + atomic_inc(&ssp->srcu_barrier_cpu_cnt); |
---|
1057 | 1123 | sdp->srcu_barrier_head.func = srcu_barrier_cb; |
---|
1058 | 1124 | debug_rcu_head_queue(&sdp->srcu_barrier_head); |
---|
1059 | 1125 | if (!rcu_segcblist_entrain(&sdp->srcu_cblist, |
---|
1060 | | - &sdp->srcu_barrier_head, 0)) { |
---|
| 1126 | + &sdp->srcu_barrier_head)) { |
---|
1061 | 1127 | debug_rcu_head_unqueue(&sdp->srcu_barrier_head); |
---|
1062 | | - atomic_dec(&sp->srcu_barrier_cpu_cnt); |
---|
| 1128 | + atomic_dec(&ssp->srcu_barrier_cpu_cnt); |
---|
1063 | 1129 | } |
---|
1064 | 1130 | spin_unlock_irq_rcu_node(sdp); |
---|
1065 | 1131 | } |
---|
1066 | 1132 | |
---|
1067 | 1133 | /* Remove the initial count, at which point reaching zero can happen. */ |
---|
1068 | | - if (atomic_dec_and_test(&sp->srcu_barrier_cpu_cnt)) |
---|
1069 | | - complete(&sp->srcu_barrier_completion); |
---|
1070 | | - wait_for_completion(&sp->srcu_barrier_completion); |
---|
| 1134 | + if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt)) |
---|
| 1135 | + complete(&ssp->srcu_barrier_completion); |
---|
| 1136 | + wait_for_completion(&ssp->srcu_barrier_completion); |
---|
1071 | 1137 | |
---|
1072 | | - rcu_seq_end(&sp->srcu_barrier_seq); |
---|
1073 | | - mutex_unlock(&sp->srcu_barrier_mutex); |
---|
| 1138 | + rcu_seq_end(&ssp->srcu_barrier_seq); |
---|
| 1139 | + mutex_unlock(&ssp->srcu_barrier_mutex); |
---|
1074 | 1140 | } |
---|
1075 | 1141 | EXPORT_SYMBOL_GPL(srcu_barrier); |
---|
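srcu_barrier() waits only for callbacks already posted via call_srcu(); it does not itself wait for a grace period, and callers must stop posting new callbacks before relying on it. The usual pattern is a teardown path that runs it ahead of cleanup_srcu_struct(). A hedged sketch, with foo, foo_srcu, and foo_reclaim() as illustrative names rather than anything from this file:

    #include <linux/slab.h>
    #include <linux/srcu.h>

    struct foo {
    	struct rcu_head rh;
    	int payload;
    };

    static struct srcu_struct foo_srcu;

    static void foo_reclaim(struct rcu_head *rhp)
    {
    	kfree(container_of(rhp, struct foo, rh));
    }

    static void foo_retire(struct foo *fp)
    {
    	call_srcu(&foo_srcu, &fp->rh, foo_reclaim);
    }

    static int foo_init(void)
    {
    	return init_srcu_struct(&foo_srcu);
    }

    static void foo_exit(void)
    {
    	/* Callers must have stopped invoking foo_retire() by now. */
    	srcu_barrier(&foo_srcu);	/* Wait for queued foo_reclaim()s. */
    	cleanup_srcu_struct(&foo_srcu);	/* Safe: no callbacks remain. */
    }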
1076 | 1142 | |
---|
1077 | 1143 | /** |
---|
1078 | 1144 | * srcu_batches_completed - return batches completed. |
---|
1079 | | - * @sp: srcu_struct on which to report batch completion. |
---|
| 1145 | + * @ssp: srcu_struct on which to report batch completion. |
---|
1080 | 1146 | * |
---|
1081 | 1147 | * Report the number of batches, correlated with, but not necessarily |
---|
1082 | 1148 | * precisely the same as, the number of grace periods that have elapsed. |
---|
1083 | 1149 | */ |
---|
1084 | | -unsigned long srcu_batches_completed(struct srcu_struct *sp) |
---|
| 1150 | +unsigned long srcu_batches_completed(struct srcu_struct *ssp) |
---|
1085 | 1151 | { |
---|
1086 | | - return sp->srcu_idx; |
---|
| 1152 | + return READ_ONCE(ssp->srcu_idx); |
---|
1087 | 1153 | } |
---|
1088 | 1154 | EXPORT_SYMBOL_GPL(srcu_batches_completed); |
---|
1089 | 1155 | |
---|
.. | .. |
---|
1092 | 1158 | * to SRCU_STATE_SCAN2, and invoke srcu_gp_end() when scan has |
---|
1093 | 1159 | * completed in that state. |
---|
1094 | 1160 | */ |
---|
1095 | | -static void srcu_advance_state(struct srcu_struct *sp) |
---|
| 1161 | +static void srcu_advance_state(struct srcu_struct *ssp) |
---|
1096 | 1162 | { |
---|
1097 | 1163 | int idx; |
---|
1098 | 1164 | |
---|
1099 | | - mutex_lock(&sp->srcu_gp_mutex); |
---|
| 1165 | + mutex_lock(&ssp->srcu_gp_mutex); |
---|
1100 | 1166 | |
---|
1101 | 1167 | /* |
---|
1102 | 1168 | * Because readers might be delayed for an extended period after |
---|
.. | .. |
---|
1108 | 1174 | * The load-acquire ensures that we see the accesses performed |
---|
1109 | 1175 | * by the prior grace period. |
---|
1110 | 1176 | */ |
---|
1111 | | - idx = rcu_seq_state(smp_load_acquire(&sp->srcu_gp_seq)); /* ^^^ */ |
---|
| 1177 | + idx = rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq)); /* ^^^ */ |
---|
1112 | 1178 | if (idx == SRCU_STATE_IDLE) { |
---|
1113 | | - spin_lock_irq_rcu_node(sp); |
---|
1114 | | - if (ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)) { |
---|
1115 | | - WARN_ON_ONCE(rcu_seq_state(sp->srcu_gp_seq)); |
---|
1116 | | - spin_unlock_irq_rcu_node(sp); |
---|
1117 | | - mutex_unlock(&sp->srcu_gp_mutex); |
---|
| 1179 | + spin_lock_irq_rcu_node(ssp); |
---|
| 1180 | + if (ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)) { |
---|
| 1181 | + WARN_ON_ONCE(rcu_seq_state(ssp->srcu_gp_seq)); |
---|
| 1182 | + spin_unlock_irq_rcu_node(ssp); |
---|
| 1183 | + mutex_unlock(&ssp->srcu_gp_mutex); |
---|
1118 | 1184 | return; |
---|
1119 | 1185 | } |
---|
1120 | | - idx = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)); |
---|
| 1186 | + idx = rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)); |
---|
1121 | 1187 | if (idx == SRCU_STATE_IDLE) |
---|
1122 | | - srcu_gp_start(sp); |
---|
1123 | | - spin_unlock_irq_rcu_node(sp); |
---|
| 1188 | + srcu_gp_start(ssp); |
---|
| 1189 | + spin_unlock_irq_rcu_node(ssp); |
---|
1124 | 1190 | if (idx != SRCU_STATE_IDLE) { |
---|
1125 | | - mutex_unlock(&sp->srcu_gp_mutex); |
---|
| 1191 | + mutex_unlock(&ssp->srcu_gp_mutex); |
---|
1126 | 1192 | return; /* Someone else started the grace period. */ |
---|
1127 | 1193 | } |
---|
1128 | 1194 | } |
---|
1129 | 1195 | |
---|
1130 | | - if (rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) == SRCU_STATE_SCAN1) { |
---|
1131 | | - idx = 1 ^ (sp->srcu_idx & 1); |
---|
1132 | | - if (!try_check_zero(sp, idx, 1)) { |
---|
1133 | | - mutex_unlock(&sp->srcu_gp_mutex); |
---|
| 1196 | + if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) == SRCU_STATE_SCAN1) { |
---|
| 1197 | + idx = 1 ^ (ssp->srcu_idx & 1); |
---|
| 1198 | + if (!try_check_zero(ssp, idx, 1)) { |
---|
| 1199 | + mutex_unlock(&ssp->srcu_gp_mutex); |
---|
1134 | 1200 | return; /* readers present, retry later. */ |
---|
1135 | 1201 | } |
---|
1136 | | - srcu_flip(sp); |
---|
1137 | | - rcu_seq_set_state(&sp->srcu_gp_seq, SRCU_STATE_SCAN2); |
---|
| 1202 | + srcu_flip(ssp); |
---|
| 1203 | + spin_lock_irq_rcu_node(ssp); |
---|
| 1204 | + rcu_seq_set_state(&ssp->srcu_gp_seq, SRCU_STATE_SCAN2); |
---|
| 1205 | + spin_unlock_irq_rcu_node(ssp); |
---|
1138 | 1206 | } |
---|
1139 | 1207 | |
---|
1140 | | - if (rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) == SRCU_STATE_SCAN2) { |
---|
| 1208 | + if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) == SRCU_STATE_SCAN2) { |
---|
1141 | 1209 | |
---|
1142 | 1210 | /* |
---|
1143 | 1211 | * SRCU read-side critical sections are normally short, |
---|
1144 | 1212 | * so check at least twice in quick succession after a flip. |
---|
1145 | 1213 | */ |
---|
1146 | | - idx = 1 ^ (sp->srcu_idx & 1); |
---|
1147 | | - if (!try_check_zero(sp, idx, 2)) { |
---|
1148 | | - mutex_unlock(&sp->srcu_gp_mutex); |
---|
| 1214 | + idx = 1 ^ (ssp->srcu_idx & 1); |
---|
| 1215 | + if (!try_check_zero(ssp, idx, 2)) { |
---|
| 1216 | + mutex_unlock(&ssp->srcu_gp_mutex); |
---|
1149 | 1217 | return; /* readers present, retry later. */ |
---|
1150 | 1218 | } |
---|
1151 | | - srcu_gp_end(sp); /* Releases ->srcu_gp_mutex. */ |
---|
| 1219 | + srcu_gp_end(ssp); /* Releases ->srcu_gp_mutex. */ |
---|
1152 | 1220 | } |
---|
1153 | 1221 | } |
---|
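The state machine above performs two reader scans per grace period: SCAN1 waits for readers on the about-to-be-retired index, srcu_flip() then switches new readers to the other index, and SCAN2 catches stragglers that sampled the old index just before the flip. The scan itself is try_check_zero(), which ultimately sums the per-CPU lock and unlock counters kept in srcu_data. A simplified sketch of that comparison follows; the real srcu_readers_active_idx_check() also guards against counter wrap, which this omits.

    static bool srcu_readers_gone_sketch(struct srcu_struct *ssp, int idx)
    {
    	unsigned long locks = 0, unlocks = 0;
    	struct srcu_data *sdp;
    	int cpu;

    	/* Sum the unlock counters first... */
    	for_each_possible_cpu(cpu) {
    		sdp = per_cpu_ptr(ssp->sda, cpu);
    		unlocks += READ_ONCE(sdp->srcu_unlock_count[idx]);
    	}
    	smp_mb(); /* ...then the lock counters, so equality cannot be spurious. */
    	for_each_possible_cpu(cpu) {
    		sdp = per_cpu_ptr(ssp->sda, cpu);
    		locks += READ_ONCE(sdp->srcu_lock_count[idx]);
    	}
    	return locks == unlocks;
    }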
1154 | 1222 | |
---|
.. | .. |
---|
1164 | 1232 | struct rcu_cblist ready_cbs; |
---|
1165 | 1233 | struct rcu_head *rhp; |
---|
1166 | 1234 | struct srcu_data *sdp; |
---|
1167 | | - struct srcu_struct *sp; |
---|
| 1235 | + struct srcu_struct *ssp; |
---|
1168 | 1236 | |
---|
1169 | | - sdp = container_of(work, struct srcu_data, work.work); |
---|
1170 | | - sp = sdp->sp; |
---|
| 1237 | + sdp = container_of(work, struct srcu_data, work); |
---|
| 1238 | + |
---|
| 1239 | + ssp = sdp->ssp; |
---|
1171 | 1240 | rcu_cblist_init(&ready_cbs); |
---|
1172 | 1241 | spin_lock_irq_rcu_node(sdp); |
---|
1173 | 1242 | rcu_segcblist_advance(&sdp->srcu_cblist, |
---|
1174 | | - rcu_seq_current(&sp->srcu_gp_seq)); |
---|
| 1243 | + rcu_seq_current(&ssp->srcu_gp_seq)); |
---|
1175 | 1244 | if (sdp->srcu_cblist_invoking || |
---|
1176 | 1245 | !rcu_segcblist_ready_cbs(&sdp->srcu_cblist)) { |
---|
1177 | 1246 | spin_unlock_irq_rcu_node(sdp); |
---|
.. | .. |
---|
1197 | 1266 | spin_lock_irq_rcu_node(sdp); |
---|
1198 | 1267 | rcu_segcblist_insert_count(&sdp->srcu_cblist, &ready_cbs); |
---|
1199 | 1268 | (void)rcu_segcblist_accelerate(&sdp->srcu_cblist, |
---|
1200 | | - rcu_seq_snap(&sp->srcu_gp_seq)); |
---|
| 1269 | + rcu_seq_snap(&ssp->srcu_gp_seq)); |
---|
1201 | 1270 | sdp->srcu_cblist_invoking = false; |
---|
1202 | 1271 | more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist); |
---|
1203 | 1272 | spin_unlock_irq_rcu_node(sdp); |
---|
.. | .. |
---|
1209 | 1278 | * Finished one round of SRCU grace period. Start another if there are |
---|
1210 | 1279 | * more SRCU callbacks queued, otherwise put SRCU into not-running state. |
---|
1211 | 1280 | */ |
---|
1212 | | -static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay) |
---|
| 1281 | +static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay) |
---|
1213 | 1282 | { |
---|
1214 | 1283 | bool pushgp = true; |
---|
1215 | 1284 | |
---|
1216 | | - spin_lock_irq_rcu_node(sp); |
---|
1217 | | - if (ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)) { |
---|
1218 | | - if (!WARN_ON_ONCE(rcu_seq_state(sp->srcu_gp_seq))) { |
---|
| 1285 | + spin_lock_irq_rcu_node(ssp); |
---|
| 1286 | + if (ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)) { |
---|
| 1287 | + if (!WARN_ON_ONCE(rcu_seq_state(ssp->srcu_gp_seq))) { |
---|
1219 | 1288 | /* All requests fulfilled, time to go idle. */ |
---|
1220 | 1289 | pushgp = false; |
---|
1221 | 1290 | } |
---|
1222 | | - } else if (!rcu_seq_state(sp->srcu_gp_seq)) { |
---|
| 1291 | + } else if (!rcu_seq_state(ssp->srcu_gp_seq)) { |
---|
1223 | 1292 | /* Outstanding request and no GP. Start one. */ |
---|
1224 | | - srcu_gp_start(sp); |
---|
| 1293 | + srcu_gp_start(ssp); |
---|
1225 | 1294 | } |
---|
1226 | | - spin_unlock_irq_rcu_node(sp); |
---|
| 1295 | + spin_unlock_irq_rcu_node(ssp); |
---|
1227 | 1296 | |
---|
1228 | 1297 | if (pushgp) |
---|
1229 | | - queue_delayed_work(rcu_gp_wq, &sp->work, delay); |
---|
| 1298 | + queue_delayed_work(rcu_gp_wq, &ssp->work, delay); |
---|
1230 | 1299 | } |
---|
1231 | 1300 | |
---|
1232 | 1301 | /* |
---|
.. | .. |
---|
1234 | 1303 | */ |
---|
1235 | 1304 | static void process_srcu(struct work_struct *work) |
---|
1236 | 1305 | { |
---|
1237 | | - struct srcu_struct *sp; |
---|
| 1306 | + struct srcu_struct *ssp; |
---|
1238 | 1307 | |
---|
1239 | | - sp = container_of(work, struct srcu_struct, work.work); |
---|
| 1308 | + ssp = container_of(work, struct srcu_struct, work.work); |
---|
1240 | 1309 | |
---|
1241 | | - srcu_advance_state(sp); |
---|
1242 | | - srcu_reschedule(sp, srcu_get_delay(sp)); |
---|
| 1310 | + srcu_advance_state(ssp); |
---|
| 1311 | + srcu_reschedule(ssp, srcu_get_delay(ssp)); |
---|
1243 | 1312 | } |
---|
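process_srcu() is the workqueue handler that advances the state machine and then re-queues itself via srcu_reschedule() until no further grace periods are needed. The delay it passes comes from srcu_get_delay(), which is not part of this hunk; roughly, it returns zero when an expedited grace period has been requested and otherwise a small rate-limiting interval. A hedged sketch of that behavior:

    /* Roughly what srcu_get_delay() computes (the real one lives earlier in
     * this file; SRCU_INTERVAL is its rate-limiting constant).
     */
    static unsigned long srcu_get_delay_sketch(struct srcu_struct *ssp)
    {
    	if (ULONG_CMP_LT(READ_ONCE(ssp->srcu_gp_seq),
    			 READ_ONCE(ssp->srcu_gp_seq_needed_exp)))
    		return 0;		/* Expedited request: no delay. */
    	return SRCU_INTERVAL;		/* Otherwise rate-limit GP work. */
    }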
1244 | 1313 | |
---|
1245 | 1314 | void srcutorture_get_gp_data(enum rcutorture_type test_type, |
---|
1246 | | - struct srcu_struct *sp, int *flags, |
---|
| 1315 | + struct srcu_struct *ssp, int *flags, |
---|
1247 | 1316 | unsigned long *gp_seq) |
---|
1248 | 1317 | { |
---|
1249 | 1318 | if (test_type != SRCU_FLAVOR) |
---|
1250 | 1319 | return; |
---|
1251 | 1320 | *flags = 0; |
---|
1252 | | - *gp_seq = rcu_seq_current(&sp->srcu_gp_seq); |
---|
| 1321 | + *gp_seq = rcu_seq_current(&ssp->srcu_gp_seq); |
---|
1253 | 1322 | } |
---|
1254 | 1323 | EXPORT_SYMBOL_GPL(srcutorture_get_gp_data); |
---|
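The gp_seq value reported here is the raw sequence counter: by the rcu_seq_*() conventions in kernel/rcu/rcu.h, the low-order bits carry the grace-period phase and the remaining bits count phases. A hedged decoding sketch, where the helper names are the existing rcu.h ones and the printing function itself is illustrative:

    static void srcu_show_gp_sketch(struct srcu_struct *ssp)
    {
    	unsigned long gp_seq = rcu_seq_current(&ssp->srcu_gp_seq);

    	pr_info("srcu_gp_seq=%lu ctr=%lu state=%d (0=IDLE 1=SCAN1 2=SCAN2)\n",
    		gp_seq, rcu_seq_ctr(gp_seq), rcu_seq_state(gp_seq));
    }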
1255 | 1324 | |
---|
1256 | | -void srcu_torture_stats_print(struct srcu_struct *sp, char *tt, char *tf) |
---|
| 1325 | +void srcu_torture_stats_print(struct srcu_struct *ssp, char *tt, char *tf) |
---|
1257 | 1326 | { |
---|
1258 | 1327 | int cpu; |
---|
1259 | 1328 | int idx; |
---|
1260 | 1329 | unsigned long s0 = 0, s1 = 0; |
---|
1261 | 1330 | |
---|
1262 | | - idx = sp->srcu_idx & 0x1; |
---|
| 1331 | + idx = ssp->srcu_idx & 0x1; |
---|
1263 | 1332 | pr_alert("%s%s Tree SRCU g%ld per-CPU(idx=%d):", |
---|
1264 | | - tt, tf, rcu_seq_current(&sp->srcu_gp_seq), idx); |
---|
| 1333 | + tt, tf, rcu_seq_current(&ssp->srcu_gp_seq), idx); |
---|
1265 | 1334 | for_each_possible_cpu(cpu) { |
---|
1266 | 1335 | unsigned long l0, l1; |
---|
1267 | 1336 | unsigned long u0, u1; |
---|
1268 | 1337 | long c0, c1; |
---|
1269 | 1338 | struct srcu_data *sdp; |
---|
1270 | 1339 | |
---|
1271 | | - sdp = per_cpu_ptr(sp->sda, cpu); |
---|
1272 | | - u0 = sdp->srcu_unlock_count[!idx]; |
---|
1273 | | - u1 = sdp->srcu_unlock_count[idx]; |
---|
| 1340 | + sdp = per_cpu_ptr(ssp->sda, cpu); |
---|
| 1341 | + u0 = data_race(sdp->srcu_unlock_count[!idx]); |
---|
| 1342 | + u1 = data_race(sdp->srcu_unlock_count[idx]); |
---|
1274 | 1343 | |
---|
1275 | 1344 | /* |
---|
1276 | 1345 | * Make sure that a lock is always counted if the corresponding |
---|
.. | .. |
---|
1278 | 1347 | */ |
---|
1279 | 1348 | smp_rmb(); |
---|
1280 | 1349 | |
---|
1281 | | - l0 = sdp->srcu_lock_count[!idx]; |
---|
1282 | | - l1 = sdp->srcu_lock_count[idx]; |
---|
| 1350 | + l0 = data_race(sdp->srcu_lock_count[!idx]); |
---|
| 1351 | + l1 = data_race(sdp->srcu_lock_count[idx]); |
---|
1283 | 1352 | |
---|
1284 | 1353 | c0 = l0 - u0; |
---|
1285 | 1354 | c1 = l1 - u1; |
---|
1286 | | - pr_cont(" %d(%ld,%ld %1p)", |
---|
1287 | | - cpu, c0, c1, rcu_segcblist_head(&sdp->srcu_cblist)); |
---|
| 1355 | + pr_cont(" %d(%ld,%ld %c)", |
---|
| 1356 | + cpu, c0, c1, |
---|
| 1357 | + "C."[rcu_segcblist_empty(&sdp->srcu_cblist)]); |
---|
1288 | 1358 | s0 += c0; |
---|
1289 | 1359 | s1 += c1; |
---|
1290 | 1360 | } |
---|
.. | .. |
---|
1300 | 1370 | return 0; |
---|
1301 | 1371 | } |
---|
1302 | 1372 | early_initcall(srcu_bootup_announce); |
---|
| 1373 | + |
---|
| 1374 | +void __init srcu_init(void) |
---|
| 1375 | +{ |
---|
| 1376 | + struct srcu_struct *ssp; |
---|
| 1377 | + |
---|
| 1378 | + srcu_init_done = true; |
---|
| 1379 | + while (!list_empty(&srcu_boot_list)) { |
---|
| 1380 | + ssp = list_first_entry(&srcu_boot_list, struct srcu_struct, |
---|
| 1381 | + work.work.entry); |
---|
| 1382 | + check_init_srcu_struct(ssp); |
---|
| 1383 | + list_del_init(&ssp->work.work.entry); |
---|
| 1384 | + queue_work(rcu_gp_wq, &ssp->work.work); |
---|
| 1385 | + } |
---|
| 1386 | +} |
---|
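srcu_init() closes the early-boot window: before it runs, workqueues are not yet usable, so the grace-period-start path (srcu_funnel_gp_start(), not shown in this hunk) parks the srcu_struct on srcu_boot_list instead of queueing work, and srcu_init() later drains that list. A hedged sketch of that deferral:

    /* Roughly what the grace-period-start path does once a GP is needed. */
    static void srcu_queue_gp_work_sketch(struct srcu_struct *ssp)
    {
    	if (likely(srcu_init_done))
    		queue_delayed_work(rcu_gp_wq, &ssp->work,
    				   srcu_get_delay(ssp));
    	else if (list_empty(&ssp->work.work.entry))
    		list_add(&ssp->work.work.entry, &srcu_boot_list);
    }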
| 1387 | + |
---|
| 1388 | +#ifdef CONFIG_MODULES |
---|
| 1389 | + |
---|
| 1390 | +/* Initialize any global-scope srcu_struct structures used by this module. */ |
---|
| 1391 | +static int srcu_module_coming(struct module *mod) |
---|
| 1392 | +{ |
---|
| 1393 | + int i; |
---|
| 1394 | + struct srcu_struct **sspp = mod->srcu_struct_ptrs; |
---|
| 1395 | + int ret; |
---|
| 1396 | + |
---|
| 1397 | + for (i = 0; i < mod->num_srcu_structs; i++) { |
---|
| 1398 | + ret = init_srcu_struct(*(sspp++)); |
---|
| 1399 | + if (WARN_ON_ONCE(ret)) |
---|
| 1400 | + return ret; |
---|
| 1401 | + } |
---|
| 1402 | + return 0; |
---|
| 1403 | +} |
---|
| 1404 | + |
---|
| 1405 | +/* Clean up any global-scope srcu_struct structures used by this module. */ |
---|
| 1406 | +static void srcu_module_going(struct module *mod) |
---|
| 1407 | +{ |
---|
| 1408 | + int i; |
---|
| 1409 | + struct srcu_struct **sspp = mod->srcu_struct_ptrs; |
---|
| 1410 | + |
---|
| 1411 | + for (i = 0; i < mod->num_srcu_structs; i++) |
---|
| 1412 | + cleanup_srcu_struct(*(sspp++)); |
---|
| 1413 | +} |
---|
| 1414 | + |
---|
| 1415 | +/* Handle one module, either coming or going. */ |
---|
| 1416 | +static int srcu_module_notify(struct notifier_block *self, |
---|
| 1417 | + unsigned long val, void *data) |
---|
| 1418 | +{ |
---|
| 1419 | + struct module *mod = data; |
---|
| 1420 | + int ret = 0; |
---|
| 1421 | + |
---|
| 1422 | + switch (val) { |
---|
| 1423 | + case MODULE_STATE_COMING: |
---|
| 1424 | + ret = srcu_module_coming(mod); |
---|
| 1425 | + break; |
---|
| 1426 | + case MODULE_STATE_GOING: |
---|
| 1427 | + srcu_module_going(mod); |
---|
| 1428 | + break; |
---|
| 1429 | + default: |
---|
| 1430 | + break; |
---|
| 1431 | + } |
---|
| 1432 | + return ret; |
---|
| 1433 | +} |
---|
| 1434 | + |
---|
| 1435 | +static struct notifier_block srcu_module_nb = { |
---|
| 1436 | + .notifier_call = srcu_module_notify, |
---|
| 1437 | + .priority = 0, |
---|
| 1438 | +}; |
---|
| 1439 | + |
---|
| 1440 | +static __init int init_srcu_module_notifier(void) |
---|
| 1441 | +{ |
---|
| 1442 | + int ret; |
---|
| 1443 | + |
---|
| 1444 | + ret = register_module_notifier(&srcu_module_nb); |
---|
| 1445 | + if (ret) |
---|
| 1446 | + pr_warn("Failed to register srcu module notifier\n"); |
---|
| 1447 | + return ret; |
---|
| 1448 | +} |
---|
| 1449 | +late_initcall(init_srcu_module_notifier); |
---|
| 1450 | + |
---|
| 1451 | +#endif /* #ifdef CONFIG_MODULES */ |
---|
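The notifier exists so that a module's statically allocated srcu_struct structures are initialized before the module's init function runs and cleaned up after its exit function. With it in place, module code can rely on DEFINE_STATIC_SRCU() alone. A hedged sketch of a module using the mechanism, with illustrative names:

    #include <linux/module.h>
    #include <linux/srcu.h>

    DEFINE_STATIC_SRCU(my_mod_srcu);	/* pointer recorded in mod->srcu_struct_ptrs */

    static int __init my_mod_init(void)
    {
    	int idx;

    	/* Already initialized by srcu_module_coming() at MODULE_STATE_COMING. */
    	idx = srcu_read_lock(&my_mod_srcu);
    	srcu_read_unlock(&my_mod_srcu, idx);
    	return 0;
    }
    module_init(my_mod_init);

    static void __exit my_mod_exit(void)
    {
    	/* No cleanup_srcu_struct() here: srcu_module_going() handles it. */
    }
    module_exit(my_mod_exit);

    MODULE_LICENSE("GPL");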