@@ -1 +1 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * Sleepable Read-Copy Update mechanism for mutual exclusion.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, you can access it online at
- * http://www.gnu.org/licenses/gpl-2.0.html.
  *
  * Copyright (C) IBM Corporation, 2006
  * Copyright (C) Fujitsu, 2012
  *
- * Author: Paul McKenney <paulmck@us.ibm.com>
+ * Authors: Paul McKenney <paulmck@linux.ibm.com>
  *	   Lai Jiangshan <laijs@cn.fujitsu.com>
  *
  * For detailed explanation of Read-Copy Update mechanism see -
@@ -51 +38 @@
 static ulong counter_wrap_check = (ULONG_MAX >> 2);
 module_param(counter_wrap_check, ulong, 0444);
 
+/* Early-boot callback-management, so early that no lock is required! */
+static LIST_HEAD(srcu_boot_list);
+static bool __read_mostly srcu_init_done;
+
 static void srcu_invoke_callbacks(struct work_struct *work);
-static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay);
+static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay);
 static void process_srcu(struct work_struct *work);
+static void srcu_delay_timer(struct timer_list *t);
 
 /* Wrappers for lock acquisition and release, see raw_spin_lock_rcu_node(). */
 #define spin_lock_rcu_node(p)					\
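The diff does not show the late-boot consumer of `srcu_boot_list`. A sketch of what that initializer plausibly looks like, reconstructed from the `list_add()` in srcu_funnel_gp_start() further down (the names `srcu_init` and `rcu_gp_wq` follow the surrounding code, but the details here are an assumption, not part of this patch's visible hunks):

```c
/* Sketch: once workqueues are usable, drain grace periods queued at boot. */
void __init srcu_init(void)
{
	struct srcu_struct *ssp;

	srcu_init_done = true;
	while (!list_empty(&srcu_boot_list)) {
		ssp = list_first_entry(&srcu_boot_list, struct srcu_struct,
				       work.work.entry);
		check_init_srcu_struct(ssp);
		list_del_init(&ssp->work.work.entry);
		queue_work(rcu_gp_wq, &ssp->work.work);
	}
}
```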
@@ -88 +80 @@
  * srcu_read_unlock() running against them.  So if the is_static parameter
  * is set, don't initialize ->srcu_lock_count[] and ->srcu_unlock_count[].
  */
-static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static)
+static void init_srcu_struct_nodes(struct srcu_struct *ssp, bool is_static)
 {
 	int cpu;
 	int i;
@@ -98 +90 @@
 	struct srcu_node *snp;
 	struct srcu_node *snp_first;
 
+	/* Initialize geometry if it has not already been initialized. */
+	rcu_init_geometry();
+
 	/* Work out the overall tree geometry. */
-	sp->level[0] = &sp->node[0];
+	ssp->level[0] = &ssp->node[0];
 	for (i = 1; i < rcu_num_lvls; i++)
-		sp->level[i] = sp->level[i - 1] + num_rcu_lvl[i - 1];
+		ssp->level[i] = ssp->level[i - 1] + num_rcu_lvl[i - 1];
 	rcu_init_levelspread(levelspread, num_rcu_lvl);
 
 	/* Each pass through this loop initializes one srcu_node structure. */
-	rcu_for_each_node_breadth_first(sp, snp) {
+	srcu_for_each_node_breadth_first(ssp, snp) {
 		spin_lock_init(&ACCESS_PRIVATE(snp, lock));
 		WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) !=
 			     ARRAY_SIZE(snp->srcu_data_have_cbs));
@@ -116 +111 @@
 		snp->srcu_gp_seq_needed_exp = 0;
 		snp->grplo = -1;
 		snp->grphi = -1;
-		if (snp == &sp->node[0]) {
+		if (snp == &ssp->node[0]) {
 			/* Root node, special case. */
 			snp->srcu_parent = NULL;
 			continue;
 		}
 
 		/* Non-root node. */
-		if (snp == sp->level[level + 1])
+		if (snp == ssp->level[level + 1])
 			level++;
-		snp->srcu_parent = sp->level[level - 1] +
-				   (snp - sp->level[level]) /
+		snp->srcu_parent = ssp->level[level - 1] +
+				   (snp - ssp->level[level]) /
 				   levelspread[level - 1];
 	}
 
@@ -137 +132 @@
 	WARN_ON_ONCE(ARRAY_SIZE(sdp->srcu_lock_count) !=
 		     ARRAY_SIZE(sdp->srcu_unlock_count));
 	level = rcu_num_lvls - 1;
-	snp_first = sp->level[level];
+	snp_first = ssp->level[level];
 	for_each_possible_cpu(cpu) {
-		sdp = per_cpu_ptr(sp->sda, cpu);
+		sdp = per_cpu_ptr(ssp->sda, cpu);
 		spin_lock_init(&ACCESS_PRIVATE(sdp, lock));
 		rcu_segcblist_init(&sdp->srcu_cblist);
 		sdp->srcu_cblist_invoking = false;
-		sdp->srcu_gp_seq_needed = sp->srcu_gp_seq;
-		sdp->srcu_gp_seq_needed_exp = sp->srcu_gp_seq;
+		sdp->srcu_gp_seq_needed = ssp->srcu_gp_seq;
+		sdp->srcu_gp_seq_needed_exp = ssp->srcu_gp_seq;
 		sdp->mynode = &snp_first[cpu / levelspread[level]];
 		for (snp = sdp->mynode; snp != NULL; snp = snp->srcu_parent) {
 			if (snp->grplo < 0)
@@ -152 +147 @@
 			snp->grphi = cpu;
 		}
 		sdp->cpu = cpu;
-		INIT_DELAYED_WORK(&sdp->work, srcu_invoke_callbacks);
-		sdp->sp = sp;
+		INIT_WORK(&sdp->work, srcu_invoke_callbacks);
+		timer_setup(&sdp->delay_work, srcu_delay_timer, 0);
+		sdp->ssp = ssp;
 		sdp->grpmask = 1 << (cpu - sdp->mynode->grplo);
 		if (is_static)
 			continue;
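To make the parent-pointer arithmetic above concrete, here is a purely hypothetical geometry (the numbers are illustrative, not from any particular config):

```c
/*
 * Hypothetical geometry: rcu_num_lvls == 2, num_rcu_lvl == {1, 4},
 * levelspread == {4, 16}.  Then ssp->level[0] points at the root and
 * ssp->level[1] at the first of four leaves.  Leaf k's parent index is
 * k / levelspread[0] == k / 4 == 0, so all four leaves point at the one
 * root, and CPU c hangs off leaf c / levelspread[1]: CPUs 0-15 share
 * leaf 0, CPUs 16-31 leaf 1, and so on, with ->grplo/->grphi recording
 * each leaf's CPU span as the loop above walks the possible CPUs.
 */
```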
@@ -172 +168 @@
  * parameter is passed through to init_srcu_struct_nodes(), and
  * also tells us that ->sda has already been wired up to srcu_data.
  */
-static int init_srcu_struct_fields(struct srcu_struct *sp, bool is_static)
+static int init_srcu_struct_fields(struct srcu_struct *ssp, bool is_static)
 {
-	mutex_init(&sp->srcu_cb_mutex);
-	mutex_init(&sp->srcu_gp_mutex);
-	sp->srcu_idx = 0;
-	sp->srcu_gp_seq = 0;
-	sp->srcu_barrier_seq = 0;
-	mutex_init(&sp->srcu_barrier_mutex);
-	atomic_set(&sp->srcu_barrier_cpu_cnt, 0);
-	INIT_DELAYED_WORK(&sp->work, process_srcu);
+	mutex_init(&ssp->srcu_cb_mutex);
+	mutex_init(&ssp->srcu_gp_mutex);
+	ssp->srcu_idx = 0;
+	ssp->srcu_gp_seq = 0;
+	ssp->srcu_barrier_seq = 0;
+	mutex_init(&ssp->srcu_barrier_mutex);
+	atomic_set(&ssp->srcu_barrier_cpu_cnt, 0);
+	INIT_DELAYED_WORK(&ssp->work, process_srcu);
 	if (!is_static)
-		sp->sda = alloc_percpu(struct srcu_data);
-	init_srcu_struct_nodes(sp, is_static);
-	sp->srcu_gp_seq_needed_exp = 0;
-	sp->srcu_last_gp_end = ktime_get_mono_fast_ns();
-	smp_store_release(&sp->srcu_gp_seq_needed, 0); /* Init done. */
-	return sp->sda ? 0 : -ENOMEM;
+		ssp->sda = alloc_percpu(struct srcu_data);
+	init_srcu_struct_nodes(ssp, is_static);
+	ssp->srcu_gp_seq_needed_exp = 0;
+	ssp->srcu_last_gp_end = ktime_get_mono_fast_ns();
+	smp_store_release(&ssp->srcu_gp_seq_needed, 0); /* Init done. */
+	return ssp->sda ? 0 : -ENOMEM;
 }
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 
-int __init_srcu_struct(struct srcu_struct *sp, const char *name,
+int __init_srcu_struct(struct srcu_struct *ssp, const char *name,
 		       struct lock_class_key *key)
 {
 	/* Don't re-initialize a lock while it is held. */
-	debug_check_no_locks_freed((void *)sp, sizeof(*sp));
-	lockdep_init_map(&sp->dep_map, name, key, 0);
-	spin_lock_init(&ACCESS_PRIVATE(sp, lock));
-	return init_srcu_struct_fields(sp, false);
+	debug_check_no_locks_freed((void *)ssp, sizeof(*ssp));
+	lockdep_init_map(&ssp->dep_map, name, key, 0);
+	spin_lock_init(&ACCESS_PRIVATE(ssp, lock));
+	return init_srcu_struct_fields(ssp, false);
 }
 EXPORT_SYMBOL_GPL(__init_srcu_struct);
 
@@ -208 +204 @@
 
 /**
  * init_srcu_struct - initialize a sleep-RCU structure
- * @sp: structure to initialize.
+ * @ssp: structure to initialize.
  *
  * Must invoke this on a given srcu_struct before passing that srcu_struct
  * to any other function.  Each srcu_struct represents a separate domain
  * of SRCU protection.
  */
-int init_srcu_struct(struct srcu_struct *sp)
+int init_srcu_struct(struct srcu_struct *ssp)
 {
-	spin_lock_init(&ACCESS_PRIVATE(sp, lock));
-	return init_srcu_struct_fields(sp, false);
+	spin_lock_init(&ACCESS_PRIVATE(ssp, lock));
+	return init_srcu_struct_fields(ssp, false);
 }
 EXPORT_SYMBOL_GPL(init_srcu_struct);
 
@@ -227 +223 @@
  * First-use initialization of statically allocated srcu_struct
  * structure.  Wiring up the combining tree is more than can be
  * done with compile-time initialization, so this check is added
- * to each update-side SRCU primitive.  Use sp->lock, which -is-
+ * to each update-side SRCU primitive.  Use ssp->lock, which -is-
  * compile-time initialized, to resolve races involving multiple
  * CPUs trying to garner first-use privileges.
  */
-static void check_init_srcu_struct(struct srcu_struct *sp)
+static void check_init_srcu_struct(struct srcu_struct *ssp)
 {
 	unsigned long flags;
 
-	WARN_ON_ONCE(rcu_scheduler_active == RCU_SCHEDULER_INIT);
 	/* The smp_load_acquire() pairs with the smp_store_release(). */
-	if (!rcu_seq_state(smp_load_acquire(&sp->srcu_gp_seq_needed))) /*^^^*/
+	if (!rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq_needed))) /*^^^*/
 		return; /* Already initialized. */
-	spin_lock_irqsave_rcu_node(sp, flags);
-	if (!rcu_seq_state(sp->srcu_gp_seq_needed)) {
-		spin_unlock_irqrestore_rcu_node(sp, flags);
+	spin_lock_irqsave_rcu_node(ssp, flags);
+	if (!rcu_seq_state(ssp->srcu_gp_seq_needed)) {
+		spin_unlock_irqrestore_rcu_node(ssp, flags);
 		return;
 	}
-	init_srcu_struct_fields(sp, true);
-	spin_unlock_irqrestore_rcu_node(sp, flags);
+	init_srcu_struct_fields(ssp, true);
+	spin_unlock_irqrestore_rcu_node(ssp, flags);
 }
 
 /*
  * Returns approximate total of the readers' ->srcu_lock_count[] values
  * for the rank of per-CPU counters specified by idx.
  */
-static unsigned long srcu_readers_lock_idx(struct srcu_struct *sp, int idx)
+static unsigned long srcu_readers_lock_idx(struct srcu_struct *ssp, int idx)
 {
 	int cpu;
 	unsigned long sum = 0;
 
 	for_each_possible_cpu(cpu) {
-		struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu);
+		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);
 
 		sum += READ_ONCE(cpuc->srcu_lock_count[idx]);
 	}
@@ -269 +264 @@
  * Returns approximate total of the readers' ->srcu_unlock_count[] values
  * for the rank of per-CPU counters specified by idx.
  */
-static unsigned long srcu_readers_unlock_idx(struct srcu_struct *sp, int idx)
+static unsigned long srcu_readers_unlock_idx(struct srcu_struct *ssp, int idx)
 {
 	int cpu;
 	unsigned long sum = 0;
 
 	for_each_possible_cpu(cpu) {
-		struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu);
+		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);
 
 		sum += READ_ONCE(cpuc->srcu_unlock_count[idx]);
 	}
@@ -286 +281 @@
  * Return true if the number of pre-existing readers is determined to
  * be zero.
  */
-static bool srcu_readers_active_idx_check(struct srcu_struct *sp, int idx)
+static bool srcu_readers_active_idx_check(struct srcu_struct *ssp, int idx)
 {
 	unsigned long unlocks;
 
-	unlocks = srcu_readers_unlock_idx(sp, idx);
+	unlocks = srcu_readers_unlock_idx(ssp, idx);
 
 	/*
 	 * Make sure that a lock is always counted if the corresponding
@@ -326 +321 @@
 	 * of floor(ULONG_MAX/NR_CPUS/2), which should be sufficient,
 	 * especially on 64-bit systems.
	 */
-	return srcu_readers_lock_idx(sp, idx) == unlocks;
+	return srcu_readers_lock_idx(ssp, idx) == unlocks;
 }
 
 /**
  * srcu_readers_active - returns true if there are readers, and false
  *                       otherwise
- * @sp: which srcu_struct to count active readers (holding srcu_read_lock).
+ * @ssp: which srcu_struct to count active readers (holding srcu_read_lock).
  *
  * Note that this is not an atomic primitive, and can therefore suffer
  * severe errors when invoked on an active srcu_struct.  That said, it
  * can be useful as an error check at cleanup time.
  */
-static bool srcu_readers_active(struct srcu_struct *sp)
+static bool srcu_readers_active(struct srcu_struct *ssp)
 {
 	int cpu;
 	unsigned long sum = 0;
 
 	for_each_possible_cpu(cpu) {
-		struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu);
+		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);
 
 		sum += READ_ONCE(cpuc->srcu_lock_count[0]);
 		sum += READ_ONCE(cpuc->srcu_lock_count[1]);
@@ -360 +355 @@
  * Return grace-period delay, zero if there are expedited grace
  * periods pending, SRCU_INTERVAL otherwise.
  */
-static unsigned long srcu_get_delay(struct srcu_struct *sp)
+static unsigned long srcu_get_delay(struct srcu_struct *ssp)
 {
-	if (ULONG_CMP_LT(READ_ONCE(sp->srcu_gp_seq),
-			 READ_ONCE(sp->srcu_gp_seq_needed_exp)))
+	if (ULONG_CMP_LT(READ_ONCE(ssp->srcu_gp_seq),
+			 READ_ONCE(ssp->srcu_gp_seq_needed_exp)))
 		return 0;
 	return SRCU_INTERVAL;
 }
 
-/* Helper for cleanup_srcu_struct() and cleanup_srcu_struct_quiesced(). */
-void _cleanup_srcu_struct(struct srcu_struct *sp, bool quiesced)
+/**
+ * cleanup_srcu_struct - deconstruct a sleep-RCU structure
+ * @ssp: structure to clean up.
+ *
+ * Must invoke this after you are finished using a given srcu_struct that
+ * was initialized via init_srcu_struct(), else you leak memory.
+ */
+void cleanup_srcu_struct(struct srcu_struct *ssp)
 {
 	int cpu;
 
-	if (WARN_ON(!srcu_get_delay(sp)))
+	if (WARN_ON(!srcu_get_delay(ssp)))
 		return; /* Just leak it! */
-	if (WARN_ON(srcu_readers_active(sp)))
+	if (WARN_ON(srcu_readers_active(ssp)))
 		return; /* Just leak it! */
-	if (quiesced) {
-		if (WARN_ON(delayed_work_pending(&sp->work)))
-			return; /* Just leak it! */
-	} else {
-		flush_delayed_work(&sp->work);
+	flush_delayed_work(&ssp->work);
+	for_each_possible_cpu(cpu) {
+		struct srcu_data *sdp = per_cpu_ptr(ssp->sda, cpu);
+
+		del_timer_sync(&sdp->delay_work);
+		flush_work(&sdp->work);
+		if (WARN_ON(rcu_segcblist_n_cbs(&sdp->srcu_cblist)))
+			return; /* Forgot srcu_barrier(), so just leak it! */
 	}
-	for_each_possible_cpu(cpu)
-		if (quiesced) {
-			if (WARN_ON(delayed_work_pending(&per_cpu_ptr(sp->sda, cpu)->work)))
-				return; /* Just leak it! */
-		} else {
-			flush_delayed_work(&per_cpu_ptr(sp->sda, cpu)->work);
-		}
-	if (WARN_ON(rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) != SRCU_STATE_IDLE) ||
-	    WARN_ON(srcu_readers_active(sp))) {
+	if (WARN_ON(rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) != SRCU_STATE_IDLE) ||
+	    WARN_ON(srcu_readers_active(ssp))) {
 		pr_info("%s: Active srcu_struct %p state: %d\n",
-			__func__, sp, rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)));
+			__func__, ssp, rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)));
 		return; /* Caller forgot to stop doing call_srcu()? */
 	}
-	free_percpu(sp->sda);
-	sp->sda = NULL;
+	free_percpu(ssp->sda);
+	ssp->sda = NULL;
 }
-EXPORT_SYMBOL_GPL(_cleanup_srcu_struct);
+EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
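Together with init_srcu_struct() above, this gives the expected lifecycle for a dynamically allocated domain. A minimal sketch, with hypothetical names (`my_dev`), showing the srcu_barrier()-before-cleanup ordering that the "Forgot srcu_barrier()" warning above enforces:

```c
#include <linux/srcu.h>
#include <linux/slab.h>

/* Hypothetical object owning a dynamically initialized SRCU domain. */
struct my_dev {
	struct srcu_struct srcu;
};

static struct my_dev *my_dev_create(void)
{
	struct my_dev *d = kzalloc(sizeof(*d), GFP_KERNEL);

	if (d && init_srcu_struct(&d->srcu)) {	/* Must precede all other use. */
		kfree(d);
		return NULL;
	}
	return d;
}

static void my_dev_destroy(struct my_dev *d)
{
	srcu_barrier(&d->srcu);		/* Wait for any pending call_srcu() CBs. */
	cleanup_srcu_struct(&d->srcu);	/* Otherwise ->sda is leaked. */
	kfree(d);
}
```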
 
 /*
  * Counts the new reader in the appropriate per-CPU element of the
  * srcu_struct.
  * Returns an index that must be passed to the matching srcu_read_unlock().
  */
-int __srcu_read_lock(struct srcu_struct *sp)
+int __srcu_read_lock(struct srcu_struct *ssp)
 {
 	int idx;
 
-	idx = READ_ONCE(sp->srcu_idx) & 0x1;
-	this_cpu_inc(sp->sda->srcu_lock_count[idx]);
+	idx = READ_ONCE(ssp->srcu_idx) & 0x1;
+	this_cpu_inc(ssp->sda->srcu_lock_count[idx]);
 	smp_mb(); /* B */  /* Avoid leaking the critical section. */
 	return idx;
 }
@@ -422 +419 @@
  * element of the srcu_struct.  Note that this may well be a different
  * CPU than that which was incremented by the corresponding srcu_read_lock().
  */
-void __srcu_read_unlock(struct srcu_struct *sp, int idx)
+void __srcu_read_unlock(struct srcu_struct *ssp, int idx)
 {
 	smp_mb(); /* C */  /* Avoid leaking the critical section. */
-	this_cpu_inc(sp->sda->srcu_unlock_count[idx]);
+	this_cpu_inc(ssp->sda->srcu_unlock_count[idx]);
 }
 EXPORT_SYMBOL_GPL(__srcu_read_unlock);
 
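These two primitives back the public srcu_read_lock()/srcu_read_unlock() API. A typical reader looks like the following sketch; `my_srcu`, `gp`, and `struct foo` are illustrative names, not part of this file:

```c
#include <linux/srcu.h>

DEFINE_SRCU(my_srcu);		/* Hypothetical statically allocated domain. */
struct foo { int a; };
struct foo __rcu *gp;		/* Hypothetical SRCU-protected pointer. */

int my_reader(void)
{
	int idx, a = -1;
	struct foo *p;

	idx = srcu_read_lock(&my_srcu);		/* Bumps ->srcu_lock_count[idx]. */
	p = srcu_dereference(gp, &my_srcu);	/* Valid only inside the reader. */
	if (p)
		a = p->a;			/* SRCU readers may even block here. */
	srcu_read_unlock(&my_srcu, idx);	/* Bumps ->srcu_unlock_count[idx]. */
	return a;
}
```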
@@ -441 +438 @@
 /*
  * Start an SRCU grace period.
  */
-static void srcu_gp_start(struct srcu_struct *sp)
+static void srcu_gp_start(struct srcu_struct *ssp)
 {
-	struct srcu_data *sdp = this_cpu_ptr(sp->sda);
+	struct srcu_data *sdp = this_cpu_ptr(ssp->sda);
 	int state;
 
-	lockdep_assert_held(&ACCESS_PRIVATE(sp, lock));
-	WARN_ON_ONCE(ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed));
+	lockdep_assert_held(&ACCESS_PRIVATE(ssp, lock));
+	WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed));
 	spin_lock_rcu_node(sdp);  /* Interrupts already disabled. */
 	rcu_segcblist_advance(&sdp->srcu_cblist,
-			      rcu_seq_current(&sp->srcu_gp_seq));
+			      rcu_seq_current(&ssp->srcu_gp_seq));
 	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
-				       rcu_seq_snap(&sp->srcu_gp_seq));
+				       rcu_seq_snap(&ssp->srcu_gp_seq));
 	spin_unlock_rcu_node(sdp);  /* Interrupts remain disabled. */
 	smp_mb(); /* Order prior store to ->srcu_gp_seq_needed vs. GP start. */
-	rcu_seq_start(&sp->srcu_gp_seq);
-	state = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq));
+	rcu_seq_start(&ssp->srcu_gp_seq);
+	state = rcu_seq_state(ssp->srcu_gp_seq);
 	WARN_ON_ONCE(state != SRCU_STATE_SCAN1);
 }
 
-/*
- * Track online CPUs to guide callback workqueue placement.
- */
-DEFINE_PER_CPU(bool, srcu_online);
 
-void srcu_online_cpu(unsigned int cpu)
+static void srcu_delay_timer(struct timer_list *t)
 {
-	WRITE_ONCE(per_cpu(srcu_online, cpu), true);
+	struct srcu_data *sdp = container_of(t, struct srcu_data, delay_work);
+
+	queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
 }
 
-void srcu_offline_cpu(unsigned int cpu)
-{
-	WRITE_ONCE(per_cpu(srcu_online, cpu), false);
-}
-
-/*
- * Place the workqueue handler on the specified CPU if online, otherwise
- * just run it whereever.  This is useful for placing workqueue handlers
- * that are to invoke the specified CPU's callbacks.
- */
-static bool srcu_queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
-				       struct delayed_work *dwork,
+static void srcu_queue_delayed_work_on(struct srcu_data *sdp,
 				       unsigned long delay)
 {
-	bool ret;
+	if (!delay) {
+		queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
+		return;
+	}
 
-	preempt_disable();
-	if (READ_ONCE(per_cpu(srcu_online, cpu)))
-		ret = queue_delayed_work_on(cpu, wq, dwork, delay);
-	else
-		ret = queue_delayed_work(wq, dwork, delay);
-	preempt_enable();
-	return ret;
+	timer_reduce(&sdp->delay_work, jiffies + delay);
 }
 
 /*
@@ -501 +482 @@
  */
 static void srcu_schedule_cbs_sdp(struct srcu_data *sdp, unsigned long delay)
 {
-	srcu_queue_delayed_work_on(sdp->cpu, rcu_gp_wq, &sdp->work, delay);
+	srcu_queue_delayed_work_on(sdp, delay);
 }
 
 /*
@@ -510 +491 @@
  * just-completed grace period, the one corresponding to idx.  If possible,
  * schedule this invocation on the corresponding CPUs.
  */
-static void srcu_schedule_cbs_snp(struct srcu_struct *sp, struct srcu_node *snp,
+static void srcu_schedule_cbs_snp(struct srcu_struct *ssp, struct srcu_node *snp,
 				  unsigned long mask, unsigned long delay)
 {
 	int cpu;
@@ -518 +499 @@
 	for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
 		if (!(mask & (1 << (cpu - snp->grplo))))
 			continue;
-		srcu_schedule_cbs_sdp(per_cpu_ptr(sp->sda, cpu), delay);
+		srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, cpu), delay);
 	}
 }
 
@@ -531 +512 @@
  * are initiating callback invocation.  This allows the ->srcu_have_cbs[]
  * array to have a finite number of elements.
  */
-static void srcu_gp_end(struct srcu_struct *sp)
+static void srcu_gp_end(struct srcu_struct *ssp)
 {
 	unsigned long cbdelay;
 	bool cbs;
@@ -545 +526 @@
 	struct srcu_node *snp;
 
 	/* Prevent more than one additional grace period. */
-	mutex_lock(&sp->srcu_cb_mutex);
+	mutex_lock(&ssp->srcu_cb_mutex);
 
 	/* End the current grace period. */
-	spin_lock_irq_rcu_node(sp);
-	idx = rcu_seq_state(sp->srcu_gp_seq);
+	spin_lock_irq_rcu_node(ssp);
+	idx = rcu_seq_state(ssp->srcu_gp_seq);
 	WARN_ON_ONCE(idx != SRCU_STATE_SCAN2);
-	cbdelay = srcu_get_delay(sp);
-	sp->srcu_last_gp_end = ktime_get_mono_fast_ns();
-	rcu_seq_end(&sp->srcu_gp_seq);
-	gpseq = rcu_seq_current(&sp->srcu_gp_seq);
-	if (ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, gpseq))
-		sp->srcu_gp_seq_needed_exp = gpseq;
-	spin_unlock_irq_rcu_node(sp);
-	mutex_unlock(&sp->srcu_gp_mutex);
+	cbdelay = srcu_get_delay(ssp);
+	WRITE_ONCE(ssp->srcu_last_gp_end, ktime_get_mono_fast_ns());
+	rcu_seq_end(&ssp->srcu_gp_seq);
+	gpseq = rcu_seq_current(&ssp->srcu_gp_seq);
+	if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, gpseq))
+		WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, gpseq);
+	spin_unlock_irq_rcu_node(ssp);
+	mutex_unlock(&ssp->srcu_gp_mutex);
 	/* A new grace period can start at this point.  But only one. */
 
 	/* Initiate callback invocation as needed. */
 	idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs);
-	rcu_for_each_node_breadth_first(sp, snp) {
+	srcu_for_each_node_breadth_first(ssp, snp) {
 		spin_lock_irq_rcu_node(snp);
 		cbs = false;
-		last_lvl = snp >= sp->level[rcu_num_lvls - 1];
+		last_lvl = snp >= ssp->level[rcu_num_lvls - 1];
 		if (last_lvl)
 			cbs = snp->srcu_have_cbs[idx] == gpseq;
 		snp->srcu_have_cbs[idx] = gpseq;
 		rcu_seq_set_state(&snp->srcu_have_cbs[idx], 1);
 		if (ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, gpseq))
-			snp->srcu_gp_seq_needed_exp = gpseq;
+			WRITE_ONCE(snp->srcu_gp_seq_needed_exp, gpseq);
 		mask = snp->srcu_data_have_cbs[idx];
 		snp->srcu_data_have_cbs[idx] = 0;
 		spin_unlock_irq_rcu_node(snp);
 		if (cbs)
-			srcu_schedule_cbs_snp(sp, snp, mask, cbdelay);
+			srcu_schedule_cbs_snp(ssp, snp, mask, cbdelay);
 
 		/* Occasionally prevent srcu_data counter wrap. */
 		if (!(gpseq & counter_wrap_check) && last_lvl)
 			for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
-				sdp = per_cpu_ptr(sp->sda, cpu);
+				sdp = per_cpu_ptr(ssp->sda, cpu);
 				spin_lock_irqsave_rcu_node(sdp, flags);
 				if (ULONG_CMP_GE(gpseq,
 						 sdp->srcu_gp_seq_needed + 100))
@@ -595 +576 @@
 	}
 
 	/* Callback initiation done, allow grace periods after next. */
-	mutex_unlock(&sp->srcu_cb_mutex);
+	mutex_unlock(&ssp->srcu_cb_mutex);
 
 	/* Start a new grace period if needed. */
-	spin_lock_irq_rcu_node(sp);
-	gpseq = rcu_seq_current(&sp->srcu_gp_seq);
+	spin_lock_irq_rcu_node(ssp);
+	gpseq = rcu_seq_current(&ssp->srcu_gp_seq);
 	if (!rcu_seq_state(gpseq) &&
-	    ULONG_CMP_LT(gpseq, sp->srcu_gp_seq_needed)) {
-		srcu_gp_start(sp);
-		spin_unlock_irq_rcu_node(sp);
-		srcu_reschedule(sp, 0);
+	    ULONG_CMP_LT(gpseq, ssp->srcu_gp_seq_needed)) {
+		srcu_gp_start(ssp);
+		spin_unlock_irq_rcu_node(ssp);
+		srcu_reschedule(ssp, 0);
 	} else {
-		spin_unlock_irq_rcu_node(sp);
+		spin_unlock_irq_rcu_node(ssp);
 	}
 }
 
@@ -617 +598 @@
  * but without expediting.  To start a completely new grace period,
  * whether expedited or not, use srcu_funnel_gp_start() instead.
  */
-static void srcu_funnel_exp_start(struct srcu_struct *sp, struct srcu_node *snp,
+static void srcu_funnel_exp_start(struct srcu_struct *ssp, struct srcu_node *snp,
 				  unsigned long s)
 {
 	unsigned long flags;
 
 	for (; snp != NULL; snp = snp->srcu_parent) {
-		if (rcu_seq_done(&sp->srcu_gp_seq, s) ||
+		if (rcu_seq_done(&ssp->srcu_gp_seq, s) ||
 		    ULONG_CMP_GE(READ_ONCE(snp->srcu_gp_seq_needed_exp), s))
 			return;
 		spin_lock_irqsave_rcu_node(snp, flags);
@@ -634 +615 @@
 		WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
 		spin_unlock_irqrestore_rcu_node(snp, flags);
 	}
-	spin_lock_irqsave_rcu_node(sp, flags);
-	if (ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, s))
-		sp->srcu_gp_seq_needed_exp = s;
-	spin_unlock_irqrestore_rcu_node(sp, flags);
+	spin_lock_irqsave_rcu_node(ssp, flags);
+	if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, s))
+		WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, s);
+	spin_unlock_irqrestore_rcu_node(ssp, flags);
 }
 
 /*
@@ -650 +631 @@
  * Note that this function also does the work of srcu_funnel_exp_start(),
  * in some cases by directly invoking it.
  */
-static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp,
+static void srcu_funnel_gp_start(struct srcu_struct *ssp, struct srcu_data *sdp,
 				 unsigned long s, bool do_norm)
 {
 	unsigned long flags;
@@ -660 +641 @@
 
 	/* Each pass through the loop does one level of the srcu_node tree. */
 	for (; snp != NULL; snp = snp->srcu_parent) {
-		if (rcu_seq_done(&sp->srcu_gp_seq, s) && snp != sdp->mynode)
+		if (rcu_seq_done(&ssp->srcu_gp_seq, s) && snp != sdp->mynode)
 			return; /* GP already done and CBs recorded. */
 		spin_lock_irqsave_rcu_node(snp, flags);
 		if (ULONG_CMP_GE(snp->srcu_have_cbs[idx], s)) {
@@ -675 +656 @@
 				return;
 			}
 			if (!do_norm)
-				srcu_funnel_exp_start(sp, snp, s);
+				srcu_funnel_exp_start(ssp, snp, s);
 			return;
 		}
 		snp->srcu_have_cbs[idx] = s;
 		if (snp == sdp->mynode)
 			snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
 		if (!do_norm && ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, s))
-			snp->srcu_gp_seq_needed_exp = s;
+			WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
 		spin_unlock_irqrestore_rcu_node(snp, flags);
 	}
 
 	/* Top of tree, must ensure the grace period will be started. */
-	spin_lock_irqsave_rcu_node(sp, flags);
-	if (ULONG_CMP_LT(sp->srcu_gp_seq_needed, s)) {
+	spin_lock_irqsave_rcu_node(ssp, flags);
+	if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed, s)) {
 		/*
 		 * Record need for grace period s.  Pair with load
 		 * acquire setting up for initialization.
 		 */
-		smp_store_release(&sp->srcu_gp_seq_needed, s); /*^^^*/
+		smp_store_release(&ssp->srcu_gp_seq_needed, s); /*^^^*/
 	}
-	if (!do_norm && ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, s))
-		sp->srcu_gp_seq_needed_exp = s;
+	if (!do_norm && ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, s))
+		WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, s);
 
 	/* If grace period not already done and none in progress, start it. */
-	if (!rcu_seq_done(&sp->srcu_gp_seq, s) &&
-	    rcu_seq_state(sp->srcu_gp_seq) == SRCU_STATE_IDLE) {
-		WARN_ON_ONCE(ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed));
-		srcu_gp_start(sp);
-		queue_delayed_work(rcu_gp_wq, &sp->work, srcu_get_delay(sp));
+	if (!rcu_seq_done(&ssp->srcu_gp_seq, s) &&
+	    rcu_seq_state(ssp->srcu_gp_seq) == SRCU_STATE_IDLE) {
+		WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed));
+		srcu_gp_start(ssp);
+		if (likely(srcu_init_done))
+			queue_delayed_work(rcu_gp_wq, &ssp->work,
+					   srcu_get_delay(ssp));
+		else if (list_empty(&ssp->work.work.entry))
+			list_add(&ssp->work.work.entry, &srcu_boot_list);
 	}
-	spin_unlock_irqrestore_rcu_node(sp, flags);
+	spin_unlock_irqrestore_rcu_node(ssp, flags);
 }
 
 /*
@@ -713 +698 @@
  * loop an additional time if there is an expedited grace period pending.
  * The caller must ensure that ->srcu_idx is not changed while checking.
  */
-static bool try_check_zero(struct srcu_struct *sp, int idx, int trycount)
+static bool try_check_zero(struct srcu_struct *ssp, int idx, int trycount)
 {
 	for (;;) {
-		if (srcu_readers_active_idx_check(sp, idx))
+		if (srcu_readers_active_idx_check(ssp, idx))
 			return true;
-		if (--trycount + !srcu_get_delay(sp) <= 0)
+		if (--trycount + !srcu_get_delay(ssp) <= 0)
 			return false;
 		udelay(SRCU_RETRY_CHECK_DELAY);
 	}
@@ -729 +714 @@
  * use the other rank of the ->srcu_(un)lock_count[] arrays.  This allows
  * us to wait for pre-existing readers in a starvation-free manner.
  */
-static void srcu_flip(struct srcu_struct *sp)
+static void srcu_flip(struct srcu_struct *ssp)
 {
 	/*
 	 * Ensure that if this updater saw a given reader's increment
@@ -741 +726 @@
 	 */
 	smp_mb(); /* E */  /* Pairs with B and C. */
 
-	WRITE_ONCE(sp->srcu_idx, sp->srcu_idx + 1);
+	WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1);
 
 	/*
 	 * Ensure that if the updater misses an __srcu_read_unlock()
@@ -771 +756 @@
  * it, if this function was preempted for enough time for the counters
  * to wrap, it really doesn't matter whether or not we expedite the grace
  * period.  The extra overhead of a needlessly expedited grace period is
- * negligible when amoritized over that time period, and the extra latency
+ * negligible when amortized over that time period, and the extra latency
  * of a needlessly non-expedited grace period is similarly negligible.
  */
-static bool srcu_might_be_idle(struct srcu_struct *sp)
+static bool srcu_might_be_idle(struct srcu_struct *ssp)
 {
 	unsigned long curseq;
 	unsigned long flags;
 	struct srcu_data *sdp;
 	unsigned long t;
+	unsigned long tlast;
 
+	check_init_srcu_struct(ssp);
 	/* If the local srcu_data structure has callbacks, not idle.  */
-	local_irq_save(flags);
-	sdp = this_cpu_ptr(sp->sda);
+	sdp = raw_cpu_ptr(ssp->sda);
+	spin_lock_irqsave_rcu_node(sdp, flags);
 	if (rcu_segcblist_pend_cbs(&sdp->srcu_cblist)) {
-		local_irq_restore(flags);
+		spin_unlock_irqrestore_rcu_node(sdp, flags);
 		return false; /* Callbacks already present, so not idle. */
 	}
-	local_irq_restore(flags);
+	spin_unlock_irqrestore_rcu_node(sdp, flags);
 
 	/*
 	 * No local callbacks, so probabilistically probe global state.
@@ -798 +785 @@
 
 	/* First, see if enough time has passed since the last GP. */
 	t = ktime_get_mono_fast_ns();
+	tlast = READ_ONCE(ssp->srcu_last_gp_end);
 	if (exp_holdoff == 0 ||
-	    time_in_range_open(t, sp->srcu_last_gp_end,
-			       sp->srcu_last_gp_end + exp_holdoff))
+	    time_in_range_open(t, tlast, tlast + exp_holdoff))
 		return false; /* Too soon after last GP. */
 
 	/* Next, check for probable idleness. */
-	curseq = rcu_seq_current(&sp->srcu_gp_seq);
+	curseq = rcu_seq_current(&ssp->srcu_gp_seq);
 	smp_mb(); /* Order ->srcu_gp_seq with ->srcu_gp_seq_needed. */
-	if (ULONG_CMP_LT(curseq, READ_ONCE(sp->srcu_gp_seq_needed)))
+	if (ULONG_CMP_LT(curseq, READ_ONCE(ssp->srcu_gp_seq_needed)))
 		return false; /* Grace period in progress, so not idle. */
 	smp_mb(); /* Order ->srcu_gp_seq with prior access. */
-	if (curseq != rcu_seq_current(&sp->srcu_gp_seq))
+	if (curseq != rcu_seq_current(&ssp->srcu_gp_seq))
 		return false; /* GP # changed, so not idle. */
 	return true; /* With reasonable probability, idle! */
 }
@@ -819 +806 @@
  */
 static void srcu_leak_callback(struct rcu_head *rhp)
 {
+}
+
+/*
+ * Start an SRCU grace period, and also queue the callback if non-NULL.
+ */
+static unsigned long srcu_gp_start_if_needed(struct srcu_struct *ssp,
+					     struct rcu_head *rhp, bool do_norm)
+{
+	unsigned long flags;
+	int idx;
+	bool needexp = false;
+	bool needgp = false;
+	unsigned long s;
+	struct srcu_data *sdp;
+
+	check_init_srcu_struct(ssp);
+	idx = srcu_read_lock(ssp);
+	sdp = raw_cpu_ptr(ssp->sda);
+	spin_lock_irqsave_rcu_node(sdp, flags);
+	if (rhp)
+		rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp);
+	rcu_segcblist_advance(&sdp->srcu_cblist,
+			      rcu_seq_current(&ssp->srcu_gp_seq));
+	s = rcu_seq_snap(&ssp->srcu_gp_seq);
+	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s);
+	if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) {
+		sdp->srcu_gp_seq_needed = s;
+		needgp = true;
+	}
+	if (!do_norm && ULONG_CMP_LT(sdp->srcu_gp_seq_needed_exp, s)) {
+		sdp->srcu_gp_seq_needed_exp = s;
+		needexp = true;
+	}
+	spin_unlock_irqrestore_rcu_node(sdp, flags);
+	if (needgp)
+		srcu_funnel_gp_start(ssp, sdp, s, do_norm);
+	else if (needexp)
+		srcu_funnel_exp_start(ssp, sdp->mynode, s);
+	srcu_read_unlock(ssp, idx);
+	return s;
 }
 
 /*
@@ -849 +876 @@
  * srcu_read_lock(), and srcu_read_unlock() that are all passed the same
  * srcu_struct structure.
  */
-void __call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
-		 rcu_callback_t func, bool do_norm)
+static void __call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
+			rcu_callback_t func, bool do_norm)
 {
-	unsigned long flags;
-	bool needexp = false;
-	bool needgp = false;
-	unsigned long s;
-	struct srcu_data *sdp;
-
-	check_init_srcu_struct(sp);
 	if (debug_rcu_head_queue(rhp)) {
 		/* Probable double call_srcu(), so leak the callback. */
 		WRITE_ONCE(rhp->func, srcu_leak_callback);
@@ -866 +886 @@
 		return;
 	}
 	rhp->func = func;
-	local_irq_save(flags);
-	sdp = this_cpu_ptr(sp->sda);
-	spin_lock_rcu_node(sdp);
-	rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp, false);
-	rcu_segcblist_advance(&sdp->srcu_cblist,
-			      rcu_seq_current(&sp->srcu_gp_seq));
-	s = rcu_seq_snap(&sp->srcu_gp_seq);
-	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s);
-	if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) {
-		sdp->srcu_gp_seq_needed = s;
-		needgp = true;
-	}
-	if (!do_norm && ULONG_CMP_LT(sdp->srcu_gp_seq_needed_exp, s)) {
-		sdp->srcu_gp_seq_needed_exp = s;
-		needexp = true;
-	}
-	spin_unlock_irqrestore_rcu_node(sdp, flags);
-	if (needgp)
-		srcu_funnel_gp_start(sp, sdp, s, do_norm);
-	else if (needexp)
-		srcu_funnel_exp_start(sp, sdp->mynode, s);
+	(void)srcu_gp_start_if_needed(ssp, rhp, do_norm);
 }
 
 /**
  * call_srcu() - Queue a callback for invocation after an SRCU grace period
- * @sp: srcu_struct in queue the callback
+ * @ssp: srcu_struct on which to queue the callback
  * @rhp: structure to be used for queueing the SRCU callback.
  * @func: function to be invoked after the SRCU grace period
  *
@@ -906 +906 @@
  * The callback will be invoked from process context, but must nevertheless
  * be fast and must not block.
  */
-void call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
+void call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
 	       rcu_callback_t func)
 {
-	__call_srcu(sp, rhp, func, true);
+	__call_srcu(ssp, rhp, func, true);
 }
 EXPORT_SYMBOL_GPL(call_srcu);
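A typical call_srcu() user embeds the rcu_head in the protected structure and frees it from the callback. A sketch, reusing the hypothetical `my_srcu` domain and `struct foo` from the reader example above:

```c
#include <linux/srcu.h>
#include <linux/slab.h>

struct foo_item {
	struct rcu_head rh;	/* Storage for the grace-period callback. */
	int payload;
};

static void foo_reclaim(struct rcu_head *rhp)
{
	/* Runs in process context after a full SRCU grace period. */
	kfree(container_of(rhp, struct foo_item, rh));
}

static void foo_retire(struct foo_item *p)
{
	/* Pre-existing readers may still reference p until the GP ends. */
	call_srcu(&my_srcu, &p->rh, foo_reclaim);
}
```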
 
 /*
  * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
  */
-static void __synchronize_srcu(struct srcu_struct *sp, bool do_norm)
+static void __synchronize_srcu(struct srcu_struct *ssp, bool do_norm)
 {
 	struct rcu_synchronize rcu;
 
-	RCU_LOCKDEP_WARN(lock_is_held(&sp->dep_map) ||
+	RCU_LOCKDEP_WARN(lock_is_held(&ssp->dep_map) ||
 			 lock_is_held(&rcu_bh_lock_map) ||
 			 lock_is_held(&rcu_lock_map) ||
 			 lock_is_held(&rcu_sched_lock_map),
@@ -929 +929 @@
 	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
 		return;
 	might_sleep();
-	check_init_srcu_struct(sp);
+	check_init_srcu_struct(ssp);
 	init_completion(&rcu.completion);
 	init_rcu_head_on_stack(&rcu.head);
-	__call_srcu(sp, &rcu.head, wakeme_after_rcu, do_norm);
+	__call_srcu(ssp, &rcu.head, wakeme_after_rcu, do_norm);
 	wait_for_completion(&rcu.completion);
 	destroy_rcu_head_on_stack(&rcu.head);
 
@@ -948 +948 @@
 
 /**
  * synchronize_srcu_expedited - Brute-force SRCU grace period
- * @sp: srcu_struct with which to synchronize.
+ * @ssp: srcu_struct with which to synchronize.
  *
  * Wait for an SRCU grace period to elapse, but be more aggressive about
  * spinning rather than blocking when waiting.
@@ -956 +956 @@
  * Note that synchronize_srcu_expedited() has the same deadlock and
  * memory-ordering properties as does synchronize_srcu().
  */
-void synchronize_srcu_expedited(struct srcu_struct *sp)
+void synchronize_srcu_expedited(struct srcu_struct *ssp)
 {
-	__synchronize_srcu(sp, rcu_gp_is_normal());
+	__synchronize_srcu(ssp, rcu_gp_is_normal());
 }
 EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);
 
 /**
  * synchronize_srcu - wait for prior SRCU read-side critical-section completion
- * @sp: srcu_struct with which to synchronize.
+ * @ssp: srcu_struct with which to synchronize.
  *
  * Wait for the count to drain to zero of both indexes. To avoid the
  * possible starvation of synchronize_srcu(), it waits for the count of
@@ -982 +982 @@
  * There are memory-ordering constraints implied by synchronize_srcu().
  * On systems with more than one CPU, when synchronize_srcu() returns,
  * each CPU is guaranteed to have executed a full memory barrier since
- * the end of its last corresponding SRCU-sched read-side critical section
+ * the end of its last corresponding SRCU read-side critical section
  * whose beginning preceded the call to synchronize_srcu().  In addition,
  * each CPU having an SRCU read-side critical section that extends beyond
  * the return from synchronize_srcu() is guaranteed to have executed a
@@ -1006 +1006 @@
  * SRCU must also provide it.  Note that detecting idleness is heuristic
  * and subject to both false positives and negatives.
  */
-void synchronize_srcu(struct srcu_struct *sp)
+void synchronize_srcu(struct srcu_struct *ssp)
 {
-	if (srcu_might_be_idle(sp) || rcu_gp_is_expedited())
-		synchronize_srcu_expedited(sp);
+	if (srcu_might_be_idle(ssp) || rcu_gp_is_expedited())
+		synchronize_srcu_expedited(ssp);
 	else
-		__synchronize_srcu(sp, true);
+		__synchronize_srcu(ssp, true);
 }
 EXPORT_SYMBOL_GPL(synchronize_srcu);
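The classic updater pattern pairs pointer replacement with synchronize_srcu(). A sketch against the hypothetical `gp`/`my_srcu` from the reader example; `gp_lock` is likewise an assumed update-side lock:

```c
/* Hypothetical updater: unpublish, wait for pre-existing readers, free. */
static void foo_replace(struct foo *newp)
{
	struct foo *oldp;

	oldp = rcu_dereference_protected(gp, lockdep_is_held(&gp_lock));
	rcu_assign_pointer(gp, newp);	/* Publish the replacement. */
	synchronize_srcu(&my_srcu);	/* Wait out readers that saw oldp. */
	kfree(oldp);			/* Now no reader can hold oldp. */
}
```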
 | 1017 | +  | 
|---|
 | 1018 | +/**  | 
|---|
 | 1019 | + * get_state_synchronize_srcu - Provide an end-of-grace-period cookie  | 
|---|
 | 1020 | + * @ssp: srcu_struct to provide cookie for.  | 
|---|
 | 1021 | + *  | 
|---|
 | 1022 | + * This function returns a cookie that can be passed to  | 
|---|
 | 1023 | + * poll_state_synchronize_srcu(), which will return true if a full grace  | 
|---|
 | 1024 | + * period has elapsed in the meantime.  It is the caller's responsibility  | 
|---|
 | 1025 | + * to make sure that grace period happens, for example, by invoking  | 
|---|
 | 1026 | + * call_srcu() after return from get_state_synchronize_srcu().  | 
|---|
 | 1027 | + */  | 
|---|
 | 1028 | +unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp)  | 
|---|
 | 1029 | +{  | 
|---|
 | 1030 | +	// Any prior manipulation of SRCU-protected data must happen  | 
|---|
 | 1031 | +	// before the load from ->srcu_gp_seq.  | 
|---|
 | 1032 | +	smp_mb();  | 
|---|
 | 1033 | +	return rcu_seq_snap(&ssp->srcu_gp_seq);  | 
|---|
 | 1034 | +}  | 
|---|
 | 1035 | +EXPORT_SYMBOL_GPL(get_state_synchronize_srcu);  | 
|---|
 | 1036 | +  | 
|---|
 | 1037 | +/**  | 
|---|
 | 1038 | + * start_poll_synchronize_srcu - Provide cookie and start grace period  | 
|---|
 | 1039 | + * @ssp: srcu_struct to provide cookie for.  | 
|---|
 | 1040 | + *  | 
|---|
 | 1041 | + * This function returns a cookie that can be passed to  | 
|---|
 | 1042 | + * poll_state_synchronize_srcu(), which will return true if a full grace  | 
|---|
 | 1043 | + * period has elapsed in the meantime.  Unlike get_state_synchronize_srcu(),  | 
|---|
 | 1044 | + * this function also ensures that any needed SRCU grace period will be  | 
|---|
 | 1045 | + * started.  This convenience does come at a cost in terms of CPU overhead.  | 
|---|
 | 1046 | + */  | 
|---|
 | 1047 | +unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp)  | 
|---|
 | 1048 | +{  | 
|---|
 | 1049 | +	return srcu_gp_start_if_needed(ssp, NULL, true);  | 
|---|
 | 1050 | +}  | 
|---|
 | 1051 | +EXPORT_SYMBOL_GPL(start_poll_synchronize_srcu);  | 
|---|
 | 1052 | +  | 
|---|
 | 1053 | +/**  | 
|---|
 | 1054 | + * poll_state_synchronize_srcu - Has cookie's grace period ended?  | 
|---|
| 1055 | + * @ssp: srcu_struct against which to check the cookie.  | 
|---|
 | 1056 | + * @cookie: Return value from get_state_synchronize_srcu() or start_poll_synchronize_srcu().  | 
|---|
 | 1057 | + *  | 
|---|
 | 1058 | + * This function takes the cookie that was returned from either  | 
|---|
 | 1059 | + * get_state_synchronize_srcu() or start_poll_synchronize_srcu(), and  | 
|---|
| 1060 | + * returns true if an SRCU grace period has elapsed since the time that the  | 
|---|
 | 1061 | + * cookie was created.  | 
|---|
 | 1062 | + */  | 
|---|
 | 1063 | +bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie)  | 
|---|
 | 1064 | +{  | 
|---|
 | 1065 | +	if (!rcu_seq_done(&ssp->srcu_gp_seq, cookie))  | 
|---|
 | 1066 | +		return false;  | 
|---|
 | 1067 | +	// Ensure that the end of the SRCU grace period happens before  | 
|---|
 | 1068 | +	// any subsequent code that the caller might execute.  | 
|---|
 | 1069 | +	smp_mb(); // ^^^  | 
|---|
 | 1070 | +	return true;  | 
|---|
 | 1071 | +}  | 
|---|
 | 1072 | +EXPORT_SYMBOL_GPL(poll_state_synchronize_srcu);  | 
|---|
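
Taken together, the three interfaces above let an updater poll for a grace period instead of blocking in synchronize_srcu() or supplying a callback. A minimal sketch, with hypothetical helper names and the surrounding retire logic left to the caller:

```c
#include <linux/srcu.h>

DEFINE_STATIC_SRCU(my_srcu);

/* Unlink an object, then record when it becomes safe to free. */
static unsigned long begin_retire(void)
{
	/*
	 * Returns a cookie and also starts a grace period if one is
	 * needed.  get_state_synchronize_srcu() would return the cookie
	 * without starting one, leaving that to a later call_srcu() or
	 * synchronize_srcu().
	 */
	return start_poll_synchronize_srcu(&my_srcu);
}

/* Called periodically; returns true once freeing is safe. */
static bool try_finish_retire(unsigned long cookie)
{
	if (!poll_state_synchronize_srcu(&my_srcu, cookie))
		return false;	/* Grace period still in progress. */
	/*
	 * A full grace period has elapsed since begin_retire(), so all
	 * readers that could have seen the unlinked object are done.
	 */
	return true;
}
```

Because the cookie is a snapshot of ->srcu_gp_seq, it is cheap to take and to test, but as the kernel-doc above notes, with get_state_synchronize_srcu() it is the caller's job to ensure a grace period is actually started.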
| 1017 | 1073 |   | 
|---|
| 1018 | 1074 |  /* | 
|---|
| 1019 | 1075 |   * Callback function for srcu_barrier() use. | 
|---|
| .. | .. | 
|---|
| 1021 | 1077 |  static void srcu_barrier_cb(struct rcu_head *rhp) | 
|---|
| 1022 | 1078 |  { | 
|---|
| 1023 | 1079 |  	struct srcu_data *sdp; | 
|---|
| 1024 |  | -	struct srcu_struct *sp;  | 
|---|
 | 1080 | +	struct srcu_struct *ssp;  | 
|---|
| 1025 | 1081 |   | 
|---|
| 1026 | 1082 |  	sdp = container_of(rhp, struct srcu_data, srcu_barrier_head); | 
|---|
| 1027 |  | -	sp = sdp->sp;  | 
|---|
| 1028 |  | -	if (atomic_dec_and_test(&sp->srcu_barrier_cpu_cnt))  | 
|---|
| 1029 |  | -		complete(&sp->srcu_barrier_completion);  | 
|---|
 | 1083 | +	ssp = sdp->ssp;  | 
|---|
 | 1084 | +	if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt))  | 
|---|
 | 1085 | +		complete(&ssp->srcu_barrier_completion);  | 
|---|
| 1030 | 1086 |  } | 
|---|
| 1031 | 1087 |   | 
|---|
| 1032 | 1088 |  /** | 
|---|
| 1033 | 1089 |   * srcu_barrier - Wait until all in-flight call_srcu() callbacks complete. | 
|---|
| 1034 |  | - * @sp: srcu_struct on which to wait for in-flight callbacks.  | 
|---|
 | 1090 | + * @ssp: srcu_struct on which to wait for in-flight callbacks.  | 
|---|
| 1035 | 1091 |   */ | 
|---|
| 1036 |  | -void srcu_barrier(struct srcu_struct *sp)  | 
|---|
 | 1092 | +void srcu_barrier(struct srcu_struct *ssp)  | 
|---|
| 1037 | 1093 |  { | 
|---|
| 1038 | 1094 |  	int cpu; | 
|---|
| 1039 | 1095 |  	struct srcu_data *sdp; | 
|---|
| 1040 |  | -	unsigned long s = rcu_seq_snap(&sp->srcu_barrier_seq);  | 
|---|
 | 1096 | +	unsigned long s = rcu_seq_snap(&ssp->srcu_barrier_seq);  | 
|---|
| 1041 | 1097 |   | 
|---|
| 1042 |  | -	check_init_srcu_struct(sp);  | 
|---|
| 1043 |  | -	mutex_lock(&sp->srcu_barrier_mutex);  | 
|---|
| 1044 |  | -	if (rcu_seq_done(&sp->srcu_barrier_seq, s)) {  | 
|---|
 | 1098 | +	check_init_srcu_struct(ssp);  | 
|---|
 | 1099 | +	mutex_lock(&ssp->srcu_barrier_mutex);  | 
|---|
 | 1100 | +	if (rcu_seq_done(&ssp->srcu_barrier_seq, s)) {  | 
|---|
| 1045 | 1101 |  		smp_mb(); /* Force ordering following return. */ | 
|---|
| 1046 |  | -		mutex_unlock(&sp->srcu_barrier_mutex);  | 
|---|
 | 1102 | +		mutex_unlock(&ssp->srcu_barrier_mutex);  | 
|---|
| 1047 | 1103 |  		return; /* Someone else did our work for us. */ | 
|---|
| 1048 | 1104 |  	} | 
|---|
| 1049 |  | -	rcu_seq_start(&sp->srcu_barrier_seq);  | 
|---|
| 1050 |  | -	init_completion(&sp->srcu_barrier_completion);  | 
|---|
 | 1105 | +	rcu_seq_start(&ssp->srcu_barrier_seq);  | 
|---|
 | 1106 | +	init_completion(&ssp->srcu_barrier_completion);  | 
|---|
| 1051 | 1107 |   | 
|---|
| 1052 | 1108 |  	/* Initial count prevents reaching zero until all CBs are posted. */ | 
|---|
| 1053 |  | -	atomic_set(&sp->srcu_barrier_cpu_cnt, 1);  | 
|---|
 | 1109 | +	atomic_set(&ssp->srcu_barrier_cpu_cnt, 1);  | 
|---|
| 1054 | 1110 |   | 
|---|
| 1055 | 1111 |  	/* | 
|---|
| 1056 | 1112 |  	 * Each pass through this loop enqueues a callback, but only | 
|---|
| .. | .. | 
|---|
| 1061 | 1117 |  	 * grace period as the last callback already in the queue. | 
|---|
| 1062 | 1118 |  	 */ | 
|---|
| 1063 | 1119 |  	for_each_possible_cpu(cpu) { | 
|---|
| 1064 |  | -		sdp = per_cpu_ptr(sp->sda, cpu);  | 
|---|
 | 1120 | +		sdp = per_cpu_ptr(ssp->sda, cpu);  | 
|---|
| 1065 | 1121 |  		spin_lock_irq_rcu_node(sdp); | 
|---|
| 1066 |  | -		atomic_inc(&sp->srcu_barrier_cpu_cnt);  | 
|---|
 | 1122 | +		atomic_inc(&ssp->srcu_barrier_cpu_cnt);  | 
|---|
| 1067 | 1123 |  		sdp->srcu_barrier_head.func = srcu_barrier_cb; | 
|---|
| 1068 | 1124 |  		debug_rcu_head_queue(&sdp->srcu_barrier_head); | 
|---|
| 1069 | 1125 |  		if (!rcu_segcblist_entrain(&sdp->srcu_cblist, | 
|---|
| 1070 |  | -					   &sdp->srcu_barrier_head, 0)) {  | 
|---|
 | 1126 | +					   &sdp->srcu_barrier_head)) {  | 
|---|
| 1071 | 1127 |  			debug_rcu_head_unqueue(&sdp->srcu_barrier_head); | 
|---|
| 1072 |  | -			atomic_dec(&sp->srcu_barrier_cpu_cnt);  | 
|---|
 | 1128 | +			atomic_dec(&ssp->srcu_barrier_cpu_cnt);  | 
|---|
| 1073 | 1129 |  		} | 
|---|
| 1074 | 1130 |  		spin_unlock_irq_rcu_node(sdp); | 
|---|
| 1075 | 1131 |  	} | 
|---|
| 1076 | 1132 |   | 
|---|
| 1077 | 1133 |  	/* Remove the initial count, at which point reaching zero can happen. */ | 
|---|
| 1078 |  | -	if (atomic_dec_and_test(&sp->srcu_barrier_cpu_cnt))  | 
|---|
| 1079 |  | -		complete(&sp->srcu_barrier_completion);  | 
|---|
| 1080 |  | -	wait_for_completion(&sp->srcu_barrier_completion);  | 
|---|
 | 1134 | +	if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt))  | 
|---|
 | 1135 | +		complete(&ssp->srcu_barrier_completion);  | 
|---|
 | 1136 | +	wait_for_completion(&ssp->srcu_barrier_completion);  | 
|---|
| 1081 | 1137 |   | 
|---|
| 1082 |  | -	rcu_seq_end(&sp->srcu_barrier_seq);  | 
|---|
| 1083 |  | -	mutex_unlock(&sp->srcu_barrier_mutex);  | 
|---|
 | 1138 | +	rcu_seq_end(&ssp->srcu_barrier_seq);  | 
|---|
 | 1139 | +	mutex_unlock(&ssp->srcu_barrier_mutex);  | 
|---|
| 1084 | 1140 |  } | 
|---|
| 1085 | 1141 |  EXPORT_SYMBOL_GPL(srcu_barrier); | 
|---|
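
As a usage note, srcu_barrier() is typically the final step before tearing down state that still has call_srcu() callbacks in flight. A minimal sketch with hypothetical names:

```c
#include <linux/srcu.h>
#include <linux/slab.h>

struct foo {
	struct rcu_head rh;
	/* ... payload ... */
};

DEFINE_STATIC_SRCU(foo_srcu);

static void foo_reclaim(struct rcu_head *rhp)
{
	kfree(container_of(rhp, struct foo, rh));
}

/* Asynchronous retirement: returns immediately. */
static void retire_foo(struct foo *p)
{
	call_srcu(&foo_srcu, &p->rh, foo_reclaim);
}

static void foo_shutdown(void)
{
	/*
	 * Wait for every previously queued foo_reclaim() invocation to
	 * complete.  Only then is it safe to unload the module or to
	 * invoke cleanup_srcu_struct() on a dynamically allocated
	 * srcu_struct.
	 */
	srcu_barrier(&foo_srcu);
}
```

Exactly as the entrain logic above implies, srcu_barrier() waits only for callbacks posted before it was invoked; callbacks posted concurrently may or may not be waited for.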
| 1086 | 1142 |   | 
|---|
| 1087 | 1143 |  /** | 
|---|
| 1088 | 1144 |   * srcu_batches_completed - return batches completed. | 
|---|
| 1089 |  | - * @sp: srcu_struct on which to report batch completion.  | 
|---|
 | 1145 | + * @ssp: srcu_struct on which to report batch completion.  | 
|---|
| 1090 | 1146 |   * | 
|---|
| 1091 | 1147 |   * Report the number of batches, correlated with, but not necessarily | 
|---|
| 1092 | 1148 |   * precisely the same as, the number of grace periods that have elapsed. | 
|---|
| 1093 | 1149 |   */ | 
|---|
| 1094 |  | -unsigned long srcu_batches_completed(struct srcu_struct *sp)  | 
|---|
 | 1150 | +unsigned long srcu_batches_completed(struct srcu_struct *ssp)  | 
|---|
| 1095 | 1151 |  { | 
|---|
| 1096 |  | -	return sp->srcu_idx;  | 
|---|
 | 1152 | +	return READ_ONCE(ssp->srcu_idx);  | 
|---|
| 1097 | 1153 |  } | 
|---|
| 1098 | 1154 |  EXPORT_SYMBOL_GPL(srcu_batches_completed); | 
|---|
| 1099 | 1155 |   | 
|---|
| .. | .. | 
|---|
| 1102 | 1158 |   * to SRCU_STATE_SCAN2, and invoke srcu_gp_end() when the scan has | 
|---|
| 1103 | 1159 |   * completed in that state. | 
|---|
| 1104 | 1160 |   */ | 
|---|
| 1105 |  | -static void srcu_advance_state(struct srcu_struct *sp)  | 
|---|
 | 1161 | +static void srcu_advance_state(struct srcu_struct *ssp)  | 
|---|
| 1106 | 1162 |  { | 
|---|
| 1107 | 1163 |  	int idx; | 
|---|
| 1108 | 1164 |   | 
|---|
| 1109 |  | -	mutex_lock(&sp->srcu_gp_mutex);  | 
|---|
 | 1165 | +	mutex_lock(&ssp->srcu_gp_mutex);  | 
|---|
| 1110 | 1166 |   | 
|---|
| 1111 | 1167 |  	/* | 
|---|
| 1112 | 1168 |  	 * Because readers might be delayed for an extended period after | 
|---|
| .. | .. | 
|---|
| 1118 | 1174 |  	 * The load-acquire ensures that we see the accesses performed | 
|---|
| 1119 | 1175 |  	 * by the prior grace period. | 
|---|
| 1120 | 1176 |  	 */ | 
|---|
| 1121 |  | -	idx = rcu_seq_state(smp_load_acquire(&sp->srcu_gp_seq)); /* ^^^ */  | 
|---|
 | 1177 | +	idx = rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq)); /* ^^^ */  | 
|---|
| 1122 | 1178 |  	if (idx == SRCU_STATE_IDLE) { | 
|---|
| 1123 |  | -		spin_lock_irq_rcu_node(sp);  | 
|---|
| 1124 |  | -		if (ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)) {  | 
|---|
| 1125 |  | -			WARN_ON_ONCE(rcu_seq_state(sp->srcu_gp_seq));  | 
|---|
| 1126 |  | -			spin_unlock_irq_rcu_node(sp);  | 
|---|
| 1127 |  | -			mutex_unlock(&sp->srcu_gp_mutex);  | 
|---|
 | 1179 | +		spin_lock_irq_rcu_node(ssp);  | 
|---|
 | 1180 | +		if (ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)) {  | 
|---|
 | 1181 | +			WARN_ON_ONCE(rcu_seq_state(ssp->srcu_gp_seq));  | 
|---|
 | 1182 | +			spin_unlock_irq_rcu_node(ssp);  | 
|---|
 | 1183 | +			mutex_unlock(&ssp->srcu_gp_mutex);  | 
|---|
| 1128 | 1184 |  			return; | 
|---|
| 1129 | 1185 |  		} | 
|---|
| 1130 |  | -		idx = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq));  | 
|---|
 | 1186 | +		idx = rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq));  | 
|---|
| 1131 | 1187 |  		if (idx == SRCU_STATE_IDLE) | 
|---|
| 1132 |  | -			srcu_gp_start(sp);  | 
|---|
| 1133 |  | -		spin_unlock_irq_rcu_node(sp);  | 
|---|
 | 1188 | +			srcu_gp_start(ssp);  | 
|---|
 | 1189 | +		spin_unlock_irq_rcu_node(ssp);  | 
|---|
| 1134 | 1190 |  		if (idx != SRCU_STATE_IDLE) { | 
|---|
| 1135 |  | -			mutex_unlock(&sp->srcu_gp_mutex);  | 
|---|
 | 1191 | +			mutex_unlock(&ssp->srcu_gp_mutex);  | 
|---|
| 1136 | 1192 |  			return; /* Someone else started the grace period. */ | 
|---|
| 1137 | 1193 |  		} | 
|---|
| 1138 | 1194 |  	} | 
|---|
| 1139 | 1195 |   | 
|---|
| 1140 |  | -	if (rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) == SRCU_STATE_SCAN1) {  | 
|---|
| 1141 |  | -		idx = 1 ^ (sp->srcu_idx & 1);  | 
|---|
| 1142 |  | -		if (!try_check_zero(sp, idx, 1)) {  | 
|---|
| 1143 |  | -			mutex_unlock(&sp->srcu_gp_mutex);  | 
|---|
 | 1196 | +	if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) == SRCU_STATE_SCAN1) {  | 
|---|
 | 1197 | +		idx = 1 ^ (ssp->srcu_idx & 1);  | 
|---|
 | 1198 | +		if (!try_check_zero(ssp, idx, 1)) {  | 
|---|
 | 1199 | +			mutex_unlock(&ssp->srcu_gp_mutex);  | 
|---|
| 1144 | 1200 |  			return; /* readers present, retry later. */ | 
|---|
| 1145 | 1201 |  		} | 
|---|
| 1146 |  | -		srcu_flip(sp);  | 
|---|
| 1147 |  | -		rcu_seq_set_state(&sp->srcu_gp_seq, SRCU_STATE_SCAN2);  | 
|---|
 | 1202 | +		srcu_flip(ssp);  | 
|---|
 | 1203 | +		spin_lock_irq_rcu_node(ssp);  | 
|---|
 | 1204 | +		rcu_seq_set_state(&ssp->srcu_gp_seq, SRCU_STATE_SCAN2);  | 
|---|
 | 1205 | +		spin_unlock_irq_rcu_node(ssp);  | 
|---|
| 1148 | 1206 |  	} | 
|---|
| 1149 | 1207 |   | 
|---|
| 1150 |  | -	if (rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) == SRCU_STATE_SCAN2) {  | 
|---|
 | 1208 | +	if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) == SRCU_STATE_SCAN2) {  | 
|---|
| 1151 | 1209 |   | 
|---|
| 1152 | 1210 |  		/* | 
|---|
| 1153 | 1211 |  		 * SRCU read-side critical sections are normally short, | 
|---|
| 1154 | 1212 |  		 * so check at least twice in quick succession after a flip. | 
|---|
| 1155 | 1213 |  		 */ | 
|---|
| 1156 |  | -		idx = 1 ^ (sp->srcu_idx & 1);  | 
|---|
| 1157 |  | -		if (!try_check_zero(sp, idx, 2)) {  | 
|---|
| 1158 |  | -			mutex_unlock(&sp->srcu_gp_mutex);  | 
|---|
 | 1214 | +		idx = 1 ^ (ssp->srcu_idx & 1);  | 
|---|
 | 1215 | +		if (!try_check_zero(ssp, idx, 2)) {  | 
|---|
 | 1216 | +			mutex_unlock(&ssp->srcu_gp_mutex);  | 
|---|
| 1159 | 1217 |  			return; /* readers present, retry later. */ | 
|---|
| 1160 | 1218 |  		} | 
|---|
| 1161 |  | -		srcu_gp_end(sp);  /* Releases ->srcu_gp_mutex. */  | 
|---|
 | 1219 | +		srcu_gp_end(ssp);  /* Releases ->srcu_gp_mutex. */  | 
|---|
| 1162 | 1220 |  	} | 
|---|
| 1163 | 1221 |  } | 
|---|
| 1164 | 1222 |   | 
|---|
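
For orientation, the progression that srcu_advance_state() drives can be summarized as follows. This is a reading aid distilled from the function above (srcu_gp_end() itself is implemented earlier in this file), not additional kernel code:

```c
/*
 * SRCU_STATE_IDLE
 *     |  srcu_gp_start()
 *     v
 * SRCU_STATE_SCAN1  -- try_check_zero() waits for the inactive
 *     |                index's lock/unlock counts to balance, then
 *     |                srcu_flip() steers new readers onto it
 *     v
 * SRCU_STATE_SCAN2  -- try_check_zero() (at least twice) drains the
 *     |                index that readers were using before the flip
 *     v
 * srcu_gp_end()     -- completes the grace period and releases
 *                      ->srcu_gp_mutex
 */
```

The ->srcu_gp_mutex is held across the whole walk, so at most one task advances the state machine at a time; contenders simply drop the mutex and retry later from the workqueue.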
| .. | .. | 
|---|
| 1174 | 1232 |  	struct rcu_cblist ready_cbs; | 
|---|
| 1175 | 1233 |  	struct rcu_head *rhp; | 
|---|
| 1176 | 1234 |  	struct srcu_data *sdp; | 
|---|
| 1177 |  | -	struct srcu_struct *sp;  | 
|---|
 | 1235 | +	struct srcu_struct *ssp;  | 
|---|
| 1178 | 1236 |   | 
|---|
| 1179 |  | -	sdp = container_of(work, struct srcu_data, work.work);  | 
|---|
| 1180 |  | -	sp = sdp->sp;  | 
|---|
 | 1237 | +	sdp = container_of(work, struct srcu_data, work);  | 
|---|
 | 1238 | +  | 
|---|
 | 1239 | +	ssp = sdp->ssp;  | 
|---|
| 1181 | 1240 |  	rcu_cblist_init(&ready_cbs); | 
|---|
| 1182 | 1241 |  	spin_lock_irq_rcu_node(sdp); | 
|---|
| 1183 | 1242 |  	rcu_segcblist_advance(&sdp->srcu_cblist, | 
|---|
| 1184 |  | -			      rcu_seq_current(&sp->srcu_gp_seq));  | 
|---|
 | 1243 | +			      rcu_seq_current(&ssp->srcu_gp_seq));  | 
|---|
| 1185 | 1244 |  	if (sdp->srcu_cblist_invoking || | 
|---|
| 1186 | 1245 |  	    !rcu_segcblist_ready_cbs(&sdp->srcu_cblist)) { | 
|---|
| 1187 | 1246 |  		spin_unlock_irq_rcu_node(sdp); | 
|---|
| .. | .. | 
|---|
| 1207 | 1266 |  	spin_lock_irq_rcu_node(sdp); | 
|---|
| 1208 | 1267 |  	rcu_segcblist_insert_count(&sdp->srcu_cblist, &ready_cbs); | 
|---|
| 1209 | 1268 |  	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist, | 
|---|
| 1210 |  | -				       rcu_seq_snap(&sp->srcu_gp_seq));  | 
|---|
 | 1269 | +				       rcu_seq_snap(&ssp->srcu_gp_seq));  | 
|---|
| 1211 | 1270 |  	sdp->srcu_cblist_invoking = false; | 
|---|
| 1212 | 1271 |  	more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist); | 
|---|
| 1213 | 1272 |  	spin_unlock_irq_rcu_node(sdp); | 
|---|
| .. | .. | 
|---|
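
The rcu_segcblist_advance()/rcu_segcblist_accelerate() pair in srcu_invoke_callbacks() above manipulates the four segments of the per-CPU callback list. A brief orientation, summarized from rcu_segcblist.h rather than quoted from this file:

```c
/*
 * Segments of an rcu_segcblist, oldest first:
 *
 *   DONE        grace period ended; callbacks ready to invoke
 *   WAIT        waiting for the current grace period
 *   NEXT_READY  waiting for the next grace period
 *   NEXT        not yet assigned to any grace period
 *
 * rcu_segcblist_advance() slides callbacks toward DONE as
 * ->srcu_gp_seq advances, and rcu_segcblist_accelerate() tags the
 * NEXT callbacks with the grace-period number that will cover them.
 */
```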
| 1219 | 1278 |   * Finished one round of SRCU grace-period processing.  Start another if there are | 
|---|
| 1220 | 1279 |   * more SRCU callbacks queued, otherwise put SRCU into not-running state. | 
|---|
| 1221 | 1280 |   */ | 
|---|
| 1222 |  | -static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay)  | 
|---|
 | 1281 | +static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay)  | 
|---|
| 1223 | 1282 |  { | 
|---|
| 1224 | 1283 |  	bool pushgp = true; | 
|---|
| 1225 | 1284 |   | 
|---|
| 1226 |  | -	spin_lock_irq_rcu_node(sp);  | 
|---|
| 1227 |  | -	if (ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)) {  | 
|---|
| 1228 |  | -		if (!WARN_ON_ONCE(rcu_seq_state(sp->srcu_gp_seq))) {  | 
|---|
 | 1285 | +	spin_lock_irq_rcu_node(ssp);  | 
|---|
 | 1286 | +	if (ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)) {  | 
|---|
 | 1287 | +		if (!WARN_ON_ONCE(rcu_seq_state(ssp->srcu_gp_seq))) {  | 
|---|
| 1229 | 1288 |  			/* All requests fulfilled, time to go idle. */ | 
|---|
| 1230 | 1289 |  			pushgp = false; | 
|---|
| 1231 | 1290 |  		} | 
|---|
| 1232 |  | -	} else if (!rcu_seq_state(sp->srcu_gp_seq)) {  | 
|---|
 | 1291 | +	} else if (!rcu_seq_state(ssp->srcu_gp_seq)) {  | 
|---|
| 1233 | 1292 |  		/* Outstanding request and no GP.  Start one. */ | 
|---|
| 1234 |  | -		srcu_gp_start(sp);  | 
|---|
 | 1293 | +		srcu_gp_start(ssp);  | 
|---|
| 1235 | 1294 |  	} | 
|---|
| 1236 |  | -	spin_unlock_irq_rcu_node(sp);  | 
|---|
 | 1295 | +	spin_unlock_irq_rcu_node(ssp);  | 
|---|
| 1237 | 1296 |   | 
|---|
| 1238 | 1297 |  	if (pushgp) | 
|---|
| 1239 |  | -		queue_delayed_work(rcu_gp_wq, &sp->work, delay);  | 
|---|
 | 1298 | +		queue_delayed_work(rcu_gp_wq, &ssp->work, delay);  | 
|---|
| 1240 | 1299 |  } | 
|---|
| 1241 | 1300 |   | 
|---|
| 1242 | 1301 |  /* | 
|---|
| .. | .. | 
|---|
| 1244 | 1303 |   */ | 
|---|
| 1245 | 1304 |  static void process_srcu(struct work_struct *work) | 
|---|
| 1246 | 1305 |  { | 
|---|
| 1247 |  | -	struct srcu_struct *sp;  | 
|---|
 | 1306 | +	struct srcu_struct *ssp;  | 
|---|
| 1248 | 1307 |   | 
|---|
| 1249 |  | -	sp = container_of(work, struct srcu_struct, work.work);  | 
|---|
 | 1308 | +	ssp = container_of(work, struct srcu_struct, work.work);  | 
|---|
| 1250 | 1309 |   | 
|---|
| 1251 |  | -	srcu_advance_state(sp);  | 
|---|
| 1252 |  | -	srcu_reschedule(sp, srcu_get_delay(sp));  | 
|---|
 | 1310 | +	srcu_advance_state(ssp);  | 
|---|
 | 1311 | +	srcu_reschedule(ssp, srcu_get_delay(ssp));  | 
|---|
| 1253 | 1312 |  } | 
|---|
| 1254 | 1313 |   | 
|---|
| 1255 | 1314 |  void srcutorture_get_gp_data(enum rcutorture_type test_type, | 
|---|
| 1256 |  | -			     struct srcu_struct *sp, int *flags,  | 
|---|
 | 1315 | +			     struct srcu_struct *ssp, int *flags,  | 
|---|
| 1257 | 1316 |  			     unsigned long *gp_seq) | 
|---|
| 1258 | 1317 |  { | 
|---|
| 1259 | 1318 |  	if (test_type != SRCU_FLAVOR) | 
|---|
| 1260 | 1319 |  		return; | 
|---|
| 1261 | 1320 |  	*flags = 0; | 
|---|
| 1262 |  | -	*gp_seq = rcu_seq_current(&sp->srcu_gp_seq);  | 
|---|
 | 1321 | +	*gp_seq = rcu_seq_current(&ssp->srcu_gp_seq);  | 
|---|
| 1263 | 1322 |  } | 
|---|
| 1264 | 1323 |  EXPORT_SYMBOL_GPL(srcutorture_get_gp_data); | 
|---|
| 1265 | 1324 |   | 
|---|
| 1266 |  | -void srcu_torture_stats_print(struct srcu_struct *sp, char *tt, char *tf)  | 
|---|
 | 1325 | +void srcu_torture_stats_print(struct srcu_struct *ssp, char *tt, char *tf)  | 
|---|
| 1267 | 1326 |  { | 
|---|
| 1268 | 1327 |  	int cpu; | 
|---|
| 1269 | 1328 |  	int idx; | 
|---|
| 1270 | 1329 |  	unsigned long s0 = 0, s1 = 0; | 
|---|
| 1271 | 1330 |   | 
|---|
| 1272 |  | -	idx = sp->srcu_idx & 0x1;  | 
|---|
 | 1331 | +	idx = ssp->srcu_idx & 0x1;  | 
|---|
| 1273 | 1332 |  	pr_alert("%s%s Tree SRCU g%ld per-CPU(idx=%d):", | 
|---|
| 1274 |  | -		 tt, tf, rcu_seq_current(&sp->srcu_gp_seq), idx);  | 
|---|
 | 1333 | +		 tt, tf, rcu_seq_current(&ssp->srcu_gp_seq), idx);  | 
|---|
| 1275 | 1334 |  	for_each_possible_cpu(cpu) { | 
|---|
| 1276 | 1335 |  		unsigned long l0, l1; | 
|---|
| 1277 | 1336 |  		unsigned long u0, u1; | 
|---|
| 1278 | 1337 |  		long c0, c1; | 
|---|
| 1279 | 1338 |  		struct srcu_data *sdp; | 
|---|
| 1280 | 1339 |   | 
|---|
| 1281 |  | -		sdp = per_cpu_ptr(sp->sda, cpu);  | 
|---|
| 1282 |  | -		u0 = sdp->srcu_unlock_count[!idx];  | 
|---|
| 1283 |  | -		u1 = sdp->srcu_unlock_count[idx];  | 
|---|
 | 1340 | +		sdp = per_cpu_ptr(ssp->sda, cpu);  | 
|---|
 | 1341 | +		u0 = data_race(sdp->srcu_unlock_count[!idx]);  | 
|---|
 | 1342 | +		u1 = data_race(sdp->srcu_unlock_count[idx]);  | 
|---|
| 1284 | 1343 |   | 
|---|
| 1285 | 1344 |  		/* | 
|---|
| 1286 | 1345 |  		 * Make sure that a lock is always counted if the corresponding | 
|---|
| .. | .. | 
|---|
| 1288 | 1347 |  		 */ | 
|---|
| 1289 | 1348 |  		smp_rmb(); | 
|---|
| 1290 | 1349 |   | 
|---|
| 1291 |  | -		l0 = sdp->srcu_lock_count[!idx];  | 
|---|
| 1292 |  | -		l1 = sdp->srcu_lock_count[idx];  | 
|---|
 | 1350 | +		l0 = data_race(sdp->srcu_lock_count[!idx]);  | 
|---|
 | 1351 | +		l1 = data_race(sdp->srcu_lock_count[idx]);  | 
|---|
| 1293 | 1352 |   | 
|---|
| 1294 | 1353 |  		c0 = l0 - u0; | 
|---|
| 1295 | 1354 |  		c1 = l1 - u1; | 
|---|
| 1296 |  | -		pr_cont(" %d(%ld,%ld %1p)",  | 
|---|
| 1297 |  | -			cpu, c0, c1, rcu_segcblist_head(&sdp->srcu_cblist));  | 
|---|
 | 1355 | +		pr_cont(" %d(%ld,%ld %c)",  | 
|---|
 | 1356 | +			cpu, c0, c1,  | 
|---|
 | 1357 | +			"C."[rcu_segcblist_empty(&sdp->srcu_cblist)]);  | 
|---|
| 1298 | 1358 |  		s0 += c0; | 
|---|
| 1299 | 1359 |  		s1 += c1; | 
|---|
| 1300 | 1360 |  	} | 
|---|
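
The `"C."[...]` argument to pr_cont() above is terse enough to be worth unpacking:

```c
/*
 * "C."[rcu_segcblist_empty(&sdp->srcu_cblist)] indexes the
 * two-character string literal "C.":
 *
 *   callbacks queued -> rcu_segcblist_empty() == 0 -> 'C'
 *   list empty       -> rcu_segcblist_empty() == 1 -> '.'
 *
 * Each CPU's entry thus shows 'C' when SRCU callbacks are queued
 * there, and as a side effect no kernel pointer (the old %1p
 * list-head value) lands in the log.
 */
```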
| .. | .. | 
|---|
| 1310 | 1370 |  	return 0; | 
|---|
| 1311 | 1371 |  } | 
|---|
| 1312 | 1372 |  early_initcall(srcu_bootup_announce); | 
|---|
 | 1373 | +  | 
|---|
 | 1374 | +void __init srcu_init(void)  | 
|---|
 | 1375 | +{  | 
|---|
 | 1376 | +	struct srcu_struct *ssp;  | 
|---|
 | 1377 | +  | 
|---|
 | 1378 | +	srcu_init_done = true;  | 
|---|
 | 1379 | +	while (!list_empty(&srcu_boot_list)) {  | 
|---|
 | 1380 | +		ssp = list_first_entry(&srcu_boot_list, struct srcu_struct,  | 
|---|
 | 1381 | +				      work.work.entry);  | 
|---|
 | 1382 | +		check_init_srcu_struct(ssp);  | 
|---|
 | 1383 | +		list_del_init(&ssp->work.work.entry);  | 
|---|
 | 1384 | +		queue_work(rcu_gp_wq, &ssp->work.work);  | 
|---|
 | 1385 | +	}  | 
|---|
 | 1386 | +}  | 
|---|
 | 1387 | +  | 
|---|
 | 1388 | +#ifdef CONFIG_MODULES  | 
|---|
 | 1389 | +  | 
|---|
 | 1390 | +/* Initialize any global-scope srcu_struct structures used by this module. */  | 
|---|
 | 1391 | +static int srcu_module_coming(struct module *mod)  | 
|---|
 | 1392 | +{  | 
|---|
 | 1393 | +	int i;  | 
|---|
 | 1394 | +	struct srcu_struct **sspp = mod->srcu_struct_ptrs;  | 
|---|
 | 1395 | +	int ret;  | 
|---|
 | 1396 | +  | 
|---|
 | 1397 | +	for (i = 0; i < mod->num_srcu_structs; i++) {  | 
|---|
 | 1398 | +		ret = init_srcu_struct(*(sspp++));  | 
|---|
 | 1399 | +		if (WARN_ON_ONCE(ret))  | 
|---|
 | 1400 | +			return ret;  | 
|---|
 | 1401 | +	}  | 
|---|
 | 1402 | +	return 0;  | 
|---|
 | 1403 | +}  | 
|---|
 | 1404 | +  | 
|---|
 | 1405 | +/* Clean up any global-scope srcu_struct structures used by this module. */  | 
|---|
 | 1406 | +static void srcu_module_going(struct module *mod)  | 
|---|
 | 1407 | +{  | 
|---|
 | 1408 | +	int i;  | 
|---|
 | 1409 | +	struct srcu_struct **sspp = mod->srcu_struct_ptrs;  | 
|---|
 | 1410 | +  | 
|---|
 | 1411 | +	for (i = 0; i < mod->num_srcu_structs; i++)  | 
|---|
 | 1412 | +		cleanup_srcu_struct(*(sspp++));  | 
|---|
 | 1413 | +}  | 
|---|
 | 1414 | +  | 
|---|
 | 1415 | +/* Handle one module, either coming or going. */  | 
|---|
 | 1416 | +static int srcu_module_notify(struct notifier_block *self,  | 
|---|
 | 1417 | +			      unsigned long val, void *data)  | 
|---|
 | 1418 | +{  | 
|---|
 | 1419 | +	struct module *mod = data;  | 
|---|
 | 1420 | +	int ret = 0;  | 
|---|
 | 1421 | +  | 
|---|
 | 1422 | +	switch (val) {  | 
|---|
 | 1423 | +	case MODULE_STATE_COMING:  | 
|---|
 | 1424 | +		ret = srcu_module_coming(mod);  | 
|---|
 | 1425 | +		break;  | 
|---|
 | 1426 | +	case MODULE_STATE_GOING:  | 
|---|
 | 1427 | +		srcu_module_going(mod);  | 
|---|
 | 1428 | +		break;  | 
|---|
 | 1429 | +	default:  | 
|---|
 | 1430 | +		break;  | 
|---|
 | 1431 | +	}  | 
|---|
 | 1432 | +	return ret;  | 
|---|
 | 1433 | +}  | 
|---|
 | 1434 | +  | 
|---|
 | 1435 | +static struct notifier_block srcu_module_nb = {  | 
|---|
 | 1436 | +	.notifier_call = srcu_module_notify,  | 
|---|
 | 1437 | +	.priority = 0,  | 
|---|
 | 1438 | +};  | 
|---|
 | 1439 | +  | 
|---|
 | 1440 | +static __init int init_srcu_module_notifier(void)  | 
|---|
 | 1441 | +{  | 
|---|
 | 1442 | +	int ret;  | 
|---|
 | 1443 | +  | 
|---|
 | 1444 | +	ret = register_module_notifier(&srcu_module_nb);  | 
|---|
 | 1445 | +	if (ret)  | 
|---|
 | 1446 | +		pr_warn("Failed to register srcu module notifier\n");  | 
|---|
 | 1447 | +	return ret;  | 
|---|
 | 1448 | +}  | 
|---|
 | 1449 | +late_initcall(init_srcu_module_notifier);  | 
|---|
 | 1450 | +  | 
|---|
 | 1451 | +#endif /* #ifdef CONFIG_MODULES */  | 
|---|
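
The notifier machinery above is what lets a loadable module use DEFINE_SRCU() or DEFINE_STATIC_SRCU() at file scope without explicit setup or teardown. A minimal sketch of such a module (the module itself is hypothetical; the macros and section mechanics are real):

```c
#include <linux/module.h>
#include <linux/srcu.h>

/*
 * In a module, DEFINE_STATIC_SRCU() also emits a pointer to this
 * srcu_struct into the module's srcu_struct_ptrs section, so
 * srcu_module_coming() runs init_srcu_struct() on it at
 * MODULE_STATE_COMING and srcu_module_going() runs
 * cleanup_srcu_struct() at MODULE_STATE_GOING.
 */
DEFINE_STATIC_SRCU(example_srcu);

static int __init example_init(void)
{
	int idx;

	idx = srcu_read_lock(&example_srcu);
	/* ... sleepable read-side critical section ... */
	srcu_read_unlock(&example_srcu, idx);
	return 0;
}

static void __exit example_exit(void)
{
	/* Flush any call_srcu() callbacks still in flight before unload. */
	srcu_barrier(&example_srcu);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
```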