```diff
@@ -1,25 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*
  * Read-Copy Update mechanism for mutual exclusion (tree-based version)
  * Internal non-public definitions.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, you can access it online at
- * http://www.gnu.org/licenses/gpl-2.0.html.
- *
  * Copyright IBM Corporation, 2008
  *
  * Author: Ingo Molnar <mingo@elte.hu>
- *	   Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+ *	   Paul E. McKenney <paulmck@linux.ibm.com>
 */
 
 #include <linux/cache.h>
```
```diff
@@ -29,39 +16,12 @@
 #include <linux/cpumask.h>
 #include <linux/seqlock.h>
 #include <linux/swait.h>
-#include <linux/stop_machine.h>
 #include <linux/rcu_node_tree.h>
 
 #include "rcu_segcblist.h"
 
-/*
- * Dynticks per-CPU state.
- */
-struct rcu_dynticks {
-	long dynticks_nesting;      /* Track process nesting level. */
-	long dynticks_nmi_nesting;  /* Track irq/NMI nesting level. */
-	atomic_t dynticks;	    /* Even value for idle, else odd. */
-	bool rcu_need_heavy_qs;     /* GP old, need heavy quiescent state. */
-	unsigned long rcu_qs_ctr;   /* Light universal quiescent state ctr. */
-	bool rcu_urgent_qs;	    /* GP old need light quiescent state. */
-#ifdef CONFIG_RCU_FAST_NO_HZ
-	bool all_lazy;		    /* Are all CPU's CBs lazy? */
-	unsigned long nonlazy_posted;
-				    /* # times non-lazy CBs posted to CPU. */
-	unsigned long nonlazy_posted_snap;
-				    /* idle-period nonlazy_posted snapshot. */
-	unsigned long last_accelerate;
-				    /* Last jiffy CBs were accelerated. */
-	unsigned long last_advance_all;
-				    /* Last jiffy CBs were all advanced. */
-	int tick_nohz_enabled_snap; /* Previously seen value from sysfs. */
-#endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */
-};
-
 /* Communicate arguments to a workqueue handler. */
 struct rcu_exp_work {
-	smp_call_func_t rew_func;
-	struct rcu_state *rew_rsp;
 	unsigned long rew_s;
 	struct work_struct rew_work;
 };
```
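The embedded `rew_work` member is what lets the expedited grace-period machinery hand its sequence number (`rew_s`) to a workqueue handler: the handler recovers the enclosing structure with `container_of()`. A minimal sketch of that pattern, with a hypothetical handler name (the in-tree handlers live in the RCU sources, not here):

```c
#include <linux/workqueue.h>
#include <linux/printk.h>

/* Hypothetical handler illustrating the container_of() idiom used with
 * rcu_exp_work; the real handler and queueing code are elsewhere. */
static void example_exp_handler(struct work_struct *wp)
{
	struct rcu_exp_work *rewp =
		container_of(wp, struct rcu_exp_work, rew_work);

	/* rewp->rew_s is the expedited grace-period sequence number. */
	pr_info("expedited GP work for sequence %lu\n", rewp->rew_s);
}
```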
```diff
@@ -81,8 +41,8 @@
 	raw_spinlock_t __private lock;	/* Root rcu_node's lock protects */
 					/*  some rcu_state fields as well as */
 					/*  following. */
-	unsigned long gp_seq;	/* Track rsp->rcu_gp_seq. */
-	unsigned long gp_seq_needed; /* Track rsp->rcu_gp_seq_needed. */
+	unsigned long gp_seq;	/* Track rsp->gp_seq. */
+	unsigned long gp_seq_needed; /* Track furthest future GP request. */
 	unsigned long completedqs; /* All QSes done for this node. */
 	unsigned long qsmask;	/* CPUs or groups that need to switch in */
 				/*  order for current grace period to proceed.*/
```
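The `->gp_seq` fields use the `rcu_seq` encoding from kernel/rcu/rcu.h of this era: the low-order bits carry in-progress state and the remaining bits count grace periods, which is what lets one unsigned long serve both the "track" and "furthest future request" roles. For reference (these helpers belong to rcu.h, reproduced here only to explain the encoding):

```c
#define RCU_SEQ_CTR_SHIFT	2
#define RCU_SEQ_STATE_MASK	((1 << RCU_SEQ_CTR_SHIFT) - 1)

/* Number of grace periods encoded in the sequence value. */
static inline unsigned long rcu_seq_ctr(unsigned long s)
{
	return s >> RCU_SEQ_CTR_SHIFT;
}

/* Low-order state bits; zero means no grace period in progress. */
static inline int rcu_seq_state(unsigned long s)
{
	return s & RCU_SEQ_STATE_MASK;
}
```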
```diff
@@ -108,12 +68,14 @@
 				/* Online CPUs for next expedited GP. */
 				/*  Any CPU that has ever been online will */
 				/*  have its bit set. */
+	unsigned long cbovldmask;
+				/* CPUs experiencing callback overload. */
 	unsigned long ffmask;	/* Fully functional CPUs. */
 	unsigned long grpmask;	/* Mask to apply to parent qsmask. */
 				/*  Only one bit will be set in this mask. */
-	int	grplo;		/* lowest-numbered CPU or group here. */
-	int	grphi;		/* highest-numbered CPU or group here. */
-	u8	grpnum;		/* CPU/group number for next level up. */
+	int	grplo;		/* lowest-numbered CPU here. */
+	int	grphi;		/* highest-numbered CPU here. */
+	u8	grpnum;		/* group number for next level up. */
 	u8	level;		/* root is at level 0. */
 	bool	wait_blkd_tasks;/* Necessary to wait for blocked tasks to */
 				/*  exit RCU read-side critical sections */
```
```diff
@@ -170,7 +132,7 @@
  * are indexed relative to this interval rather than the global CPU ID space.
  * This generates the bit for a CPU in node-local masks.
  */
-#define leaf_node_cpu_bit(rnp, cpu) (1UL << ((cpu) - (rnp)->grplo))
+#define leaf_node_cpu_bit(rnp, cpu) (BIT((cpu) - (rnp)->grplo))
 
 /*
  * Union to allow "aggregate OR" operation on the need for a quiescent
```
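`BIT(nr)` from include/linux/bits.h expands to `1UL << (nr)`, so this change is cosmetic. A worked example of the node-local bit mapping, with hypothetical numbers and a hypothetical helper (not an in-tree function):

```c
/* Illustrative only: for a leaf rcu_node covering CPUs 16..31
 * (->grplo == 16), CPU 19 maps to bit 3 (0x8) of the node-local masks.
 * A caller would hold rnp->lock before testing ->qsmask. */
static bool example_cpu_needs_qs(struct rcu_node *rnp, int cpu)
{
	return !!(rnp->qsmask & leaf_node_cpu_bit(rnp, cpu));
}
```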
```diff
@@ -187,20 +149,23 @@
 /* Per-CPU data for read-copy update. */
 struct rcu_data {
 	/* 1) quiescent-state and grace-period handling : */
-	unsigned long	gp_seq;		/* Track rsp->rcu_gp_seq counter. */
-	unsigned long	gp_seq_needed;	/* Track rsp->rcu_gp_seq_needed ctr. */
-	unsigned long	rcu_qs_ctr_snap;/* Snapshot of rcu_qs_ctr to check */
-					/*  for rcu_all_qs() invocations. */
+	unsigned long	gp_seq;		/* Track rsp->gp_seq counter. */
+	unsigned long	gp_seq_needed;	/* Track furthest future GP request. */
 	union rcu_noqs	cpu_no_qs;	/* No QSes yet for this CPU. */
 	bool		core_needs_qs;	/* Core waits for quiesc state. */
 	bool		beenonline;	/* CPU online at least once. */
 	bool		gpwrap;		/* Possible ->gp_seq wrap. */
+	bool		exp_deferred_qs; /* This CPU awaiting a deferred QS? */
+	bool		cpu_started;	/* RCU watching this onlining CPU. */
 	struct rcu_node *mynode;	/* This CPU's leaf of hierarchy */
 	unsigned long grpmask;		/* Mask to apply to leaf qsmask. */
 	unsigned long	ticks_this_gp;	/* The number of scheduling-clock */
 					/*  ticks this CPU has handled */
 					/*  during and after the last grace */
 					/* period it is aware of. */
+	struct irq_work defer_qs_iw;	/* Obtain later scheduler attention. */
+	bool defer_qs_iw_pending;	/* Scheduler attention pending? */
+	struct work_struct strict_work;	/* Schedule readers for strict GPs. */
 
 	/* 2) batch handling */
 	struct rcu_segcblist cblist;	/* Segmented callback list, with */
```
```diff
@@ -208,53 +173,71 @@
 					/* different grace periods. */
 	long		qlen_last_fqs_check;
 					/* qlen at last check for QS forcing */
+	unsigned long	n_cbs_invoked;	/* # callbacks invoked since boot. */
 	unsigned long	n_force_qs_snap;
 					/* did other CPU force QS recently? */
 	long		blimit;		/* Upper limit on a processed batch */
 
 	/* 3) dynticks interface. */
-	struct rcu_dynticks *dynticks;	/* Shared per-CPU dynticks state. */
 	int dynticks_snap;		/* Per-GP tracking for dynticks. */
-
-	/* 4) reasons this CPU needed to be kicked by force_quiescent_state */
-	unsigned long dynticks_fqs;	/* Kicked due to dynticks idle. */
-	unsigned long cond_resched_completed;
-					/* Grace period that needs help */
-					/*  from cond_resched(). */
-
-	/* 5) _rcu_barrier(), OOM callbacks, and expediting. */
-	struct rcu_head barrier_head;
+	long dynticks_nesting;		/* Track process nesting level. */
+	long dynticks_nmi_nesting;	/* Track irq/NMI nesting level. */
+	atomic_t dynticks;		/* Even value for idle, else odd. */
+	bool rcu_need_heavy_qs;		/* GP old, so heavy quiescent state! */
+	bool rcu_urgent_qs;		/* GP old need light quiescent state. */
+	bool rcu_forced_tick;		/* Forced tick to provide QS. */
+	bool rcu_forced_tick_exp;	/*   ... provide QS to expedited GP. */
 #ifdef CONFIG_RCU_FAST_NO_HZ
-	struct rcu_head oom_head;
+	unsigned long last_accelerate;	/* Last jiffy CBs were accelerated. */
+	unsigned long last_advance_all;	/* Last jiffy CBs were all advanced. */
+	int tick_nohz_enabled_snap;	/* Previously seen value from sysfs. */
 #endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */
+
+	/* 4) rcu_barrier(), OOM callbacks, and expediting. */
+	struct rcu_head barrier_head;
 	int exp_dynticks_snap;		/* Double-check need for IPI. */
 
-	/* 6) Callback offloading. */
+	/* 5) Callback offloading. */
 #ifdef CONFIG_RCU_NOCB_CPU
-	struct rcu_head *nocb_head;	/* CBs waiting for kthread. */
-	struct rcu_head **nocb_tail;
-	atomic_long_t nocb_q_count;	/* # CBs waiting for nocb */
-	atomic_long_t nocb_q_count_lazy; /*  invocation (all stages). */
-	struct rcu_head *nocb_follower_head; /* CBs ready to invoke. */
-	struct rcu_head **nocb_follower_tail;
-	struct swait_queue_head nocb_wq; /* For nocb kthreads to sleep on. */
-	struct task_struct *nocb_kthread;
+	struct swait_queue_head nocb_cb_wq; /* For nocb kthreads to sleep on. */
+	struct task_struct *nocb_gp_kthread;
 	raw_spinlock_t nocb_lock;	/* Guard following pair of fields. */
+	atomic_t nocb_lock_contended;	/* Contention experienced. */
 	int nocb_defer_wakeup;		/* Defer wakeup of nocb_kthread. */
 	struct timer_list nocb_timer;	/* Enforce finite deferral. */
+	unsigned long nocb_gp_adv_time;	/* Last call_rcu() CB adv (jiffies). */
 
-	/* The following fields are used by the leader, hence own cacheline. */
-	struct rcu_head *nocb_gp_head ____cacheline_internodealigned_in_smp;
-					/* CBs waiting for GP. */
-	struct rcu_head **nocb_gp_tail;
-	bool nocb_leader_sleep;		/* Is the nocb leader thread asleep? */
-	struct rcu_data *nocb_next_follower;
-					/* Next follower in wakeup chain. */
+	/* The following fields are used by call_rcu, hence own cacheline. */
+	raw_spinlock_t nocb_bypass_lock ____cacheline_internodealigned_in_smp;
+	struct rcu_cblist nocb_bypass;	/* Lock-contention-bypass CB list. */
+	unsigned long nocb_bypass_first; /* Time (jiffies) of first enqueue. */
+	unsigned long nocb_nobypass_last; /* Last ->cblist enqueue (jiffies). */
+	int nocb_nobypass_count;	/* # ->cblist enqueues at ^^^ time. */
 
-	/* The following fields are used by the follower, hence new cachline. */
-	struct rcu_data *nocb_leader ____cacheline_internodealigned_in_smp;
-					/* Leader CPU takes GP-end wakeups. */
+	/* The following fields are used by GP kthread, hence own cacheline. */
+	raw_spinlock_t nocb_gp_lock ____cacheline_internodealigned_in_smp;
+	struct timer_list nocb_bypass_timer; /* Force nocb_bypass flush. */
+	u8 nocb_gp_sleep;		/* Is the nocb GP thread asleep? */
+	u8 nocb_gp_bypass;		/* Found a bypass on last scan? */
+	u8 nocb_gp_gp;			/* GP to wait for on last scan? */
+	unsigned long nocb_gp_seq;	/*  If so, ->gp_seq to wait for. */
+	unsigned long nocb_gp_loops;	/* # passes through wait code. */
+	struct swait_queue_head nocb_gp_wq; /* For nocb kthreads to sleep on. */
+	bool nocb_cb_sleep;		/* Is the nocb CB thread asleep? */
+	struct task_struct *nocb_cb_kthread;
+	struct rcu_data *nocb_next_cb_rdp;
+					/* Next rcu_data in wakeup chain. */
+
+	/* The following fields are used by CB kthread, hence new cacheline. */
+	struct rcu_data *nocb_gp_rdp ____cacheline_internodealigned_in_smp;
+					/* GP rdp takes GP-end wakeups. */
 #endif /* #ifdef CONFIG_RCU_NOCB_CPU */
+
+	/* 6) RCU priority boosting. */
+	struct task_struct *rcu_cpu_kthread_task;
+					/* rcuc per-CPU kthread or NULL. */
+	unsigned int rcu_cpu_kthread_status;
+	char rcu_cpu_has_work;
 
 	/* 7) Diagnostic data, including RCU CPU stall warnings. */
 	unsigned int softirq_snap;	/* Snapshot of softirq activity. */
```
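The dynticks counters that used to live in the separate `struct rcu_dynticks` now sit directly in `struct rcu_data`. The even/odd protocol named in the `->dynticks` comment is what grace-period detection relies on: a sampler snapshots the counter, and a later resample that differs (or an even snapshot) proves the CPU passed through, or was already in, an extended quiescent state. A simplified sketch of that protocol follows; the in-kernel implementation additionally reserves low-order bits for other purposes, so these helpers are illustrative, not the actual code:

```c
#include <linux/atomic.h>

/* Snapshot with full ordering, as value-returning atomics provide. */
static int example_dynticks_snap(atomic_t *dynticks)
{
	return atomic_add_return(0, dynticks);
}

/* Even value: the CPU was in an extended quiescent state (idle). */
static bool example_in_eqs(int snap)
{
	return !(snap & 0x1);
}

/* Was the CPU in an EQS at snapshot time, or has it passed through
 * one since (counter advanced)? */
static bool example_in_eqs_since(atomic_t *dynticks, int snap)
{
	return example_in_eqs(snap) ||
	       snap != atomic_add_return(0, dynticks);
}
```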
```diff
@@ -266,9 +249,9 @@
 	short rcu_ofl_gp_flags;		/* ->gp_flags at last offline. */
 	unsigned long rcu_onl_gp_seq;	/* ->gp_seq at last online. */
 	short rcu_onl_gp_flags;		/* ->gp_flags at last online. */
+	unsigned long last_fqs_resched;	/* Time of last rcu_resched(). */
 
 	int cpu;
-	struct rcu_state *rsp;
 };
 
 /* Values for nocb_defer_wakeup field in struct rcu_data. */
```
```diff
@@ -314,8 +297,6 @@
 	struct rcu_node *level[RCU_NUM_LVLS + 1];
 						/* Hierarchy levels (+1 to */
 						/*  shut bogus gcc warning) */
-	struct rcu_data __percpu *rda;		/* pointer of percu rcu_data. */
-	call_rcu_func_t call;			/* call_rcu() flavor. */
 	int ncpus;				/* # CPUs seen so far. */
 
 	/* The following fields are guarded by the root rcu_node's lock. */
```
```diff
@@ -323,10 +304,14 @@
 	u8	boost ____cacheline_internodealigned_in_smp;
 						/* Subject to priority boost. */
 	unsigned long gp_seq;			/* Grace-period sequence #. */
+	unsigned long gp_max;			/* Maximum GP duration in */
+						/*  jiffies. */
 	struct task_struct *gp_kthread;		/* Task for grace periods. */
 	struct swait_queue_head gp_wq;		/* Where GP task waits. */
 	short gp_flags;				/* Commands for GP task. */
 	short gp_state;				/* GP kthread sleep state. */
+	unsigned long gp_wake_time;		/* Last GP kthread wake. */
+	unsigned long gp_wake_seq;		/* ->gp_seq at ^^^. */
 
 	/* End of fields guarded by root rcu_node's lock. */
 
```
```diff
@@ -334,7 +319,7 @@
 	atomic_t barrier_cpu_count;		/* # CPUs waiting on. */
 	struct completion barrier_completion;	/* Wake at barrier end. */
 	unsigned long barrier_sequence;		/* ++ at start and end of */
-						/*  _rcu_barrier(). */
+						/*  rcu_barrier(). */
 	/* End of fields guarded by barrier_mutex. */
 
 	struct mutex exp_mutex;			/* Serialize expedited GP. */
```
```diff
@@ -343,6 +328,8 @@
 	atomic_t expedited_need_qs;		/* # CPUs left to check in. */
 	struct swait_queue_head expedited_wq;	/* Wait for check-ins. */
 	int ncpus_snap;				/* # CPUs seen last time. */
+	u8 cbovld;				/* Callback overload now? */
+	u8 cbovldnext;				/* ^        ^  next time? */
 
 	unsigned long jiffies_force_qs;		/* Time at which to invoke */
 						/*  force_quiescent_state(). */
```
```diff
@@ -352,6 +339,8 @@
 						/*  force_quiescent_state(). */
 	unsigned long gp_start;			/* Time at which GP started, */
 						/*  but in jiffies. */
+	unsigned long gp_end;			/* Time last GP ended, again */
+						/*  in jiffies. */
 	unsigned long gp_activity;		/* Time of last GP kthread */
 						/*  activity in jiffies. */
 	unsigned long gp_req_activity;		/* Time of last GP request */
```
```diff
@@ -362,13 +351,10 @@
 						/*  a reluctant CPU. */
 	unsigned long n_force_qs_gpstart;	/* Snapshot of n_force_qs at */
 						/*  GP start. */
-	unsigned long gp_max;			/* Maximum GP duration in */
-						/*  jiffies. */
 	const char *name;			/* Name of structure. */
 	char abbr;				/* Abbreviated name. */
-	struct list_head flavors;		/* List of RCU flavors. */
 
-	spinlock_t ofl_lock ____cacheline_internodealigned_in_smp;
+	raw_spinlock_t ofl_lock ____cacheline_internodealigned_in_smp;
 						/* Synchronize offline with */
 						/*  GP pre-initialization. */
 };
```
```diff
@@ -376,6 +362,7 @@
 /* Values for rcu_state structure's gp_flags field. */
 #define RCU_GP_FLAG_INIT 0x1	/* Need grace-period initialization. */
 #define RCU_GP_FLAG_FQS  0x2	/* Need grace-period quiescent-state forcing. */
+#define RCU_GP_FLAG_OVLD 0x4	/* Experiencing callback overload. */
 
 /* Values for rcu_state structure's gp_state field. */
 #define RCU_GP_IDLE	 0	/* Initial state and no GP in progress. */
```
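These are independent bits OR-ed into `->gp_flags` under the root rcu_node's lock; quiescent-state forcing, for instance, is requested by setting `RCU_GP_FLAG_FQS` and waking the grace-period kthread. A hedged sketch of the pattern, with a hypothetical helper name (the real code paths add ordering checks and tracing):

```c
/* Illustrative only: ask the GP kthread to force quiescent states.
 * Assumes the caller holds the root rcu_node's ->lock. */
static void example_request_fqs(struct rcu_state *rsp)
{
	WRITE_ONCE(rsp->gp_flags,
		   READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS);
	swake_up_one(&rsp->gp_wq);	/* Wake the grace-period kthread. */
}
```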
```diff
@@ -388,36 +375,26 @@
 #define RCU_GP_CLEANUP   7	/* Grace-period cleanup started. */
 #define RCU_GP_CLEANED   8	/* Grace-period cleanup complete. */
 
-#ifndef RCU_TREE_NONCORE
-static const char * const gp_state_names[] = {
-	"RCU_GP_IDLE",
-	"RCU_GP_WAIT_GPS",
-	"RCU_GP_DONE_GPS",
-	"RCU_GP_ONOFF",
-	"RCU_GP_INIT",
-	"RCU_GP_WAIT_FQS",
-	"RCU_GP_DOING_FQS",
-	"RCU_GP_CLEANUP",
-	"RCU_GP_CLEANED",
-};
-#endif /* #ifndef RCU_TREE_NONCORE */
-
-extern struct list_head rcu_struct_flavors;
-
-/* Sequence through rcu_state structures for each RCU flavor. */
-#define for_each_rcu_flavor(rsp) \
-	list_for_each_entry((rsp), &rcu_struct_flavors, flavors)
-
 /*
- * RCU implementation internal declarations:
+ * In order to export the rcu_state name to the tracing tools, it
+ * needs to be added in the __tracepoint_string section.
+ * This requires defining a separate variable tp_<sname>_varname
+ * that points to the string being used, and this will allow
+ * the tracing userspace tools to be able to decipher the string
+ * address to the matching string.
  */
-extern struct rcu_state rcu_sched_state;
-
-#ifndef CONFIG_PREEMPT_RT_FULL
-extern struct rcu_state rcu_bh_state;
-#endif
-
 #ifdef CONFIG_PREEMPT_RCU
-extern struct rcu_state rcu_preempt_state;
-#endif /* #ifdef CONFIG_PREEMPT_RCU */
+#define RCU_ABBR 'p'
+#define RCU_NAME_RAW "rcu_preempt"
+#else /* #ifdef CONFIG_PREEMPT_RCU */
+#define RCU_ABBR 's'
+#define RCU_NAME_RAW "rcu_sched"
+#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
+#ifndef CONFIG_TRACING
+#define RCU_NAME RCU_NAME_RAW
+#else /* #ifdef CONFIG_TRACING */
+static char rcu_name[] = RCU_NAME_RAW;
+static const char *tp_rcu_varname __used __tracepoint_string = rcu_name;
+#define RCU_NAME rcu_name
+#endif /* #else #ifdef CONFIG_TRACING */
 
```
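The comment added above describes the standard `__tracepoint_string` technique: the string's pointer variable is placed in a dedicated ELF section so userspace tracing tools can resolve the string address recorded in the ring buffer back to its contents. The same pattern shown generically, with hypothetical names:

```c
#include <linux/tracepoint.h>

/* Hypothetical example of the __tracepoint_string pattern: under
 * CONFIG_TRACING, tp_my_varname lands in the __tracepoint_str section,
 * letting trace tools map the emitted address back to "my_event". */
static char my_name[] = "my_event";
static const char *tp_my_varname __used __tracepoint_string = my_name;
```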
```diff
@@ -424,59 +401,43 @@
-int rcu_dynticks_snap(struct rcu_dynticks *rdtp);
-
-DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
-DECLARE_PER_CPU(int, rcu_cpu_kthread_cpu);
-DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
-DECLARE_PER_CPU(char, rcu_cpu_has_work);
-
-#ifndef RCU_TREE_NONCORE
-
-/* Forward declarations for rcutree_plugin.h */
+/* Forward declarations for tree_plugin.h */
 static void rcu_bootup_announce(void);
-static void rcu_preempt_note_context_switch(bool preempt);
+static void rcu_qs(void);
 static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
 #ifdef CONFIG_HOTPLUG_CPU
 static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
-static void rcu_print_detail_task_stall(struct rcu_state *rsp);
-static int rcu_print_task_stall(struct rcu_node *rnp);
 static int rcu_print_task_exp_stall(struct rcu_node *rnp);
-static void rcu_preempt_check_blocked_tasks(struct rcu_state *rsp,
-					    struct rcu_node *rnp);
-static void rcu_preempt_check_callbacks(void);
-void call_rcu(struct rcu_head *head, rcu_callback_t func);
-static void __init __rcu_init_preempt(void);
-static void dump_blkd_tasks(struct rcu_state *rsp, struct rcu_node *rnp,
-			    int ncheck);
+static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
+static void rcu_flavor_sched_clock_irq(int user);
+static void dump_blkd_tasks(struct rcu_node *rnp, int ncheck);
 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
 static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
 static bool rcu_is_callbacks_kthread(void);
 static void rcu_cpu_kthread_setup(unsigned int cpu);
-#ifdef CONFIG_RCU_BOOST
-static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
-						 struct rcu_node *rnp);
-#endif /* #ifdef CONFIG_RCU_BOOST */
 static void __init rcu_spawn_boost_kthreads(void);
 static void rcu_prepare_kthreads(int cpu);
 static void rcu_cleanup_after_idle(void);
 static void rcu_prepare_for_idle(void);
-static void rcu_idle_count_callbacks_posted(void);
 static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
-static void print_cpu_stall_info_begin(void);
-static void print_cpu_stall_info(struct rcu_state *rsp, int cpu);
-static void print_cpu_stall_info_end(void);
+static bool rcu_preempt_need_deferred_qs(struct task_struct *t);
+static void rcu_preempt_deferred_qs(struct task_struct *t);
 static void zero_cpu_stall_ticks(struct rcu_data *rdp);
-static void increment_cpu_stall_ticks(void);
-static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu);
 static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp);
 static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq);
 static void rcu_init_one_nocb(struct rcu_node *rnp);
-static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
-			    bool lazy, unsigned long flags);
-static bool rcu_nocb_adopt_orphan_cbs(struct rcu_data *my_rdp,
-				      struct rcu_data *rdp,
-				      unsigned long flags);
+static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
+				  unsigned long j);
+static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
+				bool *was_alldone, unsigned long flags);
+static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_empty,
+				 unsigned long flags);
 static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp);
 static void do_nocb_deferred_wakeup(struct rcu_data *rdp);
 static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp);
-static void rcu_spawn_all_nocb_kthreads(int cpu);
+static void rcu_spawn_cpu_nocb_kthread(int cpu);
 static void __init rcu_spawn_nocb_kthreads(void);
+static void show_rcu_nocb_state(struct rcu_data *rdp);
+static void rcu_nocb_lock(struct rcu_data *rdp);
+static void rcu_nocb_unlock(struct rcu_data *rdp);
+static void rcu_nocb_unlock_irqrestore(struct rcu_data *rdp,
+				       unsigned long flags);
+static void rcu_lockdep_assert_cblist_protected(struct rcu_data *rdp);
```
```diff
@@ -483,18 +444,26 @@
 #ifdef CONFIG_RCU_NOCB_CPU
-static void __init rcu_organize_nocb_kthreads(struct rcu_state *rsp);
-#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
-static bool init_nocb_callback_list(struct rcu_data *rdp);
+static void __init rcu_organize_nocb_kthreads(void);
+#define rcu_nocb_lock_irqsave(rdp, flags)				\
+do {									\
+	if (!rcu_segcblist_is_offloaded(&(rdp)->cblist))		\
+		local_irq_save(flags);					\
+	else								\
+		raw_spin_lock_irqsave(&(rdp)->nocb_lock, (flags));	\
+} while (0)
+#else /* #ifdef CONFIG_RCU_NOCB_CPU */
+#define rcu_nocb_lock_irqsave(rdp, flags) local_irq_save(flags)
+#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
+
 static void rcu_bind_gp_kthread(void);
-static bool rcu_nohz_full_cpu(struct rcu_state *rsp);
+static bool rcu_nohz_full_cpu(void);
 static void rcu_dynticks_task_enter(void);
 static void rcu_dynticks_task_exit(void);
+static void rcu_dynticks_task_trace_enter(void);
+static void rcu_dynticks_task_trace_exit(void);
 
-#ifdef CONFIG_SRCU
-void srcu_online_cpu(unsigned int cpu);
-void srcu_offline_cpu(unsigned int cpu);
-#else /* #ifdef CONFIG_SRCU */
-void srcu_online_cpu(unsigned int cpu) { }
-void srcu_offline_cpu(unsigned int cpu) { }
-#endif /* #else #ifdef CONFIG_SRCU */
-
-#endif /* #ifndef RCU_TREE_NONCORE */
+/* Forward declarations for tree_stall.h */
+static void record_gp_stall_check_time(void);
+static void rcu_iw_handler(struct irq_work *iwp);
+static void check_cpu_stall(struct rcu_data *rdp);
+static void rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp,
+				     const unsigned long gpssdelay);
```
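The `rcu_nocb_lock_irqsave()` macro added above gives callers one locking idiom for `->cblist` whether or not the CPU's callbacks are offloaded: non-offloaded CPUs need only disable interrupts, while offloaded CPUs must also exclude the no-CBs kthreads via `->nocb_lock`. A sketch of the intended usage, paired with the `rcu_nocb_unlock_irqrestore()` declared earlier (the actual list manipulation is elided and the helper name is hypothetical):

```c
/* Illustrative only: the pattern a ->cblist update would follow. */
static void example_cblist_update(struct rcu_data *rdp)
{
	unsigned long flags;

	rcu_nocb_lock_irqsave(rdp, flags);
	/* ... manipulate rdp->cblist here ... */
	rcu_nocb_unlock_irqrestore(rdp, flags);
}
```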
|---|