@@ -24,40 +24,15 @@
  * Paul E. McKenney <paulmck@linux.vnet.ibm.com>
  */
 
-#include <linux/delay.h>
-#include <linux/gfp.h>
-#include <linux/oom.h>
-#include <linux/sched/debug.h>
-#include <linux/smpboot.h>
-#include <linux/sched/isolation.h>
-#include <uapi/linux/sched/types.h>
-#include "../time/tick-internal.h"
-
-#ifdef CONFIG_RCU_BOOST
-
 #include "../locking/rtmutex_common.h"
 
 /*
  * Control variables for per-CPU and per-rcu_node kthreads.  These
  * handle all flavors of RCU.
  */
-static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task);
 DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
 DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
 DEFINE_PER_CPU(char, rcu_cpu_has_work);
-
-#else /* #ifdef CONFIG_RCU_BOOST */
-
-/*
- * Some architectures do not define rt_mutexes, but if !CONFIG_RCU_BOOST,
- * all uses are in dead code.  Provide a definition to keep the compiler
- * happy, but add WARN_ON_ONCE() to complain if used in the wrong place.
- * This probably needs to be excluded from -rt builds.
- */
-#define rt_mutex_owner(a) ({ WARN_ON_ONCE(1); NULL; })
-#define rt_mutex_futex_unlock(x) WARN_ON_ONCE(1)
-
-#endif /* #else #ifdef CONFIG_RCU_BOOST */
 
 #ifdef CONFIG_RCU_NOCB_CPU
 static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
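
Note: for context on the per-CPU declarations kept above, DEFINE_PER_CPU() creates one instance of the variable per possible CPU, read locally with the __this_cpu_*() accessors and remotely with per_cpu(). A minimal sketch of the pattern (the demo_* names are placeholders, not part of the patch):

#include <linux/percpu.h>

static DEFINE_PER_CPU(char, demo_has_work);	/* one byte per CPU */

static void demo_mark_local_work(void)
{
	/* __this_cpu_*() assume the caller has preemption disabled. */
	__this_cpu_write(demo_has_work, 1);
}

static bool demo_cpu_has_work(int cpu)
{
	return per_cpu(demo_has_work, cpu) != 0;	/* cross-CPU read */
}
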
@@ -337,9 +312,13 @@
 	struct task_struct *t = current;
 	struct rcu_data *rdp;
 	struct rcu_node *rnp;
+	int sleeping_l = 0;
 
 	lockdep_assert_irqs_disabled();
-	WARN_ON_ONCE(!preempt && t->rcu_read_lock_nesting > 0);
+#if defined(CONFIG_PREEMPT_RT_FULL)
+	sleeping_l = t->sleeping_lock;
+#endif
+	WARN_ON_ONCE(!preempt && t->rcu_read_lock_nesting > 0 && !sleeping_l);
 	if (t->rcu_read_lock_nesting > 0 &&
 	    !t->rcu_read_unlock_special.b.blocked) {
 
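
Note: the hunk above suppresses the context-switch warning when a PREEMPT_RT_FULL kernel blocks on a sleeping spinlock inside an RCU read-side critical section, which is legal there because RT turns spinlocks into rtmutex-based sleeping locks. A hedged sketch of how the RT tree could maintain the t->sleeping_lock counter consulted here; the helper names are inferred from the field and are not part of this patch:

/* Assumed RT-tree helpers: bump the per-task counter around the
 * sleeping-spinlock slowpath so RCU can tell this kind of sleep apart
 * from an ordinary voluntary context switch. */
static inline void sleeping_lock_inc(void)
{
	current->sleeping_lock++;
}

static inline void sleeping_lock_dec(void)
{
	current->sleeping_lock--;
}
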
@@ -520,7 +499,7 @@
 	}
 
 	/* Hardware IRQ handlers cannot block, complain if they get here. */
-	if (in_irq() || in_serving_softirq()) {
+	if (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_OFFSET)) {
 		lockdep_rcu_suspicious(__FILE__, __LINE__,
 				       "rcu_read_unlock() from irq or softirq with blocking in critical section!!!\n");
 		pr_alert("->rcu_read_unlock_special: %#x (b: %d, enq: %d nq: %d)\n",
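
Note: the rewritten test inspects the raw preempt_count() bits instead of the in_irq()/in_serving_softirq() helpers, whose RT definitions can report true in preemptible softirq threads where blocking is legitimate. For reference, a sketch of the equivalent predicate using the mainline bit layout (the demo name is a placeholder):

#include <linux/preempt.h>

static inline bool demo_in_atomic_irq_context(void)
{
	/* HARDIRQ_MASK covers hardirq nesting; the single SOFTIRQ_OFFSET
	 * bit is set only while a softirq handler is actually executing,
	 * whereas local_bh_disable() adds SOFTIRQ_DISABLE_OFFSET (twice
	 * SOFTIRQ_OFFSET) and leaves this bit alone. */
	return preempt_count() & (HARDIRQ_MASK | SOFTIRQ_OFFSET);
}
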
@@ -1023,17 +1002,20 @@
 
 #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
 
-#ifdef CONFIG_RCU_BOOST
-
-static void rcu_wake_cond(struct task_struct *t, int status)
+/*
+ * If boosting, set rcuc kthreads to realtime priority.
+ */
+static void rcu_cpu_kthread_setup(unsigned int cpu)
 {
-	/*
-	 * If the thread is yielding, only wake it when this
-	 * is invoked from idle
-	 */
-	if (status != RCU_KTHREAD_YIELDING || is_idle_task(current))
-		wake_up_process(t);
+#ifdef CONFIG_RCU_BOOST
+	struct sched_param sp;
+
+	sp.sched_priority = kthread_prio;
+	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
+#endif /* #ifdef CONFIG_RCU_BOOST */
 }
+
+#ifdef CONFIG_RCU_BOOST
 
 /*
  * Carry out RCU priority boosting on the task indicated by ->exp_tasks
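
Note: after this hunk, rcu_cpu_kthread_setup() exists in all configurations and applies the SCHED_FIFO boost only under CONFIG_RCU_BOOST. A minimal self-contained sketch of the same self-promotion idiom (the demo name and priority value are placeholders, not from the patch):

#include <linux/kthread.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>

static int demo_thread_fn(void *unused)
{
	struct sched_param sp = { .sched_priority = 1 };

	/* The _nocheck variant skips the permission checks that do not
	 * apply to kernel threads promoting themselves. */
	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
	while (!kthread_should_stop())
		schedule_timeout_interruptible(HZ);
	return 0;
}
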
@@ -1173,23 +1155,6 @@
 }
 
 /*
- * Wake up the per-CPU kthread to invoke RCU callbacks.
- */
-static void invoke_rcu_callbacks_kthread(void)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	__this_cpu_write(rcu_cpu_has_work, 1);
-	if (__this_cpu_read(rcu_cpu_kthread_task) != NULL &&
-	    current != __this_cpu_read(rcu_cpu_kthread_task)) {
-		rcu_wake_cond(__this_cpu_read(rcu_cpu_kthread_task),
-			      __this_cpu_read(rcu_cpu_kthread_status));
-	}
-	local_irq_restore(flags);
-}
-
-/*
  * Is the current CPU running the RCU-callbacks kthread?
  * Caller must have preemption disabled.
  */
@@ -1243,67 +1208,6 @@
 	return 0;
 }
 
-static void rcu_kthread_do_work(void)
-{
-	rcu_do_batch(&rcu_sched_state, this_cpu_ptr(&rcu_sched_data));
-	rcu_do_batch(&rcu_bh_state, this_cpu_ptr(&rcu_bh_data));
-	rcu_do_batch(&rcu_preempt_state, this_cpu_ptr(&rcu_preempt_data));
-}
-
-static void rcu_cpu_kthread_setup(unsigned int cpu)
-{
-	struct sched_param sp;
-
-	sp.sched_priority = kthread_prio;
-	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
-}
-
-static void rcu_cpu_kthread_park(unsigned int cpu)
-{
-	per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
-}
-
-static int rcu_cpu_kthread_should_run(unsigned int cpu)
-{
-	return __this_cpu_read(rcu_cpu_has_work);
-}
-
-/*
- * Per-CPU kernel thread that invokes RCU callbacks.  This replaces the
- * RCU softirq used in flavors and configurations of RCU that do not
- * support RCU priority boosting.
- */
-static void rcu_cpu_kthread(unsigned int cpu)
-{
-	unsigned int *statusp = this_cpu_ptr(&rcu_cpu_kthread_status);
-	char work, *workp = this_cpu_ptr(&rcu_cpu_has_work);
-	int spincnt;
-
-	for (spincnt = 0; spincnt < 10; spincnt++) {
-		trace_rcu_utilization(TPS("Start CPU kthread@rcu_wait"));
-		local_bh_disable();
-		*statusp = RCU_KTHREAD_RUNNING;
-		this_cpu_inc(rcu_cpu_kthread_loops);
-		local_irq_disable();
-		work = *workp;
-		*workp = 0;
-		local_irq_enable();
-		if (work)
-			rcu_kthread_do_work();
-		local_bh_enable();
-		if (*workp == 0) {
-			trace_rcu_utilization(TPS("End CPU kthread@rcu_wait"));
-			*statusp = RCU_KTHREAD_WAITING;
-			return;
-		}
-	}
-	*statusp = RCU_KTHREAD_YIELDING;
-	trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield"));
-	schedule_timeout_interruptible(2);
-	trace_rcu_utilization(TPS("End CPU kthread@rcu_yield"));
-	*statusp = RCU_KTHREAD_WAITING;
-}
-
 /*
  * Set the per-rcu_node kthread's affinity to cover all CPUs that are
  * served by the rcu_node in question.  The CPU hotplug lock is still
@@ -1334,26 +1238,12 @@
 	free_cpumask_var(cm);
 }
 
-static struct smp_hotplug_thread rcu_cpu_thread_spec = {
-	.store			= &rcu_cpu_kthread_task,
-	.thread_should_run	= rcu_cpu_kthread_should_run,
-	.thread_fn		= rcu_cpu_kthread,
-	.thread_comm		= "rcuc/%u",
-	.setup			= rcu_cpu_kthread_setup,
-	.park			= rcu_cpu_kthread_park,
-};
-
 /*
  * Spawn boost kthreads -- called as soon as the scheduler is running.
  */
 static void __init rcu_spawn_boost_kthreads(void)
 {
 	struct rcu_node *rnp;
-	int cpu;
-
-	for_each_possible_cpu(cpu)
-		per_cpu(rcu_cpu_has_work, cpu) = 0;
-	BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec));
 	rcu_for_each_leaf_node(rcu_state_p, rnp)
 		(void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp);
 }
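
Note: the removed rcu_cpu_thread_spec used the smpboot API, which spawns one kthread per CPU and parks/unparks them automatically across CPU hotplug. For context, a minimal sketch of that API with placeholder demo names (the real rcuc spec is the one deleted above):

#include <linux/percpu.h>
#include <linux/smpboot.h>

static DEFINE_PER_CPU(struct task_struct *, demo_task);
static DEFINE_PER_CPU(char, demo_has_work);

static int demo_should_run(unsigned int cpu)
{
	return __this_cpu_read(demo_has_work);
}

static void demo_fn(unsigned int cpu)
{
	__this_cpu_write(demo_has_work, 0);
	/* ... invoke the per-CPU work here ... */
}

static struct smp_hotplug_thread demo_spec = {
	.store			= &demo_task,
	.thread_should_run	= demo_should_run,
	.thread_fn		= demo_fn,
	.thread_comm		= "demo/%u",
};

static int __init demo_init(void)
{
	/* One "demo/N" kthread per online CPU, hotplug-aware. */
	return smpboot_register_percpu_thread(&demo_spec);
}
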
@@ -1374,11 +1264,6 @@
 	__releases(rnp->lock)
 {
 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
-}
-
-static void invoke_rcu_callbacks_kthread(void)
-{
-	WARN_ON_ONCE(1);
 }
 
 static bool rcu_is_callbacks_kthread(void)
@@ -1404,7 +1289,7 @@
 
 #endif /* #else #ifdef CONFIG_RCU_BOOST */
 
-#if !defined(CONFIG_RCU_FAST_NO_HZ)
+#if !defined(CONFIG_RCU_FAST_NO_HZ) || defined(CONFIG_PREEMPT_RT_FULL)
 
 /*
  * Check to see if any future RCU-related work will need to be done
@@ -1420,7 +1305,9 @@
 	*nextevt = KTIME_MAX;
 	return rcu_cpu_has_callbacks(NULL);
 }
+#endif /* !defined(CONFIG_RCU_FAST_NO_HZ) || defined(CONFIG_PREEMPT_RT_FULL) */
 
+#if !defined(CONFIG_RCU_FAST_NO_HZ)
 /*
  * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up
  * after it.
@@ -1517,6 +1404,8 @@
 	return cbs_ready;
 }
 
+#ifndef CONFIG_PREEMPT_RT_FULL
+
 /*
  * Allow the CPU to enter dyntick-idle mode unless it has callbacks ready
  * to invoke.  If the CPU has callbacks, try to advance them.  Tell the
@@ -1559,6 +1448,7 @@
 	*nextevt = basemono + dj * TICK_NSEC;
 	return 0;
 }
+#endif /* #ifndef CONFIG_PREEMPT_RT_FULL */
 
 /*
  * Prepare a CPU for idle from an RCU perspective.  The first major task
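
Note: taken together, the last three hunks leave rcu_needs_cpu() selected as follows; a sketch of the resulting preprocessor structure (signatures abbreviated, inferred from the hunks above rather than shown in full by the patch):

#if !defined(CONFIG_RCU_FAST_NO_HZ) || defined(CONFIG_PREEMPT_RT_FULL)
/* Simple variant: report KTIME_MAX, poll rcu_cpu_has_callbacks(). */
int rcu_needs_cpu(u64 basemono, u64 *nextevt);
#endif

#if !defined(CONFIG_RCU_FAST_NO_HZ)
/* Empty cleanup/prepare stubs: nothing to undo without FAST_NO_HZ. */
#else
#ifndef CONFIG_PREEMPT_RT_FULL
/* FAST_NO_HZ variant: tries to advance callbacks before idle; RT
 * kernels compile it out and take the simple variant above instead. */
int rcu_needs_cpu(u64 basemono, u64 *nextevt);
#endif
/* rcu_prepare_for_idle() and friends remain FAST_NO_HZ-only. */
#endif
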
---|