2023-11-06 15ade055295d13f95d49e3d99b09f3bbfb4a43e7
kernel/kernel/rcu/tree_plugin.h
@@ -24,40 +24,15 @@
  * Paul E. McKenney <paulmck@linux.vnet.ibm.com>
  */
 
-#include <linux/delay.h>
-#include <linux/gfp.h>
-#include <linux/oom.h>
-#include <linux/sched/debug.h>
-#include <linux/smpboot.h>
-#include <linux/sched/isolation.h>
-#include <uapi/linux/sched/types.h>
-#include "../time/tick-internal.h"
-
-#ifdef CONFIG_RCU_BOOST
-
 #include "../locking/rtmutex_common.h"
 
 /*
  * Control variables for per-CPU and per-rcu_node kthreads. These
  * handle all flavors of RCU.
  */
-static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task);
 DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
 DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
 DEFINE_PER_CPU(char, rcu_cpu_has_work);
-
-#else /* #ifdef CONFIG_RCU_BOOST */
-
-/*
- * Some architectures do not define rt_mutexes, but if !CONFIG_RCU_BOOST,
- * all uses are in dead code. Provide a definition to keep the compiler
- * happy, but add WARN_ON_ONCE() to complain if used in the wrong place.
- * This probably needs to be excluded from -rt builds.
- */
-#define rt_mutex_owner(a) ({ WARN_ON_ONCE(1); NULL; })
-#define rt_mutex_futex_unlock(x) WARN_ON_ONCE(1)
-
-#endif /* #else #ifdef CONFIG_RCU_BOOST */
 
 #ifdef CONFIG_RCU_NOCB_CPU
 static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
@@ -337,9 +312,13 @@
         struct task_struct *t = current;
         struct rcu_data *rdp;
         struct rcu_node *rnp;
+        int sleeping_l = 0;
 
         lockdep_assert_irqs_disabled();
-        WARN_ON_ONCE(!preempt && t->rcu_read_lock_nesting > 0);
+#if defined(CONFIG_PREEMPT_RT_FULL)
+        sleeping_l = t->sleeping_lock;
+#endif
+        WARN_ON_ONCE(!preempt && t->rcu_read_lock_nesting > 0 && !sleeping_l);
         if (t->rcu_read_lock_nesting > 0 &&
             !t->rcu_read_unlock_special.b.blocked) {
 
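
Note (not part of the patch): the new sleeping_l check covers PREEMPT_RT_FULL, where spinlock_t is backed by an rtmutex, so spin_lock() may block and context-switch inside an RCU read-side critical section; t->sleeping_lock is assumed here to be the RT series' per-task counter that marks exactly that state, which keeps the legitimate case from tripping the WARN_ON_ONCE(). A minimal sketch of the reader pattern being tolerated (demo_lock and demo_reader are hypothetical names, not from this file):

#include <linux/spinlock.h>
#include <linux/rcupdate.h>

static DEFINE_SPINLOCK(demo_lock);      /* hypothetical lock, illustration only */

static void demo_reader(void)
{
        rcu_read_lock();
        /*
         * On PREEMPT_RT_FULL this spin_lock() can sleep, so the task may
         * context-switch with rcu_read_lock_nesting > 0; the
         * sleeping_lock-aware WARN_ON_ONCE() above now permits that.
         */
        spin_lock(&demo_lock);
        spin_unlock(&demo_lock);
        rcu_read_unlock();
}

On a non-RT build the same reader never sleeps in spin_lock(), so the original warning condition still holds there.
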
@@ -520,7 +499,7 @@
         }
 
         /* Hardware IRQ handlers cannot block, complain if they get here. */
-        if (in_irq() || in_serving_softirq()) {
+        if (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_OFFSET)) {
                 lockdep_rcu_suspicious(__FILE__, __LINE__,
                                        "rcu_read_unlock() from irq or softirq with blocking in critical section!!!\n");
                 pr_alert("->rcu_read_unlock_special: %#x (b: %d, enq: %d nq: %d)\n",
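
Note (not part of the patch): on mainline the two forms are equivalent, since in_irq() tests the HARDIRQ bits of preempt_count() and in_serving_softirq() tests SOFTIRQ_OFFSET, the "currently running a softirq handler" bit, while local_bh_disable() raises the count by 2 * SOFTIRQ_OFFSET and matches neither. Open-coding the mask here presumably sidesteps the RT tree's redefinition of the softirq helpers, where softirqs run in task context. A sketch of what the test checks, assuming the mainline preempt_count layout:

#include <linux/preempt.h>
#include <linux/types.h>

static inline bool demo_in_hardirq_or_serving_softirq(void)
{
        /*
         * Nonzero in a hard-IRQ handler or while a softirq handler is
         * executing; zero in a region that merely disabled bottom halves,
         * because local_bh_disable() adds SOFTIRQ_DISABLE_OFFSET
         * (2 * SOFTIRQ_OFFSET) and leaves the SOFTIRQ_OFFSET bit clear.
         */
        return preempt_count() & (HARDIRQ_MASK | SOFTIRQ_OFFSET);
}
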
@@ -1023,17 +1002,20 @@
 
 #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
 
-#ifdef CONFIG_RCU_BOOST
-
-static void rcu_wake_cond(struct task_struct *t, int status)
+/*
+ * If boosting, set rcuc kthreads to realtime priority.
+ */
+static void rcu_cpu_kthread_setup(unsigned int cpu)
 {
-        /*
-         * If the thread is yielding, only wake it when this
-         * is invoked from idle
-         */
-        if (status != RCU_KTHREAD_YIELDING || is_idle_task(current))
-                wake_up_process(t);
+#ifdef CONFIG_RCU_BOOST
+        struct sched_param sp;
+
+        sp.sched_priority = kthread_prio;
+        sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
+#endif /* #ifdef CONFIG_RCU_BOOST */
 }
+
+#ifdef CONFIG_RCU_BOOST
 
 /*
  * Carry out RCU priority boosting on the task indicated by ->exp_tasks
@@ -1173,23 +1155,6 @@
 }
 
 /*
- * Wake up the per-CPU kthread to invoke RCU callbacks.
- */
-static void invoke_rcu_callbacks_kthread(void)
-{
-        unsigned long flags;
-
-        local_irq_save(flags);
-        __this_cpu_write(rcu_cpu_has_work, 1);
-        if (__this_cpu_read(rcu_cpu_kthread_task) != NULL &&
-            current != __this_cpu_read(rcu_cpu_kthread_task)) {
-                rcu_wake_cond(__this_cpu_read(rcu_cpu_kthread_task),
-                              __this_cpu_read(rcu_cpu_kthread_status));
-        }
-        local_irq_restore(flags);
-}
-
-/*
  * Is the current CPU running the RCU-callbacks kthread?
  * Caller must have preemption disabled.
  */
@@ -1243,67 +1208,6 @@
         return 0;
 }
 
-static void rcu_kthread_do_work(void)
-{
-        rcu_do_batch(&rcu_sched_state, this_cpu_ptr(&rcu_sched_data));
-        rcu_do_batch(&rcu_bh_state, this_cpu_ptr(&rcu_bh_data));
-        rcu_do_batch(&rcu_preempt_state, this_cpu_ptr(&rcu_preempt_data));
-}
-
-static void rcu_cpu_kthread_setup(unsigned int cpu)
-{
-        struct sched_param sp;
-
-        sp.sched_priority = kthread_prio;
-        sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
-}
-
-static void rcu_cpu_kthread_park(unsigned int cpu)
-{
-        per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
-}
-
-static int rcu_cpu_kthread_should_run(unsigned int cpu)
-{
-        return __this_cpu_read(rcu_cpu_has_work);
-}
-
-/*
- * Per-CPU kernel thread that invokes RCU callbacks. This replaces the
- * RCU softirq used in flavors and configurations of RCU that do not
- * support RCU priority boosting.
- */
-static void rcu_cpu_kthread(unsigned int cpu)
-{
-        unsigned int *statusp = this_cpu_ptr(&rcu_cpu_kthread_status);
-        char work, *workp = this_cpu_ptr(&rcu_cpu_has_work);
-        int spincnt;
-
-        for (spincnt = 0; spincnt < 10; spincnt++) {
-                trace_rcu_utilization(TPS("Start CPU kthread@rcu_wait"));
-                local_bh_disable();
-                *statusp = RCU_KTHREAD_RUNNING;
-                this_cpu_inc(rcu_cpu_kthread_loops);
-                local_irq_disable();
-                work = *workp;
-                *workp = 0;
-                local_irq_enable();
-                if (work)
-                        rcu_kthread_do_work();
-                local_bh_enable();
-                if (*workp == 0) {
-                        trace_rcu_utilization(TPS("End CPU kthread@rcu_wait"));
-                        *statusp = RCU_KTHREAD_WAITING;
-                        return;
-                }
-        }
-        *statusp = RCU_KTHREAD_YIELDING;
-        trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield"));
-        schedule_timeout_interruptible(2);
-        trace_rcu_utilization(TPS("End CPU kthread@rcu_yield"));
-        *statusp = RCU_KTHREAD_WAITING;
-}
-
 /*
  * Set the per-rcu_node kthread's affinity to cover all CPUs that are
  * served by the rcu_node in question. The CPU hotplug lock is still
@@ -1334,26 +1238,12 @@
         free_cpumask_var(cm);
 }
 
-static struct smp_hotplug_thread rcu_cpu_thread_spec = {
-        .store                  = &rcu_cpu_kthread_task,
-        .thread_should_run      = rcu_cpu_kthread_should_run,
-        .thread_fn              = rcu_cpu_kthread,
-        .thread_comm            = "rcuc/%u",
-        .setup                  = rcu_cpu_kthread_setup,
-        .park                   = rcu_cpu_kthread_park,
-};
-
 /*
  * Spawn boost kthreads -- called as soon as the scheduler is running.
  */
 static void __init rcu_spawn_boost_kthreads(void)
 {
         struct rcu_node *rnp;
-        int cpu;
-
-        for_each_possible_cpu(cpu)
-                per_cpu(rcu_cpu_has_work, cpu) = 0;
-        BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec));
         rcu_for_each_leaf_node(rcu_state_p, rnp)
                 (void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp);
 }
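
Note (not part of the patch): this hunk and the earlier ones drop the smpboot wiring of the per-CPU rcuc/%u threads from this file: the thread callbacks, the smp_hotplug_thread spec, and its registration in rcu_spawn_boost_kthreads(). For reference, a minimal sketch of the smp_hotplug_thread pattern the removed code followed; the demo_* names and bodies are hypothetical, illustration only:

#include <linux/smpboot.h>
#include <linux/percpu.h>
#include <linux/sched.h>

static DEFINE_PER_CPU(struct task_struct *, demo_thread_task);
static DEFINE_PER_CPU(int, demo_has_work);

static int demo_thread_should_run(unsigned int cpu)
{
        return __this_cpu_read(demo_has_work);
}

static void demo_thread_fn(unsigned int cpu)
{
        __this_cpu_write(demo_has_work, 0);     /* consume the pending work */
}

static struct smp_hotplug_thread demo_thread_spec = {
        .store                  = &demo_thread_task,
        .thread_should_run      = demo_thread_should_run,
        .thread_fn              = demo_thread_fn,
        .thread_comm            = "demo/%u",
};

/* Registered once, e.g. from an __init function:
 *      BUG_ON(smpboot_register_percpu_thread(&demo_thread_spec));
 */
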
@@ -1374,11 +1264,6 @@
         __releases(rnp->lock)
 {
         raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
-}
-
-static void invoke_rcu_callbacks_kthread(void)
-{
-        WARN_ON_ONCE(1);
 }
 
 static bool rcu_is_callbacks_kthread(void)
@@ -1404,7 +1289,7 @@
 
 #endif /* #else #ifdef CONFIG_RCU_BOOST */
 
-#if !defined(CONFIG_RCU_FAST_NO_HZ)
+#if !defined(CONFIG_RCU_FAST_NO_HZ) || defined(CONFIG_PREEMPT_RT_FULL)
 
 /*
  * Check to see if any future RCU-related work will need to be done
@@ -1420,7 +1305,9 @@
         *nextevt = KTIME_MAX;
         return rcu_cpu_has_callbacks(NULL);
 }
+#endif /* !defined(CONFIG_RCU_FAST_NO_HZ) || defined(CONFIG_PREEMPT_RT_FULL) */
 
+#if !defined(CONFIG_RCU_FAST_NO_HZ)
 /*
  * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up
  * after it.
@@ -1517,6 +1404,8 @@
         return cbs_ready;
 }
 
+#ifndef CONFIG_PREEMPT_RT_FULL
+
 /*
  * Allow the CPU to enter dyntick-idle mode unless it has callbacks ready
  * to invoke. If the CPU has callbacks, try to advance them. Tell the
@@ -1559,6 +1448,7 @@
         *nextevt = basemono + dj * TICK_NSEC;
         return 0;
 }
+#endif /* #ifndef CONFIG_PREEMPT_RT_FULL */
 
 /*
  * Prepare a CPU for idle from an RCU perspective. The first major task
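
Note (not part of the patch): taken together, the last four hunks rearrange the preprocessor guards so that a PREEMPT_RT_FULL build always gets the simple rcu_needs_cpu() and never compiles the RCU_FAST_NO_HZ variant. A condensed outline of the resulting structure, inferred from the hunks above with the function bodies elided, offered as a reading aid rather than compilable code:

#if !defined(CONFIG_RCU_FAST_NO_HZ) || defined(CONFIG_PREEMPT_RT_FULL)
int rcu_needs_cpu(u64 basemono, u64 *nextevt)
{
        /* simple variant: *nextevt = KTIME_MAX; report whether callbacks exist */
}
#endif

#if !defined(CONFIG_RCU_FAST_NO_HZ)
/* no-op cleanup helpers for builds without RCU_FAST_NO_HZ */
#else
/* RCU_FAST_NO_HZ helpers, e.g. rcu_try_advance_all_cbs() */
#ifndef CONFIG_PREEMPT_RT_FULL
int rcu_needs_cpu(u64 basemono, u64 *nextevt)
{
        /* dyntick-aware variant: may set *nextevt = basemono + dj * TICK_NSEC */
}
#endif /* #ifndef CONFIG_PREEMPT_RT_FULL */
#endif
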