2024-11-01 2f529f9b558ca1c1bd74be7437a84e4711743404
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -232,6 +232,11 @@
 	return 0;
 }
 
+static inline bool rcu_in_nonmaskable(void)
+{
+	return on_pipeline_entry() || in_nmi();
+}
+
 void rcu_softirq_qs(void)
 {
 	rcu_qs();
@@ -710,6 +715,7 @@
 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
 
 	instrumentation_begin();
+
 	/*
 	 * Check for ->dynticks_nmi_nesting underflow and bad ->dynticks.
 	 * (We are exiting an NMI handler, so RCU better be paying attention
@@ -735,7 +741,7 @@
 	trace_rcu_dyntick(TPS("Startirq"), rdp->dynticks_nmi_nesting, 0, atomic_read(&rdp->dynticks));
 	WRITE_ONCE(rdp->dynticks_nmi_nesting, 0); /* Avoid store tearing. */
 
-	if (!in_nmi())
+	if (!rcu_in_nonmaskable())
 		rcu_prepare_for_idle();
 
 	// instrumentation for the noinstr rcu_dynticks_eqs_enter()
@@ -746,7 +752,7 @@
 	rcu_dynticks_eqs_enter();
 	// ... but is no longer watching here.
 
-	if (!in_nmi())
+	if (!rcu_in_nonmaskable())
 		rcu_dynticks_task_enter();
 }
 
@@ -935,7 +941,7 @@
 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
 
 	// If we're here from NMI there's nothing to do.
-	if (in_nmi())
+	if (rcu_in_nonmaskable())
 		return;
 
 	RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
@@ -996,14 +1002,14 @@
 	 */
 	if (rcu_dynticks_curr_cpu_in_eqs()) {
 
-		if (!in_nmi())
+		if (!rcu_in_nonmaskable())
 			rcu_dynticks_task_exit();
 
 		// RCU is not watching here ...
 		rcu_dynticks_eqs_exit();
 		// ... but is watching here.
 
-		if (!in_nmi()) {
+		if (!rcu_in_nonmaskable()) {
 			instrumentation_begin();
 			rcu_cleanup_after_idle();
 			instrumentation_end();
@@ -1016,7 +1022,7 @@
 		instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));
 
 		incby = 1;
-	} else if (!in_nmi()) {
+	} else if (!rcu_in_nonmaskable()) {
 		instrumentation_begin();
 		rcu_irq_enter_check_tick();
 	} else {
@@ -1094,10 +1100,10 @@
 /**
  * rcu_is_watching - see if RCU thinks that the current CPU is not idle
  *
- * Return true if RCU is watching the running CPU, which means that this
- * CPU can safely enter RCU read-side critical sections. In other words,
- * if the current CPU is not in its idle loop or is in an interrupt or
- * NMI handler, return true.
+ * Return true if RCU is watching the running CPU, which means that
+ * this CPU can safely enter RCU read-side critical sections. In
+ * other words, if the current CPU is not in its idle loop or is in an
+ * interrupt or NMI handler, return true.
  *
  * Make notrace because it can be called by the internal functions of
  * ftrace, and making this notrace removes unnecessary recursion calls.
@@ -1106,6 +1112,9 @@
 {
 	bool ret;
 
+	if (on_pipeline_entry())
+		return true;
+
 	preempt_disable_notrace();
 	ret = !rcu_dynticks_curr_cpu_in_eqs();
 	preempt_enable_notrace();
@@ -1152,7 +1161,7 @@
 	struct rcu_node *rnp;
 	bool ret = false;
 
-	if (in_nmi() || !rcu_scheduler_fully_active)
+	if (rcu_in_nonmaskable() || !rcu_scheduler_fully_active)
 		return true;
 	preempt_disable_notrace();
 	rdp = this_cpu_ptr(&rcu_data);
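
The pattern above is a plain refactoring: every call site that used to test in_nmi() directly now goes through the new rcu_in_nonmaskable() predicate, so teaching RCU about a second non-maskable context (entry through what appears to be the IRQ pipeline's on_pipeline_entry(), an out-of-tree primitive not in mainline) is a one-line change in the helper. A minimal userspace sketch of that design choice follows; the fake_* flags are stand-ins I introduced for illustration, not the kernel's real in_nmi()/on_pipeline_entry() APIs:

#include <stdbool.h>
#include <stdio.h>

static bool fake_in_nmi;            /* stand-in for in_nmi() */
static bool fake_on_pipeline_entry; /* stand-in for on_pipeline_entry() */

/* Mirrors the helper added at the top of the patch: true in any context
 * where maskable-IRQ bookkeeping must be skipped. */
static bool rcu_in_nonmaskable(void)
{
	return fake_on_pipeline_entry || fake_in_nmi;
}

static void exit_irq_path(void)
{
	/* Mirrors the rcu_nmi_exit() hunk: idle preparation is skipped in
	 * any non-maskable context, not just a literal NMI. */
	if (!rcu_in_nonmaskable())
		printf("rcu_prepare_for_idle()\n");
	else
		printf("non-maskable context: skip idle preparation\n");
}

int main(void)
{
	fake_on_pipeline_entry = true;  /* e.g. pipelined IRQ entry stage */
	exit_irq_path();

	fake_on_pipeline_entry = false;
	fake_in_nmi = true;             /* a real NMI takes the same branch */
	exit_irq_path();
	return 0;
}

Funneling the check through one predicate keeps all six call sites in tree.c consistent by construction; only rcu_is_watching() tests on_pipeline_entry() directly, since there the pipeline case short-circuits before the in_nmi() logic would even run.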