```diff
@@ -232,6 +232,11 @@
 	return 0;
 }
 
+static inline bool rcu_in_nonmaskable(void)
+{
+	return on_pipeline_entry() || in_nmi();
+}
+
 void rcu_softirq_qs(void)
 {
 	rcu_qs();
```
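The helper above is the heart of the patch: it widens every "are we in non-maskable context?" test below from a plain in_nmi() to one that also covers the IRQ-pipeline entry path, where on_pipeline_entry() is true. For builds without the pipeline one would expect the predicate to compile away; a minimal sketch, assuming the usual Kconfig-stub convention (this stub is illustrative, not part of the patch):

```c
/*
 * Illustrative fallback, assuming the usual Kconfig-stub convention:
 * without CONFIG_IRQ_PIPELINE there is no pipeline entry context, so
 * rcu_in_nonmaskable() degenerates to a plain in_nmi() check and the
 * patched code behaves exactly as mainline does.
 */
#ifndef CONFIG_IRQ_PIPELINE
static inline bool on_pipeline_entry(void)
{
	return false;
}
#endif
```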
```diff
@@ -710,6 +715,7 @@
 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
 
 	instrumentation_begin();
+
 	/*
 	 * Check for ->dynticks_nmi_nesting underflow and bad ->dynticks.
 	 * (We are exiting an NMI handler, so RCU better be paying attention
@@ -735,7 +741,7 @@
 	trace_rcu_dyntick(TPS("Startirq"), rdp->dynticks_nmi_nesting, 0, atomic_read(&rdp->dynticks));
 	WRITE_ONCE(rdp->dynticks_nmi_nesting, 0); /* Avoid store tearing. */
 
-	if (!in_nmi())
+	if (!rcu_in_nonmaskable())
 		rcu_prepare_for_idle();
 
 	// instrumentation for the noinstr rcu_dynticks_eqs_enter()
@@ -746,7 +752,7 @@
 	rcu_dynticks_eqs_enter();
 	// ... but is no longer watching here.
 
-	if (!in_nmi())
+	if (!rcu_in_nonmaskable())
 		rcu_dynticks_task_enter();
 }
 
```
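Both guarded calls in this EQS-entry path (which matches the shape of mainline's rcu_nmi_exit()), rcu_prepare_for_idle() and rcu_dynticks_task_enter(), do bookkeeping that assumes a regular maskable context; they were already skipped under NMI, and the pipeline entry path now gets the same treatment. A hypothetical assertion, not part of the patch, makes the invariant explicit:

```c
/*
 * Hypothetical helper (not part of the patch) spelling out the
 * invariant behind the two guards above: the idle bookkeeping
 * hooks must never run from NMI or pipeline-entry context.
 */
static inline void rcu_assert_maskable_context(void)
{
	WARN_ON_ONCE(rcu_in_nonmaskable());
}
```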
```diff
@@ -935,7 +941,7 @@
 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
 
 	// If we're here from NMI there's nothing to do.
-	if (in_nmi())
+	if (rcu_in_nonmaskable())
 		return;
 
 	RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
```
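Judging by the comment and the lockdep warning, this early return sits at the top of rcu_irq_enter_check_tick(); the tick housekeeping further down takes the rcu_node lock, which is off-limits in non-maskable context. The same predicate also fences the call site inside the EQS-exit path below, so this check defends against the function's other callers in the generic entry code. An illustrative, simplified shape of that calling order (names assumed, details elided from the real entry code):

```c
/*
 * Illustrative, simplified entry-path shape (names assumed): the
 * tick check is meant for in-band interrupts, and the early return
 * above keeps it a harmless no-op if it is ever reached from
 * non-maskable context anyway.
 */
static void example_irq_entry(void)
{
	rcu_nmi_enter();		/* RCU starts watching */
	rcu_irq_enter_check_tick();	/* bails out when non-maskable */
}
```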
```diff
@@ -996,14 +1002,14 @@
 	 */
 	if (rcu_dynticks_curr_cpu_in_eqs()) {
 
-		if (!in_nmi())
+		if (!rcu_in_nonmaskable())
 			rcu_dynticks_task_exit();
 
 		// RCU is not watching here ...
 		rcu_dynticks_eqs_exit();
 		// ... but is watching here.
 
-		if (!in_nmi()) {
+		if (!rcu_in_nonmaskable()) {
 			instrumentation_begin();
 			rcu_cleanup_after_idle();
 			instrumentation_end();
@@ -1016,7 +1022,7 @@
 		instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));
 
 		incby = 1;
-	} else if (!in_nmi()) {
+	} else if (!rcu_in_nonmaskable()) {
 		instrumentation_begin();
 		rcu_irq_enter_check_tick();
 	} else {
```
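Taken together, the two hunks above reshape the EQS-exit leg of what appears to be rcu_nmi_enter(): task-exit tracking and post-idle cleanup stay confined to maskable context, and the tick recheck in the else-if branch is likewise skipped on NMI and pipeline entry. Condensed to its decision tree (a sketch of the patched control flow only; instrumentation and tracing elided):

```c
/* Sketch of the patched control flow (instrumentation elided): */
if (rcu_dynticks_curr_cpu_in_eqs()) {
	if (!rcu_in_nonmaskable())
		rcu_dynticks_task_exit();	/* resume task tracking */
	rcu_dynticks_eqs_exit();		/* RCU is watching again */
	if (!rcu_in_nonmaskable())
		rcu_cleanup_after_idle();	/* maskable context only */
	incby = 1;				/* outermost entry from idle */
} else if (!rcu_in_nonmaskable()) {
	rcu_irq_enter_check_tick();		/* may take the rcu_node lock */
}
```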
```diff
@@ -1094,10 +1100,10 @@
 /**
  * rcu_is_watching - see if RCU thinks that the current CPU is not idle
  *
- * Return true if RCU is watching the running CPU, which means that this
- * CPU can safely enter RCU read-side critical sections. In other words,
- * if the current CPU is not in its idle loop or is in an interrupt or
- * NMI handler, return true.
+ * Return true if RCU is watching the running CPU, which means that
+ * this CPU can safely enter RCU read-side critical sections. In
+ * other words, if the current CPU is not in its idle loop or is in an
+ * interrupt or NMI handler, return true.
 *
 * Make notrace because it can be called by the internal functions of
 * ftrace, and making this notrace removes unnecessary recursion calls.
@@ -1106,6 +1112,9 @@
 {
 	bool ret;
 
+	if (on_pipeline_entry())
+		return true;
+
 	preempt_disable_notrace();
 	ret = !rcu_dynticks_curr_cpu_in_eqs();
 	preempt_enable_notrace();
```
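rcu_is_watching() now answers true by fiat on the pipeline entry path, before touching the preempt count at all: that context is treated like an NMI, where read-side critical sections are safe once the dynticks bookkeeping has run. Typical consumers follow the pattern below (illustrative only; the hook name is made up):

```c
/*
 * Illustrative consumer (the hook name is made up): tracing and
 * debug code bails out when RCU is not watching, because
 * rcu_read_lock() offers no protection inside an extended
 * quiescent state.
 */
static void example_trace_hook(void)
{
	if (!rcu_is_watching())
		return;		/* in EQS: no RCU protection available */
	rcu_read_lock();
	/* safe to dereference RCU-protected pointers here */
	rcu_read_unlock();
}
```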
```diff
@@ -1152,7 +1161,7 @@
 	struct rcu_node *rnp;
 	bool ret = false;
 
-	if (in_nmi() || !rcu_scheduler_fully_active)
+	if (rcu_in_nonmaskable() || !rcu_scheduler_fully_active)
 		return true;
 	preempt_disable_notrace();
 	rdp = this_cpu_ptr(&rcu_data);
```
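The final hunk matches mainline's rcu_lockdep_current_cpu_online(), which backs lockdep's "RCU used from an offline CPU" diagnostics; answering true early keeps those diagnostics quiet in contexts where the per-CPU hierarchy walked below cannot be sampled safely. The consuming pattern looks like this (illustrative wrapper, name made up; RCU_LOCKDEP_WARN is the real mainline macro):

```c
/*
 * Illustrative wrapper (name made up) around the mainline usage
 * pattern: lockdep builds assert that RCU primitives are not used
 * from a CPU that RCU considers offline.
 */
static void example_offline_assert(void)
{
	RCU_LOCKDEP_WARN(!rcu_lockdep_current_cpu_online(),
			 "Illegal RCU usage from an offline CPU");
}
```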