2023-12-11 6778948f9de86c3cfaf36725a7c87dcff9ba247f
kernel/include/linux/hardirq.h
@@ -2,30 +2,27 @@
 #ifndef LINUX_HARDIRQ_H
 #define LINUX_HARDIRQ_H
 
+#include <linux/context_tracking_state.h>
 #include <linux/preempt.h>
 #include <linux/lockdep.h>
 #include <linux/ftrace_irq.h>
 #include <linux/vtime.h>
 #include <asm/hardirq.h>
 
-
 extern void synchronize_irq(unsigned int irq);
 extern bool synchronize_hardirq(unsigned int irq);
 
-#if defined(CONFIG_TINY_RCU)
-
-static inline void rcu_nmi_enter(void)
-{
-}
-
-static inline void rcu_nmi_exit(void)
-{
-}
-
+#ifdef CONFIG_NO_HZ_FULL
+void __rcu_irq_enter_check_tick(void);
 #else
-extern void rcu_nmi_enter(void);
-extern void rcu_nmi_exit(void);
+static inline void __rcu_irq_enter_check_tick(void) { }
 #endif
+
+static __always_inline void rcu_irq_enter_check_tick(void)
+{
+	if (context_tracking_enabled())
+		__rcu_irq_enter_check_tick();
+}
 
 /*
  * It is safe to do non-atomic ops on ->hardirq_context,
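The hunk above replaces the old inline TINY_RCU stubs with a two-level tick check: rcu_irq_enter_check_tick() stays inline and only calls the out-of-line __rcu_irq_enter_check_tick() when context tracking is active. A minimal userspace sketch of that fast-path/slow-path split (feature_enabled, slow_path and check_tick are illustrative stand-ins, not kernel symbols):

	/* Toy model of the rcu_irq_enter_check_tick() split: the wrapper is
	 * inline so the common "feature off" case costs one predictable
	 * branch, and only the rare case pays for an out-of-line call.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	static bool feature_enabled;	/* stands in for context_tracking_enabled() */

	static void slow_path(void)	/* stands in for __rcu_irq_enter_check_tick() */
	{
		puts("slow path: recheck whether the tick must be restarted");
	}

	static inline void check_tick(void)	/* rcu_irq_enter_check_tick() shape */
	{
		if (feature_enabled)
			slow_path();
	}

	int main(void)
	{
		check_tick();		/* feature off: branch not taken, no call */
		feature_enabled = true;
		check_tick();		/* feature on: takes the slow path */
		return 0;
	}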
@@ -37,56 +34,119 @@
 	do { \
 		account_irq_enter_time(current); \
 		preempt_count_add(HARDIRQ_OFFSET); \
-		trace_hardirq_enter(); \
+		lockdep_hardirq_enter(); \
+	} while (0)
+
+/*
+ * Like __irq_enter() without time accounting for fast
+ * interrupts, e.g. reschedule IPI where time accounting
+ * is more expensive than the actual interrupt.
+ */
+#define __irq_enter_raw() \
+	do { \
+		preempt_count_add(HARDIRQ_OFFSET); \
+		lockdep_hardirq_enter(); \
 	} while (0)
 
 /*
  * Enter irq context (on NO_HZ, update jiffies):
  */
-extern void irq_enter(void);
+void irq_enter(void);
+/*
+ * Like irq_enter(), but RCU is already watching.
+ */
+void irq_enter_rcu(void);
 
 /*
  * Exit irq context without processing softirqs:
  */
 #define __irq_exit() \
 	do { \
-		trace_hardirq_exit(); \
+		lockdep_hardirq_exit(); \
 		account_irq_exit_time(current); \
+		preempt_count_sub(HARDIRQ_OFFSET); \
+	} while (0)
+
+/*
+ * Like __irq_exit() without time accounting
+ */
+#define __irq_exit_raw() \
+	do { \
+		lockdep_hardirq_exit(); \
 		preempt_count_sub(HARDIRQ_OFFSET); \
 	} while (0)
 
 /*
  * Exit irq context and process softirqs if needed:
  */
-extern void irq_exit(void);
+void irq_exit(void);
+
+/*
+ * Like irq_exit(), but return with RCU watching.
+ */
+void irq_exit_rcu(void);
 
 #ifndef arch_nmi_enter
 #define arch_nmi_enter()	do { } while (0)
 #define arch_nmi_exit()		do { } while (0)
 #endif
 
-#define nmi_enter() \
+#ifdef CONFIG_TINY_RCU
+static inline void rcu_nmi_enter(void) { }
+static inline void rcu_nmi_exit(void) { }
+#else
+extern void rcu_nmi_enter(void);
+extern void rcu_nmi_exit(void);
+#endif
+
+/*
+ * NMI vs Tracing
+ * --------------
+ *
+ * We must not land in a tracer until (or after) we've changed preempt_count
+ * such that in_nmi() becomes true. To that effect all NMI C entry points must
+ * be marked 'notrace' and call nmi_enter() as soon as possible.
+ */
+
+/*
+ * nmi_enter() can nest up to 15 times; see NMI_BITS.
+ */
+#define __nmi_enter() \
 	do { \
+		lockdep_off(); \
 		arch_nmi_enter(); \
 		printk_nmi_enter(); \
-		lockdep_off(); \
-		ftrace_nmi_enter(); \
-		BUG_ON(in_nmi()); \
-		preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET); \
+		BUG_ON(in_nmi() == NMI_MASK); \
+		__preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET); \
+	} while (0)
+
+#define nmi_enter() \
+	do { \
+		__nmi_enter(); \
+		lockdep_hardirq_enter(); \
 		rcu_nmi_enter(); \
-		trace_hardirq_enter(); \
+		instrumentation_begin(); \
+		ftrace_nmi_enter(); \
+		instrumentation_end(); \
+	} while (0)
+
+#define __nmi_exit() \
+	do { \
+		BUG_ON(!in_nmi()); \
+		__preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET); \
+		printk_nmi_exit(); \
+		arch_nmi_exit(); \
+		lockdep_on(); \
 	} while (0)
 
 #define nmi_exit() \
 	do { \
-		trace_hardirq_exit(); \
-		rcu_nmi_exit(); \
-		BUG_ON(!in_nmi()); \
-		preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET); \
+		instrumentation_begin(); \
 		ftrace_nmi_exit(); \
-		lockdep_on(); \
-		printk_nmi_exit(); \
-		arch_nmi_exit(); \
+		instrumentation_end(); \
+		rcu_nmi_exit(); \
+		lockdep_hardirq_exit(); \
+		__nmi_exit(); \
 	} while (0)
 
 #endif /* LINUX_HARDIRQ_H */
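The new __nmi_enter() trades the old BUG_ON(in_nmi()) for BUG_ON(in_nmi() == NMI_MASK), matching the comment that nmi_enter() may nest up to 15 times. A standalone sketch of that counter arithmetic, assuming the upstream layout of NMI_BITS = 4 at NMI_SHIFT = 20 in preempt_count (values as in linux/preempt.h; everything else here is a toy):

	/* Each nmi_enter() adds NMI_OFFSET to the count; the 4-bit field
	 * holds depths 0..15, so __nmi_enter() traps only when the field
	 * is already saturated.
	 */
	#include <assert.h>
	#include <stdio.h>

	#define NMI_SHIFT	20
	#define NMI_BITS	4
	#define NMI_OFFSET	(1UL << NMI_SHIFT)
	#define NMI_MASK	(((1UL << NMI_BITS) - 1) << NMI_SHIFT)

	static unsigned long preempt_count;	/* stand-in for the per-task count */

	#define in_nmi()	(preempt_count & NMI_MASK)

	int main(void)
	{
		for (int depth = 1; depth <= 15; depth++) {
			assert(in_nmi() != NMI_MASK);	/* the BUG_ON() condition */
			preempt_count += NMI_OFFSET;	/* one nested nmi_enter() */
		}
		/* a 16th nmi_enter() would overflow the 4-bit field */
		printf("nested to depth %lu, mask full: %d\n",
		       in_nmi() >> NMI_SHIFT, in_nmi() == NMI_MASK);
		return 0;
	}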
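The nmi_enter()/nmi_exit() bodies are also reordered so the ftrace hooks run only between instrumentation_begin()/instrumentation_end(), after the preempt_count update has made in_nmi() true, per the new "NMI vs Tracing" comment. A toy model of that ordering rule, with purely illustrative names (tracer_hook, toy_nmi_enter, toy_nmi_exit):

	/* The "tracer" may rely on being in NMI context, so the nesting
	 * count must be bumped before any traceable call on entry, and
	 * dropped only after the last traceable call on exit.
	 */
	#include <assert.h>
	#include <stdio.h>

	static int nmi_count;	/* stands in for the preempt_count NMI bits */

	static void tracer_hook(const char *what)	/* anything instrumentable */
	{
		assert(nmi_count > 0);	/* tracer may rely on in_nmi() */
		printf("trace: %s (nmi depth %d)\n", what, nmi_count);
	}

	static void toy_nmi_enter(void)
	{
		nmi_count++;		/* first: make in_nmi() true */
		tracer_hook("nmi_enter");	/* only now is tracing safe */
	}

	static void toy_nmi_exit(void)
	{
		tracer_hook("nmi_exit");	/* trace while still in_nmi() */
		nmi_count--;		/* last: leave NMI context */
	}

	int main(void)
	{
		toy_nmi_enter();
		toy_nmi_exit();
		return 0;
	}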