2023-12-08 01573e231f18eb2d99162747186f59511f56b64d
kernel/include/linux/hardirq.h
@@ -2,30 +2,28 @@
 #ifndef LINUX_HARDIRQ_H
 #define LINUX_HARDIRQ_H
 
+#include <linux/context_tracking_state.h>
 #include <linux/preempt.h>
 #include <linux/lockdep.h>
 #include <linux/ftrace_irq.h>
+#include <linux/sched.h>
 #include <linux/vtime.h>
 #include <asm/hardirq.h>
-
 
 extern void synchronize_irq(unsigned int irq);
 extern bool synchronize_hardirq(unsigned int irq);
 
-#if defined(CONFIG_TINY_RCU)
-
-static inline void rcu_nmi_enter(void)
-{
-}
-
-static inline void rcu_nmi_exit(void)
-{
-}
-
+#ifdef CONFIG_NO_HZ_FULL
+void __rcu_irq_enter_check_tick(void);
 #else
-extern void rcu_nmi_enter(void);
-extern void rcu_nmi_exit(void);
+static inline void __rcu_irq_enter_check_tick(void) { }
 #endif
+
+static __always_inline void rcu_irq_enter_check_tick(void)
+{
+	if (context_tracking_enabled())
+		__rcu_irq_enter_check_tick();
+}
 
 /*
  * It is safe to do non-atomic ops on ->hardirq_context,
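
The hunk above splits the tick check into a cheap inline guard and an out-of-line slow path: rcu_irq_enter_check_tick() tests context_tracking_enabled() and only then calls __rcu_irq_enter_check_tick(), which is a real function only under CONFIG_NO_HZ_FULL and an empty stub otherwise. A minimal userspace sketch of the same fast-path/slow-path pattern; the names here (tick_check_enabled, __check_tick, check_tick) are hypothetical stand-ins, not kernel symbols:

#include <stdbool.h>
#include <stdio.h>

static bool tick_check_enabled;	/* stands in for context_tracking_enabled() */

/* stands in for __rcu_irq_enter_check_tick(): the out-of-line slow path */
static void __check_tick(void)
{
	printf("slow path: re-enable the tick if needed\n");
}

/* mirrors rcu_irq_enter_check_tick(): an always-inlined cheap guard */
static inline void check_tick(void)
{
	if (tick_check_enabled)
		__check_tick();
}

int main(void)
{
	check_tick();			/* predicate false: nothing happens */
	tick_check_enabled = true;
	check_tick();			/* now takes the slow path */
	return 0;
}

In the kernel the guard is cheaper still, since context_tracking_enabled() is backed by a static key, so the disabled case costs a patched-out branch rather than a load and test.
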
@@ -35,58 +33,119 @@
  */
 #define __irq_enter()					\
 	do {						\
-		account_irq_enter_time(current);	\
 		preempt_count_add(HARDIRQ_OFFSET);	\
-		trace_hardirq_enter();			\
+		lockdep_hardirq_enter();		\
+		account_hardirq_enter(current);		\
+	} while (0)
+
+/*
+ * Like __irq_enter() without time accounting for fast
+ * interrupts, e.g. reschedule IPI where time accounting
+ * is more expensive than the actual interrupt.
+ */
+#define __irq_enter_raw()				\
+	do {						\
+		preempt_count_add(HARDIRQ_OFFSET);	\
+		lockdep_hardirq_enter();		\
 	} while (0)
 
 /*
  * Enter irq context (on NO_HZ, update jiffies):
  */
-extern void irq_enter(void);
+void irq_enter(void);
+/*
+ * Like irq_enter(), but RCU is already watching.
+ */
+void irq_enter_rcu(void);
 
 /*
  * Exit irq context without processing softirqs:
  */
 #define __irq_exit()					\
 	do {						\
-		trace_hardirq_exit();			\
-		account_irq_exit_time(current);		\
+		account_hardirq_exit(current);		\
+		lockdep_hardirq_exit();			\
+		preempt_count_sub(HARDIRQ_OFFSET);	\
+	} while (0)
+
+/*
+ * Like __irq_exit() without time accounting
+ */
+#define __irq_exit_raw()				\
+	do {						\
+		lockdep_hardirq_exit();			\
 		preempt_count_sub(HARDIRQ_OFFSET);	\
 	} while (0)
 
 /*
  * Exit irq context and process softirqs if needed:
 */
-extern void irq_exit(void);
+void irq_exit(void);
+
+/*
+ * Like irq_exit(), but return with RCU watching.
+ */
+void irq_exit_rcu(void);
 
 #ifndef arch_nmi_enter
 #define arch_nmi_enter()	do { } while (0)
 #define arch_nmi_exit()		do { } while (0)
 #endif
 
+#ifdef CONFIG_TINY_RCU
+static inline void rcu_nmi_enter(void) { }
+static inline void rcu_nmi_exit(void) { }
+#else
+extern void rcu_nmi_enter(void);
+extern void rcu_nmi_exit(void);
+#endif
+
+/*
+ * NMI vs Tracing
+ * --------------
+ *
+ * We must not land in a tracer until (or after) we've changed preempt_count
+ * such that in_nmi() becomes true. To that effect all NMI C entry points must
+ * be marked 'notrace' and call nmi_enter() as soon as possible.
+ */
+
+/*
+ * nmi_enter() can nest up to 15 times; see NMI_BITS.
+ */
+#define __nmi_enter()						\
+	do {							\
+		lockdep_off();					\
+		arch_nmi_enter();				\
+		BUG_ON(in_nmi() == NMI_MASK);			\
+		__preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET);	\
+	} while (0)
+
 #define nmi_enter()						\
 	do {							\
-		arch_nmi_enter();				\
-		printk_nmi_enter();				\
-		lockdep_off();					\
-		ftrace_nmi_enter();				\
-		BUG_ON(in_nmi());				\
-		preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET);	\
+		__nmi_enter();					\
+		lockdep_hardirq_enter();			\
 		rcu_nmi_enter();				\
-		trace_hardirq_enter();			\
+		instrumentation_begin();			\
+		ftrace_nmi_enter();				\
+		instrumentation_end();				\
+	} while (0)
+
+#define __nmi_exit()						\
+	do {							\
+		BUG_ON(!in_nmi());				\
+		__preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET);	\
+		arch_nmi_exit();				\
+		lockdep_on();					\
 	} while (0)
 
 #define nmi_exit()						\
 	do {							\
-		trace_hardirq_exit();			\
-		rcu_nmi_exit();					\
-		BUG_ON(!in_nmi());				\
-		preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET);	\
+		instrumentation_begin();			\
 		ftrace_nmi_exit();				\
-		lockdep_on();					\
-		printk_nmi_exit();				\
-		arch_nmi_exit();				\
+		instrumentation_end();				\
+		rcu_nmi_exit();					\
+		lockdep_hardirq_exit();				\
+		__nmi_exit();					\
 	} while (0)
 
 #endif /* LINUX_HARDIRQ_H */
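
Taken together, the rework keeps everything that must not be traced inside __nmi_enter()/__nmi_exit() and brackets the traceable ftrace calls with instrumentation_begin()/instrumentation_end(). A schematic sketch, not from the patch, of how an NMI C entry point is expected to use these helpers per the "NMI vs Tracing" comment; the handler name and body are hypothetical:

#include <linux/hardirq.h>
#include <linux/ptrace.h>

/* 'notrace' keeps tracers away until nmi_enter() has made in_nmi() true */
notrace void hypothetical_do_nmi(struct pt_regs *regs)
{
	nmi_enter();	/* preempt_count, lockdep, RCU and ftrace bookkeeping */

	/* ... handle the NMI; instrumentation is safe from here on ... */

	nmi_exit();	/* tear down in reverse order */
}

Note that nmi_enter() now raises NMI_OFFSET and HARDIRQ_OFFSET in a single __preempt_count_add(), and the BUG_ON(in_nmi() == NMI_MASK) check trips only once the NMI_BITS field saturates, which is what permits the documented 15 levels of nesting.
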