2024-11-01 2f529f9b558ca1c1bd74be7437a84e4711743404
kernel/include/linux/preempt.h
@@ -27,17 +27,23 @@
  * SOFTIRQ_MASK:	0x0000ff00
  * HARDIRQ_MASK:	0x000f0000
  *     NMI_MASK:	0x00f00000
+ * PIPELINE_MASK:	0x01000000
+ *    STAGE_MASK:	0x02000000
  * PREEMPT_NEED_RESCHED:	0x80000000
  */
 #define PREEMPT_BITS	8
 #define SOFTIRQ_BITS	8
 #define HARDIRQ_BITS	4
 #define NMI_BITS	4
+#define PIPELINE_BITS	1
+#define STAGE_BITS	1
 
 #define PREEMPT_SHIFT	0
 #define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)
 #define HARDIRQ_SHIFT	(SOFTIRQ_SHIFT + SOFTIRQ_BITS)
 #define NMI_SHIFT	(HARDIRQ_SHIFT + HARDIRQ_BITS)
+#define PIPELINE_SHIFT	(NMI_SHIFT + NMI_BITS)
+#define STAGE_SHIFT	(PIPELINE_SHIFT + PIPELINE_BITS)
 
 #define __IRQ_MASK(x)	((1UL << (x))-1)
 
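Layout note: NMI_SHIFT + NMI_BITS = 20 + 4 = 24, so the two new one-bit fields land on bits 24 and 25 of preempt_count(), i.e. PIPELINE_MASK = 0x01000000 and STAGE_MASK = 0x02000000, exactly as the updated comment block states. A standalone sketch (illustrative only, not part of the patch) that double-checks the arithmetic:

    /* Standalone check of the new bit layout (illustrative only). */
    #define PREEMPT_BITS   8
    #define SOFTIRQ_BITS   8
    #define HARDIRQ_BITS   4
    #define NMI_BITS       4
    #define PIPELINE_BITS  1
    #define STAGE_BITS     1

    #define PREEMPT_SHIFT  0
    #define SOFTIRQ_SHIFT  (PREEMPT_SHIFT + PREEMPT_BITS)    /*  8 */
    #define HARDIRQ_SHIFT  (SOFTIRQ_SHIFT + SOFTIRQ_BITS)    /* 16 */
    #define NMI_SHIFT      (HARDIRQ_SHIFT + HARDIRQ_BITS)    /* 20 */
    #define PIPELINE_SHIFT (NMI_SHIFT + NMI_BITS)            /* 24 */
    #define STAGE_SHIFT    (PIPELINE_SHIFT + PIPELINE_BITS)  /* 25 */

    #define __IRQ_MASK(x)  ((1UL << (x))-1)

    _Static_assert((__IRQ_MASK(PIPELINE_BITS) << PIPELINE_SHIFT) == 0x01000000UL,
                   "PIPELINE_MASK disagrees with the comment block");
    _Static_assert((__IRQ_MASK(STAGE_BITS) << STAGE_SHIFT) == 0x02000000UL,
                   "STAGE_MASK disagrees with the comment block");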
@@ -45,11 +51,15 @@
 #define SOFTIRQ_MASK	(__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
 #define HARDIRQ_MASK	(__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
 #define NMI_MASK	(__IRQ_MASK(NMI_BITS) << NMI_SHIFT)
+#define PIPELINE_MASK	(__IRQ_MASK(PIPELINE_BITS) << PIPELINE_SHIFT)
+#define STAGE_MASK	(__IRQ_MASK(STAGE_BITS) << STAGE_SHIFT)
 
 #define PREEMPT_OFFSET	(1UL << PREEMPT_SHIFT)
 #define SOFTIRQ_OFFSET	(1UL << SOFTIRQ_SHIFT)
 #define HARDIRQ_OFFSET	(1UL << HARDIRQ_SHIFT)
 #define NMI_OFFSET	(1UL << NMI_SHIFT)
+#define PIPELINE_OFFSET	(1UL << PIPELINE_SHIFT)
+#define STAGE_OFFSET	(1UL << STAGE_SHIFT)
 
 #define SOFTIRQ_DISABLE_OFFSET	(2 * SOFTIRQ_OFFSET)
 
@@ -82,6 +92,9 @@
 #define irq_count()	(preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
				 | NMI_MASK))
 
+/* The current IRQ stage level: 0=inband, 1=oob */
+#define stage_level()	((preempt_count() & STAGE_MASK) >> STAGE_SHIFT)
+
 /*
  * Are we doing bottom half or hardware interrupt processing?
  *
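stage_level() simply isolates bit 25 and shifts it down: 0 means the in-band stage, 1 the out-of-band (oob) stage. An illustrative re-implementation outside the kernel, with example count values:

    /* Illustrative extraction of the stage bit (not kernel code). */
    #define MY_STAGE_SHIFT	25
    #define MY_STAGE_MASK	(1UL << MY_STAGE_SHIFT)

    static inline unsigned long stage_of(unsigned long count)
    {
            return (count & MY_STAGE_MASK) >> MY_STAGE_SHIFT;
    }

    /* stage_of(0x00000001) == 0: in-band stage, preempt_disable() held once */
    /* stage_of(0x02010000) == 1: oob stage, inside one hardirq level */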
@@ -91,6 +104,7 @@
  * in_serving_softirq() - We're in softirq context
  * in_nmi()		- We're in NMI context
  * in_task()		- We're in task context
+ * in_pipeline()	- We're on pipeline entry
  *
  * Note: due to the BH disabled confusion: in_softirq(),in_interrupt() really
  * should not be used in new code.
@@ -102,6 +116,7 @@
 #define in_nmi()		(preempt_count() & NMI_MASK)
 #define in_task()		(!(preempt_count() & \
				   (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
+#define in_pipeline()		(preempt_count() & PIPELINE_MASK)
 
 /*
  * The preempt_count offset after preempt_disable();
@@ -180,7 +195,8 @@
 
 #define preempt_enable_no_resched() sched_preempt_enable_no_resched()
 
-#define preemptible()	(preempt_count() == 0 && !irqs_disabled())
+#define preemptible()	(preempt_count() == 0 && \
+			 !hard_irqs_disabled() && !irqs_disabled())
 
 #ifdef CONFIG_PREEMPTION
 #define preempt_enable() \
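The preemptible() change reflects the split interrupt state under pipelining: irqs_disabled() tracks only the virtualized in-band disable flag, while hard_irqs_disabled() (provided elsewhere in the series) reads the real CPU interrupt flag, and the scheduler must not preempt unless both are clear. A sketch of the predicate with the two states made explicit (names hypothetical):

    /* Hypothetical restatement: preemption needs a zero count plus both
     * the real (hard) and the virtual (in-band) IRQ flags enabled.
     */
    static inline bool can_preempt(unsigned long count,
                                   bool hard_irqs_off, bool virt_irqs_off)
    {
            return count == 0 && !hard_irqs_off && !virt_irqs_off;
    }

    /* can_preempt(0, false, true)  == false: in-band stage stalled  */
    /* can_preempt(0, true,  false) == false: CPU IRQs hard-disabled */
    /* can_preempt(0, false, false) == true                          */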
@@ -352,4 +368,43 @@
 	preempt_enable();
 }
 
+#ifdef CONFIG_IRQ_PIPELINE
+
+static __always_inline bool running_inband(void)
+{
+	return stage_level() == 0;
+}
+
+static __always_inline bool running_oob(void)
+{
+	return !running_inband();
+}
+
+unsigned long hard_preempt_disable(void);
+void hard_preempt_enable(unsigned long flags);
+
+#else
+
+static __always_inline bool running_inband(void)
+{
+	return true;
+}
+
+static __always_inline bool running_oob(void)
+{
+	return false;
+}
+
+#define hard_preempt_disable()		\
+({					\
+	preempt_disable();		\
+	0;				\
+})
+#define hard_preempt_enable(__flags)	\
+	do {				\
+		preempt_enable();	\
+		(void)(__flags);	\
+	} while (0)
+#endif
+
 #endif /* __LINUX_PREEMPT_H */
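Finally, a usage sketch for the helpers introduced at the bottom of the file. The caller and its body are hypothetical, and the behavior assumed for hard_preempt_disable() on CONFIG_IRQ_PIPELINE kernels (hard-disable CPU interrupts, disable preemption, return the saved IRQ state) is inferred from the fallback definitions, which pair preempt_disable()/preempt_enable() and ignore the flags:

    /* Hypothetical caller: keep a short section safe from in-band
     * preemption and, under pipelining, from out-of-band interrupts too.
     */
    static void touch_shared_state(void)
    {
            unsigned long flags;

            flags = hard_preempt_disable();

            if (running_oob()) {
                    /* reached from the oob stage, stage_level() == 1 */
            } else {
                    /* reached from the in-band stage, stage_level() == 0 */
            }

            hard_preempt_enable(flags);
    }

The same code builds unchanged with CONFIG_IRQ_PIPELINE disabled: hard_preempt_disable() degrades to plain preempt_disable() returning 0, hard_preempt_enable() to preempt_enable(), and running_oob() is constant false, so the oob branch folds away.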