 .. |  .. |
 27 |  27 |  * SOFTIRQ_MASK: 0x0000ff00
 28 |  28 |  * HARDIRQ_MASK: 0x000f0000
 29 |  29 |  * NMI_MASK: 0x00f00000
    |  30 | + * PIPELINE_MASK: 0x01000000
    |  31 | + * STAGE_MASK: 0x02000000
 30 |  32 |  * PREEMPT_NEED_RESCHED: 0x80000000
 31 |  33 |  */
 32 |  34 | #define PREEMPT_BITS 8
 33 |  35 | #define SOFTIRQ_BITS 8
 34 |  36 | #define HARDIRQ_BITS 4
 35 |  37 | #define NMI_BITS 4
    |  38 | +#define PIPELINE_BITS 1
    |  39 | +#define STAGE_BITS 1
 36 |  40 |
 37 |  41 | #define PREEMPT_SHIFT 0
 38 |  42 | #define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS)
 39 |  43 | #define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
 40 |  44 | #define NMI_SHIFT (HARDIRQ_SHIFT + HARDIRQ_BITS)
    |  45 | +#define PIPELINE_SHIFT (NMI_SHIFT + NMI_BITS)
    |  46 | +#define STAGE_SHIFT (PIPELINE_SHIFT + PIPELINE_BITS)
 41 |  47 |
 42 |  48 | #define __IRQ_MASK(x) ((1UL << (x))-1)
 43 |  49 |
 .. |  .. |
 45 |  51 | #define SOFTIRQ_MASK (__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
 46 |  52 | #define HARDIRQ_MASK (__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
 47 |  53 | #define NMI_MASK (__IRQ_MASK(NMI_BITS) << NMI_SHIFT)
    |  54 | +#define PIPELINE_MASK (__IRQ_MASK(PIPELINE_BITS) << PIPELINE_SHIFT)
    |  55 | +#define STAGE_MASK (__IRQ_MASK(STAGE_BITS) << STAGE_SHIFT)
 48 |  56 |
 49 |  57 | #define PREEMPT_OFFSET (1UL << PREEMPT_SHIFT)
 50 |  58 | #define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT)
 51 |  59 | #define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
 52 |  60 | #define NMI_OFFSET (1UL << NMI_SHIFT)
    |  61 | +#define PIPELINE_OFFSET (1UL << PIPELINE_SHIFT)
    |  62 | +#define STAGE_OFFSET (1UL << STAGE_SHIFT)
 53 |  63 |
 54 |  64 | #define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
 55 |  65 |
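
The two new single-bit fields slot in directly above the NMI bits, so the whole map stays within the low 26 bits, well clear of PREEMPT_NEED_RESCHED at bit 31. A standalone user-space sketch (not part of the patch) that re-derives the masks from the definitions above and checks them against the values in the header comment:

    #include <assert.h>

    #define PREEMPT_BITS    8
    #define SOFTIRQ_BITS    8
    #define HARDIRQ_BITS    4
    #define NMI_BITS        4
    #define PIPELINE_BITS   1
    #define STAGE_BITS      1

    #define PREEMPT_SHIFT   0
    #define SOFTIRQ_SHIFT   (PREEMPT_SHIFT + PREEMPT_BITS)   /* 8 */
    #define HARDIRQ_SHIFT   (SOFTIRQ_SHIFT + SOFTIRQ_BITS)   /* 16 */
    #define NMI_SHIFT       (HARDIRQ_SHIFT + HARDIRQ_BITS)   /* 20 */
    #define PIPELINE_SHIFT  (NMI_SHIFT + NMI_BITS)           /* 24 */
    #define STAGE_SHIFT     (PIPELINE_SHIFT + PIPELINE_BITS) /* 25 */

    #define __IRQ_MASK(x)   ((1UL << (x))-1)

    int main(void)
    {
            /* Matches the header comment: bit 24 and bit 25 respectively. */
            assert((__IRQ_MASK(PIPELINE_BITS) << PIPELINE_SHIFT) == 0x01000000UL);
            assert((__IRQ_MASK(STAGE_BITS) << STAGE_SHIFT) == 0x02000000UL);
            return 0;
    }
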
 .. |  .. |
 82 |  92 | #define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
 83 |  93 |                                  | NMI_MASK))
 84 |  94 |
    |  95 | +/* The current IRQ stage level: 0=inband, 1=oob */
    |  96 | +#define stage_level() ((preempt_count() & STAGE_MASK) >> STAGE_SHIFT)
    |  97 | +
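
stage_level() is a plain field extraction: mask out the STAGE bit and shift it down, yielding 0 for the in-band stage and 1 for the out-of-band stage. A minimal standalone illustration with hand-picked counts (hypothetical values, same arithmetic as the macro):

    #include <assert.h>

    #define STAGE_SHIFT     25
    #define STAGE_MASK      (1UL << STAGE_SHIFT)

    /* Same arithmetic as stage_level(), applied to a given count. */
    static unsigned long stage_level_of(unsigned long count)
    {
            return (count & STAGE_MASK) >> STAGE_SHIFT;
    }

    int main(void)
    {
            assert(stage_level_of(0) == 0);                 /* in-band, quiescent */
            assert(stage_level_of(1) == 0);                 /* in-band, preemption off */
            assert(stage_level_of(STAGE_MASK | 1) == 1);    /* oob, preemption off */
            return 0;
    }
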
 85 |  98 | /*
 86 |  99 |  * Are we doing bottom half or hardware interrupt processing?
 87 | 100 |  *
 .. |  .. |
 91 | 104 |  * in_serving_softirq() - We're in softirq context
 92 | 105 |  * in_nmi() - We're in NMI context
 93 | 106 |  * in_task() - We're in task context
    | 107 | + * in_pipeline() - We're on pipeline entry
 94 | 108 |  *
 95 | 109 |  * Note: due to the BH disabled confusion: in_softirq(),in_interrupt() really
 96 | 110 |  * should not be used in new code.
 .. |  .. |
102 | 116 | #define in_nmi() (preempt_count() & NMI_MASK)
103 | 117 | #define in_task() (!(preempt_count() & \
104 | 118 |                    (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
    | 119 | +#define in_pipeline() (preempt_count() & PIPELINE_MASK)
105 | 120 |
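
in_pipeline() follows the established pattern of the other predicates: a single mask test against preempt_count(). Note that in_task() is left untouched, so it does not look at the PIPELINE or STAGE bits. A hypothetical debug helper built only on these predicates (kernel context assumed; check_inband_ctx() is an illustration, not an API from this patch):

    /* Needs <linux/bug.h> for WARN_ON_ONCE(). Complain once if code
     * that must run on the in-band stage, outside the pipeline entry
     * path, is reached from anywhere else. */
    static inline void check_inband_ctx(void)
    {
            WARN_ON_ONCE(in_pipeline() || stage_level() != 0);
    }
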
106 | 121 | /*
107 | 122 |  * The preempt_count offset after preempt_disable();
 .. |  .. |
180 | 195 |
181 | 196 | #define preempt_enable_no_resched() sched_preempt_enable_no_resched()
182 | 197 |
183 |     | -#define preemptible() (preempt_count() == 0 && !irqs_disabled())
    | 198 | +#define preemptible() (preempt_count() == 0 && \
    | 199 | +                       !hard_irqs_disabled() && !irqs_disabled())
184 | 200 |
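
preemptible() now additionally requires that hard (CPU-level) interrupts are enabled, since once interrupts are pipelined, irqs_disabled() presumably tracks only the virtualized in-band disable state. hard_irqs_disabled() is provided elsewhere in this series; a plausible non-pipelined fallback (an assumption, not shown in this hunk) would keep the expression equivalent to the old one:

    /* Sketch: without CONFIG_IRQ_PIPELINE the hard and virtual
     * interrupt states coincide, so the new preemptible() test
     * degenerates to the original definition. Assumed fallback,
     * not taken from this patch. */
    #ifndef CONFIG_IRQ_PIPELINE
    #define hard_irqs_disabled()    irqs_disabled()
    #endif
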
185 | 201 | #ifdef CONFIG_PREEMPTION
186 | 202 | #define preempt_enable() \
 .. |  .. |
352 | 368 |         preempt_enable();
353 | 369 | }
354 | 370 |
    | 371 | +#ifdef CONFIG_IRQ_PIPELINE
    | 372 | +
    | 373 | +static __always_inline bool running_inband(void)
    | 374 | +{
    | 375 | +        return stage_level() == 0;
    | 376 | +}
    | 377 | +
    | 378 | +static __always_inline bool running_oob(void)
    | 379 | +{
    | 380 | +        return !running_inband();
    | 381 | +}
    | 382 | +
    | 383 | +unsigned long hard_preempt_disable(void);
    | 384 | +void hard_preempt_enable(unsigned long flags);
    | 385 | +
    | 386 | +#else
    | 387 | +
    | 388 | +static __always_inline bool running_inband(void)
    | 389 | +{
    | 390 | +        return true;
    | 391 | +}
    | 392 | +
    | 393 | +static __always_inline bool running_oob(void)
    | 394 | +{
    | 395 | +        return false;
    | 396 | +}
    | 397 | +
    | 398 | +#define hard_preempt_disable()          \
    | 399 | +({                                      \
    | 400 | +        preempt_disable();              \
    | 401 | +        0;                              \
    | 402 | +})
    | 403 | +#define hard_preempt_enable(__flags)    \
    | 404 | +        do {                            \
    | 405 | +                preempt_enable();       \
    | 406 | +                (void)(__flags);        \
    | 407 | +        } while (0)
    | 408 | +#endif
    | 409 | +
355 | 410 | #endif /* __LINUX_PREEMPT_H */
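
Taken together, the two build variants give callers a single idiom that works on either stage: hard_preempt_disable() returns the previous hard interrupt state so it can be restored verbatim, which is why the non-pipelined stub yields 0 and discards the flags on enable, and running_inband()/running_oob() let shared code branch per stage. A hypothetical caller, with do_oob_work() and do_inband_work() standing in for real handlers:

    static void touch_shared_state(void)
    {
            unsigned long flags;

            /* Disables preemption; with CONFIG_IRQ_PIPELINE it also
             * stalls hard IRQs and hands back their previous state. */
            flags = hard_preempt_disable();

            if (running_oob())
                    do_oob_work();          /* hypothetical oob-stage path */
            else
                    do_inband_work();       /* hypothetical in-band path */

            hard_preempt_enable(flags);
    }
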