..
  * PREEMPT_MASK:	0x000000ff
  * SOFTIRQ_MASK:	0x0000ff00
  * HARDIRQ_MASK:	0x000f0000
- * NMI_MASK:	0x00100000
+ * NMI_MASK:	0x00f00000
  * PREEMPT_NEED_RESCHED:	0x80000000
  */
 #define PREEMPT_BITS	8
 #define SOFTIRQ_BITS	8
 #define HARDIRQ_BITS	4
-#define NMI_BITS	1
+#define NMI_BITS	4

 #define PREEMPT_SHIFT	0
 #define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)
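The hunk above widens the NMI field of preempt_count() from one bit to four, and the mask in the layout comment changes to match. As a quick sanity check (illustration only, not part of the patch), the new mask follows directly from the field widths listed in the comment:

```c
#include <assert.h>

int main(void)
{
	/* Field layout from the comment above: 8 preempt, 8 softirq, 4 hardirq bits */
	unsigned int nmi_shift = 8 + 8 + 4;	/* NMI field starts at bit 20 */

	/* New layout: NMI_BITS == 4 */
	assert((((1UL << 4) - 1) << nmi_shift) == 0x00f00000UL);
	/* Old layout: NMI_BITS == 1 */
	assert((((1UL << 1) - 1) << nmi_shift) == 0x00100000UL);
	return 0;
}
```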
..
 #define HARDIRQ_OFFSET	(1UL << HARDIRQ_SHIFT)
 #define NMI_OFFSET	(1UL << NMI_SHIFT)

-#ifndef CONFIG_PREEMPT_RT_FULL
-# define SOFTIRQ_DISABLE_OFFSET	(2 * SOFTIRQ_OFFSET)
-#else
-# define SOFTIRQ_DISABLE_OFFSET	(0)
-#endif
-
-/* We use the MSB mostly because its available */
-#define PREEMPT_NEED_RESCHED	0x80000000
+#define SOFTIRQ_DISABLE_OFFSET	(2 * SOFTIRQ_OFFSET)

 #define PREEMPT_DISABLED	(PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)

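This hunk drops the RT_FULL special case, so SOFTIRQ_DISABLE_OFFSET is twice the per-service offset on all configurations. A worked arithmetic check (illustration only, assuming the usual accounting where serving a softirq adds SOFTIRQ_OFFSET and local_bh_disable() adds SOFTIRQ_DISABLE_OFFSET):

```c
#include <assert.h>

int main(void)
{
	unsigned long softirq_offset = 1UL << 8;		/* SOFTIRQ_OFFSET, SOFTIRQ_SHIFT == 8 */
	unsigned long softirq_disable_offset = 2 * softirq_offset;

	assert(softirq_offset == 0x100);		/* accounted while serving a softirq */
	assert(softirq_disable_offset == 0x200);	/* accounted by a BH-disabled section */

	/* Only the low bit of the softirq field flags "serving a softirq",
	 * which is what in_serving_softirq() tests in a later hunk. */
	assert((softirq_disable_offset & softirq_offset) == 0);
	return 0;
}
```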
..
 /* preempt_count() and related functions, depends on PREEMPT_NEED_RESCHED */
 #include <asm/preempt.h>

+#define nmi_count()	(preempt_count() & NMI_MASK)
 #define hardirq_count()	(preempt_count() & HARDIRQ_MASK)
-#define irq_count()	(preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
-				 | NMI_MASK))
-#ifndef CONFIG_PREEMPT_RT_FULL
-# define softirq_count()	(preempt_count() & SOFTIRQ_MASK)
-# define in_serving_softirq()	(softirq_count() & SOFTIRQ_OFFSET)
+#ifdef CONFIG_PREEMPT_RT
+# define softirq_count()	(current->softirq_disable_cnt & SOFTIRQ_MASK)
 #else
-# define softirq_count()	((unsigned long)current->softirq_nestcnt)
-extern int in_serving_softirq(void);
+# define softirq_count()	(preempt_count() & SOFTIRQ_MASK)
 #endif
+#define irq_count()	(nmi_count() | hardirq_count() | softirq_count())

 /*
- * Are we doing bottom half or hardware interrupt processing?
+ * Macros to retrieve the current execution context:
  *
- * in_irq()       - We're in (hard) IRQ context
+ * in_nmi()		- We're in NMI context
+ * in_hardirq()		- We're in hard IRQ context
+ * in_serving_softirq()	- We're in softirq context
+ * in_task()		- We're in task context
+ */
+#define in_nmi()		(nmi_count())
+#define in_hardirq()		(hardirq_count())
+#define in_serving_softirq()	(softirq_count() & SOFTIRQ_OFFSET)
+#define in_task()		(!(in_nmi() | in_hardirq() | in_serving_softirq()))
+
+/*
+ * The following macros are deprecated and should not be used in new code:
+ * in_irq()       - Obsolete version of in_hardirq()
  * in_softirq()   - We have BH disabled, or are processing softirqs
  * in_interrupt() - We're in NMI,IRQ,SoftIRQ context or have BH disabled
- * in_serving_softirq() - We're in softirq context
- * in_nmi()       - We're in NMI context
- * in_task()	  - We're in task context
- *
- * Note: due to the BH disabled confusion: in_softirq(),in_interrupt() really
- * should not be used in new code.
  */
 #define in_irq()		(hardirq_count())
 #define in_softirq()		(softirq_count())
 #define in_interrupt()		(irq_count())
-#define in_nmi()		(preempt_count() & NMI_MASK)
-#define in_task()		(!(preempt_count() & \
-				   (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))

 /*
  * The preempt_count offset after preempt_disable();
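The hunk above adds nmi_count() and the context-query macros in_nmi()/in_hardirq()/in_serving_softirq()/in_task(), and demotes in_irq()/in_softirq()/in_interrupt() to deprecated status. A hedged sketch of how the new macros compose; the helper and its name are hypothetical and not part of the patch:

```c
/* Hypothetical helper: name the current execution context using the
 * macros added above. The order of checks mirrors in_task()'s definition. */
static const char *current_context_name(void)
{
	if (in_nmi())
		return "nmi";
	if (in_hardirq())
		return "hardirq";
	if (in_serving_softirq())
		return "softirq";
	return "task";
}
```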
..
 /*
  * The preempt_count offset after spin_lock()
  */
-#if !defined(CONFIG_PREEMPT_RT_FULL)
+#if !defined(CONFIG_PREEMPT_RT)
 #define PREEMPT_LOCK_OFFSET	PREEMPT_DISABLE_OFFSET
 #else
 #define PREEMPT_LOCK_OFFSET	0
..
 	preempt_count_dec(); \
 } while (0)

-#ifdef CONFIG_PREEMPT_RT_BASE
+#ifndef CONFIG_PREEMPT_RT
 # define preempt_enable_no_resched()	sched_preempt_enable_no_resched()
-# define preempt_check_resched_rt()	preempt_check_resched()
+# define preempt_check_resched_rt()	barrier();
 #else
 # define preempt_enable_no_resched()	preempt_enable()
-# define preempt_check_resched_rt()	barrier();
+# define preempt_check_resched_rt()	preempt_check_resched()
 #endif

 #define preemptible()	(preempt_count() == 0 && !irqs_disabled())

-#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
-
-extern void migrate_disable(void);
-extern void migrate_enable(void);
-
-int __migrate_disabled(struct task_struct *p);
-
-#elif !defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
-
-extern void migrate_disable(void);
-extern void migrate_enable(void);
-static inline int __migrate_disabled(struct task_struct *p)
-{
-	return 0;
-}
-
-#else
-#define migrate_disable()	preempt_disable()
-#define migrate_enable()	preempt_enable()
-static inline int __migrate_disabled(struct task_struct *p)
-{
-	return 0;
-}
-#endif
-
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
 #define preempt_enable() \
 do { \
 	barrier(); \
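For context, the preempt_enable() being redefined under CONFIG_PREEMPTION is the closing half of the usual pattern below. This is a hedged illustration only; the per-CPU variable and helper are hypothetical and not part of the patch:

```c
#include <linux/percpu.h>
#include <linux/preempt.h>

static DEFINE_PER_CPU(unsigned long, demo_counter);	/* hypothetical */

static void demo_bump(void)
{
	preempt_disable();		/* stay on this CPU; preempt_count != 0 */
	__this_cpu_inc(demo_counter);
	preempt_enable();		/* under CONFIG_PREEMPTION this may reschedule
					 * if preemption became pending meanwhile */
}
```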
..
 		__preempt_schedule(); \
 } while (0)

+/*
+ * Open code preempt_check_resched() because it is not exported to modules but
+ * is used by local_unlock() and bpf_enable_instrumentation().
+ */
 #define preempt_lazy_enable() \
 do { \
 	dec_preempt_lazy_count(); \
 	barrier(); \
-	preempt_check_resched(); \
+	if (should_resched(0)) \
+		__preempt_schedule(); \
 } while (0)

-#else /* !CONFIG_PREEMPT */
+#else /* !CONFIG_PREEMPTION */
 #define preempt_enable() \
 do { \
 	barrier(); \
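The new comment explains why preempt_lazy_enable() now open-codes the resched check. For reference, under CONFIG_PREEMPTION preempt_check_resched() is defined elsewhere in this header roughly as follows, so the two open-coded lines above are its direct equivalent (shown from memory of the mainline header, not from this patch):

```c
#define preempt_check_resched() \
do { \
	if (should_resched(0)) \
		__preempt_schedule(); \
} while (0)
```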
..
 } while (0)

 #define preempt_check_resched() do { } while (0)
-#endif /* CONFIG_PREEMPT */
+#endif /* CONFIG_PREEMPTION */

 #define preempt_disable_notrace() \
 do { \
..
 #define preempt_check_resched_rt()	barrier()
 #define preemptible()			0

-#define migrate_disable()		barrier()
-#define migrate_enable()		barrier()
+#define preempt_lazy_disable()		barrier()
+#define preempt_lazy_enable()		barrier()

-static inline int __migrate_disabled(struct task_struct *p)
-{
-	return 0;
-}
 #endif /* CONFIG_PREEMPT_COUNT */

 #ifdef MODULE
..
 	set_preempt_need_resched(); \
 } while (0)

-#ifdef CONFIG_PREEMPT_RT_FULL
+#ifdef CONFIG_PREEMPT_RT
 # define preempt_disable_rt()		preempt_disable()
 # define preempt_enable_rt()		preempt_enable()
 # define preempt_disable_nort()	barrier()
..

 #endif

+#ifdef CONFIG_SMP
+
+/*
+ * Migrate-Disable and why it is undesired.
+ *
+ * When a preempted task becomes eligible to run under the ideal model (IOW it
+ * becomes one of the M highest priority tasks), it might still have to wait
+ * for the preemptee's migrate_disable() section to complete. Thereby suffering
+ * a reduction in bandwidth in the exact duration of the migrate_disable()
+ * section.
+ *
+ * Per this argument, the change from preempt_disable() to migrate_disable()
+ * gets us:
+ *
+ * - a higher priority task gains reduced wake-up latency; with preempt_disable()
+ *   it would have had to wait for the lower priority task.
+ *
+ * - a lower priority task; which under preempt_disable() could've instantly
+ *   migrated away when another CPU becomes available, is now constrained
+ *   by the ability to push the higher priority task away, which might itself be
+ *   in a migrate_disable() section, reducing its available bandwidth.
+ *
+ * IOW it trades latency / moves the interference term, but it stays in the
+ * system, and as long as it remains unbounded, the system is not fully
+ * deterministic.
+ *
+ *
+ * The reason we have it anyway.
+ *
+ * PREEMPT_RT breaks a number of assumptions traditionally held. By forcing a
+ * number of primitives into becoming preemptible, they would also allow
+ * migration. This turns out to break a bunch of per-cpu usage. To this end,
+ * all these primitives employ migrate_disable() to restore this implicit
+ * assumption.
+ *
+ * This is a 'temporary' work-around at best. The correct solution is getting
+ * rid of the above assumptions and reworking the code to employ explicit
+ * per-cpu locking or short preempt-disable regions.
+ *
+ * The end goal must be to get rid of migrate_disable(); alternatively we need
+ * a schedulability theory that does not depend on arbitrary migration.
+ *
+ *
+ * Notes on the implementation.
+ *
+ * The implementation is particularly tricky since existing code patterns
+ * dictate neither migrate_disable() nor migrate_enable() is allowed to block.
+ * This means that it cannot use cpus_read_lock() to serialize against hotplug,
+ * nor can it easily migrate itself into a pending affinity mask change on
+ * migrate_enable().
+ *
+ *
+ * Note: even non-work-conserving schedulers like semi-partitioned ones depend
+ * on migration, so migrate_disable() is not only a problem for
+ * work-conserving schedulers.
+ *
+ */
+extern void migrate_disable(void);
+extern void migrate_enable(void);
+
+#else
+
+static inline void migrate_disable(void)
+{
+	preempt_lazy_disable();
+}
+
+static inline void migrate_enable(void)
+{
+	preempt_lazy_enable();
+}
+
+#endif /* CONFIG_SMP */
+
 #endif /* __LINUX_PREEMPT_H */
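The per-CPU breakage described in the comment is the typical motivation for the interface: with sleeping locks, preempt_disable() is no longer an option, so code pins itself to the current CPU with migrate_disable() while remaining preemptible. A hedged sketch of that pattern; the variable and helper are hypothetical and not part of the patch:

```c
#include <linux/percpu.h>
#include <linux/preempt.h>

static DEFINE_PER_CPU(unsigned long, demo_stat);	/* hypothetical */

static void demo_update_local_stat(void)
{
	unsigned long *stat;

	migrate_disable();	/* this_cpu_ptr() stays stable, task stays preemptible */
	stat = this_cpu_ptr(&demo_stat);
	/* Note: migrate_disable() only pins the CPU; serializing against other
	 * tasks on this CPU still needs a lock (e.g. a local_lock on RT). */
	*stat += 1;
	migrate_enable();
}
```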