.. | ..
77 | 77 | /* preempt_count() and related functions, depends on PREEMPT_NEED_RESCHED */
78 | 78 | #include <asm/preempt.h>
79 | 79 |
80 | | -#define nmi_count() (preempt_count() & NMI_MASK)
81 | 80 | #define hardirq_count() (preempt_count() & HARDIRQ_MASK)
82 | | -#ifdef CONFIG_PREEMPT_RT
83 | | -# define softirq_count() (current->softirq_disable_cnt & SOFTIRQ_MASK)
84 | | -#else
85 | | -# define softirq_count() (preempt_count() & SOFTIRQ_MASK)
86 | | -#endif
87 | | -#define irq_count() (nmi_count() | hardirq_count() | softirq_count())
| 81 | +#define softirq_count() (preempt_count() & SOFTIRQ_MASK)
| 82 | +#define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
| 83 | + | NMI_MASK))
88 | 84 |
89 | 85 | /*
90 | | - * Macros to retrieve the current execution context:
| 86 | + * Are we doing bottom half or hardware interrupt processing?
91 | 87 | *
92 | | - * in_nmi() - We're in NMI context
93 | | - * in_hardirq() - We're in hard IRQ context
94 | | - * in_serving_softirq() - We're in softirq context
95 | | - * in_task() - We're in task context
96 | | - */
97 | | -#define in_nmi() (nmi_count())
98 | | -#define in_hardirq() (hardirq_count())
99 | | -#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
100 | | -#define in_task() (!(in_nmi() | in_hardirq() | in_serving_softirq()))
101 | | -
102 | | -/*
103 | | - * The following macros are deprecated and should not be used in new code:
104 | | - * in_irq() - Obsolete version of in_hardirq()
| 88 | + * in_irq() - We're in (hard) IRQ context
105 | 89 | * in_softirq() - We have BH disabled, or are processing softirqs
106 | 90 | * in_interrupt() - We're in NMI,IRQ,SoftIRQ context or have BH disabled
| 91 | + * in_serving_softirq() - We're in softirq context
| 92 | + * in_nmi() - We're in NMI context
| 93 | + * in_task() - We're in task context
| 94 | + *
| 95 | + * Note: due to the BH disabled confusion: in_softirq(),in_interrupt() really
| 96 | + * should not be used in new code.
107 | 97 | */
108 | 98 | #define in_irq() (hardirq_count())
109 | 99 | #define in_softirq() (softirq_count())
110 | 100 | #define in_interrupt() (irq_count())
| 101 | +#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
| 102 | +#define in_nmi() (preempt_count() & NMI_MASK)
| 103 | +#define in_task() (!(preempt_count() & \
| 104 | + (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
111 | 105 |
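All of the context checks in this hunk, removed and added alike, look at the same preempt_count word and differ only in which mask they test. The standalone sketch below models that decomposition; it is a userspace illustration, not kernel code, and the shift/width values are assumptions that mirror the usual preempt/softirq/hardirq/NMI layout, with the authoritative values being the *_SHIFT/*_MASK definitions earlier in this header.

```c
/*
 * Userspace model of how the context macros carve up one preempt_count word.
 * The shifts and widths below are illustrative assumptions; the real values
 * come from PREEMPT_SHIFT/SOFTIRQ_SHIFT/HARDIRQ_SHIFT/NMI_SHIFT in
 * <linux/preempt.h>.
 */
#include <assert.h>
#include <stdio.h>

#define SOFTIRQ_SHIFT   8
#define HARDIRQ_SHIFT  16
#define NMI_SHIFT      20

#define SOFTIRQ_MASK   (0xffUL << SOFTIRQ_SHIFT)
#define HARDIRQ_MASK   (0x0fUL << HARDIRQ_SHIFT)
#define NMI_MASK       (0x0fUL << NMI_SHIFT)
#define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT)
#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)

static unsigned long pc;        /* stands in for preempt_count() */

#define in_irq()        (pc & HARDIRQ_MASK)
#define in_softirq()    (pc & SOFTIRQ_MASK)
#define in_interrupt()  (pc & (HARDIRQ_MASK | SOFTIRQ_MASK | NMI_MASK))
#define in_task()       (!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))

int main(void)
{
        assert(in_task() && !in_interrupt());   /* plain task context */

        pc += HARDIRQ_OFFSET;                   /* "enter" a hard IRQ */
        assert(in_irq() && in_interrupt() && !in_task());
        pc -= HARDIRQ_OFFSET;

        pc += SOFTIRQ_OFFSET;                   /* "serve" a softirq */
        assert(in_softirq() && !in_task());
        pc -= SOFTIRQ_OFFSET;

        assert(in_task());
        puts("context checks behave as modelled");
        return 0;
}
```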
112 | 106 | /*
113 | 107 | * The preempt_count offset after preempt_disable();
.. | ..
121 | 115 | /*
122 | 116 | * The preempt_count offset after spin_lock()
123 | 117 | */
124 | | -#if !defined(CONFIG_PREEMPT_RT)
125 | 118 | #define PREEMPT_LOCK_OFFSET PREEMPT_DISABLE_OFFSET
126 | | -#else
127 | | -#define PREEMPT_LOCK_OFFSET 0
128 | | -#endif
129 | 119 |
130 | 120 | /*
131 | 121 | * The preempt_count offset needed for things like:
.. | ..
174 | 164 | #define preempt_count_inc() preempt_count_add(1)
175 | 165 | #define preempt_count_dec() preempt_count_sub(1)
176 | 166 |
177 | | -#ifdef CONFIG_PREEMPT_LAZY
178 | | -#define add_preempt_lazy_count(val) do { preempt_lazy_count() += (val); } while (0)
179 | | -#define sub_preempt_lazy_count(val) do { preempt_lazy_count() -= (val); } while (0)
180 | | -#define inc_preempt_lazy_count() add_preempt_lazy_count(1)
181 | | -#define dec_preempt_lazy_count() sub_preempt_lazy_count(1)
182 | | -#define preempt_lazy_count() (current_thread_info()->preempt_lazy_count)
183 | | -#else
184 | | -#define add_preempt_lazy_count(val) do { } while (0)
185 | | -#define sub_preempt_lazy_count(val) do { } while (0)
186 | | -#define inc_preempt_lazy_count() do { } while (0)
187 | | -#define dec_preempt_lazy_count() do { } while (0)
188 | | -#define preempt_lazy_count() (0)
189 | | -#endif
190 | | -
191 | 167 | #ifdef CONFIG_PREEMPT_COUNT
192 | 168 |
193 | 169 | #define preempt_disable() \
194 | 170 | do { \
195 | 171 | preempt_count_inc(); \
196 | | - barrier(); \
197 | | -} while (0)
198 | | -
199 | | -#define preempt_lazy_disable() \
200 | | -do { \
201 | | - inc_preempt_lazy_count(); \
202 | 172 | barrier(); \
203 | 173 | } while (0)
204 | 174 |
.. | ..
208 | 178 | preempt_count_dec(); \
209 | 179 | } while (0)
210 | 180 |
211 | | -#ifndef CONFIG_PREEMPT_RT
212 | | -# define preempt_enable_no_resched() sched_preempt_enable_no_resched()
213 | | -# define preempt_check_resched_rt() barrier();
214 | | -#else
215 | | -# define preempt_enable_no_resched() preempt_enable()
216 | | -# define preempt_check_resched_rt() preempt_check_resched()
217 | | -#endif
| 181 | +#define preempt_enable_no_resched() sched_preempt_enable_no_resched()
218 | 182 |
219 | 183 | #define preemptible() (preempt_count() == 0 && !irqs_disabled())
220 | 184 |
.. | ..
239 | 203 | __preempt_schedule(); \
240 | 204 | } while (0)
241 | 205 |
242 | | -/*
243 | | - * open code preempt_check_resched() because it is not exported to modules and
244 | | - * used by local_unlock() or bpf_enable_instrumentation().
245 | | - */
246 | | -#define preempt_lazy_enable() \
247 | | -do { \
248 | | - dec_preempt_lazy_count(); \
249 | | - barrier(); \
250 | | - if (should_resched(0)) \
251 | | - __preempt_schedule(); \
252 | | -} while (0)
253 | | -
254 | 206 | #else /* !CONFIG_PREEMPTION */
255 | 207 | #define preempt_enable() \
256 | 208 | do { \
257 | 209 | barrier(); \
258 | 210 | preempt_count_dec(); \
259 | | -} while (0)
260 | | -
261 | | -#define preempt_lazy_enable() \
262 | | -do { \
263 | | - dec_preempt_lazy_count(); \
264 | | - barrier(); \
265 | 211 | } while (0)
266 | 212 |
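Both branches of this hunk decrement the count, but only the CONFIG_PREEMPTION flavour of preempt_enable() turns re-enabling into a preemption point by asking the scheduler whether a reschedule is pending; the !CONFIG_PREEMPTION variant only drops the count. A minimal userspace sketch of that difference follows; should_resched() and __preempt_schedule() here are stubs that merely reuse the names for illustration, not the real scheduler hooks.

```c
/*
 * Userspace sketch of the two preempt_enable() flavours in the hunk above.
 * should_resched()/__preempt_schedule() only mimic the shape of the real
 * hooks; the point is the control flow, not the API.
 */
#include <stdbool.h>
#include <stdio.h>

static int preempt_count;       /* models preempt_count() */
static bool need_resched;       /* models a pending reschedule request */

static bool should_resched(void)
{
        return preempt_count == 0 && need_resched;
}

static void __preempt_schedule(void)
{
        need_resched = false;
        puts("rescheduled");
}

/* CONFIG_PREEMPTION flavour: re-enabling preemption is a preemption point. */
static void preempt_enable_preemptible(void)
{
        --preempt_count;
        if (should_resched())
                __preempt_schedule();
}

/* !CONFIG_PREEMPTION flavour: only drop the count, never schedule here. */
static void preempt_enable_voluntary(void)
{
        --preempt_count;
}

int main(void)
{
        preempt_count++;                /* preempt_disable() */
        need_resched = true;            /* a wakeup arrives meanwhile */
        preempt_enable_preemptible();   /* prints "rescheduled" right away */

        preempt_count++;
        need_resched = true;
        preempt_enable_voluntary();     /* reschedule is left for a later point */
        printf("still pending: %s\n", need_resched ? "yes" : "no");
        return 0;
}
```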
267 | 213 | #define preempt_enable_notrace() \
.. | ..
302 | 248 | #define preempt_disable_notrace() barrier()
303 | 249 | #define preempt_enable_no_resched_notrace() barrier()
304 | 250 | #define preempt_enable_notrace() barrier()
305 | | -#define preempt_check_resched_rt() barrier()
306 | 251 | #define preemptible() 0
307 | | -
308 | | -#define preempt_lazy_disable() barrier()
309 | | -#define preempt_lazy_enable() barrier()
310 | 252 |
311 | 253 | #endif /* CONFIG_PREEMPT_COUNT */
312 | 254 |
.. | ..
326 | 268 | } while (0)
327 | 269 | #define preempt_fold_need_resched() \
328 | 270 | do { \
329 | | - if (tif_need_resched_now()) \
| 271 | + if (tif_need_resched()) \
330 | 272 | set_preempt_need_resched(); \
331 | 273 | } while (0)
332 | | -
333 | | -#ifdef CONFIG_PREEMPT_RT
334 | | -# define preempt_disable_rt() preempt_disable()
335 | | -# define preempt_enable_rt() preempt_enable()
336 | | -# define preempt_disable_nort() barrier()
337 | | -# define preempt_enable_nort() barrier()
338 | | -#else
339 | | -# define preempt_disable_rt() barrier()
340 | | -# define preempt_enable_rt() barrier()
341 | | -# define preempt_disable_nort() preempt_disable()
342 | | -# define preempt_enable_nort() preempt_enable()
343 | | -#endif
344 | 274 |
345 | 275 | #ifdef CONFIG_PREEMPT_NOTIFIERS
346 | 276 |
.. | ..
392 | 322 |
393 | 323 | #endif
394 | 324 |
395 | | -#ifdef CONFIG_SMP
396 | | -
397 | | -/*
398 | | - * Migrate-Disable and why it is undesired.
| 325 | +/**
| 326 | + * migrate_disable - Prevent migration of the current task
399 | 327 | *
400 | | - * When a preempted task becomes elegible to run under the ideal model (IOW it
401 | | - * becomes one of the M highest priority tasks), it might still have to wait
402 | | - * for the preemptee's migrate_disable() section to complete. Thereby suffering
403 | | - * a reduction in bandwidth in the exact duration of the migrate_disable()
404 | | - * section.
| 328 | + * Maps to preempt_disable() which also disables preemption. Use
| 329 | + * migrate_disable() to annotate that the intent is to prevent migration,
| 330 | + * but not necessarily preemption.
405 | 331 | *
406 | | - * Per this argument, the change from preempt_disable() to migrate_disable()
407 | | - * gets us:
408 | | - *
409 | | - * - a higher priority tasks gains reduced wake-up latency; with preempt_disable()
410 | | - * it would have had to wait for the lower priority task.
411 | | - *
412 | | - * - a lower priority tasks; which under preempt_disable() could've instantly
413 | | - * migrated away when another CPU becomes available, is now constrained
414 | | - * by the ability to push the higher priority task away, which might itself be
415 | | - * in a migrate_disable() section, reducing it's available bandwidth.
416 | | - *
417 | | - * IOW it trades latency / moves the interference term, but it stays in the
418 | | - * system, and as long as it remains unbounded, the system is not fully
419 | | - * deterministic.
420 | | - *
421 | | - *
422 | | - * The reason we have it anyway.
423 | | - *
424 | | - * PREEMPT_RT breaks a number of assumptions traditionally held. By forcing a
425 | | - * number of primitives into becoming preemptible, they would also allow
426 | | - * migration. This turns out to break a bunch of per-cpu usage. To this end,
427 | | - * all these primitives employ migirate_disable() to restore this implicit
428 | | - * assumption.
429 | | - *
430 | | - * This is a 'temporary' work-around at best. The correct solution is getting
431 | | - * rid of the above assumptions and reworking the code to employ explicit
432 | | - * per-cpu locking or short preempt-disable regions.
433 | | - *
434 | | - * The end goal must be to get rid of migrate_disable(), alternatively we need
435 | | - * a schedulability theory that does not depend on abritrary migration.
436 | | - *
437 | | - *
438 | | - * Notes on the implementation.
439 | | - *
440 | | - * The implementation is particularly tricky since existing code patterns
441 | | - * dictate neither migrate_disable() nor migrate_enable() is allowed to block.
442 | | - * This means that it cannot use cpus_read_lock() to serialize against hotplug,
443 | | - * nor can it easily migrate itself into a pending affinity mask change on
444 | | - * migrate_enable().
445 | | - *
446 | | - *
447 | | - * Note: even non-work-conserving schedulers like semi-partitioned depends on
448 | | - * migration, so migrate_disable() is not only a problem for
449 | | - * work-conserving schedulers.
450 | | - *
| 332 | + * Can be invoked nested like preempt_disable() and needs the corresponding
| 333 | + * number of migrate_enable() invocations.
451 | 334 | */
452 | | -extern void migrate_disable(void);
453 | | -extern void migrate_enable(void);
454 | | -
455 | | -#else
456 | | -
457 | | -static inline void migrate_disable(void)
| 335 | +static __always_inline void migrate_disable(void)
458 | 336 | {
459 | | - preempt_lazy_disable();
| 337 | + preempt_disable();
460 | 338 | }
461 | 339 |
462 | | -static inline void migrate_enable(void)
| 340 | +/**
| 341 | + * migrate_enable - Allow migration of the current task
| 342 | + *
| 343 | + * Counterpart to migrate_disable().
| 344 | + *
| 345 | + * As migrate_disable() can be invoked nested, only the outermost invocation
| 346 | + * reenables migration.
| 347 | + *
| 348 | + * Currently mapped to preempt_enable().
| 349 | + */
| 350 | +static __always_inline void migrate_enable(void)
463 | 351 | {
464 | | - preempt_lazy_enable();
| 352 | + preempt_enable();
465 | 353 | }
466 | | -
467 | | -#endif /* CONFIG_SMP */
468 | 354 |
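With migrate_disable()/migrate_enable() mapped onto preempt_disable()/preempt_enable(), the nesting rule spelled out in the kernel-doc above is just that of the preempt count: only the outermost migrate_enable() re-allows migration. A small userspace sketch of that rule, using a plain depth counter as a stand-in for the preempt count:

```c
/*
 * Userspace sketch of the nesting rule described above: migrate_disable()
 * and migrate_enable() balance like a depth counter, and only the return
 * to zero depth makes the task migratable ("unpinned") again.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

static int disable_depth;       /* stand-in for the preempt count */

static bool migration_allowed(void)
{
        return disable_depth == 0;
}

static void migrate_disable(void)
{
        disable_depth++;
}

static void migrate_enable(void)
{
        assert(disable_depth > 0);      /* must pair with a migrate_disable() */
        disable_depth--;
}

int main(void)
{
        migrate_disable();              /* outer section */
        migrate_disable();              /*   nested section */

        migrate_enable();               /*   inner enable: still pinned */
        assert(!migration_allowed());

        migrate_enable();               /* outermost enable: migratable again */
        assert(migration_allowed());
        puts("nesting balanced");
        return 0;
}
```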
469 | 355 | #endif /* __LINUX_PREEMPT_H */
---|