2023-12-11 6778948f9de86c3cfaf36725a7c87dcff9ba247f
--- a/kernel/include/linux/preempt.h
+++ b/kernel/include/linux/preempt.h
@@ -77,37 +77,31 @@
 /* preempt_count() and related functions, depends on PREEMPT_NEED_RESCHED */
 #include <asm/preempt.h>
 
-#define nmi_count()	(preempt_count() & NMI_MASK)
 #define hardirq_count()	(preempt_count() & HARDIRQ_MASK)
-#ifdef CONFIG_PREEMPT_RT
-# define softirq_count()	(current->softirq_disable_cnt & SOFTIRQ_MASK)
-#else
-# define softirq_count()	(preempt_count() & SOFTIRQ_MASK)
-#endif
-#define irq_count()	(nmi_count() | hardirq_count() | softirq_count())
+#define softirq_count()	(preempt_count() & SOFTIRQ_MASK)
+#define irq_count()	(preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
+				 | NMI_MASK))
 
 /*
- * Macros to retrieve the current execution context:
+ * Are we doing bottom half or hardware interrupt processing?
  *
- * in_nmi()		- We're in NMI context
- * in_hardirq()		- We're in hard IRQ context
- * in_serving_softirq()	- We're in softirq context
- * in_task()		- We're in task context
- */
-#define in_nmi()		(nmi_count())
-#define in_hardirq()		(hardirq_count())
-#define in_serving_softirq()	(softirq_count() & SOFTIRQ_OFFSET)
-#define in_task()		(!(in_nmi() | in_hardirq() | in_serving_softirq()))
-
-/*
- * The following macros are deprecated and should not be used in new code:
- * in_irq()		- Obsolete version of in_hardirq()
+ * in_irq()		- We're in (hard) IRQ context
  * in_softirq()		- We have BH disabled, or are processing softirqs
  * in_interrupt()	- We're in NMI,IRQ,SoftIRQ context or have BH disabled
+ * in_serving_softirq()	- We're in softirq context
+ * in_nmi()		- We're in NMI context
+ * in_task()		- We're in task context
+ *
+ * Note: due to the BH disabled confusion: in_softirq(),in_interrupt() really
+ *       should not be used in new code.
  */
 #define in_irq()		(hardirq_count())
 #define in_softirq()		(softirq_count())
 #define in_interrupt()		(irq_count())
+#define in_serving_softirq()	(softirq_count() & SOFTIRQ_OFFSET)
+#define in_nmi()		(preempt_count() & NMI_MASK)
+#define in_task()		(!(preempt_count() & \
+				   (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
 
 /*
  * The preempt_count offset after preempt_disable();
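
For orientation (not part of the patch): on the mainline '+' side above, every context check is derived from preempt_count(). Below is a minimal sketch of how these macros are typically consulted; the helper describe_context() is hypothetical and exists only for illustration.

/* Illustrative sketch only -- not part of the patch. */
#include <linux/preempt.h>
#include <linux/printk.h>

/* Hypothetical helper: report the current execution context. */
static void describe_context(void)
{
	if (in_nmi())
		pr_info("NMI context\n");
	else if (in_irq())
		pr_info("hard IRQ context\n");
	else if (in_serving_softirq())
		pr_info("serving a softirq\n");
	else if (in_task())
		pr_info("task context\n");

	/*
	 * in_interrupt() is also true when BH is merely disabled, which is
	 * exactly the confusion the note in the comment above warns about.
	 */
	if (in_interrupt())
		pr_info("NMI/IRQ/softirq or BH disabled\n");
}
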
@@ -121,11 +115,7 @@
 /*
  * The preempt_count offset after spin_lock()
  */
-#if !defined(CONFIG_PREEMPT_RT)
 #define PREEMPT_LOCK_OFFSET	PREEMPT_DISABLE_OFFSET
-#else
-#define PREEMPT_LOCK_OFFSET	0
-#endif
 
 /*
  * The preempt_count offset needed for things like:
@@ -174,31 +164,11 @@
 #define preempt_count_inc() preempt_count_add(1)
 #define preempt_count_dec() preempt_count_sub(1)
 
-#ifdef CONFIG_PREEMPT_LAZY
-#define add_preempt_lazy_count(val)	do { preempt_lazy_count() += (val); } while (0)
-#define sub_preempt_lazy_count(val)	do { preempt_lazy_count() -= (val); } while (0)
-#define inc_preempt_lazy_count()	add_preempt_lazy_count(1)
-#define dec_preempt_lazy_count()	sub_preempt_lazy_count(1)
-#define preempt_lazy_count()		(current_thread_info()->preempt_lazy_count)
-#else
-#define add_preempt_lazy_count(val)	do { } while (0)
-#define sub_preempt_lazy_count(val)	do { } while (0)
-#define inc_preempt_lazy_count()	do { } while (0)
-#define dec_preempt_lazy_count()	do { } while (0)
-#define preempt_lazy_count()		(0)
-#endif
-
 #ifdef CONFIG_PREEMPT_COUNT
 
 #define preempt_disable() \
 do { \
 	preempt_count_inc(); \
-	barrier(); \
-} while (0)
-
-#define preempt_lazy_disable() \
-do { \
-	inc_preempt_lazy_count(); \
 	barrier(); \
 } while (0)
 
@@ -208,13 +178,7 @@
 	preempt_count_dec(); \
 } while (0)
 
-#ifndef CONFIG_PREEMPT_RT
-# define preempt_enable_no_resched() sched_preempt_enable_no_resched()
-# define preempt_check_resched_rt() barrier();
-#else
-# define preempt_enable_no_resched() preempt_enable()
-# define preempt_check_resched_rt() preempt_check_resched()
-#endif
+#define preempt_enable_no_resched() sched_preempt_enable_no_resched()
 
 #define preemptible()	(preempt_count() == 0 && !irqs_disabled())
 
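
As a usage note, not part of the patch: preempt_disable()/preempt_enable() nest by incrementing and decrementing the preempt count, and only once the count drops back down does preempt_enable() consider a pending reschedule. A minimal sketch under that assumption follows; the per-CPU counter example_stat is hypothetical.

/* Illustrative sketch only -- not part of the patch. */
#include <linux/preempt.h>
#include <linux/percpu.h>

/* Hypothetical per-CPU counter, used only for this sketch. */
static DEFINE_PER_CPU(unsigned long, example_stat);

static void bump_example_stat(void)
{
	preempt_disable();		/* preempt_count_inc() + barrier() */
	__this_cpu_inc(example_stat);	/* safe: no migration while non-preemptible */
	preempt_enable();		/* barrier() + dec; may reschedule */
}
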
@@ -239,29 +203,11 @@
 		__preempt_schedule(); \
 } while (0)
 
-/*
- * open code preempt_check_resched() because it is not exported to modules and
- * used by local_unlock() or bpf_enable_instrumentation().
- */
-#define preempt_lazy_enable() \
-do { \
-	dec_preempt_lazy_count(); \
-	barrier(); \
-	if (should_resched(0)) \
-		__preempt_schedule(); \
-} while (0)
-
 #else /* !CONFIG_PREEMPTION */
 #define preempt_enable() \
 do { \
 	barrier(); \
 	preempt_count_dec(); \
-} while (0)
-
-#define preempt_lazy_enable() \
-do { \
-	dec_preempt_lazy_count(); \
-	barrier(); \
 } while (0)
 
 #define preempt_enable_notrace() \
@@ -302,11 +248,7 @@
 #define preempt_disable_notrace()		barrier()
 #define preempt_enable_no_resched_notrace()	barrier()
 #define preempt_enable_notrace()		barrier()
-#define preempt_check_resched_rt()		barrier()
 #define preemptible()				0
-
-#define preempt_lazy_disable()			barrier()
-#define preempt_lazy_enable()			barrier()
 
 #endif /* CONFIG_PREEMPT_COUNT */
 
@@ -326,21 +268,9 @@
 } while (0)
 #define preempt_fold_need_resched() \
 do { \
-	if (tif_need_resched_now()) \
+	if (tif_need_resched()) \
 		set_preempt_need_resched(); \
 } while (0)
-
-#ifdef CONFIG_PREEMPT_RT
-# define preempt_disable_rt()		preempt_disable()
-# define preempt_enable_rt()		preempt_enable()
-# define preempt_disable_nort()		barrier()
-# define preempt_enable_nort()		barrier()
-#else
-# define preempt_disable_rt()		barrier()
-# define preempt_enable_rt()		barrier()
-# define preempt_disable_nort()		preempt_disable()
-# define preempt_enable_nort()		preempt_enable()
-#endif
 
 #ifdef CONFIG_PREEMPT_NOTIFIERS
 
@@ -392,78 +322,34 @@
 
 #endif
 
-#ifdef CONFIG_SMP
-
-/*
- * Migrate-Disable and why it is undesired.
+/**
+ * migrate_disable - Prevent migration of the current task
  *
- * When a preempted task becomes elegible to run under the ideal model (IOW it
- * becomes one of the M highest priority tasks), it might still have to wait
- * for the preemptee's migrate_disable() section to complete. Thereby suffering
- * a reduction in bandwidth in the exact duration of the migrate_disable()
- * section.
+ * Maps to preempt_disable() which also disables preemption. Use
+ * migrate_disable() to annotate that the intent is to prevent migration,
+ * but not necessarily preemption.
  *
- * Per this argument, the change from preempt_disable() to migrate_disable()
- * gets us:
- *
- * - a higher priority tasks gains reduced wake-up latency; with preempt_disable()
- *   it would have had to wait for the lower priority task.
- *
- * - a lower priority tasks; which under preempt_disable() could've instantly
- *   migrated away when another CPU becomes available, is now constrained
- *   by the ability to push the higher priority task away, which might itself be
- *   in a migrate_disable() section, reducing it's available bandwidth.
- *
- * IOW it trades latency / moves the interference term, but it stays in the
- * system, and as long as it remains unbounded, the system is not fully
- * deterministic.
- *
- *
- * The reason we have it anyway.
- *
- * PREEMPT_RT breaks a number of assumptions traditionally held. By forcing a
- * number of primitives into becoming preemptible, they would also allow
- * migration. This turns out to break a bunch of per-cpu usage. To this end,
- * all these primitives employ migirate_disable() to restore this implicit
- * assumption.
- *
- * This is a 'temporary' work-around at best. The correct solution is getting
- * rid of the above assumptions and reworking the code to employ explicit
- * per-cpu locking or short preempt-disable regions.
- *
- * The end goal must be to get rid of migrate_disable(), alternatively we need
- * a schedulability theory that does not depend on abritrary migration.
- *
- *
- * Notes on the implementation.
- *
- * The implementation is particularly tricky since existing code patterns
- * dictate neither migrate_disable() nor migrate_enable() is allowed to block.
- * This means that it cannot use cpus_read_lock() to serialize against hotplug,
- * nor can it easily migrate itself into a pending affinity mask change on
- * migrate_enable().
- *
- *
- * Note: even non-work-conserving schedulers like semi-partitioned depends on
- *       migration, so migrate_disable() is not only a problem for
- *       work-conserving schedulers.
- *
+ * Can be invoked nested like preempt_disable() and needs the corresponding
+ * number of migrate_enable() invocations.
  */
-extern void migrate_disable(void);
-extern void migrate_enable(void);
-
-#else
-
-static inline void migrate_disable(void)
+static __always_inline void migrate_disable(void)
 {
-	preempt_lazy_disable();
+	preempt_disable();
 }
 
-static inline void migrate_enable(void)
+/**
+ * migrate_enable - Allow migration of the current task
+ *
+ * Counterpart to migrate_disable().
+ *
+ * As migrate_disable() can be invoked nested, only the outermost invocation
+ * reenables migration.
+ *
+ * Currently mapped to preempt_enable().
+ */
+static __always_inline void migrate_enable(void)
 {
-	preempt_lazy_enable();
+	preempt_enable();
 }
-
-#endif /* CONFIG_SMP */
 
 #endif /* __LINUX_PREEMPT_H */
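
A closing usage sketch for the migrate_disable()/migrate_enable() pair added on the '+' side above (not part of the patch; the per-CPU variable example_scratch is hypothetical). In this version of the header the pair simply maps to preempt_disable()/preempt_enable(), but annotating the intent keeps callers correct should the implementation change.

/* Illustrative sketch only -- not part of the patch. */
#include <linux/preempt.h>
#include <linux/percpu.h>
#include <linux/smp.h>

/* Hypothetical per-CPU variable, used only for this sketch. */
static DEFINE_PER_CPU(int, example_scratch);

static void touch_example_scratch(void)
{
	/*
	 * Stay on this CPU while touching per-CPU data. Calls may nest;
	 * only the outermost migrate_enable() re-allows migration
	 * (here: re-enables preemption).
	 */
	migrate_disable();
	this_cpu_write(example_scratch, smp_processor_id());
	migrate_enable();
}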