From 223293205a7265c8b02882461ba8996650048ade Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Mon, 11 Dec 2023 06:33:33 +0000
Subject: [PATCH] audio ok
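
Rework include/linux/preempt.h for the CONFIG_PREEMPT_RT rename
(formerly CONFIG_PREEMPT_RT_FULL / CONFIG_PREEMPT_RT_BASE):

- widen NMI_BITS from 1 to 4 so nmi_enter() can nest, growing NMI_MASK
  from 0x00100000 to 0x00f00000
- derive softirq_count() from current->softirq_disable_cnt on RT and
  from preempt_count() otherwise
- add nmi_count(), in_hardirq() and in_task(); document in_irq(),
  in_softirq() and in_interrupt() as deprecated
- open-code preempt_check_resched() in preempt_lazy_enable(), since it
  is not exported to modules
- consolidate the migrate_disable()/migrate_enable() variants into one
  pair of declarations under CONFIG_SMP, with preempt-lazy fallbacks
  on UP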
---
kernel/include/linux/preempt.h | 180 +++++++++++++++++++++++++++++++++++++----------------------
1 file changed, 112 insertions(+), 68 deletions(-)
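
Note (reviewer illustration, not part of the commit): the widened NMI
field can be sanity-checked with a standalone C program that mirrors the
header's constants; everything below is illustrative scaffolding, not
kernel code.

#include <assert.h>
#include <stdio.h>

/* Mirror the header's layout: 8 preempt, 8 softirq, 4 hardirq, 4 NMI bits. */
#define PREEMPT_BITS	8
#define SOFTIRQ_BITS	8
#define HARDIRQ_BITS	4
#define NMI_BITS	4

#define PREEMPT_SHIFT	0
#define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT	(SOFTIRQ_SHIFT + SOFTIRQ_BITS)
#define NMI_SHIFT	(HARDIRQ_SHIFT + HARDIRQ_BITS)

#define __IRQ_MASK(x)	((1UL << (x)) - 1)
#define NMI_MASK	(__IRQ_MASK(NMI_BITS) << NMI_SHIFT)

int main(void)
{
	/* NMI_BITS = 4 widens the mask from 0x00100000 to 0x00f00000. */
	assert(NMI_MASK == 0x00f00000UL);
	printf("NMI_MASK = 0x%08lx\n", NMI_MASK);
	return 0;
}

Four NMI bits let nmi_enter() calls nest, which a single bit could not
represent.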
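
A second sketch, assuming kernel context (the function is hypothetical),
shows how the reworked helpers classify the running context; new code
should prefer these over the deprecated in_irq()/in_softirq()/
in_interrupt(), which also trigger when bottom halves are merely
disabled:

/* Hypothetical helper showing the intended classification order. */
static const char *ctx_name(void)
{
	if (in_nmi())			/* nmi_count() != 0 */
		return "nmi";
	if (in_hardirq())		/* hardirq_count() != 0 */
		return "hardirq";
	if (in_serving_softirq())	/* softirq_count() & SOFTIRQ_OFFSET */
		return "softirq";
	return "task";			/* in_task(): none of the above */
}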
diff --git a/kernel/include/linux/preempt.h b/kernel/include/linux/preempt.h
index 9c74a01..7b5b2ed 100644
--- a/kernel/include/linux/preempt.h
+++ b/kernel/include/linux/preempt.h
@@ -26,13 +26,13 @@
* PREEMPT_MASK: 0x000000ff
* SOFTIRQ_MASK: 0x0000ff00
* HARDIRQ_MASK: 0x000f0000
- * NMI_MASK: 0x00100000
+ * NMI_MASK: 0x00f00000
* PREEMPT_NEED_RESCHED: 0x80000000
*/
#define PREEMPT_BITS 8
#define SOFTIRQ_BITS 8
#define HARDIRQ_BITS 4
-#define NMI_BITS 1
+#define NMI_BITS 4
#define PREEMPT_SHIFT 0
#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS)
@@ -51,14 +51,7 @@
#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
#define NMI_OFFSET (1UL << NMI_SHIFT)
-#ifndef CONFIG_PREEMPT_RT_FULL
-# define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
-#else
-# define SOFTIRQ_DISABLE_OFFSET (0)
-#endif
-
-/* We use the MSB mostly because its available */
-#define PREEMPT_NEED_RESCHED 0x80000000
+#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
#define PREEMPT_DISABLED (PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)
@@ -84,36 +77,37 @@
/* preempt_count() and related functions, depends on PREEMPT_NEED_RESCHED */
#include <asm/preempt.h>
+#define nmi_count() (preempt_count() & NMI_MASK)
#define hardirq_count() (preempt_count() & HARDIRQ_MASK)
-#define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
- | NMI_MASK))
-#ifndef CONFIG_PREEMPT_RT_FULL
-# define softirq_count() (preempt_count() & SOFTIRQ_MASK)
-# define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
+#ifdef CONFIG_PREEMPT_RT
+# define softirq_count() (current->softirq_disable_cnt & SOFTIRQ_MASK)
#else
-# define softirq_count() ((unsigned long)current->softirq_nestcnt)
-extern int in_serving_softirq(void);
+# define softirq_count() (preempt_count() & SOFTIRQ_MASK)
#endif
+#define irq_count() (nmi_count() | hardirq_count() | softirq_count())
/*
- * Are we doing bottom half or hardware interrupt processing?
+ * Macros to retrieve the current execution context:
*
- * in_irq() - We're in (hard) IRQ context
+ * in_nmi() - We're in NMI context
+ * in_hardirq() - We're in hard IRQ context
+ * in_serving_softirq() - We're in softirq context
+ * in_task() - We're in task context
+ */
+#define in_nmi() (nmi_count())
+#define in_hardirq() (hardirq_count())
+#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
+#define in_task() (!(in_nmi() | in_hardirq() | in_serving_softirq()))
+
+/*
+ * The following macros are deprecated and should not be used in new code:
+ * in_irq() - Obsolete version of in_hardirq()
* in_softirq() - We have BH disabled, or are processing softirqs
* in_interrupt() - We're in NMI,IRQ,SoftIRQ context or have BH disabled
- * in_serving_softirq() - We're in softirq context
- * in_nmi() - We're in NMI context
- * in_task() - We're in task context
- *
- * Note: due to the BH disabled confusion: in_softirq(),in_interrupt() really
- * should not be used in new code.
*/
#define in_irq() (hardirq_count())
#define in_softirq() (softirq_count())
#define in_interrupt() (irq_count())
-#define in_nmi() (preempt_count() & NMI_MASK)
-#define in_task() (!(preempt_count() & \
- (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
/*
* The preempt_count offset after preempt_disable();
@@ -127,7 +121,7 @@
/*
* The preempt_count offset after spin_lock()
*/
-#if !defined(CONFIG_PREEMPT_RT_FULL)
+#if !defined(CONFIG_PREEMPT_RT)
#define PREEMPT_LOCK_OFFSET PREEMPT_DISABLE_OFFSET
#else
#define PREEMPT_LOCK_OFFSET 0
@@ -214,42 +208,17 @@
preempt_count_dec(); \
} while (0)
-#ifdef CONFIG_PREEMPT_RT_BASE
+#ifndef CONFIG_PREEMPT_RT
# define preempt_enable_no_resched() sched_preempt_enable_no_resched()
-# define preempt_check_resched_rt() preempt_check_resched()
+# define preempt_check_resched_rt() barrier()
#else
# define preempt_enable_no_resched() preempt_enable()
-# define preempt_check_resched_rt() barrier();
+# define preempt_check_resched_rt() preempt_check_resched()
#endif
#define preemptible() (preempt_count() == 0 && !irqs_disabled())
-#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
-
-extern void migrate_disable(void);
-extern void migrate_enable(void);
-
-int __migrate_disabled(struct task_struct *p);
-
-#elif !defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
-
-extern void migrate_disable(void);
-extern void migrate_enable(void);
-static inline int __migrate_disabled(struct task_struct *p)
-{
- return 0;
-}
-
-#else
-#define migrate_disable() preempt_disable()
-#define migrate_enable() preempt_enable()
-static inline int __migrate_disabled(struct task_struct *p)
-{
- return 0;
-}
-#endif
-
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
#define preempt_enable() \
do { \
barrier(); \
@@ -270,14 +239,19 @@
__preempt_schedule(); \
} while (0)
+/*
+ * Open-code preempt_check_resched() here because it is not exported
+ * to modules, while preempt_lazy_enable() is used by modules via
+ * local_unlock() and bpf_enable_instrumentation().
+ */
#define preempt_lazy_enable() \
do { \
dec_preempt_lazy_count(); \
barrier(); \
- preempt_check_resched(); \
+ if (should_resched(0)) \
+ __preempt_schedule(); \
} while (0)
-#else /* !CONFIG_PREEMPT */
+#else /* !CONFIG_PREEMPTION */
#define preempt_enable() \
do { \
barrier(); \
@@ -297,7 +271,7 @@
} while (0)
#define preempt_check_resched() do { } while (0)
-#endif /* CONFIG_PREEMPT */
+#endif /* CONFIG_PREEMPTION */
#define preempt_disable_notrace() \
do { \
@@ -331,13 +305,9 @@
#define preempt_check_resched_rt() barrier()
#define preemptible() 0
-#define migrate_disable() barrier()
-#define migrate_enable() barrier()
+#define preempt_lazy_disable() barrier()
+#define preempt_lazy_enable() barrier()
-static inline int __migrate_disabled(struct task_struct *p)
-{
- return 0;
-}
#endif /* CONFIG_PREEMPT_COUNT */
#ifdef MODULE
@@ -360,7 +330,7 @@
set_preempt_need_resched(); \
} while (0)
-#ifdef CONFIG_PREEMPT_RT_FULL
+#ifdef CONFIG_PREEMPT_RT
# define preempt_disable_rt() preempt_disable()
# define preempt_enable_rt() preempt_enable()
# define preempt_disable_nort() barrier()
@@ -422,4 +392,78 @@
#endif
+#ifdef CONFIG_SMP
+
+/*
+ * Migrate-Disable and why it is undesired.
+ *
+ * When a preempted task becomes eligible to run under the ideal model (IOW it
+ * becomes one of the M highest priority tasks), it might still have to wait
+ * for the preemptee's migrate_disable() section to complete, thereby
+ * suffering a reduction in bandwidth for the exact duration of the
+ * migrate_disable() section.
+ *
+ * Per this argument, the change from preempt_disable() to migrate_disable()
+ * gets us:
+ *
+ * - a higher priority task gains reduced wake-up latency; with preempt_disable()
+ * it would have had to wait for the lower priority task.
+ *
+ * - a lower priority task, which under preempt_disable() could've instantly
+ * migrated away when another CPU becomes available, is now constrained
+ * by the ability to push the higher priority task away, which might itself be
+ * in a migrate_disable() section, reducing its available bandwidth.
+ *
+ * IOW it trades latency / moves the interference term, but it stays in the
+ * system, and as long as it remains unbounded, the system is not fully
+ * deterministic.
+ *
+ *
+ * The reason we have it anyway.
+ *
+ * PREEMPT_RT breaks a number of assumptions traditionally held. By forcing a
+ * number of primitives into becoming preemptible, they would also allow
+ * migration. This turns out to break a bunch of per-cpu usage. To this end,
+ * all these primitives employ migrate_disable() to restore this implicit
+ * assumption.
+ *
+ * This is a 'temporary' work-around at best. The correct solution is getting
+ * rid of the above assumptions and reworking the code to employ explicit
+ * per-cpu locking or short preempt-disable regions.
+ *
+ * The end goal must be to get rid of migrate_disable(), alternatively we need
+ * a schedulability theory that does not depend on arbitrary migration.
+ *
+ *
+ * Notes on the implementation.
+ *
+ * The implementation is particularly tricky since existing code patterns
+ * dictate that neither migrate_disable() nor migrate_enable() is allowed to block.
+ * This means that it cannot use cpus_read_lock() to serialize against hotplug,
+ * nor can it easily migrate itself into a pending affinity mask change on
+ * migrate_enable().
+ *
+ *
+ * Note: even non-work-conserving schedulers, such as semi-partitioned
+ *       ones, depend on migration, so migrate_disable() is not only a
+ *       problem for work-conserving schedulers.
+ *
+ */
+extern void migrate_disable(void);
+extern void migrate_enable(void);
+
+#else
+
+static inline void migrate_disable(void)
+{
+ preempt_lazy_disable();
+}
+
+static inline void migrate_enable(void)
+{
+ preempt_lazy_enable();
+}
+
+#endif /* CONFIG_SMP */
+
#endif /* __LINUX_PREEMPT_H */
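
Usage note (illustration only, not from this patch): unlike
preempt_disable(), a migrate_disable() section stays preemptible; it
only pins the task to its current CPU. That is enough to keep
smp_processor_id() stable, but mutual exclusion still needs a lock.
A minimal sketch, with a hypothetical caller:

/* Hypothetical example: pin to the current CPU without
 * disabling preemption.
 */
static void report_cpu(void)
{
	migrate_disable();	/* task may be preempted, but not moved */
	pr_info("running on CPU %d\n", smp_processor_id());
	migrate_enable();
}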
--
Gitblit v1.6.2