From 093a6c67005148ae32a5c9e4553491b9f5c2457b Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Fri, 10 May 2024 07:40:51 +0000
Subject: [PATCH] disable kernel build warning
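
Align kernel/include/linux/preempt.h with the mainline (non-RT) layout:
drop the CONFIG_PREEMPT_RT_FULL, CONFIG_PREEMPT_RT_BASE and
CONFIG_PREEMPT_LAZY conditionals, widen NMI_BITS from 1 to 4, guard the
preemption helpers with CONFIG_PREEMPTION instead of CONFIG_PREEMPT,
switch preempt_fold_need_resched() to tif_need_resched(), and provide
migrate_disable()/migrate_enable() as inline wrappers around
preempt_disable()/preempt_enable(). With the RT-only definitions gone,
the header no longer triggers the build warning this patch addresses.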
---
kernel/include/linux/preempt.h | 150 +++++++++++++------------------------------------
1 file changed, 40 insertions(+), 110 deletions(-)
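
[Editor's note, placed after the "---" separator so "git am" ignores it:
a minimal standalone sketch of how the preempt_count bit layout in the
first hunk derives its mask values. The *_BITS and *_SHIFT names mirror
the header; the MASK() helper and the main() driver are invented here
purely for illustration.]

  #include <stdio.h>

  /* Mirror of the layout after this patch: 8 preempt bits, 8 softirq
   * bits, 4 hardirq bits and now 4 NMI bits, packed from bit 0 up. */
  #define PREEMPT_BITS   8
  #define SOFTIRQ_BITS   8
  #define HARDIRQ_BITS   4
  #define NMI_BITS       4

  #define PREEMPT_SHIFT  0
  #define SOFTIRQ_SHIFT  (PREEMPT_SHIFT + PREEMPT_BITS)
  #define HARDIRQ_SHIFT  (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
  #define NMI_SHIFT      (HARDIRQ_SHIFT + HARDIRQ_BITS)

  /* Illustrative helper: a field of `bits` ones, shifted into place. */
  #define MASK(bits, shift) ((((1UL << (bits)) - 1)) << (shift))

  int main(void)
  {
          /* Prints 0x000000ff, 0x0000ff00, 0x000f0000, 0x00f00000 --
           * i.e. widening NMI_BITS from 1 to 4 is exactly what turns
           * NMI_MASK from 0x00100000 into 0x00f00000 in the comment. */
          printf("PREEMPT_MASK: 0x%08lx\n", MASK(PREEMPT_BITS, PREEMPT_SHIFT));
          printf("SOFTIRQ_MASK: 0x%08lx\n", MASK(SOFTIRQ_BITS, SOFTIRQ_SHIFT));
          printf("HARDIRQ_MASK: 0x%08lx\n", MASK(HARDIRQ_BITS, HARDIRQ_SHIFT));
          printf("NMI_MASK:     0x%08lx\n", MASK(NMI_BITS, NMI_SHIFT));
          return 0;
  }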
diff --git a/kernel/include/linux/preempt.h b/kernel/include/linux/preempt.h
index 9c74a01..7d9c1c0 100644
--- a/kernel/include/linux/preempt.h
+++ b/kernel/include/linux/preempt.h
@@ -26,13 +26,13 @@
* PREEMPT_MASK: 0x000000ff
* SOFTIRQ_MASK: 0x0000ff00
* HARDIRQ_MASK: 0x000f0000
- * NMI_MASK: 0x00100000
+ * NMI_MASK: 0x00f00000
* PREEMPT_NEED_RESCHED: 0x80000000
*/
#define PREEMPT_BITS 8
#define SOFTIRQ_BITS 8
#define HARDIRQ_BITS 4
-#define NMI_BITS 1
+#define NMI_BITS 4
#define PREEMPT_SHIFT 0
#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS)
@@ -51,14 +51,7 @@
#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
#define NMI_OFFSET (1UL << NMI_SHIFT)
-#ifndef CONFIG_PREEMPT_RT_FULL
-# define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
-#else
-# define SOFTIRQ_DISABLE_OFFSET (0)
-#endif
-
-/* We use the MSB mostly because its available */
-#define PREEMPT_NEED_RESCHED 0x80000000
+#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
#define PREEMPT_DISABLED (PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)
@@ -85,15 +78,9 @@
#include <asm/preempt.h>
#define hardirq_count() (preempt_count() & HARDIRQ_MASK)
+#define softirq_count() (preempt_count() & SOFTIRQ_MASK)
#define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
| NMI_MASK))
-#ifndef CONFIG_PREEMPT_RT_FULL
-# define softirq_count() (preempt_count() & SOFTIRQ_MASK)
-# define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
-#else
-# define softirq_count() ((unsigned long)current->softirq_nestcnt)
-extern int in_serving_softirq(void);
-#endif
/*
* Are we doing bottom half or hardware interrupt processing?
@@ -111,6 +98,7 @@
#define in_irq() (hardirq_count())
#define in_softirq() (softirq_count())
#define in_interrupt() (irq_count())
+#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
#define in_nmi() (preempt_count() & NMI_MASK)
#define in_task() (!(preempt_count() & \
(NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
@@ -127,11 +115,7 @@
/*
* The preempt_count offset after spin_lock()
*/
-#if !defined(CONFIG_PREEMPT_RT_FULL)
#define PREEMPT_LOCK_OFFSET PREEMPT_DISABLE_OFFSET
-#else
-#define PREEMPT_LOCK_OFFSET 0
-#endif
/*
* The preempt_count offset needed for things like:
@@ -180,31 +164,11 @@
#define preempt_count_inc() preempt_count_add(1)
#define preempt_count_dec() preempt_count_sub(1)
-#ifdef CONFIG_PREEMPT_LAZY
-#define add_preempt_lazy_count(val) do { preempt_lazy_count() += (val); } while (0)
-#define sub_preempt_lazy_count(val) do { preempt_lazy_count() -= (val); } while (0)
-#define inc_preempt_lazy_count() add_preempt_lazy_count(1)
-#define dec_preempt_lazy_count() sub_preempt_lazy_count(1)
-#define preempt_lazy_count() (current_thread_info()->preempt_lazy_count)
-#else
-#define add_preempt_lazy_count(val) do { } while (0)
-#define sub_preempt_lazy_count(val) do { } while (0)
-#define inc_preempt_lazy_count() do { } while (0)
-#define dec_preempt_lazy_count() do { } while (0)
-#define preempt_lazy_count() (0)
-#endif
-
#ifdef CONFIG_PREEMPT_COUNT
#define preempt_disable() \
do { \
preempt_count_inc(); \
- barrier(); \
-} while (0)
-
-#define preempt_lazy_disable() \
-do { \
- inc_preempt_lazy_count(); \
barrier(); \
} while (0)
@@ -214,42 +178,11 @@
preempt_count_dec(); \
} while (0)
-#ifdef CONFIG_PREEMPT_RT_BASE
-# define preempt_enable_no_resched() sched_preempt_enable_no_resched()
-# define preempt_check_resched_rt() preempt_check_resched()
-#else
-# define preempt_enable_no_resched() preempt_enable()
-# define preempt_check_resched_rt() barrier();
-#endif
+#define preempt_enable_no_resched() sched_preempt_enable_no_resched()
#define preemptible() (preempt_count() == 0 && !irqs_disabled())
-#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
-
-extern void migrate_disable(void);
-extern void migrate_enable(void);
-
-int __migrate_disabled(struct task_struct *p);
-
-#elif !defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
-
-extern void migrate_disable(void);
-extern void migrate_enable(void);
-static inline int __migrate_disabled(struct task_struct *p)
-{
- return 0;
-}
-
-#else
-#define migrate_disable() preempt_disable()
-#define migrate_enable() preempt_enable()
-static inline int __migrate_disabled(struct task_struct *p)
-{
- return 0;
-}
-#endif
-
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
#define preempt_enable() \
do { \
barrier(); \
@@ -270,24 +203,11 @@
__preempt_schedule(); \
} while (0)
-#define preempt_lazy_enable() \
-do { \
- dec_preempt_lazy_count(); \
- barrier(); \
- preempt_check_resched(); \
-} while (0)
-
-#else /* !CONFIG_PREEMPT */
+#else /* !CONFIG_PREEMPTION */
#define preempt_enable() \
do { \
barrier(); \
preempt_count_dec(); \
-} while (0)
-
-#define preempt_lazy_enable() \
-do { \
- dec_preempt_lazy_count(); \
- barrier(); \
} while (0)
#define preempt_enable_notrace() \
@@ -297,7 +217,7 @@
} while (0)
#define preempt_check_resched() do { } while (0)
-#endif /* CONFIG_PREEMPT */
+#endif /* CONFIG_PREEMPTION */
#define preempt_disable_notrace() \
do { \
@@ -328,16 +248,8 @@
#define preempt_disable_notrace() barrier()
#define preempt_enable_no_resched_notrace() barrier()
#define preempt_enable_notrace() barrier()
-#define preempt_check_resched_rt() barrier()
#define preemptible() 0
-#define migrate_disable() barrier()
-#define migrate_enable() barrier()
-
-static inline int __migrate_disabled(struct task_struct *p)
-{
- return 0;
-}
#endif /* CONFIG_PREEMPT_COUNT */
#ifdef MODULE
@@ -356,21 +268,9 @@
} while (0)
#define preempt_fold_need_resched() \
do { \
- if (tif_need_resched_now()) \
+ if (tif_need_resched()) \
set_preempt_need_resched(); \
} while (0)
-
-#ifdef CONFIG_PREEMPT_RT_FULL
-# define preempt_disable_rt() preempt_disable()
-# define preempt_enable_rt() preempt_enable()
-# define preempt_disable_nort() barrier()
-# define preempt_enable_nort() barrier()
-#else
-# define preempt_disable_rt() barrier()
-# define preempt_enable_rt() barrier()
-# define preempt_disable_nort() preempt_disable()
-# define preempt_enable_nort() preempt_enable()
-#endif
#ifdef CONFIG_PREEMPT_NOTIFIERS
@@ -422,4 +322,34 @@
#endif
+/**
+ * migrate_disable - Prevent migration of the current task
+ *
+ * Maps to preempt_disable() which also disables preemption. Use
+ * migrate_disable() to annotate that the intent is to prevent migration,
+ * but not necessarily preemption.
+ *
+ * Can be invoked nested like preempt_disable() and needs the corresponding
+ * number of migrate_enable() invocations.
+ */
+static __always_inline void migrate_disable(void)
+{
+ preempt_disable();
+}
+
+/**
+ * migrate_enable - Allow migration of the current task
+ *
+ * Counterpart to migrate_disable().
+ *
+ * As migrate_disable() can be invoked nested, only the outermost invocation
+ * reenables migration.
+ *
+ * Currently mapped to preempt_enable().
+ */
+static __always_inline void migrate_enable(void)
+{
+ preempt_enable();
+}
+
#endif /* __LINUX_PREEMPT_H */
--
Gitblit v1.6.2
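
[Editor's note, appended below the mail signature so "git am" discards
it: a hedged sketch of the nesting contract described by the new
migrate_disable()/migrate_enable() kernel-doc. The two example_*
callers are invented for illustration; only the migrate_* calls come
from the patch, and with this mapping they behave exactly like
preempt_disable()/preempt_enable() pairs.]

  /* Kernel context assumed: <linux/preempt.h> as patched above. */
  static void example_inner(void)
  {
          migrate_disable();      /* preempt_count: 1 -> 2 */
          /* ... access per-CPU state; task stays on this CPU ... */
          migrate_enable();       /* preempt_count: 2 -> 1, still pinned */
  }

  static void example_outer(void)
  {
          migrate_disable();      /* preempt_count: 0 -> 1 */
          example_inner();        /* nesting is allowed ... */
          migrate_enable();       /* ... and only this outermost enable
                                   * makes the task migratable again */
  }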