From 093a6c67005148ae32a5c9e4553491b9f5c2457b Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Fri, 10 May 2024 07:40:51 +0000
Subject: [PATCH] disable kernel build warning
---
kernel/include/linux/interrupt.h | 230 +++++++++++++++++++++++++++++++++++----------------------
1 file changed, 140 insertions(+), 90 deletions(-)
diff --git a/kernel/include/linux/interrupt.h b/kernel/include/linux/interrupt.h
index a3b5edb..6ff7d17 100644
--- a/kernel/include/linux/interrupt.h
+++ b/kernel/include/linux/interrupt.h
@@ -45,14 +45,14 @@
* IRQF_PERCPU - Interrupt is per cpu
* IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
* IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
- * registered first in an shared interrupt is considered for
+ * registered first in a shared interrupt is considered for
* performance reasons)
* IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished.
* Used by threaded interrupts which need to keep the
* irq line disabled until the threaded handler has been run.
* IRQF_NO_SUSPEND - Do not disable this IRQ during suspend. Does not guarantee
* that this interrupt will wake the system from a suspended
- * state. See Documentation/power/suspend-and-interrupts.txt
+ * state. See Documentation/power/suspend-and-interrupts.rst
* IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set
* IRQF_NO_THREAD - Interrupt cannot be threaded
* IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device
@@ -61,7 +61,9 @@
* interrupt handler after suspending interrupts. For system
* wakeup devices users need to implement wakeup detection in
* their interrupt handlers.
- * IRQF_NO_SOFTIRQ_CALL - Do not process softirqs in the irq thread context (RT)
+ * IRQF_NO_AUTOEN - Don't enable IRQ or NMI automatically when users request it.
+ * Users will enable it explicitly by enable_irq() or enable_nmi()
+ * later.
*/
#define IRQF_SHARED 0x00000080
#define IRQF_PROBE_SHARED 0x00000100
@@ -75,7 +77,7 @@
#define IRQF_NO_THREAD 0x00010000
#define IRQF_EARLY_RESUME 0x00020000
#define IRQF_COND_SUSPEND 0x00040000
-#define IRQF_NO_SOFTIRQ_CALL 0x00080000
+#define IRQF_NO_AUTOEN 0x00080000
#define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
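As an illustration of the new IRQF_NO_AUTOEN semantics: a driver that must not take interrupts before its hardware is fully programmed can request the line masked and enable it explicitly later. A minimal sketch only; foo_isr(), foo_setup() and the "foo" device name are invented for illustration:

#include <linux/interrupt.h>

static irqreturn_t foo_isr(int irq, void *dev_id)
{
        /* minimal hard-irq work: acknowledge the device */
        return IRQ_HANDLED;
}

static int foo_setup(unsigned int irq, void *priv)
{
        /* IRQF_NO_AUTOEN: the line stays disabled after request_irq() */
        int ret = request_irq(irq, foo_isr, IRQF_NO_AUTOEN, "foo", priv);

        if (ret)
                return ret;

        /* ... program the hardware ... */
        enable_irq(irq);        /* enable only once we are ready */
        return 0;
}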
@@ -142,6 +144,19 @@
irq_handler_t thread_fn,
unsigned long flags, const char *name, void *dev);
+/**
+ * request_irq - Add a handler for an interrupt line
+ * @irq: The interrupt line to allocate
+ * @handler: Function to be called when the IRQ occurs.
+ * Primary handler for threaded interrupts
+ * If NULL, the default primary handler is installed
+ * @flags: Handling flags
+ * @name: Name of the device generating this interrupt
+ * @dev: A cookie passed to the handler function
+ *
+ * This call allocates an interrupt and establishes a handler; see
+ * the documentation for request_threaded_irq() for details.
+ */
static inline int __must_check
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
const char *name, void *dev)
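The kernel-doc above defers to request_threaded_irq() for the full semantics. For orientation, a hedged sketch of the primary/threaded handler split that function implements; bar_primary(), bar_thread(), bar_setup() and the "bar" name are hypothetical:

static irqreturn_t bar_primary(int irq, void *dev_id)
{
        /* hard-irq context: quiesce the device, defer the real work */
        return IRQ_WAKE_THREAD;
}

static irqreturn_t bar_thread(int irq, void *dev_id)
{
        /* runs in a kernel thread and may sleep, e.g. for bus I/O */
        return IRQ_HANDLED;
}

static int bar_setup(unsigned int irq, void *priv)
{
        /* IRQF_ONESHOT keeps the line masked until bar_thread() returns */
        return request_threaded_irq(irq, bar_primary, bar_thread,
                                    IRQF_ONESHOT, "bar", priv);
}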
@@ -158,6 +173,10 @@
unsigned long flags, const char *devname,
void __percpu *percpu_dev_id);
+extern int __must_check
+request_nmi(unsigned int irq, irq_handler_t handler, unsigned long flags,
+ const char *name, void *dev);
+
static inline int __must_check
request_percpu_irq(unsigned int irq, irq_handler_t handler,
const char *devname, void __percpu *percpu_dev_id)
@@ -166,8 +185,15 @@
devname, percpu_dev_id);
}
+extern int __must_check
+request_percpu_nmi(unsigned int irq, irq_handler_t handler,
+ const char *devname, void __percpu *dev);
+
extern const void *free_irq(unsigned int, void *);
extern void free_percpu_irq(unsigned int, void __percpu *);
+
+extern const void *free_nmi(unsigned int irq, void *dev_id);
+extern void free_percpu_nmi(unsigned int irq, void __percpu *percpu_dev_id);
struct device;
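The per-CPU NMI calls this patch adds (request_percpu_nmi() here, the prepare/enable/disable/teardown calls below) pair up per CPU and must run on the CPU in question. A plausible usage sketch built on CPU-hotplug callbacks; foo_irq, both functions, and the use of IRQ_TYPE_NONE (from linux/irq.h) are assumptions for illustration:

static unsigned int foo_irq;    /* set up earlier via request_percpu_nmi() */

/* invoked on the CPU being brought online */
static int foo_starting_cpu(unsigned int cpu)
{
        if (prepare_percpu_nmi(foo_irq))
                return 0;       /* could fall back to a plain IRQ */
        enable_percpu_nmi(foo_irq, IRQ_TYPE_NONE);
        return 0;
}

/* invoked on the CPU going offline */
static int foo_dying_cpu(unsigned int cpu)
{
        disable_percpu_nmi(foo_irq);
        teardown_percpu_nmi(foo_irq);
        return 0;
}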
@@ -219,9 +245,19 @@
extern bool irq_percpu_is_enabled(unsigned int irq);
extern void irq_wake_thread(unsigned int irq, void *dev_id);
+extern void disable_nmi_nosync(unsigned int irq);
+extern void disable_percpu_nmi(unsigned int irq);
+extern void enable_nmi(unsigned int irq);
+extern void enable_percpu_nmi(unsigned int irq, unsigned int type);
+extern int prepare_percpu_nmi(unsigned int irq);
+extern void teardown_percpu_nmi(unsigned int irq);
+
+extern int irq_inject_interrupt(unsigned int irq);
+
/* The following three functions are for the core kernel use only. */
extern void suspend_device_irqs(void);
extern void resume_device_irqs(void);
+extern void rearm_wake_irq(unsigned int irq);
/**
* struct irq_affinity_notify - context for notification of IRQ affinity changes
@@ -243,16 +279,39 @@
void (*release)(struct kref *ref);
};
+#define IRQ_AFFINITY_MAX_SETS 4
+
/**
* struct irq_affinity - Description for automatic irq affinity assignments
* @pre_vectors: Don't apply affinity to @pre_vectors at beginning of
* the MSI(-X) vector space
* @post_vectors: Don't apply affinity to @post_vectors at end of
* the MSI(-X) vector space
+ * @nr_sets: The number of interrupt sets for which affinity
+ * spreading is required
+ * @set_size: Array holding the size of each interrupt set
+ * @calc_sets: Callback for calculating the number and size
+ * of interrupt sets
+ * @priv: Private data for usage by @calc_sets, usually a
+ * pointer to driver/device specific data.
*/
struct irq_affinity {
- int pre_vectors;
- int post_vectors;
+ unsigned int pre_vectors;
+ unsigned int post_vectors;
+ unsigned int nr_sets;
+ unsigned int set_size[IRQ_AFFINITY_MAX_SETS];
+ void (*calc_sets)(struct irq_affinity *, unsigned int nvecs);
+ void *priv;
+};
+
+/**
+ * struct irq_affinity_desc - Interrupt affinity descriptor
+ * @mask: cpumask to hold the affinity assignment
+ * @is_managed: 1 if the interrupt is managed internally
+ */
+struct irq_affinity_desc {
+ struct cpumask mask;
+ unsigned int is_managed : 1;
};
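To make the set-based spreading described in the kernel-doc concrete: a driver wanting two independently spread vector groups (say, read and write queues) behind one reserved admin vector might fill the structure as below. A sketch only; foo_calc_sets() and the even split are invented:

static void foo_calc_sets(struct irq_affinity *affd, unsigned int nvecs)
{
        affd->nr_sets = 2;
        affd->set_size[0] = nvecs / 2;          /* e.g. read queues  */
        affd->set_size[1] = nvecs - nvecs / 2;  /* e.g. write queues */
}

static struct irq_affinity foo_affd = {
        .pre_vectors = 1,               /* admin vector, not spread */
        .calc_sets   = foo_calc_sets,
};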
#if defined(CONFIG_SMP)
@@ -301,8 +360,11 @@
extern int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);
-struct cpumask *irq_create_affinity_masks(int nvec, const struct irq_affinity *affd);
-int irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity *affd);
+struct irq_affinity_desc *
+irq_create_affinity_masks(unsigned int nvec, struct irq_affinity *affd);
+
+unsigned int irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
+ const struct irq_affinity *affd);
#else /* CONFIG_SMP */
@@ -335,14 +397,15 @@
return 0;
}
-static inline struct cpumask *
-irq_create_affinity_masks(int nvec, const struct irq_affinity *affd)
+static inline struct irq_affinity_desc *
+irq_create_affinity_masks(unsigned int nvec, struct irq_affinity *affd)
{
return NULL;
}
-static inline int
-irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity *affd)
+static inline unsigned int
+irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
+ const struct irq_affinity *affd)
{
return maxvec;
}
@@ -429,7 +492,7 @@
bool state);
#ifdef CONFIG_IRQ_FORCED_THREADING
-# ifdef CONFIG_PREEMPT_RT_BASE
+# ifdef CONFIG_PREEMPT_RT
# define force_irqthreads (true)
# else
extern bool force_irqthreads;
@@ -476,14 +539,19 @@
IRQ_POLL_SOFTIRQ,
TASKLET_SOFTIRQ,
SCHED_SOFTIRQ,
- HRTIMER_SOFTIRQ, /* Unused, but kept as tools rely on the
- numbering. Sigh! */
+ HRTIMER_SOFTIRQ,
RCU_SOFTIRQ, /* Preferable RCU should always be the last softirq */
NR_SOFTIRQS
};
#define SOFTIRQ_STOP_IDLE_MASK (~(1 << RCU_SOFTIRQ))
+/* Softirqs where the handling might be long: */
+#define LONG_SOFTIRQ_MASK ((1 << NET_TX_SOFTIRQ) | \
+ (1 << NET_RX_SOFTIRQ) | \
+ (1 << BLOCK_SOFTIRQ) | \
+ (1 << IRQ_POLL_SOFTIRQ) | \
+ (1 << TASKLET_SOFTIRQ))
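Presumably the point of this mask is to let scheduling or idle code check whether a potentially long-running softirq is outstanding before waiting on softirq processing. A minimal hedged sketch of such a test; long_softirq_pending() is a hypothetical helper, not part of this patch:

/* true if any potentially long-running softirq is pending locally */
static inline bool long_softirq_pending(void)
{
        return !!(local_softirq_pending() & LONG_SOFTIRQ_MASK);
}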
/* map softirq index to softirq name. update 'softirq_to_name' in
* kernel/softirq.c when adding a new softirq.
@@ -499,10 +567,9 @@
void (*action)(struct softirq_action *);
};
-#ifndef CONFIG_PREEMPT_RT_FULL
asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void);
-static inline void thread_do_softirq(void) { do_softirq(); }
+
#ifdef __ARCH_HAS_DO_SOFTIRQ
void do_softirq_own_stack(void);
#else
@@ -511,27 +578,16 @@
__do_softirq();
}
#endif
-#else
-extern void thread_do_softirq(void);
-#endif
extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
extern void __raise_softirq_irqoff(unsigned int nr);
-#ifdef CONFIG_PREEMPT_RT_FULL
-extern void __raise_softirq_irqoff_ksoft(unsigned int nr);
-#else
-static inline void __raise_softirq_irqoff_ksoft(unsigned int nr)
-{
- __raise_softirq_irqoff(nr);
-}
-#endif
extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);
-extern void softirq_check_pending_idle(void);
DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
+DECLARE_PER_CPU(__u32, active_softirqs);
static inline struct task_struct *this_cpu_ksoftirqd(void)
{
@@ -539,6 +595,9 @@
}
/* Tasklets --- multithreaded analogue of BHs.
+
+ This API is deprecated. Please consider using threaded IRQs instead:
+ https://lore.kernel.org/lkml/20200716081538.2sivhkj4hcyrusem@linutronix.de
Main feature differentiating them from generic softirqs: a tasklet
runs on only one CPU simultaneously.
@@ -551,9 +610,8 @@
to be executed on some cpu at least once after this.
* If the tasklet is already scheduled, but its execution is still not
started, it will be executed only once.
- * If this tasklet is already running on another CPU, it is rescheduled
- for later.
- * Schedule must not be called from the tasklet itself (a lockup occurs)
+ * If this tasklet is already running on another CPU (or schedule is called
+ from tasklet itself), it is rescheduled for later.
* Tasklet is strictly serialized wrt itself, but not
wrt other tasklets. If a client needs some intertask synchronization,
it does so with spinlocks.
@@ -564,40 +622,53 @@
struct tasklet_struct *next;
unsigned long state;
atomic_t count;
- void (*func)(unsigned long);
+ bool use_callback;
+ union {
+ void (*func)(unsigned long data);
+ void (*callback)(struct tasklet_struct *t);
+ };
unsigned long data;
};
-#define DECLARE_TASKLET(name, func, data) \
-struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }
+#define DECLARE_TASKLET(name, _callback) \
+struct tasklet_struct name = { \
+ .count = ATOMIC_INIT(0), \
+ .callback = _callback, \
+ .use_callback = true, \
+}
-#define DECLARE_TASKLET_DISABLED(name, func, data) \
-struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }
+#define DECLARE_TASKLET_DISABLED(name, _callback) \
+struct tasklet_struct name = { \
+ .count = ATOMIC_INIT(1), \
+ .callback = _callback, \
+ .use_callback = true, \
+}
+#define from_tasklet(var, callback_tasklet, tasklet_fieldname) \
+ container_of(callback_tasklet, typeof(*var), tasklet_fieldname)
+
+#define DECLARE_TASKLET_OLD(name, _func) \
+struct tasklet_struct name = { \
+ .count = ATOMIC_INIT(0), \
+ .func = _func, \
+}
+
+#define DECLARE_TASKLET_DISABLED_OLD(name, _func) \
+struct tasklet_struct name = { \
+ .count = ATOMIC_INIT(1), \
+ .func = _func, \
+}
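The callback-style API ties a tasklet to its containing object through from_tasklet(), with tasklet_setup() (declared further down in this patch) doing the runtime initialization. A minimal sketch around an invented struct foo_dev:

struct foo_dev {
        struct tasklet_struct tl;
        unsigned long pending;
};

static void foo_tasklet_fn(struct tasklet_struct *t)
{
        /* recover the containing object from the tasklet pointer */
        struct foo_dev *fd = from_tasklet(fd, t, tl);

        /* ... bottom-half work against fd->pending ... */
}

static void foo_init(struct foo_dev *fd)
{
        tasklet_setup(&fd->tl, foo_tasklet_fn);
}

static irqreturn_t foo_irq_handler(int irq, void *dev_id)
{
        struct foo_dev *fd = dev_id;

        tasklet_schedule(&fd->tl);      /* defer to the bottom half */
        return IRQ_HANDLED;
}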
enum
{
TASKLET_STATE_SCHED, /* Tasklet is scheduled for execution */
- TASKLET_STATE_RUN, /* Tasklet is running (SMP only) */
- TASKLET_STATE_PENDING, /* Tasklet is pending */
- TASKLET_STATE_CHAINED /* Tasklet is chained */
+ TASKLET_STATE_RUN /* Tasklet is running (SMP only) */
};
-#define TASKLET_STATEF_SCHED (1 << TASKLET_STATE_SCHED)
-#define TASKLET_STATEF_RUN (1 << TASKLET_STATE_RUN)
-#define TASKLET_STATEF_PENDING (1 << TASKLET_STATE_PENDING)
-#define TASKLET_STATEF_CHAINED (1 << TASKLET_STATE_CHAINED)
-#define TASKLET_STATEF_RC (TASKLET_STATEF_RUN | TASKLET_STATEF_CHAINED)
-
-#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
+#ifdef CONFIG_SMP
static inline int tasklet_trylock(struct tasklet_struct *t)
{
return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
-}
-
-static inline int tasklet_tryunlock(struct tasklet_struct *t)
-{
- return cmpxchg(&t->state, TASKLET_STATEF_RUN, 0) == TASKLET_STATEF_RUN;
}
static inline void tasklet_unlock(struct tasklet_struct *t)
@@ -606,11 +677,12 @@
clear_bit(TASKLET_STATE_RUN, &(t)->state);
}
-extern void tasklet_unlock_wait(struct tasklet_struct *t);
-
+static inline void tasklet_unlock_wait(struct tasklet_struct *t)
+{
+ while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
+}
#else
#define tasklet_trylock(t) 1
-#define tasklet_tryunlock(t) 1
#define tasklet_unlock_wait(t) do { } while (0)
#define tasklet_unlock(t) do { } while (0)
#endif
@@ -644,42 +716,18 @@
smp_mb();
}
-extern void tasklet_enable(struct tasklet_struct *t);
+static inline void tasklet_enable(struct tasklet_struct *t)
+{
+ smp_mb__before_atomic();
+ atomic_dec(&t->count);
+}
+
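tasklet_enable() above is the counterpart of tasklet_disable(): a disabled tasklet can still be scheduled, but its callback will not run until the count drops back to zero. A short sketch, reusing the hypothetical foo_dev from the earlier example:

static void foo_reconfigure(struct foo_dev *fd)
{
        tasklet_disable(&fd->tl);   /* also waits for a running callback */
        /* ... safely update state that the callback reads ... */
        tasklet_enable(&fd->tl);
}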
extern void tasklet_kill(struct tasklet_struct *t);
extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
extern void tasklet_init(struct tasklet_struct *t,
void (*func)(unsigned long), unsigned long data);
-
-#ifdef CONFIG_PREEMPT_RT_FULL
-extern void softirq_early_init(void);
-#else
-static inline void softirq_early_init(void) { }
-#endif
-
-struct tasklet_hrtimer {
- struct hrtimer timer;
- struct tasklet_struct tasklet;
- enum hrtimer_restart (*function)(struct hrtimer *);
-};
-
-extern void
-tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
- enum hrtimer_restart (*function)(struct hrtimer *),
- clockid_t which_clock, enum hrtimer_mode mode);
-
-static inline
-void tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time,
- const enum hrtimer_mode mode)
-{
- hrtimer_start(&ttimer->timer, time, mode);
-}
-
-static inline
-void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer)
-{
- hrtimer_cancel(&ttimer->timer);
- tasklet_kill(&ttimer->tasklet);
-}
+extern void tasklet_setup(struct tasklet_struct *t,
+ void (*callback)(struct tasklet_struct *));
/*
* Autoprobing for irqs:
@@ -754,8 +802,10 @@
/*
* We want to know which function is an entrypoint of a hardirq or a softirq.
*/
-#define __irq_entry __attribute__((__section__(".irqentry.text")))
-#define __softirq_entry \
- __attribute__((__section__(".softirqentry.text")))
+#ifndef __irq_entry
+# define __irq_entry __section(".irqentry.text")
+#endif
+
+#define __softirq_entry __section(".softirqentry.text")
#endif
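For reference, these annotations are applied to function definitions so that the entry points land in the .irqentry.text/.softirqentry.text sections that tracing and fault-injection code keys off. A hypothetical example; the function name and role are invented:

/* hypothetical arch-level entry point, placed in .irqentry.text */
static void __irq_entry foo_handle_arch_irq(struct pt_regs *regs)
{
        /* ... decode and dispatch the pending interrupt ... */
}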
--
Gitblit v1.6.2