2024-05-10 37f49e37ab4cb5d0bc4c60eb5c6d4dd57db767bb
kernel/include/linux/interrupt.h
@@ -45,14 +45,14 @@
  * IRQF_PERCPU - Interrupt is per cpu
  * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
  * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
- *                registered first in an shared interrupt is considered for
+ *                registered first in a shared interrupt is considered for
  *                performance reasons)
  * IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished.
  *                Used by threaded interrupts which need to keep the
  *                irq line disabled until the threaded handler has been run.
  * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend. Does not guarantee
  *                   that this interrupt will wake the system from a suspended
- *                   state. See Documentation/power/suspend-and-interrupts.txt
+ *                   state. See Documentation/power/suspend-and-interrupts.rst
  * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set
  * IRQF_NO_THREAD - Interrupt cannot be threaded
  * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device
@@ -61,6 +61,9 @@
  *                interrupt handler after suspending interrupts. For system
  *                wakeup devices users need to implement wakeup detection in
  *                their interrupt handlers.
+ * IRQF_NO_AUTOEN - Don't enable IRQ or NMI automatically when users request it.
+ *                Users will enable it explicitly by enable_irq() or enable_nmi()
+ *                later.
  */
 #define IRQF_SHARED             0x00000080
 #define IRQF_PROBE_SHARED       0x00000100
@@ -74,6 +77,7 @@
 #define IRQF_NO_THREAD          0x00010000
 #define IRQF_EARLY_RESUME       0x00020000
 #define IRQF_COND_SUSPEND       0x00040000
+#define IRQF_NO_AUTOEN          0x00080000

 #define IRQF_TIMER              (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)

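The new IRQF_NO_AUTOEN flag lets a driver register a handler without the line being unmasked as a side effect of request_irq(); the driver unmasks it later with enable_irq() (or enable_nmi()). A minimal sketch of that pattern, assuming a hypothetical foo driver (the foo_* names and the probe context are illustrative, not part of this patch):

    #include <linux/interrupt.h>

    static irqreturn_t foo_handler(int irq, void *dev_id)
    {
            return IRQ_HANDLED;
    }

    static int foo_setup_irq(unsigned int irq, void *dev)
    {
            /* Register the handler but leave the line masked. */
            int ret = request_irq(irq, foo_handler, IRQF_NO_AUTOEN, "foo", dev);

            if (ret)
                    return ret;

            /* ... finish initialising the device ... */

            /* Unmask only once the device may legitimately raise the IRQ. */
            enable_irq(irq);
            return 0;
    }
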
@@ -140,6 +144,19 @@
                                      irq_handler_t thread_fn,
                                      unsigned long flags, const char *name, void *dev);

+/**
+ * request_irq - Add a handler for an interrupt line
+ * @irq:      The interrupt line to allocate
+ * @handler:  Function to be called when the IRQ occurs.
+ *            Primary handler for threaded interrupts
+ *            If NULL, the default primary handler is installed
+ * @flags:    Handling flags
+ * @name:     Name of the device generating this interrupt
+ * @dev:      A cookie passed to the handler function
+ *
+ * This call allocates an interrupt and establishes a handler; see
+ * the documentation for request_threaded_irq() for details.
+ */
 static inline int __must_check
 request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
             const char *name, void *dev)
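The kernel-doc added above describes request_irq() as a convenience wrapper around request_threaded_irq(), with @handler acting as the primary handler and a NULL @handler selecting the default one. For comparison, a hedged sketch of the threaded form the comment refers to (foo_* names are invented; IRQF_ONESHOT is required when the primary handler is NULL):

    static irqreturn_t foo_thread_fn(int irq, void *dev_id)
    {
            /* Runs in task context and may sleep, e.g. for I2C/SPI access. */
            return IRQ_HANDLED;
    }

    ret = request_threaded_irq(irq, NULL, foo_thread_fn,
                               IRQF_ONESHOT, "foo", dev);
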
@@ -156,6 +173,10 @@
                                    unsigned long flags, const char *devname,
                                    void __percpu *percpu_dev_id);

+extern int __must_check
+request_nmi(unsigned int irq, irq_handler_t handler, unsigned long flags,
+            const char *name, void *dev);
+
 static inline int __must_check
 request_percpu_irq(unsigned int irq, irq_handler_t handler,
                    const char *devname, void __percpu *percpu_dev_id)
@@ -164,8 +185,15 @@
                                       devname, percpu_dev_id);
 }

+extern int __must_check
+request_percpu_nmi(unsigned int irq, irq_handler_t handler,
+                   const char *devname, void __percpu *dev);
+
 extern const void *free_irq(unsigned int, void *);
 extern void free_percpu_irq(unsigned int, void __percpu *);
+
+extern const void *free_nmi(unsigned int irq, void *dev_id);
+extern void free_percpu_nmi(unsigned int irq, void __percpu *percpu_dev_id);

 struct device;

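request_nmi()/free_nmi() mirror request_irq()/free_irq() but ask the irqchip to deliver the line as an NMI, which only works where the architecture and interrupt controller support it (e.g. arm64 pseudo-NMI). A hedged sketch of the lifecycle, combined with the IRQF_NO_AUTOEN flag added earlier in this patch (foo_* names are illustrative):

    static irqreturn_t foo_nmi_handler(int irq, void *dev_id)
    {
            /* NMI context: no sleeping, no regular spinlocks; keep it short. */
            return IRQ_HANDLED;
    }

    /* Line stays masked until enable_nmi(); NMI lines cannot be shared. */
    ret = request_nmi(irq, foo_nmi_handler, IRQF_NO_AUTOEN, "foo-nmi", dev);
    if (ret)
            return ret;
    enable_nmi(irq);
    /* ... */
    free_nmi(irq, dev);
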
@@ -217,9 +245,19 @@
 extern bool irq_percpu_is_enabled(unsigned int irq);
 extern void irq_wake_thread(unsigned int irq, void *dev_id);

+extern void disable_nmi_nosync(unsigned int irq);
+extern void disable_percpu_nmi(unsigned int irq);
+extern void enable_nmi(unsigned int irq);
+extern void enable_percpu_nmi(unsigned int irq, unsigned int type);
+extern int prepare_percpu_nmi(unsigned int irq);
+extern void teardown_percpu_nmi(unsigned int irq);
+
+extern int irq_inject_interrupt(unsigned int irq);
+
 /* The following three functions are for the core kernel use only. */
 extern void suspend_device_irqs(void);
 extern void resume_device_irqs(void);
+extern void rearm_wake_irq(unsigned int irq);

 /**
  * struct irq_affinity_notify - context for notification of IRQ affinity changes
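The per-CPU NMI variants split setup into a system-wide request and per-CPU arming, since each CPU has its own copy of the line. A hedged sketch of the usual flow (handler and dev-id names are illustrative; real users typically run the per-CPU half from CPU-hotplug callbacks):

    #include <linux/irq.h>          /* IRQ_TYPE_NONE */

    /* Once, from a CPU-agnostic context: */
    err = request_percpu_nmi(irq, foo_nmi_handler, "foo", percpu_dev);

    /* On each CPU that should receive the NMI: */
    err = prepare_percpu_nmi(irq);
    if (!err)
            enable_percpu_nmi(irq, IRQ_TYPE_NONE);

    /* Mirror image on teardown, again per CPU: */
    disable_percpu_nmi(irq);
    teardown_percpu_nmi(irq);
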
@@ -241,16 +279,39 @@
        void (*release)(struct kref *ref);
 };

+#define IRQ_AFFINITY_MAX_SETS  4
+
 /**
  * struct irq_affinity - Description for automatic irq affinity assignements
  * @pre_vectors:   Don't apply affinity to @pre_vectors at beginning of
  *                 the MSI(-X) vector space
  * @post_vectors:  Don't apply affinity to @post_vectors at end of
  *                 the MSI(-X) vector space
+ * @nr_sets:       The number of interrupt sets for which affinity
+ *                 spreading is required
+ * @set_size:      Array holding the size of each interrupt set
+ * @calc_sets:     Callback for calculating the number and size
+ *                 of interrupt sets
+ * @priv:          Private data for usage by @calc_sets, usually a
+ *                 pointer to driver/device specific data.
  */
 struct irq_affinity {
-       int pre_vectors;
-       int post_vectors;
+       unsigned int pre_vectors;
+       unsigned int post_vectors;
+       unsigned int nr_sets;
+       unsigned int set_size[IRQ_AFFINITY_MAX_SETS];
+       void (*calc_sets)(struct irq_affinity *, unsigned int nvecs);
+       void *priv;
+};
+
+/**
+ * struct irq_affinity_desc - Interrupt affinity descriptor
+ * @mask:       cpumask to hold the affinity assignment
+ * @is_managed: 1 if the interrupt is managed internally
+ */
+struct irq_affinity_desc {
+       struct cpumask mask;
+       unsigned int is_managed : 1;
 };

 #if defined(CONFIG_SMP)
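The extended struct irq_affinity lets a driver split the automatically spread vectors into multiple sets, with @calc_sets deciding the split once the final vector count is known. A hedged sketch of how a PCI driver might fill it in (the two-set split and the foo_* names are illustrative, not mandated by this header):

    #include <linux/pci.h>
    #include <linux/interrupt.h>

    static void foo_calc_sets(struct irq_affinity *affd, unsigned int nvecs)
    {
            /* Divide the spread vectors into two queue groups. */
            affd->nr_sets = 2;
            affd->set_size[0] = nvecs / 2;
            affd->set_size[1] = nvecs - nvecs / 2;
    }

    struct irq_affinity affd = {
            .pre_vectors = 1,               /* e.g. one admin/config vector */
            .calc_sets   = foo_calc_sets,
    };

    nr = pci_alloc_irq_vectors_affinity(pdev, 2, max_vecs,
                                        PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
                                        &affd);
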
@@ -299,8 +360,11 @@
 extern int
 irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);

-struct cpumask *irq_create_affinity_masks(int nvec, const struct irq_affinity *affd);
-int irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity *affd);
+struct irq_affinity_desc *
+irq_create_affinity_masks(unsigned int nvec, struct irq_affinity *affd);
+
+unsigned int irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
+                                       const struct irq_affinity *affd);

 #else /* CONFIG_SMP */

@@ -333,14 +397,15 @@
        return 0;
 }

-static inline struct cpumask *
-irq_create_affinity_masks(int nvec, const struct irq_affinity *affd)
+static inline struct irq_affinity_desc *
+irq_create_affinity_masks(unsigned int nvec, struct irq_affinity *affd)
 {
        return NULL;
 }

-static inline int
-irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity *affd)
+static inline unsigned int
+irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
+                          const struct irq_affinity *affd)
 {
        return maxvec;
 }
@@ -427,7 +492,11 @@
                               bool state);

 #ifdef CONFIG_IRQ_FORCED_THREADING
+# ifdef CONFIG_PREEMPT_RT
+#  define force_irqthreads     (true)
+# else
 extern bool force_irqthreads;
+# endif
 #else
 #define force_irqthreads       (0)
 #endif
@@ -470,14 +539,19 @@
        IRQ_POLL_SOFTIRQ,
        TASKLET_SOFTIRQ,
        SCHED_SOFTIRQ,
-       HRTIMER_SOFTIRQ, /* Unused, but kept as tools rely on the
-                           numbering. Sigh! */
+       HRTIMER_SOFTIRQ,
        RCU_SOFTIRQ,    /* Preferable RCU should always be the last softirq */

        NR_SOFTIRQS
 };

 #define SOFTIRQ_STOP_IDLE_MASK (~(1 << RCU_SOFTIRQ))
+/* Softirq's where the handling might be long: */
+#define LONG_SOFTIRQ_MASK ((1 << NET_TX_SOFTIRQ)   | \
+                           (1 << NET_RX_SOFTIRQ)   | \
+                           (1 << BLOCK_SOFTIRQ)    | \
+                           (1 << IRQ_POLL_SOFTIRQ) | \
+                           (1 << TASKLET_SOFTIRQ))

 /* map softirq index to softirq name. update 'softirq_to_name' in
  * kernel/softirq.c when adding a new softirq.
@@ -513,6 +587,7 @@
 extern void raise_softirq(unsigned int nr);

 DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
+DECLARE_PER_CPU(__u32, active_softirqs);

 static inline struct task_struct *this_cpu_ksoftirqd(void)
 {
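Together with LONG_SOFTIRQ_MASK above, the per-CPU active_softirqs bitmap lets other kernel code test whether a CPU is currently inside one of the potentially long-running softirqs. The consumer is not part of this header; a hedged sketch of what such a check could look like (the helper name is invented for illustration):

    #include <linux/interrupt.h>
    #include <linux/percpu.h>

    static bool cpu_in_long_softirq(int cpu)
    {
            /* Non-zero overlap: a long-running softirq is in flight on @cpu. */
            return !!(per_cpu(active_softirqs, cpu) & LONG_SOFTIRQ_MASK);
    }
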
@@ -520,6 +595,9 @@
 }

 /* Tasklets --- multithreaded analogue of BHs.
+
+   This API is deprecated. Please consider using threaded IRQs instead:
+   https://lore.kernel.org/lkml/20200716081538.2sivhkj4hcyrusem@linutronix.de

    Main feature differing them of generic softirqs: tasklet
    is running only on one CPU simultaneously.
@@ -544,16 +622,42 @@
        struct tasklet_struct *next;
        unsigned long state;
        atomic_t count;
-       void (*func)(unsigned long);
+       bool use_callback;
+       union {
+               void (*func)(unsigned long data);
+               void (*callback)(struct tasklet_struct *t);
+       };
        unsigned long data;
 };

-#define DECLARE_TASKLET(name, func, data) \
-struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }
+#define DECLARE_TASKLET(name, _callback)        \
+struct tasklet_struct name = {                  \
+       .count = ATOMIC_INIT(0),                 \
+       .callback = _callback,                   \
+       .use_callback = true,                    \
+}

-#define DECLARE_TASKLET_DISABLED(name, func, data) \
-struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }
+#define DECLARE_TASKLET_DISABLED(name, _callback) \
+struct tasklet_struct name = {                  \
+       .count = ATOMIC_INIT(1),                 \
+       .callback = _callback,                   \
+       .use_callback = true,                    \
+}

+#define from_tasklet(var, callback_tasklet, tasklet_fieldname) \
+       container_of(callback_tasklet, typeof(*var), tasklet_fieldname)
+
+#define DECLARE_TASKLET_OLD(name, _func)        \
+struct tasklet_struct name = {                  \
+       .count = ATOMIC_INIT(0),                 \
+       .func = _func,                           \
+}
+
+#define DECLARE_TASKLET_DISABLED_OLD(name, _func) \
+struct tasklet_struct name = {                  \
+       .count = ATOMIC_INIT(1),                 \
+       .func = _func,                           \
+}

 enum
 {
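The reworked DECLARE_TASKLET() takes a callback that receives the tasklet itself instead of an opaque unsigned long, and from_tasklet() recovers the object embedding it; the *_OLD variants keep the previous calling convention for unconverted users. A hedged sketch of the new-style static declaration (foo_* names are illustrative):

    static void foo_do_bottom_half(struct tasklet_struct *t)
    {
            /* Deferred work, executed later in softirq context. */
    }

    static DECLARE_TASKLET(foo_tasklet, foo_do_bottom_half);

    /* From the interrupt handler: */
    tasklet_schedule(&foo_tasklet);
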
@@ -622,31 +726,8 @@
 extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
 extern void tasklet_init(struct tasklet_struct *t,
                          void (*func)(unsigned long), unsigned long data);
-
-struct tasklet_hrtimer {
-       struct hrtimer timer;
-       struct tasklet_struct tasklet;
-       enum hrtimer_restart (*function)(struct hrtimer *);
-};
-
-extern void
-tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
-                     enum hrtimer_restart (*function)(struct hrtimer *),
-                     clockid_t which_clock, enum hrtimer_mode mode);
-
-static inline
-void tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time,
-                           const enum hrtimer_mode mode)
-{
-       hrtimer_start(&ttimer->timer, time, mode);
-}
-
-static inline
-void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer)
-{
-       hrtimer_cancel(&ttimer->timer);
-       tasklet_kill(&ttimer->tasklet);
-}
+extern void tasklet_setup(struct tasklet_struct *t,
+                          void (*callback)(struct tasklet_struct *));

 /*
  * Autoprobing for irqs:
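tasklet_setup() is the dynamic counterpart of the new DECLARE_TASKLET(): it initialises a tasklet embedded in a driver structure with a callback taking the tasklet pointer, replacing tasklet_init()'s unsigned long cookie. A hedged sketch (struct foo and its fields are invented):

    struct foo {
            struct tasklet_struct tasklet;
            unsigned int pending;
    };

    static void foo_tasklet_cb(struct tasklet_struct *t)
    {
            /* Recover the object that embeds the tasklet. */
            struct foo *foo = from_tasklet(foo, t, tasklet);

            foo->pending = 0;
    }

    /* During probe/init: */
    tasklet_setup(&foo->tasklet, foo_tasklet_cb);
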
@@ -721,8 +802,10 @@
 /*
  * We want to know which function is an entrypoint of a hardirq or a softirq.
  */
-#define __irq_entry __attribute__((__section__(".irqentry.text")))
-#define __softirq_entry \
-       __attribute__((__section__(".softirqentry.text")))
+#ifndef __irq_entry
+# define __irq_entry __section(".irqentry.text")
+#endif
+
+#define __softirq_entry __section(".softirqentry.text")

 #endif