2023-12-11 1f93a7dfd1f8d5ff7a5c53246c7534fe2332d6f4
kernel/include/linux/interrupt.h
@@ -45,14 +45,14 @@
  * IRQF_PERCPU - Interrupt is per cpu
  * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
  * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
- *                registered first in an shared interrupt is considered for
+ *                registered first in a shared interrupt is considered for
  *                performance reasons)
  * IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished.
  *                Used by threaded interrupts which need to keep the
  *                irq line disabled until the threaded handler has been run.
  * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend. Does not guarantee
  *                   that this interrupt will wake the system from a suspended
- *                   state. See Documentation/power/suspend-and-interrupts.txt
+ *                   state. See Documentation/power/suspend-and-interrupts.rst
  * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set
  * IRQF_NO_THREAD - Interrupt cannot be threaded
  * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device
@@ -140,6 +140,19 @@
                                          irq_handler_t thread_fn,
                                          unsigned long flags, const char *name, void *dev);
 
+/**
+ * request_irq - Add a handler for an interrupt line
+ * @irq:      The interrupt line to allocate
+ * @handler:  Function to be called when the IRQ occurs.
+ *            Primary handler for threaded interrupts
+ *            If NULL, the default primary handler is installed
+ * @flags:    Handling flags
+ * @name:     Name of the device generating this interrupt
+ * @dev:      A cookie passed to the handler function
+ *
+ * This call allocates an interrupt and establishes a handler; see
+ * the documentation for request_threaded_irq() for details.
+ */
 static inline int __must_check
 request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
             const char *name, void *dev)
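For reference, a minimal sketch of how a driver might call request_irq() as documented above. The foo_* names and the device structure are hypothetical, not part of this header:

struct foo_device {                     /* hypothetical driver state */
        void __iomem *regs;
        int irq;
};

static irqreturn_t foo_interrupt(int irq, void *dev)
{
        struct foo_device *foo = dev;   /* the cookie passed to request_irq() */

        /* acknowledge the hardware via foo->regs, then report completion */
        return IRQ_HANDLED;
}

static int foo_setup_irq(struct foo_device *foo)
{
        /* no special flags; "foo" appears in /proc/interrupts */
        return request_irq(foo->irq, foo_interrupt, 0, "foo", foo);
}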
@@ -156,6 +169,10 @@
                            unsigned long flags, const char *devname,
                            void __percpu *percpu_dev_id);
 
+extern int __must_check
+request_nmi(unsigned int irq, irq_handler_t handler, unsigned long flags,
+            const char *name, void *dev);
+
 static inline int __must_check
 request_percpu_irq(unsigned int irq, irq_handler_t handler,
                    const char *devname, void __percpu *percpu_dev_id)
@@ -164,8 +181,15 @@
                                       devname, percpu_dev_id);
 }
 
+extern int __must_check
+request_percpu_nmi(unsigned int irq, irq_handler_t handler,
+                   const char *devname, void __percpu *dev);
+
 extern const void *free_irq(unsigned int, void *);
 extern void free_percpu_irq(unsigned int, void __percpu *);
+
+extern const void *free_nmi(unsigned int irq, void *dev_id);
+extern void free_percpu_nmi(unsigned int irq, void __percpu *percpu_dev_id);
 
 struct device;
 
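A hedged sketch of how the request_nmi()/free_nmi() pair declared above might be used on a line the underlying irqchip can deliver as an NMI. The foo_* names are hypothetical; the handler runs in NMI context and must not block:

static irqreturn_t foo_nmi(int irq, void *dev)
{
        /* NMI context: keep this short, no sleeping locks */
        return IRQ_HANDLED;
}

static int foo_setup_nmi(unsigned int irq, void *cookie)
{
        /* shared/polled flags are not valid for NMIs */
        return request_nmi(irq, foo_nmi, 0, "foo-nmi", cookie);
}

static void foo_release_nmi(unsigned int irq, void *cookie)
{
        free_nmi(irq, cookie);
}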
@@ -217,9 +241,19 @@
 extern bool irq_percpu_is_enabled(unsigned int irq);
 extern void irq_wake_thread(unsigned int irq, void *dev_id);
 
+extern void disable_nmi_nosync(unsigned int irq);
+extern void disable_percpu_nmi(unsigned int irq);
+extern void enable_nmi(unsigned int irq);
+extern void enable_percpu_nmi(unsigned int irq, unsigned int type);
+extern int prepare_percpu_nmi(unsigned int irq);
+extern void teardown_percpu_nmi(unsigned int irq);
+
+extern int irq_inject_interrupt(unsigned int irq);
+
 /* The following three functions are for the core kernel use only. */
 extern void suspend_device_irqs(void);
 extern void resume_device_irqs(void);
+extern void rearm_wake_irq(unsigned int irq);
 
 /**
  * struct irq_affinity_notify - context for notification of IRQ affinity changes
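A hedged sketch of the per-CPU NMI lifecycle built from the declarations above, in the style of a PMU driver. The foo_* names and the per-CPU cookie are hypothetical; prepare_percpu_nmi() and enable_percpu_nmi() are expected to run on each target CPU (for example from CPU hotplug callbacks), and IRQ_TYPE_NONE is assumed to come from <linux/irq.h>:

static DEFINE_PER_CPU(void *, foo_nmi_cookie);  /* hypothetical per-CPU state */

static irqreturn_t foo_percpu_nmi(int irq, void *dev)
{
        /* runs in NMI context on the local CPU */
        return IRQ_HANDLED;
}

static int foo_request(unsigned int irq)
{
        /* one registration covers the whole per-CPU interrupt line */
        return request_percpu_nmi(irq, foo_percpu_nmi, "foo-pmu", &foo_nmi_cookie);
}

static void foo_starting_cpu(unsigned int irq)  /* called on the target CPU */
{
        if (!prepare_percpu_nmi(irq))
                enable_percpu_nmi(irq, IRQ_TYPE_NONE);
}

static void foo_dying_cpu(unsigned int irq)     /* called on the target CPU */
{
        disable_percpu_nmi(irq);
        teardown_percpu_nmi(irq);
}

On final teardown, free_percpu_nmi(irq, &foo_nmi_cookie) would release the line.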
@@ -241,16 +275,39 @@
        void (*release)(struct kref *ref);
 };
 
+#define IRQ_AFFINITY_MAX_SETS  4
+
 /**
  * struct irq_affinity - Description for automatic irq affinity assignements
  * @pre_vectors:        Don't apply affinity to @pre_vectors at beginning of
  *                      the MSI(-X) vector space
  * @post_vectors:       Don't apply affinity to @post_vectors at end of
  *                      the MSI(-X) vector space
+ * @nr_sets:            The number of interrupt sets for which affinity
+ *                      spreading is required
+ * @set_size:           Array holding the size of each interrupt set
+ * @calc_sets:          Callback for calculating the number and size
+ *                      of interrupt sets
+ * @priv:               Private data for usage by @calc_sets, usually a
+ *                      pointer to driver/device specific data.
  */
 struct irq_affinity {
-       int     pre_vectors;
-       int     post_vectors;
+       unsigned int    pre_vectors;
+       unsigned int    post_vectors;
+       unsigned int    nr_sets;
+       unsigned int    set_size[IRQ_AFFINITY_MAX_SETS];
+       void            (*calc_sets)(struct irq_affinity *, unsigned int nvecs);
+       void            *priv;
+};
+
+/**
+ * struct irq_affinity_desc - Interrupt affinity descriptor
+ * @mask:       cpumask to hold the affinity assignment
+ * @is_managed: 1 if the interrupt is managed internally
+ */
+struct irq_affinity_desc {
+       struct cpumask  mask;
+       unsigned int    is_managed : 1;
 };
 
 #if defined(CONFIG_SMP)
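As a hedged illustration of the extended struct irq_affinity, this is roughly how a PCI driver might describe two interrupt sets (for example separate read and write queue groups) via @calc_sets. The foo_* names and the 50/50 split are hypothetical, and the sketch assumes the PCI core helper pci_alloc_irq_vectors_affinity() from <linux/pci.h>:

/* Split whatever remains after pre_vectors into two sets. */
static void foo_calc_sets(struct irq_affinity *affd, unsigned int nvecs)
{
        affd->nr_sets     = 2;
        affd->set_size[0] = nvecs / 2;
        affd->set_size[1] = nvecs - nvecs / 2;
}

static int foo_alloc_vectors(struct pci_dev *pdev, unsigned int max_vecs)
{
        struct irq_affinity affd = {
                .pre_vectors = 1,               /* e.g. one admin/config vector */
                .calc_sets   = foo_calc_sets,
        };

        return pci_alloc_irq_vectors_affinity(pdev, 2, max_vecs,
                                               PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
                                               &affd);
}

The core then spreads each set across the available CPUs and hands the result back as an array of struct irq_affinity_desc entries.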
@@ -299,8 +356,11 @@
 extern int
 irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);
 
-struct cpumask *irq_create_affinity_masks(int nvec, const struct irq_affinity *affd);
-int irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity *affd);
+struct irq_affinity_desc *
+irq_create_affinity_masks(unsigned int nvec, struct irq_affinity *affd);
+
+unsigned int irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
+                                       const struct irq_affinity *affd);
 
 #else /* CONFIG_SMP */
 
@@ -333,14 +393,15 @@
        return 0;
 }
 
-static inline struct cpumask *
-irq_create_affinity_masks(int nvec, const struct irq_affinity *affd)
+static inline struct irq_affinity_desc *
+irq_create_affinity_masks(unsigned int nvec, struct irq_affinity *affd)
 {
        return NULL;
 }
 
-static inline int
-irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity *affd)
+static inline unsigned int
+irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
+                          const struct irq_affinity *affd)
 {
        return maxvec;
 }
@@ -427,7 +488,11 @@
                                    bool state);
 
 #ifdef CONFIG_IRQ_FORCED_THREADING
+# ifdef CONFIG_PREEMPT_RT
+# define force_irqthreads      (true)
+# else
 extern bool force_irqthreads;
+# endif
 #else
 #define force_irqthreads       (0)
 #endif
@@ -470,14 +535,19 @@
        IRQ_POLL_SOFTIRQ,
        TASKLET_SOFTIRQ,
        SCHED_SOFTIRQ,
-       HRTIMER_SOFTIRQ, /* Unused, but kept as tools rely on the
-                           numbering. Sigh! */
+       HRTIMER_SOFTIRQ,
        RCU_SOFTIRQ,    /* Preferable RCU should always be the last softirq */
 
        NR_SOFTIRQS
 };
 
 #define SOFTIRQ_STOP_IDLE_MASK (~(1 << RCU_SOFTIRQ))
+/* Softirq's where the handling might be long: */
+#define LONG_SOFTIRQ_MASK ((1 << NET_TX_SOFTIRQ)       | \
+                           (1 << NET_RX_SOFTIRQ)        | \
+                           (1 << BLOCK_SOFTIRQ)         | \
+                           (1 << IRQ_POLL_SOFTIRQ)      | \
+                           (1 << TASKLET_SOFTIRQ))
 
 /* map softirq index to softirq name. update 'softirq_to_name' in
  * kernel/softirq.c when adding a new softirq.
@@ -496,7 +566,7 @@
 asmlinkage void do_softirq(void);
 asmlinkage void __do_softirq(void);
 
-#ifdef __ARCH_HAS_DO_SOFTIRQ
+#if defined(__ARCH_HAS_DO_SOFTIRQ) && !defined(CONFIG_PREEMPT_RT)
 void do_softirq_own_stack(void);
 #else
 static inline void do_softirq_own_stack(void)
@@ -513,6 +583,7 @@
 extern void raise_softirq(unsigned int nr);
 
 DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
+DECLARE_PER_CPU(__u32, active_softirqs);
 
 static inline struct task_struct *this_cpu_ksoftirqd(void)
 {
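One plausible use of the per-CPU active_softirqs counter added above, combined with LONG_SOFTIRQ_MASK from the earlier hunk, is a scheduler-side check for a CPU busy with long-running softirqs. The helper name below is hypothetical; local_softirq_pending() is the existing accessor for the local CPU's pending softirq bits:

static inline bool foo_this_cpu_in_long_softirq(void)
{
        u32 softirqs = __this_cpu_read(active_softirqs) |
                       local_softirq_pending();

        return !!(softirqs & LONG_SOFTIRQ_MASK);
}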
@@ -520,6 +591,9 @@
 }
 
 /* Tasklets --- multithreaded analogue of BHs.
+
+   This API is deprecated. Please consider using threaded IRQs instead:
+   https://lore.kernel.org/lkml/20200716081538.2sivhkj4hcyrusem@linutronix.de
 
    Main feature differing them of generic softirqs: tasklet
    is running only on one CPU simultaneously.
@@ -544,16 +618,42 @@
        struct tasklet_struct *next;
        unsigned long state;
        atomic_t count;
-       void (*func)(unsigned long);
+       bool use_callback;
+       union {
+               void (*func)(unsigned long data);
+               void (*callback)(struct tasklet_struct *t);
+       };
        unsigned long data;
 };
 
-#define DECLARE_TASKLET(name, func, data) \
-struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }
+#define DECLARE_TASKLET(name, _callback)                \
+struct tasklet_struct name = {                          \
+       .count = ATOMIC_INIT(0),                         \
+       .callback = _callback,                           \
+       .use_callback = true,                            \
+}
 
-#define DECLARE_TASKLET_DISABLED(name, func, data) \
-struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }
+#define DECLARE_TASKLET_DISABLED(name, _callback)       \
+struct tasklet_struct name = {                          \
+       .count = ATOMIC_INIT(1),                         \
+       .callback = _callback,                           \
+       .use_callback = true,                            \
+}
 
+#define from_tasklet(var, callback_tasklet, tasklet_fieldname)  \
+       container_of(callback_tasklet, typeof(*var), tasklet_fieldname)
+
+#define DECLARE_TASKLET_OLD(name, _func)                \
+struct tasklet_struct name = {                          \
+       .count = ATOMIC_INIT(0),                         \
+       .func = _func,                                   \
+}
+
+#define DECLARE_TASKLET_DISABLED_OLD(name, _func)       \
+struct tasklet_struct name = {                          \
+       .count = ATOMIC_INIT(1),                         \
+       .func = _func,                                   \
+}
 
 enum
 {
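For orientation, a hedged sketch of the new callback-style tasklet API declared above: the handler now receives the tasklet_struct itself rather than an unsigned long cookie. The foo_* names are hypothetical:

static unsigned long foo_events;        /* hypothetical work accumulator */

/* New-style handler: takes the tasklet instead of an unsigned long. */
static void foo_do_tasklet(struct tasklet_struct *t)
{
        foo_events = 0;                 /* drain whatever was queued */
}

/* Static declaration, enabled from the start. */
DECLARE_TASKLET(foo_tasklet, foo_do_tasklet);

An interrupt handler would then queue it with tasklet_schedule(&foo_tasklet); the *_OLD variants keep the legacy func/data form for unconverted users.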
@@ -561,26 +661,21 @@
        TASKLET_STATE_RUN       /* Tasklet is running (SMP only) */
 };
 
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
 static inline int tasklet_trylock(struct tasklet_struct *t)
 {
        return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
 }
 
-static inline void tasklet_unlock(struct tasklet_struct *t)
-{
-       smp_mb__before_atomic();
-       clear_bit(TASKLET_STATE_RUN, &(t)->state);
-}
+void tasklet_unlock(struct tasklet_struct *t);
+void tasklet_unlock_wait(struct tasklet_struct *t);
+void tasklet_unlock_spin_wait(struct tasklet_struct *t);
 
-static inline void tasklet_unlock_wait(struct tasklet_struct *t)
-{
-       while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
-}
 #else
-#define tasklet_trylock(t) 1
-#define tasklet_unlock_wait(t) do { } while (0)
-#define tasklet_unlock(t) do { } while (0)
+static inline int tasklet_trylock(struct tasklet_struct *t) { return 1; }
+static inline void tasklet_unlock(struct tasklet_struct *t) { }
+static inline void tasklet_unlock_wait(struct tasklet_struct *t) { }
+static inline void tasklet_unlock_spin_wait(struct tasklet_struct *t) { }
 #endif
 
 extern void __tasklet_schedule(struct tasklet_struct *t);
@@ -605,6 +700,17 @@
        smp_mb__after_atomic();
 }
 
+/*
+ * Do not use in new code. Disabling tasklets from atomic contexts is
+ * error prone and should be avoided.
+ */
+static inline void tasklet_disable_in_atomic(struct tasklet_struct *t)
+{
+       tasklet_disable_nosync(t);
+       tasklet_unlock_spin_wait(t);
+       smp_mb();
+}
+
 static inline void tasklet_disable(struct tasklet_struct *t)
 {
        tasklet_disable_nosync(t);
@@ -622,31 +728,8 @@
 extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
 extern void tasklet_init(struct tasklet_struct *t,
                          void (*func)(unsigned long), unsigned long data);
-
-struct tasklet_hrtimer {
-       struct hrtimer          timer;
-       struct tasklet_struct   tasklet;
-       enum hrtimer_restart    (*function)(struct hrtimer *);
-};
-
-extern void
-tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
-                    enum hrtimer_restart (*function)(struct hrtimer *),
-                    clockid_t which_clock, enum hrtimer_mode mode);
-
-static inline
-void tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time,
-                          const enum hrtimer_mode mode)
-{
-       hrtimer_start(&ttimer->timer, time, mode);
-}
-
-static inline
-void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer)
-{
-       hrtimer_cancel(&ttimer->timer);
-       tasklet_kill(&ttimer->tasklet);
-}
+extern void tasklet_setup(struct tasklet_struct *t,
+                          void (*callback)(struct tasklet_struct *));
 
 /*
  * Autoprobing for irqs:
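A hedged sketch of dynamic initialization with tasklet_setup() for a tasklet embedded in a driver structure, paired with from_tasklet() in the handler; the bar_* names are hypothetical:

struct bar_device {                     /* hypothetical driver state */
        struct tasklet_struct bh;
        unsigned long pending;
};

static void bar_bh_handler(struct tasklet_struct *t)
{
        /* recover the structure that embeds the tasklet */
        struct bar_device *bar = from_tasklet(bar, t, bh);

        bar->pending = 0;               /* process deferred work here */
}

static void bar_init(struct bar_device *bar)
{
        tasklet_setup(&bar->bh, bar_bh_handler);
}

The interrupt handler would then queue it with tasklet_schedule(&bar->bh).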
@@ -721,8 +804,10 @@
 /*
  * We want to know which function is an entrypoint of a hardirq or a softirq.
  */
-#define __irq_entry            __attribute__((__section__(".irqentry.text")))
-#define __softirq_entry        \
-       __attribute__((__section__(".softirqentry.text")))
+#ifndef __irq_entry
+# define __irq_entry __section(".irqentry.text")
+#endif
+
+#define __softirq_entry __section(".softirqentry.text")
 
 #endif