2024-05-14 bedbef8ad3e75a304af6361af235302bcc61d06b
kernel/include/linux/interrupt.h
@@ -45,14 +45,14 @@
  * IRQF_PERCPU - Interrupt is per cpu
  * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
  * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
- *                registered first in an shared interrupt is considered for
+ *                registered first in a shared interrupt is considered for
  *                performance reasons)
  * IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished.
  *                Used by threaded interrupts which need to keep the
  *                irq line disabled until the threaded handler has been run.
  * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend. Does not guarantee
  *                   that this interrupt will wake the system from a suspended
- *                   state. See Documentation/power/suspend-and-interrupts.txt
+ *                   state. See Documentation/power/suspend-and-interrupts.rst
  * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set
  * IRQF_NO_THREAD - Interrupt cannot be threaded
  * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device
@@ -61,7 +61,9 @@
  *                interrupt handler after suspending interrupts. For system
  *                wakeup devices users need to implement wakeup detection in
  *                their interrupt handlers.
- * IRQF_NO_SOFTIRQ_CALL - Do not process softirqs in the irq thread context (RT)
+ * IRQF_NO_AUTOEN - Don't enable IRQ or NMI automatically when users request it.
+ *                  Users will enable it explicitly by enable_irq() or enable_nmi()
+ *                  later.
  */
 #define IRQF_SHARED             0x00000080
 #define IRQF_PROBE_SHARED       0x00000100
@@ -75,7 +77,7 @@
 #define IRQF_NO_THREAD          0x00010000
 #define IRQF_EARLY_RESUME       0x00020000
 #define IRQF_COND_SUSPEND       0x00040000
-#define IRQF_NO_SOFTIRQ_CALL    0x00080000
+#define IRQF_NO_AUTOEN          0x00080000

 #define IRQF_TIMER              (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)

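For reference, a minimal usage sketch of the new IRQF_NO_AUTOEN flag (not part of the patch; the foo_* names and struct are hypothetical). The point is that the line stays disabled after request_irq() and is armed explicitly later, as the flag's comment above describes:

#include <linux/interrupt.h>

struct foo_dev {
	int irq;
	void __iomem *regs;
};

static irqreturn_t foo_irq_handler(int irq, void *dev_id)
{
	struct foo_dev *foo = dev_id;

	/* ... read and acknowledge the device status via foo->regs ... */
	return IRQ_HANDLED;
}

static int foo_setup_irq(struct foo_dev *foo)
{
	int ret;

	/* IRQF_NO_AUTOEN: the line is requested but not enabled here. */
	ret = request_irq(foo->irq, foo_irq_handler, IRQF_NO_AUTOEN, "foo", foo);
	if (ret)
		return ret;

	/* ... finish programming the hardware while no interrupt can fire ... */

	enable_irq(foo->irq);	/* enable explicitly once the device is ready */
	return 0;
}

This replaces the older pattern of setting IRQ_NOAUTOEN on the descriptor before request_irq(), which was racy when done separately.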
@@ -142,6 +144,19 @@
                              irq_handler_t thread_fn,
                              unsigned long flags, const char *name, void *dev);

+/**
+ * request_irq - Add a handler for an interrupt line
+ * @irq:     The interrupt line to allocate
+ * @handler: Function to be called when the IRQ occurs.
+ *           Primary handler for threaded interrupts
+ *           If NULL, the default primary handler is installed
+ * @flags:   Handling flags
+ * @name:    Name of the device generating this interrupt
+ * @dev:     A cookie passed to the handler function
+ *
+ * This call allocates an interrupt and establishes a handler; see
+ * the documentation for request_threaded_irq() for details.
+ */
 static inline int __must_check
 request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
             const char *name, void *dev)
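As the new kernel-doc notes, request_irq() is a thin wrapper around request_threaded_irq(). A hedged sketch of the threaded form it points to (not part of the patch; the bar_* names are made up):

#include <linux/interrupt.h>

/* Primary handler: hard-irq context, only quiesces the device. */
static irqreturn_t bar_hardirq(int irq, void *dev_id)
{
	/* ... mask or ack the device interrupt source ... */
	return IRQ_WAKE_THREAD;	/* defer the heavy work to the irq thread */
}

/* Threaded handler: process context, may sleep (mutexes, I2C/SPI I/O, ...). */
static irqreturn_t bar_thread_fn(int irq, void *dev_id)
{
	/* ... process the event ... */
	return IRQ_HANDLED;
}

static int bar_request(unsigned int irq, void *bar)
{
	return request_threaded_irq(irq, bar_hardirq, bar_thread_fn,
				    IRQF_ONESHOT, "bar", bar);
}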
@@ -158,6 +173,10 @@
                    unsigned long flags, const char *devname,
                    void __percpu *percpu_dev_id);

+extern int __must_check
+request_nmi(unsigned int irq, irq_handler_t handler, unsigned long flags,
+            const char *name, void *dev);
+
 static inline int __must_check
 request_percpu_irq(unsigned int irq, irq_handler_t handler,
                    const char *devname, void __percpu *percpu_dev_id)
@@ -166,8 +185,15 @@
                                       devname, percpu_dev_id);
 }

+extern int __must_check
+request_percpu_nmi(unsigned int irq, irq_handler_t handler,
+                   const char *devname, void __percpu *dev);
+
 extern const void *free_irq(unsigned int, void *);
 extern void free_percpu_irq(unsigned int, void __percpu *);

+extern const void *free_nmi(unsigned int irq, void *dev_id);
+extern void free_percpu_nmi(unsigned int irq, void __percpu *percpu_dev_id);

 struct device;

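For context, a rough sketch of the request_nmi()/free_nmi() lifecycle these declarations expose, using the enable_nmi()/disable_nmi_nosync() declarations added further down. This is an assumption-laden illustration, not part of the patch: it presumes an irqchip with NMI support, a hypothetical foo driver, and that the line must have auto-enabling disabled before it can be requested as an NMI (NMIs also cannot be shared):

#include <linux/interrupt.h>
#include <linux/irq.h>

/* Runs in NMI context: must not sleep or take locks that this NMI can
 * interrupt; keep it as short as possible. */
static irqreturn_t foo_nmi_handler(int irq, void *dev_id)
{
	/* ... snapshot device/CPU state ... */
	return IRQ_HANDLED;
}

static int foo_setup_nmi(unsigned int irq, void *foo)
{
	int ret;

	/* Assumed requirement: NMI lines must not auto-enable on request. */
	irq_set_status_flags(irq, IRQ_NOAUTOEN);

	ret = request_nmi(irq, foo_nmi_handler, 0, "foo-nmi", foo);
	if (ret)
		return ret;

	enable_nmi(irq);	/* arm the line explicitly */
	return 0;
}

static void foo_teardown_nmi(unsigned int irq, void *foo)
{
	disable_nmi_nosync(irq);
	free_nmi(irq, foo);
}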
@@ -219,9 +245,19 @@
 extern bool irq_percpu_is_enabled(unsigned int irq);
 extern void irq_wake_thread(unsigned int irq, void *dev_id);

+extern void disable_nmi_nosync(unsigned int irq);
+extern void disable_percpu_nmi(unsigned int irq);
+extern void enable_nmi(unsigned int irq);
+extern void enable_percpu_nmi(unsigned int irq, unsigned int type);
+extern int prepare_percpu_nmi(unsigned int irq);
+extern void teardown_percpu_nmi(unsigned int irq);
+
+extern int irq_inject_interrupt(unsigned int irq);
+
 /* The following three functions are for the core kernel use only. */
 extern void suspend_device_irqs(void);
 extern void resume_device_irqs(void);
+extern void rearm_wake_irq(unsigned int irq);

 /**
  * struct irq_affinity_notify - context for notification of IRQ affinity changes
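The per-CPU NMI variants are armed from the target CPU itself. A rough, hedged sketch of the usual flow (something a PMU-style driver might do); the names, the per-cpu cookie, and the use of on_each_cpu() are illustrative, and the exact constraints (per-CPU devid setup, calling context) depend on the irqchip:

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/smp.h>

static DEFINE_PER_CPU(unsigned int, foo_pcpu_cookie);

static irqreturn_t foo_percpu_nmi_handler(int irq, void *dev_id)
{
	/* dev_id points at this CPU's instance of foo_pcpu_cookie. */
	return IRQ_HANDLED;
}

/* Runs on each CPU; prepare_percpu_nmi()/enable_percpu_nmi() only act on
 * the local CPU, so they are called from a cross-CPU context here. */
static void foo_arm_local_nmi(void *info)
{
	unsigned int irq = *(unsigned int *)info;

	if (!prepare_percpu_nmi(irq))
		enable_percpu_nmi(irq, IRQ_TYPE_NONE);
}

static int foo_setup_percpu_nmi(unsigned int irq)
{
	int ret;

	ret = request_percpu_nmi(irq, foo_percpu_nmi_handler, "foo-pmu",
				 &foo_pcpu_cookie);
	if (ret)
		return ret;

	on_each_cpu(foo_arm_local_nmi, &irq, 1);
	return 0;
}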
@@ -243,16 +279,39 @@
        void (*release)(struct kref *ref);
 };

+#define IRQ_AFFINITY_MAX_SETS  4
+
 /**
  * struct irq_affinity - Description for automatic irq affinity assignements
  * @pre_vectors:   Don't apply affinity to @pre_vectors at beginning of
  *                 the MSI(-X) vector space
  * @post_vectors:  Don't apply affinity to @post_vectors at end of
  *                 the MSI(-X) vector space
+ * @nr_sets:       The number of interrupt sets for which affinity
+ *                 spreading is required
+ * @set_size:      Array holding the size of each interrupt set
+ * @calc_sets:     Callback for calculating the number and size
+ *                 of interrupt sets
+ * @priv:          Private data for usage by @calc_sets, usually a
+ *                 pointer to driver/device specific data.
  */
 struct irq_affinity {
-       int             pre_vectors;
-       int             post_vectors;
+       unsigned int    pre_vectors;
+       unsigned int    post_vectors;
+       unsigned int    nr_sets;
+       unsigned int    set_size[IRQ_AFFINITY_MAX_SETS];
+       void            (*calc_sets)(struct irq_affinity *, unsigned int nvecs);
+       void            *priv;
+};
+
+/**
+ * struct irq_affinity_desc - Interrupt affinity descriptor
+ * @mask:       cpumask to hold the affinity assignment
+ * @is_managed: 1 if the interrupt is managed internally
+ */
+struct irq_affinity_desc {
+       struct cpumask  mask;
+       unsigned int    is_managed : 1;
 };

 #if defined(CONFIG_SMP)
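The new @nr_sets/@set_size/@calc_sets fields let a multi-queue driver split its vectors into independently spread interrupt sets. A hedged sketch of how a driver might fill this in, typically consumed through pci_alloc_irq_vectors_affinity(); the 50/50 split, the vector counts, and the foo names are made up for illustration:

#include <linux/interrupt.h>
#include <linux/pci.h>

/* Called back by the core once the final spreadable vector count is known. */
static void foo_calc_irq_sets(struct irq_affinity *affd, unsigned int nvecs)
{
	affd->set_size[0] = nvecs / 2;		/* e.g. write queues */
	affd->set_size[1] = nvecs - nvecs / 2;	/* e.g. read queues  */
	affd->nr_sets = 2;
}

static int foo_alloc_vectors(struct pci_dev *pdev, unsigned int max_vecs)
{
	struct irq_affinity affd = {
		.pre_vectors = 1,		/* admin vector, excluded from spreading */
		.calc_sets   = foo_calc_irq_sets,
	};

	return pci_alloc_irq_vectors_affinity(pdev, 3, max_vecs,
					      PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
					      &affd);
}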
@@ -301,8 +360,11 @@
 extern int
 irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);

-struct cpumask *irq_create_affinity_masks(int nvec, const struct irq_affinity *affd);
-int irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity *affd);
+struct irq_affinity_desc *
+irq_create_affinity_masks(unsigned int nvec, struct irq_affinity *affd);
+
+unsigned int irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
+                                       const struct irq_affinity *affd);

 #else /* CONFIG_SMP */

@@ -335,14 +397,15 @@
        return 0;
 }

-static inline struct cpumask *
-irq_create_affinity_masks(int nvec, const struct irq_affinity *affd)
+static inline struct irq_affinity_desc *
+irq_create_affinity_masks(unsigned int nvec, struct irq_affinity *affd)
 {
        return NULL;
 }

-static inline int
-irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity *affd)
+static inline unsigned int
+irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
+                          const struct irq_affinity *affd)
 {
        return maxvec;
 }
@@ -429,7 +492,7 @@
                                bool state);

 #ifdef CONFIG_IRQ_FORCED_THREADING
-# ifdef CONFIG_PREEMPT_RT_BASE
+# ifdef CONFIG_PREEMPT_RT
 # define force_irqthreads      (true)
 # else
 extern bool force_irqthreads;
@@ -476,14 +539,19 @@
        IRQ_POLL_SOFTIRQ,
        TASKLET_SOFTIRQ,
        SCHED_SOFTIRQ,
-       HRTIMER_SOFTIRQ, /* Unused, but kept as tools rely on the
-                           numbering. Sigh! */
+       HRTIMER_SOFTIRQ,
        RCU_SOFTIRQ,     /* Preferable RCU should always be the last softirq */

        NR_SOFTIRQS
 };

 #define SOFTIRQ_STOP_IDLE_MASK (~(1 << RCU_SOFTIRQ))
+/* Softirq's where the handling might be long: */
+#define LONG_SOFTIRQ_MASK ((1 << NET_TX_SOFTIRQ)   | \
+                           (1 << NET_RX_SOFTIRQ)   | \
+                           (1 << BLOCK_SOFTIRQ)    | \
+                           (1 << IRQ_POLL_SOFTIRQ) | \
+                           (1 << TASKLET_SOFTIRQ))

 /* map softirq index to softirq name. update 'softirq_to_name' in
  * kernel/softirq.c when adding a new softirq.
@@ -499,10 +567,9 @@
        void    (*action)(struct softirq_action *);
 };

-#ifndef CONFIG_PREEMPT_RT_FULL
 asmlinkage void do_softirq(void);
 asmlinkage void __do_softirq(void);
-static inline void thread_do_softirq(void) { do_softirq(); }
+
 #ifdef __ARCH_HAS_DO_SOFTIRQ
 void do_softirq_own_stack(void);
 #else
@@ -511,27 +578,16 @@
        __do_softirq();
 }
 #endif
-#else
-extern void thread_do_softirq(void);
-#endif

 extern void open_softirq(int nr, void (*action)(struct softirq_action *));
 extern void softirq_init(void);
 extern void __raise_softirq_irqoff(unsigned int nr);
-#ifdef CONFIG_PREEMPT_RT_FULL
-extern void __raise_softirq_irqoff_ksoft(unsigned int nr);
-#else
-static inline void __raise_softirq_irqoff_ksoft(unsigned int nr)
-{
-       __raise_softirq_irqoff(nr);
-}
-#endif

 extern void raise_softirq_irqoff(unsigned int nr);
 extern void raise_softirq(unsigned int nr);
-extern void softirq_check_pending_idle(void);

 DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
+DECLARE_PER_CPU(__u32, active_softirqs);

 static inline struct task_struct *this_cpu_ksoftirqd(void)
 {
@@ -539,6 +595,9 @@
 }

 /* Tasklets --- multithreaded analogue of BHs.
+
+   This API is deprecated. Please consider using threaded IRQs instead:
+   https://lore.kernel.org/lkml/20200716081538.2sivhkj4hcyrusem@linutronix.de

    Main feature differing them of generic softirqs: tasklet
    is running only on one CPU simultaneously.
@@ -551,9 +610,8 @@
      to be executed on some cpu at least once after this.
    * If the tasklet is already scheduled, but its execution is still not
      started, it will be executed only once.
-   * If this tasklet is already running on another CPU, it is rescheduled
-     for later.
-   * Schedule must not be called from the tasklet itself (a lockup occurs)
+   * If this tasklet is already running on another CPU (or schedule is called
+     from tasklet itself), it is rescheduled for later.
    * Tasklet is strictly serialized wrt itself, but not
      wrt another tasklets. If client needs some intertask synchronization,
      he makes it with spinlocks.
@@ -564,40 +622,53 @@
        struct tasklet_struct *next;
        unsigned long state;
        atomic_t count;
-       void (*func)(unsigned long);
+       bool use_callback;
+       union {
+               void (*func)(unsigned long data);
+               void (*callback)(struct tasklet_struct *t);
+       };
        unsigned long data;
 };

-#define DECLARE_TASKLET(name, func, data) \
-struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }
+#define DECLARE_TASKLET(name, _callback)               \
+struct tasklet_struct name = {                         \
+       .count = ATOMIC_INIT(0),                        \
+       .callback = _callback,                          \
+       .use_callback = true,                           \
+}

-#define DECLARE_TASKLET_DISABLED(name, func, data) \
-struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }
+#define DECLARE_TASKLET_DISABLED(name, _callback)      \
+struct tasklet_struct name = {                         \
+       .count = ATOMIC_INIT(1),                        \
+       .callback = _callback,                          \
+       .use_callback = true,                           \
+}

+#define from_tasklet(var, callback_tasklet, tasklet_fieldname) \
+       container_of(callback_tasklet, typeof(*var), tasklet_fieldname)
+
+#define DECLARE_TASKLET_OLD(name, _func)               \
+struct tasklet_struct name = {                         \
+       .count = ATOMIC_INIT(0),                        \
+       .func = _func,                                  \
+}
+
+#define DECLARE_TASKLET_DISABLED_OLD(name, _func)      \
+struct tasklet_struct name = {                         \
+       .count = ATOMIC_INIT(1),                        \
+       .func = _func,                                  \
+}

 enum
 {
        TASKLET_STATE_SCHED,    /* Tasklet is scheduled for execution */
-       TASKLET_STATE_RUN,      /* Tasklet is running (SMP only) */
-       TASKLET_STATE_PENDING,  /* Tasklet is pending */
-       TASKLET_STATE_CHAINED   /* Tasklet is chained */
+       TASKLET_STATE_RUN       /* Tasklet is running (SMP only) */
 };

-#define TASKLET_STATEF_SCHED   (1 << TASKLET_STATE_SCHED)
-#define TASKLET_STATEF_RUN     (1 << TASKLET_STATE_RUN)
-#define TASKLET_STATEF_PENDING (1 << TASKLET_STATE_PENDING)
-#define TASKLET_STATEF_CHAINED (1 << TASKLET_STATE_CHAINED)
-#define TASKLET_STATEF_RC      (TASKLET_STATEF_RUN | TASKLET_STATEF_CHAINED)
-
-#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
+#ifdef CONFIG_SMP
 static inline int tasklet_trylock(struct tasklet_struct *t)
 {
        return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
-}
-
-static inline int tasklet_tryunlock(struct tasklet_struct *t)
-{
-       return cmpxchg(&t->state, TASKLET_STATEF_RUN, 0) == TASKLET_STATEF_RUN;
 }

 static inline void tasklet_unlock(struct tasklet_struct *t)
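With the callback-based macros above, the handler receives the tasklet itself instead of an opaque unsigned long, while DECLARE_TASKLET_OLD()/DECLARE_TASKLET_DISABLED_OLD() keep the legacy func/data form for unconverted users. A small sketch, not part of the patch (the foo names are hypothetical):

#include <linux/interrupt.h>

/* New-style callback: gets the tasklet_struct that was scheduled. */
static void foo_do_bottom_half(struct tasklet_struct *t)
{
	/* ... deferred (softirq-context) work ... */
}

static DECLARE_TASKLET(foo_bh, foo_do_bottom_half);

/* e.g. at the end of the hard interrupt handler: */
static irqreturn_t foo_irq(int irq, void *dev_id)
{
	tasklet_schedule(&foo_bh);
	return IRQ_HANDLED;
}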
@@ -644,42 +716,18 @@
        smp_mb();
 }

-extern void tasklet_enable(struct tasklet_struct *t);
+static inline void tasklet_enable(struct tasklet_struct *t)
+{
+       smp_mb__before_atomic();
+       atomic_dec(&t->count);
+}
+
 extern void tasklet_kill(struct tasklet_struct *t);
 extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
 extern void tasklet_init(struct tasklet_struct *t,
                         void (*func)(unsigned long), unsigned long data);
-
-#ifdef CONFIG_PREEMPT_RT_FULL
-extern void softirq_early_init(void);
-#else
-static inline void softirq_early_init(void) { }
-#endif
-
-struct tasklet_hrtimer {
-       struct hrtimer          timer;
-       struct tasklet_struct   tasklet;
-       enum hrtimer_restart    (*function)(struct hrtimer *);
-};
-
-extern void
-tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
-                    enum hrtimer_restart (*function)(struct hrtimer *),
-                    clockid_t which_clock, enum hrtimer_mode mode);
-
-static inline
-void tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time,
-                          const enum hrtimer_mode mode)
-{
-       hrtimer_start(&ttimer->timer, time, mode);
-}
-
-static inline
-void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer)
-{
-       hrtimer_cancel(&ttimer->timer);
-       tasklet_kill(&ttimer->tasklet);
-}
+extern void tasklet_setup(struct tasklet_struct *t,
+                         void (*callback)(struct tasklet_struct *));

 /*
  * Autoprobing for irqs:
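tasklet_setup() is the dynamic counterpart of DECLARE_TASKLET(); together with from_tasklet() it replaces stuffing a pointer into the unsigned long data argument of tasklet_init(). A hedged sketch with a hypothetical foo_dev:

#include <linux/interrupt.h>

struct foo_dev {
	struct tasklet_struct bh;
	/* ... driver state ... */
};

static void foo_bh_func(struct tasklet_struct *t)
{
	/* Recover the enclosing structure from the embedded tasklet. */
	struct foo_dev *foo = from_tasklet(foo, t, bh);

	/* ... process completions for foo ... */
}

static void foo_init_bh(struct foo_dev *foo)
{
	tasklet_setup(&foo->bh, foo_bh_func);
}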
@@ -754,8 +802,10 @@
 /*
  * We want to know which function is an entrypoint of a hardirq or a softirq.
  */
-#define __irq_entry __attribute__((__section__(".irqentry.text")))
-#define __softirq_entry \
-       __attribute__((__section__(".softirqentry.text")))
+#ifndef __irq_entry
+# define __irq_entry __section(".irqentry.text")
+#endif
+
+#define __softirq_entry __section(".softirqentry.text")

 #endif