2023-12-11 6778948f9de86c3cfaf36725a7c87dcff9ba247f
kernel/include/linux/interrupt.h
@@ -45,14 +45,14 @@
  * IRQF_PERCPU - Interrupt is per cpu
  * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
  * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
- * registered first in an shared interrupt is considered for
+ * registered first in a shared interrupt is considered for
  * performance reasons)
  * IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished.
  * Used by threaded interrupts which need to keep the
  * irq line disabled until the threaded handler has been run.
  * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend. Does not guarantee
  * that this interrupt will wake the system from a suspended
- * state. See Documentation/power/suspend-and-interrupts.txt
+ * state. See Documentation/power/suspend-and-interrupts.rst
  * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set
  * IRQF_NO_THREAD - Interrupt cannot be threaded
  * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device
@@ -61,7 +61,6 @@
  * interrupt handler after suspending interrupts. For system
  * wakeup devices users need to implement wakeup detection in
  * their interrupt handlers.
- * IRQF_NO_SOFTIRQ_CALL - Do not process softirqs in the irq thread context (RT)
  */
 #define IRQF_SHARED 0x00000080
 #define IRQF_PROBE_SHARED 0x00000100
@@ -75,7 +74,6 @@
 #define IRQF_NO_THREAD 0x00010000
 #define IRQF_EARLY_RESUME 0x00020000
 #define IRQF_COND_SUSPEND 0x00040000
-#define IRQF_NO_SOFTIRQ_CALL 0x00080000

 #define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)

@@ -142,6 +140,19 @@
 irq_handler_t thread_fn,
 unsigned long flags, const char *name, void *dev);

+/**
+ * request_irq - Add a handler for an interrupt line
+ * @irq: The interrupt line to allocate
+ * @handler: Function to be called when the IRQ occurs.
+ * Primary handler for threaded interrupts
+ * If NULL, the default primary handler is installed
+ * @flags: Handling flags
+ * @name: Name of the device generating this interrupt
+ * @dev: A cookie passed to the handler function
+ *
+ * This call allocates an interrupt and establishes a handler; see
+ * the documentation for request_threaded_irq() for details.
+ */
 static inline int __must_check
 request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
 const char *name, void *dev)
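
Illustrative sketch (not part of the patch): how a driver might use request_irq()/free_irq() as described in the kerneldoc added above. The foo_* names are hypothetical.

#include <linux/interrupt.h>

struct foo_device {                     /* hypothetical per-device context */
        void __iomem *regs;
        int irq;
};

static irqreturn_t foo_interrupt(int irq, void *dev)
{
        struct foo_device *foo = dev;   /* the cookie passed to request_irq() */

        /* acknowledge the device and do the minimal hard-IRQ work here */
        return IRQ_HANDLED;
}

static int foo_setup_irq(struct foo_device *foo)
{
        /* IRQF_SHARED: the line may be shared with other devices */
        return request_irq(foo->irq, foo_interrupt, IRQF_SHARED, "foo", foo);
}

static void foo_teardown_irq(struct foo_device *foo)
{
        free_irq(foo->irq, foo);        /* dev cookie must match request_irq() */
}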
@@ -158,6 +169,10 @@
 unsigned long flags, const char *devname,
 void __percpu *percpu_dev_id);

+extern int __must_check
+request_nmi(unsigned int irq, irq_handler_t handler, unsigned long flags,
+ const char *name, void *dev);
+
 static inline int __must_check
 request_percpu_irq(unsigned int irq, irq_handler_t handler,
 const char *devname, void __percpu *percpu_dev_id)
@@ -166,8 +181,15 @@
 devname, percpu_dev_id);
 }

+extern int __must_check
+request_percpu_nmi(unsigned int irq, irq_handler_t handler,
+ const char *devname, void __percpu *dev);
+
 extern const void *free_irq(unsigned int, void *);
 extern void free_percpu_irq(unsigned int, void __percpu *);
+
+extern const void *free_nmi(unsigned int irq, void *dev_id);
+extern void free_percpu_nmi(unsigned int irq, void __percpu *percpu_dev_id);

 struct device;

@@ -219,9 +241,19 @@
 extern bool irq_percpu_is_enabled(unsigned int irq);
 extern void irq_wake_thread(unsigned int irq, void *dev_id);

+extern void disable_nmi_nosync(unsigned int irq);
+extern void disable_percpu_nmi(unsigned int irq);
+extern void enable_nmi(unsigned int irq);
+extern void enable_percpu_nmi(unsigned int irq, unsigned int type);
+extern int prepare_percpu_nmi(unsigned int irq);
+extern void teardown_percpu_nmi(unsigned int irq);
+
+extern int irq_inject_interrupt(unsigned int irq);
+
 /* The following three functions are for the core kernel use only. */
 extern void suspend_device_irqs(void);
 extern void resume_device_irqs(void);
+extern void rearm_wake_irq(unsigned int irq);

 /**
  * struct irq_affinity_notify - context for notification of IRQ affinity changes
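
Illustrative sketch (not part of the patch) of the expected call sequence for the per-CPU NMI API declared above, assuming an irqchip that supports NMI delivery. The foo_* names are hypothetical; prepare_percpu_nmi()/enable_percpu_nmi() must run on each CPU that should receive the NMI (for example from a CPU-hotplug callback).

#include <linux/interrupt.h>
#include <linux/irq.h>          /* IRQ_TYPE_NONE */
#include <linux/percpu.h>

struct foo_pcpu_state {
        unsigned long count;
};

static DEFINE_PER_CPU(struct foo_pcpu_state, foo_state);

static irqreturn_t foo_nmi_handler(int irq, void *dev)
{
        struct foo_pcpu_state __percpu *state = dev;

        /* NMI context: no sleeping, no regular spinlocks */
        this_cpu_ptr(state)->count++;
        return IRQ_HANDLED;
}

static int foo_request_nmi(unsigned int irq)
{
        return request_percpu_nmi(irq, foo_nmi_handler, "foo-nmi", &foo_state);
}

/* To be called on each target CPU, e.g. from a cpuhp online callback: */
static int foo_enable_nmi_on_this_cpu(unsigned int irq)
{
        int ret = prepare_percpu_nmi(irq);

        if (!ret)
                enable_percpu_nmi(irq, IRQ_TYPE_NONE);
        return ret;
}

/* Mirror image on teardown, before free_percpu_nmi(): */
static void foo_disable_nmi_on_this_cpu(unsigned int irq)
{
        disable_percpu_nmi(irq);
        teardown_percpu_nmi(irq);
}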
@@ -243,16 +275,39 @@
 void (*release)(struct kref *ref);
 };

+#define IRQ_AFFINITY_MAX_SETS 4
+
 /**
  * struct irq_affinity - Description for automatic irq affinity assignements
  * @pre_vectors: Don't apply affinity to @pre_vectors at beginning of
  * the MSI(-X) vector space
  * @post_vectors: Don't apply affinity to @post_vectors at end of
  * the MSI(-X) vector space
+ * @nr_sets: The number of interrupt sets for which affinity
+ * spreading is required
+ * @set_size: Array holding the size of each interrupt set
+ * @calc_sets: Callback for calculating the number and size
+ * of interrupt sets
+ * @priv: Private data for usage by @calc_sets, usually a
+ * pointer to driver/device specific data.
  */
 struct irq_affinity {
- int pre_vectors;
- int post_vectors;
+ unsigned int pre_vectors;
+ unsigned int post_vectors;
+ unsigned int nr_sets;
+ unsigned int set_size[IRQ_AFFINITY_MAX_SETS];
+ void (*calc_sets)(struct irq_affinity *, unsigned int nvecs);
+ void *priv;
+};
+
+/**
+ * struct irq_affinity_desc - Interrupt affinity descriptor
+ * @mask: cpumask to hold the affinity assignment
+ * @is_managed: 1 if the interrupt is managed internally
+ */
+struct irq_affinity_desc {
+ struct cpumask mask;
+ unsigned int is_managed : 1;
 };

 #if defined(CONFIG_SMP)
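
Illustrative sketch (not part of the patch): how a PCI driver might fill the new @nr_sets/@set_size/@calc_sets fields and hand the descriptor to pci_alloc_irq_vectors_affinity(). The foo_* names and the 1/4 vs 3/4 split are made up; the only hard rule is that the set sizes add up to the spreadable vector count passed to @calc_sets.

#include <linux/interrupt.h>
#include <linux/pci.h>

static void foo_calc_irq_sets(struct irq_affinity *affd, unsigned int nvecs)
{
        if (nvecs < 2) {
                affd->nr_sets = 1;
                affd->set_size[0] = nvecs;
                return;
        }
        /* hypothetical split: a quarter for poll queues, the rest for IRQ queues */
        affd->nr_sets = 2;
        affd->set_size[0] = max(nvecs / 4, 1U);
        affd->set_size[1] = nvecs - affd->set_size[0];
}

static int foo_alloc_vectors(struct pci_dev *pdev, unsigned int max_vecs)
{
        struct irq_affinity affd = {
                .pre_vectors = 1,               /* admin/config interrupt */
                .calc_sets   = foo_calc_irq_sets,
        };

        return pci_alloc_irq_vectors_affinity(pdev, 1, max_vecs,
                                               PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
                                               &affd);
}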
@@ -301,8 +356,11 @@
 extern int
 irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);

-struct cpumask *irq_create_affinity_masks(int nvec, const struct irq_affinity *affd);
-int irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity *affd);
+struct irq_affinity_desc *
+irq_create_affinity_masks(unsigned int nvec, struct irq_affinity *affd);
+
+unsigned int irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
+ const struct irq_affinity *affd);

 #else /* CONFIG_SMP */

@@ -335,14 +393,15 @@
 return 0;
 }

-static inline struct cpumask *
-irq_create_affinity_masks(int nvec, const struct irq_affinity *affd)
+static inline struct irq_affinity_desc *
+irq_create_affinity_masks(unsigned int nvec, struct irq_affinity *affd)
 {
 return NULL;
 }

-static inline int
-irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity *affd)
+static inline unsigned int
+irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
+ const struct irq_affinity *affd)
 {
 return maxvec;
 }
@@ -429,7 +488,7 @@
 bool state);

 #ifdef CONFIG_IRQ_FORCED_THREADING
-# ifdef CONFIG_PREEMPT_RT_BASE
+# ifdef CONFIG_PREEMPT_RT
 # define force_irqthreads (true)
 # else
 extern bool force_irqthreads;
@@ -476,14 +535,19 @@
 IRQ_POLL_SOFTIRQ,
 TASKLET_SOFTIRQ,
 SCHED_SOFTIRQ,
- HRTIMER_SOFTIRQ, /* Unused, but kept as tools rely on the
- numbering. Sigh! */
+ HRTIMER_SOFTIRQ,
 RCU_SOFTIRQ, /* Preferable RCU should always be the last softirq */

 NR_SOFTIRQS
 };

 #define SOFTIRQ_STOP_IDLE_MASK (~(1 << RCU_SOFTIRQ))
+/* Softirq's where the handling might be long: */
+#define LONG_SOFTIRQ_MASK ((1 << NET_TX_SOFTIRQ) | \
+ (1 << NET_RX_SOFTIRQ) | \
+ (1 << BLOCK_SOFTIRQ) | \
+ (1 << IRQ_POLL_SOFTIRQ) | \
+ (1 << TASKLET_SOFTIRQ))

 /* map softirq index to softirq name. update 'softirq_to_name' in
  * kernel/softirq.c when adding a new softirq.
@@ -499,10 +563,9 @@
 void (*action)(struct softirq_action *);
 };

-#ifndef CONFIG_PREEMPT_RT_FULL
 asmlinkage void do_softirq(void);
 asmlinkage void __do_softirq(void);
-static inline void thread_do_softirq(void) { do_softirq(); }
+
 #ifdef __ARCH_HAS_DO_SOFTIRQ
 void do_softirq_own_stack(void);
 #else
@@ -511,27 +574,16 @@
 __do_softirq();
 }
 #endif
-#else
-extern void thread_do_softirq(void);
-#endif

 extern void open_softirq(int nr, void (*action)(struct softirq_action *));
 extern void softirq_init(void);
 extern void __raise_softirq_irqoff(unsigned int nr);
-#ifdef CONFIG_PREEMPT_RT_FULL
-extern void __raise_softirq_irqoff_ksoft(unsigned int nr);
-#else
-static inline void __raise_softirq_irqoff_ksoft(unsigned int nr)
-{
- __raise_softirq_irqoff(nr);
-}
-#endif

 extern void raise_softirq_irqoff(unsigned int nr);
 extern void raise_softirq(unsigned int nr);
-extern void softirq_check_pending_idle(void);

 DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
+DECLARE_PER_CPU(__u32, active_softirqs);

 static inline struct task_struct *this_cpu_ksoftirqd(void)
 {
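
Illustrative sketch (not part of the patch): the kind of check that the exported per-CPU active_softirqs mask and LONG_SOFTIRQ_MASK above make possible, for example a scheduler-side heuristic that avoids CPUs currently executing potentially long-running softirqs. The helper name is hypothetical and the intended consumer is an assumption.

#include <linux/interrupt.h>
#include <linux/percpu.h>

/* Hypothetical helper: is @cpu currently inside a potentially long softirq? */
static inline bool foo_cpu_in_long_softirq(int cpu)
{
        return per_cpu(active_softirqs, cpu) & LONG_SOFTIRQ_MASK;
}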
@@ -539,6 +591,9 @@
 }

 /* Tasklets --- multithreaded analogue of BHs.
+
+ This API is deprecated. Please consider using threaded IRQs instead:
+ https://lore.kernel.org/lkml/20200716081538.2sivhkj4hcyrusem@linutronix.de

 Main feature differing them of generic softirqs: tasklet
 is running only on one CPU simultaneously.
@@ -551,9 +606,8 @@
 to be executed on some cpu at least once after this.
  * If the tasklet is already scheduled, but its execution is still not
 started, it will be executed only once.
- * If this tasklet is already running on another CPU, it is rescheduled
- for later.
- * Schedule must not be called from the tasklet itself (a lockup occurs)
+ * If this tasklet is already running on another CPU (or schedule is called
+ from tasklet itself), it is rescheduled for later.
  * Tasklet is strictly serialized wrt itself, but not
 wrt another tasklets. If client needs some intertask synchronization,
 he makes it with spinlocks.
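
Since the comment added above points at threaded IRQs as the preferred replacement for tasklets, here is an illustrative sketch (not part of the patch) of request_threaded_irq() with a NULL primary handler and IRQF_ONESHOT. The foo_* names are hypothetical.

#include <linux/interrupt.h>

static irqreturn_t foo_thread_fn(int irq, void *dev)
{
        /* runs in a dedicated kernel thread: sleeping is allowed here */
        return IRQ_HANDLED;
}

static int foo_setup_threaded_irq(unsigned int irq, void *dev)
{
        /*
         * NULL primary handler + IRQF_ONESHOT: the default primary handler
         * is installed and the line stays masked until foo_thread_fn()
         * has completed.
         */
        return request_threaded_irq(irq, NULL, foo_thread_fn,
                                    IRQF_ONESHOT, "foo", dev);
}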
@@ -564,40 +618,53 @@
 struct tasklet_struct *next;
 unsigned long state;
 atomic_t count;
- void (*func)(unsigned long);
+ bool use_callback;
+ union {
+ void (*func)(unsigned long data);
+ void (*callback)(struct tasklet_struct *t);
+ };
 unsigned long data;
 };

-#define DECLARE_TASKLET(name, func, data) \
-struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }
+#define DECLARE_TASKLET(name, _callback) \
+struct tasklet_struct name = { \
+ .count = ATOMIC_INIT(0), \
+ .callback = _callback, \
+ .use_callback = true, \
+}

-#define DECLARE_TASKLET_DISABLED(name, func, data) \
-struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }
+#define DECLARE_TASKLET_DISABLED(name, _callback) \
+struct tasklet_struct name = { \
+ .count = ATOMIC_INIT(1), \
+ .callback = _callback, \
+ .use_callback = true, \
+}

+#define from_tasklet(var, callback_tasklet, tasklet_fieldname) \
+ container_of(callback_tasklet, typeof(*var), tasklet_fieldname)
+
+#define DECLARE_TASKLET_OLD(name, _func) \
+struct tasklet_struct name = { \
+ .count = ATOMIC_INIT(0), \
+ .func = _func, \
+}
+
+#define DECLARE_TASKLET_DISABLED_OLD(name, _func) \
+struct tasklet_struct name = { \
+ .count = ATOMIC_INIT(1), \
+ .func = _func, \
+}

 enum
 {
 TASKLET_STATE_SCHED, /* Tasklet is scheduled for execution */
- TASKLET_STATE_RUN, /* Tasklet is running (SMP only) */
- TASKLET_STATE_PENDING, /* Tasklet is pending */
- TASKLET_STATE_CHAINED /* Tasklet is chained */
+ TASKLET_STATE_RUN /* Tasklet is running (SMP only) */
 };

-#define TASKLET_STATEF_SCHED (1 << TASKLET_STATE_SCHED)
-#define TASKLET_STATEF_RUN (1 << TASKLET_STATE_RUN)
-#define TASKLET_STATEF_PENDING (1 << TASKLET_STATE_PENDING)
-#define TASKLET_STATEF_CHAINED (1 << TASKLET_STATE_CHAINED)
-#define TASKLET_STATEF_RC (TASKLET_STATEF_RUN | TASKLET_STATEF_CHAINED)
-
-#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
+#ifdef CONFIG_SMP
 static inline int tasklet_trylock(struct tasklet_struct *t)
 {
 return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
-}
-
-static inline int tasklet_tryunlock(struct tasklet_struct *t)
-{
- return cmpxchg(&t->state, TASKLET_STATEF_RUN, 0) == TASKLET_STATEF_RUN;
 }

 static inline void tasklet_unlock(struct tasklet_struct *t)
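
Illustrative sketch (not part of the patch): a statically declared tasklet using the callback-based DECLARE_TASKLET() added above, scheduled from a hypothetical interrupt handler.

#include <linux/interrupt.h>

static void foo_bottom_half(struct tasklet_struct *t)
{
        /* deferred work, runs in softirq context */
}

static DECLARE_TASKLET(foo_tasklet, foo_bottom_half);

static irqreturn_t foo_irq(int irq, void *dev)
{
        /* ack the hardware, then defer the rest */
        tasklet_schedule(&foo_tasklet);
        return IRQ_HANDLED;
}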
@@ -606,11 +673,12 @@
 clear_bit(TASKLET_STATE_RUN, &(t)->state);
 }

-extern void tasklet_unlock_wait(struct tasklet_struct *t);
-
+static inline void tasklet_unlock_wait(struct tasklet_struct *t)
+{
+ while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
+}
 #else
 #define tasklet_trylock(t) 1
-#define tasklet_tryunlock(t) 1
 #define tasklet_unlock_wait(t) do { } while (0)
 #define tasklet_unlock(t) do { } while (0)
 #endif
@@ -644,42 +712,18 @@
 smp_mb();
 }

-extern void tasklet_enable(struct tasklet_struct *t);
+static inline void tasklet_enable(struct tasklet_struct *t)
+{
+ smp_mb__before_atomic();
+ atomic_dec(&t->count);
+}
+
 extern void tasklet_kill(struct tasklet_struct *t);
 extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
 extern void tasklet_init(struct tasklet_struct *t,
 void (*func)(unsigned long), unsigned long data);
-
-#ifdef CONFIG_PREEMPT_RT_FULL
-extern void softirq_early_init(void);
-#else
-static inline void softirq_early_init(void) { }
-#endif
-
-struct tasklet_hrtimer {
- struct hrtimer timer;
- struct tasklet_struct tasklet;
- enum hrtimer_restart (*function)(struct hrtimer *);
-};
-
-extern void
-tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
- enum hrtimer_restart (*function)(struct hrtimer *),
- clockid_t which_clock, enum hrtimer_mode mode);
-
-static inline
-void tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time,
- const enum hrtimer_mode mode)
-{
- hrtimer_start(&ttimer->timer, time, mode);
-}
-
-static inline
-void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer)
-{
- hrtimer_cancel(&ttimer->timer);
- tasklet_kill(&ttimer->tasklet);
-}
+extern void tasklet_setup(struct tasklet_struct *t,
+ void (*callback)(struct tasklet_struct *));

 /*
  * Autoprobing for irqs:
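
Illustrative sketch (not part of the patch): dynamic initialization with the new tasklet_setup() declared above, using from_tasklet() to recover the enclosing object in the callback. The foo_* names are hypothetical.

#include <linux/interrupt.h>
#include <linux/slab.h>

struct foo_ctx {
        struct tasklet_struct bh;
        /* ... driver state ... */
};

static void foo_bh_callback(struct tasklet_struct *t)
{
        struct foo_ctx *foo = from_tasklet(foo, t, bh);

        /* process foo's deferred work */
}

static struct foo_ctx *foo_create(void)
{
        struct foo_ctx *foo = kzalloc(sizeof(*foo), GFP_KERNEL);

        if (foo)
                tasklet_setup(&foo->bh, foo_bh_callback);
        return foo;
}

static void foo_destroy(struct foo_ctx *foo)
{
        tasklet_kill(&foo->bh); /* waits for a scheduled/running callback */
        kfree(foo);
}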
@@ -754,8 +798,10 @@
 /*
  * We want to know which function is an entrypoint of a hardirq or a softirq.
  */
-#define __irq_entry __attribute__((__section__(".irqentry.text")))
-#define __softirq_entry \
- __attribute__((__section__(".softirqentry.text")))
+#ifndef __irq_entry
+# define __irq_entry __section(".irqentry.text")
+#endif
+
+#define __softirq_entry __section(".softirqentry.text")

 #endif
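
Illustrative sketch (not part of the patch): tagging an architecture's interrupt entry point with __irq_entry so it lands in .irqentry.text, the section the (now __section()-based) annotations above refer to. The function name is hypothetical.

#include <linux/interrupt.h>

struct pt_regs;

/* Hypothetical low-level entry point, placed in .irqentry.text: */
void __irq_entry foo_arch_handle_irq(struct pt_regs *regs)
{
        /* architecture-specific demultiplexing and dispatch */
}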