@@ -45,14 +45,14 @@
  * IRQF_PERCPU - Interrupt is per cpu
  * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
  * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
- *                registered first in an shared interrupt is considered for
+ *                registered first in a shared interrupt is considered for
  *                performance reasons)
  * IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished.
  *                Used by threaded interrupts which need to keep the
  *                irq line disabled until the threaded handler has been run.
  * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend. Does not guarantee
  *                   that this interrupt will wake the system from a suspended
- *                   state. See Documentation/power/suspend-and-interrupts.txt
+ *                   state. See Documentation/power/suspend-and-interrupts.rst
  * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set
  * IRQF_NO_THREAD - Interrupt cannot be threaded
  * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device
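The IRQF_ONESHOT rule above (the line stays masked until the threaded handler returns) is easiest to see in code. A minimal sketch, assuming a hypothetical foo driver — foo_dev, the handlers and the "foo" name are illustrative, not part of this patch:

```c
#include <linux/interrupt.h>

struct foo_dev {
        int irq;
};

/* Hard-IRQ context: acknowledge the device and defer the real work. */
static irqreturn_t foo_hardirq(int irq, void *dev_id)
{
        return IRQ_WAKE_THREAD;
}

/* Thread context: may sleep. With IRQF_ONESHOT the irq line stays
 * disabled until this function returns. */
static irqreturn_t foo_thread(int irq, void *dev_id)
{
        struct foo_dev *foo = dev_id;

        (void)foo;              /* talk to the hardware here */
        return IRQ_HANDLED;
}

static int foo_setup(struct foo_dev *foo)
{
        return request_threaded_irq(foo->irq, foo_hardirq, foo_thread,
                                    IRQF_ONESHOT, "foo", foo);
}
```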
@@ -140,6 +140,19 @@
                      irq_handler_t thread_fn,
                      unsigned long flags, const char *name, void *dev);

+/**
+ * request_irq - Add a handler for an interrupt line
+ * @irq: The interrupt line to allocate
+ * @handler: Function to be called when the IRQ occurs.
+ *           Primary handler for threaded interrupts
+ *           If NULL, the default primary handler is installed
+ * @flags: Handling flags
+ * @name: Name of the device generating this interrupt
+ * @dev: A cookie passed to the handler function
+ *
+ * This call allocates an interrupt and establishes a handler; see
+ * the documentation for request_threaded_irq() for details.
+ */
 static inline int __must_check
 request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
             const char *name, void *dev)
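A short usage sketch for the newly documented wrapper; bar_isr and the "bar" label are hypothetical:

```c
static irqreturn_t bar_isr(int irq, void *dev_id)
{
        /* dev_id is the cookie passed as @dev below. */
        return IRQ_HANDLED;
}

static int bar_attach(unsigned int irq, void *cookie)
{
        /* On a shared line, @dev must be non-NULL so that free_irq()
         * can identify which handler to remove. */
        return request_irq(irq, bar_isr, IRQF_SHARED, "bar", cookie);
}
```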
@@ -156,6 +169,10 @@
                    unsigned long flags, const char *devname,
                    void __percpu *percpu_dev_id);

+extern int __must_check
+request_nmi(unsigned int irq, irq_handler_t handler, unsigned long flags,
+            const char *name, void *dev);
+
 static inline int __must_check
 request_percpu_irq(unsigned int irq, irq_handler_t handler,
                    const char *devname, void __percpu *percpu_dev_id)
@@ -164,8 +181,15 @@
                                     devname, percpu_dev_id);
 }

+extern int __must_check
+request_percpu_nmi(unsigned int irq, irq_handler_t handler,
+                   const char *devname, void __percpu *dev);
+
 extern const void *free_irq(unsigned int, void *);
 extern void free_percpu_irq(unsigned int, void __percpu *);
+
+extern const void *free_nmi(unsigned int irq, void *dev_id);
+extern void free_percpu_nmi(unsigned int irq, void __percpu *percpu_dev_id);

 struct device;

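How the new NMI request/free pair might be used — a sketch under stated assumptions: the irqchip must support NMI delivery, the line cannot be shared, the line is expected to have auto-enabling disabled (so it is switched on explicitly via enable_nmi(), declared in the next hunk), and the handler runs in NMI context, so it must not sleep or take regular locks. The baz_* names are hypothetical:

```c
static irqreturn_t baz_nmi(int irq, void *dev_id)
{
        /* NMI context: keep this minimal and lock-free. */
        return IRQ_HANDLED;
}

static int baz_setup_nmi(unsigned int irq, void *cookie)
{
        int ret;

        ret = request_nmi(irq, baz_nmi, 0, "baz-nmi", cookie);
        if (ret)
                return ret;

        /* The NMI line is not enabled automatically at request time. */
        enable_nmi(irq);
        return 0;
}

static void baz_remove_nmi(unsigned int irq, void *cookie)
{
        disable_nmi_nosync(irq);
        free_nmi(irq, cookie);
}
```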
@@ -217,9 +241,19 @@
 extern bool irq_percpu_is_enabled(unsigned int irq);
 extern void irq_wake_thread(unsigned int irq, void *dev_id);

+extern void disable_nmi_nosync(unsigned int irq);
+extern void disable_percpu_nmi(unsigned int irq);
+extern void enable_nmi(unsigned int irq);
+extern void enable_percpu_nmi(unsigned int irq, unsigned int type);
+extern int prepare_percpu_nmi(unsigned int irq);
+extern void teardown_percpu_nmi(unsigned int irq);
+
+extern int irq_inject_interrupt(unsigned int irq);
+
 /* The following three functions are for the core kernel use only. */
 extern void suspend_device_irqs(void);
 extern void resume_device_irqs(void);
+extern void rearm_wake_irq(unsigned int irq);

 /**
  * struct irq_affinity_notify - context for notification of IRQ affinity changes
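The per-CPU NMI helpers above follow a two-level protocol: request_percpu_nmi() once for the line, then prepare_percpu_nmi() and enable_percpu_nmi() on each target CPU — both of which must run on that CPU itself, e.g. via on_each_cpu() or smp_call_function_single(). A hedged sketch with hypothetical qux_* names:

```c
static DEFINE_PER_CPU(unsigned int, qux_hits);

static irqreturn_t qux_nmi(int irq, void *dev_id)
{
        this_cpu_inc(qux_hits);
        return IRQ_HANDLED;
}

/* Runs on the CPU being set up (called via on_each_cpu() below). */
static void qux_enable_this_cpu(void *info)
{
        unsigned int irq = *(unsigned int *)info;

        if (!prepare_percpu_nmi(irq))
                enable_percpu_nmi(irq, IRQ_TYPE_NONE);
}

static int qux_setup(unsigned int irq)
{
        int ret;

        ret = request_percpu_nmi(irq, qux_nmi, "qux", &qux_hits);
        if (ret)
                return ret;

        on_each_cpu(qux_enable_this_cpu, &irq, 1);
        return 0;
}
```

Teardown mirrors this: disable_percpu_nmi() and teardown_percpu_nmi() per CPU, then free_percpu_nmi() for the line.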
@@ -241,16 +275,39 @@
         void (*release)(struct kref *ref);
 };

+#define IRQ_AFFINITY_MAX_SETS  4
+
 /**
  * struct irq_affinity - Description for automatic irq affinity assignements
  * @pre_vectors: Don't apply affinity to @pre_vectors at beginning of
  *               the MSI(-X) vector space
  * @post_vectors: Don't apply affinity to @post_vectors at end of
  *                the MSI(-X) vector space
+ * @nr_sets: The number of interrupt sets for which affinity
+ *           spreading is required
+ * @set_size: Array holding the size of each interrupt set
+ * @calc_sets: Callback for calculating the number and size
+ *             of interrupt sets
+ * @priv: Private data for usage by @calc_sets, usually a
+ *        pointer to driver/device specific data.
  */
 struct irq_affinity {
-        int pre_vectors;
-        int post_vectors;
+        unsigned int pre_vectors;
+        unsigned int post_vectors;
+        unsigned int nr_sets;
+        unsigned int set_size[IRQ_AFFINITY_MAX_SETS];
+        void (*calc_sets)(struct irq_affinity *, unsigned int nvecs);
+        void *priv;
+};
+
+/**
+ * struct irq_affinity_desc - Interrupt affinity descriptor
+ * @mask: cpumask to hold the affinity assignment
+ * @is_managed: 1 if the interrupt is managed internally
+ */
+struct irq_affinity_desc {
+        struct cpumask mask;
+        unsigned int is_managed : 1;
 };

 #if defined(CONFIG_SMP)
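What the set-aware struct irq_affinity looks like from the driver side: the sketch below mimics how a multiqueue driver might split its vectors into two queue sets, and such a descriptor would typically be handed to pci_alloc_irq_vectors_affinity(). The quux_* names and the 50/50 split are illustrative:

```c
static void quux_calc_sets(struct irq_affinity *affd, unsigned int nvecs)
{
        /* Split the spreadable vectors into two interrupt sets,
         * e.g. read queues and write queues. */
        affd->nr_sets = 2;
        affd->set_size[0] = nvecs / 2;
        affd->set_size[1] = nvecs - nvecs / 2;
}

static struct irq_affinity quux_affd = {
        .pre_vectors = 1,               /* e.g. one admin/config vector */
        .calc_sets   = quux_calc_sets,  /* called with the spreadable count */
};
```

Note that the set sizes produced by @calc_sets must add up to the number of spreadable vectors, and @nr_sets may not exceed IRQ_AFFINITY_MAX_SETS.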
@@ -299,8 +356,11 @@
 extern int
 irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);

-struct cpumask *irq_create_affinity_masks(int nvec, const struct irq_affinity *affd);
-int irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity *affd);
+struct irq_affinity_desc *
+irq_create_affinity_masks(unsigned int nvec, struct irq_affinity *affd);
+
+unsigned int irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
+                                       const struct irq_affinity *affd);

 #else /* CONFIG_SMP */

@@ -333,14 +393,15 @@
         return 0;
 }

-static inline struct cpumask *
-irq_create_affinity_masks(int nvec, const struct irq_affinity *affd)
+static inline struct irq_affinity_desc *
+irq_create_affinity_masks(unsigned int nvec, struct irq_affinity *affd)
 {
         return NULL;
 }

-static inline int
-irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity *affd)
+static inline unsigned int
+irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
+                          const struct irq_affinity *affd)
 {
         return maxvec;
 }
@@ -427,7 +488,11 @@
                                  bool state);

 #ifdef CONFIG_IRQ_FORCED_THREADING
+# ifdef CONFIG_PREEMPT_RT
+#  define force_irqthreads (true)
+# else
 extern bool force_irqthreads;
+# endif
 #else
 #define force_irqthreads (0)
 #endif
@@ -470,14 +535,19 @@
         IRQ_POLL_SOFTIRQ,
         TASKLET_SOFTIRQ,
         SCHED_SOFTIRQ,
-        HRTIMER_SOFTIRQ, /* Unused, but kept as tools rely on the
-                            numbering. Sigh! */
+        HRTIMER_SOFTIRQ,
         RCU_SOFTIRQ,     /* Preferable RCU should always be the last softirq */

         NR_SOFTIRQS
 };

 #define SOFTIRQ_STOP_IDLE_MASK (~(1 << RCU_SOFTIRQ))
+/* Softirqs where the handling might be long: */
+#define LONG_SOFTIRQ_MASK ((1 << NET_TX_SOFTIRQ) | \
+                           (1 << NET_RX_SOFTIRQ) | \
+                           (1 << BLOCK_SOFTIRQ) | \
+                           (1 << IRQ_POLL_SOFTIRQ) | \
+                           (1 << TASKLET_SOFTIRQ))

 /* map softirq index to softirq name. update 'softirq_to_name' in
  * kernel/softirq.c when adding a new softirq.
@@ -513,6 +583,7 @@
 extern void raise_softirq(unsigned int nr);

 DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
+DECLARE_PER_CPU(__u32, active_softirqs);

 static inline struct task_struct *this_cpu_ksoftirqd(void)
 {
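A small sketch of how the newly exported per-CPU state might be consumed together with LONG_SOFTIRQ_MASK from the earlier hunk, e.g. by a scheduler-side heuristic that avoids CPUs currently busy in a long-running softirq (the helper name is hypothetical):

```c
static bool cpu_in_long_softirq(int cpu)
{
        return per_cpu(active_softirqs, cpu) & LONG_SOFTIRQ_MASK;
}
```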
@@ -520,6 +591,9 @@
 }

 /* Tasklets --- multithreaded analogue of BHs.
+
+   This API is deprecated. Please consider using threaded IRQs instead:
+   https://lore.kernel.org/lkml/20200716081538.2sivhkj4hcyrusem@linutronix.de

    Main feature differing them of generic softirqs: tasklet
    is running only on one CPU simultaneously.
@@ -544,16 +618,42 @@
         struct tasklet_struct *next;
         unsigned long state;
         atomic_t count;
-        void (*func)(unsigned long);
+        bool use_callback;
+        union {
+                void (*func)(unsigned long data);
+                void (*callback)(struct tasklet_struct *t);
+        };
         unsigned long data;
 };

-#define DECLARE_TASKLET(name, func, data) \
-struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }
+#define DECLARE_TASKLET(name, _callback)                \
+struct tasklet_struct name = {                          \
+        .count = ATOMIC_INIT(0),                        \
+        .callback = _callback,                          \
+        .use_callback = true,                           \
+}

-#define DECLARE_TASKLET_DISABLED(name, func, data) \
-struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }
+#define DECLARE_TASKLET_DISABLED(name, _callback)       \
+struct tasklet_struct name = {                          \
+        .count = ATOMIC_INIT(1),                        \
+        .callback = _callback,                          \
+        .use_callback = true,                           \
+}

+#define from_tasklet(var, callback_tasklet, tasklet_fieldname)  \
+        container_of(callback_tasklet, typeof(*var), tasklet_fieldname)
+
+#define DECLARE_TASKLET_OLD(name, _func)                \
+struct tasklet_struct name = {                          \
+        .count = ATOMIC_INIT(0),                        \
+        .func = _func,                                  \
+}
+
+#define DECLARE_TASKLET_DISABLED_OLD(name, _func)       \
+struct tasklet_struct name = {                          \
+        .count = ATOMIC_INIT(1),                        \
+        .func = _func,                                  \
+}

 enum
 {
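The new callback-style declaration in use — a minimal sketch with a hypothetical grault tasklet:

```c
static void grault_fn(struct tasklet_struct *t)
{
        /* New-style callback: receives the tasklet itself rather than
         * the old unsigned long data cookie. */
}

static DECLARE_TASKLET(grault_tasklet, grault_fn);

static void grault_poke(void)
{
        tasklet_schedule(&grault_tasklet);
}
```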
@@ -622,31 +722,8 @@
 extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
 extern void tasklet_init(struct tasklet_struct *t,
                          void (*func)(unsigned long), unsigned long data);
-
-struct tasklet_hrtimer {
-        struct hrtimer timer;
-        struct tasklet_struct tasklet;
-        enum hrtimer_restart (*function)(struct hrtimer *);
-};
-
-extern void
-tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
-                     enum hrtimer_restart (*function)(struct hrtimer *),
-                     clockid_t which_clock, enum hrtimer_mode mode);
-
-static inline
-void tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time,
-                           const enum hrtimer_mode mode)
-{
-        hrtimer_start(&ttimer->timer, time, mode);
-}
-
-static inline
-void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer)
-{
-        hrtimer_cancel(&ttimer->timer);
-        tasklet_kill(&ttimer->tasklet);
-}
+extern void tasklet_setup(struct tasklet_struct *t,
+                          void (*callback)(struct tasklet_struct *));

 /*
  * Autoprobing for irqs:
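For tasklets embedded in a driver structure, tasklet_setup() pairs with from_tasklet() from the earlier hunk; a sketch with a hypothetical garply_dev:

```c
struct garply_dev {
        struct tasklet_struct tasklet;
        unsigned int events;
};

static void garply_tasklet_fn(struct tasklet_struct *t)
{
        /* Recover the containing object from the tasklet pointer. */
        struct garply_dev *gd = from_tasklet(gd, t, tasklet);

        gd->events = 0;
}

static void garply_init(struct garply_dev *gd)
{
        tasklet_setup(&gd->tasklet, garply_tasklet_fn);
}
```

The tasklet_hrtimer helpers removed above have no in-header replacement; their few users appear to have been converted upstream to plain hrtimers expiring in softirq context (HRTIMER_MODE_SOFT).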
@@ -721,8 +798,10 @@
 /*
  * We want to know which function is an entrypoint of a hardirq or a softirq.
  */
-#define __irq_entry __attribute__((__section__(".irqentry.text")))
-#define __softirq_entry \
-        __attribute__((__section__(".softirqentry.text")))
+#ifndef __irq_entry
+# define __irq_entry __section(".irqentry.text")
+#endif
+
+#define __softirq_entry __section(".softirqentry.text")

 #endif