.. | .. |
45 | 45 | * IRQF_PERCPU - Interrupt is per cpu
46 | 46 | * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
47 | 47 | * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
48 | | - * registered first in an shared interrupt is considered for
| 48 | + * registered first in a shared interrupt is considered for
49 | 49 | * performance reasons)
50 | 50 | * IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished.
51 | 51 | * Used by threaded interrupts which need to keep the
52 | 52 | * irq line disabled until the threaded handler has been run.
53 | 53 | * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend. Does not guarantee
54 | 54 | * that this interrupt will wake the system from a suspended
55 | | - * state. See Documentation/power/suspend-and-interrupts.txt
| 55 | + * state. See Documentation/power/suspend-and-interrupts.rst
56 | 56 | * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set
57 | 57 | * IRQF_NO_THREAD - Interrupt cannot be threaded
58 | 58 | * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device
.. | .. |
61 | 61 | * interrupt handler after suspending interrupts. For system
62 | 62 | * wakeup devices users need to implement wakeup detection in
63 | 63 | * their interrupt handlers.
64 | | - * IRQF_NO_SOFTIRQ_CALL - Do not process softirqs in the irq thread context (RT)
65 | 64 | */
66 | 65 | #define IRQF_SHARED 0x00000080
67 | 66 | #define IRQF_PROBE_SHARED 0x00000100
.. | .. |
75 | 74 | #define IRQF_NO_THREAD 0x00010000
76 | 75 | #define IRQF_EARLY_RESUME 0x00020000
77 | 76 | #define IRQF_COND_SUSPEND 0x00040000
78 | | -#define IRQF_NO_SOFTIRQ_CALL 0x00080000
79 | 77 |
80 | 78 | #define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
81 | 79 |
.. | .. |
142 | 140 | irq_handler_t thread_fn,
143 | 141 | unsigned long flags, const char *name, void *dev);
144 | 142 |
| 143 | +/**
| 144 | + * request_irq - Add a handler for an interrupt line
| 145 | + * @irq: The interrupt line to allocate
| 146 | + * @handler: Function to be called when the IRQ occurs.
| 147 | + * Primary handler for threaded interrupts
| 148 | + * If NULL, the default primary handler is installed
| 149 | + * @flags: Handling flags
| 150 | + * @name: Name of the device generating this interrupt
| 151 | + * @dev: A cookie passed to the handler function
| 152 | + *
| 153 | + * This call allocates an interrupt and establishes a handler; see
| 154 | + * the documentation for request_threaded_irq() for details.
| 155 | + */
145 | 156 | static inline int __must_check
146 | 157 | request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
147 | 158 | const char *name, void *dev)
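The kernel-doc added above covers the common case. As a hedged illustration (the device name, the `struct my_dev` cookie, and the handler below are hypothetical, not part of this header), a driver typically pairs request_irq() with free_irq() like this:

```c
#include <linux/interrupt.h>

/* Hypothetical per-device state used as the dev_id cookie. */
struct my_dev {
	void __iomem *regs;
	int irq;
};

static irqreturn_t my_dev_isr(int irq, void *dev_id)
{
	struct my_dev *md = dev_id;

	/* Check/ack the device here; return IRQ_NONE if it wasn't ours. */
	(void)md;
	return IRQ_HANDLED;
}

static int my_dev_setup_irq(struct my_dev *md)
{
	/* IRQF_SHARED requires a non-NULL dev cookie so handlers can be told apart. */
	return request_irq(md->irq, my_dev_isr, IRQF_SHARED, "my_dev", md);
}

static void my_dev_teardown_irq(struct my_dev *md)
{
	free_irq(md->irq, md);	/* same cookie that was passed to request_irq() */
}
```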
.. | .. |
158 | 169 | unsigned long flags, const char *devname,
159 | 170 | void __percpu *percpu_dev_id);
160 | 171 |
| 172 | +extern int __must_check
| 173 | +request_nmi(unsigned int irq, irq_handler_t handler, unsigned long flags,
| 174 | + const char *name, void *dev);
| 175 | +
161 | 176 | static inline int __must_check
162 | 177 | request_percpu_irq(unsigned int irq, irq_handler_t handler,
163 | 178 | const char *devname, void __percpu *percpu_dev_id)
.. | .. |
166 | 181 | devname, percpu_dev_id);
167 | 182 | }
168 | 183 |
| 184 | +extern int __must_check
| 185 | +request_percpu_nmi(unsigned int irq, irq_handler_t handler,
| 186 | + const char *devname, void __percpu *dev);
| 187 | +
169 | 188 | extern const void *free_irq(unsigned int, void *);
170 | 189 | extern void free_percpu_irq(unsigned int, void __percpu *);
| 190 | +
| 191 | +extern const void *free_nmi(unsigned int irq, void *dev_id);
| 192 | +extern void free_percpu_nmi(unsigned int irq, void __percpu *percpu_dev_id);
171 | 193 |
172 | 194 | struct device;
173 | 195 |
.. | .. |
219 | 241 | extern bool irq_percpu_is_enabled(unsigned int irq);
220 | 242 | extern void irq_wake_thread(unsigned int irq, void *dev_id);
221 | 243 |
| 244 | +extern void disable_nmi_nosync(unsigned int irq);
| 245 | +extern void disable_percpu_nmi(unsigned int irq);
| 246 | +extern void enable_nmi(unsigned int irq);
| 247 | +extern void enable_percpu_nmi(unsigned int irq, unsigned int type);
| 248 | +extern int prepare_percpu_nmi(unsigned int irq);
| 249 | +extern void teardown_percpu_nmi(unsigned int irq);
| 250 | +
| 251 | +extern int irq_inject_interrupt(unsigned int irq);
| 252 | +
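The declarations above expose the NMI-grade interrupt API added alongside the per-CPU variants. A hedged sketch of the intended per-CPU NMI lifecycle follows; the IRQ number, handler, and per-CPU cookie are hypothetical, and the exact ordering should be checked against the request_percpu_nmi() kerneldoc in kernel/irq/manage.c:

```c
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(int, my_nmi_cookie);	/* hypothetical per-CPU dev_id */

static irqreturn_t my_nmi_handler(int irq, void *dev_id)
{
	/* NMI context: keep this minimal and non-blocking. */
	return IRQ_HANDLED;
}

static int my_nmi_setup(unsigned int irq)
{
	int ret;

	/* Register the NMI handler once, while the line is not in use. */
	ret = request_percpu_nmi(irq, my_nmi_handler, "my_nmi", &my_nmi_cookie);
	if (ret)
		return ret;

	/* On each CPU that should receive it (must run on the target CPU): */
	ret = prepare_percpu_nmi(irq);
	if (ret)
		return ret;
	enable_percpu_nmi(irq, IRQ_TYPE_NONE);
	return 0;
}

static void my_nmi_teardown(unsigned int irq)
{
	/* Mirror image, again per CPU, then release the line. */
	disable_percpu_nmi(irq);
	teardown_percpu_nmi(irq);
	free_percpu_nmi(irq, &my_nmi_cookie);
}
```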
222 | 253 | /* The following three functions are for the core kernel use only. */
223 | 254 | extern void suspend_device_irqs(void);
224 | 255 | extern void resume_device_irqs(void);
| 256 | +extern void rearm_wake_irq(unsigned int irq);
225 | 257 |
226 | 258 | /**
227 | 259 | * struct irq_affinity_notify - context for notification of IRQ affinity changes
.. | .. |
243 | 275 | void (*release)(struct kref *ref);
244 | 276 | };
245 | 277 |
| 278 | +#define IRQ_AFFINITY_MAX_SETS 4
| 279 | +
246 | 280 | /**
247 | 281 | * struct irq_affinity - Description for automatic irq affinity assignements
248 | 282 | * @pre_vectors: Don't apply affinity to @pre_vectors at beginning of
249 | 283 | * the MSI(-X) vector space
250 | 284 | * @post_vectors: Don't apply affinity to @post_vectors at end of
251 | 285 | * the MSI(-X) vector space
| 286 | + * @nr_sets: The number of interrupt sets for which affinity
| 287 | + * spreading is required
| 288 | + * @set_size: Array holding the size of each interrupt set
| 289 | + * @calc_sets: Callback for calculating the number and size
| 290 | + * of interrupt sets
| 291 | + * @priv: Private data for usage by @calc_sets, usually a
| 292 | + * pointer to driver/device specific data.
252 | 293 | */
253 | 294 | struct irq_affinity {
254 | | - int pre_vectors;
255 | | - int post_vectors;
| 295 | + unsigned int pre_vectors;
| 296 | + unsigned int post_vectors;
| 297 | + unsigned int nr_sets;
| 298 | + unsigned int set_size[IRQ_AFFINITY_MAX_SETS];
| 299 | + void (*calc_sets)(struct irq_affinity *, unsigned int nvecs);
| 300 | + void *priv;
| 301 | +};
| 302 | +
| 303 | +/**
| 304 | + * struct irq_affinity_desc - Interrupt affinity descriptor
| 305 | + * @mask: cpumask to hold the affinity assignment
| 306 | + * @is_managed: 1 if the interrupt is managed internally
| 307 | + */
| 308 | +struct irq_affinity_desc {
| 309 | + struct cpumask mask;
| 310 | + unsigned int is_managed : 1;
256 | 311 | };
257 | 312 |
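The new @nr_sets/@calc_sets machinery lets a driver split its MSI-X vectors into independently spread sets. A hedged sketch of how a driver might use it with pci_alloc_irq_vectors_affinity(); the set split and the my_adapter structure are hypothetical, loosely modeled on the pattern used by multi-queue block drivers:

```c
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/pci.h>

struct my_adapter {
	struct pci_dev *pdev;
	unsigned int nr_read_queues;	/* hypothetical driver policy */
};

/* Split the vectors being spread into a "read" set and a "write" set. */
static void my_calc_sets(struct irq_affinity *affd, unsigned int nvecs)
{
	struct my_adapter *adap = affd->priv;
	unsigned int reads = min(adap->nr_read_queues, nvecs - 1);

	affd->nr_sets = 2;
	affd->set_size[0] = reads;
	affd->set_size[1] = nvecs - reads;
}

static int my_alloc_vectors(struct my_adapter *adap, unsigned int max_vecs)
{
	struct irq_affinity affd = {
		.pre_vectors	= 1,		/* admin/config vector, not spread */
		.calc_sets	= my_calc_sets,
		.priv		= adap,
	};

	return pci_alloc_irq_vectors_affinity(adap->pdev, 2, max_vecs,
					      PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
					      &affd);
}
```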
258 | 313 | #if defined(CONFIG_SMP)
.. | .. |
301 | 356 | extern int
302 | 357 | irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);
303 | 358 |
304 | | -struct cpumask *irq_create_affinity_masks(int nvec, const struct irq_affinity *affd);
305 | | -int irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity *affd);
| 359 | +struct irq_affinity_desc *
| 360 | +irq_create_affinity_masks(unsigned int nvec, struct irq_affinity *affd);
| 361 | +
| 362 | +unsigned int irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
| 363 | + const struct irq_affinity *affd);
306 | 364 |
307 | 365 | #else /* CONFIG_SMP */
308 | 366 |
.. | .. |
335 | 393 | return 0;
336 | 394 | }
337 | 395 |
338 | | -static inline struct cpumask *
339 | | -irq_create_affinity_masks(int nvec, const struct irq_affinity *affd)
| 396 | +static inline struct irq_affinity_desc *
| 397 | +irq_create_affinity_masks(unsigned int nvec, struct irq_affinity *affd)
340 | 398 | {
341 | 399 | return NULL;
342 | 400 | }
343 | 401 |
344 | | -static inline int
345 | | -irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity *affd)
| 402 | +static inline unsigned int
| 403 | +irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
| 404 | + const struct irq_affinity *affd)
346 | 405 | {
347 | 406 | return maxvec;
348 | 407 | }
.. | .. |
429 | 488 | bool state);
430 | 489 |
431 | 490 | #ifdef CONFIG_IRQ_FORCED_THREADING
432 | | -# ifdef CONFIG_PREEMPT_RT_BASE
| 491 | +# ifdef CONFIG_PREEMPT_RT
433 | 492 | # define force_irqthreads (true)
434 | 493 | # else
435 | 494 | extern bool force_irqthreads;
.. | .. |
476 | 535 | IRQ_POLL_SOFTIRQ,
477 | 536 | TASKLET_SOFTIRQ,
478 | 537 | SCHED_SOFTIRQ,
479 | | - HRTIMER_SOFTIRQ, /* Unused, but kept as tools rely on the
480 | | - numbering. Sigh! */
| 538 | + HRTIMER_SOFTIRQ,
481 | 539 | RCU_SOFTIRQ, /* Preferable RCU should always be the last softirq */
482 | 540 |
483 | 541 | NR_SOFTIRQS
484 | 542 | };
485 | 543 |
486 | 544 | #define SOFTIRQ_STOP_IDLE_MASK (~(1 << RCU_SOFTIRQ))
| 545 | +/* Softirq's where the handling might be long: */
| 546 | +#define LONG_SOFTIRQ_MASK ((1 << NET_TX_SOFTIRQ) | \
| 547 | + (1 << NET_RX_SOFTIRQ) | \
| 548 | + (1 << BLOCK_SOFTIRQ) | \
| 549 | + (1 << IRQ_POLL_SOFTIRQ) | \
| 550 | + (1 << TASKLET_SOFTIRQ))
487 | 551 |
488 | 552 | /* map softirq index to softirq name. update 'softirq_to_name' in
489 | 553 | * kernel/softirq.c when adding a new softirq.
.. | .. |
499 | 563 | void (*action)(struct softirq_action *);
500 | 564 | };
501 | 565 |
502 | | -#ifndef CONFIG_PREEMPT_RT_FULL
503 | 566 | asmlinkage void do_softirq(void);
504 | 567 | asmlinkage void __do_softirq(void);
505 | | -static inline void thread_do_softirq(void) { do_softirq(); }
| 568 | +
506 | 569 | #ifdef __ARCH_HAS_DO_SOFTIRQ
507 | 570 | void do_softirq_own_stack(void);
508 | 571 | #else
.. | .. |
511 | 574 | __do_softirq();
512 | 575 | }
513 | 576 | #endif
514 | | -#else
515 | | -extern void thread_do_softirq(void);
516 | | -#endif
517 | 577 |
518 | 578 | extern void open_softirq(int nr, void (*action)(struct softirq_action *));
519 | 579 | extern void softirq_init(void);
520 | 580 | extern void __raise_softirq_irqoff(unsigned int nr);
521 | | -#ifdef CONFIG_PREEMPT_RT_FULL
522 | | -extern void __raise_softirq_irqoff_ksoft(unsigned int nr);
523 | | -#else
524 | | -static inline void __raise_softirq_irqoff_ksoft(unsigned int nr)
525 | | -{
526 | | - __raise_softirq_irqoff(nr);
527 | | -}
528 | | -#endif
529 | 581 |
530 | 582 | extern void raise_softirq_irqoff(unsigned int nr);
531 | 583 | extern void raise_softirq(unsigned int nr);
532 | | -extern void softirq_check_pending_idle(void);
533 | 584 |
534 | 585 | DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
| 586 | +DECLARE_PER_CPU(__u32, active_softirqs);
535 | 587 |
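The per-CPU active_softirqs word added above records which softirqs are currently executing, which is what makes the LONG_SOFTIRQ_MASK above usable for spotting CPUs busy in a potentially long-running softirq. A minimal, hedged sketch of such a check; the helper name is hypothetical, and the real consumer of this mask lives elsewhere in the vendor patchset:

```c
#include <linux/interrupt.h>
#include <linux/percpu.h>

/* Hypothetical helper: is @cpu currently inside a potentially long softirq? */
static inline bool cpu_in_long_softirq(int cpu)
{
	return per_cpu(active_softirqs, cpu) & LONG_SOFTIRQ_MASK;
}
```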
536 | 588 | static inline struct task_struct *this_cpu_ksoftirqd(void)
537 | 589 | {
.. | .. |
539 | 591 | }
540 | 592 |
541 | 593 | /* Tasklets --- multithreaded analogue of BHs.
| 594 | +
| 595 | + This API is deprecated. Please consider using threaded IRQs instead:
| 596 | + https://lore.kernel.org/lkml/20200716081538.2sivhkj4hcyrusem@linutronix.de
542 | 597 |
543 | 598 | Main feature differing them of generic softirqs: tasklet
544 | 599 | is running only on one CPU simultaneously.
.. | .. |
551 | 606 | to be executed on some cpu at least once after this.
552 | 607 | * If the tasklet is already scheduled, but its execution is still not
553 | 608 | started, it will be executed only once.
554 | | - * If this tasklet is already running on another CPU, it is rescheduled
555 | | - for later.
556 | | - * Schedule must not be called from the tasklet itself (a lockup occurs)
| 609 | + * If this tasklet is already running on another CPU (or schedule is called
| 610 | + from tasklet itself), it is rescheduled for later.
557 | 611 | * Tasklet is strictly serialized wrt itself, but not
558 | 612 | wrt another tasklets. If client needs some intertask synchronization,
559 | 613 | he makes it with spinlocks.
.. | .. |
564 | 618 | struct tasklet_struct *next;
565 | 619 | unsigned long state;
566 | 620 | atomic_t count;
567 | | - void (*func)(unsigned long);
| 621 | + bool use_callback;
| 622 | + union {
| 623 | + void (*func)(unsigned long data);
| 624 | + void (*callback)(struct tasklet_struct *t);
| 625 | + };
568 | 626 | unsigned long data;
569 | 627 | };
570 | 628 |
571 | | -#define DECLARE_TASKLET(name, func, data) \
572 | | -struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }
| 629 | +#define DECLARE_TASKLET(name, _callback) \
| 630 | +struct tasklet_struct name = { \
| 631 | + .count = ATOMIC_INIT(0), \
| 632 | + .callback = _callback, \
| 633 | + .use_callback = true, \
| 634 | +}
573 | 635 |
574 | | -#define DECLARE_TASKLET_DISABLED(name, func, data) \
575 | | -struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }
| 636 | +#define DECLARE_TASKLET_DISABLED(name, _callback) \
| 637 | +struct tasklet_struct name = { \
| 638 | + .count = ATOMIC_INIT(1), \
| 639 | + .callback = _callback, \
| 640 | + .use_callback = true, \
| 641 | +}
576 | 642 |
| 643 | +#define from_tasklet(var, callback_tasklet, tasklet_fieldname) \
| 644 | + container_of(callback_tasklet, typeof(*var), tasklet_fieldname)
| 645 | +
| 646 | +#define DECLARE_TASKLET_OLD(name, _func) \
| 647 | +struct tasklet_struct name = { \
| 648 | + .count = ATOMIC_INIT(0), \
| 649 | + .func = _func, \
| 650 | +}
| 651 | +
| 652 | +#define DECLARE_TASKLET_DISABLED_OLD(name, _func) \
| 653 | +struct tasklet_struct name = { \
| 654 | + .count = ATOMIC_INIT(1), \
| 655 | + .func = _func, \
| 656 | +}
577 | 657 |
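With the callback-based form above, a statically declared tasklet now receives its own tasklet_struct rather than an opaque data word. A hedged sketch of the new-style declaration and a typical scheduling site; the state variable and handler names are hypothetical:

```c
#include <linux/interrupt.h>

static unsigned long my_event_count;	/* hypothetical state touched in BH context */

/* New-style callback: receives the tasklet_struct itself, not a data word. */
static void my_tasklet_fn(struct tasklet_struct *t)
{
	my_event_count++;
}

static DECLARE_TASKLET(my_tasklet, my_tasklet_fn);

/* In the (hypothetical) interrupt handler, defer the heavier work: */
static irqreturn_t my_irq_handler(int irq, void *dev_id)
{
	tasklet_schedule(&my_tasklet);
	return IRQ_HANDLED;
}
```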
578 | 658 | enum
579 | 659 | {
580 | 660 | TASKLET_STATE_SCHED, /* Tasklet is scheduled for execution */
581 | | - TASKLET_STATE_RUN, /* Tasklet is running (SMP only) */
582 | | - TASKLET_STATE_PENDING, /* Tasklet is pending */
583 | | - TASKLET_STATE_CHAINED /* Tasklet is chained */
| 661 | + TASKLET_STATE_RUN /* Tasklet is running (SMP only) */
584 | 662 | };
585 | 663 |
586 | | -#define TASKLET_STATEF_SCHED (1 << TASKLET_STATE_SCHED)
587 | | -#define TASKLET_STATEF_RUN (1 << TASKLET_STATE_RUN)
588 | | -#define TASKLET_STATEF_PENDING (1 << TASKLET_STATE_PENDING)
589 | | -#define TASKLET_STATEF_CHAINED (1 << TASKLET_STATE_CHAINED)
590 | | -#define TASKLET_STATEF_RC (TASKLET_STATEF_RUN | TASKLET_STATEF_CHAINED)
591 | | -
592 | | -#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
| 664 | +#ifdef CONFIG_SMP
593 | 665 | static inline int tasklet_trylock(struct tasklet_struct *t)
594 | 666 | {
595 | 667 | return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
596 | | -}
597 | | -
598 | | -static inline int tasklet_tryunlock(struct tasklet_struct *t)
599 | | -{
600 | | - return cmpxchg(&t->state, TASKLET_STATEF_RUN, 0) == TASKLET_STATEF_RUN;
601 | 668 | }
602 | 669 |
603 | 670 | static inline void tasklet_unlock(struct tasklet_struct *t)
.. | .. |
606 | 673 | clear_bit(TASKLET_STATE_RUN, &(t)->state);
607 | 674 | }
608 | 675 |
609 | | -extern void tasklet_unlock_wait(struct tasklet_struct *t);
610 | | -
| 676 | +static inline void tasklet_unlock_wait(struct tasklet_struct *t)
| 677 | +{
| 678 | + while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
| 679 | +}
611 | 680 | #else
612 | 681 | #define tasklet_trylock(t) 1
613 | | -#define tasklet_tryunlock(t) 1
614 | 682 | #define tasklet_unlock_wait(t) do { } while (0)
615 | 683 | #define tasklet_unlock(t) do { } while (0)
616 | 684 | #endif
.. | .. |
644 | 712 | smp_mb();
645 | 713 | }
646 | 714 |
647 | | -extern void tasklet_enable(struct tasklet_struct *t);
| 715 | +static inline void tasklet_enable(struct tasklet_struct *t)
| 716 | +{
| 717 | + smp_mb__before_atomic();
| 718 | + atomic_dec(&t->count);
| 719 | +}
| 720 | +
648 | 721 | extern void tasklet_kill(struct tasklet_struct *t);
649 | 722 | extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
650 | 723 | extern void tasklet_init(struct tasklet_struct *t,
651 | 724 | void (*func)(unsigned long), unsigned long data);
652 | | -
653 | | -#ifdef CONFIG_PREEMPT_RT_FULL
654 | | -extern void softirq_early_init(void);
655 | | -#else
656 | | -static inline void softirq_early_init(void) { }
657 | | -#endif
658 | | -
659 | | -struct tasklet_hrtimer {
660 | | - struct hrtimer timer;
661 | | - struct tasklet_struct tasklet;
662 | | - enum hrtimer_restart (*function)(struct hrtimer *);
663 | | -};
664 | | -
665 | | -extern void
666 | | -tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
667 | | - enum hrtimer_restart (*function)(struct hrtimer *),
668 | | - clockid_t which_clock, enum hrtimer_mode mode);
669 | | -
670 | | -static inline
671 | | -void tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time,
672 | | - const enum hrtimer_mode mode)
673 | | -{
674 | | - hrtimer_start(&ttimer->timer, time, mode);
675 | | -}
676 | | -
677 | | -static inline
678 | | -void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer)
679 | | -{
680 | | - hrtimer_cancel(&ttimer->timer);
681 | | - tasklet_kill(&ttimer->tasklet);
682 | | -}
| 725 | +extern void tasklet_setup(struct tasklet_struct *t,
| 726 | + void (*callback)(struct tasklet_struct *));
683 | 727 |
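tasklet_setup() is the dynamic counterpart of the DECLARE_TASKLET() macro shown earlier: it wires the struct-based callback into a tasklet embedded in driver data, and from_tasklet() maps back to the container. A hedged sketch under hypothetical struct and function names:

```c
#include <linux/interrupt.h>

struct my_port {
	unsigned int rx_ready;		/* hypothetical device state */
	struct tasklet_struct rx_tasklet;
};

static void my_port_rx_tasklet(struct tasklet_struct *t)
{
	/* from_tasklet() is container_of() for the embedded tasklet_struct. */
	struct my_port *port = from_tasklet(port, t, rx_tasklet);

	port->rx_ready = 0;
}

static void my_port_init(struct my_port *port)
{
	tasklet_setup(&port->rx_tasklet, my_port_rx_tasklet);
}

static void my_port_shutdown(struct my_port *port)
{
	tasklet_kill(&port->rx_tasklet);	/* wait for any running instance */
}
```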
684 | 728 | /*
685 | 729 | * Autoprobing for irqs:
.. | .. |
754 | 798 | /*
755 | 799 | * We want to know which function is an entrypoint of a hardirq or a softirq.
756 | 800 | */
757 | | -#define __irq_entry __attribute__((__section__(".irqentry.text")))
758 | | -#define __softirq_entry \
759 | | - __attribute__((__section__(".softirqentry.text")))
| 801 | +#ifndef __irq_entry
| 802 | +# define __irq_entry __section(".irqentry.text")
| 803 | +#endif
| 804 | +
| 805 | +#define __softirq_entry __section(".softirqentry.text")
760 | 806 |
761 | 807 | #endif