@@ -61,6 +61,9 @@
  * interrupt handler after suspending interrupts. For system
  * wakeup devices users need to implement wakeup detection in
  * their interrupt handlers.
+ * IRQF_NO_AUTOEN - Don't enable IRQ or NMI automatically when users request it.
+ *                  Users will enable it explicitly by enable_irq() or enable_nmi()
+ *                  later.
  */
 #define IRQF_SHARED		0x00000080
 #define IRQF_PROBE_SHARED	0x00000100
@@ -74,6 +77,7 @@
 #define IRQF_NO_THREAD		0x00010000
 #define IRQF_EARLY_RESUME	0x00020000
 #define IRQF_COND_SUSPEND	0x00040000
+#define IRQF_NO_AUTOEN		0x00080000
 
 #define IRQF_TIMER		(__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
 
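The new flag pairs naturally with a deferred enable_irq(). Below is a minimal sketch of the intended usage, assuming a hypothetical my_dev driver: the structure, handler and setup logic are invented for illustration, while request_irq() and enable_irq() are the existing kernel APIs the comment above refers to.

#include <linux/interrupt.h>

struct my_dev {				/* hypothetical device structure */
	int irq;
};

static irqreturn_t my_irq_handler(int irq, void *dev_id)
{
	/* acknowledge and handle the device interrupt */
	return IRQ_HANDLED;
}

static int my_dev_setup(struct my_dev *dev)
{
	int ret;

	/* With IRQF_NO_AUTOEN the line stays disabled after a successful request. */
	ret = request_irq(dev->irq, my_irq_handler, IRQF_NO_AUTOEN,
			  "my_dev", dev);
	if (ret)
		return ret;

	/* ... finish programming the hardware ... */

	enable_irq(dev->irq);		/* unmask explicitly once the device is ready */
	return 0;
}

This avoids the request_irq() followed by disable_irq() pattern, which leaves a window in which the interrupt can fire before it is disabled.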
@@ -566,7 +570,7 @@
 asmlinkage void do_softirq(void);
 asmlinkage void __do_softirq(void);
 
-#if defined(__ARCH_HAS_DO_SOFTIRQ) && !defined(CONFIG_PREEMPT_RT)
+#ifdef __ARCH_HAS_DO_SOFTIRQ
 void do_softirq_own_stack(void);
 #else
 static inline void do_softirq_own_stack(void)
@@ -661,21 +665,26 @@
 	TASKLET_STATE_RUN	/* Tasklet is running (SMP only) */
 };
 
-#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
+#ifdef CONFIG_SMP
 static inline int tasklet_trylock(struct tasklet_struct *t)
 {
 	return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
 }
 
-void tasklet_unlock(struct tasklet_struct *t);
-void tasklet_unlock_wait(struct tasklet_struct *t);
-void tasklet_unlock_spin_wait(struct tasklet_struct *t);
+static inline void tasklet_unlock(struct tasklet_struct *t)
+{
+	smp_mb__before_atomic();
+	clear_bit(TASKLET_STATE_RUN, &(t)->state);
+}
 
+static inline void tasklet_unlock_wait(struct tasklet_struct *t)
+{
+	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
+}
 #else
-static inline int tasklet_trylock(struct tasklet_struct *t) { return 1; }
-static inline void tasklet_unlock(struct tasklet_struct *t) { }
-static inline void tasklet_unlock_wait(struct tasklet_struct *t) { }
-static inline void tasklet_unlock_spin_wait(struct tasklet_struct *t) { }
+#define tasklet_trylock(t) 1
+#define tasklet_unlock_wait(t) do { } while (0)
+#define tasklet_unlock(t) do { } while (0)
 #endif
 
 extern void __tasklet_schedule(struct tasklet_struct *t);
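To make the TASKLET_STATE_RUN protocol above concrete, here is a rough sketch of how a runner and a waiter interact on SMP. It is a simplified illustration, not the actual kernel/softirq.c code: list handling, the SCHED bit and re-scheduling on contention are omitted, and run_one_tasklet()/wait_for_tasklet() are invented names.

#include <linux/interrupt.h>

/* Simplified illustration of the TASKLET_STATE_RUN locking protocol. */
static void run_one_tasklet(struct tasklet_struct *t)
{
	if (!tasklet_trylock(t))
		return;			/* already running on another CPU */

	if (!atomic_read(&t->count))	/* skip if the tasklet is disabled */
		t->func(t->data);	/* legacy func/data callback, for brevity */

	tasklet_unlock(t);		/* clear RUN; unblocks tasklet_unlock_wait() */
}

static void wait_for_tasklet(struct tasklet_struct *t)
{
	/* Spin until any callback currently running elsewhere has finished. */
	tasklet_unlock_wait(t);
}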
@@ -698,17 +707,6 @@
 {
 	atomic_inc(&t->count);
 	smp_mb__after_atomic();
-}
-
-/*
- * Do not use in new code. Disabling tasklets from atomic contexts is
- * error prone and should be avoided.
- */
-static inline void tasklet_disable_in_atomic(struct tasklet_struct *t)
-{
-	tasklet_disable_nosync(t);
-	tasklet_unlock_spin_wait(t);
-	smp_mb();
 }
 
 static inline void tasklet_disable(struct tasklet_struct *t)