.. | ..
431 | 431 | typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);
432 | 432 |
433 | 433 | void __napi_schedule(struct napi_struct *n);
    | 434 | +
    | 435 | +/*
    | 436 | + * When PREEMPT_RT_FULL is defined, all device interrupt handlers
    | 437 | + * run as threads, and they can also be preempted (without PREEMPT_RT
    | 438 | + * interrupt threads can not be preempted). Which means that calling
    | 439 | + * __napi_schedule_irqoff() from an interrupt handler can be preempted
    | 440 | + * and can corrupt the napi->poll_list.
    | 441 | + */
    | 442 | +#ifdef CONFIG_PREEMPT_RT_FULL
    | 443 | +#define __napi_schedule_irqoff(n) __napi_schedule(n)
    | 444 | +#else
434 | 445 | void __napi_schedule_irqoff(struct napi_struct *n);
    | 446 | +#endif
435 | 447 |
436 | 448 | static inline bool napi_disable_pending(struct napi_struct *n)
437 | 449 | {
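For context, here is a hedged sketch of how a driver usually reaches `__napi_schedule_irqoff()`. The driver name, private struct, and handler are hypothetical; only `napi_schedule_irqoff()` and the IRQ plumbing are real kernel API. Without PREEMPT_RT the handler runs in hard IRQ context with interrupts disabled, so the `_irqoff` fast path is safe; with CONFIG_PREEMPT_RT_FULL the handler is a preemptible IRQ thread, which is why the hunk above routes the call to `__napi_schedule()` instead.

```c
#include <linux/interrupt.h>
#include <linux/netdevice.h>

/* Hypothetical per-device private data, for illustration only. */
struct mydrv_priv {
	struct napi_struct napi;
};

/*
 * Typical hard IRQ handler: hand the work to NAPI and return.
 * napi_schedule_irqoff() ends up in __napi_schedule_irqoff(), the symbol
 * remapped by the hunk above when CONFIG_PREEMPT_RT_FULL is set, because
 * this "handler" is then a preemptible thread rather than true hard IRQ
 * context and could otherwise corrupt napi->poll_list.
 */
static irqreturn_t mydrv_interrupt(int irq, void *dev_id)
{
	struct mydrv_priv *priv = dev_id;

	napi_schedule_irqoff(&priv->napi);
	return IRQ_HANDLED;
}
```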
.. | ..
596 | 608 |  * write-mostly part
597 | 609 |  */
598 | 610 | spinlock_t _xmit_lock ____cacheline_aligned_in_smp;
    | 611 | +#ifdef CONFIG_PREEMPT_RT_FULL
    | 612 | + struct task_struct *xmit_lock_owner;
    | 613 | +#else
599 | 614 | int xmit_lock_owner;
    | 615 | +#endif
600 | 616 | /*
601 | 617 |  * Time (in jiffies) of last Tx
602 | 618 |  */
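A note on why the owner's type changes: the field answers "does the current context already hold this queue's xmit lock?". Without RT the lock holder can neither be preempted nor migrated, so a CPU id is a stable identity; under PREEMPT_RT_FULL it is not, but the task pointer is. The helper below is not part of the patch, just a hedged sketch of that comparison; the real consumer is the check in `__dev_queue_xmit()` that the patch's own comments refer to.

```c
#include <linux/netdevice.h>
#include <linux/sched.h>
#include <linux/smp.h>

/*
 * Illustrative only: how the two flavours of xmit_lock_owner would be
 * tested for "held by me". In the !RT case the caller is expected to run
 * with BHs or preemption disabled, as the real transmit path does.
 */
static inline bool example_txq_held_by_me(struct netdev_queue *txq)
{
#ifdef CONFIG_PREEMPT_RT_FULL
	/* The holder may be preempted and migrate, so compare task identity. */
	return READ_ONCE(txq->xmit_lock_owner) == current;
#else
	/* The holder cannot migrate, so the CPU id is sufficient. */
	return READ_ONCE(txq->xmit_lock_owner) == smp_processor_id();
#endif
}
```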
.. | ..
3036 | 3052 | unsigned int dropped;
3037 | 3053 | struct sk_buff_head input_pkt_queue;
3038 | 3054 | struct napi_struct backlog;
     | 3055 | + struct sk_buff_head tofree_queue;
3039 | 3056 |
3040 | 3057 | };
3041 | 3058 |
.. | ..
3054 | 3071 | #endif
3055 | 3072 | }
3056 | 3073 |
     | 3074 | +#define XMIT_RECURSION_LIMIT 8
3057 | 3075 | DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
     | 3076 | +
     | 3077 | +#ifdef CONFIG_PREEMPT_RT_FULL
     | 3078 | +static inline int dev_recursion_level(void)
     | 3079 | +{
     | 3080 | + return current->xmit_recursion;
     | 3081 | +}
     | 3082 | +
     | 3083 | +static inline bool dev_xmit_recursion(void)
     | 3084 | +{
     | 3085 | + return unlikely(current->xmit_recursion >
     | 3086 | + XMIT_RECURSION_LIMIT);
     | 3087 | +}
     | 3088 | +
     | 3089 | +static inline void dev_xmit_recursion_inc(void)
     | 3090 | +{
     | 3091 | + current->xmit_recursion++;
     | 3092 | +}
     | 3093 | +
     | 3094 | +static inline void dev_xmit_recursion_dec(void)
     | 3095 | +{
     | 3096 | + current->xmit_recursion--;
     | 3097 | +}
     | 3098 | +
     | 3099 | +#else
3058 | 3100 |
3059 | 3101 | static inline int dev_recursion_level(void)
3060 | 3102 | {
3061 | 3103 | return this_cpu_read(softnet_data.xmit.recursion);
3062 | 3104 | }
3063 | 3105 |
3064 |      | -#define XMIT_RECURSION_LIMIT 8
3065 | 3106 | static inline bool dev_xmit_recursion(void)
3066 | 3107 | {
3067 | 3108 | return unlikely(__this_cpu_read(softnet_data.xmit.recursion) >
.. | ..
3077 | 3118 | {
3078 | 3119 | __this_cpu_dec(softnet_data.xmit.recursion);
3079 | 3120 | }
     | 3121 | +#endif
3080 | 3122 |
3081 | 3123 | void __netif_schedule(struct Qdisc *q);
3082 | 3124 | void netif_schedule_queue(struct netdev_queue *txq);
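To make the `#ifdef` pair above concrete, here is a hedged usage sketch of the recursion helpers. The function is made up and simplified from the kind of loop-avoidance logic the networking core performs (tunnel or loopback devices can call back into the transmit path); the point is only the check/inc/dec bracketing around one transmit. Moving the counter from per-CPU data to `current->xmit_recursion` keeps it correct when a PREEMPT_RT_FULL transmit path is preempted or migrated part-way through.

```c
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/smp.h>

/*
 * Illustrative helper, not kernel code: transmit one skb on txq while
 * refusing to re-enter the transmit path too deeply. The caller is
 * assumed to run with bottom halves disabled, as the real xmit path does.
 */
static int example_xmit_one(struct sk_buff *skb, struct net_device *dev,
			    struct netdev_queue *txq)
{
	int ret = NETDEV_TX_BUSY;

	/* Already too deep in this context? Drop rather than recurse. */
	if (dev_xmit_recursion()) {
		kfree_skb(skb);
		return NET_XMIT_DROP;
	}

	dev_xmit_recursion_inc();
	__netif_tx_lock(txq, smp_processor_id());
	if (!netif_xmit_frozen_or_stopped(txq))
		ret = netdev_start_xmit(skb, dev, txq, false);
	__netif_tx_unlock(txq);
	dev_xmit_recursion_dec();

	return ret;
}
```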
.. | ..
3885 | 3927 | return (1U << debug_value) - 1;
3886 | 3928 | }
3887 | 3929 |
     | 3930 | +#ifdef CONFIG_PREEMPT_RT_FULL
     | 3931 | +static inline void netdev_queue_set_owner(struct netdev_queue *txq, int cpu)
     | 3932 | +{
     | 3933 | + WRITE_ONCE(txq->xmit_lock_owner, current);
     | 3934 | +}
     | 3935 | +
     | 3936 | +static inline void netdev_queue_clear_owner(struct netdev_queue *txq)
     | 3937 | +{
     | 3938 | + WRITE_ONCE(txq->xmit_lock_owner, NULL);
     | 3939 | +}
     | 3940 | +
     | 3941 | +static inline bool netdev_queue_has_owner(struct netdev_queue *txq)
     | 3942 | +{
     | 3943 | + if (READ_ONCE(txq->xmit_lock_owner) != NULL)
     | 3944 | + return true;
     | 3945 | + return false;
     | 3946 | +}
     | 3947 | +
     | 3948 | +#else
     | 3949 | +
     | 3950 | +static inline void netdev_queue_set_owner(struct netdev_queue *txq, int cpu)
     | 3951 | +{
     | 3952 | + /* Pairs with READ_ONCE() in __dev_queue_xmit() */
     | 3953 | + WRITE_ONCE(txq->xmit_lock_owner, cpu);
     | 3954 | +}
     | 3955 | +
     | 3956 | +static inline void netdev_queue_clear_owner(struct netdev_queue *txq)
     | 3957 | +{
     | 3958 | + /* Pairs with READ_ONCE() in __dev_queue_xmit() */
     | 3959 | + WRITE_ONCE(txq->xmit_lock_owner, -1);
     | 3960 | +}
     | 3961 | +
     | 3962 | +static inline bool netdev_queue_has_owner(struct netdev_queue *txq)
     | 3963 | +{
     | 3964 | + if (READ_ONCE(txq->xmit_lock_owner) != -1)
     | 3965 | + return true;
     | 3966 | + return false;
     | 3967 | +}
     | 3968 | +#endif
     | 3969 | +
3888 | 3970 | static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
3889 | 3971 | {
3890 | 3972 | spin_lock(&txq->_xmit_lock);
3891 |      | - /* Pairs with READ_ONCE() in __dev_queue_xmit() */
3892 |      | - WRITE_ONCE(txq->xmit_lock_owner, cpu);
     | 3973 | + netdev_queue_set_owner(txq, cpu);
3893 | 3974 | }
3894 | 3975 |
3895 | 3976 | static inline bool __netif_tx_acquire(struct netdev_queue *txq)
.. | ..
3906 | 3987 | static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
3907 | 3988 | {
3908 | 3989 | spin_lock_bh(&txq->_xmit_lock);
3909 |      | - /* Pairs with READ_ONCE() in __dev_queue_xmit() */
3910 |      | - WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id());
     | 3990 | + netdev_queue_set_owner(txq, smp_processor_id());
3911 | 3991 | }
3912 | 3992 |
3913 | 3993 | static inline bool __netif_tx_trylock(struct netdev_queue *txq)
.. | ..
3915 | 3995 | bool ok = spin_trylock(&txq->_xmit_lock);
3916 | 3996 |
3917 | 3997 | if (likely(ok)) {
3918 |      | - /* Pairs with READ_ONCE() in __dev_queue_xmit() */
3919 |      | - WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id());
     | 3998 | + netdev_queue_set_owner(txq, smp_processor_id());
3920 | 3999 | }
3921 | 4000 | return ok;
3922 | 4001 | }
3923 | 4002 |
3924 | 4003 | static inline void __netif_tx_unlock(struct netdev_queue *txq)
3925 | 4004 | {
3926 |      | - /* Pairs with READ_ONCE() in __dev_queue_xmit() */
3927 |      | - WRITE_ONCE(txq->xmit_lock_owner, -1);
     | 4005 | + netdev_queue_clear_owner(txq);
3928 | 4006 | spin_unlock(&txq->_xmit_lock);
3929 | 4007 | }
3930 | 4008 |
3931 | 4009 | static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
3932 | 4010 | {
3933 |      | - /* Pairs with READ_ONCE() in __dev_queue_xmit() */
3934 |      | - WRITE_ONCE(txq->xmit_lock_owner, -1);
     | 4011 | + netdev_queue_clear_owner(txq);
3935 | 4012 | spin_unlock_bh(&txq->_xmit_lock);
3936 | 4013 | }
3937 | 4014 |
3938 | 4015 | static inline void txq_trans_update(struct netdev_queue *txq)
3939 | 4016 | {
3940 |      | - if (txq->xmit_lock_owner != -1)
     | 4017 | + if (netdev_queue_has_owner(txq))
3941 | 4018 | txq->trans_start = jiffies;
3942 | 4019 | }
3943 | 4020 |
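Finally, a hedged usage sketch of the reworked lock helpers: `__netif_tx_lock_bh()` now records the owner through `netdev_queue_set_owner()`, `__netif_tx_unlock_bh()` clears it, and `txq_trans_update()` only refreshes `trans_start` while an owner is recorded, whichever representation (CPU id or task pointer) is in use. The function below is illustrative, not from the patch; the explicit `txq_trans_update()` call is there purely to show the owner check, since drivers normally rely on `netdev_start_xmit()` doing it after a successful transmit.

```c
#include <linux/netdevice.h>

/*
 * Illustrative only: wake a stopped TX queue under its xmit lock. The
 * lock/unlock helpers keep xmit_lock_owner consistent, so the
 * txq_trans_update() call sees netdev_queue_has_owner() == true here.
 */
static void example_restart_tx_queue(struct net_device *dev, unsigned int idx)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, idx);

	__netif_tx_lock_bh(txq);		/* records the owner */
	if (netif_tx_queue_stopped(txq))
		netif_tx_wake_queue(txq);
	txq_trans_update(txq);			/* owner recorded, so trans_start is refreshed */
	__netif_tx_unlock_bh(txq);		/* clears the owner */
}
```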