From 2e7bd41e4e8ab3d1efdabd9e263a2f7fe79bff8c Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Mon, 20 Nov 2023 10:14:59 +0000
Subject: [PATCH] netdevice.h: adapt fast-path helpers for PREEMPT_RT_FULL

With PREEMPT_RT_FULL, device interrupt handlers run as preemptible
threads and the transmit path itself is preemptible, so:

- map __napi_schedule_irqoff() to __napi_schedule(), since a preempted
  handler could otherwise corrupt napi->poll_list
- keep the xmit recursion counter in task_struct instead of in per-CPU
  softnet_data
- record the _xmit_lock owner as a task_struct pointer instead of a CPU
  number, behind new netdev_queue_{set,clear,has}_owner() helpers
- add a tofree_queue to softnet_data

---
 kernel/include/linux/netdevice.h | 101 ++++++++++++++++++++++++++++++++++++++++++++------
 1 file changed, 89 insertions(+), 12 deletions(-)

diff --git a/kernel/include/linux/netdevice.h b/kernel/include/linux/netdevice.h
index 0f4009d..16606e1 100644
--- a/kernel/include/linux/netdevice.h
+++ b/kernel/include/linux/netdevice.h
@@ -431,7 +431,19 @@
 typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);
 
 void __napi_schedule(struct napi_struct *n);
+
+/*
+ * When PREEMPT_RT_FULL is defined, all device interrupt handlers
+ * run as threads, and they can also be preempted (without PREEMPT_RT,
+ * interrupt threads cannot be preempted). This means that a call to
+ * __napi_schedule_irqoff() from an interrupt handler can be preempted
+ * and can corrupt the napi->poll_list.
+ */
+#ifdef CONFIG_PREEMPT_RT_FULL
+#define __napi_schedule_irqoff(n) __napi_schedule(n)
+#else
 void __napi_schedule_irqoff(struct napi_struct *n);
+#endif
 
 static inline bool napi_disable_pending(struct napi_struct *n)
 {
@@ -596,7 +608,11 @@
 	 * write-mostly part
 	 */
 	spinlock_t _xmit_lock ____cacheline_aligned_in_smp;
+#ifdef CONFIG_PREEMPT_RT_FULL
+	struct task_struct *xmit_lock_owner;
+#else
 	int xmit_lock_owner;
+#endif
 	/*
 	 * Time (in jiffies) of last Tx
 	 */
@@ -3036,6 +3052,7 @@
 	unsigned int dropped;
 	struct sk_buff_head input_pkt_queue;
 	struct napi_struct backlog;
+	struct sk_buff_head tofree_queue;
 
 };
 
@@ -3054,14 +3071,38 @@
 #endif
 }
 
+#define XMIT_RECURSION_LIMIT 8
 DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
+
+#ifdef CONFIG_PREEMPT_RT_FULL
+static inline int dev_recursion_level(void)
+{
+	return current->xmit_recursion;
+}
+
+static inline bool dev_xmit_recursion(void)
+{
+	return unlikely(current->xmit_recursion >
+			XMIT_RECURSION_LIMIT);
+}
+
+static inline void dev_xmit_recursion_inc(void)
+{
+	current->xmit_recursion++;
+}
+
+static inline void dev_xmit_recursion_dec(void)
+{
+	current->xmit_recursion--;
+}
+
+#else
 
 static inline int dev_recursion_level(void)
 {
 	return this_cpu_read(softnet_data.xmit.recursion);
 }
 
-#define XMIT_RECURSION_LIMIT 8
 static inline bool dev_xmit_recursion(void)
 {
 	return unlikely(__this_cpu_read(softnet_data.xmit.recursion) >
@@ -3077,6 +3118,7 @@
 {
 	__this_cpu_dec(softnet_data.xmit.recursion);
 }
+#endif
 
 void __netif_schedule(struct Qdisc *q);
 void netif_schedule_queue(struct netdev_queue *txq);
@@ -3885,11 +3927,50 @@
 	return (1U << debug_value) - 1;
 }
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+static inline void netdev_queue_set_owner(struct netdev_queue *txq, int cpu)
+{
+	WRITE_ONCE(txq->xmit_lock_owner, current);
+}
+
+static inline void netdev_queue_clear_owner(struct netdev_queue *txq)
+{
+	WRITE_ONCE(txq->xmit_lock_owner, NULL);
+}
+
+static inline bool netdev_queue_has_owner(struct netdev_queue *txq)
+{
+	if (READ_ONCE(txq->xmit_lock_owner) != NULL)
+		return true;
+	return false;
+}
+
+#else
+
+static inline void netdev_queue_set_owner(struct netdev_queue *txq, int cpu)
+{
+	/* Pairs with READ_ONCE() in __dev_queue_xmit() */
+	WRITE_ONCE(txq->xmit_lock_owner, cpu);
+}
+
+static inline void netdev_queue_clear_owner(struct netdev_queue *txq)
+{
+	/* Pairs with READ_ONCE() in __dev_queue_xmit() */
+	WRITE_ONCE(txq->xmit_lock_owner, -1);
+}
+
+static inline bool netdev_queue_has_owner(struct netdev_queue *txq)
+{
+	if (READ_ONCE(txq->xmit_lock_owner) != -1)
+		return true;
+	return false;
+}
+#endif
+
 static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
 {
 	spin_lock(&txq->_xmit_lock);
-	/* Pairs with READ_ONCE() in __dev_queue_xmit() */
-	WRITE_ONCE(txq->xmit_lock_owner, cpu);
+	netdev_queue_set_owner(txq, cpu);
 }
 
 static inline bool __netif_tx_acquire(struct netdev_queue *txq)
@@ -3906,8 +3987,7 @@
 static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
 {
 	spin_lock_bh(&txq->_xmit_lock);
-	/* Pairs with READ_ONCE() in __dev_queue_xmit() */
-	WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id());
+	netdev_queue_set_owner(txq, smp_processor_id());
 }
 
 static inline bool __netif_tx_trylock(struct netdev_queue *txq)
@@ -3915,29 +3995,26 @@
 	bool ok = spin_trylock(&txq->_xmit_lock);
 
 	if (likely(ok)) {
-		/* Pairs with READ_ONCE() in __dev_queue_xmit() */
-		WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id());
+		netdev_queue_set_owner(txq, smp_processor_id());
 	}
 	return ok;
 }
 
 static inline void __netif_tx_unlock(struct netdev_queue *txq)
 {
-	/* Pairs with READ_ONCE() in __dev_queue_xmit() */
-	WRITE_ONCE(txq->xmit_lock_owner, -1);
+	netdev_queue_clear_owner(txq);
 	spin_unlock(&txq->_xmit_lock);
 }
 
 static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
 {
-	/* Pairs with READ_ONCE() in __dev_queue_xmit() */
-	WRITE_ONCE(txq->xmit_lock_owner, -1);
+	netdev_queue_clear_owner(txq);
 	spin_unlock_bh(&txq->_xmit_lock);
 }
 
 static inline void txq_trans_update(struct netdev_queue *txq)
 {
-	if (txq->xmit_lock_owner != -1)
+	if (netdev_queue_has_owner(txq))
 		txq->trans_start = jiffies;
 }
--
Gitblit v1.6.2
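
Why the recursion counter moves into task_struct: under PREEMPT_RT_FULL
the transmit path runs in preemptible task context and may migrate
between CPUs, so a counter kept in per-CPU softnet_data can be
incremented on one CPU and decremented on another, or be shared by two
tasks interleaving on the same CPU. A per-task counter follows the task
wherever it runs. The fragment below is a minimal userspace sketch of
that idea, not kernel code: a C11 _Thread_local variable stands in for
current->xmit_recursion, and the recursive xmit() is a hypothetical
stand-in for a stacked device (e.g. a tunnel) re-entering the transmit
path.

#include <stdbool.h>
#include <stdio.h>

#define XMIT_RECURSION_LIMIT 8

/* Stand-in for current->xmit_recursion: one counter per thread. */
static _Thread_local int xmit_recursion;

static int dev_recursion_level(void)
{
	return xmit_recursion;
}

static bool dev_xmit_recursion(void)
{
	return xmit_recursion > XMIT_RECURSION_LIMIT;
}

static void dev_xmit_recursion_inc(void)
{
	xmit_recursion++;
}

static void dev_xmit_recursion_dec(void)
{
	xmit_recursion--;
}

/* Hypothetical transmit path that re-enters itself the way a stacked
 * (e.g. tunnel) device would. */
static int xmit(int depth)
{
	if (dev_xmit_recursion()) {
		fprintf(stderr, "recursion limit hit at level %d\n",
			dev_recursion_level());
		return -1; /* the kernel would drop the packet here */
	}

	dev_xmit_recursion_inc();
	printf("transmitting at recursion level %d\n", dev_recursion_level());
	int ret = depth > 0 ? xmit(depth - 1) : 0;
	dev_xmit_recursion_dec();
	return ret;
}

int main(void)
{
	xmit(20); /* deep enough that the guard trips */
	return 0;
}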
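
Why the lock owner becomes a task_struct pointer: RT spinlocks are
sleeping locks, so a task holding _xmit_lock can be preempted and
migrate to another CPU. A CPU number recorded at lock time then no
longer identifies the holder, and the owner check in the transmit path
(is the running context already the owner of this queue?) would compare
against the wrong identity. Recording the task itself keeps the
comparison valid. Below is a minimal userspace sketch of that scheme
only, not kernel code; struct txq, txq_lock() and txq_owned_by_me() are
hypothetical names modelling __netif_tx_lock() and the owner check.

#include <pthread.h>
#include <stdbool.h>

/* Owner recorded as a thread identity, mirroring the task_struct
 * pointer the patch uses instead of a CPU number. */
struct txq {
	pthread_mutex_t lock;
	pthread_t owner;
	bool owned;
};

static void txq_lock(struct txq *q)
{
	pthread_mutex_lock(&q->lock);
	q->owner = pthread_self(); /* like WRITE_ONCE(owner, current) */
	q->owned = true;
}

static void txq_unlock(struct txq *q)
{
	q->owned = false; /* like WRITE_ONCE(owner, NULL) */
	pthread_mutex_unlock(&q->lock);
}

/* The deadlock-avoidance test: a context that already owns the queue
 * must not take the lock again. Thread identity stays correct even if
 * the holder is preempted and migrated; a CPU number would not. */
static bool txq_owned_by_me(struct txq *q)
{
	return q->owned && pthread_equal(q->owner, pthread_self());
}

int main(void)
{
	struct txq q = { .lock = PTHREAD_MUTEX_INITIALIZER };

	txq_lock(&q);
	bool mine = txq_owned_by_me(&q); /* true: this thread holds it */
	txq_unlock(&q);
	return mine ? 0 : 1;
}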