2023-11-20 2e7bd41e4e8ab3d1efdabd9e263a2f7fe79bff8c
kernel/include/linux/netdevice.h
@@ -431,7 +431,19 @@
 typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);
 
 void __napi_schedule(struct napi_struct *n);
+
+/*
+ * When PREEMPT_RT_FULL is defined, all device interrupt handlers
+ * run as threads, and they can also be preempted (without PREEMPT_RT,
+ * interrupt threads cannot be preempted). This means that a call to
+ * __napi_schedule_irqoff() from an interrupt handler can be preempted
+ * and can corrupt napi->poll_list.
+ */
+#ifdef CONFIG_PREEMPT_RT_FULL
+#define __napi_schedule_irqoff(n)	__napi_schedule(n)
+#else
 void __napi_schedule_irqoff(struct napi_struct *n);
+#endif
 
 static inline bool napi_disable_pending(struct napi_struct *n)
 {
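
For context, not part of the patch: a driver's interrupt handler typically calls napi_schedule_irqoff(), which uses __napi_schedule_irqoff() internally. On PREEMPT_RT_FULL that handler runs in a preemptible IRQ thread, so the #define above routes the call to __napi_schedule(), which disables interrupts itself before touching napi->poll_list. A minimal sketch of such a handler; struct my_dev and my_irq_handler() are hypothetical names, not from the patch:

struct my_dev {				/* hypothetical driver private data */
	struct napi_struct napi;
	/* ... */
};

static irqreturn_t my_irq_handler(int irq, void *data)
{
	struct my_dev *priv = data;

	/* Device-specific interrupt ack/masking would go here. */

	/*
	 * Mainline: hard-IRQ context, interrupts already off, so the
	 * _irqoff variant is safe as-is.  PREEMPT_RT_FULL: preemptible
	 * IRQ thread, so the remap above falls back to __napi_schedule(),
	 * which protects napi->poll_list on its own.
	 */
	napi_schedule_irqoff(&priv->napi);

	return IRQ_HANDLED;
}
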
@@ -596,7 +608,11 @@
 	 * write-mostly part
 	 */
 	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
+#ifdef CONFIG_PREEMPT_RT_FULL
+	struct task_struct	*xmit_lock_owner;
+#else
 	int			xmit_lock_owner;
+#endif
 	/*
 	 * Time (in jiffies) of last Tx
 	 */
@@ -3036,6 +3052,7 @@
 	unsigned int		dropped;
 	struct sk_buff_head	input_pkt_queue;
 	struct napi_struct	backlog;
+	struct sk_buff_head	tofree_queue;
 
 };
 
@@ -3054,14 +3071,38 @@
 #endif
 }
 
+#define XMIT_RECURSION_LIMIT	8
 DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
+
+#ifdef CONFIG_PREEMPT_RT_FULL
+static inline int dev_recursion_level(void)
+{
+	return current->xmit_recursion;
+}
+
+static inline bool dev_xmit_recursion(void)
+{
+	return unlikely(current->xmit_recursion >
+			XMIT_RECURSION_LIMIT);
+}
+
+static inline void dev_xmit_recursion_inc(void)
+{
+	current->xmit_recursion++;
+}
+
+static inline void dev_xmit_recursion_dec(void)
+{
+	current->xmit_recursion--;
+}
+
+#else
 
 static inline int dev_recursion_level(void)
 {
 	return this_cpu_read(softnet_data.xmit.recursion);
 }
 
-#define XMIT_RECURSION_LIMIT	8
 static inline bool dev_xmit_recursion(void)
 {
 	return unlikely(__this_cpu_read(softnet_data.xmit.recursion) >
@@ -3077,6 +3118,7 @@
 {
 	__this_cpu_dec(softnet_data.xmit.recursion);
 }
+#endif
 
 void __netif_schedule(struct Qdisc *q);
 void netif_schedule_queue(struct netdev_queue *txq);
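
For context, a sketch that is not part of the patch: the dev_xmit_recursion*() helpers bound how deeply the transmit path may re-enter itself, for example through stacked or tunnel devices. Mainline keeps the counter per CPU in softnet_data, which is only meaningful while the sender cannot be preempted; under PREEMPT_RT_FULL the transmit path can be preempted and migrate CPUs, so the patch keeps the counter in the task instead (current->xmit_recursion, a field this patch series is assumed to add to struct task_struct elsewhere). A hypothetical caller showing the usual inc/check/dec pattern; nested_xmit_example() is an illustrative name only:

static int nested_xmit_example(struct sk_buff *skb)
{
	int ret;

	if (dev_xmit_recursion()) {
		/* Too many nested dev_queue_xmit() calls: drop instead of
		 * risking stack overflow.  Error value chosen arbitrarily
		 * for this sketch. */
		kfree_skb(skb);
		return -ENOBUFS;
	}

	dev_xmit_recursion_inc();
	ret = dev_queue_xmit(skb);	/* may recurse back into a xmit path */
	dev_xmit_recursion_dec();

	return ret;
}
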
@@ -3885,11 +3927,50 @@
 	return (1U << debug_value) - 1;
 }
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+static inline void netdev_queue_set_owner(struct netdev_queue *txq, int cpu)
+{
+	WRITE_ONCE(txq->xmit_lock_owner, current);
+}
+
+static inline void netdev_queue_clear_owner(struct netdev_queue *txq)
+{
+	WRITE_ONCE(txq->xmit_lock_owner, NULL);
+}
+
+static inline bool netdev_queue_has_owner(struct netdev_queue *txq)
+{
+	if (READ_ONCE(txq->xmit_lock_owner) != NULL)
+		return true;
+	return false;
+}
+
+#else
+
+static inline void netdev_queue_set_owner(struct netdev_queue *txq, int cpu)
+{
+	/* Pairs with READ_ONCE() in __dev_queue_xmit() */
+	WRITE_ONCE(txq->xmit_lock_owner, cpu);
+}
+
+static inline void netdev_queue_clear_owner(struct netdev_queue *txq)
+{
+	/* Pairs with READ_ONCE() in __dev_queue_xmit() */
+	WRITE_ONCE(txq->xmit_lock_owner, -1);
+}
+
+static inline bool netdev_queue_has_owner(struct netdev_queue *txq)
+{
+	if (READ_ONCE(txq->xmit_lock_owner) != -1)
+		return true;
+	return false;
+}
+#endif
+
 static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
 {
 	spin_lock(&txq->_xmit_lock);
-	/* Pairs with READ_ONCE() in __dev_queue_xmit() */
-	WRITE_ONCE(txq->xmit_lock_owner, cpu);
+	netdev_queue_set_owner(txq, cpu);
 }
 
 static inline bool __netif_tx_acquire(struct netdev_queue *txq)
@@ -3906,8 +3987,7 @@
 static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
 {
 	spin_lock_bh(&txq->_xmit_lock);
-	/* Pairs with READ_ONCE() in __dev_queue_xmit() */
-	WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id());
+	netdev_queue_set_owner(txq, smp_processor_id());
 }
 
 static inline bool __netif_tx_trylock(struct netdev_queue *txq)
@@ -3915,29 +3995,26 @@
 	bool ok = spin_trylock(&txq->_xmit_lock);
 
 	if (likely(ok)) {
-		/* Pairs with READ_ONCE() in __dev_queue_xmit() */
-		WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id());
+		netdev_queue_set_owner(txq, smp_processor_id());
 	}
 	return ok;
 }
 
 static inline void __netif_tx_unlock(struct netdev_queue *txq)
 {
-	/* Pairs with READ_ONCE() in __dev_queue_xmit() */
-	WRITE_ONCE(txq->xmit_lock_owner, -1);
+	netdev_queue_clear_owner(txq);
 	spin_unlock(&txq->_xmit_lock);
 }
 
 static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
 {
-	/* Pairs with READ_ONCE() in __dev_queue_xmit() */
-	WRITE_ONCE(txq->xmit_lock_owner, -1);
+	netdev_queue_clear_owner(txq);
 	spin_unlock_bh(&txq->_xmit_lock);
 }
 
 static inline void txq_trans_update(struct netdev_queue *txq)
 {
-	if (txq->xmit_lock_owner != -1)
+	if (netdev_queue_has_owner(txq))
 		txq->trans_start = jiffies;
 }
 
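
For context, a sketch that is not part of the patch: switching xmit_lock_owner from a CPU id to a task pointer changes what "owner" means. On mainline the owner is the CPU that took _xmit_lock, which is only stable while preemption is disabled; on PREEMPT_RT_FULL the lock holder is a preemptible task that may migrate, so the task itself is recorded. A hypothetical helper illustrating how an "am I already holding this Tx queue?" check would differ between the two models (the name netdev_queue_owned_by_me() is invented for this sketch; the pairing READ_ONCE() the patch refers to lives in __dev_queue_xmit()):

static inline bool netdev_queue_owned_by_me(struct netdev_queue *txq)
{
#ifdef CONFIG_PREEMPT_RT_FULL
	/* Ownership is per task: still true after preemption/migration. */
	return READ_ONCE(txq->xmit_lock_owner) == current;
#else
	/* Ownership is per CPU: only valid with preemption disabled. */
	return READ_ONCE(txq->xmit_lock_owner) == smp_processor_id();
#endif
}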