2024-11-01 2f529f9b558ca1c1bd74be7437a84e4711743404
--- a/kernel/net/core/dev.c
+++ b/kernel/net/core/dev.c
@@ -3111,6 +3111,10 @@
 	} else if (likely(!refcount_dec_and_test(&skb->users))) {
 		return;
 	}
+
+	if (recycle_oob_skb(skb))
+		return;
+
 	get_kfree_skb_cb(skb)->reason = reason;
 	local_irq_save(flags);
 	skb->next = __this_cpu_read(softnet_data.completion_queue);
@@ -3584,7 +3588,12 @@
 	unsigned int len;
 	int rc;
 
-	if (dev_nit_active(dev))
+	/*
+	 * Clone-relay outgoing packet to listening taps. Network taps
+	 * interested in out-of-band traffic should be handled by the
+	 * companion core.
+	 */
+	if (dev_nit_active(dev) && !skb_is_oob(skb))
 		dev_queue_xmit_nit(skb, dev);
 
 	len = skb->len;
@@ -4797,6 +4806,81 @@
 }
 EXPORT_SYMBOL_GPL(do_xdp_generic);
 
+#ifdef CONFIG_NET_OOB
+
+__weak bool netif_oob_deliver(struct sk_buff *skb)
+{
+	return false;
+}
+
+__weak int netif_xmit_oob(struct sk_buff *skb)
+{
+	return NET_XMIT_DROP;
+}
+
+static bool netif_receive_oob(struct sk_buff *skb)
+{
+	struct net_device *dev = skb->dev;
+
+	if (dev && netif_oob_diversion(dev))
+		return netif_oob_deliver(skb);
+
+	return false;
+}
+
+static bool netif_receive_oob_list(struct list_head *head)
+{
+	struct sk_buff *skb, *next;
+	struct net_device *dev;
+
+	if (list_empty(head))
+		return false;
+
+	dev = list_first_entry(head, struct sk_buff, list)->dev;
+	if (!dev || !netif_oob_diversion(dev))
+		return false;
+
+	/* Callee dequeues every skb it consumes. */
+	list_for_each_entry_safe(skb, next, head, list)
+		netif_oob_deliver(skb);
+
+	return list_empty(head);
+}
+
+__weak void netif_oob_run(struct net_device *dev)
+{ }
+
+static void napi_complete_oob(struct napi_struct *n)
+{
+	struct net_device *dev = n->dev;
+
+	if (netif_oob_diversion(dev))
+		netif_oob_run(dev);
+}
+
+__weak void skb_inband_xmit_backlog(void)
+{ }
+
+#else
+
+static inline bool netif_receive_oob(struct sk_buff *skb)
+{
+	return false;
+}
+
+static inline bool netif_receive_oob_list(struct list_head *head)
+{
+	return false;
+}
+
+static inline void napi_complete_oob(struct napi_struct *n)
+{ }
+
+static inline void skb_inband_xmit_backlog(void)
+{ }
+
+#endif
+
 static int netif_rx_internal(struct sk_buff *skb)
 {
 	int ret;
@@ -4895,6 +4979,8 @@
 static __latent_entropy void net_tx_action(struct softirq_action *h)
 {
 	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
+
+	skb_inband_xmit_backlog();
 
 	if (sd->completion_queue) {
 		struct sk_buff *clist;
@@ -5639,6 +5725,9 @@
 {
 	int ret;
 
+	if (netif_receive_oob(skb))
+		return NET_RX_SUCCESS;
+
 	trace_netif_receive_skb_entry(skb);
 
 	ret = netif_receive_skb_internal(skb);
@@ -5662,6 +5751,8 @@
 {
 	struct sk_buff *skb;
 
+	if (netif_receive_oob_list(head))
+		return;
 	if (list_empty(head))
 		return;
 	if (trace_netif_receive_skb_list_entry_enabled()) {
@@ -6152,6 +6243,9 @@
 {
 	gro_result_t ret;
 
+	if (netif_receive_oob(skb))
+		return GRO_NORMAL;
+
 	skb_mark_napi_id(skb, napi);
 	trace_napi_gro_receive_entry(skb);
 
@@ -6489,6 +6583,8 @@
 	unsigned long flags, val, new, timeout = 0;
 	bool ret = true;
 
+	napi_complete_oob(n);
+
 	/*
 	 * 1) Don't let napi dequeue from the cpu poll list
 	 *    just in case its running on a different cpu.
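
Note (illustration only, not part of the diff above): the __weak hooks declared under CONFIG_NET_OOB are fall-backs, and a companion core handling out-of-band traffic is expected to provide strong definitions for them. A minimal sketch of what such an override could look like follows, assuming hypothetical helpers oob_frame_match() and oob_drain_rxq() and a private oob_rxq queue, none of which are defined by this patch.

/* Hypothetical companion-core side. oob_frame_match(), oob_drain_rxq()
 * and oob_rxq are made-up names used only for illustration; oob_rxq is
 * assumed to have been set up with skb_queue_head_init() at init time. */
static struct sk_buff_head oob_rxq;

bool netif_oob_deliver(struct sk_buff *skb)	/* overrides the __weak stub */
{
	if (!oob_frame_match(skb))
		return false;		/* not ours: leave it to the in-band stack */

	skb_list_del_init(skb);		/* list callers expect consumed skbs to be dequeued */
	skb_queue_tail(&oob_rxq, skb);
	return true;			/* consumed: in-band delivery is skipped */
}

void netif_oob_run(struct net_device *dev)
{
	/* Reached from napi_complete_done() through napi_complete_oob(). */
	oob_drain_rxq(dev, &oob_rxq);
}

Whether a device's traffic is diverted at all is gated by netif_oob_diversion(dev) in the hunks above, so an override like this only ever sees packets from devices the companion core has claimed.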