From 2f529f9b558ca1c1bd74be7437a84e4711743404 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Fri, 01 Nov 2024 02:11:33 +0000
Subject: [PATCH] net: core: add out-of-band (Xenomai) diversion hooks

Add CONFIG_NET_OOB hooks to the core networking paths so that a
companion real-time core (Xenomai) can divert selected traffic to an
out-of-band network stack:

- __dev_kfree_skb_irq(): let the oob stack recycle its own skbs
  instead of queuing them on the in-band completion queue.
- xmit_one(): do not clone-relay oob packets to network taps.
- netif_receive_skb(), netif_receive_skb_list(), napi_gro_receive():
  divert ingress packets to the oob stack when diversion is enabled
  on the receiving device.
- net_tx_action(): flush packets the oob stack has queued for
  in-band transmission.
- napi_complete_done(): let a diverted device's oob handler run on
  NAPI completion.

All hooks are __weak no-op stubs unless a companion core overrides
them, and they compile out entirely without CONFIG_NET_OOB.
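
For illustration, a companion stack would override the __weak stubs
with strong definitions. A minimal sketch, assuming a hypothetical
oob_net_receive() helper inside the companion core (not part of this
patch):

	/* companion core side, e.g. in its network receive code */
	bool netif_oob_deliver(struct sk_buff *skb)
	{
		/* Claim the packet for the out-of-band stack;
		 * returning true stops in-band processing of it.
		 */
		return oob_net_receive(skb);
	}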
---
 kernel/net/core/dev.c |   98 ++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 97 insertions(+), 1 deletion(-)

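Note: recycle_oob_skb(), skb_is_oob() and netif_oob_diversion() are
referenced below but not defined by this patch; they are expected to
come from the companion core's headers. For reference, prototypes
inferred from the call sites (an assumption, not taken from this
patch):

	bool recycle_oob_skb(struct sk_buff *skb);
	bool skb_is_oob(const struct sk_buff *skb);
	bool netif_oob_diversion(const struct net_device *dev);
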
diff --git a/kernel/net/core/dev.c b/kernel/net/core/dev.c
index bc5dcf5..01d2396 100644
--- a/kernel/net/core/dev.c
+++ b/kernel/net/core/dev.c
@@ -3111,6 +3111,10 @@
 	} else if (likely(!refcount_dec_and_test(&skb->users))) {
 		return;
 	}
+
+	if (recycle_oob_skb(skb))
+		return;
+
 	get_kfree_skb_cb(skb)->reason = reason;
 	local_irq_save(flags);
 	skb->next = __this_cpu_read(softnet_data.completion_queue);
@@ -3584,7 +3588,12 @@
 	unsigned int len;
 	int rc;
 
-	if (dev_nit_active(dev))
+	/*
+	 * Clone-relay the outgoing packet to listening taps. Taps
+	 * interested in out-of-band traffic have to be served by the
+	 * companion core instead, so oob buffers are skipped here.
+	 */
+	if (dev_nit_active(dev) && !skb_is_oob(skb))
 		dev_queue_xmit_nit(skb, dev);
 
 	len = skb->len;
@@ -4797,6 +4806,81 @@
 }
 EXPORT_SYMBOL_GPL(do_xdp_generic);
 
+#ifdef CONFIG_NET_OOB
+
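+/*
+ * Weak stubs for the out-of-band hooks. A companion core overrides
+ * them to claim packets for real-time handling; with no overrider,
+ * all traffic stays on the regular in-band path.
+ */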
+__weak bool netif_oob_deliver(struct sk_buff *skb)
+{
+	return false;
+}
+
+__weak int netif_xmit_oob(struct sk_buff *skb)
+{
+	return NET_XMIT_DROP;
+}
+
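+/*
+ * Divert an ingress packet to the out-of-band stack if diversion is
+ * enabled on the receiving device. Returns true if the packet was
+ * consumed and must not enter the in-band path.
+ */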
+static bool netif_receive_oob(struct sk_buff *skb)
+{
+	struct net_device *dev = skb->dev;
+
+	if (dev && netif_oob_diversion(dev))
+		return netif_oob_deliver(skb);
+
+	return false;
+}
+
+static bool netif_receive_oob_list(struct list_head *head)
+{
+	struct sk_buff *skb, *next;
+	struct net_device *dev;
+
+	if (list_empty(head))
+		return false;
+
+	dev = list_first_entry(head, struct sk_buff, list)->dev;
+	if (!dev || !netif_oob_diversion(dev))
+		return false;
+
+	/* Callee dequeues every skb it consumes. */
+	list_for_each_entry_safe(skb, next, head, list)
+		netif_oob_deliver(skb);
+
+	return list_empty(head);
+}
+
+__weak void netif_oob_run(struct net_device *dev)
+{ }
+
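+/* Let a diverted device run its out-of-band handler on NAPI completion. */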
+static void napi_complete_oob(struct napi_struct *n)
+{
+	struct net_device *dev = n->dev;
+
+	if (netif_oob_diversion(dev))
+		netif_oob_run(dev);
+}
+
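+/*
+ * Called from net_tx_action(): lets the companion core flush packets
+ * it has queued for in-band transmission.
+ */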
+__weak void skb_inband_xmit_backlog(void)
+{ }
+
+#else
+
+static inline bool netif_receive_oob(struct sk_buff *skb)
+{
+	return false;
+}
+
+static inline bool netif_receive_oob_list(struct list_head *head)
+{
+	return false;
+}
+
+static inline void napi_complete_oob(struct napi_struct *n)
+{ }
+
+static inline void skb_inband_xmit_backlog(void)
+{ }
+
+#endif
+
 static int netif_rx_internal(struct sk_buff *skb)
 {
 	int ret;
@@ -4895,6 +4979,8 @@
 static __latent_entropy void net_tx_action(struct softirq_action *h)
 {
 	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
+
+	skb_inband_xmit_backlog();
 
 	if (sd->completion_queue) {
 		struct sk_buff *clist;
@@ -5639,6 +5725,9 @@
 {
 	int ret;
 
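+	/* Packets claimed by the oob stack bypass the in-band path. */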
+	if (netif_receive_oob(skb))
+		return NET_RX_SUCCESS;
+
 	trace_netif_receive_skb_entry(skb);
 
 	ret = netif_receive_skb_internal(skb);
@@ -5662,6 +5751,8 @@
 {
 	struct sk_buff *skb;
 
+	if (netif_receive_oob_list(head))
+		return;
 	if (list_empty(head))
 		return;
 	if (trace_netif_receive_skb_list_entry_enabled()) {
@@ -6152,6 +6243,9 @@
 {
 	gro_result_t ret;
 
+	if (netif_receive_oob(skb))
+		return GRO_NORMAL;
+
 	skb_mark_napi_id(skb, napi);
 	trace_napi_gro_receive_entry(skb);
 
@@ -6489,6 +6583,8 @@
 	unsigned long flags, val, new, timeout = 0;
 	bool ret = true;
 
+	napi_complete_oob(n);
+
 	/*
 	 * 1) Don't let napi dequeue from the cpu poll list
 	 *    just in case its running on a different cpu.

--
Gitblit v1.6.2