From 072de836f53be56a70cecf70b43ae43b7ce17376 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Mon, 11 Dec 2023 10:08:36 +0000
Subject: [PATCH] net: netpoll: update netpoll.c to current core networking APIs
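
Bring kernel/net/core/netpoll.c in line with newer core networking
interfaces:

- add the SPDX license identifier
- make netpoll_start_xmit() and the transmit path return netdev_tx_t
  instead of int
- replace netpoll_send_skb_on_dev() with an internal __netpoll_send_skb()
  plus an exported netpoll_send_skb() wrapper that saves/restores IRQ
  state itself and returns the transmit status to the caller
- use %pS instead of the deprecated %pF printk specifier
- walk dev->napi_list with list_for_each_entry_rcu()
- use netdev_core_pick_tx() instead of netdev_pick_tx()
- pass the new extack argument (NULL) to dev_change_flags() and dev_open()
- read in_dev->ifa_list through rtnl_dereference()
- match the link-local scope of the chosen IPv6 source address against
  the remote address instead of always skipping link-local addresses
- use call_rcu() instead of the removed call_rcu_bh()
- replace the asynchronous __netpoll_free_async() with __netpoll_free(),
  which runs synchronously under RTNL and waits for in-flight transmits
  via synchronize_rcu()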
---
kernel/net/core/netpoll.c | 89 ++++++++++++++++++++++++++------------------
1 file changed, 53 insertions(+), 36 deletions(-)
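
Reviewer note (kept below the "---" marker so it stays out of the commit
message and does not affect applying the patch): a minimal, hypothetical
caller-side sketch of the reworked send path. It assumes a struct netpoll
that the caller has already attached with netpoll_setup(); it illustrates
the new API shape only and is not code contained in this patch.

	/* Hypothetical caller, for illustration only. */
	static void example_send(struct netpoll *np, struct sk_buff *skb)
	{
		netdev_tx_t ret;

		/*
		 * No local_irq_save()/restore() is needed here any more:
		 * netpoll_send_skb() disables IRQs internally and also
		 * handles a NULL np by dropping the skb.
		 */
		ret = netpoll_send_skb(np, skb);
		if (ret != NETDEV_TX_OK)
			pr_debug("netpoll xmit did not succeed: %d\n", ret);
	}

	/*
	 * Cleanup side: __netpoll_free_async(np) is gone. With RTNL held,
	 * __netpoll_free(np) now waits for in-flight transmits
	 * (synchronize_rcu()), runs __netpoll_cleanup() and frees np.
	 */
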
diff --git a/kernel/net/core/netpoll.c b/kernel/net/core/netpoll.c
index 41e32a9..9609482 100644
--- a/kernel/net/core/netpoll.c
+++ b/kernel/net/core/netpoll.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Common framework for low-level network console, dump, and debugger code
*
@@ -58,7 +59,6 @@
MAX_UDP_CHUNK)
static void zap_completion_queue(void);
-static void netpoll_async_cleanup(struct work_struct *work);
static unsigned int carrier_timeout = 4;
module_param(carrier_timeout, uint, 0644);
@@ -70,10 +70,11 @@
#define np_notice(np, fmt, ...) \
pr_notice("%s: " fmt, np->name, ##__VA_ARGS__)
-static int netpoll_start_xmit(struct sk_buff *skb, struct net_device *dev,
- struct netdev_queue *txq)
+static netdev_tx_t netpoll_start_xmit(struct sk_buff *skb,
+ struct net_device *dev,
+ struct netdev_queue *txq)
{
- int status = NETDEV_TX_OK;
+ netdev_tx_t status = NETDEV_TX_OK;
netdev_features_t features;
features = netif_skb_features(skb);
@@ -151,7 +152,7 @@
* indicate that we are clearing the Tx path only.
*/
work = napi->poll(napi, 0);
- WARN_ONCE(work, "%pF exceeded budget in poll\n", napi->poll);
+ WARN_ONCE(work, "%pS exceeded budget in poll\n", napi->poll);
trace_napi_poll(napi, work, 0);
clear_bit(NAPI_STATE_NPSVC, &napi->state);
@@ -297,7 +298,7 @@
{
struct napi_struct *napi;
- list_for_each_entry(napi, &dev->napi_list, dev_list) {
+ list_for_each_entry_rcu(napi, &dev->napi_list, dev_list) {
if (napi->poll_owner == smp_processor_id())
return 1;
}
@@ -305,27 +306,29 @@
}
/* call with IRQ disabled */
-void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
- struct net_device *dev)
+static netdev_tx_t __netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
{
- int status = NETDEV_TX_BUSY;
+ netdev_tx_t status = NETDEV_TX_BUSY;
+ struct net_device *dev;
unsigned long tries;
/* It is up to the caller to keep npinfo alive. */
struct netpoll_info *npinfo;
lockdep_assert_irqs_disabled();
- npinfo = rcu_dereference_bh(np->dev->npinfo);
+ dev = np->dev;
+ npinfo = rcu_dereference_bh(dev->npinfo);
+
if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
dev_kfree_skb_irq(skb);
- return;
+ return NET_XMIT_DROP;
}
/* don't get messages out of order, and no recursion */
if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
struct netdev_queue *txq;
- txq = netdev_pick_tx(dev, skb, NULL);
+ txq = netdev_core_pick_tx(dev, skb, NULL);
/* try until next clock tick */
for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
@@ -348,7 +351,7 @@
}
WARN_ONCE(!irqs_disabled(),
- "netpoll_send_skb_on_dev(): %s enabled interrupts in poll (%pF)\n",
+ "netpoll_send_skb_on_dev(): %s enabled interrupts in poll (%pS)\n",
dev->name, dev->netdev_ops->ndo_start_xmit);
}
@@ -357,8 +360,25 @@
skb_queue_tail(&npinfo->txq, skb);
schedule_delayed_work(&npinfo->tx_work,0);
}
+ return NETDEV_TX_OK;
}
-EXPORT_SYMBOL(netpoll_send_skb_on_dev);
+
+netdev_tx_t netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
+{
+ unsigned long flags;
+ netdev_tx_t ret;
+
+ if (unlikely(!np)) {
+ dev_kfree_skb_irq(skb);
+ ret = NET_XMIT_DROP;
+ } else {
+ local_irq_save(flags);
+ ret = __netpoll_send_skb(np, skb);
+ local_irq_restore(flags);
+ }
+ return ret;
+}
+EXPORT_SYMBOL(netpoll_send_skb);
void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
{
@@ -590,7 +610,6 @@
np->dev = ndev;
strlcpy(np->dev_name, ndev->name, IFNAMSIZ);
- INIT_WORK(&np->cleanup_work, netpoll_async_cleanup);
if (ndev->priv_flags & IFF_DISABLE_NETPOLL) {
np_err(np, "%s doesn't support polling, aborting\n",
@@ -660,7 +679,7 @@
if (!netdev_uses_dsa(dev))
continue;
- err = dev_change_flags(dev, dev->flags | IFF_UP);
+ err = dev_change_flags(dev, dev->flags | IFF_UP, NULL);
if (err < 0) {
np_err(np, "%s failed to open %s\n",
np->dev_name, dev->name);
@@ -679,7 +698,7 @@
np_info(np, "device %s not up yet, forcing it\n", np->dev_name);
- err = dev_open(ndev);
+ err = dev_open(ndev, NULL);
if (err) {
np_err(np, "failed to open %s\n", ndev->name);
@@ -711,16 +730,22 @@
if (!np->local_ip.ip) {
if (!np->ipv6) {
- in_dev = __in_dev_get_rtnl(ndev);
+ const struct in_ifaddr *ifa;
- if (!in_dev || !in_dev->ifa_list) {
+ in_dev = __in_dev_get_rtnl(ndev);
+ if (!in_dev)
+ goto put_noaddr;
+
+ ifa = rtnl_dereference(in_dev->ifa_list);
+ if (!ifa) {
+put_noaddr:
np_err(np, "no IP address for %s, aborting\n",
np->dev_name);
err = -EDESTADDRREQ;
goto put;
}
- np->local_ip.ip = in_dev->ifa_list->ifa_local;
+ np->local_ip.ip = ifa->ifa_local;
np_info(np, "local IP %pI4\n", &np->local_ip.ip);
} else {
#if IS_ENABLED(CONFIG_IPV6)
@@ -733,7 +758,8 @@
read_lock_bh(&idev->lock);
list_for_each_entry(ifp, &idev->addr_list, if_list) {
- if (ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL)
+ if (!!(ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL) !=
+ !!(ipv6_addr_type(&np->remote_ip.in6) & IPV6_ADDR_LINKLOCAL))
continue;
np->local_ip.in6 = ifp->addr;
err = 0;
@@ -802,10 +828,6 @@
{
struct netpoll_info *npinfo;
- /* rtnl_dereference would be preferable here but
- * rcu_cleanup_netpoll path can put us in here safely without
- * holding the rtnl, so plain rcu_dereference it is
- */
npinfo = rtnl_dereference(np->dev->npinfo);
if (!npinfo)
return;
@@ -820,27 +842,22 @@
ops->ndo_netpoll_cleanup(np->dev);
RCU_INIT_POINTER(np->dev->npinfo, NULL);
- call_rcu_bh(&npinfo->rcu, rcu_cleanup_netpoll_info);
+ call_rcu(&npinfo->rcu, rcu_cleanup_netpoll_info);
} else
RCU_INIT_POINTER(np->dev->npinfo, NULL);
}
EXPORT_SYMBOL_GPL(__netpoll_cleanup);
-static void netpoll_async_cleanup(struct work_struct *work)
+void __netpoll_free(struct netpoll *np)
{
- struct netpoll *np = container_of(work, struct netpoll, cleanup_work);
+ ASSERT_RTNL();
- rtnl_lock();
+ /* Wait for transmitting packets to finish before freeing. */
+ synchronize_rcu();
__netpoll_cleanup(np);
- rtnl_unlock();
kfree(np);
}
-
-void __netpoll_free_async(struct netpoll *np)
-{
- schedule_work(&np->cleanup_work);
-}
-EXPORT_SYMBOL_GPL(__netpoll_free_async);
+EXPORT_SYMBOL_GPL(__netpoll_free);
void netpoll_cleanup(struct netpoll *np)
{
--
Gitblit v1.6.2