2023-12-08 01573e231f18eb2d99162747186f59511f56b64d
kernel/net/core/netpoll.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Common framework for low-level network console, dump, and debugger code
  *
@@ -58,7 +59,6 @@
 	 MAX_UDP_CHUNK)
 
 static void zap_completion_queue(void);
-static void netpoll_async_cleanup(struct work_struct *work);
 
 static unsigned int carrier_timeout = 4;
 module_param(carrier_timeout, uint, 0644);
@@ -70,10 +70,11 @@
 #define np_notice(np, fmt, ...)				\
 	pr_notice("%s: " fmt, np->name, ##__VA_ARGS__)
 
-static int netpoll_start_xmit(struct sk_buff *skb, struct net_device *dev,
-			      struct netdev_queue *txq)
+static netdev_tx_t netpoll_start_xmit(struct sk_buff *skb,
+				      struct net_device *dev,
+				      struct netdev_queue *txq)
 {
-	int status = NETDEV_TX_OK;
+	netdev_tx_t status = NETDEV_TX_OK;
 	netdev_features_t features;
 
 	features = netif_skb_features(skb);
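
Note: the hunk above changes netpoll_start_xmit() to return netdev_tx_t rather than a plain int. For reference, netdev_tx_t is the driver transmit status type from include/linux/netdevice.h; a simplified sketch of its definition (the kernel's actual enum also contains a value that pins the type as signed):

	enum netdev_tx {
		NETDEV_TX_OK	= 0x00,	/* driver took care of the packet */
		NETDEV_TX_BUSY	= 0x10,	/* driver tx path was busy */
	};
	typedef enum netdev_tx netdev_tx_t;

Using the typedef instead of int lets the compiler flag callers that still treat the transmit status as a generic integer.
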
@@ -151,7 +152,7 @@
 	 * indicate that we are clearing the Tx path only.
 	 */
 	work = napi->poll(napi, 0);
-	WARN_ONCE(work, "%pF exceeded budget in poll\n", napi->poll);
+	WARN_ONCE(work, "%pS exceeded budget in poll\n", napi->poll);
 	trace_napi_poll(napi, work, 0);
 
 	clear_bit(NAPI_STATE_NPSVC, &napi->state);
@@ -297,7 +298,7 @@
 {
 	struct napi_struct *napi;
 
-	list_for_each_entry(napi, &dev->napi_list, dev_list) {
+	list_for_each_entry_rcu(napi, &dev->napi_list, dev_list) {
 		if (napi->poll_owner == smp_processor_id())
 			return 1;
 	}
@@ -305,27 +306,29 @@
 }
 
 /* call with IRQ disabled */
-void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
-			     struct net_device *dev)
+static netdev_tx_t __netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
 {
-	int status = NETDEV_TX_BUSY;
+	netdev_tx_t status = NETDEV_TX_BUSY;
+	struct net_device *dev;
 	unsigned long tries;
 	/* It is up to the caller to keep npinfo alive. */
 	struct netpoll_info *npinfo;
 
 	lockdep_assert_irqs_disabled();
 
-	npinfo = rcu_dereference_bh(np->dev->npinfo);
+	dev = np->dev;
+	npinfo = rcu_dereference_bh(dev->npinfo);
+
 	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
 		dev_kfree_skb_irq(skb);
-		return;
+		return NET_XMIT_DROP;
 	}
 
 	/* don't get messages out of order, and no recursion */
 	if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
 		struct netdev_queue *txq;
 
-		txq = netdev_pick_tx(dev, skb, NULL);
+		txq = netdev_core_pick_tx(dev, skb, NULL);
 
 		/* try until next clock tick */
 		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
@@ -348,7 +351,7 @@
 		}
 
 		WARN_ONCE(!irqs_disabled(),
-			  "netpoll_send_skb_on_dev(): %s enabled interrupts in poll (%pF)\n",
+			  "netpoll_send_skb_on_dev(): %s enabled interrupts in poll (%pS)\n",
 			  dev->name, dev->netdev_ops->ndo_start_xmit);
 
 	}
@@ -357,8 +360,25 @@
 		skb_queue_tail(&npinfo->txq, skb);
 		schedule_delayed_work(&npinfo->tx_work,0);
 	}
+	return NETDEV_TX_OK;
 }
-EXPORT_SYMBOL(netpoll_send_skb_on_dev);
+
+netdev_tx_t netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
+{
+	unsigned long flags;
+	netdev_tx_t ret;
+
+	if (unlikely(!np)) {
+		dev_kfree_skb_irq(skb);
+		ret = NET_XMIT_DROP;
+	} else {
+		local_irq_save(flags);
+		ret = __netpoll_send_skb(np, skb);
+		local_irq_restore(flags);
+	}
+	return ret;
+}
+EXPORT_SYMBOL(netpoll_send_skb);
 
 void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
 {
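
Note: netpoll_send_skb() replaces the exported netpoll_send_skb_on_dev() and, unlike its predecessor, reports whether the skb was handed off. A hypothetical caller (example_console_write() and its message are illustrative, not from this patch) might use the status like this:

	#include <linux/netpoll.h>

	/* Sketch: the skb is consumed on every path inside
	 * netpoll_send_skb(), so the caller only accounts for the
	 * drop and never frees the skb itself.
	 */
	static void example_console_write(struct netpoll *np, struct sk_buff *skb)
	{
		if (netpoll_send_skb(np, skb) != NETDEV_TX_OK)
			pr_warn_ratelimited("example: netpoll message dropped\n");
	}

The wrapper also hides the IRQ discipline: it disables local interrupts around __netpoll_send_skb() itself, so callers no longer need their own local_irq_save()/restore() pair.
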
@@ -590,7 +610,6 @@
 
 	np->dev = ndev;
 	strlcpy(np->dev_name, ndev->name, IFNAMSIZ);
-	INIT_WORK(&np->cleanup_work, netpoll_async_cleanup);
 
 	if (ndev->priv_flags & IFF_DISABLE_NETPOLL) {
 		np_err(np, "%s doesn't support polling, aborting\n",
@@ -660,7 +679,7 @@
 		if (!netdev_uses_dsa(dev))
 			continue;
 
-		err = dev_change_flags(dev, dev->flags | IFF_UP);
+		err = dev_change_flags(dev, dev->flags | IFF_UP, NULL);
 		if (err < 0) {
 			np_err(np, "%s failed to open %s\n",
 			       np->dev_name, dev->name);
@@ -679,7 +698,7 @@
 
 		np_info(np, "device %s not up yet, forcing it\n", np->dev_name);
 
-		err = dev_open(ndev);
+		err = dev_open(ndev, NULL);
 
 		if (err) {
 			np_err(np, "failed to open %s\n", ndev->name);
@@ -711,16 +730,22 @@
 
 	if (!np->local_ip.ip) {
 		if (!np->ipv6) {
-			in_dev = __in_dev_get_rtnl(ndev);
+			const struct in_ifaddr *ifa;
 
-			if (!in_dev || !in_dev->ifa_list) {
+			in_dev = __in_dev_get_rtnl(ndev);
+			if (!in_dev)
+				goto put_noaddr;
+
+			ifa = rtnl_dereference(in_dev->ifa_list);
+			if (!ifa) {
+put_noaddr:
 				np_err(np, "no IP address for %s, aborting\n",
 				       np->dev_name);
 				err = -EDESTADDRREQ;
 				goto put;
 			}
 
-			np->local_ip.ip = in_dev->ifa_list->ifa_local;
+			np->local_ip.ip = ifa->ifa_local;
 			np_info(np, "local IP %pI4\n", &np->local_ip.ip);
 		} else {
 #if IS_ENABLED(CONFIG_IPV6)
@@ -733,7 +758,8 @@
 
 			read_lock_bh(&idev->lock);
 			list_for_each_entry(ifp, &idev->addr_list, if_list) {
-				if (ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL)
+				if (!!(ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL) !=
+				    !!(ipv6_addr_type(&np->remote_ip.in6) & IPV6_ADDR_LINKLOCAL))
 					continue;
 				np->local_ip.in6 = ifp->addr;
 				err = 0;
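
Note: the !!(a) != !!(b) construction above is a boolean XOR, so a candidate address is now skipped unless it has the same scope as the remote address: a link-local peer selects a link-local source, a global peer a global one. Equivalently (example_same_ll_scope() is a hypothetical helper, not part of the patch):

	static bool example_same_ll_scope(const struct in6_addr *a,
					  const struct in6_addr *b)
	{
		return !!(ipv6_addr_type(a) & IPV6_ADDR_LINKLOCAL) ==
		       !!(ipv6_addr_type(b) & IPV6_ADDR_LINKLOCAL);
	}

The old test skipped every link-local address outright, which left no usable source address when the remote end was itself link-local.
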
@@ -802,10 +828,6 @@
 {
 	struct netpoll_info *npinfo;
 
-	/* rtnl_dereference would be preferable here but
-	 * rcu_cleanup_netpoll path can put us in here safely without
-	 * holding the rtnl, so plain rcu_dereference it is
-	 */
 	npinfo = rtnl_dereference(np->dev->npinfo);
 	if (!npinfo)
 		return;
@@ -820,27 +842,22 @@
 			ops->ndo_netpoll_cleanup(np->dev);
 
 		RCU_INIT_POINTER(np->dev->npinfo, NULL);
-		call_rcu_bh(&npinfo->rcu, rcu_cleanup_netpoll_info);
+		call_rcu(&npinfo->rcu, rcu_cleanup_netpoll_info);
 	} else
 		RCU_INIT_POINTER(np->dev->npinfo, NULL);
 }
 EXPORT_SYMBOL_GPL(__netpoll_cleanup);
 
-static void netpoll_async_cleanup(struct work_struct *work)
+void __netpoll_free(struct netpoll *np)
 {
-	struct netpoll *np = container_of(work, struct netpoll, cleanup_work);
+	ASSERT_RTNL();
 
-	rtnl_lock();
+	/* Wait for transmitting packets to finish before freeing. */
+	synchronize_rcu();
 	__netpoll_cleanup(np);
-	rtnl_unlock();
 	kfree(np);
 }
-
-void __netpoll_free_async(struct netpoll *np)
-{
-	schedule_work(&np->cleanup_work);
-}
-EXPORT_SYMBOL_GPL(__netpoll_free_async);
+EXPORT_SYMBOL_GPL(__netpoll_free);
 
 void netpoll_cleanup(struct netpoll *np)
 {
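
Note: with netpoll_async_cleanup() and __netpoll_free_async() removed, teardown becomes synchronous: callers invoke __netpoll_free() while already holding RTNL, and the synchronize_rcu() inside it ensures no CPU is still transmitting through the instance before kfree(). A hypothetical driver detach path (example_priv and example_detach_netpoll() are illustrative only):

	#include <linux/netpoll.h>

	struct example_priv {
		struct netpoll *np;	/* attached instance, or NULL */
	};

	/* Caller must hold RTNL; __netpoll_free() asserts it. */
	static void example_detach_netpoll(struct example_priv *priv)
	{
		struct netpoll *np = priv->np;

		if (!np)
			return;
		priv->np = NULL;
		__netpoll_free(np);	/* drains in-flight tx, then frees */
	}
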