2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
kernel/net/core/netpoll.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Common framework for low-level network console, dump, and debugger code
  *
@@ -58,7 +59,6 @@
 	 MAX_UDP_CHUNK)
 
 static void zap_completion_queue(void);
-static void netpoll_async_cleanup(struct work_struct *work);
 
 static unsigned int carrier_timeout = 4;
 module_param(carrier_timeout, uint, 0644);
@@ -70,10 +70,11 @@
 #define np_notice(np, fmt, ...)				\
 	pr_notice("%s: " fmt, np->name, ##__VA_ARGS__)
 
-static int netpoll_start_xmit(struct sk_buff *skb, struct net_device *dev,
-			      struct netdev_queue *txq)
+static netdev_tx_t netpoll_start_xmit(struct sk_buff *skb,
+				      struct net_device *dev,
+				      struct netdev_queue *txq)
 {
-	int status = NETDEV_TX_OK;
+	netdev_tx_t status = NETDEV_TX_OK;
 	netdev_features_t features;
 
 	features = netif_skb_features(skb);
@@ -136,6 +137,20 @@
 	}
 }
 
+static int netif_local_xmit_active(struct net_device *dev)
+{
+	int i;
+
+	for (i = 0; i < dev->num_tx_queues; i++) {
+		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+
+		if (READ_ONCE(txq->xmit_lock_owner) == smp_processor_id())
+			return 1;
+	}
+
+	return 0;
+}
+
 static void poll_one_napi(struct napi_struct *napi)
 {
 	int work;
@@ -151,7 +166,7 @@
 	 * indicate that we are clearing the Tx path only.
 	 */
 	work = napi->poll(napi, 0);
-	WARN_ONCE(work, "%pF exceeded budget in poll\n", napi->poll);
+	WARN_ONCE(work, "%pS exceeded budget in poll\n", napi->poll);
 	trace_napi_poll(napi, work, 0);
 
 	clear_bit(NAPI_STATE_NPSVC, &napi->state);
@@ -182,7 +197,10 @@
 	if (!ni || down_trylock(&ni->dev_lock))
 		return;
 
-	if (!netif_running(dev)) {
+	/* Some drivers will take the same locks in poll and xmit,
+	 * we can't poll if local CPU is already in xmit.
+	 */
+	if (!netif_running(dev) || netif_local_xmit_active(dev)) {
 		up(&ni->dev_lock);
 		return;
 	}
@@ -297,7 +315,7 @@
 {
 	struct napi_struct *napi;
 
-	list_for_each_entry(napi, &dev->napi_list, dev_list) {
+	list_for_each_entry_rcu(napi, &dev->napi_list, dev_list) {
 		if (napi->poll_owner == smp_processor_id())
 			return 1;
 	}
@@ -305,27 +323,29 @@
 }
 
 /* call with IRQ disabled */
-void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
-			     struct net_device *dev)
+static netdev_tx_t __netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
 {
-	int status = NETDEV_TX_BUSY;
+	netdev_tx_t status = NETDEV_TX_BUSY;
+	struct net_device *dev;
 	unsigned long tries;
 	/* It is up to the caller to keep npinfo alive. */
 	struct netpoll_info *npinfo;
 
 	lockdep_assert_irqs_disabled();
 
-	npinfo = rcu_dereference_bh(np->dev->npinfo);
+	dev = np->dev;
+	npinfo = rcu_dereference_bh(dev->npinfo);
+
 	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
 		dev_kfree_skb_irq(skb);
-		return;
+		return NET_XMIT_DROP;
 	}
 
 	/* don't get messages out of order, and no recursion */
 	if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
 		struct netdev_queue *txq;
 
-		txq = netdev_pick_tx(dev, skb, NULL);
+		txq = netdev_core_pick_tx(dev, skb, NULL);
 
 		/* try until next clock tick */
 		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
@@ -348,7 +368,7 @@
 			}
 		}
 		WARN_ONCE(!irqs_disabled(),
-			  "netpoll_send_skb_on_dev(): %s enabled interrupts in poll (%pF)\n",
+			  "netpoll_send_skb_on_dev(): %s enabled interrupts in poll (%pS)\n",
 			  dev->name, dev->netdev_ops->ndo_start_xmit);
 
 	}
@@ -357,8 +377,25 @@
 		skb_queue_tail(&npinfo->txq, skb);
 		schedule_delayed_work(&npinfo->tx_work,0);
 	}
+	return NETDEV_TX_OK;
 }
-EXPORT_SYMBOL(netpoll_send_skb_on_dev);
+
+netdev_tx_t netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
+{
+	unsigned long flags;
+	netdev_tx_t ret;
+
+	if (unlikely(!np)) {
+		dev_kfree_skb_irq(skb);
+		ret = NET_XMIT_DROP;
+	} else {
+		local_irq_save(flags);
+		ret = __netpoll_send_skb(np, skb);
+		local_irq_restore(flags);
+	}
+	return ret;
+}
+EXPORT_SYMBOL(netpoll_send_skb);
 
 void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
 {
@@ -590,7 +627,6 @@
 
 	np->dev = ndev;
 	strlcpy(np->dev_name, ndev->name, IFNAMSIZ);
-	INIT_WORK(&np->cleanup_work, netpoll_async_cleanup);
 
 	if (ndev->priv_flags & IFF_DISABLE_NETPOLL) {
 		np_err(np, "%s doesn't support polling, aborting\n",
@@ -660,7 +696,7 @@
 		if (!netdev_uses_dsa(dev))
 			continue;
 
-		err = dev_change_flags(dev, dev->flags | IFF_UP);
+		err = dev_change_flags(dev, dev->flags | IFF_UP, NULL);
 		if (err < 0) {
 			np_err(np, "%s failed to open %s\n",
 			       np->dev_name, dev->name);
@@ -679,7 +715,7 @@
 
 	np_info(np, "device %s not up yet, forcing it\n", np->dev_name);
 
-	err = dev_open(ndev);
+	err = dev_open(ndev, NULL);
 
 	if (err) {
 		np_err(np, "failed to open %s\n", ndev->name);
@@ -711,16 +747,22 @@
 
 	if (!np->local_ip.ip) {
 		if (!np->ipv6) {
-			in_dev = __in_dev_get_rtnl(ndev);
+			const struct in_ifaddr *ifa;
 
-			if (!in_dev || !in_dev->ifa_list) {
+			in_dev = __in_dev_get_rtnl(ndev);
+			if (!in_dev)
+				goto put_noaddr;
+
+			ifa = rtnl_dereference(in_dev->ifa_list);
+			if (!ifa) {
+put_noaddr:
 				np_err(np, "no IP address for %s, aborting\n",
 				       np->dev_name);
 				err = -EDESTADDRREQ;
 				goto put;
 			}
 
-			np->local_ip.ip = in_dev->ifa_list->ifa_local;
+			np->local_ip.ip = ifa->ifa_local;
 			np_info(np, "local IP %pI4\n", &np->local_ip.ip);
 		} else {
 #if IS_ENABLED(CONFIG_IPV6)
@@ -733,7 +775,8 @@
 
 		read_lock_bh(&idev->lock);
 		list_for_each_entry(ifp, &idev->addr_list, if_list) {
-			if (ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL)
+			if (!!(ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL) !=
+			    !!(ipv6_addr_type(&np->remote_ip.in6) & IPV6_ADDR_LINKLOCAL))
 				continue;
 			np->local_ip.in6 = ifp->addr;
 			err = 0;
@@ -802,10 +845,6 @@
 {
 	struct netpoll_info *npinfo;
 
-	/* rtnl_dereference would be preferable here but
-	 * rcu_cleanup_netpoll path can put us in here safely without
-	 * holding the rtnl, so plain rcu_dereference it is
-	 */
 	npinfo = rtnl_dereference(np->dev->npinfo);
 	if (!npinfo)
 		return;
@@ -820,27 +859,22 @@
 		ops->ndo_netpoll_cleanup(np->dev);
 
 		RCU_INIT_POINTER(np->dev->npinfo, NULL);
-		call_rcu_bh(&npinfo->rcu, rcu_cleanup_netpoll_info);
+		call_rcu(&npinfo->rcu, rcu_cleanup_netpoll_info);
 	} else
 		RCU_INIT_POINTER(np->dev->npinfo, NULL);
 }
 EXPORT_SYMBOL_GPL(__netpoll_cleanup);
 
-static void netpoll_async_cleanup(struct work_struct *work)
+void __netpoll_free(struct netpoll *np)
 {
-	struct netpoll *np = container_of(work, struct netpoll, cleanup_work);
+	ASSERT_RTNL();
 
-	rtnl_lock();
+	/* Wait for transmitting packets to finish before freeing. */
+	synchronize_rcu();
 	__netpoll_cleanup(np);
-	rtnl_unlock();
 	kfree(np);
 }
-
-void __netpoll_free_async(struct netpoll *np)
-{
-	schedule_work(&np->cleanup_work);
-}
-EXPORT_SYMBOL_GPL(__netpoll_free_async);
+EXPORT_SYMBOL_GPL(__netpoll_free);
 
 void netpoll_cleanup(struct netpoll *np)
 {