.. | .. |
| 1 | +// SPDX-License-Identifier: GPL-2.0-only
1 | 2 | /*
2 | 3 |  * Common framework for low-level network console, dump, and debugger code
3 | 4 |  *
.. | .. |
58 | 59 |          MAX_UDP_CHUNK)
59 | 60 |
60 | 61 | static void zap_completion_queue(void);
61 | | -static void netpoll_async_cleanup(struct work_struct *work);
62 | 62 |
63 | 63 | static unsigned int carrier_timeout = 4;
64 | 64 | module_param(carrier_timeout, uint, 0644);
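
Context note: carrier_timeout stays a mode-0644 module parameter, so the carrier wait that netpoll_setup() performs later can be tuned at runtime rather than rebuilt in. A sketch of what that declaration implies (the sysfs path assumes netpoll is built in, which it normally is):

    /* Sketch: module_param with mode 0644 exposes the knob read-write at
     *   /sys/module/netpoll/parameters/carrier_timeout
     * netpoll_setup() waits up to carrier_timeout seconds for carrier.
     */
    static unsigned int carrier_timeout = 4;
    module_param(carrier_timeout, uint, 0644);
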
.. | .. |
70 | 70 | #define np_notice(np, fmt, ...)                                \
71 | 71 |         pr_notice("%s: " fmt, np->name, ##__VA_ARGS__)
72 | 72 |
73 | | -static int netpoll_start_xmit(struct sk_buff *skb, struct net_device *dev,
74 | | -                              struct netdev_queue *txq)
| 73 | +static netdev_tx_t netpoll_start_xmit(struct sk_buff *skb,
| 74 | +                                      struct net_device *dev,
| 75 | +                                      struct netdev_queue *txq)
75 | 76 | {
76 | | -        int status = NETDEV_TX_OK;
| 77 | +        netdev_tx_t status = NETDEV_TX_OK;
77 | 78 |         netdev_features_t features;
78 | 79 |
79 | 80 |         features = netif_skb_features(skb);
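
The switch from int to netdev_tx_t matches the contract this helper feeds: ndo_start_xmit handlers report status in netdev_tx_t (NETDEV_TX_OK, NETDEV_TX_BUSY), not a plain int. A minimal sketch of the same convention in a driver hook, assuming <linux/netdevice.h>; my_dev_xmit is illustrative, not part of this file:

    /* Illustrative ndo_start_xmit-style handler using netdev_tx_t. */
    static netdev_tx_t my_dev_xmit(struct sk_buff *skb, struct net_device *dev)
    {
            if (!netif_running(dev))
                    return NETDEV_TX_BUSY;  /* ask the caller to retry */
            dev_kfree_skb_any(skb);         /* consume the skb, standing in for real DMA */
            return NETDEV_TX_OK;
    }
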
.. | .. |
151 | 152 |          * indicate that we are clearing the Tx path only.
152 | 153 |          */
153 | 154 |         work = napi->poll(napi, 0);
154 | | -        WARN_ONCE(work, "%pF exceeded budget in poll\n", napi->poll);
| 155 | +        WARN_ONCE(work, "%pS exceeded budget in poll\n", napi->poll);
155 | 156 |         trace_napi_poll(napi, work, 0);
156 | 157 |
157 | 158 |         clear_bit(NAPI_STATE_NPSVC, &napi->state);
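
%pF was the printk specifier for function-descriptor pointers and has since been dropped from vsprintf in favor of %pS, which resolves a code address to its symbol name; on ABIs with function descriptors (ia64, ppc64 ELFv1) the modern %pS handles the descriptor indirection itself. A tiny fragment showing the surviving specifier; the output in the comment is illustrative:

    /* %pS renders a function pointer as "symbol+0xoff/0xlen", e.g.
     * "e1000_clean+0x0/0x2a0" (example output, driver-dependent).
     */
    pr_warn("poll handler %pS exceeded its budget\n", napi->poll);
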
.. | .. |
297 | 298 | {
298 | 299 |         struct napi_struct *napi;
299 | 300 |
300 | | -        list_for_each_entry(napi, &dev->napi_list, dev_list) {
| 301 | +        list_for_each_entry_rcu(napi, &dev->napi_list, dev_list) {
301 | 302 |                 if (napi->poll_owner == smp_processor_id())
302 | 303 |                         return 1;
303 | 304 |         }
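
The _rcu walker is needed because this runs on the netpoll transmit path, which holds only an RCU(-bh) read-side lock (see the rcu_dereference_bh() of dev->npinfo below), not a lock that freezes dev->napi_list. A generic sketch of the pattern under an assumed RCU read section; pos, head, and do_work are placeholder names:

    /* RCU list read side (sketch): traversal is safe against concurrent
     * list_add_rcu()/list_del_rcu() as long as the read lock is held and
     * entries are only freed after a grace period.
     */
    rcu_read_lock();
    list_for_each_entry_rcu(pos, &head, list)
            do_work(pos);           /* must not sleep or block */
    rcu_read_unlock();
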
.. | .. |
305 | 306 | }
306 | 307 |
307 | 308 | /* call with IRQ disabled */
308 | | -void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
309 | | -                             struct net_device *dev)
| 309 | +static netdev_tx_t __netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
310 | 310 | {
311 | | -        int status = NETDEV_TX_BUSY;
| 311 | +        netdev_tx_t status = NETDEV_TX_BUSY;
| 312 | +        struct net_device *dev;
312 | 313 |         unsigned long tries;
313 | 314 |         /* It is up to the caller to keep npinfo alive. */
314 | 315 |         struct netpoll_info *npinfo;
315 | 316 |
316 | 317 |         lockdep_assert_irqs_disabled();
317 | 318 |
318 | | -        npinfo = rcu_dereference_bh(np->dev->npinfo);
| 319 | +        dev = np->dev;
| 320 | +        npinfo = rcu_dereference_bh(dev->npinfo);
| 321 | +
319 | 322 |         if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
320 | 323 |                 dev_kfree_skb_irq(skb);
321 | | -                return;
| 324 | +                return NET_XMIT_DROP;
322 | 325 |         }
323 | 326 |
324 | 327 |         /* don't get messages out of order, and no recursion */
325 | 328 |         if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
326 | 329 |                 struct netdev_queue *txq;
327 | 330 |
328 | | -                txq = netdev_pick_tx(dev, skb, NULL);
| 331 | +                txq = netdev_core_pick_tx(dev, skb, NULL);
329 | 332 |
330 | 333 |                 /* try until next clock tick */
331 | 334 |                 for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
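
Two independent changes land in this hunk: the queue picker was renamed (netdev_core_pick_tx() is, as I recall, the new name for the old default netdev_pick_tx(), whose name was reused for a different helper), and the early-drop path now reports NET_XMIT_DROP instead of returning void. The retry bound on the loop deserves a worked example; USEC_PER_POLL is 50 in this file:

    /* Worked retry bound (assumes HZ=1000; USEC_PER_POLL is 50 here):
     *   tries = jiffies_to_usecs(1) / USEC_PER_POLL = 1000 / 50 = 20
     * so at most 20 transmit attempts, spaced ~50 us apart by udelay(),
     * i.e. the sender gives up after roughly one clock tick.
     */
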
.. | .. |
348 | 351 |                 }
349 | 352 |
350 | 353 |                 WARN_ONCE(!irqs_disabled(),
351 | | -                          "netpoll_send_skb_on_dev(): %s enabled interrupts in poll (%pF)\n",
| 354 | +                          "netpoll_send_skb_on_dev(): %s enabled interrupts in poll (%pS)\n",
352 | 355 |                           dev->name, dev->netdev_ops->ndo_start_xmit);
353 | 356 |
354 | 357 |         }
.. | .. |
357 | 360 |                 skb_queue_tail(&npinfo->txq, skb);
358 | 361 |                 schedule_delayed_work(&npinfo->tx_work,0);
359 | 362 |         }
| 363 | +        return NETDEV_TX_OK;
360 | 364 | }
361 | | -EXPORT_SYMBOL(netpoll_send_skb_on_dev);
| 365 | +
| 366 | +netdev_tx_t netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
| 367 | +{
| 368 | +        unsigned long flags;
| 369 | +        netdev_tx_t ret;
| 370 | +
| 371 | +        if (unlikely(!np)) {
| 372 | +                dev_kfree_skb_irq(skb);
| 373 | +                ret = NET_XMIT_DROP;
| 374 | +        } else {
| 375 | +                local_irq_save(flags);
| 376 | +                ret = __netpoll_send_skb(np, skb);
| 377 | +                local_irq_restore(flags);
| 378 | +        }
| 379 | +        return ret;
| 380 | +}
| 381 | +EXPORT_SYMBOL(netpoll_send_skb);
362 | 382 |
363 | 383 | void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
364 | 384 | {
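
netpoll_send_skb() supersedes the old static-inline wrapper in include/linux/netpoll.h, which did the same local_irq_save()/restore dance around netpoll_send_skb_on_dev() but returned nothing; moving it here gives callers a netdev_tx_t and lets a NULL np degrade to a plain drop. Note that the queued-busy path above returns NETDEV_TX_OK, so a non-OK result really means the skb was dropped. A hypothetical caller sketch (my_emit is illustrative):

    /* Hypothetical user: IRQs may be on; a NULL np is tolerated. */
    static void my_emit(struct netpoll *np, struct sk_buff *skb)
    {
            if (netpoll_send_skb(np, skb) != NETDEV_TX_OK)
                    pr_debug("netpoll: skb dropped\n");
    }
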
.. | .. |
590 | 610 |
591 | 611 |         np->dev = ndev;
592 | 612 |         strlcpy(np->dev_name, ndev->name, IFNAMSIZ);
593 | | -        INIT_WORK(&np->cleanup_work, netpoll_async_cleanup);
594 | 613 |
595 | 614 |         if (ndev->priv_flags & IFF_DISABLE_NETPOLL) {
596 | 615 |                 np_err(np, "%s doesn't support polling, aborting\n",
.. | .. |
660 | 679 |                 if (!netdev_uses_dsa(dev))
661 | 680 |                         continue;
662 | 681 |
663 | | -                err = dev_change_flags(dev, dev->flags | IFF_UP);
| 682 | +                err = dev_change_flags(dev, dev->flags | IFF_UP, NULL);
664 | 683 |                 if (err < 0) {
665 | 684 |                         np_err(np, "%s failed to open %s\n",
666 | 685 |                                np->dev_name, dev->name);
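
dev_change_flags() (and dev_open() in the next hunk) gained a trailing struct netlink_ext_ack * so that rtnetlink-driven callers can attach a human-readable failure message; netpoll has no netlink request to report into, hence the NULL. A sketch of the counterpart with an extack, where bring_up() is a hypothetical helper:

    /* Hypothetical: a caller servicing a netlink request threads its
     * extack through, so userspace sees the reason on failure.
     */
    static int bring_up(struct net_device *dev, struct netlink_ext_ack *extack)
    {
            return dev_change_flags(dev, dev->flags | IFF_UP, extack);
    }
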
.. | .. |
679 | 698 |
680 | 699 |         np_info(np, "device %s not up yet, forcing it\n", np->dev_name);
681 | 700 |
682 | | -        err = dev_open(ndev);
| 701 | +        err = dev_open(ndev, NULL);
683 | 702 |
684 | 703 |         if (err) {
685 | 704 |                 np_err(np, "failed to open %s\n", ndev->name);
.. | .. |
711 | 730 |
712 | 731 |         if (!np->local_ip.ip) {
713 | 732 |                 if (!np->ipv6) {
714 | | -                        in_dev = __in_dev_get_rtnl(ndev);
| 733 | +                        const struct in_ifaddr *ifa;
715 | 734 |
716 | | -                        if (!in_dev || !in_dev->ifa_list) {
| 735 | +                        in_dev = __in_dev_get_rtnl(ndev);
| 736 | +                        if (!in_dev)
| 737 | +                                goto put_noaddr;
| 738 | +
| 739 | +                        ifa = rtnl_dereference(in_dev->ifa_list);
| 740 | +                        if (!ifa) {
| 741 | +put_noaddr:
717 | 742 |                                 np_err(np, "no IP address for %s, aborting\n",
718 | 743 |                                        np->dev_name);
719 | 744 |                                 err = -EDESTADDRREQ;
720 | 745 |                                 goto put;
721 | 746 |                         }
722 | 747 |
723 | | -                        np->local_ip.ip = in_dev->ifa_list->ifa_local;
| 748 | +                        np->local_ip.ip = ifa->ifa_local;
724 | 749 |                         np_info(np, "local IP %pI4\n", &np->local_ip.ip);
725 | 750 |                 } else {
726 | 751 | #if IS_ENABLED(CONFIG_IPV6)
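
in_dev->ifa_list is now __rcu-annotated (the IPv4 address list became RCU-protected), so the old direct in_dev->ifa_list->ifa_local chase is no longer a legal plain-pointer access; netpoll_setup() holds RTNL, so rtnl_dereference() is the matching accessor. A sketch of the two read-side idioms, assuming in_dev is valid and use_addr() is an illustrative consumer:

    const struct in_ifaddr *ifa;

    /* Writer-excluded side: RTNL held (as in netpoll_setup()). */
    ifa = rtnl_dereference(in_dev->ifa_list);

    /* Lockless side: plain RCU reader. */
    rcu_read_lock();
    ifa = rcu_dereference(in_dev->ifa_list);
    if (ifa)
            use_addr(ifa->ifa_local);
    rcu_read_unlock();
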
.. | .. |
733 | 758 |
734 | 759 |                         read_lock_bh(&idev->lock);
735 | 760 |                         list_for_each_entry(ifp, &idev->addr_list, if_list) {
736 | | -                                if (ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL)
| 761 | +                                if (!!(ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL) !=
| 762 | +                                    !!(ipv6_addr_type(&np->remote_ip.in6) & IPV6_ADDR_LINKLOCAL))
737 | 763 |                                         continue;
738 | 764 |                                 np->local_ip.in6 = ifp->addr;
739 | 765 |                                 err = 0;
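
The old test unconditionally skipped link-local source candidates, which made a link-local remote unreachable. The replacement is a boolean XOR on "is link-local": a candidate survives only when its scope class matches the remote's. Spelled out:

    /* !!a != !!b is XOR after normalizing to 0/1:
     *   remote global,     ifp global      -> 0 != 0 -> keep candidate
     *   remote link-local, ifp link-local  -> 1 != 1 -> keep candidate
     *   remote global,     ifp link-local  -> 0 != 1 -> skip
     *   remote link-local, ifp global      -> 1 != 0 -> skip
     */
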
.. | .. |
802 | 828 | {
803 | 829 |         struct netpoll_info *npinfo;
804 | 830 |
805 | | -        /* rtnl_dereference would be preferable here but
806 | | -         * rcu_cleanup_netpoll path can put us in here safely without
807 | | -         * holding the rtnl, so plain rcu_dereference it is
808 | | -         */
809 | 831 |         npinfo = rtnl_dereference(np->dev->npinfo);
810 | 832 |         if (!npinfo)
811 | 833 |                 return;
.. | .. |
820 | 842 |                         ops->ndo_netpoll_cleanup(np->dev);
821 | 843 |
822 | 844 |                 RCU_INIT_POINTER(np->dev->npinfo, NULL);
823 | | -                call_rcu_bh(&npinfo->rcu, rcu_cleanup_netpoll_info);
| 845 | +                call_rcu(&npinfo->rcu, rcu_cleanup_netpoll_info);
824 | 846 |         } else
825 | 847 |                 RCU_INIT_POINTER(np->dev->npinfo, NULL);
826 | 848 | }
827 | 849 | EXPORT_SYMBOL_GPL(__netpoll_cleanup);
828 | 850 |
829 | | -static void netpoll_async_cleanup(struct work_struct *work)
| 851 | +void __netpoll_free(struct netpoll *np)
830 | 852 | {
831 | | -        struct netpoll *np = container_of(work, struct netpoll, cleanup_work);
| 853 | +        ASSERT_RTNL();
832 | 854 |
833 | | -        rtnl_lock();
| 855 | +        /* Wait for transmitting packets to finish before freeing. */
| 856 | +        synchronize_rcu();
834 | 857 |         __netpoll_cleanup(np);
835 | | -        rtnl_unlock();
836 | 858 |         kfree(np);
837 | 859 | }
838 | | -
839 | | -void __netpoll_free_async(struct netpoll *np)
840 | | -{
841 | | -        schedule_work(&np->cleanup_work);
842 | | -}
843 | | -EXPORT_SYMBOL_GPL(__netpoll_free_async);
| 860 | +EXPORT_SYMBOL_GPL(__netpoll_free);
844 | 861 |
845 | 862 | void netpoll_cleanup(struct netpoll *np)
846 | 863 | {
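
__netpoll_free() replaces the fire-and-forget __netpoll_free_async(): callers now tear down synchronously while they already hold RTNL (hence ASSERT_RTNL()), and the synchronize_rcu() lets any sender still inside the RCU-protected transmit path drain before np is kfree()d. The call_rcu_bh() to call_rcu() change reflects the RCU-bh flavor being folded into plain RCU. A hypothetical caller sketch, where struct my_port and its np field are illustrative:

    /* Hypothetical teardown under RTNL, bonding/bridge style. */
    static void my_port_detach(struct my_port *port)
    {
            struct netpoll *np = port->np;

            ASSERT_RTNL();
            port->np = NULL;
            if (np)
                    __netpoll_free(np);     /* synchronous; waits out in-flight tx */
    }
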