```diff
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Common framework for low-level network console, dump, and debugger code
  *
```
```diff
@@ -58,7 +59,6 @@
 	 MAX_UDP_CHUNK)
 
 static void zap_completion_queue(void);
-static void netpoll_async_cleanup(struct work_struct *work);
 
 static unsigned int carrier_timeout = 4;
 module_param(carrier_timeout, uint, 0644);
```
```diff
@@ -70,10 +70,11 @@
 #define np_notice(np, fmt, ...)				\
 	pr_notice("%s: " fmt, np->name, ##__VA_ARGS__)
 
-static int netpoll_start_xmit(struct sk_buff *skb, struct net_device *dev,
-			      struct netdev_queue *txq)
+static netdev_tx_t netpoll_start_xmit(struct sk_buff *skb,
+				      struct net_device *dev,
+				      struct netdev_queue *txq)
 {
-	int status = NETDEV_TX_OK;
+	netdev_tx_t status = NETDEV_TX_OK;
 	netdev_features_t features;
 
 	features = netif_skb_features(skb);
```
```diff
@@ -136,6 +137,20 @@
 	}
 }
 
+static int netif_local_xmit_active(struct net_device *dev)
+{
+	int i;
+
+	for (i = 0; i < dev->num_tx_queues; i++) {
+		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+
+		if (READ_ONCE(txq->xmit_lock_owner) == smp_processor_id())
+			return 1;
+	}
+
+	return 0;
+}
+
 static void poll_one_napi(struct napi_struct *napi)
 {
 	int work;
```
```diff
@@ -151,7 +166,7 @@
 	 * indicate that we are clearing the Tx path only.
 	 */
 	work = napi->poll(napi, 0);
-	WARN_ONCE(work, "%pF exceeded budget in poll\n", napi->poll);
+	WARN_ONCE(work, "%pS exceeded budget in poll\n", napi->poll);
 	trace_napi_poll(napi, work, 0);
 
 	clear_bit(NAPI_STATE_NPSVC, &napi->state);
```
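`%pF` existed to dereference function descriptors on architectures such as ia64 and ppc64; once `%pS` learned to handle those itself, `%pF` was deprecated, and conversions like this one (and the `WARN_ONCE()` hunk further down) are mechanical. For illustration only:

```c
/* %pS prints a code pointer symbolically (name+offset) and copes with
 * function-descriptor architectures, so it fully replaces %pF here.
 */
static void example_report(struct napi_struct *napi)
{
	pr_warn("poll handler %pS misbehaved\n", napi->poll);
}
```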
```diff
@@ -182,7 +197,10 @@
 	if (!ni || down_trylock(&ni->dev_lock))
 		return;
 
-	if (!netif_running(dev)) {
+	/* Some drivers will take the same locks in poll and xmit,
+	 * we can't poll if local CPU is already in xmit.
+	 */
+	if (!netif_running(dev) || netif_local_xmit_active(dev)) {
 		up(&ni->dev_lock);
 		return;
 	}
```
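This check closes a real recursion: a `printk()` issued while a driver's xmit path holds a lock can re-enter netpoll and poll the very same device on the same CPU. A hypothetical driver shape that would self-deadlock without the check (all `foo_*` names are illustrative):

```c
/* Hypothetical driver where poll and xmit share one private lock.
 * Without netif_local_xmit_active(), a printk() inside the locked
 * region could re-enter netpoll_poll_dev(), run the poll handler, and
 * try to take fp->lock a second time on this CPU.
 */
struct foo_priv {
	spinlock_t lock;	/* taken by both the poll and xmit paths */
};

static netdev_tx_t foo_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct foo_priv *fp = netdev_priv(dev);

	spin_lock(&fp->lock);
	dev_kfree_skb_any(skb);	/* stand-in for the real TX work */
	spin_unlock(&fp->lock);
	return NETDEV_TX_OK;
}
```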
```diff
@@ -297,7 +315,7 @@
 {
 	struct napi_struct *napi;
 
-	list_for_each_entry(napi, &dev->napi_list, dev_list) {
+	list_for_each_entry_rcu(napi, &dev->napi_list, dev_list) {
 		if (napi->poll_owner == smp_processor_id())
 			return 1;
 	}
```
```diff
@@ -305,27 +323,29 @@
 }
 
 /* call with IRQ disabled */
-void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
-			     struct net_device *dev)
+static netdev_tx_t __netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
 {
-	int status = NETDEV_TX_BUSY;
+	netdev_tx_t status = NETDEV_TX_BUSY;
+	struct net_device *dev;
 	unsigned long tries;
 	/* It is up to the caller to keep npinfo alive. */
 	struct netpoll_info *npinfo;
 
 	lockdep_assert_irqs_disabled();
 
-	npinfo = rcu_dereference_bh(np->dev->npinfo);
+	dev = np->dev;
+	npinfo = rcu_dereference_bh(dev->npinfo);
+
 	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
 		dev_kfree_skb_irq(skb);
-		return;
+		return NET_XMIT_DROP;
 	}
 
 	/* don't get messages out of order, and no recursion */
 	if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
 		struct netdev_queue *txq;
 
-		txq = netdev_pick_tx(dev, skb, NULL);
+		txq = netdev_core_pick_tx(dev, skb, NULL);
 
 		/* try until next clock tick */
 		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
```
```diff
@@ -348,7 +368,7 @@
 	}
 
 	WARN_ONCE(!irqs_disabled(),
-		  "netpoll_send_skb_on_dev(): %s enabled interrupts in poll (%pF)\n",
+		  "netpoll_send_skb_on_dev(): %s enabled interrupts in poll (%pS)\n",
 		  dev->name, dev->netdev_ops->ndo_start_xmit);
 
 }
```
```diff
@@ -357,8 +377,25 @@
 		skb_queue_tail(&npinfo->txq, skb);
 		schedule_delayed_work(&npinfo->tx_work,0);
 	}
+	return NETDEV_TX_OK;
 }
-EXPORT_SYMBOL(netpoll_send_skb_on_dev);
+
+netdev_tx_t netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
+{
+	unsigned long flags;
+	netdev_tx_t ret;
+
+	if (unlikely(!np)) {
+		dev_kfree_skb_irq(skb);
+		ret = NET_XMIT_DROP;
+	} else {
+		local_irq_save(flags);
+		ret = __netpoll_send_skb(np, skb);
+		local_irq_restore(flags);
+	}
+	return ret;
+}
+EXPORT_SYMBOL(netpoll_send_skb);
 
 void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
 {
```
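With this hunk the exported entry point becomes `netpoll_send_skb()`: it handles the NULL-netpoll case, disables interrupts around the send itself, and reports a `netdev_tx_t` instead of returning void, so callers such as netconsole can drop their own `local_irq_save()`/`local_irq_restore()` pair. A hedged caller sketch (`example_emit` is illustrative):

```c
/* Hedged caller sketch: IRQ handling and the NULL check now live inside
 * netpoll_send_skb(); NET_XMIT_DROP comes back when the skb was dropped.
 */
static void example_emit(struct netpoll *np, struct sk_buff *skb)
{
	netdev_tx_t ret = netpoll_send_skb(np, skb);

	if (ret != NETDEV_TX_OK)
		pr_debug("netpoll: message dropped\n");
}
```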
```diff
@@ -590,7 +627,6 @@
 
 	np->dev = ndev;
 	strlcpy(np->dev_name, ndev->name, IFNAMSIZ);
-	INIT_WORK(&np->cleanup_work, netpoll_async_cleanup);
 
 	if (ndev->priv_flags & IFF_DISABLE_NETPOLL) {
 		np_err(np, "%s doesn't support polling, aborting\n",
```
```diff
@@ -660,7 +696,7 @@
 			if (!netdev_uses_dsa(dev))
 				continue;
 
-			err = dev_change_flags(dev, dev->flags | IFF_UP);
+			err = dev_change_flags(dev, dev->flags | IFF_UP, NULL);
 			if (err < 0) {
 				np_err(np, "%s failed to open %s\n",
 				       np->dev_name, dev->name);
```
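The new trailing `NULL` here, and in the `dev_open()` hunk below, is a `struct netlink_ext_ack *` that these functions gained for extended error reporting. Netpoll has no netlink request to attach an error to, so it passes NULL; a netlink-driven caller would forward its own extack, roughly as below (`example_bring_up` is illustrative only):

```c
/* Hedged sketch: forward the extack so a failure carries a
 * human-readable message back to the netlink client.
 */
static int example_bring_up(struct net_device *dev,
			    struct netlink_ext_ack *extack)
{
	return dev_change_flags(dev, dev->flags | IFF_UP, extack);
}
```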
```diff
@@ -679,7 +715,7 @@
 
 		np_info(np, "device %s not up yet, forcing it\n", np->dev_name);
 
-		err = dev_open(ndev);
+		err = dev_open(ndev, NULL);
 
 		if (err) {
 			np_err(np, "failed to open %s\n", ndev->name);
```
```diff
@@ -711,16 +747,22 @@
 
 	if (!np->local_ip.ip) {
 		if (!np->ipv6) {
-			in_dev = __in_dev_get_rtnl(ndev);
+			const struct in_ifaddr *ifa;
 
-			if (!in_dev || !in_dev->ifa_list) {
+			in_dev = __in_dev_get_rtnl(ndev);
+			if (!in_dev)
+				goto put_noaddr;
+
+			ifa = rtnl_dereference(in_dev->ifa_list);
+			if (!ifa) {
+put_noaddr:
 				np_err(np, "no IP address for %s, aborting\n",
 				       np->dev_name);
 				err = -EDESTADDRREQ;
 				goto put;
 			}
 
-			np->local_ip.ip = in_dev->ifa_list->ifa_local;
+			np->local_ip.ip = ifa->ifa_local;
 			np_info(np, "local IP %pI4\n", &np->local_ip.ip);
 		} else {
 #if IS_ENABLED(CONFIG_IPV6)
```
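`in_dev->ifa_list` became an RCU-protected pointer, so the plain `in_dev->ifa_list->ifa_local` chase is replaced by an `rtnl_dereference()` (RTNL is held on this path) plus separate NULL checks. Assuming the `in_dev_for_each_ifa_rtnl()` iterator from `<linux/inetdevice.h>`, which this file does not itself use, the same lookup can be written as:

```c
#include <linux/inetdevice.h>

/* Hedged sketch: return the first configured IPv4 address under RTNL,
 * or 0 when the device has none.
 */
static __be32 example_first_local_ip(struct in_device *in_dev)
{
	const struct in_ifaddr *ifa;

	in_dev_for_each_ifa_rtnl(ifa, in_dev)
		return ifa->ifa_local;

	return 0;
}
```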
```diff
@@ -733,7 +775,8 @@
 
 			read_lock_bh(&idev->lock);
 			list_for_each_entry(ifp, &idev->addr_list, if_list) {
-				if (ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL)
+				if (!!(ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL) !=
+				    !!(ipv6_addr_type(&np->remote_ip.in6) & IPV6_ADDR_LINKLOCAL))
 					continue;
 				np->local_ip.in6 = ifp->addr;
 				err = 0;
```
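The old code skipped every link-local address outright; the new condition instead requires the chosen local address to match the remote's scope, so a link-local peer is reached from a link-local source and a global peer from a global one. The `!! ... != !! ...` construct just normalizes both bit tests to 0/1 before comparing; an equivalent, more verbose predicate:

```c
#include <net/ipv6.h>
#include <net/addrconf.h>

/* Equivalent to the new check: true when both addresses are link-local
 * or both are not.
 */
static bool example_scope_matches(const struct in6_addr *a,
				  const struct in6_addr *b)
{
	bool a_ll = ipv6_addr_type(a) & IPV6_ADDR_LINKLOCAL;
	bool b_ll = ipv6_addr_type(b) & IPV6_ADDR_LINKLOCAL;

	return a_ll == b_ll;
}
```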
```diff
@@ -802,10 +845,6 @@
 {
 	struct netpoll_info *npinfo;
 
-	/* rtnl_dereference would be preferable here but
-	 * rcu_cleanup_netpoll path can put us in here safely without
-	 * holding the rtnl, so plain rcu_dereference it is
-	 */
 	npinfo = rtnl_dereference(np->dev->npinfo);
 	if (!npinfo)
 		return;
```
```diff
@@ -820,9 +859,9 @@
 			ops->ndo_netpoll_cleanup(np->dev);
 
 		RCU_INIT_POINTER(np->dev->npinfo, NULL);
-		call_rcu_bh(&npinfo->rcu, rcu_cleanup_netpoll_info);
+		call_rcu(&npinfo->rcu, rcu_cleanup_netpoll_info);
 	} else
 		RCU_INIT_POINTER(np->dev->npinfo, NULL);
 }
 EXPORT_SYMBOL_GPL(__netpoll_cleanup);
 
```
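`call_rcu_bh()` disappears here because the RCU flavors (bh, sched, plain) were consolidated around v5.0 to share a single grace period, retiring the dedicated `_bh` API; the behavior is unchanged. The pattern itself, shown generically (`example_*` names are illustrative, not from netpoll):

```c
/* Generic call_rcu() pattern: free an object only after every reader
 * that could still see it has left its RCU read-side section.
 */
struct example_obj {
	struct rcu_head rcu;
};

static void example_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct example_obj, rcu));
}

/* usage: call_rcu(&obj->rcu, example_free_rcu); */
```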
| 828 | 867 | |
|---|
| 829 | | -static void netpoll_async_cleanup(struct work_struct *work) |
|---|
| 868 | +void __netpoll_free(struct netpoll *np) |
|---|
| 830 | 869 | { |
|---|
| 831 | | - struct netpoll *np = container_of(work, struct netpoll, cleanup_work); |
|---|
| 870 | + ASSERT_RTNL(); |
|---|
| 832 | 871 | |
|---|
| 833 | | - rtnl_lock(); |
|---|
| 872 | + /* Wait for transmitting packets to finish before freeing. */ |
|---|
| 873 | + synchronize_rcu(); |
|---|
| 834 | 874 | __netpoll_cleanup(np); |
|---|
| 835 | | - rtnl_unlock(); |
|---|
| 836 | 875 | kfree(np); |
|---|
| 837 | 876 | } |
|---|
| 838 | | - |
|---|
| 839 | | -void __netpoll_free_async(struct netpoll *np) |
|---|
| 840 | | -{ |
|---|
| 841 | | - schedule_work(&np->cleanup_work); |
|---|
| 842 | | -} |
|---|
| 843 | | -EXPORT_SYMBOL_GPL(__netpoll_free_async); |
|---|
| 877 | +EXPORT_SYMBOL_GPL(__netpoll_free); |
|---|
| 844 | 878 | |
|---|
| 845 | 879 | void netpoll_cleanup(struct netpoll *np) |
|---|
| 846 | 880 | { |
|---|
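The work-queue based teardown (`netpoll_async_cleanup()` / `__netpoll_free_async()`) is replaced by a synchronous `__netpoll_free()`: it asserts RTNL, which its callers already hold, and uses `synchronize_rcu()` to wait out any in-flight netpoll transmit before freeing. A hedged sketch of a stacked-driver cleanup hook using it (`example_*` names are illustrative):

```c
/* Hedged caller sketch: detach and free a netpoll instance from an
 * ndo_netpoll_cleanup-style hook, which runs under RTNL as
 * __netpoll_free() now requires.
 */
struct example_priv {
	struct netpoll *np;
};

static void example_netpoll_cleanup(struct net_device *dev)
{
	struct example_priv *p = netdev_priv(dev);
	struct netpoll *np = p->np;

	if (!np)
		return;

	p->np = NULL;
	__netpoll_free(np);	/* waits for in-flight TX, then kfree()s np */
}
```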