| .. | .. |
|---|
| 1 | +// SPDX-License-Identifier: GPL-2.0-or-later |
|---|
| 1 | 2 | /* |
|---|
| 2 | 3 | * net/sched/sch_generic.c Generic packet scheduler routines. |
|---|
| 3 | | - * |
|---|
| 4 | | - * This program is free software; you can redistribute it and/or |
|---|
| 5 | | - * modify it under the terms of the GNU General Public License |
|---|
| 6 | | - * as published by the Free Software Foundation; either version |
|---|
| 7 | | - * 2 of the License, or (at your option) any later version. |
|---|
| 8 | 4 | * |
|---|
| 9 | 5 | * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> |
|---|
| 10 | 6 | * Jamal Hadi Salim, <hadi@cyberus.ca> 990601 |
|---|
| .. | .. |
|---|
| 32 | 28 | #include <net/pkt_sched.h> |
|---|
| 33 | 29 | #include <net/dst.h> |
|---|
| 34 | 30 | #include <trace/events/qdisc.h> |
|---|
| 31 | +#include <trace/events/net.h> |
|---|
| 35 | 32 | #include <net/xfrm.h> |
|---|
| 36 | 33 | |
|---|
| 37 | 34 | /* Qdisc to use by default */ |
|---|
| 38 | 35 | const struct Qdisc_ops *default_qdisc_ops = &pfifo_fast_ops; |
|---|
| 39 | 36 | EXPORT_SYMBOL(default_qdisc_ops); |
|---|
| 37 | + |
|---|
| 38 | +static void qdisc_maybe_clear_missed(struct Qdisc *q, |
|---|
| 39 | + const struct netdev_queue *txq) |
|---|
| 40 | +{ |
|---|
| 41 | + clear_bit(__QDISC_STATE_MISSED, &q->state); |
|---|
| 42 | + |
|---|
| 43 | + /* Make sure the below netif_xmit_frozen_or_stopped() |
|---|
| 44 | + * checking happens after clearing STATE_MISSED. |
|---|
| 45 | + */ |
|---|
| 46 | + smp_mb__after_atomic(); |
|---|
| 47 | + |
|---|
| 48 | + /* Checking netif_xmit_frozen_or_stopped() again to |
|---|
| 49 | + * make sure STATE_MISSED is set if the STATE_MISSED |
|---|
| 50 | + * set by netif_tx_wake_queue()'s rescheduling of |
|---|
| 51 | + * net_tx_action() is cleared by the above clear_bit(). |
|---|
| 52 | + */ |
|---|
| 53 | + if (!netif_xmit_frozen_or_stopped(txq)) |
|---|
| 54 | + set_bit(__QDISC_STATE_MISSED, &q->state); |
|---|
| 55 | +} |
|---|
| 40 | 56 | |
|---|
| 41 | 57 | /* Main transmission queue. */ |
|---|
| 42 | 58 | |
|---|
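The new qdisc_maybe_clear_missed() helper clears __QDISC_STATE_MISSED and then, after a barrier, re-checks the queue so a wakeup that raced with the clear is not lost. Below is a minimal userspace sketch of that clear/fence/re-check shape using C11 atomics; `missed` and `queue_stopped` are invented stand-ins for the qdisc state bit and netif_xmit_frozen_or_stopped(), not kernel API.

```c
/* Sketch only: the clear-bit / barrier / re-check pattern used by
 * qdisc_maybe_clear_missed(); all names are hypothetical, not kernel API.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool missed;        /* stands in for __QDISC_STATE_MISSED */
static atomic_bool queue_stopped; /* stands in for netif_xmit_frozen_or_stopped() */

static void maybe_clear_missed(void)
{
	/* Drop the pending-work flag first ... */
	atomic_store_explicit(&missed, false, memory_order_relaxed);

	/* ... and order the re-check below after the clear, mirroring
	 * smp_mb__after_atomic() in the kernel code.
	 */
	atomic_thread_fence(memory_order_seq_cst);

	/* If the queue is actually runnable, re-set the flag: a concurrent
	 * waker may have raised it just before our clear_bit() wiped it out,
	 * and that pending work must not be forgotten.
	 */
	if (!atomic_load_explicit(&queue_stopped, memory_order_relaxed))
		atomic_store_explicit(&missed, true, memory_order_relaxed);
}

int main(void)
{
	atomic_store(&queue_stopped, false);
	atomic_store(&missed, true);
	maybe_clear_missed();
	printf("missed=%d after clearing on a runnable queue\n",
	       atomic_load(&missed));
	return 0;
}
```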
| .. | .. |
|---|
| 70 | 86 | skb = __skb_dequeue(&q->skb_bad_txq); |
|---|
| 71 | 87 | if (qdisc_is_percpu_stats(q)) { |
|---|
| 72 | 88 | qdisc_qstats_cpu_backlog_dec(q, skb); |
|---|
| 73 | | - qdisc_qstats_atomic_qlen_dec(q); |
|---|
| 89 | + qdisc_qstats_cpu_qlen_dec(q); |
|---|
| 74 | 90 | } else { |
|---|
| 75 | 91 | qdisc_qstats_backlog_dec(q, skb); |
|---|
| 76 | 92 | q->q.qlen--; |
|---|
| 77 | 93 | } |
|---|
| 78 | 94 | } else { |
|---|
| 79 | 95 | skb = SKB_XOFF_MAGIC; |
|---|
| 96 | + qdisc_maybe_clear_missed(q, txq); |
|---|
| 80 | 97 | } |
|---|
| 81 | 98 | } |
|---|
| 82 | 99 | |
|---|
| .. | .. |
|---|
| 110 | 127 | |
|---|
| 111 | 128 | if (qdisc_is_percpu_stats(q)) { |
|---|
| 112 | 129 | qdisc_qstats_cpu_backlog_inc(q, skb); |
|---|
| 113 | | - qdisc_qstats_atomic_qlen_inc(q); |
|---|
| 130 | + qdisc_qstats_cpu_qlen_inc(q); |
|---|
| 114 | 131 | } else { |
|---|
| 115 | 132 | qdisc_qstats_backlog_inc(q, skb); |
|---|
| 116 | 133 | q->q.qlen++; |
|---|
| .. | .. |
|---|
| 120 | 137 | spin_unlock(lock); |
|---|
| 121 | 138 | } |
|---|
| 122 | 139 | |
|---|
| 123 | | -static inline int __dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q) |
|---|
| 140 | +static inline void dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q) |
|---|
| 124 | 141 | { |
|---|
| 125 | | - while (skb) { |
|---|
| 126 | | - struct sk_buff *next = skb->next; |
|---|
| 142 | + spinlock_t *lock = NULL; |
|---|
| 127 | 143 | |
|---|
| 128 | | - __skb_queue_tail(&q->gso_skb, skb); |
|---|
| 129 | | - q->qstats.requeues++; |
|---|
| 130 | | - qdisc_qstats_backlog_inc(q, skb); |
|---|
| 131 | | - q->q.qlen++; /* it's still part of the queue */ |
|---|
| 132 | | - |
|---|
| 133 | | - skb = next; |
|---|
| 144 | + if (q->flags & TCQ_F_NOLOCK) { |
|---|
| 145 | + lock = qdisc_lock(q); |
|---|
| 146 | + spin_lock(lock); |
|---|
| 134 | 147 | } |
|---|
| 135 | | - __netif_schedule(q); |
|---|
| 136 | 148 | |
|---|
| 137 | | - return 0; |
|---|
| 138 | | -} |
|---|
| 139 | | - |
|---|
| 140 | | -static inline int dev_requeue_skb_locked(struct sk_buff *skb, struct Qdisc *q) |
|---|
| 141 | | -{ |
|---|
| 142 | | - spinlock_t *lock = qdisc_lock(q); |
|---|
| 143 | | - |
|---|
| 144 | | - spin_lock(lock); |
|---|
| 145 | 149 | while (skb) { |
|---|
| 146 | 150 | struct sk_buff *next = skb->next; |
|---|
| 147 | 151 | |
|---|
| 148 | 152 | __skb_queue_tail(&q->gso_skb, skb); |
|---|
| 149 | 153 | |
|---|
| 150 | | - qdisc_qstats_cpu_requeues_inc(q); |
|---|
| 151 | | - qdisc_qstats_cpu_backlog_inc(q, skb); |
|---|
| 152 | | - qdisc_qstats_atomic_qlen_inc(q); |
|---|
| 154 | + /* it's still part of the queue */ |
|---|
| 155 | + if (qdisc_is_percpu_stats(q)) { |
|---|
| 156 | + qdisc_qstats_cpu_requeues_inc(q); |
|---|
| 157 | + qdisc_qstats_cpu_backlog_inc(q, skb); |
|---|
| 158 | + qdisc_qstats_cpu_qlen_inc(q); |
|---|
| 159 | + } else { |
|---|
| 160 | + q->qstats.requeues++; |
|---|
| 161 | + qdisc_qstats_backlog_inc(q, skb); |
|---|
| 162 | + q->q.qlen++; |
|---|
| 163 | + } |
|---|
| 153 | 164 | |
|---|
| 154 | 165 | skb = next; |
|---|
| 155 | 166 | } |
|---|
| 156 | | - spin_unlock(lock); |
|---|
| 157 | | - |
|---|
| 167 | + if (lock) |
|---|
| 168 | + spin_unlock(lock); |
|---|
| 158 | 169 | __netif_schedule(q); |
|---|
| 159 | | - |
|---|
| 160 | | - return 0; |
|---|
| 161 | | -} |
|---|
| 162 | | - |
|---|
| 163 | | -static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q) |
|---|
| 164 | | -{ |
|---|
| 165 | | - if (q->flags & TCQ_F_NOLOCK) |
|---|
| 166 | | - return dev_requeue_skb_locked(skb, q); |
|---|
| 167 | | - else |
|---|
| 168 | | - return __dev_requeue_skb(skb, q); |
|---|
| 169 | 170 | } |
|---|
| 170 | 171 | |
|---|
| 171 | 172 | static void try_bulk_dequeue_skb(struct Qdisc *q, |
|---|
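The two requeue variants are folded into one dev_requeue_skb() that takes the qdisc lock only for TCQ_F_NOLOCK qdiscs and selects per-CPU or locked stats per skb. A rough userspace sketch of that conditional-lock shape follows, with invented toy_* types and the per-CPU stats branch collapsed into plain counters.

```c
/* Sketch only: mirrors the shape of the unified dev_requeue_skb() -- take the
 * queue lock only when the NOLOCK-style flag is set, then walk the packet
 * list and account stats.  Types and names are invented for illustration.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct pkt { struct pkt *next; unsigned int len; };

struct toy_qdisc {
	bool		nolock;		/* stands in for TCQ_F_NOLOCK */
	pthread_mutex_t	lock;		/* stands in for qdisc_lock(q) */
	struct pkt	*requeued;	/* stands in for q->gso_skb */
	unsigned long	requeues, backlog, qlen;
};

static void toy_requeue(struct pkt *pkt, struct toy_qdisc *q)
{
	/* Lockless-style qdiscs still need the lock to protect the list here. */
	if (q->nolock)
		pthread_mutex_lock(&q->lock);

	while (pkt) {
		struct pkt *next = pkt->next;

		pkt->next = q->requeued;	/* park it for a later retry */
		q->requeued = pkt;

		q->requeues++;
		q->backlog += pkt->len;
		q->qlen++;			/* still part of the queue */

		pkt = next;
	}

	if (q->nolock)
		pthread_mutex_unlock(&q->lock);
	/* the real code now calls __netif_schedule(q) to retry transmission */
}

int main(void)
{
	struct toy_qdisc q = { .nolock = true, .lock = PTHREAD_MUTEX_INITIALIZER };
	struct pkt a = { .len = 100 }, b = { .len = 200 };

	a.next = &b;
	toy_requeue(&a, &q);
	printf("requeues=%lu backlog=%lu qlen=%lu\n", q.requeues, q.backlog, q.qlen);
	return 0;
}
```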
| .. | .. |
|---|
| 186 | 187 | skb = nskb; |
|---|
| 187 | 188 | (*packets)++; /* GSO counts as one pkt */ |
|---|
| 188 | 189 | } |
|---|
| 189 | | - skb->next = NULL; |
|---|
| 190 | + skb_mark_not_on_list(skb); |
|---|
| 190 | 191 | } |
|---|
| 191 | 192 | |
|---|
| 192 | 193 | /* This variant of try_bulk_dequeue_skb() makes sure |
|---|
| .. | .. |
|---|
| 212 | 213 | skb = nskb; |
|---|
| 213 | 214 | } while (++cnt < 8); |
|---|
| 214 | 215 | (*packets) += cnt; |
|---|
| 215 | | - skb->next = NULL; |
|---|
| 216 | + skb_mark_not_on_list(skb); |
|---|
| 216 | 217 | } |
|---|
| 217 | 218 | |
|---|
| 218 | 219 | /* Note that dequeue_skb can possibly return a SKB list (via skb->next). |
|---|
| .. | .. |
|---|
| 254 | 255 | skb = __skb_dequeue(&q->gso_skb); |
|---|
| 255 | 256 | if (qdisc_is_percpu_stats(q)) { |
|---|
| 256 | 257 | qdisc_qstats_cpu_backlog_dec(q, skb); |
|---|
| 257 | | - qdisc_qstats_atomic_qlen_dec(q); |
|---|
| 258 | + qdisc_qstats_cpu_qlen_dec(q); |
|---|
| 258 | 259 | } else { |
|---|
| 259 | 260 | qdisc_qstats_backlog_dec(q, skb); |
|---|
| 260 | 261 | q->q.qlen--; |
|---|
| 261 | 262 | } |
|---|
| 262 | 263 | } else { |
|---|
| 263 | 264 | skb = NULL; |
|---|
| 265 | + qdisc_maybe_clear_missed(q, txq); |
|---|
| 264 | 266 | } |
|---|
| 265 | 267 | if (lock) |
|---|
| 266 | 268 | spin_unlock(lock); |
|---|
| .. | .. |
|---|
| 270 | 272 | *validate = true; |
|---|
| 271 | 273 | |
|---|
| 272 | 274 | if ((q->flags & TCQ_F_ONETXQUEUE) && |
|---|
| 273 | | - netif_xmit_frozen_or_stopped(txq)) |
|---|
| 275 | + netif_xmit_frozen_or_stopped(txq)) { |
|---|
| 276 | + qdisc_maybe_clear_missed(q, txq); |
|---|
| 274 | 277 | return skb; |
|---|
| 278 | + } |
|---|
| 275 | 279 | |
|---|
| 276 | 280 | skb = qdisc_dequeue_skb_bad_txq(q); |
|---|
| 277 | 281 | if (unlikely(skb)) { |
|---|
| .. | .. |
|---|
| 330 | 334 | HARD_TX_LOCK(dev, txq, smp_processor_id()); |
|---|
| 331 | 335 | if (!netif_xmit_frozen_or_stopped(txq)) |
|---|
| 332 | 336 | skb = dev_hard_start_xmit(skb, dev, txq, &ret); |
|---|
| 337 | + else |
|---|
| 338 | + qdisc_maybe_clear_missed(q, txq); |
|---|
| 333 | 339 | |
|---|
| 334 | 340 | HARD_TX_UNLOCK(dev, txq); |
|---|
| 335 | 341 | } else { |
|---|
| .. | .. |
|---|
| 397 | 403 | |
|---|
| 398 | 404 | void __qdisc_run(struct Qdisc *q) |
|---|
| 399 | 405 | { |
|---|
| 400 | | - int quota = dev_tx_weight; |
|---|
| 406 | + int quota = READ_ONCE(dev_tx_weight); |
|---|
| 401 | 407 | int packets; |
|---|
| 402 | 408 | |
|---|
| 403 | 409 | while (qdisc_restart(q, &packets)) { |
|---|
| 404 | | - /* |
|---|
| 405 | | - * Ordered by possible occurrence: Postpone processing if |
|---|
| 406 | | - * 1. we've exceeded packet quota |
|---|
| 407 | | - * 2. another process needs the CPU; |
|---|
| 408 | | - */ |
|---|
| 409 | 410 | quota -= packets; |
|---|
| 410 | | - if (quota <= 0 || need_resched()) { |
|---|
| 411 | + if (quota <= 0) { |
|---|
| 411 | 412 | __netif_schedule(q); |
|---|
| 412 | 413 | break; |
|---|
| 413 | 414 | } |
|---|
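__qdisc_run() now reads dev_tx_weight once and stops purely on the packet quota; the explicit need_resched() bail-out is gone. A toy sketch of the same budget loop, with process_batch() and reschedule() as stand-ins for qdisc_restart() and __netif_schedule():

```c
/* Sketch only: the budget pattern used by __qdisc_run() -- process work until
 * a fixed quota is spent, then defer the rest instead of monopolizing the CPU.
 * process_batch() and reschedule() are stand-ins, not kernel functions.
 */
#include <stdio.h>

#define TOY_TX_WEIGHT 64	/* plays the role of dev_tx_weight */

static int pending = 200;	/* packets waiting in our toy queue */

/* Returns how many packets were handled, 0 once the queue drains. */
static int process_batch(void)
{
	int n = pending > 8 ? 8 : pending;	/* bulk dequeue up to 8 */

	pending -= n;
	return n;
}

static void reschedule(void)
{
	printf("quota exhausted, deferring %d packets to a later run\n", pending);
}

static void toy_qdisc_run(void)
{
	int quota = TOY_TX_WEIGHT;
	int packets;

	while ((packets = process_batch()) != 0) {
		quota -= packets;
		if (quota <= 0) {
			reschedule();
			break;
		}
	}
}

int main(void)
{
	toy_qdisc_run();
	return 0;
}
```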
| .. | .. |
|---|
| 462 | 463 | } |
|---|
| 463 | 464 | |
|---|
| 464 | 465 | if (some_queue_timedout) { |
|---|
| 466 | + trace_net_dev_xmit_timeout(dev, i); |
|---|
| 465 | 467 | WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit queue %u timed out\n", |
|---|
| 466 | 468 | dev->name, netdev_drivername(dev), i); |
|---|
| 467 | | - dev->netdev_ops->ndo_tx_timeout(dev); |
|---|
| 469 | + dev->netdev_ops->ndo_tx_timeout(dev, i); |
|---|
| 468 | 470 | } |
|---|
| 469 | 471 | if (!mod_timer(&dev->watchdog_timer, |
|---|
| 470 | 472 | round_jiffies(jiffies + |
|---|
| .. | .. |
|---|
| 506 | 508 | * netif_carrier_on - set carrier |
|---|
| 507 | 509 | * @dev: network device |
|---|
| 508 | 510 | * |
|---|
| 509 | | - * Device has detected that carrier. |
|---|
| 511 | + * Device has detected acquisition of carrier. |
|---|
| 510 | 512 | */ |
|---|
| 511 | 513 | void netif_carrier_on(struct net_device *dev) |
|---|
| 512 | 514 | { |
|---|
| .. | .. |
|---|
| 565 | 567 | }; |
|---|
| 566 | 568 | |
|---|
| 567 | 569 | static struct netdev_queue noop_netdev_queue = { |
|---|
| 568 | | - .qdisc = &noop_qdisc, |
|---|
| 570 | + RCU_POINTER_INITIALIZER(qdisc, &noop_qdisc), |
|---|
| 569 | 571 | .qdisc_sleeping = &noop_qdisc, |
|---|
| 570 | 572 | }; |
|---|
| 571 | 573 | |
|---|
| .. | .. |
|---|
| 576 | 578 | .ops = &noop_qdisc_ops, |
|---|
| 577 | 579 | .q.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock), |
|---|
| 578 | 580 | .dev_queue = &noop_netdev_queue, |
|---|
| 579 | | -#ifdef CONFIG_PREEMPT_RT_BASE |
|---|
| 581 | +#ifdef CONFIG_PREEMPT_RT |
|---|
| 580 | 582 | .running = __SEQLOCK_UNLOCKED(noop_qdisc.running), |
|---|
| 581 | 583 | #else |
|---|
| 582 | 584 | .running = SEQCNT_ZERO(noop_qdisc.running), |
|---|
| .. | .. |
|---|
| 652 | 654 | |
|---|
| 653 | 655 | err = skb_array_produce(q, skb); |
|---|
| 654 | 656 | |
|---|
| 655 | | - if (unlikely(err)) |
|---|
| 656 | | - return qdisc_drop_cpu(skb, qdisc, to_free); |
|---|
| 657 | + if (unlikely(err)) { |
|---|
| 658 | + if (qdisc_is_percpu_stats(qdisc)) |
|---|
| 659 | + return qdisc_drop_cpu(skb, qdisc, to_free); |
|---|
| 660 | + else |
|---|
| 661 | + return qdisc_drop(skb, qdisc, to_free); |
|---|
| 662 | + } |
|---|
| 657 | 663 | |
|---|
| 658 | | - qdisc_qstats_atomic_qlen_inc(qdisc); |
|---|
| 659 | | - /* Note: skb can not be used after skb_array_produce(), |
|---|
| 660 | | - * so we better not use qdisc_qstats_cpu_backlog_inc() |
|---|
| 661 | | - */ |
|---|
| 662 | | - this_cpu_add(qdisc->cpu_qstats->backlog, pkt_len); |
|---|
| 664 | + qdisc_update_stats_at_enqueue(qdisc, pkt_len); |
|---|
| 663 | 665 | return NET_XMIT_SUCCESS; |
|---|
| 664 | 666 | } |
|---|
| 665 | 667 | |
|---|
| .. | .. |
|---|
| 667 | 669 | { |
|---|
| 668 | 670 | struct pfifo_fast_priv *priv = qdisc_priv(qdisc); |
|---|
| 669 | 671 | struct sk_buff *skb = NULL; |
|---|
| 672 | + bool need_retry = true; |
|---|
| 670 | 673 | int band; |
|---|
| 671 | 674 | |
|---|
| 675 | +retry: |
|---|
| 672 | 676 | for (band = 0; band < PFIFO_FAST_BANDS && !skb; band++) { |
|---|
| 673 | 677 | struct skb_array *q = band2list(priv, band); |
|---|
| 674 | 678 | |
|---|
| .. | .. |
|---|
| 678 | 682 | skb = __skb_array_consume(q); |
|---|
| 679 | 683 | } |
|---|
| 680 | 684 | if (likely(skb)) { |
|---|
| 681 | | - qdisc_qstats_cpu_backlog_dec(qdisc, skb); |
|---|
| 682 | | - qdisc_bstats_cpu_update(qdisc, skb); |
|---|
| 683 | | - qdisc_qstats_atomic_qlen_dec(qdisc); |
|---|
| 685 | + qdisc_update_stats_at_dequeue(qdisc, skb); |
|---|
| 686 | + } else if (need_retry && |
|---|
| 687 | + test_bit(__QDISC_STATE_MISSED, &qdisc->state)) { |
|---|
| 688 | + /* Delay clearing the STATE_MISSED here to reduce |
|---|
| 689 | + * the overhead of the second spin_trylock() in |
|---|
| 690 | + * qdisc_run_begin() and __netif_schedule() calling |
|---|
| 691 | + * in qdisc_run_end(). |
|---|
| 692 | + */ |
|---|
| 693 | + clear_bit(__QDISC_STATE_MISSED, &qdisc->state); |
|---|
| 694 | + |
|---|
| 695 | + /* Make sure dequeuing happens after clearing |
|---|
| 696 | + * STATE_MISSED. |
|---|
| 697 | + */ |
|---|
| 698 | + smp_mb__after_atomic(); |
|---|
| 699 | + |
|---|
| 700 | + need_retry = false; |
|---|
| 701 | + |
|---|
| 702 | + goto retry; |
|---|
| 703 | + } else { |
|---|
| 704 | + WRITE_ONCE(qdisc->empty, true); |
|---|
| 684 | 705 | } |
|---|
| 685 | 706 | |
|---|
| 686 | 707 | return skb; |
|---|
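When the ring looks empty but __QDISC_STATE_MISSED is set, pfifo_fast_dequeue() now clears the bit and retries the dequeue exactly once. A small userspace sketch of that retry-once pattern; the single-slot `slot` queue and the `missed` flag are invented for illustration.

```c
/* Sketch only: the "queue looked empty, but MISSED was set -- clear it and
 * look again" retry in pfifo_fast_dequeue().  The one-slot queue below is
 * invented; it is not the kernel skb_array.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int slot = -1;	/* -1 means empty */
static atomic_bool missed;

static int toy_dequeue(void)
{
	bool need_retry = true;
	int val;

retry:
	val = atomic_exchange(&slot, -1);	/* consume whatever is queued */
	if (val >= 0)
		return val;

	if (need_retry && atomic_load(&missed)) {
		/* A producer signalled us while this run was finishing; clear
		 * the flag, order the re-read after the clear, and look at
		 * the queue one more time.
		 */
		atomic_store(&missed, false);
		atomic_thread_fence(memory_order_seq_cst);
		need_retry = false;
		goto retry;
	}

	return -1;	/* genuinely empty */
}

int main(void)
{
	atomic_store(&missed, true);
	atomic_store(&slot, 42);	/* producer raced in just before dequeue */
	printf("dequeued %d\n", toy_dequeue());
	printf("dequeued %d on an empty queue\n", toy_dequeue());
	return 0;
}
```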
| .. | .. |
|---|
| 720 | 741 | kfree_skb(skb); |
|---|
| 721 | 742 | } |
|---|
| 722 | 743 | |
|---|
| 723 | | - for_each_possible_cpu(i) { |
|---|
| 724 | | - struct gnet_stats_queue *q = per_cpu_ptr(qdisc->cpu_qstats, i); |
|---|
| 744 | + if (qdisc_is_percpu_stats(qdisc)) { |
|---|
| 745 | + for_each_possible_cpu(i) { |
|---|
| 746 | + struct gnet_stats_queue *q; |
|---|
| 725 | 747 | |
|---|
| 726 | | - q->backlog = 0; |
|---|
| 748 | + q = per_cpu_ptr(qdisc->cpu_qstats, i); |
|---|
| 749 | + q->backlog = 0; |
|---|
| 750 | + q->qlen = 0; |
|---|
| 751 | + } |
|---|
| 727 | 752 | } |
|---|
| 728 | 753 | } |
|---|
| 729 | 754 | |
|---|
| .. | .. |
|---|
| 825 | 850 | const struct Qdisc_ops *ops, |
|---|
| 826 | 851 | struct netlink_ext_ack *extack) |
|---|
| 827 | 852 | { |
|---|
| 828 | | - void *p; |
|---|
| 829 | 853 | struct Qdisc *sch; |
|---|
| 830 | | - unsigned int size = QDISC_ALIGN(sizeof(*sch)) + ops->priv_size; |
|---|
| 854 | + unsigned int size = sizeof(*sch) + ops->priv_size; |
|---|
| 831 | 855 | int err = -ENOBUFS; |
|---|
| 832 | 856 | struct net_device *dev; |
|---|
| 833 | 857 | |
|---|
| .. | .. |
|---|
| 838 | 862 | } |
|---|
| 839 | 863 | |
|---|
| 840 | 864 | dev = dev_queue->dev; |
|---|
| 841 | | - p = kzalloc_node(size, GFP_KERNEL, |
|---|
| 842 | | - netdev_queue_numa_node_read(dev_queue)); |
|---|
| 865 | + sch = kzalloc_node(size, GFP_KERNEL, netdev_queue_numa_node_read(dev_queue)); |
|---|
| 843 | 866 | |
|---|
| 844 | | - if (!p) |
|---|
| 867 | + if (!sch) |
|---|
| 845 | 868 | goto errout; |
|---|
| 846 | | - sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p); |
|---|
| 847 | | - /* if we got non aligned memory, ask more and do alignment ourself */ |
|---|
| 848 | | - if (sch != p) { |
|---|
| 849 | | - kfree(p); |
|---|
| 850 | | - p = kzalloc_node(size + QDISC_ALIGNTO - 1, GFP_KERNEL, |
|---|
| 851 | | - netdev_queue_numa_node_read(dev_queue)); |
|---|
| 852 | | - if (!p) |
|---|
| 853 | | - goto errout; |
|---|
| 854 | | - sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p); |
|---|
| 855 | | - sch->padded = (char *) sch - (char *) p; |
|---|
| 856 | | - } |
|---|
| 857 | 869 | __skb_queue_head_init(&sch->gso_skb); |
|---|
| 858 | 870 | __skb_queue_head_init(&sch->skb_bad_txq); |
|---|
| 859 | 871 | qdisc_skb_head_init(&sch->q); |
|---|
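qdisc_alloc() drops the QDISC_ALIGN over-allocate-and-realign dance (and the padded offset used to free it) in favour of a single kzalloc_node() of sizeof(*sch) + ops->priv_size. A toy sketch of that header-plus-trailing-private-area layout in plain C; the toy_* names are invented, and it glosses over the alignment guarantees the real code gets from the kernel allocator.

```c
/* Sketch only: one allocation carrying a fixed header plus an ops-defined
 * private area behind it, roughly the layout qdisc_alloc()/qdisc_priv()
 * rely on.  toy_* names are invented for illustration.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct toy_qdisc {
	const char	*id;
	unsigned int	priv_size;
	/* ops private data follows the struct in the same allocation */
};

static void *toy_qdisc_priv(struct toy_qdisc *sch)
{
	return (char *)sch + sizeof(*sch);
}

static struct toy_qdisc *toy_qdisc_alloc(const char *id, unsigned int priv_size)
{
	struct toy_qdisc *sch = calloc(1, sizeof(*sch) + priv_size);

	if (!sch)
		return NULL;
	sch->id = id;
	sch->priv_size = priv_size;
	return sch;
}

int main(void)
{
	struct toy_qdisc *sch = toy_qdisc_alloc("pfifo_fast_toy", 64);

	if (!sch)
		return 1;
	memset(toy_qdisc_priv(sch), 0xab, sch->priv_size);
	printf("%s: priv area starts %zu bytes into the allocation\n",
	       sch->id, sizeof(*sch));
	free(sch);
	return 0;
}
```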
| .. | .. |
|---|
| 878 | 890 | |
|---|
| 879 | 891 | /* seqlock has the same scope of busylock, for NOLOCK qdisc */ |
|---|
| 880 | 892 | spin_lock_init(&sch->seqlock); |
|---|
| 881 | | - lockdep_set_class(&sch->busylock, |
|---|
| 893 | + lockdep_set_class(&sch->seqlock, |
|---|
| 882 | 894 | dev->qdisc_tx_busylock ?: &qdisc_tx_busylock); |
|---|
| 883 | 895 | |
|---|
| 884 | | -#ifdef CONFIG_PREEMPT_RT_BASE |
|---|
| 896 | +#ifdef CONFIG_PREEMPT_RT |
|---|
| 885 | 897 | seqlock_init(&sch->running); |
|---|
| 886 | | - lockdep_set_class(&sch->running.seqcount, |
|---|
| 887 | | - dev->qdisc_running_key ?: &qdisc_running_key); |
|---|
| 888 | 898 | lockdep_set_class(&sch->running.lock, |
|---|
| 889 | 899 | dev->qdisc_running_key ?: &qdisc_running_key); |
|---|
| 890 | 900 | #else |
|---|
| .. | .. |
|---|
| 898 | 908 | sch->enqueue = ops->enqueue; |
|---|
| 899 | 909 | sch->dequeue = ops->dequeue; |
|---|
| 900 | 910 | sch->dev_queue = dev_queue; |
|---|
| 911 | + sch->empty = true; |
|---|
| 901 | 912 | dev_hold(dev); |
|---|
| 902 | 913 | refcount_set(&sch->refcnt, 1); |
|---|
| 903 | 914 | |
|---|
| 904 | 915 | return sch; |
|---|
| 905 | 916 | errout1: |
|---|
| 906 | | - kfree(p); |
|---|
| 917 | + kfree(sch); |
|---|
| 907 | 918 | errout: |
|---|
| 908 | 919 | return ERR_PTR(err); |
|---|
| 909 | 920 | } |
|---|
| .. | .. |
|---|
| 927 | 938 | } |
|---|
| 928 | 939 | sch->parent = parentid; |
|---|
| 929 | 940 | |
|---|
| 930 | | - if (!ops->init || ops->init(sch, NULL, extack) == 0) |
|---|
| 941 | + if (!ops->init || ops->init(sch, NULL, extack) == 0) { |
|---|
| 942 | + trace_qdisc_create(ops, dev_queue->dev, parentid); |
|---|
| 931 | 943 | return sch; |
|---|
| 944 | + } |
|---|
| 932 | 945 | |
|---|
| 933 | 946 | qdisc_put(sch); |
|---|
| 934 | 947 | return NULL; |
|---|
| .. | .. |
|---|
| 941 | 954 | { |
|---|
| 942 | 955 | const struct Qdisc_ops *ops = qdisc->ops; |
|---|
| 943 | 956 | struct sk_buff *skb, *tmp; |
|---|
| 957 | + |
|---|
| 958 | + trace_qdisc_reset(qdisc); |
|---|
| 944 | 959 | |
|---|
| 945 | 960 | if (ops->reset) |
|---|
| 946 | 961 | ops->reset(qdisc); |
|---|
| .. | .. |
|---|
| 967 | 982 | free_percpu(qdisc->cpu_qstats); |
|---|
| 968 | 983 | } |
|---|
| 969 | 984 | |
|---|
| 970 | | - kfree((char *) qdisc - qdisc->padded); |
|---|
| 985 | + kfree(qdisc); |
|---|
| 971 | 986 | } |
|---|
| 972 | 987 | |
|---|
| 973 | 988 | static void qdisc_free_cb(struct rcu_head *head) |
|---|
| .. | .. |
|---|
| 979 | 994 | |
|---|
| 980 | 995 | static void qdisc_destroy(struct Qdisc *qdisc) |
|---|
| 981 | 996 | { |
|---|
| 982 | | - const struct Qdisc_ops *ops; |
|---|
| 983 | | - struct sk_buff *skb, *tmp; |
|---|
| 984 | | - |
|---|
| 985 | | - if (!qdisc) |
|---|
| 986 | | - return; |
|---|
| 987 | | - ops = qdisc->ops; |
|---|
| 997 | + const struct Qdisc_ops *ops = qdisc->ops; |
|---|
| 988 | 998 | |
|---|
| 989 | 999 | #ifdef CONFIG_NET_SCHED |
|---|
| 990 | 1000 | qdisc_hash_del(qdisc); |
|---|
| .. | .. |
|---|
| 992 | 1002 | qdisc_put_stab(rtnl_dereference(qdisc->stab)); |
|---|
| 993 | 1003 | #endif |
|---|
| 994 | 1004 | gen_kill_estimator(&qdisc->rate_est); |
|---|
| 995 | | - if (ops->reset) |
|---|
| 996 | | - ops->reset(qdisc); |
|---|
| 1005 | + |
|---|
| 1006 | + qdisc_reset(qdisc); |
|---|
| 1007 | + |
|---|
| 997 | 1008 | if (ops->destroy) |
|---|
| 998 | 1009 | ops->destroy(qdisc); |
|---|
| 999 | 1010 | |
|---|
| 1000 | 1011 | module_put(ops->owner); |
|---|
| 1001 | 1012 | dev_put(qdisc_dev(qdisc)); |
|---|
| 1002 | 1013 | |
|---|
| 1003 | | - skb_queue_walk_safe(&qdisc->gso_skb, skb, tmp) { |
|---|
| 1004 | | - __skb_unlink(skb, &qdisc->gso_skb); |
|---|
| 1005 | | - kfree_skb_list(skb); |
|---|
| 1006 | | - } |
|---|
| 1007 | | - |
|---|
| 1008 | | - skb_queue_walk_safe(&qdisc->skb_bad_txq, skb, tmp) { |
|---|
| 1009 | | - __skb_unlink(skb, &qdisc->skb_bad_txq); |
|---|
| 1010 | | - kfree_skb_list(skb); |
|---|
| 1011 | | - } |
|---|
| 1014 | + trace_qdisc_destroy(qdisc); |
|---|
| 1012 | 1015 | |
|---|
| 1013 | 1016 | call_rcu(&qdisc->rcu, qdisc_free_cb); |
|---|
| 1014 | 1017 | } |
|---|
| 1015 | 1018 | |
|---|
| 1016 | 1019 | void qdisc_put(struct Qdisc *qdisc) |
|---|
| 1017 | 1020 | { |
|---|
| 1021 | + if (!qdisc) |
|---|
| 1022 | + return; |
|---|
| 1023 | + |
|---|
| 1018 | 1024 | if (qdisc->flags & TCQ_F_BUILTIN || |
|---|
| 1019 | 1025 | !refcount_dec_and_test(&qdisc->refcnt)) |
|---|
| 1020 | 1026 | return; |
|---|
| .. | .. |
|---|
| 1061 | 1067 | } |
|---|
| 1062 | 1068 | EXPORT_SYMBOL(dev_graft_qdisc); |
|---|
| 1063 | 1069 | |
|---|
| 1070 | +static void shutdown_scheduler_queue(struct net_device *dev, |
|---|
| 1071 | + struct netdev_queue *dev_queue, |
|---|
| 1072 | + void *_qdisc_default) |
|---|
| 1073 | +{ |
|---|
| 1074 | + struct Qdisc *qdisc = dev_queue->qdisc_sleeping; |
|---|
| 1075 | + struct Qdisc *qdisc_default = _qdisc_default; |
|---|
| 1076 | + |
|---|
| 1077 | + if (qdisc) { |
|---|
| 1078 | + rcu_assign_pointer(dev_queue->qdisc, qdisc_default); |
|---|
| 1079 | + dev_queue->qdisc_sleeping = qdisc_default; |
|---|
| 1080 | + |
|---|
| 1081 | + qdisc_put(qdisc); |
|---|
| 1082 | + } |
|---|
| 1083 | +} |
|---|
| 1084 | + |
|---|
| 1064 | 1085 | static void attach_one_default_qdisc(struct net_device *dev, |
|---|
| 1065 | 1086 | struct netdev_queue *dev_queue, |
|---|
| 1066 | 1087 | void *_unused) |
|---|
| .. | .. |
|---|
| 1070 | 1091 | |
|---|
| 1071 | 1092 | if (dev->priv_flags & IFF_NO_QUEUE) |
|---|
| 1072 | 1093 | ops = &noqueue_qdisc_ops; |
|---|
| 1094 | + else if (dev->type == ARPHRD_CAN) |
|---|
| 1095 | + ops = &pfifo_fast_ops; |
|---|
| 1073 | 1096 | |
|---|
| 1074 | 1097 | qdisc = qdisc_create_dflt(dev_queue, ops, TC_H_ROOT, NULL); |
|---|
| 1075 | | - if (!qdisc) { |
|---|
| 1076 | | - netdev_info(dev, "activation failed\n"); |
|---|
| 1098 | + if (!qdisc) |
|---|
| 1077 | 1099 | return; |
|---|
| 1078 | | - } |
|---|
| 1100 | + |
|---|
| 1079 | 1101 | if (!netif_is_multiqueue(dev)) |
|---|
| 1080 | 1102 | qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT; |
|---|
| 1081 | 1103 | dev_queue->qdisc_sleeping = qdisc; |
|---|
| .. | .. |
|---|
| 1091 | 1113 | if (!netif_is_multiqueue(dev) || |
|---|
| 1092 | 1114 | dev->priv_flags & IFF_NO_QUEUE) { |
|---|
| 1093 | 1115 | netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL); |
|---|
| 1094 | | - dev->qdisc = txq->qdisc_sleeping; |
|---|
| 1095 | | - qdisc_refcount_inc(dev->qdisc); |
|---|
| 1116 | + qdisc = txq->qdisc_sleeping; |
|---|
| 1117 | + rcu_assign_pointer(dev->qdisc, qdisc); |
|---|
| 1118 | + qdisc_refcount_inc(qdisc); |
|---|
| 1096 | 1119 | } else { |
|---|
| 1097 | 1120 | qdisc = qdisc_create_dflt(txq, &mq_qdisc_ops, TC_H_ROOT, NULL); |
|---|
| 1098 | 1121 | if (qdisc) { |
|---|
| 1099 | | - dev->qdisc = qdisc; |
|---|
| 1122 | + rcu_assign_pointer(dev->qdisc, qdisc); |
|---|
| 1100 | 1123 | qdisc->ops->attach(qdisc); |
|---|
| 1101 | 1124 | } |
|---|
| 1102 | 1125 | } |
|---|
| 1126 | + qdisc = rtnl_dereference(dev->qdisc); |
|---|
| 1127 | + |
|---|
| 1128 | + /* Detect default qdisc setup/init failed and fallback to "noqueue" */ |
|---|
| 1129 | + if (qdisc == &noop_qdisc) { |
|---|
| 1130 | + netdev_warn(dev, "default qdisc (%s) fail, fallback to %s\n", |
|---|
| 1131 | + default_qdisc_ops->id, noqueue_qdisc_ops.id); |
|---|
| 1132 | + netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc); |
|---|
| 1133 | + dev->priv_flags |= IFF_NO_QUEUE; |
|---|
| 1134 | + netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL); |
|---|
| 1135 | + qdisc = txq->qdisc_sleeping; |
|---|
| 1136 | + rcu_assign_pointer(dev->qdisc, qdisc); |
|---|
| 1137 | + qdisc_refcount_inc(qdisc); |
|---|
| 1138 | + dev->priv_flags ^= IFF_NO_QUEUE; |
|---|
| 1139 | + } |
|---|
| 1140 | + |
|---|
| 1103 | 1141 | #ifdef CONFIG_NET_SCHED |
|---|
| 1104 | | - if (dev->qdisc != &noop_qdisc) |
|---|
| 1105 | | - qdisc_hash_add(dev->qdisc, false); |
|---|
| 1142 | + if (qdisc != &noop_qdisc) |
|---|
| 1143 | + qdisc_hash_add(qdisc, false); |
|---|
| 1106 | 1144 | #endif |
|---|
| 1107 | 1145 | } |
|---|
| 1108 | 1146 | |
|---|
| .. | .. |
|---|
| 1132 | 1170 | * and noqueue_qdisc for virtual interfaces |
|---|
| 1133 | 1171 | */ |
|---|
| 1134 | 1172 | |
|---|
| 1135 | | - if (dev->qdisc == &noop_qdisc) |
|---|
| 1173 | + if (rtnl_dereference(dev->qdisc) == &noop_qdisc) |
|---|
| 1136 | 1174 | attach_default_qdiscs(dev); |
|---|
| 1137 | 1175 | |
|---|
| 1138 | 1176 | if (!netif_carrier_ok(dev)) |
|---|
| .. | .. |
|---|
| 1151 | 1189 | } |
|---|
| 1152 | 1190 | EXPORT_SYMBOL(dev_activate); |
|---|
| 1153 | 1191 | |
|---|
| 1192 | +static void qdisc_deactivate(struct Qdisc *qdisc) |
|---|
| 1193 | +{ |
|---|
| 1194 | + if (qdisc->flags & TCQ_F_BUILTIN) |
|---|
| 1195 | + return; |
|---|
| 1196 | + |
|---|
| 1197 | + set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state); |
|---|
| 1198 | +} |
|---|
| 1199 | + |
|---|
| 1154 | 1200 | static void dev_deactivate_queue(struct net_device *dev, |
|---|
| 1155 | 1201 | struct netdev_queue *dev_queue, |
|---|
| 1156 | 1202 | void *_qdisc_default) |
|---|
| 1157 | 1203 | { |
|---|
| 1158 | | - struct Qdisc *qdisc = rtnl_dereference(dev_queue->qdisc); |
|---|
| 1159 | 1204 | struct Qdisc *qdisc_default = _qdisc_default; |
|---|
| 1205 | + struct Qdisc *qdisc; |
|---|
| 1160 | 1206 | |
|---|
| 1207 | + qdisc = rtnl_dereference(dev_queue->qdisc); |
|---|
| 1161 | 1208 | if (qdisc) { |
|---|
| 1162 | | - if (!(qdisc->flags & TCQ_F_BUILTIN)) |
|---|
| 1163 | | - set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state); |
|---|
| 1164 | | - |
|---|
| 1209 | + qdisc_deactivate(qdisc); |
|---|
| 1165 | 1210 | rcu_assign_pointer(dev_queue->qdisc, qdisc_default); |
|---|
| 1166 | 1211 | } |
|---|
| 1167 | 1212 | } |
|---|
| .. | .. |
|---|
| 1186 | 1231 | qdisc_reset(qdisc); |
|---|
| 1187 | 1232 | |
|---|
| 1188 | 1233 | spin_unlock_bh(qdisc_lock(qdisc)); |
|---|
| 1189 | | - if (nolock) |
|---|
| 1234 | + if (nolock) { |
|---|
| 1235 | + clear_bit(__QDISC_STATE_MISSED, &qdisc->state); |
|---|
| 1190 | 1236 | spin_unlock_bh(&qdisc->seqlock); |
|---|
| 1237 | + } |
|---|
| 1191 | 1238 | } |
|---|
| 1192 | 1239 | |
|---|
| 1193 | 1240 | static bool some_qdisc_is_busy(struct net_device *dev) |
|---|
| .. | .. |
|---|
| 1215 | 1262 | return true; |
|---|
| 1216 | 1263 | } |
|---|
| 1217 | 1264 | return false; |
|---|
| 1218 | | -} |
|---|
| 1219 | | - |
|---|
| 1220 | | -static void dev_qdisc_reset(struct net_device *dev, |
|---|
| 1221 | | - struct netdev_queue *dev_queue, |
|---|
| 1222 | | - void *none) |
|---|
| 1223 | | -{ |
|---|
| 1224 | | - struct Qdisc *qdisc = dev_queue->qdisc_sleeping; |
|---|
| 1225 | | - |
|---|
| 1226 | | - if (qdisc) |
|---|
| 1227 | | - qdisc_reset(qdisc); |
|---|
| 1228 | 1265 | } |
|---|
| 1229 | 1266 | |
|---|
| 1230 | 1267 | /** |
|---|
| .. | .. |
|---|
| 1264 | 1301 | |
|---|
| 1265 | 1302 | /* Wait for outstanding qdisc_run calls. */ |
|---|
| 1266 | 1303 | list_for_each_entry(dev, head, close_list) { |
|---|
| 1267 | | - while (some_qdisc_is_busy(dev)) |
|---|
| 1268 | | - msleep(1); |
|---|
| 1269 | | - /* The new qdisc is assigned at this point so we can safely |
|---|
| 1270 | | - * unwind stale skb lists and qdisc statistics |
|---|
| 1271 | | - */ |
|---|
| 1272 | | - netdev_for_each_tx_queue(dev, dev_qdisc_reset, NULL); |
|---|
| 1273 | | - if (dev_ingress_queue(dev)) |
|---|
| 1274 | | - dev_qdisc_reset(dev, dev_ingress_queue(dev), NULL); |
|---|
| 1304 | + while (some_qdisc_is_busy(dev)) { |
|---|
| 1305 | + /* wait_event() would avoid this sleep-loop but would |
|---|
| 1306 | + * require expensive checks in the fast paths of packet |
|---|
| 1307 | + * processing which isn't worth it. |
|---|
| 1308 | + */ |
|---|
| 1309 | + schedule_timeout_uninterruptible(1); |
|---|
| 1310 | + } |
|---|
| 1275 | 1311 | } |
|---|
| 1276 | 1312 | } |
|---|
| 1277 | 1313 | |
|---|
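dev_deactivate_many() no longer purges stale skb lists here, and it waits for busy qdiscs with schedule_timeout_uninterruptible(1) instead of msleep(1), keeping wait-queue bookkeeping out of the packet fast path as the added comment notes. A userspace sketch of that poll-and-sleep wait; toy_still_busy() stands in for some_qdisc_is_busy().

```c
/* Sketch only: wait for a busy predicate by polling with short sleeps, as
 * dev_deactivate_many() does, rather than wiring wakeups into the fast path.
 * toy_still_busy() is an invented stand-in for some_qdisc_is_busy().
 */
#define _POSIX_C_SOURCE 199309L
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static int remaining_work = 3;

static bool toy_still_busy(void)
{
	return remaining_work-- > 0;	/* pretend the qdiscs drain over time */
}

static void toy_wait_idle(void)
{
	/* One scheduler tick in the kernel; roughly 1ms here. */
	const struct timespec tick = { .tv_sec = 0, .tv_nsec = 1000 * 1000 };

	while (toy_still_busy()) {
		/* Cheap for the waiter, and keeps the packet fast paths free
		 * of wait-queue bookkeeping.
		 */
		nanosleep(&tick, NULL);
	}
}

int main(void)
{
	toy_wait_idle();
	printf("all toy qdiscs idle\n");
	return 0;
}
```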
| .. | .. |
|---|
| 1330 | 1366 | |
|---|
| 1331 | 1367 | void dev_init_scheduler(struct net_device *dev) |
|---|
| 1332 | 1368 | { |
|---|
| 1333 | | - dev->qdisc = &noop_qdisc; |
|---|
| 1369 | + rcu_assign_pointer(dev->qdisc, &noop_qdisc); |
|---|
| 1334 | 1370 | netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc); |
|---|
| 1335 | 1371 | if (dev_ingress_queue(dev)) |
|---|
| 1336 | 1372 | dev_init_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc); |
|---|
| .. | .. |
|---|
| 1338 | 1374 | timer_setup(&dev->watchdog_timer, dev_watchdog, 0); |
|---|
| 1339 | 1375 | } |
|---|
| 1340 | 1376 | |
|---|
| 1341 | | -static void shutdown_scheduler_queue(struct net_device *dev, |
|---|
| 1342 | | - struct netdev_queue *dev_queue, |
|---|
| 1343 | | - void *_qdisc_default) |
|---|
| 1344 | | -{ |
|---|
| 1345 | | - struct Qdisc *qdisc = dev_queue->qdisc_sleeping; |
|---|
| 1346 | | - struct Qdisc *qdisc_default = _qdisc_default; |
|---|
| 1347 | | - |
|---|
| 1348 | | - if (qdisc) { |
|---|
| 1349 | | - rcu_assign_pointer(dev_queue->qdisc, qdisc_default); |
|---|
| 1350 | | - dev_queue->qdisc_sleeping = qdisc_default; |
|---|
| 1351 | | - |
|---|
| 1352 | | - qdisc_put(qdisc); |
|---|
| 1353 | | - } |
|---|
| 1354 | | -} |
|---|
| 1355 | | - |
|---|
| 1356 | 1377 | void dev_shutdown(struct net_device *dev) |
|---|
| 1357 | 1378 | { |
|---|
| 1358 | 1379 | netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc); |
|---|
| 1359 | 1380 | if (dev_ingress_queue(dev)) |
|---|
| 1360 | 1381 | shutdown_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc); |
|---|
| 1361 | | - qdisc_put(dev->qdisc); |
|---|
| 1362 | | - dev->qdisc = &noop_qdisc; |
|---|
| 1382 | + qdisc_put(rtnl_dereference(dev->qdisc)); |
|---|
| 1383 | + rcu_assign_pointer(dev->qdisc, &noop_qdisc); |
|---|
| 1363 | 1384 | |
|---|
| 1364 | 1385 | WARN_ON(timer_pending(&dev->watchdog_timer)); |
|---|
| 1365 | 1386 | } |
|---|
| .. | .. |
|---|
| 1408 | 1429 | void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp, |
|---|
| 1409 | 1430 | struct tcf_proto *tp_head) |
|---|
| 1410 | 1431 | { |
|---|
| 1411 | | - struct mini_Qdisc *miniq_old = rtnl_dereference(*miniqp->p_miniq); |
|---|
| 1432 | + /* Protected with chain0->filter_chain_lock. |
|---|
| 1433 | + * Can't access chain directly because tp_head can be NULL. |
|---|
| 1434 | + */ |
|---|
| 1435 | + struct mini_Qdisc *miniq_old = |
|---|
| 1436 | + rcu_dereference_protected(*miniqp->p_miniq, 1); |
|---|
| 1412 | 1437 | struct mini_Qdisc *miniq; |
|---|
| 1413 | 1438 | |
|---|
| 1414 | 1439 | if (!tp_head) { |
|---|
| 1415 | 1440 | RCU_INIT_POINTER(*miniqp->p_miniq, NULL); |
|---|
| 1416 | 1441 | /* Wait for flying RCU callback before it is freed. */ |
|---|
| 1417 | | - rcu_barrier_bh(); |
|---|
| 1442 | + rcu_barrier(); |
|---|
| 1418 | 1443 | return; |
|---|
| 1419 | 1444 | } |
|---|
| 1420 | 1445 | |
|---|
| .. | .. |
|---|
| 1422 | 1447 | &miniqp->miniq1 : &miniqp->miniq2; |
|---|
| 1423 | 1448 | |
|---|
| 1424 | 1449 | /* We need to make sure that readers won't see the miniq |
|---|
| 1425 | | - * we are about to modify. So wait until previous call_rcu_bh callback |
|---|
| 1450 | + * we are about to modify. So wait until previous call_rcu callback |
|---|
| 1426 | 1451 | * is done. |
|---|
| 1427 | 1452 | */ |
|---|
| 1428 | | - rcu_barrier_bh(); |
|---|
| 1453 | + rcu_barrier(); |
|---|
| 1429 | 1454 | miniq->filter_list = tp_head; |
|---|
| 1430 | 1455 | rcu_assign_pointer(*miniqp->p_miniq, miniq); |
|---|
| 1431 | 1456 | |
|---|
| .. | .. |
|---|
| 1434 | 1459 | * block potential new user of miniq_old until all readers |
|---|
| 1435 | 1460 | * are not seeing it. |
|---|
| 1436 | 1461 | */ |
|---|
| 1437 | | - call_rcu_bh(&miniq_old->rcu, mini_qdisc_rcu_func); |
|---|
| 1462 | + call_rcu(&miniq_old->rcu, mini_qdisc_rcu_func); |
|---|
| 1438 | 1463 | } |
|---|
| 1439 | 1464 | EXPORT_SYMBOL(mini_qdisc_pair_swap); |
|---|
| 1440 | 1465 | |
|---|
| 1466 | +void mini_qdisc_pair_block_init(struct mini_Qdisc_pair *miniqp, |
|---|
| 1467 | + struct tcf_block *block) |
|---|
| 1468 | +{ |
|---|
| 1469 | + miniqp->miniq1.block = block; |
|---|
| 1470 | + miniqp->miniq2.block = block; |
|---|
| 1471 | +} |
|---|
| 1472 | +EXPORT_SYMBOL(mini_qdisc_pair_block_init); |
|---|
| 1473 | + |
|---|
| 1441 | 1474 | void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc, |
|---|
| 1442 | 1475 | struct mini_Qdisc __rcu **p_miniq) |
|---|
| 1443 | 1476 | { |
|---|