hc
2023-12-11 6778948f9de86c3cfaf36725a7c87dcff9ba247f
kernel/net/sched/sch_generic.c
@@ -1,10 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * net/sched/sch_generic.c	Generic packet scheduler routines.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
  *
  * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
  *		Jamal Hadi Salim, <hadi@cyberus.ca> 990601
@@ -32,11 +28,31 @@
 #include <net/pkt_sched.h>
 #include <net/dst.h>
 #include <trace/events/qdisc.h>
+#include <trace/events/net.h>
 #include <net/xfrm.h>
 
 /* Qdisc to use by default */
 const struct Qdisc_ops *default_qdisc_ops = &pfifo_fast_ops;
 EXPORT_SYMBOL(default_qdisc_ops);
+
+static void qdisc_maybe_clear_missed(struct Qdisc *q,
+				     const struct netdev_queue *txq)
+{
+	clear_bit(__QDISC_STATE_MISSED, &q->state);
+
+	/* Make sure the below netif_xmit_frozen_or_stopped()
+	 * checking happens after clearing STATE_MISSED.
+	 */
+	smp_mb__after_atomic();
+
+	/* Checking netif_xmit_frozen_or_stopped() again to
+	 * make sure STATE_MISSED is set if the STATE_MISSED
+	 * set by netif_tx_wake_queue()'s rescheduling of
+	 * net_tx_action() is cleared by the above clear_bit().
+	 */
+	if (!netif_xmit_frozen_or_stopped(txq))
+		set_bit(__QDISC_STATE_MISSED, &q->state);
+}
 
 /* Main transmission queue. */
 
@@ -70,13 +86,14 @@
 			skb = __skb_dequeue(&q->skb_bad_txq);
 			if (qdisc_is_percpu_stats(q)) {
 				qdisc_qstats_cpu_backlog_dec(q, skb);
-				qdisc_qstats_atomic_qlen_dec(q);
+				qdisc_qstats_cpu_qlen_dec(q);
 			} else {
 				qdisc_qstats_backlog_dec(q, skb);
 				q->q.qlen--;
 			}
 		} else {
 			skb = SKB_XOFF_MAGIC;
+			qdisc_maybe_clear_missed(q, txq);
 		}
 	}
 
@@ -110,7 +127,7 @@
 
 	if (qdisc_is_percpu_stats(q)) {
 		qdisc_qstats_cpu_backlog_inc(q, skb);
-		qdisc_qstats_atomic_qlen_inc(q);
+		qdisc_qstats_cpu_qlen_inc(q);
 	} else {
 		qdisc_qstats_backlog_inc(q, skb);
 		q->q.qlen++;
@@ -120,52 +137,36 @@
 		spin_unlock(lock);
 }
 
-static inline int __dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
+static inline void dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
 {
-	while (skb) {
-		struct sk_buff *next = skb->next;
+	spinlock_t *lock = NULL;
 
-		__skb_queue_tail(&q->gso_skb, skb);
-		q->qstats.requeues++;
-		qdisc_qstats_backlog_inc(q, skb);
-		q->q.qlen++;	/* it's still part of the queue */
-
-		skb = next;
+	if (q->flags & TCQ_F_NOLOCK) {
+		lock = qdisc_lock(q);
+		spin_lock(lock);
 	}
-	__netif_schedule(q);
 
-	return 0;
-}
-
-static inline int dev_requeue_skb_locked(struct sk_buff *skb, struct Qdisc *q)
-{
-	spinlock_t *lock = qdisc_lock(q);
-
-	spin_lock(lock);
 	while (skb) {
 		struct sk_buff *next = skb->next;
 
 		__skb_queue_tail(&q->gso_skb, skb);
 
-		qdisc_qstats_cpu_requeues_inc(q);
-		qdisc_qstats_cpu_backlog_inc(q, skb);
-		qdisc_qstats_atomic_qlen_inc(q);
+		/* it's still part of the queue */
+		if (qdisc_is_percpu_stats(q)) {
+			qdisc_qstats_cpu_requeues_inc(q);
+			qdisc_qstats_cpu_backlog_inc(q, skb);
+			qdisc_qstats_cpu_qlen_inc(q);
+		} else {
+			q->qstats.requeues++;
+			qdisc_qstats_backlog_inc(q, skb);
+			q->q.qlen++;
+		}
 
 		skb = next;
 	}
-	spin_unlock(lock);
-
+	if (lock)
+		spin_unlock(lock);
 	__netif_schedule(q);
-
-	return 0;
-}
-
-static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
-{
-	if (q->flags & TCQ_F_NOLOCK)
-		return dev_requeue_skb_locked(skb, q);
-	else
-		return __dev_requeue_skb(skb, q);
 }
 
 static void try_bulk_dequeue_skb(struct Qdisc *q,
@@ -186,7 +187,7 @@
 		skb = nskb;
 		(*packets)++; /* GSO counts as one pkt */
 	}
-	skb->next = NULL;
+	skb_mark_not_on_list(skb);
 }
 
 /* This variant of try_bulk_dequeue_skb() makes sure
@@ -212,7 +213,7 @@
 		skb = nskb;
 	} while (++cnt < 8);
 	(*packets) += cnt;
-	skb->next = NULL;
+	skb_mark_not_on_list(skb);
 }
 
 /* Note that dequeue_skb can possibly return a SKB list (via skb->next).
@@ -254,13 +255,14 @@
 			skb = __skb_dequeue(&q->gso_skb);
 			if (qdisc_is_percpu_stats(q)) {
 				qdisc_qstats_cpu_backlog_dec(q, skb);
-				qdisc_qstats_atomic_qlen_dec(q);
+				qdisc_qstats_cpu_qlen_dec(q);
 			} else {
 				qdisc_qstats_backlog_dec(q, skb);
 				q->q.qlen--;
 			}
 		} else {
 			skb = NULL;
+			qdisc_maybe_clear_missed(q, txq);
 		}
 		if (lock)
 			spin_unlock(lock);
@@ -270,8 +272,10 @@
 	*validate = true;
 
 	if ((q->flags & TCQ_F_ONETXQUEUE) &&
-	    netif_xmit_frozen_or_stopped(txq))
+	    netif_xmit_frozen_or_stopped(txq)) {
+		qdisc_maybe_clear_missed(q, txq);
 		return skb;
+	}
 
 	skb = qdisc_dequeue_skb_bad_txq(q);
 	if (unlikely(skb)) {
@@ -330,6 +334,8 @@
 		HARD_TX_LOCK(dev, txq, smp_processor_id());
 		if (!netif_xmit_frozen_or_stopped(txq))
 			skb = dev_hard_start_xmit(skb, dev, txq, &ret);
+		else
+			qdisc_maybe_clear_missed(q, txq);
 
 		HARD_TX_UNLOCK(dev, txq);
 	} else {
@@ -397,17 +403,12 @@
 
 void __qdisc_run(struct Qdisc *q)
 {
-	int quota = dev_tx_weight;
+	int quota = READ_ONCE(dev_tx_weight);
 	int packets;
 
 	while (qdisc_restart(q, &packets)) {
-		/*
-		 * Ordered by possible occurrence: Postpone processing if
-		 * 1. we've exceeded packet quota
-		 * 2. another process needs the CPU;
-		 */
 		quota -= packets;
-		if (quota <= 0 || need_resched()) {
+		if (quota <= 0) {
 			__netif_schedule(q);
 			break;
 		}
@@ -462,9 +463,10 @@
 		}
 
 		if (some_queue_timedout) {
+			trace_net_dev_xmit_timeout(dev, i);
 			WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit queue %u timed out\n",
 				  dev->name, netdev_drivername(dev), i);
-			dev->netdev_ops->ndo_tx_timeout(dev);
+			dev->netdev_ops->ndo_tx_timeout(dev, i);
 		}
 		if (!mod_timer(&dev->watchdog_timer,
 			       round_jiffies(jiffies +
@@ -506,7 +508,7 @@
  * netif_carrier_on - set carrier
  * @dev: network device
  *
- * Device has detected that carrier.
+ * Device has detected acquisition of carrier.
  */
 void netif_carrier_on(struct net_device *dev)
 {
@@ -565,7 +567,7 @@
 };
 
 static struct netdev_queue noop_netdev_queue = {
-	.qdisc = &noop_qdisc,
+	RCU_POINTER_INITIALIZER(qdisc, &noop_qdisc),
 	.qdisc_sleeping = &noop_qdisc,
 };
 
@@ -576,11 +578,7 @@
 	.ops = &noop_qdisc_ops,
 	.q.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
 	.dev_queue = &noop_netdev_queue,
-#ifdef CONFIG_PREEMPT_RT_BASE
-	.running = __SEQLOCK_UNLOCKED(noop_qdisc.running),
-#else
 	.running = SEQCNT_ZERO(noop_qdisc.running),
-#endif
 	.busylock = __SPIN_LOCK_UNLOCKED(noop_qdisc.busylock),
 	.gso_skb = {
 		.next = (struct sk_buff *)&noop_qdisc.gso_skb,
@@ -652,14 +650,14 @@
 
 	err = skb_array_produce(q, skb);
 
-	if (unlikely(err))
-		return qdisc_drop_cpu(skb, qdisc, to_free);
+	if (unlikely(err)) {
+		if (qdisc_is_percpu_stats(qdisc))
+			return qdisc_drop_cpu(skb, qdisc, to_free);
+		else
+			return qdisc_drop(skb, qdisc, to_free);
+	}
 
-	qdisc_qstats_atomic_qlen_inc(qdisc);
-	/* Note: skb can not be used after skb_array_produce(),
-	 * so we better not use qdisc_qstats_cpu_backlog_inc()
-	 */
-	this_cpu_add(qdisc->cpu_qstats->backlog, pkt_len);
+	qdisc_update_stats_at_enqueue(qdisc, pkt_len);
 	return NET_XMIT_SUCCESS;
 }
 
@@ -667,8 +665,10 @@
 {
 	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
 	struct sk_buff *skb = NULL;
+	bool need_retry = true;
 	int band;
 
+retry:
 	for (band = 0; band < PFIFO_FAST_BANDS && !skb; band++) {
 		struct skb_array *q = band2list(priv, band);
 
@@ -678,9 +678,26 @@
 		skb = __skb_array_consume(q);
 	}
 	if (likely(skb)) {
-		qdisc_qstats_cpu_backlog_dec(qdisc, skb);
-		qdisc_bstats_cpu_update(qdisc, skb);
-		qdisc_qstats_atomic_qlen_dec(qdisc);
+		qdisc_update_stats_at_dequeue(qdisc, skb);
+	} else if (need_retry &&
+		   test_bit(__QDISC_STATE_MISSED, &qdisc->state)) {
+		/* Delay clearing the STATE_MISSED here to reduce
+		 * the overhead of the second spin_trylock() in
+		 * qdisc_run_begin() and __netif_schedule() calling
+		 * in qdisc_run_end().
+		 */
+		clear_bit(__QDISC_STATE_MISSED, &qdisc->state);
+
+		/* Make sure dequeuing happens after clearing
+		 * STATE_MISSED.
+		 */
+		smp_mb__after_atomic();
+
+		need_retry = false;
+
+		goto retry;
+	} else {
+		WRITE_ONCE(qdisc->empty, true);
 	}
 
 	return skb;
@@ -720,10 +737,14 @@
 			kfree_skb(skb);
 	}
 
-	for_each_possible_cpu(i) {
-		struct gnet_stats_queue *q = per_cpu_ptr(qdisc->cpu_qstats, i);
+	if (qdisc_is_percpu_stats(qdisc)) {
+		for_each_possible_cpu(i) {
+			struct gnet_stats_queue *q;
 
-		q->backlog = 0;
+			q = per_cpu_ptr(qdisc->cpu_qstats, i);
+			q->backlog = 0;
+			q->qlen = 0;
+		}
 	}
 }
 
@@ -825,9 +846,8 @@
 			  const struct Qdisc_ops *ops,
 			  struct netlink_ext_ack *extack)
 {
-	void *p;
 	struct Qdisc *sch;
-	unsigned int size = QDISC_ALIGN(sizeof(*sch)) + ops->priv_size;
+	unsigned int size = sizeof(*sch) + ops->priv_size;
 	int err = -ENOBUFS;
 	struct net_device *dev;
 
@@ -838,22 +858,10 @@
 	}
 
 	dev = dev_queue->dev;
-	p = kzalloc_node(size, GFP_KERNEL,
-			 netdev_queue_numa_node_read(dev_queue));
+	sch = kzalloc_node(size, GFP_KERNEL, netdev_queue_numa_node_read(dev_queue));
 
-	if (!p)
+	if (!sch)
 		goto errout;
-	sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
-	/* if we got non aligned memory, ask more and do alignment ourself */
-	if (sch != p) {
-		kfree(p);
-		p = kzalloc_node(size + QDISC_ALIGNTO - 1, GFP_KERNEL,
-				 netdev_queue_numa_node_read(dev_queue));
-		if (!p)
-			goto errout;
-		sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
-		sch->padded = (char *) sch - (char *) p;
-	}
 	__skb_queue_head_init(&sch->gso_skb);
 	__skb_queue_head_init(&sch->skb_bad_txq);
 	qdisc_skb_head_init(&sch->q);
@@ -878,32 +886,25 @@
 
 	/* seqlock has the same scope of busylock, for NOLOCK qdisc */
 	spin_lock_init(&sch->seqlock);
-	lockdep_set_class(&sch->busylock,
+	lockdep_set_class(&sch->seqlock,
 			  dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);
 
-#ifdef CONFIG_PREEMPT_RT_BASE
-	seqlock_init(&sch->running);
-	lockdep_set_class(&sch->running.seqcount,
-			  dev->qdisc_running_key ?: &qdisc_running_key);
-	lockdep_set_class(&sch->running.lock,
-			  dev->qdisc_running_key ?: &qdisc_running_key);
-#else
 	seqcount_init(&sch->running);
 	lockdep_set_class(&sch->running,
 			  dev->qdisc_running_key ?: &qdisc_running_key);
-#endif
 
 	sch->ops = ops;
 	sch->flags = ops->static_flags;
 	sch->enqueue = ops->enqueue;
 	sch->dequeue = ops->dequeue;
 	sch->dev_queue = dev_queue;
+	sch->empty = true;
 	dev_hold(dev);
 	refcount_set(&sch->refcnt, 1);
 
 	return sch;
 errout1:
-	kfree(p);
+	kfree(sch);
 errout:
 	return ERR_PTR(err);
 }
@@ -927,8 +928,10 @@
 	}
 	sch->parent = parentid;
 
-	if (!ops->init || ops->init(sch, NULL, extack) == 0)
+	if (!ops->init || ops->init(sch, NULL, extack) == 0) {
+		trace_qdisc_create(ops, dev_queue->dev, parentid);
 		return sch;
+	}
 
 	qdisc_put(sch);
 	return NULL;
@@ -941,6 +944,8 @@
 {
 	const struct Qdisc_ops *ops = qdisc->ops;
 	struct sk_buff *skb, *tmp;
+
+	trace_qdisc_reset(qdisc);
 
 	if (ops->reset)
 		ops->reset(qdisc);
@@ -967,7 +972,7 @@
 		free_percpu(qdisc->cpu_qstats);
 	}
 
-	kfree((char *) qdisc - qdisc->padded);
+	kfree(qdisc);
 }
 
 static void qdisc_free_cb(struct rcu_head *head)
@@ -979,12 +984,7 @@
 
 static void qdisc_destroy(struct Qdisc *qdisc)
 {
-	const struct Qdisc_ops *ops;
-	struct sk_buff *skb, *tmp;
-
-	if (!qdisc)
-		return;
-	ops = qdisc->ops;
+	const struct Qdisc_ops *ops = qdisc->ops;
 
 #ifdef CONFIG_NET_SCHED
 	qdisc_hash_del(qdisc);
@@ -992,29 +992,25 @@
 	qdisc_put_stab(rtnl_dereference(qdisc->stab));
 #endif
 	gen_kill_estimator(&qdisc->rate_est);
-	if (ops->reset)
-		ops->reset(qdisc);
+
+	qdisc_reset(qdisc);
+
 	if (ops->destroy)
 		ops->destroy(qdisc);
 
 	module_put(ops->owner);
 	dev_put(qdisc_dev(qdisc));
 
-	skb_queue_walk_safe(&qdisc->gso_skb, skb, tmp) {
-		__skb_unlink(skb, &qdisc->gso_skb);
-		kfree_skb_list(skb);
-	}
-
-	skb_queue_walk_safe(&qdisc->skb_bad_txq, skb, tmp) {
-		__skb_unlink(skb, &qdisc->skb_bad_txq);
-		kfree_skb_list(skb);
-	}
+	trace_qdisc_destroy(qdisc);
 
 	call_rcu(&qdisc->rcu, qdisc_free_cb);
 }
 
 void qdisc_put(struct Qdisc *qdisc)
 {
+	if (!qdisc)
+		return;
+
 	if (qdisc->flags & TCQ_F_BUILTIN ||
 	    !refcount_dec_and_test(&qdisc->refcnt))
 		return;
@@ -1061,6 +1057,21 @@
 }
 EXPORT_SYMBOL(dev_graft_qdisc);
 
+static void shutdown_scheduler_queue(struct net_device *dev,
+				     struct netdev_queue *dev_queue,
+				     void *_qdisc_default)
+{
+	struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
+	struct Qdisc *qdisc_default = _qdisc_default;
+
+	if (qdisc) {
+		rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
+		dev_queue->qdisc_sleeping = qdisc_default;
+
+		qdisc_put(qdisc);
+	}
+}
+
 static void attach_one_default_qdisc(struct net_device *dev,
 				     struct netdev_queue *dev_queue,
 				     void *_unused)
@@ -1070,12 +1081,13 @@
 
 	if (dev->priv_flags & IFF_NO_QUEUE)
 		ops = &noqueue_qdisc_ops;
+	else if(dev->type == ARPHRD_CAN)
+		ops = &pfifo_fast_ops;
 
 	qdisc = qdisc_create_dflt(dev_queue, ops, TC_H_ROOT, NULL);
-	if (!qdisc) {
-		netdev_info(dev, "activation failed\n");
+	if (!qdisc)
 		return;
-	}
+
 	if (!netif_is_multiqueue(dev))
 		qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
 	dev_queue->qdisc_sleeping = qdisc;
@@ -1091,18 +1103,34 @@
 	if (!netif_is_multiqueue(dev) ||
 	    dev->priv_flags & IFF_NO_QUEUE) {
 		netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
-		dev->qdisc = txq->qdisc_sleeping;
-		qdisc_refcount_inc(dev->qdisc);
+		qdisc = txq->qdisc_sleeping;
+		rcu_assign_pointer(dev->qdisc, qdisc);
+		qdisc_refcount_inc(qdisc);
 	} else {
 		qdisc = qdisc_create_dflt(txq, &mq_qdisc_ops, TC_H_ROOT, NULL);
 		if (qdisc) {
-			dev->qdisc = qdisc;
+			rcu_assign_pointer(dev->qdisc, qdisc);
 			qdisc->ops->attach(qdisc);
 		}
 	}
+	qdisc = rtnl_dereference(dev->qdisc);
+
+	/* Detect default qdisc setup/init failed and fallback to "noqueue" */
+	if (qdisc == &noop_qdisc) {
+		netdev_warn(dev, "default qdisc (%s) fail, fallback to %s\n",
+			    default_qdisc_ops->id, noqueue_qdisc_ops.id);
+		netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
+		dev->priv_flags |= IFF_NO_QUEUE;
+		netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
+		qdisc = txq->qdisc_sleeping;
+		rcu_assign_pointer(dev->qdisc, qdisc);
+		qdisc_refcount_inc(qdisc);
+		dev->priv_flags ^= IFF_NO_QUEUE;
+	}
+
 #ifdef CONFIG_NET_SCHED
-	if (dev->qdisc != &noop_qdisc)
-		qdisc_hash_add(dev->qdisc, false);
+	if (qdisc != &noop_qdisc)
+		qdisc_hash_add(qdisc, false);
 #endif
 }
 
@@ -1132,7 +1160,7 @@
 	 * and noqueue_qdisc for virtual interfaces
 	 */
 
-	if (dev->qdisc == &noop_qdisc)
+	if (rtnl_dereference(dev->qdisc) == &noop_qdisc)
 		attach_default_qdiscs(dev);
 
 	if (!netif_carrier_ok(dev))
@@ -1151,17 +1179,24 @@
 }
 EXPORT_SYMBOL(dev_activate);
 
+static void qdisc_deactivate(struct Qdisc *qdisc)
+{
+	if (qdisc->flags & TCQ_F_BUILTIN)
+		return;
+
+	set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state);
+}
+
 static void dev_deactivate_queue(struct net_device *dev,
 				 struct netdev_queue *dev_queue,
 				 void *_qdisc_default)
 {
-	struct Qdisc *qdisc = rtnl_dereference(dev_queue->qdisc);
 	struct Qdisc *qdisc_default = _qdisc_default;
+	struct Qdisc *qdisc;
 
+	qdisc = rtnl_dereference(dev_queue->qdisc);
 	if (qdisc) {
-		if (!(qdisc->flags & TCQ_F_BUILTIN))
-			set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state);
-
+		qdisc_deactivate(qdisc);
 		rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
 	}
 }
@@ -1186,8 +1221,10 @@
 	qdisc_reset(qdisc);
 
 	spin_unlock_bh(qdisc_lock(qdisc));
-	if (nolock)
+	if (nolock) {
+		clear_bit(__QDISC_STATE_MISSED, &qdisc->state);
 		spin_unlock_bh(&qdisc->seqlock);
+	}
 }
 
 static bool some_qdisc_is_busy(struct net_device *dev)
@@ -1215,16 +1252,6 @@
 			return true;
 	}
 	return false;
-}
-
-static void dev_qdisc_reset(struct net_device *dev,
-			    struct netdev_queue *dev_queue,
-			    void *none)
-{
-	struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
-
-	if (qdisc)
-		qdisc_reset(qdisc);
 }
 
 /**
@@ -1264,14 +1291,13 @@
 
 	/* Wait for outstanding qdisc_run calls. */
 	list_for_each_entry(dev, head, close_list) {
-		while (some_qdisc_is_busy(dev))
-			msleep(1);
-		/* The new qdisc is assigned at this point so we can safely
-		 * unwind stale skb lists and qdisc statistics
-		 */
-		netdev_for_each_tx_queue(dev, dev_qdisc_reset, NULL);
-		if (dev_ingress_queue(dev))
-			dev_qdisc_reset(dev, dev_ingress_queue(dev), NULL);
+		while (some_qdisc_is_busy(dev)) {
+			/* wait_event() would avoid this sleep-loop but would
+			 * require expensive checks in the fast paths of packet
+			 * processing which isn't worth it.
+			 */
+			schedule_timeout_uninterruptible(1);
+		}
 	}
 }
 
@@ -1330,7 +1356,7 @@
 
 void dev_init_scheduler(struct net_device *dev)
 {
-	dev->qdisc = &noop_qdisc;
+	rcu_assign_pointer(dev->qdisc, &noop_qdisc);
 	netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc);
 	if (dev_ingress_queue(dev))
 		dev_init_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
@@ -1338,28 +1364,13 @@
 	timer_setup(&dev->watchdog_timer, dev_watchdog, 0);
 }
 
-static void shutdown_scheduler_queue(struct net_device *dev,
-				     struct netdev_queue *dev_queue,
-				     void *_qdisc_default)
-{
-	struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
-	struct Qdisc *qdisc_default = _qdisc_default;
-
-	if (qdisc) {
-		rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
-		dev_queue->qdisc_sleeping = qdisc_default;
-
-		qdisc_put(qdisc);
-	}
-}
-
 void dev_shutdown(struct net_device *dev)
 {
 	netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
 	if (dev_ingress_queue(dev))
 		shutdown_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
-	qdisc_put(dev->qdisc);
-	dev->qdisc = &noop_qdisc;
+	qdisc_put(rtnl_dereference(dev->qdisc));
+	rcu_assign_pointer(dev->qdisc, &noop_qdisc);
 
 	WARN_ON(timer_pending(&dev->watchdog_timer));
 }
@@ -1408,13 +1419,17 @@
 void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
 			  struct tcf_proto *tp_head)
 {
-	struct mini_Qdisc *miniq_old = rtnl_dereference(*miniqp->p_miniq);
+	/* Protected with chain0->filter_chain_lock.
+	 * Can't access chain directly because tp_head can be NULL.
+	 */
+	struct mini_Qdisc *miniq_old =
+		rcu_dereference_protected(*miniqp->p_miniq, 1);
 	struct mini_Qdisc *miniq;
 
 	if (!tp_head) {
 		RCU_INIT_POINTER(*miniqp->p_miniq, NULL);
 		/* Wait for flying RCU callback before it is freed. */
-		rcu_barrier_bh();
+		rcu_barrier();
 		return;
 	}
 
@@ -1422,10 +1437,10 @@
 		&miniqp->miniq1 : &miniqp->miniq2;
 
 	/* We need to make sure that readers won't see the miniq
-	 * we are about to modify. So wait until previous call_rcu_bh callback
+	 * we are about to modify. So wait until previous call_rcu callback
 	 * is done.
 	 */
-	rcu_barrier_bh();
+	rcu_barrier();
 	miniq->filter_list = tp_head;
 	rcu_assign_pointer(*miniqp->p_miniq, miniq);
 
@@ -1434,10 +1449,18 @@
 	 * block potential new user of miniq_old until all readers
 	 * are not seeing it.
 	 */
-	call_rcu_bh(&miniq_old->rcu, mini_qdisc_rcu_func);
+	call_rcu(&miniq_old->rcu, mini_qdisc_rcu_func);
 }
 EXPORT_SYMBOL(mini_qdisc_pair_swap);
 
+void mini_qdisc_pair_block_init(struct mini_Qdisc_pair *miniqp,
+				struct tcf_block *block)
+{
+	miniqp->miniq1.block = block;
+	miniqp->miniq2.block = block;
+}
+EXPORT_SYMBOL(mini_qdisc_pair_block_init);
+
 void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc,
 			  struct mini_Qdisc __rcu **p_miniq)
 {