forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-08 01573e231f18eb2d99162747186f59511f56b64d
diff --git a/kernel/net/sched/sch_generic.c b/kernel/net/sched/sch_generic.c
--- a/kernel/net/sched/sch_generic.c
+++ b/kernel/net/sched/sch_generic.c
@@ -1,10 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * net/sched/sch_generic.c	Generic packet scheduler routines.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
  *
  * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
  *              Jamal Hadi Salim, <hadi@cyberus.ca> 990601
@@ -32,11 +28,31 @@
 #include <net/pkt_sched.h>
 #include <net/dst.h>
 #include <trace/events/qdisc.h>
+#include <trace/events/net.h>
 #include <net/xfrm.h>
 
 /* Qdisc to use by default */
 const struct Qdisc_ops *default_qdisc_ops = &pfifo_fast_ops;
 EXPORT_SYMBOL(default_qdisc_ops);
+
+static void qdisc_maybe_clear_missed(struct Qdisc *q,
+				     const struct netdev_queue *txq)
+{
+	clear_bit(__QDISC_STATE_MISSED, &q->state);
+
+	/* Make sure the below netif_xmit_frozen_or_stopped()
+	 * checking happens after clearing STATE_MISSED.
+	 */
+	smp_mb__after_atomic();
+
+	/* Checking netif_xmit_frozen_or_stopped() again to
+	 * make sure STATE_MISSED is set if the STATE_MISSED
+	 * set by netif_tx_wake_queue()'s rescheduling of
+	 * net_tx_action() is cleared by the above clear_bit().
+	 */
+	if (!netif_xmit_frozen_or_stopped(txq))
+		set_bit(__QDISC_STATE_MISSED, &q->state);
+}
 
 /* Main transmission queue. */
 
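Note: qdisc_maybe_clear_missed() is one half of a handshake on __QDISC_STATE_MISSED; the other half lives in qdisc_run_begin()/qdisc_run_end() in include/net/sch_generic.h, which this diff does not show. An abridged sketch of that upstream counterpart (not code from this file; lockdep annotation elided):

/* Lockless (TCQ_F_NOLOCK) qdiscs serialize dequeue via a seqlock
 * trylock; a sender that loses the race records the miss so the
 * current lock holder, or a rescheduled net_tx_action(), dequeues
 * on its behalf.
 */
static inline bool qdisc_run_begin(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_NOLOCK) {
		if (spin_trylock(&qdisc->seqlock))
			goto nolock_empty;

		/* Another CPU already recorded the miss. */
		if (test_bit(__QDISC_STATE_MISSED, &qdisc->state))
			return false;

		/* Record the miss, then retry once in case the lock
		 * holder released seqlock before seeing the flag.
		 */
		set_bit(__QDISC_STATE_MISSED, &qdisc->state);
		if (!spin_trylock(&qdisc->seqlock))
			return false;

nolock_empty:
		WRITE_ONCE(qdisc->empty, false);
		return true;
	} else if (qdisc_is_running(qdisc)) {
		return false;
	}
	raw_write_seqcount_begin(&qdisc->running);
	return true;
}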
@@ -70,13 +86,14 @@
 			skb = __skb_dequeue(&q->skb_bad_txq);
 			if (qdisc_is_percpu_stats(q)) {
 				qdisc_qstats_cpu_backlog_dec(q, skb);
-				qdisc_qstats_atomic_qlen_dec(q);
+				qdisc_qstats_cpu_qlen_dec(q);
 			} else {
 				qdisc_qstats_backlog_dec(q, skb);
 				q->q.qlen--;
 			}
 		} else {
 			skb = SKB_XOFF_MAGIC;
+			qdisc_maybe_clear_missed(q, txq);
 		}
 	}
 
@@ -110,7 +127,7 @@
 
 	if (qdisc_is_percpu_stats(q)) {
 		qdisc_qstats_cpu_backlog_inc(q, skb);
-		qdisc_qstats_atomic_qlen_inc(q);
+		qdisc_qstats_cpu_qlen_inc(q);
 	} else {
 		qdisc_qstats_backlog_inc(q, skb);
 		q->q.qlen++;
@@ -120,52 +137,36 @@
 	spin_unlock(lock);
 }
 
-static inline int __dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
+static inline void dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
 {
-	while (skb) {
-		struct sk_buff *next = skb->next;
+	spinlock_t *lock = NULL;
 
-		__skb_queue_tail(&q->gso_skb, skb);
-		q->qstats.requeues++;
-		qdisc_qstats_backlog_inc(q, skb);
-		q->q.qlen++;	/* it's still part of the queue */
-
-		skb = next;
+	if (q->flags & TCQ_F_NOLOCK) {
+		lock = qdisc_lock(q);
+		spin_lock(lock);
 	}
-	__netif_schedule(q);
 
-	return 0;
-}
-
-static inline int dev_requeue_skb_locked(struct sk_buff *skb, struct Qdisc *q)
-{
-	spinlock_t *lock = qdisc_lock(q);
-
-	spin_lock(lock);
 	while (skb) {
 		struct sk_buff *next = skb->next;
 
 		__skb_queue_tail(&q->gso_skb, skb);
 
-		qdisc_qstats_cpu_requeues_inc(q);
-		qdisc_qstats_cpu_backlog_inc(q, skb);
-		qdisc_qstats_atomic_qlen_inc(q);
+		/* it's still part of the queue */
+		if (qdisc_is_percpu_stats(q)) {
+			qdisc_qstats_cpu_requeues_inc(q);
+			qdisc_qstats_cpu_backlog_inc(q, skb);
+			qdisc_qstats_cpu_qlen_inc(q);
+		} else {
+			q->qstats.requeues++;
+			qdisc_qstats_backlog_inc(q, skb);
+			q->q.qlen++;
+		}
 
 		skb = next;
 	}
-	spin_unlock(lock);
-
+	if (lock)
+		spin_unlock(lock);
 	__netif_schedule(q);
-
-	return 0;
-}
-
-static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
-{
-	if (q->flags & TCQ_F_NOLOCK)
-		return dev_requeue_skb_locked(skb, q);
-	else
-		return __dev_requeue_skb(skb, q);
 }
 
 static void try_bulk_dequeue_skb(struct Qdisc *q,
@@ -186,7 +187,7 @@
 		skb = nskb;
 		(*packets)++; /* GSO counts as one pkt */
 	}
-	skb->next = NULL;
+	skb_mark_not_on_list(skb);
 }
 
 /* This variant of try_bulk_dequeue_skb() makes sure
@@ -212,7 +213,7 @@
 		skb = nskb;
 	} while (++cnt < 8);
 	(*packets) += cnt;
-	skb->next = NULL;
+	skb_mark_not_on_list(skb);
 }
 
 /* Note that dequeue_skb can possibly return a SKB list (via skb->next).
@@ -254,13 +255,14 @@
 			skb = __skb_dequeue(&q->gso_skb);
 			if (qdisc_is_percpu_stats(q)) {
 				qdisc_qstats_cpu_backlog_dec(q, skb);
-				qdisc_qstats_atomic_qlen_dec(q);
+				qdisc_qstats_cpu_qlen_dec(q);
 			} else {
 				qdisc_qstats_backlog_dec(q, skb);
 				q->q.qlen--;
 			}
 		} else {
 			skb = NULL;
+			qdisc_maybe_clear_missed(q, txq);
 		}
 		if (lock)
 			spin_unlock(lock);
@@ -270,8 +272,10 @@
 	*validate = true;
 
 	if ((q->flags & TCQ_F_ONETXQUEUE) &&
-	    netif_xmit_frozen_or_stopped(txq))
+	    netif_xmit_frozen_or_stopped(txq)) {
+		qdisc_maybe_clear_missed(q, txq);
 		return skb;
+	}
 
 	skb = qdisc_dequeue_skb_bad_txq(q);
 	if (unlikely(skb)) {
@@ -330,6 +334,8 @@
 		HARD_TX_LOCK(dev, txq, smp_processor_id());
 		if (!netif_xmit_frozen_or_stopped(txq))
 			skb = dev_hard_start_xmit(skb, dev, txq, &ret);
+		else
+			qdisc_maybe_clear_missed(q, txq);
 
 		HARD_TX_UNLOCK(dev, txq);
 	} else {
@@ -397,17 +403,12 @@
 
 void __qdisc_run(struct Qdisc *q)
 {
-	int quota = dev_tx_weight;
+	int quota = READ_ONCE(dev_tx_weight);
 	int packets;
 
 	while (qdisc_restart(q, &packets)) {
-		/*
-		 * Ordered by possible occurrence: Postpone processing if
-		 * 1. we've exceeded packet quota
-		 * 2. another process needs the CPU;
-		 */
 		quota -= packets;
-		if (quota <= 0 || need_resched()) {
+		if (quota <= 0) {
 			__netif_schedule(q);
 			break;
 		}
@@ -462,9 +463,10 @@
 	}
 
 	if (some_queue_timedout) {
+		trace_net_dev_xmit_timeout(dev, i);
 		WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit queue %u timed out\n",
 			  dev->name, netdev_drivername(dev), i);
-		dev->netdev_ops->ndo_tx_timeout(dev);
+		dev->netdev_ops->ndo_tx_timeout(dev, i);
 	}
 	if (!mod_timer(&dev->watchdog_timer,
 		       round_jiffies(jiffies +
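Note: the extra argument reflects the upstream API change that passes the stalled queue index into ndo_tx_timeout(), so drivers no longer have to guess which ring hung. A sketch of the changed callback shape, with a hypothetical driver "foo" (not part of this diff):

/* "foo" and its reset_work are illustrative placeholders. */
static void foo_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct foo_priv *priv = netdev_priv(dev);

	netdev_err(dev, "TX queue %u hung, scheduling reset\n", txqueue);
	schedule_work(&priv->reset_work);
}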
@@ -506,7 +508,7 @@
  * netif_carrier_on - set carrier
  * @dev: network device
  *
- * Device has detected that carrier.
+ * Device has detected acquisition of carrier.
  */
 void netif_carrier_on(struct net_device *dev)
 {
@@ -565,7 +567,7 @@
 };
 
 static struct netdev_queue noop_netdev_queue = {
-	.qdisc		=	&noop_qdisc,
+	RCU_POINTER_INITIALIZER(qdisc, &noop_qdisc),
 	.qdisc_sleeping	=	&noop_qdisc,
 };
 
@@ -576,7 +578,11 @@
 	.ops		=	&noop_qdisc_ops,
 	.q.lock		=	__SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
 	.dev_queue	=	&noop_netdev_queue,
+#ifdef CONFIG_PREEMPT_RT
+	.running	=	__SEQLOCK_UNLOCKED(noop_qdisc.running),
+#else
 	.running	=	SEQCNT_ZERO(noop_qdisc.running),
+#endif
 	.busylock	=	__SPIN_LOCK_UNLOCKED(noop_qdisc.busylock),
 	.gso_skb = {
 		.next = (struct sk_buff *)&noop_qdisc.gso_skb,
@@ -648,14 +654,14 @@
 
 	err = skb_array_produce(q, skb);
 
-	if (unlikely(err))
-		return qdisc_drop_cpu(skb, qdisc, to_free);
+	if (unlikely(err)) {
+		if (qdisc_is_percpu_stats(qdisc))
+			return qdisc_drop_cpu(skb, qdisc, to_free);
+		else
+			return qdisc_drop(skb, qdisc, to_free);
+	}
 
-	qdisc_qstats_atomic_qlen_inc(qdisc);
-	/* Note: skb can not be used after skb_array_produce(),
-	 * so we better not use qdisc_qstats_cpu_backlog_inc()
-	 */
-	this_cpu_add(qdisc->cpu_qstats->backlog, pkt_len);
+	qdisc_update_stats_at_enqueue(qdisc, pkt_len);
 	return NET_XMIT_SUCCESS;
 }
 
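Note: qdisc_update_stats_at_enqueue() folds the per-CPU and locked accounting into one helper, which is also why the old "skb can not be used after skb_array_produce()" comment could go: the helper takes the precomputed pkt_len rather than the skb. Its upstream definition (include/net/sch_generic.h) is roughly:

/* Rough sketch of the upstream helper, shown for context. */
static inline void qdisc_update_stats_at_enqueue(struct Qdisc *sch,
						 __u64 pkt_len)
{
	if (qdisc_is_percpu_stats(sch)) {
		qdisc_qstats_cpu_qlen_inc(sch);
		this_cpu_add(sch->cpu_qstats->backlog, pkt_len);
	} else {
		sch->qstats.backlog += pkt_len;
		sch->q.qlen++;
	}
}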
@@ -663,8 +669,10 @@
 {
 	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
 	struct sk_buff *skb = NULL;
+	bool need_retry = true;
 	int band;
 
+retry:
 	for (band = 0; band < PFIFO_FAST_BANDS && !skb; band++) {
 		struct skb_array *q = band2list(priv, band);
 
@@ -674,9 +682,26 @@
 		skb = __skb_array_consume(q);
 	}
 	if (likely(skb)) {
-		qdisc_qstats_cpu_backlog_dec(qdisc, skb);
-		qdisc_bstats_cpu_update(qdisc, skb);
-		qdisc_qstats_atomic_qlen_dec(qdisc);
+		qdisc_update_stats_at_dequeue(qdisc, skb);
+	} else if (need_retry &&
+		   test_bit(__QDISC_STATE_MISSED, &qdisc->state)) {
+		/* Delay clearing the STATE_MISSED here to reduce
+		 * the overhead of the second spin_trylock() in
+		 * qdisc_run_begin() and __netif_schedule() calling
+		 * in qdisc_run_end().
+		 */
+		clear_bit(__QDISC_STATE_MISSED, &qdisc->state);
+
+		/* Make sure dequeuing happens after clearing
+		 * STATE_MISSED.
+		 */
+		smp_mb__after_atomic();
+
+		need_retry = false;
+
+		goto retry;
+	} else {
+		WRITE_ONCE(qdisc->empty, true);
 	}
 
 	return skb;
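Note: the retry above pairs with qdisc_run_end() (include/net/sch_generic.h, not in this diff), which reschedules the qdisc whenever STATE_MISSED is still set at unlock time. An abridged sketch of that upstream counterpart:

/* Abridged sketch; the RT seqlock variant of ->running is elided. */
static inline void qdisc_run_end(struct Qdisc *qdisc)
{
	if (qdisc->flags & TCQ_F_NOLOCK) {
		spin_unlock(&qdisc->seqlock);

		/* A miss was recorded while we held seqlock: make
		 * sure another dequeue pass gets scheduled.
		 */
		if (unlikely(test_bit(__QDISC_STATE_MISSED,
				      &qdisc->state)))
			__netif_schedule(qdisc);
	} else {
		write_seqcount_end(&qdisc->running);
	}
}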
@@ -716,10 +741,14 @@
 		kfree_skb(skb);
 	}
 
-	for_each_possible_cpu(i) {
-		struct gnet_stats_queue *q = per_cpu_ptr(qdisc->cpu_qstats, i);
+	if (qdisc_is_percpu_stats(qdisc)) {
+		for_each_possible_cpu(i) {
+			struct gnet_stats_queue *q;
 
-		q->backlog = 0;
+			q = per_cpu_ptr(qdisc->cpu_qstats, i);
+			q->backlog = 0;
+			q->qlen = 0;
+		}
 	}
 }
 
@@ -821,9 +850,8 @@
 			  const struct Qdisc_ops *ops,
 			  struct netlink_ext_ack *extack)
 {
-	void *p;
 	struct Qdisc *sch;
-	unsigned int size = QDISC_ALIGN(sizeof(*sch)) + ops->priv_size;
+	unsigned int size = sizeof(*sch) + ops->priv_size;
 	int err = -ENOBUFS;
 	struct net_device *dev;
 
@@ -834,22 +862,10 @@
 	}
 
 	dev = dev_queue->dev;
-	p = kzalloc_node(size, GFP_KERNEL,
-			 netdev_queue_numa_node_read(dev_queue));
+	sch = kzalloc_node(size, GFP_KERNEL, netdev_queue_numa_node_read(dev_queue));
 
-	if (!p)
+	if (!sch)
 		goto errout;
-	sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
-	/* if we got non aligned memory, ask more and do alignment ourself */
-	if (sch != p) {
-		kfree(p);
-		p = kzalloc_node(size + QDISC_ALIGNTO - 1, GFP_KERNEL,
-				 netdev_queue_numa_node_read(dev_queue));
-		if (!p)
-			goto errout;
-		sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
-		sch->padded = (char *) sch - (char *) p;
-	}
 	__skb_queue_head_init(&sch->gso_skb);
 	__skb_queue_head_init(&sch->skb_bad_txq);
 	qdisc_skb_head_init(&sch->q);
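Note: the QDISC_ALIGN()/sch->padded bookkeeping can be dropped because struct Qdisc now carries its own alignment: upstream ends the struct with a ____cacheline_aligned flexible array for the private area, so a single kzalloc_node() suffices and qdisc_free() can kfree(qdisc) directly. Roughly (sketch of the upstream counterpart in include/net/sch_generic.h):

struct Qdisc {
	/* ... existing members elided ... */
	/* Alignment of the private area now comes from the type itself. */
	long privdata[] ____cacheline_aligned;
};

static inline void *qdisc_priv(struct Qdisc *q)
{
	return &q->privdata;
}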
@@ -874,24 +890,31 @@
 
 	/* seqlock has the same scope of busylock, for NOLOCK qdisc */
 	spin_lock_init(&sch->seqlock);
-	lockdep_set_class(&sch->busylock,
+	lockdep_set_class(&sch->seqlock,
 			  dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);
 
+#ifdef CONFIG_PREEMPT_RT
+	seqlock_init(&sch->running);
+	lockdep_set_class(&sch->running.lock,
+			  dev->qdisc_running_key ?: &qdisc_running_key);
+#else
 	seqcount_init(&sch->running);
 	lockdep_set_class(&sch->running,
 			  dev->qdisc_running_key ?: &qdisc_running_key);
+#endif
 
 	sch->ops = ops;
 	sch->flags = ops->static_flags;
 	sch->enqueue = ops->enqueue;
 	sch->dequeue = ops->dequeue;
 	sch->dev_queue = dev_queue;
+	sch->empty = true;
 	dev_hold(dev);
 	refcount_set(&sch->refcnt, 1);
 
 	return sch;
 errout1:
-	kfree(p);
+	kfree(sch);
 errout:
 	return ERR_PTR(err);
 }
@@ -915,8 +938,10 @@
 	}
 	sch->parent = parentid;
 
-	if (!ops->init || ops->init(sch, NULL, extack) == 0)
+	if (!ops->init || ops->init(sch, NULL, extack) == 0) {
+		trace_qdisc_create(ops, dev_queue->dev, parentid);
 		return sch;
+	}
 
 	qdisc_put(sch);
 	return NULL;
@@ -929,6 +954,8 @@
 {
 	const struct Qdisc_ops *ops = qdisc->ops;
 	struct sk_buff *skb, *tmp;
+
+	trace_qdisc_reset(qdisc);
 
 	if (ops->reset)
 		ops->reset(qdisc);
@@ -955,7 +982,7 @@
 		free_percpu(qdisc->cpu_qstats);
 	}
 
-	kfree((char *) qdisc - qdisc->padded);
+	kfree(qdisc);
 }
 
 static void qdisc_free_cb(struct rcu_head *head)
@@ -967,12 +994,7 @@
 
 static void qdisc_destroy(struct Qdisc *qdisc)
 {
-	const struct Qdisc_ops *ops;
-	struct sk_buff *skb, *tmp;
-
-	if (!qdisc)
-		return;
-	ops = qdisc->ops;
+	const struct Qdisc_ops *ops = qdisc->ops;
 
 #ifdef CONFIG_NET_SCHED
 	qdisc_hash_del(qdisc);
@@ -980,29 +1002,25 @@
 	qdisc_put_stab(rtnl_dereference(qdisc->stab));
 #endif
 	gen_kill_estimator(&qdisc->rate_est);
-	if (ops->reset)
-		ops->reset(qdisc);
+
+	qdisc_reset(qdisc);
+
 	if (ops->destroy)
 		ops->destroy(qdisc);
 
 	module_put(ops->owner);
 	dev_put(qdisc_dev(qdisc));
 
-	skb_queue_walk_safe(&qdisc->gso_skb, skb, tmp) {
-		__skb_unlink(skb, &qdisc->gso_skb);
-		kfree_skb_list(skb);
-	}
-
-	skb_queue_walk_safe(&qdisc->skb_bad_txq, skb, tmp) {
-		__skb_unlink(skb, &qdisc->skb_bad_txq);
-		kfree_skb_list(skb);
-	}
+	trace_qdisc_destroy(qdisc);
 
 	call_rcu(&qdisc->rcu, qdisc_free_cb);
 }
 
 void qdisc_put(struct Qdisc *qdisc)
 {
+	if (!qdisc)
+		return;
+
 	if (qdisc->flags & TCQ_F_BUILTIN ||
 	    !refcount_dec_and_test(&qdisc->refcnt))
 		return;
@@ -1049,6 +1067,21 @@
 }
 EXPORT_SYMBOL(dev_graft_qdisc);
 
+static void shutdown_scheduler_queue(struct net_device *dev,
+				     struct netdev_queue *dev_queue,
+				     void *_qdisc_default)
+{
+	struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
+	struct Qdisc *qdisc_default = _qdisc_default;
+
+	if (qdisc) {
+		rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
+		dev_queue->qdisc_sleeping = qdisc_default;
+
+		qdisc_put(qdisc);
+	}
+}
+
 static void attach_one_default_qdisc(struct net_device *dev,
 				     struct netdev_queue *dev_queue,
 				     void *_unused)
@@ -1058,12 +1091,13 @@
 
 	if (dev->priv_flags & IFF_NO_QUEUE)
 		ops = &noqueue_qdisc_ops;
+	else if(dev->type == ARPHRD_CAN)
+		ops = &pfifo_fast_ops;
 
 	qdisc = qdisc_create_dflt(dev_queue, ops, TC_H_ROOT, NULL);
-	if (!qdisc) {
-		netdev_info(dev, "activation failed\n");
+	if (!qdisc)
 		return;
-	}
+
 	if (!netif_is_multiqueue(dev))
 		qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
 	dev_queue->qdisc_sleeping = qdisc;
@@ -1079,18 +1113,34 @@
 	if (!netif_is_multiqueue(dev) ||
 	    dev->priv_flags & IFF_NO_QUEUE) {
 		netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
-		dev->qdisc = txq->qdisc_sleeping;
-		qdisc_refcount_inc(dev->qdisc);
+		qdisc = txq->qdisc_sleeping;
+		rcu_assign_pointer(dev->qdisc, qdisc);
+		qdisc_refcount_inc(qdisc);
 	} else {
 		qdisc = qdisc_create_dflt(txq, &mq_qdisc_ops, TC_H_ROOT, NULL);
 		if (qdisc) {
-			dev->qdisc = qdisc;
+			rcu_assign_pointer(dev->qdisc, qdisc);
 			qdisc->ops->attach(qdisc);
 		}
 	}
+	qdisc = rtnl_dereference(dev->qdisc);
+
+	/* Detect default qdisc setup/init failed and fallback to "noqueue" */
+	if (qdisc == &noop_qdisc) {
+		netdev_warn(dev, "default qdisc (%s) fail, fallback to %s\n",
+			    default_qdisc_ops->id, noqueue_qdisc_ops.id);
+		netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
+		dev->priv_flags |= IFF_NO_QUEUE;
+		netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
+		qdisc = txq->qdisc_sleeping;
+		rcu_assign_pointer(dev->qdisc, qdisc);
+		qdisc_refcount_inc(qdisc);
+		dev->priv_flags ^= IFF_NO_QUEUE;
+	}
+
 #ifdef CONFIG_NET_SCHED
-	if (dev->qdisc != &noop_qdisc)
-		qdisc_hash_add(dev->qdisc, false);
+	if (qdisc != &noop_qdisc)
+		qdisc_hash_add(qdisc, false);
 #endif
 }
 
@@ -1120,7 +1170,7 @@
 	 * and noqueue_qdisc for virtual interfaces
 	 */
 
-	if (dev->qdisc == &noop_qdisc)
+	if (rtnl_dereference(dev->qdisc) == &noop_qdisc)
 		attach_default_qdiscs(dev);
 
 	if (!netif_carrier_ok(dev))
@@ -1139,17 +1189,24 @@
 }
 EXPORT_SYMBOL(dev_activate);
 
+static void qdisc_deactivate(struct Qdisc *qdisc)
+{
+	if (qdisc->flags & TCQ_F_BUILTIN)
+		return;
+
+	set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state);
+}
+
 static void dev_deactivate_queue(struct net_device *dev,
 				 struct netdev_queue *dev_queue,
 				 void *_qdisc_default)
 {
-	struct Qdisc *qdisc = rtnl_dereference(dev_queue->qdisc);
 	struct Qdisc *qdisc_default = _qdisc_default;
+	struct Qdisc *qdisc;
 
+	qdisc = rtnl_dereference(dev_queue->qdisc);
 	if (qdisc) {
-		if (!(qdisc->flags & TCQ_F_BUILTIN))
-			set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state);
-
+		qdisc_deactivate(qdisc);
 		rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
 	}
 }
@@ -1174,8 +1231,10 @@
 	qdisc_reset(qdisc);
 
 	spin_unlock_bh(qdisc_lock(qdisc));
-	if (nolock)
+	if (nolock) {
+		clear_bit(__QDISC_STATE_MISSED, &qdisc->state);
 		spin_unlock_bh(&qdisc->seqlock);
+	}
 }
 
 static bool some_qdisc_is_busy(struct net_device *dev)
11811240 static bool some_qdisc_is_busy(struct net_device *dev)
....@@ -1203,16 +1262,6 @@
12031262 return true;
12041263 }
12051264 return false;
1206
-}
1207
-
1208
-static void dev_qdisc_reset(struct net_device *dev,
1209
- struct netdev_queue *dev_queue,
1210
- void *none)
1211
-{
1212
- struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
1213
-
1214
- if (qdisc)
1215
- qdisc_reset(qdisc);
12161265 }
12171266
12181267 /**
....@@ -1252,14 +1301,13 @@
12521301
12531302 /* Wait for outstanding qdisc_run calls. */
12541303 list_for_each_entry(dev, head, close_list) {
1255
- while (some_qdisc_is_busy(dev))
1256
- yield();
1257
- /* The new qdisc is assigned at this point so we can safely
1258
- * unwind stale skb lists and qdisc statistics
1259
- */
1260
- netdev_for_each_tx_queue(dev, dev_qdisc_reset, NULL);
1261
- if (dev_ingress_queue(dev))
1262
- dev_qdisc_reset(dev, dev_ingress_queue(dev), NULL);
1304
+ while (some_qdisc_is_busy(dev)) {
1305
+ /* wait_event() would avoid this sleep-loop but would
1306
+ * require expensive checks in the fast paths of packet
1307
+ * processing which isn't worth it.
1308
+ */
1309
+ schedule_timeout_uninterruptible(1);
1310
+ }
12631311 }
12641312 }
12651313
@@ -1318,7 +1366,7 @@
 
 void dev_init_scheduler(struct net_device *dev)
 {
-	dev->qdisc = &noop_qdisc;
+	rcu_assign_pointer(dev->qdisc, &noop_qdisc);
 	netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc);
 	if (dev_ingress_queue(dev))
 		dev_init_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
@@ -1326,28 +1374,13 @@
 	timer_setup(&dev->watchdog_timer, dev_watchdog, 0);
 }
 
-static void shutdown_scheduler_queue(struct net_device *dev,
-				     struct netdev_queue *dev_queue,
-				     void *_qdisc_default)
-{
-	struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
-	struct Qdisc *qdisc_default = _qdisc_default;
-
-	if (qdisc) {
-		rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
-		dev_queue->qdisc_sleeping = qdisc_default;
-
-		qdisc_put(qdisc);
-	}
-}
-
 void dev_shutdown(struct net_device *dev)
 {
 	netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
 	if (dev_ingress_queue(dev))
 		shutdown_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
-	qdisc_put(dev->qdisc);
-	dev->qdisc = &noop_qdisc;
+	qdisc_put(rtnl_dereference(dev->qdisc));
+	rcu_assign_pointer(dev->qdisc, &noop_qdisc);
 
 	WARN_ON(timer_pending(&dev->watchdog_timer));
 }
@@ -1396,13 +1429,17 @@
 void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
 			  struct tcf_proto *tp_head)
 {
-	struct mini_Qdisc *miniq_old = rtnl_dereference(*miniqp->p_miniq);
+	/* Protected with chain0->filter_chain_lock.
+	 * Can't access chain directly because tp_head can be NULL.
+	 */
+	struct mini_Qdisc *miniq_old =
+		rcu_dereference_protected(*miniqp->p_miniq, 1);
 	struct mini_Qdisc *miniq;
 
 	if (!tp_head) {
 		RCU_INIT_POINTER(*miniqp->p_miniq, NULL);
 		/* Wait for flying RCU callback before it is freed. */
-		rcu_barrier_bh();
+		rcu_barrier();
 		return;
 	}
 
@@ -1410,10 +1447,10 @@
 			&miniqp->miniq1 : &miniqp->miniq2;
 
 	/* We need to make sure that readers won't see the miniq
-	 * we are about to modify. So wait until previous call_rcu_bh callback
+	 * we are about to modify. So wait until previous call_rcu callback
 	 * is done.
 	 */
-	rcu_barrier_bh();
+	rcu_barrier();
 	miniq->filter_list = tp_head;
 	rcu_assign_pointer(*miniqp->p_miniq, miniq);
 
@@ -1422,10 +1459,18 @@
 	 * block potential new user of miniq_old until all readers
 	 * are not seeing it.
 	 */
-	call_rcu_bh(&miniq_old->rcu, mini_qdisc_rcu_func);
+	call_rcu(&miniq_old->rcu, mini_qdisc_rcu_func);
 }
 EXPORT_SYMBOL(mini_qdisc_pair_swap);
 
+void mini_qdisc_pair_block_init(struct mini_Qdisc_pair *miniqp,
+				struct tcf_block *block)
+{
+	miniqp->miniq1.block = block;
+	miniqp->miniq2.block = block;
+}
+EXPORT_SYMBOL(mini_qdisc_pair_block_init);
+
 void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc,
 			  struct mini_Qdisc __rcu **p_miniq)
 {
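Note: mini_qdisc_pair_block_init() lets clsact/ingress publish their tcf_block through the mini-qdisc so classification can reach the block without an extra lookup. Roughly how the upstream ingress qdisc wires it up (abridged sketch from net/sched/sch_ingress.c; stats flags and error paths trimmed):

static int ingress_init(struct Qdisc *sch, struct nlattr *opt,
			struct netlink_ext_ack *extack)
{
	struct ingress_sched_data *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	int err;

	mini_qdisc_pair_init(&q->miniqp, sch, &dev->miniq_ingress);

	q->block_info.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
	q->block_info.chain_head_change = clsact_chain_head_change;
	q->block_info.chain_head_change_priv = &q->miniqp;

	err = tcf_block_get_ext(&q->block, sch, &q->block_info, extack);
	if (err)
		return err;

	/* Publish the block via both mini-qdisc buffers. */
	mini_qdisc_pair_block_init(&q->miniqp, q->block);
	return 0;
}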