2024-05-10 ee930fffee469d076998274a2ca55e13dc1efb67
kernel/include/net/sch_generic.h
@@ -12,16 +12,20 @@
 #include <linux/list.h>
 #include <linux/refcount.h>
 #include <linux/workqueue.h>
+#include <linux/mutex.h>
+#include <linux/rwsem.h>
+#include <linux/atomic.h>
+#include <linux/hashtable.h>
+#include <linux/android_kabi.h>
 #include <net/gen_stats.h>
 #include <net/rtnetlink.h>
+#include <net/flow_offload.h>
 
 struct Qdisc_ops;
 struct qdisc_walker;
 struct tcf_walker;
 struct module;
-
-typedef int tc_setup_cb_t(enum tc_setup_type type,
-			  void *type_data, void *cb_priv);
+struct bpf_flow_keys;
 
 struct qdisc_rate_table {
 	struct tc_ratespec rate;
@@ -33,6 +37,7 @@
 enum qdisc_state_t {
 	__QDISC_STATE_SCHED,
 	__QDISC_STATE_DEACTIVATED,
+	__QDISC_STATE_MISSED,
 };
 
 struct qdisc_size_table {
@@ -47,10 +52,7 @@
 struct qdisc_skb_head {
 	struct sk_buff	*head;
 	struct sk_buff	*tail;
-	union {
-		u32		qlen;
-		atomic_t	atomic_qlen;
-	};
+	__u32		qlen;
 	spinlock_t	lock;
 };
 
@@ -91,7 +93,7 @@
 	struct net_rate_estimator __rcu *rate_est;
 	struct gnet_stats_basic_cpu __percpu *cpu_bstats;
 	struct gnet_stats_queue	__percpu *cpu_qstats;
-	int			padded;
+	int			pad;
 	refcount_t		refcnt;
 
 	/*
@@ -108,9 +110,15 @@
 
 	spinlock_t		busylock ____cacheline_aligned_in_smp;
 	spinlock_t		seqlock;
-#ifndef __GENKSYMS__
+
+	/* for NOLOCK qdisc, true if there are no enqueued skbs */
+	bool			empty;
 	struct rcu_head		rcu;
-#endif
+
+	ANDROID_KABI_RESERVE(1);
+
+	/* private data */
+	long			privdata[] ____cacheline_aligned;
 };
 
 static inline void qdisc_refcount_inc(struct Qdisc *qdisc)
@@ -140,11 +148,41 @@
 	return (raw_read_seqcount(&qdisc->running) & 1) ? true : false;
 }
 
+static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
+{
+	return q->flags & TCQ_F_CPUSTATS;
+}
+
+static inline bool qdisc_is_empty(const struct Qdisc *qdisc)
+{
+	if (qdisc_is_percpu_stats(qdisc))
+		return READ_ONCE(qdisc->empty);
+	return !READ_ONCE(qdisc->q.qlen);
+}
+
 static inline bool qdisc_run_begin(struct Qdisc *qdisc)
 {
 	if (qdisc->flags & TCQ_F_NOLOCK) {
+		if (spin_trylock(&qdisc->seqlock))
+			goto nolock_empty;
+
+		/* No need to insist if the MISSED flag was already set.
+		 * Note that test_and_set_bit() also gives us memory ordering
+		 * guarantees wrt potential earlier enqueue() and below
+		 * spin_trylock(), both of which are necessary to prevent races
+		 */
+		if (test_and_set_bit(__QDISC_STATE_MISSED, &qdisc->state))
+			return false;
+
+		/* Try to take the lock again to make sure that we will either
+		 * grab it or the CPU that still has it will see MISSED set
+		 * when testing it in qdisc_run_end()
+		 */
 		if (!spin_trylock(&qdisc->seqlock))
 			return false;
+
+nolock_empty:
+		WRITE_ONCE(qdisc->empty, false);
 	} else if (qdisc_is_running(qdisc)) {
 		return false;
 	}
@@ -159,8 +197,21 @@
 static inline void qdisc_run_end(struct Qdisc *qdisc)
 {
 	write_seqcount_end(&qdisc->running);
-	if (qdisc->flags & TCQ_F_NOLOCK)
+	if (qdisc->flags & TCQ_F_NOLOCK) {
 		spin_unlock(&qdisc->seqlock);
+
+		/* spin_unlock() only has store-release semantic. The unlock
+		 * and test_bit() ordering is a store-load ordering, so a full
+		 * memory barrier is needed here.
+		 */
+		smp_mb();
+
+		if (unlikely(test_bit(__QDISC_STATE_MISSED,
+				      &qdisc->state))) {
+			clear_bit(__QDISC_STATE_MISSED, &qdisc->state);
+			__netif_schedule(qdisc);
+		}
+	}
 }
 
 static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
@@ -179,6 +230,7 @@
 }
 
 struct Qdisc_class_ops {
+	unsigned int		flags;
 	/* Child qdisc manipulation */
 	struct netdev_queue *	(*select_queue)(struct Qdisc *, struct tcmsg *);
 	int			(*graft)(struct Qdisc *, unsigned long cl,
@@ -208,6 +260,15 @@
 					struct sk_buff *skb, struct tcmsg*);
 	int			(*dump_stats)(struct Qdisc *, unsigned long,
 					struct gnet_dump *);
+
+	ANDROID_KABI_RESERVE(1);
+};
+
+/* Qdisc_class_ops flag values */
+
+/* Implements API that doesn't require rtnl lock */
+enum qdisc_class_ops_flags {
+	QDISC_CLASS_OPS_DOIT_UNLOCKED = 1,
 };
 
 struct Qdisc_ops {
@@ -244,6 +305,8 @@
 	u32			(*egress_block_get)(struct Qdisc *sch);
 
 	struct module		*owner;
+
+	ANDROID_KABI_RESERVE(1);
 };
 
 
@@ -255,7 +318,7 @@
 		};
 		const struct tcf_proto *goto_tp;
 
-		/* used by the TC_ACT_REINSERT action */
+		/* used in the skb_tc_reinsert function */
 		struct {
 			bool		ingress;
 			struct gnet_stats_queue *qstats;
@@ -273,22 +336,29 @@
 					    const struct tcf_proto *,
 					    struct tcf_result *);
 	int			(*init)(struct tcf_proto*);
-	void			(*destroy)(struct tcf_proto *tp,
+	void			(*destroy)(struct tcf_proto *tp, bool rtnl_held,
 					   struct netlink_ext_ack *extack);
 
 	void*			(*get)(struct tcf_proto*, u32 handle);
+	void			(*put)(struct tcf_proto *tp, void *f);
 	int			(*change)(struct net *net, struct sk_buff *,
 					struct tcf_proto*, unsigned long,
 					u32 handle, struct nlattr **,
-					void **, bool,
+					void **, bool, bool,
 					struct netlink_ext_ack *);
 	int			(*delete)(struct tcf_proto *tp, void *arg,
-					  bool *last,
+					  bool *last, bool rtnl_held,
 					  struct netlink_ext_ack *);
-	void			(*walk)(struct tcf_proto*, struct tcf_walker *arg);
+	bool			(*delete_empty)(struct tcf_proto *tp);
+	void			(*walk)(struct tcf_proto *tp,
+					struct tcf_walker *arg, bool rtnl_held);
 	int			(*reoffload)(struct tcf_proto *tp, bool add,
-					     tc_setup_cb_t *cb, void *cb_priv,
+					     flow_setup_cb_t *cb, void *cb_priv,
 					     struct netlink_ext_ack *extack);
+	void			(*hw_add)(struct tcf_proto *tp,
+					  void *type_data);
+	void			(*hw_del)(struct tcf_proto *tp,
+					  void *type_data);
 	void			(*bind_class)(void *, u32, unsigned long,
 					      void *, unsigned long);
 	void *			(*tmplt_create)(struct net *net,
@@ -299,12 +369,26 @@
 
 	/* rtnetlink specific */
 	int			(*dump)(struct net*, struct tcf_proto*, void *,
-					struct sk_buff *skb, struct tcmsg*);
+					struct sk_buff *skb, struct tcmsg*,
+					bool);
+	int			(*terse_dump)(struct net *net,
+					      struct tcf_proto *tp, void *fh,
+					      struct sk_buff *skb,
+					      struct tcmsg *t, bool rtnl_held);
 	int			(*tmplt_dump)(struct sk_buff *skb,
 					      struct net *net,
 					      void *tmplt_priv);
 
 	struct module		*owner;
+	int			flags;
+};
+
+/* Classifiers setting TCF_PROTO_OPS_DOIT_UNLOCKED in tcf_proto_ops->flags
+ * are expected to implement tcf_proto_ops->delete_empty(), otherwise race
+ * conditions can occur when filters are inserted/deleted simultaneously.
+ */
+enum tcf_proto_ops_flags {
+	TCF_PROTO_OPS_DOIT_UNLOCKED = 1,
 };
 
 struct tcf_proto {
@@ -323,20 +407,32 @@
 	void			*data;
 	const struct tcf_proto_ops	*ops;
 	struct tcf_chain	*chain;
+	/* Lock protects tcf_proto shared state and can be used by unlocked
+	 * classifiers to protect their private data.
+	 */
+	spinlock_t		lock;
+	bool			deleting;
+	refcount_t		refcnt;
 	struct rcu_head		rcu;
+	struct hlist_node	destroy_ht_node;
 };
 
 struct qdisc_skb_cb {
-	unsigned int		pkt_len;
-	u16			slave_dev_queue_mapping;
-	u16			tc_classid;
+	struct {
+		unsigned int		pkt_len;
+		u16			slave_dev_queue_mapping;
+		u16			tc_classid;
+	};
 #define QDISC_CB_PRIV_LEN 20
 	unsigned char		data[QDISC_CB_PRIV_LEN];
+	u16			mru;
 };
 
 typedef void tcf_chain_head_change_t(struct tcf_proto *tp_head, void *priv);
 
 struct tcf_chain {
+	/* Protects filter_chain. */
+	struct mutex filter_chain_lock;
 	struct tcf_proto __rcu *filter_chain;
 	struct list_head list;
 	struct tcf_block *block;
@@ -344,64 +440,78 @@
 	unsigned int refcnt;
 	unsigned int action_refcnt;
 	bool explicitly_created;
+	bool flushing;
 	const struct tcf_proto_ops *tmplt_ops;
 	void *tmplt_priv;
+	struct rcu_head rcu;
 };
 
 struct tcf_block {
+	/* Lock protects tcf_block and lifetime-management data of chains
+	 * attached to the block (refcnt, action_refcnt, explicitly_created).
+	 */
+	struct mutex lock;
 	struct list_head chain_list;
	u32 index; /* block index for shared blocks */
-	unsigned int refcnt;
+	u32 classid; /* which class this block belongs to */
+	refcount_t refcnt;
 	struct net *net;
 	struct Qdisc *q;
-	struct list_head cb_list;
+	struct rw_semaphore cb_lock; /* protects cb_list and offload counters */
+	struct flow_block flow_block;
 	struct list_head owner_list;
 	bool keep_dst;
-	unsigned int offloadcnt; /* Number of oddloaded filters */
+	atomic_t offloadcnt; /* Number of oddloaded filters */
 	unsigned int nooffloaddevcnt; /* Number of devs unable to do offload */
+	unsigned int lockeddevcnt; /* Number of devs that require rtnl lock. */
 	struct {
 		struct tcf_chain *chain;
 		struct list_head filter_chain_list;
 	} chain0;
+	struct rcu_head rcu;
+	DECLARE_HASHTABLE(proto_destroy_ht, 7);
+	struct mutex proto_destroy_lock; /* Lock for proto_destroy hashtable. */
 };
 
-static inline void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
+#ifdef CONFIG_PROVE_LOCKING
+static inline bool lockdep_tcf_chain_is_locked(struct tcf_chain *chain)
 {
-	if (*flags & TCA_CLS_FLAGS_IN_HW)
-		return;
-	*flags |= TCA_CLS_FLAGS_IN_HW;
-	block->offloadcnt++;
+	return lockdep_is_held(&chain->filter_chain_lock);
 }
 
-static inline void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
+static inline bool lockdep_tcf_proto_is_locked(struct tcf_proto *tp)
 {
-	if (!(*flags & TCA_CLS_FLAGS_IN_HW))
-		return;
-	*flags &= ~TCA_CLS_FLAGS_IN_HW;
-	block->offloadcnt--;
+	return lockdep_is_held(&tp->lock);
+}
+#else
+static inline bool lockdep_tcf_chain_is_locked(struct tcf_block *chain)
+{
+	return true;
 }
 
-static inline void
-tc_cls_offload_cnt_update(struct tcf_block *block, unsigned int *cnt,
-			  u32 *flags, bool add)
+static inline bool lockdep_tcf_proto_is_locked(struct tcf_proto *tp)
 {
-	if (add) {
-		if (!*cnt)
-			tcf_block_offload_inc(block, flags);
-		(*cnt)++;
-	} else {
-		(*cnt)--;
-		if (!*cnt)
-			tcf_block_offload_dec(block, flags);
-	}
+	return true;
 }
+#endif /* #ifdef CONFIG_PROVE_LOCKING */
+
+#define tcf_chain_dereference(p, chain)					\
+	rcu_dereference_protected(p, lockdep_tcf_chain_is_locked(chain))
+
+#define tcf_proto_dereference(p, tp)					\
+	rcu_dereference_protected(p, lockdep_tcf_proto_is_locked(tp))
 
 static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
 {
 	struct qdisc_skb_cb *qcb;
 
-	BUILD_BUG_ON(sizeof(skb->cb) < offsetof(struct qdisc_skb_cb, data) + sz);
+	BUILD_BUG_ON(sizeof(skb->cb) < sizeof(*qcb));
 	BUILD_BUG_ON(sizeof(qcb->data) < sz);
+}
+
+static inline int qdisc_qlen_cpu(const struct Qdisc *q)
+{
+	return this_cpu_ptr(q->cpu_qstats)->qlen;
 }
 
 static inline int qdisc_qlen(const struct Qdisc *q)
@@ -409,14 +519,17 @@
 	return q->q.qlen;
 }
 
-static inline u32 qdisc_qlen_sum(const struct Qdisc *q)
+static inline int qdisc_qlen_sum(const struct Qdisc *q)
 {
-	u32 qlen = q->qstats.qlen;
+	__u32 qlen = q->qstats.qlen;
+	int i;
 
-	if (q->flags & TCQ_F_NOLOCK)
-		qlen += atomic_read(&q->q.atomic_qlen);
-	else
+	if (qdisc_is_percpu_stats(q)) {
+		for_each_possible_cpu(i)
+			qlen += per_cpu_ptr(q->cpu_qstats, i)->qlen;
+	} else {
 		qlen += q->q.qlen;
+	}
 
 	return qlen;
 }
@@ -573,8 +686,31 @@
 void qdisc_reset(struct Qdisc *qdisc);
 void qdisc_put(struct Qdisc *qdisc);
 void qdisc_put_unlocked(struct Qdisc *qdisc);
-void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, unsigned int n,
-			       unsigned int len);
+void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, int n, int len);
+#ifdef CONFIG_NET_SCHED
+int qdisc_offload_dump_helper(struct Qdisc *q, enum tc_setup_type type,
+			      void *type_data);
+void qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
+				struct Qdisc *new, struct Qdisc *old,
+				enum tc_setup_type type, void *type_data,
+				struct netlink_ext_ack *extack);
+#else
+static inline int
+qdisc_offload_dump_helper(struct Qdisc *q, enum tc_setup_type type,
			  void *type_data)
+{
+	q->flags &= ~TCQ_F_OFFLOADED;
+	return 0;
+}
+
+static inline void
+qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
+			   struct Qdisc *new, struct Qdisc *old,
+			   enum tc_setup_type type, void *type_data,
+			   struct netlink_ext_ack *extack)
+{
+}
+#endif
 struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
 			  const struct Qdisc_ops *ops,
 			  struct netlink_ext_ack *extack);
@@ -585,22 +721,6 @@
 void __qdisc_calculate_pkt_len(struct sk_buff *skb,
 			       const struct qdisc_size_table *stab);
 int skb_do_redirect(struct sk_buff *);
-
-static inline void skb_reset_tc(struct sk_buff *skb)
-{
-#ifdef CONFIG_NET_CLS_ACT
-	skb->tc_redirected = 0;
-#endif
-}
-
-static inline bool skb_is_tc_redirected(const struct sk_buff *skb)
-{
-#ifdef CONFIG_NET_CLS_ACT
-	return skb->tc_redirected;
-#else
-	return false;
-#endif
-}
 
 static inline bool skb_at_tc_ingress(const struct sk_buff *skb)
 {
@@ -637,11 +757,6 @@
 	}
 }
 
-static inline void qdisc_reset_all_tx(struct net_device *dev)
-{
-	qdisc_reset_all_tx_gt(dev, 0);
-}
-
 /* Are all TX queues of the device empty? */
 static inline bool qdisc_all_tx_empty(const struct net_device *dev)
 {
@@ -652,7 +767,7 @@
 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
 		const struct Qdisc *q = rcu_dereference(txq->qdisc);
 
-		if (q->q.qlen) {
+		if (!qdisc_is_empty(q)) {
 			rcu_read_unlock();
 			return false;
 		}
@@ -722,11 +837,6 @@
 	return sch->enqueue(skb, sch, to_free);
 }
 
-static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
-{
-	return q->flags & TCQ_F_CPUSTATS;
-}
-
 static inline void _bstats_update(struct gnet_stats_basic_packed *bstats,
 				  __u64 bytes, __u32 packets)
 {
@@ -794,14 +904,14 @@
 	this_cpu_add(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
 }
 
-static inline void qdisc_qstats_atomic_qlen_inc(struct Qdisc *sch)
+static inline void qdisc_qstats_cpu_qlen_inc(struct Qdisc *sch)
 {
-	atomic_inc(&sch->q.atomic_qlen);
+	this_cpu_inc(sch->cpu_qstats->qlen);
 }
 
-static inline void qdisc_qstats_atomic_qlen_dec(struct Qdisc *sch)
+static inline void qdisc_qstats_cpu_qlen_dec(struct Qdisc *sch)
 {
-	atomic_dec(&sch->q.atomic_qlen);
+	this_cpu_dec(sch->cpu_qstats->qlen);
 }
 
 static inline void qdisc_qstats_cpu_requeues_inc(struct Qdisc *sch)
@@ -839,6 +949,41 @@
 	sch->qstats.overlimits++;
 }
 
+static inline int qdisc_qstats_copy(struct gnet_dump *d, struct Qdisc *sch)
+{
+	__u32 qlen = qdisc_qlen_sum(sch);
+
+	return gnet_stats_copy_queue(d, sch->cpu_qstats, &sch->qstats, qlen);
+}
+
+static inline void qdisc_qstats_qlen_backlog(struct Qdisc *sch,  __u32 *qlen,
+					     __u32 *backlog)
+{
+	struct gnet_stats_queue qstats = { 0 };
+	__u32 len = qdisc_qlen_sum(sch);
+
+	__gnet_stats_copy_queue(&qstats, sch->cpu_qstats, &sch->qstats, len);
+	*qlen = qstats.qlen;
+	*backlog = qstats.backlog;
+}
+
+static inline void qdisc_tree_flush_backlog(struct Qdisc *sch)
+{
+	__u32 qlen, backlog;
+
+	qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
+	qdisc_tree_reduce_backlog(sch, qlen, backlog);
+}
+
+static inline void qdisc_purge_queue(struct Qdisc *sch)
+{
+	__u32 qlen, backlog;
+
+	qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
+	qdisc_reset(sch);
+	qdisc_tree_reduce_backlog(sch, qlen, backlog);
+}
+
 static inline void qdisc_skb_head_init(struct qdisc_skb_head *qh)
 {
 	qh->head = NULL;
@@ -846,8 +991,8 @@
 	qh->qlen = 0;
 }
 
-static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
-				       struct qdisc_skb_head *qh)
+static inline void __qdisc_enqueue_tail(struct sk_buff *skb,
+					struct qdisc_skb_head *qh)
 {
 	struct sk_buff *last = qh->tail;
 
@@ -860,14 +1005,24 @@
 		qh->head = skb;
 	}
 	qh->qlen++;
-	qdisc_qstats_backlog_inc(sch, skb);
-
-	return NET_XMIT_SUCCESS;
 }
 
 static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
 {
-	return __qdisc_enqueue_tail(skb, sch, &sch->q);
+	__qdisc_enqueue_tail(skb, &sch->q);
+	qdisc_qstats_backlog_inc(sch, skb);
+	return NET_XMIT_SUCCESS;
+}
+
+static inline void __qdisc_enqueue_head(struct sk_buff *skb,
+					struct qdisc_skb_head *qh)
+{
+	skb->next = qh->head;
+
+	if (!qh->head)
+		qh->tail = skb;
+	qh->head = skb;
+	qh->qlen++;
 }
 
 static inline struct sk_buff *__qdisc_dequeue_head(struct qdisc_skb_head *qh)
@@ -933,12 +1088,6 @@
 	return 0;
 }
 
-static inline unsigned int qdisc_queue_drop_head(struct Qdisc *sch,
-						 struct sk_buff **to_free)
-{
-	return __qdisc_queue_drop_head(sch, &sch->q, to_free);
-}
-
 static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
 {
 	const struct qdisc_skb_head *qh = &sch->q;
@@ -966,6 +1115,32 @@
 	return skb;
 }
 
+static inline void qdisc_update_stats_at_dequeue(struct Qdisc *sch,
+						 struct sk_buff *skb)
+{
+	if (qdisc_is_percpu_stats(sch)) {
+		qdisc_qstats_cpu_backlog_dec(sch, skb);
+		qdisc_bstats_cpu_update(sch, skb);
+		qdisc_qstats_cpu_qlen_dec(sch);
+	} else {
+		qdisc_qstats_backlog_dec(sch, skb);
+		qdisc_bstats_update(sch, skb);
+		sch->q.qlen--;
+	}
+}
+
+static inline void qdisc_update_stats_at_enqueue(struct Qdisc *sch,
+						 unsigned int pkt_len)
+{
+	if (qdisc_is_percpu_stats(sch)) {
+		qdisc_qstats_cpu_qlen_inc(sch);
+		this_cpu_add(sch->cpu_qstats->backlog, pkt_len);
+	} else {
+		sch->qstats.backlog += pkt_len;
+		sch->q.qlen++;
+	}
+}
+
 /* use instead of qdisc->dequeue() for all qdiscs queried with ->peek() */
 static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
 {
@@ -973,8 +1148,13 @@
 
 	if (skb) {
 		skb = __skb_dequeue(&sch->gso_skb);
-		qdisc_qstats_backlog_dec(sch, skb);
-		sch->q.qlen--;
+		if (qdisc_is_percpu_stats(sch)) {
+			qdisc_qstats_cpu_backlog_dec(sch, skb);
+			qdisc_qstats_cpu_qlen_dec(sch);
+		} else {
+			qdisc_qstats_backlog_dec(sch, skb);
+			sch->q.qlen--;
+		}
 	} else {
 		skb = sch->dequeue(sch);
 	}
@@ -1001,7 +1181,6 @@
 static inline void qdisc_reset_queue(struct Qdisc *sch)
 {
 	__qdisc_reset_queue(&sch->q);
-	sch->qstats.backlog = 0;
 }
 
 static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
@@ -1012,13 +1191,8 @@
 	sch_tree_lock(sch);
 	old = *pold;
 	*pold = new;
-	if (old != NULL) {
-		unsigned int qlen = old->q.qlen;
-		unsigned int backlog = old->qstats.backlog;
-
-		qdisc_reset(old);
-		qdisc_tree_reduce_backlog(old, qlen, backlog);
-	}
+	if (old != NULL)
+		qdisc_purge_queue(old);
 	sch_tree_unlock(sch);
 
 	return old;
@@ -1119,6 +1293,7 @@
  */
 struct mini_Qdisc {
 	struct tcf_proto *filter_list;
+	struct tcf_block *block;
 	struct gnet_stats_basic_cpu __percpu *cpu_bstats;
 	struct gnet_stats_queue __percpu *cpu_qstats;
 	struct rcu_head rcu;
@@ -1145,18 +1320,14 @@
 				struct tcf_proto *tp_head);
 void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc,
 			  struct mini_Qdisc __rcu **p_miniq);
+void mini_qdisc_pair_block_init(struct mini_Qdisc_pair *miniqp,
+				struct tcf_block *block);
 
-static inline void skb_tc_reinsert(struct sk_buff *skb, struct tcf_result *res)
+/* Make sure qdisc is no longer in SCHED state. */
+static inline void qdisc_synchronize(const struct Qdisc *q)
 {
-	struct gnet_stats_queue *stats = res->qstats;
-	int ret;
-
-	if (res->ingress)
-		ret = netif_receive_skb(skb);
-	else
-		ret = dev_queue_xmit(skb);
-	if (ret && stats)
-		qstats_overlimit_inc(res->qstats);
+	while (test_bit(__QDISC_STATE_SCHED, &q->state))
+		msleep(1);
 }
 
 #endif
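
The trickiest piece of this patch is the __QDISC_STATE_MISSED handshake that qdisc_run_begin()/qdisc_run_end() now perform for lockless (TCQ_F_NOLOCK) qdiscs: a CPU that loses the seqlock race records the miss, and the CPU that owns the lock re-checks the flag after a full barrier on its way out, so an enqueued skb is never left behind with nobody scheduled to send it. The stand-alone userspace sketch below models that protocol for illustration only; it is not kernel code. A pthread spinlock stands in for qdisc->seqlock, C11 seq_cst atomics for test_and_set_bit()/clear_bit() and smp_mb(), an atomic counter for the queue, and an immediate re-run replaces __netif_schedule(). Built with something like "gcc -O2 -pthread", it should always finish with nothing left queued.

/* Userspace model of the MISSED-flag handshake (illustration only). */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NTHREADS	4
#define NPACKETS	100000

static pthread_spinlock_t run_lock;	/* stands in for qdisc->seqlock       */
static atomic_bool missed;		/* stands in for __QDISC_STATE_MISSED */
static atomic_int queued;		/* stands in for qdisc->q.qlen        */
static atomic_int processed;

/* Drain the "queue"; only ever called while run_lock is held. */
static void run_queue(void)
{
	while (atomic_load(&queued) > 0) {
		atomic_fetch_sub(&queued, 1);
		atomic_fetch_add(&processed, 1);
	}
}

/* Mirrors the new qdisc_run_begin(): trylock, record a miss on failure,
 * then trylock once more so that either we own the lock or the current
 * owner is guaranteed to see the miss when it releases it.
 */
static int run_begin(void)
{
	if (pthread_spin_trylock(&run_lock) == 0)
		return 1;
	if (atomic_exchange(&missed, 1))	/* like test_and_set_bit() */
		return 0;			/* miss already recorded */
	return pthread_spin_trylock(&run_lock) == 0;
}

/* Enqueue one "packet", then mirror qdisc_run_end(): after unlocking,
 * re-check the missed flag with sequentially consistent ordering (the
 * counterpart of the smp_mb() in the patch) and run again instead of
 * deferring through __netif_schedule().
 */
static void enqueue_and_run(void)
{
	atomic_fetch_add(&queued, 1);
	for (;;) {
		if (!run_begin())
			return;		/* the lock owner will pick it up */
		run_queue();
		pthread_spin_unlock(&run_lock);
		if (!atomic_exchange(&missed, 0))
			return;
		/* work was missed while we held the lock: go around again */
	}
}

static void *producer(void *arg)
{
	(void)arg;
	for (int i = 0; i < NPACKETS; i++)
		enqueue_and_run();
	return NULL;
}

int main(void)
{
	pthread_t t[NTHREADS];
	int i;

	pthread_spin_init(&run_lock, PTHREAD_PROCESS_PRIVATE);
	for (i = 0; i < NTHREADS; i++)
		pthread_create(&t[i], NULL, producer, NULL);
	for (i = 0; i < NTHREADS; i++)
		pthread_join(t[i], NULL);
	printf("left queued: %d, processed: %d\n",
	       atomic_load(&queued), atomic_load(&processed));
	return 0;
}

The second trylock after setting the flag is what closes the window: either the caller gets the lock itself, or the current owner is guaranteed to observe the recorded miss after its full barrier and run the queue again.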