2023-12-06 08f87f769b595151be1afeff53e144f543faa614
kernel/include/net/sch_generic.h
@@ -13,16 +13,20 @@
 #include <net/net_seq_lock.h>
 #include <linux/refcount.h>
 #include <linux/workqueue.h>
+#include <linux/mutex.h>
+#include <linux/rwsem.h>
+#include <linux/atomic.h>
+#include <linux/hashtable.h>
+#include <linux/android_kabi.h>
 #include <net/gen_stats.h>
 #include <net/rtnetlink.h>
+#include <net/flow_offload.h>

 struct Qdisc_ops;
 struct qdisc_walker;
 struct tcf_walker;
 struct module;
-
-typedef int tc_setup_cb_t(enum tc_setup_type type,
-                          void *type_data, void *cb_priv);
+struct bpf_flow_keys;

 struct qdisc_rate_table {
         struct tc_ratespec rate;
@@ -34,6 +38,7 @@
 enum qdisc_state_t {
         __QDISC_STATE_SCHED,
         __QDISC_STATE_DEACTIVATED,
+        __QDISC_STATE_MISSED,
 };

 struct qdisc_size_table {
@@ -48,10 +53,7 @@
 struct qdisc_skb_head {
         struct sk_buff *head;
         struct sk_buff *tail;
-        union {
-                u32 qlen;
-                atomic_t atomic_qlen;
-        };
+        __u32 qlen;
         spinlock_t lock;
 };

@@ -92,7 +94,7 @@
         struct net_rate_estimator __rcu *rate_est;
         struct gnet_stats_basic_cpu __percpu *cpu_bstats;
         struct gnet_stats_queue __percpu *cpu_qstats;
-        int padded;
+        int pad;
         refcount_t refcnt;

         /*
@@ -109,9 +111,15 @@

         spinlock_t busylock ____cacheline_aligned_in_smp;
         spinlock_t seqlock;
-#ifndef __GENKSYMS__
+
+        /* for NOLOCK qdisc, true if there are no enqueued skbs */
+        bool empty;
         struct rcu_head rcu;
-#endif
+
+        ANDROID_KABI_RESERVE(1);
+
+        /* private data */
+        long privdata[] ____cacheline_aligned;
 };

 static inline void qdisc_refcount_inc(struct Qdisc *qdisc)
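Note on the ANDROID_KABI_RESERVE(1) members introduced in this and later hunks: on Android common kernels they reserve padding in exported structs so that a later fix can reuse the slot instead of changing the struct layout after the KABI is frozen. A rough sketch of the idea, assuming the usual definitions from the newly included <linux/android_kabi.h> (the real header also provides ANDROID_KABI_USE() to consume a reserved slot):

    /* Sketch only, not the literal macro: an unused reserve is roughly a
     * u64 placeholder member named android_kabi_reserved<n>.
     */
    #define ANDROID_KABI_RESERVE(n) u64 android_kabi_reserved##n
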
@@ -138,24 +146,62 @@
 {
         if (qdisc->flags & TCQ_F_NOLOCK)
                 return spin_is_locked(&qdisc->seqlock);
-#ifdef CONFIG_PREEMPT_RT_BASE
+#ifdef CONFIG_PREEMPT_RT
         return spin_is_locked(&qdisc->running.lock) ? true : false;
 #else
         return (raw_read_seqcount(&qdisc->running) & 1) ? true : false;
 #endif
 }

+static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
+{
+        return q->flags & TCQ_F_CPUSTATS;
+}
+
+static inline bool qdisc_is_empty(const struct Qdisc *qdisc)
+{
+        if (qdisc_is_percpu_stats(qdisc))
+                return READ_ONCE(qdisc->empty);
+        return !READ_ONCE(qdisc->q.qlen);
+}
+
 static inline bool qdisc_run_begin(struct Qdisc *qdisc)
 {
         if (qdisc->flags & TCQ_F_NOLOCK) {
+                if (spin_trylock(&qdisc->seqlock))
+                        goto nolock_empty;
+
+                /* No need to insist if the MISSED flag was already set.
+                 * Note that test_and_set_bit() also gives us memory ordering
+                 * guarantees wrt potential earlier enqueue() and below
+                 * spin_trylock(), both of which are necessary to prevent races
+                 */
+                if (test_and_set_bit(__QDISC_STATE_MISSED, &qdisc->state))
+                        return false;
+
+                /* Try to take the lock again to make sure that we will either
+                 * grab it or the CPU that still has it will see MISSED set
+                 * when testing it in qdisc_run_end()
+                 */
                 if (!spin_trylock(&qdisc->seqlock))
                         return false;
+
+nolock_empty:
+                WRITE_ONCE(qdisc->empty, false);
         } else if (qdisc_is_running(qdisc)) {
                 return false;
         }
-#ifdef CONFIG_PREEMPT_RT_BASE
-        if (try_write_seqlock(&qdisc->running))
+#ifdef CONFIG_PREEMPT_RT
+        if (spin_trylock(&qdisc->running.lock)) {
+                seqcount_t *s = &qdisc->running.seqcount.seqcount;
+                /*
+                 * Variant of write_seqcount_t_begin() telling lockdep that a
+                 * trylock was attempted.
+                 */
+                raw_write_seqcount_t_begin(s);
+                seqcount_acquire(&s->dep_map, 0, 1, _RET_IP_);
                 return true;
+        }
         return false;
 #else
         /* Variant of write_seqcount_begin() telling lockdep a trylock
@@ -169,13 +215,26 @@

 static inline void qdisc_run_end(struct Qdisc *qdisc)
 {
-#ifdef CONFIG_PREEMPT_RT_BASE
+#ifdef CONFIG_PREEMPT_RT
         write_sequnlock(&qdisc->running);
 #else
         write_seqcount_end(&qdisc->running);
 #endif
-        if (qdisc->flags & TCQ_F_NOLOCK)
+        if (qdisc->flags & TCQ_F_NOLOCK) {
                 spin_unlock(&qdisc->seqlock);
+
+                /* spin_unlock() only has store-release semantic. The unlock
+                 * and test_bit() ordering is a store-load ordering, so a full
+                 * memory barrier is needed here.
+                 */
+                smp_mb();
+
+                if (unlikely(test_bit(__QDISC_STATE_MISSED,
+                                      &qdisc->state))) {
+                        clear_bit(__QDISC_STATE_MISSED, &qdisc->state);
+                        __netif_schedule(qdisc);
+                }
+        }
 }

 static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
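Note on the __QDISC_STATE_MISSED handshake added above: qdisc_run_begin()/qdisc_run_end() bracket one transmit pass over a qdisc, and for TCQ_F_NOLOCK qdiscs the MISSED bit lets a contending CPU hand the work back to the current seqlock owner instead of spinning. A hedged sketch of the caller-side pattern, modeled on how the core xmit path drives these helpers; the function name is illustrative, not part of this header:

    /* Sketch: one pass over a NOLOCK qdisc. If another CPU owns
     * qdisc->seqlock, qdisc_run_begin() sets __QDISC_STATE_MISSED and
     * returns false; the owner sees the bit in qdisc_run_end() and calls
     * __netif_schedule(), so packets enqueued meanwhile are not stranded.
     */
    static void example_xmit_pass(struct Qdisc *q)
    {
            if (qdisc_run_begin(q)) {
                    /* e.g. __qdisc_run(q): dequeue and transmit packets */
                    qdisc_run_end(q);
            }
            /* else: the current owner will re-run or reschedule q */
    }
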
@@ -194,6 +253,7 @@
 }

 struct Qdisc_class_ops {
+        unsigned int flags;
         /* Child qdisc manipulation */
         struct netdev_queue * (*select_queue)(struct Qdisc *, struct tcmsg *);
         int (*graft)(struct Qdisc *, unsigned long cl,
@@ -223,6 +283,15 @@
                      struct sk_buff *skb, struct tcmsg*);
         int (*dump_stats)(struct Qdisc *, unsigned long,
                           struct gnet_dump *);
+
+        ANDROID_KABI_RESERVE(1);
+};
+
+/* Qdisc_class_ops flag values */
+
+/* Implements API that doesn't require rtnl lock */
+enum qdisc_class_ops_flags {
+        QDISC_CLASS_OPS_DOIT_UNLOCKED = 1,
 };

 struct Qdisc_ops {
@@ -259,6 +328,8 @@
         u32 (*egress_block_get)(struct Qdisc *sch);

         struct module *owner;
+
+        ANDROID_KABI_RESERVE(1);
 };


@@ -270,7 +341,7 @@
         };
         const struct tcf_proto *goto_tp;

-        /* used by the TC_ACT_REINSERT action */
+        /* used in the skb_tc_reinsert function */
         struct {
                 bool ingress;
                 struct gnet_stats_queue *qstats;
@@ -288,22 +359,29 @@
                             const struct tcf_proto *,
                             struct tcf_result *);
         int (*init)(struct tcf_proto*);
-        void (*destroy)(struct tcf_proto *tp,
+        void (*destroy)(struct tcf_proto *tp, bool rtnl_held,
                         struct netlink_ext_ack *extack);

         void* (*get)(struct tcf_proto*, u32 handle);
+        void (*put)(struct tcf_proto *tp, void *f);
         int (*change)(struct net *net, struct sk_buff *,
                       struct tcf_proto*, unsigned long,
                       u32 handle, struct nlattr **,
-                      void **, bool,
+                      void **, bool, bool,
                       struct netlink_ext_ack *);
         int (*delete)(struct tcf_proto *tp, void *arg,
-                      bool *last,
+                      bool *last, bool rtnl_held,
                       struct netlink_ext_ack *);
-        void (*walk)(struct tcf_proto*, struct tcf_walker *arg);
+        bool (*delete_empty)(struct tcf_proto *tp);
+        void (*walk)(struct tcf_proto *tp,
+                     struct tcf_walker *arg, bool rtnl_held);
         int (*reoffload)(struct tcf_proto *tp, bool add,
-                         tc_setup_cb_t *cb, void *cb_priv,
+                         flow_setup_cb_t *cb, void *cb_priv,
                          struct netlink_ext_ack *extack);
+        void (*hw_add)(struct tcf_proto *tp,
+                       void *type_data);
+        void (*hw_del)(struct tcf_proto *tp,
+                       void *type_data);
         void (*bind_class)(void *, u32, unsigned long,
                            void *, unsigned long);
         void * (*tmplt_create)(struct net *net,
@@ -314,12 +392,26 @@

         /* rtnetlink specific */
         int (*dump)(struct net*, struct tcf_proto*, void *,
-                    struct sk_buff *skb, struct tcmsg*);
+                    struct sk_buff *skb, struct tcmsg*,
+                    bool);
+        int (*terse_dump)(struct net *net,
+                          struct tcf_proto *tp, void *fh,
+                          struct sk_buff *skb,
+                          struct tcmsg *t, bool rtnl_held);
         int (*tmplt_dump)(struct sk_buff *skb,
                           struct net *net,
                           void *tmplt_priv);

         struct module *owner;
+        int flags;
+};
+
+/* Classifiers setting TCF_PROTO_OPS_DOIT_UNLOCKED in tcf_proto_ops->flags
+ * are expected to implement tcf_proto_ops->delete_empty(), otherwise race
+ * conditions can occur when filters are inserted/deleted simultaneously.
+ */
+enum tcf_proto_ops_flags {
+        TCF_PROTO_OPS_DOIT_UNLOCKED = 1,
 };

 struct tcf_proto {
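Note on the extra bool parameters threaded through tcf_proto_ops above: they carry rtnl_held, so a classifier that sets TCF_PROTO_OPS_DOIT_UNLOCKED may be invoked without the rtnl mutex, and per the comment it must then also provide delete_empty(). A hedged sketch of what such a registration could look like; the "example" names are placeholders, not code from this tree:

    /* Sketch: an unlocked classifier advertises the flag and implements
     * delete_empty() so the core can safely drop a tp with no filters.
     */
    static bool example_delete_empty(struct tcf_proto *tp)
    {
            /* report whether this tp currently holds no filters */
            return true;    /* placeholder logic */
    }

    static struct tcf_proto_ops example_cls_ops __read_mostly = {
            .kind           = "example",            /* placeholder name */
            /* ...classify/init/destroy/get/put/change/delete/walk...,
             * all of which now receive or honour the rtnl_held flag.
             */
            .delete_empty   = example_delete_empty, /* required when unlocked */
            .owner          = THIS_MODULE,
            .flags          = TCF_PROTO_OPS_DOIT_UNLOCKED,
    };
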
@@ -338,20 +430,32 @@
         void *data;
         const struct tcf_proto_ops *ops;
         struct tcf_chain *chain;
+        /* Lock protects tcf_proto shared state and can be used by unlocked
+         * classifiers to protect their private data.
+         */
+        spinlock_t lock;
+        bool deleting;
+        refcount_t refcnt;
         struct rcu_head rcu;
+        struct hlist_node destroy_ht_node;
 };

 struct qdisc_skb_cb {
-        unsigned int pkt_len;
-        u16 slave_dev_queue_mapping;
-        u16 tc_classid;
+        struct {
+                unsigned int pkt_len;
+                u16 slave_dev_queue_mapping;
+                u16 tc_classid;
+        };
 #define QDISC_CB_PRIV_LEN 20
         unsigned char data[QDISC_CB_PRIV_LEN];
+        u16 mru;
 };

 typedef void tcf_chain_head_change_t(struct tcf_proto *tp_head, void *priv);

 struct tcf_chain {
+        /* Protects filter_chain. */
+        struct mutex filter_chain_lock;
         struct tcf_proto __rcu *filter_chain;
         struct list_head list;
         struct tcf_block *block;
@@ -359,64 +463,78 @@
         unsigned int refcnt;
         unsigned int action_refcnt;
         bool explicitly_created;
+        bool flushing;
         const struct tcf_proto_ops *tmplt_ops;
         void *tmplt_priv;
+        struct rcu_head rcu;
 };

 struct tcf_block {
+        /* Lock protects tcf_block and lifetime-management data of chains
+         * attached to the block (refcnt, action_refcnt, explicitly_created).
+         */
+        struct mutex lock;
         struct list_head chain_list;
         u32 index; /* block index for shared blocks */
-        unsigned int refcnt;
+        u32 classid; /* which class this block belongs to */
+        refcount_t refcnt;
         struct net *net;
         struct Qdisc *q;
-        struct list_head cb_list;
+        struct rw_semaphore cb_lock; /* protects cb_list and offload counters */
+        struct flow_block flow_block;
         struct list_head owner_list;
         bool keep_dst;
-        unsigned int offloadcnt; /* Number of oddloaded filters */
+        atomic_t offloadcnt; /* Number of oddloaded filters */
         unsigned int nooffloaddevcnt; /* Number of devs unable to do offload */
+        unsigned int lockeddevcnt; /* Number of devs that require rtnl lock. */
         struct {
                 struct tcf_chain *chain;
                 struct list_head filter_chain_list;
         } chain0;
+        struct rcu_head rcu;
+        DECLARE_HASHTABLE(proto_destroy_ht, 7);
+        struct mutex proto_destroy_lock; /* Lock for proto_destroy hashtable. */
 };

-static inline void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
+#ifdef CONFIG_PROVE_LOCKING
+static inline bool lockdep_tcf_chain_is_locked(struct tcf_chain *chain)
 {
-        if (*flags & TCA_CLS_FLAGS_IN_HW)
-                return;
-        *flags |= TCA_CLS_FLAGS_IN_HW;
-        block->offloadcnt++;
+        return lockdep_is_held(&chain->filter_chain_lock);
 }

-static inline void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
+static inline bool lockdep_tcf_proto_is_locked(struct tcf_proto *tp)
 {
-        if (!(*flags & TCA_CLS_FLAGS_IN_HW))
-                return;
-        *flags &= ~TCA_CLS_FLAGS_IN_HW;
-        block->offloadcnt--;
+        return lockdep_is_held(&tp->lock);
+}
+#else
+static inline bool lockdep_tcf_chain_is_locked(struct tcf_block *chain)
+{
+        return true;
 }

-static inline void
-tc_cls_offload_cnt_update(struct tcf_block *block, unsigned int *cnt,
-                          u32 *flags, bool add)
+static inline bool lockdep_tcf_proto_is_locked(struct tcf_proto *tp)
 {
-        if (add) {
-                if (!*cnt)
-                        tcf_block_offload_inc(block, flags);
-                (*cnt)++;
-        } else {
-                (*cnt)--;
-                if (!*cnt)
-                        tcf_block_offload_dec(block, flags);
-        }
+        return true;
 }
+#endif /* #ifdef CONFIG_PROVE_LOCKING */
+
+#define tcf_chain_dereference(p, chain) \
+        rcu_dereference_protected(p, lockdep_tcf_chain_is_locked(chain))
+
+#define tcf_proto_dereference(p, tp) \
+        rcu_dereference_protected(p, lockdep_tcf_proto_is_locked(tp))

 static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
 {
         struct qdisc_skb_cb *qcb;

-        BUILD_BUG_ON(sizeof(skb->cb) < offsetof(struct qdisc_skb_cb, data) + sz);
+        BUILD_BUG_ON(sizeof(skb->cb) < sizeof(*qcb));
         BUILD_BUG_ON(sizeof(qcb->data) < sz);
+}
+
+static inline int qdisc_qlen_cpu(const struct Qdisc *q)
+{
+        return this_cpu_ptr(q->cpu_qstats)->qlen;
 }

 static inline int qdisc_qlen(const struct Qdisc *q)
@@ -424,14 +542,17 @@
         return q->q.qlen;
 }

-static inline u32 qdisc_qlen_sum(const struct Qdisc *q)
+static inline int qdisc_qlen_sum(const struct Qdisc *q)
 {
-        u32 qlen = q->qstats.qlen;
+        __u32 qlen = q->qstats.qlen;
+        int i;

-        if (q->flags & TCQ_F_NOLOCK)
-                qlen += atomic_read(&q->q.atomic_qlen);
-        else
+        if (qdisc_is_percpu_stats(q)) {
+                for_each_possible_cpu(i)
+                        qlen += per_cpu_ptr(q->cpu_qstats, i)->qlen;
+        } else {
                 qlen += q->q.qlen;
+        }

         return qlen;
 }
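Note on the lockdep helpers and dereference macros above: they let filter-chain updaters document which lock justifies an RCU-protected access, so tcf_chain_dereference()/tcf_proto_dereference() can replace bare rcu_dereference_protected() calls. A hedged usage sketch mirroring how a chain is walked under chain->filter_chain_lock; the function name is illustrative:

    /* Sketch: walk the tp list of a chain while holding filter_chain_lock,
     * which is exactly the condition lockdep_tcf_chain_is_locked() checks.
     */
    static void example_walk_chain(struct tcf_chain *chain)
    {
            struct tcf_proto *tp;

            mutex_lock(&chain->filter_chain_lock);
            for (tp = tcf_chain_dereference(chain->filter_chain, chain);
                 tp; tp = tcf_chain_dereference(tp->next, chain))
                    pr_debug("prio %u proto %s\n", tp->prio, tp->ops->kind);
            mutex_unlock(&chain->filter_chain_lock);
    }
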
@@ -588,8 +709,31 @@
 void qdisc_reset(struct Qdisc *qdisc);
 void qdisc_put(struct Qdisc *qdisc);
 void qdisc_put_unlocked(struct Qdisc *qdisc);
-void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, unsigned int n,
-                               unsigned int len);
+void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, int n, int len);
+#ifdef CONFIG_NET_SCHED
+int qdisc_offload_dump_helper(struct Qdisc *q, enum tc_setup_type type,
+                              void *type_data);
+void qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
+                                struct Qdisc *new, struct Qdisc *old,
+                                enum tc_setup_type type, void *type_data,
+                                struct netlink_ext_ack *extack);
+#else
+static inline int
+qdisc_offload_dump_helper(struct Qdisc *q, enum tc_setup_type type,
+                          void *type_data)
+{
+        q->flags &= ~TCQ_F_OFFLOADED;
+        return 0;
+}
+
+static inline void
+qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
+                           struct Qdisc *new, struct Qdisc *old,
+                           enum tc_setup_type type, void *type_data,
+                           struct netlink_ext_ack *extack)
+{
+}
+#endif
 struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
                           const struct Qdisc_ops *ops,
                           struct netlink_ext_ack *extack);
@@ -600,22 +744,6 @@
 void __qdisc_calculate_pkt_len(struct sk_buff *skb,
                                const struct qdisc_size_table *stab);
 int skb_do_redirect(struct sk_buff *);
-
-static inline void skb_reset_tc(struct sk_buff *skb)
-{
-#ifdef CONFIG_NET_CLS_ACT
-        skb->tc_redirected = 0;
-#endif
-}
-
-static inline bool skb_is_tc_redirected(const struct sk_buff *skb)
-{
-#ifdef CONFIG_NET_CLS_ACT
-        return skb->tc_redirected;
-#else
-        return false;
-#endif
-}

 static inline bool skb_at_tc_ingress(const struct sk_buff *skb)
 {
@@ -652,11 +780,6 @@
         }
 }

-static inline void qdisc_reset_all_tx(struct net_device *dev)
-{
-        qdisc_reset_all_tx_gt(dev, 0);
-}
-
 /* Are all TX queues of the device empty? */
 static inline bool qdisc_all_tx_empty(const struct net_device *dev)
 {
@@ -667,7 +790,7 @@
                 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
                 const struct Qdisc *q = rcu_dereference(txq->qdisc);

-                if (q->q.qlen) {
+                if (!qdisc_is_empty(q)) {
                         rcu_read_unlock();
                         return false;
                 }
@@ -737,11 +860,6 @@
         return sch->enqueue(skb, sch, to_free);
 }

-static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
-{
-        return q->flags & TCQ_F_CPUSTATS;
-}
-
 static inline void _bstats_update(struct gnet_stats_basic_packed *bstats,
                                   __u64 bytes, __u32 packets)
 {
@@ -809,14 +927,14 @@
         this_cpu_add(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
 }

-static inline void qdisc_qstats_atomic_qlen_inc(struct Qdisc *sch)
+static inline void qdisc_qstats_cpu_qlen_inc(struct Qdisc *sch)
 {
-        atomic_inc(&sch->q.atomic_qlen);
+        this_cpu_inc(sch->cpu_qstats->qlen);
 }

-static inline void qdisc_qstats_atomic_qlen_dec(struct Qdisc *sch)
+static inline void qdisc_qstats_cpu_qlen_dec(struct Qdisc *sch)
 {
-        atomic_dec(&sch->q.atomic_qlen);
+        this_cpu_dec(sch->cpu_qstats->qlen);
 }

 static inline void qdisc_qstats_cpu_requeues_inc(struct Qdisc *sch)
@@ -854,6 +972,41 @@
         sch->qstats.overlimits++;
 }

+static inline int qdisc_qstats_copy(struct gnet_dump *d, struct Qdisc *sch)
+{
+        __u32 qlen = qdisc_qlen_sum(sch);
+
+        return gnet_stats_copy_queue(d, sch->cpu_qstats, &sch->qstats, qlen);
+}
+
+static inline void qdisc_qstats_qlen_backlog(struct Qdisc *sch, __u32 *qlen,
+                                             __u32 *backlog)
+{
+        struct gnet_stats_queue qstats = { 0 };
+        __u32 len = qdisc_qlen_sum(sch);
+
+        __gnet_stats_copy_queue(&qstats, sch->cpu_qstats, &sch->qstats, len);
+        *qlen = qstats.qlen;
+        *backlog = qstats.backlog;
+}
+
+static inline void qdisc_tree_flush_backlog(struct Qdisc *sch)
+{
+        __u32 qlen, backlog;
+
+        qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
+        qdisc_tree_reduce_backlog(sch, qlen, backlog);
+}
+
+static inline void qdisc_purge_queue(struct Qdisc *sch)
+{
+        __u32 qlen, backlog;
+
+        qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
+        qdisc_reset(sch);
+        qdisc_tree_reduce_backlog(sch, qlen, backlog);
+}
+
 static inline void qdisc_skb_head_init(struct qdisc_skb_head *qh)
 {
         qh->head = NULL;
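Note on qdisc_tree_flush_backlog() and qdisc_purge_queue() above: they bundle the "snapshot qlen/backlog, reset, then propagate the delta to the ancestors" sequence that callers previously open-coded (the qdisc_replace() hunk near the end of this diff switches to qdisc_purge_queue() for exactly that reason). A hedged sketch of a typical call site when a class's child qdisc is torn down; the surrounding function is illustrative:

    /* Sketch: flushing a child qdisc while deleting a class keeps the
     * parents' qlen/backlog consistent in a single call.
     */
    static void example_delete_class_child(struct Qdisc *sch, struct Qdisc *child)
    {
            sch_tree_lock(sch);
            qdisc_purge_queue(child); /* qdisc_reset() + qdisc_tree_reduce_backlog() */
            sch_tree_unlock(sch);
    }
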
@@ -861,8 +1014,8 @@
         qh->qlen = 0;
 }

-static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
-                                       struct qdisc_skb_head *qh)
+static inline void __qdisc_enqueue_tail(struct sk_buff *skb,
+                                        struct qdisc_skb_head *qh)
 {
         struct sk_buff *last = qh->tail;

@@ -875,14 +1028,24 @@
                 qh->head = skb;
         }
         qh->qlen++;
-        qdisc_qstats_backlog_inc(sch, skb);
-
-        return NET_XMIT_SUCCESS;
 }

 static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
 {
-        return __qdisc_enqueue_tail(skb, sch, &sch->q);
+        __qdisc_enqueue_tail(skb, &sch->q);
+        qdisc_qstats_backlog_inc(sch, skb);
+        return NET_XMIT_SUCCESS;
+}
+
+static inline void __qdisc_enqueue_head(struct sk_buff *skb,
+                                        struct qdisc_skb_head *qh)
+{
+        skb->next = qh->head;
+
+        if (!qh->head)
+                qh->tail = skb;
+        qh->head = skb;
+        qh->qlen++;
 }

 static inline struct sk_buff *__qdisc_dequeue_head(struct qdisc_skb_head *qh)
@@ -948,12 +1111,6 @@
         return 0;
 }

-static inline unsigned int qdisc_queue_drop_head(struct Qdisc *sch,
-                                                 struct sk_buff **to_free)
-{
-        return __qdisc_queue_drop_head(sch, &sch->q, to_free);
-}
-
 static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
 {
         const struct qdisc_skb_head *qh = &sch->q;
@@ -981,6 +1138,32 @@
         return skb;
 }

+static inline void qdisc_update_stats_at_dequeue(struct Qdisc *sch,
+                                                 struct sk_buff *skb)
+{
+        if (qdisc_is_percpu_stats(sch)) {
+                qdisc_qstats_cpu_backlog_dec(sch, skb);
+                qdisc_bstats_cpu_update(sch, skb);
+                qdisc_qstats_cpu_qlen_dec(sch);
+        } else {
+                qdisc_qstats_backlog_dec(sch, skb);
+                qdisc_bstats_update(sch, skb);
+                sch->q.qlen--;
+        }
+}
+
+static inline void qdisc_update_stats_at_enqueue(struct Qdisc *sch,
+                                                 unsigned int pkt_len)
+{
+        if (qdisc_is_percpu_stats(sch)) {
+                qdisc_qstats_cpu_qlen_inc(sch);
+                this_cpu_add(sch->cpu_qstats->backlog, pkt_len);
+        } else {
+                sch->qstats.backlog += pkt_len;
+                sch->q.qlen++;
+        }
+}
+
 /* use instead of qdisc->dequeue() for all qdiscs queried with ->peek() */
 static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
 {
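Note on qdisc_update_stats_at_enqueue()/_at_dequeue() above: they centralize the "per-CPU stats versus plain fields" branching so TCQ_F_CPUSTATS qdiscs do not have to open-code it. A hedged sketch of an enqueue/dequeue pair built on these helpers; the qdisc and its private data are placeholders, not code from this tree:

    /* Sketch: a qdisc keeping packets on a private list and delegating all
     * qlen/backlog/bstats bookkeeping to the helpers, which pick the
     * per-CPU or the plain counters as appropriate.
     */
    struct example_sched_data {                     /* hypothetical privdata */
            struct qdisc_skb_head q;
    };

    static int example_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                               struct sk_buff **to_free)
    {
            struct example_sched_data *priv = qdisc_priv(sch);

            __qdisc_enqueue_tail(skb, &priv->q);
            qdisc_update_stats_at_enqueue(sch, qdisc_pkt_len(skb));
            return NET_XMIT_SUCCESS;
    }

    static struct sk_buff *example_dequeue(struct Qdisc *sch)
    {
            struct example_sched_data *priv = qdisc_priv(sch);
            struct sk_buff *skb = __qdisc_dequeue_head(&priv->q);

            if (skb)
                    qdisc_update_stats_at_dequeue(sch, skb);
            return skb;
    }
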
@@ -988,8 +1171,13 @@

         if (skb) {
                 skb = __skb_dequeue(&sch->gso_skb);
-                qdisc_qstats_backlog_dec(sch, skb);
-                sch->q.qlen--;
+                if (qdisc_is_percpu_stats(sch)) {
+                        qdisc_qstats_cpu_backlog_dec(sch, skb);
+                        qdisc_qstats_cpu_qlen_dec(sch);
+                } else {
+                        qdisc_qstats_backlog_dec(sch, skb);
+                        sch->q.qlen--;
+                }
         } else {
                 skb = sch->dequeue(sch);
         }
@@ -1016,7 +1204,6 @@
 static inline void qdisc_reset_queue(struct Qdisc *sch)
 {
         __qdisc_reset_queue(&sch->q);
-        sch->qstats.backlog = 0;
 }

 static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
@@ -1027,13 +1214,8 @@
         sch_tree_lock(sch);
         old = *pold;
         *pold = new;
-        if (old != NULL) {
-                unsigned int qlen = old->q.qlen;
-                unsigned int backlog = old->qstats.backlog;
-
-                qdisc_reset(old);
-                qdisc_tree_reduce_backlog(old, qlen, backlog);
-        }
+        if (old != NULL)
+                qdisc_purge_queue(old);
         sch_tree_unlock(sch);

         return old;
@@ -1134,6 +1316,7 @@
  */
 struct mini_Qdisc {
         struct tcf_proto *filter_list;
+        struct tcf_block *block;
         struct gnet_stats_basic_cpu __percpu *cpu_bstats;
         struct gnet_stats_queue __percpu *cpu_qstats;
         struct rcu_head rcu;
@@ -1160,18 +1343,12 @@
                                           struct tcf_proto *tp_head);
 void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc,
                           struct mini_Qdisc __rcu **p_miniq);
+void mini_qdisc_pair_block_init(struct mini_Qdisc_pair *miniqp,
+                                struct tcf_block *block);

-static inline void skb_tc_reinsert(struct sk_buff *skb, struct tcf_result *res)
+static inline int skb_tc_reinsert(struct sk_buff *skb, struct tcf_result *res)
 {
-        struct gnet_stats_queue *stats = res->qstats;
-        int ret;
-
-        if (res->ingress)
-                ret = netif_receive_skb(skb);
-        else
-                ret = dev_queue_xmit(skb);
-        if (ret && stats)
-                qstats_overlimit_inc(res->qstats);
+        return res->ingress ? netif_receive_skb(skb) : dev_queue_xmit(skb);
 }

 #endif