2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/include/net/sch_generic.h
@@ -10,18 +10,23 @@
 #include <linux/percpu.h>
 #include <linux/dynamic_queue_limits.h>
 #include <linux/list.h>
+#include <net/net_seq_lock.h>
 #include <linux/refcount.h>
 #include <linux/workqueue.h>
+#include <linux/mutex.h>
+#include <linux/rwsem.h>
+#include <linux/atomic.h>
+#include <linux/hashtable.h>
+#include <linux/android_kabi.h>
 #include <net/gen_stats.h>
 #include <net/rtnetlink.h>
+#include <net/flow_offload.h>
 
 struct Qdisc_ops;
 struct qdisc_walker;
 struct tcf_walker;
 struct module;
-
-typedef int tc_setup_cb_t(enum tc_setup_type type,
-                          void *type_data, void *cb_priv);
+struct bpf_flow_keys;
 
 struct qdisc_rate_table {
         struct tc_ratespec rate;
@@ -33,6 +38,7 @@
 enum qdisc_state_t {
         __QDISC_STATE_SCHED,
         __QDISC_STATE_DEACTIVATED,
+        __QDISC_STATE_MISSED,
 };
 
 struct qdisc_size_table {
@@ -47,10 +53,7 @@
 struct qdisc_skb_head {
         struct sk_buff *head;
         struct sk_buff *tail;
-        union {
-                u32 qlen;
-                atomic_t atomic_qlen;
-        };
+        __u32 qlen;
         spinlock_t lock;
 };
 
@@ -91,7 +94,7 @@
         struct net_rate_estimator __rcu *rate_est;
         struct gnet_stats_basic_cpu __percpu *cpu_bstats;
         struct gnet_stats_queue __percpu *cpu_qstats;
-        int padded;
+        int pad;
         refcount_t refcnt;
 
         /*
@@ -100,7 +103,7 @@
         struct sk_buff_head gso_skb ____cacheline_aligned_in_smp;
         struct qdisc_skb_head q;
         struct gnet_stats_basic_packed bstats;
-        seqcount_t running;
+        net_seqlock_t running;
         struct gnet_stats_queue qstats;
         unsigned long state;
         struct Qdisc *next_sched;
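
Note: net_seqlock_t comes from the new <net/net_seq_lock.h> header carried by the PREEMPT_RT patch set; its definition is not part of this diff. A rough sketch of what the type amounts to, inferred only from how qdisc->running is used in the hunks below (qdisc->running.lock and write_sequnlock() on RT, raw_read_seqcount() otherwise):

/* Sketch only, not the actual header: under PREEMPT_RT the Qdisc
 * "running" seqcount is wrapped in a full seqlock_t so there is a
 * spinlock to take; non-RT builds keep the plain seqcount_t and the
 * existing code compiles unchanged.
 */
#ifdef CONFIG_PREEMPT_RT
# define net_seqlock_t seqlock_t
#else
# define net_seqlock_t seqcount_t
#endif
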
@@ -108,9 +111,15 @@
 
         spinlock_t busylock ____cacheline_aligned_in_smp;
         spinlock_t seqlock;
-#ifndef __GENKSYMS__
+
+        /* for NOLOCK qdisc, true if there are no enqueued skbs */
+        bool empty;
         struct rcu_head rcu;
-#endif
+
+        ANDROID_KABI_RESERVE(1);
+
+        /* private data */
+        long privdata[] ____cacheline_aligned;
 };
 
 static inline void qdisc_refcount_inc(struct Qdisc *qdisc)
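
The new empty flag is the cheap emptiness hint used by lockless (TCQ_F_NOLOCK) qdiscs, and the trailing privdata[] flexible array marks where a scheduler's per-instance private data (sized by Qdisc_ops->priv_size) begins. A minimal sketch of how an implementation reaches that area through the existing qdisc_priv() helper; the foo_* names are made up for illustration:

#include <net/sch_generic.h>

/* Hypothetical per-qdisc state, for illustration only. */
struct foo_sched_data {
        u32 limit;
};

static int foo_init(struct Qdisc *sch, struct nlattr *opt,
                    struct netlink_ext_ack *extack)
{
        /* qdisc_priv() returns the private area that follows struct Qdisc,
         * i.e. the region the new privdata[] member names.
         */
        struct foo_sched_data *q = qdisc_priv(sch);

        q->limit = 1000;
        return 0;
}
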
@@ -137,30 +146,95 @@
 {
         if (qdisc->flags & TCQ_F_NOLOCK)
                 return spin_is_locked(&qdisc->seqlock);
+#ifdef CONFIG_PREEMPT_RT
+        return spin_is_locked(&qdisc->running.lock) ? true : false;
+#else
         return (raw_read_seqcount(&qdisc->running) & 1) ? true : false;
+#endif
+}
+
+static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
+{
+        return q->flags & TCQ_F_CPUSTATS;
+}
+
+static inline bool qdisc_is_empty(const struct Qdisc *qdisc)
+{
+        if (qdisc_is_percpu_stats(qdisc))
+                return READ_ONCE(qdisc->empty);
+        return !READ_ONCE(qdisc->q.qlen);
 }
 
 static inline bool qdisc_run_begin(struct Qdisc *qdisc)
 {
         if (qdisc->flags & TCQ_F_NOLOCK) {
+                if (spin_trylock(&qdisc->seqlock))
+                        goto nolock_empty;
+
+                /* No need to insist if the MISSED flag was already set.
+                 * Note that test_and_set_bit() also gives us memory ordering
+                 * guarantees wrt potential earlier enqueue() and below
+                 * spin_trylock(), both of which are necessary to prevent races
+                 */
+                if (test_and_set_bit(__QDISC_STATE_MISSED, &qdisc->state))
+                        return false;
+
+                /* Try to take the lock again to make sure that we will either
+                 * grab it or the CPU that still has it will see MISSED set
+                 * when testing it in qdisc_run_end()
+                 */
                 if (!spin_trylock(&qdisc->seqlock))
                         return false;
+
+nolock_empty:
+                WRITE_ONCE(qdisc->empty, false);
         } else if (qdisc_is_running(qdisc)) {
                 return false;
         }
+#ifdef CONFIG_PREEMPT_RT
+        if (spin_trylock(&qdisc->running.lock)) {
+                seqcount_t *s = &qdisc->running.seqcount.seqcount;
+                /*
+                 * Variant of write_seqcount_t_begin() telling lockdep that a
+                 * trylock was attempted.
+                 */
+                raw_write_seqcount_t_begin(s);
+                seqcount_acquire(&s->dep_map, 0, 1, _RET_IP_);
+                return true;
+        }
+        return false;
+#else
         /* Variant of write_seqcount_begin() telling lockdep a trylock
          * was attempted.
          */
         raw_write_seqcount_begin(&qdisc->running);
         seqcount_acquire(&qdisc->running.dep_map, 0, 1, _RET_IP_);
         return true;
+#endif
 }
 
 static inline void qdisc_run_end(struct Qdisc *qdisc)
 {
+#ifdef CONFIG_PREEMPT_RT
+        write_sequnlock(&qdisc->running);
+#else
         write_seqcount_end(&qdisc->running);
-        if (qdisc->flags & TCQ_F_NOLOCK)
+#endif
+        if (qdisc->flags & TCQ_F_NOLOCK) {
                 spin_unlock(&qdisc->seqlock);
+
+                /* spin_unlock() only has store-release semantic. The unlock
+                 * and test_bit() ordering is a store-load ordering, so a full
+                 * memory barrier is needed here.
+                 */
+                smp_mb();
+
+                if (unlikely(test_bit(__QDISC_STATE_MISSED,
+                                      &qdisc->state))) {
+                        clear_bit(__QDISC_STATE_MISSED, &qdisc->state);
+                        __netif_schedule(qdisc);
+                }
+        }
 }
 
 static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
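
The new __QDISC_STATE_MISSED handling closes a race in the NOLOCK path: a CPU that loses the seqlock trylock records that it missed a run, and whoever holds the lock reschedules the qdisc from qdisc_run_end(). Callers are unchanged and keep the usual begin/run/end pattern; a short sketch of that pattern, mirroring qdisc_run() in <net/pkt_sched.h>:

#include <net/pkt_sched.h>

static void example_run(struct Qdisc *q)
{
        /* Only one CPU wins qdisc_run_begin(); a loser either bails out
         * because MISSED was already set, or sets MISSED so the winner's
         * qdisc_run_end() calls __netif_schedule() on its behalf.
         */
        if (qdisc_run_begin(q)) {
                __qdisc_run(q);
                qdisc_run_end(q);
        }
}
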
@@ -179,6 +253,7 @@
 }
 
 struct Qdisc_class_ops {
+        unsigned int flags;
         /* Child qdisc manipulation */
         struct netdev_queue * (*select_queue)(struct Qdisc *, struct tcmsg *);
         int (*graft)(struct Qdisc *, unsigned long cl,
@@ -208,6 +283,15 @@
                      struct sk_buff *skb, struct tcmsg*);
         int (*dump_stats)(struct Qdisc *, unsigned long,
                           struct gnet_dump *);
+
+        ANDROID_KABI_RESERVE(1);
+};
+
+/* Qdisc_class_ops flag values */
+
+/* Implements API that doesn't require rtnl lock */
+enum qdisc_class_ops_flags {
+        QDISC_CLASS_OPS_DOIT_UNLOCKED = 1,
 };
 
 struct Qdisc_ops {
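
The new flags field lets a classful qdisc advertise that its class doit operations are safe without the rtnl lock. A minimal, purely illustrative sketch of the opt-in (a real scheduler also fills in graft/leaf/find/walk/dump and friends):

#include <net/sch_generic.h>

/* Illustrative only: class doit handlers may run without rtnl. */
static const struct Qdisc_class_ops foo_class_ops = {
        .flags = QDISC_CLASS_OPS_DOIT_UNLOCKED,
};
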
@@ -244,6 +328,8 @@
         u32 (*egress_block_get)(struct Qdisc *sch);
 
         struct module *owner;
+
+        ANDROID_KABI_RESERVE(1);
 };
 
 
@@ -255,7 +341,7 @@
         };
         const struct tcf_proto *goto_tp;
 
-        /* used by the TC_ACT_REINSERT action */
+        /* used in the skb_tc_reinsert function */
         struct {
                 bool ingress;
                 struct gnet_stats_queue *qstats;
@@ -273,22 +359,29 @@
                                     const struct tcf_proto *,
                                     struct tcf_result *);
         int (*init)(struct tcf_proto*);
-        void (*destroy)(struct tcf_proto *tp,
+        void (*destroy)(struct tcf_proto *tp, bool rtnl_held,
                         struct netlink_ext_ack *extack);
 
         void* (*get)(struct tcf_proto*, u32 handle);
+        void (*put)(struct tcf_proto *tp, void *f);
         int (*change)(struct net *net, struct sk_buff *,
                       struct tcf_proto*, unsigned long,
                       u32 handle, struct nlattr **,
-                      void **, bool,
+                      void **, bool, bool,
                       struct netlink_ext_ack *);
         int (*delete)(struct tcf_proto *tp, void *arg,
-                      bool *last,
+                      bool *last, bool rtnl_held,
                       struct netlink_ext_ack *);
-        void (*walk)(struct tcf_proto*, struct tcf_walker *arg);
+        bool (*delete_empty)(struct tcf_proto *tp);
+        void (*walk)(struct tcf_proto *tp,
+                     struct tcf_walker *arg, bool rtnl_held);
         int (*reoffload)(struct tcf_proto *tp, bool add,
-                         tc_setup_cb_t *cb, void *cb_priv,
+                         flow_setup_cb_t *cb, void *cb_priv,
                          struct netlink_ext_ack *extack);
+        void (*hw_add)(struct tcf_proto *tp,
+                       void *type_data);
+        void (*hw_del)(struct tcf_proto *tp,
+                       void *type_data);
         void (*bind_class)(void *, u32, unsigned long,
                            void *, unsigned long);
         void * (*tmplt_create)(struct net *net,
@@ -299,12 +392,26 @@
 
         /* rtnetlink specific */
         int (*dump)(struct net*, struct tcf_proto*, void *,
-                    struct sk_buff *skb, struct tcmsg*);
+                    struct sk_buff *skb, struct tcmsg*,
+                    bool);
+        int (*terse_dump)(struct net *net,
+                          struct tcf_proto *tp, void *fh,
+                          struct sk_buff *skb,
+                          struct tcmsg *t, bool rtnl_held);
         int (*tmplt_dump)(struct sk_buff *skb,
                           struct net *net,
                           void *tmplt_priv);
 
         struct module *owner;
+        int flags;
+};
+
+/* Classifiers setting TCF_PROTO_OPS_DOIT_UNLOCKED in tcf_proto_ops->flags
+ * are expected to implement tcf_proto_ops->delete_empty(), otherwise race
+ * conditions can occur when filters are inserted/deleted simultaneously.
+ */
+enum tcf_proto_ops_flags {
+        TCF_PROTO_OPS_DOIT_UNLOCKED = 1,
 };
 
 struct tcf_proto {
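
Classifier callbacks now carry an rtnl_held argument, and a classifier that sets TCF_PROTO_OPS_DOIT_UNLOCKED is expected to provide ->delete_empty(), per the comment above. A hedged sketch of the opt-in; the foo names and foo_head layout are invented for illustration (cls_flower in the same tree is a real unlocked classifier):

#include <linux/module.h>
#include <net/sch_generic.h>

/* Hypothetical per-tcf_proto head, for illustration only. */
struct foo_head {
        struct list_head filters;
};

static bool foo_delete_empty(struct tcf_proto *tp)
{
        struct foo_head *head = rcu_dereference_protected(tp->root, 1);

        /* Tell the core whether the last filter is gone, so it can destroy
         * the tcf_proto without taking the rtnl lock.
         */
        return !head || list_empty(&head->filters);
}

static struct tcf_proto_ops cls_foo_ops __read_mostly = {
        .kind         = "foo",
        .flags        = TCF_PROTO_OPS_DOIT_UNLOCKED,
        .delete_empty = foo_delete_empty,
        .owner        = THIS_MODULE,
};
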
@@ -323,20 +430,32 @@
         void *data;
         const struct tcf_proto_ops *ops;
         struct tcf_chain *chain;
+        /* Lock protects tcf_proto shared state and can be used by unlocked
+         * classifiers to protect their private data.
+         */
+        spinlock_t lock;
+        bool deleting;
+        refcount_t refcnt;
         struct rcu_head rcu;
+        struct hlist_node destroy_ht_node;
 };
 
 struct qdisc_skb_cb {
-        unsigned int pkt_len;
-        u16 slave_dev_queue_mapping;
-        u16 tc_classid;
+        struct {
+                unsigned int pkt_len;
+                u16 slave_dev_queue_mapping;
+                u16 tc_classid;
+        };
 #define QDISC_CB_PRIV_LEN 20
         unsigned char data[QDISC_CB_PRIV_LEN];
+        u16 mru;
 };
 
 typedef void tcf_chain_head_change_t(struct tcf_proto *tp_head, void *priv);
 
 struct tcf_chain {
+        /* Protects filter_chain. */
+        struct mutex filter_chain_lock;
         struct tcf_proto __rcu *filter_chain;
         struct list_head list;
         struct tcf_block *block;
@@ -344,64 +463,78 @@
         unsigned int refcnt;
         unsigned int action_refcnt;
         bool explicitly_created;
+        bool flushing;
         const struct tcf_proto_ops *tmplt_ops;
         void *tmplt_priv;
+        struct rcu_head rcu;
 };
 
 struct tcf_block {
+        /* Lock protects tcf_block and lifetime-management data of chains
+         * attached to the block (refcnt, action_refcnt, explicitly_created).
+         */
+        struct mutex lock;
         struct list_head chain_list;
         u32 index; /* block index for shared blocks */
-        unsigned int refcnt;
+        u32 classid; /* which class this block belongs to */
+        refcount_t refcnt;
         struct net *net;
         struct Qdisc *q;
-        struct list_head cb_list;
+        struct rw_semaphore cb_lock; /* protects cb_list and offload counters */
+        struct flow_block flow_block;
         struct list_head owner_list;
         bool keep_dst;
-        unsigned int offloadcnt; /* Number of oddloaded filters */
+        atomic_t offloadcnt; /* Number of oddloaded filters */
         unsigned int nooffloaddevcnt; /* Number of devs unable to do offload */
+        unsigned int lockeddevcnt; /* Number of devs that require rtnl lock. */
         struct {
                 struct tcf_chain *chain;
                 struct list_head filter_chain_list;
         } chain0;
+        struct rcu_head rcu;
+        DECLARE_HASHTABLE(proto_destroy_ht, 7);
+        struct mutex proto_destroy_lock; /* Lock for proto_destroy hashtable. */
 };
 
-static inline void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
+#ifdef CONFIG_PROVE_LOCKING
+static inline bool lockdep_tcf_chain_is_locked(struct tcf_chain *chain)
 {
-        if (*flags & TCA_CLS_FLAGS_IN_HW)
-                return;
-        *flags |= TCA_CLS_FLAGS_IN_HW;
-        block->offloadcnt++;
+        return lockdep_is_held(&chain->filter_chain_lock);
 }
 
-static inline void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
+static inline bool lockdep_tcf_proto_is_locked(struct tcf_proto *tp)
 {
-        if (!(*flags & TCA_CLS_FLAGS_IN_HW))
-                return;
-        *flags &= ~TCA_CLS_FLAGS_IN_HW;
-        block->offloadcnt--;
+        return lockdep_is_held(&tp->lock);
+}
+#else
+static inline bool lockdep_tcf_chain_is_locked(struct tcf_block *chain)
+{
+        return true;
 }
 
-static inline void
-tc_cls_offload_cnt_update(struct tcf_block *block, unsigned int *cnt,
-                          u32 *flags, bool add)
+static inline bool lockdep_tcf_proto_is_locked(struct tcf_proto *tp)
 {
-        if (add) {
-                if (!*cnt)
-                        tcf_block_offload_inc(block, flags);
-                (*cnt)++;
-        } else {
-                (*cnt)--;
-                if (!*cnt)
-                        tcf_block_offload_dec(block, flags);
-        }
+        return true;
 }
+#endif /* #ifdef CONFIG_PROVE_LOCKING */
+
+#define tcf_chain_dereference(p, chain) \
+        rcu_dereference_protected(p, lockdep_tcf_chain_is_locked(chain))
+
+#define tcf_proto_dereference(p, tp) \
+        rcu_dereference_protected(p, lockdep_tcf_proto_is_locked(tp))
 
 static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
 {
         struct qdisc_skb_cb *qcb;
 
-        BUILD_BUG_ON(sizeof(skb->cb) < offsetof(struct qdisc_skb_cb, data) + sz);
+        BUILD_BUG_ON(sizeof(skb->cb) < sizeof(*qcb));
         BUILD_BUG_ON(sizeof(qcb->data) < sz);
+}
+
+static inline int qdisc_qlen_cpu(const struct Qdisc *q)
+{
+        return this_cpu_ptr(q->cpu_qstats)->qlen;
 }
 
 static inline int qdisc_qlen(const struct Qdisc *q)
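
The lockdep helpers and the two dereference macros document which lock guards which RCU pointer: the chain's filter list is written under chain->filter_chain_lock, and unlocked classifiers use tp->lock for their own state. A small usage sketch (the walker itself is hypothetical, not code from the tree):

#include <net/sch_generic.h>

/* Illustrative only: walking the filter list while holding the chain
 * mutex, which is exactly the condition tcf_chain_dereference() asserts.
 */
static int foo_count_protos(struct tcf_chain *chain)
{
        struct tcf_proto *tp;
        int n = 0;

        mutex_lock(&chain->filter_chain_lock);
        for (tp = tcf_chain_dereference(chain->filter_chain, chain);
             tp; tp = tcf_chain_dereference(tp->next, chain))
                n++;
        mutex_unlock(&chain->filter_chain_lock);

        return n;
}
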
@@ -409,14 +542,17 @@
         return q->q.qlen;
 }
 
-static inline u32 qdisc_qlen_sum(const struct Qdisc *q)
+static inline int qdisc_qlen_sum(const struct Qdisc *q)
 {
-        u32 qlen = q->qstats.qlen;
+        __u32 qlen = q->qstats.qlen;
+        int i;
 
-        if (q->flags & TCQ_F_NOLOCK)
-                qlen += atomic_read(&q->q.atomic_qlen);
-        else
+        if (qdisc_is_percpu_stats(q)) {
+                for_each_possible_cpu(i)
+                        qlen += per_cpu_ptr(q->cpu_qstats, i)->qlen;
+        } else {
                 qlen += q->q.qlen;
+        }
 
         return qlen;
 }
@@ -475,7 +611,7 @@
         return qdisc_lock(root);
 }
 
-static inline seqcount_t *qdisc_root_sleeping_running(const struct Qdisc *qdisc)
+static inline net_seqlock_t *qdisc_root_sleeping_running(const struct Qdisc *qdisc)
 {
         struct Qdisc *root = qdisc_root_sleeping(qdisc);
 
@@ -573,8 +709,31 @@
 void qdisc_reset(struct Qdisc *qdisc);
 void qdisc_put(struct Qdisc *qdisc);
 void qdisc_put_unlocked(struct Qdisc *qdisc);
-void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, unsigned int n,
-                               unsigned int len);
+void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, int n, int len);
+#ifdef CONFIG_NET_SCHED
+int qdisc_offload_dump_helper(struct Qdisc *q, enum tc_setup_type type,
+                              void *type_data);
+void qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
+                                struct Qdisc *new, struct Qdisc *old,
+                                enum tc_setup_type type, void *type_data,
+                                struct netlink_ext_ack *extack);
+#else
+static inline int
+qdisc_offload_dump_helper(struct Qdisc *q, enum tc_setup_type type,
+                          void *type_data)
+{
+        q->flags &= ~TCQ_F_OFFLOADED;
+        return 0;
+}
+
+static inline void
+qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
+                           struct Qdisc *new, struct Qdisc *old,
+                           enum tc_setup_type type, void *type_data,
+                           struct netlink_ext_ack *extack)
+{
+}
+#endif
 struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
                           const struct Qdisc_ops *ops,
                           struct netlink_ext_ack *extack);
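
qdisc_offload_dump_helper() gives hardware-offloadable qdiscs a common way to refresh TCQ_F_OFFLOADED (and pull stats) at dump time, with a no-op stub when CONFIG_NET_SCHED is disabled. A sketch of the call pattern, loosely modeled on how sch_red uses it; treat the field details as an approximation and check the real callers before copying:

#include <net/pkt_cls.h>

static int example_dump_offload_stats(struct Qdisc *sch)
{
        struct tc_red_qopt_offload hw_stats = {
                .command = TC_RED_STATS,
                .handle  = sch->handle,
                .parent  = sch->parent,
        };

        hw_stats.stats.bstats = &sch->bstats;
        hw_stats.stats.qstats = &sch->qstats;

        /* Asks the driver via ndo_setup_tc(); the helper clears
         * TCQ_F_OFFLOADED on failure so the dump reflects reality.
         */
        return qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_RED, &hw_stats);
}
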
@@ -585,22 +744,6 @@
 void __qdisc_calculate_pkt_len(struct sk_buff *skb,
                                const struct qdisc_size_table *stab);
 int skb_do_redirect(struct sk_buff *);
-
-static inline void skb_reset_tc(struct sk_buff *skb)
-{
-#ifdef CONFIG_NET_CLS_ACT
-        skb->tc_redirected = 0;
-#endif
-}
-
-static inline bool skb_is_tc_redirected(const struct sk_buff *skb)
-{
-#ifdef CONFIG_NET_CLS_ACT
-        return skb->tc_redirected;
-#else
-        return false;
-#endif
-}
 
 static inline bool skb_at_tc_ingress(const struct sk_buff *skb)
 {
@@ -637,11 +780,6 @@
         }
 }
 
-static inline void qdisc_reset_all_tx(struct net_device *dev)
-{
-        qdisc_reset_all_tx_gt(dev, 0);
-}
-
 /* Are all TX queues of the device empty? */
 static inline bool qdisc_all_tx_empty(const struct net_device *dev)
 {
@@ -652,7 +790,7 @@
                 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
                 const struct Qdisc *q = rcu_dereference(txq->qdisc);
 
-                if (q->q.qlen) {
+                if (!qdisc_is_empty(q)) {
                         rcu_read_unlock();
                         return false;
                 }
@@ -722,11 +860,6 @@
         return sch->enqueue(skb, sch, to_free);
 }
 
-static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
-{
-        return q->flags & TCQ_F_CPUSTATS;
-}
-
 static inline void _bstats_update(struct gnet_stats_basic_packed *bstats,
                                   __u64 bytes, __u32 packets)
 {
@@ -794,14 +927,14 @@
         this_cpu_add(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
 }
 
-static inline void qdisc_qstats_atomic_qlen_inc(struct Qdisc *sch)
+static inline void qdisc_qstats_cpu_qlen_inc(struct Qdisc *sch)
 {
-        atomic_inc(&sch->q.atomic_qlen);
+        this_cpu_inc(sch->cpu_qstats->qlen);
 }
 
-static inline void qdisc_qstats_atomic_qlen_dec(struct Qdisc *sch)
+static inline void qdisc_qstats_cpu_qlen_dec(struct Qdisc *sch)
 {
-        atomic_dec(&sch->q.atomic_qlen);
+        this_cpu_dec(sch->cpu_qstats->qlen);
 }
 
 static inline void qdisc_qstats_cpu_requeues_inc(struct Qdisc *sch)
@@ -839,6 +972,41 @@
         sch->qstats.overlimits++;
 }
 
+static inline int qdisc_qstats_copy(struct gnet_dump *d, struct Qdisc *sch)
+{
+        __u32 qlen = qdisc_qlen_sum(sch);
+
+        return gnet_stats_copy_queue(d, sch->cpu_qstats, &sch->qstats, qlen);
+}
+
+static inline void qdisc_qstats_qlen_backlog(struct Qdisc *sch, __u32 *qlen,
+                                             __u32 *backlog)
+{
+        struct gnet_stats_queue qstats = { 0 };
+        __u32 len = qdisc_qlen_sum(sch);
+
+        __gnet_stats_copy_queue(&qstats, sch->cpu_qstats, &sch->qstats, len);
+        *qlen = qstats.qlen;
+        *backlog = qstats.backlog;
+}
+
+static inline void qdisc_tree_flush_backlog(struct Qdisc *sch)
+{
+        __u32 qlen, backlog;
+
+        qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
+        qdisc_tree_reduce_backlog(sch, qlen, backlog);
+}
+
+static inline void qdisc_purge_queue(struct Qdisc *sch)
+{
+        __u32 qlen, backlog;
+
+        qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
+        qdisc_reset(sch);
+        qdisc_tree_reduce_backlog(sch, qlen, backlog);
+}
+
 static inline void qdisc_skb_head_init(struct qdisc_skb_head *qh)
 {
         qh->head = NULL;
@@ -846,8 +1014,8 @@
         qh->qlen = 0;
 }
 
-static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
-                                       struct qdisc_skb_head *qh)
+static inline void __qdisc_enqueue_tail(struct sk_buff *skb,
+                                        struct qdisc_skb_head *qh)
 {
         struct sk_buff *last = qh->tail;
 
@@ -860,14 +1028,24 @@
                 qh->head = skb;
         }
         qh->qlen++;
-        qdisc_qstats_backlog_inc(sch, skb);
-
-        return NET_XMIT_SUCCESS;
 }
 
 static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
 {
-        return __qdisc_enqueue_tail(skb, sch, &sch->q);
+        __qdisc_enqueue_tail(skb, &sch->q);
+        qdisc_qstats_backlog_inc(sch, skb);
+        return NET_XMIT_SUCCESS;
+}
+
+static inline void __qdisc_enqueue_head(struct sk_buff *skb,
+                                        struct qdisc_skb_head *qh)
+{
+        skb->next = qh->head;
+
+        if (!qh->head)
+                qh->tail = skb;
+        qh->head = skb;
+        qh->qlen++;
 }
 
 static inline struct sk_buff *__qdisc_dequeue_head(struct qdisc_skb_head *qh)
@@ -933,12 +1111,6 @@
         return 0;
 }
 
-static inline unsigned int qdisc_queue_drop_head(struct Qdisc *sch,
-                                                 struct sk_buff **to_free)
-{
-        return __qdisc_queue_drop_head(sch, &sch->q, to_free);
-}
-
 static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
 {
         const struct qdisc_skb_head *qh = &sch->q;
@@ -966,6 +1138,32 @@
         return skb;
 }
 
+static inline void qdisc_update_stats_at_dequeue(struct Qdisc *sch,
+                                                 struct sk_buff *skb)
+{
+        if (qdisc_is_percpu_stats(sch)) {
+                qdisc_qstats_cpu_backlog_dec(sch, skb);
+                qdisc_bstats_cpu_update(sch, skb);
+                qdisc_qstats_cpu_qlen_dec(sch);
+        } else {
+                qdisc_qstats_backlog_dec(sch, skb);
+                qdisc_bstats_update(sch, skb);
+                sch->q.qlen--;
+        }
+}
+
+static inline void qdisc_update_stats_at_enqueue(struct Qdisc *sch,
+                                                 unsigned int pkt_len)
+{
+        if (qdisc_is_percpu_stats(sch)) {
+                qdisc_qstats_cpu_qlen_inc(sch);
+                this_cpu_add(sch->cpu_qstats->backlog, pkt_len);
+        } else {
+                sch->qstats.backlog += pkt_len;
+                sch->q.qlen++;
+        }
+}
+
 /* use instead of qdisc->dequeue() for all qdiscs queried with ->peek() */
 static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
 {
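
qdisc_update_stats_at_enqueue()/..._at_dequeue() keep qlen, backlog and bstats consistent for per-CPU-stats (lockless) qdiscs without touching the shared counters. A hedged sketch of how an enqueue/dequeue pair uses them; a real TCQ_F_NOLOCK qdisc keeps its packets in a per-CPU or ring backend, the plain skb list here only keeps the sketch short:

#include <net/sch_generic.h>

static int example_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                           struct sk_buff **to_free)
{
        /* Capture the length first; the skb must not be touched once a
         * concurrent dequeue can see it.
         */
        unsigned int pkt_len = qdisc_pkt_len(skb);

        __qdisc_enqueue_tail(skb, &sch->q);
        qdisc_update_stats_at_enqueue(sch, pkt_len);
        return NET_XMIT_SUCCESS;
}

static struct sk_buff *example_dequeue(struct Qdisc *sch)
{
        struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);

        if (skb)
                qdisc_update_stats_at_dequeue(sch, skb);
        else
                WRITE_ONCE(sch->empty, true);
        return skb;
}
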
@@ -973,8 +1171,13 @@
 
         if (skb) {
                 skb = __skb_dequeue(&sch->gso_skb);
-                qdisc_qstats_backlog_dec(sch, skb);
-                sch->q.qlen--;
+                if (qdisc_is_percpu_stats(sch)) {
+                        qdisc_qstats_cpu_backlog_dec(sch, skb);
+                        qdisc_qstats_cpu_qlen_dec(sch);
+                } else {
+                        qdisc_qstats_backlog_dec(sch, skb);
+                        sch->q.qlen--;
+                }
         } else {
                 skb = sch->dequeue(sch);
         }
@@ -1001,7 +1204,6 @@
 static inline void qdisc_reset_queue(struct Qdisc *sch)
 {
         __qdisc_reset_queue(&sch->q);
-        sch->qstats.backlog = 0;
 }
 
 static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
@@ -1012,13 +1214,8 @@
         sch_tree_lock(sch);
         old = *pold;
         *pold = new;
-        if (old != NULL) {
-                unsigned int qlen = old->q.qlen;
-                unsigned int backlog = old->qstats.backlog;
-
-                qdisc_reset(old);
-                qdisc_tree_reduce_backlog(old, qlen, backlog);
-        }
+        if (old != NULL)
+                qdisc_purge_queue(old);
         sch_tree_unlock(sch);
 
         return old;
@@ -1119,6 +1316,7 @@
  */
 struct mini_Qdisc {
         struct tcf_proto *filter_list;
+        struct tcf_block *block;
         struct gnet_stats_basic_cpu __percpu *cpu_bstats;
         struct gnet_stats_queue __percpu *cpu_qstats;
         struct rcu_head rcu;
@@ -1145,18 +1343,12 @@
                                 struct tcf_proto *tp_head);
 void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc,
                           struct mini_Qdisc __rcu **p_miniq);
+void mini_qdisc_pair_block_init(struct mini_Qdisc_pair *miniqp,
+                                struct tcf_block *block);
 
-static inline void skb_tc_reinsert(struct sk_buff *skb, struct tcf_result *res)
+static inline int skb_tc_reinsert(struct sk_buff *skb, struct tcf_result *res)
 {
-        struct gnet_stats_queue *stats = res->qstats;
-        int ret;
-
-        if (res->ingress)
-                ret = netif_receive_skb(skb);
-        else
-                ret = dev_queue_xmit(skb);
-        if (ret && stats)
-                qstats_overlimit_inc(res->qstats);
+        return res->ingress ? netif_receive_skb(skb) : dev_queue_xmit(skb);
 }
 
 #endif
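
skb_tc_reinsert() now simply hands the skb back to the stack and returns the rx/tx status instead of bumping overlimits itself, leaving the accounting policy to the caller. One way a caller could react to a failed reinsert, shown purely as an illustration (the real handling lives in the TC_ACT_REINSERT path in net/core/dev.c):

#include <net/sch_generic.h>

static void example_reinsert(struct sk_buff *skb, struct tcf_result *res)
{
        /* Illustrative only: account a failed reinsert the way the old
         * inline helper did, but now under the caller's control.
         */
        if (skb_tc_reinsert(skb, res) && res->qstats)
                qstats_overlimit_inc(res->qstats);
}
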