From 244b2c5ca8b14627e4a17755e5922221e121c771 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Wed, 09 Oct 2024 06:15:07 +0000
Subject: [PATCH] net: sched: update sch_generic.h for lockless qdiscs and rtnl-unlocked classifiers
---
kernel/include/net/sch_generic.h | 405 +++++++++++++++++++++++++++++++++++++++++----------------
 1 file changed, 288 insertions(+), 117 deletions(-)
diff --git a/kernel/include/net/sch_generic.h b/kernel/include/net/sch_generic.h
index f571833..c9b9482 100644
--- a/kernel/include/net/sch_generic.h
+++ b/kernel/include/net/sch_generic.h
@@ -12,16 +12,20 @@
#include <linux/list.h>
#include <linux/refcount.h>
#include <linux/workqueue.h>
+#include <linux/mutex.h>
+#include <linux/rwsem.h>
+#include <linux/atomic.h>
+#include <linux/hashtable.h>
+#include <linux/android_kabi.h>
#include <net/gen_stats.h>
#include <net/rtnetlink.h>
+#include <net/flow_offload.h>
struct Qdisc_ops;
struct qdisc_walker;
struct tcf_walker;
struct module;
-
-typedef int tc_setup_cb_t(enum tc_setup_type type,
- void *type_data, void *cb_priv);
+struct bpf_flow_keys;
struct qdisc_rate_table {
struct tc_ratespec rate;
@@ -33,6 +37,7 @@
enum qdisc_state_t {
__QDISC_STATE_SCHED,
__QDISC_STATE_DEACTIVATED,
+ __QDISC_STATE_MISSED,
};
struct qdisc_size_table {
@@ -47,10 +52,7 @@
struct qdisc_skb_head {
struct sk_buff *head;
struct sk_buff *tail;
- union {
- u32 qlen;
- atomic_t atomic_qlen;
- };
+ __u32 qlen;
spinlock_t lock;
};
@@ -91,7 +93,7 @@
struct net_rate_estimator __rcu *rate_est;
struct gnet_stats_basic_cpu __percpu *cpu_bstats;
struct gnet_stats_queue __percpu *cpu_qstats;
- int padded;
+ int pad;
refcount_t refcnt;
/*
@@ -108,9 +110,15 @@
spinlock_t busylock ____cacheline_aligned_in_smp;
spinlock_t seqlock;
-#ifndef __GENKSYMS__
+
+ /* for NOLOCK qdisc, true if there are no enqueued skbs */
+ bool empty;
struct rcu_head rcu;
-#endif
+
+ ANDROID_KABI_RESERVE(1);
+
+ /* private data */
+ long privdata[] ____cacheline_aligned;
};
static inline void qdisc_refcount_inc(struct Qdisc *qdisc)
@@ -140,11 +148,41 @@
return (raw_read_seqcount(&qdisc->running) & 1) ? true : false;
}
+static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
+{
+ return q->flags & TCQ_F_CPUSTATS;
+}
+
+static inline bool qdisc_is_empty(const struct Qdisc *qdisc)
+{
+ if (qdisc_is_percpu_stats(qdisc))
+ return READ_ONCE(qdisc->empty);
+ return !READ_ONCE(qdisc->q.qlen);
+}
+
static inline bool qdisc_run_begin(struct Qdisc *qdisc)
{
if (qdisc->flags & TCQ_F_NOLOCK) {
+ if (spin_trylock(&qdisc->seqlock))
+ goto nolock_empty;
+
+ /* No need to insist if the MISSED flag was already set.
+ * Note that test_and_set_bit() also gives us memory ordering
+ * guarantees wrt potential earlier enqueue() and below
+ * spin_trylock(), both of which are necessary to prevent races
+ */
+ if (test_and_set_bit(__QDISC_STATE_MISSED, &qdisc->state))
+ return false;
+
+ /* Try to take the lock again to make sure that we will either
+ * grab it or the CPU that still has it will see MISSED set
+ * when testing it in qdisc_run_end()
+ */
if (!spin_trylock(&qdisc->seqlock))
return false;
+
+nolock_empty:
+ WRITE_ONCE(qdisc->empty, false);
} else if (qdisc_is_running(qdisc)) {
return false;
}
@@ -159,8 +197,21 @@
static inline void qdisc_run_end(struct Qdisc *qdisc)
{
write_seqcount_end(&qdisc->running);
- if (qdisc->flags & TCQ_F_NOLOCK)
+ if (qdisc->flags & TCQ_F_NOLOCK) {
spin_unlock(&qdisc->seqlock);
+
+ /* spin_unlock() only has store-release semantic. The unlock
+ * and test_bit() ordering is a store-load ordering, so a full
+ * memory barrier is needed here.
+ */
+ smp_mb();
+
+ if (unlikely(test_bit(__QDISC_STATE_MISSED,
+ &qdisc->state))) {
+ clear_bit(__QDISC_STATE_MISSED, &qdisc->state);
+ __netif_schedule(qdisc);
+ }
+ }
}
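
The MISSED bit added above closes a lost-wakeup race between a CPU that
is draining a NOLOCK qdisc and a CPU whose enqueue lost the seqlock
race. A schematic interleaving of the handshake (CPU labels are
illustrative; the calls are the ones in the two functions above):

/* CPU0 (owns qdisc->seqlock, dequeuing)    CPU1 (has just enqueued)
 *
 * qdisc_run_begin()
 *   spin_trylock() -> success                qdisc_run_begin()
 *   ... dequeue loop ...                       spin_trylock() -> fails
 *                                              test_and_set_bit(MISSED)
 *                                              spin_trylock() -> fails
 *                                              return false; CPU1 moves on
 * qdisc_run_end()
 *   spin_unlock(&qdisc->seqlock)
 *   smp_mb()   <- orders the unlock store before the test_bit() load,
 *                 so CPU0 cannot miss the flag CPU1 just set
 *   test_bit(MISSED) -> true
 *   clear_bit(MISSED)
 *   __netif_schedule(qdisc)  -> CPU1's skb is not stranded
 */
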
static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
@@ -179,6 +230,7 @@
}
struct Qdisc_class_ops {
+ unsigned int flags;
/* Child qdisc manipulation */
struct netdev_queue * (*select_queue)(struct Qdisc *, struct tcmsg *);
int (*graft)(struct Qdisc *, unsigned long cl,
@@ -208,6 +260,15 @@
struct sk_buff *skb, struct tcmsg*);
int (*dump_stats)(struct Qdisc *, unsigned long,
struct gnet_dump *);
+
+ ANDROID_KABI_RESERVE(1);
+};
+
+/* Qdisc_class_ops flag values */
+
+/* Implements API that doesn't require rtnl lock */
+enum qdisc_class_ops_flags {
+ QDISC_CLASS_OPS_DOIT_UNLOCKED = 1,
};
struct Qdisc_ops {
@@ -244,6 +305,8 @@
u32 (*egress_block_get)(struct Qdisc *sch);
struct module *owner;
+
+ ANDROID_KABI_RESERVE(1);
};
@@ -255,7 +318,7 @@
};
const struct tcf_proto *goto_tp;
- /* used by the TC_ACT_REINSERT action */
+ /* used in the skb_tc_reinsert function */
struct {
bool ingress;
struct gnet_stats_queue *qstats;
@@ -273,22 +336,29 @@
const struct tcf_proto *,
struct tcf_result *);
int (*init)(struct tcf_proto*);
- void (*destroy)(struct tcf_proto *tp,
+ void (*destroy)(struct tcf_proto *tp, bool rtnl_held,
struct netlink_ext_ack *extack);
void* (*get)(struct tcf_proto*, u32 handle);
+ void (*put)(struct tcf_proto *tp, void *f);
int (*change)(struct net *net, struct sk_buff *,
struct tcf_proto*, unsigned long,
u32 handle, struct nlattr **,
- void **, bool,
+ void **, bool, bool,
struct netlink_ext_ack *);
int (*delete)(struct tcf_proto *tp, void *arg,
- bool *last,
+ bool *last, bool rtnl_held,
struct netlink_ext_ack *);
- void (*walk)(struct tcf_proto*, struct tcf_walker *arg);
+ bool (*delete_empty)(struct tcf_proto *tp);
+ void (*walk)(struct tcf_proto *tp,
+ struct tcf_walker *arg, bool rtnl_held);
int (*reoffload)(struct tcf_proto *tp, bool add,
- tc_setup_cb_t *cb, void *cb_priv,
+ flow_setup_cb_t *cb, void *cb_priv,
struct netlink_ext_ack *extack);
+ void (*hw_add)(struct tcf_proto *tp,
+ void *type_data);
+ void (*hw_del)(struct tcf_proto *tp,
+ void *type_data);
void (*bind_class)(void *, u32, unsigned long,
void *, unsigned long);
void * (*tmplt_create)(struct net *net,
@@ -299,12 +369,26 @@
/* rtnetlink specific */
int (*dump)(struct net*, struct tcf_proto*, void *,
- struct sk_buff *skb, struct tcmsg*);
+ struct sk_buff *skb, struct tcmsg*,
+ bool);
+ int (*terse_dump)(struct net *net,
+ struct tcf_proto *tp, void *fh,
+ struct sk_buff *skb,
+ struct tcmsg *t, bool rtnl_held);
int (*tmplt_dump)(struct sk_buff *skb,
struct net *net,
void *tmplt_priv);
struct module *owner;
+ int flags;
+};
+
+/* Classifiers setting TCF_PROTO_OPS_DOIT_UNLOCKED in tcf_proto_ops->flags
+ * are expected to implement tcf_proto_ops->delete_empty(), otherwise race
+ * conditions can occur when filters are inserted/deleted simultaneously.
+ */
+enum tcf_proto_ops_flags {
+ TCF_PROTO_OPS_DOIT_UNLOCKED = 1,
};
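
A classifier opting into the unlocked API advertises the flag and, per
the comment above, must supply ->delete_empty(). A minimal skeleton of
what that looks like (names are invented for illustration; the head
layout assumes the common idr-based pattern used by existing
classifiers):

/* Hypothetical classifier private state, reached via tp->root. */
struct foo_head {
	struct idr handle_idr;
	struct rcu_head rcu;
};

static bool foo_delete_empty(struct tcf_proto *tp)
{
	struct foo_head *head = rcu_dereference_protected(tp->root, 1);

	/* Returning true lets the core prune this tcf_proto from the chain. */
	return !head || idr_is_empty(&head->handle_idr);
}

static struct tcf_proto_ops cls_foo_ops __read_mostly = {
	.kind		= "foo",
	.flags		= TCF_PROTO_OPS_DOIT_UNLOCKED,
	.delete_empty	= foo_delete_empty,
	.owner		= THIS_MODULE,
};
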
struct tcf_proto {
@@ -323,20 +407,32 @@
void *data;
const struct tcf_proto_ops *ops;
struct tcf_chain *chain;
+ /* Lock protects tcf_proto shared state and can be used by unlocked
+ * classifiers to protect their private data.
+ */
+ spinlock_t lock;
+ bool deleting;
+ refcount_t refcnt;
struct rcu_head rcu;
+ struct hlist_node destroy_ht_node;
};
struct qdisc_skb_cb {
- unsigned int pkt_len;
- u16 slave_dev_queue_mapping;
- u16 tc_classid;
+ struct {
+ unsigned int pkt_len;
+ u16 slave_dev_queue_mapping;
+ u16 tc_classid;
+ };
#define QDISC_CB_PRIV_LEN 20
unsigned char data[QDISC_CB_PRIV_LEN];
+ u16 mru;
};
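
The anonymous struct and the new mru field keep the public part of the
cb separate from the private data[] area that individual qdiscs overlay
with their own per-packet state. The usual access pattern, in the style
of existing users such as netem (struct name below is illustrative):

struct foo_skb_cb {
	u64 time_to_send;
};

static inline struct foo_skb_cb *foo_skb_cb(struct sk_buff *skb)
{
	/* The BUILD_BUG_ONs in qdisc_cb_private_validate() enforce
	 * that this fits inside qdisc_skb_cb::data.
	 */
	qdisc_cb_private_validate(skb, sizeof(struct foo_skb_cb));
	return (struct foo_skb_cb *)qdisc_skb_cb(skb)->data;
}
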
typedef void tcf_chain_head_change_t(struct tcf_proto *tp_head, void *priv);
struct tcf_chain {
+ /* Protects filter_chain. */
+ struct mutex filter_chain_lock;
struct tcf_proto __rcu *filter_chain;
struct list_head list;
struct tcf_block *block;
@@ -344,64 +440,78 @@
unsigned int refcnt;
unsigned int action_refcnt;
bool explicitly_created;
+ bool flushing;
const struct tcf_proto_ops *tmplt_ops;
void *tmplt_priv;
+ struct rcu_head rcu;
};
struct tcf_block {
+ /* Lock protects tcf_block and lifetime-management data of chains
+ * attached to the block (refcnt, action_refcnt, explicitly_created).
+ */
+ struct mutex lock;
struct list_head chain_list;
u32 index; /* block index for shared blocks */
- unsigned int refcnt;
+ u32 classid; /* which class this block belongs to */
+ refcount_t refcnt;
struct net *net;
struct Qdisc *q;
- struct list_head cb_list;
+ struct rw_semaphore cb_lock; /* protects cb_list and offload counters */
+ struct flow_block flow_block;
struct list_head owner_list;
bool keep_dst;
- unsigned int offloadcnt; /* Number of oddloaded filters */
+ atomic_t offloadcnt; /* Number of offloaded filters */
unsigned int nooffloaddevcnt; /* Number of devs unable to do offload */
+ unsigned int lockeddevcnt; /* Number of devs that require rtnl lock. */
struct {
struct tcf_chain *chain;
struct list_head filter_chain_list;
} chain0;
+ struct rcu_head rcu;
+ DECLARE_HASHTABLE(proto_destroy_ht, 7);
+ struct mutex proto_destroy_lock; /* Lock for proto_destroy hashtable. */
};
-static inline void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
+#ifdef CONFIG_PROVE_LOCKING
+static inline bool lockdep_tcf_chain_is_locked(struct tcf_chain *chain)
{
- if (*flags & TCA_CLS_FLAGS_IN_HW)
- return;
- *flags |= TCA_CLS_FLAGS_IN_HW;
- block->offloadcnt++;
+ return lockdep_is_held(&chain->filter_chain_lock);
}
-static inline void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
+static inline bool lockdep_tcf_proto_is_locked(struct tcf_proto *tp)
{
- if (!(*flags & TCA_CLS_FLAGS_IN_HW))
- return;
- *flags &= ~TCA_CLS_FLAGS_IN_HW;
- block->offloadcnt--;
+ return lockdep_is_held(&tp->lock);
+}
+#else
+static inline bool lockdep_tcf_chain_is_locked(struct tcf_chain *chain)
+{
+ return true;
}
-static inline void
-tc_cls_offload_cnt_update(struct tcf_block *block, unsigned int *cnt,
- u32 *flags, bool add)
+static inline bool lockdep_tcf_proto_is_locked(struct tcf_proto *tp)
{
- if (add) {
- if (!*cnt)
- tcf_block_offload_inc(block, flags);
- (*cnt)++;
- } else {
- (*cnt)--;
- if (!*cnt)
- tcf_block_offload_dec(block, flags);
- }
+ return true;
}
+#endif /* #ifdef CONFIG_PROVE_LOCKING */
+
+#define tcf_chain_dereference(p, chain) \
+ rcu_dereference_protected(p, lockdep_tcf_chain_is_locked(chain))
+
+#define tcf_proto_dereference(p, tp) \
+ rcu_dereference_protected(p, lockdep_tcf_proto_is_locked(tp))
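
These wrappers let code that already holds the new mutexes or tp->lock
walk RCU-protected pointers without rcu_read_lock(), while keeping
lockdep coverage when CONFIG_PROVE_LOCKING is set. A minimal usage
sketch (function name invented; the caller is assumed to hold
chain->filter_chain_lock):

static struct tcf_proto *foo_first_filter(struct tcf_chain *chain)
{
	/* Lockdep verifies filter_chain_lock is really held. */
	return tcf_chain_dereference(chain->filter_chain, chain);
}
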
static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
{
struct qdisc_skb_cb *qcb;
- BUILD_BUG_ON(sizeof(skb->cb) < offsetof(struct qdisc_skb_cb, data) + sz);
+ BUILD_BUG_ON(sizeof(skb->cb) < sizeof(*qcb));
BUILD_BUG_ON(sizeof(qcb->data) < sz);
+}
+
+static inline int qdisc_qlen_cpu(const struct Qdisc *q)
+{
+ return this_cpu_ptr(q->cpu_qstats)->qlen;
}
static inline int qdisc_qlen(const struct Qdisc *q)
@@ -409,14 +519,17 @@
return q->q.qlen;
}
-static inline u32 qdisc_qlen_sum(const struct Qdisc *q)
+static inline int qdisc_qlen_sum(const struct Qdisc *q)
{
- u32 qlen = q->qstats.qlen;
+ __u32 qlen = q->qstats.qlen;
+ int i;
- if (q->flags & TCQ_F_NOLOCK)
- qlen += atomic_read(&q->q.atomic_qlen);
- else
+ if (qdisc_is_percpu_stats(q)) {
+ for_each_possible_cpu(i)
+ qlen += per_cpu_ptr(q->cpu_qstats, i)->qlen;
+ } else {
qlen += q->q.qlen;
+ }
return qlen;
}
@@ -573,8 +686,31 @@
void qdisc_reset(struct Qdisc *qdisc);
void qdisc_put(struct Qdisc *qdisc);
void qdisc_put_unlocked(struct Qdisc *qdisc);
-void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, unsigned int n,
- unsigned int len);
+void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, int n, int len);
+#ifdef CONFIG_NET_SCHED
+int qdisc_offload_dump_helper(struct Qdisc *q, enum tc_setup_type type,
+ void *type_data);
+void qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
+ struct Qdisc *new, struct Qdisc *old,
+ enum tc_setup_type type, void *type_data,
+ struct netlink_ext_ack *extack);
+#else
+static inline int
+qdisc_offload_dump_helper(struct Qdisc *q, enum tc_setup_type type,
+ void *type_data)
+{
+ q->flags &= ~TCQ_F_OFFLOADED;
+ return 0;
+}
+
+static inline void
+qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
+ struct Qdisc *new, struct Qdisc *old,
+ enum tc_setup_type type, void *type_data,
+ struct netlink_ext_ack *extack)
+{
+}
+#endif
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
const struct Qdisc_ops *ops,
struct netlink_ext_ack *extack);
@@ -585,22 +721,6 @@
void __qdisc_calculate_pkt_len(struct sk_buff *skb,
const struct qdisc_size_table *stab);
int skb_do_redirect(struct sk_buff *);
-
-static inline void skb_reset_tc(struct sk_buff *skb)
-{
-#ifdef CONFIG_NET_CLS_ACT
- skb->tc_redirected = 0;
-#endif
-}
-
-static inline bool skb_is_tc_redirected(const struct sk_buff *skb)
-{
-#ifdef CONFIG_NET_CLS_ACT
- return skb->tc_redirected;
-#else
- return false;
-#endif
-}
static inline bool skb_at_tc_ingress(const struct sk_buff *skb)
{
@@ -637,11 +757,6 @@
}
}
-static inline void qdisc_reset_all_tx(struct net_device *dev)
-{
- qdisc_reset_all_tx_gt(dev, 0);
-}
-
/* Are all TX queues of the device empty? */
static inline bool qdisc_all_tx_empty(const struct net_device *dev)
{
@@ -652,7 +767,7 @@
struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
const struct Qdisc *q = rcu_dereference(txq->qdisc);
- if (q->q.qlen) {
+ if (!qdisc_is_empty(q)) {
rcu_read_unlock();
return false;
}
@@ -722,11 +837,6 @@
return sch->enqueue(skb, sch, to_free);
}
-static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
-{
- return q->flags & TCQ_F_CPUSTATS;
-}
-
static inline void _bstats_update(struct gnet_stats_basic_packed *bstats,
__u64 bytes, __u32 packets)
{
@@ -794,14 +904,14 @@
this_cpu_add(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
}
-static inline void qdisc_qstats_atomic_qlen_inc(struct Qdisc *sch)
+static inline void qdisc_qstats_cpu_qlen_inc(struct Qdisc *sch)
{
- atomic_inc(&sch->q.atomic_qlen);
+ this_cpu_inc(sch->cpu_qstats->qlen);
}
-static inline void qdisc_qstats_atomic_qlen_dec(struct Qdisc *sch)
+static inline void qdisc_qstats_cpu_qlen_dec(struct Qdisc *sch)
{
- atomic_dec(&sch->q.atomic_qlen);
+ this_cpu_dec(sch->cpu_qstats->qlen);
}
static inline void qdisc_qstats_cpu_requeues_inc(struct Qdisc *sch)
@@ -839,6 +949,41 @@
sch->qstats.overlimits++;
}
+static inline int qdisc_qstats_copy(struct gnet_dump *d, struct Qdisc *sch)
+{
+ __u32 qlen = qdisc_qlen_sum(sch);
+
+ return gnet_stats_copy_queue(d, sch->cpu_qstats, &sch->qstats, qlen);
+}
+
+static inline void qdisc_qstats_qlen_backlog(struct Qdisc *sch, __u32 *qlen,
+ __u32 *backlog)
+{
+ struct gnet_stats_queue qstats = { 0 };
+ __u32 len = qdisc_qlen_sum(sch);
+
+ __gnet_stats_copy_queue(&qstats, sch->cpu_qstats, &sch->qstats, len);
+ *qlen = qstats.qlen;
+ *backlog = qstats.backlog;
+}
+
+static inline void qdisc_tree_flush_backlog(struct Qdisc *sch)
+{
+ __u32 qlen, backlog;
+
+ qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
+ qdisc_tree_reduce_backlog(sch, qlen, backlog);
+}
+
+static inline void qdisc_purge_queue(struct Qdisc *sch)
+{
+ __u32 qlen, backlog;
+
+ qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
+ qdisc_reset(sch);
+ qdisc_tree_reduce_backlog(sch, qlen, backlog);
+}
+
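
qdisc_tree_flush_backlog() and qdisc_purge_queue() centralize the
"snapshot counters, reset, propagate to ancestors" sequence so that
per-CPU qlen/backlog are summed correctly. A classful qdisc tearing
down a child would use them roughly like this (class struct is
hypothetical):

struct foo_class {
	struct Qdisc *leaf;	/* child qdisc of this class */
};

static void foo_destroy_class(struct Qdisc *sch, struct foo_class *cl)
{
	/* Drop the child's packets and fix up ancestor qlen/backlog. */
	qdisc_purge_queue(cl->leaf);
	qdisc_put(cl->leaf);
}
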
static inline void qdisc_skb_head_init(struct qdisc_skb_head *qh)
{
qh->head = NULL;
@@ -846,8 +991,8 @@
qh->qlen = 0;
}
-static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
- struct qdisc_skb_head *qh)
+static inline void __qdisc_enqueue_tail(struct sk_buff *skb,
+ struct qdisc_skb_head *qh)
{
struct sk_buff *last = qh->tail;
@@ -860,14 +1005,24 @@
qh->head = skb;
}
qh->qlen++;
- qdisc_qstats_backlog_inc(sch, skb);
-
- return NET_XMIT_SUCCESS;
}
static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
{
- return __qdisc_enqueue_tail(skb, sch, &sch->q);
+ __qdisc_enqueue_tail(skb, &sch->q);
+ qdisc_qstats_backlog_inc(sch, skb);
+ return NET_XMIT_SUCCESS;
+}
+
+static inline void __qdisc_enqueue_head(struct sk_buff *skb,
+ struct qdisc_skb_head *qh)
+{
+ skb->next = qh->head;
+
+ if (!qh->head)
+ qh->tail = skb;
+ qh->head = skb;
+ qh->qlen++;
}
static inline struct sk_buff *__qdisc_dequeue_head(struct qdisc_skb_head *qh)
@@ -933,12 +1088,6 @@
return 0;
}
-static inline unsigned int qdisc_queue_drop_head(struct Qdisc *sch,
- struct sk_buff **to_free)
-{
- return __qdisc_queue_drop_head(sch, &sch->q, to_free);
-}
-
static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
{
const struct qdisc_skb_head *qh = &sch->q;
@@ -966,6 +1115,32 @@
return skb;
}
+static inline void qdisc_update_stats_at_dequeue(struct Qdisc *sch,
+ struct sk_buff *skb)
+{
+ if (qdisc_is_percpu_stats(sch)) {
+ qdisc_qstats_cpu_backlog_dec(sch, skb);
+ qdisc_bstats_cpu_update(sch, skb);
+ qdisc_qstats_cpu_qlen_dec(sch);
+ } else {
+ qdisc_qstats_backlog_dec(sch, skb);
+ qdisc_bstats_update(sch, skb);
+ sch->q.qlen--;
+ }
+}
+
+static inline void qdisc_update_stats_at_enqueue(struct Qdisc *sch,
+ unsigned int pkt_len)
+{
+ if (qdisc_is_percpu_stats(sch)) {
+ qdisc_qstats_cpu_qlen_inc(sch);
+ this_cpu_add(sch->cpu_qstats->backlog, pkt_len);
+ } else {
+ sch->qstats.backlog += pkt_len;
+ sch->q.qlen++;
+ }
+}
+
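
The two helpers above pick per-CPU counters for TCQ_F_CPUSTATS qdiscs
and the qdisc-lock-protected ones otherwise. An enqueue path would use
them roughly as follows (a sketch without a drop path; a real NOLOCK
qdisc would also need a lockless queue such as a ptr_ring rather than
sch->q, and pkt_len is read before queuing because another CPU may
dequeue and free the skb as soon as it is visible):

static int foo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	unsigned int pkt_len = qdisc_pkt_len(skb);

	__qdisc_enqueue_tail(skb, &sch->q);
	qdisc_update_stats_at_enqueue(sch, pkt_len);
	return NET_XMIT_SUCCESS;
}
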
/* use instead of qdisc->dequeue() for all qdiscs queried with ->peek() */
static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
{
@@ -973,8 +1148,13 @@
if (skb) {
skb = __skb_dequeue(&sch->gso_skb);
- qdisc_qstats_backlog_dec(sch, skb);
- sch->q.qlen--;
+ if (qdisc_is_percpu_stats(sch)) {
+ qdisc_qstats_cpu_backlog_dec(sch, skb);
+ qdisc_qstats_cpu_qlen_dec(sch);
+ } else {
+ qdisc_qstats_backlog_dec(sch, skb);
+ sch->q.qlen--;
+ }
} else {
skb = sch->dequeue(sch);
}
@@ -1001,7 +1181,6 @@
static inline void qdisc_reset_queue(struct Qdisc *sch)
{
__qdisc_reset_queue(&sch->q);
- sch->qstats.backlog = 0;
}
static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
@@ -1012,13 +1191,8 @@
sch_tree_lock(sch);
old = *pold;
*pold = new;
- if (old != NULL) {
- unsigned int qlen = old->q.qlen;
- unsigned int backlog = old->qstats.backlog;
-
- qdisc_reset(old);
- qdisc_tree_reduce_backlog(old, qlen, backlog);
- }
+ if (old != NULL)
+ qdisc_purge_queue(old);
sch_tree_unlock(sch);
return old;
@@ -1119,6 +1293,7 @@
*/
struct mini_Qdisc {
struct tcf_proto *filter_list;
+ struct tcf_block *block;
struct gnet_stats_basic_cpu __percpu *cpu_bstats;
struct gnet_stats_queue __percpu *cpu_qstats;
struct rcu_head rcu;
@@ -1145,18 +1320,14 @@
struct tcf_proto *tp_head);
void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc,
struct mini_Qdisc __rcu **p_miniq);
+void mini_qdisc_pair_block_init(struct mini_Qdisc_pair *miniqp,
+ struct tcf_block *block);
-static inline void skb_tc_reinsert(struct sk_buff *skb, struct tcf_result *res)
+/* Make sure qdisc is no longer in SCHED state. */
+static inline void qdisc_synchronize(const struct Qdisc *q)
{
- struct gnet_stats_queue *stats = res->qstats;
- int ret;
-
- if (res->ingress)
- ret = netif_receive_skb(skb);
- else
- ret = dev_queue_xmit(skb);
- if (ret && stats)
- qstats_overlimit_inc(res->qstats);
+ while (test_bit(__QDISC_STATE_SCHED, &q->state))
+ msleep(1);
}
#endif
--
Gitblit v1.6.2