  .. |   .. |
  12 |   12 |  #include <linux/list.h>
  13 |   13 |  #include <linux/refcount.h>
  14 |   14 |  #include <linux/workqueue.h>
     |   15 | +#include <linux/mutex.h>
     |   16 | +#include <linux/rwsem.h>
     |   17 | +#include <linux/atomic.h>
     |   18 | +#include <linux/hashtable.h>
     |   19 | +#include <linux/android_kabi.h>
  15 |   20 |  #include <net/gen_stats.h>
  16 |   21 |  #include <net/rtnetlink.h>
     |   22 | +#include <net/flow_offload.h>
  17 |   23 |
  18 |   24 |  struct Qdisc_ops;
  19 |   25 |  struct qdisc_walker;
  20 |   26 |  struct tcf_walker;
  21 |   27 |  struct module;
  22 |      | -
  23 |      | -typedef int tc_setup_cb_t(enum tc_setup_type type,
  24 |      | -                          void *type_data, void *cb_priv);
     |   28 | +struct bpf_flow_keys;
  25 |   29 |
  26 |   30 |  struct qdisc_rate_table {
  27 |   31 |          struct tc_ratespec rate;
  .. |   .. |
  33 |   37 |  enum qdisc_state_t {
  34 |   38 |          __QDISC_STATE_SCHED,
  35 |   39 |          __QDISC_STATE_DEACTIVATED,
     |   40 | +        __QDISC_STATE_MISSED,
  36 |   41 |  };
  37 |   42 |
  38 |   43 |  struct qdisc_size_table {
  .. |   .. |
  47 |   52 |  struct qdisc_skb_head {
  48 |   53 |          struct sk_buff *head;
  49 |   54 |          struct sk_buff *tail;
  50 |      | -        union {
  51 |      | -                u32 qlen;
  52 |      | -                atomic_t atomic_qlen;
  53 |      | -        };
     |   55 | +        __u32 qlen;
  54 |   56 |          spinlock_t lock;
  55 |   57 |  };
  56 |   58 |
  .. |   .. |
  91 |   93 |          struct net_rate_estimator __rcu *rate_est;
  92 |   94 |          struct gnet_stats_basic_cpu __percpu *cpu_bstats;
  93 |   95 |          struct gnet_stats_queue __percpu *cpu_qstats;
  94 |      | -        int padded;
     |   96 | +        int pad;
  95 |   97 |          refcount_t refcnt;
  96 |   98 |
  97 |   99 |          /*
  .. |   .. |
 108 |  110 |
 109 |  111 |          spinlock_t busylock ____cacheline_aligned_in_smp;
 110 |  112 |          spinlock_t seqlock;
 111 |      | -#ifndef __GENKSYMS__
     |  113 | +
     |  114 | +        /* for NOLOCK qdisc, true if there are no enqueued skbs */
     |  115 | +        bool empty;
 112 |  116 |          struct rcu_head rcu;
 113 |      | -#endif
     |  117 | +
     |  118 | +        ANDROID_KABI_RESERVE(1);
     |  119 | +
     |  120 | +        /* private data */
     |  121 | +        long privdata[] ____cacheline_aligned;
 114 |  122 |  };
 115 |  123 |
 116 |  124 |  static inline void qdisc_refcount_inc(struct Qdisc *qdisc)
  .. |   .. |
 140 |  148 |          return (raw_read_seqcount(&qdisc->running) & 1) ? true : false;
 141 |  149 |  }
 142 |  150 |
     |  151 | +static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
     |  152 | +{
     |  153 | +        return q->flags & TCQ_F_CPUSTATS;
     |  154 | +}
     |  155 | +
     |  156 | +static inline bool qdisc_is_empty(const struct Qdisc *qdisc)
     |  157 | +{
     |  158 | +        if (qdisc_is_percpu_stats(qdisc))
     |  159 | +                return READ_ONCE(qdisc->empty);
     |  160 | +        return !READ_ONCE(qdisc->q.qlen);
     |  161 | +}
     |  162 | +
 143 |  163 |  static inline bool qdisc_run_begin(struct Qdisc *qdisc)
 144 |  164 |  {
 145 |  165 |          if (qdisc->flags & TCQ_F_NOLOCK) {
     |  166 | +                if (spin_trylock(&qdisc->seqlock))
     |  167 | +                        goto nolock_empty;
     |  168 | +
     |  169 | +                /* No need to insist if the MISSED flag was already set.
     |  170 | +                 * Note that test_and_set_bit() also gives us memory ordering
     |  171 | +                 * guarantees wrt potential earlier enqueue() and below
     |  172 | +                 * spin_trylock(), both of which are necessary to prevent races
     |  173 | +                 */
     |  174 | +                if (test_and_set_bit(__QDISC_STATE_MISSED, &qdisc->state))
     |  175 | +                        return false;
     |  176 | +
     |  177 | +                /* Try to take the lock again to make sure that we will either
     |  178 | +                 * grab it or the CPU that still has it will see MISSED set
     |  179 | +                 * when testing it in qdisc_run_end()
     |  180 | +                 */
 146 |  181 |                  if (!spin_trylock(&qdisc->seqlock))
 147 |  182 |                          return false;
     |  183 | +
     |  184 | +nolock_empty:
     |  185 | +                WRITE_ONCE(qdisc->empty, false);
 148 |  186 |          } else if (qdisc_is_running(qdisc)) {
 149 |  187 |                  return false;
 150 |  188 |          }
  .. |   .. |
 159 |  197 |  static inline void qdisc_run_end(struct Qdisc *qdisc)
 160 |  198 |  {
 161 |  199 |          write_seqcount_end(&qdisc->running);
 162 |      | -        if (qdisc->flags & TCQ_F_NOLOCK)
     |  200 | +        if (qdisc->flags & TCQ_F_NOLOCK) {
 163 |  201 |                  spin_unlock(&qdisc->seqlock);
     |  202 | +
     |  203 | +                /* spin_unlock() only has store-release semantic. The unlock
     |  204 | +                 * and test_bit() ordering is a store-load ordering, so a full
     |  205 | +                 * memory barrier is needed here.
     |  206 | +                 */
     |  207 | +                smp_mb();
     |  208 | +
     |  209 | +                if (unlikely(test_bit(__QDISC_STATE_MISSED,
     |  210 | +                                      &qdisc->state))) {
     |  211 | +                        clear_bit(__QDISC_STATE_MISSED, &qdisc->state);
     |  212 | +                        __netif_schedule(qdisc);
     |  213 | +                }
     |  214 | +        }
 164 |  215 |  }
 165 |  216 |
 166 |  217 |  static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
  .. |   .. |
 179 |  230 |  }
 180 |  231 |
 181 |  232 |  struct Qdisc_class_ops {
     |  233 | +        unsigned int flags;
 182 |  234 |          /* Child qdisc manipulation */
 183 |  235 |          struct netdev_queue * (*select_queue)(struct Qdisc *, struct tcmsg *);
 184 |  236 |          int (*graft)(struct Qdisc *, unsigned long cl,
  .. |   .. |
 208 |  260 |                       struct sk_buff *skb, struct tcmsg*);
 209 |  261 |          int (*dump_stats)(struct Qdisc *, unsigned long,
 210 |  262 |                            struct gnet_dump *);
     |  263 | +
     |  264 | +        ANDROID_KABI_RESERVE(1);
     |  265 | +};
     |  266 | +
     |  267 | +/* Qdisc_class_ops flag values */
     |  268 | +
     |  269 | +/* Implements API that doesn't require rtnl lock */
     |  270 | +enum qdisc_class_ops_flags {
     |  271 | +        QDISC_CLASS_OPS_DOIT_UNLOCKED = 1,
 211 |  272 |  };
 212 |  273 |
 213 |  274 |  struct Qdisc_ops {
  .. |   .. |
 244 |  305 |          u32 (*egress_block_get)(struct Qdisc *sch);
 245 |  306 |
 246 |  307 |          struct module *owner;
     |  308 | +
     |  309 | +        ANDROID_KABI_RESERVE(1);
 247 |  310 |  };
 248 |  311 |
 249 |  312 |
  .. |   .. |
 255 |  318 |          };
 256 |  319 |          const struct tcf_proto *goto_tp;
 257 |  320 |
 258 |      | -        /* used by the TC_ACT_REINSERT action */
     |  321 | +        /* used in the skb_tc_reinsert function */
 259 |  322 |          struct {
 260 |  323 |                  bool ingress;
 261 |  324 |                  struct gnet_stats_queue *qstats;
  .. |   .. |
 273 |  336 |                             const struct tcf_proto *,
 274 |  337 |                             struct tcf_result *);
 275 |  338 |          int (*init)(struct tcf_proto*);
 276 |      | -        void (*destroy)(struct tcf_proto *tp,
     |  339 | +        void (*destroy)(struct tcf_proto *tp, bool rtnl_held,
 277 |  340 |                          struct netlink_ext_ack *extack);
 278 |  341 |
 279 |  342 |          void* (*get)(struct tcf_proto*, u32 handle);
     |  343 | +        void (*put)(struct tcf_proto *tp, void *f);
 280 |  344 |          int (*change)(struct net *net, struct sk_buff *,
 281 |  345 |                        struct tcf_proto*, unsigned long,
 282 |  346 |                        u32 handle, struct nlattr **,
 283 |      | -                      void **, bool,
     |  347 | +                      void **, bool, bool,
 284 |  348 |                        struct netlink_ext_ack *);
 285 |  349 |          int (*delete)(struct tcf_proto *tp, void *arg,
 286 |      | -                      bool *last,
     |  350 | +                      bool *last, bool rtnl_held,
 287 |  351 |                        struct netlink_ext_ack *);
 288 |      | -        void (*walk)(struct tcf_proto*, struct tcf_walker *arg);
     |  352 | +        bool (*delete_empty)(struct tcf_proto *tp);
     |  353 | +        void (*walk)(struct tcf_proto *tp,
     |  354 | +                     struct tcf_walker *arg, bool rtnl_held);
 289 |  355 |          int (*reoffload)(struct tcf_proto *tp, bool add,
 290 |      | -                         tc_setup_cb_t *cb, void *cb_priv,
     |  356 | +                         flow_setup_cb_t *cb, void *cb_priv,
 291 |  357 |                           struct netlink_ext_ack *extack);
     |  358 | +        void (*hw_add)(struct tcf_proto *tp,
     |  359 | +                       void *type_data);
     |  360 | +        void (*hw_del)(struct tcf_proto *tp,
     |  361 | +                       void *type_data);
 292 |  362 |          void (*bind_class)(void *, u32, unsigned long,
 293 |  363 |                             void *, unsigned long);
 294 |  364 |          void * (*tmplt_create)(struct net *net,
  .. |   .. |
 299 |  369 |
 300 |  370 |          /* rtnetlink specific */
 301 |  371 |          int (*dump)(struct net*, struct tcf_proto*, void *,
 302 |      | -                    struct sk_buff *skb, struct tcmsg*);
     |  372 | +                    struct sk_buff *skb, struct tcmsg*,
     |  373 | +                    bool);
     |  374 | +        int (*terse_dump)(struct net *net,
     |  375 | +                          struct tcf_proto *tp, void *fh,
     |  376 | +                          struct sk_buff *skb,
     |  377 | +                          struct tcmsg *t, bool rtnl_held);
 303 |  378 |          int (*tmplt_dump)(struct sk_buff *skb,
 304 |  379 |                            struct net *net,
 305 |  380 |                            void *tmplt_priv);
 306 |  381 |
 307 |  382 |          struct module *owner;
     |  383 | +        int flags;
     |  384 | +};
     |  385 | +
     |  386 | +/* Classifiers setting TCF_PROTO_OPS_DOIT_UNLOCKED in tcf_proto_ops->flags
     |  387 | + * are expected to implement tcf_proto_ops->delete_empty(), otherwise race
     |  388 | + * conditions can occur when filters are inserted/deleted simultaneously.
     |  389 | + */
     |  390 | +enum tcf_proto_ops_flags {
     |  391 | +        TCF_PROTO_OPS_DOIT_UNLOCKED = 1,
 308 |  392 |  };
 309 |  393 |
 310 |  394 |  struct tcf_proto {
  .. |   .. |
 323 |  407 |          void *data;
 324 |  408 |          const struct tcf_proto_ops *ops;
 325 |  409 |          struct tcf_chain *chain;
     |  410 | +        /* Lock protects tcf_proto shared state and can be used by unlocked
     |  411 | +         * classifiers to protect their private data.
     |  412 | +         */
     |  413 | +        spinlock_t lock;
     |  414 | +        bool deleting;
     |  415 | +        refcount_t refcnt;
 326 |  416 |          struct rcu_head rcu;
     |  417 | +        struct hlist_node destroy_ht_node;
 327 |  418 |  };
 328 |  419 |
 329 |  420 |  struct qdisc_skb_cb {
 330 |      | -        unsigned int pkt_len;
 331 |      | -        u16 slave_dev_queue_mapping;
 332 |      | -        u16 tc_classid;
     |  421 | +        struct {
     |  422 | +                unsigned int pkt_len;
     |  423 | +                u16 slave_dev_queue_mapping;
     |  424 | +                u16 tc_classid;
     |  425 | +        };
 333 |  426 |  #define QDISC_CB_PRIV_LEN 20
 334 |  427 |          unsigned char data[QDISC_CB_PRIV_LEN];
     |  428 | +        u16 mru;
 335 |  429 |  };
 336 |  430 |
 337 |  431 |  typedef void tcf_chain_head_change_t(struct tcf_proto *tp_head, void *priv);
 338 |  432 |
 339 |  433 |  struct tcf_chain {
     |  434 | +        /* Protects filter_chain. */
     |  435 | +        struct mutex filter_chain_lock;
 340 |  436 |          struct tcf_proto __rcu *filter_chain;
 341 |  437 |          struct list_head list;
 342 |  438 |          struct tcf_block *block;
  .. |   .. |
 344 |  440 |          unsigned int refcnt;
 345 |  441 |          unsigned int action_refcnt;
 346 |  442 |          bool explicitly_created;
     |  443 | +        bool flushing;
 347 |  444 |          const struct tcf_proto_ops *tmplt_ops;
 348 |  445 |          void *tmplt_priv;
     |  446 | +        struct rcu_head rcu;
 349 |  447 |  };
 350 |  448 |
 351 |  449 |  struct tcf_block {
     |  450 | +        /* Lock protects tcf_block and lifetime-management data of chains
     |  451 | +         * attached to the block (refcnt, action_refcnt, explicitly_created).
     |  452 | +         */
     |  453 | +        struct mutex lock;
 352 |  454 |          struct list_head chain_list;
 353 |  455 |          u32 index; /* block index for shared blocks */
 354 |      | -        unsigned int refcnt;
     |  456 | +        u32 classid; /* which class this block belongs to */
     |  457 | +        refcount_t refcnt;
 355 |  458 |          struct net *net;
 356 |  459 |          struct Qdisc *q;
 357 |      | -        struct list_head cb_list;
     |  460 | +        struct rw_semaphore cb_lock; /* protects cb_list and offload counters */
     |  461 | +        struct flow_block flow_block;
 358 |  462 |          struct list_head owner_list;
 359 |  463 |          bool keep_dst;
 360 |      | -        unsigned int offloadcnt; /* Number of oddloaded filters */
     |  464 | +        atomic_t offloadcnt; /* Number of oddloaded filters */
 361 |  465 |          unsigned int nooffloaddevcnt; /* Number of devs unable to do offload */
     |  466 | +        unsigned int lockeddevcnt; /* Number of devs that require rtnl lock. */
 362 |  467 |          struct {
 363 |  468 |                  struct tcf_chain *chain;
 364 |  469 |                  struct list_head filter_chain_list;
 365 |  470 |          } chain0;
     |  471 | +        struct rcu_head rcu;
     |  472 | +        DECLARE_HASHTABLE(proto_destroy_ht, 7);
     |  473 | +        struct mutex proto_destroy_lock; /* Lock for proto_destroy hashtable. */
 366 |  474 |  };
 367 |  475 |
 368 |      | -static inline void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
     |  476 | +#ifdef CONFIG_PROVE_LOCKING
     |  477 | +static inline bool lockdep_tcf_chain_is_locked(struct tcf_chain *chain)
 369 |  478 |  {
 370 |      | -        if (*flags & TCA_CLS_FLAGS_IN_HW)
 371 |      | -                return;
 372 |      | -        *flags |= TCA_CLS_FLAGS_IN_HW;
 373 |      | -        block->offloadcnt++;
     |  479 | +        return lockdep_is_held(&chain->filter_chain_lock);
 374 |  480 |  }
 375 |  481 |
 376 |      | -static inline void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
     |  482 | +static inline bool lockdep_tcf_proto_is_locked(struct tcf_proto *tp)
 377 |  483 |  {
 378 |      | -        if (!(*flags & TCA_CLS_FLAGS_IN_HW))
 379 |      | -                return;
 380 |      | -        *flags &= ~TCA_CLS_FLAGS_IN_HW;
 381 |      | -        block->offloadcnt--;
     |  484 | +        return lockdep_is_held(&tp->lock);
     |  485 | +}
     |  486 | +#else
     |  487 | +static inline bool lockdep_tcf_chain_is_locked(struct tcf_block *chain)
     |  488 | +{
     |  489 | +        return true;
 382 |  490 |  }
 383 |  491 |
 384 |      | -static inline void
 385 |      | -tc_cls_offload_cnt_update(struct tcf_block *block, unsigned int *cnt,
 386 |      | -                          u32 *flags, bool add)
     |  492 | +static inline bool lockdep_tcf_proto_is_locked(struct tcf_proto *tp)
 387 |  493 |  {
 388 |      | -        if (add) {
 389 |      | -                if (!*cnt)
 390 |      | -                        tcf_block_offload_inc(block, flags);
 391 |      | -                (*cnt)++;
 392 |      | -        } else {
 393 |      | -                (*cnt)--;
 394 |      | -                if (!*cnt)
 395 |      | -                        tcf_block_offload_dec(block, flags);
 396 |      | -        }
     |  494 | +        return true;
 397 |  495 |  }
     |  496 | +#endif /* #ifdef CONFIG_PROVE_LOCKING */
     |  497 | +
     |  498 | +#define tcf_chain_dereference(p, chain) \
     |  499 | +        rcu_dereference_protected(p, lockdep_tcf_chain_is_locked(chain))
     |  500 | +
     |  501 | +#define tcf_proto_dereference(p, tp) \
     |  502 | +        rcu_dereference_protected(p, lockdep_tcf_proto_is_locked(tp))
 398 |  503 |
 399 |  504 |  static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
 400 |  505 |  {
 401 |  506 |          struct qdisc_skb_cb *qcb;
 402 |  507 |
 403 |      | -        BUILD_BUG_ON(sizeof(skb->cb) < offsetof(struct qdisc_skb_cb, data) + sz);
     |  508 | +        BUILD_BUG_ON(sizeof(skb->cb) < sizeof(*qcb));
 404 |  509 |          BUILD_BUG_ON(sizeof(qcb->data) < sz);
     |  510 | +}
     |  511 | +
     |  512 | +static inline int qdisc_qlen_cpu(const struct Qdisc *q)
     |  513 | +{
     |  514 | +        return this_cpu_ptr(q->cpu_qstats)->qlen;
 405 |  515 |  }
 406 |  516 |
 407 |  517 |  static inline int qdisc_qlen(const struct Qdisc *q)
  .. |   .. |
 409 |  519 |          return q->q.qlen;
 410 |  520 |  }
 411 |  521 |
 412 |      | -static inline u32 qdisc_qlen_sum(const struct Qdisc *q)
     |  522 | +static inline int qdisc_qlen_sum(const struct Qdisc *q)
 413 |  523 |  {
 414 |      | -        u32 qlen = q->qstats.qlen;
     |  524 | +        __u32 qlen = q->qstats.qlen;
     |  525 | +        int i;
 415 |  526 |
 416 |      | -        if (q->flags & TCQ_F_NOLOCK)
 417 |      | -                qlen += atomic_read(&q->q.atomic_qlen);
 418 |      | -        else
     |  527 | +        if (qdisc_is_percpu_stats(q)) {
     |  528 | +                for_each_possible_cpu(i)
     |  529 | +                        qlen += per_cpu_ptr(q->cpu_qstats, i)->qlen;
     |  530 | +        } else {
 419 |  531 |                  qlen += q->q.qlen;
     |  532 | +        }
 420 |  533 |
 421 |  534 |          return qlen;
 422 |  535 |  }
  .. |   .. |
 573 |  686 |  void qdisc_reset(struct Qdisc *qdisc);
 574 |  687 |  void qdisc_put(struct Qdisc *qdisc);
 575 |  688 |  void qdisc_put_unlocked(struct Qdisc *qdisc);
 576 |      | -void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, unsigned int n,
 577 |      | -                               unsigned int len);
     |  689 | +void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, int n, int len);
     |  690 | +#ifdef CONFIG_NET_SCHED
     |  691 | +int qdisc_offload_dump_helper(struct Qdisc *q, enum tc_setup_type type,
     |  692 | +                              void *type_data);
     |  693 | +void qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
     |  694 | +                                struct Qdisc *new, struct Qdisc *old,
     |  695 | +                                enum tc_setup_type type, void *type_data,
     |  696 | +                                struct netlink_ext_ack *extack);
     |  697 | +#else
     |  698 | +static inline int
     |  699 | +qdisc_offload_dump_helper(struct Qdisc *q, enum tc_setup_type type,
     |  700 | +                          void *type_data)
     |  701 | +{
     |  702 | +        q->flags &= ~TCQ_F_OFFLOADED;
     |  703 | +        return 0;
     |  704 | +}
     |  705 | +
     |  706 | +static inline void
     |  707 | +qdisc_offload_graft_helper(struct net_device *dev, struct Qdisc *sch,
     |  708 | +                           struct Qdisc *new, struct Qdisc *old,
     |  709 | +                           enum tc_setup_type type, void *type_data,
     |  710 | +                           struct netlink_ext_ack *extack)
     |  711 | +{
     |  712 | +}
     |  713 | +#endif
 578 |  714 |  struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
 579 |  715 |                            const struct Qdisc_ops *ops,
 580 |  716 |                            struct netlink_ext_ack *extack);
  .. |   .. |
 585 |  721 |  void __qdisc_calculate_pkt_len(struct sk_buff *skb,
 586 |  722 |                                 const struct qdisc_size_table *stab);
 587 |  723 |  int skb_do_redirect(struct sk_buff *);
 588 |      | -
 589 |      | -static inline void skb_reset_tc(struct sk_buff *skb)
 590 |      | -{
 591 |      | -#ifdef CONFIG_NET_CLS_ACT
 592 |      | -        skb->tc_redirected = 0;
 593 |      | -#endif
 594 |      | -}
 595 |      | -
 596 |      | -static inline bool skb_is_tc_redirected(const struct sk_buff *skb)
 597 |      | -{
 598 |      | -#ifdef CONFIG_NET_CLS_ACT
 599 |      | -        return skb->tc_redirected;
 600 |      | -#else
 601 |      | -        return false;
 602 |      | -#endif
 603 |      | -}
 604 |  724 |
 605 |  725 |  static inline bool skb_at_tc_ingress(const struct sk_buff *skb)
 606 |  726 |  {
  .. |   .. |
 637 |  757 |          }
 638 |  758 |  }
 639 |  759 |
 640 |      | -static inline void qdisc_reset_all_tx(struct net_device *dev)
 641 |      | -{
 642 |      | -        qdisc_reset_all_tx_gt(dev, 0);
 643 |      | -}
 644 |      | -
 645 |  760 |  /* Are all TX queues of the device empty? */
 646 |  761 |  static inline bool qdisc_all_tx_empty(const struct net_device *dev)
 647 |  762 |  {
  .. |   .. |
 652 |  767 |                  struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
 653 |  768 |                  const struct Qdisc *q = rcu_dereference(txq->qdisc);
 654 |  769 |
 655 |      | -                if (q->q.qlen) {
     |  770 | +                if (!qdisc_is_empty(q)) {
 656 |  771 |                          rcu_read_unlock();
 657 |  772 |                          return false;
 658 |  773 |                  }
  .. |   .. |
 722 |  837 |          return sch->enqueue(skb, sch, to_free);
 723 |  838 |  }
 724 |  839 |
 725 |      | -static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
 726 |      | -{
 727 |      | -        return q->flags & TCQ_F_CPUSTATS;
 728 |      | -}
 729 |      | -
 730 |  840 |  static inline void _bstats_update(struct gnet_stats_basic_packed *bstats,
 731 |  841 |                                    __u64 bytes, __u32 packets)
 732 |  842 |  {
  .. |   .. |
 794 |  904 |          this_cpu_add(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
 795 |  905 |  }
 796 |  906 |
 797 |      | -static inline void qdisc_qstats_atomic_qlen_inc(struct Qdisc *sch)
     |  907 | +static inline void qdisc_qstats_cpu_qlen_inc(struct Qdisc *sch)
 798 |  908 |  {
 799 |      | -        atomic_inc(&sch->q.atomic_qlen);
     |  909 | +        this_cpu_inc(sch->cpu_qstats->qlen);
 800 |  910 |  }
 801 |  911 |
 802 |      | -static inline void qdisc_qstats_atomic_qlen_dec(struct Qdisc *sch)
     |  912 | +static inline void qdisc_qstats_cpu_qlen_dec(struct Qdisc *sch)
 803 |  913 |  {
 804 |      | -        atomic_dec(&sch->q.atomic_qlen);
     |  914 | +        this_cpu_dec(sch->cpu_qstats->qlen);
 805 |  915 |  }
 806 |  916 |
 807 |  917 |  static inline void qdisc_qstats_cpu_requeues_inc(struct Qdisc *sch)
  .. |   .. |
 839 |  949 |          sch->qstats.overlimits++;
 840 |  950 |  }
 841 |  951 |
     |  952 | +static inline int qdisc_qstats_copy(struct gnet_dump *d, struct Qdisc *sch)
     |  953 | +{
     |  954 | +        __u32 qlen = qdisc_qlen_sum(sch);
     |  955 | +
     |  956 | +        return gnet_stats_copy_queue(d, sch->cpu_qstats, &sch->qstats, qlen);
     |  957 | +}
     |  958 | +
     |  959 | +static inline void qdisc_qstats_qlen_backlog(struct Qdisc *sch, __u32 *qlen,
     |  960 | +                                             __u32 *backlog)
     |  961 | +{
     |  962 | +        struct gnet_stats_queue qstats = { 0 };
     |  963 | +        __u32 len = qdisc_qlen_sum(sch);
     |  964 | +
     |  965 | +        __gnet_stats_copy_queue(&qstats, sch->cpu_qstats, &sch->qstats, len);
     |  966 | +        *qlen = qstats.qlen;
     |  967 | +        *backlog = qstats.backlog;
     |  968 | +}
     |  969 | +
     |  970 | +static inline void qdisc_tree_flush_backlog(struct Qdisc *sch)
     |  971 | +{
     |  972 | +        __u32 qlen, backlog;
     |  973 | +
     |  974 | +        qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
     |  975 | +        qdisc_tree_reduce_backlog(sch, qlen, backlog);
     |  976 | +}
     |  977 | +
     |  978 | +static inline void qdisc_purge_queue(struct Qdisc *sch)
     |  979 | +{
     |  980 | +        __u32 qlen, backlog;
     |  981 | +
     |  982 | +        qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
     |  983 | +        qdisc_reset(sch);
     |  984 | +        qdisc_tree_reduce_backlog(sch, qlen, backlog);
     |  985 | +}
     |  986 | +
 842 |  987 |  static inline void qdisc_skb_head_init(struct qdisc_skb_head *qh)
 843 |  988 |  {
 844 |  989 |          qh->head = NULL;
  .. |   .. |
 846 |  991 |          qh->qlen = 0;
 847 |  992 |  }
 848 |  993 |
 849 |      | -static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
 850 |      | -                                       struct qdisc_skb_head *qh)
     |  994 | +static inline void __qdisc_enqueue_tail(struct sk_buff *skb,
     |  995 | +                                        struct qdisc_skb_head *qh)
 851 |  996 |  {
 852 |  997 |          struct sk_buff *last = qh->tail;
 853 |  998 |
  .. |   .. |
 860 | 1005 |                  qh->head = skb;
 861 | 1006 |          }
 862 | 1007 |          qh->qlen++;
 863 |      | -        qdisc_qstats_backlog_inc(sch, skb);
 864 |      | -
 865 |      | -        return NET_XMIT_SUCCESS;
 866 | 1008 |  }
 867 | 1009 |
 868 | 1010 |  static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
 869 | 1011 |  {
 870 |      | -        return __qdisc_enqueue_tail(skb, sch, &sch->q);
     | 1012 | +        __qdisc_enqueue_tail(skb, &sch->q);
     | 1013 | +        qdisc_qstats_backlog_inc(sch, skb);
     | 1014 | +        return NET_XMIT_SUCCESS;
     | 1015 | +}
     | 1016 | +
     | 1017 | +static inline void __qdisc_enqueue_head(struct sk_buff *skb,
     | 1018 | +                                        struct qdisc_skb_head *qh)
     | 1019 | +{
     | 1020 | +        skb->next = qh->head;
     | 1021 | +
     | 1022 | +        if (!qh->head)
     | 1023 | +                qh->tail = skb;
     | 1024 | +        qh->head = skb;
     | 1025 | +        qh->qlen++;
 871 | 1026 |  }
 872 | 1027 |
 873 | 1028 |  static inline struct sk_buff *__qdisc_dequeue_head(struct qdisc_skb_head *qh)
  .. |   .. |
 933 | 1088 |          return 0;
 934 | 1089 |  }
 935 | 1090 |
 936 |      | -static inline unsigned int qdisc_queue_drop_head(struct Qdisc *sch,
 937 |      | -                                                 struct sk_buff **to_free)
 938 |      | -{
 939 |      | -        return __qdisc_queue_drop_head(sch, &sch->q, to_free);
 940 |      | -}
 941 |      | -
 942 | 1091 |  static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
 943 | 1092 |  {
 944 | 1093 |          const struct qdisc_skb_head *qh = &sch->q;
  .. |   .. |
 966 | 1115 |          return skb;
 967 | 1116 |  }
 968 | 1117 |
     | 1118 | +static inline void qdisc_update_stats_at_dequeue(struct Qdisc *sch,
     | 1119 | +                                                 struct sk_buff *skb)
     | 1120 | +{
     | 1121 | +        if (qdisc_is_percpu_stats(sch)) {
     | 1122 | +                qdisc_qstats_cpu_backlog_dec(sch, skb);
     | 1123 | +                qdisc_bstats_cpu_update(sch, skb);
     | 1124 | +                qdisc_qstats_cpu_qlen_dec(sch);
     | 1125 | +        } else {
     | 1126 | +                qdisc_qstats_backlog_dec(sch, skb);
     | 1127 | +                qdisc_bstats_update(sch, skb);
     | 1128 | +                sch->q.qlen--;
     | 1129 | +        }
     | 1130 | +}
     | 1131 | +
     | 1132 | +static inline void qdisc_update_stats_at_enqueue(struct Qdisc *sch,
     | 1133 | +                                                 unsigned int pkt_len)
     | 1134 | +{
     | 1135 | +        if (qdisc_is_percpu_stats(sch)) {
     | 1136 | +                qdisc_qstats_cpu_qlen_inc(sch);
     | 1137 | +                this_cpu_add(sch->cpu_qstats->backlog, pkt_len);
     | 1138 | +        } else {
     | 1139 | +                sch->qstats.backlog += pkt_len;
     | 1140 | +                sch->q.qlen++;
     | 1141 | +        }
     | 1142 | +}
     | 1143 | +
 969 | 1144 |  /* use instead of qdisc->dequeue() for all qdiscs queried with ->peek() */
 970 | 1145 |  static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
 971 | 1146 |  {
  .. |   .. |
 973 | 1148 |
 974 | 1149 |          if (skb) {
 975 | 1150 |                  skb = __skb_dequeue(&sch->gso_skb);
 976 |      | -                qdisc_qstats_backlog_dec(sch, skb);
 977 |      | -                sch->q.qlen--;
     | 1151 | +                if (qdisc_is_percpu_stats(sch)) {
     | 1152 | +                        qdisc_qstats_cpu_backlog_dec(sch, skb);
     | 1153 | +                        qdisc_qstats_cpu_qlen_dec(sch);
     | 1154 | +                } else {
     | 1155 | +                        qdisc_qstats_backlog_dec(sch, skb);
     | 1156 | +                        sch->q.qlen--;
     | 1157 | +                }
 978 | 1158 |          } else {
 979 | 1159 |                  skb = sch->dequeue(sch);
 980 | 1160 |          }
  .. |   .. |
1001 | 1181 |  static inline void qdisc_reset_queue(struct Qdisc *sch)
1002 | 1182 |  {
1003 | 1183 |          __qdisc_reset_queue(&sch->q);
1004 |      | -        sch->qstats.backlog = 0;
1005 | 1184 |  }
1006 | 1185 |
1007 | 1186 |  static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
  .. |   .. |
1012 | 1191 |          sch_tree_lock(sch);
1013 | 1192 |          old = *pold;
1014 | 1193 |          *pold = new;
1015 |      | -        if (old != NULL) {
1016 |      | -                unsigned int qlen = old->q.qlen;
1017 |      | -                unsigned int backlog = old->qstats.backlog;
1018 |      | -
1019 |      | -                qdisc_reset(old);
1020 |      | -                qdisc_tree_reduce_backlog(old, qlen, backlog);
1021 |      | -        }
     | 1194 | +        if (old != NULL)
     | 1195 | +                qdisc_purge_queue(old);
1022 | 1196 |          sch_tree_unlock(sch);
1023 | 1197 |
1024 | 1198 |          return old;
  .. |   .. |
1119 | 1293 |   */
1120 | 1294 |  struct mini_Qdisc {
1121 | 1295 |          struct tcf_proto *filter_list;
     | 1296 | +        struct tcf_block *block;
1122 | 1297 |          struct gnet_stats_basic_cpu __percpu *cpu_bstats;
1123 | 1298 |          struct gnet_stats_queue __percpu *cpu_qstats;
1124 | 1299 |          struct rcu_head rcu;
  .. |   .. |
1145 | 1320 |                           struct tcf_proto *tp_head);
1146 | 1321 |  void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc,
1147 | 1322 |                            struct mini_Qdisc __rcu **p_miniq);
     | 1323 | +void mini_qdisc_pair_block_init(struct mini_Qdisc_pair *miniqp,
     | 1324 | +                                struct tcf_block *block);
1148 | 1325 |
1149 |      | -static inline void skb_tc_reinsert(struct sk_buff *skb, struct tcf_result *res)
     | 1326 | +/* Make sure qdisc is no longer in SCHED state. */
     | 1327 | +static inline void qdisc_synchronize(const struct Qdisc *q)
1150 | 1328 |  {
1151 |      | -        struct gnet_stats_queue *stats = res->qstats;
1152 |      | -        int ret;
1153 |      | -
1154 |      | -        if (res->ingress)
1155 |      | -                ret = netif_receive_skb(skb);
1156 |      | -        else
1157 |      | -                ret = dev_queue_xmit(skb);
1158 |      | -        if (ret && stats)
1159 |      | -                qstats_overlimit_inc(res->qstats);
     | 1329 | +        while (test_bit(__QDISC_STATE_SCHED, &q->state))
     | 1330 | +                msleep(1);
1160 | 1331 |  }
1161 | 1332 |
1162 | 1333 |  #endif
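
The qdisc_run_begin()/qdisc_run_end() changes above assume the caller enqueues a packet first and then races to become the qdisc runner; the __QDISC_STATE_MISSED bit is what keeps a packet from being stranded when the seqlock trylock fails. Below is a minimal sketch of that calling pattern, modelled on qdisc_run() from include/net/pkt_sched.h; it is an illustration only, not part of this patch, and example_xmit() is a made-up name.

static void example_xmit(struct sk_buff *skb, struct Qdisc *q,
                         struct sk_buff **to_free)
{
        /* Publish the packet first, then race for ownership of the qdisc. */
        q->enqueue(skb, q, to_free);

        if (qdisc_run_begin(q)) {
                /* We own the run state: drain the queue, then release it.
                 * qdisc_run_end() reschedules the qdisc via __netif_schedule()
                 * if another CPU set __QDISC_STATE_MISSED in the meantime.
                 */
                __qdisc_run(q);
                qdisc_run_end(q);
        }
        /* else: the CPU holding seqlock saw (or will see) MISSED and re-runs the qdisc */
}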
---|