2024-10-22  commit 8ac6c7a54ed1b98d142dce24b11c6de6a1e239a5
--- a/kernel/net/sched/sch_gred.c
+++ b/kernel/net/sched/sch_gred.c
@@ -1,11 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * net/sched/sch_gred.c	Generic Random Early Detection queue.
- *
- *
- *		This program is free software; you can redistribute it and/or
- *		modify it under the terms of the GNU General Public License
- *		as published by the Free Software Foundation; either version
- *		2 of the License, or (at your option) any later version.
  *
  * Authors:    J Hadi Salim (hadi@cyberus.ca) 1998-2002
  *
@@ -23,11 +18,14 @@
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/skbuff.h>
+#include <net/pkt_cls.h>
 #include <net/pkt_sched.h>
 #include <net/red.h>
 
 #define GRED_DEF_PRIO (MAX_DPs / 2)
 #define GRED_VQ_MASK (MAX_DPs - 1)
+
+#define GRED_VQ_RED_FLAGS	(TC_RED_ECN | TC_RED_HARDDROP)
 
 struct gred_sched_data;
 struct gred_sched;
@@ -35,7 +33,8 @@
 struct gred_sched_data {
 	u32		limit;		/* HARD maximal queue length	*/
 	u32		DP;		/* the drop parameters */
-	u32		bytesin;	/* bytes seen on virtualQ so far*/
+	u32		red_flags;	/* virtualQ version of red_flags */
+	u64		bytesin;	/* bytes seen on virtualQ so far*/
 	u32		packetsin;	/* packets seen on virtualQ so far*/
 	u32		backlog;	/* bytes on the virtualQ */
 	u8		prio;		/* the prio of this vq */
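
Note: bytesin widens from u32 to u64 here. A plausible motivation (not stated in the patch itself) is that hardware-offloaded byte counters get folded into it further down; a 32-bit byte counter wraps at 2^32 B (about 4.3 GB), which a 10 Gbit/s link at 1.25 GB/s crosses in roughly 3.4 seconds.
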
@@ -139,14 +138,27 @@
 	table->wred_set.qidlestart = q->vars.qidlestart;
 }
 
-static inline int gred_use_ecn(struct gred_sched *t)
+static int gred_use_ecn(struct gred_sched_data *q)
 {
-	return t->red_flags & TC_RED_ECN;
+	return q->red_flags & TC_RED_ECN;
 }
 
-static inline int gred_use_harddrop(struct gred_sched *t)
+static int gred_use_harddrop(struct gred_sched_data *q)
 {
-	return t->red_flags & TC_RED_HARDDROP;
+	return q->red_flags & TC_RED_HARDDROP;
+}
+
+static bool gred_per_vq_red_flags_used(struct gred_sched *table)
+{
+	unsigned int i;
+
+	/* Local per-vq flags couldn't have been set unless global are 0 */
+	if (table->red_flags)
+		return false;
+	for (i = 0; i < MAX_DPs; i++)
+		if (table->tab[i] && table->tab[i]->red_flags)
+			return true;
+	return false;
 }
 
 static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch,
@@ -212,7 +224,7 @@
 
 	case RED_PROB_MARK:
 		qdisc_qstats_overlimit(sch);
-		if (!gred_use_ecn(t) || !INET_ECN_set_ce(skb)) {
+		if (!gred_use_ecn(q) || !INET_ECN_set_ce(skb)) {
 			q->stats.prob_drop++;
 			goto congestion_drop;
 		}
@@ -222,7 +234,7 @@
 
 	case RED_HARD_MARK:
 		qdisc_qstats_overlimit(sch);
-		if (gred_use_harddrop(t) || !gred_use_ecn(t) ||
+		if (gred_use_harddrop(q) || !gred_use_ecn(q) ||
 		    !INET_ECN_set_ce(skb)) {
 			q->stats.forced_drop++;
 			goto congestion_drop;
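
Note: with the two hunks above, ECN marking and harddrop are decided per virtual queue rather than per Qdisc. The resulting behaviour, summarized from the code (this table is commentary, not part of the patch):

    RED verdict     per-VQ red_flags                outcome
    RED_PROB_MARK   TC_RED_ECN set                  mark CE; prob_drop if CE can't be set
    RED_PROB_MARK   TC_RED_ECN clear                prob_drop
    RED_HARD_MARK   TC_RED_HARDDROP set             forced_drop
    RED_HARD_MARK   ECN set, HARDDROP clear         mark CE; forced_drop if CE can't be set
    RED_HARD_MARK   TC_RED_ECN clear                forced_drop
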
@@ -295,15 +307,103 @@
 	}
 }
 
+static void gred_offload(struct Qdisc *sch, enum tc_gred_command command)
+{
+	struct gred_sched *table = qdisc_priv(sch);
+	struct net_device *dev = qdisc_dev(sch);
+	struct tc_gred_qopt_offload opt = {
+		.command	= command,
+		.handle		= sch->handle,
+		.parent		= sch->parent,
+	};
+
+	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
+		return;
+
+	if (command == TC_GRED_REPLACE) {
+		unsigned int i;
+
+		opt.set.grio_on = gred_rio_mode(table);
+		opt.set.wred_on = gred_wred_mode(table);
+		opt.set.dp_cnt = table->DPs;
+		opt.set.dp_def = table->def;
+
+		for (i = 0; i < table->DPs; i++) {
+			struct gred_sched_data *q = table->tab[i];
+
+			if (!q)
+				continue;
+			opt.set.tab[i].present = true;
+			opt.set.tab[i].limit = q->limit;
+			opt.set.tab[i].prio = q->prio;
+			opt.set.tab[i].min = q->parms.qth_min >> q->parms.Wlog;
+			opt.set.tab[i].max = q->parms.qth_max >> q->parms.Wlog;
+			opt.set.tab[i].is_ecn = gred_use_ecn(q);
+			opt.set.tab[i].is_harddrop = gred_use_harddrop(q);
+			opt.set.tab[i].probability = q->parms.max_P;
+			opt.set.tab[i].backlog = &q->backlog;
+		}
+		opt.set.qstats = &sch->qstats;
+	}
+
+	dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_GRED, &opt);
+}
+
+static int gred_offload_dump_stats(struct Qdisc *sch)
+{
+	struct gred_sched *table = qdisc_priv(sch);
+	struct tc_gred_qopt_offload *hw_stats;
+	unsigned int i;
+	int ret;
+
+	hw_stats = kzalloc(sizeof(*hw_stats), GFP_KERNEL);
+	if (!hw_stats)
+		return -ENOMEM;
+
+	hw_stats->command = TC_GRED_STATS;
+	hw_stats->handle = sch->handle;
+	hw_stats->parent = sch->parent;
+
+	for (i = 0; i < MAX_DPs; i++)
+		if (table->tab[i])
+			hw_stats->stats.xstats[i] = &table->tab[i]->stats;
+
+	ret = qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_GRED, hw_stats);
+	/* Even if driver returns failure adjust the stats - in case offload
+	 * ended but driver still wants to adjust the values.
+	 */
+	for (i = 0; i < MAX_DPs; i++) {
+		if (!table->tab[i])
+			continue;
+		table->tab[i]->packetsin += hw_stats->stats.bstats[i].packets;
+		table->tab[i]->bytesin += hw_stats->stats.bstats[i].bytes;
+		table->tab[i]->backlog += hw_stats->stats.qstats[i].backlog;
+
+		_bstats_update(&sch->bstats,
+			       hw_stats->stats.bstats[i].bytes,
+			       hw_stats->stats.bstats[i].packets);
+		sch->qstats.qlen += hw_stats->stats.qstats[i].qlen;
+		sch->qstats.backlog += hw_stats->stats.qstats[i].backlog;
+		sch->qstats.drops += hw_stats->stats.qstats[i].drops;
+		sch->qstats.requeues += hw_stats->stats.qstats[i].requeues;
+		sch->qstats.overlimits += hw_stats->stats.qstats[i].overlimits;
+	}
+
+	kfree(hw_stats);
+	return ret;
+}
+
 static inline void gred_destroy_vq(struct gred_sched_data *q)
 {
 	kfree(q);
 }
 
-static inline int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps)
+static int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps,
+				 struct netlink_ext_ack *extack)
 {
 	struct gred_sched *table = qdisc_priv(sch);
 	struct tc_gred_sopt *sopt;
+	bool red_flags_changed;
 	int i;
 
 	if (!dps)
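
Note: the counterpart of gred_offload() lives in a driver's ndo_setup_tc() callback. Below is a minimal sketch of such a handler; the driver name foo and the comments describing hardware programming are illustrative assumptions, while the command values and struct tc_gred_qopt_offload fields come from the API this patch introduces:

static int foo_setup_tc(struct net_device *dev, enum tc_setup_type type,
			void *type_data)
{
	struct tc_gred_qopt_offload *opt = type_data;

	if (type != TC_SETUP_QDISC_GRED)
		return -EOPNOTSUPP;

	switch (opt->command) {
	case TC_GRED_REPLACE:
		/* program opt->set.dp_cnt virtual queues into hardware:
		 * thresholds from opt->set.tab[i].min/.max, marking mode
		 * from .is_ecn/.is_harddrop, probability from .probability
		 */
		return 0;
	case TC_GRED_DESTROY:
		/* tear down the offload; software GRED takes over */
		return 0;
	case TC_GRED_STATS:
		/* report deltas through opt->stats.bstats[i]/qstats[i] */
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
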
@@ -311,13 +411,28 @@
 
 	sopt = nla_data(dps);
 
-	if (sopt->DPs > MAX_DPs || sopt->DPs == 0 ||
-	    sopt->def_DP >= sopt->DPs)
+	if (sopt->DPs > MAX_DPs) {
+		NL_SET_ERR_MSG_MOD(extack, "number of virtual queues too high");
 		return -EINVAL;
+	}
+	if (sopt->DPs == 0) {
+		NL_SET_ERR_MSG_MOD(extack,
+				   "number of virtual queues can't be 0");
+		return -EINVAL;
+	}
+	if (sopt->def_DP >= sopt->DPs) {
+		NL_SET_ERR_MSG_MOD(extack, "default virtual queue above virtual queue count");
+		return -EINVAL;
+	}
+	if (sopt->flags && gred_per_vq_red_flags_used(table)) {
+		NL_SET_ERR_MSG_MOD(extack, "can't set per-Qdisc RED flags when per-virtual queue flags are used");
+		return -EINVAL;
+	}
 
 	sch_tree_lock(sch);
 	table->DPs = sopt->DPs;
 	table->def = sopt->def_DP;
+	red_flags_changed = table->red_flags != sopt->flags;
 	table->red_flags = sopt->flags;
 
 	/*
@@ -337,6 +452,12 @@
 		gred_disable_wred_mode(table);
 	}
 
+	if (red_flags_changed)
+		for (i = 0; i < table->DPs; i++)
+			if (table->tab[i])
+				table->tab[i]->red_flags =
+					table->red_flags & GRED_VQ_RED_FLAGS;
+
 	for (i = table->DPs; i < MAX_DPs; i++) {
 		if (table->tab[i]) {
 			pr_warn("GRED: Warning: Destroying shadowed VQ 0x%x\n",
@@ -346,25 +467,30 @@
 		}
 	}
 
+	gred_offload(sch, TC_GRED_REPLACE);
 	return 0;
 }
 
 static inline int gred_change_vq(struct Qdisc *sch, int dp,
 				 struct tc_gred_qopt *ctl, int prio,
 				 u8 *stab, u32 max_P,
-				 struct gred_sched_data **prealloc)
+				 struct gred_sched_data **prealloc,
+				 struct netlink_ext_ack *extack)
 {
 	struct gred_sched *table = qdisc_priv(sch);
 	struct gred_sched_data *q = table->tab[dp];
 
-	if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log, stab))
+	if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log, stab)) {
+		NL_SET_ERR_MSG_MOD(extack, "invalid RED parameters");
 		return -EINVAL;
+	}
 
 	if (!q) {
 		table->tab[dp] = q = *prealloc;
 		*prealloc = NULL;
 		if (!q)
 			return -ENOMEM;
+		q->red_flags = table->red_flags & GRED_VQ_RED_FLAGS;
 	}
 
 	q->DP = dp;
@@ -384,13 +510,127 @@
 	return 0;
 }
 
+static const struct nla_policy gred_vq_policy[TCA_GRED_VQ_MAX + 1] = {
+	[TCA_GRED_VQ_DP]	= { .type = NLA_U32 },
+	[TCA_GRED_VQ_FLAGS]	= { .type = NLA_U32 },
+};
+
+static const struct nla_policy gred_vqe_policy[TCA_GRED_VQ_ENTRY_MAX + 1] = {
+	[TCA_GRED_VQ_ENTRY]	= { .type = NLA_NESTED },
+};
+
 static const struct nla_policy gred_policy[TCA_GRED_MAX + 1] = {
 	[TCA_GRED_PARMS]	= { .len = sizeof(struct tc_gred_qopt) },
 	[TCA_GRED_STAB]		= { .len = 256 },
 	[TCA_GRED_DPS]		= { .len = sizeof(struct tc_gred_sopt) },
 	[TCA_GRED_MAX_P]	= { .type = NLA_U32 },
 	[TCA_GRED_LIMIT]	= { .type = NLA_U32 },
+	[TCA_GRED_VQ_LIST]	= { .type = NLA_NESTED },
 };
+
+static void gred_vq_apply(struct gred_sched *table, const struct nlattr *entry)
+{
+	struct nlattr *tb[TCA_GRED_VQ_MAX + 1];
+	u32 dp;
+
+	nla_parse_nested_deprecated(tb, TCA_GRED_VQ_MAX, entry,
+				    gred_vq_policy, NULL);
+
+	dp = nla_get_u32(tb[TCA_GRED_VQ_DP]);
+
+	if (tb[TCA_GRED_VQ_FLAGS])
+		table->tab[dp]->red_flags = nla_get_u32(tb[TCA_GRED_VQ_FLAGS]);
+}
+
+static void gred_vqs_apply(struct gred_sched *table, struct nlattr *vqs)
+{
+	const struct nlattr *attr;
+	int rem;
+
+	nla_for_each_nested(attr, vqs, rem) {
+		switch (nla_type(attr)) {
+		case TCA_GRED_VQ_ENTRY:
+			gred_vq_apply(table, attr);
+			break;
+		}
+	}
+}
+
+static int gred_vq_validate(struct gred_sched *table, u32 cdp,
+			    const struct nlattr *entry,
+			    struct netlink_ext_ack *extack)
+{
+	struct nlattr *tb[TCA_GRED_VQ_MAX + 1];
+	int err;
+	u32 dp;
+
+	err = nla_parse_nested_deprecated(tb, TCA_GRED_VQ_MAX, entry,
+					  gred_vq_policy, extack);
+	if (err < 0)
+		return err;
+
+	if (!tb[TCA_GRED_VQ_DP]) {
+		NL_SET_ERR_MSG_MOD(extack, "Virtual queue with no index specified");
+		return -EINVAL;
+	}
+	dp = nla_get_u32(tb[TCA_GRED_VQ_DP]);
+	if (dp >= table->DPs) {
+		NL_SET_ERR_MSG_MOD(extack, "Virtual queue with index out of bounds");
+		return -EINVAL;
+	}
+	if (dp != cdp && !table->tab[dp]) {
+		NL_SET_ERR_MSG_MOD(extack, "Virtual queue not yet instantiated");
+		return -EINVAL;
+	}
+
+	if (tb[TCA_GRED_VQ_FLAGS]) {
+		u32 red_flags = nla_get_u32(tb[TCA_GRED_VQ_FLAGS]);
+
+		if (table->red_flags && table->red_flags != red_flags) {
+			NL_SET_ERR_MSG_MOD(extack, "can't change per-virtual queue RED flags when per-Qdisc flags are used");
+			return -EINVAL;
+		}
+		if (red_flags & ~GRED_VQ_RED_FLAGS) {
+			NL_SET_ERR_MSG_MOD(extack,
+					   "invalid RED flags specified");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int gred_vqs_validate(struct gred_sched *table, u32 cdp,
+			     struct nlattr *vqs, struct netlink_ext_ack *extack)
+{
+	const struct nlattr *attr;
+	int rem, err;
+
+	err = nla_validate_nested_deprecated(vqs, TCA_GRED_VQ_ENTRY_MAX,
+					     gred_vqe_policy, extack);
+	if (err < 0)
+		return err;
+
+	nla_for_each_nested(attr, vqs, rem) {
+		switch (nla_type(attr)) {
+		case TCA_GRED_VQ_ENTRY:
+			err = gred_vq_validate(table, cdp, attr, extack);
+			if (err)
+				return err;
+			break;
+		default:
+			NL_SET_ERR_MSG_MOD(extack, "GRED_VQ_LIST can contain only entry attributes");
+			return -EINVAL;
+		}
+	}
+
+	if (rem > 0) {
+		NL_SET_ERR_MSG_MOD(extack, "Trailing data after parsing virtual queue list");
+		return -EINVAL;
+	}
+
+	return 0;
+}
 
 static int gred_change(struct Qdisc *sch, struct nlattr *opt,
 		       struct netlink_ext_ack *extack)
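
Note: the nesting these policies accept is TCA_GRED_VQ_LIST -> TCA_GRED_VQ_ENTRY -> { TCA_GRED_VQ_DP, TCA_GRED_VQ_FLAGS }. A minimal userspace sketch with libmnl follows; it assumes nlh is a partially built RTM_NEWQDISC message positioned inside the TCA_OPTIONS nest, and the DP index and flag value are illustrative:

#include <libmnl/libmnl.h>
#include <linux/pkt_sched.h>

static void put_gred_vq_list(struct nlmsghdr *nlh)
{
	struct nlattr *vqs, *vq;

	vqs = mnl_attr_nest_start(nlh, TCA_GRED_VQ_LIST);
	vq = mnl_attr_nest_start(nlh, TCA_GRED_VQ_ENTRY);
	mnl_attr_put_u32(nlh, TCA_GRED_VQ_DP, 0);             /* VQ index */
	mnl_attr_put_u32(nlh, TCA_GRED_VQ_FLAGS, TC_RED_ECN); /* per-VQ flags */
	mnl_attr_nest_end(nlh, vq);
	mnl_attr_nest_end(nlh, vqs);
}
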
@@ -406,29 +646,40 @@
 	if (opt == NULL)
 		return -EINVAL;
 
-	err = nla_parse_nested(tb, TCA_GRED_MAX, opt, gred_policy, NULL);
+	err = nla_parse_nested_deprecated(tb, TCA_GRED_MAX, opt, gred_policy,
+					  extack);
 	if (err < 0)
 		return err;
 
 	if (tb[TCA_GRED_PARMS] == NULL && tb[TCA_GRED_STAB] == NULL) {
 		if (tb[TCA_GRED_LIMIT] != NULL)
 			sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]);
-		return gred_change_table_def(sch, tb[TCA_GRED_DPS]);
+		return gred_change_table_def(sch, tb[TCA_GRED_DPS], extack);
 	}
 
 	if (tb[TCA_GRED_PARMS] == NULL ||
 	    tb[TCA_GRED_STAB] == NULL ||
-	    tb[TCA_GRED_LIMIT] != NULL)
+	    tb[TCA_GRED_LIMIT] != NULL) {
+		NL_SET_ERR_MSG_MOD(extack, "can't configure Qdisc and virtual queue at the same time");
 		return -EINVAL;
+	}
 
 	max_P = tb[TCA_GRED_MAX_P] ? nla_get_u32(tb[TCA_GRED_MAX_P]) : 0;
 
-	err = -EINVAL;
 	ctl = nla_data(tb[TCA_GRED_PARMS]);
 	stab = nla_data(tb[TCA_GRED_STAB]);
 
-	if (ctl->DP >= table->DPs)
-		goto errout;
+	if (ctl->DP >= table->DPs) {
+		NL_SET_ERR_MSG_MOD(extack, "virtual queue index above virtual queue count");
+		return -EINVAL;
+	}
+
+	if (tb[TCA_GRED_VQ_LIST]) {
+		err = gred_vqs_validate(table, ctl->DP, tb[TCA_GRED_VQ_LIST],
+					extack);
+		if (err)
+			return err;
+	}
 
 	if (gred_rio_mode(table)) {
 		if (ctl->prio == 0) {
@@ -448,9 +699,13 @@
 	prealloc = kzalloc(sizeof(*prealloc), GFP_KERNEL);
 	sch_tree_lock(sch);
 
-	err = gred_change_vq(sch, ctl->DP, ctl, prio, stab, max_P, &prealloc);
+	err = gred_change_vq(sch, ctl->DP, ctl, prio, stab, max_P, &prealloc,
+			     extack);
 	if (err < 0)
-		goto errout_locked;
+		goto err_unlock_free;
+
+	if (tb[TCA_GRED_VQ_LIST])
+		gred_vqs_apply(table, tb[TCA_GRED_VQ_LIST]);
 
 	if (gred_rio_mode(table)) {
 		gred_disable_wred_mode(table);
@@ -458,12 +713,15 @@
 			gred_enable_wred_mode(table);
 	}
 
-	err = 0;
-
-errout_locked:
 	sch_tree_unlock(sch);
 	kfree(prealloc);
-errout:
+
+	gred_offload(sch, TC_GRED_REPLACE);
+	return 0;
+
+err_unlock_free:
+	sch_tree_unlock(sch);
+	kfree(prealloc);
 	return err;
 }
 
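Note: in the restructured exit paths, gred_offload() runs only after sch_tree_unlock(), presumably because sch_tree_lock() holds the qdisc root lock as a spinlock while ndo_setup_tc() callbacks may sleep; success and error paths now unlock and free prealloc symmetrically.
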
@@ -476,12 +734,16 @@
 	if (!opt)
 		return -EINVAL;
 
-	err = nla_parse_nested(tb, TCA_GRED_MAX, opt, gred_policy, NULL);
+	err = nla_parse_nested_deprecated(tb, TCA_GRED_MAX, opt, gred_policy,
+					  extack);
 	if (err < 0)
 		return err;
 
-	if (tb[TCA_GRED_PARMS] || tb[TCA_GRED_STAB])
+	if (tb[TCA_GRED_PARMS] || tb[TCA_GRED_STAB]) {
+		NL_SET_ERR_MSG_MOD(extack,
+				   "virtual queue configuration can't be specified at initialization time");
 		return -EINVAL;
+	}
 
 	if (tb[TCA_GRED_LIMIT])
 		sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]);
@@ -489,13 +751,13 @@
 		sch->limit = qdisc_dev(sch)->tx_queue_len
 		             * psched_mtu(qdisc_dev(sch));
 
-	return gred_change_table_def(sch, tb[TCA_GRED_DPS]);
+	return gred_change_table_def(sch, tb[TCA_GRED_DPS], extack);
 }
 
 static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
 {
 	struct gred_sched *table = qdisc_priv(sch);
-	struct nlattr *parms, *opts = NULL;
+	struct nlattr *parms, *vqs, *opts = NULL;
 	int i;
 	u32 max_p[MAX_DPs];
 	struct tc_gred_sopt sopt = {
@@ -505,7 +767,10 @@
 		.flags	= table->red_flags,
 	};
 
-	opts = nla_nest_start(skb, TCA_OPTIONS);
+	if (gred_offload_dump_stats(sch))
+		goto nla_put_failure;
+
+	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
 	if (opts == NULL)
 		goto nla_put_failure;
 	if (nla_put(skb, TCA_GRED_DPS, sizeof(sopt), &sopt))
@@ -522,7 +787,8 @@
 	if (nla_put_u32(skb, TCA_GRED_LIMIT, sch->limit))
 		goto nla_put_failure;
 
-	parms = nla_nest_start(skb, TCA_GRED_PARMS);
+	/* Old style all-in-one dump of VQs */
+	parms = nla_nest_start_noflag(skb, TCA_GRED_PARMS);
 	if (parms == NULL)
 		goto nla_put_failure;
 
@@ -572,6 +838,58 @@
 
 	nla_nest_end(skb, parms);
 
+	/* Dump the VQs again, in more structured way */
+	vqs = nla_nest_start_noflag(skb, TCA_GRED_VQ_LIST);
+	if (!vqs)
+		goto nla_put_failure;
+
+	for (i = 0; i < MAX_DPs; i++) {
+		struct gred_sched_data *q = table->tab[i];
+		struct nlattr *vq;
+
+		if (!q)
+			continue;
+
+		vq = nla_nest_start_noflag(skb, TCA_GRED_VQ_ENTRY);
+		if (!vq)
+			goto nla_put_failure;
+
+		if (nla_put_u32(skb, TCA_GRED_VQ_DP, q->DP))
+			goto nla_put_failure;
+
+		if (nla_put_u32(skb, TCA_GRED_VQ_FLAGS, q->red_flags))
+			goto nla_put_failure;
+
+		/* Stats */
+		if (nla_put_u64_64bit(skb, TCA_GRED_VQ_STAT_BYTES, q->bytesin,
+				      TCA_GRED_VQ_PAD))
+			goto nla_put_failure;
+		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PACKETS, q->packetsin))
+			goto nla_put_failure;
+		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_BACKLOG,
+				gred_backlog(table, q, sch)))
+			goto nla_put_failure;
+		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PROB_DROP,
+				q->stats.prob_drop))
+			goto nla_put_failure;
+		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PROB_MARK,
+				q->stats.prob_mark))
+			goto nla_put_failure;
+		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_FORCED_DROP,
+				q->stats.forced_drop))
+			goto nla_put_failure;
+		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_FORCED_MARK,
+				q->stats.forced_mark))
+			goto nla_put_failure;
+		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PDROP, q->stats.pdrop))
+			goto nla_put_failure;
+		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_OTHER, q->stats.other))
+			goto nla_put_failure;
+
+		nla_nest_end(skb, vq);
+	}
+	nla_nest_end(skb, vqs);
+
 	return nla_nest_end(skb, opts);
 
 nla_put_failure:
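
Note: after this hunk a dump carries the virtual-queue data twice, once in the legacy TCA_GRED_PARMS array and once in the structured list. The resulting attribute tree (a summary, not part of the patch):

    TCA_OPTIONS
      TCA_GRED_DPS, TCA_GRED_MAX_P, TCA_GRED_LIMIT
      TCA_GRED_PARMS                (legacy all-in-one VQ array)
      TCA_GRED_VQ_LIST
        TCA_GRED_VQ_ENTRY           (one per active VQ)
          TCA_GRED_VQ_DP, TCA_GRED_VQ_FLAGS
          TCA_GRED_VQ_STAT_BYTES, _PACKETS, _BACKLOG, _PROB_DROP,
          _PROB_MARK, _FORCED_DROP, _FORCED_MARK, _PDROP, _OTHER
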
@@ -588,6 +906,7 @@
 		if (table->tab[i])
 			gred_destroy_vq(table->tab[i]);
 	}
+	gred_offload(sch, TC_GRED_DESTROY);
 }
 
 static struct Qdisc_ops gred_qdisc_ops __read_mostly = {