.. | .. |
---|
| 1 | +// SPDX-License-Identifier: GPL-2.0-or-later |
---|
1 | 2 | /* |
---|
2 | 3 | * net/sched/sch_gred.c Generic Random Early Detection queue. |
---|
3 | | - * |
---|
4 | | - * |
---|
5 | | - * This program is free software; you can redistribute it and/or |
---|
6 | | - * modify it under the terms of the GNU General Public License |
---|
7 | | - * as published by the Free Software Foundation; either version |
---|
8 | | - * 2 of the License, or (at your option) any later version. |
---|
9 | 4 | * |
---|
10 | 5 | * Authors: J Hadi Salim (hadi@cyberus.ca) 1998-2002 |
---|
11 | 6 | * |
---|
.. | .. |
---|
23 | 18 | #include <linux/types.h> |
---|
24 | 19 | #include <linux/kernel.h> |
---|
25 | 20 | #include <linux/skbuff.h> |
---|
| 21 | +#include <net/pkt_cls.h> |
---|
26 | 22 | #include <net/pkt_sched.h> |
---|
27 | 23 | #include <net/red.h> |
---|
28 | 24 | |
---|
29 | 25 | #define GRED_DEF_PRIO (MAX_DPs / 2) |
---|
30 | 26 | #define GRED_VQ_MASK (MAX_DPs - 1) |
---|
| 27 | + |
---|
| 28 | +#define GRED_VQ_RED_FLAGS (TC_RED_ECN | TC_RED_HARDDROP) |
---|
31 | 29 | |
---|
32 | 30 | struct gred_sched_data; |
---|
33 | 31 | struct gred_sched; |
---|
.. | .. |
---|
35 | 33 | struct gred_sched_data { |
---|
36 | 34 | u32 limit; /* HARD maximal queue length */ |
---|
37 | 35 | u32 DP; /* the drop parameters */ |
---|
38 | | - u32 bytesin; /* bytes seen on virtualQ so far*/ |
---|
| 36 | + u32 red_flags; /* virtualQ version of red_flags */ |
---|
| 37 | + u64 bytesin; /* bytes seen on virtualQ so far*/ |
---|
39 | 38 | u32 packetsin; /* packets seen on virtualQ so far*/ |
---|
40 | 39 | u32 backlog; /* bytes on the virtualQ */ |
---|
41 | 40 | u8 prio; /* the prio of this vq */ |
---|
.. | .. |
---|
139 | 138 | table->wred_set.qidlestart = q->vars.qidlestart; |
---|
140 | 139 | } |
---|
141 | 140 | |
---|
/* Return non-zero when ECN marking is enabled for this virtual queue.
 * Flags are per-VQ (q->red_flags): seeded from the Qdisc-wide flags at
 * VQ creation and optionally overridden via TCA_GRED_VQ_FLAGS.
 */
static int gred_use_ecn(struct gred_sched_data *q)
{
	return q->red_flags & TC_RED_ECN;
}
---|
146 | 145 | |
---|
/* Return non-zero when hard drop (drop even ECN-capable packets at the
 * hard-mark threshold) is enabled for this virtual queue.
 */
static int gred_use_harddrop(struct gred_sched_data *q)
{
	return q->red_flags & TC_RED_HARDDROP;
}
---|
| 150 | + |
---|
| 151 | +static bool gred_per_vq_red_flags_used(struct gred_sched *table) |
---|
| 152 | +{ |
---|
| 153 | + unsigned int i; |
---|
| 154 | + |
---|
| 155 | + /* Local per-vq flags couldn't have been set unless global are 0 */ |
---|
| 156 | + if (table->red_flags) |
---|
| 157 | + return false; |
---|
| 158 | + for (i = 0; i < MAX_DPs; i++) |
---|
| 159 | + if (table->tab[i] && table->tab[i]->red_flags) |
---|
| 160 | + return true; |
---|
| 161 | + return false; |
---|
150 | 162 | } |
---|
151 | 163 | |
---|
152 | 164 | static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch, |
---|
.. | .. |
---|
212 | 224 | |
---|
213 | 225 | case RED_PROB_MARK: |
---|
214 | 226 | qdisc_qstats_overlimit(sch); |
---|
215 | | - if (!gred_use_ecn(t) || !INET_ECN_set_ce(skb)) { |
---|
| 227 | + if (!gred_use_ecn(q) || !INET_ECN_set_ce(skb)) { |
---|
216 | 228 | q->stats.prob_drop++; |
---|
217 | 229 | goto congestion_drop; |
---|
218 | 230 | } |
---|
.. | .. |
---|
222 | 234 | |
---|
223 | 235 | case RED_HARD_MARK: |
---|
224 | 236 | qdisc_qstats_overlimit(sch); |
---|
225 | | - if (gred_use_harddrop(t) || !gred_use_ecn(t) || |
---|
| 237 | + if (gred_use_harddrop(q) || !gred_use_ecn(q) || |
---|
226 | 238 | !INET_ECN_set_ce(skb)) { |
---|
227 | 239 | q->stats.forced_drop++; |
---|
228 | 240 | goto congestion_drop; |
---|
.. | .. |
---|
295 | 307 | } |
---|
296 | 308 | } |
---|
297 | 309 | |
---|
/* Push the current GRED configuration to the device (TC_GRED_REPLACE)
 * or ask it to tear the offload down (TC_GRED_DESTROY).  Quietly does
 * nothing when the netdev cannot offload TC.  The ndo_setup_tc()
 * return value is deliberately ignored - software GRED keeps working
 * whether or not the hardware accepted the configuration.
 */
static void gred_offload(struct Qdisc *sch, enum tc_gred_command command)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct tc_gred_qopt_offload opt = {
		.command = command,
		.handle = sch->handle,
		.parent = sch->parent,
	};

	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return;

	if (command == TC_GRED_REPLACE) {
		unsigned int i;

		opt.set.grio_on = gred_rio_mode(table);
		opt.set.wred_on = gred_wred_mode(table);
		opt.set.dp_cnt = table->DPs;
		opt.set.dp_def = table->def;

		for (i = 0; i < table->DPs; i++) {
			struct gred_sched_data *q = table->tab[i];

			/* Skip virtual queues that were never created */
			if (!q)
				continue;
			opt.set.tab[i].present = true;
			opt.set.tab[i].limit = q->limit;
			opt.set.tab[i].prio = q->prio;
			/* qth_min/qth_max are stored pre-shifted by the
			 * EWMA weight log (Wlog); undo that for the driver.
			 */
			opt.set.tab[i].min = q->parms.qth_min >> q->parms.Wlog;
			opt.set.tab[i].max = q->parms.qth_max >> q->parms.Wlog;
			opt.set.tab[i].is_ecn = gred_use_ecn(q);
			opt.set.tab[i].is_harddrop = gred_use_harddrop(q);
			opt.set.tab[i].probability = q->parms.max_P;
			/* Pointer, not a snapshot - presumably so the
			 * driver can adjust the backlog in place; confirm
			 * against driver implementations.
			 */
			opt.set.tab[i].backlog = &q->backlog;
		}
		opt.set.qstats = &sch->qstats;
	}

	dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_GRED, &opt);
}
---|
| 351 | + |
---|
/* Fetch hardware statistics from the offloading driver and fold them
 * into the software per-VQ and Qdisc-level counters before a dump.
 * Returns 0 on success, -ENOMEM, or the helper's error code.
 */
static int gred_offload_dump_stats(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_qopt_offload *hw_stats;
	unsigned int i;
	int ret;

	/* Heap-allocated: the offload struct embeds per-DP tables and is
	 * likely too large for the stack.
	 */
	hw_stats = kzalloc(sizeof(*hw_stats), GFP_KERNEL);
	if (!hw_stats)
		return -ENOMEM;

	hw_stats->command = TC_GRED_STATS;
	hw_stats->handle = sch->handle;
	hw_stats->parent = sch->parent;

	/* Let the driver write xstats straight into each VQ's counters */
	for (i = 0; i < MAX_DPs; i++)
		if (table->tab[i])
			hw_stats->stats.xstats[i] = &table->tab[i]->stats;

	ret = qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_GRED, hw_stats);
	/* Even if driver returns failure adjust the stats - in case offload
	 * ended but driver still wants to adjust the values.
	 */
	for (i = 0; i < MAX_DPs; i++) {
		if (!table->tab[i])
			continue;
		table->tab[i]->packetsin += hw_stats->stats.bstats[i].packets;
		table->tab[i]->bytesin += hw_stats->stats.bstats[i].bytes;
		table->tab[i]->backlog += hw_stats->stats.qstats[i].backlog;

		/* Aggregate the same per-DP deltas into the Qdisc totals */
		_bstats_update(&sch->bstats,
			       hw_stats->stats.bstats[i].bytes,
			       hw_stats->stats.bstats[i].packets);
		sch->qstats.qlen += hw_stats->stats.qstats[i].qlen;
		sch->qstats.backlog += hw_stats->stats.qstats[i].backlog;
		sch->qstats.drops += hw_stats->stats.qstats[i].drops;
		sch->qstats.requeues += hw_stats->stats.qstats[i].requeues;
		sch->qstats.overlimits += hw_stats->stats.qstats[i].overlimits;
	}

	kfree(hw_stats);
	return ret;
}
---|
| 395 | + |
---|
/* Free a single virtual queue's private data. */
static inline void gred_destroy_vq(struct gred_sched_data *q)
{
	kfree(q);
}
---|
302 | 400 | |
---|
303 | | -static inline int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps) |
---|
| 401 | +static int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps, |
---|
| 402 | + struct netlink_ext_ack *extack) |
---|
304 | 403 | { |
---|
305 | 404 | struct gred_sched *table = qdisc_priv(sch); |
---|
306 | 405 | struct tc_gred_sopt *sopt; |
---|
| 406 | + bool red_flags_changed; |
---|
307 | 407 | int i; |
---|
308 | 408 | |
---|
309 | 409 | if (!dps) |
---|
.. | .. |
---|
311 | 411 | |
---|
312 | 412 | sopt = nla_data(dps); |
---|
313 | 413 | |
---|
314 | | - if (sopt->DPs > MAX_DPs || sopt->DPs == 0 || |
---|
315 | | - sopt->def_DP >= sopt->DPs) |
---|
| 414 | + if (sopt->DPs > MAX_DPs) { |
---|
| 415 | + NL_SET_ERR_MSG_MOD(extack, "number of virtual queues too high"); |
---|
316 | 416 | return -EINVAL; |
---|
| 417 | + } |
---|
| 418 | + if (sopt->DPs == 0) { |
---|
| 419 | + NL_SET_ERR_MSG_MOD(extack, |
---|
| 420 | + "number of virtual queues can't be 0"); |
---|
| 421 | + return -EINVAL; |
---|
| 422 | + } |
---|
| 423 | + if (sopt->def_DP >= sopt->DPs) { |
---|
| 424 | + NL_SET_ERR_MSG_MOD(extack, "default virtual queue above virtual queue count"); |
---|
| 425 | + return -EINVAL; |
---|
| 426 | + } |
---|
| 427 | + if (sopt->flags && gred_per_vq_red_flags_used(table)) { |
---|
| 428 | + NL_SET_ERR_MSG_MOD(extack, "can't set per-Qdisc RED flags when per-virtual queue flags are used"); |
---|
| 429 | + return -EINVAL; |
---|
| 430 | + } |
---|
317 | 431 | |
---|
318 | 432 | sch_tree_lock(sch); |
---|
319 | 433 | table->DPs = sopt->DPs; |
---|
320 | 434 | table->def = sopt->def_DP; |
---|
| 435 | + red_flags_changed = table->red_flags != sopt->flags; |
---|
321 | 436 | table->red_flags = sopt->flags; |
---|
322 | 437 | |
---|
323 | 438 | /* |
---|
.. | .. |
---|
337 | 452 | gred_disable_wred_mode(table); |
---|
338 | 453 | } |
---|
339 | 454 | |
---|
| 455 | + if (red_flags_changed) |
---|
| 456 | + for (i = 0; i < table->DPs; i++) |
---|
| 457 | + if (table->tab[i]) |
---|
| 458 | + table->tab[i]->red_flags = |
---|
| 459 | + table->red_flags & GRED_VQ_RED_FLAGS; |
---|
| 460 | + |
---|
340 | 461 | for (i = table->DPs; i < MAX_DPs; i++) { |
---|
341 | 462 | if (table->tab[i]) { |
---|
342 | 463 | pr_warn("GRED: Warning: Destroying shadowed VQ 0x%x\n", |
---|
.. | .. |
---|
346 | 467 | } |
---|
347 | 468 | } |
---|
348 | 469 | |
---|
| 470 | + gred_offload(sch, TC_GRED_REPLACE); |
---|
349 | 471 | return 0; |
---|
350 | 472 | } |
---|
351 | 473 | |
---|
352 | 474 | static inline int gred_change_vq(struct Qdisc *sch, int dp, |
---|
353 | 475 | struct tc_gred_qopt *ctl, int prio, |
---|
354 | 476 | u8 *stab, u32 max_P, |
---|
355 | | - struct gred_sched_data **prealloc) |
---|
| 477 | + struct gred_sched_data **prealloc, |
---|
| 478 | + struct netlink_ext_ack *extack) |
---|
356 | 479 | { |
---|
357 | 480 | struct gred_sched *table = qdisc_priv(sch); |
---|
358 | 481 | struct gred_sched_data *q = table->tab[dp]; |
---|
359 | 482 | |
---|
360 | | - if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log, stab)) |
---|
| 483 | + if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log, stab)) { |
---|
| 484 | + NL_SET_ERR_MSG_MOD(extack, "invalid RED parameters"); |
---|
361 | 485 | return -EINVAL; |
---|
| 486 | + } |
---|
362 | 487 | |
---|
363 | 488 | if (!q) { |
---|
364 | 489 | table->tab[dp] = q = *prealloc; |
---|
365 | 490 | *prealloc = NULL; |
---|
366 | 491 | if (!q) |
---|
367 | 492 | return -ENOMEM; |
---|
| 493 | + q->red_flags = table->red_flags & GRED_VQ_RED_FLAGS; |
---|
368 | 494 | } |
---|
369 | 495 | |
---|
370 | 496 | q->DP = dp; |
---|
.. | .. |
---|
384 | 510 | return 0; |
---|
385 | 511 | } |
---|
386 | 512 | |
---|
/* Policy for the attributes nested inside one TCA_GRED_VQ_ENTRY */
static const struct nla_policy gred_vq_policy[TCA_GRED_VQ_MAX + 1] = {
	[TCA_GRED_VQ_DP] = { .type = NLA_U32 },
	[TCA_GRED_VQ_FLAGS] = { .type = NLA_U32 },
};
---|
| 517 | + |
---|
/* Policy for TCA_GRED_VQ_LIST: only nested entry attributes allowed */
static const struct nla_policy gred_vqe_policy[TCA_GRED_VQ_ENTRY_MAX + 1] = {
	[TCA_GRED_VQ_ENTRY] = { .type = NLA_NESTED },
};
---|
| 521 | + |
---|
/* Top-level netlink policy for GRED configuration attributes */
static const struct nla_policy gred_policy[TCA_GRED_MAX + 1] = {
	[TCA_GRED_PARMS] = { .len = sizeof(struct tc_gred_qopt) },
	[TCA_GRED_STAB] = { .len = 256 },
	[TCA_GRED_DPS] = { .len = sizeof(struct tc_gred_sopt) },
	[TCA_GRED_MAX_P] = { .type = NLA_U32 },
	[TCA_GRED_LIMIT] = { .type = NLA_U32 },
	[TCA_GRED_VQ_LIST] = { .type = NLA_NESTED },
};
---|
| 530 | + |
---|
/* Apply the settings carried in one TCA_GRED_VQ_ENTRY.  The parse
 * return value is deliberately ignored: gred_vqs_validate() already ran
 * the same policy over every entry, so parsing cannot fail here and
 * TCA_GRED_VQ_DP is guaranteed present and in range.
 */
static void gred_vq_apply(struct gred_sched *table, const struct nlattr *entry)
{
	struct nlattr *tb[TCA_GRED_VQ_MAX + 1];
	u32 dp;

	nla_parse_nested_deprecated(tb, TCA_GRED_VQ_MAX, entry,
				    gred_vq_policy, NULL);

	dp = nla_get_u32(tb[TCA_GRED_VQ_DP]);

	/* Override the flags this VQ inherited from the Qdisc */
	if (tb[TCA_GRED_VQ_FLAGS])
		table->tab[dp]->red_flags = nla_get_u32(tb[TCA_GRED_VQ_FLAGS]);
}
---|
| 544 | + |
---|
| 545 | +static void gred_vqs_apply(struct gred_sched *table, struct nlattr *vqs) |
---|
| 546 | +{ |
---|
| 547 | + const struct nlattr *attr; |
---|
| 548 | + int rem; |
---|
| 549 | + |
---|
| 550 | + nla_for_each_nested(attr, vqs, rem) { |
---|
| 551 | + switch (nla_type(attr)) { |
---|
| 552 | + case TCA_GRED_VQ_ENTRY: |
---|
| 553 | + gred_vq_apply(table, attr); |
---|
| 554 | + break; |
---|
| 555 | + } |
---|
| 556 | + } |
---|
| 557 | +} |
---|
| 558 | + |
---|
/* Validate one TCA_GRED_VQ_ENTRY before anything is applied.
 *
 * @table:  GRED Qdisc private data
 * @cdp:    index of the VQ being created/changed by the enclosing
 *          TCA_GRED_PARMS request; it may not be instantiated yet, so
 *          it is exempt from the "already exists" check below
 * @entry:  the nested entry attribute
 * @extack: extended ack for error reporting
 *
 * Returns 0 on success, a parse error, or -EINVAL with extack set.
 */
static int gred_vq_validate(struct gred_sched *table, u32 cdp,
			    const struct nlattr *entry,
			    struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_GRED_VQ_MAX + 1];
	int err;
	u32 dp;

	err = nla_parse_nested_deprecated(tb, TCA_GRED_VQ_MAX, entry,
					  gred_vq_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_GRED_VQ_DP]) {
		NL_SET_ERR_MSG_MOD(extack, "Virtual queue with no index specified");
		return -EINVAL;
	}
	dp = nla_get_u32(tb[TCA_GRED_VQ_DP]);
	if (dp >= table->DPs) {
		NL_SET_ERR_MSG_MOD(extack, "Virtual queue with index out of bounds");
		return -EINVAL;
	}
	if (dp != cdp && !table->tab[dp]) {
		NL_SET_ERR_MSG_MOD(extack, "Virtual queue not yet instantiated");
		return -EINVAL;
	}

	if (tb[TCA_GRED_VQ_FLAGS]) {
		u32 red_flags = nla_get_u32(tb[TCA_GRED_VQ_FLAGS]);

		/* Per-VQ flags may only diverge from the per-Qdisc
		 * flags when the latter are zero.
		 */
		if (table->red_flags && table->red_flags != red_flags) {
			NL_SET_ERR_MSG_MOD(extack, "can't change per-virtual queue RED flags when per-Qdisc flags are used");
			return -EINVAL;
		}
		/* Only ECN and harddrop are valid per-VQ flags */
		if (red_flags & ~GRED_VQ_RED_FLAGS) {
			NL_SET_ERR_MSG_MOD(extack,
					   "invalid RED flags specified");
			return -EINVAL;
		}
	}

	return 0;
}
---|
| 602 | + |
---|
| 603 | +static int gred_vqs_validate(struct gred_sched *table, u32 cdp, |
---|
| 604 | + struct nlattr *vqs, struct netlink_ext_ack *extack) |
---|
| 605 | +{ |
---|
| 606 | + const struct nlattr *attr; |
---|
| 607 | + int rem, err; |
---|
| 608 | + |
---|
| 609 | + err = nla_validate_nested_deprecated(vqs, TCA_GRED_VQ_ENTRY_MAX, |
---|
| 610 | + gred_vqe_policy, extack); |
---|
| 611 | + if (err < 0) |
---|
| 612 | + return err; |
---|
| 613 | + |
---|
| 614 | + nla_for_each_nested(attr, vqs, rem) { |
---|
| 615 | + switch (nla_type(attr)) { |
---|
| 616 | + case TCA_GRED_VQ_ENTRY: |
---|
| 617 | + err = gred_vq_validate(table, cdp, attr, extack); |
---|
| 618 | + if (err) |
---|
| 619 | + return err; |
---|
| 620 | + break; |
---|
| 621 | + default: |
---|
| 622 | + NL_SET_ERR_MSG_MOD(extack, "GRED_VQ_LIST can contain only entry attributes"); |
---|
| 623 | + return -EINVAL; |
---|
| 624 | + } |
---|
| 625 | + } |
---|
| 626 | + |
---|
| 627 | + if (rem > 0) { |
---|
| 628 | + NL_SET_ERR_MSG_MOD(extack, "Trailing data after parsing virtual queue list"); |
---|
| 629 | + return -EINVAL; |
---|
| 630 | + } |
---|
| 631 | + |
---|
| 632 | + return 0; |
---|
| 633 | +} |
---|
394 | 634 | |
---|
395 | 635 | static int gred_change(struct Qdisc *sch, struct nlattr *opt, |
---|
396 | 636 | struct netlink_ext_ack *extack) |
---|
.. | .. |
---|
406 | 646 | if (opt == NULL) |
---|
407 | 647 | return -EINVAL; |
---|
408 | 648 | |
---|
409 | | - err = nla_parse_nested(tb, TCA_GRED_MAX, opt, gred_policy, NULL); |
---|
| 649 | + err = nla_parse_nested_deprecated(tb, TCA_GRED_MAX, opt, gred_policy, |
---|
| 650 | + extack); |
---|
410 | 651 | if (err < 0) |
---|
411 | 652 | return err; |
---|
412 | 653 | |
---|
413 | 654 | if (tb[TCA_GRED_PARMS] == NULL && tb[TCA_GRED_STAB] == NULL) { |
---|
414 | 655 | if (tb[TCA_GRED_LIMIT] != NULL) |
---|
415 | 656 | sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]); |
---|
416 | | - return gred_change_table_def(sch, tb[TCA_GRED_DPS]); |
---|
| 657 | + return gred_change_table_def(sch, tb[TCA_GRED_DPS], extack); |
---|
417 | 658 | } |
---|
418 | 659 | |
---|
419 | 660 | if (tb[TCA_GRED_PARMS] == NULL || |
---|
420 | 661 | tb[TCA_GRED_STAB] == NULL || |
---|
421 | | - tb[TCA_GRED_LIMIT] != NULL) |
---|
| 662 | + tb[TCA_GRED_LIMIT] != NULL) { |
---|
| 663 | + NL_SET_ERR_MSG_MOD(extack, "can't configure Qdisc and virtual queue at the same time"); |
---|
422 | 664 | return -EINVAL; |
---|
| 665 | + } |
---|
423 | 666 | |
---|
424 | 667 | max_P = tb[TCA_GRED_MAX_P] ? nla_get_u32(tb[TCA_GRED_MAX_P]) : 0; |
---|
425 | 668 | |
---|
426 | | - err = -EINVAL; |
---|
427 | 669 | ctl = nla_data(tb[TCA_GRED_PARMS]); |
---|
428 | 670 | stab = nla_data(tb[TCA_GRED_STAB]); |
---|
429 | 671 | |
---|
430 | | - if (ctl->DP >= table->DPs) |
---|
431 | | - goto errout; |
---|
| 672 | + if (ctl->DP >= table->DPs) { |
---|
| 673 | + NL_SET_ERR_MSG_MOD(extack, "virtual queue index above virtual queue count"); |
---|
| 674 | + return -EINVAL; |
---|
| 675 | + } |
---|
| 676 | + |
---|
| 677 | + if (tb[TCA_GRED_VQ_LIST]) { |
---|
| 678 | + err = gred_vqs_validate(table, ctl->DP, tb[TCA_GRED_VQ_LIST], |
---|
| 679 | + extack); |
---|
| 680 | + if (err) |
---|
| 681 | + return err; |
---|
| 682 | + } |
---|
432 | 683 | |
---|
433 | 684 | if (gred_rio_mode(table)) { |
---|
434 | 685 | if (ctl->prio == 0) { |
---|
.. | .. |
---|
448 | 699 | prealloc = kzalloc(sizeof(*prealloc), GFP_KERNEL); |
---|
449 | 700 | sch_tree_lock(sch); |
---|
450 | 701 | |
---|
451 | | - err = gred_change_vq(sch, ctl->DP, ctl, prio, stab, max_P, &prealloc); |
---|
| 702 | + err = gred_change_vq(sch, ctl->DP, ctl, prio, stab, max_P, &prealloc, |
---|
| 703 | + extack); |
---|
452 | 704 | if (err < 0) |
---|
453 | | - goto errout_locked; |
---|
| 705 | + goto err_unlock_free; |
---|
| 706 | + |
---|
| 707 | + if (tb[TCA_GRED_VQ_LIST]) |
---|
| 708 | + gred_vqs_apply(table, tb[TCA_GRED_VQ_LIST]); |
---|
454 | 709 | |
---|
455 | 710 | if (gred_rio_mode(table)) { |
---|
456 | 711 | gred_disable_wred_mode(table); |
---|
.. | .. |
---|
458 | 713 | gred_enable_wred_mode(table); |
---|
459 | 714 | } |
---|
460 | 715 | |
---|
461 | | - err = 0; |
---|
462 | | - |
---|
463 | | -errout_locked: |
---|
464 | 716 | sch_tree_unlock(sch); |
---|
465 | 717 | kfree(prealloc); |
---|
466 | | -errout: |
---|
| 718 | + |
---|
| 719 | + gred_offload(sch, TC_GRED_REPLACE); |
---|
| 720 | + return 0; |
---|
| 721 | + |
---|
| 722 | +err_unlock_free: |
---|
| 723 | + sch_tree_unlock(sch); |
---|
| 724 | + kfree(prealloc); |
---|
467 | 725 | return err; |
---|
468 | 726 | } |
---|
469 | 727 | |
---|
.. | .. |
---|
476 | 734 | if (!opt) |
---|
477 | 735 | return -EINVAL; |
---|
478 | 736 | |
---|
479 | | - err = nla_parse_nested(tb, TCA_GRED_MAX, opt, gred_policy, NULL); |
---|
| 737 | + err = nla_parse_nested_deprecated(tb, TCA_GRED_MAX, opt, gred_policy, |
---|
| 738 | + extack); |
---|
480 | 739 | if (err < 0) |
---|
481 | 740 | return err; |
---|
482 | 741 | |
---|
483 | | - if (tb[TCA_GRED_PARMS] || tb[TCA_GRED_STAB]) |
---|
| 742 | + if (tb[TCA_GRED_PARMS] || tb[TCA_GRED_STAB]) { |
---|
| 743 | + NL_SET_ERR_MSG_MOD(extack, |
---|
| 744 | + "virtual queue configuration can't be specified at initialization time"); |
---|
484 | 745 | return -EINVAL; |
---|
| 746 | + } |
---|
485 | 747 | |
---|
486 | 748 | if (tb[TCA_GRED_LIMIT]) |
---|
487 | 749 | sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]); |
---|
.. | .. |
---|
489 | 751 | sch->limit = qdisc_dev(sch)->tx_queue_len |
---|
490 | 752 | * psched_mtu(qdisc_dev(sch)); |
---|
491 | 753 | |
---|
492 | | - return gred_change_table_def(sch, tb[TCA_GRED_DPS]); |
---|
| 754 | + return gred_change_table_def(sch, tb[TCA_GRED_DPS], extack); |
---|
493 | 755 | } |
---|
494 | 756 | |
---|
495 | 757 | static int gred_dump(struct Qdisc *sch, struct sk_buff *skb) |
---|
496 | 758 | { |
---|
497 | 759 | struct gred_sched *table = qdisc_priv(sch); |
---|
498 | | - struct nlattr *parms, *opts = NULL; |
---|
| 760 | + struct nlattr *parms, *vqs, *opts = NULL; |
---|
499 | 761 | int i; |
---|
500 | 762 | u32 max_p[MAX_DPs]; |
---|
501 | 763 | struct tc_gred_sopt sopt = { |
---|
.. | .. |
---|
505 | 767 | .flags = table->red_flags, |
---|
506 | 768 | }; |
---|
507 | 769 | |
---|
508 | | - opts = nla_nest_start(skb, TCA_OPTIONS); |
---|
| 770 | + if (gred_offload_dump_stats(sch)) |
---|
| 771 | + goto nla_put_failure; |
---|
| 772 | + |
---|
| 773 | + opts = nla_nest_start_noflag(skb, TCA_OPTIONS); |
---|
509 | 774 | if (opts == NULL) |
---|
510 | 775 | goto nla_put_failure; |
---|
511 | 776 | if (nla_put(skb, TCA_GRED_DPS, sizeof(sopt), &sopt)) |
---|
.. | .. |
---|
522 | 787 | if (nla_put_u32(skb, TCA_GRED_LIMIT, sch->limit)) |
---|
523 | 788 | goto nla_put_failure; |
---|
524 | 789 | |
---|
525 | | - parms = nla_nest_start(skb, TCA_GRED_PARMS); |
---|
| 790 | + /* Old style all-in-one dump of VQs */ |
---|
| 791 | + parms = nla_nest_start_noflag(skb, TCA_GRED_PARMS); |
---|
526 | 792 | if (parms == NULL) |
---|
527 | 793 | goto nla_put_failure; |
---|
528 | 794 | |
---|
.. | .. |
---|
572 | 838 | |
---|
573 | 839 | nla_nest_end(skb, parms); |
---|
574 | 840 | |
---|
| 841 | + /* Dump the VQs again, in more structured way */ |
---|
| 842 | + vqs = nla_nest_start_noflag(skb, TCA_GRED_VQ_LIST); |
---|
| 843 | + if (!vqs) |
---|
| 844 | + goto nla_put_failure; |
---|
| 845 | + |
---|
| 846 | + for (i = 0; i < MAX_DPs; i++) { |
---|
| 847 | + struct gred_sched_data *q = table->tab[i]; |
---|
| 848 | + struct nlattr *vq; |
---|
| 849 | + |
---|
| 850 | + if (!q) |
---|
| 851 | + continue; |
---|
| 852 | + |
---|
| 853 | + vq = nla_nest_start_noflag(skb, TCA_GRED_VQ_ENTRY); |
---|
| 854 | + if (!vq) |
---|
| 855 | + goto nla_put_failure; |
---|
| 856 | + |
---|
| 857 | + if (nla_put_u32(skb, TCA_GRED_VQ_DP, q->DP)) |
---|
| 858 | + goto nla_put_failure; |
---|
| 859 | + |
---|
| 860 | + if (nla_put_u32(skb, TCA_GRED_VQ_FLAGS, q->red_flags)) |
---|
| 861 | + goto nla_put_failure; |
---|
| 862 | + |
---|
| 863 | + /* Stats */ |
---|
| 864 | + if (nla_put_u64_64bit(skb, TCA_GRED_VQ_STAT_BYTES, q->bytesin, |
---|
| 865 | + TCA_GRED_VQ_PAD)) |
---|
| 866 | + goto nla_put_failure; |
---|
| 867 | + if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PACKETS, q->packetsin)) |
---|
| 868 | + goto nla_put_failure; |
---|
| 869 | + if (nla_put_u32(skb, TCA_GRED_VQ_STAT_BACKLOG, |
---|
| 870 | + gred_backlog(table, q, sch))) |
---|
| 871 | + goto nla_put_failure; |
---|
| 872 | + if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PROB_DROP, |
---|
| 873 | + q->stats.prob_drop)) |
---|
| 874 | + goto nla_put_failure; |
---|
| 875 | + if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PROB_MARK, |
---|
| 876 | + q->stats.prob_mark)) |
---|
| 877 | + goto nla_put_failure; |
---|
| 878 | + if (nla_put_u32(skb, TCA_GRED_VQ_STAT_FORCED_DROP, |
---|
| 879 | + q->stats.forced_drop)) |
---|
| 880 | + goto nla_put_failure; |
---|
| 881 | + if (nla_put_u32(skb, TCA_GRED_VQ_STAT_FORCED_MARK, |
---|
| 882 | + q->stats.forced_mark)) |
---|
| 883 | + goto nla_put_failure; |
---|
| 884 | + if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PDROP, q->stats.pdrop)) |
---|
| 885 | + goto nla_put_failure; |
---|
| 886 | + if (nla_put_u32(skb, TCA_GRED_VQ_STAT_OTHER, q->stats.other)) |
---|
| 887 | + goto nla_put_failure; |
---|
| 888 | + |
---|
| 889 | + nla_nest_end(skb, vq); |
---|
| 890 | + } |
---|
| 891 | + nla_nest_end(skb, vqs); |
---|
| 892 | + |
---|
575 | 893 | return nla_nest_end(skb, opts); |
---|
576 | 894 | |
---|
577 | 895 | nla_put_failure: |
---|
.. | .. |
---|
588 | 906 | if (table->tab[i]) |
---|
589 | 907 | gred_destroy_vq(table->tab[i]); |
---|
590 | 908 | } |
---|
| 909 | + gred_offload(sch, TC_GRED_DESTROY); |
---|
591 | 910 | } |
---|
592 | 911 | |
---|
593 | 912 | static struct Qdisc_ops gred_qdisc_ops __read_mostly = { |
---|