2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/net/sched/sch_drr.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * net/sched/sch_drr.c		Deficit Round Robin scheduler
  *
  * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
  */
 
 #include <linux/module.h>
@@ -50,15 +47,6 @@
 	return container_of(clc, struct drr_class, common);
 }
 
-static void drr_purge_queue(struct drr_class *cl)
-{
-	unsigned int len = cl->qdisc->q.qlen;
-	unsigned int backlog = cl->qdisc->qstats.backlog;
-
-	qdisc_reset(cl->qdisc);
-	qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
-}
-
 static const struct nla_policy drr_policy[TCA_DRR_MAX + 1] = {
 	[TCA_DRR_QUANTUM]	= { .type = NLA_U32 },
 };
@@ -79,7 +67,8 @@
 		return -EINVAL;
 	}
 
-	err = nla_parse_nested(tb, TCA_DRR_MAX, opt, drr_policy, extack);
+	err = nla_parse_nested_deprecated(tb, TCA_DRR_MAX, opt, drr_policy,
+					  extack);
 	if (err < 0)
 		return err;
 
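The nla_parse_nested() call is renamed to nla_parse_nested_deprecated() as part of the strict netlink validation rework: existing callers keep the old, liberal validation behaviour under the _deprecated name, while new code opts into strict checking. Roughly, the wrapper in include/net/netlink.h looks like the sketch below (paraphrased for illustration, not part of this patch):

static inline int nla_parse_nested_deprecated(struct nlattr *tb[], int maxtype,
					      const struct nlattr *nla,
					      const struct nla_policy *policy,
					      struct netlink_ext_ack *extack)
{
	/* parse the nested attribute stream with liberal validation */
	return __nla_parse(tb, maxtype, nla_data(nla), nla_len(nla), policy,
			   NL_VALIDATE_LIBERAL, extack);
}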
@@ -167,7 +156,7 @@
 
 	sch_tree_lock(sch);
 
-	drr_purge_queue(cl);
+	qdisc_purge_queue(cl->qdisc);
 	qdisc_class_hash_remove(&q->clhash, &cl->common);
 
 	sch_tree_unlock(sch);
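The open-coded drr_purge_queue() removed earlier is replaced by the generic qdisc_purge_queue() helper, which performs the same reset-plus-backlog-accounting sequence for any child qdisc. A minimal sketch of such a helper (paraphrasing include/net/sch_generic.h; the in-tree version may differ in detail):

static inline void qdisc_purge_queue(struct Qdisc *sch)
{
	__u32 qlen, backlog;

	/* snapshot the child's counters before wiping its queue */
	qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
	qdisc_reset(sch);
	/* propagate the vanished qlen/backlog up to the ancestors */
	qdisc_tree_reduce_backlog(sch, qlen, backlog);
}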
@@ -253,7 +242,7 @@
 	tcm->tcm_handle	= cl->common.classid;
 	tcm->tcm_info	= cl->qdisc->handle;
 
-	nest = nla_nest_start(skb, TCA_OPTIONS);
+	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
 	if (nest == NULL)
 		goto nla_put_failure;
 	if (nla_put_u32(skb, TCA_DRR_QUANTUM, cl->quantum))
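nla_nest_start() becomes nla_nest_start_noflag() for the same reason: the old function, which did not set NLA_F_NESTED on the container attribute, was renamed, and the plain name was reused for a variant that does set the flag. Roughly (paraphrasing include/net/netlink.h):

static inline struct nlattr *nla_nest_start(struct sk_buff *skb, int attrtype)
{
	/* the new default marks the container as a proper nested attribute */
	return nla_nest_start_noflag(skb, attrtype | NLA_F_NESTED);
}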
@@ -269,7 +258,8 @@
 				struct gnet_dump *d)
 {
 	struct drr_class *cl = (struct drr_class *)arg;
-	__u32 qlen = cl->qdisc->q.qlen;
+	__u32 qlen = qdisc_qlen_sum(cl->qdisc);
+	struct Qdisc *cl_q = cl->qdisc;
 	struct tc_drr_stats xstats;
 
 	memset(&xstats, 0, sizeof(xstats));
@@ -279,7 +269,7 @@
 	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
 				  d, NULL, &cl->bstats) < 0 ||
 	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
-	    gnet_stats_copy_queue(d, NULL, &cl->qdisc->qstats, qlen) < 0)
+	    gnet_stats_copy_queue(d, cl_q->cpu_qstats, &cl_q->qstats, qlen) < 0)
 		return -1;
 
 	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
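These two hunks make the class stats dump aware of per-CPU queue statistics: with lockless (TCQ_F_CPUSTATS) child qdiscs, qlen and the drop counters live in per-CPU structures, so reading q.qlen alone undercounts. qdisc_qlen_sum() folds them together; a rough sketch of that helper (paraphrasing sch_generic.h):

static inline __u32 qdisc_qlen_sum(const struct Qdisc *q)
{
	__u32 qlen = q->qstats.qlen;
	int i;

	if (qdisc_is_percpu_stats(q)) {
		/* lockless qdiscs keep qlen per CPU; add up every CPU's share */
		for_each_possible_cpu(i)
			qlen += per_cpu_ptr(q->cpu_qstats, i)->qlen;
	} else {
		qlen += q->q.qlen;
	}
	return qlen;
}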
@@ -334,7 +324,7 @@
 	case TC_ACT_STOLEN:
 	case TC_ACT_TRAP:
 		*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
-		/* fall through */
+		fallthrough;
 	case TC_ACT_SHOT:
 		return NULL;
 	}
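The "/* fall through */" comment becomes the fallthrough pseudo-keyword, which compiles down to the fallthrough attribute where the compiler supports it, so -Wimplicit-fallthrough can check switch cases reliably. Roughly (paraphrasing include/linux/compiler_attributes.h):

#if __has_attribute(__fallthrough__)
# define fallthrough	__attribute__((__fallthrough__))
#else
# define fallthrough	do {} while (0)	/* fallthrough */
#endif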
@@ -350,9 +340,11 @@
 static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 		       struct sk_buff **to_free)
 {
+	unsigned int len = qdisc_pkt_len(skb);
 	struct drr_sched *q = qdisc_priv(sch);
 	struct drr_class *cl;
 	int err = 0;
+	bool first;
 
 	cl = drr_classify(skb, sch, &err);
 	if (cl == NULL) {
@@ -362,6 +354,7 @@
 		return err;
 	}
 
+	first = !cl->qdisc->q.qlen;
 	err = qdisc_enqueue(skb, cl->qdisc, to_free);
 	if (unlikely(err != NET_XMIT_SUCCESS)) {
 		if (net_xmit_drop_count(err)) {
@@ -371,12 +364,12 @@
 		return err;
 	}
 
-	if (cl->qdisc->q.qlen == 1) {
+	if (first) {
 		list_add_tail(&cl->alist, &q->active);
 		cl->deficit = cl->quantum;
 	}
 
-	qdisc_qstats_backlog_inc(sch, skb);
+	sch->qstats.backlog += len;
 	sch->q.qlen++;
 	return err;
 }
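The enqueue hunks reorder the bookkeeping: the packet length and the "was the class queue empty?" test are captured before the skb is handed to the child qdisc, because the child may segment, requeue, or free the skb, after which neither the skb itself nor a cl->qdisc->q.qlen == 1 check is safe to rely on. A condensed sketch of the resulting pattern (names follow the patch, error handling elided):

	unsigned int len = qdisc_pkt_len(skb);	/* the skb may be gone after enqueue */
	bool first = !cl->qdisc->q.qlen;	/* was the class idle beforehand? */

	err = qdisc_enqueue(skb, cl->qdisc, to_free);
	if (err == NET_XMIT_SUCCESS) {
		if (first) {
			/* link the class in only on the idle -> busy transition */
			list_add_tail(&cl->alist, &q->active);
			cl->deficit = cl->quantum;
		}
		sch->qstats.backlog += len;	/* use the cached length, not the skb */
		sch->q.qlen++;
	}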
@@ -450,8 +443,6 @@
 			qdisc_reset(cl->qdisc);
 		}
 	}
-	sch->qstats.backlog = 0;
-	sch->q.qlen = 0;
 }
 
 static void drr_destroy_qdisc(struct Qdisc *sch)
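Finally, drr_reset_qdisc() no longer zeroes sch->qstats.backlog and sch->q.qlen itself: the core clears both counters after invoking the qdisc's ->reset() callback, so repeating it in each qdisc is redundant. A trimmed sketch of the caller in net/sched/sch_generic.c (gso_skb and bad-txq purging omitted, paraphrased):

void qdisc_reset(struct Qdisc *qdisc)
{
	const struct Qdisc_ops *ops = qdisc->ops;

	if (ops->reset)
		ops->reset(qdisc);	/* e.g. drr_reset_qdisc() */

	/* the core clears the counters for every qdisc */
	qdisc->q.qlen = 0;
	qdisc->qstats.backlog = 0;
}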