forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-08 01573e231f18eb2d99162747186f59511f56b64d
kernel/net/sched/sch_qfq.c
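Taken together, the hunks below appear to backport several mainline cleanups and one enqueue accounting fix: the GPL notice becomes an SPDX tag; the local qfq_purge_queue() helper gives way to the generic qdisc_purge_queue(); the netlink calls move to the nla_parse_nested_deprecated()/nla_nest_start_noflag() names introduced alongside strict netlink validation; class stats dumps switch to qdisc_qstats_copy(); the /* fall through */ comment becomes the fallthrough pseudo-keyword; qfq_enqueue() stops dereferencing the skb after handing it to the child qdisc; and qfq_reset_qdisc() drops counter resets that the core already performs.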
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * net/sched/sch_qfq.c         Quick Fair Queueing Plus Scheduler.
  *
  * Copyright (c) 2009 Fabio Checconi, Luigi Rizzo, and Paolo Valente.
  * Copyright (c) 2012 Paolo Valente.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
  */
 
 #include <linux/module.h>
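The four-line GPL notice is replaced by the equivalent SPDX-License-Identifier tag, in line with the kernel-wide SPDX conversion; the license itself (GPL-2.0-only) is unchanged.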
@@ -217,15 +214,6 @@
 	return container_of(clc, struct qfq_class, common);
 }
 
-static void qfq_purge_queue(struct qfq_class *cl)
-{
-	unsigned int len = cl->qdisc->q.qlen;
-	unsigned int backlog = cl->qdisc->qstats.backlog;
-
-	qdisc_reset(cl->qdisc);
-	qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
-}
-
 static const struct nla_policy qfq_policy[TCA_QFQ_MAX + 1] = {
 	[TCA_QFQ_WEIGHT] = { .type = NLA_U32 },
 	[TCA_QFQ_LMAX] = { .type = NLA_U32 },
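The deleted local helper is superseded by qdisc_purge_queue() from include/net/sch_generic.h, used in the class-delete hunk further down. From memory of the v5.x headers it performs the same reset-then-reduce-backlog sequence, with the qlen/backlog read going through a helper that also copes with per-CPU stats; roughly:

	static inline void qdisc_purge_queue(struct Qdisc *sch)
	{
		__u32 qlen, backlog;

		/* read qlen/backlog, summing per-CPU counters if present */
		qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
		qdisc_reset(sch);
		qdisc_tree_reduce_backlog(sch, qlen, backlog);
	}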
@@ -419,8 +407,8 @@
 		return -EINVAL;
 	}
 
-	err = nla_parse_nested(tb, TCA_QFQ_MAX, tca[TCA_OPTIONS], qfq_policy,
-			       NULL);
+	err = nla_parse_nested_deprecated(tb, TCA_QFQ_MAX, tca[TCA_OPTIONS],
+					  qfq_policy, NULL);
 	if (err < 0)
 		return err;
 
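Only the name changes here: when strict netlink attribute validation was introduced (Linux 5.2), the old lenient nla_parse_nested() was renamed nla_parse_nested_deprecated(), and the nla_parse_nested() name was reused for the strict variant. Arguments and behavior for this caller are identical.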
@@ -549,7 +537,7 @@
 
 	sch_tree_lock(sch);
 
-	qfq_purge_queue(cl);
+	qdisc_purge_queue(cl->qdisc);
 	qdisc_class_hash_remove(&q->clhash, &cl->common);
 
 	sch_tree_unlock(sch);
@@ -626,7 +614,7 @@
 	tcm->tcm_handle	= cl->common.classid;
 	tcm->tcm_info	= cl->qdisc->handle;
 
-	nest = nla_nest_start(skb, TCA_OPTIONS);
+	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
 	if (nest == NULL)
 		goto nla_put_failure;
 	if (nla_put_u32(skb, TCA_QFQ_WEIGHT, cl->agg->class_weight) ||
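Same pattern as the parse call above: nla_nest_start() was renamed nla_nest_start_noflag(), while the new nla_nest_start() additionally sets the NLA_F_NESTED flag on the nest attribute. No behavioral change for this caller.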
@@ -653,8 +641,7 @@
 	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
 				  d, NULL, &cl->bstats) < 0 ||
 	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
-	    gnet_stats_copy_queue(d, NULL,
-				  &cl->qdisc->qstats, cl->qdisc->q.qlen) < 0)
+	    qdisc_qstats_copy(d, cl->qdisc) < 0)
 		return -1;
 
 	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
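qdisc_qstats_copy() folds the two-line gnet_stats_copy_queue() call into a helper that also sums per-CPU queue stats where a qdisc uses them. As a sketch of the v5.x helper:

	static inline int qdisc_qstats_copy(struct gnet_dump *d, struct Qdisc *sch)
	{
		__u32 qlen = qdisc_qlen_sum(sch);

		return gnet_stats_copy_queue(d, sch->cpu_qstats, &sch->qstats, qlen);
	}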
@@ -710,7 +697,7 @@
 	case TC_ACT_STOLEN:
 	case TC_ACT_TRAP:
 		*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
-		/* fall through */
+		fallthrough;
 	case TC_ACT_SHOT:
 		return NULL;
 	}
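fallthrough is the pseudo-keyword from include/linux/compiler_attributes.h that replaces the magic comment, so the compiler rather than a comment scanner enforces that the switch fall-through is intentional. It is defined roughly as:

	#if __has_attribute(__fallthrough__)
	# define fallthrough	__attribute__((__fallthrough__))
	#else
	# define fallthrough	do {} while (0)	/* fallthrough */
	#endif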
@@ -1208,10 +1195,12 @@
 static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 		       struct sk_buff **to_free)
 {
+	unsigned int len = qdisc_pkt_len(skb), gso_segs;
 	struct qfq_sched *q = qdisc_priv(sch);
 	struct qfq_class *cl;
 	struct qfq_aggregate *agg;
 	int err = 0;
+	bool first;
 
 	cl = qfq_classify(skb, sch, &err);
 	if (cl == NULL) {
@@ -1222,17 +1211,18 @@
 	}
 	pr_debug("qfq_enqueue: cl = %x\n", cl->common.classid);
 
-	if (unlikely(cl->agg->lmax < qdisc_pkt_len(skb))) {
+	if (unlikely(cl->agg->lmax < len)) {
 		pr_debug("qfq: increasing maxpkt from %u to %u for class %u",
-			 cl->agg->lmax, qdisc_pkt_len(skb), cl->common.classid);
-		err = qfq_change_agg(sch, cl, cl->agg->class_weight,
-				     qdisc_pkt_len(skb));
+			 cl->agg->lmax, len, cl->common.classid);
+		err = qfq_change_agg(sch, cl, cl->agg->class_weight, len);
 		if (err) {
 			cl->qstats.drops++;
 			return qdisc_drop(skb, sch, to_free);
 		}
 	}
 
+	gso_segs = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
+	first = !cl->qdisc->q.qlen;
 	err = qdisc_enqueue(skb, cl->qdisc, to_free);
 	if (unlikely(err != NET_XMIT_SUCCESS)) {
 		pr_debug("qfq_enqueue: enqueue failed %d\n", err);
@@ -1243,16 +1233,17 @@
 		return err;
 	}
 
-	bstats_update(&cl->bstats, skb);
-	qdisc_qstats_backlog_inc(sch, skb);
+	cl->bstats.bytes += len;
+	cl->bstats.packets += gso_segs;
+	sch->qstats.backlog += len;
 	++sch->q.qlen;
 
 	agg = cl->agg;
 	/* if the queue was not empty, then done here */
-	if (cl->qdisc->q.qlen != 1) {
+	if (!first) {
 		if (unlikely(skb == cl->qdisc->ops->peek(cl->qdisc)) &&
 		    list_first_entry(&agg->active, struct qfq_class, alist)
-		    == cl && cl->deficit < qdisc_pkt_len(skb))
+		    == cl && cl->deficit < len)
 			list_move_tail(&cl->alist, &agg->active);
 
 		return err;
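Two fixes are folded into these enqueue hunks. First, qdisc_pkt_len(skb) and the GSO segment count are captured before qdisc_enqueue(): the child qdisc may free or segment the skb, so the old bstats_update()/qdisc_qstats_backlog_inc() calls after enqueue could touch freed memory. Second, "was the class queue empty?" is now sampled into first before enqueue rather than inferred from q.qlen == 1 afterwards, which is unreliable when the child drops or splits packets during enqueue.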
@@ -1467,8 +1458,6 @@
 			qdisc_reset(cl->qdisc);
 		}
 	}
-	sch->qstats.backlog = 0;
-	sch->q.qlen = 0;
 }
 
 static void qfq_destroy_qdisc(struct Qdisc *sch)
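The removed zeroing is redundant rather than wrong: in mainline kernels of this era, the generic qdisc_reset() in net/sched/sch_generic.c clears these counters itself after invoking the scheduler's reset callback. Sketching the mainline core (trace hooks omitted):

	void qdisc_reset(struct Qdisc *qdisc)
	{
		const struct Qdisc_ops *ops = qdisc->ops;

		if (ops->reset)
			ops->reset(qdisc);

		__skb_queue_purge(&qdisc->gso_skb);
		__skb_queue_purge(&qdisc->skb_bad_txq);

		qdisc->q.qlen = 0;
		qdisc->qstats.backlog = 0;
	}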