2024-10-22 8ac6c7a54ed1b98d142dce24b11c6de6a1e239a5
kernel/net/sched/sch_htb.c
@@ -1,10 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * net/sched/sch_htb.c	Hierarchical token bucket, feed tree version
- *
- *		This program is free software; you can redistribute it and/or
- *		modify it under the terms of the GNU General Public License
- *		as published by the Free Software Foundation; either version
- *		2 of the License, or (at your option) any later version.
  *
  * Authors:	Martin Devera, <devik@cdi.cz>
  *
@@ -132,7 +128,7 @@
 		struct htb_class_inner {
 			struct htb_prio clprio[TC_HTB_NUMPRIO];
 		} inner;
-	} un;
+	};
 	s64	pq_key;

 	int	prio_activity;	/* for which prios are we active */
@@ -165,7 +161,8 @@

 	/* non shaped skbs; let them go directly thru */
 	struct qdisc_skb_head	direct_queue;
-	long			direct_pkts;
+	u32			direct_pkts;
+	u32			overlimits;

 	struct qdisc_watchdog	watchdog;

@@ -242,7 +239,7 @@
 		case TC_ACT_STOLEN:
 		case TC_ACT_TRAP:
 			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
-			/* fall through */
+			fallthrough;
 		case TC_ACT_SHOT:
 			return NULL;
 		}
@@ -408,16 +405,19 @@
 	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
 		m = mask;
 		while (m) {
-			int prio = ffz(~m);
+			unsigned int prio = ffz(~m);
+
+			if (WARN_ON_ONCE(prio >= ARRAY_SIZE(p->inner.clprio)))
+				break;
 			m &= ~(1 << prio);

-			if (p->un.inner.clprio[prio].feed.rb_node)
+			if (p->inner.clprio[prio].feed.rb_node)
 				/* parent already has its feed in use so that
 				 * reset bit in mask as parent is already ok
 				 */
 				mask &= ~(1 << prio);

-			htb_add_to_id_tree(&p->un.inner.clprio[prio].feed, cl, prio);
+			htb_add_to_id_tree(&p->inner.clprio[prio].feed, cl, prio);
 		}
 		p->prio_activity |= mask;
 		cl = p;
@@ -447,19 +447,19 @@
 			int prio = ffz(~m);
 			m &= ~(1 << prio);

-			if (p->un.inner.clprio[prio].ptr == cl->node + prio) {
+			if (p->inner.clprio[prio].ptr == cl->node + prio) {
 				/* we are removing child which is pointed to from
 				 * parent feed - forget the pointer but remember
 				 * classid
 				 */
-				p->un.inner.clprio[prio].last_ptr_id = cl->common.classid;
-				p->un.inner.clprio[prio].ptr = NULL;
+				p->inner.clprio[prio].last_ptr_id = cl->common.classid;
+				p->inner.clprio[prio].ptr = NULL;
 			}

 			htb_safe_rb_erase(cl->node + prio,
-					  &p->un.inner.clprio[prio].feed);
+					  &p->inner.clprio[prio].feed);

-			if (!p->un.inner.clprio[prio].feed.rb_node)
+			if (!p->inner.clprio[prio].feed.rb_node)
 				mask |= 1 << prio;
 		}

@@ -533,8 +533,10 @@
 	if (new_mode == cl->cmode)
 		return;

-	if (new_mode == HTB_CANT_SEND)
+	if (new_mode == HTB_CANT_SEND) {
 		cl->overlimits++;
+		q->overlimits++;
+	}

 	if (cl->prio_activity) {	/* not necessary: speed optimization */
 		if (cl->cmode != HTB_CANT_SEND)
@@ -555,7 +557,7 @@
  */
 static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
 {
-	WARN_ON(cl->level || !cl->un.leaf.q || !cl->un.leaf.q->q.qlen);
+	WARN_ON(cl->level || !cl->leaf.q || !cl->leaf.q->q.qlen);

 	if (!cl->prio_activity) {
 		cl->prio_activity = 1 << cl->prio;
@@ -577,33 +579,18 @@
 	cl->prio_activity = 0;
 }

-static void htb_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
-			     struct qdisc_skb_head *qh)
-{
-	struct sk_buff *last = qh->tail;
-
-	if (last) {
-		skb->next = NULL;
-		last->next = skb;
-		qh->tail = skb;
-	} else {
-		qh->tail = skb;
-		qh->head = skb;
-	}
-	qh->qlen++;
-}
-
 static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 		       struct sk_buff **to_free)
 {
-	int uninitialized_var(ret);
+	int ret;
+	unsigned int len = qdisc_pkt_len(skb);
 	struct htb_sched *q = qdisc_priv(sch);
 	struct htb_class *cl = htb_classify(skb, sch, &ret);

 	if (cl == HTB_DIRECT) {
 		/* enqueue to helper queue */
 		if (q->direct_queue.qlen < q->direct_qlen) {
-			htb_enqueue_tail(skb, sch, &q->direct_queue);
+			__qdisc_enqueue_tail(skb, &q->direct_queue);
 			q->direct_pkts++;
 		} else {
 			return qdisc_drop(skb, sch, to_free);
@@ -615,7 +602,7 @@
 		__qdisc_drop(skb, to_free);
 		return ret;
 #endif
-	} else if ((ret = qdisc_enqueue(skb, cl->un.leaf.q,
+	} else if ((ret = qdisc_enqueue(skb, cl->leaf.q,
 					to_free)) != NET_XMIT_SUCCESS) {
 		if (net_xmit_drop_count(ret)) {
 			qdisc_qstats_drop(sch);
@@ -626,7 +613,7 @@
 		htb_activate(q, cl);
 	}

-	qdisc_qstats_backlog_inc(sch, skb);
+	sch->qstats.backlog += len;
 	sch->q.qlen++;
 	return NET_XMIT_SUCCESS;
 }
@@ -823,7 +810,7 @@
 			cl = rb_entry(*sp->pptr, struct htb_class, node[prio]);
 			if (!cl->level)
 				return cl;
-			clp = &cl->un.inner.clprio[prio];
+			clp = &cl->inner.clprio[prio];
 			(++sp)->root = clp->feed.rb_node;
 			sp->pptr = &clp->ptr;
 			sp->pid = &clp->last_ptr_id;
@@ -857,7 +844,7 @@
 		 * graft operation on the leaf since last dequeue;
 		 * simply deactivate and skip such class
 		 */
-		if (unlikely(cl->un.leaf.q->q.qlen == 0)) {
+		if (unlikely(cl->leaf.q->q.qlen == 0)) {
 			struct htb_class *next;
 			htb_deactivate(q, cl);

@@ -873,12 +860,12 @@
 			goto next;
 		}

-		skb = cl->un.leaf.q->dequeue(cl->un.leaf.q);
+		skb = cl->leaf.q->dequeue(cl->leaf.q);
 		if (likely(skb != NULL))
 			break;

-		qdisc_warn_nonwc("htb", cl->un.leaf.q);
-		htb_next_rb_node(level ? &cl->parent->un.inner.clprio[prio].ptr:
+		qdisc_warn_nonwc("htb", cl->leaf.q);
+		htb_next_rb_node(level ? &cl->parent->inner.clprio[prio].ptr:
 					 &q->hlevel[0].hprio[prio].ptr);
 		cl = htb_lookup_leaf(hprio, prio);

@@ -886,16 +873,16 @@

 	if (likely(skb != NULL)) {
 		bstats_update(&cl->bstats, skb);
-		cl->un.leaf.deficit[level] -= qdisc_pkt_len(skb);
-		if (cl->un.leaf.deficit[level] < 0) {
-			cl->un.leaf.deficit[level] += cl->quantum;
-			htb_next_rb_node(level ? &cl->parent->un.inner.clprio[prio].ptr :
+		cl->leaf.deficit[level] -= qdisc_pkt_len(skb);
+		if (cl->leaf.deficit[level] < 0) {
+			cl->leaf.deficit[level] += cl->quantum;
+			htb_next_rb_node(level ? &cl->parent->inner.clprio[prio].ptr :
 						 &q->hlevel[0].hprio[prio].ptr);
 		}
 		/* this used to be after charge_class but this constelation
 		 * gives us slightly better performance
 		 */
-		if (!cl->un.leaf.q->q.qlen)
+		if (!cl->leaf.q->q.qlen)
 			htb_deactivate(q, cl);
 		htb_charge_class(q, cl, level, skb);
 	}
@@ -952,7 +939,6 @@
 				goto ok;
 		}
 	}
-	qdisc_qstats_overlimit(sch);
 	if (likely(next_event > q->now))
 		qdisc_watchdog_schedule_ns(&q->watchdog, next_event);
 	else
@@ -972,10 +958,10 @@
 	for (i = 0; i < q->clhash.hashsize; i++) {
 		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
 			if (cl->level)
-				memset(&cl->un.inner, 0, sizeof(cl->un.inner));
+				memset(&cl->inner, 0, sizeof(cl->inner));
 			else {
-				if (cl->un.leaf.q)
-					qdisc_reset(cl->un.leaf.q);
+				if (cl->leaf.q)
+					qdisc_reset(cl->leaf.q);
 			}
 			cl->prio_activity = 0;
 			cl->cmode = HTB_CAN_SEND;
@@ -983,8 +969,6 @@
 	}
 	qdisc_watchdog_cancel(&q->watchdog);
 	__qdisc_reset_queue(&q->direct_queue);
-	sch->q.qlen = 0;
-	sch->qstats.backlog = 0;
 	memset(q->hlevel, 0, sizeof(q->hlevel));
 	memset(q->row_mask, 0, sizeof(q->row_mask));
 }
@@ -1027,7 +1011,8 @@
 	if (err)
 		return err;

-	err = nla_parse_nested(tb, TCA_HTB_MAX, opt, htb_policy, NULL);
+	err = nla_parse_nested_deprecated(tb, TCA_HTB_MAX, opt, htb_policy,
+					  NULL);
 	if (err < 0)
 		return err;

@@ -1062,6 +1047,7 @@
 	struct nlattr *nest;
 	struct tc_htb_glob gopt;

+	sch->qstats.overlimits = q->overlimits;
 	/* Its safe to not acquire qdisc lock. As we hold RTNL,
 	 * no change can happen on the qdisc parameters.
 	 */
@@ -1072,7 +1058,7 @@
 	gopt.defcls = q->defcls;
 	gopt.debug = 0;

-	nest = nla_nest_start(skb, TCA_OPTIONS);
+	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
 	if (nest == NULL)
 		goto nla_put_failure;
 	if (nla_put(skb, TCA_HTB_INIT, sizeof(gopt), &gopt) ||
@@ -1098,10 +1084,10 @@
 	 */
 	tcm->tcm_parent = cl->parent ? cl->parent->common.classid : TC_H_ROOT;
 	tcm->tcm_handle = cl->common.classid;
-	if (!cl->level && cl->un.leaf.q)
-		tcm->tcm_info = cl->un.leaf.q->handle;
+	if (!cl->level && cl->leaf.q)
+		tcm->tcm_info = cl->leaf.q->handle;

-	nest = nla_nest_start(skb, TCA_OPTIONS);
+	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
 	if (nest == NULL)
 		goto nla_put_failure;

@@ -1142,10 +1128,9 @@
 	};
 	__u32 qlen = 0;

-	if (!cl->level && cl->un.leaf.q) {
-		qlen = cl->un.leaf.q->q.qlen;
-		qs.backlog = cl->un.leaf.q->qstats.backlog;
-	}
+	if (!cl->level && cl->leaf.q)
+		qdisc_qstats_qlen_backlog(cl->leaf.q, &qlen, &qs.backlog);
+
 	cl->xstats.tokens = clamp_t(s64, PSCHED_NS2TICKS(cl->tokens),
 				    INT_MIN, INT_MAX);
 	cl->xstats.ctokens = clamp_t(s64, PSCHED_NS2TICKS(cl->ctokens),
@@ -1172,14 +1157,14 @@
 				     cl->common.classid, extack)) == NULL)
 		return -ENOBUFS;

-	*old = qdisc_replace(sch, new, &cl->un.leaf.q);
+	*old = qdisc_replace(sch, new, &cl->leaf.q);
 	return 0;
 }

 static struct Qdisc *htb_leaf(struct Qdisc *sch, unsigned long arg)
 {
 	struct htb_class *cl = (struct htb_class *)arg;
-	return !cl->level ? cl->un.leaf.q : NULL;
+	return !cl->level ? cl->leaf.q : NULL;
 }

 static void htb_qlen_notify(struct Qdisc *sch, unsigned long arg)
@@ -1205,15 +1190,15 @@
 {
 	struct htb_class *parent = cl->parent;

-	WARN_ON(cl->level || !cl->un.leaf.q || cl->prio_activity);
+	WARN_ON(cl->level || !cl->leaf.q || cl->prio_activity);

 	if (parent->cmode != HTB_CAN_SEND)
 		htb_safe_rb_erase(&parent->pq_node,
 				  &q->hlevel[parent->level].wait_pq);

 	parent->level = 0;
-	memset(&parent->un.inner, 0, sizeof(parent->un.inner));
-	parent->un.leaf.q = new_q ? new_q : &noop_qdisc;
+	memset(&parent->inner, 0, sizeof(parent->inner));
+	parent->leaf.q = new_q ? new_q : &noop_qdisc;
 	parent->tokens = parent->buffer;
 	parent->ctokens = parent->cbuffer;
 	parent->t_c = ktime_get_ns();
@@ -1223,8 +1208,8 @@
 static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
 {
 	if (!cl->level) {
-		WARN_ON(!cl->un.leaf.q);
-		qdisc_put(cl->un.leaf.q);
+		WARN_ON(!cl->leaf.q);
+		qdisc_put(cl->leaf.q);
 	}
 	gen_kill_estimator(&cl->rate_est);
 	tcf_block_put(cl->block);
@@ -1285,13 +1270,8 @@

 	sch_tree_lock(sch);

-	if (!cl->level) {
-		unsigned int qlen = cl->un.leaf.q->q.qlen;
-		unsigned int backlog = cl->un.leaf.q->qstats.backlog;
-
-		qdisc_reset(cl->un.leaf.q);
-		qdisc_tree_reduce_backlog(cl->un.leaf.q, qlen, backlog);
-	}
+	if (!cl->level)
+		qdisc_purge_queue(cl->leaf.q);

 	/* delete from hash and active; remainder in destroy_class */
 	qdisc_class_hash_remove(&q->clhash, &cl->common);
@@ -1323,6 +1303,7 @@
 	struct htb_class *cl = (struct htb_class *)*arg, *parent;
 	struct nlattr *opt = tca[TCA_OPTIONS];
 	struct nlattr *tb[TCA_HTB_MAX + 1];
+	struct Qdisc *parent_qdisc = NULL;
 	struct tc_htb_opt *hopt;
 	u64 rate64, ceil64;
 	int warn = 0;
@@ -1331,7 +1312,8 @@
 	if (!opt)
 		goto failure;

-	err = nla_parse_nested(tb, TCA_HTB_MAX, opt, htb_policy, NULL);
+	err = nla_parse_nested_deprecated(tb, TCA_HTB_MAX, opt, htb_policy,
+					  NULL);
 	if (err < 0)
 		goto failure;

@@ -1419,13 +1401,9 @@
 						  classid, NULL);
 		sch_tree_lock(sch);
 		if (parent && !parent->level) {
-			unsigned int qlen = parent->un.leaf.q->q.qlen;
-			unsigned int backlog = parent->un.leaf.q->qstats.backlog;
-
 			/* turn parent into inner node */
-			qdisc_reset(parent->un.leaf.q);
-			qdisc_tree_reduce_backlog(parent->un.leaf.q, qlen, backlog);
-			qdisc_put(parent->un.leaf.q);
+			qdisc_purge_queue(parent->leaf.q);
+			parent_qdisc = parent->leaf.q;
 			if (parent->prio_activity)
 				htb_deactivate(q, parent);

@@ -1436,10 +1414,10 @@
 			}
 			parent->level = (parent->parent ? parent->parent->level
 					 : TC_HTB_MAXDEPTH) - 1;
-			memset(&parent->un.inner, 0, sizeof(parent->un.inner));
+			memset(&parent->inner, 0, sizeof(parent->inner));
 		}
 		/* leaf (we) needs elementary qdisc */
-		cl->un.leaf.q = new_q ? new_q : &noop_qdisc;
+		cl->leaf.q = new_q ? new_q : &noop_qdisc;

 		cl->common.classid = classid;
 		cl->parent = parent;
@@ -1455,8 +1433,8 @@
 		qdisc_class_hash_insert(&q->clhash, &cl->common);
 		if (parent)
 			parent->children++;
-		if (cl->un.leaf.q != &noop_qdisc)
-			qdisc_hash_add(cl->un.leaf.q, true);
+		if (cl->leaf.q != &noop_qdisc)
+			qdisc_hash_add(cl->leaf.q, true);
 	} else {
 		if (tca[TCA_RATE]) {
 			err = gen_replace_estimator(&cl->bstats, NULL,
@@ -1478,7 +1456,7 @@
 	psched_ratecfg_precompute(&cl->ceil, &hopt->ceil, ceil64);

 	/* it used to be a nasty bug here, we have to check that node
-	 * is really leaf before changing cl->un.leaf !
+	 * is really leaf before changing cl->leaf !
 	 */
 	if (!cl->level) {
 		u64 quantum = cl->rate.rate_bytes_ps;
@@ -1504,6 +1482,7 @@
 	cl->cbuffer = PSCHED_TICKS2NS(hopt->cbuffer);

 	sch_tree_unlock(sch);
+	qdisc_put(parent_qdisc);

 	if (warn)
 		pr_warn("HTB: quantum of class %X is %s. Consider r2q change.\n",
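
The htb_change_class() hunks above replace the open-coded reset/reduce-backlog/put
sequence with qdisc_purge_queue() and defer the final qdisc_put() of the displaced
leaf qdisc until after sch_tree_unlock(), stashing it in parent_qdisc while the
lock is held. A minimal sketch of that deferred-put pattern follows, assuming
kernel context; replace_leaf_qdisc() and its "slot" parameter are illustrative
names, not functions in sch_htb.c, but the calls used are the ones in the patch.

#include <net/sch_generic.h>	/* struct Qdisc, noop_qdisc, qdisc_put(),
				 * qdisc_purge_queue(), sch_tree_lock()
				 */

/* Swap the qdisc attached at *slot for new_q; the displaced qdisc is only
 * remembered under the tree lock and released after the lock is dropped,
 * mirroring how the patch handles parent_qdisc in htb_change_class().
 */
static void replace_leaf_qdisc(struct Qdisc *sch, struct Qdisc **slot,
			       struct Qdisc *new_q)
{
	struct Qdisc *old = NULL;

	sch_tree_lock(sch);
	if (*slot != &noop_qdisc) {
		/* drop queued skbs and propagate qlen/backlog upstream */
		qdisc_purge_queue(*slot);
		old = *slot;	/* remember it; do not free under the lock */
	}
	*slot = new_q ? new_q : &noop_qdisc;
	sch_tree_unlock(sch);

	/* final reference dropped only outside the qdisc tree lock;
	 * qdisc_put(NULL) is a no-op, as with qdisc_put(parent_qdisc) above
	 */
	qdisc_put(old);
}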