2024-10-22 8ac6c7a54ed1b98d142dce24b11c6de6a1e239a5
kernel/net/sched/sch_red.c
@@ -1,10 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * net/sched/sch_red.c	Random Early Detection queue.
- *
- *		This program is free software; you can redistribute it and/or
- *		modify it under the terms of the GNU General Public License
- *		as published by the Free Software Foundation; either version
- *		2 of the License, or (at your option) any later version.
  *
  * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
  *
@@ -39,14 +35,22 @@
 
 struct red_sched_data {
 	u32			limit;		/* HARD maximal queue length */
+
 	unsigned char		flags;
+	/* Non-flags in tc_red_qopt.flags. */
+	unsigned char		userbits;
+
 	struct timer_list	adapt_timer;
 	struct Qdisc		*sch;
 	struct red_parms	parms;
 	struct red_vars		vars;
 	struct red_stats	stats;
 	struct Qdisc		*qdisc;
+	struct tcf_qevent	qe_early_drop;
+	struct tcf_qevent	qe_mark;
 };
+
+#define TC_RED_SUPPORTED_FLAGS (TC_RED_HISTORIC_FLAGS | TC_RED_NODROP)
 
 static inline int red_use_ecn(struct red_sched_data *q)
 {
@@ -58,11 +62,17 @@
 	return q->flags & TC_RED_HARDDROP;
 }
 
+static int red_use_nodrop(struct red_sched_data *q)
+{
+	return q->flags & TC_RED_NODROP;
+}
+
 static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 		       struct sk_buff **to_free)
 {
 	struct red_sched_data *q = qdisc_priv(sch);
 	struct Qdisc *child = q->qdisc;
+	unsigned int len;
 	int ret;
 
 	q->vars.qavg = red_calc_qavg(&q->parms,
@@ -78,29 +88,49 @@
 
 	case RED_PROB_MARK:
 		qdisc_qstats_overlimit(sch);
-		if (!red_use_ecn(q) || !INET_ECN_set_ce(skb)) {
+		if (!red_use_ecn(q)) {
 			q->stats.prob_drop++;
 			goto congestion_drop;
 		}
 
-		q->stats.prob_mark++;
+		if (INET_ECN_set_ce(skb)) {
+			q->stats.prob_mark++;
+			skb = tcf_qevent_handle(&q->qe_mark, sch, skb, to_free, &ret);
+			if (!skb)
+				return NET_XMIT_CN | ret;
+		} else if (!red_use_nodrop(q)) {
+			q->stats.prob_drop++;
+			goto congestion_drop;
+		}
+
+		/* Non-ECT packet in ECN nodrop mode: queue it. */
 		break;
 
 	case RED_HARD_MARK:
 		qdisc_qstats_overlimit(sch);
-		if (red_use_harddrop(q) || !red_use_ecn(q) ||
-		    !INET_ECN_set_ce(skb)) {
+		if (red_use_harddrop(q) || !red_use_ecn(q)) {
 			q->stats.forced_drop++;
 			goto congestion_drop;
 		}
 
-		q->stats.forced_mark++;
+		if (INET_ECN_set_ce(skb)) {
+			q->stats.forced_mark++;
+			skb = tcf_qevent_handle(&q->qe_mark, sch, skb, to_free, &ret);
+			if (!skb)
+				return NET_XMIT_CN | ret;
+		} else if (!red_use_nodrop(q)) {
+			q->stats.forced_drop++;
+			goto congestion_drop;
+		}
+
+		/* Non-ECT packet in ECN nodrop mode: queue it. */
 		break;
 	}
 
+	len = qdisc_pkt_len(skb);
 	ret = qdisc_enqueue(skb, child, to_free);
 	if (likely(ret == NET_XMIT_SUCCESS)) {
-		qdisc_qstats_backlog_inc(sch, skb);
+		sch->qstats.backlog += len;
 		sch->q.qlen++;
 	} else if (net_xmit_drop_count(ret)) {
 		q->stats.pdrop++;
@@ -109,6 +139,10 @@
 	return ret;
 
 congestion_drop:
+	skb = tcf_qevent_handle(&q->qe_early_drop, sch, skb, to_free, &ret);
+	if (!skb)
+		return NET_XMIT_CN | ret;
+
 	qdisc_drop(skb, sch, to_free);
 	return NET_XMIT_CN;
 }
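
A note on the reworked enqueue branches above: CE marking is now attempted only when ECN is enabled, a successfully marked skb can be handed to the qe_mark qevent (whose filters may consume it), and a packet that cannot be marked (non-ECT) is dropped only when nodrop mode is off. The standalone sketch below restates the RED_PROB_MARK decision as a pure function; all names are illustrative stand-ins, not kernel API.

/* Standalone restatement of the RED_PROB_MARK branch above.
 * Illustrative names only; this is not kernel code. */
#include <stdio.h>
#include <stdbool.h>

enum verdict { VERDICT_QUEUE, VERDICT_MARK, VERDICT_DROP };

/* use_ecn/nodrop mirror red_use_ecn()/red_use_nodrop(); can_mark_ce
 * stands for a successful INET_ECN_set_ce(), i.e. an ECT packet. */
static enum verdict prob_mark(bool use_ecn, bool can_mark_ce, bool nodrop)
{
	if (!use_ecn)
		return VERDICT_DROP;		/* classic RED early drop */
	if (can_mark_ce)
		return VERDICT_MARK;		/* ECN: mark instead of drop */
	/* Non-ECT packet: nodrop mode queues it, otherwise drop. */
	return nodrop ? VERDICT_QUEUE : VERDICT_DROP;
}

int main(void)
{
	static const char *names[] = { "queue", "mark", "drop" };
	int ecn, ect, nodrop;

	for (ecn = 0; ecn <= 1; ecn++)
		for (ect = 0; ect <= 1; ect++)
			for (nodrop = 0; nodrop <= 1; nodrop++)
				printf("ecn=%d ect=%d nodrop=%d -> %s\n",
				       ecn, ect, nodrop,
				       names[prob_mark(ecn, ect, nodrop)]);
	return 0;
}

The RED_HARD_MARK branch is identical except that red_use_harddrop() forces a drop up front, and both drop paths now funnel through the qe_early_drop qevent at the congestion_drop label.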
@@ -144,8 +178,6 @@
 	struct red_sched_data *q = qdisc_priv(sch);
 
 	qdisc_reset(q->qdisc);
-	sch->qstats.backlog = 0;
-	sch->q.qlen = 0;
 	red_restart(&q->vars);
 }
 
@@ -166,7 +198,10 @@
 		opt.set.min = q->parms.qth_min >> q->parms.Wlog;
 		opt.set.max = q->parms.qth_max >> q->parms.Wlog;
 		opt.set.probability = q->parms.max_P;
+		opt.set.limit = q->limit;
 		opt.set.is_ecn = red_use_ecn(q);
+		opt.set.is_harddrop = red_use_harddrop(q);
+		opt.set.is_nodrop = red_use_nodrop(q);
 		opt.set.qstats = &sch->qstats;
 	} else {
 		opt.command = TC_RED_DESTROY;
@@ -179,34 +214,35 @@
 {
 	struct red_sched_data *q = qdisc_priv(sch);
 
+	tcf_qevent_destroy(&q->qe_mark, sch);
+	tcf_qevent_destroy(&q->qe_early_drop, sch);
 	del_timer_sync(&q->adapt_timer);
 	red_offload(sch, false);
 	qdisc_put(q->qdisc);
 }
 
 static const struct nla_policy red_policy[TCA_RED_MAX + 1] = {
+	[TCA_RED_UNSPEC] = { .strict_start_type = TCA_RED_FLAGS },
 	[TCA_RED_PARMS]	= { .len = sizeof(struct tc_red_qopt) },
 	[TCA_RED_STAB]	= { .len = RED_STAB_SIZE },
 	[TCA_RED_MAX_P] = { .type = NLA_U32 },
+	[TCA_RED_FLAGS] = NLA_POLICY_BITFIELD32(TC_RED_SUPPORTED_FLAGS),
+	[TCA_RED_EARLY_DROP_BLOCK] = { .type = NLA_U32 },
+	[TCA_RED_MARK_BLOCK] = { .type = NLA_U32 },
 };
 
-static int red_change(struct Qdisc *sch, struct nlattr *opt,
-		      struct netlink_ext_ack *extack)
+static int __red_change(struct Qdisc *sch, struct nlattr **tb,
+			struct netlink_ext_ack *extack)
 {
+	struct Qdisc *old_child = NULL, *child = NULL;
 	struct red_sched_data *q = qdisc_priv(sch);
-	struct nlattr *tb[TCA_RED_MAX + 1];
+	struct nla_bitfield32 flags_bf;
 	struct tc_red_qopt *ctl;
-	struct Qdisc *child = NULL;
+	unsigned char userbits;
+	unsigned char flags;
 	int err;
 	u32 max_P;
 	u8 *stab;
-
-	if (opt == NULL)
-		return -EINVAL;
-
-	err = nla_parse_nested(tb, TCA_RED_MAX, opt, red_policy, NULL);
-	if (err < 0)
-		return err;
 
 	if (tb[TCA_RED_PARMS] == NULL ||
 	    tb[TCA_RED_STAB] == NULL)
@@ -220,6 +256,12 @@
 			    ctl->Scell_log, stab))
 		return -EINVAL;
 
+	err = red_get_flags(ctl->flags, TC_RED_HISTORIC_FLAGS,
+			    tb[TCA_RED_FLAGS], TC_RED_SUPPORTED_FLAGS,
+			    &flags_bf, &userbits, extack);
+	if (err)
+		return err;
+
 	if (ctl->limit > 0) {
 		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, ctl->limit,
 					 extack);
@@ -231,12 +273,18 @@
 	}
 
 	sch_tree_lock(sch);
-	q->flags = ctl->flags;
+
+	flags = (q->flags & ~flags_bf.selector) | flags_bf.value;
+	err = red_validate_flags(flags, extack);
+	if (err)
+		goto unlock_out;
+
+	q->flags = flags;
+	q->userbits = userbits;
 	q->limit = ctl->limit;
 	if (child) {
-		qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
-					  q->qdisc->qstats.backlog);
-		qdisc_put(q->qdisc);
+		qdisc_tree_flush_backlog(q->qdisc);
+		old_child = q->qdisc;
 		q->qdisc = child;
 	}
 
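
The flags handling above uses the nla_bitfield32 value/selector convention: userspace names the bits it wants to change in selector and supplies their new state in value, so unnamed bits keep their previous setting. Below is a minimal userspace demonstration of the merge expression from __red_change(); the FLAG_* constants are illustrative stand-ins, not the real TC_RED_* values.

/* Userspace sketch of the nla_bitfield32 merge used above. */
#include <stdio.h>
#include <stdint.h>

#define FLAG_ECN      (1 << 0)	/* stands in for TC_RED_ECN */
#define FLAG_HARDDROP (1 << 1)	/* stands in for TC_RED_HARDDROP */
#define FLAG_NODROP   (1 << 2)	/* stands in for TC_RED_NODROP */

struct bitfield32 {		/* mirrors struct nla_bitfield32 */
	uint32_t value;
	uint32_t selector;
};

static uint32_t merge_flags(uint32_t old, struct bitfield32 bf)
{
	/* Same expression as in __red_change(): bits outside the
	 * selector are untouched, selected bits take the new value. */
	return (old & ~bf.selector) | bf.value;
}

int main(void)
{
	uint32_t flags = FLAG_ECN | FLAG_HARDDROP;
	/* Request: set NODROP, clear HARDDROP, say nothing about ECN. */
	struct bitfield32 bf = {
		.value = FLAG_NODROP,
		.selector = FLAG_NODROP | FLAG_HARDDROP,
	};

	flags = merge_flags(flags, bf);
	printf("ECN=%d HARDDROP=%d NODROP=%d\n",
	       !!(flags & FLAG_ECN), !!(flags & FLAG_HARDDROP),
	       !!(flags & FLAG_NODROP));	/* ECN=1 HARDDROP=0 NODROP=1 */
	return 0;
}

This is also why the userbits field exists: bits that arrive in the historic tc_red_qopt.flags word but mean nothing to the kernel are parked there and echoed back by red_dump(), so a configuration round-trips unchanged.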
@@ -255,8 +303,18 @@
 		red_start_of_idle_period(&q->vars);
 
 	sch_tree_unlock(sch);
+
 	red_offload(sch, true);
+
+	if (old_child)
+		qdisc_put(old_child);
 	return 0;
+
+unlock_out:
+	sch_tree_unlock(sch);
+	if (child)
+		qdisc_put(child);
+	return err;
 }
 
 static inline void red_adaptative_timer(struct timer_list *t)
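
One pattern worth noting in the hunk above: the old child qdisc is only unlinked while sch_tree_lock() is held, and qdisc_put(old_child) runs after sch_tree_unlock(). My reading is that this avoids destroying a qdisc under the tree lock, where sleeping is not allowed; the new unlock_out label gives the flag-validation failure path the same unlock-then-put ordering. Schematically:

/*
 * Ordering of the replace path in __red_change() (schematic):
 *
 *   sch_tree_lock(sch);
 *   ...			// validate flags; on error goto unlock_out
 *   old_child = q->qdisc;	// unlink the old child under the lock
 *   q->qdisc = child;
 *   sch_tree_unlock(sch);
 *   red_offload(sch, true);
 *   if (old_child)
 *           qdisc_put(old_child);	// destroy only after unlocking
 */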
@@ -275,16 +333,66 @@
 			 struct netlink_ext_ack *extack)
 {
 	struct red_sched_data *q = qdisc_priv(sch);
+	struct nlattr *tb[TCA_RED_MAX + 1];
+	int err;
 
 	q->qdisc = &noop_qdisc;
 	q->sch = sch;
 	timer_setup(&q->adapt_timer, red_adaptative_timer, 0);
-	return red_change(sch, opt, extack);
+
+	if (!opt)
+		return -EINVAL;
+
+	err = nla_parse_nested_deprecated(tb, TCA_RED_MAX, opt, red_policy,
+					  extack);
+	if (err < 0)
+		return err;
+
+	err = __red_change(sch, tb, extack);
+	if (err)
+		return err;
+
+	err = tcf_qevent_init(&q->qe_early_drop, sch,
+			      FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP,
+			      tb[TCA_RED_EARLY_DROP_BLOCK], extack);
+	if (err)
+		return err;
+
+	return tcf_qevent_init(&q->qe_mark, sch,
+			       FLOW_BLOCK_BINDER_TYPE_RED_MARK,
+			       tb[TCA_RED_MARK_BLOCK], extack);
 }
 
-static int red_dump_offload_stats(struct Qdisc *sch, struct tc_red_qopt *opt)
+static int red_change(struct Qdisc *sch, struct nlattr *opt,
+		      struct netlink_ext_ack *extack)
 {
-	struct net_device *dev = qdisc_dev(sch);
+	struct red_sched_data *q = qdisc_priv(sch);
+	struct nlattr *tb[TCA_RED_MAX + 1];
+	int err;
+
+	if (!opt)
+		return -EINVAL;
+
+	err = nla_parse_nested_deprecated(tb, TCA_RED_MAX, opt, red_policy,
+					  extack);
+	if (err < 0)
+		return err;
+
+	err = tcf_qevent_validate_change(&q->qe_early_drop,
+					 tb[TCA_RED_EARLY_DROP_BLOCK], extack);
+	if (err)
+		return err;
+
+	err = tcf_qevent_validate_change(&q->qe_mark,
+					 tb[TCA_RED_MARK_BLOCK], extack);
+	if (err)
+		return err;
+
+	return __red_change(sch, tb, extack);
+}
+
+static int red_dump_offload_stats(struct Qdisc *sch)
+{
 	struct tc_red_qopt_offload hw_stats = {
 		.command = TC_RED_STATS,
 		.handle = sch->handle,
@@ -294,22 +402,8 @@
 		.stats.qstats = &sch->qstats,
 		},
 	};
-	int err;
 
-	sch->flags &= ~TCQ_F_OFFLOADED;
-
-	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
-		return 0;
-
-	err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED,
-					    &hw_stats);
-	if (err == -EOPNOTSUPP)
-		return 0;
-
-	if (!err)
-		sch->flags |= TCQ_F_OFFLOADED;
-
-	return err;
+	return qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_RED, &hw_stats);
 }
 
 static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
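
The offload-stats boilerplate deleted above is not lost; qdisc_offload_dump_helper() performs the same steps generalized over the setup type. Below is a sketch of the equivalent logic, reconstructed from the removed lines rather than copied from net/sched/sch_api.c (kernel context, so not standalone-compilable):

/* Equivalent of the lines deleted above, parameterized over the
 * setup type; reconstructed for illustration. */
static int offload_dump_helper_sketch(struct Qdisc *sch,
				      enum tc_setup_type type, void *data)
{
	struct net_device *dev = qdisc_dev(sch);
	int err;

	sch->flags &= ~TCQ_F_OFFLOADED;
	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return 0;			/* no offload: not an error */

	err = dev->netdev_ops->ndo_setup_tc(dev, type, data);
	if (err == -EOPNOTSUPP)
		return 0;			/* driver opted out */

	if (!err)
		sch->flags |= TCQ_F_OFFLOADED;	/* stats now come from HW */
	return err;
}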
@@ -318,7 +412,8 @@
 	struct nlattr *opts = NULL;
 	struct tc_red_qopt opt = {
 		.limit		= q->limit,
-		.flags		= q->flags,
+		.flags		= (q->flags & TC_RED_HISTORIC_FLAGS) |
+				  q->userbits,
 		.qth_min	= q->parms.qth_min >> q->parms.Wlog,
 		.qth_max	= q->parms.qth_max >> q->parms.Wlog,
 		.Wlog		= q->parms.Wlog,
@@ -327,15 +422,19 @@
 	};
 	int err;
 
-	err = red_dump_offload_stats(sch, &opt);
+	err = red_dump_offload_stats(sch);
 	if (err)
 		goto nla_put_failure;
 
-	opts = nla_nest_start(skb, TCA_OPTIONS);
+	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
 	if (opts == NULL)
 		goto nla_put_failure;
 	if (nla_put(skb, TCA_RED_PARMS, sizeof(opt), &opt) ||
-	    nla_put_u32(skb, TCA_RED_MAX_P, q->parms.max_P))
+	    nla_put_u32(skb, TCA_RED_MAX_P, q->parms.max_P) ||
+	    nla_put_bitfield32(skb, TCA_RED_FLAGS,
+			       q->flags, TC_RED_SUPPORTED_FLAGS) ||
+	    tcf_qevent_dump(skb, TCA_RED_MARK_BLOCK, &q->qe_mark) ||
+	    tcf_qevent_dump(skb, TCA_RED_EARLY_DROP_BLOCK, &q->qe_early_drop))
 		goto nla_put_failure;
 	return nla_nest_end(skb, opts);
 
@@ -380,6 +479,21 @@
 	return 0;
 }
 
+static void red_graft_offload(struct Qdisc *sch,
+			      struct Qdisc *new, struct Qdisc *old,
+			      struct netlink_ext_ack *extack)
+{
+	struct tc_red_qopt_offload graft_offload = {
+		.handle		= sch->handle,
+		.parent		= sch->parent,
+		.child_handle	= new->handle,
+		.command	= TC_RED_GRAFT,
+	};
+
+	qdisc_offload_graft_helper(qdisc_dev(sch), sch, new, old,
+				   TC_SETUP_QDISC_RED, &graft_offload, extack);
+}
+
 static int red_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
 		     struct Qdisc **old, struct netlink_ext_ack *extack)
 {
@@ -389,6 +503,8 @@
 		new = &noop_qdisc;
 
 	*old = qdisc_replace(sch, new, &q->qdisc);
+
+	red_graft_offload(sch, new, *old, extack);
 	return 0;
 }
 
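
Taken together, the qevent plumbing added across this diff follows a single lifecycle. The comment block below summarizes it using the mark qevent as the example; every call shown appears in the hunks above.

/*
 * Lifecycle of one qevent binding, as wired up in this patch:
 *
 *   red_init()    tcf_qevent_init(&q->qe_mark, sch,
 *                                 FLOW_BLOCK_BINDER_TYPE_RED_MARK,
 *                                 tb[TCA_RED_MARK_BLOCK], extack);
 *   red_change()  tcf_qevent_validate_change(&q->qe_mark,
 *                                 tb[TCA_RED_MARK_BLOCK], extack);
 *   red_enqueue() skb = tcf_qevent_handle(&q->qe_mark, sch, skb,
 *                                         to_free, &ret);
 *                 if (!skb)	// filters bound to the block consumed it
 *                         return NET_XMIT_CN | ret;
 *   red_dump()    tcf_qevent_dump(skb, TCA_RED_MARK_BLOCK, &q->qe_mark);
 *   red_destroy() tcf_qevent_destroy(&q->qe_mark, sch);
 */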