forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-12-19 9370bb92b2d16684ee45cf24e879c93c509162da
kernel/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
@@ -8,6 +8,7 @@
 #include <net/red.h>
 
 #include "spectrum.h"
+#include "spectrum_span.h"
 #include "reg.h"
 
 #define MLXSW_SP_PRIO_BAND_TO_TCLASS(band) (IEEE_8021QAZ_MAX_TCS - band - 1)
@@ -18,14 +19,19 @@
 	MLXSW_SP_QDISC_NO_QDISC,
 	MLXSW_SP_QDISC_RED,
 	MLXSW_SP_QDISC_PRIO,
+	MLXSW_SP_QDISC_ETS,
+	MLXSW_SP_QDISC_TBF,
+	MLXSW_SP_QDISC_FIFO,
 };
+
+struct mlxsw_sp_qdisc;
 
 struct mlxsw_sp_qdisc_ops {
 	enum mlxsw_sp_qdisc_type type;
 	int (*check_params)(struct mlxsw_sp_port *mlxsw_sp_port,
 			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
 			    void *params);
-	int (*replace)(struct mlxsw_sp_port *mlxsw_sp_port,
+	int (*replace)(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
 		       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params);
 	int (*destroy)(struct mlxsw_sp_port *mlxsw_sp_port,
 		       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc);
@@ -62,6 +68,25 @@
 	struct mlxsw_sp_qdisc_ops *ops;
 };
 
+struct mlxsw_sp_qdisc_state {
+	struct mlxsw_sp_qdisc root_qdisc;
+	struct mlxsw_sp_qdisc tclass_qdiscs[IEEE_8021QAZ_MAX_TCS];
+
+	/* When a PRIO or ETS are added, the invisible FIFOs in their bands are
+	 * created first. When notifications for these FIFOs arrive, it is not
+	 * known what qdisc their parent handle refers to. It could be a
+	 * newly-created PRIO that will replace the currently-offloaded one, or
+	 * it could be e.g. a RED that will be attached below it.
+	 *
+	 * As the notifications start to arrive, use them to note what the
+	 * future parent handle is, and keep track of which child FIFOs were
+	 * seen. Then when the parent is known, retroactively offload those
+	 * FIFOs.
+	 */
+	u32 future_handle;
+	bool future_fifos[IEEE_8021QAZ_MAX_TCS];
+};
+
 static bool
 mlxsw_sp_qdisc_compare(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, u32 handle,
		       enum mlxsw_sp_qdisc_type type)
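Aside: the future_handle/future_fifos bookkeeping above keys off the TC handle encoding, where a 32-bit handle packs a 16-bit major (qdisc) number and a 16-bit minor (class) number. A standalone sketch of that decomposition, assuming the usual TC_H_MAJ/TC_H_MIN bit layout and the band inversion defined at the top of this file (MLXSW_SP_PRIO_CHILD_TO_TCLASS is assumed to map child N to band N - 1; its definition is outside the hunks shown here):

#include <stdio.h>
#include <stdint.h>

/* Same bit layout as the kernel's TC_H_MAJ()/TC_H_MIN(). */
#define TC_H_MAJ(h) ((h) & 0xFFFF0000U)
#define TC_H_MIN(h) ((h) & 0x0000FFFFU)

#define IEEE_8021QAZ_MAX_TCS 8
/* Band 0 is the highest-priority band; the ASIC numbers traffic classes
 * the other way around, hence the inversion (as defined in this file).
 */
#define MLXSW_SP_PRIO_BAND_TO_TCLASS(band) (IEEE_8021QAZ_MAX_TCS - (band) - 1)

int main(void)
{
	uint32_t parent = 0x00010003;		/* parent "1:3" */
	uint32_t band = TC_H_MIN(parent) - 1;	/* assumed child-to-band rule */

	printf("future_handle key: %#x\n", TC_H_MAJ(parent));	/* 0x10000 */
	printf("band %u -> tclass %d\n", band,
	       MLXSW_SP_PRIO_BAND_TO_TCLASS(band));		/* band 2 -> 5 */
	return 0;
}

So all invisible FIFOs created under the same prospective parent share one TC_H_MAJ() key, which is exactly what future_handle stores.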
@@ -75,36 +100,38 @@
 mlxsw_sp_qdisc_find(struct mlxsw_sp_port *mlxsw_sp_port, u32 parent,
		    bool root_only)
 {
+	struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
 	int tclass, child_index;
 
 	if (parent == TC_H_ROOT)
-		return mlxsw_sp_port->root_qdisc;
+		return &qdisc_state->root_qdisc;
 
-	if (root_only || !mlxsw_sp_port->root_qdisc ||
-	    !mlxsw_sp_port->root_qdisc->ops ||
-	    TC_H_MAJ(parent) != mlxsw_sp_port->root_qdisc->handle ||
+	if (root_only || !qdisc_state ||
+	    !qdisc_state->root_qdisc.ops ||
+	    TC_H_MAJ(parent) != qdisc_state->root_qdisc.handle ||
 	    TC_H_MIN(parent) > IEEE_8021QAZ_MAX_TCS)
 		return NULL;
 
 	child_index = TC_H_MIN(parent);
 	tclass = MLXSW_SP_PRIO_CHILD_TO_TCLASS(child_index);
-	return &mlxsw_sp_port->tclass_qdiscs[tclass];
+	return &qdisc_state->tclass_qdiscs[tclass];
 }
 
 static struct mlxsw_sp_qdisc *
 mlxsw_sp_qdisc_find_by_handle(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle)
 {
+	struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
 	int i;
 
-	if (mlxsw_sp_port->root_qdisc->handle == handle)
-		return mlxsw_sp_port->root_qdisc;
+	if (qdisc_state->root_qdisc.handle == handle)
+		return &qdisc_state->root_qdisc;
 
-	if (mlxsw_sp_port->root_qdisc->handle == TC_H_UNSPEC)
+	if (qdisc_state->root_qdisc.handle == TC_H_UNSPEC)
 		return NULL;
 
 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
-		if (mlxsw_sp_port->tclass_qdiscs[i].handle == handle)
-			return &mlxsw_sp_port->tclass_qdiscs[i];
+		if (qdisc_state->tclass_qdiscs[i].handle == handle)
+			return &qdisc_state->tclass_qdiscs[i];
 
 	return NULL;
 }
@@ -113,10 +140,22 @@
 mlxsw_sp_qdisc_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
		       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
 {
+	struct mlxsw_sp_qdisc *root_qdisc = &mlxsw_sp_port->qdisc->root_qdisc;
+	int err_hdroom = 0;
 	int err = 0;
 
 	if (!mlxsw_sp_qdisc)
 		return 0;
+
+	if (root_qdisc == mlxsw_sp_qdisc) {
+		struct mlxsw_sp_hdroom hdroom = *mlxsw_sp_port->hdroom;
+
+		hdroom.mode = MLXSW_SP_HDROOM_MODE_DCB;
+		mlxsw_sp_hdroom_prios_reset_buf_idx(&hdroom);
+		mlxsw_sp_hdroom_bufs_reset_lossiness(&hdroom);
+		mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);
+		err_hdroom = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
+	}
 
 	if (mlxsw_sp_qdisc->ops && mlxsw_sp_qdisc->ops->destroy)
 		err = mlxsw_sp_qdisc->ops->destroy(mlxsw_sp_port,
@@ -124,7 +163,8 @@
 
 	mlxsw_sp_qdisc->handle = TC_H_UNSPEC;
 	mlxsw_sp_qdisc->ops = NULL;
-	return err;
+
+	return err_hdroom ?: err;
 }
 
 static int
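The `?:` in the new return statement is the GNU C "elvis" operator: `a ?: b` yields `a` when `a` is nonzero and `b` otherwise, evaluating `a` only once. Here the headroom rollback error takes precedence over the destroy error. A minimal portable sketch of the same selection logic (illustrative only, not driver code):

#include <assert.h>

/* Portable spelling of the GNU "err_hdroom ?: err" idiom: report the
 * first error that occurred, fall back to the second one.
 */
static int first_err(int err_hdroom, int err)
{
	return err_hdroom ? err_hdroom : err;
}

int main(void)
{
	assert(first_err(0, 0) == 0);		/* both succeeded */
	assert(first_err(0, -22) == -22);	/* only destroy failed */
	assert(first_err(-12, -22) == -12);	/* headroom error wins */
	return 0;
}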
@@ -132,6 +172,8 @@
		       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
		       struct mlxsw_sp_qdisc_ops *ops, void *params)
 {
+	struct mlxsw_sp_qdisc *root_qdisc = &mlxsw_sp_port->qdisc->root_qdisc;
+	struct mlxsw_sp_hdroom orig_hdroom;
 	int err;
 
 	if (mlxsw_sp_qdisc->ops && mlxsw_sp_qdisc->ops->type != ops->type)
@@ -141,15 +183,34 @@
 	 * new one.
 	 */
 	mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
+
+	orig_hdroom = *mlxsw_sp_port->hdroom;
+	if (root_qdisc == mlxsw_sp_qdisc) {
+		struct mlxsw_sp_hdroom hdroom = orig_hdroom;
+
+		hdroom.mode = MLXSW_SP_HDROOM_MODE_TC;
+		mlxsw_sp_hdroom_prios_reset_buf_idx(&hdroom);
+		mlxsw_sp_hdroom_bufs_reset_lossiness(&hdroom);
+		mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);
+
+		err = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
+		if (err)
+			goto err_hdroom_configure;
+	}
+
 	err = ops->check_params(mlxsw_sp_port, mlxsw_sp_qdisc, params);
 	if (err)
 		goto err_bad_param;
 
-	err = ops->replace(mlxsw_sp_port, mlxsw_sp_qdisc, params);
+	err = ops->replace(mlxsw_sp_port, handle, mlxsw_sp_qdisc, params);
 	if (err)
 		goto err_config;
 
-	if (mlxsw_sp_qdisc->handle != handle) {
+	/* Check if the Qdisc changed. That includes a situation where an
+	 * invisible Qdisc replaces another one, or is being added for the
+	 * first time.
+	 */
+	if (mlxsw_sp_qdisc->handle != handle || handle == TC_H_UNSPEC) {
 		mlxsw_sp_qdisc->ops = ops;
 		if (ops->clean_stats)
 			ops->clean_stats(mlxsw_sp_port, mlxsw_sp_qdisc);
@@ -160,6 +221,8 @@
 
 err_bad_param:
 err_config:
+	mlxsw_sp_hdroom_configure(mlxsw_sp_port, &orig_hdroom);
+err_hdroom_configure:
 	if (mlxsw_sp_qdisc->handle == handle && ops->unoffload)
 		ops->unoffload(mlxsw_sp_port, mlxsw_sp_qdisc, params);
 
@@ -226,10 +289,74 @@
 	}
 }
 
+static void
+mlxsw_sp_qdisc_collect_tc_stats(struct mlxsw_sp_port *mlxsw_sp_port,
+				struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+				u64 *p_tx_bytes, u64 *p_tx_packets,
+				u64 *p_drops, u64 *p_backlog)
+{
+	u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
+	struct mlxsw_sp_port_xstats *xstats;
+	u64 tx_bytes, tx_packets;
+
+	xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
+	mlxsw_sp_qdisc_bstats_per_priority_get(xstats,
+					       mlxsw_sp_qdisc->prio_bitmap,
+					       &tx_packets, &tx_bytes);
+
+	*p_tx_packets += tx_packets;
+	*p_tx_bytes += tx_bytes;
+	*p_drops += xstats->wred_drop[tclass_num] +
+		    mlxsw_sp_xstats_tail_drop(xstats, tclass_num);
+	*p_backlog += mlxsw_sp_xstats_backlog(xstats, tclass_num);
+}
+
+static void
+mlxsw_sp_qdisc_update_stats(struct mlxsw_sp *mlxsw_sp,
+			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+			    u64 tx_bytes, u64 tx_packets,
+			    u64 drops, u64 backlog,
+			    struct tc_qopt_offload_stats *stats_ptr)
+{
+	struct mlxsw_sp_qdisc_stats *stats_base = &mlxsw_sp_qdisc->stats_base;
+
+	tx_bytes -= stats_base->tx_bytes;
+	tx_packets -= stats_base->tx_packets;
+	drops -= stats_base->drops;
+	backlog -= stats_base->backlog;
+
+	_bstats_update(stats_ptr->bstats, tx_bytes, tx_packets);
+	stats_ptr->qstats->drops += drops;
+	stats_ptr->qstats->backlog += mlxsw_sp_cells_bytes(mlxsw_sp, backlog);
+
+	stats_base->backlog += backlog;
+	stats_base->drops += drops;
+	stats_base->tx_bytes += tx_bytes;
+	stats_base->tx_packets += tx_packets;
+}
+
+static void
+mlxsw_sp_qdisc_get_tc_stats(struct mlxsw_sp_port *mlxsw_sp_port,
+			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+			    struct tc_qopt_offload_stats *stats_ptr)
+{
+	u64 tx_packets = 0;
+	u64 tx_bytes = 0;
+	u64 backlog = 0;
+	u64 drops = 0;
+
+	mlxsw_sp_qdisc_collect_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
+					&tx_bytes, &tx_packets,
+					&drops, &backlog);
+	mlxsw_sp_qdisc_update_stats(mlxsw_sp_port->mlxsw_sp, mlxsw_sp_qdisc,
+				    tx_bytes, tx_packets, drops, backlog,
+				    stats_ptr);
+}
+
 static int
 mlxsw_sp_tclass_congestion_enable(struct mlxsw_sp_port *mlxsw_sp_port,
				  int tclass_num, u32 min, u32 max,
-				  u32 probability, bool is_ecn)
+				  u32 probability, bool is_wred, bool is_ecn)
 {
 	char cwtpm_cmd[MLXSW_REG_CWTPM_LEN];
 	char cwtp_cmd[MLXSW_REG_CWTP_LEN];
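The collect/update helpers introduced above factor out a running-delta pattern that the per-qdisc stats paths repeat: the hardware counters are cumulative, so each readout is diffed against a per-qdisc base, only the delta is folded into the kernel-facing stats, and the base is advanced. A self-contained sketch of that pattern (the names are illustrative, not the driver's):

#include <stdint.h>

struct stats_base { uint64_t tx_bytes, drops; };
struct kernel_stats { uint64_t tx_bytes, drops; };

/* Fold cumulative HW counters into kernel stats: only the growth since
 * the previous readout is reported, then the base catches up.
 */
static void update_stats(struct stats_base *base, struct kernel_stats *out,
			 uint64_t hw_tx_bytes, uint64_t hw_drops)
{
	uint64_t d_bytes = hw_tx_bytes - base->tx_bytes;
	uint64_t d_drops = hw_drops - base->drops;

	out->tx_bytes += d_bytes;
	out->drops += d_drops;
	base->tx_bytes += d_bytes;
	base->drops += d_drops;
}

Resetting the base to the current hardware readings, which is what the clean_stats callbacks below do, makes a newly offloaded qdisc start counting from zero without having to clear the hardware counters themselves.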
@@ -247,7 +374,7 @@
 		return err;
 
 	mlxsw_reg_cwtpm_pack(cwtpm_cmd, mlxsw_sp_port->local_port, tclass_num,
-			     MLXSW_REG_CWTP_DEFAULT_PROFILE, true, is_ecn);
+			     MLXSW_REG_CWTP_DEFAULT_PROFILE, is_wred, is_ecn);
 
 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtpm), cwtpm_cmd);
 }
@@ -281,7 +408,6 @@
					     mlxsw_sp_qdisc->prio_bitmap,
					     &stats_base->tx_packets,
					     &stats_base->tx_bytes);
-	red_base->prob_mark = xstats->ecn;
 	red_base->prob_drop = xstats->wred_drop[tclass_num];
 	red_base->pdrop = mlxsw_sp_xstats_tail_drop(xstats, tclass_num);
 
@@ -295,7 +421,8 @@
 mlxsw_sp_qdisc_red_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
			   struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
 {
-	struct mlxsw_sp_qdisc *root_qdisc = mlxsw_sp_port->root_qdisc;
+	struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
+	struct mlxsw_sp_qdisc *root_qdisc = &qdisc_state->root_qdisc;
 
 	if (root_qdisc != mlxsw_sp_qdisc)
 		root_qdisc->stats_base.backlog -=
@@ -319,7 +446,8 @@
 			p->max);
 		return -EINVAL;
 	}
-	if (p->max > MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE)) {
+	if (p->max > MLXSW_CORE_RES_GET(mlxsw_sp->core,
+					GUARANTEED_SHARED_BUFFER)) {
 		dev_err(mlxsw_sp->bus_info->dev,
 			"spectrum: RED: max value %u is too big\n", p->max);
 		return -EINVAL;
@@ -333,7 +461,7 @@
 }
 
 static int
-mlxsw_sp_qdisc_red_replace(struct mlxsw_sp_port *mlxsw_sp_port,
+mlxsw_sp_qdisc_red_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
			   struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			   void *params)
 {
@@ -350,8 +478,22 @@
 	prob = DIV_ROUND_UP(prob, 1 << 16);
 	min = mlxsw_sp_bytes_cells(mlxsw_sp, p->min);
 	max = mlxsw_sp_bytes_cells(mlxsw_sp, p->max);
-	return mlxsw_sp_tclass_congestion_enable(mlxsw_sp_port, tclass_num, min,
-						 max, prob, p->is_ecn);
+	return mlxsw_sp_tclass_congestion_enable(mlxsw_sp_port, tclass_num,
+						 min, max, prob,
+						 !p->is_nodrop, p->is_ecn);
+}
+
+static void
+mlxsw_sp_qdisc_leaf_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
+			      struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+			      struct gnet_stats_queue *qstats)
+{
+	u64 backlog;
+
+	backlog = mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
+				       mlxsw_sp_qdisc->stats_base.backlog);
+	qstats->backlog -= backlog;
+	mlxsw_sp_qdisc->stats_base.backlog = 0;
 }
 
 static void
@@ -360,12 +502,8 @@
 		void *params)
 {
 	struct tc_red_qopt_offload_params *p = params;
-	u64 backlog;
 
-	backlog = mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
-				       mlxsw_sp_qdisc->stats_base.backlog);
-	p->qstats->backlog -= backlog;
-	mlxsw_sp_qdisc->stats_base.backlog = 0;
+	mlxsw_sp_qdisc_leaf_unoffload(mlxsw_sp_port, mlxsw_sp_qdisc, p->qstats);
 }
 
 static int
@@ -377,22 +515,19 @@
 	u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
 	struct mlxsw_sp_port_xstats *xstats;
 	struct red_stats *res = xstats_ptr;
-	int early_drops, marks, pdrops;
+	int early_drops, pdrops;
 
 	xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
 
 	early_drops = xstats->wred_drop[tclass_num] - xstats_base->prob_drop;
-	marks = xstats->ecn - xstats_base->prob_mark;
 	pdrops = mlxsw_sp_xstats_tail_drop(xstats, tclass_num) -
		 xstats_base->pdrop;
 
 	res->pdrop += pdrops;
 	res->prob_drop += early_drops;
-	res->prob_mark += marks;
 
 	xstats_base->pdrop += pdrops;
 	xstats_base->prob_drop += early_drops;
-	xstats_base->prob_mark += marks;
 	return 0;
 }
 
@@ -401,41 +536,20 @@
		       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
		       struct tc_qopt_offload_stats *stats_ptr)
 {
-	u64 tx_bytes, tx_packets, overlimits, drops, backlog;
 	u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
 	struct mlxsw_sp_qdisc_stats *stats_base;
 	struct mlxsw_sp_port_xstats *xstats;
+	u64 overlimits;
 
 	xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
 	stats_base = &mlxsw_sp_qdisc->stats_base;
 
-	mlxsw_sp_qdisc_bstats_per_priority_get(xstats,
-					       mlxsw_sp_qdisc->prio_bitmap,
-					       &tx_packets, &tx_bytes);
-	tx_bytes = tx_bytes - stats_base->tx_bytes;
-	tx_packets = tx_packets - stats_base->tx_packets;
+	mlxsw_sp_qdisc_get_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc, stats_ptr);
+	overlimits = xstats->wred_drop[tclass_num] - stats_base->overlimits;
 
-	overlimits = xstats->wred_drop[tclass_num] + xstats->ecn -
-		     stats_base->overlimits;
-	drops = xstats->wred_drop[tclass_num] +
-		mlxsw_sp_xstats_tail_drop(xstats, tclass_num) -
-		stats_base->drops;
-	backlog = mlxsw_sp_xstats_backlog(xstats, tclass_num);
-
-	_bstats_update(stats_ptr->bstats, tx_bytes, tx_packets);
 	stats_ptr->qstats->overlimits += overlimits;
-	stats_ptr->qstats->drops += drops;
-	stats_ptr->qstats->backlog +=
-			mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
-					     backlog) -
-			mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
-					     stats_base->backlog);
-
-	stats_base->backlog = backlog;
-	stats_base->drops += drops;
 	stats_base->overlimits += overlimits;
-	stats_base->tx_bytes += tx_bytes;
-	stats_base->tx_packets += tx_packets;
+
 	return 0;
 }
 
@@ -485,19 +599,349 @@
 	}
 }
 
+static void
+mlxsw_sp_setup_tc_qdisc_leaf_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
+					 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
+{
+	u64 backlog_cells = 0;
+	u64 tx_packets = 0;
+	u64 tx_bytes = 0;
+	u64 drops = 0;
+
+	mlxsw_sp_qdisc_collect_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
+					&tx_bytes, &tx_packets,
+					&drops, &backlog_cells);
+
+	mlxsw_sp_qdisc->stats_base.tx_packets = tx_packets;
+	mlxsw_sp_qdisc->stats_base.tx_bytes = tx_bytes;
+	mlxsw_sp_qdisc->stats_base.drops = drops;
+	mlxsw_sp_qdisc->stats_base.backlog = 0;
+}
+
 static int
-mlxsw_sp_qdisc_prio_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
+mlxsw_sp_qdisc_tbf_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
+			   struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
+{
+	struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
+	struct mlxsw_sp_qdisc *root_qdisc = &qdisc_state->root_qdisc;
+
+	if (root_qdisc != mlxsw_sp_qdisc)
+		root_qdisc->stats_base.backlog -=
+			mlxsw_sp_qdisc->stats_base.backlog;
+
+	return mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
+					     MLXSW_REG_QEEC_HR_SUBGROUP,
+					     mlxsw_sp_qdisc->tclass_num, 0,
+					     MLXSW_REG_QEEC_MAS_DIS, 0);
+}
+
+static int
+mlxsw_sp_qdisc_tbf_bs(struct mlxsw_sp_port *mlxsw_sp_port,
+		      u32 max_size, u8 *p_burst_size)
+{
+	/* TBF burst size is configured in bytes. The ASIC burst size value is
+	 * ((2 ^ bs) * 512 bits. Convert the TBF bytes to 512-bit units.
+	 */
+	u32 bs512 = max_size / 64;
+	u8 bs = fls(bs512);
+
+	if (!bs)
+		return -EINVAL;
+	--bs;
+
+	/* Demand a power of two. */
+	if ((1 << bs) != bs512)
+		return -EINVAL;
+
+	if (bs < mlxsw_sp_port->mlxsw_sp->lowest_shaper_bs ||
+	    bs > MLXSW_REG_QEEC_HIGHEST_SHAPER_BS)
+		return -EINVAL;
+
+	*p_burst_size = bs;
+	return 0;
+}
+
+static u32
+mlxsw_sp_qdisc_tbf_max_size(u8 bs)
+{
+	return (1U << bs) * 64;
+}
+
+static u64
+mlxsw_sp_qdisc_tbf_rate_kbps(struct tc_tbf_qopt_offload_replace_params *p)
+{
+	/* TBF interface is in bytes/s, whereas Spectrum ASIC is configured in
	 * Kbits/s.
+	 */
+	return div_u64(p->rate.rate_bytes_ps, 1000) * 8;
+}
+
+static int
+mlxsw_sp_qdisc_tbf_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
+				struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+				void *params)
+{
+	struct tc_tbf_qopt_offload_replace_params *p = params;
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	u64 rate_kbps = mlxsw_sp_qdisc_tbf_rate_kbps(p);
+	u8 burst_size;
+	int err;
+
+	if (rate_kbps >= MLXSW_REG_QEEC_MAS_DIS) {
+		dev_err(mlxsw_sp_port->mlxsw_sp->bus_info->dev,
+			"spectrum: TBF: rate of %lluKbps must be below %u\n",
+			rate_kbps, MLXSW_REG_QEEC_MAS_DIS);
+		return -EINVAL;
+	}
+
+	err = mlxsw_sp_qdisc_tbf_bs(mlxsw_sp_port, p->max_size, &burst_size);
+	if (err) {
+		u8 highest_shaper_bs = MLXSW_REG_QEEC_HIGHEST_SHAPER_BS;
+
+		dev_err(mlxsw_sp->bus_info->dev,
+			"spectrum: TBF: invalid burst size of %u, must be a power of two between %u and %u",
+			p->max_size,
+			mlxsw_sp_qdisc_tbf_max_size(mlxsw_sp->lowest_shaper_bs),
+			mlxsw_sp_qdisc_tbf_max_size(highest_shaper_bs));
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+mlxsw_sp_qdisc_tbf_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
+			   struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+			   void *params)
+{
+	struct tc_tbf_qopt_offload_replace_params *p = params;
+	u64 rate_kbps = mlxsw_sp_qdisc_tbf_rate_kbps(p);
+	u8 burst_size;
+	int err;
+
+	err = mlxsw_sp_qdisc_tbf_bs(mlxsw_sp_port, p->max_size, &burst_size);
+	if (WARN_ON_ONCE(err))
+		/* check_params above was supposed to reject this value. */
+		return -EINVAL;
+
+	/* Configure subgroup shaper, so that both UC and MC traffic is subject
+	 * to shaping. That is unlike RED, however UC queue lengths are going to
+	 * be different than MC ones due to different pool and quota
+	 * configurations, so the configuration is not applicable. For shaper on
+	 * the other hand, subjecting the overall stream to the configured
+	 * shaper makes sense. Also note that that is what we do for
+	 * ieee_setmaxrate().
+	 */
+	return mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
+					     MLXSW_REG_QEEC_HR_SUBGROUP,
+					     mlxsw_sp_qdisc->tclass_num, 0,
+					     rate_kbps, burst_size);
+}
+
+static void
+mlxsw_sp_qdisc_tbf_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
+			     struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+			     void *params)
+{
+	struct tc_tbf_qopt_offload_replace_params *p = params;
+
+	mlxsw_sp_qdisc_leaf_unoffload(mlxsw_sp_port, mlxsw_sp_qdisc, p->qstats);
+}
+
+static int
+mlxsw_sp_qdisc_get_tbf_stats(struct mlxsw_sp_port *mlxsw_sp_port,
+			     struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+			     struct tc_qopt_offload_stats *stats_ptr)
+{
+	mlxsw_sp_qdisc_get_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
+				    stats_ptr);
+	return 0;
+}
+
+static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_tbf = {
+	.type = MLXSW_SP_QDISC_TBF,
+	.check_params = mlxsw_sp_qdisc_tbf_check_params,
+	.replace = mlxsw_sp_qdisc_tbf_replace,
+	.unoffload = mlxsw_sp_qdisc_tbf_unoffload,
+	.destroy = mlxsw_sp_qdisc_tbf_destroy,
+	.get_stats = mlxsw_sp_qdisc_get_tbf_stats,
+	.clean_stats = mlxsw_sp_setup_tc_qdisc_leaf_clean_stats,
+};
+
+int mlxsw_sp_setup_tc_tbf(struct mlxsw_sp_port *mlxsw_sp_port,
+			  struct tc_tbf_qopt_offload *p)
+{
+	struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
+
+	mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, false);
+	if (!mlxsw_sp_qdisc)
+		return -EOPNOTSUPP;
+
+	if (p->command == TC_TBF_REPLACE)
+		return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
+					      mlxsw_sp_qdisc,
+					      &mlxsw_sp_qdisc_ops_tbf,
+					      &p->replace_params);
+
+	if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle,
+				    MLXSW_SP_QDISC_TBF))
+		return -EOPNOTSUPP;
+
+	switch (p->command) {
+	case TC_TBF_DESTROY:
+		return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
+	case TC_TBF_STATS:
+		return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
+						&p->stats);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static int
+mlxsw_sp_qdisc_fifo_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
 {
+	struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
+	struct mlxsw_sp_qdisc *root_qdisc = &qdisc_state->root_qdisc;
+
+	if (root_qdisc != mlxsw_sp_qdisc)
+		root_qdisc->stats_base.backlog -=
+			mlxsw_sp_qdisc->stats_base.backlog;
+	return 0;
+}
+
+static int
+mlxsw_sp_qdisc_fifo_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
+				 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+				 void *params)
+{
+	return 0;
+}
+
+static int
+mlxsw_sp_qdisc_fifo_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
+			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+			    void *params)
+{
+	return 0;
+}
+
+static int
+mlxsw_sp_qdisc_get_fifo_stats(struct mlxsw_sp_port *mlxsw_sp_port,
+			      struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+			      struct tc_qopt_offload_stats *stats_ptr)
+{
+	mlxsw_sp_qdisc_get_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
+				    stats_ptr);
+	return 0;
+}
+
+static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_fifo = {
+	.type = MLXSW_SP_QDISC_FIFO,
+	.check_params = mlxsw_sp_qdisc_fifo_check_params,
+	.replace = mlxsw_sp_qdisc_fifo_replace,
+	.destroy = mlxsw_sp_qdisc_fifo_destroy,
+	.get_stats = mlxsw_sp_qdisc_get_fifo_stats,
+	.clean_stats = mlxsw_sp_setup_tc_qdisc_leaf_clean_stats,
+};
+
+int mlxsw_sp_setup_tc_fifo(struct mlxsw_sp_port *mlxsw_sp_port,
+			   struct tc_fifo_qopt_offload *p)
+{
+	struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
+	struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
+	int tclass, child_index;
+	u32 parent_handle;
+
+	/* Invisible FIFOs are tracked in future_handle and future_fifos. Make
+	 * sure that not more than one qdisc is created for a port at a time.
+	 * RTNL is a simple proxy for that.
+	 */
+	ASSERT_RTNL();
+
+	mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, false);
+	if (!mlxsw_sp_qdisc && p->handle == TC_H_UNSPEC) {
+		parent_handle = TC_H_MAJ(p->parent);
+		if (parent_handle != qdisc_state->future_handle) {
+			/* This notifications is for a different Qdisc than
+			 * previously. Wipe the future cache.
+			 */
+			memset(qdisc_state->future_fifos, 0,
+			       sizeof(qdisc_state->future_fifos));
+			qdisc_state->future_handle = parent_handle;
+		}
+
+		child_index = TC_H_MIN(p->parent);
+		tclass = MLXSW_SP_PRIO_CHILD_TO_TCLASS(child_index);
+		if (tclass < IEEE_8021QAZ_MAX_TCS) {
+			if (p->command == TC_FIFO_REPLACE)
+				qdisc_state->future_fifos[tclass] = true;
+			else if (p->command == TC_FIFO_DESTROY)
+				qdisc_state->future_fifos[tclass] = false;
+		}
+	}
+	if (!mlxsw_sp_qdisc)
+		return -EOPNOTSUPP;
+
+	if (p->command == TC_FIFO_REPLACE) {
+		return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
+					      mlxsw_sp_qdisc,
+					      &mlxsw_sp_qdisc_ops_fifo, NULL);
+	}
+
+	if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle,
+				    MLXSW_SP_QDISC_FIFO))
+		return -EOPNOTSUPP;
+
+	switch (p->command) {
+	case TC_FIFO_DESTROY:
+		if (p->handle == mlxsw_sp_qdisc->handle)
+			return mlxsw_sp_qdisc_destroy(mlxsw_sp_port,
+						      mlxsw_sp_qdisc);
+		return 0;
+	case TC_FIFO_STATS:
+		return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
+						&p->stats);
+	case TC_FIFO_REPLACE: /* Handled above. */
+		break;
+	}
+
+	return -EOPNOTSUPP;
+}
+
+static int
+__mlxsw_sp_qdisc_ets_destroy(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+	struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
 	int i;
 
 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
 		mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i,
					  MLXSW_SP_PORT_DEFAULT_TCLASS);
+		mlxsw_sp_port_ets_set(mlxsw_sp_port,
+				      MLXSW_REG_QEEC_HR_SUBGROUP,
+				      i, 0, false, 0);
 		mlxsw_sp_qdisc_destroy(mlxsw_sp_port,
-				       &mlxsw_sp_port->tclass_qdiscs[i]);
-		mlxsw_sp_port->tclass_qdiscs[i].prio_bitmap = 0;
+				       &qdisc_state->tclass_qdiscs[i]);
+		qdisc_state->tclass_qdiscs[i].prio_bitmap = 0;
 	}
+
+	return 0;
+}
+
+static int
+mlxsw_sp_qdisc_prio_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
+			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
+{
+	return __mlxsw_sp_qdisc_ets_destroy(mlxsw_sp_port);
+}
+
+static int
+__mlxsw_sp_qdisc_ets_check_params(unsigned int nbands)
+{
+	if (nbands > IEEE_8021QAZ_MAX_TCS)
+		return -EOPNOTSUPP;
 
 	return 0;
 }
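The TBF conversions above are easy to sanity-check outside the kernel. A standalone sketch of the same arithmetic (fls32() stands in for the kernel's fls(); the range check against lowest_shaper_bs is omitted, and the input values are illustrative):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel's fls(): 1-based index of the highest set bit,
 * 0 when no bit is set.
 */
static int fls32(uint32_t x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	/* tc "burst 16k" -> max_size = 16384 bytes. One ASIC unit is
	 * 512 bits = 64 bytes, so bs512 = 16384 / 64 = 256 = 2^8.
	 */
	uint32_t max_size = 16384;
	uint32_t bs512 = max_size / 64;
	int bs = fls32(bs512) - 1;	/* 8 */

	printf("bs=%d, power of two: %s\n", bs,
	       (1U << bs) == bs512 ? "yes" : "no");

	/* tc "rate 100mbit" -> rate_bytes_ps = 12500000 bytes/s, and
	 * 12500000 / 1000 * 8 = 100000 Kbit/s for the QEEC shaper.
	 */
	printf("rate_kbps=%llu\n", 12500000ULL / 1000 * 8);
	return 0;
}

A burst of, say, 10000 bytes gives bs512 = 156, which is not a power of two, so check_params rejects it with the "must be a power of two" message.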
@@ -509,30 +953,37 @@
 {
 	struct tc_prio_qopt_offload_params *p = params;
 
-	if (p->bands > IEEE_8021QAZ_MAX_TCS)
-		return -EOPNOTSUPP;
-
-	return 0;
+	return __mlxsw_sp_qdisc_ets_check_params(p->bands);
 }
 
 static int
-mlxsw_sp_qdisc_prio_replace(struct mlxsw_sp_port *mlxsw_sp_port,
-			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
-			    void *params)
+__mlxsw_sp_qdisc_ets_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
+			     unsigned int nbands,
+			     const unsigned int *quanta,
+			     const unsigned int *weights,
+			     const u8 *priomap)
 {
-	struct tc_prio_qopt_offload_params *p = params;
+	struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
 	struct mlxsw_sp_qdisc *child_qdisc;
 	int tclass, i, band, backlog;
 	u8 old_priomap;
 	int err;
 
-	for (band = 0; band < p->bands; band++) {
+	for (band = 0; band < nbands; band++) {
 		tclass = MLXSW_SP_PRIO_BAND_TO_TCLASS(band);
-		child_qdisc = &mlxsw_sp_port->tclass_qdiscs[tclass];
+		child_qdisc = &qdisc_state->tclass_qdiscs[tclass];
 		old_priomap = child_qdisc->prio_bitmap;
 		child_qdisc->prio_bitmap = 0;
+
+		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
+					    MLXSW_REG_QEEC_HR_SUBGROUP,
+					    tclass, 0, !!quanta[band],
+					    weights[band]);
+		if (err)
+			return err;
+
 		for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
-			if (p->priomap[i] == band) {
+			if (priomap[i] == band) {
				child_qdisc->prio_bitmap |= BIT(i);
				if (BIT(i) & old_priomap)
					continue;
@@ -549,14 +1000,54 @@
						  child_qdisc);
			child_qdisc->stats_base.backlog = backlog;
 		}
+
+		if (handle == qdisc_state->future_handle &&
+		    qdisc_state->future_fifos[tclass]) {
+			err = mlxsw_sp_qdisc_replace(mlxsw_sp_port, TC_H_UNSPEC,
+						     child_qdisc,
+						     &mlxsw_sp_qdisc_ops_fifo,
+						     NULL);
+			if (err)
+				return err;
+		}
 	}
 	for (; band < IEEE_8021QAZ_MAX_TCS; band++) {
 		tclass = MLXSW_SP_PRIO_BAND_TO_TCLASS(band);
-		child_qdisc = &mlxsw_sp_port->tclass_qdiscs[tclass];
+		child_qdisc = &qdisc_state->tclass_qdiscs[tclass];
 		child_qdisc->prio_bitmap = 0;
 		mlxsw_sp_qdisc_destroy(mlxsw_sp_port, child_qdisc);
+		mlxsw_sp_port_ets_set(mlxsw_sp_port,
+				      MLXSW_REG_QEEC_HR_SUBGROUP,
+				      tclass, 0, false, 0);
 	}
+
+	qdisc_state->future_handle = TC_H_UNSPEC;
+	memset(qdisc_state->future_fifos, 0, sizeof(qdisc_state->future_fifos));
 	return 0;
+}
+
+static int
+mlxsw_sp_qdisc_prio_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
+			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+			    void *params)
+{
+	struct tc_prio_qopt_offload_params *p = params;
+	unsigned int zeroes[TCQ_ETS_MAX_BANDS] = {0};
+
+	return __mlxsw_sp_qdisc_ets_replace(mlxsw_sp_port, handle, p->bands,
+					    zeroes, zeroes, p->priomap);
+}
+
+static void
+__mlxsw_sp_qdisc_ets_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
+			       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+			       struct gnet_stats_queue *qstats)
+{
+	u64 backlog;
+
+	backlog = mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
+				       mlxsw_sp_qdisc->stats_base.backlog);
+	qstats->backlog -= backlog;
 }
 
 static void
@@ -565,11 +1056,9 @@
 		void *params)
 {
 	struct tc_prio_qopt_offload_params *p = params;
-	u64 backlog;
 
-	backlog = mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
-				       mlxsw_sp_qdisc->stats_base.backlog);
-	p->qstats->backlog -= backlog;
+	__mlxsw_sp_qdisc_ets_unoffload(mlxsw_sp_port, mlxsw_sp_qdisc,
+				       p->qstats);
 }
 
 static int
@@ -577,37 +1066,24 @@
			      struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			      struct tc_qopt_offload_stats *stats_ptr)
 {
-	u64 tx_bytes, tx_packets, drops = 0, backlog = 0;
-	struct mlxsw_sp_qdisc_stats *stats_base;
-	struct mlxsw_sp_port_xstats *xstats;
-	struct rtnl_link_stats64 *stats;
+	struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
+	struct mlxsw_sp_qdisc *tc_qdisc;
+	u64 tx_packets = 0;
+	u64 tx_bytes = 0;
+	u64 backlog = 0;
+	u64 drops = 0;
 	int i;
 
-	xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
-	stats = &mlxsw_sp_port->periodic_hw_stats.stats;
-	stats_base = &mlxsw_sp_qdisc->stats_base;
-
-	tx_bytes = stats->tx_bytes - stats_base->tx_bytes;
-	tx_packets = stats->tx_packets - stats_base->tx_packets;
-
 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
-		drops += mlxsw_sp_xstats_tail_drop(xstats, i);
-		drops += xstats->wred_drop[i];
-		backlog += mlxsw_sp_xstats_backlog(xstats, i);
+		tc_qdisc = &qdisc_state->tclass_qdiscs[i];
+		mlxsw_sp_qdisc_collect_tc_stats(mlxsw_sp_port, tc_qdisc,
+						&tx_bytes, &tx_packets,
+						&drops, &backlog);
 	}
-	drops = drops - stats_base->drops;
 
-	_bstats_update(stats_ptr->bstats, tx_bytes, tx_packets);
-	stats_ptr->qstats->drops += drops;
-	stats_ptr->qstats->backlog +=
-			mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
-					     backlog) -
-			mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
-					     stats_base->backlog);
-	stats_base->backlog = backlog;
-	stats_base->drops += drops;
-	stats_base->tx_bytes += tx_bytes;
-	stats_base->tx_packets += tx_packets;
+	mlxsw_sp_qdisc_update_stats(mlxsw_sp_port->mlxsw_sp, mlxsw_sp_qdisc,
+				    tx_bytes, tx_packets, drops, backlog,
+				    stats_ptr);
 	return 0;
 }
 
@@ -646,27 +1122,94 @@
 	.clean_stats = mlxsw_sp_setup_tc_qdisc_prio_clean_stats,
 };
 
-/* Grafting is not supported in mlxsw. It will result in un-offloading of the
- * grafted qdisc as well as the qdisc in the qdisc new location.
- * (However, if the graft is to the location where the qdisc is already at, it
- * will be ignored completely and won't cause un-offloading).
+static int
+mlxsw_sp_qdisc_ets_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
+				struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+				void *params)
+{
+	struct tc_ets_qopt_offload_replace_params *p = params;
+
+	return __mlxsw_sp_qdisc_ets_check_params(p->bands);
+}
+
+static int
+mlxsw_sp_qdisc_ets_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
+			   struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+			   void *params)
+{
+	struct tc_ets_qopt_offload_replace_params *p = params;
+
+	return __mlxsw_sp_qdisc_ets_replace(mlxsw_sp_port, handle, p->bands,
+					    p->quanta, p->weights, p->priomap);
+}
+
+static void
+mlxsw_sp_qdisc_ets_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
+			     struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+			     void *params)
+{
+	struct tc_ets_qopt_offload_replace_params *p = params;
+
+	__mlxsw_sp_qdisc_ets_unoffload(mlxsw_sp_port, mlxsw_sp_qdisc,
+				       p->qstats);
+}
+
+static int
+mlxsw_sp_qdisc_ets_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
+			   struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
+{
+	return __mlxsw_sp_qdisc_ets_destroy(mlxsw_sp_port);
+}
+
+static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_ets = {
+	.type = MLXSW_SP_QDISC_ETS,
+	.check_params = mlxsw_sp_qdisc_ets_check_params,
+	.replace = mlxsw_sp_qdisc_ets_replace,
+	.unoffload = mlxsw_sp_qdisc_ets_unoffload,
+	.destroy = mlxsw_sp_qdisc_ets_destroy,
+	.get_stats = mlxsw_sp_qdisc_get_prio_stats,
+	.clean_stats = mlxsw_sp_setup_tc_qdisc_prio_clean_stats,
+};
+
+/* Linux allows linking of Qdiscs to arbitrary classes (so long as the resulting
+ * graph is free of cycles). These operations do not change the parent handle
+ * though, which means it can be incomplete (if there is more than one class
+ * where the Qdisc in question is grafted) or outright wrong (if the Qdisc was
+ * linked to a different class and then removed from the original class).
+ *
+ * E.g. consider this sequence of operations:
+ *
+ *  # tc qdisc add dev swp1 root handle 1: prio
+ *  # tc qdisc add dev swp1 parent 1:3 handle 13: red limit 1000000 avpkt 10000
+ *  RED: set bandwidth to 10Mbit
+ *  # tc qdisc link dev swp1 handle 13: parent 1:2
+ *
+ * At this point, both 1:2 and 1:3 have the same RED Qdisc instance as their
+ * child. But RED will still only claim that 1:3 is its parent. If it's removed
+ * from that band, its only parent will be 1:2, but it will continue to claim
+ * that it is in fact 1:3.
+ *
+ * The notification for child Qdisc replace (e.g. TC_RED_REPLACE) comes before
+ * the notification for parent graft (e.g. TC_PRIO_GRAFT). We take the replace
+ * notification to offload the child Qdisc, based on its parent handle, and use
+ * the graft operation to validate that the class where the child is actually
+ * grafted corresponds to the parent handle. If the two don't match, we
+ * unoffload the child.
 */
 static int
-mlxsw_sp_qdisc_prio_graft(struct mlxsw_sp_port *mlxsw_sp_port,
-			  struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
-			  struct tc_prio_qopt_offload_graft_params *p)
+__mlxsw_sp_qdisc_ets_graft(struct mlxsw_sp_port *mlxsw_sp_port,
+			   struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+			   u8 band, u32 child_handle)
 {
-	int tclass_num = MLXSW_SP_PRIO_BAND_TO_TCLASS(p->band);
+	struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
+	int tclass_num = MLXSW_SP_PRIO_BAND_TO_TCLASS(band);
 	struct mlxsw_sp_qdisc *old_qdisc;
 
-	/* Check if the grafted qdisc is already in its "new" location. If so -
-	 * nothing needs to be done.
-	 */
-	if (p->band < IEEE_8021QAZ_MAX_TCS &&
-	    mlxsw_sp_port->tclass_qdiscs[tclass_num].handle == p->child_handle)
+	if (band < IEEE_8021QAZ_MAX_TCS &&
+	    qdisc_state->tclass_qdiscs[tclass_num].handle == child_handle)
 		return 0;
 
-	if (!p->child_handle) {
+	if (!child_handle) {
 		/* This is an invisible FIFO replacing the original Qdisc.
		 * Ignore it--the original Qdisc's destroy will follow.
		 */
@@ -677,13 +1220,22 @@
 	 * unoffload it.
 	 */
 	old_qdisc = mlxsw_sp_qdisc_find_by_handle(mlxsw_sp_port,
-						  p->child_handle);
+						  child_handle);
 	if (old_qdisc)
 		mlxsw_sp_qdisc_destroy(mlxsw_sp_port, old_qdisc);
 
 	mlxsw_sp_qdisc_destroy(mlxsw_sp_port,
-			       &mlxsw_sp_port->tclass_qdiscs[tclass_num]);
+			       &qdisc_state->tclass_qdiscs[tclass_num]);
 	return -EOPNOTSUPP;
+}
+
+static int
+mlxsw_sp_qdisc_prio_graft(struct mlxsw_sp_port *mlxsw_sp_port,
+			  struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+			  struct tc_prio_qopt_offload_graft_params *p)
+{
+	return __mlxsw_sp_qdisc_ets_graft(mlxsw_sp_port, mlxsw_sp_qdisc,
+					  p->band, p->child_handle);
 }
 
 int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
@@ -719,39 +1271,582 @@
 	}
 }
 
-int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port)
+int mlxsw_sp_setup_tc_ets(struct mlxsw_sp_port *mlxsw_sp_port,
+			  struct tc_ets_qopt_offload *p)
 {
 	struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
-	int i;
 
-	mlxsw_sp_qdisc = kzalloc(sizeof(*mlxsw_sp_qdisc), GFP_KERNEL);
+	mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, true);
 	if (!mlxsw_sp_qdisc)
-		goto err_root_qdisc_init;
+		return -EOPNOTSUPP;
 
-	mlxsw_sp_port->root_qdisc = mlxsw_sp_qdisc;
-	mlxsw_sp_port->root_qdisc->prio_bitmap = 0xff;
-	mlxsw_sp_port->root_qdisc->tclass_num = MLXSW_SP_PORT_DEFAULT_TCLASS;
+	if (p->command == TC_ETS_REPLACE)
+		return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
+					      mlxsw_sp_qdisc,
+					      &mlxsw_sp_qdisc_ops_ets,
+					      &p->replace_params);
 
-	mlxsw_sp_qdisc = kcalloc(IEEE_8021QAZ_MAX_TCS,
-				 sizeof(*mlxsw_sp_qdisc),
-				 GFP_KERNEL);
-	if (!mlxsw_sp_qdisc)
-		goto err_tclass_qdiscs_init;
+	if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle,
+				    MLXSW_SP_QDISC_ETS))
+		return -EOPNOTSUPP;
 
-	mlxsw_sp_port->tclass_qdiscs = mlxsw_sp_qdisc;
-	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
-		mlxsw_sp_port->tclass_qdiscs[i].tclass_num = i;
+	switch (p->command) {
+	case TC_ETS_DESTROY:
+		return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
+	case TC_ETS_STATS:
+		return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
+						&p->stats);
+	case TC_ETS_GRAFT:
+		return __mlxsw_sp_qdisc_ets_graft(mlxsw_sp_port, mlxsw_sp_qdisc,
+						  p->graft_params.band,
+						  p->graft_params.child_handle);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+struct mlxsw_sp_qevent_block {
+	struct list_head binding_list;
+	struct list_head mall_entry_list;
+	struct mlxsw_sp *mlxsw_sp;
+};
+
+struct mlxsw_sp_qevent_binding {
+	struct list_head list;
+	struct mlxsw_sp_port *mlxsw_sp_port;
+	u32 handle;
+	int tclass_num;
+	enum mlxsw_sp_span_trigger span_trigger;
+};
+
+static LIST_HEAD(mlxsw_sp_qevent_block_cb_list);
+
+static int mlxsw_sp_qevent_span_configure(struct mlxsw_sp *mlxsw_sp,
+					  struct mlxsw_sp_mall_entry *mall_entry,
+					  struct mlxsw_sp_qevent_binding *qevent_binding,
+					  const struct mlxsw_sp_span_agent_parms *agent_parms,
+					  int *p_span_id)
+{
+	struct mlxsw_sp_port *mlxsw_sp_port = qevent_binding->mlxsw_sp_port;
+	struct mlxsw_sp_span_trigger_parms trigger_parms = {};
+	int span_id;
+	int err;
+
+	err = mlxsw_sp_span_agent_get(mlxsw_sp, &span_id, agent_parms);
+	if (err)
+		return err;
+
+	err = mlxsw_sp_span_analyzed_port_get(mlxsw_sp_port, true);
+	if (err)
+		goto err_analyzed_port_get;
+
+	trigger_parms.span_id = span_id;
+	err = mlxsw_sp_span_agent_bind(mlxsw_sp, qevent_binding->span_trigger, mlxsw_sp_port,
+				       &trigger_parms);
+	if (err)
+		goto err_agent_bind;
+
+	err = mlxsw_sp_span_trigger_enable(mlxsw_sp_port, qevent_binding->span_trigger,
+					   qevent_binding->tclass_num);
+	if (err)
+		goto err_trigger_enable;
+
+	*p_span_id = span_id;
+	return 0;
+
+err_trigger_enable:
+	mlxsw_sp_span_agent_unbind(mlxsw_sp, qevent_binding->span_trigger, mlxsw_sp_port,
+				   &trigger_parms);
+err_agent_bind:
+	mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, true);
+err_analyzed_port_get:
+	mlxsw_sp_span_agent_put(mlxsw_sp, span_id);
+	return err;
+}
+
+static void mlxsw_sp_qevent_span_deconfigure(struct mlxsw_sp *mlxsw_sp,
+					     struct mlxsw_sp_qevent_binding *qevent_binding,
+					     int span_id)
+{
+	struct mlxsw_sp_port *mlxsw_sp_port = qevent_binding->mlxsw_sp_port;
+	struct mlxsw_sp_span_trigger_parms trigger_parms = {
+		.span_id = span_id,
+	};
+
+	mlxsw_sp_span_trigger_disable(mlxsw_sp_port, qevent_binding->span_trigger,
+				      qevent_binding->tclass_num);
+	mlxsw_sp_span_agent_unbind(mlxsw_sp, qevent_binding->span_trigger, mlxsw_sp_port,
+				   &trigger_parms);
+	mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, true);
+	mlxsw_sp_span_agent_put(mlxsw_sp, span_id);
+}
+
+static int mlxsw_sp_qevent_mirror_configure(struct mlxsw_sp *mlxsw_sp,
+					    struct mlxsw_sp_mall_entry *mall_entry,
+					    struct mlxsw_sp_qevent_binding *qevent_binding)
+{
+	struct mlxsw_sp_span_agent_parms agent_parms = {
+		.to_dev = mall_entry->mirror.to_dev,
+	};
+
+	return mlxsw_sp_qevent_span_configure(mlxsw_sp, mall_entry, qevent_binding,
+					      &agent_parms, &mall_entry->mirror.span_id);
+}
+
+static void mlxsw_sp_qevent_mirror_deconfigure(struct mlxsw_sp *mlxsw_sp,
+					       struct mlxsw_sp_mall_entry *mall_entry,
+					       struct mlxsw_sp_qevent_binding *qevent_binding)
+{
+	mlxsw_sp_qevent_span_deconfigure(mlxsw_sp, qevent_binding, mall_entry->mirror.span_id);
+}
+
+static int mlxsw_sp_qevent_trap_configure(struct mlxsw_sp *mlxsw_sp,
+					  struct mlxsw_sp_mall_entry *mall_entry,
+					  struct mlxsw_sp_qevent_binding *qevent_binding)
+{
+	struct mlxsw_sp_span_agent_parms agent_parms = {};
+	int err;
+
+	err = mlxsw_sp_trap_group_policer_hw_id_get(mlxsw_sp,
+						    DEVLINK_TRAP_GROUP_GENERIC_ID_BUFFER_DROPS,
+						    &agent_parms.policer_enable,
+						    &agent_parms.policer_id);
+	if (err)
+		return err;
+
+	return mlxsw_sp_qevent_span_configure(mlxsw_sp, mall_entry, qevent_binding,
+					      &agent_parms, &mall_entry->trap.span_id);
+}
+
+static void mlxsw_sp_qevent_trap_deconfigure(struct mlxsw_sp *mlxsw_sp,
+					     struct mlxsw_sp_mall_entry *mall_entry,
+					     struct mlxsw_sp_qevent_binding *qevent_binding)
+{
+	mlxsw_sp_qevent_span_deconfigure(mlxsw_sp, qevent_binding, mall_entry->trap.span_id);
+}
+
+static int mlxsw_sp_qevent_entry_configure(struct mlxsw_sp *mlxsw_sp,
+					   struct mlxsw_sp_mall_entry *mall_entry,
+					   struct mlxsw_sp_qevent_binding *qevent_binding)
+{
+	switch (mall_entry->type) {
+	case MLXSW_SP_MALL_ACTION_TYPE_MIRROR:
+		return mlxsw_sp_qevent_mirror_configure(mlxsw_sp, mall_entry, qevent_binding);
+	case MLXSW_SP_MALL_ACTION_TYPE_TRAP:
+		return mlxsw_sp_qevent_trap_configure(mlxsw_sp, mall_entry, qevent_binding);
+	default:
+		/* This should have been validated away. */
+		WARN_ON(1);
+		return -EOPNOTSUPP;
+	}
+}
+
+static void mlxsw_sp_qevent_entry_deconfigure(struct mlxsw_sp *mlxsw_sp,
+					      struct mlxsw_sp_mall_entry *mall_entry,
+					      struct mlxsw_sp_qevent_binding *qevent_binding)
+{
+	switch (mall_entry->type) {
+	case MLXSW_SP_MALL_ACTION_TYPE_MIRROR:
+		return mlxsw_sp_qevent_mirror_deconfigure(mlxsw_sp, mall_entry, qevent_binding);
+	case MLXSW_SP_MALL_ACTION_TYPE_TRAP:
+		return mlxsw_sp_qevent_trap_deconfigure(mlxsw_sp, mall_entry, qevent_binding);
+	default:
+		WARN_ON(1);
+		return;
+	}
+}
+
+static int mlxsw_sp_qevent_binding_configure(struct mlxsw_sp_qevent_block *qevent_block,
+					     struct mlxsw_sp_qevent_binding *qevent_binding)
+{
+	struct mlxsw_sp_mall_entry *mall_entry;
+	int err;
+
+	list_for_each_entry(mall_entry, &qevent_block->mall_entry_list, list) {
+		err = mlxsw_sp_qevent_entry_configure(qevent_block->mlxsw_sp, mall_entry,
+						      qevent_binding);
+		if (err)
+			goto err_entry_configure;
+	}
 
 	return 0;
 
-err_tclass_qdiscs_init:
-	kfree(mlxsw_sp_port->root_qdisc);
-err_root_qdisc_init:
-	return -ENOMEM;
+err_entry_configure:
+	list_for_each_entry_continue_reverse(mall_entry, &qevent_block->mall_entry_list, list)
+		mlxsw_sp_qevent_entry_deconfigure(qevent_block->mlxsw_sp, mall_entry,
+						  qevent_binding);
+	return err;
+}
+
+static void mlxsw_sp_qevent_binding_deconfigure(struct mlxsw_sp_qevent_block *qevent_block,
+						struct mlxsw_sp_qevent_binding *qevent_binding)
+{
+	struct mlxsw_sp_mall_entry *mall_entry;
+
+	list_for_each_entry(mall_entry, &qevent_block->mall_entry_list, list)
+		mlxsw_sp_qevent_entry_deconfigure(qevent_block->mlxsw_sp, mall_entry,
+						  qevent_binding);
+}
+
+static int mlxsw_sp_qevent_block_configure(struct mlxsw_sp_qevent_block *qevent_block)
+{
+	struct mlxsw_sp_qevent_binding *qevent_binding;
+	int err;
+
+	list_for_each_entry(qevent_binding, &qevent_block->binding_list, list) {
+		err = mlxsw_sp_qevent_binding_configure(qevent_block, qevent_binding);
+		if (err)
+			goto err_binding_configure;
+	}
+
+	return 0;
+
+err_binding_configure:
+	list_for_each_entry_continue_reverse(qevent_binding, &qevent_block->binding_list, list)
+		mlxsw_sp_qevent_binding_deconfigure(qevent_block, qevent_binding);
+	return err;
+}
+
+static void mlxsw_sp_qevent_block_deconfigure(struct mlxsw_sp_qevent_block *qevent_block)
+{
+	struct mlxsw_sp_qevent_binding *qevent_binding;
+
+	list_for_each_entry(qevent_binding, &qevent_block->binding_list, list)
+		mlxsw_sp_qevent_binding_deconfigure(qevent_block, qevent_binding);
+}
+
+static struct mlxsw_sp_mall_entry *
+mlxsw_sp_qevent_mall_entry_find(struct mlxsw_sp_qevent_block *block, unsigned long cookie)
+{
+	struct mlxsw_sp_mall_entry *mall_entry;
+
+	list_for_each_entry(mall_entry, &block->mall_entry_list, list)
+		if (mall_entry->cookie == cookie)
+			return mall_entry;
+
+	return NULL;
+}
+
+static int mlxsw_sp_qevent_mall_replace(struct mlxsw_sp *mlxsw_sp,
+					struct mlxsw_sp_qevent_block *qevent_block,
+					struct tc_cls_matchall_offload *f)
+{
+	struct mlxsw_sp_mall_entry *mall_entry;
+	struct flow_action_entry *act;
+	int err;
+
+	/* It should not currently be possible to replace a matchall rule. So
+	 * this must be a new rule.
+	 */
+	if (!list_empty(&qevent_block->mall_entry_list)) {
+		NL_SET_ERR_MSG(f->common.extack, "At most one filter supported");
+		return -EOPNOTSUPP;
+	}
+	if (f->rule->action.num_entries != 1) {
+		NL_SET_ERR_MSG(f->common.extack, "Only singular actions supported");
+		return -EOPNOTSUPP;
+	}
+	if (f->common.chain_index) {
+		NL_SET_ERR_MSG(f->common.extack, "Only chain 0 is supported");
+		return -EOPNOTSUPP;
+	}
+	if (f->common.protocol != htons(ETH_P_ALL)) {
+		NL_SET_ERR_MSG(f->common.extack, "Protocol matching not supported");
+		return -EOPNOTSUPP;
+	}
+
+	act = &f->rule->action.entries[0];
+	if (!(act->hw_stats & FLOW_ACTION_HW_STATS_DISABLED)) {
+		NL_SET_ERR_MSG(f->common.extack, "HW counters not supported on qevents");
+		return -EOPNOTSUPP;
+	}
+
+	mall_entry = kzalloc(sizeof(*mall_entry), GFP_KERNEL);
+	if (!mall_entry)
+		return -ENOMEM;
+	mall_entry->cookie = f->cookie;
+
+	if (act->id == FLOW_ACTION_MIRRED) {
+		mall_entry->type = MLXSW_SP_MALL_ACTION_TYPE_MIRROR;
+		mall_entry->mirror.to_dev = act->dev;
+	} else if (act->id == FLOW_ACTION_TRAP) {
+		mall_entry->type = MLXSW_SP_MALL_ACTION_TYPE_TRAP;
+	} else {
+		NL_SET_ERR_MSG(f->common.extack, "Unsupported action");
+		err = -EOPNOTSUPP;
+		goto err_unsupported_action;
+	}
+
+	list_add_tail(&mall_entry->list, &qevent_block->mall_entry_list);
+
+	err = mlxsw_sp_qevent_block_configure(qevent_block);
+	if (err)
+		goto err_block_configure;
+
+	return 0;
+
+err_block_configure:
+	list_del(&mall_entry->list);
+err_unsupported_action:
+	kfree(mall_entry);
+	return err;
+}
+
+static void mlxsw_sp_qevent_mall_destroy(struct mlxsw_sp_qevent_block *qevent_block,
+					 struct tc_cls_matchall_offload *f)
+{
+	struct mlxsw_sp_mall_entry *mall_entry;
+
+	mall_entry = mlxsw_sp_qevent_mall_entry_find(qevent_block, f->cookie);
+	if (!mall_entry)
+		return;
+
+	mlxsw_sp_qevent_block_deconfigure(qevent_block);
+
+	list_del(&mall_entry->list);
+	kfree(mall_entry);
+}
+
+static int mlxsw_sp_qevent_block_mall_cb(struct mlxsw_sp_qevent_block *qevent_block,
+					 struct tc_cls_matchall_offload *f)
+{
+	struct mlxsw_sp *mlxsw_sp = qevent_block->mlxsw_sp;
+
+	switch (f->command) {
+	case TC_CLSMATCHALL_REPLACE:
+		return mlxsw_sp_qevent_mall_replace(mlxsw_sp, qevent_block, f);
+	case TC_CLSMATCHALL_DESTROY:
+		mlxsw_sp_qevent_mall_destroy(qevent_block, f);
+		return 0;
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static int mlxsw_sp_qevent_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
+{
+	struct mlxsw_sp_qevent_block *qevent_block = cb_priv;
+
+	switch (type) {
+	case TC_SETUP_CLSMATCHALL:
+		return mlxsw_sp_qevent_block_mall_cb(qevent_block, type_data);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static struct mlxsw_sp_qevent_block *mlxsw_sp_qevent_block_create(struct mlxsw_sp *mlxsw_sp,
+								  struct net *net)
+{
+	struct mlxsw_sp_qevent_block *qevent_block;
+
+	qevent_block = kzalloc(sizeof(*qevent_block), GFP_KERNEL);
+	if (!qevent_block)
+		return NULL;
+
+	INIT_LIST_HEAD(&qevent_block->binding_list);
+	INIT_LIST_HEAD(&qevent_block->mall_entry_list);
+	qevent_block->mlxsw_sp = mlxsw_sp;
+	return qevent_block;
+}
+
+static void
+mlxsw_sp_qevent_block_destroy(struct mlxsw_sp_qevent_block *qevent_block)
+{
+	WARN_ON(!list_empty(&qevent_block->binding_list));
+	WARN_ON(!list_empty(&qevent_block->mall_entry_list));
+	kfree(qevent_block);
+}
+
+static void mlxsw_sp_qevent_block_release(void *cb_priv)
+{
+	struct mlxsw_sp_qevent_block *qevent_block = cb_priv;
+
+	mlxsw_sp_qevent_block_destroy(qevent_block);
+}
+
+static struct mlxsw_sp_qevent_binding *
+mlxsw_sp_qevent_binding_create(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle, int tclass_num,
+			       enum mlxsw_sp_span_trigger span_trigger)
+{
+	struct mlxsw_sp_qevent_binding *binding;
+
+	binding = kzalloc(sizeof(*binding), GFP_KERNEL);
+	if (!binding)
+		return ERR_PTR(-ENOMEM);
+
+	binding->mlxsw_sp_port = mlxsw_sp_port;
+	binding->handle = handle;
+	binding->tclass_num = tclass_num;
+	binding->span_trigger = span_trigger;
+	return binding;
+}
+
+static void
+mlxsw_sp_qevent_binding_destroy(struct mlxsw_sp_qevent_binding *binding)
+{
+	kfree(binding);
+}
+
+static struct mlxsw_sp_qevent_binding *
+mlxsw_sp_qevent_binding_lookup(struct mlxsw_sp_qevent_block *block,
+			       struct mlxsw_sp_port *mlxsw_sp_port,
+			       u32 handle,
+			       enum mlxsw_sp_span_trigger span_trigger)
+{
+	struct mlxsw_sp_qevent_binding *qevent_binding;
+
+	list_for_each_entry(qevent_binding, &block->binding_list, list)
+		if (qevent_binding->mlxsw_sp_port == mlxsw_sp_port &&
+		    qevent_binding->handle == handle &&
+		    qevent_binding->span_trigger == span_trigger)
+			return qevent_binding;
+	return NULL;
+}
+
+static int mlxsw_sp_setup_tc_block_qevent_bind(struct mlxsw_sp_port *mlxsw_sp_port,
+					       struct flow_block_offload *f,
+					       enum mlxsw_sp_span_trigger span_trigger)
+{
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	struct mlxsw_sp_qevent_binding *qevent_binding;
+	struct mlxsw_sp_qevent_block *qevent_block;
+	struct flow_block_cb *block_cb;
+	struct mlxsw_sp_qdisc *qdisc;
+	bool register_block = false;
+	int err;
+
+	block_cb = flow_block_cb_lookup(f->block, mlxsw_sp_qevent_block_cb, mlxsw_sp);
+	if (!block_cb) {
+		qevent_block = mlxsw_sp_qevent_block_create(mlxsw_sp, f->net);
+		if (!qevent_block)
+			return -ENOMEM;
+		block_cb = flow_block_cb_alloc(mlxsw_sp_qevent_block_cb, mlxsw_sp, qevent_block,
+					       mlxsw_sp_qevent_block_release);
+		if (IS_ERR(block_cb)) {
+			mlxsw_sp_qevent_block_destroy(qevent_block);
+			return PTR_ERR(block_cb);
+		}
+		register_block = true;
+	} else {
+		qevent_block = flow_block_cb_priv(block_cb);
+	}
+	flow_block_cb_incref(block_cb);
+
+	qdisc = mlxsw_sp_qdisc_find_by_handle(mlxsw_sp_port, f->sch->handle);
+	if (!qdisc) {
+		NL_SET_ERR_MSG(f->extack, "Qdisc not offloaded");
+		err = -ENOENT;
+		goto err_find_qdisc;
+	}
+
+	if (WARN_ON(mlxsw_sp_qevent_binding_lookup(qevent_block, mlxsw_sp_port, f->sch->handle,
+						   span_trigger))) {
+		err = -EEXIST;
+		goto err_binding_exists;
+	}
+
+	qevent_binding = mlxsw_sp_qevent_binding_create(mlxsw_sp_port, f->sch->handle,
+							qdisc->tclass_num, span_trigger);
+	if (IS_ERR(qevent_binding)) {
+		err = PTR_ERR(qevent_binding);
+		goto err_binding_create;
+	}
+
+	err = mlxsw_sp_qevent_binding_configure(qevent_block, qevent_binding);
+	if (err)
+		goto err_binding_configure;
+
+	list_add(&qevent_binding->list, &qevent_block->binding_list);
+
+	if (register_block) {
+		flow_block_cb_add(block_cb, f);
+		list_add_tail(&block_cb->driver_list, &mlxsw_sp_qevent_block_cb_list);
+	}
+
+	return 0;
+
+err_binding_configure:
+	mlxsw_sp_qevent_binding_destroy(qevent_binding);
+err_binding_create:
+err_binding_exists:
+err_find_qdisc:
+	if (!flow_block_cb_decref(block_cb))
+		flow_block_cb_free(block_cb);
+	return err;
+}
+
+static void mlxsw_sp_setup_tc_block_qevent_unbind(struct mlxsw_sp_port *mlxsw_sp_port,
+						  struct flow_block_offload *f,
+						  enum mlxsw_sp_span_trigger span_trigger)
+{
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	struct mlxsw_sp_qevent_binding *qevent_binding;
+	struct mlxsw_sp_qevent_block *qevent_block;
+	struct flow_block_cb *block_cb;
+
+	block_cb = flow_block_cb_lookup(f->block, mlxsw_sp_qevent_block_cb, mlxsw_sp);
+	if (!block_cb)
+		return;
+	qevent_block = flow_block_cb_priv(block_cb);
+
+	qevent_binding = mlxsw_sp_qevent_binding_lookup(qevent_block, mlxsw_sp_port, f->sch->handle,
+							span_trigger);
+	if (!qevent_binding)
+		return;
+
+	list_del(&qevent_binding->list);
+	mlxsw_sp_qevent_binding_deconfigure(qevent_block, qevent_binding);
+	mlxsw_sp_qevent_binding_destroy(qevent_binding);
+
+	if (!flow_block_cb_decref(block_cb)) {
+		flow_block_cb_remove(block_cb, f);
+		list_del(&block_cb->driver_list);
+	}
+}
+
+static int mlxsw_sp_setup_tc_block_qevent(struct mlxsw_sp_port *mlxsw_sp_port,
+					  struct flow_block_offload *f,
+					  enum mlxsw_sp_span_trigger span_trigger)
+{
+	f->driver_block_list = &mlxsw_sp_qevent_block_cb_list;
+
+	switch (f->command) {
+	case FLOW_BLOCK_BIND:
+		return mlxsw_sp_setup_tc_block_qevent_bind(mlxsw_sp_port, f, span_trigger);
+	case FLOW_BLOCK_UNBIND:
+		mlxsw_sp_setup_tc_block_qevent_unbind(mlxsw_sp_port, f, span_trigger);
+		return 0;
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+int mlxsw_sp_setup_tc_block_qevent_early_drop(struct mlxsw_sp_port *mlxsw_sp_port,
+					      struct flow_block_offload *f)
+{
+	return mlxsw_sp_setup_tc_block_qevent(mlxsw_sp_port, f, MLXSW_SP_SPAN_TRIGGER_EARLY_DROP);
+}
+
+int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+	struct mlxsw_sp_qdisc_state *qdisc_state;
+	int i;
+
+	qdisc_state = kzalloc(sizeof(*qdisc_state), GFP_KERNEL);
+	if (!qdisc_state)
+		return -ENOMEM;
+
+	qdisc_state->root_qdisc.prio_bitmap = 0xff;
+	qdisc_state->root_qdisc.tclass_num = MLXSW_SP_PORT_DEFAULT_TCLASS;
+	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+		qdisc_state->tclass_qdiscs[i].tclass_num = i;
+
+	mlxsw_sp_port->qdisc = qdisc_state;
+	return 0;
 }
 
 void mlxsw_sp_tc_qdisc_fini(struct mlxsw_sp_port *mlxsw_sp_port)
 {
-	kfree(mlxsw_sp_port->tclass_qdiscs);
-	kfree(mlxsw_sp_port->root_qdisc);
+	kfree(mlxsw_sp_port->qdisc);
 }
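For completeness, the early_drop qevent wired up at the end is exercised from user space by binding a filter block to the qevent and populating it with a single matchall rule, along the lines of the following (an illustration in the spirit of the tc examples in the grafting comment above; swp1/swp2 and the exact iproute2 flags are assumptions, and note the driver requires the action's hardware counters to be disabled, per the check in mlxsw_sp_qevent_mall_replace):

 # tc qdisc add dev swp1 root handle 1: red limit 1000000 avpkt 10000 \
	probability 1.0 min 200000 max 200001 qevent early_drop block 10
 # tc filter add block 10 matchall skip_sw \
	action mirred egress mirror dev swp2 hw_stats disabled

Binding the block invokes mlxsw_sp_setup_tc_block_qevent_bind(), which resolves the RED qdisc by handle, creates a binding for the EARLY_DROP trigger, and mirrors (or, with an action of trap, traps) the early-dropped packets through the SPAN agent set up by mlxsw_sp_qevent_span_configure().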