forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-12-19 9370bb92b2d16684ee45cf24e879c93c509162da
kernel/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
@@ -8,6 +8,8 @@
 #include <linux/list.h>
 #include <linux/rhashtable.h>
 #include <linux/netdevice.h>
+#include <linux/mutex.h>
+#include <trace/events/mlxsw.h>
 
 #include "reg.h"
 #include "core.h"
@@ -23,6 +25,10 @@
 	return ops->priv_size;
 }
 
+#define MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_DFLT 5000 /* ms */
+#define MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_MIN 3000 /* ms */
+#define MLXSW_SP_ACL_TCAM_VREGION_REHASH_CREDITS 100 /* number of entries */
+
 int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
 			   struct mlxsw_sp_acl_tcam *tcam)
 {
@@ -32,6 +38,11 @@
 	u64 max_groups;
 	size_t alloc_size;
 	int err;
+
+	mutex_init(&tcam->lock);
+	tcam->vregion_rehash_intrvl =
+			MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_DFLT;
+	INIT_LIST_HEAD(&tcam->vregion_list);
 
 	max_tcam_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core,
 					      ACL_MAX_TCAM_REGIONS);
@@ -76,6 +87,7 @@
 {
 	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
 
+	mutex_destroy(&tcam->lock);
 	ops->fini(mlxsw_sp, tcam->priv);
 	kfree(tcam->used_groups);
 	kfree(tcam->used_regions);
@@ -153,37 +165,101 @@
 struct mlxsw_sp_acl_tcam_group {
 	struct mlxsw_sp_acl_tcam *tcam;
 	u16 id;
+	struct mutex lock; /* guards region list updates */
 	struct list_head region_list;
 	unsigned int region_count;
-	struct rhashtable chunk_ht;
-	struct mlxsw_sp_acl_tcam_group_ops *ops;
+};
+
+struct mlxsw_sp_acl_tcam_vgroup {
+	struct mlxsw_sp_acl_tcam_group group;
+	struct list_head vregion_list;
+	struct rhashtable vchunk_ht;
 	const struct mlxsw_sp_acl_tcam_pattern *patterns;
 	unsigned int patterns_count;
 	bool tmplt_elusage_set;
 	struct mlxsw_afk_element_usage tmplt_elusage;
+	bool vregion_rehash_enabled;
+	unsigned int *p_min_prio;
+	unsigned int *p_max_prio;
 };
 
-struct mlxsw_sp_acl_tcam_chunk {
-	struct list_head list; /* Member of a TCAM region */
-	struct rhash_head ht_node; /* Member of a chunk HT */
-	unsigned int priority; /* Priority within the region and group */
-	struct mlxsw_sp_acl_tcam_group *group;
+struct mlxsw_sp_acl_tcam_rehash_ctx {
+	void *hints_priv;
+	bool this_is_rollback;
+	struct mlxsw_sp_acl_tcam_vchunk *current_vchunk; /* vchunk being
+							  * currently migrated.
+							  */
+	struct mlxsw_sp_acl_tcam_ventry *start_ventry; /* ventry to start
+							* migration from in
+							* a vchunk being
+							* currently migrated.
+							*/
+	struct mlxsw_sp_acl_tcam_ventry *stop_ventry; /* ventry to stop
+						       * migration at
+						       * a vchunk being
+						       * currently migrated.
+						       */
+};
+
+struct mlxsw_sp_acl_tcam_vregion {
+	struct mutex lock; /* Protects consistency of region, region2 pointers
+			    * and vchunk_list.
+			    */
 	struct mlxsw_sp_acl_tcam_region *region;
+	struct mlxsw_sp_acl_tcam_region *region2; /* Used during migration */
+	struct list_head list; /* Member of a TCAM group */
+	struct list_head tlist; /* Member of a TCAM */
+	struct list_head vchunk_list; /* List of vchunks under this vregion */
+	struct mlxsw_afk_key_info *key_info;
+	struct mlxsw_sp_acl_tcam *tcam;
+	struct mlxsw_sp_acl_tcam_vgroup *vgroup;
+	struct {
+		struct delayed_work dw;
+		struct mlxsw_sp_acl_tcam_rehash_ctx ctx;
+	} rehash;
+	struct mlxsw_sp *mlxsw_sp;
 	unsigned int ref_count;
-	unsigned long priv[0];
+};
+
+struct mlxsw_sp_acl_tcam_vchunk;
+
+struct mlxsw_sp_acl_tcam_chunk {
+	struct mlxsw_sp_acl_tcam_vchunk *vchunk;
+	struct mlxsw_sp_acl_tcam_region *region;
+	unsigned long priv[];
 	/* priv has to be always the last item */
+};
+
+struct mlxsw_sp_acl_tcam_vchunk {
+	struct mlxsw_sp_acl_tcam_chunk *chunk;
+	struct mlxsw_sp_acl_tcam_chunk *chunk2; /* Used during migration */
+	struct list_head list; /* Member of a TCAM vregion */
+	struct rhash_head ht_node; /* Member of a chunk HT */
+	struct list_head ventry_list;
+	unsigned int priority; /* Priority within the vregion and group */
+	struct mlxsw_sp_acl_tcam_vgroup *vgroup;
+	struct mlxsw_sp_acl_tcam_vregion *vregion;
+	unsigned int ref_count;
 };
 
 struct mlxsw_sp_acl_tcam_entry {
+	struct mlxsw_sp_acl_tcam_ventry *ventry;
 	struct mlxsw_sp_acl_tcam_chunk *chunk;
-	unsigned long priv[0];
+	unsigned long priv[];
 	/* priv has to be always the last item */
 };
 
-static const struct rhashtable_params mlxsw_sp_acl_tcam_chunk_ht_params = {
+struct mlxsw_sp_acl_tcam_ventry {
+	struct mlxsw_sp_acl_tcam_entry *entry;
+	struct list_head list; /* Member of a TCAM vchunk */
+	struct mlxsw_sp_acl_tcam_vchunk *vchunk;
+	struct mlxsw_sp_acl_rule_info *rulei;
+};
+
+static const struct rhashtable_params mlxsw_sp_acl_tcam_vchunk_ht_params = {
 	.key_len = sizeof(unsigned int),
-	.key_offset = offsetof(struct mlxsw_sp_acl_tcam_chunk, priority),
-	.head_offset = offsetof(struct mlxsw_sp_acl_tcam_chunk, ht_node),
+	.key_offset = offsetof(struct mlxsw_sp_acl_tcam_vchunk, priority),
+	.head_offset = offsetof(struct mlxsw_sp_acl_tcam_vchunk, ht_node),
 	.automatic_shrinking = true,
 };
 
@@ -195,55 +271,95 @@
 	int acl_index = 0;
 
 	mlxsw_reg_pagt_pack(pagt_pl, group->id);
-	list_for_each_entry(region, &group->region_list, list)
-		mlxsw_reg_pagt_acl_id_pack(pagt_pl, acl_index++, region->id);
+	list_for_each_entry(region, &group->region_list, list) {
+		bool multi = false;
+
+		/* Check if the next entry in the list has the same vregion. */
+		if (region->list.next != &group->region_list &&
+		    list_next_entry(region, list)->vregion == region->vregion)
+			multi = true;
+		mlxsw_reg_pagt_acl_id_pack(pagt_pl, acl_index++,
+					   region->id, multi);
+	}
 	mlxsw_reg_pagt_size_set(pagt_pl, acl_index);
 	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pagt), pagt_pl);
 }
 
 static int
-mlxsw_sp_acl_tcam_group_add(struct mlxsw_sp *mlxsw_sp,
-			    struct mlxsw_sp_acl_tcam *tcam,
-			    struct mlxsw_sp_acl_tcam_group *group,
-			    const struct mlxsw_sp_acl_tcam_pattern *patterns,
-			    unsigned int patterns_count,
-			    struct mlxsw_afk_element_usage *tmplt_elusage)
+mlxsw_sp_acl_tcam_group_add(struct mlxsw_sp_acl_tcam *tcam,
+			    struct mlxsw_sp_acl_tcam_group *group)
 {
 	int err;
 
 	group->tcam = tcam;
-	group->patterns = patterns;
-	group->patterns_count = patterns_count;
-	if (tmplt_elusage) {
-		group->tmplt_elusage_set = true;
-		memcpy(&group->tmplt_elusage, tmplt_elusage,
-		       sizeof(group->tmplt_elusage));
-	}
 	INIT_LIST_HEAD(&group->region_list);
+
 	err = mlxsw_sp_acl_tcam_group_id_get(tcam, &group->id);
 	if (err)
 		return err;
 
-	err = rhashtable_init(&group->chunk_ht,
-			      &mlxsw_sp_acl_tcam_chunk_ht_params);
+	mutex_init(&group->lock);
+
+	return 0;
+}
+
+static void mlxsw_sp_acl_tcam_group_del(struct mlxsw_sp_acl_tcam_group *group)
+{
+	struct mlxsw_sp_acl_tcam *tcam = group->tcam;
+
+	mutex_destroy(&group->lock);
+	mlxsw_sp_acl_tcam_group_id_put(tcam, group->id);
+	WARN_ON(!list_empty(&group->region_list));
+}
+
+static int
+mlxsw_sp_acl_tcam_vgroup_add(struct mlxsw_sp *mlxsw_sp,
+			     struct mlxsw_sp_acl_tcam *tcam,
+			     struct mlxsw_sp_acl_tcam_vgroup *vgroup,
+			     const struct mlxsw_sp_acl_tcam_pattern *patterns,
+			     unsigned int patterns_count,
+			     struct mlxsw_afk_element_usage *tmplt_elusage,
+			     bool vregion_rehash_enabled,
+			     unsigned int *p_min_prio,
+			     unsigned int *p_max_prio)
+{
+	int err;
+
+	vgroup->patterns = patterns;
+	vgroup->patterns_count = patterns_count;
+	vgroup->vregion_rehash_enabled = vregion_rehash_enabled;
+	vgroup->p_min_prio = p_min_prio;
+	vgroup->p_max_prio = p_max_prio;
+
+	if (tmplt_elusage) {
+		vgroup->tmplt_elusage_set = true;
+		memcpy(&vgroup->tmplt_elusage, tmplt_elusage,
+		       sizeof(vgroup->tmplt_elusage));
+	}
+	INIT_LIST_HEAD(&vgroup->vregion_list);
+
+	err = mlxsw_sp_acl_tcam_group_add(tcam, &vgroup->group);
+	if (err)
+		return err;
+
+	err = rhashtable_init(&vgroup->vchunk_ht,
+			      &mlxsw_sp_acl_tcam_vchunk_ht_params);
 	if (err)
 		goto err_rhashtable_init;
 
 	return 0;
 
 err_rhashtable_init:
-	mlxsw_sp_acl_tcam_group_id_put(tcam, group->id);
+	mlxsw_sp_acl_tcam_group_del(&vgroup->group);
 	return err;
 }
 
-static void mlxsw_sp_acl_tcam_group_del(struct mlxsw_sp *mlxsw_sp,
-					struct mlxsw_sp_acl_tcam_group *group)
+static void
+mlxsw_sp_acl_tcam_vgroup_del(struct mlxsw_sp_acl_tcam_vgroup *vgroup)
 {
-	struct mlxsw_sp_acl_tcam *tcam = group->tcam;
-
-	rhashtable_destroy(&group->chunk_ht);
-	mlxsw_sp_acl_tcam_group_id_put(tcam, group->id);
-	WARN_ON(!list_empty(&group->region_list));
+	rhashtable_destroy(&vgroup->vchunk_ht);
+	mlxsw_sp_acl_tcam_group_del(&vgroup->group);
+	WARN_ON(!list_empty(&vgroup->vregion_list));
 }
 
 static int
@@ -283,76 +399,91 @@
 }
 
 static unsigned int
-mlxsw_sp_acl_tcam_region_prio(struct mlxsw_sp_acl_tcam_region *region)
+mlxsw_sp_acl_tcam_vregion_prio(struct mlxsw_sp_acl_tcam_vregion *vregion)
 {
-	struct mlxsw_sp_acl_tcam_chunk *chunk;
+	struct mlxsw_sp_acl_tcam_vchunk *vchunk;
 
-	if (list_empty(&region->chunk_list))
+	if (list_empty(&vregion->vchunk_list))
 		return 0;
-	/* As a priority of a region, return priority of the first chunk */
-	chunk = list_first_entry(&region->chunk_list, typeof(*chunk), list);
-	return chunk->priority;
+	/* As a priority of a vregion, return priority of the first vchunk */
+	vchunk = list_first_entry(&vregion->vchunk_list,
+				  typeof(*vchunk), list);
+	return vchunk->priority;
 }
 
 static unsigned int
-mlxsw_sp_acl_tcam_region_max_prio(struct mlxsw_sp_acl_tcam_region *region)
+mlxsw_sp_acl_tcam_vregion_max_prio(struct mlxsw_sp_acl_tcam_vregion *vregion)
 {
-	struct mlxsw_sp_acl_tcam_chunk *chunk;
+	struct mlxsw_sp_acl_tcam_vchunk *vchunk;
 
-	if (list_empty(&region->chunk_list))
+	if (list_empty(&vregion->vchunk_list))
 		return 0;
-	chunk = list_last_entry(&region->chunk_list, typeof(*chunk), list);
-	return chunk->priority;
+	vchunk = list_last_entry(&vregion->vchunk_list,
+				 typeof(*vchunk), list);
+	return vchunk->priority;
 }
 
 static void
-mlxsw_sp_acl_tcam_group_list_add(struct mlxsw_sp_acl_tcam_group *group,
-				 struct mlxsw_sp_acl_tcam_region *region)
+mlxsw_sp_acl_tcam_vgroup_prio_update(struct mlxsw_sp_acl_tcam_vgroup *vgroup)
 {
-	struct mlxsw_sp_acl_tcam_region *region2;
-	struct list_head *pos;
+	struct mlxsw_sp_acl_tcam_vregion *vregion;
 
-	/* Position the region inside the list according to priority */
-	list_for_each(pos, &group->region_list) {
-		region2 = list_entry(pos, typeof(*region2), list);
-		if (mlxsw_sp_acl_tcam_region_prio(region2) >
-		    mlxsw_sp_acl_tcam_region_prio(region))
-			break;
-	}
-	list_add_tail(&region->list, pos);
-	group->region_count++;
-}
-
-static void
-mlxsw_sp_acl_tcam_group_list_del(struct mlxsw_sp_acl_tcam_group *group,
-				 struct mlxsw_sp_acl_tcam_region *region)
-{
-	group->region_count--;
-	list_del(&region->list);
+	if (list_empty(&vgroup->vregion_list))
+		return;
+	vregion = list_first_entry(&vgroup->vregion_list,
+				   typeof(*vregion), list);
+	*vgroup->p_min_prio = mlxsw_sp_acl_tcam_vregion_prio(vregion);
+	vregion = list_last_entry(&vgroup->vregion_list,
+				  typeof(*vregion), list);
+	*vgroup->p_max_prio = mlxsw_sp_acl_tcam_vregion_max_prio(vregion);
 }
 
 static int
 mlxsw_sp_acl_tcam_group_region_attach(struct mlxsw_sp *mlxsw_sp,
 				      struct mlxsw_sp_acl_tcam_group *group,
-				      struct mlxsw_sp_acl_tcam_region *region)
+				      struct mlxsw_sp_acl_tcam_region *region,
+				      unsigned int priority,
+				      struct mlxsw_sp_acl_tcam_region *next_region)
 {
+	struct mlxsw_sp_acl_tcam_region *region2;
+	struct list_head *pos;
 	int err;
 
-	if (group->region_count == group->tcam->max_group_size)
-		return -ENOBUFS;
+	mutex_lock(&group->lock);
+	if (group->region_count == group->tcam->max_group_size) {
+		err = -ENOBUFS;
+		goto err_region_count_check;
+	}
 
-	mlxsw_sp_acl_tcam_group_list_add(group, region);
+	if (next_region) {
+		/* If the next region is defined, place the new one
+		 * before it. The next one is a sibling.
+		 */
+		pos = &next_region->list;
+	} else {
+		/* Position the region inside the list according to priority */
+		list_for_each(pos, &group->region_list) {
+			region2 = list_entry(pos, typeof(*region2), list);
+			if (mlxsw_sp_acl_tcam_vregion_prio(region2->vregion) >
+			    priority)
+				break;
+		}
+	}
+	list_add_tail(&region->list, pos);
+	region->group = group;
 
 	err = mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
 	if (err)
 		goto err_group_update;
-	region->group = group;
 
+	group->region_count++;
+	mutex_unlock(&group->lock);
 	return 0;
 
 err_group_update:
-	mlxsw_sp_acl_tcam_group_list_del(group, region);
-	mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
+	list_del(&region->list);
+err_region_count_check:
+	mutex_unlock(&group->lock);
 	return err;
 }
 
@@ -362,67 +493,115 @@
 {
 	struct mlxsw_sp_acl_tcam_group *group = region->group;
 
-	mlxsw_sp_acl_tcam_group_list_del(group, region);
+	mutex_lock(&group->lock);
+	list_del(&region->list);
+	group->region_count--;
 	mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
+	mutex_unlock(&group->lock);
 }
 
-static struct mlxsw_sp_acl_tcam_region *
-mlxsw_sp_acl_tcam_group_region_find(struct mlxsw_sp_acl_tcam_group *group,
-				    unsigned int priority,
-				    struct mlxsw_afk_element_usage *elusage,
-				    bool *p_need_split)
+static int
+mlxsw_sp_acl_tcam_vgroup_vregion_attach(struct mlxsw_sp *mlxsw_sp,
+					struct mlxsw_sp_acl_tcam_vgroup *vgroup,
+					struct mlxsw_sp_acl_tcam_vregion *vregion,
+					unsigned int priority)
 {
-	struct mlxsw_sp_acl_tcam_region *region, *region2;
+	struct mlxsw_sp_acl_tcam_vregion *vregion2;
 	struct list_head *pos;
-	bool issubset;
+	int err;
 
-	list_for_each(pos, &group->region_list) {
-		region = list_entry(pos, typeof(*region), list);
-
-		/* First, check if the requested priority does not rather belong
-		 * under some of the next regions.
-		 */
-		if (pos->next != &group->region_list) { /* not last */
-			region2 = list_entry(pos->next, typeof(*region2), list);
-			if (priority >= mlxsw_sp_acl_tcam_region_prio(region2))
-				continue;
-		}
-
-		issubset = mlxsw_afk_key_info_subset(region->key_info, elusage);
-
-		/* If requested element usage would not fit and the priority
-		 * is lower than the currently inspected region we cannot
-		 * use this region, so return NULL to indicate new region has
-		 * to be created.
-		 */
-		if (!issubset &&
-		    priority < mlxsw_sp_acl_tcam_region_prio(region))
-			return NULL;
-
-		/* If requested element usage would not fit and the priority
-		 * is higher than the currently inspected region we cannot
-		 * use this region. There is still some hope that the next
-		 * region would be the fit. So let it be processed and
-		 * eventually break at the check right above this.
-		 */
-		if (!issubset &&
-		    priority > mlxsw_sp_acl_tcam_region_max_prio(region))
-			continue;
-
-		/* Indicate if the region needs to be split in order to add
-		 * the requested priority. Split is needed when requested
-		 * element usage won't fit into the found region.
-		 */
-		*p_need_split = !issubset;
-		return region;
+	/* Position the vregion inside the list according to priority */
+	list_for_each(pos, &vgroup->vregion_list) {
+		vregion2 = list_entry(pos, typeof(*vregion2), list);
+		if (mlxsw_sp_acl_tcam_vregion_prio(vregion2) > priority)
+			break;
 	}
-	return NULL; /* New region has to be created. */
+	list_add_tail(&vregion->list, pos);
+
+	err = mlxsw_sp_acl_tcam_group_region_attach(mlxsw_sp, &vgroup->group,
+						    vregion->region,
+						    priority, NULL);
+	if (err)
+		goto err_region_attach;
+
+	return 0;
+
+err_region_attach:
+	list_del(&vregion->list);
+	return err;
 }
 
 static void
-mlxsw_sp_acl_tcam_group_use_patterns(struct mlxsw_sp_acl_tcam_group *group,
-				     struct mlxsw_afk_element_usage *elusage,
-				     struct mlxsw_afk_element_usage *out)
+mlxsw_sp_acl_tcam_vgroup_vregion_detach(struct mlxsw_sp *mlxsw_sp,
+					struct mlxsw_sp_acl_tcam_vregion *vregion)
+{
+	list_del(&vregion->list);
+	if (vregion->region2)
+		mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp,
+						      vregion->region2);
+	mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp, vregion->region);
+}
+
+static struct mlxsw_sp_acl_tcam_vregion *
+mlxsw_sp_acl_tcam_vgroup_vregion_find(struct mlxsw_sp_acl_tcam_vgroup *vgroup,
+				      unsigned int priority,
+				      struct mlxsw_afk_element_usage *elusage,
+				      bool *p_need_split)
+{
+	struct mlxsw_sp_acl_tcam_vregion *vregion, *vregion2;
+	struct list_head *pos;
+	bool issubset;
+
+	list_for_each(pos, &vgroup->vregion_list) {
+		vregion = list_entry(pos, typeof(*vregion), list);
+
+		/* First, check if the requested priority does not rather belong
+		 * under some of the next vregions.
+		 */
+		if (pos->next != &vgroup->vregion_list) { /* not last */
+			vregion2 = list_entry(pos->next, typeof(*vregion2),
+					      list);
+			if (priority >=
+			    mlxsw_sp_acl_tcam_vregion_prio(vregion2))
+				continue;
+		}
+
+		issubset = mlxsw_afk_key_info_subset(vregion->key_info,
+						     elusage);
+
+		/* If requested element usage would not fit and the priority
+		 * is lower than the currently inspected vregion we cannot
+		 * use this region, so return NULL to indicate new vregion has
+		 * to be created.
+		 */
+		if (!issubset &&
+		    priority < mlxsw_sp_acl_tcam_vregion_prio(vregion))
+			return NULL;
+
+		/* If requested element usage would not fit and the priority
+		 * is higher than the currently inspected vregion we cannot
+		 * use this vregion. There is still some hope that the next
+		 * vregion would be the fit. So let it be processed and
+		 * eventually break at the check right above this.
+		 */
+		if (!issubset &&
+		    priority > mlxsw_sp_acl_tcam_vregion_max_prio(vregion))
+			continue;
+
+		/* Indicate if the vregion needs to be split in order to add
+		 * the requested priority. Split is needed when requested
+		 * element usage won't fit into the found vregion.
+		 */
+		*p_need_split = !issubset;
+		return vregion;
+	}
+	return NULL; /* New vregion has to be created. */
+}
+
+static void
+mlxsw_sp_acl_tcam_vgroup_use_patterns(struct mlxsw_sp_acl_tcam_vgroup *vgroup,
+				      struct mlxsw_afk_element_usage *elusage,
+				      struct mlxsw_afk_element_usage *out)
 {
 	const struct mlxsw_sp_acl_tcam_pattern *pattern;
 	int i;
@@ -430,14 +609,14 @@
 	/* In case the template is set, we don't have to look up the pattern
 	 * and just use the template.
 	 */
-	if (group->tmplt_elusage_set) {
-		memcpy(out, &group->tmplt_elusage, sizeof(*out));
+	if (vgroup->tmplt_elusage_set) {
+		memcpy(out, &vgroup->tmplt_elusage, sizeof(*out));
 		WARN_ON(!mlxsw_afk_element_usage_subset(elusage, out));
 		return;
 	}
 
-	for (i = 0; i < group->patterns_count; i++) {
-		pattern = &group->patterns[i];
+	for (i = 0; i < vgroup->patterns_count; i++) {
+		pattern = &vgroup->patterns[i];
 		mlxsw_afk_element_usage_fill(out, pattern->elements,
 					     pattern->elements_count);
 		if (mlxsw_afk_element_usage_subset(elusage, out))
@@ -511,24 +690,19 @@
 static struct mlxsw_sp_acl_tcam_region *
 mlxsw_sp_acl_tcam_region_create(struct mlxsw_sp *mlxsw_sp,
 				struct mlxsw_sp_acl_tcam *tcam,
-				struct mlxsw_afk_element_usage *elusage)
+				struct mlxsw_sp_acl_tcam_vregion *vregion,
+				void *hints_priv)
 {
 	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
-	struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl);
 	struct mlxsw_sp_acl_tcam_region *region;
 	int err;
 
 	region = kzalloc(sizeof(*region) + ops->region_priv_size, GFP_KERNEL);
 	if (!region)
 		return ERR_PTR(-ENOMEM);
-	INIT_LIST_HEAD(&region->chunk_list);
 	region->mlxsw_sp = mlxsw_sp;
-
-	region->key_info = mlxsw_afk_key_info_get(afk, elusage);
-	if (IS_ERR(region->key_info)) {
-		err = PTR_ERR(region->key_info);
-		goto err_key_info_get;
-	}
+	region->vregion = vregion;
+	region->key_info = vregion->key_info;
 
 	err = mlxsw_sp_acl_tcam_region_id_get(tcam, &region->id);
 	if (err)
@@ -547,7 +721,8 @@
 	if (err)
 		goto err_tcam_region_enable;
 
-	err = ops->region_init(mlxsw_sp, region->priv, tcam->priv, region);
+	err = ops->region_init(mlxsw_sp, region->priv, tcam->priv,
+			       region, hints_priv);
 	if (err)
 		goto err_tcam_region_init;
 
@@ -561,8 +736,6 @@
 err_tcam_region_associate:
 	mlxsw_sp_acl_tcam_region_id_put(tcam, region->id);
 err_region_id_get:
-	mlxsw_afk_key_info_put(region->key_info);
-err_key_info_get:
 	kfree(region);
 	return ERR_PTR(err);
 }
@@ -576,116 +749,247 @@
 	ops->region_fini(mlxsw_sp, region->priv);
 	mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region);
 	mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region);
-	mlxsw_sp_acl_tcam_region_id_put(region->group->tcam, region->id);
-	mlxsw_afk_key_info_put(region->key_info);
+	mlxsw_sp_acl_tcam_region_id_put(region->group->tcam,
+					region->id);
 	kfree(region);
 }
 
-static int
-mlxsw_sp_acl_tcam_chunk_assoc(struct mlxsw_sp *mlxsw_sp,
-			      struct mlxsw_sp_acl_tcam_group *group,
-			      unsigned int priority,
-			      struct mlxsw_afk_element_usage *elusage,
-			      struct mlxsw_sp_acl_tcam_chunk *chunk)
+static void
+mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(struct mlxsw_sp_acl_tcam_vregion *vregion)
 {
-	struct mlxsw_sp_acl_tcam_region *region;
-	bool region_created = false;
-	bool need_split;
-	int err;
+	unsigned long interval = vregion->tcam->vregion_rehash_intrvl;
 
-	region = mlxsw_sp_acl_tcam_group_region_find(group, priority, elusage,
-						     &need_split);
-	if (region && need_split) {
-		/* According to priority, the chunk should belong to an
-		 * existing region. However, this chunk needs elements
-		 * that region does not contain. We need to split the existing
-		 * region into two and create a new region for this chunk
-		 * in between. This is not supported now.
-		 */
-		return -EOPNOTSUPP;
-	}
-	if (!region) {
-		struct mlxsw_afk_element_usage region_elusage;
-
-		mlxsw_sp_acl_tcam_group_use_patterns(group, elusage,
-						     &region_elusage);
-		region = mlxsw_sp_acl_tcam_region_create(mlxsw_sp, group->tcam,
-							 &region_elusage);
-		if (IS_ERR(region))
-			return PTR_ERR(region);
-		region_created = true;
-	}
-
-	chunk->region = region;
-	list_add_tail(&chunk->list, &region->chunk_list);
-
-	if (!region_created)
-		return 0;
-
-	err = mlxsw_sp_acl_tcam_group_region_attach(mlxsw_sp, group, region);
-	if (err)
-		goto err_group_region_attach;
-
-	return 0;
-
-err_group_region_attach:
-	mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, region);
-	return err;
+	if (!interval)
+		return;
+	mlxsw_core_schedule_dw(&vregion->rehash.dw,
+			       msecs_to_jiffies(interval));
 }
 
 static void
-mlxsw_sp_acl_tcam_chunk_deassoc(struct mlxsw_sp *mlxsw_sp,
-				struct mlxsw_sp_acl_tcam_chunk *chunk)
-{
-	struct mlxsw_sp_acl_tcam_region *region = chunk->region;
+mlxsw_sp_acl_tcam_vregion_rehash(struct mlxsw_sp *mlxsw_sp,
+				 struct mlxsw_sp_acl_tcam_vregion *vregion,
+				 int *credits);
 
-	list_del(&chunk->list);
-	if (list_empty(&region->chunk_list)) {
-		mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp, region);
-		mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, region);
+static void mlxsw_sp_acl_tcam_vregion_rehash_work(struct work_struct *work)
+{
+	struct mlxsw_sp_acl_tcam_vregion *vregion =
+		container_of(work, struct mlxsw_sp_acl_tcam_vregion,
+			     rehash.dw.work);
+	int credits = MLXSW_SP_ACL_TCAM_VREGION_REHASH_CREDITS;
+
+	mlxsw_sp_acl_tcam_vregion_rehash(vregion->mlxsw_sp, vregion, &credits);
+	if (credits < 0)
+		/* Rehash gone out of credits so it was interrupted.
+		 * Schedule the work as soon as possible to continue.
+		 */
+		mlxsw_core_schedule_dw(&vregion->rehash.dw, 0);
+	else
+		mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(vregion);
+}
+
+static void
+mlxsw_sp_acl_tcam_rehash_ctx_vchunk_changed(struct mlxsw_sp_acl_tcam_vchunk *vchunk)
+{
+	struct mlxsw_sp_acl_tcam_vregion *vregion = vchunk->vregion;
+
+	/* If a rule was added or deleted from vchunk which is currently
+	 * under rehash migration, we have to reset the ventry pointers
+	 * to make sure all rules are properly migrated.
+	 */
+	if (vregion->rehash.ctx.current_vchunk == vchunk) {
+		vregion->rehash.ctx.start_ventry = NULL;
+		vregion->rehash.ctx.stop_ventry = NULL;
 	}
+}
+
+static void
+mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(struct mlxsw_sp_acl_tcam_vregion *vregion)
+{
+	/* If a chunk was added or deleted from vregion we have to reset
+	 * the current chunk pointer to make sure all chunks
+	 * are properly migrated.
+	 */
+	vregion->rehash.ctx.current_vchunk = NULL;
+}
+
+static struct mlxsw_sp_acl_tcam_vregion *
+mlxsw_sp_acl_tcam_vregion_create(struct mlxsw_sp *mlxsw_sp,
+				 struct mlxsw_sp_acl_tcam_vgroup *vgroup,
+				 unsigned int priority,
+				 struct mlxsw_afk_element_usage *elusage)
+{
+	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
+	struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl);
+	struct mlxsw_sp_acl_tcam *tcam = vgroup->group.tcam;
+	struct mlxsw_sp_acl_tcam_vregion *vregion;
+	int err;
+
+	vregion = kzalloc(sizeof(*vregion), GFP_KERNEL);
+	if (!vregion)
+		return ERR_PTR(-ENOMEM);
+	INIT_LIST_HEAD(&vregion->vchunk_list);
+	mutex_init(&vregion->lock);
+	vregion->tcam = tcam;
+	vregion->mlxsw_sp = mlxsw_sp;
+	vregion->vgroup = vgroup;
+	vregion->ref_count = 1;
+
+	vregion->key_info = mlxsw_afk_key_info_get(afk, elusage);
+	if (IS_ERR(vregion->key_info)) {
+		err = PTR_ERR(vregion->key_info);
+		goto err_key_info_get;
+	}
+
+	vregion->region = mlxsw_sp_acl_tcam_region_create(mlxsw_sp, tcam,
+							  vregion, NULL);
+	if (IS_ERR(vregion->region)) {
+		err = PTR_ERR(vregion->region);
+		goto err_region_create;
+	}
+
+	err = mlxsw_sp_acl_tcam_vgroup_vregion_attach(mlxsw_sp, vgroup, vregion,
+						      priority);
+	if (err)
+		goto err_vgroup_vregion_attach;
+
+	if (vgroup->vregion_rehash_enabled && ops->region_rehash_hints_get) {
+		/* Create the delayed work for vregion periodic rehash */
+		INIT_DELAYED_WORK(&vregion->rehash.dw,
+				  mlxsw_sp_acl_tcam_vregion_rehash_work);
+		mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(vregion);
+		mutex_lock(&tcam->lock);
+		list_add_tail(&vregion->tlist, &tcam->vregion_list);
+		mutex_unlock(&tcam->lock);
+	}
+
+	return vregion;
+
+err_vgroup_vregion_attach:
+	mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, vregion->region);
+err_region_create:
+	mlxsw_afk_key_info_put(vregion->key_info);
+err_key_info_get:
+	kfree(vregion);
+	return ERR_PTR(err);
+}
+
+static void
+mlxsw_sp_acl_tcam_vregion_destroy(struct mlxsw_sp *mlxsw_sp,
+				  struct mlxsw_sp_acl_tcam_vregion *vregion)
+{
+	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
+	struct mlxsw_sp_acl_tcam_vgroup *vgroup = vregion->vgroup;
+	struct mlxsw_sp_acl_tcam *tcam = vregion->tcam;
+
+	if (vgroup->vregion_rehash_enabled && ops->region_rehash_hints_get) {
+		mutex_lock(&tcam->lock);
+		list_del(&vregion->tlist);
+		mutex_unlock(&tcam->lock);
+		cancel_delayed_work_sync(&vregion->rehash.dw);
+	}
+	mlxsw_sp_acl_tcam_vgroup_vregion_detach(mlxsw_sp, vregion);
+	if (vregion->region2)
+		mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, vregion->region2);
+	mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, vregion->region);
+	mlxsw_afk_key_info_put(vregion->key_info);
+	mutex_destroy(&vregion->lock);
+	kfree(vregion);
+}
+
+u32 mlxsw_sp_acl_tcam_vregion_rehash_intrvl_get(struct mlxsw_sp *mlxsw_sp,
+						struct mlxsw_sp_acl_tcam *tcam)
+{
+	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
+	u32 vregion_rehash_intrvl;
+
+	if (WARN_ON(!ops->region_rehash_hints_get))
+		return 0;
+	vregion_rehash_intrvl = tcam->vregion_rehash_intrvl;
+	return vregion_rehash_intrvl;
+}
+
+int mlxsw_sp_acl_tcam_vregion_rehash_intrvl_set(struct mlxsw_sp *mlxsw_sp,
+						struct mlxsw_sp_acl_tcam *tcam,
+						u32 val)
+{
+	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
+	struct mlxsw_sp_acl_tcam_vregion *vregion;
+
+	if (val < MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_MIN && val)
+		return -EINVAL;
+	if (WARN_ON(!ops->region_rehash_hints_get))
+		return -EOPNOTSUPP;
+	tcam->vregion_rehash_intrvl = val;
+	mutex_lock(&tcam->lock);
+	list_for_each_entry(vregion, &tcam->vregion_list, tlist) {
+		if (val)
+			mlxsw_core_schedule_dw(&vregion->rehash.dw, 0);
+		else
+			cancel_delayed_work_sync(&vregion->rehash.dw);
+	}
+	mutex_unlock(&tcam->lock);
+	return 0;
+}
+
+static struct mlxsw_sp_acl_tcam_vregion *
+mlxsw_sp_acl_tcam_vregion_get(struct mlxsw_sp *mlxsw_sp,
+			      struct mlxsw_sp_acl_tcam_vgroup *vgroup,
+			      unsigned int priority,
+			      struct mlxsw_afk_element_usage *elusage)
+{
+	struct mlxsw_afk_element_usage vregion_elusage;
+	struct mlxsw_sp_acl_tcam_vregion *vregion;
+	bool need_split;
+
+	vregion = mlxsw_sp_acl_tcam_vgroup_vregion_find(vgroup, priority,
+							elusage, &need_split);
+	if (vregion) {
+		if (need_split) {
+			/* According to priority, new vchunk should belong to
+			 * an existing vregion. However, this vchunk needs
+			 * elements that vregion does not contain. We need
+			 * to split the existing vregion into two and create
+			 * a new vregion for the new vchunk in between.
+			 * This is not supported now.
+			 */
+			return ERR_PTR(-EOPNOTSUPP);
+		}
+		vregion->ref_count++;
+		return vregion;
+	}
+
+	mlxsw_sp_acl_tcam_vgroup_use_patterns(vgroup, elusage,
+					      &vregion_elusage);
+
+	return mlxsw_sp_acl_tcam_vregion_create(mlxsw_sp, vgroup, priority,
+						&vregion_elusage);
+}
+
+static void
+mlxsw_sp_acl_tcam_vregion_put(struct mlxsw_sp *mlxsw_sp,
+			      struct mlxsw_sp_acl_tcam_vregion *vregion)
+{
+	if (--vregion->ref_count)
+		return;
+	mlxsw_sp_acl_tcam_vregion_destroy(mlxsw_sp, vregion);
 }
 
 static struct mlxsw_sp_acl_tcam_chunk *
 mlxsw_sp_acl_tcam_chunk_create(struct mlxsw_sp *mlxsw_sp,
-			       struct mlxsw_sp_acl_tcam_group *group,
-			       unsigned int priority,
-			       struct mlxsw_afk_element_usage *elusage)
+			       struct mlxsw_sp_acl_tcam_vchunk *vchunk,
+			       struct mlxsw_sp_acl_tcam_region *region)
 {
 	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
 	struct mlxsw_sp_acl_tcam_chunk *chunk;
-	int err;
-
-	if (priority == MLXSW_SP_ACL_TCAM_CATCHALL_PRIO)
-		return ERR_PTR(-EINVAL);
 
 	chunk = kzalloc(sizeof(*chunk) + ops->chunk_priv_size, GFP_KERNEL);
 	if (!chunk)
 		return ERR_PTR(-ENOMEM);
-	chunk->priority = priority;
-	chunk->group = group;
-	chunk->ref_count = 1;
+	chunk->vchunk = vchunk;
+	chunk->region = region;
 
-	err = mlxsw_sp_acl_tcam_chunk_assoc(mlxsw_sp, group, priority,
-					    elusage, chunk);
-	if (err)
-		goto err_chunk_assoc;
-
-	ops->chunk_init(chunk->region->priv, chunk->priv, priority);
-
-	err = rhashtable_insert_fast(&group->chunk_ht, &chunk->ht_node,
-				     mlxsw_sp_acl_tcam_chunk_ht_params);
-	if (err)
-		goto err_rhashtable_insert;
-
+	ops->chunk_init(region->priv, chunk->priv, vchunk->priority);
 	return chunk;
-
-err_rhashtable_insert:
-	ops->chunk_fini(chunk->priv);
-	mlxsw_sp_acl_tcam_chunk_deassoc(mlxsw_sp, chunk);
-err_chunk_assoc:
-	kfree(chunk);
-	return ERR_PTR(err);
 }
 
 static void
@@ -693,90 +997,178 @@
 				struct mlxsw_sp_acl_tcam_chunk *chunk)
 {
 	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
-	struct mlxsw_sp_acl_tcam_group *group = chunk->group;
 
-	rhashtable_remove_fast(&group->chunk_ht, &chunk->ht_node,
-			       mlxsw_sp_acl_tcam_chunk_ht_params);
 	ops->chunk_fini(chunk->priv);
-	mlxsw_sp_acl_tcam_chunk_deassoc(mlxsw_sp, chunk);
 	kfree(chunk);
 }
 
-static struct mlxsw_sp_acl_tcam_chunk *
-mlxsw_sp_acl_tcam_chunk_get(struct mlxsw_sp *mlxsw_sp,
-			    struct mlxsw_sp_acl_tcam_group *group,
-			    unsigned int priority,
-			    struct mlxsw_afk_element_usage *elusage)
+static struct mlxsw_sp_acl_tcam_vchunk *
+mlxsw_sp_acl_tcam_vchunk_create(struct mlxsw_sp *mlxsw_sp,
+				struct mlxsw_sp_acl_tcam_vgroup *vgroup,
+				unsigned int priority,
+				struct mlxsw_afk_element_usage *elusage)
 {
-	struct mlxsw_sp_acl_tcam_chunk *chunk;
+	struct mlxsw_sp_acl_tcam_vchunk *vchunk, *vchunk2;
+	struct mlxsw_sp_acl_tcam_vregion *vregion;
+	struct list_head *pos;
+	int err;
 
-	chunk = rhashtable_lookup_fast(&group->chunk_ht, &priority,
-				       mlxsw_sp_acl_tcam_chunk_ht_params);
-	if (chunk) {
-		if (WARN_ON(!mlxsw_afk_key_info_subset(chunk->region->key_info,
+	if (priority == MLXSW_SP_ACL_TCAM_CATCHALL_PRIO)
+		return ERR_PTR(-EINVAL);
+
+	vchunk = kzalloc(sizeof(*vchunk), GFP_KERNEL);
+	if (!vchunk)
+		return ERR_PTR(-ENOMEM);
+	INIT_LIST_HEAD(&vchunk->ventry_list);
+	vchunk->priority = priority;
+	vchunk->vgroup = vgroup;
+	vchunk->ref_count = 1;
+
+	vregion = mlxsw_sp_acl_tcam_vregion_get(mlxsw_sp, vgroup,
+						priority, elusage);
+	if (IS_ERR(vregion)) {
+		err = PTR_ERR(vregion);
+		goto err_vregion_get;
+	}
+
+	vchunk->vregion = vregion;
+
+	err = rhashtable_insert_fast(&vgroup->vchunk_ht, &vchunk->ht_node,
+				     mlxsw_sp_acl_tcam_vchunk_ht_params);
+	if (err)
+		goto err_rhashtable_insert;
+
+	mutex_lock(&vregion->lock);
+	vchunk->chunk = mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, vchunk,
+						       vchunk->vregion->region);
+	if (IS_ERR(vchunk->chunk)) {
+		mutex_unlock(&vregion->lock);
+		err = PTR_ERR(vchunk->chunk);
+		goto err_chunk_create;
+	}
+
+	mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(vregion);
+
+	/* Position the vchunk inside the list according to priority */
+	list_for_each(pos, &vregion->vchunk_list) {
+		vchunk2 = list_entry(pos, typeof(*vchunk2), list);
+		if (vchunk2->priority > priority)
+			break;
+	}
+	list_add_tail(&vchunk->list, pos);
+	mutex_unlock(&vregion->lock);
+	mlxsw_sp_acl_tcam_vgroup_prio_update(vgroup);
+
+	return vchunk;
+
+err_chunk_create:
+	rhashtable_remove_fast(&vgroup->vchunk_ht, &vchunk->ht_node,
+			       mlxsw_sp_acl_tcam_vchunk_ht_params);
+err_rhashtable_insert:
+	mlxsw_sp_acl_tcam_vregion_put(mlxsw_sp, vregion);
+err_vregion_get:
+	kfree(vchunk);
+	return ERR_PTR(err);
+}
+
+static void
+mlxsw_sp_acl_tcam_vchunk_destroy(struct mlxsw_sp *mlxsw_sp,
+				 struct mlxsw_sp_acl_tcam_vchunk *vchunk)
+{
+	struct mlxsw_sp_acl_tcam_vregion *vregion = vchunk->vregion;
+	struct mlxsw_sp_acl_tcam_vgroup *vgroup = vchunk->vgroup;
+
+	mutex_lock(&vregion->lock);
+	mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(vregion);
+	list_del(&vchunk->list);
+	if (vchunk->chunk2)
+		mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk2);
+	mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk);
+	mutex_unlock(&vregion->lock);
+	rhashtable_remove_fast(&vgroup->vchunk_ht, &vchunk->ht_node,
+			       mlxsw_sp_acl_tcam_vchunk_ht_params);
+	mlxsw_sp_acl_tcam_vregion_put(mlxsw_sp, vchunk->vregion);
+	kfree(vchunk);
+	mlxsw_sp_acl_tcam_vgroup_prio_update(vgroup);
+}
+
+static struct mlxsw_sp_acl_tcam_vchunk *
+mlxsw_sp_acl_tcam_vchunk_get(struct mlxsw_sp *mlxsw_sp,
+			     struct mlxsw_sp_acl_tcam_vgroup *vgroup,
+			     unsigned int priority,
+			     struct mlxsw_afk_element_usage *elusage)
+{
+	struct mlxsw_sp_acl_tcam_vchunk *vchunk;
+
+	vchunk = rhashtable_lookup_fast(&vgroup->vchunk_ht, &priority,
+					mlxsw_sp_acl_tcam_vchunk_ht_params);
+	if (vchunk) {
+		if (WARN_ON(!mlxsw_afk_key_info_subset(vchunk->vregion->key_info,
 						       elusage)))
 			return ERR_PTR(-EINVAL);
-		chunk->ref_count++;
-		return chunk;
+		vchunk->ref_count++;
+		return vchunk;
 	}
-	return mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, group,
-					      priority, elusage);
+	return mlxsw_sp_acl_tcam_vchunk_create(mlxsw_sp, vgroup,
+					       priority, elusage);
 }
 
-static void mlxsw_sp_acl_tcam_chunk_put(struct mlxsw_sp *mlxsw_sp,
-					struct mlxsw_sp_acl_tcam_chunk *chunk)
+static void
+mlxsw_sp_acl_tcam_vchunk_put(struct mlxsw_sp *mlxsw_sp,
+			     struct mlxsw_sp_acl_tcam_vchunk *vchunk)
 {
-	if (--chunk->ref_count)
+	if (--vchunk->ref_count)
 		return;
-	mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, chunk);
+	mlxsw_sp_acl_tcam_vchunk_destroy(mlxsw_sp, vchunk);
 }
 
-static size_t mlxsw_sp_acl_tcam_entry_priv_size(struct mlxsw_sp *mlxsw_sp)
+static struct mlxsw_sp_acl_tcam_entry *
+mlxsw_sp_acl_tcam_entry_create(struct mlxsw_sp *mlxsw_sp,
+			       struct mlxsw_sp_acl_tcam_ventry *ventry,
+			       struct mlxsw_sp_acl_tcam_chunk *chunk)
+{
+	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
+	struct mlxsw_sp_acl_tcam_entry *entry;
+	int err;
+
+	entry = kzalloc(sizeof(*entry) + ops->entry_priv_size, GFP_KERNEL);
+	if (!entry)
+		return ERR_PTR(-ENOMEM);
+	entry->ventry = ventry;
+	entry->chunk = chunk;
+
+	err = ops->entry_add(mlxsw_sp, chunk->region->priv, chunk->priv,
+			     entry->priv, ventry->rulei);
+	if (err)
+		goto err_entry_add;
+
+	return entry;
+
+err_entry_add:
+	kfree(entry);
+	return ERR_PTR(err);
+}
+
+static void mlxsw_sp_acl_tcam_entry_destroy(struct mlxsw_sp *mlxsw_sp,
+					    struct mlxsw_sp_acl_tcam_entry *entry)
 {
 	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
 
-	return ops->entry_priv_size;
+	ops->entry_del(mlxsw_sp, entry->chunk->region->priv,
+		       entry->chunk->priv, entry->priv);
+	kfree(entry);
 }
 
-static int mlxsw_sp_acl_tcam_entry_add(struct mlxsw_sp *mlxsw_sp,
-				       struct mlxsw_sp_acl_tcam_group *group,
+static int
+mlxsw_sp_acl_tcam_entry_action_replace(struct mlxsw_sp *mlxsw_sp,
+				       struct mlxsw_sp_acl_tcam_region *region,
 				       struct mlxsw_sp_acl_tcam_entry *entry,
 				       struct mlxsw_sp_acl_rule_info *rulei)
 {
 	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
-	struct mlxsw_sp_acl_tcam_chunk *chunk;
-	struct mlxsw_sp_acl_tcam_region *region;
-	int err;
 
-	chunk = mlxsw_sp_acl_tcam_chunk_get(mlxsw_sp, group, rulei->priority,
-					    &rulei->values.elusage);
-	if (IS_ERR(chunk))
-		return PTR_ERR(chunk);
-
-	region = chunk->region;
-
-	err = ops->entry_add(mlxsw_sp, region->priv, chunk->priv,
-			     entry->priv, rulei);
-	if (err)
-		goto err_entry_add;
-	entry->chunk = chunk;
-
-	return 0;
-
-err_entry_add:
-	mlxsw_sp_acl_tcam_chunk_put(mlxsw_sp, chunk);
-	return err;
-}
-
-static void mlxsw_sp_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp,
-					struct mlxsw_sp_acl_tcam_entry *entry)
-{
-	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
-	struct mlxsw_sp_acl_tcam_chunk *chunk = entry->chunk;
-	struct mlxsw_sp_acl_tcam_region *region = chunk->region;
-
-	ops->entry_del(mlxsw_sp, region->priv, chunk->priv, entry->priv);
-	mlxsw_sp_acl_tcam_chunk_put(mlxsw_sp, chunk);
+	return ops->entry_action_replace(mlxsw_sp, region->priv,
+					 entry->priv, rulei);
 }
 
 static int
@@ -785,11 +1177,372 @@
 				      bool *activity)
 {
 	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
-	struct mlxsw_sp_acl_tcam_chunk *chunk = entry->chunk;
-	struct mlxsw_sp_acl_tcam_region *region = chunk->region;
 
-	return ops->entry_activity_get(mlxsw_sp, region->priv,
+	return ops->entry_activity_get(mlxsw_sp, entry->chunk->region->priv,
 				       entry->priv, activity);
+}
+
+static int mlxsw_sp_acl_tcam_ventry_add(struct mlxsw_sp *mlxsw_sp,
+					struct mlxsw_sp_acl_tcam_vgroup *vgroup,
+					struct mlxsw_sp_acl_tcam_ventry *ventry,
+					struct mlxsw_sp_acl_rule_info *rulei)
+{
+	struct mlxsw_sp_acl_tcam_vregion *vregion;
+	struct mlxsw_sp_acl_tcam_vchunk *vchunk;
+	int err;
+
+	vchunk = mlxsw_sp_acl_tcam_vchunk_get(mlxsw_sp, vgroup, rulei->priority,
+					      &rulei->values.elusage);
+	if (IS_ERR(vchunk))
+		return PTR_ERR(vchunk);
+
+	ventry->vchunk = vchunk;
+	ventry->rulei = rulei;
+	vregion = vchunk->vregion;
+
+	mutex_lock(&vregion->lock);
+	ventry->entry = mlxsw_sp_acl_tcam_entry_create(mlxsw_sp, ventry,
+						       vchunk->chunk);
+	if (IS_ERR(ventry->entry)) {
+		mutex_unlock(&vregion->lock);
+		err = PTR_ERR(ventry->entry);
+		goto err_entry_create;
+	}
+
+	list_add_tail(&ventry->list, &vchunk->ventry_list);
+	mlxsw_sp_acl_tcam_rehash_ctx_vchunk_changed(vchunk);
+	mutex_unlock(&vregion->lock);
+
+	return 0;
+
+err_entry_create:
+	mlxsw_sp_acl_tcam_vchunk_put(mlxsw_sp, vchunk);
+	return err;
+}
+
+static void mlxsw_sp_acl_tcam_ventry_del(struct mlxsw_sp *mlxsw_sp,
+					 struct mlxsw_sp_acl_tcam_ventry *ventry)
+{
+	struct mlxsw_sp_acl_tcam_vchunk *vchunk = ventry->vchunk;
+	struct mlxsw_sp_acl_tcam_vregion *vregion = vchunk->vregion;
+
+	mutex_lock(&vregion->lock);
+	mlxsw_sp_acl_tcam_rehash_ctx_vchunk_changed(vchunk);
+	list_del(&ventry->list);
+	mlxsw_sp_acl_tcam_entry_destroy(mlxsw_sp, ventry->entry);
+	mutex_unlock(&vregion->lock);
+	mlxsw_sp_acl_tcam_vchunk_put(mlxsw_sp, vchunk);
+}
+
+static int
+mlxsw_sp_acl_tcam_ventry_action_replace(struct mlxsw_sp *mlxsw_sp,
+					struct mlxsw_sp_acl_tcam_ventry *ventry,
+					struct mlxsw_sp_acl_rule_info *rulei)
+{
+	struct mlxsw_sp_acl_tcam_vchunk *vchunk = ventry->vchunk;
+
+	return mlxsw_sp_acl_tcam_entry_action_replace(mlxsw_sp,
+						      vchunk->vregion->region,
+						      ventry->entry, rulei);
+}
+
+static int
+mlxsw_sp_acl_tcam_ventry_activity_get(struct mlxsw_sp *mlxsw_sp,
+				      struct mlxsw_sp_acl_tcam_ventry *ventry,
+				      bool *activity)
+{
+	return mlxsw_sp_acl_tcam_entry_activity_get(mlxsw_sp,
+						    ventry->entry, activity);
+}
+
+static int
+mlxsw_sp_acl_tcam_ventry_migrate(struct mlxsw_sp *mlxsw_sp,
+				 struct mlxsw_sp_acl_tcam_ventry *ventry,
+				 struct mlxsw_sp_acl_tcam_chunk *chunk,
+				 int *credits)
+{
+	struct mlxsw_sp_acl_tcam_entry *new_entry;
+
+	/* First check if the entry is not already where we want it to be. */
+	if (ventry->entry->chunk == chunk)
+		return 0;
+
+	if (--(*credits) < 0)
+		return 0;
+
+	new_entry = mlxsw_sp_acl_tcam_entry_create(mlxsw_sp, ventry, chunk);
+	if (IS_ERR(new_entry))
+		return PTR_ERR(new_entry);
+	mlxsw_sp_acl_tcam_entry_destroy(mlxsw_sp, ventry->entry);
+	ventry->entry = new_entry;
+	return 0;
+}
+
+static int
+mlxsw_sp_acl_tcam_vchunk_migrate_start(struct mlxsw_sp *mlxsw_sp,
+				       struct mlxsw_sp_acl_tcam_vchunk *vchunk,
+				       struct mlxsw_sp_acl_tcam_region *region,
+				       struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
+{
+	struct mlxsw_sp_acl_tcam_chunk *new_chunk;
+
+	new_chunk = mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, vchunk, region);
+	if (IS_ERR(new_chunk))
+		return PTR_ERR(new_chunk);
+	vchunk->chunk2 = vchunk->chunk;
+	vchunk->chunk = new_chunk;
+	ctx->current_vchunk = vchunk;
+	ctx->start_ventry = NULL;
+	ctx->stop_ventry = NULL;
+	return 0;
+}
+
+static void
+mlxsw_sp_acl_tcam_vchunk_migrate_end(struct mlxsw_sp *mlxsw_sp,
+				     struct mlxsw_sp_acl_tcam_vchunk *vchunk,
+				     struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
+{
+	mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk2);
+	vchunk->chunk2 = NULL;
+	ctx->current_vchunk = NULL;
+}
+
+static int
+mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
+				     struct mlxsw_sp_acl_tcam_vchunk *vchunk,
+				     struct mlxsw_sp_acl_tcam_region *region,
+				     struct mlxsw_sp_acl_tcam_rehash_ctx *ctx,
+				     int *credits)
+{
+	struct mlxsw_sp_acl_tcam_ventry *ventry;
+	int err;
+
+	if (vchunk->chunk->region != region) {
+		err = mlxsw_sp_acl_tcam_vchunk_migrate_start(mlxsw_sp, vchunk,
+							     region, ctx);
+		if (err)
+			return err;
+	} else if (!vchunk->chunk2) {
+		/* The chunk is already as it should be, nothing to do. */
+		return 0;
+	}
+
+	/* If the migration got interrupted, we have the ventry to start from
+	 * stored in context.
+	 */
+	if (ctx->start_ventry)
+		ventry = ctx->start_ventry;
+	else
+		ventry = list_first_entry(&vchunk->ventry_list,
+					  typeof(*ventry), list);
+
+	list_for_each_entry_from(ventry, &vchunk->ventry_list, list) {
+		/* During rollback, once we reach the ventry that failed
+		 * to migrate, we are done.
+		 */
+		if (ventry == ctx->stop_ventry)
+			break;
+
+		err = mlxsw_sp_acl_tcam_ventry_migrate(mlxsw_sp, ventry,
+						       vchunk->chunk, credits);
+		if (err) {
+			if (ctx->this_is_rollback) {
+				/* Save the ventry which we ended with and try
+				 * to continue later on.
+				 */
+				ctx->start_ventry = ventry;
+				return err;
+			}
+			/* Swap the chunk and chunk2 pointers so the follow-up
+			 * rollback call will see the original chunk pointer
+			 * in vchunk->chunk.
+			 */
+			swap(vchunk->chunk, vchunk->chunk2);
+			/* The rollback has to be done from beginning of the
+			 * chunk, that is why we have to null the start_ventry.
+			 * However, we know where to stop the rollback,
+			 * at the current ventry.
+			 */
+			ctx->start_ventry = NULL;
+			ctx->stop_ventry = ventry;
+			return err;
+		} else if (*credits < 0) {
+			/* We are out of credits, the rest of the ventries
+			 * will be migrated later. Save the ventry
+			 * which we ended with.
+			 */
+			ctx->start_ventry = ventry;
+			return 0;
+		}
+	}
+
+	mlxsw_sp_acl_tcam_vchunk_migrate_end(mlxsw_sp, vchunk, ctx);
+	return 0;
+}
+
+static int
+mlxsw_sp_acl_tcam_vchunk_migrate_all(struct mlxsw_sp *mlxsw_sp,
+				     struct mlxsw_sp_acl_tcam_vregion *vregion,
+				     struct mlxsw_sp_acl_tcam_rehash_ctx *ctx,
+				     int *credits)
+{
+	struct mlxsw_sp_acl_tcam_vchunk *vchunk;
+	int err;
+
+	/* If the migration got interrupted, we have the vchunk
+	 * we are working on stored in context.
+	 */
+	if (ctx->current_vchunk)
+		vchunk = ctx->current_vchunk;
+	else
+		vchunk = list_first_entry(&vregion->vchunk_list,
+					  typeof(*vchunk), list);
+
+	list_for_each_entry_from(vchunk, &vregion->vchunk_list, list) {
+		err = mlxsw_sp_acl_tcam_vchunk_migrate_one(mlxsw_sp, vchunk,
+							   vregion->region,
+							   ctx, credits);
+		if (err || *credits < 0)
+			return err;
+	}
+	return 0;
+}
+
+static int
+mlxsw_sp_acl_tcam_vregion_migrate(struct mlxsw_sp *mlxsw_sp,
+				  struct mlxsw_sp_acl_tcam_vregion *vregion,
+				  struct mlxsw_sp_acl_tcam_rehash_ctx *ctx,
+				  int *credits)
+{
+	int err, err2;
+
+	trace_mlxsw_sp_acl_tcam_vregion_migrate(mlxsw_sp, vregion);
+	mutex_lock(&vregion->lock);
+	err = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion,
+						   ctx, credits);
+	if (err) {
+		/* In case migration was not successful, we need to swap
+		 * so the original region pointer is assigned again
+		 * to vregion->region.
+		 */
+		swap(vregion->region, vregion->region2);
+		ctx->current_vchunk = NULL;
+		ctx->this_is_rollback = true;
+		err2 = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion,
+							    ctx, credits);
+		if (err2) {
+			trace_mlxsw_sp_acl_tcam_vregion_rehash_rollback_failed(mlxsw_sp,
+									       vregion);
+			dev_err(mlxsw_sp->bus_info->dev, "Failed to rollback during vregion migration fail\n");
+			/* Let the rollback to be continued later on. */
+		}
+	}
+	mutex_unlock(&vregion->lock);
+	trace_mlxsw_sp_acl_tcam_vregion_migrate_end(mlxsw_sp, vregion);
+	return err;
+}
+
+static bool
+mlxsw_sp_acl_tcam_vregion_rehash_in_progress(const struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
+{
+	return ctx->hints_priv;
+}
+
+static int
+mlxsw_sp_acl_tcam_vregion_rehash_start(struct mlxsw_sp *mlxsw_sp,
+				       struct mlxsw_sp_acl_tcam_vregion *vregion,
+				       struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
+{
+	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
+	unsigned int priority = mlxsw_sp_acl_tcam_vregion_prio(vregion);
+	struct mlxsw_sp_acl_tcam_region *new_region;
+	void *hints_priv;
+	int err;
+
+	trace_mlxsw_sp_acl_tcam_vregion_rehash(mlxsw_sp, vregion);
+
+	hints_priv = ops->region_rehash_hints_get(vregion->region->priv);
+	if (IS_ERR(hints_priv))
+		return PTR_ERR(hints_priv);
+
+	new_region = mlxsw_sp_acl_tcam_region_create(mlxsw_sp, vregion->tcam,
+						     vregion, hints_priv);
+	if (IS_ERR(new_region)) {
+		err = PTR_ERR(new_region);
+		goto err_region_create;
+	}
+
+	/* vregion->region contains the pointer to the new region
+	 * we are going to migrate to.
+	 */
+	vregion->region2 = vregion->region;
+	vregion->region = new_region;
+	err = mlxsw_sp_acl_tcam_group_region_attach(mlxsw_sp,
+						    vregion->region2->group,
+						    new_region, priority,
+						    vregion->region2);
+	if (err)
+		goto err_group_region_attach;
+
+	ctx->hints_priv = hints_priv;
+	ctx->this_is_rollback = false;
+
+	return 0;
+
+err_group_region_attach:
+	vregion->region = vregion->region2;
+	vregion->region2 = NULL;
+	mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, new_region);
+err_region_create:
+	ops->region_rehash_hints_put(hints_priv);
+	return err;
+}
+
+static void
+mlxsw_sp_acl_tcam_vregion_rehash_end(struct mlxsw_sp *mlxsw_sp,
+				     struct mlxsw_sp_acl_tcam_vregion *vregion,
+				     struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
+{
+	struct mlxsw_sp_acl_tcam_region *unused_region = vregion->region2;
+	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
+
+	vregion->region2 = NULL;
+	mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp, unused_region);
+	mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, unused_region);
+	ops->region_rehash_hints_put(ctx->hints_priv);
+	ctx->hints_priv = NULL;
+}
+
+static void
+mlxsw_sp_acl_tcam_vregion_rehash(struct mlxsw_sp *mlxsw_sp,
+				 struct mlxsw_sp_acl_tcam_vregion *vregion,
+				 int *credits)
+{
+	struct mlxsw_sp_acl_tcam_rehash_ctx *ctx = &vregion->rehash.ctx;
+	int err;
+
+	/* Check if the previous rehash work was interrupted
+	 * which means we have to continue it now.
+	 * If not, start a new rehash.
+	 */
+	if (!mlxsw_sp_acl_tcam_vregion_rehash_in_progress(ctx)) {
+		err = mlxsw_sp_acl_tcam_vregion_rehash_start(mlxsw_sp,
+							     vregion, ctx);
+		if (err) {
+			if (err != -EAGAIN)
+				dev_err(mlxsw_sp->bus_info->dev, "Failed get rehash hints\n");
+			return;
+		}
+	}
+
+	err = mlxsw_sp_acl_tcam_vregion_migrate(mlxsw_sp, vregion,
+						ctx, credits);
+	if (err) {
+		dev_err(mlxsw_sp->bus_info->dev, "Failed to migrate vregion\n");
+	}
+
+	if (*credits >= 0)
+		mlxsw_sp_acl_tcam_vregion_rehash_end(mlxsw_sp, vregion, ctx);
 }
 
 static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv4[] = {
@@ -842,25 +1595,28 @@
 	ARRAY_SIZE(mlxsw_sp_acl_tcam_patterns)
 
 struct mlxsw_sp_acl_tcam_flower_ruleset {
-	struct mlxsw_sp_acl_tcam_group group;
+	struct mlxsw_sp_acl_tcam_vgroup vgroup;
 };
 
 struct mlxsw_sp_acl_tcam_flower_rule {
-	struct mlxsw_sp_acl_tcam_entry entry;
+	struct mlxsw_sp_acl_tcam_ventry ventry;
 };
 
 static int
 mlxsw_sp_acl_tcam_flower_ruleset_add(struct mlxsw_sp *mlxsw_sp,
 				     struct mlxsw_sp_acl_tcam *tcam,
 				     void *ruleset_priv,
-				     struct mlxsw_afk_element_usage *tmplt_elusage)
+				     struct mlxsw_afk_element_usage *tmplt_elusage,
+				     unsigned int *p_min_prio,
+				     unsigned int *p_max_prio)
 {
 	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
 
-	return mlxsw_sp_acl_tcam_group_add(mlxsw_sp, tcam, &ruleset->group,
-					   mlxsw_sp_acl_tcam_patterns,
-					   MLXSW_SP_ACL_TCAM_PATTERNS_COUNT,
-					   tmplt_elusage);
+	return mlxsw_sp_acl_tcam_vgroup_add(mlxsw_sp, tcam, &ruleset->vgroup,
+					    mlxsw_sp_acl_tcam_patterns,
+					    MLXSW_SP_ACL_TCAM_PATTERNS_COUNT,
+					    tmplt_elusage, true,
+					    p_min_prio, p_max_prio);
 }
 
 static void
@@ -869,7 +1625,7 @@
 {
 	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
 
-	mlxsw_sp_acl_tcam_group_del(mlxsw_sp, &ruleset->group);
+	mlxsw_sp_acl_tcam_vgroup_del(&ruleset->vgroup);
 }
 
 static int
@@ -880,7 +1636,7 @@
 {
 	struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
 
-	return mlxsw_sp_acl_tcam_group_bind(mlxsw_sp, &ruleset->group,
+	return mlxsw_sp_acl_tcam_group_bind(mlxsw_sp, &ruleset->vgroup.group,
 					    mlxsw_sp_port, ingress);
 }
 
....@@ -892,7 +1648,7 @@
8921648 {
8931649 struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
8941650
895
- mlxsw_sp_acl_tcam_group_unbind(mlxsw_sp, &ruleset->group,
1651
+ mlxsw_sp_acl_tcam_group_unbind(mlxsw_sp, &ruleset->vgroup.group,
8961652 mlxsw_sp_port, ingress);
8971653 }
8981654
....@@ -901,13 +1657,7 @@
9011657 {
9021658 struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
9031659
904
- return mlxsw_sp_acl_tcam_group_id(&ruleset->group);
905
-}
906
-
907
-static size_t mlxsw_sp_acl_tcam_flower_rule_priv_size(struct mlxsw_sp *mlxsw_sp)
908
-{
909
- return sizeof(struct mlxsw_sp_acl_tcam_flower_rule) +
910
- mlxsw_sp_acl_tcam_entry_priv_size(mlxsw_sp);
1660
+ return mlxsw_sp_acl_tcam_group_id(&ruleset->vgroup.group);
9111661 }
9121662
9131663 static int
....@@ -918,8 +1668,8 @@
9181668 struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
9191669 struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;
9201670
921
- return mlxsw_sp_acl_tcam_entry_add(mlxsw_sp, &ruleset->group,
922
- &rule->entry, rulei);
1671
+ return mlxsw_sp_acl_tcam_ventry_add(mlxsw_sp, &ruleset->vgroup,
1672
+ &rule->ventry, rulei);
9231673 }
9241674
9251675 static void
....@@ -927,7 +1677,15 @@
9271677 {
9281678 struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;
9291679
930
- mlxsw_sp_acl_tcam_entry_del(mlxsw_sp, &rule->entry);
1680
+ mlxsw_sp_acl_tcam_ventry_del(mlxsw_sp, &rule->ventry);
1681
+}
1682
+
1683
+static int
1684
+mlxsw_sp_acl_tcam_flower_rule_action_replace(struct mlxsw_sp *mlxsw_sp,
1685
+ void *rule_priv,
1686
+ struct mlxsw_sp_acl_rule_info *rulei)
1687
+{
1688
+ return -EOPNOTSUPP;
9311689 }
9321690
9331691 static int
....@@ -936,8 +1694,8 @@
9361694 {
9371695 struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;
9381696
939
- return mlxsw_sp_acl_tcam_entry_activity_get(mlxsw_sp, &rule->entry,
940
- activity);
1697
+ return mlxsw_sp_acl_tcam_ventry_activity_get(mlxsw_sp, &rule->ventry,
1698
+ activity);
9411699 }
9421700
9431701 static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_flower_ops = {
....@@ -947,15 +1705,155 @@
9471705 .ruleset_bind = mlxsw_sp_acl_tcam_flower_ruleset_bind,
9481706 .ruleset_unbind = mlxsw_sp_acl_tcam_flower_ruleset_unbind,
9491707 .ruleset_group_id = mlxsw_sp_acl_tcam_flower_ruleset_group_id,
950
- .rule_priv_size = mlxsw_sp_acl_tcam_flower_rule_priv_size,
1708
+ .rule_priv_size = sizeof(struct mlxsw_sp_acl_tcam_flower_rule),
9511709 .rule_add = mlxsw_sp_acl_tcam_flower_rule_add,
9521710 .rule_del = mlxsw_sp_acl_tcam_flower_rule_del,
1711
+ .rule_action_replace = mlxsw_sp_acl_tcam_flower_rule_action_replace,
9531712 .rule_activity_get = mlxsw_sp_acl_tcam_flower_rule_activity_get,
1713
+};
1714
+
1715
+struct mlxsw_sp_acl_tcam_mr_ruleset {
1716
+ struct mlxsw_sp_acl_tcam_vchunk *vchunk;
1717
+ struct mlxsw_sp_acl_tcam_vgroup vgroup;
1718
+};
1719
+
1720
+struct mlxsw_sp_acl_tcam_mr_rule {
1721
+ struct mlxsw_sp_acl_tcam_ventry ventry;
1722
+};
1723
+
1724
+static int
1725
+mlxsw_sp_acl_tcam_mr_ruleset_add(struct mlxsw_sp *mlxsw_sp,
1726
+ struct mlxsw_sp_acl_tcam *tcam,
1727
+ void *ruleset_priv,
1728
+ struct mlxsw_afk_element_usage *tmplt_elusage,
1729
+ unsigned int *p_min_prio,
1730
+ unsigned int *p_max_prio)
1731
+{
1732
+ struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;
1733
+ int err;
1734
+
1735
+ err = mlxsw_sp_acl_tcam_vgroup_add(mlxsw_sp, tcam, &ruleset->vgroup,
1736
+ mlxsw_sp_acl_tcam_patterns,
1737
+ MLXSW_SP_ACL_TCAM_PATTERNS_COUNT,
1738
+ tmplt_elusage, false,
1739
+ p_min_prio, p_max_prio);
1740
+ if (err)
1741
+ return err;
1742
+
1743
+	/* For most of the TCAM clients it would make sense to take a tcam
+	 * chunk only when the first rule is written. This is not the case
+	 * for the multicast router, which must be bound to a specific ACL
+	 * Group ID that already exists in HW before the multicast router
+	 * is initialized.
+	 */
+	ruleset->vchunk = mlxsw_sp_acl_tcam_vchunk_get(mlxsw_sp,
+						       &ruleset->vgroup, 1,
+						       tmplt_elusage);
+	if (IS_ERR(ruleset->vchunk)) {
+		err = PTR_ERR(ruleset->vchunk);
+		goto err_chunk_get;
+	}
+
+	return 0;
+
+err_chunk_get:
+	mlxsw_sp_acl_tcam_vgroup_del(&ruleset->vgroup);
+	return err;
+}
+
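/*
 * The comment above encodes an ordering contract: the multicast router
 * binds to an ACL Group ID, so the group must already be backed in HW
 * when the ID is queried. A toy userspace model of that contract follows
 * (not driver code; all names are invented for illustration).
 */
#include <assert.h>
#include <stdio.h>

struct model_group {
	unsigned short id;
	int backed;		/* nonzero once a chunk/region backs it in "HW" */
};

static void model_ruleset_add(struct model_group *g)
{
	g->id = 42;		/* pretend group ID allocation */
	g->backed = 1;		/* eager chunk taken at ruleset creation */
}

static unsigned short model_group_id(const struct model_group *g)
{
	assert(g->backed);	/* MR must never see an unbacked group */
	return g->id;
}

int main(void)
{
	struct model_group g = { 0, 0 };

	model_ruleset_add(&g);	/* chunk taken before MR init ... */
	printf("MR binds to group %u\n", model_group_id(&g)); /* ... so this is safe */
	return 0;
}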
+static void
+mlxsw_sp_acl_tcam_mr_ruleset_del(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv)
+{
+	struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;
+
+	mlxsw_sp_acl_tcam_vchunk_put(mlxsw_sp, ruleset->vchunk);
+	mlxsw_sp_acl_tcam_vgroup_del(&ruleset->vgroup);
+}
+
+static int
+mlxsw_sp_acl_tcam_mr_ruleset_bind(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
+				  struct mlxsw_sp_port *mlxsw_sp_port,
+				  bool ingress)
+{
+	/* Binding is done when initializing multicast router */
+	return 0;
+}
+
+static void
+mlxsw_sp_acl_tcam_mr_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
+				    void *ruleset_priv,
+				    struct mlxsw_sp_port *mlxsw_sp_port,
+				    bool ingress)
+{
+}
+
+static u16
+mlxsw_sp_acl_tcam_mr_ruleset_group_id(void *ruleset_priv)
+{
+	struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;
+
+	return mlxsw_sp_acl_tcam_group_id(&ruleset->vgroup.group);
+}
+
+static int
+mlxsw_sp_acl_tcam_mr_rule_add(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
+			      void *rule_priv,
+			      struct mlxsw_sp_acl_rule_info *rulei)
+{
+	struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;
+	struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;
+
+	return mlxsw_sp_acl_tcam_ventry_add(mlxsw_sp, &ruleset->vgroup,
+					    &rule->ventry, rulei);
+}
+
+static void
+mlxsw_sp_acl_tcam_mr_rule_del(struct mlxsw_sp *mlxsw_sp, void *rule_priv)
+{
+	struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;
+
+	mlxsw_sp_acl_tcam_ventry_del(mlxsw_sp, &rule->ventry);
+}
+
+static int
+mlxsw_sp_acl_tcam_mr_rule_action_replace(struct mlxsw_sp *mlxsw_sp,
+					 void *rule_priv,
+					 struct mlxsw_sp_acl_rule_info *rulei)
+{
+	struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;
+
+	return mlxsw_sp_acl_tcam_ventry_action_replace(mlxsw_sp, &rule->ventry,
+						       rulei);
+}
+
+static int
+mlxsw_sp_acl_tcam_mr_rule_activity_get(struct mlxsw_sp *mlxsw_sp,
+				       void *rule_priv, bool *activity)
+{
+	struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;
+
+	return mlxsw_sp_acl_tcam_ventry_activity_get(mlxsw_sp, &rule->ventry,
+						     activity);
+}
+
+static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_mr_ops = {
+	.ruleset_priv_size = sizeof(struct mlxsw_sp_acl_tcam_mr_ruleset),
+	.ruleset_add = mlxsw_sp_acl_tcam_mr_ruleset_add,
+	.ruleset_del = mlxsw_sp_acl_tcam_mr_ruleset_del,
+	.ruleset_bind = mlxsw_sp_acl_tcam_mr_ruleset_bind,
+	.ruleset_unbind = mlxsw_sp_acl_tcam_mr_ruleset_unbind,
+	.ruleset_group_id = mlxsw_sp_acl_tcam_mr_ruleset_group_id,
+	.rule_priv_size = sizeof(struct mlxsw_sp_acl_tcam_mr_rule),
+	.rule_add = mlxsw_sp_acl_tcam_mr_rule_add,
+	.rule_del = mlxsw_sp_acl_tcam_mr_rule_del,
+	.rule_action_replace = mlxsw_sp_acl_tcam_mr_rule_action_replace,
+	.rule_activity_get = mlxsw_sp_acl_tcam_mr_rule_activity_get,
 };
 
 static const struct mlxsw_sp_acl_profile_ops *
 mlxsw_sp_acl_tcam_profile_ops_arr[] = {
 	[MLXSW_SP_ACL_PROFILE_FLOWER] = &mlxsw_sp_acl_tcam_flower_ops,
+	[MLXSW_SP_ACL_PROFILE_MR] = &mlxsw_sp_acl_tcam_mr_ops,
 };
 
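/*
 * The ops arrays above implement vtable dispatch: the ACL profile enum
 * indexes an array of ops pointers and callers invoke rules through
 * whichever ops set comes back. A minimal self-contained model of the
 * pattern follows (not driver code; all names are invented).
 */
#include <stdio.h>

enum model_profile {
	MODEL_PROFILE_FLOWER,
	MODEL_PROFILE_MR,
	MODEL_PROFILE_MAX,
};

struct model_ops {
	const char *name;
	int (*rule_add)(void);
};

static int model_flower_rule_add(void) { return 0; }
static int model_mr_rule_add(void) { return 0; }

static const struct model_ops model_flower_ops = {
	.name = "flower",
	.rule_add = model_flower_rule_add,
};

static const struct model_ops model_mr_ops = {
	.name = "mr",
	.rule_add = model_mr_rule_add,
};

/* Dispatch table indexed by profile, mirroring the array above. */
static const struct model_ops *model_ops_arr[] = {
	[MODEL_PROFILE_FLOWER] = &model_flower_ops,
	[MODEL_PROFILE_MR] = &model_mr_ops,
};

int main(void)
{
	const struct model_ops *ops = model_ops_arr[MODEL_PROFILE_MR];

	printf("profile '%s': rule_add -> %d\n", ops->name, ops->rule_add());
	return 0;
}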
 const struct mlxsw_sp_acl_profile_ops *