forked from ~ljy/RK356X_SDK_RELEASE

Author: hc
Date:   2024-12-19
Commit: 9370bb92b2d16684ee45cf24e879c93c509162da

--- a/kernel/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+++ b/kernel/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
@@ -4,6 +4,7 @@
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/netdevice.h>
+#include <linux/log2.h>
 #include <net/net_namespace.h>
 #include <net/flow_dissector.h>
 #include <net/pkt_cls.h>
@@ -15,43 +16,77 @@
 #include "core_acl_flex_keys.h"
 
 static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
-                                         struct mlxsw_sp_acl_block *block,
+                                         struct mlxsw_sp_flow_block *block,
                                          struct mlxsw_sp_acl_rule_info *rulei,
-                                         struct tcf_exts *exts,
+                                         struct flow_action *flow_action,
                                          struct netlink_ext_ack *extack)
 {
-        const struct tc_action *a;
+        const struct flow_action_entry *act;
+        int mirror_act_count = 0;
+        int police_act_count = 0;
         int err, i;
 
-        if (!tcf_exts_has_actions(exts))
+        if (!flow_action_has_entries(flow_action))
                 return 0;
+        if (!flow_action_mixed_hw_stats_check(flow_action, extack))
+                return -EOPNOTSUPP;
 
-        /* Count action is inserted first */
-        err = mlxsw_sp_acl_rulei_act_count(mlxsw_sp, rulei, extack);
-        if (err)
-                return err;
+        act = flow_action_first_entry_get(flow_action);
+        if (act->hw_stats & FLOW_ACTION_HW_STATS_DISABLED) {
+                /* Nothing to do */
+        } else if (act->hw_stats & FLOW_ACTION_HW_STATS_IMMEDIATE) {
+                /* Count action is inserted first */
+                err = mlxsw_sp_acl_rulei_act_count(mlxsw_sp, rulei, extack);
+                if (err)
+                        return err;
+        } else {
+                NL_SET_ERR_MSG_MOD(extack, "Unsupported action HW stats type");
+                return -EOPNOTSUPP;
+        }
 
-        tcf_exts_for_each_action(i, a, exts) {
-                if (is_tcf_gact_ok(a)) {
+        flow_action_for_each(i, act, flow_action) {
+                switch (act->id) {
+                case FLOW_ACTION_ACCEPT:
                         err = mlxsw_sp_acl_rulei_act_terminate(rulei);
                         if (err) {
                                 NL_SET_ERR_MSG_MOD(extack, "Cannot append terminate action");
                                 return err;
                         }
-                } else if (is_tcf_gact_shot(a)) {
-                        err = mlxsw_sp_acl_rulei_act_drop(rulei);
+                        break;
+                case FLOW_ACTION_DROP: {
+                        bool ingress;
+
+                        if (mlxsw_sp_flow_block_is_mixed_bound(block)) {
+                                NL_SET_ERR_MSG_MOD(extack, "Drop action is not supported when block is bound to ingress and egress");
+                                return -EOPNOTSUPP;
+                        }
+                        ingress = mlxsw_sp_flow_block_is_ingress_bound(block);
+                        err = mlxsw_sp_acl_rulei_act_drop(rulei, ingress,
+                                                          act->cookie, extack);
                         if (err) {
                                 NL_SET_ERR_MSG_MOD(extack, "Cannot append drop action");
                                 return err;
                         }
-                } else if (is_tcf_gact_trap(a)) {
+
+                        /* Forbid block with this rulei to be bound
+                         * to ingress/egress in future. Ingress rule is
+                         * a blocker for egress and vice versa.
+                         */
+                        if (ingress)
+                                rulei->egress_bind_blocker = 1;
+                        else
+                                rulei->ingress_bind_blocker = 1;
+                        }
+                        break;
+                case FLOW_ACTION_TRAP:
                         err = mlxsw_sp_acl_rulei_act_trap(rulei);
                         if (err) {
                                 NL_SET_ERR_MSG_MOD(extack, "Cannot append trap action");
                                 return err;
                         }
-                } else if (is_tcf_gact_goto_chain(a)) {
-                        u32 chain_index = tcf_gact_goto_chain_index(a);
+                        break;
+                case FLOW_ACTION_GOTO: {
+                        u32 chain_index = act->chain_index;
                         struct mlxsw_sp_acl_ruleset *ruleset;
                         u16 group_id;
 
@@ -67,10 +102,22 @@
                                 NL_SET_ERR_MSG_MOD(extack, "Cannot append jump action");
                                 return err;
                         }
-                } else if (is_tcf_mirred_egress_redirect(a)) {
+                        }
+                        break;
+                case FLOW_ACTION_REDIRECT: {
                         struct net_device *out_dev;
                         struct mlxsw_sp_fid *fid;
                         u16 fid_index;
+
+                        if (mlxsw_sp_flow_block_is_egress_bound(block)) {
+                                NL_SET_ERR_MSG_MOD(extack, "Redirect action is not supported on egress");
+                                return -EOPNOTSUPP;
+                        }
+
+                        /* Forbid block with this rulei to be bound
+                         * to egress in future.
+                         */
+                        rulei->egress_bind_blocker = 1;
 
                         fid = mlxsw_sp_acl_dummy_fid(mlxsw_sp);
                         fid_index = mlxsw_sp_fid_index(fid);
@@ -79,31 +126,85 @@
                         if (err)
                                 return err;
 
-                        out_dev = tcf_mirred_dev(a);
+                        out_dev = act->dev;
                         err = mlxsw_sp_acl_rulei_act_fwd(mlxsw_sp, rulei,
                                                          out_dev, extack);
                         if (err)
                                 return err;
-                } else if (is_tcf_mirred_egress_mirror(a)) {
-                        struct net_device *out_dev = tcf_mirred_dev(a);
+                        }
+                        break;
+                case FLOW_ACTION_MIRRED: {
+                        struct net_device *out_dev = act->dev;
+
+                        if (mirror_act_count++) {
+                                NL_SET_ERR_MSG_MOD(extack, "Multiple mirror actions per rule are not supported");
+                                return -EOPNOTSUPP;
+                        }
 
                         err = mlxsw_sp_acl_rulei_act_mirror(mlxsw_sp, rulei,
                                                             block, out_dev,
                                                             extack);
                         if (err)
                                 return err;
-                } else if (is_tcf_vlan(a)) {
-                        u16 proto = be16_to_cpu(tcf_vlan_push_proto(a));
-                        u32 action = tcf_vlan_action(a);
-                        u8 prio = tcf_vlan_push_prio(a);
-                        u16 vid = tcf_vlan_push_vid(a);
+                        }
+                        break;
+                case FLOW_ACTION_VLAN_MANGLE: {
+                        u16 proto = be16_to_cpu(act->vlan.proto);
+                        u8 prio = act->vlan.prio;
+                        u16 vid = act->vlan.vid;
 
                         err = mlxsw_sp_acl_rulei_act_vlan(mlxsw_sp, rulei,
-                                                          action, vid,
+                                                          act->id, vid,
                                                           proto, prio, extack);
                         if (err)
                                 return err;
-                } else {
+                        break;
+                }
+                case FLOW_ACTION_PRIORITY:
+                        err = mlxsw_sp_acl_rulei_act_priority(mlxsw_sp, rulei,
+                                                              act->priority,
+                                                              extack);
+                        if (err)
+                                return err;
+                        break;
+                case FLOW_ACTION_MANGLE: {
+                        enum flow_action_mangle_base htype = act->mangle.htype;
+                        __be32 be_mask = (__force __be32) act->mangle.mask;
+                        __be32 be_val = (__force __be32) act->mangle.val;
+                        u32 offset = act->mangle.offset;
+                        u32 mask = be32_to_cpu(be_mask);
+                        u32 val = be32_to_cpu(be_val);
+
+                        err = mlxsw_sp_acl_rulei_act_mangle(mlxsw_sp, rulei,
+                                                            htype, offset,
+                                                            mask, val, extack);
+                        if (err)
+                                return err;
+                        break;
+                }
+                case FLOW_ACTION_POLICE: {
+                        u32 burst;
+
+                        if (police_act_count++) {
+                                NL_SET_ERR_MSG_MOD(extack, "Multiple police actions per rule are not supported");
+                                return -EOPNOTSUPP;
+                        }
+
+                        /* The kernel might adjust the requested burst size so
+                         * that it is not exactly a power of two. Re-adjust it
+                         * here since the hardware only supports burst sizes
+                         * that are a power of two.
+                         */
+                        burst = roundup_pow_of_two(act->police.burst);
+                        err = mlxsw_sp_acl_rulei_act_police(mlxsw_sp, rulei,
+                                                            act->police.index,
+                                                            act->police.rate_bytes_ps,
+                                                            burst, extack);
+                        if (err)
+                                return err;
+                        break;
+                }
+                default:
                         NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
                         dev_err(mlxsw_sp->bus_info->dev, "Unsupported action\n");
                         return -EOPNOTSUPP;
@@ -112,72 +213,106 @@
         return 0;
 }
 
-static void mlxsw_sp_flower_parse_ipv4(struct mlxsw_sp_acl_rule_info *rulei,
-                                       struct tc_cls_flower_offload *f)
+static int mlxsw_sp_flower_parse_meta(struct mlxsw_sp_acl_rule_info *rulei,
+                                      struct flow_cls_offload *f,
+                                      struct mlxsw_sp_flow_block *block)
 {
-        struct flow_dissector_key_ipv4_addrs *key =
-                skb_flow_dissector_target(f->dissector,
-                                          FLOW_DISSECTOR_KEY_IPV4_ADDRS,
-                                          f->key);
-        struct flow_dissector_key_ipv4_addrs *mask =
-                skb_flow_dissector_target(f->dissector,
-                                          FLOW_DISSECTOR_KEY_IPV4_ADDRS,
-                                          f->mask);
+        struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+        struct mlxsw_sp_port *mlxsw_sp_port;
+        struct net_device *ingress_dev;
+        struct flow_match_meta match;
+
+        if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
+                return 0;
+
+        flow_rule_match_meta(rule, &match);
+        if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
+                NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported ingress ifindex mask");
+                return -EINVAL;
+        }
+
+        ingress_dev = __dev_get_by_index(block->net,
+                                         match.key->ingress_ifindex);
+        if (!ingress_dev) {
+                NL_SET_ERR_MSG_MOD(f->common.extack, "Can't find specified ingress port to match on");
+                return -EINVAL;
+        }
+
+        if (!mlxsw_sp_port_dev_check(ingress_dev)) {
+                NL_SET_ERR_MSG_MOD(f->common.extack, "Can't match on non-mlxsw ingress port");
+                return -EINVAL;
+        }
+
+        mlxsw_sp_port = netdev_priv(ingress_dev);
+        if (mlxsw_sp_port->mlxsw_sp != block->mlxsw_sp) {
+                NL_SET_ERR_MSG_MOD(f->common.extack, "Can't match on a port from different device");
+                return -EINVAL;
+        }
+
+        mlxsw_sp_acl_rulei_keymask_u32(rulei,
+                                       MLXSW_AFK_ELEMENT_SRC_SYS_PORT,
+                                       mlxsw_sp_port->local_port,
+                                       0xFFFFFFFF);
+        return 0;
+}
+
+static void mlxsw_sp_flower_parse_ipv4(struct mlxsw_sp_acl_rule_info *rulei,
+                                       struct flow_cls_offload *f)
+{
+        struct flow_match_ipv4_addrs match;
+
+        flow_rule_match_ipv4_addrs(f->rule, &match);
 
         mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31,
-                                       (char *) &key->src,
-                                       (char *) &mask->src, 4);
+                                       (char *) &match.key->src,
+                                       (char *) &match.mask->src, 4);
         mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31,
-                                       (char *) &key->dst,
-                                       (char *) &mask->dst, 4);
+                                       (char *) &match.key->dst,
+                                       (char *) &match.mask->dst, 4);
 }
 
 static void mlxsw_sp_flower_parse_ipv6(struct mlxsw_sp_acl_rule_info *rulei,
-                                       struct tc_cls_flower_offload *f)
+                                       struct flow_cls_offload *f)
 {
-        struct flow_dissector_key_ipv6_addrs *key =
-                skb_flow_dissector_target(f->dissector,
-                                          FLOW_DISSECTOR_KEY_IPV6_ADDRS,
-                                          f->key);
-        struct flow_dissector_key_ipv6_addrs *mask =
-                skb_flow_dissector_target(f->dissector,
-                                          FLOW_DISSECTOR_KEY_IPV6_ADDRS,
-                                          f->mask);
+        struct flow_match_ipv6_addrs match;
+
+        flow_rule_match_ipv6_addrs(f->rule, &match);
 
         mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_96_127,
-                                       &key->src.s6_addr[0x0],
-                                       &mask->src.s6_addr[0x0], 4);
+                                       &match.key->src.s6_addr[0x0],
+                                       &match.mask->src.s6_addr[0x0], 4);
         mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_64_95,
-                                       &key->src.s6_addr[0x4],
-                                       &mask->src.s6_addr[0x4], 4);
+                                       &match.key->src.s6_addr[0x4],
+                                       &match.mask->src.s6_addr[0x4], 4);
         mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_32_63,
-                                       &key->src.s6_addr[0x8],
-                                       &mask->src.s6_addr[0x8], 4);
+                                       &match.key->src.s6_addr[0x8],
+                                       &match.mask->src.s6_addr[0x8], 4);
         mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31,
-                                       &key->src.s6_addr[0xC],
-                                       &mask->src.s6_addr[0xC], 4);
+                                       &match.key->src.s6_addr[0xC],
+                                       &match.mask->src.s6_addr[0xC], 4);
         mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_96_127,
-                                       &key->dst.s6_addr[0x0],
-                                       &mask->dst.s6_addr[0x0], 4);
+                                       &match.key->dst.s6_addr[0x0],
+                                       &match.mask->dst.s6_addr[0x0], 4);
         mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_64_95,
-                                       &key->dst.s6_addr[0x4],
-                                       &mask->dst.s6_addr[0x4], 4);
+                                       &match.key->dst.s6_addr[0x4],
+                                       &match.mask->dst.s6_addr[0x4], 4);
         mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_32_63,
-                                       &key->dst.s6_addr[0x8],
-                                       &mask->dst.s6_addr[0x8], 4);
+                                       &match.key->dst.s6_addr[0x8],
+                                       &match.mask->dst.s6_addr[0x8], 4);
         mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31,
-                                       &key->dst.s6_addr[0xC],
-                                       &mask->dst.s6_addr[0xC], 4);
+                                       &match.key->dst.s6_addr[0xC],
+                                       &match.mask->dst.s6_addr[0xC], 4);
 }
 
 static int mlxsw_sp_flower_parse_ports(struct mlxsw_sp *mlxsw_sp,
                                        struct mlxsw_sp_acl_rule_info *rulei,
-                                       struct tc_cls_flower_offload *f,
+                                       struct flow_cls_offload *f,
                                        u8 ip_proto)
 {
-        struct flow_dissector_key_ports *key, *mask;
+        const struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+        struct flow_match_ports match;
 
-        if (!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS))
+        if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS))
                 return 0;
 
         if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) {
@@ -186,27 +321,25 @@
                 return -EINVAL;
         }
 
-        key = skb_flow_dissector_target(f->dissector,
-                                        FLOW_DISSECTOR_KEY_PORTS,
-                                        f->key);
-        mask = skb_flow_dissector_target(f->dissector,
-                                         FLOW_DISSECTOR_KEY_PORTS,
-                                         f->mask);
+        flow_rule_match_ports(rule, &match);
         mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_DST_L4_PORT,
-                                       ntohs(key->dst), ntohs(mask->dst));
+                                       ntohs(match.key->dst),
+                                       ntohs(match.mask->dst));
         mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_SRC_L4_PORT,
-                                       ntohs(key->src), ntohs(mask->src));
+                                       ntohs(match.key->src),
+                                       ntohs(match.mask->src));
         return 0;
 }
 
 static int mlxsw_sp_flower_parse_tcp(struct mlxsw_sp *mlxsw_sp,
                                      struct mlxsw_sp_acl_rule_info *rulei,
-                                     struct tc_cls_flower_offload *f,
+                                     struct flow_cls_offload *f,
                                      u8 ip_proto)
 {
-        struct flow_dissector_key_tcp *key, *mask;
+        const struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+        struct flow_match_tcp match;
 
-        if (!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_TCP))
+        if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP))
                 return 0;
 
         if (ip_proto != IPPROTO_TCP) {
@@ -215,25 +348,29 @@
                 return -EINVAL;
         }
 
-        key = skb_flow_dissector_target(f->dissector,
-                                        FLOW_DISSECTOR_KEY_TCP,
-                                        f->key);
-        mask = skb_flow_dissector_target(f->dissector,
-                                         FLOW_DISSECTOR_KEY_TCP,
-                                         f->mask);
+        flow_rule_match_tcp(rule, &match);
+
+        if (match.mask->flags & htons(0x0E00)) {
+                NL_SET_ERR_MSG_MOD(f->common.extack, "TCP flags match not supported on reserved bits");
+                dev_err(mlxsw_sp->bus_info->dev, "TCP flags match not supported on reserved bits\n");
+                return -EINVAL;
+        }
+
         mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_TCP_FLAGS,
-                                       ntohs(key->flags), ntohs(mask->flags));
+                                       ntohs(match.key->flags),
+                                       ntohs(match.mask->flags));
         return 0;
 }
 
 static int mlxsw_sp_flower_parse_ip(struct mlxsw_sp *mlxsw_sp,
                                     struct mlxsw_sp_acl_rule_info *rulei,
-                                    struct tc_cls_flower_offload *f,
+                                    struct flow_cls_offload *f,
                                     u16 n_proto)
 {
-        struct flow_dissector_key_ip *key, *mask;
+        const struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+        struct flow_match_ip match;
 
-        if (!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_IP))
+        if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP))
                 return 0;
 
         if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6) {
@@ -242,37 +379,38 @@
                 return -EINVAL;
         }
 
-        key = skb_flow_dissector_target(f->dissector,
-                                        FLOW_DISSECTOR_KEY_IP,
-                                        f->key);
-        mask = skb_flow_dissector_target(f->dissector,
-                                         FLOW_DISSECTOR_KEY_IP,
-                                         f->mask);
+        flow_rule_match_ip(rule, &match);
+
         mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_TTL_,
-                                       key->ttl, mask->ttl);
+                                       match.key->ttl, match.mask->ttl);
 
         mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_ECN,
-                                       key->tos & 0x3, mask->tos & 0x3);
+                                       match.key->tos & 0x3,
+                                       match.mask->tos & 0x3);
 
         mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_DSCP,
-                                       key->tos >> 6, mask->tos >> 6);
+                                       match.key->tos >> 2,
+                                       match.mask->tos >> 2);
 
         return 0;
 }
 
 static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
-                                 struct mlxsw_sp_acl_block *block,
+                                 struct mlxsw_sp_flow_block *block,
                                  struct mlxsw_sp_acl_rule_info *rulei,
-                                 struct tc_cls_flower_offload *f)
+                                 struct flow_cls_offload *f)
 {
+        struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+        struct flow_dissector *dissector = rule->match.dissector;
         u16 n_proto_mask = 0;
        u16 n_proto_key = 0;
         u16 addr_type = 0;
         u8 ip_proto = 0;
         int err;
 
-        if (f->dissector->used_keys &
-            ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
+        if (dissector->used_keys &
+            ~(BIT(FLOW_DISSECTOR_KEY_META) |
+              BIT(FLOW_DISSECTOR_KEY_CONTROL) |
              BIT(FLOW_DISSECTOR_KEY_BASIC) |
              BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
              BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
@@ -288,25 +426,23 @@
 
         mlxsw_sp_acl_rulei_priority(rulei, f->common.prio);
 
-        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
-                struct flow_dissector_key_control *key =
-                        skb_flow_dissector_target(f->dissector,
-                                                  FLOW_DISSECTOR_KEY_CONTROL,
-                                                  f->key);
-                addr_type = key->addr_type;
+        err = mlxsw_sp_flower_parse_meta(rulei, f, block);
+        if (err)
+                return err;
+
+        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
+                struct flow_match_control match;
+
+                flow_rule_match_control(rule, &match);
+                addr_type = match.key->addr_type;
         }
 
-        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
-                struct flow_dissector_key_basic *key =
-                        skb_flow_dissector_target(f->dissector,
-                                                  FLOW_DISSECTOR_KEY_BASIC,
-                                                  f->key);
-                struct flow_dissector_key_basic *mask =
-                        skb_flow_dissector_target(f->dissector,
-                                                  FLOW_DISSECTOR_KEY_BASIC,
-                                                  f->mask);
-                n_proto_key = ntohs(key->n_proto);
-                n_proto_mask = ntohs(mask->n_proto);
+        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+                struct flow_match_basic match;
+
+                flow_rule_match_basic(rule, &match);
+                n_proto_key = ntohs(match.key->n_proto);
+                n_proto_mask = ntohs(match.mask->n_proto);
 
                 if (n_proto_key == ETH_P_ALL) {
                         n_proto_key = 0;
@@ -316,60 +452,59 @@
                                                MLXSW_AFK_ELEMENT_ETHERTYPE,
                                                n_proto_key, n_proto_mask);
 
-                ip_proto = key->ip_proto;
+                ip_proto = match.key->ip_proto;
                 mlxsw_sp_acl_rulei_keymask_u32(rulei,
                                                MLXSW_AFK_ELEMENT_IP_PROTO,
-                                               key->ip_proto, mask->ip_proto);
+                                               match.key->ip_proto,
+                                               match.mask->ip_proto);
         }
 
-        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
-                struct flow_dissector_key_eth_addrs *key =
-                        skb_flow_dissector_target(f->dissector,
-                                                  FLOW_DISSECTOR_KEY_ETH_ADDRS,
-                                                  f->key);
-                struct flow_dissector_key_eth_addrs *mask =
-                        skb_flow_dissector_target(f->dissector,
-                                                  FLOW_DISSECTOR_KEY_ETH_ADDRS,
-                                                  f->mask);
+        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+                struct flow_match_eth_addrs match;
 
+                flow_rule_match_eth_addrs(rule, &match);
                 mlxsw_sp_acl_rulei_keymask_buf(rulei,
                                                MLXSW_AFK_ELEMENT_DMAC_32_47,
-                                               key->dst, mask->dst, 2);
+                                               match.key->dst,
+                                               match.mask->dst, 2);
                 mlxsw_sp_acl_rulei_keymask_buf(rulei,
                                                MLXSW_AFK_ELEMENT_DMAC_0_31,
-                                               key->dst + 2, mask->dst + 2, 4);
+                                               match.key->dst + 2,
+                                               match.mask->dst + 2, 4);
                 mlxsw_sp_acl_rulei_keymask_buf(rulei,
                                                MLXSW_AFK_ELEMENT_SMAC_32_47,
-                                               key->src, mask->src, 2);
+                                               match.key->src,
+                                               match.mask->src, 2);
                 mlxsw_sp_acl_rulei_keymask_buf(rulei,
                                                MLXSW_AFK_ELEMENT_SMAC_0_31,
-                                               key->src + 2, mask->src + 2, 4);
+                                               match.key->src + 2,
+                                               match.mask->src + 2, 4);
         }
 
-        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
-                struct flow_dissector_key_vlan *key =
-                        skb_flow_dissector_target(f->dissector,
-                                                  FLOW_DISSECTOR_KEY_VLAN,
-                                                  f->key);
-                struct flow_dissector_key_vlan *mask =
-                        skb_flow_dissector_target(f->dissector,
-                                                  FLOW_DISSECTOR_KEY_VLAN,
-                                                  f->mask);
+        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+                struct flow_match_vlan match;
 
-                if (mlxsw_sp_acl_block_is_egress_bound(block)) {
+                flow_rule_match_vlan(rule, &match);
+                if (mlxsw_sp_flow_block_is_egress_bound(block)) {
                         NL_SET_ERR_MSG_MOD(f->common.extack, "vlan_id key is not supported on egress");
                         return -EOPNOTSUPP;
                 }
-                if (mask->vlan_id != 0)
+
+                /* Forbid block with this rulei to be bound
+                 * to egress in future.
+                 */
+                rulei->egress_bind_blocker = 1;
+
+                if (match.mask->vlan_id != 0)
                         mlxsw_sp_acl_rulei_keymask_u32(rulei,
                                                        MLXSW_AFK_ELEMENT_VID,
-                                                       key->vlan_id,
-                                                       mask->vlan_id);
-                if (mask->vlan_priority != 0)
+                                                       match.key->vlan_id,
+                                                       match.mask->vlan_id);
+                if (match.mask->vlan_priority != 0)
                         mlxsw_sp_acl_rulei_keymask_u32(rulei,
                                                        MLXSW_AFK_ELEMENT_PCP,
-                                                       key->vlan_priority,
-                                                       mask->vlan_priority);
+                                                       match.key->vlan_priority,
+                                                       match.mask->vlan_priority);
         }
 
         if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS)
@@ -389,18 +524,51 @@
         if (err)
                 return err;
 
-        return mlxsw_sp_flower_parse_actions(mlxsw_sp, block, rulei, f->exts,
+        return mlxsw_sp_flower_parse_actions(mlxsw_sp, block, rulei,
+                                             &f->rule->action,
                                              f->common.extack);
 }
 
+static int mlxsw_sp_flower_mall_prio_check(struct mlxsw_sp_flow_block *block,
+                                           struct flow_cls_offload *f)
+{
+        bool ingress = mlxsw_sp_flow_block_is_ingress_bound(block);
+        unsigned int mall_min_prio;
+        unsigned int mall_max_prio;
+        int err;
+
+        err = mlxsw_sp_mall_prio_get(block, f->common.chain_index,
+                                     &mall_min_prio, &mall_max_prio);
+        if (err) {
+                if (err == -ENOENT)
+                        /* No matchall filters installed on this chain. */
+                        return 0;
+                NL_SET_ERR_MSG(f->common.extack, "Failed to get matchall priorities");
+                return err;
+        }
+        if (ingress && f->common.prio <= mall_min_prio) {
+                NL_SET_ERR_MSG(f->common.extack, "Failed to add in front of existing matchall rules");
+                return -EOPNOTSUPP;
+        }
+        if (!ingress && f->common.prio >= mall_max_prio) {
+                NL_SET_ERR_MSG(f->common.extack, "Failed to add behind of existing matchall rules");
+                return -EOPNOTSUPP;
+        }
+        return 0;
+}
+
 int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp,
-                            struct mlxsw_sp_acl_block *block,
-                            struct tc_cls_flower_offload *f)
+                            struct mlxsw_sp_flow_block *block,
+                            struct flow_cls_offload *f)
 {
         struct mlxsw_sp_acl_rule_info *rulei;
         struct mlxsw_sp_acl_ruleset *ruleset;
         struct mlxsw_sp_acl_rule *rule;
         int err;
+
+        err = mlxsw_sp_flower_mall_prio_check(block, f);
+        if (err)
+                return err;
 
         ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
                                            f->common.chain_index,
@@ -408,7 +576,7 @@
         if (IS_ERR(ruleset))
                 return PTR_ERR(ruleset);
 
-        rule = mlxsw_sp_acl_rule_create(mlxsw_sp, ruleset, f->cookie,
+        rule = mlxsw_sp_acl_rule_create(mlxsw_sp, ruleset, f->cookie, NULL,
                                         f->common.extack);
         if (IS_ERR(rule)) {
                 err = PTR_ERR(rule);
@@ -441,8 +609,8 @@
 }
 
 void mlxsw_sp_flower_destroy(struct mlxsw_sp *mlxsw_sp,
-                             struct mlxsw_sp_acl_block *block,
-                             struct tc_cls_flower_offload *f)
+                             struct mlxsw_sp_flow_block *block,
+                             struct flow_cls_offload *f)
 {
         struct mlxsw_sp_acl_ruleset *ruleset;
         struct mlxsw_sp_acl_rule *rule;
@@ -463,14 +631,16 @@
 }
 
 int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp,
-                          struct mlxsw_sp_acl_block *block,
-                          struct tc_cls_flower_offload *f)
+                          struct mlxsw_sp_flow_block *block,
+                          struct flow_cls_offload *f)
 {
+        enum flow_action_hw_stats used_hw_stats = FLOW_ACTION_HW_STATS_DISABLED;
         struct mlxsw_sp_acl_ruleset *ruleset;
         struct mlxsw_sp_acl_rule *rule;
         u64 packets;
         u64 lastuse;
         u64 bytes;
+        u64 drops;
         int err;
 
         ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
@@ -484,11 +654,12 @@
                 return -EINVAL;
 
         err = mlxsw_sp_acl_rule_get_stats(mlxsw_sp, rule, &packets, &bytes,
-                                          &lastuse);
+                                          &drops, &lastuse, &used_hw_stats);
         if (err)
                 goto err_rule_get_stats;
 
-        tcf_exts_stats_update(f->exts, bytes, packets, lastuse);
+        flow_stats_update(&f->stats, bytes, packets, drops, lastuse,
+                          used_hw_stats);
 
         mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
         return 0;
@@ -499,8 +670,8 @@
 }
 
 int mlxsw_sp_flower_tmplt_create(struct mlxsw_sp *mlxsw_sp,
-                                 struct mlxsw_sp_acl_block *block,
-                                 struct tc_cls_flower_offload *f)
+                                 struct mlxsw_sp_flow_block *block,
+                                 struct flow_cls_offload *f)
 {
         struct mlxsw_sp_acl_ruleset *ruleset;
         struct mlxsw_sp_acl_rule_info rulei;
@@ -520,8 +691,8 @@
 }
 
 void mlxsw_sp_flower_tmplt_destroy(struct mlxsw_sp *mlxsw_sp,
-                                   struct mlxsw_sp_acl_block *block,
-                                   struct tc_cls_flower_offload *f)
+                                   struct mlxsw_sp_flow_block *block,
+                                   struct flow_cls_offload *f)
 {
         struct mlxsw_sp_acl_ruleset *ruleset;
 
@@ -534,3 +705,23 @@
         mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
         mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
 }
+
+int mlxsw_sp_flower_prio_get(struct mlxsw_sp *mlxsw_sp,
+                             struct mlxsw_sp_flow_block *block,
+                             u32 chain_index, unsigned int *p_min_prio,
+                             unsigned int *p_max_prio)
+{
+        struct mlxsw_sp_acl_ruleset *ruleset;
+
+        ruleset = mlxsw_sp_acl_ruleset_lookup(mlxsw_sp, block,
+                                              chain_index,
+                                              MLXSW_SP_ACL_PROFILE_FLOWER);
+        if (IS_ERR(ruleset))
+                /* In case there are no flower rules, the caller
+                 * receives -ENOENT to indicate there is no need
+                 * to check the priorities.
+                 */
+                return PTR_ERR(ruleset);
+        mlxsw_sp_acl_ruleset_prio_get(ruleset, p_min_prio, p_max_prio);
+        return 0;
+}