forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-01-05 071106ecf68c401173c58808b1cf5f68cc50d390
kernel/drivers/net/ethernet/netronome/nfp/flower/action.c
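This diff squashes several upstream nfp flower changes into one: the action compiler moves from the tc_action/tcf_*() helpers to the flow_action_entry intermediate representation; every offload rejection now reports a reason through netlink extack; and support is added for MPLS push/pop/set actions, GRE and IPv6 tunnel encap, pre-tunnel rules on internal ports (gated on a ptype PACKET_HOST skbedit), and batched handling of consecutive pedit actions via struct nfp_flower_pedit_acts.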
@@ -1,43 +1,13 @@
-/*
- * Copyright (C) 2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      1. Redistributions of source code must retain the above
- *         copyright notice, this list of conditions and the following
- *         disclaimer.
- *
- *      2. Redistributions in binary form must reproduce the above
- *         copyright notice, this list of conditions and the following
- *         disclaimer in the documentation and/or other materials
- *         provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
 
 #include <linux/bitfield.h>
-#include <net/geneve.h>
+#include <linux/mpls.h>
 #include <net/pkt_cls.h>
-#include <net/switchdev.h>
 #include <net/tc_act/tc_csum.h>
 #include <net/tc_act/tc_gact.h>
 #include <net/tc_act/tc_mirred.h>
+#include <net/tc_act/tc_mpls.h>
 #include <net/tc_act/tc_pedit.h>
 #include <net/tc_act/tc_vlan.h>
 #include <net/tc_act/tc_tunnel_key.h>
@@ -52,10 +22,85 @@
 #define NFP_FL_TUNNEL_CSUM		cpu_to_be16(0x01)
 #define NFP_FL_TUNNEL_KEY		cpu_to_be16(0x04)
 #define NFP_FL_TUNNEL_GENEVE_OPT	cpu_to_be16(0x0800)
-#define NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS	IP_TUNNEL_INFO_TX
-#define NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS	(NFP_FL_TUNNEL_CSUM | \
+#define NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS	(IP_TUNNEL_INFO_TX | \
+						 IP_TUNNEL_INFO_IPV6)
+#define NFP_FL_SUPPORTED_UDP_TUN_FLAGS		(NFP_FL_TUNNEL_CSUM | \
 						 NFP_FL_TUNNEL_KEY | \
 						 NFP_FL_TUNNEL_GENEVE_OPT)
+
+static int
+nfp_fl_push_mpls(struct nfp_fl_push_mpls *push_mpls,
+		 const struct flow_action_entry *act,
+		 struct netlink_ext_ack *extack)
+{
+	size_t act_size = sizeof(struct nfp_fl_push_mpls);
+	u32 mpls_lse = 0;
+
+	push_mpls->head.jump_id = NFP_FL_ACTION_OPCODE_PUSH_MPLS;
+	push_mpls->head.len_lw = act_size >> NFP_FL_LW_SIZ;
+
+	/* BOS is optional in the TC action but required for offload. */
+	if (act->mpls_push.bos != ACT_MPLS_BOS_NOT_SET) {
+		mpls_lse |= act->mpls_push.bos << MPLS_LS_S_SHIFT;
+	} else {
+		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: BOS field must explicitly be set for MPLS push");
+		return -EOPNOTSUPP;
+	}
+
+	/* Leave MPLS TC as a default value of 0 if not explicitly set. */
+	if (act->mpls_push.tc != ACT_MPLS_TC_NOT_SET)
+		mpls_lse |= act->mpls_push.tc << MPLS_LS_TC_SHIFT;
+
+	/* Proto, label and TTL are enforced and verified for MPLS push. */
+	mpls_lse |= act->mpls_push.label << MPLS_LS_LABEL_SHIFT;
+	mpls_lse |= act->mpls_push.ttl << MPLS_LS_TTL_SHIFT;
+	push_mpls->ethtype = act->mpls_push.proto;
+	push_mpls->lse = cpu_to_be32(mpls_lse);
+
+	return 0;
+}
+
+static void
+nfp_fl_pop_mpls(struct nfp_fl_pop_mpls *pop_mpls,
+		const struct flow_action_entry *act)
+{
+	size_t act_size = sizeof(struct nfp_fl_pop_mpls);
+
+	pop_mpls->head.jump_id = NFP_FL_ACTION_OPCODE_POP_MPLS;
+	pop_mpls->head.len_lw = act_size >> NFP_FL_LW_SIZ;
+	pop_mpls->ethtype = act->mpls_pop.proto;
+}
+
+static void
+nfp_fl_set_mpls(struct nfp_fl_set_mpls *set_mpls,
+		const struct flow_action_entry *act)
+{
+	size_t act_size = sizeof(struct nfp_fl_set_mpls);
+	u32 mpls_lse = 0, mpls_mask = 0;
+
+	set_mpls->head.jump_id = NFP_FL_ACTION_OPCODE_SET_MPLS;
+	set_mpls->head.len_lw = act_size >> NFP_FL_LW_SIZ;
+
+	if (act->mpls_mangle.label != ACT_MPLS_LABEL_NOT_SET) {
+		mpls_lse |= act->mpls_mangle.label << MPLS_LS_LABEL_SHIFT;
+		mpls_mask |= MPLS_LS_LABEL_MASK;
+	}
+	if (act->mpls_mangle.tc != ACT_MPLS_TC_NOT_SET) {
+		mpls_lse |= act->mpls_mangle.tc << MPLS_LS_TC_SHIFT;
+		mpls_mask |= MPLS_LS_TC_MASK;
+	}
+	if (act->mpls_mangle.bos != ACT_MPLS_BOS_NOT_SET) {
+		mpls_lse |= act->mpls_mangle.bos << MPLS_LS_S_SHIFT;
+		mpls_mask |= MPLS_LS_S_MASK;
+	}
+	if (act->mpls_mangle.ttl) {
+		mpls_lse |= act->mpls_mangle.ttl << MPLS_LS_TTL_SHIFT;
+		mpls_mask |= MPLS_LS_TTL_MASK;
+	}
+
+	set_mpls->lse = cpu_to_be32(mpls_lse);
+	set_mpls->lse_mask = cpu_to_be32(mpls_mask);
+}
 
 static void nfp_fl_pop_vlan(struct nfp_fl_pop_vlan *pop_vlan)
 {
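A note on the MPLS helpers added above: nfp_fl_push_mpls() packs the 32-bit label stack entry itself. The following standalone userspace sketch (not driver code; the input values are made up) shows the same packing with the MPLS_LS_* shifts from the uapi header <linux/mpls.h>:

#include <linux/mpls.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Example inputs: label 16000, traffic class 5, bottom-of-stack, ttl 64. */
	uint32_t label = 16000, tc = 5, bos = 1, ttl = 64;
	uint32_t lse = 0;

	lse |= label << MPLS_LS_LABEL_SHIFT;	/* bits 31:12 */
	lse |= tc << MPLS_LS_TC_SHIFT;		/* bits 11:9  */
	lse |= bos << MPLS_LS_S_SHIFT;		/* bit  8     */
	lse |= ttl << MPLS_LS_TTL_SHIFT;	/* bits 7:0   */

	printf("lse = 0x%08x\n", lse);		/* prints 0x03e80b40 */
	return 0;
}

The driver then stores cpu_to_be32(mpls_lse) so the entry reaches the firmware in network byte order.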
@@ -68,7 +113,7 @@
 
 static void
 nfp_fl_push_vlan(struct nfp_fl_push_vlan *push_vlan,
-		 const struct tc_action *action)
+		 const struct flow_action_entry *act)
 {
 	size_t act_size = sizeof(struct nfp_fl_push_vlan);
 	u16 tmp_push_vlan_tci;
@@ -76,29 +121,32 @@
 	push_vlan->head.jump_id = NFP_FL_ACTION_OPCODE_PUSH_VLAN;
 	push_vlan->head.len_lw = act_size >> NFP_FL_LW_SIZ;
 	push_vlan->reserved = 0;
-	push_vlan->vlan_tpid = tcf_vlan_push_proto(action);
+	push_vlan->vlan_tpid = act->vlan.proto;
 
 	tmp_push_vlan_tci =
-		FIELD_PREP(NFP_FL_PUSH_VLAN_PRIO, tcf_vlan_push_prio(action)) |
-		FIELD_PREP(NFP_FL_PUSH_VLAN_VID, tcf_vlan_push_vid(action));
+		FIELD_PREP(NFP_FL_PUSH_VLAN_PRIO, act->vlan.prio) |
+		FIELD_PREP(NFP_FL_PUSH_VLAN_VID, act->vlan.vid);
 	push_vlan->vlan_tci = cpu_to_be16(tmp_push_vlan_tci);
 }
 
 static int
-nfp_fl_pre_lag(struct nfp_app *app, const struct tc_action *action,
-	       struct nfp_fl_payload *nfp_flow, int act_len)
+nfp_fl_pre_lag(struct nfp_app *app, const struct flow_action_entry *act,
+	       struct nfp_fl_payload *nfp_flow, int act_len,
+	       struct netlink_ext_ack *extack)
 {
 	size_t act_size = sizeof(struct nfp_fl_pre_lag);
 	struct nfp_fl_pre_lag *pre_lag;
 	struct net_device *out_dev;
 	int err;
 
-	out_dev = tcf_mirred_dev(action);
+	out_dev = act->dev;
 	if (!out_dev || !netif_is_lag_master(out_dev))
 		return 0;
 
-	if (act_len + act_size > NFP_FL_MAX_A_SIZ)
+	if (act_len + act_size > NFP_FL_MAX_A_SIZ) {
+		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at LAG action");
 		return -EOPNOTSUPP;
+	}
 
 	/* Pre_lag action must be first on action list.
 	 * If other actions already exist they need pushed forward.
@@ -108,7 +156,7 @@
 			nfp_flow->action_data, act_len);
 
 	pre_lag = (struct nfp_fl_pre_lag *)nfp_flow->action_data;
-	err = nfp_flower_lag_populate_pre_action(app, out_dev, pre_lag);
+	err = nfp_flower_lag_populate_pre_action(app, out_dev, pre_lag, extack);
 	if (err)
 		return err;
 
@@ -120,26 +168,13 @@
 	return act_size;
 }
 
-static bool nfp_fl_netdev_is_tunnel_type(struct net_device *out_dev,
-					 enum nfp_flower_tun_type tun_type)
-{
-	if (!out_dev->rtnl_link_ops)
-		return false;
-
-	if (!strcmp(out_dev->rtnl_link_ops->kind, "vxlan"))
-		return tun_type == NFP_FL_TUNNEL_VXLAN;
-
-	if (!strcmp(out_dev->rtnl_link_ops->kind, "geneve"))
-		return tun_type == NFP_FL_TUNNEL_GENEVE;
-
-	return false;
-}
-
 static int
 nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output,
-	      const struct tc_action *action, struct nfp_fl_payload *nfp_flow,
+	      const struct flow_action_entry *act,
+	      struct nfp_fl_payload *nfp_flow,
 	      bool last, struct net_device *in_dev,
-	      enum nfp_flower_tun_type tun_type, int *tun_out_cnt)
+	      enum nfp_flower_tun_type tun_type, int *tun_out_cnt,
+	      bool pkt_host, struct netlink_ext_ack *extack)
 {
 	size_t act_size = sizeof(struct nfp_fl_output);
 	struct nfp_flower_priv *priv = app->priv;
@@ -149,68 +184,120 @@
 	output->head.jump_id = NFP_FL_ACTION_OPCODE_OUTPUT;
 	output->head.len_lw = act_size >> NFP_FL_LW_SIZ;
 
-	out_dev = tcf_mirred_dev(action);
-	if (!out_dev)
+	out_dev = act->dev;
+	if (!out_dev) {
+		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid egress interface for mirred action");
 		return -EOPNOTSUPP;
+	}
 
 	tmp_flags = last ? NFP_FL_OUT_FLAGS_LAST : 0;
 
 	if (tun_type) {
 		/* Verify the egress netdev matches the tunnel type. */
-		if (!nfp_fl_netdev_is_tunnel_type(out_dev, tun_type))
+		if (!nfp_fl_netdev_is_tunnel_type(out_dev, tun_type)) {
+			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: egress interface does not match the required tunnel type");
 			return -EOPNOTSUPP;
+		}
 
-		if (*tun_out_cnt)
+		if (*tun_out_cnt) {
+			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot offload more than one tunnel mirred output per filter");
 			return -EOPNOTSUPP;
+		}
 		(*tun_out_cnt)++;
 
 		output->flags = cpu_to_be16(tmp_flags |
 					    NFP_FL_OUT_FLAGS_USE_TUN);
 		output->port = cpu_to_be32(NFP_FL_PORT_TYPE_TUN | tun_type);
 	} else if (netif_is_lag_master(out_dev) &&
-		   priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
+		   priv->flower_en_feats & NFP_FL_ENABLE_LAG) {
 		int gid;
 
 		output->flags = cpu_to_be16(tmp_flags);
 		gid = nfp_flower_lag_get_output_id(app, out_dev);
-		if (gid < 0)
+		if (gid < 0) {
+			NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot find group id for LAG action");
 			return gid;
+		}
 		output->port = cpu_to_be32(NFP_FL_LAG_OUT | gid);
+	} else if (nfp_flower_internal_port_can_offload(app, out_dev)) {
+		if (!(priv->flower_ext_feats & NFP_FL_FEATS_PRE_TUN_RULES)) {
+			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pre-tunnel rules not supported in loaded firmware");
+			return -EOPNOTSUPP;
+		}
+
+		if (nfp_flow->pre_tun_rule.dev || !pkt_host) {
+			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pre-tunnel rules require single egress dev and ptype HOST action");
+			return -EOPNOTSUPP;
+		}
+
+		nfp_flow->pre_tun_rule.dev = out_dev;
+
+		return 0;
 	} else {
 		/* Set action output parameters. */
 		output->flags = cpu_to_be16(tmp_flags);
 
-		/* Only offload if egress ports are on the same device as the
-		 * ingress port.
-		 */
-		if (!switchdev_port_same_parent_id(in_dev, out_dev))
+		if (nfp_netdev_is_nfp_repr(in_dev)) {
+			/* Confirm ingress and egress are on same device. */
+			if (!netdev_port_same_parent_id(in_dev, out_dev)) {
+				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: ingress and egress interfaces are on different devices");
+				return -EOPNOTSUPP;
+			}
+		}
+
+		if (!nfp_netdev_is_nfp_repr(out_dev)) {
+			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: egress interface is not an nfp port");
 			return -EOPNOTSUPP;
-		if (!nfp_netdev_is_nfp_repr(out_dev))
-			return -EOPNOTSUPP;
+		}
 
 		output->port = cpu_to_be32(nfp_repr_get_port_id(out_dev));
-		if (!output->port)
+		if (!output->port) {
+			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid port id for egress interface");
 			return -EOPNOTSUPP;
+		}
 	}
 	nfp_flow->meta.shortcut = output->port;
 
 	return 0;
 }
 
-static enum nfp_flower_tun_type
-nfp_fl_get_tun_from_act_l4_port(struct nfp_app *app,
-				const struct tc_action *action)
+static bool
+nfp_flower_tun_is_gre(struct flow_cls_offload *flow, int start_idx)
 {
-	struct ip_tunnel_info *tun = tcf_tunnel_info(action);
+	struct flow_action_entry *act = flow->rule->action.entries;
+	int num_act = flow->rule->action.num_entries;
+	int act_idx;
+
+	/* Preparse action list for next mirred or redirect action */
+	for (act_idx = start_idx + 1; act_idx < num_act; act_idx++)
+		if (act[act_idx].id == FLOW_ACTION_REDIRECT ||
+		    act[act_idx].id == FLOW_ACTION_MIRRED)
+			return netif_is_gretap(act[act_idx].dev);
+
+	return false;
+}
+
+static enum nfp_flower_tun_type
+nfp_fl_get_tun_from_act(struct nfp_app *app,
+			struct flow_cls_offload *flow,
+			const struct flow_action_entry *act, int act_idx)
+{
+	const struct ip_tunnel_info *tun = act->tunnel;
 	struct nfp_flower_priv *priv = app->priv;
 
+	/* Determine the tunnel type based on the egress netdev
+	 * in the mirred action for tunnels without l4.
+	 */
+	if (nfp_flower_tun_is_gre(flow, act_idx))
+		return NFP_FL_TUNNEL_GRE;
+
 	switch (tun->key.tp_dst) {
-	case htons(NFP_FL_VXLAN_PORT):
+	case htons(IANA_VXLAN_UDP_PORT):
 		return NFP_FL_TUNNEL_VXLAN;
-	case htons(NFP_FL_GENEVE_PORT):
+	case htons(GENEVE_UDP_PORT):
 		if (priv->flower_ext_feats & NFP_FL_FEATS_GENEVE)
 			return NFP_FL_TUNNEL_GENEVE;
-		/* FALLTHROUGH */
+		fallthrough;
 	default:
 		return NFP_FL_TUNNEL_NONE;
 	}
@@ -239,9 +326,10 @@
 
 static int
 nfp_fl_push_geneve_options(struct nfp_fl_payload *nfp_fl, int *list_len,
-			   const struct tc_action *action)
+			   const struct flow_action_entry *act,
+			   struct netlink_ext_ack *extack)
 {
-	struct ip_tunnel_info *ip_tun = tcf_tunnel_info(action);
+	struct ip_tunnel_info *ip_tun = (struct ip_tunnel_info *)act->tunnel;
 	int opt_len, opt_cnt, act_start, tot_push_len;
 	u8 *src = ip_tunnel_info_opts(ip_tun);
 
@@ -257,20 +345,26 @@
 		struct geneve_opt *opt = (struct geneve_opt *)src;
 
 		opt_cnt++;
-		if (opt_cnt > NFP_FL_MAX_GENEVE_OPT_CNT)
+		if (opt_cnt > NFP_FL_MAX_GENEVE_OPT_CNT) {
+			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed number of geneve options exceeded");
 			return -EOPNOTSUPP;
+		}
 
 		tot_push_len += sizeof(struct nfp_fl_push_geneve) +
 				opt->length * 4;
-		if (tot_push_len > NFP_FL_MAX_GENEVE_OPT_ACT)
+		if (tot_push_len > NFP_FL_MAX_GENEVE_OPT_ACT) {
+			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at push geneve options");
 			return -EOPNOTSUPP;
+		}
 
 		opt_len -= sizeof(struct geneve_opt) + opt->length * 4;
 		src += sizeof(struct geneve_opt) + opt->length * 4;
 	}
 
-	if (*list_len + tot_push_len > NFP_FL_MAX_A_SIZ)
+	if (*list_len + tot_push_len > NFP_FL_MAX_A_SIZ) {
+		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at push geneve options");
 		return -EOPNOTSUPP;
+	}
 
 	act_start = *list_len;
 	*list_len += tot_push_len;
@@ -301,41 +395,65 @@
 }
 
 static int
-nfp_fl_set_ipv4_udp_tun(struct nfp_app *app,
-			struct nfp_fl_set_ipv4_udp_tun *set_tun,
-			const struct tc_action *action,
-			struct nfp_fl_pre_tunnel *pre_tun,
-			enum nfp_flower_tun_type tun_type,
-			struct net_device *netdev)
+nfp_fl_set_tun(struct nfp_app *app, struct nfp_fl_set_tun *set_tun,
+	       const struct flow_action_entry *act,
+	       struct nfp_fl_pre_tunnel *pre_tun,
+	       enum nfp_flower_tun_type tun_type,
+	       struct net_device *netdev, struct netlink_ext_ack *extack)
 {
-	size_t act_size = sizeof(struct nfp_fl_set_ipv4_udp_tun);
-	struct ip_tunnel_info *ip_tun = tcf_tunnel_info(action);
+	const struct ip_tunnel_info *ip_tun = act->tunnel;
+	bool ipv6 = ip_tunnel_info_af(ip_tun) == AF_INET6;
+	size_t act_size = sizeof(struct nfp_fl_set_tun);
 	struct nfp_flower_priv *priv = app->priv;
 	u32 tmp_set_ip_tun_type_index = 0;
 	/* Currently support one pre-tunnel so index is always 0. */
 	int pretun_idx = 0;
+
+	if (!IS_ENABLED(CONFIG_IPV6) && ipv6)
+		return -EOPNOTSUPP;
+
+	if (ipv6 && !(priv->flower_ext_feats & NFP_FL_FEATS_IPV6_TUN))
+		return -EOPNOTSUPP;
 
 	BUILD_BUG_ON(NFP_FL_TUNNEL_CSUM != TUNNEL_CSUM ||
 		     NFP_FL_TUNNEL_KEY != TUNNEL_KEY ||
 		     NFP_FL_TUNNEL_GENEVE_OPT != TUNNEL_GENEVE_OPT);
 	if (ip_tun->options_len &&
 	    (tun_type != NFP_FL_TUNNEL_GENEVE ||
-	     !(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT)))
+	     !(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT))) {
+		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support geneve options offload");
 		return -EOPNOTSUPP;
+	}
 
-	set_tun->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL;
+	set_tun->head.jump_id = NFP_FL_ACTION_OPCODE_SET_TUNNEL;
 	set_tun->head.len_lw = act_size >> NFP_FL_LW_SIZ;
 
 	/* Set tunnel type and pre-tunnel index. */
 	tmp_set_ip_tun_type_index |=
-		FIELD_PREP(NFP_FL_IPV4_TUNNEL_TYPE, tun_type) |
-		FIELD_PREP(NFP_FL_IPV4_PRE_TUN_INDEX, pretun_idx);
+		FIELD_PREP(NFP_FL_TUNNEL_TYPE, tun_type) |
+		FIELD_PREP(NFP_FL_PRE_TUN_INDEX, pretun_idx);
 
 	set_tun->tun_type_index = cpu_to_be32(tmp_set_ip_tun_type_index);
 	set_tun->tun_id = ip_tun->key.tun_id;
 
 	if (ip_tun->key.ttl) {
 		set_tun->ttl = ip_tun->key.ttl;
+#ifdef CONFIG_IPV6
+	} else if (ipv6) {
+		struct net *net = dev_net(netdev);
+		struct flowi6 flow = {};
+		struct dst_entry *dst;
+
+		flow.daddr = ip_tun->key.u.ipv6.dst;
+		flow.flowi4_proto = IPPROTO_UDP;
+		dst = ipv6_stub->ipv6_dst_lookup_flow(net, NULL, &flow, NULL);
+		if (!IS_ERR(dst)) {
+			set_tun->ttl = ip6_dst_hoplimit(dst);
+			dst_release(dst);
+		} else {
+			set_tun->ttl = net->ipv6.devconf_all->hop_limit;
+		}
+#endif
 	} else {
 		struct net *net = dev_net(netdev);
 		struct flowi4 flow = {};
@@ -361,8 +479,10 @@
 	set_tun->tos = ip_tun->key.tos;
 
 	if (!(ip_tun->key.tun_flags & NFP_FL_TUNNEL_KEY) ||
-	    ip_tun->key.tun_flags & ~NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS)
+	    ip_tun->key.tun_flags & ~NFP_FL_SUPPORTED_UDP_TUN_FLAGS) {
+		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support tunnel flag offload");
 		return -EOPNOTSUPP;
+	}
 	set_tun->tun_flags = ip_tun->key.tun_flags;
 
 	if (tun_type == NFP_FL_TUNNEL_GENEVE) {
@@ -371,7 +491,12 @@
 	}
 
 	/* Complete pre_tunnel action. */
-	pre_tun->ipv4_dst = ip_tun->key.u.ipv4.dst;
+	if (ipv6) {
+		pre_tun->flags |= cpu_to_be16(NFP_FL_PRE_TUN_IPV6);
+		pre_tun->ipv6_dst = ip_tun->key.u.ipv6.dst;
+	} else {
+		pre_tun->ipv4_dst = ip_tun->key.u.ipv4.dst;
+	}
 
 	return 0;
 }
@@ -389,19 +514,23 @@
 }
 
 static int
-nfp_fl_set_eth(const struct tc_action *action, int idx, u32 off,
-	       struct nfp_fl_set_eth *set_eth)
+nfp_fl_set_eth(const struct flow_action_entry *act, u32 off,
+	       struct nfp_fl_set_eth *set_eth, struct netlink_ext_ack *extack)
 {
 	u32 exact, mask;
 
-	if (off + 4 > ETH_ALEN * 2)
+	if (off + 4 > ETH_ALEN * 2) {
+		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit ethernet action");
 		return -EOPNOTSUPP;
+	}
 
-	mask = ~tcf_pedit_mask(action, idx);
-	exact = tcf_pedit_val(action, idx);
+	mask = ~act->mangle.mask;
+	exact = act->mangle.val;
 
-	if (exact & ~mask)
+	if (exact & ~mask) {
+		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit ethernet action");
 		return -EOPNOTSUPP;
+	}
 
 	nfp_fl_set_helper32(exact, mask, &set_eth->eth_addr_val[off],
 			    &set_eth->eth_addr_mask[off]);
@@ -413,37 +542,89 @@
 	return 0;
 }
 
+struct ipv4_ttl_word {
+	__u8	ttl;
+	__u8	protocol;
+	__sum16	check;
+};
+
 static int
-nfp_fl_set_ip4(const struct tc_action *action, int idx, u32 off,
-	       struct nfp_fl_set_ip4_addrs *set_ip_addr)
+nfp_fl_set_ip4(const struct flow_action_entry *act, u32 off,
+	       struct nfp_fl_set_ip4_addrs *set_ip_addr,
+	       struct nfp_fl_set_ip4_ttl_tos *set_ip_ttl_tos,
+	       struct netlink_ext_ack *extack)
 {
+	struct ipv4_ttl_word *ttl_word_mask;
+	struct ipv4_ttl_word *ttl_word;
+	struct iphdr *tos_word_mask;
+	struct iphdr *tos_word;
 	__be32 exact, mask;
 
 	/* We are expecting tcf_pedit to return a big endian value */
-	mask = (__force __be32)~tcf_pedit_mask(action, idx);
-	exact = (__force __be32)tcf_pedit_val(action, idx);
+	mask = (__force __be32)~act->mangle.mask;
+	exact = (__force __be32)act->mangle.val;
 
-	if (exact & ~mask)
+	if (exact & ~mask) {
+		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv4 action");
 		return -EOPNOTSUPP;
+	}
 
 	switch (off) {
 	case offsetof(struct iphdr, daddr):
 		set_ip_addr->ipv4_dst_mask |= mask;
 		set_ip_addr->ipv4_dst &= ~mask;
 		set_ip_addr->ipv4_dst |= exact & mask;
+		set_ip_addr->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS;
+		set_ip_addr->head.len_lw = sizeof(*set_ip_addr) >>
+					   NFP_FL_LW_SIZ;
 		break;
 	case offsetof(struct iphdr, saddr):
 		set_ip_addr->ipv4_src_mask |= mask;
 		set_ip_addr->ipv4_src &= ~mask;
 		set_ip_addr->ipv4_src |= exact & mask;
+		set_ip_addr->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS;
+		set_ip_addr->head.len_lw = sizeof(*set_ip_addr) >>
+					   NFP_FL_LW_SIZ;
+		break;
+	case offsetof(struct iphdr, ttl):
+		ttl_word_mask = (struct ipv4_ttl_word *)&mask;
+		ttl_word = (struct ipv4_ttl_word *)&exact;
+
+		if (ttl_word_mask->protocol || ttl_word_mask->check) {
+			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv4 ttl action");
+			return -EOPNOTSUPP;
+		}
+
+		set_ip_ttl_tos->ipv4_ttl_mask |= ttl_word_mask->ttl;
+		set_ip_ttl_tos->ipv4_ttl &= ~ttl_word_mask->ttl;
+		set_ip_ttl_tos->ipv4_ttl |= ttl_word->ttl & ttl_word_mask->ttl;
+		set_ip_ttl_tos->head.jump_id =
+			NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS;
+		set_ip_ttl_tos->head.len_lw = sizeof(*set_ip_ttl_tos) >>
+					      NFP_FL_LW_SIZ;
+		break;
+	case round_down(offsetof(struct iphdr, tos), 4):
+		tos_word_mask = (struct iphdr *)&mask;
+		tos_word = (struct iphdr *)&exact;
+
+		if (tos_word_mask->version || tos_word_mask->ihl ||
+		    tos_word_mask->tot_len) {
+			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv4 tos action");
+			return -EOPNOTSUPP;
+		}
+
+		set_ip_ttl_tos->ipv4_tos_mask |= tos_word_mask->tos;
+		set_ip_ttl_tos->ipv4_tos &= ~tos_word_mask->tos;
+		set_ip_ttl_tos->ipv4_tos |= tos_word->tos & tos_word_mask->tos;
+		set_ip_ttl_tos->head.jump_id =
+			NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS;
+		set_ip_ttl_tos->head.len_lw = sizeof(*set_ip_ttl_tos) >>
+					      NFP_FL_LW_SIZ;
 		break;
 	default:
+		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pedit on unsupported section of IPv4 header");
 		return -EOPNOTSUPP;
 	}
-
-	set_ip_addr->reserved = cpu_to_be16(0);
-	set_ip_addr->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS;
-	set_ip_addr->head.len_lw = sizeof(*set_ip_addr) >> NFP_FL_LW_SIZ;
 
 	return 0;
 }
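On the ttl/tos cases added above: pedit hands the driver whole 32-bit words, so a "set ttl" mangle arrives as the word at offsetof(struct iphdr, ttl) == 8, which also spans protocol and check; the ipv4_ttl_word overlay is how the driver insists those neighbouring fields stay masked out (likewise round_down(offsetof(struct iphdr, tos), 4) for the word holding tos). A standalone sketch of the overlay check, with made-up values and userspace types:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct ipv4_ttl_word {		/* mirrors the driver's overlay */
	uint8_t  ttl;
	uint8_t  protocol;
	uint16_t check;
};

int main(void)
{
	/* Driver-side mask (~act->mangle.mask): 0xff marks bytes being
	 * rewritten. Here only ttl is rewritten, so the edit passes.
	 */
	uint8_t mask_bytes[4] = { 0xff, 0x00, 0x00, 0x00 };
	struct ipv4_ttl_word m;

	memcpy(&m, mask_bytes, sizeof(m));
	if (m.protocol || m.check)
		puts("rejected: pedit also touches protocol/check");
	else
		puts("accepted: only ttl is rewritten");
	return 0;
}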
@@ -461,23 +642,77 @@
 	ip6->head.len_lw = sizeof(*ip6) >> NFP_FL_LW_SIZ;
 }
 
+struct ipv6_hop_limit_word {
+	__be16	payload_len;
+	u8	nexthdr;
+	u8	hop_limit;
+};
+
 static int
-nfp_fl_set_ip6(const struct tc_action *action, int idx, u32 off,
+nfp_fl_set_ip6_hop_limit_flow_label(u32 off, __be32 exact, __be32 mask,
+				    struct nfp_fl_set_ipv6_tc_hl_fl *ip_hl_fl,
+				    struct netlink_ext_ack *extack)
+{
+	struct ipv6_hop_limit_word *fl_hl_mask;
+	struct ipv6_hop_limit_word *fl_hl;
+
+	switch (off) {
+	case offsetof(struct ipv6hdr, payload_len):
+		fl_hl_mask = (struct ipv6_hop_limit_word *)&mask;
+		fl_hl = (struct ipv6_hop_limit_word *)&exact;
+
+		if (fl_hl_mask->nexthdr || fl_hl_mask->payload_len) {
+			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv6 hop limit action");
+			return -EOPNOTSUPP;
+		}
+
+		ip_hl_fl->ipv6_hop_limit_mask |= fl_hl_mask->hop_limit;
+		ip_hl_fl->ipv6_hop_limit &= ~fl_hl_mask->hop_limit;
+		ip_hl_fl->ipv6_hop_limit |= fl_hl->hop_limit &
+					    fl_hl_mask->hop_limit;
+		break;
+	case round_down(offsetof(struct ipv6hdr, flow_lbl), 4):
+		if (mask & ~IPV6_FLOW_LABEL_MASK ||
+		    exact & ~IPV6_FLOW_LABEL_MASK) {
+			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv6 flow label action");
+			return -EOPNOTSUPP;
+		}
+
+		ip_hl_fl->ipv6_label_mask |= mask;
+		ip_hl_fl->ipv6_label &= ~mask;
+		ip_hl_fl->ipv6_label |= exact & mask;
+		break;
+	}
+
+	ip_hl_fl->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV6_TC_HL_FL;
+	ip_hl_fl->head.len_lw = sizeof(*ip_hl_fl) >> NFP_FL_LW_SIZ;
+
+	return 0;
+}
+
+static int
+nfp_fl_set_ip6(const struct flow_action_entry *act, u32 off,
 	       struct nfp_fl_set_ipv6_addr *ip_dst,
-	       struct nfp_fl_set_ipv6_addr *ip_src)
+	       struct nfp_fl_set_ipv6_addr *ip_src,
+	       struct nfp_fl_set_ipv6_tc_hl_fl *ip_hl_fl,
+	       struct netlink_ext_ack *extack)
 {
 	__be32 exact, mask;
+	int err = 0;
 	u8 word;
 
 	/* We are expecting tcf_pedit to return a big endian value */
-	mask = (__force __be32)~tcf_pedit_mask(action, idx);
-	exact = (__force __be32)tcf_pedit_val(action, idx);
+	mask = (__force __be32)~act->mangle.mask;
+	exact = (__force __be32)act->mangle.val;
 
-	if (exact & ~mask)
+	if (exact & ~mask) {
+		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv6 action");
 		return -EOPNOTSUPP;
+	}
 
 	if (off < offsetof(struct ipv6hdr, saddr)) {
-		return -EOPNOTSUPP;
+		err = nfp_fl_set_ip6_hop_limit_flow_label(off, exact, mask,
+							  ip_hl_fl, extack);
 	} else if (off < offsetof(struct ipv6hdr, daddr)) {
 		word = (off - offsetof(struct ipv6hdr, saddr)) / sizeof(exact);
 		nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_SRC, word,
@@ -488,26 +723,32 @@
 		nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_DST, word,
 				      exact, mask, ip_dst);
 	} else {
+		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pedit on unsupported section of IPv6 header");
 		return -EOPNOTSUPP;
 	}
 
-	return 0;
+	return err;
 }
 
 static int
-nfp_fl_set_tport(const struct tc_action *action, int idx, u32 off,
-		 struct nfp_fl_set_tport *set_tport, int opcode)
+nfp_fl_set_tport(const struct flow_action_entry *act, u32 off,
+		 struct nfp_fl_set_tport *set_tport, int opcode,
+		 struct netlink_ext_ack *extack)
 {
 	u32 exact, mask;
 
-	if (off)
+	if (off) {
+		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pedit on unsupported section of L4 header");
 		return -EOPNOTSUPP;
+	}
 
-	mask = ~tcf_pedit_mask(action, idx);
-	exact = tcf_pedit_val(action, idx);
+	mask = ~act->mangle.mask;
+	exact = act->mangle.val;
 
-	if (exact & ~mask)
+	if (exact & ~mask) {
+		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit L4 action");
 		return -EOPNOTSUPP;
+	}
 
 	nfp_fl_set_helper32(exact, mask, set_tport->tp_port_val,
 			    set_tport->tp_port_mask);
@@ -537,122 +778,107 @@
 	}
 }
 
-static int
-nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
-	     char *nfp_action, int *a_len, u32 *csum_updated)
-{
+struct nfp_flower_pedit_acts {
 	struct nfp_fl_set_ipv6_addr set_ip6_dst, set_ip6_src;
+	struct nfp_fl_set_ipv6_tc_hl_fl set_ip6_tc_hl_fl;
+	struct nfp_fl_set_ip4_ttl_tos set_ip_ttl_tos;
 	struct nfp_fl_set_ip4_addrs set_ip_addr;
 	struct nfp_fl_set_tport set_tport;
 	struct nfp_fl_set_eth set_eth;
-	enum pedit_header_type htype;
-	int idx, nkeys, err;
+};
+
+static int
+nfp_fl_commit_mangle(struct flow_cls_offload *flow, char *nfp_action,
+		     int *a_len, struct nfp_flower_pedit_acts *set_act,
+		     u32 *csum_updated)
+{
+	struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
 	size_t act_size = 0;
-	u32 offset, cmd;
 	u8 ip_proto = 0;
 
-	memset(&set_ip6_dst, 0, sizeof(set_ip6_dst));
-	memset(&set_ip6_src, 0, sizeof(set_ip6_src));
-	memset(&set_ip_addr, 0, sizeof(set_ip_addr));
-	memset(&set_tport, 0, sizeof(set_tport));
-	memset(&set_eth, 0, sizeof(set_eth));
-	nkeys = tcf_pedit_nkeys(action);
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+		struct flow_match_basic match;
 
-	for (idx = 0; idx < nkeys; idx++) {
-		cmd = tcf_pedit_cmd(action, idx);
-		htype = tcf_pedit_htype(action, idx);
-		offset = tcf_pedit_offset(action, idx);
-
-		if (cmd != TCA_PEDIT_KEY_EX_CMD_SET)
-			return -EOPNOTSUPP;
-
-		switch (htype) {
-		case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
-			err = nfp_fl_set_eth(action, idx, offset, &set_eth);
-			break;
-		case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
-			err = nfp_fl_set_ip4(action, idx, offset, &set_ip_addr);
-			break;
-		case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
-			err = nfp_fl_set_ip6(action, idx, offset, &set_ip6_dst,
-					     &set_ip6_src);
-			break;
-		case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
-			err = nfp_fl_set_tport(action, idx, offset, &set_tport,
-					       NFP_FL_ACTION_OPCODE_SET_TCP);
-			break;
-		case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
-			err = nfp_fl_set_tport(action, idx, offset, &set_tport,
-					       NFP_FL_ACTION_OPCODE_SET_UDP);
-			break;
-		default:
-			return -EOPNOTSUPP;
-		}
-		if (err)
-			return err;
+		flow_rule_match_basic(rule, &match);
+		ip_proto = match.key->ip_proto;
 	}
 
-	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
-		struct flow_dissector_key_basic *basic;
-
-		basic = skb_flow_dissector_target(flow->dissector,
-						  FLOW_DISSECTOR_KEY_BASIC,
-						  flow->key);
-		ip_proto = basic->ip_proto;
-	}
-
-	if (set_eth.head.len_lw) {
-		act_size = sizeof(set_eth);
-		memcpy(nfp_action, &set_eth, act_size);
+	if (set_act->set_eth.head.len_lw) {
+		act_size = sizeof(set_act->set_eth);
+		memcpy(nfp_action, &set_act->set_eth, act_size);
 		*a_len += act_size;
 	}
-	if (set_ip_addr.head.len_lw) {
+
+	if (set_act->set_ip_ttl_tos.head.len_lw) {
 		nfp_action += act_size;
-		act_size = sizeof(set_ip_addr);
-		memcpy(nfp_action, &set_ip_addr, act_size);
+		act_size = sizeof(set_act->set_ip_ttl_tos);
+		memcpy(nfp_action, &set_act->set_ip_ttl_tos, act_size);
 		*a_len += act_size;
 
 		/* Hardware will automatically fix IPv4 and TCP/UDP checksum. */
 		*csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR |
 				 nfp_fl_csum_l4_to_flag(ip_proto);
 	}
-	if (set_ip6_dst.head.len_lw && set_ip6_src.head.len_lw) {
-		/* TC compiles set src and dst IPv6 address as a single action,
-		 * the hardware requires this to be 2 separate actions.
-		 */
+
+	if (set_act->set_ip_addr.head.len_lw) {
 		nfp_action += act_size;
-		act_size = sizeof(set_ip6_src);
-		memcpy(nfp_action, &set_ip6_src, act_size);
+		act_size = sizeof(set_act->set_ip_addr);
+		memcpy(nfp_action, &set_act->set_ip_addr, act_size);
 		*a_len += act_size;
 
-		act_size = sizeof(set_ip6_dst);
-		memcpy(&nfp_action[sizeof(set_ip6_src)], &set_ip6_dst,
-		       act_size);
-		*a_len += act_size;
+		/* Hardware will automatically fix IPv4 and TCP/UDP checksum. */
+		*csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR |
+				 nfp_fl_csum_l4_to_flag(ip_proto);
+	}
 
-		/* Hardware will automatically fix TCP/UDP checksum. */
-		*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
-	} else if (set_ip6_dst.head.len_lw) {
+	if (set_act->set_ip6_tc_hl_fl.head.len_lw) {
 		nfp_action += act_size;
-		act_size = sizeof(set_ip6_dst);
-		memcpy(nfp_action, &set_ip6_dst, act_size);
-		*a_len += act_size;
-
-		/* Hardware will automatically fix TCP/UDP checksum. */
-		*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
-	} else if (set_ip6_src.head.len_lw) {
-		nfp_action += act_size;
-		act_size = sizeof(set_ip6_src);
-		memcpy(nfp_action, &set_ip6_src, act_size);
+		act_size = sizeof(set_act->set_ip6_tc_hl_fl);
+		memcpy(nfp_action, &set_act->set_ip6_tc_hl_fl, act_size);
 		*a_len += act_size;
 
 		/* Hardware will automatically fix TCP/UDP checksum. */
 		*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
 	}
-	if (set_tport.head.len_lw) {
+
+	if (set_act->set_ip6_dst.head.len_lw &&
+	    set_act->set_ip6_src.head.len_lw) {
+		/* TC compiles set src and dst IPv6 address as a single action,
+		 * the hardware requires this to be 2 separate actions.
+		 */
 		nfp_action += act_size;
-		act_size = sizeof(set_tport);
-		memcpy(nfp_action, &set_tport, act_size);
+		act_size = sizeof(set_act->set_ip6_src);
+		memcpy(nfp_action, &set_act->set_ip6_src, act_size);
+		*a_len += act_size;
+
+		act_size = sizeof(set_act->set_ip6_dst);
+		memcpy(&nfp_action[sizeof(set_act->set_ip6_src)],
+		       &set_act->set_ip6_dst, act_size);
+		*a_len += act_size;
+
+		/* Hardware will automatically fix TCP/UDP checksum. */
+		*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
+	} else if (set_act->set_ip6_dst.head.len_lw) {
+		nfp_action += act_size;
+		act_size = sizeof(set_act->set_ip6_dst);
+		memcpy(nfp_action, &set_act->set_ip6_dst, act_size);
+		*a_len += act_size;
+
+		/* Hardware will automatically fix TCP/UDP checksum. */
+		*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
+	} else if (set_act->set_ip6_src.head.len_lw) {
+		nfp_action += act_size;
+		act_size = sizeof(set_act->set_ip6_src);
+		memcpy(nfp_action, &set_act->set_ip6_src, act_size);
+		*a_len += act_size;
+
+		/* Hardware will automatically fix TCP/UDP checksum. */
+		*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
+	}
+	if (set_act->set_tport.head.len_lw) {
+		nfp_action += act_size;
+		act_size = sizeof(set_act->set_tport);
+		memcpy(nfp_action, &set_act->set_tport, act_size);
 		*a_len += act_size;
 
 		/* Hardware will automatically fix TCP/UDP checksum. */
663889 }
664890
665891 static int
666
-nfp_flower_output_action(struct nfp_app *app, const struct tc_action *a,
892
+nfp_fl_pedit(const struct flow_action_entry *act,
893
+ struct flow_cls_offload *flow, char *nfp_action, int *a_len,
894
+ u32 *csum_updated, struct nfp_flower_pedit_acts *set_act,
895
+ struct netlink_ext_ack *extack)
896
+{
897
+ enum flow_action_mangle_base htype;
898
+ u32 offset;
899
+
900
+ htype = act->mangle.htype;
901
+ offset = act->mangle.offset;
902
+
903
+ switch (htype) {
904
+ case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
905
+ return nfp_fl_set_eth(act, offset, &set_act->set_eth, extack);
906
+ case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
907
+ return nfp_fl_set_ip4(act, offset, &set_act->set_ip_addr,
908
+ &set_act->set_ip_ttl_tos, extack);
909
+ case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
910
+ return nfp_fl_set_ip6(act, offset, &set_act->set_ip6_dst,
911
+ &set_act->set_ip6_src,
912
+ &set_act->set_ip6_tc_hl_fl, extack);
913
+ case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
914
+ return nfp_fl_set_tport(act, offset, &set_act->set_tport,
915
+ NFP_FL_ACTION_OPCODE_SET_TCP, extack);
916
+ case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
917
+ return nfp_fl_set_tport(act, offset, &set_act->set_tport,
918
+ NFP_FL_ACTION_OPCODE_SET_UDP, extack);
919
+ default:
920
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pedit on unsupported header");
921
+ return -EOPNOTSUPP;
922
+ }
923
+}
924
+
925
+static int
926
+nfp_flower_output_action(struct nfp_app *app,
927
+ const struct flow_action_entry *act,
667928 struct nfp_fl_payload *nfp_fl, int *a_len,
668929 struct net_device *netdev, bool last,
669930 enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
670
- int *out_cnt, u32 *csum_updated)
931
+ int *out_cnt, u32 *csum_updated, bool pkt_host,
932
+ struct netlink_ext_ack *extack)
671933 {
672934 struct nfp_flower_priv *priv = app->priv;
673935 struct nfp_fl_output *output;
....@@ -676,29 +938,35 @@
676938 /* If csum_updated has not been reset by now, it means HW will
677939 * incorrectly update csums when they are not requested.
678940 */
679
- if (*csum_updated)
941
+ if (*csum_updated) {
942
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: set actions without updating checksums are not supported");
680943 return -EOPNOTSUPP;
944
+ }
681945
682
- if (*a_len + sizeof(struct nfp_fl_output) > NFP_FL_MAX_A_SIZ)
946
+ if (*a_len + sizeof(struct nfp_fl_output) > NFP_FL_MAX_A_SIZ) {
947
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: mirred output increases action list size beyond the allowed maximum");
683948 return -EOPNOTSUPP;
949
+ }
684950
685951 output = (struct nfp_fl_output *)&nfp_fl->action_data[*a_len];
686
- err = nfp_fl_output(app, output, a, nfp_fl, last, netdev, *tun_type,
687
- tun_out_cnt);
952
+ err = nfp_fl_output(app, output, act, nfp_fl, last, netdev, *tun_type,
953
+ tun_out_cnt, pkt_host, extack);
688954 if (err)
689955 return err;
690956
691957 *a_len += sizeof(struct nfp_fl_output);
692958
693
- if (priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
959
+ if (priv->flower_en_feats & NFP_FL_ENABLE_LAG) {
694960 /* nfp_fl_pre_lag returns -err or size of prelag action added.
695961 * This will be 0 if it is not egressing to a lag dev.
696962 */
697
- prelag_size = nfp_fl_pre_lag(app, a, nfp_fl, *a_len);
698
- if (prelag_size < 0)
963
+ prelag_size = nfp_fl_pre_lag(app, act, nfp_fl, *a_len, extack);
964
+ if (prelag_size < 0) {
699965 return prelag_size;
700
- else if (prelag_size > 0 && (!last || *out_cnt))
966
+ } else if (prelag_size > 0 && (!last || *out_cnt)) {
967
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: LAG action has to be last action in action list");
701968 return -EOPNOTSUPP;
969
+ }
702970
703971 *a_len += prelag_size;
704972 }
....@@ -708,118 +976,240 @@
708976 }
709977
710978 static int
711
-nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a,
712
- struct tc_cls_flower_offload *flow,
979
+nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
980
+ struct flow_cls_offload *flow,
713981 struct nfp_fl_payload *nfp_fl, int *a_len,
714982 struct net_device *netdev,
715983 enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
716
- int *out_cnt, u32 *csum_updated)
984
+ int *out_cnt, u32 *csum_updated,
985
+ struct nfp_flower_pedit_acts *set_act, bool *pkt_host,
986
+ struct netlink_ext_ack *extack, int act_idx)
717987 {
718
- struct nfp_fl_set_ipv4_udp_tun *set_tun;
719988 struct nfp_fl_pre_tunnel *pre_tun;
989
+ struct nfp_fl_set_tun *set_tun;
720990 struct nfp_fl_push_vlan *psh_v;
991
+ struct nfp_fl_push_mpls *psh_m;
721992 struct nfp_fl_pop_vlan *pop_v;
993
+ struct nfp_fl_pop_mpls *pop_m;
994
+ struct nfp_fl_set_mpls *set_m;
722995 int err;
723996
724
- if (is_tcf_gact_shot(a)) {
997
+ switch (act->id) {
998
+ case FLOW_ACTION_DROP:
725999 nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_DROP);
726
- } else if (is_tcf_mirred_egress_redirect(a)) {
727
- err = nfp_flower_output_action(app, a, nfp_fl, a_len, netdev,
1000
+ break;
1001
+ case FLOW_ACTION_REDIRECT_INGRESS:
1002
+ case FLOW_ACTION_REDIRECT:
1003
+ err = nfp_flower_output_action(app, act, nfp_fl, a_len, netdev,
7281004 true, tun_type, tun_out_cnt,
729
- out_cnt, csum_updated);
1005
+ out_cnt, csum_updated, *pkt_host,
1006
+ extack);
7301007 if (err)
7311008 return err;
732
-
733
- } else if (is_tcf_mirred_egress_mirror(a)) {
734
- err = nfp_flower_output_action(app, a, nfp_fl, a_len, netdev,
1009
+ break;
1010
+ case FLOW_ACTION_MIRRED_INGRESS:
1011
+ case FLOW_ACTION_MIRRED:
1012
+ err = nfp_flower_output_action(app, act, nfp_fl, a_len, netdev,
7351013 false, tun_type, tun_out_cnt,
736
- out_cnt, csum_updated);
1014
+ out_cnt, csum_updated, *pkt_host,
1015
+ extack);
7371016 if (err)
7381017 return err;
739
-
740
- } else if (is_tcf_vlan(a) && tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
741
- if (*a_len + sizeof(struct nfp_fl_pop_vlan) > NFP_FL_MAX_A_SIZ)
1018
+ break;
1019
+ case FLOW_ACTION_VLAN_POP:
1020
+ if (*a_len +
1021
+ sizeof(struct nfp_fl_pop_vlan) > NFP_FL_MAX_A_SIZ) {
1022
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at pop vlan");
7421023 return -EOPNOTSUPP;
1024
+ }
7431025
7441026 pop_v = (struct nfp_fl_pop_vlan *)&nfp_fl->action_data[*a_len];
7451027 nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_POPV);
7461028
7471029 nfp_fl_pop_vlan(pop_v);
7481030 *a_len += sizeof(struct nfp_fl_pop_vlan);
749
- } else if (is_tcf_vlan(a) && tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
750
- if (*a_len + sizeof(struct nfp_fl_push_vlan) > NFP_FL_MAX_A_SIZ)
1031
+ break;
1032
+ case FLOW_ACTION_VLAN_PUSH:
1033
+ if (*a_len +
1034
+ sizeof(struct nfp_fl_push_vlan) > NFP_FL_MAX_A_SIZ) {
1035
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at push vlan");
7511036 return -EOPNOTSUPP;
1037
+ }
7521038
7531039 psh_v = (struct nfp_fl_push_vlan *)&nfp_fl->action_data[*a_len];
7541040 nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
7551041
756
- nfp_fl_push_vlan(psh_v, a);
1042
+ nfp_fl_push_vlan(psh_v, act);
7571043 *a_len += sizeof(struct nfp_fl_push_vlan);
758
- } else if (is_tcf_tunnel_set(a)) {
759
- struct ip_tunnel_info *ip_tun = tcf_tunnel_info(a);
760
- struct nfp_repr *repr = netdev_priv(netdev);
1044
+ break;
1045
+ case FLOW_ACTION_TUNNEL_ENCAP: {
1046
+ const struct ip_tunnel_info *ip_tun = act->tunnel;
7611047
762
- *tun_type = nfp_fl_get_tun_from_act_l4_port(repr->app, a);
763
- if (*tun_type == NFP_FL_TUNNEL_NONE)
1048
+ *tun_type = nfp_fl_get_tun_from_act(app, flow, act, act_idx);
1049
+ if (*tun_type == NFP_FL_TUNNEL_NONE) {
1050
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unsupported tunnel type in action list");
7641051 return -EOPNOTSUPP;
1052
+ }
7651053
766
- if (ip_tun->mode & ~NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS)
1054
+ if (ip_tun->mode & ~NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS) {
1055
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unsupported tunnel flags in action list");
7671056 return -EOPNOTSUPP;
1057
+ }
7681058
7691059 /* Pre-tunnel action is required for tunnel encap.
7701060 * This checks for next hop entries on NFP.
7711061 * If none, the packet falls back before applying other actions.
7721062 */
7731063 if (*a_len + sizeof(struct nfp_fl_pre_tunnel) +
774
- sizeof(struct nfp_fl_set_ipv4_udp_tun) > NFP_FL_MAX_A_SIZ)
1064
+ sizeof(struct nfp_fl_set_tun) > NFP_FL_MAX_A_SIZ) {
1065
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at tunnel encap");
7751066 return -EOPNOTSUPP;
1067
+ }
7761068
7771069 pre_tun = nfp_fl_pre_tunnel(nfp_fl->action_data, *a_len);
7781070 nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
7791071 *a_len += sizeof(struct nfp_fl_pre_tunnel);
7801072
781
- err = nfp_fl_push_geneve_options(nfp_fl, a_len, a);
1073
+ err = nfp_fl_push_geneve_options(nfp_fl, a_len, act, extack);
7821074 if (err)
7831075 return err;
7841076
7851077 set_tun = (void *)&nfp_fl->action_data[*a_len];
786
- err = nfp_fl_set_ipv4_udp_tun(app, set_tun, a, pre_tun,
787
- *tun_type, netdev);
1078
+ err = nfp_fl_set_tun(app, set_tun, act, pre_tun, *tun_type,
1079
+ netdev, extack);
7881080 if (err)
7891081 return err;
790
- *a_len += sizeof(struct nfp_fl_set_ipv4_udp_tun);
791
- } else if (is_tcf_tunnel_release(a)) {
1082
+ *a_len += sizeof(struct nfp_fl_set_tun);
1083
+ }
1084
+ break;
1085
+ case FLOW_ACTION_TUNNEL_DECAP:
7921086 /* Tunnel decap is handled by default so accept action. */
7931087 return 0;
794
- } else if (is_tcf_pedit(a)) {
795
- if (nfp_fl_pedit(a, flow, &nfp_fl->action_data[*a_len],
796
- a_len, csum_updated))
1088
+ case FLOW_ACTION_MANGLE:
1089
+ if (nfp_fl_pedit(act, flow, &nfp_fl->action_data[*a_len],
1090
+ a_len, csum_updated, set_act, extack))
7971091 return -EOPNOTSUPP;
798
- } else if (is_tcf_csum(a)) {
1092
+ break;
1093
+ case FLOW_ACTION_CSUM:
7991094 /* csum action requests recalc of something we have not fixed */
800
- if (tcf_csum_update_flags(a) & ~*csum_updated)
1095
+ if (act->csum_flags & ~*csum_updated) {
1096
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unsupported csum update action in action list");
8011097 return -EOPNOTSUPP;
1098
+ }
8021099 /* If we will correctly fix the csum we can remove it from the
8031100 * csum update list. Which will later be used to check support.
8041101 */
805
- *csum_updated &= ~tcf_csum_update_flags(a);
806
- } else {
1102
+ *csum_updated &= ~act->csum_flags;
1103
+ break;
1104
+ case FLOW_ACTION_MPLS_PUSH:
1105
+ if (*a_len +
1106
+ sizeof(struct nfp_fl_push_mpls) > NFP_FL_MAX_A_SIZ) {
1107
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at push MPLS");
1108
+ return -EOPNOTSUPP;
1109
+ }
1110
+
1111
+ psh_m = (struct nfp_fl_push_mpls *)&nfp_fl->action_data[*a_len];
1112
+ nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
1113
+
1114
+ err = nfp_fl_push_mpls(psh_m, act, extack);
1115
+ if (err)
1116
+ return err;
1117
+ *a_len += sizeof(struct nfp_fl_push_mpls);
1118
+ break;
1119
+ case FLOW_ACTION_MPLS_POP:
1120
+ if (*a_len +
1121
+ sizeof(struct nfp_fl_pop_mpls) > NFP_FL_MAX_A_SIZ) {
1122
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at pop MPLS");
1123
+ return -EOPNOTSUPP;
1124
+ }
1125
+
1126
+ pop_m = (struct nfp_fl_pop_mpls *)&nfp_fl->action_data[*a_len];
1127
+ nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
1128
+
1129
+ nfp_fl_pop_mpls(pop_m, act);
1130
+ *a_len += sizeof(struct nfp_fl_pop_mpls);
1131
+ break;
1132
+ case FLOW_ACTION_MPLS_MANGLE:
1133
+ if (*a_len +
1134
+ sizeof(struct nfp_fl_set_mpls) > NFP_FL_MAX_A_SIZ) {
1135
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at set MPLS");
1136
+ return -EOPNOTSUPP;
1137
+ }
1138
+
1139
+ set_m = (struct nfp_fl_set_mpls *)&nfp_fl->action_data[*a_len];
1140
+ nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
1141
+
1142
+ nfp_fl_set_mpls(set_m, act);
1143
+ *a_len += sizeof(struct nfp_fl_set_mpls);
1144
+ break;
1145
+ case FLOW_ACTION_PTYPE:
1146
+ /* TC ptype skbedit sets PACKET_HOST for ingress redirect. */
1147
+ if (act->ptype != PACKET_HOST)
1148
+ return -EOPNOTSUPP;
1149
+
1150
+ *pkt_host = true;
1151
+ break;
1152
+ default:
8071153 /* Currently we do not handle any other actions. */
1154
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unsupported action in action list");
8081155 return -EOPNOTSUPP;
8091156 }
8101157
8111158 return 0;
8121159 }
8131160
1161
+static bool nfp_fl_check_mangle_start(struct flow_action *flow_act,
1162
+ int current_act_idx)
1163
+{
1164
+ struct flow_action_entry current_act;
1165
+ struct flow_action_entry prev_act;
1166
+
1167
+ current_act = flow_act->entries[current_act_idx];
1168
+ if (current_act.id != FLOW_ACTION_MANGLE)
1169
+ return false;
1170
+
1171
+ if (current_act_idx == 0)
1172
+ return true;
1173
+
1174
+ prev_act = flow_act->entries[current_act_idx - 1];
1175
+
1176
+ return prev_act.id != FLOW_ACTION_MANGLE;
1177
+}
1178
+
1179
+static bool nfp_fl_check_mangle_end(struct flow_action *flow_act,
1180
+ int current_act_idx)
1181
+{
1182
+ struct flow_action_entry current_act;
1183
+ struct flow_action_entry next_act;
1184
+
1185
+ current_act = flow_act->entries[current_act_idx];
1186
+ if (current_act.id != FLOW_ACTION_MANGLE)
1187
+ return false;
1188
+
1189
+ if (current_act_idx == flow_act->num_entries)
1190
+ return true;
1191
+
1192
+ next_act = flow_act->entries[current_act_idx + 1];
1193
+
1194
+ return next_act.id != FLOW_ACTION_MANGLE;
1195
+}
1196
+
8141197 int nfp_flower_compile_action(struct nfp_app *app,
815
- struct tc_cls_flower_offload *flow,
1198
+ struct flow_cls_offload *flow,
8161199 struct net_device *netdev,
817
- struct nfp_fl_payload *nfp_flow)
1200
+ struct nfp_fl_payload *nfp_flow,
1201
+ struct netlink_ext_ack *extack)
8181202 {
8191203 int act_len, act_cnt, err, tun_out_cnt, out_cnt, i;
1204
+ struct nfp_flower_pedit_acts set_act;
8201205 enum nfp_flower_tun_type tun_type;
821
- const struct tc_action *a;
1206
+ struct flow_action_entry *act;
1207
+ bool pkt_host = false;
8221208 u32 csum_updated = 0;
1209
+
1210
+ if (!flow_action_hw_stats_check(&flow->rule->action, extack,
1211
+ FLOW_ACTION_HW_STATS_DELAYED_BIT))
1212
+ return -EOPNOTSUPP;
8231213
8241214 memset(nfp_flow->action_data, 0, NFP_FL_MAX_A_SIZ);
8251215 nfp_flow->meta.act_len = 0;
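The nfp_fl_check_mangle_start/end helpers added above let a run of consecutive FLOW_ACTION_MANGLE entries share one struct nfp_flower_pedit_acts: the accumulator is zeroed at the first mangle of a run and flushed once by nfp_fl_commit_mangle() after the last, as the compile loop in the next hunk shows. A standalone sketch of that rule (illustrative only; plain ints stand in for flow_action_entry):

#include <stdbool.h>
#include <stdio.h>

enum { ACT_MANGLE, ACT_OTHER };

static bool mangle_start(const int *ids, int i)
{
	return ids[i] == ACT_MANGLE && (i == 0 || ids[i - 1] != ACT_MANGLE);
}

static bool mangle_end(const int *ids, int n, int i)
{
	return ids[i] == ACT_MANGLE &&
	       (i == n - 1 || ids[i + 1] != ACT_MANGLE);
}

int main(void)
{
	/* e.g. pedit eth, pedit ip4, redirect, pedit udp */
	int ids[] = { ACT_MANGLE, ACT_MANGLE, ACT_OTHER, ACT_MANGLE };
	int i, n = 4;

	for (i = 0; i < n; i++)	/* zero accumulator at start=1, commit at end=1 */
		printf("idx %d: start=%d end=%d\n", i,
		       mangle_start(ids, i), mangle_end(ids, n, i));
	return 0;
}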
@@ -829,13 +1219,20 @@
 	tun_out_cnt = 0;
 	out_cnt = 0;
 
-	tcf_exts_for_each_action(i, a, flow->exts) {
-		err = nfp_flower_loop_action(app, a, flow, nfp_flow, &act_len,
+	flow_action_for_each(i, act, &flow->rule->action) {
+		if (nfp_fl_check_mangle_start(&flow->rule->action, i))
+			memset(&set_act, 0, sizeof(set_act));
+		err = nfp_flower_loop_action(app, act, flow, nfp_flow, &act_len,
 					     netdev, &tun_type, &tun_out_cnt,
-					     &out_cnt, &csum_updated);
+					     &out_cnt, &csum_updated,
+					     &set_act, &pkt_host, extack, i);
 		if (err)
 			return err;
 		act_cnt++;
+		if (nfp_fl_check_mangle_end(&flow->rule->action, i))
+			nfp_fl_commit_mangle(flow,
+					     &nfp_flow->action_data[act_len],
+					     &act_len, &set_act, &csum_updated);
 	}
 
 	/* We optimise when the action list is small, this can unfortunately