-/*
- * Copyright (C) 2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *     1. Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *     2. Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

 #include <linux/bitfield.h>
-#include <net/geneve.h>
+#include <linux/mpls.h>
 #include <net/pkt_cls.h>
-#include <net/switchdev.h>
 #include <net/tc_act/tc_csum.h>
 #include <net/tc_act/tc_gact.h>
 #include <net/tc_act/tc_mirred.h>
+#include <net/tc_act/tc_mpls.h>
 #include <net/tc_act/tc_pedit.h>
 #include <net/tc_act/tc_vlan.h>
 #include <net/tc_act/tc_tunnel_key.h>
...
 #define NFP_FL_TUNNEL_CSUM		cpu_to_be16(0x01)
 #define NFP_FL_TUNNEL_KEY		cpu_to_be16(0x04)
 #define NFP_FL_TUNNEL_GENEVE_OPT	cpu_to_be16(0x0800)
-#define NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS	IP_TUNNEL_INFO_TX
-#define NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS	(NFP_FL_TUNNEL_CSUM | \
+#define NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS	(IP_TUNNEL_INFO_TX | \
+						 IP_TUNNEL_INFO_IPV6)
+#define NFP_FL_SUPPORTED_UDP_TUN_FLAGS		(NFP_FL_TUNNEL_CSUM | \
 						 NFP_FL_TUNNEL_KEY | \
 						 NFP_FL_TUNNEL_GENEVE_OPT)
+
+static int
+nfp_fl_push_mpls(struct nfp_fl_push_mpls *push_mpls,
+		 const struct flow_action_entry *act,
+		 struct netlink_ext_ack *extack)
+{
+	size_t act_size = sizeof(struct nfp_fl_push_mpls);
+	u32 mpls_lse = 0;
+
+	push_mpls->head.jump_id = NFP_FL_ACTION_OPCODE_PUSH_MPLS;
+	push_mpls->head.len_lw = act_size >> NFP_FL_LW_SIZ;
+
+	/* BOS is optional in the TC action but required for offload. */
+	if (act->mpls_push.bos != ACT_MPLS_BOS_NOT_SET) {
+		mpls_lse |= act->mpls_push.bos << MPLS_LS_S_SHIFT;
+	} else {
+		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: BOS field must explicitly be set for MPLS push");
+		return -EOPNOTSUPP;
+	}
+
+	/* Leave MPLS TC as a default value of 0 if not explicitly set. */
+	if (act->mpls_push.tc != ACT_MPLS_TC_NOT_SET)
+		mpls_lse |= act->mpls_push.tc << MPLS_LS_TC_SHIFT;
+
+	/* Proto, label and TTL are enforced and verified for MPLS push. */
+	mpls_lse |= act->mpls_push.label << MPLS_LS_LABEL_SHIFT;
+	mpls_lse |= act->mpls_push.ttl << MPLS_LS_TTL_SHIFT;
+	push_mpls->ethtype = act->mpls_push.proto;
+	push_mpls->lse = cpu_to_be32(mpls_lse);
+
+	return 0;
+}
+
+static void
+nfp_fl_pop_mpls(struct nfp_fl_pop_mpls *pop_mpls,
+		const struct flow_action_entry *act)
+{
+	size_t act_size = sizeof(struct nfp_fl_pop_mpls);
+
+	pop_mpls->head.jump_id = NFP_FL_ACTION_OPCODE_POP_MPLS;
+	pop_mpls->head.len_lw = act_size >> NFP_FL_LW_SIZ;
+	pop_mpls->ethtype = act->mpls_pop.proto;
+}
+
+static void
+nfp_fl_set_mpls(struct nfp_fl_set_mpls *set_mpls,
+		const struct flow_action_entry *act)
+{
+	size_t act_size = sizeof(struct nfp_fl_set_mpls);
+	u32 mpls_lse = 0, mpls_mask = 0;
+
+	set_mpls->head.jump_id = NFP_FL_ACTION_OPCODE_SET_MPLS;
+	set_mpls->head.len_lw = act_size >> NFP_FL_LW_SIZ;
+
+	if (act->mpls_mangle.label != ACT_MPLS_LABEL_NOT_SET) {
+		mpls_lse |= act->mpls_mangle.label << MPLS_LS_LABEL_SHIFT;
+		mpls_mask |= MPLS_LS_LABEL_MASK;
+	}
+	if (act->mpls_mangle.tc != ACT_MPLS_TC_NOT_SET) {
+		mpls_lse |= act->mpls_mangle.tc << MPLS_LS_TC_SHIFT;
+		mpls_mask |= MPLS_LS_TC_MASK;
+	}
+	if (act->mpls_mangle.bos != ACT_MPLS_BOS_NOT_SET) {
+		mpls_lse |= act->mpls_mangle.bos << MPLS_LS_S_SHIFT;
+		mpls_mask |= MPLS_LS_S_MASK;
+	}
+	if (act->mpls_mangle.ttl) {
+		mpls_lse |= act->mpls_mangle.ttl << MPLS_LS_TTL_SHIFT;
+		mpls_mask |= MPLS_LS_TTL_MASK;
+	}
+
+	set_mpls->lse = cpu_to_be32(mpls_lse);
+	set_mpls->lse_mask = cpu_to_be32(mpls_mask);
+}

 static void nfp_fl_pop_vlan(struct nfp_fl_pop_vlan *pop_vlan)
 {
...
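
For reference, a minimal userspace sketch of the label stack entry (LSE) bit packing that nfp_fl_push_mpls() in the hunk above performs. The MPLS_LS_* shift values are copied from include/uapi/linux/mpls.h; the label/TC/BOS/TTL values are illustrative, and htonl() stands in for cpu_to_be32():

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>	/* htonl(), standing in for cpu_to_be32() */

#define MPLS_LS_LABEL_SHIFT	12
#define MPLS_LS_TC_SHIFT	9
#define MPLS_LS_S_SHIFT		8
#define MPLS_LS_TTL_SHIFT	0

int main(void)
{
	uint32_t label = 1000, tc = 3, bos = 1, ttl = 64;
	uint32_t mpls_lse = 0;

	/* Same composition as the driver: BOS, TC, label, TTL. */
	mpls_lse |= bos << MPLS_LS_S_SHIFT;
	mpls_lse |= tc << MPLS_LS_TC_SHIFT;
	mpls_lse |= label << MPLS_LS_LABEL_SHIFT;
	mpls_lse |= ttl << MPLS_LS_TTL_SHIFT;

	/* The driver hands the LSE to the firmware in big-endian order. */
	printf("lse = 0x%08x (wire 0x%08x)\n", mpls_lse, htonl(mpls_lse));
	return 0;
}

As the comment in nfp_fl_push_mpls() notes, BOS is optional in the TC action but mandatory for the offload, so an unset BOS is rejected with -EOPNOTSUPP.
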
---|
 
 static void
 nfp_fl_push_vlan(struct nfp_fl_push_vlan *push_vlan,
-		 const struct tc_action *action)
+		 const struct flow_action_entry *act)
 {
 	size_t act_size = sizeof(struct nfp_fl_push_vlan);
 	u16 tmp_push_vlan_tci;
...
 	push_vlan->head.jump_id = NFP_FL_ACTION_OPCODE_PUSH_VLAN;
 	push_vlan->head.len_lw = act_size >> NFP_FL_LW_SIZ;
 	push_vlan->reserved = 0;
-	push_vlan->vlan_tpid = tcf_vlan_push_proto(action);
+	push_vlan->vlan_tpid = act->vlan.proto;
 
 	tmp_push_vlan_tci =
-		FIELD_PREP(NFP_FL_PUSH_VLAN_PRIO, tcf_vlan_push_prio(action)) |
-		FIELD_PREP(NFP_FL_PUSH_VLAN_VID, tcf_vlan_push_vid(action));
+		FIELD_PREP(NFP_FL_PUSH_VLAN_PRIO, act->vlan.prio) |
+		FIELD_PREP(NFP_FL_PUSH_VLAN_VID, act->vlan.vid);
 	push_vlan->vlan_tci = cpu_to_be16(tmp_push_vlan_tci);
 }
 
 static int
-nfp_fl_pre_lag(struct nfp_app *app, const struct tc_action *action,
-	       struct nfp_fl_payload *nfp_flow, int act_len)
+nfp_fl_pre_lag(struct nfp_app *app, const struct flow_action_entry *act,
+	       struct nfp_fl_payload *nfp_flow, int act_len,
+	       struct netlink_ext_ack *extack)
 {
 	size_t act_size = sizeof(struct nfp_fl_pre_lag);
 	struct nfp_fl_pre_lag *pre_lag;
 	struct net_device *out_dev;
 	int err;
 
-	out_dev = tcf_mirred_dev(action);
+	out_dev = act->dev;
 	if (!out_dev || !netif_is_lag_master(out_dev))
 		return 0;
 
-	if (act_len + act_size > NFP_FL_MAX_A_SIZ)
+	if (act_len + act_size > NFP_FL_MAX_A_SIZ) {
+		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at LAG action");
 		return -EOPNOTSUPP;
+	}
 
 	/* Pre_lag action must be first on action list.
 	 * If other actions already exist they need pushed forward.
...
 			nfp_flow->action_data, act_len);
 
 	pre_lag = (struct nfp_fl_pre_lag *)nfp_flow->action_data;
-	err = nfp_flower_lag_populate_pre_action(app, out_dev, pre_lag);
+	err = nfp_flower_lag_populate_pre_action(app, out_dev, pre_lag, extack);
 	if (err)
 		return err;
 
...
 	return act_size;
 }
 
-static bool nfp_fl_netdev_is_tunnel_type(struct net_device *out_dev,
-					 enum nfp_flower_tun_type tun_type)
-{
-	if (!out_dev->rtnl_link_ops)
-		return false;
-
-	if (!strcmp(out_dev->rtnl_link_ops->kind, "vxlan"))
-		return tun_type == NFP_FL_TUNNEL_VXLAN;
-
-	if (!strcmp(out_dev->rtnl_link_ops->kind, "geneve"))
-		return tun_type == NFP_FL_TUNNEL_GENEVE;
-
-	return false;
-}
-
 static int
 nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output,
-	      const struct tc_action *action, struct nfp_fl_payload *nfp_flow,
+	      const struct flow_action_entry *act,
+	      struct nfp_fl_payload *nfp_flow,
 	      bool last, struct net_device *in_dev,
-	      enum nfp_flower_tun_type tun_type, int *tun_out_cnt)
+	      enum nfp_flower_tun_type tun_type, int *tun_out_cnt,
+	      bool pkt_host, struct netlink_ext_ack *extack)
 {
 	size_t act_size = sizeof(struct nfp_fl_output);
 	struct nfp_flower_priv *priv = app->priv;
...
 	output->head.jump_id = NFP_FL_ACTION_OPCODE_OUTPUT;
 	output->head.len_lw = act_size >> NFP_FL_LW_SIZ;
 
-	out_dev = tcf_mirred_dev(action);
-	if (!out_dev)
+	out_dev = act->dev;
+	if (!out_dev) {
+		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid egress interface for mirred action");
 		return -EOPNOTSUPP;
+	}
 
 	tmp_flags = last ? NFP_FL_OUT_FLAGS_LAST : 0;
 
 	if (tun_type) {
 		/* Verify the egress netdev matches the tunnel type. */
-		if (!nfp_fl_netdev_is_tunnel_type(out_dev, tun_type))
+		if (!nfp_fl_netdev_is_tunnel_type(out_dev, tun_type)) {
+			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: egress interface does not match the required tunnel type");
 			return -EOPNOTSUPP;
+		}
 
-		if (*tun_out_cnt)
+		if (*tun_out_cnt) {
+			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot offload more than one tunnel mirred output per filter");
 			return -EOPNOTSUPP;
+		}
 		(*tun_out_cnt)++;
 
 		output->flags = cpu_to_be16(tmp_flags |
 					    NFP_FL_OUT_FLAGS_USE_TUN);
 		output->port = cpu_to_be32(NFP_FL_PORT_TYPE_TUN | tun_type);
 	} else if (netif_is_lag_master(out_dev) &&
-		   priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
+		   priv->flower_en_feats & NFP_FL_ENABLE_LAG) {
 		int gid;
 
 		output->flags = cpu_to_be16(tmp_flags);
 		gid = nfp_flower_lag_get_output_id(app, out_dev);
-		if (gid < 0)
+		if (gid < 0) {
+			NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot find group id for LAG action");
 			return gid;
+		}
 		output->port = cpu_to_be32(NFP_FL_LAG_OUT | gid);
+	} else if (nfp_flower_internal_port_can_offload(app, out_dev)) {
+		if (!(priv->flower_ext_feats & NFP_FL_FEATS_PRE_TUN_RULES)) {
+			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pre-tunnel rules not supported in loaded firmware");
+			return -EOPNOTSUPP;
+		}
+
+		if (nfp_flow->pre_tun_rule.dev || !pkt_host) {
+			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pre-tunnel rules require single egress dev and ptype HOST action");
+			return -EOPNOTSUPP;
+		}
+
+		nfp_flow->pre_tun_rule.dev = out_dev;
+
+		return 0;
 	} else {
 		/* Set action output parameters. */
 		output->flags = cpu_to_be16(tmp_flags);
 
-		/* Only offload if egress ports are on the same device as the
-		 * ingress port.
-		 */
-		if (!switchdev_port_same_parent_id(in_dev, out_dev))
+		if (nfp_netdev_is_nfp_repr(in_dev)) {
+			/* Confirm ingress and egress are on same device. */
+			if (!netdev_port_same_parent_id(in_dev, out_dev)) {
+				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: ingress and egress interfaces are on different devices");
+				return -EOPNOTSUPP;
+			}
+		}
+
+		if (!nfp_netdev_is_nfp_repr(out_dev)) {
+			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: egress interface is not an nfp port");
 			return -EOPNOTSUPP;
-		if (!nfp_netdev_is_nfp_repr(out_dev))
-			return -EOPNOTSUPP;
+		}
 
 		output->port = cpu_to_be32(nfp_repr_get_port_id(out_dev));
-		if (!output->port)
+		if (!output->port) {
+			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid port id for egress interface");
 			return -EOPNOTSUPP;
+		}
 	}
 	nfp_flow->meta.shortcut = output->port;
 
 	return 0;
 }
 
-static enum nfp_flower_tun_type
-nfp_fl_get_tun_from_act_l4_port(struct nfp_app *app,
-				const struct tc_action *action)
+static bool
+nfp_flower_tun_is_gre(struct flow_cls_offload *flow, int start_idx)
 {
-	struct ip_tunnel_info *tun = tcf_tunnel_info(action);
+	struct flow_action_entry *act = flow->rule->action.entries;
+	int num_act = flow->rule->action.num_entries;
+	int act_idx;
+
+	/* Preparse action list for next mirred or redirect action */
+	for (act_idx = start_idx + 1; act_idx < num_act; act_idx++)
+		if (act[act_idx].id == FLOW_ACTION_REDIRECT ||
+		    act[act_idx].id == FLOW_ACTION_MIRRED)
+			return netif_is_gretap(act[act_idx].dev);
+
+	return false;
+}
+
+static enum nfp_flower_tun_type
+nfp_fl_get_tun_from_act(struct nfp_app *app,
+			struct flow_cls_offload *flow,
+			const struct flow_action_entry *act, int act_idx)
+{
+	const struct ip_tunnel_info *tun = act->tunnel;
 	struct nfp_flower_priv *priv = app->priv;
 
+	/* Determine the tunnel type based on the egress netdev
+	 * in the mirred action for tunnels without l4.
+	 */
+	if (nfp_flower_tun_is_gre(flow, act_idx))
+		return NFP_FL_TUNNEL_GRE;
+
 	switch (tun->key.tp_dst) {
-	case htons(NFP_FL_VXLAN_PORT):
+	case htons(IANA_VXLAN_UDP_PORT):
 		return NFP_FL_TUNNEL_VXLAN;
-	case htons(NFP_FL_GENEVE_PORT):
+	case htons(GENEVE_UDP_PORT):
 		if (priv->flower_ext_feats & NFP_FL_FEATS_GENEVE)
 			return NFP_FL_TUNNEL_GENEVE;
-		/* FALLTHROUGH */
+		fallthrough;
 	default:
 		return NFP_FL_TUNNEL_NONE;
 	}
...
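
nfp_flower_tun_is_gre() above classifies tunnels that carry no L4 destination port (GRE) by scanning the action list past the tunnel_key entry for the next redirect or mirred action and checking its egress device. A minimal sketch of that look-ahead, with illustrative stand-ins for struct flow_action_entry and netif_is_gretap():

#include <stdbool.h>
#include <stdio.h>

enum act_id { ACT_TUNNEL_ENCAP, ACT_REDIRECT, ACT_MIRRED, ACT_OTHER };

struct act {
	enum act_id id;
	bool dev_is_gretap;	/* stand-in for netif_is_gretap(act->dev) */
};

static bool tun_is_gre(const struct act *acts, int num, int start_idx)
{
	/* Look past the tunnel_key entry for the next output action. */
	for (int i = start_idx + 1; i < num; i++)
		if (acts[i].id == ACT_REDIRECT || acts[i].id == ACT_MIRRED)
			return acts[i].dev_is_gretap;
	return false;
}

int main(void)
{
	/* tunnel_key set followed by a redirect to a gretap netdev */
	struct act acts[] = {
		{ ACT_TUNNEL_ENCAP, false },
		{ ACT_REDIRECT, true },
	};

	printf("GRE tunnel: %s\n", tun_is_gre(acts, 2, 0) ? "yes" : "no");
	return 0;
}
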
---|
239 | 326 | |
---|
240 | 327 | static int |
---|
241 | 328 | nfp_fl_push_geneve_options(struct nfp_fl_payload *nfp_fl, int *list_len, |
---|
242 | | - const struct tc_action *action) |
---|
| 329 | + const struct flow_action_entry *act, |
---|
| 330 | + struct netlink_ext_ack *extack) |
---|
243 | 331 | { |
---|
244 | | - struct ip_tunnel_info *ip_tun = tcf_tunnel_info(action); |
---|
| 332 | + struct ip_tunnel_info *ip_tun = (struct ip_tunnel_info *)act->tunnel; |
---|
245 | 333 | int opt_len, opt_cnt, act_start, tot_push_len; |
---|
246 | 334 | u8 *src = ip_tunnel_info_opts(ip_tun); |
---|
247 | 335 | |
---|
.. | .. |
---|
257 | 345 | struct geneve_opt *opt = (struct geneve_opt *)src; |
---|
258 | 346 | |
---|
259 | 347 | opt_cnt++; |
---|
260 | | - if (opt_cnt > NFP_FL_MAX_GENEVE_OPT_CNT) |
---|
| 348 | + if (opt_cnt > NFP_FL_MAX_GENEVE_OPT_CNT) { |
---|
| 349 | + NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed number of geneve options exceeded"); |
---|
261 | 350 | return -EOPNOTSUPP; |
---|
| 351 | + } |
---|
262 | 352 | |
---|
263 | 353 | tot_push_len += sizeof(struct nfp_fl_push_geneve) + |
---|
264 | 354 | opt->length * 4; |
---|
265 | | - if (tot_push_len > NFP_FL_MAX_GENEVE_OPT_ACT) |
---|
| 355 | + if (tot_push_len > NFP_FL_MAX_GENEVE_OPT_ACT) { |
---|
| 356 | + NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at push geneve options"); |
---|
266 | 357 | return -EOPNOTSUPP; |
---|
| 358 | + } |
---|
267 | 359 | |
---|
268 | 360 | opt_len -= sizeof(struct geneve_opt) + opt->length * 4; |
---|
269 | 361 | src += sizeof(struct geneve_opt) + opt->length * 4; |
---|
270 | 362 | } |
---|
271 | 363 | |
---|
272 | | - if (*list_len + tot_push_len > NFP_FL_MAX_A_SIZ) |
---|
| 364 | + if (*list_len + tot_push_len > NFP_FL_MAX_A_SIZ) { |
---|
| 365 | + NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at push geneve options"); |
---|
273 | 366 | return -EOPNOTSUPP; |
---|
| 367 | + } |
---|
274 | 368 | |
---|
275 | 369 | act_start = *list_len; |
---|
276 | 370 | *list_len += tot_push_len; |
---|
.. | .. |
---|
301 | 395 | } |
---|
302 | 396 | |
---|
303 | 397 | static int |
---|
304 | | -nfp_fl_set_ipv4_udp_tun(struct nfp_app *app, |
---|
305 | | - struct nfp_fl_set_ipv4_udp_tun *set_tun, |
---|
306 | | - const struct tc_action *action, |
---|
307 | | - struct nfp_fl_pre_tunnel *pre_tun, |
---|
308 | | - enum nfp_flower_tun_type tun_type, |
---|
309 | | - struct net_device *netdev) |
---|
| 398 | +nfp_fl_set_tun(struct nfp_app *app, struct nfp_fl_set_tun *set_tun, |
---|
| 399 | + const struct flow_action_entry *act, |
---|
| 400 | + struct nfp_fl_pre_tunnel *pre_tun, |
---|
| 401 | + enum nfp_flower_tun_type tun_type, |
---|
| 402 | + struct net_device *netdev, struct netlink_ext_ack *extack) |
---|
310 | 403 | { |
---|
311 | | - size_t act_size = sizeof(struct nfp_fl_set_ipv4_udp_tun); |
---|
312 | | - struct ip_tunnel_info *ip_tun = tcf_tunnel_info(action); |
---|
| 404 | + const struct ip_tunnel_info *ip_tun = act->tunnel; |
---|
| 405 | + bool ipv6 = ip_tunnel_info_af(ip_tun) == AF_INET6; |
---|
| 406 | + size_t act_size = sizeof(struct nfp_fl_set_tun); |
---|
313 | 407 | struct nfp_flower_priv *priv = app->priv; |
---|
314 | 408 | u32 tmp_set_ip_tun_type_index = 0; |
---|
315 | 409 | /* Currently support one pre-tunnel so index is always 0. */ |
---|
316 | 410 | int pretun_idx = 0; |
---|
| 411 | + |
---|
| 412 | + if (!IS_ENABLED(CONFIG_IPV6) && ipv6) |
---|
| 413 | + return -EOPNOTSUPP; |
---|
| 414 | + |
---|
| 415 | + if (ipv6 && !(priv->flower_ext_feats & NFP_FL_FEATS_IPV6_TUN)) |
---|
| 416 | + return -EOPNOTSUPP; |
---|
317 | 417 | |
---|
318 | 418 | BUILD_BUG_ON(NFP_FL_TUNNEL_CSUM != TUNNEL_CSUM || |
---|
319 | 419 | NFP_FL_TUNNEL_KEY != TUNNEL_KEY || |
---|
320 | 420 | NFP_FL_TUNNEL_GENEVE_OPT != TUNNEL_GENEVE_OPT); |
---|
321 | 421 | if (ip_tun->options_len && |
---|
322 | 422 | (tun_type != NFP_FL_TUNNEL_GENEVE || |
---|
323 | | - !(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT))) |
---|
| 423 | + !(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT))) { |
---|
| 424 | + NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support geneve options offload"); |
---|
324 | 425 | return -EOPNOTSUPP; |
---|
| 426 | + } |
---|
325 | 427 | |
---|
326 | | - set_tun->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL; |
---|
| 428 | + set_tun->head.jump_id = NFP_FL_ACTION_OPCODE_SET_TUNNEL; |
---|
327 | 429 | set_tun->head.len_lw = act_size >> NFP_FL_LW_SIZ; |
---|
328 | 430 | |
---|
329 | 431 | /* Set tunnel type and pre-tunnel index. */ |
---|
330 | 432 | tmp_set_ip_tun_type_index |= |
---|
331 | | - FIELD_PREP(NFP_FL_IPV4_TUNNEL_TYPE, tun_type) | |
---|
332 | | - FIELD_PREP(NFP_FL_IPV4_PRE_TUN_INDEX, pretun_idx); |
---|
| 433 | + FIELD_PREP(NFP_FL_TUNNEL_TYPE, tun_type) | |
---|
| 434 | + FIELD_PREP(NFP_FL_PRE_TUN_INDEX, pretun_idx); |
---|
333 | 435 | |
---|
334 | 436 | set_tun->tun_type_index = cpu_to_be32(tmp_set_ip_tun_type_index); |
---|
335 | 437 | set_tun->tun_id = ip_tun->key.tun_id; |
---|
336 | 438 | |
---|
337 | 439 | if (ip_tun->key.ttl) { |
---|
338 | 440 | set_tun->ttl = ip_tun->key.ttl; |
---|
| 441 | +#ifdef CONFIG_IPV6 |
---|
| 442 | + } else if (ipv6) { |
---|
| 443 | + struct net *net = dev_net(netdev); |
---|
| 444 | + struct flowi6 flow = {}; |
---|
| 445 | + struct dst_entry *dst; |
---|
| 446 | + |
---|
| 447 | + flow.daddr = ip_tun->key.u.ipv6.dst; |
---|
| 448 | + flow.flowi4_proto = IPPROTO_UDP; |
---|
| 449 | + dst = ipv6_stub->ipv6_dst_lookup_flow(net, NULL, &flow, NULL); |
---|
| 450 | + if (!IS_ERR(dst)) { |
---|
| 451 | + set_tun->ttl = ip6_dst_hoplimit(dst); |
---|
| 452 | + dst_release(dst); |
---|
| 453 | + } else { |
---|
| 454 | + set_tun->ttl = net->ipv6.devconf_all->hop_limit; |
---|
| 455 | + } |
---|
| 456 | +#endif |
---|
339 | 457 | } else { |
---|
340 | 458 | struct net *net = dev_net(netdev); |
---|
341 | 459 | struct flowi4 flow = {}; |
---|
.. | .. |
---|
361 | 479 | set_tun->tos = ip_tun->key.tos; |
---|
362 | 480 | |
---|
363 | 481 | if (!(ip_tun->key.tun_flags & NFP_FL_TUNNEL_KEY) || |
---|
364 | | - ip_tun->key.tun_flags & ~NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS) |
---|
| 482 | + ip_tun->key.tun_flags & ~NFP_FL_SUPPORTED_UDP_TUN_FLAGS) { |
---|
| 483 | + NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support tunnel flag offload"); |
---|
365 | 484 | return -EOPNOTSUPP; |
---|
| 485 | + } |
---|
366 | 486 | set_tun->tun_flags = ip_tun->key.tun_flags; |
---|
367 | 487 | |
---|
368 | 488 | if (tun_type == NFP_FL_TUNNEL_GENEVE) { |
---|
.. | .. |
---|
371 | 491 | } |
---|
372 | 492 | |
---|
373 | 493 | /* Complete pre_tunnel action. */ |
---|
374 | | - pre_tun->ipv4_dst = ip_tun->key.u.ipv4.dst; |
---|
| 494 | + if (ipv6) { |
---|
| 495 | + pre_tun->flags |= cpu_to_be16(NFP_FL_PRE_TUN_IPV6); |
---|
| 496 | + pre_tun->ipv6_dst = ip_tun->key.u.ipv6.dst; |
---|
| 497 | + } else { |
---|
| 498 | + pre_tun->ipv4_dst = ip_tun->key.u.ipv4.dst; |
---|
| 499 | + } |
---|
375 | 500 | |
---|
376 | 501 | return 0; |
---|
377 | 502 | } |
---|
.. | .. |
---|
389 | 514 | } |
---|
390 | 515 | |
---|
391 | 516 | static int |
---|
392 | | -nfp_fl_set_eth(const struct tc_action *action, int idx, u32 off, |
---|
393 | | - struct nfp_fl_set_eth *set_eth) |
---|
| 517 | +nfp_fl_set_eth(const struct flow_action_entry *act, u32 off, |
---|
| 518 | + struct nfp_fl_set_eth *set_eth, struct netlink_ext_ack *extack) |
---|
394 | 519 | { |
---|
395 | 520 | u32 exact, mask; |
---|
396 | 521 | |
---|
397 | | - if (off + 4 > ETH_ALEN * 2) |
---|
| 522 | + if (off + 4 > ETH_ALEN * 2) { |
---|
| 523 | + NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit ethernet action"); |
---|
398 | 524 | return -EOPNOTSUPP; |
---|
| 525 | + } |
---|
399 | 526 | |
---|
400 | | - mask = ~tcf_pedit_mask(action, idx); |
---|
401 | | - exact = tcf_pedit_val(action, idx); |
---|
| 527 | + mask = ~act->mangle.mask; |
---|
| 528 | + exact = act->mangle.val; |
---|
402 | 529 | |
---|
403 | | - if (exact & ~mask) |
---|
| 530 | + if (exact & ~mask) { |
---|
| 531 | + NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit ethernet action"); |
---|
404 | 532 | return -EOPNOTSUPP; |
---|
| 533 | + } |
---|
405 | 534 | |
---|
406 | 535 | nfp_fl_set_helper32(exact, mask, &set_eth->eth_addr_val[off], |
---|
407 | 536 | &set_eth->eth_addr_mask[off]); |
---|
.. | .. |
---|
413 | 542 | return 0; |
---|
414 | 543 | } |
---|
415 | 544 | |
---|
| 545 | +struct ipv4_ttl_word { |
---|
| 546 | + __u8 ttl; |
---|
| 547 | + __u8 protocol; |
---|
| 548 | + __sum16 check; |
---|
| 549 | +}; |
---|
| 550 | + |
---|
416 | 551 | static int |
---|
417 | | -nfp_fl_set_ip4(const struct tc_action *action, int idx, u32 off, |
---|
418 | | - struct nfp_fl_set_ip4_addrs *set_ip_addr) |
---|
| 552 | +nfp_fl_set_ip4(const struct flow_action_entry *act, u32 off, |
---|
| 553 | + struct nfp_fl_set_ip4_addrs *set_ip_addr, |
---|
| 554 | + struct nfp_fl_set_ip4_ttl_tos *set_ip_ttl_tos, |
---|
| 555 | + struct netlink_ext_ack *extack) |
---|
419 | 556 | { |
---|
| 557 | + struct ipv4_ttl_word *ttl_word_mask; |
---|
| 558 | + struct ipv4_ttl_word *ttl_word; |
---|
| 559 | + struct iphdr *tos_word_mask; |
---|
| 560 | + struct iphdr *tos_word; |
---|
420 | 561 | __be32 exact, mask; |
---|
421 | 562 | |
---|
422 | 563 | /* We are expecting tcf_pedit to return a big endian value */ |
---|
423 | | - mask = (__force __be32)~tcf_pedit_mask(action, idx); |
---|
424 | | - exact = (__force __be32)tcf_pedit_val(action, idx); |
---|
| 564 | + mask = (__force __be32)~act->mangle.mask; |
---|
| 565 | + exact = (__force __be32)act->mangle.val; |
---|
425 | 566 | |
---|
426 | | - if (exact & ~mask) |
---|
| 567 | + if (exact & ~mask) { |
---|
| 568 | + NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv4 action"); |
---|
427 | 569 | return -EOPNOTSUPP; |
---|
| 570 | + } |
---|
428 | 571 | |
---|
429 | 572 | switch (off) { |
---|
430 | 573 | case offsetof(struct iphdr, daddr): |
---|
431 | 574 | set_ip_addr->ipv4_dst_mask |= mask; |
---|
432 | 575 | set_ip_addr->ipv4_dst &= ~mask; |
---|
433 | 576 | set_ip_addr->ipv4_dst |= exact & mask; |
---|
| 577 | + set_ip_addr->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS; |
---|
| 578 | + set_ip_addr->head.len_lw = sizeof(*set_ip_addr) >> |
---|
| 579 | + NFP_FL_LW_SIZ; |
---|
434 | 580 | break; |
---|
435 | 581 | case offsetof(struct iphdr, saddr): |
---|
436 | 582 | set_ip_addr->ipv4_src_mask |= mask; |
---|
437 | 583 | set_ip_addr->ipv4_src &= ~mask; |
---|
438 | 584 | set_ip_addr->ipv4_src |= exact & mask; |
---|
| 585 | + set_ip_addr->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS; |
---|
| 586 | + set_ip_addr->head.len_lw = sizeof(*set_ip_addr) >> |
---|
| 587 | + NFP_FL_LW_SIZ; |
---|
| 588 | + break; |
---|
| 589 | + case offsetof(struct iphdr, ttl): |
---|
| 590 | + ttl_word_mask = (struct ipv4_ttl_word *)&mask; |
---|
| 591 | + ttl_word = (struct ipv4_ttl_word *)&exact; |
---|
| 592 | + |
---|
| 593 | + if (ttl_word_mask->protocol || ttl_word_mask->check) { |
---|
| 594 | + NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv4 ttl action"); |
---|
| 595 | + return -EOPNOTSUPP; |
---|
| 596 | + } |
---|
| 597 | + |
---|
| 598 | + set_ip_ttl_tos->ipv4_ttl_mask |= ttl_word_mask->ttl; |
---|
| 599 | + set_ip_ttl_tos->ipv4_ttl &= ~ttl_word_mask->ttl; |
---|
| 600 | + set_ip_ttl_tos->ipv4_ttl |= ttl_word->ttl & ttl_word_mask->ttl; |
---|
| 601 | + set_ip_ttl_tos->head.jump_id = |
---|
| 602 | + NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS; |
---|
| 603 | + set_ip_ttl_tos->head.len_lw = sizeof(*set_ip_ttl_tos) >> |
---|
| 604 | + NFP_FL_LW_SIZ; |
---|
| 605 | + break; |
---|
| 606 | + case round_down(offsetof(struct iphdr, tos), 4): |
---|
| 607 | + tos_word_mask = (struct iphdr *)&mask; |
---|
| 608 | + tos_word = (struct iphdr *)&exact; |
---|
| 609 | + |
---|
| 610 | + if (tos_word_mask->version || tos_word_mask->ihl || |
---|
| 611 | + tos_word_mask->tot_len) { |
---|
| 612 | + NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv4 tos action"); |
---|
| 613 | + return -EOPNOTSUPP; |
---|
| 614 | + } |
---|
| 615 | + |
---|
| 616 | + set_ip_ttl_tos->ipv4_tos_mask |= tos_word_mask->tos; |
---|
| 617 | + set_ip_ttl_tos->ipv4_tos &= ~tos_word_mask->tos; |
---|
| 618 | + set_ip_ttl_tos->ipv4_tos |= tos_word->tos & tos_word_mask->tos; |
---|
| 619 | + set_ip_ttl_tos->head.jump_id = |
---|
| 620 | + NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS; |
---|
| 621 | + set_ip_ttl_tos->head.len_lw = sizeof(*set_ip_ttl_tos) >> |
---|
| 622 | + NFP_FL_LW_SIZ; |
---|
439 | 623 | break; |
---|
440 | 624 | default: |
---|
| 625 | + NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pedit on unsupported section of IPv4 header"); |
---|
441 | 626 | return -EOPNOTSUPP; |
---|
442 | 627 | } |
---|
443 | | - |
---|
444 | | - set_ip_addr->reserved = cpu_to_be16(0); |
---|
445 | | - set_ip_addr->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS; |
---|
446 | | - set_ip_addr->head.len_lw = sizeof(*set_ip_addr) >> NFP_FL_LW_SIZ; |
---|
447 | 628 | |
---|
448 | 629 | return 0; |
---|
449 | 630 | } |
---|
.. | .. |
---|
461 | 642 | ip6->head.len_lw = sizeof(*ip6) >> NFP_FL_LW_SIZ; |
---|
462 | 643 | } |
---|
463 | 644 | |
---|
| 645 | +struct ipv6_hop_limit_word { |
---|
| 646 | + __be16 payload_len; |
---|
| 647 | + u8 nexthdr; |
---|
| 648 | + u8 hop_limit; |
---|
| 649 | +}; |
---|
| 650 | + |
---|
464 | 651 | static int |
---|
465 | | -nfp_fl_set_ip6(const struct tc_action *action, int idx, u32 off, |
---|
| 652 | +nfp_fl_set_ip6_hop_limit_flow_label(u32 off, __be32 exact, __be32 mask, |
---|
| 653 | + struct nfp_fl_set_ipv6_tc_hl_fl *ip_hl_fl, |
---|
| 654 | + struct netlink_ext_ack *extack) |
---|
| 655 | +{ |
---|
| 656 | + struct ipv6_hop_limit_word *fl_hl_mask; |
---|
| 657 | + struct ipv6_hop_limit_word *fl_hl; |
---|
| 658 | + |
---|
| 659 | + switch (off) { |
---|
| 660 | + case offsetof(struct ipv6hdr, payload_len): |
---|
| 661 | + fl_hl_mask = (struct ipv6_hop_limit_word *)&mask; |
---|
| 662 | + fl_hl = (struct ipv6_hop_limit_word *)&exact; |
---|
| 663 | + |
---|
| 664 | + if (fl_hl_mask->nexthdr || fl_hl_mask->payload_len) { |
---|
| 665 | + NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv6 hop limit action"); |
---|
| 666 | + return -EOPNOTSUPP; |
---|
| 667 | + } |
---|
| 668 | + |
---|
| 669 | + ip_hl_fl->ipv6_hop_limit_mask |= fl_hl_mask->hop_limit; |
---|
| 670 | + ip_hl_fl->ipv6_hop_limit &= ~fl_hl_mask->hop_limit; |
---|
| 671 | + ip_hl_fl->ipv6_hop_limit |= fl_hl->hop_limit & |
---|
| 672 | + fl_hl_mask->hop_limit; |
---|
| 673 | + break; |
---|
| 674 | + case round_down(offsetof(struct ipv6hdr, flow_lbl), 4): |
---|
| 675 | + if (mask & ~IPV6_FLOW_LABEL_MASK || |
---|
| 676 | + exact & ~IPV6_FLOW_LABEL_MASK) { |
---|
| 677 | + NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv6 flow label action"); |
---|
| 678 | + return -EOPNOTSUPP; |
---|
| 679 | + } |
---|
| 680 | + |
---|
| 681 | + ip_hl_fl->ipv6_label_mask |= mask; |
---|
| 682 | + ip_hl_fl->ipv6_label &= ~mask; |
---|
| 683 | + ip_hl_fl->ipv6_label |= exact & mask; |
---|
| 684 | + break; |
---|
| 685 | + } |
---|
| 686 | + |
---|
| 687 | + ip_hl_fl->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV6_TC_HL_FL; |
---|
| 688 | + ip_hl_fl->head.len_lw = sizeof(*ip_hl_fl) >> NFP_FL_LW_SIZ; |
---|
| 689 | + |
---|
| 690 | + return 0; |
---|
| 691 | +} |
---|
| 692 | + |
---|
| 693 | +static int |
---|
| 694 | +nfp_fl_set_ip6(const struct flow_action_entry *act, u32 off, |
---|
466 | 695 | struct nfp_fl_set_ipv6_addr *ip_dst, |
---|
467 | | - struct nfp_fl_set_ipv6_addr *ip_src) |
---|
| 696 | + struct nfp_fl_set_ipv6_addr *ip_src, |
---|
| 697 | + struct nfp_fl_set_ipv6_tc_hl_fl *ip_hl_fl, |
---|
| 698 | + struct netlink_ext_ack *extack) |
---|
468 | 699 | { |
---|
469 | 700 | __be32 exact, mask; |
---|
| 701 | + int err = 0; |
---|
470 | 702 | u8 word; |
---|
471 | 703 | |
---|
472 | 704 | /* We are expecting tcf_pedit to return a big endian value */ |
---|
473 | | - mask = (__force __be32)~tcf_pedit_mask(action, idx); |
---|
474 | | - exact = (__force __be32)tcf_pedit_val(action, idx); |
---|
| 705 | + mask = (__force __be32)~act->mangle.mask; |
---|
| 706 | + exact = (__force __be32)act->mangle.val; |
---|
475 | 707 | |
---|
476 | | - if (exact & ~mask) |
---|
| 708 | + if (exact & ~mask) { |
---|
| 709 | + NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit IPv6 action"); |
---|
477 | 710 | return -EOPNOTSUPP; |
---|
| 711 | + } |
---|
478 | 712 | |
---|
479 | 713 | if (off < offsetof(struct ipv6hdr, saddr)) { |
---|
480 | | - return -EOPNOTSUPP; |
---|
| 714 | + err = nfp_fl_set_ip6_hop_limit_flow_label(off, exact, mask, |
---|
| 715 | + ip_hl_fl, extack); |
---|
481 | 716 | } else if (off < offsetof(struct ipv6hdr, daddr)) { |
---|
482 | 717 | word = (off - offsetof(struct ipv6hdr, saddr)) / sizeof(exact); |
---|
483 | 718 | nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_SRC, word, |
---|
.. | .. |
---|
488 | 723 | nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_DST, word, |
---|
489 | 724 | exact, mask, ip_dst); |
---|
490 | 725 | } else { |
---|
| 726 | + NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pedit on unsupported section of IPv6 header"); |
---|
491 | 727 | return -EOPNOTSUPP; |
---|
492 | 728 | } |
---|
493 | 729 | |
---|
494 | | - return 0; |
---|
| 730 | + return err; |
---|
495 | 731 | } |
---|
496 | 732 | |
---|
497 | 733 | static int |
---|
498 | | -nfp_fl_set_tport(const struct tc_action *action, int idx, u32 off, |
---|
499 | | - struct nfp_fl_set_tport *set_tport, int opcode) |
---|
| 734 | +nfp_fl_set_tport(const struct flow_action_entry *act, u32 off, |
---|
| 735 | + struct nfp_fl_set_tport *set_tport, int opcode, |
---|
| 736 | + struct netlink_ext_ack *extack) |
---|
500 | 737 | { |
---|
501 | 738 | u32 exact, mask; |
---|
502 | 739 | |
---|
503 | | - if (off) |
---|
| 740 | + if (off) { |
---|
| 741 | + NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pedit on unsupported section of L4 header"); |
---|
504 | 742 | return -EOPNOTSUPP; |
---|
| 743 | + } |
---|
505 | 744 | |
---|
506 | | - mask = ~tcf_pedit_mask(action, idx); |
---|
507 | | - exact = tcf_pedit_val(action, idx); |
---|
| 745 | + mask = ~act->mangle.mask; |
---|
| 746 | + exact = act->mangle.val; |
---|
508 | 747 | |
---|
509 | | - if (exact & ~mask) |
---|
| 748 | + if (exact & ~mask) { |
---|
| 749 | + NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid pedit L4 action"); |
---|
510 | 750 | return -EOPNOTSUPP; |
---|
| 751 | + } |
---|
511 | 752 | |
---|
512 | 753 | nfp_fl_set_helper32(exact, mask, set_tport->tp_port_val, |
---|
513 | 754 | set_tport->tp_port_mask); |
---|
.. | .. |
---|
537 | 778 | } |
---|
538 | 779 | } |
---|
539 | 780 | |
---|
540 | | -static int |
---|
541 | | -nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow, |
---|
542 | | - char *nfp_action, int *a_len, u32 *csum_updated) |
---|
543 | | -{ |
---|
| 781 | +struct nfp_flower_pedit_acts { |
---|
544 | 782 | struct nfp_fl_set_ipv6_addr set_ip6_dst, set_ip6_src; |
---|
| 783 | + struct nfp_fl_set_ipv6_tc_hl_fl set_ip6_tc_hl_fl; |
---|
| 784 | + struct nfp_fl_set_ip4_ttl_tos set_ip_ttl_tos; |
---|
545 | 785 | struct nfp_fl_set_ip4_addrs set_ip_addr; |
---|
546 | 786 | struct nfp_fl_set_tport set_tport; |
---|
547 | 787 | struct nfp_fl_set_eth set_eth; |
---|
548 | | - enum pedit_header_type htype; |
---|
549 | | - int idx, nkeys, err; |
---|
| 788 | +}; |
---|
| 789 | + |
---|
| 790 | +static int |
---|
| 791 | +nfp_fl_commit_mangle(struct flow_cls_offload *flow, char *nfp_action, |
---|
| 792 | + int *a_len, struct nfp_flower_pedit_acts *set_act, |
---|
| 793 | + u32 *csum_updated) |
---|
| 794 | +{ |
---|
| 795 | + struct flow_rule *rule = flow_cls_offload_flow_rule(flow); |
---|
550 | 796 | size_t act_size = 0; |
---|
551 | | - u32 offset, cmd; |
---|
552 | 797 | u8 ip_proto = 0; |
---|
553 | 798 | |
---|
554 | | - memset(&set_ip6_dst, 0, sizeof(set_ip6_dst)); |
---|
555 | | - memset(&set_ip6_src, 0, sizeof(set_ip6_src)); |
---|
556 | | - memset(&set_ip_addr, 0, sizeof(set_ip_addr)); |
---|
557 | | - memset(&set_tport, 0, sizeof(set_tport)); |
---|
558 | | - memset(&set_eth, 0, sizeof(set_eth)); |
---|
559 | | - nkeys = tcf_pedit_nkeys(action); |
---|
| 799 | + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { |
---|
| 800 | + struct flow_match_basic match; |
---|
560 | 801 | |
---|
561 | | - for (idx = 0; idx < nkeys; idx++) { |
---|
562 | | - cmd = tcf_pedit_cmd(action, idx); |
---|
563 | | - htype = tcf_pedit_htype(action, idx); |
---|
564 | | - offset = tcf_pedit_offset(action, idx); |
---|
565 | | - |
---|
566 | | - if (cmd != TCA_PEDIT_KEY_EX_CMD_SET) |
---|
567 | | - return -EOPNOTSUPP; |
---|
568 | | - |
---|
569 | | - switch (htype) { |
---|
570 | | - case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH: |
---|
571 | | - err = nfp_fl_set_eth(action, idx, offset, &set_eth); |
---|
572 | | - break; |
---|
573 | | - case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4: |
---|
574 | | - err = nfp_fl_set_ip4(action, idx, offset, &set_ip_addr); |
---|
575 | | - break; |
---|
576 | | - case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6: |
---|
577 | | - err = nfp_fl_set_ip6(action, idx, offset, &set_ip6_dst, |
---|
578 | | - &set_ip6_src); |
---|
579 | | - break; |
---|
580 | | - case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP: |
---|
581 | | - err = nfp_fl_set_tport(action, idx, offset, &set_tport, |
---|
582 | | - NFP_FL_ACTION_OPCODE_SET_TCP); |
---|
583 | | - break; |
---|
584 | | - case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP: |
---|
585 | | - err = nfp_fl_set_tport(action, idx, offset, &set_tport, |
---|
586 | | - NFP_FL_ACTION_OPCODE_SET_UDP); |
---|
587 | | - break; |
---|
588 | | - default: |
---|
589 | | - return -EOPNOTSUPP; |
---|
590 | | - } |
---|
591 | | - if (err) |
---|
592 | | - return err; |
---|
| 802 | + flow_rule_match_basic(rule, &match); |
---|
| 803 | + ip_proto = match.key->ip_proto; |
---|
593 | 804 | } |
---|
594 | 805 | |
---|
595 | | - if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) { |
---|
596 | | - struct flow_dissector_key_basic *basic; |
---|
597 | | - |
---|
598 | | - basic = skb_flow_dissector_target(flow->dissector, |
---|
599 | | - FLOW_DISSECTOR_KEY_BASIC, |
---|
600 | | - flow->key); |
---|
601 | | - ip_proto = basic->ip_proto; |
---|
602 | | - } |
---|
603 | | - |
---|
604 | | - if (set_eth.head.len_lw) { |
---|
605 | | - act_size = sizeof(set_eth); |
---|
606 | | - memcpy(nfp_action, &set_eth, act_size); |
---|
| 806 | + if (set_act->set_eth.head.len_lw) { |
---|
| 807 | + act_size = sizeof(set_act->set_eth); |
---|
| 808 | + memcpy(nfp_action, &set_act->set_eth, act_size); |
---|
607 | 809 | *a_len += act_size; |
---|
608 | 810 | } |
---|
609 | | - if (set_ip_addr.head.len_lw) { |
---|
| 811 | + |
---|
| 812 | + if (set_act->set_ip_ttl_tos.head.len_lw) { |
---|
610 | 813 | nfp_action += act_size; |
---|
611 | | - act_size = sizeof(set_ip_addr); |
---|
612 | | - memcpy(nfp_action, &set_ip_addr, act_size); |
---|
| 814 | + act_size = sizeof(set_act->set_ip_ttl_tos); |
---|
| 815 | + memcpy(nfp_action, &set_act->set_ip_ttl_tos, act_size); |
---|
613 | 816 | *a_len += act_size; |
---|
614 | 817 | |
---|
615 | 818 | /* Hardware will automatically fix IPv4 and TCP/UDP checksum. */ |
---|
616 | 819 | *csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR | |
---|
617 | 820 | nfp_fl_csum_l4_to_flag(ip_proto); |
---|
618 | 821 | } |
---|
619 | | - if (set_ip6_dst.head.len_lw && set_ip6_src.head.len_lw) { |
---|
620 | | - /* TC compiles set src and dst IPv6 address as a single action, |
---|
621 | | - * the hardware requires this to be 2 separate actions. |
---|
622 | | - */ |
---|
| 822 | + |
---|
| 823 | + if (set_act->set_ip_addr.head.len_lw) { |
---|
623 | 824 | nfp_action += act_size; |
---|
624 | | - act_size = sizeof(set_ip6_src); |
---|
625 | | - memcpy(nfp_action, &set_ip6_src, act_size); |
---|
| 825 | + act_size = sizeof(set_act->set_ip_addr); |
---|
| 826 | + memcpy(nfp_action, &set_act->set_ip_addr, act_size); |
---|
626 | 827 | *a_len += act_size; |
---|
627 | 828 | |
---|
628 | | - act_size = sizeof(set_ip6_dst); |
---|
629 | | - memcpy(&nfp_action[sizeof(set_ip6_src)], &set_ip6_dst, |
---|
630 | | - act_size); |
---|
631 | | - *a_len += act_size; |
---|
| 829 | + /* Hardware will automatically fix IPv4 and TCP/UDP checksum. */ |
---|
| 830 | + *csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR | |
---|
| 831 | + nfp_fl_csum_l4_to_flag(ip_proto); |
---|
| 832 | + } |
---|
632 | 833 | |
---|
633 | | - /* Hardware will automatically fix TCP/UDP checksum. */ |
---|
634 | | - *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto); |
---|
635 | | - } else if (set_ip6_dst.head.len_lw) { |
---|
| 834 | + if (set_act->set_ip6_tc_hl_fl.head.len_lw) { |
---|
636 | 835 | nfp_action += act_size; |
---|
637 | | - act_size = sizeof(set_ip6_dst); |
---|
638 | | - memcpy(nfp_action, &set_ip6_dst, act_size); |
---|
639 | | - *a_len += act_size; |
---|
640 | | - |
---|
641 | | - /* Hardware will automatically fix TCP/UDP checksum. */ |
---|
642 | | - *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto); |
---|
643 | | - } else if (set_ip6_src.head.len_lw) { |
---|
644 | | - nfp_action += act_size; |
---|
645 | | - act_size = sizeof(set_ip6_src); |
---|
646 | | - memcpy(nfp_action, &set_ip6_src, act_size); |
---|
| 836 | + act_size = sizeof(set_act->set_ip6_tc_hl_fl); |
---|
| 837 | + memcpy(nfp_action, &set_act->set_ip6_tc_hl_fl, act_size); |
---|
647 | 838 | *a_len += act_size; |
---|
648 | 839 | |
---|
649 | 840 | /* Hardware will automatically fix TCP/UDP checksum. */ |
---|
650 | 841 | *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto); |
---|
651 | 842 | } |
---|
652 | | - if (set_tport.head.len_lw) { |
---|
| 843 | + |
---|
| 844 | + if (set_act->set_ip6_dst.head.len_lw && |
---|
| 845 | + set_act->set_ip6_src.head.len_lw) { |
---|
| 846 | + /* TC compiles set src and dst IPv6 address as a single action, |
---|
| 847 | + * the hardware requires this to be 2 separate actions. |
---|
| 848 | + */ |
---|
653 | 849 | nfp_action += act_size; |
---|
654 | | - act_size = sizeof(set_tport); |
---|
655 | | - memcpy(nfp_action, &set_tport, act_size); |
---|
| 850 | + act_size = sizeof(set_act->set_ip6_src); |
---|
| 851 | + memcpy(nfp_action, &set_act->set_ip6_src, act_size); |
---|
| 852 | + *a_len += act_size; |
---|
| 853 | + |
---|
| 854 | + act_size = sizeof(set_act->set_ip6_dst); |
---|
| 855 | + memcpy(&nfp_action[sizeof(set_act->set_ip6_src)], |
---|
| 856 | + &set_act->set_ip6_dst, act_size); |
---|
| 857 | + *a_len += act_size; |
---|
| 858 | + |
---|
| 859 | + /* Hardware will automatically fix TCP/UDP checksum. */ |
---|
| 860 | + *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto); |
---|
| 861 | + } else if (set_act->set_ip6_dst.head.len_lw) { |
---|
| 862 | + nfp_action += act_size; |
---|
| 863 | + act_size = sizeof(set_act->set_ip6_dst); |
---|
| 864 | + memcpy(nfp_action, &set_act->set_ip6_dst, act_size); |
---|
| 865 | + *a_len += act_size; |
---|
| 866 | + |
---|
| 867 | + /* Hardware will automatically fix TCP/UDP checksum. */ |
---|
| 868 | + *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto); |
---|
| 869 | + } else if (set_act->set_ip6_src.head.len_lw) { |
---|
| 870 | + nfp_action += act_size; |
---|
| 871 | + act_size = sizeof(set_act->set_ip6_src); |
---|
| 872 | + memcpy(nfp_action, &set_act->set_ip6_src, act_size); |
---|
| 873 | + *a_len += act_size; |
---|
| 874 | + |
---|
| 875 | + /* Hardware will automatically fix TCP/UDP checksum. */ |
---|
| 876 | + *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto); |
---|
| 877 | + } |
---|
| 878 | + if (set_act->set_tport.head.len_lw) { |
---|
| 879 | + nfp_action += act_size; |
---|
| 880 | + act_size = sizeof(set_act->set_tport); |
---|
| 881 | + memcpy(nfp_action, &set_act->set_tport, act_size); |
---|
656 | 882 | *a_len += act_size; |
---|
657 | 883 | |
---|
658 | 884 | /* Hardware will automatically fix TCP/UDP checksum. */ |
---|
.. | .. |
---|
663 | 889 | } |
---|
664 | 890 | |
---|
665 | 891 | static int |
---|
666 | | -nfp_flower_output_action(struct nfp_app *app, const struct tc_action *a, |
---|
| 892 | +nfp_fl_pedit(const struct flow_action_entry *act, |
---|
| 893 | + struct flow_cls_offload *flow, char *nfp_action, int *a_len, |
---|
| 894 | + u32 *csum_updated, struct nfp_flower_pedit_acts *set_act, |
---|
| 895 | + struct netlink_ext_ack *extack) |
---|
| 896 | +{ |
---|
| 897 | + enum flow_action_mangle_base htype; |
---|
| 898 | + u32 offset; |
---|
| 899 | + |
---|
| 900 | + htype = act->mangle.htype; |
---|
| 901 | + offset = act->mangle.offset; |
---|
| 902 | + |
---|
| 903 | + switch (htype) { |
---|
| 904 | + case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH: |
---|
| 905 | + return nfp_fl_set_eth(act, offset, &set_act->set_eth, extack); |
---|
| 906 | + case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4: |
---|
| 907 | + return nfp_fl_set_ip4(act, offset, &set_act->set_ip_addr, |
---|
| 908 | + &set_act->set_ip_ttl_tos, extack); |
---|
| 909 | + case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6: |
---|
| 910 | + return nfp_fl_set_ip6(act, offset, &set_act->set_ip6_dst, |
---|
| 911 | + &set_act->set_ip6_src, |
---|
| 912 | + &set_act->set_ip6_tc_hl_fl, extack); |
---|
| 913 | + case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP: |
---|
| 914 | + return nfp_fl_set_tport(act, offset, &set_act->set_tport, |
---|
| 915 | + NFP_FL_ACTION_OPCODE_SET_TCP, extack); |
---|
| 916 | + case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP: |
---|
| 917 | + return nfp_fl_set_tport(act, offset, &set_act->set_tport, |
---|
| 918 | + NFP_FL_ACTION_OPCODE_SET_UDP, extack); |
---|
| 919 | + default: |
---|
| 920 | + NL_SET_ERR_MSG_MOD(extack, "unsupported offload: pedit on unsupported header"); |
---|
| 921 | + return -EOPNOTSUPP; |
---|
| 922 | + } |
---|
| 923 | +} |
---|
| 924 | + |
---|
| 925 | +static int |
---|
| 926 | +nfp_flower_output_action(struct nfp_app *app, |
---|
| 927 | + const struct flow_action_entry *act, |
---|
667 | 928 | struct nfp_fl_payload *nfp_fl, int *a_len, |
---|
668 | 929 | struct net_device *netdev, bool last, |
---|
669 | 930 | enum nfp_flower_tun_type *tun_type, int *tun_out_cnt, |
---|
670 | | - int *out_cnt, u32 *csum_updated) |
---|
| 931 | + int *out_cnt, u32 *csum_updated, bool pkt_host, |
---|
| 932 | + struct netlink_ext_ack *extack) |
---|
671 | 933 | { |
---|
672 | 934 | struct nfp_flower_priv *priv = app->priv; |
---|
673 | 935 | struct nfp_fl_output *output; |
---|
.. | .. |
---|
676 | 938 | /* If csum_updated has not been reset by now, it means HW will |
---|
677 | 939 | * incorrectly update csums when they are not requested. |
---|
678 | 940 | */ |
---|
679 | | - if (*csum_updated) |
---|
| 941 | + if (*csum_updated) { |
---|
| 942 | + NL_SET_ERR_MSG_MOD(extack, "unsupported offload: set actions without updating checksums are not supported"); |
---|
680 | 943 | return -EOPNOTSUPP; |
---|
| 944 | + } |
---|
681 | 945 | |
---|
682 | | - if (*a_len + sizeof(struct nfp_fl_output) > NFP_FL_MAX_A_SIZ) |
---|
| 946 | + if (*a_len + sizeof(struct nfp_fl_output) > NFP_FL_MAX_A_SIZ) { |
---|
| 947 | + NL_SET_ERR_MSG_MOD(extack, "unsupported offload: mirred output increases action list size beyond the allowed maximum"); |
---|
683 | 948 | return -EOPNOTSUPP; |
---|
| 949 | + } |
---|
684 | 950 | |
---|
685 | 951 | output = (struct nfp_fl_output *)&nfp_fl->action_data[*a_len]; |
---|
686 | | - err = nfp_fl_output(app, output, a, nfp_fl, last, netdev, *tun_type, |
---|
687 | | - tun_out_cnt); |
---|
| 952 | + err = nfp_fl_output(app, output, act, nfp_fl, last, netdev, *tun_type, |
---|
| 953 | + tun_out_cnt, pkt_host, extack); |
---|
688 | 954 | if (err) |
---|
689 | 955 | return err; |
---|
690 | 956 | |
---|
691 | 957 | *a_len += sizeof(struct nfp_fl_output); |
---|
692 | 958 | |
---|
693 | | - if (priv->flower_ext_feats & NFP_FL_FEATS_LAG) { |
---|
| 959 | + if (priv->flower_en_feats & NFP_FL_ENABLE_LAG) { |
---|
694 | 960 | /* nfp_fl_pre_lag returns -err or size of prelag action added. |
---|
695 | 961 | * This will be 0 if it is not egressing to a lag dev. |
---|
696 | 962 | */ |
---|
697 | | - prelag_size = nfp_fl_pre_lag(app, a, nfp_fl, *a_len); |
---|
698 | | - if (prelag_size < 0) |
---|
| 963 | + prelag_size = nfp_fl_pre_lag(app, act, nfp_fl, *a_len, extack); |
---|
| 964 | + if (prelag_size < 0) { |
---|
699 | 965 | return prelag_size; |
---|
700 | | - else if (prelag_size > 0 && (!last || *out_cnt)) |
---|
| 966 | + } else if (prelag_size > 0 && (!last || *out_cnt)) { |
---|
| 967 | + NL_SET_ERR_MSG_MOD(extack, "unsupported offload: LAG action has to be last action in action list"); |
---|
701 | 968 | return -EOPNOTSUPP; |
---|
| 969 | + } |
---|
702 | 970 | |
---|
703 | 971 | *a_len += prelag_size; |
---|
704 | 972 | } |
---|
.. | .. |
---|
708 | 976 | } |
---|
709 | 977 | |
---|
710 | 978 | static int |
---|
711 | | -nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a, |
---|
712 | | - struct tc_cls_flower_offload *flow, |
---|
| 979 | +nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act, |
---|
| 980 | + struct flow_cls_offload *flow, |
---|
713 | 981 | struct nfp_fl_payload *nfp_fl, int *a_len, |
---|
714 | 982 | struct net_device *netdev, |
---|
715 | 983 | enum nfp_flower_tun_type *tun_type, int *tun_out_cnt, |
---|
716 | | - int *out_cnt, u32 *csum_updated) |
---|
| 984 | + int *out_cnt, u32 *csum_updated, |
---|
| 985 | + struct nfp_flower_pedit_acts *set_act, bool *pkt_host, |
---|
| 986 | + struct netlink_ext_ack *extack, int act_idx) |
---|
717 | 987 | { |
---|
718 | | - struct nfp_fl_set_ipv4_udp_tun *set_tun; |
---|
719 | 988 | struct nfp_fl_pre_tunnel *pre_tun; |
---|
| 989 | + struct nfp_fl_set_tun *set_tun; |
---|
720 | 990 | struct nfp_fl_push_vlan *psh_v; |
---|
| 991 | + struct nfp_fl_push_mpls *psh_m; |
---|
721 | 992 | struct nfp_fl_pop_vlan *pop_v; |
---|
| 993 | + struct nfp_fl_pop_mpls *pop_m; |
---|
| 994 | + struct nfp_fl_set_mpls *set_m; |
---|
722 | 995 | int err; |
---|
723 | 996 | |
---|
724 | | - if (is_tcf_gact_shot(a)) { |
---|
| 997 | + switch (act->id) { |
---|
| 998 | + case FLOW_ACTION_DROP: |
---|
725 | 999 | nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_DROP); |
---|
726 | | - } else if (is_tcf_mirred_egress_redirect(a)) { |
---|
727 | | - err = nfp_flower_output_action(app, a, nfp_fl, a_len, netdev, |
---|
| 1000 | + break; |
---|
| 1001 | + case FLOW_ACTION_REDIRECT_INGRESS: |
---|
| 1002 | + case FLOW_ACTION_REDIRECT: |
---|
| 1003 | + err = nfp_flower_output_action(app, act, nfp_fl, a_len, netdev, |
---|
728 | 1004 | true, tun_type, tun_out_cnt, |
---|
729 | | - out_cnt, csum_updated); |
---|
| 1005 | + out_cnt, csum_updated, *pkt_host, |
---|
| 1006 | + extack); |
---|
730 | 1007 | if (err) |
---|
731 | 1008 | return err; |
---|
732 | | - |
---|
733 | | - } else if (is_tcf_mirred_egress_mirror(a)) { |
---|
734 | | - err = nfp_flower_output_action(app, a, nfp_fl, a_len, netdev, |
---|
| 1009 | + break; |
---|
| 1010 | + case FLOW_ACTION_MIRRED_INGRESS: |
---|
| 1011 | + case FLOW_ACTION_MIRRED: |
---|
| 1012 | + err = nfp_flower_output_action(app, act, nfp_fl, a_len, netdev, |
---|
735 | 1013 | false, tun_type, tun_out_cnt, |
---|
736 | | - out_cnt, csum_updated); |
---|
| 1014 | + out_cnt, csum_updated, *pkt_host, |
---|
| 1015 | + extack); |
---|
737 | 1016 | if (err) |
---|
738 | 1017 | return err; |
---|
739 | | - |
---|
740 | | - } else if (is_tcf_vlan(a) && tcf_vlan_action(a) == TCA_VLAN_ACT_POP) { |
---|
741 | | - if (*a_len + sizeof(struct nfp_fl_pop_vlan) > NFP_FL_MAX_A_SIZ) |
---|
| 1018 | + break; |
---|
| 1019 | + case FLOW_ACTION_VLAN_POP: |
---|
| 1020 | + if (*a_len + |
---|
| 1021 | + sizeof(struct nfp_fl_pop_vlan) > NFP_FL_MAX_A_SIZ) { |
---|
| 1022 | + NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at pop vlan"); |
---|
742 | 1023 | return -EOPNOTSUPP; |
---|
| 1024 | + } |
---|
743 | 1025 | |
---|
744 | 1026 | pop_v = (struct nfp_fl_pop_vlan *)&nfp_fl->action_data[*a_len]; |
---|
745 | 1027 | nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_POPV); |
---|
746 | 1028 | |
---|
747 | 1029 | nfp_fl_pop_vlan(pop_v); |
---|
748 | 1030 | *a_len += sizeof(struct nfp_fl_pop_vlan); |
---|
749 | | - } else if (is_tcf_vlan(a) && tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) { |
---|
750 | | - if (*a_len + sizeof(struct nfp_fl_push_vlan) > NFP_FL_MAX_A_SIZ) |
---|
| 1031 | + break; |
---|
| 1032 | + case FLOW_ACTION_VLAN_PUSH: |
---|
| 1033 | + if (*a_len + |
---|
| 1034 | + sizeof(struct nfp_fl_push_vlan) > NFP_FL_MAX_A_SIZ) { |
---|
| 1035 | + NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at push vlan"); |
---|
751 | 1036 | return -EOPNOTSUPP; |
---|
| 1037 | + } |
---|
752 | 1038 | |
---|
753 | 1039 | psh_v = (struct nfp_fl_push_vlan *)&nfp_fl->action_data[*a_len]; |
---|
754 | 1040 | nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL); |
---|
755 | 1041 | |
---|
756 | | - nfp_fl_push_vlan(psh_v, a); |
---|
| 1042 | + nfp_fl_push_vlan(psh_v, act); |
---|
757 | 1043 | *a_len += sizeof(struct nfp_fl_push_vlan); |
---|
758 | | - } else if (is_tcf_tunnel_set(a)) { |
---|
759 | | - struct ip_tunnel_info *ip_tun = tcf_tunnel_info(a); |
---|
760 | | - struct nfp_repr *repr = netdev_priv(netdev); |
---|
| 1044 | + break; |
---|
| 1045 | + case FLOW_ACTION_TUNNEL_ENCAP: { |
---|
| 1046 | + const struct ip_tunnel_info *ip_tun = act->tunnel; |
---|
761 | 1047 | |
---|
762 | | - *tun_type = nfp_fl_get_tun_from_act_l4_port(repr->app, a); |
---|
763 | | - if (*tun_type == NFP_FL_TUNNEL_NONE) |
---|
| 1048 | + *tun_type = nfp_fl_get_tun_from_act(app, flow, act, act_idx); |
---|
| 1049 | + if (*tun_type == NFP_FL_TUNNEL_NONE) { |
---|
| 1050 | + NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unsupported tunnel type in action list"); |
---|
764 | 1051 | return -EOPNOTSUPP; |
---|
| 1052 | + } |
---|
765 | 1053 | |
---|
766 | | - if (ip_tun->mode & ~NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS) |
---|
| 1054 | + if (ip_tun->mode & ~NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS) { |
---|
| 1055 | + NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unsupported tunnel flags in action list"); |
---|
767 | 1056 | return -EOPNOTSUPP; |
---|
| 1057 | + } |
---|
768 | 1058 | |
---|
769 | 1059 | /* Pre-tunnel action is required for tunnel encap. |
---|
770 | 1060 | * This checks for next hop entries on NFP. |
---|
771 | 1061 | * If none, the packet falls back before applying other actions. |
---|
772 | 1062 | */ |
---|
 		if (*a_len + sizeof(struct nfp_fl_pre_tunnel) +
-		    sizeof(struct nfp_fl_set_ipv4_udp_tun) > NFP_FL_MAX_A_SIZ)
+		    sizeof(struct nfp_fl_set_tun) > NFP_FL_MAX_A_SIZ) {
+			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at tunnel encap");
 			return -EOPNOTSUPP;
+		}

 		pre_tun = nfp_fl_pre_tunnel(nfp_fl->action_data, *a_len);
 		nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
 		*a_len += sizeof(struct nfp_fl_pre_tunnel);

-		err = nfp_fl_push_geneve_options(nfp_fl, a_len, a);
+		err = nfp_fl_push_geneve_options(nfp_fl, a_len, act, extack);
 		if (err)
 			return err;

 		set_tun = (void *)&nfp_fl->action_data[*a_len];
-		err = nfp_fl_set_ipv4_udp_tun(app, set_tun, a, pre_tun,
-					      *tun_type, netdev);
+		err = nfp_fl_set_tun(app, set_tun, act, pre_tun, *tun_type,
+				     netdev, extack);
 		if (err)
 			return err;
-		*a_len += sizeof(struct nfp_fl_set_ipv4_udp_tun);
-	} else if (is_tcf_tunnel_release(a)) {
+		*a_len += sizeof(struct nfp_fl_set_tun);
+		}
+		break;
+	case FLOW_ACTION_TUNNEL_DECAP:
 		/* Tunnel decap is handled by default so accept action. */
 		return 0;
-	} else if (is_tcf_pedit(a)) {
-		if (nfp_fl_pedit(a, flow, &nfp_fl->action_data[*a_len],
-				 a_len, csum_updated))
+	case FLOW_ACTION_MANGLE:
+		if (nfp_fl_pedit(act, flow, &nfp_fl->action_data[*a_len],
+				 a_len, csum_updated, set_act, extack))
 			return -EOPNOTSUPP;
-	} else if (is_tcf_csum(a)) {
+		break;
+	case FLOW_ACTION_CSUM:
 		/* csum action requests recalc of something we have not fixed */
-		if (tcf_csum_update_flags(a) & ~*csum_updated)
+		if (act->csum_flags & ~*csum_updated) {
+			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unsupported csum update action in action list");
 			return -EOPNOTSUPP;
+		}
 		/* If we will correctly fix the csum we can remove it from the
 		 * csum update list, which is later used to check support.
 		 */
-		*csum_updated &= ~tcf_csum_update_flags(a);
-	} else {
+		*csum_updated &= ~act->csum_flags;
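+		/* Illustrative example: after "pedit ex munge ip ttl set 63"
+		 * the driver already recomputes the IPv4 header checksum, so
+		 * TCA_CSUM_UPDATE_FLAG_IPV4HDR is set in *csum_updated and a
+		 * following "csum ip" action is accepted and cleared here.
+		 */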
---|
+		break;
+	case FLOW_ACTION_MPLS_PUSH:
+		if (*a_len +
+		    sizeof(struct nfp_fl_push_mpls) > NFP_FL_MAX_A_SIZ) {
+			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at push MPLS");
+			return -EOPNOTSUPP;
+		}
+
+		psh_m = (struct nfp_fl_push_mpls *)&nfp_fl->action_data[*a_len];
+		nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
+
+		err = nfp_fl_push_mpls(psh_m, act, extack);
+		if (err)
+			return err;
+		*a_len += sizeof(struct nfp_fl_push_mpls);
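+		/* E.g. "action mpls push protocol mpls_uc label 123 ttl 64"
+		 * (illustrative iproute2 invocation) arrives as
+		 * FLOW_ACTION_MPLS_PUSH and is packed into the label stack
+		 * entry by nfp_fl_push_mpls() above.
+		 */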
---|
+		break;
+	case FLOW_ACTION_MPLS_POP:
+		if (*a_len +
+		    sizeof(struct nfp_fl_pop_mpls) > NFP_FL_MAX_A_SIZ) {
+			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at pop MPLS");
+			return -EOPNOTSUPP;
+		}
+
+		pop_m = (struct nfp_fl_pop_mpls *)&nfp_fl->action_data[*a_len];
+		nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
+
+		nfp_fl_pop_mpls(pop_m, act);
+		*a_len += sizeof(struct nfp_fl_pop_mpls);
+		break;
+	case FLOW_ACTION_MPLS_MANGLE:
+		if (*a_len +
+		    sizeof(struct nfp_fl_set_mpls) > NFP_FL_MAX_A_SIZ) {
+			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: maximum allowed action list size exceeded at set MPLS");
+			return -EOPNOTSUPP;
+		}
+
+		set_m = (struct nfp_fl_set_mpls *)&nfp_fl->action_data[*a_len];
+		nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
+
+		nfp_fl_set_mpls(set_m, act);
+		*a_len += sizeof(struct nfp_fl_set_mpls);
+		break;
+	case FLOW_ACTION_PTYPE:
+		/* TC ptype skbedit sets PACKET_HOST for ingress redirect. */
+		if (act->ptype != PACKET_HOST)
+			return -EOPNOTSUPP;
+
+		*pkt_host = true;
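+		/* E.g. "action skbedit ptype host" preceding an
+		 * "action mirred ingress redirect" (illustrative); pkt_host
+		 * is consumed later when the ingress redirect is validated.
+		 */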
---|
+		break;
+	default:
 		/* Currently we do not handle any other actions. */
+		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unsupported action in action list");
 		return -EOPNOTSUPP;
 	}

 	return 0;
 }

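+/* Consecutive pedit (FLOW_ACTION_MANGLE) entries are accumulated into a
+ * single nfp_flower_pedit_acts and committed as one set action. For an
+ * action list such as [mangle ip src][mangle ip dst][mirred] (illustrative),
+ * the run starts at index 0 and ends at index 1; the helpers below detect
+ * those boundaries.
+ */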
---|
+static bool nfp_fl_check_mangle_start(struct flow_action *flow_act,
+				      int current_act_idx)
+{
+	struct flow_action_entry current_act;
+	struct flow_action_entry prev_act;
+
+	current_act = flow_act->entries[current_act_idx];
+	if (current_act.id != FLOW_ACTION_MANGLE)
+		return false;
+
+	if (current_act_idx == 0)
+		return true;
+
+	prev_act = flow_act->entries[current_act_idx - 1];
+
+	return prev_act.id != FLOW_ACTION_MANGLE;
+}
+
+static bool nfp_fl_check_mangle_end(struct flow_action *flow_act,
+				    int current_act_idx)
+{
+	struct flow_action_entry current_act;
+	struct flow_action_entry next_act;
+
+	current_act = flow_act->entries[current_act_idx];
+	if (current_act.id != FLOW_ACTION_MANGLE)
+		return false;
+
---|
+	/* The last action has index num_entries - 1; guard it here so the
+	 * entries[] lookahead below cannot read out of bounds.
+	 */
+	if (current_act_idx == flow_act->num_entries - 1)
+		return true;
---|
+
+	next_act = flow_act->entries[current_act_idx + 1];
+
+	return next_act.id != FLOW_ACTION_MANGLE;
+}
+
 int nfp_flower_compile_action(struct nfp_app *app,
-			      struct tc_cls_flower_offload *flow,
+			      struct flow_cls_offload *flow,
 			      struct net_device *netdev,
-			      struct nfp_fl_payload *nfp_flow)
+			      struct nfp_fl_payload *nfp_flow,
+			      struct netlink_ext_ack *extack)
 {
 	int act_len, act_cnt, err, tun_out_cnt, out_cnt, i;
+	struct nfp_flower_pedit_acts set_act;
 	enum nfp_flower_tun_type tun_type;
-	const struct tc_action *a;
+	struct flow_action_entry *act;
+	bool pkt_host = false;
 	u32 csum_updated = 0;
+
+	if (!flow_action_hw_stats_check(&flow->rule->action, extack,
+					FLOW_ACTION_HW_STATS_DELAYED_BIT))
+		return -EOPNOTSUPP;
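+	/* The NFP reports flow stats periodically from firmware, i.e. with
+	 * a delay, so only rules content with delayed hardware stats can be
+	 * offloaded; requests for immediate stats are rejected above.
+	 */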
---|

 	memset(nfp_flow->action_data, 0, NFP_FL_MAX_A_SIZ);
 	nfp_flow->meta.act_len = 0;
...
 	tun_out_cnt = 0;
 	out_cnt = 0;

-	tcf_exts_for_each_action(i, a, flow->exts) {
-		err = nfp_flower_loop_action(app, a, flow, nfp_flow, &act_len,
+	flow_action_for_each(i, act, &flow->rule->action) {
+		if (nfp_fl_check_mangle_start(&flow->rule->action, i))
+			memset(&set_act, 0, sizeof(set_act));
+		err = nfp_flower_loop_action(app, act, flow, nfp_flow, &act_len,
 					     netdev, &tun_type, &tun_out_cnt,
-					     &out_cnt, &csum_updated);
+					     &out_cnt, &csum_updated,
+					     &set_act, &pkt_host, extack, i);
 		if (err)
 			return err;
 		act_cnt++;
+		if (nfp_fl_check_mangle_end(&flow->rule->action, i))
+			nfp_fl_commit_mangle(flow,
+					     &nfp_flow->action_data[act_len],
+					     &act_len, &set_act, &csum_updated);
 	}

---|
 	/* We optimise when the action list is small; this can unfortunately
---|