@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      1. Redistributions of source code must retain the above
- *         copyright notice, this list of conditions and the following
- *         disclaimer.
- *
- *      2. Redistributions in binary form must reproduce the above
- *         copyright notice, this list of conditions and the following
- *         disclaimer in the documentation and/or other materials
- *         provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
 
 #include <linux/skbuff.h>
 #include <net/devlink.h>
@@ -61,6 +31,7 @@
 	 BIT(FLOW_DISSECTOR_KEY_PORTS) | \
 	 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | \
 	 BIT(FLOW_DISSECTOR_KEY_VLAN) | \
+	 BIT(FLOW_DISSECTOR_KEY_CVLAN) | \
 	 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
 	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
 	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
@@ -82,15 +53,46 @@
 
 #define NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R \
 	(BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
-	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
-	 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS))
+	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS))
+
+#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR_V6_R \
+	(BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
+	 BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS))
+
+#define NFP_FLOWER_MERGE_FIELDS \
+	(NFP_FLOWER_LAYER_PORT | \
+	 NFP_FLOWER_LAYER_MAC | \
+	 NFP_FLOWER_LAYER_TP | \
+	 NFP_FLOWER_LAYER_IPV4 | \
+	 NFP_FLOWER_LAYER_IPV6)
+
+#define NFP_FLOWER_PRE_TUN_RULE_FIELDS \
+	(NFP_FLOWER_LAYER_EXT_META | \
+	 NFP_FLOWER_LAYER_PORT | \
+	 NFP_FLOWER_LAYER_MAC | \
+	 NFP_FLOWER_LAYER_IPV4 | \
+	 NFP_FLOWER_LAYER_IPV6)
+
+struct nfp_flower_merge_check {
+	union {
+		struct {
+			__be16 tci;
+			struct nfp_flower_mac_mpls l2;
+			struct nfp_flower_tp_ports l4;
+			union {
+				struct nfp_flower_ipv4 ipv4;
+				struct nfp_flower_ipv6 ipv6;
+			};
+		};
+		unsigned long vals[8];
+	};
+};
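
The union just added is what makes the merge feasibility check cheap: the same bytes can be written through the structured match-field view (tci/l2/l4/ipv4/ipv6) and then processed word-at-a-time through vals[] with the kernel's bitmap helpers. A minimal userspace sketch of that overlay idea, using invented toy_* stand-ins rather than the driver's real structs:

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	/* Stand-ins for the driver's match-field structs. */
	struct toy_l2 { uint8_t dst[6], src[6]; };
	struct toy_l4 { uint16_t sport, dport; };

	struct toy_merge_check {
		union {
			struct {		/* field view */
				uint16_t tci;
				struct toy_l2 l2;
				struct toy_l4 l4;
			};
			unsigned long vals[8];	/* word view of same bytes */
		};
	};

	int main(void)
	{
		struct toy_merge_check a;
		unsigned long any = 0;
		size_t i;

		/* The word array must cover the whole field view. */
		assert(sizeof(a.vals) >= sizeof(uint16_t) +
		       sizeof(struct toy_l2) + sizeof(struct toy_l4));

		memset(&a, 0, sizeof(a));
		a.l4.dport = 0xffff;		/* write via field view */
		for (i = 0; i < sizeof(a.vals) / sizeof(a.vals[0]); i++)
			any |= a.vals[i];	/* read via word view */
		printf("mask dirty: %s\n", any ? "yes" : "no");	/* yes */
		return 0;
	}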
---|
 
 static int
-nfp_flower_xmit_flow(struct net_device *netdev,
-		     struct nfp_fl_payload *nfp_flow, u8 mtype)
+nfp_flower_xmit_flow(struct nfp_app *app, struct nfp_fl_payload *nfp_flow,
+		     u8 mtype)
 {
 	u32 meta_len, key_len, mask_len, act_len, tot_len;
-	struct nfp_repr *priv = netdev_priv(netdev);
 	struct sk_buff *skb;
 	unsigned char *msg;
 
@@ -108,7 +110,7 @@
 	nfp_flow->meta.mask_len >>= NFP_FL_LW_SIZ;
 	nfp_flow->meta.act_len >>= NFP_FL_LW_SIZ;
 
-	skb = nfp_flower_cmsg_alloc(priv->app, tot_len, mtype, GFP_KERNEL);
+	skb = nfp_flower_cmsg_alloc(app, tot_len, mtype, GFP_KERNEL);
 	if (!skb)
 		return -ENOMEM;
 
@@ -126,28 +128,39 @@
 	nfp_flow->meta.mask_len <<= NFP_FL_LW_SIZ;
 	nfp_flow->meta.act_len <<= NFP_FL_LW_SIZ;
 
-	nfp_ctrl_tx(priv->app->ctrl, skb);
+	nfp_ctrl_tx(app->ctrl, skb);
 
 	return 0;
 }
 
-static bool nfp_flower_check_higher_than_mac(struct tc_cls_flower_offload *f)
+static bool nfp_flower_check_higher_than_mac(struct flow_cls_offload *f)
 {
-	return dissector_uses_key(f->dissector,
-				  FLOW_DISSECTOR_KEY_IPV4_ADDRS) ||
-		dissector_uses_key(f->dissector,
-				   FLOW_DISSECTOR_KEY_IPV6_ADDRS) ||
-		dissector_uses_key(f->dissector,
-				   FLOW_DISSECTOR_KEY_PORTS) ||
-		dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ICMP);
+	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+
+	return flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS) ||
+	       flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS) ||
+	       flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS) ||
+	       flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP);
+}
+
+static bool nfp_flower_check_higher_than_l3(struct flow_cls_offload *f)
+{
+	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+
+	return flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS) ||
+	       flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP);
 }
 
 static int
 nfp_flower_calc_opt_layer(struct flow_dissector_key_enc_opts *enc_opts,
-			  u32 *key_layer_two, int *key_size)
+			  u32 *key_layer_two, int *key_size, bool ipv6,
+			  struct netlink_ext_ack *extack)
 {
-	if (enc_opts->len > NFP_FL_MAX_GENEVE_OPT_KEY)
+	if (enc_opts->len > NFP_FL_MAX_GENEVE_OPT_KEY ||
+	    (ipv6 && enc_opts->len > NFP_FL_MAX_GENEVE_OPT_KEY_V6)) {
+		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: geneve options exceed maximum length");
 		return -EOPNOTSUPP;
+	}
 
 	if (enc_opts->len > 0) {
 		*key_layer_two |= NFP_FLOWER_LAYER2_GENEVE_OP;
@@ -158,147 +171,253 @@
 }
 
 static int
-nfp_flower_calculate_key_layers(struct nfp_app *app,
-				struct nfp_fl_key_ls *ret_key_ls,
-				struct tc_cls_flower_offload *flow,
-				bool egress,
-				enum nfp_flower_tun_type *tun_type)
+nfp_flower_calc_udp_tun_layer(struct flow_dissector_key_ports *enc_ports,
+			      struct flow_dissector_key_enc_opts *enc_op,
+			      u32 *key_layer_two, u8 *key_layer, int *key_size,
+			      struct nfp_flower_priv *priv,
+			      enum nfp_flower_tun_type *tun_type, bool ipv6,
+			      struct netlink_ext_ack *extack)
 {
-	struct flow_dissector_key_basic *mask_basic = NULL;
-	struct flow_dissector_key_basic *key_basic = NULL;
+	int err;
+
+	switch (enc_ports->dst) {
+	case htons(IANA_VXLAN_UDP_PORT):
+		*tun_type = NFP_FL_TUNNEL_VXLAN;
+		*key_layer |= NFP_FLOWER_LAYER_VXLAN;
+
+		if (ipv6) {
+			*key_layer |= NFP_FLOWER_LAYER_EXT_META;
+			*key_size += sizeof(struct nfp_flower_ext_meta);
+			*key_layer_two |= NFP_FLOWER_LAYER2_TUN_IPV6;
+			*key_size += sizeof(struct nfp_flower_ipv6_udp_tun);
+		} else {
+			*key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
+		}
+
+		if (enc_op) {
+			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: encap options not supported on vxlan tunnels");
+			return -EOPNOTSUPP;
+		}
+		break;
+	case htons(GENEVE_UDP_PORT):
+		if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE)) {
+			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support geneve offload");
+			return -EOPNOTSUPP;
+		}
+		*tun_type = NFP_FL_TUNNEL_GENEVE;
+		*key_layer |= NFP_FLOWER_LAYER_EXT_META;
+		*key_size += sizeof(struct nfp_flower_ext_meta);
+		*key_layer_two |= NFP_FLOWER_LAYER2_GENEVE;
+
+		if (ipv6) {
+			*key_layer_two |= NFP_FLOWER_LAYER2_TUN_IPV6;
+			*key_size += sizeof(struct nfp_flower_ipv6_udp_tun);
+		} else {
+			*key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
+		}
+
+		if (!enc_op)
+			break;
+		if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT)) {
+			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support geneve option offload");
+			return -EOPNOTSUPP;
+		}
+		err = nfp_flower_calc_opt_layer(enc_op, key_layer_two, key_size,
+						ipv6, extack);
+		if (err)
+			return err;
+		break;
+	default:
+		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: tunnel type unknown");
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
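
The dispatch above keys purely on the big-endian destination port of the outer UDP header: the IANA VXLAN and Geneve ports select a tunnel type, anything else is rejected. A self-contained sketch of the same switch shape (the toy_* names, port defines and error convention here are illustrative, not the driver's):

	#include <arpa/inet.h>
	#include <stdint.h>
	#include <stdio.h>

	#define TOY_PORT_VXLAN	4789	/* like IANA_VXLAN_UDP_PORT */
	#define TOY_PORT_GENEVE	6081	/* like GENEVE_UDP_PORT */

	enum toy_tun { TOY_TUN_NONE, TOY_TUN_VXLAN, TOY_TUN_GENEVE };

	/* Map a big-endian UDP destination port to a tunnel type.
	 * The kernel code switches on the raw value with htons() in the
	 * case labels; converting with ntohs() first is equivalent.
	 */
	static int toy_udp_tun_type(uint16_t dst_be, enum toy_tun *type)
	{
		switch (ntohs(dst_be)) {
		case TOY_PORT_VXLAN:
			*type = TOY_TUN_VXLAN;
			return 0;
		case TOY_PORT_GENEVE:
			*type = TOY_TUN_GENEVE;
			return 0;
		default:
			return -1;	/* unknown tunnel: reject offload */
		}
	}

	int main(void)
	{
		enum toy_tun t;

		if (!toy_udp_tun_type(htons(4789), &t))
			printf("tunnel type %d\n", t);	/* 1 = VXLAN */
		return 0;
	}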
---|
+
+static int
+nfp_flower_calculate_key_layers(struct nfp_app *app,
+				struct net_device *netdev,
+				struct nfp_fl_key_ls *ret_key_ls,
+				struct flow_cls_offload *flow,
+				enum nfp_flower_tun_type *tun_type,
+				struct netlink_ext_ack *extack)
+{
+	struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
+	struct flow_dissector *dissector = rule->match.dissector;
+	struct flow_match_basic basic = { NULL, NULL};
 	struct nfp_flower_priv *priv = app->priv;
 	u32 key_layer_two;
 	u8 key_layer;
 	int key_size;
 	int err;
 
-	if (flow->dissector->used_keys & ~NFP_FLOWER_WHITELIST_DISSECTOR)
+	if (dissector->used_keys & ~NFP_FLOWER_WHITELIST_DISSECTOR) {
+		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match not supported");
 		return -EOPNOTSUPP;
+	}
 
 	/* If any tun dissector is used then the required set must be used. */
-	if (flow->dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR &&
-	    (flow->dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
-	    != NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
+	if (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR &&
+	    (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_V6_R)
+	    != NFP_FLOWER_WHITELIST_TUN_DISSECTOR_V6_R &&
+	    (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
+	    != NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R) {
+		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: tunnel match not supported");
 		return -EOPNOTSUPP;
+	}
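
Both rejections above are plain bitmask algebra over the dissector's used_keys word: no bit may fall outside the whitelist, and if any tunnel bit is set then one of the two required sets (the IPv4 or the IPv6 variant) must be fully present. A compact sketch with made-up key bits (none of the values below are the kernel's):

	#include <stdio.h>

	#define BIT(n)		(1u << (n))

	#define KEY_ETH		BIT(0)
	#define KEY_IPV4	BIT(1)
	#define KEY_ENC_CTL	BIT(2)
	#define KEY_ENC_IPV4	BIT(3)
	#define KEY_ENC_IPV6	BIT(4)

	#define WHITELIST	(KEY_ETH | KEY_IPV4 | KEY_ENC_CTL | \
				 KEY_ENC_IPV4 | KEY_ENC_IPV6)
	#define TUN_ANY		(KEY_ENC_CTL | KEY_ENC_IPV4 | KEY_ENC_IPV6)
	#define TUN_REQ_V4	(KEY_ENC_CTL | KEY_ENC_IPV4)
	#define TUN_REQ_V6	(KEY_ENC_CTL | KEY_ENC_IPV6)

	static int keys_offloadable(unsigned int used_keys)
	{
		if (used_keys & ~WHITELIST)
			return 0;	/* some key is not offloadable */
		if ((used_keys & TUN_ANY) &&
		    (used_keys & TUN_REQ_V4) != TUN_REQ_V4 &&
		    (used_keys & TUN_REQ_V6) != TUN_REQ_V6)
			return 0;	/* tunnel used, required set absent */
		return 1;
	}

	int main(void)
	{
		printf("%d\n", keys_offloadable(KEY_ETH | KEY_IPV4));	/* 1 */
		printf("%d\n", keys_offloadable(KEY_ENC_IPV4));		/* 0 */
		return 0;
	}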
---|
 
 	key_layer_two = 0;
 	key_layer = NFP_FLOWER_LAYER_PORT;
 	key_size = sizeof(struct nfp_flower_meta_tci) +
 		   sizeof(struct nfp_flower_in_port);
 
-	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS) ||
-	    dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_MPLS)) {
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS) ||
+	    flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) {
 		key_layer |= NFP_FLOWER_LAYER_MAC;
 		key_size += sizeof(struct nfp_flower_mac_mpls);
 	}
 
-	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
-		struct flow_dissector_key_vlan *flow_vlan;
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+		struct flow_match_vlan vlan;
 
-		flow_vlan = skb_flow_dissector_target(flow->dissector,
-						      FLOW_DISSECTOR_KEY_VLAN,
-						      flow->mask);
+		flow_rule_match_vlan(rule, &vlan);
 		if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_PCP) &&
-		    flow_vlan->vlan_priority)
+		    vlan.key->vlan_priority) {
+			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support VLAN PCP offload");
 			return -EOPNOTSUPP;
-	}
-
-	if (dissector_uses_key(flow->dissector,
-			       FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
-		struct flow_dissector_key_ipv4_addrs *mask_ipv4 = NULL;
-		struct flow_dissector_key_ports *mask_enc_ports = NULL;
-		struct flow_dissector_key_enc_opts *enc_op = NULL;
-		struct flow_dissector_key_ports *enc_ports = NULL;
-		struct flow_dissector_key_control *mask_enc_ctl =
-			skb_flow_dissector_target(flow->dissector,
-						  FLOW_DISSECTOR_KEY_ENC_CONTROL,
-						  flow->mask);
-		struct flow_dissector_key_control *enc_ctl =
-			skb_flow_dissector_target(flow->dissector,
-						  FLOW_DISSECTOR_KEY_ENC_CONTROL,
-						  flow->key);
-		if (!egress)
-			return -EOPNOTSUPP;
-
-		if (mask_enc_ctl->addr_type != 0xffff ||
-		    enc_ctl->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS)
-			return -EOPNOTSUPP;
-
-		/* These fields are already verified as used. */
-		mask_ipv4 =
-			skb_flow_dissector_target(flow->dissector,
-						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
-						  flow->mask);
-		if (mask_ipv4->dst != cpu_to_be32(~0))
-			return -EOPNOTSUPP;
-
-		mask_enc_ports =
-			skb_flow_dissector_target(flow->dissector,
-						  FLOW_DISSECTOR_KEY_ENC_PORTS,
-						  flow->mask);
-		enc_ports =
-			skb_flow_dissector_target(flow->dissector,
-						  FLOW_DISSECTOR_KEY_ENC_PORTS,
-						  flow->key);
-
-		if (mask_enc_ports->dst != cpu_to_be16(~0))
-			return -EOPNOTSUPP;
-
-		if (dissector_uses_key(flow->dissector,
-				       FLOW_DISSECTOR_KEY_ENC_OPTS)) {
-			enc_op = skb_flow_dissector_target(flow->dissector,
-							   FLOW_DISSECTOR_KEY_ENC_OPTS,
-							   flow->key);
 		}
-
-		switch (enc_ports->dst) {
-		case htons(NFP_FL_VXLAN_PORT):
-			*tun_type = NFP_FL_TUNNEL_VXLAN;
-			key_layer |= NFP_FLOWER_LAYER_VXLAN;
-			key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
-
-			if (enc_op)
-				return -EOPNOTSUPP;
-			break;
-		case htons(NFP_FL_GENEVE_PORT):
-			if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE))
-				return -EOPNOTSUPP;
-			*tun_type = NFP_FL_TUNNEL_GENEVE;
+		if (priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ &&
+		    !(key_layer_two & NFP_FLOWER_LAYER2_QINQ)) {
 			key_layer |= NFP_FLOWER_LAYER_EXT_META;
 			key_size += sizeof(struct nfp_flower_ext_meta);
-			key_layer_two |= NFP_FLOWER_LAYER2_GENEVE;
-			key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
+			key_size += sizeof(struct nfp_flower_vlan);
+			key_layer_two |= NFP_FLOWER_LAYER2_QINQ;
+		}
+	}
 
-			if (!enc_op)
-				break;
-			if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT))
-				return -EOPNOTSUPP;
-			err = nfp_flower_calc_opt_layer(enc_op, &key_layer_two,
-							&key_size);
-			if (err)
-				return err;
-			break;
-		default:
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
+		struct flow_match_vlan cvlan;
+
+		if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ)) {
+			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support VLAN QinQ offload");
 			return -EOPNOTSUPP;
 		}
-	} else if (egress) {
-		/* Reject non tunnel matches offloaded to egress repr. */
-		return -EOPNOTSUPP;
+
+		flow_rule_match_vlan(rule, &cvlan);
+		if (!(key_layer_two & NFP_FLOWER_LAYER2_QINQ)) {
+			key_layer |= NFP_FLOWER_LAYER_EXT_META;
+			key_size += sizeof(struct nfp_flower_ext_meta);
+			key_size += sizeof(struct nfp_flower_vlan);
+			key_layer_two |= NFP_FLOWER_LAYER2_QINQ;
+		}
 	}
 
-	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
-		mask_basic = skb_flow_dissector_target(flow->dissector,
-						       FLOW_DISSECTOR_KEY_BASIC,
-						       flow->mask);
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
+		struct flow_match_enc_opts enc_op = { NULL, NULL };
+		struct flow_match_ipv4_addrs ipv4_addrs;
+		struct flow_match_ipv6_addrs ipv6_addrs;
+		struct flow_match_control enc_ctl;
+		struct flow_match_ports enc_ports;
+		bool ipv6_tun = false;
 
-		key_basic = skb_flow_dissector_target(flow->dissector,
-						      FLOW_DISSECTOR_KEY_BASIC,
-						      flow->key);
+		flow_rule_match_enc_control(rule, &enc_ctl);
+
+		if (enc_ctl.mask->addr_type != 0xffff) {
+			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: wildcarded protocols on tunnels are not supported");
+			return -EOPNOTSUPP;
+		}
+
+		ipv6_tun = enc_ctl.key->addr_type ==
+			   FLOW_DISSECTOR_KEY_IPV6_ADDRS;
+		if (ipv6_tun &&
+		    !(priv->flower_ext_feats & NFP_FL_FEATS_IPV6_TUN)) {
+			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: firmware does not support IPv6 tunnels");
+			return -EOPNOTSUPP;
+		}
+
+		if (!ipv6_tun &&
+		    enc_ctl.key->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: tunnel address type not IPv4 or IPv6");
+			return -EOPNOTSUPP;
+		}
+
+		if (ipv6_tun) {
+			flow_rule_match_enc_ipv6_addrs(rule, &ipv6_addrs);
+			if (memchr_inv(&ipv6_addrs.mask->dst, 0xff,
+				       sizeof(ipv6_addrs.mask->dst))) {
+				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match IPv6 destination address is supported");
+				return -EOPNOTSUPP;
+			}
+		} else {
+			flow_rule_match_enc_ipv4_addrs(rule, &ipv4_addrs);
+			if (ipv4_addrs.mask->dst != cpu_to_be32(~0)) {
+				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match IPv4 destination address is supported");
+				return -EOPNOTSUPP;
+			}
+		}
+
+		if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS))
+			flow_rule_match_enc_opts(rule, &enc_op);
+
+		if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
+			/* check if GRE, which has no enc_ports */
+			if (!netif_is_gretap(netdev)) {
+				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: an exact match on L4 destination port is required for non-GRE tunnels");
+				return -EOPNOTSUPP;
+			}
+
+			*tun_type = NFP_FL_TUNNEL_GRE;
+			key_layer |= NFP_FLOWER_LAYER_EXT_META;
+			key_size += sizeof(struct nfp_flower_ext_meta);
+			key_layer_two |= NFP_FLOWER_LAYER2_GRE;
+
+			if (ipv6_tun) {
+				key_layer_two |= NFP_FLOWER_LAYER2_TUN_IPV6;
+				key_size +=
+					sizeof(struct nfp_flower_ipv6_udp_tun);
+			} else {
+				key_size +=
+					sizeof(struct nfp_flower_ipv4_udp_tun);
+			}
+
+			if (enc_op.key) {
+				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: encap options not supported on GRE tunnels");
+				return -EOPNOTSUPP;
+			}
+		} else {
+			flow_rule_match_enc_ports(rule, &enc_ports);
+			if (enc_ports.mask->dst != cpu_to_be16(~0)) {
+				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match L4 destination port is supported");
+				return -EOPNOTSUPP;
+			}
+
+			err = nfp_flower_calc_udp_tun_layer(enc_ports.key,
+							    enc_op.key,
+							    &key_layer_two,
+							    &key_layer,
+							    &key_size, priv,
+							    tun_type, ipv6_tun,
+							    extack);
+			if (err)
+				return err;
+
+			/* Ensure the ingress netdev matches the expected
+			 * tun type.
+			 */
+			if (!nfp_fl_netdev_is_tunnel_type(netdev, *tun_type)) {
+				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: ingress netdev does not match the expected tunnel type");
+				return -EOPNOTSUPP;
+			}
+		}
 	}
 
-	if (mask_basic && mask_basic->n_proto) {
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC))
+		flow_rule_match_basic(rule, &basic);
+
+	if (basic.mask && basic.mask->n_proto) {
 		/* Ethernet type is present in the key. */
-		switch (key_basic->n_proto) {
+		switch (basic.key->n_proto) {
 		case cpu_to_be16(ETH_P_IP):
 			key_layer |= NFP_FLOWER_LAYER_IPV4;
 			key_size += sizeof(struct nfp_flower_ipv4);
@@ -313,6 +432,7 @@
 		 * because we rely on it to get to the host.
 		 */
 		case cpu_to_be16(ETH_P_ARP):
+			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: ARP not supported");
 			return -EOPNOTSUPP;
 
 		case cpu_to_be16(ETH_P_MPLS_UC):
@@ -328,18 +448,16 @@
 			break;
 
 		default:
-			/* Other ethtype - we need check the masks for the
-			 * remainder of the key to ensure we can offload.
-			 */
-			if (nfp_flower_check_higher_than_mac(flow))
-				return -EOPNOTSUPP;
-			break;
+			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on given EtherType is not supported");
+			return -EOPNOTSUPP;
 		}
+	} else if (nfp_flower_check_higher_than_mac(flow)) {
+		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot match above L2 without specified EtherType");
+		return -EOPNOTSUPP;
 	}
 
-	if (mask_basic && mask_basic->ip_proto) {
-		/* Ethernet type is present in the key. */
-		switch (key_basic->ip_proto) {
+	if (basic.mask && basic.mask->ip_proto) {
+		switch (basic.key->ip_proto) {
 		case IPPROTO_TCP:
 		case IPPROTO_UDP:
 		case IPPROTO_SCTP:
@@ -348,68 +466,73 @@
 			key_layer |= NFP_FLOWER_LAYER_TP;
 			key_size += sizeof(struct nfp_flower_tp_ports);
 			break;
-		default:
-			/* Other ip proto - we need check the masks for the
-			 * remainder of the key to ensure we can offload.
-			 */
-			return -EOPNOTSUPP;
 		}
 	}
 
-	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_TCP)) {
-		struct flow_dissector_key_tcp *tcp;
+	if (!(key_layer & NFP_FLOWER_LAYER_TP) &&
+	    nfp_flower_check_higher_than_l3(flow)) {
+		NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot match on L4 information without specified IP protocol type");
+		return -EOPNOTSUPP;
+	}
+
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
+		struct flow_match_tcp tcp;
 		u32 tcp_flags;
 
-		tcp = skb_flow_dissector_target(flow->dissector,
-						FLOW_DISSECTOR_KEY_TCP,
-						flow->key);
-		tcp_flags = be16_to_cpu(tcp->flags);
+		flow_rule_match_tcp(rule, &tcp);
+		tcp_flags = be16_to_cpu(tcp.key->flags);
 
-		if (tcp_flags & ~NFP_FLOWER_SUPPORTED_TCPFLAGS)
+		if (tcp_flags & ~NFP_FLOWER_SUPPORTED_TCPFLAGS) {
+			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: no match support for selected TCP flags");
 			return -EOPNOTSUPP;
+		}
 
 		/* We only support PSH and URG flags when either
 		 * FIN, SYN or RST is present as well.
 		 */
 		if ((tcp_flags & (TCPHDR_PSH | TCPHDR_URG)) &&
-		    !(tcp_flags & (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST)))
+		    !(tcp_flags & (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST))) {
+			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: PSH and URG is only supported when used with FIN, SYN or RST");
 			return -EOPNOTSUPP;
+		}
---|
 
 		/* We need to store TCP flags in either the IPv4 or IPv6 key
 		 * space, thus we need to ensure we include an IPv4/IPv6 key
 		 * layer if we have not done so already.
 		 */
---|
-		if (!key_basic)
+		if (!basic.key) {
+			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on TCP flags requires a match on L3 protocol");
 			return -EOPNOTSUPP;
+		}
 
 		if (!(key_layer & NFP_FLOWER_LAYER_IPV4) &&
 		    !(key_layer & NFP_FLOWER_LAYER_IPV6)) {
-			switch (key_basic->n_proto) {
+			switch (basic.key->n_proto) {
 			case cpu_to_be16(ETH_P_IP):
 				key_layer |= NFP_FLOWER_LAYER_IPV4;
 				key_size += sizeof(struct nfp_flower_ipv4);
 				break;
 
 			case cpu_to_be16(ETH_P_IPV6):
-				key_layer |= NFP_FLOWER_LAYER_IPV6;
+				key_layer |= NFP_FLOWER_LAYER_IPV6;
 				key_size += sizeof(struct nfp_flower_ipv6);
 				break;
 
 			default:
+				NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on TCP flags requires a match on IPv4/IPv6");
 				return -EOPNOTSUPP;
 			}
 		}
 	}
 
-	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
-		struct flow_dissector_key_control *key_ctl;
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
+		struct flow_match_control ctl;
 
-		key_ctl = skb_flow_dissector_target(flow->dissector,
-						    FLOW_DISSECTOR_KEY_CONTROL,
-						    flow->key);
-
-		if (key_ctl->flags & ~NFP_FLOWER_SUPPORTED_CTLFLAGS)
+		flow_rule_match_control(rule, &ctl);
+		if (ctl.key->flags & ~NFP_FLOWER_SUPPORTED_CTLFLAGS) {
+			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on unknown control flag");
 			return -EOPNOTSUPP;
+		}
 	}
 
 	ret_key_ls->key_layer = key_layer;
@@ -420,7 +543,7 @@
 }
 
 static struct nfp_fl_payload *
-nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer, bool egress)
+nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer)
 {
 	struct nfp_fl_payload *flow_pay;
 
@@ -443,10 +566,11 @@
 		goto err_free_mask;
 
 	flow_pay->nfp_tun_ipv4_addr = 0;
+	flow_pay->nfp_tun_ipv6 = NULL;
 	flow_pay->meta.flags = 0;
-	spin_lock_init(&flow_pay->lock);
-
-	flow_pay->ingress_offload = !egress;
+	INIT_LIST_HEAD(&flow_pay->linked_flows);
+	flow_pay->in_hw = false;
+	flow_pay->pre_tun_rule.dev = NULL;
 
 	return flow_pay;
 
@@ -459,3 +583,648 @@
 	return NULL;
 }
 
+static int
+nfp_flower_update_merge_with_actions(struct nfp_fl_payload *flow,
+				     struct nfp_flower_merge_check *merge,
+				     u8 *last_act_id, int *act_out)
+{
+	struct nfp_fl_set_ipv6_tc_hl_fl *ipv6_tc_hl_fl;
+	struct nfp_fl_set_ip4_ttl_tos *ipv4_ttl_tos;
+	struct nfp_fl_set_ip4_addrs *ipv4_add;
+	struct nfp_fl_set_ipv6_addr *ipv6_add;
+	struct nfp_fl_push_vlan *push_vlan;
+	struct nfp_fl_pre_tunnel *pre_tun;
+	struct nfp_fl_set_tport *tport;
+	struct nfp_fl_set_eth *eth;
+	struct nfp_fl_act_head *a;
+	unsigned int act_off = 0;
+	bool ipv6_tun = false;
+	u8 act_id = 0;
+	u8 *ports;
+	int i;
+
+	while (act_off < flow->meta.act_len) {
+		a = (struct nfp_fl_act_head *)&flow->action_data[act_off];
+		act_id = a->jump_id;
+
+		switch (act_id) {
+		case NFP_FL_ACTION_OPCODE_OUTPUT:
+			if (act_out)
+				(*act_out)++;
+			break;
+		case NFP_FL_ACTION_OPCODE_PUSH_VLAN:
+			push_vlan = (struct nfp_fl_push_vlan *)a;
+			if (push_vlan->vlan_tci)
+				merge->tci = cpu_to_be16(0xffff);
+			break;
+		case NFP_FL_ACTION_OPCODE_POP_VLAN:
+			merge->tci = cpu_to_be16(0);
+			break;
+		case NFP_FL_ACTION_OPCODE_SET_TUNNEL:
+			/* New tunnel header means l2 to l4 can be matched. */
+			eth_broadcast_addr(&merge->l2.mac_dst[0]);
+			eth_broadcast_addr(&merge->l2.mac_src[0]);
+			memset(&merge->l4, 0xff,
+			       sizeof(struct nfp_flower_tp_ports));
+			if (ipv6_tun)
+				memset(&merge->ipv6, 0xff,
+				       sizeof(struct nfp_flower_ipv6));
+			else
+				memset(&merge->ipv4, 0xff,
+				       sizeof(struct nfp_flower_ipv4));
+			break;
+		case NFP_FL_ACTION_OPCODE_SET_ETHERNET:
+			eth = (struct nfp_fl_set_eth *)a;
+			for (i = 0; i < ETH_ALEN; i++)
+				merge->l2.mac_dst[i] |= eth->eth_addr_mask[i];
+			for (i = 0; i < ETH_ALEN; i++)
+				merge->l2.mac_src[i] |=
+					eth->eth_addr_mask[ETH_ALEN + i];
+			break;
+		case NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS:
+			ipv4_add = (struct nfp_fl_set_ip4_addrs *)a;
+			merge->ipv4.ipv4_src |= ipv4_add->ipv4_src_mask;
+			merge->ipv4.ipv4_dst |= ipv4_add->ipv4_dst_mask;
+			break;
+		case NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS:
+			ipv4_ttl_tos = (struct nfp_fl_set_ip4_ttl_tos *)a;
+			merge->ipv4.ip_ext.ttl |= ipv4_ttl_tos->ipv4_ttl_mask;
+			merge->ipv4.ip_ext.tos |= ipv4_ttl_tos->ipv4_tos_mask;
+			break;
+		case NFP_FL_ACTION_OPCODE_SET_IPV6_SRC:
+			ipv6_add = (struct nfp_fl_set_ipv6_addr *)a;
+			for (i = 0; i < 4; i++)
+				merge->ipv6.ipv6_src.in6_u.u6_addr32[i] |=
+					ipv6_add->ipv6[i].mask;
+			break;
+		case NFP_FL_ACTION_OPCODE_SET_IPV6_DST:
+			ipv6_add = (struct nfp_fl_set_ipv6_addr *)a;
+			for (i = 0; i < 4; i++)
+				merge->ipv6.ipv6_dst.in6_u.u6_addr32[i] |=
+					ipv6_add->ipv6[i].mask;
+			break;
+		case NFP_FL_ACTION_OPCODE_SET_IPV6_TC_HL_FL:
+			ipv6_tc_hl_fl = (struct nfp_fl_set_ipv6_tc_hl_fl *)a;
+			merge->ipv6.ip_ext.ttl |=
+				ipv6_tc_hl_fl->ipv6_hop_limit_mask;
+			merge->ipv6.ip_ext.tos |= ipv6_tc_hl_fl->ipv6_tc_mask;
+			merge->ipv6.ipv6_flow_label_exthdr |=
+				ipv6_tc_hl_fl->ipv6_label_mask;
+			break;
+		case NFP_FL_ACTION_OPCODE_SET_UDP:
+		case NFP_FL_ACTION_OPCODE_SET_TCP:
+			tport = (struct nfp_fl_set_tport *)a;
+			ports = (u8 *)&merge->l4.port_src;
+			for (i = 0; i < 4; i++)
+				ports[i] |= tport->tp_port_mask[i];
+			break;
+		case NFP_FL_ACTION_OPCODE_PRE_TUNNEL:
+			pre_tun = (struct nfp_fl_pre_tunnel *)a;
+			ipv6_tun = be16_to_cpu(pre_tun->flags) &
+					NFP_FL_PRE_TUN_IPV6;
+			break;
+		case NFP_FL_ACTION_OPCODE_PRE_LAG:
+		case NFP_FL_ACTION_OPCODE_PUSH_GENEVE:
+			break;
+		default:
+			return -EOPNOTSUPP;
+		}
+
+		act_off += a->len_lw << NFP_FL_LW_SIZ;
+	}
+
+	if (last_act_id)
+		*last_act_id = act_id;
+
+	return 0;
+}
+
+static int
+nfp_flower_populate_merge_match(struct nfp_fl_payload *flow,
+				struct nfp_flower_merge_check *merge,
+				bool extra_fields)
+{
+	struct nfp_flower_meta_tci *meta_tci;
+	u8 *mask = flow->mask_data;
+	u8 key_layer, match_size;
+
+	memset(merge, 0, sizeof(struct nfp_flower_merge_check));
+
+	meta_tci = (struct nfp_flower_meta_tci *)mask;
+	key_layer = meta_tci->nfp_flow_key_layer;
+
+	if (key_layer & ~NFP_FLOWER_MERGE_FIELDS && !extra_fields)
+		return -EOPNOTSUPP;
+
+	merge->tci = meta_tci->tci;
+	mask += sizeof(struct nfp_flower_meta_tci);
+
+	if (key_layer & NFP_FLOWER_LAYER_EXT_META)
+		mask += sizeof(struct nfp_flower_ext_meta);
+
+	mask += sizeof(struct nfp_flower_in_port);
+
+	if (key_layer & NFP_FLOWER_LAYER_MAC) {
+		match_size = sizeof(struct nfp_flower_mac_mpls);
+		memcpy(&merge->l2, mask, match_size);
+		mask += match_size;
+	}
+
+	if (key_layer & NFP_FLOWER_LAYER_TP) {
+		match_size = sizeof(struct nfp_flower_tp_ports);
+		memcpy(&merge->l4, mask, match_size);
+		mask += match_size;
+	}
+
+	if (key_layer & NFP_FLOWER_LAYER_IPV4) {
+		match_size = sizeof(struct nfp_flower_ipv4);
+		memcpy(&merge->ipv4, mask, match_size);
+	}
+
+	if (key_layer & NFP_FLOWER_LAYER_IPV6) {
+		match_size = sizeof(struct nfp_flower_ipv6);
+		memcpy(&merge->ipv6, mask, match_size);
+	}
+
+	return 0;
+}
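
The walk above relies on the match blob being a packed concatenation whose layout is dictated by the key_layer bits: meta/TCI, optional ext-meta, in-port, then MAC, TP and IP sections only when their layer bit is set. A userspace sketch of the same cursor arithmetic, with invented TOY_* bits and sizes standing in for the NFP_FLOWER_* ones:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	#define TOY_LAYER_MAC	0x1
	#define TOY_LAYER_TP	0x2
	#define TOY_LAYER_IPV4	0x4

	#define TOY_META_SZ	4
	#define TOY_PORT_SZ	4
	#define TOY_MAC_SZ	14
	#define TOY_TP_SZ	4
	#define TOY_IPV4_SZ	12

	/* Copy the IPv4 section out of a packed mask blob, skipping
	 * whatever earlier layers the key_layer bitmap says exist.
	 */
	static int toy_extract_ipv4(const uint8_t *mask, uint8_t key_layer,
				    uint8_t out[TOY_IPV4_SZ])
	{
		const uint8_t *p = mask + TOY_META_SZ + TOY_PORT_SZ;

		if (!(key_layer & TOY_LAYER_IPV4))
			return -1;
		if (key_layer & TOY_LAYER_MAC)
			p += TOY_MAC_SZ;
		if (key_layer & TOY_LAYER_TP)
			p += TOY_TP_SZ;
		memcpy(out, p, TOY_IPV4_SZ);
		return 0;
	}

	int main(void)
	{
		uint8_t blob[64] = { 0 }, ipv4[TOY_IPV4_SZ];
		uint8_t key_layer = TOY_LAYER_MAC | TOY_LAYER_IPV4;

		blob[TOY_META_SZ + TOY_PORT_SZ + TOY_MAC_SZ] = 0xff;
		if (!toy_extract_ipv4(blob, key_layer, ipv4))
			printf("ipv4 mask[0] = 0x%02x\n", ipv4[0]); /* 0xff */
		return 0;
	}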
---|
+
+static int
+nfp_flower_can_merge(struct nfp_fl_payload *sub_flow1,
+		     struct nfp_fl_payload *sub_flow2)
+{
+	/* Two flows can be merged if sub_flow2 only matches on bits that are
+	 * either matched by sub_flow1 or set by a sub_flow1 action. This
+	 * ensures that every packet that hits sub_flow1 and recirculates is
+	 * guaranteed to hit sub_flow2.
+	 */
+	struct nfp_flower_merge_check sub_flow1_merge, sub_flow2_merge;
+	int err, act_out = 0;
+	u8 last_act_id = 0;
+
+	err = nfp_flower_populate_merge_match(sub_flow1, &sub_flow1_merge,
+					      true);
+	if (err)
+		return err;
+
+	err = nfp_flower_populate_merge_match(sub_flow2, &sub_flow2_merge,
+					      false);
+	if (err)
+		return err;
+
+	err = nfp_flower_update_merge_with_actions(sub_flow1, &sub_flow1_merge,
+						   &last_act_id, &act_out);
+	if (err)
+		return err;
+
+	/* Must only be 1 output action and it must be the last in sequence. */
+	if (act_out != 1 || last_act_id != NFP_FL_ACTION_OPCODE_OUTPUT)
+		return -EOPNOTSUPP;
+
+	/* Reject merge if sub_flow2 matches on something that is not matched
+	 * on or set in an action by sub_flow1.
+	 */
+	err = bitmap_andnot(sub_flow2_merge.vals, sub_flow2_merge.vals,
+			    sub_flow1_merge.vals,
+			    sizeof(struct nfp_flower_merge_check) * 8);
+	if (err)
+		return -EINVAL;
+
+	return 0;
+}
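
The final test reduces to set arithmetic on the two populated masks: once sub_flow1's mask has been widened by its actions, the merge is valid iff sub_flow2's mask is a subset of it, which is exactly what the bitmap_andnot() call reports through its "any bits left over" result. A plain-C sketch of that subset test (MERGE_WORDS mirrors the vals[8] sizing; the word loop stands in for the kernel bitmap API):

	#include <stdbool.h>
	#include <stdio.h>

	#define MERGE_WORDS 8

	/* Does m2 match only bits that m1 matches or sets?
	 * Any leftover bit means a recirculated packet could miss m2,
	 * so the merge must be rejected.
	 */
	static bool masks_mergeable(const unsigned long *m1,
				    const unsigned long *m2)
	{
		unsigned long leftover = 0;
		int i;

		for (i = 0; i < MERGE_WORDS; i++)
			leftover |= m2[i] & ~m1[i];

		return leftover == 0;
	}

	int main(void)
	{
		unsigned long flow1[MERGE_WORDS] = { 0xff00ff00UL };
		unsigned long flow2[MERGE_WORDS] = { 0x0f000000UL };

		printf("%d\n", masks_mergeable(flow1, flow2));	/* 1 */
		flow2[0] = 0xffUL;	/* bits flow1 leaves wildcarded */
		printf("%d\n", masks_mergeable(flow1, flow2));	/* 0 */
		return 0;
	}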
---|
+
+static unsigned int
+nfp_flower_copy_pre_actions(char *act_dst, char *act_src, int len,
+			    bool *tunnel_act)
+{
+	unsigned int act_off = 0, act_len;
+	struct nfp_fl_act_head *a;
+	u8 act_id = 0;
+
+	while (act_off < len) {
+		a = (struct nfp_fl_act_head *)&act_src[act_off];
+		act_len = a->len_lw << NFP_FL_LW_SIZ;
+		act_id = a->jump_id;
+
+		switch (act_id) {
+		case NFP_FL_ACTION_OPCODE_PRE_TUNNEL:
+			if (tunnel_act)
+				*tunnel_act = true;
+			fallthrough;
+		case NFP_FL_ACTION_OPCODE_PRE_LAG:
+			memcpy(act_dst + act_off, act_src + act_off, act_len);
+			break;
+		default:
+			return act_off;
+		}
+
+		act_off += act_len;
+	}
+
+	return act_off;
+}
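
Every routine in this patch that scans action_data uses the same TLV idiom: each action begins with a header carrying jump_id and a length in 32-bit longwords, and the cursor advances by len_lw << NFP_FL_LW_SIZ bytes. A standalone sketch of that walker (the two-byte header layout and opcode values here are stand-ins, not the firmware ABI):

	#include <stdint.h>
	#include <stdio.h>

	#define TOY_LW_SIZ 2	/* len_lw << 2 = length in bytes */

	struct toy_act_head {
		uint8_t jump_id;
		uint8_t len_lw;
	};

	/* Count actions by hopping header to header, the same loop
	 * shape as the driver's action walkers.
	 */
	static int toy_count_actions(const uint8_t *acts, unsigned int len)
	{
		unsigned int off = 0;
		int n = 0;

		while (off < len) {
			const struct toy_act_head *a =
				(const struct toy_act_head *)&acts[off];

			if (!a->len_lw)
				return -1;	/* malformed: would spin */
			off += (unsigned int)a->len_lw << TOY_LW_SIZ;
			n++;
		}
		return n;
	}

	int main(void)
	{
		/* Two actions: 8 bytes (len_lw = 2), then 4 (len_lw = 1). */
		uint8_t acts[12] = { [0] = 7, [1] = 2, [8] = 3, [9] = 1 };

		printf("%d actions\n", toy_count_actions(acts, sizeof(acts)));
		return 0;
	}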
---|
+
+static int
+nfp_fl_verify_post_tun_acts(char *acts, int len, struct nfp_fl_push_vlan **vlan)
+{
+	struct nfp_fl_act_head *a;
+	unsigned int act_off = 0;
+
+	while (act_off < len) {
+		a = (struct nfp_fl_act_head *)&acts[act_off];
+
+		if (a->jump_id == NFP_FL_ACTION_OPCODE_PUSH_VLAN && !act_off)
+			*vlan = (struct nfp_fl_push_vlan *)a;
+		else if (a->jump_id != NFP_FL_ACTION_OPCODE_OUTPUT)
+			return -EOPNOTSUPP;
+
+		act_off += a->len_lw << NFP_FL_LW_SIZ;
+	}
+
+	/* Ensure any VLAN push also has an egress action. */
+	if (*vlan && act_off <= sizeof(struct nfp_fl_push_vlan))
+		return -EOPNOTSUPP;
+
+	return 0;
+}
+
+static int
+nfp_fl_push_vlan_after_tun(char *acts, int len, struct nfp_fl_push_vlan *vlan)
+{
+	struct nfp_fl_set_tun *tun;
+	struct nfp_fl_act_head *a;
+	unsigned int act_off = 0;
+
+	while (act_off < len) {
+		a = (struct nfp_fl_act_head *)&acts[act_off];
+
+		if (a->jump_id == NFP_FL_ACTION_OPCODE_SET_TUNNEL) {
+			tun = (struct nfp_fl_set_tun *)a;
+			tun->outer_vlan_tpid = vlan->vlan_tpid;
+			tun->outer_vlan_tci = vlan->vlan_tci;
+
+			return 0;
+		}
+
+		act_off += a->len_lw << NFP_FL_LW_SIZ;
+	}
+
+	/* Return error if no tunnel action is found. */
+	return -EOPNOTSUPP;
+}
---|
+
+static int
+nfp_flower_merge_action(struct nfp_fl_payload *sub_flow1,
+			struct nfp_fl_payload *sub_flow2,
+			struct nfp_fl_payload *merge_flow)
+{
+	unsigned int sub1_act_len, sub2_act_len, pre_off1, pre_off2;
+	struct nfp_fl_push_vlan *post_tun_push_vlan = NULL;
+	bool tunnel_act = false;
+	char *merge_act;
+	int err;
+
+	/* The last action of sub_flow1 must be output - do not merge this. */
+	sub1_act_len = sub_flow1->meta.act_len - sizeof(struct nfp_fl_output);
+	sub2_act_len = sub_flow2->meta.act_len;
+
+	if (!sub2_act_len)
+		return -EINVAL;
+
+	if (sub1_act_len + sub2_act_len > NFP_FL_MAX_A_SIZ)
+		return -EINVAL;
+
+	/* A shortcut can only be applied if there is a single action. */
+	if (sub1_act_len)
+		merge_flow->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
+	else
+		merge_flow->meta.shortcut = sub_flow2->meta.shortcut;
+
+	merge_flow->meta.act_len = sub1_act_len + sub2_act_len;
+	merge_act = merge_flow->action_data;
+
+	/* Copy any pre-actions to the start of the merge flow action list. */
+	pre_off1 = nfp_flower_copy_pre_actions(merge_act,
+					       sub_flow1->action_data,
+					       sub1_act_len, &tunnel_act);
+	merge_act += pre_off1;
+	sub1_act_len -= pre_off1;
+	pre_off2 = nfp_flower_copy_pre_actions(merge_act,
+					       sub_flow2->action_data,
+					       sub2_act_len, NULL);
+	merge_act += pre_off2;
+	sub2_act_len -= pre_off2;
+
+	/* FW does a tunnel push when egressing, therefore, if sub_flow 1
+	 * pushes a tunnel, there are restrictions on which sub_flow 2 actions
+	 * lead to a valid merge.
+	 */
+	if (tunnel_act) {
+		char *post_tun_acts = &sub_flow2->action_data[pre_off2];
+
+		err = nfp_fl_verify_post_tun_acts(post_tun_acts, sub2_act_len,
+						  &post_tun_push_vlan);
+		if (err)
+			return err;
+
+		if (post_tun_push_vlan) {
+			pre_off2 += sizeof(*post_tun_push_vlan);
+			sub2_act_len -= sizeof(*post_tun_push_vlan);
+		}
+	}
+
+	/* Copy remaining actions from sub_flows 1 and 2. */
+	memcpy(merge_act, sub_flow1->action_data + pre_off1, sub1_act_len);
+
+	if (post_tun_push_vlan) {
+		/* Update tunnel action in merge to include VLAN push. */
+		err = nfp_fl_push_vlan_after_tun(merge_act, sub1_act_len,
+						 post_tun_push_vlan);
+		if (err)
+			return err;
+
+		merge_flow->meta.act_len -= sizeof(*post_tun_push_vlan);
+	}
+
+	merge_act += sub1_act_len;
+	memcpy(merge_act, sub_flow2->action_data + pre_off2, sub2_act_len);
+
+	return 0;
+}
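
The merged action list therefore ends up laid out as: both flows' pre-actions first, then sub_flow1's actions minus its trailing output, then sub_flow2's actions. A toy byte-buffer sketch of that offset bookkeeping (the fixed OUT_SZ and the marker strings are purely illustrative):

	#include <stdio.h>
	#include <string.h>

	#define OUT_SZ 8	/* stand-in for sizeof(struct nfp_fl_output) */

	/* Assemble pre1 + pre2 + body1 + body2, where body1 excludes a
	 * fixed-size trailing output action, mirroring the memcpy
	 * sequence in nfp_flower_merge_action().
	 */
	static size_t toy_merge(char *dst,
				const char *a1, size_t len1, size_t pre1,
				const char *a2, size_t len2, size_t pre2)
	{
		size_t off = 0;

		len1 -= OUT_SZ;			/* drop sub_flow1's output */
		memcpy(dst + off, a1, pre1);
		off += pre1;
		memcpy(dst + off, a2, pre2);
		off += pre2;
		memcpy(dst + off, a1 + pre1, len1 - pre1);
		off += len1 - pre1;
		memcpy(dst + off, a2 + pre2, len2 - pre2);
		off += len2 - pre2;
		return off;
	}

	int main(void)
	{
		char a1[24] = "PRE1....BODY1...OUTPUT..";
		char a2[16] = "PRE2....BODY2...";
		char merged[40] = { 0 };
		size_t n = toy_merge(merged, a1, 24, 8, a2, 16, 8);

		printf("%zu: %.*s\n", n, (int)n, merged);
		/* 32: PRE1....PRE2....BODY1...BODY2... */
		return 0;
	}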
---|
+
+/* Flow link code should only be accessed under RTNL. */
+static void nfp_flower_unlink_flow(struct nfp_fl_payload_link *link)
+{
+	list_del(&link->merge_flow.list);
+	list_del(&link->sub_flow.list);
+	kfree(link);
+}
+
+static void nfp_flower_unlink_flows(struct nfp_fl_payload *merge_flow,
+				    struct nfp_fl_payload *sub_flow)
+{
+	struct nfp_fl_payload_link *link;
+
+	list_for_each_entry(link, &merge_flow->linked_flows, merge_flow.list)
+		if (link->sub_flow.flow == sub_flow) {
+			nfp_flower_unlink_flow(link);
+			return;
+		}
+}
+
+static int nfp_flower_link_flows(struct nfp_fl_payload *merge_flow,
+				 struct nfp_fl_payload *sub_flow)
+{
+	struct nfp_fl_payload_link *link;
+
+	link = kmalloc(sizeof(*link), GFP_KERNEL);
+	if (!link)
+		return -ENOMEM;
+
+	link->merge_flow.flow = merge_flow;
+	list_add_tail(&link->merge_flow.list, &merge_flow->linked_flows);
+	link->sub_flow.flow = sub_flow;
+	list_add_tail(&link->sub_flow.list, &sub_flow->linked_flows);
+
+	return 0;
+}
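
Each nfp_fl_payload_link is a single node threaded onto two lists at once, the merge flow's linked_flows and the sub-flow's, so the relation can be found and severed from either end. A simplified singly-linked userspace sketch of that dual-membership idea (the kernel uses embedded list_head pairs; toy_link is hypothetical):

	#include <stdio.h>
	#include <stdlib.h>

	struct toy_link {
		int merge_id, sub_id;
		struct toy_link *next_for_merge; /* chain per merge flow */
		struct toy_link *next_for_sub;	 /* chain per sub flow */
	};

	/* Create one link and push it onto both chains. */
	static struct toy_link *link_flows(struct toy_link **merge_head,
					   struct toy_link **sub_head,
					   int merge_id, int sub_id)
	{
		struct toy_link *l = calloc(1, sizeof(*l));

		if (!l)
			return NULL;
		l->merge_id = merge_id;
		l->sub_id = sub_id;
		l->next_for_merge = *merge_head;
		*merge_head = l;
		l->next_for_sub = *sub_head;
		*sub_head = l;
		return l;
	}

	int main(void)
	{
		struct toy_link *merge1 = NULL, *sub7 = NULL, *sub9 = NULL;
		struct toy_link *l;

		link_flows(&merge1, &sub7, 1, 7);
		link_flows(&merge1, &sub9, 1, 9);

		for (l = merge1; l; l = l->next_for_merge)
			printf("merge %d -> sub %d\n", l->merge_id, l->sub_id);

		while (merge1) {	/* free via the merge chain */
			l = merge1->next_for_merge;
			free(merge1);
			merge1 = l;
		}
		return 0;
	}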
---|
+
+/**
+ * nfp_flower_merge_offloaded_flows() - Merge 2 existing flows into a single flow.
+ * @app:	Pointer to the APP handle
+ * @sub_flow1:	Initial flow matched to produce merge hint
+ * @sub_flow2:	Post recirculation flow matched in merge hint
+ *
+ * Combines 2 flows (if valid) into a single flow, removing the initial flow
+ * from hw and offloading the new, merged flow.
+ *
+ * Return: negative value on error, 0 on success.
+ */
+int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
+				     struct nfp_fl_payload *sub_flow1,
+				     struct nfp_fl_payload *sub_flow2)
+{
+	struct flow_cls_offload merge_tc_off;
+	struct nfp_flower_priv *priv = app->priv;
+	struct netlink_ext_ack *extack = NULL;
+	struct nfp_fl_payload *merge_flow;
+	struct nfp_fl_key_ls merge_key_ls;
+	struct nfp_merge_info *merge_info;
+	u64 parent_ctx = 0;
+	int err;
+
+	ASSERT_RTNL();
+
+	extack = merge_tc_off.common.extack;
+	if (sub_flow1 == sub_flow2 ||
+	    nfp_flower_is_merge_flow(sub_flow1) ||
+	    nfp_flower_is_merge_flow(sub_flow2))
+		return -EINVAL;
+
+	/* check if the two flows are already merged */
+	parent_ctx = (u64)(be32_to_cpu(sub_flow1->meta.host_ctx_id)) << 32;
+	parent_ctx |= (u64)(be32_to_cpu(sub_flow2->meta.host_ctx_id));
+	if (rhashtable_lookup_fast(&priv->merge_table,
+				   &parent_ctx, merge_table_params)) {
+		nfp_flower_cmsg_warn(app, "The two flows are already merged.\n");
+		return 0;
+	}
+
+	err = nfp_flower_can_merge(sub_flow1, sub_flow2);
+	if (err)
+		return err;
+
+	merge_key_ls.key_size = sub_flow1->meta.key_len;
+
+	merge_flow = nfp_flower_allocate_new(&merge_key_ls);
+	if (!merge_flow)
+		return -ENOMEM;
+
+	merge_flow->tc_flower_cookie = (unsigned long)merge_flow;
+	merge_flow->ingress_dev = sub_flow1->ingress_dev;
+
+	memcpy(merge_flow->unmasked_data, sub_flow1->unmasked_data,
+	       sub_flow1->meta.key_len);
+	memcpy(merge_flow->mask_data, sub_flow1->mask_data,
+	       sub_flow1->meta.mask_len);
+
+	err = nfp_flower_merge_action(sub_flow1, sub_flow2, merge_flow);
+	if (err)
+		goto err_destroy_merge_flow;
+
+	err = nfp_flower_link_flows(merge_flow, sub_flow1);
+	if (err)
+		goto err_destroy_merge_flow;
+
+	err = nfp_flower_link_flows(merge_flow, sub_flow2);
+	if (err)
+		goto err_unlink_sub_flow1;
+
+	merge_tc_off.cookie = merge_flow->tc_flower_cookie;
+	err = nfp_compile_flow_metadata(app, &merge_tc_off, merge_flow,
+					merge_flow->ingress_dev, extack);
+	if (err)
+		goto err_unlink_sub_flow2;
+
+	err = rhashtable_insert_fast(&priv->flow_table, &merge_flow->fl_node,
+				     nfp_flower_table_params);
+	if (err)
+		goto err_release_metadata;
+
+	merge_info = kmalloc(sizeof(*merge_info), GFP_KERNEL);
+	if (!merge_info) {
+		err = -ENOMEM;
+		goto err_remove_rhash;
+	}
+	merge_info->parent_ctx = parent_ctx;
+	err = rhashtable_insert_fast(&priv->merge_table, &merge_info->ht_node,
+				     merge_table_params);
+	if (err)
+		goto err_destroy_merge_info;
+
+	err = nfp_flower_xmit_flow(app, merge_flow,
+				   NFP_FLOWER_CMSG_TYPE_FLOW_MOD);
+	if (err)
+		goto err_remove_merge_info;
+
+	merge_flow->in_hw = true;
+	sub_flow1->in_hw = false;
+
+	return 0;
+
+err_remove_merge_info:
+	WARN_ON_ONCE(rhashtable_remove_fast(&priv->merge_table,
+					    &merge_info->ht_node,
+					    merge_table_params));
+err_destroy_merge_info:
+	kfree(merge_info);
+err_remove_rhash:
+	WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
+					    &merge_flow->fl_node,
+					    nfp_flower_table_params));
+err_release_metadata:
+	nfp_modify_flow_metadata(app, merge_flow);
+err_unlink_sub_flow2:
+	nfp_flower_unlink_flows(merge_flow, sub_flow2);
+err_unlink_sub_flow1:
+	nfp_flower_unlink_flows(merge_flow, sub_flow1);
+err_destroy_merge_flow:
+	kfree(merge_flow->action_data);
+	kfree(merge_flow->mask_data);
+	kfree(merge_flow->unmasked_data);
+	kfree(merge_flow);
+	return err;
+}
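
The merge_table dedup key built near the top of the function is simply both host context ids packed into one u64, first sub-flow in the high word, so the pair (A, B) hashes differently from (B, A). A tiny sketch of that packing (toy_parent_ctx is a made-up name):

	#include <stdint.h>
	#include <stdio.h>

	/* Pack two 32-bit host context ids into one lookup key:
	 * high word = first sub-flow, low word = second.
	 */
	static uint64_t toy_parent_ctx(uint32_t ctx1, uint32_t ctx2)
	{
		return ((uint64_t)ctx1 << 32) | ctx2;
	}

	int main(void)
	{
		uint64_t key = toy_parent_ctx(0x11, 0x22);

		printf("key = 0x%016llx\n", (unsigned long long)key);
		/* 0x0000001100000022 */
		return 0;
	}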
---|
| 1118 | + |
---|
| 1119 | +/** |
---|
| 1120 | + * nfp_flower_validate_pre_tun_rule() |
---|
| 1121 | + * @app: Pointer to the APP handle |
---|
| 1122 | + * @flow: Pointer to NFP flow representation of rule |
---|
| 1123 | + * @key_ls: Pointer to NFP key layers structure |
---|
| 1124 | + * @extack: Netlink extended ACK report |
---|
| 1125 | + * |
---|
| 1126 | + * Verifies the flow as a pre-tunnel rule. |
---|
| 1127 | + * |
---|
| 1128 | + * Return: negative value on error, 0 if verified. |
---|
| 1129 | + */ |
---|
| 1130 | +static int |
---|
| 1131 | +nfp_flower_validate_pre_tun_rule(struct nfp_app *app, |
---|
| 1132 | + struct nfp_fl_payload *flow, |
---|
| 1133 | + struct nfp_fl_key_ls *key_ls, |
---|
| 1134 | + struct netlink_ext_ack *extack) |
---|
| 1135 | +{ |
---|
| 1136 | + struct nfp_flower_priv *priv = app->priv; |
---|
| 1137 | + struct nfp_flower_meta_tci *meta_tci; |
---|
| 1138 | + struct nfp_flower_mac_mpls *mac; |
---|
| 1139 | + u8 *ext = flow->unmasked_data; |
---|
| 1140 | + struct nfp_fl_act_head *act; |
---|
| 1141 | + u8 *mask = flow->mask_data; |
---|
| 1142 | + bool vlan = false; |
---|
| 1143 | + int act_offset; |
---|
| 1144 | + u8 key_layer; |
---|
| 1145 | + |
---|
| 1146 | + meta_tci = (struct nfp_flower_meta_tci *)flow->unmasked_data; |
---|
| 1147 | + key_layer = key_ls->key_layer; |
---|
| 1148 | + if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ)) { |
---|
| 1149 | + if (meta_tci->tci & cpu_to_be16(NFP_FLOWER_MASK_VLAN_PRESENT)) { |
---|
| 1150 | + u16 vlan_tci = be16_to_cpu(meta_tci->tci); |
---|
| 1151 | + |
---|
| 1152 | + vlan_tci &= ~NFP_FLOWER_MASK_VLAN_PRESENT; |
---|
| 1153 | + flow->pre_tun_rule.vlan_tci = cpu_to_be16(vlan_tci); |
---|
| 1154 | + vlan = true; |
---|
| 1155 | + } else { |
---|
| 1156 | + flow->pre_tun_rule.vlan_tci = cpu_to_be16(0xffff); |
---|
| 1157 | + } |
---|
| 1158 | + } |
---|
| 1159 | + |
---|
| 1160 | + if (key_layer & ~NFP_FLOWER_PRE_TUN_RULE_FIELDS) { |
---|
| 1161 | + NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: too many match fields"); |
---|
| 1162 | + return -EOPNOTSUPP; |
---|
| 1163 | + } else if (key_ls->key_layer_two & ~NFP_FLOWER_LAYER2_QINQ) { |
---|
| 1164 | + NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: non-vlan in extended match fields"); |
---|
| 1165 | + return -EOPNOTSUPP; |
---|
| 1166 | + } |
---|
| 1167 | + |
---|
| 1168 | + if (!(key_layer & NFP_FLOWER_LAYER_MAC)) { |
---|
| 1169 | + NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: MAC fields match required"); |
---|
| 1170 | + return -EOPNOTSUPP; |
---|
| 1171 | + } |
---|
| 1172 | + |
---|
| 1173 | + if (!(key_layer & NFP_FLOWER_LAYER_IPV4) && |
---|
| 1174 | + !(key_layer & NFP_FLOWER_LAYER_IPV6)) { |
---|
| 1175 | + NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: match on ipv4/ipv6 eth_type must be present"); |
---|
| 1176 | + return -EOPNOTSUPP; |
---|
| 1177 | + } |
---|
| 1178 | + |
---|
| 1179 | + /* Skip fields known to exist. */ |
---|
| 1180 | + mask += sizeof(struct nfp_flower_meta_tci); |
---|
| 1181 | + ext += sizeof(struct nfp_flower_meta_tci); |
---|
| 1182 | + if (key_ls->key_layer_two) { |
---|
| 1183 | + mask += sizeof(struct nfp_flower_ext_meta); |
---|
| 1184 | + ext += sizeof(struct nfp_flower_ext_meta); |
---|
| 1185 | + } |
---|
| 1186 | + mask += sizeof(struct nfp_flower_in_port); |
---|
| 1187 | + ext += sizeof(struct nfp_flower_in_port); |
---|
| 1188 | + |
---|
| 1189 | + /* Ensure destination MAC address matches pre_tun_dev. */ |
---|
| 1190 | + mac = (struct nfp_flower_mac_mpls *)ext; |
---|
| 1191 | + if (memcmp(&mac->mac_dst[0], flow->pre_tun_rule.dev->dev_addr, ETH_ALEN)) {
---|
| 1192 | + NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: dest MAC must match output dev MAC"); |
---|
| 1193 | + return -EOPNOTSUPP; |
---|
| 1194 | + } |
---|
| 1195 | + |
---|
| 1196 | + /* Ensure destination MAC address is fully matched. */ |
---|
| 1197 | + mac = (struct nfp_flower_mac_mpls *)mask; |
---|
| 1198 | + if (!is_broadcast_ether_addr(&mac->mac_dst[0])) { |
---|
| 1199 | + NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: dest MAC field must not be masked"); |
---|
| 1200 | + return -EOPNOTSUPP; |
---|
| 1201 | + } |
---|
| 1202 | + |
---|
| 1203 | + if (mac->mpls_lse) { |
---|
| 1204 | + NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: MPLS not supported"); |
---|
| 1205 | + return -EOPNOTSUPP; |
---|
| 1206 | + } |
---|
| 1207 | + |
---|
| 1208 | + mask += sizeof(struct nfp_flower_mac_mpls); |
---|
| 1209 | + ext += sizeof(struct nfp_flower_mac_mpls); |
---|
| 1210 | + if (key_layer & NFP_FLOWER_LAYER_IPV4 || |
---|
| 1211 | + key_layer & NFP_FLOWER_LAYER_IPV6) { |
---|
| 1212 | + /* Flags and proto fields have the same offset in IPv4 and IPv6. */
---|
| 1213 | + int ip_flags = offsetof(struct nfp_flower_ipv4, ip_ext.flags); |
---|
| 1214 | + int ip_proto = offsetof(struct nfp_flower_ipv4, ip_ext.proto); |
---|
| 1215 | + int size; |
---|
| 1216 | + int i; |
---|
| 1217 | + |
---|
| 1218 | + size = key_layer & NFP_FLOWER_LAYER_IPV4 ? |
---|
| 1219 | + sizeof(struct nfp_flower_ipv4) : |
---|
| 1220 | + sizeof(struct nfp_flower_ipv6); |
---|
| 1221 | +
---|
| 1223 | + /* Ensure proto and flags are the only IP layer fields. */ |
---|
| 1224 | + for (i = 0; i < size; i++) |
---|
| 1225 | + if (mask[i] && i != ip_flags && i != ip_proto) { |
---|
| 1226 | + NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: only flags and proto can be matched in ip header"); |
---|
| 1227 | + return -EOPNOTSUPP; |
---|
| 1228 | + } |
---|
| 1229 | + ext += size; |
---|
| 1230 | + mask += size; |
---|
| 1231 | + } |
---|
| 1232 | + |
---|
| 1233 | + if (priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ) {
---|
| 1234 | + if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_QINQ) { |
---|
| 1235 | + struct nfp_flower_vlan *vlan_tags; |
---|
| 1236 | + u16 vlan_tci; |
---|
| 1237 | + |
---|
| 1238 | + vlan_tags = (struct nfp_flower_vlan *)ext; |
---|
| 1239 | + |
---|
| 1240 | + vlan_tci = be16_to_cpu(vlan_tags->outer_tci); |
---|
| 1241 | + |
---|
| 1242 | + vlan_tci &= ~NFP_FLOWER_MASK_VLAN_PRESENT; |
---|
| 1243 | + flow->pre_tun_rule.vlan_tci = cpu_to_be16(vlan_tci); |
---|
| 1244 | + vlan = true; |
---|
| 1245 | + } else { |
---|
| 1246 | + flow->pre_tun_rule.vlan_tci = cpu_to_be16(0xffff); |
---|
| 1247 | + } |
---|
| 1248 | + } |
---|
| 1249 | + |
---|
| 1250 | + /* The action must be a single egress, or a VLAN pop followed by an egress. */
---|
| 1251 | + act_offset = 0; |
---|
| 1252 | + act = (struct nfp_fl_act_head *)&flow->action_data[act_offset]; |
---|
| 1253 | + if (vlan) { |
---|
| 1254 | + if (act->jump_id != NFP_FL_ACTION_OPCODE_POP_VLAN) { |
---|
| 1255 | + NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: match on VLAN must have VLAN pop as first action"); |
---|
| 1256 | + return -EOPNOTSUPP; |
---|
| 1257 | + } |
---|
| 1258 | + |
---|
| 1259 | + act_offset += act->len_lw << NFP_FL_LW_SIZ; |
---|
| 1260 | + act = (struct nfp_fl_act_head *)&flow->action_data[act_offset]; |
---|
| 1261 | + } |
---|
| 1262 | + |
---|
| 1263 | + if (act->jump_id != NFP_FL_ACTION_OPCODE_OUTPUT) { |
---|
| 1264 | + NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: non-egress action detected where egress was expected");
---|
| 1265 | + return -EOPNOTSUPP; |
---|
| 1266 | + } |
---|
| 1267 | + |
---|
| 1268 | + act_offset += act->len_lw << NFP_FL_LW_SIZ; |
---|
| 1269 | + |
---|
| 1270 | + /* Ensure there are no more actions after egress. */ |
---|
| 1271 | + if (act_offset != flow->meta.act_len) { |
---|
| 1272 | + NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: egress is not the last action"); |
---|
| 1273 | + return -EOPNOTSUPP; |
---|
| 1274 | + } |
---|
| 1275 | + |
---|
| 1276 | + return 0; |
---|
| 1277 | +} |
---|
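The validator above walks the packed action list by header rather than by a fixed stride: each action begins with an opcode and a length in 4-byte longwords, and `len_lw << NFP_FL_LW_SIZ` converts that length to a byte offset. Below is a minimal standalone sketch of the same walk, using stand-in definitions (`nfp_example_count_actions` is hypothetical, not part of the driver):

```c
#include <linux/errno.h>
#include <linux/types.h>

#define NFP_FL_LW_SIZ	2	/* shift converting longwords to bytes */

struct nfp_fl_act_head {
	u8 jump_id;		/* action opcode */
	u8 len_lw;		/* total action length in 4-byte longwords */
};

/* Count the actions in a packed action list of act_len bytes. */
static int nfp_example_count_actions(u8 *action_data, int act_len)
{
	int act_offset = 0, n = 0;

	while (act_offset < act_len) {
		struct nfp_fl_act_head *act;

		act = (struct nfp_fl_act_head *)&action_data[act_offset];
		if (!act->len_lw)	/* malformed list, avoid spinning */
			return -EINVAL;
		act_offset += act->len_lw << NFP_FL_LW_SIZ;
		n++;
	}
	return n;
}
```

The final `act_offset != flow->meta.act_len` comparison in the validator relies on exactly this arithmetic to prove that egress is the last action.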
| 1278 | + |
---|
462 | 1279 | /** |
---|
463 | 1280 | * nfp_flower_add_offload() - Adds a new flow to hardware. |
---|
464 | 1281 | * @app: Pointer to the APP handle |
---|
465 | 1282 | * @netdev: netdev structure. |
---|
466 | 1283 | * @flow: TC flower classifier offload structure. |
---|
467 | | - * @egress: NFP netdev is the egress. |
---|
468 | 1284 | * |
---|
469 | 1285 | * Adds a new flow to the repeated hash structure and action payload. |
---|
470 | 1286 | * |
---|
.. | .. |
---|
472 | 1288 | */ |
---|
473 | 1289 | static int |
---|
474 | 1290 | nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev, |
---|
475 | | - struct tc_cls_flower_offload *flow, bool egress) |
---|
| 1291 | + struct flow_cls_offload *flow) |
---|
476 | 1292 | { |
---|
477 | 1293 | enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE; |
---|
478 | | - struct nfp_port *port = nfp_port_from_netdev(netdev); |
---|
479 | 1294 | struct nfp_flower_priv *priv = app->priv; |
---|
| 1295 | + struct netlink_ext_ack *extack = NULL; |
---|
480 | 1296 | struct nfp_fl_payload *flow_pay; |
---|
481 | 1297 | struct nfp_fl_key_ls *key_layer; |
---|
482 | | - struct net_device *ingr_dev; |
---|
| 1298 | + struct nfp_port *port = NULL; |
---|
483 | 1299 | int err; |
---|
484 | 1300 | |
---|
485 | | - ingr_dev = egress ? NULL : netdev; |
---|
486 | | - flow_pay = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev, |
---|
487 | | - NFP_FL_STATS_CTX_DONT_CARE); |
---|
488 | | - if (flow_pay) { |
---|
489 | | - /* Ignore as duplicate if it has been added by different cb. */ |
---|
490 | | - if (flow_pay->ingress_offload && egress) |
---|
491 | | - return 0; |
---|
492 | | - else |
---|
493 | | - return -EOPNOTSUPP; |
---|
494 | | - } |
---|
| 1301 | + extack = flow->common.extack; |
---|
| 1302 | + if (nfp_netdev_is_nfp_repr(netdev)) |
---|
| 1303 | + port = nfp_port_from_netdev(netdev); |
---|
495 | 1304 | |
---|
496 | 1305 | key_layer = kmalloc(sizeof(*key_layer), GFP_KERNEL); |
---|
497 | 1306 | if (!key_layer) |
---|
498 | 1307 | return -ENOMEM; |
---|
499 | 1308 | |
---|
500 | | - err = nfp_flower_calculate_key_layers(app, key_layer, flow, egress, |
---|
501 | | - &tun_type); |
---|
| 1309 | + err = nfp_flower_calculate_key_layers(app, netdev, key_layer, flow, |
---|
| 1310 | + &tun_type, extack); |
---|
502 | 1311 | if (err) |
---|
503 | 1312 | goto err_free_key_ls; |
---|
504 | 1313 | |
---|
505 | | - flow_pay = nfp_flower_allocate_new(key_layer, egress); |
---|
| 1314 | + flow_pay = nfp_flower_allocate_new(key_layer); |
---|
506 | 1315 | if (!flow_pay) { |
---|
507 | 1316 | err = -ENOMEM; |
---|
508 | 1317 | goto err_free_key_ls; |
---|
509 | 1318 | } |
---|
510 | 1319 | |
---|
511 | | - flow_pay->ingress_dev = egress ? NULL : netdev; |
---|
512 | | - |
---|
513 | | - err = nfp_flower_compile_flow_match(flow, key_layer, netdev, flow_pay, |
---|
514 | | - tun_type); |
---|
| 1320 | + err = nfp_flower_compile_flow_match(app, flow, key_layer, netdev, |
---|
| 1321 | + flow_pay, tun_type, extack); |
---|
515 | 1322 | if (err) |
---|
516 | 1323 | goto err_destroy_flow; |
---|
517 | 1324 | |
---|
518 | | - err = nfp_flower_compile_action(app, flow, netdev, flow_pay); |
---|
| 1325 | + err = nfp_flower_compile_action(app, flow, netdev, flow_pay, extack); |
---|
519 | 1326 | if (err) |
---|
520 | 1327 | goto err_destroy_flow; |
---|
521 | 1328 | |
---|
522 | | - err = nfp_compile_flow_metadata(app, flow, flow_pay, |
---|
523 | | - flow_pay->ingress_dev); |
---|
| 1329 | + if (flow_pay->pre_tun_rule.dev) { |
---|
| 1330 | + err = nfp_flower_validate_pre_tun_rule(app, flow_pay, key_layer, extack); |
---|
| 1331 | + if (err) |
---|
| 1332 | + goto err_destroy_flow; |
---|
| 1333 | + } |
---|
| 1334 | + |
---|
| 1335 | + err = nfp_compile_flow_metadata(app, flow, flow_pay, netdev, extack); |
---|
524 | 1336 | if (err) |
---|
525 | 1337 | goto err_destroy_flow; |
---|
526 | 1338 | |
---|
527 | | - err = nfp_flower_xmit_flow(netdev, flow_pay, |
---|
528 | | - NFP_FLOWER_CMSG_TYPE_FLOW_ADD); |
---|
529 | | - if (err) |
---|
530 | | - goto err_destroy_flow; |
---|
531 | | - |
---|
532 | | - INIT_HLIST_NODE(&flow_pay->link); |
---|
533 | 1339 | flow_pay->tc_flower_cookie = flow->cookie; |
---|
534 | | - hash_add_rcu(priv->flow_table, &flow_pay->link, flow->cookie); |
---|
535 | | - port->tc_offload_cnt++; |
---|
| 1340 | + err = rhashtable_insert_fast(&priv->flow_table, &flow_pay->fl_node, |
---|
| 1341 | + nfp_flower_table_params); |
---|
| 1342 | + if (err) { |
---|
| 1343 | + NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot insert flow into tables for offloads"); |
---|
| 1344 | + goto err_release_metadata; |
---|
| 1345 | + } |
---|
| 1346 | + |
---|
| 1347 | + if (flow_pay->pre_tun_rule.dev) |
---|
| 1348 | + err = nfp_flower_xmit_pre_tun_flow(app, flow_pay); |
---|
| 1349 | + else |
---|
| 1350 | + err = nfp_flower_xmit_flow(app, flow_pay, |
---|
| 1351 | + NFP_FLOWER_CMSG_TYPE_FLOW_ADD); |
---|
| 1352 | + if (err) |
---|
| 1353 | + goto err_remove_rhash; |
---|
| 1354 | + |
---|
| 1355 | + if (port) |
---|
| 1356 | + port->tc_offload_cnt++; |
---|
| 1357 | + |
---|
| 1358 | + flow_pay->in_hw = true; |
---|
536 | 1359 | |
---|
537 | 1360 | /* Deallocate flow payload when flower rule has been destroyed. */ |
---|
538 | 1361 | kfree(key_layer); |
---|
539 | 1362 | |
---|
540 | 1363 | return 0; |
---|
541 | 1364 | |
---|
| 1365 | +err_remove_rhash: |
---|
| 1366 | + WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table, |
---|
| 1367 | + &flow_pay->fl_node, |
---|
| 1368 | + nfp_flower_table_params)); |
---|
| 1369 | +err_release_metadata: |
---|
| 1370 | + nfp_modify_flow_metadata(app, flow_pay); |
---|
542 | 1371 | err_destroy_flow: |
---|
| 1372 | + if (flow_pay->nfp_tun_ipv6) |
---|
| 1373 | + nfp_tunnel_put_ipv6_off(app, flow_pay->nfp_tun_ipv6); |
---|
543 | 1374 | kfree(flow_pay->action_data); |
---|
544 | 1375 | kfree(flow_pay->mask_data); |
---|
545 | 1376 | kfree(flow_pay->unmasked_data); |
---|
.. | .. |
---|
549 | 1380 | return err; |
---|
550 | 1381 | } |
---|
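nfp_flower_add_offload() pairs its rhashtable_insert_fast() with a rhashtable_remove_fast() on the unwind path. A self-contained sketch of that pattern follows, with hypothetical `example_*` names and a simplified key (the driver's real table keys on more than the bare cookie):

```c
#include <linux/rhashtable.h>
#include <linux/slab.h>

struct example_flow {
	unsigned long cookie;		/* lookup key */
	struct rhash_head node;		/* table linkage */
};

static const struct rhashtable_params example_flow_params = {
	.key_offset	= offsetof(struct example_flow, cookie),
	.head_offset	= offsetof(struct example_flow, node),
	.key_len	= sizeof(unsigned long),
	.automatic_shrinking = true,
};

/* Insert a new entry; free it again if the table rejects it. */
static int example_flow_add(struct rhashtable *tbl, unsigned long cookie)
{
	struct example_flow *e;
	int err;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->cookie = cookie;
	err = rhashtable_insert_fast(tbl, &e->node, example_flow_params);
	if (err)
		kfree(e);
	return err;
}
```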
551 | 1382 | |
---|
| 1383 | +static void |
---|
| 1384 | +nfp_flower_remove_merge_flow(struct nfp_app *app, |
---|
| 1385 | + struct nfp_fl_payload *del_sub_flow, |
---|
| 1386 | + struct nfp_fl_payload *merge_flow) |
---|
| 1387 | +{ |
---|
| 1388 | + struct nfp_flower_priv *priv = app->priv; |
---|
| 1389 | + struct nfp_fl_payload_link *link, *temp; |
---|
| 1390 | + struct nfp_merge_info *merge_info; |
---|
| 1391 | + struct nfp_fl_payload *origin; |
---|
| 1392 | + u64 parent_ctx = 0; |
---|
| 1393 | + bool mod = false; |
---|
| 1394 | + int err; |
---|
| 1395 | + |
---|
| 1396 | + link = list_first_entry(&merge_flow->linked_flows, |
---|
| 1397 | + struct nfp_fl_payload_link, merge_flow.list); |
---|
| 1398 | + origin = link->sub_flow.flow; |
---|
| 1399 | + |
---|
| 1400 | + /* Re-add the rule the merge had overwritten if it has not been deleted. */
---|
| 1401 | + if (origin != del_sub_flow) |
---|
| 1402 | + mod = true; |
---|
| 1403 | + |
---|
| 1404 | + err = nfp_modify_flow_metadata(app, merge_flow); |
---|
| 1405 | + if (err) { |
---|
| 1406 | + nfp_flower_cmsg_warn(app, "Failed to modify metadata for merge flow delete.\n");
---|
| 1407 | + goto err_free_links; |
---|
| 1408 | + } |
---|
| 1409 | + |
---|
| 1410 | + if (!mod) { |
---|
| 1411 | + err = nfp_flower_xmit_flow(app, merge_flow, |
---|
| 1412 | + NFP_FLOWER_CMSG_TYPE_FLOW_DEL); |
---|
| 1413 | + if (err) { |
---|
| 1414 | + nfp_flower_cmsg_warn(app, "Failed to delete merged flow.\n"); |
---|
| 1415 | + goto err_free_links; |
---|
| 1416 | + } |
---|
| 1417 | + } else { |
---|
| 1418 | + __nfp_modify_flow_metadata(priv, origin); |
---|
| 1419 | + err = nfp_flower_xmit_flow(app, origin, |
---|
| 1420 | + NFP_FLOWER_CMSG_TYPE_FLOW_MOD); |
---|
| 1421 | + if (err) |
---|
| 1422 | + nfp_flower_cmsg_warn(app, "Failed to revert merge flow.\n"); |
---|
| 1423 | + origin->in_hw = true; |
---|
| 1424 | + } |
---|
| 1425 | + |
---|
| 1426 | +err_free_links: |
---|
| 1427 | + /* Clean any links connected with the merged flow. */ |
---|
| 1428 | + list_for_each_entry_safe(link, temp, &merge_flow->linked_flows, |
---|
| 1429 | + merge_flow.list) { |
---|
| 1430 | + u32 ctx_id = be32_to_cpu(link->sub_flow.flow->meta.host_ctx_id); |
---|
| 1431 | + |
---|
| 1432 | + parent_ctx = (parent_ctx << 32) | (u64)(ctx_id); |
---|
| 1433 | + nfp_flower_unlink_flow(link); |
---|
| 1434 | + } |
---|
| 1435 | + |
---|
| 1436 | + merge_info = rhashtable_lookup_fast(&priv->merge_table, |
---|
| 1437 | + &parent_ctx, |
---|
| 1438 | + merge_table_params); |
---|
| 1439 | + if (merge_info) { |
---|
| 1440 | + WARN_ON_ONCE(rhashtable_remove_fast(&priv->merge_table, |
---|
| 1441 | + &merge_info->ht_node, |
---|
| 1442 | + merge_table_params)); |
---|
| 1443 | + kfree(merge_info); |
---|
| 1444 | + } |
---|
| 1445 | + |
---|
| 1446 | + kfree(merge_flow->action_data); |
---|
| 1447 | + kfree(merge_flow->mask_data); |
---|
| 1448 | + kfree(merge_flow->unmasked_data); |
---|
| 1449 | + WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table, |
---|
| 1450 | + &merge_flow->fl_node, |
---|
| 1451 | + nfp_flower_table_params)); |
---|
| 1452 | + kfree_rcu(merge_flow, rcu); |
---|
| 1453 | +} |
---|
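The merge_table lookup above rebuilds its key from the linked sub-flows: the loop shifts the running value left by 32 bits before OR-ing in each host context ID, so the first sub-flow ends up in the upper half of a u64. Equivalently, as a sketch (`example_merge_key` is a hypothetical helper):

```c
#include <linux/types.h>

/* Merge table key: sub_flow1's context ID in the high 32 bits,
 * sub_flow2's in the low 32 bits.
 */
static u64 example_merge_key(u32 sub_flow1_ctx_id, u32 sub_flow2_ctx_id)
{
	return ((u64)sub_flow1_ctx_id << 32) | sub_flow2_ctx_id;
}
```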
| 1454 | + |
---|
| 1455 | +static void |
---|
| 1456 | +nfp_flower_del_linked_merge_flows(struct nfp_app *app, |
---|
| 1457 | + struct nfp_fl_payload *sub_flow) |
---|
| 1458 | +{ |
---|
| 1459 | + struct nfp_fl_payload_link *link, *temp; |
---|
| 1460 | + |
---|
| 1461 | + /* Remove any merge flow formed from the deleted sub_flow. */ |
---|
| 1462 | + list_for_each_entry_safe(link, temp, &sub_flow->linked_flows, |
---|
| 1463 | + sub_flow.list) |
---|
| 1464 | + nfp_flower_remove_merge_flow(app, sub_flow, |
---|
| 1465 | + link->merge_flow.flow); |
---|
| 1466 | +} |
---|
| 1467 | + |
---|
552 | 1468 | /** |
---|
553 | 1469 | * nfp_flower_del_offload() - Removes a flow from hardware. |
---|
554 | 1470 | * @app: Pointer to the APP handle |
---|
555 | 1471 | * @netdev: netdev structure. |
---|
556 | 1472 | * @flow: TC flower classifier offload structure |
---|
557 | | - * @egress: Netdev is the egress dev. |
---|
558 | 1473 | * |
---|
559 | 1474 | * Removes a flow from the repeated hash structure and clears the |
---|
560 | | - * action payload. |
---|
| 1475 | + * action payload. Any flows merged from this are also deleted. |
---|
561 | 1476 | * |
---|
562 | 1477 | * Return: negative value on error, 0 if removed successfully. |
---|
563 | 1478 | */ |
---|
564 | 1479 | static int |
---|
565 | 1480 | nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev, |
---|
566 | | - struct tc_cls_flower_offload *flow, bool egress) |
---|
| 1481 | + struct flow_cls_offload *flow) |
---|
567 | 1482 | { |
---|
568 | | - struct nfp_port *port = nfp_port_from_netdev(netdev); |
---|
| 1483 | + struct nfp_flower_priv *priv = app->priv; |
---|
| 1484 | + struct netlink_ext_ack *extack = NULL; |
---|
569 | 1485 | struct nfp_fl_payload *nfp_flow; |
---|
570 | | - struct net_device *ingr_dev; |
---|
| 1486 | + struct nfp_port *port = NULL; |
---|
571 | 1487 | int err; |
---|
572 | 1488 | |
---|
573 | | - ingr_dev = egress ? NULL : netdev; |
---|
574 | | - nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev, |
---|
575 | | - NFP_FL_STATS_CTX_DONT_CARE); |
---|
576 | | - if (!nfp_flow) |
---|
577 | | - return egress ? 0 : -ENOENT; |
---|
| 1489 | + extack = flow->common.extack; |
---|
| 1490 | + if (nfp_netdev_is_nfp_repr(netdev)) |
---|
| 1491 | + port = nfp_port_from_netdev(netdev); |
---|
| 1492 | + |
---|
| 1493 | + nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev); |
---|
| 1494 | + if (!nfp_flow) { |
---|
| 1495 | + NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot remove flow that does not exist"); |
---|
| 1496 | + return -ENOENT; |
---|
| 1497 | + } |
---|
578 | 1498 | |
---|
579 | 1499 | err = nfp_modify_flow_metadata(app, nfp_flow); |
---|
580 | 1500 | if (err) |
---|
581 | | - goto err_free_flow; |
---|
| 1501 | + goto err_free_merge_flow; |
---|
582 | 1502 | |
---|
583 | 1503 | if (nfp_flow->nfp_tun_ipv4_addr) |
---|
584 | 1504 | nfp_tunnel_del_ipv4_off(app, nfp_flow->nfp_tun_ipv4_addr); |
---|
585 | 1505 | |
---|
586 | | - err = nfp_flower_xmit_flow(netdev, nfp_flow, |
---|
587 | | - NFP_FLOWER_CMSG_TYPE_FLOW_DEL); |
---|
588 | | - if (err) |
---|
589 | | - goto err_free_flow; |
---|
| 1506 | + if (nfp_flow->nfp_tun_ipv6) |
---|
| 1507 | + nfp_tunnel_put_ipv6_off(app, nfp_flow->nfp_tun_ipv6); |
---|
590 | 1508 | |
---|
591 | | -err_free_flow: |
---|
592 | | - hash_del_rcu(&nfp_flow->link); |
---|
593 | | - port->tc_offload_cnt--; |
---|
| 1509 | + if (!nfp_flow->in_hw) { |
---|
| 1510 | + err = 0; |
---|
| 1511 | + goto err_free_merge_flow; |
---|
| 1512 | + } |
---|
| 1513 | + |
---|
| 1514 | + if (nfp_flow->pre_tun_rule.dev) |
---|
| 1515 | + err = nfp_flower_xmit_pre_tun_del_flow(app, nfp_flow); |
---|
| 1516 | + else |
---|
| 1517 | + err = nfp_flower_xmit_flow(app, nfp_flow, |
---|
| 1518 | + NFP_FLOWER_CMSG_TYPE_FLOW_DEL); |
---|
| 1519 | + /* Fall through on error. */ |
---|
| 1520 | + |
---|
| 1521 | +err_free_merge_flow: |
---|
| 1522 | + nfp_flower_del_linked_merge_flows(app, nfp_flow); |
---|
| 1523 | + if (port) |
---|
| 1524 | + port->tc_offload_cnt--; |
---|
594 | 1525 | kfree(nfp_flow->action_data); |
---|
595 | 1526 | kfree(nfp_flow->mask_data); |
---|
596 | 1527 | kfree(nfp_flow->unmasked_data); |
---|
| 1528 | + WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table, |
---|
| 1529 | + &nfp_flow->fl_node, |
---|
| 1530 | + nfp_flower_table_params)); |
---|
597 | 1531 | kfree_rcu(nfp_flow, rcu); |
---|
598 | 1532 | return err; |
---|
| 1533 | +} |
---|
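Note the kfree_rcu() at the end of the delete path: the flow table is an rhashtable, whose lookups are RCU-protected, so an entry must not be freed while readers that found it may still hold it. A minimal sketch of the pattern, with a hypothetical structure:

```c
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/types.h>

struct example_flow_pay {
	u32 ctx_id;
	struct rcu_head rcu;	/* embedded head required by kfree_rcu() */
};

static void example_flow_pay_free(struct example_flow_pay *pay)
{
	/* Defers the kfree() until all pre-existing RCU readers finish. */
	kfree_rcu(pay, rcu);
}
```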
| 1534 | + |
---|
| 1535 | +static void |
---|
| 1536 | +__nfp_flower_update_merge_stats(struct nfp_app *app, |
---|
| 1537 | + struct nfp_fl_payload *merge_flow) |
---|
| 1538 | +{ |
---|
| 1539 | + struct nfp_flower_priv *priv = app->priv; |
---|
| 1540 | + struct nfp_fl_payload_link *link; |
---|
| 1541 | + struct nfp_fl_payload *sub_flow; |
---|
| 1542 | + u64 pkts, bytes, used; |
---|
| 1543 | + u32 ctx_id; |
---|
| 1544 | + |
---|
| 1545 | + ctx_id = be32_to_cpu(merge_flow->meta.host_ctx_id); |
---|
| 1546 | + pkts = priv->stats[ctx_id].pkts; |
---|
| 1547 | + /* Do not cycle subflows if no stats to distribute. */ |
---|
| 1548 | + if (!pkts) |
---|
| 1549 | + return; |
---|
| 1550 | + bytes = priv->stats[ctx_id].bytes; |
---|
| 1551 | + used = priv->stats[ctx_id].used; |
---|
| 1552 | + |
---|
| 1553 | + /* Reset stats for the merge flow. */ |
---|
| 1554 | + priv->stats[ctx_id].pkts = 0; |
---|
| 1555 | + priv->stats[ctx_id].bytes = 0; |
---|
| 1556 | + |
---|
| 1557 | + /* The merge flow has received stats updates from firmware. |
---|
| 1558 | + * Distribute these stats to all subflows that form the merge. |
---|
| 1559 | + * The stats will be collected from TC via the subflows.
---|
| 1560 | + */ |
---|
| 1561 | + list_for_each_entry(link, &merge_flow->linked_flows, merge_flow.list) { |
---|
| 1562 | + sub_flow = link->sub_flow.flow; |
---|
| 1563 | + ctx_id = be32_to_cpu(sub_flow->meta.host_ctx_id); |
---|
| 1564 | + priv->stats[ctx_id].pkts += pkts; |
---|
| 1565 | + priv->stats[ctx_id].bytes += bytes; |
---|
| 1566 | + priv->stats[ctx_id].used = max_t(u64, used, |
---|
| 1567 | + priv->stats[ctx_id].used); |
---|
| 1568 | + } |
---|
| 1569 | +} |
---|
| 1570 | + |
---|
| 1571 | +static void |
---|
| 1572 | +nfp_flower_update_merge_stats(struct nfp_app *app, |
---|
| 1573 | + struct nfp_fl_payload *sub_flow) |
---|
| 1574 | +{ |
---|
| 1575 | + struct nfp_fl_payload_link *link; |
---|
| 1576 | + |
---|
| 1577 | + /* Get merge flows that the subflow forms to distribute their stats. */ |
---|
| 1578 | + list_for_each_entry(link, &sub_flow->linked_flows, sub_flow.list) |
---|
| 1579 | + __nfp_flower_update_merge_stats(app, link->merge_flow.flow); |
---|
599 | 1580 | } |
---|
600 | 1581 | |
---|
601 | 1582 | /** |
---|
.. | .. |
---|
603 | 1584 | * @app: Pointer to the APP handle |
---|
604 | 1585 | * @netdev: Netdev structure. |
---|
605 | 1586 | * @flow: TC flower classifier offload structure |
---|
606 | | - * @egress: Netdev is the egress dev. |
---|
607 | 1587 | * |
---|
608 | 1588 | * Populates a flow statistics structure which corresponds to a
---|
609 | 1589 | * specific flow. |
---|
.. | .. |
---|
612 | 1592 | */ |
---|
613 | 1593 | static int |
---|
614 | 1594 | nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev, |
---|
615 | | - struct tc_cls_flower_offload *flow, bool egress) |
---|
| 1595 | + struct flow_cls_offload *flow) |
---|
616 | 1596 | { |
---|
| 1597 | + struct nfp_flower_priv *priv = app->priv; |
---|
| 1598 | + struct netlink_ext_ack *extack = NULL; |
---|
617 | 1599 | struct nfp_fl_payload *nfp_flow; |
---|
618 | | - struct net_device *ingr_dev; |
---|
| 1600 | + u32 ctx_id; |
---|
619 | 1601 | |
---|
620 | | - ingr_dev = egress ? NULL : netdev; |
---|
621 | | - nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev, |
---|
622 | | - NFP_FL_STATS_CTX_DONT_CARE); |
---|
623 | | - if (!nfp_flow) |
---|
| 1602 | + extack = flow->common.extack; |
---|
| 1603 | + nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev); |
---|
| 1604 | + if (!nfp_flow) { |
---|
| 1605 | + NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot dump stats for flow that does not exist"); |
---|
624 | 1606 | return -EINVAL; |
---|
| 1607 | + } |
---|
625 | 1608 | |
---|
626 | | - if (nfp_flow->ingress_offload && egress) |
---|
627 | | - return 0; |
---|
| 1609 | + ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id); |
---|
628 | 1610 | |
---|
629 | | - spin_lock_bh(&nfp_flow->lock); |
---|
630 | | - tcf_exts_stats_update(flow->exts, nfp_flow->stats.bytes, |
---|
631 | | - nfp_flow->stats.pkts, nfp_flow->stats.used); |
---|
| 1611 | + spin_lock_bh(&priv->stats_lock); |
---|
| 1612 | + /* If request is for a sub_flow, update stats from merged flows. */ |
---|
| 1613 | + if (!list_empty(&nfp_flow->linked_flows)) |
---|
| 1614 | + nfp_flower_update_merge_stats(app, nfp_flow); |
---|
632 | 1615 | |
---|
633 | | - nfp_flow->stats.pkts = 0; |
---|
634 | | - nfp_flow->stats.bytes = 0; |
---|
635 | | - spin_unlock_bh(&nfp_flow->lock); |
---|
| 1616 | + flow_stats_update(&flow->stats, priv->stats[ctx_id].bytes, |
---|
| 1617 | + priv->stats[ctx_id].pkts, 0, priv->stats[ctx_id].used, |
---|
| 1618 | + FLOW_ACTION_HW_STATS_DELAYED); |
---|
| 1619 | + |
---|
| 1620 | + priv->stats[ctx_id].pkts = 0; |
---|
| 1621 | + priv->stats[ctx_id].bytes = 0; |
---|
| 1622 | + spin_unlock_bh(&priv->stats_lock); |
---|
636 | 1623 | |
---|
637 | 1624 | return 0; |
---|
638 | 1625 | } |
---|
639 | 1626 | |
---|
640 | 1627 | static int |
---|
641 | 1628 | nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev, |
---|
642 | | - struct tc_cls_flower_offload *flower, bool egress) |
---|
| 1629 | + struct flow_cls_offload *flower) |
---|
643 | 1630 | { |
---|
644 | 1631 | if (!eth_proto_is_802_3(flower->common.protocol)) |
---|
645 | 1632 | return -EOPNOTSUPP; |
---|
646 | 1633 | |
---|
647 | 1634 | switch (flower->command) { |
---|
648 | | - case TC_CLSFLOWER_REPLACE: |
---|
649 | | - return nfp_flower_add_offload(app, netdev, flower, egress); |
---|
650 | | - case TC_CLSFLOWER_DESTROY: |
---|
651 | | - return nfp_flower_del_offload(app, netdev, flower, egress); |
---|
652 | | - case TC_CLSFLOWER_STATS: |
---|
653 | | - return nfp_flower_get_stats(app, netdev, flower, egress); |
---|
654 | | - default: |
---|
655 | | - return -EOPNOTSUPP; |
---|
656 | | - } |
---|
657 | | -} |
---|
658 | | - |
---|
659 | | -int nfp_flower_setup_tc_egress_cb(enum tc_setup_type type, void *type_data, |
---|
660 | | - void *cb_priv) |
---|
661 | | -{ |
---|
662 | | - struct nfp_repr *repr = cb_priv; |
---|
663 | | - |
---|
664 | | - if (!tc_cls_can_offload_and_chain0(repr->netdev, type_data)) |
---|
665 | | - return -EOPNOTSUPP; |
---|
666 | | - |
---|
667 | | - switch (type) { |
---|
668 | | - case TC_SETUP_CLSFLOWER: |
---|
669 | | - return nfp_flower_repr_offload(repr->app, repr->netdev, |
---|
670 | | - type_data, true); |
---|
| 1635 | + case FLOW_CLS_REPLACE: |
---|
| 1636 | + return nfp_flower_add_offload(app, netdev, flower); |
---|
| 1637 | + case FLOW_CLS_DESTROY: |
---|
| 1638 | + return nfp_flower_del_offload(app, netdev, flower); |
---|
| 1639 | + case FLOW_CLS_STATS: |
---|
| 1640 | + return nfp_flower_get_stats(app, netdev, flower); |
---|
671 | 1641 | default: |
---|
672 | 1642 | return -EOPNOTSUPP; |
---|
673 | 1643 | } |
---|
.. | .. |
---|
684 | 1654 | switch (type) { |
---|
685 | 1655 | case TC_SETUP_CLSFLOWER: |
---|
686 | 1656 | return nfp_flower_repr_offload(repr->app, repr->netdev, |
---|
687 | | - type_data, false); |
---|
| 1657 | + type_data); |
---|
| 1658 | + case TC_SETUP_CLSMATCHALL: |
---|
| 1659 | + return nfp_flower_setup_qos_offload(repr->app, repr->netdev, |
---|
| 1660 | + type_data); |
---|
688 | 1661 | default: |
---|
689 | 1662 | return -EOPNOTSUPP; |
---|
690 | 1663 | } |
---|
691 | 1664 | } |
---|
692 | 1665 | |
---|
| 1666 | +static LIST_HEAD(nfp_block_cb_list); |
---|
| 1667 | + |
---|
693 | 1668 | static int nfp_flower_setup_tc_block(struct net_device *netdev, |
---|
694 | | - struct tc_block_offload *f) |
---|
| 1669 | + struct flow_block_offload *f) |
---|
695 | 1670 | { |
---|
696 | 1671 | struct nfp_repr *repr = netdev_priv(netdev); |
---|
| 1672 | + struct nfp_flower_repr_priv *repr_priv; |
---|
| 1673 | + struct flow_block_cb *block_cb; |
---|
697 | 1674 | |
---|
698 | | - if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) |
---|
| 1675 | + if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) |
---|
699 | 1676 | return -EOPNOTSUPP; |
---|
700 | 1677 | |
---|
| 1678 | + repr_priv = repr->app_priv; |
---|
| 1679 | + repr_priv->block_shared = f->block_shared; |
---|
| 1680 | + f->driver_block_list = &nfp_block_cb_list; |
---|
| 1681 | + |
---|
701 | 1682 | switch (f->command) { |
---|
702 | | - case TC_BLOCK_BIND: |
---|
703 | | - return tcf_block_cb_register(f->block, |
---|
704 | | - nfp_flower_setup_tc_block_cb, |
---|
705 | | - repr, repr, f->extack); |
---|
706 | | - case TC_BLOCK_UNBIND: |
---|
707 | | - tcf_block_cb_unregister(f->block, |
---|
708 | | - nfp_flower_setup_tc_block_cb, |
---|
709 | | - repr); |
---|
| 1683 | + case FLOW_BLOCK_BIND: |
---|
| 1684 | + if (flow_block_cb_is_busy(nfp_flower_setup_tc_block_cb, repr, |
---|
| 1685 | + &nfp_block_cb_list)) |
---|
| 1686 | + return -EBUSY; |
---|
| 1687 | + |
---|
| 1688 | + block_cb = flow_block_cb_alloc(nfp_flower_setup_tc_block_cb, |
---|
| 1689 | + repr, repr, NULL); |
---|
| 1690 | + if (IS_ERR(block_cb)) |
---|
| 1691 | + return PTR_ERR(block_cb); |
---|
| 1692 | + |
---|
| 1693 | + flow_block_cb_add(block_cb, f); |
---|
| 1694 | + list_add_tail(&block_cb->driver_list, &nfp_block_cb_list); |
---|
| 1695 | + return 0; |
---|
| 1696 | + case FLOW_BLOCK_UNBIND: |
---|
| 1697 | + block_cb = flow_block_cb_lookup(f->block, |
---|
| 1698 | + nfp_flower_setup_tc_block_cb, |
---|
| 1699 | + repr); |
---|
| 1700 | + if (!block_cb) |
---|
| 1701 | + return -ENOENT; |
---|
| 1702 | + |
---|
| 1703 | + flow_block_cb_remove(block_cb, f); |
---|
| 1704 | + list_del(&block_cb->driver_list); |
---|
710 | 1705 | return 0; |
---|
711 | 1706 | default: |
---|
712 | 1707 | return -EOPNOTSUPP; |
---|
.. | .. |
---|
723 | 1718 | return -EOPNOTSUPP; |
---|
724 | 1719 | } |
---|
725 | 1720 | } |
---|
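For contrast with the manual flow_block_cb_alloc()/flow_block_cb_add() pairing above: a driver with no per-block state to record can usually lean on the core helper flow_block_cb_setup_simple() instead. A hypothetical minimal sketch (`example_*` names are stand-ins):

```c
#include <linux/netdevice.h>
#include <net/flow_offload.h>
#include <net/pkt_cls.h>

static LIST_HEAD(example_block_cb_list);

static int example_setup_tc_block_cb(enum tc_setup_type type,
				     void *type_data, void *cb_priv)
{
	/* Dispatch TC_SETUP_CLSFLOWER etc. for the bound block here. */
	return -EOPNOTSUPP;
}

static int example_setup_tc(struct net_device *netdev,
			    enum tc_setup_type type, void *type_data)
{
	void *priv = netdev_priv(netdev);

	switch (type) {
	case TC_SETUP_BLOCK:
		/* Last argument true: bind ingress blocks only. */
		return flow_block_cb_setup_simple(type_data,
						  &example_block_cb_list,
						  example_setup_tc_block_cb,
						  priv, priv, true);
	default:
		return -EOPNOTSUPP;
	}
}
```

The nfp cannot use the helper here because it must record f->block_shared in its repr state at bind time.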
| 1721 | + |
---|
| 1722 | +struct nfp_flower_indr_block_cb_priv { |
---|
| 1723 | + struct net_device *netdev; |
---|
| 1724 | + struct nfp_app *app; |
---|
| 1725 | + struct list_head list; |
---|
| 1726 | +}; |
---|
| 1727 | + |
---|
| 1728 | +static struct nfp_flower_indr_block_cb_priv * |
---|
| 1729 | +nfp_flower_indr_block_cb_priv_lookup(struct nfp_app *app, |
---|
| 1730 | + struct net_device *netdev) |
---|
| 1731 | +{ |
---|
| 1732 | + struct nfp_flower_indr_block_cb_priv *cb_priv; |
---|
| 1733 | + struct nfp_flower_priv *priv = app->priv; |
---|
| 1734 | + |
---|
| 1735 | + list_for_each_entry(cb_priv, &priv->indr_block_cb_priv, list) |
---|
| 1736 | + if (cb_priv->netdev == netdev) |
---|
| 1737 | + return cb_priv; |
---|
| 1738 | + |
---|
| 1739 | + return NULL; |
---|
| 1740 | +} |
---|
| 1741 | + |
---|
| 1742 | +static int nfp_flower_setup_indr_block_cb(enum tc_setup_type type, |
---|
| 1743 | + void *type_data, void *cb_priv) |
---|
| 1744 | +{ |
---|
| 1745 | + struct nfp_flower_indr_block_cb_priv *priv = cb_priv; |
---|
| 1746 | + struct flow_cls_offload *flower = type_data; |
---|
| 1747 | + |
---|
| 1748 | + if (flower->common.chain_index) |
---|
| 1749 | + return -EOPNOTSUPP; |
---|
| 1750 | + |
---|
| 1751 | + switch (type) { |
---|
| 1752 | + case TC_SETUP_CLSFLOWER: |
---|
| 1753 | + return nfp_flower_repr_offload(priv->app, priv->netdev, |
---|
| 1754 | + type_data); |
---|
| 1755 | + default: |
---|
| 1756 | + return -EOPNOTSUPP; |
---|
| 1757 | + } |
---|
| 1758 | +} |
---|
| 1759 | + |
---|
| 1760 | +void nfp_flower_setup_indr_tc_release(void *cb_priv) |
---|
| 1761 | +{ |
---|
| 1762 | + struct nfp_flower_indr_block_cb_priv *priv = cb_priv; |
---|
| 1763 | + |
---|
| 1764 | + list_del(&priv->list); |
---|
| 1765 | + kfree(priv); |
---|
| 1766 | +} |
---|
| 1767 | + |
---|
| 1768 | +static int |
---|
| 1769 | +nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct Qdisc *sch, struct nfp_app *app, |
---|
| 1770 | + struct flow_block_offload *f, void *data, |
---|
| 1771 | + void (*cleanup)(struct flow_block_cb *block_cb)) |
---|
| 1772 | +{ |
---|
| 1773 | + struct nfp_flower_indr_block_cb_priv *cb_priv; |
---|
| 1774 | + struct nfp_flower_priv *priv = app->priv; |
---|
| 1775 | + struct flow_block_cb *block_cb; |
---|
| 1776 | + |
---|
| 1777 | + if ((f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS && |
---|
| 1778 | + !nfp_flower_internal_port_can_offload(app, netdev)) || |
---|
| 1779 | + (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS && |
---|
| 1780 | + nfp_flower_internal_port_can_offload(app, netdev))) |
---|
| 1781 | + return -EOPNOTSUPP; |
---|
| 1782 | + |
---|
| 1783 | + switch (f->command) { |
---|
| 1784 | + case FLOW_BLOCK_BIND: |
---|
| 1785 | + cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev); |
---|
| 1786 | + if (cb_priv && |
---|
| 1787 | + flow_block_cb_is_busy(nfp_flower_setup_indr_block_cb, |
---|
| 1788 | + cb_priv, |
---|
| 1789 | + &nfp_block_cb_list)) |
---|
| 1790 | + return -EBUSY; |
---|
| 1791 | + |
---|
| 1792 | + cb_priv = kmalloc(sizeof(*cb_priv), GFP_KERNEL); |
---|
| 1793 | + if (!cb_priv) |
---|
| 1794 | + return -ENOMEM; |
---|
| 1795 | + |
---|
| 1796 | + cb_priv->netdev = netdev; |
---|
| 1797 | + cb_priv->app = app; |
---|
| 1798 | + list_add(&cb_priv->list, &priv->indr_block_cb_priv); |
---|
| 1799 | + |
---|
| 1800 | + block_cb = flow_indr_block_cb_alloc(nfp_flower_setup_indr_block_cb, |
---|
| 1801 | + cb_priv, cb_priv, |
---|
| 1802 | + nfp_flower_setup_indr_tc_release, |
---|
| 1803 | + f, netdev, sch, data, app, cleanup); |
---|
| 1804 | + if (IS_ERR(block_cb)) { |
---|
| 1805 | + list_del(&cb_priv->list); |
---|
| 1806 | + kfree(cb_priv); |
---|
| 1807 | + return PTR_ERR(block_cb); |
---|
| 1808 | + } |
---|
| 1809 | + |
---|
| 1810 | + flow_block_cb_add(block_cb, f); |
---|
| 1811 | + list_add_tail(&block_cb->driver_list, &nfp_block_cb_list); |
---|
| 1812 | + return 0; |
---|
| 1813 | + case FLOW_BLOCK_UNBIND: |
---|
| 1814 | + cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev); |
---|
| 1815 | + if (!cb_priv) |
---|
| 1816 | + return -ENOENT; |
---|
| 1817 | + |
---|
| 1818 | + block_cb = flow_block_cb_lookup(f->block, |
---|
| 1819 | + nfp_flower_setup_indr_block_cb, |
---|
| 1820 | + cb_priv); |
---|
| 1821 | + if (!block_cb) |
---|
| 1822 | + return -ENOENT; |
---|
| 1823 | + |
---|
| 1824 | + flow_indr_block_cb_remove(block_cb, f); |
---|
| 1825 | + list_del(&block_cb->driver_list); |
---|
| 1826 | + return 0; |
---|
| 1827 | + default: |
---|
| 1828 | + return -EOPNOTSUPP; |
---|
| 1829 | + } |
---|
| 1831 | +} |
---|
| 1832 | + |
---|
| 1833 | +int |
---|
| 1834 | +nfp_flower_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch, void *cb_priv, |
---|
| 1835 | + enum tc_setup_type type, void *type_data, |
---|
| 1836 | + void *data, |
---|
| 1837 | + void (*cleanup)(struct flow_block_cb *block_cb)) |
---|
| 1838 | +{ |
---|
| 1839 | + if (!nfp_fl_is_netdev_to_offload(netdev)) |
---|
| 1840 | + return -EOPNOTSUPP; |
---|
| 1841 | + |
---|
| 1842 | + switch (type) { |
---|
| 1843 | + case TC_SETUP_BLOCK: |
---|
| 1844 | + return nfp_flower_setup_indr_tc_block(netdev, sch, cb_priv, |
---|
| 1845 | + type_data, data, cleanup); |
---|
| 1846 | + default: |
---|
| 1847 | + return -EOPNOTSUPP; |
---|
| 1848 | + } |
---|
| 1849 | +} |
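nfp_flower_indr_setup_tc_cb() only fires once it is registered with the flow offload core; the registration itself lives in the driver's init and cleanup paths, which this hunk does not show. A sketch of what that pairing looks like (the `example_*` function names are illustrative):

```c
#include <net/flow_offload.h>

/* Ask the core to call back for blocks bound on foreign netdevs
 * (e.g. tunnel devices) that the driver cannot see directly.
 */
static int example_indr_init(struct nfp_app *app)
{
	return flow_indr_dev_register(nfp_flower_indr_setup_tc_cb, app);
}

static void example_indr_clean(struct nfp_app *app)
{
	/* The release callback frees per-block state still in flight. */
	flow_indr_dev_unregister(nfp_flower_indr_setup_tc_cb, app,
				 nfp_flower_setup_indr_tc_release);
}
```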
---|