2023-12-06 08f87f769b595151be1afeff53e144f543faa614
kernel/drivers/net/ethernet/netronome/nfp/flower/match.c
@@ -1,35 +1,5 @@
-/*
- * Copyright (C) 2017 Netronome Systems, Inc.
- *
- * This software is dual licensed under the GNU General License Version 2,
- * June 1991 as shown in the file COPYING in the top-level directory of this
- * source tree or the BSD 2-Clause License provided below. You have the
- * option to license this software under the complete terms of either license.
- *
- * The BSD 2-Clause License:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      1. Redistributions of source code must retain the above
- *         copyright notice, this list of conditions and the following
- *         disclaimer.
- *
- *      2. Redistributions in binary form must reproduce the above
- *         copyright notice, this list of conditions and the following
- *         disclaimer in the documentation and/or other materials
- *         provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
 
 #include <linux/bitfield.h>
 #include <net/pkt_cls.h>
@@ -38,30 +8,40 @@
 #include "main.h"
 
 static void
-nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *frame,
-			    struct tc_cls_flower_offload *flow, u8 key_type,
-			    bool mask_version)
+nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *ext,
+			    struct nfp_flower_meta_tci *msk,
+			    struct flow_rule *rule, u8 key_type, bool qinq_sup)
 {
-	struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
-	struct flow_dissector_key_vlan *flow_vlan;
 	u16 tmp_tci;
 
-	memset(frame, 0, sizeof(struct nfp_flower_meta_tci));
-	/* Populate the metadata frame. */
-	frame->nfp_flow_key_layer = key_type;
-	frame->mask_id = ~0;
+	memset(ext, 0, sizeof(struct nfp_flower_meta_tci));
+	memset(msk, 0, sizeof(struct nfp_flower_meta_tci));
 
-	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
-		flow_vlan = skb_flow_dissector_target(flow->dissector,
-						      FLOW_DISSECTOR_KEY_VLAN,
-						      target);
+	/* Populate the metadata frame. */
+	ext->nfp_flow_key_layer = key_type;
+	ext->mask_id = ~0;
+
+	msk->nfp_flow_key_layer = key_type;
+	msk->mask_id = ~0;
+
+	if (!qinq_sup && flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+		struct flow_match_vlan match;
+
+		flow_rule_match_vlan(rule, &match);
 		/* Populate the tci field. */
 		tmp_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
 		tmp_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
-				      flow_vlan->vlan_priority) |
+				      match.key->vlan_priority) |
 			   FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
-				      flow_vlan->vlan_id);
-		frame->tci = cpu_to_be16(tmp_tci);
+				      match.key->vlan_id);
+		ext->tci = cpu_to_be16(tmp_tci);
+
+		tmp_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
+		tmp_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
+				      match.mask->vlan_priority) |
+			   FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
+				      match.mask->vlan_id);
+		msk->tci = cpu_to_be16(tmp_tci);
 	}
 }
 
@@ -73,260 +53,439 @@
 
 static int
 nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port,
-			bool mask_version, enum nfp_flower_tun_type tun_type)
+			bool mask_version, enum nfp_flower_tun_type tun_type,
+			struct netlink_ext_ack *extack)
 {
 	if (mask_version) {
 		frame->in_port = cpu_to_be32(~0);
 		return 0;
 	}
 
-	if (tun_type)
+	if (tun_type) {
 		frame->in_port = cpu_to_be32(NFP_FL_PORT_TYPE_TUN | tun_type);
-	else
+	} else {
+		if (!cmsg_port) {
+			NL_SET_ERR_MSG_MOD(extack, "unsupported offload: invalid ingress interface for match offload");
+			return -EOPNOTSUPP;
+		}
 		frame->in_port = cpu_to_be32(cmsg_port);
+	}
 
 	return 0;
 }
 
-static void
-nfp_flower_compile_mac(struct nfp_flower_mac_mpls *frame,
-		       struct tc_cls_flower_offload *flow,
-		       bool mask_version)
+static int
+nfp_flower_compile_mac(struct nfp_flower_mac_mpls *ext,
+		       struct nfp_flower_mac_mpls *msk, struct flow_rule *rule,
+		       struct netlink_ext_ack *extack)
 {
-	struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
-	struct flow_dissector_key_eth_addrs *addr;
+	memset(ext, 0, sizeof(struct nfp_flower_mac_mpls));
+	memset(msk, 0, sizeof(struct nfp_flower_mac_mpls));
 
-	memset(frame, 0, sizeof(struct nfp_flower_mac_mpls));
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+		struct flow_match_eth_addrs match;
 
-	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
-		addr = skb_flow_dissector_target(flow->dissector,
-						 FLOW_DISSECTOR_KEY_ETH_ADDRS,
-						 target);
+		flow_rule_match_eth_addrs(rule, &match);
 		/* Populate mac frame. */
-		ether_addr_copy(frame->mac_dst, &addr->dst[0]);
-		ether_addr_copy(frame->mac_src, &addr->src[0]);
+		ether_addr_copy(ext->mac_dst, &match.key->dst[0]);
+		ether_addr_copy(ext->mac_src, &match.key->src[0]);
+		ether_addr_copy(msk->mac_dst, &match.mask->dst[0]);
+		ether_addr_copy(msk->mac_src, &match.mask->src[0]);
 	}
 
-	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_MPLS)) {
-		struct flow_dissector_key_mpls *mpls;
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) {
+		struct flow_match_mpls match;
 		u32 t_mpls;
 
-		mpls = skb_flow_dissector_target(flow->dissector,
-						 FLOW_DISSECTOR_KEY_MPLS,
-						 target);
+		flow_rule_match_mpls(rule, &match);
 
-		t_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB, mpls->mpls_label) |
-			 FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC, mpls->mpls_tc) |
-			 FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS, mpls->mpls_bos) |
+		/* Only support matching the first LSE */
+		if (match.mask->used_lses != 1) {
+			NL_SET_ERR_MSG_MOD(extack,
+					   "unsupported offload: invalid LSE depth for MPLS match offload");
+			return -EOPNOTSUPP;
+		}
+
+		t_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB,
+				    match.key->ls[0].mpls_label) |
+			 FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC,
+				    match.key->ls[0].mpls_tc) |
+			 FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS,
+				    match.key->ls[0].mpls_bos) |
 			 NFP_FLOWER_MASK_MPLS_Q;
-
-		frame->mpls_lse = cpu_to_be32(t_mpls);
-	} else if (dissector_uses_key(flow->dissector,
-				      FLOW_DISSECTOR_KEY_BASIC)) {
+		ext->mpls_lse = cpu_to_be32(t_mpls);
+		t_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB,
+				    match.mask->ls[0].mpls_label) |
+			 FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC,
+				    match.mask->ls[0].mpls_tc) |
+			 FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS,
+				    match.mask->ls[0].mpls_bos) |
+			 NFP_FLOWER_MASK_MPLS_Q;
+		msk->mpls_lse = cpu_to_be32(t_mpls);
+	} else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
 		/* Check for mpls ether type and set NFP_FLOWER_MASK_MPLS_Q
 		 * bit, which indicates an mpls ether type but without any
 		 * mpls fields.
 		 */
-		struct flow_dissector_key_basic *key_basic;
+		struct flow_match_basic match;
 
-		key_basic = skb_flow_dissector_target(flow->dissector,
-						      FLOW_DISSECTOR_KEY_BASIC,
-						      flow->key);
-		if (key_basic->n_proto == cpu_to_be16(ETH_P_MPLS_UC) ||
-		    key_basic->n_proto == cpu_to_be16(ETH_P_MPLS_MC))
-			frame->mpls_lse = cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q);
+		flow_rule_match_basic(rule, &match);
+		if (match.key->n_proto == cpu_to_be16(ETH_P_MPLS_UC) ||
+		    match.key->n_proto == cpu_to_be16(ETH_P_MPLS_MC)) {
+			ext->mpls_lse = cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q);
+			msk->mpls_lse = cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q);
+		}
 	}
-}
-
-static void
-nfp_flower_compile_tport(struct nfp_flower_tp_ports *frame,
-			 struct tc_cls_flower_offload *flow,
-			 bool mask_version)
-{
-	struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
-	struct flow_dissector_key_ports *tp;
-
-	memset(frame, 0, sizeof(struct nfp_flower_tp_ports));
-
-	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
-		tp = skb_flow_dissector_target(flow->dissector,
-					       FLOW_DISSECTOR_KEY_PORTS,
-					       target);
-		frame->port_src = tp->src;
-		frame->port_dst = tp->dst;
-	}
-}
-
-static void
-nfp_flower_compile_ip_ext(struct nfp_flower_ip_ext *frame,
-			  struct tc_cls_flower_offload *flow,
-			  bool mask_version)
-{
-	struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
-
-	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
-		struct flow_dissector_key_basic *basic;
-
-		basic = skb_flow_dissector_target(flow->dissector,
-						  FLOW_DISSECTOR_KEY_BASIC,
-						  target);
-		frame->proto = basic->ip_proto;
-	}
-
-	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_IP)) {
-		struct flow_dissector_key_ip *flow_ip;
-
-		flow_ip = skb_flow_dissector_target(flow->dissector,
-						    FLOW_DISSECTOR_KEY_IP,
-						    target);
-		frame->tos = flow_ip->tos;
-		frame->ttl = flow_ip->ttl;
-	}
-
-	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_TCP)) {
-		struct flow_dissector_key_tcp *tcp;
-		u32 tcp_flags;
-
-		tcp = skb_flow_dissector_target(flow->dissector,
-						FLOW_DISSECTOR_KEY_TCP, target);
-		tcp_flags = be16_to_cpu(tcp->flags);
-
-		if (tcp_flags & TCPHDR_FIN)
-			frame->flags |= NFP_FL_TCP_FLAG_FIN;
-		if (tcp_flags & TCPHDR_SYN)
-			frame->flags |= NFP_FL_TCP_FLAG_SYN;
-		if (tcp_flags & TCPHDR_RST)
-			frame->flags |= NFP_FL_TCP_FLAG_RST;
-		if (tcp_flags & TCPHDR_PSH)
-			frame->flags |= NFP_FL_TCP_FLAG_PSH;
-		if (tcp_flags & TCPHDR_URG)
-			frame->flags |= NFP_FL_TCP_FLAG_URG;
-	}
-
-	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
-		struct flow_dissector_key_control *key;
-
-		key = skb_flow_dissector_target(flow->dissector,
-						FLOW_DISSECTOR_KEY_CONTROL,
-						target);
-		if (key->flags & FLOW_DIS_IS_FRAGMENT)
-			frame->flags |= NFP_FL_IP_FRAGMENTED;
-		if (key->flags & FLOW_DIS_FIRST_FRAG)
-			frame->flags |= NFP_FL_IP_FRAG_FIRST;
-	}
-}
-
-static void
-nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *frame,
-			struct tc_cls_flower_offload *flow,
-			bool mask_version)
-{
-	struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
-	struct flow_dissector_key_ipv4_addrs *addr;
-
-	memset(frame, 0, sizeof(struct nfp_flower_ipv4));
-
-	if (dissector_uses_key(flow->dissector,
-			       FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
-		addr = skb_flow_dissector_target(flow->dissector,
-						 FLOW_DISSECTOR_KEY_IPV4_ADDRS,
-						 target);
-		frame->ipv4_src = addr->src;
-		frame->ipv4_dst = addr->dst;
-	}
-
-	nfp_flower_compile_ip_ext(&frame->ip_ext, flow, mask_version);
-}
-
-static void
-nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *frame,
-			struct tc_cls_flower_offload *flow,
-			bool mask_version)
-{
-	struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
-	struct flow_dissector_key_ipv6_addrs *addr;
-
-	memset(frame, 0, sizeof(struct nfp_flower_ipv6));
-
-	if (dissector_uses_key(flow->dissector,
-			       FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
-		addr = skb_flow_dissector_target(flow->dissector,
-						 FLOW_DISSECTOR_KEY_IPV6_ADDRS,
-						 target);
-		frame->ipv6_src = addr->src;
-		frame->ipv6_dst = addr->dst;
-	}
-
-	nfp_flower_compile_ip_ext(&frame->ip_ext, flow, mask_version);
-}
-
-static int
-nfp_flower_compile_geneve_opt(void *key_buf, struct tc_cls_flower_offload *flow,
-			      bool mask_version)
-{
-	struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
-	struct flow_dissector_key_enc_opts *opts;
-
-	opts = skb_flow_dissector_target(flow->dissector,
-					 FLOW_DISSECTOR_KEY_ENC_OPTS,
-					 target);
-	memcpy(key_buf, opts->data, opts->len);
 
 	return 0;
 }
 
 static void
-nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *frame,
-				struct tc_cls_flower_offload *flow,
-				bool mask_version)
+nfp_flower_compile_tport(struct nfp_flower_tp_ports *ext,
+			 struct nfp_flower_tp_ports *msk,
+			 struct flow_rule *rule)
 {
-	struct fl_flow_key *target = mask_version ? flow->mask : flow->key;
-	struct flow_dissector_key_ipv4_addrs *tun_ips;
-	struct flow_dissector_key_keyid *vni;
-	struct flow_dissector_key_ip *ip;
+	memset(ext, 0, sizeof(struct nfp_flower_tp_ports));
+	memset(msk, 0, sizeof(struct nfp_flower_tp_ports));
 
-	memset(frame, 0, sizeof(struct nfp_flower_ipv4_udp_tun));
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
+		struct flow_match_ports match;
 
-	if (dissector_uses_key(flow->dissector,
-			       FLOW_DISSECTOR_KEY_ENC_KEYID)) {
-		u32 temp_vni;
-
-		vni = skb_flow_dissector_target(flow->dissector,
-						FLOW_DISSECTOR_KEY_ENC_KEYID,
-						target);
-		temp_vni = be32_to_cpu(vni->keyid) << NFP_FL_TUN_VNI_OFFSET;
-		frame->tun_id = cpu_to_be32(temp_vni);
-	}
-
-	if (dissector_uses_key(flow->dissector,
-			       FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
-		tun_ips =
-		   skb_flow_dissector_target(flow->dissector,
-					     FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
-					     target);
-		frame->ip_src = tun_ips->src;
-		frame->ip_dst = tun_ips->dst;
-	}
-
-	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_ENC_IP)) {
-		ip = skb_flow_dissector_target(flow->dissector,
-					       FLOW_DISSECTOR_KEY_ENC_IP,
-					       target);
-		frame->tos = ip->tos;
-		frame->ttl = ip->ttl;
+		flow_rule_match_ports(rule, &match);
+		ext->port_src = match.key->src;
+		ext->port_dst = match.key->dst;
+		msk->port_src = match.mask->src;
+		msk->port_dst = match.mask->dst;
 	}
 }
 
-int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
+static void
+nfp_flower_compile_ip_ext(struct nfp_flower_ip_ext *ext,
+			  struct nfp_flower_ip_ext *msk, struct flow_rule *rule)
+{
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+		struct flow_match_basic match;
+
+		flow_rule_match_basic(rule, &match);
+		ext->proto = match.key->ip_proto;
+		msk->proto = match.mask->ip_proto;
+	}
+
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
+		struct flow_match_ip match;
+
+		flow_rule_match_ip(rule, &match);
+		ext->tos = match.key->tos;
+		ext->ttl = match.key->ttl;
+		msk->tos = match.mask->tos;
+		msk->ttl = match.mask->ttl;
+	}
+
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
+		u16 tcp_flags, tcp_flags_mask;
+		struct flow_match_tcp match;
+
+		flow_rule_match_tcp(rule, &match);
+		tcp_flags = be16_to_cpu(match.key->flags);
+		tcp_flags_mask = be16_to_cpu(match.mask->flags);
+
+		if (tcp_flags & TCPHDR_FIN)
+			ext->flags |= NFP_FL_TCP_FLAG_FIN;
+		if (tcp_flags_mask & TCPHDR_FIN)
+			msk->flags |= NFP_FL_TCP_FLAG_FIN;
+
+		if (tcp_flags & TCPHDR_SYN)
+			ext->flags |= NFP_FL_TCP_FLAG_SYN;
+		if (tcp_flags_mask & TCPHDR_SYN)
+			msk->flags |= NFP_FL_TCP_FLAG_SYN;
+
+		if (tcp_flags & TCPHDR_RST)
+			ext->flags |= NFP_FL_TCP_FLAG_RST;
+		if (tcp_flags_mask & TCPHDR_RST)
+			msk->flags |= NFP_FL_TCP_FLAG_RST;
+
+		if (tcp_flags & TCPHDR_PSH)
+			ext->flags |= NFP_FL_TCP_FLAG_PSH;
+		if (tcp_flags_mask & TCPHDR_PSH)
+			msk->flags |= NFP_FL_TCP_FLAG_PSH;
+
+		if (tcp_flags & TCPHDR_URG)
+			ext->flags |= NFP_FL_TCP_FLAG_URG;
+		if (tcp_flags_mask & TCPHDR_URG)
+			msk->flags |= NFP_FL_TCP_FLAG_URG;
+	}
+
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
+		struct flow_match_control match;
+
+		flow_rule_match_control(rule, &match);
+		if (match.key->flags & FLOW_DIS_IS_FRAGMENT)
+			ext->flags |= NFP_FL_IP_FRAGMENTED;
+		if (match.mask->flags & FLOW_DIS_IS_FRAGMENT)
+			msk->flags |= NFP_FL_IP_FRAGMENTED;
+		if (match.key->flags & FLOW_DIS_FIRST_FRAG)
+			ext->flags |= NFP_FL_IP_FRAG_FIRST;
+		if (match.mask->flags & FLOW_DIS_FIRST_FRAG)
+			msk->flags |= NFP_FL_IP_FRAG_FIRST;
+	}
+}
+
+static void
+nfp_flower_fill_vlan(struct flow_dissector_key_vlan *key,
+		     struct nfp_flower_vlan *frame,
+		     bool outer_vlan)
+{
+	u16 tci;
+
+	tci = NFP_FLOWER_MASK_VLAN_PRESENT;
+	tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
+			  key->vlan_priority) |
+	       FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
+			  key->vlan_id);
+
+	if (outer_vlan) {
+		frame->outer_tci = cpu_to_be16(tci);
+		frame->outer_tpid = key->vlan_tpid;
+	} else {
+		frame->inner_tci = cpu_to_be16(tci);
+		frame->inner_tpid = key->vlan_tpid;
+	}
+}
+
+static void
+nfp_flower_compile_vlan(struct nfp_flower_vlan *ext,
+			struct nfp_flower_vlan *msk,
+			struct flow_rule *rule)
+{
+	struct flow_match_vlan match;
+
+	memset(ext, 0, sizeof(struct nfp_flower_vlan));
+	memset(msk, 0, sizeof(struct nfp_flower_vlan));
+
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+		flow_rule_match_vlan(rule, &match);
+		nfp_flower_fill_vlan(match.key, ext, true);
+		nfp_flower_fill_vlan(match.mask, msk, true);
+	}
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
+		flow_rule_match_cvlan(rule, &match);
+		nfp_flower_fill_vlan(match.key, ext, false);
+		nfp_flower_fill_vlan(match.mask, msk, false);
+	}
+}
+
+static void
+nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *ext,
+			struct nfp_flower_ipv4 *msk, struct flow_rule *rule)
+{
+	struct flow_match_ipv4_addrs match;
+
+	memset(ext, 0, sizeof(struct nfp_flower_ipv4));
+	memset(msk, 0, sizeof(struct nfp_flower_ipv4));
+
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
+		flow_rule_match_ipv4_addrs(rule, &match);
+		ext->ipv4_src = match.key->src;
+		ext->ipv4_dst = match.key->dst;
+		msk->ipv4_src = match.mask->src;
+		msk->ipv4_dst = match.mask->dst;
+	}
+
+	nfp_flower_compile_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
+}
+
+static void
+nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *ext,
+			struct nfp_flower_ipv6 *msk, struct flow_rule *rule)
+{
+	memset(ext, 0, sizeof(struct nfp_flower_ipv6));
+	memset(msk, 0, sizeof(struct nfp_flower_ipv6));
+
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
+		struct flow_match_ipv6_addrs match;
+
+		flow_rule_match_ipv6_addrs(rule, &match);
+		ext->ipv6_src = match.key->src;
+		ext->ipv6_dst = match.key->dst;
+		msk->ipv6_src = match.mask->src;
+		msk->ipv6_dst = match.mask->dst;
+	}
+
+	nfp_flower_compile_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
+}
+
+static int
+nfp_flower_compile_geneve_opt(void *ext, void *msk, struct flow_rule *rule)
+{
+	struct flow_match_enc_opts match;
+
+	flow_rule_match_enc_opts(rule, &match);
+	memcpy(ext, match.key->data, match.key->len);
+	memcpy(msk, match.mask->data, match.mask->len);
+
+	return 0;
+}
+
+static void
+nfp_flower_compile_tun_ipv4_addrs(struct nfp_flower_tun_ipv4 *ext,
+				  struct nfp_flower_tun_ipv4 *msk,
+				  struct flow_rule *rule)
+{
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
+		struct flow_match_ipv4_addrs match;
+
+		flow_rule_match_enc_ipv4_addrs(rule, &match);
+		ext->src = match.key->src;
+		ext->dst = match.key->dst;
+		msk->src = match.mask->src;
+		msk->dst = match.mask->dst;
+	}
+}
+
+static void
+nfp_flower_compile_tun_ipv6_addrs(struct nfp_flower_tun_ipv6 *ext,
+				  struct nfp_flower_tun_ipv6 *msk,
+				  struct flow_rule *rule)
+{
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) {
+		struct flow_match_ipv6_addrs match;
+
+		flow_rule_match_enc_ipv6_addrs(rule, &match);
+		ext->src = match.key->src;
+		ext->dst = match.key->dst;
+		msk->src = match.mask->src;
+		msk->dst = match.mask->dst;
+	}
+}
+
+static void
+nfp_flower_compile_tun_ip_ext(struct nfp_flower_tun_ip_ext *ext,
+			      struct nfp_flower_tun_ip_ext *msk,
+			      struct flow_rule *rule)
+{
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) {
+		struct flow_match_ip match;
+
+		flow_rule_match_enc_ip(rule, &match);
+		ext->tos = match.key->tos;
+		ext->ttl = match.key->ttl;
+		msk->tos = match.mask->tos;
+		msk->ttl = match.mask->ttl;
+	}
+}
+
+static void
+nfp_flower_compile_tun_udp_key(__be32 *key, __be32 *key_msk,
+			       struct flow_rule *rule)
+{
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
+		struct flow_match_enc_keyid match;
+		u32 vni;
+
+		flow_rule_match_enc_keyid(rule, &match);
+		vni = be32_to_cpu(match.key->keyid) << NFP_FL_TUN_VNI_OFFSET;
+		*key = cpu_to_be32(vni);
+		vni = be32_to_cpu(match.mask->keyid) << NFP_FL_TUN_VNI_OFFSET;
+		*key_msk = cpu_to_be32(vni);
+	}
+}
+
+static void
+nfp_flower_compile_tun_gre_key(__be32 *key, __be32 *key_msk, __be16 *flags,
+			       __be16 *flags_msk, struct flow_rule *rule)
+{
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
+		struct flow_match_enc_keyid match;
+
+		flow_rule_match_enc_keyid(rule, &match);
+		*key = match.key->keyid;
+		*key_msk = match.mask->keyid;
+
+		*flags = cpu_to_be16(NFP_FL_GRE_FLAG_KEY);
+		*flags_msk = cpu_to_be16(NFP_FL_GRE_FLAG_KEY);
+	}
+}
+
+static void
+nfp_flower_compile_ipv4_gre_tun(struct nfp_flower_ipv4_gre_tun *ext,
+				struct nfp_flower_ipv4_gre_tun *msk,
+				struct flow_rule *rule)
+{
+	memset(ext, 0, sizeof(struct nfp_flower_ipv4_gre_tun));
+	memset(msk, 0, sizeof(struct nfp_flower_ipv4_gre_tun));
+
+	/* NVGRE is the only supported GRE tunnel type */
+	ext->ethertype = cpu_to_be16(ETH_P_TEB);
+	msk->ethertype = cpu_to_be16(~0);
+
+	nfp_flower_compile_tun_ipv4_addrs(&ext->ipv4, &msk->ipv4, rule);
+	nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
+	nfp_flower_compile_tun_gre_key(&ext->tun_key, &msk->tun_key,
+				       &ext->tun_flags, &msk->tun_flags, rule);
+}
+
+static void
+nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *ext,
+				struct nfp_flower_ipv4_udp_tun *msk,
+				struct flow_rule *rule)
+{
+	memset(ext, 0, sizeof(struct nfp_flower_ipv4_udp_tun));
+	memset(msk, 0, sizeof(struct nfp_flower_ipv4_udp_tun));
+
+	nfp_flower_compile_tun_ipv4_addrs(&ext->ipv4, &msk->ipv4, rule);
+	nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
+	nfp_flower_compile_tun_udp_key(&ext->tun_id, &msk->tun_id, rule);
+}
+
+static void
+nfp_flower_compile_ipv6_udp_tun(struct nfp_flower_ipv6_udp_tun *ext,
+				struct nfp_flower_ipv6_udp_tun *msk,
+				struct flow_rule *rule)
+{
+	memset(ext, 0, sizeof(struct nfp_flower_ipv6_udp_tun));
+	memset(msk, 0, sizeof(struct nfp_flower_ipv6_udp_tun));
+
+	nfp_flower_compile_tun_ipv6_addrs(&ext->ipv6, &msk->ipv6, rule);
+	nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
+	nfp_flower_compile_tun_udp_key(&ext->tun_id, &msk->tun_id, rule);
+}
+
+static void
+nfp_flower_compile_ipv6_gre_tun(struct nfp_flower_ipv6_gre_tun *ext,
+				struct nfp_flower_ipv6_gre_tun *msk,
+				struct flow_rule *rule)
+{
+	memset(ext, 0, sizeof(struct nfp_flower_ipv6_gre_tun));
+	memset(msk, 0, sizeof(struct nfp_flower_ipv6_gre_tun));
+
+	/* NVGRE is the only supported GRE tunnel type */
+	ext->ethertype = cpu_to_be16(ETH_P_TEB);
+	msk->ethertype = cpu_to_be16(~0);
+
+	nfp_flower_compile_tun_ipv6_addrs(&ext->ipv6, &msk->ipv6, rule);
+	nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, rule);
+	nfp_flower_compile_tun_gre_key(&ext->tun_key, &msk->tun_key,
+				       &ext->tun_flags, &msk->tun_flags, rule);
+}
+
+int nfp_flower_compile_flow_match(struct nfp_app *app,
+				  struct flow_cls_offload *flow,
 				  struct nfp_fl_key_ls *key_ls,
 				  struct net_device *netdev,
 				  struct nfp_fl_payload *nfp_flow,
-				  enum nfp_flower_tun_type tun_type)
+				  enum nfp_flower_tun_type tun_type,
+				  struct netlink_ext_ack *extack)
 {
-	struct nfp_repr *netdev_repr;
+	struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
+	struct nfp_flower_priv *priv = app->priv;
+	bool qinq_sup;
+	u32 port_id;
+	int ext_len;
 	int err;
 	u8 *ext;
 	u8 *msk;
+
+	port_id = nfp_flower_get_port_id_from_netdev(app, netdev);
 
 	memset(nfp_flow->unmasked_data, 0, key_ls->key_size);
 	memset(nfp_flow->mask_data, 0, key_ls->key_size);
@@ -334,12 +493,11 @@
 	ext = nfp_flow->unmasked_data;
 	msk = nfp_flow->mask_data;
 
-	/* Populate Exact Metadata. */
+	qinq_sup = !!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ);
+
 	nfp_flower_compile_meta_tci((struct nfp_flower_meta_tci *)ext,
-				    flow, key_ls->key_layer, false);
-	/* Populate Mask Metadata. */
-	nfp_flower_compile_meta_tci((struct nfp_flower_meta_tci *)msk,
-				    flow, key_ls->key_layer, true);
+				    (struct nfp_flower_meta_tci *)msk,
+				    rule, key_ls->key_layer, qinq_sup);
 	ext += sizeof(struct nfp_flower_meta_tci);
 	msk += sizeof(struct nfp_flower_meta_tci);
 
@@ -355,15 +513,13 @@
 
 	/* Populate Exact Port data. */
 	err = nfp_flower_compile_port((struct nfp_flower_in_port *)ext,
-				      nfp_repr_get_port_id(netdev),
-				      false, tun_type);
+				      port_id, false, tun_type, extack);
 	if (err)
 		return err;
 
 	/* Populate Mask Port Data. */
 	err = nfp_flower_compile_port((struct nfp_flower_in_port *)msk,
-				      nfp_repr_get_port_id(netdev),
-				      true, tun_type);
+				      port_id, true, tun_type, extack);
 	if (err)
 		return err;
 
@@ -371,83 +527,134 @@
 	msk += sizeof(struct nfp_flower_in_port);
 
 	if (NFP_FLOWER_LAYER_MAC & key_ls->key_layer) {
-		/* Populate Exact MAC Data. */
-		nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)ext,
-				       flow, false);
-		/* Populate Mask MAC Data. */
-		nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)msk,
-				       flow, true);
+		err = nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)ext,
+					     (struct nfp_flower_mac_mpls *)msk,
+					     rule, extack);
+		if (err)
+			return err;
+
 		ext += sizeof(struct nfp_flower_mac_mpls);
 		msk += sizeof(struct nfp_flower_mac_mpls);
 	}
 
 	if (NFP_FLOWER_LAYER_TP & key_ls->key_layer) {
-		/* Populate Exact TP Data. */
 		nfp_flower_compile_tport((struct nfp_flower_tp_ports *)ext,
-					 flow, false);
-		/* Populate Mask TP Data. */
-		nfp_flower_compile_tport((struct nfp_flower_tp_ports *)msk,
-					 flow, true);
+					 (struct nfp_flower_tp_ports *)msk,
+					 rule);
 		ext += sizeof(struct nfp_flower_tp_ports);
 		msk += sizeof(struct nfp_flower_tp_ports);
 	}
 
 	if (NFP_FLOWER_LAYER_IPV4 & key_ls->key_layer) {
-		/* Populate Exact IPv4 Data. */
 		nfp_flower_compile_ipv4((struct nfp_flower_ipv4 *)ext,
-					flow, false);
-		/* Populate Mask IPv4 Data. */
-		nfp_flower_compile_ipv4((struct nfp_flower_ipv4 *)msk,
-					flow, true);
+					(struct nfp_flower_ipv4 *)msk,
+					rule);
 		ext += sizeof(struct nfp_flower_ipv4);
 		msk += sizeof(struct nfp_flower_ipv4);
 	}
 
 	if (NFP_FLOWER_LAYER_IPV6 & key_ls->key_layer) {
-		/* Populate Exact IPv4 Data. */
 		nfp_flower_compile_ipv6((struct nfp_flower_ipv6 *)ext,
-					flow, false);
-		/* Populate Mask IPv4 Data. */
-		nfp_flower_compile_ipv6((struct nfp_flower_ipv6 *)msk,
-					flow, true);
+					(struct nfp_flower_ipv6 *)msk,
+					rule);
 		ext += sizeof(struct nfp_flower_ipv6);
 		msk += sizeof(struct nfp_flower_ipv6);
 	}
 
-	if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN ||
-	    key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE) {
-		__be32 tun_dst;
+	if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GRE) {
+		if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) {
+			struct nfp_flower_ipv6_gre_tun *gre_match;
+			struct nfp_ipv6_addr_entry *entry;
+			struct in6_addr *dst;
 
-		/* Populate Exact VXLAN Data. */
-		nfp_flower_compile_ipv4_udp_tun((void *)ext, flow, false);
-		/* Populate Mask VXLAN Data. */
-		nfp_flower_compile_ipv4_udp_tun((void *)msk, flow, true);
-		tun_dst = ((struct nfp_flower_ipv4_udp_tun *)ext)->ip_dst;
-		ext += sizeof(struct nfp_flower_ipv4_udp_tun);
-		msk += sizeof(struct nfp_flower_ipv4_udp_tun);
+			nfp_flower_compile_ipv6_gre_tun((void *)ext,
+							(void *)msk, rule);
+			gre_match = (struct nfp_flower_ipv6_gre_tun *)ext;
+			dst = &gre_match->ipv6.dst;
+			ext += sizeof(struct nfp_flower_ipv6_gre_tun);
+			msk += sizeof(struct nfp_flower_ipv6_gre_tun);
 
-		/* Configure tunnel end point MAC. */
-		if (nfp_netdev_is_nfp_repr(netdev)) {
-			netdev_repr = netdev_priv(netdev);
-			nfp_tunnel_write_macs(netdev_repr->app);
+			entry = nfp_tunnel_add_ipv6_off(app, dst);
+			if (!entry)
+				return -EOPNOTSUPP;
+
+			nfp_flow->nfp_tun_ipv6 = entry;
+		} else {
+			__be32 dst;
+
+			nfp_flower_compile_ipv4_gre_tun((void *)ext,
+							(void *)msk, rule);
+			dst = ((struct nfp_flower_ipv4_gre_tun *)ext)->ipv4.dst;
+			ext += sizeof(struct nfp_flower_ipv4_gre_tun);
+			msk += sizeof(struct nfp_flower_ipv4_gre_tun);
 
 			/* Store the tunnel destination in the rule data.
 			 * This must be present and be an exact match.
 			 */
-			nfp_flow->nfp_tun_ipv4_addr = tun_dst;
-			nfp_tunnel_add_ipv4_off(netdev_repr->app, tun_dst);
+			nfp_flow->nfp_tun_ipv4_addr = dst;
+			nfp_tunnel_add_ipv4_off(app, dst);
+		}
+	}
+
+	if (NFP_FLOWER_LAYER2_QINQ & key_ls->key_layer_two) {
+		nfp_flower_compile_vlan((struct nfp_flower_vlan *)ext,
+					(struct nfp_flower_vlan *)msk,
+					rule);
+		ext += sizeof(struct nfp_flower_vlan);
+		msk += sizeof(struct nfp_flower_vlan);
+	}
+
+	if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN ||
+	    key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE) {
+		if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) {
+			struct nfp_flower_ipv6_udp_tun *udp_match;
+			struct nfp_ipv6_addr_entry *entry;
+			struct in6_addr *dst;
+
+			nfp_flower_compile_ipv6_udp_tun((void *)ext,
+							(void *)msk, rule);
+			udp_match = (struct nfp_flower_ipv6_udp_tun *)ext;
+			dst = &udp_match->ipv6.dst;
+			ext += sizeof(struct nfp_flower_ipv6_udp_tun);
+			msk += sizeof(struct nfp_flower_ipv6_udp_tun);
+
+			entry = nfp_tunnel_add_ipv6_off(app, dst);
+			if (!entry)
+				return -EOPNOTSUPP;
+
+			nfp_flow->nfp_tun_ipv6 = entry;
+		} else {
+			__be32 dst;
+
+			nfp_flower_compile_ipv4_udp_tun((void *)ext,
+							(void *)msk, rule);
+			dst = ((struct nfp_flower_ipv4_udp_tun *)ext)->ipv4.dst;
+			ext += sizeof(struct nfp_flower_ipv4_udp_tun);
+			msk += sizeof(struct nfp_flower_ipv4_udp_tun);
+
+			/* Store the tunnel destination in the rule data.
+			 * This must be present and be an exact match.
+			 */
+			nfp_flow->nfp_tun_ipv4_addr = dst;
+			nfp_tunnel_add_ipv4_off(app, dst);
 		}
 
 		if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE_OP) {
-			err = nfp_flower_compile_geneve_opt(ext, flow, false);
-			if (err)
-				return err;
-
-			err = nfp_flower_compile_geneve_opt(msk, flow, true);
+			err = nfp_flower_compile_geneve_opt(ext, msk, rule);
 			if (err)
 				return err;
 		}
 	}
 
+	/* Check that the flow key does not exceed the maximum limit.
+	 * All structures in the key are multiples of 4 bytes, so use u32.
+	 */
+	ext_len = (u32 *)ext - (u32 *)nfp_flow->unmasked_data;
+	if (ext_len > NFP_FLOWER_KEY_MAX_LW) {
+		NL_SET_ERR_MSG_MOD(extack,
+				   "unsupported offload: flow key too long");
+		return -EOPNOTSUPP;
+	}
+
 	return 0;
 }
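
The conversion above lands on one uniform pattern for every match field: probe the rule with flow_rule_match_key(), extract a struct flow_match_* that carries a key/mask pointer pair, then write match.key into the exact-match buffer (ext) and match.mask into the mask buffer (msk). A minimal self-contained sketch of that convention follows; struct example_l4 and example_compile_ports are illustrative stand-ins, not part of this change:

#include <linux/string.h>
#include <net/flow_offload.h>

/* Hypothetical two-field key layout, standing in for the nfp_flower_*
 * structures used in the patch above.
 */
struct example_l4 {
	__be16 port_src;
	__be16 port_dst;
};

/* Sketch of the ext/msk convention: one extraction fills both buffers,
 * key values into ext, the bits the classifier compares into msk.
 */
static void example_compile_ports(struct example_l4 *ext,
				  struct example_l4 *msk,
				  struct flow_rule *rule)
{
	memset(ext, 0, sizeof(*ext));
	memset(msk, 0, sizeof(*msk));

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_ports(rule, &match);
		ext->port_src = match.key->src;		/* exact value */
		ext->port_dst = match.key->dst;
		msk->port_src = match.mask->src;	/* compared bits */
		msk->port_dst = match.mask->dst;
	}
}

Compared with the old skb_flow_dissector_target() interface, which had to be called twice (once against flow->key and once against flow->mask, selected by a bool mask_version), a single flow_rule_match_*() call hands back both pointers at once; that is why each compile helper in the patch now takes an ext/msk pair instead of a mask_version flag.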