2024-01-31 f9004dbfff8a3fbbd7e2a88c8a4327c7f2f8e5b2
kernel/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -16,7 +16,9 @@
 #include <net/tc_act/tc_skbedit.h>
 #include <net/tc_act/tc_mirred.h>
 #include <net/tc_act/tc_vlan.h>
+#include <net/tc_act/tc_pedit.h>
 #include <net/tc_act/tc_tunnel_key.h>
+#include <net/vxlan.h>
 
 #include "bnxt_hsi.h"
 #include "bnxt.h"
@@ -36,6 +38,8 @@
 #define is_vid_exactmatch(vlan_tci_mask)	\
 	((ntohs(vlan_tci_mask) & VLAN_VID_MASK) == VLAN_VID_MASK)
 
+static bool is_wildcard(void *mask, int len);
+static bool is_exactmatch(void *mask, int len);
 /* Return the dst fid of the func for flow forwarding
  * For PFs: src_fid is the fid of the PF
  * For VF-reps: src_fid the fid of the VF
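The two predicates forward-declared in this hunk are defined later in bnxt_tc.c (outside this diff) and are used by the L2-rewrite path added below. A minimal userspace sketch of the semantics their call sites imply (all-zero mask = wildcard, all-0xff mask = exact match); these are illustrative stand-ins, not the driver's definitions:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative stand-ins, not the driver's code. */
static bool is_wildcard(const void *mask, int len)
{
	const uint8_t *p = mask;

	while (len--)
		if (*p++ != 0)
			return false;	/* any set bit => not a wildcard */
	return true;
}

static bool is_exactmatch(const void *mask, int len)
{
	const uint8_t *p = mask;

	while (len--)
		if (*p++ != 0xff)
			return false;	/* any cleared bit => not exact */
	return true;
}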
@@ -45,8 +49,8 @@
 	struct bnxt *bp;
 
 	/* check if dev belongs to the same switch */
-	if (!switchdev_port_same_parent_id(pf_bp->dev, dev)) {
-		netdev_info(pf_bp->dev, "dev(ifindex=%d) not on same switch",
+	if (!netdev_port_same_parent_id(pf_bp->dev, dev)) {
+		netdev_info(pf_bp->dev, "dev(ifindex=%d) not on same switch\n",
 			    dev->ifindex);
 		return BNXT_FID_INVALID;
 	}
@@ -61,12 +65,12 @@
 
 static int bnxt_tc_parse_redir(struct bnxt *bp,
 			       struct bnxt_tc_actions *actions,
-			       const struct tc_action *tc_act)
+			       const struct flow_action_entry *act)
 {
-	struct net_device *dev = tcf_mirred_dev(tc_act);
+	struct net_device *dev = act->dev;
 
 	if (!dev) {
-		netdev_info(bp->dev, "no dev in mirred action");
+		netdev_info(bp->dev, "no dev in mirred action\n");
 		return -EINVAL;
 	}
 
@@ -77,16 +81,16 @@
 
 static int bnxt_tc_parse_vlan(struct bnxt *bp,
 			      struct bnxt_tc_actions *actions,
-			      const struct tc_action *tc_act)
+			      const struct flow_action_entry *act)
 {
-	switch (tcf_vlan_action(tc_act)) {
-	case TCA_VLAN_ACT_POP:
+	switch (act->id) {
+	case FLOW_ACTION_VLAN_POP:
 		actions->flags |= BNXT_TC_ACTION_FLAG_POP_VLAN;
 		break;
-	case TCA_VLAN_ACT_PUSH:
+	case FLOW_ACTION_VLAN_PUSH:
 		actions->flags |= BNXT_TC_ACTION_FLAG_PUSH_VLAN;
-		actions->push_vlan_tci = htons(tcf_vlan_push_vid(tc_act));
-		actions->push_vlan_tpid = tcf_vlan_push_proto(tc_act);
+		actions->push_vlan_tci = htons(act->vlan.vid);
+		actions->push_vlan_tpid = act->vlan.proto;
 		break;
 	default:
 		return -EOPNOTSUPP;
@@ -96,13 +100,13 @@
 
 static int bnxt_tc_parse_tunnel_set(struct bnxt *bp,
 				    struct bnxt_tc_actions *actions,
-				    const struct tc_action *tc_act)
+				    const struct flow_action_entry *act)
 {
-	struct ip_tunnel_info *tun_info = tcf_tunnel_info(tc_act);
-	struct ip_tunnel_key *tun_key = &tun_info->key;
+	const struct ip_tunnel_info *tun_info = act->tunnel;
+	const struct ip_tunnel_key *tun_key = &tun_info->key;
 
 	if (ip_tunnel_info_af(tun_info) != AF_INET) {
-		netdev_info(bp->dev, "only IPv4 tunnel-encap is supported");
+		netdev_info(bp->dev, "only IPv4 tunnel-encap is supported\n");
 		return -EOPNOTSUPP;
 	}
 
@@ -111,54 +115,237 @@
 	return 0;
 }
 
+/* Key & Mask from the stack comes unaligned in multiple iterations of 4 bytes
+ * each(u32).
+ * This routine consolidates such multiple unaligned values into one
+ * field each for Key & Mask (for src and dst macs separately)
+ * For example,
+ *			Mask/Key	Offset	Iteration
+ *			==========	======	=========
+ *	dst mac		0xffffffff	0	1
+ *	dst mac		0x0000ffff	4	2
+ *
+ *	src mac		0xffff0000	4	1
+ *	src mac		0xffffffff	8	2
+ *
+ * The above combination coming from the stack will be consolidated as
+ *			Mask/Key
+ *			==============
+ *	src mac:	0xffffffffffff
+ *	dst mac:	0xffffffffffff
+ */
+static void bnxt_set_l2_key_mask(u32 part_key, u32 part_mask,
+				 u8 *actual_key, u8 *actual_mask)
+{
+	u32 key = get_unaligned((u32 *)actual_key);
+	u32 mask = get_unaligned((u32 *)actual_mask);
+
+	part_key &= part_mask;
+	part_key |= key & ~part_mask;
+
+	put_unaligned(mask | part_mask, (u32 *)actual_mask);
+	put_unaligned(part_key, (u32 *)actual_key);
+}
+
+static int
+bnxt_fill_l2_rewrite_fields(struct bnxt_tc_actions *actions,
+			    u16 *eth_addr, u16 *eth_addr_mask)
+{
+	u16 *p;
+	int j;
+
+	if (unlikely(bnxt_eth_addr_key_mask_invalid(eth_addr, eth_addr_mask)))
+		return -EINVAL;
+
+	if (!is_wildcard(&eth_addr_mask[0], ETH_ALEN)) {
+		if (!is_exactmatch(&eth_addr_mask[0], ETH_ALEN))
+			return -EINVAL;
+		/* FW expects dmac to be in u16 array format */
+		p = eth_addr;
+		for (j = 0; j < 3; j++)
+			actions->l2_rewrite_dmac[j] = cpu_to_be16(*(p + j));
+	}
+
+	if (!is_wildcard(&eth_addr_mask[ETH_ALEN / 2], ETH_ALEN)) {
+		if (!is_exactmatch(&eth_addr_mask[ETH_ALEN / 2], ETH_ALEN))
+			return -EINVAL;
+		/* FW expects smac to be in u16 array format */
+		p = &eth_addr[ETH_ALEN / 2];
+		for (j = 0; j < 3; j++)
+			actions->l2_rewrite_smac[j] = cpu_to_be16(*(p + j));
+	}
+
+	return 0;
+}
+
+static int
+bnxt_tc_parse_pedit(struct bnxt *bp, struct bnxt_tc_actions *actions,
+		    struct flow_action_entry *act, int act_idx, u8 *eth_addr,
+		    u8 *eth_addr_mask)
+{
+	size_t offset_of_ip6_daddr = offsetof(struct ipv6hdr, daddr);
+	size_t offset_of_ip6_saddr = offsetof(struct ipv6hdr, saddr);
+	u32 mask, val, offset, idx;
+	u8 htype;
+
+	offset = act->mangle.offset;
+	htype = act->mangle.htype;
+	mask = ~act->mangle.mask;
+	val = act->mangle.val;
+
+	switch (htype) {
+	case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
+		if (offset > PEDIT_OFFSET_SMAC_LAST_4_BYTES) {
+			netdev_err(bp->dev,
+				   "%s: eth_hdr: Invalid pedit field\n",
+				   __func__);
+			return -EINVAL;
+		}
+		actions->flags |= BNXT_TC_ACTION_FLAG_L2_REWRITE;
+
+		bnxt_set_l2_key_mask(val, mask, &eth_addr[offset],
+				     &eth_addr_mask[offset]);
+		break;
+	case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
+		actions->flags |= BNXT_TC_ACTION_FLAG_NAT_XLATE;
+		actions->nat.l3_is_ipv4 = true;
+		if (offset == offsetof(struct iphdr, saddr)) {
+			actions->nat.src_xlate = true;
+			actions->nat.l3.ipv4.saddr.s_addr = htonl(val);
+		} else if (offset == offsetof(struct iphdr, daddr)) {
+			actions->nat.src_xlate = false;
+			actions->nat.l3.ipv4.daddr.s_addr = htonl(val);
+		} else {
+			netdev_err(bp->dev,
+				   "%s: IPv4_hdr: Invalid pedit field\n",
+				   __func__);
+			return -EINVAL;
+		}
+
+		netdev_dbg(bp->dev, "nat.src_xlate = %d src IP: %pI4 dst ip : %pI4\n",
+			   actions->nat.src_xlate, &actions->nat.l3.ipv4.saddr,
+			   &actions->nat.l3.ipv4.daddr);
+		break;
+
+	case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
+		actions->flags |= BNXT_TC_ACTION_FLAG_NAT_XLATE;
+		actions->nat.l3_is_ipv4 = false;
+		if (offset >= offsetof(struct ipv6hdr, saddr) &&
+		    offset < offset_of_ip6_daddr) {
+			/* 16 byte IPv6 address comes in 4 iterations of
+			 * 4byte chunks each
+			 */
+			actions->nat.src_xlate = true;
+			idx = (offset - offset_of_ip6_saddr) / 4;
+			/* First 4bytes will be copied to idx 0 and so on */
+			actions->nat.l3.ipv6.saddr.s6_addr32[idx] = htonl(val);
+		} else if (offset >= offset_of_ip6_daddr &&
+			   offset < offset_of_ip6_daddr + 16) {
+			actions->nat.src_xlate = false;
+			idx = (offset - offset_of_ip6_daddr) / 4;
+			actions->nat.l3.ipv6.daddr.s6_addr32[idx] = htonl(val);
+		} else {
+			netdev_err(bp->dev,
+				   "%s: IPv6_hdr: Invalid pedit field\n",
+				   __func__);
+			return -EINVAL;
+		}
+		break;
+	case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
+	case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
+		/* HW does not support L4 rewrite alone without L3
+		 * rewrite
+		 */
+		if (!(actions->flags & BNXT_TC_ACTION_FLAG_NAT_XLATE)) {
+			netdev_err(bp->dev,
+				   "Need to specify L3 rewrite as well\n");
+			return -EINVAL;
+		}
+		if (actions->nat.src_xlate)
+			actions->nat.l4.ports.sport = htons(val);
+		else
+			actions->nat.l4.ports.dport = htons(val);
+		netdev_dbg(bp->dev, "actions->nat.sport = %d dport = %d\n",
+			   actions->nat.l4.ports.sport,
+			   actions->nat.l4.ports.dport);
+		break;
+	default:
+		netdev_err(bp->dev, "%s: Unsupported pedit hdr type\n",
+			   __func__);
+		return -EINVAL;
+	}
+	return 0;
+}
+
 static int bnxt_tc_parse_actions(struct bnxt *bp,
 				 struct bnxt_tc_actions *actions,
-				 struct tcf_exts *tc_exts)
+				 struct flow_action *flow_action,
+				 struct netlink_ext_ack *extack)
 {
-	const struct tc_action *tc_act;
+	/* Used to store the L2 rewrite mask for dmac (6 bytes) followed by
+	 * smac (6 bytes) if rewrite of both is specified, otherwise either
+	 * dmac or smac
+	 */
+	u16 eth_addr_mask[ETH_ALEN] = { 0 };
+	/* Used to store the L2 rewrite key for dmac (6 bytes) followed by
+	 * smac (6 bytes) if rewrite of both is specified, otherwise either
+	 * dmac or smac
+	 */
+	u16 eth_addr[ETH_ALEN] = { 0 };
+	struct flow_action_entry *act;
 	int i, rc;
 
-	if (!tcf_exts_has_actions(tc_exts)) {
-		netdev_info(bp->dev, "no actions");
+	if (!flow_action_has_entries(flow_action)) {
+		netdev_info(bp->dev, "no actions\n");
 		return -EINVAL;
 	}
 
-	tcf_exts_for_each_action(i, tc_act, tc_exts) {
-		/* Drop action */
-		if (is_tcf_gact_shot(tc_act)) {
+	if (!flow_action_basic_hw_stats_check(flow_action, extack))
+		return -EOPNOTSUPP;
+
+	flow_action_for_each(i, act, flow_action) {
+		switch (act->id) {
+		case FLOW_ACTION_DROP:
 			actions->flags |= BNXT_TC_ACTION_FLAG_DROP;
 			return 0; /* don't bother with other actions */
-		}
-
-		/* Redirect action */
-		if (is_tcf_mirred_egress_redirect(tc_act)) {
-			rc = bnxt_tc_parse_redir(bp, actions, tc_act);
+		case FLOW_ACTION_REDIRECT:
+			rc = bnxt_tc_parse_redir(bp, actions, act);
 			if (rc)
 				return rc;
-			continue;
-		}
-
-		/* Push/pop VLAN */
-		if (is_tcf_vlan(tc_act)) {
-			rc = bnxt_tc_parse_vlan(bp, actions, tc_act);
+			break;
+		case FLOW_ACTION_VLAN_POP:
+		case FLOW_ACTION_VLAN_PUSH:
+		case FLOW_ACTION_VLAN_MANGLE:
+			rc = bnxt_tc_parse_vlan(bp, actions, act);
 			if (rc)
 				return rc;
-			continue;
-		}
-
-		/* Tunnel encap */
-		if (is_tcf_tunnel_set(tc_act)) {
-			rc = bnxt_tc_parse_tunnel_set(bp, actions, tc_act);
+			break;
+		case FLOW_ACTION_TUNNEL_ENCAP:
+			rc = bnxt_tc_parse_tunnel_set(bp, actions, act);
 			if (rc)
 				return rc;
-			continue;
-		}
-
-		/* Tunnel decap */
-		if (is_tcf_tunnel_release(tc_act)) {
+			break;
+		case FLOW_ACTION_TUNNEL_DECAP:
 			actions->flags |= BNXT_TC_ACTION_FLAG_TUNNEL_DECAP;
-			continue;
+			break;
+		/* Packet edit: L2 rewrite, NAT, NAPT */
+		case FLOW_ACTION_MANGLE:
+			rc = bnxt_tc_parse_pedit(bp, actions, act, i,
+						 (u8 *)eth_addr,
+						 (u8 *)eth_addr_mask);
+			if (rc)
+				return rc;
+			break;
+		default:
+			break;
 		}
+	}
+
+	if (actions->flags & BNXT_TC_ACTION_FLAG_L2_REWRITE) {
+		rc = bnxt_fill_l2_rewrite_fields(actions, eth_addr,
+						 eth_addr_mask);
+		if (rc)
+			return rc;
 	}
 
 	if (actions->flags & BNXT_TC_ACTION_FLAG_FWD) {
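To make the consolidation described in the bnxt_set_l2_key_mask() comment concrete, here is a small userspace sketch that replays the dmac example: two 4-byte pedit chunks merged into one 6-byte key/mask pair. memcpy() stands in for get_unaligned()/put_unaligned(), and the printed byte order assumes a little-endian host:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

static void set_l2_key_mask(uint32_t part_key, uint32_t part_mask,
			    uint8_t *actual_key, uint8_t *actual_mask)
{
	uint32_t key, mask;

	memcpy(&key, actual_key, 4);
	memcpy(&mask, actual_mask, 4);

	part_key &= part_mask;
	part_key |= key & ~part_mask;	/* keep bytes merged by earlier chunks */
	mask |= part_mask;		/* accumulate the mask */

	memcpy(actual_mask, &mask, 4);
	memcpy(actual_key, &part_key, 4);
}

int main(void)
{
	uint8_t key[8] = { 0 }, mask[8] = { 0 };
	int i;

	/* iteration 1: dmac bytes 0-3, mask 0xffffffff at offset 0 */
	set_l2_key_mask(0xddccbbaa, 0xffffffff, &key[0], &mask[0]);
	/* iteration 2: dmac bytes 4-5, mask 0x0000ffff at offset 4 */
	set_l2_key_mask(0x0000ffee, 0x0000ffff, &key[4], &mask[4]);

	for (i = 0; i < 6; i++)
		printf("%02x%c", key[i], i < 5 ? ':' : '\n');
	/* prints aa:bb:cc:dd:ee:ff; mask[0..5] is now all 0xff */
	return 0;
}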
@@ -177,196 +364,157 @@
 	return 0;
 }
 
-#define GET_KEY(flow_cmd, key_type)					\
-		skb_flow_dissector_target((flow_cmd)->dissector, key_type,\
-					  (flow_cmd)->key)
-#define GET_MASK(flow_cmd, key_type)					\
-		skb_flow_dissector_target((flow_cmd)->dissector, key_type,\
-					  (flow_cmd)->mask)
-
 static int bnxt_tc_parse_flow(struct bnxt *bp,
-			      struct tc_cls_flower_offload *tc_flow_cmd,
+			      struct flow_cls_offload *tc_flow_cmd,
 			      struct bnxt_tc_flow *flow)
 {
-	struct flow_dissector *dissector = tc_flow_cmd->dissector;
-	u16 addr_type = 0;
+	struct flow_rule *rule = flow_cls_offload_flow_rule(tc_flow_cmd);
+	struct flow_dissector *dissector = rule->match.dissector;
 
 	/* KEY_CONTROL and KEY_BASIC are needed for forming a meaningful key */
 	if ((dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL)) == 0 ||
 	    (dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_BASIC)) == 0) {
-		netdev_info(bp->dev, "cannot form TC key: used_keys = 0x%x",
+		netdev_info(bp->dev, "cannot form TC key: used_keys = 0x%x\n",
 			    dissector->used_keys);
 		return -EOPNOTSUPP;
 	}
 
-	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
-		struct flow_dissector_key_control *key =
-			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_CONTROL);
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+		struct flow_match_basic match;
 
-		addr_type = key->addr_type;
-	}
+		flow_rule_match_basic(rule, &match);
+		flow->l2_key.ether_type = match.key->n_proto;
+		flow->l2_mask.ether_type = match.mask->n_proto;
 
-	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_BASIC)) {
-		struct flow_dissector_key_basic *key =
-			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_BASIC);
-		struct flow_dissector_key_basic *mask =
-			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_BASIC);
-
-		flow->l2_key.ether_type = key->n_proto;
-		flow->l2_mask.ether_type = mask->n_proto;
-
-		if (key->n_proto == htons(ETH_P_IP) ||
-		    key->n_proto == htons(ETH_P_IPV6)) {
-			flow->l4_key.ip_proto = key->ip_proto;
-			flow->l4_mask.ip_proto = mask->ip_proto;
+		if (match.key->n_proto == htons(ETH_P_IP) ||
+		    match.key->n_proto == htons(ETH_P_IPV6)) {
+			flow->l4_key.ip_proto = match.key->ip_proto;
+			flow->l4_mask.ip_proto = match.mask->ip_proto;
 		}
 	}
 
-	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
-		struct flow_dissector_key_eth_addrs *key =
-			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ETH_ADDRS);
-		struct flow_dissector_key_eth_addrs *mask =
-			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ETH_ADDRS);
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+		struct flow_match_eth_addrs match;
 
+		flow_rule_match_eth_addrs(rule, &match);
 		flow->flags |= BNXT_TC_FLOW_FLAGS_ETH_ADDRS;
-		ether_addr_copy(flow->l2_key.dmac, key->dst);
-		ether_addr_copy(flow->l2_mask.dmac, mask->dst);
-		ether_addr_copy(flow->l2_key.smac, key->src);
-		ether_addr_copy(flow->l2_mask.smac, mask->src);
+		ether_addr_copy(flow->l2_key.dmac, match.key->dst);
+		ether_addr_copy(flow->l2_mask.dmac, match.mask->dst);
+		ether_addr_copy(flow->l2_key.smac, match.key->src);
+		ether_addr_copy(flow->l2_mask.smac, match.mask->src);
 	}
 
-	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_VLAN)) {
-		struct flow_dissector_key_vlan *key =
-			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_VLAN);
-		struct flow_dissector_key_vlan *mask =
-			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_VLAN);
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+		struct flow_match_vlan match;
 
+		flow_rule_match_vlan(rule, &match);
 		flow->l2_key.inner_vlan_tci =
-			cpu_to_be16(VLAN_TCI(key->vlan_id, key->vlan_priority));
+			cpu_to_be16(VLAN_TCI(match.key->vlan_id,
+					     match.key->vlan_priority));
 		flow->l2_mask.inner_vlan_tci =
-			cpu_to_be16((VLAN_TCI(mask->vlan_id, mask->vlan_priority)));
+			cpu_to_be16((VLAN_TCI(match.mask->vlan_id,
+					      match.mask->vlan_priority)));
 		flow->l2_key.inner_vlan_tpid = htons(ETH_P_8021Q);
 		flow->l2_mask.inner_vlan_tpid = htons(0xffff);
 		flow->l2_key.num_vlans = 1;
 	}
 
-	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
-		struct flow_dissector_key_ipv4_addrs *key =
-			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV4_ADDRS);
-		struct flow_dissector_key_ipv4_addrs *mask =
-			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV4_ADDRS);
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
+		struct flow_match_ipv4_addrs match;
 
+		flow_rule_match_ipv4_addrs(rule, &match);
 		flow->flags |= BNXT_TC_FLOW_FLAGS_IPV4_ADDRS;
-		flow->l3_key.ipv4.daddr.s_addr = key->dst;
-		flow->l3_mask.ipv4.daddr.s_addr = mask->dst;
-		flow->l3_key.ipv4.saddr.s_addr = key->src;
-		flow->l3_mask.ipv4.saddr.s_addr = mask->src;
-	} else if (dissector_uses_key(dissector,
-				      FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
-		struct flow_dissector_key_ipv6_addrs *key =
-			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV6_ADDRS);
-		struct flow_dissector_key_ipv6_addrs *mask =
-			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV6_ADDRS);
+		flow->l3_key.ipv4.daddr.s_addr = match.key->dst;
+		flow->l3_mask.ipv4.daddr.s_addr = match.mask->dst;
+		flow->l3_key.ipv4.saddr.s_addr = match.key->src;
+		flow->l3_mask.ipv4.saddr.s_addr = match.mask->src;
+	} else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
+		struct flow_match_ipv6_addrs match;
 
+		flow_rule_match_ipv6_addrs(rule, &match);
 		flow->flags |= BNXT_TC_FLOW_FLAGS_IPV6_ADDRS;
-		flow->l3_key.ipv6.daddr = key->dst;
-		flow->l3_mask.ipv6.daddr = mask->dst;
-		flow->l3_key.ipv6.saddr = key->src;
-		flow->l3_mask.ipv6.saddr = mask->src;
+		flow->l3_key.ipv6.daddr = match.key->dst;
+		flow->l3_mask.ipv6.daddr = match.mask->dst;
+		flow->l3_key.ipv6.saddr = match.key->src;
+		flow->l3_mask.ipv6.saddr = match.mask->src;
 	}
 
-	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_PORTS)) {
-		struct flow_dissector_key_ports *key =
-			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_PORTS);
-		struct flow_dissector_key_ports *mask =
-			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_PORTS);
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
+		struct flow_match_ports match;
 
+		flow_rule_match_ports(rule, &match);
 		flow->flags |= BNXT_TC_FLOW_FLAGS_PORTS;
-		flow->l4_key.ports.dport = key->dst;
-		flow->l4_mask.ports.dport = mask->dst;
-		flow->l4_key.ports.sport = key->src;
-		flow->l4_mask.ports.sport = mask->src;
+		flow->l4_key.ports.dport = match.key->dst;
+		flow->l4_mask.ports.dport = match.mask->dst;
+		flow->l4_key.ports.sport = match.key->src;
+		flow->l4_mask.ports.sport = match.mask->src;
 	}
 
-	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ICMP)) {
-		struct flow_dissector_key_icmp *key =
-			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ICMP);
-		struct flow_dissector_key_icmp *mask =
-			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ICMP);
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP)) {
+		struct flow_match_icmp match;
 
+		flow_rule_match_icmp(rule, &match);
 		flow->flags |= BNXT_TC_FLOW_FLAGS_ICMP;
-		flow->l4_key.icmp.type = key->type;
-		flow->l4_key.icmp.code = key->code;
-		flow->l4_mask.icmp.type = mask->type;
-		flow->l4_mask.icmp.code = mask->code;
+		flow->l4_key.icmp.type = match.key->type;
+		flow->l4_key.icmp.code = match.key->code;
+		flow->l4_mask.icmp.type = match.mask->type;
+		flow->l4_mask.icmp.code = match.mask->code;
 	}
 
-	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
-		struct flow_dissector_key_control *key =
-			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_CONTROL);
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
+		struct flow_match_ipv4_addrs match;
 
-		addr_type = key->addr_type;
-	}
-
-	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
-		struct flow_dissector_key_ipv4_addrs *key =
-			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS);
-		struct flow_dissector_key_ipv4_addrs *mask =
-			GET_MASK(tc_flow_cmd,
-				 FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS);
-
+		flow_rule_match_enc_ipv4_addrs(rule, &match);
 		flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_IPV4_ADDRS;
-		flow->tun_key.u.ipv4.dst = key->dst;
-		flow->tun_mask.u.ipv4.dst = mask->dst;
-		flow->tun_key.u.ipv4.src = key->src;
-		flow->tun_mask.u.ipv4.src = mask->src;
-	} else if (dissector_uses_key(dissector,
+		flow->tun_key.u.ipv4.dst = match.key->dst;
+		flow->tun_mask.u.ipv4.dst = match.mask->dst;
+		flow->tun_key.u.ipv4.src = match.key->src;
+		flow->tun_mask.u.ipv4.src = match.mask->src;
+	} else if (flow_rule_match_key(rule,
 				      FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) {
 		return -EOPNOTSUPP;
 	}
 
-	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
-		struct flow_dissector_key_keyid *key =
-			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_KEYID);
-		struct flow_dissector_key_keyid *mask =
-			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_KEYID);
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
+		struct flow_match_enc_keyid match;
 
+		flow_rule_match_enc_keyid(rule, &match);
 		flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_ID;
-		flow->tun_key.tun_id = key32_to_tunnel_id(key->keyid);
-		flow->tun_mask.tun_id = key32_to_tunnel_id(mask->keyid);
+		flow->tun_key.tun_id = key32_to_tunnel_id(match.key->keyid);
+		flow->tun_mask.tun_id = key32_to_tunnel_id(match.mask->keyid);
 	}
 
-	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
-		struct flow_dissector_key_ports *key =
-			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_PORTS);
-		struct flow_dissector_key_ports *mask =
-			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_PORTS);
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
+		struct flow_match_ports match;
 
+		flow_rule_match_enc_ports(rule, &match);
 		flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_PORTS;
-		flow->tun_key.tp_dst = key->dst;
-		flow->tun_mask.tp_dst = mask->dst;
-		flow->tun_key.tp_src = key->src;
-		flow->tun_mask.tp_src = mask->src;
+		flow->tun_key.tp_dst = match.key->dst;
+		flow->tun_mask.tp_dst = match.mask->dst;
+		flow->tun_key.tp_src = match.key->src;
+		flow->tun_mask.tp_src = match.mask->src;
 	}
 
-	return bnxt_tc_parse_actions(bp, &flow->actions, tc_flow_cmd->exts);
+	return bnxt_tc_parse_actions(bp, &flow->actions, &rule->action,
+				     tc_flow_cmd->common.extack);
 }
 
-static int bnxt_hwrm_cfa_flow_free(struct bnxt *bp, __le16 flow_handle)
+static int bnxt_hwrm_cfa_flow_free(struct bnxt *bp,
+				   struct bnxt_tc_flow_node *flow_node)
 {
 	struct hwrm_cfa_flow_free_input req = { 0 };
 	int rc;
 
 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_FREE, -1, -1);
-	req.flow_handle = flow_handle;
+	if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
+		req.ext_flow_handle = flow_node->ext_flow_handle;
+	else
+		req.flow_handle = flow_node->flow_handle;
 
 	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 	if (rc)
-		netdev_info(bp->dev, "Error: %s: flow_handle=0x%x rc=%d",
-			    __func__, flow_handle, rc);
+		netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc);
 
-	if (rc)
-		rc = -EIO;
 	return rc;
 }
 
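The VLAN_TCI() packing used in the VLAN match above is defined earlier in bnxt_tc.c, outside this diff. Assuming it is the usual (vid | prio << VLAN_PRIO_SHIFT) with VLAN_PRIO_SHIFT == 13 (the 802.1Q TCI layout: 3-bit PCP above a 1-bit DEI and 12-bit VID), a quick standalone check:

#include <stdio.h>
#include <stdint.h>

#define VLAN_PRIO_SHIFT		13
#define VLAN_TCI(vid, prio)	((vid) | ((prio) << VLAN_PRIO_SHIFT))

int main(void)
{
	uint16_t tci = VLAN_TCI(100, 3);

	printf("vid=100 prio=3 -> tci=0x%04x\n", tci);	/* 0x6064 */
	return 0;
}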
@@ -433,13 +581,14 @@
 
 static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,
 				    __le16 ref_flow_handle,
-				    __le32 tunnel_handle, __le16 *flow_handle)
+				    __le32 tunnel_handle,
+				    struct bnxt_tc_flow_node *flow_node)
 {
-	struct hwrm_cfa_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
 	struct bnxt_tc_actions *actions = &flow->actions;
 	struct bnxt_tc_l3_key *l3_mask = &flow->l3_mask;
 	struct bnxt_tc_l3_key *l3_key = &flow->l3_key;
 	struct hwrm_cfa_flow_alloc_input req = { 0 };
+	struct hwrm_cfa_flow_alloc_output *resp;
 	u16 flow_flags = 0, action_flags = 0;
 	int rc;
 
445594
....@@ -447,6 +596,76 @@
447596
448597 req.src_fid = cpu_to_le16(flow->src_fid);
449598 req.ref_flow_handle = ref_flow_handle;
599
+
600
+ if (actions->flags & BNXT_TC_ACTION_FLAG_L2_REWRITE) {
601
+ memcpy(req.l2_rewrite_dmac, actions->l2_rewrite_dmac,
602
+ ETH_ALEN);
603
+ memcpy(req.l2_rewrite_smac, actions->l2_rewrite_smac,
604
+ ETH_ALEN);
605
+ action_flags |=
606
+ CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE;
607
+ }
608
+
609
+ if (actions->flags & BNXT_TC_ACTION_FLAG_NAT_XLATE) {
610
+ if (actions->nat.l3_is_ipv4) {
611
+ action_flags |=
612
+ CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_IPV4_ADDRESS;
613
+
614
+ if (actions->nat.src_xlate) {
615
+ action_flags |=
616
+ CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_SRC;
617
+ /* L3 source rewrite */
618
+ req.nat_ip_address[0] =
619
+ actions->nat.l3.ipv4.saddr.s_addr;
620
+ /* L4 source port */
621
+ if (actions->nat.l4.ports.sport)
622
+ req.nat_port =
623
+ actions->nat.l4.ports.sport;
624
+ } else {
625
+ action_flags |=
626
+ CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_DEST;
627
+ /* L3 destination rewrite */
628
+ req.nat_ip_address[0] =
629
+ actions->nat.l3.ipv4.daddr.s_addr;
630
+ /* L4 destination port */
631
+ if (actions->nat.l4.ports.dport)
632
+ req.nat_port =
633
+ actions->nat.l4.ports.dport;
634
+ }
635
+ netdev_dbg(bp->dev,
636
+ "req.nat_ip_address: %pI4 src_xlate: %d req.nat_port: %x\n",
637
+ req.nat_ip_address, actions->nat.src_xlate,
638
+ req.nat_port);
639
+ } else {
640
+ if (actions->nat.src_xlate) {
641
+ action_flags |=
642
+ CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_SRC;
643
+ /* L3 source rewrite */
644
+ memcpy(req.nat_ip_address,
645
+ actions->nat.l3.ipv6.saddr.s6_addr32,
646
+ sizeof(req.nat_ip_address));
647
+ /* L4 source port */
648
+ if (actions->nat.l4.ports.sport)
649
+ req.nat_port =
650
+ actions->nat.l4.ports.sport;
651
+ } else {
652
+ action_flags |=
653
+ CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_DEST;
654
+ /* L3 destination rewrite */
655
+ memcpy(req.nat_ip_address,
656
+ actions->nat.l3.ipv6.daddr.s6_addr32,
657
+ sizeof(req.nat_ip_address));
658
+ /* L4 destination port */
659
+ if (actions->nat.l4.ports.dport)
660
+ req.nat_port =
661
+ actions->nat.l4.ports.dport;
662
+ }
663
+ netdev_dbg(bp->dev,
664
+ "req.nat_ip_address: %pI6 src_xlate: %d req.nat_port: %x\n",
665
+ req.nat_ip_address, actions->nat.src_xlate,
666
+ req.nat_port);
667
+ }
668
+ }
450669
451670 if (actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP ||
452671 actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP) {
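The NAT fields consumed by this hunk were selected in bnxt_tc_parse_pedit() purely by the pedit byte offset into the L3 header. A standalone check of the offsets involved, using the userspace header names (struct ip6_hdr here corresponds to the kernel's struct ipv6hdr):

#include <stdio.h>
#include <stddef.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>

int main(void)
{
	/* IPv4: saddr at byte 12, daddr at byte 16 of the header */
	printf("iphdr saddr offset: %zu\n", offsetof(struct iphdr, saddr));
	printf("iphdr daddr offset: %zu\n", offsetof(struct iphdr, daddr));
	/* IPv6: 16-byte saddr at byte 8, daddr at byte 24; each address
	 * arrives from pedit as four 4-byte chunks, hence the parser's
	 * idx = (offset - base) / 4
	 */
	printf("ip6 saddr offset: %zu\n", offsetof(struct ip6_hdr, ip6_src));
	printf("ip6 daddr offset: %zu\n", offsetof(struct ip6_hdr, ip6_dst));
	return 0;
}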
@@ -542,14 +761,24 @@
 
 	mutex_lock(&bp->hwrm_cmd_lock);
 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
-	if (!rc)
-		*flow_handle = resp->flow_handle;
+	if (!rc) {
+		resp = bnxt_get_hwrm_resp_addr(bp, &req);
+		/* CFA_FLOW_ALLOC response interpretation:
+		 *		    fw with	     fw with
+		 *		    16-bit	     64-bit
+		 *		    flow handle      flow handle
+		 *		    ===========      ===========
+		 * flow_handle	    flow handle      flow context id
+		 * ext_flow_handle  INVALID	     flow handle
+		 * flow_id	    INVALID	     flow counter id
+		 */
+		flow_node->flow_handle = resp->flow_handle;
+		if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE) {
+			flow_node->ext_flow_handle = resp->ext_flow_handle;
+			flow_node->flow_id = resp->flow_id;
+		}
+	}
 	mutex_unlock(&bp->hwrm_cmd_lock);
-
-	if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR)
-		rc = -ENOSPC;
-	else if (rc)
-		rc = -EIO;
 	return rc;
 }
 
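A userspace sketch of the response-interpretation table in the comment above, showing which fields end up in the flow node for old (16-bit handle) versus new (64-bit handle) firmware; both struct layouts are illustrative stand-ins, not the HWRM definitions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct alloc_resp {		/* stand-in for hwrm_cfa_flow_alloc_output */
	uint16_t flow_handle;
	uint64_t ext_flow_handle;
	uint32_t flow_id;
};

struct flow_node {		/* stand-in for bnxt_tc_flow_node */
	uint16_t flow_handle;
	uint64_t ext_flow_handle;
	uint32_t flow_id;
};

static void record_handles(struct flow_node *node,
			   const struct alloc_resp *resp,
			   bool fw_has_64bit_handles)
{
	/* old fw: the flow handle itself; new fw: only a flow context id */
	node->flow_handle = resp->flow_handle;
	if (fw_has_64bit_handles) {	/* BNXT_FW_CAP_OVS_64BIT_HANDLE */
		node->ext_flow_handle = resp->ext_flow_handle; /* real handle */
		node->flow_id = resp->flow_id;		       /* counter id */
	}
}

int main(void)
{
	struct alloc_resp resp = { 0x1234, 0x1122334455667788ULL, 42 };
	struct flow_node node = { 0 };

	record_handles(&node, &resp, true);
	printf("handle=0x%x ext=0x%llx id=%u\n", node.flow_handle,
	       (unsigned long long)node.ext_flow_handle, node.flow_id);
	return 0;
}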
@@ -559,9 +788,8 @@
 					   __le32 ref_decap_handle,
 					   __le32 *decap_filter_handle)
 {
-	struct hwrm_cfa_decap_filter_alloc_output *resp =
-		bp->hwrm_cmd_resp_addr;
 	struct hwrm_cfa_decap_filter_alloc_input req = { 0 };
+	struct hwrm_cfa_decap_filter_alloc_output *resp;
 	struct ip_tunnel_key *tun_key = &flow->tun_key;
 	u32 enables = 0;
 	int rc;
@@ -614,14 +842,14 @@
 
 	mutex_lock(&bp->hwrm_cmd_lock);
 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
-	if (!rc)
+	if (!rc) {
+		resp = bnxt_get_hwrm_resp_addr(bp, &req);
 		*decap_filter_handle = resp->decap_filter_id;
-	else
-		netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
+	} else {
+		netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc);
+	}
 	mutex_unlock(&bp->hwrm_cmd_lock);
 
-	if (rc)
-		rc = -EIO;
 	return rc;
 }
 
@@ -636,10 +864,8 @@
 
 	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 	if (rc)
-		netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
+		netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc);
 
-	if (rc)
-		rc = -EIO;
 	return rc;
 }
 
@@ -648,9 +874,8 @@
 				    struct bnxt_tc_l2_key *l2_info,
 				    __le32 *encap_record_handle)
 {
-	struct hwrm_cfa_encap_record_alloc_output *resp =
-		bp->hwrm_cmd_resp_addr;
 	struct hwrm_cfa_encap_record_alloc_input req = { 0 };
+	struct hwrm_cfa_encap_record_alloc_output *resp;
 	struct hwrm_cfa_encap_data_vxlan *encap =
 		(struct hwrm_cfa_encap_data_vxlan *)&req.encap_data;
 	struct hwrm_vxlan_ipv4_hdr *encap_ipv4 =
@@ -682,14 +907,14 @@
 
 	mutex_lock(&bp->hwrm_cmd_lock);
 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
-	if (!rc)
+	if (!rc) {
+		resp = bnxt_get_hwrm_resp_addr(bp, &req);
 		*encap_record_handle = resp->encap_record_id;
-	else
-		netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
+	} else {
+		netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc);
+	}
 	mutex_unlock(&bp->hwrm_cmd_lock);
 
-	if (rc)
-		rc = -EIO;
 	return rc;
 }
 
@@ -704,10 +929,8 @@
 
 	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 	if (rc)
-		netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
+		netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc);
 
-	if (rc)
-		rc = -EIO;
 	return rc;
 }
 
@@ -725,7 +948,7 @@
 			       tc_info->l2_ht_params);
 	if (rc)
 		netdev_err(bp->dev,
-			   "Error: %s: rhashtable_remove_fast: %d",
+			   "Error: %s: rhashtable_remove_fast: %d\n",
 			   __func__, rc);
 	kfree_rcu(l2_node, rcu);
 }
@@ -754,7 +977,7 @@
 	if (rc) {
 		kfree_rcu(l2_node, rcu);
 		netdev_err(bp->dev,
-			   "Error: %s: rhashtable_insert_fast: %d",
+			   "Error: %s: rhashtable_insert_fast: %d\n",
 			   __func__, rc);
 		return NULL;
 	}
@@ -813,7 +1036,7 @@
 	if ((flow->flags & BNXT_TC_FLOW_FLAGS_PORTS) &&
 	    (flow->l4_key.ip_proto != IPPROTO_TCP &&
 	     flow->l4_key.ip_proto != IPPROTO_UDP)) {
-		netdev_info(bp->dev, "Cannot offload non-TCP/UDP (%d) ports",
+		netdev_info(bp->dev, "Cannot offload non-TCP/UDP (%d) ports\n",
 			    flow->l4_key.ip_proto);
 		return false;
 	}
@@ -870,7 +1093,7 @@
 	rc = rhashtable_remove_fast(tunnel_table, &tunnel_node->node,
 				    *ht_params);
 	if (rc) {
-		netdev_err(bp->dev, "rhashtable_remove_fast rc=%d", rc);
+		netdev_err(bp->dev, "rhashtable_remove_fast rc=%d\n", rc);
 		rc = -1;
 	}
 	kfree_rcu(tunnel_node, rcu);
@@ -911,7 +1134,7 @@
 	tunnel_node->refcount++;
 	return tunnel_node;
 err:
-	netdev_info(bp->dev, "error rc=%d", rc);
+	netdev_info(bp->dev, "error rc=%d\n", rc);
 	return NULL;
 }
 
@@ -969,7 +1192,7 @@
 					    &decap_l2_node->node,
 					    tc_info->decap_l2_ht_params);
 		if (rc)
-			netdev_err(bp->dev, "rhashtable_remove_fast rc=%d", rc);
+			netdev_err(bp->dev, "rhashtable_remove_fast rc=%d\n", rc);
 		kfree_rcu(decap_l2_node, rcu);
 	}
 }
@@ -1009,7 +1232,7 @@
 
 	rt = ip_route_output_key(dev_net(real_dst_dev), &flow);
 	if (IS_ERR(rt)) {
-		netdev_info(bp->dev, "no route to %pI4b", &flow.daddr);
+		netdev_info(bp->dev, "no route to %pI4b\n", &flow.daddr);
 		return -EOPNOTSUPP;
 	}
 
@@ -1023,7 +1246,7 @@
 
 		if (vlan->real_dev != real_dst_dev) {
 			netdev_info(bp->dev,
-				    "dst_dev(%s) doesn't use PF-if(%s)",
+				    "dst_dev(%s) doesn't use PF-if(%s)\n",
 				    netdev_name(dst_dev),
 				    netdev_name(real_dst_dev));
 			rc = -EOPNOTSUPP;
@@ -1035,7 +1258,7 @@
 #endif
 	} else if (dst_dev != real_dst_dev) {
 		netdev_info(bp->dev,
-			    "dst_dev(%s) for %pI4b is not PF-if(%s)",
+			    "dst_dev(%s) for %pI4b is not PF-if(%s)\n",
 			    netdev_name(dst_dev), &flow.daddr,
 			    netdev_name(real_dst_dev));
 		rc = -EOPNOTSUPP;
@@ -1044,7 +1267,7 @@
 
 	nbr = dst_neigh_lookup(&rt->dst, &flow.daddr);
 	if (!nbr) {
-		netdev_info(bp->dev, "can't lookup neighbor for %pI4b",
+		netdev_info(bp->dev, "can't lookup neighbor for %pI4b\n",
 			    &flow.daddr);
 		rc = -EOPNOTSUPP;
 		goto put_rt;
@@ -1239,7 +1462,7 @@
 	int rc;
 
 	/* send HWRM cmd to free the flow-id */
-	bnxt_hwrm_cfa_flow_free(bp, flow_node->flow_handle);
+	bnxt_hwrm_cfa_flow_free(bp, flow_node);
 
 	mutex_lock(&tc_info->lock);
 
@@ -1254,11 +1477,17 @@
 	rc = rhashtable_remove_fast(&tc_info->flow_table, &flow_node->node,
 				    tc_info->flow_ht_params);
 	if (rc)
-		netdev_err(bp->dev, "Error: %s: rhashtable_remove_fast rc=%d",
+		netdev_err(bp->dev, "Error: %s: rhashtable_remove_fast rc=%d\n",
 			   __func__, rc);
 
 	kfree_rcu(flow_node, rcu);
 	return 0;
+}
+
+static void bnxt_tc_set_flow_dir(struct bnxt *bp, struct bnxt_tc_flow *flow,
+				 u16 src_fid)
+{
+	flow->l2_key.dir = (bp->pf.fw_fid == src_fid) ? BNXT_DIR_RX : BNXT_DIR_TX;
 }
 
 static void bnxt_tc_set_src_fid(struct bnxt *bp, struct bnxt_tc_flow *flow,
@@ -1284,7 +1513,7 @@
  * The hash-tables are already protected by the rhashtable API.
  */
 static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid,
-			    struct tc_cls_flower_offload *tc_flow_cmd)
+			    struct flow_cls_offload *tc_flow_cmd)
 {
 	struct bnxt_tc_flow_node *new_node, *old_node;
 	struct bnxt_tc_info *tc_info = bp->tc_info;
@@ -1307,10 +1536,12 @@
 		goto free_node;
 
 	bnxt_tc_set_src_fid(bp, flow, src_fid);
+	bnxt_tc_set_flow_dir(bp, flow, flow->src_fid);
 
 	if (!bnxt_tc_can_offload(bp, flow)) {
-		rc = -ENOSPC;
-		goto free_node;
+		rc = -EOPNOTSUPP;
+		kfree_rcu(new_node, rcu);
+		return rc;
 	}
 
 	/* If a flow exists with the same cookie, delete it */
@@ -1335,7 +1566,7 @@
 
 	/* send HWRM cmd to alloc the flow */
 	rc = bnxt_hwrm_cfa_flow_alloc(bp, flow, ref_flow_handle,
-				      tunnel_handle, &new_node->flow_handle);
+				      tunnel_handle, new_node);
 	if (rc)
 		goto put_tunnel;
 
@@ -1351,7 +1582,7 @@
 	return 0;
 
 hwrm_flow_free:
-	bnxt_hwrm_cfa_flow_free(bp, new_node->flow_handle);
+	bnxt_hwrm_cfa_flow_free(bp, new_node);
 put_tunnel:
 	bnxt_tc_put_tunnel_handle(bp, flow, new_node);
 put_l2:
@@ -1361,13 +1592,13 @@
 free_node:
 	kfree_rcu(new_node, rcu);
done:
-	netdev_err(bp->dev, "Error: %s: cookie=0x%lx error=%d",
+	netdev_err(bp->dev, "Error: %s: cookie=0x%lx error=%d\n",
 		   __func__, tc_flow_cmd->cookie, rc);
 	return rc;
 }
 
 static int bnxt_tc_del_flow(struct bnxt *bp,
-			    struct tc_cls_flower_offload *tc_flow_cmd)
+			    struct flow_cls_offload *tc_flow_cmd)
 {
 	struct bnxt_tc_info *tc_info = bp->tc_info;
 	struct bnxt_tc_flow_node *flow_node;
@@ -1382,7 +1613,7 @@
 }
 
 static int bnxt_tc_get_flow_stats(struct bnxt *bp,
-				  struct tc_cls_flower_offload *tc_flow_cmd)
+				  struct flow_cls_offload *tc_flow_cmd)
 {
 	struct bnxt_tc_flow_stats stats, *curr_stats, *prev_stats;
 	struct bnxt_tc_info *tc_info = bp->tc_info;
@@ -1407,18 +1638,45 @@
 	lastused = flow->lastused;
 	spin_unlock(&flow->stats_lock);
 
-	tcf_exts_stats_update(tc_flow_cmd->exts, stats.bytes, stats.packets,
-			      lastused);
+	flow_stats_update(&tc_flow_cmd->stats, stats.bytes, stats.packets, 0,
+			  lastused, FLOW_ACTION_HW_STATS_DELAYED);
 	return 0;
+}
+
+static void bnxt_fill_cfa_stats_req(struct bnxt *bp,
+				    struct bnxt_tc_flow_node *flow_node,
+				    __le16 *flow_handle, __le32 *flow_id)
+{
+	u16 handle;
+
+	if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE) {
+		*flow_id = flow_node->flow_id;
+
+		/* If flow_id is used to fetch flow stats then:
+		 * 1. lower 12 bits of flow_handle must be set to all 1s.
+		 * 2. 15th bit of flow_handle must specify the flow
+		 *    direction (TX/RX).
+		 */
+		if (flow_node->flow.l2_key.dir == BNXT_DIR_RX)
+			handle = CFA_FLOW_INFO_REQ_FLOW_HANDLE_DIR_RX |
+				 CFA_FLOW_INFO_REQ_FLOW_HANDLE_MAX_MASK;
+		else
+			handle = CFA_FLOW_INFO_REQ_FLOW_HANDLE_MAX_MASK;
+
+		*flow_handle = cpu_to_le16(handle);
+	} else {
+		*flow_handle = flow_node->flow_handle;
+	}
 }
 
 static int
 bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp, int num_flows,
 			     struct bnxt_tc_stats_batch stats_batch[])
 {
-	struct hwrm_cfa_flow_stats_output *resp = bp->hwrm_cmd_resp_addr;
 	struct hwrm_cfa_flow_stats_input req = { 0 };
+	struct hwrm_cfa_flow_stats_output *resp;
 	__le16 *req_flow_handles = &req.flow_handle_0;
+	__le32 *req_flow_ids = &req.flow_id_0;
 	int rc, i;
 
 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_STATS, -1, -1);
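A standalone sketch of the stats-request handle encoding that bnxt_fill_cfa_stats_req() builds; the two constants are assumptions matching the comment above (lower 12 bits all ones, bit 15 = direction), not values copied from bnxt_hsi.h:

#include <stdio.h>
#include <stdint.h>

#define FLOW_HANDLE_MAX_MASK	0x0fff	/* lower 12 bits must be all 1s */
#define FLOW_HANDLE_DIR_RX	0x8000	/* bit 15 selects the RX direction */

int main(void)
{
	uint16_t rx_handle = FLOW_HANDLE_DIR_RX | FLOW_HANDLE_MAX_MASK;
	uint16_t tx_handle = FLOW_HANDLE_MAX_MASK;

	printf("rx: 0x%04x tx: 0x%04x\n", rx_handle, tx_handle);
	/* prints rx: 0x8fff tx: 0x0fff; the fw then identifies the flow
	 * by the 32-bit flow_id sent alongside each handle
	 */
	return 0;
}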
@@ -1426,14 +1684,19 @@
 	for (i = 0; i < num_flows; i++) {
 		struct bnxt_tc_flow_node *flow_node = stats_batch[i].flow_node;
 
-		req_flow_handles[i] = flow_node->flow_handle;
+		bnxt_fill_cfa_stats_req(bp, flow_node,
+					&req_flow_handles[i], &req_flow_ids[i]);
 	}
 
 	mutex_lock(&bp->hwrm_cmd_lock);
 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 	if (!rc) {
-		__le64 *resp_packets = &resp->packet_0;
-		__le64 *resp_bytes = &resp->byte_0;
+		__le64 *resp_packets;
+		__le64 *resp_bytes;
+
+		resp = bnxt_get_hwrm_resp_addr(bp, &req);
+		resp_packets = &resp->packet_0;
+		resp_bytes = &resp->byte_0;
 
 		for (i = 0; i < num_flows; i++) {
 			stats_batch[i].hw_stats.packets =
@@ -1442,12 +1705,10 @@
 				le64_to_cpu(resp_bytes[i]);
 		}
 	} else {
-		netdev_info(bp->dev, "error rc=%d", rc);
+		netdev_info(bp->dev, "error rc=%d\n", rc);
 	}
 	mutex_unlock(&bp->hwrm_cmd_lock);
 
-	if (rc)
-		rc = -EIO;
 	return rc;
 }
 
@@ -1572,18 +1833,132 @@
 }
 
 int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid,
-			 struct tc_cls_flower_offload *cls_flower)
+			 struct flow_cls_offload *cls_flower)
 {
 	switch (cls_flower->command) {
-	case TC_CLSFLOWER_REPLACE:
+	case FLOW_CLS_REPLACE:
 		return bnxt_tc_add_flow(bp, src_fid, cls_flower);
-	case TC_CLSFLOWER_DESTROY:
+	case FLOW_CLS_DESTROY:
 		return bnxt_tc_del_flow(bp, cls_flower);
-	case TC_CLSFLOWER_STATS:
+	case FLOW_CLS_STATS:
 		return bnxt_tc_get_flow_stats(bp, cls_flower);
 	default:
 		return -EOPNOTSUPP;
 	}
+}
+
+static int bnxt_tc_setup_indr_block_cb(enum tc_setup_type type,
+				       void *type_data, void *cb_priv)
+{
+	struct bnxt_flower_indr_block_cb_priv *priv = cb_priv;
+	struct flow_cls_offload *flower = type_data;
+	struct bnxt *bp = priv->bp;
+
+	if (!tc_cls_can_offload_and_chain0(bp->dev, type_data))
+		return -EOPNOTSUPP;
+
+	switch (type) {
+	case TC_SETUP_CLSFLOWER:
+		return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, flower);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static struct bnxt_flower_indr_block_cb_priv *
+bnxt_tc_indr_block_cb_lookup(struct bnxt *bp, struct net_device *netdev)
+{
+	struct bnxt_flower_indr_block_cb_priv *cb_priv;
+
+	list_for_each_entry(cb_priv, &bp->tc_indr_block_list, list)
+		if (cb_priv->tunnel_netdev == netdev)
+			return cb_priv;
+
+	return NULL;
+}
+
+static void bnxt_tc_setup_indr_rel(void *cb_priv)
+{
+	struct bnxt_flower_indr_block_cb_priv *priv = cb_priv;
+
+	list_del(&priv->list);
+	kfree(priv);
+}
+
+static int bnxt_tc_setup_indr_block(struct net_device *netdev, struct Qdisc *sch, struct bnxt *bp,
+				    struct flow_block_offload *f, void *data,
+				    void (*cleanup)(struct flow_block_cb *block_cb))
+{
+	struct bnxt_flower_indr_block_cb_priv *cb_priv;
+	struct flow_block_cb *block_cb;
+
+	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+		return -EOPNOTSUPP;
+
+	switch (f->command) {
+	case FLOW_BLOCK_BIND:
+		cb_priv = kmalloc(sizeof(*cb_priv), GFP_KERNEL);
+		if (!cb_priv)
+			return -ENOMEM;
+
+		cb_priv->tunnel_netdev = netdev;
+		cb_priv->bp = bp;
+		list_add(&cb_priv->list, &bp->tc_indr_block_list);
+
+		block_cb = flow_indr_block_cb_alloc(bnxt_tc_setup_indr_block_cb,
+						    cb_priv, cb_priv,
+						    bnxt_tc_setup_indr_rel, f,
+						    netdev, sch, data, bp, cleanup);
+		if (IS_ERR(block_cb)) {
+			list_del(&cb_priv->list);
+			kfree(cb_priv);
+			return PTR_ERR(block_cb);
+		}
+
+		flow_block_cb_add(block_cb, f);
+		list_add_tail(&block_cb->driver_list, &bnxt_block_cb_list);
+		break;
+	case FLOW_BLOCK_UNBIND:
+		cb_priv = bnxt_tc_indr_block_cb_lookup(bp, netdev);
+		if (!cb_priv)
+			return -ENOENT;
+
+		block_cb = flow_block_cb_lookup(f->block,
+						bnxt_tc_setup_indr_block_cb,
+						cb_priv);
+		if (!block_cb)
+			return -ENOENT;
+
+		flow_indr_block_cb_remove(block_cb, f);
+		list_del(&block_cb->driver_list);
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+	return 0;
+}
+
+static bool bnxt_is_netdev_indr_offload(struct net_device *netdev)
+{
+	return netif_is_vxlan(netdev);
+}
+
+static int bnxt_tc_setup_indr_cb(struct net_device *netdev, struct Qdisc *sch, void *cb_priv,
+				 enum tc_setup_type type, void *type_data,
+				 void *data,
+				 void (*cleanup)(struct flow_block_cb *block_cb))
+{
+	if (!bnxt_is_netdev_indr_offload(netdev))
+		return -EOPNOTSUPP;
+
+	switch (type) {
+	case TC_SETUP_BLOCK:
+		return bnxt_tc_setup_indr_block(netdev, sch, cb_priv, type_data, data, cleanup);
+	default:
+		break;
+	}
+
+	return -EOPNOTSUPP;
 }
 
 static const struct rhashtable_params bnxt_tc_flow_ht_params = {
@@ -1622,11 +1997,8 @@
 	struct bnxt_tc_info *tc_info;
 	int rc;
 
-	if (bp->hwrm_spec_code < 0x10803) {
-		netdev_warn(bp->dev,
-			    "Firmware does not support TC flower offload.\n");
-		return -ENOTSUPP;
-	}
+	if (bp->hwrm_spec_code < 0x10803)
+		return 0;
 
 	tc_info = kzalloc(sizeof(*tc_info), GFP_KERNEL);
 	if (!tc_info)
@@ -1669,7 +2041,15 @@
 	bp->dev->hw_features |= NETIF_F_HW_TC;
 	bp->dev->features |= NETIF_F_HW_TC;
 	bp->tc_info = tc_info;
-	return 0;
+
+	/* init indirect block notifications */
+	INIT_LIST_HEAD(&bp->tc_indr_block_list);
+
+	rc = flow_indr_dev_register(bnxt_tc_setup_indr_cb, bp);
+	if (!rc)
+		return 0;
+
+	rhashtable_destroy(&tc_info->encap_table);
 
 destroy_decap_table:
 	rhashtable_destroy(&tc_info->decap_table);
@@ -1691,6 +2071,8 @@
 	if (!bnxt_tc_flower_enabled(bp))
 		return;
 
+	flow_indr_dev_unregister(bnxt_tc_setup_indr_cb, bp,
+				 bnxt_tc_setup_indr_rel);
 	rhashtable_destroy(&tc_info->flow_table);
 	rhashtable_destroy(&tc_info->l2_table);
 	rhashtable_destroy(&tc_info->decap_l2_table);