forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-12-19 9370bb92b2d16684ee45cf24e879c93c509162da
kernel/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
@@ -58,11 +58,90 @@
 	PEDIT_FIELDS(IP6_, DST_63_32, 4, nat_lip, 4),
 	PEDIT_FIELDS(IP6_, DST_95_64, 4, nat_lip, 8),
 	PEDIT_FIELDS(IP6_, DST_127_96, 4, nat_lip, 12),
-	PEDIT_FIELDS(TCP_, SPORT, 2, nat_fport, 0),
-	PEDIT_FIELDS(TCP_, DPORT, 2, nat_lport, 0),
-	PEDIT_FIELDS(UDP_, SPORT, 2, nat_fport, 0),
-	PEDIT_FIELDS(UDP_, DPORT, 2, nat_lport, 0),
 };
+
+static const struct cxgb4_natmode_config cxgb4_natmode_config_array[] = {
+	/* Default supported NAT modes */
+	{
+		.chip = CHELSIO_T5,
+		.flags = CXGB4_ACTION_NATMODE_NONE,
+		.natmode = NAT_MODE_NONE,
+	},
+	{
+		.chip = CHELSIO_T5,
+		.flags = CXGB4_ACTION_NATMODE_DIP,
+		.natmode = NAT_MODE_DIP,
+	},
+	{
+		.chip = CHELSIO_T5,
+		.flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_DPORT,
+		.natmode = NAT_MODE_DIP_DP,
+	},
+	{
+		.chip = CHELSIO_T5,
+		.flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_DPORT |
+			 CXGB4_ACTION_NATMODE_SIP,
+		.natmode = NAT_MODE_DIP_DP_SIP,
+	},
+	{
+		.chip = CHELSIO_T5,
+		.flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_DPORT |
+			 CXGB4_ACTION_NATMODE_SPORT,
+		.natmode = NAT_MODE_DIP_DP_SP,
+	},
+	{
+		.chip = CHELSIO_T5,
+		.flags = CXGB4_ACTION_NATMODE_SIP | CXGB4_ACTION_NATMODE_SPORT,
+		.natmode = NAT_MODE_SIP_SP,
+	},
+	{
+		.chip = CHELSIO_T5,
+		.flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_SIP |
+			 CXGB4_ACTION_NATMODE_SPORT,
+		.natmode = NAT_MODE_DIP_SIP_SP,
+	},
+	{
+		.chip = CHELSIO_T5,
+		.flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_SIP |
+			 CXGB4_ACTION_NATMODE_DPORT |
+			 CXGB4_ACTION_NATMODE_SPORT,
+		.natmode = NAT_MODE_ALL,
+	},
+	/* T6+ can ignore L4 ports when they're disabled. */
+	{
+		.chip = CHELSIO_T6,
+		.flags = CXGB4_ACTION_NATMODE_SIP,
+		.natmode = NAT_MODE_SIP_SP,
+	},
+	{
+		.chip = CHELSIO_T6,
+		.flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_SPORT,
+		.natmode = NAT_MODE_DIP_DP_SP,
+	},
+	{
+		.chip = CHELSIO_T6,
+		.flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_SIP,
+		.natmode = NAT_MODE_ALL,
+	},
+};
+
+static void cxgb4_action_natmode_tweak(struct ch_filter_specification *fs,
+				       u8 natmode_flags)
+{
+	u8 i = 0;
+
+	/* Translate the enabled NAT 4-tuple fields to one of the
+	 * hardware supported NAT mode configurations. This ensures
+	 * that we pick a valid combination, where the disabled fields
+	 * do not get overwritten to 0.
+	 */
+	for (i = 0; i < ARRAY_SIZE(cxgb4_natmode_config_array); i++) {
+		if (cxgb4_natmode_config_array[i].flags == natmode_flags) {
+			fs->nat_mode = cxgb4_natmode_config_array[i].natmode;
+			return;
+		}
+	}
+}
 
 static struct ch_tc_flower_entry *allocate_flower_entry(void)
 {
@@ -81,31 +160,29 @@
 }
 
 static void cxgb4_process_flow_match(struct net_device *dev,
-				     struct tc_cls_flower_offload *cls,
+				     struct flow_rule *rule,
 				     struct ch_filter_specification *fs)
 {
 	u16 addr_type = 0;
 
-	if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
-		struct flow_dissector_key_control *key =
-			skb_flow_dissector_target(cls->dissector,
-						  FLOW_DISSECTOR_KEY_CONTROL,
-						  cls->key);
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
+		struct flow_match_control match;
 
-		addr_type = key->addr_type;
+		flow_rule_match_control(rule, &match);
+		addr_type = match.key->addr_type;
+	} else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
+		addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
+	} else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
+		addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
 	}
 
-	if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
-		struct flow_dissector_key_basic *key =
-			skb_flow_dissector_target(cls->dissector,
-						  FLOW_DISSECTOR_KEY_BASIC,
-						  cls->key);
-		struct flow_dissector_key_basic *mask =
-			skb_flow_dissector_target(cls->dissector,
-						  FLOW_DISSECTOR_KEY_BASIC,
-						  cls->mask);
-		u16 ethtype_key = ntohs(key->n_proto);
-		u16 ethtype_mask = ntohs(mask->n_proto);
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+		struct flow_match_basic match;
+		u16 ethtype_key, ethtype_mask;
+
+		flow_rule_match_basic(rule, &match);
+		ethtype_key = ntohs(match.key->n_proto);
+		ethtype_mask = ntohs(match.mask->n_proto);
 
 		if (ethtype_key == ETH_P_ALL) {
 			ethtype_key = 0;
@@ -117,115 +194,89 @@
 
 		fs->val.ethtype = ethtype_key;
 		fs->mask.ethtype = ethtype_mask;
-		fs->val.proto = key->ip_proto;
-		fs->mask.proto = mask->ip_proto;
+		fs->val.proto = match.key->ip_proto;
+		fs->mask.proto = match.mask->ip_proto;
 	}
 
 	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
-		struct flow_dissector_key_ipv4_addrs *key =
-			skb_flow_dissector_target(cls->dissector,
-						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
-						  cls->key);
-		struct flow_dissector_key_ipv4_addrs *mask =
-			skb_flow_dissector_target(cls->dissector,
-						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
-						  cls->mask);
+		struct flow_match_ipv4_addrs match;
+
+		flow_rule_match_ipv4_addrs(rule, &match);
 		fs->type = 0;
-		memcpy(&fs->val.lip[0], &key->dst, sizeof(key->dst));
-		memcpy(&fs->val.fip[0], &key->src, sizeof(key->src));
-		memcpy(&fs->mask.lip[0], &mask->dst, sizeof(mask->dst));
-		memcpy(&fs->mask.fip[0], &mask->src, sizeof(mask->src));
+		memcpy(&fs->val.lip[0], &match.key->dst, sizeof(match.key->dst));
+		memcpy(&fs->val.fip[0], &match.key->src, sizeof(match.key->src));
+		memcpy(&fs->mask.lip[0], &match.mask->dst, sizeof(match.mask->dst));
+		memcpy(&fs->mask.fip[0], &match.mask->src, sizeof(match.mask->src));
 
 		/* also initialize nat_lip/fip to same values */
-		memcpy(&fs->nat_lip[0], &key->dst, sizeof(key->dst));
-		memcpy(&fs->nat_fip[0], &key->src, sizeof(key->src));
-
+		memcpy(&fs->nat_lip[0], &match.key->dst, sizeof(match.key->dst));
+		memcpy(&fs->nat_fip[0], &match.key->src, sizeof(match.key->src));
 	}
 
 	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
-		struct flow_dissector_key_ipv6_addrs *key =
-			skb_flow_dissector_target(cls->dissector,
-						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
-						  cls->key);
-		struct flow_dissector_key_ipv6_addrs *mask =
-			skb_flow_dissector_target(cls->dissector,
-						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
-						  cls->mask);
+		struct flow_match_ipv6_addrs match;
 
+		flow_rule_match_ipv6_addrs(rule, &match);
 		fs->type = 1;
-		memcpy(&fs->val.lip[0], key->dst.s6_addr, sizeof(key->dst));
-		memcpy(&fs->val.fip[0], key->src.s6_addr, sizeof(key->src));
-		memcpy(&fs->mask.lip[0], mask->dst.s6_addr, sizeof(mask->dst));
-		memcpy(&fs->mask.fip[0], mask->src.s6_addr, sizeof(mask->src));
+		memcpy(&fs->val.lip[0], match.key->dst.s6_addr,
+		       sizeof(match.key->dst));
+		memcpy(&fs->val.fip[0], match.key->src.s6_addr,
+		       sizeof(match.key->src));
+		memcpy(&fs->mask.lip[0], match.mask->dst.s6_addr,
+		       sizeof(match.mask->dst));
+		memcpy(&fs->mask.fip[0], match.mask->src.s6_addr,
+		       sizeof(match.mask->src));
 
 		/* also initialize nat_lip/fip to same values */
-		memcpy(&fs->nat_lip[0], key->dst.s6_addr, sizeof(key->dst));
-		memcpy(&fs->nat_fip[0], key->src.s6_addr, sizeof(key->src));
+		memcpy(&fs->nat_lip[0], match.key->dst.s6_addr,
+		       sizeof(match.key->dst));
+		memcpy(&fs->nat_fip[0], match.key->src.s6_addr,
+		       sizeof(match.key->src));
 	}
 
-	if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
-		struct flow_dissector_key_ports *key, *mask;
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
+		struct flow_match_ports match;
 
-		key = skb_flow_dissector_target(cls->dissector,
-						FLOW_DISSECTOR_KEY_PORTS,
-						cls->key);
-		mask = skb_flow_dissector_target(cls->dissector,
-						 FLOW_DISSECTOR_KEY_PORTS,
-						 cls->mask);
-		fs->val.lport = cpu_to_be16(key->dst);
-		fs->mask.lport = cpu_to_be16(mask->dst);
-		fs->val.fport = cpu_to_be16(key->src);
-		fs->mask.fport = cpu_to_be16(mask->src);
+		flow_rule_match_ports(rule, &match);
+		fs->val.lport = be16_to_cpu(match.key->dst);
+		fs->mask.lport = be16_to_cpu(match.mask->dst);
+		fs->val.fport = be16_to_cpu(match.key->src);
+		fs->mask.fport = be16_to_cpu(match.mask->src);
 
 		/* also initialize nat_lport/fport to same values */
-		fs->nat_lport = cpu_to_be16(key->dst);
-		fs->nat_fport = cpu_to_be16(key->src);
+		fs->nat_lport = fs->val.lport;
+		fs->nat_fport = fs->val.fport;
 	}
 
-	if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_IP)) {
-		struct flow_dissector_key_ip *key, *mask;
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
+		struct flow_match_ip match;
 
-		key = skb_flow_dissector_target(cls->dissector,
-						FLOW_DISSECTOR_KEY_IP,
-						cls->key);
-		mask = skb_flow_dissector_target(cls->dissector,
-						 FLOW_DISSECTOR_KEY_IP,
-						 cls->mask);
-		fs->val.tos = key->tos;
-		fs->mask.tos = mask->tos;
+		flow_rule_match_ip(rule, &match);
+		fs->val.tos = match.key->tos;
+		fs->mask.tos = match.mask->tos;
 	}
 
-	if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
-		struct flow_dissector_key_keyid *key, *mask;
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
+		struct flow_match_enc_keyid match;
 
-		key = skb_flow_dissector_target(cls->dissector,
-						FLOW_DISSECTOR_KEY_ENC_KEYID,
-						cls->key);
-		mask = skb_flow_dissector_target(cls->dissector,
-						 FLOW_DISSECTOR_KEY_ENC_KEYID,
-						 cls->mask);
-		fs->val.vni = be32_to_cpu(key->keyid);
-		fs->mask.vni = be32_to_cpu(mask->keyid);
+		flow_rule_match_enc_keyid(rule, &match);
+		fs->val.vni = be32_to_cpu(match.key->keyid);
+		fs->mask.vni = be32_to_cpu(match.mask->keyid);
 		if (fs->mask.vni) {
 			fs->val.encap_vld = 1;
 			fs->mask.encap_vld = 1;
 		}
 	}
 
-	if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
-		struct flow_dissector_key_vlan *key, *mask;
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+		struct flow_match_vlan match;
 		u16 vlan_tci, vlan_tci_mask;
 
-		key = skb_flow_dissector_target(cls->dissector,
-						FLOW_DISSECTOR_KEY_VLAN,
-						cls->key);
-		mask = skb_flow_dissector_target(cls->dissector,
-						 FLOW_DISSECTOR_KEY_VLAN,
-						 cls->mask);
-		vlan_tci = key->vlan_id | (key->vlan_priority <<
-					   VLAN_PRIO_SHIFT);
-		vlan_tci_mask = mask->vlan_id | (mask->vlan_priority <<
-						 VLAN_PRIO_SHIFT);
+		flow_rule_match_vlan(rule, &match);
+		vlan_tci = match.key->vlan_id | (match.key->vlan_priority <<
+						 VLAN_PRIO_SHIFT);
+		vlan_tci_mask = match.mask->vlan_id | (match.mask->vlan_priority <<
+						       VLAN_PRIO_SHIFT);
 		fs->val.ivlan = vlan_tci;
 		fs->mask.ivlan = vlan_tci_mask;
 
@@ -255,12 +306,13 @@
 }
 
 static int cxgb4_validate_flow_match(struct net_device *dev,
-				     struct tc_cls_flower_offload *cls)
+				     struct flow_rule *rule)
 {
+	struct flow_dissector *dissector = rule->match.dissector;
 	u16 ethtype_mask = 0;
 	u16 ethtype_key = 0;
 
-	if (cls->dissector->used_keys &
+	if (dissector->used_keys &
 	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
 	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
 	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
@@ -270,36 +322,29 @@
 	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
 	      BIT(FLOW_DISSECTOR_KEY_IP))) {
 		netdev_warn(dev, "Unsupported key used: 0x%x\n",
-			    cls->dissector->used_keys);
+			    dissector->used_keys);
 		return -EOPNOTSUPP;
 	}
 
-	if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
-		struct flow_dissector_key_basic *key =
-			skb_flow_dissector_target(cls->dissector,
-						  FLOW_DISSECTOR_KEY_BASIC,
-						  cls->key);
-		struct flow_dissector_key_basic *mask =
-			skb_flow_dissector_target(cls->dissector,
-						  FLOW_DISSECTOR_KEY_BASIC,
-						  cls->mask);
-		ethtype_key = ntohs(key->n_proto);
-		ethtype_mask = ntohs(mask->n_proto);
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+		struct flow_match_basic match;
+
+		flow_rule_match_basic(rule, &match);
+		ethtype_key = ntohs(match.key->n_proto);
+		ethtype_mask = ntohs(match.mask->n_proto);
 	}
 
-	if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_IP)) {
+	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
 		u16 eth_ip_type = ethtype_key & ethtype_mask;
-		struct flow_dissector_key_ip *mask;
+		struct flow_match_ip match;
 
 		if (eth_ip_type != ETH_P_IP && eth_ip_type != ETH_P_IPV6) {
 			netdev_err(dev, "IP Key supported only with IPv4/v6");
 			return -EINVAL;
 		}
 
-		mask = skb_flow_dissector_target(cls->dissector,
-						 FLOW_DISSECTOR_KEY_IP,
-						 cls->mask);
-		if (mask->ttl) {
+		flow_rule_match_ip(rule, &match);
+		if (match.mask->ttl) {
 			netdev_warn(dev, "ttl match unsupported for offload");
 			return -EOPNOTSUPP;
 		}
@@ -327,10 +372,11 @@
 }
 
 static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
-				u32 mask, u32 offset, u8 htype)
+				u32 mask, u32 offset, u8 htype,
+				u8 *natmode_flags)
 {
 	switch (htype) {
-	case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
+	case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
 		switch (offset) {
 		case PEDIT_ETH_DMAC_31_0:
 			fs->newdmac = 1;
@@ -348,128 +394,173 @@
 			offload_pedit(fs, val, mask, ETH_SMAC_47_16);
 		}
 		break;
-	case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
+	case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
 		switch (offset) {
 		case PEDIT_IP4_SRC:
 			offload_pedit(fs, val, mask, IP4_SRC);
+			*natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
 			break;
 		case PEDIT_IP4_DST:
 			offload_pedit(fs, val, mask, IP4_DST);
+			*natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
 		}
-		fs->nat_mode = NAT_MODE_ALL;
 		break;
-	case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
+	case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
		switch (offset) {
 		case PEDIT_IP6_SRC_31_0:
 			offload_pedit(fs, val, mask, IP6_SRC_31_0);
+			*natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
 			break;
 		case PEDIT_IP6_SRC_63_32:
 			offload_pedit(fs, val, mask, IP6_SRC_63_32);
+			*natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
 			break;
 		case PEDIT_IP6_SRC_95_64:
 			offload_pedit(fs, val, mask, IP6_SRC_95_64);
+			*natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
 			break;
 		case PEDIT_IP6_SRC_127_96:
 			offload_pedit(fs, val, mask, IP6_SRC_127_96);
+			*natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
 			break;
 		case PEDIT_IP6_DST_31_0:
 			offload_pedit(fs, val, mask, IP6_DST_31_0);
+			*natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
 			break;
 		case PEDIT_IP6_DST_63_32:
 			offload_pedit(fs, val, mask, IP6_DST_63_32);
+			*natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
 			break;
 		case PEDIT_IP6_DST_95_64:
 			offload_pedit(fs, val, mask, IP6_DST_95_64);
+			*natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
 			break;
 		case PEDIT_IP6_DST_127_96:
 			offload_pedit(fs, val, mask, IP6_DST_127_96);
+			*natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
 		}
-		fs->nat_mode = NAT_MODE_ALL;
 		break;
-	case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
+	case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
 		switch (offset) {
 		case PEDIT_TCP_SPORT_DPORT:
-			if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
-				offload_pedit(fs, cpu_to_be32(val) >> 16,
-					      cpu_to_be32(mask) >> 16,
-					      TCP_SPORT);
-			else
-				offload_pedit(fs, cpu_to_be32(val),
-					      cpu_to_be32(mask), TCP_DPORT);
+			if (~mask & PEDIT_TCP_UDP_SPORT_MASK) {
+				fs->nat_fport = val;
+				*natmode_flags |= CXGB4_ACTION_NATMODE_SPORT;
+			} else {
+				fs->nat_lport = val >> 16;
+				*natmode_flags |= CXGB4_ACTION_NATMODE_DPORT;
+			}
 		}
-		fs->nat_mode = NAT_MODE_ALL;
 		break;
-	case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
+	case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
 		switch (offset) {
 		case PEDIT_UDP_SPORT_DPORT:
-			if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
-				offload_pedit(fs, cpu_to_be32(val) >> 16,
-					      cpu_to_be32(mask) >> 16,
-					      UDP_SPORT);
-			else
-				offload_pedit(fs, cpu_to_be32(val),
-					      cpu_to_be32(mask), UDP_DPORT);
+			if (~mask & PEDIT_TCP_UDP_SPORT_MASK) {
+				fs->nat_fport = val;
+				*natmode_flags |= CXGB4_ACTION_NATMODE_SPORT;
+			} else {
+				fs->nat_lport = val >> 16;
+				*natmode_flags |= CXGB4_ACTION_NATMODE_DPORT;
+			}
 		}
-		fs->nat_mode = NAT_MODE_ALL;
+		break;
 	}
 }
 
-static void cxgb4_process_flow_actions(struct net_device *in,
-				       struct tc_cls_flower_offload *cls,
-				       struct ch_filter_specification *fs)
+static int cxgb4_action_natmode_validate(struct adapter *adap, u8 natmode_flags,
					 struct netlink_ext_ack *extack)
 {
-	const struct tc_action *a;
+	u8 i = 0;
+
+	/* Extract the NAT mode to enable based on what 4-tuple fields
+	 * are enabled to be overwritten. This ensures that the
+	 * disabled fields don't get overwritten to 0.
+	 */
+	for (i = 0; i < ARRAY_SIZE(cxgb4_natmode_config_array); i++) {
+		const struct cxgb4_natmode_config *c;
+
+		c = &cxgb4_natmode_config_array[i];
+		if (CHELSIO_CHIP_VERSION(adap->params.chip) >= c->chip &&
+		    natmode_flags == c->flags)
+			return 0;
+	}
+	NL_SET_ERR_MSG_MOD(extack, "Unsupported NAT mode 4-tuple combination");
+	return -EOPNOTSUPP;
+}
+
+void cxgb4_process_flow_actions(struct net_device *in,
+				struct flow_action *actions,
+				struct ch_filter_specification *fs)
+{
+	struct flow_action_entry *act;
+	u8 natmode_flags = 0;
 	int i;
 
-	tcf_exts_for_each_action(i, a, cls->exts) {
-		if (is_tcf_gact_ok(a)) {
+	flow_action_for_each(i, act, actions) {
+		switch (act->id) {
+		case FLOW_ACTION_ACCEPT:
 			fs->action = FILTER_PASS;
-		} else if (is_tcf_gact_shot(a)) {
+			break;
+		case FLOW_ACTION_DROP:
 			fs->action = FILTER_DROP;
-		} else if (is_tcf_mirred_egress_redirect(a)) {
-			struct net_device *out = tcf_mirred_dev(a);
+			break;
+		case FLOW_ACTION_MIRRED:
+		case FLOW_ACTION_REDIRECT: {
+			struct net_device *out = act->dev;
 			struct port_info *pi = netdev_priv(out);
 
 			fs->action = FILTER_SWITCH;
 			fs->eport = pi->port_id;
-		} else if (is_tcf_vlan(a)) {
-			u32 vlan_action = tcf_vlan_action(a);
-			u8 prio = tcf_vlan_push_prio(a);
-			u16 vid = tcf_vlan_push_vid(a);
+			}
+			break;
+		case FLOW_ACTION_VLAN_POP:
+		case FLOW_ACTION_VLAN_PUSH:
+		case FLOW_ACTION_VLAN_MANGLE: {
+			u8 prio = act->vlan.prio;
+			u16 vid = act->vlan.vid;
 			u16 vlan_tci = (prio << VLAN_PRIO_SHIFT) | vid;
-
-			switch (vlan_action) {
-			case TCA_VLAN_ACT_POP:
+			switch (act->id) {
+			case FLOW_ACTION_VLAN_POP:
 				fs->newvlan |= VLAN_REMOVE;
 				break;
-			case TCA_VLAN_ACT_PUSH:
+			case FLOW_ACTION_VLAN_PUSH:
 				fs->newvlan |= VLAN_INSERT;
 				fs->vlan = vlan_tci;
 				break;
-			case TCA_VLAN_ACT_MODIFY:
+			case FLOW_ACTION_VLAN_MANGLE:
 				fs->newvlan |= VLAN_REWRITE;
 				fs->vlan = vlan_tci;
 				break;
 			default:
 				break;
 			}
-		} else if (is_tcf_pedit(a)) {
+			}
+			break;
+		case FLOW_ACTION_MANGLE: {
 			u32 mask, val, offset;
-			int nkeys, i;
 			u8 htype;
 
-			nkeys = tcf_pedit_nkeys(a);
-			for (i = 0; i < nkeys; i++) {
-				htype = tcf_pedit_htype(a, i);
-				mask = tcf_pedit_mask(a, i);
-				val = tcf_pedit_val(a, i);
-				offset = tcf_pedit_offset(a, i);
+			htype = act->mangle.htype;
+			mask = act->mangle.mask;
+			val = act->mangle.val;
+			offset = act->mangle.offset;
 
-				process_pedit_field(fs, val, mask, offset,
-						    htype);
+			process_pedit_field(fs, val, mask, offset, htype,
+					    &natmode_flags);
 			}
+			break;
+		case FLOW_ACTION_QUEUE:
+			fs->action = FILTER_PASS;
+			fs->dirsteer = 1;
+			fs->iq = act->queue.index;
+			break;
+		default:
+			break;
 		}
 	}
+	if (natmode_flags)
+		cxgb4_action_natmode_tweak(fs, natmode_flags);
+
 }
 
 static bool valid_l4_mask(u32 mask)
@@ -486,126 +577,144 @@
 }
 
 static bool valid_pedit_action(struct net_device *dev,
-			       const struct tc_action *a)
+			       const struct flow_action_entry *act,
+			       u8 *natmode_flags)
 {
 	u32 mask, offset;
-	u8 cmd, htype;
-	int nkeys, i;
+	u8 htype;
 
-	nkeys = tcf_pedit_nkeys(a);
-	for (i = 0; i < nkeys; i++) {
-		htype = tcf_pedit_htype(a, i);
-		cmd = tcf_pedit_cmd(a, i);
-		mask = tcf_pedit_mask(a, i);
-		offset = tcf_pedit_offset(a, i);
+	htype = act->mangle.htype;
+	mask = act->mangle.mask;
+	offset = act->mangle.offset;
 
-		if (cmd != TCA_PEDIT_KEY_EX_CMD_SET) {
-			netdev_err(dev, "%s: Unsupported pedit cmd\n",
-				   __func__);
-			return false;
-		}
-
-		switch (htype) {
-		case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
-			switch (offset) {
-			case PEDIT_ETH_DMAC_31_0:
-			case PEDIT_ETH_DMAC_47_32_SMAC_15_0:
-			case PEDIT_ETH_SMAC_47_16:
-				break;
-			default:
-				netdev_err(dev, "%s: Unsupported pedit field\n",
-					   __func__);
-				return false;
-			}
-			break;
-		case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
-			switch (offset) {
-			case PEDIT_IP4_SRC:
-			case PEDIT_IP4_DST:
-				break;
-			default:
-				netdev_err(dev, "%s: Unsupported pedit field\n",
-					   __func__);
-				return false;
-			}
-			break;
-		case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
-			switch (offset) {
-			case PEDIT_IP6_SRC_31_0:
-			case PEDIT_IP6_SRC_63_32:
-			case PEDIT_IP6_SRC_95_64:
-			case PEDIT_IP6_SRC_127_96:
-			case PEDIT_IP6_DST_31_0:
-			case PEDIT_IP6_DST_63_32:
-			case PEDIT_IP6_DST_95_64:
-			case PEDIT_IP6_DST_127_96:
-				break;
-			default:
-				netdev_err(dev, "%s: Unsupported pedit field\n",
-					   __func__);
-				return false;
-			}
-			break;
-		case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
-			switch (offset) {
-			case PEDIT_TCP_SPORT_DPORT:
-				if (!valid_l4_mask(~mask)) {
-					netdev_err(dev, "%s: Unsupported mask for TCP L4 ports\n",
-						   __func__);
-					return false;
-				}
-				break;
-			default:
-				netdev_err(dev, "%s: Unsupported pedit field\n",
-					   __func__);
-				return false;
-			}
-			break;
-		case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
-			switch (offset) {
-			case PEDIT_UDP_SPORT_DPORT:
-				if (!valid_l4_mask(~mask)) {
-					netdev_err(dev, "%s: Unsupported mask for UDP L4 ports\n",
-						   __func__);
-					return false;
-				}
-				break;
-			default:
-				netdev_err(dev, "%s: Unsupported pedit field\n",
-					   __func__);
-				return false;
-			}
+	switch (htype) {
+	case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
+		switch (offset) {
+		case PEDIT_ETH_DMAC_31_0:
+		case PEDIT_ETH_DMAC_47_32_SMAC_15_0:
+		case PEDIT_ETH_SMAC_47_16:
 			break;
 		default:
-			netdev_err(dev, "%s: Unsupported pedit type\n",
+			netdev_err(dev, "%s: Unsupported pedit field\n",
 				   __func__);
 			return false;
 		}
+		break;
+	case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
+		switch (offset) {
+		case PEDIT_IP4_SRC:
+			*natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
+			break;
+		case PEDIT_IP4_DST:
+			*natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
+			break;
+		default:
+			netdev_err(dev, "%s: Unsupported pedit field\n",
+				   __func__);
+			return false;
+		}
+		break;
+	case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
+		switch (offset) {
+		case PEDIT_IP6_SRC_31_0:
+		case PEDIT_IP6_SRC_63_32:
+		case PEDIT_IP6_SRC_95_64:
+		case PEDIT_IP6_SRC_127_96:
+			*natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
+			break;
+		case PEDIT_IP6_DST_31_0:
+		case PEDIT_IP6_DST_63_32:
+		case PEDIT_IP6_DST_95_64:
+		case PEDIT_IP6_DST_127_96:
+			*natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
+			break;
+		default:
+			netdev_err(dev, "%s: Unsupported pedit field\n",
+				   __func__);
+			return false;
+		}
+		break;
+	case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
+		switch (offset) {
+		case PEDIT_TCP_SPORT_DPORT:
+			if (!valid_l4_mask(~mask)) {
+				netdev_err(dev, "%s: Unsupported mask for TCP L4 ports\n",
+					   __func__);
+				return false;
+			}
+			if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
+				*natmode_flags |= CXGB4_ACTION_NATMODE_SPORT;
+			else
+				*natmode_flags |= CXGB4_ACTION_NATMODE_DPORT;
+			break;
+		default:
+			netdev_err(dev, "%s: Unsupported pedit field\n",
+				   __func__);
+			return false;
+		}
+		break;
+	case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
+		switch (offset) {
+		case PEDIT_UDP_SPORT_DPORT:
+			if (!valid_l4_mask(~mask)) {
+				netdev_err(dev, "%s: Unsupported mask for UDP L4 ports\n",
+					   __func__);
+				return false;
+			}
+			if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
+				*natmode_flags |= CXGB4_ACTION_NATMODE_SPORT;
+			else
+				*natmode_flags |= CXGB4_ACTION_NATMODE_DPORT;
+			break;
+		default:
+			netdev_err(dev, "%s: Unsupported pedit field\n",
+				   __func__);
+			return false;
+		}
+		break;
+	default:
+		netdev_err(dev, "%s: Unsupported pedit type\n", __func__);
+		return false;
 	}
 	return true;
 }
 
-static int cxgb4_validate_flow_actions(struct net_device *dev,
-				       struct tc_cls_flower_offload *cls)
+int cxgb4_validate_flow_actions(struct net_device *dev,
+				struct flow_action *actions,
+				struct netlink_ext_ack *extack,
+				u8 matchall_filter)
 {
-	const struct tc_action *a;
+	struct adapter *adap = netdev2adap(dev);
+	struct flow_action_entry *act;
 	bool act_redir = false;
 	bool act_pedit = false;
 	bool act_vlan = false;
+	u8 natmode_flags = 0;
 	int i;
 
-	tcf_exts_for_each_action(i, a, cls->exts) {
-		if (is_tcf_gact_ok(a)) {
-			/* Do nothing */
-		} else if (is_tcf_gact_shot(a)) {
-			/* Do nothing */
-		} else if (is_tcf_mirred_egress_redirect(a)) {
-			struct adapter *adap = netdev2adap(dev);
-			struct net_device *n_dev, *target_dev;
-			unsigned int i;
-			bool found = false;
+	if (!flow_action_basic_hw_stats_check(actions, extack))
+		return -EOPNOTSUPP;
 
-			target_dev = tcf_mirred_dev(a);
+	flow_action_for_each(i, act, actions) {
+		switch (act->id) {
+		case FLOW_ACTION_ACCEPT:
+		case FLOW_ACTION_DROP:
+			/* Do nothing */
+			break;
+		case FLOW_ACTION_MIRRED:
+		case FLOW_ACTION_REDIRECT: {
+			struct net_device *n_dev, *target_dev;
+			bool found = false;
+			unsigned int i;
+
+			if (act->id == FLOW_ACTION_MIRRED &&
+			    !matchall_filter) {
+				NL_SET_ERR_MSG_MOD(extack,
+						   "Egress mirror action is only supported for tc-matchall");
+				return -EOPNOTSUPP;
+			}
+
+			target_dev = act->dev;
 			for_each_port(adap, i) {
 				n_dev = adap->port[i];
 				if (target_dev == n_dev) {
@@ -623,15 +732,18 @@
 					return -EINVAL;
 				}
 			act_redir = true;
-		} else if (is_tcf_vlan(a)) {
-			u16 proto = be16_to_cpu(tcf_vlan_push_proto(a));
-			u32 vlan_action = tcf_vlan_action(a);
+			}
+			break;
+		case FLOW_ACTION_VLAN_POP:
+		case FLOW_ACTION_VLAN_PUSH:
+		case FLOW_ACTION_VLAN_MANGLE: {
+			u16 proto = be16_to_cpu(act->vlan.proto);
 
-			switch (vlan_action) {
-			case TCA_VLAN_ACT_POP:
+			switch (act->id) {
+			case FLOW_ACTION_VLAN_POP:
 				break;
-			case TCA_VLAN_ACT_PUSH:
-			case TCA_VLAN_ACT_MODIFY:
+			case FLOW_ACTION_VLAN_PUSH:
+			case FLOW_ACTION_VLAN_MANGLE:
 				if (proto != ETH_P_8021Q) {
 					netdev_err(dev, "%s: Unsupported vlan proto\n",
 						   __func__);
@@ -644,13 +756,21 @@
 				return -EOPNOTSUPP;
 			}
 			act_vlan = true;
-		} else if (is_tcf_pedit(a)) {
-			bool pedit_valid = valid_pedit_action(dev, a);
+			}
+			break;
+		case FLOW_ACTION_MANGLE: {
+			bool pedit_valid = valid_pedit_action(dev, act,
+							      &natmode_flags);
 
 			if (!pedit_valid)
 				return -EOPNOTSUPP;
 			act_pedit = true;
-		} else {
+			}
+			break;
+		case FLOW_ACTION_QUEUE:
+			/* Do nothing. cxgb4_set_filter will validate */
+			break;
+		default:
 			netdev_err(dev, "%s: Unsupported action\n", __func__);
 			return -EOPNOTSUPP;
 		}
@@ -662,24 +782,156 @@
 		return -EINVAL;
 	}
 
+	if (act_pedit) {
+		int ret;
+
+		ret = cxgb4_action_natmode_validate(adap, natmode_flags,
+						    extack);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static void cxgb4_tc_flower_hash_prio_add(struct adapter *adap, u32 tc_prio)
+{
+	spin_lock_bh(&adap->tids.ftid_lock);
+	if (adap->tids.tc_hash_tids_max_prio < tc_prio)
+		adap->tids.tc_hash_tids_max_prio = tc_prio;
+	spin_unlock_bh(&adap->tids.ftid_lock);
+}
+
+static void cxgb4_tc_flower_hash_prio_del(struct adapter *adap, u32 tc_prio)
+{
+	struct tid_info *t = &adap->tids;
+	struct ch_tc_flower_entry *fe;
+	struct rhashtable_iter iter;
+	u32 found = 0;
+
+	spin_lock_bh(&t->ftid_lock);
+	/* Bail if the current rule is not the one with the max
+	 * prio.
+	 */
+	if (t->tc_hash_tids_max_prio != tc_prio)
+		goto out_unlock;
+
+	/* Search for the next rule having the same or next lower
+	 * max prio.
+	 */
+	rhashtable_walk_enter(&adap->flower_tbl, &iter);
+	do {
+		rhashtable_walk_start(&iter);
+
+		fe = rhashtable_walk_next(&iter);
+		while (!IS_ERR_OR_NULL(fe)) {
+			if (fe->fs.hash &&
+			    fe->fs.tc_prio <= t->tc_hash_tids_max_prio) {
+				t->tc_hash_tids_max_prio = fe->fs.tc_prio;
+				found++;
+
+				/* Bail if we found another rule
+				 * having the same prio as the
+				 * current max one.
+				 */
+				if (fe->fs.tc_prio == tc_prio)
+					break;
+			}
+
+			fe = rhashtable_walk_next(&iter);
+		}
+
+		rhashtable_walk_stop(&iter);
+	} while (fe == ERR_PTR(-EAGAIN));
+	rhashtable_walk_exit(&iter);
+
+	if (!found)
+		t->tc_hash_tids_max_prio = 0;
+
+out_unlock:
+	spin_unlock_bh(&t->ftid_lock);
+}
+
+int cxgb4_flow_rule_replace(struct net_device *dev, struct flow_rule *rule,
+			    u32 tc_prio, struct netlink_ext_ack *extack,
+			    struct ch_filter_specification *fs, u32 *tid)
+{
+	struct adapter *adap = netdev2adap(dev);
+	struct filter_ctx ctx;
+	u8 inet_family;
+	int fidx, ret;
+
+	if (cxgb4_validate_flow_actions(dev, &rule->action, extack, 0))
+		return -EOPNOTSUPP;
+
+	if (cxgb4_validate_flow_match(dev, rule))
+		return -EOPNOTSUPP;
+
+	cxgb4_process_flow_match(dev, rule, fs);
+	cxgb4_process_flow_actions(dev, &rule->action, fs);
+
+	fs->hash = is_filter_exact_match(adap, fs);
+	inet_family = fs->type ? PF_INET6 : PF_INET;
+
+	/* Get a free filter entry TID, where we can insert this new
+	 * rule. Only insert rule if its prio doesn't conflict with
+	 * existing rules.
+	 */
+	fidx = cxgb4_get_free_ftid(dev, inet_family, fs->hash,
+				   tc_prio);
+	if (fidx < 0) {
+		NL_SET_ERR_MSG_MOD(extack,
+				   "No free LETCAM index available");
+		return -ENOMEM;
+	}
+
+	if (fidx < adap->tids.nhpftids) {
+		fs->prio = 1;
+		fs->hash = 0;
+	}
+
+	/* If the rule can be inserted into HASH region, then ignore
+	 * the index to normal FILTER region.
+	 */
+	if (fs->hash)
+		fidx = 0;
+
+	fs->tc_prio = tc_prio;
+
+	init_completion(&ctx.completion);
+	ret = __cxgb4_set_filter(dev, fidx, fs, &ctx);
+	if (ret) {
+		netdev_err(dev, "%s: filter creation err %d\n",
+			   __func__, ret);
+		return ret;
+	}
+
+	/* Wait for reply */
+	ret = wait_for_completion_timeout(&ctx.completion, 10 * HZ);
+	if (!ret)
+		return -ETIMEDOUT;
+
+	/* Check if hw returned error for filter creation */
+	if (ctx.result)
+		return ctx.result;
+
+	*tid = ctx.tid;
+
+	if (fs->hash)
+		cxgb4_tc_flower_hash_prio_add(adap, tc_prio);
+
 	return 0;
 }
 
 int cxgb4_tc_flower_replace(struct net_device *dev,
-			    struct tc_cls_flower_offload *cls)
+			    struct flow_cls_offload *cls)
 {
+	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
+	struct netlink_ext_ack *extack = cls->common.extack;
 	struct adapter *adap = netdev2adap(dev);
 	struct ch_tc_flower_entry *ch_flower;
 	struct ch_filter_specification *fs;
-	struct filter_ctx ctx;
-	int fidx;
 	int ret;
-
-	if (cxgb4_validate_flow_actions(dev, cls))
-		return -EOPNOTSUPP;
-
-	if (cxgb4_validate_flow_match(dev, cls))
-		return -EOPNOTSUPP;
 
 	ch_flower = allocate_flower_entry();
 	if (!ch_flower) {
@@ -689,46 +941,14 @@
 	}
 
 	fs = &ch_flower->fs;
 	fs->hitcnts = 1;
-	cxgb4_process_flow_match(dev, cls, fs);
-	cxgb4_process_flow_actions(dev, cls, fs);
+	fs->tc_cookie = cls->cookie;
 
-	fs->hash = is_filter_exact_match(adap, fs);
-	if (fs->hash) {
-		fidx = 0;
-	} else {
-		fidx = cxgb4_get_free_ftid(dev, fs->type ? PF_INET6 : PF_INET);
-		if (fidx < 0) {
-			netdev_err(dev, "%s: No fidx for offload.\n", __func__);
-			ret = -ENOMEM;
-			goto free_entry;
-		}
-	}
-
-	init_completion(&ctx.completion);
-	ret = __cxgb4_set_filter(dev, fidx, fs, &ctx);
-	if (ret) {
-		netdev_err(dev, "%s: filter creation err %d\n",
-			   __func__, ret);
+	ret = cxgb4_flow_rule_replace(dev, rule, cls->common.prio, extack, fs,
+				      &ch_flower->filter_id);
+	if (ret)
 		goto free_entry;
-	}
-
-	/* Wait for reply */
-	ret = wait_for_completion_timeout(&ctx.completion, 10 * HZ);
-	if (!ret) {
-		ret = -ETIMEDOUT;
-		goto free_entry;
-	}
-
-	ret = ctx.result;
-	/* Check if hw returned error for filter creation */
-	if (ret) {
-		netdev_err(dev, "%s: filter creation err %d\n",
-			   __func__, ret);
-		goto free_entry;
-	}
 
 	ch_flower->tc_flower_cookie = cls->cookie;
-	ch_flower->filter_id = ctx.tid;
 	ret = rhashtable_insert_fast(&adap->flower_tbl, &ch_flower->node,
 				     adap->flower_ht_params);
 	if (ret)
 		goto del_filter;
@@ -737,6 +957,9 @@
 	return 0;
 
 del_filter:
+	if (fs->hash)
+		cxgb4_tc_flower_hash_prio_del(adap, cls->common.prio);
+
 	cxgb4_del_filter(dev, ch_flower->filter_id, &ch_flower->fs);
 
 free_entry:
@@ -744,8 +967,27 @@
 	return ret;
 }
 
+int cxgb4_flow_rule_destroy(struct net_device *dev, u32 tc_prio,
+			    struct ch_filter_specification *fs, int tid)
+{
+	struct adapter *adap = netdev2adap(dev);
+	u8 hash;
+	int ret;
+
+	hash = fs->hash;
+
+	ret = cxgb4_del_filter(dev, tid, fs);
+	if (ret)
+		return ret;
+
+	if (hash)
+		cxgb4_tc_flower_hash_prio_del(adap, tc_prio);
+
+	return ret;
+}
+
 int cxgb4_tc_flower_destroy(struct net_device *dev,
-			    struct tc_cls_flower_offload *cls)
+			    struct flow_cls_offload *cls)
 {
 	struct adapter *adap = netdev2adap(dev);
 	struct ch_tc_flower_entry *ch_flower;
@@ -755,19 +997,16 @@
 	if (!ch_flower)
 		return -ENOENT;
 
-	ret = cxgb4_del_filter(dev, ch_flower->filter_id, &ch_flower->fs);
+	rhashtable_remove_fast(&adap->flower_tbl, &ch_flower->node,
+			       adap->flower_ht_params);
+
+	ret = cxgb4_flow_rule_destroy(dev, ch_flower->fs.tc_prio,
+				      &ch_flower->fs, ch_flower->filter_id);
 	if (ret)
-		goto err;
+		netdev_err(dev, "Flow rule destroy failed for tid: %u, ret: %d",
+			   ch_flower->filter_id, ret);
 
-	ret = rhashtable_remove_fast(&adap->flower_tbl, &ch_flower->node,
-				     adap->flower_ht_params);
-	if (ret) {
-		netdev_err(dev, "Flow remove from rhashtable failed");
-		goto err;
-	}
 	kfree_rcu(ch_flower, rcu);
-
-err:
 	return ret;
 }
 
@@ -819,7 +1058,7 @@
 }
 
 int cxgb4_tc_flower_stats(struct net_device *dev,
-			  struct tc_cls_flower_offload *cls)
+			  struct flow_cls_offload *cls)
 {
 	struct adapter *adap = netdev2adap(dev);
 	struct ch_tc_flower_stats *ofld_stats;
@@ -845,9 +1084,10 @@
 	if (ofld_stats->packet_count != packets) {
 		if (ofld_stats->prev_packet_count != packets)
 			ofld_stats->last_used = jiffies;
-		tcf_exts_stats_update(cls->exts, bytes - ofld_stats->byte_count,
-				      packets - ofld_stats->packet_count,
-				      ofld_stats->last_used);
+		flow_stats_update(&cls->stats, bytes - ofld_stats->byte_count,
+				  packets - ofld_stats->packet_count, 0,
+				  ofld_stats->last_used,
+				  FLOW_ACTION_HW_STATS_IMMEDIATE);
 
 		ofld_stats->packet_count = packets;
 		ofld_stats->byte_count = bytes;
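
Note on the NAT mode hunks above: instead of forcing NAT_MODE_ALL for any pedit action, each rewritten 4-tuple field now sets a flag, and the resulting flag combination must exactly match an entry in cxgb4_natmode_config_array for the chip revision, otherwise the rule is rejected with -EOPNOTSUPP. A minimal stand-alone C sketch of that table-lookup idea follows; it is illustration only, with mock flag and mode values rather than the driver's real definitions, and it is not part of the patch:

/* Stand-alone illustration of a flags-to-NAT-mode table lookup.
 * All identifiers and values below are mock placeholders.
 */
#include <stdio.h>

enum { NATMODE_DIP = 1, NATMODE_SIP = 2, NATMODE_DPORT = 4, NATMODE_SPORT = 8 };
enum { MODE_NONE, MODE_DIP, MODE_DIP_DP, MODE_DIP_DP_SP, MODE_ALL };

struct natmode_cfg {
	unsigned char flags;	/* which 4-tuple fields the rule rewrites */
	int mode;		/* hardware NAT mode covering exactly those fields */
};

static const struct natmode_cfg table[] = {
	{ 0, MODE_NONE },
	{ NATMODE_DIP, MODE_DIP },
	{ NATMODE_DIP | NATMODE_DPORT, MODE_DIP_DP },
	{ NATMODE_DIP | NATMODE_DPORT | NATMODE_SPORT, MODE_DIP_DP_SP },
	{ NATMODE_DIP | NATMODE_SIP | NATMODE_DPORT | NATMODE_SPORT, MODE_ALL },
};

/* Return the matching mode, or -1 if the combination is unsupported
 * (the kernel analogue rejects such a rule with -EOPNOTSUPP).
 */
static int natmode_lookup(unsigned char flags)
{
	unsigned int i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (table[i].flags == flags)
			return table[i].mode;
	return -1;
}

int main(void)
{
	/* A rule rewriting dst IP and dst port (typical DNAT) maps cleanly. */
	printf("%d\n", natmode_lookup(NATMODE_DIP | NATMODE_DPORT));	/* MODE_DIP_DP */
	/* Rewriting only the source port has no exact entry, so it is rejected. */
	printf("%d\n", natmode_lookup(NATMODE_SPORT));			/* -1 */
	return 0;
}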