forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-01-04 1543e317f1da31b75942316931e8f491a8920811
kernel/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
@@ -34,7 +34,7 @@
 #include <crypto/aead.h>
 #include <net/xfrm.h>
 #include <net/esp.h>
-
+#include "accel/ipsec_offload.h"
 #include "en_accel/ipsec_rxtx.h"
 #include "en_accel/ipsec.h"
 #include "accel/accel.h"
@@ -121,7 +121,9 @@
 
 	trailer_len = alen + plen + 2;
 
-	pskb_trim(skb, skb->len - trailer_len);
+	ret = pskb_trim(skb, skb->len - trailer_len);
+	if (unlikely(ret))
+		return ret;
 	if (skb->protocol == htons(ETH_P_IP)) {
 		ipv4hdr->tot_len = htons(ntohs(ipv4hdr->tot_len) - trailer_len);
 		ip_send_check(ipv4hdr);
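
Note on the hunk above: pskb_trim() can fail (it may need to reallocate the skb), so its return value is no longer ignored; the trailer-removal helper now bails out before touching the IP length fields on a packet that was never trimmed. A minimal sketch of the same pattern, with illustrative identifiers (only pskb_trim()/unlikely() are real kernel APIs here):

    /* Sketch: propagate pskb_trim() failure instead of ignoring it. */
    static int remove_trailer_sketch(struct sk_buff *skb, unsigned int trailer_len)
    {
    	int ret;
    
    	ret = pskb_trim(skb, skb->len - trailer_len);
    	if (unlikely(ret))
    		return ret;	/* caller drops the skb or falls back to software */
    
    	/* only adjust IP headers once the trim has actually succeeded */
    	return 0;
    }
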
@@ -136,7 +138,7 @@
 			    struct mlx5_wqe_eth_seg *eseg, u8 mode,
 			    struct xfrm_offload *xo)
 {
-	u8 proto;
+	struct mlx5e_swp_spec swp_spec = {};
 
 	/* Tunnel Mode:
 	 * SWP:      OutL3       InL3  InL4
@@ -146,35 +148,23 @@
 	 * SWP:      OutL3       InL4
 	 *           InL3
 	 * Pkt: MAC  IP     ESP  L4
-	 *
-	 * Offsets are in 2-byte words, counting from start of frame
 	 */
-	eseg->swp_outer_l3_offset = skb_network_offset(skb) / 2;
-	if (skb->protocol == htons(ETH_P_IPV6))
-		eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L3_IPV6;
-
-	if (mode == XFRM_MODE_TUNNEL) {
-		eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
+	swp_spec.l3_proto = skb->protocol;
+	swp_spec.is_tun = mode == XFRM_MODE_TUNNEL;
+	if (swp_spec.is_tun) {
 		if (xo->proto == IPPROTO_IPV6) {
-			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
-			proto = inner_ipv6_hdr(skb)->nexthdr;
+			swp_spec.tun_l3_proto = htons(ETH_P_IPV6);
+			swp_spec.tun_l4_proto = inner_ipv6_hdr(skb)->nexthdr;
 		} else {
-			proto = inner_ip_hdr(skb)->protocol;
+			swp_spec.tun_l3_proto = htons(ETH_P_IP);
+			swp_spec.tun_l4_proto = inner_ip_hdr(skb)->protocol;
 		}
 	} else {
-		eseg->swp_inner_l3_offset = skb_network_offset(skb) / 2;
-		if (skb->protocol == htons(ETH_P_IPV6))
-			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
-		proto = xo->proto;
+		swp_spec.tun_l3_proto = skb->protocol;
+		swp_spec.tun_l4_proto = xo->proto;
 	}
-	switch (proto) {
-	case IPPROTO_UDP:
-		eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
-		/* Fall through */
-	case IPPROTO_TCP:
-		eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
-		break;
-	}
+
+	mlx5e_set_eseg_swp(skb, eseg, &swp_spec);
 }
 
 void mlx5e_ipsec_set_iv_esn(struct sk_buff *skb, struct xfrm_state *x,
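
Note on the two hunks above: mlx5e_ipsec_set_swp() no longer writes the software-parser (SWP) offsets and flags into the eth segment directly. It now only classifies the packet (outer L3 protocol, tunnel vs. transport, inner L3/L4 protocol) into a struct mlx5e_swp_spec and hands that to the shared helper mlx5e_set_eseg_swp(). A rough sketch of what such a helper derives from the spec, reconstructed from the inline code removed in this hunk (the body is illustrative, not the exact upstream implementation of mlx5e_set_eseg_swp()):

    /* Illustrative only: roughly what the shared SWP helper does with the spec,
     * pieced together from the lines removed above. Offsets are in 2-byte
     * words, counted from the start of the frame. */
    static void set_eseg_swp_sketch(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg,
    				struct mlx5e_swp_spec *swp_spec)
    {
    	eseg->swp_outer_l3_offset = skb_network_offset(skb) / 2;
    	if (swp_spec->l3_proto == htons(ETH_P_IPV6))
    		eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L3_IPV6;
    
    	if (swp_spec->is_tun)
    		eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
    	else
    		eseg->swp_inner_l3_offset = skb_network_offset(skb) / 2;
    	if (swp_spec->tun_l3_proto == htons(ETH_P_IPV6))
    		eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
    
    	switch (swp_spec->tun_l4_proto) {
    	case IPPROTO_UDP:
    		eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
    		/* fall through */
    	case IPPROTO_TCP:
    		eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
    		break;
    	}
    }
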
@@ -245,20 +235,97 @@
 		 ntohs(mdata->content.tx.seq));
 }
 
-struct sk_buff *mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
-					  struct mlx5e_tx_wqe *wqe,
-					  struct sk_buff *skb)
+void mlx5e_ipsec_handle_tx_wqe(struct mlx5e_tx_wqe *wqe,
+			       struct mlx5e_accel_tx_ipsec_state *ipsec_st,
+			       struct mlx5_wqe_inline_seg *inlseg)
+{
+	inlseg->byte_count = cpu_to_be32(ipsec_st->tailen | MLX5_INLINE_SEG);
+	esp_output_fill_trailer((u8 *)inlseg->data, 0, ipsec_st->plen, ipsec_st->xo->proto);
+}
+
+static int mlx5e_ipsec_set_state(struct mlx5e_priv *priv,
+				 struct sk_buff *skb,
+				 struct xfrm_state *x,
+				 struct xfrm_offload *xo,
+				 struct mlx5e_accel_tx_ipsec_state *ipsec_st)
+{
+	unsigned int blksize, clen, alen, plen;
+	struct crypto_aead *aead;
+	unsigned int tailen;
+
+	ipsec_st->x = x;
+	ipsec_st->xo = xo;
+	if (mlx5_is_ipsec_device(priv->mdev)) {
+		aead = x->data;
+		alen = crypto_aead_authsize(aead);
+		blksize = ALIGN(crypto_aead_blocksize(aead), 4);
+		clen = ALIGN(skb->len + 2, blksize);
+		plen = max_t(u32, clen - skb->len, 4);
+		tailen = plen + alen;
+		ipsec_st->plen = plen;
+		ipsec_st->tailen = tailen;
+	}
+
+	return 0;
+}
+
+void mlx5e_ipsec_tx_build_eseg(struct mlx5e_priv *priv, struct sk_buff *skb,
+			       struct mlx5_wqe_eth_seg *eseg)
+{
+	struct xfrm_offload *xo = xfrm_offload(skb);
+	struct xfrm_encap_tmpl *encap;
+	struct xfrm_state *x;
+	struct sec_path *sp;
+	u8 l3_proto;
+
+	sp = skb_sec_path(skb);
+	if (unlikely(sp->len != 1))
+		return;
+
+	x = xfrm_input_state(skb);
+	if (unlikely(!x))
+		return;
+
+	if (unlikely(!x->xso.offload_handle ||
+		     (skb->protocol != htons(ETH_P_IP) &&
+		      skb->protocol != htons(ETH_P_IPV6))))
+		return;
+
+	mlx5e_ipsec_set_swp(skb, eseg, x->props.mode, xo);
+
+	l3_proto = (x->props.family == AF_INET) ?
+		   ((struct iphdr *)skb_network_header(skb))->protocol :
+		   ((struct ipv6hdr *)skb_network_header(skb))->nexthdr;
+
+	if (mlx5_is_ipsec_device(priv->mdev)) {
+		eseg->flow_table_metadata |= cpu_to_be32(MLX5_ETH_WQE_FT_META_IPSEC);
+		eseg->trailer |= cpu_to_be32(MLX5_ETH_WQE_INSERT_TRAILER);
+		encap = x->encap;
+		if (!encap) {
+			eseg->trailer |= (l3_proto == IPPROTO_ESP) ?
+				cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_OUTER_IP_ASSOC) :
+				cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_OUTER_L4_ASSOC);
+		} else if (encap->encap_type == UDP_ENCAP_ESPINUDP) {
+			eseg->trailer |= (l3_proto == IPPROTO_ESP) ?
+				cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_INNER_IP_ASSOC) :
+				cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_INNER_L4_ASSOC);
+		}
+	}
+}
+
+bool mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
+			       struct sk_buff *skb,
+			       struct mlx5e_accel_tx_ipsec_state *ipsec_st)
 {
 	struct mlx5e_priv *priv = netdev_priv(netdev);
 	struct xfrm_offload *xo = xfrm_offload(skb);
-	struct mlx5e_ipsec_metadata *mdata;
 	struct mlx5e_ipsec_sa_entry *sa_entry;
+	struct mlx5e_ipsec_metadata *mdata;
 	struct xfrm_state *x;
+	struct sec_path *sp;
 
-	if (!xo)
-		return skb;
-
-	if (unlikely(skb->sp->len != 1)) {
+	sp = skb_sec_path(skb);
+	if (unlikely(sp->len != 1)) {
 		atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_bundle);
 		goto drop;
 	}
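
Note on mlx5e_ipsec_set_state() above: for devices with full IPsec offload (mlx5_is_ipsec_device()) it precomputes the ESP trailer the hardware must insert. The payload length plus the 2-byte pad-length/next-header fields is rounded up to the AEAD block size (clen), the padding is plen (never less than 4), and the trailer is plen plus the ICV length (alen). A small user-space worked example of the same arithmetic, with purely illustrative values:

    /* Worked example of the ESP trailer sizing above (illustrative values). */
    #include <stdio.h>
    
    #define EX_ALIGN(x, a)	((((x) + (a) - 1) / (a)) * (a))
    
    int main(void)
    {
    	unsigned int skb_len = 1443;	/* hypothetical ESP payload length       */
    	unsigned int blksize = 16;	/* ALIGN(crypto_aead_blocksize(aead), 4) */
    	unsigned int alen = 16;		/* crypto_aead_authsize(aead), the ICV   */
    
    	unsigned int clen = EX_ALIGN(skb_len + 2, blksize);	/* 1456 */
    	unsigned int plen = clen - skb_len;			/* 13   */
    	if (plen < 4)
    		plen = 4;
    
    	printf("plen=%u tailen=%u\n", plen, plen + alen);	/* plen=13 tailen=29 */
    	return 0;
    }
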
@@ -281,21 +348,27 @@
 		atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_trailer);
 		goto drop;
 	}
-	mdata = mlx5e_ipsec_add_metadata(skb);
-	if (IS_ERR(mdata)) {
-		atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_metadata);
-		goto drop;
+
+	if (MLX5_CAP_GEN(priv->mdev, fpga)) {
+		mdata = mlx5e_ipsec_add_metadata(skb);
+		if (IS_ERR(mdata)) {
+			atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_metadata);
+			goto drop;
+		}
 	}
-	mlx5e_ipsec_set_swp(skb, &wqe->eth, x->props.mode, xo);
+
 	sa_entry = (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle;
 	sa_entry->set_iv_op(skb, x, xo);
-	mlx5e_ipsec_set_metadata(skb, mdata, xo);
+	if (MLX5_CAP_GEN(priv->mdev, fpga))
+		mlx5e_ipsec_set_metadata(skb, mdata, xo);
 
-	return skb;
+	mlx5e_ipsec_set_state(priv, skb, x, xo, ipsec_st);
+
+	return true;
 
 drop:
 	kfree_skb(skb);
-	return NULL;
+	return false;
 }
 
 static inline struct xfrm_state *
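
Note on the TX rework above: mlx5e_ipsec_handle_tx_skb() no longer builds the eth segment or returns a (possibly NULL) skb. It now only validates the SA, sets the IV, adds the FPGA metadata where applicable, fills struct mlx5e_accel_tx_ipsec_state, and returns false if the skb was dropped. The eth-segment programming lives in mlx5e_ipsec_tx_build_eseg(), and the inline ESP trailer is emitted later through mlx5e_ipsec_handle_tx_wqe(). A hedged sketch of how a TX path might sequence these calls; the surrounding datapath is not part of this diff, so the caller below is an assumption:

    /* Illustrative caller only: shows the ordering implied by the new API,
     * not the actual mlx5e TX datapath code. */
    static netdev_tx_t tx_path_sketch(struct net_device *netdev, struct sk_buff *skb,
    				  struct mlx5e_priv *priv, struct mlx5e_tx_wqe *wqe,
    				  struct mlx5_wqe_inline_seg *inlseg)
    {
    	struct mlx5e_accel_tx_ipsec_state ipsec_st = {};
    
    	if (xfrm_offload(skb)) {
    		/* 1. validate the SA, set the IV, record trailer sizes;
    		 *    the skb has already been freed when this returns false */
    		if (!mlx5e_ipsec_handle_tx_skb(netdev, skb, &ipsec_st))
    			return NETDEV_TX_OK;
    
    		/* 2. program SWP offsets / trailer flags into the eth segment */
    		mlx5e_ipsec_tx_build_eseg(priv, skb, &wqe->eth);
    
    		/* 3. for devices that insert the trailer, fill the inline segment */
    		if (ipsec_st.tailen)
    			mlx5e_ipsec_handle_tx_wqe(wqe, &ipsec_st, inlseg);
    	}
    
    	/* ...normal descriptor build and doorbell would follow... */
    	return NETDEV_TX_OK;
    }
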
@@ -305,10 +378,11 @@
 	struct mlx5e_priv *priv = netdev_priv(netdev);
 	struct xfrm_offload *xo;
 	struct xfrm_state *xs;
+	struct sec_path *sp;
 	u32 sa_handle;
 
-	skb->sp = secpath_dup(skb->sp);
-	if (unlikely(!skb->sp)) {
+	sp = secpath_set(skb);
+	if (unlikely(!sp)) {
 		atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_sp_alloc);
 		return NULL;
 	}
@@ -320,8 +394,9 @@
 		return NULL;
 	}
 
-	skb->sp->xvec[skb->sp->len++] = xs;
-	skb->sp->olen++;
+	sp = skb_sec_path(skb);
+	sp->xvec[sp->len++] = xs;
+	sp->olen++;
 
 	xo = xfrm_offload(skb);
 	xo->flags = CRYPTO_DONE;
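
Note on the two RX hunks above: they move from assigning skb->sp directly via secpath_dup() to the newer API, where secpath_set() attaches (or reuses) the secpath extension and skb_sec_path() reads it back. A minimal sketch of the pattern, mirroring the diff rather than adding behaviour:

    /* Minimal sketch of the secpath_set()/skb_sec_path() pattern used above. */
    static bool attach_rx_state_sketch(struct mlx5e_priv *priv, struct sk_buff *skb,
    				   u32 sa_handle)
    {
    	struct xfrm_state *xs;
    	struct sec_path *sp;
    
    	sp = secpath_set(skb);		/* allocate or reuse the skb's secpath */
    	if (unlikely(!sp))
    		return false;
    
    	xs = mlx5e_ipsec_sadb_rx_lookup(priv->ipsec, sa_handle);
    	if (unlikely(!xs))
    		return false;
    
    	sp = skb_sec_path(skb);
    	sp->xvec[sp->len++] = xs;	/* hand the state to the xfrm input path */
    	sp->olen++;			/* mark the entry as hardware-offloaded */
    
    	xfrm_offload(skb)->flags = CRYPTO_DONE;
    	return true;
    }
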
@@ -369,13 +444,69 @@
 	return skb;
 }
 
+enum {
+	MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_DECRYPTED,
+	MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_AUTH_FAILED,
+	MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_BAD_TRAILER,
+};
+
+void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
+				       struct sk_buff *skb,
+				       struct mlx5_cqe64 *cqe)
+{
+	u32 ipsec_meta_data = be32_to_cpu(cqe->ft_metadata);
+	struct mlx5e_priv *priv;
+	struct xfrm_offload *xo;
+	struct xfrm_state *xs;
+	struct sec_path *sp;
+	u32 sa_handle;
+
+	sa_handle = MLX5_IPSEC_METADATA_HANDLE(ipsec_meta_data);
+	priv = netdev_priv(netdev);
+	sp = secpath_set(skb);
+	if (unlikely(!sp)) {
+		atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_sp_alloc);
+		return;
+	}
+
+	xs = mlx5e_ipsec_sadb_rx_lookup(priv->ipsec, sa_handle);
+	if (unlikely(!xs)) {
+		atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_sadb_miss);
+		return;
+	}
+
+	sp = skb_sec_path(skb);
+	sp->xvec[sp->len++] = xs;
+	sp->olen++;
+
+	xo = xfrm_offload(skb);
+	xo->flags = CRYPTO_DONE;
+
+	switch (MLX5_IPSEC_METADATA_SYNDROM(ipsec_meta_data)) {
+	case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_DECRYPTED:
+		xo->status = CRYPTO_SUCCESS;
+		if (WARN_ON_ONCE(priv->ipsec->no_trailer))
+			xo->flags |= XFRM_ESP_NO_TRAILER;
+		break;
+	case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_AUTH_FAILED:
+		xo->status = CRYPTO_TUNNEL_ESP_AUTH_FAILED;
+		break;
+	case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_BAD_TRAILER:
+		xo->status = CRYPTO_INVALID_PACKET_SYNTAX;
+		break;
+	default:
+		atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_syndrome);
+	}
+}
+
 bool mlx5e_ipsec_feature_check(struct sk_buff *skb, struct net_device *netdev,
 			       netdev_features_t features)
 {
+	struct sec_path *sp = skb_sec_path(skb);
 	struct xfrm_state *x;
 
-	if (skb->sp && skb->sp->len) {
-		x = skb->sp->xvec[0];
+	if (sp && sp->len) {
+		x = sp->xvec[0];
 		if (x && x->xso.offload_handle)
 			return true;
 	}
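
Note on the new mlx5e_ipsec_offload_handle_rx_skb() above: it decodes the flow-table metadata carried in the CQE, uses the SA handle to look up and attach the xfrm state, and translates the hardware syndrome into the xfrm_offload status the stack's ESP input path understands (CRYPTO_SUCCESS, CRYPTO_TUNNEL_ESP_AUTH_FAILED, CRYPTO_INVALID_PACKET_SYNTAX). A hedged sketch of how an RX completion handler might invoke it; the "is this an IPsec CQE?" test shown as MLX5_IPSEC_METADATA_MARKER() is an assumption, since the gating code is not part of this diff:

    /* Illustrative RX-completion glue only; the metadata marker test is assumed
     * here and may be named differently in the real driver. */
    static void rx_complete_sketch(struct net_device *netdev, struct mlx5_cqe64 *cqe,
    			       struct sk_buff *skb)
    {
    	u32 meta = be32_to_cpu(cqe->ft_metadata);
    
    	if (MLX5_IPSEC_METADATA_MARKER(meta))
    		/* attaches the xfrm state and maps the HW syndrome to xo->status */
    		mlx5e_ipsec_offload_handle_rx_skb(netdev, skb, cqe);
    
    	/* ...the skb is then handed to the stack (GRO/netif_receive) as usual... */
    }
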