forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-08 01573e231f18eb2d99162747186f59511f56b64d
kernel/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
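
Taken together, the hunks below appear to backport three related upstream mlx5 IPsec changes into this tree: the conversion of direct skb->sp access to the secpath_set()/skb_sec_path() helpers, the consolidation of software-parser (SWP) offset programming behind mlx5e_set_eseg_swp() via struct mlx5e_swp_spec, and the ConnectX full-offload TX/RX data path (ESP trailer insertion, flow-table metadata, RX syndrome handling), with the legacy FPGA metadata path kept behind MLX5_CAP_GEN(mdev, fpga).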
@@ -34,7 +34,7 @@
 #include <crypto/aead.h>
 #include <net/xfrm.h>
 #include <net/esp.h>
-
+#include "accel/ipsec_offload.h"
 #include "en_accel/ipsec_rxtx.h"
 #include "en_accel/ipsec.h"
 #include "accel/accel.h"
@@ -136,7 +136,7 @@
                                 struct mlx5_wqe_eth_seg *eseg, u8 mode,
                                 struct xfrm_offload *xo)
 {
-        u8 proto;
+        struct mlx5e_swp_spec swp_spec = {};
 
         /* Tunnel Mode:
          * SWP:      OutL3       InL3  InL4
@@ -146,35 +146,23 @@
          * SWP:      OutL3       InL4
          *                       InL3
          * Pkt: MAC  IP     ESP  L4
-         *
-         * Offsets are in 2-byte words, counting from start of frame
          */
-        eseg->swp_outer_l3_offset = skb_network_offset(skb) / 2;
-        if (skb->protocol == htons(ETH_P_IPV6))
-                eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L3_IPV6;
-
-        if (mode == XFRM_MODE_TUNNEL) {
-                eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
+        swp_spec.l3_proto = skb->protocol;
+        swp_spec.is_tun = mode == XFRM_MODE_TUNNEL;
+        if (swp_spec.is_tun) {
                 if (xo->proto == IPPROTO_IPV6) {
-                        eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
-                        proto = inner_ipv6_hdr(skb)->nexthdr;
+                        swp_spec.tun_l3_proto = htons(ETH_P_IPV6);
+                        swp_spec.tun_l4_proto = inner_ipv6_hdr(skb)->nexthdr;
                 } else {
-                        proto = inner_ip_hdr(skb)->protocol;
+                        swp_spec.tun_l3_proto = htons(ETH_P_IP);
+                        swp_spec.tun_l4_proto = inner_ip_hdr(skb)->protocol;
                 }
         } else {
-                eseg->swp_inner_l3_offset = skb_network_offset(skb) / 2;
-                if (skb->protocol == htons(ETH_P_IPV6))
-                        eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
-                proto = xo->proto;
+                swp_spec.tun_l3_proto = skb->protocol;
+                swp_spec.tun_l4_proto = xo->proto;
         }
-        switch (proto) {
-        case IPPROTO_UDP:
-                eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
-                /* Fall through */
-        case IPPROTO_TCP:
-                eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
-                break;
-        }
+
+        mlx5e_set_eseg_swp(skb, eseg, &swp_spec);
 }
 
 void mlx5e_ipsec_set_iv_esn(struct sk_buff *skb, struct xfrm_state *x,
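
Note: after this refactor the helper no longer pokes eseg->swp_* offsets directly; it only classifies the packet into an mlx5e_swp_spec and defers the offset math to the shared mlx5e_set_eseg_swp(). A minimal userspace sketch of the same classification step follows; the struct, helper name, and sample inputs are local stand-ins for illustration, not the kernel API:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <netinet/in.h>        /* htons(), IPPROTO_* */

#define ETH_P_IP   0x0800      /* ethertypes, redefined locally for the sketch */
#define ETH_P_IPV6 0x86DD

/* Mirrors the mlx5e_swp_spec fields the new code fills in. */
struct swp_spec {
        uint16_t l3_proto;     /* outer L3 ethertype, network byte order */
        bool     is_tun;       /* tunnel vs. transport mode */
        uint16_t tun_l3_proto; /* inner L3 ethertype, network byte order */
        uint8_t  tun_l4_proto; /* inner L4 protocol number */
};

/* Same branching as the rewritten mlx5e_ipsec_set_swp(): tunnel mode
 * derives the inner protocols from the inner header, transport mode
 * reuses the outer ethertype and the xfrm_offload protocol. */
static struct swp_spec classify(uint16_t skb_protocol, bool tunnel,
                                uint8_t xo_proto, uint8_t inner_l4)
{
        struct swp_spec spec = { .l3_proto = skb_protocol, .is_tun = tunnel };

        if (tunnel) {
                spec.tun_l3_proto = (xo_proto == IPPROTO_IPV6) ?
                                    htons(ETH_P_IPV6) : htons(ETH_P_IP);
                spec.tun_l4_proto = inner_l4;
        } else {
                spec.tun_l3_proto = skb_protocol;
                spec.tun_l4_proto = xo_proto;
        }
        return spec;
}

int main(void)
{
        /* IPv4 ESP, transport mode, carrying TCP. */
        struct swp_spec s = classify(htons(ETH_P_IP), false, IPPROTO_TCP, 0);

        printf("is_tun=%d tun_l4_proto=%u\n", s.is_tun, (unsigned)s.tun_l4_proto);
        return 0;
}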
@@ -245,20 +233,97 @@
                    ntohs(mdata->content.tx.seq));
 }
 
-struct sk_buff *mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
-                                          struct mlx5e_tx_wqe *wqe,
-                                          struct sk_buff *skb)
+void mlx5e_ipsec_handle_tx_wqe(struct mlx5e_tx_wqe *wqe,
+                               struct mlx5e_accel_tx_ipsec_state *ipsec_st,
+                               struct mlx5_wqe_inline_seg *inlseg)
+{
+        inlseg->byte_count = cpu_to_be32(ipsec_st->tailen | MLX5_INLINE_SEG);
+        esp_output_fill_trailer((u8 *)inlseg->data, 0, ipsec_st->plen, ipsec_st->xo->proto);
+}
+
+static int mlx5e_ipsec_set_state(struct mlx5e_priv *priv,
+                                 struct sk_buff *skb,
+                                 struct xfrm_state *x,
+                                 struct xfrm_offload *xo,
+                                 struct mlx5e_accel_tx_ipsec_state *ipsec_st)
+{
+        unsigned int blksize, clen, alen, plen;
+        struct crypto_aead *aead;
+        unsigned int tailen;
+
+        ipsec_st->x = x;
+        ipsec_st->xo = xo;
+        if (mlx5_is_ipsec_device(priv->mdev)) {
+                aead = x->data;
+                alen = crypto_aead_authsize(aead);
+                blksize = ALIGN(crypto_aead_blocksize(aead), 4);
+                clen = ALIGN(skb->len + 2, blksize);
+                plen = max_t(u32, clen - skb->len, 4);
+                tailen = plen + alen;
+                ipsec_st->plen = plen;
+                ipsec_st->tailen = tailen;
+        }
+
+        return 0;
+}
+
+void mlx5e_ipsec_tx_build_eseg(struct mlx5e_priv *priv, struct sk_buff *skb,
+                               struct mlx5_wqe_eth_seg *eseg)
+{
+        struct xfrm_offload *xo = xfrm_offload(skb);
+        struct xfrm_encap_tmpl *encap;
+        struct xfrm_state *x;
+        struct sec_path *sp;
+        u8 l3_proto;
+
+        sp = skb_sec_path(skb);
+        if (unlikely(sp->len != 1))
+                return;
+
+        x = xfrm_input_state(skb);
+        if (unlikely(!x))
+                return;
+
+        if (unlikely(!x->xso.offload_handle ||
+                     (skb->protocol != htons(ETH_P_IP) &&
+                      skb->protocol != htons(ETH_P_IPV6))))
+                return;
+
+        mlx5e_ipsec_set_swp(skb, eseg, x->props.mode, xo);
+
+        l3_proto = (x->props.family == AF_INET) ?
+                   ((struct iphdr *)skb_network_header(skb))->protocol :
+                   ((struct ipv6hdr *)skb_network_header(skb))->nexthdr;
+
+        if (mlx5_is_ipsec_device(priv->mdev)) {
+                eseg->flow_table_metadata |= cpu_to_be32(MLX5_ETH_WQE_FT_META_IPSEC);
+                eseg->trailer |= cpu_to_be32(MLX5_ETH_WQE_INSERT_TRAILER);
+                encap = x->encap;
+                if (!encap) {
+                        eseg->trailer |= (l3_proto == IPPROTO_ESP) ?
+                                cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_OUTER_IP_ASSOC) :
+                                cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_OUTER_L4_ASSOC);
+                } else if (encap->encap_type == UDP_ENCAP_ESPINUDP) {
+                        eseg->trailer |= (l3_proto == IPPROTO_ESP) ?
+                                cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_INNER_IP_ASSOC) :
+                                cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_INNER_L4_ASSOC);
+                }
+        }
+}
+
+bool mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
+                               struct sk_buff *skb,
+                               struct mlx5e_accel_tx_ipsec_state *ipsec_st)
 {
         struct mlx5e_priv *priv = netdev_priv(netdev);
         struct xfrm_offload *xo = xfrm_offload(skb);
-        struct mlx5e_ipsec_metadata *mdata;
         struct mlx5e_ipsec_sa_entry *sa_entry;
+        struct mlx5e_ipsec_metadata *mdata;
         struct xfrm_state *x;
+        struct sec_path *sp;
 
-        if (!xo)
-                return skb;
-
-        if (unlikely(skb->sp->len != 1)) {
+        sp = skb_sec_path(skb);
+        if (unlikely(sp->len != 1)) {
                 atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_bundle);
                 goto drop;
         }
@@ -281,21 +346,27 @@
                 atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_trailer);
                 goto drop;
         }
-        mdata = mlx5e_ipsec_add_metadata(skb);
-        if (IS_ERR(mdata)) {
-                atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_metadata);
-                goto drop;
+
+        if (MLX5_CAP_GEN(priv->mdev, fpga)) {
+                mdata = mlx5e_ipsec_add_metadata(skb);
+                if (IS_ERR(mdata)) {
+                        atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_metadata);
+                        goto drop;
+                }
         }
-        mlx5e_ipsec_set_swp(skb, &wqe->eth, x->props.mode, xo);
+
         sa_entry = (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle;
         sa_entry->set_iv_op(skb, x, xo);
-        mlx5e_ipsec_set_metadata(skb, mdata, xo);
+        if (MLX5_CAP_GEN(priv->mdev, fpga))
+                mlx5e_ipsec_set_metadata(skb, mdata, xo);
 
-        return skb;
+        mlx5e_ipsec_set_state(priv, skb, x, xo, ipsec_st);
+
+        return true;
 
 drop:
         kfree_skb(skb);
-        return NULL;
+        return false;
 }
 
 static inline struct xfrm_state *
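
Note: mlx5e_ipsec_set_state() above precomputes the ESP trailer that mlx5e_ipsec_handle_tx_wqe() later inlines into the WQE: the payload plus 2 trailer bytes (pad length and next header) is rounded up to the cipher block size (itself aligned to 4), the pad length is clamped to at least 4, and the ICV length is added on top. A standalone sketch of that arithmetic, with illustrative lengths and local stand-ins for the kernel's ALIGN()/max_t():

#include <stdint.h>
#include <stdio.h>

/* Local stand-ins for the kernel's ALIGN() and max_t() helpers. */
static uint32_t align_up(uint32_t x, uint32_t a) { return (x + a - 1) / a * a; }
static uint32_t max_u32(uint32_t a, uint32_t b)  { return a > b ? a : b; }

int main(void)
{
        uint32_t skb_len = 1433;  /* illustrative packet length */
        uint32_t alen    = 16;    /* illustrative ICV size (e.g. a GCM tag) */
        uint32_t blksize = align_up(1, 4);              /* block size 1 -> 4 */
        uint32_t clen = align_up(skb_len + 2, blksize); /* +2: pad len, next hdr */
        uint32_t plen = max_u32(clen - skb_len, 4);     /* at least 4 tail bytes */
        uint32_t tailen = plen + alen;                  /* padding + ICV */

        printf("plen=%u tailen=%u\n", plen, tailen);    /* plen=4 tailen=20 */
        return 0;
}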
@@ -305,10 +376,11 @@
         struct mlx5e_priv *priv = netdev_priv(netdev);
         struct xfrm_offload *xo;
         struct xfrm_state *xs;
+        struct sec_path *sp;
         u32 sa_handle;
 
-        skb->sp = secpath_dup(skb->sp);
-        if (unlikely(!skb->sp)) {
+        sp = secpath_set(skb);
+        if (unlikely(!sp)) {
                 atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_sp_alloc);
                 return NULL;
         }
@@ -320,8 +392,9 @@
                 return NULL;
         }
 
-        skb->sp->xvec[skb->sp->len++] = xs;
-        skb->sp->olen++;
+        sp = skb_sec_path(skb);
+        sp->xvec[sp->len++] = xs;
+        sp->olen++;
 
         xo = xfrm_offload(skb);
         xo->flags = CRYPTO_DONE;
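
Note: the two hunks above track the upstream conversion of skb->sp into an skb extension: secpath_dup() plus a direct skb->sp write becomes secpath_set(), which returns the (possibly already attached) sec_path for the skb, and all readers switch to the skb_sec_path() accessor instead of dereferencing skb->sp.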
@@ -369,13 +442,69 @@
         return skb;
 }
 
+enum {
+        MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_DECRYPTED,
+        MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_AUTH_FAILED,
+        MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_BAD_TRAILER,
+};
+
+void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
+                                       struct sk_buff *skb,
+                                       struct mlx5_cqe64 *cqe)
+{
+        u32 ipsec_meta_data = be32_to_cpu(cqe->ft_metadata);
+        struct mlx5e_priv *priv;
+        struct xfrm_offload *xo;
+        struct xfrm_state *xs;
+        struct sec_path *sp;
+        u32 sa_handle;
+
+        sa_handle = MLX5_IPSEC_METADATA_HANDLE(ipsec_meta_data);
+        priv = netdev_priv(netdev);
+        sp = secpath_set(skb);
+        if (unlikely(!sp)) {
+                atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_sp_alloc);
+                return;
+        }
+
+        xs = mlx5e_ipsec_sadb_rx_lookup(priv->ipsec, sa_handle);
+        if (unlikely(!xs)) {
+                atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_sadb_miss);
+                return;
+        }
+
+        sp = skb_sec_path(skb);
+        sp->xvec[sp->len++] = xs;
+        sp->olen++;
+
+        xo = xfrm_offload(skb);
+        xo->flags = CRYPTO_DONE;
+
+        switch (MLX5_IPSEC_METADATA_SYNDROM(ipsec_meta_data)) {
+        case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_DECRYPTED:
+                xo->status = CRYPTO_SUCCESS;
+                if (WARN_ON_ONCE(priv->ipsec->no_trailer))
+                        xo->flags |= XFRM_ESP_NO_TRAILER;
+                break;
+        case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_AUTH_FAILED:
+                xo->status = CRYPTO_TUNNEL_ESP_AUTH_FAILED;
+                break;
+        case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_BAD_TRAILER:
+                xo->status = CRYPTO_INVALID_PACKET_SYNTAX;
+                break;
+        default:
+                atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_syndrome);
+        }
+}
+
 bool mlx5e_ipsec_feature_check(struct sk_buff *skb, struct net_device *netdev,
                                netdev_features_t features)
 {
+        struct sec_path *sp = skb_sec_path(skb);
         struct xfrm_state *x;
 
-        if (skb->sp && skb->sp->len) {
-                x = skb->sp->xvec[0];
+        if (sp && sp->len) {
+                x = sp->xvec[0];
                 if (x && x->xso.offload_handle)
                         return true;
         }
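
Note: the RX path added above is a direct mapping from the hardware syndrome carried in the CQE flow-table metadata to an xfrm status code. A compact userspace mirror of that switch; the syndrome enum copies the diff, while the status values are local stand-ins for the kernel's CRYPTO_* constants from <net/xfrm.h>:

#include <stdio.h>

/* Syndrome values exactly as introduced by this commit. */
enum {
        MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_DECRYPTED,
        MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_AUTH_FAILED,
        MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_BAD_TRAILER,
};

/* Local stand-ins for the kernel's CRYPTO_* status codes. */
enum {
        CRYPTO_SUCCESS,
        CRYPTO_TUNNEL_ESP_AUTH_FAILED,
        CRYPTO_INVALID_PACKET_SYNTAX,
        CRYPTO_UNKNOWN,
};

static int syndrome_to_status(int syndrome)
{
        switch (syndrome) {
        case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_DECRYPTED:
                return CRYPTO_SUCCESS;
        case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_AUTH_FAILED:
                return CRYPTO_TUNNEL_ESP_AUTH_FAILED;
        case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_BAD_TRAILER:
                return CRYPTO_INVALID_PACKET_SYNTAX;
        default:
                /* the kernel path bumps ipsec_rx_drop_syndrome instead */
                return CRYPTO_UNKNOWN;
        }
}

int main(void)
{
        printf("auth failure -> status %d\n",
               syndrome_to_status(MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_AUTH_FAILED));
        return 0;
}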