@@ -181,16 +181,12 @@
 	 */
 	nskb->ip_summed = CHECKSUM_PARTIAL;
 
-	nskb->xmit_more = 1;
 	nskb->queue_mapping = skb->queue_mapping;
 }
 
-static struct sk_buff *
-mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context_tx *context,
-		     struct mlx5e_txqsq *sq, struct sk_buff *skb,
-		     struct mlx5e_tx_wqe **wqe,
-		     u16 *pi,
-		     struct mlx5e_tls *tls)
+static bool mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context_tx *context,
+				 struct mlx5e_txqsq *sq, struct sk_buff *skb,
+				 struct mlx5e_tls *tls)
 {
 	u32 tcp_seq = ntohl(tcp_hdr(skb)->seq);
 	struct sync_info info;
@@ -218,7 +214,7 @@
 	if (likely(payload <= -info.sync_len))
 		/* SKB payload doesn't require offload
 		 */
-		return skb;
+		return true;
 
 	atomic64_inc(&tls->sw_stats.tx_tls_drop_bypass_required);
 	goto err_out;
@@ -248,20 +244,17 @@
 	sq->stats->tls_resync_bytes += nskb->len;
 	mlx5e_tls_complete_sync_skb(skb, nskb, tcp_seq, headln,
 				    cpu_to_be64(info.rcd_sn));
-	mlx5e_sq_xmit(sq, nskb, *wqe, *pi);
-	mlx5e_sq_fetch_wqe(sq, wqe, pi);
-	return skb;
+	mlx5e_sq_xmit_simple(sq, nskb, true);
+
+	return true;
 
 err_out:
 	dev_kfree_skb_any(skb);
-	return NULL;
+	return false;
 }
 
-struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev,
-					struct mlx5e_txqsq *sq,
-					struct sk_buff *skb,
-					struct mlx5e_tx_wqe **wqe,
-					u16 *pi)
+bool mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
+			     struct sk_buff *skb, struct mlx5e_accel_tx_tls_state *state)
 {
 	struct mlx5e_priv *priv = netdev_priv(netdev);
 	struct mlx5e_tls_offload_context_tx *context;
@@ -271,35 +264,47 @@
 	u32 skb_seq;
 
 	if (!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk))
-		goto out;
+		return true;
 
 	datalen = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
 	if (!datalen)
-		goto out;
+		return true;
+
+	mlx5e_tx_mpwqe_ensure_complete(sq);
 
 	tls_ctx = tls_get_ctx(skb->sk);
-	if (unlikely(tls_ctx->netdev != netdev))
-		goto out;
+	if (WARN_ON_ONCE(tls_ctx->netdev != netdev))
+		goto err_out;
 
+	if (mlx5_accel_is_ktls_tx(sq->channel->mdev))
+		return mlx5e_ktls_handle_tx_skb(tls_ctx, sq, skb, datalen, state);
+
+	/* FPGA */
 	skb_seq = ntohl(tcp_hdr(skb)->seq);
 	context = mlx5e_get_tls_tx_context(tls_ctx);
 	expected_seq = context->expected_seq;
 
-	if (unlikely(expected_seq != skb_seq)) {
-		skb = mlx5e_tls_handle_ooo(context, sq, skb, wqe, pi, priv->tls);
-		goto out;
-	}
+	if (unlikely(expected_seq != skb_seq))
+		return mlx5e_tls_handle_ooo(context, sq, skb, priv->tls);
 
 	if (unlikely(mlx5e_tls_add_metadata(skb, context->swid))) {
 		atomic64_inc(&priv->tls->sw_stats.tx_tls_drop_metadata);
 		dev_kfree_skb_any(skb);
-		skb = NULL;
-		goto out;
+		return false;
 	}
 
 	context->expected_seq = skb_seq + datalen;
-out:
-	return skb;
+	return true;
+
+err_out:
+	dev_kfree_skb_any(skb);
+	return false;
+}
+
+void mlx5e_tls_handle_tx_wqe(struct mlx5e_txqsq *sq, struct mlx5_wqe_ctrl_seg *cseg,
+			     struct mlx5e_accel_tx_tls_state *state)
+{
+	cseg->tis_tir_num = cpu_to_be32(state->tls_tisn << 8);
 }
 
 static int tls_update_resync_sn(struct net_device *netdev,
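
Not part of the patch, but a minimal caller-side sketch of how the reworked TX entry points fit together. Only mlx5e_tls_handle_tx_skb(), mlx5e_tls_handle_tx_wqe() and struct mlx5e_accel_tx_tls_state come from the change above; the wrapper name and the omitted WQE-building step are assumptions.

/* Hypothetical glue, not taken from this patch: the handler now owns the
 * skb on failure and reports the outcome as a bool, instead of returning
 * a (possibly NULL) skb and posting WQEs through wqe/pi out-parameters. */
static netdev_tx_t xmit_with_tls(struct net_device *dev, struct mlx5e_txqsq *sq,
				 struct sk_buff *skb,
				 struct mlx5_wqe_ctrl_seg *cseg)
{
	struct mlx5e_accel_tx_tls_state tls = {};

	/* Step 1: before reserving a WQE.  A false return means the skb
	 * was already freed (drop) or bounced (OOO resync); the caller
	 * must not touch it again. */
	if (!mlx5e_tls_handle_tx_skb(dev, sq, skb, &tls))
		return NETDEV_TX_OK;

	/* ... the driver builds the WQE for this skb here; cseg points
	 * at its control segment ... */

	/* Step 2: stamp the TIS number saved in the state into the
	 * control segment just before the WQE is posted. */
	mlx5e_tls_handle_tx_wqe(sq, cseg, &tls);

	return NETDEV_TX_OK;
}
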
@@ -348,14 +353,12 @@
 	return 0;
 }
 
-void mlx5e_tls_handle_rx_skb(struct net_device *netdev, struct sk_buff *skb,
-			     u32 *cqe_bcnt)
+/* FPGA tls rx handler */
+void mlx5e_tls_handle_rx_skb_metadata(struct mlx5e_rq *rq, struct sk_buff *skb,
+				      u32 *cqe_bcnt)
 {
 	struct mlx5e_tls_metadata *mdata;
 	struct mlx5e_priv *priv;
-
-	if (!is_metadata_hdr_valid(skb))
-		return;
 
 	/* Use the metadata */
 	mdata = (struct mlx5e_tls_metadata *)(skb->data + ETH_HLEN);
@@ -364,13 +367,13 @@
 		skb->decrypted = 1;
 		break;
 	case SYNDROM_RESYNC_REQUEST:
-		tls_update_resync_sn(netdev, skb, mdata);
-		priv = netdev_priv(netdev);
+		tls_update_resync_sn(rq->netdev, skb, mdata);
+		priv = netdev_priv(rq->netdev);
 		atomic64_inc(&priv->tls->sw_stats.rx_tls_resync_request);
 		break;
 	case SYNDROM_AUTH_FAILED:
 		/* Authentication failure will be observed and verified by kTLS */
-		priv = netdev_priv(netdev);
+		priv = netdev_priv(rq->netdev);
 		atomic64_inc(&priv->tls->sw_stats.rx_tls_auth_fail);
 		break;
 	default:
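
The handler above is renamed to *_metadata, takes the RQ instead of the netdev, and no longer checks is_metadata_hdr_valid() itself, which suggests the RX path now gates the call. A hedged sketch of such a wrapper follows; the wrapper name is illustrative, and only is_metadata_hdr_valid() and mlx5e_tls_handle_rx_skb_metadata() come from this file and patch.

/* Illustrative wrapper, not the in-tree one: only skbs that actually
 * carry the FPGA TLS metadata header are handed to the handler. */
static inline void rx_handle_fpga_tls(struct mlx5e_rq *rq, struct sk_buff *skb,
				      u32 *cqe_bcnt)
{
	if (is_metadata_hdr_valid(skb))
		mlx5e_tls_handle_rx_skb_metadata(rq, skb, cqe_bcnt);
}
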
@@ -381,3 +384,18 @@
 	remove_metadata_hdr(skb);
 	*cqe_bcnt -= MLX5E_METADATA_ETHER_LEN;
 }
+
+u16 mlx5e_tls_get_stop_room(struct mlx5e_txqsq *sq)
+{
+	struct mlx5_core_dev *mdev = sq->channel->mdev;
+
+	if (!mlx5_accel_is_tls_device(mdev))
+		return 0;
+
+	if (mlx5_accel_is_ktls_device(mdev))
+		return mlx5e_ktls_get_stop_room(sq);
+
+	/* FPGA */
+	/* Resync SKB. */
+	return mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS);
+}
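
A sketch of how a stop-room value like this is typically consumed, under assumptions: sq->stop_room, sq->wq/cc/pc and mlx5e_wqc_has_room_for() follow existing mlx5e conventions, but the wiring below is illustrative and not part of this patch.

/* Assumed integration, not from this patch: reserve the worst-case TLS
 * space at SQ setup, then refuse to post when fewer WQEBBs than that
 * would remain free. */
static void sq_reserve_tls_stop_room(struct mlx5e_txqsq *sq)
{
	/* Room for kTLS parameter WQEs (via mlx5e_ktls_get_stop_room())
	 * or one FPGA resync SKB of up to MLX5_SEND_WQE_MAX_WQEBBS. */
	sq->stop_room += mlx5e_tls_get_stop_room(sq);
}

static bool sq_has_room(struct mlx5e_txqsq *sq, u16 wqebbs)
{
	/* Assumed helper and fields: keep the queue stopped while the
	 * reserved room is not available on top of this WQE's size. */
	return mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc,
				      wqebbs + sq->stop_room);
}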
|---|