...
 #include <linux/skbuff.h>
 #include <linux/netdevice.h>
 #include "en_accel/ipsec_rxtx.h"
+#include "en_accel/tls.h"
 #include "en_accel/tls_rxtx.h"
 #include "en.h"
+#include "en/txrx.h"
+
+#if IS_ENABLED(CONFIG_GENEVE)
+#include <net/geneve.h>
+
+static inline bool mlx5_geneve_tx_allowed(struct mlx5_core_dev *mdev)
+{
+	return mlx5_tx_swp_supported(mdev);
+}
+
+static inline void
+mlx5e_tx_tunnel_accel(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg, u16 ihs)
+{
+	struct mlx5e_swp_spec swp_spec = {};
+	unsigned int offset = 0;
+	__be16 l3_proto;
+	u8 l4_proto;
+
+	l3_proto = vlan_get_protocol(skb);
+	switch (l3_proto) {
+	case htons(ETH_P_IP):
+		l4_proto = ip_hdr(skb)->protocol;
+		break;
+	case htons(ETH_P_IPV6):
+		l4_proto = ipv6_find_hdr(skb, &offset, -1, NULL, NULL);
+		break;
+	default:
+		return;
+	}
+
+	if (l4_proto != IPPROTO_UDP ||
+	    udp_hdr(skb)->dest != cpu_to_be16(GENEVE_UDP_PORT))
+		return;
+	swp_spec.l3_proto = l3_proto;
+	swp_spec.l4_proto = l4_proto;
+	swp_spec.is_tun = true;
+	if (inner_ip_hdr(skb)->version == 6) {
+		swp_spec.tun_l3_proto = htons(ETH_P_IPV6);
+		swp_spec.tun_l4_proto = inner_ipv6_hdr(skb)->nexthdr;
+	} else {
+		swp_spec.tun_l3_proto = htons(ETH_P_IP);
+		swp_spec.tun_l4_proto = inner_ip_hdr(skb)->protocol;
+	}
+
+	mlx5e_set_eseg_swp(skb, eseg, &swp_spec);
+	if (skb_vlan_tag_present(skb) && ihs)
+		mlx5e_eseg_swp_offsets_add_vlan(eseg);
+}
+
+#else
+static inline bool mlx5_geneve_tx_allowed(struct mlx5_core_dev *mdev)
+{
+	return false;
+}
+
+#endif /* CONFIG_GENEVE */
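
For orientation: the software parser (SWP) fields filled in by mlx5e_set_eseg_swp() tell the NIC where the outer and inner L3/L4 headers start, since the hardware does not parse the GENEVE encapsulation itself. Below is a minimal, self-contained userspace sketch of that offset arithmetic, assuming an IPv4 outer and inner header, no VLAN tag, and no GENEVE options. The division by two reflects the device taking these offsets in 2-byte words, which is an assumption drawn from the driver's helpers, not stated by this patch.

/* Illustrative sketch only, not driver code. Computes where each header
 * of a GENEVE-encapsulated frame starts, the kind of offsets the SWP
 * fields carry. Assumes IPv4 outer/inner, no VLAN, no GENEVE options.
 */
#include <stdio.h>

enum {
	ETH_HDR_LEN    = 14, /* Ethernet header */
	IPV4_HDR_LEN   = 20, /* IPv4 header without options */
	UDP_HDR_LEN    = 8,  /* UDP header */
	GENEVE_HDR_LEN = 8,  /* GENEVE base header, no options */
};

int main(void)
{
	unsigned int outer_l3 = ETH_HDR_LEN;             /* 14 */
	unsigned int outer_l4 = outer_l3 + IPV4_HDR_LEN; /* 34 */
	/* GENEVE carries a full inner Ethernet frame. */
	unsigned int inner_l3 = outer_l4 + UDP_HDR_LEN + GENEVE_HDR_LEN +
				ETH_HDR_LEN;             /* 64 */
	unsigned int inner_l4 = inner_l3 + IPV4_HDR_LEN; /* 84 */

	/* Assumption: the device expects offsets in 2-byte words. */
	printf("outer l3/l4 swp offsets: %u %u\n", outer_l3 / 2, outer_l4 / 2);
	printf("inner l3/l4 swp offsets: %u %u\n", inner_l3 / 2, inner_l4 / 2);
	return 0;
}
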
 
 static inline void
 mlx5e_udp_gso_handle_tx_skb(struct sk_buff *skb)
...
 	udp_hdr(skb)->len = htons(payload_len);
 }
 
-static inline struct sk_buff *
-mlx5e_accel_handle_tx(struct sk_buff *skb,
-		      struct mlx5e_txqsq *sq,
-		      struct net_device *dev,
-		      struct mlx5e_tx_wqe **wqe,
-		      u16 *pi)
+struct mlx5e_accel_tx_state {
+#ifdef CONFIG_MLX5_EN_TLS
+	struct mlx5e_accel_tx_tls_state tls;
+#endif
+#ifdef CONFIG_MLX5_EN_IPSEC
+	struct mlx5e_accel_tx_ipsec_state ipsec;
+#endif
+};
+
+static inline bool mlx5e_accel_tx_begin(struct net_device *dev,
+					struct mlx5e_txqsq *sq,
+					struct sk_buff *skb,
+					struct mlx5e_accel_tx_state *state)
 {
+	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
+		mlx5e_udp_gso_handle_tx_skb(skb);
+
 #ifdef CONFIG_MLX5_EN_TLS
 	if (test_bit(MLX5E_SQ_STATE_TLS, &sq->state)) {
-		skb = mlx5e_tls_handle_tx_skb(dev, sq, skb, wqe, pi);
-		if (unlikely(!skb))
-			return NULL;
+		/* May send SKBs and WQEs. */
+		if (unlikely(!mlx5e_tls_handle_tx_skb(dev, sq, skb, &state->tls)))
+			return false;
 	}
 #endif
 
 #ifdef CONFIG_MLX5_EN_IPSEC
-	if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state)) {
-		skb = mlx5e_ipsec_handle_tx_skb(dev, *wqe, skb);
-		if (unlikely(!skb))
-			return NULL;
+	if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state) && xfrm_offload(skb)) {
+		if (unlikely(!mlx5e_ipsec_handle_tx_skb(dev, skb, &state->ipsec)))
+			return false;
 	}
 #endif
 
-	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
-		mlx5e_udp_gso_handle_tx_skb(skb);
-
-	return skb;
+	return true;
 }
 
+static inline bool mlx5e_accel_tx_is_ipsec_flow(struct mlx5e_accel_tx_state *state)
+{
+#ifdef CONFIG_MLX5_EN_IPSEC
+	return mlx5e_ipsec_is_tx_flow(&state->ipsec);
+#endif
+
+	return false;
+}
+
+static inline unsigned int mlx5e_accel_tx_ids_len(struct mlx5e_txqsq *sq,
+						  struct mlx5e_accel_tx_state *state)
+{
+#ifdef CONFIG_MLX5_EN_IPSEC
+	if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state))
+		return mlx5e_ipsec_tx_ids_len(&state->ipsec);
+#endif
+
+	return 0;
+}
+
+/* Part of the eseg touched by TX offloads */
+#define MLX5E_ACCEL_ESEG_LEN offsetof(struct mlx5_wqe_eth_seg, mss)
+
+static inline bool mlx5e_accel_tx_eseg(struct mlx5e_priv *priv,
+				       struct sk_buff *skb,
+				       struct mlx5_wqe_eth_seg *eseg, u16 ihs)
+{
+#ifdef CONFIG_MLX5_EN_IPSEC
+	if (xfrm_offload(skb))
+		mlx5e_ipsec_tx_build_eseg(priv, skb, eseg);
+#endif
+
+#if IS_ENABLED(CONFIG_GENEVE)
+	if (skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL)
+		mlx5e_tx_tunnel_accel(skb, eseg, ihs);
+#endif
+
+	return true;
+}
+
+static inline void mlx5e_accel_tx_finish(struct mlx5e_txqsq *sq,
+					 struct mlx5e_tx_wqe *wqe,
+					 struct mlx5e_accel_tx_state *state,
+					 struct mlx5_wqe_inline_seg *inlseg)
+{
+#ifdef CONFIG_MLX5_EN_TLS
+	mlx5e_tls_handle_tx_wqe(sq, &wqe->ctrl, &state->tls);
+#endif
+
+#ifdef CONFIG_MLX5_EN_IPSEC
+	if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state) &&
+	    state->ipsec.xo && state->ipsec.tailen)
+		mlx5e_ipsec_handle_tx_wqe(wqe, &state->ipsec, inlseg);
+#endif
+}
+
+static inline int mlx5e_accel_init_rx(struct mlx5e_priv *priv)
+{
+	return mlx5e_ktls_init_rx(priv);
+}
+
+static inline void mlx5e_accel_cleanup_rx(struct mlx5e_priv *priv)
+{
+	mlx5e_ktls_cleanup_rx(priv);
+}
 #endif /* __MLX5E_EN_ACCEL_H__ */
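
Taken as a whole, the patch replaces the single-shot mlx5e_accel_handle_tx() with hooks that bracket WQE construction: mlx5e_accel_tx_begin() before a WQE is reserved, mlx5e_accel_tx_eseg() while the Ethernet segment is built, and mlx5e_accel_tx_finish() once the WQE exists. Below is a hedged caller-side sketch of that lifecycle; the accel entry points are taken from the header above, but every sketch_* helper is a hypothetical stand-in for the driver's real queue selection and WQE bookkeeping, and this is not the driver's actual mlx5e_xmit().

/* Hedged caller-side sketch, not the real xmit path. The accel calls
 * come from the header above; sketch_* helpers are hypothetical.
 */
static netdev_tx_t sketch_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_accel_tx_state accel = {};
	struct mlx5e_txqsq *sq = sketch_select_sq(priv, skb);	/* hypothetical */
	struct mlx5e_tx_wqe *wqe;
	u16 ihs;

	/* Stage 1: runs before any WQE is reserved; it may post WQEs of
	 * its own or consume the skb, so false means "nothing more to do".
	 */
	if (unlikely(!mlx5e_accel_tx_begin(dev, sq, skb, &accel)))
		return NETDEV_TX_OK;

	wqe = sketch_reserve_wqe(sq);				/* hypothetical */
	ihs = sketch_inline_header_size(skb);			/* hypothetical */

	/* Stage 2: offloads fill the leading MLX5E_ACCEL_ESEG_LEN bytes
	 * of the Ethernet segment (everything before the 'mss' field).
	 */
	mlx5e_accel_tx_eseg(priv, skb, &wqe->eth, ihs);

	/* ... normal descriptor build: DMA mapping, inline headers ... */

	/* Stage 3: the WQE now exists; patch in TLS metadata and, for
	 * IPsec, write the trailer through the inline segment.
	 */
	mlx5e_accel_tx_finish(sq, wqe, &accel, sketch_inline_seg(wqe)); /* hypothetical */

	return NETDEV_TX_OK;
}

The begin/finish split exists because TLS needs to touch the control segment and IPsec appends its trailer only after the WQE layout is known; the per-packet mlx5e_accel_tx_state is what lets mlx5e_accel_tx_begin() hand what it learned to mlx5e_accel_tx_finish().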