.. | .. |
---|
30 | 30 | * SOFTWARE. |
---|
31 | 31 | */ |
---|
32 | 32 | |
---|
33 | | -#include <linux/prefetch.h> |
---|
34 | 33 | #include <linux/ip.h> |
---|
35 | 34 | #include <linux/ipv6.h> |
---|
36 | 35 | #include <linux/tcp.h> |
---|
37 | | -#include <net/busy_poll.h> |
---|
38 | 36 | #include <net/ip6_checksum.h> |
---|
39 | 37 | #include <net/page_pool.h> |
---|
40 | 38 | #include <net/inet_ecn.h> |
---|
41 | 39 | #include "en.h" |
---|
| 40 | +#include "en/txrx.h" |
---|
42 | 41 | #include "en_tc.h" |
---|
43 | 42 | #include "eswitch.h" |
---|
44 | 43 | #include "en_rep.h" |
---|
| 44 | +#include "en/rep/tc.h" |
---|
45 | 45 | #include "ipoib/ipoib.h" |
---|
| 46 | +#include "accel/ipsec.h" |
---|
| 47 | +#include "fpga/ipsec.h" |
---|
46 | 48 | #include "en_accel/ipsec_rxtx.h" |
---|
47 | 49 | #include "en_accel/tls_rxtx.h" |
---|
48 | 50 | #include "lib/clock.h" |
---|
49 | 51 | #include "en/xdp.h" |
---|
| 52 | +#include "en/xsk/rx.h" |
---|
| 53 | +#include "en/health.h" |
---|
| 54 | +#include "en/params.h" |
---|
| 55 | +#include "en/txrx.h" |
---|
| 56 | + |
---|
| 57 | +static struct sk_buff * |
---|
| 58 | +mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, |
---|
| 59 | + u16 cqe_bcnt, u32 head_offset, u32 page_idx); |
---|
| 60 | +static struct sk_buff * |
---|
| 61 | +mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, |
---|
| 62 | + u16 cqe_bcnt, u32 head_offset, u32 page_idx); |
---|
| 63 | +static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe); |
---|
| 64 | +static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe); |
---|
| 65 | + |
---|
| 66 | +const struct mlx5e_rx_handlers mlx5e_rx_handlers_nic = { |
---|
| 67 | + .handle_rx_cqe = mlx5e_handle_rx_cqe, |
---|
| 68 | + .handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq, |
---|
| 69 | +}; |
---|
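The hunk above replaces per-profile exported handler symbols with a `const struct mlx5e_rx_handlers` table plus forward declarations, so the hot path can use `INDIRECT_CALL_2()` to turn the usual function-pointer call into a comparison against the two most likely handlers (which matters under retpoline). A minimal standalone sketch of that devirtualization pattern, with illustrative names only, not driver code:

```c
#include <stdio.h>

struct rq;
struct cqe { int bytes; };
typedef void (*rx_handler_t)(struct rq *rq, struct cqe *cqe);

static void handle_rx_cqe(struct rq *rq, struct cqe *cqe)
{
	(void)rq;
	printf("legacy cyclic RQ: %d bytes\n", cqe->bytes);
}

static void handle_rx_cqe_mpwrq(struct rq *rq, struct cqe *cqe)
{
	(void)rq;
	printf("striding RQ: %d bytes\n", cqe->bytes);
}

struct rq { rx_handler_t handle_rx_cqe; };

/* Models what INDIRECT_CALL_2(rq->handle_rx_cqe, f2, f1, ...) expands to:
 * compare the stored pointer against the known candidates and call them
 * directly; fall back to the indirect call only for unknown handlers.
 */
static void dispatch(struct rq *rq, struct cqe *cqe)
{
	if (rq->handle_rx_cqe == handle_rx_cqe_mpwrq)
		handle_rx_cqe_mpwrq(rq, cqe);		/* direct call */
	else if (rq->handle_rx_cqe == handle_rx_cqe)
		handle_rx_cqe(rq, cqe);			/* direct call */
	else
		rq->handle_rx_cqe(rq, cqe);		/* indirect fallback */
}

int main(void)
{
	struct cqe cqe = { .bytes = 1500 };
	struct rq rq = { .handle_rx_cqe = handle_rx_cqe_mpwrq };

	dispatch(&rq, &cqe);
	return 0;
}
```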
50 | 70 | |
---|
51 | 71 | static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config) |
---|
52 | 72 | { |
---|
53 | 73 | return config->rx_filter == HWTSTAMP_FILTER_ALL; |
---|
54 | 74 | } |
---|
55 | 75 | |
---|
56 | | -static inline void mlx5e_read_cqe_slot(struct mlx5e_cq *cq, u32 cqcc, |
---|
57 | | - void *data) |
---|
| 76 | +static inline void mlx5e_read_cqe_slot(struct mlx5_cqwq *wq, |
---|
| 77 | + u32 cqcc, void *data) |
---|
58 | 78 | { |
---|
59 | | - u32 ci = mlx5_cqwq_ctr2ix(&cq->wq, cqcc); |
---|
| 79 | + u32 ci = mlx5_cqwq_ctr2ix(wq, cqcc); |
---|
60 | 80 | |
---|
61 | | - memcpy(data, mlx5_cqwq_get_wqe(&cq->wq, ci), sizeof(struct mlx5_cqe64)); |
---|
| 81 | + memcpy(data, mlx5_cqwq_get_wqe(wq, ci), sizeof(struct mlx5_cqe64)); |
---|
62 | 82 | } |
---|
63 | 83 | |
---|
64 | 84 | static inline void mlx5e_read_title_slot(struct mlx5e_rq *rq, |
---|
65 | | - struct mlx5e_cq *cq, u32 cqcc) |
---|
| 85 | + struct mlx5_cqwq *wq, |
---|
| 86 | + u32 cqcc) |
---|
66 | 87 | { |
---|
67 | | - mlx5e_read_cqe_slot(cq, cqcc, &cq->title); |
---|
68 | | - cq->decmprs_left = be32_to_cpu(cq->title.byte_cnt); |
---|
69 | | - cq->decmprs_wqe_counter = be16_to_cpu(cq->title.wqe_counter); |
---|
| 88 | + struct mlx5e_cq_decomp *cqd = &rq->cqd; |
---|
| 89 | + struct mlx5_cqe64 *title = &cqd->title; |
---|
| 90 | + |
---|
| 91 | + mlx5e_read_cqe_slot(wq, cqcc, title); |
---|
| 92 | + cqd->left = be32_to_cpu(title->byte_cnt); |
---|
| 93 | + cqd->wqe_counter = be16_to_cpu(title->wqe_counter); |
---|
70 | 94 | rq->stats->cqe_compress_blks++; |
---|
71 | 95 | } |
---|
72 | 96 | |
---|
73 | | -static inline void mlx5e_read_mini_arr_slot(struct mlx5e_cq *cq, u32 cqcc) |
---|
| 97 | +static inline void mlx5e_read_mini_arr_slot(struct mlx5_cqwq *wq, |
---|
| 98 | + struct mlx5e_cq_decomp *cqd, |
---|
| 99 | + u32 cqcc) |
---|
74 | 100 | { |
---|
75 | | - mlx5e_read_cqe_slot(cq, cqcc, cq->mini_arr); |
---|
76 | | - cq->mini_arr_idx = 0; |
---|
| 101 | + mlx5e_read_cqe_slot(wq, cqcc, cqd->mini_arr); |
---|
| 102 | + cqd->mini_arr_idx = 0; |
---|
77 | 103 | } |
---|
78 | 104 | |
---|
79 | | -static inline void mlx5e_cqes_update_owner(struct mlx5e_cq *cq, u32 cqcc, int n) |
---|
| 105 | +static inline void mlx5e_cqes_update_owner(struct mlx5_cqwq *wq, int n) |
---|
80 | 106 | { |
---|
81 | | - struct mlx5_cqwq *wq = &cq->wq; |
---|
82 | | - |
---|
| 107 | + u32 cqcc = wq->cc; |
---|
83 | 108 | u8 op_own = mlx5_cqwq_get_ctr_wrap_cnt(wq, cqcc) & 1; |
---|
84 | 109 | u32 ci = mlx5_cqwq_ctr2ix(wq, cqcc); |
---|
85 | 110 | u32 wq_sz = mlx5_cqwq_get_size(wq); |
---|
86 | 111 | u32 ci_top = min_t(u32, wq_sz, ci + n); |
---|
87 | 112 | |
---|
88 | 113 | for (; ci < ci_top; ci++, n--) { |
---|
89 | | - struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, ci); |
---|
| 114 | + struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci); |
---|
90 | 115 | |
---|
91 | 116 | cqe->op_own = op_own; |
---|
92 | 117 | } |
---|
.. | .. |
---|
94 | 119 | if (unlikely(ci == wq_sz)) { |
---|
95 | 120 | op_own = !op_own; |
---|
96 | 121 | for (ci = 0; ci < n; ci++) { |
---|
97 | | - struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, ci); |
---|
| 122 | + struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci); |
---|
98 | 123 | |
---|
99 | 124 | cqe->op_own = op_own; |
---|
100 | 125 | } |
---|
.. | .. |
---|
102 | 127 | } |
---|
103 | 128 | |
---|
104 | 129 | static inline void mlx5e_decompress_cqe(struct mlx5e_rq *rq, |
---|
105 | | - struct mlx5e_cq *cq, u32 cqcc) |
---|
| 130 | + struct mlx5_cqwq *wq, |
---|
| 131 | + u32 cqcc) |
---|
106 | 132 | { |
---|
107 | | - cq->title.byte_cnt = cq->mini_arr[cq->mini_arr_idx].byte_cnt; |
---|
108 | | - cq->title.check_sum = cq->mini_arr[cq->mini_arr_idx].checksum; |
---|
109 | | - cq->title.op_own &= 0xf0; |
---|
110 | | - cq->title.op_own |= 0x01 & (cqcc >> cq->wq.fbc.log_sz); |
---|
111 | | - cq->title.wqe_counter = cpu_to_be16(cq->decmprs_wqe_counter); |
---|
| 133 | + struct mlx5e_cq_decomp *cqd = &rq->cqd; |
---|
| 134 | + struct mlx5_mini_cqe8 *mini_cqe = &cqd->mini_arr[cqd->mini_arr_idx]; |
---|
| 135 | + struct mlx5_cqe64 *title = &cqd->title; |
---|
112 | 136 | |
---|
| 137 | + title->byte_cnt = mini_cqe->byte_cnt; |
---|
| 138 | + title->check_sum = mini_cqe->checksum; |
---|
| 139 | + title->op_own &= 0xf0; |
---|
| 140 | + title->op_own |= 0x01 & (cqcc >> wq->fbc.log_sz); |
---|
| 141 | + |
---|
| 142 | + /* state bit set implies linked-list striding RQ wq type and |
---|
| 143 | + * HW stride index capability supported |
---|
| 144 | + */ |
---|
| 145 | + if (test_bit(MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, &rq->state)) { |
---|
| 146 | + title->wqe_counter = mini_cqe->stridx; |
---|
| 147 | + return; |
---|
| 148 | + } |
---|
| 149 | + |
---|
| 150 | + /* HW stride index capability not supported */ |
---|
| 151 | + title->wqe_counter = cpu_to_be16(cqd->wqe_counter); |
---|
113 | 152 | if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) |
---|
114 | | - cq->decmprs_wqe_counter += |
---|
115 | | - mpwrq_get_cqe_consumed_strides(&cq->title); |
---|
| 153 | + cqd->wqe_counter += mpwrq_get_cqe_consumed_strides(title); |
---|
116 | 154 | else |
---|
117 | | - cq->decmprs_wqe_counter = |
---|
118 | | - mlx5_wq_cyc_ctr2ix(&rq->wqe.wq, cq->decmprs_wqe_counter + 1); |
---|
| 155 | + cqd->wqe_counter = |
---|
| 156 | + mlx5_wq_cyc_ctr2ix(&rq->wqe.wq, cqd->wqe_counter + 1); |
---|
119 | 157 | } |
---|
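Context for the decompression hunk above: with CQE compression only the first ("title") CQE of a session is complete; the following mini CQEs carry just the byte count, checksum and, when the HW stride-index capability bit is set, the stride index. Without that capability the driver rebuilds `wqe_counter` itself, and it does so differently for striding and cyclic RQs. A simplified standalone model of that bookkeeping (made-up WQ size, no real CQE layout):

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define WQ_MASK 0xff	/* cyclic WQ of 256 entries, illustrative only */

struct cq_decomp {
	uint16_t wqe_counter;	/* seeded from the title CQE */
};

/* Returns the wqe_counter for the current mini CQE and advances the state
 * the way mlx5e_decompress_cqe() does when no HW stride index is present.
 */
static uint16_t next_wqe_counter(struct cq_decomp *cqd, bool striding_rq,
				 uint16_t consumed_strides)
{
	uint16_t cur = cqd->wqe_counter;

	if (striding_rq)		/* one WQE spans many strides */
		cqd->wqe_counter += consumed_strides;
	else				/* cyclic RQ: one WQE per packet */
		cqd->wqe_counter = (cqd->wqe_counter + 1) & WQ_MASK;

	return cur;
}

int main(void)
{
	struct cq_decomp cqd = { .wqe_counter = 10 };

	for (int i = 0; i < 3; i++)
		printf("cyclic pkt %d -> wqe %u\n", i,
		       next_wqe_counter(&cqd, false, 0));
	return 0;
}
```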
120 | 158 | |
---|
121 | 159 | static inline void mlx5e_decompress_cqe_no_hash(struct mlx5e_rq *rq, |
---|
122 | | - struct mlx5e_cq *cq, u32 cqcc) |
---|
| 160 | + struct mlx5_cqwq *wq, |
---|
| 161 | + u32 cqcc) |
---|
123 | 162 | { |
---|
124 | | - mlx5e_decompress_cqe(rq, cq, cqcc); |
---|
125 | | - cq->title.rss_hash_type = 0; |
---|
126 | | - cq->title.rss_hash_result = 0; |
---|
| 163 | + struct mlx5e_cq_decomp *cqd = &rq->cqd; |
---|
| 164 | + |
---|
| 165 | + mlx5e_decompress_cqe(rq, wq, cqcc); |
---|
| 166 | + cqd->title.rss_hash_type = 0; |
---|
| 167 | + cqd->title.rss_hash_result = 0; |
---|
127 | 168 | } |
---|
128 | 169 | |
---|
129 | 170 | static inline u32 mlx5e_decompress_cqes_cont(struct mlx5e_rq *rq, |
---|
130 | | - struct mlx5e_cq *cq, |
---|
| 171 | + struct mlx5_cqwq *wq, |
---|
131 | 172 | int update_owner_only, |
---|
132 | 173 | int budget_rem) |
---|
133 | 174 | { |
---|
134 | | - u32 cqcc = cq->wq.cc + update_owner_only; |
---|
| 175 | + struct mlx5e_cq_decomp *cqd = &rq->cqd; |
---|
| 176 | + u32 cqcc = wq->cc + update_owner_only; |
---|
135 | 177 | u32 cqe_count; |
---|
136 | 178 | u32 i; |
---|
137 | 179 | |
---|
138 | | - cqe_count = min_t(u32, cq->decmprs_left, budget_rem); |
---|
| 180 | + cqe_count = min_t(u32, cqd->left, budget_rem); |
---|
139 | 181 | |
---|
140 | 182 | for (i = update_owner_only; i < cqe_count; |
---|
141 | | - i++, cq->mini_arr_idx++, cqcc++) { |
---|
142 | | - if (cq->mini_arr_idx == MLX5_MINI_CQE_ARRAY_SIZE) |
---|
143 | | - mlx5e_read_mini_arr_slot(cq, cqcc); |
---|
| 183 | + i++, cqd->mini_arr_idx++, cqcc++) { |
---|
| 184 | + if (cqd->mini_arr_idx == MLX5_MINI_CQE_ARRAY_SIZE) |
---|
| 185 | + mlx5e_read_mini_arr_slot(wq, cqd, cqcc); |
---|
144 | 186 | |
---|
145 | | - mlx5e_decompress_cqe_no_hash(rq, cq, cqcc); |
---|
146 | | - rq->handle_rx_cqe(rq, &cq->title); |
---|
| 187 | + mlx5e_decompress_cqe_no_hash(rq, wq, cqcc); |
---|
| 188 | + INDIRECT_CALL_2(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq, |
---|
| 189 | + mlx5e_handle_rx_cqe, rq, &cqd->title); |
---|
147 | 190 | } |
---|
148 | | - mlx5e_cqes_update_owner(cq, cq->wq.cc, cqcc - cq->wq.cc); |
---|
149 | | - cq->wq.cc = cqcc; |
---|
150 | | - cq->decmprs_left -= cqe_count; |
---|
| 191 | + mlx5e_cqes_update_owner(wq, cqcc - wq->cc); |
---|
| 192 | + wq->cc = cqcc; |
---|
| 193 | + cqd->left -= cqe_count; |
---|
151 | 194 | rq->stats->cqe_compress_pkts += cqe_count; |
---|
152 | 195 | |
---|
153 | 196 | return cqe_count; |
---|
154 | 197 | } |
---|
155 | 198 | |
---|
156 | 199 | static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq, |
---|
157 | | - struct mlx5e_cq *cq, |
---|
| 200 | + struct mlx5_cqwq *wq, |
---|
158 | 201 | int budget_rem) |
---|
159 | 202 | { |
---|
160 | | - mlx5e_read_title_slot(rq, cq, cq->wq.cc); |
---|
161 | | - mlx5e_read_mini_arr_slot(cq, cq->wq.cc + 1); |
---|
162 | | - mlx5e_decompress_cqe(rq, cq, cq->wq.cc); |
---|
163 | | - rq->handle_rx_cqe(rq, &cq->title); |
---|
164 | | - cq->mini_arr_idx++; |
---|
| 203 | + struct mlx5e_cq_decomp *cqd = &rq->cqd; |
---|
| 204 | + u32 cc = wq->cc; |
---|
165 | 205 | |
---|
166 | | - return mlx5e_decompress_cqes_cont(rq, cq, 1, budget_rem) - 1; |
---|
| 206 | + mlx5e_read_title_slot(rq, wq, cc); |
---|
| 207 | + mlx5e_read_mini_arr_slot(wq, cqd, cc + 1); |
---|
| 208 | + mlx5e_decompress_cqe(rq, wq, cc); |
---|
| 209 | + INDIRECT_CALL_2(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq, |
---|
| 210 | + mlx5e_handle_rx_cqe, rq, &cqd->title); |
---|
| 211 | + cqd->mini_arr_idx++; |
---|
| 212 | + |
---|
| 213 | + return mlx5e_decompress_cqes_cont(rq, wq, 1, budget_rem) - 1; |
---|
167 | 214 | } |
---|
168 | 215 | |
---|
169 | 216 | static inline bool mlx5e_page_is_reserved(struct page *page) |
---|
.. | .. |
---|
219 | 266 | return true; |
---|
220 | 267 | } |
---|
221 | 268 | |
---|
222 | | -static inline int mlx5e_page_alloc_mapped(struct mlx5e_rq *rq, |
---|
223 | | - struct mlx5e_dma_info *dma_info) |
---|
| 269 | +static inline int mlx5e_page_alloc_pool(struct mlx5e_rq *rq, |
---|
| 270 | + struct mlx5e_dma_info *dma_info) |
---|
224 | 271 | { |
---|
225 | 272 | if (mlx5e_rx_cache_get(rq, dma_info)) |
---|
226 | 273 | return 0; |
---|
.. | .. |
---|
229 | 276 | if (unlikely(!dma_info->page)) |
---|
230 | 277 | return -ENOMEM; |
---|
231 | 278 | |
---|
232 | | - dma_info->addr = dma_map_page(rq->pdev, dma_info->page, 0, |
---|
233 | | - PAGE_SIZE, rq->buff.map_dir); |
---|
| 279 | + dma_info->addr = dma_map_page_attrs(rq->pdev, dma_info->page, 0, PAGE_SIZE, |
---|
| 280 | + rq->buff.map_dir, DMA_ATTR_SKIP_CPU_SYNC); |
---|
234 | 281 | if (unlikely(dma_mapping_error(rq->pdev, dma_info->addr))) { |
---|
235 | | - put_page(dma_info->page); |
---|
| 282 | + page_pool_recycle_direct(rq->page_pool, dma_info->page); |
---|
236 | 283 | dma_info->page = NULL; |
---|
237 | 284 | return -ENOMEM; |
---|
238 | 285 | } |
---|
.. | .. |
---|
240 | 287 | return 0; |
---|
241 | 288 | } |
---|
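The switch from `dma_map_page()` to `dma_map_page_attrs(..., DMA_ATTR_SKIP_CPU_SYNC)` means the map call does no cache maintenance at all; the driver instead syncs only the bytes the CPU actually touches via the existing `dma_sync_single_range_for_cpu()` calls on the fast path. A kernel-context sketch of that pairing (not standalone-buildable; the `rx_buf` struct and both helpers are made up stand-ins for `mlx5e_dma_info` and its wrappers):

```c
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/mm.h>

struct rx_buf {			/* stand-in for mlx5e_dma_info */
	struct page *page;
	dma_addr_t addr;
};

static int rx_buf_map(struct device *dev, struct rx_buf *buf)
{
	/* No cache maintenance here: DMA_ATTR_SKIP_CPU_SYNC defers it. */
	buf->addr = dma_map_page_attrs(dev, buf->page, 0, PAGE_SIZE,
				       DMA_FROM_DEVICE,
				       DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(dev, buf->addr))
		return -ENOMEM;
	return 0;
}

/* Sync only the bytes the CPU is about to read from a received fragment. */
static void rx_buf_sync_for_cpu(struct device *dev, struct rx_buf *buf,
				unsigned int offset, unsigned int len)
{
	dma_sync_single_range_for_cpu(dev, buf->addr, offset, len,
				      DMA_FROM_DEVICE);
}
```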
242 | 289 | |
---|
243 | | -void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info) |
---|
| 290 | +static inline int mlx5e_page_alloc(struct mlx5e_rq *rq, |
---|
| 291 | + struct mlx5e_dma_info *dma_info) |
---|
244 | 292 | { |
---|
245 | | - dma_unmap_page(rq->pdev, dma_info->addr, PAGE_SIZE, rq->buff.map_dir); |
---|
| 293 | + if (rq->xsk_pool) |
---|
| 294 | + return mlx5e_xsk_page_alloc_pool(rq, dma_info); |
---|
| 295 | + else |
---|
| 296 | + return mlx5e_page_alloc_pool(rq, dma_info); |
---|
246 | 297 | } |
---|
247 | 298 | |
---|
248 | | -void mlx5e_page_release(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info, |
---|
249 | | - bool recycle) |
---|
| 299 | +void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info) |
---|
| 300 | +{ |
---|
| 301 | + dma_unmap_page_attrs(rq->pdev, dma_info->addr, PAGE_SIZE, rq->buff.map_dir, |
---|
| 302 | + DMA_ATTR_SKIP_CPU_SYNC); |
---|
| 303 | +} |
---|
| 304 | + |
---|
| 305 | +void mlx5e_page_release_dynamic(struct mlx5e_rq *rq, |
---|
| 306 | + struct mlx5e_dma_info *dma_info, |
---|
| 307 | + bool recycle) |
---|
250 | 308 | { |
---|
251 | 309 | if (likely(recycle)) { |
---|
252 | 310 | if (mlx5e_rx_cache_put(rq, dma_info)) |
---|
.. | .. |
---|
256 | 314 | page_pool_recycle_direct(rq->page_pool, dma_info->page); |
---|
257 | 315 | } else { |
---|
258 | 316 | mlx5e_page_dma_unmap(rq, dma_info); |
---|
| 317 | + page_pool_release_page(rq->page_pool, dma_info->page); |
---|
259 | 318 | put_page(dma_info->page); |
---|
260 | 319 | } |
---|
| 320 | +} |
---|
| 321 | + |
---|
| 322 | +static inline void mlx5e_page_release(struct mlx5e_rq *rq, |
---|
| 323 | + struct mlx5e_dma_info *dma_info, |
---|
| 324 | + bool recycle) |
---|
| 325 | +{ |
---|
| 326 | + if (rq->xsk_pool) |
---|
| 327 | + /* The `recycle` parameter is ignored, and the page is always |
---|
| 328 | + * put into the Reuse Ring, because there is no way to return |
---|
| 329 | + * the page to the userspace when the interface goes down. |
---|
| 330 | + */ |
---|
| 331 | + xsk_buff_free(dma_info->xsk); |
---|
| 332 | + else |
---|
| 333 | + mlx5e_page_release_dynamic(rq, dma_info, recycle); |
---|
261 | 334 | } |
---|
262 | 335 | |
---|
263 | 336 | static inline int mlx5e_get_rx_frag(struct mlx5e_rq *rq, |
---|
.. | .. |
---|
271 | 344 | * offset) should just use the new one without replenishing again |
---|
272 | 345 | * by themselves. |
---|
273 | 346 | */ |
---|
274 | | - err = mlx5e_page_alloc_mapped(rq, frag->di); |
---|
| 347 | + err = mlx5e_page_alloc(rq, frag->di); |
---|
275 | 348 | |
---|
276 | 349 | return err; |
---|
277 | 350 | } |
---|
.. | .. |
---|
324 | 397 | mlx5e_put_rx_frag(rq, wi, recycle); |
---|
325 | 398 | } |
---|
326 | 399 | |
---|
327 | | -void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix) |
---|
| 400 | +static void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix) |
---|
328 | 401 | { |
---|
329 | 402 | struct mlx5e_wqe_frag_info *wi = get_frag(rq, ix); |
---|
330 | 403 | |
---|
.. | .. |
---|
336 | 409 | struct mlx5_wq_cyc *wq = &rq->wqe.wq; |
---|
337 | 410 | int err; |
---|
338 | 411 | int i; |
---|
| 412 | + |
---|
| 413 | + if (rq->xsk_pool) { |
---|
| 414 | + int pages_desired = wqe_bulk << rq->wqe.info.log_num_frags; |
---|
| 415 | + |
---|
| 416 | + /* Check in advance that we have enough frames, instead of |
---|
| 417 | + * allocating one-by-one, failing and moving frames to the |
---|
| 418 | + * Reuse Ring. |
---|
| 419 | + */ |
---|
| 420 | + if (unlikely(!xsk_buff_can_alloc(rq->xsk_pool, pages_desired))) |
---|
| 421 | + return -ENOMEM; |
---|
| 422 | + } |
---|
339 | 423 | |
---|
340 | 424 | for (i = 0; i < wqe_bulk; i++) { |
---|
341 | 425 | struct mlx5e_rx_wqe_cyc *wqe = mlx5_wq_cyc_get_wqe(wq, ix + i); |
---|
.. | .. |
---|
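The `xsk_buff_can_alloc()` pre-check added above avoids allocating frames one by one only to fail midway and push the partial batch back to the Reuse Ring. A standalone model of that reserve-then-fill pattern (toy pool, illustrative only):

```c
#include <stdbool.h>
#include <stdio.h>

struct frame_pool {
	int free_frames;
};

static bool pool_can_alloc(const struct frame_pool *p, int count)
{
	return p->free_frames >= count;
}

static void pool_alloc_one(struct frame_pool *p)
{
	p->free_frames--;	/* cannot fail: capacity was checked first */
}

/* Returns 0 on success, -1 if the pool cannot cover the whole bulk. */
static int fill_wqe_bulk(struct frame_pool *p, int wqe_bulk, int frags_per_wqe)
{
	int needed = wqe_bulk * frags_per_wqe;

	if (!pool_can_alloc(p, needed))
		return -1;	/* nothing consumed; caller retries later */

	for (int i = 0; i < needed; i++)
		pool_alloc_one(p);
	return 0;
}

int main(void)
{
	struct frame_pool pool = { .free_frames = 16 };

	printf("bulk 8x2: %s\n", fill_wqe_bulk(&pool, 8, 2) ? "deferred" : "filled");
	printf("bulk 8x2: %s\n", fill_wqe_bulk(&pool, 8, 2) ? "deferred" : "filled");
	return 0;
}
```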
370 | 454 | static inline void |
---|
371 | 455 | mlx5e_copy_skb_header(struct device *pdev, struct sk_buff *skb, |
---|
372 | 456 | struct mlx5e_dma_info *dma_info, |
---|
373 | | - int offset_from, int offset_to, u32 headlen) |
---|
| 457 | + int offset_from, u32 headlen) |
---|
374 | 458 | { |
---|
375 | 459 | const void *from = page_address(dma_info->page) + offset_from; |
---|
376 | 460 | /* Aligning len to sizeof(long) optimizes memcpy performance */ |
---|
.. | .. |
---|
378 | 462 | |
---|
379 | 463 | dma_sync_single_for_cpu(pdev, dma_info->addr + offset_from, len, |
---|
380 | 464 | DMA_FROM_DEVICE); |
---|
381 | | - skb_copy_to_linear_data_offset(skb, offset_to, from, len); |
---|
382 | | -} |
---|
383 | | - |
---|
384 | | -static inline void |
---|
385 | | -mlx5e_copy_skb_header_mpwqe(struct device *pdev, |
---|
386 | | - struct sk_buff *skb, |
---|
387 | | - struct mlx5e_dma_info *dma_info, |
---|
388 | | - u32 offset, u32 headlen) |
---|
389 | | -{ |
---|
390 | | - u16 headlen_pg = min_t(u32, headlen, PAGE_SIZE - offset); |
---|
391 | | - |
---|
392 | | - mlx5e_copy_skb_header(pdev, skb, dma_info, offset, 0, headlen_pg); |
---|
393 | | - |
---|
394 | | - if (unlikely(offset + headlen > PAGE_SIZE)) { |
---|
395 | | - dma_info++; |
---|
396 | | - mlx5e_copy_skb_header(pdev, skb, dma_info, 0, headlen_pg, |
---|
397 | | - headlen - headlen_pg); |
---|
398 | | - } |
---|
| 465 | + skb_copy_to_linear_data(skb, from, len); |
---|
399 | 466 | } |
---|
400 | 467 | |
---|
401 | 468 | static void |
---|
402 | 469 | mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, bool recycle) |
---|
403 | 470 | { |
---|
404 | | - const bool no_xdp_xmit = |
---|
405 | | - bitmap_empty(wi->xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE); |
---|
| 471 | + bool no_xdp_xmit; |
---|
406 | 472 | struct mlx5e_dma_info *dma_info = wi->umr.dma_info; |
---|
407 | 473 | int i; |
---|
| 474 | + |
---|
| 475 | + /* A common case for AF_XDP. */ |
---|
| 476 | + if (bitmap_full(wi->xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE)) |
---|
| 477 | + return; |
---|
| 478 | + |
---|
| 479 | + no_xdp_xmit = bitmap_empty(wi->xdp_xmit_bitmap, |
---|
| 480 | + MLX5_MPWRQ_PAGES_PER_WQE); |
---|
408 | 481 | |
---|
409 | 482 | for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) |
---|
410 | 483 | if (no_xdp_xmit || !test_bit(i, wi->xdp_xmit_bitmap)) |
---|
411 | 484 | mlx5e_page_release(rq, &dma_info[i], recycle); |
---|
412 | 485 | } |
---|
413 | 486 | |
---|
414 | | -static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq) |
---|
| 487 | +static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq, u8 n) |
---|
415 | 488 | { |
---|
416 | 489 | struct mlx5_wq_ll *wq = &rq->mpwqe.wq; |
---|
417 | | - struct mlx5e_rx_wqe_ll *wqe = mlx5_wq_ll_get_wqe(wq, wq->head); |
---|
418 | 490 | |
---|
419 | | - rq->mpwqe.umr_in_progress = false; |
---|
| 491 | + do { |
---|
| 492 | + u16 next_wqe_index = mlx5_wq_ll_get_wqe_next_ix(wq, wq->head); |
---|
420 | 493 | |
---|
421 | | - mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index)); |
---|
| 494 | + mlx5_wq_ll_push(wq, next_wqe_index); |
---|
| 495 | + } while (--n); |
---|
422 | 496 | |
---|
423 | 497 | /* ensure wqes are visible to device before updating doorbell record */ |
---|
424 | 498 | dma_wmb(); |
---|
425 | 499 | |
---|
426 | 500 | mlx5_wq_ll_update_db_record(wq); |
---|
427 | | -} |
---|
428 | | - |
---|
429 | | -static inline u16 mlx5e_icosq_wrap_cnt(struct mlx5e_icosq *sq) |
---|
430 | | -{ |
---|
431 | | - return sq->pc >> MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE; |
---|
432 | | -} |
---|
433 | | - |
---|
434 | | -static inline void mlx5e_fill_icosq_frag_edge(struct mlx5e_icosq *sq, |
---|
435 | | - struct mlx5_wq_cyc *wq, |
---|
436 | | - u16 pi, u16 nnops) |
---|
437 | | -{ |
---|
438 | | - struct mlx5e_sq_wqe_info *edge_wi, *wi = &sq->db.ico_wqe[pi]; |
---|
439 | | - |
---|
440 | | - edge_wi = wi + nnops; |
---|
441 | | - |
---|
442 | | - /* fill sq frag edge with nops to avoid wqe wrapping two pages */ |
---|
443 | | - for (; wi < edge_wi; wi++) { |
---|
444 | | - wi->opcode = MLX5_OPCODE_NOP; |
---|
445 | | - mlx5e_post_nop(wq, sq->sqn, &sq->pc); |
---|
446 | | - } |
---|
447 | 501 | } |
---|
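`mlx5e_post_rx_mpwqe()` now takes a count and pushes `n` completed MPWQEs onto the linked-list WQ before a single doorbell-record update, ordered by one `dma_wmb()`. A standalone model of that batch-then-publish-once shape (a plain counter stands in for the linked list and the memory barrier):

```c
#include <stdint.h>
#include <stdio.h>

struct rx_wq {
	uint16_t head;		/* producer position, driver private */
	uint16_t db_record;	/* what the device reads */
	uint16_t size;
};

static void post_rx_mpwqe(struct rx_wq *wq, uint8_t n)
{
	do {
		wq->head = (wq->head + 1) % wq->size;	/* push one WQE */
	} while (--n);

	/* the real code issues dma_wmb() here before publishing */
	wq->db_record = wq->head;			/* single publish */
}

int main(void)
{
	struct rx_wq wq = { .head = 0, .db_record = 0, .size = 64 };

	post_rx_mpwqe(&wq, 3);	/* three completed UMRs, one doorbell record */
	printf("head=%u db=%u\n", wq.head, wq.db_record);
	return 0;
}
```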
448 | 502 | |
---|
449 | 503 | static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) |
---|
.. | .. |
---|
453 | 507 | struct mlx5e_icosq *sq = &rq->channel->icosq; |
---|
454 | 508 | struct mlx5_wq_cyc *wq = &sq->wq; |
---|
455 | 509 | struct mlx5e_umr_wqe *umr_wqe; |
---|
456 | | - u16 xlt_offset = ix << (MLX5E_LOG_ALIGNED_MPWQE_PPW - 1); |
---|
457 | | - u16 pi, contig_wqebbs_room; |
---|
| 510 | + u16 pi; |
---|
458 | 511 | int err; |
---|
459 | 512 | int i; |
---|
460 | 513 | |
---|
461 | | - pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); |
---|
462 | | - contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi); |
---|
463 | | - if (unlikely(contig_wqebbs_room < MLX5E_UMR_WQEBBS)) { |
---|
464 | | - mlx5e_fill_icosq_frag_edge(sq, wq, pi, contig_wqebbs_room); |
---|
465 | | - pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); |
---|
| 514 | + /* Check in advance that we have enough frames, instead of allocating |
---|
| 515 | + * one-by-one, failing and moving frames to the Reuse Ring. |
---|
| 516 | + */ |
---|
| 517 | + if (rq->xsk_pool && |
---|
| 518 | + unlikely(!xsk_buff_can_alloc(rq->xsk_pool, MLX5_MPWRQ_PAGES_PER_WQE))) { |
---|
| 519 | + err = -ENOMEM; |
---|
| 520 | + goto err; |
---|
466 | 521 | } |
---|
467 | 522 | |
---|
| 523 | + pi = mlx5e_icosq_get_next_pi(sq, MLX5E_UMR_WQEBBS); |
---|
468 | 524 | umr_wqe = mlx5_wq_cyc_get_wqe(wq, pi); |
---|
469 | | - if (unlikely(mlx5e_icosq_wrap_cnt(sq) < 2)) |
---|
470 | | - memcpy(umr_wqe, &rq->mpwqe.umr_wqe, |
---|
471 | | - offsetof(struct mlx5e_umr_wqe, inline_mtts)); |
---|
| 525 | + memcpy(umr_wqe, &rq->mpwqe.umr_wqe, offsetof(struct mlx5e_umr_wqe, inline_mtts)); |
---|
472 | 526 | |
---|
473 | 527 | for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++, dma_info++) { |
---|
474 | | - err = mlx5e_page_alloc_mapped(rq, dma_info); |
---|
| 528 | + err = mlx5e_page_alloc(rq, dma_info); |
---|
475 | 529 | if (unlikely(err)) |
---|
476 | 530 | goto err_unmap; |
---|
477 | 531 | umr_wqe->inline_mtts[i].ptag = cpu_to_be64(dma_info->addr | MLX5_EN_WR); |
---|
.. | .. |
---|
480 | 534 | bitmap_zero(wi->xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE); |
---|
481 | 535 | wi->consumed_strides = 0; |
---|
482 | 536 | |
---|
483 | | - rq->mpwqe.umr_in_progress = true; |
---|
484 | | - |
---|
485 | 537 | umr_wqe->ctrl.opmod_idx_opcode = |
---|
486 | 538 | cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) | |
---|
487 | 539 | MLX5_OPCODE_UMR); |
---|
488 | | - umr_wqe->uctrl.xlt_offset = cpu_to_be16(xlt_offset); |
---|
| 540 | + umr_wqe->uctrl.xlt_offset = |
---|
| 541 | + cpu_to_be16(MLX5_ALIGNED_MTTS_OCTW(MLX5E_REQUIRED_MTTS(ix))); |
---|
489 | 542 | |
---|
490 | | - sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_UMR; |
---|
| 543 | + sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) { |
---|
| 544 | + .wqe_type = MLX5E_ICOSQ_WQE_UMR_RX, |
---|
| 545 | + .num_wqebbs = MLX5E_UMR_WQEBBS, |
---|
| 546 | + .umr.rq = rq, |
---|
| 547 | + }; |
---|
| 548 | + |
---|
491 | 549 | sq->pc += MLX5E_UMR_WQEBBS; |
---|
492 | | - mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &umr_wqe->ctrl); |
---|
| 550 | + |
---|
| 551 | + sq->doorbell_cseg = &umr_wqe->ctrl; |
---|
493 | 552 | |
---|
494 | 553 | return 0; |
---|
495 | 554 | |
---|
.. | .. |
---|
498 | 557 | dma_info--; |
---|
499 | 558 | mlx5e_page_release(rq, dma_info, true); |
---|
500 | 559 | } |
---|
| 560 | + |
---|
| 561 | +err: |
---|
501 | 562 | rq->stats->buff_alloc_err++; |
---|
502 | 563 | |
---|
503 | 564 | return err; |
---|
504 | 565 | } |
---|
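Note the doorbell change in `mlx5e_alloc_rx_mpwqe()`: instead of calling `mlx5e_notify_hw()` per UMR WQE, the WQE's control segment pointer is stashed in `sq->doorbell_cseg`, and the doorbell is rung once for the whole bulk later (in `mlx5e_post_rx_mpwqes()`, further down in this diff). A standalone model of that deferred-doorbell pattern (control segment contents and the 8-WQEBB size are illustrative):

```c
#include <stddef.h>
#include <stdio.h>

struct ctrl_seg { unsigned int opmod_idx_opcode; };

struct icosq {
	unsigned int pc;		/* producer counter, in WQEBBs */
	struct ctrl_seg *doorbell_cseg;	/* last WQE to announce, or NULL */
};

static void post_umr_wqe(struct icosq *sq, struct ctrl_seg *cseg,
			 unsigned int num_wqebbs)
{
	sq->pc += num_wqebbs;
	sq->doorbell_cseg = cseg;	/* defer: no doorbell yet */
}

static void ring_doorbell_if_pending(struct icosq *sq)
{
	if (!sq->doorbell_cseg)
		return;
	printf("doorbell: pc=%u, cseg opcode=0x%x\n",
	       sq->pc, sq->doorbell_cseg->opmod_idx_opcode);
	sq->doorbell_cseg = NULL;
}

int main(void)
{
	struct icosq sq = { .pc = 0, .doorbell_cseg = NULL };
	struct ctrl_seg cseg[2] = { { 0x25 }, { 0x25 } };

	post_umr_wqe(&sq, &cseg[0], 8);
	post_umr_wqe(&sq, &cseg[1], 8);
	ring_doorbell_if_pending(&sq);	/* one doorbell covers both WQEs */
	return 0;
}
```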
505 | 566 | |
---|
506 | | -void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) |
---|
| 567 | +static void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) |
---|
507 | 568 | { |
---|
508 | 569 | struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix]; |
---|
509 | 570 | /* Don't recycle, this function is called on rq/netdev close */ |
---|
510 | 571 | mlx5e_free_rx_mpwqe(rq, wi, false); |
---|
511 | 572 | } |
---|
512 | 573 | |
---|
513 | | -bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq) |
---|
| 574 | +INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq) |
---|
514 | 575 | { |
---|
515 | 576 | struct mlx5_wq_cyc *wq = &rq->wqe.wq; |
---|
516 | 577 | u8 wqe_bulk; |
---|
.. | .. |
---|
544 | 605 | return !!err; |
---|
545 | 606 | } |
---|
546 | 607 | |
---|
547 | | -static inline void mlx5e_poll_ico_single_cqe(struct mlx5e_cq *cq, |
---|
548 | | - struct mlx5e_icosq *sq, |
---|
549 | | - struct mlx5e_rq *rq, |
---|
550 | | - struct mlx5_cqe64 *cqe) |
---|
| 608 | +void mlx5e_free_icosq_descs(struct mlx5e_icosq *sq) |
---|
551 | 609 | { |
---|
552 | | - struct mlx5_wq_cyc *wq = &sq->wq; |
---|
553 | | - u16 ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter)); |
---|
554 | | - struct mlx5e_sq_wqe_info *icowi = &sq->db.ico_wqe[ci]; |
---|
| 610 | + u16 sqcc; |
---|
555 | 611 | |
---|
556 | | - mlx5_cqwq_pop(&cq->wq); |
---|
| 612 | + sqcc = sq->cc; |
---|
557 | 613 | |
---|
558 | | - if (unlikely((cqe->op_own >> 4) != MLX5_CQE_REQ)) { |
---|
559 | | - netdev_WARN_ONCE(cq->channel->netdev, |
---|
560 | | - "Bad OP in ICOSQ CQE: 0x%x\n", cqe->op_own); |
---|
561 | | - return; |
---|
| 614 | + while (sqcc != sq->pc) { |
---|
| 615 | + struct mlx5e_icosq_wqe_info *wi; |
---|
| 616 | + u16 ci; |
---|
| 617 | + |
---|
| 618 | + ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc); |
---|
| 619 | + wi = &sq->db.wqe_info[ci]; |
---|
| 620 | + sqcc += wi->num_wqebbs; |
---|
| 621 | +#ifdef CONFIG_MLX5_EN_TLS |
---|
| 622 | + switch (wi->wqe_type) { |
---|
| 623 | + case MLX5E_ICOSQ_WQE_SET_PSV_TLS: |
---|
| 624 | + mlx5e_ktls_handle_ctx_completion(wi); |
---|
| 625 | + break; |
---|
| 626 | + case MLX5E_ICOSQ_WQE_GET_PSV_TLS: |
---|
| 627 | + mlx5e_ktls_handle_get_psv_completion(wi, sq); |
---|
| 628 | + break; |
---|
| 629 | + } |
---|
| 630 | +#endif |
---|
562 | 631 | } |
---|
563 | | - |
---|
564 | | - if (likely(icowi->opcode == MLX5_OPCODE_UMR)) { |
---|
565 | | - mlx5e_post_rx_mpwqe(rq); |
---|
566 | | - return; |
---|
567 | | - } |
---|
568 | | - |
---|
569 | | - if (unlikely(icowi->opcode != MLX5_OPCODE_NOP)) |
---|
570 | | - netdev_WARN_ONCE(cq->channel->netdev, |
---|
571 | | - "Bad OPCODE in ICOSQ WQE info: 0x%x\n", icowi->opcode); |
---|
| 632 | + sq->cc = sqcc; |
---|
572 | 633 | } |
---|
573 | 634 | |
---|
574 | | -static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq) |
---|
| 635 | +int mlx5e_poll_ico_cq(struct mlx5e_cq *cq) |
---|
575 | 636 | { |
---|
576 | 637 | struct mlx5e_icosq *sq = container_of(cq, struct mlx5e_icosq, cq); |
---|
577 | 638 | struct mlx5_cqe64 *cqe; |
---|
| 639 | + u16 sqcc; |
---|
| 640 | + int i; |
---|
578 | 641 | |
---|
579 | 642 | if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state))) |
---|
580 | | - return; |
---|
| 643 | + return 0; |
---|
581 | 644 | |
---|
582 | 645 | cqe = mlx5_cqwq_get_cqe(&cq->wq); |
---|
583 | 646 | if (likely(!cqe)) |
---|
584 | | - return; |
---|
| 647 | + return 0; |
---|
585 | 648 | |
---|
586 | | - /* by design, there's only a single cqe */ |
---|
587 | | - mlx5e_poll_ico_single_cqe(cq, sq, rq, cqe); |
---|
| 649 | + /* sq->cc must be updated only after mlx5_cqwq_update_db_record(), |
---|
| 650 | + * otherwise a cq overrun may occur |
---|
| 651 | + */ |
---|
| 652 | + sqcc = sq->cc; |
---|
| 653 | + |
---|
| 654 | + i = 0; |
---|
| 655 | + do { |
---|
| 656 | + u16 wqe_counter; |
---|
| 657 | + bool last_wqe; |
---|
| 658 | + |
---|
| 659 | + mlx5_cqwq_pop(&cq->wq); |
---|
| 660 | + |
---|
| 661 | + wqe_counter = be16_to_cpu(cqe->wqe_counter); |
---|
| 662 | + |
---|
| 663 | + do { |
---|
| 664 | + struct mlx5e_icosq_wqe_info *wi; |
---|
| 665 | + u16 ci; |
---|
| 666 | + |
---|
| 667 | + last_wqe = (sqcc == wqe_counter); |
---|
| 668 | + |
---|
| 669 | + ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc); |
---|
| 670 | + wi = &sq->db.wqe_info[ci]; |
---|
| 671 | + sqcc += wi->num_wqebbs; |
---|
| 672 | + |
---|
| 673 | + if (last_wqe && unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) { |
---|
| 674 | + netdev_WARN_ONCE(cq->channel->netdev, |
---|
| 675 | + "Bad OP in ICOSQ CQE: 0x%x\n", |
---|
| 676 | + get_cqe_opcode(cqe)); |
---|
| 677 | + mlx5e_dump_error_cqe(&sq->cq, sq->sqn, |
---|
| 678 | + (struct mlx5_err_cqe *)cqe); |
---|
| 679 | + if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) |
---|
| 680 | + queue_work(cq->channel->priv->wq, &sq->recover_work); |
---|
| 681 | + break; |
---|
| 682 | + } |
---|
| 683 | + |
---|
| 684 | + switch (wi->wqe_type) { |
---|
| 685 | + case MLX5E_ICOSQ_WQE_UMR_RX: |
---|
| 686 | + wi->umr.rq->mpwqe.umr_completed++; |
---|
| 687 | + break; |
---|
| 688 | + case MLX5E_ICOSQ_WQE_NOP: |
---|
| 689 | + break; |
---|
| 690 | +#ifdef CONFIG_MLX5_EN_TLS |
---|
| 691 | + case MLX5E_ICOSQ_WQE_UMR_TLS: |
---|
| 692 | + break; |
---|
| 693 | + case MLX5E_ICOSQ_WQE_SET_PSV_TLS: |
---|
| 694 | + mlx5e_ktls_handle_ctx_completion(wi); |
---|
| 695 | + break; |
---|
| 696 | + case MLX5E_ICOSQ_WQE_GET_PSV_TLS: |
---|
| 697 | + mlx5e_ktls_handle_get_psv_completion(wi, sq); |
---|
| 698 | + break; |
---|
| 699 | +#endif |
---|
| 700 | + default: |
---|
| 701 | + netdev_WARN_ONCE(cq->channel->netdev, |
---|
| 702 | + "Bad WQE type in ICOSQ WQE info: 0x%x\n", |
---|
| 703 | + wi->wqe_type); |
---|
| 704 | + } |
---|
| 705 | + } while (!last_wqe); |
---|
| 706 | + } while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq))); |
---|
| 707 | + |
---|
| 708 | + sq->cc = sqcc; |
---|
588 | 709 | |
---|
589 | 710 | mlx5_cqwq_update_db_record(&cq->wq); |
---|
| 711 | + |
---|
| 712 | + return i; |
---|
590 | 713 | } |
---|
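The reworked `mlx5e_poll_ico_cq()` above no longer assumes a single CQE: it polls up to a budget of CQEs and, for each one, consumes every WQE from `sq->cc` up to and including the reported `wqe_counter`, where each `wqe_info` entry may span several WQEBBs. A standalone model of that completion walk:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SQ_SIZE 16	/* slots, power of two, illustrative */

struct wqe_info {
	uint8_t num_wqebbs;
	const char *type;
};

struct icosq {
	uint16_t cc;			/* consumer counter, free-running */
	struct wqe_info db[SQ_SIZE];	/* shadow of posted WQEs */
};

/* Consume every WQE up to the slot a CQE reported as completed. */
static void consume_up_to(struct icosq *sq, uint16_t wqe_counter)
{
	bool last_wqe;

	do {
		struct wqe_info *wi = &sq->db[sq->cc & (SQ_SIZE - 1)];

		last_wqe = (sq->cc == wqe_counter);
		sq->cc += wi->num_wqebbs;	/* entries can span WQEBBs */
		printf("completed %s (%u WQEBBs)\n", wi->type, wi->num_wqebbs);
	} while (!last_wqe);
}

int main(void)
{
	struct icosq sq = { .cc = 0 };

	sq.db[0] = (struct wqe_info){ .num_wqebbs = 1, .type = "NOP" };
	sq.db[1] = (struct wqe_info){ .num_wqebbs = 8, .type = "UMR_RX" };

	consume_up_to(&sq, 1);	/* CQE says the WQE at slot 1 finished */
	return 0;
}
```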
591 | 714 | |
---|
592 | | -bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq) |
---|
| 715 | +INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq) |
---|
593 | 716 | { |
---|
| 717 | + struct mlx5e_icosq *sq = &rq->channel->icosq; |
---|
594 | 718 | struct mlx5_wq_ll *wq = &rq->mpwqe.wq; |
---|
| 719 | + u8 umr_completed = rq->mpwqe.umr_completed; |
---|
| 720 | + int alloc_err = 0; |
---|
| 721 | + u8 missing, i; |
---|
| 722 | + u16 head; |
---|
595 | 723 | |
---|
596 | 724 | if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state))) |
---|
597 | 725 | return false; |
---|
598 | 726 | |
---|
599 | | - mlx5e_poll_ico_cq(&rq->channel->icosq.cq, rq); |
---|
| 727 | + if (umr_completed) { |
---|
| 728 | + mlx5e_post_rx_mpwqe(rq, umr_completed); |
---|
| 729 | + rq->mpwqe.umr_in_progress -= umr_completed; |
---|
| 730 | + rq->mpwqe.umr_completed = 0; |
---|
| 731 | + } |
---|
600 | 732 | |
---|
601 | | - if (mlx5_wq_ll_is_full(wq)) |
---|
| 733 | + missing = mlx5_wq_ll_missing(wq) - rq->mpwqe.umr_in_progress; |
---|
| 734 | + |
---|
| 735 | + if (unlikely(rq->mpwqe.umr_in_progress > rq->mpwqe.umr_last_bulk)) |
---|
| 736 | + rq->stats->congst_umr++; |
---|
| 737 | + |
---|
| 738 | +#define UMR_WQE_BULK (2) |
---|
| 739 | + if (likely(missing < UMR_WQE_BULK)) |
---|
602 | 740 | return false; |
---|
603 | 741 | |
---|
604 | | - if (!rq->mpwqe.umr_in_progress) |
---|
605 | | - mlx5e_alloc_rx_mpwqe(rq, wq->head); |
---|
606 | | - else |
---|
607 | | - rq->stats->congst_umr += mlx5_wq_ll_missing(wq) > 2; |
---|
| 742 | + head = rq->mpwqe.actual_wq_head; |
---|
| 743 | + i = missing; |
---|
| 744 | + do { |
---|
| 745 | + alloc_err = mlx5e_alloc_rx_mpwqe(rq, head); |
---|
| 746 | + |
---|
| 747 | + if (unlikely(alloc_err)) |
---|
| 748 | + break; |
---|
| 749 | + head = mlx5_wq_ll_get_wqe_next_ix(wq, head); |
---|
| 750 | + } while (--i); |
---|
| 751 | + |
---|
| 752 | + rq->mpwqe.umr_last_bulk = missing - i; |
---|
| 753 | + if (sq->doorbell_cseg) { |
---|
| 754 | + mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, sq->doorbell_cseg); |
---|
| 755 | + sq->doorbell_cseg = NULL; |
---|
| 756 | + } |
---|
| 757 | + |
---|
| 758 | + rq->mpwqe.umr_in_progress += rq->mpwqe.umr_last_bulk; |
---|
| 759 | + rq->mpwqe.actual_wq_head = head; |
---|
| 760 | + |
---|
| 761 | + /* If XSK Fill Ring doesn't have enough frames, report the error, so |
---|
| 762 | + * that one of the actions can be performed: |
---|
| 763 | + * 1. If need_wakeup is used, signal that the application has to kick |
---|
| 764 | + * the driver when it refills the Fill Ring. |
---|
| 765 | + * 2. Otherwise, busy poll by rescheduling the NAPI poll. |
---|
| 766 | + */ |
---|
| 767 | + if (unlikely(alloc_err == -ENOMEM && rq->xsk_pool)) |
---|
| 768 | + return true; |
---|
608 | 769 | |
---|
609 | 770 | return false; |
---|
610 | 771 | } |
---|
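The new `mlx5e_post_rx_mpwqes()` accounts for UMRs that are still in flight (`umr_in_progress`, decremented by `umr_completed`) and only issues a refill when at least `UMR_WQE_BULK` MPWQEs are actually missing; on an XSK `-ENOMEM` it returns true so the NAPI poll is rescheduled or the application is woken. A standalone model of just the refill arithmetic:

```c
#include <stdio.h>

#define UMR_WQE_BULK 2

struct mpwqe_state {
	int wq_missing;		/* free MPWQE slots in the RX work queue */
	int umr_in_progress;	/* UMR WQEs posted but not yet completed */
};

/* Returns how many new UMR WQEs to post now (0 = wait for more room). */
static int umr_to_post(const struct mpwqe_state *st)
{
	int missing = st->wq_missing - st->umr_in_progress;

	return missing >= UMR_WQE_BULK ? missing : 0;
}

int main(void)
{
	struct mpwqe_state st = { .wq_missing = 5, .umr_in_progress = 4 };

	printf("post now: %d\n", umr_to_post(&st));	/* 1 missing -> wait */
	st.umr_in_progress = 0;
	printf("post now: %d\n", umr_to_post(&st));	/* 5 missing -> post 5 */
	return 0;
}
```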
.. | .. |
---|
767 | 928 | } |
---|
768 | 929 | |
---|
769 | 930 | static void |
---|
770 | | -mlx5e_skb_padding_csum(struct sk_buff *skb, int network_depth, __be16 proto, |
---|
771 | | - struct mlx5e_rq_stats *stats) |
---|
| 931 | +mlx5e_skb_csum_fixup(struct sk_buff *skb, int network_depth, __be16 proto, |
---|
| 932 | + struct mlx5e_rq_stats *stats) |
---|
772 | 933 | { |
---|
773 | 934 | struct ipv6hdr *ip6; |
---|
774 | 935 | struct iphdr *ip4; |
---|
775 | 936 | int pkt_len; |
---|
776 | 937 | |
---|
| 938 | + /* Fixup vlan headers, if any */ |
---|
| 939 | + if (network_depth > ETH_HLEN) |
---|
| 940 | + /* CQE csum is calculated from the IP header and does |
---|
| 941 | + * not cover VLAN headers (if present). This will add |
---|
| 942 | + * the checksum manually. |
---|
| 943 | + */ |
---|
| 944 | + skb->csum = csum_partial(skb->data + ETH_HLEN, |
---|
| 945 | + network_depth - ETH_HLEN, |
---|
| 946 | + skb->csum); |
---|
| 947 | + |
---|
| 948 | + /* Fixup tail padding, if any */ |
---|
777 | 949 | switch (proto) { |
---|
778 | 950 | case htons(ETH_P_IP): |
---|
779 | 951 | ip4 = (struct iphdr *)(skb->data + network_depth); |
---|
.. | .. |
---|
813 | 985 | } |
---|
814 | 986 | |
---|
815 | 987 | /* True when explicitly set via priv flag, or XDP prog is loaded */ |
---|
816 | | - if (test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state)) |
---|
| 988 | + if (test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state) || |
---|
| 989 | + get_cqe_tls_offload(cqe)) |
---|
817 | 990 | goto csum_unnecessary; |
---|
818 | 991 | |
---|
819 | 992 | /* CQE csum doesn't cover padding octets in short ethernet |
---|
.. | .. |
---|
831 | 1004 | if (unlikely(get_ip_proto(skb, network_depth, proto) == IPPROTO_SCTP)) |
---|
832 | 1005 | goto csum_unnecessary; |
---|
833 | 1006 | |
---|
| 1007 | + stats->csum_complete++; |
---|
834 | 1008 | skb->ip_summed = CHECKSUM_COMPLETE; |
---|
835 | 1009 | skb->csum = csum_unfold((__force __sum16)cqe->check_sum); |
---|
836 | | - if (network_depth > ETH_HLEN) |
---|
837 | | - /* CQE csum is calculated from the IP header and does |
---|
838 | | - * not cover VLAN headers (if present). This will add |
---|
839 | | - * the checksum manually. |
---|
840 | | - */ |
---|
841 | | - skb->csum = csum_partial(skb->data + ETH_HLEN, |
---|
842 | | - network_depth - ETH_HLEN, |
---|
843 | | - skb->csum); |
---|
844 | 1010 | |
---|
845 | | - mlx5e_skb_padding_csum(skb, network_depth, proto, stats); |
---|
846 | | - stats->csum_complete++; |
---|
| 1011 | + if (test_bit(MLX5E_RQ_STATE_CSUM_FULL, &rq->state)) |
---|
| 1012 | + return; /* CQE csum covers all received bytes */ |
---|
| 1013 | + |
---|
| 1014 | + /* csum might need some fixups ...*/ |
---|
| 1015 | + mlx5e_skb_csum_fixup(skb, network_depth, proto, stats); |
---|
847 | 1016 | return; |
---|
848 | 1017 | } |
---|
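On the checksum refactor above: the CQE's CHECKSUM_COMPLETE value starts at the IP header, so any VLAN tag bytes between `ETH_HLEN` and the network header must be folded into `skb->csum` (the `csum_partial()` call), and tail padding on short frames is compensated afterwards, unless the `CSUM_FULL` state says the CQE checksum already covers every received byte. A standalone model of the one's-complement arithmetic behind the VLAN fixup (the kernel's `csum_partial()` works on native-endian 32-bit partial sums; this toy sums big-endian 16-bit words, the folding properties are the same, and the checksum value is made up):

```c
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Add extra header bytes into an existing one's-complement checksum. */
static uint32_t csum_add_bytes(uint32_t sum, const uint8_t *data, size_t len)
{
	for (size_t i = 0; i + 1 < len; i += 2)
		sum += (uint32_t)(data[i] << 8 | data[i + 1]);
	if (len & 1)
		sum += (uint32_t)data[len - 1] << 8;	/* pad odd byte */
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);	/* fold carries */
	return sum;
}

int main(void)
{
	/* 802.1Q tag (TPID 0x8100, VID 100): the 4 bytes the CQE skipped */
	uint8_t vlan_tag[4] = { 0x81, 0x00, 0x00, 0x64 };
	uint32_t cqe_csum = 0x1c46;	/* illustrative checksum-complete value */

	printf("fixed csum: 0x%04x\n", csum_add_bytes(cqe_csum, vlan_tag, 4));
	return 0;
}
```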
849 | 1018 | |
---|
.. | .. |
---|
878 | 1047 | |
---|
879 | 1048 | skb->mac_len = ETH_HLEN; |
---|
880 | 1049 | |
---|
881 | | -#ifdef CONFIG_MLX5_EN_TLS |
---|
882 | | - mlx5e_tls_handle_rx_skb(netdev, skb, &cqe_bcnt); |
---|
883 | | -#endif |
---|
| 1050 | + mlx5e_tls_handle_rx_skb(rq, skb, cqe, &cqe_bcnt); |
---|
| 1051 | + |
---|
| 1052 | + if (unlikely(mlx5_ipsec_is_rx_flow(cqe))) |
---|
| 1053 | + mlx5e_ipsec_offload_handle_rx_skb(netdev, skb, cqe); |
---|
884 | 1054 | |
---|
885 | 1055 | if (lro_num_seg > 1) { |
---|
886 | 1056 | mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt); |
---|
.. | .. |
---|
916 | 1086 | mlx5e_enable_ecn(rq, skb); |
---|
917 | 1087 | |
---|
918 | 1088 | skb->protocol = eth_type_trans(skb, netdev); |
---|
| 1089 | + |
---|
| 1090 | + if (unlikely(mlx5e_skb_is_multicast(skb))) |
---|
| 1091 | + stats->mcast_packets++; |
---|
919 | 1092 | } |
---|
920 | 1093 | |
---|
921 | 1094 | static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq, |
---|
.. | .. |
---|
948 | 1121 | return skb; |
---|
949 | 1122 | } |
---|
950 | 1123 | |
---|
951 | | -struct sk_buff * |
---|
| 1124 | +static void mlx5e_fill_xdp_buff(struct mlx5e_rq *rq, void *va, u16 headroom, |
---|
| 1125 | + u32 len, struct xdp_buff *xdp) |
---|
| 1126 | +{ |
---|
| 1127 | + xdp->data_hard_start = va; |
---|
| 1128 | + xdp->data = va + headroom; |
---|
| 1129 | + xdp_set_data_meta_invalid(xdp); |
---|
| 1130 | + xdp->data_end = xdp->data + len; |
---|
| 1131 | + xdp->rxq = &rq->xdp_rxq; |
---|
| 1132 | + xdp->frame_sz = rq->buff.frame0_sz; |
---|
| 1133 | +} |
---|
| 1134 | + |
---|
| 1135 | +static struct sk_buff * |
---|
952 | 1136 | mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, |
---|
953 | 1137 | struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt) |
---|
954 | 1138 | { |
---|
955 | 1139 | struct mlx5e_dma_info *di = wi->di; |
---|
956 | 1140 | u16 rx_headroom = rq->buff.headroom; |
---|
| 1141 | + struct xdp_buff xdp; |
---|
957 | 1142 | struct sk_buff *skb; |
---|
958 | 1143 | void *va, *data; |
---|
959 | | - bool consumed; |
---|
960 | 1144 | u32 frag_size; |
---|
961 | 1145 | |
---|
962 | 1146 | va = page_address(di->page) + wi->offset; |
---|
.. | .. |
---|
965 | 1149 | |
---|
966 | 1150 | dma_sync_single_range_for_cpu(rq->pdev, di->addr, wi->offset, |
---|
967 | 1151 | frag_size, DMA_FROM_DEVICE); |
---|
968 | | - prefetchw(va); /* xdp_frame data area */ |
---|
969 | | - prefetch(data); |
---|
| 1152 | + net_prefetchw(va); /* xdp_frame data area */ |
---|
| 1153 | + net_prefetch(data); |
---|
970 | 1154 | |
---|
971 | | - if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) { |
---|
972 | | - rq->stats->wqe_err++; |
---|
973 | | - return NULL; |
---|
974 | | - } |
---|
975 | | - |
---|
976 | | - rcu_read_lock(); |
---|
977 | | - consumed = mlx5e_xdp_handle(rq, di, va, &rx_headroom, &cqe_bcnt); |
---|
978 | | - rcu_read_unlock(); |
---|
979 | | - if (consumed) |
---|
| 1155 | + mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt, &xdp); |
---|
| 1156 | + if (mlx5e_xdp_handle(rq, di, &cqe_bcnt, &xdp)) |
---|
980 | 1157 | return NULL; /* page/packet was consumed by XDP */ |
---|
981 | 1158 | |
---|
| 1159 | + rx_headroom = xdp.data - xdp.data_hard_start; |
---|
| 1160 | + frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt); |
---|
982 | 1161 | skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt); |
---|
983 | 1162 | if (unlikely(!skb)) |
---|
984 | 1163 | return NULL; |
---|
.. | .. |
---|
989 | 1168 | return skb; |
---|
990 | 1169 | } |
---|
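In the linear SKB path above, the driver now builds a `struct xdp_buff` explicitly (`data_hard_start`, `data = va + headroom`, `data_end`, `frame_sz`, `rxq`) and, because an XDP program may move `data` (for example via `bpf_xdp_adjust_head()`), recomputes the headroom and fragment size from the buffer before building the SKB. A simplified standalone model of that post-XDP fixup (toy struct, not the kernel's `xdp_buff`):

```c
#include <stdint.h>
#include <stdio.h>

struct xdp_buff_model {
	uint8_t *data_hard_start;
	uint8_t *data;
	uint8_t *data_end;
};

int main(void)
{
	static uint8_t page[4096];
	uint32_t headroom = 256, len = 1500;

	struct xdp_buff_model xdp = {
		.data_hard_start = page,
		.data = page + headroom,
		.data_end = page + headroom + len,
	};

	/* Pretend the XDP program consumed 14 bytes of headroom to prepend
	 * a header (negative adjust_head delta).
	 */
	xdp.data -= 14;

	/* What the driver recomputes before mlx5e_build_linear_skb(): */
	headroom = (uint32_t)(xdp.data - xdp.data_hard_start);
	len = (uint32_t)(xdp.data_end - xdp.data);
	printf("new headroom=%u, new len=%u\n", headroom, len);
	return 0;
}
```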
991 | 1170 | |
---|
992 | | -struct sk_buff * |
---|
| 1171 | +static struct sk_buff * |
---|
993 | 1172 | mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, |
---|
994 | 1173 | struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt) |
---|
995 | 1174 | { |
---|
.. | .. |
---|
999 | 1178 | u16 frag_headlen = headlen; |
---|
1000 | 1179 | u16 byte_cnt = cqe_bcnt - headlen; |
---|
1001 | 1180 | struct sk_buff *skb; |
---|
1002 | | - |
---|
1003 | | - if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) { |
---|
1004 | | - rq->stats->wqe_err++; |
---|
1005 | | - return NULL; |
---|
1006 | | - } |
---|
1007 | 1181 | |
---|
1008 | 1182 | /* XDP is not supported in this configuration, as incoming packets |
---|
1009 | 1183 | * might spread among multiple pages. |
---|
.. | .. |
---|
1015 | 1189 | return NULL; |
---|
1016 | 1190 | } |
---|
1017 | 1191 | |
---|
1018 | | - prefetchw(skb->data); |
---|
| 1192 | + net_prefetchw(skb->data); |
---|
1019 | 1193 | |
---|
1020 | 1194 | while (byte_cnt) { |
---|
1021 | 1195 | u16 frag_consumed_bytes = |
---|
.. | .. |
---|
1030 | 1204 | } |
---|
1031 | 1205 | |
---|
1032 | 1206 | /* copy header */ |
---|
1033 | | - mlx5e_copy_skb_header(rq->pdev, skb, head_wi->di, head_wi->offset, |
---|
1034 | | - 0, headlen); |
---|
| 1207 | + mlx5e_copy_skb_header(rq->pdev, skb, head_wi->di, head_wi->offset, headlen); |
---|
1035 | 1208 | /* skb linear part was allocated with headlen and aligned to long */ |
---|
1036 | 1209 | skb->tail += headlen; |
---|
1037 | 1210 | skb->len += headlen; |
---|
.. | .. |
---|
1039 | 1212 | return skb; |
---|
1040 | 1213 | } |
---|
1041 | 1214 | |
---|
1042 | | -void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) |
---|
| 1215 | +static void trigger_report(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) |
---|
| 1216 | +{ |
---|
| 1217 | + struct mlx5_err_cqe *err_cqe = (struct mlx5_err_cqe *)cqe; |
---|
| 1218 | + |
---|
| 1219 | + if (cqe_syndrome_needs_recover(err_cqe->syndrome) && |
---|
| 1220 | + !test_and_set_bit(MLX5E_RQ_STATE_RECOVERING, &rq->state)) { |
---|
| 1221 | + mlx5e_dump_error_cqe(&rq->cq, rq->rqn, err_cqe); |
---|
| 1222 | + queue_work(rq->channel->priv->wq, &rq->recover_work); |
---|
| 1223 | + } |
---|
| 1224 | +} |
---|
| 1225 | + |
---|
| 1226 | +static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) |
---|
1043 | 1227 | { |
---|
1044 | 1228 | struct mlx5_wq_cyc *wq = &rq->wqe.wq; |
---|
1045 | 1229 | struct mlx5e_wqe_frag_info *wi; |
---|
.. | .. |
---|
1051 | 1235 | wi = get_frag(rq, ci); |
---|
1052 | 1236 | cqe_bcnt = be32_to_cpu(cqe->byte_cnt); |
---|
1053 | 1237 | |
---|
1054 | | - skb = rq->wqe.skb_from_cqe(rq, cqe, wi, cqe_bcnt); |
---|
| 1238 | + if (unlikely(MLX5E_RX_ERR_CQE(cqe))) { |
---|
| 1239 | + trigger_report(rq, cqe); |
---|
| 1240 | + rq->stats->wqe_err++; |
---|
| 1241 | + goto free_wqe; |
---|
| 1242 | + } |
---|
| 1243 | + |
---|
| 1244 | + skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe, |
---|
| 1245 | + mlx5e_skb_from_cqe_linear, |
---|
| 1246 | + mlx5e_skb_from_cqe_nonlinear, |
---|
| 1247 | + rq, cqe, wi, cqe_bcnt); |
---|
1055 | 1248 | if (!skb) { |
---|
1056 | 1249 | /* probably for XDP */ |
---|
1057 | 1250 | if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) { |
---|
.. | .. |
---|
1064 | 1257 | } |
---|
1065 | 1258 | |
---|
1066 | 1259 | mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb); |
---|
| 1260 | + |
---|
| 1261 | + if (mlx5e_cqe_regb_chain(cqe)) |
---|
| 1262 | + if (!mlx5e_tc_update_skb(cqe, skb)) { |
---|
| 1263 | + dev_kfree_skb_any(skb); |
---|
| 1264 | + goto free_wqe; |
---|
| 1265 | + } |
---|
| 1266 | + |
---|
1067 | 1267 | napi_gro_receive(rq->cq.napi, skb); |
---|
1068 | 1268 | |
---|
1069 | 1269 | free_wqe: |
---|
.. | .. |
---|
1073 | 1273 | } |
---|
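The error path now checks `MLX5E_RX_ERR_CQE()` up front and calls `trigger_report()`, which dumps the error CQE and queues the RQ recovery work at most once per burst, guarded by `test_and_set_bit(MLX5E_RQ_STATE_RECOVERING, ...)`. A standalone model of that single-shot guard (C11 atomics stand in for the kernel bitop):

```c
#include <stdatomic.h>
#include <stdio.h>

static atomic_flag recovering = ATOMIC_FLAG_INIT;

static void queue_recovery_work(void)
{
	printf("recovery work queued\n");
}

static void trigger_report(int syndrome)
{
	if (!syndrome)
		return;				/* not a recoverable error */
	if (atomic_flag_test_and_set(&recovering))
		return;				/* already reported, don't requeue */
	printf("error CQE, syndrome 0x%x\n", syndrome);
	queue_recovery_work();
}

int main(void)
{
	trigger_report(0x13);	/* first error: dumped and work queued */
	trigger_report(0x13);	/* repeats in the same burst: ignored */
	return 0;
}
```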
1074 | 1274 | |
---|
1075 | 1275 | #ifdef CONFIG_MLX5_ESWITCH |
---|
1076 | | -void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) |
---|
| 1276 | +static void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) |
---|
1077 | 1277 | { |
---|
1078 | 1278 | struct net_device *netdev = rq->netdev; |
---|
1079 | 1279 | struct mlx5e_priv *priv = netdev_priv(netdev); |
---|
1080 | 1280 | struct mlx5e_rep_priv *rpriv = priv->ppriv; |
---|
1081 | 1281 | struct mlx5_eswitch_rep *rep = rpriv->rep; |
---|
| 1282 | + struct mlx5e_tc_update_priv tc_priv = {}; |
---|
1082 | 1283 | struct mlx5_wq_cyc *wq = &rq->wqe.wq; |
---|
1083 | 1284 | struct mlx5e_wqe_frag_info *wi; |
---|
1084 | 1285 | struct sk_buff *skb; |
---|
.. | .. |
---|
1089 | 1290 | wi = get_frag(rq, ci); |
---|
1090 | 1291 | cqe_bcnt = be32_to_cpu(cqe->byte_cnt); |
---|
1091 | 1292 | |
---|
1092 | | - skb = rq->wqe.skb_from_cqe(rq, cqe, wi, cqe_bcnt); |
---|
| 1293 | + if (unlikely(MLX5E_RX_ERR_CQE(cqe))) { |
---|
| 1294 | + rq->stats->wqe_err++; |
---|
| 1295 | + goto free_wqe; |
---|
| 1296 | + } |
---|
| 1297 | + |
---|
| 1298 | + skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe, |
---|
| 1299 | + mlx5e_skb_from_cqe_linear, |
---|
| 1300 | + mlx5e_skb_from_cqe_nonlinear, |
---|
| 1301 | + rq, cqe, wi, cqe_bcnt); |
---|
1093 | 1302 | if (!skb) { |
---|
1094 | 1303 | /* probably for XDP */ |
---|
1095 | 1304 | if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) { |
---|
.. | .. |
---|
1106 | 1315 | if (rep->vlan && skb_vlan_tag_present(skb)) |
---|
1107 | 1316 | skb_vlan_pop(skb); |
---|
1108 | 1317 | |
---|
| 1318 | + if (unlikely(!mlx5_ipsec_is_rx_flow(cqe) && |
---|
| 1319 | + !mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv))) { |
---|
| 1320 | + dev_kfree_skb_any(skb); |
---|
| 1321 | + goto free_wqe; |
---|
| 1322 | + } |
---|
| 1323 | + |
---|
1109 | 1324 | napi_gro_receive(rq->cq.napi, skb); |
---|
| 1325 | + |
---|
| 1326 | + mlx5_rep_tc_post_napi_receive(&tc_priv); |
---|
1110 | 1327 | |
---|
1111 | 1328 | free_wqe: |
---|
1112 | 1329 | mlx5e_free_rx_wqe(rq, wi, true); |
---|
1113 | 1330 | wq_cyc_pop: |
---|
1114 | 1331 | mlx5_wq_cyc_pop(wq); |
---|
1115 | 1332 | } |
---|
| 1333 | + |
---|
| 1334 | +static void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) |
---|
| 1335 | +{ |
---|
| 1336 | + u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe); |
---|
| 1337 | + u16 wqe_id = be16_to_cpu(cqe->wqe_id); |
---|
| 1338 | + struct mlx5e_mpw_info *wi = &rq->mpwqe.info[wqe_id]; |
---|
| 1339 | + u16 stride_ix = mpwrq_get_cqe_stride_index(cqe); |
---|
| 1340 | + u32 wqe_offset = stride_ix << rq->mpwqe.log_stride_sz; |
---|
| 1341 | + u32 head_offset = wqe_offset & (PAGE_SIZE - 1); |
---|
| 1342 | + u32 page_idx = wqe_offset >> PAGE_SHIFT; |
---|
| 1343 | + struct mlx5e_tc_update_priv tc_priv = {}; |
---|
| 1344 | + struct mlx5e_rx_wqe_ll *wqe; |
---|
| 1345 | + struct mlx5_wq_ll *wq; |
---|
| 1346 | + struct sk_buff *skb; |
---|
| 1347 | + u16 cqe_bcnt; |
---|
| 1348 | + |
---|
| 1349 | + wi->consumed_strides += cstrides; |
---|
| 1350 | + |
---|
| 1351 | + if (unlikely(MLX5E_RX_ERR_CQE(cqe))) { |
---|
| 1352 | + trigger_report(rq, cqe); |
---|
| 1353 | + rq->stats->wqe_err++; |
---|
| 1354 | + goto mpwrq_cqe_out; |
---|
| 1355 | + } |
---|
| 1356 | + |
---|
| 1357 | + if (unlikely(mpwrq_is_filler_cqe(cqe))) { |
---|
| 1358 | + struct mlx5e_rq_stats *stats = rq->stats; |
---|
| 1359 | + |
---|
| 1360 | + stats->mpwqe_filler_cqes++; |
---|
| 1361 | + stats->mpwqe_filler_strides += cstrides; |
---|
| 1362 | + goto mpwrq_cqe_out; |
---|
| 1363 | + } |
---|
| 1364 | + |
---|
| 1365 | + cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe); |
---|
| 1366 | + |
---|
| 1367 | + skb = INDIRECT_CALL_2(rq->mpwqe.skb_from_cqe_mpwrq, |
---|
| 1368 | + mlx5e_skb_from_cqe_mpwrq_linear, |
---|
| 1369 | + mlx5e_skb_from_cqe_mpwrq_nonlinear, |
---|
| 1370 | + rq, wi, cqe_bcnt, head_offset, page_idx); |
---|
| 1371 | + if (!skb) |
---|
| 1372 | + goto mpwrq_cqe_out; |
---|
| 1373 | + |
---|
| 1374 | + mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb); |
---|
| 1375 | + |
---|
| 1376 | + if (unlikely(!mlx5_ipsec_is_rx_flow(cqe) && |
---|
| 1377 | + !mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv))) { |
---|
| 1378 | + dev_kfree_skb_any(skb); |
---|
| 1379 | + goto mpwrq_cqe_out; |
---|
| 1380 | + } |
---|
| 1381 | + |
---|
| 1382 | + napi_gro_receive(rq->cq.napi, skb); |
---|
| 1383 | + |
---|
| 1384 | + mlx5_rep_tc_post_napi_receive(&tc_priv); |
---|
| 1385 | + |
---|
| 1386 | +mpwrq_cqe_out: |
---|
| 1387 | + if (likely(wi->consumed_strides < rq->mpwqe.num_strides)) |
---|
| 1388 | + return; |
---|
| 1389 | + |
---|
| 1390 | + wq = &rq->mpwqe.wq; |
---|
| 1391 | + wqe = mlx5_wq_ll_get_wqe(wq, wqe_id); |
---|
| 1392 | + mlx5e_free_rx_mpwqe(rq, wi, true); |
---|
| 1393 | + mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index); |
---|
| 1394 | +} |
---|
| 1395 | + |
---|
| 1396 | +const struct mlx5e_rx_handlers mlx5e_rx_handlers_rep = { |
---|
| 1397 | + .handle_rx_cqe = mlx5e_handle_rx_cqe_rep, |
---|
| 1398 | + .handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq_rep, |
---|
| 1399 | +}; |
---|
1116 | 1400 | #endif |
---|
1117 | 1401 | |
---|
1118 | | -struct sk_buff * |
---|
| 1402 | +static struct sk_buff * |
---|
1119 | 1403 | mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, |
---|
1120 | 1404 | u16 cqe_bcnt, u32 head_offset, u32 page_idx) |
---|
1121 | 1405 | { |
---|
.. | .. |
---|
1133 | 1417 | return NULL; |
---|
1134 | 1418 | } |
---|
1135 | 1419 | |
---|
1136 | | - prefetchw(skb->data); |
---|
| 1420 | + net_prefetchw(skb->data); |
---|
1137 | 1421 | |
---|
1138 | 1422 | if (unlikely(frag_offset >= PAGE_SIZE)) { |
---|
1139 | 1423 | di++; |
---|
.. | .. |
---|
1153 | 1437 | di++; |
---|
1154 | 1438 | } |
---|
1155 | 1439 | /* copy header */ |
---|
1156 | | - mlx5e_copy_skb_header_mpwqe(rq->pdev, skb, head_di, |
---|
1157 | | - head_offset, headlen); |
---|
| 1440 | + mlx5e_copy_skb_header(rq->pdev, skb, head_di, head_offset, headlen); |
---|
1158 | 1441 | /* skb linear part was allocated with headlen and aligned to long */ |
---|
1159 | 1442 | skb->tail += headlen; |
---|
1160 | 1443 | skb->len += headlen; |
---|
.. | .. |
---|
1162 | 1445 | return skb; |
---|
1163 | 1446 | } |
---|
1164 | 1447 | |
---|
1165 | | -struct sk_buff * |
---|
| 1448 | +static struct sk_buff * |
---|
1166 | 1449 | mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, |
---|
1167 | 1450 | u16 cqe_bcnt, u32 head_offset, u32 page_idx) |
---|
1168 | 1451 | { |
---|
1169 | 1452 | struct mlx5e_dma_info *di = &wi->umr.dma_info[page_idx]; |
---|
1170 | 1453 | u16 rx_headroom = rq->buff.headroom; |
---|
1171 | 1454 | u32 cqe_bcnt32 = cqe_bcnt; |
---|
| 1455 | + struct xdp_buff xdp; |
---|
1172 | 1456 | struct sk_buff *skb; |
---|
1173 | 1457 | void *va, *data; |
---|
1174 | 1458 | u32 frag_size; |
---|
1175 | | - bool consumed; |
---|
1176 | 1459 | |
---|
1177 | 1460 | /* Check packet size. Note LRO doesn't use linear SKB */ |
---|
1178 | 1461 | if (unlikely(cqe_bcnt > rq->hw_mtu)) { |
---|
.. | .. |
---|
1186 | 1469 | |
---|
1187 | 1470 | dma_sync_single_range_for_cpu(rq->pdev, di->addr, head_offset, |
---|
1188 | 1471 | frag_size, DMA_FROM_DEVICE); |
---|
1189 | | - prefetchw(va); /* xdp_frame data area */ |
---|
1190 | | - prefetch(data); |
---|
| 1472 | + net_prefetchw(va); /* xdp_frame data area */ |
---|
| 1473 | + net_prefetch(data); |
---|
1191 | 1474 | |
---|
1192 | | - rcu_read_lock(); |
---|
1193 | | - consumed = mlx5e_xdp_handle(rq, di, va, &rx_headroom, &cqe_bcnt32); |
---|
1194 | | - rcu_read_unlock(); |
---|
1195 | | - if (consumed) { |
---|
| 1475 | + mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt32, &xdp); |
---|
| 1476 | + if (mlx5e_xdp_handle(rq, di, &cqe_bcnt32, &xdp)) { |
---|
1196 | 1477 | if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) |
---|
1197 | 1478 | __set_bit(page_idx, wi->xdp_xmit_bitmap); /* non-atomic */ |
---|
1198 | 1479 | return NULL; /* page/packet was consumed by XDP */ |
---|
1199 | 1480 | } |
---|
1200 | 1481 | |
---|
| 1482 | + rx_headroom = xdp.data - xdp.data_hard_start; |
---|
| 1483 | + frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt32); |
---|
1201 | 1484 | skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt32); |
---|
1202 | 1485 | if (unlikely(!skb)) |
---|
1203 | 1486 | return NULL; |
---|
.. | .. |
---|
1208 | 1491 | return skb; |
---|
1209 | 1492 | } |
---|
1210 | 1493 | |
---|
1211 | | -void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) |
---|
| 1494 | +static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) |
---|
1212 | 1495 | { |
---|
1213 | 1496 | u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe); |
---|
1214 | 1497 | u16 wqe_id = be16_to_cpu(cqe->wqe_id); |
---|
.. | .. |
---|
1224 | 1507 | |
---|
1225 | 1508 | wi->consumed_strides += cstrides; |
---|
1226 | 1509 | |
---|
1227 | | - if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) { |
---|
| 1510 | + if (unlikely(MLX5E_RX_ERR_CQE(cqe))) { |
---|
| 1511 | + trigger_report(rq, cqe); |
---|
1228 | 1512 | rq->stats->wqe_err++; |
---|
1229 | 1513 | goto mpwrq_cqe_out; |
---|
1230 | 1514 | } |
---|
.. | .. |
---|
1239 | 1523 | |
---|
1240 | 1524 | cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe); |
---|
1241 | 1525 | |
---|
1242 | | - skb = rq->mpwqe.skb_from_cqe_mpwrq(rq, wi, cqe_bcnt, head_offset, |
---|
1243 | | - page_idx); |
---|
| 1526 | + skb = INDIRECT_CALL_2(rq->mpwqe.skb_from_cqe_mpwrq, |
---|
| 1527 | + mlx5e_skb_from_cqe_mpwrq_linear, |
---|
| 1528 | + mlx5e_skb_from_cqe_mpwrq_nonlinear, |
---|
| 1529 | + rq, wi, cqe_bcnt, head_offset, page_idx); |
---|
1244 | 1530 | if (!skb) |
---|
1245 | 1531 | goto mpwrq_cqe_out; |
---|
1246 | 1532 | |
---|
1247 | 1533 | mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb); |
---|
| 1534 | + |
---|
| 1535 | + if (mlx5e_cqe_regb_chain(cqe)) |
---|
| 1536 | + if (!mlx5e_tc_update_skb(cqe, skb)) { |
---|
| 1537 | + dev_kfree_skb_any(skb); |
---|
| 1538 | + goto mpwrq_cqe_out; |
---|
| 1539 | + } |
---|
| 1540 | + |
---|
1248 | 1541 | napi_gro_receive(rq->cq.napi, skb); |
---|
1249 | 1542 | |
---|
1250 | 1543 | mpwrq_cqe_out: |
---|
.. | .. |
---|
1260 | 1553 | int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget) |
---|
1261 | 1554 | { |
---|
1262 | 1555 | struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq); |
---|
1263 | | - struct mlx5e_xdpsq *xdpsq = &rq->xdpsq; |
---|
| 1556 | + struct mlx5_cqwq *cqwq = &cq->wq; |
---|
1264 | 1557 | struct mlx5_cqe64 *cqe; |
---|
1265 | 1558 | int work_done = 0; |
---|
1266 | 1559 | |
---|
1267 | 1560 | if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state))) |
---|
1268 | 1561 | return 0; |
---|
1269 | 1562 | |
---|
1270 | | - if (cq->decmprs_left) { |
---|
1271 | | - work_done += mlx5e_decompress_cqes_cont(rq, cq, 0, budget); |
---|
1272 | | - if (cq->decmprs_left || work_done >= budget) |
---|
| 1563 | + if (rq->page_pool) |
---|
| 1564 | + page_pool_nid_changed(rq->page_pool, numa_mem_id()); |
---|
| 1565 | + |
---|
| 1566 | + if (rq->cqd.left) { |
---|
| 1567 | + work_done += mlx5e_decompress_cqes_cont(rq, cqwq, 0, budget); |
---|
| 1568 | + if (rq->cqd.left || work_done >= budget) |
---|
1273 | 1569 | goto out; |
---|
1274 | 1570 | } |
---|
1275 | 1571 | |
---|
1276 | | - cqe = mlx5_cqwq_get_cqe(&cq->wq); |
---|
| 1572 | + cqe = mlx5_cqwq_get_cqe(cqwq); |
---|
1277 | 1573 | if (!cqe) { |
---|
1278 | 1574 | if (unlikely(work_done)) |
---|
1279 | 1575 | goto out; |
---|
.. | .. |
---|
1283 | 1579 | do { |
---|
1284 | 1580 | if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED) { |
---|
1285 | 1581 | work_done += |
---|
1286 | | - mlx5e_decompress_cqes_start(rq, cq, |
---|
| 1582 | + mlx5e_decompress_cqes_start(rq, cqwq, |
---|
1287 | 1583 | budget - work_done); |
---|
1288 | 1584 | continue; |
---|
1289 | 1585 | } |
---|
1290 | 1586 | |
---|
1291 | | - mlx5_cqwq_pop(&cq->wq); |
---|
| 1587 | + mlx5_cqwq_pop(cqwq); |
---|
1292 | 1588 | |
---|
1293 | | - rq->handle_rx_cqe(rq, cqe); |
---|
1294 | | - } while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(&cq->wq))); |
---|
| 1589 | + INDIRECT_CALL_2(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq, |
---|
| 1590 | + mlx5e_handle_rx_cqe, rq, cqe); |
---|
| 1591 | + } while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(cqwq))); |
---|
1295 | 1592 | |
---|
1296 | 1593 | out: |
---|
1297 | | - if (xdpsq->doorbell) { |
---|
1298 | | - mlx5e_xmit_xdp_doorbell(xdpsq); |
---|
1299 | | - xdpsq->doorbell = false; |
---|
1300 | | - } |
---|
| 1594 | + if (rcu_access_pointer(rq->xdp_prog)) |
---|
| 1595 | + mlx5e_xdp_rx_poll_complete(rq); |
---|
1301 | 1596 | |
---|
1302 | | - if (xdpsq->redirect_flush) { |
---|
1303 | | - xdp_do_flush_map(); |
---|
1304 | | - xdpsq->redirect_flush = false; |
---|
1305 | | - } |
---|
1306 | | - |
---|
1307 | | - mlx5_cqwq_update_db_record(&cq->wq); |
---|
| 1597 | + mlx5_cqwq_update_db_record(cqwq); |
---|
1308 | 1598 | |
---|
1309 | 1599 | /* ensure cq space is freed before enabling more cqes */ |
---|
1310 | 1600 | wmb(); |
---|
.. | .. |
---|
1323 | 1613 | u32 cqe_bcnt, |
---|
1324 | 1614 | struct sk_buff *skb) |
---|
1325 | 1615 | { |
---|
1326 | | - struct mlx5e_rq_stats *stats = rq->stats; |
---|
1327 | 1616 | struct hwtstamp_config *tstamp; |
---|
| 1617 | + struct mlx5e_rq_stats *stats; |
---|
1328 | 1618 | struct net_device *netdev; |
---|
1329 | 1619 | struct mlx5e_priv *priv; |
---|
1330 | 1620 | char *pseudo_header; |
---|
.. | .. |
---|
1348 | 1638 | |
---|
1349 | 1639 | priv = mlx5i_epriv(netdev); |
---|
1350 | 1640 | tstamp = &priv->tstamp; |
---|
| 1641 | + stats = &priv->channel_stats[rq->ix].rq; |
---|
1351 | 1642 | |
---|
1352 | 1643 | flags_rqpn = be32_to_cpu(cqe->flags_rqpn); |
---|
1353 | 1644 | g = (flags_rqpn >> 28) & 3; |
---|
.. | .. |
---|
1403 | 1694 | stats->bytes += cqe_bcnt; |
---|
1404 | 1695 | } |
---|
1405 | 1696 | |
---|
1406 | | -void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) |
---|
| 1697 | +static void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) |
---|
1407 | 1698 | { |
---|
1408 | 1699 | struct mlx5_wq_cyc *wq = &rq->wqe.wq; |
---|
1409 | 1700 | struct mlx5e_wqe_frag_info *wi; |
---|
.. | .. |
---|
1415 | 1706 | wi = get_frag(rq, ci); |
---|
1416 | 1707 | cqe_bcnt = be32_to_cpu(cqe->byte_cnt); |
---|
1417 | 1708 | |
---|
1418 | | - skb = rq->wqe.skb_from_cqe(rq, cqe, wi, cqe_bcnt); |
---|
| 1709 | + if (unlikely(MLX5E_RX_ERR_CQE(cqe))) { |
---|
| 1710 | + rq->stats->wqe_err++; |
---|
| 1711 | + goto wq_free_wqe; |
---|
| 1712 | + } |
---|
| 1713 | + |
---|
| 1714 | + skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe, |
---|
| 1715 | + mlx5e_skb_from_cqe_linear, |
---|
| 1716 | + mlx5e_skb_from_cqe_nonlinear, |
---|
| 1717 | + rq, cqe, wi, cqe_bcnt); |
---|
1419 | 1718 | if (!skb) |
---|
1420 | 1719 | goto wq_free_wqe; |
---|
1421 | 1720 | |
---|
.. | .. |
---|
1431 | 1730 | mlx5_wq_cyc_pop(wq); |
---|
1432 | 1731 | } |
---|
1433 | 1732 | |
---|
| 1733 | +const struct mlx5e_rx_handlers mlx5i_rx_handlers = { |
---|
| 1734 | + .handle_rx_cqe = mlx5i_handle_rx_cqe, |
---|
| 1735 | + .handle_rx_cqe_mpwqe = NULL, /* Not supported */ |
---|
| 1736 | +}; |
---|
1434 | 1737 | #endif /* CONFIG_MLX5_CORE_IPOIB */ |
---|
1435 | 1738 | |
---|
1436 | 1739 | #ifdef CONFIG_MLX5_EN_IPSEC |
---|
1437 | 1740 | |
---|
1438 | | -void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) |
---|
| 1741 | +static void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) |
---|
1439 | 1742 | { |
---|
1440 | 1743 | struct mlx5_wq_cyc *wq = &rq->wqe.wq; |
---|
1441 | 1744 | struct mlx5e_wqe_frag_info *wi; |
---|
.. | .. |
---|
1447 | 1750 | wi = get_frag(rq, ci); |
---|
1448 | 1751 | cqe_bcnt = be32_to_cpu(cqe->byte_cnt); |
---|
1449 | 1752 | |
---|
1450 | | - skb = rq->wqe.skb_from_cqe(rq, cqe, wi, cqe_bcnt); |
---|
1451 | | - if (unlikely(!skb)) { |
---|
1452 | | - /* a DROP, save the page-reuse checks */ |
---|
1453 | | - mlx5e_free_rx_wqe(rq, wi, true); |
---|
1454 | | - goto wq_cyc_pop; |
---|
| 1753 | + if (unlikely(MLX5E_RX_ERR_CQE(cqe))) { |
---|
| 1754 | + rq->stats->wqe_err++; |
---|
| 1755 | + goto wq_free_wqe; |
---|
1455 | 1756 | } |
---|
| 1757 | + |
---|
| 1758 | + skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe, |
---|
| 1759 | + mlx5e_skb_from_cqe_linear, |
---|
| 1760 | + mlx5e_skb_from_cqe_nonlinear, |
---|
| 1761 | + rq, cqe, wi, cqe_bcnt); |
---|
| 1762 | + if (unlikely(!skb)) /* a DROP, save the page-reuse checks */ |
---|
| 1763 | + goto wq_free_wqe; |
---|
| 1764 | + |
---|
1456 | 1765 | skb = mlx5e_ipsec_handle_rx_skb(rq->netdev, skb, &cqe_bcnt); |
---|
1457 | | - if (unlikely(!skb)) { |
---|
1458 | | - mlx5e_free_rx_wqe(rq, wi, true); |
---|
1459 | | - goto wq_cyc_pop; |
---|
1460 | | - } |
---|
| 1766 | + if (unlikely(!skb)) |
---|
| 1767 | + goto wq_free_wqe; |
---|
1461 | 1768 | |
---|
1462 | 1769 | mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb); |
---|
1463 | 1770 | napi_gro_receive(rq->cq.napi, skb); |
---|
1464 | 1771 | |
---|
| 1772 | +wq_free_wqe: |
---|
1465 | 1773 | mlx5e_free_rx_wqe(rq, wi, true); |
---|
1466 | | -wq_cyc_pop: |
---|
1467 | 1774 | mlx5_wq_cyc_pop(wq); |
---|
1468 | 1775 | } |
---|
1469 | 1776 | |
---|
1470 | 1777 | #endif /* CONFIG_MLX5_EN_IPSEC */ |
---|
| 1778 | + |
---|
| 1779 | +int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool xsk) |
---|
| 1780 | +{ |
---|
| 1781 | + struct mlx5_core_dev *mdev = rq->mdev; |
---|
| 1782 | + struct mlx5e_channel *c = rq->channel; |
---|
| 1783 | + |
---|
| 1784 | + switch (rq->wq_type) { |
---|
| 1785 | + case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: |
---|
| 1786 | + rq->mpwqe.skb_from_cqe_mpwrq = xsk ? |
---|
| 1787 | + mlx5e_xsk_skb_from_cqe_mpwrq_linear : |
---|
| 1788 | + mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL) ? |
---|
| 1789 | + mlx5e_skb_from_cqe_mpwrq_linear : |
---|
| 1790 | + mlx5e_skb_from_cqe_mpwrq_nonlinear; |
---|
| 1791 | + rq->post_wqes = mlx5e_post_rx_mpwqes; |
---|
| 1792 | + rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe; |
---|
| 1793 | + |
---|
| 1794 | + rq->handle_rx_cqe = c->priv->profile->rx_handlers->handle_rx_cqe_mpwqe; |
---|
| 1795 | +#ifdef CONFIG_MLX5_EN_IPSEC |
---|
| 1796 | + if (MLX5_IPSEC_DEV(mdev)) { |
---|
| 1797 | + netdev_err(c->netdev, "MPWQE RQ with IPSec offload not supported\n"); |
---|
| 1798 | + return -EINVAL; |
---|
| 1799 | + } |
---|
| 1800 | +#endif |
---|
| 1801 | + if (!rq->handle_rx_cqe) { |
---|
| 1802 | + netdev_err(c->netdev, "RX handler of MPWQE RQ is not set\n"); |
---|
| 1803 | + return -EINVAL; |
---|
| 1804 | + } |
---|
| 1805 | + break; |
---|
| 1806 | + default: /* MLX5_WQ_TYPE_CYCLIC */ |
---|
| 1807 | + rq->wqe.skb_from_cqe = xsk ? |
---|
| 1808 | + mlx5e_xsk_skb_from_cqe_linear : |
---|
| 1809 | + mlx5e_rx_is_linear_skb(params, NULL) ? |
---|
| 1810 | + mlx5e_skb_from_cqe_linear : |
---|
| 1811 | + mlx5e_skb_from_cqe_nonlinear; |
---|
| 1812 | + rq->post_wqes = mlx5e_post_rx_wqes; |
---|
| 1813 | + rq->dealloc_wqe = mlx5e_dealloc_rx_wqe; |
---|
| 1814 | + |
---|
| 1815 | +#ifdef CONFIG_MLX5_EN_IPSEC |
---|
| 1816 | + if ((mlx5_fpga_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_DEVICE) && |
---|
| 1817 | + c->priv->ipsec) |
---|
| 1818 | + rq->handle_rx_cqe = mlx5e_ipsec_handle_rx_cqe; |
---|
| 1819 | + else |
---|
| 1820 | +#endif |
---|
| 1821 | + rq->handle_rx_cqe = c->priv->profile->rx_handlers->handle_rx_cqe; |
---|
| 1822 | + if (!rq->handle_rx_cqe) { |
---|
| 1823 | + netdev_err(c->netdev, "RX handler of RQ is not set\n"); |
---|
| 1824 | + return -EINVAL; |
---|
| 1825 | + } |
---|
| 1826 | + } |
---|
| 1827 | + |
---|
| 1828 | + return 0; |
---|
| 1829 | +} |
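The new `mlx5e_rq_set_handlers()` centralizes the per-RQ wiring of `skb_from_cqe*`, `post_wqes`, `dealloc_wqe` and `handle_rx_cqe`, pulling the handlers from the profile's `rx_handlers` table (`mlx5e_rx_handlers_nic`, `mlx5e_rx_handlers_rep`, `mlx5i_rx_handlers` above). The call site is not part of this diff; a hedged sketch of how RQ setup is expected to use it, assuming `rq->wq_type`, `rq->channel` and `rq->mdev` are already initialized and the function name is hypothetical:

```c
static int example_alloc_rq(struct mlx5e_rq *rq, struct mlx5e_params *params,
			    struct mlx5e_xsk_param *xsk)
{
	int err;

	err = mlx5e_rq_set_handlers(rq, params, xsk != NULL);
	if (err)
		return err;	/* e.g. the profile provided no MPWQE handler */

	/* ... the rest of RQ allocation continues as before ... */
	return 0;
}
```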
---|