forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-01-03 2f7c68cb55ecb7331f2381deb497c27155f32faf
kernel/drivers/net/ethernet/amazon/ena/ena_eth_com.c
@@ -1,38 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
 /*
- * Copyright 2015 Amazon.com, Inc. or its affiliates.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- *   copyright notice, this list of conditions and the following
- *   disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- *   copyright notice, this list of conditions and the following
- *   disclaimer in the documentation and/or other materials
- *   provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
  */
 
 #include "ena_eth_com.h"
 
-static inline struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
+static struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
 	struct ena_com_io_cq *io_cq)
 {
 	struct ena_eth_io_rx_cdesc_base *cdesc;
@@ -45,8 +18,9 @@
 	cdesc = (struct ena_eth_io_rx_cdesc_base *)(io_cq->cdesc_addr.virt_addr
 			+ (head_masked * io_cq->cdesc_entry_size_in_bytes));
 
-	desc_phase = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
-		ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT;
+	desc_phase = (READ_ONCE(cdesc->status) &
+		      ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
+		     ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT;
 
 	if (desc_phase != expected_phase)
 		return NULL;
@@ -59,16 +33,7 @@
 	return cdesc;
 }
 
-static inline void ena_com_cq_inc_head(struct ena_com_io_cq *io_cq)
-{
-	io_cq->head++;
-
-	/* Switch phase bit in case of wrap around */
-	if (unlikely((io_cq->head & (io_cq->q_depth - 1)) == 0))
-		io_cq->phase ^= 1;
-}
-
-static inline void *get_sq_desc(struct ena_com_io_sq *io_sq)
+static void *get_sq_desc_regular_queue(struct ena_com_io_sq *io_sq)
 {
 	u16 tail_masked;
 	u32 offset;
@@ -80,50 +45,175 @@
 	return (void *)((uintptr_t)io_sq->desc_addr.virt_addr + offset);
 }
 
-static inline void ena_com_copy_curr_sq_desc_to_dev(struct ena_com_io_sq *io_sq)
+static int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
+					      u8 *bounce_buffer)
 {
-	u16 tail_masked = io_sq->tail & (io_sq->q_depth - 1);
-	u32 offset = tail_masked * io_sq->desc_entry_size;
+	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
 
-	/* In case this queue isn't a LLQ */
-	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
-		return;
+	u16 dst_tail_mask;
+	u32 dst_offset;
 
-	memcpy_toio(io_sq->desc_addr.pbuf_dev_addr + offset,
-		    io_sq->desc_addr.virt_addr + offset,
-		    io_sq->desc_entry_size);
-}
+	dst_tail_mask = io_sq->tail & (io_sq->q_depth - 1);
+	dst_offset = dst_tail_mask * llq_info->desc_list_entry_size;
 
-static inline void ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
-{
+	if (is_llq_max_tx_burst_exists(io_sq)) {
+		if (unlikely(!io_sq->entries_in_tx_burst_left)) {
+			pr_err("Error: trying to send more packets than tx burst allows\n");
+			return -ENOSPC;
+		}
+
+		io_sq->entries_in_tx_burst_left--;
+		pr_debug("Decreasing entries_in_tx_burst_left of queue %d to %d\n",
+			 io_sq->qid, io_sq->entries_in_tx_burst_left);
+	}
+
+	/* Make sure everything was written into the bounce buffer before
+	 * writing the bounce buffer to the device
+	 */
+	wmb();
+
+	/* The line is completed. Copy it to dev */
+	__iowrite64_copy(io_sq->desc_addr.pbuf_dev_addr + dst_offset,
+			 bounce_buffer, (llq_info->desc_list_entry_size) / 8);
+
 	io_sq->tail++;
 
 	/* Switch phase bit in case of wrap around */
 	if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
 		io_sq->phase ^= 1;
-}
-
-static inline int ena_com_write_header(struct ena_com_io_sq *io_sq,
-				       u8 *head_src, u16 header_len)
-{
-	u16 tail_masked = io_sq->tail & (io_sq->q_depth - 1);
-	u8 __iomem *dev_head_addr =
-		io_sq->header_addr + (tail_masked * io_sq->tx_max_header_size);
-
-	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
-		return 0;
-
-	if (unlikely(!io_sq->header_addr)) {
-		pr_err("Push buffer header ptr is NULL\n");
-		return -EINVAL;
-	}
-
-	memcpy_toio(dev_head_addr, head_src, header_len);
 
 	return 0;
 }
 
-static inline struct ena_eth_io_rx_cdesc_base *
+static int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq,
+					  u8 *header_src,
+					  u16 header_len)
+{
+	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
+	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
+	u8 *bounce_buffer = pkt_ctrl->curr_bounce_buf;
+	u16 header_offset;
+
+	if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST))
+		return 0;
+
+	header_offset =
+		llq_info->descs_num_before_header * io_sq->desc_entry_size;
+
+	if (unlikely((header_offset + header_len) >
+		     llq_info->desc_list_entry_size)) {
+		pr_err("Trying to write header larger than llq entry can accommodate\n");
+		return -EFAULT;
+	}
+
+	if (unlikely(!bounce_buffer)) {
+		pr_err("Bounce buffer is NULL\n");
+		return -EFAULT;
+	}
+
+	memcpy(bounce_buffer + header_offset, header_src, header_len);
+
+	return 0;
+}
+
+static void *get_sq_desc_llq(struct ena_com_io_sq *io_sq)
+{
+	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
+	u8 *bounce_buffer;
+	void *sq_desc;
+
+	bounce_buffer = pkt_ctrl->curr_bounce_buf;
+
+	if (unlikely(!bounce_buffer)) {
+		pr_err("Bounce buffer is NULL\n");
+		return NULL;
+	}
+
+	sq_desc = bounce_buffer + pkt_ctrl->idx * io_sq->desc_entry_size;
+	pkt_ctrl->idx++;
+	pkt_ctrl->descs_left_in_line--;
+
+	return sq_desc;
+}
+
+static int ena_com_close_bounce_buffer(struct ena_com_io_sq *io_sq)
+{
+	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
+	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
+	int rc;
+
+	if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST))
+		return 0;
+
+	/* bounce buffer was used, so write it and get a new one */
+	if (pkt_ctrl->idx) {
+		rc = ena_com_write_bounce_buffer_to_dev(io_sq,
+							pkt_ctrl->curr_bounce_buf);
+		if (unlikely(rc))
+			return rc;
+
+		pkt_ctrl->curr_bounce_buf =
+			ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
+		memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
+		       0x0, llq_info->desc_list_entry_size);
+	}
+
+	pkt_ctrl->idx = 0;
+	pkt_ctrl->descs_left_in_line = llq_info->descs_num_before_header;
+	return 0;
+}
+
+static void *get_sq_desc(struct ena_com_io_sq *io_sq)
+{
+	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
+		return get_sq_desc_llq(io_sq);
+
+	return get_sq_desc_regular_queue(io_sq);
+}
+
+static int ena_com_sq_update_llq_tail(struct ena_com_io_sq *io_sq)
+{
+	struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
+	struct ena_com_llq_info *llq_info = &io_sq->llq_info;
+	int rc;
+
+	if (!pkt_ctrl->descs_left_in_line) {
+		rc = ena_com_write_bounce_buffer_to_dev(io_sq,
+							pkt_ctrl->curr_bounce_buf);
+		if (unlikely(rc))
+			return rc;
+
+		pkt_ctrl->curr_bounce_buf =
+			ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
+		memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
+		       0x0, llq_info->desc_list_entry_size);
+
+		pkt_ctrl->idx = 0;
+		if (unlikely(llq_info->desc_stride_ctrl == ENA_ADMIN_SINGLE_DESC_PER_ENTRY))
+			pkt_ctrl->descs_left_in_line = 1;
+		else
+			pkt_ctrl->descs_left_in_line =
+				llq_info->desc_list_entry_size / io_sq->desc_entry_size;
+	}
+
+	return 0;
+}
+
+static int ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
+{
+	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
+		return ena_com_sq_update_llq_tail(io_sq);
+
+	io_sq->tail++;
+
+	/* Switch phase bit in case of wrap around */
+	if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
+		io_sq->phase ^= 1;
+
+	return 0;
+}
+
+static struct ena_eth_io_rx_cdesc_base *
 ena_com_rx_cdesc_idx_to_ptr(struct ena_com_io_cq *io_cq, u16 idx)
 {
 	idx &= (io_cq->q_depth - 1);
@@ -132,7 +222,7 @@
 		idx * io_cq->cdesc_entry_size_in_bytes);
 }
 
-static inline u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
+static u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
 					   u16 *first_cdesc_idx)
 {
 	struct ena_eth_io_rx_cdesc_base *cdesc;
@@ -146,8 +236,9 @@
 
 		ena_com_cq_inc_head(io_cq);
 		count++;
-		last = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
-			ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
+		last = (READ_ONCE(cdesc->status) &
+			ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
+		       ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
 	} while (!last);
 
 	if (last) {
@@ -159,7 +250,7 @@
 		io_cq->cur_rx_pkt_cdesc_count = 0;
 		io_cq->cur_rx_pkt_cdesc_start_idx = head_masked;
 
-		pr_debug("ena q_id: %d packets were completed. first desc idx %u descs# %d\n",
+		pr_debug("ENA q_id: %d packets were completed. first desc idx %u descs# %d\n",
 			 io_cq->qid, *first_cdesc_idx, count);
 	} else {
 		io_cq->cur_rx_pkt_cdesc_count += count;
@@ -169,30 +260,15 @@
 	return count;
 }
 
-static inline bool ena_com_meta_desc_changed(struct ena_com_io_sq *io_sq,
-					     struct ena_com_tx_ctx *ena_tx_ctx)
-{
-	int rc;
-
-	if (ena_tx_ctx->meta_valid) {
-		rc = memcmp(&io_sq->cached_tx_meta,
-			    &ena_tx_ctx->ena_meta,
-			    sizeof(struct ena_com_tx_meta));
-
-		if (unlikely(rc != 0))
-			return true;
-	}
-
-	return false;
-}
-
-static inline void ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
-							  struct ena_com_tx_ctx *ena_tx_ctx)
+static int ena_com_create_meta(struct ena_com_io_sq *io_sq,
+			       struct ena_com_tx_meta *ena_meta)
 {
 	struct ena_eth_io_tx_meta_desc *meta_desc = NULL;
-	struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;
 
 	meta_desc = get_sq_desc(io_sq);
+	if (unlikely(!meta_desc))
+		return -EFAULT;
+
 	memset(meta_desc, 0x0, sizeof(struct ena_eth_io_tx_meta_desc));
 
 	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_DESC_MASK;
@@ -200,7 +276,7 @@
 	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK;
 
 	/* bits 0-9 of the mss */
-	meta_desc->word2 |= (ena_meta->mss <<
+	meta_desc->word2 |= ((u32)ena_meta->mss <<
 		ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT) &
 		ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK;
 	/* bits 10-13 of the mss */
@@ -210,34 +286,57 @@
 
 	/* Extended meta desc */
 	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK;
-	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;
-	meta_desc->len_ctrl |= (io_sq->phase <<
+	meta_desc->len_ctrl |= ((u32)io_sq->phase <<
 		ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT) &
 		ENA_ETH_IO_TX_META_DESC_PHASE_MASK;
 
 	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_FIRST_MASK;
+	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;
+
 	meta_desc->word2 |= ena_meta->l3_hdr_len &
 		ENA_ETH_IO_TX_META_DESC_L3_HDR_LEN_MASK;
 	meta_desc->word2 |= (ena_meta->l3_hdr_offset <<
 		ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT) &
 		ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK;
 
-	meta_desc->word2 |= (ena_meta->l4_hdr_len <<
+	meta_desc->word2 |= ((u32)ena_meta->l4_hdr_len <<
 		ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT) &
 		ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK;
 
-	meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_STORE_MASK;
-
-	/* Cached the meta desc */
-	memcpy(&io_sq->cached_tx_meta, ena_meta,
-	       sizeof(struct ena_com_tx_meta));
-
-	ena_com_copy_curr_sq_desc_to_dev(io_sq);
-	ena_com_sq_update_tail(io_sq);
+	return ena_com_sq_update_tail(io_sq);
 }
 
-static inline void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,
-					struct ena_eth_io_rx_cdesc_base *cdesc)
+static int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
+						 struct ena_com_tx_ctx *ena_tx_ctx,
+						 bool *have_meta)
+{
+	struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;
+
+	/* When disable meta caching is set, don't bother to save the meta and
+	 * compare it to the stored version, just create the meta
+	 */
+	if (io_sq->disable_meta_caching) {
+		if (unlikely(!ena_tx_ctx->meta_valid))
+			return -EINVAL;
+
+		*have_meta = true;
+		return ena_com_create_meta(io_sq, ena_meta);
+	}
+
+	if (ena_com_meta_desc_changed(io_sq, ena_tx_ctx)) {
+		*have_meta = true;
+		/* Cache the meta desc */
+		memcpy(&io_sq->cached_tx_meta, ena_meta,
+		       sizeof(struct ena_com_tx_meta));
+		return ena_com_create_meta(io_sq, ena_meta);
+	}
+
+	*have_meta = false;
+	return 0;
+}
+
+static void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,
+				 struct ena_eth_io_rx_cdesc_base *cdesc)
 {
 	ena_rx_ctx->l3_proto = cdesc->status &
 		ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK;
@@ -250,12 +349,15 @@
 	ena_rx_ctx->l4_csum_err =
 		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_MASK) >>
 		   ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_ERR_SHIFT);
+	ena_rx_ctx->l4_csum_checked =
+		!!((cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_MASK) >>
+		   ENA_ETH_IO_RX_CDESC_BASE_L4_CSUM_CHECKED_SHIFT);
 	ena_rx_ctx->hash = cdesc->hash;
 	ena_rx_ctx->frag =
 		(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >>
 		ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT;
 
-	pr_debug("ena_rx_ctx->l3_proto %d ena_rx_ctx->l4_proto %d\nena_rx_ctx->l3_csum_err %d ena_rx_ctx->l4_csum_err %d\nhash frag %d frag: %d cdesc_status: %x\n",
+	pr_debug("l3_proto %d l4_proto %d l3_csum_err %d l4_csum_err %d hash %d frag %d cdesc_status %x\n",
 		 ena_rx_ctx->l3_proto, ena_rx_ctx->l4_proto,
 		 ena_rx_ctx->l3_csum_err, ena_rx_ctx->l4_csum_err,
 		 ena_rx_ctx->hash, ena_rx_ctx->frag, cdesc->status);
@@ -271,60 +373,68 @@
 {
 	struct ena_eth_io_tx_desc *desc = NULL;
 	struct ena_com_buf *ena_bufs = ena_tx_ctx->ena_bufs;
-	void *push_header = ena_tx_ctx->push_header;
+	void *buffer_to_push = ena_tx_ctx->push_header;
 	u16 header_len = ena_tx_ctx->header_len;
 	u16 num_bufs = ena_tx_ctx->num_bufs;
-	int total_desc, i, rc;
+	u16 start_tail = io_sq->tail;
+	int i, rc;
 	bool have_meta;
 	u64 addr_hi;
 
 	WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_TX, "wrong Q type");
 
 	/* num_bufs +1 for potential meta desc */
-	if (ena_com_sq_empty_space(io_sq) < (num_bufs + 1)) {
-		pr_err("Not enough space in the tx queue\n");
+	if (unlikely(!ena_com_sq_have_enough_space(io_sq, num_bufs + 1))) {
+		pr_debug("Not enough space in the tx queue\n");
 		return -ENOMEM;
 	}
 
 	if (unlikely(header_len > io_sq->tx_max_header_size)) {
-		pr_err("header size is too large %d max header: %d\n",
+		pr_err("Header size is too large %d max header: %d\n",
 		       header_len, io_sq->tx_max_header_size);
 		return -EINVAL;
 	}
 
-	/* start with pushing the header (if needed) */
-	rc = ena_com_write_header(io_sq, push_header, header_len);
+	if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV &&
+		     !buffer_to_push))
+		return -EINVAL;
+
+	rc = ena_com_write_header_to_bounce(io_sq, buffer_to_push, header_len);
 	if (unlikely(rc))
 		return rc;
 
-	have_meta = ena_tx_ctx->meta_valid && ena_com_meta_desc_changed(io_sq,
-			ena_tx_ctx);
-	if (have_meta)
-		ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx);
+	rc = ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx, &have_meta);
+	if (unlikely(rc)) {
+		pr_err("Failed to create and store tx meta desc\n");
+		return rc;
+	}
 
-	/* If the caller doesn't want send packets */
+	/* If the caller doesn't want to send packets */
 	if (unlikely(!num_bufs && !header_len)) {
-		*nb_hw_desc = have_meta ? 0 : 1;
-		return 0;
+		rc = ena_com_close_bounce_buffer(io_sq);
+		*nb_hw_desc = io_sq->tail - start_tail;
+		return rc;
 	}
 
 	desc = get_sq_desc(io_sq);
+	if (unlikely(!desc))
+		return -EFAULT;
 	memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));
 
 	/* Set first desc when we don't have meta descriptor */
 	if (!have_meta)
		desc->len_ctrl |= ENA_ETH_IO_TX_DESC_FIRST_MASK;
 
-	desc->buff_addr_hi_hdr_sz |= (header_len <<
+	desc->buff_addr_hi_hdr_sz |= ((u32)header_len <<
 		ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT) &
 		ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK;
-	desc->len_ctrl |= (io_sq->phase << ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
+	desc->len_ctrl |= ((u32)io_sq->phase << ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
 		ENA_ETH_IO_TX_DESC_PHASE_MASK;
 
 	desc->len_ctrl |= ENA_ETH_IO_TX_DESC_COMP_REQ_MASK;
 
 	/* Bits 0-9 */
-	desc->meta_ctrl |= (ena_tx_ctx->req_id <<
+	desc->meta_ctrl |= ((u32)ena_tx_ctx->req_id <<
 		ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT) &
 		ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK;
 
@@ -360,13 +470,17 @@
 	for (i = 0; i < num_bufs; i++) {
 		/* The first desc share the same desc as the header */
 		if (likely(i != 0)) {
-			ena_com_copy_curr_sq_desc_to_dev(io_sq);
-			ena_com_sq_update_tail(io_sq);
+			rc = ena_com_sq_update_tail(io_sq);
+			if (unlikely(rc))
+				return rc;
 
 			desc = get_sq_desc(io_sq);
+			if (unlikely(!desc))
+				return -EFAULT;
+
 			memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));
 
-			desc->len_ctrl |= (io_sq->phase <<
+			desc->len_ctrl |= ((u32)io_sq->phase <<
 				ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
 				ENA_ETH_IO_TX_DESC_PHASE_MASK;
 		}
@@ -386,15 +500,14 @@
 	/* set the last desc indicator */
 	desc->len_ctrl |= ENA_ETH_IO_TX_DESC_LAST_MASK;
 
-	ena_com_copy_curr_sq_desc_to_dev(io_sq);
+	rc = ena_com_sq_update_tail(io_sq);
+	if (unlikely(rc))
+		return rc;
 
-	ena_com_sq_update_tail(io_sq);
+	rc = ena_com_close_bounce_buffer(io_sq);
 
-	total_desc = max_t(u16, num_bufs, 1);
-	total_desc += have_meta ? 1 : 0;
-
-	*nb_hw_desc = total_desc;
-	return 0;
+	*nb_hw_desc = io_sq->tail - start_tail;
+	return rc;
 }
 
 int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
@@ -403,9 +516,10 @@
 {
 	struct ena_com_rx_buf_info *ena_buf = &ena_rx_ctx->ena_bufs[0];
 	struct ena_eth_io_rx_cdesc_base *cdesc = NULL;
+	u16 q_depth = io_cq->q_depth;
 	u16 cdesc_idx = 0;
 	u16 nb_hw_desc;
-	u16 i;
+	u16 i = 0;
 
 	WARN(io_cq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX, "wrong Q type");
 
@@ -415,7 +529,7 @@
 		return 0;
 	}
 
-	pr_debug("fetch rx packet: queue %d completed desc: %d\n", io_cq->qid,
+	pr_debug("Fetch rx packet: queue %d completed desc: %d\n", io_cq->qid,
 		 nb_hw_desc);
 
 	if (unlikely(nb_hw_desc > ena_rx_ctx->max_bufs)) {
@@ -424,13 +538,21 @@
 		return -ENOSPC;
 	}
 
-	for (i = 0; i < nb_hw_desc; i++) {
+	cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx);
+	ena_rx_ctx->pkt_offset = cdesc->offset;
+
+	do {
+		ena_buf[i].len = cdesc->length;
+		ena_buf[i].req_id = cdesc->req_id;
+		if (unlikely(ena_buf[i].req_id >= q_depth))
+			return -EIO;
+
+		if (++i >= nb_hw_desc)
+			break;
+
 		cdesc = ena_com_rx_cdesc_idx_to_ptr(io_cq, cdesc_idx + i);
 
-		ena_buf->len = cdesc->length;
-		ena_buf->req_id = cdesc->req_id;
-		ena_buf++;
-	}
+	} while (1);
 
 	/* Update SQ head ptr */
 	io_sq->next_to_comp += nb_hw_desc;
@@ -453,18 +575,21 @@
 
 	WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX, "wrong Q type");
 
-	if (unlikely(ena_com_sq_empty_space(io_sq) == 0))
+	if (unlikely(!ena_com_sq_have_enough_space(io_sq, 1)))
 		return -ENOSPC;
 
 	desc = get_sq_desc(io_sq);
+	if (unlikely(!desc))
+		return -EFAULT;
+
 	memset(desc, 0x0, sizeof(struct ena_eth_io_rx_desc));
 
 	desc->length = ena_buf->len;
 
-	desc->ctrl |= ENA_ETH_IO_RX_DESC_FIRST_MASK;
-	desc->ctrl |= ENA_ETH_IO_RX_DESC_LAST_MASK;
-	desc->ctrl |= io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK;
-	desc->ctrl |= ENA_ETH_IO_RX_DESC_COMP_REQ_MASK;
+	desc->ctrl = ENA_ETH_IO_RX_DESC_FIRST_MASK |
+		     ENA_ETH_IO_RX_DESC_LAST_MASK |
+		     (io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK) |
+		     ENA_ETH_IO_RX_DESC_COMP_REQ_MASK;
 
 	desc->req_id = req_id;
 
@@ -472,43 +597,7 @@
 	desc->buff_addr_hi =
 		((ena_buf->paddr & GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);
 
-	ena_com_sq_update_tail(io_sq);
-
-	return 0;
-}
-
-int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id)
-{
-	u8 expected_phase, cdesc_phase;
-	struct ena_eth_io_tx_cdesc *cdesc;
-	u16 masked_head;
-
-	masked_head = io_cq->head & (io_cq->q_depth - 1);
-	expected_phase = io_cq->phase;
-
-	cdesc = (struct ena_eth_io_tx_cdesc *)
-		((uintptr_t)io_cq->cdesc_addr.virt_addr +
-		(masked_head * io_cq->cdesc_entry_size_in_bytes));
-
-	/* When the current completion descriptor phase isn't the same as the
-	 * expected, it mean that the device still didn't update
-	 * this completion.
-	 */
-	cdesc_phase = READ_ONCE(cdesc->flags) & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
-	if (cdesc_phase != expected_phase)
-		return -EAGAIN;
-
-	dma_rmb();
-	if (unlikely(cdesc->req_id >= io_cq->q_depth)) {
-		pr_err("Invalid req id %d\n", cdesc->req_id);
-		return -EINVAL;
-	}
-
-	ena_com_cq_inc_head(io_cq);
-
-	*req_id = READ_ONCE(cdesc->req_id);
-
-	return 0;
+	return ena_com_sq_update_tail(io_sq);
 }
 
 bool ena_com_cq_empty(struct ena_com_io_cq *io_cq)