...
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
 /*
- * Copyright 2015 Amazon.com, Inc. or its affiliates.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- *   copyright notice, this list of conditions and the following
- *   disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- *   copyright notice, this list of conditions and the following
- *   disclaimer in the documentation and/or other materials
- *   provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
 */

#include "ena_com.h"
...
#define ENA_ASYNC_QUEUE_DEPTH 16
#define ENA_ADMIN_QUEUE_DEPTH 32

-#define MIN_ENA_VER (((ENA_COMMON_SPEC_VERSION_MAJOR) << \
-		ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) \
-		| (ENA_COMMON_SPEC_VERSION_MINOR))

#define ENA_CTRL_MAJOR 0
#define ENA_CTRL_MINOR 0
...

#define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF

+#define ENA_COM_BOUNCE_BUFFER_CNTRL_CNT 4
+
#define ENA_REGS_ADMIN_INTR_MASK 1

-#define ENA_POLL_MS 5
+#define ENA_MAX_BACKOFF_DELAY_EXP 16U
+
+#define ENA_MIN_ADMIN_POLL_US 100
+
+#define ENA_MAX_ADMIN_POLL_US 5000

/*****************************************************************************/
/*****************************************************************************/
...
	struct ena_admin_acq_get_stats_resp get_resp;
};

-static inline int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
+static int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
				struct ena_common_mem_addr *ena_addr,
				dma_addr_t addr)
{
	if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) {
-		pr_err("dma address has more bits that the device supports\n");
+		pr_err("DMA address has more bits that the device supports\n");
		return -EINVAL;
	}

...
	return 0;
}

-static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue)
+static int ena_com_admin_init_sq(struct ena_com_admin_queue *admin_queue)
{
-	struct ena_com_admin_sq *sq = &queue->sq;
-	u16 size = ADMIN_SQ_SIZE(queue->q_depth);
+	struct ena_com_admin_sq *sq = &admin_queue->sq;
+	u16 size = ADMIN_SQ_SIZE(admin_queue->q_depth);

-	sq->entries = dma_zalloc_coherent(queue->q_dmadev, size, &sq->dma_addr,
-					  GFP_KERNEL);
+	sq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size,
+					 &sq->dma_addr, GFP_KERNEL);

	if (!sq->entries) {
-		pr_err("memory allocation failed");
+		pr_err("Memory allocation failed\n");
		return -ENOMEM;
	}

...
	return 0;
}

-static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
+static int ena_com_admin_init_cq(struct ena_com_admin_queue *admin_queue)
{
-	struct ena_com_admin_cq *cq = &queue->cq;
-	u16 size = ADMIN_CQ_SIZE(queue->q_depth);
+	struct ena_com_admin_cq *cq = &admin_queue->cq;
+	u16 size = ADMIN_CQ_SIZE(admin_queue->q_depth);

-	cq->entries = dma_zalloc_coherent(queue->q_dmadev, size, &cq->dma_addr,
-					  GFP_KERNEL);
+	cq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size,
+					 &cq->dma_addr, GFP_KERNEL);

	if (!cq->entries) {
-		pr_err("memory allocation failed");
+		pr_err("Memory allocation failed\n");
		return -ENOMEM;
	}

...
	return 0;
}

-static int ena_com_admin_init_aenq(struct ena_com_dev *dev,
+static int ena_com_admin_init_aenq(struct ena_com_dev *ena_dev,
				   struct ena_aenq_handlers *aenq_handlers)
{
-	struct ena_com_aenq *aenq = &dev->aenq;
+	struct ena_com_aenq *aenq = &ena_dev->aenq;
	u32 addr_low, addr_high, aenq_caps;
	u16 size;

-	dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
+	ena_dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
	size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
-	aenq->entries = dma_zalloc_coherent(dev->dmadev, size, &aenq->dma_addr,
-					    GFP_KERNEL);
+	aenq->entries = dma_alloc_coherent(ena_dev->dmadev, size,
+					   &aenq->dma_addr, GFP_KERNEL);

	if (!aenq->entries) {
-		pr_err("memory allocation failed");
+		pr_err("Memory allocation failed\n");
		return -ENOMEM;
	}

...
	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr);

-	writel(addr_low, dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
-	writel(addr_high, dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);
+	writel(addr_low, ena_dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
+	writel(addr_high, ena_dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);

	aenq_caps = 0;
-	aenq_caps |= dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
+	aenq_caps |= ena_dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
	aenq_caps |= (sizeof(struct ena_admin_aenq_entry)
		      << ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
		     ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
-	writel(aenq_caps, dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);
+	writel(aenq_caps, ena_dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);

	if (unlikely(!aenq_handlers)) {
-		pr_err("aenq handlers pointer is NULL\n");
+		pr_err("AENQ handlers pointer is NULL\n");
		return -EINVAL;
	}

...
	return 0;
}

-static inline void comp_ctxt_release(struct ena_com_admin_queue *queue,
+static void comp_ctxt_release(struct ena_com_admin_queue *queue,
			      struct ena_comp_ctx *comp_ctx)
{
	comp_ctx->occupied = false;
	atomic_dec(&queue->outstanding_cmds);
}

-static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue,
+static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *admin_queue,
					  u16 command_id, bool capture)
{
-	if (unlikely(!queue->comp_ctx)) {
+	if (unlikely(command_id >= admin_queue->q_depth)) {
+		pr_err("Command id is larger than the queue size. cmd_id: %u queue size %d\n",
+		       command_id, admin_queue->q_depth);
+		return NULL;
+	}
+
+	if (unlikely(!admin_queue->comp_ctx)) {
		pr_err("Completion context is NULL\n");
		return NULL;
	}

-	if (unlikely(command_id >= queue->q_depth)) {
-		pr_err("command id is larger than the queue size. cmd_id: %u queue size %d\n",
-		       command_id, queue->q_depth);
-		return NULL;
-	}
-
-	if (unlikely(queue->comp_ctx[command_id].occupied && capture)) {
+	if (unlikely(admin_queue->comp_ctx[command_id].occupied && capture)) {
		pr_err("Completion context is occupied\n");
		return NULL;
	}

	if (capture) {
-		atomic_inc(&queue->outstanding_cmds);
-		queue->comp_ctx[command_id].occupied = true;
+		atomic_inc(&admin_queue->outstanding_cmds);
+		admin_queue->comp_ctx[command_id].occupied = true;
	}

-	return &queue->comp_ctx[command_id];
+	return &admin_queue->comp_ctx[command_id];
}

static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
...
	tail_masked = admin_queue->sq.tail & queue_size_mask;

	/* In case of queue FULL */
-	cnt = atomic_read(&admin_queue->outstanding_cmds);
+	cnt = (u16)atomic_read(&admin_queue->outstanding_cmds);
	if (cnt >= admin_queue->q_depth) {
-		pr_debug("admin queue is full.\n");
+		pr_debug("Admin queue is full.\n");
		admin_queue->stats.out_of_space++;
		return ERR_PTR(-ENOSPC);
	}
...
	return comp_ctx;
}

-static inline int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue)
+static int ena_com_init_comp_ctxt(struct ena_com_admin_queue *admin_queue)
{
-	size_t size = queue->q_depth * sizeof(struct ena_comp_ctx);
+	size_t size = admin_queue->q_depth * sizeof(struct ena_comp_ctx);
	struct ena_comp_ctx *comp_ctx;
	u16 i;

-	queue->comp_ctx = devm_kzalloc(queue->q_dmadev, size, GFP_KERNEL);
-	if (unlikely(!queue->comp_ctx)) {
-		pr_err("memory allocation failed");
+	admin_queue->comp_ctx =
+		devm_kzalloc(admin_queue->q_dmadev, size, GFP_KERNEL);
+	if (unlikely(!admin_queue->comp_ctx)) {
+		pr_err("Memory allocation failed\n");
		return -ENOMEM;
	}

-	for (i = 0; i < queue->q_depth; i++) {
-		comp_ctx = get_comp_ctxt(queue, i, false);
+	for (i = 0; i < admin_queue->q_depth; i++) {
+		comp_ctx = get_comp_ctxt(admin_queue, i, false);
		if (comp_ctx)
			init_completion(&comp_ctx->wait_event);
	}
...
				     struct ena_admin_acq_entry *comp,
				     size_t comp_size_in_bytes)
{
-	unsigned long flags;
+	unsigned long flags = 0;
	struct ena_comp_ctx *comp_ctx;

	spin_lock_irqsave(&admin_queue->q_lock, flags);
...

	memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));

-	io_sq->dma_addr_bits = ena_dev->dma_addr_bits;
+	io_sq->dma_addr_bits = (u8)ena_dev->dma_addr_bits;
	io_sq->desc_entry_size =
		(io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
		sizeof(struct ena_eth_io_tx_desc) :
...
		dev_node = dev_to_node(ena_dev->dmadev);
		set_dev_node(ena_dev->dmadev, ctx->numa_node);
		io_sq->desc_addr.virt_addr =
-			dma_zalloc_coherent(ena_dev->dmadev, size,
-					    &io_sq->desc_addr.phys_addr,
-					    GFP_KERNEL);
+			dma_alloc_coherent(ena_dev->dmadev, size,
+					   &io_sq->desc_addr.phys_addr,
+					   GFP_KERNEL);
		set_dev_node(ena_dev->dmadev, dev_node);
		if (!io_sq->desc_addr.virt_addr) {
			io_sq->desc_addr.virt_addr =
-				dma_zalloc_coherent(ena_dev->dmadev, size,
-						    &io_sq->desc_addr.phys_addr,
-						    GFP_KERNEL);
+				dma_alloc_coherent(ena_dev->dmadev, size,
+						   &io_sq->desc_addr.phys_addr,
+						   GFP_KERNEL);
		}
-	} else {
-		dev_node = dev_to_node(ena_dev->dmadev);
-		set_dev_node(ena_dev->dmadev, ctx->numa_node);
-		io_sq->desc_addr.virt_addr =
-			devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
-		set_dev_node(ena_dev->dmadev, dev_node);
+
		if (!io_sq->desc_addr.virt_addr) {
-			io_sq->desc_addr.virt_addr =
-				devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
+			pr_err("Memory allocation failed\n");
+			return -ENOMEM;
		}
	}

-	if (!io_sq->desc_addr.virt_addr) {
-		pr_err("memory allocation failed");
-		return -ENOMEM;
+	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
+		/* Allocate bounce buffers */
+		io_sq->bounce_buf_ctrl.buffer_size =
+			ena_dev->llq_info.desc_list_entry_size;
+		io_sq->bounce_buf_ctrl.buffers_num =
+			ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
+		io_sq->bounce_buf_ctrl.next_to_use = 0;
+
+		size = io_sq->bounce_buf_ctrl.buffer_size *
+			io_sq->bounce_buf_ctrl.buffers_num;
+
+		dev_node = dev_to_node(ena_dev->dmadev);
+		set_dev_node(ena_dev->dmadev, ctx->numa_node);
+		io_sq->bounce_buf_ctrl.base_buffer =
+			devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
+		set_dev_node(ena_dev->dmadev, dev_node);
+		if (!io_sq->bounce_buf_ctrl.base_buffer)
+			io_sq->bounce_buf_ctrl.base_buffer =
+				devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
+
+		if (!io_sq->bounce_buf_ctrl.base_buffer) {
+			pr_err("Bounce buffer memory allocation failed\n");
+			return -ENOMEM;
+		}
+
+		memcpy(&io_sq->llq_info, &ena_dev->llq_info,
+		       sizeof(io_sq->llq_info));
+
+		/* Initiate the first bounce buffer */
+		io_sq->llq_buf_ctrl.curr_bounce_buf =
+			ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
+		memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
+		       0x0, io_sq->llq_info.desc_list_entry_size);
+		io_sq->llq_buf_ctrl.descs_left_in_line =
+			io_sq->llq_info.descs_num_before_header;
+		io_sq->disable_meta_caching =
+			io_sq->llq_info.disable_meta_caching;
+
+		if (io_sq->llq_info.max_entries_in_tx_burst > 0)
+			io_sq->entries_in_tx_burst_left =
+				io_sq->llq_info.max_entries_in_tx_burst;
	}

	io_sq->tail = 0;
...
	prev_node = dev_to_node(ena_dev->dmadev);
	set_dev_node(ena_dev->dmadev, ctx->numa_node);
	io_cq->cdesc_addr.virt_addr =
-		dma_zalloc_coherent(ena_dev->dmadev, size,
-				    &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
+		dma_alloc_coherent(ena_dev->dmadev, size,
+				   &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
	set_dev_node(ena_dev->dmadev, prev_node);
	if (!io_cq->cdesc_addr.virt_addr) {
		io_cq->cdesc_addr.virt_addr =
-			dma_zalloc_coherent(ena_dev->dmadev, size,
-					    &io_cq->cdesc_addr.phys_addr,
-					    GFP_KERNEL);
+			dma_alloc_coherent(ena_dev->dmadev, size,
+					   &io_cq->cdesc_addr.phys_addr,
+					   GFP_KERNEL);
	}

	if (!io_cq->cdesc_addr.virt_addr) {
-		pr_err("memory allocation failed");
+		pr_err("Memory allocation failed\n");
		return -ENOMEM;
	}

...

	/* Go over all the completions */
	while ((READ_ONCE(cqe->acq_common_descriptor.flags) &
-		ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
+	       ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
		/* Do not read the rest of the completion entry before the
		 * phase bit was validated
		 */
...
static int ena_com_comp_status_to_errno(u8 comp_status)
{
	if (unlikely(comp_status != 0))
-		pr_err("admin command failed[%u]\n", comp_status);
-
-	if (unlikely(comp_status > ENA_ADMIN_UNKNOWN_ERROR))
-		return -EINVAL;
+		pr_err("Admin command failed[%u]\n", comp_status);

	switch (comp_status) {
	case ENA_ADMIN_SUCCESS:
...
	case ENA_ADMIN_ILLEGAL_PARAMETER:
	case ENA_ADMIN_UNKNOWN_ERROR:
		return -EINVAL;
+	case ENA_ADMIN_RESOURCE_BUSY:
+		return -EAGAIN;
	}

-	return 0;
+	return -EINVAL;
+}
+
+static void ena_delay_exponential_backoff_us(u32 exp, u32 delay_us)
+{
+	exp = min_t(u32, exp, ENA_MAX_BACKOFF_DELAY_EXP);
+	delay_us = max_t(u32, ENA_MIN_ADMIN_POLL_US, delay_us);
+	delay_us = min_t(u32, delay_us * (1U << exp), ENA_MAX_ADMIN_POLL_US);
+	usleep_range(delay_us, 2 * delay_us);
}
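[Editor's note] ena_delay_exponential_backoff_us() above replaces the old fixed msleep(ENA_POLL_MS) admin-queue poll with a capped exponential backoff. A minimal user-space sketch of the same arithmetic, using the constants defined earlier in this patch (the helper name and the printout are illustrative only, not part of the patch):

	#include <stdio.h>

	#define ENA_MAX_BACKOFF_DELAY_EXP 16U
	#define ENA_MIN_ADMIN_POLL_US 100
	#define ENA_MAX_ADMIN_POLL_US 5000

	/* Mirrors the clamping done by ena_delay_exponential_backoff_us(). */
	static unsigned int backoff_delay_us(unsigned int exp, unsigned int delay_us)
	{
		if (exp > ENA_MAX_BACKOFF_DELAY_EXP)
			exp = ENA_MAX_BACKOFF_DELAY_EXP;
		if (delay_us < ENA_MIN_ADMIN_POLL_US)
			delay_us = ENA_MIN_ADMIN_POLL_US;
		delay_us *= 1U << exp;
		return delay_us < ENA_MAX_ADMIN_POLL_US ? delay_us : ENA_MAX_ADMIN_POLL_US;
	}

	int main(void)
	{
		/* With a 100 us base: 100, 200, 400, 800, 1600, 3200, then capped at 5000 us. */
		for (unsigned int exp = 0; exp < 8; exp++)
			printf("exp=%u -> %u us\n", exp, backoff_delay_us(exp, 100));
		return 0;
	}

The kernel helper then sleeps with usleep_range(delay_us, 2 * delay_us), so polling starts fine-grained and settles at a 5 ms ceiling instead of always sleeping 5 ms.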

static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
						     struct ena_com_admin_queue *admin_queue)
{
-	unsigned long flags, timeout;
+	unsigned long flags = 0;
+	unsigned long timeout;
	int ret;
+	u32 exp = 0;

	timeout = jiffies + usecs_to_jiffies(admin_queue->completion_timeout);

...
			goto err;
		}

-		msleep(ENA_POLL_MS);
+		ena_delay_exponential_backoff_us(exp++,
+						 admin_queue->ena_dev->ena_min_poll_delay_us);
	}

	if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
...
	return ret;
}

+/*
+ * Set the LLQ configurations of the firmware
+ *
+ * The driver provides only the enabled feature values to the device,
+ * which in turn, checks if they are supported.
+ */
+static int ena_com_set_llq(struct ena_com_dev *ena_dev)
+{
+	struct ena_com_admin_queue *admin_queue;
+	struct ena_admin_set_feat_cmd cmd;
+	struct ena_admin_set_feat_resp resp;
+	struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
+	int ret;
+
+	memset(&cmd, 0x0, sizeof(cmd));
+	admin_queue = &ena_dev->admin_queue;
+
+	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
+	cmd.feat_common.feature_id = ENA_ADMIN_LLQ;
+
+	cmd.u.llq.header_location_ctrl_enabled = llq_info->header_location_ctrl;
+	cmd.u.llq.entry_size_ctrl_enabled = llq_info->desc_list_entry_size_ctrl;
+	cmd.u.llq.desc_num_before_header_enabled = llq_info->descs_num_before_header;
+	cmd.u.llq.descriptors_stride_ctrl_enabled = llq_info->desc_stride_ctrl;
+
+	cmd.u.llq.accel_mode.u.set.enabled_flags =
+		BIT(ENA_ADMIN_DISABLE_META_CACHING) |
+		BIT(ENA_ADMIN_LIMIT_TX_BURST);
+
+	ret = ena_com_execute_admin_command(admin_queue,
+					    (struct ena_admin_aq_entry *)&cmd,
+					    sizeof(cmd),
+					    (struct ena_admin_acq_entry *)&resp,
+					    sizeof(resp));
+
+	if (unlikely(ret))
+		pr_err("Failed to set LLQ configurations: %d\n", ret);
+
+	return ret;
+}
+
+static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
+				   struct ena_admin_feature_llq_desc *llq_features,
+				   struct ena_llq_configurations *llq_default_cfg)
+{
+	struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
+	struct ena_admin_accel_mode_get llq_accel_mode_get;
+	u16 supported_feat;
+	int rc;
+
+	memset(llq_info, 0, sizeof(*llq_info));
+
+	supported_feat = llq_features->header_location_ctrl_supported;
+
+	if (likely(supported_feat & llq_default_cfg->llq_header_location)) {
+		llq_info->header_location_ctrl =
+			llq_default_cfg->llq_header_location;
+	} else {
+		pr_err("Invalid header location control, supported: 0x%x\n",
+		       supported_feat);
+		return -EINVAL;
+	}
+
+	if (likely(llq_info->header_location_ctrl == ENA_ADMIN_INLINE_HEADER)) {
+		supported_feat = llq_features->descriptors_stride_ctrl_supported;
+		if (likely(supported_feat & llq_default_cfg->llq_stride_ctrl)) {
+			llq_info->desc_stride_ctrl = llq_default_cfg->llq_stride_ctrl;
+		} else {
+			if (supported_feat & ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY) {
+				llq_info->desc_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
+			} else if (supported_feat & ENA_ADMIN_SINGLE_DESC_PER_ENTRY) {
+				llq_info->desc_stride_ctrl = ENA_ADMIN_SINGLE_DESC_PER_ENTRY;
+			} else {
+				pr_err("Invalid desc_stride_ctrl, supported: 0x%x\n",
+				       supported_feat);
+				return -EINVAL;
+			}
+
+			pr_err("Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
+			       llq_default_cfg->llq_stride_ctrl, supported_feat,
+			       llq_info->desc_stride_ctrl);
+		}
+	} else {
+		llq_info->desc_stride_ctrl = 0;
+	}
+
+	supported_feat = llq_features->entry_size_ctrl_supported;
+	if (likely(supported_feat & llq_default_cfg->llq_ring_entry_size)) {
+		llq_info->desc_list_entry_size_ctrl = llq_default_cfg->llq_ring_entry_size;
+		llq_info->desc_list_entry_size = llq_default_cfg->llq_ring_entry_size_value;
+	} else {
+		if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_128B) {
+			llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
+			llq_info->desc_list_entry_size = 128;
+		} else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_192B) {
+			llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_192B;
+			llq_info->desc_list_entry_size = 192;
+		} else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_256B) {
+			llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_256B;
+			llq_info->desc_list_entry_size = 256;
+		} else {
+			pr_err("Invalid entry_size_ctrl, supported: 0x%x\n",
+			       supported_feat);
+			return -EINVAL;
+		}
+
+		pr_err("Default llq ring entry size is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
+		       llq_default_cfg->llq_ring_entry_size, supported_feat,
+		       llq_info->desc_list_entry_size);
+	}
+	if (unlikely(llq_info->desc_list_entry_size & 0x7)) {
+		/* The desc list entry size should be whole multiply of 8
+		 * This requirement comes from __iowrite64_copy()
+		 */
+		pr_err("Illegal entry size %d\n", llq_info->desc_list_entry_size);
+		return -EINVAL;
+	}
+
+	if (llq_info->desc_stride_ctrl == ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY)
+		llq_info->descs_per_entry = llq_info->desc_list_entry_size /
+			sizeof(struct ena_eth_io_tx_desc);
+	else
+		llq_info->descs_per_entry = 1;
+
+	supported_feat = llq_features->desc_num_before_header_supported;
+	if (likely(supported_feat & llq_default_cfg->llq_num_decs_before_header)) {
+		llq_info->descs_num_before_header = llq_default_cfg->llq_num_decs_before_header;
+	} else {
+		if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2) {
+			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
+		} else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1) {
+			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1;
+		} else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4) {
+			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4;
+		} else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8) {
+			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8;
+		} else {
+			pr_err("Invalid descs_num_before_header, supported: 0x%x\n",
+			       supported_feat);
+			return -EINVAL;
+		}
+
+		pr_err("Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
+		       llq_default_cfg->llq_num_decs_before_header,
+		       supported_feat, llq_info->descs_num_before_header);
+	}
+	/* Check for accelerated queue supported */
+	llq_accel_mode_get = llq_features->accel_mode.u.get;
+
+	llq_info->disable_meta_caching =
+		!!(llq_accel_mode_get.supported_flags &
+		   BIT(ENA_ADMIN_DISABLE_META_CACHING));
+
+	if (llq_accel_mode_get.supported_flags & BIT(ENA_ADMIN_LIMIT_TX_BURST))
+		llq_info->max_entries_in_tx_burst =
+			llq_accel_mode_get.max_tx_burst_size /
+			llq_default_cfg->llq_ring_entry_size_value;
+
+	rc = ena_com_set_llq(ena_dev);
+	if (rc)
+		pr_err("Cannot set LLQ configuration: %d\n", rc);
+
+	return rc;
+}
+
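[Editor's note] ena_com_config_llq_info() negotiates each low-latency-queue (LLQ) parameter against what the device advertises in llq_features, falling back to a supported value (and logging the substitution) or failing with -EINVAL. Callers pass in a pre-filled struct ena_llq_configurations; below is a minimal sketch of such defaults, using only the fields and enum values that appear in the function above. The helper name and the particular default choices are illustrative, not mandated by this patch:

	/* Sketch: plausible LLQ defaults a driver could hand to ena_com_config_llq_info(). */
	static void set_default_llq_configurations(struct ena_llq_configurations *llq_config)
	{
		llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER;
		llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
		llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
		llq_config->llq_ring_entry_size_value = 128;
		llq_config->llq_num_decs_before_header =
			ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
	}

Whatever survives the negotiation is finally pushed to the firmware through ena_com_set_llq().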
static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
							struct ena_com_admin_queue *admin_queue)
{
-	unsigned long flags;
+	unsigned long flags = 0;
	int ret;

	wait_for_completion_timeout(&comp_ctx->wait_event,
...
		admin_queue->stats.no_completion++;
		spin_unlock_irqrestore(&admin_queue->q_lock, flags);

-		if (comp_ctx->status == ENA_CMD_COMPLETED)
-			pr_err("The ena device have completion but the driver didn't receive any MSI-X interrupt (cmd %d)\n",
-			       comp_ctx->cmd_opcode);
-		else
-			pr_err("The ena device doesn't send any completion for the admin cmd %d status %d\n",
+		if (comp_ctx->status == ENA_CMD_COMPLETED) {
+			pr_err("The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d), autopolling mode is %s\n",
+			       comp_ctx->cmd_opcode,
+			       admin_queue->auto_polling ? "ON" : "OFF");
+			/* Check if fallback to polling is enabled */
+			if (admin_queue->auto_polling)
+				admin_queue->polling = true;
+		} else {
+			pr_err("The ena device didn't send a completion for the admin cmd %d status %d\n",
			       comp_ctx->cmd_opcode, comp_ctx->status);
-
-		admin_queue->running_state = false;
-		ret = -ETIME;
-		goto err;
+		}
+		/* Check if shifted to polling mode.
+		 * This will happen if there is a completion without an interrupt
+		 * and autopolling mode is enabled. Continuing normal execution in such case
+		 */
+		if (!admin_queue->polling) {
+			admin_queue->running_state = false;
+			ret = -ETIME;
+			goto err;
+		}
	}

	ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
...
	volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
		mmio_read->read_resp;
	u32 mmio_read_reg, ret, i;
-	unsigned long flags;
+	unsigned long flags = 0;
	u32 timeout = mmio_read->reg_read_to;

	might_sleep();
...
	}

	if (unlikely(i == timeout)) {
-		pr_err("reading reg failed for timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n",
+		pr_err("Reading reg failed for timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n",
		       mmio_read->seq_num, offset, read_resp->req_id,
		       read_resp->reg_off);
		ret = ENA_MMIO_READ_TIMEOUT;
...
	}

	if (read_resp->reg_off != offset) {
-		pr_err("Read failure: wrong offset provided");
+		pr_err("Read failure: wrong offset provided\n");
		ret = ENA_MMIO_READ_TIMEOUT;
	} else {
		ret = read_resp->reg_val;
...
					    sizeof(destroy_resp));

	if (unlikely(ret && (ret != -ENODEV)))
-		pr_err("failed to destroy io sq error: %d\n", ret);
+		pr_err("Failed to destroy io sq error: %d\n", ret);

	return ret;
}
...
	if (io_sq->desc_addr.virt_addr) {
		size = io_sq->desc_entry_size * io_sq->q_depth;

-		if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
-			dma_free_coherent(ena_dev->dmadev, size,
-					  io_sq->desc_addr.virt_addr,
-					  io_sq->desc_addr.phys_addr);
-		else
-			devm_kfree(ena_dev->dmadev, io_sq->desc_addr.virt_addr);
+		dma_free_coherent(ena_dev->dmadev, size,
+				  io_sq->desc_addr.virt_addr,
+				  io_sq->desc_addr.phys_addr);

		io_sq->desc_addr.virt_addr = NULL;
+	}
+
+	if (io_sq->bounce_buf_ctrl.base_buffer) {
+		devm_kfree(ena_dev->dmadev, io_sq->bounce_buf_ctrl.base_buffer);
+		io_sq->bounce_buf_ctrl.base_buffer = NULL;
	}
}

static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
				u16 exp_state)
{
-	u32 val, i;
+	u32 val, exp = 0;
+	unsigned long timeout_stamp;

-	/* Convert timeout from resolution of 100ms to ENA_POLL_MS */
-	timeout = (timeout * 100) / ENA_POLL_MS;
+	/* Convert timeout from resolution of 100ms to us resolution. */
+	timeout_stamp = jiffies + usecs_to_jiffies(100 * 1000 * timeout);

-	for (i = 0; i < timeout; i++) {
+	while (1) {
		val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);

		if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
...
		    exp_state)
			return 0;

-		msleep(ENA_POLL_MS);
-	}
+		if (time_is_before_jiffies(timeout_stamp))
+			return -ETIME;

-	return -ETIME;
+		ena_delay_exponential_backoff_us(exp++, ena_dev->ena_min_poll_delay_us);
+	}
}

static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev,
...
				  struct ena_admin_get_feat_resp *get_resp,
				  enum ena_admin_aq_feature_id feature_id,
				  dma_addr_t control_buf_dma_addr,
-				  u32 control_buff_size)
+				  u32 control_buff_size,
+				  u8 feature_ver)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_get_feat_cmd get_cmd;
...
				   &get_cmd.control_buffer.address,
				   control_buf_dma_addr);
	if (unlikely(ret)) {
-		pr_err("memory address set failed\n");
+		pr_err("Memory address set failed\n");
		return ret;
	}

	get_cmd.control_buffer.length = control_buff_size;
-
+	get_cmd.feat_common.feature_version = feature_ver;
	get_cmd.feat_common.feature_id = feature_id;

	ret = ena_com_execute_admin_command(admin_queue,
...

static int ena_com_get_feature(struct ena_com_dev *ena_dev,
			       struct ena_admin_get_feat_resp *get_resp,
-			       enum ena_admin_aq_feature_id feature_id)
+			       enum ena_admin_aq_feature_id feature_id,
+			       u8 feature_ver)
{
	return ena_com_get_feature_ex(ena_dev,
				      get_resp,
				      feature_id,
				      0,
-				      0);
+				      0,
+				      feature_ver);
+}
+
+int ena_com_get_current_hash_function(struct ena_com_dev *ena_dev)
+{
+	return ena_dev->rss.hash_func;
}

static void ena_com_hash_key_fill_default_key(struct ena_com_dev *ena_dev)
...
		(ena_dev->rss).hash_key;

	netdev_rss_key_fill(&hash_key->key, sizeof(hash_key->key));
-	/* The key is stored in the device in u32 array
-	 * as well as the API requires the key to be passed in this
-	 * format. Thus the size of our array should be divided by 4
+	/* The key buffer is stored in the device in an array of
+	 * uint32 elements.
	 */
-	hash_key->keys_num = sizeof(hash_key->key) / sizeof(u32);
-}
-
-int ena_com_get_current_hash_function(struct ena_com_dev *ena_dev)
-{
-	return ena_dev->rss.hash_func;
+	hash_key->key_parts = ENA_ADMIN_RSS_KEY_PARTS;
}

static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

+	if (!ena_com_check_supported_feature_id(ena_dev,
+						ENA_ADMIN_RSS_HASH_FUNCTION))
+		return -EOPNOTSUPP;
+
	rss->hash_key =
-		dma_zalloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
-				    &rss->hash_key_dma_addr, GFP_KERNEL);
+		dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
+				   &rss->hash_key_dma_addr, GFP_KERNEL);

	if (unlikely(!rss->hash_key))
		return -ENOMEM;
...
	struct ena_rss *rss = &ena_dev->rss;

	rss->hash_ctrl =
-		dma_zalloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
-				    &rss->hash_ctrl_dma_addr, GFP_KERNEL);
+		dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
+				   &rss->hash_ctrl_dma_addr, GFP_KERNEL);

	if (unlikely(!rss->hash_ctrl))
		return -ENOMEM;
...
	int ret;

	ret = ena_com_get_feature(ena_dev, &get_resp,
-				  ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
+				  ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG, 0);
	if (unlikely(ret))
		return ret;

	if ((get_resp.u.ind_table.min_size > log_size) ||
	    (get_resp.u.ind_table.max_size < log_size)) {
-		pr_err("indirect table size doesn't fit. requested size: %d while min is:%d and max %d\n",
+		pr_err("Indirect table size doesn't fit. requested size: %d while min is:%d and max %d\n",
		       1 << log_size, 1 << get_resp.u.ind_table.min_size,
		       1 << get_resp.u.ind_table.max_size);
		return -EINVAL;
...
		sizeof(struct ena_admin_rss_ind_table_entry);

	rss->rss_ind_tbl =
-		dma_zalloc_coherent(ena_dev->dmadev, tbl_size,
-				    &rss->rss_ind_tbl_dma_addr, GFP_KERNEL);
+		dma_alloc_coherent(ena_dev->dmadev, tbl_size,
+				   &rss->rss_ind_tbl_dma_addr, GFP_KERNEL);
	if (unlikely(!rss->rss_ind_tbl))
		goto mem_err1;

...
					   &create_cmd.sq_ba,
					   io_sq->desc_addr.phys_addr);
		if (unlikely(ret)) {
-			pr_err("memory address set failed\n");
+			pr_err("Memory address set failed\n");
			return ret;
		}
	}
...
			cmd_completion.llq_descriptors_offset);
	}

-	pr_debug("created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);
+	pr_debug("Created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);

	return ret;
}
...
	return 0;
}

-static int ena_com_ind_tbl_convert_from_device(struct ena_com_dev *ena_dev)
-{
-	u16 dev_idx_to_host_tbl[ENA_TOTAL_NUM_QUEUES] = { (u16)-1 };
-	struct ena_rss *rss = &ena_dev->rss;
-	u8 idx;
-	u16 i;
-
-	for (i = 0; i < ENA_TOTAL_NUM_QUEUES; i++)
-		dev_idx_to_host_tbl[ena_dev->io_sq_queues[i].idx] = i;
-
-	for (i = 0; i < 1 << rss->tbl_log_size; i++) {
-		if (rss->rss_ind_tbl[i].cq_idx > ENA_TOTAL_NUM_QUEUES)
-			return -EINVAL;
-		idx = (u8)rss->rss_ind_tbl[i].cq_idx;
-
-		if (dev_idx_to_host_tbl[idx] > ENA_TOTAL_NUM_QUEUES)
-			return -EINVAL;
-
-		rss->host_rss_ind_tbl[i] = dev_idx_to_host_tbl[idx];
-	}
-
-	return 0;
-}
-
-static int ena_com_init_interrupt_moderation_table(struct ena_com_dev *ena_dev)
-{
-	size_t size;
-
-	size = sizeof(struct ena_intr_moder_entry) * ENA_INTR_MAX_NUM_OF_LEVELS;
-
-	ena_dev->intr_moder_tbl =
-		devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
-	if (!ena_dev->intr_moder_tbl)
-		return -ENOMEM;
-
-	ena_com_config_default_interrupt_moderation_table(ena_dev);
-
-	return 0;
-}
-
static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
						 u16 intr_delay_resolution)
{
-	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
-	unsigned int i;
+	u16 prev_intr_delay_resolution = ena_dev->intr_delay_resolution;

-	if (!intr_delay_resolution) {
+	if (unlikely(!intr_delay_resolution)) {
		pr_err("Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
-		intr_delay_resolution = 1;
+		intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION;
	}
-	ena_dev->intr_delay_resolution = intr_delay_resolution;

	/* update Rx */
-	for (i = 0; i < ENA_INTR_MAX_NUM_OF_LEVELS; i++)
-		intr_moder_tbl[i].intr_moder_interval /= intr_delay_resolution;
+	ena_dev->intr_moder_rx_interval =
+		ena_dev->intr_moder_rx_interval *
+		prev_intr_delay_resolution /
+		intr_delay_resolution;

	/* update Tx */
-	ena_dev->intr_moder_tx_interval /= intr_delay_resolution;
+	ena_dev->intr_moder_tx_interval =
+		ena_dev->intr_moder_tx_interval *
+		prev_intr_delay_resolution /
+		intr_delay_resolution;
+
+	ena_dev->intr_delay_resolution = intr_delay_resolution;
}

/*****************************************************************************/
...
				   &create_cmd.cq_ba,
				   io_cq->cdesc_addr.phys_addr);
	if (unlikely(ret)) {
-		pr_err("memory address set failed\n");
+		pr_err("Memory address set failed\n");
		return ret;
	}

...
		(u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
		cmd_completion.numa_node_register_offset);

-	pr_debug("created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);
+	pr_debug("Created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);

	return ret;
}
...
void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
-	unsigned long flags;
+	unsigned long flags = 0;
+	u32 exp = 0;

	spin_lock_irqsave(&admin_queue->q_lock, flags);
	while (atomic_read(&admin_queue->outstanding_cmds) != 0) {
		spin_unlock_irqrestore(&admin_queue->q_lock, flags);
-		msleep(ENA_POLL_MS);
+		ena_delay_exponential_backoff_us(exp++,
+						 ena_dev->ena_min_poll_delay_us);
		spin_lock_irqsave(&admin_queue->q_lock, flags);
	}
	spin_unlock_irqrestore(&admin_queue->q_lock, flags);
...
void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
-	unsigned long flags;
+	unsigned long flags = 0;

	spin_lock_irqsave(&admin_queue->q_lock, flags);
	ena_dev->admin_queue.running_state = state;
...
	struct ena_admin_get_feat_resp get_resp;
	int ret;

-	ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG);
+	ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG, 0);
	if (ret) {
		pr_info("Can't get aenq configuration\n");
		return ret;
	}

	if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) {
-		pr_warn("Trying to set unsupported aenq events. supported flag: %x asked flag: %x\n",
+		pr_warn("Trying to set unsupported aenq events. supported flag: 0x%x asked flag: 0x%x\n",
			get_resp.u.aenq.supported_groups, groups_flag);
		return -EOPNOTSUPP;
	}
...
		return -ETIME;
	}

-	pr_info("ena device version: %d.%d\n",
+	pr_info("ENA device version: %d.%d\n",
		(ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
		ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
		ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);

-	if (ver < MIN_ENA_VER) {
-		pr_err("ENA version is lower than the minimal version the driver supports\n");
-		return -1;
-	}
-
-	pr_info("ena controller version: %d.%d.%d implementation version %d\n",
+	pr_info("ENA controller version: %d.%d.%d implementation version %d\n",
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >>
		ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) >>
...
	return 0;
}

+static void
+ena_com_free_ena_admin_queue_comp_ctx(struct ena_com_dev *ena_dev,
+				      struct ena_com_admin_queue *admin_queue)
+
+{
+	if (!admin_queue->comp_ctx)
+		return;
+
+	devm_kfree(ena_dev->dmadev, admin_queue->comp_ctx);
+
+	admin_queue->comp_ctx = NULL;
+}
+
void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
...
	struct ena_com_aenq *aenq = &ena_dev->aenq;
	u16 size;

-	if (admin_queue->comp_ctx)
-		devm_kfree(ena_dev->dmadev, admin_queue->comp_ctx);
-	admin_queue->comp_ctx = NULL;
+	ena_com_free_ena_admin_queue_comp_ctx(ena_dev, admin_queue);
+
	size = ADMIN_SQ_SIZE(admin_queue->q_depth);
	if (sq->entries)
		dma_free_coherent(ena_dev->dmadev, size, sq->entries,
...
	ena_dev->admin_queue.polling = polling;
}

+void ena_com_set_admin_auto_polling_mode(struct ena_com_dev *ena_dev,
+					 bool polling)
+{
+	ena_dev->admin_queue.auto_polling = polling;
+}
+
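[Editor's note] ena_com_set_admin_auto_polling_mode() is the opt-in for the fallback added earlier in this patch: if an admin command completes but the MSI-X interrupt is lost, the admin queue silently switches to polling instead of marking itself dead. A brief caller sketch follows; the wrapper function name is made up for illustration, and only ena_com_set_admin_auto_polling_mode() itself is introduced by this patch (ena_com_set_admin_polling_mode() already exists in this file, its body is visible just above):

	/* Sketch: enable the polling fallback right after admin-queue bring-up. */
	static void example_enable_admin_fallback(struct ena_com_dev *ena_dev)
	{
		/* Interrupt-driven admin completions remain the normal path. */
		ena_com_set_admin_polling_mode(ena_dev, false);

		/* Allow a silent switch to polling if a completion interrupt is lost. */
		ena_com_set_admin_auto_polling_mode(ena_dev, true);
	}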
1495 | 1680 | int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev) |
---|
1496 | 1681 | { |
---|
1497 | 1682 | struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read; |
---|
1498 | 1683 | |
---|
1499 | 1684 | spin_lock_init(&mmio_read->lock); |
---|
1500 | 1685 | mmio_read->read_resp = |
---|
1501 | | - dma_zalloc_coherent(ena_dev->dmadev, |
---|
1502 | | - sizeof(*mmio_read->read_resp), |
---|
1503 | | - &mmio_read->read_resp_dma_addr, GFP_KERNEL); |
---|
| 1686 | + dma_alloc_coherent(ena_dev->dmadev, |
---|
| 1687 | + sizeof(*mmio_read->read_resp), |
---|
| 1688 | + &mmio_read->read_resp_dma_addr, GFP_KERNEL); |
---|
1504 | 1689 | if (unlikely(!mmio_read->read_resp)) |
---|
1505 | | - return -ENOMEM; |
---|
| 1690 | + goto err; |
---|
1506 | 1691 | |
---|
1507 | 1692 | ena_com_mmio_reg_read_request_write_dev_addr(ena_dev); |
---|
1508 | 1693 | |
---|
.. | .. |
---|
1511 | 1696 | mmio_read->readless_supported = true; |
---|
1512 | 1697 | |
---|
1513 | 1698 | return 0; |
---|
| 1699 | + |
---|
| 1700 | +err: |
---|
| 1701 | + |
---|
| 1702 | + return -ENOMEM; |
---|
1514 | 1703 | } |
---|
1515 | 1704 | |
---|
1516 | 1705 | void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported) |
---|
.. | .. |
---|
1546 | 1735 | } |
---|
1547 | 1736 | |
---|
1548 | 1737 | int ena_com_admin_init(struct ena_com_dev *ena_dev, |
---|
1549 | | - struct ena_aenq_handlers *aenq_handlers, |
---|
1550 | | - bool init_spinlock) |
---|
| 1738 | + struct ena_aenq_handlers *aenq_handlers) |
---|
1551 | 1739 | { |
---|
1552 | 1740 | struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue; |
---|
1553 | 1741 | u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high; |
---|
.. | .. |
---|
1573 | 1761 | |
---|
1574 | 1762 | atomic_set(&admin_queue->outstanding_cmds, 0); |
---|
1575 | 1763 | |
---|
1576 | | - if (init_spinlock) |
---|
1577 | | - spin_lock_init(&admin_queue->q_lock); |
---|
| 1764 | + spin_lock_init(&admin_queue->q_lock); |
---|
1578 | 1765 | |
---|
1579 | 1766 | ret = ena_com_init_comp_ctxt(admin_queue); |
---|
1580 | 1767 | if (ret) |
---|
.. | .. |
---|
1621 | 1808 | if (ret) |
---|
1622 | 1809 | goto error; |
---|
1623 | 1810 | |
---|
| 1811 | + admin_queue->ena_dev = ena_dev; |
---|
1624 | 1812 | admin_queue->running_state = true; |
---|
1625 | 1813 | |
---|
1626 | 1814 | return 0; |
---|
.. | .. |
---|
1714 | 1902 | int ena_com_get_link_params(struct ena_com_dev *ena_dev, |
---|
1715 | 1903 | struct ena_admin_get_feat_resp *resp) |
---|
1716 | 1904 | { |
---|
1717 | | - return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG); |
---|
| 1905 | + return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG, 0); |
---|
1718 | 1906 | } |
---|
1719 | 1907 | |
---|
1720 | 1908 | int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev, |
---|
.. | .. |
---|
1724 | 1912 | int rc; |
---|
1725 | 1913 | |
---|
1726 | 1914 | rc = ena_com_get_feature(ena_dev, &get_resp, |
---|
1727 | | - ENA_ADMIN_DEVICE_ATTRIBUTES); |
---|
| 1915 | + ENA_ADMIN_DEVICE_ATTRIBUTES, 0); |
---|
1728 | 1916 | if (rc) |
---|
1729 | 1917 | return rc; |
---|
1730 | 1918 | |
---|
1731 | 1919 | memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr, |
---|
1732 | 1920 | sizeof(get_resp.u.dev_attr)); |
---|
| 1921 | + |
---|
1733 | 1922 | ena_dev->supported_features = get_resp.u.dev_attr.supported_features; |
---|
1734 | 1923 | |
---|
1735 | | - rc = ena_com_get_feature(ena_dev, &get_resp, |
---|
1736 | | - ENA_ADMIN_MAX_QUEUES_NUM); |
---|
1737 | | - if (rc) |
---|
1738 | | - return rc; |
---|
| 1924 | + if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) { |
---|
| 1925 | + rc = ena_com_get_feature(ena_dev, &get_resp, |
---|
| 1926 | + ENA_ADMIN_MAX_QUEUES_EXT, |
---|
| 1927 | + ENA_FEATURE_MAX_QUEUE_EXT_VER); |
---|
| 1928 | + if (rc) |
---|
| 1929 | + return rc; |
---|
1739 | 1930 | |
---|
1740 | | - memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue, |
---|
1741 | | - sizeof(get_resp.u.max_queue)); |
---|
1742 | | - ena_dev->tx_max_header_size = get_resp.u.max_queue.max_header_size; |
---|
| 1931 | + if (get_resp.u.max_queue_ext.version != ENA_FEATURE_MAX_QUEUE_EXT_VER) |
---|
| 1932 | + return -EINVAL; |
---|
| 1933 | + |
---|
| 1934 | + memcpy(&get_feat_ctx->max_queue_ext, &get_resp.u.max_queue_ext, |
---|
| 1935 | + sizeof(get_resp.u.max_queue_ext)); |
---|
| 1936 | + ena_dev->tx_max_header_size = |
---|
| 1937 | + get_resp.u.max_queue_ext.max_queue_ext.max_tx_header_size; |
---|
| 1938 | + } else { |
---|
| 1939 | + rc = ena_com_get_feature(ena_dev, &get_resp, |
---|
| 1940 | + ENA_ADMIN_MAX_QUEUES_NUM, 0); |
---|
| 1941 | + memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue, |
---|
| 1942 | + sizeof(get_resp.u.max_queue)); |
---|
| 1943 | + ena_dev->tx_max_header_size = |
---|
| 1944 | + get_resp.u.max_queue.max_header_size; |
---|
| 1945 | + |
---|
| 1946 | + if (rc) |
---|
| 1947 | + return rc; |
---|
| 1948 | + } |
---|
1743 | 1949 | |
---|
1744 | 1950 | rc = ena_com_get_feature(ena_dev, &get_resp, |
---|
1745 | | - ENA_ADMIN_AENQ_CONFIG); |
---|
| 1951 | + ENA_ADMIN_AENQ_CONFIG, 0); |
---|
1746 | 1952 | if (rc) |
---|
1747 | 1953 | return rc; |
---|
1748 | 1954 | |
---|
.. | .. |
---|
1750 | 1956 | sizeof(get_resp.u.aenq)); |
---|
1751 | 1957 | |
---|
1752 | 1958 | rc = ena_com_get_feature(ena_dev, &get_resp, |
---|
1753 | | - ENA_ADMIN_STATELESS_OFFLOAD_CONFIG); |
---|
| 1959 | + ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0); |
---|
1754 | 1960 | if (rc) |
---|
1755 | 1961 | return rc; |
---|
1756 | 1962 | |
---|
.. | .. |
---|
1760 | 1966 | /* Driver hints aren't a mandatory admin command. So in case the |
---|
1761 | 1967 | * command isn't supported, set driver hints to 0 |
---|
1762 | 1968 | */ |
---|
1763 | | - rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS); |
---|
| 1969 | + rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS, 0); |
---|
1764 | 1970 | |
---|
1765 | 1971 | if (!rc) |
---|
1766 | 1972 | memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints, |
---|
.. | .. |
---|
1768 | 1974 | else if (rc == -EOPNOTSUPP) |
---|
1769 | 1975 | memset(&get_feat_ctx->hw_hints, 0x0, |
---|
1770 | 1976 | sizeof(get_feat_ctx->hw_hints)); |
---|
| 1977 | + else |
---|
| 1978 | + return rc; |
---|
| 1979 | + |
---|
| 1980 | + rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_LLQ, 0); |
---|
| 1981 | + if (!rc) |
---|
| 1982 | + memcpy(&get_feat_ctx->llq, &get_resp.u.llq, |
---|
| 1983 | + sizeof(get_resp.u.llq)); |
---|
| 1984 | + else if (rc == -EOPNOTSUPP) |
---|
| 1985 | + memset(&get_feat_ctx->llq, 0x0, sizeof(get_feat_ctx->llq)); |
---|
1771 | 1986 | else |
---|
1772 | 1987 | return rc; |
---|
1773 | 1988 | |
---|
.. | .. |
---|
1782 | 1997 | /* ena_com_get_specific_aenq_cb: |
---|
1783 | 1998 | * return the handler that is relevant to the specific event group |
---|
1784 | 1999 | */ |
---|
1785 | | -static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *dev, |
---|
| 2000 | +static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *ena_dev, |
---|
1786 | 2001 | u16 group) |
---|
1787 | 2002 | { |
---|
1788 | | - struct ena_aenq_handlers *aenq_handlers = dev->aenq.aenq_handlers; |
---|
| 2003 | + struct ena_aenq_handlers *aenq_handlers = ena_dev->aenq.aenq_handlers; |
---|
1789 | 2004 | |
---|
1790 | 2005 | if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group]) |
---|
1791 | 2006 | return aenq_handlers->handlers[group]; |
---|
.. | .. |
---|
1797 | 2012 | * handles the incoming AENQ events: |
---|
1798 | 2013 | * pop events from the queue and apply the matching handler |
---|
1799 | 2014 | */ |
---|
1800 | | -void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data) |
---|
| 2015 | +void ena_com_aenq_intr_handler(struct ena_com_dev *ena_dev, void *data) |
---|
1801 | 2016 | { |
---|
1802 | 2017 | struct ena_admin_aenq_entry *aenq_e; |
---|
1803 | 2018 | struct ena_admin_aenq_common_desc *aenq_common; |
---|
1804 | | - struct ena_com_aenq *aenq = &dev->aenq; |
---|
| 2019 | + struct ena_com_aenq *aenq = &ena_dev->aenq; |
---|
| 2020 | + u64 timestamp; |
---|
1805 | 2021 | ena_aenq_handler handler_cb; |
---|
1806 | 2022 | u16 masked_head, processed = 0; |
---|
1807 | 2023 | u8 phase; |
---|
.. | .. |
---|
1819 | 2035 | */ |
---|
1820 | 2036 | dma_rmb(); |
---|
1821 | 2037 | |
---|
1822 | | - pr_debug("AENQ! Group[%x] Syndrom[%x] timestamp: [%llus]\n", |
---|
1823 | | - aenq_common->group, aenq_common->syndrom, |
---|
1824 | | - (u64)aenq_common->timestamp_low + |
---|
1825 | | - ((u64)aenq_common->timestamp_high << 32)); |
---|
| 2038 | + timestamp = (u64)aenq_common->timestamp_low | |
---|
| 2039 | + ((u64)aenq_common->timestamp_high << 32); |
---|
| 2040 | + |
---|
| 2041 | + pr_debug("AENQ! Group[%x] Syndrome[%x] timestamp: [%llus]\n", |
---|
| 2042 | + aenq_common->group, aenq_common->syndrome, timestamp); |
---|
1826 | 2043 | |
---|
1827 | 2044 | /* Handle specific event */ |
---|
1828 | | - handler_cb = ena_com_get_specific_aenq_cb(dev, |
---|
| 2045 | + handler_cb = ena_com_get_specific_aenq_cb(ena_dev, |
---|
1829 | 2046 | aenq_common->group); |
---|
1830 | 2047 | handler_cb(data, aenq_e); /* call the actual event handler */ |
---|
1831 | 2048 | |
---|
.. | .. |
---|
1851 | 2068 | /* write the aenq doorbell after all AENQ descriptors were read */ |
---|
1852 | 2069 | mb(); |
---|
1853 | 2070 | writel_relaxed((u32)aenq->head, |
---|
1854 | | - dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF); |
---|
1855 | | - mmiowb(); |
---|
| 2071 | + ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF); |
---|
1856 | 2072 | } |
---|
1857 | 2073 | |
---|
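The handler now builds the 64-bit AENQ timestamp by OR-ing the two 32-bit descriptor words instead of adding them; the result is the same for valid inputs, but the bit-concatenation intent is clearer. A small standalone illustration:

```c
#include <stdint.h>
#include <stdio.h>

/* Rebuild a 64-bit timestamp from the low/high 32-bit halves carried in
 * the AENQ descriptor, as done in the handler above.
 */
static uint64_t aenq_timestamp(uint32_t low, uint32_t high)
{
	return (uint64_t)low | ((uint64_t)high << 32);
}

int main(void)
{
	printf("0x%llx\n", (unsigned long long)aenq_timestamp(0xdeadbeefu, 0x1u));
	return 0;
}
```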
1858 | 2074 | int ena_com_dev_reset(struct ena_com_dev *ena_dev, |
---|
.. | .. |
---|
1944 | 2160 | return ret; |
---|
1945 | 2161 | } |
---|
1946 | 2162 | |
---|
| 2163 | +int ena_com_get_eni_stats(struct ena_com_dev *ena_dev, |
---|
| 2164 | + struct ena_admin_eni_stats *stats) |
---|
| 2165 | +{ |
---|
| 2166 | + struct ena_com_stats_ctx ctx; |
---|
| 2167 | + int ret; |
---|
| 2168 | + |
---|
| 2169 | + memset(&ctx, 0x0, sizeof(ctx)); |
---|
| 2170 | + ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_ENI); |
---|
| 2171 | + if (likely(ret == 0)) |
---|
| 2172 | + memcpy(stats, &ctx.get_resp.u.eni_stats, |
---|
| 2173 | + sizeof(ctx.get_resp.u.eni_stats)); |
---|
| 2174 | + |
---|
| 2175 | + return ret; |
---|
| 2176 | +} |
---|
| 2177 | + |
---|
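ena_com_get_eni_stats() is new in this change. Below is a hedged sketch of how a driver might call it; the wrapper function and its error handling are illustrative, only the ena_com_* call and types come from this patch:

```c
#include "ena_com.h"

/* Hypothetical caller: fetch the per-ENI counters and fall back quietly
 * when the device or firmware does not expose them.
 */
static void example_dump_eni_stats(struct ena_com_dev *ena_dev)
{
	struct ena_admin_eni_stats eni_stats;
	int rc;

	rc = ena_com_get_eni_stats(ena_dev, &eni_stats);
	if (rc) {
		pr_info("ENI stats not available, rc %d\n", rc);
		return;
	}

	/* eni_stats now holds the device-reported ENI counters; a real
	 * driver would export them via ethtool statistics.
	 */
}
```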
1947 | 2178 | int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev, |
---|
1948 | 2179 | struct ena_admin_basic_stats *stats) |
---|
1949 | 2180 | { |
---|
.. | .. |
---|
1953 | 2184 | memset(&ctx, 0x0, sizeof(ctx)); |
---|
1954 | 2185 | ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC); |
---|
1955 | 2186 | if (likely(ret == 0)) |
---|
1956 | | - memcpy(stats, &ctx.get_resp.basic_stats, |
---|
1957 | | - sizeof(ctx.get_resp.basic_stats)); |
---|
| 2187 | + memcpy(stats, &ctx.get_resp.u.basic_stats, |
---|
| 2188 | + sizeof(ctx.get_resp.u.basic_stats)); |
---|
1958 | 2189 | |
---|
1959 | 2190 | return ret; |
---|
1960 | 2191 | } |
---|
.. | .. |
---|
1998 | 2229 | struct ena_admin_get_feat_resp resp; |
---|
1999 | 2230 | |
---|
2000 | 2231 | ret = ena_com_get_feature(ena_dev, &resp, |
---|
2001 | | - ENA_ADMIN_STATELESS_OFFLOAD_CONFIG); |
---|
| 2232 | + ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0); |
---|
2002 | 2233 | if (unlikely(ret)) { |
---|
2003 | 2234 | pr_err("Failed to get offload capabilities %d\n", ret); |
---|
2004 | 2235 | return ret; |
---|
.. | .. |
---|
2027 | 2258 | |
---|
2028 | 2259 | /* Validate hash function is supported */ |
---|
2029 | 2260 | ret = ena_com_get_feature(ena_dev, &get_resp, |
---|
2030 | | - ENA_ADMIN_RSS_HASH_FUNCTION); |
---|
| 2261 | + ENA_ADMIN_RSS_HASH_FUNCTION, 0); |
---|
2031 | 2262 | if (unlikely(ret)) |
---|
2032 | 2263 | return ret; |
---|
2033 | 2264 | |
---|
.. | .. |
---|
2050 | 2281 | &cmd.control_buffer.address, |
---|
2051 | 2282 | rss->hash_key_dma_addr); |
---|
2052 | 2283 | if (unlikely(ret)) { |
---|
2053 | | - pr_err("memory address set failed\n"); |
---|
| 2284 | + pr_err("Memory address set failed\n"); |
---|
2054 | 2285 | return ret; |
---|
2055 | 2286 | } |
---|
2056 | 2287 | |
---|
.. | .. |
---|
2074 | 2305 | enum ena_admin_hash_functions func, |
---|
2075 | 2306 | const u8 *key, u16 key_len, u32 init_val) |
---|
2076 | 2307 | { |
---|
2077 | | - struct ena_rss *rss = &ena_dev->rss; |
---|
| 2308 | + struct ena_admin_feature_rss_flow_hash_control *hash_key; |
---|
2078 | 2309 | struct ena_admin_get_feat_resp get_resp; |
---|
2079 | | - struct ena_admin_feature_rss_flow_hash_control *hash_key = |
---|
2080 | | - rss->hash_key; |
---|
| 2310 | + enum ena_admin_hash_functions old_func; |
---|
| 2311 | + struct ena_rss *rss = &ena_dev->rss; |
---|
2081 | 2312 | int rc; |
---|
| 2313 | + |
---|
| 2314 | + hash_key = rss->hash_key; |
---|
2082 | 2315 | |
---|
2083 | 2316 | /* Make sure size is a multiple of DWs */ |
---|
2084 | 2317 | if (unlikely(key_len & 0x3)) |
---|
.. | .. |
---|
2087 | 2320 | rc = ena_com_get_feature_ex(ena_dev, &get_resp, |
---|
2088 | 2321 | ENA_ADMIN_RSS_HASH_FUNCTION, |
---|
2089 | 2322 | rss->hash_key_dma_addr, |
---|
2090 | | - sizeof(*rss->hash_key)); |
---|
| 2323 | + sizeof(*rss->hash_key), 0); |
---|
2091 | 2324 | if (unlikely(rc)) |
---|
2092 | 2325 | return rc; |
---|
2093 | 2326 | |
---|
2094 | | - if (!((1 << func) & get_resp.u.flow_hash_func.supported_func)) { |
---|
| 2327 | + if (!(BIT(func) & get_resp.u.flow_hash_func.supported_func)) { |
---|
2095 | 2328 | pr_err("Flow hash function %d isn't supported\n", func); |
---|
2096 | 2329 | return -EOPNOTSUPP; |
---|
2097 | 2330 | } |
---|
.. | .. |
---|
2106 | 2339 | } |
---|
2107 | 2340 | memcpy(hash_key->key, key, key_len); |
---|
2108 | 2341 | rss->hash_init_val = init_val; |
---|
2109 | | - hash_key->keys_num = key_len >> 2; |
---|
| 2342 | + hash_key->key_parts = key_len / sizeof(hash_key->key[0]); |
---|
2110 | 2343 | } |
---|
2111 | 2344 | break; |
---|
2112 | 2345 | case ENA_ADMIN_CRC32: |
---|
.. | .. |
---|
2117 | 2350 | return -EINVAL; |
---|
2118 | 2351 | } |
---|
2119 | 2352 | |
---|
| 2353 | + old_func = rss->hash_func; |
---|
2120 | 2354 | rss->hash_func = func; |
---|
2121 | 2355 | rc = ena_com_set_hash_function(ena_dev); |
---|
2122 | 2356 | |
---|
2123 | 2357 | /* Restore the old function */ |
---|
2124 | 2358 | if (unlikely(rc)) |
---|
2125 | | - ena_com_get_hash_function(ena_dev, NULL, NULL); |
---|
| 2359 | + rss->hash_func = old_func; |
---|
2126 | 2360 | |
---|
2127 | 2361 | return rc; |
---|
2128 | 2362 | } |
---|
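The key length is now converted to a number of key parts by dividing by the element size instead of the magic ">> 2". A standalone sketch of that arithmetic, assuming 32-bit key words as the old shift implied:

```c
#include <stdint.h>
#include <stdio.h>

/* Number of hash-key parts for a DW-aligned key length; mirrors
 * key_len / sizeof(hash_key->key[0]) in the code above.
 */
static uint32_t key_parts_from_len(uint32_t key_len_bytes)
{
	return key_len_bytes / sizeof(uint32_t);
}

int main(void)
{
	/* A 40-byte Toeplitz key maps to 10 32-bit key parts. */
	printf("%u\n", key_parts_from_len(40));
	return 0;
}
```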
2129 | 2363 | |
---|
2130 | 2364 | int ena_com_get_hash_function(struct ena_com_dev *ena_dev, |
---|
2131 | | - enum ena_admin_hash_functions *func, |
---|
2132 | | - u8 *key) |
---|
| 2365 | + enum ena_admin_hash_functions *func) |
---|
2133 | 2366 | { |
---|
2134 | 2367 | struct ena_rss *rss = &ena_dev->rss; |
---|
2135 | 2368 | struct ena_admin_get_feat_resp get_resp; |
---|
2136 | | - struct ena_admin_feature_rss_flow_hash_control *hash_key = |
---|
2137 | | - rss->hash_key; |
---|
2138 | 2369 | int rc; |
---|
2139 | 2370 | |
---|
2140 | 2371 | if (unlikely(!func)) |
---|
.. | .. |
---|
2143 | 2374 | rc = ena_com_get_feature_ex(ena_dev, &get_resp, |
---|
2144 | 2375 | ENA_ADMIN_RSS_HASH_FUNCTION, |
---|
2145 | 2376 | rss->hash_key_dma_addr, |
---|
2146 | | - sizeof(*rss->hash_key)); |
---|
| 2377 | + sizeof(*rss->hash_key), 0); |
---|
2147 | 2378 | if (unlikely(rc)) |
---|
2148 | 2379 | return rc; |
---|
2149 | 2380 | |
---|
.. | .. |
---|
2154 | 2385 | |
---|
2155 | 2386 | *func = rss->hash_func; |
---|
2156 | 2387 | |
---|
| 2388 | + return 0; |
---|
| 2389 | +} |
---|
| 2390 | + |
---|
| 2391 | +int ena_com_get_hash_key(struct ena_com_dev *ena_dev, u8 *key) |
---|
| 2392 | +{ |
---|
| 2393 | + struct ena_admin_feature_rss_flow_hash_control *hash_key = |
---|
| 2394 | + ena_dev->rss.hash_key; |
---|
| 2395 | + |
---|
2157 | 2396 | if (key) |
---|
2158 | | - memcpy(key, hash_key->key, (size_t)(hash_key->keys_num) << 2); |
---|
| 2397 | + memcpy(key, hash_key->key, |
---|
| 2398 | + (size_t)(hash_key->key_parts) * sizeof(hash_key->key[0])); |
---|
2159 | 2399 | |
---|
2160 | 2400 | return 0; |
---|
2161 | 2401 | } |
---|
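Retrieving the key has been split out of ena_com_get_hash_function() into the new ena_com_get_hash_key(). A sketch of how a caller might use the two getters together; the wrapper itself is illustrative:

```c
#include "ena_com.h"

/* Hypothetical helper: report the active hash function and copy the raw
 * key into a caller-supplied buffer using the split getters above.
 */
static int example_report_rss(struct ena_com_dev *ena_dev, u8 *key_buf)
{
	enum ena_admin_hash_functions func;
	int rc;

	rc = ena_com_get_hash_function(ena_dev, &func);
	if (rc)
		return rc;

	/* The key is now fetched separately from the function query. */
	return ena_com_get_hash_key(ena_dev, key_buf);
}
```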
.. | .. |
---|
2171 | 2411 | rc = ena_com_get_feature_ex(ena_dev, &get_resp, |
---|
2172 | 2412 | ENA_ADMIN_RSS_HASH_INPUT, |
---|
2173 | 2413 | rss->hash_ctrl_dma_addr, |
---|
2174 | | - sizeof(*rss->hash_ctrl)); |
---|
| 2414 | + sizeof(*rss->hash_ctrl), 0); |
---|
2175 | 2415 | if (unlikely(rc)) |
---|
2176 | 2416 | return rc; |
---|
2177 | 2417 | |
---|
.. | .. |
---|
2211 | 2451 | &cmd.control_buffer.address, |
---|
2212 | 2452 | rss->hash_ctrl_dma_addr); |
---|
2213 | 2453 | if (unlikely(ret)) { |
---|
2214 | | - pr_err("memory address set failed\n"); |
---|
| 2454 | + pr_err("Memory address set failed\n"); |
---|
2215 | 2455 | return ret; |
---|
2216 | 2456 | } |
---|
2217 | 2457 | cmd.control_buffer.length = sizeof(*hash_ctrl); |
---|
.. | .. |
---|
2272 | 2512 | available_fields = hash_ctrl->selected_fields[i].fields & |
---|
2273 | 2513 | hash_ctrl->supported_fields[i].fields; |
---|
2274 | 2514 | if (available_fields != hash_ctrl->selected_fields[i].fields) { |
---|
2275 | | - pr_err("hash control doesn't support all the desire configuration. proto %x supported %x selected %x\n", |
---|
| 2515 | + pr_err("Hash control doesn't support all the desire configuration. proto %x supported %x selected %x\n", |
---|
2276 | 2516 | i, hash_ctrl->supported_fields[i].fields, |
---|
2277 | 2517 | hash_ctrl->selected_fields[i].fields); |
---|
2278 | 2518 | return -EOPNOTSUPP; |
---|
.. | .. |
---|
2310 | 2550 | /* Make sure all the fields are supported */ |
---|
2311 | 2551 | supported_fields = hash_ctrl->supported_fields[proto].fields; |
---|
2312 | 2552 | if ((hash_fields & supported_fields) != hash_fields) { |
---|
2313 | | - pr_err("proto %d doesn't support the required fields %x. supports only: %x\n", |
---|
| 2553 | + pr_err("Proto %d doesn't support the required fields %x. supports only: %x\n", |
---|
2314 | 2554 | proto, hash_fields, supported_fields); |
---|
2315 | 2555 | } |
---|
2316 | 2556 | |
---|
.. | .. |
---|
2350 | 2590 | int ret; |
---|
2351 | 2591 | |
---|
2352 | 2592 | if (!ena_com_check_supported_feature_id( |
---|
2353 | | - ena_dev, ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) { |
---|
| 2593 | + ena_dev, ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG)) { |
---|
2354 | 2594 | pr_debug("Feature %d isn't supported\n", |
---|
2355 | | - ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG); |
---|
| 2595 | + ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG); |
---|
2356 | 2596 | return -EOPNOTSUPP; |
---|
2357 | 2597 | } |
---|
2358 | 2598 | |
---|
.. | .. |
---|
2367 | 2607 | cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE; |
---|
2368 | 2608 | cmd.aq_common_descriptor.flags = |
---|
2369 | 2609 | ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK; |
---|
2370 | | - cmd.feat_common.feature_id = ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG; |
---|
| 2610 | + cmd.feat_common.feature_id = ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG; |
---|
2371 | 2611 | cmd.u.ind_table.size = rss->tbl_log_size; |
---|
2372 | 2612 | cmd.u.ind_table.inline_index = 0xFFFFFFFF; |
---|
2373 | 2613 | |
---|
.. | .. |
---|
2375 | 2615 | &cmd.control_buffer.address, |
---|
2376 | 2616 | rss->rss_ind_tbl_dma_addr); |
---|
2377 | 2617 | if (unlikely(ret)) { |
---|
2378 | | - pr_err("memory address set failed\n"); |
---|
| 2618 | + pr_err("Memory address set failed\n"); |
---|
2379 | 2619 | return ret; |
---|
2380 | 2620 | } |
---|
2381 | 2621 | |
---|
.. | .. |
---|
2405 | 2645 | sizeof(struct ena_admin_rss_ind_table_entry); |
---|
2406 | 2646 | |
---|
2407 | 2647 | rc = ena_com_get_feature_ex(ena_dev, &get_resp, |
---|
2408 | | - ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG, |
---|
| 2648 | + ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG, |
---|
2409 | 2649 | rss->rss_ind_tbl_dma_addr, |
---|
2410 | | - tbl_size); |
---|
| 2650 | + tbl_size, 0); |
---|
2411 | 2651 | if (unlikely(rc)) |
---|
2412 | 2652 | return rc; |
---|
2413 | 2653 | |
---|
2414 | 2654 | if (!ind_tbl) |
---|
2415 | 2655 | return 0; |
---|
2416 | | - |
---|
2417 | | - rc = ena_com_ind_tbl_convert_from_device(ena_dev); |
---|
2418 | | - if (unlikely(rc)) |
---|
2419 | | - return rc; |
---|
2420 | 2656 | |
---|
2421 | 2657 | for (i = 0; i < (1 << rss->tbl_log_size); i++) |
---|
2422 | 2658 | ind_tbl[i] = rss->host_rss_ind_tbl[i]; |
---|
.. | .. |
---|
2434 | 2670 | if (unlikely(rc)) |
---|
2435 | 2671 | goto err_indr_tbl; |
---|
2436 | 2672 | |
---|
| 2673 | + /* The following function might return unsupported in case the |
---|
| 2674 | + * device doesn't support setting the key / hash function. We can safely |
---|
| 2675 | + * ignore this error and have indirection table support only. |
---|
| 2676 | + */ |
---|
2437 | 2677 | rc = ena_com_hash_key_allocate(ena_dev); |
---|
2438 | | - if (unlikely(rc)) |
---|
| 2678 | + if (likely(!rc)) |
---|
| 2679 | + ena_com_hash_key_fill_default_key(ena_dev); |
---|
| 2680 | + else if (rc != -EOPNOTSUPP) |
---|
2439 | 2681 | goto err_hash_key; |
---|
2440 | | - |
---|
2441 | | - ena_com_hash_key_fill_default_key(ena_dev); |
---|
2442 | 2682 | |
---|
2443 | 2683 | rc = ena_com_hash_ctrl_init(ena_dev); |
---|
2444 | 2684 | if (unlikely(rc)) |
---|
.. | .. |
---|
2469 | 2709 | struct ena_host_attribute *host_attr = &ena_dev->host_attr; |
---|
2470 | 2710 | |
---|
2471 | 2711 | host_attr->host_info = |
---|
2472 | | - dma_zalloc_coherent(ena_dev->dmadev, SZ_4K, |
---|
2473 | | - &host_attr->host_info_dma_addr, GFP_KERNEL); |
---|
| 2712 | + dma_alloc_coherent(ena_dev->dmadev, SZ_4K, |
---|
| 2713 | + &host_attr->host_info_dma_addr, GFP_KERNEL); |
---|
2474 | 2714 | if (unlikely(!host_attr->host_info)) |
---|
2475 | 2715 | return -ENOMEM; |
---|
| 2716 | + |
---|
| 2717 | + host_attr->host_info->ena_spec_version = ((ENA_COMMON_SPEC_VERSION_MAJOR << |
---|
| 2718 | + ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) | |
---|
| 2719 | + (ENA_COMMON_SPEC_VERSION_MINOR)); |
---|
2476 | 2720 | |
---|
2477 | 2721 | return 0; |
---|
2478 | 2722 | } |
---|
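The host-info block now carries the packed spec version: major in the upper bits (per the register shift) and minor in the lower bits. A standalone illustration with example values; the real constants come from the ENA spec and register headers:

```c
#include <stdint.h>
#include <stdio.h>

/* Example values only. */
#define EXAMPLE_SPEC_MAJOR	2
#define EXAMPLE_SPEC_MINOR	0
#define EXAMPLE_MAJOR_SHIFT	8

/* Pack "major.minor" into a single word the way ena_spec_version is
 * built in the code above.
 */
static uint32_t pack_spec_version(uint32_t major, uint32_t minor)
{
	return (major << EXAMPLE_MAJOR_SHIFT) | minor;
}

int main(void)
{
	printf("0x%x\n", pack_spec_version(EXAMPLE_SPEC_MAJOR, EXAMPLE_SPEC_MINOR));
	return 0;
}
```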
.. | .. |
---|
2483 | 2727 | struct ena_host_attribute *host_attr = &ena_dev->host_attr; |
---|
2484 | 2728 | |
---|
2485 | 2729 | host_attr->debug_area_virt_addr = |
---|
2486 | | - dma_zalloc_coherent(ena_dev->dmadev, debug_area_size, |
---|
2487 | | - &host_attr->debug_area_dma_addr, GFP_KERNEL); |
---|
| 2730 | + dma_alloc_coherent(ena_dev->dmadev, debug_area_size, |
---|
| 2731 | + &host_attr->debug_area_dma_addr, GFP_KERNEL); |
---|
2488 | 2732 | if (unlikely(!host_attr->debug_area_virt_addr)) { |
---|
2489 | 2733 | host_attr->debug_area_size = 0; |
---|
2490 | 2734 | return -ENOMEM; |
---|
.. | .. |
---|
2541 | 2785 | &cmd.u.host_attr.debug_ba, |
---|
2542 | 2786 | host_attr->debug_area_dma_addr); |
---|
2543 | 2787 | if (unlikely(ret)) { |
---|
2544 | | - pr_err("memory address set failed\n"); |
---|
| 2788 | + pr_err("Memory address set failed\n"); |
---|
2545 | 2789 | return ret; |
---|
2546 | 2790 | } |
---|
2547 | 2791 | |
---|
.. | .. |
---|
2549 | 2793 | &cmd.u.host_attr.os_info_ba, |
---|
2550 | 2794 | host_attr->host_info_dma_addr); |
---|
2551 | 2795 | if (unlikely(ret)) { |
---|
2552 | | - pr_err("memory address set failed\n"); |
---|
| 2796 | + pr_err("Memory address set failed\n"); |
---|
2553 | 2797 | return ret; |
---|
2554 | 2798 | } |
---|
2555 | 2799 | |
---|
.. | .. |
---|
2574 | 2818 | ENA_ADMIN_INTERRUPT_MODERATION); |
---|
2575 | 2819 | } |
---|
2576 | 2820 | |
---|
2577 | | -int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev, |
---|
2578 | | - u32 tx_coalesce_usecs) |
---|
| 2821 | +static int ena_com_update_nonadaptive_moderation_interval(u32 coalesce_usecs, |
---|
| 2822 | + u32 intr_delay_resolution, |
---|
| 2823 | + u32 *intr_moder_interval) |
---|
2579 | 2824 | { |
---|
2580 | | - if (!ena_dev->intr_delay_resolution) { |
---|
| 2825 | + if (!intr_delay_resolution) { |
---|
2581 | 2826 | pr_err("Illegal interrupt delay granularity value\n"); |
---|
2582 | 2827 | return -EFAULT; |
---|
2583 | 2828 | } |
---|
2584 | 2829 | |
---|
2585 | | - ena_dev->intr_moder_tx_interval = tx_coalesce_usecs / |
---|
2586 | | - ena_dev->intr_delay_resolution; |
---|
| 2830 | + *intr_moder_interval = coalesce_usecs / intr_delay_resolution; |
---|
2587 | 2831 | |
---|
2588 | 2832 | return 0; |
---|
| 2833 | +} |
---|
| 2834 | + |
---|
| 2835 | +int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev, |
---|
| 2836 | + u32 tx_coalesce_usecs) |
---|
| 2837 | +{ |
---|
| 2838 | + return ena_com_update_nonadaptive_moderation_interval(tx_coalesce_usecs, |
---|
| 2839 | + ena_dev->intr_delay_resolution, |
---|
| 2840 | + &ena_dev->intr_moder_tx_interval); |
---|
2589 | 2841 | } |
---|
2590 | 2842 | |
---|
2591 | 2843 | int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev, |
---|
2592 | 2844 | u32 rx_coalesce_usecs) |
---|
2593 | 2845 | { |
---|
2594 | | - if (!ena_dev->intr_delay_resolution) { |
---|
2595 | | - pr_err("Illegal interrupt delay granularity value\n"); |
---|
2596 | | - return -EFAULT; |
---|
2597 | | - } |
---|
2598 | | - |
---|
2599 | | - /* We use LOWEST entry of moderation table for storing |
---|
2600 | | - * nonadaptive interrupt coalescing values |
---|
2601 | | - */ |
---|
2602 | | - ena_dev->intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval = |
---|
2603 | | - rx_coalesce_usecs / ena_dev->intr_delay_resolution; |
---|
2604 | | - |
---|
2605 | | - return 0; |
---|
2606 | | -} |
---|
2607 | | - |
---|
2608 | | -void ena_com_destroy_interrupt_moderation(struct ena_com_dev *ena_dev) |
---|
2609 | | -{ |
---|
2610 | | - if (ena_dev->intr_moder_tbl) |
---|
2611 | | - devm_kfree(ena_dev->dmadev, ena_dev->intr_moder_tbl); |
---|
2612 | | - ena_dev->intr_moder_tbl = NULL; |
---|
| 2846 | + return ena_com_update_nonadaptive_moderation_interval(rx_coalesce_usecs, |
---|
| 2847 | + ena_dev->intr_delay_resolution, |
---|
| 2848 | + &ena_dev->intr_moder_rx_interval); |
---|
2613 | 2849 | } |
---|
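The TX and RX setters now share one helper that converts a microsecond target into device moderation units. A standalone sketch of the conversion, including the zero-resolution guard:

```c
#include <stdint.h>
#include <stdio.h>

/* Convert a coalescing target in microseconds into device interrupt
 * moderation units; a zero delay resolution is rejected, as above.
 */
static int usecs_to_intr_units(uint32_t usecs, uint32_t resolution,
			       uint32_t *out)
{
	if (!resolution)
		return -1;	/* the driver code returns -EFAULT here */

	*out = usecs / resolution;
	return 0;
}

int main(void)
{
	uint32_t units;

	if (!usecs_to_intr_units(64, 2, &units))
		printf("%u units\n", units);	/* prints "32 units" */
	return 0;
}
```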
2614 | 2850 | |
---|
2615 | 2851 | int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev) |
---|
.. | .. |
---|
2619 | 2855 | int rc; |
---|
2620 | 2856 | |
---|
2621 | 2857 | rc = ena_com_get_feature(ena_dev, &get_resp, |
---|
2622 | | - ENA_ADMIN_INTERRUPT_MODERATION); |
---|
| 2858 | + ENA_ADMIN_INTERRUPT_MODERATION, 0); |
---|
2623 | 2859 | |
---|
2624 | 2860 | if (rc) { |
---|
2625 | 2861 | if (rc == -EOPNOTSUPP) { |
---|
.. | .. |
---|
2636 | 2872 | return rc; |
---|
2637 | 2873 | } |
---|
2638 | 2874 | |
---|
2639 | | - rc = ena_com_init_interrupt_moderation_table(ena_dev); |
---|
2640 | | - if (rc) |
---|
2641 | | - goto err; |
---|
2642 | | - |
---|
2643 | 2875 | /* if moderation is supported by device, set the interrupt delay resolution */ |
---|
2644 | 2876 | delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution; |
---|
2645 | 2877 | ena_com_update_intr_delay_resolution(ena_dev, delay_resolution); |
---|
2646 | | - ena_com_enable_adaptive_moderation(ena_dev); |
---|
| 2878 | + |
---|
| 2879 | + /* Disable adaptive moderation by default - can be enabled later */ |
---|
| 2880 | + ena_com_disable_adaptive_moderation(ena_dev); |
---|
2647 | 2881 | |
---|
2648 | 2882 | return 0; |
---|
2649 | | -err: |
---|
2650 | | - ena_com_destroy_interrupt_moderation(ena_dev); |
---|
2651 | | - return rc; |
---|
2652 | | -} |
---|
2653 | | - |
---|
2654 | | -void ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev) |
---|
2655 | | -{ |
---|
2656 | | - struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl; |
---|
2657 | | - |
---|
2658 | | - if (!intr_moder_tbl) |
---|
2659 | | - return; |
---|
2660 | | - |
---|
2661 | | - intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval = |
---|
2662 | | - ENA_INTR_LOWEST_USECS; |
---|
2663 | | - intr_moder_tbl[ENA_INTR_MODER_LOWEST].pkts_per_interval = |
---|
2664 | | - ENA_INTR_LOWEST_PKTS; |
---|
2665 | | - intr_moder_tbl[ENA_INTR_MODER_LOWEST].bytes_per_interval = |
---|
2666 | | - ENA_INTR_LOWEST_BYTES; |
---|
2667 | | - |
---|
2668 | | - intr_moder_tbl[ENA_INTR_MODER_LOW].intr_moder_interval = |
---|
2669 | | - ENA_INTR_LOW_USECS; |
---|
2670 | | - intr_moder_tbl[ENA_INTR_MODER_LOW].pkts_per_interval = |
---|
2671 | | - ENA_INTR_LOW_PKTS; |
---|
2672 | | - intr_moder_tbl[ENA_INTR_MODER_LOW].bytes_per_interval = |
---|
2673 | | - ENA_INTR_LOW_BYTES; |
---|
2674 | | - |
---|
2675 | | - intr_moder_tbl[ENA_INTR_MODER_MID].intr_moder_interval = |
---|
2676 | | - ENA_INTR_MID_USECS; |
---|
2677 | | - intr_moder_tbl[ENA_INTR_MODER_MID].pkts_per_interval = |
---|
2678 | | - ENA_INTR_MID_PKTS; |
---|
2679 | | - intr_moder_tbl[ENA_INTR_MODER_MID].bytes_per_interval = |
---|
2680 | | - ENA_INTR_MID_BYTES; |
---|
2681 | | - |
---|
2682 | | - intr_moder_tbl[ENA_INTR_MODER_HIGH].intr_moder_interval = |
---|
2683 | | - ENA_INTR_HIGH_USECS; |
---|
2684 | | - intr_moder_tbl[ENA_INTR_MODER_HIGH].pkts_per_interval = |
---|
2685 | | - ENA_INTR_HIGH_PKTS; |
---|
2686 | | - intr_moder_tbl[ENA_INTR_MODER_HIGH].bytes_per_interval = |
---|
2687 | | - ENA_INTR_HIGH_BYTES; |
---|
2688 | | - |
---|
2689 | | - intr_moder_tbl[ENA_INTR_MODER_HIGHEST].intr_moder_interval = |
---|
2690 | | - ENA_INTR_HIGHEST_USECS; |
---|
2691 | | - intr_moder_tbl[ENA_INTR_MODER_HIGHEST].pkts_per_interval = |
---|
2692 | | - ENA_INTR_HIGHEST_PKTS; |
---|
2693 | | - intr_moder_tbl[ENA_INTR_MODER_HIGHEST].bytes_per_interval = |
---|
2694 | | - ENA_INTR_HIGHEST_BYTES; |
---|
2695 | 2883 | } |
---|
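After this change the device comes up with static moderation; adaptive moderation must be enabled explicitly by the driver. A hedged sketch of that opt-in, assuming the existing ena_com.h helpers; when to call it is a driver policy decision:

```c
#include "ena_com.h"

/* Hypothetical opt-in: turn adaptive moderation back on once the driver
 * decides it wants it, instead of relying on it being the default.
 */
static void example_enable_adaptive_moderation(struct ena_com_dev *ena_dev)
{
	if (ena_com_interrupt_moderation_supported(ena_dev))
		ena_com_enable_adaptive_moderation(ena_dev);
}
```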
2696 | 2884 | |
---|
2697 | 2885 | unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev) |
---|
.. | .. |
---|
2701 | 2889 | |
---|
2702 | 2890 | unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev) |
---|
2703 | 2891 | { |
---|
2704 | | - struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl; |
---|
| 2892 | + return ena_dev->intr_moder_rx_interval; |
---|
| 2893 | +} |
---|
2705 | 2894 | |
---|
2706 | | - if (intr_moder_tbl) |
---|
2707 | | - return intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval; |
---|
| 2895 | +int ena_com_config_dev_mode(struct ena_com_dev *ena_dev, |
---|
| 2896 | + struct ena_admin_feature_llq_desc *llq_features, |
---|
| 2897 | + struct ena_llq_configurations *llq_default_cfg) |
---|
| 2898 | +{ |
---|
| 2899 | + struct ena_com_llq_info *llq_info = &ena_dev->llq_info; |
---|
| 2900 | + int rc; |
---|
| 2901 | + |
---|
| 2902 | + if (!llq_features->max_llq_num) { |
---|
| 2903 | + ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; |
---|
| 2904 | + return 0; |
---|
| 2905 | + } |
---|
| 2906 | + |
---|
| 2907 | + rc = ena_com_config_llq_info(ena_dev, llq_features, llq_default_cfg); |
---|
| 2908 | + if (rc) |
---|
| 2909 | + return rc; |
---|
| 2910 | + |
---|
| 2911 | + ena_dev->tx_max_header_size = llq_info->desc_list_entry_size - |
---|
| 2912 | + (llq_info->descs_num_before_header * sizeof(struct ena_eth_io_tx_desc)); |
---|
| 2913 | + |
---|
| 2914 | + if (unlikely(ena_dev->tx_max_header_size == 0)) { |
---|
| 2915 | + pr_err("The size of the LLQ entry is smaller than needed\n"); |
---|
| 2916 | + return -EINVAL; |
---|
| 2917 | + } |
---|
| 2918 | + |
---|
| 2919 | + ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV; |
---|
2708 | 2920 | |
---|
2709 | 2921 | return 0; |
---|
2710 | | -} |
---|
2711 | | - |
---|
2712 | | -void ena_com_init_intr_moderation_entry(struct ena_com_dev *ena_dev, |
---|
2713 | | - enum ena_intr_moder_level level, |
---|
2714 | | - struct ena_intr_moder_entry *entry) |
---|
2715 | | -{ |
---|
2716 | | - struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl; |
---|
2717 | | - |
---|
2718 | | - if (level >= ENA_INTR_MAX_NUM_OF_LEVELS) |
---|
2719 | | - return; |
---|
2720 | | - |
---|
2721 | | - intr_moder_tbl[level].intr_moder_interval = entry->intr_moder_interval; |
---|
2722 | | - if (ena_dev->intr_delay_resolution) |
---|
2723 | | - intr_moder_tbl[level].intr_moder_interval /= |
---|
2724 | | - ena_dev->intr_delay_resolution; |
---|
2725 | | - intr_moder_tbl[level].pkts_per_interval = entry->pkts_per_interval; |
---|
2726 | | - |
---|
2727 | | - /* use hardcoded value until ethtool supports bytecount parameter */ |
---|
2728 | | - if (entry->bytes_per_interval != ENA_INTR_BYTE_COUNT_NOT_SUPPORTED) |
---|
2729 | | - intr_moder_tbl[level].bytes_per_interval = entry->bytes_per_interval; |
---|
2730 | | -} |
---|
2731 | | - |
---|
2732 | | -void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev, |
---|
2733 | | - enum ena_intr_moder_level level, |
---|
2734 | | - struct ena_intr_moder_entry *entry) |
---|
2735 | | -{ |
---|
2736 | | - struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl; |
---|
2737 | | - |
---|
2738 | | - if (level >= ENA_INTR_MAX_NUM_OF_LEVELS) |
---|
2739 | | - return; |
---|
2740 | | - |
---|
2741 | | - entry->intr_moder_interval = intr_moder_tbl[level].intr_moder_interval; |
---|
2742 | | - if (ena_dev->intr_delay_resolution) |
---|
2743 | | - entry->intr_moder_interval *= ena_dev->intr_delay_resolution; |
---|
2744 | | - entry->pkts_per_interval = |
---|
2745 | | - intr_moder_tbl[level].pkts_per_interval; |
---|
2746 | | - entry->bytes_per_interval = intr_moder_tbl[level].bytes_per_interval; |
---|
2747 | 2922 | } |
---|
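ena_com_config_dev_mode() derives the maximum pushed TX header size from the LLQ entry layout: whatever remains of the descriptor-list entry after the descriptors that precede the header. A standalone sketch of that arithmetic with illustrative sizes:

```c
#include <stdint.h>
#include <stdio.h>

/* Bytes left for the pushed TX header inside one LLQ descriptor-list
 * entry, mirroring the computation in ena_com_config_dev_mode() above.
 * A non-positive result means the chosen entry is too small.
 */
static int32_t llq_max_tx_header(uint32_t entry_size,
				 uint32_t descs_before_header,
				 uint32_t desc_size)
{
	return (int32_t)(entry_size - descs_before_header * desc_size);
}

int main(void)
{
	/* e.g. a 128-byte entry with two 16-byte descriptors before the header */
	printf("%d\n", llq_max_tx_header(128, 2, 16));	/* prints 96 */
	return 0;
}
```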