.. | .. |
---|
1 | 1 | /****************************************************************************** |
---|
2 | 2 | * |
---|
| 3 | + * This file is provided under a dual BSD/GPLv2 license. When using or |
---|
| 4 | + * redistributing this file, you may do so under either license. |
---|
| 5 | + * |
---|
| 6 | + * GPL LICENSE SUMMARY |
---|
| 7 | + * |
---|
3 | 8 | * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. |
---|
4 | 9 | * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH |
---|
5 | 10 | * Copyright(c) 2016 - 2017 Intel Deutschland GmbH |
---|
6 | | - * Copyright(c) 2018 Intel Corporation |
---|
7 | | - * |
---|
8 | | - * Portions of this file are derived from the ipw3945 project, as well |
---|
9 | | - * as portions of the ieee80211 subsystem header files. |
---|
| 11 | + * Copyright(c) 2018 - 2020 Intel Corporation |
---|
10 | 12 | * |
---|
11 | 13 | * This program is free software; you can redistribute it and/or modify it |
---|
12 | 14 | * under the terms of version 2 of the GNU General Public License as |
---|
.. | .. |
---|
17 | 19 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
---|
18 | 20 | * more details. |
---|
19 | 21 | * |
---|
20 | | - * You should have received a copy of the GNU General Public License along with |
---|
21 | | - * this program; if not, write to the Free Software Foundation, Inc., |
---|
22 | | - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA |
---|
23 | | - * |
---|
24 | 22 | * The full GNU General Public License is included in this distribution in the |
---|
25 | | - * file called LICENSE. |
---|
| 23 | + * file called COPYING. |
---|
26 | 24 | * |
---|
27 | 25 | * Contact Information: |
---|
28 | 26 | * Intel Linux Wireless <linuxwifi@intel.com> |
---|
29 | 27 | * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 |
---|
| 28 | + * |
---|
| 29 | + * BSD LICENSE |
---|
| 30 | + * |
---|
| 31 | + * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. |
---|
| 32 | + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH |
---|
| 33 | + * Copyright(c) 2016 - 2017 Intel Deutschland GmbH |
---|
| 34 | + * Copyright(c) 2018 - 2020 Intel Corporation |
---|
| 35 | + * All rights reserved. |
---|
| 36 | + * |
---|
| 37 | + * Redistribution and use in source and binary forms, with or without |
---|
| 38 | + * modification, are permitted provided that the following conditions |
---|
| 39 | + * are met: |
---|
| 40 | + * |
---|
| 41 | + * * Redistributions of source code must retain the above copyright |
---|
| 42 | + * notice, this list of conditions and the following disclaimer. |
---|
| 43 | + * * Redistributions in binary form must reproduce the above copyright |
---|
| 44 | + * notice, this list of conditions and the following disclaimer in |
---|
| 45 | + * the documentation and/or other materials provided with the |
---|
| 46 | + * distribution. |
---|
| 47 | + * * Neither the name Intel Corporation nor the names of its |
---|
| 48 | + * contributors may be used to endorse or promote products derived |
---|
| 49 | + * from this software without specific prior written permission. |
---|
| 50 | + * |
---|
| 51 | + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
---|
| 52 | + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
---|
| 53 | + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
---|
| 54 | + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
---|
| 55 | + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
---|
| 56 | + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
---|
| 57 | + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
---|
| 58 | + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
---|
| 59 | + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
---|
| 60 | + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
---|
| 61 | + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
---|
30 | 62 | * |
---|
31 | 63 | *****************************************************************************/ |
---|
32 | 64 | #include <linux/etherdevice.h> |
---|
33 | 65 | #include <linux/ieee80211.h> |
---|
34 | 66 | #include <linux/slab.h> |
---|
35 | 67 | #include <linux/sched.h> |
---|
36 | | -#include <linux/pm_runtime.h> |
---|
37 | 68 | #include <net/ip6_checksum.h> |
---|
38 | 69 | #include <net/tso.h> |
---|
39 | 70 | |
---|
.. | .. |
---|
45 | 76 | #include "iwl-op-mode.h" |
---|
46 | 77 | #include "internal.h" |
---|
47 | 78 | #include "fw/api/tx.h" |
---|
48 | | - |
---|
49 | | -#define IWL_TX_CRC_SIZE 4 |
---|
50 | | -#define IWL_TX_DELIMITER_SIZE 4 |
---|
51 | 79 | |
---|
52 | 80 | /*************** DMA-QUEUE-GENERAL-FUNCTIONS ***** |
---|
53 | 81 | * DMA services |
---|
.. | .. |
---|
71 | 99 | * |
---|
72 | 100 | ***************************************************/ |
---|
73 | 101 | |
---|
74 | | -int iwl_queue_space(struct iwl_trans *trans, const struct iwl_txq *q) |
---|
75 | | -{ |
---|
76 | | - unsigned int max; |
---|
77 | | - unsigned int used; |
---|
78 | | - |
---|
79 | | - /* |
---|
80 | | - * To avoid ambiguity between empty and completely full queues, there |
---|
81 | | - * should always be less than max_tfd_queue_size elements in the queue. |
---|
82 | | - * If q->n_window is smaller than max_tfd_queue_size, there is no need |
---|
83 | | - * to reserve any queue entries for this purpose. |
---|
84 | | - */ |
---|
85 | | - if (q->n_window < trans->cfg->base_params->max_tfd_queue_size) |
---|
86 | | - max = q->n_window; |
---|
87 | | - else |
---|
88 | | - max = trans->cfg->base_params->max_tfd_queue_size - 1; |
---|
89 | | - |
---|
90 | | - /* |
---|
91 | | - * max_tfd_queue_size is a power of 2, so the following is equivalent to |
---|
92 | | - * modulo by max_tfd_queue_size and is well defined. |
---|
93 | | - */ |
---|
94 | | - used = (q->write_ptr - q->read_ptr) & |
---|
95 | | - (trans->cfg->base_params->max_tfd_queue_size - 1); |
---|
96 | | - |
---|
97 | | - if (WARN_ON(used > max)) |
---|
98 | | - return 0; |
---|
99 | | - |
---|
100 | | - return max - used; |
---|
101 | | -} |
---|
102 | | - |
---|
103 | | -/* |
---|
104 | | - * iwl_queue_init - Initialize queue's high/low-water and read/write indexes |
---|
105 | | - */ |
---|
106 | | -static int iwl_queue_init(struct iwl_txq *q, int slots_num) |
---|
107 | | -{ |
---|
108 | | - q->n_window = slots_num; |
---|
109 | | - |
---|
110 | | - /* slots_num must be power-of-two size, otherwise |
---|
111 | | - * iwl_pcie_get_cmd_index is broken. */ |
---|
112 | | - if (WARN_ON(!is_power_of_2(slots_num))) |
---|
113 | | - return -EINVAL; |
---|
114 | | - |
---|
115 | | - q->low_mark = q->n_window / 4; |
---|
116 | | - if (q->low_mark < 4) |
---|
117 | | - q->low_mark = 4; |
---|
118 | | - |
---|
119 | | - q->high_mark = q->n_window / 8; |
---|
120 | | - if (q->high_mark < 2) |
---|
121 | | - q->high_mark = 2; |
---|
122 | | - |
---|
123 | | - q->write_ptr = 0; |
---|
124 | | - q->read_ptr = 0; |
---|
125 | | - |
---|
126 | | - return 0; |
---|
127 | | -} |
---|
128 | 102 | |
---|
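Note: the two queue helpers removed above now have iwl_txq_* equivalents in the shared queue code, which the remainder of this patch switches to (iwl_txq_space(), iwl_txq_inc_wrap(), iwl_txq_get_cmd_index(), ...). The free-space accounting they implement only works because the TFD ring size is a power of two, so pointer differences can be reduced with a simple mask. A minimal stand-alone sketch of that arithmetic, using hypothetical names and an assumed ring size of 256:

```c
#include <stdio.h>

#define MAX_TFD_QUEUE_SIZE 256	/* must be a power of two, as the driver asserts */

/* (write - read) & (size - 1) is a well-defined modulo when size is a
 * power of two, even after the unsigned counters wrap. */
static unsigned int queue_used(unsigned int write_ptr, unsigned int read_ptr)
{
	return (write_ptr - read_ptr) & (MAX_TFD_QUEUE_SIZE - 1);
}

static unsigned int queue_space(unsigned int write_ptr, unsigned int read_ptr,
				unsigned int n_window)
{
	/* keep one slot unused so "full" stays distinguishable from "empty" */
	unsigned int max = (n_window < MAX_TFD_QUEUE_SIZE) ?
			   n_window : MAX_TFD_QUEUE_SIZE - 1;

	return max - queue_used(write_ptr, read_ptr);
}

int main(void)
{
	/* write pointer has wrapped around the end of the ring */
	printf("%u\n", queue_space(3, 250, 64));	/* used = 9, prints 55 */
	return 0;
}
```

Reserving one slot (size - 1) keeps a completely full ring distinguishable from an empty one, which is exactly the point the removed comment made.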
129 | 103 | int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans, |
---|
130 | 104 | struct iwl_dma_ptr *ptr, size_t size) |
---|
.. | .. |
---|
149 | 123 | memset(ptr, 0, sizeof(*ptr)); |
---|
150 | 124 | } |
---|
151 | 125 | |
---|
152 | | -static void iwl_pcie_txq_stuck_timer(struct timer_list *t) |
---|
153 | | -{ |
---|
154 | | - struct iwl_txq *txq = from_timer(txq, t, stuck_timer); |
---|
155 | | - struct iwl_trans_pcie *trans_pcie = txq->trans_pcie; |
---|
156 | | - struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie); |
---|
157 | | - |
---|
158 | | - spin_lock(&txq->lock); |
---|
159 | | - /* check if triggered erroneously */ |
---|
160 | | - if (txq->read_ptr == txq->write_ptr) { |
---|
161 | | - spin_unlock(&txq->lock); |
---|
162 | | - return; |
---|
163 | | - } |
---|
164 | | - spin_unlock(&txq->lock); |
---|
165 | | - |
---|
166 | | - iwl_trans_pcie_log_scd_error(trans, txq); |
---|
167 | | - |
---|
168 | | - iwl_force_nmi(trans); |
---|
169 | | -} |
---|
170 | | - |
---|
171 | | -/* |
---|
172 | | - * iwl_pcie_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array |
---|
173 | | - */ |
---|
174 | | -static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans, |
---|
175 | | - struct iwl_txq *txq, u16 byte_cnt, |
---|
176 | | - int num_tbs) |
---|
177 | | -{ |
---|
178 | | - struct iwlagn_scd_bc_tbl *scd_bc_tbl; |
---|
179 | | - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
---|
180 | | - int write_ptr = txq->write_ptr; |
---|
181 | | - int txq_id = txq->id; |
---|
182 | | - u8 sec_ctl = 0; |
---|
183 | | - u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE; |
---|
184 | | - __le16 bc_ent; |
---|
185 | | - struct iwl_tx_cmd *tx_cmd = |
---|
186 | | - (void *)txq->entries[txq->write_ptr].cmd->payload; |
---|
187 | | - u8 sta_id = tx_cmd->sta_id; |
---|
188 | | - |
---|
189 | | - scd_bc_tbl = trans_pcie->scd_bc_tbls.addr; |
---|
190 | | - |
---|
191 | | - sec_ctl = tx_cmd->sec_ctl; |
---|
192 | | - |
---|
193 | | - switch (sec_ctl & TX_CMD_SEC_MSK) { |
---|
194 | | - case TX_CMD_SEC_CCM: |
---|
195 | | - len += IEEE80211_CCMP_MIC_LEN; |
---|
196 | | - break; |
---|
197 | | - case TX_CMD_SEC_TKIP: |
---|
198 | | - len += IEEE80211_TKIP_ICV_LEN; |
---|
199 | | - break; |
---|
200 | | - case TX_CMD_SEC_WEP: |
---|
201 | | - len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN; |
---|
202 | | - break; |
---|
203 | | - } |
---|
204 | | - if (trans_pcie->bc_table_dword) |
---|
205 | | - len = DIV_ROUND_UP(len, 4); |
---|
206 | | - |
---|
207 | | - if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX)) |
---|
208 | | - return; |
---|
209 | | - |
---|
210 | | - bc_ent = cpu_to_le16(len | (sta_id << 12)); |
---|
211 | | - |
---|
212 | | - scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent; |
---|
213 | | - |
---|
214 | | - if (write_ptr < TFD_QUEUE_SIZE_BC_DUP) |
---|
215 | | - scd_bc_tbl[txq_id]. |
---|
216 | | - tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent; |
---|
217 | | -} |
---|
218 | | - |
---|
219 | | -static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans, |
---|
220 | | - struct iwl_txq *txq) |
---|
221 | | -{ |
---|
222 | | - struct iwl_trans_pcie *trans_pcie = |
---|
223 | | - IWL_TRANS_GET_PCIE_TRANS(trans); |
---|
224 | | - struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr; |
---|
225 | | - int txq_id = txq->id; |
---|
226 | | - int read_ptr = txq->read_ptr; |
---|
227 | | - u8 sta_id = 0; |
---|
228 | | - __le16 bc_ent; |
---|
229 | | - struct iwl_tx_cmd *tx_cmd = |
---|
230 | | - (void *)txq->entries[read_ptr].cmd->payload; |
---|
231 | | - |
---|
232 | | - WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX); |
---|
233 | | - |
---|
234 | | - if (txq_id != trans_pcie->cmd_queue) |
---|
235 | | - sta_id = tx_cmd->sta_id; |
---|
236 | | - |
---|
237 | | - bc_ent = cpu_to_le16(1 | (sta_id << 12)); |
---|
238 | | - |
---|
239 | | - scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent; |
---|
240 | | - |
---|
241 | | - if (read_ptr < TFD_QUEUE_SIZE_BC_DUP) |
---|
242 | | - scd_bc_tbl[txq_id]. |
---|
243 | | - tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent; |
---|
244 | | -} |
---|
245 | | - |
---|
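Note: the byte-count table helpers removed above are likewise replaced later in this patch by the shared iwl_txq_gen1_* versions. Each table entry packs a 12-bit length (optionally rounded up to dwords) together with the station id in the top four bits of a 16-bit word. A rough, hypothetical sketch of that packing:

```c
#include <stdint.h>
#include <stdio.h>

#define TX_CRC_SIZE		4	/* values taken from the #defines removed above */
#define TX_DELIMITER_SIZE	4

/* One scheduler byte-count entry: 12 bits of length, station id above it. */
static uint16_t build_bc_ent(uint16_t byte_cnt, uint8_t sta_id, int bc_table_dword)
{
	uint16_t len = byte_cnt + TX_CRC_SIZE + TX_DELIMITER_SIZE;

	if (bc_table_dword)
		len = (len + 3) / 4;		/* DIV_ROUND_UP(len, 4) */

	return len | (uint16_t)(sta_id << 12);	/* len must fit in 12 bits */
}

int main(void)
{
	printf("0x%04x\n", build_bc_ent(100, 3, 1));	/* prints 0x301b */
	return 0;
}
```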
246 | 126 | /* |
---|
247 | 127 | * iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware |
---|
248 | 128 | */ |
---|
249 | 129 | static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, |
---|
250 | 130 | struct iwl_txq *txq) |
---|
251 | 131 | { |
---|
252 | | - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
---|
253 | 132 | u32 reg = 0; |
---|
254 | 133 | int txq_id = txq->id; |
---|
255 | 134 | |
---|
.. | .. |
---|
261 | 140 | * 2. NIC is woken up for CMD regardless of shadow outside this function |
---|
262 | 141 | * 3. there is a chance that the NIC is asleep |
---|
263 | 142 | */ |
---|
264 | | - if (!trans->cfg->base_params->shadow_reg_enable && |
---|
265 | | - txq_id != trans_pcie->cmd_queue && |
---|
| 143 | + if (!trans->trans_cfg->base_params->shadow_reg_enable && |
---|
| 144 | + txq_id != trans->txqs.cmd.q_id && |
---|
266 | 145 | test_bit(STATUS_TPOWER_PMI, &trans->status)) { |
---|
267 | 146 | /* |
---|
268 | 147 | * wake up nic if it's powered down ... |
---|
.. | .. |
---|
275 | 154 | IWL_DEBUG_INFO(trans, "Tx queue %d requesting wakeup, GP1 = 0x%x\n", |
---|
276 | 155 | txq_id, reg); |
---|
277 | 156 | iwl_set_bit(trans, CSR_GP_CNTRL, |
---|
278 | | - BIT(trans->cfg->csr->flag_mac_access_req)); |
---|
| 157 | + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); |
---|
279 | 158 | txq->need_update = true; |
---|
280 | 159 | return; |
---|
281 | 160 | } |
---|
.. | .. |
---|
293 | 172 | |
---|
294 | 173 | void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans) |
---|
295 | 174 | { |
---|
296 | | - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
---|
297 | 175 | int i; |
---|
298 | 176 | |
---|
299 | | - for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) { |
---|
300 | | - struct iwl_txq *txq = trans_pcie->txq[i]; |
---|
| 177 | + for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) { |
---|
| 178 | + struct iwl_txq *txq = trans->txqs.txq[i]; |
---|
301 | 179 | |
---|
302 | | - if (!test_bit(i, trans_pcie->queue_used)) |
---|
| 180 | + if (!test_bit(i, trans->txqs.queue_used)) |
---|
303 | 181 | continue; |
---|
304 | 182 | |
---|
305 | 183 | spin_lock_bh(&txq->lock); |
---|
.. | .. |
---|
308 | 186 | txq->need_update = false; |
---|
309 | 187 | } |
---|
310 | 188 | spin_unlock_bh(&txq->lock); |
---|
311 | | - } |
---|
312 | | -} |
---|
313 | | - |
---|
314 | | -static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_trans *trans, |
---|
315 | | - void *_tfd, u8 idx) |
---|
316 | | -{ |
---|
317 | | - |
---|
318 | | - if (trans->cfg->use_tfh) { |
---|
319 | | - struct iwl_tfh_tfd *tfd = _tfd; |
---|
320 | | - struct iwl_tfh_tb *tb = &tfd->tbs[idx]; |
---|
321 | | - |
---|
322 | | - return (dma_addr_t)(le64_to_cpu(tb->addr)); |
---|
323 | | - } else { |
---|
324 | | - struct iwl_tfd *tfd = _tfd; |
---|
325 | | - struct iwl_tfd_tb *tb = &tfd->tbs[idx]; |
---|
326 | | - dma_addr_t addr = get_unaligned_le32(&tb->lo); |
---|
327 | | - dma_addr_t hi_len; |
---|
328 | | - |
---|
329 | | - if (sizeof(dma_addr_t) <= sizeof(u32)) |
---|
330 | | - return addr; |
---|
331 | | - |
---|
332 | | - hi_len = le16_to_cpu(tb->hi_n_len) & 0xF; |
---|
333 | | - |
---|
334 | | - /* |
---|
335 | | - * shift by 16 twice to avoid warnings on 32-bit |
---|
336 | | - * (where this code never runs anyway due to the |
---|
337 | | - * if statement above) |
---|
338 | | - */ |
---|
339 | | - return addr | ((hi_len << 16) << 16); |
---|
340 | 189 | } |
---|
341 | 190 | } |
---|
342 | 191 | |
---|
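Note: the removed iwl_pcie_tfd_tb_get_addr() decoded the legacy (non-TFH) buffer-descriptor format: a 32-bit low address word plus a 16-bit field carrying four extra address bits and a 12-bit length. A small, hypothetical sketch of that layout and its decoding:

```c
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the gen1 TFD buffer descriptor: 'lo' holds the
 * low 32 address bits, 'hi_n_len' holds 4 high address bits (low nibble)
 * and a 12-bit length (upper 12 bits). */
struct tb_desc {
	uint32_t lo;
	uint16_t hi_n_len;
};

static uint64_t tb_addr(const struct tb_desc *tb)
{
	uint64_t hi = tb->hi_n_len & 0xF;

	return (uint64_t)tb->lo | (hi << 32);
}

static uint16_t tb_len(const struct tb_desc *tb)
{
	return tb->hi_n_len >> 4;
}

int main(void)
{
	struct tb_desc tb = { .lo = 0x12345678, .hi_n_len = (64 << 4) | 0x3 };

	printf("addr=0x%llx len=%u\n",
	       (unsigned long long)tb_addr(&tb), tb_len(&tb));	/* addr=0x312345678 len=64 */
	return 0;
}
```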
.. | .. |
---|
356 | 205 | tfd_fh->num_tbs = idx + 1; |
---|
357 | 206 | } |
---|
358 | 207 | |
---|
359 | | -static inline u8 iwl_pcie_tfd_get_num_tbs(struct iwl_trans *trans, void *_tfd) |
---|
360 | | -{ |
---|
361 | | - if (trans->cfg->use_tfh) { |
---|
362 | | - struct iwl_tfh_tfd *tfd = _tfd; |
---|
363 | | - |
---|
364 | | - return le16_to_cpu(tfd->num_tbs) & 0x1f; |
---|
365 | | - } else { |
---|
366 | | - struct iwl_tfd *tfd = _tfd; |
---|
367 | | - |
---|
368 | | - return tfd->num_tbs & 0x1f; |
---|
369 | | - } |
---|
370 | | -} |
---|
371 | | - |
---|
372 | | -static void iwl_pcie_tfd_unmap(struct iwl_trans *trans, |
---|
373 | | - struct iwl_cmd_meta *meta, |
---|
374 | | - struct iwl_txq *txq, int index) |
---|
375 | | -{ |
---|
376 | | - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
---|
377 | | - int i, num_tbs; |
---|
378 | | - void *tfd = iwl_pcie_get_tfd(trans, txq, index); |
---|
379 | | - |
---|
380 | | - /* Sanity check on number of chunks */ |
---|
381 | | - num_tbs = iwl_pcie_tfd_get_num_tbs(trans, tfd); |
---|
382 | | - |
---|
383 | | - if (num_tbs > trans_pcie->max_tbs) { |
---|
384 | | - IWL_ERR(trans, "Too many chunks: %i\n", num_tbs); |
---|
385 | | - /* @todo issue fatal error, it is quite serious situation */ |
---|
386 | | - return; |
---|
387 | | - } |
---|
388 | | - |
---|
389 | | - /* first TB is never freed - it's the bidirectional DMA data */ |
---|
390 | | - |
---|
391 | | - for (i = 1; i < num_tbs; i++) { |
---|
392 | | - if (meta->tbs & BIT(i)) |
---|
393 | | - dma_unmap_page(trans->dev, |
---|
394 | | - iwl_pcie_tfd_tb_get_addr(trans, tfd, i), |
---|
395 | | - iwl_pcie_tfd_tb_get_len(trans, tfd, i), |
---|
396 | | - DMA_TO_DEVICE); |
---|
397 | | - else |
---|
398 | | - dma_unmap_single(trans->dev, |
---|
399 | | - iwl_pcie_tfd_tb_get_addr(trans, tfd, |
---|
400 | | - i), |
---|
401 | | - iwl_pcie_tfd_tb_get_len(trans, tfd, |
---|
402 | | - i), |
---|
403 | | - DMA_TO_DEVICE); |
---|
404 | | - } |
---|
405 | | - |
---|
406 | | - meta->tbs = 0; |
---|
407 | | - |
---|
408 | | - if (trans->cfg->use_tfh) { |
---|
409 | | - struct iwl_tfh_tfd *tfd_fh = (void *)tfd; |
---|
410 | | - |
---|
411 | | - tfd_fh->num_tbs = 0; |
---|
412 | | - } else { |
---|
413 | | - struct iwl_tfd *tfd_fh = (void *)tfd; |
---|
414 | | - |
---|
415 | | - tfd_fh->num_tbs = 0; |
---|
416 | | - } |
---|
417 | | - |
---|
418 | | -} |
---|
419 | | - |
---|
420 | 208 | /* |
---|
421 | 209 | * iwl_pcie_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr] |
---|
422 | 210 | * @trans - transport private data |
---|
.. | .. |
---|
432 | 220 | * idx is bounded by n_window |
---|
433 | 221 | */ |
---|
434 | 222 | int rd_ptr = txq->read_ptr; |
---|
435 | | - int idx = iwl_pcie_get_cmd_index(txq, rd_ptr); |
---|
| 223 | + int idx = iwl_txq_get_cmd_index(txq, rd_ptr); |
---|
436 | 224 | |
---|
437 | 225 | lockdep_assert_held(&txq->lock); |
---|
438 | 226 | |
---|
439 | 227 | /* We have only q->n_window txq->entries, but we use |
---|
440 | 228 | * TFD_QUEUE_SIZE_MAX tfds |
---|
441 | 229 | */ |
---|
442 | | - iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, txq, rd_ptr); |
---|
| 230 | + iwl_txq_gen1_tfd_unmap(trans, &txq->entries[idx].meta, txq, rd_ptr); |
---|
443 | 231 | |
---|
444 | 232 | /* free SKB */ |
---|
445 | 233 | if (txq->entries) { |
---|
.. | .. |
---|
461 | 249 | static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq, |
---|
462 | 250 | dma_addr_t addr, u16 len, bool reset) |
---|
463 | 251 | { |
---|
464 | | - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
---|
465 | 252 | void *tfd; |
---|
466 | 253 | u32 num_tbs; |
---|
467 | 254 | |
---|
468 | | - tfd = txq->tfds + trans_pcie->tfd_size * txq->write_ptr; |
---|
| 255 | + tfd = txq->tfds + trans->txqs.tfd.size * txq->write_ptr; |
---|
469 | 256 | |
---|
470 | 257 | if (reset) |
---|
471 | | - memset(tfd, 0, trans_pcie->tfd_size); |
---|
| 258 | + memset(tfd, 0, trans->txqs.tfd.size); |
---|
472 | 259 | |
---|
473 | | - num_tbs = iwl_pcie_tfd_get_num_tbs(trans, tfd); |
---|
| 260 | + num_tbs = iwl_txq_gen1_tfd_get_num_tbs(trans, tfd); |
---|
474 | 261 | |
---|
475 | 262 | /* Each TFD can point to a maximum max_tbs Tx buffers */ |
---|
476 | | - if (num_tbs >= trans_pcie->max_tbs) { |
---|
| 263 | + if (num_tbs >= trans->txqs.tfd.max_tbs) { |
---|
477 | 264 | IWL_ERR(trans, "Error can not send more than %d chunks\n", |
---|
478 | | - trans_pcie->max_tbs); |
---|
| 265 | + trans->txqs.tfd.max_tbs); |
---|
479 | 266 | return -EINVAL; |
---|
480 | 267 | } |
---|
481 | 268 | |
---|
.. | .. |
---|
488 | 275 | return num_tbs; |
---|
489 | 276 | } |
---|
490 | 277 | |
---|
491 | | -int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, |
---|
492 | | - int slots_num, bool cmd_queue) |
---|
493 | | -{ |
---|
494 | | - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
---|
495 | | - size_t tfd_sz = trans_pcie->tfd_size * |
---|
496 | | - trans->cfg->base_params->max_tfd_queue_size; |
---|
497 | | - size_t tb0_buf_sz; |
---|
498 | | - int i; |
---|
499 | | - |
---|
500 | | - if (WARN_ON(txq->entries || txq->tfds)) |
---|
501 | | - return -EINVAL; |
---|
502 | | - |
---|
503 | | - if (trans->cfg->use_tfh) |
---|
504 | | - tfd_sz = trans_pcie->tfd_size * slots_num; |
---|
505 | | - |
---|
506 | | - timer_setup(&txq->stuck_timer, iwl_pcie_txq_stuck_timer, 0); |
---|
507 | | - txq->trans_pcie = trans_pcie; |
---|
508 | | - |
---|
509 | | - txq->n_window = slots_num; |
---|
510 | | - |
---|
511 | | - txq->entries = kcalloc(slots_num, |
---|
512 | | - sizeof(struct iwl_pcie_txq_entry), |
---|
513 | | - GFP_KERNEL); |
---|
514 | | - |
---|
515 | | - if (!txq->entries) |
---|
516 | | - goto error; |
---|
517 | | - |
---|
518 | | - if (cmd_queue) |
---|
519 | | - for (i = 0; i < slots_num; i++) { |
---|
520 | | - txq->entries[i].cmd = |
---|
521 | | - kmalloc(sizeof(struct iwl_device_cmd), |
---|
522 | | - GFP_KERNEL); |
---|
523 | | - if (!txq->entries[i].cmd) |
---|
524 | | - goto error; |
---|
525 | | - } |
---|
526 | | - |
---|
527 | | - /* Circular buffer of transmit frame descriptors (TFDs), |
---|
528 | | - * shared with device */ |
---|
529 | | - txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz, |
---|
530 | | - &txq->dma_addr, GFP_KERNEL); |
---|
531 | | - if (!txq->tfds) |
---|
532 | | - goto error; |
---|
533 | | - |
---|
534 | | - BUILD_BUG_ON(IWL_FIRST_TB_SIZE_ALIGN != sizeof(*txq->first_tb_bufs)); |
---|
535 | | - |
---|
536 | | - tb0_buf_sz = sizeof(*txq->first_tb_bufs) * slots_num; |
---|
537 | | - |
---|
538 | | - txq->first_tb_bufs = dma_alloc_coherent(trans->dev, tb0_buf_sz, |
---|
539 | | - &txq->first_tb_dma, |
---|
540 | | - GFP_KERNEL); |
---|
541 | | - if (!txq->first_tb_bufs) |
---|
542 | | - goto err_free_tfds; |
---|
543 | | - |
---|
544 | | - return 0; |
---|
545 | | -err_free_tfds: |
---|
546 | | - dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr); |
---|
547 | | -error: |
---|
548 | | - if (txq->entries && cmd_queue) |
---|
549 | | - for (i = 0; i < slots_num; i++) |
---|
550 | | - kfree(txq->entries[i].cmd); |
---|
551 | | - kfree(txq->entries); |
---|
552 | | - txq->entries = NULL; |
---|
553 | | - |
---|
554 | | - return -ENOMEM; |
---|
555 | | - |
---|
556 | | -} |
---|
557 | | - |
---|
558 | | -int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, |
---|
559 | | - int slots_num, bool cmd_queue) |
---|
560 | | -{ |
---|
561 | | - int ret; |
---|
562 | | - u32 tfd_queue_max_size = trans->cfg->base_params->max_tfd_queue_size; |
---|
563 | | - |
---|
564 | | - txq->need_update = false; |
---|
565 | | - |
---|
566 | | - /* max_tfd_queue_size must be power-of-two size, otherwise |
---|
567 | | - * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */ |
---|
568 | | - if (WARN_ONCE(tfd_queue_max_size & (tfd_queue_max_size - 1), |
---|
569 | | - "Max tfd queue size must be a power of two, but is %d", |
---|
570 | | - tfd_queue_max_size)) |
---|
571 | | - return -EINVAL; |
---|
572 | | - |
---|
573 | | - /* Initialize queue's high/low-water marks, and head/tail indexes */ |
---|
574 | | - ret = iwl_queue_init(txq, slots_num); |
---|
575 | | - if (ret) |
---|
576 | | - return ret; |
---|
577 | | - |
---|
578 | | - spin_lock_init(&txq->lock); |
---|
579 | | - |
---|
580 | | - if (cmd_queue) { |
---|
581 | | - static struct lock_class_key iwl_pcie_cmd_queue_lock_class; |
---|
582 | | - |
---|
583 | | - lockdep_set_class(&txq->lock, &iwl_pcie_cmd_queue_lock_class); |
---|
584 | | - } |
---|
585 | | - |
---|
586 | | - __skb_queue_head_init(&txq->overflow_q); |
---|
587 | | - |
---|
588 | | - return 0; |
---|
589 | | -} |
---|
590 | | - |
---|
591 | | -void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie, |
---|
592 | | - struct sk_buff *skb) |
---|
593 | | -{ |
---|
594 | | - struct page **page_ptr; |
---|
595 | | - |
---|
596 | | - page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs); |
---|
597 | | - |
---|
598 | | - if (*page_ptr) { |
---|
599 | | - __free_page(*page_ptr); |
---|
600 | | - *page_ptr = NULL; |
---|
601 | | - } |
---|
602 | | -} |
---|
603 | | - |
---|
604 | 278 | static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans) |
---|
605 | 279 | { |
---|
606 | 280 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
---|
607 | 281 | |
---|
608 | 282 | lockdep_assert_held(&trans_pcie->reg_lock); |
---|
609 | 283 | |
---|
610 | | - if (trans_pcie->ref_cmd_in_flight) { |
---|
611 | | - trans_pcie->ref_cmd_in_flight = false; |
---|
612 | | - IWL_DEBUG_RPM(trans, "clear ref_cmd_in_flight - unref\n"); |
---|
613 | | - iwl_trans_unref(trans); |
---|
614 | | - } |
---|
615 | | - |
---|
616 | | - if (!trans->cfg->base_params->apmg_wake_up_wa) |
---|
| 284 | + if (!trans->trans_cfg->base_params->apmg_wake_up_wa) |
---|
617 | 285 | return; |
---|
618 | 286 | if (WARN_ON(!trans_pcie->cmd_hold_nic_awake)) |
---|
619 | 287 | return; |
---|
620 | 288 | |
---|
621 | 289 | trans_pcie->cmd_hold_nic_awake = false; |
---|
622 | 290 | __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, |
---|
623 | | - BIT(trans->cfg->csr->flag_mac_access_req)); |
---|
| 291 | + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); |
---|
624 | 292 | } |
---|
625 | 293 | |
---|
626 | 294 | /* |
---|
.. | .. |
---|
629 | 297 | static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id) |
---|
630 | 298 | { |
---|
631 | 299 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
---|
632 | | - struct iwl_txq *txq = trans_pcie->txq[txq_id]; |
---|
| 300 | + struct iwl_txq *txq = trans->txqs.txq[txq_id]; |
---|
633 | 301 | |
---|
634 | 302 | if (!txq) { |
---|
635 | 303 | IWL_ERR(trans, "Trying to free a queue that wasn't allocated?\n"); |
---|
.. | .. |
---|
641 | 309 | IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n", |
---|
642 | 310 | txq_id, txq->read_ptr); |
---|
643 | 311 | |
---|
644 | | - if (txq_id != trans_pcie->cmd_queue) { |
---|
| 312 | + if (txq_id != trans->txqs.cmd.q_id) { |
---|
645 | 313 | struct sk_buff *skb = txq->entries[txq->read_ptr].skb; |
---|
646 | 314 | |
---|
647 | 315 | if (WARN_ON_ONCE(!skb)) |
---|
648 | 316 | continue; |
---|
649 | 317 | |
---|
650 | | - iwl_pcie_free_tso_page(trans_pcie, skb); |
---|
| 318 | + iwl_txq_free_tso_page(trans, skb); |
---|
651 | 319 | } |
---|
652 | 320 | iwl_pcie_txq_free_tfd(trans, txq); |
---|
653 | | - txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr); |
---|
| 321 | + txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr); |
---|
654 | 322 | |
---|
655 | 323 | if (txq->read_ptr == txq->write_ptr) { |
---|
656 | | - unsigned long flags; |
---|
657 | | - |
---|
658 | | - spin_lock_irqsave(&trans_pcie->reg_lock, flags); |
---|
659 | | - if (txq_id != trans_pcie->cmd_queue) { |
---|
660 | | - IWL_DEBUG_RPM(trans, "Q %d - last tx freed\n", |
---|
661 | | - txq->id); |
---|
662 | | - iwl_trans_unref(trans); |
---|
663 | | - } else { |
---|
| 324 | + spin_lock(&trans_pcie->reg_lock); |
---|
| 325 | + if (txq_id == trans->txqs.cmd.q_id) |
---|
664 | 326 | iwl_pcie_clear_cmd_in_flight(trans); |
---|
665 | | - } |
---|
666 | | - spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); |
---|
| 327 | + spin_unlock(&trans_pcie->reg_lock); |
---|
667 | 328 | } |
---|
668 | 329 | } |
---|
669 | 330 | |
---|
.. | .. |
---|
689 | 350 | */ |
---|
690 | 351 | static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id) |
---|
691 | 352 | { |
---|
692 | | - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
---|
693 | | - struct iwl_txq *txq = trans_pcie->txq[txq_id]; |
---|
| 353 | + struct iwl_txq *txq = trans->txqs.txq[txq_id]; |
---|
694 | 354 | struct device *dev = trans->dev; |
---|
695 | 355 | int i; |
---|
696 | 356 | |
---|
.. | .. |
---|
700 | 360 | iwl_pcie_txq_unmap(trans, txq_id); |
---|
701 | 361 | |
---|
702 | 362 | /* De-alloc array of command/tx buffers */ |
---|
703 | | - if (txq_id == trans_pcie->cmd_queue) |
---|
| 363 | + if (txq_id == trans->txqs.cmd.q_id) |
---|
704 | 364 | for (i = 0; i < txq->n_window; i++) { |
---|
705 | | - kzfree(txq->entries[i].cmd); |
---|
706 | | - kzfree(txq->entries[i].free_buf); |
---|
| 365 | + kfree_sensitive(txq->entries[i].cmd); |
---|
| 366 | + kfree_sensitive(txq->entries[i].free_buf); |
---|
707 | 367 | } |
---|
708 | 368 | |
---|
709 | 369 | /* De-alloc circular buffer of TFDs */ |
---|
710 | 370 | if (txq->tfds) { |
---|
711 | 371 | dma_free_coherent(dev, |
---|
712 | | - trans_pcie->tfd_size * |
---|
713 | | - trans->cfg->base_params->max_tfd_queue_size, |
---|
| 372 | + trans->txqs.tfd.size * |
---|
| 373 | + trans->trans_cfg->base_params->max_tfd_queue_size, |
---|
714 | 374 | txq->tfds, txq->dma_addr); |
---|
715 | 375 | txq->dma_addr = 0; |
---|
716 | 376 | txq->tfds = NULL; |
---|
.. | .. |
---|
732 | 392 | void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr) |
---|
733 | 393 | { |
---|
734 | 394 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
---|
735 | | - int nq = trans->cfg->base_params->num_of_queues; |
---|
| 395 | + int nq = trans->trans_cfg->base_params->num_of_queues; |
---|
736 | 396 | int chan; |
---|
737 | 397 | u32 reg_val; |
---|
738 | 398 | int clear_dwords = (SCD_TRANS_TBL_OFFSET_QUEUE(nq) - |
---|
739 | 399 | SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(u32); |
---|
740 | 400 | |
---|
741 | 401 | /* make sure all queue are not stopped/used */ |
---|
742 | | - memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped)); |
---|
743 | | - memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used)); |
---|
| 402 | + memset(trans->txqs.queue_stopped, 0, |
---|
| 403 | + sizeof(trans->txqs.queue_stopped)); |
---|
| 404 | + memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used)); |
---|
744 | 405 | |
---|
745 | 406 | trans_pcie->scd_base_addr = |
---|
746 | 407 | iwl_read_prph(trans, SCD_SRAM_BASE_ADDR); |
---|
.. | .. |
---|
754 | 415 | NULL, clear_dwords); |
---|
755 | 416 | |
---|
756 | 417 | iwl_write_prph(trans, SCD_DRAM_BASE_ADDR, |
---|
757 | | - trans_pcie->scd_bc_tbls.dma >> 10); |
---|
| 418 | + trans->txqs.scd_bc_tbls.dma >> 10); |
---|
758 | 419 | |
---|
759 | 420 | /* The chain extension of the SCD doesn't work well. This feature is |
---|
760 | 421 | * enabled by default by the HW, so we need to disable it manually. |
---|
761 | 422 | */ |
---|
762 | | - if (trans->cfg->base_params->scd_chain_ext_wa) |
---|
| 423 | + if (trans->trans_cfg->base_params->scd_chain_ext_wa) |
---|
763 | 424 | iwl_write_prph(trans, SCD_CHAINEXT_EN, 0); |
---|
764 | 425 | |
---|
765 | | - iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue, |
---|
766 | | - trans_pcie->cmd_fifo, |
---|
767 | | - trans_pcie->cmd_q_wdg_timeout); |
---|
| 426 | + iwl_trans_ac_txq_enable(trans, trans->txqs.cmd.q_id, |
---|
| 427 | + trans->txqs.cmd.fifo, |
---|
| 428 | + trans->txqs.cmd.wdg_timeout); |
---|
768 | 429 | |
---|
769 | 430 | /* Activate all Tx DMA/FIFO channels */ |
---|
770 | 431 | iwl_scd_activate_fifos(trans); |
---|
.. | .. |
---|
781 | 442 | reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN); |
---|
782 | 443 | |
---|
783 | 444 | /* Enable L1-Active */ |
---|
784 | | - if (trans->cfg->device_family < IWL_DEVICE_FAMILY_8000) |
---|
| 445 | + if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000) |
---|
785 | 446 | iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG, |
---|
786 | 447 | APMG_PCIDEV_STT_VAL_L1_ACT_DIS); |
---|
787 | 448 | } |
---|
.. | .. |
---|
795 | 456 | * we should never get here in gen2 trans mode; return early to avoid |
---|
796 | 457 | * having invalid accesses |
---|
797 | 458 | */ |
---|
798 | | - if (WARN_ON_ONCE(trans->cfg->gen2)) |
---|
| 459 | + if (WARN_ON_ONCE(trans->trans_cfg->gen2)) |
---|
799 | 460 | return; |
---|
800 | 461 | |
---|
801 | | - for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues; |
---|
| 462 | + for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues; |
---|
802 | 463 | txq_id++) { |
---|
803 | | - struct iwl_txq *txq = trans_pcie->txq[txq_id]; |
---|
804 | | - if (trans->cfg->use_tfh) |
---|
| 464 | + struct iwl_txq *txq = trans->txqs.txq[txq_id]; |
---|
| 465 | + if (trans->trans_cfg->use_tfh) |
---|
805 | 466 | iwl_write_direct64(trans, |
---|
806 | 467 | FH_MEM_CBBC_QUEUE(trans, txq_id), |
---|
807 | 468 | txq->dma_addr); |
---|
.. | .. |
---|
876 | 537 | * queues. This happens when we have an rfkill interrupt. |
---|
877 | 538 | * Since we stop Tx altogether - mark the queues as stopped. |
---|
878 | 539 | */ |
---|
879 | | - memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped)); |
---|
880 | | - memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used)); |
---|
| 540 | + memset(trans->txqs.queue_stopped, 0, |
---|
| 541 | + sizeof(trans->txqs.queue_stopped)); |
---|
| 542 | + memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used)); |
---|
881 | 543 | |
---|
882 | 544 | /* This can happen: start_hw, stop_device */ |
---|
883 | 545 | if (!trans_pcie->txq_memory) |
---|
884 | 546 | return 0; |
---|
885 | 547 | |
---|
886 | 548 | /* Unmap DMA from host system and free skb's */ |
---|
887 | | - for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues; |
---|
| 549 | + for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues; |
---|
888 | 550 | txq_id++) |
---|
889 | 551 | iwl_pcie_txq_unmap(trans, txq_id); |
---|
890 | 552 | |
---|
.. | .. |
---|
901 | 563 | int txq_id; |
---|
902 | 564 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
---|
903 | 565 | |
---|
904 | | - memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used)); |
---|
| 566 | + memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used)); |
---|
905 | 567 | |
---|
906 | 568 | /* Tx queues */ |
---|
907 | 569 | if (trans_pcie->txq_memory) { |
---|
908 | 570 | for (txq_id = 0; |
---|
909 | | - txq_id < trans->cfg->base_params->num_of_queues; |
---|
| 571 | + txq_id < trans->trans_cfg->base_params->num_of_queues; |
---|
910 | 572 | txq_id++) { |
---|
911 | 573 | iwl_pcie_txq_free(trans, txq_id); |
---|
912 | | - trans_pcie->txq[txq_id] = NULL; |
---|
| 574 | + trans->txqs.txq[txq_id] = NULL; |
---|
913 | 575 | } |
---|
914 | 576 | } |
---|
915 | 577 | |
---|
.. | .. |
---|
918 | 580 | |
---|
919 | 581 | iwl_pcie_free_dma_ptr(trans, &trans_pcie->kw); |
---|
920 | 582 | |
---|
921 | | - iwl_pcie_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls); |
---|
| 583 | + iwl_pcie_free_dma_ptr(trans, &trans->txqs.scd_bc_tbls); |
---|
922 | 584 | } |
---|
923 | 585 | |
---|
924 | 586 | /* |
---|
.. | .. |
---|
930 | 592 | int ret; |
---|
931 | 593 | int txq_id, slots_num; |
---|
932 | 594 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
---|
933 | | - u16 bc_tbls_size = trans->cfg->base_params->num_of_queues; |
---|
| 595 | + u16 bc_tbls_size = trans->trans_cfg->base_params->num_of_queues; |
---|
934 | 596 | |
---|
935 | | - bc_tbls_size *= (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) ? |
---|
936 | | - sizeof(struct iwl_gen3_bc_tbl) : |
---|
937 | | - sizeof(struct iwlagn_scd_bc_tbl); |
---|
| 597 | + if (WARN_ON(trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)) |
---|
| 598 | + return -EINVAL; |
---|
| 599 | + |
---|
| 600 | + bc_tbls_size *= sizeof(struct iwlagn_scd_bc_tbl); |
---|
938 | 601 | |
---|
939 | 602 | /*It is not allowed to alloc twice, so warn when this happens. |
---|
940 | 603 | * We cannot rely on the previous allocation, so free and fail */ |
---|
.. | .. |
---|
943 | 606 | goto error; |
---|
944 | 607 | } |
---|
945 | 608 | |
---|
946 | | - ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls, |
---|
| 609 | + ret = iwl_pcie_alloc_dma_ptr(trans, &trans->txqs.scd_bc_tbls, |
---|
947 | 610 | bc_tbls_size); |
---|
948 | 611 | if (ret) { |
---|
949 | 612 | IWL_ERR(trans, "Scheduler BC Table allocation failed\n"); |
---|
.. | .. |
---|
957 | 620 | goto error; |
---|
958 | 621 | } |
---|
959 | 622 | |
---|
960 | | - trans_pcie->txq_memory = kcalloc(trans->cfg->base_params->num_of_queues, |
---|
961 | | - sizeof(struct iwl_txq), GFP_KERNEL); |
---|
| 623 | + trans_pcie->txq_memory = |
---|
| 624 | + kcalloc(trans->trans_cfg->base_params->num_of_queues, |
---|
| 625 | + sizeof(struct iwl_txq), GFP_KERNEL); |
---|
962 | 626 | if (!trans_pcie->txq_memory) { |
---|
963 | 627 | IWL_ERR(trans, "Not enough memory for txq\n"); |
---|
964 | 628 | ret = -ENOMEM; |
---|
.. | .. |
---|
966 | 630 | } |
---|
967 | 631 | |
---|
968 | 632 | /* Alloc and init all Tx queues, including the command queue (#4/#9) */ |
---|
969 | | - for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues; |
---|
| 633 | + for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues; |
---|
970 | 634 | txq_id++) { |
---|
971 | | - bool cmd_queue = (txq_id == trans_pcie->cmd_queue); |
---|
| 635 | + bool cmd_queue = (txq_id == trans->txqs.cmd.q_id); |
---|
972 | 636 | |
---|
973 | | - slots_num = cmd_queue ? TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; |
---|
974 | | - trans_pcie->txq[txq_id] = &trans_pcie->txq_memory[txq_id]; |
---|
975 | | - ret = iwl_pcie_txq_alloc(trans, trans_pcie->txq[txq_id], |
---|
976 | | - slots_num, cmd_queue); |
---|
| 637 | + if (cmd_queue) |
---|
| 638 | + slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE, |
---|
| 639 | + trans->cfg->min_txq_size); |
---|
| 640 | + else |
---|
| 641 | + slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE, |
---|
| 642 | + trans->cfg->min_256_ba_txq_size); |
---|
| 643 | + trans->txqs.txq[txq_id] = &trans_pcie->txq_memory[txq_id]; |
---|
| 644 | + ret = iwl_txq_alloc(trans, trans->txqs.txq[txq_id], slots_num, |
---|
| 645 | + cmd_queue); |
---|
977 | 646 | if (ret) { |
---|
978 | 647 | IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id); |
---|
979 | 648 | goto error; |
---|
980 | 649 | } |
---|
981 | | - trans_pcie->txq[txq_id]->id = txq_id; |
---|
| 650 | + trans->txqs.txq[txq_id]->id = txq_id; |
---|
982 | 651 | } |
---|
983 | 652 | |
---|
984 | 653 | return 0; |
---|
.. | .. |
---|
1015 | 684 | spin_unlock(&trans_pcie->irq_lock); |
---|
1016 | 685 | |
---|
1017 | 686 | /* Alloc and init all Tx queues, including the command queue (#4/#9) */ |
---|
1018 | | - for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues; |
---|
| 687 | + for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues; |
---|
1019 | 688 | txq_id++) { |
---|
1020 | | - bool cmd_queue = (txq_id == trans_pcie->cmd_queue); |
---|
| 689 | + bool cmd_queue = (txq_id == trans->txqs.cmd.q_id); |
---|
1021 | 690 | |
---|
1022 | | - slots_num = cmd_queue ? TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; |
---|
1023 | | - ret = iwl_pcie_txq_init(trans, trans_pcie->txq[txq_id], |
---|
1024 | | - slots_num, cmd_queue); |
---|
| 691 | + if (cmd_queue) |
---|
| 692 | + slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE, |
---|
| 693 | + trans->cfg->min_txq_size); |
---|
| 694 | + else |
---|
| 695 | + slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE, |
---|
| 696 | + trans->cfg->min_256_ba_txq_size); |
---|
| 697 | + ret = iwl_txq_init(trans, trans->txqs.txq[txq_id], slots_num, |
---|
| 698 | + cmd_queue); |
---|
1025 | 699 | if (ret) { |
---|
1026 | 700 | IWL_ERR(trans, "Tx %d queue init failed\n", txq_id); |
---|
1027 | 701 | goto error; |
---|
.. | .. |
---|
1034 | 708 | * Circular buffer (TFD queue in DRAM) physical base address |
---|
1035 | 709 | */ |
---|
1036 | 710 | iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(trans, txq_id), |
---|
1037 | | - trans_pcie->txq[txq_id]->dma_addr >> 8); |
---|
| 711 | + trans->txqs.txq[txq_id]->dma_addr >> 8); |
---|
1038 | 712 | } |
---|
1039 | 713 | |
---|
1040 | 714 | iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE); |
---|
1041 | | - if (trans->cfg->base_params->num_of_queues > 20) |
---|
| 715 | + if (trans->trans_cfg->base_params->num_of_queues > 20) |
---|
1042 | 716 | iwl_set_bits_prph(trans, SCD_GP_CTRL, |
---|
1043 | 717 | SCD_GP_CTRL_ENABLE_31_QUEUES); |
---|
1044 | 718 | |
---|
.. | .. |
---|
1078 | 752 | void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, |
---|
1079 | 753 | struct sk_buff_head *skbs) |
---|
1080 | 754 | { |
---|
1081 | | - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
---|
1082 | | - struct iwl_txq *txq = trans_pcie->txq[txq_id]; |
---|
1083 | | - int tfd_num = iwl_pcie_get_cmd_index(txq, ssn); |
---|
1084 | | - int read_ptr = iwl_pcie_get_cmd_index(txq, txq->read_ptr); |
---|
| 755 | + struct iwl_txq *txq = trans->txqs.txq[txq_id]; |
---|
| 756 | + int tfd_num = iwl_txq_get_cmd_index(txq, ssn); |
---|
| 757 | + int read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr); |
---|
1085 | 758 | int last_to_free; |
---|
1086 | 759 | |
---|
1087 | 760 | /* This function is not meant to release cmd queue*/ |
---|
1088 | | - if (WARN_ON(txq_id == trans_pcie->cmd_queue)) |
---|
| 761 | + if (WARN_ON(txq_id == trans->txqs.cmd.q_id)) |
---|
1089 | 762 | return; |
---|
1090 | 763 | |
---|
1091 | 764 | spin_lock_bh(&txq->lock); |
---|
1092 | 765 | |
---|
1093 | | - if (!test_bit(txq_id, trans_pcie->queue_used)) { |
---|
| 766 | + if (!test_bit(txq_id, trans->txqs.queue_used)) { |
---|
1094 | 767 | IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n", |
---|
1095 | 768 | txq_id, ssn); |
---|
1096 | 769 | goto out; |
---|
.. | .. |
---|
1104 | 777 | |
---|
1105 | 778 | /*Since we free until index _not_ inclusive, the one before index is |
---|
1106 | 779 | * the last we will free. This one must be used */ |
---|
1107 | | - last_to_free = iwl_queue_dec_wrap(trans, tfd_num); |
---|
| 780 | + last_to_free = iwl_txq_dec_wrap(trans, tfd_num); |
---|
1108 | 781 | |
---|
1109 | | - if (!iwl_queue_used(txq, last_to_free)) { |
---|
| 782 | + if (!iwl_txq_used(txq, last_to_free)) { |
---|
1110 | 783 | IWL_ERR(trans, |
---|
1111 | 784 | "%s: Read index for txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n", |
---|
1112 | 785 | __func__, txq_id, last_to_free, |
---|
1113 | | - trans->cfg->base_params->max_tfd_queue_size, |
---|
| 786 | + trans->trans_cfg->base_params->max_tfd_queue_size, |
---|
1114 | 787 | txq->write_ptr, txq->read_ptr); |
---|
1115 | 788 | goto out; |
---|
1116 | 789 | } |
---|
.. | .. |
---|
1120 | 793 | |
---|
1121 | 794 | for (; |
---|
1122 | 795 | read_ptr != tfd_num; |
---|
1123 | | - txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr), |
---|
1124 | | - read_ptr = iwl_pcie_get_cmd_index(txq, txq->read_ptr)) { |
---|
| 796 | + txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr), |
---|
| 797 | + read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr)) { |
---|
1125 | 798 | struct sk_buff *skb = txq->entries[read_ptr].skb; |
---|
1126 | 799 | |
---|
1127 | 800 | if (WARN_ON_ONCE(!skb)) |
---|
1128 | 801 | continue; |
---|
1129 | 802 | |
---|
1130 | | - iwl_pcie_free_tso_page(trans_pcie, skb); |
---|
| 803 | + iwl_txq_free_tso_page(trans, skb); |
---|
1131 | 804 | |
---|
1132 | 805 | __skb_queue_tail(skbs, skb); |
---|
1133 | 806 | |
---|
1134 | 807 | txq->entries[read_ptr].skb = NULL; |
---|
1135 | 808 | |
---|
1136 | | - if (!trans->cfg->use_tfh) |
---|
1137 | | - iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq); |
---|
| 809 | + if (!trans->trans_cfg->use_tfh) |
---|
| 810 | + iwl_txq_gen1_inval_byte_cnt_tbl(trans, txq); |
---|
1138 | 811 | |
---|
1139 | 812 | iwl_pcie_txq_free_tfd(trans, txq); |
---|
1140 | 813 | } |
---|
1141 | 814 | |
---|
1142 | 815 | iwl_pcie_txq_progress(txq); |
---|
1143 | 816 | |
---|
1144 | | - if (iwl_queue_space(trans, txq) > txq->low_mark && |
---|
1145 | | - test_bit(txq_id, trans_pcie->queue_stopped)) { |
---|
| 817 | + if (iwl_txq_space(trans, txq) > txq->low_mark && |
---|
| 818 | + test_bit(txq_id, trans->txqs.queue_stopped)) { |
---|
1146 | 819 | struct sk_buff_head overflow_skbs; |
---|
1147 | 820 | |
---|
1148 | 821 | __skb_queue_head_init(&overflow_skbs); |
---|
1149 | 822 | skb_queue_splice_init(&txq->overflow_q, &overflow_skbs); |
---|
| 823 | + |
---|
| 824 | + /* |
---|
| 825 | + * We are going to transmit from the overflow queue. |
---|
| 826 | + * Remember this state so that wait_for_txq_empty will know we |
---|
| 827 | + * are adding more packets to the TFD queue. It cannot rely on |
---|
| 828 | + * the state of &txq->overflow_q, as we just emptied it, but |
---|
| 829 | + * haven't TXed the content yet. |
---|
| 830 | + */ |
---|
| 831 | + txq->overflow_tx = true; |
---|
1150 | 832 | |
---|
1151 | 833 | /* |
---|
1152 | 834 | * This is tricky: we are in reclaim path which is non |
---|
.. | .. |
---|
1159 | 841 | |
---|
1160 | 842 | while (!skb_queue_empty(&overflow_skbs)) { |
---|
1161 | 843 | struct sk_buff *skb = __skb_dequeue(&overflow_skbs); |
---|
1162 | | - struct iwl_device_cmd *dev_cmd_ptr; |
---|
| 844 | + struct iwl_device_tx_cmd *dev_cmd_ptr; |
---|
1163 | 845 | |
---|
1164 | 846 | dev_cmd_ptr = *(void **)((u8 *)skb->cb + |
---|
1165 | | - trans_pcie->dev_cmd_offs); |
---|
| 847 | + trans->txqs.dev_cmd_offs); |
---|
1166 | 848 | |
---|
1167 | 849 | /* |
---|
1168 | 850 | * Note that we can very well be overflowing again. |
---|
1169 | | - * In that case, iwl_queue_space will be small again |
---|
| 851 | + * In that case, iwl_txq_space will be small again |
---|
1170 | 852 | * and we won't wake mac80211's queue. |
---|
1171 | 853 | */ |
---|
1172 | 854 | iwl_trans_tx(trans, skb, dev_cmd_ptr, txq_id); |
---|
1173 | 855 | } |
---|
1174 | | - spin_lock_bh(&txq->lock); |
---|
1175 | 856 | |
---|
1176 | | - if (iwl_queue_space(trans, txq) > txq->low_mark) |
---|
| 857 | + if (iwl_txq_space(trans, txq) > txq->low_mark) |
---|
1177 | 858 | iwl_wake_queue(trans, txq); |
---|
1178 | | - } |
---|
1179 | 859 | |
---|
1180 | | - if (txq->read_ptr == txq->write_ptr) { |
---|
1181 | | - IWL_DEBUG_RPM(trans, "Q %d - last tx reclaimed\n", txq->id); |
---|
1182 | | - iwl_trans_unref(trans); |
---|
| 860 | + spin_lock_bh(&txq->lock); |
---|
| 861 | + txq->overflow_tx = false; |
---|
1183 | 862 | } |
---|
1184 | 863 | |
---|
1185 | 864 | out: |
---|
| 865 | + spin_unlock_bh(&txq->lock); |
---|
| 866 | +} |
---|
| 867 | + |
---|
| 868 | +/* Set wr_ptr of specific device and txq */ |
---|
| 869 | +void iwl_trans_pcie_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr) |
---|
| 870 | +{ |
---|
| 871 | + struct iwl_txq *txq = trans->txqs.txq[txq_id]; |
---|
| 872 | + |
---|
| 873 | + spin_lock_bh(&txq->lock); |
---|
| 874 | + |
---|
| 875 | + txq->write_ptr = ptr; |
---|
| 876 | + txq->read_ptr = txq->write_ptr; |
---|
| 877 | + |
---|
1186 | 878 | spin_unlock_bh(&txq->lock); |
---|
1187 | 879 | } |
---|
1188 | 880 | |
---|
.. | .. |
---|
1190 | 882 | const struct iwl_host_cmd *cmd) |
---|
1191 | 883 | { |
---|
1192 | 884 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
---|
1193 | | - const struct iwl_cfg *cfg = trans->cfg; |
---|
1194 | 885 | int ret; |
---|
1195 | 886 | |
---|
1196 | 887 | lockdep_assert_held(&trans_pcie->reg_lock); |
---|
1197 | 888 | |
---|
1198 | | - if (!(cmd->flags & CMD_SEND_IN_IDLE) && |
---|
1199 | | - !trans_pcie->ref_cmd_in_flight) { |
---|
1200 | | - trans_pcie->ref_cmd_in_flight = true; |
---|
1201 | | - IWL_DEBUG_RPM(trans, "set ref_cmd_in_flight - ref\n"); |
---|
1202 | | - iwl_trans_ref(trans); |
---|
1203 | | - } |
---|
| 889 | + /* Make sure the NIC is still alive in the bus */ |
---|
| 890 | + if (test_bit(STATUS_TRANS_DEAD, &trans->status)) |
---|
| 891 | + return -ENODEV; |
---|
1204 | 892 | |
---|
1205 | 893 | /* |
---|
1206 | 894 | * wake up the NIC to make sure that the firmware will see the host |
---|
.. | .. |
---|
1208 | 896 | * returned. This needs to be done only on NICs that have |
---|
1209 | 897 | * apmg_wake_up_wa set. |
---|
1210 | 898 | */ |
---|
1211 | | - if (cfg->base_params->apmg_wake_up_wa && |
---|
| 899 | + if (trans->trans_cfg->base_params->apmg_wake_up_wa && |
---|
1212 | 900 | !trans_pcie->cmd_hold_nic_awake) { |
---|
1213 | 901 | __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, |
---|
1214 | | - BIT(cfg->csr->flag_mac_access_req)); |
---|
| 902 | + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); |
---|
1215 | 903 | |
---|
1216 | 904 | ret = iwl_poll_bit(trans, CSR_GP_CNTRL, |
---|
1217 | | - BIT(cfg->csr->flag_val_mac_access_en), |
---|
1218 | | - (BIT(cfg->csr->flag_mac_clock_ready) | |
---|
| 905 | + CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN, |
---|
| 906 | + (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY | |
---|
1219 | 907 | CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), |
---|
1220 | 908 | 15000); |
---|
1221 | 909 | if (ret < 0) { |
---|
1222 | 910 | __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, |
---|
1223 | | - BIT(cfg->csr->flag_mac_access_req)); |
---|
| 911 | + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); |
---|
1224 | 912 | IWL_ERR(trans, "Failed to wake NIC for hcmd\n"); |
---|
1225 | 913 | return -EIO; |
---|
1226 | 914 | } |
---|
.. | .. |
---|
1240 | 928 | static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx) |
---|
1241 | 929 | { |
---|
1242 | 930 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
---|
1243 | | - struct iwl_txq *txq = trans_pcie->txq[txq_id]; |
---|
1244 | | - unsigned long flags; |
---|
| 931 | + struct iwl_txq *txq = trans->txqs.txq[txq_id]; |
---|
1245 | 932 | int nfreed = 0; |
---|
1246 | 933 | u16 r; |
---|
1247 | 934 | |
---|
1248 | 935 | lockdep_assert_held(&txq->lock); |
---|
1249 | 936 | |
---|
1250 | | - idx = iwl_pcie_get_cmd_index(txq, idx); |
---|
1251 | | - r = iwl_pcie_get_cmd_index(txq, txq->read_ptr); |
---|
| 937 | + idx = iwl_txq_get_cmd_index(txq, idx); |
---|
| 938 | + r = iwl_txq_get_cmd_index(txq, txq->read_ptr); |
---|
1252 | 939 | |
---|
1253 | | - if (idx >= trans->cfg->base_params->max_tfd_queue_size || |
---|
1254 | | - (!iwl_queue_used(txq, idx))) { |
---|
1255 | | - WARN_ONCE(test_bit(txq_id, trans_pcie->queue_used), |
---|
| 940 | + if (idx >= trans->trans_cfg->base_params->max_tfd_queue_size || |
---|
| 941 | + (!iwl_txq_used(txq, idx))) { |
---|
| 942 | + WARN_ONCE(test_bit(txq_id, trans->txqs.queue_used), |
---|
1256 | 943 | "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n", |
---|
1257 | 944 | __func__, txq_id, idx, |
---|
1258 | | - trans->cfg->base_params->max_tfd_queue_size, |
---|
| 945 | + trans->trans_cfg->base_params->max_tfd_queue_size, |
---|
1259 | 946 | txq->write_ptr, txq->read_ptr); |
---|
1260 | 947 | return; |
---|
1261 | 948 | } |
---|
1262 | 949 | |
---|
1263 | | - for (idx = iwl_queue_inc_wrap(trans, idx); r != idx; |
---|
1264 | | - r = iwl_queue_inc_wrap(trans, r)) { |
---|
1265 | | - txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr); |
---|
| 950 | + for (idx = iwl_txq_inc_wrap(trans, idx); r != idx; |
---|
| 951 | + r = iwl_txq_inc_wrap(trans, r)) { |
---|
| 952 | + txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr); |
---|
1266 | 953 | |
---|
1267 | 954 | if (nfreed++ > 0) { |
---|
1268 | 955 | IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n", |
---|
.. | .. |
---|
1272 | 959 | } |
---|
1273 | 960 | |
---|
1274 | 961 | if (txq->read_ptr == txq->write_ptr) { |
---|
1275 | | - spin_lock_irqsave(&trans_pcie->reg_lock, flags); |
---|
| 962 | + /* BHs are also disabled due to txq->lock */ |
---|
| 963 | + spin_lock(&trans_pcie->reg_lock); |
---|
1276 | 964 | iwl_pcie_clear_cmd_in_flight(trans); |
---|
1277 | | - spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); |
---|
| 965 | + spin_unlock(&trans_pcie->reg_lock); |
---|
1278 | 966 | } |
---|
1279 | 967 | |
---|
1280 | 968 | iwl_pcie_txq_progress(txq); |
---|
.. | .. |
---|
1314 | 1002 | unsigned int wdg_timeout) |
---|
1315 | 1003 | { |
---|
1316 | 1004 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
---|
1317 | | - struct iwl_txq *txq = trans_pcie->txq[txq_id]; |
---|
| 1005 | + struct iwl_txq *txq = trans->txqs.txq[txq_id]; |
---|
1318 | 1006 | int fifo = -1; |
---|
1319 | 1007 | bool scd_bug = false; |
---|
1320 | 1008 | |
---|
1321 | | - if (test_and_set_bit(txq_id, trans_pcie->queue_used)) |
---|
| 1009 | + if (test_and_set_bit(txq_id, trans->txqs.queue_used)) |
---|
1322 | 1010 | WARN_ONCE(1, "queue %d already used - expect issues", txq_id); |
---|
1323 | 1011 | |
---|
1324 | 1012 | txq->wd_timeout = msecs_to_jiffies(wdg_timeout); |
---|
.. | .. |
---|
1327 | 1015 | fifo = cfg->fifo; |
---|
1328 | 1016 | |
---|
1329 | 1017 | /* Disable the scheduler prior configuring the cmd queue */ |
---|
1330 | | - if (txq_id == trans_pcie->cmd_queue && |
---|
| 1018 | + if (txq_id == trans->txqs.cmd.q_id && |
---|
1331 | 1019 | trans_pcie->scd_set_active) |
---|
1332 | 1020 | iwl_scd_enable_set_active(trans, 0); |
---|
1333 | 1021 | |
---|
.. | .. |
---|
1335 | 1023 | iwl_scd_txq_set_inactive(trans, txq_id); |
---|
1336 | 1024 | |
---|
1337 | 1025 | /* Set this queue as a chain-building queue unless it is CMD */ |
---|
1338 | | - if (txq_id != trans_pcie->cmd_queue) |
---|
| 1026 | + if (txq_id != trans->txqs.cmd.q_id) |
---|
1339 | 1027 | iwl_scd_txq_set_chain(trans, txq_id); |
---|
1340 | 1028 | |
---|
1341 | 1029 | if (cfg->aggregate) { |
---|
.. | .. |
---|
1369 | 1057 | * this sad hardware issue. |
---|
1370 | 1058 | * This bug has been fixed on devices 9000 and up. |
---|
1371 | 1059 | */ |
---|
1372 | | - scd_bug = !trans->cfg->mq_rx_supported && |
---|
| 1060 | + scd_bug = !trans->trans_cfg->mq_rx_supported && |
---|
1373 | 1061 | !((ssn - txq->write_ptr) & 0x3f) && |
---|
1374 | 1062 | (ssn != txq->write_ptr); |
---|
1375 | 1063 | if (scd_bug) |
---|
.. | .. |
---|
1405 | 1093 | SCD_QUEUE_STTS_REG_MSK); |
---|
1406 | 1094 | |
---|
1407 | 1095 | /* enable the scheduler for this queue (only) */ |
---|
1408 | | - if (txq_id == trans_pcie->cmd_queue && |
---|
| 1096 | + if (txq_id == trans->txqs.cmd.q_id && |
---|
1409 | 1097 | trans_pcie->scd_set_active) |
---|
1410 | 1098 | iwl_scd_enable_set_active(trans, BIT(txq_id)); |
---|
1411 | 1099 | |
---|
.. | .. |
---|
1424 | 1112 | void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id, |
---|
1425 | 1113 | bool shared_mode) |
---|
1426 | 1114 | { |
---|
1427 | | - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
---|
1428 | | - struct iwl_txq *txq = trans_pcie->txq[txq_id]; |
---|
| 1115 | + struct iwl_txq *txq = trans->txqs.txq[txq_id]; |
---|
1429 | 1116 | |
---|
1430 | 1117 | txq->ampdu = !shared_mode; |
---|
1431 | 1118 | } |
---|
.. | .. |
---|
1438 | 1125 | SCD_TX_STTS_QUEUE_OFFSET(txq_id); |
---|
1439 | 1126 | static const u32 zero_val[4] = {}; |
---|
1440 | 1127 | |
---|
1441 | | - trans_pcie->txq[txq_id]->frozen_expiry_remainder = 0; |
---|
1442 | | - trans_pcie->txq[txq_id]->frozen = false; |
---|
| 1128 | + trans->txqs.txq[txq_id]->frozen_expiry_remainder = 0; |
---|
| 1129 | + trans->txqs.txq[txq_id]->frozen = false; |
---|
1443 | 1130 | |
---|
1444 | 1131 | /* |
---|
1445 | 1132 | * Upon HW Rfkill - we stop the device, and then stop the queues |
---|
.. | .. |
---|
1447 | 1134 | * allow the op_mode to call txq_disable after it already called |
---|
1448 | 1135 | * stop_device. |
---|
1449 | 1136 | */ |
---|
1450 | | - if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) { |
---|
| 1137 | + if (!test_and_clear_bit(txq_id, trans->txqs.queue_used)) { |
---|
1451 | 1138 | WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status), |
---|
1452 | 1139 | "queue %d not used", txq_id); |
---|
1453 | 1140 | return; |
---|
.. | .. |
---|
1461 | 1148 | } |
---|
1462 | 1149 | |
---|
1463 | 1150 | iwl_pcie_txq_unmap(trans, txq_id); |
---|
1464 | | - trans_pcie->txq[txq_id]->ampdu = false; |
---|
| 1151 | + trans->txqs.txq[txq_id]->ampdu = false; |
---|
1465 | 1152 | |
---|
1466 | 1153 | IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id); |
---|
1467 | 1154 | } |
---|
.. | .. |
---|
1481 | 1168 | struct iwl_host_cmd *cmd) |
---|
1482 | 1169 | { |
---|
1483 | 1170 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
---|
1484 | | - struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue]; |
---|
| 1171 | + struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id]; |
---|
1485 | 1172 | struct iwl_device_cmd *out_cmd; |
---|
1486 | 1173 | struct iwl_cmd_meta *out_meta; |
---|
1487 | | - unsigned long flags; |
---|
1488 | 1174 | void *dup_buf = NULL; |
---|
1489 | 1175 | dma_addr_t phys_addr; |
---|
1490 | 1176 | int idx; |
---|
.. | .. |
---|
1495 | 1181 | u32 cmd_pos; |
---|
1496 | 1182 | const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD]; |
---|
1497 | 1183 | u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD]; |
---|
1498 | | - unsigned long flags2; |
---|
| 1184 | + unsigned long flags; |
---|
1499 | 1185 | |
---|
1500 | 1186 | if (WARN(!trans->wide_cmd_header && |
---|
1501 | 1187 | group_id > IWL_ALWAYS_LONG_GROUP, |
---|
.. | .. |
---|
1579 | 1265 | goto free_dup_buf; |
---|
1580 | 1266 | } |
---|
1581 | 1267 | |
---|
1582 | | - spin_lock_irqsave(&txq->lock, flags2); |
---|
| 1268 | + spin_lock_irqsave(&txq->lock, flags); |
---|
1583 | 1269 | |
---|
1584 | | - if (iwl_queue_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) { |
---|
1585 | | - spin_unlock_irqrestore(&txq->lock, flags2); |
---|
| 1270 | + if (iwl_txq_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) { |
---|
| 1271 | + spin_unlock_irqrestore(&txq->lock, flags); |
---|
1586 | 1272 | |
---|
1587 | 1273 | IWL_ERR(trans, "No space in command queue\n"); |
---|
1588 | 1274 | iwl_op_mode_cmd_queue_full(trans->op_mode); |
---|
.. | .. |
---|
1590 | 1276 | goto free_dup_buf; |
---|
1591 | 1277 | } |
---|
1592 | 1278 | |
---|
1593 | | - idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr); |
---|
| 1279 | + idx = iwl_txq_get_cmd_index(txq, txq->write_ptr); |
---|
1594 | 1280 | out_cmd = txq->entries[idx].cmd; |
---|
1595 | 1281 | out_meta = &txq->entries[idx].meta; |
---|
1596 | 1282 | |
---|
.. | .. |
---|
1608 | 1294 | sizeof(struct iwl_cmd_header_wide)); |
---|
1609 | 1295 | out_cmd->hdr_wide.reserved = 0; |
---|
1610 | 1296 | out_cmd->hdr_wide.sequence = |
---|
1611 | | - cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) | |
---|
| 1297 | + cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) | |
---|
1612 | 1298 | INDEX_TO_SEQ(txq->write_ptr)); |
---|
1613 | 1299 | |
---|
1614 | 1300 | cmd_pos = sizeof(struct iwl_cmd_header_wide); |
---|
.. | .. |
---|
1616 | 1302 | } else { |
---|
1617 | 1303 | out_cmd->hdr.cmd = iwl_cmd_opcode(cmd->id); |
---|
1618 | 1304 | out_cmd->hdr.sequence = |
---|
1619 | | - cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) | |
---|
| 1305 | + cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) | |
---|
1620 | 1306 | INDEX_TO_SEQ(txq->write_ptr)); |
---|
1621 | 1307 | out_cmd->hdr.group_id = 0; |
---|
1622 | 1308 | |
---|
.. | .. |
---|
1667 | 1353 | iwl_get_cmd_string(trans, cmd->id), |
---|
1668 | 1354 | group_id, out_cmd->hdr.cmd, |
---|
1669 | 1355 | le16_to_cpu(out_cmd->hdr.sequence), |
---|
1670 | | - cmd_size, txq->write_ptr, idx, trans_pcie->cmd_queue); |
---|
| 1356 | + cmd_size, txq->write_ptr, idx, trans->txqs.cmd.q_id); |
---|
1671 | 1357 | |
---|
1672 | 1358 | /* start the TFD with the minimum copy bytes */ |
---|
1673 | 1359 | tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE); |
---|
1674 | 1360 | memcpy(&txq->first_tb_bufs[idx], &out_cmd->hdr, tb0_size); |
---|
1675 | 1361 | iwl_pcie_txq_build_tfd(trans, txq, |
---|
1676 | | - iwl_pcie_get_first_tb_dma(txq, idx), |
---|
| 1362 | + iwl_txq_get_first_tb_dma(txq, idx), |
---|
1677 | 1363 | tb0_size, true); |
---|
1678 | 1364 | |
---|
1679 | 1365 | /* map first command fragment, if any remains */ |
---|
.. | .. |
---|
1683 | 1369 | copy_size - tb0_size, |
---|
1684 | 1370 | DMA_TO_DEVICE); |
---|
1685 | 1371 | if (dma_mapping_error(trans->dev, phys_addr)) { |
---|
1686 | | - iwl_pcie_tfd_unmap(trans, out_meta, txq, |
---|
1687 | | - txq->write_ptr); |
---|
| 1372 | + iwl_txq_gen1_tfd_unmap(trans, out_meta, txq, |
---|
| 1373 | + txq->write_ptr); |
---|
1688 | 1374 | idx = -ENOMEM; |
---|
1689 | 1375 | goto out; |
---|
1690 | 1376 | } |
---|
.. | .. |
---|
1707 | 1393 | phys_addr = dma_map_single(trans->dev, (void *)data, |
---|
1708 | 1394 | cmdlen[i], DMA_TO_DEVICE); |
---|
1709 | 1395 | if (dma_mapping_error(trans->dev, phys_addr)) { |
---|
1710 | | - iwl_pcie_tfd_unmap(trans, out_meta, txq, |
---|
1711 | | - txq->write_ptr); |
---|
| 1396 | + iwl_txq_gen1_tfd_unmap(trans, out_meta, txq, |
---|
| 1397 | + txq->write_ptr); |
---|
1712 | 1398 | idx = -ENOMEM; |
---|
1713 | 1399 | goto out; |
---|
1714 | 1400 | } |
---|
.. | .. |
---|
1719 | 1405 | BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE); |
---|
1720 | 1406 | out_meta->flags = cmd->flags; |
---|
1721 | 1407 | if (WARN_ON_ONCE(txq->entries[idx].free_buf)) |
---|
1722 | | - kzfree(txq->entries[idx].free_buf); |
---|
| 1408 | + kfree_sensitive(txq->entries[idx].free_buf); |
---|
1723 | 1409 | txq->entries[idx].free_buf = dup_buf; |
---|
1724 | 1410 | |
---|
1725 | 1411 | trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide); |
---|
.. | .. |
---|
1728 | 1414 | if (txq->read_ptr == txq->write_ptr && txq->wd_timeout) |
---|
1729 | 1415 | mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout); |
---|
1730 | 1416 | |
---|
1731 | | - spin_lock_irqsave(&trans_pcie->reg_lock, flags); |
---|
| 1417 | + spin_lock(&trans_pcie->reg_lock); |
---|
1732 | 1418 | ret = iwl_pcie_set_cmd_in_flight(trans, cmd); |
---|
1733 | 1419 | if (ret < 0) { |
---|
1734 | 1420 | idx = ret; |
---|
1735 | | - spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); |
---|
1736 | | - goto out; |
---|
| 1421 | + goto unlock_reg; |
---|
1737 | 1422 | } |
---|
1738 | 1423 | |
---|
1739 | 1424 | /* Increment and update queue's write index */ |
---|
1740 | | - txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr); |
---|
| 1425 | + txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr); |
---|
1741 | 1426 | iwl_pcie_txq_inc_wr_ptr(trans, txq); |
---|
1742 | 1427 | |
---|
1743 | | - spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); |
---|
1744 | | - |
---|
| 1428 | + unlock_reg: |
---|
| 1429 | + spin_unlock(&trans_pcie->reg_lock); |
---|
1745 | 1430 | out: |
---|
1746 | | - spin_unlock_irqrestore(&txq->lock, flags2); |
---|
| 1431 | + spin_unlock_irqrestore(&txq->lock, flags); |
---|
1747 | 1432 | free_dup_buf: |
---|
1748 | 1433 | if (idx < 0) |
---|
1749 | 1434 | kfree(dup_buf); |
---|
.. | .. |
---|
1767 | 1452 | struct iwl_device_cmd *cmd; |
---|
1768 | 1453 | struct iwl_cmd_meta *meta; |
---|
1769 | 1454 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
---|
1770 | | - struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue]; |
---|
| 1455 | + struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id]; |
---|
1771 | 1456 | |
---|
1772 | 1457 | /* If a Tx command is being handled and it isn't in the actual |
---|
1773 | 1458 | * command queue then a command routing bug has been introduced |
---|
1774 | 1459 | * in the queue management code. */ |
---|
1775 | | - if (WARN(txq_id != trans_pcie->cmd_queue, |
---|
| 1460 | + if (WARN(txq_id != trans->txqs.cmd.q_id, |
---|
1776 | 1461 | "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n", |
---|
1777 | | - txq_id, trans_pcie->cmd_queue, sequence, txq->read_ptr, |
---|
| 1462 | + txq_id, trans->txqs.cmd.q_id, sequence, txq->read_ptr, |
---|
1778 | 1463 | txq->write_ptr)) { |
---|
1779 | 1464 | iwl_print_hex_error(trans, pkt, 32); |
---|
1780 | 1465 | return; |
---|
.. | .. |
---|
1782 | 1467 | |
---|
1783 | 1468 | spin_lock_bh(&txq->lock); |
---|
1784 | 1469 | |
---|
1785 | | - cmd_index = iwl_pcie_get_cmd_index(txq, index); |
---|
| 1470 | + cmd_index = iwl_txq_get_cmd_index(txq, index); |
---|
1786 | 1471 | cmd = txq->entries[cmd_index].cmd; |
---|
1787 | 1472 | meta = &txq->entries[cmd_index].meta; |
---|
1788 | 1473 | group_id = cmd->hdr.group_id; |
---|
1789 | 1474 | cmd_id = iwl_cmd_id(cmd->hdr.cmd, group_id, 0); |
---|
1790 | 1475 | |
---|
1791 | | - iwl_pcie_tfd_unmap(trans, meta, txq, index); |
---|
| 1476 | + iwl_txq_gen1_tfd_unmap(trans, meta, txq, index); |
---|
1792 | 1477 | |
---|
1793 | 1478 | /* Input error checking is done when commands are added to queue. */ |
---|
1794 | 1479 | if (meta->flags & CMD_WANT_SKB) { |
---|
.. | .. |
---|
1814 | 1499 | IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n", |
---|
1815 | 1500 | iwl_get_cmd_string(trans, cmd_id)); |
---|
1816 | 1501 | wake_up(&trans_pcie->wait_command_queue); |
---|
1817 | | - } |
---|
1818 | | - |
---|
1819 | | - if (meta->flags & CMD_MAKE_TRANS_IDLE) { |
---|
1820 | | - IWL_DEBUG_INFO(trans, "complete %s - mark trans as idle\n", |
---|
1821 | | - iwl_get_cmd_string(trans, cmd->hdr.cmd)); |
---|
1822 | | - set_bit(STATUS_TRANS_IDLE, &trans->status); |
---|
1823 | | - wake_up(&trans_pcie->d0i3_waitq); |
---|
1824 | | - } |
---|
1825 | | - |
---|
1826 | | - if (meta->flags & CMD_WAKE_UP_TRANS) { |
---|
1827 | | - IWL_DEBUG_INFO(trans, "complete %s - clear trans idle flag\n", |
---|
1828 | | - iwl_get_cmd_string(trans, cmd->hdr.cmd)); |
---|
1829 | | - clear_bit(STATUS_TRANS_IDLE, &trans->status); |
---|
1830 | | - wake_up(&trans_pcie->d0i3_waitq); |
---|
1831 | 1502 | } |
---|
1832 | 1503 | |
---|
1833 | 1504 | meta->flags = 0; |
---|
.. | .. |
---|
1860 | 1531 | struct iwl_host_cmd *cmd) |
---|
1861 | 1532 | { |
---|
1862 | 1533 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
---|
1863 | | - struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue]; |
---|
| 1534 | + struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id]; |
---|
1864 | 1535 | int cmd_idx; |
---|
1865 | 1536 | int ret; |
---|
1866 | 1537 | |
---|
.. | .. |
---|
1875 | 1546 | |
---|
1876 | 1547 | IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", |
---|
1877 | 1548 | iwl_get_cmd_string(trans, cmd->id)); |
---|
1878 | | - |
---|
1879 | | - if (pm_runtime_suspended(&trans_pcie->pci_dev->dev)) { |
---|
1880 | | - ret = wait_event_timeout(trans_pcie->d0i3_waitq, |
---|
1881 | | - pm_runtime_active(&trans_pcie->pci_dev->dev), |
---|
1882 | | - msecs_to_jiffies(IWL_TRANS_IDLE_TIMEOUT)); |
---|
1883 | | - if (!ret) { |
---|
1884 | | - IWL_ERR(trans, "Timeout exiting D0i3 before hcmd\n"); |
---|
1885 | | - return -ETIMEDOUT; |
---|
1886 | | - } |
---|
1887 | | - } |
---|
1888 | 1549 | |
---|
1889 | 1550 | cmd_idx = iwl_pcie_enqueue_hcmd(trans, cmd); |
---|
1890 | 1551 | if (cmd_idx < 0) { |
---|
.. | .. |
---|
1913 | 1574 | iwl_get_cmd_string(trans, cmd->id)); |
---|
1914 | 1575 | ret = -ETIMEDOUT; |
---|
1915 | 1576 | |
---|
1916 | | - iwl_force_nmi(trans); |
---|
1917 | | - iwl_trans_fw_error(trans); |
---|
1918 | | - |
---|
| 1577 | + iwl_trans_pcie_sync_nmi(trans); |
---|
1919 | 1578 | goto cancel; |
---|
1920 | 1579 | } |
---|
1921 | 1580 | |
---|
1922 | 1581 | if (test_bit(STATUS_FW_ERROR, &trans->status)) { |
---|
1923 | | - iwl_trans_dump_regs(trans); |
---|
| 1582 | + iwl_trans_pcie_dump_regs(trans); |
---|
1924 | 1583 | IWL_ERR(trans, "FW error in SYNC CMD %s\n", |
---|
1925 | 1584 | iwl_get_cmd_string(trans, cmd->id)); |
---|
1926 | 1585 | dump_stack(); |
---|
.. | .. |
---|
1965 | 1624 | |
---|
1966 | 1625 | int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) |
---|
1967 | 1626 | { |
---|
| 1627 | + /* Make sure the NIC is still alive on the bus */ |
---|
| 1628 | + if (test_bit(STATUS_TRANS_DEAD, &trans->status)) |
---|
| 1629 | + return -ENODEV; |
---|
| 1630 | + |
---|
1968 | 1631 | if (!(cmd->flags & CMD_SEND_IN_RFKILL) && |
---|
1969 | 1632 | test_bit(STATUS_RFKILL_OPMODE, &trans->status)) { |
---|
1970 | 1633 | IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n", |
---|
.. | .. |
---|
1981 | 1644 | |
---|
1982 | 1645 | static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb, |
---|
1983 | 1646 | struct iwl_txq *txq, u8 hdr_len, |
---|
1984 | | - struct iwl_cmd_meta *out_meta, |
---|
1985 | | - struct iwl_device_cmd *dev_cmd, u16 tb1_len) |
---|
| 1647 | + struct iwl_cmd_meta *out_meta) |
---|
1986 | 1648 | { |
---|
1987 | | - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
---|
1988 | | - u16 tb2_len; |
---|
| 1649 | + u16 head_tb_len; |
---|
1989 | 1650 | int i; |
---|
1990 | 1651 | |
---|
1991 | 1652 | /* |
---|
1992 | 1653 | * Set up TFD's third entry to point directly to remainder |
---|
1993 | 1654 | * of skb's head, if any |
---|
1994 | 1655 | */ |
---|
1995 | | - tb2_len = skb_headlen(skb) - hdr_len; |
---|
| 1656 | + head_tb_len = skb_headlen(skb) - hdr_len; |
---|
1996 | 1657 | |
---|
1997 | | - if (tb2_len > 0) { |
---|
1998 | | - dma_addr_t tb2_phys = dma_map_single(trans->dev, |
---|
1999 | | - skb->data + hdr_len, |
---|
2000 | | - tb2_len, DMA_TO_DEVICE); |
---|
2001 | | - if (unlikely(dma_mapping_error(trans->dev, tb2_phys))) { |
---|
2002 | | - iwl_pcie_tfd_unmap(trans, out_meta, txq, |
---|
2003 | | - txq->write_ptr); |
---|
| 1658 | + if (head_tb_len > 0) { |
---|
| 1659 | + dma_addr_t tb_phys = dma_map_single(trans->dev, |
---|
| 1660 | + skb->data + hdr_len, |
---|
| 1661 | + head_tb_len, DMA_TO_DEVICE); |
---|
| 1662 | + if (unlikely(dma_mapping_error(trans->dev, tb_phys))) |
---|
2004 | 1663 | return -EINVAL; |
---|
2005 | | - } |
---|
2006 | | - iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, false); |
---|
| 1664 | + trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb->data + hdr_len, |
---|
| 1665 | + tb_phys, head_tb_len); |
---|
| 1666 | + iwl_pcie_txq_build_tfd(trans, txq, tb_phys, head_tb_len, false); |
---|
2007 | 1667 | } |
---|
2008 | 1668 | |
---|
2009 | 1669 | /* set up the remaining entries to point to the data */ |
---|
.. | .. |
---|
2018 | 1678 | tb_phys = skb_frag_dma_map(trans->dev, frag, 0, |
---|
2019 | 1679 | skb_frag_size(frag), DMA_TO_DEVICE); |
---|
2020 | 1680 | |
---|
2021 | | - if (unlikely(dma_mapping_error(trans->dev, tb_phys))) { |
---|
2022 | | - iwl_pcie_tfd_unmap(trans, out_meta, txq, |
---|
2023 | | - txq->write_ptr); |
---|
| 1681 | + if (unlikely(dma_mapping_error(trans->dev, tb_phys))) |
---|
2024 | 1682 | return -EINVAL; |
---|
2025 | | - } |
---|
| 1683 | + trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb_frag_address(frag), |
---|
| 1684 | + tb_phys, skb_frag_size(frag)); |
---|
2026 | 1685 | tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys, |
---|
2027 | 1686 | skb_frag_size(frag), false); |
---|
| 1687 | + if (tb_idx < 0) |
---|
| 1688 | + return tb_idx; |
---|
2028 | 1689 | |
---|
2029 | 1690 | out_meta->tbs |= BIT(tb_idx); |
---|
2030 | 1691 | } |
---|
2031 | 1692 | |
---|
2032 | | - trace_iwlwifi_dev_tx(trans->dev, skb, |
---|
2033 | | - iwl_pcie_get_tfd(trans, txq, txq->write_ptr), |
---|
2034 | | - trans_pcie->tfd_size, |
---|
2035 | | - &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, |
---|
2036 | | - hdr_len); |
---|
2037 | | - trace_iwlwifi_dev_tx_data(trans->dev, skb, hdr_len); |
---|
2038 | 1693 | return 0; |
---|
2039 | 1694 | } |
---|
2040 | 1695 | |
---|
2041 | 1696 | #ifdef CONFIG_INET |
---|
2042 | | -struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len) |
---|
2043 | | -{ |
---|
2044 | | - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
---|
2045 | | - struct iwl_tso_hdr_page *p = this_cpu_ptr(trans_pcie->tso_hdr_page); |
---|
2046 | | - |
---|
2047 | | - if (!p->page) |
---|
2048 | | - goto alloc; |
---|
2049 | | - |
---|
2050 | | - /* enough room on this page */ |
---|
2051 | | - if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE) |
---|
2052 | | - return p; |
---|
2053 | | - |
---|
2054 | | - /* We don't have enough room on this page, get a new one. */ |
---|
2055 | | - __free_page(p->page); |
---|
2056 | | - |
---|
2057 | | -alloc: |
---|
2058 | | - p->page = alloc_page(GFP_ATOMIC); |
---|
2059 | | - if (!p->page) |
---|
2060 | | - return NULL; |
---|
2061 | | - p->pos = page_address(p->page); |
---|
2062 | | - return p; |
---|
2063 | | -} |
---|
2064 | | - |
---|
2065 | 1697 | static void iwl_compute_pseudo_hdr_csum(void *iph, struct tcphdr *tcph, |
---|
2066 | 1698 | bool ipv6, unsigned int len) |
---|
2067 | 1699 | { |
---|
.. | .. |
---|
2084 | 1716 | static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, |
---|
2085 | 1717 | struct iwl_txq *txq, u8 hdr_len, |
---|
2086 | 1718 | struct iwl_cmd_meta *out_meta, |
---|
2087 | | - struct iwl_device_cmd *dev_cmd, u16 tb1_len) |
---|
| 1719 | + struct iwl_device_tx_cmd *dev_cmd, |
---|
| 1720 | + u16 tb1_len) |
---|
2088 | 1721 | { |
---|
2089 | 1722 | struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload; |
---|
2090 | | - struct iwl_trans_pcie *trans_pcie = txq->trans_pcie; |
---|
| 1723 | + struct iwl_trans_pcie *trans_pcie = |
---|
| 1724 | + IWL_TRANS_GET_PCIE_TRANS(txq->trans); |
---|
2091 | 1725 | struct ieee80211_hdr *hdr = (void *)skb->data; |
---|
2092 | 1726 | unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room; |
---|
2093 | 1727 | unsigned int mss = skb_shinfo(skb)->gso_size; |
---|
2094 | 1728 | u16 length, iv_len, amsdu_pad; |
---|
2095 | 1729 | u8 *start_hdr; |
---|
2096 | 1730 | struct iwl_tso_hdr_page *hdr_page; |
---|
2097 | | - struct page **page_ptr; |
---|
2098 | | - int ret; |
---|
2099 | 1731 | struct tso_t tso; |
---|
2100 | 1732 | |
---|
2101 | 1733 | /* if the packet is protected, then it must be CCMP or GCMP */ |
---|
.. | .. |
---|
2104 | 1736 | IEEE80211_CCMP_HDR_LEN : 0; |
---|
2105 | 1737 | |
---|
2106 | 1738 | trace_iwlwifi_dev_tx(trans->dev, skb, |
---|
2107 | | - iwl_pcie_get_tfd(trans, txq, txq->write_ptr), |
---|
2108 | | - trans_pcie->tfd_size, |
---|
| 1739 | + iwl_txq_get_tfd(trans, txq, txq->write_ptr), |
---|
| 1740 | + trans->txqs.tfd.size, |
---|
2109 | 1741 | &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 0); |
---|
2110 | 1742 | |
---|
2111 | 1743 | ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb); |
---|
.. | .. |
---|
2118 | 1750 | (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len; |
---|
2119 | 1751 | |
---|
2120 | 1752 | /* Our device supports 9 segments at most; it will fit in 1 page */ |
---|
2121 | | - hdr_page = get_page_hdr(trans, hdr_room); |
---|
| 1753 | + hdr_page = get_page_hdr(trans, hdr_room, skb); |
---|
2122 | 1754 | if (!hdr_page) |
---|
2123 | 1755 | return -ENOMEM; |
---|
2124 | 1756 | |
---|
2125 | | - get_page(hdr_page->page); |
---|
2126 | 1757 | start_hdr = hdr_page->pos; |
---|
2127 | | - page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs); |
---|
2128 | | - *page_ptr = hdr_page->page; |
---|
2129 | 1758 | memcpy(hdr_page->pos, skb->data + hdr_len, iv_len); |
---|
2130 | 1759 | hdr_page->pos += iv_len; |
---|
2131 | 1760 | |
---|
.. | .. |
---|
2181 | 1810 | if (trans_pcie->sw_csum_tx) { |
---|
2182 | 1811 | csum_skb = alloc_skb(data_left + tcp_hdrlen(skb), |
---|
2183 | 1812 | GFP_ATOMIC); |
---|
2184 | | - if (!csum_skb) { |
---|
2185 | | - ret = -ENOMEM; |
---|
2186 | | - goto out_unmap; |
---|
2187 | | - } |
---|
| 1813 | + if (!csum_skb) |
---|
| 1814 | + return -ENOMEM; |
---|
2188 | 1815 | |
---|
2189 | 1816 | iwl_compute_pseudo_hdr_csum(iph, tcph, |
---|
2190 | 1817 | skb->protocol == |
---|
.. | .. |
---|
2205 | 1832 | hdr_tb_len, DMA_TO_DEVICE); |
---|
2206 | 1833 | if (unlikely(dma_mapping_error(trans->dev, hdr_tb_phys))) { |
---|
2207 | 1834 | dev_kfree_skb(csum_skb); |
---|
2208 | | - ret = -EINVAL; |
---|
2209 | | - goto out_unmap; |
---|
| 1835 | + return -EINVAL; |
---|
2210 | 1836 | } |
---|
2211 | 1837 | iwl_pcie_txq_build_tfd(trans, txq, hdr_tb_phys, |
---|
2212 | 1838 | hdr_tb_len, false); |
---|
2213 | | - trace_iwlwifi_dev_tx_tso_chunk(trans->dev, start_hdr, |
---|
2214 | | - hdr_tb_len); |
---|
| 1839 | + trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr, |
---|
| 1840 | + hdr_tb_phys, hdr_tb_len); |
---|
2215 | 1841 | /* add this subframe's headers' length to the tx_cmd */ |
---|
2216 | 1842 | le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start); |
---|
2217 | 1843 | |
---|
.. | .. |
---|
2231 | 1857 | size, DMA_TO_DEVICE); |
---|
2232 | 1858 | if (unlikely(dma_mapping_error(trans->dev, tb_phys))) { |
---|
2233 | 1859 | dev_kfree_skb(csum_skb); |
---|
2234 | | - ret = -EINVAL; |
---|
2235 | | - goto out_unmap; |
---|
| 1860 | + return -EINVAL; |
---|
2236 | 1861 | } |
---|
2237 | 1862 | |
---|
2238 | 1863 | iwl_pcie_txq_build_tfd(trans, txq, tb_phys, |
---|
2239 | 1864 | size, false); |
---|
2240 | | - trace_iwlwifi_dev_tx_tso_chunk(trans->dev, tso.data, |
---|
2241 | | - size); |
---|
| 1865 | + trace_iwlwifi_dev_tx_tb(trans->dev, skb, tso.data, |
---|
| 1866 | + tb_phys, size); |
---|
2242 | 1867 | |
---|
2243 | 1868 | data_left -= size; |
---|
2244 | 1869 | tso_build_data(skb, &tso, size); |
---|
.. | .. |
---|
2266 | 1891 | skb_push(skb, hdr_len + iv_len); |
---|
2267 | 1892 | |
---|
2268 | 1893 | return 0; |
---|
2269 | | - |
---|
2270 | | -out_unmap: |
---|
2271 | | - iwl_pcie_tfd_unmap(trans, out_meta, txq, txq->write_ptr); |
---|
2272 | | - return ret; |
---|
2273 | 1894 | } |
---|
2274 | 1895 | #else /* CONFIG_INET */ |
---|
2275 | 1896 | static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, |
---|
2276 | 1897 | struct iwl_txq *txq, u8 hdr_len, |
---|
2277 | 1898 | struct iwl_cmd_meta *out_meta, |
---|
2278 | | - struct iwl_device_cmd *dev_cmd, u16 tb1_len) |
---|
| 1899 | + struct iwl_device_tx_cmd *dev_cmd, |
---|
| 1900 | + u16 tb1_len) |
---|
2279 | 1901 | { |
---|
2280 | 1902 | /* No A-MSDU without CONFIG_INET */ |
---|
2281 | 1903 | WARN_ON(1); |
---|
.. | .. |
---|
2285 | 1907 | #endif /* CONFIG_INET */ |
---|
2286 | 1908 | |
---|
2287 | 1909 | int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, |
---|
2288 | | - struct iwl_device_cmd *dev_cmd, int txq_id) |
---|
| 1910 | + struct iwl_device_tx_cmd *dev_cmd, int txq_id) |
---|
2289 | 1911 | { |
---|
2290 | 1912 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
---|
2291 | 1913 | struct ieee80211_hdr *hdr; |
---|
.. | .. |
---|
2302 | 1924 | u16 wifi_seq; |
---|
2303 | 1925 | bool amsdu; |
---|
2304 | 1926 | |
---|
2305 | | - txq = trans_pcie->txq[txq_id]; |
---|
| 1927 | + txq = trans->txqs.txq[txq_id]; |
---|
2306 | 1928 | |
---|
2307 | | - if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used), |
---|
| 1929 | + if (WARN_ONCE(!test_bit(txq_id, trans->txqs.queue_used), |
---|
2308 | 1930 | "TX on unused queue %d\n", txq_id)) |
---|
2309 | 1931 | return -EINVAL; |
---|
2310 | 1932 | |
---|
.. | .. |
---|
2324 | 1946 | } |
---|
2325 | 1947 | |
---|
2326 | 1948 | if (skb_is_nonlinear(skb) && |
---|
2327 | | - skb_shinfo(skb)->nr_frags > IWL_PCIE_MAX_FRAGS(trans_pcie) && |
---|
| 1949 | + skb_shinfo(skb)->nr_frags > IWL_TRANS_MAX_FRAGS(trans) && |
---|
2328 | 1950 | __skb_linearize(skb)) |
---|
2329 | 1951 | return -ENOMEM; |
---|
2330 | 1952 | |
---|
.. | .. |
---|
2337 | 1959 | |
---|
2338 | 1960 | spin_lock(&txq->lock); |
---|
2339 | 1961 | |
---|
2340 | | - if (iwl_queue_space(trans, txq) < txq->high_mark) { |
---|
2341 | | - iwl_stop_queue(trans, txq); |
---|
| 1962 | + if (iwl_txq_space(trans, txq) < txq->high_mark) { |
---|
| 1963 | + iwl_txq_stop(trans, txq); |
---|
2342 | 1964 | |
---|
2343 | 1965 | /* don't put the packet on the ring if there is no room */ |
---|
2344 | | - if (unlikely(iwl_queue_space(trans, txq) < 3)) { |
---|
2345 | | - struct iwl_device_cmd **dev_cmd_ptr; |
---|
| 1966 | + if (unlikely(iwl_txq_space(trans, txq) < 3)) { |
---|
| 1967 | + struct iwl_device_tx_cmd **dev_cmd_ptr; |
---|
2346 | 1968 | |
---|
2347 | 1969 | dev_cmd_ptr = (void *)((u8 *)skb->cb + |
---|
2348 | | - trans_pcie->dev_cmd_offs); |
---|
| 1970 | + trans->txqs.dev_cmd_offs); |
---|
2349 | 1971 | |
---|
2350 | 1972 | *dev_cmd_ptr = dev_cmd; |
---|
2351 | 1973 | __skb_queue_tail(&txq->overflow_q, skb); |
---|
.. | .. |
---|
2374 | 1996 | cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) | |
---|
2375 | 1997 | INDEX_TO_SEQ(txq->write_ptr))); |
---|
2376 | 1998 | |
---|
2377 | | - tb0_phys = iwl_pcie_get_first_tb_dma(txq, txq->write_ptr); |
---|
| 1999 | + tb0_phys = iwl_txq_get_first_tb_dma(txq, txq->write_ptr); |
---|
2378 | 2000 | scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) + |
---|
2379 | 2001 | offsetof(struct iwl_tx_cmd, scratch); |
---|
2380 | 2002 | |
---|
.. | .. |
---|
2423 | 2045 | goto out_err; |
---|
2424 | 2046 | iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false); |
---|
2425 | 2047 | |
---|
| 2048 | + trace_iwlwifi_dev_tx(trans->dev, skb, |
---|
| 2049 | + iwl_txq_get_tfd(trans, txq, txq->write_ptr), |
---|
| 2050 | + trans->txqs.tfd.size, |
---|
| 2051 | + &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, |
---|
| 2052 | + hdr_len); |
---|
| 2053 | + |
---|
2426 | 2054 | /* |
---|
2427 | 2055 | * If gso_size wasn't set, don't give the frame "amsdu treatment" |
---|
2428 | 2056 | * (adding subframes, etc.). |
---|
.. | .. |
---|
2434 | 2062 | out_meta, dev_cmd, |
---|
2435 | 2063 | tb1_len))) |
---|
2436 | 2064 | goto out_err; |
---|
2437 | | - } else if (unlikely(iwl_fill_data_tbs(trans, skb, txq, hdr_len, |
---|
2438 | | - out_meta, dev_cmd, tb1_len))) { |
---|
2439 | | - goto out_err; |
---|
| 2065 | + } else { |
---|
| 2066 | + struct sk_buff *frag; |
---|
| 2067 | + |
---|
| 2068 | + if (unlikely(iwl_fill_data_tbs(trans, skb, txq, hdr_len, |
---|
| 2069 | + out_meta))) |
---|
| 2070 | + goto out_err; |
---|
| 2071 | + |
---|
| 2072 | + skb_walk_frags(skb, frag) { |
---|
| 2073 | + if (unlikely(iwl_fill_data_tbs(trans, frag, txq, 0, |
---|
| 2074 | + out_meta))) |
---|
| 2075 | + goto out_err; |
---|
| 2076 | + } |
---|
2440 | 2077 | } |
---|
2441 | 2078 | |
---|
2442 | 2079 | /* building the A-MSDU might have changed this data, so memcpy it now */ |
---|
2443 | | - memcpy(&txq->first_tb_bufs[txq->write_ptr], &dev_cmd->hdr, |
---|
2444 | | - IWL_FIRST_TB_SIZE); |
---|
| 2080 | + memcpy(&txq->first_tb_bufs[txq->write_ptr], dev_cmd, IWL_FIRST_TB_SIZE); |
---|
2445 | 2081 | |
---|
2446 | | - tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr); |
---|
| 2082 | + tfd = iwl_txq_get_tfd(trans, txq, txq->write_ptr); |
---|
2447 | 2083 | /* Set up entry for this TFD in Tx byte-count array */ |
---|
2448 | | - iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len), |
---|
2449 | | - iwl_pcie_tfd_get_num_tbs(trans, tfd)); |
---|
| 2084 | + iwl_txq_gen1_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len), |
---|
| 2085 | + iwl_txq_gen1_tfd_get_num_tbs(trans, |
---|
| 2086 | + tfd)); |
---|
2450 | 2087 | |
---|
2451 | 2088 | wait_write_ptr = ieee80211_has_morefrags(fc); |
---|
2452 | 2089 | |
---|
2453 | 2090 | /* start timer if queue currently empty */ |
---|
2454 | | - if (txq->read_ptr == txq->write_ptr) { |
---|
2455 | | - if (txq->wd_timeout) { |
---|
2456 | | - /* |
---|
2457 | | - * If the TXQ is active, then set the timer; if not, |
---|
2458 | | - * store the timeout in the remainder so that the timer |
---|
2459 | | - * will be armed with the right value when the station |
---|
2460 | | - * wakes up. |
---|
2461 | | - */ |
---|
2462 | | - if (!txq->frozen) |
---|
2463 | | - mod_timer(&txq->stuck_timer, |
---|
2464 | | - jiffies + txq->wd_timeout); |
---|
2465 | | - else |
---|
2466 | | - txq->frozen_expiry_remainder = txq->wd_timeout; |
---|
2467 | | - } |
---|
2468 | | - IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", txq->id); |
---|
2469 | | - iwl_trans_ref(trans); |
---|
| 2091 | + if (txq->read_ptr == txq->write_ptr && txq->wd_timeout) { |
---|
| 2092 | + /* |
---|
| 2093 | + * If the TXQ is active, then set the timer; if not, |
---|
| 2094 | + * store the timeout in the remainder so that the timer |
---|
| 2095 | + * will be armed with the right value when the station |
---|
| 2096 | + * wakes up. |
---|
| 2097 | + */ |
---|
| 2098 | + if (!txq->frozen) |
---|
| 2099 | + mod_timer(&txq->stuck_timer, |
---|
| 2100 | + jiffies + txq->wd_timeout); |
---|
| 2101 | + else |
---|
| 2102 | + txq->frozen_expiry_remainder = txq->wd_timeout; |
---|
2470 | 2103 | } |
---|
2471 | 2104 | |
---|
2472 | 2105 | /* Tell device the write index *just past* this latest filled TFD */ |
---|
2473 | | - txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr); |
---|
| 2106 | + txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr); |
---|
2474 | 2107 | if (!wait_write_ptr) |
---|
2475 | 2108 | iwl_pcie_txq_inc_wr_ptr(trans, txq); |
---|
2476 | 2109 | |
---|
.. | .. |
---|
2481 | 2114 | spin_unlock(&txq->lock); |
---|
2482 | 2115 | return 0; |
---|
2483 | 2116 | out_err: |
---|
| 2117 | + iwl_txq_gen1_tfd_unmap(trans, out_meta, txq, txq->write_ptr); |
---|
2484 | 2118 | spin_unlock(&txq->lock); |
---|
2485 | 2119 | return -1; |
---|
2486 | 2120 | } |
---|