2024-01-05 071106ecf68c401173c58808b1cf5f68cc50d390
kernel/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -1,12 +1,14 @@
 /******************************************************************************
  *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
  * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
+ * Copyright(c) 2018 - 2020 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as
@@ -17,23 +19,52 @@
  * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
  * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
+ * file called COPYING.
  *
  * Contact Information:
  *  Intel Linux Wireless <linuxwifi@intel.com>
  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 - 2020 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  *****************************************************************************/
 #include <linux/etherdevice.h>
 #include <linux/ieee80211.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
-#include <linux/pm_runtime.h>
 #include <net/ip6_checksum.h>
 #include <net/tso.h>
 
@@ -45,9 +76,6 @@
 #include "iwl-op-mode.h"
 #include "internal.h"
 #include "fw/api/tx.h"
-
-#define IWL_TX_CRC_SIZE 4
-#define IWL_TX_DELIMITER_SIZE 4
 
 /*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
  * DMA services
@@ -71,60 +99,6 @@
  *
  ***************************************************/
 
-int iwl_queue_space(struct iwl_trans *trans, const struct iwl_txq *q)
-{
-	unsigned int max;
-	unsigned int used;
-
-	/*
-	 * To avoid ambiguity between empty and completely full queues, there
-	 * should always be less than max_tfd_queue_size elements in the queue.
-	 * If q->n_window is smaller than max_tfd_queue_size, there is no need
-	 * to reserve any queue entries for this purpose.
-	 */
-	if (q->n_window < trans->cfg->base_params->max_tfd_queue_size)
-		max = q->n_window;
-	else
-		max = trans->cfg->base_params->max_tfd_queue_size - 1;
-
-	/*
-	 * max_tfd_queue_size is a power of 2, so the following is equivalent to
-	 * modulo by max_tfd_queue_size and is well defined.
-	 */
-	used = (q->write_ptr - q->read_ptr) &
-		(trans->cfg->base_params->max_tfd_queue_size - 1);
-
-	if (WARN_ON(used > max))
-		return 0;
-
-	return max - used;
-}
-
-/*
- * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
- */
-static int iwl_queue_init(struct iwl_txq *q, int slots_num)
-{
-	q->n_window = slots_num;
-
-	/* slots_num must be power-of-two size, otherwise
-	 * iwl_pcie_get_cmd_index is broken. */
-	if (WARN_ON(!is_power_of_2(slots_num)))
-		return -EINVAL;
-
-	q->low_mark = q->n_window / 4;
-	if (q->low_mark < 4)
-		q->low_mark = 4;
-
-	q->high_mark = q->n_window / 8;
-	if (q->high_mark < 2)
-		q->high_mark = 2;
-
-	q->write_ptr = 0;
-	q->read_ptr = 0;
-
-	return 0;
-}
 
 int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
 			   struct iwl_dma_ptr *ptr, size_t size)
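
Note on the hunk above: iwl_queue_space() and iwl_queue_init() leave the PCIe code because the shared queue layer takes them over; the rest of this diff calls iwl_txq_space(), iwl_txq_alloc() and iwl_txq_init() instead. The arithmetic they implement is the usual power-of-two ring buffer: occupancy is computed with a mask rather than a modulo, and one slot is kept unused so an empty queue (read == write) is distinguishable from a full one. A minimal standalone sketch of that calculation, with illustrative names rather than the driver's API:

	/* illustration only: 'size' must be a power of two, like max_tfd_queue_size */
	static unsigned int txq_free_slots(unsigned int write_ptr, unsigned int read_ptr,
					   unsigned int size, unsigned int window)
	{
		/* wrap-safe occupancy: masking makes it a modulo by 'size' */
		unsigned int used = (write_ptr - read_ptr) & (size - 1);
		/* reserve one slot unless the window is already smaller than the queue */
		unsigned int max = (window < size) ? window : size - 1;

		return (used > max) ? 0 : max - used;
	}
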
....@@ -149,107 +123,12 @@
149123 memset(ptr, 0, sizeof(*ptr));
150124 }
151125
152
-static void iwl_pcie_txq_stuck_timer(struct timer_list *t)
153
-{
154
- struct iwl_txq *txq = from_timer(txq, t, stuck_timer);
155
- struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
156
- struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
157
-
158
- spin_lock(&txq->lock);
159
- /* check if triggered erroneously */
160
- if (txq->read_ptr == txq->write_ptr) {
161
- spin_unlock(&txq->lock);
162
- return;
163
- }
164
- spin_unlock(&txq->lock);
165
-
166
- iwl_trans_pcie_log_scd_error(trans, txq);
167
-
168
- iwl_force_nmi(trans);
169
-}
170
-
171
-/*
172
- * iwl_pcie_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
173
- */
174
-static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
175
- struct iwl_txq *txq, u16 byte_cnt,
176
- int num_tbs)
177
-{
178
- struct iwlagn_scd_bc_tbl *scd_bc_tbl;
179
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
180
- int write_ptr = txq->write_ptr;
181
- int txq_id = txq->id;
182
- u8 sec_ctl = 0;
183
- u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
184
- __le16 bc_ent;
185
- struct iwl_tx_cmd *tx_cmd =
186
- (void *)txq->entries[txq->write_ptr].cmd->payload;
187
- u8 sta_id = tx_cmd->sta_id;
188
-
189
- scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
190
-
191
- sec_ctl = tx_cmd->sec_ctl;
192
-
193
- switch (sec_ctl & TX_CMD_SEC_MSK) {
194
- case TX_CMD_SEC_CCM:
195
- len += IEEE80211_CCMP_MIC_LEN;
196
- break;
197
- case TX_CMD_SEC_TKIP:
198
- len += IEEE80211_TKIP_ICV_LEN;
199
- break;
200
- case TX_CMD_SEC_WEP:
201
- len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN;
202
- break;
203
- }
204
- if (trans_pcie->bc_table_dword)
205
- len = DIV_ROUND_UP(len, 4);
206
-
207
- if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX))
208
- return;
209
-
210
- bc_ent = cpu_to_le16(len | (sta_id << 12));
211
-
212
- scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
213
-
214
- if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
215
- scd_bc_tbl[txq_id].
216
- tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
217
-}
218
-
219
-static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
220
- struct iwl_txq *txq)
221
-{
222
- struct iwl_trans_pcie *trans_pcie =
223
- IWL_TRANS_GET_PCIE_TRANS(trans);
224
- struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
225
- int txq_id = txq->id;
226
- int read_ptr = txq->read_ptr;
227
- u8 sta_id = 0;
228
- __le16 bc_ent;
229
- struct iwl_tx_cmd *tx_cmd =
230
- (void *)txq->entries[read_ptr].cmd->payload;
231
-
232
- WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
233
-
234
- if (txq_id != trans_pcie->cmd_queue)
235
- sta_id = tx_cmd->sta_id;
236
-
237
- bc_ent = cpu_to_le16(1 | (sta_id << 12));
238
-
239
- scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;
240
-
241
- if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
242
- scd_bc_tbl[txq_id].
243
- tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
244
-}
245
-
246126 /*
247127 * iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware
248128 */
249129 static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
250130 struct iwl_txq *txq)
251131 {
252
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
253132 u32 reg = 0;
254133 int txq_id = txq->id;
255134
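
Note on the hunk above: iwl_pcie_txq_update_byte_cnt_tbl() packed each scheduler byte-count entry as a 16-bit little-endian value, with the overhead-adjusted frame length in the low 12 bits and the station id in the top 4 (the length optionally expressed in dwords when bc_table_dword is set), and it mirrored the first TFD_QUEUE_SIZE_BC_DUP entries past TFD_QUEUE_SIZE_MAX, presumably so the scheduler can keep reading across the wrap point. Its invalidation counterpart reappears below as the shared iwl_txq_gen1_inval_byte_cnt_tbl(). A standalone sketch of just the packing step, with illustrative names and the cpu_to_le16() byte swap omitted:

	/* illustration only: mirrors bc_ent = cpu_to_le16(len | (sta_id << 12)) */
	static unsigned short bc_tbl_entry(unsigned int len, unsigned int sta_id,
					   int bc_table_dword)
	{
		if (bc_table_dword)
			len = (len + 3) / 4;	/* DIV_ROUND_UP(len, 4) */

		if (len > 0xFFF)		/* the length field is only 12 bits wide */
			return 0;		/* the real code WARNs and bails out */

		return (unsigned short)(len | ((sta_id & 0xF) << 12));
	}
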
@@ -261,8 +140,8 @@
 	 * 2. NIC is woken up for CMD regardless of shadow outside this function
 	 * 3. there is a chance that the NIC is asleep
 	 */
-	if (!trans->cfg->base_params->shadow_reg_enable &&
-	    txq_id != trans_pcie->cmd_queue &&
+	if (!trans->trans_cfg->base_params->shadow_reg_enable &&
+	    txq_id != trans->txqs.cmd.q_id &&
 	    test_bit(STATUS_TPOWER_PMI, &trans->status)) {
 		/*
 		 * wake up nic if it's powered down ...
@@ -275,7 +154,7 @@
 		IWL_DEBUG_INFO(trans, "Tx queue %d requesting wakeup, GP1 = 0x%x\n",
 			       txq_id, reg);
 		iwl_set_bit(trans, CSR_GP_CNTRL,
-			    BIT(trans->cfg->csr->flag_mac_access_req));
+			    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
 		txq->need_update = true;
 		return;
 	}
....@@ -293,13 +172,12 @@
293172
294173 void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
295174 {
296
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
297175 int i;
298176
299
- for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
300
- struct iwl_txq *txq = trans_pcie->txq[i];
177
+ for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
178
+ struct iwl_txq *txq = trans->txqs.txq[i];
301179
302
- if (!test_bit(i, trans_pcie->queue_used))
180
+ if (!test_bit(i, trans->txqs.queue_used))
303181 continue;
304182
305183 spin_lock_bh(&txq->lock);
....@@ -308,35 +186,6 @@
308186 txq->need_update = false;
309187 }
310188 spin_unlock_bh(&txq->lock);
311
- }
312
-}
313
-
314
-static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_trans *trans,
315
- void *_tfd, u8 idx)
316
-{
317
-
318
- if (trans->cfg->use_tfh) {
319
- struct iwl_tfh_tfd *tfd = _tfd;
320
- struct iwl_tfh_tb *tb = &tfd->tbs[idx];
321
-
322
- return (dma_addr_t)(le64_to_cpu(tb->addr));
323
- } else {
324
- struct iwl_tfd *tfd = _tfd;
325
- struct iwl_tfd_tb *tb = &tfd->tbs[idx];
326
- dma_addr_t addr = get_unaligned_le32(&tb->lo);
327
- dma_addr_t hi_len;
328
-
329
- if (sizeof(dma_addr_t) <= sizeof(u32))
330
- return addr;
331
-
332
- hi_len = le16_to_cpu(tb->hi_n_len) & 0xF;
333
-
334
- /*
335
- * shift by 16 twice to avoid warnings on 32-bit
336
- * (where this code never runs anyway due to the
337
- * if statement above)
338
- */
339
- return addr | ((hi_len << 16) << 16);
340189 }
341190 }
342191
....@@ -356,67 +205,6 @@
356205 tfd_fh->num_tbs = idx + 1;
357206 }
358207
359
-static inline u8 iwl_pcie_tfd_get_num_tbs(struct iwl_trans *trans, void *_tfd)
360
-{
361
- if (trans->cfg->use_tfh) {
362
- struct iwl_tfh_tfd *tfd = _tfd;
363
-
364
- return le16_to_cpu(tfd->num_tbs) & 0x1f;
365
- } else {
366
- struct iwl_tfd *tfd = _tfd;
367
-
368
- return tfd->num_tbs & 0x1f;
369
- }
370
-}
371
-
372
-static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
373
- struct iwl_cmd_meta *meta,
374
- struct iwl_txq *txq, int index)
375
-{
376
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
377
- int i, num_tbs;
378
- void *tfd = iwl_pcie_get_tfd(trans, txq, index);
379
-
380
- /* Sanity check on number of chunks */
381
- num_tbs = iwl_pcie_tfd_get_num_tbs(trans, tfd);
382
-
383
- if (num_tbs > trans_pcie->max_tbs) {
384
- IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
385
- /* @todo issue fatal error, it is quite serious situation */
386
- return;
387
- }
388
-
389
- /* first TB is never freed - it's the bidirectional DMA data */
390
-
391
- for (i = 1; i < num_tbs; i++) {
392
- if (meta->tbs & BIT(i))
393
- dma_unmap_page(trans->dev,
394
- iwl_pcie_tfd_tb_get_addr(trans, tfd, i),
395
- iwl_pcie_tfd_tb_get_len(trans, tfd, i),
396
- DMA_TO_DEVICE);
397
- else
398
- dma_unmap_single(trans->dev,
399
- iwl_pcie_tfd_tb_get_addr(trans, tfd,
400
- i),
401
- iwl_pcie_tfd_tb_get_len(trans, tfd,
402
- i),
403
- DMA_TO_DEVICE);
404
- }
405
-
406
- meta->tbs = 0;
407
-
408
- if (trans->cfg->use_tfh) {
409
- struct iwl_tfh_tfd *tfd_fh = (void *)tfd;
410
-
411
- tfd_fh->num_tbs = 0;
412
- } else {
413
- struct iwl_tfd *tfd_fh = (void *)tfd;
414
-
415
- tfd_fh->num_tbs = 0;
416
- }
417
-
418
-}
419
-
420208 /*
421209 * iwl_pcie_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
422210 * @trans - transport private data
....@@ -432,14 +220,14 @@
432220 * idx is bounded by n_window
433221 */
434222 int rd_ptr = txq->read_ptr;
435
- int idx = iwl_pcie_get_cmd_index(txq, rd_ptr);
223
+ int idx = iwl_txq_get_cmd_index(txq, rd_ptr);
436224
437225 lockdep_assert_held(&txq->lock);
438226
439227 /* We have only q->n_window txq->entries, but we use
440228 * TFD_QUEUE_SIZE_MAX tfds
441229 */
442
- iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, txq, rd_ptr);
230
+ iwl_txq_gen1_tfd_unmap(trans, &txq->entries[idx].meta, txq, rd_ptr);
443231
444232 /* free SKB */
445233 if (txq->entries) {
....@@ -461,21 +249,20 @@
461249 static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
462250 dma_addr_t addr, u16 len, bool reset)
463251 {
464
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
465252 void *tfd;
466253 u32 num_tbs;
467254
468
- tfd = txq->tfds + trans_pcie->tfd_size * txq->write_ptr;
255
+ tfd = txq->tfds + trans->txqs.tfd.size * txq->write_ptr;
469256
470257 if (reset)
471
- memset(tfd, 0, trans_pcie->tfd_size);
258
+ memset(tfd, 0, trans->txqs.tfd.size);
472259
473
- num_tbs = iwl_pcie_tfd_get_num_tbs(trans, tfd);
260
+ num_tbs = iwl_txq_gen1_tfd_get_num_tbs(trans, tfd);
474261
475262 /* Each TFD can point to a maximum max_tbs Tx buffers */
476
- if (num_tbs >= trans_pcie->max_tbs) {
263
+ if (num_tbs >= trans->txqs.tfd.max_tbs) {
477264 IWL_ERR(trans, "Error can not send more than %d chunks\n",
478
- trans_pcie->max_tbs);
265
+ trans->txqs.tfd.max_tbs);
479266 return -EINVAL;
480267 }
481268
....@@ -488,139 +275,20 @@
488275 return num_tbs;
489276 }
490277
491
-int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq,
492
- int slots_num, bool cmd_queue)
493
-{
494
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
495
- size_t tfd_sz = trans_pcie->tfd_size *
496
- trans->cfg->base_params->max_tfd_queue_size;
497
- size_t tb0_buf_sz;
498
- int i;
499
-
500
- if (WARN_ON(txq->entries || txq->tfds))
501
- return -EINVAL;
502
-
503
- if (trans->cfg->use_tfh)
504
- tfd_sz = trans_pcie->tfd_size * slots_num;
505
-
506
- timer_setup(&txq->stuck_timer, iwl_pcie_txq_stuck_timer, 0);
507
- txq->trans_pcie = trans_pcie;
508
-
509
- txq->n_window = slots_num;
510
-
511
- txq->entries = kcalloc(slots_num,
512
- sizeof(struct iwl_pcie_txq_entry),
513
- GFP_KERNEL);
514
-
515
- if (!txq->entries)
516
- goto error;
517
-
518
- if (cmd_queue)
519
- for (i = 0; i < slots_num; i++) {
520
- txq->entries[i].cmd =
521
- kmalloc(sizeof(struct iwl_device_cmd),
522
- GFP_KERNEL);
523
- if (!txq->entries[i].cmd)
524
- goto error;
525
- }
526
-
527
- /* Circular buffer of transmit frame descriptors (TFDs),
528
- * shared with device */
529
- txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
530
- &txq->dma_addr, GFP_KERNEL);
531
- if (!txq->tfds)
532
- goto error;
533
-
534
- BUILD_BUG_ON(IWL_FIRST_TB_SIZE_ALIGN != sizeof(*txq->first_tb_bufs));
535
-
536
- tb0_buf_sz = sizeof(*txq->first_tb_bufs) * slots_num;
537
-
538
- txq->first_tb_bufs = dma_alloc_coherent(trans->dev, tb0_buf_sz,
539
- &txq->first_tb_dma,
540
- GFP_KERNEL);
541
- if (!txq->first_tb_bufs)
542
- goto err_free_tfds;
543
-
544
- return 0;
545
-err_free_tfds:
546
- dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr);
547
-error:
548
- if (txq->entries && cmd_queue)
549
- for (i = 0; i < slots_num; i++)
550
- kfree(txq->entries[i].cmd);
551
- kfree(txq->entries);
552
- txq->entries = NULL;
553
-
554
- return -ENOMEM;
555
-
556
-}
557
-
558
-int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
559
- int slots_num, bool cmd_queue)
560
-{
561
- int ret;
562
- u32 tfd_queue_max_size = trans->cfg->base_params->max_tfd_queue_size;
563
-
564
- txq->need_update = false;
565
-
566
- /* max_tfd_queue_size must be power-of-two size, otherwise
567
- * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
568
- if (WARN_ONCE(tfd_queue_max_size & (tfd_queue_max_size - 1),
569
- "Max tfd queue size must be a power of two, but is %d",
570
- tfd_queue_max_size))
571
- return -EINVAL;
572
-
573
- /* Initialize queue's high/low-water marks, and head/tail indexes */
574
- ret = iwl_queue_init(txq, slots_num);
575
- if (ret)
576
- return ret;
577
-
578
- spin_lock_init(&txq->lock);
579
-
580
- if (cmd_queue) {
581
- static struct lock_class_key iwl_pcie_cmd_queue_lock_class;
582
-
583
- lockdep_set_class(&txq->lock, &iwl_pcie_cmd_queue_lock_class);
584
- }
585
-
586
- __skb_queue_head_init(&txq->overflow_q);
587
-
588
- return 0;
589
-}
590
-
591
-void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie,
592
- struct sk_buff *skb)
593
-{
594
- struct page **page_ptr;
595
-
596
- page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
597
-
598
- if (*page_ptr) {
599
- __free_page(*page_ptr);
600
- *page_ptr = NULL;
601
- }
602
-}
603
-
604278 static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
605279 {
606280 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
607281
608282 lockdep_assert_held(&trans_pcie->reg_lock);
609283
610
- if (trans_pcie->ref_cmd_in_flight) {
611
- trans_pcie->ref_cmd_in_flight = false;
612
- IWL_DEBUG_RPM(trans, "clear ref_cmd_in_flight - unref\n");
613
- iwl_trans_unref(trans);
614
- }
615
-
616
- if (!trans->cfg->base_params->apmg_wake_up_wa)
284
+ if (!trans->trans_cfg->base_params->apmg_wake_up_wa)
617285 return;
618286 if (WARN_ON(!trans_pcie->cmd_hold_nic_awake))
619287 return;
620288
621289 trans_pcie->cmd_hold_nic_awake = false;
622290 __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
623
- BIT(trans->cfg->csr->flag_mac_access_req));
291
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
624292 }
625293
626294 /*
....@@ -629,7 +297,7 @@
629297 static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
630298 {
631299 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
632
- struct iwl_txq *txq = trans_pcie->txq[txq_id];
300
+ struct iwl_txq *txq = trans->txqs.txq[txq_id];
633301
634302 if (!txq) {
635303 IWL_ERR(trans, "Trying to free a queue that wasn't allocated?\n");
....@@ -641,29 +309,22 @@
641309 IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
642310 txq_id, txq->read_ptr);
643311
644
- if (txq_id != trans_pcie->cmd_queue) {
312
+ if (txq_id != trans->txqs.cmd.q_id) {
645313 struct sk_buff *skb = txq->entries[txq->read_ptr].skb;
646314
647315 if (WARN_ON_ONCE(!skb))
648316 continue;
649317
650
- iwl_pcie_free_tso_page(trans_pcie, skb);
318
+ iwl_txq_free_tso_page(trans, skb);
651319 }
652320 iwl_pcie_txq_free_tfd(trans, txq);
653
- txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr);
321
+ txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);
654322
655323 if (txq->read_ptr == txq->write_ptr) {
656
- unsigned long flags;
657
-
658
- spin_lock_irqsave(&trans_pcie->reg_lock, flags);
659
- if (txq_id != trans_pcie->cmd_queue) {
660
- IWL_DEBUG_RPM(trans, "Q %d - last tx freed\n",
661
- txq->id);
662
- iwl_trans_unref(trans);
663
- } else {
324
+ spin_lock(&trans_pcie->reg_lock);
325
+ if (txq_id == trans->txqs.cmd.q_id)
664326 iwl_pcie_clear_cmd_in_flight(trans);
665
- }
666
- spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
327
+ spin_unlock(&trans_pcie->reg_lock);
667328 }
668329 }
669330
....@@ -689,8 +350,7 @@
689350 */
690351 static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
691352 {
692
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
693
- struct iwl_txq *txq = trans_pcie->txq[txq_id];
353
+ struct iwl_txq *txq = trans->txqs.txq[txq_id];
694354 struct device *dev = trans->dev;
695355 int i;
696356
....@@ -700,17 +360,17 @@
700360 iwl_pcie_txq_unmap(trans, txq_id);
701361
702362 /* De-alloc array of command/tx buffers */
703
- if (txq_id == trans_pcie->cmd_queue)
363
+ if (txq_id == trans->txqs.cmd.q_id)
704364 for (i = 0; i < txq->n_window; i++) {
705
- kzfree(txq->entries[i].cmd);
706
- kzfree(txq->entries[i].free_buf);
365
+ kfree_sensitive(txq->entries[i].cmd);
366
+ kfree_sensitive(txq->entries[i].free_buf);
707367 }
708368
709369 /* De-alloc circular buffer of TFDs */
710370 if (txq->tfds) {
711371 dma_free_coherent(dev,
712
- trans_pcie->tfd_size *
713
- trans->cfg->base_params->max_tfd_queue_size,
372
+ trans->txqs.tfd.size *
373
+ trans->trans_cfg->base_params->max_tfd_queue_size,
714374 txq->tfds, txq->dma_addr);
715375 txq->dma_addr = 0;
716376 txq->tfds = NULL;
....@@ -732,15 +392,16 @@
732392 void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
733393 {
734394 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
735
- int nq = trans->cfg->base_params->num_of_queues;
395
+ int nq = trans->trans_cfg->base_params->num_of_queues;
736396 int chan;
737397 u32 reg_val;
738398 int clear_dwords = (SCD_TRANS_TBL_OFFSET_QUEUE(nq) -
739399 SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(u32);
740400
741401 /* make sure all queue are not stopped/used */
742
- memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
743
- memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
402
+ memset(trans->txqs.queue_stopped, 0,
403
+ sizeof(trans->txqs.queue_stopped));
404
+ memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));
744405
745406 trans_pcie->scd_base_addr =
746407 iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);
....@@ -754,17 +415,17 @@
754415 NULL, clear_dwords);
755416
756417 iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
757
- trans_pcie->scd_bc_tbls.dma >> 10);
418
+ trans->txqs.scd_bc_tbls.dma >> 10);
758419
759420 /* The chain extension of the SCD doesn't work well. This feature is
760421 * enabled by default by the HW, so we need to disable it manually.
761422 */
762
- if (trans->cfg->base_params->scd_chain_ext_wa)
423
+ if (trans->trans_cfg->base_params->scd_chain_ext_wa)
763424 iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);
764425
765
- iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue,
766
- trans_pcie->cmd_fifo,
767
- trans_pcie->cmd_q_wdg_timeout);
426
+ iwl_trans_ac_txq_enable(trans, trans->txqs.cmd.q_id,
427
+ trans->txqs.cmd.fifo,
428
+ trans->txqs.cmd.wdg_timeout);
768429
769430 /* Activate all Tx DMA/FIFO channels */
770431 iwl_scd_activate_fifos(trans);
....@@ -781,7 +442,7 @@
781442 reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
782443
783444 /* Enable L1-Active */
784
- if (trans->cfg->device_family < IWL_DEVICE_FAMILY_8000)
445
+ if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000)
785446 iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
786447 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
787448 }
....@@ -795,13 +456,13 @@
795456 * we should never get here in gen2 trans mode return early to avoid
796457 * having invalid accesses
797458 */
798
- if (WARN_ON_ONCE(trans->cfg->gen2))
459
+ if (WARN_ON_ONCE(trans->trans_cfg->gen2))
799460 return;
800461
801
- for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
462
+ for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
802463 txq_id++) {
803
- struct iwl_txq *txq = trans_pcie->txq[txq_id];
804
- if (trans->cfg->use_tfh)
464
+ struct iwl_txq *txq = trans->txqs.txq[txq_id];
465
+ if (trans->trans_cfg->use_tfh)
805466 iwl_write_direct64(trans,
806467 FH_MEM_CBBC_QUEUE(trans, txq_id),
807468 txq->dma_addr);
....@@ -876,15 +537,16 @@
876537 * queues. This happens when we have an rfkill interrupt.
877538 * Since we stop Tx altogether - mark the queues as stopped.
878539 */
879
- memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
880
- memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
540
+ memset(trans->txqs.queue_stopped, 0,
541
+ sizeof(trans->txqs.queue_stopped));
542
+ memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));
881543
882544 /* This can happen: start_hw, stop_device */
883545 if (!trans_pcie->txq_memory)
884546 return 0;
885547
886548 /* Unmap DMA from host system and free skb's */
887
- for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
549
+ for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
888550 txq_id++)
889551 iwl_pcie_txq_unmap(trans, txq_id);
890552
....@@ -901,15 +563,15 @@
901563 int txq_id;
902564 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
903565
904
- memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
566
+ memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));
905567
906568 /* Tx queues */
907569 if (trans_pcie->txq_memory) {
908570 for (txq_id = 0;
909
- txq_id < trans->cfg->base_params->num_of_queues;
571
+ txq_id < trans->trans_cfg->base_params->num_of_queues;
910572 txq_id++) {
911573 iwl_pcie_txq_free(trans, txq_id);
912
- trans_pcie->txq[txq_id] = NULL;
574
+ trans->txqs.txq[txq_id] = NULL;
913575 }
914576 }
915577
....@@ -918,7 +580,7 @@
918580
919581 iwl_pcie_free_dma_ptr(trans, &trans_pcie->kw);
920582
921
- iwl_pcie_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls);
583
+ iwl_pcie_free_dma_ptr(trans, &trans->txqs.scd_bc_tbls);
922584 }
923585
924586 /*
....@@ -930,11 +592,12 @@
930592 int ret;
931593 int txq_id, slots_num;
932594 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
933
- u16 bc_tbls_size = trans->cfg->base_params->num_of_queues;
595
+ u16 bc_tbls_size = trans->trans_cfg->base_params->num_of_queues;
934596
935
- bc_tbls_size *= (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) ?
936
- sizeof(struct iwl_gen3_bc_tbl) :
937
- sizeof(struct iwlagn_scd_bc_tbl);
597
+ if (WARN_ON(trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210))
598
+ return -EINVAL;
599
+
600
+ bc_tbls_size *= sizeof(struct iwlagn_scd_bc_tbl);
938601
939602 /*It is not allowed to alloc twice, so warn when this happens.
940603 * We cannot rely on the previous allocation, so free and fail */
....@@ -943,7 +606,7 @@
943606 goto error;
944607 }
945608
946
- ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls,
609
+ ret = iwl_pcie_alloc_dma_ptr(trans, &trans->txqs.scd_bc_tbls,
947610 bc_tbls_size);
948611 if (ret) {
949612 IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
....@@ -957,8 +620,9 @@
957620 goto error;
958621 }
959622
960
- trans_pcie->txq_memory = kcalloc(trans->cfg->base_params->num_of_queues,
961
- sizeof(struct iwl_txq), GFP_KERNEL);
623
+ trans_pcie->txq_memory =
624
+ kcalloc(trans->trans_cfg->base_params->num_of_queues,
625
+ sizeof(struct iwl_txq), GFP_KERNEL);
962626 if (!trans_pcie->txq_memory) {
963627 IWL_ERR(trans, "Not enough memory for txq\n");
964628 ret = -ENOMEM;
....@@ -966,19 +630,24 @@
966630 }
967631
968632 /* Alloc and init all Tx queues, including the command queue (#4/#9) */
969
- for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
633
+ for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
970634 txq_id++) {
971
- bool cmd_queue = (txq_id == trans_pcie->cmd_queue);
635
+ bool cmd_queue = (txq_id == trans->txqs.cmd.q_id);
972636
973
- slots_num = cmd_queue ? TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
974
- trans_pcie->txq[txq_id] = &trans_pcie->txq_memory[txq_id];
975
- ret = iwl_pcie_txq_alloc(trans, trans_pcie->txq[txq_id],
976
- slots_num, cmd_queue);
637
+ if (cmd_queue)
638
+ slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE,
639
+ trans->cfg->min_txq_size);
640
+ else
641
+ slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
642
+ trans->cfg->min_256_ba_txq_size);
643
+ trans->txqs.txq[txq_id] = &trans_pcie->txq_memory[txq_id];
644
+ ret = iwl_txq_alloc(trans, trans->txqs.txq[txq_id], slots_num,
645
+ cmd_queue);
977646 if (ret) {
978647 IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
979648 goto error;
980649 }
981
- trans_pcie->txq[txq_id]->id = txq_id;
650
+ trans->txqs.txq[txq_id]->id = txq_id;
982651 }
983652
984653 return 0;
....@@ -1015,13 +684,18 @@
1015684 spin_unlock(&trans_pcie->irq_lock);
1016685
1017686 /* Alloc and init all Tx queues, including the command queue (#4/#9) */
1018
- for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
687
+ for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
1019688 txq_id++) {
1020
- bool cmd_queue = (txq_id == trans_pcie->cmd_queue);
689
+ bool cmd_queue = (txq_id == trans->txqs.cmd.q_id);
1021690
1022
- slots_num = cmd_queue ? TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
1023
- ret = iwl_pcie_txq_init(trans, trans_pcie->txq[txq_id],
1024
- slots_num, cmd_queue);
691
+ if (cmd_queue)
692
+ slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE,
693
+ trans->cfg->min_txq_size);
694
+ else
695
+ slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
696
+ trans->cfg->min_256_ba_txq_size);
697
+ ret = iwl_txq_init(trans, trans->txqs.txq[txq_id], slots_num,
698
+ cmd_queue);
1025699 if (ret) {
1026700 IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
1027701 goto error;
....@@ -1034,11 +708,11 @@
1034708 * Circular buffer (TFD queue in DRAM) physical base address
1035709 */
1036710 iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(trans, txq_id),
1037
- trans_pcie->txq[txq_id]->dma_addr >> 8);
711
+ trans->txqs.txq[txq_id]->dma_addr >> 8);
1038712 }
1039713
1040714 iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE);
1041
- if (trans->cfg->base_params->num_of_queues > 20)
715
+ if (trans->trans_cfg->base_params->num_of_queues > 20)
1042716 iwl_set_bits_prph(trans, SCD_GP_CTRL,
1043717 SCD_GP_CTRL_ENABLE_31_QUEUES);
1044718
....@@ -1078,19 +752,18 @@
1078752 void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
1079753 struct sk_buff_head *skbs)
1080754 {
1081
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1082
- struct iwl_txq *txq = trans_pcie->txq[txq_id];
1083
- int tfd_num = iwl_pcie_get_cmd_index(txq, ssn);
1084
- int read_ptr = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
755
+ struct iwl_txq *txq = trans->txqs.txq[txq_id];
756
+ int tfd_num = iwl_txq_get_cmd_index(txq, ssn);
757
+ int read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr);
1085758 int last_to_free;
1086759
1087760 /* This function is not meant to release cmd queue*/
1088
- if (WARN_ON(txq_id == trans_pcie->cmd_queue))
761
+ if (WARN_ON(txq_id == trans->txqs.cmd.q_id))
1089762 return;
1090763
1091764 spin_lock_bh(&txq->lock);
1092765
1093
- if (!test_bit(txq_id, trans_pcie->queue_used)) {
766
+ if (!test_bit(txq_id, trans->txqs.queue_used)) {
1094767 IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n",
1095768 txq_id, ssn);
1096769 goto out;
....@@ -1104,13 +777,13 @@
1104777
1105778 /*Since we free until index _not_ inclusive, the one before index is
1106779 * the last we will free. This one must be used */
1107
- last_to_free = iwl_queue_dec_wrap(trans, tfd_num);
780
+ last_to_free = iwl_txq_dec_wrap(trans, tfd_num);
1108781
1109
- if (!iwl_queue_used(txq, last_to_free)) {
782
+ if (!iwl_txq_used(txq, last_to_free)) {
1110783 IWL_ERR(trans,
1111784 "%s: Read index for txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
1112785 __func__, txq_id, last_to_free,
1113
- trans->cfg->base_params->max_tfd_queue_size,
786
+ trans->trans_cfg->base_params->max_tfd_queue_size,
1114787 txq->write_ptr, txq->read_ptr);
1115788 goto out;
1116789 }
....@@ -1120,33 +793,42 @@
1120793
1121794 for (;
1122795 read_ptr != tfd_num;
1123
- txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr),
1124
- read_ptr = iwl_pcie_get_cmd_index(txq, txq->read_ptr)) {
796
+ txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr),
797
+ read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr)) {
1125798 struct sk_buff *skb = txq->entries[read_ptr].skb;
1126799
1127800 if (WARN_ON_ONCE(!skb))
1128801 continue;
1129802
1130
- iwl_pcie_free_tso_page(trans_pcie, skb);
803
+ iwl_txq_free_tso_page(trans, skb);
1131804
1132805 __skb_queue_tail(skbs, skb);
1133806
1134807 txq->entries[read_ptr].skb = NULL;
1135808
1136
- if (!trans->cfg->use_tfh)
1137
- iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq);
809
+ if (!trans->trans_cfg->use_tfh)
810
+ iwl_txq_gen1_inval_byte_cnt_tbl(trans, txq);
1138811
1139812 iwl_pcie_txq_free_tfd(trans, txq);
1140813 }
1141814
1142815 iwl_pcie_txq_progress(txq);
1143816
1144
- if (iwl_queue_space(trans, txq) > txq->low_mark &&
1145
- test_bit(txq_id, trans_pcie->queue_stopped)) {
817
+ if (iwl_txq_space(trans, txq) > txq->low_mark &&
818
+ test_bit(txq_id, trans->txqs.queue_stopped)) {
1146819 struct sk_buff_head overflow_skbs;
1147820
1148821 __skb_queue_head_init(&overflow_skbs);
1149822 skb_queue_splice_init(&txq->overflow_q, &overflow_skbs);
823
+
824
+ /*
825
+ * We are going to transmit from the overflow queue.
826
+ * Remember this state so that wait_for_txq_empty will know we
827
+ * are adding more packets to the TFD queue. It cannot rely on
828
+ * the state of &txq->overflow_q, as we just emptied it, but
829
+ * haven't TXed the content yet.
830
+ */
831
+ txq->overflow_tx = true;
1150832
1151833 /*
1152834 * This is tricky: we are in reclaim path which is non
....@@ -1159,30 +841,40 @@
1159841
1160842 while (!skb_queue_empty(&overflow_skbs)) {
1161843 struct sk_buff *skb = __skb_dequeue(&overflow_skbs);
1162
- struct iwl_device_cmd *dev_cmd_ptr;
844
+ struct iwl_device_tx_cmd *dev_cmd_ptr;
1163845
1164846 dev_cmd_ptr = *(void **)((u8 *)skb->cb +
1165
- trans_pcie->dev_cmd_offs);
847
+ trans->txqs.dev_cmd_offs);
1166848
1167849 /*
1168850 * Note that we can very well be overflowing again.
1169
- * In that case, iwl_queue_space will be small again
851
+ * In that case, iwl_txq_space will be small again
1170852 * and we won't wake mac80211's queue.
1171853 */
1172854 iwl_trans_tx(trans, skb, dev_cmd_ptr, txq_id);
1173855 }
1174
- spin_lock_bh(&txq->lock);
1175856
1176
- if (iwl_queue_space(trans, txq) > txq->low_mark)
857
+ if (iwl_txq_space(trans, txq) > txq->low_mark)
1177858 iwl_wake_queue(trans, txq);
1178
- }
1179859
1180
- if (txq->read_ptr == txq->write_ptr) {
1181
- IWL_DEBUG_RPM(trans, "Q %d - last tx reclaimed\n", txq->id);
1182
- iwl_trans_unref(trans);
860
+ spin_lock_bh(&txq->lock);
861
+ txq->overflow_tx = false;
1183862 }
1184863
1185864 out:
865
+ spin_unlock_bh(&txq->lock);
866
+}
867
+
868
+/* Set wr_ptr of specific device and txq */
869
+void iwl_trans_pcie_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr)
870
+{
871
+ struct iwl_txq *txq = trans->txqs.txq[txq_id];
872
+
873
+ spin_lock_bh(&txq->lock);
874
+
875
+ txq->write_ptr = ptr;
876
+ txq->read_ptr = txq->write_ptr;
877
+
1186878 spin_unlock_bh(&txq->lock);
1187879 }
1188880
....@@ -1190,17 +882,13 @@
1190882 const struct iwl_host_cmd *cmd)
1191883 {
1192884 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1193
- const struct iwl_cfg *cfg = trans->cfg;
1194885 int ret;
1195886
1196887 lockdep_assert_held(&trans_pcie->reg_lock);
1197888
1198
- if (!(cmd->flags & CMD_SEND_IN_IDLE) &&
1199
- !trans_pcie->ref_cmd_in_flight) {
1200
- trans_pcie->ref_cmd_in_flight = true;
1201
- IWL_DEBUG_RPM(trans, "set ref_cmd_in_flight - ref\n");
1202
- iwl_trans_ref(trans);
1203
- }
889
+ /* Make sure the NIC is still alive in the bus */
890
+ if (test_bit(STATUS_TRANS_DEAD, &trans->status))
891
+ return -ENODEV;
1204892
1205893 /*
1206894 * wake up the NIC to make sure that the firmware will see the host
....@@ -1208,19 +896,19 @@
1208896 * returned. This needs to be done only on NICs that have
1209897 * apmg_wake_up_wa set.
1210898 */
1211
- if (cfg->base_params->apmg_wake_up_wa &&
899
+ if (trans->trans_cfg->base_params->apmg_wake_up_wa &&
1212900 !trans_pcie->cmd_hold_nic_awake) {
1213901 __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
1214
- BIT(cfg->csr->flag_mac_access_req));
902
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1215903
1216904 ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
1217
- BIT(cfg->csr->flag_val_mac_access_en),
1218
- (BIT(cfg->csr->flag_mac_clock_ready) |
905
+ CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
906
+ (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
1219907 CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP),
1220908 15000);
1221909 if (ret < 0) {
1222910 __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
1223
- BIT(cfg->csr->flag_mac_access_req));
911
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1224912 IWL_ERR(trans, "Failed to wake NIC for hcmd\n");
1225913 return -EIO;
1226914 }
....@@ -1240,29 +928,28 @@
1240928 static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
1241929 {
1242930 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1243
- struct iwl_txq *txq = trans_pcie->txq[txq_id];
1244
- unsigned long flags;
931
+ struct iwl_txq *txq = trans->txqs.txq[txq_id];
1245932 int nfreed = 0;
1246933 u16 r;
1247934
1248935 lockdep_assert_held(&txq->lock);
1249936
1250
- idx = iwl_pcie_get_cmd_index(txq, idx);
1251
- r = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
937
+ idx = iwl_txq_get_cmd_index(txq, idx);
938
+ r = iwl_txq_get_cmd_index(txq, txq->read_ptr);
1252939
1253
- if (idx >= trans->cfg->base_params->max_tfd_queue_size ||
1254
- (!iwl_queue_used(txq, idx))) {
1255
- WARN_ONCE(test_bit(txq_id, trans_pcie->queue_used),
940
+ if (idx >= trans->trans_cfg->base_params->max_tfd_queue_size ||
941
+ (!iwl_txq_used(txq, idx))) {
942
+ WARN_ONCE(test_bit(txq_id, trans->txqs.queue_used),
1256943 "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
1257944 __func__, txq_id, idx,
1258
- trans->cfg->base_params->max_tfd_queue_size,
945
+ trans->trans_cfg->base_params->max_tfd_queue_size,
1259946 txq->write_ptr, txq->read_ptr);
1260947 return;
1261948 }
1262949
1263
- for (idx = iwl_queue_inc_wrap(trans, idx); r != idx;
1264
- r = iwl_queue_inc_wrap(trans, r)) {
1265
- txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr);
950
+ for (idx = iwl_txq_inc_wrap(trans, idx); r != idx;
951
+ r = iwl_txq_inc_wrap(trans, r)) {
952
+ txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);
1266953
1267954 if (nfreed++ > 0) {
1268955 IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
....@@ -1272,9 +959,10 @@
1272959 }
1273960
1274961 if (txq->read_ptr == txq->write_ptr) {
1275
- spin_lock_irqsave(&trans_pcie->reg_lock, flags);
962
+ /* BHs are also disabled due to txq->lock */
963
+ spin_lock(&trans_pcie->reg_lock);
1276964 iwl_pcie_clear_cmd_in_flight(trans);
1277
- spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
965
+ spin_unlock(&trans_pcie->reg_lock);
1278966 }
1279967
1280968 iwl_pcie_txq_progress(txq);
....@@ -1314,11 +1002,11 @@
13141002 unsigned int wdg_timeout)
13151003 {
13161004 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1317
- struct iwl_txq *txq = trans_pcie->txq[txq_id];
1005
+ struct iwl_txq *txq = trans->txqs.txq[txq_id];
13181006 int fifo = -1;
13191007 bool scd_bug = false;
13201008
1321
- if (test_and_set_bit(txq_id, trans_pcie->queue_used))
1009
+ if (test_and_set_bit(txq_id, trans->txqs.queue_used))
13221010 WARN_ONCE(1, "queue %d already used - expect issues", txq_id);
13231011
13241012 txq->wd_timeout = msecs_to_jiffies(wdg_timeout);
....@@ -1327,7 +1015,7 @@
13271015 fifo = cfg->fifo;
13281016
13291017 /* Disable the scheduler prior configuring the cmd queue */
1330
- if (txq_id == trans_pcie->cmd_queue &&
1018
+ if (txq_id == trans->txqs.cmd.q_id &&
13311019 trans_pcie->scd_set_active)
13321020 iwl_scd_enable_set_active(trans, 0);
13331021
....@@ -1335,7 +1023,7 @@
13351023 iwl_scd_txq_set_inactive(trans, txq_id);
13361024
13371025 /* Set this queue as a chain-building queue unless it is CMD */
1338
- if (txq_id != trans_pcie->cmd_queue)
1026
+ if (txq_id != trans->txqs.cmd.q_id)
13391027 iwl_scd_txq_set_chain(trans, txq_id);
13401028
13411029 if (cfg->aggregate) {
....@@ -1369,7 +1057,7 @@
13691057 * this sad hardware issue.
13701058 * This bug has been fixed on devices 9000 and up.
13711059 */
1372
- scd_bug = !trans->cfg->mq_rx_supported &&
1060
+ scd_bug = !trans->trans_cfg->mq_rx_supported &&
13731061 !((ssn - txq->write_ptr) & 0x3f) &&
13741062 (ssn != txq->write_ptr);
13751063 if (scd_bug)
....@@ -1405,7 +1093,7 @@
14051093 SCD_QUEUE_STTS_REG_MSK);
14061094
14071095 /* enable the scheduler for this queue (only) */
1408
- if (txq_id == trans_pcie->cmd_queue &&
1096
+ if (txq_id == trans->txqs.cmd.q_id &&
14091097 trans_pcie->scd_set_active)
14101098 iwl_scd_enable_set_active(trans, BIT(txq_id));
14111099
....@@ -1424,8 +1112,7 @@
14241112 void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
14251113 bool shared_mode)
14261114 {
1427
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1428
- struct iwl_txq *txq = trans_pcie->txq[txq_id];
1115
+ struct iwl_txq *txq = trans->txqs.txq[txq_id];
14291116
14301117 txq->ampdu = !shared_mode;
14311118 }
....@@ -1438,8 +1125,8 @@
14381125 SCD_TX_STTS_QUEUE_OFFSET(txq_id);
14391126 static const u32 zero_val[4] = {};
14401127
1441
- trans_pcie->txq[txq_id]->frozen_expiry_remainder = 0;
1442
- trans_pcie->txq[txq_id]->frozen = false;
1128
+ trans->txqs.txq[txq_id]->frozen_expiry_remainder = 0;
1129
+ trans->txqs.txq[txq_id]->frozen = false;
14431130
14441131 /*
14451132 * Upon HW Rfkill - we stop the device, and then stop the queues
....@@ -1447,7 +1134,7 @@
14471134 * allow the op_mode to call txq_disable after it already called
14481135 * stop_device.
14491136 */
1450
- if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) {
1137
+ if (!test_and_clear_bit(txq_id, trans->txqs.queue_used)) {
14511138 WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
14521139 "queue %d not used", txq_id);
14531140 return;
....@@ -1461,7 +1148,7 @@
14611148 }
14621149
14631150 iwl_pcie_txq_unmap(trans, txq_id);
1464
- trans_pcie->txq[txq_id]->ampdu = false;
1151
+ trans->txqs.txq[txq_id]->ampdu = false;
14651152
14661153 IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
14671154 }
....@@ -1481,10 +1168,9 @@
14811168 struct iwl_host_cmd *cmd)
14821169 {
14831170 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1484
- struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
1171
+ struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
14851172 struct iwl_device_cmd *out_cmd;
14861173 struct iwl_cmd_meta *out_meta;
1487
- unsigned long flags;
14881174 void *dup_buf = NULL;
14891175 dma_addr_t phys_addr;
14901176 int idx;
....@@ -1495,7 +1181,7 @@
14951181 u32 cmd_pos;
14961182 const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
14971183 u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
1498
- unsigned long flags2;
1184
+ unsigned long flags;
14991185
15001186 if (WARN(!trans->wide_cmd_header &&
15011187 group_id > IWL_ALWAYS_LONG_GROUP,
....@@ -1579,10 +1265,10 @@
15791265 goto free_dup_buf;
15801266 }
15811267
1582
- spin_lock_irqsave(&txq->lock, flags2);
1268
+ spin_lock_irqsave(&txq->lock, flags);
15831269
1584
- if (iwl_queue_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
1585
- spin_unlock_irqrestore(&txq->lock, flags2);
1270
+ if (iwl_txq_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
1271
+ spin_unlock_irqrestore(&txq->lock, flags);
15861272
15871273 IWL_ERR(trans, "No space in command queue\n");
15881274 iwl_op_mode_cmd_queue_full(trans->op_mode);
....@@ -1590,7 +1276,7 @@
15901276 goto free_dup_buf;
15911277 }
15921278
1593
- idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
1279
+ idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
15941280 out_cmd = txq->entries[idx].cmd;
15951281 out_meta = &txq->entries[idx].meta;
15961282
....@@ -1608,7 +1294,7 @@
16081294 sizeof(struct iwl_cmd_header_wide));
16091295 out_cmd->hdr_wide.reserved = 0;
16101296 out_cmd->hdr_wide.sequence =
1611
- cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
1297
+ cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) |
16121298 INDEX_TO_SEQ(txq->write_ptr));
16131299
16141300 cmd_pos = sizeof(struct iwl_cmd_header_wide);
....@@ -1616,7 +1302,7 @@
16161302 } else {
16171303 out_cmd->hdr.cmd = iwl_cmd_opcode(cmd->id);
16181304 out_cmd->hdr.sequence =
1619
- cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
1305
+ cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) |
16201306 INDEX_TO_SEQ(txq->write_ptr));
16211307 out_cmd->hdr.group_id = 0;
16221308
....@@ -1667,13 +1353,13 @@
16671353 iwl_get_cmd_string(trans, cmd->id),
16681354 group_id, out_cmd->hdr.cmd,
16691355 le16_to_cpu(out_cmd->hdr.sequence),
1670
- cmd_size, txq->write_ptr, idx, trans_pcie->cmd_queue);
1356
+ cmd_size, txq->write_ptr, idx, trans->txqs.cmd.q_id);
16711357
16721358 /* start the TFD with the minimum copy bytes */
16731359 tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
16741360 memcpy(&txq->first_tb_bufs[idx], &out_cmd->hdr, tb0_size);
16751361 iwl_pcie_txq_build_tfd(trans, txq,
1676
- iwl_pcie_get_first_tb_dma(txq, idx),
1362
+ iwl_txq_get_first_tb_dma(txq, idx),
16771363 tb0_size, true);
16781364
16791365 /* map first command fragment, if any remains */
....@@ -1683,8 +1369,8 @@
16831369 copy_size - tb0_size,
16841370 DMA_TO_DEVICE);
16851371 if (dma_mapping_error(trans->dev, phys_addr)) {
1686
- iwl_pcie_tfd_unmap(trans, out_meta, txq,
1687
- txq->write_ptr);
1372
+ iwl_txq_gen1_tfd_unmap(trans, out_meta, txq,
1373
+ txq->write_ptr);
16881374 idx = -ENOMEM;
16891375 goto out;
16901376 }
....@@ -1707,8 +1393,8 @@
17071393 phys_addr = dma_map_single(trans->dev, (void *)data,
17081394 cmdlen[i], DMA_TO_DEVICE);
17091395 if (dma_mapping_error(trans->dev, phys_addr)) {
1710
- iwl_pcie_tfd_unmap(trans, out_meta, txq,
1711
- txq->write_ptr);
1396
+ iwl_txq_gen1_tfd_unmap(trans, out_meta, txq,
1397
+ txq->write_ptr);
17121398 idx = -ENOMEM;
17131399 goto out;
17141400 }
....@@ -1719,7 +1405,7 @@
17191405 BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE);
17201406 out_meta->flags = cmd->flags;
17211407 if (WARN_ON_ONCE(txq->entries[idx].free_buf))
1722
- kzfree(txq->entries[idx].free_buf);
1408
+ kfree_sensitive(txq->entries[idx].free_buf);
17231409 txq->entries[idx].free_buf = dup_buf;
17241410
17251411 trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide);
....@@ -1728,22 +1414,21 @@
17281414 if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
17291415 mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
17301416
1731
- spin_lock_irqsave(&trans_pcie->reg_lock, flags);
1417
+ spin_lock(&trans_pcie->reg_lock);
17321418 ret = iwl_pcie_set_cmd_in_flight(trans, cmd);
17331419 if (ret < 0) {
17341420 idx = ret;
1735
- spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
1736
- goto out;
1421
+ goto unlock_reg;
17371422 }
17381423
17391424 /* Increment and update queue's write index */
1740
- txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr);
1425
+ txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
17411426 iwl_pcie_txq_inc_wr_ptr(trans, txq);
17421427
1743
- spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
1744
-
1428
+ unlock_reg:
1429
+ spin_unlock(&trans_pcie->reg_lock);
17451430 out:
1746
- spin_unlock_irqrestore(&txq->lock, flags2);
1431
+ spin_unlock_irqrestore(&txq->lock, flags);
17471432 free_dup_buf:
17481433 if (idx < 0)
17491434 kfree(dup_buf);
....@@ -1767,14 +1452,14 @@
17671452 struct iwl_device_cmd *cmd;
17681453 struct iwl_cmd_meta *meta;
17691454 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1770
- struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
1455
+ struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
17711456
17721457 /* If a Tx command is being handled and it isn't in the actual
17731458 * command queue then there a command routing bug has been introduced
17741459 * in the queue management code. */
1775
- if (WARN(txq_id != trans_pcie->cmd_queue,
1460
+ if (WARN(txq_id != trans->txqs.cmd.q_id,
17761461 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
1777
- txq_id, trans_pcie->cmd_queue, sequence, txq->read_ptr,
1462
+ txq_id, trans->txqs.cmd.q_id, sequence, txq->read_ptr,
17781463 txq->write_ptr)) {
17791464 iwl_print_hex_error(trans, pkt, 32);
17801465 return;
....@@ -1782,13 +1467,13 @@
17821467
17831468 spin_lock_bh(&txq->lock);
17841469
1785
- cmd_index = iwl_pcie_get_cmd_index(txq, index);
1470
+ cmd_index = iwl_txq_get_cmd_index(txq, index);
17861471 cmd = txq->entries[cmd_index].cmd;
17871472 meta = &txq->entries[cmd_index].meta;
17881473 group_id = cmd->hdr.group_id;
17891474 cmd_id = iwl_cmd_id(cmd->hdr.cmd, group_id, 0);
17901475
1791
- iwl_pcie_tfd_unmap(trans, meta, txq, index);
1476
+ iwl_txq_gen1_tfd_unmap(trans, meta, txq, index);
17921477
17931478 /* Input error checking is done when commands are added to queue. */
17941479 if (meta->flags & CMD_WANT_SKB) {
....@@ -1814,20 +1499,6 @@
18141499 IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
18151500 iwl_get_cmd_string(trans, cmd_id));
18161501 wake_up(&trans_pcie->wait_command_queue);
1817
- }
1818
-
1819
- if (meta->flags & CMD_MAKE_TRANS_IDLE) {
1820
- IWL_DEBUG_INFO(trans, "complete %s - mark trans as idle\n",
1821
- iwl_get_cmd_string(trans, cmd->hdr.cmd));
1822
- set_bit(STATUS_TRANS_IDLE, &trans->status);
1823
- wake_up(&trans_pcie->d0i3_waitq);
1824
- }
1825
-
1826
- if (meta->flags & CMD_WAKE_UP_TRANS) {
1827
- IWL_DEBUG_INFO(trans, "complete %s - clear trans idle flag\n",
1828
- iwl_get_cmd_string(trans, cmd->hdr.cmd));
1829
- clear_bit(STATUS_TRANS_IDLE, &trans->status);
1830
- wake_up(&trans_pcie->d0i3_waitq);
18311502 }
18321503
18331504 meta->flags = 0;
....@@ -1860,7 +1531,7 @@
18601531 struct iwl_host_cmd *cmd)
18611532 {
18621533 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1863
- struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
1534
+ struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
18641535 int cmd_idx;
18651536 int ret;
18661537
....@@ -1875,16 +1546,6 @@
18751546
18761547 IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
18771548 iwl_get_cmd_string(trans, cmd->id));
1878
-
1879
- if (pm_runtime_suspended(&trans_pcie->pci_dev->dev)) {
1880
- ret = wait_event_timeout(trans_pcie->d0i3_waitq,
1881
- pm_runtime_active(&trans_pcie->pci_dev->dev),
1882
- msecs_to_jiffies(IWL_TRANS_IDLE_TIMEOUT));
1883
- if (!ret) {
1884
- IWL_ERR(trans, "Timeout exiting D0i3 before hcmd\n");
1885
- return -ETIMEDOUT;
1886
- }
1887
- }
18881549
18891550 cmd_idx = iwl_pcie_enqueue_hcmd(trans, cmd);
18901551 if (cmd_idx < 0) {
....@@ -1913,14 +1574,12 @@
19131574 iwl_get_cmd_string(trans, cmd->id));
19141575 ret = -ETIMEDOUT;
19151576
1916
- iwl_force_nmi(trans);
1917
- iwl_trans_fw_error(trans);
1918
-
1577
+ iwl_trans_pcie_sync_nmi(trans);
19191578 goto cancel;
19201579 }
19211580
19221581 if (test_bit(STATUS_FW_ERROR, &trans->status)) {
1923
- iwl_trans_dump_regs(trans);
1582
+ iwl_trans_pcie_dump_regs(trans);
19241583 IWL_ERR(trans, "FW error in SYNC CMD %s\n",
19251584 iwl_get_cmd_string(trans, cmd->id));
19261585 dump_stack();
....@@ -1965,6 +1624,10 @@
19651624
19661625 int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
19671626 {
1627
+ /* Make sure the NIC is still alive in the bus */
1628
+ if (test_bit(STATUS_TRANS_DEAD, &trans->status))
1629
+ return -ENODEV;
1630
+
19681631 if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
19691632 test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
19701633 IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
....@@ -1981,29 +1644,26 @@
19811644
19821645 static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
19831646 struct iwl_txq *txq, u8 hdr_len,
1984
- struct iwl_cmd_meta *out_meta,
1985
- struct iwl_device_cmd *dev_cmd, u16 tb1_len)
1647
+ struct iwl_cmd_meta *out_meta)
19861648 {
1987
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1988
- u16 tb2_len;
1649
+ u16 head_tb_len;
19891650 int i;
19901651
19911652 /*
19921653 * Set up TFD's third entry to point directly to remainder
19931654 * of skb's head, if any
19941655 */
1995
- tb2_len = skb_headlen(skb) - hdr_len;
1656
+ head_tb_len = skb_headlen(skb) - hdr_len;
19961657
1997
- if (tb2_len > 0) {
1998
- dma_addr_t tb2_phys = dma_map_single(trans->dev,
1999
- skb->data + hdr_len,
2000
- tb2_len, DMA_TO_DEVICE);
2001
- if (unlikely(dma_mapping_error(trans->dev, tb2_phys))) {
2002
- iwl_pcie_tfd_unmap(trans, out_meta, txq,
2003
- txq->write_ptr);
1658
+ if (head_tb_len > 0) {
1659
+ dma_addr_t tb_phys = dma_map_single(trans->dev,
1660
+ skb->data + hdr_len,
1661
+ head_tb_len, DMA_TO_DEVICE);
1662
+ if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
20041663 return -EINVAL;
2005
- }
2006
- iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, false);
1664
+ trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb->data + hdr_len,
1665
+ tb_phys, head_tb_len);
1666
+ iwl_pcie_txq_build_tfd(trans, txq, tb_phys, head_tb_len, false);
20071667 }
20081668
20091669 /* set up the remaining entries to point to the data */
....@@ -2018,50 +1678,22 @@
20181678 tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
20191679 skb_frag_size(frag), DMA_TO_DEVICE);
20201680
2021
- if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
2022
- iwl_pcie_tfd_unmap(trans, out_meta, txq,
2023
- txq->write_ptr);
1681
+ if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
20241682 return -EINVAL;
2025
- }
1683
+ trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb_frag_address(frag),
1684
+ tb_phys, skb_frag_size(frag));
20261685 tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
20271686 skb_frag_size(frag), false);
1687
+ if (tb_idx < 0)
1688
+ return tb_idx;
20281689
20291690 out_meta->tbs |= BIT(tb_idx);
20301691 }
20311692
2032
- trace_iwlwifi_dev_tx(trans->dev, skb,
2033
- iwl_pcie_get_tfd(trans, txq, txq->write_ptr),
2034
- trans_pcie->tfd_size,
2035
- &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
2036
- hdr_len);
2037
- trace_iwlwifi_dev_tx_data(trans->dev, skb, hdr_len);
20381693 return 0;
20391694 }
20401695
20411696 #ifdef CONFIG_INET
2042
-struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len)
2043
-{
2044
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2045
- struct iwl_tso_hdr_page *p = this_cpu_ptr(trans_pcie->tso_hdr_page);
2046
-
2047
- if (!p->page)
2048
- goto alloc;
2049
-
2050
- /* enough room on this page */
2051
- if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE)
2052
- return p;
2053
-
2054
- /* We don't have enough room on this page, get a new one. */
2055
- __free_page(p->page);
2056
-
2057
-alloc:
2058
- p->page = alloc_page(GFP_ATOMIC);
2059
- if (!p->page)
2060
- return NULL;
2061
- p->pos = page_address(p->page);
2062
- return p;
2063
-}
2064
-
20651697 static void iwl_compute_pseudo_hdr_csum(void *iph, struct tcphdr *tcph,
20661698 bool ipv6, unsigned int len)
20671699 {
....@@ -2084,18 +1716,18 @@
20841716 static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
20851717 struct iwl_txq *txq, u8 hdr_len,
20861718 struct iwl_cmd_meta *out_meta,
2087
- struct iwl_device_cmd *dev_cmd, u16 tb1_len)
1719
+ struct iwl_device_tx_cmd *dev_cmd,
1720
+ u16 tb1_len)
20881721 {
20891722 struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
2090
- struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
1723
+ struct iwl_trans_pcie *trans_pcie =
1724
+ IWL_TRANS_GET_PCIE_TRANS(txq->trans);
20911725 struct ieee80211_hdr *hdr = (void *)skb->data;
20921726 unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
20931727 unsigned int mss = skb_shinfo(skb)->gso_size;
20941728 u16 length, iv_len, amsdu_pad;
20951729 u8 *start_hdr;
20961730 struct iwl_tso_hdr_page *hdr_page;
2097
- struct page **page_ptr;
2098
- int ret;
20991731 struct tso_t tso;
21001732
21011733 /* if the packet is protected, then it must be CCMP or GCMP */
....@@ -2104,8 +1736,8 @@
21041736 IEEE80211_CCMP_HDR_LEN : 0;
21051737
21061738 trace_iwlwifi_dev_tx(trans->dev, skb,
2107
- iwl_pcie_get_tfd(trans, txq, txq->write_ptr),
2108
- trans_pcie->tfd_size,
1739
+ iwl_txq_get_tfd(trans, txq, txq->write_ptr),
1740
+ trans->txqs.tfd.size,
21091741 &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 0);
21101742
21111743 ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
....@@ -2118,14 +1750,11 @@
21181750 (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len;
21191751
21201752 /* Our device supports 9 segments at most, it will fit in 1 page */
2121
- hdr_page = get_page_hdr(trans, hdr_room);
1753
+ hdr_page = get_page_hdr(trans, hdr_room, skb);
21221754 if (!hdr_page)
21231755 return -ENOMEM;
21241756
2125
- get_page(hdr_page->page);
21261757 start_hdr = hdr_page->pos;
2127
- page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
2128
- *page_ptr = hdr_page->page;
21291758 memcpy(hdr_page->pos, skb->data + hdr_len, iv_len);
21301759 hdr_page->pos += iv_len;
21311760
....@@ -2181,10 +1810,8 @@
21811810 if (trans_pcie->sw_csum_tx) {
21821811 csum_skb = alloc_skb(data_left + tcp_hdrlen(skb),
21831812 GFP_ATOMIC);
2184
- if (!csum_skb) {
2185
- ret = -ENOMEM;
2186
- goto out_unmap;
2187
- }
1813
+ if (!csum_skb)
1814
+ return -ENOMEM;
21881815
21891816 iwl_compute_pseudo_hdr_csum(iph, tcph,
21901817 skb->protocol ==
....@@ -2205,13 +1832,12 @@
22051832 hdr_tb_len, DMA_TO_DEVICE);
22061833 if (unlikely(dma_mapping_error(trans->dev, hdr_tb_phys))) {
22071834 dev_kfree_skb(csum_skb);
2208
- ret = -EINVAL;
2209
- goto out_unmap;
1835
+ return -EINVAL;
22101836 }
22111837 iwl_pcie_txq_build_tfd(trans, txq, hdr_tb_phys,
22121838 hdr_tb_len, false);
2213
- trace_iwlwifi_dev_tx_tso_chunk(trans->dev, start_hdr,
2214
- hdr_tb_len);
1839
+ trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
1840
+ hdr_tb_phys, hdr_tb_len);
22151841 /* add this subframe's headers' length to the tx_cmd */
22161842 le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);
22171843
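/*
 * TSO header chunks are now traced with the generic
 * trace_iwlwifi_dev_tx_tb() event (which also records the DMA address)
 * instead of the dedicated trace_iwlwifi_dev_tx_tso_chunk().
 */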
....@@ -2231,14 +1857,13 @@
22311857 size, DMA_TO_DEVICE);
22321858 if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
22331859 dev_kfree_skb(csum_skb);
2234
- ret = -EINVAL;
2235
- goto out_unmap;
1860
+ return -EINVAL;
22361861 }
22371862
22381863 iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
22391864 size, false);
2240
- trace_iwlwifi_dev_tx_tso_chunk(trans->dev, tso.data,
2241
- size);
1865
+ trace_iwlwifi_dev_tx_tb(trans->dev, skb, tso.data,
1866
+ tb_phys, size);
22421867
22431868 data_left -= size;
22441869 tso_build_data(skb, &tso, size);
....@@ -2266,16 +1891,13 @@
22661891 skb_push(skb, hdr_len + iv_len);
22671892
22681893 return 0;
2269
-
2270
-out_unmap:
2271
- iwl_pcie_tfd_unmap(trans, out_meta, txq, txq->write_ptr);
2272
- return ret;
22731894 }
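/*
 * The local out_unmap label is gone: every error in the A-MSDU builder
 * now returns directly (still freeing csum_skb where needed), and the
 * TBs mapped so far are released once, in the caller's out_err path.
 */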
22741895 #else /* CONFIG_INET */
22751896 static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
22761897 struct iwl_txq *txq, u8 hdr_len,
22771898 struct iwl_cmd_meta *out_meta,
2278
- struct iwl_device_cmd *dev_cmd, u16 tb1_len)
1899
+ struct iwl_device_tx_cmd *dev_cmd,
1900
+ u16 tb1_len)
22791901 {
22801902 /* No A-MSDU without CONFIG_INET */
22811903 WARN_ON(1);
....@@ -2285,7 +1907,7 @@
22851907 #endif /* CONFIG_INET */
22861908
22871909 int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
2288
- struct iwl_device_cmd *dev_cmd, int txq_id)
1910
+ struct iwl_device_tx_cmd *dev_cmd, int txq_id)
22891911 {
22901912 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
22911913 struct ieee80211_hdr *hdr;
....@@ -2302,9 +1924,9 @@
23021924 u16 wifi_seq;
23031925 bool amsdu;
23041926
2305
- txq = trans_pcie->txq[txq_id];
1927
+ txq = trans->txqs.txq[txq_id];
23061928
2307
- if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used),
1929
+ if (WARN_ONCE(!test_bit(txq_id, trans->txqs.queue_used),
23081930 "TX on unused queue %d\n", txq_id))
23091931 return -EINVAL;
23101932
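/*
 * Per-queue state moves from iwl_trans_pcie to the transport-wide
 * trans->txqs: both the TX queue array and the queue_used bitmap are
 * now looked up there.
 */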
....@@ -2324,7 +1946,7 @@
23241946 }
23251947
23261948 if (skb_is_nonlinear(skb) &&
2327
- skb_shinfo(skb)->nr_frags > IWL_PCIE_MAX_FRAGS(trans_pcie) &&
1949
+ skb_shinfo(skb)->nr_frags > IWL_TRANS_MAX_FRAGS(trans) &&
23281950 __skb_linearize(skb))
23291951 return -ENOMEM;
23301952
....@@ -2337,15 +1959,15 @@
23371959
23381960 spin_lock(&txq->lock);
23391961
2340
- if (iwl_queue_space(trans, txq) < txq->high_mark) {
2341
- iwl_stop_queue(trans, txq);
1962
+ if (iwl_txq_space(trans, txq) < txq->high_mark) {
1963
+ iwl_txq_stop(trans, txq);
23421964
23431965 /* don't put the packet on the ring, if there is no room */
2344
- if (unlikely(iwl_queue_space(trans, txq) < 3)) {
2345
- struct iwl_device_cmd **dev_cmd_ptr;
1966
+ if (unlikely(iwl_txq_space(trans, txq) < 3)) {
1967
+ struct iwl_device_tx_cmd **dev_cmd_ptr;
23461968
23471969 dev_cmd_ptr = (void *)((u8 *)skb->cb +
2348
- trans_pcie->dev_cmd_offs);
1970
+ trans->txqs.dev_cmd_offs);
23491971
23501972 *dev_cmd_ptr = dev_cmd;
23511973 __skb_queue_tail(&txq->overflow_q, skb);
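/*
 * When the queue is nearly full the frame is parked on overflow_q and
 * the device command pointer is stashed in skb->cb at the offset now
 * published as trans->txqs.dev_cmd_offs, typed as iwl_device_tx_cmd.
 */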
....@@ -2374,7 +1996,7 @@
23741996 cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
23751997 INDEX_TO_SEQ(txq->write_ptr)));
23761998
2377
- tb0_phys = iwl_pcie_get_first_tb_dma(txq, txq->write_ptr);
1999
+ tb0_phys = iwl_txq_get_first_tb_dma(txq, txq->write_ptr);
23782000 scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) +
23792001 offsetof(struct iwl_tx_cmd, scratch);
23802002
....@@ -2423,6 +2045,12 @@
24232045 goto out_err;
24242046 iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false);
24252047
2048
+ trace_iwlwifi_dev_tx(trans->dev, skb,
2049
+ iwl_txq_get_tfd(trans, txq, txq->write_ptr),
2050
+ trans->txqs.tfd.size,
2051
+ &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
2052
+ hdr_len);
2053
+
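/*
 * The whole-frame trace_iwlwifi_dev_tx() call moves here from
 * iwl_fill_data_tbs(): it fires once TB1 has been added and before
 * either the A-MSDU or the plain data path builds the remaining TBs.
 */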
24262054 /*
24272055 * If gso_size wasn't set, don't give the frame "amsdu treatment"
24282056 * (adding subframes, etc.).
....@@ -2434,43 +2062,48 @@
24342062 out_meta, dev_cmd,
24352063 tb1_len)))
24362064 goto out_err;
2437
- } else if (unlikely(iwl_fill_data_tbs(trans, skb, txq, hdr_len,
2438
- out_meta, dev_cmd, tb1_len))) {
2439
- goto out_err;
2065
+ } else {
2066
+ struct sk_buff *frag;
2067
+
2068
+ if (unlikely(iwl_fill_data_tbs(trans, skb, txq, hdr_len,
2069
+ out_meta)))
2070
+ goto out_err;
2071
+
2072
+ skb_walk_frags(skb, frag) {
2073
+ if (unlikely(iwl_fill_data_tbs(trans, frag, txq, 0,
2074
+ out_meta)))
2075
+ goto out_err;
2076
+ }
24402077 }
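/*
 * In the non-A-MSDU case the skb's frag_list is now walked explicitly:
 * iwl_fill_data_tbs() runs for the main skb and then once per chained
 * skb (with hdr_len 0), so each chained buffer gets its own TBs.
 */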
24412078
24422079 /* building the A-MSDU might have changed this data, so memcpy it now */
2443
- memcpy(&txq->first_tb_bufs[txq->write_ptr], &dev_cmd->hdr,
2444
- IWL_FIRST_TB_SIZE);
2080
+ memcpy(&txq->first_tb_bufs[txq->write_ptr], dev_cmd, IWL_FIRST_TB_SIZE);
24452081
2446
- tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr);
2082
+ tfd = iwl_txq_get_tfd(trans, txq, txq->write_ptr);
24472083 /* Set up entry for this TFD in Tx byte-count array */
2448
- iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len),
2449
- iwl_pcie_tfd_get_num_tbs(trans, tfd));
2084
+ iwl_txq_gen1_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len),
2085
+ iwl_txq_gen1_tfd_get_num_tbs(trans,
2086
+ tfd));
24502087
24512088 wait_write_ptr = ieee80211_has_morefrags(fc);
24522089
24532090 /* start timer if queue currently empty */
2454
- if (txq->read_ptr == txq->write_ptr) {
2455
- if (txq->wd_timeout) {
2456
- /*
2457
- * If the TXQ is active, then set the timer, if not,
2458
- * set the timer in remainder so that the timer will
2459
- * be armed with the right value when the station will
2460
- * wake up.
2461
- */
2462
- if (!txq->frozen)
2463
- mod_timer(&txq->stuck_timer,
2464
- jiffies + txq->wd_timeout);
2465
- else
2466
- txq->frozen_expiry_remainder = txq->wd_timeout;
2467
- }
2468
- IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", txq->id);
2469
- iwl_trans_ref(trans);
2091
+ if (txq->read_ptr == txq->write_ptr && txq->wd_timeout) {
2092
+ /*
2093
+ * If the TXQ is active, then set the timer, if not,
2094
+ * set the timer in remainder so that the timer will
2095
+ * be armed with the right value when the station will
2096
+ * wake up.
2097
+ */
2098
+ if (!txq->frozen)
2099
+ mod_timer(&txq->stuck_timer,
2100
+ jiffies + txq->wd_timeout);
2101
+ else
2102
+ txq->frozen_expiry_remainder = txq->wd_timeout;
24702103 }
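/*
 * The first-TX bookkeeping is reduced to arming the stuck-queue timer:
 * the read_ptr == write_ptr and wd_timeout checks are folded into one
 * condition, and the IWL_DEBUG_RPM message plus the iwl_trans_ref()
 * call (a runtime-PM style reference) are dropped from this path.
 */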
24712104
24722105 /* Tell device the write index *just past* this latest filled TFD */
2473
- txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr);
2106
+ txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
24742107 if (!wait_write_ptr)
24752108 iwl_pcie_txq_inc_wr_ptr(trans, txq);
24762109
....@@ -2481,6 +2114,7 @@
24812114 spin_unlock(&txq->lock);
24822115 return 0;
24832116 out_err:
2117
+ iwl_txq_gen1_tfd_unmap(trans, out_meta, txq, txq->write_ptr);
24842118 spin_unlock(&txq->lock);
24852119 return -1;
24862120 }
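/*
 * Error handling is centralized: instead of each helper unmapping on
 * failure, out_err now calls iwl_txq_gen1_tfd_unmap() once to release
 * whatever was already mapped into the TFD before returning.
 */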