From a5969cabbb4660eab42b6ef0412cbbd1200cf14d Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Sat, 12 Oct 2024 07:10:09 +0000
Subject: [PATCH] Change led to gpio

---
 kernel/drivers/net/wireless/intel/iwlwifi/pcie/tx.c | 990 ++++++++++++++++++---------------------------------
 1 file changed, 312 insertions(+), 678 deletions(-)

diff --git a/kernel/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/kernel/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index 41ba0a7..1333713 100644
--- a/kernel/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/kernel/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -1,12 +1,14 @@
 /******************************************************************************
  *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
  * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
+ * Copyright(c) 2018 - 2020 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as
@@ -17,23 +19,52 @@
  * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  * more details.
  *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
  * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
+ * file called COPYING.
  *
  * Contact Information:
  * Intel Linux Wireless <linuxwifi@intel.com>
  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 - 2020 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #include <linux/etherdevice.h> #include <linux/ieee80211.h> #include <linux/slab.h> #include <linux/sched.h> -#include <linux/pm_runtime.h> #include <net/ip6_checksum.h> #include <net/tso.h> @@ -45,9 +76,6 @@ #include "iwl-op-mode.h" #include "internal.h" #include "fw/api/tx.h" - -#define IWL_TX_CRC_SIZE 4 -#define IWL_TX_DELIMITER_SIZE 4 /*************** DMA-QUEUE-GENERAL-FUNCTIONS ***** * DMA services @@ -71,60 +99,6 @@ * ***************************************************/ -int iwl_queue_space(struct iwl_trans *trans, const struct iwl_txq *q) -{ - unsigned int max; - unsigned int used; - - /* - * To avoid ambiguity between empty and completely full queues, there - * should always be less than max_tfd_queue_size elements in the queue. - * If q->n_window is smaller than max_tfd_queue_size, there is no need - * to reserve any queue entries for this purpose. - */ - if (q->n_window < trans->cfg->base_params->max_tfd_queue_size) - max = q->n_window; - else - max = trans->cfg->base_params->max_tfd_queue_size - 1; - - /* - * max_tfd_queue_size is a power of 2, so the following is equivalent to - * modulo by max_tfd_queue_size and is well defined. - */ - used = (q->write_ptr - q->read_ptr) & - (trans->cfg->base_params->max_tfd_queue_size - 1); - - if (WARN_ON(used > max)) - return 0; - - return max - used; -} - -/* - * iwl_queue_init - Initialize queue's high/low-water and read/write indexes - */ -static int iwl_queue_init(struct iwl_txq *q, int slots_num) -{ - q->n_window = slots_num; - - /* slots_num must be power-of-two size, otherwise - * iwl_pcie_get_cmd_index is broken. 
*/ - if (WARN_ON(!is_power_of_2(slots_num))) - return -EINVAL; - - q->low_mark = q->n_window / 4; - if (q->low_mark < 4) - q->low_mark = 4; - - q->high_mark = q->n_window / 8; - if (q->high_mark < 2) - q->high_mark = 2; - - q->write_ptr = 0; - q->read_ptr = 0; - - return 0; -} int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr, size_t size) @@ -149,107 +123,12 @@ memset(ptr, 0, sizeof(*ptr)); } -static void iwl_pcie_txq_stuck_timer(struct timer_list *t) -{ - struct iwl_txq *txq = from_timer(txq, t, stuck_timer); - struct iwl_trans_pcie *trans_pcie = txq->trans_pcie; - struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie); - - spin_lock(&txq->lock); - /* check if triggered erroneously */ - if (txq->read_ptr == txq->write_ptr) { - spin_unlock(&txq->lock); - return; - } - spin_unlock(&txq->lock); - - iwl_trans_pcie_log_scd_error(trans, txq); - - iwl_force_nmi(trans); -} - -/* - * iwl_pcie_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array - */ -static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans, - struct iwl_txq *txq, u16 byte_cnt, - int num_tbs) -{ - struct iwlagn_scd_bc_tbl *scd_bc_tbl; - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - int write_ptr = txq->write_ptr; - int txq_id = txq->id; - u8 sec_ctl = 0; - u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE; - __le16 bc_ent; - struct iwl_tx_cmd *tx_cmd = - (void *)txq->entries[txq->write_ptr].cmd->payload; - u8 sta_id = tx_cmd->sta_id; - - scd_bc_tbl = trans_pcie->scd_bc_tbls.addr; - - sec_ctl = tx_cmd->sec_ctl; - - switch (sec_ctl & TX_CMD_SEC_MSK) { - case TX_CMD_SEC_CCM: - len += IEEE80211_CCMP_MIC_LEN; - break; - case TX_CMD_SEC_TKIP: - len += IEEE80211_TKIP_ICV_LEN; - break; - case TX_CMD_SEC_WEP: - len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN; - break; - } - if (trans_pcie->bc_table_dword) - len = DIV_ROUND_UP(len, 4); - - if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX)) - return; - - bc_ent = cpu_to_le16(len | (sta_id << 12)); - - scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent; - - if (write_ptr < TFD_QUEUE_SIZE_BC_DUP) - scd_bc_tbl[txq_id]. - tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent; -} - -static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans, - struct iwl_txq *txq) -{ - struct iwl_trans_pcie *trans_pcie = - IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr; - int txq_id = txq->id; - int read_ptr = txq->read_ptr; - u8 sta_id = 0; - __le16 bc_ent; - struct iwl_tx_cmd *tx_cmd = - (void *)txq->entries[read_ptr].cmd->payload; - - WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX); - - if (txq_id != trans_pcie->cmd_queue) - sta_id = tx_cmd->sta_id; - - bc_ent = cpu_to_le16(1 | (sta_id << 12)); - - scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent; - - if (read_ptr < TFD_QUEUE_SIZE_BC_DUP) - scd_bc_tbl[txq_id]. - tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent; -} - /* * iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware */ static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq) { - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); u32 reg = 0; int txq_id = txq->id; @@ -261,8 +140,8 @@ * 2. NIC is woken up for CMD regardless of shadow outside this function * 3. 
there is a chance that the NIC is asleep */ - if (!trans->cfg->base_params->shadow_reg_enable && - txq_id != trans_pcie->cmd_queue && + if (!trans->trans_cfg->base_params->shadow_reg_enable && + txq_id != trans->txqs.cmd.q_id && test_bit(STATUS_TPOWER_PMI, &trans->status)) { /* * wake up nic if it's powered down ... @@ -275,7 +154,7 @@ IWL_DEBUG_INFO(trans, "Tx queue %d requesting wakeup, GP1 = 0x%x\n", txq_id, reg); iwl_set_bit(trans, CSR_GP_CNTRL, - BIT(trans->cfg->csr->flag_mac_access_req)); + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); txq->need_update = true; return; } @@ -293,13 +172,12 @@ void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans) { - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int i; - for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) { - struct iwl_txq *txq = trans_pcie->txq[i]; + for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) { + struct iwl_txq *txq = trans->txqs.txq[i]; - if (!test_bit(i, trans_pcie->queue_used)) + if (!test_bit(i, trans->txqs.queue_used)) continue; spin_lock_bh(&txq->lock); @@ -308,35 +186,6 @@ txq->need_update = false; } spin_unlock_bh(&txq->lock); - } -} - -static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_trans *trans, - void *_tfd, u8 idx) -{ - - if (trans->cfg->use_tfh) { - struct iwl_tfh_tfd *tfd = _tfd; - struct iwl_tfh_tb *tb = &tfd->tbs[idx]; - - return (dma_addr_t)(le64_to_cpu(tb->addr)); - } else { - struct iwl_tfd *tfd = _tfd; - struct iwl_tfd_tb *tb = &tfd->tbs[idx]; - dma_addr_t addr = get_unaligned_le32(&tb->lo); - dma_addr_t hi_len; - - if (sizeof(dma_addr_t) <= sizeof(u32)) - return addr; - - hi_len = le16_to_cpu(tb->hi_n_len) & 0xF; - - /* - * shift by 16 twice to avoid warnings on 32-bit - * (where this code never runs anyway due to the - * if statement above) - */ - return addr | ((hi_len << 16) << 16); } } @@ -356,67 +205,6 @@ tfd_fh->num_tbs = idx + 1; } -static inline u8 iwl_pcie_tfd_get_num_tbs(struct iwl_trans *trans, void *_tfd) -{ - if (trans->cfg->use_tfh) { - struct iwl_tfh_tfd *tfd = _tfd; - - return le16_to_cpu(tfd->num_tbs) & 0x1f; - } else { - struct iwl_tfd *tfd = _tfd; - - return tfd->num_tbs & 0x1f; - } -} - -static void iwl_pcie_tfd_unmap(struct iwl_trans *trans, - struct iwl_cmd_meta *meta, - struct iwl_txq *txq, int index) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - int i, num_tbs; - void *tfd = iwl_pcie_get_tfd(trans, txq, index); - - /* Sanity check on number of chunks */ - num_tbs = iwl_pcie_tfd_get_num_tbs(trans, tfd); - - if (num_tbs > trans_pcie->max_tbs) { - IWL_ERR(trans, "Too many chunks: %i\n", num_tbs); - /* @todo issue fatal error, it is quite serious situation */ - return; - } - - /* first TB is never freed - it's the bidirectional DMA data */ - - for (i = 1; i < num_tbs; i++) { - if (meta->tbs & BIT(i)) - dma_unmap_page(trans->dev, - iwl_pcie_tfd_tb_get_addr(trans, tfd, i), - iwl_pcie_tfd_tb_get_len(trans, tfd, i), - DMA_TO_DEVICE); - else - dma_unmap_single(trans->dev, - iwl_pcie_tfd_tb_get_addr(trans, tfd, - i), - iwl_pcie_tfd_tb_get_len(trans, tfd, - i), - DMA_TO_DEVICE); - } - - meta->tbs = 0; - - if (trans->cfg->use_tfh) { - struct iwl_tfh_tfd *tfd_fh = (void *)tfd; - - tfd_fh->num_tbs = 0; - } else { - struct iwl_tfd *tfd_fh = (void *)tfd; - - tfd_fh->num_tbs = 0; - } - -} - /* * iwl_pcie_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr] * @trans - transport private data @@ -432,14 +220,14 @@ * idx is bounded by n_window */ int rd_ptr = txq->read_ptr; - int idx = 
iwl_pcie_get_cmd_index(txq, rd_ptr); + int idx = iwl_txq_get_cmd_index(txq, rd_ptr); lockdep_assert_held(&txq->lock); /* We have only q->n_window txq->entries, but we use * TFD_QUEUE_SIZE_MAX tfds */ - iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, txq, rd_ptr); + iwl_txq_gen1_tfd_unmap(trans, &txq->entries[idx].meta, txq, rd_ptr); /* free SKB */ if (txq->entries) { @@ -461,21 +249,20 @@ static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq, dma_addr_t addr, u16 len, bool reset) { - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); void *tfd; u32 num_tbs; - tfd = txq->tfds + trans_pcie->tfd_size * txq->write_ptr; + tfd = txq->tfds + trans->txqs.tfd.size * txq->write_ptr; if (reset) - memset(tfd, 0, trans_pcie->tfd_size); + memset(tfd, 0, trans->txqs.tfd.size); - num_tbs = iwl_pcie_tfd_get_num_tbs(trans, tfd); + num_tbs = iwl_txq_gen1_tfd_get_num_tbs(trans, tfd); /* Each TFD can point to a maximum max_tbs Tx buffers */ - if (num_tbs >= trans_pcie->max_tbs) { + if (num_tbs >= trans->txqs.tfd.max_tbs) { IWL_ERR(trans, "Error can not send more than %d chunks\n", - trans_pcie->max_tbs); + trans->txqs.tfd.max_tbs); return -EINVAL; } @@ -488,139 +275,20 @@ return num_tbs; } -int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, - int slots_num, bool cmd_queue) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - size_t tfd_sz = trans_pcie->tfd_size * - trans->cfg->base_params->max_tfd_queue_size; - size_t tb0_buf_sz; - int i; - - if (WARN_ON(txq->entries || txq->tfds)) - return -EINVAL; - - if (trans->cfg->use_tfh) - tfd_sz = trans_pcie->tfd_size * slots_num; - - timer_setup(&txq->stuck_timer, iwl_pcie_txq_stuck_timer, 0); - txq->trans_pcie = trans_pcie; - - txq->n_window = slots_num; - - txq->entries = kcalloc(slots_num, - sizeof(struct iwl_pcie_txq_entry), - GFP_KERNEL); - - if (!txq->entries) - goto error; - - if (cmd_queue) - for (i = 0; i < slots_num; i++) { - txq->entries[i].cmd = - kmalloc(sizeof(struct iwl_device_cmd), - GFP_KERNEL); - if (!txq->entries[i].cmd) - goto error; - } - - /* Circular buffer of transmit frame descriptors (TFDs), - * shared with device */ - txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz, - &txq->dma_addr, GFP_KERNEL); - if (!txq->tfds) - goto error; - - BUILD_BUG_ON(IWL_FIRST_TB_SIZE_ALIGN != sizeof(*txq->first_tb_bufs)); - - tb0_buf_sz = sizeof(*txq->first_tb_bufs) * slots_num; - - txq->first_tb_bufs = dma_alloc_coherent(trans->dev, tb0_buf_sz, - &txq->first_tb_dma, - GFP_KERNEL); - if (!txq->first_tb_bufs) - goto err_free_tfds; - - return 0; -err_free_tfds: - dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr); -error: - if (txq->entries && cmd_queue) - for (i = 0; i < slots_num; i++) - kfree(txq->entries[i].cmd); - kfree(txq->entries); - txq->entries = NULL; - - return -ENOMEM; - -} - -int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, - int slots_num, bool cmd_queue) -{ - int ret; - u32 tfd_queue_max_size = trans->cfg->base_params->max_tfd_queue_size; - - txq->need_update = false; - - /* max_tfd_queue_size must be power-of-two size, otherwise - * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. 
*/ - if (WARN_ONCE(tfd_queue_max_size & (tfd_queue_max_size - 1), - "Max tfd queue size must be a power of two, but is %d", - tfd_queue_max_size)) - return -EINVAL; - - /* Initialize queue's high/low-water marks, and head/tail indexes */ - ret = iwl_queue_init(txq, slots_num); - if (ret) - return ret; - - spin_lock_init(&txq->lock); - - if (cmd_queue) { - static struct lock_class_key iwl_pcie_cmd_queue_lock_class; - - lockdep_set_class(&txq->lock, &iwl_pcie_cmd_queue_lock_class); - } - - __skb_queue_head_init(&txq->overflow_q); - - return 0; -} - -void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie, - struct sk_buff *skb) -{ - struct page **page_ptr; - - page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs); - - if (*page_ptr) { - __free_page(*page_ptr); - *page_ptr = NULL; - } -} - static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); lockdep_assert_held(&trans_pcie->reg_lock); - if (trans_pcie->ref_cmd_in_flight) { - trans_pcie->ref_cmd_in_flight = false; - IWL_DEBUG_RPM(trans, "clear ref_cmd_in_flight - unref\n"); - iwl_trans_unref(trans); - } - - if (!trans->cfg->base_params->apmg_wake_up_wa) + if (!trans->trans_cfg->base_params->apmg_wake_up_wa) return; if (WARN_ON(!trans_pcie->cmd_hold_nic_awake)) return; trans_pcie->cmd_hold_nic_awake = false; __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, - BIT(trans->cfg->csr->flag_mac_access_req)); + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); } /* @@ -629,7 +297,7 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_txq *txq = trans_pcie->txq[txq_id]; + struct iwl_txq *txq = trans->txqs.txq[txq_id]; if (!txq) { IWL_ERR(trans, "Trying to free a queue that wasn't allocated?\n"); @@ -641,29 +309,22 @@ IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n", txq_id, txq->read_ptr); - if (txq_id != trans_pcie->cmd_queue) { + if (txq_id != trans->txqs.cmd.q_id) { struct sk_buff *skb = txq->entries[txq->read_ptr].skb; if (WARN_ON_ONCE(!skb)) continue; - iwl_pcie_free_tso_page(trans_pcie, skb); + iwl_txq_free_tso_page(trans, skb); } iwl_pcie_txq_free_tfd(trans, txq); - txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr); + txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr); if (txq->read_ptr == txq->write_ptr) { - unsigned long flags; - - spin_lock_irqsave(&trans_pcie->reg_lock, flags); - if (txq_id != trans_pcie->cmd_queue) { - IWL_DEBUG_RPM(trans, "Q %d - last tx freed\n", - txq->id); - iwl_trans_unref(trans); - } else { + spin_lock(&trans_pcie->reg_lock); + if (txq_id == trans->txqs.cmd.q_id) iwl_pcie_clear_cmd_in_flight(trans); - } - spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); + spin_unlock(&trans_pcie->reg_lock); } } @@ -689,8 +350,7 @@ */ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id) { - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_txq *txq = trans_pcie->txq[txq_id]; + struct iwl_txq *txq = trans->txqs.txq[txq_id]; struct device *dev = trans->dev; int i; @@ -700,17 +360,17 @@ iwl_pcie_txq_unmap(trans, txq_id); /* De-alloc array of command/tx buffers */ - if (txq_id == trans_pcie->cmd_queue) + if (txq_id == trans->txqs.cmd.q_id) for (i = 0; i < txq->n_window; i++) { - kzfree(txq->entries[i].cmd); - kzfree(txq->entries[i].free_buf); + kfree_sensitive(txq->entries[i].cmd); + kfree_sensitive(txq->entries[i].free_buf); } /* De-alloc circular buffer of TFDs */ if (txq->tfds) { 
dma_free_coherent(dev, - trans_pcie->tfd_size * - trans->cfg->base_params->max_tfd_queue_size, + trans->txqs.tfd.size * + trans->trans_cfg->base_params->max_tfd_queue_size, txq->tfds, txq->dma_addr); txq->dma_addr = 0; txq->tfds = NULL; @@ -732,15 +392,16 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - int nq = trans->cfg->base_params->num_of_queues; + int nq = trans->trans_cfg->base_params->num_of_queues; int chan; u32 reg_val; int clear_dwords = (SCD_TRANS_TBL_OFFSET_QUEUE(nq) - SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(u32); /* make sure all queue are not stopped/used */ - memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped)); - memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used)); + memset(trans->txqs.queue_stopped, 0, + sizeof(trans->txqs.queue_stopped)); + memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used)); trans_pcie->scd_base_addr = iwl_read_prph(trans, SCD_SRAM_BASE_ADDR); @@ -754,17 +415,17 @@ NULL, clear_dwords); iwl_write_prph(trans, SCD_DRAM_BASE_ADDR, - trans_pcie->scd_bc_tbls.dma >> 10); + trans->txqs.scd_bc_tbls.dma >> 10); /* The chain extension of the SCD doesn't work well. This feature is * enabled by default by the HW, so we need to disable it manually. */ - if (trans->cfg->base_params->scd_chain_ext_wa) + if (trans->trans_cfg->base_params->scd_chain_ext_wa) iwl_write_prph(trans, SCD_CHAINEXT_EN, 0); - iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue, - trans_pcie->cmd_fifo, - trans_pcie->cmd_q_wdg_timeout); + iwl_trans_ac_txq_enable(trans, trans->txqs.cmd.q_id, + trans->txqs.cmd.fifo, + trans->txqs.cmd.wdg_timeout); /* Activate all Tx DMA/FIFO channels */ iwl_scd_activate_fifos(trans); @@ -781,7 +442,7 @@ reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN); /* Enable L1-Active */ - if (trans->cfg->device_family < IWL_DEVICE_FAMILY_8000) + if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000) iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG, APMG_PCIDEV_STT_VAL_L1_ACT_DIS); } @@ -795,13 +456,13 @@ * we should never get here in gen2 trans mode return early to avoid * having invalid accesses */ - if (WARN_ON_ONCE(trans->cfg->gen2)) + if (WARN_ON_ONCE(trans->trans_cfg->gen2)) return; - for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues; + for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues; txq_id++) { - struct iwl_txq *txq = trans_pcie->txq[txq_id]; - if (trans->cfg->use_tfh) + struct iwl_txq *txq = trans->txqs.txq[txq_id]; + if (trans->trans_cfg->use_tfh) iwl_write_direct64(trans, FH_MEM_CBBC_QUEUE(trans, txq_id), txq->dma_addr); @@ -876,15 +537,16 @@ * queues. This happens when we have an rfkill interrupt. * Since we stop Tx altogether - mark the queues as stopped. 
*/ - memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped)); - memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used)); + memset(trans->txqs.queue_stopped, 0, + sizeof(trans->txqs.queue_stopped)); + memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used)); /* This can happen: start_hw, stop_device */ if (!trans_pcie->txq_memory) return 0; /* Unmap DMA from host system and free skb's */ - for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues; + for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues; txq_id++) iwl_pcie_txq_unmap(trans, txq_id); @@ -901,15 +563,15 @@ int txq_id; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used)); + memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used)); /* Tx queues */ if (trans_pcie->txq_memory) { for (txq_id = 0; - txq_id < trans->cfg->base_params->num_of_queues; + txq_id < trans->trans_cfg->base_params->num_of_queues; txq_id++) { iwl_pcie_txq_free(trans, txq_id); - trans_pcie->txq[txq_id] = NULL; + trans->txqs.txq[txq_id] = NULL; } } @@ -918,7 +580,7 @@ iwl_pcie_free_dma_ptr(trans, &trans_pcie->kw); - iwl_pcie_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls); + iwl_pcie_free_dma_ptr(trans, &trans->txqs.scd_bc_tbls); } /* @@ -930,11 +592,12 @@ int ret; int txq_id, slots_num; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - u16 bc_tbls_size = trans->cfg->base_params->num_of_queues; + u16 bc_tbls_size = trans->trans_cfg->base_params->num_of_queues; - bc_tbls_size *= (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) ? - sizeof(struct iwl_gen3_bc_tbl) : - sizeof(struct iwlagn_scd_bc_tbl); + if (WARN_ON(trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)) + return -EINVAL; + + bc_tbls_size *= sizeof(struct iwlagn_scd_bc_tbl); /*It is not allowed to alloc twice, so warn when this happens. * We cannot rely on the previous allocation, so free and fail */ @@ -943,7 +606,7 @@ goto error; } - ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls, + ret = iwl_pcie_alloc_dma_ptr(trans, &trans->txqs.scd_bc_tbls, bc_tbls_size); if (ret) { IWL_ERR(trans, "Scheduler BC Table allocation failed\n"); @@ -957,8 +620,9 @@ goto error; } - trans_pcie->txq_memory = kcalloc(trans->cfg->base_params->num_of_queues, - sizeof(struct iwl_txq), GFP_KERNEL); + trans_pcie->txq_memory = + kcalloc(trans->trans_cfg->base_params->num_of_queues, + sizeof(struct iwl_txq), GFP_KERNEL); if (!trans_pcie->txq_memory) { IWL_ERR(trans, "Not enough memory for txq\n"); ret = -ENOMEM; @@ -966,19 +630,24 @@ } /* Alloc and init all Tx queues, including the command queue (#4/#9) */ - for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues; + for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues; txq_id++) { - bool cmd_queue = (txq_id == trans_pcie->cmd_queue); + bool cmd_queue = (txq_id == trans->txqs.cmd.q_id); - slots_num = cmd_queue ? 
TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; - trans_pcie->txq[txq_id] = &trans_pcie->txq_memory[txq_id]; - ret = iwl_pcie_txq_alloc(trans, trans_pcie->txq[txq_id], - slots_num, cmd_queue); + if (cmd_queue) + slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE, + trans->cfg->min_txq_size); + else + slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE, + trans->cfg->min_256_ba_txq_size); + trans->txqs.txq[txq_id] = &trans_pcie->txq_memory[txq_id]; + ret = iwl_txq_alloc(trans, trans->txqs.txq[txq_id], slots_num, + cmd_queue); if (ret) { IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id); goto error; } - trans_pcie->txq[txq_id]->id = txq_id; + trans->txqs.txq[txq_id]->id = txq_id; } return 0; @@ -1015,13 +684,18 @@ spin_unlock(&trans_pcie->irq_lock); /* Alloc and init all Tx queues, including the command queue (#4/#9) */ - for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues; + for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues; txq_id++) { - bool cmd_queue = (txq_id == trans_pcie->cmd_queue); + bool cmd_queue = (txq_id == trans->txqs.cmd.q_id); - slots_num = cmd_queue ? TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; - ret = iwl_pcie_txq_init(trans, trans_pcie->txq[txq_id], - slots_num, cmd_queue); + if (cmd_queue) + slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE, + trans->cfg->min_txq_size); + else + slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE, + trans->cfg->min_256_ba_txq_size); + ret = iwl_txq_init(trans, trans->txqs.txq[txq_id], slots_num, + cmd_queue); if (ret) { IWL_ERR(trans, "Tx %d queue init failed\n", txq_id); goto error; @@ -1034,11 +708,11 @@ * Circular buffer (TFD queue in DRAM) physical base address */ iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(trans, txq_id), - trans_pcie->txq[txq_id]->dma_addr >> 8); + trans->txqs.txq[txq_id]->dma_addr >> 8); } iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE); - if (trans->cfg->base_params->num_of_queues > 20) + if (trans->trans_cfg->base_params->num_of_queues > 20) iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_ENABLE_31_QUEUES); @@ -1078,19 +752,18 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, struct sk_buff_head *skbs) { - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_txq *txq = trans_pcie->txq[txq_id]; - int tfd_num = iwl_pcie_get_cmd_index(txq, ssn); - int read_ptr = iwl_pcie_get_cmd_index(txq, txq->read_ptr); + struct iwl_txq *txq = trans->txqs.txq[txq_id]; + int tfd_num = iwl_txq_get_cmd_index(txq, ssn); + int read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr); int last_to_free; /* This function is not meant to release cmd queue*/ - if (WARN_ON(txq_id == trans_pcie->cmd_queue)) + if (WARN_ON(txq_id == trans->txqs.cmd.q_id)) return; spin_lock_bh(&txq->lock); - if (!test_bit(txq_id, trans_pcie->queue_used)) { + if (!test_bit(txq_id, trans->txqs.queue_used)) { IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n", txq_id, ssn); goto out; @@ -1104,13 +777,13 @@ /*Since we free until index _not_ inclusive, the one before index is * the last we will free. 
This one must be used */ - last_to_free = iwl_queue_dec_wrap(trans, tfd_num); + last_to_free = iwl_txq_dec_wrap(trans, tfd_num); - if (!iwl_queue_used(txq, last_to_free)) { + if (!iwl_txq_used(txq, last_to_free)) { IWL_ERR(trans, "%s: Read index for txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n", __func__, txq_id, last_to_free, - trans->cfg->base_params->max_tfd_queue_size, + trans->trans_cfg->base_params->max_tfd_queue_size, txq->write_ptr, txq->read_ptr); goto out; } @@ -1120,33 +793,42 @@ for (; read_ptr != tfd_num; - txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr), - read_ptr = iwl_pcie_get_cmd_index(txq, txq->read_ptr)) { + txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr), + read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr)) { struct sk_buff *skb = txq->entries[read_ptr].skb; if (WARN_ON_ONCE(!skb)) continue; - iwl_pcie_free_tso_page(trans_pcie, skb); + iwl_txq_free_tso_page(trans, skb); __skb_queue_tail(skbs, skb); txq->entries[read_ptr].skb = NULL; - if (!trans->cfg->use_tfh) - iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq); + if (!trans->trans_cfg->use_tfh) + iwl_txq_gen1_inval_byte_cnt_tbl(trans, txq); iwl_pcie_txq_free_tfd(trans, txq); } iwl_pcie_txq_progress(txq); - if (iwl_queue_space(trans, txq) > txq->low_mark && - test_bit(txq_id, trans_pcie->queue_stopped)) { + if (iwl_txq_space(trans, txq) > txq->low_mark && + test_bit(txq_id, trans->txqs.queue_stopped)) { struct sk_buff_head overflow_skbs; __skb_queue_head_init(&overflow_skbs); skb_queue_splice_init(&txq->overflow_q, &overflow_skbs); + + /* + * We are going to transmit from the overflow queue. + * Remember this state so that wait_for_txq_empty will know we + * are adding more packets to the TFD queue. It cannot rely on + * the state of &txq->overflow_q, as we just emptied it, but + * haven't TXed the content yet. + */ + txq->overflow_tx = true; /* * This is tricky: we are in reclaim path which is non @@ -1159,30 +841,40 @@ while (!skb_queue_empty(&overflow_skbs)) { struct sk_buff *skb = __skb_dequeue(&overflow_skbs); - struct iwl_device_cmd *dev_cmd_ptr; + struct iwl_device_tx_cmd *dev_cmd_ptr; dev_cmd_ptr = *(void **)((u8 *)skb->cb + - trans_pcie->dev_cmd_offs); + trans->txqs.dev_cmd_offs); /* * Note that we can very well be overflowing again. - * In that case, iwl_queue_space will be small again + * In that case, iwl_txq_space will be small again * and we won't wake mac80211's queue. 
*/ iwl_trans_tx(trans, skb, dev_cmd_ptr, txq_id); } - spin_lock_bh(&txq->lock); - if (iwl_queue_space(trans, txq) > txq->low_mark) + if (iwl_txq_space(trans, txq) > txq->low_mark) iwl_wake_queue(trans, txq); - } - if (txq->read_ptr == txq->write_ptr) { - IWL_DEBUG_RPM(trans, "Q %d - last tx reclaimed\n", txq->id); - iwl_trans_unref(trans); + spin_lock_bh(&txq->lock); + txq->overflow_tx = false; } out: + spin_unlock_bh(&txq->lock); +} + +/* Set wr_ptr of specific device and txq */ +void iwl_trans_pcie_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr) +{ + struct iwl_txq *txq = trans->txqs.txq[txq_id]; + + spin_lock_bh(&txq->lock); + + txq->write_ptr = ptr; + txq->read_ptr = txq->write_ptr; + spin_unlock_bh(&txq->lock); } @@ -1190,17 +882,13 @@ const struct iwl_host_cmd *cmd) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - const struct iwl_cfg *cfg = trans->cfg; int ret; lockdep_assert_held(&trans_pcie->reg_lock); - if (!(cmd->flags & CMD_SEND_IN_IDLE) && - !trans_pcie->ref_cmd_in_flight) { - trans_pcie->ref_cmd_in_flight = true; - IWL_DEBUG_RPM(trans, "set ref_cmd_in_flight - ref\n"); - iwl_trans_ref(trans); - } + /* Make sure the NIC is still alive in the bus */ + if (test_bit(STATUS_TRANS_DEAD, &trans->status)) + return -ENODEV; /* * wake up the NIC to make sure that the firmware will see the host @@ -1208,19 +896,19 @@ * returned. This needs to be done only on NICs that have * apmg_wake_up_wa set. */ - if (cfg->base_params->apmg_wake_up_wa && + if (trans->trans_cfg->base_params->apmg_wake_up_wa && !trans_pcie->cmd_hold_nic_awake) { __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, - BIT(cfg->csr->flag_mac_access_req)); + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); ret = iwl_poll_bit(trans, CSR_GP_CNTRL, - BIT(cfg->csr->flag_val_mac_access_en), - (BIT(cfg->csr->flag_mac_clock_ready) | + CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN, + (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY | CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000); if (ret < 0) { __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, - BIT(cfg->csr->flag_mac_access_req)); + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); IWL_ERR(trans, "Failed to wake NIC for hcmd\n"); return -EIO; } @@ -1240,29 +928,28 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_txq *txq = trans_pcie->txq[txq_id]; - unsigned long flags; + struct iwl_txq *txq = trans->txqs.txq[txq_id]; int nfreed = 0; u16 r; lockdep_assert_held(&txq->lock); - idx = iwl_pcie_get_cmd_index(txq, idx); - r = iwl_pcie_get_cmd_index(txq, txq->read_ptr); + idx = iwl_txq_get_cmd_index(txq, idx); + r = iwl_txq_get_cmd_index(txq, txq->read_ptr); - if (idx >= trans->cfg->base_params->max_tfd_queue_size || - (!iwl_queue_used(txq, idx))) { - WARN_ONCE(test_bit(txq_id, trans_pcie->queue_used), + if (idx >= trans->trans_cfg->base_params->max_tfd_queue_size || + (!iwl_txq_used(txq, idx))) { + WARN_ONCE(test_bit(txq_id, trans->txqs.queue_used), "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n", __func__, txq_id, idx, - trans->cfg->base_params->max_tfd_queue_size, + trans->trans_cfg->base_params->max_tfd_queue_size, txq->write_ptr, txq->read_ptr); return; } - for (idx = iwl_queue_inc_wrap(trans, idx); r != idx; - r = iwl_queue_inc_wrap(trans, r)) { - txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr); + for (idx = iwl_txq_inc_wrap(trans, idx); r != idx; + r = iwl_txq_inc_wrap(trans, r)) { + txq->read_ptr = iwl_txq_inc_wrap(trans, 
txq->read_ptr); if (nfreed++ > 0) { IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n", @@ -1272,9 +959,10 @@ } if (txq->read_ptr == txq->write_ptr) { - spin_lock_irqsave(&trans_pcie->reg_lock, flags); + /* BHs are also disabled due to txq->lock */ + spin_lock(&trans_pcie->reg_lock); iwl_pcie_clear_cmd_in_flight(trans); - spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); + spin_unlock(&trans_pcie->reg_lock); } iwl_pcie_txq_progress(txq); @@ -1314,11 +1002,11 @@ unsigned int wdg_timeout) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_txq *txq = trans_pcie->txq[txq_id]; + struct iwl_txq *txq = trans->txqs.txq[txq_id]; int fifo = -1; bool scd_bug = false; - if (test_and_set_bit(txq_id, trans_pcie->queue_used)) + if (test_and_set_bit(txq_id, trans->txqs.queue_used)) WARN_ONCE(1, "queue %d already used - expect issues", txq_id); txq->wd_timeout = msecs_to_jiffies(wdg_timeout); @@ -1327,7 +1015,7 @@ fifo = cfg->fifo; /* Disable the scheduler prior configuring the cmd queue */ - if (txq_id == trans_pcie->cmd_queue && + if (txq_id == trans->txqs.cmd.q_id && trans_pcie->scd_set_active) iwl_scd_enable_set_active(trans, 0); @@ -1335,7 +1023,7 @@ iwl_scd_txq_set_inactive(trans, txq_id); /* Set this queue as a chain-building queue unless it is CMD */ - if (txq_id != trans_pcie->cmd_queue) + if (txq_id != trans->txqs.cmd.q_id) iwl_scd_txq_set_chain(trans, txq_id); if (cfg->aggregate) { @@ -1369,7 +1057,7 @@ * this sad hardware issue. * This bug has been fixed on devices 9000 and up. */ - scd_bug = !trans->cfg->mq_rx_supported && + scd_bug = !trans->trans_cfg->mq_rx_supported && !((ssn - txq->write_ptr) & 0x3f) && (ssn != txq->write_ptr); if (scd_bug) @@ -1405,7 +1093,7 @@ SCD_QUEUE_STTS_REG_MSK); /* enable the scheduler for this queue (only) */ - if (txq_id == trans_pcie->cmd_queue && + if (txq_id == trans->txqs.cmd.q_id && trans_pcie->scd_set_active) iwl_scd_enable_set_active(trans, BIT(txq_id)); @@ -1424,8 +1112,7 @@ void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id, bool shared_mode) { - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_txq *txq = trans_pcie->txq[txq_id]; + struct iwl_txq *txq = trans->txqs.txq[txq_id]; txq->ampdu = !shared_mode; } @@ -1438,8 +1125,8 @@ SCD_TX_STTS_QUEUE_OFFSET(txq_id); static const u32 zero_val[4] = {}; - trans_pcie->txq[txq_id]->frozen_expiry_remainder = 0; - trans_pcie->txq[txq_id]->frozen = false; + trans->txqs.txq[txq_id]->frozen_expiry_remainder = 0; + trans->txqs.txq[txq_id]->frozen = false; /* * Upon HW Rfkill - we stop the device, and then stop the queues @@ -1447,7 +1134,7 @@ * allow the op_mode to call txq_disable after it already called * stop_device. 
*/ - if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) { + if (!test_and_clear_bit(txq_id, trans->txqs.queue_used)) { WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status), "queue %d not used", txq_id); return; @@ -1461,7 +1148,7 @@ } iwl_pcie_txq_unmap(trans, txq_id); - trans_pcie->txq[txq_id]->ampdu = false; + trans->txqs.txq[txq_id]->ampdu = false; IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id); } @@ -1481,10 +1168,9 @@ struct iwl_host_cmd *cmd) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue]; + struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id]; struct iwl_device_cmd *out_cmd; struct iwl_cmd_meta *out_meta; - unsigned long flags; void *dup_buf = NULL; dma_addr_t phys_addr; int idx; @@ -1495,7 +1181,7 @@ u32 cmd_pos; const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD]; u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD]; - unsigned long flags2; + unsigned long flags; if (WARN(!trans->wide_cmd_header && group_id > IWL_ALWAYS_LONG_GROUP, @@ -1579,10 +1265,10 @@ goto free_dup_buf; } - spin_lock_irqsave(&txq->lock, flags2); + spin_lock_irqsave(&txq->lock, flags); - if (iwl_queue_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) { - spin_unlock_irqrestore(&txq->lock, flags2); + if (iwl_txq_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) { + spin_unlock_irqrestore(&txq->lock, flags); IWL_ERR(trans, "No space in command queue\n"); iwl_op_mode_cmd_queue_full(trans->op_mode); @@ -1590,7 +1276,7 @@ goto free_dup_buf; } - idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr); + idx = iwl_txq_get_cmd_index(txq, txq->write_ptr); out_cmd = txq->entries[idx].cmd; out_meta = &txq->entries[idx].meta; @@ -1608,7 +1294,7 @@ sizeof(struct iwl_cmd_header_wide)); out_cmd->hdr_wide.reserved = 0; out_cmd->hdr_wide.sequence = - cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) | + cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) | INDEX_TO_SEQ(txq->write_ptr)); cmd_pos = sizeof(struct iwl_cmd_header_wide); @@ -1616,7 +1302,7 @@ } else { out_cmd->hdr.cmd = iwl_cmd_opcode(cmd->id); out_cmd->hdr.sequence = - cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) | + cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) | INDEX_TO_SEQ(txq->write_ptr)); out_cmd->hdr.group_id = 0; @@ -1667,13 +1353,13 @@ iwl_get_cmd_string(trans, cmd->id), group_id, out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence), - cmd_size, txq->write_ptr, idx, trans_pcie->cmd_queue); + cmd_size, txq->write_ptr, idx, trans->txqs.cmd.q_id); /* start the TFD with the minimum copy bytes */ tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE); memcpy(&txq->first_tb_bufs[idx], &out_cmd->hdr, tb0_size); iwl_pcie_txq_build_tfd(trans, txq, - iwl_pcie_get_first_tb_dma(txq, idx), + iwl_txq_get_first_tb_dma(txq, idx), tb0_size, true); /* map first command fragment, if any remains */ @@ -1683,8 +1369,8 @@ copy_size - tb0_size, DMA_TO_DEVICE); if (dma_mapping_error(trans->dev, phys_addr)) { - iwl_pcie_tfd_unmap(trans, out_meta, txq, - txq->write_ptr); + iwl_txq_gen1_tfd_unmap(trans, out_meta, txq, + txq->write_ptr); idx = -ENOMEM; goto out; } @@ -1707,8 +1393,8 @@ phys_addr = dma_map_single(trans->dev, (void *)data, cmdlen[i], DMA_TO_DEVICE); if (dma_mapping_error(trans->dev, phys_addr)) { - iwl_pcie_tfd_unmap(trans, out_meta, txq, - txq->write_ptr); + iwl_txq_gen1_tfd_unmap(trans, out_meta, txq, + txq->write_ptr); idx = -ENOMEM; goto out; } @@ -1719,7 +1405,7 @@ BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE); out_meta->flags = cmd->flags; if 
(WARN_ON_ONCE(txq->entries[idx].free_buf)) - kzfree(txq->entries[idx].free_buf); + kfree_sensitive(txq->entries[idx].free_buf); txq->entries[idx].free_buf = dup_buf; trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide); @@ -1728,22 +1414,21 @@ if (txq->read_ptr == txq->write_ptr && txq->wd_timeout) mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout); - spin_lock_irqsave(&trans_pcie->reg_lock, flags); + spin_lock(&trans_pcie->reg_lock); ret = iwl_pcie_set_cmd_in_flight(trans, cmd); if (ret < 0) { idx = ret; - spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); - goto out; + goto unlock_reg; } /* Increment and update queue's write index */ - txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr); + txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr); iwl_pcie_txq_inc_wr_ptr(trans, txq); - spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); - + unlock_reg: + spin_unlock(&trans_pcie->reg_lock); out: - spin_unlock_irqrestore(&txq->lock, flags2); + spin_unlock_irqrestore(&txq->lock, flags); free_dup_buf: if (idx < 0) kfree(dup_buf); @@ -1767,14 +1452,14 @@ struct iwl_device_cmd *cmd; struct iwl_cmd_meta *meta; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue]; + struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id]; /* If a Tx command is being handled and it isn't in the actual * command queue then there a command routing bug has been introduced * in the queue management code. */ - if (WARN(txq_id != trans_pcie->cmd_queue, + if (WARN(txq_id != trans->txqs.cmd.q_id, "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n", - txq_id, trans_pcie->cmd_queue, sequence, txq->read_ptr, + txq_id, trans->txqs.cmd.q_id, sequence, txq->read_ptr, txq->write_ptr)) { iwl_print_hex_error(trans, pkt, 32); return; @@ -1782,13 +1467,13 @@ spin_lock_bh(&txq->lock); - cmd_index = iwl_pcie_get_cmd_index(txq, index); + cmd_index = iwl_txq_get_cmd_index(txq, index); cmd = txq->entries[cmd_index].cmd; meta = &txq->entries[cmd_index].meta; group_id = cmd->hdr.group_id; cmd_id = iwl_cmd_id(cmd->hdr.cmd, group_id, 0); - iwl_pcie_tfd_unmap(trans, meta, txq, index); + iwl_txq_gen1_tfd_unmap(trans, meta, txq, index); /* Input error checking is done when commands are added to queue. 
*/ if (meta->flags & CMD_WANT_SKB) { @@ -1814,20 +1499,6 @@ IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n", iwl_get_cmd_string(trans, cmd_id)); wake_up(&trans_pcie->wait_command_queue); - } - - if (meta->flags & CMD_MAKE_TRANS_IDLE) { - IWL_DEBUG_INFO(trans, "complete %s - mark trans as idle\n", - iwl_get_cmd_string(trans, cmd->hdr.cmd)); - set_bit(STATUS_TRANS_IDLE, &trans->status); - wake_up(&trans_pcie->d0i3_waitq); - } - - if (meta->flags & CMD_WAKE_UP_TRANS) { - IWL_DEBUG_INFO(trans, "complete %s - clear trans idle flag\n", - iwl_get_cmd_string(trans, cmd->hdr.cmd)); - clear_bit(STATUS_TRANS_IDLE, &trans->status); - wake_up(&trans_pcie->d0i3_waitq); } meta->flags = 0; @@ -1860,7 +1531,7 @@ struct iwl_host_cmd *cmd) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue]; + struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id]; int cmd_idx; int ret; @@ -1875,16 +1546,6 @@ IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", iwl_get_cmd_string(trans, cmd->id)); - - if (pm_runtime_suspended(&trans_pcie->pci_dev->dev)) { - ret = wait_event_timeout(trans_pcie->d0i3_waitq, - pm_runtime_active(&trans_pcie->pci_dev->dev), - msecs_to_jiffies(IWL_TRANS_IDLE_TIMEOUT)); - if (!ret) { - IWL_ERR(trans, "Timeout exiting D0i3 before hcmd\n"); - return -ETIMEDOUT; - } - } cmd_idx = iwl_pcie_enqueue_hcmd(trans, cmd); if (cmd_idx < 0) { @@ -1913,14 +1574,12 @@ iwl_get_cmd_string(trans, cmd->id)); ret = -ETIMEDOUT; - iwl_force_nmi(trans); - iwl_trans_fw_error(trans); - + iwl_trans_pcie_sync_nmi(trans); goto cancel; } if (test_bit(STATUS_FW_ERROR, &trans->status)) { - iwl_trans_dump_regs(trans); + iwl_trans_pcie_dump_regs(trans); IWL_ERR(trans, "FW error in SYNC CMD %s\n", iwl_get_cmd_string(trans, cmd->id)); dump_stack(); @@ -1965,6 +1624,10 @@ int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) { + /* Make sure the NIC is still alive in the bus */ + if (test_bit(STATUS_TRANS_DEAD, &trans->status)) + return -ENODEV; + if (!(cmd->flags & CMD_SEND_IN_RFKILL) && test_bit(STATUS_RFKILL_OPMODE, &trans->status)) { IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n", @@ -1981,29 +1644,26 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_txq *txq, u8 hdr_len, - struct iwl_cmd_meta *out_meta, - struct iwl_device_cmd *dev_cmd, u16 tb1_len) + struct iwl_cmd_meta *out_meta) { - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - u16 tb2_len; + u16 head_tb_len; int i; /* * Set up TFD's third entry to point directly to remainder * of skb's head, if any */ - tb2_len = skb_headlen(skb) - hdr_len; + head_tb_len = skb_headlen(skb) - hdr_len; - if (tb2_len > 0) { - dma_addr_t tb2_phys = dma_map_single(trans->dev, - skb->data + hdr_len, - tb2_len, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(trans->dev, tb2_phys))) { - iwl_pcie_tfd_unmap(trans, out_meta, txq, - txq->write_ptr); + if (head_tb_len > 0) { + dma_addr_t tb_phys = dma_map_single(trans->dev, + skb->data + hdr_len, + head_tb_len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(trans->dev, tb_phys))) return -EINVAL; - } - iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, false); + trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb->data + hdr_len, + tb_phys, head_tb_len); + iwl_pcie_txq_build_tfd(trans, txq, tb_phys, head_tb_len, false); } /* set up the remaining entries to point to the data */ @@ -2018,50 +1678,22 @@ tb_phys = skb_frag_dma_map(trans->dev, frag, 0, 
skb_frag_size(frag), DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(trans->dev, tb_phys))) { - iwl_pcie_tfd_unmap(trans, out_meta, txq, - txq->write_ptr); + if (unlikely(dma_mapping_error(trans->dev, tb_phys))) return -EINVAL; - } + trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb_frag_address(frag), + tb_phys, skb_frag_size(frag)); tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys, skb_frag_size(frag), false); + if (tb_idx < 0) + return tb_idx; out_meta->tbs |= BIT(tb_idx); } - trace_iwlwifi_dev_tx(trans->dev, skb, - iwl_pcie_get_tfd(trans, txq, txq->write_ptr), - trans_pcie->tfd_size, - &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, - hdr_len); - trace_iwlwifi_dev_tx_data(trans->dev, skb, hdr_len); return 0; } #ifdef CONFIG_INET -struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_tso_hdr_page *p = this_cpu_ptr(trans_pcie->tso_hdr_page); - - if (!p->page) - goto alloc; - - /* enough room on this page */ - if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE) - return p; - - /* We don't have enough room on this page, get a new one. */ - __free_page(p->page); - -alloc: - p->page = alloc_page(GFP_ATOMIC); - if (!p->page) - return NULL; - p->pos = page_address(p->page); - return p; -} - static void iwl_compute_pseudo_hdr_csum(void *iph, struct tcphdr *tcph, bool ipv6, unsigned int len) { @@ -2084,18 +1716,18 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_txq *txq, u8 hdr_len, struct iwl_cmd_meta *out_meta, - struct iwl_device_cmd *dev_cmd, u16 tb1_len) + struct iwl_device_tx_cmd *dev_cmd, + u16 tb1_len) { struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload; - struct iwl_trans_pcie *trans_pcie = txq->trans_pcie; + struct iwl_trans_pcie *trans_pcie = + IWL_TRANS_GET_PCIE_TRANS(txq->trans); struct ieee80211_hdr *hdr = (void *)skb->data; unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room; unsigned int mss = skb_shinfo(skb)->gso_size; u16 length, iv_len, amsdu_pad; u8 *start_hdr; struct iwl_tso_hdr_page *hdr_page; - struct page **page_ptr; - int ret; struct tso_t tso; /* if the packet is protected, then it must be CCMP or GCMP */ @@ -2104,8 +1736,8 @@ IEEE80211_CCMP_HDR_LEN : 0; trace_iwlwifi_dev_tx(trans->dev, skb, - iwl_pcie_get_tfd(trans, txq, txq->write_ptr), - trans_pcie->tfd_size, + iwl_txq_get_tfd(trans, txq, txq->write_ptr), + trans->txqs.tfd.size, &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 0); ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb); @@ -2118,14 +1750,11 @@ (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len; /* Our device supports 9 segments at most, it will fit in 1 page */ - hdr_page = get_page_hdr(trans, hdr_room); + hdr_page = get_page_hdr(trans, hdr_room, skb); if (!hdr_page) return -ENOMEM; - get_page(hdr_page->page); start_hdr = hdr_page->pos; - page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs); - *page_ptr = hdr_page->page; memcpy(hdr_page->pos, skb->data + hdr_len, iv_len); hdr_page->pos += iv_len; @@ -2181,10 +1810,8 @@ if (trans_pcie->sw_csum_tx) { csum_skb = alloc_skb(data_left + tcp_hdrlen(skb), GFP_ATOMIC); - if (!csum_skb) { - ret = -ENOMEM; - goto out_unmap; - } + if (!csum_skb) + return -ENOMEM; iwl_compute_pseudo_hdr_csum(iph, tcph, skb->protocol == @@ -2205,13 +1832,12 @@ hdr_tb_len, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(trans->dev, hdr_tb_phys))) { dev_kfree_skb(csum_skb); - ret = -EINVAL; - goto out_unmap; + return -EINVAL; } 
iwl_pcie_txq_build_tfd(trans, txq, hdr_tb_phys, hdr_tb_len, false); - trace_iwlwifi_dev_tx_tso_chunk(trans->dev, start_hdr, - hdr_tb_len); + trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr, + hdr_tb_phys, hdr_tb_len); /* add this subframe's headers' length to the tx_cmd */ le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start); @@ -2231,14 +1857,13 @@ size, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(trans->dev, tb_phys))) { dev_kfree_skb(csum_skb); - ret = -EINVAL; - goto out_unmap; + return -EINVAL; } iwl_pcie_txq_build_tfd(trans, txq, tb_phys, size, false); - trace_iwlwifi_dev_tx_tso_chunk(trans->dev, tso.data, - size); + trace_iwlwifi_dev_tx_tb(trans->dev, skb, tso.data, + tb_phys, size); data_left -= size; tso_build_data(skb, &tso, size); @@ -2266,16 +1891,13 @@ skb_push(skb, hdr_len + iv_len); return 0; - -out_unmap: - iwl_pcie_tfd_unmap(trans, out_meta, txq, txq->write_ptr); - return ret; } #else /* CONFIG_INET */ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_txq *txq, u8 hdr_len, struct iwl_cmd_meta *out_meta, - struct iwl_device_cmd *dev_cmd, u16 tb1_len) + struct iwl_device_tx_cmd *dev_cmd, + u16 tb1_len) { /* No A-MSDU without CONFIG_INET */ WARN_ON(1); @@ -2285,7 +1907,7 @@ #endif /* CONFIG_INET */ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, - struct iwl_device_cmd *dev_cmd, int txq_id) + struct iwl_device_tx_cmd *dev_cmd, int txq_id) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct ieee80211_hdr *hdr; @@ -2302,9 +1924,9 @@ u16 wifi_seq; bool amsdu; - txq = trans_pcie->txq[txq_id]; + txq = trans->txqs.txq[txq_id]; - if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used), + if (WARN_ONCE(!test_bit(txq_id, trans->txqs.queue_used), "TX on unused queue %d\n", txq_id)) return -EINVAL; @@ -2324,7 +1946,7 @@ } if (skb_is_nonlinear(skb) && - skb_shinfo(skb)->nr_frags > IWL_PCIE_MAX_FRAGS(trans_pcie) && + skb_shinfo(skb)->nr_frags > IWL_TRANS_MAX_FRAGS(trans) && __skb_linearize(skb)) return -ENOMEM; @@ -2337,15 +1959,15 @@ spin_lock(&txq->lock); - if (iwl_queue_space(trans, txq) < txq->high_mark) { - iwl_stop_queue(trans, txq); + if (iwl_txq_space(trans, txq) < txq->high_mark) { + iwl_txq_stop(trans, txq); /* don't put the packet on the ring, if there is no room */ - if (unlikely(iwl_queue_space(trans, txq) < 3)) { - struct iwl_device_cmd **dev_cmd_ptr; + if (unlikely(iwl_txq_space(trans, txq) < 3)) { + struct iwl_device_tx_cmd **dev_cmd_ptr; dev_cmd_ptr = (void *)((u8 *)skb->cb + - trans_pcie->dev_cmd_offs); + trans->txqs.dev_cmd_offs); *dev_cmd_ptr = dev_cmd; __skb_queue_tail(&txq->overflow_q, skb); @@ -2374,7 +1996,7 @@ cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) | INDEX_TO_SEQ(txq->write_ptr))); - tb0_phys = iwl_pcie_get_first_tb_dma(txq, txq->write_ptr); + tb0_phys = iwl_txq_get_first_tb_dma(txq, txq->write_ptr); scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) + offsetof(struct iwl_tx_cmd, scratch); @@ -2423,6 +2045,12 @@ goto out_err; iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false); + trace_iwlwifi_dev_tx(trans->dev, skb, + iwl_txq_get_tfd(trans, txq, txq->write_ptr), + trans->txqs.tfd.size, + &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, + hdr_len); + /* * If gso_size wasn't set, don't give the frame "amsdu treatment" * (adding subframes, etc.). 
@@ -2434,43 +2062,48 @@ out_meta, dev_cmd, tb1_len))) goto out_err; - } else if (unlikely(iwl_fill_data_tbs(trans, skb, txq, hdr_len, - out_meta, dev_cmd, tb1_len))) { - goto out_err; + } else { + struct sk_buff *frag; + + if (unlikely(iwl_fill_data_tbs(trans, skb, txq, hdr_len, + out_meta))) + goto out_err; + + skb_walk_frags(skb, frag) { + if (unlikely(iwl_fill_data_tbs(trans, frag, txq, 0, + out_meta))) + goto out_err; + } } /* building the A-MSDU might have changed this data, so memcpy it now */ - memcpy(&txq->first_tb_bufs[txq->write_ptr], &dev_cmd->hdr, - IWL_FIRST_TB_SIZE); + memcpy(&txq->first_tb_bufs[txq->write_ptr], dev_cmd, IWL_FIRST_TB_SIZE); - tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr); + tfd = iwl_txq_get_tfd(trans, txq, txq->write_ptr); /* Set up entry for this TFD in Tx byte-count array */ - iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len), - iwl_pcie_tfd_get_num_tbs(trans, tfd)); + iwl_txq_gen1_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len), + iwl_txq_gen1_tfd_get_num_tbs(trans, + tfd)); wait_write_ptr = ieee80211_has_morefrags(fc); /* start timer if queue currently empty */ - if (txq->read_ptr == txq->write_ptr) { - if (txq->wd_timeout) { - /* - * If the TXQ is active, then set the timer, if not, - * set the timer in remainder so that the timer will - * be armed with the right value when the station will - * wake up. - */ - if (!txq->frozen) - mod_timer(&txq->stuck_timer, - jiffies + txq->wd_timeout); - else - txq->frozen_expiry_remainder = txq->wd_timeout; - } - IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", txq->id); - iwl_trans_ref(trans); + if (txq->read_ptr == txq->write_ptr && txq->wd_timeout) { + /* + * If the TXQ is active, then set the timer, if not, + * set the timer in remainder so that the timer will + * be armed with the right value when the station will + * wake up. + */ + if (!txq->frozen) + mod_timer(&txq->stuck_timer, + jiffies + txq->wd_timeout); + else + txq->frozen_expiry_remainder = txq->wd_timeout; } /* Tell device the write index *just past* this latest filled TFD */ - txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr); + txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr); if (!wait_write_ptr) iwl_pcie_txq_inc_wr_ptr(trans, txq); @@ -2481,6 +2114,7 @@ spin_unlock(&txq->lock); return 0; out_err: + iwl_txq_gen1_tfd_unmap(trans, out_meta, txq, txq->write_ptr); spin_unlock(&txq->lock); return -1; } -- Gitblit v1.6.2
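A note for reviewers of the queue arithmetic in this patch: iwl_queue_space(), removed from this file (its replacement, iwl_txq_space(), lives outside this diff), relies on the TFD ring size being a power of two, so occupancy is a masked subtraction rather than a modulo, and one slot is reserved so that read_ptr == write_ptr can only mean "empty". Below is a minimal standalone sketch of that computation, assuming a hypothetical 256-entry ring in place of the value the driver reads from trans->trans_cfg->base_params->max_tfd_queue_size; it is an illustration, not driver code.

#include <assert.h>
#include <stdio.h>

/* Hypothetical ring geometry; the real driver reads this from
 * trans->trans_cfg->base_params->max_tfd_queue_size. It must be a
 * power of two for the masking below to be a well-defined modulo. */
#define MAX_TFD_QUEUE_SIZE 256u

struct txq {
	unsigned int write_ptr;	/* next slot the host will fill */
	unsigned int read_ptr;	/* next slot the device will drain */
	unsigned int n_window;	/* usable window, <= ring size */
};

static unsigned int queue_space(const struct txq *q)
{
	unsigned int max, used;

	/* Keep fewer than MAX_TFD_QUEUE_SIZE entries in flight so an
	 * empty ring (read == write) is distinguishable from a full one. */
	if (q->n_window < MAX_TFD_QUEUE_SIZE)
		max = q->n_window;
	else
		max = MAX_TFD_QUEUE_SIZE - 1;

	/* Power-of-two size makes the AND equivalent to a modulo, so the
	 * unsigned subtraction is well defined even after write_ptr wraps. */
	used = (q->write_ptr - q->read_ptr) & (MAX_TFD_QUEUE_SIZE - 1);

	return used <= max ? max - used : 0;
}

int main(void)
{
	/* Wrapped case: write_ptr already went past the end of the ring. */
	struct txq q = { .write_ptr = 3, .read_ptr = 250, .n_window = 256 };

	/* (3 - 250) & 255 == 9 entries in flight, 255 - 9 == 246 slots free. */
	assert(queue_space(&q) == 246);
	printf("free slots: %u\n", queue_space(&q));
	return 0;
}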
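Likewise, the removed iwl_pcie_txq_update_byte_cnt_tbl() (callers now go through iwl_txq_gen1_update_byte_cnt_tbl(), defined outside this diff) packs each scheduler byte-count entry as a 12-bit length plus a 4-bit station id in one __le16, per the bc_ent = cpu_to_le16(len | (sta_id << 12)) expression visible above. A hedged sketch of just that packing, in host byte order; the 0xF mask on sta_id is an illustration choice, not taken from the driver.

#include <assert.h>
#include <stdint.h>

/* Sketch of the bc_ent layout used by the scheduler byte-count table:
 * bits 0..11 carry the (possibly dword-rounded) length -- the driver
 * WARNs and bails if it exceeds 0xFFF -- and bits 12..15 carry the
 * station id. The driver stores the result with cpu_to_le16(); plain
 * host order is used here for brevity. */
static uint16_t pack_bc_ent(uint16_t len, uint8_t sta_id)
{
	assert(len <= 0xFFF);
	return (uint16_t)(len | ((uint16_t)(sta_id & 0xF) << 12));
}

int main(void)
{
	/* A 0x123-byte entry for station 5 packs to 0x5123. */
	assert(pack_bc_ent(0x123, 5) == 0x5123);
	return 0;
}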