```diff
@@ -6,7 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2020 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -20,7 +20,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2020 Intel Corporation
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -50,7 +50,6 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  *****************************************************************************/
-#include <linux/pm_runtime.h>
 #include <net/tso.h>
 #include <linux/tcp.h>
 
```
```diff
@@ -59,27 +58,3 @@
 #include "iwl-io.h"
 #include "internal.h"
 #include "fw/api/tx.h"
-
-/*
- * iwl_pcie_gen2_tx_stop - Stop all Tx DMA channels
- */
-void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans)
-{
-        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-        int txq_id;
-
-        /*
-         * This function can be called before the op_mode disabled the
-         * queues. This happens when we have an rfkill interrupt.
-         * Since we stop Tx altogether - mark the queues as stopped.
-         */
-        memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
-        memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
-
-        /* Unmap DMA from host system and free skb's */
-        for (txq_id = 0; txq_id < ARRAY_SIZE(trans_pcie->txq); txq_id++) {
-                if (!trans_pcie->txq[txq_id])
-                        continue;
-                iwl_pcie_gen2_txq_unmap(trans, txq_id);
-        }
-}
@@ -86,40 +60,0 @@
-
-/*
- * iwl_pcie_txq_update_byte_tbl - Set up entry in Tx byte-count array
- */
-static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
-                                          struct iwl_txq *txq, u16 byte_cnt,
-                                          int num_tbs)
-{
-        struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;
-        struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
-        struct iwl_gen3_bc_tbl *scd_bc_tbl_gen3 = txq->bc_tbl.addr;
-        int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
-        u8 filled_tfd_size, num_fetch_chunks;
-        u16 len = byte_cnt;
-        __le16 bc_ent;
-
-        if (trans_pcie->bc_table_dword)
-                len = DIV_ROUND_UP(len, 4);
-
-        if (WARN_ON(len > 0xFFF || idx >= txq->n_window))
-                return;
-
-        filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) +
-                          num_tbs * sizeof(struct iwl_tfh_tb);
-        /*
-         * filled_tfd_size contains the number of filled bytes in the TFD.
-         * Dividing it by 64 will give the number of chunks to fetch
-         * to SRAM- 0 for one chunk, 1 for 2 and so on.
-         * If, for example, TFD contains only 3 TBs then 32 bytes
-         * of the TFD are used, and only one chunk of 64 bytes should
-         * be fetched
-         */
-        num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
-
-        bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
-        if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
-                scd_bc_tbl_gen3->tfd_offset[idx] = bc_ent;
-        else
-                scd_bc_tbl->tfd_offset[idx] = bc_ent;
-}
```
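Note: the byte-count entry written by the removed iwl_pcie_gen2_update_byte_tbl() packs two fields into 16 bits: the (optionally dword-scaled) length in the low 12 bits, hence the 0xFFF check, and the number of 64-byte SRAM fetch chunks minus one in the top 4 bits. A minimal sketch of that packing (names are ours, endianness handling omitted):

```c
#include <stdint.h>

/* Sketch of the bc_ent packing from the hunk above; illustrative names. */
static uint16_t pack_bc_ent(uint16_t len, unsigned int filled_tfd_size)
{
        /* number of 64-byte fetch chunks, minus one:
         * 0 => one chunk, 1 => two chunks, and so on */
        unsigned int num_fetch_chunks = (filled_tfd_size + 63) / 64 - 1;

        /* low 12 bits: length (must be <= 0xFFF); high 4 bits: chunks */
        return (uint16_t)(len | (num_fetch_chunks << 12));
}
```

For a TFD holding only three TBs (about 32 bytes filled), this yields num_fetch_chunks == 0, i.e. a single 64-byte fetch, matching the comment in the removed code.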
```diff
@@ -126,23 +60,0 @@
-
-/*
- * iwl_pcie_gen2_txq_inc_wr_ptr - Send new write index to hardware
- */
-static void iwl_pcie_gen2_txq_inc_wr_ptr(struct iwl_trans *trans,
-                                         struct iwl_txq *txq)
-{
-        lockdep_assert_held(&txq->lock);
-
-        IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq->id, txq->write_ptr);
-
-        /*
-         * if not in power-save mode, uCode will never sleep when we're
-         * trying to tx (during RFKILL, we're not trying to tx).
-         */
-        iwl_write32(trans, HBUS_TARG_WRPTR, txq->write_ptr | (txq->id << 16));
-}
-
-static u8 iwl_pcie_gen2_get_num_tbs(struct iwl_trans *trans,
-                                    struct iwl_tfh_tfd *tfd)
-{
-        return le16_to_cpu(tfd->num_tbs) & 0x1f;
-}
@@ -149,61 +60,0 @@
-
-static void iwl_pcie_gen2_tfd_unmap(struct iwl_trans *trans,
-                                    struct iwl_cmd_meta *meta,
-                                    struct iwl_tfh_tfd *tfd)
-{
-        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-        int i, num_tbs;
-
-        /* Sanity check on number of chunks */
-        num_tbs = iwl_pcie_gen2_get_num_tbs(trans, tfd);
-
-        if (num_tbs > trans_pcie->max_tbs) {
-                IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
-                return;
-        }
-
-        /* first TB is never freed - it's the bidirectional DMA data */
-        for (i = 1; i < num_tbs; i++) {
-                if (meta->tbs & BIT(i))
-                        dma_unmap_page(trans->dev,
-                                       le64_to_cpu(tfd->tbs[i].addr),
-                                       le16_to_cpu(tfd->tbs[i].tb_len),
-                                       DMA_TO_DEVICE);
-                else
-                        dma_unmap_single(trans->dev,
-                                         le64_to_cpu(tfd->tbs[i].addr),
-                                         le16_to_cpu(tfd->tbs[i].tb_len),
-                                         DMA_TO_DEVICE);
-        }
-
-        tfd->num_tbs = 0;
-}
-
-static void iwl_pcie_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
-{
-        /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
-         * idx is bounded by n_window
-         */
-        int idx = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
-
-        lockdep_assert_held(&txq->lock);
-
-        iwl_pcie_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
-                                iwl_pcie_get_tfd(trans, txq, idx));
-
-        /* free SKB */
-        if (txq->entries) {
-                struct sk_buff *skb;
-
-                skb = txq->entries[idx].skb;
-
-                /* Can be called from irqs-disabled context
-                 * If skb is not NULL, it means that the whole queue is being
-                 * freed and that the queue is not empty - free the skb
-                 */
-                if (skb) {
-                        iwl_op_mode_free_skb(trans->op_mode, skb);
-                        txq->entries[idx].skb = NULL;
-                }
-        }
-}
@@ -210,23 +60,0 @@
-
-static int iwl_pcie_gen2_set_tb(struct iwl_trans *trans,
-                                struct iwl_tfh_tfd *tfd, dma_addr_t addr,
-                                u16 len)
-{
-        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-        int idx = iwl_pcie_gen2_get_num_tbs(trans, tfd);
-        struct iwl_tfh_tb *tb = &tfd->tbs[idx];
-
-        /* Each TFD can point to a maximum max_tbs Tx buffers */
-        if (le16_to_cpu(tfd->num_tbs) >= trans_pcie->max_tbs) {
-                IWL_ERR(trans, "Error can not send more than %d chunks\n",
-                        trans_pcie->max_tbs);
-                return -EINVAL;
-        }
-
-        put_unaligned_le64(addr, &tb->addr);
-        tb->tb_len = cpu_to_le16(len);
-
-        tfd->num_tbs = cpu_to_le16(idx + 1);
-
-        return idx;
-}
```
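Note: the removed iwl_pcie_gen2_set_tb() is append-only bookkeeping: the current TB count doubles as the insertion index, and the caller gets that index back so it can record page mappings in out_meta->tbs. A stripped-down sketch with illustrative types (endianness handling omitted, MAX_TBS illustrative where the driver uses trans_pcie->max_tbs):

```c
#include <stdint.h>

#define MAX_TBS 25      /* illustrative cap */

struct tb { uint64_t addr; uint16_t len; };
struct tfd { uint16_t num_tbs; struct tb tbs[MAX_TBS]; };

/* Sketch of the append-only TB bookkeeping in the removed set_tb() */
static int tfd_append_tb(struct tfd *tfd, uint64_t addr, uint16_t len)
{
        int idx = tfd->num_tbs & 0x1f;  /* low 5 bits hold the count */

        if (idx >= MAX_TBS)
                return -1;      /* TFD full; the driver returns -EINVAL */

        tfd->tbs[idx].addr = addr;
        tfd->tbs[idx].len = len;
        tfd->num_tbs = idx + 1;
        return idx;     /* caller may record BIT(idx) for page mappings */
}
```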
```diff
@@ -233,128 +60,0 @@
-
-static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
-                                     struct sk_buff *skb,
-                                     struct iwl_tfh_tfd *tfd, int start_len,
-                                     u8 hdr_len, struct iwl_device_cmd *dev_cmd)
-{
-#ifdef CONFIG_INET
-        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-        struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
-        struct ieee80211_hdr *hdr = (void *)skb->data;
-        unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
-        unsigned int mss = skb_shinfo(skb)->gso_size;
-        u16 length, amsdu_pad;
-        u8 *start_hdr;
-        struct iwl_tso_hdr_page *hdr_page;
-        struct page **page_ptr;
-        struct tso_t tso;
-
-        trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd),
-                             &dev_cmd->hdr, start_len, 0);
-
-        ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
-        snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
-        total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len;
-        amsdu_pad = 0;
-
-        /* total amount of header we may need for this A-MSDU */
-        hdr_room = DIV_ROUND_UP(total_len, mss) *
-                (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr));
-
-        /* Our device supports 9 segments at most, it will fit in 1 page */
-        hdr_page = get_page_hdr(trans, hdr_room);
-        if (!hdr_page)
-                return -ENOMEM;
-
-        get_page(hdr_page->page);
-        start_hdr = hdr_page->pos;
-        page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
-        *page_ptr = hdr_page->page;
-
-        /*
-         * Pull the ieee80211 header to be able to use TSO core,
-         * we will restore it for the tx_status flow.
-         */
-        skb_pull(skb, hdr_len);
-
-        /*
-         * Remove the length of all the headers that we don't actually
-         * have in the MPDU by themselves, but that we duplicate into
-         * all the different MSDUs inside the A-MSDU.
-         */
-        le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen);
-
-        tso_start(skb, &tso);
-
-        while (total_len) {
-                /* this is the data left for this subframe */
-                unsigned int data_left = min_t(unsigned int, mss, total_len);
-                struct sk_buff *csum_skb = NULL;
-                unsigned int tb_len;
-                dma_addr_t tb_phys;
-                u8 *subf_hdrs_start = hdr_page->pos;
-
-                total_len -= data_left;
-
-                memset(hdr_page->pos, 0, amsdu_pad);
-                hdr_page->pos += amsdu_pad;
-                amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen +
-                                  data_left)) & 0x3;
-                ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr));
-                hdr_page->pos += ETH_ALEN;
-                ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr));
-                hdr_page->pos += ETH_ALEN;
-
-                length = snap_ip_tcp_hdrlen + data_left;
-                *((__be16 *)hdr_page->pos) = cpu_to_be16(length);
-                hdr_page->pos += sizeof(length);
-
-                /*
-                 * This will copy the SNAP as well which will be considered
-                 * as MAC header.
-                 */
-                tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len);
-
-                hdr_page->pos += snap_ip_tcp_hdrlen;
-
-                tb_len = hdr_page->pos - start_hdr;
-                tb_phys = dma_map_single(trans->dev, start_hdr,
-                                         tb_len, DMA_TO_DEVICE);
-                if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
-                        dev_kfree_skb(csum_skb);
-                        goto out_err;
-                }
-                iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len);
-                trace_iwlwifi_dev_tx_tso_chunk(trans->dev, start_hdr, tb_len);
-                /* add this subframe's headers' length to the tx_cmd */
-                le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);
-
-                /* prepare the start_hdr for the next subframe */
-                start_hdr = hdr_page->pos;
-
-                /* put the payload */
-                while (data_left) {
-                        tb_len = min_t(unsigned int, tso.size, data_left);
-                        tb_phys = dma_map_single(trans->dev, tso.data,
-                                                 tb_len, DMA_TO_DEVICE);
-                        if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
-                                dev_kfree_skb(csum_skb);
-                                goto out_err;
-                        }
-                        iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len);
-                        trace_iwlwifi_dev_tx_tso_chunk(trans->dev, tso.data,
-                                                       tb_len);
-
-                        data_left -= tb_len;
-                        tso_build_data(skb, &tso, tb_len);
-                }
-        }
-
-        /* re -add the WiFi header */
-        skb_push(skb, hdr_len);
-
-        return 0;
-
-out_err:
-#endif
-        return -EINVAL;
-}
```
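Note: the padding arithmetic in the removed A-MSDU builder keeps each subframe 4-byte aligned. The pad inserted before subframe N+1 is computed from the full length of subframe N: the Ethernet-style DA/SA/length header, the SNAP/IP/TCP headers, and the payload. A worked example with illustrative sizes:

```c
#include <stdio.h>

/* Worked example of the subframe padding above.
 * Sizes are illustrative: SNAP (8) + IPv4 (20) + TCP (20) = 48 bytes. */
int main(void)
{
        unsigned int snap_ip_tcp_hdrlen = 8 + 20 + 20;  /* 48 */
        unsigned int data_left = 1418;  /* payload of this subframe */

        /* subframe = DA/SA/len header (14) + 48 + 1418 = 1480 bytes;
         * 1480 is already a multiple of 4, so the next subframe needs
         * (4 - 1480) & 0x3 == 0 bytes of leading padding */
        unsigned int amsdu_pad =
                (4 - (14 + snap_ip_tcp_hdrlen + data_left)) & 0x3;

        printf("pad before next subframe: %u\n", amsdu_pad);  /* 0 */
        return 0;
}
```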
```diff
@@ -361,51 +60,0 @@
-
-static struct
-iwl_tfh_tfd *iwl_pcie_gen2_build_tx_amsdu(struct iwl_trans *trans,
-                                          struct iwl_txq *txq,
-                                          struct iwl_device_cmd *dev_cmd,
-                                          struct sk_buff *skb,
-                                          struct iwl_cmd_meta *out_meta,
-                                          int hdr_len,
-                                          int tx_cmd_len)
-{
-        int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
-        struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx);
-        dma_addr_t tb_phys;
-        int len;
-        void *tb1_addr;
-
-        tb_phys = iwl_pcie_get_first_tb_dma(txq, idx);
-
-        iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
-
-        /*
-         * The second TB (tb1) points to the remainder of the TX command
-         * and the 802.11 header - dword aligned size
-         * (This calculation modifies the TX command, so do it before the
-         * setup of the first TB)
-         */
-        len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
-              IWL_FIRST_TB_SIZE;
-
-        /* do not align A-MSDU to dword as the subframe header aligns it */
-
-        /* map the data for TB1 */
-        tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
-        tb_phys = dma_map_single(trans->dev, tb1_addr, len, DMA_TO_DEVICE);
-        if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
-                goto out_err;
-        iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, len);
-
-        if (iwl_pcie_gen2_build_amsdu(trans, skb, tfd,
-                                      len + IWL_FIRST_TB_SIZE,
-                                      hdr_len, dev_cmd))
-                goto out_err;
-
-        /* building the A-MSDU might have changed this data, memcpy it now */
-        memcpy(&txq->first_tb_bufs[idx], &dev_cmd->hdr, IWL_FIRST_TB_SIZE);
-        return tfd;
-
-out_err:
-        iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd);
-        return NULL;
-}
@@ -412,81 +60,0 @@
-
-static struct
-iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
-                                    struct iwl_txq *txq,
-                                    struct iwl_device_cmd *dev_cmd,
-                                    struct sk_buff *skb,
-                                    struct iwl_cmd_meta *out_meta,
-                                    int hdr_len,
-                                    int tx_cmd_len)
-{
-        int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
-        struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx);
-        dma_addr_t tb_phys;
-        int i, len, tb1_len, tb2_len;
-        void *tb1_addr;
-
-        tb_phys = iwl_pcie_get_first_tb_dma(txq, idx);
-
-        /* The first TB points to bi-directional DMA data */
-        memcpy(&txq->first_tb_bufs[idx], &dev_cmd->hdr, IWL_FIRST_TB_SIZE);
-
-        iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
-
-        /*
-         * The second TB (tb1) points to the remainder of the TX command
-         * and the 802.11 header - dword aligned size
-         * (This calculation modifies the TX command, so do it before the
-         * setup of the first TB)
-         */
-        len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
-              IWL_FIRST_TB_SIZE;
-
-        tb1_len = ALIGN(len, 4);
-
-        /* map the data for TB1 */
-        tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
-        tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
-        if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
-                goto out_err;
-        iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb1_len);
-
-        /* set up TFD's third entry to point to remainder of skb's head */
-        tb2_len = skb_headlen(skb) - hdr_len;
-
-        if (tb2_len > 0) {
-                tb_phys = dma_map_single(trans->dev, skb->data + hdr_len,
-                                         tb2_len, DMA_TO_DEVICE);
-                if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
-                        goto out_err;
-                iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb2_len);
-        }
-
-        /* set up the remaining entries to point to the data */
-        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-                int tb_idx;
-
-                if (!skb_frag_size(frag))
-                        continue;
-
-                tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
-                                           skb_frag_size(frag), DMA_TO_DEVICE);
-
-                if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
-                        goto out_err;
-                tb_idx = iwl_pcie_gen2_set_tb(trans, tfd, tb_phys,
-                                              skb_frag_size(frag));
-
-                out_meta->tbs |= BIT(tb_idx);
-        }
-
-        trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
-                             IWL_FIRST_TB_SIZE + tb1_len, hdr_len);
-        trace_iwlwifi_dev_tx_data(trans->dev, skb, hdr_len);
-
-        return tfd;
-
-out_err:
-        iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd);
-        return NULL;
-}
@@ -493,42 +60,0 @@
-
-static
-struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
-                                            struct iwl_txq *txq,
-                                            struct iwl_device_cmd *dev_cmd,
-                                            struct sk_buff *skb,
-                                            struct iwl_cmd_meta *out_meta)
-{
-        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-        int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
-        struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx);
-        int len, hdr_len;
-        bool amsdu;
-
-        /* There must be data left over for TB1 or this code must be changed */
-        BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE);
-
-        memset(tfd, 0, sizeof(*tfd));
-
-        if (trans->cfg->device_family < IWL_DEVICE_FAMILY_22560)
-                len = sizeof(struct iwl_tx_cmd_gen2);
-        else
-                len = sizeof(struct iwl_tx_cmd_gen3);
-
-        amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
-                (*ieee80211_get_qos_ctl(hdr) &
-                 IEEE80211_QOS_CTL_A_MSDU_PRESENT);
-
-        hdr_len = ieee80211_hdrlen(hdr->frame_control);
-
-        /*
-         * Only build A-MSDUs here if doing so by GSO, otherwise it may be
-         * an A-MSDU for other reasons, e.g. NAN or an A-MSDU having been
-         * built in the higher layers already.
-         */
-        if (amsdu && skb_shinfo(skb)->gso_size)
-                return iwl_pcie_gen2_build_tx_amsdu(trans, txq, dev_cmd, skb,
-                                                    out_meta, hdr_len, len);
-
-        return iwl_pcie_gen2_build_tx(trans, txq, dev_cmd, skb, out_meta,
-                                      hdr_len, len);
-}
@@ -535,96 +61,4 @@
-
-int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
-                           struct iwl_device_cmd *dev_cmd, int txq_id)
-{
-        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-        struct iwl_cmd_meta *out_meta;
-        struct iwl_txq *txq = trans_pcie->txq[txq_id];
-        u16 cmd_len;
-        int idx;
-        void *tfd;
-
-        if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used),
-                      "TX on unused queue %d\n", txq_id))
-                return -EINVAL;
-
-        if (skb_is_nonlinear(skb) &&
-            skb_shinfo(skb)->nr_frags > IWL_PCIE_MAX_FRAGS(trans_pcie) &&
-            __skb_linearize(skb))
-                return -ENOMEM;
-
-        spin_lock(&txq->lock);
-
-        if (iwl_queue_space(trans, txq) < txq->high_mark) {
-                iwl_stop_queue(trans, txq);
-
-                /* don't put the packet on the ring, if there is no room */
-                if (unlikely(iwl_queue_space(trans, txq) < 3)) {
-                        struct iwl_device_cmd **dev_cmd_ptr;
-
-                        dev_cmd_ptr = (void *)((u8 *)skb->cb +
-                                               trans_pcie->dev_cmd_offs);
-
-                        *dev_cmd_ptr = dev_cmd;
-                        __skb_queue_tail(&txq->overflow_q, skb);
-                        spin_unlock(&txq->lock);
-                        return 0;
-                }
-        }
-
-        idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
-
-        /* Set up driver data for this TFD */
-        txq->entries[idx].skb = skb;
-        txq->entries[idx].cmd = dev_cmd;
-
-        dev_cmd->hdr.sequence =
-                cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
-                            INDEX_TO_SEQ(idx)));
-
-        /* Set up first empty entry in queue's array of Tx/cmd buffers */
-        out_meta = &txq->entries[idx].meta;
-        out_meta->flags = 0;
-
-        tfd = iwl_pcie_gen2_build_tfd(trans, txq, dev_cmd, skb, out_meta);
-        if (!tfd) {
-                spin_unlock(&txq->lock);
-                return -1;
-        }
-
-        if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
-                struct iwl_tx_cmd_gen3 *tx_cmd_gen3 =
-                        (void *)dev_cmd->payload;
-
-                cmd_len = le16_to_cpu(tx_cmd_gen3->len);
-        } else {
-                struct iwl_tx_cmd_gen2 *tx_cmd_gen2 =
-                        (void *)dev_cmd->payload;
-
-                cmd_len = le16_to_cpu(tx_cmd_gen2->len);
-        }
-
-        /* Set up entry for this TFD in Tx byte-count array */
-        iwl_pcie_gen2_update_byte_tbl(trans_pcie, txq, cmd_len,
-                                      iwl_pcie_gen2_get_num_tbs(trans, tfd));
-
-        /* start timer if queue currently empty */
-        if (txq->read_ptr == txq->write_ptr) {
-                if (txq->wd_timeout)
-                        mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
-                IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", txq->id);
-                iwl_trans_ref(trans);
-        }
-
-        /* Tell device the write index *just past* this latest filled TFD */
-        txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr);
-        iwl_pcie_gen2_txq_inc_wr_ptr(trans, txq);
-        /*
-         * At this point the frame is "transmitted" successfully
-         * and we will get a TX status notification eventually.
-         */
-        spin_unlock(&txq->lock);
-        return 0;
-}
+#include "queue/tx.h"
 
 /*************** HOST COMMAND QUEUE FUNCTIONS *****/
 
```
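Note: with this hunk the whole gen2 data-path TX implementation (TFD construction, byte-count accounting, A-MSDU building, iwl_trans_pcie_gen2_tx() itself) leaves this file; the new `#include "queue/tx.h"` points at transport-generic queue code that takes it over, and the iwl_txq_* names in the hunks below are the relocated helpers. One building block worth keeping in mind when reading the removed overflow-queue logic is the circular-buffer space check; a generic sketch (the driver's iwl_queue_space() also keeps some slots in reserve, which this ignores):

```c
/* Generic sketch of the ring accounting behind the space checks in the
 * removed TX path. Pointers wrap modulo a power-of-two ring size. */
static unsigned int ring_free_space(unsigned int size,  /* power of two */
                                    unsigned int write_ptr,
                                    unsigned int read_ptr)
{
        unsigned int used = (write_ptr - read_ptr) & (size - 1);

        return size - used;
}
```

When the free space drops below the queue's high-water mark, the removed code stopped the queue; when fewer than three slots remained, the frame was parked on overflow_q instead of the ring.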
```diff
@@ -641,10 +75,9 @@
                                        struct iwl_host_cmd *cmd)
 {
         struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-        struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
+        struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
         struct iwl_device_cmd *out_cmd;
         struct iwl_cmd_meta *out_meta;
-        unsigned long flags;
         void *dup_buf = NULL;
         dma_addr_t phys_addr;
         int i, cmd_pos, idx;
@@ -654,7 +87,7 @@
         const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
         u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
         struct iwl_tfh_tfd *tfd;
-        unsigned long flags2;
+        unsigned long flags;
 
         copy_size = sizeof(struct iwl_cmd_header_wide);
         cmd_size = sizeof(struct iwl_cmd_header_wide);
```
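Note: queue bookkeeping moves from the PCIe-private struct (trans_pcie->txq, trans_pcie->cmd_queue) to the transport-generic trans->txqs, so the same state can be shared by non-PCIe queue code. Roughly this shape - illustrative only, not the real iwl-trans.h definition:

```c
struct iwl_txq;  /* opaque per-queue state */

/* Illustrative sketch of the trans->txqs fields used in these hunks */
struct iwl_trans_txqs_sketch {
        struct iwl_txq *txq[512];       /* array size illustrative */
        struct {
                unsigned int q_id;      /* which queue is the command queue */
        } cmd;
};
```

The dropped `unsigned long flags` also previews the locking change further down: once reg_lock stops using the irqsave variant, the single remaining `flags` (formerly `flags2`) serves txq->lock alone.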
```diff
@@ -723,14 +156,14 @@
                 goto free_dup_buf;
         }
 
-        spin_lock_irqsave(&txq->lock, flags2);
+        spin_lock_irqsave(&txq->lock, flags);
 
-        idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
-        tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr);
+        idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
+        tfd = iwl_txq_get_tfd(trans, txq, txq->write_ptr);
         memset(tfd, 0, sizeof(*tfd));
 
-        if (iwl_queue_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
-                spin_unlock_irqrestore(&txq->lock, flags2);
+        if (iwl_txq_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
+                spin_unlock_irqrestore(&txq->lock, flags);
 
                 IWL_ERR(trans, "No space in command queue\n");
                 iwl_op_mode_cmd_queue_full(trans->op_mode);
@@ -754,7 +187,7 @@
                 cpu_to_le16(cmd_size - sizeof(struct iwl_cmd_header_wide));
         out_cmd->hdr_wide.reserved = 0;
         out_cmd->hdr_wide.sequence =
-                cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
+                cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) |
                             INDEX_TO_SEQ(txq->write_ptr));
 
         cmd_pos = sizeof(struct iwl_cmd_header_wide);
```
| 760 | 193 | cmd_pos = sizeof(struct iwl_cmd_header_wide); |
|---|
| .. | .. |
|---|
| 802 | 235 | "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n", |
|---|
| 803 | 236 | iwl_get_cmd_string(trans, cmd->id), group_id, |
|---|
| 804 | 237 | out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence), |
|---|
| 805 | | - cmd_size, txq->write_ptr, idx, trans_pcie->cmd_queue); |
|---|
| 238 | + cmd_size, txq->write_ptr, idx, trans->txqs.cmd.q_id); |
|---|
| 806 | 239 | |
|---|
| 807 | 240 | /* start the TFD with the minimum copy bytes */ |
|---|
| 808 | 241 | tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE); |
|---|
| 809 | | - memcpy(&txq->first_tb_bufs[idx], &out_cmd->hdr, tb0_size); |
|---|
| 810 | | - iwl_pcie_gen2_set_tb(trans, tfd, iwl_pcie_get_first_tb_dma(txq, idx), |
|---|
| 811 | | - tb0_size); |
|---|
| 242 | + memcpy(&txq->first_tb_bufs[idx], out_cmd, tb0_size); |
|---|
| 243 | + iwl_txq_gen2_set_tb(trans, tfd, iwl_txq_get_first_tb_dma(txq, idx), |
|---|
| 244 | + tb0_size); |
|---|
| 812 | 245 | |
|---|
| 813 | 246 | /* map first command fragment, if any remains */ |
|---|
| 814 | 247 | if (copy_size > tb0_size) { |
|---|
| 815 | 248 | phys_addr = dma_map_single(trans->dev, |
|---|
| 816 | | - ((u8 *)&out_cmd->hdr) + tb0_size, |
|---|
| 249 | + (u8 *)out_cmd + tb0_size, |
|---|
| 817 | 250 | copy_size - tb0_size, |
|---|
| 818 | 251 | DMA_TO_DEVICE); |
|---|
| 819 | 252 | if (dma_mapping_error(trans->dev, phys_addr)) { |
|---|
| 820 | 253 | idx = -ENOMEM; |
|---|
| 821 | | - iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd); |
|---|
| 254 | + iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd); |
|---|
| 822 | 255 | goto out; |
|---|
| 823 | 256 | } |
|---|
| 824 | | - iwl_pcie_gen2_set_tb(trans, tfd, phys_addr, |
|---|
| 825 | | - copy_size - tb0_size); |
|---|
| 257 | + iwl_txq_gen2_set_tb(trans, tfd, phys_addr, |
|---|
| 258 | + copy_size - tb0_size); |
|---|
| 826 | 259 | } |
|---|
| 827 | 260 | |
|---|
| 828 | 261 | /* map the remaining (adjusted) nocopy/dup fragments */ |
|---|
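Note: changing the memcpy/dma_map_single source from `&out_cmd->hdr` to `out_cmd` is byte-for-byte equivalent only because hdr is the struct's first member; the rewrite just drops the indirection. A self-contained sketch of that invariant (types illustrative, not the driver's definitions):

```c
#include <stddef.h>

struct cmd_header { unsigned char cmd, group_id; unsigned short sequence; };

struct device_cmd {
        struct cmd_header hdr;          /* must stay first for this to hold */
        unsigned char payload[320];     /* size illustrative */
};

/* &cmd->hdr and (void *)cmd address the same bytes iff hdr is first */
_Static_assert(offsetof(struct device_cmd, hdr) == 0,
               "hdr must be the first member");
```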
```diff
@@ -840,16 +273,16 @@
                                            cmdlen[i], DMA_TO_DEVICE);
                 if (dma_mapping_error(trans->dev, phys_addr)) {
                         idx = -ENOMEM;
-                        iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd);
+                        iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
                         goto out;
                 }
-                iwl_pcie_gen2_set_tb(trans, tfd, phys_addr, cmdlen[i]);
+                iwl_txq_gen2_set_tb(trans, tfd, phys_addr, cmdlen[i]);
         }
 
         BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE);
         out_meta->flags = cmd->flags;
         if (WARN_ON_ONCE(txq->entries[idx].free_buf))
-                kzfree(txq->entries[idx].free_buf);
+                kfree_sensitive(txq->entries[idx].free_buf);
         txq->entries[idx].free_buf = dup_buf;
 
         trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide);
```
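Note: kzfree() was renamed kfree_sensitive() tree-wide in v5.9; behavior is unchanged - the allocation is zeroed before it is freed, so duplicated command payloads don't linger in freed memory. Minimal usage sketch:

```c
#include <linux/slab.h>

/* kfree_sensitive() (formerly kzfree()) zeroes the allocation before
 * freeing it - for buffers that may hold keys or other sensitive data. */
static void drop_cmd_copy(void *dup_buf)
{
        kfree_sensitive(dup_buf);
}
```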
```diff
@@ -858,20 +291,14 @@
         if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
                 mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
 
-        spin_lock_irqsave(&trans_pcie->reg_lock, flags);
-        if (!(cmd->flags & CMD_SEND_IN_IDLE) &&
-            !trans_pcie->ref_cmd_in_flight) {
-                trans_pcie->ref_cmd_in_flight = true;
-                IWL_DEBUG_RPM(trans, "set ref_cmd_in_flight - ref\n");
-                iwl_trans_ref(trans);
-        }
+        spin_lock(&trans_pcie->reg_lock);
         /* Increment and update queue's write index */
-        txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr);
-        iwl_pcie_gen2_txq_inc_wr_ptr(trans, txq);
-        spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
+        txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
+        iwl_txq_inc_wr_ptr(trans, txq);
+        spin_unlock(&trans_pcie->reg_lock);
 
 out:
-        spin_unlock_irqrestore(&txq->lock, flags2);
+        spin_unlock_irqrestore(&txq->lock, flags);
 free_dup_buf:
         if (idx < 0)
                 kfree(dup_buf);
```
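Note: the runtime-PM command-in-flight accounting under reg_lock is gone, and reg_lock itself drops the irqsave variant here - presumably safe because it is only taken nested inside txq->lock, which has already disabled local interrupts at this point. The surviving pattern, as a self-contained sketch:

```c
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(outer_lock);     /* stands in for txq->lock */
static DEFINE_SPINLOCK(inner_lock);     /* stands in for reg_lock */

/* Once the outer lock has disabled interrupts and saved the flags, the
 * inner lock can use plain spin_lock()/spin_unlock() - the pattern the
 * hunk above switches to after dropping the runtime-PM bookkeeping. */
static void locked_doorbell_update(void)
{
        unsigned long flags;

        spin_lock_irqsave(&outer_lock, flags);
        spin_lock(&inner_lock);
        /* bump the write pointer, ring the hardware doorbell */
        spin_unlock(&inner_lock);
        spin_unlock_irqrestore(&outer_lock, flags);
}
```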
```diff
@@ -885,7 +312,7 @@
 {
         struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
         const char *cmd_str = iwl_get_cmd_string(trans, cmd->id);
-        struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
+        struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
         int cmd_idx;
         int ret;
 
@@ -897,16 +324,6 @@
                 return -EIO;
 
         IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", cmd_str);
-
-        if (pm_runtime_suspended(&trans_pcie->pci_dev->dev)) {
-                ret = wait_event_timeout(trans_pcie->d0i3_waitq,
-                                pm_runtime_active(&trans_pcie->pci_dev->dev),
-                                msecs_to_jiffies(IWL_TRANS_IDLE_TIMEOUT));
-                if (!ret) {
-                        IWL_ERR(trans, "Timeout exiting D0i3 before hcmd\n");
-                        return -ETIMEDOUT;
-                }
-        }
 
         cmd_idx = iwl_pcie_gen2_enqueue_hcmd(trans, cmd);
         if (cmd_idx < 0) {
@@ -933,9 +350,7 @@
                         cmd_str);
                 ret = -ETIMEDOUT;
 
-                iwl_force_nmi(trans);
-                iwl_trans_fw_error(trans);
-
+                iwl_trans_pcie_sync_nmi(trans);
                 goto cancel;
         }
 
```
```diff
@@ -1008,59 +423,3 @@
         }
 
         return iwl_pcie_gen2_send_hcmd_sync(trans, cmd);
-}
-
-/*
- * iwl_pcie_gen2_txq_unmap - Unmap any remaining DMA mappings and free skb's
- */
-void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id)
-{
-        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-        struct iwl_txq *txq = trans_pcie->txq[txq_id];
-
-        spin_lock_bh(&txq->lock);
-        while (txq->write_ptr != txq->read_ptr) {
-                IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
-                                   txq_id, txq->read_ptr);
-
-                if (txq_id != trans_pcie->cmd_queue) {
-                        int idx = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
-                        struct sk_buff *skb = txq->entries[idx].skb;
-
-                        if (WARN_ON_ONCE(!skb))
-                                continue;
-
-                        iwl_pcie_free_tso_page(trans_pcie, skb);
-                }
-                iwl_pcie_gen2_free_tfd(trans, txq);
-                txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr);
-
-                if (txq->read_ptr == txq->write_ptr) {
-                        unsigned long flags;
-
-                        spin_lock_irqsave(&trans_pcie->reg_lock, flags);
-                        if (txq_id != trans_pcie->cmd_queue) {
-                                IWL_DEBUG_RPM(trans, "Q %d - last tx freed\n",
-                                              txq->id);
-                                iwl_trans_unref(trans);
-                        } else if (trans_pcie->ref_cmd_in_flight) {
-                                trans_pcie->ref_cmd_in_flight = false;
-                                IWL_DEBUG_RPM(trans,
-                                              "clear ref_cmd_in_flight\n");
-                                iwl_trans_unref(trans);
-                        }
-                        spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
-                }
-        }
-
-        while (!skb_queue_empty(&txq->overflow_q)) {
-                struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);
-
-                iwl_op_mode_free_skb(trans->op_mode, skb);
-        }
-
-        spin_unlock_bh(&txq->lock);
-
-        /* just in case - this queue may have been stopped */
-        iwl_wake_queue(trans, txq);
-}
@@ -1067,55 +425,0 @@
-
-static void iwl_pcie_gen2_txq_free_memory(struct iwl_trans *trans,
-                                          struct iwl_txq *txq)
-{
-        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-        struct device *dev = trans->dev;
-
-        /* De-alloc circular buffer of TFDs */
-        if (txq->tfds) {
-                dma_free_coherent(dev,
-                                  trans_pcie->tfd_size * txq->n_window,
-                                  txq->tfds, txq->dma_addr);
-                dma_free_coherent(dev,
-                                  sizeof(*txq->first_tb_bufs) * txq->n_window,
-                                  txq->first_tb_bufs, txq->first_tb_dma);
-        }
-
-        kfree(txq->entries);
-        iwl_pcie_free_dma_ptr(trans, &txq->bc_tbl);
-        kfree(txq);
-}
-
-/*
- * iwl_pcie_txq_free - Deallocate DMA queue.
- * @txq: Transmit queue to deallocate.
- *
- * Empty queue by removing and destroying all BD's.
- * Free all buffers.
- * 0-fill, but do not free "txq" descriptor structure.
- */
-static void iwl_pcie_gen2_txq_free(struct iwl_trans *trans, int txq_id)
-{
-        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-        struct iwl_txq *txq = trans_pcie->txq[txq_id];
-        int i;
-
-        if (WARN_ON(!txq))
-                return;
-
-        iwl_pcie_gen2_txq_unmap(trans, txq_id);
-
-        /* De-alloc array of command/tx buffers */
-        if (txq_id == trans_pcie->cmd_queue)
-                for (i = 0; i < txq->n_window; i++) {
-                        kzfree(txq->entries[i].cmd);
-                        kzfree(txq->entries[i].free_buf);
-                }
-        del_timer_sync(&txq->stuck_timer);
-
-        iwl_pcie_gen2_txq_free_memory(trans, txq);
-
-        trans_pcie->txq[txq_id] = NULL;
-
-        clear_bit(txq_id, trans_pcie->queue_used);
-}
@@ -1122,94 +425,0 @@
-
-int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
-                                 struct iwl_tx_queue_cfg_cmd *cmd,
-                                 int cmd_id, int size,
-                                 unsigned int timeout)
-{
-        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-        struct iwl_tx_queue_cfg_rsp *rsp;
-        struct iwl_txq *txq;
-        struct iwl_host_cmd hcmd = {
-                .id = cmd_id,
-                .len = { sizeof(*cmd) },
-                .data = { cmd, },
-                .flags = CMD_WANT_SKB,
-        };
-        int ret, qid;
-        u32 wr_ptr;
-
-        txq = kzalloc(sizeof(*txq), GFP_KERNEL);
-        if (!txq)
-                return -ENOMEM;
-        ret = iwl_pcie_alloc_dma_ptr(trans, &txq->bc_tbl,
-                                     (trans->cfg->device_family >=
-                                      IWL_DEVICE_FAMILY_22560) ?
-                                     sizeof(struct iwl_gen3_bc_tbl) :
-                                     sizeof(struct iwlagn_scd_bc_tbl));
-        if (ret) {
-                IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
-                kfree(txq);
-                return -ENOMEM;
-        }
-
-        ret = iwl_pcie_txq_alloc(trans, txq, size, false);
-        if (ret) {
-                IWL_ERR(trans, "Tx queue alloc failed\n");
-                goto error;
-        }
-        ret = iwl_pcie_txq_init(trans, txq, size, false);
-        if (ret) {
-                IWL_ERR(trans, "Tx queue init failed\n");
-                goto error;
-        }
-
-        txq->wd_timeout = msecs_to_jiffies(timeout);
-
-        cmd->tfdq_addr = cpu_to_le64(txq->dma_addr);
-        cmd->byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma);
-        cmd->cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));
-
-        ret = iwl_trans_send_cmd(trans, &hcmd);
-        if (ret)
-                goto error;
-
-        if (WARN_ON(iwl_rx_packet_payload_len(hcmd.resp_pkt) != sizeof(*rsp))) {
-                ret = -EINVAL;
-                goto error_free_resp;
-        }
-
-        rsp = (void *)hcmd.resp_pkt->data;
-        qid = le16_to_cpu(rsp->queue_number);
-        wr_ptr = le16_to_cpu(rsp->write_pointer);
-
-        if (qid >= ARRAY_SIZE(trans_pcie->txq)) {
-                WARN_ONCE(1, "queue index %d unsupported", qid);
-                ret = -EIO;
-                goto error_free_resp;
-        }
-
-        if (test_and_set_bit(qid, trans_pcie->queue_used)) {
-                WARN_ONCE(1, "queue %d already used", qid);
-                ret = -EIO;
-                goto error_free_resp;
-        }
-
-        txq->id = qid;
-        trans_pcie->txq[qid] = txq;
-        wr_ptr &= (trans->cfg->base_params->max_tfd_queue_size - 1);
-
-        /* Place first TFD at index corresponding to start sequence number */
-        txq->read_ptr = wr_ptr;
-        txq->write_ptr = wr_ptr;
-        iwl_write_direct32(trans, HBUS_TARG_WRPTR,
-                           (txq->write_ptr) | (qid << 16));
-        IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid);
-
-        iwl_free_resp(&hcmd);
-        return qid;
-
-error_free_resp:
-        iwl_free_resp(&hcmd);
-error:
-        iwl_pcie_gen2_txq_free_memory(trans, txq);
-        return ret;
-}
```
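Note: in the removed iwl_trans_pcie_dyn_txq_alloc(), the write pointer reported by the firmware is masked down to the power-of-two ring size before seeding both queue pointers, so the first TFD lands at the index matching the start sequence number. A sketch of that masking:

```c
#include <stdint.h>

/* Sketch of the wrap masking in the removed dyn_txq_alloc(): the
 * firmware's write pointer is reduced modulo the power-of-two ring
 * size before seeding txq->read_ptr and txq->write_ptr. */
static uint32_t seed_queue_ptr(uint32_t fw_write_pointer,
                               uint32_t max_tfd_queue_size /* power of 2 */)
{
        return fw_write_pointer & (max_tfd_queue_size - 1);
}

/* e.g. seed_queue_ptr(260, 256) == 4 */
```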
```diff
@@ -1216,24 +425,0 @@
-
-void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue)
-{
-        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
-        /*
-         * Upon HW Rfkill - we stop the device, and then stop the queues
-         * in the op_mode. Just for the sake of the simplicity of the op_mode,
-         * allow the op_mode to call txq_disable after it already called
-         * stop_device.
-         */
-        if (!test_and_clear_bit(queue, trans_pcie->queue_used)) {
-                WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
-                          "queue %d not used", queue);
-                return;
-        }
-
-        iwl_pcie_gen2_txq_unmap(trans, queue);
-
-        iwl_pcie_gen2_txq_free_memory(trans, trans_pcie->txq[queue]);
-        trans_pcie->txq[queue] = NULL;
-
-        IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue);
-}
@@ -1240,16 +425,0 @@
-
-void iwl_pcie_gen2_tx_free(struct iwl_trans *trans)
-{
-        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-        int i;
-
-        memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
-
-        /* Free all TX queues */
-        for (i = 0; i < ARRAY_SIZE(trans_pcie->txq); i++) {
-                if (!trans_pcie->txq[i])
-                        continue;
-
-                iwl_pcie_gen2_txq_free(trans, i);
-        }
-}
@@ -1256,39 +426,2 @@
-
-int iwl_pcie_gen2_tx_init(struct iwl_trans *trans)
-{
-        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-        struct iwl_txq *cmd_queue;
-        int txq_id = trans_pcie->cmd_queue, ret;
-
-        /* alloc and init the command queue */
-        if (!trans_pcie->txq[txq_id]) {
-                cmd_queue = kzalloc(sizeof(*cmd_queue), GFP_KERNEL);
-                if (!cmd_queue) {
-                        IWL_ERR(trans, "Not enough memory for command queue\n");
-                        return -ENOMEM;
-                }
-                trans_pcie->txq[txq_id] = cmd_queue;
-                ret = iwl_pcie_txq_alloc(trans, cmd_queue, TFD_CMD_SLOTS, true);
-                if (ret) {
-                        IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
-                        goto error;
-                }
-        } else {
-                cmd_queue = trans_pcie->txq[txq_id];
-        }
-
-        ret = iwl_pcie_txq_init(trans, cmd_queue, TFD_CMD_SLOTS, true);
-        if (ret) {
-                IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
-                goto error;
-        }
-        trans_pcie->txq[txq_id]->id = txq_id;
-        set_bit(txq_id, trans_pcie->queue_used);
-
-        return 0;
-
-error:
-        iwl_pcie_gen2_tx_free(trans);
-        return ret;
-}
 }
 
```