...
 /******************************************************************************
 *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
 * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
+ * Copyright(c) 2018 - 2019 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
...
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
- * You should have received a copy of the GNU General Public License along with
- * this program.
- *
 * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
+ * file called COPYING.
 *
 * Contact Information:
 * Intel Linux Wireless <linuxwifi@intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 - 2019 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
 #ifndef __iwl_trans_int_pcie_h__
...
 #include "iwl-io.h"
 #include "iwl-op-mode.h"
 #include "iwl-drv.h"
-
-/* We need 2 entries for the TX command and header, and another one might
- * be needed for potential data in the SKB's head. The remaining ones can
- * be used for frags.
- */
-#define IWL_PCIE_MAX_FRAGS(x) (x->max_tbs - 3)
+#include "queue/tx.h"
 
 /*
 * RX related structures and functions
...
 * @page: driver's pointer to the rxb page
 * @invalid: rxb is in driver ownership - not owned by HW
 * @vid: index of this rxb in the global table
- * @size: size used from the buffer
+ * @offset: indicates which offset of the page (in bytes)
+ * this buffer uses (if multiple RBs fit into one page)
 */
 struct iwl_rx_mem_buffer {
 dma_addr_t page_dma;
...
 u16 vid;
 bool invalid;
 struct list_head list;
- u32 size;
+ u32 offset;
 };
 
 /**
...
 u32 unhandled;
 };
 
-#define IWL_RX_TD_TYPE_MSK 0xff000000
-#define IWL_RX_TD_SIZE_MSK 0x00ffffff
-#define IWL_RX_TD_SIZE_2K BIT(11)
-#define IWL_RX_TD_TYPE 0
-
 /**
 * struct iwl_rx_transfer_desc - transfer descriptor
- * @type_n_size: buffer type (bit 0: external buff valid,
- * bit 1: optional footer valid, bit 2-7: reserved)
- * and buffer size
 * @addr: ptr to free buffer start address
 * @rbid: unique tag of the buffer
 * @reserved: reserved
 */
 struct iwl_rx_transfer_desc {
- __le32 type_n_size;
- __le64 addr;
 __le16 rbid;
- __le16 reserved;
+ __le16 reserved[3];
+ __le64 addr;
 } __packed;
 
-#define IWL_RX_CD_SIZE 0xffffff00
+#define IWL_RX_CD_FLAGS_FRAGMENTED BIT(0)
 
 /**
 * struct iwl_rx_completion_desc - completion descriptor
- * @type: buffer type (bit 0: external buff valid,
- * bit 1: optional footer valid, bit 2-7: reserved)
- * @status: status of the completion
 * @reserved1: reserved
 * @rbid: unique tag of the received buffer
- * @size: buffer size, masked by IWL_RX_CD_SIZE
+ * @flags: flags (0: fragmented, all others: reserved)
 * @reserved2: reserved
 */
 struct iwl_rx_completion_desc {
- u8 type;
- u8 status;
- __le16 reserved1;
+ __le32 reserved1;
 __le16 rbid;
- __le32 size;
- u8 reserved2[22];
+ u8 flags;
+ u8 reserved2[25];
 } __packed;
 
 /**
...
 * @id: queue index
 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd).
 * Address size is 32 bit in pre-9000 devices and 64 bit in 9000 devices.
- * In 22560 devices it is a pointer to a list of iwl_rx_transfer_desc's
+ * In AX210 devices it is a pointer to a list of iwl_rx_transfer_desc's
 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
 * @ubd: driver's pointer to buffer of used receive buffer descriptors (rbd)
 * @ubd_dma: physical address of buffer of used receive buffer descriptors (rbd)
...
 * @rb_stts_dma: bus address of receive buffer status
 * @lock:
 * @queue: actual rx queue. Not used for multi-rx queue.
+ * @next_rb_is_fragment: indicates that the previous RB that we handled set
+ * the fragmented flag, so the next one is still another fragment
 *
 * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
 */
...
 u32 queue_size;
 struct list_head rx_free;
 struct list_head rx_used;
- bool need_update;
+ bool need_update, next_rb_is_fragment;
 void *rb_stts;
 dma_addr_t rb_stts_dma;
 spinlock_t lock;
...
 struct work_struct rx_alloc;
 };
 
-struct iwl_dma_ptr {
- dma_addr_t dma;
- void *addr;
- size_t size;
-};
-
-/**
- * iwl_queue_inc_wrap - increment queue index, wrap back to beginning
- * @index -- current index
- */
-static inline int iwl_queue_inc_wrap(struct iwl_trans *trans, int index)
-{
- return ++index & (trans->cfg->base_params->max_tfd_queue_size - 1);
-}
-
 /**
 * iwl_get_closed_rb_stts - get closed rb stts from different structs
 * @rxq - the rxq to get the rb stts from
...
 static inline __le16 iwl_get_closed_rb_stts(struct iwl_trans *trans,
 struct iwl_rxq *rxq)
 {
- if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
 __le16 *rb_stts = rxq->rb_stts;
 
 return READ_ONCE(*rb_stts);
...
 }
 }
 
+#ifdef CONFIG_IWLWIFI_DEBUGFS
 /**
- * iwl_queue_dec_wrap - decrement queue index, wrap back to end
- * @index -- current index
- */
-static inline int iwl_queue_dec_wrap(struct iwl_trans *trans, int index)
-{
- return --index & (trans->cfg->base_params->max_tfd_queue_size - 1);
-}
-
-struct iwl_cmd_meta {
- /* only for SYNC commands, iff the reply skb is wanted */
- struct iwl_host_cmd *source;
- u32 flags;
- u32 tbs;
-};
-
-
-#define TFD_TX_CMD_SLOTS 256
-#define TFD_CMD_SLOTS 32
-
-/*
- * The FH will write back to the first TB only, so we need to copy some data
- * into the buffer regardless of whether it should be mapped or not.
- * This indicates how big the first TB must be to include the scratch buffer
- * and the assigned PN.
- * Since PN location is 8 bytes at offset 12, it's 20 now.
- * If we make it bigger then allocations will be bigger and copy slower, so
- * that's probably not useful.
- */
-#define IWL_FIRST_TB_SIZE 20
-#define IWL_FIRST_TB_SIZE_ALIGN ALIGN(IWL_FIRST_TB_SIZE, 64)
-
-struct iwl_pcie_txq_entry {
- struct iwl_device_cmd *cmd;
- struct sk_buff *skb;
- /* buffer to free after command completes */
- const void *free_buf;
- struct iwl_cmd_meta meta;
-};
-
-struct iwl_pcie_first_tb_buf {
- u8 buf[IWL_FIRST_TB_SIZE_ALIGN];
-};
-
-/**
- * struct iwl_txq - Tx Queue for DMA
- * @q: generic Rx/Tx queue descriptor
- * @tfds: transmit frame descriptors (DMA memory)
- * @first_tb_bufs: start of command headers, including scratch buffers, for
- * the writeback -- this is DMA memory and an array holding one buffer
- * for each command on the queue
- * @first_tb_dma: DMA address for the first_tb_bufs start
- * @entries: transmit entries (driver state)
- * @lock: queue lock
- * @stuck_timer: timer that fires if queue gets stuck
- * @trans_pcie: pointer back to transport (for timer)
- * @need_update: indicates need to update read/write index
- * @ampdu: true if this queue is an ampdu queue for an specific RA/TID
- * @wd_timeout: queue watchdog timeout (jiffies) - per queue
- * @frozen: tx stuck queue timer is frozen
- * @frozen_expiry_remainder: remember how long until the timer fires
- * @bc_tbl: byte count table of the queue (relevant only for gen2 transport)
- * @write_ptr: 1-st empty entry (index) host_w
- * @read_ptr: last used entry (index) host_r
- * @dma_addr: physical addr for BD's
- * @n_window: safe queue window
- * @id: queue id
- * @low_mark: low watermark, resume queue if free space more than this
- * @high_mark: high watermark, stop queue if free space less than this
+ * enum iwl_fw_mon_dbgfs_state - the different states of the monitor_data
+ * debugfs file
 *
- * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
- * descriptors) and required locking structures.
- *
- * Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware
- * always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256 (unless
- * there might be HW changes in the future). For the normal TX
- * queues, n_window, which is the size of the software queue data
- * is also 256; however, for the command queue, n_window is only
- * 32 since we don't need so many commands pending. Since the HW
- * still uses 256 BDs for DMA though, TFD_QUEUE_SIZE_MAX stays 256.
- * This means that we end up with the following:
- * HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
- * SW entries: | 0 | ... | 31 |
- * where N is a number between 0 and 7. This means that the SW
- * data is a window overlayed over the HW queue.
+ * @IWL_FW_MON_DBGFS_STATE_CLOSED: the file is closed.
+ * @IWL_FW_MON_DBGFS_STATE_OPEN: the file is open.
+ * @IWL_FW_MON_DBGFS_STATE_DISABLED: the file is disabled, once this state is
+ * set the file can no longer be used.
 */
-struct iwl_txq {
- void *tfds;
- struct iwl_pcie_first_tb_buf *first_tb_bufs;
- dma_addr_t first_tb_dma;
- struct iwl_pcie_txq_entry *entries;
- spinlock_t lock;
- unsigned long frozen_expiry_remainder;
- struct timer_list stuck_timer;
- struct iwl_trans_pcie *trans_pcie;
- bool need_update;
- bool frozen;
- bool ampdu;
- int block;
- unsigned long wd_timeout;
- struct sk_buff_head overflow_q;
- struct iwl_dma_ptr bc_tbl;
-
- int write_ptr;
- int read_ptr;
- dma_addr_t dma_addr;
- int n_window;
- u32 id;
- int low_mark;
- int high_mark;
+enum iwl_fw_mon_dbgfs_state {
+ IWL_FW_MON_DBGFS_STATE_CLOSED,
+ IWL_FW_MON_DBGFS_STATE_OPEN,
+ IWL_FW_MON_DBGFS_STATE_DISABLED,
 };
-
-static inline dma_addr_t
-iwl_pcie_get_first_tb_dma(struct iwl_txq *txq, int idx)
-{
- return txq->first_tb_dma +
- sizeof(struct iwl_pcie_first_tb_buf) * idx;
-}
-
-struct iwl_tso_hdr_page {
- struct page *page;
- u8 *pos;
-};
+#endif
 
 /**
 * enum iwl_shared_irq_flags - level of sharing for irq
...
 };
 
 /**
- * struct iwl_dram_data
- * @physical: page phy pointer
- * @block: pointer to the allocated block/page
- * @size: size of the block/page
+ * struct cont_rec: continuous recording data structure
+ * @prev_wr_ptr: the last address that was read in monitor_data
+ * debugfs file
+ * @prev_wrap_cnt: the wrap count that was used during the last read in
+ * monitor_data debugfs file
+ * @state: the state of monitor_data debugfs file as described
+ * in &iwl_fw_mon_dbgfs_state enum
+ * @mutex: locked while reading from monitor_data debugfs file
 */
-struct iwl_dram_data {
- dma_addr_t physical;
- void *block;
- int size;
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+struct cont_rec {
+ u32 prev_wr_ptr;
+ u32 prev_wrap_cnt;
+ u8 state;
+ /* Used to sync monitor_data debugfs file with driver unload flow */
+ struct mutex mutex;
 };
-
-/**
- * struct iwl_self_init_dram - dram data used by self init process
- * @fw: lmac and umac dram data
- * @fw_cnt: total number of items in array
- * @paging: paging dram data
- * @paging_cnt: total number of items in array
- */
-struct iwl_self_init_dram {
- struct iwl_dram_data *fw;
- int fw_cnt;
- struct iwl_dram_data *paging;
- int paging_cnt;
-};
+#endif
 
 /**
 * struct iwl_trans_pcie - PCIe transport specific data
...
 * @iml_dma_addr: image loader image DMA address
 * @trans: pointer to the generic transport area
 * @scd_base_addr: scheduler sram base address in SRAM
- * @scd_bc_tbls: pointer to the byte count table of the scheduler
 * @kw: keep warm address
+ * @pnvm_dram: DRAM area that contains the PNVM data
 * @pci_dev: basic pci-network driver stuff
 * @hw_base: pci hardware address support
 * @ucode_write_complete: indicates that the ucode has been copied.
 * @ucode_write_waitq: wait queue for uCode load
 * @cmd_queue - command queue number
+ * @def_rx_queue - default rx queue number
 * @rx_buf_size: Rx buffer size
- * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
 * @scd_set_active: should the transport configure the SCD for HCMD queue
 * @sw_csum_tx: if true, then the transport will compute the csum of the TXed
 * frame.
 * @rx_page_order: page order for receive buffer size
+ * @rx_buf_bytes: RX buffer (RB) size in bytes
 * @reg_lock: protect hw register access
 * @mutex: to protect stop_device / start_fw / start_hw
 * @cmd_in_flight: true when we have a host command in flight
- * @fw_mon_phys: physical address of the buffer for the firmware monitor
- * @fw_mon_page: points to the first page of the buffer for the firmware monitor
- * @fw_mon_size: size of the buffer for the firmware monitor
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ * @fw_mon_data: fw continuous recording data
+#endif
 * @msix_entries: array of MSI-X entries
 * @msix_enabled: true if managed to enable MSI-X
 * @shared_vec_mask: the type of causes the shared vector handles
...
 * @fh_mask: current unmasked fh causes
 * @hw_mask: current unmasked hw causes
 * @in_rescan: true if we have triggered a device rescan
- * @scheduled_for_removal: true if we have scheduled a device removal
+ * @base_rb_stts: base virtual address of receive buffer status for all queues
+ * @base_rb_stts_dma: base physical address of receive buffer status
+ * @supported_dma_mask: DMA mask to validate the actual address against,
+ * will be DMA_BIT_MASK(11) or DMA_BIT_MASK(12) depending on the device
+ * @alloc_page_lock: spinlock for the page allocator
+ * @alloc_page: allocated page to still use parts of
+ * @alloc_page_used: how much of the allocated page was already used (bytes)
 */
 struct iwl_trans_pcie {
 struct iwl_rxq *rxq;
- struct iwl_rx_mem_buffer rx_pool[RX_POOL_SIZE];
- struct iwl_rx_mem_buffer *global_table[RX_POOL_SIZE];
+ struct iwl_rx_mem_buffer *rx_pool;
+ struct iwl_rx_mem_buffer **global_table;
 struct iwl_rb_allocator rba;
 union {
 struct iwl_context_info *ctxt_info;
...
 dma_addr_t prph_info_dma_addr;
 dma_addr_t prph_scratch_dma_addr;
 dma_addr_t iml_dma_addr;
- struct iwl_self_init_dram init_dram;
 struct iwl_trans *trans;
 
 struct net_device napi_dev;
-
- struct __percpu iwl_tso_hdr_page *tso_hdr_page;
 
 /* INT ICT Table */
 __le32 *ict_tbl;
...
 int ict_index;
 bool use_ict;
 bool is_down, opmode_down;
- bool debug_rfkill;
+ s8 debug_rfkill;
 struct isr_statistics isr_stats;
 
 spinlock_t irq_lock;
 struct mutex mutex;
 u32 inta_mask;
 u32 scd_base_addr;
- struct iwl_dma_ptr scd_bc_tbls;
 struct iwl_dma_ptr kw;
 
+ struct iwl_dram_data pnvm_dram;
+
 struct iwl_txq *txq_memory;
- struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES];
- unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
- unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
 
 /* PCI bus related data */
 struct pci_dev *pci_dev;
 void __iomem *hw_base;
 
 bool ucode_write_complete;
+ bool sx_complete;
 wait_queue_head_t ucode_write_waitq;
 wait_queue_head_t wait_command_queue;
- wait_queue_head_t d0i3_waitq;
+ wait_queue_head_t sx_waitq;
 
- u8 page_offs, dev_cmd_offs;
-
- u8 cmd_queue;
- u8 cmd_fifo;
- unsigned int cmd_q_wdg_timeout;
+ u8 def_rx_queue;
 u8 n_no_reclaim_cmds;
 u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
- u8 max_tbs;
- u16 tfd_size;
+ u16 num_rx_bufs;
 
 enum iwl_amsdu_size rx_buf_size;
- bool bc_table_dword;
 bool scd_set_active;
 bool sw_csum_tx;
 bool pcie_dbg_dumped_once;
 u32 rx_page_order;
+ u32 rx_buf_bytes;
+ u32 supported_dma_mask;
+
+ /* allocator lock for the two values below */
+ spinlock_t alloc_page_lock;
+ struct page *alloc_page;
+ u32 alloc_page_used;
 
 /*protect hw register */
 spinlock_t reg_lock;
 bool cmd_hold_nic_awake;
- bool ref_cmd_in_flight;
 
- dma_addr_t fw_mon_phys;
- struct page *fw_mon_page;
- u32 fw_mon_size;
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ struct cont_rec fw_mon_data;
+#endif
 
 struct msix_entry msix_entries[IWL_MAX_RX_HW_QUEUES];
 bool msix_enabled;
...
 cpumask_t affinity_mask[IWL_MAX_RX_HW_QUEUES];
 u16 tx_cmd_queue_size;
 bool in_rescan;
- bool scheduled_for_removal;
+
+ void *base_rb_stts;
+ dma_addr_t base_rb_stts_dma;
 };
 
 static inline struct iwl_trans_pcie *
...
 * Convention: trans API functions: iwl_trans_pcie_XXX
 * Other functions: iwl_pcie_XXX
 */
-struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
- const struct pci_device_id *ent,
- const struct iwl_cfg *cfg);
+struct iwl_trans
+*iwl_trans_pcie_alloc(struct pci_dev *pdev,
+ const struct pci_device_id *ent,
+ const struct iwl_cfg_trans_params *cfg_trans);
 void iwl_trans_pcie_free(struct iwl_trans *trans);
 
 /*****************************************************
...
 * TX / HCMD
 ******************************************************/
 int iwl_pcie_tx_init(struct iwl_trans *trans);
-int iwl_pcie_gen2_tx_init(struct iwl_trans *trans);
 void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr);
 int iwl_pcie_tx_stop(struct iwl_trans *trans);
 void iwl_pcie_tx_free(struct iwl_trans *trans);
...
 bool configure_scd);
 void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
 bool shared_mode);
-void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans,
- struct iwl_txq *txq);
 int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
- struct iwl_device_cmd *dev_cmd, int txq_id);
+ struct iwl_device_tx_cmd *dev_cmd, int txq_id);
 void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
 int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
 void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
 struct iwl_rx_cmd_buffer *rxb);
 void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
 struct sk_buff_head *skbs);
+void iwl_trans_pcie_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr);
 void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
-
-static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_trans *trans, void *_tfd,
- u8 idx)
-{
- if (trans->cfg->use_tfh) {
- struct iwl_tfh_tfd *tfd = _tfd;
- struct iwl_tfh_tb *tb = &tfd->tbs[idx];
-
- return le16_to_cpu(tb->tb_len);
- } else {
- struct iwl_tfd *tfd = _tfd;
- struct iwl_tfd_tb *tb = &tfd->tbs[idx];
-
- return le16_to_cpu(tb->hi_n_len) >> 4;
- }
-}
 
 /*****************************************************
 * Error handling
...
 return i;
 }
 
-static inline int iwl_pcie_ctxt_info_alloc_dma(struct iwl_trans *trans,
- const struct fw_desc *sec,
- struct iwl_dram_data *dram)
-{
- dram->block = dma_alloc_coherent(trans->dev, sec->len,
- &dram->physical,
- GFP_KERNEL);
- if (!dram->block)
- return -ENOMEM;
-
- dram->size = sec->len;
- memcpy(dram->block, sec->data, sec->len);
-
- return 0;
-}
-
 static inline void iwl_pcie_ctxt_info_free_fw_img(struct iwl_trans *trans)
 {
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_self_init_dram *dram = &trans_pcie->init_dram;
+ struct iwl_self_init_dram *dram = &trans->init_dram;
 int i;
 
 if (!dram->fw) {
...
 }
 }
 
-static inline u16 iwl_pcie_get_cmd_index(const struct iwl_txq *q, u32 index)
-{
- return index & (q->n_window - 1);
-}
-
-static inline void *iwl_pcie_get_tfd(struct iwl_trans *trans,
- struct iwl_txq *txq, int idx)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
- if (trans->cfg->use_tfh)
- idx = iwl_pcie_get_cmd_index(txq, idx);
-
- return txq->tfds + trans_pcie->tfd_size * idx;
-}
-
 static inline const char *queue_name(struct device *dev,
 struct iwl_trans_pcie *trans_p, int i)
 {
...
 MSIX_HW_INT_CAUSES_REG_RF_KILL);
 }
 
- if (trans->cfg->device_family == IWL_DEVICE_FAMILY_9000) {
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_9000) {
 /*
 * On 9000-series devices this bit isn't enabled by default, so
 * when we power down the device we need set the bit to allow it
...
 
 void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans);
 
-static inline void iwl_wake_queue(struct iwl_trans *trans,
- struct iwl_txq *txq)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
- if (test_and_clear_bit(txq->id, trans_pcie->queue_stopped)) {
- IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id);
- iwl_op_mode_queue_not_full(trans->op_mode, txq->id);
- }
-}
-
-static inline void iwl_stop_queue(struct iwl_trans *trans,
- struct iwl_txq *txq)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
- if (!test_and_set_bit(txq->id, trans_pcie->queue_stopped)) {
- iwl_op_mode_queue_full(trans->op_mode, txq->id);
- IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->id);
- } else
- IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
- txq->id);
-}
-
-static inline bool iwl_queue_used(const struct iwl_txq *q, int i)
-{
- int index = iwl_pcie_get_cmd_index(q, i);
- int r = iwl_pcie_get_cmd_index(q, q->read_ptr);
- int w = iwl_pcie_get_cmd_index(q, q->write_ptr);
-
- return w >= r ?
- (index >= r && index < w) :
- !(index < r && index >= w);
-}
-
 static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
 {
 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
 lockdep_assert_held(&trans_pcie->mutex);
 
- if (trans_pcie->debug_rfkill)
+ if (trans_pcie->debug_rfkill == 1)
 return true;
 
 return !(iwl_read32(trans, CSR_GP_CNTRL) &
...
 __iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask);
 }
 
+static inline bool iwl_pcie_dbg_on(struct iwl_trans *trans)
+{
+ return (trans->dbg.dest_tlv || iwl_trans_dbg_ini_valid(trans));
+}
+
 void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state);
+void iwl_trans_pcie_dump_regs(struct iwl_trans *trans);
+void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans);
 
 #ifdef CONFIG_IWLWIFI_DEBUGFS
-int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans);
+void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans);
 #else
-static inline int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
-{
- return 0;
-}
+static inline void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans) { }
 #endif
-
-int iwl_pci_fw_exit_d0i3(struct iwl_trans *trans);
-int iwl_pci_fw_enter_d0i3(struct iwl_trans *trans);
-
-void iwl_pcie_enable_rx_wake(struct iwl_trans *trans, bool enable);
 
 void iwl_pcie_rx_allocator_work(struct work_struct *data);
 
 /* common functions that are used by gen2 transport */
+int iwl_pcie_gen2_apm_init(struct iwl_trans *trans);
 void iwl_pcie_apm_config(struct iwl_trans *trans);
 int iwl_pcie_prepare_card_hw(struct iwl_trans *trans);
 void iwl_pcie_synchronize_irqs(struct iwl_trans *trans);
...
 void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
 bool was_in_rfkill);
 void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
-int iwl_queue_space(struct iwl_trans *trans, const struct iwl_txq *q);
 void iwl_pcie_apm_stop_master(struct iwl_trans *trans);
 void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie);
-int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
- int slots_num, bool cmd_queue);
-int iwl_pcie_txq_alloc(struct iwl_trans *trans,
- struct iwl_txq *txq, int slots_num, bool cmd_queue);
 int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
 struct iwl_dma_ptr *ptr, size_t size);
 void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr);
 void iwl_pcie_apply_destination(struct iwl_trans *trans);
-void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie,
- struct sk_buff *skb);
-#ifdef CONFIG_INET
-struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len);
-#endif
 
 /* common functions that are used by gen3 transport */
 void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power);
...
 int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
 const struct fw_img *fw, bool run_in_rfkill);
 void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr);
-int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
- struct iwl_tx_queue_cfg_cmd *cmd,
- int cmd_id, int size,
- unsigned int timeout);
-void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue);
-int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
- struct iwl_device_cmd *dev_cmd, int txq_id);
 int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
 struct iwl_host_cmd *cmd);
-void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans,
- bool low_power);
-void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power);
-void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id);
-void iwl_pcie_gen2_tx_free(struct iwl_trans *trans);
-void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans);
+void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans);
+void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans);
+void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans,
+ bool test, bool reset);
 #endif /* __iwl_trans_int_pcie_h__ */