```diff
@@ -6,7 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2021 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -20,7 +20,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2020 Intel Corporation
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -57,10 +57,60 @@
 #include "internal.h"
 #include "iwl-prph.h"
 
+static void *_iwl_pcie_ctxt_info_dma_alloc_coherent(struct iwl_trans *trans,
+                                                     size_t size,
+                                                     dma_addr_t *phys,
+                                                     int depth)
+{
+        void *result;
+
+        if (WARN(depth > 2,
+                 "failed to allocate DMA memory not crossing 2^32 boundary"))
+                return NULL;
+
+        result = dma_alloc_coherent(trans->dev, size, phys, GFP_KERNEL);
+
+        if (!result)
+                return NULL;
+
+        if (unlikely(iwl_txq_crosses_4g_boundary(*phys, size))) {
+                void *old = result;
+                dma_addr_t oldphys = *phys;
+
+                result = _iwl_pcie_ctxt_info_dma_alloc_coherent(trans, size,
+                                                                phys,
+                                                                depth + 1);
+                dma_free_coherent(trans->dev, size, old, oldphys);
+        }
+
+        return result;
+}
+
+static void *iwl_pcie_ctxt_info_dma_alloc_coherent(struct iwl_trans *trans,
+                                                   size_t size,
+                                                   dma_addr_t *phys)
+{
+        return _iwl_pcie_ctxt_info_dma_alloc_coherent(trans, size, phys, 0);
+}
+
+int iwl_pcie_ctxt_info_alloc_dma(struct iwl_trans *trans,
+                                 const void *data, u32 len,
+                                 struct iwl_dram_data *dram)
+{
+        dram->block = iwl_pcie_ctxt_info_dma_alloc_coherent(trans, len,
+                                                            &dram->physical);
+        if (!dram->block)
+                return -ENOMEM;
+
+        dram->size = len;
+        memcpy(dram->block, data, len);
+
+        return 0;
+}
+
 void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans)
 {
-        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-        struct iwl_self_init_dram *dram = &trans_pcie->init_dram;
+        struct iwl_self_init_dram *dram = &trans->init_dram;
         int i;
 
         if (!dram->paging) {
```
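The new `_iwl_pcie_ctxt_info_dma_alloc_coherent()` retries an allocation up to two times when the returned buffer straddles a 2^32 physical-address boundary, which the hardware cannot address across. Two details carry the logic: the crossing test and the free-after-retry ordering. Below is a userspace model of the same pattern; `crosses_4g()` is an assumption mirroring what `iwl_txq_crosses_4g_boundary()` plausibly checks, and `dma_alloc()`/`dma_free()` are hypothetical stand-ins for `dma_alloc_coherent()`/`dma_free_coherent()`.

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/*
 * Assumption: mirrors iwl_txq_crosses_4g_boundary(). Testing addr + size
 * (rather than addr + size - 1) is conservative: a block ending exactly
 * on the boundary is also rejected.
 */
static bool crosses_4g(uint64_t addr, size_t size)
{
        return (addr >> 32) != ((addr + size) >> 32);
}

/* Hypothetical stand-ins for dma_alloc_coherent()/dma_free_coherent(). */
void *dma_alloc(size_t size, uint64_t *phys);
void dma_free(size_t size, void *virt, uint64_t phys);

static void *alloc_no_crossing(size_t size, uint64_t *phys, int depth)
{
        void *buf, *retry;
        uint64_t oldphys;

        if (depth > 2)  /* give up, as the driver's WARN() path does */
                return NULL;

        buf = dma_alloc(size, phys);
        if (!buf)
                return NULL;
        if (!crosses_4g(*phys, size))
                return buf;

        /*
         * Allocate the replacement *before* freeing the rejected block,
         * so the allocator cannot hand the same crossing region back.
         */
        oldphys = *phys;
        retry = alloc_no_crossing(size, phys, depth + 1);
        dma_free(size, buf, oldphys);
        return retry;
}
```

The ordering is the point: if the rejected block were freed first, the allocator could return exactly the same region on the retry, and the depth limit would be exhausted without making progress.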
|---|
```diff
@@ -83,8 +133,7 @@
                                   const struct fw_img *fw,
                                   struct iwl_context_info_dram *ctxt_dram)
 {
-        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-        struct iwl_self_init_dram *dram = &trans_pcie->init_dram;
+        struct iwl_self_init_dram *dram = &trans->init_dram;
         int i, ret, lmac_cnt, umac_cnt, paging_cnt;
 
         if (WARN(dram->paging,
@@ -107,7 +156,8 @@
 
         /* initialize lmac sections */
         for (i = 0; i < lmac_cnt; i++) {
-                ret = iwl_pcie_ctxt_info_alloc_dma(trans, &fw->sec[i],
+                ret = iwl_pcie_ctxt_info_alloc_dma(trans, fw->sec[i].data,
+                                                   fw->sec[i].len,
                                                    &dram->fw[dram->fw_cnt]);
                 if (ret)
                         return ret;
```
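The signature change to `iwl_pcie_ctxt_info_alloc_dma()` propagates mechanically through all three call sites (lmac here, umac and paging below): instead of a firmware-section descriptor, callers now pass a bare data pointer plus length, so the helper no longer depends on the section type and can copy any blob into DMA-coherent memory. Schematically, with the destination argument unchanged:

```c
/* before: the helper dereferenced the firmware section descriptor */
ret = iwl_pcie_ctxt_info_alloc_dma(trans, &fw->sec[i],
                                   &dram->fw[dram->fw_cnt]);

/* after: callers pass pointer + length explicitly */
ret = iwl_pcie_ctxt_info_alloc_dma(trans, fw->sec[i].data, fw->sec[i].len,
                                   &dram->fw[dram->fw_cnt]);
```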
|---|
```diff
@@ -120,7 +170,8 @@
         for (i = 0; i < umac_cnt; i++) {
                 /* access FW with +1 to make up for lmac separator */
                 ret = iwl_pcie_ctxt_info_alloc_dma(trans,
-                                                   &fw->sec[dram->fw_cnt + 1],
+                                                   fw->sec[dram->fw_cnt + 1].data,
+                                                   fw->sec[dram->fw_cnt + 1].len,
                                                    &dram->fw[dram->fw_cnt]);
                 if (ret)
                         return ret;
@@ -143,7 +194,8 @@
                 /* access FW with +2 to make up for lmac & umac separators */
                 int fw_idx = dram->fw_cnt + i + 2;
 
-                ret = iwl_pcie_ctxt_info_alloc_dma(trans, &fw->sec[fw_idx],
+                ret = iwl_pcie_ctxt_info_alloc_dma(trans, fw->sec[fw_idx].data,
+                                                   fw->sec[fw_idx].len,
                                                    &dram->paging[i]);
                 if (ret)
                         return ret;
```
|---|
```diff
@@ -162,14 +214,17 @@
         struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
         struct iwl_context_info *ctxt_info;
         struct iwl_context_info_rbd_cfg *rx_cfg;
-        u32 control_flags = 0;
+        u32 control_flags = 0, rb_size;
+        dma_addr_t phys;
         int ret;
 
-        ctxt_info = dma_alloc_coherent(trans->dev, sizeof(*ctxt_info),
-                                       &trans_pcie->ctxt_info_dma_addr,
-                                       GFP_KERNEL);
+        ctxt_info = iwl_pcie_ctxt_info_dma_alloc_coherent(trans,
+                                                          sizeof(*ctxt_info),
+                                                          &phys);
         if (!ctxt_info)
                 return -ENOMEM;
+
+        trans_pcie->ctxt_info_dma_addr = phys;
 
         ctxt_info->version.version = 0;
         ctxt_info->version.mac_id =
@@ -177,11 +232,30 @@
         /* size is in DWs */
         ctxt_info->version.size = cpu_to_le16(sizeof(*ctxt_info) / 4);
 
-        BUILD_BUG_ON(RX_QUEUE_CB_SIZE(MQ_RX_TABLE_SIZE) > 0xF);
-        control_flags = IWL_CTXT_INFO_RB_SIZE_4K |
-                        IWL_CTXT_INFO_TFD_FORMAT_LONG |
-                        RX_QUEUE_CB_SIZE(MQ_RX_TABLE_SIZE) <<
-                        IWL_CTXT_INFO_RB_CB_SIZE_POS;
+        switch (trans_pcie->rx_buf_size) {
+        case IWL_AMSDU_2K:
+                rb_size = IWL_CTXT_INFO_RB_SIZE_2K;
+                break;
+        case IWL_AMSDU_4K:
+                rb_size = IWL_CTXT_INFO_RB_SIZE_4K;
+                break;
+        case IWL_AMSDU_8K:
+                rb_size = IWL_CTXT_INFO_RB_SIZE_8K;
+                break;
+        case IWL_AMSDU_12K:
+                rb_size = IWL_CTXT_INFO_RB_SIZE_12K;
+                break;
+        default:
+                WARN_ON(1);
+                rb_size = IWL_CTXT_INFO_RB_SIZE_4K;
+        }
+
+        WARN_ON(RX_QUEUE_CB_SIZE(trans->cfg->num_rbds) > 12);
+        control_flags = IWL_CTXT_INFO_TFD_FORMAT_LONG;
+        control_flags |=
+                u32_encode_bits(RX_QUEUE_CB_SIZE(trans->cfg->num_rbds),
+                                IWL_CTXT_INFO_RB_CB_SIZE);
+        control_flags |= u32_encode_bits(rb_size, IWL_CTXT_INFO_RB_SIZE);
         ctxt_info->control.control_flags = cpu_to_le32(control_flags);
 
         /* initialize RX default queue */
```
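Two things change here. First, the receive-buffer size is no longer hardcoded to 4K but follows the configured `rx_buf_size`, and the RBD circular-buffer size now comes from `trans->cfg->num_rbds`, a runtime per-device value, which is why the compile-time `BUILD_BUG_ON` becomes a runtime `WARN_ON`. Second, the open-coded shift against `IWL_CTXT_INFO_RB_CB_SIZE_POS` is replaced by `u32_encode_bits()` from `<linux/bitfield.h>`, which derives the shift from the field mask itself, so only the mask needs defining. A minimal illustration of the semantics (`EXAMPLE_FIELD` is a made-up mask standing in for something like `IWL_CTXT_INFO_RB_CB_SIZE`):

```c
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

/* Illustrative only: a contiguous 4-bit field at bits 7..4. */
#define EXAMPLE_FIELD GENMASK(7, 4)

static u32 encode_example(u32 val)
{
        /* u32_encode_bits() shifts val up to the mask's lowest set bit
         * and masks the result: encode_example(5) == 0x50, the same as
         * (5 << 4) but with the shift derived from the mask instead of
         * a separate _POS macro. */
        return u32_encode_bits(val, EXAMPLE_FIELD);
}
```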
|---|
```diff
@@ -192,9 +266,9 @@
 
         /* initialize TX command queue */
         ctxt_info->hcmd_cfg.cmd_queue_addr =
-                cpu_to_le64(trans_pcie->txq[trans_pcie->cmd_queue]->dma_addr);
+                cpu_to_le64(trans->txqs.txq[trans->txqs.cmd.q_id]->dma_addr);
         ctxt_info->hcmd_cfg.cmd_queue_size =
-                TFD_QUEUE_CB_SIZE(TFD_CMD_SLOTS);
+                TFD_QUEUE_CB_SIZE(IWL_CMD_QUEUE_SIZE);
 
         /* allocate ucode sections in dram and set addresses */
         ret = iwl_pcie_init_fw_sec(trans, fw, &ctxt_info->dram);
```
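The command queue, like the self-init DRAM state above, moves off the PCIe-private struct: the queue pointer now comes from the shared `trans->txqs`, and the slot-count macro is renamed from `TFD_CMD_SLOTS` to `IWL_CMD_QUEUE_SIZE`. Assuming `TFD_QUEUE_CB_SIZE()` keeps its usual `iwl-fh.h` definition and both macros are the historical 32 entries, the value written to the device is unchanged:

```c
/* Assumption: TFD_QUEUE_CB_SIZE(x) == ilog2(x) - 3, i.e. the device
 * takes the queue length as a power-of-two exponent offset by 3.
 * For a 32-entry command queue:
 *
 *        TFD_QUEUE_CB_SIZE(32) == ilog2(32) - 3 == 5 - 3 == 2
 */
```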
|---|
```diff
@@ -209,12 +283,11 @@
         iwl_enable_fw_load_int_ctx_info(trans);
 
         /* Configure debug, if exists */
-        if (trans->dbg_dest_tlv)
+        if (iwl_pcie_dbg_on(trans))
                 iwl_pcie_apply_destination(trans);
 
         /* kick FW self load */
         iwl_write64(trans, CSR_CTXT_INFO_BA, trans_pcie->ctxt_info_dma_addr);
-        iwl_write_prph(trans, UREG_CPU_INIT_RUN, 1);
 
         /* Context info will be released upon alive or failure to get one */
 
```
|---|