forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-10-12 a5969cabbb4660eab42b6ef0412cbbd1200cf14d
kernel/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
....@@ -6,7 +6,7 @@
66 * GPL LICENSE SUMMARY
77 *
88 * Copyright(c) 2017 Intel Deutschland GmbH
9
- * Copyright(c) 2018 Intel Corporation
9
+ * Copyright(c) 2018 - 2021 Intel Corporation
1010 *
1111 * This program is free software; you can redistribute it and/or modify
1212 * it under the terms of version 2 of the GNU General Public License as
....@@ -20,7 +20,7 @@
2020 * BSD LICENSE
2121 *
2222 * Copyright(c) 2017 Intel Deutschland GmbH
23
- * Copyright(c) 2018 Intel Corporation
23
+ * Copyright(c) 2018 - 2020 Intel Corporation
2424 * All rights reserved.
2525 *
2626 * Redistribution and use in source and binary forms, with or without
....@@ -57,10 +57,60 @@
5757 #include "internal.h"
5858 #include "iwl-prph.h"
5959
60
/*
 * Allocate a coherent DMA buffer of @size bytes, retrying (recursively,
 * three attempts total) whenever the returned buffer crosses a 4 GiB
 * boundary.  On success returns the CPU address and stores the DMA
 * address in @phys; returns NULL on allocation failure or once the
 * retry budget is exhausted.
 */
static void *_iwl_pcie_ctxt_info_dma_alloc_coherent(struct iwl_trans *trans,
						    size_t size,
						    dma_addr_t *phys,
						    int depth)
{
	void *result;

	/* Give up (with a warning) after three crossing buffers in a row. */
	if (WARN(depth > 2,
		 "failed to allocate DMA memory not crossing 2^32 boundary"))
		return NULL;

	result = dma_alloc_coherent(trans->dev, size, phys, GFP_KERNEL);

	if (!result)
		return NULL;

	if (unlikely(iwl_txq_crosses_4g_boundary(*phys, size))) {
		void *old = result;
		dma_addr_t oldphys = *phys;

		/*
		 * Retry while still holding the crossing buffer —
		 * NOTE(review): presumably so the allocator cannot hand
		 * back the same region; the old buffer is freed only
		 * after the recursive attempt returns.
		 */
		result = _iwl_pcie_ctxt_info_dma_alloc_coherent(trans, size,
								phys,
								depth + 1);
		dma_free_coherent(trans->dev, size, old, oldphys);
	}

	return result;
}
88
+
89
/*
 * Public entry point for boundary-safe coherent DMA allocation: starts
 * the bounded retry chain of _iwl_pcie_ctxt_info_dma_alloc_coherent()
 * at depth 0.
 */
static void *iwl_pcie_ctxt_info_dma_alloc_coherent(struct iwl_trans *trans,
						   size_t size,
						   dma_addr_t *phys)
{
	return _iwl_pcie_ctxt_info_dma_alloc_coherent(trans, size, phys, 0);
}
95
+
96
+int iwl_pcie_ctxt_info_alloc_dma(struct iwl_trans *trans,
97
+ const void *data, u32 len,
98
+ struct iwl_dram_data *dram)
99
+{
100
+ dram->block = iwl_pcie_ctxt_info_dma_alloc_coherent(trans, len,
101
+ &dram->physical);
102
+ if (!dram->block)
103
+ return -ENOMEM;
104
+
105
+ dram->size = len;
106
+ memcpy(dram->block, data, len);
107
+
108
+ return 0;
109
+}
110
+
60111 void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans)
61112 {
62
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
63
- struct iwl_self_init_dram *dram = &trans_pcie->init_dram;
113
+ struct iwl_self_init_dram *dram = &trans->init_dram;
64114 int i;
65115
66116 if (!dram->paging) {
....@@ -83,8 +133,7 @@
83133 const struct fw_img *fw,
84134 struct iwl_context_info_dram *ctxt_dram)
85135 {
86
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
87
- struct iwl_self_init_dram *dram = &trans_pcie->init_dram;
136
+ struct iwl_self_init_dram *dram = &trans->init_dram;
88137 int i, ret, lmac_cnt, umac_cnt, paging_cnt;
89138
90139 if (WARN(dram->paging,
....@@ -107,7 +156,8 @@
107156
108157 /* initialize lmac sections */
109158 for (i = 0; i < lmac_cnt; i++) {
110
- ret = iwl_pcie_ctxt_info_alloc_dma(trans, &fw->sec[i],
159
+ ret = iwl_pcie_ctxt_info_alloc_dma(trans, fw->sec[i].data,
160
+ fw->sec[i].len,
111161 &dram->fw[dram->fw_cnt]);
112162 if (ret)
113163 return ret;
....@@ -120,7 +170,8 @@
120170 for (i = 0; i < umac_cnt; i++) {
121171 /* access FW with +1 to make up for lmac separator */
122172 ret = iwl_pcie_ctxt_info_alloc_dma(trans,
123
- &fw->sec[dram->fw_cnt + 1],
173
+ fw->sec[dram->fw_cnt + 1].data,
174
+ fw->sec[dram->fw_cnt + 1].len,
124175 &dram->fw[dram->fw_cnt]);
125176 if (ret)
126177 return ret;
....@@ -143,7 +194,8 @@
143194 /* access FW with +2 to make up for lmac & umac separators */
144195 int fw_idx = dram->fw_cnt + i + 2;
145196
146
- ret = iwl_pcie_ctxt_info_alloc_dma(trans, &fw->sec[fw_idx],
197
+ ret = iwl_pcie_ctxt_info_alloc_dma(trans, fw->sec[fw_idx].data,
198
+ fw->sec[fw_idx].len,
147199 &dram->paging[i]);
148200 if (ret)
149201 return ret;
....@@ -162,14 +214,17 @@
162214 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
163215 struct iwl_context_info *ctxt_info;
164216 struct iwl_context_info_rbd_cfg *rx_cfg;
165
- u32 control_flags = 0;
217
+ u32 control_flags = 0, rb_size;
218
+ dma_addr_t phys;
166219 int ret;
167220
168
- ctxt_info = dma_alloc_coherent(trans->dev, sizeof(*ctxt_info),
169
- &trans_pcie->ctxt_info_dma_addr,
170
- GFP_KERNEL);
221
+ ctxt_info = iwl_pcie_ctxt_info_dma_alloc_coherent(trans,
222
+ sizeof(*ctxt_info),
223
+ &phys);
171224 if (!ctxt_info)
172225 return -ENOMEM;
226
+
227
+ trans_pcie->ctxt_info_dma_addr = phys;
173228
174229 ctxt_info->version.version = 0;
175230 ctxt_info->version.mac_id =
....@@ -177,11 +232,30 @@
177232 /* size is in DWs */
178233 ctxt_info->version.size = cpu_to_le16(sizeof(*ctxt_info) / 4);
179234
180
- BUILD_BUG_ON(RX_QUEUE_CB_SIZE(MQ_RX_TABLE_SIZE) > 0xF);
181
- control_flags = IWL_CTXT_INFO_RB_SIZE_4K |
182
- IWL_CTXT_INFO_TFD_FORMAT_LONG |
183
- RX_QUEUE_CB_SIZE(MQ_RX_TABLE_SIZE) <<
184
- IWL_CTXT_INFO_RB_CB_SIZE_POS;
235
+ switch (trans_pcie->rx_buf_size) {
236
+ case IWL_AMSDU_2K:
237
+ rb_size = IWL_CTXT_INFO_RB_SIZE_2K;
238
+ break;
239
+ case IWL_AMSDU_4K:
240
+ rb_size = IWL_CTXT_INFO_RB_SIZE_4K;
241
+ break;
242
+ case IWL_AMSDU_8K:
243
+ rb_size = IWL_CTXT_INFO_RB_SIZE_8K;
244
+ break;
245
+ case IWL_AMSDU_12K:
246
+ rb_size = IWL_CTXT_INFO_RB_SIZE_12K;
247
+ break;
248
+ default:
249
+ WARN_ON(1);
250
+ rb_size = IWL_CTXT_INFO_RB_SIZE_4K;
251
+ }
252
+
253
+ WARN_ON(RX_QUEUE_CB_SIZE(trans->cfg->num_rbds) > 12);
254
+ control_flags = IWL_CTXT_INFO_TFD_FORMAT_LONG;
255
+ control_flags |=
256
+ u32_encode_bits(RX_QUEUE_CB_SIZE(trans->cfg->num_rbds),
257
+ IWL_CTXT_INFO_RB_CB_SIZE);
258
+ control_flags |= u32_encode_bits(rb_size, IWL_CTXT_INFO_RB_SIZE);
185259 ctxt_info->control.control_flags = cpu_to_le32(control_flags);
186260
187261 /* initialize RX default queue */
....@@ -192,9 +266,9 @@
192266
193267 /* initialize TX command queue */
194268 ctxt_info->hcmd_cfg.cmd_queue_addr =
195
- cpu_to_le64(trans_pcie->txq[trans_pcie->cmd_queue]->dma_addr);
269
+ cpu_to_le64(trans->txqs.txq[trans->txqs.cmd.q_id]->dma_addr);
196270 ctxt_info->hcmd_cfg.cmd_queue_size =
197
- TFD_QUEUE_CB_SIZE(TFD_CMD_SLOTS);
271
+ TFD_QUEUE_CB_SIZE(IWL_CMD_QUEUE_SIZE);
198272
199273 /* allocate ucode sections in dram and set addresses */
200274 ret = iwl_pcie_init_fw_sec(trans, fw, &ctxt_info->dram);
....@@ -209,12 +283,11 @@
209283 iwl_enable_fw_load_int_ctx_info(trans);
210284
211285 /* Configure debug, if exists */
212
- if (trans->dbg_dest_tlv)
286
+ if (iwl_pcie_dbg_on(trans))
213287 iwl_pcie_apply_destination(trans);
214288
215289 /* kick FW self load */
216290 iwl_write64(trans, CSR_CTXT_INFO_BA, trans_pcie->ctxt_info_dma_addr);
217
- iwl_write_prph(trans, UREG_CPU_INIT_RUN, 1);
218291
219292 /* Context info will be released upon alive or failure to get one */
220293