forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-11 04dd17822334871b23ea2862f7798fb0e0007777
kernel/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c
....@@ -19,38 +19,229 @@
1919 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
2020 * DEALINGS IN THE SOFTWARE.
2121 */
22
-
23
-#include <engine/falcon.h>
24
-#include <core/msgqueue.h>
2522 #include "priv.h"
2623
27
-static void
28
-gm20b_pmu_recv(struct nvkm_pmu *pmu)
24
+#include <core/memory.h>
25
+#include <subdev/acr.h>
26
+
27
+#include <nvfw/flcn.h>
28
+#include <nvfw/pmu.h>
29
+
30
/* Reply handler for NV_PMU_ACR_CMD_BOOTSTRAP_FALCON: recover the full
 * message from its falcon header and hand the bootstrapped falcon ID
 * back to the command-queue waiter (who checks it against the request).
 */
static int
gm20b_pmu_acr_bootstrap_falcon_cb(void *priv, struct nvfw_falcon_msg *hdr)
{
	struct nv_pmu_acr_bootstrap_falcon_msg *msg =
		container_of(hdr, typeof(*msg), msg.hdr);
	return msg->falcon_id;
}
37
+
38
+int
39
+gm20b_pmu_acr_bootstrap_falcon(struct nvkm_falcon *falcon,
40
+ enum nvkm_acr_lsf_id id)
41
+{
42
+ struct nvkm_pmu *pmu = container_of(falcon, typeof(*pmu), falcon);
43
+ struct nv_pmu_acr_bootstrap_falcon_cmd cmd = {
44
+ .cmd.hdr.unit_id = NV_PMU_UNIT_ACR,
45
+ .cmd.hdr.size = sizeof(cmd),
46
+ .cmd.cmd_type = NV_PMU_ACR_CMD_BOOTSTRAP_FALCON,
47
+ .flags = NV_PMU_ACR_BOOTSTRAP_FALCON_FLAGS_RESET_YES,
48
+ .falcon_id = id,
49
+ };
50
+ int ret;
51
+
52
+ ret = nvkm_falcon_cmdq_send(pmu->hpq, &cmd.cmd.hdr,
53
+ gm20b_pmu_acr_bootstrap_falcon_cb,
54
+ &pmu->subdev, msecs_to_jiffies(1000));
55
+ if (ret >= 0) {
56
+ if (ret != cmd.falcon_id)
57
+ ret = -EIO;
58
+ else
59
+ ret = 0;
3460 }
3561
36
- nvkm_msgqueue_recv(pmu->queue);
62
+ return ret;
63
+}
64
+
65
/* Launch the PMU LS ucode after ACR has loaded it.
 *
 * The nv_pmu_args blob (here just flagging secure mode) is copied to
 * the very top of the falcon's DMEM — the same address bld_write()
 * records in loader_config.argv — before starting the falcon.
 *
 * Always returns 0.
 */
int
gm20b_pmu_acr_boot(struct nvkm_falcon *falcon)
{
	struct nv_pmu_args args = { .secure_mode = true };
	const u32 addr_args = falcon->data.limit - sizeof(struct nv_pmu_args);
	nvkm_falcon_load_dmem(falcon, &args, addr_args, sizeof(args), 0);
	nvkm_falcon_start(falcon);
	return 0;
}
74
+
75
+void
76
+gm20b_pmu_acr_bld_patch(struct nvkm_acr *acr, u32 bld, s64 adjust)
77
+{
78
+ struct loader_config hdr;
79
+ u64 addr;
80
+
81
+ nvkm_robj(acr->wpr, bld, &hdr, sizeof(hdr));
82
+ addr = ((u64)hdr.code_dma_base1 << 40 | hdr.code_dma_base << 8);
83
+ hdr.code_dma_base = lower_32_bits((addr + adjust) >> 8);
84
+ hdr.code_dma_base1 = upper_32_bits((addr + adjust) >> 8);
85
+ addr = ((u64)hdr.data_dma_base1 << 40 | hdr.data_dma_base << 8);
86
+ hdr.data_dma_base = lower_32_bits((addr + adjust) >> 8);
87
+ hdr.data_dma_base1 = upper_32_bits((addr + adjust) >> 8);
88
+ addr = ((u64)hdr.overlay_dma_base1 << 40 | hdr.overlay_dma_base << 8);
89
+ hdr.overlay_dma_base = lower_32_bits((addr + adjust) << 8);
90
+ hdr.overlay_dma_base1 = upper_32_bits((addr + adjust) << 8);
91
+ nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr));
92
+
93
+ loader_config_dump(&acr->subdev, &hdr);
94
+}
95
+
96
/* Write the initial loader_config bootloader descriptor for the PMU
 * LS firmware into the WPR image at offset @bld.
 *
 * code/data are 256-byte-block addresses (byte address >> 8); each is
 * stored split into a low 32-bit *_dma_base field and a *_dma_base1
 * field holding the remaining upper bits.  The overlay base reuses the
 * code base.  argv points at the top of DMEM, where acr_boot() later
 * places the nv_pmu_args blob.
 */
void
gm20b_pmu_acr_bld_write(struct nvkm_acr *acr, u32 bld,
			struct nvkm_acr_lsfw *lsfw)
{
	const u64 base = lsfw->offset.img + lsfw->app_start_offset;
	const u64 code = (base + lsfw->app_resident_code_offset) >> 8;
	const u64 data = (base + lsfw->app_resident_data_offset) >> 8;
	const struct loader_config hdr = {
		.dma_idx = FALCON_DMAIDX_UCODE,
		.code_dma_base = lower_32_bits(code),
		.code_size_total = lsfw->app_size,
		.code_size_to_load = lsfw->app_resident_code_size,
		.code_entry_point = lsfw->app_imem_entry,
		.data_dma_base = lower_32_bits(data),
		.data_size = lsfw->app_resident_data_size,
		.overlay_dma_base = lower_32_bits(code),
		/* single argument blob: nv_pmu_args at the top of DMEM */
		.argc = 1,
		.argv = lsfw->falcon->data.limit - sizeof(struct nv_pmu_args),
		.code_dma_base1 = upper_32_bits(code),
		.data_dma_base1 = upper_32_bits(data),
		.overlay_dma_base1 = upper_32_bits(code),
	};

	nvkm_wobj(acr->wpr, bld, &hdr, sizeof(hdr));
}
121
+
122
/* ACR light-secure falcon callbacks for the gm20b PMU.  Once booted,
 * the PMU's ACR ucode can bootstrap itself, FECS and GPCCS.
 */
static const struct nvkm_acr_lsf_func
gm20b_pmu_acr = {
	.flags = NVKM_ACR_LSF_DMACTL_REQ_CTX,
	.bld_size = sizeof(struct loader_config),
	.bld_write = gm20b_pmu_acr_bld_write,
	.bld_patch = gm20b_pmu_acr_bld_patch,
	.boot = gm20b_pmu_acr_boot,
	.bootstrap_falcons = BIT_ULL(NVKM_ACR_LSF_PMU) |
			     BIT_ULL(NVKM_ACR_LSF_FECS) |
			     BIT_ULL(NVKM_ACR_LSF_GPCCS),
	.bootstrap_falcon = gm20b_pmu_acr_bootstrap_falcon,
};
134
+
135
+static int
136
+gm20b_pmu_acr_init_wpr_callback(void *priv, struct nvfw_falcon_msg *hdr)
137
+{
138
+ struct nv_pmu_acr_init_wpr_region_msg *msg =
139
+ container_of(hdr, typeof(*msg), msg.hdr);
140
+ struct nvkm_pmu *pmu = priv;
141
+ struct nvkm_subdev *subdev = &pmu->subdev;
142
+
143
+ if (msg->error_code) {
144
+ nvkm_error(subdev, "ACR WPR init failure: %d\n",
145
+ msg->error_code);
146
+ return -EINVAL;
147
+ }
148
+
149
+ nvkm_debug(subdev, "ACR WPR init complete\n");
150
+ complete_all(&pmu->wpr_ready);
151
+ return 0;
152
+}
153
+
154
/* Tell the PMU's ACR ucode where the write-protected region lives
 * (region 1, offset 0) via the high-priority command queue.  The reply
 * is handled asynchronously by gm20b_pmu_acr_init_wpr_callback(), which
 * completes pmu->wpr_ready; no timeout is requested here (0).
 */
static int
gm20b_pmu_acr_init_wpr(struct nvkm_pmu *pmu)
{
	struct nv_pmu_acr_init_wpr_region_cmd cmd = {
		.cmd.hdr.unit_id = NV_PMU_UNIT_ACR,
		.cmd.hdr.size = sizeof(cmd),
		.cmd.cmd_type = NV_PMU_ACR_CMD_INIT_WPR_REGION,
		.region_id = 1,
		.wpr_offset = 0,
	};

	return nvkm_falcon_cmdq_send(pmu->hpq, &cmd.cmd.hdr,
				     gm20b_pmu_acr_init_wpr_callback, pmu, 0);
}
168
+
169
/* Receive and parse the PMU firmware's INIT message.
 *
 * The INIT message carries the DMEM layout of the command/message
 * queues.  Slots 0 and 1 back the high- and low-priority command
 * queues; slot 4 backs the message queue (slot assignment is fixed by
 * the PMU firmware's nv_pmu_init_msg layout — NOTE(review): confirm
 * against the nvfw/pmu.h definitions if queue count ever changes).
 *
 * Returns 0 on success (after kicking off WPR region init), a negative
 * errno from the queue read, or -EINVAL if the message isn't the
 * expected INIT message.
 */
int
gm20b_pmu_initmsg(struct nvkm_pmu *pmu)
{
	struct nv_pmu_init_msg msg;
	int ret;

	ret = nvkm_falcon_msgq_recv_initmsg(pmu->msgq, &msg, sizeof(msg));
	if (ret)
		return ret;

	/* Sanity-check that this really is the firmware INIT message. */
	if (msg.hdr.unit_id != NV_PMU_UNIT_INIT ||
	    msg.msg_type != NV_PMU_INIT_MSG_INIT)
		return -EINVAL;

	nvkm_falcon_cmdq_init(pmu->hpq, msg.queue_info[0].index,
			      msg.queue_info[0].offset,
			      msg.queue_info[0].size);
	nvkm_falcon_cmdq_init(pmu->lpq, msg.queue_info[1].index,
			      msg.queue_info[1].offset,
			      msg.queue_info[1].size);
	nvkm_falcon_msgq_init(pmu->msgq, msg.queue_info[4].index,
			      msg.queue_info[4].offset,
			      msg.queue_info[4].size);
	return gm20b_pmu_acr_init_wpr(pmu);
}
194
+
195
+void
196
+gm20b_pmu_recv(struct nvkm_pmu *pmu)
197
+{
198
+ if (!pmu->initmsg_received) {
199
+ int ret = pmu->func->initmsg(pmu);
200
+ if (ret) {
201
+ nvkm_error(&pmu->subdev,
202
+ "error parsing init message: %d\n", ret);
203
+ return;
204
+ }
205
+
206
+ pmu->initmsg_received = true;
207
+ }
208
+
209
+ nvkm_falcon_msgq_recv(pmu->msgq);
37210 }
38211
/* PMU subdev implementation for gm20b (Tegra210): gt215 interrupt
 * handling plus queue-based messaging and ACR init on receive.
 */
static const struct nvkm_pmu_func
gm20b_pmu = {
	.flcn = &gm200_pmu_flcn,
	.enabled = gf100_pmu_enabled,
	.intr = gt215_pmu_intr,
	.recv = gm20b_pmu_recv,
	.initmsg = gm20b_pmu_initmsg,
	.reset = gf100_pmu_reset,
};
221
+
222
/* LS PMU firmware image names, registered only when the Tegra210 SoC
 * (the SoC containing gm20b) is enabled in the kernel config.
 */
#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
MODULE_FIRMWARE("nvidia/gm20b/pmu/desc.bin");
MODULE_FIRMWARE("nvidia/gm20b/pmu/image.bin");
MODULE_FIRMWARE("nvidia/gm20b/pmu/sig.bin");
#endif
227
+
228
/* fwif load hook: fetch the signed LS PMU firmware triplet
 * (image/desc/sig under "pmu/") and register it with ACR as the PMU
 * light-secure falcon.
 */
int
gm20b_pmu_load(struct nvkm_pmu *pmu, int ver, const struct nvkm_pmu_fwif *fwif)
{
	return nvkm_acr_lsfw_load_sig_image_desc(&pmu->subdev, &pmu->falcon,
						 NVKM_ACR_LSF_PMU, "pmu/",
						 ver, fwif->acr);
}
235
+
236
/* Firmware interface table: version 0 loads the signed LS firmware
 * with the ACR callbacks above; the -1 entry is the no-firmware
 * fallback.
 */
static const struct nvkm_pmu_fwif
gm20b_pmu_fwif[] = {
	{ 0, gm20b_pmu_load, &gm20b_pmu, &gm20b_pmu_acr },
	{ -1, gm200_pmu_nofw, &gm20b_pmu },
	{}
};
45242
/* Constructor: create the gm20b PMU subdev, selecting an entry from
 * the firmware interface table above.
 */
int
gm20b_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
{
	return nvkm_pmu_new_(gm20b_pmu_fwif, device, index, ppmu);
}