forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-11 04dd17822334871b23ea2862f7798fb0e0007777
kernel/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
@@ -23,8 +23,26 @@
  */
 #include "priv.h"
 
-#include <core/msgqueue.h>
+#include <core/firmware.h>
 #include <subdev/timer.h>
 
+bool
+nvkm_pmu_fan_controlled(struct nvkm_device *device)
+{
+	struct nvkm_pmu *pmu = device->pmu;
+
+	/* Internal PMU FW does not currently control fans in any way,
+	 * allow SW control of fans instead.
+	 */
+	if (pmu && pmu->func->code.size)
+		return false;
+
+	/* Default (board-loaded, or VBIOS PMU/PREOS) PMU FW on Fermi
+	 * and newer automatically control the fan speed, which would
+	 * interfere with SW control.
+	 */
+	return (device->chipset >= 0xc0);
+}
+
 void
 nvkm_pmu_pgob(struct nvkm_pmu *pmu, bool enable)
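The hunk above adds nvkm_pmu_fan_controlled(), which tells the rest of the driver whether the PMU firmware already owns fan management so that software fan control can back off. A minimal standalone sketch of that policy, assuming simplified placeholder structs rather than the real nouveau definitions:

/* Illustration only: mirrors the decision in nvkm_pmu_fan_controlled().
 * fake_pmu/fake_device are placeholders, not nouveau types.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct fake_pmu { size_t fw_code_size; };   /* stands in for pmu->func->code.size */
struct fake_device { struct fake_pmu *pmu; unsigned chipset; };

static bool
fan_controlled_by_fw(const struct fake_device *device)
{
	/* Driver-supplied PMU FW does not touch the fan: allow SW control. */
	if (device->pmu && device->pmu->fw_code_size)
		return false;

	/* Default/VBIOS PMU FW on Fermi (0xc0) and newer drives the fan itself. */
	return device->chipset >= 0xc0;
}

int
main(void)
{
	struct fake_pmu pmu = { .fw_code_size = 0 };
	struct fake_device kepler = { .pmu = &pmu, .chipset = 0xe0 };

	printf("FW controls fan: %d\n", fan_controlled_by_fw(&kepler));
	return 0;
}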
@@ -67,23 +85,22 @@
 	pmu->func->fini(pmu);
 
 	flush_work(&pmu->recv.work);
+
+	reinit_completion(&pmu->wpr_ready);
+
+	nvkm_falcon_cmdq_fini(pmu->lpq);
+	nvkm_falcon_cmdq_fini(pmu->hpq);
+	pmu->initmsg_received = false;
 	return 0;
 }
 
-static int
+static void
 nvkm_pmu_reset(struct nvkm_pmu *pmu)
 {
 	struct nvkm_device *device = pmu->subdev.device;
 
 	if (!pmu->func->enabled(pmu))
-		return 0;
-
-	/* Inhibit interrupts, and wait for idle. */
-	nvkm_wr32(device, 0x10a014, 0x0000ffff);
-	nvkm_msec(device, 2000,
-		if (!nvkm_rd32(device, 0x10a04c))
-			break;
-	);
+		return;
 
 	/* Reset. */
 	if (pmu->func->reset)
@@ -94,40 +111,48 @@
 		if (!(nvkm_rd32(device, 0x10a10c) & 0x00000006))
 			break;
 	);
-
-	return 0;
 }
 
 static int
 nvkm_pmu_preinit(struct nvkm_subdev *subdev)
 {
 	struct nvkm_pmu *pmu = nvkm_pmu(subdev);
-	return nvkm_pmu_reset(pmu);
+	nvkm_pmu_reset(pmu);
+	return 0;
 }
 
 static int
 nvkm_pmu_init(struct nvkm_subdev *subdev)
 {
 	struct nvkm_pmu *pmu = nvkm_pmu(subdev);
-	int ret = nvkm_pmu_reset(pmu);
-	if (ret == 0 && pmu->func->init)
-		ret = pmu->func->init(pmu);
-	return ret;
-}
+	struct nvkm_device *device = pmu->subdev.device;
 
-static int
-nvkm_pmu_oneinit(struct nvkm_subdev *subdev)
-{
-	struct nvkm_pmu *pmu = nvkm_pmu(subdev);
-	return nvkm_falcon_v1_new(&pmu->subdev, "PMU", 0x10a000, &pmu->falcon);
+	if (!pmu->func->init)
+		return 0;
+
+	if (pmu->func->enabled(pmu)) {
+		/* Inhibit interrupts, and wait for idle. */
+		nvkm_wr32(device, 0x10a014, 0x0000ffff);
+		nvkm_msec(device, 2000,
+			if (!nvkm_rd32(device, 0x10a04c))
+				break;
+		);
+
+		nvkm_pmu_reset(pmu);
+	}
+
+	return pmu->func->init(pmu);
 }
 
 static void *
 nvkm_pmu_dtor(struct nvkm_subdev *subdev)
 {
 	struct nvkm_pmu *pmu = nvkm_pmu(subdev);
-	nvkm_msgqueue_del(&pmu->queue);
-	nvkm_falcon_del(&pmu->falcon);
+	nvkm_falcon_msgq_del(&pmu->msgq);
+	nvkm_falcon_cmdq_del(&pmu->lpq);
+	nvkm_falcon_cmdq_del(&pmu->hpq);
+	nvkm_falcon_qmgr_del(&pmu->qmgr);
+	nvkm_falcon_dtor(&pmu->falcon);
 	return nvkm_pmu(subdev);
 }
 
@@ -135,29 +160,50 @@
 nvkm_pmu = {
 	.dtor = nvkm_pmu_dtor,
 	.preinit = nvkm_pmu_preinit,
-	.oneinit = nvkm_pmu_oneinit,
 	.init = nvkm_pmu_init,
 	.fini = nvkm_pmu_fini,
 	.intr = nvkm_pmu_intr,
 };
 
 int
-nvkm_pmu_ctor(const struct nvkm_pmu_func *func, struct nvkm_device *device,
+nvkm_pmu_ctor(const struct nvkm_pmu_fwif *fwif, struct nvkm_device *device,
 	      int index, struct nvkm_pmu *pmu)
 {
+	int ret;
+
 	nvkm_subdev_ctor(&nvkm_pmu, device, index, &pmu->subdev);
-	pmu->func = func;
+
 	INIT_WORK(&pmu->recv.work, nvkm_pmu_recv);
 	init_waitqueue_head(&pmu->recv.wait);
+
+	fwif = nvkm_firmware_load(&pmu->subdev, fwif, "Pmu", pmu);
+	if (IS_ERR(fwif))
+		return PTR_ERR(fwif);
+
+	pmu->func = fwif->func;
+
+	ret = nvkm_falcon_ctor(pmu->func->flcn, &pmu->subdev,
+			       nvkm_subdev_name[pmu->subdev.index], 0x10a000,
+			       &pmu->falcon);
+	if (ret)
+		return ret;
+
+	if ((ret = nvkm_falcon_qmgr_new(&pmu->falcon, &pmu->qmgr)) ||
+	    (ret = nvkm_falcon_cmdq_new(pmu->qmgr, "hpq", &pmu->hpq)) ||
+	    (ret = nvkm_falcon_cmdq_new(pmu->qmgr, "lpq", &pmu->lpq)) ||
+	    (ret = nvkm_falcon_msgq_new(pmu->qmgr, "msgq", &pmu->msgq)))
+		return ret;
+
+	init_completion(&pmu->wpr_ready);
 	return 0;
 }
 
 int
-nvkm_pmu_new_(const struct nvkm_pmu_func *func, struct nvkm_device *device,
+nvkm_pmu_new_(const struct nvkm_pmu_fwif *fwif, struct nvkm_device *device,
 	      int index, struct nvkm_pmu **ppmu)
 {
 	struct nvkm_pmu *pmu;
 	if (!(pmu = *ppmu = kzalloc(sizeof(*pmu), GFP_KERNEL)))
 		return -ENOMEM;
-	return nvkm_pmu_ctor(func, device, index, *ppmu);
+	return nvkm_pmu_ctor(fwif, device, index, *ppmu);
 }
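The reworked nvkm_pmu_ctor() above collapses its queue constructors into a chained-assignment idiom: each call assigns its return value to ret inside the condition, so the first failure short-circuits the chain and is returned unchanged. A self-contained sketch of the same idiom, with placeholder init helpers that are not nouveau code:

/* Chained-constructor idiom, as used in nvkm_pmu_ctor().
 * init_a/init_b are placeholders standing in for the qmgr/cmdq/msgq
 * constructors; only the control flow is the point here.
 */
#include <stdio.h>

static int init_a(void) { return 0; }   /* pretend success */
static int init_b(void) { return -5; }  /* pretend failure (-EIO-style) */

static int
setup_all(void)
{
	int ret;

	if ((ret = init_a()) ||
	    (ret = init_b()))
		return ret;	/* first non-zero error wins */

	return 0;
}

int
main(void)
{
	printf("setup_all() = %d\n", setup_all());
	return 0;
}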