@@ -23,8 +23,26 @@
  */
 #include "priv.h"
 
-#include <core/msgqueue.h>
+#include <core/firmware.h>
 #include <subdev/timer.h>
+
+bool
+nvkm_pmu_fan_controlled(struct nvkm_device *device)
+{
+	struct nvkm_pmu *pmu = device->pmu;
+
+	/* Internal PMU FW does not currently control fans in any way,
+	 * allow SW control of fans instead.
+	 */
+	if (pmu && pmu->func->code.size)
+		return false;
+
+	/* Default (board-loaded, or VBIOS PMU/PREOS) PMU FW on Fermi
+	 * and newer automatically control the fan speed, which would
+	 * interfere with SW control.
+	 */
+	return (device->chipset >= 0xc0);
+}
 
 void
 nvkm_pmu_pgob(struct nvkm_pmu *pmu, bool enable)
@@ -67,23 +85,22 @@
 	pmu->func->fini(pmu);
 
 	flush_work(&pmu->recv.work);
+
+	reinit_completion(&pmu->wpr_ready);
+
+	nvkm_falcon_cmdq_fini(pmu->lpq);
+	nvkm_falcon_cmdq_fini(pmu->hpq);
+	pmu->initmsg_received = false;
 	return 0;
 }
 
-static int
+static void
 nvkm_pmu_reset(struct nvkm_pmu *pmu)
 {
 	struct nvkm_device *device = pmu->subdev.device;
 
 	if (!pmu->func->enabled(pmu))
-		return 0;
-
-	/* Inhibit interrupts, and wait for idle. */
-	nvkm_wr32(device, 0x10a014, 0x0000ffff);
-	nvkm_msec(device, 2000,
-		if (!nvkm_rd32(device, 0x10a04c))
-			break;
-	);
+		return;
 
 	/* Reset. */
 	if (pmu->func->reset)
@@ -94,40 +111,48 @@
 		if (!(nvkm_rd32(device, 0x10a10c) & 0x00000006))
 			break;
 	);
-
-	return 0;
 }
 
 static int
 nvkm_pmu_preinit(struct nvkm_subdev *subdev)
 {
 	struct nvkm_pmu *pmu = nvkm_pmu(subdev);
-	return nvkm_pmu_reset(pmu);
+	nvkm_pmu_reset(pmu);
+	return 0;
 }
 
 static int
 nvkm_pmu_init(struct nvkm_subdev *subdev)
 {
 	struct nvkm_pmu *pmu = nvkm_pmu(subdev);
-	int ret = nvkm_pmu_reset(pmu);
-	if (ret == 0 && pmu->func->init)
-		ret = pmu->func->init(pmu);
-	return ret;
-}
+	struct nvkm_device *device = pmu->subdev.device;
 
-static int
-nvkm_pmu_oneinit(struct nvkm_subdev *subdev)
-{
-	struct nvkm_pmu *pmu = nvkm_pmu(subdev);
-	return nvkm_falcon_v1_new(&pmu->subdev, "PMU", 0x10a000, &pmu->falcon);
+	if (!pmu->func->init)
+		return 0;
+
+	if (pmu->func->enabled(pmu)) {
+		/* Inhibit interrupts, and wait for idle. */
+		nvkm_wr32(device, 0x10a014, 0x0000ffff);
+		nvkm_msec(device, 2000,
+			if (!nvkm_rd32(device, 0x10a04c))
+				break;
+		);
+
+		nvkm_pmu_reset(pmu);
+	}
+
+	return pmu->func->init(pmu);
 }
 
 static void *
 nvkm_pmu_dtor(struct nvkm_subdev *subdev)
 {
 	struct nvkm_pmu *pmu = nvkm_pmu(subdev);
-	nvkm_msgqueue_del(&pmu->queue);
-	nvkm_falcon_del(&pmu->falcon);
+	nvkm_falcon_msgq_del(&pmu->msgq);
+	nvkm_falcon_cmdq_del(&pmu->lpq);
+	nvkm_falcon_cmdq_del(&pmu->hpq);
+	nvkm_falcon_qmgr_del(&pmu->qmgr);
+	nvkm_falcon_dtor(&pmu->falcon);
 	return nvkm_pmu(subdev);
 }
 
@@ -135,29 +160,50 @@
 nvkm_pmu = {
 	.dtor = nvkm_pmu_dtor,
 	.preinit = nvkm_pmu_preinit,
-	.oneinit = nvkm_pmu_oneinit,
 	.init = nvkm_pmu_init,
 	.fini = nvkm_pmu_fini,
 	.intr = nvkm_pmu_intr,
 };
 
 int
-nvkm_pmu_ctor(const struct nvkm_pmu_func *func, struct nvkm_device *device,
+nvkm_pmu_ctor(const struct nvkm_pmu_fwif *fwif, struct nvkm_device *device,
 	      int index, struct nvkm_pmu *pmu)
 {
+	int ret;
+
 	nvkm_subdev_ctor(&nvkm_pmu, device, index, &pmu->subdev);
-	pmu->func = func;
+
 	INIT_WORK(&pmu->recv.work, nvkm_pmu_recv);
 	init_waitqueue_head(&pmu->recv.wait);
+
+	fwif = nvkm_firmware_load(&pmu->subdev, fwif, "Pmu", pmu);
+	if (IS_ERR(fwif))
+		return PTR_ERR(fwif);
+
+	pmu->func = fwif->func;
+
+	ret = nvkm_falcon_ctor(pmu->func->flcn, &pmu->subdev,
+			       nvkm_subdev_name[pmu->subdev.index], 0x10a000,
+			       &pmu->falcon);
+	if (ret)
+		return ret;
+
+	if ((ret = nvkm_falcon_qmgr_new(&pmu->falcon, &pmu->qmgr)) ||
+	    (ret = nvkm_falcon_cmdq_new(pmu->qmgr, "hpq", &pmu->hpq)) ||
+	    (ret = nvkm_falcon_cmdq_new(pmu->qmgr, "lpq", &pmu->lpq)) ||
+	    (ret = nvkm_falcon_msgq_new(pmu->qmgr, "msgq", &pmu->msgq)))
+		return ret;
+
+	init_completion(&pmu->wpr_ready);
 	return 0;
 }
 
 int
-nvkm_pmu_new_(const struct nvkm_pmu_func *func, struct nvkm_device *device,
+nvkm_pmu_new_(const struct nvkm_pmu_fwif *fwif, struct nvkm_device *device,
 	      int index, struct nvkm_pmu **ppmu)
 {
 	struct nvkm_pmu *pmu;
 	if (!(pmu = *ppmu = kzalloc(sizeof(*pmu), GFP_KERNEL)))
 		return -ENOMEM;
-	return nvkm_pmu_ctor(func, device, index, *ppmu);
+	return nvkm_pmu_ctor(fwif, device, index, *ppmu);
 }
---|