.. | .. |
---|
178 | 178 | }; |
---|
179 | 179 | |
---|
180 | 180 | void |
---|
181 | | -gf100_vmm_flush_(struct nvkm_vmm *vmm, int depth) |
---|
| 181 | +gf100_vmm_invalidate_pdb(struct nvkm_vmm *vmm, u64 addr) |
---|
| 182 | +{ |
---|
| 183 | + struct nvkm_device *device = vmm->mmu->subdev.device; |
---|
| 184 | + nvkm_wr32(device, 0x100cb8, addr); |
---|
| 185 | +} |
---|
| 186 | + |
---|
| 187 | +void |
---|
| 188 | +gf100_vmm_invalidate(struct nvkm_vmm *vmm, u32 type) |
---|
182 | 189 | { |
---|
183 | 190 | struct nvkm_subdev *subdev = &vmm->mmu->subdev; |
---|
184 | 191 | struct nvkm_device *device = subdev->device; |
---|
185 | | - u32 type = depth << 24; |
---|
186 | | - |
---|
187 | | - type = 0x00000001; /* PAGE_ALL */ |
---|
188 | | - if (atomic_read(&vmm->engref[NVKM_SUBDEV_BAR])) |
---|
189 | | - type |= 0x00000004; /* HUB_ONLY */ |
---|
| 192 | + struct nvkm_mmu_pt *pd = vmm->pd->pt[0]; |
---|
| 193 | + u64 addr = 0; |
---|
190 | 194 | |
---|
191 | 195 | mutex_lock(&subdev->mutex); |
---|
192 | 196 | /* Looks like maybe a "free flush slots" counter, the |
---|
.. | .. |
---|
197 | 201 | break; |
---|
198 | 202 | ); |
---|
199 | 203 | |
---|
200 | | - nvkm_wr32(device, 0x100cb8, vmm->pd->pt[0]->addr >> 8); |
---|
| 204 | + if (!(type & 0x00000002) /* ALL_PDB. */) { |
---|
| 205 | + switch (nvkm_memory_target(pd->memory)) { |
---|
| 206 | + case NVKM_MEM_TARGET_VRAM: addr |= 0x00000000; break; |
---|
| 207 | + case NVKM_MEM_TARGET_HOST: addr |= 0x00000002; break; |
---|
| 208 | + case NVKM_MEM_TARGET_NCOH: addr |= 0x00000003; break; |
---|
| 209 | + default: |
---|
| 210 | + WARN_ON(1); |
---|
| 211 | + break; |
---|
| 212 | + } |
---|
| 213 | + addr |= (vmm->pd->pt[0]->addr >> 12) << 4; |
---|
| 214 | + |
---|
| 215 | + vmm->func->invalidate_pdb(vmm, addr); |
---|
| 216 | + } |
---|
| 217 | + |
---|
201 | 218 | nvkm_wr32(device, 0x100cbc, 0x80000000 | type); |
---|
202 | 219 | |
---|
203 | 220 | /* Wait for flush to be queued? */ |
---|
.. | .. |
---|
211 | 228 | void |
---|
212 | 229 | gf100_vmm_flush(struct nvkm_vmm *vmm, int depth) |
---|
213 | 230 | { |
---|
214 | | - gf100_vmm_flush_(vmm, 0); |
---|
| 231 | + u32 type = 0x00000001; /* PAGE_ALL */ |
---|
| 232 | + if (atomic_read(&vmm->engref[NVKM_SUBDEV_BAR])) |
---|
| 233 | + type |= 0x00000004; /* HUB_ONLY */ |
---|
| 234 | + gf100_vmm_invalidate(vmm, type); |
---|
215 | 235 | } |
---|
216 | 236 | |
---|
217 | 237 | int |
---|
.. | .. |
---|
227 | 247 | } *args = argv; |
---|
228 | 248 | struct nvkm_device *device = vmm->mmu->subdev.device; |
---|
229 | 249 | struct nvkm_memory *memory = map->memory; |
---|
230 | | - u8 kind, priv, ro, vol; |
---|
| 250 | + u8 kind, kind_inv, priv, ro, vol; |
---|
231 | 251 | int kindn, aper, ret = -ENOSYS; |
---|
232 | 252 | const u8 *kindm; |
---|
233 | 253 | |
---|
.. | .. |
---|
254 | 274 | if (WARN_ON(aper < 0)) |
---|
255 | 275 | return aper; |
---|
256 | 276 | |
---|
257 | | - kindm = vmm->mmu->func->kind(vmm->mmu, &kindn); |
---|
258 | | - if (kind >= kindn || kindm[kind] == 0xff) { |
---|
| 277 | + kindm = vmm->mmu->func->kind(vmm->mmu, &kindn, &kind_inv); |
---|
| 278 | + if (kind >= kindn || kindm[kind] == kind_inv) { |
---|
259 | 279 | VMM_DEBUG(vmm, "kind %02x", kind); |
---|
260 | 280 | return -EINVAL; |
---|
261 | 281 | } |
---|
.. | .. |
---|
354 | 374 | .aper = gf100_vmm_aper, |
---|
355 | 375 | .valid = gf100_vmm_valid, |
---|
356 | 376 | .flush = gf100_vmm_flush, |
---|
| 377 | + .invalidate_pdb = gf100_vmm_invalidate_pdb, |
---|
357 | 378 | .page = { |
---|
358 | 379 | { 17, &gf100_vmm_desc_17_17[0], NVKM_VMM_PAGE_xVxC }, |
---|
359 | 380 | { 12, &gf100_vmm_desc_17_12[0], NVKM_VMM_PAGE_xVHx }, |
---|
.. | .. |
---|
368 | 389 | .aper = gf100_vmm_aper, |
---|
369 | 390 | .valid = gf100_vmm_valid, |
---|
370 | 391 | .flush = gf100_vmm_flush, |
---|
| 392 | + .invalidate_pdb = gf100_vmm_invalidate_pdb, |
---|
371 | 393 | .page = { |
---|
372 | 394 | { 16, &gf100_vmm_desc_16_16[0], NVKM_VMM_PAGE_xVxC }, |
---|
373 | 395 | { 12, &gf100_vmm_desc_16_12[0], NVKM_VMM_PAGE_xVHx }, |
---|
.. | .. |
---|
378 | 400 | int |
---|
379 | 401 | gf100_vmm_new_(const struct nvkm_vmm_func *func_16, |
---|
380 | 402 | const struct nvkm_vmm_func *func_17, |
---|
381 | | - struct nvkm_mmu *mmu, u64 addr, u64 size, void *argv, u32 argc, |
---|
382 | | - struct lock_class_key *key, const char *name, |
---|
383 | | - struct nvkm_vmm **pvmm) |
---|
| 403 | + struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size, |
---|
| 404 | + void *argv, u32 argc, struct lock_class_key *key, |
---|
| 405 | + const char *name, struct nvkm_vmm **pvmm) |
---|
384 | 406 | { |
---|
385 | 407 | switch (mmu->subdev.device->fb->page) { |
---|
386 | | - case 16: return nv04_vmm_new_(func_16, mmu, 0, addr, size, |
---|
| 408 | + case 16: return nv04_vmm_new_(func_16, mmu, 0, managed, addr, size, |
---|
387 | 409 | argv, argc, key, name, pvmm); |
---|
388 | | - case 17: return nv04_vmm_new_(func_17, mmu, 0, addr, size, |
---|
| 410 | + case 17: return nv04_vmm_new_(func_17, mmu, 0, managed, addr, size, |
---|
389 | 411 | argv, argc, key, name, pvmm); |
---|
390 | 412 | default: |
---|
391 | 413 | WARN_ON(1); |
---|
.. | .. |
---|
394 | 416 | } |
---|
395 | 417 | |
---|
/* Construct a GF100 VMM.  Thin wrapper that forwards all arguments to
 * gf100_vmm_new_() along with the two big-page function tables; the
 * choice between them (fb->page == 16 vs 17) is made there.
 */
int
gf100_vmm_new(struct nvkm_mmu *mmu, bool managed, u64 addr, u64 size,
	      void *argv, u32 argc, struct lock_class_key *key,
	      const char *name, struct nvkm_vmm **pvmm)
{
	return gf100_vmm_new_(&gf100_vmm_16, &gf100_vmm_17, mmu, managed, addr,
			      size, argv, argc, key, name, pvmm);
}
---|