.. | .. |
31 | 31 | #include <linux/swiotlb.h> |
32 | 32 | |
33 | 33 | #include "nouveau_drv.h" |
34 | | -#include "nouveau_dma.h" |
| 34 | +#include "nouveau_chan.h" |
35 | 35 | #include "nouveau_fence.h" |
36 | 36 | |
37 | 37 | #include "nouveau_bo.h" |
.. | .. |
43 | 43 | #include <nvif/class.h> |
44 | 44 | #include <nvif/if500b.h> |
45 | 45 | #include <nvif/if900b.h> |
| 46 | + |
| 47 | +static int nouveau_ttm_tt_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm, |
| 48 | + struct ttm_resource *reg); |
46 | 49 | |
47 | 50 | /* |
48 | 51 | * NV10-NV40 tiling helpers |
.. | .. |
136 | 139 | struct drm_device *dev = drm->dev; |
137 | 140 | struct nouveau_bo *nvbo = nouveau_bo(bo); |
138 | 141 | |
139 | | - if (unlikely(nvbo->gem.filp)) |
140 | | - DRM_ERROR("bo %p still attached to GEM object\n", bo); |
141 | 142 | WARN_ON(nvbo->pin_refcnt > 0); |
| 143 | + nouveau_bo_del_io_reserve_lru(bo); |
142 | 144 | nv10_bo_put_tile_region(dev, nvbo->tile, NULL); |
| 145 | + |
| 146 | + /* |
| 147 | + * If nouveau_bo_new() allocated this buffer, the GEM object was never |
| 148 | + * initialized, so don't attempt to release it. |
| 149 | + */ |
| 150 | + if (bo->base.dev) |
| 151 | + drm_gem_object_release(&bo->base); |
| 152 | + |
143 | 153 | kfree(nvbo); |
144 | 154 | } |
145 | 155 | |
.. | .. |
152 | 162 | } |
153 | 163 | |
154 | 164 | static void |
155 | | -nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags, |
156 | | - int *align, u64 *size) |
| 165 | +nouveau_bo_fixup_align(struct nouveau_bo *nvbo, int *align, u64 *size) |
157 | 166 | { |
158 | 167 | struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); |
159 | 168 | struct nvif_device *device = &drm->client.device; |
.. | .. |
185 | 194 | *size = roundup_64(*size, PAGE_SIZE); |
186 | 195 | } |
187 | 196 | |
188 | | -int |
189 | | -nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align, |
190 | | - uint32_t flags, uint32_t tile_mode, uint32_t tile_flags, |
191 | | - struct sg_table *sg, struct reservation_object *robj, |
192 | | - struct nouveau_bo **pnvbo) |
| 197 | +struct nouveau_bo * |
| 198 | +nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain, |
| 199 | + u32 tile_mode, u32 tile_flags) |
193 | 200 | { |
194 | 201 | struct nouveau_drm *drm = cli->drm; |
195 | 202 | struct nouveau_bo *nvbo; |
196 | 203 | struct nvif_mmu *mmu = &cli->mmu; |
197 | | - struct nvif_vmm *vmm = &cli->vmm.vmm; |
198 | | - size_t acc_size; |
199 | | - int type = ttm_bo_type_device; |
200 | | - int ret, i, pi = -1; |
| 204 | + struct nvif_vmm *vmm = cli->svm.cli ? &cli->svm.vmm : &cli->vmm.vmm; |
| 205 | + int i, pi = -1; |
201 | 206 | |
202 | | - if (!size) { |
203 | | - NV_WARN(drm, "skipped size %016llx\n", size); |
204 | | - return -EINVAL; |
| 207 | + if (!*size) { |
| 208 | + NV_WARN(drm, "skipped size %016llx\n", *size); |
| 209 | + return ERR_PTR(-EINVAL); |
205 | 210 | } |
206 | | - |
207 | | - if (sg) |
208 | | - type = ttm_bo_type_sg; |
209 | 211 | |
210 | 212 | nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL); |
211 | 213 | if (!nvbo) |
212 | | - return -ENOMEM; |
| 214 | + return ERR_PTR(-ENOMEM); |
213 | 215 | INIT_LIST_HEAD(&nvbo->head); |
214 | 216 | INIT_LIST_HEAD(&nvbo->entry); |
215 | 217 | INIT_LIST_HEAD(&nvbo->vma_list); |
.. | .. |
219 | 221 | * mapping, but is what NOUVEAU_GEM_DOMAIN_COHERENT gets translated |
220 | 222 | * into in nouveau_gem_new(). |
221 | 223 | */ |
222 | | - if (flags & TTM_PL_FLAG_UNCACHED) { |
| 224 | + if (domain & NOUVEAU_GEM_DOMAIN_COHERENT) { |
223 | 225 | /* Determine if we can get a cache-coherent map, forcing |
224 | 226 | * uncached mapping if we can't. |
225 | 227 | */ |
.. | .. |
231 | 233 | nvbo->kind = (tile_flags & 0x0000ff00) >> 8; |
232 | 234 | if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) { |
233 | 235 | kfree(nvbo); |
234 | | - return -EINVAL; |
| 236 | + return ERR_PTR(-EINVAL); |
235 | 237 | } |
236 | 238 | |
237 | 239 | nvbo->comp = mmu->kind[nvbo->kind] != nvbo->kind; |
.. | .. |
241 | 243 | nvbo->comp = (tile_flags & 0x00030000) >> 16; |
242 | 244 | if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) { |
243 | 245 | kfree(nvbo); |
244 | | - return -EINVAL; |
| 246 | + return ERR_PTR(-EINVAL); |
245 | 247 | } |
246 | 248 | } else { |
247 | 249 | nvbo->zeta = (tile_flags & 0x00000007); |
.. | .. |
259 | 261 | * Skip page sizes that can't support needed domains. |
260 | 262 | */ |
261 | 263 | if (cli->device.info.family > NV_DEVICE_INFO_V0_CURIE && |
262 | | - (flags & TTM_PL_FLAG_VRAM) && !vmm->page[i].vram) |
| 264 | + (domain & NOUVEAU_GEM_DOMAIN_VRAM) && !vmm->page[i].vram) |
263 | 265 | continue; |
264 | | - if ((flags & TTM_PL_FLAG_TT) && |
| 266 | + if ((domain & NOUVEAU_GEM_DOMAIN_GART) && |
265 | 267 | (!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT)) |
266 | 268 | continue; |
267 | 269 | |
.. | .. |
273 | 275 | pi = i; |
274 | 276 | |
275 | 277 | /* Stop once the buffer is larger than the current page size. */ |
276 | | - if (size >= 1ULL << vmm->page[i].shift) |
| 278 | + if (*size >= 1ULL << vmm->page[i].shift) |
277 | 279 | break; |
278 | 280 | } |
279 | 281 | |
280 | | - if (WARN_ON(pi < 0)) |
281 | | - return -EINVAL; |
| 282 | + if (WARN_ON(pi < 0)) { |
| 283 | + kfree(nvbo); |
| 284 | + return ERR_PTR(-EINVAL); |
| 285 | + } |
282 | 286 | |
283 | 287 | /* Disable compression if suitable settings couldn't be found. */ |
284 | 288 | if (nvbo->comp && !vmm->page[pi].comp) { |
.. | .. |
288 | 292 | } |
289 | 293 | nvbo->page = vmm->page[pi].shift; |
290 | 294 | |
291 | | - nouveau_bo_fixup_align(nvbo, flags, &align, &size); |
| 295 | + nouveau_bo_fixup_align(nvbo, align, size); |
| 296 | + |
| 297 | + return nvbo; |
| 298 | +} |
| 299 | + |
| 300 | +int |
| 301 | +nouveau_bo_init(struct nouveau_bo *nvbo, u64 size, int align, u32 domain, |
| 302 | + struct sg_table *sg, struct dma_resv *robj) |
| 303 | +{ |
| 304 | + int type = sg ? ttm_bo_type_sg : ttm_bo_type_device; |
| 305 | + size_t acc_size; |
| 306 | + int ret; |
| 307 | + |
| 308 | + acc_size = ttm_bo_dma_acc_size(nvbo->bo.bdev, size, sizeof(*nvbo)); |
| 309 | + |
292 | 310 | nvbo->bo.mem.num_pages = size >> PAGE_SHIFT; |
293 | | - nouveau_bo_placement_set(nvbo, flags, 0); |
| 311 | + nouveau_bo_placement_set(nvbo, domain, 0); |
| 312 | + INIT_LIST_HEAD(&nvbo->io_reserve_lru); |
294 | 313 | |
295 | | - acc_size = ttm_bo_dma_acc_size(&drm->ttm.bdev, size, |
296 | | - sizeof(struct nouveau_bo)); |
297 | | - |
298 | | - ret = ttm_bo_init(&drm->ttm.bdev, &nvbo->bo, size, |
299 | | - type, &nvbo->placement, |
300 | | - align >> PAGE_SHIFT, false, acc_size, sg, |
301 | | - robj, nouveau_bo_del_ttm); |
| 314 | + ret = ttm_bo_init(nvbo->bo.bdev, &nvbo->bo, size, type, |
| 315 | + &nvbo->placement, align >> PAGE_SHIFT, false, |
| 316 | + acc_size, sg, robj, nouveau_bo_del_ttm); |
302 | 317 | if (ret) { |
303 | 318 | /* ttm will call nouveau_bo_del_ttm if it fails.. */ |
304 | 319 | return ret; |
305 | 320 | } |
| 321 | + |
| 322 | + return 0; |
| 323 | +} |
| 324 | + |
| 325 | +int |
| 326 | +nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align, |
| 327 | + uint32_t domain, uint32_t tile_mode, uint32_t tile_flags, |
| 328 | + struct sg_table *sg, struct dma_resv *robj, |
| 329 | + struct nouveau_bo **pnvbo) |
| 330 | +{ |
| 331 | + struct nouveau_bo *nvbo; |
| 332 | + int ret; |
| 333 | + |
| 334 | + nvbo = nouveau_bo_alloc(cli, &size, &align, domain, tile_mode, |
| 335 | + tile_flags); |
| 336 | + if (IS_ERR(nvbo)) |
| 337 | + return PTR_ERR(nvbo); |
| 338 | + |
| 339 | + ret = nouveau_bo_init(nvbo, size, align, domain, sg, robj); |
| 340 | + if (ret) |
| 341 | + return ret; |
306 | 342 | |
307 | 343 | *pnvbo = nvbo; |
308 | 344 | return 0; |
309 | 345 | } |
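
nouveau_bo_new() is now a thin wrapper over the new nouveau_bo_alloc()/nouveau_bo_init() pair. The point of the split is to let a caller run code between allocating the nouveau_bo and handing it to ttm_bo_init(), which is what publishes the BO. A minimal sketch of the intended GEM flow, assuming the matching nouveau_gem_new() change from the same series (the drm_gem_object_init() call is what sets bo->base.dev, the condition nouveau_bo_del_ttm() tests above; variable names as in this file):

	nvbo = nouveau_bo_alloc(cli, &size, &align, domain, tile_mode, tile_flags);
	if (IS_ERR(nvbo))
		return PTR_ERR(nvbo);

	/* Hypothetical GEM-path step: initialize the embedded GEM object
	 * before TTM init, so bo->base.dev is set when the BO goes live. */
	ret = drm_gem_object_init(drm->dev, &nvbo->bo.base, size);
	if (ret) {
		kfree(nvbo);	/* destructor not registered yet */
		return ret;
	}

	ret = nouveau_bo_init(nvbo, size, align, domain, NULL, NULL);
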
310 | 346 | |
311 | 347 | static void |
312 | | -set_placement_list(struct ttm_place *pl, unsigned *n, uint32_t type, uint32_t flags) |
| 348 | +set_placement_list(struct nouveau_drm *drm, struct ttm_place *pl, unsigned *n, |
| 349 | + uint32_t domain, uint32_t flags) |
313 | 350 | { |
314 | 351 | *n = 0; |
315 | 352 | |
316 | | - if (type & TTM_PL_FLAG_VRAM) |
317 | | - pl[(*n)++].flags = TTM_PL_FLAG_VRAM | flags; |
318 | | - if (type & TTM_PL_FLAG_TT) |
319 | | - pl[(*n)++].flags = TTM_PL_FLAG_TT | flags; |
320 | | - if (type & TTM_PL_FLAG_SYSTEM) |
321 | | - pl[(*n)++].flags = TTM_PL_FLAG_SYSTEM | flags; |
| 353 | + if (domain & NOUVEAU_GEM_DOMAIN_VRAM) { |
| 354 | + struct nvif_mmu *mmu = &drm->client.mmu; |
| 355 | + |
| 356 | + pl[*n].mem_type = TTM_PL_VRAM; |
| 357 | + pl[*n].flags = flags & ~TTM_PL_FLAG_CACHED; |
| 358 | + |
| 359 | + /* Some BARs do not support being ioremapped WC */ |
| 360 | + if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA && |
| 361 | + mmu->type[drm->ttm.type_vram].type & NVIF_MEM_UNCACHED) |
| 362 | + pl[*n].flags &= ~TTM_PL_FLAG_WC; |
| 363 | + |
| 364 | + (*n)++; |
| 365 | + } |
| 366 | + if (domain & NOUVEAU_GEM_DOMAIN_GART) { |
| 367 | + pl[*n].mem_type = TTM_PL_TT; |
| 368 | + pl[*n].flags = flags; |
| 369 | + |
| 370 | + if (drm->agp.bridge) |
| 371 | + pl[*n].flags &= ~TTM_PL_FLAG_CACHED; |
| 372 | + |
| 373 | + (*n)++; |
| 374 | + } |
| 375 | + if (domain & NOUVEAU_GEM_DOMAIN_CPU) { |
| 376 | + pl[*n].mem_type = TTM_PL_SYSTEM; |
| 377 | + pl[(*n)++].flags = flags; |
| 378 | + } |
322 | 379 | } |
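
set_placement_list() is now the single point where the uAPI domain bits become TTM memory types, absorbing the per-type caching rules that the deleted nouveau_bo_init_mem_type() used to set up. An illustrative call (the function is static to this file; expected results read off the code above, not a tested snippet):

	struct ttm_place pl[3];
	unsigned int n;

	set_placement_list(drm, pl, &n,
			   NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART,
			   TTM_PL_MASK_CACHING);
	/* n == 2; pl[0].mem_type == TTM_PL_VRAM with TTM_PL_FLAG_CACHED
	 * (and, on BARs that can't be ioremapped write-combined, also
	 * TTM_PL_FLAG_WC) stripped; pl[1].mem_type == TTM_PL_TT. */
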
323 | 380 | |
324 | 381 | static void |
325 | | -set_placement_range(struct nouveau_bo *nvbo, uint32_t type) |
| 382 | +set_placement_range(struct nouveau_bo *nvbo, uint32_t domain) |
326 | 383 | { |
327 | 384 | struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); |
328 | 385 | u32 vram_pages = drm->client.device.info.ram_size >> PAGE_SHIFT; |
329 | 386 | unsigned i, fpfn, lpfn; |
330 | 387 | |
331 | 388 | if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CELSIUS && |
332 | | - nvbo->mode && (type & TTM_PL_FLAG_VRAM) && |
| 389 | + nvbo->mode && (domain & NOUVEAU_GEM_DOMAIN_VRAM) && |
333 | 390 | nvbo->bo.mem.num_pages < vram_pages / 4) { |
334 | 391 | /* |
335 | 392 | * Make sure that the color and depth buffers are handled |
.. | .. |
356 | 413 | } |
357 | 414 | |
358 | 415 | void |
359 | | -nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy) |
| 416 | +nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t domain, |
| 417 | + uint32_t busy) |
360 | 418 | { |
| 419 | + struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); |
361 | 420 | struct ttm_placement *pl = &nvbo->placement; |
362 | 421 | uint32_t flags = (nvbo->force_coherent ? TTM_PL_FLAG_UNCACHED : |
363 | 422 | TTM_PL_MASK_CACHING) | |
364 | 423 | (nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0); |
365 | 424 | |
366 | 425 | pl->placement = nvbo->placements; |
367 | | - set_placement_list(nvbo->placements, &pl->num_placement, |
368 | | - type, flags); |
| 426 | + set_placement_list(drm, nvbo->placements, &pl->num_placement, |
| 427 | + domain, flags); |
369 | 428 | |
370 | 429 | pl->busy_placement = nvbo->busy_placements; |
371 | | - set_placement_list(nvbo->busy_placements, &pl->num_busy_placement, |
372 | | - type | busy, flags); |
| 430 | + set_placement_list(drm, nvbo->busy_placements, &pl->num_busy_placement, |
| 431 | + domain | busy, flags); |
373 | 432 | |
374 | | - set_placement_range(nvbo, type); |
| 433 | + set_placement_range(nvbo, domain); |
375 | 434 | } |
376 | 435 | |
377 | 436 | int |
378 | | -nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype, bool contig) |
| 437 | +nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig) |
379 | 438 | { |
380 | 439 | struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); |
381 | 440 | struct ttm_buffer_object *bo = &nvbo->bo; |
.. | .. |
387 | 446 | return ret; |
388 | 447 | |
389 | 448 | if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA && |
390 | | - memtype == TTM_PL_FLAG_VRAM && contig) { |
| 449 | + domain == NOUVEAU_GEM_DOMAIN_VRAM && contig) { |
391 | 450 | if (!nvbo->contig) { |
392 | 451 | nvbo->contig = true; |
393 | 452 | force = true; |
.. | .. |
396 | 455 | } |
397 | 456 | |
398 | 457 | if (nvbo->pin_refcnt) { |
399 | | - if (!(memtype & (1 << bo->mem.mem_type)) || evict) { |
| 458 | + bool error = evict; |
| 459 | + |
| 460 | + switch (bo->mem.mem_type) { |
| 461 | + case TTM_PL_VRAM: |
| 462 | + error |= !(domain & NOUVEAU_GEM_DOMAIN_VRAM); |
| 463 | + break; |
| 464 | + case TTM_PL_TT: |
| 465 | + error |= !(domain & NOUVEAU_GEM_DOMAIN_GART); |
| 466 | + default: |
| 467 | + break; |
| 468 | + } |
| 469 | + |
| 470 | + if (error) { |
400 | 471 | NV_ERROR(drm, "bo %p pinned elsewhere: " |
401 | 472 | "0x%08x vs 0x%08x\n", bo, |
402 | | - 1 << bo->mem.mem_type, memtype); |
| 473 | + bo->mem.mem_type, domain); |
403 | 474 | ret = -EBUSY; |
404 | 475 | } |
405 | 476 | nvbo->pin_refcnt++; |
.. | .. |
407 | 478 | } |
408 | 479 | |
409 | 480 | if (evict) { |
410 | | - nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT, 0); |
| 481 | + nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, 0); |
411 | 482 | ret = nouveau_bo_validate(nvbo, false, false); |
412 | 483 | if (ret) |
413 | 484 | goto out; |
414 | 485 | } |
415 | 486 | |
416 | 487 | nvbo->pin_refcnt++; |
417 | | - nouveau_bo_placement_set(nvbo, memtype, 0); |
| 488 | + nouveau_bo_placement_set(nvbo, domain, 0); |
418 | 489 | |
419 | 490 | /* drop pin_refcnt temporarily, so we don't trip the assertion |
420 | 491 | * in nouveau_bo_move() that makes sure we're not trying to |
.. | .. |
460 | 531 | if (ref) |
461 | 532 | goto out; |
462 | 533 | |
463 | | - nouveau_bo_placement_set(nvbo, bo->mem.placement, 0); |
| 534 | + switch (bo->mem.mem_type) { |
| 535 | + case TTM_PL_VRAM: |
| 536 | + nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, 0); |
| 537 | + break; |
| 538 | + case TTM_PL_TT: |
| 539 | + nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, 0); |
| 540 | + break; |
| 541 | + default: |
| 542 | + break; |
| 543 | + } |
464 | 544 | |
465 | 545 | ret = nouveau_bo_validate(nvbo, false, false); |
466 | 546 | if (ret == 0) { |
.. | .. |
544 | 624 | PAGE_SIZE, DMA_FROM_DEVICE); |
545 | 625 | } |
546 | 626 | |
| 627 | +void nouveau_bo_add_io_reserve_lru(struct ttm_buffer_object *bo) |
| 628 | +{ |
| 629 | + struct nouveau_drm *drm = nouveau_bdev(bo->bdev); |
| 630 | + struct nouveau_bo *nvbo = nouveau_bo(bo); |
| 631 | + |
| 632 | + mutex_lock(&drm->ttm.io_reserve_mutex); |
| 633 | + list_move_tail(&nvbo->io_reserve_lru, &drm->ttm.io_reserve_lru); |
| 634 | + mutex_unlock(&drm->ttm.io_reserve_mutex); |
| 635 | +} |
| 636 | + |
| 637 | +void nouveau_bo_del_io_reserve_lru(struct ttm_buffer_object *bo) |
| 638 | +{ |
| 639 | + struct nouveau_drm *drm = nouveau_bdev(bo->bdev); |
| 640 | + struct nouveau_bo *nvbo = nouveau_bo(bo); |
| 641 | + |
| 642 | + mutex_lock(&drm->ttm.io_reserve_mutex); |
| 643 | + list_del_init(&nvbo->io_reserve_lru); |
| 644 | + mutex_unlock(&drm->ttm.io_reserve_mutex); |
| 645 | +} |
| 646 | + |
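
These two helpers maintain an LRU of BOs whose I/O space (e.g. a BAR1 window) is currently reserved; nouveau_ttm_io_mem_reserve() further down evicts from it on -ENOSPC. A sketch of the expected call sites (the fault-path caller is an assumption about the companion nouveau_ttm.c change, which is not part of this hunk):

	/* Fault path: mark the BO most-recently-used on every CPU access,
	 * so the eviction loop below picks the coldest mapping first. */
	nouveau_bo_add_io_reserve_lru(bo);

	/* Teardown paths: drop the BO from the LRU; see nouveau_bo_del_ttm()
	 * and nouveau_bo_move_ntfy() elsewhere in this diff. */
	nouveau_bo_del_io_reserve_lru(bo);
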
547 | 647 | int |
548 | 648 | nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible, |
549 | 649 | bool no_wait_gpu) |
.. | .. |
617 | 717 | } |
618 | 718 | |
619 | 719 | static int |
620 | | -nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags) |
| 720 | +nouveau_ttm_tt_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm, |
| 721 | + struct ttm_resource *reg) |
621 | 722 | { |
622 | | - /* We'll do this from user space. */ |
623 | | - return 0; |
| 723 | +#if IS_ENABLED(CONFIG_AGP) |
| 724 | + struct nouveau_drm *drm = nouveau_bdev(bdev); |
| 725 | +#endif |
| 726 | + if (!reg) |
| 727 | + return -EINVAL; |
| 728 | +#if IS_ENABLED(CONFIG_AGP) |
| 729 | + if (drm->agp.bridge) |
| 730 | + return ttm_agp_bind(ttm, reg); |
| 731 | +#endif |
| 732 | + return nouveau_sgdma_bind(bdev, ttm, reg); |
624 | 733 | } |
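
The trivial nouveau_bo_invalidate_caches() stub is dropped and nouveau_ttm_tt_bind() takes its place in the file: in this TTM rework, binding a ttm_tt to a resource becomes an explicit ttm_bo_driver hook rather than happening inside the removed ttm_tt_bind() helper. The forward declaration added near the top of the file exists because nouveau_bo_move_flipd() calls this function before its definition. Folding the CONFIG_AGP guards, the dispatch is simply:

	/* Condensed view of the function above, for readability only. */
	if (IS_ENABLED(CONFIG_AGP) && drm->agp.bridge)
		return ttm_agp_bind(ttm, reg);
	return nouveau_sgdma_bind(bdev, ttm, reg);
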
625 | 734 | |
626 | | -static int |
627 | | -nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, |
628 | | - struct ttm_mem_type_manager *man) |
| 735 | +static void |
| 736 | +nouveau_ttm_tt_unbind(struct ttm_bo_device *bdev, struct ttm_tt *ttm) |
629 | 737 | { |
| 738 | +#if IS_ENABLED(CONFIG_AGP) |
630 | 739 | struct nouveau_drm *drm = nouveau_bdev(bdev); |
631 | | - struct nvif_mmu *mmu = &drm->client.mmu; |
632 | 740 | |
633 | | - switch (type) { |
634 | | - case TTM_PL_SYSTEM: |
635 | | - man->flags = TTM_MEMTYPE_FLAG_MAPPABLE; |
636 | | - man->available_caching = TTM_PL_MASK_CACHING; |
637 | | - man->default_caching = TTM_PL_FLAG_CACHED; |
638 | | - break; |
639 | | - case TTM_PL_VRAM: |
640 | | - man->flags = TTM_MEMTYPE_FLAG_FIXED | |
641 | | - TTM_MEMTYPE_FLAG_MAPPABLE; |
642 | | - man->available_caching = TTM_PL_FLAG_UNCACHED | |
643 | | - TTM_PL_FLAG_WC; |
644 | | - man->default_caching = TTM_PL_FLAG_WC; |
645 | | - |
646 | | - if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) { |
647 | | - /* Some BARs do not support being ioremapped WC */ |
648 | | - const u8 type = mmu->type[drm->ttm.type_vram].type; |
649 | | - if (type & NVIF_MEM_UNCACHED) { |
650 | | - man->available_caching = TTM_PL_FLAG_UNCACHED; |
651 | | - man->default_caching = TTM_PL_FLAG_UNCACHED; |
652 | | - } |
653 | | - |
654 | | - man->func = &nouveau_vram_manager; |
655 | | - man->io_reserve_fastpath = false; |
656 | | - man->use_io_reserve_lru = true; |
657 | | - } else { |
658 | | - man->func = &ttm_bo_manager_func; |
659 | | - } |
660 | | - break; |
661 | | - case TTM_PL_TT: |
662 | | - if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) |
663 | | - man->func = &nouveau_gart_manager; |
664 | | - else |
665 | | - if (!drm->agp.bridge) |
666 | | - man->func = &nv04_gart_manager; |
667 | | - else |
668 | | - man->func = &ttm_bo_manager_func; |
669 | | - |
670 | | - if (drm->agp.bridge) { |
671 | | - man->flags = TTM_MEMTYPE_FLAG_MAPPABLE; |
672 | | - man->available_caching = TTM_PL_FLAG_UNCACHED | |
673 | | - TTM_PL_FLAG_WC; |
674 | | - man->default_caching = TTM_PL_FLAG_WC; |
675 | | - } else { |
676 | | - man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | |
677 | | - TTM_MEMTYPE_FLAG_CMA; |
678 | | - man->available_caching = TTM_PL_MASK_CACHING; |
679 | | - man->default_caching = TTM_PL_FLAG_CACHED; |
680 | | - } |
681 | | - |
682 | | - break; |
683 | | - default: |
684 | | - return -EINVAL; |
| 741 | + if (drm->agp.bridge) { |
| 742 | + ttm_agp_unbind(ttm); |
| 743 | + return; |
685 | 744 | } |
686 | | - return 0; |
| 745 | +#endif |
| 746 | + nouveau_sgdma_unbind(bdev, ttm); |
687 | 747 | } |
688 | 748 | |
689 | 749 | static void |
.. | .. |
693 | 753 | |
694 | 754 | switch (bo->mem.mem_type) { |
695 | 755 | case TTM_PL_VRAM: |
696 | | - nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT, |
697 | | - TTM_PL_FLAG_SYSTEM); |
| 756 | + nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, |
| 757 | + NOUVEAU_GEM_DOMAIN_CPU); |
698 | 758 | break; |
699 | 759 | default: |
700 | | - nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0); |
| 760 | + nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_CPU, 0); |
701 | 761 | break; |
702 | 762 | } |
703 | 763 | |
704 | 764 | *pl = nvbo->placement; |
705 | 765 | } |
706 | 766 | |
707 | | - |
708 | | -static int |
709 | | -nve0_bo_move_init(struct nouveau_channel *chan, u32 handle) |
710 | | -{ |
711 | | - int ret = RING_SPACE(chan, 2); |
712 | | - if (ret == 0) { |
713 | | - BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1); |
714 | | - OUT_RING (chan, handle & 0x0000ffff); |
715 | | - FIRE_RING (chan); |
716 | | - } |
717 | | - return ret; |
718 | | -} |
719 | | - |
720 | | -static int |
721 | | -nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo, |
722 | | - struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg) |
723 | | -{ |
724 | | - struct nouveau_mem *mem = nouveau_mem(old_reg); |
725 | | - int ret = RING_SPACE(chan, 10); |
726 | | - if (ret == 0) { |
727 | | - BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8); |
728 | | - OUT_RING (chan, upper_32_bits(mem->vma[0].addr)); |
729 | | - OUT_RING (chan, lower_32_bits(mem->vma[0].addr)); |
730 | | - OUT_RING (chan, upper_32_bits(mem->vma[1].addr)); |
731 | | - OUT_RING (chan, lower_32_bits(mem->vma[1].addr)); |
732 | | - OUT_RING (chan, PAGE_SIZE); |
733 | | - OUT_RING (chan, PAGE_SIZE); |
734 | | - OUT_RING (chan, PAGE_SIZE); |
735 | | - OUT_RING (chan, new_reg->num_pages); |
736 | | - BEGIN_IMC0(chan, NvSubCopy, 0x0300, 0x0386); |
737 | | - } |
738 | | - return ret; |
739 | | -} |
740 | | - |
741 | | -static int |
742 | | -nvc0_bo_move_init(struct nouveau_channel *chan, u32 handle) |
743 | | -{ |
744 | | - int ret = RING_SPACE(chan, 2); |
745 | | - if (ret == 0) { |
746 | | - BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1); |
747 | | - OUT_RING (chan, handle); |
748 | | - } |
749 | | - return ret; |
750 | | -} |
751 | | - |
752 | | -static int |
753 | | -nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo, |
754 | | - struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg) |
755 | | -{ |
756 | | - struct nouveau_mem *mem = nouveau_mem(old_reg); |
757 | | - u64 src_offset = mem->vma[0].addr; |
758 | | - u64 dst_offset = mem->vma[1].addr; |
759 | | - u32 page_count = new_reg->num_pages; |
760 | | - int ret; |
761 | | - |
762 | | - page_count = new_reg->num_pages; |
763 | | - while (page_count) { |
764 | | - int line_count = (page_count > 8191) ? 8191 : page_count; |
765 | | - |
766 | | - ret = RING_SPACE(chan, 11); |
767 | | - if (ret) |
768 | | - return ret; |
769 | | - |
770 | | - BEGIN_NVC0(chan, NvSubCopy, 0x030c, 8); |
771 | | - OUT_RING (chan, upper_32_bits(src_offset)); |
772 | | - OUT_RING (chan, lower_32_bits(src_offset)); |
773 | | - OUT_RING (chan, upper_32_bits(dst_offset)); |
774 | | - OUT_RING (chan, lower_32_bits(dst_offset)); |
775 | | - OUT_RING (chan, PAGE_SIZE); |
776 | | - OUT_RING (chan, PAGE_SIZE); |
777 | | - OUT_RING (chan, PAGE_SIZE); |
778 | | - OUT_RING (chan, line_count); |
779 | | - BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1); |
780 | | - OUT_RING (chan, 0x00000110); |
781 | | - |
782 | | - page_count -= line_count; |
783 | | - src_offset += (PAGE_SIZE * line_count); |
784 | | - dst_offset += (PAGE_SIZE * line_count); |
785 | | - } |
786 | | - |
787 | | - return 0; |
788 | | -} |
789 | | - |
790 | | -static int |
791 | | -nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, |
792 | | - struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg) |
793 | | -{ |
794 | | - struct nouveau_mem *mem = nouveau_mem(old_reg); |
795 | | - u64 src_offset = mem->vma[0].addr; |
796 | | - u64 dst_offset = mem->vma[1].addr; |
797 | | - u32 page_count = new_reg->num_pages; |
798 | | - int ret; |
799 | | - |
800 | | - page_count = new_reg->num_pages; |
801 | | - while (page_count) { |
802 | | - int line_count = (page_count > 2047) ? 2047 : page_count; |
803 | | - |
804 | | - ret = RING_SPACE(chan, 12); |
805 | | - if (ret) |
806 | | - return ret; |
807 | | - |
808 | | - BEGIN_NVC0(chan, NvSubCopy, 0x0238, 2); |
809 | | - OUT_RING (chan, upper_32_bits(dst_offset)); |
810 | | - OUT_RING (chan, lower_32_bits(dst_offset)); |
811 | | - BEGIN_NVC0(chan, NvSubCopy, 0x030c, 6); |
812 | | - OUT_RING (chan, upper_32_bits(src_offset)); |
813 | | - OUT_RING (chan, lower_32_bits(src_offset)); |
814 | | - OUT_RING (chan, PAGE_SIZE); /* src_pitch */ |
815 | | - OUT_RING (chan, PAGE_SIZE); /* dst_pitch */ |
816 | | - OUT_RING (chan, PAGE_SIZE); /* line_length */ |
817 | | - OUT_RING (chan, line_count); |
818 | | - BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1); |
819 | | - OUT_RING (chan, 0x00100110); |
820 | | - |
821 | | - page_count -= line_count; |
822 | | - src_offset += (PAGE_SIZE * line_count); |
823 | | - dst_offset += (PAGE_SIZE * line_count); |
824 | | - } |
825 | | - |
826 | | - return 0; |
827 | | -} |
828 | | - |
829 | | -static int |
830 | | -nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo, |
831 | | - struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg) |
832 | | -{ |
833 | | - struct nouveau_mem *mem = nouveau_mem(old_reg); |
834 | | - u64 src_offset = mem->vma[0].addr; |
835 | | - u64 dst_offset = mem->vma[1].addr; |
836 | | - u32 page_count = new_reg->num_pages; |
837 | | - int ret; |
838 | | - |
839 | | - page_count = new_reg->num_pages; |
840 | | - while (page_count) { |
841 | | - int line_count = (page_count > 8191) ? 8191 : page_count; |
842 | | - |
843 | | - ret = RING_SPACE(chan, 11); |
844 | | - if (ret) |
845 | | - return ret; |
846 | | - |
847 | | - BEGIN_NV04(chan, NvSubCopy, 0x030c, 8); |
848 | | - OUT_RING (chan, upper_32_bits(src_offset)); |
849 | | - OUT_RING (chan, lower_32_bits(src_offset)); |
850 | | - OUT_RING (chan, upper_32_bits(dst_offset)); |
851 | | - OUT_RING (chan, lower_32_bits(dst_offset)); |
852 | | - OUT_RING (chan, PAGE_SIZE); |
853 | | - OUT_RING (chan, PAGE_SIZE); |
854 | | - OUT_RING (chan, PAGE_SIZE); |
855 | | - OUT_RING (chan, line_count); |
856 | | - BEGIN_NV04(chan, NvSubCopy, 0x0300, 1); |
857 | | - OUT_RING (chan, 0x00000110); |
858 | | - |
859 | | - page_count -= line_count; |
860 | | - src_offset += (PAGE_SIZE * line_count); |
861 | | - dst_offset += (PAGE_SIZE * line_count); |
862 | | - } |
863 | | - |
864 | | - return 0; |
865 | | -} |
866 | | - |
867 | | -static int |
868 | | -nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo, |
869 | | - struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg) |
870 | | -{ |
871 | | - struct nouveau_mem *mem = nouveau_mem(old_reg); |
872 | | - int ret = RING_SPACE(chan, 7); |
873 | | - if (ret == 0) { |
874 | | - BEGIN_NV04(chan, NvSubCopy, 0x0320, 6); |
875 | | - OUT_RING (chan, upper_32_bits(mem->vma[0].addr)); |
876 | | - OUT_RING (chan, lower_32_bits(mem->vma[0].addr)); |
877 | | - OUT_RING (chan, upper_32_bits(mem->vma[1].addr)); |
878 | | - OUT_RING (chan, lower_32_bits(mem->vma[1].addr)); |
879 | | - OUT_RING (chan, 0x00000000 /* COPY */); |
880 | | - OUT_RING (chan, new_reg->num_pages << PAGE_SHIFT); |
881 | | - } |
882 | | - return ret; |
883 | | -} |
884 | | - |
885 | | -static int |
886 | | -nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo, |
887 | | - struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg) |
888 | | -{ |
889 | | - struct nouveau_mem *mem = nouveau_mem(old_reg); |
890 | | - int ret = RING_SPACE(chan, 7); |
891 | | - if (ret == 0) { |
892 | | - BEGIN_NV04(chan, NvSubCopy, 0x0304, 6); |
893 | | - OUT_RING (chan, new_reg->num_pages << PAGE_SHIFT); |
894 | | - OUT_RING (chan, upper_32_bits(mem->vma[0].addr)); |
895 | | - OUT_RING (chan, lower_32_bits(mem->vma[0].addr)); |
896 | | - OUT_RING (chan, upper_32_bits(mem->vma[1].addr)); |
897 | | - OUT_RING (chan, lower_32_bits(mem->vma[1].addr)); |
898 | | - OUT_RING (chan, 0x00000000 /* MODE_COPY, QUERY_NONE */); |
899 | | - } |
900 | | - return ret; |
901 | | -} |
902 | | - |
903 | | -static int |
904 | | -nv50_bo_move_init(struct nouveau_channel *chan, u32 handle) |
905 | | -{ |
906 | | - int ret = RING_SPACE(chan, 6); |
907 | | - if (ret == 0) { |
908 | | - BEGIN_NV04(chan, NvSubCopy, 0x0000, 1); |
909 | | - OUT_RING (chan, handle); |
910 | | - BEGIN_NV04(chan, NvSubCopy, 0x0180, 3); |
911 | | - OUT_RING (chan, chan->drm->ntfy.handle); |
912 | | - OUT_RING (chan, chan->vram.handle); |
913 | | - OUT_RING (chan, chan->vram.handle); |
914 | | - } |
915 | | - |
916 | | - return ret; |
917 | | -} |
918 | | - |
919 | | -static int |
920 | | -nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, |
921 | | - struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg) |
922 | | -{ |
923 | | - struct nouveau_mem *mem = nouveau_mem(old_reg); |
924 | | - u64 length = (new_reg->num_pages << PAGE_SHIFT); |
925 | | - u64 src_offset = mem->vma[0].addr; |
926 | | - u64 dst_offset = mem->vma[1].addr; |
927 | | - int src_tiled = !!mem->kind; |
928 | | - int dst_tiled = !!nouveau_mem(new_reg)->kind; |
929 | | - int ret; |
930 | | - |
931 | | - while (length) { |
932 | | - u32 amount, stride, height; |
933 | | - |
934 | | - ret = RING_SPACE(chan, 18 + 6 * (src_tiled + dst_tiled)); |
935 | | - if (ret) |
936 | | - return ret; |
937 | | - |
938 | | - amount = min(length, (u64)(4 * 1024 * 1024)); |
939 | | - stride = 16 * 4; |
940 | | - height = amount / stride; |
941 | | - |
942 | | - if (src_tiled) { |
943 | | - BEGIN_NV04(chan, NvSubCopy, 0x0200, 7); |
944 | | - OUT_RING (chan, 0); |
945 | | - OUT_RING (chan, 0); |
946 | | - OUT_RING (chan, stride); |
947 | | - OUT_RING (chan, height); |
948 | | - OUT_RING (chan, 1); |
949 | | - OUT_RING (chan, 0); |
950 | | - OUT_RING (chan, 0); |
951 | | - } else { |
952 | | - BEGIN_NV04(chan, NvSubCopy, 0x0200, 1); |
953 | | - OUT_RING (chan, 1); |
954 | | - } |
955 | | - if (dst_tiled) { |
956 | | - BEGIN_NV04(chan, NvSubCopy, 0x021c, 7); |
957 | | - OUT_RING (chan, 0); |
958 | | - OUT_RING (chan, 0); |
959 | | - OUT_RING (chan, stride); |
960 | | - OUT_RING (chan, height); |
961 | | - OUT_RING (chan, 1); |
962 | | - OUT_RING (chan, 0); |
963 | | - OUT_RING (chan, 0); |
964 | | - } else { |
965 | | - BEGIN_NV04(chan, NvSubCopy, 0x021c, 1); |
966 | | - OUT_RING (chan, 1); |
967 | | - } |
968 | | - |
969 | | - BEGIN_NV04(chan, NvSubCopy, 0x0238, 2); |
970 | | - OUT_RING (chan, upper_32_bits(src_offset)); |
971 | | - OUT_RING (chan, upper_32_bits(dst_offset)); |
972 | | - BEGIN_NV04(chan, NvSubCopy, 0x030c, 8); |
973 | | - OUT_RING (chan, lower_32_bits(src_offset)); |
974 | | - OUT_RING (chan, lower_32_bits(dst_offset)); |
975 | | - OUT_RING (chan, stride); |
976 | | - OUT_RING (chan, stride); |
977 | | - OUT_RING (chan, stride); |
978 | | - OUT_RING (chan, height); |
979 | | - OUT_RING (chan, 0x00000101); |
980 | | - OUT_RING (chan, 0x00000000); |
981 | | - BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1); |
982 | | - OUT_RING (chan, 0); |
983 | | - |
984 | | - length -= amount; |
985 | | - src_offset += amount; |
986 | | - dst_offset += amount; |
987 | | - } |
988 | | - |
989 | | - return 0; |
990 | | -} |
991 | | - |
992 | | -static int |
993 | | -nv04_bo_move_init(struct nouveau_channel *chan, u32 handle) |
994 | | -{ |
995 | | - int ret = RING_SPACE(chan, 4); |
996 | | - if (ret == 0) { |
997 | | - BEGIN_NV04(chan, NvSubCopy, 0x0000, 1); |
998 | | - OUT_RING (chan, handle); |
999 | | - BEGIN_NV04(chan, NvSubCopy, 0x0180, 1); |
1000 | | - OUT_RING (chan, chan->drm->ntfy.handle); |
1001 | | - } |
1002 | | - |
1003 | | - return ret; |
1004 | | -} |
1005 | | - |
1006 | | -static inline uint32_t |
1007 | | -nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo, |
1008 | | - struct nouveau_channel *chan, struct ttm_mem_reg *reg) |
1009 | | -{ |
1010 | | - if (reg->mem_type == TTM_PL_TT) |
1011 | | - return NvDmaTT; |
1012 | | - return chan->vram.handle; |
1013 | | -} |
1014 | | - |
1015 | | -static int |
1016 | | -nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, |
1017 | | - struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg) |
1018 | | -{ |
1019 | | - u32 src_offset = old_reg->start << PAGE_SHIFT; |
1020 | | - u32 dst_offset = new_reg->start << PAGE_SHIFT; |
1021 | | - u32 page_count = new_reg->num_pages; |
1022 | | - int ret; |
1023 | | - |
1024 | | - ret = RING_SPACE(chan, 3); |
1025 | | - if (ret) |
1026 | | - return ret; |
1027 | | - |
1028 | | - BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2); |
1029 | | - OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, old_reg)); |
1030 | | - OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, new_reg)); |
1031 | | - |
1032 | | - page_count = new_reg->num_pages; |
1033 | | - while (page_count) { |
1034 | | - int line_count = (page_count > 2047) ? 2047 : page_count; |
1035 | | - |
1036 | | - ret = RING_SPACE(chan, 11); |
1037 | | - if (ret) |
1038 | | - return ret; |
1039 | | - |
1040 | | - BEGIN_NV04(chan, NvSubCopy, |
1041 | | - NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8); |
1042 | | - OUT_RING (chan, src_offset); |
1043 | | - OUT_RING (chan, dst_offset); |
1044 | | - OUT_RING (chan, PAGE_SIZE); /* src_pitch */ |
1045 | | - OUT_RING (chan, PAGE_SIZE); /* dst_pitch */ |
1046 | | - OUT_RING (chan, PAGE_SIZE); /* line_length */ |
1047 | | - OUT_RING (chan, line_count); |
1048 | | - OUT_RING (chan, 0x00000101); |
1049 | | - OUT_RING (chan, 0x00000000); |
1050 | | - BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1); |
1051 | | - OUT_RING (chan, 0); |
1052 | | - |
1053 | | - page_count -= line_count; |
1054 | | - src_offset += (PAGE_SIZE * line_count); |
1055 | | - dst_offset += (PAGE_SIZE * line_count); |
1056 | | - } |
1057 | | - |
1058 | | - return 0; |
1059 | | -} |
1060 | | - |
1061 | 767 | static int |
1062 | 768 | nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo, |
1063 | | - struct ttm_mem_reg *reg) |
| 769 | + struct ttm_resource *reg) |
1064 | 770 | { |
1065 | 771 | struct nouveau_mem *old_mem = nouveau_mem(&bo->mem); |
1066 | 772 | struct nouveau_mem *new_mem = nouveau_mem(reg); |
.. | .. |
1092 | 798 | |
1093 | 799 | static int |
1094 | 800 | nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr, |
1095 | | - bool no_wait_gpu, struct ttm_mem_reg *new_reg) |
| 801 | + bool no_wait_gpu, struct ttm_resource *new_reg) |
1096 | 802 | { |
1097 | 803 | struct nouveau_drm *drm = nouveau_bdev(bo->bdev); |
1098 | 804 | struct nouveau_channel *chan = drm->ttm.chan; |
.. | .. |
1102 | 808 | |
1103 | 809 | /* create temporary vmas for the transfer and attach them to the |
1104 | 810 | * old nvkm_mem node, these will get cleaned up after ttm has |
1105 | | - * destroyed the ttm_mem_reg |
| 811 | + * destroyed the ttm_resource |
1106 | 812 | */ |
1107 | 813 | if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) { |
1108 | 814 | ret = nouveau_bo_move_prep(drm, bo, new_reg); |
.. | .. |
1119 | 825 | if (ret == 0) { |
1120 | 826 | ret = ttm_bo_move_accel_cleanup(bo, |
1121 | 827 | &fence->base, |
1122 | | - evict, |
| 828 | + evict, false, |
1123 | 829 | new_reg); |
1124 | 830 | nouveau_fence_unref(&fence); |
1125 | 831 | } |
.. | .. |
1132 | 838 | void |
1133 | 839 | nouveau_bo_move_init(struct nouveau_drm *drm) |
1134 | 840 | { |
1135 | | - static const struct { |
| 841 | + static const struct _method_table { |
1136 | 842 | const char *name; |
1137 | 843 | int engine; |
1138 | 844 | s32 oclass; |
1139 | 845 | int (*exec)(struct nouveau_channel *, |
1140 | 846 | struct ttm_buffer_object *, |
1141 | | - struct ttm_mem_reg *, struct ttm_mem_reg *); |
| 847 | + struct ttm_resource *, struct ttm_resource *); |
1142 | 848 | int (*init)(struct nouveau_channel *, u32 handle); |
1143 | 849 | } _methods[] = { |
| 850 | + { "COPY", 4, 0xc5b5, nve0_bo_move_copy, nve0_bo_move_init }, |
| 851 | + { "GRCE", 0, 0xc5b5, nve0_bo_move_copy, nvc0_bo_move_init }, |
1144 | 852 | { "COPY", 4, 0xc3b5, nve0_bo_move_copy, nve0_bo_move_init }, |
1145 | 853 | { "GRCE", 0, 0xc3b5, nve0_bo_move_copy, nvc0_bo_move_init }, |
1146 | 854 | { "COPY", 4, 0xc1b5, nve0_bo_move_copy, nve0_bo_move_init }, |
.. | .. |
1159 | 867 | { "M2MF", 0, 0x5039, nv50_bo_move_m2mf, nv50_bo_move_init }, |
1160 | 868 | { "M2MF", 0, 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init }, |
1161 | 869 | {}, |
1162 | | - { "CRYPT", 0, 0x88b4, nv98_bo_move_exec, nv50_bo_move_init }, |
1163 | | - }, *mthd = _methods; |
| 870 | + }; |
| 871 | + const struct _method_table *mthd = _methods; |
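
Naming the method-table type is what allows the old `}, *mthd = _methods;` tail to go away: with an anonymous struct, the iterator had to be declared in the same statement as the array (and, sharing its declaration specifiers, was itself static). A comment-sized summary of the table changes:

	/* Before: anonymous type forced "static const struct { ... }
	 *         _methods[] = { ... }, *mthd = _methods;".
	 * After:  "struct _method_table" lets mthd be an ordinary local
	 *         cursor. Two Turing copy-class entries (0xc5b5) are added,
	 *         and the dead CRYPT entry that sat after the {} sentinel
	 *         (and so was never reached) is dropped. */
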
1164 | 872 | const char *name = "CPU"; |
1165 | 873 | int ret; |
1166 | 874 | |
.. | .. |
1174 | 882 | if (chan == NULL) |
1175 | 883 | continue; |
1176 | 884 | |
1177 | | - ret = nvif_object_init(&chan->user, |
| 885 | + ret = nvif_object_ctor(&chan->user, "ttmBoMove", |
1178 | 886 | mthd->oclass | (mthd->engine << 16), |
1179 | 887 | mthd->oclass, NULL, 0, |
1180 | 888 | &drm->ttm.copy); |
1181 | 889 | if (ret == 0) { |
1182 | 890 | ret = mthd->init(chan, drm->ttm.copy.handle); |
1183 | 891 | if (ret) { |
1184 | | - nvif_object_fini(&drm->ttm.copy); |
| 892 | + nvif_object_dtor(&drm->ttm.copy); |
1185 | 893 | continue; |
1186 | 894 | } |
1187 | 895 | |
.. | .. |
1197 | 905 | |
1198 | 906 | static int |
1199 | 907 | nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr, |
1200 | | - bool no_wait_gpu, struct ttm_mem_reg *new_reg) |
| 908 | + bool no_wait_gpu, struct ttm_resource *new_reg) |
1201 | 909 | { |
1202 | 910 | struct ttm_operation_ctx ctx = { intr, no_wait_gpu }; |
1203 | 911 | struct ttm_place placement_memtype = { |
1204 | 912 | .fpfn = 0, |
1205 | 913 | .lpfn = 0, |
1206 | | - .flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING |
| 914 | + .mem_type = TTM_PL_TT, |
| 915 | + .flags = TTM_PL_MASK_CACHING |
1207 | 916 | }; |
1208 | 917 | struct ttm_placement placement; |
1209 | | - struct ttm_mem_reg tmp_reg; |
| 918 | + struct ttm_resource tmp_reg; |
1210 | 919 | int ret; |
1211 | 920 | |
1212 | 921 | placement.num_placement = placement.num_busy_placement = 1; |
.. | .. |
1218 | 927 | if (ret) |
1219 | 928 | return ret; |
1220 | 929 | |
1221 | | - ret = ttm_tt_bind(bo->ttm, &tmp_reg, &ctx); |
| 930 | + ret = ttm_tt_populate(bo->bdev, bo->ttm, &ctx); |
| 931 | + if (ret) |
| 932 | + goto out; |
| 933 | + |
| 934 | + ret = nouveau_ttm_tt_bind(bo->bdev, bo->ttm, &tmp_reg); |
1222 | 935 | if (ret) |
1223 | 936 | goto out; |
1224 | 937 | |
.. | .. |
1228 | 941 | |
1229 | 942 | ret = ttm_bo_move_ttm(bo, &ctx, new_reg); |
1230 | 943 | out: |
1231 | | - ttm_bo_mem_put(bo, &tmp_reg); |
| 944 | + ttm_resource_free(bo, &tmp_reg); |
1232 | 945 | return ret; |
1233 | 946 | } |
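
Two conventions change in this flip path and recur through the rest of the patch: a ttm_place now names its target via .mem_type instead of a TTM_PL_FLAG_* bit in .flags, and the removed ttm_tt_bind() helper becomes an explicit populate-then-bind pair, e.g.:

	/* Old (one call):  ret = ttm_tt_bind(bo->ttm, &tmp_reg, &ctx);
	 * New (two steps): populate the tt, then bind it through the
	 * driver hook added earlier in this patch. */
	ret = ttm_tt_populate(bo->bdev, bo->ttm, &ctx);
	if (ret)
		goto out;
	ret = nouveau_ttm_tt_bind(bo->bdev, bo->ttm, &tmp_reg);
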
1234 | 947 | |
1235 | 948 | static int |
1236 | 949 | nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr, |
1237 | | - bool no_wait_gpu, struct ttm_mem_reg *new_reg) |
| 950 | + bool no_wait_gpu, struct ttm_resource *new_reg) |
1238 | 951 | { |
1239 | 952 | struct ttm_operation_ctx ctx = { intr, no_wait_gpu }; |
1240 | 953 | struct ttm_place placement_memtype = { |
1241 | 954 | .fpfn = 0, |
1242 | 955 | .lpfn = 0, |
1243 | | - .flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING |
| 956 | + .mem_type = TTM_PL_TT, |
| 957 | + .flags = TTM_PL_MASK_CACHING |
1244 | 958 | }; |
1245 | 959 | struct ttm_placement placement; |
1246 | | - struct ttm_mem_reg tmp_reg; |
| 960 | + struct ttm_resource tmp_reg; |
1247 | 961 | int ret; |
1248 | 962 | |
1249 | 963 | placement.num_placement = placement.num_busy_placement = 1; |
.. | .. |
1264 | 978 | goto out; |
1265 | 979 | |
1266 | 980 | out: |
1267 | | - ttm_bo_mem_put(bo, &tmp_reg); |
| 981 | + ttm_resource_free(bo, &tmp_reg); |
1268 | 982 | return ret; |
1269 | 983 | } |
1270 | 984 | |
1271 | 985 | static void |
1272 | 986 | nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, bool evict, |
1273 | | - struct ttm_mem_reg *new_reg) |
| 987 | + struct ttm_resource *new_reg) |
1274 | 988 | { |
1275 | 989 | struct nouveau_mem *mem = new_reg ? nouveau_mem(new_reg) : NULL; |
1276 | 990 | struct nouveau_bo *nvbo = nouveau_bo(bo); |
.. | .. |
1279 | 993 | /* ttm can now (stupidly) pass the driver bos it didn't create... */ |
1280 | 994 | if (bo->destroy != nouveau_bo_del_ttm) |
1281 | 995 | return; |
| 996 | + |
| 997 | + nouveau_bo_del_io_reserve_lru(bo); |
1282 | 998 | |
1283 | 999 | if (mem && new_reg->mem_type != TTM_PL_SYSTEM && |
1284 | 1000 | mem->mem.page == nvbo->page) { |
.. | .. |
1291 | 1007 | nouveau_vma_unmap(vma); |
1292 | 1008 | } |
1293 | 1009 | } |
| 1010 | + |
| 1011 | + if (new_reg) { |
| 1012 | + if (new_reg->mm_node) |
| 1013 | + nvbo->offset = (new_reg->start << PAGE_SHIFT); |
| 1014 | + else |
| 1015 | + nvbo->offset = 0; |
| 1016 | + } |
| 1017 | + |
1294 | 1018 | } |
1295 | 1019 | |
1296 | 1020 | static int |
1297 | | -nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_reg, |
| 1021 | +nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_resource *new_reg, |
1298 | 1022 | struct nouveau_drm_tile **new_tile) |
1299 | 1023 | { |
1300 | 1024 | struct nouveau_drm *drm = nouveau_bdev(bo->bdev); |
.. | .. |
1321 | 1045 | { |
1322 | 1046 | struct nouveau_drm *drm = nouveau_bdev(bo->bdev); |
1323 | 1047 | struct drm_device *dev = drm->dev; |
1324 | | - struct dma_fence *fence = reservation_object_get_excl(bo->resv); |
| 1048 | + struct dma_fence *fence = dma_resv_get_excl(bo->base.resv); |
1325 | 1049 | |
1326 | 1050 | nv10_bo_put_tile_region(dev, *old_tile, fence); |
1327 | 1051 | *old_tile = new_tile; |
.. | .. |
1330 | 1054 | static int |
1331 | 1055 | nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, |
1332 | 1056 | struct ttm_operation_ctx *ctx, |
1333 | | - struct ttm_mem_reg *new_reg) |
| 1057 | + struct ttm_resource *new_reg) |
1334 | 1058 | { |
1335 | 1059 | struct nouveau_drm *drm = nouveau_bdev(bo->bdev); |
1336 | 1060 | struct nouveau_bo *nvbo = nouveau_bo(bo); |
1337 | | - struct ttm_mem_reg *old_reg = &bo->mem; |
| 1061 | + struct ttm_resource *old_reg = &bo->mem; |
1338 | 1062 | struct nouveau_drm_tile *new_tile = NULL; |
1339 | 1063 | int ret = 0; |
1340 | 1064 | |
.. | .. |
1353 | 1077 | |
1354 | 1078 | /* Fake bo copy. */ |
1355 | 1079 | if (old_reg->mem_type == TTM_PL_SYSTEM && !bo->ttm) { |
1356 | | - BUG_ON(bo->mem.mm_node != NULL); |
1357 | | - bo->mem = *new_reg; |
1358 | | - new_reg->mm_node = NULL; |
| 1080 | + ttm_bo_move_null(bo, new_reg); |
1359 | 1081 | goto out; |
1360 | 1082 | } |
1361 | 1083 | |
.. | .. |
1398 | 1120 | { |
1399 | 1121 | struct nouveau_bo *nvbo = nouveau_bo(bo); |
1400 | 1122 | |
1401 | | - return drm_vma_node_verify_access(&nvbo->gem.vma_node, |
| 1123 | + return drm_vma_node_verify_access(&nvbo->bo.base.vma_node, |
1402 | 1124 | filp->private_data); |
1403 | 1125 | } |
1404 | 1126 | |
1405 | | -static int |
1406 | | -nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg) |
| 1127 | +static void |
| 1128 | +nouveau_ttm_io_mem_free_locked(struct nouveau_drm *drm, |
| 1129 | + struct ttm_resource *reg) |
1407 | 1130 | { |
1408 | | - struct ttm_mem_type_manager *man = &bdev->man[reg->mem_type]; |
| 1131 | + struct nouveau_mem *mem = nouveau_mem(reg); |
| 1132 | + |
| 1133 | + if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) { |
| 1134 | + switch (reg->mem_type) { |
| 1135 | + case TTM_PL_TT: |
| 1136 | + if (mem->kind) |
| 1137 | + nvif_object_unmap_handle(&mem->mem.object); |
| 1138 | + break; |
| 1139 | + case TTM_PL_VRAM: |
| 1140 | + nvif_object_unmap_handle(&mem->mem.object); |
| 1141 | + break; |
| 1142 | + default: |
| 1143 | + break; |
| 1144 | + } |
| 1145 | + } |
| 1146 | +} |
| 1147 | + |
| 1148 | +static int |
| 1149 | +nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *reg) |
| 1150 | +{ |
1409 | 1151 | struct nouveau_drm *drm = nouveau_bdev(bdev); |
1410 | 1152 | struct nvkm_device *device = nvxx_device(&drm->client.device); |
1411 | 1153 | struct nouveau_mem *mem = nouveau_mem(reg); |
| 1154 | + int ret; |
1412 | 1155 | |
1413 | | - reg->bus.addr = NULL; |
1414 | | - reg->bus.offset = 0; |
1415 | | - reg->bus.size = reg->num_pages << PAGE_SHIFT; |
1416 | | - reg->bus.base = 0; |
1417 | | - reg->bus.is_iomem = false; |
1418 | | - if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE)) |
1419 | | - return -EINVAL; |
| 1156 | + mutex_lock(&drm->ttm.io_reserve_mutex); |
| 1157 | +retry: |
1420 | 1158 | switch (reg->mem_type) { |
1421 | 1159 | case TTM_PL_SYSTEM: |
1422 | 1160 | /* System memory */ |
1423 | | - return 0; |
| 1161 | + ret = 0; |
| 1162 | + goto out; |
1424 | 1163 | case TTM_PL_TT: |
1425 | 1164 | #if IS_ENABLED(CONFIG_AGP) |
1426 | 1165 | if (drm->agp.bridge) { |
1427 | | - reg->bus.offset = reg->start << PAGE_SHIFT; |
1428 | | - reg->bus.base = drm->agp.base; |
| 1166 | + reg->bus.offset = (reg->start << PAGE_SHIFT) + |
| 1167 | + drm->agp.base; |
1429 | 1168 | reg->bus.is_iomem = !drm->agp.cma; |
1430 | 1169 | } |
1431 | 1170 | #endif |
1432 | | - if (drm->client.mem->oclass < NVIF_CLASS_MEM_NV50 || !mem->kind) |
| 1171 | + if (drm->client.mem->oclass < NVIF_CLASS_MEM_NV50 || |
| 1172 | + !mem->kind) { |
1433 | 1173 | /* untiled */ |
| 1174 | + ret = 0; |
1434 | 1175 | break; |
1435 | | - /* fallthrough, tiled memory */ |
| 1176 | + } |
| 1177 | + fallthrough; /* tiled memory */ |
1436 | 1178 | case TTM_PL_VRAM: |
1437 | | - reg->bus.offset = reg->start << PAGE_SHIFT; |
1438 | | - reg->bus.base = device->func->resource_addr(device, 1); |
| 1179 | + reg->bus.offset = (reg->start << PAGE_SHIFT) + |
| 1180 | + device->func->resource_addr(device, 1); |
1439 | 1181 | reg->bus.is_iomem = true; |
1440 | 1182 | if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) { |
1441 | 1183 | union { |
.. | .. |
1444 | 1186 | } args; |
1445 | 1187 | u64 handle, length; |
1446 | 1188 | u32 argc = 0; |
1447 | | - int ret; |
1448 | 1189 | |
1449 | 1190 | switch (mem->mem.object.oclass) { |
1450 | 1191 | case NVIF_CLASS_MEM_NV50: |
.. | .. |
1468 | 1209 | ret = nvif_object_map_handle(&mem->mem.object, |
1469 | 1210 | &args, argc, |
1470 | 1211 | &handle, &length); |
1471 | | - if (ret != 1) |
1472 | | - return ret ? ret : -EINVAL; |
| 1212 | + if (ret != 1) { |
| 1213 | + if (WARN_ON(ret == 0)) |
| 1214 | + ret = -EINVAL; |
| 1215 | + goto out; |
| 1216 | + } |
1473 | 1217 | |
1474 | | - reg->bus.base = 0; |
1475 | 1218 | reg->bus.offset = handle; |
1476 | 1219 | } |
| 1220 | + ret = 0; |
1477 | 1221 | break; |
1478 | 1222 | default: |
1479 | | - return -EINVAL; |
| 1223 | + ret = -EINVAL; |
1480 | 1224 | } |
1481 | | - return 0; |
| 1225 | + |
| 1226 | +out: |
| 1227 | + if (ret == -ENOSPC) { |
| 1228 | + struct nouveau_bo *nvbo; |
| 1229 | + |
| 1230 | + nvbo = list_first_entry_or_null(&drm->ttm.io_reserve_lru, |
| 1231 | + typeof(*nvbo), |
| 1232 | + io_reserve_lru); |
| 1233 | + if (nvbo) { |
| 1234 | + list_del_init(&nvbo->io_reserve_lru); |
| 1235 | + drm_vma_node_unmap(&nvbo->bo.base.vma_node, |
| 1236 | + bdev->dev_mapping); |
| 1237 | + nouveau_ttm_io_mem_free_locked(drm, &nvbo->bo.mem); |
| 1238 | + goto retry; |
| 1239 | + } |
| 1240 | + |
| 1241 | + } |
| 1242 | + mutex_unlock(&drm->ttm.io_reserve_mutex); |
| 1243 | + return ret; |
1482 | 1244 | } |
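
The -ENOSPC handling at the end is the consumer of the io_reserve LRU added earlier: when no I/O space is left for a mapping (e.g. the aperture is exhausted), the coldest BO's CPU mappings are shot down with drm_vma_node_unmap(), its reservation is released, and the switch is retried, all under io_reserve_mutex. The pattern in general form (hypothetical helper names, for illustration only):

	mutex_lock(&lru_lock);
retry:
	ret = try_reserve(res);
	if (ret == -ENOSPC) {
		victim = list_first_entry_or_null(&lru, typeof(*victim), lru_node);
		if (victim) {
			unmap_and_release(victim);	/* invalidate CPU mappings, free I/O space */
			goto retry;
		}
	}
	mutex_unlock(&lru_lock);
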
1483 | 1245 | |
1484 | 1246 | static void |
1485 | | -nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg) |
| 1247 | +nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_resource *reg) |
1486 | 1248 | { |
1487 | 1249 | struct nouveau_drm *drm = nouveau_bdev(bdev); |
1488 | | - struct nouveau_mem *mem = nouveau_mem(reg); |
1489 | 1250 | |
1490 | | - if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) { |
1491 | | - switch (reg->mem_type) { |
1492 | | - case TTM_PL_TT: |
1493 | | - if (mem->kind) |
1494 | | - nvif_object_unmap_handle(&mem->mem.object); |
1495 | | - break; |
1496 | | - case TTM_PL_VRAM: |
1497 | | - nvif_object_unmap_handle(&mem->mem.object); |
1498 | | - break; |
1499 | | - default: |
1500 | | - break; |
1501 | | - } |
1502 | | - } |
| 1251 | + mutex_lock(&drm->ttm.io_reserve_mutex); |
| 1252 | + nouveau_ttm_io_mem_free_locked(drm, reg); |
| 1253 | + mutex_unlock(&drm->ttm.io_reserve_mutex); |
1503 | 1254 | } |
1504 | 1255 | |
1505 | 1256 | static int |
.. | .. |
1520 | 1271 | return 0; |
1521 | 1272 | |
1522 | 1273 | if (bo->mem.mem_type == TTM_PL_SYSTEM) { |
1523 | | - nouveau_bo_placement_set(nvbo, TTM_PL_TT, 0); |
| 1274 | + nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, |
| 1275 | + 0); |
1524 | 1276 | |
1525 | 1277 | ret = nouveau_bo_validate(nvbo, false, false); |
1526 | 1278 | if (ret) |
.. | .. |
1544 | 1296 | nvbo->busy_placements[i].lpfn = mappable; |
1545 | 1297 | } |
1546 | 1298 | |
1547 | | - nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0); |
| 1299 | + nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, 0); |
1548 | 1300 | return nouveau_bo_validate(nvbo, false, false); |
1549 | 1301 | } |
1550 | 1302 | |
1551 | 1303 | static int |
1552 | | -nouveau_ttm_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx) |
| 1304 | +nouveau_ttm_tt_populate(struct ttm_bo_device *bdev, |
| 1305 | + struct ttm_tt *ttm, struct ttm_operation_ctx *ctx) |
1553 | 1306 | { |
1554 | 1307 | struct ttm_dma_tt *ttm_dma = (void *)ttm; |
1555 | 1308 | struct nouveau_drm *drm; |
1556 | 1309 | struct device *dev; |
1557 | | - unsigned i; |
1558 | | - int r; |
1559 | 1310 | bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); |
1560 | 1311 | |
1561 | | - if (ttm->state != tt_unpopulated) |
| 1312 | + if (ttm_tt_is_populated(ttm)) |
1562 | 1313 | return 0; |
1563 | 1314 | |
1564 | 1315 | if (slave && ttm->sg) { |
1565 | 1316 | /* make userspace faulting work */ |
1566 | 1317 | drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages, |
1567 | 1318 | ttm_dma->dma_address, ttm->num_pages); |
1568 | | - ttm->state = tt_unbound; |
| 1319 | + ttm_tt_set_populated(ttm); |
1569 | 1320 | return 0; |
1570 | 1321 | } |
1571 | 1322 | |
1572 | | - drm = nouveau_bdev(ttm->bdev); |
| 1323 | + drm = nouveau_bdev(bdev); |
1573 | 1324 | dev = drm->dev->dev; |
1574 | 1325 | |
1575 | 1326 | #if IS_ENABLED(CONFIG_AGP) |
1576 | 1327 | if (drm->agp.bridge) { |
1577 | | - return ttm_agp_tt_populate(ttm, ctx); |
| 1328 | + return ttm_pool_populate(ttm, ctx); |
1578 | 1329 | } |
1579 | 1330 | #endif |
1580 | 1331 | |
.. | .. |
1583 | 1334 | return ttm_dma_populate((void *)ttm, dev, ctx); |
1584 | 1335 | } |
1585 | 1336 | #endif |
1586 | | - |
1587 | | - r = ttm_pool_populate(ttm, ctx); |
1588 | | - if (r) { |
1589 | | - return r; |
1590 | | - } |
1591 | | - |
1592 | | - for (i = 0; i < ttm->num_pages; i++) { |
1593 | | - dma_addr_t addr; |
1594 | | - |
1595 | | - addr = dma_map_page(dev, ttm->pages[i], 0, PAGE_SIZE, |
1596 | | - DMA_BIDIRECTIONAL); |
1597 | | - |
1598 | | - if (dma_mapping_error(dev, addr)) { |
1599 | | - while (i--) { |
1600 | | - dma_unmap_page(dev, ttm_dma->dma_address[i], |
1601 | | - PAGE_SIZE, DMA_BIDIRECTIONAL); |
1602 | | - ttm_dma->dma_address[i] = 0; |
1603 | | - } |
1604 | | - ttm_pool_unpopulate(ttm); |
1605 | | - return -EFAULT; |
1606 | | - } |
1607 | | - |
1608 | | - ttm_dma->dma_address[i] = addr; |
1609 | | - } |
1610 | | - return 0; |
| 1337 | + return ttm_populate_and_map_pages(dev, ttm_dma, ctx); |
1611 | 1338 | } |
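
The removed hunk is the open-coded populate-then-dma_map_page() loop, including its unwind on mapping failure; ttm_populate_and_map_pages() performs the same sequence inside TTM, so the driver keeps only the AGP and swiotlb special cases. Roughly, what the helper consolidates (a sketch read off the deleted code above, not the helper's actual body):

	r = ttm_pool_populate(ttm, ctx);
	if (r)
		return r;
	for (i = 0; i < ttm->num_pages; i++) {
		/* dma_map_page() each page DMA_BIDIRECTIONAL; on a mapping
		 * error, unmap what was mapped, unpopulate, return -EFAULT. */
	}
	return 0;
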
1612 | 1339 | |
1613 | 1340 | static void |
1614 | | -nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm) |
| 1341 | +nouveau_ttm_tt_unpopulate(struct ttm_bo_device *bdev, |
| 1342 | + struct ttm_tt *ttm) |
1615 | 1343 | { |
1616 | 1344 | struct ttm_dma_tt *ttm_dma = (void *)ttm; |
1617 | 1345 | struct nouveau_drm *drm; |
1618 | 1346 | struct device *dev; |
1619 | | - unsigned i; |
1620 | 1347 | bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); |
1621 | 1348 | |
1622 | 1349 | if (slave) |
1623 | 1350 | return; |
1624 | 1351 | |
1625 | | - drm = nouveau_bdev(ttm->bdev); |
| 1352 | + drm = nouveau_bdev(bdev); |
1626 | 1353 | dev = drm->dev->dev; |
1627 | 1354 | |
1628 | 1355 | #if IS_ENABLED(CONFIG_AGP) |
1629 | 1356 | if (drm->agp.bridge) { |
1630 | | - ttm_agp_tt_unpopulate(ttm); |
| 1357 | + ttm_pool_unpopulate(ttm); |
1631 | 1358 | return; |
1632 | 1359 | } |
1633 | 1360 | #endif |
.. | .. |
1639 | 1366 | } |
1640 | 1367 | #endif |
1641 | 1368 | |
1642 | | - for (i = 0; i < ttm->num_pages; i++) { |
1643 | | - if (ttm_dma->dma_address[i]) { |
1644 | | - dma_unmap_page(dev, ttm_dma->dma_address[i], PAGE_SIZE, |
1645 | | - DMA_BIDIRECTIONAL); |
1646 | | - } |
1647 | | - } |
| 1369 | + ttm_unmap_and_unpopulate_pages(dev, ttm_dma); |
| 1370 | +} |
1648 | 1371 | |
1649 | | - ttm_pool_unpopulate(ttm); |
| 1372 | +static void |
| 1373 | +nouveau_ttm_tt_destroy(struct ttm_bo_device *bdev, |
| 1374 | + struct ttm_tt *ttm) |
| 1375 | +{ |
| 1376 | +#if IS_ENABLED(CONFIG_AGP) |
| 1377 | + struct nouveau_drm *drm = nouveau_bdev(bdev); |
| 1378 | + if (drm->agp.bridge) { |
| 1379 | + ttm_agp_unbind(ttm); |
| 1380 | + ttm_tt_destroy_common(bdev, ttm); |
| 1381 | + ttm_agp_destroy(ttm); |
| 1382 | + return; |
| 1383 | + } |
| 1384 | +#endif |
| 1385 | + nouveau_sgdma_destroy(bdev, ttm); |
1650 | 1386 | } |
1651 | 1387 | |
1652 | 1388 | void |
1653 | 1389 | nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence, bool exclusive) |
1654 | 1390 | { |
1655 | | - struct reservation_object *resv = nvbo->bo.resv; |
| 1391 | + struct dma_resv *resv = nvbo->bo.base.resv; |
1656 | 1392 | |
1657 | 1393 | if (exclusive) |
1658 | | - reservation_object_add_excl_fence(resv, &fence->base); |
| 1394 | + dma_resv_add_excl_fence(resv, &fence->base); |
1659 | 1395 | else if (fence) |
1660 | | - reservation_object_add_shared_fence(resv, &fence->base); |
| 1396 | + dma_resv_add_shared_fence(resv, &fence->base); |
1661 | 1397 | } |
1662 | 1398 | |
1663 | 1399 | struct ttm_bo_driver nouveau_bo_driver = { |
1664 | 1400 | .ttm_tt_create = &nouveau_ttm_tt_create, |
1665 | 1401 | .ttm_tt_populate = &nouveau_ttm_tt_populate, |
1666 | 1402 | .ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate, |
1667 | | - .invalidate_caches = nouveau_bo_invalidate_caches, |
1668 | | - .init_mem_type = nouveau_bo_init_mem_type, |
| 1403 | + .ttm_tt_bind = &nouveau_ttm_tt_bind, |
| 1404 | + .ttm_tt_unbind = &nouveau_ttm_tt_unbind, |
| 1405 | + .ttm_tt_destroy = &nouveau_ttm_tt_destroy, |
1669 | 1406 | .eviction_valuable = ttm_bo_eviction_valuable, |
1670 | 1407 | .evict_flags = nouveau_bo_evict_flags, |
1671 | 1408 | .move_notify = nouveau_bo_move_ntfy, |
---|