| .. | .. |
|---|
| 31 | 31 | #include <linux/swiotlb.h> |
|---|
| 32 | 32 | |
|---|
| 33 | 33 | #include "nouveau_drv.h" |
|---|
| 34 | | -#include "nouveau_dma.h" |
|---|
| 34 | +#include "nouveau_chan.h" |
|---|
| 35 | 35 | #include "nouveau_fence.h" |
|---|
| 36 | 36 | |
|---|
| 37 | 37 | #include "nouveau_bo.h" |
|---|
| .. | .. |
|---|
| 43 | 43 | #include <nvif/class.h> |
|---|
| 44 | 44 | #include <nvif/if500b.h> |
|---|
| 45 | 45 | #include <nvif/if900b.h> |
|---|
| 46 | + |
|---|
| 47 | +static int nouveau_ttm_tt_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm, |
|---|
| 48 | + struct ttm_resource *reg); |
|---|
| 46 | 49 | |
|---|
| 47 | 50 | /* |
|---|
| 48 | 51 | * NV10-NV40 tiling helpers |
|---|
| .. | .. |
|---|
| 136 | 139 | struct drm_device *dev = drm->dev; |
|---|
| 137 | 140 | struct nouveau_bo *nvbo = nouveau_bo(bo); |
|---|
| 138 | 141 | |
|---|
| 139 | | - if (unlikely(nvbo->gem.filp)) |
|---|
| 140 | | - DRM_ERROR("bo %p still attached to GEM object\n", bo); |
|---|
| 141 | 142 | WARN_ON(nvbo->pin_refcnt > 0); |
|---|
| 143 | + nouveau_bo_del_io_reserve_lru(bo); |
|---|
| 142 | 144 | nv10_bo_put_tile_region(dev, nvbo->tile, NULL); |
|---|
| 145 | + |
|---|
| 146 | + /* |
|---|
| 147 | + * If nouveau_bo_new() allocated this buffer, the GEM object was never |
|---|
| 148 | + * initialized, so don't attempt to release it. |
|---|
| 149 | + */ |
|---|
| 150 | + if (bo->base.dev) |
|---|
| 151 | + drm_gem_object_release(&bo->base); |
|---|
| 152 | + |
|---|
| 143 | 153 | kfree(nvbo); |
|---|
| 144 | 154 | } |
|---|
| 145 | 155 | |
|---|
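Note: after this hunk the destructor drops the buffer from the new io-reserve LRU and only releases the embedded GEM object when it was actually initialized; kernel-internal buffers created through nouveau_bo_new() never go through drm_gem_object_init(), so their bo->base.dev stays NULL. A condensed view of the resulting teardown, reconstructed from the +/- lines above:

```c
static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	WARN_ON(nvbo->pin_refcnt > 0);
	nouveau_bo_del_io_reserve_lru(bo);	/* forget any cached io mapping */
	nv10_bo_put_tile_region(dev, nvbo->tile, NULL);

	/* Only BOs that went through drm_gem_object_init() have a non-NULL
	 * bo->base.dev; buffers from nouveau_bo_new() do not. */
	if (bo->base.dev)
		drm_gem_object_release(&bo->base);

	kfree(nvbo);
}
```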
| .. | .. |
|---|
| 152 | 162 | } |
|---|
| 153 | 163 | |
|---|
| 154 | 164 | static void |
|---|
| 155 | | -nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags, |
|---|
| 156 | | - int *align, u64 *size) |
|---|
| 165 | +nouveau_bo_fixup_align(struct nouveau_bo *nvbo, int *align, u64 *size) |
|---|
| 157 | 166 | { |
|---|
| 158 | 167 | struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); |
|---|
| 159 | 168 | struct nvif_device *device = &drm->client.device; |
|---|
| .. | .. |
|---|
| 185 | 194 | *size = roundup_64(*size, PAGE_SIZE); |
|---|
| 186 | 195 | } |
|---|
| 187 | 196 | |
|---|
| 188 | | -int |
|---|
| 189 | | -nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align, |
|---|
| 190 | | - uint32_t flags, uint32_t tile_mode, uint32_t tile_flags, |
|---|
| 191 | | - struct sg_table *sg, struct reservation_object *robj, |
|---|
| 192 | | - struct nouveau_bo **pnvbo) |
|---|
| 197 | +struct nouveau_bo * |
|---|
| 198 | +nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain, |
|---|
| 199 | + u32 tile_mode, u32 tile_flags) |
|---|
| 193 | 200 | { |
|---|
| 194 | 201 | struct nouveau_drm *drm = cli->drm; |
|---|
| 195 | 202 | struct nouveau_bo *nvbo; |
|---|
| 196 | 203 | struct nvif_mmu *mmu = &cli->mmu; |
|---|
| 197 | | - struct nvif_vmm *vmm = &cli->vmm.vmm; |
|---|
| 198 | | - size_t acc_size; |
|---|
| 199 | | - int type = ttm_bo_type_device; |
|---|
| 200 | | - int ret, i, pi = -1; |
|---|
| 204 | + struct nvif_vmm *vmm = cli->svm.cli ? &cli->svm.vmm : &cli->vmm.vmm; |
|---|
| 205 | + int i, pi = -1; |
|---|
| 201 | 206 | |
|---|
| 202 | | - if (!size) { |
|---|
| 203 | | - NV_WARN(drm, "skipped size %016llx\n", size); |
|---|
| 204 | | - return -EINVAL; |
|---|
| 207 | + if (!*size) { |
|---|
| 208 | + NV_WARN(drm, "skipped size %016llx\n", *size); |
|---|
| 209 | + return ERR_PTR(-EINVAL); |
|---|
| 205 | 210 | } |
|---|
| 206 | | - |
|---|
| 207 | | - if (sg) |
|---|
| 208 | | - type = ttm_bo_type_sg; |
|---|
| 209 | 211 | |
|---|
| 210 | 212 | nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL); |
|---|
| 211 | 213 | if (!nvbo) |
|---|
| 212 | | - return -ENOMEM; |
|---|
| 214 | + return ERR_PTR(-ENOMEM); |
|---|
| 213 | 215 | INIT_LIST_HEAD(&nvbo->head); |
|---|
| 214 | 216 | INIT_LIST_HEAD(&nvbo->entry); |
|---|
| 215 | 217 | INIT_LIST_HEAD(&nvbo->vma_list); |
|---|
| .. | .. |
|---|
| 219 | 221 | * mapping, but is what NOUVEAU_GEM_DOMAIN_COHERENT gets translated |
|---|
| 220 | 222 | * into in nouveau_gem_new(). |
|---|
| 221 | 223 | */ |
|---|
| 222 | | - if (flags & TTM_PL_FLAG_UNCACHED) { |
|---|
| 224 | + if (domain & NOUVEAU_GEM_DOMAIN_COHERENT) { |
|---|
| 223 | 225 | /* Determine if we can get a cache-coherent map, forcing |
|---|
| 224 | 226 | * uncached mapping if we can't. |
|---|
| 225 | 227 | */ |
|---|
| .. | .. |
|---|
| 231 | 233 | nvbo->kind = (tile_flags & 0x0000ff00) >> 8; |
|---|
| 232 | 234 | if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) { |
|---|
| 233 | 235 | kfree(nvbo); |
|---|
| 234 | | - return -EINVAL; |
|---|
| 236 | + return ERR_PTR(-EINVAL); |
|---|
| 235 | 237 | } |
|---|
| 236 | 238 | |
|---|
| 237 | 239 | nvbo->comp = mmu->kind[nvbo->kind] != nvbo->kind; |
|---|
| .. | .. |
|---|
| 241 | 243 | nvbo->comp = (tile_flags & 0x00030000) >> 16; |
|---|
| 242 | 244 | if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) { |
|---|
| 243 | 245 | kfree(nvbo); |
|---|
| 244 | | - return -EINVAL; |
|---|
| 246 | + return ERR_PTR(-EINVAL); |
|---|
| 245 | 247 | } |
|---|
| 246 | 248 | } else { |
|---|
| 247 | 249 | nvbo->zeta = (tile_flags & 0x00000007); |
|---|
| .. | .. |
|---|
| 259 | 261 | * Skip page sizes that can't support needed domains. |
|---|
| 260 | 262 | */ |
|---|
| 261 | 263 | if (cli->device.info.family > NV_DEVICE_INFO_V0_CURIE && |
|---|
| 262 | | - (flags & TTM_PL_FLAG_VRAM) && !vmm->page[i].vram) |
|---|
| 264 | + (domain & NOUVEAU_GEM_DOMAIN_VRAM) && !vmm->page[i].vram) |
|---|
| 263 | 265 | continue; |
|---|
| 264 | | - if ((flags & TTM_PL_FLAG_TT) && |
|---|
| 266 | + if ((domain & NOUVEAU_GEM_DOMAIN_GART) && |
|---|
| 265 | 267 | (!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT)) |
|---|
| 266 | 268 | continue; |
|---|
| 267 | 269 | |
|---|
| .. | .. |
|---|
| 273 | 275 | pi = i; |
|---|
| 274 | 276 | |
|---|
| 275 | 277 | /* Stop once the buffer is larger than the current page size. */ |
|---|
| 276 | | - if (size >= 1ULL << vmm->page[i].shift) |
|---|
| 278 | + if (*size >= 1ULL << vmm->page[i].shift) |
|---|
| 277 | 279 | break; |
|---|
| 278 | 280 | } |
|---|
| 279 | 281 | |
|---|
| 280 | | - if (WARN_ON(pi < 0)) |
|---|
| 281 | | - return -EINVAL; |
|---|
| 282 | + if (WARN_ON(pi < 0)) { |
|---|
| 283 | + kfree(nvbo); |
|---|
| 284 | + return ERR_PTR(-EINVAL); |
|---|
| 285 | + } |
|---|
| 282 | 286 | |
|---|
| 283 | 287 | /* Disable compression if suitable settings couldn't be found. */ |
|---|
| 284 | 288 | if (nvbo->comp && !vmm->page[pi].comp) { |
|---|
| .. | .. |
|---|
| 288 | 292 | } |
|---|
| 289 | 293 | nvbo->page = vmm->page[pi].shift; |
|---|
| 290 | 294 | |
|---|
| 291 | | - nouveau_bo_fixup_align(nvbo, flags, &align, &size); |
|---|
| 295 | + nouveau_bo_fixup_align(nvbo, align, size); |
|---|
| 296 | + |
|---|
| 297 | + return nvbo; |
|---|
| 298 | +} |
|---|
| 299 | + |
|---|
| 300 | +int |
|---|
| 301 | +nouveau_bo_init(struct nouveau_bo *nvbo, u64 size, int align, u32 domain, |
|---|
| 302 | + struct sg_table *sg, struct dma_resv *robj) |
|---|
| 303 | +{ |
|---|
| 304 | + int type = sg ? ttm_bo_type_sg : ttm_bo_type_device; |
|---|
| 305 | + size_t acc_size; |
|---|
| 306 | + int ret; |
|---|
| 307 | + |
|---|
| 308 | + acc_size = ttm_bo_dma_acc_size(nvbo->bo.bdev, size, sizeof(*nvbo)); |
|---|
| 309 | + |
|---|
| 292 | 310 | nvbo->bo.mem.num_pages = size >> PAGE_SHIFT; |
|---|
| 293 | | - nouveau_bo_placement_set(nvbo, flags, 0); |
|---|
| 311 | + nouveau_bo_placement_set(nvbo, domain, 0); |
|---|
| 312 | + INIT_LIST_HEAD(&nvbo->io_reserve_lru); |
|---|
| 294 | 313 | |
|---|
| 295 | | - acc_size = ttm_bo_dma_acc_size(&drm->ttm.bdev, size, |
|---|
| 296 | | - sizeof(struct nouveau_bo)); |
|---|
| 297 | | - |
|---|
| 298 | | - ret = ttm_bo_init(&drm->ttm.bdev, &nvbo->bo, size, |
|---|
| 299 | | - type, &nvbo->placement, |
|---|
| 300 | | - align >> PAGE_SHIFT, false, acc_size, sg, |
|---|
| 301 | | - robj, nouveau_bo_del_ttm); |
|---|
| 314 | + ret = ttm_bo_init(nvbo->bo.bdev, &nvbo->bo, size, type, |
|---|
| 315 | + &nvbo->placement, align >> PAGE_SHIFT, false, |
|---|
| 316 | + acc_size, sg, robj, nouveau_bo_del_ttm); |
|---|
| 302 | 317 | if (ret) { |
|---|
| 303 | 318 | /* ttm will call nouveau_bo_del_ttm if it fails.. */ |
|---|
| 304 | 319 | return ret; |
|---|
| 305 | 320 | } |
|---|
| 321 | + |
|---|
| 322 | + return 0; |
|---|
| 323 | +} |
|---|
| 324 | + |
|---|
| 325 | +int |
|---|
| 326 | +nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align, |
|---|
| 327 | + uint32_t domain, uint32_t tile_mode, uint32_t tile_flags, |
|---|
| 328 | + struct sg_table *sg, struct dma_resv *robj, |
|---|
| 329 | + struct nouveau_bo **pnvbo) |
|---|
| 330 | +{ |
|---|
| 331 | + struct nouveau_bo *nvbo; |
|---|
| 332 | + int ret; |
|---|
| 333 | + |
|---|
| 334 | + nvbo = nouveau_bo_alloc(cli, &size, &align, domain, tile_mode, |
|---|
| 335 | + tile_flags); |
|---|
| 336 | + if (IS_ERR(nvbo)) |
|---|
| 337 | + return PTR_ERR(nvbo); |
|---|
| 338 | + |
|---|
| 339 | + ret = nouveau_bo_init(nvbo, size, align, domain, sg, robj); |
|---|
| 340 | + if (ret) |
|---|
| 341 | + return ret; |
|---|
| 306 | 342 | |
|---|
| 307 | 343 | *pnvbo = nvbo; |
|---|
| 308 | 344 | return 0; |
|---|
| 309 | 345 | } |
|---|
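Note: nouveau_bo_new() is split in two. nouveau_bo_alloc() validates the request, picks the page size, adjusts *size and *align through nouveau_bo_fixup_align(), and returns the nouveau_bo (or an ERR_PTR); nouveau_bo_init() then performs the TTM-side ttm_bo_init(). Callers that need the object before TTM initialization, for instance to set up the embedded GEM object, can now call the two steps separately; nouveau_bo_new() above is just the thin wrapper. A hedged sketch of such a two-step caller (the function name and the GEM-init step are illustrative assumptions, not part of this hunk):

```c
/* Sketch of a caller using the split API; error handling condensed. */
static int
example_bo_create(struct nouveau_cli *cli, u64 size, int align, u32 domain,
		  u32 tile_mode, u32 tile_flags, struct nouveau_bo **pnvbo)
{
	struct nouveau_bo *nvbo;
	int ret;

	nvbo = nouveau_bo_alloc(cli, &size, &align, domain,
				tile_mode, tile_flags);
	if (IS_ERR(nvbo))
		return PTR_ERR(nvbo);

	/* Per-caller setup between the two steps goes here, e.g.
	 * initializing &nvbo->bo.base as a GEM object (assumption). */

	ret = nouveau_bo_init(nvbo, size, align, domain, NULL, NULL);
	if (ret)
		return ret;	/* ttm_bo_init() already called
				 * nouveau_bo_del_ttm() on failure */

	*pnvbo = nvbo;
	return 0;
}
```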
| 310 | 346 | |
|---|
| 311 | 347 | static void |
|---|
| 312 | | -set_placement_list(struct ttm_place *pl, unsigned *n, uint32_t type, uint32_t flags) |
|---|
| 348 | +set_placement_list(struct nouveau_drm *drm, struct ttm_place *pl, unsigned *n, |
|---|
| 349 | + uint32_t domain, uint32_t flags) |
|---|
| 313 | 350 | { |
|---|
| 314 | 351 | *n = 0; |
|---|
| 315 | 352 | |
|---|
| 316 | | - if (type & TTM_PL_FLAG_VRAM) |
|---|
| 317 | | - pl[(*n)++].flags = TTM_PL_FLAG_VRAM | flags; |
|---|
| 318 | | - if (type & TTM_PL_FLAG_TT) |
|---|
| 319 | | - pl[(*n)++].flags = TTM_PL_FLAG_TT | flags; |
|---|
| 320 | | - if (type & TTM_PL_FLAG_SYSTEM) |
|---|
| 321 | | - pl[(*n)++].flags = TTM_PL_FLAG_SYSTEM | flags; |
|---|
| 353 | + if (domain & NOUVEAU_GEM_DOMAIN_VRAM) { |
|---|
| 354 | + struct nvif_mmu *mmu = &drm->client.mmu; |
|---|
| 355 | + |
|---|
| 356 | + pl[*n].mem_type = TTM_PL_VRAM; |
|---|
| 357 | + pl[*n].flags = flags & ~TTM_PL_FLAG_CACHED; |
|---|
| 358 | + |
|---|
| 359 | + /* Some BARs do not support being ioremapped WC */ |
|---|
| 360 | + if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA && |
|---|
| 361 | + mmu->type[drm->ttm.type_vram].type & NVIF_MEM_UNCACHED) |
|---|
| 362 | + pl[*n].flags &= ~TTM_PL_FLAG_WC; |
|---|
| 363 | + |
|---|
| 364 | + (*n)++; |
|---|
| 365 | + } |
|---|
| 366 | + if (domain & NOUVEAU_GEM_DOMAIN_GART) { |
|---|
| 367 | + pl[*n].mem_type = TTM_PL_TT; |
|---|
| 368 | + pl[*n].flags = flags; |
|---|
| 369 | + |
|---|
| 370 | + if (drm->agp.bridge) |
|---|
| 371 | + pl[*n].flags &= ~TTM_PL_FLAG_CACHED; |
|---|
| 372 | + |
|---|
| 373 | + (*n)++; |
|---|
| 374 | + } |
|---|
| 375 | + if (domain & NOUVEAU_GEM_DOMAIN_CPU) { |
|---|
| 376 | + pl[*n].mem_type = TTM_PL_SYSTEM; |
|---|
| 377 | + pl[(*n)++].flags = flags; |
|---|
| 378 | + } |
|---|
| 322 | 379 | } |
|---|
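Note: set_placement_list() now works from NOUVEAU_GEM_DOMAIN_* bits and fills an explicit ttm_place.mem_type plus caching flags, instead of packing the memory type into TTM_PL_FLAG_* values. The per-type caching quirks that the deleted nouveau_bo_init_mem_type() used to apply (no cached VRAM, no write-combining on Tesla+ when the BAR is flagged NVIF_MEM_UNCACHED, uncached GART behind an AGP bridge) are applied per placement here, which is why the function grows a nouveau_drm argument. Usage keeps the same shape, for example:

```c
/*
 * Domain bit               -> mem_type        caching handling above
 * NOUVEAU_GEM_DOMAIN_VRAM  -> TTM_PL_VRAM     TTM_PL_FLAG_CACHED stripped;
 *                                             WC also stripped on Tesla+ if
 *                                             the BAR is NVIF_MEM_UNCACHED
 * NOUVEAU_GEM_DOMAIN_GART  -> TTM_PL_TT       TTM_PL_FLAG_CACHED stripped
 *                                             when an AGP bridge is present
 * NOUVEAU_GEM_DOMAIN_CPU   -> TTM_PL_SYSTEM   caller flags used as-is
 *
 * Example: prefer VRAM, allow GART on the busy list.
 */
nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_VRAM,
			 NOUVEAU_GEM_DOMAIN_GART);
```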
| 323 | 380 | |
|---|
| 324 | 381 | static void |
|---|
| 325 | | -set_placement_range(struct nouveau_bo *nvbo, uint32_t type) |
|---|
| 382 | +set_placement_range(struct nouveau_bo *nvbo, uint32_t domain) |
|---|
| 326 | 383 | { |
|---|
| 327 | 384 | struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); |
|---|
| 328 | 385 | u32 vram_pages = drm->client.device.info.ram_size >> PAGE_SHIFT; |
|---|
| 329 | 386 | unsigned i, fpfn, lpfn; |
|---|
| 330 | 387 | |
|---|
| 331 | 388 | if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CELSIUS && |
|---|
| 332 | | - nvbo->mode && (type & TTM_PL_FLAG_VRAM) && |
|---|
| 389 | + nvbo->mode && (domain & NOUVEAU_GEM_DOMAIN_VRAM) && |
|---|
| 333 | 390 | nvbo->bo.mem.num_pages < vram_pages / 4) { |
|---|
| 334 | 391 | /* |
|---|
| 335 | 392 | * Make sure that the color and depth buffers are handled |
|---|
| .. | .. |
|---|
| 356 | 413 | } |
|---|
| 357 | 414 | |
|---|
| 358 | 415 | void |
|---|
| 359 | | -nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy) |
|---|
| 416 | +nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t domain, |
|---|
| 417 | + uint32_t busy) |
|---|
| 360 | 418 | { |
|---|
| 419 | + struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); |
|---|
| 361 | 420 | struct ttm_placement *pl = &nvbo->placement; |
|---|
| 362 | 421 | uint32_t flags = (nvbo->force_coherent ? TTM_PL_FLAG_UNCACHED : |
|---|
| 363 | 422 | TTM_PL_MASK_CACHING) | |
|---|
| 364 | 423 | (nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0); |
|---|
| 365 | 424 | |
|---|
| 366 | 425 | pl->placement = nvbo->placements; |
|---|
| 367 | | - set_placement_list(nvbo->placements, &pl->num_placement, |
|---|
| 368 | | - type, flags); |
|---|
| 426 | + set_placement_list(drm, nvbo->placements, &pl->num_placement, |
|---|
| 427 | + domain, flags); |
|---|
| 369 | 428 | |
|---|
| 370 | 429 | pl->busy_placement = nvbo->busy_placements; |
|---|
| 371 | | - set_placement_list(nvbo->busy_placements, &pl->num_busy_placement, |
|---|
| 372 | | - type | busy, flags); |
|---|
| 430 | + set_placement_list(drm, nvbo->busy_placements, &pl->num_busy_placement, |
|---|
| 431 | + domain | busy, flags); |
|---|
| 373 | 432 | |
|---|
| 374 | | - set_placement_range(nvbo, type); |
|---|
| 433 | + set_placement_range(nvbo, domain); |
|---|
| 375 | 434 | } |
|---|
| 376 | 435 | |
|---|
| 377 | 436 | int |
|---|
| 378 | | -nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype, bool contig) |
|---|
| 437 | +nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig) |
|---|
| 379 | 438 | { |
|---|
| 380 | 439 | struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); |
|---|
| 381 | 440 | struct ttm_buffer_object *bo = &nvbo->bo; |
|---|
| .. | .. |
|---|
| 387 | 446 | return ret; |
|---|
| 388 | 447 | |
|---|
| 389 | 448 | if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA && |
|---|
| 390 | | - memtype == TTM_PL_FLAG_VRAM && contig) { |
|---|
| 449 | + domain == NOUVEAU_GEM_DOMAIN_VRAM && contig) { |
|---|
| 391 | 450 | if (!nvbo->contig) { |
|---|
| 392 | 451 | nvbo->contig = true; |
|---|
| 393 | 452 | force = true; |
|---|
| .. | .. |
|---|
| 396 | 455 | } |
|---|
| 397 | 456 | |
|---|
| 398 | 457 | if (nvbo->pin_refcnt) { |
|---|
| 399 | | - if (!(memtype & (1 << bo->mem.mem_type)) || evict) { |
|---|
| 458 | + bool error = evict; |
|---|
| 459 | + |
|---|
| 460 | + switch (bo->mem.mem_type) { |
|---|
| 461 | + case TTM_PL_VRAM: |
|---|
| 462 | + error |= !(domain & NOUVEAU_GEM_DOMAIN_VRAM); |
|---|
| 463 | + break; |
|---|
| 464 | + case TTM_PL_TT: |
|---|
| 465 | + error |= !(domain & NOUVEAU_GEM_DOMAIN_GART); |
|---|
| 466 | + default: |
|---|
| 467 | + break; |
|---|
| 468 | + } |
|---|
| 469 | + |
|---|
| 470 | + if (error) { |
|---|
| 400 | 471 | NV_ERROR(drm, "bo %p pinned elsewhere: " |
|---|
| 401 | 472 | "0x%08x vs 0x%08x\n", bo, |
|---|
| 402 | | - 1 << bo->mem.mem_type, memtype); |
|---|
| 473 | + bo->mem.mem_type, domain); |
|---|
| 403 | 474 | ret = -EBUSY; |
|---|
| 404 | 475 | } |
|---|
| 405 | 476 | nvbo->pin_refcnt++; |
|---|
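Note: with the memtype flags gone, the old "pinned elsewhere" test `memtype & (1 << bo->mem.mem_type)` no longer works, so nouveau_bo_pin() switches on the BO's current TTM memory type and checks it against the requested domain mask (nouveau_bo_unpin() below gains the mirror-image switch to rebuild a placement from bo->mem.mem_type). The check is equivalent to:

```c
/* Condensed model of the switch added above: true when the BO currently
 * sits outside the domains the caller asked to pin it into.  The
 * function name is illustrative, not from the patch. */
static bool
nouveau_bo_pinned_elsewhere(struct ttm_buffer_object *bo, u32 domain)
{
	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		return !(domain & NOUVEAU_GEM_DOMAIN_VRAM);
	case TTM_PL_TT:
		return !(domain & NOUVEAU_GEM_DOMAIN_GART);
	default:
		return false;
	}
}
```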
| .. | .. |
|---|
| 407 | 478 | } |
|---|
| 408 | 479 | |
|---|
| 409 | 480 | if (evict) { |
|---|
| 410 | | - nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT, 0); |
|---|
| 481 | + nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, 0); |
|---|
| 411 | 482 | ret = nouveau_bo_validate(nvbo, false, false); |
|---|
| 412 | 483 | if (ret) |
|---|
| 413 | 484 | goto out; |
|---|
| 414 | 485 | } |
|---|
| 415 | 486 | |
|---|
| 416 | 487 | nvbo->pin_refcnt++; |
|---|
| 417 | | - nouveau_bo_placement_set(nvbo, memtype, 0); |
|---|
| 488 | + nouveau_bo_placement_set(nvbo, domain, 0); |
|---|
| 418 | 489 | |
|---|
| 419 | 490 | /* drop pin_refcnt temporarily, so we don't trip the assertion |
|---|
| 420 | 491 | * in nouveau_bo_move() that makes sure we're not trying to |
|---|
| .. | .. |
|---|
| 460 | 531 | if (ref) |
|---|
| 461 | 532 | goto out; |
|---|
| 462 | 533 | |
|---|
| 463 | | - nouveau_bo_placement_set(nvbo, bo->mem.placement, 0); |
|---|
| 534 | + switch (bo->mem.mem_type) { |
|---|
| 535 | + case TTM_PL_VRAM: |
|---|
| 536 | + nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, 0); |
|---|
| 537 | + break; |
|---|
| 538 | + case TTM_PL_TT: |
|---|
| 539 | + nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, 0); |
|---|
| 540 | + break; |
|---|
| 541 | + default: |
|---|
| 542 | + break; |
|---|
| 543 | + } |
|---|
| 464 | 544 | |
|---|
| 465 | 545 | ret = nouveau_bo_validate(nvbo, false, false); |
|---|
| 466 | 546 | if (ret == 0) { |
|---|
| .. | .. |
|---|
| 544 | 624 | PAGE_SIZE, DMA_FROM_DEVICE); |
|---|
| 545 | 625 | } |
|---|
| 546 | 626 | |
|---|
| 627 | +void nouveau_bo_add_io_reserve_lru(struct ttm_buffer_object *bo) |
|---|
| 628 | +{ |
|---|
| 629 | + struct nouveau_drm *drm = nouveau_bdev(bo->bdev); |
|---|
| 630 | + struct nouveau_bo *nvbo = nouveau_bo(bo); |
|---|
| 631 | + |
|---|
| 632 | + mutex_lock(&drm->ttm.io_reserve_mutex); |
|---|
| 633 | + list_move_tail(&nvbo->io_reserve_lru, &drm->ttm.io_reserve_lru); |
|---|
| 634 | + mutex_unlock(&drm->ttm.io_reserve_mutex); |
|---|
| 635 | +} |
|---|
| 636 | + |
|---|
| 637 | +void nouveau_bo_del_io_reserve_lru(struct ttm_buffer_object *bo) |
|---|
| 638 | +{ |
|---|
| 639 | + struct nouveau_drm *drm = nouveau_bdev(bo->bdev); |
|---|
| 640 | + struct nouveau_bo *nvbo = nouveau_bo(bo); |
|---|
| 641 | + |
|---|
| 642 | + mutex_lock(&drm->ttm.io_reserve_mutex); |
|---|
| 643 | + list_del_init(&nvbo->io_reserve_lru); |
|---|
| 644 | + mutex_unlock(&drm->ttm.io_reserve_mutex); |
|---|
| 645 | +} |
|---|
| 646 | + |
|---|
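Note: these two helpers maintain a per-device LRU of buffers that currently hold an io reservation, serialized by drm->ttm.io_reserve_mutex (the list head and mutex are presumably added to struct nouveau_drm elsewhere in this series, alongside the io_reserve_lru member added to struct nouveau_bo). list_move_tail() keeps the most recently used buffer at the tail, so the head is the coldest candidate when nouveau_ttm_io_mem_reserve() later needs to evict a mapping; list_del_init() makes removal safe even for a buffer that was never added. The delete side is visible in this diff (destructor, move notify, io_mem eviction); the add side is expected to run once a mapping is established, e.g. from the CPU fault path (assumption, not shown in this section):

```c
/* After the BO's io range has been reserved and mapped (assumed call
 * site, e.g. the vm fault handler): */
nouveau_bo_add_io_reserve_lru(bo);

/* Whenever that mapping is about to go away: */
nouveau_bo_del_io_reserve_lru(bo);	/* harmless if bo was never added */
```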
| 547 | 647 | int |
|---|
| 548 | 648 | nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible, |
|---|
| 549 | 649 | bool no_wait_gpu) |
|---|
| .. | .. |
|---|
| 617 | 717 | } |
|---|
| 618 | 718 | |
|---|
| 619 | 719 | static int |
|---|
| 620 | | -nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags) |
|---|
| 720 | +nouveau_ttm_tt_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm, |
|---|
| 721 | + struct ttm_resource *reg) |
|---|
| 621 | 722 | { |
|---|
| 622 | | - /* We'll do this from user space. */ |
|---|
| 623 | | - return 0; |
|---|
| 723 | +#if IS_ENABLED(CONFIG_AGP) |
|---|
| 724 | + struct nouveau_drm *drm = nouveau_bdev(bdev); |
|---|
| 725 | +#endif |
|---|
| 726 | + if (!reg) |
|---|
| 727 | + return -EINVAL; |
|---|
| 728 | +#if IS_ENABLED(CONFIG_AGP) |
|---|
| 729 | + if (drm->agp.bridge) |
|---|
| 730 | + return ttm_agp_bind(ttm, reg); |
|---|
| 731 | +#endif |
|---|
| 732 | + return nouveau_sgdma_bind(bdev, ttm, reg); |
|---|
| 624 | 733 | } |
|---|
| 625 | 734 | |
|---|
| 626 | | -static int |
|---|
| 627 | | -nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, |
|---|
| 628 | | - struct ttm_mem_type_manager *man) |
|---|
| 735 | +static void |
|---|
| 736 | +nouveau_ttm_tt_unbind(struct ttm_bo_device *bdev, struct ttm_tt *ttm) |
|---|
| 629 | 737 | { |
|---|
| 738 | +#if IS_ENABLED(CONFIG_AGP) |
|---|
| 630 | 739 | struct nouveau_drm *drm = nouveau_bdev(bdev); |
|---|
| 631 | | - struct nvif_mmu *mmu = &drm->client.mmu; |
|---|
| 632 | 740 | |
|---|
| 633 | | - switch (type) { |
|---|
| 634 | | - case TTM_PL_SYSTEM: |
|---|
| 635 | | - man->flags = TTM_MEMTYPE_FLAG_MAPPABLE; |
|---|
| 636 | | - man->available_caching = TTM_PL_MASK_CACHING; |
|---|
| 637 | | - man->default_caching = TTM_PL_FLAG_CACHED; |
|---|
| 638 | | - break; |
|---|
| 639 | | - case TTM_PL_VRAM: |
|---|
| 640 | | - man->flags = TTM_MEMTYPE_FLAG_FIXED | |
|---|
| 641 | | - TTM_MEMTYPE_FLAG_MAPPABLE; |
|---|
| 642 | | - man->available_caching = TTM_PL_FLAG_UNCACHED | |
|---|
| 643 | | - TTM_PL_FLAG_WC; |
|---|
| 644 | | - man->default_caching = TTM_PL_FLAG_WC; |
|---|
| 645 | | - |
|---|
| 646 | | - if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) { |
|---|
| 647 | | - /* Some BARs do not support being ioremapped WC */ |
|---|
| 648 | | - const u8 type = mmu->type[drm->ttm.type_vram].type; |
|---|
| 649 | | - if (type & NVIF_MEM_UNCACHED) { |
|---|
| 650 | | - man->available_caching = TTM_PL_FLAG_UNCACHED; |
|---|
| 651 | | - man->default_caching = TTM_PL_FLAG_UNCACHED; |
|---|
| 652 | | - } |
|---|
| 653 | | - |
|---|
| 654 | | - man->func = &nouveau_vram_manager; |
|---|
| 655 | | - man->io_reserve_fastpath = false; |
|---|
| 656 | | - man->use_io_reserve_lru = true; |
|---|
| 657 | | - } else { |
|---|
| 658 | | - man->func = &ttm_bo_manager_func; |
|---|
| 659 | | - } |
|---|
| 660 | | - break; |
|---|
| 661 | | - case TTM_PL_TT: |
|---|
| 662 | | - if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) |
|---|
| 663 | | - man->func = &nouveau_gart_manager; |
|---|
| 664 | | - else |
|---|
| 665 | | - if (!drm->agp.bridge) |
|---|
| 666 | | - man->func = &nv04_gart_manager; |
|---|
| 667 | | - else |
|---|
| 668 | | - man->func = &ttm_bo_manager_func; |
|---|
| 669 | | - |
|---|
| 670 | | - if (drm->agp.bridge) { |
|---|
| 671 | | - man->flags = TTM_MEMTYPE_FLAG_MAPPABLE; |
|---|
| 672 | | - man->available_caching = TTM_PL_FLAG_UNCACHED | |
|---|
| 673 | | - TTM_PL_FLAG_WC; |
|---|
| 674 | | - man->default_caching = TTM_PL_FLAG_WC; |
|---|
| 675 | | - } else { |
|---|
| 676 | | - man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | |
|---|
| 677 | | - TTM_MEMTYPE_FLAG_CMA; |
|---|
| 678 | | - man->available_caching = TTM_PL_MASK_CACHING; |
|---|
| 679 | | - man->default_caching = TTM_PL_FLAG_CACHED; |
|---|
| 680 | | - } |
|---|
| 681 | | - |
|---|
| 682 | | - break; |
|---|
| 683 | | - default: |
|---|
| 684 | | - return -EINVAL; |
|---|
| 741 | + if (drm->agp.bridge) { |
|---|
| 742 | + ttm_agp_unbind(ttm); |
|---|
| 743 | + return; |
|---|
| 685 | 744 | } |
|---|
| 686 | | - return 0; |
|---|
| 745 | +#endif |
|---|
| 746 | + nouveau_sgdma_unbind(bdev, ttm); |
|---|
| 687 | 747 | } |
|---|
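Note: nouveau_bo_invalidate_caches() and nouveau_bo_init_mem_type() are removed; the per-memory-type caching and mappability information they set up now comes directly from the placements built in set_placement_list(), and the VRAM/GART resource managers are presumably registered from nouveau_ttm.c in this series. In their place, binding a ttm_tt becomes an explicit driver callback pair that picks AGP or the sgdma backend:

```c
/* Condensed dispatch, as implemented above.  The CONFIG_AGP guards
 * mirror the hunk: ttm_agp_bind()/ttm_agp_unbind() are only built when
 * AGP support is enabled. */
static int
nouveau_ttm_tt_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm,
		    struct ttm_resource *reg)
{
	if (!reg)
		return -EINVAL;
#if IS_ENABLED(CONFIG_AGP)
	if (nouveau_bdev(bdev)->agp.bridge)
		return ttm_agp_bind(ttm, reg);		/* AGP aperture */
#endif
	return nouveau_sgdma_bind(bdev, ttm, reg);	/* GPU VMM path */
}
```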
| 688 | 748 | |
|---|
| 689 | 749 | static void |
|---|
| .. | .. |
|---|
| 693 | 753 | |
|---|
| 694 | 754 | switch (bo->mem.mem_type) { |
|---|
| 695 | 755 | case TTM_PL_VRAM: |
|---|
| 696 | | - nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT, |
|---|
| 697 | | - TTM_PL_FLAG_SYSTEM); |
|---|
| 756 | + nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, |
|---|
| 757 | + NOUVEAU_GEM_DOMAIN_CPU); |
|---|
| 698 | 758 | break; |
|---|
| 699 | 759 | default: |
|---|
| 700 | | - nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0); |
|---|
| 760 | + nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_CPU, 0); |
|---|
| 701 | 761 | break; |
|---|
| 702 | 762 | } |
|---|
| 703 | 763 | |
|---|
| 704 | 764 | *pl = nvbo->placement; |
|---|
| 705 | 765 | } |
|---|
| 706 | 766 | |
|---|
| 707 | | - |
|---|
| 708 | | -static int |
|---|
| 709 | | -nve0_bo_move_init(struct nouveau_channel *chan, u32 handle) |
|---|
| 710 | | -{ |
|---|
| 711 | | - int ret = RING_SPACE(chan, 2); |
|---|
| 712 | | - if (ret == 0) { |
|---|
| 713 | | - BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1); |
|---|
| 714 | | - OUT_RING (chan, handle & 0x0000ffff); |
|---|
| 715 | | - FIRE_RING (chan); |
|---|
| 716 | | - } |
|---|
| 717 | | - return ret; |
|---|
| 718 | | -} |
|---|
| 719 | | - |
|---|
| 720 | | -static int |
|---|
| 721 | | -nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo, |
|---|
| 722 | | - struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg) |
|---|
| 723 | | -{ |
|---|
| 724 | | - struct nouveau_mem *mem = nouveau_mem(old_reg); |
|---|
| 725 | | - int ret = RING_SPACE(chan, 10); |
|---|
| 726 | | - if (ret == 0) { |
|---|
| 727 | | - BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8); |
|---|
| 728 | | - OUT_RING (chan, upper_32_bits(mem->vma[0].addr)); |
|---|
| 729 | | - OUT_RING (chan, lower_32_bits(mem->vma[0].addr)); |
|---|
| 730 | | - OUT_RING (chan, upper_32_bits(mem->vma[1].addr)); |
|---|
| 731 | | - OUT_RING (chan, lower_32_bits(mem->vma[1].addr)); |
|---|
| 732 | | - OUT_RING (chan, PAGE_SIZE); |
|---|
| 733 | | - OUT_RING (chan, PAGE_SIZE); |
|---|
| 734 | | - OUT_RING (chan, PAGE_SIZE); |
|---|
| 735 | | - OUT_RING (chan, new_reg->num_pages); |
|---|
| 736 | | - BEGIN_IMC0(chan, NvSubCopy, 0x0300, 0x0386); |
|---|
| 737 | | - } |
|---|
| 738 | | - return ret; |
|---|
| 739 | | -} |
|---|
| 740 | | - |
|---|
| 741 | | -static int |
|---|
| 742 | | -nvc0_bo_move_init(struct nouveau_channel *chan, u32 handle) |
|---|
| 743 | | -{ |
|---|
| 744 | | - int ret = RING_SPACE(chan, 2); |
|---|
| 745 | | - if (ret == 0) { |
|---|
| 746 | | - BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1); |
|---|
| 747 | | - OUT_RING (chan, handle); |
|---|
| 748 | | - } |
|---|
| 749 | | - return ret; |
|---|
| 750 | | -} |
|---|
| 751 | | - |
|---|
| 752 | | -static int |
|---|
| 753 | | -nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo, |
|---|
| 754 | | - struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg) |
|---|
| 755 | | -{ |
|---|
| 756 | | - struct nouveau_mem *mem = nouveau_mem(old_reg); |
|---|
| 757 | | - u64 src_offset = mem->vma[0].addr; |
|---|
| 758 | | - u64 dst_offset = mem->vma[1].addr; |
|---|
| 759 | | - u32 page_count = new_reg->num_pages; |
|---|
| 760 | | - int ret; |
|---|
| 761 | | - |
|---|
| 762 | | - page_count = new_reg->num_pages; |
|---|
| 763 | | - while (page_count) { |
|---|
| 764 | | - int line_count = (page_count > 8191) ? 8191 : page_count; |
|---|
| 765 | | - |
|---|
| 766 | | - ret = RING_SPACE(chan, 11); |
|---|
| 767 | | - if (ret) |
|---|
| 768 | | - return ret; |
|---|
| 769 | | - |
|---|
| 770 | | - BEGIN_NVC0(chan, NvSubCopy, 0x030c, 8); |
|---|
| 771 | | - OUT_RING (chan, upper_32_bits(src_offset)); |
|---|
| 772 | | - OUT_RING (chan, lower_32_bits(src_offset)); |
|---|
| 773 | | - OUT_RING (chan, upper_32_bits(dst_offset)); |
|---|
| 774 | | - OUT_RING (chan, lower_32_bits(dst_offset)); |
|---|
| 775 | | - OUT_RING (chan, PAGE_SIZE); |
|---|
| 776 | | - OUT_RING (chan, PAGE_SIZE); |
|---|
| 777 | | - OUT_RING (chan, PAGE_SIZE); |
|---|
| 778 | | - OUT_RING (chan, line_count); |
|---|
| 779 | | - BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1); |
|---|
| 780 | | - OUT_RING (chan, 0x00000110); |
|---|
| 781 | | - |
|---|
| 782 | | - page_count -= line_count; |
|---|
| 783 | | - src_offset += (PAGE_SIZE * line_count); |
|---|
| 784 | | - dst_offset += (PAGE_SIZE * line_count); |
|---|
| 785 | | - } |
|---|
| 786 | | - |
|---|
| 787 | | - return 0; |
|---|
| 788 | | -} |
|---|
| 789 | | - |
|---|
| 790 | | -static int |
|---|
| 791 | | -nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, |
|---|
| 792 | | - struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg) |
|---|
| 793 | | -{ |
|---|
| 794 | | - struct nouveau_mem *mem = nouveau_mem(old_reg); |
|---|
| 795 | | - u64 src_offset = mem->vma[0].addr; |
|---|
| 796 | | - u64 dst_offset = mem->vma[1].addr; |
|---|
| 797 | | - u32 page_count = new_reg->num_pages; |
|---|
| 798 | | - int ret; |
|---|
| 799 | | - |
|---|
| 800 | | - page_count = new_reg->num_pages; |
|---|
| 801 | | - while (page_count) { |
|---|
| 802 | | - int line_count = (page_count > 2047) ? 2047 : page_count; |
|---|
| 803 | | - |
|---|
| 804 | | - ret = RING_SPACE(chan, 12); |
|---|
| 805 | | - if (ret) |
|---|
| 806 | | - return ret; |
|---|
| 807 | | - |
|---|
| 808 | | - BEGIN_NVC0(chan, NvSubCopy, 0x0238, 2); |
|---|
| 809 | | - OUT_RING (chan, upper_32_bits(dst_offset)); |
|---|
| 810 | | - OUT_RING (chan, lower_32_bits(dst_offset)); |
|---|
| 811 | | - BEGIN_NVC0(chan, NvSubCopy, 0x030c, 6); |
|---|
| 812 | | - OUT_RING (chan, upper_32_bits(src_offset)); |
|---|
| 813 | | - OUT_RING (chan, lower_32_bits(src_offset)); |
|---|
| 814 | | - OUT_RING (chan, PAGE_SIZE); /* src_pitch */ |
|---|
| 815 | | - OUT_RING (chan, PAGE_SIZE); /* dst_pitch */ |
|---|
| 816 | | - OUT_RING (chan, PAGE_SIZE); /* line_length */ |
|---|
| 817 | | - OUT_RING (chan, line_count); |
|---|
| 818 | | - BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1); |
|---|
| 819 | | - OUT_RING (chan, 0x00100110); |
|---|
| 820 | | - |
|---|
| 821 | | - page_count -= line_count; |
|---|
| 822 | | - src_offset += (PAGE_SIZE * line_count); |
|---|
| 823 | | - dst_offset += (PAGE_SIZE * line_count); |
|---|
| 824 | | - } |
|---|
| 825 | | - |
|---|
| 826 | | - return 0; |
|---|
| 827 | | -} |
|---|
| 828 | | - |
|---|
| 829 | | -static int |
|---|
| 830 | | -nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo, |
|---|
| 831 | | - struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg) |
|---|
| 832 | | -{ |
|---|
| 833 | | - struct nouveau_mem *mem = nouveau_mem(old_reg); |
|---|
| 834 | | - u64 src_offset = mem->vma[0].addr; |
|---|
| 835 | | - u64 dst_offset = mem->vma[1].addr; |
|---|
| 836 | | - u32 page_count = new_reg->num_pages; |
|---|
| 837 | | - int ret; |
|---|
| 838 | | - |
|---|
| 839 | | - page_count = new_reg->num_pages; |
|---|
| 840 | | - while (page_count) { |
|---|
| 841 | | - int line_count = (page_count > 8191) ? 8191 : page_count; |
|---|
| 842 | | - |
|---|
| 843 | | - ret = RING_SPACE(chan, 11); |
|---|
| 844 | | - if (ret) |
|---|
| 845 | | - return ret; |
|---|
| 846 | | - |
|---|
| 847 | | - BEGIN_NV04(chan, NvSubCopy, 0x030c, 8); |
|---|
| 848 | | - OUT_RING (chan, upper_32_bits(src_offset)); |
|---|
| 849 | | - OUT_RING (chan, lower_32_bits(src_offset)); |
|---|
| 850 | | - OUT_RING (chan, upper_32_bits(dst_offset)); |
|---|
| 851 | | - OUT_RING (chan, lower_32_bits(dst_offset)); |
|---|
| 852 | | - OUT_RING (chan, PAGE_SIZE); |
|---|
| 853 | | - OUT_RING (chan, PAGE_SIZE); |
|---|
| 854 | | - OUT_RING (chan, PAGE_SIZE); |
|---|
| 855 | | - OUT_RING (chan, line_count); |
|---|
| 856 | | - BEGIN_NV04(chan, NvSubCopy, 0x0300, 1); |
|---|
| 857 | | - OUT_RING (chan, 0x00000110); |
|---|
| 858 | | - |
|---|
| 859 | | - page_count -= line_count; |
|---|
| 860 | | - src_offset += (PAGE_SIZE * line_count); |
|---|
| 861 | | - dst_offset += (PAGE_SIZE * line_count); |
|---|
| 862 | | - } |
|---|
| 863 | | - |
|---|
| 864 | | - return 0; |
|---|
| 865 | | -} |
|---|
| 866 | | - |
|---|
| 867 | | -static int |
|---|
| 868 | | -nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo, |
|---|
| 869 | | - struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg) |
|---|
| 870 | | -{ |
|---|
| 871 | | - struct nouveau_mem *mem = nouveau_mem(old_reg); |
|---|
| 872 | | - int ret = RING_SPACE(chan, 7); |
|---|
| 873 | | - if (ret == 0) { |
|---|
| 874 | | - BEGIN_NV04(chan, NvSubCopy, 0x0320, 6); |
|---|
| 875 | | - OUT_RING (chan, upper_32_bits(mem->vma[0].addr)); |
|---|
| 876 | | - OUT_RING (chan, lower_32_bits(mem->vma[0].addr)); |
|---|
| 877 | | - OUT_RING (chan, upper_32_bits(mem->vma[1].addr)); |
|---|
| 878 | | - OUT_RING (chan, lower_32_bits(mem->vma[1].addr)); |
|---|
| 879 | | - OUT_RING (chan, 0x00000000 /* COPY */); |
|---|
| 880 | | - OUT_RING (chan, new_reg->num_pages << PAGE_SHIFT); |
|---|
| 881 | | - } |
|---|
| 882 | | - return ret; |
|---|
| 883 | | -} |
|---|
| 884 | | - |
|---|
| 885 | | -static int |
|---|
| 886 | | -nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo, |
|---|
| 887 | | - struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg) |
|---|
| 888 | | -{ |
|---|
| 889 | | - struct nouveau_mem *mem = nouveau_mem(old_reg); |
|---|
| 890 | | - int ret = RING_SPACE(chan, 7); |
|---|
| 891 | | - if (ret == 0) { |
|---|
| 892 | | - BEGIN_NV04(chan, NvSubCopy, 0x0304, 6); |
|---|
| 893 | | - OUT_RING (chan, new_reg->num_pages << PAGE_SHIFT); |
|---|
| 894 | | - OUT_RING (chan, upper_32_bits(mem->vma[0].addr)); |
|---|
| 895 | | - OUT_RING (chan, lower_32_bits(mem->vma[0].addr)); |
|---|
| 896 | | - OUT_RING (chan, upper_32_bits(mem->vma[1].addr)); |
|---|
| 897 | | - OUT_RING (chan, lower_32_bits(mem->vma[1].addr)); |
|---|
| 898 | | - OUT_RING (chan, 0x00000000 /* MODE_COPY, QUERY_NONE */); |
|---|
| 899 | | - } |
|---|
| 900 | | - return ret; |
|---|
| 901 | | -} |
|---|
| 902 | | - |
|---|
| 903 | | -static int |
|---|
| 904 | | -nv50_bo_move_init(struct nouveau_channel *chan, u32 handle) |
|---|
| 905 | | -{ |
|---|
| 906 | | - int ret = RING_SPACE(chan, 6); |
|---|
| 907 | | - if (ret == 0) { |
|---|
| 908 | | - BEGIN_NV04(chan, NvSubCopy, 0x0000, 1); |
|---|
| 909 | | - OUT_RING (chan, handle); |
|---|
| 910 | | - BEGIN_NV04(chan, NvSubCopy, 0x0180, 3); |
|---|
| 911 | | - OUT_RING (chan, chan->drm->ntfy.handle); |
|---|
| 912 | | - OUT_RING (chan, chan->vram.handle); |
|---|
| 913 | | - OUT_RING (chan, chan->vram.handle); |
|---|
| 914 | | - } |
|---|
| 915 | | - |
|---|
| 916 | | - return ret; |
|---|
| 917 | | -} |
|---|
| 918 | | - |
|---|
| 919 | | -static int |
|---|
| 920 | | -nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, |
|---|
| 921 | | - struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg) |
|---|
| 922 | | -{ |
|---|
| 923 | | - struct nouveau_mem *mem = nouveau_mem(old_reg); |
|---|
| 924 | | - u64 length = (new_reg->num_pages << PAGE_SHIFT); |
|---|
| 925 | | - u64 src_offset = mem->vma[0].addr; |
|---|
| 926 | | - u64 dst_offset = mem->vma[1].addr; |
|---|
| 927 | | - int src_tiled = !!mem->kind; |
|---|
| 928 | | - int dst_tiled = !!nouveau_mem(new_reg)->kind; |
|---|
| 929 | | - int ret; |
|---|
| 930 | | - |
|---|
| 931 | | - while (length) { |
|---|
| 932 | | - u32 amount, stride, height; |
|---|
| 933 | | - |
|---|
| 934 | | - ret = RING_SPACE(chan, 18 + 6 * (src_tiled + dst_tiled)); |
|---|
| 935 | | - if (ret) |
|---|
| 936 | | - return ret; |
|---|
| 937 | | - |
|---|
| 938 | | - amount = min(length, (u64)(4 * 1024 * 1024)); |
|---|
| 939 | | - stride = 16 * 4; |
|---|
| 940 | | - height = amount / stride; |
|---|
| 941 | | - |
|---|
| 942 | | - if (src_tiled) { |
|---|
| 943 | | - BEGIN_NV04(chan, NvSubCopy, 0x0200, 7); |
|---|
| 944 | | - OUT_RING (chan, 0); |
|---|
| 945 | | - OUT_RING (chan, 0); |
|---|
| 946 | | - OUT_RING (chan, stride); |
|---|
| 947 | | - OUT_RING (chan, height); |
|---|
| 948 | | - OUT_RING (chan, 1); |
|---|
| 949 | | - OUT_RING (chan, 0); |
|---|
| 950 | | - OUT_RING (chan, 0); |
|---|
| 951 | | - } else { |
|---|
| 952 | | - BEGIN_NV04(chan, NvSubCopy, 0x0200, 1); |
|---|
| 953 | | - OUT_RING (chan, 1); |
|---|
| 954 | | - } |
|---|
| 955 | | - if (dst_tiled) { |
|---|
| 956 | | - BEGIN_NV04(chan, NvSubCopy, 0x021c, 7); |
|---|
| 957 | | - OUT_RING (chan, 0); |
|---|
| 958 | | - OUT_RING (chan, 0); |
|---|
| 959 | | - OUT_RING (chan, stride); |
|---|
| 960 | | - OUT_RING (chan, height); |
|---|
| 961 | | - OUT_RING (chan, 1); |
|---|
| 962 | | - OUT_RING (chan, 0); |
|---|
| 963 | | - OUT_RING (chan, 0); |
|---|
| 964 | | - } else { |
|---|
| 965 | | - BEGIN_NV04(chan, NvSubCopy, 0x021c, 1); |
|---|
| 966 | | - OUT_RING (chan, 1); |
|---|
| 967 | | - } |
|---|
| 968 | | - |
|---|
| 969 | | - BEGIN_NV04(chan, NvSubCopy, 0x0238, 2); |
|---|
| 970 | | - OUT_RING (chan, upper_32_bits(src_offset)); |
|---|
| 971 | | - OUT_RING (chan, upper_32_bits(dst_offset)); |
|---|
| 972 | | - BEGIN_NV04(chan, NvSubCopy, 0x030c, 8); |
|---|
| 973 | | - OUT_RING (chan, lower_32_bits(src_offset)); |
|---|
| 974 | | - OUT_RING (chan, lower_32_bits(dst_offset)); |
|---|
| 975 | | - OUT_RING (chan, stride); |
|---|
| 976 | | - OUT_RING (chan, stride); |
|---|
| 977 | | - OUT_RING (chan, stride); |
|---|
| 978 | | - OUT_RING (chan, height); |
|---|
| 979 | | - OUT_RING (chan, 0x00000101); |
|---|
| 980 | | - OUT_RING (chan, 0x00000000); |
|---|
| 981 | | - BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1); |
|---|
| 982 | | - OUT_RING (chan, 0); |
|---|
| 983 | | - |
|---|
| 984 | | - length -= amount; |
|---|
| 985 | | - src_offset += amount; |
|---|
| 986 | | - dst_offset += amount; |
|---|
| 987 | | - } |
|---|
| 988 | | - |
|---|
| 989 | | - return 0; |
|---|
| 990 | | -} |
|---|
| 991 | | - |
|---|
| 992 | | -static int |
|---|
| 993 | | -nv04_bo_move_init(struct nouveau_channel *chan, u32 handle) |
|---|
| 994 | | -{ |
|---|
| 995 | | - int ret = RING_SPACE(chan, 4); |
|---|
| 996 | | - if (ret == 0) { |
|---|
| 997 | | - BEGIN_NV04(chan, NvSubCopy, 0x0000, 1); |
|---|
| 998 | | - OUT_RING (chan, handle); |
|---|
| 999 | | - BEGIN_NV04(chan, NvSubCopy, 0x0180, 1); |
|---|
| 1000 | | - OUT_RING (chan, chan->drm->ntfy.handle); |
|---|
| 1001 | | - } |
|---|
| 1002 | | - |
|---|
| 1003 | | - return ret; |
|---|
| 1004 | | -} |
|---|
| 1005 | | - |
|---|
| 1006 | | -static inline uint32_t |
|---|
| 1007 | | -nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo, |
|---|
| 1008 | | - struct nouveau_channel *chan, struct ttm_mem_reg *reg) |
|---|
| 1009 | | -{ |
|---|
| 1010 | | - if (reg->mem_type == TTM_PL_TT) |
|---|
| 1011 | | - return NvDmaTT; |
|---|
| 1012 | | - return chan->vram.handle; |
|---|
| 1013 | | -} |
|---|
| 1014 | | - |
|---|
| 1015 | | -static int |
|---|
| 1016 | | -nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, |
|---|
| 1017 | | - struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg) |
|---|
| 1018 | | -{ |
|---|
| 1019 | | - u32 src_offset = old_reg->start << PAGE_SHIFT; |
|---|
| 1020 | | - u32 dst_offset = new_reg->start << PAGE_SHIFT; |
|---|
| 1021 | | - u32 page_count = new_reg->num_pages; |
|---|
| 1022 | | - int ret; |
|---|
| 1023 | | - |
|---|
| 1024 | | - ret = RING_SPACE(chan, 3); |
|---|
| 1025 | | - if (ret) |
|---|
| 1026 | | - return ret; |
|---|
| 1027 | | - |
|---|
| 1028 | | - BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2); |
|---|
| 1029 | | - OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, old_reg)); |
|---|
| 1030 | | - OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, new_reg)); |
|---|
| 1031 | | - |
|---|
| 1032 | | - page_count = new_reg->num_pages; |
|---|
| 1033 | | - while (page_count) { |
|---|
| 1034 | | - int line_count = (page_count > 2047) ? 2047 : page_count; |
|---|
| 1035 | | - |
|---|
| 1036 | | - ret = RING_SPACE(chan, 11); |
|---|
| 1037 | | - if (ret) |
|---|
| 1038 | | - return ret; |
|---|
| 1039 | | - |
|---|
| 1040 | | - BEGIN_NV04(chan, NvSubCopy, |
|---|
| 1041 | | - NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8); |
|---|
| 1042 | | - OUT_RING (chan, src_offset); |
|---|
| 1043 | | - OUT_RING (chan, dst_offset); |
|---|
| 1044 | | - OUT_RING (chan, PAGE_SIZE); /* src_pitch */ |
|---|
| 1045 | | - OUT_RING (chan, PAGE_SIZE); /* dst_pitch */ |
|---|
| 1046 | | - OUT_RING (chan, PAGE_SIZE); /* line_length */ |
|---|
| 1047 | | - OUT_RING (chan, line_count); |
|---|
| 1048 | | - OUT_RING (chan, 0x00000101); |
|---|
| 1049 | | - OUT_RING (chan, 0x00000000); |
|---|
| 1050 | | - BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1); |
|---|
| 1051 | | - OUT_RING (chan, 0); |
|---|
| 1052 | | - |
|---|
| 1053 | | - page_count -= line_count; |
|---|
| 1054 | | - src_offset += (PAGE_SIZE * line_count); |
|---|
| 1055 | | - dst_offset += (PAGE_SIZE * line_count); |
|---|
| 1056 | | - } |
|---|
| 1057 | | - |
|---|
| 1058 | | - return 0; |
|---|
| 1059 | | -} |
|---|
| 1060 | | - |
|---|
| 1061 | 767 | static int |
|---|
| 1062 | 768 | nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo, |
|---|
| 1063 | | - struct ttm_mem_reg *reg) |
|---|
| 769 | + struct ttm_resource *reg) |
|---|
| 1064 | 770 | { |
|---|
| 1065 | 771 | struct nouveau_mem *old_mem = nouveau_mem(&bo->mem); |
|---|
| 1066 | 772 | struct nouveau_mem *new_mem = nouveau_mem(reg); |
|---|
| .. | .. |
|---|
| 1092 | 798 | |
|---|
| 1093 | 799 | static int |
|---|
| 1094 | 800 | nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr, |
|---|
| 1095 | | - bool no_wait_gpu, struct ttm_mem_reg *new_reg) |
|---|
| 801 | + bool no_wait_gpu, struct ttm_resource *new_reg) |
|---|
| 1096 | 802 | { |
|---|
| 1097 | 803 | struct nouveau_drm *drm = nouveau_bdev(bo->bdev); |
|---|
| 1098 | 804 | struct nouveau_channel *chan = drm->ttm.chan; |
|---|
| .. | .. |
|---|
| 1102 | 808 | |
|---|
| 1103 | 809 | /* create temporary vmas for the transfer and attach them to the |
|---|
| 1104 | 810 | * old nvkm_mem node, these will get cleaned up after ttm has |
|---|
| 1105 | | - * destroyed the ttm_mem_reg |
|---|
| 811 | + * destroyed the ttm_resource |
|---|
| 1106 | 812 | */ |
|---|
| 1107 | 813 | if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) { |
|---|
| 1108 | 814 | ret = nouveau_bo_move_prep(drm, bo, new_reg); |
|---|
| .. | .. |
|---|
| 1117 | 823 | if (ret == 0) { |
|---|
| 1118 | 824 | ret = nouveau_fence_new(chan, false, &fence); |
|---|
| 1119 | 825 | if (ret == 0) { |
|---|
| 826 | + /* TODO: figure out a better solution here |
|---|
| 827 | + * |
|---|
| 828 | + * wait on the fence here explicitly as going through |
|---|
| 829 | + * ttm_bo_move_accel_cleanup somehow doesn't seem to do it. |
|---|
| 830 | + * |
|---|
| 831 | + * Without this the operation can timeout and we'll fallback to a |
|---|
| 832 | + * software copy, which might take several minutes to finish. |
|---|
| 833 | + */ |
|---|
| 834 | + nouveau_fence_wait(fence, false, false); |
|---|
| 1120 | 835 | ret = ttm_bo_move_accel_cleanup(bo, |
|---|
| 1121 | 836 | &fence->base, |
|---|
| 1122 | | - evict, |
|---|
| 837 | + evict, false, |
|---|
| 1123 | 838 | new_reg); |
|---|
| 1124 | 839 | nouveau_fence_unref(&fence); |
|---|
| 1125 | 840 | } |
|---|
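Note: the accelerated copy now waits on the fence explicitly before handing it to ttm_bo_move_accel_cleanup(); per the TODO above this is a workaround for the cleanup path not reliably waiting, which could let the blit time out and force a fallback to a very slow CPU copy. The extra `false` argument appears to correspond to the additional "pipeline" parameter ttm_bo_move_accel_cleanup() takes at this point of the TTM rework (an assumption from the call site; the TTM prototype is not part of this diff). The resulting sequence:

```c
ret = nouveau_fence_new(chan, false, &fence);
if (ret == 0) {
	/* Workaround (see the TODO comment): make sure the copy has
	 * finished before TTM finalizes the move. */
	nouveau_fence_wait(fence, false, false);
	ret = ttm_bo_move_accel_cleanup(bo, &fence->base, evict,
					false /* no pipelining, assumed */,
					new_reg);
	nouveau_fence_unref(&fence);
}
```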
| .. | .. |
|---|
| 1132 | 847 | void |
|---|
| 1133 | 848 | nouveau_bo_move_init(struct nouveau_drm *drm) |
|---|
| 1134 | 849 | { |
|---|
| 1135 | | - static const struct { |
|---|
| 850 | + static const struct _method_table { |
|---|
| 1136 | 851 | const char *name; |
|---|
| 1137 | 852 | int engine; |
|---|
| 1138 | 853 | s32 oclass; |
|---|
| 1139 | 854 | int (*exec)(struct nouveau_channel *, |
|---|
| 1140 | 855 | struct ttm_buffer_object *, |
|---|
| 1141 | | - struct ttm_mem_reg *, struct ttm_mem_reg *); |
|---|
| 856 | + struct ttm_resource *, struct ttm_resource *); |
|---|
| 1142 | 857 | int (*init)(struct nouveau_channel *, u32 handle); |
|---|
| 1143 | 858 | } _methods[] = { |
|---|
| 859 | + { "COPY", 4, 0xc5b5, nve0_bo_move_copy, nve0_bo_move_init }, |
|---|
| 860 | + { "GRCE", 0, 0xc5b5, nve0_bo_move_copy, nvc0_bo_move_init }, |
|---|
| 1144 | 861 | { "COPY", 4, 0xc3b5, nve0_bo_move_copy, nve0_bo_move_init }, |
|---|
| 1145 | 862 | { "GRCE", 0, 0xc3b5, nve0_bo_move_copy, nvc0_bo_move_init }, |
|---|
| 1146 | 863 | { "COPY", 4, 0xc1b5, nve0_bo_move_copy, nve0_bo_move_init }, |
|---|
| .. | .. |
|---|
| 1159 | 876 | { "M2MF", 0, 0x5039, nv50_bo_move_m2mf, nv50_bo_move_init }, |
|---|
| 1160 | 877 | { "M2MF", 0, 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init }, |
|---|
| 1161 | 878 | {}, |
|---|
| 1162 | | - { "CRYPT", 0, 0x88b4, nv98_bo_move_exec, nv50_bo_move_init }, |
|---|
| 1163 | | - }, *mthd = _methods; |
|---|
| 879 | + }; |
|---|
| 880 | + const struct _method_table *mthd = _methods; |
|---|
| 1164 | 881 | const char *name = "CPU"; |
|---|
| 1165 | 882 | int ret; |
|---|
| 1166 | 883 | |
|---|
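Note: the copy-engine table gains a named type, struct _method_table, so the iterator can be declared separately as `const struct _method_table *mthd` instead of being tacked onto the anonymous-struct definition; two Turing-class entries (0xc5b5) are added ahead of the existing ones, and the stray CRYPT/0x88b4 entry that sat after the {} terminator (and was therefore never reachable) is dropped. The per-generation copy and init routines (nv04_bo_move_m2mf() through nve0_bo_move_copy()) are deleted from this file in the same series, presumably moving to per-class source files, which is why the table can still reference them without local definitions. The shape of the change:

```c
/* Named table type; entries and terminator as in the hunk above. */
static const struct _method_table {
	const char *name;
	int engine;
	s32 oclass;
	int (*exec)(struct nouveau_channel *, struct ttm_buffer_object *,
		    struct ttm_resource *, struct ttm_resource *);
	int (*init)(struct nouveau_channel *, u32 handle);
} _methods[] = {
	{ "COPY", 4, 0xc5b5, nve0_bo_move_copy, nve0_bo_move_init },
	/* ... older copy classes ... */
	{},	/* terminator: nothing may follow it */
};
const struct _method_table *mthd = _methods;
```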
| .. | .. |
|---|
| 1174 | 891 | if (chan == NULL) |
|---|
| 1175 | 892 | continue; |
|---|
| 1176 | 893 | |
|---|
| 1177 | | - ret = nvif_object_init(&chan->user, |
|---|
| 894 | + ret = nvif_object_ctor(&chan->user, "ttmBoMove", |
|---|
| 1178 | 895 | mthd->oclass | (mthd->engine << 16), |
|---|
| 1179 | 896 | mthd->oclass, NULL, 0, |
|---|
| 1180 | 897 | &drm->ttm.copy); |
|---|
| 1181 | 898 | if (ret == 0) { |
|---|
| 1182 | 899 | ret = mthd->init(chan, drm->ttm.copy.handle); |
|---|
| 1183 | 900 | if (ret) { |
|---|
| 1184 | | - nvif_object_fini(&drm->ttm.copy); |
|---|
| 901 | + nvif_object_dtor(&drm->ttm.copy); |
|---|
| 1185 | 902 | continue; |
|---|
| 1186 | 903 | } |
|---|
| 1187 | 904 | |
|---|
| .. | .. |
|---|
| 1197 | 914 | |
|---|
| 1198 | 915 | static int |
|---|
| 1199 | 916 | nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr, |
|---|
| 1200 | | - bool no_wait_gpu, struct ttm_mem_reg *new_reg) |
|---|
| 917 | + bool no_wait_gpu, struct ttm_resource *new_reg) |
|---|
| 1201 | 918 | { |
|---|
| 1202 | 919 | struct ttm_operation_ctx ctx = { intr, no_wait_gpu }; |
|---|
| 1203 | 920 | struct ttm_place placement_memtype = { |
|---|
| 1204 | 921 | .fpfn = 0, |
|---|
| 1205 | 922 | .lpfn = 0, |
|---|
| 1206 | | - .flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING |
|---|
| 923 | + .mem_type = TTM_PL_TT, |
|---|
| 924 | + .flags = TTM_PL_MASK_CACHING |
|---|
| 1207 | 925 | }; |
|---|
| 1208 | 926 | struct ttm_placement placement; |
|---|
| 1209 | | - struct ttm_mem_reg tmp_reg; |
|---|
| 927 | + struct ttm_resource tmp_reg; |
|---|
| 1210 | 928 | int ret; |
|---|
| 1211 | 929 | |
|---|
| 1212 | 930 | placement.num_placement = placement.num_busy_placement = 1; |
|---|
| .. | .. |
|---|
| 1218 | 936 | if (ret) |
|---|
| 1219 | 937 | return ret; |
|---|
| 1220 | 938 | |
|---|
| 1221 | | - ret = ttm_tt_bind(bo->ttm, &tmp_reg, &ctx); |
|---|
| 939 | + ret = ttm_tt_populate(bo->bdev, bo->ttm, &ctx); |
|---|
| 940 | + if (ret) |
|---|
| 941 | + goto out; |
|---|
| 942 | + |
|---|
| 943 | + ret = nouveau_ttm_tt_bind(bo->bdev, bo->ttm, &tmp_reg); |
|---|
| 1222 | 944 | if (ret) |
|---|
| 1223 | 945 | goto out; |
|---|
| 1224 | 946 | |
|---|
| .. | .. |
|---|
| 1228 | 950 | |
|---|
| 1229 | 951 | ret = ttm_bo_move_ttm(bo, &ctx, new_reg); |
|---|
| 1230 | 952 | out: |
|---|
| 1231 | | - ttm_bo_mem_put(bo, &tmp_reg); |
|---|
| 953 | + ttm_resource_free(bo, &tmp_reg); |
|---|
| 1232 | 954 | return ret; |
|---|
| 1233 | 955 | } |
|---|
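Note: ttm_tt_bind() no longer exists as a TTM helper, so the VRAM-to-system flip path performs the two halves itself: populate the TT pages, then bind them through the new nouveau_ttm_tt_bind() callback, before blitting into the temporary GART placement. ttm_mem_reg/ttm_bo_mem_put become ttm_resource/ttm_resource_free, and the temporary placement names TTM_PL_TT through the new .mem_type field instead of a TTM_PL_FLAG_TT bit. The core of the changed sequence, condensed from the hunk:

```c
struct ttm_place placement_memtype = {
	.fpfn = 0,
	.lpfn = 0,
	.mem_type = TTM_PL_TT,		/* explicit memory type */
	.flags = TTM_PL_MASK_CACHING	/* only caching left in flags */
};

ret = ttm_tt_populate(bo->bdev, bo->ttm, &ctx);		/* allocate pages */
if (ret)
	goto out;

ret = nouveau_ttm_tt_bind(bo->bdev, bo->ttm, &tmp_reg);	/* AGP or sgdma */
if (ret)
	goto out;
/* ... hardware copy into tmp_reg, then ttm_bo_move_ttm() ... */
out:
	ttm_resource_free(bo, &tmp_reg);	/* was ttm_bo_mem_put() */
```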
| 1234 | 956 | |
|---|
| 1235 | 957 | static int |
|---|
| 1236 | 958 | nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr, |
|---|
| 1237 | | - bool no_wait_gpu, struct ttm_mem_reg *new_reg) |
|---|
| 959 | + bool no_wait_gpu, struct ttm_resource *new_reg) |
|---|
| 1238 | 960 | { |
|---|
| 1239 | 961 | struct ttm_operation_ctx ctx = { intr, no_wait_gpu }; |
|---|
| 1240 | 962 | struct ttm_place placement_memtype = { |
|---|
| 1241 | 963 | .fpfn = 0, |
|---|
| 1242 | 964 | .lpfn = 0, |
|---|
| 1243 | | - .flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING |
|---|
| 965 | + .mem_type = TTM_PL_TT, |
|---|
| 966 | + .flags = TTM_PL_MASK_CACHING |
|---|
| 1244 | 967 | }; |
|---|
| 1245 | 968 | struct ttm_placement placement; |
|---|
| 1246 | | - struct ttm_mem_reg tmp_reg; |
|---|
| 969 | + struct ttm_resource tmp_reg; |
|---|
| 1247 | 970 | int ret; |
|---|
| 1248 | 971 | |
|---|
| 1249 | 972 | placement.num_placement = placement.num_busy_placement = 1; |
|---|
| .. | .. |
|---|
| 1264 | 987 | goto out; |
|---|
| 1265 | 988 | |
|---|
| 1266 | 989 | out: |
|---|
| 1267 | | - ttm_bo_mem_put(bo, &tmp_reg); |
|---|
| 990 | + ttm_resource_free(bo, &tmp_reg); |
|---|
| 1268 | 991 | return ret; |
|---|
| 1269 | 992 | } |
|---|
| 1270 | 993 | |
|---|
| 1271 | 994 | static void |
|---|
| 1272 | 995 | nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, bool evict, |
|---|
| 1273 | | - struct ttm_mem_reg *new_reg) |
|---|
| 996 | + struct ttm_resource *new_reg) |
|---|
| 1274 | 997 | { |
|---|
| 1275 | 998 | struct nouveau_mem *mem = new_reg ? nouveau_mem(new_reg) : NULL; |
|---|
| 1276 | 999 | struct nouveau_bo *nvbo = nouveau_bo(bo); |
|---|
| .. | .. |
|---|
| 1279 | 1002 | /* ttm can now (stupidly) pass the driver bos it didn't create... */ |
|---|
| 1280 | 1003 | if (bo->destroy != nouveau_bo_del_ttm) |
|---|
| 1281 | 1004 | return; |
|---|
| 1005 | + |
|---|
| 1006 | + nouveau_bo_del_io_reserve_lru(bo); |
|---|
| 1282 | 1007 | |
|---|
| 1283 | 1008 | if (mem && new_reg->mem_type != TTM_PL_SYSTEM && |
|---|
| 1284 | 1009 | mem->mem.page == nvbo->page) { |
|---|
| .. | .. |
|---|
| 1291 | 1016 | nouveau_vma_unmap(vma); |
|---|
| 1292 | 1017 | } |
|---|
| 1293 | 1018 | } |
|---|
| 1019 | + |
|---|
| 1020 | + if (new_reg) { |
|---|
| 1021 | + if (new_reg->mm_node) |
|---|
| 1022 | + nvbo->offset = (new_reg->start << PAGE_SHIFT); |
|---|
| 1023 | + else |
|---|
| 1024 | + nvbo->offset = 0; |
|---|
| 1025 | + } |
|---|
| 1026 | + |
|---|
| 1294 | 1027 | } |
|---|
| 1295 | 1028 | |
|---|
| 1296 | 1029 | static int |
|---|
| 1297 | | -nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_reg, |
|---|
| 1030 | +nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_resource *new_reg, |
|---|
| 1298 | 1031 | struct nouveau_drm_tile **new_tile) |
|---|
| 1299 | 1032 | { |
|---|
| 1300 | 1033 | struct nouveau_drm *drm = nouveau_bdev(bo->bdev); |
|---|
| .. | .. |
|---|
| 1321 | 1054 | { |
|---|
| 1322 | 1055 | struct nouveau_drm *drm = nouveau_bdev(bo->bdev); |
|---|
| 1323 | 1056 | struct drm_device *dev = drm->dev; |
|---|
| 1324 | | - struct dma_fence *fence = reservation_object_get_excl(bo->resv); |
|---|
| 1057 | + struct dma_fence *fence = dma_resv_get_excl(bo->base.resv); |
|---|
| 1325 | 1058 | |
|---|
| 1326 | 1059 | nv10_bo_put_tile_region(dev, *old_tile, fence); |
|---|
| 1327 | 1060 | *old_tile = new_tile; |
|---|
| .. | .. |
|---|
| 1330 | 1063 | static int |
|---|
| 1331 | 1064 | nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, |
|---|
| 1332 | 1065 | struct ttm_operation_ctx *ctx, |
|---|
| 1333 | | - struct ttm_mem_reg *new_reg) |
|---|
| 1066 | + struct ttm_resource *new_reg) |
|---|
| 1334 | 1067 | { |
|---|
| 1335 | 1068 | struct nouveau_drm *drm = nouveau_bdev(bo->bdev); |
|---|
| 1336 | 1069 | struct nouveau_bo *nvbo = nouveau_bo(bo); |
|---|
| 1337 | | - struct ttm_mem_reg *old_reg = &bo->mem; |
|---|
| 1070 | + struct ttm_resource *old_reg = &bo->mem; |
|---|
| 1338 | 1071 | struct nouveau_drm_tile *new_tile = NULL; |
|---|
| 1339 | 1072 | int ret = 0; |
|---|
| 1340 | 1073 | |
|---|
| .. | .. |
|---|
| 1353 | 1086 | |
|---|
| 1354 | 1087 | /* Fake bo copy. */ |
|---|
| 1355 | 1088 | if (old_reg->mem_type == TTM_PL_SYSTEM && !bo->ttm) { |
|---|
| 1356 | | - BUG_ON(bo->mem.mm_node != NULL); |
|---|
| 1357 | | - bo->mem = *new_reg; |
|---|
| 1358 | | - new_reg->mm_node = NULL; |
|---|
| 1089 | + ttm_bo_move_null(bo, new_reg); |
|---|
| 1359 | 1090 | goto out; |
|---|
| 1360 | 1091 | } |
|---|
| 1361 | 1092 | |
|---|
| .. | .. |
|---|
| 1398 | 1129 | { |
|---|
| 1399 | 1130 | struct nouveau_bo *nvbo = nouveau_bo(bo); |
|---|
| 1400 | 1131 | |
|---|
| 1401 | | - return drm_vma_node_verify_access(&nvbo->gem.vma_node, |
|---|
| 1132 | + return drm_vma_node_verify_access(&nvbo->bo.base.vma_node, |
|---|
| 1402 | 1133 | filp->private_data); |
|---|
| 1403 | 1134 | } |
|---|
| 1404 | 1135 | |
|---|
| 1405 | | -static int |
|---|
| 1406 | | -nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg) |
|---|
| 1136 | +static void |
|---|
| 1137 | +nouveau_ttm_io_mem_free_locked(struct nouveau_drm *drm, |
|---|
| 1138 | + struct ttm_resource *reg) |
|---|
| 1407 | 1139 | { |
|---|
| 1408 | | - struct ttm_mem_type_manager *man = &bdev->man[reg->mem_type]; |
|---|
| 1140 | + struct nouveau_mem *mem = nouveau_mem(reg); |
|---|
| 1141 | + |
|---|
| 1142 | + if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) { |
|---|
| 1143 | + switch (reg->mem_type) { |
|---|
| 1144 | + case TTM_PL_TT: |
|---|
| 1145 | + if (mem->kind) |
|---|
| 1146 | + nvif_object_unmap_handle(&mem->mem.object); |
|---|
| 1147 | + break; |
|---|
| 1148 | + case TTM_PL_VRAM: |
|---|
| 1149 | + nvif_object_unmap_handle(&mem->mem.object); |
|---|
| 1150 | + break; |
|---|
| 1151 | + default: |
|---|
| 1152 | + break; |
|---|
| 1153 | + } |
|---|
| 1154 | + } |
|---|
| 1155 | +} |
|---|
| 1156 | + |
|---|
| 1157 | +static int |
|---|
| 1158 | +nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *reg) |
|---|
| 1159 | +{ |
|---|
| 1409 | 1160 | struct nouveau_drm *drm = nouveau_bdev(bdev); |
|---|
| 1410 | 1161 | struct nvkm_device *device = nvxx_device(&drm->client.device); |
|---|
| 1411 | 1162 | struct nouveau_mem *mem = nouveau_mem(reg); |
|---|
| 1163 | + int ret; |
|---|
| 1412 | 1164 | |
|---|
| 1413 | | - reg->bus.addr = NULL; |
|---|
| 1414 | | - reg->bus.offset = 0; |
|---|
| 1415 | | - reg->bus.size = reg->num_pages << PAGE_SHIFT; |
|---|
| 1416 | | - reg->bus.base = 0; |
|---|
| 1417 | | - reg->bus.is_iomem = false; |
|---|
| 1418 | | - if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE)) |
|---|
| 1419 | | - return -EINVAL; |
|---|
| 1165 | + mutex_lock(&drm->ttm.io_reserve_mutex); |
|---|
| 1166 | +retry: |
|---|
| 1420 | 1167 | switch (reg->mem_type) { |
|---|
| 1421 | 1168 | case TTM_PL_SYSTEM: |
|---|
| 1422 | 1169 | /* System memory */ |
|---|
| 1423 | | - return 0; |
|---|
| 1170 | + ret = 0; |
|---|
| 1171 | + goto out; |
|---|
| 1424 | 1172 | case TTM_PL_TT: |
|---|
| 1425 | 1173 | #if IS_ENABLED(CONFIG_AGP) |
|---|
| 1426 | 1174 | if (drm->agp.bridge) { |
|---|
| 1427 | | - reg->bus.offset = reg->start << PAGE_SHIFT; |
|---|
| 1428 | | - reg->bus.base = drm->agp.base; |
|---|
| 1175 | + reg->bus.offset = (reg->start << PAGE_SHIFT) + |
|---|
| 1176 | + drm->agp.base; |
|---|
| 1429 | 1177 | reg->bus.is_iomem = !drm->agp.cma; |
|---|
| 1430 | 1178 | } |
|---|
| 1431 | 1179 | #endif |
|---|
| 1432 | | - if (drm->client.mem->oclass < NVIF_CLASS_MEM_NV50 || !mem->kind) |
|---|
| 1180 | + if (drm->client.mem->oclass < NVIF_CLASS_MEM_NV50 || |
|---|
| 1181 | + !mem->kind) { |
|---|
| 1433 | 1182 | /* untiled */ |
|---|
| 1183 | + ret = 0; |
|---|
| 1434 | 1184 | break; |
|---|
| 1435 | | - /* fallthrough, tiled memory */ |
|---|
| 1185 | + } |
|---|
| 1186 | + fallthrough; /* tiled memory */ |
|---|
| 1436 | 1187 | case TTM_PL_VRAM: |
|---|
| 1437 | | - reg->bus.offset = reg->start << PAGE_SHIFT; |
|---|
| 1438 | | - reg->bus.base = device->func->resource_addr(device, 1); |
|---|
| 1188 | + reg->bus.offset = (reg->start << PAGE_SHIFT) + |
|---|
| 1189 | + device->func->resource_addr(device, 1); |
|---|
| 1439 | 1190 | reg->bus.is_iomem = true; |
|---|
| 1440 | 1191 | if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) { |
|---|
| 1441 | 1192 | union { |
|---|
| .. | .. |
|---|
| 1444 | 1195 | } args; |
|---|
| 1445 | 1196 | u64 handle, length; |
|---|
| 1446 | 1197 | u32 argc = 0; |
|---|
| 1447 | | - int ret; |
|---|
| 1448 | 1198 | |
|---|
| 1449 | 1199 | switch (mem->mem.object.oclass) { |
|---|
| 1450 | 1200 | case NVIF_CLASS_MEM_NV50: |
|---|
| .. | .. |
|---|
| 1468 | 1218 | ret = nvif_object_map_handle(&mem->mem.object, |
|---|
| 1469 | 1219 | &args, argc, |
|---|
| 1470 | 1220 | &handle, &length); |
|---|
| 1471 | | - if (ret != 1) |
|---|
| 1472 | | - return ret ? ret : -EINVAL; |
|---|
| 1221 | + if (ret != 1) { |
|---|
| 1222 | + if (WARN_ON(ret == 0)) |
|---|
| 1223 | + ret = -EINVAL; |
|---|
| 1224 | + goto out; |
|---|
| 1225 | + } |
|---|
| 1473 | 1226 | |
|---|
| 1474 | | - reg->bus.base = 0; |
|---|
| 1475 | 1227 | reg->bus.offset = handle; |
|---|
| 1476 | 1228 | } |
|---|
| 1229 | + ret = 0; |
|---|
| 1477 | 1230 | break; |
|---|
| 1478 | 1231 | default: |
|---|
| 1479 | | - return -EINVAL; |
|---|
| 1232 | + ret = -EINVAL; |
|---|
| 1480 | 1233 | } |
|---|
| 1481 | | - return 0; |
|---|
| 1234 | + |
|---|
| 1235 | +out: |
|---|
| 1236 | + if (ret == -ENOSPC) { |
|---|
| 1237 | + struct nouveau_bo *nvbo; |
|---|
| 1238 | + |
|---|
| 1239 | + nvbo = list_first_entry_or_null(&drm->ttm.io_reserve_lru, |
|---|
| 1240 | + typeof(*nvbo), |
|---|
| 1241 | + io_reserve_lru); |
|---|
| 1242 | + if (nvbo) { |
|---|
| 1243 | + list_del_init(&nvbo->io_reserve_lru); |
|---|
| 1244 | + drm_vma_node_unmap(&nvbo->bo.base.vma_node, |
|---|
| 1245 | + bdev->dev_mapping); |
|---|
| 1246 | + nouveau_ttm_io_mem_free_locked(drm, &nvbo->bo.mem); |
|---|
| 1247 | + goto retry; |
|---|
| 1248 | + } |
|---|
| 1249 | + |
|---|
| 1250 | + } |
|---|
| 1251 | + mutex_unlock(&drm->ttm.io_reserve_mutex); |
|---|
| 1252 | + return ret; |
|---|
| 1482 | 1253 | } |
|---|
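Note: nouveau_ttm_io_mem_reserve() and nouveau_ttm_io_mem_free() now serialize on drm->ttm.io_reserve_mutex, bus.base and bus.offset collapse into a single absolute bus.offset, and a reservation that fails with -ENOSPC (typically no BAR1 space left for the VRAM mapping) no longer fails the fault: the least recently used buffer on the io-reserve LRU has its CPU page-table entries shot down with drm_vma_node_unmap(), its io mapping is released through the new locked helper, and the reservation is retried. A condensed model of the retry loop (io_mem_reserve_once() is a hypothetical stand-in for the big switch in the function above):

```c
mutex_lock(&drm->ttm.io_reserve_mutex);
retry:
	ret = io_mem_reserve_once(drm, reg);	/* hypothetical helper */
	if (ret == -ENOSPC) {
		struct nouveau_bo *nvbo;

		nvbo = list_first_entry_or_null(&drm->ttm.io_reserve_lru,
						typeof(*nvbo), io_reserve_lru);
		if (nvbo) {
			list_del_init(&nvbo->io_reserve_lru);
			drm_vma_node_unmap(&nvbo->bo.base.vma_node,
					   bdev->dev_mapping);	/* kill CPU ptes */
			nouveau_ttm_io_mem_free_locked(drm, &nvbo->bo.mem);
			goto retry;		/* try the reservation again */
		}
	}
	mutex_unlock(&drm->ttm.io_reserve_mutex);
```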
| 1483 | 1254 | |
|---|
| 1484 | 1255 | static void |
|---|
| 1485 | | -nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg) |
|---|
| 1256 | +nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_resource *reg) |
|---|
| 1486 | 1257 | { |
|---|
| 1487 | 1258 | struct nouveau_drm *drm = nouveau_bdev(bdev); |
|---|
| 1488 | | - struct nouveau_mem *mem = nouveau_mem(reg); |
|---|
| 1489 | 1259 | |
|---|
| 1490 | | - if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) { |
|---|
| 1491 | | - switch (reg->mem_type) { |
|---|
| 1492 | | - case TTM_PL_TT: |
|---|
| 1493 | | - if (mem->kind) |
|---|
| 1494 | | - nvif_object_unmap_handle(&mem->mem.object); |
|---|
| 1495 | | - break; |
|---|
| 1496 | | - case TTM_PL_VRAM: |
|---|
| 1497 | | - nvif_object_unmap_handle(&mem->mem.object); |
|---|
| 1498 | | - break; |
|---|
| 1499 | | - default: |
|---|
| 1500 | | - break; |
|---|
| 1501 | | - } |
|---|
| 1502 | | - } |
|---|
| 1260 | + mutex_lock(&drm->ttm.io_reserve_mutex); |
|---|
| 1261 | + nouveau_ttm_io_mem_free_locked(drm, reg); |
|---|
| 1262 | + mutex_unlock(&drm->ttm.io_reserve_mutex); |
|---|
| 1503 | 1263 | } |
|---|
| 1504 | 1264 | |
|---|
| 1505 | 1265 | static int |
|---|
| .. | .. |
|---|
| 1520 | 1280 | return 0; |
|---|
| 1521 | 1281 | |
|---|
| 1522 | 1282 | if (bo->mem.mem_type == TTM_PL_SYSTEM) { |
|---|
| 1523 | | - nouveau_bo_placement_set(nvbo, TTM_PL_TT, 0); |
|---|
| 1283 | + nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, |
|---|
| 1284 | + 0); |
|---|
| 1524 | 1285 | |
|---|
| 1525 | 1286 | ret = nouveau_bo_validate(nvbo, false, false); |
|---|
| 1526 | 1287 | if (ret) |
|---|
| .. | .. |
|---|
| 1544 | 1305 | nvbo->busy_placements[i].lpfn = mappable; |
|---|
| 1545 | 1306 | } |
|---|
| 1546 | 1307 | |
|---|
| 1547 | | - nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0); |
|---|
| 1308 | + nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, 0); |
|---|
| 1548 | 1309 | return nouveau_bo_validate(nvbo, false, false); |
|---|
| 1549 | 1310 | } |
|---|
| 1550 | 1311 | |
|---|
| 1551 | 1312 | static int |
|---|
| 1552 | | -nouveau_ttm_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx) |
|---|
| 1313 | +nouveau_ttm_tt_populate(struct ttm_bo_device *bdev, |
|---|
| 1314 | + struct ttm_tt *ttm, struct ttm_operation_ctx *ctx) |
|---|
| 1553 | 1315 | { |
|---|
| 1554 | 1316 | struct ttm_dma_tt *ttm_dma = (void *)ttm; |
|---|
| 1555 | 1317 | struct nouveau_drm *drm; |
|---|
| 1556 | 1318 | struct device *dev; |
|---|
| 1557 | | - unsigned i; |
|---|
| 1558 | | - int r; |
|---|
| 1559 | 1319 | bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); |
|---|
| 1560 | 1320 | |
|---|
| 1561 | | - if (ttm->state != tt_unpopulated) |
|---|
| 1321 | + if (ttm_tt_is_populated(ttm)) |
|---|
| 1562 | 1322 | return 0; |
|---|
| 1563 | 1323 | |
|---|
| 1564 | 1324 | if (slave && ttm->sg) { |
|---|
| 1565 | 1325 | /* make userspace faulting work */ |
|---|
| 1566 | 1326 | drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages, |
|---|
| 1567 | 1327 | ttm_dma->dma_address, ttm->num_pages); |
|---|
| 1568 | | - ttm->state = tt_unbound; |
|---|
| 1328 | + ttm_tt_set_populated(ttm); |
|---|
| 1569 | 1329 | return 0; |
|---|
| 1570 | 1330 | } |
|---|
| 1571 | 1331 | |
|---|
| 1572 | | - drm = nouveau_bdev(ttm->bdev); |
|---|
| 1332 | + drm = nouveau_bdev(bdev); |
|---|
| 1573 | 1333 | dev = drm->dev->dev; |
|---|
| 1574 | 1334 | |
|---|
| 1575 | 1335 | #if IS_ENABLED(CONFIG_AGP) |
|---|
| 1576 | 1336 | if (drm->agp.bridge) { |
|---|
| 1577 | | - return ttm_agp_tt_populate(ttm, ctx); |
|---|
| 1337 | + return ttm_pool_populate(ttm, ctx); |
|---|
| 1578 | 1338 | } |
|---|
| 1579 | 1339 | #endif |
|---|
| 1580 | 1340 | |
|---|
| .. | .. |
|---|
| 1583 | 1343 | return ttm_dma_populate((void *)ttm, dev, ctx); |
|---|
| 1584 | 1344 | } |
|---|
| 1585 | 1345 | #endif |
|---|
| 1586 | | - |
|---|
| 1587 | | - r = ttm_pool_populate(ttm, ctx); |
|---|
| 1588 | | - if (r) { |
|---|
| 1589 | | - return r; |
|---|
| 1590 | | - } |
|---|
| 1591 | | - |
|---|
| 1592 | | - for (i = 0; i < ttm->num_pages; i++) { |
|---|
| 1593 | | - dma_addr_t addr; |
|---|
| 1594 | | - |
|---|
| 1595 | | - addr = dma_map_page(dev, ttm->pages[i], 0, PAGE_SIZE, |
|---|
| 1596 | | - DMA_BIDIRECTIONAL); |
|---|
| 1597 | | - |
|---|
| 1598 | | - if (dma_mapping_error(dev, addr)) { |
|---|
| 1599 | | - while (i--) { |
|---|
| 1600 | | - dma_unmap_page(dev, ttm_dma->dma_address[i], |
|---|
| 1601 | | - PAGE_SIZE, DMA_BIDIRECTIONAL); |
|---|
| 1602 | | - ttm_dma->dma_address[i] = 0; |
|---|
| 1603 | | - } |
|---|
| 1604 | | - ttm_pool_unpopulate(ttm); |
|---|
| 1605 | | - return -EFAULT; |
|---|
| 1606 | | - } |
|---|
| 1607 | | - |
|---|
| 1608 | | - ttm_dma->dma_address[i] = addr; |
|---|
| 1609 | | - } |
|---|
| 1610 | | - return 0; |
|---|
| 1346 | + return ttm_populate_and_map_pages(dev, ttm_dma, ctx); |
|---|
| 1611 | 1347 | } |
|---|
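
The open-coded loop removed above mapped each page with `dma_map_page()` and, on a mapping failure, walked back over the pages already mapped before returning; the replacement `ttm_populate_and_map_pages()` helper bundles that populate-then-map-with-unwind logic. The sketch below shows the unwind-on-partial-failure shape with hypothetical `map_one()`/`unmap_one()` helpers; it is only an illustration of the pattern, not the TTM helper.

```c
/*
 * Unwind-on-partial-failure sketch: map N items, and if item i fails,
 * undo items [0, i) before reporting the error.  map_one()/unmap_one()
 * are invented helpers for the example.
 */
#include <errno.h>
#include <stdio.h>

#define N 8

static int map_one(int i, int *handle)
{
	if (i == 5)
		return -EFAULT;	/* simulate a mapping failure */
	*handle = i + 100;
	return 0;
}

static void unmap_one(int handle)
{
	(void)handle;		/* nothing to undo in the sketch */
}

static int map_all(int *handles)
{
	int i, ret;

	for (i = 0; i < N; i++) {
		ret = map_one(i, &handles[i]);
		if (ret) {
			while (i--)	/* unwind what was already mapped */
				unmap_one(handles[i]);
			return ret;
		}
	}
	return 0;
}

int main(void)
{
	int handles[N];

	printf("map_all: %d\n", map_all(handles));
	return 0;
}
```
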
| 1612 | 1348 | |
|---|
| 1613 | 1349 | static void |
|---|
| 1614 | | -nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm) |
|---|
| 1350 | +nouveau_ttm_tt_unpopulate(struct ttm_bo_device *bdev, |
|---|
| 1351 | + struct ttm_tt *ttm) |
|---|
| 1615 | 1352 | { |
|---|
| 1616 | 1353 | struct ttm_dma_tt *ttm_dma = (void *)ttm; |
|---|
| 1617 | 1354 | struct nouveau_drm *drm; |
|---|
| 1618 | 1355 | struct device *dev; |
|---|
| 1619 | | - unsigned i; |
|---|
| 1620 | 1356 | bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); |
|---|
| 1621 | 1357 | |
|---|
| 1622 | 1358 | if (slave) |
|---|
| 1623 | 1359 | return; |
|---|
| 1624 | 1360 | |
|---|
| 1625 | | - drm = nouveau_bdev(ttm->bdev); |
|---|
| 1361 | + drm = nouveau_bdev(bdev); |
|---|
| 1626 | 1362 | dev = drm->dev->dev; |
|---|
| 1627 | 1363 | |
|---|
| 1628 | 1364 | #if IS_ENABLED(CONFIG_AGP) |
|---|
| 1629 | 1365 | if (drm->agp.bridge) { |
|---|
| 1630 | | - ttm_agp_tt_unpopulate(ttm); |
|---|
| 1366 | + ttm_pool_unpopulate(ttm); |
|---|
| 1631 | 1367 | return; |
|---|
| 1632 | 1368 | } |
|---|
| 1633 | 1369 | #endif |
|---|
| .. | .. |
|---|
| 1639 | 1375 | } |
|---|
| 1640 | 1376 | #endif |
|---|
| 1641 | 1377 | |
|---|
| 1642 | | - for (i = 0; i < ttm->num_pages; i++) { |
|---|
| 1643 | | - if (ttm_dma->dma_address[i]) { |
|---|
| 1644 | | - dma_unmap_page(dev, ttm_dma->dma_address[i], PAGE_SIZE, |
|---|
| 1645 | | - DMA_BIDIRECTIONAL); |
|---|
| 1646 | | - } |
|---|
| 1647 | | - } |
|---|
| 1378 | + ttm_unmap_and_unpopulate_pages(dev, ttm_dma); |
|---|
| 1379 | +} |
|---|
| 1648 | 1380 | |
|---|
| 1649 | | - ttm_pool_unpopulate(ttm); |
|---|
| 1381 | +static void |
|---|
| 1382 | +nouveau_ttm_tt_destroy(struct ttm_bo_device *bdev, |
|---|
| 1383 | + struct ttm_tt *ttm) |
|---|
| 1384 | +{ |
|---|
| 1385 | +#if IS_ENABLED(CONFIG_AGP) |
|---|
| 1386 | + struct nouveau_drm *drm = nouveau_bdev(bdev); |
|---|
| 1387 | + if (drm->agp.bridge) { |
|---|
| 1388 | + ttm_agp_unbind(ttm); |
|---|
| 1389 | + ttm_tt_destroy_common(bdev, ttm); |
|---|
| 1390 | + ttm_agp_destroy(ttm); |
|---|
| 1391 | + return; |
|---|
| 1392 | + } |
|---|
| 1393 | +#endif |
|---|
| 1394 | + nouveau_sgdma_destroy(bdev, ttm); |
|---|
| 1650 | 1395 | } |
|---|
| 1651 | 1396 | |
|---|
| 1652 | 1397 | void |
|---|
| 1653 | 1398 | nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence, bool exclusive) |
|---|
| 1654 | 1399 | { |
|---|
| 1655 | | - struct reservation_object *resv = nvbo->bo.resv; |
|---|
| 1400 | + struct dma_resv *resv = nvbo->bo.base.resv; |
|---|
| 1656 | 1401 | |
|---|
| 1657 | 1402 | if (exclusive) |
|---|
| 1658 | | - reservation_object_add_excl_fence(resv, &fence->base); |
|---|
| 1403 | + dma_resv_add_excl_fence(resv, &fence->base); |
|---|
| 1659 | 1404 | else if (fence) |
|---|
| 1660 | | - reservation_object_add_shared_fence(resv, &fence->base); |
|---|
| 1405 | + dma_resv_add_shared_fence(resv, &fence->base); |
|---|
| 1661 | 1406 | } |
|---|
| 1662 | 1407 | |
|---|
| 1663 | 1408 | struct ttm_bo_driver nouveau_bo_driver = { |
|---|
| 1664 | 1409 | .ttm_tt_create = &nouveau_ttm_tt_create, |
|---|
| 1665 | 1410 | .ttm_tt_populate = &nouveau_ttm_tt_populate, |
|---|
| 1666 | 1411 | .ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate, |
|---|
| 1667 | | - .invalidate_caches = nouveau_bo_invalidate_caches, |
|---|
| 1668 | | - .init_mem_type = nouveau_bo_init_mem_type, |
|---|
| 1412 | + .ttm_tt_bind = &nouveau_ttm_tt_bind, |
|---|
| 1413 | + .ttm_tt_unbind = &nouveau_ttm_tt_unbind, |
|---|
| 1414 | + .ttm_tt_destroy = &nouveau_ttm_tt_destroy, |
|---|
| 1669 | 1415 | .eviction_valuable = ttm_bo_eviction_valuable, |
|---|
| 1670 | 1416 | .evict_flags = nouveau_bo_evict_flags, |
|---|
| 1671 | 1417 | .move_notify = nouveau_bo_move_ntfy, |
|---|