| .. | .. |
|---|
| 35 | 35 | #include "nouveau_vmm.h" |
|---|
| 36 | 36 | |
|---|
| 37 | 37 | #include <nvif/class.h> |
|---|
| 38 | +#include <nvif/push206e.h> |
|---|
| 38 | 39 | |
|---|
| 39 | 40 | void |
|---|
| 40 | 41 | nouveau_gem_object_del(struct drm_gem_object *gem) |
|---|
| 41 | 42 | { |
|---|
| 42 | 43 | struct nouveau_bo *nvbo = nouveau_gem_object(gem); |
|---|
| 43 | 44 | struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); |
|---|
| 44 | | - struct ttm_buffer_object *bo = &nvbo->bo; |
|---|
| 45 | 45 | struct device *dev = drm->dev->dev; |
|---|
| 46 | 46 | int ret; |
|---|
| 47 | 47 | |
|---|
| .. | .. |
|---|
| 54 | 54 | if (gem->import_attach) |
|---|
| 55 | 55 | drm_prime_gem_destroy(gem, nvbo->bo.sg); |
|---|
| 56 | 56 | |
|---|
| 57 | | - drm_gem_object_release(gem); |
|---|
| 58 | | - |
|---|
| 59 | | - /* reset filp so nouveau_bo_del_ttm() can test for it */ |
|---|
| 60 | | - gem->filp = NULL; |
|---|
| 61 | | - ttm_bo_unref(&bo); |
|---|
| 57 | + ttm_bo_put(&nvbo->bo); |
|---|
| 62 | 58 | |
|---|
| 63 | 59 | pm_runtime_mark_last_busy(dev); |
|---|
| 64 | 60 | pm_runtime_put_autosuspend(dev); |
|---|
| .. | .. |
|---|
| 71 | 67 | struct nouveau_bo *nvbo = nouveau_gem_object(gem); |
|---|
| 72 | 68 | struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); |
|---|
| 73 | 69 | struct device *dev = drm->dev->dev; |
|---|
| 70 | + struct nouveau_vmm *vmm = cli->svm.cli ? &cli->svm : &cli->vmm; |
|---|
| 74 | 71 | struct nouveau_vma *vma; |
|---|
| 75 | 72 | int ret; |
|---|
| 76 | 73 | |
|---|
| 77 | | - if (cli->vmm.vmm.object.oclass < NVIF_CLASS_VMM_NV50) |
|---|
| 74 | + if (vmm->vmm.object.oclass < NVIF_CLASS_VMM_NV50) |
|---|
| 78 | 75 | return 0; |
|---|
| 79 | 76 | |
|---|
| 80 | 77 | ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL); |
|---|
| .. | .. |
|---|
| 87 | 84 | goto out; |
|---|
| 88 | 85 | } |
|---|
| 89 | 86 | |
|---|
| 90 | | - ret = nouveau_vma_new(nvbo, &cli->vmm, &vma); |
|---|
| 87 | + ret = nouveau_vma_new(nvbo, vmm, &vma); |
|---|
| 91 | 88 | pm_runtime_mark_last_busy(dev); |
|---|
| 92 | 89 | pm_runtime_put_autosuspend(dev); |
|---|
| 93 | 90 | out: |
|---|
| .. | .. |
|---|
| 147 | 144 | struct nouveau_bo *nvbo = nouveau_gem_object(gem); |
|---|
| 148 | 145 | struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); |
|---|
| 149 | 146 | struct device *dev = drm->dev->dev; |
|---|
| 147 | + struct nouveau_vmm *vmm = cli->svm.cli ? &cli->svm : &cli->vmm; |
|---|
| 150 | 148 | struct nouveau_vma *vma; |
|---|
| 151 | 149 | int ret; |
|---|
| 152 | 150 | |
|---|
| 153 | | - if (cli->vmm.vmm.object.oclass < NVIF_CLASS_VMM_NV50) |
|---|
| 151 | + if (vmm->vmm.object.oclass < NVIF_CLASS_VMM_NV50) |
|---|
| 154 | 152 | return; |
|---|
| 155 | 153 | |
|---|
| 156 | 154 | ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL); |
|---|
| 157 | 155 | if (ret) |
|---|
| 158 | 156 | return; |
|---|
| 159 | 157 | |
|---|
| 160 | | - vma = nouveau_vma_find(nvbo, &cli->vmm); |
|---|
| 158 | + vma = nouveau_vma_find(nvbo, vmm); |
|---|
| 161 | 159 | if (vma) { |
|---|
| 162 | 160 | if (--vma->refs == 0) { |
|---|
| 163 | 161 | ret = pm_runtime_get_sync(dev); |
|---|
| 164 | 162 | if (!WARN_ON(ret < 0 && ret != -EACCES)) { |
|---|
| 165 | 163 | nouveau_gem_object_unmap(nvbo, vma); |
|---|
| 166 | 164 | pm_runtime_mark_last_busy(dev); |
|---|
| 167 | | - pm_runtime_put_autosuspend(dev); |
|---|
| 168 | 165 | } |
|---|
| 166 | + pm_runtime_put_autosuspend(dev); |
|---|
| 169 | 167 | } |
|---|
| 170 | 168 | } |
|---|
| 171 | 169 | ttm_bo_unreserve(&nvbo->bo); |
|---|
| .. | .. |
|---|
| 178 | 176 | { |
|---|
| 179 | 177 | struct nouveau_drm *drm = cli->drm; |
|---|
| 180 | 178 | struct nouveau_bo *nvbo; |
|---|
| 181 | | - u32 flags = 0; |
|---|
| 182 | 179 | int ret; |
|---|
| 183 | 180 | |
|---|
| 184 | | - if (domain & NOUVEAU_GEM_DOMAIN_VRAM) |
|---|
| 185 | | - flags |= TTM_PL_FLAG_VRAM; |
|---|
| 186 | | - if (domain & NOUVEAU_GEM_DOMAIN_GART) |
|---|
| 187 | | - flags |= TTM_PL_FLAG_TT; |
|---|
| 188 | | - if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU) |
|---|
| 189 | | - flags |= TTM_PL_FLAG_SYSTEM; |
|---|
| 181 | + if (!(domain & (NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART))) |
|---|
| 182 | + domain |= NOUVEAU_GEM_DOMAIN_CPU; |
|---|
| 190 | 183 | |
|---|
| 191 | | - if (domain & NOUVEAU_GEM_DOMAIN_COHERENT) |
|---|
| 192 | | - flags |= TTM_PL_FLAG_UNCACHED; |
|---|
| 184 | + nvbo = nouveau_bo_alloc(cli, &size, &align, domain, tile_mode, |
|---|
| 185 | + tile_flags); |
|---|
| 186 | + if (IS_ERR(nvbo)) |
|---|
| 187 | + return PTR_ERR(nvbo); |
|---|
| 193 | 188 | |
|---|
| 194 | | - ret = nouveau_bo_new(cli, size, align, flags, tile_mode, |
|---|
| 195 | | - tile_flags, NULL, NULL, pnvbo); |
|---|
| 189 | + /* Initialize the embedded gem-object. We return a single gem-reference |
|---|
| 190 | + * to the caller, instead of a normal nouveau_bo ttm reference. */ |
|---|
| 191 | + ret = drm_gem_object_init(drm->dev, &nvbo->bo.base, size); |
|---|
| 192 | + if (ret) { |
|---|
| 193 | + drm_gem_object_release(&nvbo->bo.base); |
|---|
| 194 | + kfree(nvbo); |
|---|
| 195 | + return ret; |
|---|
| 196 | + } |
|---|
| 197 | + |
|---|
| 198 | + ret = nouveau_bo_init(nvbo, size, align, domain, NULL, NULL); |
|---|
| 196 | 199 | if (ret) |
|---|
| 197 | 200 | return ret; |
|---|
| 198 | | - nvbo = *pnvbo; |
|---|
| 199 | 201 | |
|---|
| 200 | 202 | /* we restrict allowed domains on nv50+ to only the types |
|---|
| 201 | 203 | * that were requested at creation time. not possibly on |
|---|
| .. | .. |
|---|
| 206 | 208 | if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) |
|---|
| 207 | 209 | nvbo->valid_domains &= domain; |
|---|
| 208 | 210 | |
|---|
| 209 | | - /* Initialize the embedded gem-object. We return a single gem-reference |
|---|
| 210 | | - * to the caller, instead of a normal nouveau_bo ttm reference. */ |
|---|
| 211 | | - ret = drm_gem_object_init(drm->dev, &nvbo->gem, nvbo->bo.mem.size); |
|---|
| 212 | | - if (ret) { |
|---|
| 213 | | - nouveau_bo_ref(NULL, pnvbo); |
|---|
| 214 | | - return -ENOMEM; |
|---|
| 215 | | - } |
|---|
| 216 | | - |
|---|
| 217 | | - nvbo->bo.persistent_swap_storage = nvbo->gem.filp; |
|---|
| 211 | + nvbo->bo.persistent_swap_storage = nvbo->bo.base.filp; |
|---|
| 212 | + *pnvbo = nvbo; |
|---|
| 218 | 213 | return 0; |
|---|
| 219 | 214 | } |
|---|
| 220 | 215 | |
|---|
| .. | .. |
|---|
| 224 | 219 | { |
|---|
| 225 | 220 | struct nouveau_cli *cli = nouveau_cli(file_priv); |
|---|
| 226 | 221 | struct nouveau_bo *nvbo = nouveau_gem_object(gem); |
|---|
| 222 | + struct nouveau_vmm *vmm = cli->svm.cli ? &cli->svm : &cli->vmm; |
|---|
| 227 | 223 | struct nouveau_vma *vma; |
|---|
| 228 | 224 | |
|---|
| 229 | 225 | if (is_power_of_2(nvbo->valid_domains)) |
|---|
| .. | .. |
|---|
| 232 | 228 | rep->domain = NOUVEAU_GEM_DOMAIN_GART; |
|---|
| 233 | 229 | else |
|---|
| 234 | 230 | rep->domain = NOUVEAU_GEM_DOMAIN_VRAM; |
|---|
| 235 | | - rep->offset = nvbo->bo.offset; |
|---|
| 236 | | - if (cli->vmm.vmm.object.oclass >= NVIF_CLASS_VMM_NV50) { |
|---|
| 237 | | - vma = nouveau_vma_find(nvbo, &cli->vmm); |
|---|
| 231 | + rep->offset = nvbo->offset; |
|---|
| 232 | + if (vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50) { |
|---|
| 233 | + vma = nouveau_vma_find(nvbo, vmm); |
|---|
| 238 | 234 | if (!vma) |
|---|
| 239 | 235 | return -EINVAL; |
|---|
| 240 | 236 | |
|---|
| .. | .. |
|---|
| 242 | 238 | } |
|---|
| 243 | 239 | |
|---|
| 244 | 240 | rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT; |
|---|
| 245 | | - rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.vma_node); |
|---|
| 241 | + rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.base.vma_node); |
|---|
| 246 | 242 | rep->tile_mode = nvbo->mode; |
|---|
| 247 | 243 | rep->tile_flags = nvbo->contig ? 0 : NOUVEAU_GEM_TILE_NONCONTIG; |
|---|
| 248 | 244 | if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI) |
|---|
| .. | .. |
|---|
| 270 | 266 | if (ret) |
|---|
| 271 | 267 | return ret; |
|---|
| 272 | 268 | |
|---|
| 273 | | - ret = drm_gem_handle_create(file_priv, &nvbo->gem, &req->info.handle); |
|---|
| 269 | + ret = drm_gem_handle_create(file_priv, &nvbo->bo.base, |
|---|
| 270 | + &req->info.handle); |
|---|
| 274 | 271 | if (ret == 0) { |
|---|
| 275 | | - ret = nouveau_gem_info(file_priv, &nvbo->gem, &req->info); |
|---|
| 272 | + ret = nouveau_gem_info(file_priv, &nvbo->bo.base, &req->info); |
|---|
| 276 | 273 | if (ret) |
|---|
| 277 | 274 | drm_gem_handle_delete(file_priv, req->info.handle); |
|---|
| 278 | 275 | } |
|---|
| 279 | 276 | |
|---|
| 280 | 277 | /* drop reference from allocate - handle holds it now */ |
|---|
| 281 | | - drm_gem_object_put_unlocked(&nvbo->gem); |
|---|
| 278 | + drm_gem_object_put(&nvbo->bo.base); |
|---|
| 282 | 279 | return ret; |
|---|
| 283 | 280 | } |
|---|
| 284 | 281 | |
|---|
| .. | .. |
|---|
| 290 | 287 | struct ttm_buffer_object *bo = &nvbo->bo; |
|---|
| 291 | 288 | uint32_t domains = valid_domains & nvbo->valid_domains & |
|---|
| 292 | 289 | (write_domains ? write_domains : read_domains); |
|---|
| 293 | | - uint32_t pref_flags = 0, valid_flags = 0; |
|---|
| 290 | + uint32_t pref_domains = 0; |
|---|
| 294 | 291 | |
|---|
| 295 | 292 | if (!domains) |
|---|
| 296 | 293 | return -EINVAL; |
|---|
| 297 | 294 | |
|---|
| 298 | | - if (valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) |
|---|
| 299 | | - valid_flags |= TTM_PL_FLAG_VRAM; |
|---|
| 300 | | - |
|---|
| 301 | | - if (valid_domains & NOUVEAU_GEM_DOMAIN_GART) |
|---|
| 302 | | - valid_flags |= TTM_PL_FLAG_TT; |
|---|
| 295 | + valid_domains &= ~(NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART); |
|---|
| 303 | 296 | |
|---|
| 304 | 297 | if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) && |
|---|
| 305 | 298 | bo->mem.mem_type == TTM_PL_VRAM) |
|---|
| 306 | | - pref_flags |= TTM_PL_FLAG_VRAM; |
|---|
| 299 | + pref_domains |= NOUVEAU_GEM_DOMAIN_VRAM; |
|---|
| 307 | 300 | |
|---|
| 308 | 301 | else if ((domains & NOUVEAU_GEM_DOMAIN_GART) && |
|---|
| 309 | 302 | bo->mem.mem_type == TTM_PL_TT) |
|---|
| 310 | | - pref_flags |= TTM_PL_FLAG_TT; |
|---|
| 303 | + pref_domains |= NOUVEAU_GEM_DOMAIN_GART; |
|---|
| 311 | 304 | |
|---|
| 312 | 305 | else if (domains & NOUVEAU_GEM_DOMAIN_VRAM) |
|---|
| 313 | | - pref_flags |= TTM_PL_FLAG_VRAM; |
|---|
| 306 | + pref_domains |= NOUVEAU_GEM_DOMAIN_VRAM; |
|---|
| 314 | 307 | |
|---|
| 315 | 308 | else |
|---|
| 316 | | - pref_flags |= TTM_PL_FLAG_TT; |
|---|
| 309 | + pref_domains |= NOUVEAU_GEM_DOMAIN_GART; |
|---|
| 317 | 310 | |
|---|
| 318 | | - nouveau_bo_placement_set(nvbo, pref_flags, valid_flags); |
|---|
| 311 | + nouveau_bo_placement_set(nvbo, pref_domains, valid_domains); |
|---|
| 319 | 312 | |
|---|
| 320 | 313 | return 0; |
|---|
| 321 | 314 | } |
|---|
| .. | .. |
|---|
| 326 | 319 | }; |
|---|
| 327 | 320 | |
|---|
| 328 | 321 | static void |
|---|
| 329 | | -validate_fini_no_ticket(struct validate_op *op, struct nouveau_fence *fence, |
|---|
| 322 | +validate_fini_no_ticket(struct validate_op *op, struct nouveau_channel *chan, |
|---|
| 323 | + struct nouveau_fence *fence, |
|---|
| 330 | 324 | struct drm_nouveau_gem_pushbuf_bo *pbbo) |
|---|
| 331 | 325 | { |
|---|
| 332 | 326 | struct nouveau_bo *nvbo; |
|---|
| .. | .. |
|---|
| 337 | 331 | b = &pbbo[nvbo->pbbo_index]; |
|---|
| 338 | 332 | |
|---|
| 339 | 333 | if (likely(fence)) { |
|---|
| 340 | | - struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); |
|---|
| 341 | | - struct nouveau_vma *vma; |
|---|
| 342 | | - |
|---|
| 343 | 334 | nouveau_bo_fence(nvbo, fence, !!b->write_domains); |
|---|
| 344 | 335 | |
|---|
| 345 | | - if (drm->client.vmm.vmm.object.oclass >= NVIF_CLASS_VMM_NV50) { |
|---|
| 346 | | - vma = (void *)(unsigned long)b->user_priv; |
|---|
| 336 | + if (chan->vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50) { |
|---|
| 337 | + struct nouveau_vma *vma = |
|---|
| 338 | + (void *)(unsigned long)b->user_priv; |
|---|
| 347 | 339 | nouveau_fence_unref(&vma->fence); |
|---|
| 348 | 340 | dma_fence_get(&fence->base); |
|---|
| 349 | 341 | vma->fence = fence; |
|---|
| .. | .. |
|---|
| 358 | 350 | list_del(&nvbo->entry); |
|---|
| 359 | 351 | nvbo->reserved_by = NULL; |
|---|
| 360 | 352 | ttm_bo_unreserve(&nvbo->bo); |
|---|
| 361 | | - drm_gem_object_put_unlocked(&nvbo->gem); |
|---|
| 353 | + drm_gem_object_put(&nvbo->bo.base); |
|---|
| 362 | 354 | } |
|---|
| 363 | 355 | } |
|---|
| 364 | 356 | |
|---|
| 365 | 357 | static void |
|---|
| 366 | | -validate_fini(struct validate_op *op, struct nouveau_fence *fence, |
|---|
| 358 | +validate_fini(struct validate_op *op, struct nouveau_channel *chan, |
|---|
| 359 | + struct nouveau_fence *fence, |
|---|
| 367 | 360 | struct drm_nouveau_gem_pushbuf_bo *pbbo) |
|---|
| 368 | 361 | { |
|---|
| 369 | | - validate_fini_no_ticket(op, fence, pbbo); |
|---|
| 362 | + validate_fini_no_ticket(op, chan, fence, pbbo); |
|---|
| 370 | 363 | ww_acquire_fini(&op->ticket); |
|---|
| 371 | 364 | } |
|---|
| 372 | 365 | |
|---|
| .. | .. |
|---|
| 404 | 397 | nvbo = nouveau_gem_object(gem); |
|---|
| 405 | 398 | if (nvbo == res_bo) { |
|---|
| 406 | 399 | res_bo = NULL; |
|---|
| 407 | | - drm_gem_object_put_unlocked(gem); |
|---|
| 400 | + drm_gem_object_put(gem); |
|---|
| 408 | 401 | continue; |
|---|
| 409 | 402 | } |
|---|
| 410 | 403 | |
|---|
| 411 | 404 | if (nvbo->reserved_by && nvbo->reserved_by == file_priv) { |
|---|
| 412 | 405 | NV_PRINTK(err, cli, "multiple instances of buffer %d on " |
|---|
| 413 | 406 | "validation list\n", b->handle); |
|---|
| 414 | | - drm_gem_object_put_unlocked(gem); |
|---|
| 407 | + drm_gem_object_put(gem); |
|---|
| 415 | 408 | ret = -EINVAL; |
|---|
| 416 | 409 | break; |
|---|
| 417 | 410 | } |
|---|
| .. | .. |
|---|
| 421 | 414 | list_splice_tail_init(&vram_list, &op->list); |
|---|
| 422 | 415 | list_splice_tail_init(&gart_list, &op->list); |
|---|
| 423 | 416 | list_splice_tail_init(&both_list, &op->list); |
|---|
| 424 | | - validate_fini_no_ticket(op, NULL, NULL); |
|---|
| 417 | + validate_fini_no_ticket(op, chan, NULL, NULL); |
|---|
| 425 | 418 | if (unlikely(ret == -EDEADLK)) { |
|---|
| 426 | 419 | ret = ttm_bo_reserve_slowpath(&nvbo->bo, true, |
|---|
| 427 | 420 | &op->ticket); |
|---|
| .. | .. |
|---|
| 435 | 428 | } |
|---|
| 436 | 429 | } |
|---|
| 437 | 430 | |
|---|
| 438 | | - if (cli->vmm.vmm.object.oclass >= NVIF_CLASS_VMM_NV50) { |
|---|
| 439 | | - struct nouveau_vmm *vmm = &cli->vmm; |
|---|
| 431 | + if (chan->vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50) { |
|---|
| 432 | + struct nouveau_vmm *vmm = chan->vmm; |
|---|
| 440 | 433 | struct nouveau_vma *vma = nouveau_vma_find(nvbo, vmm); |
|---|
| 441 | 434 | if (!vma) { |
|---|
| 442 | 435 | NV_PRINTK(err, cli, "vma not found!\n"); |
|---|
| .. | .. |
|---|
| 476 | 469 | list_splice_tail(&gart_list, &op->list); |
|---|
| 477 | 470 | list_splice_tail(&both_list, &op->list); |
|---|
| 478 | 471 | if (ret) |
|---|
| 479 | | - validate_fini(op, NULL, NULL); |
|---|
| 472 | + validate_fini(op, chan, NULL, NULL); |
|---|
| 480 | 473 | return ret; |
|---|
| 481 | 474 | |
|---|
| 482 | 475 | } |
|---|
| 483 | 476 | |
|---|
| 484 | 477 | static int |
|---|
| 485 | 478 | validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli, |
|---|
| 486 | | - struct list_head *list, struct drm_nouveau_gem_pushbuf_bo *pbbo, |
|---|
| 487 | | - uint64_t user_pbbo_ptr) |
|---|
| 479 | + struct list_head *list, struct drm_nouveau_gem_pushbuf_bo *pbbo) |
|---|
| 488 | 480 | { |
|---|
| 489 | 481 | struct nouveau_drm *drm = chan->drm; |
|---|
| 490 | | - struct drm_nouveau_gem_pushbuf_bo __user *upbbo = |
|---|
| 491 | | - (void __force __user *)(uintptr_t)user_pbbo_ptr; |
|---|
| 492 | 482 | struct nouveau_bo *nvbo; |
|---|
| 493 | 483 | int ret, relocs = 0; |
|---|
| 494 | 484 | |
|---|
| 495 | 485 | list_for_each_entry(nvbo, list, entry) { |
|---|
| 496 | 486 | struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index]; |
|---|
| 497 | 487 | |
|---|
| 498 | | - ret = nouveau_gem_set_domain(&nvbo->gem, b->read_domains, |
|---|
| 488 | + ret = nouveau_gem_set_domain(&nvbo->bo.base, b->read_domains, |
|---|
| 499 | 489 | b->write_domains, |
|---|
| 500 | 490 | b->valid_domains); |
|---|
| 501 | 491 | if (unlikely(ret)) { |
|---|
| .. | .. |
|---|
| 518 | 508 | } |
|---|
| 519 | 509 | |
|---|
| 520 | 510 | if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) { |
|---|
| 521 | | - if (nvbo->bo.offset == b->presumed.offset && |
|---|
| 511 | + if (nvbo->offset == b->presumed.offset && |
|---|
| 522 | 512 | ((nvbo->bo.mem.mem_type == TTM_PL_VRAM && |
|---|
| 523 | 513 | b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) || |
|---|
| 524 | 514 | (nvbo->bo.mem.mem_type == TTM_PL_TT && |
|---|
| .. | .. |
|---|
| 529 | 519 | b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART; |
|---|
| 530 | 520 | else |
|---|
| 531 | 521 | b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM; |
|---|
| 532 | | - b->presumed.offset = nvbo->bo.offset; |
|---|
| 522 | + b->presumed.offset = nvbo->offset; |
|---|
| 533 | 523 | b->presumed.valid = 0; |
|---|
| 534 | 524 | relocs++; |
|---|
| 535 | | - |
|---|
| 536 | | - if (copy_to_user(&upbbo[nvbo->pbbo_index].presumed, |
|---|
| 537 | | - &b->presumed, sizeof(b->presumed))) |
|---|
| 538 | | - return -EFAULT; |
|---|
| 539 | 525 | } |
|---|
| 540 | 526 | } |
|---|
| 541 | 527 | |
|---|
| .. | .. |
|---|
| 546 | 532 | nouveau_gem_pushbuf_validate(struct nouveau_channel *chan, |
|---|
| 547 | 533 | struct drm_file *file_priv, |
|---|
| 548 | 534 | struct drm_nouveau_gem_pushbuf_bo *pbbo, |
|---|
| 549 | | - uint64_t user_buffers, int nr_buffers, |
|---|
| 550 | | - struct validate_op *op, int *apply_relocs) |
|---|
| 535 | + int nr_buffers, |
|---|
| 536 | + struct validate_op *op, bool *apply_relocs) |
|---|
| 551 | 537 | { |
|---|
| 552 | 538 | struct nouveau_cli *cli = nouveau_cli(file_priv); |
|---|
| 553 | 539 | int ret; |
|---|
| .. | .. |
|---|
| 564 | 550 | return ret; |
|---|
| 565 | 551 | } |
|---|
| 566 | 552 | |
|---|
| 567 | | - ret = validate_list(chan, cli, &op->list, pbbo, user_buffers); |
|---|
| 553 | + ret = validate_list(chan, cli, &op->list, pbbo); |
|---|
| 568 | 554 | if (unlikely(ret < 0)) { |
|---|
| 569 | 555 | if (ret != -ERESTARTSYS) |
|---|
| 570 | 556 | NV_PRINTK(err, cli, "validating bo list\n"); |
|---|
| 571 | | - validate_fini(op, NULL, NULL); |
|---|
| 557 | + validate_fini(op, chan, NULL, NULL); |
|---|
| 572 | 558 | return ret; |
|---|
| 559 | + } else if (ret > 0) { |
|---|
| 560 | + *apply_relocs = true; |
|---|
| 573 | 561 | } |
|---|
| 574 | | - *apply_relocs = ret; |
|---|
| 562 | + |
|---|
| 575 | 563 | return 0; |
|---|
| 576 | 564 | } |
|---|
| 577 | 565 | |
|---|
| .. | .. |
|---|
| 604 | 592 | static int |
|---|
| 605 | 593 | nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli, |
|---|
| 606 | 594 | struct drm_nouveau_gem_pushbuf *req, |
|---|
| 595 | + struct drm_nouveau_gem_pushbuf_reloc *reloc, |
|---|
| 607 | 596 | struct drm_nouveau_gem_pushbuf_bo *bo) |
|---|
| 608 | 597 | { |
|---|
| 609 | | - struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL; |
|---|
| 610 | 598 | int ret = 0; |
|---|
| 611 | 599 | unsigned i; |
|---|
| 612 | | - |
|---|
| 613 | | - reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc)); |
|---|
| 614 | | - if (IS_ERR(reloc)) |
|---|
| 615 | | - return PTR_ERR(reloc); |
|---|
| 616 | 600 | |
|---|
| 617 | 601 | for (i = 0; i < req->nr_relocs; i++) { |
|---|
| 618 | 602 | struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i]; |
|---|
| .. | .. |
|---|
| 678 | 662 | nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data); |
|---|
| 679 | 663 | } |
|---|
| 680 | 664 | |
|---|
| 681 | | - u_free(reloc); |
|---|
| 682 | 665 | return ret; |
|---|
| 683 | 666 | } |
|---|
| 684 | 667 | |
|---|
| .. | .. |
|---|
| 692 | 675 | struct nouveau_drm *drm = nouveau_drm(dev); |
|---|
| 693 | 676 | struct drm_nouveau_gem_pushbuf *req = data; |
|---|
| 694 | 677 | struct drm_nouveau_gem_pushbuf_push *push; |
|---|
| 678 | + struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL; |
|---|
| 695 | 679 | struct drm_nouveau_gem_pushbuf_bo *bo; |
|---|
| 696 | 680 | struct nouveau_channel *chan = NULL; |
|---|
| 697 | 681 | struct validate_op op; |
|---|
| 698 | 682 | struct nouveau_fence *fence = NULL; |
|---|
| 699 | | - int i, j, ret = 0, do_reloc = 0; |
|---|
| 683 | + int i, j, ret = 0; |
|---|
| 684 | + bool do_reloc = false, sync = false; |
|---|
| 700 | 685 | |
|---|
| 701 | 686 | if (unlikely(!abi16)) |
|---|
| 702 | 687 | return -ENOMEM; |
|---|
| .. | .. |
|---|
| 710 | 695 | |
|---|
| 711 | 696 | if (!chan) |
|---|
| 712 | 697 | return nouveau_abi16_put(abi16, -ENOENT); |
|---|
| 698 | + if (unlikely(atomic_read(&chan->killed))) |
|---|
| 699 | + return nouveau_abi16_put(abi16, -ENODEV); |
|---|
| 700 | + |
|---|
| 701 | + sync = req->vram_available & NOUVEAU_GEM_PUSHBUF_SYNC; |
|---|
| 713 | 702 | |
|---|
| 714 | 703 | req->vram_available = drm->gem.vram_available; |
|---|
| 715 | 704 | req->gart_available = drm->gem.gart_available; |
|---|
| .. | .. |
|---|
| 754 | 743 | } |
|---|
| 755 | 744 | |
|---|
| 756 | 745 | /* Validate buffer list */ |
|---|
| 757 | | - ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers, |
|---|
| 746 | +revalidate: |
|---|
| 747 | + ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, |
|---|
| 758 | 748 | req->nr_buffers, &op, &do_reloc); |
|---|
| 759 | 749 | if (ret) { |
|---|
| 760 | 750 | if (ret != -ERESTARTSYS) |
|---|
| .. | .. |
|---|
| 764 | 754 | |
|---|
| 765 | 755 | /* Apply any relocations that are required */ |
|---|
| 766 | 756 | if (do_reloc) { |
|---|
| 767 | | - ret = nouveau_gem_pushbuf_reloc_apply(cli, req, bo); |
|---|
| 757 | + if (!reloc) { |
|---|
| 758 | + validate_fini(&op, chan, NULL, bo); |
|---|
| 759 | + reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc)); |
|---|
| 760 | + if (IS_ERR(reloc)) { |
|---|
| 761 | + ret = PTR_ERR(reloc); |
|---|
| 762 | + goto out_prevalid; |
|---|
| 763 | + } |
|---|
| 764 | + |
|---|
| 765 | + goto revalidate; |
|---|
| 766 | + } |
|---|
| 767 | + |
|---|
| 768 | + ret = nouveau_gem_pushbuf_reloc_apply(cli, req, reloc, bo); |
|---|
| 768 | 769 | if (ret) { |
|---|
| 769 | 770 | NV_PRINTK(err, cli, "reloc apply: %d\n", ret); |
|---|
| 770 | 771 | goto out; |
|---|
| .. | .. |
|---|
| 787 | 788 | } |
|---|
| 788 | 789 | } else |
|---|
| 789 | 790 | if (drm->client.device.info.chipset >= 0x25) { |
|---|
| 790 | | - ret = RING_SPACE(chan, req->nr_push * 2); |
|---|
| 791 | + ret = PUSH_WAIT(chan->chan.push, req->nr_push * 2); |
|---|
| 791 | 792 | if (ret) { |
|---|
| 792 | 793 | NV_PRINTK(err, cli, "cal_space: %d\n", ret); |
|---|
| 793 | 794 | goto out; |
|---|
| .. | .. |
|---|
| 797 | 798 | struct nouveau_bo *nvbo = (void *)(unsigned long) |
|---|
| 798 | 799 | bo[push[i].bo_index].user_priv; |
|---|
| 799 | 800 | |
|---|
| 800 | | - OUT_RING(chan, (nvbo->bo.offset + push[i].offset) | 2); |
|---|
| 801 | | - OUT_RING(chan, 0); |
|---|
| 801 | + PUSH_CALL(chan->chan.push, nvbo->offset + push[i].offset); |
|---|
| 802 | + PUSH_DATA(chan->chan.push, 0); |
|---|
| 802 | 803 | } |
|---|
| 803 | 804 | } else { |
|---|
| 804 | | - ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS)); |
|---|
| 805 | + ret = PUSH_WAIT(chan->chan.push, req->nr_push * (2 + NOUVEAU_DMA_SKIPS)); |
|---|
| 805 | 806 | if (ret) { |
|---|
| 806 | 807 | NV_PRINTK(err, cli, "jmp_space: %d\n", ret); |
|---|
| 807 | 808 | goto out; |
|---|
| .. | .. |
|---|
| 831 | 832 | push[i].length - 8) / 4, cmd); |
|---|
| 832 | 833 | } |
|---|
| 833 | 834 | |
|---|
| 834 | | - OUT_RING(chan, 0x20000000 | |
|---|
| 835 | | - (nvbo->bo.offset + push[i].offset)); |
|---|
| 836 | | - OUT_RING(chan, 0); |
|---|
| 835 | + PUSH_JUMP(chan->chan.push, nvbo->offset + push[i].offset); |
|---|
| 836 | + PUSH_DATA(chan->chan.push, 0); |
|---|
| 837 | 837 | for (j = 0; j < NOUVEAU_DMA_SKIPS; j++) |
|---|
| 838 | | - OUT_RING(chan, 0); |
|---|
| 838 | + PUSH_DATA(chan->chan.push, 0); |
|---|
| 839 | 839 | } |
|---|
| 840 | 840 | } |
|---|
| 841 | 841 | |
|---|
| .. | .. |
|---|
| 846 | 846 | goto out; |
|---|
| 847 | 847 | } |
|---|
| 848 | 848 | |
|---|
| 849 | + if (sync) { |
|---|
| 850 | + if (!(ret = nouveau_fence_wait(fence, false, false))) { |
|---|
| 851 | + if ((ret = dma_fence_get_status(&fence->base)) == 1) |
|---|
| 852 | + ret = 0; |
|---|
| 853 | + } |
|---|
| 854 | + } |
|---|
| 855 | + |
|---|
| 849 | 856 | out: |
|---|
| 850 | | - validate_fini(&op, fence, bo); |
|---|
| 857 | + validate_fini(&op, chan, fence, bo); |
|---|
| 851 | 858 | nouveau_fence_unref(&fence); |
|---|
| 852 | 859 | |
|---|
| 860 | + if (do_reloc) { |
|---|
| 861 | + struct drm_nouveau_gem_pushbuf_bo __user *upbbo = |
|---|
| 862 | + u64_to_user_ptr(req->buffers); |
|---|
| 863 | + |
|---|
| 864 | + for (i = 0; i < req->nr_buffers; i++) { |
|---|
| 865 | + if (bo[i].presumed.valid) |
|---|
| 866 | + continue; |
|---|
| 867 | + |
|---|
| 868 | + if (copy_to_user(&upbbo[i].presumed, &bo[i].presumed, |
|---|
| 869 | + sizeof(bo[i].presumed))) { |
|---|
| 870 | + ret = -EFAULT; |
|---|
| 871 | + break; |
|---|
| 872 | + } |
|---|
| 873 | + } |
|---|
| 874 | + } |
|---|
| 853 | 875 | out_prevalid: |
|---|
| 876 | + if (!IS_ERR(reloc)) |
|---|
| 877 | + u_free(reloc); |
|---|
| 854 | 878 | u_free(bo); |
|---|
| 855 | 879 | u_free(push); |
|---|
| 856 | 880 | |
|---|
| .. | .. |
|---|
| 888 | 912 | return -ENOENT; |
|---|
| 889 | 913 | nvbo = nouveau_gem_object(gem); |
|---|
| 890 | 914 | |
|---|
| 891 | | - lret = reservation_object_wait_timeout_rcu(nvbo->bo.resv, write, true, |
|---|
| 915 | + lret = dma_resv_wait_timeout_rcu(nvbo->bo.base.resv, write, true, |
|---|
| 892 | 916 | no_wait ? 0 : 30 * HZ); |
|---|
| 893 | 917 | if (!lret) |
|---|
| 894 | 918 | ret = -EBUSY; |
|---|
| .. | .. |
|---|
| 898 | 922 | ret = lret; |
|---|
| 899 | 923 | |
|---|
| 900 | 924 | nouveau_bo_sync_for_cpu(nvbo); |
|---|
| 901 | | - drm_gem_object_put_unlocked(gem); |
|---|
| 925 | + drm_gem_object_put(gem); |
|---|
| 902 | 926 | |
|---|
| 903 | 927 | return ret; |
|---|
| 904 | 928 | } |
|---|
| .. | .. |
|---|
| 917 | 941 | nvbo = nouveau_gem_object(gem); |
|---|
| 918 | 942 | |
|---|
| 919 | 943 | nouveau_bo_sync_for_device(nvbo); |
|---|
| 920 | | - drm_gem_object_put_unlocked(gem); |
|---|
| 944 | + drm_gem_object_put(gem); |
|---|
| 921 | 945 | return 0; |
|---|
| 922 | 946 | } |
|---|
| 923 | 947 | |
|---|
| .. | .. |
|---|
| 934 | 958 | return -ENOENT; |
|---|
| 935 | 959 | |
|---|
| 936 | 960 | ret = nouveau_gem_info(file_priv, gem, req); |
|---|
| 937 | | - drm_gem_object_put_unlocked(gem); |
|---|
| 961 | + drm_gem_object_put(gem); |
|---|
| 938 | 962 | return ret; |
|---|
| 939 | 963 | } |
|---|
| 940 | 964 | |
|---|