
hc
2023-12-06 08f87f769b595151be1afeff53e144f543faa614
kernel/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -31,7 +31,7 @@
 #include <linux/swiotlb.h>

 #include "nouveau_drv.h"
-#include "nouveau_dma.h"
+#include "nouveau_chan.h"
 #include "nouveau_fence.h"

 #include "nouveau_bo.h"
@@ -43,6 +43,9 @@
 #include <nvif/class.h>
 #include <nvif/if500b.h>
 #include <nvif/if900b.h>
+
+static int nouveau_ttm_tt_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm,
+			       struct ttm_resource *reg);

 /*
  * NV10-NV40 tiling helpers
@@ -136,10 +139,17 @@
 	struct drm_device *dev = drm->dev;
 	struct nouveau_bo *nvbo = nouveau_bo(bo);

-	if (unlikely(nvbo->gem.filp))
-		DRM_ERROR("bo %p still attached to GEM object\n", bo);
 	WARN_ON(nvbo->pin_refcnt > 0);
+	nouveau_bo_del_io_reserve_lru(bo);
 	nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
+
+	/*
+	 * If nouveau_bo_new() allocated this buffer, the GEM object was never
+	 * initialized, so don't attempt to release it.
+	 */
+	if (bo->base.dev)
+		drm_gem_object_release(&bo->base);
+
 	kfree(nvbo);
 }

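The GEM-release hunk above hinges on one detail: drm_gem_object_init(), used on the nouveau_gem_new() path, records the owning device in the embedded GEM object, while buffers created internally through nouveau_bo_new() never go through that step. A minimal sketch of the resulting check, assuming that standard drm_gem_object_init() behaviour; example_teardown() is a hypothetical name, not part of the patch:

    /* Sketch: bo->base.dev acts as a "was the GEM object initialized?" flag. */
    static void example_teardown(struct ttm_buffer_object *bo)
    {
            struct nouveau_bo *nvbo = nouveau_bo(bo);

            if (nvbo->bo.base.dev)                          /* set by drm_gem_object_init() */
                    drm_gem_object_release(&nvbo->bo.base); /* GEM-backed buffer */
            kfree(nvbo);                                    /* internal-only buffer: just free */
    }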
@@ -152,8 +162,7 @@
 }

 static void
-nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
-		       int *align, u64 *size)
+nouveau_bo_fixup_align(struct nouveau_bo *nvbo, int *align, u64 *size)
 {
 	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
 	struct nvif_device *device = &drm->client.device;
@@ -185,31 +194,24 @@
 	*size = roundup_64(*size, PAGE_SIZE);
 }

-int
-nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
-	       uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
-	       struct sg_table *sg, struct reservation_object *robj,
-	       struct nouveau_bo **pnvbo)
+struct nouveau_bo *
+nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain,
+		 u32 tile_mode, u32 tile_flags)
 {
 	struct nouveau_drm *drm = cli->drm;
 	struct nouveau_bo *nvbo;
 	struct nvif_mmu *mmu = &cli->mmu;
-	struct nvif_vmm *vmm = &cli->vmm.vmm;
-	size_t acc_size;
-	int type = ttm_bo_type_device;
-	int ret, i, pi = -1;
+	struct nvif_vmm *vmm = cli->svm.cli ? &cli->svm.vmm : &cli->vmm.vmm;
+	int i, pi = -1;

-	if (!size) {
-		NV_WARN(drm, "skipped size %016llx\n", size);
-		return -EINVAL;
+	if (!*size) {
+		NV_WARN(drm, "skipped size %016llx\n", *size);
+		return ERR_PTR(-EINVAL);
 	}
-
-	if (sg)
-		type = ttm_bo_type_sg;

 	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
 	if (!nvbo)
-		return -ENOMEM;
+		return ERR_PTR(-ENOMEM);
 	INIT_LIST_HEAD(&nvbo->head);
 	INIT_LIST_HEAD(&nvbo->entry);
 	INIT_LIST_HEAD(&nvbo->vma_list);
@@ -219,7 +221,7 @@
 	 * mapping, but is what NOUVEAU_GEM_DOMAIN_COHERENT gets translated
 	 * into in nouveau_gem_new().
 	 */
-	if (flags & TTM_PL_FLAG_UNCACHED) {
+	if (domain & NOUVEAU_GEM_DOMAIN_COHERENT) {
 		/* Determine if we can get a cache-coherent map, forcing
 		 * uncached mapping if we can't.
 		 */
@@ -231,7 +233,7 @@
 		nvbo->kind = (tile_flags & 0x0000ff00) >> 8;
 		if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
 			kfree(nvbo);
-			return -EINVAL;
+			return ERR_PTR(-EINVAL);
 		}

 		nvbo->comp = mmu->kind[nvbo->kind] != nvbo->kind;
@@ -241,7 +243,7 @@
 		nvbo->comp = (tile_flags & 0x00030000) >> 16;
 		if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
 			kfree(nvbo);
-			return -EINVAL;
+			return ERR_PTR(-EINVAL);
 		}
 	} else {
 		nvbo->zeta = (tile_flags & 0x00000007);
@@ -259,9 +261,9 @@
 		 * Skip page sizes that can't support needed domains.
 		 */
 		if (cli->device.info.family > NV_DEVICE_INFO_V0_CURIE &&
-		    (flags & TTM_PL_FLAG_VRAM) && !vmm->page[i].vram)
+		    (domain & NOUVEAU_GEM_DOMAIN_VRAM) && !vmm->page[i].vram)
 			continue;
-		if ((flags & TTM_PL_FLAG_TT) &&
+		if ((domain & NOUVEAU_GEM_DOMAIN_GART) &&
 		    (!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT))
 			continue;

@@ -273,12 +275,14 @@
 		pi = i;

 		/* Stop once the buffer is larger than the current page size. */
-		if (size >= 1ULL << vmm->page[i].shift)
+		if (*size >= 1ULL << vmm->page[i].shift)
 			break;
 	}

-	if (WARN_ON(pi < 0))
-		return -EINVAL;
+	if (WARN_ON(pi < 0)) {
+		kfree(nvbo);
+		return ERR_PTR(-EINVAL);
+	}

 	/* Disable compression if suitable settings couldn't be found. */
 	if (nvbo->comp && !vmm->page[pi].comp) {
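For context on the loop the two hunks above touch: nouveau_bo_alloc() walks the VMM's page-size array, skips sizes the requested domains cannot be mapped with, remembers the last usable index, and stops once the buffer is at least as large as the candidate page size. A simplified sketch under that reading (page_nr and page_size_usable() are illustrative stand-ins for the VRAM/GART checks; the real loop also tracks compression support):

    for (i = 0; i < page_nr; i++) {                         /* page_nr: illustrative bound */
            if (!page_size_usable(vmm, i, domain))          /* hypothetical predicate */
                    continue;
            pi = i;                                         /* remember best candidate so far */
            if (*size >= 1ULL << vmm->page[i].shift)
                    break;                                  /* buffer covers this page size */
    }
    if (WARN_ON(pi < 0)) {
            kfree(nvbo);                                    /* new in this patch: don't leak nvbo */
            return ERR_PTR(-EINVAL);
    }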
@@ -288,48 +292,101 @@
 	}
 	nvbo->page = vmm->page[pi].shift;

-	nouveau_bo_fixup_align(nvbo, flags, &align, &size);
+	nouveau_bo_fixup_align(nvbo, align, size);
+
+	return nvbo;
+}
+
+int
+nouveau_bo_init(struct nouveau_bo *nvbo, u64 size, int align, u32 domain,
+		struct sg_table *sg, struct dma_resv *robj)
+{
+	int type = sg ? ttm_bo_type_sg : ttm_bo_type_device;
+	size_t acc_size;
+	int ret;
+
+	acc_size = ttm_bo_dma_acc_size(nvbo->bo.bdev, size, sizeof(*nvbo));
+
 	nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
-	nouveau_bo_placement_set(nvbo, flags, 0);
+	nouveau_bo_placement_set(nvbo, domain, 0);
+	INIT_LIST_HEAD(&nvbo->io_reserve_lru);

-	acc_size = ttm_bo_dma_acc_size(&drm->ttm.bdev, size,
-				       sizeof(struct nouveau_bo));
-
-	ret = ttm_bo_init(&drm->ttm.bdev, &nvbo->bo, size,
-			  type, &nvbo->placement,
-			  align >> PAGE_SHIFT, false, acc_size, sg,
-			  robj, nouveau_bo_del_ttm);
+	ret = ttm_bo_init(nvbo->bo.bdev, &nvbo->bo, size, type,
+			  &nvbo->placement, align >> PAGE_SHIFT, false,
+			  acc_size, sg, robj, nouveau_bo_del_ttm);
 	if (ret) {
 		/* ttm will call nouveau_bo_del_ttm if it fails.. */
 		return ret;
 	}
+
+	return 0;
+}
+
+int
+nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
+	       uint32_t domain, uint32_t tile_mode, uint32_t tile_flags,
+	       struct sg_table *sg, struct dma_resv *robj,
+	       struct nouveau_bo **pnvbo)
+{
+	struct nouveau_bo *nvbo;
+	int ret;
+
+	nvbo = nouveau_bo_alloc(cli, &size, &align, domain, tile_mode,
+				tile_flags);
+	if (IS_ERR(nvbo))
+		return PTR_ERR(nvbo);
+
+	ret = nouveau_bo_init(nvbo, size, align, domain, sg, robj);
+	if (ret)
+		return ret;

 	*pnvbo = nvbo;
 	return 0;
 }

 static void
-set_placement_list(struct ttm_place *pl, unsigned *n, uint32_t type, uint32_t flags)
+set_placement_list(struct nouveau_drm *drm, struct ttm_place *pl, unsigned *n,
+		   uint32_t domain, uint32_t flags)
 {
 	*n = 0;

-	if (type & TTM_PL_FLAG_VRAM)
-		pl[(*n)++].flags = TTM_PL_FLAG_VRAM | flags;
-	if (type & TTM_PL_FLAG_TT)
-		pl[(*n)++].flags = TTM_PL_FLAG_TT | flags;
-	if (type & TTM_PL_FLAG_SYSTEM)
-		pl[(*n)++].flags = TTM_PL_FLAG_SYSTEM | flags;
+	if (domain & NOUVEAU_GEM_DOMAIN_VRAM) {
+		struct nvif_mmu *mmu = &drm->client.mmu;
+
+		pl[*n].mem_type = TTM_PL_VRAM;
+		pl[*n].flags = flags & ~TTM_PL_FLAG_CACHED;
+
+		/* Some BARs do not support being ioremapped WC */
+		if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
+		    mmu->type[drm->ttm.type_vram].type & NVIF_MEM_UNCACHED)
+			pl[*n].flags &= ~TTM_PL_FLAG_WC;
+
+		(*n)++;
+	}
+	if (domain & NOUVEAU_GEM_DOMAIN_GART) {
+		pl[*n].mem_type = TTM_PL_TT;
+		pl[*n].flags = flags;
+
+		if (drm->agp.bridge)
+			pl[*n].flags &= ~TTM_PL_FLAG_CACHED;
+
+		(*n)++;
+	}
+	if (domain & NOUVEAU_GEM_DOMAIN_CPU) {
+		pl[*n].mem_type = TTM_PL_SYSTEM;
+		pl[(*n)++].flags = flags;
+	}
 }

 static void
-set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
+set_placement_range(struct nouveau_bo *nvbo, uint32_t domain)
 {
 	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
 	u32 vram_pages = drm->client.device.info.ram_size >> PAGE_SHIFT;
 	unsigned i, fpfn, lpfn;

 	if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CELSIUS &&
-	    nvbo->mode && (type & TTM_PL_FLAG_VRAM) &&
+	    nvbo->mode && (domain & NOUVEAU_GEM_DOMAIN_VRAM) &&
 	    nvbo->bo.mem.num_pages < vram_pages / 4) {
 		/*
 		 * Make sure that the color and depth buffers are handled
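The hunk above splits the old nouveau_bo_new() into nouveau_bo_alloc() and nouveau_bo_init(). A hedged usage sketch of the resulting calling convention, mirroring the new nouveau_bo_new() body: alloc reports failure via ERR_PTR() and may round up *size/*align (hence the pointers), init hands the object to TTM, and TTM runs nouveau_bo_del_ttm on failure, so the caller must not free nvbo again:

    struct nouveau_bo *nvbo;
    int ret;

    nvbo = nouveau_bo_alloc(cli, &size, &align, NOUVEAU_GEM_DOMAIN_GART, 0, 0);
    if (IS_ERR(nvbo))
            return PTR_ERR(nvbo);           /* allocation-stage failure, nothing else to undo */

    ret = nouveau_bo_init(nvbo, size, align, NOUVEAU_GEM_DOMAIN_GART, NULL, NULL);
    if (ret)
            return ret;                     /* ttm_bo_init() already ran nouveau_bo_del_ttm */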
@@ -356,26 +413,28 @@
 }

 void
-nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
+nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t domain,
+			 uint32_t busy)
 {
+	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
 	struct ttm_placement *pl = &nvbo->placement;
 	uint32_t flags = (nvbo->force_coherent ? TTM_PL_FLAG_UNCACHED :
 			  TTM_PL_MASK_CACHING) |
 			 (nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);

 	pl->placement = nvbo->placements;
-	set_placement_list(nvbo->placements, &pl->num_placement,
-			   type, flags);
+	set_placement_list(drm, nvbo->placements, &pl->num_placement,
+			   domain, flags);

 	pl->busy_placement = nvbo->busy_placements;
-	set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
-			   type | busy, flags);
+	set_placement_list(drm, nvbo->busy_placements, &pl->num_busy_placement,
+			   domain | busy, flags);

-	set_placement_range(nvbo, type);
+	set_placement_range(nvbo, domain);
 }

 int
-nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype, bool contig)
+nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig)
 {
 	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
 	struct ttm_buffer_object *bo = &nvbo->bo;
@@ -387,7 +446,7 @@
 		return ret;

 	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
-	    memtype == TTM_PL_FLAG_VRAM && contig) {
+	    domain == NOUVEAU_GEM_DOMAIN_VRAM && contig) {
 		if (!nvbo->contig) {
 			nvbo->contig = true;
 			force = true;
@@ -396,10 +455,22 @@
 	}

 	if (nvbo->pin_refcnt) {
-		if (!(memtype & (1 << bo->mem.mem_type)) || evict) {
+		bool error = evict;
+
+		switch (bo->mem.mem_type) {
+		case TTM_PL_VRAM:
+			error |= !(domain & NOUVEAU_GEM_DOMAIN_VRAM);
+			break;
+		case TTM_PL_TT:
+			error |= !(domain & NOUVEAU_GEM_DOMAIN_GART);
+		default:
+			break;
+		}
+
+		if (error) {
 			NV_ERROR(drm, "bo %p pinned elsewhere: "
 				      "0x%08x vs 0x%08x\n", bo,
-				 1 << bo->mem.mem_type, memtype);
+				 bo->mem.mem_type, domain);
 			ret = -EBUSY;
 		}
 		nvbo->pin_refcnt++;
@@ -407,14 +478,14 @@
 	}

 	if (evict) {
-		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT, 0);
+		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, 0);
 		ret = nouveau_bo_validate(nvbo, false, false);
 		if (ret)
 			goto out;
 	}

 	nvbo->pin_refcnt++;
-	nouveau_bo_placement_set(nvbo, memtype, 0);
+	nouveau_bo_placement_set(nvbo, domain, 0);

 	/* drop pin_refcnt temporarily, so we don't trip the assertion
 	 * in nouveau_bo_move() that makes sure we're not trying to
@@ -460,7 +531,16 @@
 	if (ref)
 		goto out;

-	nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);
+	switch (bo->mem.mem_type) {
+	case TTM_PL_VRAM:
+		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, 0);
+		break;
+	case TTM_PL_TT:
+		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, 0);
+		break;
+	default:
+		break;
+	}

 	ret = nouveau_bo_validate(nvbo, false, false);
 	if (ret == 0) {
@@ -544,6 +624,26 @@
 					PAGE_SIZE, DMA_FROM_DEVICE);
 }

+void nouveau_bo_add_io_reserve_lru(struct ttm_buffer_object *bo)
+{
+	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
+	struct nouveau_bo *nvbo = nouveau_bo(bo);
+
+	mutex_lock(&drm->ttm.io_reserve_mutex);
+	list_move_tail(&nvbo->io_reserve_lru, &drm->ttm.io_reserve_lru);
+	mutex_unlock(&drm->ttm.io_reserve_mutex);
+}
+
+void nouveau_bo_del_io_reserve_lru(struct ttm_buffer_object *bo)
+{
+	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
+	struct nouveau_bo *nvbo = nouveau_bo(bo);
+
+	mutex_lock(&drm->ttm.io_reserve_mutex);
+	list_del_init(&nvbo->io_reserve_lru);
+	mutex_unlock(&drm->ttm.io_reserve_mutex);
+}
+
 int
 nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
 		    bool no_wait_gpu)
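The two helpers added above follow the usual guarded-list pattern, and the choice of list_move_tail()/list_del_init(), together with the INIT_LIST_HEAD(&nvbo->io_reserve_lru) added in nouveau_bo_init(), makes them safe to call repeatedly in any order. A small sketch of the resulting semantics (commentary, not code taken from the patch):

    nouveau_bo_add_io_reserve_lru(bo);  /* insert, or refresh recency if already listed */
    nouveau_bo_add_io_reserve_lru(bo);  /* harmless: list_move_tail() just re-queues it */
    nouveau_bo_del_io_reserve_lru(bo);  /* remove from the LRU */
    nouveau_bo_del_io_reserve_lru(bo);  /* harmless: list_del_init() on an empty node */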
@@ -617,73 +717,33 @@
 }

 static int
-nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
+nouveau_ttm_tt_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm,
+		    struct ttm_resource *reg)
 {
-	/* We'll do this from user space. */
-	return 0;
+#if IS_ENABLED(CONFIG_AGP)
+	struct nouveau_drm *drm = nouveau_bdev(bdev);
+#endif
+	if (!reg)
+		return -EINVAL;
+#if IS_ENABLED(CONFIG_AGP)
+	if (drm->agp.bridge)
+		return ttm_agp_bind(ttm, reg);
+#endif
+	return nouveau_sgdma_bind(bdev, ttm, reg);
 }

-static int
-nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
-			 struct ttm_mem_type_manager *man)
+static void
+nouveau_ttm_tt_unbind(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
 {
+#if IS_ENABLED(CONFIG_AGP)
 	struct nouveau_drm *drm = nouveau_bdev(bdev);
-	struct nvif_mmu *mmu = &drm->client.mmu;

-	switch (type) {
-	case TTM_PL_SYSTEM:
-		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
-		man->available_caching = TTM_PL_MASK_CACHING;
-		man->default_caching = TTM_PL_FLAG_CACHED;
-		break;
-	case TTM_PL_VRAM:
-		man->flags = TTM_MEMTYPE_FLAG_FIXED |
-			     TTM_MEMTYPE_FLAG_MAPPABLE;
-		man->available_caching = TTM_PL_FLAG_UNCACHED |
-					 TTM_PL_FLAG_WC;
-		man->default_caching = TTM_PL_FLAG_WC;
-
-		if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
-			/* Some BARs do not support being ioremapped WC */
-			const u8 type = mmu->type[drm->ttm.type_vram].type;
-			if (type & NVIF_MEM_UNCACHED) {
-				man->available_caching = TTM_PL_FLAG_UNCACHED;
-				man->default_caching = TTM_PL_FLAG_UNCACHED;
-			}
-
-			man->func = &nouveau_vram_manager;
-			man->io_reserve_fastpath = false;
-			man->use_io_reserve_lru = true;
-		} else {
-			man->func = &ttm_bo_manager_func;
-		}
-		break;
-	case TTM_PL_TT:
-		if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA)
-			man->func = &nouveau_gart_manager;
-		else
-		if (!drm->agp.bridge)
-			man->func = &nv04_gart_manager;
-		else
-			man->func = &ttm_bo_manager_func;
-
-		if (drm->agp.bridge) {
-			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
-			man->available_caching = TTM_PL_FLAG_UNCACHED |
-				TTM_PL_FLAG_WC;
-			man->default_caching = TTM_PL_FLAG_WC;
-		} else {
-			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
-				     TTM_MEMTYPE_FLAG_CMA;
-			man->available_caching = TTM_PL_MASK_CACHING;
-			man->default_caching = TTM_PL_FLAG_CACHED;
-		}
-
-		break;
-	default:
-		return -EINVAL;
+	if (drm->agp.bridge) {
+		ttm_agp_unbind(ttm);
+		return;
 	}
-	return 0;
+#endif
+	nouveau_sgdma_unbind(bdev, ttm);
 }

 static void
@@ -693,374 +753,20 @@

 	switch (bo->mem.mem_type) {
 	case TTM_PL_VRAM:
-		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
-					 TTM_PL_FLAG_SYSTEM);
+		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART,
+					 NOUVEAU_GEM_DOMAIN_CPU);
 		break;
 	default:
-		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
+		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_CPU, 0);
 		break;
 	}

 	*pl = nvbo->placement;
 }

-
-static int
-nve0_bo_move_init(struct nouveau_channel *chan, u32 handle)
-{
-	int ret = RING_SPACE(chan, 2);
-	if (ret == 0) {
-		BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
-		OUT_RING (chan, handle & 0x0000ffff);
-		FIRE_RING (chan);
-	}
-	return ret;
-}
-
-static int
-nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
-		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
-{
-	struct nouveau_mem *mem = nouveau_mem(old_reg);
-	int ret = RING_SPACE(chan, 10);
-	if (ret == 0) {
-		BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8);
-		OUT_RING (chan, upper_32_bits(mem->vma[0].addr));
-		OUT_RING (chan, lower_32_bits(mem->vma[0].addr));
-		OUT_RING (chan, upper_32_bits(mem->vma[1].addr));
-		OUT_RING (chan, lower_32_bits(mem->vma[1].addr));
-		OUT_RING (chan, PAGE_SIZE);
-		OUT_RING (chan, PAGE_SIZE);
-		OUT_RING (chan, PAGE_SIZE);
-		OUT_RING (chan, new_reg->num_pages);
-		BEGIN_IMC0(chan, NvSubCopy, 0x0300, 0x0386);
-	}
-	return ret;
-}
-
-static int
-nvc0_bo_move_init(struct nouveau_channel *chan, u32 handle)
-{
-	int ret = RING_SPACE(chan, 2);
-	if (ret == 0) {
-		BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
-		OUT_RING (chan, handle);
-	}
-	return ret;
-}
-
-static int
-nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
-		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
-{
-	struct nouveau_mem *mem = nouveau_mem(old_reg);
-	u64 src_offset = mem->vma[0].addr;
-	u64 dst_offset = mem->vma[1].addr;
-	u32 page_count = new_reg->num_pages;
-	int ret;
-
-	page_count = new_reg->num_pages;
-	while (page_count) {
-		int line_count = (page_count > 8191) ? 8191 : page_count;
-
-		ret = RING_SPACE(chan, 11);
-		if (ret)
-			return ret;
-
-		BEGIN_NVC0(chan, NvSubCopy, 0x030c, 8);
-		OUT_RING (chan, upper_32_bits(src_offset));
-		OUT_RING (chan, lower_32_bits(src_offset));
-		OUT_RING (chan, upper_32_bits(dst_offset));
-		OUT_RING (chan, lower_32_bits(dst_offset));
-		OUT_RING (chan, PAGE_SIZE);
-		OUT_RING (chan, PAGE_SIZE);
-		OUT_RING (chan, PAGE_SIZE);
-		OUT_RING (chan, line_count);
-		BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
-		OUT_RING (chan, 0x00000110);
-
-		page_count -= line_count;
-		src_offset += (PAGE_SIZE * line_count);
-		dst_offset += (PAGE_SIZE * line_count);
-	}
-
-	return 0;
-}
-
-static int
-nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
-		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
-{
-	struct nouveau_mem *mem = nouveau_mem(old_reg);
-	u64 src_offset = mem->vma[0].addr;
-	u64 dst_offset = mem->vma[1].addr;
-	u32 page_count = new_reg->num_pages;
-	int ret;
-
-	page_count = new_reg->num_pages;
-	while (page_count) {
-		int line_count = (page_count > 2047) ? 2047 : page_count;
-
-		ret = RING_SPACE(chan, 12);
-		if (ret)
-			return ret;
-
-		BEGIN_NVC0(chan, NvSubCopy, 0x0238, 2);
-		OUT_RING (chan, upper_32_bits(dst_offset));
-		OUT_RING (chan, lower_32_bits(dst_offset));
-		BEGIN_NVC0(chan, NvSubCopy, 0x030c, 6);
-		OUT_RING (chan, upper_32_bits(src_offset));
-		OUT_RING (chan, lower_32_bits(src_offset));
-		OUT_RING (chan, PAGE_SIZE); /* src_pitch */
-		OUT_RING (chan, PAGE_SIZE); /* dst_pitch */
-		OUT_RING (chan, PAGE_SIZE); /* line_length */
-		OUT_RING (chan, line_count);
-		BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
-		OUT_RING (chan, 0x00100110);
-
-		page_count -= line_count;
-		src_offset += (PAGE_SIZE * line_count);
-		dst_offset += (PAGE_SIZE * line_count);
-	}
-
-	return 0;
-}
-
-static int
-nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
-		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
-{
-	struct nouveau_mem *mem = nouveau_mem(old_reg);
-	u64 src_offset = mem->vma[0].addr;
-	u64 dst_offset = mem->vma[1].addr;
-	u32 page_count = new_reg->num_pages;
-	int ret;
-
-	page_count = new_reg->num_pages;
-	while (page_count) {
-		int line_count = (page_count > 8191) ? 8191 : page_count;
-
-		ret = RING_SPACE(chan, 11);
-		if (ret)
-			return ret;
-
-		BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
-		OUT_RING (chan, upper_32_bits(src_offset));
-		OUT_RING (chan, lower_32_bits(src_offset));
-		OUT_RING (chan, upper_32_bits(dst_offset));
-		OUT_RING (chan, lower_32_bits(dst_offset));
-		OUT_RING (chan, PAGE_SIZE);
-		OUT_RING (chan, PAGE_SIZE);
-		OUT_RING (chan, PAGE_SIZE);
-		OUT_RING (chan, line_count);
-		BEGIN_NV04(chan, NvSubCopy, 0x0300, 1);
-		OUT_RING (chan, 0x00000110);
-
-		page_count -= line_count;
-		src_offset += (PAGE_SIZE * line_count);
-		dst_offset += (PAGE_SIZE * line_count);
-	}
-
-	return 0;
-}
-
-static int
-nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
-		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
-{
-	struct nouveau_mem *mem = nouveau_mem(old_reg);
-	int ret = RING_SPACE(chan, 7);
-	if (ret == 0) {
-		BEGIN_NV04(chan, NvSubCopy, 0x0320, 6);
-		OUT_RING (chan, upper_32_bits(mem->vma[0].addr));
-		OUT_RING (chan, lower_32_bits(mem->vma[0].addr));
-		OUT_RING (chan, upper_32_bits(mem->vma[1].addr));
-		OUT_RING (chan, lower_32_bits(mem->vma[1].addr));
-		OUT_RING (chan, 0x00000000 /* COPY */);
-		OUT_RING (chan, new_reg->num_pages << PAGE_SHIFT);
-	}
-	return ret;
-}
-
-static int
-nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
-		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
-{
-	struct nouveau_mem *mem = nouveau_mem(old_reg);
-	int ret = RING_SPACE(chan, 7);
-	if (ret == 0) {
-		BEGIN_NV04(chan, NvSubCopy, 0x0304, 6);
-		OUT_RING (chan, new_reg->num_pages << PAGE_SHIFT);
-		OUT_RING (chan, upper_32_bits(mem->vma[0].addr));
-		OUT_RING (chan, lower_32_bits(mem->vma[0].addr));
-		OUT_RING (chan, upper_32_bits(mem->vma[1].addr));
-		OUT_RING (chan, lower_32_bits(mem->vma[1].addr));
-		OUT_RING (chan, 0x00000000 /* MODE_COPY, QUERY_NONE */);
-	}
-	return ret;
-}
-
-static int
-nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)
-{
-	int ret = RING_SPACE(chan, 6);
-	if (ret == 0) {
-		BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
-		OUT_RING (chan, handle);
-		BEGIN_NV04(chan, NvSubCopy, 0x0180, 3);
-		OUT_RING (chan, chan->drm->ntfy.handle);
-		OUT_RING (chan, chan->vram.handle);
-		OUT_RING (chan, chan->vram.handle);
-	}
-
-	return ret;
-}
-
-static int
-nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
-		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
-{
-	struct nouveau_mem *mem = nouveau_mem(old_reg);
-	u64 length = (new_reg->num_pages << PAGE_SHIFT);
-	u64 src_offset = mem->vma[0].addr;
-	u64 dst_offset = mem->vma[1].addr;
-	int src_tiled = !!mem->kind;
-	int dst_tiled = !!nouveau_mem(new_reg)->kind;
-	int ret;
-
-	while (length) {
-		u32 amount, stride, height;
-
-		ret = RING_SPACE(chan, 18 + 6 * (src_tiled + dst_tiled));
-		if (ret)
-			return ret;
-
-		amount = min(length, (u64)(4 * 1024 * 1024));
-		stride = 16 * 4;
-		height = amount / stride;
-
-		if (src_tiled) {
-			BEGIN_NV04(chan, NvSubCopy, 0x0200, 7);
-			OUT_RING (chan, 0);
-			OUT_RING (chan, 0);
-			OUT_RING (chan, stride);
-			OUT_RING (chan, height);
-			OUT_RING (chan, 1);
-			OUT_RING (chan, 0);
-			OUT_RING (chan, 0);
-		} else {
-			BEGIN_NV04(chan, NvSubCopy, 0x0200, 1);
-			OUT_RING (chan, 1);
-		}
-		if (dst_tiled) {
-			BEGIN_NV04(chan, NvSubCopy, 0x021c, 7);
-			OUT_RING (chan, 0);
-			OUT_RING (chan, 0);
-			OUT_RING (chan, stride);
-			OUT_RING (chan, height);
-			OUT_RING (chan, 1);
-			OUT_RING (chan, 0);
-			OUT_RING (chan, 0);
-		} else {
-			BEGIN_NV04(chan, NvSubCopy, 0x021c, 1);
-			OUT_RING (chan, 1);
-		}
-
-		BEGIN_NV04(chan, NvSubCopy, 0x0238, 2);
-		OUT_RING (chan, upper_32_bits(src_offset));
-		OUT_RING (chan, upper_32_bits(dst_offset));
-		BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
-		OUT_RING (chan, lower_32_bits(src_offset));
-		OUT_RING (chan, lower_32_bits(dst_offset));
-		OUT_RING (chan, stride);
-		OUT_RING (chan, stride);
-		OUT_RING (chan, stride);
-		OUT_RING (chan, height);
-		OUT_RING (chan, 0x00000101);
-		OUT_RING (chan, 0x00000000);
-		BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
-		OUT_RING (chan, 0);
-
-		length -= amount;
-		src_offset += amount;
-		dst_offset += amount;
-	}
-
-	return 0;
-}
-
-static int
-nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)
-{
-	int ret = RING_SPACE(chan, 4);
-	if (ret == 0) {
-		BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
-		OUT_RING (chan, handle);
-		BEGIN_NV04(chan, NvSubCopy, 0x0180, 1);
-		OUT_RING (chan, chan->drm->ntfy.handle);
-	}
-
-	return ret;
-}
-
-static inline uint32_t
-nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
-		      struct nouveau_channel *chan, struct ttm_mem_reg *reg)
-{
-	if (reg->mem_type == TTM_PL_TT)
-		return NvDmaTT;
-	return chan->vram.handle;
-}
-
-static int
-nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
-		  struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
-{
-	u32 src_offset = old_reg->start << PAGE_SHIFT;
-	u32 dst_offset = new_reg->start << PAGE_SHIFT;
-	u32 page_count = new_reg->num_pages;
-	int ret;
-
-	ret = RING_SPACE(chan, 3);
-	if (ret)
-		return ret;
-
-	BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
-	OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, old_reg));
-	OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, new_reg));
-
-	page_count = new_reg->num_pages;
-	while (page_count) {
-		int line_count = (page_count > 2047) ? 2047 : page_count;
-
-		ret = RING_SPACE(chan, 11);
-		if (ret)
-			return ret;
-
-		BEGIN_NV04(chan, NvSubCopy,
-			   NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
-		OUT_RING (chan, src_offset);
-		OUT_RING (chan, dst_offset);
-		OUT_RING (chan, PAGE_SIZE); /* src_pitch */
-		OUT_RING (chan, PAGE_SIZE); /* dst_pitch */
-		OUT_RING (chan, PAGE_SIZE); /* line_length */
-		OUT_RING (chan, line_count);
-		OUT_RING (chan, 0x00000101);
-		OUT_RING (chan, 0x00000000);
-		BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
-		OUT_RING (chan, 0);
-
-		page_count -= line_count;
-		src_offset += (PAGE_SIZE * line_count);
-		dst_offset += (PAGE_SIZE * line_count);
-	}
-
-	return 0;
-}
-
 static int
 nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
-		     struct ttm_mem_reg *reg)
+		     struct ttm_resource *reg)
 {
 	struct nouveau_mem *old_mem = nouveau_mem(&bo->mem);
 	struct nouveau_mem *new_mem = nouveau_mem(reg);
@@ -1092,7 +798,7 @@

 static int
 nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
-		     bool no_wait_gpu, struct ttm_mem_reg *new_reg)
+		     bool no_wait_gpu, struct ttm_resource *new_reg)
 {
 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 	struct nouveau_channel *chan = drm->ttm.chan;
@@ -1102,7 +808,7 @@

 	/* create temporary vmas for the transfer and attach them to the
 	 * old nvkm_mem node, these will get cleaned up after ttm has
-	 * destroyed the ttm_mem_reg
+	 * destroyed the ttm_resource
 	 */
 	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
 		ret = nouveau_bo_move_prep(drm, bo, new_reg);
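The comment retained above ("create temporary vmas for the transfer...") refers to the convention the DMA copy paths rely on, visible in the nvXX_bo_move_copy() helpers removed earlier in this patch: both the current and the new backing store are mapped into the copy channel's VMM and the resulting addresses are parked on the old nouveau_mem node. Roughly, a copy-exec callback only consumes:

    /* Sketch of the inputs a copy-exec callback reads (see the removed helpers above). */
    u64 src_offset = mem->vma[0].addr;      /* GPU VA of the source (current placement) */
    u64 dst_offset = mem->vma[1].addr;      /* GPU VA of the destination (new placement) */
    u32 page_count = new_reg->num_pages;    /* copy length, in pages */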
@@ -1119,7 +825,7 @@
 	if (ret == 0) {
 		ret = ttm_bo_move_accel_cleanup(bo,
 						&fence->base,
-						evict,
+						evict, false,
 						new_reg);
 		nouveau_fence_unref(&fence);
 	}
@@ -1132,15 +838,17 @@
 void
 nouveau_bo_move_init(struct nouveau_drm *drm)
 {
-	static const struct {
+	static const struct _method_table {
 		const char *name;
 		int engine;
 		s32 oclass;
 		int (*exec)(struct nouveau_channel *,
 			    struct ttm_buffer_object *,
-			    struct ttm_mem_reg *, struct ttm_mem_reg *);
+			    struct ttm_resource *, struct ttm_resource *);
 		int (*init)(struct nouveau_channel *, u32 handle);
 	} _methods[] = {
+		{ "COPY", 4, 0xc5b5, nve0_bo_move_copy, nve0_bo_move_init },
+		{ "GRCE", 0, 0xc5b5, nve0_bo_move_copy, nvc0_bo_move_init },
 		{ "COPY", 4, 0xc3b5, nve0_bo_move_copy, nve0_bo_move_init },
 		{ "GRCE", 0, 0xc3b5, nve0_bo_move_copy, nvc0_bo_move_init },
 		{ "COPY", 4, 0xc1b5, nve0_bo_move_copy, nve0_bo_move_init },
@@ -1159,8 +867,8 @@
 		{ "M2MF", 0, 0x5039, nv50_bo_move_m2mf, nv50_bo_move_init },
 		{ "M2MF", 0, 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init },
 		{},
-		{ "CRYPT", 0, 0x88b4, nv98_bo_move_exec, nv50_bo_move_init },
-	}, *mthd = _methods;
+	};
+	const struct _method_table *mthd = _methods;
 	const char *name = "CPU";
 	int ret;

@@ -1174,14 +882,14 @@
 		if (chan == NULL)
 			continue;

-		ret = nvif_object_init(&chan->user,
+		ret = nvif_object_ctor(&chan->user, "ttmBoMove",
 				       mthd->oclass | (mthd->engine << 16),
 				       mthd->oclass, NULL, 0,
 				       &drm->ttm.copy);
 		if (ret == 0) {
 			ret = mthd->init(chan, drm->ttm.copy.handle);
 			if (ret) {
-				nvif_object_fini(&drm->ttm.copy);
+				nvif_object_dtor(&drm->ttm.copy);
 				continue;
 			}

@@ -1197,16 +905,17 @@

 static int
 nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
-		      bool no_wait_gpu, struct ttm_mem_reg *new_reg)
+		      bool no_wait_gpu, struct ttm_resource *new_reg)
 {
 	struct ttm_operation_ctx ctx = { intr, no_wait_gpu };
 	struct ttm_place placement_memtype = {
 		.fpfn = 0,
 		.lpfn = 0,
-		.flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING
+		.mem_type = TTM_PL_TT,
+		.flags = TTM_PL_MASK_CACHING
 	};
 	struct ttm_placement placement;
-	struct ttm_mem_reg tmp_reg;
+	struct ttm_resource tmp_reg;
 	int ret;

 	placement.num_placement = placement.num_busy_placement = 1;
@@ -1218,7 +927,11 @@
 	if (ret)
 		return ret;

-	ret = ttm_tt_bind(bo->ttm, &tmp_reg, &ctx);
+	ret = ttm_tt_populate(bo->bdev, bo->ttm, &ctx);
+	if (ret)
+		goto out;
+
+	ret = nouveau_ttm_tt_bind(bo->bdev, bo->ttm, &tmp_reg);
 	if (ret)
 		goto out;

@@ -1228,22 +941,23 @@

 	ret = ttm_bo_move_ttm(bo, &ctx, new_reg);
 out:
-	ttm_bo_mem_put(bo, &tmp_reg);
+	ttm_resource_free(bo, &tmp_reg);
 	return ret;
 }

 static int
 nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
-		      bool no_wait_gpu, struct ttm_mem_reg *new_reg)
+		      bool no_wait_gpu, struct ttm_resource *new_reg)
 {
 	struct ttm_operation_ctx ctx = { intr, no_wait_gpu };
 	struct ttm_place placement_memtype = {
 		.fpfn = 0,
 		.lpfn = 0,
-		.flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING
+		.mem_type = TTM_PL_TT,
+		.flags = TTM_PL_MASK_CACHING
 	};
 	struct ttm_placement placement;
-	struct ttm_mem_reg tmp_reg;
+	struct ttm_resource tmp_reg;
 	int ret;

 	placement.num_placement = placement.num_busy_placement = 1;
@@ -1264,13 +978,13 @@
 		goto out;

 out:
-	ttm_bo_mem_put(bo, &tmp_reg);
+	ttm_resource_free(bo, &tmp_reg);
 	return ret;
 }

 static void
 nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, bool evict,
-		     struct ttm_mem_reg *new_reg)
+		     struct ttm_resource *new_reg)
 {
 	struct nouveau_mem *mem = new_reg ? nouveau_mem(new_reg) : NULL;
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
@@ -1279,6 +993,8 @@
 	/* ttm can now (stupidly) pass the driver bos it didn't create... */
 	if (bo->destroy != nouveau_bo_del_ttm)
 		return;
+
+	nouveau_bo_del_io_reserve_lru(bo);

 	if (mem && new_reg->mem_type != TTM_PL_SYSTEM &&
 	    mem->mem.page == nvbo->page) {
@@ -1291,10 +1007,18 @@
 			nouveau_vma_unmap(vma);
 		}
 	}
+
+	if (new_reg) {
+		if (new_reg->mm_node)
+			nvbo->offset = (new_reg->start << PAGE_SHIFT);
+		else
+			nvbo->offset = 0;
+	}
+
 }

 static int
-nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_reg,
+nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_resource *new_reg,
 		   struct nouveau_drm_tile **new_tile)
 {
 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
@@ -1321,7 +1045,7 @@
 {
 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 	struct drm_device *dev = drm->dev;
-	struct dma_fence *fence = reservation_object_get_excl(bo->resv);
+	struct dma_fence *fence = dma_resv_get_excl(bo->base.resv);

 	nv10_bo_put_tile_region(dev, *old_tile, fence);
 	*old_tile = new_tile;
@@ -1330,11 +1054,11 @@
 static int
 nouveau_bo_move(struct ttm_buffer_object *bo, bool evict,
 		struct ttm_operation_ctx *ctx,
-		struct ttm_mem_reg *new_reg)
+		struct ttm_resource *new_reg)
 {
 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
-	struct ttm_mem_reg *old_reg = &bo->mem;
+	struct ttm_resource *old_reg = &bo->mem;
 	struct nouveau_drm_tile *new_tile = NULL;
 	int ret = 0;

@@ -1353,9 +1077,7 @@

 	/* Fake bo copy. */
 	if (old_reg->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
-		BUG_ON(bo->mem.mm_node != NULL);
-		bo->mem = *new_reg;
-		new_reg->mm_node = NULL;
+		ttm_bo_move_null(bo, new_reg);
 		goto out;
 	}

@@ -1398,44 +1120,64 @@
 {
 	struct nouveau_bo *nvbo = nouveau_bo(bo);

-	return drm_vma_node_verify_access(&nvbo->gem.vma_node,
+	return drm_vma_node_verify_access(&nvbo->bo.base.vma_node,
 					  filp->private_data);
 }

-static int
-nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
+static void
+nouveau_ttm_io_mem_free_locked(struct nouveau_drm *drm,
+			       struct ttm_resource *reg)
 {
-	struct ttm_mem_type_manager *man = &bdev->man[reg->mem_type];
+	struct nouveau_mem *mem = nouveau_mem(reg);
+
+	if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) {
+		switch (reg->mem_type) {
+		case TTM_PL_TT:
+			if (mem->kind)
+				nvif_object_unmap_handle(&mem->mem.object);
+			break;
+		case TTM_PL_VRAM:
+			nvif_object_unmap_handle(&mem->mem.object);
+			break;
+		default:
+			break;
+		}
+	}
+}
+
+static int
+nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *reg)
+{
 	struct nouveau_drm *drm = nouveau_bdev(bdev);
 	struct nvkm_device *device = nvxx_device(&drm->client.device);
 	struct nouveau_mem *mem = nouveau_mem(reg);
+	int ret;

-	reg->bus.addr = NULL;
-	reg->bus.offset = 0;
-	reg->bus.size = reg->num_pages << PAGE_SHIFT;
-	reg->bus.base = 0;
-	reg->bus.is_iomem = false;
-	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
-		return -EINVAL;
+	mutex_lock(&drm->ttm.io_reserve_mutex);
+retry:
 	switch (reg->mem_type) {
 	case TTM_PL_SYSTEM:
 		/* System memory */
-		return 0;
+		ret = 0;
+		goto out;
 	case TTM_PL_TT:
 #if IS_ENABLED(CONFIG_AGP)
 		if (drm->agp.bridge) {
-			reg->bus.offset = reg->start << PAGE_SHIFT;
-			reg->bus.base = drm->agp.base;
+			reg->bus.offset = (reg->start << PAGE_SHIFT) +
+				drm->agp.base;
 			reg->bus.is_iomem = !drm->agp.cma;
 		}
 #endif
-		if (drm->client.mem->oclass < NVIF_CLASS_MEM_NV50 || !mem->kind)
+		if (drm->client.mem->oclass < NVIF_CLASS_MEM_NV50 ||
+		    !mem->kind) {
 			/* untiled */
+			ret = 0;
 			break;
-		/* fallthrough, tiled memory */
+		}
+		fallthrough;	/* tiled memory */
 	case TTM_PL_VRAM:
-		reg->bus.offset = reg->start << PAGE_SHIFT;
-		reg->bus.base = device->func->resource_addr(device, 1);
+		reg->bus.offset = (reg->start << PAGE_SHIFT) +
+			device->func->resource_addr(device, 1);
 		reg->bus.is_iomem = true;
 		if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) {
 			union {
@@ -1444,7 +1186,6 @@
 			} args;
 			u64 handle, length;
 			u32 argc = 0;
-			int ret;

 			switch (mem->mem.object.oclass) {
 			case NVIF_CLASS_MEM_NV50:
@@ -1468,38 +1209,48 @@
 			ret = nvif_object_map_handle(&mem->mem.object,
 						     &args, argc,
 						     &handle, &length);
-			if (ret != 1)
-				return ret ? ret : -EINVAL;
+			if (ret != 1) {
+				if (WARN_ON(ret == 0))
+					ret = -EINVAL;
+				goto out;
+			}

-			reg->bus.base = 0;
 			reg->bus.offset = handle;
 		}
+		ret = 0;
 		break;
 	default:
-		return -EINVAL;
+		ret = -EINVAL;
 	}
-	return 0;
+
+out:
+	if (ret == -ENOSPC) {
+		struct nouveau_bo *nvbo;
+
+		nvbo = list_first_entry_or_null(&drm->ttm.io_reserve_lru,
+						typeof(*nvbo),
+						io_reserve_lru);
+		if (nvbo) {
+			list_del_init(&nvbo->io_reserve_lru);
+			drm_vma_node_unmap(&nvbo->bo.base.vma_node,
+					   bdev->dev_mapping);
+			nouveau_ttm_io_mem_free_locked(drm, &nvbo->bo.mem);
+			goto retry;
+		}
+
+	}
+	mutex_unlock(&drm->ttm.io_reserve_mutex);
+	return ret;
 }

 static void
-nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
+nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_resource *reg)
 {
 	struct nouveau_drm *drm = nouveau_bdev(bdev);
-	struct nouveau_mem *mem = nouveau_mem(reg);

-	if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) {
-		switch (reg->mem_type) {
-		case TTM_PL_TT:
-			if (mem->kind)
-				nvif_object_unmap_handle(&mem->mem.object);
-			break;
-		case TTM_PL_VRAM:
-			nvif_object_unmap_handle(&mem->mem.object);
-			break;
-		default:
-			break;
-		}
-	}
+	mutex_lock(&drm->ttm.io_reserve_mutex);
+	nouveau_ttm_io_mem_free_locked(drm, reg);
+	mutex_unlock(&drm->ttm.io_reserve_mutex);
 }

 static int
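The io_mem_reserve() rework above combines the new io_reserve LRU with a retry loop: everything runs under io_reserve_mutex, and when mapping fails with -ENOSPC the least recently used BO is unmapped, dropped from the LRU, and the reservation is retried. A distilled sketch of that control flow (try_io_reserve(), lru_first() and unmap_and_drop() are hypothetical stand-ins, not functions from the patch):

    mutex_lock(&drm->ttm.io_reserve_mutex);
    retry:
            ret = try_io_reserve(reg);                      /* hypothetical: the switch above */
            if (ret == -ENOSPC) {
                    victim = lru_first(&drm->ttm.io_reserve_lru);   /* hypothetical helper */
                    if (victim) {
                            unmap_and_drop(victim);         /* drm_vma_node_unmap() + free mapping */
                            goto retry;
                    }
            }
            mutex_unlock(&drm->ttm.io_reserve_mutex);
            return ret;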
@@ -1520,7 +1271,8 @@
 		return 0;

 	if (bo->mem.mem_type == TTM_PL_SYSTEM) {
-		nouveau_bo_placement_set(nvbo, TTM_PL_TT, 0);
+		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART,
+					 0);

 		ret = nouveau_bo_validate(nvbo, false, false);
 		if (ret)
@@ -1544,37 +1296,36 @@
 		nvbo->busy_placements[i].lpfn = mappable;
 	}

-	nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
+	nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, 0);
 	return nouveau_bo_validate(nvbo, false, false);
 }

 static int
-nouveau_ttm_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
+nouveau_ttm_tt_populate(struct ttm_bo_device *bdev,
+			struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
 {
 	struct ttm_dma_tt *ttm_dma = (void *)ttm;
 	struct nouveau_drm *drm;
 	struct device *dev;
-	unsigned i;
-	int r;
 	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

-	if (ttm->state != tt_unpopulated)
+	if (ttm_tt_is_populated(ttm))
 		return 0;

 	if (slave && ttm->sg) {
 		/* make userspace faulting work */
 		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
 						 ttm_dma->dma_address, ttm->num_pages);
-		ttm->state = tt_unbound;
+		ttm_tt_set_populated(ttm);
 		return 0;
 	}

-	drm = nouveau_bdev(ttm->bdev);
+	drm = nouveau_bdev(bdev);
 	dev = drm->dev->dev;

 #if IS_ENABLED(CONFIG_AGP)
 	if (drm->agp.bridge) {
-		return ttm_agp_tt_populate(ttm, ctx);
+		return ttm_pool_populate(ttm, ctx);
 	}
 #endif

@@ -1583,51 +1334,27 @@
 		return ttm_dma_populate((void *)ttm, dev, ctx);
 	}
 #endif
-
-	r = ttm_pool_populate(ttm, ctx);
-	if (r) {
-		return r;
-	}
-
-	for (i = 0; i < ttm->num_pages; i++) {
-		dma_addr_t addr;
-
-		addr = dma_map_page(dev, ttm->pages[i], 0, PAGE_SIZE,
-				    DMA_BIDIRECTIONAL);
-
-		if (dma_mapping_error(dev, addr)) {
-			while (i--) {
-				dma_unmap_page(dev, ttm_dma->dma_address[i],
-					       PAGE_SIZE, DMA_BIDIRECTIONAL);
-				ttm_dma->dma_address[i] = 0;
-			}
-			ttm_pool_unpopulate(ttm);
-			return -EFAULT;
-		}
-
-		ttm_dma->dma_address[i] = addr;
-	}
-	return 0;
+	return ttm_populate_and_map_pages(dev, ttm_dma, ctx);
 }

 static void
-nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
+nouveau_ttm_tt_unpopulate(struct ttm_bo_device *bdev,
+			  struct ttm_tt *ttm)
 {
 	struct ttm_dma_tt *ttm_dma = (void *)ttm;
 	struct nouveau_drm *drm;
 	struct device *dev;
-	unsigned i;
 	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

 	if (slave)
 		return;

-	drm = nouveau_bdev(ttm->bdev);
+	drm = nouveau_bdev(bdev);
 	dev = drm->dev->dev;

 #if IS_ENABLED(CONFIG_AGP)
 	if (drm->agp.bridge) {
-		ttm_agp_tt_unpopulate(ttm);
+		ttm_pool_unpopulate(ttm);
 		return;
 	}
 #endif
@@ -1639,33 +1366,43 @@
 	}
 #endif

-	for (i = 0; i < ttm->num_pages; i++) {
-		if (ttm_dma->dma_address[i]) {
-			dma_unmap_page(dev, ttm_dma->dma_address[i], PAGE_SIZE,
-				       DMA_BIDIRECTIONAL);
-		}
-	}
+	ttm_unmap_and_unpopulate_pages(dev, ttm_dma);
+}

-	ttm_pool_unpopulate(ttm);
+static void
+nouveau_ttm_tt_destroy(struct ttm_bo_device *bdev,
+		       struct ttm_tt *ttm)
+{
+#if IS_ENABLED(CONFIG_AGP)
+	struct nouveau_drm *drm = nouveau_bdev(bdev);
+	if (drm->agp.bridge) {
+		ttm_agp_unbind(ttm);
+		ttm_tt_destroy_common(bdev, ttm);
+		ttm_agp_destroy(ttm);
+		return;
+	}
+#endif
+	nouveau_sgdma_destroy(bdev, ttm);
 }

 void
 nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence, bool exclusive)
 {
-	struct reservation_object *resv = nvbo->bo.resv;
+	struct dma_resv *resv = nvbo->bo.base.resv;

 	if (exclusive)
-		reservation_object_add_excl_fence(resv, &fence->base);
+		dma_resv_add_excl_fence(resv, &fence->base);
 	else if (fence)
-		reservation_object_add_shared_fence(resv, &fence->base);
+		dma_resv_add_shared_fence(resv, &fence->base);
 }

 struct ttm_bo_driver nouveau_bo_driver = {
 	.ttm_tt_create = &nouveau_ttm_tt_create,
 	.ttm_tt_populate = &nouveau_ttm_tt_populate,
 	.ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
-	.invalidate_caches = nouveau_bo_invalidate_caches,
-	.init_mem_type = nouveau_bo_init_mem_type,
+	.ttm_tt_bind = &nouveau_ttm_tt_bind,
+	.ttm_tt_unbind = &nouveau_ttm_tt_unbind,
+	.ttm_tt_destroy = &nouveau_ttm_tt_destroy,
 	.eviction_valuable = ttm_bo_eviction_valuable,
 	.evict_flags = nouveau_bo_evict_flags,
 	.move_notify = nouveau_bo_move_ntfy,