hc
2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -26,11 +26,17 @@
  * Jerome Glisse
  */
 #include <linux/ktime.h>
+#include <linux/module.h>
 #include <linux/pagemap.h>
-#include <drm/drmP.h>
+#include <linux/pci.h>
+#include <linux/dma-buf.h>
+
 #include <drm/amdgpu_drm.h>
+#include <drm/drm_debugfs.h>
+
 #include "amdgpu.h"
 #include "amdgpu_display.h"
+#include "amdgpu_xgmi.h"
 
 void amdgpu_gem_object_free(struct drm_gem_object *gobj)
 {
@@ -45,7 +51,7 @@
 int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
                             int alignment, u32 initial_domain,
                             u64 flags, enum ttm_bo_type type,
-                            struct reservation_object *resv,
+                            struct dma_resv *resv,
                             struct drm_gem_object **obj)
 {
        struct amdgpu_bo *bo;
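
Note: this commit tracks the kernel-wide rename of struct reservation_object to struct dma_resv; the object itself also moved, from amdgpu_bo.tbo.resv to the GEM base object at amdgpu_bo.tbo.base.resv. As a reminder of the API being adopted, here is a minimal, illustrative sketch of fencing a buffer through <linux/dma-resv.h> (the helper name and calling context are assumptions, not part of this file):

    /* Illustrative sketch only: attach one shared fence to a dma_resv. */
    static int example_add_shared_fence(struct dma_resv *resv,
                                        struct dma_fence *fence)
    {
            int r;

            r = dma_resv_lock(resv, NULL);  /* NULL: no ww_acquire_ctx */
            if (r)
                    return r;

            /* pre-allocate a slot so the add below cannot fail */
            r = dma_resv_reserve_shared(resv, 1);
            if (!r)
                    dma_resv_add_shared_fence(resv, fence);

            dma_resv_unlock(resv);
            return r;
    }
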
@@ -54,44 +60,26 @@
 
        memset(&bp, 0, sizeof(bp));
        *obj = NULL;
-       /* At least align on page size */
-       if (alignment < PAGE_SIZE) {
-               alignment = PAGE_SIZE;
-       }
 
        bp.size = size;
        bp.byte_align = alignment;
        bp.type = type;
        bp.resv = resv;
        bp.preferred_domain = initial_domain;
-retry:
        bp.flags = flags;
        bp.domain = initial_domain;
        r = amdgpu_bo_create(adev, &bp, &bo);
-       if (r) {
-               if (r != -ERESTARTSYS) {
-                       if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
-                               flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
-                               goto retry;
-                       }
-
-                       if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
-                               initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
-                               goto retry;
-                       }
-                       DRM_DEBUG("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
-                                 size, initial_domain, alignment, r);
-               }
+       if (r)
                return r;
-       }
-       *obj = &bo->gem_base;
+
+       *obj = &bo->tbo.base;
 
        return 0;
 }
 
 void amdgpu_gem_force_release(struct amdgpu_device *adev)
 {
-       struct drm_device *ddev = adev->ddev;
+       struct drm_device *ddev = adev_to_drm(adev);
        struct drm_file *file;
 
        mutex_lock(&ddev->filelist_mutex);
@@ -104,7 +92,7 @@
                spin_lock(&file->table_lock);
                idr_for_each_entry(&file->object_idr, gobj, handle) {
                        WARN_ONCE(1, "And also active allocations!\n");
-                       drm_gem_object_put_unlocked(gobj);
+                       drm_gem_object_put(gobj);
                }
                idr_destroy(&file->object_idr);
                spin_unlock(&file->table_lock);
@@ -133,7 +121,7 @@
                return -EPERM;
 
        if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID &&
-           abo->tbo.resv != vm->root.base.bo->tbo.resv)
+           abo->tbo.base.resv != vm->root.base.bo->tbo.base.resv)
                return -EPERM;
 
        r = amdgpu_bo_reserve(abo, false);
@@ -160,16 +148,17 @@
 
        struct amdgpu_bo_list_entry vm_pd;
        struct list_head list, duplicates;
+       struct dma_fence *fence = NULL;
        struct ttm_validate_buffer tv;
        struct ww_acquire_ctx ticket;
        struct amdgpu_bo_va *bo_va;
-       int r;
+       long r;
 
        INIT_LIST_HEAD(&list);
        INIT_LIST_HEAD(&duplicates);
 
        tv.bo = &bo->tbo;
-       tv.shared = true;
+       tv.num_shared = 2;
        list_add(&tv.head, &list);
 
        amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
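
The tv.shared boolean became tv.num_shared: instead of merely marking the reservation as shared, callers now tell ttm_eu_reserve_buffers() how many shared-fence slots to pre-allocate in each buffer's dma_resv while the reservation is held. Two slots are requested here because the close path below may attach both the transplanted exclusive fence and the page-table clear fence. A minimal, illustrative sketch of the pattern (the bo variable is an assumption):

    /* Illustrative sketch only: reserve a BO with two shared-fence slots. */
    struct ttm_validate_buffer tv;
    struct ww_acquire_ctx ticket;
    LIST_HEAD(list);
    int r;

    tv.bo = &bo->tbo;
    tv.num_shared = 2;      /* slots pre-allocated during reservation */
    list_add(&tv.head, &list);

    r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
    if (!r) {
            /* up to two shared fences can now be added without -ENOMEM */
            ttm_eu_backoff_reservation(&ticket, &list);
    }
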
@@ -177,28 +166,34 @@
        r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
        if (r) {
                dev_err(adev->dev, "leaking bo va because "
-                       "we fail to reserve bo (%d)\n", r);
+                       "we fail to reserve bo (%ld)\n", r);
                return;
        }
        bo_va = amdgpu_vm_bo_find(vm, bo);
-       if (bo_va && --bo_va->ref_count == 0) {
-               amdgpu_vm_bo_rmv(adev, bo_va);
+       if (!bo_va || --bo_va->ref_count)
+               goto out_unlock;
 
-               if (amdgpu_vm_ready(vm)) {
-                       struct dma_fence *fence = NULL;
+       amdgpu_vm_bo_rmv(adev, bo_va);
+       if (!amdgpu_vm_ready(vm))
+               goto out_unlock;
 
-                       r = amdgpu_vm_clear_freed(adev, vm, &fence);
-                       if (unlikely(r)) {
-                               dev_err(adev->dev, "failed to clear page "
-                                       "tables on GEM object close (%d)\n", r);
-                       }
-
-                       if (fence) {
-                               amdgpu_bo_fence(bo, fence, true);
-                               dma_fence_put(fence);
-                       }
-               }
+       fence = dma_resv_get_excl(bo->tbo.base.resv);
+       if (fence) {
+               amdgpu_bo_fence(bo, fence, true);
+               fence = NULL;
        }
+
+       r = amdgpu_vm_clear_freed(adev, vm, &fence);
+       if (r || !fence)
+               goto out_unlock;
+
+       amdgpu_bo_fence(bo, fence, true);
+       dma_fence_put(fence);
+
+out_unlock:
+       if (unlikely(r < 0))
+               dev_err(adev->dev, "failed to clear page "
+                       "tables on GEM object close (%ld)\n", r);
        ttm_eu_backoff_reservation(&ticket, &list);
 }
 
@@ -208,15 +203,15 @@
 int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *filp)
 {
-       struct amdgpu_device *adev = dev->dev_private;
+       struct amdgpu_device *adev = drm_to_adev(dev);
        struct amdgpu_fpriv *fpriv = filp->driver_priv;
        struct amdgpu_vm *vm = &fpriv->vm;
        union drm_amdgpu_gem_create *args = data;
        uint64_t flags = args->in.domain_flags;
        uint64_t size = args->in.bo_size;
-       struct reservation_object *resv = NULL;
+       struct dma_resv *resv = NULL;
        struct drm_gem_object *gobj;
-       uint32_t handle;
+       uint32_t handle, initial_domain;
        int r;
 
        /* reject invalid gem flags */
@@ -225,13 +220,19 @@
                      AMDGPU_GEM_CREATE_CPU_GTT_USWC |
                      AMDGPU_GEM_CREATE_VRAM_CLEARED |
                      AMDGPU_GEM_CREATE_VM_ALWAYS_VALID |
-                     AMDGPU_GEM_CREATE_EXPLICIT_SYNC))
+                     AMDGPU_GEM_CREATE_EXPLICIT_SYNC |
+                     AMDGPU_GEM_CREATE_ENCRYPTED))
 
                return -EINVAL;
 
        /* reject invalid gem domains */
        if (args->in.domains & ~AMDGPU_GEM_DOMAIN_MASK)
                return -EINVAL;
+
+       if (!amdgpu_is_tmz(adev) && (flags & AMDGPU_GEM_CREATE_ENCRYPTED)) {
+               DRM_NOTE_ONCE("Cannot allocate secure buffer since TMZ is disabled\n");
+               return -EINVAL;
+       }
 
        /* create a gem object to contain this object in */
        if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
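
AMDGPU_GEM_CREATE_ENCRYPTED is new UAPI for TMZ (Trusted Memory Zone) protected buffers, and the hunk above rejects it early when TMZ is disabled. For orientation, a hedged userspace sketch of requesting such a buffer (the header path and helper name are assumptions; the handle comes back in req.out.handle):

    /* Illustrative userspace sketch: allocate a TMZ-protected BO. */
    #include <errno.h>
    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <drm/amdgpu_drm.h>

    static int create_secure_bo(int fd, uint32_t *handle)
    {
            union drm_amdgpu_gem_create req = {0};

            req.in.bo_size = 4096;
            req.in.alignment = 4096;
            req.in.domains = AMDGPU_GEM_DOMAIN_VRAM;
            req.in.domain_flags = AMDGPU_GEM_CREATE_ENCRYPTED;

            if (ioctl(fd, DRM_IOCTL_AMDGPU_GEM_CREATE, &req))
                    return -errno;  /* EINVAL when TMZ is off, as above */

            *handle = req.out.handle;
            return 0;
    }
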
@@ -244,28 +245,38 @@
                        return -EINVAL;
                }
                flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
-               if (args->in.domains == AMDGPU_GEM_DOMAIN_GDS)
-                       size = size << AMDGPU_GDS_SHIFT;
-               else if (args->in.domains == AMDGPU_GEM_DOMAIN_GWS)
-                       size = size << AMDGPU_GWS_SHIFT;
-               else if (args->in.domains == AMDGPU_GEM_DOMAIN_OA)
-                       size = size << AMDGPU_OA_SHIFT;
-               else
-                       return -EINVAL;
        }
-       size = roundup(size, PAGE_SIZE);
 
        if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
                r = amdgpu_bo_reserve(vm->root.base.bo, false);
                if (r)
                        return r;
 
-               resv = vm->root.base.bo->tbo.resv;
+               resv = vm->root.base.bo->tbo.base.resv;
        }
 
+retry:
+       initial_domain = (u32)(0xffffffff & args->in.domains);
        r = amdgpu_gem_object_create(adev, size, args->in.alignment,
-                                    (u32)(0xffffffff & args->in.domains),
+                                    initial_domain,
                                     flags, ttm_bo_type_device, resv, &gobj);
+       if (r) {
+               if (r != -ERESTARTSYS) {
+                       if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
+                               flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+                               goto retry;
+                       }
+
+                       if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
+                               initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
+                               goto retry;
+                       }
+                       DRM_DEBUG("Failed to allocate GEM object (%llu, %d, %llu, %d)\n",
+                                 size, initial_domain, args->in.alignment, r);
+               }
+               return r;
+       }
+
        if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
                if (!r) {
                        struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
@@ -279,7 +290,7 @@
 
        r = drm_gem_handle_create(filp, gobj, &handle);
        /* drop reference from allocate - handle holds it now */
-       drm_gem_object_put_unlocked(gobj);
+       drm_gem_object_put(gobj);
        if (r)
                return r;
 
@@ -292,7 +303,7 @@
                          struct drm_file *filp)
 {
        struct ttm_operation_ctx ctx = { true, false };
-       struct amdgpu_device *adev = dev->dev_private;
+       struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_amdgpu_gem_userptr *args = data;
        struct drm_gem_object *gobj;
        struct amdgpu_bo *bo;
@@ -326,47 +337,42 @@
        bo = gem_to_amdgpu_bo(gobj);
        bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
        bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
-       r = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
+       r = amdgpu_ttm_tt_set_userptr(&bo->tbo, args->addr, args->flags);
        if (r)
                goto release_object;
 
-       if (args->flags & AMDGPU_GEM_USERPTR_REGISTER) {
-               r = amdgpu_mn_register(bo, args->addr);
-               if (r)
-                       goto release_object;
-       }
+       r = amdgpu_mn_register(bo, args->addr);
+       if (r)
+               goto release_object;
 
        if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
-               r = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm,
-                                                bo->tbo.ttm->pages);
+               r = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
                if (r)
                        goto release_object;
 
                r = amdgpu_bo_reserve(bo, true);
                if (r)
-                       goto free_pages;
+                       goto user_pages_done;
 
                amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
                r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
                amdgpu_bo_unreserve(bo);
                if (r)
-                       goto free_pages;
+                       goto user_pages_done;
        }
 
        r = drm_gem_handle_create(filp, gobj, &handle);
-       /* drop reference from allocate - handle holds it now */
-       drm_gem_object_put_unlocked(gobj);
        if (r)
-               return r;
+               goto user_pages_done;
 
        args->handle = handle;
-       return 0;
 
-free_pages:
-       release_pages(bo->tbo.ttm->pages, bo->tbo.ttm->num_pages);
+user_pages_done:
+       if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE)
+               amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
 
 release_object:
-       drm_gem_object_put_unlocked(gobj);
+       drm_gem_object_put(gobj);
 
        return r;
 }
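
Two behavioural notes on the userptr hunk above: amdgpu_mn_register() is now called unconditionally (AMDGPU_GEM_USERPTR_REGISTER no longer gates it in the kernel), and the old free_pages/release_pages() unwind is replaced by amdgpu_ttm_tt_get_user_pages_done(), which pairs with the HMM-based user-page tracking. The ioctl's userspace contract is unchanged; an illustrative sketch (flag choice and helper name are examples):

    /* Illustrative userspace sketch: wrap anonymous memory in a GEM handle. */
    static int wrap_userptr(int fd, void *addr, uint64_t size, uint32_t *handle)
    {
            struct drm_amdgpu_gem_userptr req = {0};

            req.addr = (uintptr_t)addr;     /* page-aligned user address */
            req.size = size;                /* multiple of the page size */
            req.flags = AMDGPU_GEM_USERPTR_ANONONLY |
                        AMDGPU_GEM_USERPTR_REGISTER |
                        AMDGPU_GEM_USERPTR_VALIDATE;

            if (ioctl(fd, DRM_IOCTL_AMDGPU_GEM_USERPTR, &req))
                    return -errno;

            *handle = req.handle;
            return 0;
    }
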
@@ -385,11 +391,11 @@
        robj = gem_to_amdgpu_bo(gobj);
        if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) ||
            (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
-               drm_gem_object_put_unlocked(gobj);
+               drm_gem_object_put(gobj);
                return -EPERM;
        }
        *offset_p = amdgpu_bo_mmap_offset(robj);
-       drm_gem_object_put_unlocked(gobj);
+       drm_gem_object_put(gobj);
        return 0;
 }
 
@@ -446,7 +452,7 @@
                return -ENOENT;
        }
        robj = gem_to_amdgpu_bo(gobj);
-       ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true,
+       ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true,
                                                  timeout);
 
        /* ret == 0 means not signaled,
@@ -459,7 +465,7 @@
        } else
                r = ret;
 
-       drm_gem_object_put_unlocked(gobj);
+       drm_gem_object_put(gobj);
        return r;
 }
 
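
dma_resv_wait_timeout_rcu() is a direct rename of reservation_object_wait_timeout_rcu(): it waits on all fences (second argument true), interruptibly (third argument true), with a jiffies timeout, returning 0 on timeout, a negative error, or the remaining jiffies on success. Userspace reaches this path through the wait-idle ioctl; an illustrative sketch (timeout encoding per amdgpu_drm.h, helper name assumed):

    /* Illustrative userspace sketch: wait for a BO to become idle. */
    static int wait_bo_idle(int fd, uint32_t handle, uint64_t timeout_ns)
    {
            union drm_amdgpu_gem_wait_idle req = {0};

            req.in.handle = handle;
            req.in.timeout = timeout_ns;    /* see amdgpu_drm.h for encoding */

            if (ioctl(fd, DRM_IOCTL_AMDGPU_GEM_WAIT_IDLE, &req))
                    return -errno;

            return req.out.status ? -EBUSY : 0;     /* nonzero: still busy */
    }
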
....@@ -502,7 +508,7 @@
502508 unreserve:
503509 amdgpu_bo_unreserve(robj);
504510 out:
505
- drm_gem_object_put_unlocked(gobj);
511
+ drm_gem_object_put(gobj);
506512 return r;
507513 }
508514
....@@ -538,11 +544,39 @@
538544 goto error;
539545 }
540546
541
- r = amdgpu_vm_update_directories(adev, vm);
547
+ r = amdgpu_vm_update_pdes(adev, vm, false);
542548
543549 error:
544550 if (r && r != -ERESTARTSYS)
545551 DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
552
+}
553
+
554
+/**
555
+ * amdgpu_gem_va_map_flags - map GEM UAPI flags into hardware flags
556
+ *
557
+ * @adev: amdgpu_device pointer
558
+ * @flags: GEM UAPI flags
559
+ *
560
+ * Returns the GEM UAPI flags mapped into hardware for the ASIC.
561
+ */
562
+uint64_t amdgpu_gem_va_map_flags(struct amdgpu_device *adev, uint32_t flags)
563
+{
564
+ uint64_t pte_flag = 0;
565
+
566
+ if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
567
+ pte_flag |= AMDGPU_PTE_EXECUTABLE;
568
+ if (flags & AMDGPU_VM_PAGE_READABLE)
569
+ pte_flag |= AMDGPU_PTE_READABLE;
570
+ if (flags & AMDGPU_VM_PAGE_WRITEABLE)
571
+ pte_flag |= AMDGPU_PTE_WRITEABLE;
572
+ if (flags & AMDGPU_VM_PAGE_PRT)
573
+ pte_flag |= AMDGPU_PTE_PRT;
574
+
575
+ if (adev->gmc.gmc_funcs->map_mtype)
576
+ pte_flag |= amdgpu_gmc_map_mtype(adev,
577
+ flags & AMDGPU_VM_MTYPE_MASK);
578
+
579
+ return pte_flag;
546580 }
547581
548582 int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
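
amdgpu_gem_va_map_flags() above pulls the UAPI-flag to PTE-flag translation out of the removed amdgpu_gmc_get_pte_flags() path; MTYPE bits are now routed through the per-ASIC map_mtype callback when one exists. The flags arrive through the VA ioctl; an illustrative userspace sketch of mapping a BO at a GPU virtual address (helper name assumed):

    /* Illustrative userspace sketch: map a BO into the GPU VA space. */
    static int map_bo(int fd, uint32_t handle, uint64_t gpu_va, uint64_t size)
    {
            struct drm_amdgpu_gem_va req = {0};

            req.handle = handle;
            req.operation = AMDGPU_VA_OP_MAP;
            req.flags = AMDGPU_VM_PAGE_READABLE |
                        AMDGPU_VM_PAGE_WRITEABLE |
                        AMDGPU_VM_PAGE_EXECUTABLE;
            req.va_address = gpu_va;        /* must avoid the GMC hole */
            req.offset_in_bo = 0;
            req.map_size = size;

            return ioctl(fd, DRM_IOCTL_AMDGPU_GEM_VA, &req) ? -errno : 0;
    }
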
@@ -556,7 +590,7 @@
 
        struct drm_amdgpu_gem_va *args = data;
        struct drm_gem_object *gobj;
-       struct amdgpu_device *adev = dev->dev_private;
+       struct amdgpu_device *adev = drm_to_adev(dev);
        struct amdgpu_fpriv *fpriv = filp->driver_priv;
        struct amdgpu_bo *abo;
        struct amdgpu_bo_va *bo_va;
@@ -575,16 +609,16 @@
                return -EINVAL;
        }
 
-       if (args->va_address >= AMDGPU_VA_HOLE_START &&
-           args->va_address < AMDGPU_VA_HOLE_END) {
+       if (args->va_address >= AMDGPU_GMC_HOLE_START &&
+           args->va_address < AMDGPU_GMC_HOLE_END) {
                dev_dbg(&dev->pdev->dev,
                        "va_address 0x%LX is in VA hole 0x%LX-0x%LX\n",
-                       args->va_address, AMDGPU_VA_HOLE_START,
-                       AMDGPU_VA_HOLE_END);
+                       args->va_address, AMDGPU_GMC_HOLE_START,
+                       AMDGPU_GMC_HOLE_END);
                return -EINVAL;
        }
 
-       args->va_address &= AMDGPU_VA_HOLE_MASK;
+       args->va_address &= AMDGPU_GMC_HOLE_MASK;
 
        vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
        vm_size -= AMDGPU_VA_RESERVED_SIZE;
@@ -622,7 +656,10 @@
                        return -ENOENT;
                abo = gem_to_amdgpu_bo(gobj);
                tv.bo = &abo->tbo;
-               tv.shared = !!(abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID);
+               if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
+                       tv.num_shared = 1;
+               else
+                       tv.num_shared = 0;
                list_add(&tv.head, &list);
        } else {
                gobj = NULL;
@@ -649,12 +686,7 @@
 
        switch (args->operation) {
        case AMDGPU_VA_OP_MAP:
-               r = amdgpu_vm_alloc_pts(adev, bo_va->base.vm, args->va_address,
-                                       args->map_size);
-               if (r)
-                       goto error_backoff;
-
-               va_flags = amdgpu_gmc_get_pte_flags(adev, args->flags);
+               va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
                r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
                                     args->offset_in_bo, args->map_size,
                                     va_flags);
@@ -669,12 +701,7 @@
                                        args->map_size);
                break;
        case AMDGPU_VA_OP_REPLACE:
-               r = amdgpu_vm_alloc_pts(adev, bo_va->base.vm, args->va_address,
-                                       args->map_size);
-               if (r)
-                       goto error_backoff;
-
-               va_flags = amdgpu_gmc_get_pte_flags(adev, args->flags);
+               va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
                r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address,
                                             args->offset_in_bo, args->map_size,
                                             va_flags);
@@ -690,16 +717,17 @@
        ttm_eu_backoff_reservation(&ticket, &list);
 
 error_unref:
-       drm_gem_object_put_unlocked(gobj);
+       drm_gem_object_put(gobj);
        return r;
 }
 
 int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *filp)
 {
-       struct amdgpu_device *adev = dev->dev_private;
+       struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_amdgpu_gem_op *args = data;
        struct drm_gem_object *gobj;
+       struct amdgpu_vm_bo_base *base;
        struct amdgpu_bo *robj;
        int r;
 
@@ -718,7 +746,7 @@
                struct drm_amdgpu_gem_create_in info;
                void __user *out = u64_to_user_ptr(args->value);
 
-               info.bo_size = robj->gem_base.size;
+               info.bo_size = robj->tbo.base.size;
                info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT;
                info.domains = robj->preferred_domains;
                info.domain_flags = robj->flags;
@@ -738,6 +766,15 @@
                        amdgpu_bo_unreserve(robj);
                        break;
                }
+               for (base = robj->vm_bo; base; base = base->next)
+                       if (amdgpu_xgmi_same_hive(amdgpu_ttm_adev(robj->tbo.bdev),
+                                                 amdgpu_ttm_adev(base->vm->root.base.bo->tbo.bdev))) {
+                               r = -EINVAL;
+                               amdgpu_bo_unreserve(robj);
+                               goto out;
+                       }
+
+
                robj->preferred_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
                                                         AMDGPU_GEM_DOMAIN_GTT |
                                                         AMDGPU_GEM_DOMAIN_CPU);
@@ -756,7 +793,7 @@
        }
 
 out:
-       drm_gem_object_put_unlocked(gobj);
+       drm_gem_object_put(gobj);
        return r;
 }
 
@@ -764,27 +801,36 @@
                             struct drm_device *dev,
                             struct drm_mode_create_dumb *args)
 {
-       struct amdgpu_device *adev = dev->dev_private;
+       struct amdgpu_device *adev = drm_to_adev(dev);
        struct drm_gem_object *gobj;
        uint32_t handle;
+       u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+                   AMDGPU_GEM_CREATE_CPU_GTT_USWC;
        u32 domain;
        int r;
+
+       /*
+        * The buffer returned from this function should be cleared, but
+        * it can only be done if the ring is enabled or we'll fail to
+        * create the buffer.
+        */
+       if (adev->mman.buffer_funcs_enabled)
+               flags |= AMDGPU_GEM_CREATE_VRAM_CLEARED;
 
        args->pitch = amdgpu_align_pitch(adev, args->width,
                                         DIV_ROUND_UP(args->bpp, 8), 0);
        args->size = (u64)args->pitch * args->height;
        args->size = ALIGN(args->size, PAGE_SIZE);
        domain = amdgpu_bo_get_preferred_pin_domain(adev,
-                               amdgpu_display_supported_domains(adev));
-       r = amdgpu_gem_object_create(adev, args->size, 0, domain,
-                                    AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+                               amdgpu_display_supported_domains(adev, flags));
+       r = amdgpu_gem_object_create(adev, args->size, 0, domain, flags,
                                     ttm_bo_type_device, NULL, &gobj);
        if (r)
                return -ENOMEM;
 
        r = drm_gem_handle_create(file_priv, gobj, &handle);
        /* drop reference from allocate - handle holds it now */
-       drm_gem_object_put_unlocked(gobj);
+       drm_gem_object_put(gobj);
        if (r) {
                return r;
        }
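
The dumb-buffer path above now always asks for CPU-visible, write-combined memory and, when the SDMA buffer functions are up, a cleared allocation, so generic scanout clients no longer see stale VRAM contents. Dumb buffers are created through the core DRM ioctl; an illustrative sketch (helper name assumed):

    /* Illustrative userspace sketch: allocate a dumb scanout buffer. */
    static int create_dumb(int fd, uint32_t width, uint32_t height,
                           struct drm_mode_create_dumb *out)
    {
            struct drm_mode_create_dumb req = {0};

            req.width = width;
            req.height = height;
            req.bpp = 32;   /* e.g. XRGB8888 */

            if (ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &req))
                    return -errno;

            *out = req;     /* kernel fills handle, pitch and size */
            return 0;
    }
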
@@ -831,11 +877,12 @@
        if (pin_count)
                seq_printf(m, " pin count %d", pin_count);
 
-       dma_buf = READ_ONCE(bo->gem_base.dma_buf);
-       attachment = READ_ONCE(bo->gem_base.import_attach);
+       dma_buf = READ_ONCE(bo->tbo.base.dma_buf);
+       attachment = READ_ONCE(bo->tbo.base.import_attach);
 
        if (attachment)
-               seq_printf(m, " imported from %p", dma_buf);
+               seq_printf(m, " imported from %p%s", dma_buf,
+                          attachment->peer2peer ? " P2P" : "");
        else if (dma_buf)
                seq_printf(m, " exported as %p", dma_buf);
 
@@ -896,7 +943,8 @@
 int amdgpu_debugfs_gem_init(struct amdgpu_device *adev)
 {
 #if defined(CONFIG_DEBUG_FS)
-       return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_gem_list, 1);
+       return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_gem_list,
+                                       ARRAY_SIZE(amdgpu_debugfs_gem_list));
 #endif
        return 0;
 }
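
The final hunk replaces a hard-coded entry count of 1 with ARRAY_SIZE(), so the registration stays correct if entries are ever added to the table. The pattern, as an illustrative sketch (table contents and the show callback are placeholders, not the driver's real entries):

    /* Illustrative sketch: size a debugfs table with ARRAY_SIZE(). */
    static const struct drm_info_list example_gem_list[] = {
            /* example_gem_info_show is a placeholder seq_file callback */
            {"example_gem_info", example_gem_info_show, 0, NULL},
    };

    /* count derived from the table, never out of sync with it */
    r = amdgpu_debugfs_add_files(adev, example_gem_list,
                                 ARRAY_SIZE(example_gem_list));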