@@ -26,11 +26,17 @@
  * Jerome Glisse
  */
 #include <linux/ktime.h>
+#include <linux/module.h>
 #include <linux/pagemap.h>
-#include <drm/drmP.h>
+#include <linux/pci.h>
+#include <linux/dma-buf.h>
+
 #include <drm/amdgpu_drm.h>
+#include <drm/drm_debugfs.h>
+
 #include "amdgpu.h"
 #include "amdgpu_display.h"
+#include "amdgpu_xgmi.h"
 
 void amdgpu_gem_object_free(struct drm_gem_object *gobj)
 {
@@ -45,7 +51,7 @@
 int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
 			     int alignment, u32 initial_domain,
 			     u64 flags, enum ttm_bo_type type,
-			     struct reservation_object *resv,
+			     struct dma_resv *resv,
 			     struct drm_gem_object **obj)
 {
 	struct amdgpu_bo *bo;
@@ -54,44 +60,26 @@
 
 	memset(&bp, 0, sizeof(bp));
 	*obj = NULL;
-	/* At least align on page size */
-	if (alignment < PAGE_SIZE) {
-		alignment = PAGE_SIZE;
-	}
 
 	bp.size = size;
 	bp.byte_align = alignment;
 	bp.type = type;
 	bp.resv = resv;
 	bp.preferred_domain = initial_domain;
-retry:
 	bp.flags = flags;
 	bp.domain = initial_domain;
 	r = amdgpu_bo_create(adev, &bp, &bo);
-	if (r) {
-		if (r != -ERESTARTSYS) {
-			if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
-				flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
-				goto retry;
-			}
-
-			if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
-				initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
-				goto retry;
-			}
-			DRM_DEBUG("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
-				  size, initial_domain, alignment, r);
-		}
+	if (r)
 		return r;
-	}
-	*obj = &bo->gem_base;
+
+	*obj = &bo->tbo.base;
 
 	return 0;
 }
 
 void amdgpu_gem_force_release(struct amdgpu_device *adev)
 {
-	struct drm_device *ddev = adev->ddev;
+	struct drm_device *ddev = adev_to_drm(adev);
 	struct drm_file *file;
 
 	mutex_lock(&ddev->filelist_mutex);
@@ -104,7 +92,7 @@
 		spin_lock(&file->table_lock);
 		idr_for_each_entry(&file->object_idr, gobj, handle) {
 			WARN_ONCE(1, "And also active allocations!\n");
-			drm_gem_object_put_unlocked(gobj);
+			drm_gem_object_put(gobj);
 		}
 		idr_destroy(&file->object_idr);
 		spin_unlock(&file->table_lock);
@@ -133,7 +121,7 @@
 		return -EPERM;
 
 	if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID &&
-	    abo->tbo.resv != vm->root.base.bo->tbo.resv)
+	    abo->tbo.base.resv != vm->root.base.bo->tbo.base.resv)
 		return -EPERM;
 
 	r = amdgpu_bo_reserve(abo, false);
@@ -160,16 +148,17 @@
 
 	struct amdgpu_bo_list_entry vm_pd;
 	struct list_head list, duplicates;
+	struct dma_fence *fence = NULL;
 	struct ttm_validate_buffer tv;
 	struct ww_acquire_ctx ticket;
 	struct amdgpu_bo_va *bo_va;
-	int r;
+	long r;
 
 	INIT_LIST_HEAD(&list);
 	INIT_LIST_HEAD(&duplicates);
 
 	tv.bo = &bo->tbo;
-	tv.shared = true;
+	tv.num_shared = 2;
 	list_add(&tv.head, &list);
 
 	amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
@@ -177,28 +166,34 @@
 	r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
 	if (r) {
 		dev_err(adev->dev, "leaking bo va because "
-			"we fail to reserve bo (%d)\n", r);
+			"we fail to reserve bo (%ld)\n", r);
 		return;
 	}
 	bo_va = amdgpu_vm_bo_find(vm, bo);
-	if (bo_va && --bo_va->ref_count == 0) {
-		amdgpu_vm_bo_rmv(adev, bo_va);
+	if (!bo_va || --bo_va->ref_count)
+		goto out_unlock;
 
-		if (amdgpu_vm_ready(vm)) {
-			struct dma_fence *fence = NULL;
+	amdgpu_vm_bo_rmv(adev, bo_va);
+	if (!amdgpu_vm_ready(vm))
+		goto out_unlock;
 
-			r = amdgpu_vm_clear_freed(adev, vm, &fence);
-			if (unlikely(r)) {
-				dev_err(adev->dev, "failed to clear page "
-					"tables on GEM object close (%d)\n", r);
-			}
-
-			if (fence) {
-				amdgpu_bo_fence(bo, fence, true);
-				dma_fence_put(fence);
-			}
-		}
+	fence = dma_resv_get_excl(bo->tbo.base.resv);
+	if (fence) {
+		amdgpu_bo_fence(bo, fence, true);
+		fence = NULL;
 	}
+
+	r = amdgpu_vm_clear_freed(adev, vm, &fence);
+	if (r || !fence)
+		goto out_unlock;
+
+	amdgpu_bo_fence(bo, fence, true);
+	dma_fence_put(fence);
+
+out_unlock:
+	if (unlikely(r < 0))
+		dev_err(adev->dev, "failed to clear page "
+			"tables on GEM object close (%ld)\n", r);
 	ttm_eu_backoff_reservation(&ticket, &list);
 }
 
@@ -208,15 +203,15 @@
 int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
 			    struct drm_file *filp)
 {
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
 	struct amdgpu_vm *vm = &fpriv->vm;
 	union drm_amdgpu_gem_create *args = data;
 	uint64_t flags = args->in.domain_flags;
 	uint64_t size = args->in.bo_size;
-	struct reservation_object *resv = NULL;
+	struct dma_resv *resv = NULL;
 	struct drm_gem_object *gobj;
-	uint32_t handle;
+	uint32_t handle, initial_domain;
 	int r;
 
 	/* reject invalid gem flags */
@@ -225,13 +220,19 @@
 		      AMDGPU_GEM_CREATE_CPU_GTT_USWC |
 		      AMDGPU_GEM_CREATE_VRAM_CLEARED |
 		      AMDGPU_GEM_CREATE_VM_ALWAYS_VALID |
-		      AMDGPU_GEM_CREATE_EXPLICIT_SYNC))
+		      AMDGPU_GEM_CREATE_EXPLICIT_SYNC |
+		      AMDGPU_GEM_CREATE_ENCRYPTED))
 
 		return -EINVAL;
 
 	/* reject invalid gem domains */
 	if (args->in.domains & ~AMDGPU_GEM_DOMAIN_MASK)
 		return -EINVAL;
+
+	if (!amdgpu_is_tmz(adev) && (flags & AMDGPU_GEM_CREATE_ENCRYPTED)) {
+		DRM_NOTE_ONCE("Cannot allocate secure buffer since TMZ is disabled\n");
+		return -EINVAL;
+	}
 
 	/* create a gem object to contain this object in */
 	if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
@@ -244,28 +245,38 @@
 			return -EINVAL;
 		}
 		flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
-		if (args->in.domains == AMDGPU_GEM_DOMAIN_GDS)
-			size = size << AMDGPU_GDS_SHIFT;
-		else if (args->in.domains == AMDGPU_GEM_DOMAIN_GWS)
-			size = size << AMDGPU_GWS_SHIFT;
-		else if (args->in.domains == AMDGPU_GEM_DOMAIN_OA)
-			size = size << AMDGPU_OA_SHIFT;
-		else
-			return -EINVAL;
 	}
-	size = roundup(size, PAGE_SIZE);
 
 	if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
 		r = amdgpu_bo_reserve(vm->root.base.bo, false);
 		if (r)
 			return r;
 
-		resv = vm->root.base.bo->tbo.resv;
+		resv = vm->root.base.bo->tbo.base.resv;
 	}
 
+retry:
+	initial_domain = (u32)(0xffffffff & args->in.domains);
 	r = amdgpu_gem_object_create(adev, size, args->in.alignment,
-				     (u32)(0xffffffff & args->in.domains),
+				     initial_domain,
 				     flags, ttm_bo_type_device, resv, &gobj);
+	if (r) {
+		if (r != -ERESTARTSYS) {
+			if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
+				flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+				goto retry;
+			}
+
+			if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
+				initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
+				goto retry;
+			}
+			DRM_DEBUG("Failed to allocate GEM object (%llu, %d, %llu, %d)\n",
+				  size, initial_domain, args->in.alignment, r);
+		}
+		return r;
+	}
+
 	if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
 		if (!r) {
 			struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
@@ -279,7 +290,7 @@
 
 	r = drm_gem_handle_create(filp, gobj, &handle);
 	/* drop reference from allocate - handle holds it now */
-	drm_gem_object_put_unlocked(gobj);
+	drm_gem_object_put(gobj);
 	if (r)
 		return r;
 
@@ -292,7 +303,7 @@
 			    struct drm_file *filp)
 {
 	struct ttm_operation_ctx ctx = { true, false };
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct drm_amdgpu_gem_userptr *args = data;
 	struct drm_gem_object *gobj;
 	struct amdgpu_bo *bo;
@@ -326,47 +337,42 @@
 	bo = gem_to_amdgpu_bo(gobj);
 	bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
 	bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
-	r = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
+	r = amdgpu_ttm_tt_set_userptr(&bo->tbo, args->addr, args->flags);
 	if (r)
 		goto release_object;
 
-	if (args->flags & AMDGPU_GEM_USERPTR_REGISTER) {
-		r = amdgpu_mn_register(bo, args->addr);
-		if (r)
-			goto release_object;
-	}
+	r = amdgpu_mn_register(bo, args->addr);
+	if (r)
+		goto release_object;
 
 	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
-		r = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm,
-						 bo->tbo.ttm->pages);
+		r = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
 		if (r)
 			goto release_object;
 
 		r = amdgpu_bo_reserve(bo, true);
 		if (r)
-			goto free_pages;
+			goto user_pages_done;
 
 		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
 		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 		amdgpu_bo_unreserve(bo);
 		if (r)
-			goto free_pages;
+			goto user_pages_done;
 	}
 
 	r = drm_gem_handle_create(filp, gobj, &handle);
-	/* drop reference from allocate - handle holds it now */
-	drm_gem_object_put_unlocked(gobj);
 	if (r)
-		return r;
+		goto user_pages_done;
 
 	args->handle = handle;
-	return 0;
 
-free_pages:
-	release_pages(bo->tbo.ttm->pages, bo->tbo.ttm->num_pages);
+user_pages_done:
+	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE)
+		amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
 
 release_object:
-	drm_gem_object_put_unlocked(gobj);
+	drm_gem_object_put(gobj);
 
 	return r;
 }
@@ -385,11 +391,11 @@
 	robj = gem_to_amdgpu_bo(gobj);
 	if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) ||
 	    (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
-		drm_gem_object_put_unlocked(gobj);
+		drm_gem_object_put(gobj);
 		return -EPERM;
 	}
 	*offset_p = amdgpu_bo_mmap_offset(robj);
-	drm_gem_object_put_unlocked(gobj);
+	drm_gem_object_put(gobj);
 	return 0;
 }
 
@@ -446,7 +452,7 @@
 		return -ENOENT;
 	}
 	robj = gem_to_amdgpu_bo(gobj);
-	ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true,
+	ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true,
						  timeout);
 
 	/* ret == 0 means not signaled,
@@ -459,7 +465,7 @@
 	} else
 		r = ret;
 
-	drm_gem_object_put_unlocked(gobj);
+	drm_gem_object_put(gobj);
 	return r;
 }
 
@@ -502,7 +508,7 @@
 unreserve:
 	amdgpu_bo_unreserve(robj);
 out:
-	drm_gem_object_put_unlocked(gobj);
+	drm_gem_object_put(gobj);
 	return r;
 }
 
@@ -538,11 +544,39 @@
 		goto error;
 	}
 
-	r = amdgpu_vm_update_directories(adev, vm);
+	r = amdgpu_vm_update_pdes(adev, vm, false);
 
 error:
 	if (r && r != -ERESTARTSYS)
 		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
+}
+
+/**
+ * amdgpu_gem_va_map_flags - map GEM UAPI flags into hardware flags
+ *
+ * @adev: amdgpu_device pointer
+ * @flags: GEM UAPI flags
+ *
+ * Returns the GEM UAPI flags mapped into hardware for the ASIC.
+ */
+uint64_t amdgpu_gem_va_map_flags(struct amdgpu_device *adev, uint32_t flags)
+{
+	uint64_t pte_flag = 0;
+
+	if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
+		pte_flag |= AMDGPU_PTE_EXECUTABLE;
+	if (flags & AMDGPU_VM_PAGE_READABLE)
+		pte_flag |= AMDGPU_PTE_READABLE;
+	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
+		pte_flag |= AMDGPU_PTE_WRITEABLE;
+	if (flags & AMDGPU_VM_PAGE_PRT)
+		pte_flag |= AMDGPU_PTE_PRT;
+
+	if (adev->gmc.gmc_funcs->map_mtype)
+		pte_flag |= amdgpu_gmc_map_mtype(adev,
+						 flags & AMDGPU_VM_MTYPE_MASK);
+
+	return pte_flag;
 }
 
 int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
@@ -556,7 +590,7 @@
 
 	struct drm_amdgpu_gem_va *args = data;
 	struct drm_gem_object *gobj;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
 	struct amdgpu_bo *abo;
 	struct amdgpu_bo_va *bo_va;
@@ -575,16 +609,16 @@
 		return -EINVAL;
 	}
 
-	if (args->va_address >= AMDGPU_VA_HOLE_START &&
-	    args->va_address < AMDGPU_VA_HOLE_END) {
+	if (args->va_address >= AMDGPU_GMC_HOLE_START &&
+	    args->va_address < AMDGPU_GMC_HOLE_END) {
 		dev_dbg(&dev->pdev->dev,
 			"va_address 0x%LX is in VA hole 0x%LX-0x%LX\n",
-			args->va_address, AMDGPU_VA_HOLE_START,
-			AMDGPU_VA_HOLE_END);
+			args->va_address, AMDGPU_GMC_HOLE_START,
+			AMDGPU_GMC_HOLE_END);
 		return -EINVAL;
 	}
 
-	args->va_address &= AMDGPU_VA_HOLE_MASK;
+	args->va_address &= AMDGPU_GMC_HOLE_MASK;
 
 	vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
 	vm_size -= AMDGPU_VA_RESERVED_SIZE;
@@ -622,7 +656,10 @@
 			return -ENOENT;
 		abo = gem_to_amdgpu_bo(gobj);
 		tv.bo = &abo->tbo;
-		tv.shared = !!(abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID);
+		if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
+			tv.num_shared = 1;
+		else
+			tv.num_shared = 0;
 		list_add(&tv.head, &list);
 	} else {
 		gobj = NULL;
@@ -649,12 +686,7 @@
 
 	switch (args->operation) {
 	case AMDGPU_VA_OP_MAP:
-		r = amdgpu_vm_alloc_pts(adev, bo_va->base.vm, args->va_address,
-					args->map_size);
-		if (r)
-			goto error_backoff;
-
-		va_flags = amdgpu_gmc_get_pte_flags(adev, args->flags);
+		va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
 		r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
 				     args->offset_in_bo, args->map_size,
 				     va_flags);
@@ -669,12 +701,7 @@
 					args->map_size);
 		break;
 	case AMDGPU_VA_OP_REPLACE:
-		r = amdgpu_vm_alloc_pts(adev, bo_va->base.vm, args->va_address,
-					args->map_size);
-		if (r)
-			goto error_backoff;
-
-		va_flags = amdgpu_gmc_get_pte_flags(adev, args->flags);
+		va_flags = amdgpu_gem_va_map_flags(adev, args->flags);
 		r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address,
 					     args->offset_in_bo, args->map_size,
 					     va_flags);
@@ -690,16 +717,17 @@
 	ttm_eu_backoff_reservation(&ticket, &list);
 
 error_unref:
-	drm_gem_object_put_unlocked(gobj);
+	drm_gem_object_put(gobj);
 	return r;
 }
 
 int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *filp)
 {
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct drm_amdgpu_gem_op *args = data;
 	struct drm_gem_object *gobj;
+	struct amdgpu_vm_bo_base *base;
 	struct amdgpu_bo *robj;
 	int r;
 
@@ -718,7 +746,7 @@
 		struct drm_amdgpu_gem_create_in info;
 		void __user *out = u64_to_user_ptr(args->value);
 
-		info.bo_size = robj->gem_base.size;
+		info.bo_size = robj->tbo.base.size;
 		info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT;
 		info.domains = robj->preferred_domains;
 		info.domain_flags = robj->flags;
@@ -738,6 +766,15 @@
 			amdgpu_bo_unreserve(robj);
 			break;
 		}
+		for (base = robj->vm_bo; base; base = base->next)
+			if (amdgpu_xgmi_same_hive(amdgpu_ttm_adev(robj->tbo.bdev),
+						  amdgpu_ttm_adev(base->vm->root.base.bo->tbo.bdev))) {
+				r = -EINVAL;
+				amdgpu_bo_unreserve(robj);
+				goto out;
+			}
+
+
 		robj->preferred_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
 							AMDGPU_GEM_DOMAIN_GTT |
 							AMDGPU_GEM_DOMAIN_CPU);
@@ -756,7 +793,7 @@
 	}
 
 out:
-	drm_gem_object_put_unlocked(gobj);
+	drm_gem_object_put(gobj);
 	return r;
 }
 
@@ -764,27 +801,36 @@
 			     struct drm_device *dev,
 			     struct drm_mode_create_dumb *args)
 {
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct drm_gem_object *gobj;
 	uint32_t handle;
+	u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+		    AMDGPU_GEM_CREATE_CPU_GTT_USWC;
 	u32 domain;
 	int r;
+
+	/*
+	 * The buffer returned from this function should be cleared, but
+	 * it can only be done if the ring is enabled or we'll fail to
+	 * create the buffer.
+	 */
+	if (adev->mman.buffer_funcs_enabled)
+		flags |= AMDGPU_GEM_CREATE_VRAM_CLEARED;
 
 	args->pitch = amdgpu_align_pitch(adev, args->width,
					 DIV_ROUND_UP(args->bpp, 8), 0);
 	args->size = (u64)args->pitch * args->height;
 	args->size = ALIGN(args->size, PAGE_SIZE);
 	domain = amdgpu_bo_get_preferred_pin_domain(adev,
-				amdgpu_display_supported_domains(adev));
-	r = amdgpu_gem_object_create(adev, args->size, 0, domain,
-				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+				amdgpu_display_supported_domains(adev, flags));
+	r = amdgpu_gem_object_create(adev, args->size, 0, domain, flags,
 				     ttm_bo_type_device, NULL, &gobj);
 	if (r)
 		return -ENOMEM;
 
 	r = drm_gem_handle_create(file_priv, gobj, &handle);
 	/* drop reference from allocate - handle holds it now */
-	drm_gem_object_put_unlocked(gobj);
+	drm_gem_object_put(gobj);
 	if (r) {
 		return r;
 	}
@@ -831,11 +877,12 @@
 	if (pin_count)
 		seq_printf(m, " pin count %d", pin_count);
 
-	dma_buf = READ_ONCE(bo->gem_base.dma_buf);
-	attachment = READ_ONCE(bo->gem_base.import_attach);
+	dma_buf = READ_ONCE(bo->tbo.base.dma_buf);
+	attachment = READ_ONCE(bo->tbo.base.import_attach);
 
 	if (attachment)
-		seq_printf(m, " imported from %p", dma_buf);
+		seq_printf(m, " imported from %p%s", dma_buf,
+			   attachment->peer2peer ? " P2P" : "");
 	else if (dma_buf)
 		seq_printf(m, " exported as %p", dma_buf);
 
@@ -896,7 +943,8 @@
 int amdgpu_debugfs_gem_init(struct amdgpu_device *adev)
 {
 #if defined(CONFIG_DEBUG_FS)
-	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_gem_list, 1);
+	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_gem_list,
+					ARRAY_SIZE(amdgpu_debugfs_gem_list));
 #endif
 	return 0;
 }