2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -24,7 +24,7 @@
 
 #include <linux/idr.h>
 #include <linux/dma-fence-array.h>
-#include <drm/drmP.h>
+
 
 #include "amdgpu.h"
 #include "amdgpu_trace.h"
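Note: <drm/drmP.h> was the legacy catch-all DRM header; it was removed upstream once drivers had migrated to the split drm/drm_*.h headers. A file that still needed DRM declarations would switch to specific includes instead, for example (illustrative only, not part of this patch):

	#include <drm/drm_drv.h>
	#include <drm/drm_file.h>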
@@ -43,7 +43,7 @@
 /* Helper to free pasid from a fence callback */
 struct amdgpu_pasid_cb {
 	struct dma_fence_cb cb;
-	unsigned int pasid;
+	u32 pasid;
 };
 
 /**
@@ -79,7 +79,7 @@
  * amdgpu_pasid_free - Free a PASID
  * @pasid: PASID to free
  */
-void amdgpu_pasid_free(unsigned int pasid)
+void amdgpu_pasid_free(u32 pasid)
 {
 	trace_amdgpu_pasid_freed(pasid);
 	ida_simple_remove(&amdgpu_pasid_ida, pasid);
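Both the callback struct above and amdgpu_pasid_free() now carry the PASID as a fixed-width u32 instead of unsigned int, most likely to match the IOMMU side of the kernel, which treats PASIDs as 32-bit values. The corresponding declaration in amdgpu_ids.h would then read (a sketch inferred from this hunk):

	void amdgpu_pasid_free(u32 pasid);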
@@ -104,15 +104,15 @@
  *
  * Free the pasid only after all the fences in resv are signaled.
  */
-void amdgpu_pasid_free_delayed(struct reservation_object *resv,
-			       unsigned int pasid)
+void amdgpu_pasid_free_delayed(struct dma_resv *resv,
+			       u32 pasid)
 {
 	struct dma_fence *fence, **fences;
 	struct amdgpu_pasid_cb *cb;
 	unsigned count;
 	int r;
 
-	r = reservation_object_get_fences_rcu(resv, NULL, &count, &fences);
+	r = dma_resv_get_fences_rcu(resv, NULL, &count, &fences);
 	if (r)
 		goto fallback;
 
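This hunk and the next reflect the kernel-wide rename of reservation_object to dma_resv (merged around v5.4); as the patch shows, the conversion is purely mechanical:

	struct reservation_object              ->  struct dma_resv
	reservation_object_get_fences_rcu()    ->  dma_resv_get_fences_rcu()
	reservation_object_wait_timeout_rcu()  ->  dma_resv_wait_timeout_rcu()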
@@ -156,7 +156,7 @@
 	/* Not enough memory for the delayed delete, as last resort
 	 * block for all the fences to complete.
 	 */
-	reservation_object_wait_timeout_rcu(resv, true, false,
+	dma_resv_wait_timeout_rcu(resv, true, false,
 					    MAX_SCHEDULE_TIMEOUT);
 	amdgpu_pasid_free(pasid);
 }
@@ -206,7 +206,7 @@
 	int r;
 
 	if (ring->vmid_wait && !dma_fence_is_signaled(ring->vmid_wait))
-		return amdgpu_sync_fence(adev, sync, ring->vmid_wait, false);
+		return amdgpu_sync_fence(sync, ring->vmid_wait);
 
 	fences = kmalloc_array(sizeof(void *), id_mgr->num_ids, GFP_KERNEL);
 	if (!fences)
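amdgpu_sync_fence() lost both its adev argument and its trailing explicit-sync flag; judging from the call sites throughout this patch, the new prototype is simply:

	int amdgpu_sync_fence(struct amdgpu_sync *sync, struct dma_fence *f);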
@@ -215,7 +215,11 @@
 	/* Check if we have an idle VMID */
 	i = 0;
 	list_for_each_entry((*idle), &id_mgr->ids_lru, list) {
-		fences[i] = amdgpu_sync_peek_fence(&(*idle)->active, ring);
+		/* Don't use per engine and per process VMID at the same time */
+		struct amdgpu_ring *r = adev->vm_manager.concurrent_flush ?
+			NULL : ring;
+
+		fences[i] = amdgpu_sync_peek_fence(&(*idle)->active, r);
 		if (!fences[i])
 			break;
 		++i;
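Passing NULL instead of ring makes amdgpu_sync_peek_fence() report unsignaled fences from every ring, including the current one, so a VMID only counts as idle when it is idle everywhere. An equivalent expanded form of the ternary above, for readability:

	struct amdgpu_ring *r;

	if (adev->vm_manager.concurrent_flush)
		r = NULL;	/* VMID must be idle on all rings */
	else
		r = ring;	/* work on our own ring executes in order, skip it */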
@@ -241,7 +245,7 @@
 		return -ENOMEM;
 	}
 
-	r = amdgpu_sync_fence(adev, sync, &array->base, false);
+	r = amdgpu_sync_fence(sync, &array->base);
 	dma_fence_put(ring->vmid_wait);
 	ring->vmid_wait = &array->base;
 	return r;
@@ -280,21 +284,25 @@
 	if (updates && (*id)->flushed_updates &&
 	    updates->context == (*id)->flushed_updates->context &&
 	    !dma_fence_is_later(updates, (*id)->flushed_updates))
-		updates = NULL;
+		updates = NULL;
 
-	if ((*id)->owner != vm->entity.fence_context ||
+	if ((*id)->owner != vm->immediate.fence_context ||
 	    job->vm_pd_addr != (*id)->pd_gpu_addr ||
 	    updates || !(*id)->last_flush ||
 	    ((*id)->last_flush->context != fence_context &&
 	     !dma_fence_is_signaled((*id)->last_flush))) {
 		struct dma_fence *tmp;
 
+		/* Don't use per engine and per process VMID at the same time */
+		if (adev->vm_manager.concurrent_flush)
+			ring = NULL;
+
 		/* to prevent one context starved by another context */
 		(*id)->pd_gpu_addr = 0;
 		tmp = amdgpu_sync_peek_fence(&(*id)->active, ring);
 		if (tmp) {
 			*id = NULL;
-			r = amdgpu_sync_fence(adev, sync, tmp, false);
+			r = amdgpu_sync_fence(sync, tmp);
 			return r;
 		}
 		needs_flush = true;
@@ -303,7 +311,7 @@
 	/* Good we can use this VMID. Remember this submission as
 	 * user of the VMID.
 	 */
-	r = amdgpu_sync_fence(ring->adev, &(*id)->active, fence, false);
+	r = amdgpu_sync_fence(&(*id)->active, fence);
 	if (r)
 		return r;
 
@@ -349,7 +357,7 @@
 		struct dma_fence *flushed;
 
 		/* Check all the prerequisites to using this VMID */
-		if ((*id)->owner != vm->entity.fence_context)
+		if ((*id)->owner != vm->immediate.fence_context)
 			continue;
 
 		if ((*id)->pd_gpu_addr != job->vm_pd_addr)
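vm->entity is gone: the VM's page-table update work was split between two scheduler entities, and the immediate entity's fence context now serves as the per-VM owner token in all three places this patch touches. A sketch of the relevant amdgpu_vm fields, inferred from the names used here (the delayed counterpart is an assumption based on the upstream naming):

	struct amdgpu_vm {
		/* ... */
		struct drm_sched_entity immediate;	/* updates applied right away */
		struct drm_sched_entity delayed;	/* updates deferred to the scheduler */
		/* ... */
	};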
@@ -364,14 +372,13 @@
 		if (updates && (!flushed || dma_fence_is_later(updates, flushed)))
 			needs_flush = true;
 
-		/* Concurrent flushes are only possible starting with Vega10 */
-		if (adev->asic_type < CHIP_VEGA10 && needs_flush)
+		if (needs_flush && !adev->vm_manager.concurrent_flush)
 			continue;
 
 		/* Good, we can use this VMID. Remember this submission as
 		 * user of the VMID.
 		 */
-		r = amdgpu_sync_fence(ring->adev, &(*id)->active, fence, false);
+		r = amdgpu_sync_fence(&(*id)->active, fence);
 		if (r)
 			return r;
 
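The hard-coded ASIC comparison is replaced by a flag on the VM manager. Given the removed comment ("Concurrent flushes are only possible starting with Vega10"), the flag is presumably initialized once during VM manager setup, roughly:

	/* sketch only; the real initialization lives elsewhere and is per-ASIC */
	adev->vm_manager.concurrent_flush = adev->asic_type >= CHIP_VEGA10;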
@@ -431,8 +438,7 @@
 		id = idle;
 
 		/* Remember this submission as user of the VMID */
-		r = amdgpu_sync_fence(ring->adev, &id->active,
-				      fence, false);
+		r = amdgpu_sync_fence(&id->active, fence);
 		if (r)
 			goto error;
 
@@ -445,7 +451,7 @@
 	}
 
 	id->pd_gpu_addr = job->vm_pd_addr;
-	id->owner = vm->entity.fence_context;
+	id->owner = vm->immediate.fence_context;
 
 	if (job->vm_needs_flush) {
 		dma_fence_put(id->last_flush);
@@ -571,6 +577,9 @@
 		INIT_LIST_HEAD(&id_mgr->ids_lru);
 		atomic_set(&id_mgr->reserved_vmid_num, 0);
 
+		/* manage only VMIDs not used by KFD */
+		id_mgr->num_ids = adev->vm_manager.first_kfd_vmid;
+
 		/* skip over VMID 0, since it is the system VM */
 		for (j = 1; j < id_mgr->num_ids; ++j) {
 			amdgpu_vmid_reset(adev, i, j);
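With this change the VMID manager only hands out IDs below adev->vm_manager.first_kfd_vmid; everything from that ID up to the hardware maximum stays reserved for the KFD compute driver. On typical ASICs the split gives graphics VMIDs 1-7 and KFD VMIDs 8-15, i.e. the per-ASIC GMC code would set something like:

	/* assumption: the exact value varies per ASIC generation */
	adev->vm_manager.first_kfd_vmid = 8;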