2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -24,7 +24,7 @@
 #include <linux/kthread.h>
 #include <linux/wait.h>
 #include <linux/sched.h>
-#include <drm/drmP.h>
+
 #include "amdgpu.h"
 #include "amdgpu_trace.h"
 
@@ -32,12 +32,32 @@
 {
	struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
	struct amdgpu_job *job = to_amdgpu_job(s_job);
+	struct amdgpu_task_info ti;
+	struct amdgpu_device *adev = ring->adev;
 
+	memset(&ti, 0, sizeof(struct amdgpu_task_info));
+
+	if (amdgpu_gpu_recovery &&
+	    amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
+		DRM_ERROR("ring %s timeout, but soft recovered\n",
+			  s_job->sched->name);
+		return;
+	}
+
+	amdgpu_vm_get_task_info(ring->adev, job->pasid, &ti);
	DRM_ERROR("ring %s timeout, signaled seq=%u, emitted seq=%u\n",
		  job->base.sched->name, atomic_read(&ring->fence_drv.last_seq),
		  ring->fence_drv.sync_seq);
+	DRM_ERROR("Process information: process %s pid %d thread %s pid %d\n",
+		  ti.process_name, ti.tgid, ti.task_name, ti.pid);
 
-	amdgpu_device_gpu_recover(ring->adev, job, false);
+	if (amdgpu_device_should_recover_gpu(ring->adev)) {
+		amdgpu_device_gpu_recover(ring->adev, job);
+	} else {
+		drm_sched_suspend_timeout(&ring->sched);
+		if (amdgpu_sriov_vf(adev))
+			adev->virt.tdr_debug = true;
+	}
 }
 
 int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
@@ -72,7 +92,8 @@
 }
 
 int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
-			     struct amdgpu_job **job)
+			     enum amdgpu_ib_pool_type pool_type,
+			     struct amdgpu_job **job)
 {
	int r;
 
@@ -80,11 +101,9 @@
	if (r)
		return r;
 
-	r = amdgpu_ib_get(adev, NULL, size, &(*job)->ibs[0]);
+	r = amdgpu_ib_get(adev, NULL, size, pool_type, &(*job)->ibs[0]);
	if (r)
		kfree(*job);
-	else
-		(*job)->vm_pd_addr = adev->gart.table_addr;
 
	return r;
 }
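
Note: with the extra enum amdgpu_ib_pool_type argument, every caller of amdgpu_job_alloc_with_ib() now has to say which IB pool its indirect buffer comes from. A minimal sketch of an updated call site follows; the pool value AMDGPU_IB_POOL_DELAYED and the helper name are assumptions for illustration, not part of this change.

/* Illustrative only: allocate a job with one small IB from the
 * (assumed) delayed IB pool, matching the new signature above. */
static int example_alloc_job(struct amdgpu_device *adev,
			     struct amdgpu_job **job)
{
	int r;

	r = amdgpu_job_alloc_with_ib(adev, 64, AMDGPU_IB_POOL_DELAYED, job);
	if (r)
		return r;

	/* ... fill (*job)->ibs[0] with packets before submitting ... */
	return 0;
}
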
@@ -104,10 +123,10 @@
 
 static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
 {
-	struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
	struct amdgpu_job *job = to_amdgpu_job(s_job);
 
-	amdgpu_ring_priority_put(ring, s_job->s_priority);
+	drm_sched_job_cleanup(s_job);
+
	dma_fence_put(job->fence);
	amdgpu_sync_free(&job->sync);
	amdgpu_sync_free(&job->sched_sync);
@@ -127,8 +146,6 @@
 int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
		      void *owner, struct dma_fence **f)
 {
-	enum drm_sched_priority priority;
-	struct amdgpu_ring *ring;
	int r;
 
	if (!f)
@@ -138,14 +155,9 @@
	if (r)
		return r;
 
-	job->owner = owner;
	*f = dma_fence_get(&job->base.s_fence->finished);
	amdgpu_job_free_resources(job);
-	priority = job->base.s_priority;
	drm_sched_entity_push_job(&job->base, entity);
-
-	ring = to_amdgpu_ring(entity->rq->sched);
-	amdgpu_ring_priority_get(ring, priority);
 
	return 0;
 }
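
With the per-ring priority bookkeeping and job->owner gone from the submit path, a caller only allocates a job, submits it, and manages the finished fence. A hedged sketch of that pattern (helper name and the waiting policy are illustrative, not from this diff):

/* Illustrative only: push a prepared job to an entity and wait for it. */
static int example_submit(struct amdgpu_job *job,
			  struct drm_sched_entity *entity, void *owner)
{
	struct dma_fence *fence;
	int r;

	r = amdgpu_job_submit(job, entity, owner, &fence);
	if (r) {
		amdgpu_job_free(job);
		return r;
	}

	dma_fence_wait(fence, false);	/* or stash the fence for later */
	dma_fence_put(fence);
	return 0;
}
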
@@ -172,17 +184,13 @@
	struct amdgpu_job *job = to_amdgpu_job(sched_job);
	struct amdgpu_vm *vm = job->vm;
	struct dma_fence *fence;
-	bool explicit = false;
	int r;
 
-	fence = amdgpu_sync_get_fence(&job->sync, &explicit);
-	if (fence && explicit) {
-		if (drm_sched_dependency_optimized(fence, s_entity)) {
-			r = amdgpu_sync_fence(ring->adev, &job->sched_sync,
-					      fence, false);
-			if (r)
-				DRM_ERROR("Error adding fence (%d)\n", r);
-		}
+	fence = amdgpu_sync_get_fence(&job->sync);
+	if (fence && drm_sched_dependency_optimized(fence, s_entity)) {
+		r = amdgpu_sync_fence(&job->sched_sync, fence);
+		if (r)
+			DRM_ERROR("Error adding fence (%d)\n", r);
	}
 
	while (fence == NULL && vm && !job->vmid) {
@@ -192,7 +200,7 @@
		if (r)
			DRM_ERROR("Error getting VM ID (%d)\n", r);
 
-		fence = amdgpu_sync_get_fence(&job->sync, NULL);
+		fence = amdgpu_sync_get_fence(&job->sync);
	}
 
	return fence;
@@ -233,6 +241,44 @@
	return fence;
 }
 
+#define to_drm_sched_job(sched_job)		\
+		container_of((sched_job), struct drm_sched_job, queue_node)
+
+void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
+{
+	struct drm_sched_job *s_job;
+	struct drm_sched_entity *s_entity = NULL;
+	int i;
+
+	/* Signal all jobs not yet scheduled */
+	for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
+		struct drm_sched_rq *rq = &sched->sched_rq[i];
+
+		if (!rq)
+			continue;
+
+		spin_lock(&rq->lock);
+		list_for_each_entry(s_entity, &rq->entities, list) {
+			while ((s_job = to_drm_sched_job(spsc_queue_pop(&s_entity->job_queue)))) {
+				struct drm_sched_fence *s_fence = s_job->s_fence;
+
+				dma_fence_signal(&s_fence->scheduled);
+				dma_fence_set_error(&s_fence->finished, -EHWPOISON);
+				dma_fence_signal(&s_fence->finished);
+			}
+		}
+		spin_unlock(&rq->lock);
+	}
+
+	/* Signal all jobs already scheduled to HW */
+	list_for_each_entry(s_job, &sched->ring_mirror_list, node) {
+		struct drm_sched_fence *s_fence = s_job->s_fence;
+
+		dma_fence_set_error(&s_fence->finished, -EHWPOISON);
+		dma_fence_signal(&s_fence->finished);
+	}
+}
+
 const struct drm_sched_backend_ops amdgpu_sched_ops = {
	.dependency = amdgpu_job_dependency,
	.run_job = amdgpu_job_run,
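
The new amdgpu_job_stop_all_jobs_on_sched() operates on a single scheduler. A caller that wants to fail everything (for example when the device is gone and no reset will follow) would loop over the rings; a hedged sketch, with the wrapper name being an assumption rather than part of this change:

/* Illustrative only: fail all queued and in-flight jobs on every ring. */
static void example_stop_all_rings(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->sched.thread)
			continue;

		amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
	}
}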