```diff
@@ -24,7 +24,7 @@
 #include <linux/kthread.h>
 #include <linux/wait.h>
 #include <linux/sched.h>
-#include <drm/drmP.h>
+
 #include "amdgpu.h"
 #include "amdgpu_trace.h"
 
```
```diff
@@ -32,12 +32,32 @@
 {
 	struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
 	struct amdgpu_job *job = to_amdgpu_job(s_job);
+	struct amdgpu_task_info ti;
+	struct amdgpu_device *adev = ring->adev;
 
+	memset(&ti, 0, sizeof(struct amdgpu_task_info));
+
+	if (amdgpu_gpu_recovery &&
+	    amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
+		DRM_ERROR("ring %s timeout, but soft recovered\n",
+			  s_job->sched->name);
+		return;
+	}
+
+	amdgpu_vm_get_task_info(ring->adev, job->pasid, &ti);
 	DRM_ERROR("ring %s timeout, signaled seq=%u, emitted seq=%u\n",
 		  job->base.sched->name, atomic_read(&ring->fence_drv.last_seq),
 		  ring->fence_drv.sync_seq);
+	DRM_ERROR("Process information: process %s pid %d thread %s pid %d\n",
+		  ti.process_name, ti.tgid, ti.task_name, ti.pid);
 
-	amdgpu_device_gpu_recover(ring->adev, job, false);
+	if (amdgpu_device_should_recover_gpu(ring->adev)) {
+		amdgpu_device_gpu_recover(ring->adev, job);
+	} else {
+		drm_sched_suspend_timeout(&ring->sched);
+		if (amdgpu_sriov_vf(adev))
+			adev->virt.tdr_debug = true;
+	}
 }
 
 int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
```
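The timeout handler now escalates in stages instead of going straight to a full reset: soft recovery first, then process diagnostics, then a reset only where policy allows it (`amdgpu_gpu_recovery` is the `amdgpu.gpu_recovery` module parameter, whose auto default of -1 is truthy here). A condensed sketch of the ladder, not a verbatim copy of the handler body:

```c
/* Sketch of the escalation order introduced above; each stage runs only
 * if the previous one could not resolve the hang. */
static void timeout_escalate(struct amdgpu_ring *ring, struct amdgpu_job *job,
			     struct drm_sched_job *s_job)
{
	/* 1. Cheapest: kill the hung wavefront and keep the ring alive. */
	if (amdgpu_gpu_recovery &&
	    amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent))
		return;

	/* 2. Full GPU reset, but only where the recovery policy allows it. */
	if (amdgpu_device_should_recover_gpu(ring->adev)) {
		amdgpu_device_gpu_recover(ring->adev, job);
		return;
	}

	/* 3. No recovery allowed: park the scheduler's timeout handling. */
	drm_sched_suspend_timeout(&ring->sched);
}
```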
```diff
@@ -72,7 +92,8 @@
 }
 
 int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
-			     struct amdgpu_job **job)
+			     enum amdgpu_ib_pool_type pool_type,
+			     struct amdgpu_job **job)
 {
 	int r;
 
@@ -80,11 +101,9 @@
 	if (r)
 		return r;
 
-	r = amdgpu_ib_get(adev, NULL, size, &(*job)->ibs[0]);
+	r = amdgpu_ib_get(adev, NULL, size, pool_type, &(*job)->ibs[0]);
 	if (r)
 		kfree(*job);
-	else
-		(*job)->vm_pd_addr = adev->gart.table_addr;
 
 	return r;
 }
```
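Every caller of `amdgpu_job_alloc_with_ib()` must now say which IB pool it allocates from, and the job no longer gets `vm_pd_addr` seeded from the GART table address here. Assuming the `amdgpu_ib_pool_type` values from this series (`AMDGPU_IB_POOL_DELAYED`, `AMDGPU_IB_POOL_IMMEDIATE`, `AMDGPU_IB_POOL_DIRECT`), a caller update is a one-argument change:

```c
/* Before: IBs came from a single implicit pool. */
r = amdgpu_job_alloc_with_ib(adev, 64, &job);

/* After: the caller picks a pool. DELAYED suits normal work whose IB
 * can be reclaimed lazily; DIRECT is for reset/teardown paths that
 * must not wait on pool housekeeping. */
r = amdgpu_job_alloc_with_ib(adev, 64, AMDGPU_IB_POOL_DELAYED, &job);
```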
```diff
@@ -104,10 +123,10 @@
 
 static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
 {
-	struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
 	struct amdgpu_job *job = to_amdgpu_job(s_job);
 
-	amdgpu_ring_priority_put(ring, s_job->s_priority);
+	drm_sched_job_cleanup(s_job);
+
 	dma_fence_put(job->fence);
 	amdgpu_sync_free(&job->sync);
 	amdgpu_sync_free(&job->sched_sync);
```
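Two independent cleanups land in the free path: the per-ring priority accounting is gone, and the callback now calls `drm_sched_job_cleanup()` so the scheduler can release its side of the job (fence references and bookkeeping) before the driver frees its own. A minimal sketch of that ordering, using a hypothetical `my_job` wrapper type:

```c
static void my_job_free_cb(struct drm_sched_job *s_job)
{
	struct my_job *job = container_of(s_job, struct my_job, base);

	/* Let the scheduler drop its references first... */
	drm_sched_job_cleanup(s_job);

	/* ...then release driver-owned state and the job itself. */
	dma_fence_put(job->fence);
	kfree(job);
}
```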
```diff
@@ -127,8 +146,6 @@
 int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
 		      void *owner, struct dma_fence **f)
 {
-	enum drm_sched_priority priority;
-	struct amdgpu_ring *ring;
 	int r;
 
 	if (!f)
@@ -138,14 +155,9 @@
 	if (r)
 		return r;
 
-	job->owner = owner;
 	*f = dma_fence_get(&job->base.s_fence->finished);
 	amdgpu_job_free_resources(job);
-	priority = job->base.s_priority;
 	drm_sched_entity_push_job(&job->base, entity);
-
-	ring = to_amdgpu_ring(entity->rq->sched);
-	amdgpu_ring_priority_get(ring, priority);
 
 	return 0;
 }
```
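With the owner field and the priority get/put removed, submission reduces to pushing the job and taking a reference on its finished fence. A sketch of the resulting caller pattern (the `mman` kernel entity and `AMDGPU_FENCE_OWNER_UNDEFINED` follow existing amdgpu conventions; the surrounding error handling is illustrative):

```c
struct amdgpu_job *job;
struct dma_fence *fence;
int r;

r = amdgpu_job_alloc_with_ib(adev, 64, AMDGPU_IB_POOL_DELAYED, &job);
if (r)
	return r;

/* ... emit commands into job->ibs[0] ... */

r = amdgpu_job_submit(job, &adev->mman.entity,
		      AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
if (r) {
	amdgpu_job_free(job);
	return r;
}

dma_fence_put(fence);	/* or dma_fence_wait(fence, false) first to block */
return 0;
```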
```diff
@@ -172,17 +184,13 @@
 	struct amdgpu_job *job = to_amdgpu_job(sched_job);
 	struct amdgpu_vm *vm = job->vm;
 	struct dma_fence *fence;
-	bool explicit = false;
 	int r;
 
-	fence = amdgpu_sync_get_fence(&job->sync, &explicit);
-	if (fence && explicit) {
-		if (drm_sched_dependency_optimized(fence, s_entity)) {
-			r = amdgpu_sync_fence(ring->adev, &job->sched_sync,
-					      fence, false);
-			if (r)
-				DRM_ERROR("Error adding fence (%d)\n", r);
-		}
+	fence = amdgpu_sync_get_fence(&job->sync);
+	if (fence && drm_sched_dependency_optimized(fence, s_entity)) {
+		r = amdgpu_sync_fence(&job->sched_sync, fence);
+		if (r)
+			DRM_ERROR("Error adding fence (%d)\n", r);
 	}
 
 	while (fence == NULL && vm && !job->vmid) {
@@ -192,7 +200,7 @@
 		if (r)
 			DRM_ERROR("Error getting VM ID (%d)\n", r);
 
-		fence = amdgpu_sync_get_fence(&job->sync, NULL);
+		fence = amdgpu_sync_get_fence(&job->sync);
 	}
 
 	return fence;
```
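The explicit-sync flag disappears from the sync helpers at the same time, so any code threading it through needs a matching update. With hypothetical locals `sync`, `fence`, and `adev`:

```c
/* Before: callers received and re-supplied the explicit-sync flag. */
bool explicit;
fence = amdgpu_sync_get_fence(&sync, &explicit);
r = amdgpu_sync_fence(adev, &sync, fence, false);

/* After: no flag, and amdgpu_sync_fence() no longer takes the device. */
fence = amdgpu_sync_get_fence(&sync);
r = amdgpu_sync_fence(&sync, fence);
```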
```diff
@@ -233,6 +241,44 @@
 	return fence;
 }
 
+#define to_drm_sched_job(sched_job) \
+		container_of((sched_job), struct drm_sched_job, queue_node)
+
+void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
+{
+	struct drm_sched_job *s_job;
+	struct drm_sched_entity *s_entity = NULL;
+	int i;
+
+	/* Signal all jobs not yet scheduled */
+	for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
+		struct drm_sched_rq *rq = &sched->sched_rq[i];
+
+		if (!rq)
+			continue;
+
+		spin_lock(&rq->lock);
+		list_for_each_entry(s_entity, &rq->entities, list) {
+			while ((s_job = to_drm_sched_job(spsc_queue_pop(&s_entity->job_queue)))) {
+				struct drm_sched_fence *s_fence = s_job->s_fence;
+
+				dma_fence_signal(&s_fence->scheduled);
+				dma_fence_set_error(&s_fence->finished, -EHWPOISON);
+				dma_fence_signal(&s_fence->finished);
+			}
+		}
+		spin_unlock(&rq->lock);
+	}
+
+	/* Signal all jobs already scheduled to HW */
+	list_for_each_entry(s_job, &sched->ring_mirror_list, node) {
+		struct drm_sched_fence *s_fence = s_job->s_fence;
+
+		dma_fence_set_error(&s_fence->finished, -EHWPOISON);
+		dma_fence_signal(&s_fence->finished);
+	}
+}
+
 const struct drm_sched_backend_ops amdgpu_sched_ops = {
 	.dependency = amdgpu_job_dependency,
 	.run_job = amdgpu_job_run,
```
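The new helper fails every queued and already-scheduled job with `-EHWPOISON` so that waiters unblock when the hardware is going away (for example after an unrecoverable error or on device removal). A hypothetical caller would walk all initialized rings:

```c
/* Hypothetical teardown path: fail all outstanding work on every ring. */
int i;

for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
	struct amdgpu_ring *ring = adev->rings[i];

	/* Skip unused rings and rings whose scheduler never started. */
	if (!ring || !ring->sched.thread)
		continue;

	amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
}
```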