@@ -3,7 +3,7 @@
  * Copyright (C) 2017 Etnaviv Project
  */
 
-#include <linux/kthread.h>
+#include <linux/moduleparam.h>
 
 #include "etnaviv_drv.h"
 #include "etnaviv_dump.h"
@@ -89,12 +89,15 @@
 	u32 dma_addr;
 	int change;
 
+	/* block scheduler */
+	drm_sched_stop(&gpu->sched, sched_job);
+
 	/*
 	 * If the GPU managed to complete this jobs fence, the timout is
 	 * spurious. Bail out.
 	 */
 	if (dma_fence_is_signaled(submit->out_fence))
-		return;
+		goto out_no_timeout;
 
 	/*
 	 * If the GPU is still making forward progress on the front-end (which
@@ -103,29 +106,32 @@
 	 */
 	dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
 	change = dma_addr - gpu->hangcheck_dma_addr;
-	if (change < 0 || change > 16) {
+	if (gpu->completed_fence != gpu->hangcheck_fence ||
+	    change < 0 || change > 16) {
 		gpu->hangcheck_dma_addr = dma_addr;
-		schedule_delayed_work(&sched_job->work_tdr,
-				      sched_job->sched->timeout);
-		return;
+		gpu->hangcheck_fence = gpu->completed_fence;
+		goto out_no_timeout;
 	}
 
-	/* block scheduler */
-	kthread_park(gpu->sched.thread);
-	drm_sched_hw_job_reset(&gpu->sched, sched_job);
+	if(sched_job)
+		drm_sched_increase_karma(sched_job);
 
 	/* get the GPU back into the init state */
-	etnaviv_core_dump(gpu);
+	etnaviv_core_dump(submit);
 	etnaviv_gpu_recover_hang(gpu);
 
+	drm_sched_resubmit_jobs(&gpu->sched);
+
+out_no_timeout:
 	/* restart scheduler after GPU is usable again */
-	drm_sched_job_recovery(&gpu->sched);
-	kthread_unpark(gpu->sched.thread);
+	drm_sched_start(&gpu->sched, true);
 }
 
 static void etnaviv_sched_free_job(struct drm_sched_job *sched_job)
 {
 	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
+
+	drm_sched_job_cleanup(sched_job);
 
 	etnaviv_submit_put(submit);
 }
@@ -150,7 +156,7 @@
 	mutex_lock(&submit->gpu->fence_lock);
 
 	ret = drm_sched_job_init(&submit->sched_job, sched_entity,
-				 submit->cmdbuf.ctx);
+				 submit->ctx);
 	if (ret)
 		goto out_unlock;
 
@@ -159,6 +165,7 @@
 					 submit->out_fence, 0,
 					 INT_MAX, GFP_KERNEL);
 	if (submit->out_fence_id < 0) {
+		drm_sched_job_cleanup(&submit->sched_job);
 		ret = -ENOMEM;
 		goto out_unlock;
 	}