@@ -26,20 +26,22 @@
 
 #include <drm/spsc_queue.h>
 #include <linux/dma-fence.h>
+#include <linux/completion.h>
 
 #define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000)
 
 struct drm_gpu_scheduler;
 struct drm_sched_rq;
 
+/* These are often used as an (initial) index
+ * to an array, and as such should start at 0.
+ */
 enum drm_sched_priority {
        DRM_SCHED_PRIORITY_MIN,
-       DRM_SCHED_PRIORITY_LOW = DRM_SCHED_PRIORITY_MIN,
        DRM_SCHED_PRIORITY_NORMAL,
-       DRM_SCHED_PRIORITY_HIGH_SW,
-       DRM_SCHED_PRIORITY_HIGH_HW,
+       DRM_SCHED_PRIORITY_HIGH,
        DRM_SCHED_PRIORITY_KERNEL,
-       DRM_SCHED_PRIORITY_MAX,
-       DRM_SCHED_PRIORITY_INVALID = -1,
+
+       DRM_SCHED_PRIORITY_COUNT,
        DRM_SCHED_PRIORITY_UNSET = -2
 };
 
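The new comment pins down why the enum must stay zero-based: the values index per-priority runqueue arrays (see sched_rq[DRM_SCHED_PRIORITY_COUNT] later in this patch), with DRM_SCHED_PRIORITY_COUNT replacing DRM_SCHED_PRIORITY_MAX as the array-size sentinel. A minimal sketch of that indexing pattern, using a hypothetical driver-side array (not part of the patch):

#include <drm/gpu_scheduler.h>

static struct drm_sched_rq example_rqs[DRM_SCHED_PRIORITY_COUNT];

static struct drm_sched_rq *example_rq_for(enum drm_sched_priority prio)
{
        /* negative sentinels such as DRM_SCHED_PRIORITY_UNSET must be
         * filtered out before indexing */
        if (prio < DRM_SCHED_PRIORITY_MIN || prio >= DRM_SCHED_PRIORITY_COUNT)
                return NULL;
        return &example_rqs[prio];
}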
@@ -50,7 +52,12 @@
  *
  * @list: used to append this struct to the list of entities in the
  *        runqueue.
- * @rq: runqueue to which this entity belongs.
+ * @rq: runqueue on which this entity is currently scheduled.
+ * @sched_list: A list of schedulers (drm_gpu_schedulers).
+ *              Jobs from this entity can be scheduled on any scheduler
+ *              on this list.
+ * @num_sched_list: number of drm_gpu_schedulers in the sched_list.
+ * @priority: priority of the entity.
  * @rq_lock: lock to modify the runqueue to which this entity belongs.
  * @job_queue: the list of jobs of this entity.
  * @fence_seq: a linearly increasing seqno incremented with each
@@ -67,6 +74,8 @@
  * @fini_status: contains the exit status in case the process was signalled.
  * @last_scheduled: points to the finished fence of the last scheduled job.
  * @last_user: last group leader pushing a job into the entity.
+ * @stopped: marks the entity as removed from the rq and destined for termination.
+ * @entity_idle: signals when the entity is not in use.
  *
  * Entities will emit jobs in order to their corresponding hardware
  * ring, and the scheduler will alternate between entities based on
@@ -75,6 +84,9 @@
 struct drm_sched_entity {
        struct list_head                list;
        struct drm_sched_rq             *rq;
+       struct drm_gpu_scheduler        **sched_list;
+       unsigned int                    num_sched_list;
+       enum drm_sched_priority         priority;
        spinlock_t                      rq_lock;
 
        struct spsc_queue               job_queue;
@@ -87,6 +99,8 @@
        atomic_t                        *guilty;
        struct dma_fence                *last_scheduled;
        struct task_struct              *last_user;
+       bool                            stopped;
+       struct completion               entity_idle;
 };
 
 /**
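With @sched_list, @num_sched_list and @priority now carried on the entity itself, an entity is no longer bound to a single runqueue: the core may place each of its jobs on any scheduler in the list. A hypothetical init snippet against two rings, using the new drm_sched_entity_init() signature from the end of this patch (sched_a/sched_b are placeholders, not part of the patch):

#include <drm/gpu_scheduler.h>

extern struct drm_gpu_scheduler sched_a, sched_b;      /* placeholder rings */

static int example_entity_setup(struct drm_sched_entity *entity)
{
        struct drm_gpu_scheduler *scheds[] = { &sched_a, &sched_b };

        /* jobs pushed to this entity may run on either ring */
        return drm_sched_entity_init(entity, DRM_SCHED_PRIORITY_NORMAL,
                                     scheds, ARRAY_SIZE(scheds), NULL);
}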
@@ -131,10 +145,6 @@
        struct dma_fence                finished;
 
        /**
-        * @cb: the callback for the parent fence below.
-        */
-       struct dma_fence_cb             cb;
-       /**
         * @parent: the fence returned by &drm_sched_backend_ops.run_job
         * when scheduling the job on hardware. We signal the
         * &drm_sched_fence.finished fence once parent is signalled.
@@ -164,18 +174,14 @@
  * @sched: the scheduler instance on which this job is scheduled.
  * @s_fence: contains the fences for the scheduling of job.
  * @finish_cb: the callback for the finished fence.
- * @finish_work: schedules the function @drm_sched_job_finish once the job has
- *               finished to remove the job from the
- *               @drm_gpu_scheduler.ring_mirror_list.
  * @node: used to append this struct to the @drm_gpu_scheduler.ring_mirror_list.
- * @work_tdr: schedules a delayed call to @drm_sched_job_timedout after the timeout
- *            interval is over.
  * @id: a unique id assigned to each job scheduled on the scheduler.
  * @karma: increment on every hang caused by this job. If this exceeds the hang
  *         limit of the scheduler then the job is marked guilty and will not
  *         be scheduled further.
  * @s_priority: the priority of the job.
  * @entity: the entity to which this job belongs.
+ * @cb: the callback for the parent fence in s_fence.
  *
  * A job is created by the driver using drm_sched_job_init(), and
  * should call drm_sched_entity_push_job() once it wants the scheduler
@@ -186,13 +192,12 @@
        struct drm_gpu_scheduler        *sched;
        struct drm_sched_fence          *s_fence;
        struct dma_fence_cb             finish_cb;
-       struct work_struct              finish_work;
        struct list_head                node;
-       struct delayed_work             work_tdr;
        uint64_t                        id;
        atomic_t                        karma;
        enum drm_sched_priority         s_priority;
        struct drm_sched_entity         *entity;
+       struct dma_fence_cb             cb;
 };
 
 static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
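The kernel-doc above already spells out the lifecycle: drm_sched_job_init() creates the job, drm_sched_entity_push_job() hands it to the scheduler. A sketch of that flow with a hypothetical driver job type wrapping struct drm_sched_job (not part of the patch):

#include <drm/gpu_scheduler.h>

struct example_job {
        struct drm_sched_job base;
        /* driver-specific payload would follow */
};

static int example_submit(struct example_job *job,
                          struct drm_sched_entity *entity, void *owner)
{
        int r;

        r = drm_sched_job_init(&job->base, entity, owner);
        if (r)
                return r;

        drm_sched_entity_push_job(&job->base, entity);
        return 0;
}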
@@ -252,11 +257,16 @@
  *                 finished.
  * @hw_rq_count: the number of jobs currently in the hardware queue.
  * @job_id_count: used to assign a unique id to each job.
+ * @work_tdr: schedules a delayed call to @drm_sched_job_timedout after the
+ *            timeout interval is over.
  * @thread: the kthread on which the scheduler runs.
  * @ring_mirror_list: the list of jobs which are currently in the job queue.
  * @job_list_lock: lock to protect the ring_mirror_list.
  * @hang_limit: once the hangs caused by a job cross this limit then it is
  *              marked guilty and will no longer be considered for scheduling.
+ * @score: score to help the load balancer pick an idle scheduler.
+ * @ready: marks whether the underlying hardware is ready to work.
+ * @free_guilty: a hint to the timeout handler to free the guilty job.
  *
  * One scheduler is implemented for each hardware ring.
  */
@@ -265,14 +275,18 @@
        uint32_t                        hw_submission_limit;
        long                            timeout;
        const char                      *name;
-       struct drm_sched_rq             sched_rq[DRM_SCHED_PRIORITY_MAX];
+       struct drm_sched_rq             sched_rq[DRM_SCHED_PRIORITY_COUNT];
        wait_queue_head_t               wake_up_worker;
        wait_queue_head_t               job_scheduled;
        atomic_t                        hw_rq_count;
        atomic64_t                      job_id_count;
+       struct delayed_work             work_tdr;
        struct task_struct              *thread;
        struct list_head                ring_mirror_list;
        spinlock_t                      job_list_lock;
        int                             hang_limit;
+       atomic_t                        score;
+       bool                            ready;
+       bool                            free_guilty;
 };
 
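Note that @work_tdr moves here from struct drm_sched_job: there is now one timeout timer per scheduler rather than one per job. A hypothetical bring-up call using drm_sched_init() as declared below; the ops table and the numeric limits are placeholders, not part of the patch:

#include <linux/jiffies.h>
#include <drm/gpu_scheduler.h>

extern const struct drm_sched_backend_ops example_sched_ops; /* placeholder */

static struct drm_gpu_scheduler example_sched;

static int example_sched_setup(void)
{
        return drm_sched_init(&example_sched, &example_sched_ops,
                              4,                        /* hw_submission */
                              2,                        /* hang_limit */
                              msecs_to_jiffies(1000),   /* timeout */
                              "example-ring");
}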
@@ -279,6 +293,29 @@
 int drm_sched_init(struct drm_gpu_scheduler *sched,
                    const struct drm_sched_backend_ops *ops,
                    uint32_t hw_submission, unsigned hang_limit, long timeout,
                    const char *name);
+
 void drm_sched_fini(struct drm_gpu_scheduler *sched);
+int drm_sched_job_init(struct drm_sched_job *job,
+                       struct drm_sched_entity *entity,
+                       void *owner);
+void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
+                                   struct drm_gpu_scheduler **sched_list,
+                                   unsigned int num_sched_list);
+
+void drm_sched_job_cleanup(struct drm_sched_job *job);
+void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
+void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad);
+void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery);
+void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
+void drm_sched_increase_karma(struct drm_sched_job *bad);
+bool drm_sched_dependency_optimized(struct dma_fence *fence,
+                                    struct drm_sched_entity *entity);
+void drm_sched_fault(struct drm_gpu_scheduler *sched);
+void drm_sched_job_kickout(struct drm_sched_job *s_job);
+
+void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
+                             struct drm_sched_entity *entity);
+void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
+                                struct drm_sched_entity *entity);
 
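drm_sched_stop(), drm_sched_increase_karma(), drm_sched_resubmit_jobs() and drm_sched_start() are exported here as building blocks for driver reset paths. A sketch of how a driver's timeout handler might chain them, assuming a driver-specific example_reset_hw(); the exact ordering and locking requirements remain the driver's responsibility:

#include <drm/gpu_scheduler.h>

extern void example_reset_hw(void);     /* placeholder driver reset */

static void example_timedout_job(struct drm_sched_job *bad)
{
        struct drm_gpu_scheduler *sched = bad->sched;

        drm_sched_stop(sched, bad);     /* park the scheduler thread */
        drm_sched_increase_karma(bad);  /* record a hang against the offender */

        example_reset_hw();             /* driver-specific hardware reset */

        drm_sched_resubmit_jobs(sched); /* re-queue unfinished jobs */
        drm_sched_start(sched, true);   /* restart, full_recovery = true */
}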
@@ -285,27 +322,29 @@
 int drm_sched_entity_init(struct drm_sched_entity *entity,
-                          struct drm_sched_rq **rq_list,
-                          unsigned int num_rq_list,
+                          enum drm_sched_priority priority,
+                          struct drm_gpu_scheduler **sched_list,
+                          unsigned int num_sched_list,
                           atomic_t *guilty);
 long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout);
 void drm_sched_entity_fini(struct drm_sched_entity *entity);
 void drm_sched_entity_destroy(struct drm_sched_entity *entity);
+void drm_sched_entity_select_rq(struct drm_sched_entity *entity);
+struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity);
 void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
                                struct drm_sched_entity *entity);
-void drm_sched_entity_set_rq(struct drm_sched_entity *entity,
-                             struct drm_sched_rq *rq);
+void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
+                                   enum drm_sched_priority priority);
+bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);
 
 struct drm_sched_fence *drm_sched_fence_create(
        struct drm_sched_entity *s_entity, void *owner);
 void drm_sched_fence_scheduled(struct drm_sched_fence *fence);
 void drm_sched_fence_finished(struct drm_sched_fence *fence);
-int drm_sched_job_init(struct drm_sched_job *job,
-                       struct drm_sched_entity *entity,
-                       void *owner);
-void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched,
-                            struct drm_sched_job *job);
-void drm_sched_job_recovery(struct drm_gpu_scheduler *sched);
-bool drm_sched_dependency_optimized(struct dma_fence *fence,
-                                    struct drm_sched_entity *entity);
-void drm_sched_job_kickout(struct drm_sched_job *s_job);
+
+unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched);
+void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
+                              unsigned long remaining);
+struct drm_gpu_scheduler *
+drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
+                    unsigned int num_sched_list);
 
 #endif
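drm_sched_pick_best() gives drivers a way to choose one scheduler from a candidate list at submission time, presumably the least loaded one going by the new @score counter. A hypothetical call (sched_a/sched_b are placeholders, not part of the patch):

#include <drm/gpu_scheduler.h>

extern struct drm_gpu_scheduler sched_a, sched_b;      /* placeholder rings */

static struct drm_gpu_scheduler *example_pick(void)
{
        struct drm_gpu_scheduler *scheds[] = { &sched_a, &sched_b };

        return drm_sched_pick_best(scheds, ARRAY_SIZE(scheds));
}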