2023-12-09 b22da3d8526a935aa31e086e63f60ff3246cb61c
--- a/kernel/include/drm/gpu_scheduler.h
+++ b/kernel/include/drm/gpu_scheduler.h
@@ -26,21 +26,23 @@
 
 #include <drm/spsc_queue.h>
 #include <linux/dma-fence.h>
+#include <linux/completion.h>
 
 #define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000)
 
 struct drm_gpu_scheduler;
 struct drm_sched_rq;
 
+/* These are often used as an (initial) index
+ * to an array, and as such should start at 0.
+ */
 enum drm_sched_priority {
 	DRM_SCHED_PRIORITY_MIN,
-	DRM_SCHED_PRIORITY_LOW = DRM_SCHED_PRIORITY_MIN,
 	DRM_SCHED_PRIORITY_NORMAL,
-	DRM_SCHED_PRIORITY_HIGH_SW,
-	DRM_SCHED_PRIORITY_HIGH_HW,
+	DRM_SCHED_PRIORITY_HIGH,
 	DRM_SCHED_PRIORITY_KERNEL,
-	DRM_SCHED_PRIORITY_MAX,
-	DRM_SCHED_PRIORITY_INVALID = -1,
+
+	DRM_SCHED_PRIORITY_COUNT,
 	DRM_SCHED_PRIORITY_UNSET = -2
 };
 
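The new comment notes that these values double as array indices, with DRM_SCHED_PRIORITY_COUNT as the array size and loop bound. A minimal sketch of that usage, assuming a struct drm_gpu_scheduler pointer as declared later in this header; the helper name my_for_each_rq is illustrative only:

static void my_for_each_rq(struct drm_gpu_scheduler *sched)
{
	int i;

	/* Valid priorities run from DRM_SCHED_PRIORITY_MIN (0) up to, but
	 * not including, DRM_SCHED_PRIORITY_COUNT, which also sizes the
	 * per-priority sched_rq[] array in struct drm_gpu_scheduler. */
	for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_COUNT; i++) {
		struct drm_sched_rq *rq = &sched->sched_rq[i];

		/* ... inspect the entities queued on rq ... */
		(void)rq;
	}
}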
@@ -50,7 +52,12 @@
  *
  * @list: used to append this struct to the list of entities in the
  * runqueue.
- * @rq: runqueue to which this entity belongs.
+ * @rq: runqueue on which this entity is currently scheduled.
+ * @sched_list: A list of schedulers (drm_gpu_schedulers).
+ * Jobs from this entity can be scheduled on any scheduler
+ * on this list.
+ * @num_sched_list: number of drm_gpu_schedulers in the sched_list.
+ * @priority: priority of the entity
  * @rq_lock: lock to modify the runqueue to which this entity belongs.
  * @job_queue: the list of jobs of this entity.
  * @fence_seq: a linearly increasing seqno incremented with each
@@ -67,6 +74,8 @@
  * @fini_status: contains the exit status in case the process was signalled.
  * @last_scheduled: points to the finished fence of the last scheduled job.
  * @last_user: last group leader pushing a job into the entity.
+ * @stopped: Marks the entity as removed from the rq and destined for
+ * termination.
+ * @entity_idle: Signals when the entity is not in use.
  *
  * Entities will emit jobs in order to their corresponding hardware
  * ring, and the scheduler will alternate between entities based on
@@ -75,6 +84,9 @@
 struct drm_sched_entity {
 	struct list_head list;
 	struct drm_sched_rq *rq;
+	struct drm_gpu_scheduler **sched_list;
+	unsigned int num_sched_list;
+	enum drm_sched_priority priority;
 	spinlock_t rq_lock;
 
 	struct spsc_queue job_queue;
@@ -87,6 +99,8 @@
 	atomic_t *guilty;
 	struct dma_fence *last_scheduled;
 	struct task_struct *last_user;
+	bool stopped;
+	struct completion entity_idle;
 };
 
 /**
@@ -131,10 +145,6 @@
 	struct dma_fence finished;
 
 	/**
-	 * @cb: the callback for the parent fence below.
-	 */
-	struct dma_fence_cb cb;
-	/**
 	 * @parent: the fence returned by &drm_sched_backend_ops.run_job
 	 * when scheduling the job on hardware. We signal the
 	 * &drm_sched_fence.finished fence once parent is signalled.
@@ -164,18 +174,14 @@
  * @sched: the scheduler instance on which this job is scheduled.
  * @s_fence: contains the fences for the scheduling of the job.
  * @finish_cb: the callback for the finished fence.
- * @finish_work: schedules the function @drm_sched_job_finish once the job has
- * finished to remove the job from the
- * @drm_gpu_scheduler.ring_mirror_list.
  * @node: used to append this struct to the @drm_gpu_scheduler.ring_mirror_list.
- * @work_tdr: schedules a delayed call to @drm_sched_job_timedout after the timeout
- * interval is over.
  * @id: a unique id assigned to each job scheduled on the scheduler.
  * @karma: increment on every hang caused by this job. If this exceeds the hang
  * limit of the scheduler then the job is marked guilty and will not
  * be scheduled further.
  * @s_priority: the priority of the job.
  * @entity: the entity to which this job belongs.
+ * @cb: the callback for the parent fence in s_fence.
  *
  * A job is created by the driver using drm_sched_job_init(), and
  * should call drm_sched_entity_push_job() once it wants the scheduler
@@ -186,13 +192,12 @@
 	struct drm_gpu_scheduler *sched;
 	struct drm_sched_fence *s_fence;
 	struct dma_fence_cb finish_cb;
-	struct work_struct finish_work;
 	struct list_head node;
-	struct delayed_work work_tdr;
 	uint64_t id;
 	atomic_t karma;
 	enum drm_sched_priority s_priority;
 	struct drm_sched_entity *entity;
+	struct dma_fence_cb cb;
 };
 
 static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
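The kernel-doc above spells out the submission contract: the driver embeds a struct drm_sched_job, calls drm_sched_job_init(), then hands the job over with drm_sched_entity_push_job(). A hedged sketch of that flow; struct my_job, my_submit and the owner argument are hypothetical driver-side names, not part of this header:

struct my_job {
	struct drm_sched_job base;	/* embedded scheduler job */
	/* ... driver-specific command stream, fences, etc. ... */
};

static int my_submit(struct my_job *job, struct drm_sched_entity *entity,
		     void *owner)
{
	int r;

	/* Tie the job to the entity and set up its scheduler fences. */
	r = drm_sched_job_init(&job->base, entity, owner);
	if (r)
		return r;

	/* Queue the job; the scheduler calls run_job() once it is picked. */
	drm_sched_entity_push_job(&job->base, entity);
	return 0;
}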
@@ -252,11 +257,16 @@
  * finished.
  * @hw_rq_count: the number of jobs currently in the hardware queue.
  * @job_id_count: used to assign a unique id to each job.
+ * @work_tdr: schedules a delayed call to @drm_sched_job_timedout after the
+ * timeout interval is over.
  * @thread: the kthread on which the scheduler runs.
  * @ring_mirror_list: the list of jobs which are currently in the job queue.
  * @job_list_lock: lock to protect the ring_mirror_list.
  * @hang_limit: once the hangs by a job crosses this limit then it is marked
  * guilty and it will not be considered for scheduling further.
+ * @score: score to help the load balancer pick an idle scheduler.
+ * @ready: marks whether the underlying HW is ready to work.
+ * @free_guilty: A hint to the timeout handler to free the guilty job.
  *
  * One scheduler is implemented for each hardware ring.
  */
@@ -265,47 +275,76 @@
 	uint32_t hw_submission_limit;
 	long timeout;
 	const char *name;
-	struct drm_sched_rq sched_rq[DRM_SCHED_PRIORITY_MAX];
+	struct drm_sched_rq sched_rq[DRM_SCHED_PRIORITY_COUNT];
 	wait_queue_head_t wake_up_worker;
 	wait_queue_head_t job_scheduled;
 	atomic_t hw_rq_count;
 	atomic64_t job_id_count;
+	struct delayed_work work_tdr;
 	struct task_struct *thread;
 	struct list_head ring_mirror_list;
 	spinlock_t job_list_lock;
 	int hang_limit;
+	atomic_t score;
+	bool ready;
+	bool free_guilty;
 };
 
 int drm_sched_init(struct drm_gpu_scheduler *sched,
 		   const struct drm_sched_backend_ops *ops,
 		   uint32_t hw_submission, unsigned hang_limit, long timeout,
 		   const char *name);
+
 void drm_sched_fini(struct drm_gpu_scheduler *sched);
+int drm_sched_job_init(struct drm_sched_job *job,
+		       struct drm_sched_entity *entity,
+		       void *owner);
+void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
+				   struct drm_gpu_scheduler **sched_list,
+				   unsigned int num_sched_list);
+
+void drm_sched_job_cleanup(struct drm_sched_job *job);
+void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
+void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad);
+void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery);
+void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
+void drm_sched_increase_karma(struct drm_sched_job *bad);
+bool drm_sched_dependency_optimized(struct dma_fence* fence,
+				    struct drm_sched_entity *entity);
+void drm_sched_fault(struct drm_gpu_scheduler *sched);
+void drm_sched_job_kickout(struct drm_sched_job *s_job);
+
+void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
+			     struct drm_sched_entity *entity);
+void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
+				struct drm_sched_entity *entity);
 
 int drm_sched_entity_init(struct drm_sched_entity *entity,
-			  struct drm_sched_rq **rq_list,
-			  unsigned int num_rq_list,
+			  enum drm_sched_priority priority,
+			  struct drm_gpu_scheduler **sched_list,
+			  unsigned int num_sched_list,
 			  atomic_t *guilty);
 long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout);
 void drm_sched_entity_fini(struct drm_sched_entity *entity);
 void drm_sched_entity_destroy(struct drm_sched_entity *entity);
+void drm_sched_entity_select_rq(struct drm_sched_entity *entity);
+struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity);
 void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
 			       struct drm_sched_entity *entity);
-void drm_sched_entity_set_rq(struct drm_sched_entity *entity,
-			     struct drm_sched_rq *rq);
+void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
+				   enum drm_sched_priority priority);
+bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);
 
 struct drm_sched_fence *drm_sched_fence_create(
 	struct drm_sched_entity *s_entity, void *owner);
 void drm_sched_fence_scheduled(struct drm_sched_fence *fence);
 void drm_sched_fence_finished(struct drm_sched_fence *fence);
-int drm_sched_job_init(struct drm_sched_job *job,
-		       struct drm_sched_entity *entity,
-		       void *owner);
-void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched,
-			    struct drm_sched_job *job);
-void drm_sched_job_recovery(struct drm_gpu_scheduler *sched);
-bool drm_sched_dependency_optimized(struct dma_fence* fence,
-				    struct drm_sched_entity *entity);
-void drm_sched_job_kickout(struct drm_sched_job *s_job);
+
+unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched);
+void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
+			      unsigned long remaining);
+struct drm_gpu_scheduler *
+drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
+		    unsigned int num_sched_list);
 
 #endif
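Taken together, the reworked entity API lets a driver attach one entity to several schedulers and have jobs balanced between them. A rough bring-up sketch under assumed names: my_device, mdev->sched, mdev->entity and my_sched_ops are placeholders, and the numeric limits are arbitrary, not values taken from this header:

static int my_init_scheduling(struct my_device *mdev)
{
	struct drm_gpu_scheduler *sched_list[2];
	unsigned int i;
	int r;

	/* One scheduler per hardware ring, as the kernel-doc above states. */
	for (i = 0; i < ARRAY_SIZE(sched_list); i++) {
		r = drm_sched_init(&mdev->sched[i], &my_sched_ops,
				   64 /* hw_submission */, 2 /* hang_limit */,
				   msecs_to_jiffies(500) /* timeout */,
				   "my-ring");
		if (r)
			return r;
		sched_list[i] = &mdev->sched[i];
	}

	/* The entity may run on any scheduler in sched_list; the core can
	 * re-pick a run queue via drm_sched_entity_select_rq(), which uses
	 * drm_sched_pick_best() and the per-scheduler score to prefer an
	 * idle scheduler. */
	return drm_sched_entity_init(&mdev->entity, DRM_SCHED_PRIORITY_NORMAL,
				     sched_list, ARRAY_SIZE(sched_list),
				     NULL /* guilty */);
}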