forked from ~ljy/RK356X_SDK_RELEASE

Author: hc
Date:   2023-12-11
Commit: 6778948f9de86c3cfaf36725a7c87dcff9ba247f
File:   kernel/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -39,8 +39,8 @@
 	enum intel_engine_id i;
 	struct intel_engine_cs *engine;
 
-	for_each_engine(engine, vgpu->gvt->dev_priv, i) {
-		if (!list_empty(workload_q_head(vgpu, i)))
+	for_each_engine(engine, vgpu->gvt->gt, i) {
+		if (!list_empty(workload_q_head(vgpu, engine)))
 			return true;
 	}
 
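This hunk follows the upstream i915 rework of engine iteration: for_each_engine() now walks the engines of an intel_gt (vgpu->gvt->gt) instead of drm_i915_private, and workload_q_head() is keyed by the intel_engine_cs pointer rather than a bare ring index. Below is a minimal user-space analogue of that iterator pattern; every name in it is illustrative, not taken from i915.

	/* Minimal user-space analogue of the for_each_engine() pattern
	 * above. All names here are illustrative, not i915 definitions. */
	#include <stdio.h>

	enum engine_id { RCS0, BCS0, VCS0, NUM_ENGINES };

	struct engine {
		enum engine_id id;
		const char *name;
	};

	struct gt {
		struct engine engines[NUM_ENGINES];
	};

	/* Yields the engine pointer and its id together, so helpers can
	 * take the typed pointer (as workload_q_head(vgpu, engine) now
	 * does) instead of a bare index. */
	#define for_each_engine(e, gt, i) \
		for ((i) = 0; (i) < NUM_ENGINES && ((e) = &(gt)->engines[(i)]); (i)++)

	int main(void)
	{
		struct gt gt = { .engines = {
			{ RCS0, "rcs0" }, { BCS0, "bcs0" }, { VCS0, "vcs0" },
		} };
		struct engine *e;
		int i;

		for_each_engine(e, &gt, i)
			printf("engine %d: %s\n", e->id, e->name);
		return 0;
	}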
@@ -94,7 +94,7 @@
 {
 	struct vgpu_sched_data *vgpu_data;
 	struct list_head *pos;
-	static uint64_t stage_check;
+	static u64 stage_check;
 	int stage = stage_check++ % GVT_TS_BALANCE_STAGE_NUM;
 
 	/* The timeslice accumulation reset at stage 0, which is
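The uint64_t → u64 change is kernel-style cleanup: in-kernel code prefers the kernel's fixed-width typedefs from <linux/types.h> over the <stdint.h> names, with no change in behaviour. The stage rotation this counter feeds can be checked standalone; in the sketch below, 10 stands in for GVT_TS_BALANCE_STAGE_NUM (defined elsewhere in gvt), and the u64 typedef exists only so the snippet builds outside the kernel.

	/* Standalone check of the stage rotation above; placeholder
	 * value and typedef, not the kernel's definitions. */
	#include <stdio.h>

	typedef unsigned long long u64;
	#define GVT_TS_BALANCE_STAGE_NUM 10

	int main(void)
	{
		static u64 stage_check;
		int n;

		for (n = 0; n < 12; n++) {
			int stage = stage_check++ % GVT_TS_BALANCE_STAGE_NUM;
			printf("tick %d -> stage %d\n", n, stage);
		}
		return 0;
	}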
@@ -152,8 +152,8 @@
 	scheduler->need_reschedule = true;
 
 	/* still have uncompleted workload? */
-	for_each_engine(engine, gvt->dev_priv, i) {
-		if (scheduler->current_workload[i])
+	for_each_engine(engine, gvt->gt, i) {
+		if (scheduler->current_workload[engine->id])
 			return;
 	}
 
@@ -169,8 +169,8 @@
 	scheduler->need_reschedule = false;
 
 	/* wake up workload dispatch thread */
-	for_each_engine(engine, gvt->dev_priv, i)
-		wake_up(&scheduler->waitq[i]);
+	for_each_engine(engine, gvt->gt, i)
+		wake_up(&scheduler->waitq[engine->id]);
 }
 
 static struct intel_vgpu *find_busy_vgpu(struct gvt_sched_data *sched_data)
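In the two hunks above, the per-engine arrays (current_workload[], waitq[]) are now indexed by engine->id instead of by the loop counter. for_each_engine() visits only the engines actually present on the GT, so tying the index to the engine object rather than to loop bookkeeping keeps the lookup correct regardless of how the iterator enumerates. A sketch of that pattern with a sparse engine table; names are again illustrative, not i915 code.

	/* Sketch: per-engine arrays stay sized for all possible ids, the
	 * iterator skips absent engines, and engine->id is the index. */
	#include <stdio.h>

	enum engine_id { RCS0, BCS0, VCS0, NUM_ENGINES };

	struct engine {
		enum engine_id id;
	};

	struct gt {
		struct engine *engines[NUM_ENGINES];	/* NULL => absent */
	};

	#define for_each_engine(e, gt, i) \
		for ((i) = 0; (i) < NUM_ENGINES; (i)++) \
			if (((e) = (gt)->engines[(i)]) != NULL)

	int main(void)
	{
		struct engine rcs = { RCS0 }, vcs = { VCS0 };
		struct gt gt = { .engines = { [RCS0] = &rcs, [VCS0] = &vcs } };
		int waitq_wakeups[NUM_ENGINES] = { 0 };
		struct engine *e;
		int i;

		for_each_engine(e, &gt, i)
			waitq_wakeups[e->id]++;	/* mirrors waitq[engine->id] */

		for (i = 0; i < NUM_ENGINES; i++)
			printf("engine %d: %d wakeup(s)\n", i, waitq_wakeups[i]);
		return 0;
	}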
@@ -444,9 +444,10 @@
 {
 	struct intel_gvt_workload_scheduler *scheduler =
 		&vgpu->gvt->scheduler;
-	int ring_id;
 	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
 
 	if (!vgpu_data->active)
 		return;
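The declaration changes reflect the pointer chain this code now assumes: struct intel_gvt hangs off an intel_gt, and the i915 device is reached through it as gt->i915. Roughly, as implied by the diff (all other members omitted):

	/* Rough shape implied by vgpu->gvt->gt->i915 in the hunk above;
	 * everything else about these structs is omitted. */
	struct drm_i915_private;

	struct intel_gt {
		struct drm_i915_private *i915;
		/* ... */
	};

	struct intel_gvt {
		struct intel_gt *gt;
		/* ... */
	};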
@@ -465,15 +466,15 @@
 		scheduler->current_vgpu = NULL;
 	}
 
-	intel_runtime_pm_get(dev_priv);
+	intel_runtime_pm_get(&dev_priv->runtime_pm);
 	spin_lock_bh(&scheduler->mmio_context_lock);
-	for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
-		if (scheduler->engine_owner[ring_id] == vgpu) {
-			intel_gvt_switch_mmio(vgpu, NULL, ring_id);
-			scheduler->engine_owner[ring_id] = NULL;
+	for_each_engine(engine, vgpu->gvt->gt, id) {
+		if (scheduler->engine_owner[engine->id] == vgpu) {
+			intel_gvt_switch_mmio(vgpu, NULL, engine);
+			scheduler->engine_owner[engine->id] = NULL;
 		}
 	}
 	spin_unlock_bh(&scheduler->mmio_context_lock);
-	intel_runtime_pm_put(dev_priv);
+	intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
 	mutex_unlock(&vgpu->gvt->sched_lock);
 }
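The runtime-PM calls now operate on dev_priv->runtime_pm (a struct intel_runtime_pm) rather than on the device, and the release side becomes intel_runtime_pm_put_unchecked(). In this API, intel_runtime_pm_get() returns a wakeref cookie that the checked intel_runtime_pm_put() expects back for leak tracking; since this function never stores the cookie, the unchecked variant is the right release. A toy model of that cookie pattern (illustrative names, not the i915 implementation):

	/* Toy model of the get/put wakeref-cookie pattern. */
	#include <assert.h>
	#include <stdio.h>

	typedef unsigned long wakeref_t;

	struct runtime_pm {
		int refcount;
		wakeref_t next_cookie;
	};

	/* Take a reference and hand back a cookie for this acquisition. */
	static wakeref_t pm_get(struct runtime_pm *pm)
	{
		pm->refcount++;
		return ++pm->next_cookie;
	}

	/* Checked release: the caller proves which acquisition it is
	 * dropping, letting a debug build track leaked references. */
	static void pm_put(struct runtime_pm *pm, wakeref_t cookie)
	{
		assert(cookie != 0);
		pm->refcount--;
	}

	/* Unchecked release, as used above when the cookie was not kept. */
	static void pm_put_unchecked(struct runtime_pm *pm)
	{
		pm->refcount--;
	}

	int main(void)
	{
		struct runtime_pm pm = { 0, 0 };
		wakeref_t wf = pm_get(&pm);

		pm_put(&pm, wf);	/* balanced, checked */

		pm_get(&pm);		/* cookie discarded... */
		pm_put_unchecked(&pm);	/* ...so release unchecked */

		printf("refcount = %d\n", pm.refcount);
		return 0;
	}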