@@ -39,8 +39,8 @@
 	enum intel_engine_id i;
 	struct intel_engine_cs *engine;
 
-	for_each_engine(engine, vgpu->gvt->dev_priv, i) {
-		if (!list_empty(workload_q_head(vgpu, i)))
+	for_each_engine(engine, vgpu->gvt->gt, i) {
+		if (!list_empty(workload_q_head(vgpu, engine)))
 			return true;
 	}
 
@@ -94,7 +94,7 @@
 {
 	struct vgpu_sched_data *vgpu_data;
 	struct list_head *pos;
-	static uint64_t stage_check;
+	static u64 stage_check;
 	int stage = stage_check++ % GVT_TS_BALANCE_STAGE_NUM;
 
 	/* The timeslice accumulation reset at stage 0, which is
@@ -152,8 +152,8 @@
 	scheduler->need_reschedule = true;
 
 	/* still have uncompleted workload? */
-	for_each_engine(engine, gvt->dev_priv, i) {
-		if (scheduler->current_workload[i])
+	for_each_engine(engine, gvt->gt, i) {
+		if (scheduler->current_workload[engine->id])
 			return;
 	}
 
@@ -169,8 +169,8 @@
 	scheduler->need_reschedule = false;
 
 	/* wake up workload dispatch thread */
-	for_each_engine(engine, gvt->dev_priv, i)
-		wake_up(&scheduler->waitq[i]);
+	for_each_engine(engine, gvt->gt, i)
+		wake_up(&scheduler->waitq[engine->id]);
 }
 
 static struct intel_vgpu *find_busy_vgpu(struct gvt_sched_data *sched_data)
@@ -444,9 +444,10 @@
 {
 	struct intel_gvt_workload_scheduler *scheduler =
 		&vgpu->gvt->scheduler;
-	int ring_id;
 	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
 
 	if (!vgpu_data->active)
 		return;
@@ -465,15 +466,15 @@
 		scheduler->current_vgpu = NULL;
 	}
 
-	intel_runtime_pm_get(dev_priv);
+	intel_runtime_pm_get(&dev_priv->runtime_pm);
 	spin_lock_bh(&scheduler->mmio_context_lock);
-	for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
-		if (scheduler->engine_owner[ring_id] == vgpu) {
-			intel_gvt_switch_mmio(vgpu, NULL, ring_id);
-			scheduler->engine_owner[ring_id] = NULL;
+	for_each_engine(engine, vgpu->gvt->gt, id) {
+		if (scheduler->engine_owner[engine->id] == vgpu) {
+			intel_gvt_switch_mmio(vgpu, NULL, engine);
+			scheduler->engine_owner[engine->id] = NULL;
 		}
 	}
 	spin_unlock_bh(&scheduler->mmio_context_lock);
-	intel_runtime_pm_put(dev_priv);
+	intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
 	mutex_unlock(&vgpu->gvt->sched_lock);
 }
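
Note on the pattern: the hunks above replace raw ring-index loops over I915_NUM_ENGINES with the engine iterator keyed off the intel_gt, so per-engine state (waitq, engine_owner, current_workload) is indexed by engine->id and only engines that actually exist are visited. Below is a minimal userspace sketch of that idiom; mock_gt, mock_engine and mock_for_each_engine are stand-ins invented here for illustration, not the real i915 definitions.

#include <stdio.h>

/* Mock stand-ins for intel_gt / intel_engine_cs (hypothetical, not i915 types). */
#define MOCK_NUM_ENGINES 4

struct mock_engine {
	int id;            /* mirrors engine->id used to index per-engine arrays */
	const char *name;
};

struct mock_gt {
	struct mock_engine *engine[MOCK_NUM_ENGINES]; /* NULL where no engine exists */
};

/* Visit only the engines present on this "gt", skipping holes in the array. */
#define mock_for_each_engine(engine__, gt__, id__) \
	for ((id__) = 0; (id__) < MOCK_NUM_ENGINES; (id__)++) \
		if (((engine__) = (gt__)->engine[(id__)]))

int main(void)
{
	struct mock_engine rcs = { .id = 0, .name = "rcs0" };
	struct mock_engine vcs = { .id = 2, .name = "vcs0" }; /* gap at id 1 */
	struct mock_gt gt = { .engine = { [0] = &rcs, [2] = &vcs } };

	const char *owner[MOCK_NUM_ENGINES] = { 0 };
	struct mock_engine *engine;
	int id;

	/* New-style loop: per-engine state is indexed by engine->id, and the
	 * absent engine (id 1) is never touched, unlike a blind 0..N-1 loop. */
	mock_for_each_engine(engine, &gt, id) {
		owner[engine->id] = engine->name;
		printf("engine %s owns slot %d\n", engine->name, engine->id);
	}

	return 0;
}

In the actual patch this is why the loop variable changes from an integer ring_id to an engine pointer: callees such as intel_gvt_switch_mmio() now take the engine itself, and the index used for the scheduler arrays comes from engine->id.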