forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-11 6778948f9de86c3cfaf36725a7c87dcff9ba247f
kernel/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -35,7 +35,12 @@
 
 #include <linux/kthread.h>
 
+#include "gem/i915_gem_pm.h"
+#include "gt/intel_context.h"
+#include "gt/intel_ring.h"
+
 #include "i915_drv.h"
+#include "i915_gem_gtt.h"
 #include "gvt.h"
 
 #define RING_CTX_OFF(x) \
@@ -53,10 +58,8 @@
 
 static void update_shadow_pdps(struct intel_vgpu_workload *workload)
 {
- struct drm_i915_gem_object *ctx_obj =
- workload->req->hw_context->state->obj;
 struct execlist_ring_context *shadow_ring_context;
- struct page *page;
+ struct intel_context *ctx = workload->req->context;
 
 if (WARN_ON(!workload->shadow_mm))
 return;
@@ -64,11 +67,9 @@
 if (WARN_ON(!atomic_read(&workload->shadow_mm->pincount)))
 return;
 
- page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
- shadow_ring_context = kmap(page);
+ shadow_ring_context = (struct execlist_ring_context *)ctx->lrc_reg_state;
 set_context_pdp_root_pointer(shadow_ring_context,
 (void *)workload->shadow_mm->ppgtt_mm.shadow_pdps);
- kunmap(page);
 }
 
 /*
@@ -79,9 +80,9 @@
 static void sr_oa_regs(struct intel_vgpu_workload *workload,
 u32 *reg_state, bool save)
 {
- struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
- u32 ctx_oactxctrl = dev_priv->perf.oa.ctx_oactxctrl_offset;
- u32 ctx_flexeu0 = dev_priv->perf.oa.ctx_flexeu0_offset;
+ struct drm_i915_private *dev_priv = workload->vgpu->gvt->gt->i915;
+ u32 ctx_oactxctrl = dev_priv->perf.ctx_oactxctrl_offset;
+ u32 ctx_flexeu0 = dev_priv->perf.ctx_flexeu0_offset;
 int i = 0;
 u32 flex_mmio[] = {
 i915_mmio_reg_offset(EU_PERF_CNTL0),
@@ -93,7 +94,7 @@
 i915_mmio_reg_offset(EU_PERF_CNTL6),
 };
 
- if (workload->ring_id != RCS)
+ if (workload->engine->id != RCS0)
 return;
 
 if (save) {
@@ -123,46 +124,24 @@
 {
 struct intel_vgpu *vgpu = workload->vgpu;
 struct intel_gvt *gvt = vgpu->gvt;
- int ring_id = workload->ring_id;
- struct drm_i915_gem_object *ctx_obj =
- workload->req->hw_context->state->obj;
+ struct intel_context *ctx = workload->req->context;
 struct execlist_ring_context *shadow_ring_context;
- struct page *page;
 void *dst;
+ void *context_base;
 unsigned long context_gpa, context_page_num;
+ unsigned long gpa_base; /* first gpa of consecutive GPAs */
+ unsigned long gpa_size; /* size of consecutive GPAs */
+ struct intel_vgpu_submission *s = &vgpu->submission;
 int i;
+ bool skip = false;
+ int ring_id = workload->engine->id;
 
- gvt_dbg_sched("ring id %d workload lrca %x", ring_id,
- workload->ctx_desc.lrca);
+ GEM_BUG_ON(!intel_context_is_pinned(ctx));
 
- context_page_num = gvt->dev_priv->engine[ring_id]->context_size;
+ context_base = (void *) ctx->lrc_reg_state -
+ (LRC_STATE_PN << I915_GTT_PAGE_SHIFT);
 
- context_page_num = context_page_num >> PAGE_SHIFT;
-
- if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
- context_page_num = 19;
-
- i = 2;
-
- while (i < context_page_num) {
- context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
- (u32)((workload->ctx_desc.lrca + i) <<
- I915_GTT_PAGE_SHIFT));
- if (context_gpa == INTEL_GVT_INVALID_ADDR) {
- gvt_vgpu_err("Invalid guest context descriptor\n");
- return -EFAULT;
- }
-
- page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
- dst = kmap(page);
- intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
- I915_GTT_PAGE_SIZE);
- kunmap(page);
- i++;
- }
-
- page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
- shadow_ring_context = kmap(page);
+ shadow_ring_context = (void *) ctx->lrc_reg_state;
 
 sr_oa_regs(workload, (u32 *)shadow_ring_context, true);
 #define COPY_REG(name) \
@@ -178,7 +157,7 @@
 COPY_REG_MASKED(ctx_ctrl);
 COPY_REG(ctx_timestamp);
 
- if (ring_id == RCS) {
+ if (workload->engine->id == RCS0) {
 COPY_REG(bb_per_ctx_ptr);
 COPY_REG(rcs_indirect_ctx);
 COPY_REG(rcs_indirect_ctx_offset);
@@ -194,47 +173,116 @@
 I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
 
 sr_oa_regs(workload, (u32 *)shadow_ring_context, false);
- kunmap(page);
+
+ gvt_dbg_sched("ring %s workload lrca %x, ctx_id %x, ctx gpa %llx",
+ workload->engine->name, workload->ctx_desc.lrca,
+ workload->ctx_desc.context_id,
+ workload->ring_context_gpa);
+
+ /* only need to ensure this context is not pinned/unpinned during the
+ * period from last submission to this this submission.
+ * Upon reaching this function, the currently submitted context is not
+ * supposed to get unpinned. If a misbehaving guest driver ever does
+ * this, it would corrupt itself.
+ */
+ if (s->last_ctx[ring_id].valid &&
+ (s->last_ctx[ring_id].lrca ==
+ workload->ctx_desc.lrca) &&
+ (s->last_ctx[ring_id].ring_context_gpa ==
+ workload->ring_context_gpa))
+ skip = true;
+
+ s->last_ctx[ring_id].lrca = workload->ctx_desc.lrca;
+ s->last_ctx[ring_id].ring_context_gpa = workload->ring_context_gpa;
+
+ if (IS_RESTORE_INHIBIT(shadow_ring_context->ctx_ctrl.val) || skip)
+ return 0;
+
+ s->last_ctx[ring_id].valid = false;
+ context_page_num = workload->engine->context_size;
+ context_page_num = context_page_num >> PAGE_SHIFT;
+
+ if (IS_BROADWELL(gvt->gt->i915) && workload->engine->id == RCS0)
+ context_page_num = 19;
+
+ /* find consecutive GPAs from gma until the first inconsecutive GPA.
+ * read from the continuous GPAs into dst virtual address
+ */
+ gpa_size = 0;
+ for (i = 2; i < context_page_num; i++) {
+ context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
+ (u32)((workload->ctx_desc.lrca + i) <<
+ I915_GTT_PAGE_SHIFT));
+ if (context_gpa == INTEL_GVT_INVALID_ADDR) {
+ gvt_vgpu_err("Invalid guest context descriptor\n");
+ return -EFAULT;
+ }
+
+ if (gpa_size == 0) {
+ gpa_base = context_gpa;
+ dst = context_base + (i << I915_GTT_PAGE_SHIFT);
+ } else if (context_gpa != gpa_base + gpa_size)
+ goto read;
+
+ gpa_size += I915_GTT_PAGE_SIZE;
+
+ if (i == context_page_num - 1)
+ goto read;
+
+ continue;
+
+read:
+ intel_gvt_hypervisor_read_gpa(vgpu, gpa_base, dst, gpa_size);
+ gpa_base = context_gpa;
+ gpa_size = I915_GTT_PAGE_SIZE;
+ dst = context_base + (i << I915_GTT_PAGE_SHIFT);
+ }
+ s->last_ctx[ring_id].valid = true;
 return 0;
 }
 
-static inline bool is_gvt_request(struct i915_request *req)
+static inline bool is_gvt_request(struct i915_request *rq)
 {
- return i915_gem_context_force_single_submission(req->gem_context);
+ return intel_context_force_single_submission(rq->context);
 }
 
-static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id)
+static void save_ring_hw_state(struct intel_vgpu *vgpu,
+ const struct intel_engine_cs *engine)
 {
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- u32 ring_base = dev_priv->engine[ring_id]->mmio_base;
+ struct intel_uncore *uncore = engine->uncore;
 i915_reg_t reg;
 
- reg = RING_INSTDONE(ring_base);
- vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
- reg = RING_ACTHD(ring_base);
- vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
- reg = RING_ACTHD_UDW(ring_base);
- vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
+ reg = RING_INSTDONE(engine->mmio_base);
+ vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) =
+ intel_uncore_read(uncore, reg);
+
+ reg = RING_ACTHD(engine->mmio_base);
+ vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) =
+ intel_uncore_read(uncore, reg);
+
+ reg = RING_ACTHD_UDW(engine->mmio_base);
+ vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) =
+ intel_uncore_read(uncore, reg);
 }
 
 static int shadow_context_status_change(struct notifier_block *nb,
 unsigned long action, void *data)
 {
- struct i915_request *req = data;
+ struct i915_request *rq = data;
 struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
- shadow_ctx_notifier_block[req->engine->id]);
+ shadow_ctx_notifier_block[rq->engine->id]);
 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
- enum intel_engine_id ring_id = req->engine->id;
+ enum intel_engine_id ring_id = rq->engine->id;
 struct intel_vgpu_workload *workload;
 unsigned long flags;
 
- if (!is_gvt_request(req)) {
+ if (!is_gvt_request(rq)) {
 spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
 if (action == INTEL_CONTEXT_SCHEDULE_IN &&
 scheduler->engine_owner[ring_id]) {
 /* Switch ring from vGPU to host. */
 intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
- NULL, ring_id);
+ NULL, rq->engine);
 scheduler->engine_owner[ring_id] = NULL;
 }
 spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);
@@ -252,7 +300,7 @@
 if (workload->vgpu != scheduler->engine_owner[ring_id]) {
 /* Switch ring from host to vGPU or vGPU to vGPU. */
 intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
- workload->vgpu, ring_id);
+ workload->vgpu, rq->engine);
 scheduler->engine_owner[ring_id] = workload->vgpu;
 } else
 gvt_dbg_sched("skip ring %d mmio switch for vgpu%d\n",
@@ -261,11 +309,11 @@
 atomic_set(&workload->shadow_ctx_active, 1);
 break;
 case INTEL_CONTEXT_SCHEDULE_OUT:
- save_ring_hw_state(workload->vgpu, ring_id);
+ save_ring_hw_state(workload->vgpu, rq->engine);
 atomic_set(&workload->shadow_ctx_active, 0);
 break;
 case INTEL_CONTEXT_SCHEDULE_PREEMPTED:
- save_ring_hw_state(workload->vgpu, ring_id);
+ save_ring_hw_state(workload->vgpu, rq->engine);
 break;
 default:
 WARN_ON(1);
@@ -275,19 +323,21 @@
 return NOTIFY_OK;
 }
 
-static void shadow_context_descriptor_update(struct intel_context *ce)
+static void
+shadow_context_descriptor_update(struct intel_context *ce,
+ struct intel_vgpu_workload *workload)
 {
- u64 desc = 0;
+ u64 desc = ce->lrc.desc;
 
- desc = ce->lrc_desc;
-
- /* Update bits 0-11 of the context descriptor which includes flags
+ /*
+ * Update bits 0-11 of the context descriptor which includes flags
 * like GEN8_CTX_* cached in desc_template
 */
- desc &= U64_MAX << 12;
- desc |= ce->gem_context->desc_template & ((1ULL << 12) - 1);
+ desc &= ~(0x3ull << GEN8_CTX_ADDRESSING_MODE_SHIFT);
+ desc |= (u64)workload->ctx_desc.addressing_mode <<
+ GEN8_CTX_ADDRESSING_MODE_SHIFT;
 
- ce->lrc_desc = desc;
+ ce->lrc.desc = desc;
 }
 
 static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
@@ -296,10 +346,28 @@
 struct i915_request *req = workload->req;
 void *shadow_ring_buffer_va;
 u32 *cs;
+ int err;
 
- if ((IS_KABYLAKE(req->i915) || IS_BROXTON(req->i915))
- && is_inhibit_context(req->hw_context))
+ if (IS_GEN(req->engine->i915, 9) && is_inhibit_context(req->context))
 intel_vgpu_restore_inhibit_context(vgpu, req);
+
+ /*
+ * To track whether a request has started on HW, we can emit a
+ * breadcrumb at the beginning of the request and check its
+ * timeline's HWSP to see if the breadcrumb has advanced past the
+ * start of this request. Actually, the request must have the
+ * init_breadcrumb if its timeline set has_init_bread_crumb, or the
+ * scheduler might get a wrong state of it during reset. Since the
+ * requests from gvt always set the has_init_breadcrumb flag, here
+ * need to do the emit_init_breadcrumb for all the requests.
+ */
+ if (req->engine->emit_init_breadcrumb) {
+ err = req->engine->emit_init_breadcrumb(req);
+ if (err) {
+ gvt_vgpu_err("fail to emit init breadcrumb\n");
+ return err;
+ }
+ }
 
 /* allocate shadow ring buffer */
 cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32));
@@ -330,6 +398,61 @@
 
 i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj);
 i915_gem_object_put(wa_ctx->indirect_ctx.obj);
+
+ wa_ctx->indirect_ctx.obj = NULL;
+ wa_ctx->indirect_ctx.shadow_va = NULL;
+}
+
+static void set_dma_address(struct i915_page_directory *pd, dma_addr_t addr)
+{
+ struct scatterlist *sg = pd->pt.base->mm.pages->sgl;
+
+ /* This is not a good idea */
+ sg->dma_address = addr;
+}
+
+static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
+ struct intel_context *ce)
+{
+ struct intel_vgpu_mm *mm = workload->shadow_mm;
+ struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(ce->vm);
+ int i = 0;
+
+ if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
+ set_dma_address(ppgtt->pd, mm->ppgtt_mm.shadow_pdps[0]);
+ } else {
+ for (i = 0; i < GVT_RING_CTX_NR_PDPS; i++) {
+ struct i915_page_directory * const pd =
+ i915_pd_entry(ppgtt->pd, i);
+ /* skip now as current i915 ppgtt alloc won't allocate
+ top level pdp for non 4-level table, won't impact
+ shadow ppgtt. */
+ if (!pd)
+ break;
+
+ set_dma_address(pd, mm->ppgtt_mm.shadow_pdps[i]);
+ }
+ }
+}
+
+static int
+intel_gvt_workload_req_alloc(struct intel_vgpu_workload *workload)
+{
+ struct intel_vgpu *vgpu = workload->vgpu;
+ struct intel_vgpu_submission *s = &vgpu->submission;
+ struct i915_request *rq;
+
+ if (workload->req)
+ return 0;
+
+ rq = i915_request_create(s->shadow[workload->engine->id]);
+ if (IS_ERR(rq)) {
+ gvt_vgpu_err("fail to allocate gem request\n");
+ return PTR_ERR(rq);
+ }
+
+ workload->req = i915_request_get(rq);
+ return 0;
 }
 
 /**
@@ -344,69 +467,33 @@
 {
 struct intel_vgpu *vgpu = workload->vgpu;
 struct intel_vgpu_submission *s = &vgpu->submission;
- struct i915_gem_context *shadow_ctx = s->shadow_ctx;
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- struct intel_engine_cs *engine = dev_priv->engine[workload->ring_id];
- struct intel_context *ce;
- struct i915_request *rq;
 int ret;
 
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
+ lockdep_assert_held(&vgpu->vgpu_lock);
 
- if (workload->req)
+ if (workload->shadow)
 return 0;
 
- /* pin shadow context by gvt even the shadow context will be pinned
- * when i915 alloc request. That is because gvt will update the guest
- * context from shadow context when workload is completed, and at that
- * moment, i915 may already unpined the shadow context to make the
- * shadow_ctx pages invalid. So gvt need to pin itself. After update
- * the guest context, gvt can unpin the shadow_ctx safely.
- */
- ce = intel_context_pin(shadow_ctx, engine);
- if (IS_ERR(ce)) {
- gvt_vgpu_err("fail to pin shadow context\n");
- return PTR_ERR(ce);
- }
-
- shadow_ctx->desc_template &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT);
- shadow_ctx->desc_template |= workload->ctx_desc.addressing_mode <<
- GEN8_CTX_ADDRESSING_MODE_SHIFT;
-
- if (!test_and_set_bit(workload->ring_id, s->shadow_ctx_desc_updated))
- shadow_context_descriptor_update(ce);
+ if (!test_and_set_bit(workload->engine->id, s->shadow_ctx_desc_updated))
+ shadow_context_descriptor_update(s->shadow[workload->engine->id],
+ workload);
 
 ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
 if (ret)
- goto err_unpin;
+ return ret;
 
- if ((workload->ring_id == RCS) &&
- (workload->wa_ctx.indirect_ctx.size != 0)) {
+ if (workload->engine->id == RCS0 &&
+ workload->wa_ctx.indirect_ctx.size) {
 ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
 if (ret)
 goto err_shadow;
 }
 
- rq = i915_request_alloc(engine, shadow_ctx);
- if (IS_ERR(rq)) {
- gvt_vgpu_err("fail to allocate gem request\n");
- ret = PTR_ERR(rq);
- goto err_shadow;
- }
- workload->req = i915_request_get(rq);
-
- ret = populate_shadow_context(workload);
- if (ret)
- goto err_req;
-
+ workload->shadow = true;
 return 0;
-err_req:
- rq = fetch_and_zero(&workload->req);
- i915_request_put(rq);
+
 err_shadow:
 release_shadow_wa_ctx(&workload->wa_ctx);
-err_unpin:
- intel_context_unpin(ce);
 return ret;
 }
@@ -431,26 +518,18 @@
 bb->bb_start_cmd_va = workload->shadow_ring_buffer_va
 + bb->bb_offset;
 
- if (bb->ppgtt) {
- /* for non-priv bb, scan&shadow is only for
- * debugging purpose, so the content of shadow bb
- * is the same as original bb. Therefore,
- * here, rather than switch to shadow bb's gma
- * address, we directly use original batch buffer's
- * gma address, and send original bb to hardware
- * directly
- */
- if (bb->clflush & CLFLUSH_AFTER) {
- drm_clflush_virt_range(bb->va,
- bb->obj->base.size);
- bb->clflush &= ~CLFLUSH_AFTER;
- }
- i915_gem_obj_finish_shmem_access(bb->obj);
- bb->accessing = false;
-
- } else {
+ /*
+ * For non-priv bb, scan&shadow is only for
+ * debugging purpose, so the content of shadow bb
+ * is the same as original bb. Therefore,
+ * here, rather than switch to shadow bb's gma
+ * address, we directly use original batch buffer's
+ * gma address, and send original bb to hardware
+ * directly
+ */
+ if (!bb->ppgtt) {
 bb->vma = i915_gem_object_ggtt_pin(bb->obj,
- NULL, 0, 0, 0);
+ NULL, 0, 0, 0);
 if (IS_ERR(bb->vma)) {
 ret = PTR_ERR(bb->vma);
@@ -461,27 +540,15 @@
 if (gmadr_bytes == 8)
 bb->bb_start_cmd_va[2] = 0;
 
- /* No one is going to touch shadow bb from now on. */
- if (bb->clflush & CLFLUSH_AFTER) {
- drm_clflush_virt_range(bb->va,
- bb->obj->base.size);
- bb->clflush &= ~CLFLUSH_AFTER;
- }
-
- ret = i915_gem_object_set_to_gtt_domain(bb->obj,
- false);
- if (ret)
- goto err;
-
- i915_gem_obj_finish_shmem_access(bb->obj);
- bb->accessing = false;
-
 ret = i915_vma_move_to_active(bb->vma,
 workload->req,
 0);
 if (ret)
 goto err;
 }
+
+ /* No one is going to touch shadow bb from now on. */
+ i915_gem_object_flush_map(bb->obj);
 }
 return 0;
 err:
@@ -495,7 +562,7 @@
 container_of(wa_ctx, struct intel_vgpu_workload, wa_ctx);
 struct i915_request *rq = workload->req;
 struct execlist_ring_context *shadow_ring_context =
- (struct execlist_ring_context *)rq->hw_context->lrc_reg_state;
+ (struct execlist_ring_context *)rq->context->lrc_reg_state;
 
 shadow_ring_context->bb_per_ctx_ptr.val =
 (shadow_ring_context->bb_per_ctx_ptr.val &
@@ -534,10 +601,14 @@
 return 0;
 }
 
+static void update_vreg_in_ctx(struct intel_vgpu_workload *workload)
+{
+ vgpu_vreg_t(workload->vgpu, RING_START(workload->engine->mmio_base)) =
+ workload->rb_start;
+}
+
 static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 {
- struct intel_vgpu *vgpu = workload->vgpu;
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 struct intel_vgpu_shadow_bb *bb, *pos;
 
 if (list_empty(&workload->shadow_bb))
@@ -546,32 +617,26 @@
 bb = list_first_entry(&workload->shadow_bb,
 struct intel_vgpu_shadow_bb, list);
 
- mutex_lock(&dev_priv->drm.struct_mutex);
-
 list_for_each_entry_safe(bb, pos, &workload->shadow_bb, list) {
 if (bb->obj) {
- if (bb->accessing)
- i915_gem_obj_finish_shmem_access(bb->obj);
-
 if (bb->va && !IS_ERR(bb->va))
 i915_gem_object_unpin_map(bb->obj);
 
- if (bb->vma && !IS_ERR(bb->vma)) {
+ if (bb->vma && !IS_ERR(bb->vma))
 i915_vma_unpin(bb->vma);
- i915_vma_close(bb->vma);
- }
- __i915_gem_object_release_unless_active(bb->obj);
+
+ i915_gem_object_put(bb->obj);
 }
 list_del(&bb->list);
 kfree(bb);
 }
-
- mutex_unlock(&dev_priv->drm.struct_mutex);
 }
 
-static int prepare_workload(struct intel_vgpu_workload *workload)
+static int
+intel_vgpu_shadow_mm_pin(struct intel_vgpu_workload *workload)
 {
 struct intel_vgpu *vgpu = workload->vgpu;
+ struct intel_vgpu_mm *m;
 int ret = 0;
 
 ret = intel_vgpu_pin_mm(workload->shadow_mm);
@@ -580,7 +645,61 @@
 return ret;
 }
 
+ if (workload->shadow_mm->type != INTEL_GVT_MM_PPGTT ||
+ !workload->shadow_mm->ppgtt_mm.shadowed) {
+ gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
+ return -EINVAL;
+ }
+
+ if (!list_empty(&workload->lri_shadow_mm)) {
+ list_for_each_entry(m, &workload->lri_shadow_mm,
+ ppgtt_mm.link) {
+ ret = intel_vgpu_pin_mm(m);
+ if (ret) {
+ list_for_each_entry_from_reverse(m,
+ &workload->lri_shadow_mm,
+ ppgtt_mm.link)
+ intel_vgpu_unpin_mm(m);
+ gvt_vgpu_err("LRI shadow ppgtt fail to pin\n");
+ break;
+ }
+ }
+ }
+
+ if (ret)
+ intel_vgpu_unpin_mm(workload->shadow_mm);
+
+ return ret;
+}
+
+static void
+intel_vgpu_shadow_mm_unpin(struct intel_vgpu_workload *workload)
+{
+ struct intel_vgpu_mm *m;
+
+ if (!list_empty(&workload->lri_shadow_mm)) {
+ list_for_each_entry(m, &workload->lri_shadow_mm,
+ ppgtt_mm.link)
+ intel_vgpu_unpin_mm(m);
+ }
+ intel_vgpu_unpin_mm(workload->shadow_mm);
+}
+
+static int prepare_workload(struct intel_vgpu_workload *workload)
+{
+ struct intel_vgpu *vgpu = workload->vgpu;
+ struct intel_vgpu_submission *s = &vgpu->submission;
+ int ret = 0;
+
+ ret = intel_vgpu_shadow_mm_pin(workload);
+ if (ret) {
+ gvt_vgpu_err("fail to pin shadow mm\n");
+ return ret;
+ }
+
 update_shadow_pdps(workload);
+
+ set_context_ppgtt_from_shadow(workload, s->shadow[workload->engine->id]);
 
 ret = intel_vgpu_sync_oos_pages(workload->vgpu);
 if (ret) {
@@ -624,47 +743,60 @@
 err_shadow_batch:
 release_shadow_batch_buffer(workload);
 err_unpin_mm:
- intel_vgpu_unpin_mm(workload->shadow_mm);
+ intel_vgpu_shadow_mm_unpin(workload);
 return ret;
 }
 
 static int dispatch_workload(struct intel_vgpu_workload *workload)
 {
 struct intel_vgpu *vgpu = workload->vgpu;
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- int ring_id = workload->ring_id;
+ struct i915_request *rq;
 int ret;
 
- gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
- ring_id, workload);
+ gvt_dbg_sched("ring id %s prepare to dispatch workload %p\n",
+ workload->engine->name, workload);
 
 mutex_lock(&vgpu->vgpu_lock);
- mutex_lock(&dev_priv->drm.struct_mutex);
+
+ ret = intel_gvt_workload_req_alloc(workload);
+ if (ret)
+ goto err_req;
 
 ret = intel_gvt_scan_and_shadow_workload(workload);
 if (ret)
 goto out;
 
- ret = prepare_workload(workload);
+ ret = populate_shadow_context(workload);
+ if (ret) {
+ release_shadow_wa_ctx(&workload->wa_ctx);
+ goto out;
+ }
 
+ ret = prepare_workload(workload);
 out:
- if (ret)
- workload->status = ret;
+ if (ret) {
+ /* We might still need to add request with
+ * clean ctx to retire it properly..
+ */
+ rq = fetch_and_zero(&workload->req);
+ i915_request_put(rq);
+ }
 
 if (!IS_ERR_OR_NULL(workload->req)) {
- gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
- ring_id, workload->req);
+ gvt_dbg_sched("ring id %s submit workload to i915 %p\n",
+ workload->engine->name, workload->req);
 i915_request_add(workload->req);
 workload->dispatched = true;
 }
-
- mutex_unlock(&dev_priv->drm.struct_mutex);
+err_req:
+ if (ret)
+ workload->status = ret;
 mutex_unlock(&vgpu->vgpu_lock);
 return ret;
 }
 
-static struct intel_vgpu_workload *pick_next_workload(
- struct intel_gvt *gvt, int ring_id)
+static struct intel_vgpu_workload *
+pick_next_workload(struct intel_gvt *gvt, struct intel_engine_cs *engine)
 {
 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
 struct intel_vgpu_workload *workload = NULL;
@@ -676,26 +808,27 @@
 * bail out
 */
 if (!scheduler->current_vgpu) {
- gvt_dbg_sched("ring id %d stop - no current vgpu\n", ring_id);
+ gvt_dbg_sched("ring %s stop - no current vgpu\n", engine->name);
 goto out;
 }
 
 if (scheduler->need_reschedule) {
- gvt_dbg_sched("ring id %d stop - will reschedule\n", ring_id);
+ gvt_dbg_sched("ring %s stop - will reschedule\n", engine->name);
 goto out;
 }
 
- if (list_empty(workload_q_head(scheduler->current_vgpu, ring_id)))
+ if (!scheduler->current_vgpu->active ||
+ list_empty(workload_q_head(scheduler->current_vgpu, engine)))
 goto out;
 
 /*
 * still have current workload, maybe the workload disptacher
 * fail to submit it for some reason, resubmit it.
 */
- if (scheduler->current_workload[ring_id]) {
- workload = scheduler->current_workload[ring_id];
- gvt_dbg_sched("ring id %d still have current workload %p\n",
- ring_id, workload);
+ if (scheduler->current_workload[engine->id]) {
+ workload = scheduler->current_workload[engine->id];
+ gvt_dbg_sched("ring %s still have current workload %p\n",
+ engine->name, workload);
 goto out;
 }
 
@@ -705,13 +838,14 @@
 * will wait the current workload is finished when trying to
 * schedule out a vgpu.
 */
- scheduler->current_workload[ring_id] = container_of(
- workload_q_head(scheduler->current_vgpu, ring_id)->next,
- struct intel_vgpu_workload, list);
+ scheduler->current_workload[engine->id] =
+ list_first_entry(workload_q_head(scheduler->current_vgpu,
+ engine),
+ struct intel_vgpu_workload, list);
 
- workload = scheduler->current_workload[ring_id];
+ workload = scheduler->current_workload[engine->id];
 
- gvt_dbg_sched("ring id %d pick new workload %p\n", ring_id, workload);
+ gvt_dbg_sched("ring %s pick new workload %p\n", engine->name, workload);
 
 atomic_inc(&workload->vgpu->submission.running_workload_num);
 out:
@@ -719,30 +853,89 @@
 return workload;
 }
 
+static void update_guest_pdps(struct intel_vgpu *vgpu,
+ u64 ring_context_gpa, u32 pdp[8])
+{
+ u64 gpa;
+ int i;
+
+ gpa = ring_context_gpa + RING_CTX_OFF(pdps[0].val);
+
+ for (i = 0; i < 8; i++)
+ intel_gvt_hypervisor_write_gpa(vgpu,
+ gpa + i * 8, &pdp[7 - i], 4);
+}
+
+static __maybe_unused bool
+check_shadow_context_ppgtt(struct execlist_ring_context *c, struct intel_vgpu_mm *m)
+{
+ if (m->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
+ u64 shadow_pdp = c->pdps[7].val | (u64) c->pdps[6].val << 32;
+
+ if (shadow_pdp != m->ppgtt_mm.shadow_pdps[0]) {
+ gvt_dbg_mm("4-level context ppgtt not match LRI command\n");
+ return false;
+ }
+ return true;
+ } else {
+ /* see comment in LRI handler in cmd_parser.c */
+ gvt_dbg_mm("invalid shadow mm type\n");
+ return false;
+ }
+}
+
 static void update_guest_context(struct intel_vgpu_workload *workload)
 {
 struct i915_request *rq = workload->req;
 struct intel_vgpu *vgpu = workload->vgpu;
- struct intel_gvt *gvt = vgpu->gvt;
- struct drm_i915_gem_object *ctx_obj = rq->hw_context->state->obj;
 struct execlist_ring_context *shadow_ring_context;
- struct page *page;
+ struct intel_context *ctx = workload->req->context;
+ void *context_base;
 void *src;
 unsigned long context_gpa, context_page_num;
+ unsigned long gpa_base; /* first gpa of consecutive GPAs */
+ unsigned long gpa_size; /* size of consecutive GPAs*/
 int i;
+ u32 ring_base;
+ u32 head, tail;
+ u16 wrap_count;
 
 gvt_dbg_sched("ring id %d workload lrca %x\n", rq->engine->id,
 workload->ctx_desc.lrca);
 
+ GEM_BUG_ON(!intel_context_is_pinned(ctx));
+
+ head = workload->rb_head;
+ tail = workload->rb_tail;
+ wrap_count = workload->guest_rb_head >> RB_HEAD_WRAP_CNT_OFF;
+
+ if (tail < head) {
+ if (wrap_count == RB_HEAD_WRAP_CNT_MAX)
+ wrap_count = 0;
+ else
+ wrap_count += 1;
+ }
+
+ head = (wrap_count << RB_HEAD_WRAP_CNT_OFF) | tail;
+
+ ring_base = rq->engine->mmio_base;
+ vgpu_vreg_t(vgpu, RING_TAIL(ring_base)) = tail;
+ vgpu_vreg_t(vgpu, RING_HEAD(ring_base)) = head;
+
 context_page_num = rq->engine->context_size;
 context_page_num = context_page_num >> PAGE_SHIFT;
 
- if (IS_BROADWELL(gvt->dev_priv) && rq->engine->id == RCS)
+ if (IS_BROADWELL(rq->engine->i915) && rq->engine->id == RCS0)
 context_page_num = 19;
 
- i = 2;
+ context_base = (void *) ctx->lrc_reg_state -
+ (LRC_STATE_PN << I915_GTT_PAGE_SHIFT);
 
- while (i < context_page_num) {
+ /* find consecutive GPAs from gma until the first inconsecutive GPA.
+ * write to the consecutive GPAs from src virtual address
+ */
+ gpa_size = 0;
+ for (i = 2; i < context_page_num; i++) {
 context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
 (u32)((workload->ctx_desc.lrca + i) <<
 I915_GTT_PAGE_SHIFT));
@@ -751,19 +944,39 @@
 return;
 }
 
- page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
- src = kmap(page);
- intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src,
- I915_GTT_PAGE_SIZE);
- kunmap(page);
- i++;
+ if (gpa_size == 0) {
+ gpa_base = context_gpa;
+ src = context_base + (i << I915_GTT_PAGE_SHIFT);
+ } else if (context_gpa != gpa_base + gpa_size)
+ goto write;
+
+ gpa_size += I915_GTT_PAGE_SIZE;
+
+ if (i == context_page_num - 1)
+ goto write;
+
+ continue;
+
+write:
+ intel_gvt_hypervisor_write_gpa(vgpu, gpa_base, src, gpa_size);
+ gpa_base = context_gpa;
+ gpa_size = I915_GTT_PAGE_SIZE;
+ src = context_base + (i << I915_GTT_PAGE_SHIFT);
 }
 
 intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa +
 RING_CTX_OFF(ring_header.val), &workload->rb_tail, 4);
 
- page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
- shadow_ring_context = kmap(page);
+ shadow_ring_context = (void *) ctx->lrc_reg_state;
+
+ if (!list_empty(&workload->lri_shadow_mm)) {
+ struct intel_vgpu_mm *m = list_last_entry(&workload->lri_shadow_mm,
+ struct intel_vgpu_mm,
+ ppgtt_mm.link);
+ GEM_BUG_ON(!check_shadow_context_ppgtt(shadow_ring_context, m));
+ update_guest_pdps(vgpu, workload->ring_context_gpa,
+ (void *)m->ppgtt_mm.guest_pdps);
+ }
 
 #define COPY_REG(name) \
 intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + \
@@ -780,21 +993,19 @@
 (void *)shadow_ring_context +
 sizeof(*shadow_ring_context),
 I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
-
- kunmap(page);
 }
 
 void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
- unsigned long engine_mask)
+ intel_engine_mask_t engine_mask)
 {
 struct intel_vgpu_submission *s = &vgpu->submission;
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
 struct intel_engine_cs *engine;
 struct intel_vgpu_workload *pos, *n;
- unsigned int tmp;
+ intel_engine_mask_t tmp;
 
 /* free the unsubmited workloads in the queues. */
- for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
+ for_each_engine_masked(engine, &dev_priv->gt, engine_mask, tmp) {
 list_for_each_entry_safe(pos, n,
 &s->workload_q_head[engine->id], list) {
 list_del_init(&pos->list);
@@ -837,19 +1048,14 @@
 workload->status = 0;
 }
 
- if (!workload->status && !(vgpu->resetting_eng &
- ENGINE_MASK(ring_id))) {
+ if (!workload->status &&
+ !(vgpu->resetting_eng & BIT(ring_id))) {
 update_guest_context(workload);
 
 for_each_set_bit(event, workload->pending_events,
 INTEL_GVT_EVENT_MAX)
 intel_vgpu_trigger_virtual_event(vgpu, event);
 }
-
- /* unpin shadow ctx as the shadow_ctx update is done */
- mutex_lock(&rq->i915->drm.struct_mutex);
- intel_context_unpin(rq->hw_context);
- mutex_unlock(&rq->i915->drm.struct_mutex);
 
 i915_request_put(fetch_and_zero(&workload->req));
 }
@@ -861,12 +1067,7 @@
 
 list_del_init(&workload->list);
 
- if (!workload->status) {
- release_shadow_batch_buffer(workload);
- release_shadow_wa_ctx(&workload->wa_ctx);
- }
-
- if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) {
+ if (workload->status || vgpu->resetting_eng & BIT(ring_id)) {
 /* if workload->status is not successful means HW GPU
 * has occurred GPU hang or something wrong with i915/GVT,
 * and GVT won't inject context switch interrupt to guest.
@@ -880,10 +1081,13 @@
 * cleaned up during the resetting process later, so doing
 * the workload clean up here doesn't have any impact.
 **/
- intel_vgpu_clean_workloads(vgpu, ENGINE_MASK(ring_id));
+ intel_vgpu_clean_workloads(vgpu, BIT(ring_id));
 }
 
 workload->complete(workload);
+
+ intel_vgpu_shadow_mm_unpin(workload);
+ intel_vgpu_destroy_workload(workload);
 
 atomic_dec(&s->running_workload_num);
 wake_up(&scheduler->workload_complete_wq);
@@ -895,55 +1099,54 @@
 mutex_unlock(&vgpu->vgpu_lock);
 }
 
-struct workload_thread_param {
- struct intel_gvt *gvt;
- int ring_id;
-};
-
-static int workload_thread(void *priv)
+static int workload_thread(void *arg)
 {
- struct workload_thread_param *p = (struct workload_thread_param *)priv;
- struct intel_gvt *gvt = p->gvt;
- int ring_id = p->ring_id;
+ struct intel_engine_cs *engine = arg;
+ const bool need_force_wake = INTEL_GEN(engine->i915) >= 9;
+ struct intel_gvt *gvt = engine->i915->gvt;
 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
 struct intel_vgpu_workload *workload = NULL;
 struct intel_vgpu *vgpu = NULL;
 int ret;
- bool need_force_wake = IS_SKYLAKE(gvt->dev_priv)
- || IS_KABYLAKE(gvt->dev_priv)
- || IS_BROXTON(gvt->dev_priv);
 DEFINE_WAIT_FUNC(wait, woken_wake_function);
 
- kfree(p);
-
- gvt_dbg_core("workload thread for ring %d started\n", ring_id);
+ gvt_dbg_core("workload thread for ring %s started\n", engine->name);
 
 while (!kthread_should_stop()) {
- add_wait_queue(&scheduler->waitq[ring_id], &wait);
+ intel_wakeref_t wakeref;
+
+ add_wait_queue(&scheduler->waitq[engine->id], &wait);
 do {
- workload = pick_next_workload(gvt, ring_id);
+ workload = pick_next_workload(gvt, engine);
 if (workload)
 break;
 wait_woken(&wait, TASK_INTERRUPTIBLE,
 MAX_SCHEDULE_TIMEOUT);
 } while (!kthread_should_stop());
- remove_wait_queue(&scheduler->waitq[ring_id], &wait);
+ remove_wait_queue(&scheduler->waitq[engine->id], &wait);
 
 if (!workload)
 break;
 
- gvt_dbg_sched("ring id %d next workload %p vgpu %d\n",
- workload->ring_id, workload,
- workload->vgpu->id);
+ gvt_dbg_sched("ring %s next workload %p vgpu %d\n",
+ engine->name, workload,
+ workload->vgpu->id);
 
- intel_runtime_pm_get(gvt->dev_priv);
+ wakeref = intel_runtime_pm_get(engine->uncore->rpm);
 
- gvt_dbg_sched("ring id %d will dispatch workload %p\n",
- workload->ring_id, workload);
+ gvt_dbg_sched("ring %s will dispatch workload %p\n",
+ engine->name, workload);
 
 if (need_force_wake)
- intel_uncore_forcewake_get(gvt->dev_priv,
- FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(engine->uncore,
+ FORCEWAKE_ALL);
+ /*
+ * Update the vReg of the vGPU which submitted this
+ * workload. The vGPU may use these registers for checking
+ * the context state. The value comes from GPU commands
+ * in this workload.
+ */
+ update_vreg_in_ctx(workload);
 
 ret = dispatch_workload(workload);
 
@@ -953,21 +1156,21 @@
 goto complete;
 }
 
- gvt_dbg_sched("ring id %d wait workload %p\n",
- workload->ring_id, workload);
+ gvt_dbg_sched("ring %s wait workload %p\n",
+ engine->name, workload);
 i915_request_wait(workload->req, 0, MAX_SCHEDULE_TIMEOUT);
 
 complete:
 gvt_dbg_sched("will complete workload %p, status: %d\n",
- workload, workload->status);
+ workload, workload->status);
 
- complete_current_workload(gvt, ring_id);
+ complete_current_workload(gvt, engine->id);
 
 if (need_force_wake)
- intel_uncore_forcewake_put(gvt->dev_priv,
- FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(engine->uncore,
+ FORCEWAKE_ALL);
 
- intel_runtime_pm_put(gvt->dev_priv);
+ intel_runtime_pm_put(engine->uncore->rpm, wakeref);
 if (ret && (vgpu_is_vm_unhealthy(ret)))
 enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
 }
@@ -996,7 +1199,7 @@
 
 gvt_dbg_core("clean workload scheduler\n");
 
- for_each_engine(engine, gvt->dev_priv, i) {
+ for_each_engine(engine, gvt->gt, i) {
 atomic_notifier_chain_unregister(
 &engine->context_status_notifier,
 &gvt->shadow_ctx_notifier_block[i]);
@@ -1007,7 +1210,6 @@
 int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
 {
 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
- struct workload_thread_param *param = NULL;
 struct intel_engine_cs *engine;
 enum intel_engine_id i;
 int ret;
@@ -1016,20 +1218,11 @@
 
 init_waitqueue_head(&scheduler->workload_complete_wq);
 
- for_each_engine(engine, gvt->dev_priv, i) {
+ for_each_engine(engine, gvt->gt, i) {
 init_waitqueue_head(&scheduler->waitq[i]);
 
- param = kzalloc(sizeof(*param), GFP_KERNEL);
- if (!param) {
- ret = -ENOMEM;
- goto err;
- }
-
- param->gvt = gvt;
- param->ring_id = i;
-
- scheduler->thread[i] = kthread_run(workload_thread, param,
- "gvt workload %d", i);
+ scheduler->thread[i] = kthread_run(workload_thread, engine,
+ "gvt:%s", engine->name);
 if (IS_ERR(scheduler->thread[i])) {
 gvt_err("fail to create workload thread\n");
 ret = PTR_ERR(scheduler->thread[i]);
@@ -1041,12 +1234,30 @@
 atomic_notifier_chain_register(&engine->context_status_notifier,
 &gvt->shadow_ctx_notifier_block[i]);
 }
+
 return 0;
+
 err:
 intel_gvt_clean_workload_scheduler(gvt);
- kfree(param);
- param = NULL;
 return ret;
+}
+
+static void
+i915_context_ppgtt_root_restore(struct intel_vgpu_submission *s,
+ struct i915_ppgtt *ppgtt)
+{
+ int i;
+
+ if (i915_vm_is_4lvl(&ppgtt->vm)) {
+ set_dma_address(ppgtt->pd, s->i915_context_pml4);
+ } else {
+ for (i = 0; i < GEN8_3LVL_PDPES; i++) {
+ struct i915_page_directory * const pd =
+ i915_pd_entry(ppgtt->pd, i);
+
+ set_dma_address(pd, s->i915_context_pdps[i]);
+ }
+ }
 }
 
 /**
@@ -1059,9 +1270,15 @@
 void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
 {
 struct intel_vgpu_submission *s = &vgpu->submission;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
 
 intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
- i915_gem_context_put(s->shadow_ctx);
+
+ i915_context_ppgtt_root_restore(s, i915_vm_to_ppgtt(s->shadow[0]->vm));
+ for_each_engine(engine, vgpu->gvt->gt, id)
+ intel_context_put(s->shadow[id]);
+
 kmem_cache_destroy(s->workloads);
 }
 
@@ -1075,7 +1292,7 @@
 *
 */
 void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
- unsigned long engine_mask)
+ intel_engine_mask_t engine_mask)
 {
 struct intel_vgpu_submission *s = &vgpu->submission;
 
@@ -1084,6 +1301,24 @@
 
 intel_vgpu_clean_workloads(vgpu, engine_mask);
 s->ops->reset(vgpu, engine_mask);
+}
+
+static void
+i915_context_ppgtt_root_save(struct intel_vgpu_submission *s,
+ struct i915_ppgtt *ppgtt)
+{
+ int i;
+
+ if (i915_vm_is_4lvl(&ppgtt->vm)) {
+ s->i915_context_pml4 = px_dma(ppgtt->pd);
+ } else {
+ for (i = 0; i < GEN8_3LVL_PDPES; i++) {
+ struct i915_page_directory * const pd =
+ i915_pd_entry(ppgtt->pd, i);
+
+ s->i915_context_pdps[i] = px_dma(pd);
+ }
+ }
 }
 
 /**
@@ -1098,15 +1333,44 @@
 */
 int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
 {
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
 struct intel_vgpu_submission *s = &vgpu->submission;
- enum intel_engine_id i;
 struct intel_engine_cs *engine;
+ struct i915_ppgtt *ppgtt;
+ enum intel_engine_id i;
 int ret;
 
- s->shadow_ctx = i915_gem_context_create_gvt(
- &vgpu->gvt->dev_priv->drm);
- if (IS_ERR(s->shadow_ctx))
- return PTR_ERR(s->shadow_ctx);
+ ppgtt = i915_ppgtt_create(&i915->gt);
+ if (IS_ERR(ppgtt))
+ return PTR_ERR(ppgtt);
+
+ i915_context_ppgtt_root_save(s, ppgtt);
+
+ for_each_engine(engine, vgpu->gvt->gt, i) {
+ struct intel_context *ce;
+
+ INIT_LIST_HEAD(&s->workload_q_head[i]);
+ s->shadow[i] = ERR_PTR(-EINVAL);
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ ret = PTR_ERR(ce);
+ goto out_shadow_ctx;
+ }
+
+ i915_vm_put(ce->vm);
+ ce->vm = i915_vm_get(&ppgtt->vm);
+ intel_context_set_single_submission(ce);
+
+ /* Max ring buffer size */
+ if (!intel_uc_wants_guc_submission(&engine->gt->uc)) {
+ const unsigned int ring_size = 512 * SZ_4K;
+
+ ce->ring = __intel_context_ring_size(ring_size);
+ }
+
+ s->shadow[i] = ce;
+ }
 
 bitmap_zero(s->shadow_ctx_desc_updated, I915_NUM_ENGINES);
 
@@ -1122,22 +1386,30 @@
 goto out_shadow_ctx;
 }
 
- for_each_engine(engine, vgpu->gvt->dev_priv, i)
- INIT_LIST_HEAD(&s->workload_q_head[i]);
-
 atomic_set(&s->running_workload_num, 0);
 bitmap_zero(s->tlb_handle_pending, I915_NUM_ENGINES);
 
+ memset(s->last_ctx, 0, sizeof(s->last_ctx));
+
+ i915_vm_put(&ppgtt->vm);
 return 0;
 
 out_shadow_ctx:
- i915_gem_context_put(s->shadow_ctx);
+ i915_context_ppgtt_root_restore(s, ppgtt);
+ for_each_engine(engine, vgpu->gvt->gt, i) {
+ if (IS_ERR(s->shadow[i]))
+ break;
+
+ intel_context_put(s->shadow[i]);
+ }
+ i915_vm_put(&ppgtt->vm);
 return ret;
 }
 
 /**
 * intel_vgpu_select_submission_ops - select virtual submission interface
 * @vgpu: a vGPU
+ * @engine_mask: either ALL_ENGINES or target engine mask
 * @interface: expected vGPU virtual submission interface
 *
 * This function is called when guest configures submission interface.
@@ -1147,9 +1419,10 @@
 *
 */
 int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
- unsigned long engine_mask,
+ intel_engine_mask_t engine_mask,
 unsigned int interface)
 {
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
 struct intel_vgpu_submission *s = &vgpu->submission;
 const struct intel_vgpu_submission_ops *ops[] = {
 [INTEL_VGPU_EXECLIST_SUBMISSION] =
@@ -1157,10 +1430,11 @@
 };
 int ret;
 
- if (WARN_ON(interface >= ARRAY_SIZE(ops)))
+ if (drm_WARN_ON(&i915->drm, interface >= ARRAY_SIZE(ops)))
 return -EINVAL;
 
- if (WARN_ON(interface == 0 && engine_mask != ALL_ENGINES))
+ if (drm_WARN_ON(&i915->drm,
+ interface == 0 && engine_mask != ALL_ENGINES))
 return -EINVAL;
 
 if (s->active)
@@ -1190,7 +1464,7 @@
 
 /**
 * intel_vgpu_destroy_workload - destroy a vGPU workload
- * @vgpu: a vGPU
+ * @workload: workload to destroy
 *
 * This function is called when destroy a vGPU workload.
 *
@@ -1199,6 +1473,20 @@
 {
 struct intel_vgpu_submission *s = &workload->vgpu->submission;
 
+ intel_context_unpin(s->shadow[workload->engine->id]);
+ release_shadow_batch_buffer(workload);
+ release_shadow_wa_ctx(&workload->wa_ctx);
+
+ if (!list_empty(&workload->lri_shadow_mm)) {
+ struct intel_vgpu_mm *m, *mm;
+ list_for_each_entry_safe(m, mm, &workload->lri_shadow_mm,
+ ppgtt_mm.link) {
+ list_del(&m->ppgtt_mm.link);
+ intel_vgpu_mm_put(m);
+ }
+ }
+
+ GEM_BUG_ON(!list_empty(&workload->lri_shadow_mm));
 if (workload->shadow_mm)
 intel_vgpu_mm_put(workload->shadow_mm);
 
@@ -1217,6 +1505,7 @@
 
 INIT_LIST_HEAD(&workload->list);
 INIT_LIST_HEAD(&workload->shadow_bb);
+ INIT_LIST_HEAD(&workload->lri_shadow_mm);
 
 init_waitqueue_head(&workload->shadow_ctx_status_wq);
 atomic_set(&workload->shadow_ctx_active, 0);
@@ -1248,7 +1537,7 @@
 struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc;
 struct intel_vgpu_mm *mm;
 struct intel_vgpu *vgpu = workload->vgpu;
- intel_gvt_gtt_type_t root_entry_type;
+ enum intel_gvt_gtt_type root_entry_type;
 u64 pdps[GVT_RING_CTX_NR_PDPS];
 
 switch (desc->addressing_mode) {
@@ -1279,6 +1568,7 @@
 /**
 * intel_vgpu_create_workload - create a vGPU workload
 * @vgpu: a vGPU
+ * @engine: the engine
 * @desc: a guest context descriptor
 *
 * This function is called when creating a vGPU workload.
@@ -1289,16 +1579,17 @@
 *
 */
 struct intel_vgpu_workload *
-intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
+intel_vgpu_create_workload(struct intel_vgpu *vgpu,
+ const struct intel_engine_cs *engine,
 struct execlist_ctx_descriptor_format *desc)
 {
 struct intel_vgpu_submission *s = &vgpu->submission;
- struct list_head *q = workload_q_head(vgpu, ring_id);
+ struct list_head *q = workload_q_head(vgpu, engine);
 struct intel_vgpu_workload *last_workload = NULL;
 struct intel_vgpu_workload *workload = NULL;
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 u64 ring_context_gpa;
 u32 head, tail, start, ctl, ctx_ctl, per_ctx, indirect_ctx;
+ u32 guest_head;
 int ret;
 
 ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
@@ -1314,16 +1605,18 @@
 intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
 RING_CTX_OFF(ring_tail.val), &tail, 4);
 
+ guest_head = head;
+
 head &= RB_HEAD_OFF_MASK;
 tail &= RB_TAIL_OFF_MASK;
 
 list_for_each_entry_reverse(last_workload, q, list) {
 
 if (same_context(&last_workload->ctx_desc, desc)) {
- gvt_dbg_el("ring id %d cur workload == last\n",
- ring_id);
+ gvt_dbg_el("ring %s cur workload == last\n",
+ engine->name);
 gvt_dbg_el("ctx head %x real head %lx\n", head,
- last_workload->rb_tail);
+ last_workload->rb_tail);
 /*
 * cannot use guest context head pointer here,
 * as it might not be updated at this time
@@ -1333,7 +1626,7 @@
 }
 }
 
- gvt_dbg_el("ring id %d begin a new workload\n", ring_id);
+ gvt_dbg_el("ring %s begin a new workload\n", engine->name);
 
 /* record some ring buffer register values for scan and shadow */
 intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
@@ -1343,19 +1636,26 @@
 intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
 RING_CTX_OFF(ctx_ctrl.val), &ctx_ctl, 4);
 
+ if (!intel_gvt_ggtt_validate_range(vgpu, start,
+ _RING_CTL_BUF_SIZE(ctl))) {
+ gvt_vgpu_err("context contain invalid rb at: 0x%x\n", start);
+ return ERR_PTR(-EINVAL);
+ }
+
 workload = alloc_workload(vgpu);
 if (IS_ERR(workload))
 return workload;
 
- workload->ring_id = ring_id;
+ workload->engine = engine;
 workload->ctx_desc = *desc;
 workload->ring_context_gpa = ring_context_gpa;
 workload->rb_head = head;
+ workload->guest_rb_head = guest_head;
 workload->rb_tail = tail;
 workload->rb_start = start;
 workload->rb_ctl = ctl;
 
- if (ring_id == RCS) {
+ if (engine->id == RCS0) {
 intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
 RING_CTX_OFF(bb_per_ctx_ptr.val), &per_ctx, 4);
 intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
@@ -1366,13 +1666,35 @@
 workload->wa_ctx.indirect_ctx.size =
 (indirect_ctx & INDIRECT_CTX_SIZE_MASK) *
 CACHELINE_BYTES;
+
+ if (workload->wa_ctx.indirect_ctx.size != 0) {
+ if (!intel_gvt_ggtt_validate_range(vgpu,
+ workload->wa_ctx.indirect_ctx.guest_gma,
+ workload->wa_ctx.indirect_ctx.size)) {
+ gvt_vgpu_err("invalid wa_ctx at: 0x%lx\n",
+ workload->wa_ctx.indirect_ctx.guest_gma);
+ kmem_cache_free(s->workloads, workload);
+ return ERR_PTR(-EINVAL);
+ }
+ }
+
 workload->wa_ctx.per_ctx.guest_gma =
 per_ctx & PER_CTX_ADDR_MASK;
 workload->wa_ctx.per_ctx.valid = per_ctx & 1;
+ if (workload->wa_ctx.per_ctx.valid) {
+ if (!intel_gvt_ggtt_validate_range(vgpu,
+ workload->wa_ctx.per_ctx.guest_gma,
+ CACHELINE_BYTES)) {
+ gvt_vgpu_err("invalid per_ctx at: 0x%lx\n",
+ workload->wa_ctx.per_ctx.guest_gma);
+ kmem_cache_free(s->workloads, workload);
+ return ERR_PTR(-EINVAL);
+ }
+ }
 }
 
- gvt_dbg_el("workload %p ring id %d head %x tail %x start %x ctl %x\n",
- workload, ring_id, head, tail, start, ctl);
+ gvt_dbg_el("workload %p ring %s head %x tail %x start %x ctl %x\n",
+ workload, engine->name, head, tail, start, ctl);
 
 ret = prepare_mm(workload);
 if (ret) {
@@ -1383,17 +1705,22 @@
 /* Only scan and shadow the first workload in the queue
 * as there is only one pre-allocated buf-obj for shadow.
 */
- if (list_empty(workload_q_head(vgpu, ring_id))) {
- intel_runtime_pm_get(dev_priv);
- mutex_lock(&dev_priv->drm.struct_mutex);
- ret = intel_gvt_scan_and_shadow_workload(workload);
- mutex_unlock(&dev_priv->drm.struct_mutex);
- intel_runtime_pm_put(dev_priv);
+ if (list_empty(q)) {
+ intel_wakeref_t wakeref;
+
+ with_intel_runtime_pm(engine->gt->uncore->rpm, wakeref)
+ ret = intel_gvt_scan_and_shadow_workload(workload);
 }
 
 if (ret) {
 if (vgpu_is_vm_unhealthy(ret))
 enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
+ intel_vgpu_destroy_workload(workload);
+ return ERR_PTR(ret);
+ }
+
+ ret = intel_context_pin(s->shadow[engine->id]);
+ if (ret) {
 intel_vgpu_destroy_workload(workload);
 return ERR_PTR(ret);
 }
@@ -1408,7 +1735,7 @@
 void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload)
 {
 list_add_tail(&workload->list,
- workload_q_head(workload->vgpu, workload->ring_id));
+ workload_q_head(workload->vgpu, workload->engine));
 intel_gvt_kick_schedule(workload->vgpu->gvt);
- wake_up(&workload->vgpu->gvt->scheduler.waitq[workload->ring_id]);
+ wake_up(&workload->vgpu->gvt->scheduler.waitq[workload->engine->id]);
 }
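
The populate_shadow_context()/update_guest_context() rework in this diff stops copying the guest context one 4 KiB page at a time (kmap plus one hypervisor access per page) and instead batches runs of guest-physically-contiguous pages into a single intel_gvt_hypervisor_read_gpa()/write_gpa() call. Below is a minimal, self-contained user-space sketch of that coalescing pattern only; gma_to_gpa() and read_gpa() are stand-ins for the GVT helpers, NR_PAGES and the page layout are made up for illustration, and the real loop additionally starts at page 2 because the first pages are handled separately.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define NR_PAGES   8 /* illustrative context size, not the real value */

/* Stand-in for intel_vgpu_gma_to_gpa(): pages 0-4 land in one contiguous
 * guest-physical run, pages 5-7 in another run elsewhere. */
static uint64_t gma_to_gpa(unsigned int i)
{
	return (i < 5 ? 0x100000 : 0x800000 - 5 * PAGE_SIZE) +
	       (uint64_t)i * PAGE_SIZE;
}

/* Stand-in for intel_gvt_hypervisor_read_gpa(): just log the request. */
static void read_gpa(uint64_t gpa, void *dst, unsigned long size)
{
	printf("read %lu bytes at gpa 0x%llx -> %p\n",
	       size, (unsigned long long)gpa, dst);
}

int main(void)
{
	static char context_base[NR_PAGES * PAGE_SIZE];
	uint64_t gpa_base = 0;      /* first GPA of the current contiguous run */
	unsigned long gpa_size = 0; /* bytes accumulated in the current run */
	void *dst = NULL;
	unsigned int i;

	for (i = 0; i < NR_PAGES; i++) {
		uint64_t gpa = gma_to_gpa(i);

		if (gpa_size == 0) {
			/* start a new run at this page */
			gpa_base = gpa;
			dst = context_base + ((size_t)i << PAGE_SHIFT);
		} else if (gpa != gpa_base + gpa_size) {
			/* discontinuity: flush the finished run, start over */
			read_gpa(gpa_base, dst, gpa_size);
			gpa_base = gpa;
			gpa_size = 0;
			dst = context_base + ((size_t)i << PAGE_SHIFT);
		}
		gpa_size += PAGE_SIZE;
	}
	if (gpa_size) /* flush the trailing run */
		read_gpa(gpa_base, dst, gpa_size);
	return 0;
}

Built with any C compiler, this prints two coalesced reads (pages 0-4, then 5-7) instead of eight single-page ones, which is what the gpa_base/gpa_size bookkeeping added by this change achieves in the kernel code.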