@@ -39,28 +39,26 @@
 #define _EL_OFFSET_STATUS_BUF 0x370
 #define _EL_OFFSET_STATUS_PTR 0x3A0
 
-#define execlist_ring_mmio(gvt, ring_id, offset) \
-	(gvt->dev_priv->engine[ring_id]->mmio_base + (offset))
+#define execlist_ring_mmio(e, offset) ((e)->mmio_base + (offset))
 
 #define valid_context(ctx) ((ctx)->valid)
 #define same_context(a, b) (((a)->context_id == (b)->context_id) && \
 		((a)->lrca == (b)->lrca))
 
 static int context_switch_events[] = {
-	[RCS] = RCS_AS_CONTEXT_SWITCH,
-	[BCS] = BCS_AS_CONTEXT_SWITCH,
-	[VCS] = VCS_AS_CONTEXT_SWITCH,
-	[VCS2] = VCS2_AS_CONTEXT_SWITCH,
-	[VECS] = VECS_AS_CONTEXT_SWITCH,
+	[RCS0] = RCS_AS_CONTEXT_SWITCH,
+	[BCS0] = BCS_AS_CONTEXT_SWITCH,
+	[VCS0] = VCS_AS_CONTEXT_SWITCH,
+	[VCS1] = VCS2_AS_CONTEXT_SWITCH,
+	[VECS0] = VECS_AS_CONTEXT_SWITCH,
 };
 
-static int ring_id_to_context_switch_event(int ring_id)
+static int to_context_switch_event(const struct intel_engine_cs *engine)
 {
-	if (WARN_ON(ring_id < RCS ||
-		    ring_id >= ARRAY_SIZE(context_switch_events)))
+	if (WARN_ON(engine->id >= ARRAY_SIZE(context_switch_events)))
 		return -EINVAL;
 
-	return context_switch_events[ring_id];
+	return context_switch_events[engine->id];
 }
 
 static void switch_virtual_execlist_slot(struct intel_vgpu_execlist *execlist)
@@ -94,9 +92,8 @@
 	struct execlist_ctx_descriptor_format *desc = execlist->running_context;
 	struct intel_vgpu *vgpu = execlist->vgpu;
 	struct execlist_status_format status;
-	int ring_id = execlist->ring_id;
-	u32 status_reg = execlist_ring_mmio(vgpu->gvt,
-			ring_id, _EL_OFFSET_STATUS);
+	u32 status_reg =
+		execlist_ring_mmio(execlist->engine, _EL_OFFSET_STATUS);
 
 	status.ldw = vgpu_vreg(vgpu, status_reg);
 	status.udw = vgpu_vreg(vgpu, status_reg + 4);
@@ -125,21 +122,19 @@
 }
 
 static void emulate_csb_update(struct intel_vgpu_execlist *execlist,
-		struct execlist_context_status_format *status,
-		bool trigger_interrupt_later)
+			       struct execlist_context_status_format *status,
+			       bool trigger_interrupt_later)
 {
 	struct intel_vgpu *vgpu = execlist->vgpu;
-	int ring_id = execlist->ring_id;
 	struct execlist_context_status_pointer_format ctx_status_ptr;
 	u32 write_pointer;
 	u32 ctx_status_ptr_reg, ctx_status_buf_reg, offset;
 	unsigned long hwsp_gpa;
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 
-	ctx_status_ptr_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
-			_EL_OFFSET_STATUS_PTR);
-	ctx_status_buf_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
-			_EL_OFFSET_STATUS_BUF);
+	ctx_status_ptr_reg =
+		execlist_ring_mmio(execlist->engine, _EL_OFFSET_STATUS_PTR);
+	ctx_status_buf_reg =
+		execlist_ring_mmio(execlist->engine, _EL_OFFSET_STATUS_BUF);
 
 	ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg);
 
@@ -162,26 +157,24 @@
 
 	/* Update the CSB and CSB write pointer in HWSP */
 	hwsp_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
-					 vgpu->hws_pga[ring_id]);
+					 vgpu->hws_pga[execlist->engine->id]);
 	if (hwsp_gpa != INTEL_GVT_INVALID_ADDR) {
 		intel_gvt_hypervisor_write_gpa(vgpu,
-			hwsp_gpa + I915_HWS_CSB_BUF0_INDEX * 4 +
-			write_pointer * 8,
-			status, 8);
+			hwsp_gpa + I915_HWS_CSB_BUF0_INDEX * 4 + write_pointer * 8,
+			status, 8);
 		intel_gvt_hypervisor_write_gpa(vgpu,
-			hwsp_gpa +
-			intel_hws_csb_write_index(dev_priv) * 4,
-			&write_pointer, 4);
+			hwsp_gpa + intel_hws_csb_write_index(execlist->engine->i915) * 4,
+			&write_pointer, 4);
 	}
 
 	gvt_dbg_el("vgpu%d: w pointer %u reg %x csb l %x csb h %x\n",
-			vgpu->id, write_pointer, offset, status->ldw, status->udw);
+		   vgpu->id, write_pointer, offset, status->ldw, status->udw);
 
 	if (trigger_interrupt_later)
 		return;
 
 	intel_vgpu_trigger_virtual_event(vgpu,
-			ring_id_to_context_switch_event(execlist->ring_id));
+					 to_context_switch_event(execlist->engine));
 }
 
 static int emulate_execlist_ctx_schedule_out(
@@ -262,9 +255,8 @@
 		struct intel_vgpu_execlist *execlist)
 {
 	struct intel_vgpu *vgpu = execlist->vgpu;
-	int ring_id = execlist->ring_id;
-	u32 status_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
-			_EL_OFFSET_STATUS);
+	u32 status_reg =
+		execlist_ring_mmio(execlist->engine, _EL_OFFSET_STATUS);
 	struct execlist_status_format status;
 
 	status.ldw = vgpu_vreg(vgpu, status_reg);
@@ -380,7 +372,6 @@
 	struct intel_vgpu *vgpu = workload->vgpu;
 	struct intel_vgpu_submission *s = &vgpu->submission;
 	struct execlist_ctx_descriptor_format ctx[2];
-	int ring_id = workload->ring_id;
 	int ret;
 
 	if (!workload->emulate_schedule_in)
@@ -389,7 +380,8 @@
 	ctx[0] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 0);
 	ctx[1] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 1);
 
-	ret = emulate_execlist_schedule_in(&s->execlist[ring_id], ctx);
+	ret = emulate_execlist_schedule_in(&s->execlist[workload->engine->id],
+					   ctx);
 	if (ret) {
 		gvt_vgpu_err("fail to emulate execlist schedule in\n");
 		return ret;
@@ -400,21 +392,21 @@
 static int complete_execlist_workload(struct intel_vgpu_workload *workload)
 {
 	struct intel_vgpu *vgpu = workload->vgpu;
-	int ring_id = workload->ring_id;
 	struct intel_vgpu_submission *s = &vgpu->submission;
-	struct intel_vgpu_execlist *execlist = &s->execlist[ring_id];
+	struct intel_vgpu_execlist *execlist =
+		&s->execlist[workload->engine->id];
 	struct intel_vgpu_workload *next_workload;
-	struct list_head *next = workload_q_head(vgpu, ring_id)->next;
+	struct list_head *next = workload_q_head(vgpu, workload->engine)->next;
 	bool lite_restore = false;
 	int ret = 0;
 
-	gvt_dbg_el("complete workload %p status %d\n", workload,
-			workload->status);
+	gvt_dbg_el("complete workload %p status %d\n",
+		   workload, workload->status);
 
-	if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id)))
+	if (workload->status || vgpu->resetting_eng & workload->engine->mask)
 		goto out;
 
-	if (!list_empty(workload_q_head(vgpu, ring_id))) {
+	if (!list_empty(workload_q_head(vgpu, workload->engine))) {
 		struct execlist_ctx_descriptor_format *this_desc, *next_desc;
 
 		next_workload = container_of(next,
@@ -432,19 +424,18 @@
 
 	ret = emulate_execlist_ctx_schedule_out(execlist, &workload->ctx_desc);
 out:
-	intel_vgpu_unpin_mm(workload->shadow_mm);
-	intel_vgpu_destroy_workload(workload);
 	return ret;
 }
 
-static int submit_context(struct intel_vgpu *vgpu, int ring_id,
-		struct execlist_ctx_descriptor_format *desc,
-		bool emulate_schedule_in)
+static int submit_context(struct intel_vgpu *vgpu,
+			  const struct intel_engine_cs *engine,
+			  struct execlist_ctx_descriptor_format *desc,
+			  bool emulate_schedule_in)
 {
 	struct intel_vgpu_submission *s = &vgpu->submission;
 	struct intel_vgpu_workload *workload = NULL;
 
-	workload = intel_vgpu_create_workload(vgpu, ring_id, desc);
+	workload = intel_vgpu_create_workload(vgpu, engine, desc);
 	if (IS_ERR(workload))
 		return PTR_ERR(workload);
 
@@ -453,19 +444,20 @@
 	workload->emulate_schedule_in = emulate_schedule_in;
 
 	if (emulate_schedule_in)
-		workload->elsp_dwords = s->execlist[ring_id].elsp_dwords;
+		workload->elsp_dwords = s->execlist[engine->id].elsp_dwords;
 
 	gvt_dbg_el("workload %p emulate schedule_in %d\n", workload,
-			emulate_schedule_in);
+		   emulate_schedule_in);
 
 	intel_vgpu_queue_workload(workload);
 	return 0;
 }
 
-int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
+int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu,
+			       const struct intel_engine_cs *engine)
 {
 	struct intel_vgpu_submission *s = &vgpu->submission;
-	struct intel_vgpu_execlist *execlist = &s->execlist[ring_id];
+	struct intel_vgpu_execlist *execlist = &s->execlist[engine->id];
 	struct execlist_ctx_descriptor_format *desc[2];
 	int i, ret;
 
@@ -490,7 +482,7 @@
 	for (i = 0; i < ARRAY_SIZE(desc); i++) {
 		if (!desc[i]->valid)
 			continue;
-		ret = submit_context(vgpu, ring_id, desc[i], i == 0);
+		ret = submit_context(vgpu, engine, desc[i], i == 0);
 		if (ret) {
 			gvt_vgpu_err("failed to submit desc %d\n", i);
 			return ret;
@@ -505,36 +497,37 @@
 	return -EINVAL;
 }
 
-static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
+static void init_vgpu_execlist(struct intel_vgpu *vgpu,
+			       const struct intel_engine_cs *engine)
 {
 	struct intel_vgpu_submission *s = &vgpu->submission;
-	struct intel_vgpu_execlist *execlist = &s->execlist[ring_id];
+	struct intel_vgpu_execlist *execlist = &s->execlist[engine->id];
 	struct execlist_context_status_pointer_format ctx_status_ptr;
 	u32 ctx_status_ptr_reg;
 
 	memset(execlist, 0, sizeof(*execlist));
 
 	execlist->vgpu = vgpu;
-	execlist->ring_id = ring_id;
+	execlist->engine = engine;
 	execlist->slot[0].index = 0;
 	execlist->slot[1].index = 1;
 
-	ctx_status_ptr_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
-			_EL_OFFSET_STATUS_PTR);
+	ctx_status_ptr_reg = execlist_ring_mmio(engine, _EL_OFFSET_STATUS_PTR);
 	ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg);
 	ctx_status_ptr.read_ptr = 0;
 	ctx_status_ptr.write_ptr = 0x7;
 	vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
 }
 
-static void clean_execlist(struct intel_vgpu *vgpu, unsigned long engine_mask)
+static void clean_execlist(struct intel_vgpu *vgpu,
+			   intel_engine_mask_t engine_mask)
 {
-	unsigned int tmp;
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
 	struct intel_engine_cs *engine;
 	struct intel_vgpu_submission *s = &vgpu->submission;
+	intel_engine_mask_t tmp;
 
-	for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
+	for_each_engine_masked(engine, &dev_priv->gt, engine_mask, tmp) {
 		kfree(s->ring_scan_buffer[engine->id]);
 		s->ring_scan_buffer[engine->id] = NULL;
 		s->ring_scan_buffer_size[engine->id] = 0;
@@ -542,18 +535,18 @@
 }
 
 static void reset_execlist(struct intel_vgpu *vgpu,
-		unsigned long engine_mask)
+			   intel_engine_mask_t engine_mask)
 {
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
 	struct intel_engine_cs *engine;
-	unsigned int tmp;
+	intel_engine_mask_t tmp;
 
-	for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
-		init_vgpu_execlist(vgpu, engine->id);
+	for_each_engine_masked(engine, &dev_priv->gt, engine_mask, tmp)
+		init_vgpu_execlist(vgpu, engine);
 }
 
 static int init_execlist(struct intel_vgpu *vgpu,
-		unsigned long engine_mask)
+			 intel_engine_mask_t engine_mask)
 {
 	reset_execlist(vgpu, engine_mask);
 	return 0;