forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-11 6778948f9de86c3cfaf36725a7c87dcff9ba247f
kernel/drivers/gpu/drm/i915/gvt/execlist.c
@@ -39,28 +39,26 @@
 #define _EL_OFFSET_STATUS_BUF		0x370
 #define _EL_OFFSET_STATUS_PTR		0x3A0

-#define execlist_ring_mmio(gvt, ring_id, offset) \
-	(gvt->dev_priv->engine[ring_id]->mmio_base + (offset))
+#define execlist_ring_mmio(e, offset) ((e)->mmio_base + (offset))

 #define valid_context(ctx) ((ctx)->valid)
 #define same_context(a, b) (((a)->context_id == (b)->context_id) && \
 		((a)->lrca == (b)->lrca))

 static int context_switch_events[] = {
-	[RCS] = RCS_AS_CONTEXT_SWITCH,
-	[BCS] = BCS_AS_CONTEXT_SWITCH,
-	[VCS] = VCS_AS_CONTEXT_SWITCH,
-	[VCS2] = VCS2_AS_CONTEXT_SWITCH,
-	[VECS] = VECS_AS_CONTEXT_SWITCH,
+	[RCS0] = RCS_AS_CONTEXT_SWITCH,
+	[BCS0] = BCS_AS_CONTEXT_SWITCH,
+	[VCS0] = VCS_AS_CONTEXT_SWITCH,
+	[VCS1] = VCS2_AS_CONTEXT_SWITCH,
+	[VECS0] = VECS_AS_CONTEXT_SWITCH,
 };

-static int ring_id_to_context_switch_event(int ring_id)
+static int to_context_switch_event(const struct intel_engine_cs *engine)
 {
-	if (WARN_ON(ring_id < RCS ||
-		    ring_id >= ARRAY_SIZE(context_switch_events)))
+	if (WARN_ON(engine->id >= ARRAY_SIZE(context_switch_events)))
 		return -EINVAL;

-	return context_switch_events[ring_id];
+	return context_switch_events[engine->id];
 }

 static void switch_virtual_execlist_slot(struct intel_vgpu_execlist *execlist)
@@ -94,9 +92,8 @@
 	struct execlist_ctx_descriptor_format *desc = execlist->running_context;
 	struct intel_vgpu *vgpu = execlist->vgpu;
 	struct execlist_status_format status;
-	int ring_id = execlist->ring_id;
-	u32 status_reg = execlist_ring_mmio(vgpu->gvt,
-			ring_id, _EL_OFFSET_STATUS);
+	u32 status_reg =
+		execlist_ring_mmio(execlist->engine, _EL_OFFSET_STATUS);

 	status.ldw = vgpu_vreg(vgpu, status_reg);
 	status.udw = vgpu_vreg(vgpu, status_reg + 4);
@@ -125,21 +122,19 @@
 }

 static void emulate_csb_update(struct intel_vgpu_execlist *execlist,
-		struct execlist_context_status_format *status,
-		bool trigger_interrupt_later)
+			       struct execlist_context_status_format *status,
+			       bool trigger_interrupt_later)
 {
 	struct intel_vgpu *vgpu = execlist->vgpu;
-	int ring_id = execlist->ring_id;
 	struct execlist_context_status_pointer_format ctx_status_ptr;
 	u32 write_pointer;
 	u32 ctx_status_ptr_reg, ctx_status_buf_reg, offset;
 	unsigned long hwsp_gpa;
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;

-	ctx_status_ptr_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
-			_EL_OFFSET_STATUS_PTR);
-	ctx_status_buf_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
-			_EL_OFFSET_STATUS_BUF);
+	ctx_status_ptr_reg =
+		execlist_ring_mmio(execlist->engine, _EL_OFFSET_STATUS_PTR);
+	ctx_status_buf_reg =
+		execlist_ring_mmio(execlist->engine, _EL_OFFSET_STATUS_BUF);

 	ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg);

@@ -162,26 +157,24 @@

 	/* Update the CSB and CSB write pointer in HWSP */
 	hwsp_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
-			vgpu->hws_pga[ring_id]);
+					 vgpu->hws_pga[execlist->engine->id]);
 	if (hwsp_gpa != INTEL_GVT_INVALID_ADDR) {
 		intel_gvt_hypervisor_write_gpa(vgpu,
-			hwsp_gpa + I915_HWS_CSB_BUF0_INDEX * 4 +
-			write_pointer * 8,
-			status, 8);
+			hwsp_gpa + I915_HWS_CSB_BUF0_INDEX * 4 + write_pointer * 8,
+			status, 8);
 		intel_gvt_hypervisor_write_gpa(vgpu,
-			hwsp_gpa +
-			intel_hws_csb_write_index(dev_priv) * 4,
-			&write_pointer, 4);
+			hwsp_gpa + intel_hws_csb_write_index(execlist->engine->i915) * 4,
+			&write_pointer, 4);
 	}

 	gvt_dbg_el("vgpu%d: w pointer %u reg %x csb l %x csb h %x\n",
-			vgpu->id, write_pointer, offset, status->ldw, status->udw);
+		   vgpu->id, write_pointer, offset, status->ldw, status->udw);

 	if (trigger_interrupt_later)
 		return;

 	intel_vgpu_trigger_virtual_event(vgpu,
-			ring_id_to_context_switch_event(execlist->ring_id));
+					 to_context_switch_event(execlist->engine));
 }

 static int emulate_execlist_ctx_schedule_out(
@@ -262,9 +255,8 @@
 		struct intel_vgpu_execlist *execlist)
 {
 	struct intel_vgpu *vgpu = execlist->vgpu;
-	int ring_id = execlist->ring_id;
-	u32 status_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
-			_EL_OFFSET_STATUS);
+	u32 status_reg =
+		execlist_ring_mmio(execlist->engine, _EL_OFFSET_STATUS);
 	struct execlist_status_format status;

 	status.ldw = vgpu_vreg(vgpu, status_reg);
@@ -380,7 +372,6 @@
 	struct intel_vgpu *vgpu = workload->vgpu;
 	struct intel_vgpu_submission *s = &vgpu->submission;
 	struct execlist_ctx_descriptor_format ctx[2];
-	int ring_id = workload->ring_id;
 	int ret;

 	if (!workload->emulate_schedule_in)
@@ -389,7 +380,8 @@
 	ctx[0] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 0);
 	ctx[1] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 1);

-	ret = emulate_execlist_schedule_in(&s->execlist[ring_id], ctx);
+	ret = emulate_execlist_schedule_in(&s->execlist[workload->engine->id],
+					   ctx);
 	if (ret) {
 		gvt_vgpu_err("fail to emulate execlist schedule in\n");
 		return ret;
@@ -400,21 +392,21 @@
 static int complete_execlist_workload(struct intel_vgpu_workload *workload)
 {
 	struct intel_vgpu *vgpu = workload->vgpu;
-	int ring_id = workload->ring_id;
 	struct intel_vgpu_submission *s = &vgpu->submission;
-	struct intel_vgpu_execlist *execlist = &s->execlist[ring_id];
+	struct intel_vgpu_execlist *execlist =
+		&s->execlist[workload->engine->id];
 	struct intel_vgpu_workload *next_workload;
-	struct list_head *next = workload_q_head(vgpu, ring_id)->next;
+	struct list_head *next = workload_q_head(vgpu, workload->engine)->next;
 	bool lite_restore = false;
 	int ret = 0;

-	gvt_dbg_el("complete workload %p status %d\n", workload,
-			workload->status);
+	gvt_dbg_el("complete workload %p status %d\n",
+		   workload, workload->status);

-	if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id)))
+	if (workload->status || vgpu->resetting_eng & workload->engine->mask)
 		goto out;

-	if (!list_empty(workload_q_head(vgpu, ring_id))) {
+	if (!list_empty(workload_q_head(vgpu, workload->engine))) {
 		struct execlist_ctx_descriptor_format *this_desc, *next_desc;

 		next_workload = container_of(next,
@@ -432,19 +424,18 @@

 	ret = emulate_execlist_ctx_schedule_out(execlist, &workload->ctx_desc);
 out:
-	intel_vgpu_unpin_mm(workload->shadow_mm);
-	intel_vgpu_destroy_workload(workload);
 	return ret;
 }

-static int submit_context(struct intel_vgpu *vgpu, int ring_id,
-		struct execlist_ctx_descriptor_format *desc,
-		bool emulate_schedule_in)
+static int submit_context(struct intel_vgpu *vgpu,
+			  const struct intel_engine_cs *engine,
+			  struct execlist_ctx_descriptor_format *desc,
+			  bool emulate_schedule_in)
 {
 	struct intel_vgpu_submission *s = &vgpu->submission;
 	struct intel_vgpu_workload *workload = NULL;

-	workload = intel_vgpu_create_workload(vgpu, ring_id, desc);
+	workload = intel_vgpu_create_workload(vgpu, engine, desc);
 	if (IS_ERR(workload))
 		return PTR_ERR(workload);

@@ -453,19 +444,20 @@
 	workload->emulate_schedule_in = emulate_schedule_in;

 	if (emulate_schedule_in)
-		workload->elsp_dwords = s->execlist[ring_id].elsp_dwords;
+		workload->elsp_dwords = s->execlist[engine->id].elsp_dwords;

 	gvt_dbg_el("workload %p emulate schedule_in %d\n", workload,
-			emulate_schedule_in);
+		   emulate_schedule_in);

 	intel_vgpu_queue_workload(workload);
 	return 0;
 }

-int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
+int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu,
+			       const struct intel_engine_cs *engine)
 {
 	struct intel_vgpu_submission *s = &vgpu->submission;
-	struct intel_vgpu_execlist *execlist = &s->execlist[ring_id];
+	struct intel_vgpu_execlist *execlist = &s->execlist[engine->id];
 	struct execlist_ctx_descriptor_format *desc[2];
 	int i, ret;

@@ -490,7 +482,7 @@
 	for (i = 0; i < ARRAY_SIZE(desc); i++) {
 		if (!desc[i]->valid)
 			continue;
-		ret = submit_context(vgpu, ring_id, desc[i], i == 0);
+		ret = submit_context(vgpu, engine, desc[i], i == 0);
 		if (ret) {
 			gvt_vgpu_err("failed to submit desc %d\n", i);
 			return ret;
@@ -505,36 +497,37 @@
 	return -EINVAL;
 }

-static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
+static void init_vgpu_execlist(struct intel_vgpu *vgpu,
+			       const struct intel_engine_cs *engine)
 {
 	struct intel_vgpu_submission *s = &vgpu->submission;
-	struct intel_vgpu_execlist *execlist = &s->execlist[ring_id];
+	struct intel_vgpu_execlist *execlist = &s->execlist[engine->id];
 	struct execlist_context_status_pointer_format ctx_status_ptr;
 	u32 ctx_status_ptr_reg;

 	memset(execlist, 0, sizeof(*execlist));

 	execlist->vgpu = vgpu;
-	execlist->ring_id = ring_id;
+	execlist->engine = engine;
 	execlist->slot[0].index = 0;
 	execlist->slot[1].index = 1;

-	ctx_status_ptr_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
-			_EL_OFFSET_STATUS_PTR);
+	ctx_status_ptr_reg = execlist_ring_mmio(engine, _EL_OFFSET_STATUS_PTR);
 	ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg);
 	ctx_status_ptr.read_ptr = 0;
 	ctx_status_ptr.write_ptr = 0x7;
 	vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
 }

-static void clean_execlist(struct intel_vgpu *vgpu, unsigned long engine_mask)
+static void clean_execlist(struct intel_vgpu *vgpu,
+			   intel_engine_mask_t engine_mask)
 {
-	unsigned int tmp;
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
 	struct intel_engine_cs *engine;
 	struct intel_vgpu_submission *s = &vgpu->submission;
+	intel_engine_mask_t tmp;

-	for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
+	for_each_engine_masked(engine, &dev_priv->gt, engine_mask, tmp) {
 		kfree(s->ring_scan_buffer[engine->id]);
 		s->ring_scan_buffer[engine->id] = NULL;
 		s->ring_scan_buffer_size[engine->id] = 0;
@@ -542,18 +535,18 @@
 }

 static void reset_execlist(struct intel_vgpu *vgpu,
-		unsigned long engine_mask)
+			   intel_engine_mask_t engine_mask)
 {
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
 	struct intel_engine_cs *engine;
-	unsigned int tmp;
+	intel_engine_mask_t tmp;

-	for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
-		init_vgpu_execlist(vgpu, engine->id);
+	for_each_engine_masked(engine, &dev_priv->gt, engine_mask, tmp)
+		init_vgpu_execlist(vgpu, engine);
 }

 static int init_execlist(struct intel_vgpu *vgpu,
-		unsigned long engine_mask)
+			 intel_engine_mask_t engine_mask)
 {
 	reset_execlist(vgpu, engine_mask);
 	return 0;
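
The common thread of the hunks above is that the GVT execlist code stops indexing per-ring state by an integer ring_id and instead carries a const struct intel_engine_cs *engine pointer, so the MMIO base, engine id, and reset mask come straight from the engine object (see the reworked execlist_ring_mmio macro in the first hunk). The stand-alone C sketch below only illustrates that shape; it uses simplified stand-in struct definitions and made-up register values, not the real kernel headers, and is not part of the patch.

/* build: cc -Wall sketch.c && ./a.out */
#include <stdio.h>
#include <stdint.h>

/* Simplified stand-in for the kernel's intel_engine_cs (illustrative only). */
struct intel_engine_cs {
	unsigned int id;	/* engine index, e.g. RCS0 == 0 */
	uint32_t mmio_base;	/* per-engine MMIO base */
	uint32_t mask;		/* BIT(id), compared against resetting_eng */
};

#define _EL_OFFSET_STATUS	0x234	/* offset as in the patched file */

/* New form from the patch: the caller already holds the engine pointer,
 * so no gvt/ring_id lookup through a global engine array is needed. */
#define execlist_ring_mmio(e, offset) ((e)->mmio_base + (offset))

int main(void)
{
	struct intel_engine_cs rcs0 = {
		.id = 0, .mmio_base = 0x2000, .mask = 1u << 0,
	};

	/* prints 0x2234 = 0x2000 + 0x234 */
	printf("RCS0 EL_STATUS reg: 0x%x\n",
	       execlist_ring_mmio(&rcs0, _EL_OFFSET_STATUS));
	return 0;
}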