
hc
2023-12-06 08f87f769b595151be1afeff53e144f543faa614

diff --git a/kernel/drivers/gpu/drm/i915/gvt/scheduler.h b/kernel/drivers/gpu/drm/i915/gvt/scheduler.h
--- a/kernel/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/kernel/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -61,7 +61,7 @@
 	unsigned long guest_gma;
 	unsigned long shadow_gma;
 	void *shadow_va;
-	uint32_t size;
+	u32 size;
 };
 
 #define PER_CTX_ADDR_MASK 0xfffff000
@@ -79,13 +79,15 @@
 
 struct intel_vgpu_workload {
 	struct intel_vgpu *vgpu;
-	int ring_id;
+	const struct intel_engine_cs *engine;
 	struct i915_request *req;
 	/* if this workload has been dispatched to i915? */
 	bool dispatched;
+	bool shadow; /* if workload has done shadow of guest request */
 	int status;
 
 	struct intel_vgpu_mm *shadow_mm;
+	struct list_head lri_shadow_mm; /* For PPGTT load cmd */
 
 	/* different submission model may need different handler */
 	int (*prepare)(struct intel_vgpu_workload *);
@@ -99,6 +101,7 @@
 	struct execlist_ctx_descriptor_format ctx_desc;
 	struct execlist_ring_context *ring_context;
 	unsigned long rb_head, rb_tail, rb_ctl, rb_start, rb_len;
+	unsigned long guest_rb_head;
 	bool restore_inhibit;
 	struct intel_vgpu_elsp_dwords elsp_dwords;
 	bool emulate_schedule_in;
@@ -121,14 +124,12 @@
 	struct i915_vma *vma;
 	void *va;
 	u32 *bb_start_cmd_va;
-	unsigned int clflush;
-	bool accessing;
 	unsigned long bb_offset;
 	bool ppgtt;
 };
 
-#define workload_q_head(vgpu, ring_id) \
-	(&(vgpu->submission.workload_q_head[ring_id]))
+#define workload_q_head(vgpu, e) \
+	(&(vgpu)->submission.workload_q_head[(e)->id])
 
 void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload);
 
@@ -141,24 +142,25 @@
 int intel_vgpu_setup_submission(struct intel_vgpu *vgpu);
 
 void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
-				 unsigned long engine_mask);
+				 intel_engine_mask_t engine_mask);
 
 void intel_vgpu_clean_submission(struct intel_vgpu *vgpu);
 
 int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
-				     unsigned long engine_mask,
+				     intel_engine_mask_t engine_mask,
 				     unsigned int interface);
 
 extern const struct intel_vgpu_submission_ops
 intel_vgpu_execlist_submission_ops;
 
 struct intel_vgpu_workload *
-intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
+intel_vgpu_create_workload(struct intel_vgpu *vgpu,
+			   const struct intel_engine_cs *engine,
 			   struct execlist_ctx_descriptor_format *desc);
 
 void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload);
 
 void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
-				unsigned long engine_mask);
+				intel_engine_mask_t engine_mask);
 
 #endif
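
Note on the interface change above: call sites that previously passed an int ring_id now hand intel_vgpu_create_workload() the engine object itself, and the reworked workload_q_head() macro derives the per-engine queue from (e)->id. Below is a minimal caller-side sketch, assuming the usual i915/GVT headers and the ERR_PTR convention for the return value; the helper name example_queue_one is hypothetical and not part of this diff.

/* Illustrative sketch only -- not part of this diff. */
static int example_queue_one(struct intel_vgpu *vgpu,
			     const struct intel_engine_cs *engine,
			     struct execlist_ctx_descriptor_format *desc)
{
	struct intel_vgpu_workload *workload;

	/* The engine pointer replaces the old int ring_id parameter. */
	workload = intel_vgpu_create_workload(vgpu, engine, desc);
	if (IS_ERR(workload))
		return PTR_ERR(workload);

	/* workload_q_head(vgpu, e) now indexes the per-engine queue
	 * by (e)->id instead of a raw ring_id.
	 */
	if (!list_empty(workload_q_head(vgpu, engine)))
		gvt_dbg_sched("engine %s already has queued work\n",
			      engine->name);

	intel_vgpu_queue_workload(workload);
	return 0;
}

The same migration applies to the mask-taking entry points: engine_mask is now the typed intel_engine_mask_t rather than unsigned long, so a caller resetting every engine would pass a value such as ALL_ENGINES instead of a hand-rolled bitmask.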