@@ -61,7 +61,7 @@
 	unsigned long guest_gma;
 	unsigned long shadow_gma;
 	void *shadow_va;
-	uint32_t size;
+	u32 size;
 };
 
 #define PER_CTX_ADDR_MASK 0xfffff000
@@ -79,13 +79,15 @@
 
 struct intel_vgpu_workload {
 	struct intel_vgpu *vgpu;
-	int ring_id;
+	const struct intel_engine_cs *engine;
 	struct i915_request *req;
 	/* if this workload has been dispatched to i915? */
 	bool dispatched;
+	bool shadow; /* if workload has done shadow of guest request */
 	int status;
 
 	struct intel_vgpu_mm *shadow_mm;
+	struct list_head lri_shadow_mm; /* For PPGTT load cmd */
 
 	/* different submission model may need different handler */
 	int (*prepare)(struct intel_vgpu_workload *);
@@ -99,6 +101,7 @@
 	struct execlist_ctx_descriptor_format ctx_desc;
 	struct execlist_ring_context *ring_context;
 	unsigned long rb_head, rb_tail, rb_ctl, rb_start, rb_len;
+	unsigned long guest_rb_head;
 	bool restore_inhibit;
 	struct intel_vgpu_elsp_dwords elsp_dwords;
 	bool emulate_schedule_in;
@@ -121,14 +124,12 @@
 	struct i915_vma *vma;
 	void *va;
 	u32 *bb_start_cmd_va;
-	unsigned int clflush;
-	bool accessing;
 	unsigned long bb_offset;
 	bool ppgtt;
 };
 
-#define workload_q_head(vgpu, ring_id) \
-	(&(vgpu->submission.workload_q_head[ring_id]))
+#define workload_q_head(vgpu, e) \
+	(&(vgpu)->submission.workload_q_head[(e)->id])
 
 void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload);
 
@@ -141,24 +142,25 @@
 int intel_vgpu_setup_submission(struct intel_vgpu *vgpu);
 
 void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
-				 unsigned long engine_mask);
+				 intel_engine_mask_t engine_mask);
 
 void intel_vgpu_clean_submission(struct intel_vgpu *vgpu);
 
 int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
-				     unsigned long engine_mask,
+				     intel_engine_mask_t engine_mask,
				     unsigned int interface);
 
 extern const struct intel_vgpu_submission_ops
 intel_vgpu_execlist_submission_ops;
 
 struct intel_vgpu_workload *
-intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
+intel_vgpu_create_workload(struct intel_vgpu *vgpu,
+			   const struct intel_engine_cs *engine,
			   struct execlist_ctx_descriptor_format *desc);
 
 void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload);
 
 void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
-				unsigned long engine_mask);
+				intel_engine_mask_t engine_mask);
 
 #endif
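
Note on the workload_q_head() change: besides taking an engine pointer instead of a raw ring id, the new macro parenthesizes both arguments, so expressions passed as arguments expand safely. Below is a minimal standalone sketch of that behavior, not kernel code: the struct names and the int queue array are simplified stand-ins for the real i915/GVT types (where the queue is a struct list_head array), kept only to make the example compile on its own.

#include <stdio.h>

/* Simplified stand-ins for the real i915/GVT types (hypothetical). */
struct engine_cs { unsigned int id; };
struct submission_state { int workload_q_head[4]; };
struct vgpu { struct submission_state submission; };

/* Mirrors the updated macro: both arguments are parenthesized and the
 * queue is indexed by the engine's id field rather than a raw ring id. */
#define workload_q_head(vgpu, e) \
	(&(vgpu)->submission.workload_q_head[(e)->id])

int main(void)
{
	struct vgpu v = { .submission = { .workload_q_head = { 10, 20, 30, 40 } } };
	struct engine_cs rcs = { .id = 2 };

	/* Expands to (&(&v)->submission.workload_q_head[(&rcs)->id]);
	 * without the added parentheses around the first argument, an
	 * expression argument like &v would expand incorrectly. */
	printf("%d\n", *workload_q_head(&v, &rcs)); /* prints 30 */
	return 0;
}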