From 1543e317f1da31b75942316931e8f491a8920811 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Thu, 04 Jan 2024 10:08:02 +0000
Subject: [PATCH] disable FB
---
kernel/drivers/gpu/drm/i915/gvt/cmd_parser.c | 613 ++++++++++++++++++++++++++++++++++---------------------
1 file changed, 380 insertions(+), 233 deletions(-)
diff --git a/kernel/drivers/gpu/drm/i915/gvt/cmd_parser.c b/kernel/drivers/gpu/drm/i915/gvt/cmd_parser.c
index be15289..16b582c 100644
--- a/kernel/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/kernel/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -35,7 +35,9 @@
*/
#include <linux/slab.h>
+
#include "i915_drv.h"
+#include "gt/intel_ring.h"
#include "gvt.h"
#include "i915_pvinfo.h"
#include "trace.h"
@@ -55,10 +57,10 @@
int low;
};
struct decode_info {
- char *name;
+ const char *name;
int op_len;
int nr_sub_op;
- struct sub_op_bits *sub_op;
+ const struct sub_op_bits *sub_op;
};
#define MAX_CMD_BUDGET 0x7fffffff
@@ -162,6 +164,7 @@
#define OP_STATE_BASE_ADDRESS OP_3D_MEDIA(0x0, 0x1, 0x01)
#define OP_STATE_SIP OP_3D_MEDIA(0x0, 0x1, 0x02)
#define OP_3D_MEDIA_0_1_4 OP_3D_MEDIA(0x0, 0x1, 0x04)
+#define OP_SWTESS_BASE_ADDRESS OP_3D_MEDIA(0x0, 0x1, 0x03)
#define OP_3DSTATE_VF_STATISTICS_GM45 OP_3D_MEDIA(0x1, 0x0, 0x0B)
@@ -374,35 +377,51 @@
#define ADDR_FIX_4(x1, x2, x3, x4) (ADDR_FIX_1(x1) | ADDR_FIX_3(x2, x3, x4))
#define ADDR_FIX_5(x1, x2, x3, x4, x5) (ADDR_FIX_1(x1) | ADDR_FIX_4(x2, x3, x4, x5))
+#define DWORD_FIELD(dword, end, start) \
+ FIELD_GET(GENMASK(end, start), cmd_val(s, dword))
+
+#define OP_LENGTH_BIAS 2
+#define CMD_LEN(value) (value + OP_LENGTH_BIAS)
+
+static int gvt_check_valid_cmd_length(int len, int valid_len)
+{
+ if (valid_len != len) {
+ gvt_err("len is not valid: len=%u valid_len=%u\n",
+ len, valid_len);
+ return -EFAULT;
+ }
+ return 0;
+}
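
The handlers added later in this patch combine these helpers in a common pattern; here is a minimal sketch mirroring the QWord check used below (struct parser_exec_state, cmd_length() and cmd_val() come from the surrounding file, and the handler name is illustrative):

static int example_qword_len_check(struct parser_exec_state *s)
{
	u32 valid_len = CMD_LEN(2);	/* base form: 2 + OP_LENGTH_BIAS DWords */

	/* bit 21 of DWord 0 selects the QWord form, one DWord longer */
	if (DWORD_FIELD(0, 21, 21))
		valid_len++;

	return gvt_check_valid_cmd_length(cmd_length(s), valid_len);
}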
+
struct cmd_info {
- char *name;
+ const char *name;
u32 opcode;
-#define F_LEN_MASK (1U<<0)
+#define F_LEN_MASK 3U
#define F_LEN_CONST 1U
#define F_LEN_VAR 0U
+/* value is const although LEN may be variable */
+#define F_LEN_VAR_FIXED (1<<1)
/*
* command has its own ip advance logic
* e.g. MI_BATCH_START, MI_BATCH_END
*/
-#define F_IP_ADVANCE_CUSTOM (1<<1)
-
-#define F_POST_HANDLE (1<<2)
+#define F_IP_ADVANCE_CUSTOM (1<<2)
u32 flag;
-#define R_RCS (1 << RCS)
-#define R_VCS1 (1 << VCS)
-#define R_VCS2 (1 << VCS2)
+#define R_RCS BIT(RCS0)
+#define R_VCS1 BIT(VCS0)
+#define R_VCS2 BIT(VCS1)
#define R_VCS (R_VCS1 | R_VCS2)
-#define R_BCS (1 << BCS)
-#define R_VECS (1 << VECS)
+#define R_BCS BIT(BCS0)
+#define R_VECS BIT(VECS0)
#define R_ALL (R_RCS | R_VCS | R_BCS | R_VECS)
/* rings that support this cmd: BLT/RCS/VCS/VECS */
- uint16_t rings;
+ u16 rings;
/* devices that support this cmd: SNB/IVB/HSW/... */
- uint16_t devices;
+ u16 devices;
/* which DWords are addresses that need fixing up.
* bit 0 means a 32-bit non address operand in command
@@ -412,20 +431,23 @@
* No matter the address length, each address only takes
* one bit in the bitmap.
*/
- uint16_t addr_bitmap;
+ u16 addr_bitmap;
/* flag == F_LEN_CONST : command length
* flag == F_LEN_VAR : length bias bits
* Note: length is in DWord
*/
- uint8_t len;
+ u32 len;
parser_cmd_handler handler;
+
+ /* valid length in DWord */
+ u32 valid_len;
};
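
Taken together, the widened F_LEN_MASK gives three length modes; a summary of the encoding as defined above:

/*
 * flag & F_LEN_MASK selects how a command's length is validated:
 *   F_LEN_VAR       (0): variable length, decoded from the command DWord
 *   F_LEN_CONST     (1): constant length, taken from cmd_info.len
 *   F_LEN_VAR_FIXED (2): variable-length encoding, but only
 *                        cmd_info.valid_len DWords are accepted
 * F_IP_ADVANCE_CUSTOM moves to bit 2 to make room for the wider mask.
 */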
struct cmd_entry {
struct hlist_node hlist;
- struct cmd_info *info;
+ const struct cmd_info *info;
};
enum {
@@ -441,7 +463,7 @@
struct parser_exec_state {
struct intel_vgpu *vgpu;
- int ring_id;
+ const struct intel_engine_cs *engine;
int buf_type;
@@ -474,7 +496,7 @@
int saved_buf_addr_type;
bool is_ctx_wa;
- struct cmd_info *info;
+ const struct cmd_info *info;
struct intel_vgpu_workload *workload;
};
@@ -485,12 +507,12 @@
static unsigned long bypass_scan_mask = 0;
/* ring ALL, type = 0 */
-static struct sub_op_bits sub_op_mi[] = {
+static const struct sub_op_bits sub_op_mi[] = {
{31, 29},
{28, 23},
};
-static struct decode_info decode_info_mi = {
+static const struct decode_info decode_info_mi = {
"MI",
OP_LEN_MI,
ARRAY_SIZE(sub_op_mi),
@@ -498,12 +520,12 @@
};
/* ring RCS, command type 2 */
-static struct sub_op_bits sub_op_2d[] = {
+static const struct sub_op_bits sub_op_2d[] = {
{31, 29},
{28, 22},
};
-static struct decode_info decode_info_2d = {
+static const struct decode_info decode_info_2d = {
"2D",
OP_LEN_2D,
ARRAY_SIZE(sub_op_2d),
@@ -511,14 +533,14 @@
};
/* ring RCS, command type 3 */
-static struct sub_op_bits sub_op_3d_media[] = {
+static const struct sub_op_bits sub_op_3d_media[] = {
{31, 29},
{28, 27},
{26, 24},
{23, 16},
};
-static struct decode_info decode_info_3d_media = {
+static const struct decode_info decode_info_3d_media = {
"3D_Media",
OP_LEN_3D_MEDIA,
ARRAY_SIZE(sub_op_3d_media),
@@ -526,7 +548,7 @@
};
/* ring VCS, command type 3 */
-static struct sub_op_bits sub_op_mfx_vc[] = {
+static const struct sub_op_bits sub_op_mfx_vc[] = {
{31, 29},
{28, 27},
{26, 24},
@@ -534,7 +556,7 @@
{20, 16},
};
-static struct decode_info decode_info_mfx_vc = {
+static const struct decode_info decode_info_mfx_vc = {
"MFX_VC",
OP_LEN_MFX_VC,
ARRAY_SIZE(sub_op_mfx_vc),
@@ -542,7 +564,7 @@
};
/* ring VECS, command type 3 */
-static struct sub_op_bits sub_op_vebox[] = {
+static const struct sub_op_bits sub_op_vebox[] = {
{31, 29},
{28, 27},
{26, 24},
@@ -550,15 +572,15 @@
{20, 16},
};
-static struct decode_info decode_info_vebox = {
+static const struct decode_info decode_info_vebox = {
"VEBOX",
OP_LEN_VEBOX,
ARRAY_SIZE(sub_op_vebox),
sub_op_vebox,
};
-static struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = {
- [RCS] = {
+static const struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = {
+ [RCS0] = {
&decode_info_mi,
NULL,
NULL,
@@ -569,7 +591,7 @@
NULL,
},
- [VCS] = {
+ [VCS0] = {
&decode_info_mi,
NULL,
NULL,
@@ -580,7 +602,7 @@
NULL,
},
- [BCS] = {
+ [BCS0] = {
&decode_info_mi,
NULL,
&decode_info_2d,
@@ -591,7 +613,7 @@
NULL,
},
- [VECS] = {
+ [VECS0] = {
&decode_info_mi,
NULL,
NULL,
@@ -602,7 +624,7 @@
NULL,
},
- [VCS2] = {
+ [VCS1] = {
&decode_info_mi,
NULL,
NULL,
@@ -614,40 +636,42 @@
},
};
-static inline u32 get_opcode(u32 cmd, int ring_id)
+static inline u32 get_opcode(u32 cmd, const struct intel_engine_cs *engine)
{
- struct decode_info *d_info;
+ const struct decode_info *d_info;
- d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)];
+ d_info = ring_decode_info[engine->id][CMD_TYPE(cmd)];
if (d_info == NULL)
return INVALID_OP;
return cmd >> (32 - d_info->op_len);
}
-static inline struct cmd_info *find_cmd_entry(struct intel_gvt *gvt,
- unsigned int opcode, int ring_id)
+static inline const struct cmd_info *
+find_cmd_entry(struct intel_gvt *gvt, unsigned int opcode,
+ const struct intel_engine_cs *engine)
{
struct cmd_entry *e;
hash_for_each_possible(gvt->cmd_table, e, hlist, opcode) {
- if ((opcode == e->info->opcode) &&
- (e->info->rings & (1 << ring_id)))
+ if (opcode == e->info->opcode &&
+ e->info->rings & engine->mask)
return e->info;
}
return NULL;
}
-static inline struct cmd_info *get_cmd_info(struct intel_gvt *gvt,
- u32 cmd, int ring_id)
+static inline const struct cmd_info *
+get_cmd_info(struct intel_gvt *gvt, u32 cmd,
+ const struct intel_engine_cs *engine)
{
u32 opcode;
- opcode = get_opcode(cmd, ring_id);
+ opcode = get_opcode(cmd, engine);
if (opcode == INVALID_OP)
return NULL;
- return find_cmd_entry(gvt, opcode, ring_id);
+ return find_cmd_entry(gvt, opcode, engine);
}
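
A worked decode example, assuming OP_LEN_MI == 9 as defined earlier in this file:

/*
 * MI_BATCH_BUFFER_START encodes as 0x31 << 23 == 0x18800000:
 * CMD_TYPE() (bits 31:29) is 0, selecting the MI decode table, and
 * get_opcode() returns 0x18800000 >> (32 - 9) == 0x31, which
 * find_cmd_entry() then looks up in gvt->cmd_table, filtered by
 * engine->mask.
 */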
static inline u32 sub_op_val(u32 cmd, u32 hi, u32 low)
@@ -655,12 +679,12 @@
return (cmd >> low) & ((1U << (hi - low + 1)) - 1);
}
-static inline void print_opcode(u32 cmd, int ring_id)
+static inline void print_opcode(u32 cmd, const struct intel_engine_cs *engine)
{
- struct decode_info *d_info;
+ const struct decode_info *d_info;
int i;
- d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)];
+ d_info = ring_decode_info[engine->id][CMD_TYPE(cmd)];
if (d_info == NULL)
return;
@@ -689,10 +713,11 @@
int cnt = 0;
int i;
- gvt_dbg_cmd(" vgpu%d RING%d: ring_start(%08lx) ring_end(%08lx)"
- " ring_head(%08lx) ring_tail(%08lx)\n", s->vgpu->id,
- s->ring_id, s->ring_start, s->ring_start + s->ring_size,
- s->ring_head, s->ring_tail);
+ gvt_dbg_cmd(" vgpu%d RING%s: ring_start(%08lx) ring_end(%08lx)"
+ " ring_head(%08lx) ring_tail(%08lx)\n",
+ s->vgpu->id, s->engine->name,
+ s->ring_start, s->ring_start + s->ring_size,
+ s->ring_head, s->ring_tail);
gvt_dbg_cmd(" %s %s ip_gma(%08lx) ",
s->buf_type == RING_BUFFER_INSTRUCTION ?
@@ -709,7 +734,7 @@
s->ip_va, cmd_val(s, 0), cmd_val(s, 1),
cmd_val(s, 2), cmd_val(s, 3));
- print_opcode(cmd_val(s, 0), s->ring_id);
+ print_opcode(cmd_val(s, 0), s->engine);
s->ip_va = (u32 *)((((u64)s->ip_va) >> 12) << 12);
@@ -776,7 +801,7 @@
return 0;
}
-static inline int get_cmd_length(struct cmd_info *info, u32 cmd)
+static inline int get_cmd_length(const struct cmd_info *info, u32 cmd)
{
if ((info->flag & F_LEN_MASK) == F_LEN_CONST)
return info->len;
@@ -820,7 +845,6 @@
unsigned int data;
u32 ring_base;
u32 nopid;
- struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
if (!strcmp(cmd, "lri"))
data = cmd_val(s, index + 1);
@@ -830,7 +854,7 @@
return -EINVAL;
}
- ring_base = dev_priv->engine[s->ring_id]->mmio_base;
+ ring_base = s->engine->mmio_base;
nopid = i915_mmio_reg_offset(RING_NOPID(ring_base));
if (!intel_gvt_in_force_nonpriv_whitelist(gvt, data) &&
@@ -858,6 +882,47 @@
return 0;
}
+static int is_cmd_update_pdps(unsigned int offset,
+ struct parser_exec_state *s)
+{
+ u32 base = s->workload->engine->mmio_base;
+ return i915_mmio_reg_equal(_MMIO(offset), GEN8_RING_PDP_UDW(base, 0));
+}
+
+static int cmd_pdp_mmio_update_handler(struct parser_exec_state *s,
+ unsigned int offset, unsigned int index)
+{
+ struct intel_vgpu *vgpu = s->vgpu;
+ struct intel_vgpu_mm *shadow_mm = s->workload->shadow_mm;
+ struct intel_vgpu_mm *mm;
+ u64 pdps[GEN8_3LVL_PDPES];
+
+ if (shadow_mm->ppgtt_mm.root_entry_type ==
+ GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
+ pdps[0] = (u64)cmd_val(s, 2) << 32;
+ pdps[0] |= cmd_val(s, 4);
+
+ mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps);
+ if (!mm) {
+ gvt_vgpu_err("failed to get the 4-level shadow vm\n");
+ return -EINVAL;
+ }
+ intel_vgpu_mm_get(mm);
+ list_add_tail(&mm->ppgtt_mm.link,
+ &s->workload->lri_shadow_mm);
+ *cmd_ptr(s, 2) = upper_32_bits(mm->ppgtt_mm.shadow_pdps[0]);
+ *cmd_ptr(s, 4) = lower_32_bits(mm->ppgtt_mm.shadow_pdps[0]);
+ } else {
+ /* Currently all guests use the PML4 table; a guest with a
+ * 3-level table that uses LRI for PPGTT updates can't exist
+ * yet, so this path is simply untestable. */
+ GEM_BUG_ON(1);
+ gvt_vgpu_err("invalid shared shadow vm type\n");
+ return -EINVAL;
+ }
+ return 0;
+}
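
In the 4-level branch above, the guest's PDP0 root pointer is split across the LRI payload; spelled out with the same cmd_val() accessor:

/*
 * DWord 2 carries the PDP0 upper half and DWord 4 the lower half:
 *
 *	u64 guest_pdp0 = ((u64)cmd_val(s, 2) << 32) | cmd_val(s, 4);
 *
 * The matching shadow PPGTT root is then patched into those same
 * slots, so the hardware walks shadow page tables rather than the
 * guest's.
 */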
+
static int cmd_reg_handler(struct parser_exec_state *s,
unsigned int offset, unsigned int index, char *cmd)
{
@@ -871,7 +936,7 @@
return -EFAULT;
}
- if (!intel_gvt_mmio_is_cmd_access(gvt, offset)) {
+ if (!intel_gvt_mmio_is_cmd_accessible(gvt, offset)) {
gvt_vgpu_err("%s access to non-render register (%x)\n",
cmd, offset);
return -EBADRQC;
@@ -896,14 +961,23 @@
patch_value(s, cmd_ptr(s, index), VGT_PVINFO_PAGE);
}
+ if (is_cmd_update_pdps(offset, s) &&
+ cmd_pdp_mmio_update_handler(s, offset, index))
+ return -EINVAL;
+
/* TODO
- * Right now only scan LRI command on KBL and in inhibit context.
- * It's good enough to support initializing mmio by lri command in
- * vgpu inhibit context on KBL.
+ * In order to let a workload with an inhibit context generate
+ * correct image data in memory, vreg values are loaded into the
+ * hw via LRIs in that workload. But as the indirect context is
+ * loaded prior to the LRIs in the workload, we don't want reg
+ * values specified in the indirect context to be overwritten by
+ * those LRIs. So, when scanning an indirect context, we write the
+ * reg values in it back into the vregs, so that the LRIs in a
+ * workload with an inhibit context restore the correct values.
*/
- if (IS_KABYLAKE(s->vgpu->gvt->dev_priv) &&
- intel_gvt_mmio_is_in_ctx(gvt, offset) &&
- !strncmp(cmd, "lri", 3)) {
+ if (IS_GEN(s->engine->i915, 9) &&
+ intel_gvt_mmio_is_sr_in_ctx(gvt, offset) &&
+ !strncmp(cmd, "lri", 3)) {
intel_gvt_hypervisor_read_gpa(s->vgpu,
s->workload->ring_context_gpa + 12, &ctx_sr_ctl, 4);
/* check inhibit context */
@@ -918,8 +992,6 @@
}
}
- /* TODO: Update the global mask if this MMIO is a masked-MMIO */
- intel_gvt_mmio_set_cmd_accessed(gvt, offset);
return 0;
}
@@ -939,18 +1011,14 @@
{
int i, ret = 0;
int cmd_len = cmd_length(s);
- struct intel_gvt *gvt = s->vgpu->gvt;
for (i = 1; i < cmd_len; i += 2) {
- if (IS_BROADWELL(gvt->dev_priv) &&
- (s->ring_id != RCS)) {
- if (s->ring_id == BCS &&
- cmd_reg(s, i) ==
- i915_mmio_reg_offset(DERRMR))
+ if (IS_BROADWELL(s->engine->i915) && s->engine->id != RCS0) {
+ if (s->engine->id == BCS0 &&
+ cmd_reg(s, i) == i915_mmio_reg_offset(DERRMR))
ret |= 0;
else
- ret |= (cmd_reg_inhibit(s, i)) ?
- -EBADRQC : 0;
+ ret |= cmd_reg_inhibit(s, i) ? -EBADRQC : 0;
}
if (ret)
break;
@@ -967,9 +1035,9 @@
int cmd_len = cmd_length(s);
for (i = 1; i < cmd_len; i += 2) {
- if (IS_BROADWELL(s->vgpu->gvt->dev_priv))
+ if (IS_BROADWELL(s->engine->i915))
ret |= ((cmd_reg_inhibit(s, i) ||
- (cmd_reg_inhibit(s, i + 1)))) ?
+ (cmd_reg_inhibit(s, i + 1)))) ?
-EBADRQC : 0;
if (ret)
break;
@@ -995,7 +1063,7 @@
int cmd_len = cmd_length(s);
for (i = 1; i < cmd_len;) {
- if (IS_BROADWELL(gvt->dev_priv))
+ if (IS_BROADWELL(s->engine->i915))
ret |= (cmd_reg_inhibit(s, i)) ? -EBADRQC : 0;
if (ret)
break;
@@ -1046,27 +1114,27 @@
};
static struct cmd_interrupt_event cmd_interrupt_events[] = {
- [RCS] = {
+ [RCS0] = {
.pipe_control_notify = RCS_PIPE_CONTROL,
.mi_flush_dw = INTEL_GVT_EVENT_RESERVED,
.mi_user_interrupt = RCS_MI_USER_INTERRUPT,
},
- [BCS] = {
+ [BCS0] = {
.pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
.mi_flush_dw = BCS_MI_FLUSH_DW,
.mi_user_interrupt = BCS_MI_USER_INTERRUPT,
},
- [VCS] = {
+ [VCS0] = {
.pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
.mi_flush_dw = VCS_MI_FLUSH_DW,
.mi_user_interrupt = VCS_MI_USER_INTERRUPT,
},
- [VCS2] = {
+ [VCS1] = {
.pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
.mi_flush_dw = VCS2_MI_FLUSH_DW,
.mi_user_interrupt = VCS2_MI_USER_INTERRUPT,
},
- [VECS] = {
+ [VECS0] = {
.pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
.mi_flush_dw = VECS_MI_FLUSH_DW,
.mi_user_interrupt = VECS_MI_USER_INTERRUPT,
@@ -1080,6 +1148,7 @@
bool index_mode = false;
unsigned int post_sync;
int ret = 0;
+ u32 hws_pga, val;
post_sync = (cmd_val(s, 1) & PIPE_CONTROL_POST_SYNC_OP_MASK) >> 14;
@@ -1103,6 +1172,15 @@
index_mode = true;
ret |= cmd_address_audit(s, gma, sizeof(u64),
index_mode);
+ if (ret)
+ return ret;
+ if (index_mode) {
+ hws_pga = s->vgpu->hws_pga[s->engine->id];
+ gma = hws_pga + gma;
+ patch_value(s, cmd_ptr(s, 2), gma);
+ val = cmd_val(s, 1) & (~(1 << 21));
+ patch_value(s, cmd_ptr(s, 1), val);
+ }
}
}
}
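
The index-mode patching above can be read as the following rebase (hws_pga[] holds the guest-programmed status-page base per engine):

/*
 * With bit 21 set, the command's GMA is an offset into the HW status
 * page rather than an absolute address, so it is rebased and the
 * index bit is cleared before the buffer reaches hardware:
 *
 *	gma = s->vgpu->hws_pga[s->engine->id] + gma;
 *	patch_value(s, cmd_ptr(s, 2), gma);
 *	patch_value(s, cmd_ptr(s, 1), cmd_val(s, 1) & ~(1 << 21));
 */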
@@ -1111,15 +1189,15 @@
return ret;
if (cmd_val(s, 1) & PIPE_CONTROL_NOTIFY)
- set_bit(cmd_interrupt_events[s->ring_id].pipe_control_notify,
- s->workload->pending_events);
+ set_bit(cmd_interrupt_events[s->engine->id].pipe_control_notify,
+ s->workload->pending_events);
return 0;
}
static int cmd_handler_mi_user_interrupt(struct parser_exec_state *s)
{
- set_bit(cmd_interrupt_events[s->ring_id].mi_user_interrupt,
- s->workload->pending_events);
+ set_bit(cmd_interrupt_events[s->engine->id].mi_user_interrupt,
+ s->workload->pending_events);
patch_value(s, cmd_ptr(s, 0), MI_NOOP);
return 0;
}
@@ -1169,7 +1247,7 @@
static int gen8_decode_mi_display_flip(struct parser_exec_state *s,
struct mi_display_flip_command_info *info)
{
- struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = s->engine->i915;
struct plane_code_mapping gen8_plane_code[] = {
[0] = {PIPE_A, PLANE_A, PRIMARY_A_FLIP_DONE},
[1] = {PIPE_B, PLANE_A, PRIMARY_B_FLIP_DONE},
@@ -1186,7 +1264,7 @@
dword2 = cmd_val(s, 2);
v = (dword0 & GENMASK(21, 19)) >> 19;
- if (WARN_ON(v >= ARRAY_SIZE(gen8_plane_code)))
+ if (drm_WARN_ON(&dev_priv->drm, v >= ARRAY_SIZE(gen8_plane_code)))
return -EBADRQC;
info->pipe = gen8_plane_code[v].pipe;
@@ -1206,7 +1284,7 @@
info->stride_reg = SPRSTRIDE(info->pipe);
info->surf_reg = SPRSURF(info->pipe);
} else {
- WARN_ON(1);
+ drm_WARN_ON(&dev_priv->drm, 1);
return -EBADRQC;
}
return 0;
@@ -1215,7 +1293,7 @@
static int skl_decode_mi_display_flip(struct parser_exec_state *s,
struct mi_display_flip_command_info *info)
{
- struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = s->engine->i915;
struct intel_vgpu *vgpu = s->vgpu;
u32 dword0 = cmd_val(s, 0);
u32 dword1 = cmd_val(s, 1);
@@ -1274,15 +1352,12 @@
static int gen8_check_mi_display_flip(struct parser_exec_state *s,
struct mi_display_flip_command_info *info)
{
- struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
u32 stride, tile;
if (!info->async_flip)
return 0;
- if (IS_SKYLAKE(dev_priv)
- || IS_KABYLAKE(dev_priv)
- || IS_BROXTON(dev_priv)) {
+ if (INTEL_GEN(s->engine->i915) >= 9) {
stride = vgpu_vreg_t(s->vgpu, info->stride_reg) & GENMASK(9, 0);
tile = (vgpu_vreg_t(s->vgpu, info->ctrl_reg) &
GENMASK(12, 10)) >> 10;
@@ -1305,14 +1380,12 @@
struct parser_exec_state *s,
struct mi_display_flip_command_info *info)
{
- struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = s->engine->i915;
struct intel_vgpu *vgpu = s->vgpu;
set_mask_bits(&vgpu_vreg_t(vgpu, info->surf_reg), GENMASK(31, 12),
info->surf_val << 12);
- if (IS_SKYLAKE(dev_priv)
- || IS_KABYLAKE(dev_priv)
- || IS_BROXTON(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 9) {
set_mask_bits(&vgpu_vreg_t(vgpu, info->stride_reg), GENMASK(9, 0),
info->stride_val);
set_mask_bits(&vgpu_vreg_t(vgpu, info->ctrl_reg), GENMASK(12, 10),
@@ -1324,21 +1397,23 @@
info->tile_val << 10);
}
- vgpu_vreg_t(vgpu, PIPE_FRMCOUNT_G4X(info->pipe))++;
- intel_vgpu_trigger_virtual_event(vgpu, info->event);
+ if (info->plane == PLANE_PRIMARY)
+ vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(info->pipe))++;
+
+ if (info->async_flip)
+ intel_vgpu_trigger_virtual_event(vgpu, info->event);
+ else
+ set_bit(info->event, vgpu->irq.flip_done_event[info->pipe]);
+
return 0;
}
static int decode_mi_display_flip(struct parser_exec_state *s,
struct mi_display_flip_command_info *info)
{
- struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
-
- if (IS_BROADWELL(dev_priv))
+ if (IS_BROADWELL(s->engine->i915))
return gen8_decode_mi_display_flip(s, info);
- if (IS_SKYLAKE(dev_priv)
- || IS_KABYLAKE(dev_priv)
- || IS_BROXTON(dev_priv))
+ if (INTEL_GEN(s->engine->i915) >= 9)
return skl_decode_mi_display_flip(s, info);
return -ENODEV;
@@ -1364,6 +1439,15 @@
int ret;
int i;
int len = cmd_length(s);
+ u32 valid_len = CMD_LEN(1);
+
+ /* Flip Type == Stereo 3D Flip */
+ if (DWORD_FIELD(2, 1, 0) == 2)
+ valid_len++;
+ ret = gvt_check_valid_cmd_length(cmd_length(s),
+ valid_len);
+ if (ret)
+ return ret;
ret = decode_mi_display_flip(s, &info);
if (ret) {
@@ -1483,11 +1567,20 @@
int op_size = (cmd_length(s) - 3) * sizeof(u32);
int core_id = (cmd_val(s, 2) & (1 << 0)) ? 1 : 0;
unsigned long gma, gma_low, gma_high;
+ u32 valid_len = CMD_LEN(2);
int ret = 0;
/* check ppgtt */
if (!(cmd_val(s, 0) & (1 << 22)))
return 0;
+
+ /* check if QWORD */
+ if (DWORD_FIELD(0, 21, 21))
+ valid_len++;
+ ret = gvt_check_valid_cmd_length(cmd_length(s),
+ valid_len);
+ if (ret)
+ return ret;
gma = cmd_val(s, 2) & GENMASK(31, 2);
@@ -1531,9 +1624,18 @@
int op_size = (1 << ((cmd_val(s, 0) & GENMASK(20, 19)) >> 19)) *
sizeof(u32);
unsigned long gma, gma_high;
+ u32 valid_len = CMD_LEN(1);
int ret = 0;
if (!(cmd_val(s, 0) & (1 << 22)))
+ return ret;
+
+ /* check inline data */
+ if (cmd_val(s, 0) & BIT(18))
+ valid_len = CMD_LEN(9);
+ ret = gvt_check_valid_cmd_length(cmd_length(s),
+ valid_len);
+ if (ret)
return ret;
gma = cmd_val(s, 1) & GENMASK(31, 2);
@@ -1572,6 +1674,17 @@
unsigned long gma;
bool index_mode = false;
int ret = 0;
+ u32 hws_pga, val;
+ u32 valid_len = CMD_LEN(2);
+
+ ret = gvt_check_valid_cmd_length(cmd_length(s),
+ valid_len);
+ if (ret) {
+ /* Check again for Qword */
+ ret = gvt_check_valid_cmd_length(cmd_length(s),
+ ++valid_len);
+ return ret;
+ }
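
The double check above reflects the two legal encodings of this command:

/*
 * MI_FLUSH_DW is accepted at CMD_LEN(2) (4 DWords, DWord post-sync
 * write) or CMD_LEN(3) (5 DWords, QWord post-sync write); the second
 * gvt_check_valid_cmd_length() call retries with the QWord length.
 */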
/* Check post-sync and ppgtt bit */
if (((cmd_val(s, 0) >> 14) & 0x3) && (cmd_val(s, 1) & (1 << 2))) {
@@ -1582,11 +1695,20 @@
if (cmd_val(s, 0) & (1 << 21))
index_mode = true;
ret = cmd_address_audit(s, gma, sizeof(u64), index_mode);
+ if (ret)
+ return ret;
+ if (index_mode) {
+ hws_pga = s->vgpu->hws_pga[s->engine->id];
+ gma = hws_pga + gma;
+ patch_value(s, cmd_ptr(s, 1), gma);
+ val = cmd_val(s, 0) & (~(1 << 21));
+ patch_value(s, cmd_ptr(s, 0), val);
+ }
}
/* Check notify bit */
if ((cmd_val(s, 0) & (1 << 8)))
- set_bit(cmd_interrupt_events[s->ring_id].mi_flush_dw,
- s->workload->pending_events);
+ set_bit(cmd_interrupt_events[s->engine->id].mi_flush_dw,
+ s->workload->pending_events);
return ret;
}
@@ -1634,17 +1756,25 @@
static int batch_buffer_needs_scan(struct parser_exec_state *s)
{
/* Decide privilege based on address space */
- if (cmd_val(s, 0) & (1 << 8) &&
- !(s->vgpu->scan_nonprivbb & (1 << s->ring_id)))
+ if (cmd_val(s, 0) & BIT(8) &&
+ !(s->vgpu->scan_nonprivbb & s->engine->mask))
return 0;
+
return 1;
}
-static int find_bb_size(struct parser_exec_state *s, unsigned long *bb_size)
+static const char *repr_addr_type(unsigned int type)
+{
+ return type == PPGTT_BUFFER ? "ppgtt" : "ggtt";
+}
+
+static int find_bb_size(struct parser_exec_state *s,
+ unsigned long *bb_size,
+ unsigned long *bb_end_cmd_offset)
{
unsigned long gma = 0;
- struct cmd_info *info;
- uint32_t cmd_len = 0;
+ const struct cmd_info *info;
+ u32 cmd_len = 0;
bool bb_end = false;
struct intel_vgpu *vgpu = s->vgpu;
u32 cmd;
@@ -1652,6 +1782,7 @@
s->vgpu->gtt.ggtt_mm : s->workload->shadow_mm;
*bb_size = 0;
+ *bb_end_cmd_offset = 0;
/* get the start gm address of the batch buffer */
gma = get_gma_bb_from_cmd(s, 1);
@@ -1659,24 +1790,24 @@
return -EFAULT;
cmd = cmd_val(s, 0);
- info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
+ info = get_cmd_info(s->vgpu->gvt, cmd, s->engine);
if (info == NULL) {
- gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %d, workload=%p\n",
- cmd, get_opcode(cmd, s->ring_id),
- (s->buf_addr_type == PPGTT_BUFFER) ?
- "ppgtt" : "ggtt", s->ring_id, s->workload);
+ gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %s, workload=%p\n",
+ cmd, get_opcode(cmd, s->engine),
+ repr_addr_type(s->buf_addr_type),
+ s->engine->name, s->workload);
return -EBADRQC;
}
do {
if (copy_gma_to_hva(s->vgpu, mm,
- gma, gma + 4, &cmd) < 0)
+ gma, gma + 4, &cmd) < 0)
return -EFAULT;
- info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
+ info = get_cmd_info(s->vgpu->gvt, cmd, s->engine);
if (info == NULL) {
- gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %d, workload=%p\n",
- cmd, get_opcode(cmd, s->ring_id),
- (s->buf_addr_type == PPGTT_BUFFER) ?
- "ppgtt" : "ggtt", s->ring_id, s->workload);
+ gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %s, workload=%p\n",
+ cmd, get_opcode(cmd, s->engine),
+ repr_addr_type(s->buf_addr_type),
+ s->engine->name, s->workload);
return -EBADRQC;
}
@@ -1687,6 +1818,10 @@
/* chained batch buffer */
bb_end = true;
}
+
+ if (bb_end)
+ *bb_end_cmd_offset = *bb_size;
+
cmd_len = get_cmd_length(info, cmd) << 2;
*bb_size += cmd_len;
gma += cmd_len;
@@ -1695,23 +1830,47 @@
return 0;
}
+static int audit_bb_end(struct parser_exec_state *s, void *va)
+{
+ struct intel_vgpu *vgpu = s->vgpu;
+ u32 cmd = *(u32 *)va;
+ const struct cmd_info *info;
+
+ info = get_cmd_info(s->vgpu->gvt, cmd, s->engine);
+ if (info == NULL) {
+ gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %s, workload=%p\n",
+ cmd, get_opcode(cmd, s->engine),
+ repr_addr_type(s->buf_addr_type),
+ s->engine->name, s->workload);
+ return -EBADRQC;
+ }
+
+ if ((info->opcode == OP_MI_BATCH_BUFFER_END) ||
+ ((info->opcode == OP_MI_BATCH_BUFFER_START) &&
+ (BATCH_BUFFER_2ND_LEVEL_BIT(cmd) == 0)))
+ return 0;
+
+ return -EBADRQC;
+}
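
The offset recorded by find_bb_size() lets the parser re-check the batch terminator in the shadow copy rather than in guest memory:

/*
 * Only MI_BATCH_BUFFER_END, or an MI_BATCH_BUFFER_START with the
 * 2nd-level bit clear (a chained batch), is accepted as a terminator.
 * Auditing the shadow copy closes the window in which a guest could
 * rewrite the tail of the buffer between the sizing pass and
 * copy_gma_to_hva().
 */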
+
static int perform_bb_shadow(struct parser_exec_state *s)
{
struct intel_vgpu *vgpu = s->vgpu;
struct intel_vgpu_shadow_bb *bb;
unsigned long gma = 0;
unsigned long bb_size;
+ unsigned long bb_end_cmd_offset;
int ret = 0;
struct intel_vgpu_mm *mm = (s->buf_addr_type == GTT_BUFFER) ?
s->vgpu->gtt.ggtt_mm : s->workload->shadow_mm;
- unsigned long gma_start_offset = 0;
+ unsigned long start_offset = 0;
/* get the start gm address of the batch buffer */
gma = get_gma_bb_from_cmd(s, 1);
if (gma == INTEL_GVT_INVALID_ADDR)
return -EFAULT;
- ret = find_bb_size(s, &bb_size);
+ ret = find_bb_size(s, &bb_size, &bb_end_cmd_offset);
if (ret)
return ret;
@@ -1721,7 +1880,7 @@
bb->ppgtt = (s->buf_addr_type == GTT_BUFFER) ? false : true;
- /* the gma_start_offset stores the batch buffer's start gma's
+ /* the start_offset stores the batch buffer's start gma's
* offset relative to page boundary. so for non-privileged batch
* buffer, the shadowed gem object holds exactly the same page
* layout as original gem object. This is for the convenience of
@@ -1733,43 +1892,39 @@
* that of shadowed page.
*/
if (bb->ppgtt)
- gma_start_offset = gma & ~I915_GTT_PAGE_MASK;
+ start_offset = gma & ~I915_GTT_PAGE_MASK;
- bb->obj = i915_gem_object_create(s->vgpu->gvt->dev_priv,
- roundup(bb_size + gma_start_offset, PAGE_SIZE));
+ bb->obj = i915_gem_object_create_shmem(s->engine->i915,
+ round_up(bb_size + start_offset,
+ PAGE_SIZE));
if (IS_ERR(bb->obj)) {
ret = PTR_ERR(bb->obj);
goto err_free_bb;
}
- ret = i915_gem_obj_prepare_shmem_write(bb->obj, &bb->clflush);
- if (ret)
- goto err_free_obj;
-
bb->va = i915_gem_object_pin_map(bb->obj, I915_MAP_WB);
if (IS_ERR(bb->va)) {
ret = PTR_ERR(bb->va);
- goto err_finish_shmem_access;
- }
-
- if (bb->clflush & CLFLUSH_BEFORE) {
- drm_clflush_virt_range(bb->va, bb->obj->base.size);
- bb->clflush &= ~CLFLUSH_BEFORE;
+ goto err_free_obj;
}
ret = copy_gma_to_hva(s->vgpu, mm,
gma, gma + bb_size,
- bb->va + gma_start_offset);
+ bb->va + start_offset);
if (ret < 0) {
gvt_vgpu_err("fail to copy guest ring buffer\n");
ret = -EFAULT;
goto err_unmap;
}
+ ret = audit_bb_end(s, bb->va + start_offset + bb_end_cmd_offset);
+ if (ret)
+ goto err_unmap;
+
+ i915_gem_object_unlock(bb->obj);
INIT_LIST_HEAD(&bb->list);
list_add(&bb->list, &s->workload->shadow_bb);
- bb->accessing = true;
bb->bb_start_cmd_va = s->ip_va;
if ((s->buf_type == BATCH_BUFFER_INSTRUCTION) && (!s->is_ctx_wa))
@@ -1785,13 +1940,11 @@
* buffer's gma in pair. After all, we don't want to pin the shadow
* buffer here (too early).
*/
- s->ip_va = bb->va + gma_start_offset;
+ s->ip_va = bb->va + start_offset;
s->ip_gma = gma;
return 0;
err_unmap:
i915_gem_object_unpin_map(bb->obj);
-err_finish_shmem_access:
- i915_gem_obj_finish_shmem_access(bb->obj);
err_free_obj:
i915_gem_object_put(bb->obj);
err_free_bb:
@@ -1840,7 +1993,9 @@
return ret;
}
-static struct cmd_info cmd_info[] = {
+static int mi_noop_index;
+
+static const struct cmd_info cmd_info[] = {
{"MI_NOOP", OP_MI_NOOP, F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL},
{"MI_SET_PREDICATE", OP_MI_SET_PREDICATE, F_LEN_CONST, R_ALL, D_ALL,
@@ -1888,21 +2043,24 @@
{"MI_RS_CONTEXT", OP_MI_RS_CONTEXT, F_LEN_CONST, R_RCS, D_ALL, 0, 1,
NULL},
- {"MI_DISPLAY_FLIP", OP_MI_DISPLAY_FLIP, F_LEN_VAR | F_POST_HANDLE,
+ {"MI_DISPLAY_FLIP", OP_MI_DISPLAY_FLIP, F_LEN_VAR,
R_RCS | R_BCS, D_ALL, 0, 8, cmd_handler_mi_display_flip},
- {"MI_SEMAPHORE_MBOX", OP_MI_SEMAPHORE_MBOX, F_LEN_VAR, R_ALL, D_ALL,
- 0, 8, NULL},
+ {"MI_SEMAPHORE_MBOX", OP_MI_SEMAPHORE_MBOX, F_LEN_VAR | F_LEN_VAR_FIXED,
+ R_ALL, D_ALL, 0, 8, NULL, CMD_LEN(1)},
{"MI_MATH", OP_MI_MATH, F_LEN_VAR, R_ALL, D_ALL, 0, 8, NULL},
- {"MI_URB_CLEAR", OP_MI_URB_CLEAR, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
+ {"MI_URB_CLEAR", OP_MI_URB_CLEAR, F_LEN_VAR | F_LEN_VAR_FIXED, R_RCS,
+ D_ALL, 0, 8, NULL, CMD_LEN(0)},
- {"ME_SEMAPHORE_SIGNAL", OP_MI_SEMAPHORE_SIGNAL, F_LEN_VAR, R_ALL,
- D_BDW_PLUS, 0, 8, NULL},
+ {"MI_SEMAPHORE_SIGNAL", OP_MI_SEMAPHORE_SIGNAL,
+ F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_BDW_PLUS, 0, 8,
+ NULL, CMD_LEN(0)},
- {"ME_SEMAPHORE_WAIT", OP_MI_SEMAPHORE_WAIT, F_LEN_VAR, R_ALL, D_BDW_PLUS,
- ADDR_FIX_1(2), 8, cmd_handler_mi_semaphore_wait},
+ {"MI_SEMAPHORE_WAIT", OP_MI_SEMAPHORE_WAIT,
+ F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_BDW_PLUS, ADDR_FIX_1(2),
+ 8, cmd_handler_mi_semaphore_wait, CMD_LEN(2)},
{"MI_STORE_DATA_IMM", OP_MI_STORE_DATA_IMM, F_LEN_VAR, R_ALL, D_BDW_PLUS,
ADDR_FIX_1(1), 10, cmd_handler_mi_store_data_imm},
@@ -1916,8 +2074,9 @@
{"MI_UPDATE_GTT", OP_MI_UPDATE_GTT, F_LEN_VAR, R_ALL, D_BDW_PLUS, 0, 10,
cmd_handler_mi_update_gtt},
- {"MI_STORE_REGISTER_MEM", OP_MI_STORE_REGISTER_MEM, F_LEN_VAR, R_ALL,
- D_ALL, ADDR_FIX_1(2), 8, cmd_handler_srm},
+ {"MI_STORE_REGISTER_MEM", OP_MI_STORE_REGISTER_MEM,
+ F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_ALL, ADDR_FIX_1(2), 8,
+ cmd_handler_srm, CMD_LEN(2)},
{"MI_FLUSH_DW", OP_MI_FLUSH_DW, F_LEN_VAR, R_ALL, D_ALL, 0, 6,
cmd_handler_mi_flush_dw},
@@ -1925,26 +2084,30 @@
{"MI_CLFLUSH", OP_MI_CLFLUSH, F_LEN_VAR, R_ALL, D_ALL, ADDR_FIX_1(1),
10, cmd_handler_mi_clflush},
- {"MI_REPORT_PERF_COUNT", OP_MI_REPORT_PERF_COUNT, F_LEN_VAR, R_ALL,
- D_ALL, ADDR_FIX_1(1), 6, cmd_handler_mi_report_perf_count},
+ {"MI_REPORT_PERF_COUNT", OP_MI_REPORT_PERF_COUNT,
+ F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_ALL, ADDR_FIX_1(1), 6,
+ cmd_handler_mi_report_perf_count, CMD_LEN(2)},
- {"MI_LOAD_REGISTER_MEM", OP_MI_LOAD_REGISTER_MEM, F_LEN_VAR, R_ALL,
- D_ALL, ADDR_FIX_1(2), 8, cmd_handler_lrm},
+ {"MI_LOAD_REGISTER_MEM", OP_MI_LOAD_REGISTER_MEM,
+ F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_ALL, ADDR_FIX_1(2), 8,
+ cmd_handler_lrm, CMD_LEN(2)},
- {"MI_LOAD_REGISTER_REG", OP_MI_LOAD_REGISTER_REG, F_LEN_VAR, R_ALL,
- D_ALL, 0, 8, cmd_handler_lrr},
+ {"MI_LOAD_REGISTER_REG", OP_MI_LOAD_REGISTER_REG,
+ F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_ALL, 0, 8,
+ cmd_handler_lrr, CMD_LEN(1)},
- {"MI_RS_STORE_DATA_IMM", OP_MI_RS_STORE_DATA_IMM, F_LEN_VAR, R_RCS,
- D_ALL, 0, 8, NULL},
+ {"MI_RS_STORE_DATA_IMM", OP_MI_RS_STORE_DATA_IMM,
+ F_LEN_VAR | F_LEN_VAR_FIXED, R_RCS, D_ALL, 0,
+ 8, NULL, CMD_LEN(2)},
- {"MI_LOAD_URB_MEM", OP_MI_LOAD_URB_MEM, F_LEN_VAR, R_RCS, D_ALL,
- ADDR_FIX_1(2), 8, NULL},
+ {"MI_LOAD_URB_MEM", OP_MI_LOAD_URB_MEM, F_LEN_VAR | F_LEN_VAR_FIXED,
+ R_RCS, D_ALL, ADDR_FIX_1(2), 8, NULL, CMD_LEN(2)},
{"MI_STORE_URM_MEM", OP_MI_STORE_URM_MEM, F_LEN_VAR, R_RCS, D_ALL,
ADDR_FIX_1(2), 8, NULL},
- {"MI_OP_2E", OP_MI_2E, F_LEN_VAR, R_ALL, D_BDW_PLUS, ADDR_FIX_2(1, 2),
- 8, cmd_handler_mi_op_2e},
+ {"MI_OP_2E", OP_MI_2E, F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_BDW_PLUS,
+ ADDR_FIX_2(1, 2), 8, cmd_handler_mi_op_2e, CMD_LEN(3)},
{"MI_OP_2F", OP_MI_2F, F_LEN_VAR, R_ALL, D_BDW_PLUS, ADDR_FIX_1(1),
8, cmd_handler_mi_op_2f},
@@ -1954,8 +2117,8 @@
cmd_handler_mi_batch_buffer_start},
{"MI_CONDITIONAL_BATCH_BUFFER_END", OP_MI_CONDITIONAL_BATCH_BUFFER_END,
- F_LEN_VAR, R_ALL, D_ALL, ADDR_FIX_1(2), 8,
- cmd_handler_mi_conditional_batch_buffer_end},
+ F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_ALL, ADDR_FIX_1(2), 8,
+ cmd_handler_mi_conditional_batch_buffer_end, CMD_LEN(2)},
{"MI_LOAD_SCAN_LINES_INCL", OP_MI_LOAD_SCAN_LINES_INCL, F_LEN_CONST,
R_RCS | R_BCS, D_ALL, 0, 2, NULL},
@@ -2343,6 +2506,9 @@
{"OP_3D_MEDIA_0_1_4", OP_3D_MEDIA_0_1_4, F_LEN_VAR, R_RCS, D_ALL,
ADDR_FIX_1(1), 8, NULL},
+ {"OP_SWTESS_BASE_ADDRESS", OP_SWTESS_BASE_ADDRESS,
+ F_LEN_VAR, R_RCS, D_ALL, ADDR_FIX_2(1, 2), 3, NULL},
+
{"3DSTATE_VS", OP_3DSTATE_VS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
{"3DSTATE_SF", OP_3DSTATE_SF, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
@@ -2507,7 +2673,7 @@
0, 12, NULL},
{"VEB_DI_IECP", OP_VEB_DNDI_IECP_STATE, F_LEN_VAR, R_VECS, D_BDW_PLUS,
- 0, 20, NULL},
+ 0, 12, NULL},
};
static void add_cmd_entry(struct intel_gvt *gvt, struct cmd_entry *e)
@@ -2519,26 +2685,38 @@
static int cmd_parser_exec(struct parser_exec_state *s)
{
struct intel_vgpu *vgpu = s->vgpu;
- struct cmd_info *info;
+ const struct cmd_info *info;
u32 cmd;
int ret = 0;
cmd = cmd_val(s, 0);
- info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
+ /* fastpath for MI_NOOP */
+ if (cmd == MI_NOOP)
+ info = &cmd_info[mi_noop_index];
+ else
+ info = get_cmd_info(s->vgpu->gvt, cmd, s->engine);
+
if (info == NULL) {
- gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %d, workload=%p\n",
- cmd, get_opcode(cmd, s->ring_id),
- (s->buf_addr_type == PPGTT_BUFFER) ?
- "ppgtt" : "ggtt", s->ring_id, s->workload);
+ gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %s, workload=%p\n",
+ cmd, get_opcode(cmd, s->engine),
+ repr_addr_type(s->buf_addr_type),
+ s->engine->name, s->workload);
return -EBADRQC;
}
s->info = info;
- trace_gvt_command(vgpu->id, s->ring_id, s->ip_gma, s->ip_va,
+ trace_gvt_command(vgpu->id, s->engine->id, s->ip_gma, s->ip_va,
cmd_length(s), s->buf_type, s->buf_addr_type,
s->workload, info->name);
+
+ if ((info->flag & F_LEN_MASK) == F_LEN_VAR_FIXED) {
+ ret = gvt_check_valid_cmd_length(cmd_length(s),
+ info->valid_len);
+ if (ret)
+ return ret;
+ }
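
With this central check, F_LEN_VAR_FIXED handlers no longer need their own length validation; the accepted size comes straight from the table, e.g. this entry from the hunk above (CMD_LEN(2) == 4 DWords):

/*
 *	{"MI_SEMAPHORE_WAIT", OP_MI_SEMAPHORE_WAIT,
 *		F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_BDW_PLUS,
 *		ADDR_FIX_1(2), 8, cmd_handler_mi_semaphore_wait,
 *		CMD_LEN(2)},
 *
 * The MI_NOOP fastpath above also skips the hash lookup for what is
 * typically the most common command in a ring buffer.
 */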
if (info->handler) {
ret = info->handler(s);
@@ -2632,7 +2810,7 @@
s.buf_type = RING_BUFFER_INSTRUCTION;
s.buf_addr_type = GTT_BUFFER;
s.vgpu = workload->vgpu;
- s.ring_id = workload->ring_id;
+ s.engine = workload->engine;
s.ring_start = workload->rb_start;
s.ring_size = _RING_CTL_BUF_SIZE(workload->rb_ctl);
s.ring_head = gma_head;
@@ -2641,14 +2819,8 @@
s.workload = workload;
s.is_ctx_wa = false;
- if ((bypass_scan_mask & (1 << workload->ring_id)) ||
- gma_head == gma_tail)
+ if (bypass_scan_mask & workload->engine->mask || gma_head == gma_tail)
return 0;
-
- if (!intel_gvt_ggtt_validate_range(s.vgpu, s.ring_start, s.ring_size)) {
- ret = -EINVAL;
- goto out;
- }
ret = ip_gma_set(&s, gma_head);
if (ret)
@@ -2676,7 +2848,7 @@
I915_GTT_PAGE_SIZE)))
return -EINVAL;
- ring_tail = wa_ctx->indirect_ctx.size + 3 * sizeof(uint32_t);
+ ring_tail = wa_ctx->indirect_ctx.size + 3 * sizeof(u32);
ring_size = round_up(wa_ctx->indirect_ctx.size + CACHELINE_BYTES,
PAGE_SIZE);
gma_head = wa_ctx->indirect_ctx.guest_gma;
@@ -2686,7 +2858,7 @@
s.buf_type = RING_BUFFER_INSTRUCTION;
s.buf_addr_type = GTT_BUFFER;
s.vgpu = workload->vgpu;
- s.ring_id = workload->ring_id;
+ s.engine = workload->engine;
s.ring_start = wa_ctx->indirect_ctx.guest_gma;
s.ring_size = ring_size;
s.ring_head = gma_head;
@@ -2694,11 +2866,6 @@
s.rb_va = wa_ctx->indirect_ctx.shadow_va;
s.workload = workload;
s.is_ctx_wa = true;
-
- if (!intel_gvt_ggtt_validate_range(s.vgpu, s.ring_start, s.ring_size)) {
- ret = -EINVAL;
- goto out;
- }
ret = ip_gma_set(&s, gma_head);
if (ret)
@@ -2716,7 +2883,6 @@
struct intel_vgpu_submission *s = &vgpu->submission;
unsigned long gma_head, gma_tail, gma_top, guest_rb_size;
void *shadow_ring_buffer_va;
- int ring_id = workload->ring_id;
int ret;
guest_rb_size = _RING_CTL_BUF_SIZE(workload->rb_ctl);
@@ -2729,21 +2895,21 @@
gma_tail = workload->rb_start + workload->rb_tail;
gma_top = workload->rb_start + guest_rb_size;
- if (workload->rb_len > s->ring_scan_buffer_size[ring_id]) {
+ if (workload->rb_len > s->ring_scan_buffer_size[workload->engine->id]) {
void *p;
/* realloc the new ring buffer if needed */
- p = krealloc(s->ring_scan_buffer[ring_id], workload->rb_len,
- GFP_KERNEL);
+ p = krealloc(s->ring_scan_buffer[workload->engine->id],
+ workload->rb_len, GFP_KERNEL);
if (!p) {
gvt_vgpu_err("fail to re-alloc ring scan buffer\n");
return -ENOMEM;
}
- s->ring_scan_buffer[ring_id] = p;
- s->ring_scan_buffer_size[ring_id] = workload->rb_len;
+ s->ring_scan_buffer[workload->engine->id] = p;
+ s->ring_scan_buffer_size[workload->engine->id] = workload->rb_len;
}
- shadow_ring_buffer_va = s->ring_scan_buffer[ring_id];
+ shadow_ring_buffer_va = s->ring_scan_buffer[workload->engine->id];
/* get shadow ring buffer va */
workload->shadow_ring_buffer_va = shadow_ring_buffer_va;
@@ -2801,9 +2967,9 @@
int ret = 0;
void *map;
- obj = i915_gem_object_create(workload->vgpu->gvt->dev_priv,
- roundup(ctx_size + CACHELINE_BYTES,
- PAGE_SIZE));
+ obj = i915_gem_object_create_shmem(workload->engine->i915,
+ roundup(ctx_size + CACHELINE_BYTES,
+ PAGE_SIZE));
if (IS_ERR(obj))
return PTR_ERR(obj);
@@ -2815,7 +2981,9 @@
goto put_obj;
}
+ i915_gem_object_lock(obj, NULL);
ret = i915_gem_object_set_to_cpu_domain(obj, false);
+ i915_gem_object_unlock(obj);
if (ret) {
gvt_vgpu_err("failed to set shadow indirect ctx to CPU\n");
goto unmap_src;
@@ -2843,7 +3011,7 @@
static int combine_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
- uint32_t per_ctx_start[CACHELINE_DWORDS] = {0};
+ u32 per_ctx_start[CACHELINE_DWORDS] = {0};
unsigned char *bb_start_sva;
if (!wa_ctx->per_ctx.valid)
@@ -2888,30 +3056,14 @@
return 0;
}
-static struct cmd_info *find_cmd_entry_any_ring(struct intel_gvt *gvt,
- unsigned int opcode, unsigned long rings)
-{
- struct cmd_info *info = NULL;
- unsigned int ring;
-
- for_each_set_bit(ring, &rings, I915_NUM_ENGINES) {
- info = find_cmd_entry(gvt, opcode, ring);
- if (info)
- break;
- }
- return info;
-}
-
static int init_cmd_table(struct intel_gvt *gvt)
{
+ unsigned int gen_type = intel_gvt_get_device_type(gvt);
int i;
- struct cmd_entry *e;
- struct cmd_info *info;
- unsigned int gen_type;
-
- gen_type = intel_gvt_get_device_type(gvt);
for (i = 0; i < ARRAY_SIZE(cmd_info); i++) {
+ struct cmd_entry *e;
+
if (!(cmd_info[i].devices & gen_type))
continue;
@@ -2920,21 +3072,16 @@
return -ENOMEM;
e->info = &cmd_info[i];
- info = find_cmd_entry_any_ring(gvt,
- e->info->opcode, e->info->rings);
- if (info) {
- gvt_err("%s %s duplicated\n", e->info->name,
- info->name);
- kfree(e);
- return -EEXIST;
- }
+ if (cmd_info[i].opcode == OP_MI_NOOP)
+ mi_noop_index = i;
INIT_HLIST_NODE(&e->hlist);
add_cmd_entry(gvt, e);
gvt_dbg_cmd("add %-30s op %04x flag %x devs %02x rings %02x\n",
- e->info->name, e->info->opcode, e->info->flag,
- e->info->devices, e->info->rings);
+ e->info->name, e->info->opcode, e->info->flag,
+ e->info->devices, e->info->rings);
}
+
return 0;
}
--
Gitblit v1.6.2