From d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Mon, 11 Dec 2023 02:45:28 +0000
Subject: [PATCH] add boot partition size

---
 kernel/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h | 165 +++++++++++++++++++++++++------------------------
 1 files changed, 75 insertions(+), 90 deletions(-)

diff --git a/kernel/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/kernel/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index 880ac11..aea49ba 100644
--- a/kernel/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/kernel/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -24,11 +24,25 @@
 #ifndef AMDGPU_VIRT_H
 #define AMDGPU_VIRT_H
 
+#include "amdgv_sriovmsg.h"
+
 #define AMDGPU_SRIOV_CAPS_SRIOV_VBIOS (1 << 0) /* vBIOS is sr-iov ready */
 #define AMDGPU_SRIOV_CAPS_ENABLE_IOV (1 << 1) /* sr-iov is enabled on this GPU */
 #define AMDGPU_SRIOV_CAPS_IS_VF (1 << 2) /* this GPU is a virtual function */
 #define AMDGPU_PASSTHROUGH_MODE (1 << 3) /* thw whole GPU is pass through for VM */
 #define AMDGPU_SRIOV_CAPS_RUNTIME (1 << 4) /* is out of full access mode */
+#define AMDGPU_VF_MMIO_ACCESS_PROTECT (1 << 5) /* MMIO write access is not allowed in sriov runtime */
+
+/* all asic after AI use this offset */
+#define mmRCC_IOV_FUNC_IDENTIFIER 0xDE5
+/* tonga/fiji use this offset */
+#define mmBIF_IOV_FUNC_IDENTIFIER 0x1503
+
+enum amdgpu_sriov_vf_mode {
+	SRIOV_VF_MODE_BARE_METAL = 0,
+	SRIOV_VF_MODE_ONE_VF,
+	SRIOV_VF_MODE_MULTI_VF,
+};
 
 struct amdgpu_mm_table {
 	struct amdgpu_bo *bo;
@@ -54,6 +68,7 @@
 struct amdgpu_virt_ops {
 	int (*req_full_gpu)(struct amdgpu_device *adev, bool init);
 	int (*rel_full_gpu)(struct amdgpu_device *adev, bool init);
+	int (*req_init_data)(struct amdgpu_device *adev);
 	int (*reset_gpu)(struct amdgpu_device *adev);
 	int (*wait_reset)(struct amdgpu_device *adev);
 	void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1, u32 data2, u32 data3);
@@ -63,11 +78,14 @@
  * Firmware Reserve Frame buffer
  */
 struct amdgpu_virt_fw_reserve {
-	struct amdgim_pf2vf_info_header *p_pf2vf;
-	struct amdgim_vf2pf_info_header *p_vf2pf;
+	struct amd_sriov_msg_pf2vf_info_header *p_pf2vf;
+	struct amd_sriov_msg_vf2pf_info_header *p_vf2pf;
 	unsigned int checksum_key;
 };
+
 /*
+ * Legacy GIM header
+ *
  * Defination between PF and VF
  * Structures forcibly aligned to 4 to keep the same style as PF.
  */
@@ -83,17 +101,15 @@
 	AMDGIM_FEATURE_GIM_LOAD_UCODES = 0x2,
 	/* VRAM LOST by GIM */
 	AMDGIM_FEATURE_GIM_FLR_VRAMLOST = 0x4,
+	/* MM bandwidth */
+	AMDGIM_FEATURE_GIM_MM_BW_MGR = 0x8,
+	/* PP ONE VF MODE in GIM */
+	AMDGIM_FEATURE_PP_ONE_VF = (1 << 4),
 };
 
-struct amdgim_pf2vf_info_header {
-	/* the total structure size in byte. */
-	uint32_t size;
-	/* version of this structure, written by the GIM */
-	uint32_t version;
-} __aligned(4);
-struct amdgim_pf2vf_info_v1 {
+struct amdgim_pf2vf_info_v1 {
 	/* header contains size and version */
-	struct amdgim_pf2vf_info_header header;
+	struct amd_sriov_msg_pf2vf_info_header header;
 	/* max_width * max_height */
 	unsigned int uvd_enc_max_pixels_count;
 	/* 16x16 pixels/sec, codec independent */
@@ -110,47 +126,9 @@
 	unsigned int checksum;
 } __aligned(4);
 
-struct amdgim_pf2vf_info_v2 {
-	/* header contains size and version */
-	struct amdgim_pf2vf_info_header header;
-	/* use private key from mailbox 2 to create chueksum */
-	uint32_t checksum;
-	/* The features flags of the GIM driver supports. */
-	uint32_t feature_flags;
-	/* max_width * max_height */
-	uint32_t uvd_enc_max_pixels_count;
-	/* 16x16 pixels/sec, codec independent */
-	uint32_t uvd_enc_max_bandwidth;
-	/* max_width * max_height */
-	uint32_t vce_enc_max_pixels_count;
-	/* 16x16 pixels/sec, codec independent */
-	uint32_t vce_enc_max_bandwidth;
-	/* MEC FW position in kb from the start of VF visible frame buffer */
-	uint64_t mecfw_kboffset;
-	/* MEC FW size in KB */
-	uint32_t mecfw_ksize;
-	/* UVD FW position in kb from the start of VF visible frame buffer */
-	uint64_t uvdfw_kboffset;
-	/* UVD FW size in KB */
-	uint32_t uvdfw_ksize;
-	/* VCE FW position in kb from the start of VF visible frame buffer */
-	uint64_t vcefw_kboffset;
-	/* VCE FW size in KB */
-	uint32_t vcefw_ksize;
-	uint32_t reserved[AMDGIM_GET_STRUCTURE_RESERVED_SIZE(256, 0, 0, (9 + sizeof(struct amdgim_pf2vf_info_header)/sizeof(uint32_t)), 3)];
-} __aligned(4);
-
-
-struct amdgim_vf2pf_info_header {
-	/* the total structure size in byte. */
-	uint32_t size;
-	/*version of this structure, written by the guest */
-	uint32_t version;
-} __aligned(4);
-
 struct amdgim_vf2pf_info_v1 {
 	/* header contains size and version */
-	struct amdgim_vf2pf_info_header header;
+	struct amd_sriov_msg_vf2pf_info_header header;
 	/* driver version */
 	char driver_version[64];
 	/* driver certification, 1=WHQL, 0=None */
@@ -180,7 +158,7 @@
 
 struct amdgim_vf2pf_info_v2 {
 	/* header contains size and version */
-	struct amdgim_vf2pf_info_header header;
+	struct amd_sriov_msg_vf2pf_info_header header;
 	uint32_t checksum;
 	/* driver version */
 	uint8_t driver_version[64];
@@ -206,39 +184,25 @@
 	uint32_t uvd_enc_usage;
 	/* guest uvd engine usage percentage. 0xffff means N/A. */
 	uint32_t uvd_enc_health;
-	uint32_t reserved[AMDGIM_GET_STRUCTURE_RESERVED_SIZE(256, 64, 0, (12 + sizeof(struct amdgim_vf2pf_info_header)/sizeof(uint32_t)), 0)];
+	uint32_t reserved[AMDGIM_GET_STRUCTURE_RESERVED_SIZE(256, 64, 0, (12 + sizeof(struct amd_sriov_msg_vf2pf_info_header)/sizeof(uint32_t)), 0)];
 } __aligned(4);
 
-#define AMDGPU_FW_VRAM_VF2PF_VER 2
-typedef struct amdgim_vf2pf_info_v2 amdgim_vf2pf_info ;
-
-#define AMDGPU_FW_VRAM_VF2PF_WRITE(adev, field, val) \
-	do { \
-		((amdgim_vf2pf_info *)adev->virt.fw_reserve.p_vf2pf)->field = (val); \
-	} while (0)
-
-#define AMDGPU_FW_VRAM_VF2PF_READ(adev, field, val) \
-	do { \
-		(*val) = ((amdgim_vf2pf_info *)adev->virt.fw_reserve.p_vf2pf)->field; \
-	} while (0)
-
-#define AMDGPU_FW_VRAM_PF2VF_READ(adev, field, val) \
-	do { \
-		if (!adev->virt.fw_reserve.p_pf2vf) \
-			*(val) = 0; \
-		else { \
-			if (adev->virt.fw_reserve.p_pf2vf->version == 1) \
-				*(val) = ((struct amdgim_pf2vf_info_v1 *)adev->virt.fw_reserve.p_pf2vf)->field; \
-			if (adev->virt.fw_reserve.p_pf2vf->version == 2) \
-				*(val) = ((struct amdgim_pf2vf_info_v2 *)adev->virt.fw_reserve.p_pf2vf)->field; \
-		} \
-	} while (0)
+struct amdgpu_virt_ras_err_handler_data {
+	/* point to bad page records array */
+	struct eeprom_table_record *bps;
+	/* point to reserved bo array */
+	struct amdgpu_bo **bps_bo;
+	/* the count of entries */
+	int count;
+	/* last reserved entry's index + 1 */
+	int last_reserved;
+};
 
 /* GPU virtualization */
 struct amdgpu_virt {
 	uint32_t caps;
 	struct amdgpu_bo *csa_obj;
-	uint64_t csa_vmid0_addr;
+	void *csa_cpu_addr;
 	bool chained_ib_support;
 	uint32_t reg_val_offs;
 	struct amdgpu_irq_src ack_irq;
@@ -246,12 +210,19 @@
 	struct work_struct flr_work;
 	struct amdgpu_mm_table mm_table;
 	const struct amdgpu_virt_ops *ops;
-	struct amdgpu_vf_error_buffer vf_errors;
+	struct amdgpu_vf_error_buffer vf_errors;
 	struct amdgpu_virt_fw_reserve fw_reserve;
 	uint32_t gim_feature;
-};
+	uint32_t reg_access_mode;
+	int req_init_data_ver;
+	bool tdr_debug;
+	struct amdgpu_virt_ras_err_handler_data *virt_eh_data;
+	bool ras_init_done;
 
-#define AMDGPU_CSA_SIZE (8 * 1024)
+	/* vf2pf message */
+	struct delayed_work vf2pf_work;
+	uint32_t vf2pf_update_interval_ms;
+};
 
 #define amdgpu_sriov_enabled(adev) \
 ((adev)->virt.caps & AMDGPU_SRIOV_CAPS_ENABLE_IOV)
@@ -265,8 +236,14 @@
 #define amdgpu_sriov_runtime(adev) \
 ((adev)->virt.caps & AMDGPU_SRIOV_CAPS_RUNTIME)
 
+#define amdgpu_sriov_fullaccess(adev) \
+(amdgpu_sriov_vf((adev)) && !amdgpu_sriov_runtime((adev)))
+
 #define amdgpu_passthrough(adev) \
 ((adev)->virt.caps & AMDGPU_PASSTHROUGH_MODE)
+
+#define amdgpu_sriov_vf_mmio_access_protection(adev) \
+((adev)->virt.caps & AMDGPU_VF_MMIO_ACCESS_PROTECT)
 
 static inline bool is_virtual_machine(void)
 {
@@ -277,26 +254,34 @@
 #endif
 }
 
-struct amdgpu_vm;
+#define amdgpu_sriov_is_pp_one_vf(adev) \
+	((adev)->virt.gim_feature & AMDGIM_FEATURE_PP_ONE_VF)
+#define amdgpu_sriov_is_debug(adev) \
+	((!amdgpu_in_reset(adev)) && adev->virt.tdr_debug)
+#define amdgpu_sriov_is_normal(adev) \
+	((!amdgpu_in_reset(adev)) && (!adev->virt.tdr_debug))
 
-uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev);
 bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);
-int amdgpu_allocate_static_csa(struct amdgpu_device *adev);
-int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-			  struct amdgpu_bo_va **bo_va);
-void amdgpu_free_static_csa(struct amdgpu_device *adev);
 void amdgpu_virt_init_setting(struct amdgpu_device *adev);
-uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg);
-void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v);
+void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
+					uint32_t reg0, uint32_t rreg1,
+					uint32_t ref, uint32_t mask);
 int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init);
 int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init);
 int amdgpu_virt_reset_gpu(struct amdgpu_device *adev);
+void amdgpu_virt_request_init_data(struct amdgpu_device *adev);
 int amdgpu_virt_wait_reset(struct amdgpu_device *adev);
 int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev);
 void amdgpu_virt_free_mm_table(struct amdgpu_device *adev);
-int amdgpu_virt_fw_reserve_get_checksum(void *obj, unsigned long obj_size,
-					unsigned int key,
-					unsigned int chksum);
+void amdgpu_virt_release_ras_err_handler_data(struct amdgpu_device *adev);
 void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev);
+void amdgpu_virt_exchange_data(struct amdgpu_device *adev);
+void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev);
+void amdgpu_detect_virtualization(struct amdgpu_device *adev);
+bool amdgpu_virt_can_access_debugfs(struct amdgpu_device *adev);
+int amdgpu_virt_enable_access_debugfs(struct amdgpu_device *adev);
+void amdgpu_virt_disable_access_debugfs(struct amdgpu_device *adev);
+
+enum amdgpu_sriov_vf_mode amdgpu_virt_get_sriov_vf_mode(struct amdgpu_device *adev);
 
 #endif
-- 
Gitblit v1.6.2
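
Editor's note (not part of the patch): the hunks above introduce enum amdgpu_sriov_vf_mode together with the amdgpu_virt_get_sriov_vf_mode() declaration. The minimal sketch below only illustrates how such a mode could be derived from macros this header already provides; the helper name example_get_vf_mode is hypothetical and this is not the in-tree implementation. It assumes the usual amdgpu driver context (struct amdgpu_device and the existing amdgpu_sriov_vf() caps test) plus the SRIOV_VF_MODE_* values and amdgpu_sriov_is_pp_one_vf() added by this patch.

/*
 * Sketch only: derive the SR-IOV VF mode from the caps and GIM feature
 * bits declared in amdgpu_virt.h. Hypothetical helper, not the driver's
 * amdgpu_virt_get_sriov_vf_mode().
 */
static enum amdgpu_sriov_vf_mode example_get_vf_mode(struct amdgpu_device *adev)
{
	/* Not a virtual function at all: bare-metal operation. */
	if (!amdgpu_sriov_vf(adev))
		return SRIOV_VF_MODE_BARE_METAL;

	/* Host GIM advertised "PP one VF" through the pf2vf feature flags. */
	if (amdgpu_sriov_is_pp_one_vf(adev))
		return SRIOV_VF_MODE_ONE_VF;

	/* Otherwise the GPU is shared between multiple VFs. */
	return SRIOV_VF_MODE_MULTI_VF;
}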