@@ -53,20 +53,28 @@
  */
 bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size)
 {
-        if ((!vgpu_gmadr_is_valid(vgpu, addr)) || (size
-                        && !vgpu_gmadr_is_valid(vgpu, addr + size - 1))) {
-                gvt_vgpu_err("invalid range gmadr 0x%llx size 0x%x\n",
-                             addr, size);
-                return false;
-        }
-        return true;
+        if (size == 0)
+                return vgpu_gmadr_is_valid(vgpu, addr);
+
+        if (vgpu_gmadr_is_aperture(vgpu, addr) &&
+            vgpu_gmadr_is_aperture(vgpu, addr + size - 1))
+                return true;
+        else if (vgpu_gmadr_is_hidden(vgpu, addr) &&
+                 vgpu_gmadr_is_hidden(vgpu, addr + size - 1))
+                return true;
+
+        gvt_dbg_mm("Invalid ggtt range at 0x%llx, size: 0x%x\n",
+                   addr, size);
+        return false;
 }
 
 /* translate a guest gmadr to host gmadr */
 int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr)
 {
-        if (WARN(!vgpu_gmadr_is_valid(vgpu, g_addr),
-                 "invalid guest gmadr %llx\n", g_addr))
+        struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
+
+        if (drm_WARN(&i915->drm, !vgpu_gmadr_is_valid(vgpu, g_addr),
+                     "invalid guest gmadr %llx\n", g_addr))
                 return -EACCES;
 
         if (vgpu_gmadr_is_aperture(vgpu, g_addr))
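Note on the hunk above: the rewritten check validates the aperture (low) and hidden (high) GGTT regions separately, so a range that starts in one region and ends in the other now fails instead of slipping through the old endpoints-only test. A minimal userspace sketch of the predicate, with purely hypothetical region bounds:

    #include <stdbool.h>
    #include <stdint.h>

    struct region { uint64_t base, size; };    /* hypothetical bounds */

    static bool in_region(struct region r, uint64_t a)
    {
        return a >= r.base && a - r.base < r.size;
    }

    /* Valid only when [addr, addr + size - 1] lies wholly inside one region. */
    static bool range_ok(struct region lo, struct region hi,
                         uint64_t addr, uint32_t size)
    {
        if (size == 0)
            return in_region(lo, addr) || in_region(hi, addr);
        return (in_region(lo, addr) && in_region(lo, addr + size - 1)) ||
               (in_region(hi, addr) && in_region(hi, addr + size - 1));
    }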
---|
@@ -81,8 +89,10 @@
 /* translate a host gmadr to guest gmadr */
 int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr)
 {
-        if (WARN(!gvt_gmadr_is_valid(vgpu->gvt, h_addr),
-                 "invalid host gmadr %llx\n", h_addr))
+        struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
+
+        if (drm_WARN(&i915->drm, !gvt_gmadr_is_valid(vgpu->gvt, h_addr),
+                     "invalid host gmadr %llx\n", h_addr))
                 return -EACCES;
 
         if (gvt_gmadr_is_aperture(vgpu->gvt, h_addr))
---|
@@ -269,24 +279,23 @@
         return gtt_type_table[type].pse_entry_type;
 }
 
-static u64 read_pte64(struct drm_i915_private *dev_priv, unsigned long index)
+static u64 read_pte64(struct i915_ggtt *ggtt, unsigned long index)
 {
-        void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;
+        void __iomem *addr = (gen8_pte_t __iomem *)ggtt->gsm + index;
 
         return readq(addr);
 }
 
-static void ggtt_invalidate(struct drm_i915_private *dev_priv)
+static void ggtt_invalidate(struct intel_gt *gt)
 {
-        mmio_hw_access_pre(dev_priv);
-        I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
-        mmio_hw_access_post(dev_priv);
+        mmio_hw_access_pre(gt);
+        intel_uncore_write(gt->uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
+        mmio_hw_access_post(gt);
 }
 
-static void write_pte64(struct drm_i915_private *dev_priv,
-                unsigned long index, u64 pte)
+static void write_pte64(struct i915_ggtt *ggtt, unsigned long index, u64 pte)
 {
-        void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;
+        void __iomem *addr = (gen8_pte_t __iomem *)ggtt->gsm + index;
 
         writeq(pte, addr);
 }
---|
@@ -309,7 +318,7 @@
                 if (WARN_ON(ret))
                         return ret;
         } else if (!pt) {
-                e->val64 = read_pte64(vgpu->gvt->dev_priv, index);
+                e->val64 = read_pte64(vgpu->gvt->gt->ggtt, index);
         } else {
                 e->val64 = *((u64 *)pt + index);
         }
---|
@@ -334,7 +343,7 @@
                 if (WARN_ON(ret))
                         return ret;
         } else if (!pt) {
-                write_pte64(vgpu->gvt->dev_priv, index, e->val64);
+                write_pte64(vgpu->gvt->gt->ggtt, index, e->val64);
         } else {
                 *((u64 *)pt + index) = e->val64;
         }
---|
@@ -627,8 +636,17 @@
                 struct intel_gvt_gtt_entry *entry, unsigned long index)
 {
         struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
+        unsigned long offset = index;
 
         GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);
+
+        if (vgpu_gmadr_is_aperture(mm->vgpu, index << I915_GTT_PAGE_SHIFT)) {
+                offset -= (vgpu_aperture_gmadr_base(mm->vgpu) >> PAGE_SHIFT);
+                mm->ggtt_mm.host_ggtt_aperture[offset] = entry->val64;
+        } else if (vgpu_gmadr_is_hidden(mm->vgpu, index << I915_GTT_PAGE_SHIFT)) {
+                offset -= (vgpu_hidden_gmadr_base(mm->vgpu) >> PAGE_SHIFT);
+                mm->ggtt_mm.host_ggtt_hidden[offset] = entry->val64;
+        }
 
         pte_ops->set_entry(NULL, entry, index, false, 0, mm->vgpu);
 }
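Note on the hunk above: every host PTE write is now mirrored into one of two vzalloc'ed arrays (host_ggtt_aperture/host_ggtt_hidden), indexed by the entry's distance from the base of its region; intel_gvt_restore_ggtt() later replays this snapshot. A sketch of the index translation, assuming 4 KiB GTT pages (a shift of 12):

    #include <stdint.h>

    /* Each GGTT entry maps one 4 KiB page, so entry index == gma >> 12. */
    static uint64_t snapshot_slot(uint64_t index, uint64_t region_base)
    {
        return index - (region_base >> 12);   /* 0-based slot in the array */
    }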
---|
@@ -728,7 +746,7 @@
 
 static void ppgtt_free_spt(struct intel_vgpu_ppgtt_spt *spt)
 {
-        struct device *kdev = &spt->vgpu->gvt->dev_priv->drm.pdev->dev;
+        struct device *kdev = &spt->vgpu->gvt->gt->i915->drm.pdev->dev;
 
         trace_spt_free(spt->vgpu->id, spt, spt->guest_page.type);
 
---|
@@ -750,14 +768,20 @@
 
 static void ppgtt_free_all_spt(struct intel_vgpu *vgpu)
 {
-        struct intel_vgpu_ppgtt_spt *spt;
+        struct intel_vgpu_ppgtt_spt *spt, *spn;
         struct radix_tree_iter iter;
-        void **slot;
+        LIST_HEAD(all_spt);
+        void __rcu **slot;
 
+        rcu_read_lock();
         radix_tree_for_each_slot(slot, &vgpu->gtt.spt_tree, &iter, 0) {
                 spt = radix_tree_deref_slot(slot);
-                ppgtt_free_spt(spt);
+                list_move(&spt->post_shadow_list, &all_spt);
         }
+        rcu_read_unlock();
+
+        list_for_each_entry_safe(spt, spn, &all_spt, post_shadow_list)
+                ppgtt_free_spt(spt);
 }
 
 static int ppgtt_handle_guest_write_page_table_bytes(
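Note on the hunk above: the radix-tree walk now runs under rcu_read_lock(), where sleeping is forbidden, so the walk only collects the spts onto a private list; ppgtt_free_spt(), which can sleep while unmapping DMA, runs after the RCU section ends. A runnable userspace analogue of the two-phase pattern (types and names are illustrative only, not the kernel's):

    #include <stdlib.h>

    struct spt { struct spt *next; };

    static void free_all(struct spt **tree)
    {
        struct spt *batch = NULL, *n;

        /* phase 1: unlink under the "no sleeping" lock (rcu_read_lock) */
        while (*tree) {
            n = *tree;
            *tree = n->next;
            n->next = batch;
            batch = n;
        }
        /* rcu_read_unlock() would go here */

        /* phase 2: free at leisure; sleeping is allowed again */
        while (batch) {
            n = batch->next;
            free(batch);
            batch = n;
        }
    }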
---|
@@ -805,9 +829,9 @@
 
 /* Allocate shadow page table without guest page. */
 static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt(
-                struct intel_vgpu *vgpu, intel_gvt_gtt_type_t type)
+                struct intel_vgpu *vgpu, enum intel_gvt_gtt_type type)
 {
-        struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+        struct device *kdev = &vgpu->gvt->gt->i915->drm.pdev->dev;
         struct intel_vgpu_ppgtt_spt *spt = NULL;
         dma_addr_t daddr;
         int ret;
---|
@@ -855,7 +879,7 @@
 
 /* Allocate shadow page table associated with specific gfn. */
 static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt_gfn(
-                struct intel_vgpu *vgpu, intel_gvt_gtt_type_t type,
+                struct intel_vgpu *vgpu, enum intel_gvt_gtt_type type,
                 unsigned long gfn, bool guest_pde_ips)
 {
         struct intel_vgpu_ppgtt_spt *spt;
---|
@@ -928,15 +952,27 @@
 static int ppgtt_invalidate_spt_by_shadow_entry(struct intel_vgpu *vgpu,
                 struct intel_gvt_gtt_entry *e)
 {
+        struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
         struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
         struct intel_vgpu_ppgtt_spt *s;
-        intel_gvt_gtt_type_t cur_pt_type;
+        enum intel_gvt_gtt_type cur_pt_type;
 
         GEM_BUG_ON(!gtt_type_is_pt(get_next_pt_type(e->type)));
 
         if (e->type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY
                 && e->type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
-                cur_pt_type = get_next_pt_type(e->type) + 1;
+                cur_pt_type = get_next_pt_type(e->type);
+
+                if (!gtt_type_is_pt(cur_pt_type) ||
+                    !gtt_type_is_pt(cur_pt_type + 1)) {
+                        drm_WARN(&i915->drm, 1,
+                                 "Invalid page table type, cur_pt_type is: %d\n",
+                                 cur_pt_type);
+                        return -EINVAL;
+                }
+
+                cur_pt_type += 1;
+
                 if (ops->get_pfn(e) ==
                         vgpu->gtt.scratch_pt[cur_pt_type].page_mfn)
                         return 0;
---|
@@ -1023,7 +1059,7 @@
 
 static bool vgpu_ips_enabled(struct intel_vgpu *vgpu)
 {
-        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+        struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
 
         if (INTEL_GEN(dev_priv) == 9 || INTEL_GEN(dev_priv) == 10) {
                 u32 ips = vgpu_vreg_t(vgpu, GEN8_GAMW_ECO_DEV_RW_IA) &
---|
@@ -1070,6 +1106,11 @@
         } else {
                 int type = get_next_pt_type(we->type);
 
+                if (!gtt_type_is_pt(type)) {
+                        ret = -EINVAL;
+                        goto err;
+                }
+
                 spt = ppgtt_alloc_spt_gfn(vgpu, type, ops->get_pfn(we), ips);
                 if (IS_ERR(spt)) {
                         ret = PTR_ERR(spt);
---|
@@ -1091,6 +1132,7 @@
 
 err_free_spt:
         ppgtt_free_spt(spt);
+        spt = NULL;
 err:
         gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
                      spt, we->val64, we->type);
---|
@@ -1113,6 +1155,10 @@
 }
 
 /**
+ * Check if 2MB huge GTT page is possible
+ * @vgpu: target vgpu
+ * @entry: target pfn's gtt entry
+ *
  * Return 1 if 2MB huge gtt shadowing is possible, 0 if miscondition,
  * negative if found err.
  */
---|
@@ -1122,7 +1168,7 @@
         struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
         unsigned long pfn;
 
-        if (!HAS_PAGE_SIZES(vgpu->gvt->dev_priv, I915_GTT_PAGE_SIZE_2M))
+        if (!HAS_PAGE_SIZES(vgpu->gvt->gt->i915, I915_GTT_PAGE_SIZE_2M))
                 return 0;
 
         pfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, ops->get_pfn(entry));
---|
@@ -1155,10 +1201,8 @@
         for_each_shadow_entry(sub_spt, &sub_se, sub_index) {
                 ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu,
                                 start_gfn + sub_index, PAGE_SIZE, &dma_addr);
-                if (ret) {
-                        ppgtt_invalidate_spt(spt);
-                        return ret;
-                }
+                if (ret)
+                        goto err;
                 sub_se.val64 = se->val64;
 
                 /* Copy the PAT field from PDE. */
---|
@@ -1177,6 +1221,17 @@
         ops->set_pfn(se, sub_spt->shadow_page.mfn);
         ppgtt_set_shadow_entry(spt, se, index);
         return 0;
+err:
+        /* Cancel the existing address mappings of DMA addr. */
+        for_each_present_shadow_entry(sub_spt, &sub_se, sub_index) {
+                gvt_vdbg_mm("invalidate 4K entry\n");
+                ppgtt_invalidate_pte(sub_spt, &sub_se);
+        }
+        /* Release the newly allocated spt. */
+        trace_spt_change(sub_spt->vgpu->id, "release", sub_spt,
+                         sub_spt->guest_page.gfn, sub_spt->shadow_page.type);
+        ppgtt_free_spt(sub_spt);
+        return ret;
 }
 
 static int split_64KB_gtt_entry(struct intel_vgpu *vgpu,
---|
@@ -1251,7 +1306,7 @@
                 return -EINVAL;
         default:
                 GEM_BUG_ON(1);
-        };
+        }
 
         /* direct shadow */
         ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn, page_size,
---|
@@ -1845,7 +1900,7 @@
  * Zero on success, negative error code in pointer if failed.
  */
 struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu,
-                intel_gvt_gtt_type_t root_entry_type, u64 pdps[])
+                enum intel_gvt_gtt_type root_entry_type, u64 pdps[])
 {
         struct intel_gvt *gvt = vgpu->gvt;
         struct intel_vgpu_mm *mm;
---|
@@ -1863,6 +1918,7 @@
 
         INIT_LIST_HEAD(&mm->ppgtt_mm.list);
         INIT_LIST_HEAD(&mm->ppgtt_mm.lru_list);
+        INIT_LIST_HEAD(&mm->ppgtt_mm.link);
 
         if (root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY)
                 mm->ppgtt_mm.guest_pdps[0] = pdps[0];
---|
@@ -1878,7 +1934,11 @@
         }
 
         list_add_tail(&mm->ppgtt_mm.list, &vgpu->gtt.ppgtt_mm_list_head);
+
+        mutex_lock(&gvt->gtt.ppgtt_mm_lock);
         list_add_tail(&mm->ppgtt_mm.lru_list, &gvt->gtt.ppgtt_mm_lru_list_head);
+        mutex_unlock(&gvt->gtt.ppgtt_mm_lock);
+
         return mm;
 }
 
---|
@@ -1901,7 +1961,21 @@
                 vgpu_free_mm(mm);
                 return ERR_PTR(-ENOMEM);
         }
-        mm->ggtt_mm.last_partial_off = -1UL;
+
+        mm->ggtt_mm.host_ggtt_aperture = vzalloc((vgpu_aperture_sz(vgpu) >> PAGE_SHIFT) * sizeof(u64));
+        if (!mm->ggtt_mm.host_ggtt_aperture) {
+                vfree(mm->ggtt_mm.virtual_ggtt);
+                vgpu_free_mm(mm);
+                return ERR_PTR(-ENOMEM);
+        }
+
+        mm->ggtt_mm.host_ggtt_hidden = vzalloc((vgpu_hidden_sz(vgpu) >> PAGE_SHIFT) * sizeof(u64));
+        if (!mm->ggtt_mm.host_ggtt_hidden) {
+                vfree(mm->ggtt_mm.host_ggtt_aperture);
+                vfree(mm->ggtt_mm.virtual_ggtt);
+                vgpu_free_mm(mm);
+                return ERR_PTR(-ENOMEM);
+        }
 
         return mm;
 }
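Note on the hunk above: each snapshot array costs one u64 per GGTT entry. As a worked example with hypothetical sizes, a 256 MiB aperture holds 256 MiB / 4 KiB = 65536 entries, so its mirror takes 65536 * 8 B = 512 KiB of vmalloc space; the same arithmetic applies to the hidden region:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t aperture_sz = 256ull << 20;          /* hypothetical: 256 MiB */
        uint64_t entries = aperture_sz >> 12;         /* 4 KiB pages -> 65536 */
        uint64_t bytes = entries * sizeof(uint64_t);  /* 524288 B = 512 KiB */

        printf("%llu entries, %llu bytes\n",
               (unsigned long long)entries, (unsigned long long)bytes);
        return 0;
    }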
---|
@@ -1922,11 +1996,16 @@
 
         if (mm->type == INTEL_GVT_MM_PPGTT) {
                 list_del(&mm->ppgtt_mm.list);
+
+                mutex_lock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
                 list_del(&mm->ppgtt_mm.lru_list);
+                mutex_unlock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
+
                 invalidate_ppgtt_mm(mm);
         } else {
                 vfree(mm->ggtt_mm.virtual_ggtt);
-                mm->ggtt_mm.last_partial_off = -1UL;
+                vfree(mm->ggtt_mm.host_ggtt_aperture);
+                vfree(mm->ggtt_mm.host_ggtt_hidden);
         }
 
         vgpu_free_mm(mm);
---|
@@ -1945,7 +2024,7 @@
 
 /**
  * intel_vgpu_pin_mm - increase the pin count of a vGPU mm object
- * @vgpu: a vGPU
+ * @mm: target vgpu mm
  *
  * This function is called when user wants to use a vGPU mm object. If this
 * mm object hasn't been shadowed yet, the shadow will be populated at this
---|
@@ -1965,9 +2044,10 @@
                 if (ret)
                         return ret;
 
+                mutex_lock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
                 list_move_tail(&mm->ppgtt_mm.lru_list,
                                &mm->vgpu->gvt->gtt.ppgtt_mm_lru_list_head);
-
+                mutex_unlock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
         }
 
         return 0;
---|
@@ -1978,6 +2058,8 @@
         struct intel_vgpu_mm *mm;
         struct list_head *pos, *n;
 
+        mutex_lock(&gvt->gtt.ppgtt_mm_lock);
+
         list_for_each_safe(pos, n, &gvt->gtt.ppgtt_mm_lru_list_head) {
                 mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.lru_list);
 
---|
@@ -1985,9 +2067,11 @@
                         continue;
 
                 list_del_init(&mm->ppgtt_mm.lru_list);
+                mutex_unlock(&gvt->gtt.ppgtt_mm_lock);
                 invalidate_ppgtt_mm(mm);
                 return 1;
         }
+        mutex_unlock(&gvt->gtt.ppgtt_mm_lock);
         return 0;
 }
 
---|
@@ -2103,10 +2187,19 @@
         struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
         const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
         unsigned long index = off >> info->gtt_entry_size_shift;
+        unsigned long gma;
         struct intel_gvt_gtt_entry e;
 
         if (bytes != 4 && bytes != 8)
                 return -EINVAL;
+
+        gma = index << I915_GTT_PAGE_SHIFT;
+        if (!intel_gvt_ggtt_validate_range(vgpu,
+                                           gma, 1 << I915_GTT_PAGE_SHIFT)) {
+                gvt_dbg_mm("read invalid ggtt at 0x%lx\n", gma);
+                memset(p_data, 0, bytes);
+                return 0;
+        }
 
         ggtt_get_guest_entry(ggtt_mm, &e, index);
         memcpy(p_data, (void *)&e.val64 + (off & (info->gtt_entry_size - 1)),
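Note on the hunk above: a read is now bounds-checked against the full page the entry maps (index << I915_GTT_PAGE_SHIFT) and out-of-range reads return zeroes instead of touching the virtual GGTT. A userspace analogue of that defensive read path (entry size and the range check simplified to an array bound):

    #include <stdint.h>
    #include <string.h>

    /* Serve a 4- or 8-byte read at byte offset `off` into the entry table,
     * zero-filling when the touched entry is out of range. */
    static void emulate_read(const uint64_t *vggtt, size_t nr_entries,
                             uint32_t off, void *out, size_t bytes)
    {
        size_t index = off >> 3;            /* 8-byte entries */

        if (index >= nr_entries) {          /* stands in for the range check */
            memset(out, 0, bytes);
            return;
        }
        memcpy(out, (const char *)&vggtt[index] + (off & 7), bytes);
    }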
---|
@@ -2165,6 +2258,8 @@
         struct intel_gvt_gtt_entry m = {.val64 = 0, .type = GTT_TYPE_GGTT_PTE};
         dma_addr_t dma_addr;
         int ret;
+        struct intel_gvt_partial_pte *partial_pte, *pos, *n;
+        bool partial_update = false;
 
         if (bytes != 4 && bytes != 8)
                 return -EINVAL;
---|
@@ -2175,68 +2270,57 @@
         if (!vgpu_gmadr_is_valid(vgpu, gma))
                 return 0;
 
-        ggtt_get_guest_entry(ggtt_mm, &e, g_gtt_index);
-
+        e.type = GTT_TYPE_GGTT_PTE;
         memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data,
                         bytes);
 
         /* If ggtt entry size is 8 bytes, and it's split into two 4 bytes
-         * write, we assume the two 4 bytes writes are consecutive.
-         * Otherwise, we abort and report error
+         * write, save the first 4 bytes in a list and update virtual
+         * PTE. Only update shadow PTE when the second 4 bytes arrives.
          */
         if (bytes < info->gtt_entry_size) {
-                if (ggtt_mm->ggtt_mm.last_partial_off == -1UL) {
-                        /* the first partial part*/
-                        ggtt_mm->ggtt_mm.last_partial_off = off;
-                        ggtt_mm->ggtt_mm.last_partial_data = e.val64;
-                        return 0;
-                } else if ((g_gtt_index ==
-                                (ggtt_mm->ggtt_mm.last_partial_off >>
-                                info->gtt_entry_size_shift)) &&
-                        (off != ggtt_mm->ggtt_mm.last_partial_off)) {
-                        /* the second partial part */
+                bool found = false;
 
-                        int last_off = ggtt_mm->ggtt_mm.last_partial_off &
-                                (info->gtt_entry_size - 1);
+                list_for_each_entry_safe(pos, n,
+                                &ggtt_mm->ggtt_mm.partial_pte_list, list) {
+                        if (g_gtt_index == pos->offset >>
+                                        info->gtt_entry_size_shift) {
+                                if (off != pos->offset) {
+                                        /* the second partial part */
+                                        int last_off = pos->offset &
+                                                (info->gtt_entry_size - 1);
 
-                        memcpy((void *)&e.val64 + last_off,
-                                (void *)&ggtt_mm->ggtt_mm.last_partial_data +
-                                last_off, bytes);
+                                        memcpy((void *)&e.val64 + last_off,
+                                                (void *)&pos->data + last_off,
+                                                bytes);
 
-                        ggtt_mm->ggtt_mm.last_partial_off = -1UL;
-                } else {
-                        int last_offset;
+                                        list_del(&pos->list);
+                                        kfree(pos);
+                                        found = true;
+                                        break;
+                                }
 
-                        gvt_vgpu_err("failed to populate guest ggtt entry: abnormal ggtt entry write sequence, last_partial_off=%lx, offset=%x, bytes=%d, ggtt entry size=%d\n",
-                                        ggtt_mm->ggtt_mm.last_partial_off, off,
-                                        bytes, info->gtt_entry_size);
+                                /* update of the first partial part */
+                                pos->data = e.val64;
+                                ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
+                                return 0;
+                        }
+                }
 
-                        /* set host ggtt entry to scratch page and clear
-                         * virtual ggtt entry as not present for last
-                         * partially write offset
-                         */
-                        last_offset = ggtt_mm->ggtt_mm.last_partial_off &
-                                (~(info->gtt_entry_size - 1));
-
-                        ggtt_get_host_entry(ggtt_mm, &m, last_offset);
-                        ggtt_invalidate_pte(vgpu, &m);
-                        ops->set_pfn(&m, gvt->gtt.scratch_mfn);
-                        ops->clear_present(&m);
-                        ggtt_set_host_entry(ggtt_mm, &m, last_offset);
-                        ggtt_invalidate(gvt->dev_priv);
-
-                        ggtt_get_guest_entry(ggtt_mm, &e, last_offset);
-                        ops->clear_present(&e);
-                        ggtt_set_guest_entry(ggtt_mm, &e, last_offset);
-
-                        ggtt_mm->ggtt_mm.last_partial_off = off;
-                        ggtt_mm->ggtt_mm.last_partial_data = e.val64;
-
-                        return 0;
+                if (!found) {
+                        /* the first partial part */
+                        partial_pte = kzalloc(sizeof(*partial_pte), GFP_KERNEL);
+                        if (!partial_pte)
+                                return -ENOMEM;
+                        partial_pte->offset = off;
+                        partial_pte->data = e.val64;
+                        list_add_tail(&partial_pte->list,
+                                        &ggtt_mm->ggtt_mm.partial_pte_list);
+                        partial_update = true;
                 }
         }
 
-        if (ops->test_present(&e)) {
+        if (!partial_update && (ops->test_present(&e))) {
                 gfn = ops->get_pfn(&e);
                 m.val64 = e.val64;
                 m.type = e.type;
---|
@@ -2261,16 +2345,18 @@
                 } else
                         ops->set_pfn(&m, dma_addr >> PAGE_SHIFT);
         } else {
-                ggtt_get_host_entry(ggtt_mm, &m, g_gtt_index);
-                ggtt_invalidate_pte(vgpu, &m);
                 ops->set_pfn(&m, gvt->gtt.scratch_mfn);
                 ops->clear_present(&m);
         }
 
 out:
-        ggtt_set_host_entry(ggtt_mm, &m, g_gtt_index);
-        ggtt_invalidate(gvt->dev_priv);
         ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
+
+        ggtt_get_host_entry(ggtt_mm, &e, g_gtt_index);
+        ggtt_invalidate_pte(vgpu, &e);
+
+        ggtt_set_host_entry(ggtt_mm, &m, g_gtt_index);
+        ggtt_invalidate(gvt->gt);
         return 0;
 }
 
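Note on the two hunks above: a 4-byte write to an 8-byte PTE is now parked on partial_pte_list, keyed by the PTE's offset; the shadow PTE is updated only once the other half arrives, and a rewrite of the same half simply refreshes the parked value. This replaces the old single-slot scheme that assumed the two halves were consecutive and clobbered the entry otherwise. A runnable single-slot analogue of the merge logic (the kernel keeps a list so several PTEs can be in flight at once; names here are illustrative):

    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>

    struct partial { uint32_t off; uint64_t data; bool used; };

    /* Merge a 4-byte write at byte offset `off` into a 64-bit PTE image.
     * Returns true when a complete 8-byte value is ready to be shadowed. */
    static bool merge_write(struct partial *slot, uint32_t off, uint32_t val,
                            uint64_t *full)
    {
        if (slot->used && (slot->off & ~7u) == (off & ~7u) &&
            slot->off != off) {
            uint64_t image = slot->data;    /* second half arrived */

            memcpy((char *)&image + (off & 7), &val, sizeof(val));
            slot->used = false;
            *full = image;
            return true;
        }

        /* first half (or a refresh of it): park the partial value */
        slot->data = 0;
        memcpy((char *)&slot->data + (off & 7), &val, sizeof(val));
        slot->off = off;
        slot->used = true;
        return false;
    }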
---|
@@ -2291,28 +2377,45 @@
 {
         const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
         int ret;
+        struct intel_vgpu_submission *s = &vgpu->submission;
+        struct intel_engine_cs *engine;
+        int i;
 
         if (bytes != 4 && bytes != 8)
                 return -EINVAL;
 
         off -= info->gtt_start_offset;
         ret = emulate_ggtt_mmio_write(vgpu, off, p_data, bytes);
+
+        /* if ggtt of last submitted context is written,
+         * that context has probably been unpinned.
+         * Set last shadowed ctx to invalid.
+         */
+        for_each_engine(engine, vgpu->gvt->gt, i) {
+                if (!s->last_ctx[i].valid)
+                        continue;
+
+                if (s->last_ctx[i].lrca == (off >> info->gtt_entry_size_shift))
+                        s->last_ctx[i].valid = false;
+        }
         return ret;
 }
 
 static int alloc_scratch_pages(struct intel_vgpu *vgpu,
-                intel_gvt_gtt_type_t type)
+                enum intel_gvt_gtt_type type)
 {
+        struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
         struct intel_vgpu_gtt *gtt = &vgpu->gtt;
         struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
         int page_entry_num = I915_GTT_PAGE_SIZE >>
                         vgpu->gvt->device_info.gtt_entry_size_shift;
         void *scratch_pt;
         int i;
-        struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+        struct device *dev = &vgpu->gvt->gt->i915->drm.pdev->dev;
         dma_addr_t daddr;
 
-        if (WARN_ON(type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX))
+        if (drm_WARN_ON(&i915->drm,
+                        type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX))
                 return -EINVAL;
 
         scratch_pt = (void *)get_zeroed_page(GFP_KERNEL);
---|
@@ -2366,7 +2469,7 @@
 static int release_scratch_page_tree(struct intel_vgpu *vgpu)
 {
         int i;
-        struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+        struct device *dev = &vgpu->gvt->gt->i915->drm.pdev->dev;
         dma_addr_t daddr;
 
         for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
---|
@@ -2428,10 +2531,12 @@
 
         intel_vgpu_reset_ggtt(vgpu, false);
 
+        INIT_LIST_HEAD(&gtt->ggtt_mm->ggtt_mm.partial_pte_list);
+
         return create_scratch_page_tree(vgpu);
 }
 
-static void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu)
+void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu)
 {
         struct list_head *pos, *n;
         struct intel_vgpu_mm *mm;
---|
@@ -2452,6 +2557,15 @@
 
 static void intel_vgpu_destroy_ggtt_mm(struct intel_vgpu *vgpu)
 {
+        struct intel_gvt_partial_pte *pos, *next;
+
+        list_for_each_entry_safe(pos, next,
+                                 &vgpu->gtt.ggtt_mm->ggtt_mm.partial_pte_list,
+                                 list) {
+                gvt_dbg_mm("partial PTE update on hold 0x%lx : 0x%llx\n",
+                           pos->offset, pos->data);
+                kfree(pos);
+        }
         intel_vgpu_destroy_mm(vgpu->gtt.ggtt_mm);
         vgpu->gtt.ggtt_mm = NULL;
 }
---|
@@ -2485,6 +2599,7 @@
         list_for_each_safe(pos, n, &gtt->oos_page_free_list_head) {
                 oos_page = container_of(pos, struct intel_vgpu_oos_page, list);
                 list_del(&oos_page->list);
+                free_page((unsigned long)oos_page->mem);
                 kfree(oos_page);
         }
 }
---|
@@ -2505,6 +2620,12 @@
                         ret = -ENOMEM;
                         goto fail;
                 }
+                oos_page->mem = (void *)__get_free_pages(GFP_KERNEL, 0);
+                if (!oos_page->mem) {
+                        ret = -ENOMEM;
+                        kfree(oos_page);
+                        goto fail;
+                }
 
                 INIT_LIST_HEAD(&oos_page->list);
                 INIT_LIST_HEAD(&oos_page->vm_list);
---|
@@ -2523,8 +2644,7 @@
 /**
  * intel_vgpu_find_ppgtt_mm - find a PPGTT mm object
  * @vgpu: a vGPU
- * @page_table_level: PPGTT page table level
- * @root_entry: PPGTT page table root pointers
+ * @pdps: pdp root array
  *
 * This function is used to find a PPGTT mm object from mm object pool
 *
---|
@@ -2569,7 +2689,7 @@
  * Zero on success, negative error code if failed.
  */
 struct intel_vgpu_mm *intel_vgpu_get_ppgtt_mm(struct intel_vgpu *vgpu,
-                intel_gvt_gtt_type_t root_entry_type, u64 pdps[])
+                enum intel_gvt_gtt_type root_entry_type, u64 pdps[])
 {
         struct intel_vgpu_mm *mm;
 
---|
@@ -2621,7 +2741,7 @@
 {
         int ret;
         void *page;
-        struct device *dev = &gvt->dev_priv->drm.pdev->dev;
+        struct device *dev = &gvt->gt->i915->drm.pdev->dev;
         dma_addr_t daddr;
 
         gvt_dbg_core("init gtt\n");
---|
@@ -2656,5 +2776,6 @@
                 }
         }
         INIT_LIST_HEAD(&gvt->gtt.ppgtt_mm_lru_list_head);
+        mutex_init(&gvt->gtt.ppgtt_mm_lock);
         return 0;
 }
---|
2661 | 2782 | |
---|
.. | .. |
---|
2669 | 2790 | */ |
---|
2670 | 2791 | void intel_gvt_clean_gtt(struct intel_gvt *gvt) |
---|
2671 | 2792 | { |
---|
2672 | | - struct device *dev = &gvt->dev_priv->drm.pdev->dev; |
---|
| 2793 | + struct device *dev = &gvt->gt->i915->drm.pdev->dev; |
---|
2673 | 2794 | dma_addr_t daddr = (dma_addr_t)(gvt->gtt.scratch_mfn << |
---|
2674 | 2795 | I915_GTT_PAGE_SHIFT); |
---|
2675 | 2796 | |
---|
@@ -2696,7 +2817,9 @@
         list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) {
                 mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);
                 if (mm->type == INTEL_GVT_MM_PPGTT) {
+                        mutex_lock(&vgpu->gvt->gtt.ppgtt_mm_lock);
                         list_del_init(&mm->ppgtt_mm.lru_list);
+                        mutex_unlock(&vgpu->gvt->gtt.ppgtt_mm_lock);
                         if (mm->ppgtt_mm.shadowed)
                                 invalidate_ppgtt_mm(mm);
                 }
---|
@@ -2715,7 +2838,6 @@
 void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old)
 {
         struct intel_gvt *gvt = vgpu->gvt;
-        struct drm_i915_private *dev_priv = gvt->dev_priv;
         struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
         struct intel_gvt_gtt_entry entry = {.type = GTT_TYPE_GGTT_PTE};
         struct intel_gvt_gtt_entry old_entry;
---|
@@ -2745,23 +2867,43 @@
                 ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++);
         }
 
-        ggtt_invalidate(dev_priv);
+        ggtt_invalidate(gvt->gt);
 }
 
 /**
- * intel_vgpu_reset_gtt - reset the all GTT related status
- * @vgpu: a vGPU
+ * intel_gvt_restore_ggtt - restore all vGPU's ggtt entries
+ * @gvt: intel gvt device
  *
- * This function is called from vfio core to reset reset all
- * GTT related status, including GGTT, PPGTT, scratch page.
+ * This function is called at driver resume stage to restore
+ * GGTT entries of every vGPU.
  *
 */
-void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu)
+void intel_gvt_restore_ggtt(struct intel_gvt *gvt)
 {
-        /* Shadow pages are only created when there is no page
-         * table tracking data, so remove page tracking data after
-         * removing the shadow pages.
-         */
-        intel_vgpu_destroy_all_ppgtt_mm(vgpu);
-        intel_vgpu_reset_ggtt(vgpu, true);
+        struct intel_vgpu *vgpu;
+        struct intel_vgpu_mm *mm;
+        int id;
+        gen8_pte_t pte;
+        u32 idx, num_low, num_hi, offset;
+
+        /* Restore dirty host ggtt for all vGPUs */
+        idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) {
+                mm = vgpu->gtt.ggtt_mm;
+
+                num_low = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT;
+                offset = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT;
+                for (idx = 0; idx < num_low; idx++) {
+                        pte = mm->ggtt_mm.host_ggtt_aperture[idx];
+                        if (pte & _PAGE_PRESENT)
+                                write_pte64(vgpu->gvt->gt->ggtt, offset + idx, pte);
+                }
+
+                num_hi = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT;
+                offset = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT;
+                for (idx = 0; idx < num_hi; idx++) {
+                        pte = mm->ggtt_mm.host_ggtt_hidden[idx];
+                        if (pte & _PAGE_PRESENT)
+                                write_pte64(vgpu->gvt->gt->ggtt, offset + idx, pte);
+                }
        }
 }
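Note on the final hunk: intel_gvt_restore_ggtt() replays each vGPU's saved aperture and hidden snapshots, rewriting only entries with _PAGE_PRESENT set. That is what lets vGPUs survive suspend/resume: the hardware GGTT loses its contents across S3/S4 while the vzalloc'ed copies do not. A hedged sketch of the expected call site (the exact name and placement of the resume hook is an assumption, not something this diff confirms):

    /* Sketch only: wiring the restore into the host driver's resume path. */
    static int gvt_resume(struct intel_gvt *gvt)
    {
        intel_gvt_restore_ggtt(gvt);    /* replay saved host PTEs */
        return 0;
    }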
---|