2023-12-06 08f87f769b595151be1afeff53e144f543faa614
kernel/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -1,7 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
  *
  * Copyright 2010-2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
  */
@@ -13,6 +11,7 @@
 #include <linux/hugetlb.h>
 #include <linux/module.h>
 #include <linux/log2.h>
+#include <linux/sizes.h>
 
 #include <asm/trace.h>
 #include <asm/kvm_ppc.h>
@@ -68,7 +67,7 @@
 		 * so use the bit for the first thread to represent the core.
 		 */
 		if (cpu_has_feature(CPU_FTR_ARCH_300))
-			cpu = cpu_first_thread_sibling(cpu);
+			cpu = cpu_first_tlb_thread_sibling(cpu);
 		cpumask_clear_cpu(cpu, &kvm->arch.need_tlb_flush);
 	}
 
@@ -100,14 +99,14 @@
 	} else {
 		rev->forw = rev->back = pte_index;
 		*rmap = (*rmap & ~KVMPPC_RMAP_INDEX) |
-			pte_index | KVMPPC_RMAP_PRESENT;
+			pte_index | KVMPPC_RMAP_PRESENT | KVMPPC_RMAP_HPT;
 	}
 	unlock_rmap(rmap);
 }
 EXPORT_SYMBOL_GPL(kvmppc_add_revmap_chain);
 
 /* Update the dirty bitmap of a memslot */
-void kvmppc_update_dirty_map(struct kvm_memory_slot *memslot,
+void kvmppc_update_dirty_map(const struct kvm_memory_slot *memslot,
			     unsigned long gfn, unsigned long psize)
 {
 	unsigned long npages;
@@ -211,7 +210,7 @@
 	pte_t *ptep;
 	unsigned int writing;
 	unsigned long mmu_seq;
-	unsigned long rcbits, irq_flags = 0;
+	unsigned long rcbits;
 
 	if (kvm_is_radix(kvm))
 		return H_FUNCTION;
@@ -249,17 +248,9 @@
 
 	/* Translate to host virtual address */
 	hva = __gfn_to_hva_memslot(memslot, gfn);
-	/*
-	 * If we had a page table table change after lookup, we would
-	 * retry via mmu_notifier_retry.
-	 */
-	if (!realmode)
-		local_irq_save(irq_flags);
-	/*
-	 * If called in real mode we have MSR_EE = 0. Otherwise
-	 * we disable irq above.
-	 */
-	ptep = __find_linux_pte(pgdir, hva, NULL, &hpage_shift);
+
+	arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
+	ptep = find_kvm_host_pte(kvm, mmu_seq, hva, &hpage_shift);
 	if (ptep) {
 		pte_t pte;
 		unsigned int host_pte_size;
@@ -273,8 +264,7 @@
 		 * to <= host page size, if host is using hugepage
 		 */
 		if (host_pte_size < psize) {
-			if (!realmode)
-				local_irq_restore(flags);
+			arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
 			return H_PARAMETER;
 		}
 		pte = kvmppc_read_update_linux_pte(ptep, writing);
@@ -288,8 +278,7 @@
 			pa |= gpa & ~PAGE_MASK;
 		}
 	}
-	if (!realmode)
-		local_irq_restore(irq_flags);
+	arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
 
 	ptel &= HPTE_R_KEY | HPTE_R_PP0 | (psize-1);
 	ptel |= pa;
@@ -889,6 +878,139 @@
 	return ret;
 }
 
+static int kvmppc_get_hpa(struct kvm_vcpu *vcpu, unsigned long mmu_seq,
+			  unsigned long gpa, int writing, unsigned long *hpa,
+			  struct kvm_memory_slot **memslot_p)
+{
+	struct kvm *kvm = vcpu->kvm;
+	struct kvm_memory_slot *memslot;
+	unsigned long gfn, hva, pa, psize = PAGE_SHIFT;
+	unsigned int shift;
+	pte_t *ptep, pte;
+
+	/* Find the memslot for this address */
+	gfn = gpa >> PAGE_SHIFT;
+	memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
+	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
+		return H_PARAMETER;
+
+	/* Translate to host virtual address */
+	hva = __gfn_to_hva_memslot(memslot, gfn);
+
+	/* Try to find the host pte for that virtual address */
+	ptep = find_kvm_host_pte(kvm, mmu_seq, hva, &shift);
+	if (!ptep)
+		return H_TOO_HARD;
+	pte = kvmppc_read_update_linux_pte(ptep, writing);
+	if (!pte_present(pte))
+		return H_TOO_HARD;
+
+	/* Convert to a physical address */
+	if (shift)
+		psize = 1UL << shift;
+	pa = pte_pfn(pte) << PAGE_SHIFT;
+	pa |= hva & (psize - 1);
+	pa |= gpa & ~PAGE_MASK;
+
+	if (hpa)
+		*hpa = pa;
+	if (memslot_p)
+		*memslot_p = memslot;
+
+	return H_SUCCESS;
+}
+
+static long kvmppc_do_h_page_init_zero(struct kvm_vcpu *vcpu,
+				       unsigned long dest)
+{
+	struct kvm_memory_slot *memslot;
+	struct kvm *kvm = vcpu->kvm;
+	unsigned long pa, mmu_seq;
+	long ret = H_SUCCESS;
+	int i;
+
+	/* Used later to detect if we might have been invalidated */
+	mmu_seq = kvm->mmu_notifier_seq;
+	smp_rmb();
+
+	arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
+
+	ret = kvmppc_get_hpa(vcpu, mmu_seq, dest, 1, &pa, &memslot);
+	if (ret != H_SUCCESS)
+		goto out_unlock;
+
+	/* Zero the page */
+	for (i = 0; i < SZ_4K; i += L1_CACHE_BYTES, pa += L1_CACHE_BYTES)
+		dcbz((void *)pa);
+	kvmppc_update_dirty_map(memslot, dest >> PAGE_SHIFT, PAGE_SIZE);
+
+out_unlock:
+	arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
+	return ret;
+}
+
+static long kvmppc_do_h_page_init_copy(struct kvm_vcpu *vcpu,
+				       unsigned long dest, unsigned long src)
+{
+	unsigned long dest_pa, src_pa, mmu_seq;
+	struct kvm_memory_slot *dest_memslot;
+	struct kvm *kvm = vcpu->kvm;
+	long ret = H_SUCCESS;
+
+	/* Used later to detect if we might have been invalidated */
+	mmu_seq = kvm->mmu_notifier_seq;
+	smp_rmb();
+
+	arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
+	ret = kvmppc_get_hpa(vcpu, mmu_seq, dest, 1, &dest_pa, &dest_memslot);
+	if (ret != H_SUCCESS)
+		goto out_unlock;
+
+	ret = kvmppc_get_hpa(vcpu, mmu_seq, src, 0, &src_pa, NULL);
+	if (ret != H_SUCCESS)
+		goto out_unlock;
+
+	/* Copy the page */
+	memcpy((void *)dest_pa, (void *)src_pa, SZ_4K);
+
+	kvmppc_update_dirty_map(dest_memslot, dest >> PAGE_SHIFT, PAGE_SIZE);
+
+out_unlock:
+	arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
+	return ret;
+}
+
+long kvmppc_rm_h_page_init(struct kvm_vcpu *vcpu, unsigned long flags,
+			   unsigned long dest, unsigned long src)
+{
+	struct kvm *kvm = vcpu->kvm;
+	u64 pg_mask = SZ_4K - 1;	/* 4K page size */
+	long ret = H_SUCCESS;
+
+	/* Don't handle radix mode here, go up to the virtual mode handler */
+	if (kvm_is_radix(kvm))
+		return H_TOO_HARD;
+
+	/* Check for invalid flags (H_PAGE_SET_LOANED covers all CMO flags) */
+	if (flags & ~(H_ICACHE_INVALIDATE | H_ICACHE_SYNCHRONIZE |
+		      H_ZERO_PAGE | H_COPY_PAGE | H_PAGE_SET_LOANED))
+		return H_PARAMETER;
+
+	/* dest (and src if copy_page flag set) must be page aligned */
+	if ((dest & pg_mask) || ((flags & H_COPY_PAGE) && (src & pg_mask)))
+		return H_PARAMETER;
+
+	/* zero and/or copy the page as determined by the flags */
+	if (flags & H_COPY_PAGE)
+		ret = kvmppc_do_h_page_init_copy(vcpu, dest, src);
+	else if (flags & H_ZERO_PAGE)
+		ret = kvmppc_do_h_page_init_zero(vcpu, dest);
+
+	/* We can ignore the other flags */
+
+	return ret;
+}
+
 void kvmppc_invalidate_hpte(struct kvm *kvm, __be64 *hptep,
			unsigned long pte_index)
 {
@@ -1118,7 +1240,7 @@
 	status &= ~DSISR_NOHPTE;	/* DSISR_NOHPTE == SRR1_ISI_NOPT */
 	if (!data) {
 		if (gr & (HPTE_R_N | HPTE_R_G))
-			return status | SRR1_ISI_N_OR_G;
+			return status | SRR1_ISI_N_G_OR_CIP;
 		if (!hpte_read_permission(pp, slb_v & key))
 			return status | SRR1_ISI_PROT;
 	} else if (status & DSISR_ISSTORE) {