+// SPDX-License-Identifier: GPL-2.0-only
 /*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
  *
  * Copyright 2010-2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
  */
---|
...
 #include <linux/hugetlb.h>
 #include <linux/module.h>
 #include <linux/log2.h>
+#include <linux/sizes.h>

 #include <asm/trace.h>
 #include <asm/kvm_ppc.h>
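
The new <linux/sizes.h> include is there for the SZ_4K constant used by the
H_PAGE_INIT code added further down. For reference, the known definition from
include/linux/sizes.h:

	#define SZ_4K	0x00001000
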
---|
...
 	 * so use the bit for the first thread to represent the core.
 	 */
 	if (cpu_has_feature(CPU_FTR_ARCH_300))
-		cpu = cpu_first_thread_sibling(cpu);
+		cpu = cpu_first_tlb_thread_sibling(cpu);
 	cpumask_clear_cpu(cpu, &kvm->arch.need_tlb_flush);
 }

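A note on the hunk above: on ISA v3.0 (POWER9) cores the threads of a core can
share a TLB in groups, so the representative bit in need_tlb_flush has to be
chosen per TLB-sharing group rather than per core, or the clear here could
disagree with the test on the flush side. A minimal sketch of the matching
test-and-flush side, assuming it mirrors kvmppc_check_need_tlb_flush() (the
flush_guest_tlb() helper name is an assumption, not code from this patch):

	/* Sketch (assumption): the flush side tests the same representative bit. */
	int rep = cpu_first_tlb_thread_sibling(raw_smp_processor_id());

	if (cpumask_test_cpu(rep, &kvm->arch.need_tlb_flush)) {
		flush_guest_tlb(kvm);		/* assumed flush helper */
		/* Clear the bit only after the TLB has been flushed. */
		cpumask_clear_cpu(rep, &kvm->arch.need_tlb_flush);
	}
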
---|
...
 	} else {
 		rev->forw = rev->back = pte_index;
 		*rmap = (*rmap & ~KVMPPC_RMAP_INDEX) |
-			pte_index | KVMPPC_RMAP_PRESENT;
+			pte_index | KVMPPC_RMAP_PRESENT | KVMPPC_RMAP_HPT;
 	}
 	unlock_rmap(rmap);
 }
 EXPORT_SYMBOL_GPL(kvmppc_add_revmap_chain);

 /* Update the dirty bitmap of a memslot */
-void kvmppc_update_dirty_map(struct kvm_memory_slot *memslot,
+void kvmppc_update_dirty_map(const struct kvm_memory_slot *memslot,
 			     unsigned long gfn, unsigned long psize)
 {
 	unsigned long npages;
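The added const on kvmppc_update_dirty_map() lets callers that only hold a
const-qualified memslot pointer mark pages dirty without casting. A tiny
hypothetical caller, purely for illustration (the helper name is mine):

	/* Hypothetical: mark one 64K guest page dirty in its memslot. */
	static void mark_dirty_64k(const struct kvm_memory_slot *slot,
				   unsigned long gfn)
	{
		kvmppc_update_dirty_map(slot, gfn, SZ_64K);
	}
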
---|
...
 	pte_t *ptep;
 	unsigned int writing;
 	unsigned long mmu_seq;
-	unsigned long rcbits, irq_flags = 0;
+	unsigned long rcbits;

 	if (kvm_is_radix(kvm))
 		return H_FUNCTION;
---|
...

 	/* Translate to host virtual address */
 	hva = __gfn_to_hva_memslot(memslot, gfn);
-	/*
-	 * If we had a page table table change after lookup, we would
-	 * retry via mmu_notifier_retry.
-	 */
-	if (!realmode)
-		local_irq_save(irq_flags);
-	/*
-	 * If called in real mode we have MSR_EE = 0. Otherwise
-	 * we disable irq above.
-	 */
-	ptep = __find_linux_pte(pgdir, hva, NULL, &hpage_shift);
+
+	arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
+	ptep = find_kvm_host_pte(kvm, mmu_seq, hva, &hpage_shift);
 	if (ptep) {
 		pte_t pte;
 		unsigned int host_pte_size;
---|
...
 		 * to <= host page size, if host is using hugepage
 		 */
 		if (host_pte_size < psize) {
-			if (!realmode)
-				local_irq_restore(flags);
+			arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
 			return H_PARAMETER;
 		}
 		pte = kvmppc_read_update_linux_pte(ptep, writing);
---|
...
 			pa |= gpa & ~PAGE_MASK;
 		}
 	}
-	if (!realmode)
-		local_irq_restore(irq_flags);
+	arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);

 	ptel &= HPTE_R_KEY | HPTE_R_PP0 | (psize-1);
 	ptel |= pa;
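
Two points of context for the hunks above. First, this path can run in real
mode, where the usual spin_lock() wrapper is unsafe (it may call into lockdep
or tracing code), so the patch takes the raw arch-level lock underneath
kvm->mmu_lock directly. Second, find_kvm_host_pte() is only safe against
concurrent invalidation because the caller snapshots mmu_notifier_seq before
walking; a minimal sketch of that snapshot/recheck protocol, assuming the
generic KVM mmu-notifier usage of this era (this is not code from the patch):

	/* Sketch: detect an invalidation that raced with the page-table walk. */
	static long map_with_retry_check(struct kvm *kvm)
	{
		unsigned long mmu_seq;

		mmu_seq = kvm->mmu_notifier_seq;	/* snapshot first */
		smp_rmb();		/* pairs with the notifier's write side */

		spin_lock(&kvm->mmu_lock);	/* virtual-mode form of the lock */
		if (mmu_notifier_retry(kvm, mmu_seq)) {
			/* An invalidation ran since the snapshot: retry. */
			spin_unlock(&kvm->mmu_lock);
			return -EAGAIN;
		}
		/* ... safe to install the translation under the lock ... */
		spin_unlock(&kvm->mmu_lock);
		return 0;
	}
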
---|
...
 	return ret;
 }

+static int kvmppc_get_hpa(struct kvm_vcpu *vcpu, unsigned long mmu_seq,
+			  unsigned long gpa, int writing, unsigned long *hpa,
+			  struct kvm_memory_slot **memslot_p)
+{
+	struct kvm *kvm = vcpu->kvm;
+	struct kvm_memory_slot *memslot;
+	unsigned long gfn, hva, pa, psize = PAGE_SHIFT;
+	unsigned int shift;
+	pte_t *ptep, pte;
+
+	/* Find the memslot for this address */
+	gfn = gpa >> PAGE_SHIFT;
+	memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
+	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
+		return H_PARAMETER;
+
+	/* Translate to host virtual address */
+	hva = __gfn_to_hva_memslot(memslot, gfn);
+
+	/* Try to find the host pte for that virtual address */
+	ptep = find_kvm_host_pte(kvm, mmu_seq, hva, &shift);
+	if (!ptep)
+		return H_TOO_HARD;
+	pte = kvmppc_read_update_linux_pte(ptep, writing);
+	if (!pte_present(pte))
+		return H_TOO_HARD;
+
+	/* Convert to a physical address */
+	if (shift)
+		psize = 1UL << shift;
+	pa = pte_pfn(pte) << PAGE_SHIFT;
+	pa |= hva & (psize - 1);
+	pa |= gpa & ~PAGE_MASK;
+
+	if (hpa)
+		*hpa = pa;
+	if (memslot_p)
+		*memslot_p = memslot;
+
+	return H_SUCCESS;
+}
+
+static long kvmppc_do_h_page_init_zero(struct kvm_vcpu *vcpu,
+				       unsigned long dest)
+{
+	struct kvm_memory_slot *memslot;
+	struct kvm *kvm = vcpu->kvm;
+	unsigned long pa, mmu_seq;
+	long ret = H_SUCCESS;
+	int i;
+
+	/* Used later to detect if we might have been invalidated */
+	mmu_seq = kvm->mmu_notifier_seq;
+	smp_rmb();
+
+	arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
+
+	ret = kvmppc_get_hpa(vcpu, mmu_seq, dest, 1, &pa, &memslot);
+	if (ret != H_SUCCESS)
+		goto out_unlock;
+
+	/* Zero the page */
+	for (i = 0; i < SZ_4K; i += L1_CACHE_BYTES, pa += L1_CACHE_BYTES)
+		dcbz((void *)pa);
+	kvmppc_update_dirty_map(memslot, dest >> PAGE_SHIFT, PAGE_SIZE);
+
+out_unlock:
+	arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
+	return ret;
+}
+
+static long kvmppc_do_h_page_init_copy(struct kvm_vcpu *vcpu,
+				       unsigned long dest, unsigned long src)
+{
+	unsigned long dest_pa, src_pa, mmu_seq;
+	struct kvm_memory_slot *dest_memslot;
+	struct kvm *kvm = vcpu->kvm;
+	long ret = H_SUCCESS;
+
+	/* Used later to detect if we might have been invalidated */
+	mmu_seq = kvm->mmu_notifier_seq;
+	smp_rmb();
+
+	arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
+	ret = kvmppc_get_hpa(vcpu, mmu_seq, dest, 1, &dest_pa, &dest_memslot);
+	if (ret != H_SUCCESS)
+		goto out_unlock;
+
+	ret = kvmppc_get_hpa(vcpu, mmu_seq, src, 0, &src_pa, NULL);
+	if (ret != H_SUCCESS)
+		goto out_unlock;
+
+	/* Copy the page */
+	memcpy((void *)dest_pa, (void *)src_pa, SZ_4K);
+
+	kvmppc_update_dirty_map(dest_memslot, dest >> PAGE_SHIFT, PAGE_SIZE);
+
+out_unlock:
+	arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
+	return ret;
+}
+
+long kvmppc_rm_h_page_init(struct kvm_vcpu *vcpu, unsigned long flags,
+			   unsigned long dest, unsigned long src)
+{
+	struct kvm *kvm = vcpu->kvm;
+	u64 pg_mask = SZ_4K - 1;	/* 4K page size */
+	long ret = H_SUCCESS;
+
+	/* Don't handle radix mode here, go up to the virtual mode handler */
+	if (kvm_is_radix(kvm))
+		return H_TOO_HARD;
+
+	/* Check for invalid flags (H_PAGE_SET_LOANED covers all CMO flags) */
+	if (flags & ~(H_ICACHE_INVALIDATE | H_ICACHE_SYNCHRONIZE |
+		      H_ZERO_PAGE | H_COPY_PAGE | H_PAGE_SET_LOANED))
+		return H_PARAMETER;
+
+	/* dest (and src if copy_page flag set) must be page aligned */
+	if ((dest & pg_mask) || ((flags & H_COPY_PAGE) && (src & pg_mask)))
+		return H_PARAMETER;
+
+	/* zero and/or copy the page as determined by the flags */
+	if (flags & H_COPY_PAGE)
+		ret = kvmppc_do_h_page_init_copy(vcpu, dest, src);
+	else if (flags & H_ZERO_PAGE)
+		ret = kvmppc_do_h_page_init_zero(vcpu, dest);
+
+	/* We can ignore the other flags */
+
+	return ret;
+}
+
 void kvmppc_invalidate_hpte(struct kvm *kvm, __be64 *hptep,
 			    unsigned long pte_index)
 {
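
For context, kvmppc_rm_h_page_init() above is the real-mode fast path for the
H_PAGE_INIT hcall. A guest would invoke it roughly as below, using the
standard pseries hcall wrapper (dest_ra and src_ra stand for guest-real page
addresses; the snippet is a sketch, not code from this patch):

	long rc;

	/* Zero the 4K page at guest-real address dest_ra. */
	rc = plpar_hcall_norets(H_PAGE_INIT, H_ZERO_PAGE, dest_ra, 0);

	/* Copy the 4K page at src_ra over the page at dest_ra. */
	rc = plpar_hcall_norets(H_PAGE_INIT, H_COPY_PAGE, dest_ra, src_ra);

	if (rc != H_SUCCESS)
		pr_err("H_PAGE_INIT failed: %ld\n", rc);
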
---|
...
 	status &= ~DSISR_NOHPTE;	/* DSISR_NOHPTE == SRR1_ISI_NOPT */
 	if (!data) {
 		if (gr & (HPTE_R_N | HPTE_R_G))
-			return status | SRR1_ISI_N_OR_G;
+			return status | SRR1_ISI_N_G_OR_CIP;
 		if (!hpte_read_permission(pp, slb_v & key))
 			return status | SRR1_ISI_PROT;
 	} else if (status & DSISR_ISSTORE) {
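
The SRR1_ISI_N_OR_G -> SRR1_ISI_N_G_OR_CIP change in the last hunk follows a
rename of the SRR1 bit definition in asm/reg.h: the same bit also reports a
cache-inhibited fetch of a prefixed instruction, so the name was widened. The
definition should look roughly like this (value from memory, treat it as an
assumption):

	/* ISI: no-exec, guarded, or CI fetch of a prefixed instruction. */
	#define SRR1_ISI_N_G_OR_CIP	0x10000000ULL
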
---|