+// SPDX-License-Identifier: GPL-2.0-only
 /*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
  *
  * Copyright 2016 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
  */
...
 #include <linux/string.h>
 #include <linux/kvm.h>
 #include <linux/kvm_host.h>
+#include <linux/anon_inodes.h>
+#include <linux/file.h>
+#include <linux/debugfs.h>
+#include <linux/pgtable.h>
 
 #include <asm/kvm_ppc.h>
 #include <asm/kvm_book3s.h>
 #include <asm/page.h>
 #include <asm/mmu.h>
-#include <asm/pgtable.h>
 #include <asm/pgalloc.h>
 #include <asm/pte-walk.h>
+#include <asm/ultravisor.h>
+#include <asm/kvm_book3s_uvmem.h>
 
 /*
  * Supported radix tree geometry.
...
  */
 static int p9_supported_radix_bits[4] = { 5, 9, 9, 13 };
 
+unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid,
+					      gva_t eaddr, void *to, void *from,
+					      unsigned long n)
+{
+	int old_pid, old_lpid;
+	unsigned long quadrant, ret = n;
+	bool is_load = !!to;
+
+	/* Can't access quadrants 1 or 2 in non-HV mode, call the HV to do it */
+	if (kvmhv_on_pseries())
+		return plpar_hcall_norets(H_COPY_TOFROM_GUEST, lpid, pid, eaddr,
+					  (to != NULL) ? __pa(to) : 0,
+					  (from != NULL) ? __pa(from) : 0, n);
+
+	quadrant = 1;
+	if (!pid)
+		quadrant = 2;
+	if (is_load)
+		from = (void *) (eaddr | (quadrant << 62));
+	else
+		to = (void *) (eaddr | (quadrant << 62));
+
+	preempt_disable();
+
+	/* switch the lpid first to avoid running host with unallocated pid */
+	old_lpid = mfspr(SPRN_LPID);
+	if (old_lpid != lpid)
+		mtspr(SPRN_LPID, lpid);
+	if (quadrant == 1) {
+		old_pid = mfspr(SPRN_PID);
+		if (old_pid != pid)
+			mtspr(SPRN_PID, pid);
+	}
+	isync();
+
+	pagefault_disable();
+	if (is_load)
+		ret = __copy_from_user_inatomic(to, (const void __user *)from, n);
+	else
+		ret = __copy_to_user_inatomic((void __user *)to, from, n);
+	pagefault_enable();
+
+	/* switch the pid first to avoid running host with unallocated pid */
+	if (quadrant == 1 && pid != old_pid)
+		mtspr(SPRN_PID, old_pid);
+	if (lpid != old_lpid)
+		mtspr(SPRN_LPID, old_lpid);
+	isync();
+
+	preempt_enable();
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(__kvmhv_copy_tofrom_guest_radix);
+
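The quadrant trick used by __kvmhv_copy_tofrom_guest_radix() above relies on POWER9 radix addressing: the top two bits of an effective address select a "quadrant", and in hypervisor mode quadrants 1 and 2 reach guest address space (quadrant 2 being the one used when pid == 0). A minimal sketch of the address composition; the helper name quadrant_eaddr() is hypothetical:

	/* Illustrative only: tag a 52-bit guest EA with a quadrant selector. */
	static inline void *quadrant_eaddr(unsigned long eaddr, unsigned long quadrant)
	{
		eaddr &= ~(0xFFFUL << 52);		/* keep only the 52-bit address */
		return (void *)(eaddr | (quadrant << 62));	/* bits 62:63 = quadrant */
	}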
+static long kvmhv_copy_tofrom_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
+					  void *to, void *from, unsigned long n)
+{
+	int lpid = vcpu->kvm->arch.lpid;
+	int pid = vcpu->arch.pid;
+
+	/* This would cause a data segment intr so don't allow the access */
+	if (eaddr & (0x3FFUL << 52))
+		return -EINVAL;
+
+	/* Should we be using the nested lpid */
+	if (vcpu->arch.nested)
+		lpid = vcpu->arch.nested->shadow_lpid;
+
+	/* If accessing quadrant 3 then pid is expected to be 0 */
+	if (((eaddr >> 62) & 0x3) == 0x3)
+		pid = 0;
+
+	eaddr &= ~(0xFFFUL << 52);
+
+	return __kvmhv_copy_tofrom_guest_radix(lpid, pid, eaddr, to, from, n);
+}
+
+long kvmhv_copy_from_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr, void *to,
+				 unsigned long n)
+{
+	long ret;
+
+	ret = kvmhv_copy_tofrom_guest_radix(vcpu, eaddr, to, NULL, n);
+	if (ret > 0)
+		memset(to + (n - ret), 0, ret);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(kvmhv_copy_from_guest_radix);
+
+long kvmhv_copy_to_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr, void *from,
+			       unsigned long n)
+{
+	return kvmhv_copy_tofrom_guest_radix(vcpu, eaddr, NULL, from, n);
+}
+EXPORT_SYMBOL_GPL(kvmhv_copy_to_guest_radix);
+
+int kvmppc_mmu_walk_radix_tree(struct kvm_vcpu *vcpu, gva_t eaddr,
+			       struct kvmppc_pte *gpte, u64 root,
+			       u64 *pte_ret_p)
+{
+	struct kvm *kvm = vcpu->kvm;
+	int ret, level, ps;
+	unsigned long rts, bits, offset, index;
+	u64 pte, base, gpa;
+	__be64 rpte;
+
+	rts = ((root & RTS1_MASK) >> (RTS1_SHIFT - 3)) |
+		((root & RTS2_MASK) >> RTS2_SHIFT);
+	bits = root & RPDS_MASK;
+	base = root & RPDB_MASK;
+
+	offset = rts + 31;
+
+	/* Current implementations only support 52-bit space */
+	if (offset != 52)
+		return -EINVAL;
+
+	/* Walk each level of the radix tree */
+	for (level = 3; level >= 0; --level) {
+		u64 addr;
+		/* Check a valid size */
+		if (level && bits != p9_supported_radix_bits[level])
+			return -EINVAL;
+		if (level == 0 && !(bits == 5 || bits == 9))
+			return -EINVAL;
+		offset -= bits;
+		index = (eaddr >> offset) & ((1UL << bits) - 1);
+		/* Check that low bits of page table base are zero */
+		if (base & ((1UL << (bits + 3)) - 1))
+			return -EINVAL;
+		/* Read the entry from guest memory */
+		addr = base + (index * sizeof(rpte));
+		vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
+		ret = kvm_read_guest(kvm, addr, &rpte, sizeof(rpte));
+		srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
+		if (ret) {
+			if (pte_ret_p)
+				*pte_ret_p = addr;
+			return ret;
+		}
+		pte = __be64_to_cpu(rpte);
+		if (!(pte & _PAGE_PRESENT))
+			return -ENOENT;
+		/* Check if a leaf entry */
+		if (pte & _PAGE_PTE)
+			break;
+		/* Get ready to walk the next level */
+		base = pte & RPDB_MASK;
+		bits = pte & RPDS_MASK;
+	}
+
+	/* Need a leaf at lowest level; 512GB pages not supported */
+	if (level < 0 || level == 3)
+		return -EINVAL;
+
+	/* We found a valid leaf PTE */
+	/* Offset is now log base 2 of the page size */
+	gpa = pte & 0x01fffffffffff000ul;
+	if (gpa & ((1ul << offset) - 1))
+		return -EINVAL;
+	gpa |= eaddr & ((1ul << offset) - 1);
+	for (ps = MMU_PAGE_4K; ps < MMU_PAGE_COUNT; ++ps)
+		if (offset == mmu_psize_defs[ps].shift)
+			break;
+	gpte->page_size = ps;
+	gpte->page_shift = offset;
+
+	gpte->eaddr = eaddr;
+	gpte->raddr = gpa;
+
+	/* Work out permissions */
+	gpte->may_read = !!(pte & _PAGE_READ);
+	gpte->may_write = !!(pte & _PAGE_WRITE);
+	gpte->may_execute = !!(pte & _PAGE_EXEC);
+
+	gpte->rc = pte & (_PAGE_ACCESSED | _PAGE_DIRTY);
+
+	if (pte_ret_p)
+		*pte_ret_p = pte;
+
+	return 0;
+}
+
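A quick check of the geometry kvmppc_mmu_walk_radix_tree() accepts: the RTS field encodes log2(address-space size) minus 31, so only RTS = 21 (a 2^52-byte space) passes the offset != 52 test, and each level's index width is then subtracted from 52 until only page-offset bits remain. The widths in p9_supported_radix_bits add up exactly (worked check, comment only):

	/*
	 * Levels 3..0 consume 13 + 9 + 9 + 9 bits plus a 12-bit (4 KiB)
	 * page offset:          13 + 9 + 9 + 9 + 12 == 52
	 * or, with the 5-bit bottom level and 64 KiB pages:
	 *                       13 + 9 + 9 + 5 + 16 == 52
	 */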
+/*
+ * Used to walk a partition or process table radix tree in guest memory
+ * Note: We exploit the fact that a partition table and a process
+ * table have the same layout, a partition-scoped page table and a
+ * process-scoped page table have the same layout, and the 2nd
+ * doubleword of a partition table entry has the same layout as
+ * the PTCR register.
+ */
+int kvmppc_mmu_radix_translate_table(struct kvm_vcpu *vcpu, gva_t eaddr,
+				     struct kvmppc_pte *gpte, u64 table,
+				     int table_index, u64 *pte_ret_p)
+{
+	struct kvm *kvm = vcpu->kvm;
+	int ret;
+	unsigned long size, ptbl, root;
+	struct prtb_entry entry;
+
+	if ((table & PRTS_MASK) > 24)
+		return -EINVAL;
+	size = 1ul << ((table & PRTS_MASK) + 12);
+
+	/* Is the table big enough to contain this entry? */
+	if ((table_index * sizeof(entry)) >= size)
+		return -EINVAL;
+
+	/* Read the table to find the root of the radix tree */
+	ptbl = (table & PRTB_MASK) + (table_index * sizeof(entry));
+	vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
+	ret = kvm_read_guest(kvm, ptbl, &entry, sizeof(entry));
+	srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
+	if (ret)
+		return ret;
+
+	/* Root is stored in the first double word */
+	root = be64_to_cpu(entry.prtb0);
+
+	return kvmppc_mmu_walk_radix_tree(vcpu, eaddr, gpte, root, pte_ret_p);
+}
+
 int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 			   struct kvmppc_pte *gpte, bool data, bool iswrite)
 {
-	struct kvm *kvm = vcpu->kvm;
 	u32 pid;
-	int ret, level, ps;
-	__be64 prte, rpte;
-	unsigned long ptbl;
-	unsigned long root, pte, index;
-	unsigned long rts, bits, offset;
-	unsigned long gpa;
-	unsigned long proc_tbl_size;
+	u64 pte;
+	int ret;
 
 	/* Work out effective PID */
 	switch (eaddr >> 62) {
...
 	default:
 		return -EINVAL;
 	}
-	proc_tbl_size = 1 << ((kvm->arch.process_table & PRTS_MASK) + 12);
-	if (pid * 16 >= proc_tbl_size)
-		return -EINVAL;
 
-	/* Read partition table to find root of tree for effective PID */
-	ptbl = (kvm->arch.process_table & PRTB_MASK) + (pid * 16);
-	ret = kvm_read_guest(kvm, ptbl, &prte, sizeof(prte));
+	ret = kvmppc_mmu_radix_translate_table(vcpu, eaddr, gpte,
+				vcpu->kvm->arch.process_table, pid, &pte);
 	if (ret)
 		return ret;
 
-	root = be64_to_cpu(prte);
-	rts = ((root & RTS1_MASK) >> (RTS1_SHIFT - 3)) |
-		((root & RTS2_MASK) >> RTS2_SHIFT);
-	bits = root & RPDS_MASK;
-	root = root & RPDB_MASK;
-
-	offset = rts + 31;
-
-	/* current implementations only support 52-bit space */
-	if (offset != 52)
-		return -EINVAL;
-
-	for (level = 3; level >= 0; --level) {
-		if (level && bits != p9_supported_radix_bits[level])
-			return -EINVAL;
-		if (level == 0 && !(bits == 5 || bits == 9))
-			return -EINVAL;
-		offset -= bits;
-		index = (eaddr >> offset) & ((1UL << bits) - 1);
-		/* check that low bits of page table base are zero */
-		if (root & ((1UL << (bits + 3)) - 1))
-			return -EINVAL;
-		ret = kvm_read_guest(kvm, root + index * 8,
-				     &rpte, sizeof(rpte));
-		if (ret)
-			return ret;
-		pte = __be64_to_cpu(rpte);
-		if (!(pte & _PAGE_PRESENT))
-			return -ENOENT;
-		if (pte & _PAGE_PTE)
-			break;
-		bits = pte & 0x1f;
-		root = pte & 0x0fffffffffffff00ul;
-	}
-	/* need a leaf at lowest level; 512GB pages not supported */
-	if (level < 0 || level == 3)
-		return -EINVAL;
-
-	/* offset is now log base 2 of the page size */
-	gpa = pte & 0x01fffffffffff000ul;
-	if (gpa & ((1ul << offset) - 1))
-		return -EINVAL;
-	gpa += eaddr & ((1ul << offset) - 1);
-	for (ps = MMU_PAGE_4K; ps < MMU_PAGE_COUNT; ++ps)
-		if (offset == mmu_psize_defs[ps].shift)
-			break;
-	gpte->page_size = ps;
-
-	gpte->eaddr = eaddr;
-	gpte->raddr = gpa;
-
-	/* Work out permissions */
-	gpte->may_read = !!(pte & _PAGE_READ);
-	gpte->may_write = !!(pte & _PAGE_WRITE);
-	gpte->may_execute = !!(pte & _PAGE_EXEC);
+	/* Check privilege (applies only to process scoped translations) */
 	if (kvmppc_get_msr(vcpu) & MSR_PR) {
 		if (pte & _PAGE_PRIVILEGED) {
 			gpte->may_read = 0;
...
 	return 0;
 }
 
-static void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
-				    unsigned int pshift)
+void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
+			     unsigned int pshift, unsigned int lpid)
 {
 	unsigned long psize = PAGE_SIZE;
+	int psi;
+	long rc;
+	unsigned long rb;
 
 	if (pshift)
 		psize = 1UL << pshift;
+	else
+		pshift = PAGE_SHIFT;
 
 	addr &= ~(psize - 1);
-	radix__flush_tlb_lpid_page(kvm->arch.lpid, addr, psize);
+
+	if (!kvmhv_on_pseries()) {
+		radix__flush_tlb_lpid_page(lpid, addr, psize);
+		return;
+	}
+
+	psi = shift_to_mmu_psize(pshift);
+	rb = addr | (mmu_get_ap(psi) << PPC_BITLSHIFT(58));
+	rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(0, 0, 1),
+				lpid, rb);
+	if (rc)
+		pr_err("KVM: TLB page invalidation hcall failed, rc=%ld\n", rc);
 }
 
-static void kvmppc_radix_flush_pwc(struct kvm *kvm)
+static void kvmppc_radix_flush_pwc(struct kvm *kvm, unsigned int lpid)
 {
-	radix__flush_pwc_lpid(kvm->arch.lpid);
+	long rc;
+
+	if (!kvmhv_on_pseries()) {
+		radix__flush_pwc_lpid(lpid);
+		return;
+	}
+
+	rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(1, 0, 1),
+				lpid, TLBIEL_INVAL_SET_LPID);
+	if (rc)
+		pr_err("KVM: TLB PWC invalidation hcall failed, rc=%ld\n", rc);
 }
 
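Both flush helpers above now fork on kvmhv_on_pseries(): a bare-metal host issues the invalidation itself (radix__flush_tlb_lpid_page() / radix__flush_pwc_lpid()), while a nested hypervisor running under an L0 must ask the L0 to do it via the H_TLB_INVALIDATE hcall. A sketch of how the RB image for the page case is laid out, assuming IBM (big-endian) bit numbering:

	/*
	 * rb = addr | (mmu_get_ap(psi) << PPC_BITLSHIFT(58));
	 *
	 * PPC_BITLSHIFT(58) == 63 - 58 == 5, so the "actual page size"
	 * (AP) encoding from mmu_get_ap() occupies IBM bits 56:58,
	 * above the page-aligned effective address.
	 */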
 static unsigned long kvmppc_radix_update_pte(struct kvm *kvm, pte_t *ptep,
...
 	return __radix_pte_update(ptep, clr, set);
 }
 
-void kvmppc_radix_set_pte_at(struct kvm *kvm, unsigned long addr,
+static void kvmppc_radix_set_pte_at(struct kvm *kvm, unsigned long addr,
 			     pte_t *ptep, pte_t pte)
 {
 	radix__set_pte_at(kvm->mm, addr, ptep, pte, 0);
...
 	kmem_cache_free(kvm_pte_cache, ptep);
 }
 
-/* Like pmd_huge() and pmd_large(), but works regardless of config options */
-static inline int pmd_is_leaf(pmd_t pmd)
-{
-	return !!(pmd_val(pmd) & _PAGE_PTE);
-}
-
 static pmd_t *kvmppc_pmd_alloc(void)
 {
 	pmd_t *pmd;
...
 	kmem_cache_free(kvm_pmd_cache, pmdp);
 }
209 | 392 | |
---|
210 | | -static void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, |
---|
211 | | - unsigned long gpa, unsigned int shift) |
---|
| 393 | +/* Called with kvm->mmu_lock held */ |
---|
| 394 | +void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa, |
---|
| 395 | + unsigned int shift, |
---|
| 396 | + const struct kvm_memory_slot *memslot, |
---|
| 397 | + unsigned int lpid) |
---|
212 | 398 | |
---|
213 | 399 | { |
---|
214 | | - unsigned long page_size = 1ul << shift; |
---|
215 | 400 | unsigned long old; |
---|
| 401 | + unsigned long gfn = gpa >> PAGE_SHIFT; |
---|
| 402 | + unsigned long page_size = PAGE_SIZE; |
---|
| 403 | + unsigned long hpa; |
---|
216 | 404 | |
---|
217 | 405 | old = kvmppc_radix_update_pte(kvm, pte, ~0UL, 0, gpa, shift); |
---|
218 | | - kvmppc_radix_tlbie_page(kvm, gpa, shift); |
---|
219 | | - if (old & _PAGE_DIRTY) { |
---|
220 | | - unsigned long gfn = gpa >> PAGE_SHIFT; |
---|
221 | | - struct kvm_memory_slot *memslot; |
---|
| 406 | + kvmppc_radix_tlbie_page(kvm, gpa, shift, lpid); |
---|
222 | 407 | |
---|
| 408 | + /* The following only applies to L1 entries */ |
---|
| 409 | + if (lpid != kvm->arch.lpid) |
---|
| 410 | + return; |
---|
| 411 | + |
---|
| 412 | + if (!memslot) { |
---|
223 | 413 | memslot = gfn_to_memslot(kvm, gfn); |
---|
224 | | - if (memslot && memslot->dirty_bitmap) |
---|
225 | | - kvmppc_update_dirty_map(memslot, gfn, page_size); |
---|
| 414 | + if (!memslot) |
---|
| 415 | + return; |
---|
226 | 416 | } |
---|
| 417 | + if (shift) { /* 1GB or 2MB page */ |
---|
| 418 | + page_size = 1ul << shift; |
---|
| 419 | + if (shift == PMD_SHIFT) |
---|
| 420 | + kvm->stat.num_2M_pages--; |
---|
| 421 | + else if (shift == PUD_SHIFT) |
---|
| 422 | + kvm->stat.num_1G_pages--; |
---|
| 423 | + } |
---|
| 424 | + |
---|
| 425 | + gpa &= ~(page_size - 1); |
---|
| 426 | + hpa = old & PTE_RPN_MASK; |
---|
| 427 | + kvmhv_remove_nest_rmap_range(kvm, memslot, gpa, hpa, page_size); |
---|
| 428 | + |
---|
| 429 | + if ((old & _PAGE_DIRTY) && memslot->dirty_bitmap) |
---|
| 430 | + kvmppc_update_dirty_map(memslot, gfn, page_size); |
---|
227 | 431 | } |
---|
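Two details are worth noting in the rewritten kvmppc_unmap_pte(): entries belonging to a nested LPID return early right after the TLB flush, because dirty tracking, nested rmaps and page statistics apply only to the L1 partition-scoped table, and the new memslot argument lets callers that already hold one skip the gfn_to_memslot() lookup. The shift-to-statistics mapping is (illustrative comment only):

	/*
	 * shift == 0          -> PAGE_SIZE mapping, no counter update
	 * shift == PMD_SHIFT  -> 2 MB mapping,  kvm->stat.num_2M_pages--
	 * shift == PUD_SHIFT  -> 1 GB mapping,  kvm->stat.num_1G_pages--
	 */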
 
 /*
...
  * Callers are responsible for flushing the PWC.
  *
  * When page tables are being unmapped/freed as part of page fault path
- * (full == false), ptes are not expected. There is code to unmap them
- * and emit a warning if encountered, but there may already be data
- * corruption due to the unexpected mappings.
+ * (full == false), valid ptes are generally not expected; however, there
+ * is one situation where they arise, which is when dirty page logging is
+ * turned off for a memslot while the VM is running. The new memslot
+ * becomes visible to page faults before the memslot commit function
+ * gets to flush the memslot, which can lead to a 2MB page mapping being
+ * installed for a guest physical address where there are already 64kB
+ * (or 4kB) mappings (of sub-pages of the same 2MB page).
  */
-static void kvmppc_unmap_free_pte(struct kvm *kvm, pte_t *pte, bool full)
+static void kvmppc_unmap_free_pte(struct kvm *kvm, pte_t *pte, bool full,
+				  unsigned int lpid)
 {
 	if (full) {
-		memset(pte, 0, sizeof(long) << PTE_INDEX_SIZE);
+		memset(pte, 0, sizeof(long) << RADIX_PTE_INDEX_SIZE);
 	} else {
 		pte_t *p = pte;
 		unsigned long it;
...
 		for (it = 0; it < PTRS_PER_PTE; ++it, ++p) {
 			if (pte_val(*p) == 0)
 				continue;
-			WARN_ON_ONCE(1);
 			kvmppc_unmap_pte(kvm, p,
 					 pte_pfn(*p) << PAGE_SHIFT,
-					 PAGE_SHIFT);
+					 PAGE_SHIFT, NULL, lpid);
 		}
 	}
 
 	kvmppc_pte_free(pte);
 }
 
-static void kvmppc_unmap_free_pmd(struct kvm *kvm, pmd_t *pmd, bool full)
+static void kvmppc_unmap_free_pmd(struct kvm *kvm, pmd_t *pmd, bool full,
+				  unsigned int lpid)
 {
 	unsigned long im;
 	pmd_t *p = pmd;
...
 			WARN_ON_ONCE(1);
 			kvmppc_unmap_pte(kvm, (pte_t *)p,
 					 pte_pfn(*(pte_t *)p) << PAGE_SHIFT,
-					 PMD_SHIFT);
+					 PMD_SHIFT, NULL, lpid);
 			}
 		} else {
 			pte_t *pte;
 
 			pte = pte_offset_map(p, 0);
-			kvmppc_unmap_free_pte(kvm, pte, full);
+			kvmppc_unmap_free_pte(kvm, pte, full, lpid);
 			pmd_clear(p);
 		}
 	}
 	kvmppc_pmd_free(pmd);
 }
 
-static void kvmppc_unmap_free_pud(struct kvm *kvm, pud_t *pud)
+static void kvmppc_unmap_free_pud(struct kvm *kvm, pud_t *pud,
+				  unsigned int lpid)
 {
 	unsigned long iu;
 	pud_t *p = pud;
...
 	for (iu = 0; iu < PTRS_PER_PUD; ++iu, ++p) {
 		if (!pud_present(*p))
 			continue;
-		if (pud_huge(*p)) {
+		if (pud_is_leaf(*p)) {
 			pud_clear(p);
 		} else {
 			pmd_t *pmd;
 
 			pmd = pmd_offset(p, 0);
-			kvmppc_unmap_free_pmd(kvm, pmd, true);
+			kvmppc_unmap_free_pmd(kvm, pmd, true, lpid);
 			pud_clear(p);
 		}
 	}
 	pud_free(kvm->mm, pud);
 }
 
-void kvmppc_free_radix(struct kvm *kvm)
+void kvmppc_free_pgtable_radix(struct kvm *kvm, pgd_t *pgd, unsigned int lpid)
 {
 	unsigned long ig;
-	pgd_t *pgd;
 
-	if (!kvm->arch.pgtable)
-		return;
-	pgd = kvm->arch.pgtable;
 	for (ig = 0; ig < PTRS_PER_PGD; ++ig, ++pgd) {
+		p4d_t *p4d = p4d_offset(pgd, 0);
 		pud_t *pud;
 
-		if (!pgd_present(*pgd))
+		if (!p4d_present(*p4d))
 			continue;
-		pud = pud_offset(pgd, 0);
-		kvmppc_unmap_free_pud(kvm, pud);
-		pgd_clear(pgd);
+		pud = pud_offset(p4d, 0);
+		kvmppc_unmap_free_pud(kvm, pud, lpid);
+		p4d_clear(p4d);
 	}
-	pgd_free(kvm->mm, kvm->arch.pgtable);
-	kvm->arch.pgtable = NULL;
+}
+
+void kvmppc_free_radix(struct kvm *kvm)
+{
+	if (kvm->arch.pgtable) {
+		kvmppc_free_pgtable_radix(kvm, kvm->arch.pgtable,
+					  kvm->arch.lpid);
+		pgd_free(kvm->mm, kvm->arch.pgtable);
+		kvm->arch.pgtable = NULL;
+	}
 }
 
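The free path above (and every walk touched by this patch) now steps through the p4d level: p4d is folded into pgd on this configuration, but the generic five-level page-table API still requires the explicit p4d_offset() step. The descent pattern used throughout:

	pgd = pgtable + pgd_index(gpa);
	p4d = p4d_offset(pgd, gpa);		/* folded: effectively the pgd entry */
	if (p4d_present(*p4d))
		pud = pud_offset(p4d, gpa);	/* then pmd_offset()/pte_offset_kernel() */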
 static void kvmppc_unmap_free_pmd_entry_table(struct kvm *kvm, pmd_t *pmd,
-					      unsigned long gpa)
+					      unsigned long gpa, unsigned int lpid)
 {
 	pte_t *pte = pte_offset_kernel(pmd, 0);
 
...
  * flushing the PWC again.
  */
 	pmd_clear(pmd);
-	kvmppc_radix_flush_pwc(kvm);
+	kvmppc_radix_flush_pwc(kvm, lpid);
 
-	kvmppc_unmap_free_pte(kvm, pte, false);
+	kvmppc_unmap_free_pte(kvm, pte, false, lpid);
 }
 
 static void kvmppc_unmap_free_pud_entry_table(struct kvm *kvm, pud_t *pud,
-					      unsigned long gpa)
+					      unsigned long gpa, unsigned int lpid)
 {
 	pmd_t *pmd = pmd_offset(pud, 0);
 
...
  * so can be freed without flushing the PWC again.
  */
 	pud_clear(pud);
-	kvmppc_radix_flush_pwc(kvm);
+	kvmppc_radix_flush_pwc(kvm, lpid);
 
-	kvmppc_unmap_free_pmd(kvm, pmd, false);
+	kvmppc_unmap_free_pmd(kvm, pmd, false, lpid);
 }
 
 /*
...
  */
 #define PTE_BITS_MUST_MATCH (~(_PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED))
 
-static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa,
-			     unsigned int level, unsigned long mmu_seq)
+int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
+		      unsigned long gpa, unsigned int level,
+		      unsigned long mmu_seq, unsigned int lpid,
+		      unsigned long *rmapp, struct rmap_nested **n_rmap)
 {
 	pgd_t *pgd;
+	p4d_t *p4d;
 	pud_t *pud, *new_pud = NULL;
 	pmd_t *pmd, *new_pmd = NULL;
 	pte_t *ptep, *new_ptep = NULL;
 	int ret;
 
 	/* Traverse the guest's 2nd-level tree, allocate new levels needed */
-	pgd = kvm->arch.pgtable + pgd_index(gpa);
+	pgd = pgtable + pgd_index(gpa);
+	p4d = p4d_offset(pgd, gpa);
+
 	pud = NULL;
-	if (pgd_present(*pgd))
-		pud = pud_offset(pgd, gpa);
+	if (p4d_present(*p4d))
+		pud = pud_offset(p4d, gpa);
 	else
 		new_pud = pud_alloc_one(kvm->mm, gpa);
 
 	pmd = NULL;
-	if (pud && pud_present(*pud) && !pud_huge(*pud))
+	if (pud && pud_present(*pud) && !pud_is_leaf(*pud))
 		pmd = pmd_offset(pud, gpa);
 	else if (level <= 1)
 		new_pmd = kvmppc_pmd_alloc();
...
 
 	/* Now traverse again under the lock and change the tree */
 	ret = -ENOMEM;
-	if (pgd_none(*pgd)) {
+	if (p4d_none(*p4d)) {
 		if (!new_pud)
 			goto out_unlock;
-		pgd_populate(kvm->mm, pgd, new_pud);
+		p4d_populate(kvm->mm, p4d, new_pud);
 		new_pud = NULL;
 	}
-	pud = pud_offset(pgd, gpa);
-	if (pud_huge(*pud)) {
+	pud = pud_offset(p4d, gpa);
+	if (pud_is_leaf(*pud)) {
 		unsigned long hgpa = gpa & PUD_MASK;
 
 		/* Check if we raced and someone else has set the same thing */
...
 			goto out_unlock;
 		}
 		/* Valid 1GB page here already, remove it */
-		kvmppc_unmap_pte(kvm, (pte_t *)pud, hgpa, PUD_SHIFT);
+		kvmppc_unmap_pte(kvm, (pte_t *)pud, hgpa, PUD_SHIFT, NULL,
+				 lpid);
 	}
 	if (level == 2) {
 		if (!pud_none(*pud)) {
...
 			 * install a large page, so remove and free the page
 			 * table page.
 			 */
-			kvmppc_unmap_free_pud_entry_table(kvm, pud, gpa);
+			kvmppc_unmap_free_pud_entry_table(kvm, pud, gpa, lpid);
 		}
 		kvmppc_radix_set_pte_at(kvm, gpa, (pte_t *)pud, pte);
+		if (rmapp && n_rmap)
+			kvmhv_insert_nest_rmap(kvm, rmapp, n_rmap);
 		ret = 0;
 		goto out_unlock;
 	}
...
 		WARN_ON_ONCE((pmd_val(*pmd) ^ pte_val(pte)) &
 			     PTE_BITS_MUST_MATCH);
 		kvmppc_radix_update_pte(kvm, pmdp_ptep(pmd),
-					0, pte_val(pte), lgpa, PMD_SHIFT);
+				0, pte_val(pte), lgpa, PMD_SHIFT);
 		ret = 0;
 		goto out_unlock;
 	}
...
 			goto out_unlock;
 		}
 		/* Valid 2MB page here already, remove it */
-		kvmppc_unmap_pte(kvm, pmdp_ptep(pmd), lgpa, PMD_SHIFT);
+		kvmppc_unmap_pte(kvm, pmdp_ptep(pmd), lgpa, PMD_SHIFT, NULL,
+				 lpid);
 	}
 	if (level == 1) {
 		if (!pmd_none(*pmd)) {
...
 			 * install a large page, so remove and free the page
 			 * table page.
 			 */
-			kvmppc_unmap_free_pmd_entry_table(kvm, pmd, gpa);
+			kvmppc_unmap_free_pmd_entry_table(kvm, pmd, gpa, lpid);
 		}
 		kvmppc_radix_set_pte_at(kvm, gpa, pmdp_ptep(pmd), pte);
+		if (rmapp && n_rmap)
+			kvmhv_insert_nest_rmap(kvm, rmapp, n_rmap);
 		ret = 0;
 		goto out_unlock;
 	}
...
 		goto out_unlock;
 	}
 	kvmppc_radix_set_pte_at(kvm, gpa, ptep, pte);
+	if (rmapp && n_rmap)
+		kvmhv_insert_nest_rmap(kvm, rmapp, n_rmap);
 	ret = 0;
 
 out_unlock:
...
 	return ret;
 }
 
-int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
-				   unsigned long ea, unsigned long dsisr)
+bool kvmppc_hv_handle_set_rc(struct kvm *kvm, bool nested, bool writing,
+			     unsigned long gpa, unsigned int lpid)
+{
+	unsigned long pgflags;
+	unsigned int shift;
+	pte_t *ptep;
+
+	/*
+	 * Need to set an R or C bit in the 2nd-level tables;
+	 * since we are just helping out the hardware here,
+	 * it is sufficient to do what the hardware does.
+	 */
+	pgflags = _PAGE_ACCESSED;
+	if (writing)
+		pgflags |= _PAGE_DIRTY;
+
+	if (nested)
+		ptep = find_kvm_nested_guest_pte(kvm, lpid, gpa, &shift);
+	else
+		ptep = find_kvm_secondary_pte(kvm, gpa, &shift);
+
+	if (ptep && pte_present(*ptep) && (!writing || pte_write(*ptep))) {
+		kvmppc_radix_update_pte(kvm, ptep, 0, pgflags, gpa, shift);
+		return true;
+	}
+	return false;
+}
+
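kvmppc_hv_handle_set_rc() factors the R/C-bit fixup out of the fault handler so the nested-guest code can share it: under kvm->mmu_lock it sets the referenced (and, for stores, changed) bit the way the hardware would, either in a nested LPID's shadow table or in this guest's own partition-scoped table. Its caller pattern, as it appears in the fault path later in this patch:

	spin_lock(&kvm->mmu_lock);
	if (kvmppc_hv_handle_set_rc(kvm, false, writing, gpa, kvm->arch.lpid))
		dsisr &= ~DSISR_SET_RC;	/* the R/C update was all that was needed */
	spin_unlock(&kvm->mmu_lock);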
+int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
+				   unsigned long gpa,
+				   struct kvm_memory_slot *memslot,
+				   bool writing, bool kvm_ro,
+				   pte_t *inserted_pte, unsigned int *levelp)
 {
 	struct kvm *kvm = vcpu->kvm;
-	unsigned long mmu_seq;
-	unsigned long gpa, gfn, hva;
-	struct kvm_memory_slot *memslot;
 	struct page *page = NULL;
-	long ret;
-	bool writing;
+	unsigned long mmu_seq;
+	unsigned long hva, gfn = gpa >> PAGE_SHIFT;
 	bool upgrade_write = false;
 	bool *upgrade_p = &upgrade_write;
 	pte_t pte, *ptep;
-	unsigned long pgflags;
 	unsigned int shift, level;
-
-	/* Check for unusual errors */
-	if (dsisr & DSISR_UNSUPP_MMU) {
-		pr_err("KVM: Got unsupported MMU fault\n");
-		return -EFAULT;
-	}
-	if (dsisr & DSISR_BADACCESS) {
-		/* Reflect to the guest as DSI */
-		pr_err("KVM: Got radix HV page fault with DSISR=%lx\n", dsisr);
-		kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
-		return RESUME_GUEST;
-	}
-
-	/* Translate the logical address and get the page */
-	gpa = vcpu->arch.fault_gpa & ~0xfffUL;
-	gpa &= ~0xF000000000000000ul;
-	gfn = gpa >> PAGE_SHIFT;
-	if (!(dsisr & DSISR_PRTABLE_FAULT))
-		gpa |= ea & 0xfff;
-	memslot = gfn_to_memslot(kvm, gfn);
-
-	/* No memslot means it's an emulated MMIO region */
-	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) {
-		if (dsisr & (DSISR_PRTABLE_FAULT | DSISR_BADACCESS |
-			     DSISR_SET_RC)) {
-			/*
-			 * Bad address in guest page table tree, or other
-			 * unusual error - reflect it to the guest as DSI.
-			 */
-			kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
-			return RESUME_GUEST;
-		}
-		return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea,
-					      dsisr & DSISR_ISSTORE);
-	}
-
-	writing = (dsisr & DSISR_ISSTORE) != 0;
-	if (memslot->flags & KVM_MEM_READONLY) {
-		if (writing) {
-			/* give the guest a DSI */
-			dsisr = DSISR_ISSTORE | DSISR_PROTFAULT;
-			kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
-			return RESUME_GUEST;
-		}
-		upgrade_p = NULL;
-	}
-
-	if (dsisr & DSISR_SET_RC) {
-		/*
-		 * Need to set an R or C bit in the 2nd-level tables;
-		 * since we are just helping out the hardware here,
-		 * it is sufficient to do what the hardware does.
-		 */
-		pgflags = _PAGE_ACCESSED;
-		if (writing)
-			pgflags |= _PAGE_DIRTY;
-		/*
-		 * We are walking the secondary page table here. We can do this
-		 * without disabling irq.
-		 */
-		spin_lock(&kvm->mmu_lock);
-		ptep = __find_linux_pte(kvm->arch.pgtable,
-					gpa, NULL, &shift);
-		if (ptep && pte_present(*ptep) &&
-		    (!writing || pte_write(*ptep))) {
-			kvmppc_radix_update_pte(kvm, ptep, 0, pgflags,
-						gpa, shift);
-			dsisr &= ~DSISR_SET_RC;
-		}
-		spin_unlock(&kvm->mmu_lock);
-		if (!(dsisr & (DSISR_BAD_FAULT_64S | DSISR_NOHPTE |
-			       DSISR_PROTFAULT | DSISR_SET_RC)))
-			return RESUME_GUEST;
-	}
+	int ret;
+	bool large_enable;
 
 	/* used to check for invalidations in progress */
 	mmu_seq = kvm->mmu_notifier_seq;
...
 	 * is that the page is writable.
 	 */
 	hva = gfn_to_hva_memslot(memslot, gfn);
-	if (upgrade_p && __get_user_pages_fast(hva, 1, 1, &page) == 1) {
+	if (!kvm_ro && get_user_page_fast_only(hva, FOLL_WRITE, &page)) {
 		upgrade_write = true;
 	} else {
 		unsigned long pfn;
...
 	 * Read the PTE from the process' radix tree and use that
 	 * so we get the shift and attribute bits.
 	 */
-	local_irq_disable();
-	ptep = __find_linux_pte(vcpu->arch.pgdir, hva, NULL, &shift);
+	spin_lock(&kvm->mmu_lock);
+	ptep = find_kvm_host_pte(kvm, mmu_seq, hva, &shift);
+	pte = __pte(0);
+	if (ptep)
+		pte = READ_ONCE(*ptep);
+	spin_unlock(&kvm->mmu_lock);
 	/*
 	 * If the PTE disappeared temporarily due to a THP
 	 * collapse, just return and let the guest try again.
 	 */
-	if (!ptep) {
-		local_irq_enable();
+	if (!pte_present(pte)) {
 		if (page)
 			put_page(page);
 		return RESUME_GUEST;
 	}
-	pte = *ptep;
-	local_irq_enable();
+
+	/* If we're logging dirty pages, always map single pages */
+	large_enable = !(memslot->flags & KVM_MEM_LOG_DIRTY_PAGES);
 
 	/* Get pte level from shift/size */
-	if (shift == PUD_SHIFT &&
+	if (large_enable && shift == PUD_SHIFT &&
 	    (gpa & (PUD_SIZE - PAGE_SIZE)) ==
 	    (hva & (PUD_SIZE - PAGE_SIZE))) {
 		level = 2;
-	} else if (shift == PMD_SHIFT &&
+	} else if (large_enable && shift == PMD_SHIFT &&
 		   (gpa & (PMD_SIZE - PAGE_SIZE)) ==
 		   (hva & (PMD_SIZE - PAGE_SIZE))) {
 		level = 1;
...
 	}
 
 	/* Allocate space in the tree and write the PTE */
-	ret = kvmppc_create_pte(kvm, pte, gpa, level, mmu_seq);
+	ret = kvmppc_create_pte(kvm, kvm->arch.pgtable, pte, gpa, level,
+				mmu_seq, kvm->arch.lpid, NULL, NULL);
+	if (inserted_pte)
+		*inserted_pte = pte;
+	if (levelp)
+		*levelp = level;
 
 	if (page) {
 		if (!ret && (pte_val(pte) & _PAGE_WRITE))
...
 		put_page(page);
 	}
 
+	/* Increment number of large pages if we (successfully) inserted one */
+	if (!ret) {
+		if (level == 1)
+			kvm->stat.num_2M_pages++;
+		else if (level == 2)
+			kvm->stat.num_1G_pages++;
+	}
+
+	return ret;
+}
+
+int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu,
+				   unsigned long ea, unsigned long dsisr)
+{
+	struct kvm *kvm = vcpu->kvm;
+	unsigned long gpa, gfn;
+	struct kvm_memory_slot *memslot;
+	long ret;
+	bool writing = !!(dsisr & DSISR_ISSTORE);
+	bool kvm_ro = false;
+
+	/* Check for unusual errors */
+	if (dsisr & DSISR_UNSUPP_MMU) {
+		pr_err("KVM: Got unsupported MMU fault\n");
+		return -EFAULT;
+	}
+	if (dsisr & DSISR_BADACCESS) {
+		/* Reflect to the guest as DSI */
+		pr_err("KVM: Got radix HV page fault with DSISR=%lx\n", dsisr);
+		kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
+		return RESUME_GUEST;
+	}
+
+	/* Translate the logical address */
+	gpa = vcpu->arch.fault_gpa & ~0xfffUL;
+	gpa &= ~0xF000000000000000ul;
+	gfn = gpa >> PAGE_SHIFT;
+	if (!(dsisr & DSISR_PRTABLE_FAULT))
+		gpa |= ea & 0xfff;
+
+	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
+		return kvmppc_send_page_to_uv(kvm, gfn);
+
+	/* Get the corresponding memslot */
+	memslot = gfn_to_memslot(kvm, gfn);
+
+	/* No memslot means it's an emulated MMIO region */
+	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) {
+		if (dsisr & (DSISR_PRTABLE_FAULT | DSISR_BADACCESS |
+			     DSISR_SET_RC)) {
+			/*
+			 * Bad address in guest page table tree, or other
+			 * unusual error - reflect it to the guest as DSI.
+			 */
+			kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
+			return RESUME_GUEST;
+		}
+		return kvmppc_hv_emulate_mmio(vcpu, gpa, ea, writing);
+	}
+
+	if (memslot->flags & KVM_MEM_READONLY) {
+		if (writing) {
+			/* give the guest a DSI */
+			kvmppc_core_queue_data_storage(vcpu, ea, DSISR_ISSTORE |
+						       DSISR_PROTFAULT);
+			return RESUME_GUEST;
+		}
+		kvm_ro = true;
+	}
+
+	/* Failed to set the reference/change bits */
+	if (dsisr & DSISR_SET_RC) {
+		spin_lock(&kvm->mmu_lock);
+		if (kvmppc_hv_handle_set_rc(kvm, false, writing,
+					    gpa, kvm->arch.lpid))
+			dsisr &= ~DSISR_SET_RC;
+		spin_unlock(&kvm->mmu_lock);
+
+		if (!(dsisr & (DSISR_BAD_FAULT_64S | DSISR_NOHPTE |
+			       DSISR_PROTFAULT | DSISR_SET_RC)))
+			return RESUME_GUEST;
+	}
+
+	/* Try to insert a pte */
+	ret = kvmppc_book3s_instantiate_page(vcpu, gpa, memslot, writing,
+					     kvm_ro, NULL, NULL);
+
 	if (ret == 0 || ret == -EAGAIN)
 		ret = RESUME_GUEST;
 	return ret;
 }
 
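kvmppc_book3s_radix_page_fault() is now a thin dispatcher in front of kvmppc_book3s_instantiate_page(); the order of the checks matters and is summarized below (illustrative comment only):

	/*
	 * 1. DSISR_UNSUPP_MMU          -> -EFAULT (host problem)
	 * 2. DSISR_BADACCESS           -> reflect a DSI to the guest
	 * 3. secure (ultravisor) guest -> kvmppc_send_page_to_uv()
	 * 4. no valid memslot          -> emulated MMIO (or DSI for
	 *                                 page-table/RC errors)
	 * 5. store to read-only slot   -> reflect a DSI; else set kvm_ro
	 * 6. DSISR_SET_RC              -> try the R/C fixup on its own
	 * 7. otherwise                 -> instantiate a new mapping
	 */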
-/* Called with kvm->lock held */
+/* Called with kvm->mmu_lock held */
 int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
 		    unsigned long gfn)
 {
 	pte_t *ptep;
 	unsigned long gpa = gfn << PAGE_SHIFT;
 	unsigned int shift;
-	unsigned long old;
 
-	ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
-	if (ptep && pte_present(*ptep)) {
-		old = kvmppc_radix_update_pte(kvm, ptep, ~0UL, 0,
-					      gpa, shift);
-		kvmppc_radix_tlbie_page(kvm, gpa, shift);
-		if ((old & _PAGE_DIRTY) && memslot->dirty_bitmap) {
-			unsigned long psize = PAGE_SIZE;
-			if (shift)
-				psize = 1ul << shift;
-			kvmppc_update_dirty_map(memslot, gfn, psize);
-		}
+	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE) {
+		uv_page_inval(kvm->arch.lpid, gpa, PAGE_SHIFT);
+		return 0;
 	}
-	return 0;
+
+	ptep = find_kvm_secondary_pte(kvm, gpa, &shift);
+	if (ptep && pte_present(*ptep))
+		kvmppc_unmap_pte(kvm, ptep, gpa, shift, memslot,
+				 kvm->arch.lpid);
+	return 0;
 }
 
-/* Called with kvm->lock held */
+/* Called with kvm->mmu_lock held */
 int kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
 		  unsigned long gfn)
 {
...
 	unsigned long gpa = gfn << PAGE_SHIFT;
 	unsigned int shift;
 	int ref = 0;
+	unsigned long old, *rmapp;
 
-	ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
+	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
+		return ref;
+
+	ptep = find_kvm_secondary_pte(kvm, gpa, &shift);
 	if (ptep && pte_present(*ptep) && pte_young(*ptep)) {
-		kvmppc_radix_update_pte(kvm, ptep, _PAGE_ACCESSED, 0,
-					gpa, shift);
+		old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_ACCESSED, 0,
+					      gpa, shift);
 		/* XXX need to flush tlb here? */
+		/* Also clear bit in ptes in shadow pgtable for nested guests */
+		rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
+		kvmhv_update_nest_rmap_rc_list(kvm, rmapp, _PAGE_ACCESSED, 0,
+					       old & PTE_RPN_MASK,
+					       1UL << shift);
 		ref = 1;
 	}
 	return ref;
 }
 
-/* Called with kvm->lock held */
+/* Called with kvm->mmu_lock held */
 int kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
 		       unsigned long gfn)
 {
...
 	unsigned int shift;
 	int ref = 0;
 
-	ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
+	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
+		return ref;
+
+	ptep = find_kvm_secondary_pte(kvm, gpa, &shift);
 	if (ptep && pte_present(*ptep) && pte_young(*ptep))
 		ref = 1;
 	return ref;
...
 {
 	unsigned long gfn = memslot->base_gfn + pagenum;
 	unsigned long gpa = gfn << PAGE_SHIFT;
-	pte_t *ptep;
+	pte_t *ptep, pte;
 	unsigned int shift;
 	int ret = 0;
+	unsigned long old, *rmapp;
 
-	ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
-	if (ptep && pte_present(*ptep) && pte_dirty(*ptep)) {
+	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
+		return ret;
+
+	/*
+	 * For performance reasons we don't hold kvm->mmu_lock while walking the
+	 * partition scoped table.
+	 */
+	ptep = find_kvm_secondary_pte_unlocked(kvm, gpa, &shift);
+	if (!ptep)
+		return 0;
+
+	pte = READ_ONCE(*ptep);
+	if (pte_present(pte) && pte_dirty(pte)) {
+		spin_lock(&kvm->mmu_lock);
+		/*
+		 * Recheck the pte again
+		 */
+		if (pte_val(pte) != pte_val(*ptep)) {
+			/*
+			 * We have KVM_MEM_LOG_DIRTY_PAGES enabled. Hence we can
+			 * only find PAGE_SIZE pte entries here. We can continue
+			 * to use the pte addr returned by above page table
+			 * walk.
+			 */
+			if (!pte_present(*ptep) || !pte_dirty(*ptep)) {
+				spin_unlock(&kvm->mmu_lock);
+				return 0;
+			}
+		}
+
 		ret = 1;
-		if (shift)
-			ret = 1 << (shift - PAGE_SHIFT);
-		kvmppc_radix_update_pte(kvm, ptep, _PAGE_DIRTY, 0,
-					gpa, shift);
-		kvmppc_radix_tlbie_page(kvm, gpa, shift);
+		VM_BUG_ON(shift);
+		old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_DIRTY, 0,
+					      gpa, shift);
+		kvmppc_radix_tlbie_page(kvm, gpa, shift, kvm->arch.lpid);
+		/* Also clear bit in ptes in shadow pgtable for nested guests */
+		rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
+		kvmhv_update_nest_rmap_rc_list(kvm, rmapp, _PAGE_DIRTY, 0,
+					       old & PTE_RPN_MASK,
+					       1UL << shift);
+		spin_unlock(&kvm->mmu_lock);
 	}
 	return ret;
 }
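kvm_radix_test_clear_dirty() now uses an optimistic pattern: walk the table without kvm->mmu_lock (find_kvm_secondary_pte_unlocked()), take the lock only once a dirty PTE is seen, and recheck that the entry is still present and dirty before clearing _PAGE_DIRTY. In miniature:

	pte = READ_ONCE(*ptep);			/* unlocked sample */
	if (pte_present(pte) && pte_dirty(pte)) {
		spin_lock(&kvm->mmu_lock);
		if (pte_present(*ptep) && pte_dirty(*ptep))
			;	/* safe: clear the bit and flush under the lock */
		spin_unlock(&kvm->mmu_lock);
	}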
...
 		}
 	}
 	return 0;
+}
+
+void kvmppc_radix_flush_memslot(struct kvm *kvm,
+				const struct kvm_memory_slot *memslot)
+{
+	unsigned long n;
+	pte_t *ptep;
+	unsigned long gpa;
+	unsigned int shift;
+
+	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START)
+		kvmppc_uvmem_drop_pages(memslot, kvm, true);
+
+	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
+		return;
+
+	gpa = memslot->base_gfn << PAGE_SHIFT;
+	spin_lock(&kvm->mmu_lock);
+	for (n = memslot->npages; n; --n) {
+		ptep = find_kvm_secondary_pte(kvm, gpa, &shift);
+		if (ptep && pte_present(*ptep))
+			kvmppc_unmap_pte(kvm, ptep, gpa, shift, memslot,
+					 kvm->arch.lpid);
+		gpa += PAGE_SIZE;
+	}
+	/*
+	 * Increase the mmu notifier sequence number to prevent any page
+	 * fault that read the memslot earlier from writing a PTE.
+	 */
+	kvm->mmu_notifier_seq++;
+	spin_unlock(&kvm->mmu_lock);
 }
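The kvm->mmu_notifier_seq++ above pairs with the snapshot taken at the top of kvmppc_book3s_instantiate_page(): a fault that sampled the counter before this flush sees a mismatch under the lock and throws away its candidate PTE rather than resurrecting a mapping that was just torn down. A sketch of the consumer side of this standard KVM idiom, assuming the mmu_notifier_retry() helper of this kernel generation:

	mmu_seq = kvm->mmu_notifier_seq;	/* snapshot before the slow work */
	smp_rmb();
	/* ... pin the page and walk the tables ... */
	spin_lock(&kvm->mmu_lock);
	if (mmu_notifier_retry(kvm, mmu_seq)) {
		/* raced with an invalidation: drop everything and retry */
		spin_unlock(&kvm->mmu_lock);
		return -EAGAIN;
	}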
 
 static void add_rmmu_ap_encoding(struct kvm_ppc_rmmu_info *info,
...
 	memset(addr, 0, RADIX_PMD_TABLE_SIZE);
 }
 
+struct debugfs_radix_state {
+	struct kvm	*kvm;
+	struct mutex	mutex;
+	unsigned long	gpa;
+	int		lpid;
+	int		chars_left;
+	int		buf_index;
+	char		buf[128];
+	u8		hdr;
+};
+
+static int debugfs_radix_open(struct inode *inode, struct file *file)
+{
+	struct kvm *kvm = inode->i_private;
+	struct debugfs_radix_state *p;
+
+	p = kzalloc(sizeof(*p), GFP_KERNEL);
+	if (!p)
+		return -ENOMEM;
+
+	kvm_get_kvm(kvm);
+	p->kvm = kvm;
+	mutex_init(&p->mutex);
+	file->private_data = p;
+
+	return nonseekable_open(inode, file);
+}
+
+static int debugfs_radix_release(struct inode *inode, struct file *file)
+{
+	struct debugfs_radix_state *p = file->private_data;
+
+	kvm_put_kvm(p->kvm);
+	kfree(p);
+	return 0;
+}
+
+static ssize_t debugfs_radix_read(struct file *file, char __user *buf,
+				  size_t len, loff_t *ppos)
+{
+	struct debugfs_radix_state *p = file->private_data;
+	ssize_t ret, r;
+	unsigned long n;
+	struct kvm *kvm;
+	unsigned long gpa;
+	pgd_t *pgt;
+	struct kvm_nested_guest *nested;
+	pgd_t *pgdp;
+	p4d_t p4d, *p4dp;
+	pud_t pud, *pudp;
+	pmd_t pmd, *pmdp;
+	pte_t *ptep;
+	int shift;
+	unsigned long pte;
+
+	kvm = p->kvm;
+	if (!kvm_is_radix(kvm))
+		return 0;
+
+	ret = mutex_lock_interruptible(&p->mutex);
+	if (ret)
+		return ret;
+
+	if (p->chars_left) {
+		n = p->chars_left;
+		if (n > len)
+			n = len;
+		r = copy_to_user(buf, p->buf + p->buf_index, n);
+		n -= r;
+		p->chars_left -= n;
+		p->buf_index += n;
+		buf += n;
+		len -= n;
+		ret = n;
+		if (r) {
+			if (!n)
+				ret = -EFAULT;
+			goto out;
+		}
+	}
+
+	gpa = p->gpa;
+	nested = NULL;
+	pgt = NULL;
+	while (len != 0 && p->lpid >= 0) {
+		if (gpa >= RADIX_PGTABLE_RANGE) {
+			gpa = 0;
+			pgt = NULL;
+			if (nested) {
+				kvmhv_put_nested(nested);
+				nested = NULL;
+			}
+			p->lpid = kvmhv_nested_next_lpid(kvm, p->lpid);
+			p->hdr = 0;
+			if (p->lpid < 0)
+				break;
+		}
+		if (!pgt) {
+			if (p->lpid == 0) {
+				pgt = kvm->arch.pgtable;
+			} else {
+				nested = kvmhv_get_nested(kvm, p->lpid, false);
+				if (!nested) {
+					gpa = RADIX_PGTABLE_RANGE;
+					continue;
+				}
+				pgt = nested->shadow_pgtable;
+			}
+		}
+		n = 0;
+		if (!p->hdr) {
+			if (p->lpid > 0)
+				n = scnprintf(p->buf, sizeof(p->buf),
+					      "\nNested LPID %d: ", p->lpid);
+			n += scnprintf(p->buf + n, sizeof(p->buf) - n,
+				      "pgdir: %lx\n", (unsigned long)pgt);
+			p->hdr = 1;
+			goto copy;
+		}
+
+		pgdp = pgt + pgd_index(gpa);
+		p4dp = p4d_offset(pgdp, gpa);
+		p4d = READ_ONCE(*p4dp);
+		if (!(p4d_val(p4d) & _PAGE_PRESENT)) {
+			gpa = (gpa & P4D_MASK) + P4D_SIZE;
+			continue;
+		}
+
+		pudp = pud_offset(&p4d, gpa);
+		pud = READ_ONCE(*pudp);
+		if (!(pud_val(pud) & _PAGE_PRESENT)) {
+			gpa = (gpa & PUD_MASK) + PUD_SIZE;
+			continue;
+		}
+		if (pud_val(pud) & _PAGE_PTE) {
+			pte = pud_val(pud);
+			shift = PUD_SHIFT;
+			goto leaf;
+		}
+
+		pmdp = pmd_offset(&pud, gpa);
+		pmd = READ_ONCE(*pmdp);
+		if (!(pmd_val(pmd) & _PAGE_PRESENT)) {
+			gpa = (gpa & PMD_MASK) + PMD_SIZE;
+			continue;
+		}
+		if (pmd_val(pmd) & _PAGE_PTE) {
+			pte = pmd_val(pmd);
+			shift = PMD_SHIFT;
+			goto leaf;
+		}
+
+		ptep = pte_offset_kernel(&pmd, gpa);
+		pte = pte_val(READ_ONCE(*ptep));
+		if (!(pte & _PAGE_PRESENT)) {
+			gpa += PAGE_SIZE;
+			continue;
+		}
+		shift = PAGE_SHIFT;
+ leaf:
+		n = scnprintf(p->buf, sizeof(p->buf),
+			      " %lx: %lx %d\n", gpa, pte, shift);
+		gpa += 1ul << shift;
+ copy:
+		p->chars_left = n;
+		if (n > len)
+			n = len;
+		r = copy_to_user(buf, p->buf, n);
+		n -= r;
+		p->chars_left -= n;
+		p->buf_index = n;
+		buf += n;
+		len -= n;
+		ret += n;
+		if (r) {
+			if (!ret)
+				ret = -EFAULT;
+			break;
+		}
+	}
+	p->gpa = gpa;
+	if (nested)
+		kvmhv_put_nested(nested);
+
+ out:
+	mutex_unlock(&p->mutex);
+	return ret;
+}
+
+static ssize_t debugfs_radix_write(struct file *file, const char __user *buf,
+				   size_t len, loff_t *ppos)
+{
+	return -EACCES;
+}
+
+static const struct file_operations debugfs_radix_fops = {
+	.owner	 = THIS_MODULE,
+	.open	 = debugfs_radix_open,
+	.release = debugfs_radix_release,
+	.read	 = debugfs_radix_read,
+	.write	 = debugfs_radix_write,
+	.llseek	 = generic_file_llseek,
+};
+
+void kvmhv_radix_debugfs_init(struct kvm *kvm)
+{
+	debugfs_create_file("radix", 0400, kvm->arch.debugfs_dir, kvm,
+			    &debugfs_radix_fops);
+}
+
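The new debugfs file dumps every valid partition-scoped mapping, first for this guest (an LPID-0 "pgdir:" header) and then for each nested LPID. Each leaf prints via the " %lx: %lx %d" format: guest physical address, raw PTE value, page shift. So a line such as

	 1000: 8e0000000047d196 12

(the values here are invented for illustration) would describe a 4 KiB mapping, shift 12, at GPA 0x1000. The file is read-only (mode 0400, with writes returning -EACCES) and appears as "radix" in the VM's debugfs directory, typically under /sys/kernel/debug/kvm/.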
 int kvmppc_radix_init(void)
 {
 	unsigned long size = sizeof(void *) << RADIX_PTE_INDEX_SIZE;
---|