2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -1,7 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
 *
 * Copyright 2016 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */
@@ -10,14 +8,19 @@
 #include <linux/string.h>
 #include <linux/kvm.h>
 #include <linux/kvm_host.h>
+#include <linux/anon_inodes.h>
+#include <linux/file.h>
+#include <linux/debugfs.h>
+#include <linux/pgtable.h>

 #include <asm/kvm_ppc.h>
 #include <asm/kvm_book3s.h>
 #include <asm/page.h>
 #include <asm/mmu.h>
-#include <asm/pgtable.h>
 #include <asm/pgalloc.h>
 #include <asm/pte-walk.h>
+#include <asm/ultravisor.h>
+#include <asm/kvm_book3s_uvmem.h>

 /*
 * Supported radix tree geometry.
@@ -26,18 +29,236 @@
 */
 static int p9_supported_radix_bits[4] = { 5, 9, 9, 13 };

+unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid,
+ gva_t eaddr, void *to, void *from,
+ unsigned long n)
+{
+ int old_pid, old_lpid;
+ unsigned long quadrant, ret = n;
+ bool is_load = !!to;
+
+ /* Can't access quadrants 1 or 2 in non-HV mode, call the HV to do it */
+ if (kvmhv_on_pseries())
+ return plpar_hcall_norets(H_COPY_TOFROM_GUEST, lpid, pid, eaddr,
+ (to != NULL) ? __pa(to): 0,
+ (from != NULL) ? __pa(from): 0, n);
+
+ quadrant = 1;
+ if (!pid)
+ quadrant = 2;
+ if (is_load)
+ from = (void *) (eaddr | (quadrant << 62));
+ else
+ to = (void *) (eaddr | (quadrant << 62));
+
+ preempt_disable();
+
+ /* switch the lpid first to avoid running host with unallocated pid */
+ old_lpid = mfspr(SPRN_LPID);
+ if (old_lpid != lpid)
+ mtspr(SPRN_LPID, lpid);
+ if (quadrant == 1) {
+ old_pid = mfspr(SPRN_PID);
+ if (old_pid != pid)
+ mtspr(SPRN_PID, pid);
+ }
+ isync();
+
+ pagefault_disable();
+ if (is_load)
+ ret = __copy_from_user_inatomic(to, (const void __user *)from, n);
+ else
+ ret = __copy_to_user_inatomic((void __user *)to, from, n);
+ pagefault_enable();
+
+ /* switch the pid first to avoid running host with unallocated pid */
+ if (quadrant == 1 && pid != old_pid)
+ mtspr(SPRN_PID, old_pid);
+ if (lpid != old_lpid)
+ mtspr(SPRN_LPID, old_lpid);
+ isync();
+
+ preempt_enable();
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(__kvmhv_copy_tofrom_guest_radix);
+
+static long kvmhv_copy_tofrom_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
+ void *to, void *from, unsigned long n)
+{
+ int lpid = vcpu->kvm->arch.lpid;
+ int pid = vcpu->arch.pid;
+
+ /* This would cause a data segment intr so don't allow the access */
+ if (eaddr & (0x3FFUL << 52))
+ return -EINVAL;
+
+ /* Should we be using the nested lpid */
+ if (vcpu->arch.nested)
+ lpid = vcpu->arch.nested->shadow_lpid;
+
+ /* If accessing quadrant 3 then pid is expected to be 0 */
+ if (((eaddr >> 62) & 0x3) == 0x3)
+ pid = 0;
+
+ eaddr &= ~(0xFFFUL << 52);
+
+ return __kvmhv_copy_tofrom_guest_radix(lpid, pid, eaddr, to, from, n);
+}
+
+long kvmhv_copy_from_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr, void *to,
+ unsigned long n)
+{
+ long ret;
+
+ ret = kvmhv_copy_tofrom_guest_radix(vcpu, eaddr, to, NULL, n);
+ if (ret > 0)
+ memset(to + (n - ret), 0, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(kvmhv_copy_from_guest_radix);
+
+long kvmhv_copy_to_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr, void *from,
+ unsigned long n)
+{
+ return kvmhv_copy_tofrom_guest_radix(vcpu, eaddr, NULL, from, n);
+}
+EXPORT_SYMBOL_GPL(kvmhv_copy_to_guest_radix);
+
+int kvmppc_mmu_walk_radix_tree(struct kvm_vcpu *vcpu, gva_t eaddr,
+ struct kvmppc_pte *gpte, u64 root,
+ u64 *pte_ret_p)
+{
+ struct kvm *kvm = vcpu->kvm;
+ int ret, level, ps;
+ unsigned long rts, bits, offset, index;
+ u64 pte, base, gpa;
+ __be64 rpte;
+
+ rts = ((root & RTS1_MASK) >> (RTS1_SHIFT - 3)) |
+ ((root & RTS2_MASK) >> RTS2_SHIFT);
+ bits = root & RPDS_MASK;
+ base = root & RPDB_MASK;
+
+ offset = rts + 31;
+
+ /* Current implementations only support 52-bit space */
+ if (offset != 52)
+ return -EINVAL;
+
+ /* Walk each level of the radix tree */
+ for (level = 3; level >= 0; --level) {
+ u64 addr;
+ /* Check a valid size */
+ if (level && bits != p9_supported_radix_bits[level])
+ return -EINVAL;
+ if (level == 0 && !(bits == 5 || bits == 9))
+ return -EINVAL;
+ offset -= bits;
+ index = (eaddr >> offset) & ((1UL << bits) - 1);
+ /* Check that low bits of page table base are zero */
+ if (base & ((1UL << (bits + 3)) - 1))
+ return -EINVAL;
+ /* Read the entry from guest memory */
+ addr = base + (index * sizeof(rpte));
+ vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
+ ret = kvm_read_guest(kvm, addr, &rpte, sizeof(rpte));
+ srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
+ if (ret) {
+ if (pte_ret_p)
+ *pte_ret_p = addr;
+ return ret;
+ }
+ pte = __be64_to_cpu(rpte);
+ if (!(pte & _PAGE_PRESENT))
+ return -ENOENT;
+ /* Check if a leaf entry */
+ if (pte & _PAGE_PTE)
+ break;
+ /* Get ready to walk the next level */
+ base = pte & RPDB_MASK;
+ bits = pte & RPDS_MASK;
+ }
+
+ /* Need a leaf at lowest level; 512GB pages not supported */
+ if (level < 0 || level == 3)
+ return -EINVAL;
+
+ /* We found a valid leaf PTE */
+ /* Offset is now log base 2 of the page size */
+ gpa = pte & 0x01fffffffffff000ul;
+ if (gpa & ((1ul << offset) - 1))
+ return -EINVAL;
+ gpa |= eaddr & ((1ul << offset) - 1);
+ for (ps = MMU_PAGE_4K; ps < MMU_PAGE_COUNT; ++ps)
+ if (offset == mmu_psize_defs[ps].shift)
+ break;
+ gpte->page_size = ps;
+ gpte->page_shift = offset;
+
+ gpte->eaddr = eaddr;
+ gpte->raddr = gpa;
+
+ /* Work out permissions */
+ gpte->may_read = !!(pte & _PAGE_READ);
+ gpte->may_write = !!(pte & _PAGE_WRITE);
+ gpte->may_execute = !!(pte & _PAGE_EXEC);
+
+ gpte->rc = pte & (_PAGE_ACCESSED | _PAGE_DIRTY);
+
+ if (pte_ret_p)
+ *pte_ret_p = pte;
+
+ return 0;
+}
+
+/*
+ * Used to walk a partition or process table radix tree in guest memory
+ * Note: We exploit the fact that a partition table and a process
+ * table have the same layout, a partition-scoped page table and a
+ * process-scoped page table have the same layout, and the 2nd
+ * doubleword of a partition table entry has the same layout as
+ * the PTCR register.
+ */
+int kvmppc_mmu_radix_translate_table(struct kvm_vcpu *vcpu, gva_t eaddr,
+ struct kvmppc_pte *gpte, u64 table,
+ int table_index, u64 *pte_ret_p)
+{
+ struct kvm *kvm = vcpu->kvm;
+ int ret;
+ unsigned long size, ptbl, root;
+ struct prtb_entry entry;
+
+ if ((table & PRTS_MASK) > 24)
+ return -EINVAL;
+ size = 1ul << ((table & PRTS_MASK) + 12);
+
+ /* Is the table big enough to contain this entry? */
+ if ((table_index * sizeof(entry)) >= size)
+ return -EINVAL;
+
+ /* Read the table to find the root of the radix tree */
+ ptbl = (table & PRTB_MASK) + (table_index * sizeof(entry));
+ vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
+ ret = kvm_read_guest(kvm, ptbl, &entry, sizeof(entry));
+ srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
+ if (ret)
+ return ret;
+
+ /* Root is stored in the first double word */
+ root = be64_to_cpu(entry.prtb0);
+
+ return kvmppc_mmu_walk_radix_tree(vcpu, eaddr, gpte, root, pte_ret_p);
+}
+
 int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 struct kvmppc_pte *gpte, bool data, bool iswrite)
 {
- struct kvm *kvm = vcpu->kvm;
 u32 pid;
- int ret, level, ps;
- __be64 prte, rpte;
- unsigned long ptbl;
- unsigned long root, pte, index;
- unsigned long rts, bits, offset;
- unsigned long gpa;
- unsigned long proc_tbl_size;
+ u64 pte;
+ int ret;

 /* Work out effective PID */
 switch (eaddr >> 62) {
@@ -50,71 +271,13 @@
 default:
 return -EINVAL;
 }
- proc_tbl_size = 1 << ((kvm->arch.process_table & PRTS_MASK) + 12);
- if (pid * 16 >= proc_tbl_size)
- return -EINVAL;

- /* Read partition table to find root of tree for effective PID */
- ptbl = (kvm->arch.process_table & PRTB_MASK) + (pid * 16);
- ret = kvm_read_guest(kvm, ptbl, &prte, sizeof(prte));
+ ret = kvmppc_mmu_radix_translate_table(vcpu, eaddr, gpte,
+ vcpu->kvm->arch.process_table, pid, &pte);
 if (ret)
 return ret;

- root = be64_to_cpu(prte);
- rts = ((root & RTS1_MASK) >> (RTS1_SHIFT - 3)) |
- ((root & RTS2_MASK) >> RTS2_SHIFT);
- bits = root & RPDS_MASK;
- root = root & RPDB_MASK;
-
- offset = rts + 31;
-
- /* current implementations only support 52-bit space */
- if (offset != 52)
- return -EINVAL;
-
- for (level = 3; level >= 0; --level) {
- if (level && bits != p9_supported_radix_bits[level])
- return -EINVAL;
- if (level == 0 && !(bits == 5 || bits == 9))
- return -EINVAL;
- offset -= bits;
- index = (eaddr >> offset) & ((1UL << bits) - 1);
- /* check that low bits of page table base are zero */
- if (root & ((1UL << (bits + 3)) - 1))
- return -EINVAL;
- ret = kvm_read_guest(kvm, root + index * 8,
- &rpte, sizeof(rpte));
- if (ret)
- return ret;
- pte = __be64_to_cpu(rpte);
- if (!(pte & _PAGE_PRESENT))
- return -ENOENT;
- if (pte & _PAGE_PTE)
- break;
- bits = pte & 0x1f;
- root = pte & 0x0fffffffffffff00ul;
- }
- /* need a leaf at lowest level; 512GB pages not supported */
- if (level < 0 || level == 3)
- return -EINVAL;
-
- /* offset is now log base 2 of the page size */
- gpa = pte & 0x01fffffffffff000ul;
- if (gpa & ((1ul << offset) - 1))
- return -EINVAL;
- gpa += eaddr & ((1ul << offset) - 1);
- for (ps = MMU_PAGE_4K; ps < MMU_PAGE_COUNT; ++ps)
- if (offset == mmu_psize_defs[ps].shift)
- break;
- gpte->page_size = ps;
-
- gpte->eaddr = eaddr;
- gpte->raddr = gpa;
-
- /* Work out permissions */
- gpte->may_read = !!(pte & _PAGE_READ);
- gpte->may_write = !!(pte & _PAGE_WRITE);
- gpte->may_execute = !!(pte & _PAGE_EXEC);
+ /* Check privilege (applies only to process scoped translations) */
 if (kvmppc_get_msr(vcpu) & MSR_PR) {
 if (pte & _PAGE_PRIVILEGED) {
 gpte->may_read = 0;
@@ -136,21 +299,47 @@
 return 0;
 }

-static void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
- unsigned int pshift)
+void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
+ unsigned int pshift, unsigned int lpid)
 {
 unsigned long psize = PAGE_SIZE;
+ int psi;
+ long rc;
+ unsigned long rb;

 if (pshift)
 psize = 1UL << pshift;
+ else
+ pshift = PAGE_SHIFT;

 addr &= ~(psize - 1);
- radix__flush_tlb_lpid_page(kvm->arch.lpid, addr, psize);
+
+ if (!kvmhv_on_pseries()) {
+ radix__flush_tlb_lpid_page(lpid, addr, psize);
+ return;
+ }
+
+ psi = shift_to_mmu_psize(pshift);
+ rb = addr | (mmu_get_ap(psi) << PPC_BITLSHIFT(58));
+ rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(0, 0, 1),
+ lpid, rb);
+ if (rc)
+ pr_err("KVM: TLB page invalidation hcall failed, rc=%ld\n", rc);
 }

-static void kvmppc_radix_flush_pwc(struct kvm *kvm)
+static void kvmppc_radix_flush_pwc(struct kvm *kvm, unsigned int lpid)
 {
- radix__flush_pwc_lpid(kvm->arch.lpid);
+ long rc;
+
+ if (!kvmhv_on_pseries()) {
+ radix__flush_pwc_lpid(lpid);
+ return;
+ }
+
+ rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(1, 0, 1),
+ lpid, TLBIEL_INVAL_SET_LPID);
+ if (rc)
+ pr_err("KVM: TLB PWC invalidation hcall failed, rc=%ld\n", rc);
 }

 static unsigned long kvmppc_radix_update_pte(struct kvm *kvm, pte_t *ptep,
@@ -160,7 +349,7 @@
 return __radix_pte_update(ptep, clr, set);
 }

-void kvmppc_radix_set_pte_at(struct kvm *kvm, unsigned long addr,
+static void kvmppc_radix_set_pte_at(struct kvm *kvm, unsigned long addr,
 pte_t *ptep, pte_t pte)
 {
 radix__set_pte_at(kvm->mm, addr, ptep, pte, 0);
@@ -185,12 +374,6 @@
 kmem_cache_free(kvm_pte_cache, ptep);
 }

-/* Like pmd_huge() and pmd_large(), but works regardless of config options */
-static inline int pmd_is_leaf(pmd_t pmd)
-{
- return !!(pmd_val(pmd) & _PAGE_PTE);
-}
-
 static pmd_t *kvmppc_pmd_alloc(void)
 {
 pmd_t *pmd;
@@ -207,23 +390,44 @@
 kmem_cache_free(kvm_pmd_cache, pmdp);
 }

-static void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte,
- unsigned long gpa, unsigned int shift)
+/* Called with kvm->mmu_lock held */
+void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa,
+ unsigned int shift,
+ const struct kvm_memory_slot *memslot,
+ unsigned int lpid)

 {
- unsigned long page_size = 1ul << shift;
 unsigned long old;
+ unsigned long gfn = gpa >> PAGE_SHIFT;
+ unsigned long page_size = PAGE_SIZE;
+ unsigned long hpa;

 old = kvmppc_radix_update_pte(kvm, pte, ~0UL, 0, gpa, shift);
- kvmppc_radix_tlbie_page(kvm, gpa, shift);
- if (old & _PAGE_DIRTY) {
- unsigned long gfn = gpa >> PAGE_SHIFT;
- struct kvm_memory_slot *memslot;
+ kvmppc_radix_tlbie_page(kvm, gpa, shift, lpid);

+ /* The following only applies to L1 entries */
+ if (lpid != kvm->arch.lpid)
+ return;
+
+ if (!memslot) {
 memslot = gfn_to_memslot(kvm, gfn);
- if (memslot && memslot->dirty_bitmap)
- kvmppc_update_dirty_map(memslot, gfn, page_size);
+ if (!memslot)
+ return;
 }
+ if (shift) { /* 1GB or 2MB page */
+ page_size = 1ul << shift;
+ if (shift == PMD_SHIFT)
+ kvm->stat.num_2M_pages--;
+ else if (shift == PUD_SHIFT)
+ kvm->stat.num_1G_pages--;
+ }
+
+ gpa &= ~(page_size - 1);
+ hpa = old & PTE_RPN_MASK;
+ kvmhv_remove_nest_rmap_range(kvm, memslot, gpa, hpa, page_size);
+
+ if ((old & _PAGE_DIRTY) && memslot->dirty_bitmap)
+ kvmppc_update_dirty_map(memslot, gfn, page_size);
 }

 /*
@@ -232,14 +436,19 @@
 * Callers are responsible for flushing the PWC.
 *
 * When page tables are being unmapped/freed as part of page fault path
- * (full == false), ptes are not expected. There is code to unmap them
- * and emit a warning if encountered, but there may already be data
- * corruption due to the unexpected mappings.
+ * (full == false), valid ptes are generally not expected; however, there
+ * is one situation where they arise, which is when dirty page logging is
+ * turned off for a memslot while the VM is running. The new memslot
+ * becomes visible to page faults before the memslot commit function
+ * gets to flush the memslot, which can lead to a 2MB page mapping being
+ * installed for a guest physical address where there are already 64kB
+ * (or 4kB) mappings (of sub-pages of the same 2MB page).
 */
-static void kvmppc_unmap_free_pte(struct kvm *kvm, pte_t *pte, bool full)
+static void kvmppc_unmap_free_pte(struct kvm *kvm, pte_t *pte, bool full,
+ unsigned int lpid)
 {
 if (full) {
- memset(pte, 0, sizeof(long) << PTE_INDEX_SIZE);
+ memset(pte, 0, sizeof(long) << RADIX_PTE_INDEX_SIZE);
 } else {
 pte_t *p = pte;
 unsigned long it;
@@ -247,17 +456,17 @@
 for (it = 0; it < PTRS_PER_PTE; ++it, ++p) {
 if (pte_val(*p) == 0)
 continue;
- WARN_ON_ONCE(1);
 kvmppc_unmap_pte(kvm, p,
 pte_pfn(*p) << PAGE_SHIFT,
- PAGE_SHIFT);
+ PAGE_SHIFT, NULL, lpid);
 }
 }

 kvmppc_pte_free(pte);
 }

-static void kvmppc_unmap_free_pmd(struct kvm *kvm, pmd_t *pmd, bool full)
+static void kvmppc_unmap_free_pmd(struct kvm *kvm, pmd_t *pmd, bool full,
+ unsigned int lpid)
 {
 unsigned long im;
 pmd_t *p = pmd;
@@ -272,20 +481,21 @@
 WARN_ON_ONCE(1);
 kvmppc_unmap_pte(kvm, (pte_t *)p,
 pte_pfn(*(pte_t *)p) << PAGE_SHIFT,
- PMD_SHIFT);
+ PMD_SHIFT, NULL, lpid);
 }
 } else {
 pte_t *pte;

 pte = pte_offset_map(p, 0);
- kvmppc_unmap_free_pte(kvm, pte, full);
+ kvmppc_unmap_free_pte(kvm, pte, full, lpid);
 pmd_clear(p);
 }
 }
 kvmppc_pmd_free(pmd);
 }

-static void kvmppc_unmap_free_pud(struct kvm *kvm, pud_t *pud)
+static void kvmppc_unmap_free_pud(struct kvm *kvm, pud_t *pud,
+ unsigned int lpid)
 {
 unsigned long iu;
 pud_t *p = pud;
@@ -293,42 +503,47 @@
 for (iu = 0; iu < PTRS_PER_PUD; ++iu, ++p) {
 if (!pud_present(*p))
 continue;
- if (pud_huge(*p)) {
+ if (pud_is_leaf(*p)) {
 pud_clear(p);
 } else {
 pmd_t *pmd;

 pmd = pmd_offset(p, 0);
- kvmppc_unmap_free_pmd(kvm, pmd, true);
+ kvmppc_unmap_free_pmd(kvm, pmd, true, lpid);
 pud_clear(p);
 }
 }
 pud_free(kvm->mm, pud);
 }

-void kvmppc_free_radix(struct kvm *kvm)
+void kvmppc_free_pgtable_radix(struct kvm *kvm, pgd_t *pgd, unsigned int lpid)
 {
 unsigned long ig;
- pgd_t *pgd;

- if (!kvm->arch.pgtable)
- return;
- pgd = kvm->arch.pgtable;
 for (ig = 0; ig < PTRS_PER_PGD; ++ig, ++pgd) {
+ p4d_t *p4d = p4d_offset(pgd, 0);
 pud_t *pud;

- if (!pgd_present(*pgd))
+ if (!p4d_present(*p4d))
 continue;
- pud = pud_offset(pgd, 0);
- kvmppc_unmap_free_pud(kvm, pud);
- pgd_clear(pgd);
+ pud = pud_offset(p4d, 0);
+ kvmppc_unmap_free_pud(kvm, pud, lpid);
+ p4d_clear(p4d);
 }
- pgd_free(kvm->mm, kvm->arch.pgtable);
- kvm->arch.pgtable = NULL;
+}
+
+void kvmppc_free_radix(struct kvm *kvm)
+{
+ if (kvm->arch.pgtable) {
+ kvmppc_free_pgtable_radix(kvm, kvm->arch.pgtable,
+ kvm->arch.lpid);
+ pgd_free(kvm->mm, kvm->arch.pgtable);
+ kvm->arch.pgtable = NULL;
+ }
 }

 static void kvmppc_unmap_free_pmd_entry_table(struct kvm *kvm, pmd_t *pmd,
- unsigned long gpa)
+ unsigned long gpa, unsigned int lpid)
 {
 pte_t *pte = pte_offset_kernel(pmd, 0);

@@ -338,13 +553,13 @@
 * flushing the PWC again.
 */
 pmd_clear(pmd);
- kvmppc_radix_flush_pwc(kvm);
+ kvmppc_radix_flush_pwc(kvm, lpid);

- kvmppc_unmap_free_pte(kvm, pte, false);
+ kvmppc_unmap_free_pte(kvm, pte, false, lpid);
 }

 static void kvmppc_unmap_free_pud_entry_table(struct kvm *kvm, pud_t *pud,
- unsigned long gpa)
+ unsigned long gpa, unsigned int lpid)
 {
 pmd_t *pmd = pmd_offset(pud, 0);

@@ -354,9 +569,9 @@
 * so can be freed without flushing the PWC again.
 */
 pud_clear(pud);
- kvmppc_radix_flush_pwc(kvm);
+ kvmppc_radix_flush_pwc(kvm, lpid);

- kvmppc_unmap_free_pmd(kvm, pmd, false);
+ kvmppc_unmap_free_pmd(kvm, pmd, false, lpid);
 }

 /*
@@ -368,25 +583,30 @@
 */
 #define PTE_BITS_MUST_MATCH (~(_PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED))

-static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa,
- unsigned int level, unsigned long mmu_seq)
+int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
+ unsigned long gpa, unsigned int level,
+ unsigned long mmu_seq, unsigned int lpid,
+ unsigned long *rmapp, struct rmap_nested **n_rmap)
 {
 pgd_t *pgd;
+ p4d_t *p4d;
 pud_t *pud, *new_pud = NULL;
 pmd_t *pmd, *new_pmd = NULL;
 pte_t *ptep, *new_ptep = NULL;
 int ret;

 /* Traverse the guest's 2nd-level tree, allocate new levels needed */
- pgd = kvm->arch.pgtable + pgd_index(gpa);
+ pgd = pgtable + pgd_index(gpa);
+ p4d = p4d_offset(pgd, gpa);
+
 pud = NULL;
- if (pgd_present(*pgd))
- pud = pud_offset(pgd, gpa);
+ if (p4d_present(*p4d))
+ pud = pud_offset(p4d, gpa);
 else
 new_pud = pud_alloc_one(kvm->mm, gpa);

 pmd = NULL;
- if (pud && pud_present(*pud) && !pud_huge(*pud))
+ if (pud && pud_present(*pud) && !pud_is_leaf(*pud))
 pmd = pmd_offset(pud, gpa);
 else if (level <= 1)
 new_pmd = kvmppc_pmd_alloc();
@@ -402,14 +622,14 @@

 /* Now traverse again under the lock and change the tree */
 ret = -ENOMEM;
- if (pgd_none(*pgd)) {
+ if (p4d_none(*p4d)) {
 if (!new_pud)
 goto out_unlock;
- pgd_populate(kvm->mm, pgd, new_pud);
+ p4d_populate(kvm->mm, p4d, new_pud);
 new_pud = NULL;
 }
- pud = pud_offset(pgd, gpa);
- if (pud_huge(*pud)) {
+ pud = pud_offset(p4d, gpa);
+ if (pud_is_leaf(*pud)) {
 unsigned long hgpa = gpa & PUD_MASK;

 /* Check if we raced and someone else has set the same thing */
@@ -435,7 +655,8 @@
 goto out_unlock;
 }
 /* Valid 1GB page here already, remove it */
- kvmppc_unmap_pte(kvm, (pte_t *)pud, hgpa, PUD_SHIFT);
+ kvmppc_unmap_pte(kvm, (pte_t *)pud, hgpa, PUD_SHIFT, NULL,
+ lpid);
 }
 if (level == 2) {
 if (!pud_none(*pud)) {
@@ -444,9 +665,11 @@
 * install a large page, so remove and free the page
 * table page.
 */
- kvmppc_unmap_free_pud_entry_table(kvm, pud, gpa);
+ kvmppc_unmap_free_pud_entry_table(kvm, pud, gpa, lpid);
 }
 kvmppc_radix_set_pte_at(kvm, gpa, (pte_t *)pud, pte);
+ if (rmapp && n_rmap)
+ kvmhv_insert_nest_rmap(kvm, rmapp, n_rmap);
 ret = 0;
 goto out_unlock;
 }
@@ -470,7 +693,7 @@
 WARN_ON_ONCE((pmd_val(*pmd) ^ pte_val(pte)) &
 PTE_BITS_MUST_MATCH);
 kvmppc_radix_update_pte(kvm, pmdp_ptep(pmd),
- 0, pte_val(pte), lgpa, PMD_SHIFT);
+ 0, pte_val(pte), lgpa, PMD_SHIFT);
 ret = 0;
 goto out_unlock;
 }
@@ -484,7 +707,8 @@
 goto out_unlock;
 }
 /* Valid 2MB page here already, remove it */
- kvmppc_unmap_pte(kvm, pmdp_ptep(pmd), lgpa, PMD_SHIFT);
+ kvmppc_unmap_pte(kvm, pmdp_ptep(pmd), lgpa, PMD_SHIFT, NULL,
+ lpid);
 }
 if (level == 1) {
 if (!pmd_none(*pmd)) {
@@ -493,9 +717,11 @@
 * install a large page, so remove and free the page
 * table page.
 */
- kvmppc_unmap_free_pmd_entry_table(kvm, pmd, gpa);
+ kvmppc_unmap_free_pmd_entry_table(kvm, pmd, gpa, lpid);
 }
 kvmppc_radix_set_pte_at(kvm, gpa, pmdp_ptep(pmd), pte);
+ if (rmapp && n_rmap)
+ kvmhv_insert_nest_rmap(kvm, rmapp, n_rmap);
 ret = 0;
 goto out_unlock;
 }
@@ -520,6 +746,8 @@
 goto out_unlock;
 }
 kvmppc_radix_set_pte_at(kvm, gpa, ptep, pte);
+ if (rmapp && n_rmap)
+ kvmhv_insert_nest_rmap(kvm, rmapp, n_rmap);
 ret = 0;

 out_unlock:
@@ -533,95 +761,50 @@
 return ret;
 }

-int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
- unsigned long ea, unsigned long dsisr)
+bool kvmppc_hv_handle_set_rc(struct kvm *kvm, bool nested, bool writing,
+ unsigned long gpa, unsigned int lpid)
+{
+ unsigned long pgflags;
+ unsigned int shift;
+ pte_t *ptep;
+
+ /*
+ * Need to set an R or C bit in the 2nd-level tables;
+ * since we are just helping out the hardware here,
+ * it is sufficient to do what the hardware does.
+ */
+ pgflags = _PAGE_ACCESSED;
+ if (writing)
+ pgflags |= _PAGE_DIRTY;
+
+ if (nested)
+ ptep = find_kvm_nested_guest_pte(kvm, lpid, gpa, &shift);
+ else
+ ptep = find_kvm_secondary_pte(kvm, gpa, &shift);
+
+ if (ptep && pte_present(*ptep) && (!writing || pte_write(*ptep))) {
+ kvmppc_radix_update_pte(kvm, ptep, 0, pgflags, gpa, shift);
+ return true;
+ }
+ return false;
+}
+
+int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
+ unsigned long gpa,
+ struct kvm_memory_slot *memslot,
+ bool writing, bool kvm_ro,
+ pte_t *inserted_pte, unsigned int *levelp)
 {
 struct kvm *kvm = vcpu->kvm;
- unsigned long mmu_seq;
- unsigned long gpa, gfn, hva;
- struct kvm_memory_slot *memslot;
 struct page *page = NULL;
- long ret;
- bool writing;
+ unsigned long mmu_seq;
+ unsigned long hva, gfn = gpa >> PAGE_SHIFT;
 bool upgrade_write = false;
 bool *upgrade_p = &upgrade_write;
 pte_t pte, *ptep;
- unsigned long pgflags;
 unsigned int shift, level;
-
- /* Check for unusual errors */
- if (dsisr & DSISR_UNSUPP_MMU) {
- pr_err("KVM: Got unsupported MMU fault\n");
- return -EFAULT;
- }
- if (dsisr & DSISR_BADACCESS) {
- /* Reflect to the guest as DSI */
- pr_err("KVM: Got radix HV page fault with DSISR=%lx\n", dsisr);
- kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
- return RESUME_GUEST;
- }
-
- /* Translate the logical address and get the page */
- gpa = vcpu->arch.fault_gpa & ~0xfffUL;
- gpa &= ~0xF000000000000000ul;
- gfn = gpa >> PAGE_SHIFT;
- if (!(dsisr & DSISR_PRTABLE_FAULT))
- gpa |= ea & 0xfff;
- memslot = gfn_to_memslot(kvm, gfn);
-
- /* No memslot means it's an emulated MMIO region */
- if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) {
- if (dsisr & (DSISR_PRTABLE_FAULT | DSISR_BADACCESS |
- DSISR_SET_RC)) {
- /*
- * Bad address in guest page table tree, or other
- * unusual error - reflect it to the guest as DSI.
- */
- kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
- return RESUME_GUEST;
- }
- return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea,
- dsisr & DSISR_ISSTORE);
- }
-
- writing = (dsisr & DSISR_ISSTORE) != 0;
- if (memslot->flags & KVM_MEM_READONLY) {
- if (writing) {
- /* give the guest a DSI */
- dsisr = DSISR_ISSTORE | DSISR_PROTFAULT;
- kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
- return RESUME_GUEST;
- }
- upgrade_p = NULL;
- }
-
- if (dsisr & DSISR_SET_RC) {
- /*
- * Need to set an R or C bit in the 2nd-level tables;
- * since we are just helping out the hardware here,
- * it is sufficient to do what the hardware does.
- */
- pgflags = _PAGE_ACCESSED;
- if (writing)
- pgflags |= _PAGE_DIRTY;
- /*
- * We are walking the secondary page table here. We can do this
- * without disabling irq.
- */
- spin_lock(&kvm->mmu_lock);
- ptep = __find_linux_pte(kvm->arch.pgtable,
- gpa, NULL, &shift);
- if (ptep && pte_present(*ptep) &&
- (!writing || pte_write(*ptep))) {
- kvmppc_radix_update_pte(kvm, ptep, 0, pgflags,
- gpa, shift);
- dsisr &= ~DSISR_SET_RC;
- }
- spin_unlock(&kvm->mmu_lock);
- if (!(dsisr & (DSISR_BAD_FAULT_64S | DSISR_NOHPTE |
- DSISR_PROTFAULT | DSISR_SET_RC)))
- return RESUME_GUEST;
- }
+ int ret;
+ bool large_enable;

 /* used to check for invalidations in progress */
 mmu_seq = kvm->mmu_notifier_seq;
@@ -634,7 +817,7 @@
 * is that the page is writable.
 */
 hva = gfn_to_hva_memslot(memslot, gfn);
- if (upgrade_p && __get_user_pages_fast(hva, 1, 1, &page) == 1) {
+ if (!kvm_ro && get_user_page_fast_only(hva, FOLL_WRITE, &page)) {
 upgrade_write = true;
 } else {
 unsigned long pfn;
@@ -656,27 +839,31 @@
 * Read the PTE from the process' radix tree and use that
 * so we get the shift and attribute bits.
 */
- local_irq_disable();
- ptep = __find_linux_pte(vcpu->arch.pgdir, hva, NULL, &shift);
+ spin_lock(&kvm->mmu_lock);
+ ptep = find_kvm_host_pte(kvm, mmu_seq, hva, &shift);
+ pte = __pte(0);
+ if (ptep)
+ pte = READ_ONCE(*ptep);
+ spin_unlock(&kvm->mmu_lock);
 /*
 * If the PTE disappeared temporarily due to a THP
 * collapse, just return and let the guest try again.
 */
- if (!ptep) {
- local_irq_enable();
+ if (!pte_present(pte)) {
 if (page)
 put_page(page);
 return RESUME_GUEST;
 }
- pte = *ptep;
- local_irq_enable();
+
+ /* If we're logging dirty pages, always map single pages */
+ large_enable = !(memslot->flags & KVM_MEM_LOG_DIRTY_PAGES);

 /* Get pte level from shift/size */
- if (shift == PUD_SHIFT &&
+ if (large_enable && shift == PUD_SHIFT &&
 (gpa & (PUD_SIZE - PAGE_SIZE)) ==
 (hva & (PUD_SIZE - PAGE_SIZE))) {
 level = 2;
- } else if (shift == PMD_SHIFT &&
+ } else if (large_enable && shift == PMD_SHIFT &&
 (gpa & (PMD_SIZE - PAGE_SIZE)) ==
 (hva & (PMD_SIZE - PAGE_SIZE))) {
 level = 1;
@@ -702,7 +889,12 @@
 }

 /* Allocate space in the tree and write the PTE */
- ret = kvmppc_create_pte(kvm, pte, gpa, level, mmu_seq);
+ ret = kvmppc_create_pte(kvm, kvm->arch.pgtable, pte, gpa, level,
+ mmu_seq, kvm->arch.lpid, NULL, NULL);
+ if (inserted_pte)
+ *inserted_pte = pte;
+ if (levelp)
+ *levelp = level;

 if (page) {
 if (!ret && (pte_val(pte) & _PAGE_WRITE))
@@ -710,36 +902,119 @@
 put_page(page);
 }

+ /* Increment number of large pages if we (successfully) inserted one */
+ if (!ret) {
+ if (level == 1)
+ kvm->stat.num_2M_pages++;
+ else if (level == 2)
+ kvm->stat.num_1G_pages++;
+ }
+
+ return ret;
+}
+
+int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu,
+ unsigned long ea, unsigned long dsisr)
+{
+ struct kvm *kvm = vcpu->kvm;
+ unsigned long gpa, gfn;
+ struct kvm_memory_slot *memslot;
+ long ret;
+ bool writing = !!(dsisr & DSISR_ISSTORE);
+ bool kvm_ro = false;
+
+ /* Check for unusual errors */
+ if (dsisr & DSISR_UNSUPP_MMU) {
+ pr_err("KVM: Got unsupported MMU fault\n");
+ return -EFAULT;
+ }
+ if (dsisr & DSISR_BADACCESS) {
+ /* Reflect to the guest as DSI */
+ pr_err("KVM: Got radix HV page fault with DSISR=%lx\n", dsisr);
+ kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
+ return RESUME_GUEST;
+ }
+
+ /* Translate the logical address */
+ gpa = vcpu->arch.fault_gpa & ~0xfffUL;
+ gpa &= ~0xF000000000000000ul;
+ gfn = gpa >> PAGE_SHIFT;
+ if (!(dsisr & DSISR_PRTABLE_FAULT))
+ gpa |= ea & 0xfff;
+
+ if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
+ return kvmppc_send_page_to_uv(kvm, gfn);
+
+ /* Get the corresponding memslot */
+ memslot = gfn_to_memslot(kvm, gfn);
+
+ /* No memslot means it's an emulated MMIO region */
+ if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) {
+ if (dsisr & (DSISR_PRTABLE_FAULT | DSISR_BADACCESS |
+ DSISR_SET_RC)) {
+ /*
+ * Bad address in guest page table tree, or other
+ * unusual error - reflect it to the guest as DSI.
+ */
+ kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
+ return RESUME_GUEST;
+ }
+ return kvmppc_hv_emulate_mmio(vcpu, gpa, ea, writing);
+ }
+
+ if (memslot->flags & KVM_MEM_READONLY) {
+ if (writing) {
+ /* give the guest a DSI */
+ kvmppc_core_queue_data_storage(vcpu, ea, DSISR_ISSTORE |
+ DSISR_PROTFAULT);
+ return RESUME_GUEST;
+ }
+ kvm_ro = true;
+ }
+
+ /* Failed to set the reference/change bits */
+ if (dsisr & DSISR_SET_RC) {
+ spin_lock(&kvm->mmu_lock);
+ if (kvmppc_hv_handle_set_rc(kvm, false, writing,
+ gpa, kvm->arch.lpid))
+ dsisr &= ~DSISR_SET_RC;
+ spin_unlock(&kvm->mmu_lock);
+
+ if (!(dsisr & (DSISR_BAD_FAULT_64S | DSISR_NOHPTE |
+ DSISR_PROTFAULT | DSISR_SET_RC)))
+ return RESUME_GUEST;
+ }
+
+ /* Try to insert a pte */
+ ret = kvmppc_book3s_instantiate_page(vcpu, gpa, memslot, writing,
+ kvm_ro, NULL, NULL);
+
 if (ret == 0 || ret == -EAGAIN)
 ret = RESUME_GUEST;
 return ret;
 }

-/* Called with kvm->lock held */
+/* Called with kvm->mmu_lock held */
 int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
 unsigned long gfn)
 {
 pte_t *ptep;
 unsigned long gpa = gfn << PAGE_SHIFT;
 unsigned int shift;
- unsigned long old;

- ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
- if (ptep && pte_present(*ptep)) {
- old = kvmppc_radix_update_pte(kvm, ptep, ~0UL, 0,
- gpa, shift);
- kvmppc_radix_tlbie_page(kvm, gpa, shift);
- if ((old & _PAGE_DIRTY) && memslot->dirty_bitmap) {
- unsigned long psize = PAGE_SIZE;
- if (shift)
- psize = 1ul << shift;
- kvmppc_update_dirty_map(memslot, gfn, psize);
- }
+ if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE) {
+ uv_page_inval(kvm->arch.lpid, gpa, PAGE_SHIFT);
+ return 0;
 }
- return 0;
+
+ ptep = find_kvm_secondary_pte(kvm, gpa, &shift);
+ if (ptep && pte_present(*ptep))
+ kvmppc_unmap_pte(kvm, ptep, gpa, shift, memslot,
+ kvm->arch.lpid);
+ return 0;
 }

-/* Called with kvm->lock held */
+/* Called with kvm->mmu_lock held */
 int kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
 unsigned long gfn)
 {
@@ -747,18 +1022,27 @@
 unsigned long gpa = gfn << PAGE_SHIFT;
 unsigned int shift;
 int ref = 0;
+ unsigned long old, *rmapp;

- ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
+ if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
+ return ref;
+
+ ptep = find_kvm_secondary_pte(kvm, gpa, &shift);
 if (ptep && pte_present(*ptep) && pte_young(*ptep)) {
- kvmppc_radix_update_pte(kvm, ptep, _PAGE_ACCESSED, 0,
- gpa, shift);
+ old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_ACCESSED, 0,
+ gpa, shift);
 /* XXX need to flush tlb here? */
+ /* Also clear bit in ptes in shadow pgtable for nested guests */
+ rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
+ kvmhv_update_nest_rmap_rc_list(kvm, rmapp, _PAGE_ACCESSED, 0,
+ old & PTE_RPN_MASK,
+ 1UL << shift);
 ref = 1;
 }
 return ref;
 }

-/* Called with kvm->lock held */
+/* Called with kvm->mmu_lock held */
 int kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
 unsigned long gfn)
 {
@@ -767,7 +1051,10 @@
 unsigned int shift;
 int ref = 0;

- ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
+ if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
+ return ref;
+
+ ptep = find_kvm_secondary_pte(kvm, gpa, &shift);
 if (ptep && pte_present(*ptep) && pte_young(*ptep))
 ref = 1;
 return ref;
@@ -779,18 +1066,52 @@
 {
 unsigned long gfn = memslot->base_gfn + pagenum;
 unsigned long gpa = gfn << PAGE_SHIFT;
- pte_t *ptep;
+ pte_t *ptep, pte;
 unsigned int shift;
 int ret = 0;
+ unsigned long old, *rmapp;

- ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
- if (ptep && pte_present(*ptep) && pte_dirty(*ptep)) {
+ if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
+ return ret;
+
+ /*
+ * For performance reasons we don't hold kvm->mmu_lock while walking the
+ * partition scoped table.
+ */
+ ptep = find_kvm_secondary_pte_unlocked(kvm, gpa, &shift);
+ if (!ptep)
+ return 0;
+
+ pte = READ_ONCE(*ptep);
+ if (pte_present(pte) && pte_dirty(pte)) {
+ spin_lock(&kvm->mmu_lock);
+ /*
+ * Recheck the pte again
+ */
+ if (pte_val(pte) != pte_val(*ptep)) {
+ /*
+ * We have KVM_MEM_LOG_DIRTY_PAGES enabled. Hence we can
+ * only find PAGE_SIZE pte entries here. We can continue
+ * to use the pte addr returned by above page table
+ * walk.
+ */
+ if (!pte_present(*ptep) || !pte_dirty(*ptep)) {
+ spin_unlock(&kvm->mmu_lock);
+ return 0;
+ }
+ }
+
 ret = 1;
- if (shift)
- ret = 1 << (shift - PAGE_SHIFT);
- kvmppc_radix_update_pte(kvm, ptep, _PAGE_DIRTY, 0,
- gpa, shift);
- kvmppc_radix_tlbie_page(kvm, gpa, shift);
+ VM_BUG_ON(shift);
+ old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_DIRTY, 0,
+ gpa, shift);
+ kvmppc_radix_tlbie_page(kvm, gpa, shift, kvm->arch.lpid);
+ /* Also clear bit in ptes in shadow pgtable for nested guests */
+ rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
+ kvmhv_update_nest_rmap_rc_list(kvm, rmapp, _PAGE_DIRTY, 0,
+ old & PTE_RPN_MASK,
+ 1UL << shift);
+ spin_unlock(&kvm->mmu_lock);
 }
 return ret;
 }
@@ -818,6 +1139,37 @@
 }
 }
 return 0;
+}
+
+void kvmppc_radix_flush_memslot(struct kvm *kvm,
+ const struct kvm_memory_slot *memslot)
+{
+ unsigned long n;
+ pte_t *ptep;
+ unsigned long gpa;
+ unsigned int shift;
+
+ if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START)
+ kvmppc_uvmem_drop_pages(memslot, kvm, true);
+
+ if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
+ return;
+
+ gpa = memslot->base_gfn << PAGE_SHIFT;
+ spin_lock(&kvm->mmu_lock);
+ for (n = memslot->npages; n; --n) {
+ ptep = find_kvm_secondary_pte(kvm, gpa, &shift);
+ if (ptep && pte_present(*ptep))
+ kvmppc_unmap_pte(kvm, ptep, gpa, shift, memslot,
+ kvm->arch.lpid);
+ gpa += PAGE_SIZE;
+ }
+ /*
+ * Increase the mmu notifier sequence number to prevent any page
+ * fault that read the memslot earlier from writing a PTE.
+ */
+ kvm->mmu_notifier_seq++;
+ spin_unlock(&kvm->mmu_lock);
 }

 static void add_rmmu_ap_encoding(struct kvm_ppc_rmmu_info *info,
@@ -875,6 +1227,216 @@
 memset(addr, 0, RADIX_PMD_TABLE_SIZE);
 }

+struct debugfs_radix_state {
+ struct kvm *kvm;
+ struct mutex mutex;
+ unsigned long gpa;
+ int lpid;
+ int chars_left;
+ int buf_index;
+ char buf[128];
+ u8 hdr;
+};
+
+static int debugfs_radix_open(struct inode *inode, struct file *file)
+{
+ struct kvm *kvm = inode->i_private;
+ struct debugfs_radix_state *p;
+
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ kvm_get_kvm(kvm);
+ p->kvm = kvm;
+ mutex_init(&p->mutex);
+ file->private_data = p;
+
+ return nonseekable_open(inode, file);
+}
+
+static int debugfs_radix_release(struct inode *inode, struct file *file)
+{
+ struct debugfs_radix_state *p = file->private_data;
+
+ kvm_put_kvm(p->kvm);
+ kfree(p);
+ return 0;
+}
+
+static ssize_t debugfs_radix_read(struct file *file, char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ struct debugfs_radix_state *p = file->private_data;
+ ssize_t ret, r;
+ unsigned long n;
+ struct kvm *kvm;
+ unsigned long gpa;
+ pgd_t *pgt;
+ struct kvm_nested_guest *nested;
+ pgd_t *pgdp;
+ p4d_t p4d, *p4dp;
+ pud_t pud, *pudp;
+ pmd_t pmd, *pmdp;
+ pte_t *ptep;
+ int shift;
+ unsigned long pte;
+
+ kvm = p->kvm;
+ if (!kvm_is_radix(kvm))
+ return 0;
+
+ ret = mutex_lock_interruptible(&p->mutex);
+ if (ret)
+ return ret;
+
+ if (p->chars_left) {
+ n = p->chars_left;
+ if (n > len)
+ n = len;
+ r = copy_to_user(buf, p->buf + p->buf_index, n);
+ n -= r;
+ p->chars_left -= n;
+ p->buf_index += n;
+ buf += n;
+ len -= n;
+ ret = n;
+ if (r) {
+ if (!n)
+ ret = -EFAULT;
+ goto out;
+ }
+ }
+
+ gpa = p->gpa;
+ nested = NULL;
+ pgt = NULL;
+ while (len != 0 && p->lpid >= 0) {
+ if (gpa >= RADIX_PGTABLE_RANGE) {
+ gpa = 0;
+ pgt = NULL;
+ if (nested) {
+ kvmhv_put_nested(nested);
+ nested = NULL;
+ }
+ p->lpid = kvmhv_nested_next_lpid(kvm, p->lpid);
+ p->hdr = 0;
+ if (p->lpid < 0)
+ break;
+ }
+ if (!pgt) {
+ if (p->lpid == 0) {
+ pgt = kvm->arch.pgtable;
+ } else {
+ nested = kvmhv_get_nested(kvm, p->lpid, false);
+ if (!nested) {
+ gpa = RADIX_PGTABLE_RANGE;
+ continue;
+ }
+ pgt = nested->shadow_pgtable;
+ }
+ }
+ n = 0;
+ if (!p->hdr) {
+ if (p->lpid > 0)
+ n = scnprintf(p->buf, sizeof(p->buf),
+ "\nNested LPID %d: ", p->lpid);
+ n += scnprintf(p->buf + n, sizeof(p->buf) - n,
+ "pgdir: %lx\n", (unsigned long)pgt);
+ p->hdr = 1;
+ goto copy;
+ }
+
+ pgdp = pgt + pgd_index(gpa);
+ p4dp = p4d_offset(pgdp, gpa);
+ p4d = READ_ONCE(*p4dp);
+ if (!(p4d_val(p4d) & _PAGE_PRESENT)) {
+ gpa = (gpa & P4D_MASK) + P4D_SIZE;
+ continue;
+ }
+
+ pudp = pud_offset(&p4d, gpa);
+ pud = READ_ONCE(*pudp);
+ if (!(pud_val(pud) & _PAGE_PRESENT)) {
+ gpa = (gpa & PUD_MASK) + PUD_SIZE;
+ continue;
+ }
+ if (pud_val(pud) & _PAGE_PTE) {
+ pte = pud_val(pud);
+ shift = PUD_SHIFT;
+ goto leaf;
+ }
+
+ pmdp = pmd_offset(&pud, gpa);
+ pmd = READ_ONCE(*pmdp);
+ if (!(pmd_val(pmd) & _PAGE_PRESENT)) {
+ gpa = (gpa & PMD_MASK) + PMD_SIZE;
+ continue;
+ }
+ if (pmd_val(pmd) & _PAGE_PTE) {
+ pte = pmd_val(pmd);
+ shift = PMD_SHIFT;
+ goto leaf;
+ }
+
+ ptep = pte_offset_kernel(&pmd, gpa);
+ pte = pte_val(READ_ONCE(*ptep));
+ if (!(pte & _PAGE_PRESENT)) {
+ gpa += PAGE_SIZE;
+ continue;
+ }
+ shift = PAGE_SHIFT;
+ leaf:
+ n = scnprintf(p->buf, sizeof(p->buf),
+ " %lx: %lx %d\n", gpa, pte, shift);
+ gpa += 1ul << shift;
+ copy:
+ p->chars_left = n;
+ if (n > len)
+ n = len;
+ r = copy_to_user(buf, p->buf, n);
+ n -= r;
+ p->chars_left -= n;
+ p->buf_index = n;
+ buf += n;
+ len -= n;
+ ret += n;
+ if (r) {
+ if (!ret)
+ ret = -EFAULT;
+ break;
+ }
+ }
+ p->gpa = gpa;
+ if (nested)
+ kvmhv_put_nested(nested);
+
+ out:
+ mutex_unlock(&p->mutex);
+ return ret;
+}
+
+static ssize_t debugfs_radix_write(struct file *file, const char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ return -EACCES;
+}
+
+static const struct file_operations debugfs_radix_fops = {
+ .owner = THIS_MODULE,
+ .open = debugfs_radix_open,
+ .release = debugfs_radix_release,
+ .read = debugfs_radix_read,
+ .write = debugfs_radix_write,
+ .llseek = generic_file_llseek,
+};
+
+void kvmhv_radix_debugfs_init(struct kvm *kvm)
+{
+ debugfs_create_file("radix", 0400, kvm->arch.debugfs_dir, kvm,
+ &debugfs_radix_fops);
+}
+
 int kvmppc_radix_init(void)
 {
 unsigned long size = sizeof(void *) << RADIX_PTE_INDEX_SIZE;