forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-09-20 cf4ce59b3b70238352c7f1729f0f7223214828ad
kernel/arch/powerpc/include/asm/kvm_book3s_64.h
....@@ -1,16 +1,5 @@
1
+/* SPDX-License-Identifier: GPL-2.0-only */
12 /*
2
- * This program is free software; you can redistribute it and/or modify
3
- * it under the terms of the GNU General Public License, version 2, as
4
- * published by the Free Software Foundation.
5
- *
6
- * This program is distributed in the hope that it will be useful,
7
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
8
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9
- * GNU General Public License for more details.
10
- *
11
- * You should have received a copy of the GNU General Public License
12
- * along with this program; if not, write to the Free Software
13
- * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
143 *
154 * Copyright SUSE Linux Products GmbH 2010
165 *
....@@ -23,6 +12,110 @@
2312 #include <linux/string.h>
2413 #include <asm/bitops.h>
2514 #include <asm/book3s/64/mmu-hash.h>
15
+#include <asm/cpu_has_feature.h>
16
+#include <asm/ppc-opcode.h>
17
+#include <asm/pte-walk.h>
18
+
19
+#ifdef CONFIG_PPC_PSERIES
20
+static inline bool kvmhv_on_pseries(void)
21
+{
22
+ return !cpu_has_feature(CPU_FTR_HVMODE);
23
+}
24
+#else
25
+static inline bool kvmhv_on_pseries(void)
26
+{
27
+ return false;
28
+}
29
+#endif
30
+
31
+/*
32
+ * Structure for a nested guest, that is, for a guest that is managed by
33
+ * one of our guests.
34
+ */
35
+struct kvm_nested_guest {
36
+ struct kvm *l1_host; /* L1 VM that owns this nested guest */
37
+ int l1_lpid; /* lpid L1 guest thinks this guest is */
38
+ int shadow_lpid; /* real lpid of this nested guest */
39
+ pgd_t *shadow_pgtable; /* our page table for this guest */
40
+ u64 l1_gr_to_hr; /* L1's addr of part'n-scoped table */
41
+ u64 process_table; /* process table entry for this guest */
42
+ long refcnt; /* number of pointers to this struct */
43
+ struct mutex tlb_lock; /* serialize page faults and tlbies */
44
+ struct kvm_nested_guest *next;
45
+ cpumask_t need_tlb_flush;
46
+ cpumask_t cpu_in_guest;
47
+ short prev_cpu[NR_CPUS];
48
+ u8 radix; /* is this nested guest radix */
49
+};
50
+
51
+/*
52
+ * We define a nested rmap entry as a single 64-bit quantity
53
+ * 0xFFF0000000000000 12-bit lpid field
54
+ * 0x000FFFFFFFFFF000 40-bit guest 4k page frame number
55
+ * 0x0000000000000001 1-bit single entry flag
56
+ */
57
+#define RMAP_NESTED_LPID_MASK 0xFFF0000000000000UL
58
+#define RMAP_NESTED_LPID_SHIFT (52)
59
+#define RMAP_NESTED_GPA_MASK 0x000FFFFFFFFFF000UL
60
+#define RMAP_NESTED_IS_SINGLE_ENTRY 0x0000000000000001UL
61
+
62
+/* Structure for a nested guest rmap entry */
63
+struct rmap_nested {
64
+ struct llist_node list;
65
+ u64 rmap;
66
+};
67
+
68
+/*
69
+ * for_each_nest_rmap_safe - iterate over the list of nested rmap entries
70
+ * safe against removal of the list entry or NULL list
71
+ * @pos: a (struct rmap_nested *) to use as a loop cursor
72
+ * @node: pointer to the first entry
73
+ * NOTE: this can be NULL
74
+ * @rmapp: an (unsigned long *) in which to return the rmap entries on each
75
+ * iteration
76
+ * NOTE: this must point to already allocated memory
77
+ *
78
+ * The nested_rmap is a llist of (struct rmap_nested) entries pointed to by the
79
+ * rmap entry in the memslot. The list is always terminated by a "single entry"
80
+ * stored in the list element of the final entry of the llist. If there is ONLY
81
+ * a single entry then this is itself in the rmap entry of the memslot, not a
82
+ * llist head pointer.
83
+ *
84
+ * Note that the iterator below assumes that a nested rmap entry is always
85
+ * non-zero. This is true for our usage because the LPID field is always
86
+ * non-zero (zero is reserved for the host).
87
+ *
88
+ * This should be used to iterate over the list of rmap_nested entries with
89
+ * processing done on the u64 rmap value given by each iteration. This is safe
90
+ * against removal of list entries and it is always safe to call free on (pos).
91
+ *
92
+ * e.g.
93
+ * struct rmap_nested *cursor;
94
+ * struct llist_node *first;
95
+ * unsigned long rmap;
96
+ * for_each_nest_rmap_safe(cursor, first, &rmap) {
97
+ * do_something(rmap);
98
+ * free(cursor);
99
+ * }
100
+ */
101
/*
 * NOTE(review): the iterator relies on bit 0 (RMAP_NESTED_IS_SINGLE_ENTRY)
 * of the cursor value to tell a terminating "single entry" (a raw rmap
 * value stored where a pointer would be) from a real llist_node pointer,
 * and on rmap entries being non-zero (lpid 0 is reserved for the host).
 */
+#define for_each_nest_rmap_safe(pos, node, rmapp) \
102
+	for ((pos) = llist_entry((node), typeof(*(pos)), list); \
103
+	     (node) && \
104
+	     (*(rmapp) = ((RMAP_NESTED_IS_SINGLE_ENTRY & ((u64) (node))) ? \
105
+			((u64) (node)) : ((pos)->rmap))) && \
106
+	     (((node) = ((RMAP_NESTED_IS_SINGLE_ENTRY & ((u64) (node))) ? \
107
+			((struct llist_node *) ((pos) = NULL)) : \
108
+			(pos)->list.next)), true); \
109
+	     (pos) = llist_entry((node), typeof(*(pos)), list))
110
+
111
+struct kvm_nested_guest *kvmhv_get_nested(struct kvm *kvm, int l1_lpid,
112
+ bool create);
113
+void kvmhv_put_nested(struct kvm_nested_guest *gp);
114
+int kvmhv_nested_next_lpid(struct kvm *kvm, int lpid);
115
+
116
+/* Encoding of first parameter for H_TLB_INVALIDATE: ORs the RIC, PRS and
+   R fields via the ___PPC_RIC/___PPC_PRS/___PPC_R opcode helpers. */
117
+#define H_TLBIE_P1_ENC(ric, prs, r)	(___PPC_RIC(ric) | ___PPC_PRS(prs) | \
118
+					 ___PPC_R(r))
26119
27120 /* Power architecture requires HPT is at least 256kiB, at most 64TiB */
28121 #define PPC_MIN_HPT_ORDER 18
....@@ -46,6 +139,18 @@
/* Returns kvm->arch.radix as a bool (nonzero when the VM is radix,
 * per the field name — semantics set elsewhere in the arch code). */
46139 static inline bool kvm_is_radix(struct kvm *kvm)
47140 {
48141 	return kvm->arch.radix;
142
+}
143
+
144
+static inline bool kvmhv_vcpu_is_radix(struct kvm_vcpu *vcpu)
145
+{
146
+ bool radix;
147
+
148
+ if (vcpu->arch.nested)
149
+ radix = vcpu->arch.nested->radix;
150
+ else
151
+ radix = kvm_is_radix(vcpu->kvm);
152
+
153
+ return radix;
49154 }
50155
51156 #define KVM_DEFAULT_HPT_ORDER 24 /* 16MB HPT by default */
....@@ -330,7 +435,7 @@
330435 continue;
331436 }
332437 /* If pte is not present return None */
333
- if (unlikely(!(pte_val(old_pte) & _PAGE_PRESENT)))
438
+ if (unlikely(!pte_present(old_pte)))
334439 return __pte(0);
335440
336441 new_pte = pte_mkyoung(old_pte);
....@@ -431,10 +536,11 @@
431536 */
432537 static inline struct kvm_memslots *kvm_memslots_raw(struct kvm *kvm)
433538 {
434
- return rcu_dereference_raw_notrace(kvm->memslots[0]);
539
+ return rcu_dereference_raw_check(kvm->memslots[0]);
435540 }
436541
437542 extern void kvmppc_mmu_debugfs_init(struct kvm *kvm);
543
+extern void kvmhv_radix_debugfs_init(struct kvm *kvm);
438544
439545 extern void kvmhv_rm_send_ipi(int cpu);
440546
....@@ -515,6 +621,61 @@
515621 }
516622 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
517623
624
+extern int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
625
+ unsigned long gpa, unsigned int level,
626
+ unsigned long mmu_seq, unsigned int lpid,
627
+ unsigned long *rmapp, struct rmap_nested **n_rmap);
628
+extern void kvmhv_insert_nest_rmap(struct kvm *kvm, unsigned long *rmapp,
629
+ struct rmap_nested **n_rmap);
630
+extern void kvmhv_update_nest_rmap_rc_list(struct kvm *kvm, unsigned long *rmapp,
631
+ unsigned long clr, unsigned long set,
632
+ unsigned long hpa, unsigned long nbytes);
633
+extern void kvmhv_remove_nest_rmap_range(struct kvm *kvm,
634
+ const struct kvm_memory_slot *memslot,
635
+ unsigned long gpa, unsigned long hpa,
636
+ unsigned long nbytes);
637
+
638
+static inline pte_t *
639
+find_kvm_secondary_pte_unlocked(struct kvm *kvm, unsigned long ea,
640
+ unsigned *hshift)
641
+{
642
+ pte_t *pte;
643
+
644
+ pte = __find_linux_pte(kvm->arch.pgtable, ea, NULL, hshift);
645
+ return pte;
646
+}
647
+
648
+static inline pte_t *find_kvm_secondary_pte(struct kvm *kvm, unsigned long ea,
649
+ unsigned *hshift)
650
+{
651
+ pte_t *pte;
652
+
653
+ VM_WARN(!spin_is_locked(&kvm->mmu_lock),
654
+ "%s called with kvm mmu_lock not held \n", __func__);
655
+ pte = __find_linux_pte(kvm->arch.pgtable, ea, NULL, hshift);
656
+
657
+ return pte;
658
+}
659
+
660
+static inline pte_t *find_kvm_host_pte(struct kvm *kvm, unsigned long mmu_seq,
661
+ unsigned long ea, unsigned *hshift)
662
+{
663
+ pte_t *pte;
664
+
665
+ VM_WARN(!spin_is_locked(&kvm->mmu_lock),
666
+ "%s called with kvm mmu_lock not held \n", __func__);
667
+
668
+ if (mmu_notifier_retry(kvm, mmu_seq))
669
+ return NULL;
670
+
671
+ pte = __find_linux_pte(kvm->mm->pgd, ea, NULL, hshift);
672
+
673
+ return pte;
674
+}
675
+
676
+extern pte_t *find_kvm_nested_guest_pte(struct kvm *kvm, unsigned long lpid,
677
+ unsigned long ea, unsigned *hshift);
678
+
518679 #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
519680
520681 #endif /* __ASM_KVM_BOOK3S_64_H__ */