```diff
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
  *
  * Copyright SUSE Linux Products GmbH 2010
  *
```
```diff
@@ ... @@
 #include <linux/string.h>
 #include <asm/bitops.h>
 #include <asm/book3s/64/mmu-hash.h>
+#include <asm/cpu_has_feature.h>
+#include <asm/ppc-opcode.h>
+#include <asm/pte-walk.h>
+
+#ifdef CONFIG_PPC_PSERIES
+static inline bool kvmhv_on_pseries(void)
+{
+	return !cpu_has_feature(CPU_FTR_HVMODE);
+}
+#else
+static inline bool kvmhv_on_pseries(void)
+{
+	return false;
+}
+#endif
+
```
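The new `kvmhv_on_pseries()` helper reports whether KVM-HV is itself running as a pseries (paravirtualized) guest: in that case `CPU_FTR_HVMODE` is clear because a real hypervisor sits underneath. A minimal sketch of the branch shape this enables; the function and the elided bodies are illustrative, not from the patch:

```c
/* Illustrative only (not in the patch): nested-HV code paths follow
 * this shape wherever hypervisor-level state must be updated. */
static void sketch_update_hv_state(void)
{
	if (kvmhv_on_pseries()) {
		/* L1 guest: ask the real (L0) hypervisor via an hcall */
	} else {
		/* true hypervisor mode: write the state directly */
	}
}
```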
```diff
+/*
+ * Structure for a nested guest, that is, for a guest that is managed by
+ * one of our guests.
+ */
+struct kvm_nested_guest {
+	struct kvm *l1_host;		/* L1 VM that owns this nested guest */
+	int l1_lpid;			/* lpid L1 guest thinks this guest is */
+	int shadow_lpid;		/* real lpid of this nested guest */
+	pgd_t *shadow_pgtable;		/* our page table for this guest */
+	u64 l1_gr_to_hr;		/* L1's addr of part'n-scoped table */
+	u64 process_table;		/* process table entry for this guest */
+	long refcnt;			/* number of pointers to this struct */
+	struct mutex tlb_lock;		/* serialize page faults and tlbies */
+	struct kvm_nested_guest *next;
+	cpumask_t need_tlb_flush;
+	cpumask_t cpu_in_guest;
+	short prev_cpu[NR_CPUS];
+	u8 radix;			/* is this nested guest radix */
+};
+
+/*
+ * We define a nested rmap entry as a single 64-bit quantity
+ * 0xFFF0000000000000	12-bit lpid field
+ * 0x000FFFFFFFFFF000	40-bit guest 4k page frame number
+ * 0x0000000000000001	1-bit  single entry flag
+ */
+#define RMAP_NESTED_LPID_MASK		0xFFF0000000000000UL
+#define RMAP_NESTED_LPID_SHIFT		(52)
+#define RMAP_NESTED_GPA_MASK		0x000FFFFFFFFFF000UL
+#define RMAP_NESTED_IS_SINGLE_ENTRY	0x0000000000000001UL
+
+/* Structure for a nested guest rmap entry */
+struct rmap_nested {
+	struct llist_node list;
+	u64 rmap;
+};
+
```
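A minimal sketch of how the field layout above composes and decomposes an entry; the helper names are hypothetical, only the masks and shift come from the patch:

```c
/* Hypothetical helpers, assuming only the RMAP_NESTED_* definitions
 * above: build a nested rmap entry from an L1 lpid and a guest
 * physical address, and extract the fields again. */
static inline u64 nest_rmap_encode(unsigned int lpid, u64 gpa)
{
	return (((u64)lpid << RMAP_NESTED_LPID_SHIFT) & RMAP_NESTED_LPID_MASK) |
	       (gpa & RMAP_NESTED_GPA_MASK);
}

static inline unsigned int nest_rmap_lpid(u64 rmap)
{
	return (rmap & RMAP_NESTED_LPID_MASK) >> RMAP_NESTED_LPID_SHIFT;
}

static inline u64 nest_rmap_gpa(u64 rmap)
{
	return rmap & RMAP_NESTED_GPA_MASK;
}
```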
```diff
+/*
+ * for_each_nest_rmap_safe - iterate over the list of nested rmap entries
+ *			     safe against removal of the list entry or NULL list
+ * @pos:	a (struct rmap_nested *) to use as a loop cursor
+ * @node:	pointer to the first entry
+ *		NOTE: this can be NULL
+ * @rmapp:	an (unsigned long *) in which to return the rmap entries on each
+ *		iteration
+ *		NOTE: this must point to already allocated memory
+ *
+ * The nested_rmap is a llist of (struct rmap_nested) entries pointed to by the
+ * rmap entry in the memslot. The list is always terminated by a "single entry"
+ * stored in the list element of the final entry of the llist. If there is ONLY
+ * a single entry then this is itself in the rmap entry of the memslot, not a
+ * llist head pointer.
+ *
+ * Note that the iterator below assumes that a nested rmap entry is always
+ * non-zero. This is true for our usage because the LPID field is always
+ * non-zero (zero is reserved for the host).
+ *
+ * This should be used to iterate over the list of rmap_nested entries with
+ * processing done on the u64 rmap value given by each iteration. This is safe
+ * against removal of list entries and it is always safe to call free on (pos).
+ *
+ * e.g.
+ * struct rmap_nested *cursor;
+ * struct llist_node *first;
+ * unsigned long rmap;
+ * for_each_nest_rmap_safe(cursor, first, &rmap) {
+ *	do_something(rmap);
+ *	free(cursor);
+ * }
+ */
+#define for_each_nest_rmap_safe(pos, node, rmapp)			      \
+	for ((pos) = llist_entry((node), typeof(*(pos)), list);	      \
+	     (node) &&							      \
+	     (*(rmapp) = ((RMAP_NESTED_IS_SINGLE_ENTRY & ((u64) (node))) ?   \
+			  ((u64) (node)) : ((pos)->rmap))) &&		      \
+	     (((node) = ((RMAP_NESTED_IS_SINGLE_ENTRY & ((u64) (node))) ?    \
+			 ((struct llist_node *) ((pos) = NULL)) :	      \
+			 (pos)->list.next)), true);			      \
+	     (pos) = llist_entry((node), typeof(*(pos)), list))
+
```
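`for_each_nest_rmap_safe()` tells a terminating "single entry" value apart from a real list pointer by bit 0: a `struct llist_node` pointer is at least word-aligned, so its low bit is always clear, while an rmap value always carries `RMAP_NESTED_IS_SINGLE_ENTRY`. A hypothetical helper (not in the patch) making that test explicit:

```c
/* Hypothetical helper: true if the "next" slot actually holds a
 * tagged rmap entry rather than a pointer to another list node.
 * Relies on llist_node pointers being word-aligned (bit 0 clear). */
static inline bool nest_rmap_node_is_entry(struct llist_node *node)
{
	return (u64)node & RMAP_NESTED_IS_SINGLE_ENTRY;
}
```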
```diff
+struct kvm_nested_guest *kvmhv_get_nested(struct kvm *kvm, int l1_lpid,
+					  bool create);
+void kvmhv_put_nested(struct kvm_nested_guest *gp);
+int kvmhv_nested_next_lpid(struct kvm *kvm, int lpid);
+
```
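A hypothetical caller of the get/put API above, with the surrounding locking and error handling elided:

```c
/* Illustrative only: take and drop a reference on the nested-guest
 * state that L1 knows by l1_lpid. */
static void sketch_use_nested(struct kvm *kvm, int l1_lpid)
{
	struct kvm_nested_guest *gp;

	gp = kvmhv_get_nested(kvm, l1_lpid, false);	/* false: don't create */
	if (!gp)
		return;
	/* ... use gp->shadow_lpid, gp->shadow_pgtable, gp->tlb_lock ... */
	kvmhv_put_nested(gp);				/* drops gp->refcnt */
}
```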
```diff
+/* Encoding of first parameter for H_TLB_INVALIDATE */
+#define H_TLBIE_P1_ENC(ric, prs, r)	(___PPC_RIC(ric) | ___PPC_PRS(prs) | \
+					 ___PPC_R(r))
```
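`___PPC_RIC`, `___PPC_PRS` and `___PPC_R` come from `<asm/ppc-opcode.h>` and place each value into the corresponding `tlbie` instruction field, so the hcall parameter mirrors the ISA encoding (RIC: 0 = invalidate TLB, 1 = invalidate page-walk cache, 2 = invalidate both; PRS: process- vs partition-scoped; R: radix). An illustrative encoding, not taken from the patch:

```c
/* Illustrative: a radix, process-scoped "invalidate all"
 * (RIC = 2, PRS = 1, R = 1) as the first H_TLB_INVALIDATE argument. */
unsigned long p1 = H_TLBIE_P1_ENC(2, 1, 1);
```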
```diff
 
 /* Power architecture requires HPT is at least 256kiB, at most 64TiB */
 #define PPC_MIN_HPT_ORDER	18
```
```diff
@@ ... @@
 static inline bool kvm_is_radix(struct kvm *kvm)
 {
 	return kvm->arch.radix;
+}
+
+static inline bool kvmhv_vcpu_is_radix(struct kvm_vcpu *vcpu)
+{
+	bool radix;
+
+	if (vcpu->arch.nested)
+		radix = vcpu->arch.nested->radix;
+	else
+		radix = kvm_is_radix(vcpu->kvm);
+
+	return radix;
 }
 
 #define KVM_DEFAULT_HPT_ORDER	24	/* 16MB HPT by default */
```
```diff
@@ ... @@
 			continue;
 		}
 		/* If pte is not present return None */
-		if (unlikely(!(pte_val(old_pte) & _PAGE_PRESENT)))
+		if (unlikely(!pte_present(old_pte)))
 			return __pte(0);
 
 		new_pte = pte_mkyoung(old_pte);
```
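Switching from the open-coded `_PAGE_PRESENT` test to `pte_present()` is more than cosmetic on Book3S-64, where a pte can be invalidated in place (e.g. for NUMA hinting faults) and still count as present. The sketch below states that assumed semantics; it is hedged, not a quote of the real helper, so verify against arch/powerpc/include/asm/book3s/64/pgtable.h:

```c
/* Assumed Book3S-64 semantics (illustrative, not from the patch):
 * a pte counts as present if either flag is set, so the helper also
 * covers ptes that were invalidated in place rather than cleared. */
static inline bool pte_present_sketch(pte_t pte)
{
	return pte_val(pte) & (_PAGE_PRESENT | _PAGE_INVALID);
}
```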
```diff
@@ ... @@
  */
 static inline struct kvm_memslots *kvm_memslots_raw(struct kvm *kvm)
 {
-	return rcu_dereference_raw_notrace(kvm->memslots[0]);
+	return rcu_dereference_raw_check(kvm->memslots[0]);
 }
 
 extern void kvmppc_mmu_debugfs_init(struct kvm *kvm);
+extern void kvmhv_radix_debugfs_init(struct kvm *kvm);
 
 extern void kvmhv_rm_send_ipi(int cpu);
 
```
```diff
@@ ... @@
 }
 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
 
+extern int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
+			     unsigned long gpa, unsigned int level,
+			     unsigned long mmu_seq, unsigned int lpid,
+			     unsigned long *rmapp, struct rmap_nested **n_rmap);
+extern void kvmhv_insert_nest_rmap(struct kvm *kvm, unsigned long *rmapp,
+				   struct rmap_nested **n_rmap);
+extern void kvmhv_update_nest_rmap_rc_list(struct kvm *kvm, unsigned long *rmapp,
+					   unsigned long clr, unsigned long set,
+					   unsigned long hpa, unsigned long nbytes);
+extern void kvmhv_remove_nest_rmap_range(struct kvm *kvm,
+				const struct kvm_memory_slot *memslot,
+				unsigned long gpa, unsigned long hpa,
+				unsigned long nbytes);
+
+static inline pte_t *
+find_kvm_secondary_pte_unlocked(struct kvm *kvm, unsigned long ea,
+				unsigned *hshift)
+{
+	pte_t *pte;
+
+	pte = __find_linux_pte(kvm->arch.pgtable, ea, NULL, hshift);
+	return pte;
+}
+
+static inline pte_t *find_kvm_secondary_pte(struct kvm *kvm, unsigned long ea,
+					    unsigned *hshift)
+{
+	pte_t *pte;
+
+	VM_WARN(!spin_is_locked(&kvm->mmu_lock),
+		"%s called with kvm mmu_lock not held \n", __func__);
+	pte = __find_linux_pte(kvm->arch.pgtable, ea, NULL, hshift);
+
+	return pte;
+}
+
+static inline pte_t *find_kvm_host_pte(struct kvm *kvm, unsigned long mmu_seq,
+				       unsigned long ea, unsigned *hshift)
+{
+	pte_t *pte;
+
+	VM_WARN(!spin_is_locked(&kvm->mmu_lock),
+		"%s called with kvm mmu_lock not held \n", __func__);
+
+	if (mmu_notifier_retry(kvm, mmu_seq))
+		return NULL;
+
+	pte = __find_linux_pte(kvm->mm->pgd, ea, NULL, hshift);
+
+	return pte;
+}
+
+extern pte_t *find_kvm_nested_guest_pte(struct kvm *kvm, unsigned long lpid,
+					unsigned long ea, unsigned *hshift);
+
```
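`find_kvm_host_pte()` bakes the standard mmu_notifier race check into the walk: the caller samples the notifier sequence count before translating, then calls it under `kvm->mmu_lock` and must retry the whole fault if it returns NULL. A sketch of that calling pattern, with the function and the names around the walk illustrative rather than taken from the patch:

```c
/* Illustrative caller pattern (not from the patch): hva is the host
 * virtual address previously computed for the faulting gfn. */
static int sketch_translate(struct kvm *kvm, unsigned long hva)
{
	unsigned long mmu_seq;
	unsigned int shift;
	pte_t *ptep;

	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();			/* order the read before the walk */

	/* ... fault the host page in if needed ... */

	spin_lock(&kvm->mmu_lock);
	ptep = find_kvm_host_pte(kvm, mmu_seq, hva, &shift);
	if (!ptep) {
		spin_unlock(&kvm->mmu_lock);
		return -EAGAIN;		/* raced with invalidation: retry */
	}
	/* ... use *ptep while still holding mmu_lock ... */
	spin_unlock(&kvm->mmu_lock);
	return 0;
}
```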
```diff
 #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
 
 #endif /* __ASM_KVM_BOOK3S_64_H__ */
```