.. | .. |
---|
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ASM__VIRT_H
---|
.. | .. |
---|
46 | 35 | */ |
---|
#define HVC_RESET_VECTORS 2

/*
 * HVC_VHE_RESTART - Upgrade the CPU from EL1 to EL2, if possible
 */
#define HVC_VHE_RESTART 3

/* Max number of HYP stub hypercalls */
#define HVC_STUB_HCALL_NR 4

/* Error returned when an invalid stub number is passed into x0 */
#define HVC_STUB_ERR 0xbadca11
---|
.. | .. |
---|
76 | 70 | void __hyp_set_vectors(phys_addr_t phys_vector_base); |
---|
77 | 71 | void __hyp_reset_vectors(void); |
---|
78 | 72 | |
---|
| 73 | +DECLARE_STATIC_KEY_FALSE(kvm_protected_mode_initialized); |
---|
| 74 | + |
---|
79 | 75 | /* Reports the availability of HYP mode */ |
---|
80 | 76 | static inline bool is_hyp_mode_available(void) |
---|
81 | 77 | { |
---|
| 78 | + /* |
---|
| 79 | + * If KVM protected mode is initialized, all CPUs must have been booted |
---|
| 80 | + * in EL2. Avoid checking __boot_cpu_mode as CPUs now come up in EL1. |
---|
| 81 | + */ |
---|
| 82 | + if (IS_ENABLED(CONFIG_KVM) && |
---|
| 83 | + static_branch_likely(&kvm_protected_mode_initialized)) |
---|
| 84 | + return true; |
---|
| 85 | + |
---|
82 | 86 | return (__boot_cpu_mode[0] == BOOT_CPU_MODE_EL2 && |
---|
83 | 87 | __boot_cpu_mode[1] == BOOT_CPU_MODE_EL2); |
---|
84 | 88 | } |
---|
.. | .. |
---|
86 | 90 | /* Check if the bootloader has booted CPUs in different modes */ |
---|
87 | 91 | static inline bool is_hyp_mode_mismatched(void) |
---|
88 | 92 | { |
---|
| 93 | + /* |
---|
| 94 | + * If KVM protected mode is initialized, all CPUs must have been booted |
---|
| 95 | + * in EL2. Avoid checking __boot_cpu_mode as CPUs now come up in EL1. |
---|
| 96 | + */ |
---|
| 97 | + if (IS_ENABLED(CONFIG_KVM) && |
---|
| 98 | + static_branch_likely(&kvm_protected_mode_initialized)) |
---|
| 99 | + return false; |
---|
| 100 | + |
---|
89 | 101 | return __boot_cpu_mode[0] != __boot_cpu_mode[1]; |
---|
90 | 102 | } |
---|
91 | 103 | |
---|
.. | .. |
---|
94 | 106 | return read_sysreg(CurrentEL) == CurrentEL_EL2; |
---|
95 | 107 | } |
---|
96 | 108 | |
---|
97 | | -static inline bool has_vhe(void) |
---|
| 109 | +static __always_inline bool has_vhe(void) |
---|
98 | 110 | { |
---|
99 | | - if (cpus_have_const_cap(ARM64_HAS_VIRT_HOST_EXTN)) |
---|
| 111 | + /* |
---|
| 112 | + * Code only run in VHE/NVHE hyp context can assume VHE is present or |
---|
| 113 | + * absent. Otherwise fall back to caps. |
---|
| 114 | + */ |
---|
| 115 | + if (is_vhe_hyp_code()) |
---|
100 | 116 | return true; |
---|
| 117 | + else if (is_nvhe_hyp_code()) |
---|
| 118 | + return false; |
---|
| 119 | + else |
---|
| 120 | + return cpus_have_final_cap(ARM64_HAS_VIRT_HOST_EXTN); |
---|
| 121 | +} |
---|
101 | 122 | |
---|
102 | | - return false; |
---|
| 123 | +static __always_inline bool is_protected_kvm_enabled(void) |
---|
| 124 | +{ |
---|
| 125 | + if (is_vhe_hyp_code()) |
---|
| 126 | + return false; |
---|
| 127 | + else |
---|
| 128 | + return cpus_have_final_cap(ARM64_KVM_PROTECTED_MODE); |
---|
103 | 129 | } |
---|
104 | 130 | |
---|
105 | 131 | #endif /* __ASSEMBLY__ */ |
---|