.. | ..
2 | 2 | /*
3 | 3 |  * hosting IBM Z kernel virtual machines (s390x)
4 | 4 |  *
5 | | - * Copyright IBM Corp. 2008, 2018
| 5 | + * Copyright IBM Corp. 2008, 2020
6 | 6 |  *
7 | 7 |  * Author(s): Carsten Otte <cotte@de.ibm.com>
8 | 8 |  *            Christian Borntraeger <borntraeger@de.ibm.com>
.. | ..
10 | 10 |  *            Christian Ehrhardt <ehrhardt@de.ibm.com>
11 | 11 |  *            Jason J. Herne <jjherne@us.ibm.com>
12 | 12 |  */
| 13 | +
| 14 | +#define KMSG_COMPONENT "kvm-s390"
| 15 | +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
13 | 16 |
14 | 17 | #include <linux/compiler.h>
15 | 18 | #include <linux/err.h>
.. | ..
28 | 31 | #include <linux/bitmap.h>
29 | 32 | #include <linux/sched/signal.h>
30 | 33 | #include <linux/string.h>
| 34 | +#include <linux/pgtable.h>
31 | 35 |
32 | 36 | #include <asm/asm-offsets.h>
33 | 37 | #include <asm/lowcore.h>
34 | 38 | #include <asm/stp.h>
35 | | -#include <asm/pgtable.h>
36 | 39 | #include <asm/gmap.h>
37 | 40 | #include <asm/nmi.h>
38 | 41 | #include <asm/switch_to.h>
.. | ..
40 | 43 | #include <asm/sclp.h>
41 | 44 | #include <asm/cpacf.h>
42 | 45 | #include <asm/timex.h>
| 46 | +#include <asm/ap.h>
| 47 | +#include <asm/uv.h>
43 | 48 | #include "kvm-s390.h"
44 | 49 | #include "gaccess.h"
45 | | -
46 | | -#define KMSG_COMPONENT "kvm-s390"
47 | | -#undef pr_fmt
48 | | -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
49 | 50 |
50 | 51 | #define CREATE_TRACE_POINTS
51 | 52 | #include "trace.h"
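Note: the KMSG_COMPONENT/pr_fmt block moves above the #include lines because pr_fmt() is consumed at preprocessing time by the pr_*() macros pulled in through those headers; defining it first makes the old "#undef pr_fmt" dance unnecessary. A minimal userspace sketch of the mechanism (pr_err() is a printf here, purely for illustration):

    #include <stdio.h>

    /* pr_fmt() is pasted around the format string when pr_err() expands,
     * so it must be visible before the first expansion. */
    #define pr_fmt(fmt) "kvm-s390: " fmt
    #define pr_err(fmt, ...) printf(pr_fmt(fmt), ##__VA_ARGS__)

    int main(void)
    {
        pr_err("A FLIC registration call failed with rc=%d\n", -12);
        /* prints: kvm-s390: A FLIC registration call failed with rc=-12 */
        return 0;
    }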
.. | ..
56 | 57 | #define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
57 | 58 |                            (KVM_MAX_VCPUS + LOCAL_IRQS))
58 | 59 |
59 | | -#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
60 | | -#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
61 | | -
62 | 60 | struct kvm_stats_debugfs_item debugfs_entries[] = {
63 | | -    { "userspace_handled", VCPU_STAT(exit_userspace) },
64 | | -    { "exit_null", VCPU_STAT(exit_null) },
65 | | -    { "exit_validity", VCPU_STAT(exit_validity) },
66 | | -    { "exit_stop_request", VCPU_STAT(exit_stop_request) },
67 | | -    { "exit_external_request", VCPU_STAT(exit_external_request) },
68 | | -    { "exit_io_request", VCPU_STAT(exit_io_request) },
69 | | -    { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
70 | | -    { "exit_instruction", VCPU_STAT(exit_instruction) },
71 | | -    { "exit_pei", VCPU_STAT(exit_pei) },
72 | | -    { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
73 | | -    { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
74 | | -    { "exit_operation_exception", VCPU_STAT(exit_operation_exception) },
75 | | -    { "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
76 | | -    { "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
77 | | -    { "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
78 | | -    { "halt_wakeup", VCPU_STAT(halt_wakeup) },
79 | | -    { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
80 | | -    { "instruction_lctl", VCPU_STAT(instruction_lctl) },
81 | | -    { "instruction_stctl", VCPU_STAT(instruction_stctl) },
82 | | -    { "instruction_stctg", VCPU_STAT(instruction_stctg) },
83 | | -    { "deliver_ckc", VCPU_STAT(deliver_ckc) },
84 | | -    { "deliver_cputm", VCPU_STAT(deliver_cputm) },
85 | | -    { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
86 | | -    { "deliver_external_call", VCPU_STAT(deliver_external_call) },
87 | | -    { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
88 | | -    { "deliver_virtio", VCPU_STAT(deliver_virtio) },
89 | | -    { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
90 | | -    { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
91 | | -    { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
92 | | -    { "deliver_program", VCPU_STAT(deliver_program) },
93 | | -    { "deliver_io", VCPU_STAT(deliver_io) },
94 | | -    { "deliver_machine_check", VCPU_STAT(deliver_machine_check) },
95 | | -    { "exit_wait_state", VCPU_STAT(exit_wait_state) },
96 | | -    { "inject_ckc", VCPU_STAT(inject_ckc) },
97 | | -    { "inject_cputm", VCPU_STAT(inject_cputm) },
98 | | -    { "inject_external_call", VCPU_STAT(inject_external_call) },
99 | | -    { "inject_float_mchk", VM_STAT(inject_float_mchk) },
100 | | -    { "inject_emergency_signal", VCPU_STAT(inject_emergency_signal) },
101 | | -    { "inject_io", VM_STAT(inject_io) },
102 | | -    { "inject_mchk", VCPU_STAT(inject_mchk) },
103 | | -    { "inject_pfault_done", VM_STAT(inject_pfault_done) },
104 | | -    { "inject_program", VCPU_STAT(inject_program) },
105 | | -    { "inject_restart", VCPU_STAT(inject_restart) },
106 | | -    { "inject_service_signal", VM_STAT(inject_service_signal) },
107 | | -    { "inject_set_prefix", VCPU_STAT(inject_set_prefix) },
108 | | -    { "inject_stop_signal", VCPU_STAT(inject_stop_signal) },
109 | | -    { "inject_pfault_init", VCPU_STAT(inject_pfault_init) },
110 | | -    { "inject_virtio", VM_STAT(inject_virtio) },
111 | | -    { "instruction_epsw", VCPU_STAT(instruction_epsw) },
112 | | -    { "instruction_gs", VCPU_STAT(instruction_gs) },
113 | | -    { "instruction_io_other", VCPU_STAT(instruction_io_other) },
114 | | -    { "instruction_lpsw", VCPU_STAT(instruction_lpsw) },
115 | | -    { "instruction_lpswe", VCPU_STAT(instruction_lpswe) },
116 | | -    { "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
117 | | -    { "instruction_ptff", VCPU_STAT(instruction_ptff) },
118 | | -    { "instruction_stidp", VCPU_STAT(instruction_stidp) },
119 | | -    { "instruction_sck", VCPU_STAT(instruction_sck) },
120 | | -    { "instruction_sckpf", VCPU_STAT(instruction_sckpf) },
121 | | -    { "instruction_spx", VCPU_STAT(instruction_spx) },
122 | | -    { "instruction_stpx", VCPU_STAT(instruction_stpx) },
123 | | -    { "instruction_stap", VCPU_STAT(instruction_stap) },
124 | | -    { "instruction_iske", VCPU_STAT(instruction_iske) },
125 | | -    { "instruction_ri", VCPU_STAT(instruction_ri) },
126 | | -    { "instruction_rrbe", VCPU_STAT(instruction_rrbe) },
127 | | -    { "instruction_sske", VCPU_STAT(instruction_sske) },
128 | | -    { "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
129 | | -    { "instruction_essa", VCPU_STAT(instruction_essa) },
130 | | -    { "instruction_stsi", VCPU_STAT(instruction_stsi) },
131 | | -    { "instruction_stfl", VCPU_STAT(instruction_stfl) },
132 | | -    { "instruction_tb", VCPU_STAT(instruction_tb) },
133 | | -    { "instruction_tpi", VCPU_STAT(instruction_tpi) },
134 | | -    { "instruction_tprot", VCPU_STAT(instruction_tprot) },
135 | | -    { "instruction_tsch", VCPU_STAT(instruction_tsch) },
136 | | -    { "instruction_sthyi", VCPU_STAT(instruction_sthyi) },
137 | | -    { "instruction_sie", VCPU_STAT(instruction_sie) },
138 | | -    { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
139 | | -    { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
140 | | -    { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
141 | | -    { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
142 | | -    { "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
143 | | -    { "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
144 | | -    { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
145 | | -    { "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
146 | | -    { "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
147 | | -    { "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
148 | | -    { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
149 | | -    { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
150 | | -    { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
151 | | -    { "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
152 | | -    { "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
153 | | -    { "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
154 | | -    { "instruction_diag_10", VCPU_STAT(diagnose_10) },
155 | | -    { "instruction_diag_44", VCPU_STAT(diagnose_44) },
156 | | -    { "instruction_diag_9c", VCPU_STAT(diagnose_9c) },
157 | | -    { "instruction_diag_258", VCPU_STAT(diagnose_258) },
158 | | -    { "instruction_diag_308", VCPU_STAT(diagnose_308) },
159 | | -    { "instruction_diag_500", VCPU_STAT(diagnose_500) },
160 | | -    { "instruction_diag_other", VCPU_STAT(diagnose_other) },
| 61 | +    VCPU_STAT("userspace_handled", exit_userspace),
| 62 | +    VCPU_STAT("exit_null", exit_null),
| 63 | +    VCPU_STAT("exit_validity", exit_validity),
| 64 | +    VCPU_STAT("exit_stop_request", exit_stop_request),
| 65 | +    VCPU_STAT("exit_external_request", exit_external_request),
| 66 | +    VCPU_STAT("exit_io_request", exit_io_request),
| 67 | +    VCPU_STAT("exit_external_interrupt", exit_external_interrupt),
| 68 | +    VCPU_STAT("exit_instruction", exit_instruction),
| 69 | +    VCPU_STAT("exit_pei", exit_pei),
| 70 | +    VCPU_STAT("exit_program_interruption", exit_program_interruption),
| 71 | +    VCPU_STAT("exit_instr_and_program_int", exit_instr_and_program),
| 72 | +    VCPU_STAT("exit_operation_exception", exit_operation_exception),
| 73 | +    VCPU_STAT("halt_successful_poll", halt_successful_poll),
| 74 | +    VCPU_STAT("halt_attempted_poll", halt_attempted_poll),
| 75 | +    VCPU_STAT("halt_poll_invalid", halt_poll_invalid),
| 76 | +    VCPU_STAT("halt_no_poll_steal", halt_no_poll_steal),
| 77 | +    VCPU_STAT("halt_wakeup", halt_wakeup),
| 78 | +    VCPU_STAT("halt_poll_success_ns", halt_poll_success_ns),
| 79 | +    VCPU_STAT("halt_poll_fail_ns", halt_poll_fail_ns),
| 80 | +    VCPU_STAT("instruction_lctlg", instruction_lctlg),
| 81 | +    VCPU_STAT("instruction_lctl", instruction_lctl),
| 82 | +    VCPU_STAT("instruction_stctl", instruction_stctl),
| 83 | +    VCPU_STAT("instruction_stctg", instruction_stctg),
| 84 | +    VCPU_STAT("deliver_ckc", deliver_ckc),
| 85 | +    VCPU_STAT("deliver_cputm", deliver_cputm),
| 86 | +    VCPU_STAT("deliver_emergency_signal", deliver_emergency_signal),
| 87 | +    VCPU_STAT("deliver_external_call", deliver_external_call),
| 88 | +    VCPU_STAT("deliver_service_signal", deliver_service_signal),
| 89 | +    VCPU_STAT("deliver_virtio", deliver_virtio),
| 90 | +    VCPU_STAT("deliver_stop_signal", deliver_stop_signal),
| 91 | +    VCPU_STAT("deliver_prefix_signal", deliver_prefix_signal),
| 92 | +    VCPU_STAT("deliver_restart_signal", deliver_restart_signal),
| 93 | +    VCPU_STAT("deliver_program", deliver_program),
| 94 | +    VCPU_STAT("deliver_io", deliver_io),
| 95 | +    VCPU_STAT("deliver_machine_check", deliver_machine_check),
| 96 | +    VCPU_STAT("exit_wait_state", exit_wait_state),
| 97 | +    VCPU_STAT("inject_ckc", inject_ckc),
| 98 | +    VCPU_STAT("inject_cputm", inject_cputm),
| 99 | +    VCPU_STAT("inject_external_call", inject_external_call),
| 100 | +    VM_STAT("inject_float_mchk", inject_float_mchk),
| 101 | +    VCPU_STAT("inject_emergency_signal", inject_emergency_signal),
| 102 | +    VM_STAT("inject_io", inject_io),
| 103 | +    VCPU_STAT("inject_mchk", inject_mchk),
| 104 | +    VM_STAT("inject_pfault_done", inject_pfault_done),
| 105 | +    VCPU_STAT("inject_program", inject_program),
| 106 | +    VCPU_STAT("inject_restart", inject_restart),
| 107 | +    VM_STAT("inject_service_signal", inject_service_signal),
| 108 | +    VCPU_STAT("inject_set_prefix", inject_set_prefix),
| 109 | +    VCPU_STAT("inject_stop_signal", inject_stop_signal),
| 110 | +    VCPU_STAT("inject_pfault_init", inject_pfault_init),
| 111 | +    VM_STAT("inject_virtio", inject_virtio),
| 112 | +    VCPU_STAT("instruction_epsw", instruction_epsw),
| 113 | +    VCPU_STAT("instruction_gs", instruction_gs),
| 114 | +    VCPU_STAT("instruction_io_other", instruction_io_other),
| 115 | +    VCPU_STAT("instruction_lpsw", instruction_lpsw),
| 116 | +    VCPU_STAT("instruction_lpswe", instruction_lpswe),
| 117 | +    VCPU_STAT("instruction_pfmf", instruction_pfmf),
| 118 | +    VCPU_STAT("instruction_ptff", instruction_ptff),
| 119 | +    VCPU_STAT("instruction_stidp", instruction_stidp),
| 120 | +    VCPU_STAT("instruction_sck", instruction_sck),
| 121 | +    VCPU_STAT("instruction_sckpf", instruction_sckpf),
| 122 | +    VCPU_STAT("instruction_spx", instruction_spx),
| 123 | +    VCPU_STAT("instruction_stpx", instruction_stpx),
| 124 | +    VCPU_STAT("instruction_stap", instruction_stap),
| 125 | +    VCPU_STAT("instruction_iske", instruction_iske),
| 126 | +    VCPU_STAT("instruction_ri", instruction_ri),
| 127 | +    VCPU_STAT("instruction_rrbe", instruction_rrbe),
| 128 | +    VCPU_STAT("instruction_sske", instruction_sske),
| 129 | +    VCPU_STAT("instruction_ipte_interlock", instruction_ipte_interlock),
| 130 | +    VCPU_STAT("instruction_essa", instruction_essa),
| 131 | +    VCPU_STAT("instruction_stsi", instruction_stsi),
| 132 | +    VCPU_STAT("instruction_stfl", instruction_stfl),
| 133 | +    VCPU_STAT("instruction_tb", instruction_tb),
| 134 | +    VCPU_STAT("instruction_tpi", instruction_tpi),
| 135 | +    VCPU_STAT("instruction_tprot", instruction_tprot),
| 136 | +    VCPU_STAT("instruction_tsch", instruction_tsch),
| 137 | +    VCPU_STAT("instruction_sthyi", instruction_sthyi),
| 138 | +    VCPU_STAT("instruction_sie", instruction_sie),
| 139 | +    VCPU_STAT("instruction_sigp_sense", instruction_sigp_sense),
| 140 | +    VCPU_STAT("instruction_sigp_sense_running", instruction_sigp_sense_running),
| 141 | +    VCPU_STAT("instruction_sigp_external_call", instruction_sigp_external_call),
| 142 | +    VCPU_STAT("instruction_sigp_emergency", instruction_sigp_emergency),
| 143 | +    VCPU_STAT("instruction_sigp_cond_emergency", instruction_sigp_cond_emergency),
| 144 | +    VCPU_STAT("instruction_sigp_start", instruction_sigp_start),
| 145 | +    VCPU_STAT("instruction_sigp_stop", instruction_sigp_stop),
| 146 | +    VCPU_STAT("instruction_sigp_stop_store_status", instruction_sigp_stop_store_status),
| 147 | +    VCPU_STAT("instruction_sigp_store_status", instruction_sigp_store_status),
| 148 | +    VCPU_STAT("instruction_sigp_store_adtl_status", instruction_sigp_store_adtl_status),
| 149 | +    VCPU_STAT("instruction_sigp_set_arch", instruction_sigp_arch),
| 150 | +    VCPU_STAT("instruction_sigp_set_prefix", instruction_sigp_prefix),
| 151 | +    VCPU_STAT("instruction_sigp_restart", instruction_sigp_restart),
| 152 | +    VCPU_STAT("instruction_sigp_cpu_reset", instruction_sigp_cpu_reset),
| 153 | +    VCPU_STAT("instruction_sigp_init_cpu_reset", instruction_sigp_init_cpu_reset),
| 154 | +    VCPU_STAT("instruction_sigp_unknown", instruction_sigp_unknown),
| 155 | +    VCPU_STAT("instruction_diag_10", diagnose_10),
| 156 | +    VCPU_STAT("instruction_diag_44", diagnose_44),
| 157 | +    VCPU_STAT("instruction_diag_9c", diagnose_9c),
| 158 | +    VCPU_STAT("diag_9c_ignored", diagnose_9c_ignored),
| 159 | +    VCPU_STAT("instruction_diag_258", diagnose_258),
| 160 | +    VCPU_STAT("instruction_diag_308", diagnose_308),
| 161 | +    VCPU_STAT("instruction_diag_500", diagnose_500),
| 162 | +    VCPU_STAT("instruction_diag_other", diagnose_other),
161 | 163 |     { NULL }
162 | 164 | };
163 | 165 |
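Note: the debugfs entries now pass the stat name into VCPU_STAT()/VM_STAT() because those macros were unified in common KVM code; the local positional definitions above are deleted. Assuming the common definitions in include/linux/kvm_host.h, they look roughly like this (paraphrased, exact tokens may differ by release):

    #define VM_STAT(n, x, ...) \
        { n, offsetof(struct kvm, stat.x), KVM_STAT_VM, ## __VA_ARGS__ }
    #define VCPU_STAT(n, x, ...) \
        { n, offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU, ## __VA_ARGS__ }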
.. | ..
176 | 178 | static int hpage;
177 | 179 | module_param(hpage, int, 0444);
178 | 180 | MODULE_PARM_DESC(hpage, "1m huge page backing support");
| 181 | +
| 182 | +/* maximum percentage of steal time for polling.  >100 is treated like 100 */
| 183 | +static u8 halt_poll_max_steal = 10;
| 184 | +module_param(halt_poll_max_steal, byte, 0644);
| 185 | +MODULE_PARM_DESC(halt_poll_max_steal, "Maximum percentage of steal time to allow polling");
| 186 | +
| 187 | +/* if set to true, the GISA will be initialized and used if available */
| 188 | +static bool use_gisa = true;
| 189 | +module_param(use_gisa, bool, 0644);
| 190 | +MODULE_PARM_DESC(use_gisa, "Use the GISA if the host supports it.");
179 | 191 |
180 | 192 | /*
181 | 193 |  * For now we handle at most 16 double words as this is what the s390 base
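Note: halt_poll_max_steal pairs with the new halt_no_poll_steal counter in the stats table above: when the host itself is overcommitted, busy-polling in a halted vCPU only burns stolen cycles. Elsewhere in this file (not part of these hunks) the parameter is consulted roughly like this (paraphrased sketch):

    /* Sketch: suppress halt polling once the averaged steal time exceeds
     * the configured percentage of total time. */
    bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
    {
        /* do not poll with more than halt_poll_max_steal percent of steal time */
        if (S390_lowcore.avg_steal_timer * 100 / (TICK_USEC << 12) >=
            halt_poll_max_steal) {
            vcpu->stat.halt_no_poll_steal++;
            return true;
        }
        return false;
    }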
.. | ..
213 | 225 | static struct gmap_notifier gmap_notifier;
214 | 226 | static struct gmap_notifier vsie_gmap_notifier;
215 | 227 | debug_info_t *kvm_s390_dbf;
| 228 | +debug_info_t *kvm_s390_dbf_uv;
216 | 229 |
217 | 230 | /* Section: not file related */
218 | 231 | int kvm_arch_hardware_enable(void)
.. | ..
221 | 234 |     return 0;
222 | 235 | }
223 | 236 |
| 237 | +int kvm_arch_check_processor_compat(void *opaque)
| 238 | +{
| 239 | +    return 0;
| 240 | +}
| 241 | +
| 242 | +/* forward declarations */
224 | 243 | static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
225 | 244 |                               unsigned long end);
| 245 | +static int sca_switch_to_extended(struct kvm *kvm);
226 | 246 |
227 | 247 | static void kvm_clock_sync_scb(struct kvm_s390_sie_block *scb, u64 delta)
228 | 248 | {
.. | ..
281 | 301 |     .notifier_call = kvm_clock_sync,
282 | 302 | };
283 | 303 |
284 | | -int kvm_arch_hardware_setup(void)
| 304 | +int kvm_arch_hardware_setup(void *opaque)
285 | 305 | {
286 | 306 |     gmap_notifier.notifier_call = kvm_gmap_notifier;
287 | 307 |     gmap_register_pte_notifier(&gmap_notifier);
.. | ..
307 | 327 |
308 | 328 | static inline int plo_test_bit(unsigned char nr)
309 | 329 | {
310 | | -    register unsigned long r0 asm("0") = (unsigned long) nr | 0x100;
| 330 | +    unsigned long function = (unsigned long)nr | 0x100;
311 | 331 |     int cc;
312 | 332 |
313 | 333 |     asm volatile(
| 334 | +        "    lgr    0,%[function]\n"
314 | 335 |         /* Parameter registers are ignored for "test bit" */
315 | 336 |         "    plo    0,0,0,0(0)\n"
316 | 337 |         "    ipm    %0\n"
317 | 338 |         "    srl    %0,28\n"
318 | 339 |         : "=d" (cc)
319 | | -        : "d" (r0)
320 | | -        : "cc");
| 340 | +        : [function] "d" (function)
| 341 | +        : "cc", "0");
321 | 342 |     return cc == 0;
322 | 343 | }
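Note: plo_test_bit() now loads GPR 0 inside the asm body and lists "0" as a clobber instead of binding a C variable with "register ... asm("0")"; that older construct is only honored at the asm statement itself and is fragile under inlining and newer compilers. For context, the existing caller (not part of these hunks) uses it to assemble the guest-visible PLO subfunction mask, roughly:

    /* Sketch of the loop in kvm_s390_cpu_feat_init(): bit i of the PLO
     * mask is set iff PLO function i is available on this machine. */
    int i;

    for (i = 0; i < 256; ++i) {
        if (plo_test_bit(i))
            kvm_s390_available_subfunc.plo[i >> 3] |= 0x80 >> (i & 7);
    }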
| 344 | +
| 345 | +static __always_inline void __insn32_query(unsigned int opcode, u8 *query)
| 346 | +{
| 347 | +    asm volatile(
| 348 | +        "    lghi    0,0\n"
| 349 | +        "    lgr    1,%[query]\n"
| 350 | +        /* Parameter registers are ignored */
| 351 | +        "    .insn    rrf,%[opc] << 16,2,4,6,0\n"
| 352 | +        :
| 353 | +        : [query] "d" ((unsigned long)query), [opc] "i" (opcode)
| 354 | +        : "cc", "memory", "0", "1");
| 355 | +}
| 356 | +
| 357 | +#define INSN_SORTL 0xb938
| 358 | +#define INSN_DFLTCC 0xb939
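Note: __insn32_query() executes the named instruction in query mode — GPR 0 = 0 selects the query function and GPR 1 points at the buffer that receives the availability bitmap; SORTL (enhanced sort) and DFLTCC (deflate conversion) are queried with it below. The result buffers live in the uapi CPU-model structure; an illustrative, deliberately trimmed sketch (field sizes shown only for the members used here, so this is not ABI-exact):

    #include <linux/types.h>

    /* Trimmed sketch of struct kvm_s390_vm_cpu_subfunc from
     * arch/s390/include/uapi/asm/kvm.h; many members elided. */
    struct kvm_s390_vm_cpu_subfunc_sketch {
        __u8 plo[32];     /* always available */
        __u8 kma[16];     /* with MSA8 */
        __u8 kdsa[16];    /* with MSA9 */
        __u8 sortl[32];   /* with enhanced sort (facility 150) */
        __u8 dfltcc[32];  /* with deflate conversion (facility 151) */
    };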
323 | 359 |
324 | 360 | static void kvm_s390_cpu_feat_init(void)
325 | 361 | {
.. | ..
367 | 403 |     if (test_facility(146)) /* MSA8 */
368 | 404 |         __cpacf_query(CPACF_KMA, (cpacf_mask_t *)
369 | 405 |                       kvm_s390_available_subfunc.kma);
| 406 | +
| 407 | +    if (test_facility(155)) /* MSA9 */
| 408 | +        __cpacf_query(CPACF_KDSA, (cpacf_mask_t *)
| 409 | +                      kvm_s390_available_subfunc.kdsa);
| 410 | +
| 411 | +    if (test_facility(150)) /* SORTL */
| 412 | +        __insn32_query(INSN_SORTL, kvm_s390_available_subfunc.sortl);
| 413 | +
| 414 | +    if (test_facility(151)) /* DFLTCC */
| 415 | +        __insn32_query(INSN_DFLTCC, kvm_s390_available_subfunc.dfltcc);
370 | 416 |
371 | 417 |     if (MACHINE_HAS_ESOP)
372 | 418 |         allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
.. | ..
416 | 462 |
417 | 463 | int kvm_arch_init(void *opaque)
418 | 464 | {
419 | | -    int rc;
| 465 | +    int rc = -ENOMEM;
420 | 466 |
421 | 467 |     kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
422 | 468 |     if (!kvm_s390_dbf)
423 | 469 |         return -ENOMEM;
424 | 470 |
425 | | -    if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view)) {
426 | | -        rc = -ENOMEM;
427 | | -        goto out_debug_unreg;
428 | | -    }
| 471 | +    kvm_s390_dbf_uv = debug_register("kvm-uv", 32, 1, 7 * sizeof(long));
| 472 | +    if (!kvm_s390_dbf_uv)
| 473 | +        goto out;
| 474 | +
| 475 | +    if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view) ||
| 476 | +        debug_register_view(kvm_s390_dbf_uv, &debug_sprintf_view))
| 477 | +        goto out;
429 | 478 |
430 | 479 |     kvm_s390_cpu_feat_init();
431 | 480 |
432 | 481 |     /* Register floating interrupt controller interface. */
433 | 482 |     rc = kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
434 | 483 |     if (rc) {
435 | | -        pr_err("Failed to register FLIC rc=%d\n", rc);
436 | | -        goto out_debug_unreg;
| 484 | +        pr_err("A FLIC registration call failed with rc=%d\n", rc);
| 485 | +        goto out;
437 | 486 |     }
| 487 | +
| 488 | +    rc = kvm_s390_gib_init(GAL_ISC);
| 489 | +    if (rc)
| 490 | +        goto out;
| 491 | +
438 | 492 |     return 0;
439 | 493 |
440 | | -out_debug_unreg:
441 | | -    debug_unregister(kvm_s390_dbf);
| 494 | +out:
| 495 | +    kvm_arch_exit();
442 | 496 |     return rc;
443 | 497 | }
444 | 498 |
445 | 499 | void kvm_arch_exit(void)
446 | 500 | {
| 501 | +    kvm_s390_gib_destroy();
447 | 502 |     debug_unregister(kvm_s390_dbf);
| 503 | +    debug_unregister(kvm_s390_dbf_uv);
448 | 504 | }
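Note: kvm_arch_init() now funnels every failure through a single "out:" label that simply calls kvm_arch_exit(). That only works because each teardown helper tolerates state that was never set up (the s390 debug facility's debug_unregister() is a no-op on a NULL handle, and kvm_s390_gib_destroy() is expected to cope likewise). The shape of the pattern as a self-contained sketch:

    #include <stdlib.h>

    /* Generic "one exit path" init/teardown sketch: teardown must be safe
     * with any prefix of the resources initialized (free(NULL) here plays
     * the role of debug_unregister(NULL)). */
    static void *dbf, *dbf_uv;

    static void teardown_all(void)
    {
        free(dbf_uv);
        free(dbf);
        dbf_uv = dbf = NULL;
    }

    static int init_all(void)
    {
        int rc = -1;

        dbf = malloc(32);
        if (!dbf)
            return rc;
        dbf_uv = malloc(32);
        if (!dbf_uv)
            goto out;
        return 0;
    out:
        teardown_all();
        return rc;
    }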
449 | 505 |
450 | 506 | /* Section: device related */
.. | ..
474 | 530 |     case KVM_CAP_S390_CSS_SUPPORT:
475 | 531 |     case KVM_CAP_IOEVENTFD:
476 | 532 |     case KVM_CAP_DEVICE_CTRL:
477 | | -    case KVM_CAP_ENABLE_CAP_VM:
478 | 533 |     case KVM_CAP_S390_IRQCHIP:
479 | 534 |     case KVM_CAP_VM_ATTRIBUTES:
480 | 535 |     case KVM_CAP_MP_STATE:
.. | ..
488 | 543 |     case KVM_CAP_S390_CMMA_MIGRATION:
489 | 544 |     case KVM_CAP_S390_AIS:
490 | 545 |     case KVM_CAP_S390_AIS_MIGRATION:
| 546 | +    case KVM_CAP_S390_VCPU_RESETS:
| 547 | +    case KVM_CAP_SET_GUEST_DEBUG:
| 548 | +    case KVM_CAP_S390_DIAG318:
491 | 549 |         r = 1;
492 | 550 |         break;
493 | 551 |     case KVM_CAP_S390_HPAGE_1M:
.. | ..
507 | 565 |         else if (sclp.has_esca && sclp.has_64bscao)
508 | 566 |             r = KVM_S390_ESCA_CPU_SLOTS;
509 | 567 |         break;
510 | | -    case KVM_CAP_NR_MEMSLOTS:
511 | | -        r = KVM_USER_MEM_SLOTS;
512 | | -        break;
513 | 568 |     case KVM_CAP_S390_COW:
514 | 569 |         r = MACHINE_HAS_ESOP;
515 | 570 |         break;
.. | ..
525 | 580 |     case KVM_CAP_S390_BPB:
526 | 581 |         r = test_facility(82);
527 | 582 |         break;
| 583 | +    case KVM_CAP_S390_PROTECTED:
| 584 | +        r = is_prot_virt_host();
| 585 | +        break;
528 | 586 |     default:
529 | 587 |         r = 0;
530 | 588 |     }
531 | 589 |     return r;
532 | 590 | }
533 | 591 |
534 | | -static void kvm_s390_sync_dirty_log(struct kvm *kvm,
535 | | -                                    struct kvm_memory_slot *memslot)
| 592 | +void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
536 | 593 | {
537 | 594 |     int i;
538 | 595 |     gfn_t cur_gfn, last_gfn;
.. | ..
573 | 630 | {
574 | 631 |     int r;
575 | 632 |     unsigned long n;
576 | | -    struct kvm_memslots *slots;
577 | 633 |     struct kvm_memory_slot *memslot;
578 | | -    int is_dirty = 0;
| 634 | +    int is_dirty;
579 | 635 |
580 | 636 |     if (kvm_is_ucontrol(kvm))
581 | 637 |         return -EINVAL;
.. | ..
586 | 642 |     if (log->slot >= KVM_USER_MEM_SLOTS)
587 | 643 |         goto out;
588 | 644 |
589 | | -    slots = kvm_memslots(kvm);
590 | | -    memslot = id_to_memslot(slots, log->slot);
591 | | -    r = -ENOENT;
592 | | -    if (!memslot->dirty_bitmap)
593 | | -        goto out;
594 | | -
595 | | -    kvm_s390_sync_dirty_log(kvm, memslot);
596 | | -    r = kvm_get_dirty_log(kvm, log, &is_dirty);
| 645 | +    r = kvm_get_dirty_log(kvm, log, &is_dirty, &memslot);
597 | 646 |     if (r)
598 | 647 |         goto out;
599 | 648 |
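Note: two related dirty-log changes here. The sync helper is renamed to the generic kvm_arch_sync_dirty_log() hook, and the memslot lookup/validation that used to be open-coded moved into common KVM code, which now hands the resolved memslot back to the caller. Paraphrased, the generic helper's new shape is:

    /* Paraphrased from virt/kvm/kvm_main.c: validates log->slot, calls the
     * arch sync hook, copies the bitmap, and returns the memslot. */
    int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log,
                          int *is_dirty, struct kvm_memory_slot **memslot);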
.. | ..
618 | 667 |     }
619 | 668 | }
620 | 669 |
621 | | -static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
| 670 | +int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
622 | 671 | {
623 | 672 |     int r;
624 | 673 |
.. | ..
650 | 699 |         if (test_facility(135)) {
651 | 700 |             set_kvm_facility(kvm->arch.model.fac_mask, 135);
652 | 701 |             set_kvm_facility(kvm->arch.model.fac_list, 135);
| 702 | +        }
| 703 | +        if (test_facility(148)) {
| 704 | +            set_kvm_facility(kvm->arch.model.fac_mask, 148);
| 705 | +            set_kvm_facility(kvm->arch.model.fac_list, 148);
| 706 | +        }
| 707 | +        if (test_facility(152)) {
| 708 | +            set_kvm_facility(kvm->arch.model.fac_mask, 152);
| 709 | +            set_kvm_facility(kvm->arch.model.fac_list, 152);
653 | 710 |         }
654 | 711 |         r = 0;
655 | 712 |     } else
.. | ..
707 | 764 |             r = -EINVAL;
708 | 765 |         else {
709 | 766 |             r = 0;
710 | | -            down_write(&kvm->mm->mmap_sem);
| 767 | +            mmap_write_lock(kvm->mm);
711 | 768 |             kvm->mm->context.allow_gmap_hpage_1m = 1;
712 | | -            up_write(&kvm->mm->mmap_sem);
| 769 | +            mmap_write_unlock(kvm->mm);
713 | 770 |             /*
714 | 771 |              * We might have to create fake 4k page
715 | 772 |              * tables. To avoid that the hardware works on
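Note: the down_*/up_*(&mm->mmap_sem) pairs throughout this patch become mmap_*_lock()/mmap_*_unlock() calls. The mmap semaphore was renamed to mm->mmap_lock and hidden behind an API so its implementation can change without touching every caller. The wrappers are currently thin; trimmed sketch of include/linux/mmap_lock.h:

    /* 1:1 wrappers today, but a single point to swap the lock type later. */
    static inline void mmap_write_lock(struct mm_struct *mm)
    {
        down_write(&mm->mmap_lock);
    }

    static inline void mmap_write_unlock(struct mm_struct *mm)
    {
        up_write(&mm->mmap_lock);
    }

    static inline void mmap_read_lock(struct mm_struct *mm)
    {
        down_read(&mm->mmap_lock);
    }

    static inline void mmap_read_unlock(struct mm_struct *mm)
    {
        up_read(&mm->mmap_lock);
    }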
.. | ..
856 | 913 |
857 | 914 |     kvm_s390_vcpu_block_all(kvm);
858 | 915 |
859 | | -    kvm_for_each_vcpu(i, vcpu, kvm)
| 916 | +    kvm_for_each_vcpu(i, vcpu, kvm) {
860 | 917 |         kvm_s390_vcpu_crypto_setup(vcpu);
| 918 | +        /* recreate the shadow crycb by leaving the VSIE handler */
| 919 | +        kvm_s390_sync_request(KVM_REQ_VSIE_RESTART, vcpu);
| 920 | +    }
861 | 921 |
862 | 922 |     kvm_s390_vcpu_unblock_all(kvm);
863 | 923 | }
864 | 924 |
865 | 925 | static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
866 | 926 | {
867 | | -    if (!test_kvm_facility(kvm, 76))
868 | | -        return -EINVAL;
869 | | -
870 | 927 |     mutex_lock(&kvm->lock);
871 | 928 |     switch (attr->attr) {
872 | 929 |     case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
| 930 | +        if (!test_kvm_facility(kvm, 76)) {
| 931 | +            mutex_unlock(&kvm->lock);
| 932 | +            return -EINVAL;
| 933 | +        }
873 | 934 |         get_random_bytes(
874 | 935 |             kvm->arch.crypto.crycb->aes_wrapping_key_mask,
875 | 936 |             sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
.. | ..
877 | 938 |         VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
878 | 939 |         break;
879 | 940 |     case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
| 941 | +        if (!test_kvm_facility(kvm, 76)) {
| 942 | +            mutex_unlock(&kvm->lock);
| 943 | +            return -EINVAL;
| 944 | +        }
880 | 945 |         get_random_bytes(
881 | 946 |             kvm->arch.crypto.crycb->dea_wrapping_key_mask,
882 | 947 |             sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
.. | ..
884 | 949 |         VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
885 | 950 |         break;
886 | 951 |     case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
| 952 | +        if (!test_kvm_facility(kvm, 76)) {
| 953 | +            mutex_unlock(&kvm->lock);
| 954 | +            return -EINVAL;
| 955 | +        }
887 | 956 |         kvm->arch.crypto.aes_kw = 0;
888 | 957 |         memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
889 | 958 |                sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
890 | 959 |         VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
891 | 960 |         break;
892 | 961 |     case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
| 962 | +        if (!test_kvm_facility(kvm, 76)) {
| 963 | +            mutex_unlock(&kvm->lock);
| 964 | +            return -EINVAL;
| 965 | +        }
893 | 966 |         kvm->arch.crypto.dea_kw = 0;
894 | 967 |         memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
895 | 968 |                sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
896 | 969 |         VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
| 970 | +        break;
| 971 | +    case KVM_S390_VM_CRYPTO_ENABLE_APIE:
| 972 | +        if (!ap_instructions_available()) {
| 973 | +            mutex_unlock(&kvm->lock);
| 974 | +            return -EOPNOTSUPP;
| 975 | +        }
| 976 | +        kvm->arch.crypto.apie = 1;
| 977 | +        break;
| 978 | +    case KVM_S390_VM_CRYPTO_DISABLE_APIE:
| 979 | +        if (!ap_instructions_available()) {
| 980 | +            mutex_unlock(&kvm->lock);
| 981 | +            return -EOPNOTSUPP;
| 982 | +        }
| 983 | +        kvm->arch.crypto.apie = 0;
897 | 984 |         break;
898 | 985 |     default:
899 | 986 |         mutex_unlock(&kvm->lock);
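Note: the blanket facility-76 (MSA extension 3) check at the top of kvm_s390_vm_set_crypto() is pushed down into the four key-wrapping cases, so the new APIE cases — which depend on the AP instructions rather than on facility 76 — stay reachable. A hypothetical userspace sketch of toggling APIE through the VM device-attribute interface (names as in the uapi headers, error handling elided):

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    /* Hypothetical helper: vm_fd is an open KVM VM file descriptor. */
    static int set_apie(int vm_fd, int on)
    {
        struct kvm_device_attr attr = {
            .group = KVM_S390_VM_CRYPTO,
            .attr  = on ? KVM_S390_VM_CRYPTO_ENABLE_APIE
                        : KVM_S390_VM_CRYPTO_DISABLE_APIE,
        };

        return ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
    }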
.. | ..
1005 | 1092 |     return 0;
1006 | 1093 | }
1007 | 1094 |
| 1095 | +static void __kvm_s390_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod);
| 1096 | +
1008 | 1097 | static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
1009 | 1098 | {
1010 | 1099 |     struct kvm_s390_vm_tod_clock gtod;
.. | ..
1014 | 1103 |
1015 | 1104 |     if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx)
1016 | 1105 |         return -EINVAL;
1017 | | -    kvm_s390_set_tod_clock(kvm, &gtod);
| 1106 | +    __kvm_s390_set_tod_clock(kvm, &gtod);
1018 | 1107 |
1019 | 1108 |     VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx",
1020 | 1109 |              gtod.epoch_idx, gtod.tod);
.. | ..
1045 | 1134 |                        sizeof(gtod.tod)))
1046 | 1135 |         return -EFAULT;
1047 | 1136 |
1048 | | -    kvm_s390_set_tod_clock(kvm, &gtod);
| 1137 | +    __kvm_s390_set_tod_clock(kvm, &gtod);
1049 | 1138 |     VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod);
1050 | 1139 |     return 0;
1051 | 1140 | }
.. | ..
1056 | 1145 |
1057 | 1146 |     if (attr->flags)
1058 | 1147 |         return -EINVAL;
| 1148 | +
| 1149 | +    mutex_lock(&kvm->lock);
| 1150 | +    /*
| 1151 | +     * For protected guests, the TOD is managed by the ultravisor, so trying
| 1152 | +     * to change it will never bring the expected results.
| 1153 | +     */
| 1154 | +    if (kvm_s390_pv_is_protected(kvm)) {
| 1155 | +        ret = -EOPNOTSUPP;
| 1156 | +        goto out_unlock;
| 1157 | +    }
1059 | 1158 |
1060 | 1159 |     switch (attr->attr) {
1061 | 1160 |     case KVM_S390_VM_TOD_EXT:
.. | ..
1071 | 1170 |         ret = -ENXIO;
1072 | 1171 |         break;
1073 | 1172 |     }
| 1173 | +
| 1174 | +out_unlock:
| 1175 | +    mutex_unlock(&kvm->lock);
1074 | 1176 |     return ret;
1075 | 1177 | }
1076 | 1178 |
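Note: kvm_s390_set_tod_clock() becomes the lock-held __kvm_s390_set_tod_clock(): the attribute dispatcher now takes kvm->lock once, rejects the operation for protected guests (whose TOD is owned by the ultravisor), and only then calls the helpers. Assuming other call sites still want a self-locking flavor, a wrapper would look roughly like this:

    /* Hypothetical locked wrapper, following the usual __foo/foo naming
     * convention (double underscore = caller already holds kvm->lock). */
    static void kvm_s390_set_tod_clock(struct kvm *kvm,
                                       const struct kvm_s390_vm_tod_clock *gtod)
    {
        mutex_lock(&kvm->lock);
        __kvm_s390_set_tod_clock(kvm, gtod);
        mutex_unlock(&kvm->lock);
    }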
.. | ..
1232 | 1334 | static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
1233 | 1335 |                                           struct kvm_device_attr *attr)
1234 | 1336 | {
1235 | | -    /*
1236 | | -     * Once supported by kernel + hw, we have to store the subfunctions
1237 | | -     * in kvm->arch and remember that user space configured them.
1238 | | -     */
1239 | | -    return -ENXIO;
| 1337 | +    mutex_lock(&kvm->lock);
| 1338 | +    if (kvm->created_vcpus) {
| 1339 | +        mutex_unlock(&kvm->lock);
| 1340 | +        return -EBUSY;
| 1341 | +    }
| 1342 | +
| 1343 | +    if (copy_from_user(&kvm->arch.model.subfuncs, (void __user *)attr->addr,
| 1344 | +                       sizeof(struct kvm_s390_vm_cpu_subfunc))) {
| 1345 | +        mutex_unlock(&kvm->lock);
| 1346 | +        return -EFAULT;
| 1347 | +    }
| 1348 | +    mutex_unlock(&kvm->lock);
| 1349 | +
| 1350 | +    VM_EVENT(kvm, 3, "SET: guest PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
| 1351 | +             ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
| 1352 | +             ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
| 1353 | +             ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
| 1354 | +             ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
| 1355 | +    VM_EVENT(kvm, 3, "SET: guest PTFF subfunc 0x%16.16lx.%16.16lx",
| 1356 | +             ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
| 1357 | +             ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
| 1358 | +    VM_EVENT(kvm, 3, "SET: guest KMAC subfunc 0x%16.16lx.%16.16lx",
| 1359 | +             ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
| 1360 | +             ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
| 1361 | +    VM_EVENT(kvm, 3, "SET: guest KMC subfunc 0x%16.16lx.%16.16lx",
| 1362 | +             ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
| 1363 | +             ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
| 1364 | +    VM_EVENT(kvm, 3, "SET: guest KM subfunc 0x%16.16lx.%16.16lx",
| 1365 | +             ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
| 1366 | +             ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
| 1367 | +    VM_EVENT(kvm, 3, "SET: guest KIMD subfunc 0x%16.16lx.%16.16lx",
| 1368 | +             ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
| 1369 | +             ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
| 1370 | +    VM_EVENT(kvm, 3, "SET: guest KLMD subfunc 0x%16.16lx.%16.16lx",
| 1371 | +             ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
| 1372 | +             ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
| 1373 | +    VM_EVENT(kvm, 3, "SET: guest PCKMO subfunc 0x%16.16lx.%16.16lx",
| 1374 | +             ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
| 1375 | +             ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
| 1376 | +    VM_EVENT(kvm, 3, "SET: guest KMCTR subfunc 0x%16.16lx.%16.16lx",
| 1377 | +             ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
| 1378 | +             ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
| 1379 | +    VM_EVENT(kvm, 3, "SET: guest KMF subfunc 0x%16.16lx.%16.16lx",
| 1380 | +             ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
| 1381 | +             ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
| 1382 | +    VM_EVENT(kvm, 3, "SET: guest KMO subfunc 0x%16.16lx.%16.16lx",
| 1383 | +             ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
| 1384 | +             ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
| 1385 | +    VM_EVENT(kvm, 3, "SET: guest PCC subfunc 0x%16.16lx.%16.16lx",
| 1386 | +             ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
| 1387 | +             ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
| 1388 | +    VM_EVENT(kvm, 3, "SET: guest PPNO subfunc 0x%16.16lx.%16.16lx",
| 1389 | +             ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
| 1390 | +             ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
| 1391 | +    VM_EVENT(kvm, 3, "SET: guest KMA subfunc 0x%16.16lx.%16.16lx",
| 1392 | +             ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
| 1393 | +             ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
| 1394 | +    VM_EVENT(kvm, 3, "SET: guest KDSA subfunc 0x%16.16lx.%16.16lx",
| 1395 | +             ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
| 1396 | +             ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
| 1397 | +    VM_EVENT(kvm, 3, "SET: guest SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
| 1398 | +             ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
| 1399 | +             ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
| 1400 | +             ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
| 1401 | +             ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
| 1402 | +    VM_EVENT(kvm, 3, "SET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
| 1403 | +             ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
| 1404 | +             ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
| 1405 | +             ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
| 1406 | +             ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);
| 1407 | +
| 1408 | +    return 0;
1240 | 1409 | }
1241 | 1410 |
1242 | 1411 | static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
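Note: with this, KVM_S390_VM_CPU_PROCESSOR_SUBFUNC becomes writable — userspace can install a (typically shrunk) subfunction mask, but only before the first vCPU exists, hence the -EBUSY check under kvm->lock. A hypothetical userspace flow, reading the host mask and installing a reduced copy:

    #include <string.h>
    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    /* Hypothetical sketch: fetch the machine (host) subfunction mask, hide
     * the deflate-conversion subfunctions from the guest, install the rest.
     * Must run before KVM_CREATE_VCPU or the kernel returns -EBUSY. */
    static int shrink_subfuncs(int vm_fd)
    {
        struct kvm_s390_vm_cpu_subfunc sub;
        struct kvm_device_attr attr = {
            .group = KVM_S390_VM_CPU_MODEL,
            .attr  = KVM_S390_VM_CPU_MACHINE_SUBFUNC,
            .addr  = (__u64)(unsigned long)&sub,
        };

        if (ioctl(vm_fd, KVM_GET_DEVICE_ATTR, &attr))
            return -1;
        memset(sub.dfltcc, 0, sizeof(sub.dfltcc));
        attr.attr = KVM_S390_VM_CPU_PROCESSOR_SUBFUNC;
        return ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
    }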
.. | ..
1355 | 1524 | static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
1356 | 1525 |                                           struct kvm_device_attr *attr)
1357 | 1526 | {
1358 | | -    /*
1359 | | -     * Once we can actually configure subfunctions (kernel + hw support),
1360 | | -     * we have to check if they were already set by user space, if so copy
1361 | | -     * them from kvm->arch.
1362 | | -     */
1363 | | -    return -ENXIO;
| 1527 | +    if (copy_to_user((void __user *)attr->addr, &kvm->arch.model.subfuncs,
| 1528 | +                     sizeof(struct kvm_s390_vm_cpu_subfunc)))
| 1529 | +        return -EFAULT;
| 1530 | +
| 1531 | +    VM_EVENT(kvm, 3, "GET: guest PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
| 1532 | +             ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
| 1533 | +             ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
| 1534 | +             ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
| 1535 | +             ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
| 1536 | +    VM_EVENT(kvm, 3, "GET: guest PTFF subfunc 0x%16.16lx.%16.16lx",
| 1537 | +             ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
| 1538 | +             ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
| 1539 | +    VM_EVENT(kvm, 3, "GET: guest KMAC subfunc 0x%16.16lx.%16.16lx",
| 1540 | +             ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
| 1541 | +             ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
| 1542 | +    VM_EVENT(kvm, 3, "GET: guest KMC subfunc 0x%16.16lx.%16.16lx",
| 1543 | +             ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
| 1544 | +             ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
| 1545 | +    VM_EVENT(kvm, 3, "GET: guest KM subfunc 0x%16.16lx.%16.16lx",
| 1546 | +             ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
| 1547 | +             ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
| 1548 | +    VM_EVENT(kvm, 3, "GET: guest KIMD subfunc 0x%16.16lx.%16.16lx",
| 1549 | +             ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
| 1550 | +             ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
| 1551 | +    VM_EVENT(kvm, 3, "GET: guest KLMD subfunc 0x%16.16lx.%16.16lx",
| 1552 | +             ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
| 1553 | +             ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
| 1554 | +    VM_EVENT(kvm, 3, "GET: guest PCKMO subfunc 0x%16.16lx.%16.16lx",
| 1555 | +             ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
| 1556 | +             ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
| 1557 | +    VM_EVENT(kvm, 3, "GET: guest KMCTR subfunc 0x%16.16lx.%16.16lx",
| 1558 | +             ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
| 1559 | +             ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
| 1560 | +    VM_EVENT(kvm, 3, "GET: guest KMF subfunc 0x%16.16lx.%16.16lx",
| 1561 | +             ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
| 1562 | +             ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
| 1563 | +    VM_EVENT(kvm, 3, "GET: guest KMO subfunc 0x%16.16lx.%16.16lx",
| 1564 | +             ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
| 1565 | +             ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
| 1566 | +    VM_EVENT(kvm, 3, "GET: guest PCC subfunc 0x%16.16lx.%16.16lx",
| 1567 | +             ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
| 1568 | +             ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
| 1569 | +    VM_EVENT(kvm, 3, "GET: guest PPNO subfunc 0x%16.16lx.%16.16lx",
| 1570 | +             ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
| 1571 | +             ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
| 1572 | +    VM_EVENT(kvm, 3, "GET: guest KMA subfunc 0x%16.16lx.%16.16lx",
| 1573 | +             ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
| 1574 | +             ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
| 1575 | +    VM_EVENT(kvm, 3, "GET: guest KDSA subfunc 0x%16.16lx.%16.16lx",
| 1576 | +             ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
| 1577 | +             ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
| 1578 | +    VM_EVENT(kvm, 3, "GET: guest SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
| 1579 | +             ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
| 1580 | +             ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
| 1581 | +             ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
| 1582 | +             ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
| 1583 | +    VM_EVENT(kvm, 3, "GET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
| 1584 | +             ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
| 1585 | +             ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
| 1586 | +             ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
| 1587 | +             ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);
| 1588 | +
| 1589 | +    return 0;
1364 | 1590 | }
1365 | 1591 |
1366 | 1592 | static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
.. | ..
1369 | 1595 |     if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,
1370 | 1596 |                      sizeof(struct kvm_s390_vm_cpu_subfunc)))
1371 | 1597 |         return -EFAULT;
| 1598 | +
| 1599 | +    VM_EVENT(kvm, 3, "GET: host PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
| 1600 | +             ((unsigned long *) &kvm_s390_available_subfunc.plo)[0],
| 1601 | +             ((unsigned long *) &kvm_s390_available_subfunc.plo)[1],
| 1602 | +             ((unsigned long *) &kvm_s390_available_subfunc.plo)[2],
| 1603 | +             ((unsigned long *) &kvm_s390_available_subfunc.plo)[3]);
| 1604 | +    VM_EVENT(kvm, 3, "GET: host PTFF subfunc 0x%16.16lx.%16.16lx",
| 1605 | +             ((unsigned long *) &kvm_s390_available_subfunc.ptff)[0],
| 1606 | +             ((unsigned long *) &kvm_s390_available_subfunc.ptff)[1]);
| 1607 | +    VM_EVENT(kvm, 3, "GET: host KMAC subfunc 0x%16.16lx.%16.16lx",
| 1608 | +             ((unsigned long *) &kvm_s390_available_subfunc.kmac)[0],
| 1609 | +             ((unsigned long *) &kvm_s390_available_subfunc.kmac)[1]);
| 1610 | +    VM_EVENT(kvm, 3, "GET: host KMC subfunc 0x%16.16lx.%16.16lx",
| 1611 | +             ((unsigned long *) &kvm_s390_available_subfunc.kmc)[0],
| 1612 | +             ((unsigned long *) &kvm_s390_available_subfunc.kmc)[1]);
| 1613 | +    VM_EVENT(kvm, 3, "GET: host KM subfunc 0x%16.16lx.%16.16lx",
| 1614 | +             ((unsigned long *) &kvm_s390_available_subfunc.km)[0],
| 1615 | +             ((unsigned long *) &kvm_s390_available_subfunc.km)[1]);
| 1616 | +    VM_EVENT(kvm, 3, "GET: host KIMD subfunc 0x%16.16lx.%16.16lx",
| 1617 | +             ((unsigned long *) &kvm_s390_available_subfunc.kimd)[0],
| 1618 | +             ((unsigned long *) &kvm_s390_available_subfunc.kimd)[1]);
| 1619 | +    VM_EVENT(kvm, 3, "GET: host KLMD subfunc 0x%16.16lx.%16.16lx",
| 1620 | +             ((unsigned long *) &kvm_s390_available_subfunc.klmd)[0],
| 1621 | +             ((unsigned long *) &kvm_s390_available_subfunc.klmd)[1]);
| 1622 | +    VM_EVENT(kvm, 3, "GET: host PCKMO subfunc 0x%16.16lx.%16.16lx",
| 1623 | +             ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[0],
| 1624 | +             ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[1]);
| 1625 | +    VM_EVENT(kvm, 3, "GET: host KMCTR subfunc 0x%16.16lx.%16.16lx",
| 1626 | +             ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[0],
| 1627 | +             ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[1]);
| 1628 | +    VM_EVENT(kvm, 3, "GET: host KMF subfunc 0x%16.16lx.%16.16lx",
| 1629 | +             ((unsigned long *) &kvm_s390_available_subfunc.kmf)[0],
| 1630 | +             ((unsigned long *) &kvm_s390_available_subfunc.kmf)[1]);
| 1631 | +    VM_EVENT(kvm, 3, "GET: host KMO subfunc 0x%16.16lx.%16.16lx",
| 1632 | +             ((unsigned long *) &kvm_s390_available_subfunc.kmo)[0],
| 1633 | +             ((unsigned long *) &kvm_s390_available_subfunc.kmo)[1]);
| 1634 | +    VM_EVENT(kvm, 3, "GET: host PCC subfunc 0x%16.16lx.%16.16lx",
| 1635 | +             ((unsigned long *) &kvm_s390_available_subfunc.pcc)[0],
| 1636 | +             ((unsigned long *) &kvm_s390_available_subfunc.pcc)[1]);
| 1637 | +    VM_EVENT(kvm, 3, "GET: host PPNO subfunc 0x%16.16lx.%16.16lx",
| 1638 | +             ((unsigned long *) &kvm_s390_available_subfunc.ppno)[0],
| 1639 | +             ((unsigned long *) &kvm_s390_available_subfunc.ppno)[1]);
| 1640 | +    VM_EVENT(kvm, 3, "GET: host KMA subfunc 0x%16.16lx.%16.16lx",
| 1641 | +             ((unsigned long *) &kvm_s390_available_subfunc.kma)[0],
| 1642 | +             ((unsigned long *) &kvm_s390_available_subfunc.kma)[1]);
| 1643 | +    VM_EVENT(kvm, 3, "GET: host KDSA subfunc 0x%16.16lx.%16.16lx",
| 1644 | +             ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[0],
| 1645 | +             ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[1]);
| 1646 | +    VM_EVENT(kvm, 3, "GET: host SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
| 1647 | +             ((unsigned long *) &kvm_s390_available_subfunc.sortl)[0],
| 1648 | +             ((unsigned long *) &kvm_s390_available_subfunc.sortl)[1],
| 1649 | +             ((unsigned long *) &kvm_s390_available_subfunc.sortl)[2],
| 1650 | +             ((unsigned long *) &kvm_s390_available_subfunc.sortl)[3]);
| 1651 | +    VM_EVENT(kvm, 3, "GET: host DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
| 1652 | +             ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[0],
| 1653 | +             ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[1],
| 1654 | +             ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[2],
| 1655 | +             ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[3]);
| 1656 | +
1372 | 1657 |     return 0;
1373 | 1658 | }
| 1659 | +
1374 | 1660 | static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
1375 | 1661 | {
1376 | 1662 |     int ret = -ENXIO;
.. | ..
1488 | 1774 |     case KVM_S390_VM_CPU_PROCESSOR_FEAT:
1489 | 1775 |     case KVM_S390_VM_CPU_MACHINE_FEAT:
1490 | 1776 |     case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
| 1777 | +    case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
1491 | 1778 |         ret = 0;
1492 | 1779 |         break;
1493 | | -    /* configuring subfunctions is not supported yet */
1494 | | -    case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
1495 | 1780 |     default:
1496 | 1781 |         ret = -ENXIO;
1497 | 1782 |         break;
.. | ..
1504 | 1789 |     case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
1505 | 1790 |     case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
1506 | 1791 |         ret = 0;
| 1792 | +        break;
| 1793 | +    case KVM_S390_VM_CRYPTO_ENABLE_APIE:
| 1794 | +    case KVM_S390_VM_CRYPTO_DISABLE_APIE:
| 1795 | +        ret = ap_instructions_available() ? 0 : -ENXIO;
1507 | 1796 |         break;
1508 | 1797 |     default:
1509 | 1798 |         ret = -ENXIO;
.. | ..
1542 | 1831 |     if (!keys)
1543 | 1832 |         return -ENOMEM;
1544 | 1833 |
1545 | | -    down_read(&current->mm->mmap_sem);
| 1834 | +    mmap_read_lock(current->mm);
1546 | 1835 |     srcu_idx = srcu_read_lock(&kvm->srcu);
1547 | 1836 |     for (i = 0; i < args->count; i++) {
1548 | 1837 |         hva = gfn_to_hva(kvm, args->start_gfn + i);
.. | ..
1556 | 1845 |             break;
1557 | 1846 |     }
1558 | 1847 |     srcu_read_unlock(&kvm->srcu, srcu_idx);
1559 | | -    up_read(&current->mm->mmap_sem);
| 1848 | +    mmap_read_unlock(current->mm);
1560 | 1849 |
1561 | 1850 |     if (!r) {
1562 | 1851 |         r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
.. | ..
1600 | 1889 |         goto out;
1601 | 1890 |
1602 | 1891 |     i = 0;
1603 | | -    down_read(&current->mm->mmap_sem);
| 1892 | +    mmap_read_lock(current->mm);
1604 | 1893 |     srcu_idx = srcu_read_lock(&kvm->srcu);
1605 | 1894 |     while (i < args->count) {
1606 | 1895 |         unlocked = false;
.. | ..
1618 | 1907 |
1619 | 1908 |         r = set_guest_storage_key(current->mm, hva, keys[i], 0);
1620 | 1909 |         if (r) {
1621 | | -            r = fixup_user_fault(current, current->mm, hva,
| 1910 | +            r = fixup_user_fault(current->mm, hva,
1622 | 1911 |                                  FAULT_FLAG_WRITE, &unlocked);
1623 | 1912 |             if (r)
1624 | 1913 |                 break;
.. | ..
1627 | 1916 |         i++;
1628 | 1917 |     }
1629 | 1918 |     srcu_read_unlock(&kvm->srcu, srcu_idx);
1630 | | -    up_read(&current->mm->mmap_sem);
| 1919 | +    mmap_read_unlock(current->mm);
1631 | 1920 | out:
1632 | 1921 |     kvfree(keys);
1633 | 1922 |     return r;
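Note: alongside the mmap-lock conversion, fixup_user_fault() loses its task_struct argument — the gup code only ever operated on "current", so the parameter was dropped tree-wide and the mm is passed alone. Paraphrased signature after the change:

    /* Paraphrased from include/linux/mm.h after the gup cleanup: */
    int fixup_user_fault(struct mm_struct *mm, unsigned long address,
                         unsigned int fault_flags, bool *unlocked);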
.. | ..
1716 | 2005 |         ms = slots->memslots + slotidx;
1717 | 2006 |         ofs = 0;
1718 | 2007 |     }
| 2008 | +
| 2009 | +    if (cur_gfn < ms->base_gfn)
| 2010 | +        ofs = 0;
| 2011 | +
1719 | 2012 |     ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, ofs);
1720 | 2013 |     while ((slotidx > 0) && (ofs >= ms->npages)) {
1721 | 2014 |         slotidx--;
.. | ..
1731 | 2024 |     unsigned long mem_end, cur_gfn, next_gfn, hva, pgstev;
1732 | 2025 |     struct kvm_memslots *slots = kvm_memslots(kvm);
1733 | 2026 |     struct kvm_memory_slot *ms;
| 2027 | +
| 2028 | +    if (unlikely(!slots->used_slots))
| 2029 | +        return 0;
1734 | 2030 |
1735 | 2031 |     cur_gfn = kvm_s390_next_dirty_cmma(slots, args->start_gfn);
1736 | 2032 |     ms = gfn_to_memslot(kvm, cur_gfn);
.. | ..
1813 | 2109 |     if (!values)
1814 | 2110 |         return -ENOMEM;
1815 | 2111 |
1816 | | -    down_read(&kvm->mm->mmap_sem);
| 2112 | +    mmap_read_lock(kvm->mm);
1817 | 2113 |     srcu_idx = srcu_read_lock(&kvm->srcu);
1818 | 2114 |     if (peek)
1819 | 2115 |         ret = kvm_s390_peek_cmma(kvm, args, values, bufsize);
1820 | 2116 |     else
1821 | 2117 |         ret = kvm_s390_get_cmma(kvm, args, values, bufsize);
1822 | 2118 |     srcu_read_unlock(&kvm->srcu, srcu_idx);
1823 | | -    up_read(&kvm->mm->mmap_sem);
| 2119 | +    mmap_read_unlock(kvm->mm);
1824 | 2120 |
1825 | 2121 |     if (kvm->arch.migration_mode)
1826 | 2122 |         args->remaining = atomic64_read(&kvm->arch.cmma_dirty_pages);
.. | ..
1870 | 2166 |         goto out;
1871 | 2167 |     }
1872 | 2168 |
1873 | | -    down_read(&kvm->mm->mmap_sem);
| 2169 | +    mmap_read_lock(kvm->mm);
1874 | 2170 |     srcu_idx = srcu_read_lock(&kvm->srcu);
1875 | 2171 |     for (i = 0; i < args->count; i++) {
1876 | 2172 |         hva = gfn_to_hva(kvm, args->start_gfn + i);
.. | ..
1885 | 2181 |         set_pgste_bits(kvm->mm, hva, mask, pgstev);
1886 | 2182 |     }
1887 | 2183 |     srcu_read_unlock(&kvm->srcu, srcu_idx);
1888 | | -    up_read(&kvm->mm->mmap_sem);
| 2184 | +    mmap_read_unlock(kvm->mm);
1889 | 2185 |
1890 | 2186 |     if (!kvm->mm->context.uses_cmm) {
1891 | | -        down_write(&kvm->mm->mmap_sem);
| 2187 | +        mmap_write_lock(kvm->mm);
1892 | 2188 |         kvm->mm->context.uses_cmm = 1;
1893 | | -        up_write(&kvm->mm->mmap_sem);
| 2189 | +        mmap_write_unlock(kvm->mm);
1894 | 2190 |     }
1895 | 2191 | out:
1896 | 2192 |     vfree(bits);
| 2193 | +    return r;
| 2194 | +}
| 2195 | +
| 2196 | +static int kvm_s390_cpus_from_pv(struct kvm *kvm, u16 *rcp, u16 *rrcp)
| 2197 | +{
| 2198 | +    struct kvm_vcpu *vcpu;
| 2199 | +    u16 rc, rrc;
| 2200 | +    int ret = 0;
| 2201 | +    int i;
| 2202 | +
| 2203 | +    /*
| 2204 | +     * We ignore failures and try to destroy as many CPUs as possible.
| 2205 | +     * At the same time we must not free the assigned resources when
| 2206 | +     * this fails, as the ultravisor has still access to that memory.
| 2207 | +     * So kvm_s390_pv_destroy_cpu can leave a "wanted" memory leak
| 2208 | +     * behind.
| 2209 | +     * We want to return the first failure rc and rrc, though.
| 2210 | +     */
| 2211 | +    kvm_for_each_vcpu(i, vcpu, kvm) {
| 2212 | +        mutex_lock(&vcpu->mutex);
| 2213 | +        if (kvm_s390_pv_destroy_cpu(vcpu, &rc, &rrc) && !ret) {
| 2214 | +            *rcp = rc;
| 2215 | +            *rrcp = rrc;
| 2216 | +            ret = -EIO;
| 2217 | +        }
| 2218 | +        mutex_unlock(&vcpu->mutex);
| 2219 | +    }
| 2220 | +    return ret;
| 2221 | +}
| 2222 | +
| 2223 | +static int kvm_s390_cpus_to_pv(struct kvm *kvm, u16 *rc, u16 *rrc)
| 2224 | +{
| 2225 | +    int i, r = 0;
| 2226 | +    u16 dummy;
| 2227 | +
| 2228 | +    struct kvm_vcpu *vcpu;
| 2229 | +
| 2230 | +    kvm_for_each_vcpu(i, vcpu, kvm) {
| 2231 | +        mutex_lock(&vcpu->mutex);
| 2232 | +        r = kvm_s390_pv_create_cpu(vcpu, rc, rrc);
| 2233 | +        mutex_unlock(&vcpu->mutex);
| 2234 | +        if (r)
| 2235 | +            break;
| 2236 | +    }
| 2237 | +    if (r)
| 2238 | +        kvm_s390_cpus_from_pv(kvm, &dummy, &dummy);
| 2239 | +    return r;
| 2240 | +}
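Note: the two conversion helpers implement an all-or-nothing policy — kvm_s390_cpus_to_pv() rolls every vCPU back on the first failure, while kvm_s390_cpus_from_pv() keeps going and reports only the first rc/rrc pair, deliberately leaking whatever the ultravisor still owns. Assumed prototypes of the per-object helpers they drive (from kvm-s390.h and the protected-virtualization code):

    /* Assumed prototypes: each wraps an ultravisor call and reports the UV
     * return code (rc) and reason code (rrc) back to the caller. */
    int kvm_s390_pv_create_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc);
    int kvm_s390_pv_destroy_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc);
    int kvm_s390_pv_init_vm(struct kvm *kvm, u16 *rc, u16 *rrc);
    int kvm_s390_pv_deinit_vm(struct kvm *kvm, u16 *rc, u16 *rrc);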
| 2241 | +
| 2242 | +static int kvm_s390_handle_pv(struct kvm *kvm, struct kvm_pv_cmd *cmd)
| 2243 | +{
| 2244 | +    int r = 0;
| 2245 | +    u16 dummy;
| 2246 | +    void __user *argp = (void __user *)cmd->data;
| 2247 | +
| 2248 | +    switch (cmd->cmd) {
| 2249 | +    case KVM_PV_ENABLE: {
| 2250 | +        r = -EINVAL;
| 2251 | +        if (kvm_s390_pv_is_protected(kvm))
| 2252 | +            break;
| 2253 | +
| 2254 | +        /*
| 2255 | +         * FMT 4 SIE needs esca. As we never switch back to bsca from
| 2256 | +         * esca, we need no cleanup in the error cases below
| 2257 | +         */
| 2258 | +        r = sca_switch_to_extended(kvm);
| 2259 | +        if (r)
| 2260 | +            break;
| 2261 | +
| 2262 | +        mmap_write_lock(current->mm);
| 2263 | +        r = gmap_mark_unmergeable();
| 2264 | +        mmap_write_unlock(current->mm);
| 2265 | +        if (r)
| 2266 | +            break;
| 2267 | +
| 2268 | +        r = kvm_s390_pv_init_vm(kvm, &cmd->rc, &cmd->rrc);
| 2269 | +        if (r)
| 2270 | +            break;
| 2271 | +
| 2272 | +        r = kvm_s390_cpus_to_pv(kvm, &cmd->rc, &cmd->rrc);
| 2273 | +        if (r)
| 2274 | +            kvm_s390_pv_deinit_vm(kvm, &dummy, &dummy);
| 2275 | +
| 2276 | +        /* we need to block service interrupts from now on */
| 2277 | +        set_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
| 2278 | +        break;
| 2279 | +    }
| 2280 | +    case KVM_PV_DISABLE: {
| 2281 | +        r = -EINVAL;
| 2282 | +        if (!kvm_s390_pv_is_protected(kvm))
| 2283 | +            break;
| 2284 | +
| 2285 | +        r = kvm_s390_cpus_from_pv(kvm, &cmd->rc, &cmd->rrc);
| 2286 | +        /*
| 2287 | +         * If a CPU could not be destroyed, destroy VM will also fail.
| 2288 | +         * There is no point in trying to destroy it. Instead return
| 2289 | +         * the rc and rrc from the first CPU that failed destroying.
| 2290 | +         */
| 2291 | +        if (r)
| 2292 | +            break;
| 2293 | +        r = kvm_s390_pv_deinit_vm(kvm, &cmd->rc, &cmd->rrc);
| 2294 | +
| 2295 | +        /* no need to block service interrupts any more */
| 2296 | +        clear_bit(IRQ_PEND_EXT_SERVICE, &kvm->arch.float_int.masked_irqs);
| 2297 | +        break;
| 2298 | +    }
| 2299 | +    case KVM_PV_SET_SEC_PARMS: {
| 2300 | +        struct kvm_s390_pv_sec_parm parms = {};
| 2301 | +        void *hdr;
| 2302 | +
| 2303 | +        r = -EINVAL;
| 2304 | +        if (!kvm_s390_pv_is_protected(kvm))
| 2305 | +            break;
| 2306 | +
| 2307 | +        r = -EFAULT;
| 2308 | +        if (copy_from_user(&parms, argp, sizeof(parms)))
| 2309 | +            break;
| 2310 | +
| 2311 | +        /* Currently restricted to 8KB */
| 2312 | +        r = -EINVAL;
| 2313 | +        if (parms.length > PAGE_SIZE * 2)
| 2314 | +            break;
| 2315 | +
| 2316 | +        r = -ENOMEM;
| 2317 | +        hdr = vmalloc(parms.length);
| 2318 | +        if (!hdr)
| 2319 | +            break;
| 2320 | +
| 2321 | +        r = -EFAULT;
| 2322 | +        if (!copy_from_user(hdr, (void __user *)parms.origin,
| 2323 | +                            parms.length))
| 2324 | +            r = kvm_s390_pv_set_sec_parms(kvm, hdr, parms.length,
| 2325 | +                                          &cmd->rc, &cmd->rrc);
| 2326 | +
| 2327 | +        vfree(hdr);
| 2328 | +        break;
| 2329 | +    }
| 2330 | +    case KVM_PV_UNPACK: {
| 2331 | +        struct kvm_s390_pv_unp unp = {};
| 2332 | +
| 2333 | +        r = -EINVAL;
| 2334 | +        if (!kvm_s390_pv_is_protected(kvm) || !mm_is_protected(kvm->mm))
| 2335 | +            break;
| 2336 | +
| 2337 | +        r = -EFAULT;
| 2338 | +        if (copy_from_user(&unp, argp, sizeof(unp)))
| 2339 | +            break;
| 2340 | +
| 2341 | +        r = kvm_s390_pv_unpack(kvm, unp.addr, unp.size, unp.tweak,
| 2342 | +                               &cmd->rc, &cmd->rrc);
| 2343 | +        break;
| 2344 | +    }
| 2345 | +    case KVM_PV_VERIFY: {
| 2346 | +        r = -EINVAL;
| 2347 | +        if (!kvm_s390_pv_is_protected(kvm))
| 2348 | +            break;
| 2349 | +
| 2350 | +        r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
| 2351 | +                          UVC_CMD_VERIFY_IMG, &cmd->rc, &cmd->rrc);
| 2352 | +        KVM_UV_EVENT(kvm, 3, "PROTVIRT VERIFY: rc %x rrc %x", cmd->rc,
| 2353 | +                     cmd->rrc);
| 2354 | +        break;
| 2355 | +    }
| 2356 | +    case KVM_PV_PREP_RESET: {
| 2357 | +        r = -EINVAL;
| 2358 | +        if (!kvm_s390_pv_is_protected(kvm))
| 2359 | +            break;
---|
| 2360 | + |
---|
| 2361 | + r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm), |
---|
| 2362 | + UVC_CMD_PREPARE_RESET, &cmd->rc, &cmd->rrc); |
---|
| 2363 | + KVM_UV_EVENT(kvm, 3, "PROTVIRT PREP RESET: rc %x rrc %x", |
---|
| 2364 | + cmd->rc, cmd->rrc); |
---|
| 2365 | + break; |
---|
| 2366 | + } |
---|
| 2367 | + case KVM_PV_UNSHARE_ALL: { |
---|
| 2368 | + r = -EINVAL; |
---|
| 2369 | + if (!kvm_s390_pv_is_protected(kvm)) |
---|
| 2370 | + break; |
---|
| 2371 | + |
---|
| 2372 | + r = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm), |
---|
| 2373 | + UVC_CMD_SET_UNSHARE_ALL, &cmd->rc, &cmd->rrc); |
---|
| 2374 | + KVM_UV_EVENT(kvm, 3, "PROTVIRT UNSHARE: rc %x rrc %x", |
---|
| 2375 | + cmd->rc, cmd->rrc); |
---|
| 2376 | + break; |
---|
| 2377 | + } |
---|
| 2378 | + default: |
---|
| 2379 | + r = -ENOTTY; |
---|
| 2380 | + } |
---|
1897 | 2381 | return r; |
---|
1898 | 2382 | } |
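
The new handler is driven from userspace through the KVM_S390_PV_COMMAND vm ioctl added further below. As a minimal, hedged sketch of the VMM side — assuming only the uapi definitions from <linux/kvm.h> and a vm_fd obtained via KVM_CREATE_VM:

	/* Hedged sketch: ask KVM to transition the guest into protected mode. */
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	static int pv_enable(int vm_fd)
	{
		struct kvm_pv_cmd cmd = {
			.cmd = KVM_PV_ENABLE,
			.flags = 0,	/* must be zero, see the flags check in the ioctl below */
		};
		int rc = ioctl(vm_fd, KVM_S390_PV_COMMAND, &cmd);

		if (rc)
			/* cmd.rc/cmd.rrc carry the ultravisor return codes */
			fprintf(stderr, "PV_ENABLE: rc %x rrc %x\n", cmd.rc, cmd.rrc);
		return rc;
	}

A full bring-up would continue with KVM_PV_SET_SEC_PARMS, one or more KVM_PV_UNPACK calls and KVM_PV_VERIFY, mirroring the cases above.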
 
[...]
 		if (copy_from_user(&s390int, argp, sizeof(s390int)))
 			break;
 		r = kvm_s390_inject_vm(kvm, &s390int);
-		break;
-	}
-	case KVM_ENABLE_CAP: {
-		struct kvm_enable_cap cap;
-		r = -EFAULT;
-		if (copy_from_user(&cap, argp, sizeof(cap)))
-			break;
-		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
 		break;
 	}
 	case KVM_CREATE_IRQCHIP: {
[...]
 		mutex_unlock(&kvm->slots_lock);
 		break;
 	}
+	case KVM_S390_PV_COMMAND: {
+		struct kvm_pv_cmd args;
+
+		/* protvirt means user sigp */
+		kvm->arch.user_cpu_state_ctrl = 1;
+		r = 0;
+		if (!is_prot_virt_host()) {
+			r = -EINVAL;
+			break;
+		}
+		if (copy_from_user(&args, argp, sizeof(args))) {
+			r = -EFAULT;
+			break;
+		}
+		if (args.flags) {
+			r = -EINVAL;
+			break;
+		}
+		mutex_lock(&kvm->lock);
+		r = kvm_s390_handle_pv(kvm, &args);
+		mutex_unlock(&kvm->lock);
+		if (copy_to_user(argp, &args, sizeof(args))) {
+			r = -EFAULT;
+			break;
+		}
+		break;
+	}
 	default:
 		r = -ENOTTY;
 	}
[...]
 	return r;
 }
 
-static int kvm_s390_query_ap_config(u8 *config)
-{
-	u32 fcn_code = 0x04000000UL;
-	u32 cc = 0;
-
-	memset(config, 0, 128);
-	asm volatile(
-		"lgr 0,%1\n"
-		"lgr 2,%2\n"
-		".long 0xb2af0000\n"		/* PQAP(QCI) */
-		"0: ipm %0\n"
-		"srl %0,28\n"
-		"1:\n"
-		EX_TABLE(0b, 1b)
-		: "+r" (cc)
-		: "r" (fcn_code), "r" (config)
-		: "cc", "0", "2", "memory"
-	);
-
-	return cc;
-}
-
 static int kvm_s390_apxa_installed(void)
 {
-	u8 config[128];
-	int cc;
+	struct ap_config_info info;
 
-	if (test_facility(12)) {
-		cc = kvm_s390_query_ap_config(config);
-
-		if (cc)
-			pr_err("PQAP(QCI) failed with cc=%d", cc);
-		else
-			return config[0] & 0x40;
+	if (ap_instructions_available()) {
+		if (ap_qci(&info) == 0)
+			return info.apxa;
 	}
 
 	return 0;
 }
 
+/*
+ * The format of the crypto control block (CRYCB) is specified in the 3 low
+ * order bits of the CRYCB designation (CRYCBD) field as follows:
+ * Format 0: Neither the message security assist extension 3 (MSAX3) nor the
+ *	     AP extended addressing (APXA) facility is installed.
+ * Format 1: The APXA facility is not installed but the MSAX3 facility is.
+ * Format 2: Both the APXA and MSAX3 facilities are installed.
+ */
 static void kvm_s390_set_crycb_format(struct kvm *kvm)
 {
 	kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;
+
+	/* Clear the CRYCB format bits - i.e., set format 0 by default */
+	kvm->arch.crypto.crycbd &= ~(CRYCB_FORMAT_MASK);
+
+	/* Check whether MSAX3 is installed */
+	if (!test_kvm_facility(kvm, 76))
+		return;
 
 	if (kvm_s390_apxa_installed())
 		kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
 	else
 		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
 }
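
To make the designation layout concrete, a small illustrative helper — not part of the kernel, relying only on the CRYCB_FORMAT_* constants already used above:

	/* Sketch: decode which APCB layout a given CRYCBD value selects. */
	static inline bool crycbd_uses_apcb1(u32 crycbd)
	{
		/* Only format 2 (MSAX3 + APXA) uses the 256-bit APCB1 masks;
		 * formats 0 and 1 use the smaller APCB0. */
		return (crycbd & CRYCB_FORMAT_MASK) == CRYCB_FORMAT2;
	}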
+
+void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm,
+			       unsigned long *aqm, unsigned long *adm)
+{
+	struct kvm_s390_crypto_cb *crycb = kvm->arch.crypto.crycb;
+
+	mutex_lock(&kvm->lock);
+	kvm_s390_vcpu_block_all(kvm);
+
+	switch (kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) {
+	case CRYCB_FORMAT2: /* APCB1 uses 256 bits */
+		memcpy(crycb->apcb1.apm, apm, 32);
+		VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx %016lx %016lx %016lx",
+			 apm[0], apm[1], apm[2], apm[3]);
+		memcpy(crycb->apcb1.aqm, aqm, 32);
+		VM_EVENT(kvm, 3, "SET CRYCB: aqm %016lx %016lx %016lx %016lx",
+			 aqm[0], aqm[1], aqm[2], aqm[3]);
+		memcpy(crycb->apcb1.adm, adm, 32);
+		VM_EVENT(kvm, 3, "SET CRYCB: adm %016lx %016lx %016lx %016lx",
+			 adm[0], adm[1], adm[2], adm[3]);
+		break;
+	case CRYCB_FORMAT1:
+	case CRYCB_FORMAT0: /* Fall through - both use APCB0 */
+		memcpy(crycb->apcb0.apm, apm, 8);
+		memcpy(crycb->apcb0.aqm, aqm, 2);
+		memcpy(crycb->apcb0.adm, adm, 2);
+		VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx aqm %04x adm %04x",
+			 apm[0], *((unsigned short *)aqm),
+			 *((unsigned short *)adm));
+		break;
+	default:	/* Cannot happen */
+		break;
+	}
+
+	/* recreate the shadow crycb for each vcpu */
+	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
+	kvm_s390_vcpu_unblock_all(kvm);
+	mutex_unlock(&kvm->lock);
+}
+EXPORT_SYMBOL_GPL(kvm_arch_crypto_set_masks);
+
+void kvm_arch_crypto_clear_masks(struct kvm *kvm)
+{
+	mutex_lock(&kvm->lock);
+	kvm_s390_vcpu_block_all(kvm);
+
+	memset(&kvm->arch.crypto.crycb->apcb0, 0,
+	       sizeof(kvm->arch.crypto.crycb->apcb0));
+	memset(&kvm->arch.crypto.crycb->apcb1, 0,
+	       sizeof(kvm->arch.crypto.crycb->apcb1));
+
+	VM_EVENT(kvm, 3, "%s", "CLR CRYCB:");
+	/* recreate the shadow crycb for each vcpu */
+	kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
+	kvm_s390_vcpu_unblock_all(kvm);
+	mutex_unlock(&kvm->lock);
+}
+EXPORT_SYMBOL_GPL(kvm_arch_crypto_clear_masks);
 
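A hypothetical caller sketch for the new exports (in-tree, the consumer is the vfio_ap device driver): the masks use the machine's MSB-first bit numbering, hence set_bit_inv() rather than plain set_bit():

	/* Hedged sketch: grant AP adapter 0 and domain 0 to the guest.
	 * Array sizes match the 256-bit APCB1 case handled above. */
	unsigned long apm[4] = { 0 }, aqm[4] = { 0 }, adm[4] = { 0 };

	set_bit_inv(0, apm);	/* adapter (card) 0 */
	set_bit_inv(0, aqm);	/* usage domain (queue) 0 */
	set_bit_inv(0, adm);	/* control domain 0 */
	kvm_arch_crypto_set_masks(kvm, apm, aqm, adm);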
 static u64 kvm_s390_get_initial_cpuid(void)
 {
[...]
 
 static void kvm_s390_crypto_init(struct kvm *kvm)
 {
-	if (!test_kvm_facility(kvm, 76))
-		return;
-
 	kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
 	kvm_s390_set_crycb_format(kvm);
+
+	if (!test_kvm_facility(kvm, 76))
+		return;
 
 	/* Enable AES/DEA protected key functions by default */
 	kvm->arch.crypto.aes_kw = 1;
[...]
 	if (!kvm->arch.sie_page2)
 		goto out_err;
 
+	kvm->arch.sie_page2->kvm = kvm;
 	kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
 
 	for (i = 0; i < kvm_s390_fac_size(); i++) {
[...]
 		kvm->arch.model.fac_list[i] = S390_lowcore.stfle_fac_list[i] &
 					      kvm_s390_fac_base[i];
 	}
+	kvm->arch.model.subfuncs = kvm_s390_available_subfunc;
 
 	/* we are always in czam mode - even on pre z14 machines */
 	set_kvm_facility(kvm->arch.model.fac_mask, 138);
[...]
 		set_kvm_facility(kvm->arch.model.fac_mask, 147);
 		set_kvm_facility(kvm->arch.model.fac_list, 147);
 	}
+
+	if (css_general_characteristics.aiv && test_facility(65))
+		set_kvm_facility(kvm->arch.model.fac_mask, 65);
 
 	kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
 	kvm->arch.model.ibc = sclp.ibc & 0x0fff;
[...]
 	kvm->arch.use_skf = sclp.has_skey;
 	spin_lock_init(&kvm->arch.start_stop_lock);
 	kvm_s390_vsie_init(kvm);
-	kvm_s390_gisa_init(kvm);
+	if (use_gisa)
+		kvm_s390_gisa_init(kvm);
 	KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);
 
 	return 0;
[...]
 	return rc;
 }
 
-bool kvm_arch_has_vcpu_debugfs(void)
-{
-	return false;
-}
-
-int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
-{
-	return 0;
-}
-
 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 {
+	u16 rc, rrc;
+
 	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
 	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
 	kvm_s390_clear_local_irqs(vcpu);
[...]
 
 	if (vcpu->kvm->arch.use_cmma)
 		kvm_s390_vcpu_unsetup_cmma(vcpu);
+	/* We cannot hold the vcpu mutex here, we are already dying */
+	if (kvm_s390_pv_cpu_get_handle(vcpu))
+		kvm_s390_pv_destroy_cpu(vcpu, &rc, &rrc);
 	free_page((unsigned long)(vcpu->arch.sie_block));
-
-	kvm_vcpu_uninit(vcpu);
-	kmem_cache_free(kvm_vcpu_cache, vcpu);
 }
 
 static void kvm_free_vcpus(struct kvm *kvm)
[...]
 	struct kvm_vcpu *vcpu;
 
 	kvm_for_each_vcpu(i, vcpu, kvm)
-		kvm_arch_vcpu_destroy(vcpu);
+		kvm_vcpu_destroy(vcpu);
 
 	mutex_lock(&kvm->lock);
 	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
[...]
 
 void kvm_arch_destroy_vm(struct kvm *kvm)
 {
+	u16 rc, rrc;
+
 	kvm_free_vcpus(kvm);
 	sca_dispose(kvm);
-	debug_unregister(kvm->arch.dbf);
 	kvm_s390_gisa_destroy(kvm);
+	/*
+	 * We are already at the end of life and kvm->lock is not taken.
+	 * This is ok as the file descriptor is closed by now and nobody
+	 * can mess with the pv state. To avoid lockdep_assert_held from
+	 * complaining we do not use kvm_s390_pv_is_protected.
+	 */
+	if (kvm_s390_pv_get_handle(kvm))
+		kvm_s390_pv_deinit_vm(kvm, &rc, &rrc);
+	debug_unregister(kvm->arch.dbf);
 	free_page((unsigned long)kvm->arch.sie_page2);
 	if (!kvm_is_ucontrol(kvm))
 		gmap_remove(kvm->arch.gmap);
[...]
 	unsigned int vcpu_idx;
 	u32 scaol, scaoh;
 
+	if (kvm->arch.use_esca)
+		return 0;
+
 	new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL|__GFP_ZERO);
 	if (!new_sca)
 		return -ENOMEM;
[...]
 	mutex_unlock(&kvm->lock);
 
 	return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
-}
-
-int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
-{
-	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
-	kvm_clear_async_pf_completion_queue(vcpu);
-	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
-				    KVM_SYNC_GPRS |
-				    KVM_SYNC_ACRS |
-				    KVM_SYNC_CRS |
-				    KVM_SYNC_ARCH0 |
-				    KVM_SYNC_PFAULT;
-	kvm_s390_set_prefix(vcpu, 0);
-	if (test_kvm_facility(vcpu->kvm, 64))
-		vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
-	if (test_kvm_facility(vcpu->kvm, 82))
-		vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC;
-	if (test_kvm_facility(vcpu->kvm, 133))
-		vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB;
-	if (test_kvm_facility(vcpu->kvm, 156))
-		vcpu->run->kvm_valid_regs |= KVM_SYNC_ETOKEN;
-	/* fprs can be synchronized via vrs, even if the guest has no vx. With
-	 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
-	 */
-	if (MACHINE_HAS_VX)
-		vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
-	else
-		vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;
-
-	if (kvm_is_ucontrol(vcpu->kvm))
-		return __kvm_ucontrol_vcpu_init(vcpu);
-
-	return 0;
 }
 
 /* needs disabled preemption to protect from TOD sync and vcpu_load/put */
[...]
 
 }
 
-static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
-{
-	/* this equals initial cpu reset in pop, but we don't switch to ESA */
-	vcpu->arch.sie_block->gpsw.mask = 0UL;
-	vcpu->arch.sie_block->gpsw.addr = 0UL;
-	kvm_s390_set_prefix(vcpu, 0);
-	kvm_s390_set_cpu_timer(vcpu, 0);
-	vcpu->arch.sie_block->ckc = 0UL;
-	vcpu->arch.sie_block->todpr = 0;
-	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
-	vcpu->arch.sie_block->gcr[0] = CR0_UNUSED_56 |
-					CR0_INTERRUPT_KEY_SUBMASK |
-					CR0_MEASUREMENT_ALERT_SUBMASK;
-	vcpu->arch.sie_block->gcr[14] = CR14_UNUSED_32 |
-					CR14_UNUSED_33 |
-					CR14_EXTERNAL_DAMAGE_SUBMASK;
-	vcpu->run->s.regs.fpc = 0;
-	vcpu->arch.sie_block->gbea = 1;
-	vcpu->arch.sie_block->pp = 0;
-	vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
-	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
-	kvm_clear_async_pf_completion_queue(vcpu);
-	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
-		kvm_s390_vcpu_stop(vcpu);
-	kvm_s390_clear_local_irqs(vcpu);
-}
-
 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 {
 	mutex_lock(&vcpu->kvm->lock);
[...]
 	vcpu->arch.enabled_gmap = vcpu->arch.gmap;
 }
 
+static bool kvm_has_pckmo_subfunc(struct kvm *kvm, unsigned long nr)
+{
+	if (test_bit_inv(nr, (unsigned long *)&kvm->arch.model.subfuncs.pckmo) &&
+	    test_bit_inv(nr, (unsigned long *)&kvm_s390_available_subfunc.pckmo))
+		return true;
+	return false;
+}
+
+static bool kvm_has_pckmo_ecc(struct kvm *kvm)
+{
+	/* At least one ECC subfunction must be present */
+	return kvm_has_pckmo_subfunc(kvm, 32) ||
+	       kvm_has_pckmo_subfunc(kvm, 33) ||
+	       kvm_has_pckmo_subfunc(kvm, 34) ||
+	       kvm_has_pckmo_subfunc(kvm, 40) ||
+	       kvm_has_pckmo_subfunc(kvm, 41);
+}
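
The inverted-bit helpers matter here because s390 query masks are numbered MSB-first. A note on the equivalence, assuming the usual arch/s390 bitops definition:

	/*
	 * test_bit_inv(nr, ptr) mirrors the bit number within each long:
	 *
	 *	test_bit_inv(nr, ptr) == test_bit(nr ^ (BITS_PER_LONG - 1), ptr)
	 *
	 * e.g. subfunction 33 (one of the ECC subfunctions probed above) is
	 * CPU bit 33 of the query mask, which is test_bit() bit 30 of the
	 * first 64-bit word.
	 */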
+
 static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
 {
-	if (!test_kvm_facility(vcpu->kvm, 76))
+	/*
+	 * If the AP instructions are not being interpreted and the MSAX3
+	 * facility is not configured for the guest, there is nothing to set up.
+	 */
+	if (!vcpu->kvm->arch.crypto.apie && !test_kvm_facility(vcpu->kvm, 76))
 		return;
 
+	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
 	vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
+	vcpu->arch.sie_block->eca &= ~ECA_APIE;
+	vcpu->arch.sie_block->ecd &= ~ECD_ECC;
 
-	if (vcpu->kvm->arch.crypto.aes_kw)
+	if (vcpu->kvm->arch.crypto.apie)
+		vcpu->arch.sie_block->eca |= ECA_APIE;
+
+	/* Set up protected key support */
+	if (vcpu->kvm->arch.crypto.aes_kw) {
 		vcpu->arch.sie_block->ecb3 |= ECB3_AES;
+		/* ECC is also wrapped with the AES key */
+		if (kvm_has_pckmo_ecc(vcpu->kvm))
+			vcpu->arch.sie_block->ecd |= ECD_ECC;
+	}
+
 	if (vcpu->kvm->arch.crypto.dea_kw)
 		vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
-
-	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
 }
 
 void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
[...]
 	vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list;
 }
 
-int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
+static int kvm_s390_vcpu_setup(struct kvm_vcpu *vcpu)
 {
 	int rc = 0;
+	u16 uvrc, uvrrc;
 
 	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
 						    CPUSTAT_SM |
[...]
 	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
 
+	vcpu->arch.sie_block->hpid = HPID_KVM;
+
 	kvm_s390_vcpu_crypto_setup(vcpu);
+
+	mutex_lock(&vcpu->kvm->lock);
+	if (kvm_s390_pv_is_protected(vcpu->kvm)) {
+		rc = kvm_s390_pv_create_cpu(vcpu, &uvrc, &uvrrc);
+		if (rc)
+			kvm_s390_vcpu_unsetup_cmma(vcpu);
+	}
+	mutex_unlock(&vcpu->kvm->lock);
 
 	return rc;
 }
 
-struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
-				      unsigned int id)
+int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
 {
-	struct kvm_vcpu *vcpu;
-	struct sie_page *sie_page;
-	int rc = -EINVAL;
-
 	if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
-		goto out;
+		return -EINVAL;
+	return 0;
+}
 
-	rc = -ENOMEM;
-
-	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
-	if (!vcpu)
-		goto out;
+int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
+{
+	struct sie_page *sie_page;
+	int rc;
 
 	BUILD_BUG_ON(sizeof(struct sie_page) != 4096);
 	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
 	if (!sie_page)
-		goto out_free_cpu;
+		return -ENOMEM;
 
 	vcpu->arch.sie_block = &sie_page->sie_block;
 	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
[...]
 	vcpu->arch.sie_block->mso = 0;
 	vcpu->arch.sie_block->msl = sclp.hamax;
 
-	vcpu->arch.sie_block->icpua = id;
+	vcpu->arch.sie_block->icpua = vcpu->vcpu_id;
 	spin_lock_init(&vcpu->arch.local_int.lock);
-	vcpu->arch.sie_block->gd = (u32)(u64)kvm->arch.gisa;
+	vcpu->arch.sie_block->gd = (u32)(u64)vcpu->kvm->arch.gisa_int.origin;
 	if (vcpu->arch.sie_block->gd && sclp.has_gisaf)
 		vcpu->arch.sie_block->gd |= GISA_FORMAT1;
 	seqcount_init(&vcpu->arch.cputm_seqcount);
 
-	rc = kvm_vcpu_init(vcpu, kvm, id);
-	if (rc)
-		goto out_free_sie_block;
-	VM_EVENT(kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK", id, vcpu,
-		 vcpu->arch.sie_block);
-	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
+	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
+	kvm_clear_async_pf_completion_queue(vcpu);
+	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
+				    KVM_SYNC_GPRS |
+				    KVM_SYNC_ACRS |
+				    KVM_SYNC_CRS |
+				    KVM_SYNC_ARCH0 |
+				    KVM_SYNC_PFAULT |
+				    KVM_SYNC_DIAG318;
+	kvm_s390_set_prefix(vcpu, 0);
+	if (test_kvm_facility(vcpu->kvm, 64))
+		vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
+	if (test_kvm_facility(vcpu->kvm, 82))
+		vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC;
+	if (test_kvm_facility(vcpu->kvm, 133))
+		vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB;
+	if (test_kvm_facility(vcpu->kvm, 156))
+		vcpu->run->kvm_valid_regs |= KVM_SYNC_ETOKEN;
+	/* fprs can be synchronized via vrs, even if the guest has no vx. With
+	 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
+	 */
+	if (MACHINE_HAS_VX)
+		vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
+	else
+		vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;
 
-	return vcpu;
+	if (kvm_is_ucontrol(vcpu->kvm)) {
+		rc = __kvm_ucontrol_vcpu_init(vcpu);
+		if (rc)
+			goto out_free_sie_block;
+	}
+
+	VM_EVENT(vcpu->kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK",
+		 vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);
+	trace_kvm_s390_create_vcpu(vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);
+
+	rc = kvm_s390_vcpu_setup(vcpu);
+	if (rc)
+		goto out_ucontrol_uninit;
+	return 0;
+
+out_ucontrol_uninit:
+	if (kvm_is_ucontrol(vcpu->kvm))
+		gmap_remove(vcpu->arch.gmap);
 out_free_sie_block:
 	free_page((unsigned long)(vcpu->arch.sie_block));
-out_free_cpu:
-	kmem_cache_free(kvm_vcpu_cache, vcpu);
-out:
-	return ERR_PTR(rc);
+	return rc;
 }
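
On the userspace side, a VMM is expected to consult kvm_valid_regs before touching the sync-regs fields advertised here. A hedged sketch, using only names from <linux/kvm.h>:

	/* Sketch: pick the FP register view the kernel advertised. */
	static const void *guest_fp_regs(const struct kvm_run *run)
	{
		if (run->kvm_valid_regs & KVM_SYNC_VRS)
			return run->s.regs.vrs;	/* fprs are contained in vrs 0-15 */
		return run->s.regs.fprs;
	}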
 
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 {
+	clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.gisa_int.kicked_mask);
 	return kvm_s390_vcpu_has_irq(vcpu, 0);
 }
 
[...]
 	exit_sie(vcpu);
 }
 
+bool kvm_s390_vcpu_sie_inhibited(struct kvm_vcpu *vcpu)
+{
+	return atomic_read(&vcpu->arch.sie_block->prog20) &
+	       (PROG_BLOCK_SIE | PROG_REQUEST);
+}
+
 static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
 {
 	atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
 }
 
 /*
- * Kick a guest cpu out of SIE and wait until SIE is not running.
+ * Kick a guest cpu out of (v)SIE and wait until (v)SIE is not running.
  * If the CPU is not running (e.g. waiting as idle) the function will
  * return immediately. */
 void exit_sie(struct kvm_vcpu *vcpu)
 {
 	kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
+	kvm_s390_vsie_kick(vcpu);
 	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
 		cpu_relax();
 }
[...]
 			kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
 		}
 	}
+}
+
+bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
+{
+	/* do not poll with more than halt_poll_max_steal percent of steal time */
+	if (S390_lowcore.avg_steal_timer * 100 / (TICK_USEC << 12) >=
+	    halt_poll_max_steal) {
+		vcpu->stat.halt_no_poll_steal++;
+		return true;
+	}
+	return false;
 }
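
The threshold arithmetic deserves a worked example. Assuming the TOD clock's 4096-units-per-microsecond resolution and HZ=100 (both properties of the surrounding configuration, not of this function):

	/*
	 * avg_steal_timer is in TOD clock units and TICK_USEC is the tick
	 * length in microseconds, so (TICK_USEC << 12) is one tick in TOD
	 * units: 10000 * 4096 = 40960000 with HZ=100. A vcpu stolen half
	 * of every tick averages ~20480000 TOD units, and
	 *
	 *	20480000 * 100 / 40960000 = 50
	 *
	 * i.e. 50%, which is compared against halt_poll_max_steal.
	 */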
 
 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
[...]
 	return r;
 }
 
-static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
+static void kvm_arch_vcpu_ioctl_normal_reset(struct kvm_vcpu *vcpu)
 {
-	kvm_s390_vcpu_initial_reset(vcpu);
-	return 0;
+	vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_RI;
+	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
+	memset(vcpu->run->s.regs.riccb, 0, sizeof(vcpu->run->s.regs.riccb));
+
+	kvm_clear_async_pf_completion_queue(vcpu);
+	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
+		kvm_s390_vcpu_stop(vcpu);
+	kvm_s390_clear_local_irqs(vcpu);
+}
+
+static void kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
+{
+	/* Initial reset is a superset of the normal reset */
+	kvm_arch_vcpu_ioctl_normal_reset(vcpu);
+
+	/*
+	 * This equals initial cpu reset in pop, but we don't switch to ESA.
+	 * We not only reset the internal data, but also ...
+	 */
+	vcpu->arch.sie_block->gpsw.mask = 0;
+	vcpu->arch.sie_block->gpsw.addr = 0;
+	kvm_s390_set_prefix(vcpu, 0);
+	kvm_s390_set_cpu_timer(vcpu, 0);
+	vcpu->arch.sie_block->ckc = 0;
+	memset(vcpu->arch.sie_block->gcr, 0, sizeof(vcpu->arch.sie_block->gcr));
+	vcpu->arch.sie_block->gcr[0] = CR0_INITIAL_MASK;
+	vcpu->arch.sie_block->gcr[14] = CR14_INITIAL_MASK;
+
+	/* ... the data in sync regs */
+	memset(vcpu->run->s.regs.crs, 0, sizeof(vcpu->run->s.regs.crs));
+	vcpu->run->s.regs.ckc = 0;
+	vcpu->run->s.regs.crs[0] = CR0_INITIAL_MASK;
+	vcpu->run->s.regs.crs[14] = CR14_INITIAL_MASK;
+	vcpu->run->psw_addr = 0;
+	vcpu->run->psw_mask = 0;
+	vcpu->run->s.regs.todpr = 0;
+	vcpu->run->s.regs.cputm = 0;
+	vcpu->run->s.regs.ckc = 0;
+	vcpu->run->s.regs.pp = 0;
+	vcpu->run->s.regs.gbea = 1;
+	vcpu->run->s.regs.fpc = 0;
+	/*
+	 * Do not reset these registers in the protected case, as some of
+	 * them are overlaid and they are not accessible in this case
+	 * anyway.
+	 */
+	if (!kvm_s390_pv_cpu_is_protected(vcpu)) {
+		vcpu->arch.sie_block->gbea = 1;
+		vcpu->arch.sie_block->pp = 0;
+		vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
+		vcpu->arch.sie_block->todpr = 0;
+	}
+}
+
+static void kvm_arch_vcpu_ioctl_clear_reset(struct kvm_vcpu *vcpu)
+{
+	struct kvm_sync_regs *regs = &vcpu->run->s.regs;
+
+	/* Clear reset is a superset of the initial reset */
+	kvm_arch_vcpu_ioctl_initial_reset(vcpu);
+
+	memset(&regs->gprs, 0, sizeof(regs->gprs));
+	memset(&regs->vrs, 0, sizeof(regs->vrs));
+	memset(&regs->acrs, 0, sizeof(regs->acrs));
+	memset(&regs->gscb, 0, sizeof(regs->gscb));
+
+	regs->etoken = 0;
+	regs->etoken_extension = 0;
 }
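
These handlers back the three vcpu reset ioctls. A hedged userspace sketch of how a VMM would invoke them (the ioctl names are uapi; error handling omitted):

	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Sketch: each level is a superset of the previous one. */
	static void reset_vcpu_all_levels(int vcpu_fd)
	{
		ioctl(vcpu_fd, KVM_S390_NORMAL_RESET, 0);	/* normal_reset()  */
		ioctl(vcpu_fd, KVM_S390_INITIAL_RESET, 0);	/* initial_reset() */
		ioctl(vcpu_fd, KVM_S390_CLEAR_RESET, 0);	/* clear_reset()   */
	}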
 
 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
[...]
 
 	switch (mp_state->mp_state) {
 	case KVM_MP_STATE_STOPPED:
-		kvm_s390_vcpu_stop(vcpu);
+		rc = kvm_s390_vcpu_stop(vcpu);
 		break;
 	case KVM_MP_STATE_OPERATING:
-		kvm_s390_vcpu_start(vcpu);
+		rc = kvm_s390_vcpu_start(vcpu);
 		break;
 	case KVM_MP_STATE_LOAD:
+		if (!kvm_s390_pv_cpu_is_protected(vcpu)) {
+			rc = -ENXIO;
+			break;
+		}
+		rc = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_OPR_LOAD);
+		break;
 	case KVM_MP_STATE_CHECK_STOP:
-		/* fall through - CHECK_STOP and LOAD are not supported yet */
+		fallthrough;	/* CHECK_STOP and LOAD are not supported yet */
 	default:
 		rc = -ENXIO;
 	}
[...]
 
 	/* nothing to do, just clear the request */
 	kvm_clear_request(KVM_REQ_UNHALT, vcpu);
+	/* we left the vsie handler, nothing to do, just clear the request */
+	kvm_clear_request(KVM_REQ_VSIE_RESTART, vcpu);
 
 	return 0;
 }
 
-void kvm_s390_set_tod_clock(struct kvm *kvm,
-			    const struct kvm_s390_vm_tod_clock *gtod)
+static void __kvm_s390_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod)
 {
 	struct kvm_vcpu *vcpu;
 	struct kvm_s390_tod_clock_ext htod;
 	int i;
 
-	mutex_lock(&kvm->lock);
 	preempt_disable();
 
 	get_tod_clock_ext((char *)&htod);
[...]
 
 	kvm_s390_vcpu_unblock_all(kvm);
 	preempt_enable();
+}
+
+int kvm_s390_try_set_tod_clock(struct kvm *kvm, const struct kvm_s390_vm_tod_clock *gtod)
+{
+	if (!mutex_trylock(&kvm->lock))
+		return 0;
+	__kvm_s390_set_tod_clock(kvm, gtod);
 	mutex_unlock(&kvm->lock);
+	return 1;
 }
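
The trylock variant lets an instruction handler back off instead of sleeping on kvm->lock while holding vcpu locks. A hedged sketch of the intended call pattern, where kvm_s390_retry_instr() is the existing helper that rewinds the PSW so the guest re-issues SET CLOCK:

	/* Sketch: contended lock -> let the guest retry the instruction. */
	if (!kvm_s390_try_set_tod_clock(vcpu->kvm, &gtod)) {
		kvm_s390_retry_instr(vcpu);
		return 0;
	}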
 
 /**
[...]
 	}
 }
 
-void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
+bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
 				     struct kvm_async_pf *work)
 {
 	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
 	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
+
+	return true;
 }
 
 void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
[...]
 	/* s390 will always inject the page directly */
 }
 
-bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
+bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu)
 {
 	/*
 	 * s390 will always inject the page directly,
[...]
 	return true;
 }
 
-static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
+static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
 {
 	hva_t hva;
 	struct kvm_arch_async_pf arch;
-	int rc;
 
 	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
-		return 0;
+		return false;
 	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
 	    vcpu->arch.pfault_compare)
-		return 0;
+		return false;
 	if (psw_extint_disabled(vcpu))
-		return 0;
+		return false;
 	if (kvm_s390_vcpu_has_irq(vcpu, 0))
-		return 0;
+		return false;
 	if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK))
-		return 0;
+		return false;
 	if (!vcpu->arch.gmap->pfault_enabled)
-		return 0;
+		return false;
 
 	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
 	hva += current->thread.gmap_addr & ~PAGE_MASK;
 	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
-		return 0;
+		return false;
 
-	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
-	return rc;
+	return kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
 }
 
 static int vcpu_pre_run(struct kvm_vcpu *vcpu)
[...]
 	if (need_resched())
 		schedule();
 
-	if (test_cpu_flag(CIF_MCCK_PENDING))
-		s390_handle_mcck();
-
 	if (!kvm_is_ucontrol(vcpu->kvm)) {
 		rc = kvm_s390_deliver_pending_interrupts(vcpu);
 		if (rc)
[...]
 		kvm_s390_backup_guest_per_regs(vcpu);
 		kvm_s390_patch_guest_per_regs(vcpu);
 	}
+
+	clear_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.gisa_int.kicked_mask);
 
 	vcpu->arch.sie_block->icptcode = 0;
 	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
[...]
 	return vcpu_post_run_fault_in_sie(vcpu);
 }
 
+#define PSW_INT_MASK (PSW_MASK_EXT | PSW_MASK_IO | PSW_MASK_MCHECK)
 static int __vcpu_run(struct kvm_vcpu *vcpu)
 {
 	int rc, exit_reason;
+	struct sie_page *sie_page = (struct sie_page *)vcpu->arch.sie_block;
 
 	/*
 	 * We try to hold kvm->srcu during most of vcpu_run (except when run-
[...]
 		guest_enter_irqoff();
 		__disable_cpu_timer_accounting(vcpu);
 		local_irq_enable();
+		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
+			memcpy(sie_page->pv_grregs,
+			       vcpu->run->s.regs.gprs,
+			       sizeof(sie_page->pv_grregs));
+		}
 		exit_reason = sie64a(vcpu->arch.sie_block,
 				     vcpu->run->s.regs.gprs);
+		if (kvm_s390_pv_cpu_is_protected(vcpu)) {
+			memcpy(vcpu->run->s.regs.gprs,
+			       sie_page->pv_grregs,
+			       sizeof(sie_page->pv_grregs));
+			/*
+			 * We're not allowed to inject interrupts on intercepts
+			 * that leave the guest state in an "in-between" state
+			 * where the next SIE entry will do a continuation.
+			 * Fence interrupts in our "internal" PSW.
+			 */
+			if (vcpu->arch.sie_block->icptcode == ICPT_PV_INSTR ||
+			    vcpu->arch.sie_block->icptcode == ICPT_PV_PREF) {
+				vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK;
+			}
+		}
 		local_irq_disable();
 		__enable_cpu_timer_accounting(vcpu);
 		guest_exit_irqoff();
[...]
 	return rc;
 }
 
-static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static void sync_regs_fmt2(struct kvm_vcpu *vcpu)
 {
+	struct kvm_run *kvm_run = vcpu->run;
 	struct runtime_instr_cb *riccb;
 	struct gs_cb *gscb;
 
[...]
 	gscb = (struct gs_cb *) &kvm_run->s.regs.gscb;
 	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
 	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
-	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
-		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
-	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
-		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
-		/* some control register changes require a tlb flush */
-		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
-	}
 	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
-		kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
-		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
 		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
 		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
 		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
[...]
 		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
 		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
 			kvm_clear_async_pf_completion_queue(vcpu);
+	}
+	if (kvm_run->kvm_dirty_regs & KVM_SYNC_DIAG318) {
+		vcpu->arch.diag318_info.val = kvm_run->s.regs.diag318;
+		vcpu->arch.sie_block->cpnc = vcpu->arch.diag318_info.cpnc;
 	}
 	/*
 	 * If userspace sets the riccb (e.g. after migration) to a valid state,
[...]
 		vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
 		vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0;
 	}
-	save_access_regs(vcpu->arch.host_acrs);
-	restore_access_regs(vcpu->run->s.regs.acrs);
-	/* save host (userspace) fprs/vrs */
-	save_fpu_regs();
-	vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
-	vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
-	if (MACHINE_HAS_VX)
-		current->thread.fpu.regs = vcpu->run->s.regs.vrs;
-	else
-		current->thread.fpu.regs = vcpu->run->s.regs.fprs;
-	current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
-	if (test_fp_ctl(current->thread.fpu.fpc))
-		/* User space provided an invalid FPC, let's clear it */
-		current->thread.fpu.fpc = 0;
 	if (MACHINE_HAS_GS) {
 		preempt_disable();
 		__ctl_set_bit(2, 4);
[...]
 		preempt_enable();
 	}
 	/* SIE will load etoken directly from SDNX and therefore kvm_run */
+}
+
+static void sync_regs(struct kvm_vcpu *vcpu)
+{
+	struct kvm_run *kvm_run = vcpu->run;
+
+	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
+		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
+	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
+		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
+		/* some control register changes require a tlb flush */
+		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+	}
+	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
+		kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
+		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
+	}
+	save_access_regs(vcpu->arch.host_acrs);
+	restore_access_regs(vcpu->run->s.regs.acrs);
+	/* save host (userspace) fprs/vrs */
+	save_fpu_regs();
+	vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
+	vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
+	if (MACHINE_HAS_VX)
+		current->thread.fpu.regs = vcpu->run->s.regs.vrs;
+	else
+		current->thread.fpu.regs = vcpu->run->s.regs.fprs;
+	current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
+	if (test_fp_ctl(current->thread.fpu.fpc))
+		/* User space provided an invalid FPC, let's clear it */
+		current->thread.fpu.fpc = 0;
+
+	/* Sync fmt2 only data */
+	if (likely(!kvm_s390_pv_cpu_is_protected(vcpu))) {
+		sync_regs_fmt2(vcpu);
+	} else {
+		/*
+		 * In several places we have to modify our internal view to
+		 * not do things that are disallowed by the ultravisor. For
+		 * example we must not inject interrupts after specific exits
+		 * (e.g. 112 prefix page not secure). We do this by turning
+		 * off the machine check, external and I/O interrupt bits
+		 * of our PSW copy. To avoid getting validity intercepts, we
+		 * only accept the condition code from userspace.
+		 */
+		vcpu->arch.sie_block->gpsw.mask &= ~PSW_MASK_CC;
+		vcpu->arch.sie_block->gpsw.mask |= kvm_run->psw_mask &
+						   PSW_MASK_CC;
+	}
 
 	kvm_run->kvm_dirty_regs = 0;
 }
 
-static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static void store_regs_fmt2(struct kvm_vcpu *vcpu)
 {
-	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
-	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
-	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
-	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
-	kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
-	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
+	struct kvm_run *kvm_run = vcpu->run;
+
 	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
 	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
 	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
-	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
-	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
-	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
 	kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
-	save_access_regs(vcpu->run->s.regs.acrs);
-	restore_access_regs(vcpu->arch.host_acrs);
-	/* Save guest register state */
-	save_fpu_regs();
-	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
-	/* Restore will be done lazily at return */
-	current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
-	current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
+	kvm_run->s.regs.diag318 = vcpu->arch.diag318_info.val;
 	if (MACHINE_HAS_GS) {
 		preempt_disable();
 		__ctl_set_bit(2, 4);
[...]
 	/* SIE will save etoken directly into SDNX and therefore kvm_run */
 }
 
-int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static void store_regs(struct kvm_vcpu *vcpu)
 {
+	struct kvm_run *kvm_run = vcpu->run;
+
+	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
+	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
+	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
+	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
+	kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
+	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
+	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
+	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
+	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
+	save_access_regs(vcpu->run->s.regs.acrs);
+	restore_access_regs(vcpu->arch.host_acrs);
+	/* Save guest register state */
+	save_fpu_regs();
+	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
+	/* Restore will be done lazily at return */
+	current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
+	current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
+	if (likely(!kvm_s390_pv_cpu_is_protected(vcpu)))
+		store_regs_fmt2(vcpu);
+}
+
+int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
+{
+	struct kvm_run *kvm_run = vcpu->run;
 	int rc;
 
 	if (kvm_run->immediate_exit)
 		return -EINTR;
+
+	if (kvm_run->kvm_valid_regs & ~KVM_SYNC_S390_VALID_FIELDS ||
+	    kvm_run->kvm_dirty_regs & ~KVM_SYNC_S390_VALID_FIELDS)
+		return -EINVAL;
 
 	vcpu_load(vcpu);
 
[...]
 
 	kvm_sigset_activate(vcpu);
 
+	/*
+	 * No need to check the return value of vcpu_start: it can only fail
+	 * for protvirt, and protvirt implies user cpu state control.
+	 */
 	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
 		kvm_s390_vcpu_start(vcpu);
 	} else if (is_vcpu_stopped(vcpu)) {
[...]
 		goto out;
 	}
 
-	sync_regs(vcpu, kvm_run);
+	sync_regs(vcpu);
 	enable_cpu_timer_accounting(vcpu);
 
 	might_fault();
[...]
 	}
 
 	disable_cpu_timer_accounting(vcpu);
-	store_regs(vcpu, kvm_run);
+	store_regs(vcpu);
 
 	kvm_sigset_deactivate(vcpu);
 
[...]
---|
3792 | 4536 | kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu); |
---|
3793 | 4537 | } |
---|
3794 | 4538 | |
---|
3795 | | -void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu) |
---|
| 4539 | +int kvm_s390_vcpu_start(struct kvm_vcpu *vcpu) |
---|
3796 | 4540 | { |
---|
3797 | | - int i, online_vcpus, started_vcpus = 0; |
---|
| 4541 | + int i, online_vcpus, r = 0, started_vcpus = 0; |
---|
3798 | 4542 | |
---|
3799 | 4543 | if (!is_vcpu_stopped(vcpu)) |
---|
3800 | | - return; |
---|
| 4544 | + return 0; |
---|
3801 | 4545 | |
---|
3802 | 4546 | trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1); |
---|
3803 | 4547 | /* Only one cpu at a time may enter/leave the STOPPED state. */ |
---|
3804 | 4548 | spin_lock(&vcpu->kvm->arch.start_stop_lock); |
---|
3805 | 4549 | online_vcpus = atomic_read(&vcpu->kvm->online_vcpus); |
---|
| 4550 | + |
---|
| 4551 | + /* Let's tell the UV that we want to change into the operating state */ |
---|
| 4552 | + if (kvm_s390_pv_cpu_is_protected(vcpu)) { |
---|
| 4553 | + r = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_OPR); |
---|
| 4554 | + if (r) { |
---|
| 4555 | + spin_unlock(&vcpu->kvm->arch.start_stop_lock); |
---|
| 4556 | + return r; |
---|
| 4557 | + } |
---|
| 4558 | + } |
---|
3806 | 4559 | |
---|
3807 | 4560 | for (i = 0; i < online_vcpus; i++) { |
---|
3808 | 4561 | if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) |
---|
.. | .. |
---|
3823 | 4576 | |
---|
3824 | 4577 | kvm_s390_clear_cpuflags(vcpu, CPUSTAT_STOPPED); |
---|
3825 | 4578 | /* |
---|
| 4579 | + * The real PSW might have changed due to a RESTART interpreted by the |
---|
| 4580 | + * Ultravisor. We block all interrupts and let the next SIE exit |
---|
| 4581 | + * refresh our view. |
---|
| 4582 | + */ |
---|
| 4583 | + if (kvm_s390_pv_cpu_is_protected(vcpu)) |
---|
| 4584 | + vcpu->arch.sie_block->gpsw.mask &= ~PSW_INT_MASK; |
---|
| 4585 | + /* |
---|
3826 | 4586 | * Another VCPU might have used IBS while we were offline. |
---|
3827 | 4587 | * Let's play safe and flush the VCPU at startup. |
---|
3828 | 4588 | */ |
---|
3829 | 4589 | kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); |
---|
3830 | 4590 | spin_unlock(&vcpu->kvm->arch.start_stop_lock); |
---|
3831 | | - return; |
---|
| 4591 | + return 0; |
---|
3832 | 4592 | } |
---|
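/*
 * Hedged sketch of what kvm_s390_pv_set_cpu_state() plausibly does;
 * the real helper lives elsewhere in kvm-s390, and the uvcb layout and
 * the UVC_CMD_CPU_SET_STATE constant are assumptions here. The idea:
 * issue a set-cpu-state Ultravisor call with the protected vcpu's
 * handle and the requested state (PV_CPU_STATE_OPR above,
 * PV_CPU_STATE_STP in the stop path below).
 */
static int pv_set_cpu_state_sketch(struct kvm_vcpu *vcpu, u8 state)
{
	struct uv_cb_cpu_set_state uvcb = {
		.header.cmd  = UVC_CMD_CPU_SET_STATE,
		.header.len  = sizeof(uvcb),
		.cpu_handle  = kvm_s390_pv_cpu_get_handle(vcpu),
		.state	     = state,
	};

	/* a non-zero condition code means the UV refused the transition */
	return uv_call(0, (u64)&uvcb) ? -EINVAL : 0;
}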
3833 | 4593 | |
---|
3834 | | -void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu) |
---|
| 4594 | +int kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu) |
---|
3835 | 4595 | { |
---|
3836 | | - int i, online_vcpus, started_vcpus = 0; |
---|
| 4596 | + int i, online_vcpus, r = 0, started_vcpus = 0; |
---|
3837 | 4597 | struct kvm_vcpu *started_vcpu = NULL; |
---|
3838 | 4598 | |
---|
3839 | 4599 | if (is_vcpu_stopped(vcpu)) |
---|
3840 | | - return; |
---|
| 4600 | + return 0; |
---|
3841 | 4601 | |
---|
3842 | 4602 | trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0); |
---|
3843 | 4603 | /* Only one cpu at a time may enter/leave the STOPPED state. */ |
---|
3844 | 4604 | spin_lock(&vcpu->kvm->arch.start_stop_lock); |
---|
3845 | 4605 | online_vcpus = atomic_read(&vcpu->kvm->online_vcpus); |
---|
| 4606 | + |
---|
| 4607 | + /* Let's tell the UV that we want to change into the stopped state */ |
---|
| 4608 | + if (kvm_s390_pv_cpu_is_protected(vcpu)) { |
---|
| 4609 | + r = kvm_s390_pv_set_cpu_state(vcpu, PV_CPU_STATE_STP); |
---|
| 4610 | + if (r) { |
---|
| 4611 | + spin_unlock(&vcpu->kvm->arch.start_stop_lock); |
---|
| 4612 | + return r; |
---|
| 4613 | + } |
---|
| 4614 | + } |
---|
3846 | 4615 | |
---|
3847 | 4616 | /* |
---|
3848 | 4617 | * Set the VCPU to STOPPED and THEN clear the interrupt flag, |
---|
.. | .. |
---|
3871 | 4640 | } |
---|
3872 | 4641 | |
---|
3873 | 4642 | spin_unlock(&vcpu->kvm->arch.start_stop_lock); |
---|
3874 | | - return; |
---|
| 4643 | + return 0; |
---|
3875 | 4644 | } |
---|
3876 | 4645 | |
---|
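/*
 * Hedged userspace sketch: when the user cpu state control is active
 * (see kvm_s390_user_cpu_state_ctrl() in the run ioctl above), a VMM
 * reaches kvm_s390_vcpu_stop()/kvm_s390_vcpu_start() through
 * KVM_SET_MP_STATE. vcpu_fd is illustrative.
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>

static int stop_vcpu(int vcpu_fd)
{
	struct kvm_mp_state state = { .mp_state = KVM_MP_STATE_STOPPED };

	return ioctl(vcpu_fd, KVM_SET_MP_STATE, &state);
}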
3877 | 4646 | static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, |
---|
.. | .. |
---|
3898 | 4667 | return r; |
---|
3899 | 4668 | } |
---|
3900 | 4669 | |
---|
| 4670 | +static long kvm_s390_guest_sida_op(struct kvm_vcpu *vcpu, |
---|
| 4671 | + struct kvm_s390_mem_op *mop) |
---|
| 4672 | +{ |
---|
| 4673 | + void __user *uaddr = (void __user *)mop->buf; |
---|
| 4674 | + int r = 0; |
---|
| 4675 | + |
---|
| 4676 | + if (mop->flags || !mop->size) |
---|
| 4677 | + return -EINVAL; |
---|
| 4678 | + if (mop->size + mop->sida_offset < mop->size) |
---|
| 4679 | + return -EINVAL; |
---|
| 4680 | + if (mop->size + mop->sida_offset > sida_size(vcpu->arch.sie_block)) |
---|
| 4681 | + return -E2BIG; |
---|
| 4682 | + if (!kvm_s390_pv_cpu_is_protected(vcpu)) |
---|
| 4683 | + return -EINVAL; |
---|
| 4684 | + |
---|
| 4685 | + switch (mop->op) { |
---|
| 4686 | + case KVM_S390_MEMOP_SIDA_READ: |
---|
| 4687 | + if (copy_to_user(uaddr, (void *)(sida_origin(vcpu->arch.sie_block) + |
---|
| 4688 | + mop->sida_offset), mop->size)) |
---|
| 4689 | + r = -EFAULT; |
---|
| 4690 | + |
---|
| 4691 | + break; |
---|
| 4692 | + case KVM_S390_MEMOP_SIDA_WRITE: |
---|
| 4693 | + if (copy_from_user((void *)(sida_origin(vcpu->arch.sie_block) + |
---|
| 4694 | + mop->sida_offset), uaddr, mop->size)) |
---|
| 4695 | + r = -EFAULT; |
---|
| 4696 | + break; |
---|
| 4697 | + } |
---|
| 4698 | + return r; |
---|
| 4699 | +} |
---|
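/*
 * Hedged userspace sketch for the SIDA memop above: read bytes out of
 * a protected vcpu's SIDA with KVM_S390_MEM_OP. Per the checks above,
 * flags must be 0, size must be non-zero, and the vcpu must be
 * protected. vcpu_fd and the offset are illustrative.
 */
#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

static int read_sida(int vcpu_fd, void *buf, __u32 len, __u32 offset)
{
	struct kvm_s390_mem_op op;

	memset(&op, 0, sizeof(op));	/* flags and reserved bytes stay 0 */
	op.op = KVM_S390_MEMOP_SIDA_READ;
	op.buf = (__u64)(unsigned long)buf;
	op.size = len;
	op.sida_offset = offset;

	return ioctl(vcpu_fd, KVM_S390_MEM_OP, &op);
}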
3901 | 4700 | static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu, |
---|
3902 | 4701 | struct kvm_s390_mem_op *mop) |
---|
3903 | 4702 | { |
---|
3904 | 4703 | void __user *uaddr = (void __user *)mop->buf; |
---|
3905 | 4704 | void *tmpbuf = NULL; |
---|
3906 | | - int r, srcu_idx; |
---|
| 4705 | + int r = 0; |
---|
3907 | 4706 | const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION |
---|
3908 | 4707 | | KVM_S390_MEMOP_F_CHECK_ONLY; |
---|
3909 | 4708 | |
---|
.. | .. |
---|
3913 | 4712 | if (mop->size > MEM_OP_MAX_SIZE) |
---|
3914 | 4713 | return -E2BIG; |
---|
3915 | 4714 | |
---|
| 4715 | + if (kvm_s390_pv_cpu_is_protected(vcpu)) |
---|
| 4716 | + return -EINVAL; |
---|
| 4717 | + |
---|
3916 | 4718 | if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) { |
---|
3917 | 4719 | tmpbuf = vmalloc(mop->size); |
---|
3918 | 4720 | if (!tmpbuf) |
---|
3919 | 4721 | return -ENOMEM; |
---|
3920 | 4722 | } |
---|
3921 | | - |
---|
3922 | | - srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); |
---|
3923 | 4723 | |
---|
3924 | 4724 | switch (mop->op) { |
---|
3925 | 4725 | case KVM_S390_MEMOP_LOGICAL_READ: |
---|
.. | .. |
---|
3946 | 4746 | } |
---|
3947 | 4747 | r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size); |
---|
3948 | 4748 | break; |
---|
3949 | | - default: |
---|
3950 | | - r = -EINVAL; |
---|
3951 | 4749 | } |
---|
3952 | | - |
---|
3953 | | - srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx); |
---|
3954 | 4750 | |
---|
3955 | 4751 | if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0) |
---|
3956 | 4752 | kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm); |
---|
3957 | 4753 | |
---|
3958 | 4754 | vfree(tmpbuf); |
---|
| 4755 | + return r; |
---|
| 4756 | +} |
---|
| 4757 | + |
---|
| 4758 | +static long kvm_s390_guest_memsida_op(struct kvm_vcpu *vcpu, |
---|
| 4759 | + struct kvm_s390_mem_op *mop) |
---|
| 4760 | +{ |
---|
| 4761 | + int r, srcu_idx; |
---|
| 4762 | + |
---|
| 4763 | + srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); |
---|
| 4764 | + |
---|
| 4765 | + switch (mop->op) { |
---|
| 4766 | + case KVM_S390_MEMOP_LOGICAL_READ: |
---|
| 4767 | + case KVM_S390_MEMOP_LOGICAL_WRITE: |
---|
| 4768 | + r = kvm_s390_guest_mem_op(vcpu, mop); |
---|
| 4769 | + break; |
---|
| 4770 | + case KVM_S390_MEMOP_SIDA_READ: |
---|
| 4771 | + case KVM_S390_MEMOP_SIDA_WRITE: |
---|
| 4772 | + /* the vcpu->mutex held by the ioctl caller keeps the SIDA from going away */ |
---|
| 4773 | + r = kvm_s390_guest_sida_op(vcpu, mop); |
---|
| 4774 | + break; |
---|
| 4775 | + default: |
---|
| 4776 | + r = -EINVAL; |
---|
| 4777 | + } |
---|
| 4778 | + |
---|
| 4779 | + srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx); |
---|
3959 | 4780 | return r; |
---|
3960 | 4781 | } |
---|
3961 | 4782 | |
---|
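/*
 * Hedged userspace sketch for the logical memop path: probe whether a
 * guest logical address is writable without copying any data, using
 * KVM_S390_MEMOP_F_CHECK_ONLY. As enforced above, this path is only
 * valid for non-protected vcpus. vcpu_fd and the address are
 * illustrative.
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>

static int probe_guest_write(int vcpu_fd, __u64 gaddr, __u32 len)
{
	struct kvm_s390_mem_op op = {
		.gaddr = gaddr,
		.size  = len,
		.op    = KVM_S390_MEMOP_LOGICAL_WRITE,
		.flags = KVM_S390_MEMOP_F_CHECK_ONLY,	/* check, don't copy */
	};

	return ioctl(vcpu_fd, KVM_S390_MEM_OP, &op);
}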
.. | .. |
---|
3994 | 4815 | void __user *argp = (void __user *)arg; |
---|
3995 | 4816 | int idx; |
---|
3996 | 4817 | long r; |
---|
| 4818 | + u16 rc, rrc; |
---|
3997 | 4819 | |
---|
3998 | 4820 | vcpu_load(vcpu); |
---|
3999 | 4821 | |
---|
.. | .. |
---|
4012 | 4834 | r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw); |
---|
4013 | 4835 | break; |
---|
4014 | 4836 | } |
---|
| 4837 | + case KVM_S390_CLEAR_RESET: |
---|
| 4838 | + r = 0; |
---|
| 4839 | + kvm_arch_vcpu_ioctl_clear_reset(vcpu); |
---|
| 4840 | + if (kvm_s390_pv_cpu_is_protected(vcpu)) { |
---|
| 4841 | + r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu), |
---|
| 4842 | + UVC_CMD_CPU_RESET_CLEAR, &rc, &rrc); |
---|
| 4843 | + VCPU_EVENT(vcpu, 3, "PROTVIRT RESET CLEAR VCPU: rc %x rrc %x", |
---|
| 4844 | + rc, rrc); |
---|
| 4845 | + } |
---|
| 4846 | + break; |
---|
4015 | 4847 | case KVM_S390_INITIAL_RESET: |
---|
4016 | | - r = kvm_arch_vcpu_ioctl_initial_reset(vcpu); |
---|
| 4848 | + r = 0; |
---|
| 4849 | + kvm_arch_vcpu_ioctl_initial_reset(vcpu); |
---|
| 4850 | + if (kvm_s390_pv_cpu_is_protected(vcpu)) { |
---|
| 4851 | + r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu), |
---|
| 4852 | + UVC_CMD_CPU_RESET_INITIAL, |
---|
| 4853 | + &rc, &rrc); |
---|
| 4854 | + VCPU_EVENT(vcpu, 3, "PROTVIRT RESET INITIAL VCPU: rc %x rrc %x", |
---|
| 4855 | + rc, rrc); |
---|
| 4856 | + } |
---|
| 4857 | + break; |
---|
| 4858 | + case KVM_S390_NORMAL_RESET: |
---|
| 4859 | + r = 0; |
---|
| 4860 | + kvm_arch_vcpu_ioctl_normal_reset(vcpu); |
---|
| 4861 | + if (kvm_s390_pv_cpu_is_protected(vcpu)) { |
---|
| 4862 | + r = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu), |
---|
| 4863 | + UVC_CMD_CPU_RESET, &rc, &rrc); |
---|
| 4864 | + VCPU_EVENT(vcpu, 3, "PROTVIRT RESET NORMAL VCPU: rc %x rrc %x", |
---|
| 4865 | + rc, rrc); |
---|
| 4866 | + } |
---|
4017 | 4867 | break; |
---|
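	/*
	 * Editor's note (illustrative): userspace triggers the three
	 * resets handled above with argument-less vcpu ioctls:
	 *
	 *	ioctl(vcpu_fd, KVM_S390_NORMAL_RESET, 0);
	 *	ioctl(vcpu_fd, KVM_S390_INITIAL_RESET, 0);
	 *	ioctl(vcpu_fd, KVM_S390_CLEAR_RESET, 0);
	 *
	 * Each reset is a superset of the previous one; for a protected
	 * vcpu the matching Ultravisor reset is driven by the
	 * uv_cmd_nodata() calls above.
	 */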
4018 | 4868 | case KVM_SET_ONE_REG: |
---|
4019 | 4869 | case KVM_GET_ONE_REG: { |
---|
4020 | 4870 | struct kvm_one_reg reg; |
---|
| 4871 | + r = -EINVAL; |
---|
| 4872 | + if (kvm_s390_pv_cpu_is_protected(vcpu)) |
---|
| 4873 | + break; |
---|
4021 | 4874 | r = -EFAULT; |
---|
4022 | 4875 | if (copy_from_user(®, argp, sizeof(reg))) |
---|
4023 | 4876 | break; |
---|
.. | .. |
---|
4080 | 4933 | struct kvm_s390_mem_op mem_op; |
---|
4081 | 4934 | |
---|
4082 | 4935 | if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0) |
---|
4083 | | - r = kvm_s390_guest_mem_op(vcpu, &mem_op); |
---|
| 4936 | + r = kvm_s390_guest_memsida_op(vcpu, &mem_op); |
---|
4084 | 4937 | else |
---|
4085 | 4938 | r = -EFAULT; |
---|
4086 | 4939 | break; |
---|
.. | .. |
---|
4140 | 4993 | return VM_FAULT_SIGBUS; |
---|
4141 | 4994 | } |
---|
4142 | 4995 | |
---|
4143 | | -int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, |
---|
4144 | | - unsigned long npages) |
---|
4145 | | -{ |
---|
4146 | | - return 0; |
---|
4147 | | -} |
---|
4148 | | - |
---|
4149 | 4996 | /* Section: memory related */ |
---|
4150 | 4997 | int kvm_arch_prepare_memory_region(struct kvm *kvm, |
---|
4151 | 4998 | struct kvm_memory_slot *memslot, |
---|
.. | .. |
---|
4166 | 5013 | if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit) |
---|
4167 | 5014 | return -EINVAL; |
---|
4168 | 5015 | |
---|
| 5016 | + /* Once the guest is protected, its memory slots must no longer change */ |
---|
| 5017 | + if (kvm_s390_pv_get_handle(kvm)) |
---|
| 5018 | + return -EINVAL; |
---|
| 5019 | + |
---|
| 5020 | + if (!kvm->arch.migration_mode) |
---|
| 5021 | + return 0; |
---|
| 5022 | + |
---|
| 5023 | + /* |
---|
| 5024 | + * Turn off migration mode when: |
---|
| 5025 | + * - userspace creates a new memslot with dirty logging off, |
---|
| 5026 | + * - userspace modifies an existing memslot (MOVE or FLAGS_ONLY) and |
---|
| 5027 | + * turns dirty logging off. |
---|
| 5028 | + * Migration mode requires dirty page logging to be enabled: it stores |
---|
| 5029 | + * its dirty information in the memslots' dirty bitmaps. |
---|
| 5030 | + */ |
---|
| 5031 | + if (change != KVM_MR_DELETE && |
---|
| 5032 | + !(mem->flags & KVM_MEM_LOG_DIRTY_PAGES)) |
---|
| 5033 | + WARN(kvm_s390_vm_stop_migration(kvm), |
---|
| 5034 | + "Failed to stop migration mode"); |
---|
| 5035 | + |
---|
4169 | 5036 | return 0; |
---|
4170 | 5037 | } |
---|
4171 | 5038 | |
---|
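/*
 * Hedged userspace sketch for the migration-mode rule above: updating
 * an existing memslot with KVM_MEM_LOG_DIRTY_PAGES cleared (a
 * FLAGS_ONLY change) makes kvm_arch_prepare_memory_region() stop
 * migration mode. Slot number, size and addresses are illustrative.
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>

static int disable_dirty_logging(int vm_fd, __u64 uaddr, __u64 size)
{
	struct kvm_userspace_memory_region region = {
		.slot = 0,
		.flags = 0,		/* KVM_MEM_LOG_DIRTY_PAGES cleared */
		.guest_phys_addr = 0,
		.memory_size = size,	/* size and addresses unchanged */
		.userspace_addr = uaddr,
	};

	return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
}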
4172 | 5039 | void kvm_arch_commit_memory_region(struct kvm *kvm, |
---|
4173 | 5040 | const struct kvm_userspace_memory_region *mem, |
---|
4174 | | - const struct kvm_memory_slot *old, |
---|
| 5041 | + struct kvm_memory_slot *old, |
---|
4175 | 5042 | const struct kvm_memory_slot *new, |
---|
4176 | 5043 | enum kvm_mr_change change) |
---|
4177 | 5044 | { |
---|
.. | .. |
---|
4187 | 5054 | old->npages * PAGE_SIZE); |
---|
4188 | 5055 | if (rc) |
---|
4189 | 5056 | break; |
---|
4190 | | - /* FALLTHROUGH */ |
---|
| 5057 | + fallthrough; |
---|
4191 | 5058 | case KVM_MR_CREATE: |
---|
4192 | 5059 | rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr, |
---|
4193 | 5060 | mem->guest_phys_addr, mem->memory_size); |
---|
.. | .. |
---|
4219 | 5086 | int i; |
---|
4220 | 5087 | |
---|
4221 | 5088 | if (!sclp.has_sief2) { |
---|
4222 | | - pr_info("SIE not available\n"); |
---|
| 5089 | + pr_info("SIE is not available\n"); |
---|
4223 | 5090 | return -ENODEV; |
---|
4224 | 5091 | } |
---|
4225 | 5092 | |
---|
4226 | 5093 | if (nested && hpage) { |
---|
4227 | | - pr_info("nested (vSIE) and hpage (huge page backing) can currently not be activated concurrently"); |
---|
| 5094 | + pr_info("A KVM host that supports nesting cannot back its KVM guests with huge pages\n"); |
---|
4228 | 5095 | return -EINVAL; |
---|
4229 | 5096 | } |
---|
4230 | 5097 | |
---|