2024-01-03 2f7c68cb55ecb7331f2381deb497c27155f32faf
kernel/arch/x86/include/asm/mshyperv.h
@@ -3,78 +3,16 @@
 #define _ASM_X86_MSHYPER_H
 
 #include <linux/types.h>
-#include <linux/atomic.h>
 #include <linux/nmi.h>
+#include <linux/msi.h>
 #include <asm/io.h>
 #include <asm/hyperv-tlfs.h>
 #include <asm/nospec-branch.h>
+#include <asm/paravirt.h>
 
-#define VP_INVAL	U32_MAX
-
-struct ms_hyperv_info {
-	u32 features;
-	u32 misc_features;
-	u32 hints;
-	u32 nested_features;
-	u32 max_vp_index;
-	u32 max_lp_index;
-};
-
-extern struct ms_hyperv_info ms_hyperv;
-
-/*
- * Generate the guest ID.
- */
-
-static inline __u64 generate_guest_id(__u64 d_info1, __u64 kernel_version,
-				      __u64 d_info2)
-{
-	__u64 guest_id = 0;
-
-	guest_id = (((__u64)HV_LINUX_VENDOR_ID) << 48);
-	guest_id |= (d_info1 << 48);
-	guest_id |= (kernel_version << 16);
-	guest_id |= d_info2;
-
-	return guest_id;
-}
-
-
-/* Free the message slot and signal end-of-message if required */
-static inline void vmbus_signal_eom(struct hv_message *msg, u32 old_msg_type)
-{
-	/*
-	 * On crash we're reading some other CPU's message page and we need
-	 * to be careful: this other CPU may already had cleared the header
-	 * and the host may already had delivered some other message there.
-	 * In case we blindly write msg->header.message_type we're going
-	 * to lose it. We can still lose a message of the same type but
-	 * we count on the fact that there can only be one
-	 * CHANNELMSG_UNLOAD_RESPONSE and we don't care about other messages
-	 * on crash.
-	 */
-	if (cmpxchg(&msg->header.message_type, old_msg_type,
-		    HVMSG_NONE) != old_msg_type)
-		return;
-
-	/*
-	 * Make sure the write to MessageType (ie set to
-	 * HVMSG_NONE) happens before we read the
-	 * MessagePending and EOMing. Otherwise, the EOMing
-	 * will not deliver any more messages since there is
-	 * no empty slot
-	 */
-	mb();
-
-	if (msg->header.message_flags.msg_pending) {
-		/*
-		 * This will cause message queue rescan to
-		 * possibly deliver another msg from the
-		 * hypervisor
-		 */
-		wrmsrl(HV_X64_MSR_EOM, 0);
-	}
-}
+typedef int (*hyperv_fill_flush_list_func)(
+		struct hv_guest_mapping_flush_list *flush,
+		void *data);
 
 #define hv_init_timer(timer, tick) \
 	wrmsrl(HV_X64_MSR_STIMER0_COUNT + (2*timer), tick)
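
Note: the hyperv_fill_flush_list_func typedef added above is the callback
contract for hyperv_flush_guest_mapping_range(), declared later in this
patch. A minimal sketch of a conforming callback, assuming the caller packs
its range into a private struct (my_flush_range and both function names
below are hypothetical, not part of the patch):

	struct my_flush_range {
		u64 start_gfn;
		u64 end_gfn;
	};

	static int my_fill_flush_list(struct hv_guest_mapping_flush_list *flush,
				      void *data)
	{
		struct my_flush_range *r = data;

		/* Append the GFN span to the hypercall's flush list. */
		return hyperv_fill_flush_guest_mapping_list(flush,
				r->start_gfn, r->end_gfn);
	}

	/* caller: hyperv_flush_guest_mapping_range(as, my_fill_flush_list, &r); */
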
@@ -92,43 +30,52 @@
 
 #define hv_get_vp_index(index) rdmsrl(HV_X64_MSR_VP_INDEX, index)
 
+#define hv_signal_eom() wrmsrl(HV_X64_MSR_EOM, 0)
+
 #define hv_get_synint_state(int_num, val) \
 	rdmsrl(HV_X64_MSR_SINT0 + int_num, val)
 #define hv_set_synint_state(int_num, val) \
 	wrmsrl(HV_X64_MSR_SINT0 + int_num, val)
+#define hv_recommend_using_aeoi() \
+	(!(ms_hyperv.hints & HV_DEPRECATING_AEOI_RECOMMENDED))
 
 #define hv_get_crash_ctl(val) \
 	rdmsrl(HV_X64_MSR_CRASH_CTL, val)
 
-void hyperv_callback_vector(void);
-void hyperv_reenlightenment_vector(void);
-#ifdef CONFIG_TRACING
-#define trace_hyperv_callback_vector hyperv_callback_vector
-#endif
-void hyperv_vector_handler(struct pt_regs *regs);
-void hv_setup_vmbus_irq(void (*handler)(void));
-void hv_remove_vmbus_irq(void);
+#define hv_get_time_ref_count(val) \
+	rdmsrl(HV_X64_MSR_TIME_REF_COUNT, val)
 
-void hv_setup_kexec_handler(void (*handler)(void));
-void hv_remove_kexec_handler(void);
-void hv_setup_crash_handler(void (*handler)(struct pt_regs *regs));
-void hv_remove_crash_handler(void);
+#define hv_get_reference_tsc(val) \
+	rdmsrl(HV_X64_MSR_REFERENCE_TSC, val)
+#define hv_set_reference_tsc(val) \
+	wrmsrl(HV_X64_MSR_REFERENCE_TSC, val)
+#define hv_set_clocksource_vdso(val) \
+	((val).vdso_clock_mode = VDSO_CLOCKMODE_HVCLOCK)
+#define hv_enable_vdso_clocksource() \
+	vclocks_set_used(VDSO_CLOCKMODE_HVCLOCK);
+#define hv_get_raw_timer() rdtsc_ordered()
+#define hv_get_vector() HYPERVISOR_CALLBACK_VECTOR
 
 /*
- * Routines for stimer0 Direct Mode handling.
- * On x86/x64, there are no percpu actions to take.
+ * Reference to pv_ops must be inline so objtool
+ * detection of noinstr violations can work correctly.
  */
-void hv_stimer0_vector_handler(struct pt_regs *regs);
-void hv_stimer0_callback_vector(void);
-int hv_setup_stimer0_irq(int *irq, int *vector, void (*handler)(void));
-void hv_remove_stimer0_irq(int irq);
+static __always_inline void hv_setup_sched_clock(void *sched_clock)
+{
+#ifdef CONFIG_PARAVIRT
+	pv_ops.time.sched_clock = sched_clock;
+#endif
+}
+
+void hyperv_vector_handler(struct pt_regs *regs);
 
 static inline void hv_enable_stimer0_percpu_irq(int irq) {}
 static inline void hv_disable_stimer0_percpu_irq(int irq) {}
 
 
 #if IS_ENABLED(CONFIG_HYPERV)
-extern struct clocksource *hyperv_cs;
+extern int hyperv_init_cpuhp;
+
 extern void *hv_hypercall_pg;
 extern void __percpu **hyperv_pcpu_input_arg;
 
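
Note: hv_setup_sched_clock() above is deliberately __always_inline: as the
comment in the hunk says, objtool can only check noinstr rules around the
pv_ops reference when it is inlined into the caller. A sketch of the
intended use, with a hypothetical read function (the Hyper-V time reference
counter ticks at 10 MHz, so one tick is 100 ns):

	static u64 notrace my_hv_sched_clock_read(void)
	{
		u64 now;

		hv_get_time_ref_count(now);	/* macro added in this hunk */
		return now * 100;		/* 100 ns ticks -> nanoseconds */
	}

	/* during clocksource init: hv_setup_sched_clock(my_hv_sched_clock_read); */
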
@@ -232,7 +179,7 @@
 			     : "cc");
 	}
 #endif
-		return hv_status;
+	return hv_status;
 }
 
 /*
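
Note: this hunk changes only whitespace (the deleted and added lines are
identical apart from indentation of the return). For context, the value
hv_do_hypercall() returns carries the Hyper-V status code in its low 16
bits, so callers typically check it along these lines (the mask and status
constants come from asm/hyperv-tlfs.h; the errno mapping is illustrative):

	u64 hv_status = hv_do_hypercall(control, input, output);

	if ((hv_status & HV_HYPERCALL_RESULT_MASK) != HV_STATUS_SUCCESS)
		return -EIO;	/* or map the status code to a better errno */
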
@@ -267,14 +214,6 @@
 	return status;
 }
 
-/*
- * Hypervisor's notion of virtual processor ID is different from
- * Linux' notion of CPU ID. This information can only be retrieved
- * in the context of the calling CPU. Setup a map for easy access
- * to this information.
- */
-extern u32 *hv_vp_index;
-extern u32 hv_max_vp_index;
 extern struct hv_vp_assist_page **hv_vp_assist_page;
 
 static inline struct hv_vp_assist_page *hv_get_vp_assist_page(unsigned int cpu)
@@ -285,81 +224,34 @@
 	return hv_vp_assist_page[cpu];
 }
 
-/**
- * hv_cpu_number_to_vp_number() - Map CPU to VP.
- * @cpu_number: CPU number in Linux terms
- *
- * This function returns the mapping between the Linux processor
- * number and the hypervisor's virtual processor number, useful
- * in making hypercalls and such that talk about specific
- * processors.
- *
- * Return: Virtual processor number in Hyper-V terms
- */
-static inline int hv_cpu_number_to_vp_number(int cpu_number)
-{
-	return hv_vp_index[cpu_number];
-}
-
-static inline int cpumask_to_vpset(struct hv_vpset *vpset,
-				    const struct cpumask *cpus)
-{
-	int cpu, vcpu, vcpu_bank, vcpu_offset, nr_bank = 1;
-
-	/* valid_bank_mask can represent up to 64 banks */
-	if (hv_max_vp_index / 64 >= 64)
-		return 0;
-
-	/*
-	 * Clear all banks up to the maximum possible bank as hv_tlb_flush_ex
-	 * structs are not cleared between calls, we risk flushing unneeded
-	 * vCPUs otherwise.
-	 */
-	for (vcpu_bank = 0; vcpu_bank <= hv_max_vp_index / 64; vcpu_bank++)
-		vpset->bank_contents[vcpu_bank] = 0;
-
-	/*
-	 * Some banks may end up being empty but this is acceptable.
-	 */
-	for_each_cpu(cpu, cpus) {
-		vcpu = hv_cpu_number_to_vp_number(cpu);
-		if (vcpu == VP_INVAL)
-			return -1;
-		vcpu_bank = vcpu / 64;
-		vcpu_offset = vcpu % 64;
-		__set_bit(vcpu_offset, (unsigned long *)
-			  &vpset->bank_contents[vcpu_bank]);
-		if (vcpu_bank >= nr_bank)
-			nr_bank = vcpu_bank + 1;
-	}
-	vpset->valid_bank_mask = GENMASK_ULL(nr_bank - 1, 0);
-	return nr_bank;
-}
-
 void __init hyperv_init(void);
 void hyperv_setup_mmu_ops(void);
-void hyperv_report_panic(struct pt_regs *regs, long err, bool in_die);
-void hyperv_report_panic_msg(phys_addr_t pa, size_t size);
-bool hv_is_hyperv_initialized(void);
-void hyperv_cleanup(void);
-
-void hyperv_reenlightenment_intr(struct pt_regs *regs);
+void *hv_alloc_hyperv_page(void);
+void *hv_alloc_hyperv_zeroed_page(void);
+void hv_free_hyperv_page(unsigned long addr);
 void set_hv_tscchange_cb(void (*cb)(void));
 void clear_hv_tscchange_cb(void);
 void hyperv_stop_tsc_emulation(void);
 int hyperv_flush_guest_mapping(u64 as);
+int hyperv_flush_guest_mapping_range(u64 as,
+		hyperv_fill_flush_list_func fill_func, void *data);
+int hyperv_fill_flush_guest_mapping_list(
+		struct hv_guest_mapping_flush_list *flush,
+		u64 start_gfn, u64 end_gfn);
 
 #ifdef CONFIG_X86_64
 void hv_apic_init(void);
+void __init hv_init_spinlocks(void);
+bool hv_vcpu_is_preempted(int vcpu);
 #else
 static inline void hv_apic_init(void) {}
 #endif
 
 #else /* CONFIG_HYPERV */
 static inline void hyperv_init(void) {}
-static inline bool hv_is_hyperv_initialized(void) { return false; }
-static inline void hyperv_cleanup(void) {}
 static inline void hyperv_setup_mmu_ops(void) {}
+static inline void *hv_alloc_hyperv_page(void) { return NULL; }
+static inline void hv_free_hyperv_page(unsigned long addr) {}
 static inline void set_hv_tscchange_cb(void (*cb)(void)) {}
 static inline void clear_hv_tscchange_cb(void) {}
 static inline void hyperv_stop_tsc_emulation(void) {};
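
Note: the hunk above adds page helpers whose implementations live elsewhere.
Judging from the declarations and the CONFIG_HYPERV=n stubs below them, the
intended usage pattern is roughly the following sketch (not from the patch);
note that the free side takes the address back as an unsigned long:

	void *page = hv_alloc_hyperv_zeroed_page();

	if (!page)
		return -ENOMEM;
	/* ... hand the page to the hypervisor, e.g. as hypercall input ... */
	hv_free_hyperv_page((unsigned long)page);
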
@@ -368,75 +260,14 @@
 	return NULL;
 }
 static inline int hyperv_flush_guest_mapping(u64 as) { return -1; }
+static inline int hyperv_flush_guest_mapping_range(u64 as,
+		hyperv_fill_flush_list_func fill_func, void *data)
+{
+	return -1;
+}
 #endif /* CONFIG_HYPERV */
 
-#ifdef CONFIG_HYPERV_TSCPAGE
-struct ms_hyperv_tsc_page *hv_get_tsc_page(void);
-static inline u64 hv_read_tsc_page_tsc(const struct ms_hyperv_tsc_page *tsc_pg,
-				       u64 *cur_tsc)
-{
-	u64 scale, offset;
-	u32 sequence;
 
-	/*
-	 * The protocol for reading Hyper-V TSC page is specified in Hypervisor
-	 * Top-Level Functional Specification ver. 3.0 and above. To get the
-	 * reference time we must do the following:
-	 * - READ ReferenceTscSequence
-	 *   A special '0' value indicates the time source is unreliable and we
-	 *   need to use something else. The currently published specification
-	 *   versions (up to 4.0b) contain a mistake and wrongly claim '-1'
-	 *   instead of '0' as the special value, see commit c35b82ef0294.
-	 * - ReferenceTime =
-	 *        ((RDTSC() * ReferenceTscScale) >> 64) + ReferenceTscOffset
-	 * - READ ReferenceTscSequence again. In case its value has changed
-	 *   since our first reading we need to discard ReferenceTime and repeat
-	 *   the whole sequence as the hypervisor was updating the page in
-	 *   between.
-	 */
-	do {
-		sequence = READ_ONCE(tsc_pg->tsc_sequence);
-		if (!sequence)
-			return U64_MAX;
-		/*
-		 * Make sure we read sequence before we read other values from
-		 * TSC page.
-		 */
-		smp_rmb();
+#include <asm-generic/mshyperv.h>
 
-		scale = READ_ONCE(tsc_pg->tsc_scale);
-		offset = READ_ONCE(tsc_pg->tsc_offset);
-		*cur_tsc = rdtsc_ordered();
-
-		/*
-		 * Make sure we read sequence after we read all other values
-		 * from TSC page.
-		 */
-		smp_rmb();
-
-	} while (READ_ONCE(tsc_pg->tsc_sequence) != sequence);
-
-	return mul_u64_u64_shr(*cur_tsc, scale, 64) + offset;
-}
-
-static inline u64 hv_read_tsc_page(const struct ms_hyperv_tsc_page *tsc_pg)
-{
-	u64 cur_tsc;
-
-	return hv_read_tsc_page_tsc(tsc_pg, &cur_tsc);
-}
-
-#else
-static inline struct ms_hyperv_tsc_page *hv_get_tsc_page(void)
-{
-	return NULL;
-}
-
-static inline u64 hv_read_tsc_page_tsc(const struct ms_hyperv_tsc_page *tsc_pg,
-				       u64 *cur_tsc)
-{
-	BUG();
-	return U64_MAX;
-}
-#endif
 #endif
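
Note: the helpers deleted throughout this patch are not gone. struct
ms_hyperv_info, generate_guest_id(), vmbus_signal_eom(),
hv_cpu_number_to_vp_number(), cpumask_to_vpset() and related declarations
moved to the architecture-neutral <asm-generic/mshyperv.h>, pulled in by the
new #include just before the final #endif, while the TSC-page readers now
live with the Hyper-V clocksource driver (include/clocksource/hyperv_timer.h).
Code that includes <asm/mshyperv.h> keeps compiling unchanged, e.g.:

	#include <asm/mshyperv.h>

	/* still resolves, now via asm-generic/mshyperv.h */
	u32 vp = hv_cpu_number_to_vp_number(smp_processor_id());
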