hc
2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
kernel/drivers/hv/hyperv_vmbus.h
@@ -1,25 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  *
  * Copyright (c) 2011, Microsoft Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
  *
  * Authors:
  *   Haiyang Zhang <haiyangz@microsoft.com>
  *   Hank Janssen <hjanssen@microsoft.com>
  *   K. Y. Srinivasan <kys@microsoft.com>
- *
  */

 #ifndef _HYPERV_VMBUS_H
@@ -45,74 +32,6 @@
  */
 #define HV_UTIL_NEGO_TIMEOUT 55

-/* Define synthetic interrupt controller flag constants. */
-#define HV_EVENT_FLAGS_COUNT (256 * 8)
-#define HV_EVENT_FLAGS_LONG_COUNT (256 / sizeof(unsigned long))
-
-/*
- * Timer configuration register.
- */
-union hv_timer_config {
-        u64 as_uint64;
-        struct {
-                u64 enable:1;
-                u64 periodic:1;
-                u64 lazy:1;
-                u64 auto_enable:1;
-                u64 apic_vector:8;
-                u64 direct_mode:1;
-                u64 reserved_z0:3;
-                u64 sintx:4;
-                u64 reserved_z1:44;
-        };
-};
-
-
-/* Define the synthetic interrupt controller event flags format. */
-union hv_synic_event_flags {
-        unsigned long flags[HV_EVENT_FLAGS_LONG_COUNT];
-};
-
-/* Define SynIC control register. */
-union hv_synic_scontrol {
-        u64 as_uint64;
-        struct {
-                u64 enable:1;
-                u64 reserved:63;
-        };
-};
-
-/* Define synthetic interrupt source. */
-union hv_synic_sint {
-        u64 as_uint64;
-        struct {
-                u64 vector:8;
-                u64 reserved1:8;
-                u64 masked:1;
-                u64 auto_eoi:1;
-                u64 reserved2:46;
-        };
-};
-
-/* Define the format of the SIMP register */
-union hv_synic_simp {
-        u64 as_uint64;
-        struct {
-                u64 simp_enabled:1;
-                u64 preserved:11;
-                u64 base_simp_gpa:52;
-        };
-};
-
-/* Define the format of the SIEFP register */
-union hv_synic_siefp {
-        u64 as_uint64;
-        struct {
-                u64 siefp_enabled:1;
-                u64 preserved:11;
-                u64 base_siefp_gpa:52;
-        };
-};

 /* Definitions for the monitored notification facility */
 union hv_monitor_trigger_group {
@@ -214,13 +133,6 @@
          * basis.
          */
         struct tasklet_struct msg_dpc;
-
-        /*
-         * To optimize the mapping of relid to channel, maintain
-         * per-cpu list of the channels based on their CPU affinity.
-         */
-        struct list_head chan_list;
-        struct clock_event_device *clk_evt;
 };

 struct hv_context {
@@ -228,10 +140,6 @@
          * So at this point this really can only contain the Hyper-V ID
          */
         u64 guestid;
-
-        void *tsc_page;
-
-        bool synic_initialized;

         struct hv_per_cpu_context __percpu *cpu_context;

@@ -256,14 +164,15 @@

 extern void hv_synic_free(void);

+extern void hv_synic_enable_regs(unsigned int cpu);
 extern int hv_synic_init(unsigned int cpu);

+extern void hv_synic_disable_regs(unsigned int cpu);
 extern int hv_synic_cleanup(unsigned int cpu);
-
-extern void hv_synic_clockevents_cleanup(void);

 /* Interface */

+void hv_ringbuffer_pre_init(struct vmbus_channel *channel);

 int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
                        struct page *pages, u32 pagecnt);
@@ -278,16 +187,18 @@
                    u64 *requestid, bool raw);

 /*
- * Maximum channels is determined by the size of the interrupt page
- * which is PAGE_SIZE. 1/2 of PAGE_SIZE is for send endpoint interrupt
- * and the other is receive endpoint interrupt
+ * The Maximum number of channels (16348) is determined by the size of the
+ * interrupt page, which is HV_HYP_PAGE_SIZE. 1/2 of HV_HYP_PAGE_SIZE is to
+ * send endpoint interrupts, and the other is to receive endpoint interrupts.
  */
-#define MAX_NUM_CHANNELS ((PAGE_SIZE >> 1) << 3) /* 16348 channels */
+#define MAX_NUM_CHANNELS ((HV_HYP_PAGE_SIZE >> 1) << 3)

 /* The value here must be in multiple of 32 */
 /* TODO: Need to make this configurable */
 #define MAX_NUM_CHANNELS_SUPPORTED 256

+#define MAX_CHANNEL_RELIDS \
+        max(MAX_NUM_CHANNELS_SUPPORTED, HV_EVENT_FLAGS_COUNT)

 enum vmbus_connect_state {
         DISCONNECTED,
@@ -298,12 +209,13 @@

 #define MAX_SIZE_CHANNEL_MESSAGE HV_MESSAGE_PAYLOAD_BYTE_COUNT

-struct vmbus_connection {
-        /*
-         * CPU on which the initial host contact was made.
-         */
-        int connect_cpu;
+/*
+ * The CPU that Hyper-V will interrupt for VMBUS messages, such as
+ * CHANNELMSG_OFFERCHANNEL and CHANNELMSG_RESCIND_CHANNELOFFER.
+ */
+#define VMBUS_CONNECT_CPU 0

+struct vmbus_connection {
         u32 msg_conn_id;

         atomic_t offer_in_progress;
@@ -336,6 +248,9 @@
         struct list_head chn_list;
         struct mutex channel_mutex;

+        /* Array of channels */
+        struct vmbus_channel **channels;
+
         /*
          * An offer message is handled first on the work_queue, and then
          * is further handled on handle_primary_chan_wq or
@@ -344,6 +259,32 @@
         struct workqueue_struct *work_queue;
         struct workqueue_struct *handle_primary_chan_wq;
         struct workqueue_struct *handle_sub_chan_wq;
+
+        /*
+         * The number of sub-channels and hv_sock channels that should be
+         * cleaned up upon suspend: sub-channels will be re-created upon
+         * resume, and hv_sock channels should not survive suspend.
+         */
+        atomic_t nr_chan_close_on_suspend;
+        /*
+         * vmbus_bus_suspend() waits for "nr_chan_close_on_suspend" to
+         * drop to zero.
+         */
+        struct completion ready_for_suspend_event;
+
+        /*
+         * The number of primary channels that should be "fixed up"
+         * upon resume: these channels are re-offered upon resume, and some
+         * fields of the channel offers (i.e. child_relid and connection_id)
+         * can change, so the old offermsg must be fixed up, before the resume
+         * callbacks of the VSC drivers start to further touch the channels.
+         */
+        atomic_t nr_chan_fixup_on_resume;
+        /*
+         * vmbus_bus_resume() waits for "nr_chan_fixup_on_resume" to
+         * drop to zero.
+         */
+        struct completion ready_for_resume_event;
 };


349290
....@@ -352,11 +293,13 @@
352293 struct list_head msglist_entry;
353294
354295 /* The message itself */
355
- unsigned char msg[0];
296
+ unsigned char msg[];
356297 };
357298
358299
359300 extern struct vmbus_connection vmbus_connection;
301
+
302
+int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo, u32 version);
360303
361304 static inline void vmbus_send_interrupt(u32 relid)
362305 {
@@ -375,6 +318,7 @@
         enum vmbus_channel_message_type message_type;
         enum vmbus_message_handler_type handler_type;
         void (*message_handler)(struct vmbus_channel_message_header *msg);
+        u32 min_payload_len;
 };

 extern const struct vmbus_channel_message_table_entry
@@ -383,8 +327,8 @@

 /* General vmbus interface */

-struct hv_device *vmbus_device_create(const uuid_le *type,
-                                      const uuid_le *instance,
+struct hv_device *vmbus_device_create(const guid_t *type,
+                                      const guid_t *instance,
                                       struct vmbus_channel *channel);

 int vmbus_device_register(struct hv_device *child_device_obj);
@@ -393,6 +337,9 @@
                            struct vmbus_channel *channel);

 void vmbus_remove_channel_attr_group(struct vmbus_channel *channel);
+
+void vmbus_channel_map_relid(struct vmbus_channel *channel);
+void vmbus_channel_unmap_relid(struct vmbus_channel *channel);

 struct vmbus_channel *relid2channel(u32 relid);

@@ -410,14 +357,20 @@

 int hv_kvp_init(struct hv_util_service *srv);
 void hv_kvp_deinit(void);
+int hv_kvp_pre_suspend(void);
+int hv_kvp_pre_resume(void);
 void hv_kvp_onchannelcallback(void *context);

 int hv_vss_init(struct hv_util_service *srv);
 void hv_vss_deinit(void);
+int hv_vss_pre_suspend(void);
+int hv_vss_pre_resume(void);
 void hv_vss_onchannelcallback(void *context);

 int hv_fcopy_init(struct hv_util_service *srv);
 void hv_fcopy_deinit(void);
+int hv_fcopy_pre_suspend(void);
+int hv_fcopy_pre_resume(void);
 void hv_fcopy_onchannelcallback(void *context);
 void vmbus_initiate_unload(bool crash);

@@ -426,12 +379,7 @@
 {
         if (!channel)
                 return;
-
-        if (in_interrupt() && (channel->target_cpu == smp_processor_id())) {
-                cb(channel);
-                return;
-        }
-        smp_call_function_single(channel->target_cpu, cb, channel, true);
+        cb(channel);
 }

 enum hvutil_device_state {
@@ -443,4 +391,83 @@
         HVUTIL_DEVICE_DYING, /* driver unload is in progress */
 };

+enum delay {
+        INTERRUPT_DELAY = 0,
+        MESSAGE_DELAY = 1,
+};
+
+extern const struct vmbus_device vmbus_devs[];
+
+static inline bool hv_is_perf_channel(struct vmbus_channel *channel)
+{
+        return vmbus_devs[channel->device_id].perf_device;
+}
+
+static inline bool hv_is_alloced_cpu(unsigned int cpu)
+{
+        struct vmbus_channel *channel, *sc;
+
+        lockdep_assert_held(&vmbus_connection.channel_mutex);
+        /*
+         * List additions/deletions as well as updates of the target CPUs are
+         * protected by channel_mutex.
+         */
+        list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
+                if (!hv_is_perf_channel(channel))
+                        continue;
+                if (channel->target_cpu == cpu)
+                        return true;
+                list_for_each_entry(sc, &channel->sc_list, sc_list) {
+                        if (sc->target_cpu == cpu)
+                                return true;
+                }
+        }
+        return false;
+}
+
+static inline void hv_set_alloced_cpu(unsigned int cpu)
+{
+        cpumask_set_cpu(cpu, &hv_context.hv_numa_map[cpu_to_node(cpu)]);
+}
+
+static inline void hv_clear_alloced_cpu(unsigned int cpu)
+{
+        if (hv_is_alloced_cpu(cpu))
+                return;
+        cpumask_clear_cpu(cpu, &hv_context.hv_numa_map[cpu_to_node(cpu)]);
+}
+
+static inline void hv_update_alloced_cpus(unsigned int old_cpu,
+                                          unsigned int new_cpu)
+{
+        hv_set_alloced_cpu(new_cpu);
+        hv_clear_alloced_cpu(old_cpu);
+}
+
+#ifdef CONFIG_HYPERV_TESTING
+
+int hv_debug_add_dev_dir(struct hv_device *dev);
+void hv_debug_rm_dev_dir(struct hv_device *dev);
+void hv_debug_rm_all_dir(void);
+int hv_debug_init(void);
+void hv_debug_delay_test(struct vmbus_channel *channel, enum delay delay_type);
+
+#else /* CONFIG_HYPERV_TESTING */
+
+static inline void hv_debug_rm_dev_dir(struct hv_device *dev) {};
+static inline void hv_debug_rm_all_dir(void) {};
+static inline void hv_debug_delay_test(struct vmbus_channel *channel,
+                                       enum delay delay_type) {};
+static inline int hv_debug_init(void)
+{
+        return -1;
+}
+
+static inline int hv_debug_add_dev_dir(struct hv_device *dev)
+{
+        return -1;
+}
+
+#endif /* CONFIG_HYPERV_TESTING */
+
 #endif /* _HYPERV_VMBUS_H */
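
For reference, the channel-count arithmetic in the new MAX_NUM_CHANNELS and MAX_CHANNEL_RELIDS definitions can be checked with the small stand-alone sketch below. It is not kernel code and is not part of the patch: the numeric values assumed for HV_HYP_PAGE_SIZE (4096 bytes) and HV_EVENT_FLAGS_COUNT (256 * 8) come from the Hyper-V headers, and a plain ternary stands in for the kernel's max() macro.

/*
 * Stand-alone user-space sketch that evaluates the channel-count
 * arithmetic from hyperv_vmbus.h. Constants below are assumptions
 * mirroring the Hyper-V definitions: a 4096-byte hypervisor page and
 * HV_EVENT_FLAGS_COUNT = 256 * 8 event-flag bits.
 */
#include <stdio.h>

#define HV_HYP_PAGE_SIZE 4096UL
#define HV_EVENT_FLAGS_COUNT (256 * 8)

/* Half the page carries send flags, half receive; 8 flag bits per byte. */
#define MAX_NUM_CHANNELS ((HV_HYP_PAGE_SIZE >> 1) << 3)
#define MAX_NUM_CHANNELS_SUPPORTED 256

/* Plain ternary in place of the kernel's max() macro. */
#define MAX_CHANNEL_RELIDS \
        (MAX_NUM_CHANNELS_SUPPORTED > HV_EVENT_FLAGS_COUNT ? \
         MAX_NUM_CHANNELS_SUPPORTED : HV_EVENT_FLAGS_COUNT)

int main(void)
{
        printf("MAX_NUM_CHANNELS   = %lu\n", MAX_NUM_CHANNELS);
        printf("MAX_CHANNEL_RELIDS = %d\n", MAX_CHANNEL_RELIDS);
        return 0;
}

When compiled, it prints 16384 for MAX_NUM_CHANNELS and 2048 for MAX_CHANNEL_RELIDS.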