From 093a6c67005148ae32a5c9e4553491b9f5c2457b Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Fri, 10 May 2024 07:40:51 +0000
Subject: [PATCH] disable kernel build warning
---
kernel/include/linux/hyperv.h | 429 ++++++++++++++++++++++++++++++++++++-----------------
1 file changed, 289 insertions(+), 140 deletions(-)
diff --git a/kernel/include/linux/hyperv.h b/kernel/include/linux/hyperv.h
index 35461d4..eada4d8 100644
--- a/kernel/include/linux/hyperv.h
+++ b/kernel/include/linux/hyperv.h
@@ -1,25 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
*
* Copyright (c) 2011, Microsoft Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
*
* Authors:
* Haiyang Zhang <haiyangz@microsoft.com>
* Hank Janssen <hjanssen@microsoft.com>
* K. Y. Srinivasan <kys@microsoft.com>
- *
*/
#ifndef _HYPERV_H
@@ -27,6 +14,7 @@
#include <uapi/linux/hyperv.h>
+#include <linux/mm.h>
#include <linux/types.h>
#include <linux/scatterlist.h>
#include <linux/list.h>
@@ -36,11 +24,54 @@
#include <linux/mod_devicetable.h>
#include <linux/interrupt.h>
#include <linux/reciprocal_div.h>
+#include <asm/hyperv-tlfs.h>
#define MAX_PAGE_BUFFER_COUNT 32
#define MAX_MULTIPAGE_BUFFER_COUNT 32 /* 128K */
#pragma pack(push, 1)
+
+/*
+ * Types of GPADL, which decide how the GPADL header is created.
+ *
+ * There is little difference between BUFFER and RING when PAGE_SIZE is the
+ * same as HV_HYP_PAGE_SIZE.
+ *
+ * If PAGE_SIZE is bigger than HV_HYP_PAGE_SIZE, the header of each ring
+ * buffer occupies PAGE_SIZE, but only its first HV_HYP_PAGE is put into the
+ * GPADL; therefore the number of HV_HYP_PAGEs and the index of each
+ * HV_HYP_PAGE differ between the two GPADL types. For example, if PAGE_SIZE
+ * is 64K:
+ *
+ * BUFFER:
+ *
+ * gva: |-- 64k --|-- 64k --| ... |
+ * gpa: | 4k | 4k | ... | 4k | 4k | 4k | ... | 4k |
+ * index: 0 1 2 15 16 17 18 .. 31 32 ...
+ * | | ... | | | ... | ...
+ * v V V V V V
+ * gpadl: | 4k | 4k | ... | 4k | 4k | 4k | ... | 4k | ... |
+ * index: 0 1 2 ... 15 16 17 18 .. 31 32 ...
+ *
+ * RING:
+ *
+ * | header | data | header | data |
+ * gva: |-- 64k --|-- 64k --| ... |-- 64k --|-- 64k --| ... |
+ * gpa: | 4k | .. | 4k | 4k | ... | 4k | ... | 4k | .. | 4k | .. | ... |
+ * index: 0 1 16 17 18 31 ... n n+1 n+16 ... 2n
+ * | / / / | / /
+ * | / / / | / /
+ * | / / ... / ... | / ... /
+ * | / / / | / /
+ * | / / / | / /
+ * V V V V V V v
+ * gpadl: | 4k | 4k | ... | ... | 4k | 4k | ... |
+ * index: 0 1 2 ... 16 ... n-15 n-14 n-13 ... 2n-30
+ */
+enum hv_gpadl_type {
+ HV_GPADL_BUFFER,
+ HV_GPADL_RING
+};
/* Single-page buffer */
struct hv_page_buffer {
@@ -124,14 +155,18 @@
} feature_bits;
/* Pad it to PAGE_SIZE so that data starts on page boundary */
- u8 reserved2[4028];
+ u8 reserved2[PAGE_SIZE - 68];
/*
* Ring data starts here + RingDataStartOffset
* !!! DO NOT place any fields below this !!!
*/
- u8 buffer[0];
+ u8 buffer[];
} __packed;
+
+/* Calculate the proper size of a ring buffer; it must be page-aligned */
+#define VMBUS_RING_SIZE(payload_sz) PAGE_ALIGN(sizeof(struct hv_ring_buffer) + \
+ (payload_sz))
struct hv_ring_buffer_info {
struct hv_ring_buffer *ring_buffer;
@@ -141,6 +176,11 @@
u32 ring_datasize; /* < ring_size */
u32 priv_read_index;
+ /*
+ * The ring buffer mutex lock. This lock prevents the ring buffer from
+ * being freed while the ring buffer is being accessed.
+ */
+ struct mutex ring_buffer_mutex;
};
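
/*
 * Usage sketch, not part of this patch: VMBUS_RING_SIZE() above turns a
 * payload size into a page-aligned allocation size that also covers the
 * struct hv_ring_buffer header. The 16-page payload is an arbitrary
 * example.
 */
static const u32 example_ring_bytes = VMBUS_RING_SIZE(16 * 4096);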
@@ -190,19 +230,21 @@
* 2 . 4 (Windows 8)
* 3 . 0 (Windows 8 R2)
* 4 . 0 (Windows 10)
+ * 4 . 1 (Windows 10 RS3)
* 5 . 0 (Newer Windows 10)
+ * 5 . 1 (Windows 10 RS4)
+ * 5 . 2 (Windows Server 2019, RS5)
*/
#define VERSION_WS2008 ((0 << 16) | (13))
#define VERSION_WIN7 ((1 << 16) | (1))
#define VERSION_WIN8 ((2 << 16) | (4))
#define VERSION_WIN8_1 ((3 << 16) | (0))
-#define VERSION_WIN10 ((4 << 16) | (0))
+#define VERSION_WIN10 ((4 << 16) | (0))
+#define VERSION_WIN10_V4_1 ((4 << 16) | (1))
#define VERSION_WIN10_V5 ((5 << 16) | (0))
-
-#define VERSION_INVAL -1
-
-#define VERSION_CURRENT VERSION_WIN10_V5
+#define VERSION_WIN10_V5_1 ((5 << 16) | (1))
+#define VERSION_WIN10_V5_2 ((5 << 16) | (2))
/* Make maximum size of pipe payload of 16K */
#define MAX_PIPE_DATA_PAYLOAD (sizeof(u8) * 16384)
@@ -222,8 +264,8 @@
* struct contains the fundamental information about an offer.
*/
struct vmbus_channel_offer {
- uuid_le if_type;
- uuid_le if_instance;
+ guid_t if_type;
+ guid_t if_instance;
/*
* These two fields are not currently used.
@@ -253,7 +295,10 @@
} pipe;
} u;
/*
- * The sub_channel_index is defined in win8.
+ * The sub_channel_index is defined in Win8: a value of zero means a
+ * primary channel and a value of non-zero means a sub-channel.
+ *
+ * Before Win8, the field is reserved, meaning it's always zero.
*/
u16 sub_channel_index;
u16 reserved3;
@@ -316,7 +361,7 @@
struct gpa_range {
u32 byte_count;
u32 byte_offset;
- u64 pfn_array[0];
+ u64 pfn_array[];
};
/*
@@ -428,10 +473,13 @@
CHANNELMSG_19 = 19,
CHANNELMSG_20 = 20,
CHANNELMSG_TL_CONNECT_REQUEST = 21,
- CHANNELMSG_22 = 22,
+ CHANNELMSG_MODIFYCHANNEL = 22,
CHANNELMSG_TL_CONNECT_RESULT = 23,
CHANNELMSG_COUNT
};
+
+/* Hyper-V supports about 2048 channels, and the RELIDs start with 1. */
+#define INVALID_RELID U32_MAX
struct vmbus_channel_message_header {
enum vmbus_channel_message_type msgtype;
@@ -563,7 +611,7 @@
u32 gpadl;
u16 range_buflen;
u16 rangecount;
- struct gpa_range range[0];
+ struct gpa_range range[];
} __packed;
/* This is the followup packet that contains more PFNs. */
@@ -571,7 +619,7 @@
struct vmbus_channel_message_header header;
u32 msgnumber;
u32 gpadl;
- u64 pfn[0];
+ u64 pfn[];
} __packed;
struct vmbus_channel_gpadl_created {
@@ -616,8 +664,15 @@
/* Hyper-V socket: guest's connect()-ing to host */
struct vmbus_channel_tl_connect_request {
struct vmbus_channel_message_header header;
- uuid_le guest_endpoint_id;
- uuid_le host_service_id;
+ guid_t guest_endpoint_id;
+ guid_t host_service_id;
+} __packed;
+
+/* Modify Channel parameters, cf. vmbus_send_modifychannel() */
+struct vmbus_channel_modifychannel {
+ struct vmbus_channel_message_header header;
+ u32 child_relid;
+ u32 target_vp;
} __packed;
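
/*
 * Sketch, not part of this patch: how a MODIFYCHANNEL request is built
 * from the struct above. The real sender is declared later in this
 * header as vmbus_send_modifychannel(); example_post_msg() is a
 * hypothetical stand-in for the VMBus message-posting path.
 */
static int example_modifychannel(u32 child_relid, u32 target_vp)
{
	struct vmbus_channel_modifychannel msg = {
		.header.msgtype = CHANNELMSG_MODIFYCHANNEL,
		.child_relid = child_relid,
		.target_vp = target_vp,
	};

	return example_post_msg(&msg, sizeof(msg));
}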
struct vmbus_channel_version_response {
@@ -672,7 +727,7 @@
* The channel message that goes out on the "wire".
* It will contain at minimum the VMBUS_CHANNEL_MESSAGE_HEADER header
*/
- unsigned char msg[0];
+ unsigned char msg[];
};
struct vmbus_close_msg {
@@ -687,11 +742,6 @@
u32 id:24;
u32 reserved:8;
} u;
-};
-
-enum hv_numa_policy {
- HV_BALANCED = 0,
- HV_LOCALIZED,
};
enum vmbus_device_type {
@@ -716,7 +766,7 @@
struct vmbus_device {
u16 dev_type;
- uuid_le guid;
+ guid_t guid;
bool perf_device;
};
@@ -743,6 +793,7 @@
/* Allocated memory for ring buffer */
struct page *ringbuffer_page;
u32 ringbuffer_pagecount;
+ u32 ringbuffer_send_offset;
struct hv_ring_buffer_info outbound; /* send to parent */
struct hv_ring_buffer_info inbound; /* receive from parent */
@@ -752,10 +803,32 @@
u64 interrupts; /* Host to Guest interrupts */
u64 sig_events; /* Guest to Host events */
+ /*
+ * Guest to host interrupts caused by the outbound ring buffer changing
+ * from empty to not empty.
+ */
+ u64 intr_out_empty;
+
+ /*
+ * Indicates that a full outbound ring buffer was encountered. The flag
+ * is set to true when a full ring buffer is seen and set back to false
+ * when a write to the outbound ring buffer completes.
+ */
+ bool out_full_flag;
+
/* Channel callback's invoked in softirq context */
struct tasklet_struct callback_event;
void (*onchannel_callback)(void *context);
void *channel_callback_context;
+
+ void (*change_target_cpu_callback)(struct vmbus_channel *channel,
+ u32 old, u32 new);
+
+ /*
+ * Synchronize channel scheduling and channel removal; see the inline
+ * comments in vmbus_chan_sched() and vmbus_reset_channel_cb().
+ */
+ spinlock_t sched_lock;
/*
* A channel can be marked for one of three modes of reading:
@@ -778,21 +851,15 @@
u64 sig_event;
/*
- * Starting with win8, this field will be used to specify
- * the target virtual processor on which to deliver the interrupt for
- * the host to guest communication.
- * Prior to win8, incoming channel interrupts would only
- * be delivered on cpu 0. Setting this value to 0 would
- * preserve the earlier behavior.
+ * Starting with win8, this field will be used to specify the
+ * target CPU on which to deliver the interrupt for the host
+ * to guest communication.
+ *
+ * Prior to win8, incoming channel interrupts would only be
+ * delivered on CPU 0. Setting this value to 0 would preserve
+ * the earlier behavior.
*/
- u32 target_vp;
- /* The corresponding CPUID in the guest */
u32 target_cpu;
- /*
- * State to manage the CPU affiliation of channels.
- */
- struct cpumask alloced_cpus_in_node;
- int numa_node;
/*
* Support for sub-channels. For high performance devices,
* it will be useful to have multiple sub-channels to support
@@ -822,24 +889,9 @@
void (*chn_rescind_callback)(struct vmbus_channel *channel);
/*
- * The spinlock to protect the structure. It is being used to protect
- * test-and-set access to various attributes of the structure as well
- * as all sc_list operations.
- */
- spinlock_t lock;
- /*
* All Sub-channels of a primary channel are linked here.
*/
struct list_head sc_list;
- /*
- * Current number of sub-channels.
- */
- int num_sc;
- /*
- * Number of a sub-channel (position within sc_list) which is supposed
- * to be used as the next outgoing channel.
- */
- int next_oc;
/*
* The primary channel this sub-channel belongs to.
* This will be NULL for the primary channel.
@@ -849,11 +901,6 @@
* Support per-channel state for use by vmbus drivers.
*/
void *per_channel_state;
- /*
- * To support per-cpu lookup mapping of relid to channel,
- * link up channels based on their CPU affinity.
- */
- struct list_head percpu_list;
/*
* Defer freeing channel until after all cpu's have
@@ -892,19 +939,14 @@
*/
bool low_latency;
- /*
- * NUMA distribution policy:
- * We support two policies:
- * 1) Balanced: Here all performance critical channels are
- * distributed evenly amongst all the NUMA nodes.
- * This policy will be the default policy.
- * 2) Localized: All channels of a given instance of a
- * performance critical service will be assigned CPUs
- * within a selected NUMA node.
- */
- enum hv_numa_policy affinity_policy;
-
bool probe_done;
+
+ /*
+ * Cache the device ID here for easy access; this is useful, in
+ * particular, in situations where the channel's device_obj has
+ * not been allocated/initialized yet.
+ */
+ u16 device_id;
/*
* We must offload the handling of the primary/sub channels
@@ -913,6 +955,39 @@
* vmbus_connection.work_queue and hang: see vmbus_process_offer().
*/
struct work_struct add_channel_work;
+
+ /*
+ * Guest to host interrupts caused by the inbound ring buffer changing
+ * from full to not full while a packet is waiting.
+ */
+ u64 intr_in_full;
+
+ /*
+ * The total number of write operations that encountered a full
+ * outbound ring buffer.
+ */
+ u64 out_full_total;
+
+ /*
+ * The number of write operations that were the first to encounter a
+ * full outbound ring buffer.
+ */
+ u64 out_full_first;
+
+ /* Enabling/disabling fuzz testing on the channel (default is false) */
+ bool fuzz_testing_state;
+
+ /*
+ * The interrupt delay holds the guest back from emptying the ring
+ * buffer for a set amount of time. The delay is in microseconds,
+ * between 1 and a maximum of 1000; the default is 0 (no delay).
+ * The message delay delays guest reads on a per-message basis, in
+ * microseconds between 1 and 1000, with the default being 0
+ * (no delay).
+ */
+ u32 fuzz_testing_interrupt_delay;
+ u32 fuzz_testing_message_delay;
+
};
static inline bool is_hvsock_channel(const struct vmbus_channel *c)
@@ -921,10 +996,9 @@
VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER);
}
-static inline void set_channel_affinity_state(struct vmbus_channel *c,
- enum hv_numa_policy policy)
+static inline bool is_sub_channel(const struct vmbus_channel *c)
{
- c->affinity_policy = policy;
+ return c->offermsg.offer.sub_channel_index != 0;
}
static inline void set_channel_read_mode(struct vmbus_channel *c,
@@ -946,6 +1020,21 @@
static inline void set_channel_pending_send_size(struct vmbus_channel *c,
u32 size)
{
+ unsigned long flags;
+
+ if (size) {
+ spin_lock_irqsave(&c->outbound.ring_lock, flags);
+ ++c->out_full_total;
+
+ if (!c->out_full_flag) {
+ ++c->out_full_first;
+ c->out_full_flag = true;
+ }
+ spin_unlock_irqrestore(&c->outbound.ring_lock, flags);
+ } else {
+ c->out_full_flag = false;
+ }
+
c->outbound.ring_buffer->pending_send_sz = size;
}
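
/*
 * Usage sketch, not part of this patch: the typical pattern around
 * set_channel_pending_send_size(). A writer that finds the outbound
 * ring full arms a host notification for when `needed` bytes become
 * free, and disarms it once a write completes; the out_full_total and
 * out_full_first counters above track exactly these events.
 */
static void example_ring_full(struct vmbus_channel *chan, u32 needed)
{
	set_channel_pending_send_size(chan, needed);	/* arm wakeup */
}

static void example_write_done(struct vmbus_channel *chan)
{
	set_channel_pending_send_size(chan, 0);		/* disarm */
}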
@@ -959,7 +1048,7 @@
c->low_latency = false;
}
-void vmbus_onmessage(void *context);
+void vmbus_onmessage(struct vmbus_channel_message_header *hdr);
int vmbus_request_offers(void);
@@ -972,14 +1061,6 @@
void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel,
void (*chn_rescind_cb)(struct vmbus_channel *));
-
-/*
- * Retrieve the (sub) channel on which to send an outgoing request.
- * When a primary channel has multiple sub-channels, we choose a
- * channel whose VCPU binding is closest to the VCPU on which
- * this call is being made.
- */
-struct vmbus_channel *vmbus_get_outgoing_channel(struct vmbus_channel *primary);
/*
* Check if sub-channels have already been offered. This API will be useful
@@ -1030,6 +1111,14 @@
struct hv_mpb_array range;
} __packed;
+int vmbus_alloc_ring(struct vmbus_channel *channel,
+ u32 send_size, u32 recv_size);
+void vmbus_free_ring(struct vmbus_channel *channel);
+
+int vmbus_connect_ring(struct vmbus_channel *channel,
+ void (*onchannel_callback)(void *context),
+ void *context);
+int vmbus_disconnect_ring(struct vmbus_channel *channel);
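
/*
 * Usage sketch, not part of this patch: the expected pairing of the
 * four helpers above. Ring sizes are arbitrary but are produced with
 * VMBUS_RING_SIZE() so they are page-aligned.
 */
static int example_ring_setup(struct vmbus_channel *chan,
			      void (*cb)(void *), void *ctx)
{
	int ret;

	ret = vmbus_alloc_ring(chan, VMBUS_RING_SIZE(16 * 4096),
			       VMBUS_RING_SIZE(16 * 4096));
	if (ret)
		return ret;

	ret = vmbus_connect_ring(chan, cb, ctx);
	if (ret)
		vmbus_free_ring(chan);
	return ret;
}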
extern int vmbus_open(struct vmbus_channel *channel,
u32 send_ringbuffersize,
@@ -1106,7 +1195,7 @@
bool hvsock;
/* the device type supported by this driver */
- uuid_le dev_type;
+ guid_t dev_type;
const struct hv_vmbus_device_id *id_table;
struct device_driver driver;
@@ -1121,22 +1210,30 @@
int (*remove)(struct hv_device *);
void (*shutdown)(struct hv_device *);
+ int (*suspend)(struct hv_device *);
+ int (*resume)(struct hv_device *);
+
};
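
/*
 * Sketch, not part of this patch: a minimal hv_driver wiring up the new
 * suspend/resume hooks added above. All example_* callbacks and the id
 * table are hypothetical placeholders.
 */
static struct hv_driver example_drv = {
	.name     = "example",
	.id_table = example_id_table,
	.probe    = example_probe,
	.remove   = example_remove,
	.suspend  = example_suspend,
	.resume   = example_resume,
};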
/* Base device object */
struct hv_device {
/* the device type id of this device */
- uuid_le dev_type;
+ guid_t dev_type;
/* the device instance id of this device */
- uuid_le dev_instance;
+ guid_t dev_instance;
u16 vendor_id;
u16 device_id;
struct device device;
+ char *driver_override; /* Driver name to force a match */
struct vmbus_channel *channel;
struct kset *channels_kset;
+
+ /* place holder to keep track of the dir for hv device in debugfs */
+ struct dentry *debug_dir;
+
};
@@ -1169,8 +1266,10 @@
};
-int hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
+int hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
struct hv_ring_buffer_debug_info *debug_info);
+
+bool hv_ringbuffer_spinlock_busy(struct vmbus_channel *channel);
/* Vmbus interface */
#define vmbus_driver_register(driver) \
@@ -1197,102 +1296,102 @@
* {f8615163-df3e-46c5-913f-f2d2f965ed0e}
*/
#define HV_NIC_GUID \
- .guid = UUID_LE(0xf8615163, 0xdf3e, 0x46c5, 0x91, 0x3f, \
- 0xf2, 0xd2, 0xf9, 0x65, 0xed, 0x0e)
+ .guid = GUID_INIT(0xf8615163, 0xdf3e, 0x46c5, 0x91, 0x3f, \
+ 0xf2, 0xd2, 0xf9, 0x65, 0xed, 0x0e)
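
/*
 * Usage sketch, not part of this patch: how a driver consumes one of
 * these GUID macros in its match table (the netvsc driver uses
 * HV_NIC_GUID this way).
 */
static const struct hv_vmbus_device_id example_id_table[] = {
	{ HV_NIC_GUID, },
	{ },
};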
/*
* IDE GUID
* {32412632-86cb-44a2-9b5c-50d1417354f5}
*/
#define HV_IDE_GUID \
- .guid = UUID_LE(0x32412632, 0x86cb, 0x44a2, 0x9b, 0x5c, \
- 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5)
+ .guid = GUID_INIT(0x32412632, 0x86cb, 0x44a2, 0x9b, 0x5c, \
+ 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5)
/*
* SCSI GUID
* {ba6163d9-04a1-4d29-b605-72e2ffb1dc7f}
*/
#define HV_SCSI_GUID \
- .guid = UUID_LE(0xba6163d9, 0x04a1, 0x4d29, 0xb6, 0x05, \
- 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f)
+ .guid = GUID_INIT(0xba6163d9, 0x04a1, 0x4d29, 0xb6, 0x05, \
+ 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f)
/*
* Shutdown GUID
* {0e0b6031-5213-4934-818b-38d90ced39db}
*/
#define HV_SHUTDOWN_GUID \
- .guid = UUID_LE(0x0e0b6031, 0x5213, 0x4934, 0x81, 0x8b, \
- 0x38, 0xd9, 0x0c, 0xed, 0x39, 0xdb)
+ .guid = GUID_INIT(0x0e0b6031, 0x5213, 0x4934, 0x81, 0x8b, \
+ 0x38, 0xd9, 0x0c, 0xed, 0x39, 0xdb)
/*
* Time Synch GUID
* {9527E630-D0AE-497b-ADCE-E80AB0175CAF}
*/
#define HV_TS_GUID \
- .guid = UUID_LE(0x9527e630, 0xd0ae, 0x497b, 0xad, 0xce, \
- 0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf)
+ .guid = GUID_INIT(0x9527e630, 0xd0ae, 0x497b, 0xad, 0xce, \
+ 0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf)
/*
* Heartbeat GUID
* {57164f39-9115-4e78-ab55-382f3bd5422d}
*/
#define HV_HEART_BEAT_GUID \
- .guid = UUID_LE(0x57164f39, 0x9115, 0x4e78, 0xab, 0x55, \
- 0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d)
+ .guid = GUID_INIT(0x57164f39, 0x9115, 0x4e78, 0xab, 0x55, \
+ 0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d)
/*
* KVP GUID
* {a9a0f4e7-5a45-4d96-b827-8a841e8c03e6}
*/
#define HV_KVP_GUID \
- .guid = UUID_LE(0xa9a0f4e7, 0x5a45, 0x4d96, 0xb8, 0x27, \
- 0x8a, 0x84, 0x1e, 0x8c, 0x03, 0xe6)
+ .guid = GUID_INIT(0xa9a0f4e7, 0x5a45, 0x4d96, 0xb8, 0x27, \
+ 0x8a, 0x84, 0x1e, 0x8c, 0x03, 0xe6)
/*
* Dynamic memory GUID
* {525074dc-8985-46e2-8057-a307dc18a502}
*/
#define HV_DM_GUID \
- .guid = UUID_LE(0x525074dc, 0x8985, 0x46e2, 0x80, 0x57, \
- 0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02)
+ .guid = GUID_INIT(0x525074dc, 0x8985, 0x46e2, 0x80, 0x57, \
+ 0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02)
/*
* Mouse GUID
* {cfa8b69e-5b4a-4cc0-b98b-8ba1a1f3f95a}
*/
#define HV_MOUSE_GUID \
- .guid = UUID_LE(0xcfa8b69e, 0x5b4a, 0x4cc0, 0xb9, 0x8b, \
- 0x8b, 0xa1, 0xa1, 0xf3, 0xf9, 0x5a)
+ .guid = GUID_INIT(0xcfa8b69e, 0x5b4a, 0x4cc0, 0xb9, 0x8b, \
+ 0x8b, 0xa1, 0xa1, 0xf3, 0xf9, 0x5a)
/*
* Keyboard GUID
* {f912ad6d-2b17-48ea-bd65-f927a61c7684}
*/
#define HV_KBD_GUID \
- .guid = UUID_LE(0xf912ad6d, 0x2b17, 0x48ea, 0xbd, 0x65, \
- 0xf9, 0x27, 0xa6, 0x1c, 0x76, 0x84)
+ .guid = GUID_INIT(0xf912ad6d, 0x2b17, 0x48ea, 0xbd, 0x65, \
+ 0xf9, 0x27, 0xa6, 0x1c, 0x76, 0x84)
/*
* VSS (Backup/Restore) GUID
*/
#define HV_VSS_GUID \
- .guid = UUID_LE(0x35fa2e29, 0xea23, 0x4236, 0x96, 0xae, \
- 0x3a, 0x6e, 0xba, 0xcb, 0xa4, 0x40)
+ .guid = GUID_INIT(0x35fa2e29, 0xea23, 0x4236, 0x96, 0xae, \
+ 0x3a, 0x6e, 0xba, 0xcb, 0xa4, 0x40)
/*
* Synthetic Video GUID
* {DA0A7802-E377-4aac-8E77-0558EB1073F8}
*/
#define HV_SYNTHVID_GUID \
- .guid = UUID_LE(0xda0a7802, 0xe377, 0x4aac, 0x8e, 0x77, \
- 0x05, 0x58, 0xeb, 0x10, 0x73, 0xf8)
+ .guid = GUID_INIT(0xda0a7802, 0xe377, 0x4aac, 0x8e, 0x77, \
+ 0x05, 0x58, 0xeb, 0x10, 0x73, 0xf8)
/*
* Synthetic FC GUID
* {2f9bcc4a-0069-4af3-b76b-6fd0be528cda}
*/
#define HV_SYNTHFC_GUID \
- .guid = UUID_LE(0x2f9bcc4a, 0x0069, 0x4af3, 0xb7, 0x6b, \
- 0x6f, 0xd0, 0xbe, 0x52, 0x8c, 0xda)
+ .guid = GUID_INIT(0x2f9bcc4a, 0x0069, 0x4af3, 0xb7, 0x6b, \
+ 0x6f, 0xd0, 0xbe, 0x52, 0x8c, 0xda)
/*
* Guest File Copy Service
@@ -1300,16 +1399,16 @@
*/
#define HV_FCOPY_GUID \
- .guid = UUID_LE(0x34d14be3, 0xdee4, 0x41c8, 0x9a, 0xe7, \
- 0x6b, 0x17, 0x49, 0x77, 0xc1, 0x92)
+ .guid = GUID_INIT(0x34d14be3, 0xdee4, 0x41c8, 0x9a, 0xe7, \
+ 0x6b, 0x17, 0x49, 0x77, 0xc1, 0x92)
/*
* NetworkDirect. This is the guest RDMA service.
* {8c2eaf3d-32a7-4b09-ab99-bd1f1c86b501}
*/
#define HV_ND_GUID \
- .guid = UUID_LE(0x8c2eaf3d, 0x32a7, 0x4b09, 0xab, 0x99, \
- 0xbd, 0x1f, 0x1c, 0x86, 0xb5, 0x01)
+ .guid = GUID_INIT(0x8c2eaf3d, 0x32a7, 0x4b09, 0xab, 0x99, \
+ 0xbd, 0x1f, 0x1c, 0x86, 0xb5, 0x01)
/*
* PCI Express Pass Through
@@ -1317,8 +1416,8 @@
*/
#define HV_PCIE_GUID \
- .guid = UUID_LE(0x44c4f61d, 0x4444, 0x4400, 0x9d, 0x52, \
- 0x80, 0x2e, 0x27, 0xed, 0xe1, 0x9f)
+ .guid = GUID_INIT(0x44c4f61d, 0x4444, 0x4400, 0x9d, 0x52, \
+ 0x80, 0x2e, 0x27, 0xed, 0xe1, 0x9f)
/*
* Linux doesn't support the 3 devices: the first two are for
@@ -1330,16 +1429,16 @@
*/
#define HV_AVMA1_GUID \
- .guid = UUID_LE(0xf8e65716, 0x3cb3, 0x4a06, 0x9a, 0x60, \
- 0x18, 0x89, 0xc5, 0xcc, 0xca, 0xb5)
+ .guid = GUID_INIT(0xf8e65716, 0x3cb3, 0x4a06, 0x9a, 0x60, \
+ 0x18, 0x89, 0xc5, 0xcc, 0xca, 0xb5)
#define HV_AVMA2_GUID \
- .guid = UUID_LE(0x3375baf4, 0x9e15, 0x4b30, 0xb7, 0x65, \
- 0x67, 0xac, 0xb1, 0x0d, 0x60, 0x7b)
+ .guid = GUID_INIT(0x3375baf4, 0x9e15, 0x4b30, 0xb7, 0x65, \
+ 0x67, 0xac, 0xb1, 0x0d, 0x60, 0x7b)
#define HV_RDV_GUID \
- .guid = UUID_LE(0x276aacf4, 0xac15, 0x426c, 0x98, 0xdd, \
- 0x75, 0x21, 0xad, 0x3f, 0x01, 0xfe)
+ .guid = GUID_INIT(0x276aacf4, 0xac15, 0x426c, 0x98, 0xdd, \
+ 0x75, 0x21, 0xad, 0x3f, 0x01, 0xfe)
/*
* Common header for Hyper-V ICs
@@ -1369,6 +1468,8 @@
void (*util_cb)(void *);
int (*util_init)(struct hv_util_service *);
void (*util_deinit)(void);
+ int (*util_pre_suspend)(void);
+ int (*util_pre_resume)(void);
};
struct vmbuspipe_hdr {
@@ -1441,7 +1542,7 @@
struct hyperv_service_callback {
u8 msg_type;
char *log_msg;
- uuid_le data;
+ guid_t data;
struct vmbus_channel *channel;
void (*callback)(void *context);
};
@@ -1452,7 +1553,7 @@
const int *srv_version, int srv_vercnt,
int *nego_fw_version, int *nego_srv_version);
-void hv_process_channel_removal(u32 relid);
+void hv_process_channel_removal(struct vmbus_channel *channel);
void vmbus_setevent(struct vmbus_channel *channel);
/*
@@ -1461,8 +1562,9 @@
extern __u32 vmbus_proto_version;
-int vmbus_send_tl_connect_request(const uuid_le *shv_guest_servie_id,
- const uuid_le *shv_host_servie_id);
+int vmbus_send_tl_connect_request(const guid_t *shv_guest_servie_id,
+ const guid_t *shv_host_servie_id);
+int vmbus_send_modifychannel(u32 child_relid, u32 target_vp);
void vmbus_set_event(struct vmbus_channel *channel);
/* Get the start of the ring buffer. */
@@ -1549,4 +1651,51 @@
for (pkt = hv_pkt_iter_first(channel); pkt; \
pkt = hv_pkt_iter_next(channel, pkt))
+/*
+ * Interface for passing data between SR-IOV PF and VF drivers. The VF driver
+ * sends requests to read and write blocks. Each block must be 128 bytes or
+ * smaller. Optionally, the VF driver can register a callback function which
+ * will be invoked when the host says that one or more of the first 64 block
+ * IDs is "invalid" which means that the VF driver should reread them.
+ */
+#define HV_CONFIG_BLOCK_SIZE_MAX 128
+
+int hyperv_read_cfg_blk(struct pci_dev *dev, void *buf, unsigned int buf_len,
+ unsigned int block_id, unsigned int *bytes_returned);
+int hyperv_write_cfg_blk(struct pci_dev *dev, void *buf, unsigned int len,
+ unsigned int block_id);
+int hyperv_reg_block_invalidate(struct pci_dev *dev, void *context,
+ void (*block_invalidate)(void *context,
+ u64 block_mask));
+
+struct hyperv_pci_block_ops {
+ int (*read_block)(struct pci_dev *dev, void *buf, unsigned int buf_len,
+ unsigned int block_id, unsigned int *bytes_returned);
+ int (*write_block)(struct pci_dev *dev, void *buf, unsigned int len,
+ unsigned int block_id);
+ int (*reg_blk_invalidate)(struct pci_dev *dev, void *context,
+ void (*block_invalidate)(void *context,
+ u64 block_mask));
+};
+
+extern struct hyperv_pci_block_ops hvpci_block_ops;
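
/*
 * Usage sketch, not part of this patch: a VF driver reading a single
 * config block into a maximally sized buffer; block id 0 is an
 * arbitrary example.
 */
static int example_read_block(struct pci_dev *pdev)
{
	u8 buf[HV_CONFIG_BLOCK_SIZE_MAX];
	unsigned int returned;

	return hyperv_read_cfg_blk(pdev, buf, sizeof(buf), 0, &returned);
}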
+
+static inline unsigned long virt_to_hvpfn(void *addr)
+{
+ phys_addr_t paddr;
+
+ if (is_vmalloc_addr(addr))
+ paddr = page_to_phys(vmalloc_to_page(addr)) +
+ offset_in_page(addr);
+ else
+ paddr = __pa(addr);
+
+ return paddr >> HV_HYP_PAGE_SHIFT;
+}
+
+#define NR_HV_HYP_PAGES_IN_PAGE (PAGE_SIZE / HV_HYP_PAGE_SIZE)
+#define offset_in_hvpage(ptr) ((unsigned long)(ptr) & ~HV_HYP_PAGE_MASK)
+#define HVPFN_UP(x) (((x) + HV_HYP_PAGE_SIZE-1) >> HV_HYP_PAGE_SHIFT)
+#define page_to_hvpfn(page) (page_to_pfn(page) * NR_HV_HYP_PAGES_IN_PAGE)
+
#endif /* _HYPERV_H */
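
/*
 * Worked example, not part of this patch: the page-unit helpers above
 * with 64K guest pages and 4K Hyper-V pages. NR_HV_HYP_PAGES_IN_PAGE is
 * then 16, and HVPFN_UP()/offset_in_hvpage() count 4K Hyper-V pages
 * rather than guest pages.
 */
static unsigned long example_hvpfn_span(void *buf, size_t len)
{
	/* number of HV_HYP_PAGEs touched by [buf, buf + len) */
	return HVPFN_UP(offset_in_hvpage(buf) + len);
}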
--
Gitblit v1.6.2