2024-12-19 9370bb92b2d16684ee45cf24e879c93c509162da
kernel/drivers/hv/ring_buffer.c
@@ -1,25 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  *
  * Copyright (c) 2009, Microsoft Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
  *
  * Authors:
  *   Haiyang Zhang <haiyangz@microsoft.com>
  *   Hank Janssen <hjanssen@microsoft.com>
  *   K. Y. Srinivasan <kys@microsoft.com>
- *
  */
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

@@ -74,8 +61,10 @@
 	 * This is the only case we need to signal when the
 	 * ring transitions from being empty to non-empty.
 	 */
-	if (old_write == READ_ONCE(rbi->ring_buffer->read_index))
+	if (old_write == READ_ONCE(rbi->ring_buffer->read_index)) {
+		++channel->intr_out_empty;
 		vmbus_setevent(channel);
+	}
 }

 /* Get the next write location for the specified ring buffer. */
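This hunk only adds the intr_out_empty counter inside the existing signaling test; the test itself relies on the ring being empty exactly when read_index == write_index. A minimal userspace sketch (hypothetical names, not driver code) of why comparing the pre-write write index against read_index detects the empty-to-non-empty transition:

/* ring_was_empty() and the index values are hypothetical illustration only. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool ring_was_empty(uint32_t old_write, uint32_t read_index)
{
	/* Empty ring: reader has consumed everything, read == write. */
	return old_write == read_index;
}

int main(void)
{
	uint32_t read_index = 128;

	/* Case 1: reader had caught up; this write fills an empty ring, signal. */
	printf("signal host: %d\n", ring_was_empty(128, read_index));	/* 1 */

	/* Case 2: unread data remains; reader will see the new packet anyway. */
	printf("signal host: %d\n", ring_was_empty(512, read_index));	/* 0 */

	return 0;
}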
@@ -164,14 +153,18 @@
 }

 /* Get various debug metrics for the specified ring buffer. */
-int hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
+int hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
 			 struct hv_ring_buffer_debug_info *debug_info)
 {
 	u32 bytes_avail_towrite;
 	u32 bytes_avail_toread;

-	if (!ring_info->ring_buffer)
+	mutex_lock(&ring_info->ring_buffer_mutex);
+
+	if (!ring_info->ring_buffer) {
+		mutex_unlock(&ring_info->ring_buffer_mutex);
 		return -EINVAL;
+	}

 	hv_get_ringbuffer_availbytes(ring_info,
 				     &bytes_avail_toread,
@@ -182,9 +175,18 @@
 	debug_info->current_write_index = ring_info->ring_buffer->write_index;
 	debug_info->current_interrupt_mask
 		= ring_info->ring_buffer->interrupt_mask;
+	mutex_unlock(&ring_info->ring_buffer_mutex);
+
 	return 0;
 }
 EXPORT_SYMBOL_GPL(hv_ringbuffer_get_debuginfo);
+
+/* Initialize a channel's ring buffer info mutex locks */
+void hv_ringbuffer_pre_init(struct vmbus_channel *channel)
+{
+	mutex_init(&channel->inbound.ring_buffer_mutex);
+	mutex_init(&channel->outbound.ring_buffer_mutex);
+}

 /* Initialize the ring buffer. */
 int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
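Together with the hv_ringbuffer_get_debuginfo changes above, the hv_ringbuffer_cleanup hunk further down takes the same ring_buffer_mutex while unmapping the buffer and clearing the pointer, so a debug-info read can never dereference a freed ring buffer. A minimal userspace analogy of that check-and-use-under-one-lock pattern, using pthreads and hypothetical names rather than the kernel mutex API:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct ring_info {
	pthread_mutex_t lock;
	int *buffer;		/* stands in for ring_info->ring_buffer */
};

static int read_debug_value(struct ring_info *ri, int *out)
{
	pthread_mutex_lock(&ri->lock);
	if (!ri->buffer) {		/* already cleaned up */
		pthread_mutex_unlock(&ri->lock);
		return -1;
	}
	*out = ri->buffer[0];		/* safe: cleanup cannot run concurrently */
	pthread_mutex_unlock(&ri->lock);
	return 0;
}

static void cleanup(struct ring_info *ri)
{
	pthread_mutex_lock(&ri->lock);
	free(ri->buffer);
	ri->buffer = NULL;		/* later readers now fail safely */
	pthread_mutex_unlock(&ri->lock);
}

int main(void)
{
	struct ring_info ri = { PTHREAD_MUTEX_INITIALIZER, calloc(16, sizeof(int)) };
	int v;

	printf("before cleanup: %d\n", read_debug_value(&ri, &v));	/* 0 */
	cleanup(&ri);
	printf("after cleanup: %d\n", read_debug_value(&ri, &v));	/* -1 */
	return 0;
}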
@@ -194,8 +196,6 @@
 	struct page **pages_wraparound;

 	BUILD_BUG_ON((sizeof(struct hv_ring_buffer) != PAGE_SIZE));
-
-	memset(ring_info, 0, sizeof(struct hv_ring_buffer_info));

 	/*
 	 * First page holds struct hv_ring_buffer, do wraparound mapping for
@@ -230,6 +230,7 @@
 		reciprocal_value(ring_info->ring_size / 10);
 	ring_info->ring_datasize = ring_info->ring_size -
 		sizeof(struct hv_ring_buffer);
+	ring_info->priv_read_index = 0;

 	spin_lock_init(&ring_info->ring_lock);

@@ -239,8 +240,24 @@
 /* Cleanup the ring buffer. */
 void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
 {
+	mutex_lock(&ring_info->ring_buffer_mutex);
 	vunmap(ring_info->ring_buffer);
+	ring_info->ring_buffer = NULL;
+	mutex_unlock(&ring_info->ring_buffer_mutex);
 }
+
+/*
+ * Check if the ring buffer spinlock is available to take or not; used on
+ * atomic contexts, like panic path (see the Hyper-V framebuffer driver).
+ */
+
+bool hv_ringbuffer_spinlock_busy(struct vmbus_channel *channel)
+{
+	struct hv_ring_buffer_info *rinfo = &channel->outbound;
+
+	return spin_is_locked(&rinfo->ring_lock);
+}
+EXPORT_SYMBOL_GPL(hv_ringbuffer_spinlock_busy);

 /* Write to the ring buffer. */
 int hv_ringbuffer_write(struct vmbus_channel *channel,
@@ -271,9 +288,18 @@
 	 * is empty since the read index == write index.
 	 */
 	if (bytes_avail_towrite <= totalbytes_towrite) {
+		++channel->out_full_total;
+
+		if (!channel->out_full_flag) {
+			++channel->out_full_first;
+			channel->out_full_flag = true;
+		}
+
 		spin_unlock_irqrestore(&outring_info->ring_lock, flags);
 		return -EAGAIN;
 	}
+
+	channel->out_full_flag = false;

 	/* Write to the ring buffer */
 	next_write_location = hv_get_next_write_location(outring_info);
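The counters added here distinguish every ring-full event (out_full_total) from the first event of each full episode (out_full_first); out_full_flag is cleared once a write succeeds again. A small standalone sketch of that flag/counter pattern, with hypothetical names:

#include <stdbool.h>
#include <stdio.h>

struct full_stats {
	unsigned long total;
	unsigned long first;
	bool full_flag;
};

static void on_write_attempt(struct full_stats *s, bool ring_full)
{
	if (ring_full) {
		++s->total;			/* every failed attempt */
		if (!s->full_flag) {
			++s->first;		/* only the first of this episode */
			s->full_flag = true;
		}
		return;
	}
	s->full_flag = false;			/* write went through; episode over */
}

int main(void)
{
	struct full_stats s = { 0, 0, false };
	bool trace[] = { true, true, true, false, true, false };

	for (unsigned i = 0; i < sizeof(trace) / sizeof(trace[0]); i++)
		on_write_attempt(&s, trace[i]);

	/* 4 failed attempts overall, in 2 distinct full episodes */
	printf("total=%lu first=%lu\n", s.total, s.first);
	return 0;
}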
@@ -365,7 +391,16 @@
 static u32 hv_pkt_iter_avail(const struct hv_ring_buffer_info *rbi)
 {
 	u32 priv_read_loc = rbi->priv_read_index;
-	u32 write_loc = READ_ONCE(rbi->ring_buffer->write_index);
+	u32 write_loc;
+
+	/*
+	 * The Hyper-V host writes the packet data, then uses
+	 * store_release() to update the write_index. Use load_acquire()
+	 * here to prevent loads of the packet data from being re-ordered
+	 * before the read of the write_index and potentially getting
+	 * stale data.
+	 */
+	write_loc = virt_load_acquire(&rbi->ring_buffer->write_index);

 	if (write_loc >= priv_read_loc)
 		return write_loc - priv_read_loc;
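The comment added above describes a release/acquire pairing: the host publishes packet data and then store-releases write_index, so the guest must load-acquire the index before reading the data. A minimal userspace sketch of that pairing, using C11 <stdatomic.h> rather than the kernel's virt_* helpers (all names here are hypothetical):

#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t ring[64];
static _Atomic uint32_t write_index;

static void *producer(void *arg)
{
	(void)arg;
	ring[0] = 0xabcd;					/* packet payload */
	/* release: payload store above cannot be reordered past this */
	atomic_store_explicit(&write_index, 1, memory_order_release);
	return NULL;
}

static void *consumer(void *arg)
{
	(void)arg;
	/* acquire pairs with the release: once the index is seen, so is the data */
	while (atomic_load_explicit(&write_index, memory_order_acquire) == 0)
		;
	printf("payload: 0x%x\n", ring[0]);			/* guaranteed 0xabcd */
	return NULL;
}

int main(void)
{
	pthread_t p, c;

	pthread_create(&c, NULL, consumer, NULL);
	pthread_create(&p, NULL, producer, NULL);
	pthread_join(p, NULL);
	pthread_join(c, NULL);
	return 0;
}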
@@ -383,6 +418,7 @@
 	struct hv_ring_buffer_info *rbi = &channel->inbound;
 	struct vmpacket_descriptor *desc;

+	hv_debug_delay_test(channel, MESSAGE_DELAY);
 	if (hv_pkt_iter_avail(rbi) < sizeof(struct vmpacket_descriptor))
 		return NULL;

@@ -408,6 +444,7 @@
 	u32 packetlen = desc->len8 << 3;
 	u32 dsize = rbi->ring_datasize;

+	hv_debug_delay_test(channel, MESSAGE_DELAY);
 	/* bump offset to next potential packet */
 	rbi->priv_read_index += packetlen + VMBUS_PKT_TRAILER;
 	if (rbi->priv_read_index >= dsize)
@@ -529,6 +566,7 @@
 	if (curr_write_sz <= pending_sz)
 		return;

+	++channel->intr_in_full;
 	vmbus_setevent(channel);
 }
 EXPORT_SYMBOL_GPL(hv_pkt_iter_close);