2024-05-14 bedbef8ad3e75a304af6361af235302bcc61d06b

--- a/kernel/drivers/vhost/vsock.c
+++ b/kernel/drivers/vhost/vsock.c
@@ -1,11 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * vhost transport for vsock
  *
  * Copyright (C) 2013-2015 Red Hat, Inc.
  * Author: Asias He <asias@redhat.com>
  *         Stefan Hajnoczi <stefanha@redhat.com>
- *
- * This work is licensed under the terms of the GNU GPL, version 2.
  */
 #include <linux/miscdevice.h>
 #include <linux/atomic.h>
@@ -35,14 +34,14 @@
 };
 
 /* Used to track all the vhost_vsock instances on the system. */
-static DEFINE_SPINLOCK(vhost_vsock_lock);
+static DEFINE_MUTEX(vhost_vsock_mutex);
 static DEFINE_READ_MOSTLY_HASHTABLE(vhost_vsock_hash, 8);
 
 struct vhost_vsock {
 	struct vhost_dev dev;
 	struct vhost_virtqueue vqs[2];
 
-	/* Link to global vhost_vsock_hash, writes use vhost_vsock_lock */
+	/* Link to global vhost_vsock_hash, writes use vhost_vsock_mutex */
 	struct hlist_node hash;
 
 	struct vhost_work send_pkt_work;
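
Aside: the hunk above moves writer-side serialization for vhost_vsock_hash from a spinlock to a mutex, while lookups stay under the RCU read lock. A minimal sketch of that writer/reader split, using made-up obj_* names rather than anything from this file:

#include <linux/hashtable.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/types.h>

static DEFINE_MUTEX(obj_mutex);				/* serializes writers only */
static DEFINE_READ_MOSTLY_HASHTABLE(obj_hash, 8);

struct obj {
	u32 key;
	struct hlist_node hash;
};

/* Callers must hold rcu_read_lock() (or obj_mutex) to dereference the result. */
static struct obj *obj_lookup(u32 key)
{
	struct obj *o;

	hash_for_each_possible_rcu(obj_hash, o, hash, key)
		if (o->key == key)
			return o;
	return NULL;
}

static void obj_add(struct obj *o)
{
	mutex_lock(&obj_mutex);
	hash_add_rcu(obj_hash, &o->hash, o->key);
	mutex_unlock(&obj_mutex);
}

static void obj_remove(struct obj *o)
{
	mutex_lock(&obj_mutex);
	hash_del_rcu(&o->hash);
	mutex_unlock(&obj_mutex);
	synchronize_rcu();		/* wait for RCU readers before freeing */
	kfree(o);
}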
@@ -59,7 +58,7 @@
 	return VHOST_VSOCK_DEFAULT_HOST_CID;
 }
 
-/* Callers that dereference the return value must hold vhost_vsock_lock or the
+/* Callers that dereference the return value must hold vhost_vsock_mutex or the
  * RCU read lock.
  */
 static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
@@ -92,7 +91,7 @@
 
 	mutex_lock(&vq->mutex);
 
-	if (!vq->private_data)
+	if (!vhost_vq_get_backend(vq))
 		goto out;
 
 	/* Avoid further vmexits, we're already processing the virtqueue */
@@ -197,6 +196,12 @@
 	 * to send it with the next available buffer.
 	 */
 	if (pkt->off < pkt->len) {
+		/* We are queueing the same virtio_vsock_pkt to handle
+		 * the remaining bytes, and we want to deliver it
+		 * to monitoring devices in the next iteration.
+		 */
+		pkt->tap_delivered = false;
+
 		spin_lock_bh(&vsock->send_pkt_list_lock);
 		list_add(&pkt->list, &vsock->send_pkt_list);
 		spin_unlock_bh(&vsock->send_pkt_list_lock);
@@ -354,11 +359,13 @@
 		return NULL;
 	}
 
-	pkt->buf = kmalloc(pkt->len, GFP_KERNEL);
+	pkt->buf = kvmalloc(pkt->len, GFP_KERNEL);
 	if (!pkt->buf) {
 		kfree(pkt);
 		return NULL;
 	}
+
+	pkt->buf_len = pkt->len;
 
 	nbytes = copy_from_iter(pkt->buf, pkt->len, &iov_iter);
 	if (nbytes != pkt->len) {
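
Aside: switching from kmalloc() to kvmalloc() above lets large packet buffers fall back to vmalloc memory, so the buffer must later be released with kvfree(), and the hunk also records the allocated size in buf_len. A rough illustration with made-up names:

#include <linux/mm.h>
#include <linux/slab.h>

struct example_buf {
	void *data;
	size_t len;			/* remember the allocated size */
};

static int example_buf_alloc(struct example_buf *b, size_t len)
{
	b->data = kvmalloc(len, GFP_KERNEL);	/* kmalloc, or vmalloc for large sizes */
	if (!b->data)
		return -ENOMEM;
	b->len = len;
	return 0;
}

static void example_buf_free(struct example_buf *b)
{
	kvfree(b->data);	/* not kfree(): the buffer may be vmalloc-backed */
}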
@@ -385,6 +392,8 @@
 
 static struct virtio_transport vhost_transport = {
 	.transport = {
+		.module = THIS_MODULE,
+
 		.get_local_cid = vhost_transport_get_local_cid,
 
 		.init = virtio_transport_do_socket_init,
@@ -417,13 +426,8 @@
 		.notify_send_pre_block = virtio_transport_notify_send_pre_block,
 		.notify_send_pre_enqueue = virtio_transport_notify_send_pre_enqueue,
 		.notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,
+		.notify_buffer_size = virtio_transport_notify_buffer_size,
 
-		.set_buffer_size = virtio_transport_set_buffer_size,
-		.set_min_buffer_size = virtio_transport_set_min_buffer_size,
-		.set_max_buffer_size = virtio_transport_set_max_buffer_size,
-		.get_buffer_size = virtio_transport_get_buffer_size,
-		.get_min_buffer_size = virtio_transport_get_min_buffer_size,
-		.get_max_buffer_size = virtio_transport_get_max_buffer_size,
 	},
 
 	.send_pkt = vhost_transport_send_pkt,
@@ -442,7 +446,7 @@
 
 	mutex_lock(&vq->mutex);
 
-	if (!vq->private_data)
+	if (!vhost_vq_get_backend(vq))
 		goto out;
 
 	vhost_disable_notify(&vsock->dev, vq);
@@ -535,8 +539,8 @@
 			goto err_vq;
 		}
 
-		if (!vq->private_data) {
-			vq->private_data = vsock;
+		if (!vhost_vq_get_backend(vq)) {
+			vhost_vq_set_backend(vq, vsock);
 			ret = vhost_vq_init_access(vq);
 			if (ret)
 				goto err_vq;
@@ -554,14 +558,14 @@
 	return 0;
 
 err_vq:
-	vq->private_data = NULL;
+	vhost_vq_set_backend(vq, NULL);
 	mutex_unlock(&vq->mutex);
 
 	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
 		vq = &vsock->vqs[i];
 
 		mutex_lock(&vq->mutex);
-		vq->private_data = NULL;
+		vhost_vq_set_backend(vq, NULL);
 		mutex_unlock(&vq->mutex);
 	}
 err:
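
Aside: the vq->private_data accesses replaced in the hunks above now go through the vhost backend accessor helpers. A bare sketch of how a vhost driver attaches and detaches its backend pointer under vq->mutex (my_dev is a placeholder type, not part of this driver):

#include "vhost.h"	/* drivers/vhost/vhost.h: vhost_vq_get_backend()/vhost_vq_set_backend() */

struct my_dev;		/* placeholder for the driver's device structure */

static void my_dev_start_vq(struct my_dev *dev, struct vhost_virtqueue *vq)
{
	mutex_lock(&vq->mutex);
	if (!vhost_vq_get_backend(vq))		/* not started yet */
		vhost_vq_set_backend(vq, dev);	/* publish the backend pointer */
	mutex_unlock(&vq->mutex);
}

static void my_dev_stop_vq(struct vhost_virtqueue *vq)
{
	mutex_lock(&vq->mutex);
	vhost_vq_set_backend(vq, NULL);		/* handlers now see no backend and bail out */
	mutex_unlock(&vq->mutex);
}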
@@ -586,7 +590,7 @@
 		struct vhost_virtqueue *vq = &vsock->vqs[i];
 
 		mutex_lock(&vq->mutex);
-		vq->private_data = NULL;
+		vhost_vq_set_backend(vq, NULL);
 		mutex_unlock(&vq->mutex);
 	}
 
@@ -630,7 +634,7 @@
 
 	vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs),
		       UIO_MAXIOV, VHOST_VSOCK_PKT_WEIGHT,
-		       VHOST_VSOCK_WEIGHT);
+		       VHOST_VSOCK_WEIGHT, true, NULL);
 
 	file->private_data = vsock;
 	spin_lock_init(&vsock->send_pkt_list_lock);
@@ -683,10 +687,10 @@
 {
 	struct vhost_vsock *vsock = file->private_data;
 
-	spin_lock_bh(&vhost_vsock_lock);
+	mutex_lock(&vhost_vsock_mutex);
 	if (vsock->guest_cid)
 		hash_del_rcu(&vsock->hash);
-	spin_unlock_bh(&vhost_vsock_lock);
+	mutex_unlock(&vhost_vsock_mutex);
 
 	/* Wait for other CPUs to finish using vsock */
 	synchronize_rcu();
@@ -734,11 +738,17 @@
 	if (guest_cid > U32_MAX)
 		return -EINVAL;
 
+	/* Refuse if CID is assigned to the guest->host transport (i.e. nested
+	 * VM), to make the loopback work.
+	 */
+	if (vsock_find_cid(guest_cid))
+		return -EADDRINUSE;
+
 	/* Refuse if CID is already in use */
-	spin_lock_bh(&vhost_vsock_lock);
+	mutex_lock(&vhost_vsock_mutex);
 	other = vhost_vsock_get(guest_cid);
 	if (other && other != vsock) {
-		spin_unlock_bh(&vhost_vsock_lock);
+		mutex_unlock(&vhost_vsock_mutex);
 		return -EADDRINUSE;
 	}
 
@@ -747,7 +757,7 @@
 
 	vsock->guest_cid = guest_cid;
 	hash_add_rcu(vhost_vsock_hash, &vsock->hash, vsock->guest_cid);
-	spin_unlock_bh(&vhost_vsock_lock);
+	mutex_unlock(&vhost_vsock_mutex);
 
 	return 0;
 }
@@ -820,23 +830,13 @@
 	}
 }
 
-#ifdef CONFIG_COMPAT
-static long vhost_vsock_dev_compat_ioctl(struct file *f, unsigned int ioctl,
-					 unsigned long arg)
-{
-	return vhost_vsock_dev_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
-}
-#endif
-
 static const struct file_operations vhost_vsock_fops = {
 	.owner = THIS_MODULE,
 	.open = vhost_vsock_dev_open,
 	.release = vhost_vsock_dev_release,
 	.llseek = noop_llseek,
 	.unlocked_ioctl = vhost_vsock_dev_ioctl,
-#ifdef CONFIG_COMPAT
-	.compat_ioctl = vhost_vsock_dev_compat_ioctl,
-#endif
+	.compat_ioctl = compat_ptr_ioctl,
 };
 
 static struct miscdevice vhost_vsock_misc = {
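
Aside: dropping the hand-rolled compat wrapper in favor of compat_ptr_ioctl() works because the ioctl arguments here are pointers; the helper applies compat_ptr() and forwards to ->unlocked_ioctl, and it is defined as NULL when CONFIG_COMPAT is off, so no #ifdef is needed. A small sketch with a placeholder my_dev_ioctl():

#include <linux/fs.h>
#include <linux/module.h>

static long my_dev_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
	return 0;	/* placeholder ioctl handler that only takes pointer arguments */
}

static const struct file_operations my_dev_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = my_dev_ioctl,
	.compat_ioctl = compat_ptr_ioctl,	/* no #ifdef CONFIG_COMPAT needed */
	.llseek = noop_llseek,
};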
@@ -849,16 +849,24 @@
 {
 	int ret;
 
-	ret = vsock_core_init(&vhost_transport.transport);
+	ret = vsock_core_register(&vhost_transport.transport,
+				  VSOCK_TRANSPORT_F_H2G);
 	if (ret < 0)
 		return ret;
-	return misc_register(&vhost_vsock_misc);
+
+	ret = misc_register(&vhost_vsock_misc);
+	if (ret) {
+		vsock_core_unregister(&vhost_transport.transport);
+		return ret;
+	}
+
+	return 0;
 };
 
 static void __exit vhost_vsock_exit(void)
 {
 	misc_deregister(&vhost_vsock_misc);
-	vsock_core_exit();
+	vsock_core_unregister(&vhost_transport.transport);
 };
 
 module_init(vhost_vsock_init);
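
Aside: the reworked vhost_vsock_init() above registers the transport first and rolls it back if misc device registration fails, and the exit path unregisters in the reverse order. The same unwind pattern in generic form (the step_* helpers below are stand-ins, not real APIs):

#include <linux/init.h>
#include <linux/module.h>

static int step_a_register(void) { return 0; }		/* stand-in for vsock_core_register() */
static void step_a_unregister(void) { }			/* stand-in for vsock_core_unregister() */
static int step_b_register(void) { return 0; }		/* stand-in for misc_register() */
static void step_b_unregister(void) { }			/* stand-in for misc_deregister() */

static int __init my_mod_init(void)
{
	int ret;

	ret = step_a_register();
	if (ret < 0)
		return ret;

	ret = step_b_register();
	if (ret) {
		step_a_unregister();	/* undo the first step before failing */
		return ret;
	}

	return 0;
}

static void __exit my_mod_exit(void)
{
	step_b_unregister();		/* tear down in reverse order of init */
	step_a_unregister();
}

module_init(my_mod_init);
module_exit(my_mod_exit);
MODULE_LICENSE("GPL");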