| .. | .. |
|---|
| 6 | 6 | */ |
|---|
| 7 | 7 | |
|---|
| 8 | 8 | #include <linux/device.h> |
|---|
| 9 | +#include <linux/io.h> |
|---|
| 9 | 10 | #include <linux/mm.h> |
|---|
| 10 | 11 | #include <linux/sched.h> |
|---|
| 11 | 12 | #include <linux/sizes.h> |
|---|
| .. | .. |
|---|
| 26 | 27 | (u8 *)(a) + sizeof(struct vbg_ioctl_hgcm_call))) |
|---|
| 27 | 28 | |
|---|
| 28 | 29 | #define GUEST_MAPPINGS_TRIES 5 |
|---|
| 30 | + |
|---|
| 31 | +#define VBG_KERNEL_REQUEST \ |
|---|
| 32 | + (VMMDEV_REQUESTOR_KERNEL | VMMDEV_REQUESTOR_USR_DRV | \ |
|---|
| 33 | + VMMDEV_REQUESTOR_CON_DONT_KNOW | VMMDEV_REQUESTOR_TRUST_NOT_GIVEN) |
|---|
| 29 | 34 | |
|---|
| 30 | 35 | /** |
|---|
| 31 | 36 | * Reserves memory in which the VMM can relocate any guest mappings |
|---|
| .. | .. |
|---|
| 48 | 53 | int i, rc; |
|---|
| 49 | 54 | |
|---|
| 50 | 55 | /* Query the required space. */ |
|---|
| 51 | | - req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HYPERVISOR_INFO); |
|---|
| 56 | + req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HYPERVISOR_INFO, |
|---|
| 57 | + VBG_KERNEL_REQUEST); |
|---|
| 52 | 58 | if (!req) |
|---|
| 53 | 59 | return; |
|---|
| 54 | 60 | |
|---|
| .. | .. |
|---|
| 135 | 141 | * Tell the host that we're going to free the memory we reserved for |
|---|
| 136 | 142 | * it, then free it up. (Leak the memory if anything goes wrong here.) |
|---|
| 137 | 143 | */ |
|---|
| 138 | | - req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_HYPERVISOR_INFO); |
|---|
| 144 | + req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_HYPERVISOR_INFO, |
|---|
| 145 | + VBG_KERNEL_REQUEST); |
|---|
| 139 | 146 | if (!req) |
|---|
| 140 | 147 | return; |
|---|
| 141 | 148 | |
|---|
| .. | .. |
|---|
| 172 | 179 | struct vmmdev_guest_info2 *req2 = NULL; |
|---|
| 173 | 180 | int rc, ret = -ENOMEM; |
|---|
| 174 | 181 | |
|---|
| 175 | | - req1 = vbg_req_alloc(sizeof(*req1), VMMDEVREQ_REPORT_GUEST_INFO); |
|---|
| 176 | | - req2 = vbg_req_alloc(sizeof(*req2), VMMDEVREQ_REPORT_GUEST_INFO2); |
|---|
| 182 | + req1 = vbg_req_alloc(sizeof(*req1), VMMDEVREQ_REPORT_GUEST_INFO, |
|---|
| 183 | + VBG_KERNEL_REQUEST); |
|---|
| 184 | + req2 = vbg_req_alloc(sizeof(*req2), VMMDEVREQ_REPORT_GUEST_INFO2, |
|---|
| 185 | + VBG_KERNEL_REQUEST); |
|---|
| 177 | 186 | if (!req1 || !req2) |
|---|
| 178 | 187 | goto out_free; |
|---|
| 179 | 188 | |
|---|
| .. | .. |
|---|
| 187 | 196 | req2->additions_minor = VBG_VERSION_MINOR; |
|---|
| 188 | 197 | req2->additions_build = VBG_VERSION_BUILD; |
|---|
| 189 | 198 | req2->additions_revision = VBG_SVN_REV; |
|---|
| 190 | | - /* (no features defined yet) */ |
|---|
| 191 | | - req2->additions_features = 0; |
|---|
| 199 | + req2->additions_features = |
|---|
| 200 | + VMMDEV_GUEST_INFO2_ADDITIONS_FEATURES_REQUESTOR_INFO; |
|---|
| 192 | 201 | strlcpy(req2->name, VBG_VERSION_STRING, |
|---|
| 193 | 202 | sizeof(req2->name)); |
|---|
| 194 | 203 | |
|---|
| .. | .. |
|---|
| 230 | 239 | struct vmmdev_guest_status *req; |
|---|
| 231 | 240 | int rc; |
|---|
| 232 | 241 | |
|---|
| 233 | | - req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_REPORT_GUEST_STATUS); |
|---|
| 242 | + req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_REPORT_GUEST_STATUS, |
|---|
| 243 | + VBG_KERNEL_REQUEST); |
|---|
| 234 | 244 | if (!req) |
|---|
| 235 | 245 | return -ENOMEM; |
|---|
| 236 | 246 | |
|---|
| .. | .. |
|---|
| 423 | 433 | struct vmmdev_heartbeat *req; |
|---|
| 424 | 434 | int rc; |
|---|
| 425 | 435 | |
|---|
| 426 | | - req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_HEARTBEAT_CONFIGURE); |
|---|
| 436 | + req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_HEARTBEAT_CONFIGURE, |
|---|
| 437 | + VBG_KERNEL_REQUEST); |
|---|
| 427 | 438 | if (!req) |
|---|
| 428 | 439 | return -ENOMEM; |
|---|
| 429 | 440 | |
|---|
| .. | .. |
|---|
| 457 | 468 | |
|---|
| 458 | 469 | gdev->guest_heartbeat_req = vbg_req_alloc( |
|---|
| 459 | 470 | sizeof(*gdev->guest_heartbeat_req), |
|---|
| 460 | | - VMMDEVREQ_GUEST_HEARTBEAT); |
|---|
| 471 | + VMMDEVREQ_GUEST_HEARTBEAT, |
|---|
| 472 | + VBG_KERNEL_REQUEST); |
|---|
| 461 | 473 | if (!gdev->guest_heartbeat_req) |
|---|
| 462 | 474 | return -ENOMEM; |
|---|
| 463 | 475 | |
|---|
| .. | .. |
|---|
| 528 | 540 | struct vmmdev_mask *req; |
|---|
| 529 | 541 | int rc; |
|---|
| 530 | 542 | |
|---|
| 531 | | - req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK); |
|---|
| 543 | + req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK, |
|---|
| 544 | + VBG_KERNEL_REQUEST); |
|---|
| 532 | 545 | if (!req) |
|---|
| 533 | 546 | return -ENOMEM; |
|---|
| 534 | 547 | |
|---|
| .. | .. |
|---|
| 546 | 559 | * Changes the event filter mask for the given session. |
|---|
| 547 | 560 | * |
|---|
| 548 | 561 | * This is called in response to VBG_IOCTL_CHANGE_FILTER_MASK as well as to |
|---|
| 549 | | - * do session cleanup. Takes the session spinlock. |
|---|
| 562 | + * do session cleanup. Takes the session mutex. |
|---|
| 550 | 563 | * |
|---|
| 551 | 564 | * Return: 0 or negative errno value. |
|---|
| 552 | 565 | * @gdev: The Guest extension device. |
|---|
| .. | .. |
|---|
| 567 | 580 | u32 changed, previous; |
|---|
| 568 | 581 | int rc, ret = 0; |
|---|
| 569 | 582 | |
|---|
| 570 | | - /* Allocate a request buffer before taking the spinlock */ |
|---|
| 571 | | - req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK); |
|---|
| 583 | + /* |
|---|
| 584 | + * Allocate a request buffer before taking the mutex, when |
|---|
| 585 | + * the session is being terminated the requestor is the kernel, |
|---|
| 586 | + * as we're cleaning up. |
|---|
| 587 | + */ |
|---|
| 588 | + req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK, |
|---|
| 589 | + session_termination ? VBG_KERNEL_REQUEST : |
|---|
| 590 | + session->requestor); |
|---|
| 572 | 591 | if (!req) { |
|---|
| 573 | 592 | if (!session_termination) |
|---|
| 574 | 593 | return -ENOMEM; |
|---|
| .. | .. |
|---|
| 627 | 646 | struct vmmdev_mask *req; |
|---|
| 628 | 647 | int rc; |
|---|
| 629 | 648 | |
|---|
| 630 | | - req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES); |
|---|
| 649 | + req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES, |
|---|
| 650 | + VBG_KERNEL_REQUEST); |
|---|
| 631 | 651 | if (!req) |
|---|
| 632 | 652 | return -ENOMEM; |
|---|
| 633 | 653 | |
|---|
| .. | .. |
|---|
| 642 | 662 | } |
|---|
| 643 | 663 | |
|---|
| 644 | 664 | /** |
|---|
| 645 | | - * Sets the guest capabilities for a session. Takes the session spinlock. |
|---|
| 665 | + * Set guest capabilities on the host. |
|---|
| 666 | + * Must be called with gdev->session_mutex held. |
|---|
| 667 | + * Return: 0 or negative errno value. |
|---|
| 668 | + * @gdev: The Guest extension device. |
|---|
| 669 | + * @session: The session. |
|---|
| 670 | + * @session_termination: Set if we're called by the session cleanup code. |
|---|
| 671 | + */ |
|---|
| 672 | +static int vbg_set_host_capabilities(struct vbg_dev *gdev, |
|---|
| 673 | + struct vbg_session *session, |
|---|
| 674 | + bool session_termination) |
|---|
| 675 | +{ |
|---|
| 676 | + struct vmmdev_mask *req; |
|---|
| 677 | + u32 caps; |
|---|
| 678 | + int rc; |
|---|
| 679 | + |
|---|
| 680 | + WARN_ON(!mutex_is_locked(&gdev->session_mutex)); |
|---|
| 681 | + |
|---|
| 682 | + caps = gdev->acquired_guest_caps | gdev->set_guest_caps_tracker.mask; |
|---|
| 683 | + |
|---|
| 684 | + if (gdev->guest_caps_host == caps) |
|---|
| 685 | + return 0; |
|---|
| 686 | + |
|---|
| 687 | + /* On termination the requestor is the kernel, as we're cleaning up. */ |
|---|
| 688 | + req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES, |
|---|
| 689 | + session_termination ? VBG_KERNEL_REQUEST : |
|---|
| 690 | + session->requestor); |
|---|
| 691 | + if (!req) { |
|---|
| 692 | + gdev->guest_caps_host = U32_MAX; |
|---|
| 693 | + return -ENOMEM; |
|---|
| 694 | + } |
|---|
| 695 | + |
|---|
| 696 | + req->or_mask = caps; |
|---|
| 697 | + req->not_mask = ~caps; |
|---|
| 698 | + rc = vbg_req_perform(gdev, req); |
|---|
| 699 | + vbg_req_free(req, sizeof(*req)); |
|---|
| 700 | + |
|---|
| 701 | + gdev->guest_caps_host = (rc >= 0) ? caps : U32_MAX; |
|---|
| 702 | + |
|---|
| 703 | + return vbg_status_code_to_errno(rc); |
|---|
| 704 | +} |
|---|
| 705 | + |
|---|
| 706 | +/** |
|---|
| 707 | + * Acquire (get exclusive access) guest capabilities for a session. |
|---|
| 708 | + * Takes the session mutex. |
|---|
| 709 | + * Return: 0 or negative errno value. |
|---|
| 710 | + * @gdev: The Guest extension device. |
|---|
| 711 | + * @session: The session. |
|---|
| 712 | + * @or_mask: The capabilities to add. |
|---|
| 713 | + * @not_mask: The capabilities to remove. |
|---|
| 714 | + * @flags: Flags (VBGL_IOC_AGC_FLAGS_XXX). |
|---|
| 715 | + * @session_termination: Set if we're called by the session cleanup code. |
|---|
| 716 | + * This tweaks the error handling so we perform |
|---|
| 717 | + * proper session cleanup even if the host |
|---|
| 718 | + * misbehaves. |
|---|
| 719 | + */ |
|---|
| 720 | +static int vbg_acquire_session_capabilities(struct vbg_dev *gdev, |
|---|
| 721 | + struct vbg_session *session, |
|---|
| 722 | + u32 or_mask, u32 not_mask, |
|---|
| 723 | + u32 flags, bool session_termination) |
|---|
| 724 | +{ |
|---|
| 725 | + unsigned long irqflags; |
|---|
| 726 | + bool wakeup = false; |
|---|
| 727 | + int ret = 0; |
|---|
| 728 | + |
|---|
| 729 | + mutex_lock(&gdev->session_mutex); |
|---|
| 730 | + |
|---|
| 731 | + if (gdev->set_guest_caps_tracker.mask & or_mask) { |
|---|
| 732 | + vbg_err("%s error: cannot acquire caps which are currently set\n", |
|---|
| 733 | + __func__); |
|---|
| 734 | + ret = -EINVAL; |
|---|
| 735 | + goto out; |
|---|
| 736 | + } |
|---|
| 737 | + |
|---|
| 738 | + /* |
|---|
| 739 | + * Mark any caps in the or_mask as now being in acquire-mode. Note |
|---|
| 740 | + * once caps are in acquire_mode they always stay in this mode. |
|---|
| 741 | + * This impacts event handling, so we take the event-lock. |
|---|
| 742 | + */ |
|---|
| 743 | + spin_lock_irqsave(&gdev->event_spinlock, irqflags); |
|---|
| 744 | + gdev->acquire_mode_guest_caps |= or_mask; |
|---|
| 745 | + spin_unlock_irqrestore(&gdev->event_spinlock, irqflags); |
|---|
| 746 | + |
|---|
| 747 | + /* If we only have to switch the caps to acquire mode, we're done. */ |
|---|
| 748 | + if (flags & VBGL_IOC_AGC_FLAGS_CONFIG_ACQUIRE_MODE) |
|---|
| 749 | + goto out; |
|---|
| 750 | + |
|---|
| 751 | + not_mask &= ~or_mask; /* or_mask takes priority over not_mask */ |
|---|
| 752 | + not_mask &= session->acquired_guest_caps; |
|---|
| 753 | + or_mask &= ~session->acquired_guest_caps; |
|---|
| 754 | + |
|---|
| 755 | + if (or_mask == 0 && not_mask == 0) |
|---|
| 756 | + goto out; |
|---|
| 757 | + |
|---|
| 758 | + if (gdev->acquired_guest_caps & or_mask) { |
|---|
| 759 | + ret = -EBUSY; |
|---|
| 760 | + goto out; |
|---|
| 761 | + } |
|---|
| 762 | + |
|---|
| 763 | + gdev->acquired_guest_caps |= or_mask; |
|---|
| 764 | + gdev->acquired_guest_caps &= ~not_mask; |
|---|
| 765 | + /* session->acquired_guest_caps impacts event handling, take the lock */ |
|---|
| 766 | + spin_lock_irqsave(&gdev->event_spinlock, irqflags); |
|---|
| 767 | + session->acquired_guest_caps |= or_mask; |
|---|
| 768 | + session->acquired_guest_caps &= ~not_mask; |
|---|
| 769 | + spin_unlock_irqrestore(&gdev->event_spinlock, irqflags); |
|---|
| 770 | + |
|---|
| 771 | + ret = vbg_set_host_capabilities(gdev, session, session_termination); |
|---|
| 772 | + /* Roll back on failure, unless it's session termination time. */ |
|---|
| 773 | + if (ret < 0 && !session_termination) { |
|---|
| 774 | + gdev->acquired_guest_caps &= ~or_mask; |
|---|
| 775 | + gdev->acquired_guest_caps |= not_mask; |
|---|
| 776 | + spin_lock_irqsave(&gdev->event_spinlock, irqflags); |
|---|
| 777 | + session->acquired_guest_caps &= ~or_mask; |
|---|
| 778 | + session->acquired_guest_caps |= not_mask; |
|---|
| 779 | + spin_unlock_irqrestore(&gdev->event_spinlock, irqflags); |
|---|
| 780 | + } |
|---|
| 781 | + |
|---|
| 782 | + /* |
|---|
| 783 | + * If we added a capability, check if that means some other thread in |
|---|
| 784 | + * our session should be unblocked because there are events pending |
|---|
| 785 | + * (the result of vbg_get_allowed_event_mask_for_session() may change). |
|---|
| 786 | + * |
|---|
| 787 | + * HACK ALERT! When the seamless support capability is added we generate |
|---|
| 788 | + * a seamless change event so that the ring-3 client can sync with |
|---|
| 789 | + * the seamless state. |
|---|
| 790 | + */ |
|---|
| 791 | + if (ret == 0 && or_mask != 0) { |
|---|
| 792 | + spin_lock_irqsave(&gdev->event_spinlock, irqflags); |
|---|
| 793 | + |
|---|
| 794 | + if (or_mask & VMMDEV_GUEST_SUPPORTS_SEAMLESS) |
|---|
| 795 | + gdev->pending_events |= |
|---|
| 796 | + VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST; |
|---|
| 797 | + |
|---|
| 798 | + if (gdev->pending_events) |
|---|
| 799 | + wakeup = true; |
|---|
| 800 | + |
|---|
| 801 | + spin_unlock_irqrestore(&gdev->event_spinlock, irqflags); |
|---|
| 802 | + |
|---|
| 803 | + if (wakeup) |
|---|
| 804 | + wake_up(&gdev->event_wq); |
|---|
| 805 | + } |
|---|
| 806 | + |
|---|
| 807 | +out: |
|---|
| 808 | + mutex_unlock(&gdev->session_mutex); |
|---|
| 809 | + |
|---|
| 810 | + return ret; |
|---|
| 811 | +} |
|---|
| 812 | + |
|---|
| 813 | +/** |
|---|
| 814 | + * Sets the guest capabilities for a session. Takes the session mutex. |
|---|
| 646 | 815 | * Return: 0 or negative errno value. |
|---|
| 647 | 816 | * @gdev: The Guest extension device. |
|---|
| 648 | 817 | * @session: The session. |
|---|
| .. | .. |
|---|
| 658 | 827 | u32 or_mask, u32 not_mask, |
|---|
| 659 | 828 | bool session_termination) |
|---|
| 660 | 829 | { |
|---|
| 661 | | - struct vmmdev_mask *req; |
|---|
| 662 | 830 | u32 changed, previous; |
|---|
| 663 | | - int rc, ret = 0; |
|---|
| 664 | | - |
|---|
| 665 | | - /* Allocate a request buffer before taking the spinlock */ |
|---|
| 666 | | - req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES); |
|---|
| 667 | | - if (!req) { |
|---|
| 668 | | - if (!session_termination) |
|---|
| 669 | | - return -ENOMEM; |
|---|
| 670 | | - /* Ignore allocation failure, we must do session cleanup. */ |
|---|
| 671 | | - } |
|---|
| 831 | + int ret = 0; |
|---|
| 672 | 832 | |
|---|
| 673 | 833 | mutex_lock(&gdev->session_mutex); |
|---|
| 674 | 834 | |
|---|
| 835 | + if (gdev->acquire_mode_guest_caps & or_mask) { |
|---|
| 836 | + vbg_err("%s error: cannot set caps which are in acquire_mode\n", |
|---|
| 837 | + __func__); |
|---|
| 838 | + ret = -EBUSY; |
|---|
| 839 | + goto out; |
|---|
| 840 | + } |
|---|
| 841 | + |
|---|
| 675 | 842 | /* Apply the changes to the session mask. */ |
|---|
| 676 | | - previous = session->guest_caps; |
|---|
| 677 | | - session->guest_caps |= or_mask; |
|---|
| 678 | | - session->guest_caps &= ~not_mask; |
|---|
| 843 | + previous = session->set_guest_caps; |
|---|
| 844 | + session->set_guest_caps |= or_mask; |
|---|
| 845 | + session->set_guest_caps &= ~not_mask; |
|---|
| 679 | 846 | |
|---|
| 680 | 847 | /* If anything actually changed, update the global usage counters. */ |
|---|
| 681 | | - changed = previous ^ session->guest_caps; |
|---|
| 848 | + changed = previous ^ session->set_guest_caps; |
|---|
| 682 | 849 | if (!changed) |
|---|
| 683 | 850 | goto out; |
|---|
| 684 | 851 | |
|---|
| 685 | | - vbg_track_bit_usage(&gdev->guest_caps_tracker, changed, previous); |
|---|
| 686 | | - or_mask = gdev->guest_caps_tracker.mask; |
|---|
| 852 | + vbg_track_bit_usage(&gdev->set_guest_caps_tracker, changed, previous); |
|---|
| 687 | 853 | |
|---|
| 688 | | - if (gdev->guest_caps_host == or_mask || !req) |
|---|
| 689 | | - goto out; |
|---|
| 690 | | - |
|---|
| 691 | | - gdev->guest_caps_host = or_mask; |
|---|
| 692 | | - req->or_mask = or_mask; |
|---|
| 693 | | - req->not_mask = ~or_mask; |
|---|
| 694 | | - rc = vbg_req_perform(gdev, req); |
|---|
| 695 | | - if (rc < 0) { |
|---|
| 696 | | - ret = vbg_status_code_to_errno(rc); |
|---|
| 697 | | - |
|---|
| 698 | | - /* Failed, roll back (unless it's session termination time). */ |
|---|
| 699 | | - gdev->guest_caps_host = U32_MAX; |
|---|
| 700 | | - if (session_termination) |
|---|
| 701 | | - goto out; |
|---|
| 702 | | - |
|---|
| 703 | | - vbg_track_bit_usage(&gdev->guest_caps_tracker, changed, |
|---|
| 704 | | - session->guest_caps); |
|---|
| 705 | | - session->guest_caps = previous; |
|---|
| 854 | + ret = vbg_set_host_capabilities(gdev, session, session_termination); |
|---|
| 855 | + /* Roll back on failure, unless it's session termination time. */ |
|---|
| 856 | + if (ret < 0 && !session_termination) { |
|---|
| 857 | + vbg_track_bit_usage(&gdev->set_guest_caps_tracker, changed, |
|---|
| 858 | + session->set_guest_caps); |
|---|
| 859 | + session->set_guest_caps = previous; |
|---|
| 706 | 860 | } |
|---|
| 707 | 861 | |
|---|
| 708 | 862 | out: |
|---|
| 709 | 863 | mutex_unlock(&gdev->session_mutex); |
|---|
| 710 | | - vbg_req_free(req, sizeof(*req)); |
|---|
| 711 | 864 | |
|---|
| 712 | 865 | return ret; |
|---|
| 713 | 866 | } |
|---|
| .. | .. |
|---|
| 722 | 875 | struct vmmdev_host_version *req; |
|---|
| 723 | 876 | int rc, ret; |
|---|
| 724 | 877 | |
|---|
| 725 | | - req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HOST_VERSION); |
|---|
| 878 | + req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HOST_VERSION, |
|---|
| 879 | + VBG_KERNEL_REQUEST); |
|---|
| 726 | 880 | if (!req) |
|---|
| 727 | 881 | return -ENOMEM; |
|---|
| 728 | 882 | |
|---|
| .. | .. |
|---|
| 783 | 937 | |
|---|
| 784 | 938 | gdev->mem_balloon.get_req = |
|---|
| 785 | 939 | vbg_req_alloc(sizeof(*gdev->mem_balloon.get_req), |
|---|
| 786 | | - VMMDEVREQ_GET_MEMBALLOON_CHANGE_REQ); |
|---|
| 940 | + VMMDEVREQ_GET_MEMBALLOON_CHANGE_REQ, |
|---|
| 941 | + VBG_KERNEL_REQUEST); |
|---|
| 787 | 942 | gdev->mem_balloon.change_req = |
|---|
| 788 | 943 | vbg_req_alloc(sizeof(*gdev->mem_balloon.change_req), |
|---|
| 789 | | - VMMDEVREQ_CHANGE_MEMBALLOON); |
|---|
| 944 | + VMMDEVREQ_CHANGE_MEMBALLOON, |
|---|
| 945 | + VBG_KERNEL_REQUEST); |
|---|
| 790 | 946 | gdev->cancel_req = |
|---|
| 791 | 947 | vbg_req_alloc(sizeof(*(gdev->cancel_req)), |
|---|
| 792 | | - VMMDEVREQ_HGCM_CANCEL2); |
|---|
| 948 | + VMMDEVREQ_HGCM_CANCEL2, |
|---|
| 949 | + VBG_KERNEL_REQUEST); |
|---|
| 793 | 950 | gdev->ack_events_req = |
|---|
| 794 | 951 | vbg_req_alloc(sizeof(*gdev->ack_events_req), |
|---|
| 795 | | - VMMDEVREQ_ACKNOWLEDGE_EVENTS); |
|---|
| 952 | + VMMDEVREQ_ACKNOWLEDGE_EVENTS, |
|---|
| 953 | + VBG_KERNEL_REQUEST); |
|---|
| 796 | 954 | gdev->mouse_status_req = |
|---|
| 797 | 955 | vbg_req_alloc(sizeof(*gdev->mouse_status_req), |
|---|
| 798 | | - VMMDEVREQ_GET_MOUSE_STATUS); |
|---|
| 956 | + VMMDEVREQ_GET_MOUSE_STATUS, |
|---|
| 957 | + VBG_KERNEL_REQUEST); |
|---|
| 799 | 958 | |
|---|
| 800 | 959 | if (!gdev->mem_balloon.get_req || !gdev->mem_balloon.change_req || |
|---|
| 801 | 960 | !gdev->cancel_req || !gdev->ack_events_req || |
|---|
| .. | .. |
|---|
| 892 | 1051 | * vboxguest_linux.c calls this when userspace opens the char-device. |
|---|
| 893 | 1052 | * Return: A pointer to the new session or an ERR_PTR on error. |
|---|
| 894 | 1053 | * @gdev: The Guest extension device. |
|---|
| 895 | | - * @user: Set if this is a session for the vboxuser device. |
|---|
| 1054 | + * @requestor: VMMDEV_REQUESTOR_* flags |
|---|
| 896 | 1055 | */ |
|---|
| 897 | | -struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, bool user) |
|---|
| 1056 | +struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, u32 requestor) |
|---|
| 898 | 1057 | { |
|---|
| 899 | 1058 | struct vbg_session *session; |
|---|
| 900 | 1059 | |
|---|
| .. | .. |
|---|
| 903 | 1062 | return ERR_PTR(-ENOMEM); |
|---|
| 904 | 1063 | |
|---|
| 905 | 1064 | session->gdev = gdev; |
|---|
| 906 | | - session->user_session = user; |
|---|
| 1065 | + session->requestor = requestor; |
|---|
| 907 | 1066 | |
|---|
| 908 | 1067 | return session; |
|---|
| 909 | 1068 | } |
|---|
| .. | .. |
|---|
| 917 | 1076 | struct vbg_dev *gdev = session->gdev; |
|---|
| 918 | 1077 | int i, rc; |
|---|
| 919 | 1078 | |
|---|
| 1079 | + vbg_acquire_session_capabilities(gdev, session, 0, U32_MAX, 0, true); |
|---|
| 920 | 1080 | vbg_set_session_capabilities(gdev, session, 0, U32_MAX, true); |
|---|
| 921 | 1081 | vbg_set_session_event_filter(gdev, session, 0, U32_MAX, true); |
|---|
| 922 | 1082 | |
|---|
| .. | .. |
|---|
| 924 | 1084 | if (!session->hgcm_client_ids[i]) |
|---|
| 925 | 1085 | continue; |
|---|
| 926 | 1086 | |
|---|
| 927 | | - vbg_hgcm_disconnect(gdev, session->hgcm_client_ids[i], &rc); |
|---|
| 1087 | + /* requestor is kernel here, as we're cleaning up. */ |
|---|
| 1088 | + vbg_hgcm_disconnect(gdev, VBG_KERNEL_REQUEST, |
|---|
| 1089 | + session->hgcm_client_ids[i], &rc); |
|---|
| 928 | 1090 | } |
|---|
| 929 | 1091 | |
|---|
| 930 | 1092 | kfree(session); |
|---|
| .. | .. |
|---|
| 972 | 1134 | return 0; |
|---|
| 973 | 1135 | } |
|---|
| 974 | 1136 | |
|---|
| 1137 | +/* Must be called with the event_lock held */ |
|---|
| 1138 | +static u32 vbg_get_allowed_event_mask_for_session(struct vbg_dev *gdev, |
|---|
| 1139 | + struct vbg_session *session) |
|---|
| 1140 | +{ |
|---|
| 1141 | + u32 acquire_mode_caps = gdev->acquire_mode_guest_caps; |
|---|
| 1142 | + u32 session_acquired_caps = session->acquired_guest_caps; |
|---|
| 1143 | + u32 allowed_events = VMMDEV_EVENT_VALID_EVENT_MASK; |
|---|
| 1144 | + |
|---|
| 1145 | + if ((acquire_mode_caps & VMMDEV_GUEST_SUPPORTS_GRAPHICS) && |
|---|
| 1146 | + !(session_acquired_caps & VMMDEV_GUEST_SUPPORTS_GRAPHICS)) |
|---|
| 1147 | + allowed_events &= ~VMMDEV_EVENT_DISPLAY_CHANGE_REQUEST; |
|---|
| 1148 | + |
|---|
| 1149 | + if ((acquire_mode_caps & VMMDEV_GUEST_SUPPORTS_SEAMLESS) && |
|---|
| 1150 | + !(session_acquired_caps & VMMDEV_GUEST_SUPPORTS_SEAMLESS)) |
|---|
| 1151 | + allowed_events &= ~VMMDEV_EVENT_SEAMLESS_MODE_CHANGE_REQUEST; |
|---|
| 1152 | + |
|---|
| 1153 | + return allowed_events; |
|---|
| 1154 | +} |
|---|
| 1155 | + |
|---|
| 975 | 1156 | static bool vbg_wait_event_cond(struct vbg_dev *gdev, |
|---|
| 976 | 1157 | struct vbg_session *session, |
|---|
| 977 | 1158 | u32 event_mask) |
|---|
| .. | .. |
|---|
| 983 | 1164 | spin_lock_irqsave(&gdev->event_spinlock, flags); |
|---|
| 984 | 1165 | |
|---|
| 985 | 1166 | events = gdev->pending_events & event_mask; |
|---|
| 1167 | + events &= vbg_get_allowed_event_mask_for_session(gdev, session); |
|---|
| 986 | 1168 | wakeup = events || session->cancel_waiters; |
|---|
| 987 | 1169 | |
|---|
| 988 | 1170 | spin_unlock_irqrestore(&gdev->event_spinlock, flags); |
|---|
| .. | .. |
|---|
| 997 | 1179 | { |
|---|
| 998 | 1180 | u32 events = gdev->pending_events & event_mask; |
|---|
| 999 | 1181 | |
|---|
| 1182 | + events &= vbg_get_allowed_event_mask_for_session(gdev, session); |
|---|
| 1000 | 1183 | gdev->pending_events &= ~events; |
|---|
| 1001 | 1184 | return events; |
|---|
| 1002 | 1185 | } |
|---|
| .. | .. |
|---|
| 1116 | 1299 | case VMMDEVREQ_VIDEO_ACCEL_ENABLE: |
|---|
| 1117 | 1300 | case VMMDEVREQ_VIDEO_ACCEL_FLUSH: |
|---|
| 1118 | 1301 | case VMMDEVREQ_VIDEO_SET_VISIBLE_REGION: |
|---|
| 1302 | + case VMMDEVREQ_VIDEO_UPDATE_MONITOR_POSITIONS: |
|---|
| 1119 | 1303 | case VMMDEVREQ_GET_DISPLAY_CHANGE_REQEX: |
|---|
| 1304 | + case VMMDEVREQ_GET_DISPLAY_CHANGE_REQ_MULTI: |
|---|
| 1120 | 1305 | case VMMDEVREQ_GET_SEAMLESS_CHANGE_REQ: |
|---|
| 1121 | 1306 | case VMMDEVREQ_GET_VRDPCHANGE_REQ: |
|---|
| 1122 | 1307 | case VMMDEVREQ_LOG_STRING: |
|---|
| .. | .. |
|---|
| 1152 | 1337 | return -EPERM; |
|---|
| 1153 | 1338 | } |
|---|
| 1154 | 1339 | |
|---|
| 1155 | | - if (trusted_apps_only && session->user_session) { |
|---|
| 1340 | + if (trusted_apps_only && |
|---|
| 1341 | + (session->requestor & VMMDEV_REQUESTOR_USER_DEVICE)) { |
|---|
| 1156 | 1342 | vbg_err("Denying userspace vmm call type %#08x through vboxuser device node\n", |
|---|
| 1157 | 1343 | req->request_type); |
|---|
| 1158 | 1344 | return -EPERM; |
|---|
| .. | .. |
|---|
| 1209 | 1395 | if (i >= ARRAY_SIZE(session->hgcm_client_ids)) |
|---|
| 1210 | 1396 | return -EMFILE; |
|---|
| 1211 | 1397 | |
|---|
| 1212 | | - ret = vbg_hgcm_connect(gdev, &conn->u.in.loc, &client_id, |
|---|
| 1213 | | - &conn->hdr.rc); |
|---|
| 1398 | + ret = vbg_hgcm_connect(gdev, session->requestor, &conn->u.in.loc, |
|---|
| 1399 | + &client_id, &conn->hdr.rc); |
|---|
| 1214 | 1400 | |
|---|
| 1215 | 1401 | mutex_lock(&gdev->session_mutex); |
|---|
| 1216 | 1402 | if (ret == 0 && conn->hdr.rc >= 0) { |
|---|
| .. | .. |
|---|
| 1251 | 1437 | if (i >= ARRAY_SIZE(session->hgcm_client_ids)) |
|---|
| 1252 | 1438 | return -EINVAL; |
|---|
| 1253 | 1439 | |
|---|
| 1254 | | - ret = vbg_hgcm_disconnect(gdev, client_id, &disconn->hdr.rc); |
|---|
| 1440 | + ret = vbg_hgcm_disconnect(gdev, session->requestor, client_id, |
|---|
| 1441 | + &disconn->hdr.rc); |
|---|
| 1255 | 1442 | |
|---|
| 1256 | 1443 | mutex_lock(&gdev->session_mutex); |
|---|
| 1257 | 1444 | if (ret == 0 && disconn->hdr.rc >= 0) |
|---|
| .. | .. |
|---|
| 1344 | 1531 | } |
|---|
| 1345 | 1532 | |
|---|
| 1346 | 1533 | if (IS_ENABLED(CONFIG_COMPAT) && f32bit) |
|---|
| 1347 | | - ret = vbg_hgcm_call32(gdev, client_id, |
|---|
| 1534 | + ret = vbg_hgcm_call32(gdev, session->requestor, client_id, |
|---|
| 1348 | 1535 | call->function, call->timeout_ms, |
|---|
| 1349 | 1536 | VBG_IOCTL_HGCM_CALL_PARMS32(call), |
|---|
| 1350 | 1537 | call->parm_count, &call->hdr.rc); |
|---|
| 1351 | 1538 | else |
|---|
| 1352 | | - ret = vbg_hgcm_call(gdev, client_id, |
|---|
| 1539 | + ret = vbg_hgcm_call(gdev, session->requestor, client_id, |
|---|
| 1353 | 1540 | call->function, call->timeout_ms, |
|---|
| 1354 | 1541 | VBG_IOCTL_HGCM_CALL_PARMS(call), |
|---|
| 1355 | 1542 | call->parm_count, &call->hdr.rc); |
|---|
| .. | .. |
|---|
| 1396 | 1583 | false); |
|---|
| 1397 | 1584 | } |
|---|
| 1398 | 1585 | |
|---|
| 1586 | +static int vbg_ioctl_acquire_guest_capabilities(struct vbg_dev *gdev, |
|---|
| 1587 | + struct vbg_session *session, |
|---|
| 1588 | + struct vbg_ioctl_acquire_guest_caps *caps) |
|---|
| 1589 | +{ |
|---|
| 1590 | + u32 flags, or_mask, not_mask; |
|---|
| 1591 | + |
|---|
| 1592 | + if (vbg_ioctl_chk(&caps->hdr, sizeof(caps->u.in), 0)) |
|---|
| 1593 | + return -EINVAL; |
|---|
| 1594 | + |
|---|
| 1595 | + flags = caps->u.in.flags; |
|---|
| 1596 | + or_mask = caps->u.in.or_mask; |
|---|
| 1597 | + not_mask = caps->u.in.not_mask; |
|---|
| 1598 | + |
|---|
| 1599 | + if (flags & ~VBGL_IOC_AGC_FLAGS_VALID_MASK) |
|---|
| 1600 | + return -EINVAL; |
|---|
| 1601 | + |
|---|
| 1602 | + if ((or_mask | not_mask) & ~VMMDEV_GUEST_CAPABILITIES_MASK) |
|---|
| 1603 | + return -EINVAL; |
|---|
| 1604 | + |
|---|
| 1605 | + return vbg_acquire_session_capabilities(gdev, session, or_mask, |
|---|
| 1606 | + not_mask, flags, false); |
|---|
| 1607 | +} |
|---|
| 1608 | + |
|---|
| 1399 | 1609 | static int vbg_ioctl_change_guest_capabilities(struct vbg_dev *gdev, |
|---|
| 1400 | 1610 | struct vbg_session *session, struct vbg_ioctl_set_guest_caps *caps) |
|---|
| 1401 | 1611 | { |
|---|
| .. | .. |
|---|
| 1416 | 1626 | if (ret) |
|---|
| 1417 | 1627 | return ret; |
|---|
| 1418 | 1628 | |
|---|
| 1419 | | - caps->u.out.session_caps = session->guest_caps; |
|---|
| 1629 | + caps->u.out.session_caps = session->set_guest_caps; |
|---|
| 1420 | 1630 | caps->u.out.global_caps = gdev->guest_caps_host; |
|---|
| 1421 | 1631 | |
|---|
| 1422 | 1632 | return 0; |
|---|
| .. | .. |
|---|
| 1439 | 1649 | } |
|---|
| 1440 | 1650 | |
|---|
| 1441 | 1651 | static int vbg_ioctl_write_core_dump(struct vbg_dev *gdev, |
|---|
| 1652 | + struct vbg_session *session, |
|---|
| 1442 | 1653 | struct vbg_ioctl_write_coredump *dump) |
|---|
| 1443 | 1654 | { |
|---|
| 1444 | 1655 | struct vmmdev_write_core_dump *req; |
|---|
| .. | .. |
|---|
| 1446 | 1657 | if (vbg_ioctl_chk(&dump->hdr, sizeof(dump->u.in), 0)) |
|---|
| 1447 | 1658 | return -EINVAL; |
|---|
| 1448 | 1659 | |
|---|
| 1449 | | - req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_WRITE_COREDUMP); |
|---|
| 1660 | + req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_WRITE_COREDUMP, |
|---|
| 1661 | + session->requestor); |
|---|
| 1450 | 1662 | if (!req) |
|---|
| 1451 | 1663 | return -ENOMEM; |
|---|
| 1452 | 1664 | |
|---|
| .. | .. |
|---|
| 1503 | 1715 | return vbg_ioctl_interrupt_all_wait_events(gdev, session, data); |
|---|
| 1504 | 1716 | case VBG_IOCTL_CHANGE_FILTER_MASK: |
|---|
| 1505 | 1717 | return vbg_ioctl_change_filter_mask(gdev, session, data); |
|---|
| 1718 | + case VBG_IOCTL_ACQUIRE_GUEST_CAPABILITIES: |
|---|
| 1719 | + return vbg_ioctl_acquire_guest_capabilities(gdev, session, data); |
|---|
| 1506 | 1720 | case VBG_IOCTL_CHANGE_GUEST_CAPABILITIES: |
|---|
| 1507 | 1721 | return vbg_ioctl_change_guest_capabilities(gdev, session, data); |
|---|
| 1508 | 1722 | case VBG_IOCTL_CHECK_BALLOON: |
|---|
| 1509 | 1723 | return vbg_ioctl_check_balloon(gdev, data); |
|---|
| 1510 | 1724 | case VBG_IOCTL_WRITE_CORE_DUMP: |
|---|
| 1511 | | - return vbg_ioctl_write_core_dump(gdev, data); |
|---|
| 1725 | + return vbg_ioctl_write_core_dump(gdev, session, data); |
|---|
| 1512 | 1726 | } |
|---|
| 1513 | 1727 | |
|---|
| 1514 | 1728 | /* Variable sized requests. */ |
|---|
| .. | .. |
|---|
| 1516 | 1730 | #ifdef CONFIG_COMPAT |
|---|
| 1517 | 1731 | case VBG_IOCTL_HGCM_CALL_32(0): |
|---|
| 1518 | 1732 | f32bit = true; |
|---|
| 1519 | | - /* Fall through */ |
|---|
| 1733 | + fallthrough; |
|---|
| 1520 | 1734 | #endif |
|---|
| 1521 | 1735 | case VBG_IOCTL_HGCM_CALL(0): |
|---|
| 1522 | 1736 | return vbg_ioctl_hgcm_call(gdev, session, f32bit, data); |
|---|
| .. | .. |
|---|
| 1525 | 1739 | return vbg_ioctl_log(data); |
|---|
| 1526 | 1740 | } |
|---|
| 1527 | 1741 | |
|---|
| 1528 | | - vbg_debug("VGDrvCommonIoCtl: Unknown req %#08x\n", req); |
|---|
| 1742 | + vbg_err_ratelimited("Userspace made an unknown ioctl req %#08x\n", req); |
|---|
| 1529 | 1743 | return -ENOTTY; |
|---|
| 1530 | 1744 | } |
|---|
| 1531 | 1745 | |
|---|
| .. | .. |
|---|
| 1541 | 1755 | struct vmmdev_mouse_status *req; |
|---|
| 1542 | 1756 | int rc; |
|---|
| 1543 | 1757 | |
|---|
| 1544 | | - req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_MOUSE_STATUS); |
|---|
| 1758 | + req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_MOUSE_STATUS, |
|---|
| 1759 | + VBG_KERNEL_REQUEST); |
|---|
| 1545 | 1760 | if (!req) |
|---|
| 1546 | 1761 | return -ENOMEM; |
|---|
| 1547 | 1762 | |
|---|