.. | .. |
    | 1  | +/* SPDX-License-Identifier: GPL-2.0-only */
1   | 2  | /*
2   | 3  |  * VMware VMCI Driver
3   | 4  |  *
4   | 5  |  * Copyright (C) 2012 VMware, Inc. All rights reserved.
5   |    | - *
6   |    | - * This program is free software; you can redistribute it and/or modify it
7   |    | - * under the terms of the GNU General Public License as published by the
8   |    | - * Free Software Foundation version 2 and no later version.
9   |    | - *
10  |    | - * This program is distributed in the hope that it will be useful, but
11  |    | - * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
12  |    | - * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13  |    | - * for more details.
14  | 6  |  */
15  | 7  |
16  | 8  | #ifndef _VMW_VMCI_DEF_H_
17  | 9  | #define _VMW_VMCI_DEF_H_
18  | 10 |
19  | 11 | #include <linux/atomic.h>
    | 12 | +#include <linux/bits.h>
20  | 13 |
21  | 14 | /* Register offsets. */
22  | 15 | #define VMCI_STATUS_ADDR 0x00
.. | .. |
33  | 26 | #define VMCI_MAX_DEVICES 1
34  | 27 |
35  | 28 | /* Status register bits. */
36  |    | -#define VMCI_STATUS_INT_ON 0x1
    | 29 | +#define VMCI_STATUS_INT_ON BIT(0)
37  | 30 |
38  | 31 | /* Control register bits. */
39  |    | -#define VMCI_CONTROL_RESET 0x1
40  |    | -#define VMCI_CONTROL_INT_ENABLE 0x2
41  |    | -#define VMCI_CONTROL_INT_DISABLE 0x4
    | 32 | +#define VMCI_CONTROL_RESET BIT(0)
    | 33 | +#define VMCI_CONTROL_INT_ENABLE BIT(1)
    | 34 | +#define VMCI_CONTROL_INT_DISABLE BIT(2)
42  | 35 |
43  | 36 | /* Capabilities register bits. */
44  |    | -#define VMCI_CAPS_HYPERCALL 0x1
45  |    | -#define VMCI_CAPS_GUESTCALL 0x2
46  |    | -#define VMCI_CAPS_DATAGRAM 0x4
47  |    | -#define VMCI_CAPS_NOTIFICATIONS 0x8
    | 37 | +#define VMCI_CAPS_HYPERCALL BIT(0)
    | 38 | +#define VMCI_CAPS_GUESTCALL BIT(1)
    | 39 | +#define VMCI_CAPS_DATAGRAM BIT(2)
    | 40 | +#define VMCI_CAPS_NOTIFICATIONS BIT(3)
    | 41 | +#define VMCI_CAPS_PPN64 BIT(4)
48  | 42 |
49  | 43 | /* Interrupt Cause register bits. */
50  |    | -#define VMCI_ICR_DATAGRAM 0x1
51  |    | -#define VMCI_ICR_NOTIFICATION 0x2
    | 44 | +#define VMCI_ICR_DATAGRAM BIT(0)
    | 45 | +#define VMCI_ICR_NOTIFICATION BIT(1)
52  | 46 |
53  | 47 | /* Interrupt Mask register bits. */
54  |    | -#define VMCI_IMR_DATAGRAM 0x1
55  |    | -#define VMCI_IMR_NOTIFICATION 0x2
    | 48 | +#define VMCI_IMR_DATAGRAM BIT(0)
    | 49 | +#define VMCI_IMR_NOTIFICATION BIT(1)
56  | 50 |
57  | 51 | /* Maximum MSI/MSI-X interrupt vectors in the device. */
58  | 52 | #define VMCI_MAX_INTRS 2
.. | .. |
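The masks above are single-bit flags tested against the device's capabilities register, and the new BIT(4) entry advertises 64-bit page frame number support. A minimal sketch of a probe-time check, assuming the register is read through the VMCI_CAPS_ADDR offset from the register block at the top of this header (the iobase and use_ppn64 names are illustrative, not part of the patch):

	u32 caps = ioread32(iobase + VMCI_CAPS_ADDR);

	if (!(caps & VMCI_CAPS_DATAGRAM))
		return -ENXIO;		/* datagram support is mandatory */

	/* Negotiate 64-bit PPNs only when the device offers them. */
	use_ppn64 = !!(caps & VMCI_CAPS_PPN64);
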
165 | 159 |  */
166 | 160 | #define VMCI_ANON_SRC_CONTEXT_ID VMCI_INVALID_ID
167 | 161 | #define VMCI_ANON_SRC_RESOURCE_ID VMCI_INVALID_ID
168 |     | -static const struct vmci_handle VMCI_ANON_SRC_HANDLE = {
    | 162 | +static const struct vmci_handle __maybe_unused VMCI_ANON_SRC_HANDLE = {
169 | 163 | 	.context = VMCI_ANON_SRC_CONTEXT_ID,
170 | 164 | 	.resource = VMCI_ANON_SRC_RESOURCE_ID
171 | 165 | };
.. | .. |
445 | 439 | struct vmci_queue_header {
446 | 440 | 	/* All fields are 64bit and aligned. */
447 | 441 | 	struct vmci_handle handle;	/* Identifier. */
448 |     | -	atomic64_t producer_tail;	/* Offset in this queue. */
449 |     | -	atomic64_t consumer_head;	/* Offset in peer queue. */
    | 442 | +	u64 producer_tail;	/* Offset in this queue. */
    | 443 | +	u64 consumer_head;	/* Offset in peer queue. */
450 | 444 | };
451 | 445 |
452 | 446 | /*
.. | .. |
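With producer_tail and consumer_head now plain u64 offsets, the usual circular-queue arithmetic over the two headers still applies. A sketch of the free-space calculation implied by this layout (the function name is illustrative; the header itself provides equivalent helpers further down, which also return a proper error code for corrupt offsets):

	static inline u64 example_free_space(const struct vmci_queue_header *produce_q,
					     const struct vmci_queue_header *consume_q,
					     u64 produce_q_size)
	{
		u64 tail = produce_q->producer_tail;
		u64 head = consume_q->consumer_head;

		if (tail >= produce_q_size || head >= produce_q_size)
			return 0;	/* corrupt offsets */

		/* Reserve one slot so tail == head always means "empty". */
		return (tail >= head) ? produce_q_size - (tail - head) - 1
				      : head - tail - 1;
	}
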
471 | 465 |  * datagram callback is invoked in a delayed context (not interrupt context).
472 | 466 |  */
473 | 467 | #define VMCI_FLAG_DG_NONE 0
474 |     | -#define VMCI_FLAG_WELLKNOWN_DG_HND 0x1
475 |     | -#define VMCI_FLAG_ANYCID_DG_HND 0x2
476 |     | -#define VMCI_FLAG_DG_DELAYED_CB 0x4
    | 468 | +#define VMCI_FLAG_WELLKNOWN_DG_HND BIT(0)
    | 469 | +#define VMCI_FLAG_ANYCID_DG_HND BIT(1)
    | 470 | +#define VMCI_FLAG_DG_DELAYED_CB BIT(2)
477 | 471 |
478 | 472 | /*
479 | 473 |  * Maximum supported size of a VMCI datagram for routable datagrams.
.. | .. |
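These flags are OR-ed together when a datagram handler is registered. As a brief illustration (the variable name is hypothetical), a handler that accepts any source context and wants its callback run outside interrupt context would pass:

	u32 dg_flags = VMCI_FLAG_ANYCID_DG_HND | VMCI_FLAG_DG_DELAYED_CB;
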
578 | 572 |  */
579 | 573 | struct vmci_notify_bm_set_msg {
580 | 574 | 	struct vmci_datagram hdr;
581 |     | -	u32 bitmap_ppn;
582 |     | -	u32 _pad;
    | 575 | +	union {
    | 576 | +		u32 bitmap_ppn32;
    | 577 | +		u64 bitmap_ppn64;
    | 578 | +	};
583 | 579 | };
584 | 580 |
585 | 581 | /*
.. | .. |
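The anonymous union lets the same message carry either a 32-bit or a 64-bit PPN without changing its size. A sketch of how a sender might fill it in, depending on whether the PPN64 capability was negotiated (use_ppn64 and bitmap_ppn are illustrative names, not part of this header):

	struct vmci_notify_bm_set_msg bm_msg = { 0 };

	/* bm_msg.hdr (dst, src, payload_size) would be filled in here. */
	if (use_ppn64)
		bm_msg.bitmap_ppn64 = bitmap_ppn;
	else
		bm_msg.bitmap_ppn32 = (u32)bitmap_ppn;
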
700 | 696 | };
701 | 697 |
702 | 698 | /* VMCI Doorbell API. */
703 |     | -#define VMCI_FLAG_DELAYED_CB 0x01
    | 699 | +#define VMCI_FLAG_DELAYED_CB BIT(0)
704 | 700 |
705 | 701 | typedef void (*vmci_callback) (void *client_data);
706 | 702 |
.. | .. |
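VMCI_FLAG_DELAYED_CB asks for the doorbell notification to be delivered outside interrupt context. A callback matching the vmci_callback typedef above might look like this (the completion usage is illustrative only):

	static void example_doorbell_fired(void *client_data)
	{
		struct completion *done = client_data;

		/* Runs from process context when VMCI_FLAG_DELAYED_CB was set. */
		complete(done);
	}
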
745 | 741 |  * prefix will be used, so correctness isn't an issue, but using a
746 | 742 |  * 64bit operation still adds unnecessary overhead.
747 | 743 |  */
748 |     | -static inline u64 vmci_q_read_pointer(atomic64_t *var)
    | 744 | +static inline u64 vmci_q_read_pointer(u64 *var)
749 | 745 | {
750 |     | -#if defined(CONFIG_X86_32)
751 |     | -	return atomic_read((atomic_t *)var);
752 |     | -#else
753 |     | -	return atomic64_read(var);
754 |     | -#endif
    | 746 | +	return READ_ONCE(*(unsigned long *)var);
755 | 747 | }
756 | 748 |
757 | 749 | /*
.. | .. |
760 | 752 |  * never exceeds a 32bit value in this case. On 32bit SMP, using a
761 | 753 |  * locked cmpxchg8b adds unnecessary overhead.
762 | 754 |  */
763 |     | -static inline void vmci_q_set_pointer(atomic64_t *var,
764 |     | -				      u64 new_val)
    | 755 | +static inline void vmci_q_set_pointer(u64 *var, u64 new_val)
765 | 756 | {
766 |     | -#if defined(CONFIG_X86_32)
767 |     | -	return atomic_set((atomic_t *)var, (u32)new_val);
768 |     | -#else
769 |     | -	return atomic64_set(var, new_val);
770 |     | -#endif
    | 757 | +	/* XXX buggered on big-endian */
    | 758 | +	WRITE_ONCE(*(unsigned long *)var, (unsigned long)new_val);
771 | 759 | }
772 | 760 |
773 | 761 | /*
774 | 762 |  * Helper to add a given offset to a head or tail pointer. Wraps the
775 | 763 |  * value of the pointer around the max size of the queue.
776 | 764 |  */
777 |     | -static inline void vmci_qp_add_pointer(atomic64_t *var,
778 |     | -				       size_t add,
779 |     | -				       u64 size)
    | 765 | +static inline void vmci_qp_add_pointer(u64 *var, size_t add, u64 size)
780 | 766 | {
781 | 767 | 	u64 new_val = vmci_q_read_pointer(var);
782 | 768 |
.. | .. |
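On 64-bit and on 32-bit little-endian targets the unsigned long cast in these accessors touches exactly the half of the u64 that actually changes, which is why the setter's XXX comment flags big-endian as the unhandled case. Taken together, the helpers are how a producer publishes new data: read the tail, copy the payload, then advance the tail with wrap-around. A rough sketch, where produce_q_data, buf, written and produce_q_size are assumptions rather than names from this header:

	u64 tail = vmci_q_read_pointer(&produce_q_header->producer_tail);

	/* Simplified: ignores the second copy needed when the write wraps. */
	memcpy(produce_q_data + tail, buf, written);
	vmci_qp_add_pointer(&produce_q_header->producer_tail, written, produce_q_size);
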
853 | 839 | 				      const struct vmci_handle handle)
854 | 840 | {
855 | 841 | 	q_header->handle = handle;
856 |     | -	atomic64_set(&q_header->producer_tail, 0);
857 |     | -	atomic64_set(&q_header->consumer_head, 0);
    | 842 | +	q_header->producer_tail = 0;
    | 843 | +	q_header->consumer_head = 0;
858 | 844 | }
859 | 845 |
860 | 846 | /*