.. | .. |
---|
| 1 | +// SPDX-License-Identifier: GPL-2.0-only |
---|
1 | 2 | /* |
---|
2 | 3 | * Helpers for the host side of a virtio ring. |
---|
3 | 4 | * |
---|
.. | .. |
---|
12 | 13 | #include <linux/uaccess.h> |
---|
13 | 14 | #include <linux/slab.h> |
---|
14 | 15 | #include <linux/export.h> |
---|
| 16 | +#if IS_REACHABLE(CONFIG_VHOST_IOTLB) |
---|
| 17 | +#include <linux/bvec.h> |
---|
| 18 | +#include <linux/highmem.h> |
---|
| 19 | +#include <linux/vhost_iotlb.h> |
---|
| 20 | +#endif |
---|
15 | 21 | #include <uapi/linux/virtio_config.h> |
---|
16 | 22 | |
---|
17 | 23 | static __printf(1,2) __cold void vringh_bad(const char *fmt, ...) |
---|
.. | .. |
---|
70 | 76 | } |
---|
71 | 77 | |
---|
72 | 78 | /* Copy some bytes to/from the iovec. Returns num copied. */ |
---|
73 | | -static inline ssize_t vringh_iov_xfer(struct vringh_kiov *iov, |
---|
| 79 | +static inline ssize_t vringh_iov_xfer(struct vringh *vrh, |
---|
| 80 | + struct vringh_kiov *iov, |
---|
74 | 81 | void *ptr, size_t len, |
---|
75 | | - int (*xfer)(void *addr, void *ptr, |
---|
| 82 | + int (*xfer)(const struct vringh *vrh, |
---|
| 83 | + void *addr, void *ptr, |
---|
76 | 84 | size_t len)) |
---|
77 | 85 | { |
---|
78 | 86 | int err, done = 0; |
---|
.. | .. |
---|
81 | 89 | size_t partlen; |
---|
82 | 90 | |
---|
83 | 91 | partlen = min(iov->iov[iov->i].iov_len, len); |
---|
84 | | - err = xfer(iov->iov[iov->i].iov_base, ptr, partlen); |
---|
| 92 | + err = xfer(vrh, iov->iov[iov->i].iov_base, ptr, partlen); |
---|
85 | 93 | if (err) |
---|
86 | 94 | return err; |
---|
87 | 95 | done += partlen; |
---|
.. | .. |
---|
95 | 103 | /* Fix up old iov element then increment. */ |
---|
96 | 104 | iov->iov[iov->i].iov_len = iov->consumed; |
---|
97 | 105 | iov->iov[iov->i].iov_base -= iov->consumed; |
---|
| 106 | + |
---|
98 | 107 | |
---|
99 | 108 | iov->consumed = 0; |
---|
100 | 109 | iov->i++; |
---|
.. | .. |
---|
226 | 235 | u64 addr, |
---|
227 | 236 | struct vringh_range *r), |
---|
228 | 237 | struct vringh_range *range, |
---|
229 | | - int (*copy)(void *dst, const void *src, size_t len)) |
---|
| 238 | + int (*copy)(const struct vringh *vrh, |
---|
| 239 | + void *dst, const void *src, size_t len)) |
---|
230 | 240 | { |
---|
231 | 241 | size_t part, len = sizeof(struct vring_desc); |
---|
232 | 242 | |
---|
.. | .. |
---|
240 | 250 | if (!rcheck(vrh, addr, &part, range, getrange)) |
---|
241 | 251 | return -EINVAL; |
---|
242 | 252 | |
---|
243 | | - err = copy(dst, src, part); |
---|
| 253 | + err = copy(vrh, dst, src, part); |
---|
244 | 254 | if (err) |
---|
245 | 255 | return err; |
---|
246 | 256 | |
---|
.. | .. |
---|
261 | 271 | struct vringh_range *)), |
---|
262 | 272 | bool (*getrange)(struct vringh *, u64, struct vringh_range *), |
---|
263 | 273 | gfp_t gfp, |
---|
264 | | - int (*copy)(void *dst, const void *src, size_t len)) |
---|
| 274 | + int (*copy)(const struct vringh *vrh, |
---|
| 275 | + void *dst, const void *src, size_t len)) |
---|
265 | 276 | { |
---|
266 | | - int err, count = 0, up_next, desc_max; |
---|
| 277 | + int err, count = 0, indirect_count = 0, up_next, desc_max; |
---|
267 | 278 | struct vring_desc desc, *descs; |
---|
268 | 279 | struct vringh_range range = { -1ULL, 0 }, slowrange; |
---|
269 | 280 | bool slow = false; |
---|
.. | .. |
---|
291 | 302 | err = slow_copy(vrh, &desc, &descs[i], rcheck, getrange, |
---|
292 | 303 | &slowrange, copy); |
---|
293 | 304 | else |
---|
294 | | - err = copy(&desc, &descs[i], sizeof(desc)); |
---|
| 305 | + err = copy(vrh, &desc, &descs[i], sizeof(desc)); |
---|
295 | 306 | if (unlikely(err)) |
---|
296 | 307 | goto fail; |
---|
297 | 308 | |
---|
.. | .. |
---|
320 | 331 | continue; |
---|
321 | 332 | } |
---|
322 | 333 | |
---|
323 | | - if (count++ == vrh->vring.num) { |
---|
| 334 | + if (up_next == -1) |
---|
| 335 | + count++; |
---|
| 336 | + else |
---|
| 337 | + indirect_count++; |
---|
| 338 | + |
---|
| 339 | + if (count > vrh->vring.num || indirect_count > desc_max) { |
---|
324 | 340 | vringh_bad("Descriptor loop in %p", descs); |
---|
325 | 341 | err = -ELOOP; |
---|
326 | 342 | goto fail; |
---|
.. | .. |
---|
382 | 398 | i = return_from_indirect(vrh, &up_next, |
---|
383 | 399 | &descs, &desc_max); |
---|
384 | 400 | slow = false; |
---|
| 401 | + indirect_count = 0; |
---|
385 | 402 | } else |
---|
386 | 403 | break; |
---|
387 | 404 | } |
---|
.. | .. |
---|
404 | 421 | unsigned int num_used, |
---|
405 | 422 | int (*putu16)(const struct vringh *vrh, |
---|
406 | 423 | __virtio16 *p, u16 val), |
---|
407 | | - int (*putused)(struct vring_used_elem *dst, |
---|
| 424 | + int (*putused)(const struct vringh *vrh, |
---|
| 425 | + struct vring_used_elem *dst, |
---|
408 | 426 | const struct vring_used_elem |
---|
409 | 427 | *src, unsigned num)) |
---|
410 | 428 | { |
---|
.. | .. |
---|
420 | 438 | /* Compiler knows num_used == 1 sometimes, hence extra check */ |
---|
421 | 439 | if (num_used > 1 && unlikely(off + num_used >= vrh->vring.num)) { |
---|
422 | 440 | u16 part = vrh->vring.num - off; |
---|
423 | | - err = putused(&used_ring->ring[off], used, part); |
---|
| 441 | + err = putused(vrh, &used_ring->ring[off], used, part); |
---|
424 | 442 | if (!err) |
---|
425 | | - err = putused(&used_ring->ring[0], used + part, |
---|
| 443 | + err = putused(vrh, &used_ring->ring[0], used + part, |
---|
426 | 444 | num_used - part); |
---|
427 | 445 | } else |
---|
428 | | - err = putused(&used_ring->ring[off], used, num_used); |
---|
| 446 | + err = putused(vrh, &used_ring->ring[off], used, num_used); |
---|
429 | 447 | |
---|
430 | 448 | if (err) { |
---|
431 | 449 | vringh_bad("Failed to write %u used entries %u at %p", |
---|
.. | .. |
---|
564 | 582 | return put_user(v, (__force __virtio16 __user *)p); |
---|
565 | 583 | } |
---|
566 | 584 | |
---|
567 | | -static inline int copydesc_user(void *dst, const void *src, size_t len) |
---|
| 585 | +static inline int copydesc_user(const struct vringh *vrh, |
---|
| 586 | + void *dst, const void *src, size_t len) |
---|
568 | 587 | { |
---|
569 | 588 | return copy_from_user(dst, (__force void __user *)src, len) ? |
---|
570 | 589 | -EFAULT : 0; |
---|
571 | 590 | } |
---|
572 | 591 | |
---|
573 | | -static inline int putused_user(struct vring_used_elem *dst, |
---|
| 592 | +static inline int putused_user(const struct vringh *vrh, |
---|
| 593 | + struct vring_used_elem *dst, |
---|
574 | 594 | const struct vring_used_elem *src, |
---|
575 | 595 | unsigned int num) |
---|
576 | 596 | { |
---|
.. | .. |
---|
578 | 598 | sizeof(*dst) * num) ? -EFAULT : 0; |
---|
579 | 599 | } |
---|
580 | 600 | |
---|
581 | | -static inline int xfer_from_user(void *src, void *dst, size_t len) |
---|
| 601 | +static inline int xfer_from_user(const struct vringh *vrh, void *src, |
---|
| 602 | + void *dst, size_t len) |
---|
582 | 603 | { |
---|
583 | 604 | return copy_from_user(dst, (__force void __user *)src, len) ? |
---|
584 | 605 | -EFAULT : 0; |
---|
585 | 606 | } |
---|
586 | 607 | |
---|
587 | | -static inline int xfer_to_user(void *dst, void *src, size_t len) |
---|
| 608 | +static inline int xfer_to_user(const struct vringh *vrh, |
---|
| 609 | + void *dst, void *src, size_t len) |
---|
588 | 610 | { |
---|
589 | 611 | return copy_to_user((__force void __user *)dst, src, len) ? |
---|
590 | 612 | -EFAULT : 0; |
---|
.. | .. |
---|
605 | 627 | */ |
---|
606 | 628 | int vringh_init_user(struct vringh *vrh, u64 features, |
---|
607 | 629 | unsigned int num, bool weak_barriers, |
---|
608 | | - struct vring_desc __user *desc, |
---|
609 | | - struct vring_avail __user *avail, |
---|
610 | | - struct vring_used __user *used) |
---|
| 630 | + vring_desc_t __user *desc, |
---|
| 631 | + vring_avail_t __user *avail, |
---|
| 632 | + vring_used_t __user *used) |
---|
611 | 633 | { |
---|
612 | 634 | /* Sane power of 2 please! */ |
---|
613 | 635 | if (!num || num > 0xffff || (num & (num - 1))) { |
---|
.. | .. |
---|
706 | 728 | */ |
---|
707 | 729 | ssize_t vringh_iov_pull_user(struct vringh_iov *riov, void *dst, size_t len) |
---|
708 | 730 | { |
---|
709 | | - return vringh_iov_xfer((struct vringh_kiov *)riov, |
---|
| 731 | + return vringh_iov_xfer(NULL, (struct vringh_kiov *)riov, |
---|
710 | 732 | dst, len, xfer_from_user); |
---|
711 | 733 | } |
---|
712 | 734 | EXPORT_SYMBOL(vringh_iov_pull_user); |
---|
.. | .. |
---|
714 | 736 | /** |
---|
715 | 737 | * vringh_iov_push_user - copy bytes into vring_iov. |
---|
716 | 738 | * @wiov: the wiov as passed to vringh_getdesc_user() (updated as we consume) |
---|
717 | | - * @dst: the place to copy. |
---|
| 739 | + * @src: the place to copy from. |
---|
718 | 740 | * @len: the maximum length to copy. |
---|
719 | 741 | * |
---|
720 | 742 | * Returns the bytes copied <= len or a negative errno. |
---|
.. | .. |
---|
722 | 744 | ssize_t vringh_iov_push_user(struct vringh_iov *wiov, |
---|
723 | 745 | const void *src, size_t len) |
---|
724 | 746 | { |
---|
725 | | - return vringh_iov_xfer((struct vringh_kiov *)wiov, |
---|
| 747 | + return vringh_iov_xfer(NULL, (struct vringh_kiov *)wiov, |
---|
726 | 748 | (void *)src, len, xfer_to_user); |
---|
727 | 749 | } |
---|
728 | 750 | EXPORT_SYMBOL(vringh_iov_push_user); |
---|
.. | .. |
---|
832 | 854 | return 0; |
---|
833 | 855 | } |
---|
834 | 856 | |
---|
835 | | -static inline int copydesc_kern(void *dst, const void *src, size_t len) |
---|
| 857 | +static inline int copydesc_kern(const struct vringh *vrh, |
---|
| 858 | + void *dst, const void *src, size_t len) |
---|
836 | 859 | { |
---|
837 | 860 | memcpy(dst, src, len); |
---|
838 | 861 | return 0; |
---|
839 | 862 | } |
---|
840 | 863 | |
---|
841 | | -static inline int putused_kern(struct vring_used_elem *dst, |
---|
| 864 | +static inline int putused_kern(const struct vringh *vrh, |
---|
| 865 | + struct vring_used_elem *dst, |
---|
842 | 866 | const struct vring_used_elem *src, |
---|
843 | 867 | unsigned int num) |
---|
844 | 868 | { |
---|
.. | .. |
---|
846 | 870 | return 0; |
---|
847 | 871 | } |
---|
848 | 872 | |
---|
849 | | -static inline int xfer_kern(void *src, void *dst, size_t len) |
---|
| 873 | +static inline int xfer_kern(const struct vringh *vrh, void *src, |
---|
| 874 | + void *dst, size_t len) |
---|
| 875 | +{ |
---|
| 876 | + memcpy(dst, src, len); |
---|
| 877 | + return 0; |
---|
| 878 | +} |
---|
| 879 | + |
---|
| 880 | +static inline int kern_xfer(const struct vringh *vrh, void *dst, |
---|
| 881 | + void *src, size_t len) |
---|
850 | 882 | { |
---|
851 | 883 | memcpy(dst, src, len); |
---|
852 | 884 | return 0; |
---|
.. | .. |
---|
943 | 975 | */ |
---|
944 | 976 | ssize_t vringh_iov_pull_kern(struct vringh_kiov *riov, void *dst, size_t len) |
---|
945 | 977 | { |
---|
946 | | - return vringh_iov_xfer(riov, dst, len, xfer_kern); |
---|
| 978 | + return vringh_iov_xfer(NULL, riov, dst, len, xfer_kern); |
---|
947 | 979 | } |
---|
948 | 980 | EXPORT_SYMBOL(vringh_iov_pull_kern); |
---|
949 | 981 | |
---|
950 | 982 | /** |
---|
951 | 983 | * vringh_iov_push_kern - copy bytes into vring_iov. |
---|
952 | 984 | * @wiov: the wiov as passed to vringh_getdesc_kern() (updated as we consume) |
---|
953 | | - * @dst: the place to copy. |
---|
| 985 | + * @src: the place to copy from. |
---|
954 | 986 | * @len: the maximum length to copy. |
---|
955 | 987 | * |
---|
956 | 988 | * Returns the bytes copied <= len or a negative errno. |
---|
.. | .. |
---|
958 | 990 | ssize_t vringh_iov_push_kern(struct vringh_kiov *wiov, |
---|
959 | 991 | const void *src, size_t len) |
---|
960 | 992 | { |
---|
961 | | - return vringh_iov_xfer(wiov, (void *)src, len, xfer_kern); |
---|
| 993 | + return vringh_iov_xfer(NULL, wiov, (void *)src, len, kern_xfer); |
---|
962 | 994 | } |
---|
963 | 995 | EXPORT_SYMBOL(vringh_iov_push_kern); |
---|
964 | 996 | |
---|
.. | .. |
---|
1036 | 1068 | } |
---|
1037 | 1069 | EXPORT_SYMBOL(vringh_need_notify_kern); |
---|
1038 | 1070 | |
---|
| 1071 | +#if IS_REACHABLE(CONFIG_VHOST_IOTLB) |
---|
| 1072 | + |
---|
| 1073 | +static int iotlb_translate(const struct vringh *vrh, |
---|
| 1074 | + u64 addr, u64 len, struct bio_vec iov[], |
---|
| 1075 | + int iov_size, u32 perm) |
---|
| 1076 | +{ |
---|
| 1077 | + struct vhost_iotlb_map *map; |
---|
| 1078 | + struct vhost_iotlb *iotlb = vrh->iotlb; |
---|
| 1079 | + int ret = 0; |
---|
| 1080 | + u64 s = 0, last = addr + len - 1; |
---|
| 1081 | + |
---|
| 1082 | + while (len > s) { |
---|
| 1083 | + u64 size, pa, pfn; |
---|
| 1084 | + |
---|
| 1085 | + if (unlikely(ret >= iov_size)) { |
---|
| 1086 | + ret = -ENOBUFS; |
---|
| 1087 | + break; |
---|
| 1088 | + } |
---|
| 1089 | + |
---|
| 1090 | + map = vhost_iotlb_itree_first(iotlb, addr, last); |
---|
| 1091 | + if (!map || map->start > addr) { |
---|
| 1092 | + ret = -EINVAL; |
---|
| 1093 | + break; |
---|
| 1094 | + } else if (!(map->perm & perm)) { |
---|
| 1095 | + ret = -EPERM; |
---|
| 1096 | + break; |
---|
| 1097 | + } |
---|
| 1098 | + |
---|
| 1099 | + size = map->size - addr + map->start; |
---|
| 1100 | + pa = map->addr + addr - map->start; |
---|
| 1101 | + pfn = pa >> PAGE_SHIFT; |
---|
| 1102 | + iov[ret].bv_page = pfn_to_page(pfn); |
---|
| 1103 | + iov[ret].bv_len = min(len - s, size); |
---|
| 1104 | + iov[ret].bv_offset = pa & (PAGE_SIZE - 1); |
---|
| 1105 | + s += size; |
---|
| 1106 | + addr += size; |
---|
| 1107 | + ++ret; |
---|
| 1108 | + } |
---|
| 1109 | + |
---|
| 1110 | + return ret; |
---|
| 1111 | +} |
---|
| 1112 | + |
---|
| 1113 | +static inline int copy_from_iotlb(const struct vringh *vrh, void *dst, |
---|
| 1114 | + void *src, size_t len) |
---|
| 1115 | +{ |
---|
| 1116 | + struct iov_iter iter; |
---|
| 1117 | + struct bio_vec iov[16]; |
---|
| 1118 | + int ret; |
---|
| 1119 | + |
---|
| 1120 | + ret = iotlb_translate(vrh, (u64)(uintptr_t)src, |
---|
| 1121 | + len, iov, 16, VHOST_MAP_RO); |
---|
| 1122 | + if (ret < 0) |
---|
| 1123 | + return ret; |
---|
| 1124 | + |
---|
| 1125 | + iov_iter_bvec(&iter, READ, iov, ret, len); |
---|
| 1126 | + |
---|
| 1127 | + ret = copy_from_iter(dst, len, &iter); |
---|
| 1128 | + |
---|
| 1129 | + return ret; |
---|
| 1130 | +} |
---|
| 1131 | + |
---|
| 1132 | +static inline int copy_to_iotlb(const struct vringh *vrh, void *dst, |
---|
| 1133 | + void *src, size_t len) |
---|
| 1134 | +{ |
---|
| 1135 | + struct iov_iter iter; |
---|
| 1136 | + struct bio_vec iov[16]; |
---|
| 1137 | + int ret; |
---|
| 1138 | + |
---|
| 1139 | + ret = iotlb_translate(vrh, (u64)(uintptr_t)dst, |
---|
| 1140 | + len, iov, 16, VHOST_MAP_WO); |
---|
| 1141 | + if (ret < 0) |
---|
| 1142 | + return ret; |
---|
| 1143 | + |
---|
| 1144 | + iov_iter_bvec(&iter, WRITE, iov, ret, len); |
---|
| 1145 | + |
---|
| 1146 | + return copy_to_iter(src, len, &iter); |
---|
| 1147 | +} |
---|
| 1148 | + |
---|
| 1149 | +static inline int getu16_iotlb(const struct vringh *vrh, |
---|
| 1150 | + u16 *val, const __virtio16 *p) |
---|
| 1151 | +{ |
---|
| 1152 | + struct bio_vec iov; |
---|
| 1153 | + void *kaddr, *from; |
---|
| 1154 | + int ret; |
---|
| 1155 | + |
---|
| 1156 | + /* Atomic read is needed for getu16 */ |
---|
| 1157 | + ret = iotlb_translate(vrh, (u64)(uintptr_t)p, sizeof(*p), |
---|
| 1158 | + &iov, 1, VHOST_MAP_RO); |
---|
| 1159 | + if (ret < 0) |
---|
| 1160 | + return ret; |
---|
| 1161 | + |
---|
| 1162 | + kaddr = kmap_atomic(iov.bv_page); |
---|
| 1163 | + from = kaddr + iov.bv_offset; |
---|
| 1164 | + *val = vringh16_to_cpu(vrh, READ_ONCE(*(__virtio16 *)from)); |
---|
| 1165 | + kunmap_atomic(kaddr); |
---|
| 1166 | + |
---|
| 1167 | + return 0; |
---|
| 1168 | +} |
---|
| 1169 | + |
---|
| 1170 | +static inline int putu16_iotlb(const struct vringh *vrh, |
---|
| 1171 | + __virtio16 *p, u16 val) |
---|
| 1172 | +{ |
---|
| 1173 | + struct bio_vec iov; |
---|
| 1174 | + void *kaddr, *to; |
---|
| 1175 | + int ret; |
---|
| 1176 | + |
---|
| 1177 | + /* Atomic write is needed for putu16 */ |
---|
| 1178 | + ret = iotlb_translate(vrh, (u64)(uintptr_t)p, sizeof(*p), |
---|
| 1179 | + &iov, 1, VHOST_MAP_WO); |
---|
| 1180 | + if (ret < 0) |
---|
| 1181 | + return ret; |
---|
| 1182 | + |
---|
| 1183 | + kaddr = kmap_atomic(iov.bv_page); |
---|
| 1184 | + to = kaddr + iov.bv_offset; |
---|
| 1185 | + WRITE_ONCE(*(__virtio16 *)to, cpu_to_vringh16(vrh, val)); |
---|
| 1186 | + kunmap_atomic(kaddr); |
---|
| 1187 | + |
---|
| 1188 | + return 0; |
---|
| 1189 | +} |
---|
| 1190 | + |
---|
| 1191 | +static inline int copydesc_iotlb(const struct vringh *vrh, |
---|
| 1192 | + void *dst, const void *src, size_t len) |
---|
| 1193 | +{ |
---|
| 1194 | + int ret; |
---|
| 1195 | + |
---|
| 1196 | + ret = copy_from_iotlb(vrh, dst, (void *)src, len); |
---|
| 1197 | + if (ret != len) |
---|
| 1198 | + return -EFAULT; |
---|
| 1199 | + |
---|
| 1200 | + return 0; |
---|
| 1201 | +} |
---|
| 1202 | + |
---|
| 1203 | +static inline int xfer_from_iotlb(const struct vringh *vrh, void *src, |
---|
| 1204 | + void *dst, size_t len) |
---|
| 1205 | +{ |
---|
| 1206 | + int ret; |
---|
| 1207 | + |
---|
| 1208 | + ret = copy_from_iotlb(vrh, dst, src, len); |
---|
| 1209 | + if (ret != len) |
---|
| 1210 | + return -EFAULT; |
---|
| 1211 | + |
---|
| 1212 | + return 0; |
---|
| 1213 | +} |
---|
| 1214 | + |
---|
| 1215 | +static inline int xfer_to_iotlb(const struct vringh *vrh, |
---|
| 1216 | + void *dst, void *src, size_t len) |
---|
| 1217 | +{ |
---|
| 1218 | + int ret; |
---|
| 1219 | + |
---|
| 1220 | + ret = copy_to_iotlb(vrh, dst, src, len); |
---|
| 1221 | + if (ret != len) |
---|
| 1222 | + return -EFAULT; |
---|
| 1223 | + |
---|
| 1224 | + return 0; |
---|
| 1225 | +} |
---|
| 1226 | + |
---|
| 1227 | +static inline int putused_iotlb(const struct vringh *vrh, |
---|
| 1228 | + struct vring_used_elem *dst, |
---|
| 1229 | + const struct vring_used_elem *src, |
---|
| 1230 | + unsigned int num) |
---|
| 1231 | +{ |
---|
| 1232 | + int size = num * sizeof(*dst); |
---|
| 1233 | + int ret; |
---|
| 1234 | + |
---|
| 1235 | + ret = copy_to_iotlb(vrh, dst, (void *)src, num * sizeof(*dst)); |
---|
| 1236 | + if (ret != size) |
---|
| 1237 | + return -EFAULT; |
---|
| 1238 | + |
---|
| 1239 | + return 0; |
---|
| 1240 | +} |
---|
| 1241 | + |
---|
| 1242 | +/** |
---|
| 1243 | + * vringh_init_iotlb - initialize a vringh for a ring with IOTLB. |
---|
| 1244 | + * @vrh: the vringh to initialize. |
---|
| 1245 | + * @features: the feature bits for this ring. |
---|
| 1246 | + * @num: the number of elements. |
---|
| 1247 | + * @weak_barriers: true if we only need memory barriers, not I/O. |
---|
| 1248 | + * @desc: the userspace descriptor pointer. |
---|
| 1249 | + * @avail: the userspace avail pointer. |
---|
| 1250 | + * @used: the userspace used pointer. |
---|
| 1251 | + * |
---|
| 1252 | + * Returns an error if num is invalid. |
---|
| 1253 | + */ |
---|
| 1254 | +int vringh_init_iotlb(struct vringh *vrh, u64 features, |
---|
| 1255 | + unsigned int num, bool weak_barriers, |
---|
| 1256 | + struct vring_desc *desc, |
---|
| 1257 | + struct vring_avail *avail, |
---|
| 1258 | + struct vring_used *used) |
---|
| 1259 | +{ |
---|
| 1260 | + return vringh_init_kern(vrh, features, num, weak_barriers, |
---|
| 1261 | + desc, avail, used); |
---|
| 1262 | +} |
---|
| 1263 | +EXPORT_SYMBOL(vringh_init_iotlb); |
---|
| 1264 | + |
---|
| 1265 | +/** |
---|
| 1266 | + * vringh_set_iotlb - attach an IOTLB to an initialized vringh. |
---|
| 1267 | + * @vrh: the vring |
---|
| 1268 | + * @iotlb: iotlb associated with this vring |
---|
| 1269 | + */ |
---|
| 1270 | +void vringh_set_iotlb(struct vringh *vrh, struct vhost_iotlb *iotlb) |
---|
| 1271 | +{ |
---|
| 1272 | + vrh->iotlb = iotlb; |
---|
| 1273 | +} |
---|
| 1274 | +EXPORT_SYMBOL(vringh_set_iotlb); |
---|
| 1275 | + |
---|
| 1276 | +/** |
---|
| 1277 | + * vringh_getdesc_iotlb - get next available descriptor from ring with |
---|
| 1278 | + * IOTLB. |
---|
| 1279 | + * @vrh: the kernelspace vring. |
---|
| 1280 | + * @riov: where to put the readable descriptors (or NULL) |
---|
| 1281 | + * @wiov: where to put the writable descriptors (or NULL) |
---|
| 1282 | + * @head: head index we received, for passing to vringh_complete_iotlb(). |
---|
| 1283 | + * @gfp: flags for allocating larger riov/wiov. |
---|
| 1284 | + * |
---|
| 1285 | + * Returns 0 if there was no descriptor, 1 if there was, or -errno. |
---|
| 1286 | + * |
---|
| 1287 | + * Note that on error return, you can tell the difference between an |
---|
| 1288 | + * invalid ring and a single invalid descriptor: in the former case, |
---|
| 1289 | + * *head will be vrh->vring.num. You may be able to ignore an invalid |
---|
| 1290 | + * descriptor, but there's not much you can do with an invalid ring. |
---|
| 1291 | + * |
---|
| 1292 | + * Note that you may need to clean up riov and wiov, even on error! |
---|
| 1293 | + */ |
---|
| 1294 | +int vringh_getdesc_iotlb(struct vringh *vrh, |
---|
| 1295 | + struct vringh_kiov *riov, |
---|
| 1296 | + struct vringh_kiov *wiov, |
---|
| 1297 | + u16 *head, |
---|
| 1298 | + gfp_t gfp) |
---|
| 1299 | +{ |
---|
| 1300 | + int err; |
---|
| 1301 | + |
---|
| 1302 | + err = __vringh_get_head(vrh, getu16_iotlb, &vrh->last_avail_idx); |
---|
| 1303 | + if (err < 0) |
---|
| 1304 | + return err; |
---|
| 1305 | + |
---|
| 1306 | + /* Empty... */ |
---|
| 1307 | + if (err == vrh->vring.num) |
---|
| 1308 | + return 0; |
---|
| 1309 | + |
---|
| 1310 | + *head = err; |
---|
| 1311 | + err = __vringh_iov(vrh, *head, riov, wiov, no_range_check, NULL, |
---|
| 1312 | + gfp, copydesc_iotlb); |
---|
| 1313 | + if (err) |
---|
| 1314 | + return err; |
---|
| 1315 | + |
---|
| 1316 | + return 1; |
---|
| 1317 | +} |
---|
| 1318 | +EXPORT_SYMBOL(vringh_getdesc_iotlb); |
---|
| 1319 | + |
---|
| 1320 | +/** |
---|
| 1321 | + * vringh_iov_pull_iotlb - copy bytes from vring_iov. |
---|
| 1322 | + * @vrh: the vring. |
---|
| 1323 | + * @riov: the riov as passed to vringh_getdesc_iotlb() (updated as we consume) |
---|
| 1324 | + * @dst: the place to copy. |
---|
| 1325 | + * @len: the maximum length to copy. |
---|
| 1326 | + * |
---|
| 1327 | + * Returns the bytes copied <= len or a negative errno. |
---|
| 1328 | + */ |
---|
| 1329 | +ssize_t vringh_iov_pull_iotlb(struct vringh *vrh, |
---|
| 1330 | + struct vringh_kiov *riov, |
---|
| 1331 | + void *dst, size_t len) |
---|
| 1332 | +{ |
---|
| 1333 | + return vringh_iov_xfer(vrh, riov, dst, len, xfer_from_iotlb); |
---|
| 1334 | +} |
---|
| 1335 | +EXPORT_SYMBOL(vringh_iov_pull_iotlb); |
---|
| 1336 | + |
---|
| 1337 | +/** |
---|
| 1338 | + * vringh_iov_push_iotlb - copy bytes into vring_iov. |
---|
| 1339 | + * @vrh: the vring. |
---|
| 1340 | + * @wiov: the wiov as passed to vringh_getdesc_iotlb() (updated as we consume) |
---|
| 1341 | + * @src: the place to copy from. |
---|
| 1342 | + * @len: the maximum length to copy. |
---|
| 1343 | + * |
---|
| 1344 | + * Returns the bytes copied <= len or a negative errno. |
---|
| 1345 | + */ |
---|
| 1346 | +ssize_t vringh_iov_push_iotlb(struct vringh *vrh, |
---|
| 1347 | + struct vringh_kiov *wiov, |
---|
| 1348 | + const void *src, size_t len) |
---|
| 1349 | +{ |
---|
| 1350 | + return vringh_iov_xfer(vrh, wiov, (void *)src, len, xfer_to_iotlb); |
---|
| 1351 | +} |
---|
| 1352 | +EXPORT_SYMBOL(vringh_iov_push_iotlb); |
---|
| 1353 | + |
---|
| 1354 | +/** |
---|
| 1355 | + * vringh_abandon_iotlb - we've decided not to handle the descriptor(s). |
---|
| 1356 | + * @vrh: the vring. |
---|
| 1357 | + * @num: the number of descriptors to put back (ie. num |
---|
| 1358 | + * vringh_get_iotlb() to undo). |
---|
| 1359 | + * |
---|
| 1360 | + * The next vringh_get_iotlb() will return the old descriptor(s) again. |
---|
| 1361 | + */ |
---|
| 1362 | +void vringh_abandon_iotlb(struct vringh *vrh, unsigned int num) |
---|
| 1363 | +{ |
---|
| 1364 | + /* We only update vring_avail_event(vr) when we want to be notified, |
---|
| 1365 | + * so we haven't changed that yet. |
---|
| 1366 | + */ |
---|
| 1367 | + vrh->last_avail_idx -= num; |
---|
| 1368 | +} |
---|
| 1369 | +EXPORT_SYMBOL(vringh_abandon_iotlb); |
---|
| 1370 | + |
---|
| 1371 | +/** |
---|
| 1372 | + * vringh_complete_iotlb - we've finished with descriptor, publish it. |
---|
| 1373 | + * @vrh: the vring. |
---|
| 1374 | + * @head: the head as filled in by vringh_getdesc_iotlb. |
---|
| 1375 | + * @len: the length of data we have written. |
---|
| 1376 | + * |
---|
| 1377 | + * You should check vringh_need_notify_iotlb() after one or more calls |
---|
| 1378 | + * to this function. |
---|
| 1379 | + */ |
---|
| 1380 | +int vringh_complete_iotlb(struct vringh *vrh, u16 head, u32 len) |
---|
| 1381 | +{ |
---|
| 1382 | + struct vring_used_elem used; |
---|
| 1383 | + |
---|
| 1384 | + used.id = cpu_to_vringh32(vrh, head); |
---|
| 1385 | + used.len = cpu_to_vringh32(vrh, len); |
---|
| 1386 | + |
---|
| 1387 | + return __vringh_complete(vrh, &used, 1, putu16_iotlb, putused_iotlb); |
---|
| 1388 | +} |
---|
| 1389 | +EXPORT_SYMBOL(vringh_complete_iotlb); |
---|
| 1390 | + |
---|
| 1391 | +/** |
---|
| 1392 | + * vringh_notify_enable_iotlb - we want to know if something changes. |
---|
| 1393 | + * @vrh: the vring. |
---|
| 1394 | + * |
---|
| 1395 | + * This always enables notifications, but returns false if there are |
---|
| 1396 | + * now more buffers available in the vring. |
---|
| 1397 | + */ |
---|
| 1398 | +bool vringh_notify_enable_iotlb(struct vringh *vrh) |
---|
| 1399 | +{ |
---|
| 1400 | + return __vringh_notify_enable(vrh, getu16_iotlb, putu16_iotlb); |
---|
| 1401 | +} |
---|
| 1402 | +EXPORT_SYMBOL(vringh_notify_enable_iotlb); |
---|
| 1403 | + |
---|
| 1404 | +/** |
---|
| 1405 | + * vringh_notify_disable_iotlb - don't tell us if something changes. |
---|
| 1406 | + * @vrh: the vring. |
---|
| 1407 | + * |
---|
| 1408 | + * This is our normal running state: we disable and then only enable when |
---|
| 1409 | + * we're going to sleep. |
---|
| 1410 | + */ |
---|
| 1411 | +void vringh_notify_disable_iotlb(struct vringh *vrh) |
---|
| 1412 | +{ |
---|
| 1413 | + __vringh_notify_disable(vrh, putu16_iotlb); |
---|
| 1414 | +} |
---|
| 1415 | +EXPORT_SYMBOL(vringh_notify_disable_iotlb); |
---|
| 1416 | + |
---|
| 1417 | +/** |
---|
| 1418 | + * vringh_need_notify_iotlb - must we tell the other side about used buffers? |
---|
| 1419 | + * @vrh: the vring we've called vringh_complete_iotlb() on. |
---|
| 1420 | + * |
---|
| 1421 | + * Returns -errno or 0 if we don't need to tell the other side, 1 if we do. |
---|
| 1422 | + */ |
---|
| 1423 | +int vringh_need_notify_iotlb(struct vringh *vrh) |
---|
| 1424 | +{ |
---|
| 1425 | + return __vringh_need_notify(vrh, getu16_iotlb); |
---|
| 1426 | +} |
---|
| 1427 | +EXPORT_SYMBOL(vringh_need_notify_iotlb); |
---|
| 1428 | + |
---|
| 1429 | +#endif |
---|
| 1430 | + |
---|
1039 | 1431 | MODULE_LICENSE("GPL"); |
---|