2024-05-14 bedbef8ad3e75a304af6361af235302bcc61d06b
kernel/drivers/vhost/vringh.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Helpers for the host side of a virtio ring.
  *
@@ -12,6 +13,11 @@
 #include <linux/uaccess.h>
 #include <linux/slab.h>
 #include <linux/export.h>
+#if IS_REACHABLE(CONFIG_VHOST_IOTLB)
+#include <linux/bvec.h>
+#include <linux/highmem.h>
+#include <linux/vhost_iotlb.h>
+#endif
 #include <uapi/linux/virtio_config.h>
 
 static __printf(1,2) __cold void vringh_bad(const char *fmt, ...)
@@ -70,9 +76,11 @@
 }
 
 /* Copy some bytes to/from the iovec. Returns num copied. */
-static inline ssize_t vringh_iov_xfer(struct vringh_kiov *iov,
+static inline ssize_t vringh_iov_xfer(struct vringh *vrh,
+                                      struct vringh_kiov *iov,
                                       void *ptr, size_t len,
-                                      int (*xfer)(void *addr, void *ptr,
+                                      int (*xfer)(const struct vringh *vrh,
+                                                  void *addr, void *ptr,
                                                   size_t len))
 {
         int err, done = 0;
@@ -81,7 +89,7 @@
                 size_t partlen;
 
                 partlen = min(iov->iov[iov->i].iov_len, len);
-                err = xfer(iov->iov[iov->i].iov_base, ptr, partlen);
+                err = xfer(vrh, iov->iov[iov->i].iov_base, ptr, partlen);
                 if (err)
                         return err;
                 done += partlen;
@@ -95,6 +103,7 @@
                         /* Fix up old iov element then increment. */
                         iov->iov[iov->i].iov_len = iov->consumed;
                         iov->iov[iov->i].iov_base -= iov->consumed;
+
 
                         iov->consumed = 0;
                         iov->i++;
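The refactor above threads the vringh through every copy callback so that the IOTLB-aware variants added at the end of this patch can reach vrh->iotlb, while the existing user and kernel variants simply ignore the extra argument. For reference, a minimal sketch of a pull-style callback with the new shape; it is not taken from the patch and the name is illustrative only:

/*
 * Illustrative only: a copy callback matching the new xfer signature.
 * The pointer after @vrh is the ring-described buffer and the last
 * pointer is the caller's buffer; an IOTLB-aware variant would first
 * translate the ring-side address through vrh->iotlb instead of
 * copying directly, as xfer_from_iotlb does later in this patch.
 */
static int example_pull_xfer(const struct vringh *vrh, void *src,
                             void *dst, size_t len)
{
        memcpy(dst, src, len);
        return 0;
}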
@@ -226,7 +235,8 @@
                                       u64 addr,
                                       struct vringh_range *r),
                      struct vringh_range *range,
-                     int (*copy)(void *dst, const void *src, size_t len))
+                     int (*copy)(const struct vringh *vrh,
+                                 void *dst, const void *src, size_t len))
 {
         size_t part, len = sizeof(struct vring_desc);
 
@@ -240,7 +250,7 @@
                 if (!rcheck(vrh, addr, &part, range, getrange))
                         return -EINVAL;
 
-                err = copy(dst, src, part);
+                err = copy(vrh, dst, src, part);
                 if (err)
                         return err;
 
@@ -261,9 +271,10 @@
                                              struct vringh_range *)),
              bool (*getrange)(struct vringh *, u64, struct vringh_range *),
              gfp_t gfp,
-             int (*copy)(void *dst, const void *src, size_t len))
+             int (*copy)(const struct vringh *vrh,
+                         void *dst, const void *src, size_t len))
 {
-        int err, count = 0, up_next, desc_max;
+        int err, count = 0, indirect_count = 0, up_next, desc_max;
         struct vring_desc desc, *descs;
         struct vringh_range range = { -1ULL, 0 }, slowrange;
         bool slow = false;
@@ -291,7 +302,7 @@
                         err = slow_copy(vrh, &desc, &descs[i], rcheck, getrange,
                                         &slowrange, copy);
                 else
-                        err = copy(&desc, &descs[i], sizeof(desc));
+                        err = copy(vrh, &desc, &descs[i], sizeof(desc));
                 if (unlikely(err))
                         goto fail;
 
@@ -320,7 +331,12 @@
                         continue;
                 }
 
-                if (count++ == vrh->vring.num) {
+                if (up_next == -1)
+                        count++;
+                else
+                        indirect_count++;
+
+                if (count > vrh->vring.num || indirect_count > desc_max) {
                         vringh_bad("Descriptor loop in %p", descs);
                         err = -ELOOP;
                         goto fail;
@@ -382,6 +398,7 @@
                         i = return_from_indirect(vrh, &up_next,
                                                  &descs, &desc_max);
                         slow = false;
+                        indirect_count = 0;
                 } else
                         break;
         }
@@ -404,7 +421,8 @@
                                     unsigned int num_used,
                                     int (*putu16)(const struct vringh *vrh,
                                                   __virtio16 *p, u16 val),
-                                    int (*putused)(struct vring_used_elem *dst,
+                                    int (*putused)(const struct vringh *vrh,
+                                                   struct vring_used_elem *dst,
                                                    const struct vring_used_elem
                                                    *src, unsigned num))
 {
@@ -420,12 +438,12 @@
         /* Compiler knows num_used == 1 sometimes, hence extra check */
         if (num_used > 1 && unlikely(off + num_used >= vrh->vring.num)) {
                 u16 part = vrh->vring.num - off;
-                err = putused(&used_ring->ring[off], used, part);
+                err = putused(vrh, &used_ring->ring[off], used, part);
                 if (!err)
-                        err = putused(&used_ring->ring[0], used + part,
+                        err = putused(vrh, &used_ring->ring[0], used + part,
                                       num_used - part);
         } else
-                err = putused(&used_ring->ring[off], used, num_used);
+                err = putused(vrh, &used_ring->ring[off], used, num_used);
 
         if (err) {
                 vringh_bad("Failed to write %u used entries %u at %p",
@@ -564,13 +582,15 @@
         return put_user(v, (__force __virtio16 __user *)p);
 }
 
-static inline int copydesc_user(void *dst, const void *src, size_t len)
+static inline int copydesc_user(const struct vringh *vrh,
+                                void *dst, const void *src, size_t len)
 {
         return copy_from_user(dst, (__force void __user *)src, len) ?
                 -EFAULT : 0;
 }
 
-static inline int putused_user(struct vring_used_elem *dst,
+static inline int putused_user(const struct vringh *vrh,
+                               struct vring_used_elem *dst,
                                const struct vring_used_elem *src,
                                unsigned int num)
 {
@@ -578,13 +598,15 @@
                             sizeof(*dst) * num) ? -EFAULT : 0;
 }
 
-static inline int xfer_from_user(void *src, void *dst, size_t len)
+static inline int xfer_from_user(const struct vringh *vrh, void *src,
+                                 void *dst, size_t len)
 {
         return copy_from_user(dst, (__force void __user *)src, len) ?
                 -EFAULT : 0;
 }
 
-static inline int xfer_to_user(void *dst, void *src, size_t len)
+static inline int xfer_to_user(const struct vringh *vrh,
+                               void *dst, void *src, size_t len)
 {
         return copy_to_user((__force void __user *)dst, src, len) ?
                 -EFAULT : 0;
@@ -605,9 +627,9 @@
  */
 int vringh_init_user(struct vringh *vrh, u64 features,
                      unsigned int num, bool weak_barriers,
-                     struct vring_desc __user *desc,
-                     struct vring_avail __user *avail,
-                     struct vring_used __user *used)
+                     vring_desc_t __user *desc,
+                     vring_avail_t __user *avail,
+                     vring_used_t __user *used)
 {
         /* Sane power of 2 please! */
         if (!num || num > 0xffff || (num & (num - 1))) {
@@ -706,7 +728,7 @@
  */
 ssize_t vringh_iov_pull_user(struct vringh_iov *riov, void *dst, size_t len)
 {
-        return vringh_iov_xfer((struct vringh_kiov *)riov,
+        return vringh_iov_xfer(NULL, (struct vringh_kiov *)riov,
                                dst, len, xfer_from_user);
 }
 EXPORT_SYMBOL(vringh_iov_pull_user);
@@ -714,7 +736,7 @@
 /**
  * vringh_iov_push_user - copy bytes into vring_iov.
  * @wiov: the wiov as passed to vringh_getdesc_user() (updated as we consume)
- * @dst: the place to copy.
+ * @src: the place to copy from.
  * @len: the maximum length to copy.
  *
  * Returns the bytes copied <= len or a negative errno.
@@ -722,7 +744,7 @@
 ssize_t vringh_iov_push_user(struct vringh_iov *wiov,
                              const void *src, size_t len)
 {
-        return vringh_iov_xfer((struct vringh_kiov *)wiov,
+        return vringh_iov_xfer(NULL, (struct vringh_kiov *)wiov,
                                (void *)src, len, xfer_to_user);
 }
 EXPORT_SYMBOL(vringh_iov_push_user);
@@ -832,13 +854,15 @@
         return 0;
 }
 
-static inline int copydesc_kern(void *dst, const void *src, size_t len)
+static inline int copydesc_kern(const struct vringh *vrh,
+                                void *dst, const void *src, size_t len)
 {
         memcpy(dst, src, len);
         return 0;
 }
 
-static inline int putused_kern(struct vring_used_elem *dst,
+static inline int putused_kern(const struct vringh *vrh,
+                               struct vring_used_elem *dst,
                                const struct vring_used_elem *src,
                                unsigned int num)
 {
@@ -846,7 +870,15 @@
         return 0;
 }
 
-static inline int xfer_kern(void *src, void *dst, size_t len)
+static inline int xfer_kern(const struct vringh *vrh, void *src,
+                            void *dst, size_t len)
+{
+        memcpy(dst, src, len);
+        return 0;
+}
+
+static inline int kern_xfer(const struct vringh *vrh, void *dst,
+                            void *src, size_t len)
 {
         memcpy(dst, src, len);
         return 0;
@@ -943,14 +975,14 @@
  */
 ssize_t vringh_iov_pull_kern(struct vringh_kiov *riov, void *dst, size_t len)
 {
-        return vringh_iov_xfer(riov, dst, len, xfer_kern);
+        return vringh_iov_xfer(NULL, riov, dst, len, xfer_kern);
 }
 EXPORT_SYMBOL(vringh_iov_pull_kern);
 
 /**
  * vringh_iov_push_kern - copy bytes into vring_iov.
  * @wiov: the wiov as passed to vringh_getdesc_kern() (updated as we consume)
- * @dst: the place to copy.
+ * @src: the place to copy from.
  * @len: the maximum length to copy.
  *
  * Returns the bytes copied <= len or a negative errno.
@@ -958,7 +990,7 @@
 ssize_t vringh_iov_push_kern(struct vringh_kiov *wiov,
                              const void *src, size_t len)
 {
-        return vringh_iov_xfer(wiov, (void *)src, len, xfer_kern);
+        return vringh_iov_xfer(NULL, wiov, (void *)src, len, kern_xfer);
 }
 EXPORT_SYMBOL(vringh_iov_push_kern);
 
@@ -1036,4 +1068,364 @@
 }
 EXPORT_SYMBOL(vringh_need_notify_kern);
 
+#if IS_REACHABLE(CONFIG_VHOST_IOTLB)
+
+static int iotlb_translate(const struct vringh *vrh,
+                           u64 addr, u64 len, struct bio_vec iov[],
+                           int iov_size, u32 perm)
+{
+        struct vhost_iotlb_map *map;
+        struct vhost_iotlb *iotlb = vrh->iotlb;
+        int ret = 0;
+        u64 s = 0, last = addr + len - 1;
+
+        while (len > s) {
+                u64 size, pa, pfn;
+
+                if (unlikely(ret >= iov_size)) {
+                        ret = -ENOBUFS;
+                        break;
+                }
+
+                map = vhost_iotlb_itree_first(iotlb, addr, last);
+                if (!map || map->start > addr) {
+                        ret = -EINVAL;
+                        break;
+                } else if (!(map->perm & perm)) {
+                        ret = -EPERM;
+                        break;
+                }
+
+                size = map->size - addr + map->start;
+                pa = map->addr + addr - map->start;
+                pfn = pa >> PAGE_SHIFT;
+                iov[ret].bv_page = pfn_to_page(pfn);
+                iov[ret].bv_len = min(len - s, size);
+                iov[ret].bv_offset = pa & (PAGE_SIZE - 1);
+                s += size;
+                addr += size;
+                ++ret;
+        }
+
+        return ret;
+}
+
+static inline int copy_from_iotlb(const struct vringh *vrh, void *dst,
+                                  void *src, size_t len)
+{
+        struct iov_iter iter;
+        struct bio_vec iov[16];
+        int ret;
+
+        ret = iotlb_translate(vrh, (u64)(uintptr_t)src,
+                              len, iov, 16, VHOST_MAP_RO);
+        if (ret < 0)
+                return ret;
+
+        iov_iter_bvec(&iter, READ, iov, ret, len);
+
+        ret = copy_from_iter(dst, len, &iter);
+
+        return ret;
+}
+
+static inline int copy_to_iotlb(const struct vringh *vrh, void *dst,
+                                void *src, size_t len)
+{
+        struct iov_iter iter;
+        struct bio_vec iov[16];
+        int ret;
+
+        ret = iotlb_translate(vrh, (u64)(uintptr_t)dst,
+                              len, iov, 16, VHOST_MAP_WO);
+        if (ret < 0)
+                return ret;
+
+        iov_iter_bvec(&iter, WRITE, iov, ret, len);
+
+        return copy_to_iter(src, len, &iter);
+}
+
+static inline int getu16_iotlb(const struct vringh *vrh,
+                               u16 *val, const __virtio16 *p)
+{
+        struct bio_vec iov;
+        void *kaddr, *from;
+        int ret;
+
+        /* Atomic read is needed for getu16 */
+        ret = iotlb_translate(vrh, (u64)(uintptr_t)p, sizeof(*p),
+                              &iov, 1, VHOST_MAP_RO);
+        if (ret < 0)
+                return ret;
+
+        kaddr = kmap_atomic(iov.bv_page);
+        from = kaddr + iov.bv_offset;
+        *val = vringh16_to_cpu(vrh, READ_ONCE(*(__virtio16 *)from));
+        kunmap_atomic(kaddr);
+
+        return 0;
+}
+
+static inline int putu16_iotlb(const struct vringh *vrh,
+                               __virtio16 *p, u16 val)
+{
+        struct bio_vec iov;
+        void *kaddr, *to;
+        int ret;
+
+        /* Atomic write is needed for putu16 */
+        ret = iotlb_translate(vrh, (u64)(uintptr_t)p, sizeof(*p),
+                              &iov, 1, VHOST_MAP_WO);
+        if (ret < 0)
+                return ret;
+
+        kaddr = kmap_atomic(iov.bv_page);
+        to = kaddr + iov.bv_offset;
+        WRITE_ONCE(*(__virtio16 *)to, cpu_to_vringh16(vrh, val));
+        kunmap_atomic(kaddr);
+
+        return 0;
+}
+
+static inline int copydesc_iotlb(const struct vringh *vrh,
+                                 void *dst, const void *src, size_t len)
+{
+        int ret;
+
+        ret = copy_from_iotlb(vrh, dst, (void *)src, len);
+        if (ret != len)
+                return -EFAULT;
+
+        return 0;
+}
+
+static inline int xfer_from_iotlb(const struct vringh *vrh, void *src,
+                                  void *dst, size_t len)
+{
+        int ret;
+
+        ret = copy_from_iotlb(vrh, dst, src, len);
+        if (ret != len)
+                return -EFAULT;
+
+        return 0;
+}
+
+static inline int xfer_to_iotlb(const struct vringh *vrh,
+                                void *dst, void *src, size_t len)
+{
+        int ret;
+
+        ret = copy_to_iotlb(vrh, dst, src, len);
+        if (ret != len)
+                return -EFAULT;
+
+        return 0;
+}
+
+static inline int putused_iotlb(const struct vringh *vrh,
+                                struct vring_used_elem *dst,
+                                const struct vring_used_elem *src,
+                                unsigned int num)
+{
+        int size = num * sizeof(*dst);
+        int ret;
+
+        ret = copy_to_iotlb(vrh, dst, (void *)src, num * sizeof(*dst));
+        if (ret != size)
+                return -EFAULT;
+
+        return 0;
+}
+
+/**
+ * vringh_init_iotlb - initialize a vringh for a ring with IOTLB.
+ * @vrh: the vringh to initialize.
+ * @features: the feature bits for this ring.
+ * @num: the number of elements.
+ * @weak_barriers: true if we only need memory barriers, not I/O.
+ * @desc: the userspace descriptor pointer.
+ * @avail: the userspace avail pointer.
+ * @used: the userspace used pointer.
+ *
+ * Returns an error if num is invalid.
+ */
+int vringh_init_iotlb(struct vringh *vrh, u64 features,
+                      unsigned int num, bool weak_barriers,
+                      struct vring_desc *desc,
+                      struct vring_avail *avail,
+                      struct vring_used *used)
+{
+        return vringh_init_kern(vrh, features, num, weak_barriers,
+                                desc, avail, used);
+}
+EXPORT_SYMBOL(vringh_init_iotlb);
+
+/**
+ * vringh_set_iotlb - initialize a vringh for a ring with IOTLB.
+ * @vrh: the vring
+ * @iotlb: iotlb associated with this vring
+ */
+void vringh_set_iotlb(struct vringh *vrh, struct vhost_iotlb *iotlb)
+{
+        vrh->iotlb = iotlb;
+}
+EXPORT_SYMBOL(vringh_set_iotlb);
+
+/**
+ * vringh_getdesc_iotlb - get next available descriptor from ring with
+ * IOTLB.
+ * @vrh: the kernelspace vring.
+ * @riov: where to put the readable descriptors (or NULL)
+ * @wiov: where to put the writable descriptors (or NULL)
+ * @head: head index we received, for passing to vringh_complete_iotlb().
+ * @gfp: flags for allocating larger riov/wiov.
+ *
+ * Returns 0 if there was no descriptor, 1 if there was, or -errno.
+ *
+ * Note that on error return, you can tell the difference between an
+ * invalid ring and a single invalid descriptor: in the former case,
+ * *head will be vrh->vring.num. You may be able to ignore an invalid
+ * descriptor, but there's not much you can do with an invalid ring.
+ *
+ * Note that you may need to clean up riov and wiov, even on error!
+ */
+int vringh_getdesc_iotlb(struct vringh *vrh,
+                         struct vringh_kiov *riov,
+                         struct vringh_kiov *wiov,
+                         u16 *head,
+                         gfp_t gfp)
+{
+        int err;
+
+        err = __vringh_get_head(vrh, getu16_iotlb, &vrh->last_avail_idx);
+        if (err < 0)
+                return err;
+
+        /* Empty... */
+        if (err == vrh->vring.num)
+                return 0;
+
+        *head = err;
+        err = __vringh_iov(vrh, *head, riov, wiov, no_range_check, NULL,
+                           gfp, copydesc_iotlb);
+        if (err)
+                return err;
+
+        return 1;
+}
+EXPORT_SYMBOL(vringh_getdesc_iotlb);
+
+/**
+ * vringh_iov_pull_iotlb - copy bytes from vring_iov.
+ * @vrh: the vring.
+ * @riov: the riov as passed to vringh_getdesc_iotlb() (updated as we consume)
+ * @dst: the place to copy.
+ * @len: the maximum length to copy.
+ *
+ * Returns the bytes copied <= len or a negative errno.
+ */
+ssize_t vringh_iov_pull_iotlb(struct vringh *vrh,
+                              struct vringh_kiov *riov,
+                              void *dst, size_t len)
+{
+        return vringh_iov_xfer(vrh, riov, dst, len, xfer_from_iotlb);
+}
+EXPORT_SYMBOL(vringh_iov_pull_iotlb);
+
+/**
+ * vringh_iov_push_iotlb - copy bytes into vring_iov.
+ * @vrh: the vring.
+ * @wiov: the wiov as passed to vringh_getdesc_iotlb() (updated as we consume)
+ * @src: the place to copy from.
+ * @len: the maximum length to copy.
+ *
+ * Returns the bytes copied <= len or a negative errno.
+ */
+ssize_t vringh_iov_push_iotlb(struct vringh *vrh,
+                              struct vringh_kiov *wiov,
+                              const void *src, size_t len)
+{
+        return vringh_iov_xfer(vrh, wiov, (void *)src, len, xfer_to_iotlb);
+}
+EXPORT_SYMBOL(vringh_iov_push_iotlb);
+
+/**
+ * vringh_abandon_iotlb - we've decided not to handle the descriptor(s).
+ * @vrh: the vring.
+ * @num: the number of descriptors to put back (ie. num
+ *       vringh_get_iotlb() to undo).
+ *
+ * The next vringh_get_iotlb() will return the old descriptor(s) again.
+ */
+void vringh_abandon_iotlb(struct vringh *vrh, unsigned int num)
+{
+        /* We only update vring_avail_event(vr) when we want to be notified,
+         * so we haven't changed that yet.
+         */
+        vrh->last_avail_idx -= num;
+}
+EXPORT_SYMBOL(vringh_abandon_iotlb);
+
+/**
+ * vringh_complete_iotlb - we've finished with descriptor, publish it.
+ * @vrh: the vring.
+ * @head: the head as filled in by vringh_getdesc_iotlb.
+ * @len: the length of data we have written.
+ *
+ * You should check vringh_need_notify_iotlb() after one or more calls
+ * to this function.
+ */
+int vringh_complete_iotlb(struct vringh *vrh, u16 head, u32 len)
+{
+        struct vring_used_elem used;
+
+        used.id = cpu_to_vringh32(vrh, head);
+        used.len = cpu_to_vringh32(vrh, len);
+
+        return __vringh_complete(vrh, &used, 1, putu16_iotlb, putused_iotlb);
+}
+EXPORT_SYMBOL(vringh_complete_iotlb);
+
+/**
+ * vringh_notify_enable_iotlb - we want to know if something changes.
+ * @vrh: the vring.
+ *
+ * This always enables notifications, but returns false if there are
+ * now more buffers available in the vring.
+ */
+bool vringh_notify_enable_iotlb(struct vringh *vrh)
+{
+        return __vringh_notify_enable(vrh, getu16_iotlb, putu16_iotlb);
+}
+EXPORT_SYMBOL(vringh_notify_enable_iotlb);
+
+/**
+ * vringh_notify_disable_iotlb - don't tell us if something changes.
+ * @vrh: the vring.
+ *
+ * This is our normal running state: we disable and then only enable when
+ * we're going to sleep.
+ */
+void vringh_notify_disable_iotlb(struct vringh *vrh)
+{
+        __vringh_notify_disable(vrh, putu16_iotlb);
+}
+EXPORT_SYMBOL(vringh_notify_disable_iotlb);
+
+/**
+ * vringh_need_notify_iotlb - must we tell the other side about used buffers?
+ * @vrh: the vring we've called vringh_complete_iotlb() on.
+ *
+ * Returns -errno or 0 if we don't need to tell the other side, 1 if we do.
+ */
+int vringh_need_notify_iotlb(struct vringh *vrh)
+{
+        return __vringh_need_notify(vrh, getu16_iotlb);
+}
+EXPORT_SYMBOL(vringh_need_notify_iotlb);
+
+#endif
+
 MODULE_LICENSE("GPL");
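Taken together, the exports above add an IOTLB-translated front end beside the existing *_user and *_kern ones. A rough usage sketch follows, assuming a kernel consumer (for instance a vDPA device simulator) that already owns a struct vringh, a struct vhost_iotlb and persistent vringh_kiov state, and that <linux/vringh.h> is available; the function name, buffer size and notification hook are illustrative, and kiov setup/cleanup is left out:

/* Illustrative only: draining a ring through the IOTLB-aware helpers. */
static void example_process_ring(struct vringh *vrh, struct vhost_iotlb *iotlb,
                                 struct vringh_kiov *riov,
                                 struct vringh_kiov *wiov)
{
        u8 buf[64];
        u16 head;
        int ret;

        /* All ring and buffer addresses are now translated via @iotlb. */
        vringh_set_iotlb(vrh, iotlb);

        for (;;) {
                ret = vringh_getdesc_iotlb(vrh, riov, wiov, &head, GFP_ATOMIC);
                if (ret <= 0)           /* 0: ring empty, < 0: error */
                        break;

                /* Copy out the readable part of the chain (at most sizeof(buf)). */
                ret = vringh_iov_pull_iotlb(vrh, riov, buf, sizeof(buf));
                if (ret < 0)
                        break;

                /* Publish the used entry; nothing was written back here. */
                vringh_complete_iotlb(vrh, head, 0);
        }

        if (vringh_need_notify_iotlb(vrh) > 0) {
                /* kick the driver side, e.g. via the device's callback */
        }
}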