forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-09 95099d4622f8cb224d94e314c7a8e0df60b13f87
kernel/drivers/vhost/vringh.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Helpers for the host side of a virtio ring.
  *
@@ -12,6 +13,11 @@
 #include <linux/uaccess.h>
 #include <linux/slab.h>
 #include <linux/export.h>
+#if IS_REACHABLE(CONFIG_VHOST_IOTLB)
+#include <linux/bvec.h>
+#include <linux/highmem.h>
+#include <linux/vhost_iotlb.h>
+#endif
 #include <uapi/linux/virtio_config.h>
 
 static __printf(1,2) __cold void vringh_bad(const char *fmt, ...)
@@ -70,9 +76,11 @@
 }
 
 /* Copy some bytes to/from the iovec. Returns num copied. */
-static inline ssize_t vringh_iov_xfer(struct vringh_kiov *iov,
+static inline ssize_t vringh_iov_xfer(struct vringh *vrh,
+				      struct vringh_kiov *iov,
 				      void *ptr, size_t len,
-				      int (*xfer)(void *addr, void *ptr,
+				      int (*xfer)(const struct vringh *vrh,
+						  void *addr, void *ptr,
 						  size_t len))
 {
 	int err, done = 0;
@@ -81,7 +89,7 @@
 		size_t partlen;
 
 		partlen = min(iov->iov[iov->i].iov_len, len);
-		err = xfer(iov->iov[iov->i].iov_base, ptr, partlen);
+		err = xfer(vrh, iov->iov[iov->i].iov_base, ptr, partlen);
 		if (err)
 			return err;
 		done += partlen;
@@ -95,6 +103,7 @@
 			/* Fix up old iov element then increment. */
 			iov->iov[iov->i].iov_len = iov->consumed;
 			iov->iov[iov->i].iov_base -= iov->consumed;
+
 
 			iov->consumed = 0;
 			iov->i++;
@@ -226,7 +235,8 @@
 					     u64 addr,
 					     struct vringh_range *r),
 			    struct vringh_range *range,
-			    int (*copy)(void *dst, const void *src, size_t len))
+			    int (*copy)(const struct vringh *vrh,
+					void *dst, const void *src, size_t len))
 {
 	size_t part, len = sizeof(struct vring_desc);
 
@@ -240,7 +250,7 @@
 		if (!rcheck(vrh, addr, &part, range, getrange))
 			return -EINVAL;
 
-		err = copy(dst, src, part);
+		err = copy(vrh, dst, src, part);
 		if (err)
 			return err;
 
@@ -261,9 +271,10 @@
 					      struct vringh_range *)),
 	     bool (*getrange)(struct vringh *, u64, struct vringh_range *),
 	     gfp_t gfp,
-	     int (*copy)(void *dst, const void *src, size_t len))
+	     int (*copy)(const struct vringh *vrh,
+			 void *dst, const void *src, size_t len))
 {
-	int err, count = 0, up_next, desc_max;
+	int err, count = 0, indirect_count = 0, up_next, desc_max;
 	struct vring_desc desc, *descs;
 	struct vringh_range range = { -1ULL, 0 }, slowrange;
 	bool slow = false;
@@ -291,7 +302,7 @@
 			err = slow_copy(vrh, &desc, &descs[i], rcheck, getrange,
 					&slowrange, copy);
 		else
-			err = copy(&desc, &descs[i], sizeof(desc));
+			err = copy(vrh, &desc, &descs[i], sizeof(desc));
 		if (unlikely(err))
 			goto fail;
 
@@ -320,7 +331,12 @@
 			continue;
 		}
 
-		if (count++ == vrh->vring.num) {
+		if (up_next == -1)
+			count++;
+		else
+			indirect_count++;
+
+		if (count > vrh->vring.num || indirect_count > desc_max) {
 			vringh_bad("Descriptor loop in %p", descs);
 			err = -ELOOP;
 			goto fail;
@@ -382,6 +398,7 @@
 				i = return_from_indirect(vrh, &up_next,
 							 &descs, &desc_max);
 				slow = false;
+				indirect_count = 0;
 			} else
 				break;
 		}
@@ -404,7 +421,8 @@
 				    unsigned int num_used,
 				    int (*putu16)(const struct vringh *vrh,
 						  __virtio16 *p, u16 val),
-				    int (*putused)(struct vring_used_elem *dst,
+				    int (*putused)(const struct vringh *vrh,
+						   struct vring_used_elem *dst,
 						   const struct vring_used_elem
 						   *src, unsigned num))
 {
@@ -420,12 +438,12 @@
 	/* Compiler knows num_used == 1 sometimes, hence extra check */
 	if (num_used > 1 && unlikely(off + num_used >= vrh->vring.num)) {
 		u16 part = vrh->vring.num - off;
-		err = putused(&used_ring->ring[off], used, part);
+		err = putused(vrh, &used_ring->ring[off], used, part);
 		if (!err)
-			err = putused(&used_ring->ring[0], used + part,
+			err = putused(vrh, &used_ring->ring[0], used + part,
 				      num_used - part);
 	} else
-		err = putused(&used_ring->ring[off], used, num_used);
+		err = putused(vrh, &used_ring->ring[off], used, num_used);
 
 	if (err) {
 		vringh_bad("Failed to write %u used entries %u at %p",
@@ -564,13 +582,15 @@
 	return put_user(v, (__force __virtio16 __user *)p);
 }
 
-static inline int copydesc_user(void *dst, const void *src, size_t len)
+static inline int copydesc_user(const struct vringh *vrh,
+				void *dst, const void *src, size_t len)
 {
 	return copy_from_user(dst, (__force void __user *)src, len) ?
 		-EFAULT : 0;
 }
 
-static inline int putused_user(struct vring_used_elem *dst,
+static inline int putused_user(const struct vringh *vrh,
+			       struct vring_used_elem *dst,
 			       const struct vring_used_elem *src,
 			       unsigned int num)
 {
@@ -578,13 +598,15 @@
 			    sizeof(*dst) * num) ? -EFAULT : 0;
 }
 
-static inline int xfer_from_user(void *src, void *dst, size_t len)
+static inline int xfer_from_user(const struct vringh *vrh, void *src,
+				 void *dst, size_t len)
 {
 	return copy_from_user(dst, (__force void __user *)src, len) ?
 		-EFAULT : 0;
 }
 
-static inline int xfer_to_user(void *dst, void *src, size_t len)
+static inline int xfer_to_user(const struct vringh *vrh,
+			       void *dst, void *src, size_t len)
 {
 	return copy_to_user((__force void __user *)dst, src, len) ?
 		-EFAULT : 0;
@@ -605,9 +627,9 @@
  */
 int vringh_init_user(struct vringh *vrh, u64 features,
 		     unsigned int num, bool weak_barriers,
-		     struct vring_desc __user *desc,
-		     struct vring_avail __user *avail,
-		     struct vring_used __user *used)
+		     vring_desc_t __user *desc,
+		     vring_avail_t __user *avail,
+		     vring_used_t __user *used)
 {
 	/* Sane power of 2 please! */
 	if (!num || num > 0xffff || (num & (num - 1))) {
@@ -706,7 +728,7 @@
  */
 ssize_t vringh_iov_pull_user(struct vringh_iov *riov, void *dst, size_t len)
 {
-	return vringh_iov_xfer((struct vringh_kiov *)riov,
+	return vringh_iov_xfer(NULL, (struct vringh_kiov *)riov,
 			       dst, len, xfer_from_user);
 }
 EXPORT_SYMBOL(vringh_iov_pull_user);
@@ -714,7 +736,7 @@
 /**
  * vringh_iov_push_user - copy bytes into vring_iov.
  * @wiov: the wiov as passed to vringh_getdesc_user() (updated as we consume)
- * @dst: the place to copy.
+ * @src: the place to copy from.
  * @len: the maximum length to copy.
 *
 * Returns the bytes copied <= len or a negative errno.
@@ -722,7 +744,7 @@
 ssize_t vringh_iov_push_user(struct vringh_iov *wiov,
 			     const void *src, size_t len)
 {
-	return vringh_iov_xfer((struct vringh_kiov *)wiov,
+	return vringh_iov_xfer(NULL, (struct vringh_kiov *)wiov,
 			       (void *)src, len, xfer_to_user);
 }
 EXPORT_SYMBOL(vringh_iov_push_user);
@@ -832,13 +854,15 @@
 	return 0;
 }
 
-static inline int copydesc_kern(void *dst, const void *src, size_t len)
+static inline int copydesc_kern(const struct vringh *vrh,
+				void *dst, const void *src, size_t len)
 {
 	memcpy(dst, src, len);
 	return 0;
 }
 
-static inline int putused_kern(struct vring_used_elem *dst,
+static inline int putused_kern(const struct vringh *vrh,
+			       struct vring_used_elem *dst,
 			       const struct vring_used_elem *src,
 			       unsigned int num)
 {
@@ -846,7 +870,15 @@
 	return 0;
 }
 
-static inline int xfer_kern(void *src, void *dst, size_t len)
+static inline int xfer_kern(const struct vringh *vrh, void *src,
+			    void *dst, size_t len)
+{
+	memcpy(dst, src, len);
+	return 0;
+}
+
+static inline int kern_xfer(const struct vringh *vrh, void *dst,
+			    void *src, size_t len)
 {
 	memcpy(dst, src, len);
 	return 0;
@@ -943,14 +975,14 @@
  */
 ssize_t vringh_iov_pull_kern(struct vringh_kiov *riov, void *dst, size_t len)
 {
-	return vringh_iov_xfer(riov, dst, len, xfer_kern);
+	return vringh_iov_xfer(NULL, riov, dst, len, xfer_kern);
 }
 EXPORT_SYMBOL(vringh_iov_pull_kern);
 
 /**
  * vringh_iov_push_kern - copy bytes into vring_iov.
  * @wiov: the wiov as passed to vringh_getdesc_kern() (updated as we consume)
- * @dst: the place to copy.
+ * @src: the place to copy from.
  * @len: the maximum length to copy.
 *
 * Returns the bytes copied <= len or a negative errno.
@@ -958,7 +990,7 @@
 ssize_t vringh_iov_push_kern(struct vringh_kiov *wiov,
 			     const void *src, size_t len)
 {
-	return vringh_iov_xfer(wiov, (void *)src, len, xfer_kern);
+	return vringh_iov_xfer(NULL, wiov, (void *)src, len, kern_xfer);
 }
 EXPORT_SYMBOL(vringh_iov_push_kern);
 
@@ -1036,4 +1068,365 @@
 }
 EXPORT_SYMBOL(vringh_need_notify_kern);
 
+#if IS_REACHABLE(CONFIG_VHOST_IOTLB)
+
+static int iotlb_translate(const struct vringh *vrh,
+			   u64 addr, u64 len, struct bio_vec iov[],
+			   int iov_size, u32 perm)
+{
+	struct vhost_iotlb_map *map;
+	struct vhost_iotlb *iotlb = vrh->iotlb;
+	int ret = 0;
+	u64 s = 0;
+
+	while (len > s) {
+		u64 size, pa, pfn;
+
+		if (unlikely(ret >= iov_size)) {
+			ret = -ENOBUFS;
+			break;
+		}
+
+		map = vhost_iotlb_itree_first(iotlb, addr,
+					      addr + len - 1);
+		if (!map || map->start > addr) {
+			ret = -EINVAL;
+			break;
+		} else if (!(map->perm & perm)) {
+			ret = -EPERM;
+			break;
+		}
+
+		size = map->size - addr + map->start;
+		pa = map->addr + addr - map->start;
+		pfn = pa >> PAGE_SHIFT;
+		iov[ret].bv_page = pfn_to_page(pfn);
+		iov[ret].bv_len = min(len - s, size);
+		iov[ret].bv_offset = pa & (PAGE_SIZE - 1);
+		s += size;
+		addr += size;
+		++ret;
+	}
+
+	return ret;
+}
+
+static inline int copy_from_iotlb(const struct vringh *vrh, void *dst,
+				  void *src, size_t len)
+{
+	struct iov_iter iter;
+	struct bio_vec iov[16];
+	int ret;
+
+	ret = iotlb_translate(vrh, (u64)(uintptr_t)src,
+			      len, iov, 16, VHOST_MAP_RO);
+	if (ret < 0)
+		return ret;
+
+	iov_iter_bvec(&iter, READ, iov, ret, len);
+
+	ret = copy_from_iter(dst, len, &iter);
+
+	return ret;
+}
+
+static inline int copy_to_iotlb(const struct vringh *vrh, void *dst,
+				void *src, size_t len)
+{
+	struct iov_iter iter;
+	struct bio_vec iov[16];
+	int ret;
+
+	ret = iotlb_translate(vrh, (u64)(uintptr_t)dst,
+			      len, iov, 16, VHOST_MAP_WO);
+	if (ret < 0)
+		return ret;
+
+	iov_iter_bvec(&iter, WRITE, iov, ret, len);
+
+	return copy_to_iter(src, len, &iter);
+}
+
+static inline int getu16_iotlb(const struct vringh *vrh,
+			       u16 *val, const __virtio16 *p)
+{
+	struct bio_vec iov;
+	void *kaddr, *from;
+	int ret;
+
+	/* Atomic read is needed for getu16 */
+	ret = iotlb_translate(vrh, (u64)(uintptr_t)p, sizeof(*p),
+			      &iov, 1, VHOST_MAP_RO);
+	if (ret < 0)
+		return ret;
+
+	kaddr = kmap_atomic(iov.bv_page);
+	from = kaddr + iov.bv_offset;
+	*val = vringh16_to_cpu(vrh, READ_ONCE(*(__virtio16 *)from));
+	kunmap_atomic(kaddr);
+
+	return 0;
+}
+
+static inline int putu16_iotlb(const struct vringh *vrh,
+			       __virtio16 *p, u16 val)
+{
+	struct bio_vec iov;
+	void *kaddr, *to;
+	int ret;
+
+	/* Atomic write is needed for putu16 */
+	ret = iotlb_translate(vrh, (u64)(uintptr_t)p, sizeof(*p),
+			      &iov, 1, VHOST_MAP_WO);
+	if (ret < 0)
+		return ret;
+
+	kaddr = kmap_atomic(iov.bv_page);
+	to = kaddr + iov.bv_offset;
+	WRITE_ONCE(*(__virtio16 *)to, cpu_to_vringh16(vrh, val));
+	kunmap_atomic(kaddr);
+
+	return 0;
+}
+
+static inline int copydesc_iotlb(const struct vringh *vrh,
+				 void *dst, const void *src, size_t len)
+{
+	int ret;
+
+	ret = copy_from_iotlb(vrh, dst, (void *)src, len);
+	if (ret != len)
+		return -EFAULT;
+
+	return 0;
+}
+
+static inline int xfer_from_iotlb(const struct vringh *vrh, void *src,
+				  void *dst, size_t len)
+{
+	int ret;
+
+	ret = copy_from_iotlb(vrh, dst, src, len);
+	if (ret != len)
+		return -EFAULT;
+
+	return 0;
+}
+
+static inline int xfer_to_iotlb(const struct vringh *vrh,
+				void *dst, void *src, size_t len)
+{
+	int ret;
+
+	ret = copy_to_iotlb(vrh, dst, src, len);
+	if (ret != len)
+		return -EFAULT;
+
+	return 0;
+}
+
+static inline int putused_iotlb(const struct vringh *vrh,
+				struct vring_used_elem *dst,
+				const struct vring_used_elem *src,
+				unsigned int num)
+{
+	int size = num * sizeof(*dst);
+	int ret;
+
+	ret = copy_to_iotlb(vrh, dst, (void *)src, num * sizeof(*dst));
+	if (ret != size)
+		return -EFAULT;
+
+	return 0;
+}
+
+/**
+ * vringh_init_iotlb - initialize a vringh for a ring with IOTLB.
+ * @vrh: the vringh to initialize.
+ * @features: the feature bits for this ring.
+ * @num: the number of elements.
+ * @weak_barriers: true if we only need memory barriers, not I/O.
+ * @desc: the userpace descriptor pointer.
+ * @avail: the userpace avail pointer.
+ * @used: the userpace used pointer.
+ *
+ * Returns an error if num is invalid.
+ */
+int vringh_init_iotlb(struct vringh *vrh, u64 features,
+		      unsigned int num, bool weak_barriers,
+		      struct vring_desc *desc,
+		      struct vring_avail *avail,
+		      struct vring_used *used)
+{
+	return vringh_init_kern(vrh, features, num, weak_barriers,
+				desc, avail, used);
+}
+EXPORT_SYMBOL(vringh_init_iotlb);
+
+/**
+ * vringh_set_iotlb - initialize a vringh for a ring with IOTLB.
+ * @vrh: the vring
+ * @iotlb: iotlb associated with this vring
+ */
+void vringh_set_iotlb(struct vringh *vrh, struct vhost_iotlb *iotlb)
+{
+	vrh->iotlb = iotlb;
+}
+EXPORT_SYMBOL(vringh_set_iotlb);
+
+/**
+ * vringh_getdesc_iotlb - get next available descriptor from ring with
+ * IOTLB.
+ * @vrh: the kernelspace vring.
+ * @riov: where to put the readable descriptors (or NULL)
+ * @wiov: where to put the writable descriptors (or NULL)
+ * @head: head index we received, for passing to vringh_complete_iotlb().
+ * @gfp: flags for allocating larger riov/wiov.
+ *
+ * Returns 0 if there was no descriptor, 1 if there was, or -errno.
+ *
+ * Note that on error return, you can tell the difference between an
+ * invalid ring and a single invalid descriptor: in the former case,
+ * *head will be vrh->vring.num. You may be able to ignore an invalid
+ * descriptor, but there's not much you can do with an invalid ring.
+ *
+ * Note that you may need to clean up riov and wiov, even on error!
+ */
+int vringh_getdesc_iotlb(struct vringh *vrh,
+			 struct vringh_kiov *riov,
+			 struct vringh_kiov *wiov,
+			 u16 *head,
+			 gfp_t gfp)
+{
+	int err;
+
+	err = __vringh_get_head(vrh, getu16_iotlb, &vrh->last_avail_idx);
+	if (err < 0)
+		return err;
+
+	/* Empty... */
+	if (err == vrh->vring.num)
+		return 0;
+
+	*head = err;
+	err = __vringh_iov(vrh, *head, riov, wiov, no_range_check, NULL,
+			   gfp, copydesc_iotlb);
+	if (err)
+		return err;
+
+	return 1;
+}
+EXPORT_SYMBOL(vringh_getdesc_iotlb);
+
+/**
+ * vringh_iov_pull_iotlb - copy bytes from vring_iov.
+ * @vrh: the vring.
+ * @riov: the riov as passed to vringh_getdesc_iotlb() (updated as we consume)
+ * @dst: the place to copy.
+ * @len: the maximum length to copy.
+ *
+ * Returns the bytes copied <= len or a negative errno.
+ */
+ssize_t vringh_iov_pull_iotlb(struct vringh *vrh,
+			      struct vringh_kiov *riov,
+			      void *dst, size_t len)
+{
+	return vringh_iov_xfer(vrh, riov, dst, len, xfer_from_iotlb);
+}
+EXPORT_SYMBOL(vringh_iov_pull_iotlb);
+
+/**
+ * vringh_iov_push_iotlb - copy bytes into vring_iov.
+ * @vrh: the vring.
+ * @wiov: the wiov as passed to vringh_getdesc_iotlb() (updated as we consume)
+ * @src: the place to copy from.
+ * @len: the maximum length to copy.
+ *
+ * Returns the bytes copied <= len or a negative errno.
+ */
+ssize_t vringh_iov_push_iotlb(struct vringh *vrh,
+			      struct vringh_kiov *wiov,
+			      const void *src, size_t len)
+{
+	return vringh_iov_xfer(vrh, wiov, (void *)src, len, xfer_to_iotlb);
+}
+EXPORT_SYMBOL(vringh_iov_push_iotlb);
+
+/**
+ * vringh_abandon_iotlb - we've decided not to handle the descriptor(s).
+ * @vrh: the vring.
+ * @num: the number of descriptors to put back (ie. num
+ *	 vringh_get_iotlb() to undo).
+ *
+ * The next vringh_get_iotlb() will return the old descriptor(s) again.
+ */
+void vringh_abandon_iotlb(struct vringh *vrh, unsigned int num)
+{
+	/* We only update vring_avail_event(vr) when we want to be notified,
+	 * so we haven't changed that yet.
+	 */
+	vrh->last_avail_idx -= num;
+}
+EXPORT_SYMBOL(vringh_abandon_iotlb);
+
+/**
+ * vringh_complete_iotlb - we've finished with descriptor, publish it.
+ * @vrh: the vring.
+ * @head: the head as filled in by vringh_getdesc_iotlb.
+ * @len: the length of data we have written.
+ *
+ * You should check vringh_need_notify_iotlb() after one or more calls
+ * to this function.
+ */
+int vringh_complete_iotlb(struct vringh *vrh, u16 head, u32 len)
+{
+	struct vring_used_elem used;
+
+	used.id = cpu_to_vringh32(vrh, head);
+	used.len = cpu_to_vringh32(vrh, len);
+
+	return __vringh_complete(vrh, &used, 1, putu16_iotlb, putused_iotlb);
+}
+EXPORT_SYMBOL(vringh_complete_iotlb);
+
+/**
+ * vringh_notify_enable_iotlb - we want to know if something changes.
+ * @vrh: the vring.
+ *
+ * This always enables notifications, but returns false if there are
+ * now more buffers available in the vring.
+ */
+bool vringh_notify_enable_iotlb(struct vringh *vrh)
+{
+	return __vringh_notify_enable(vrh, getu16_iotlb, putu16_iotlb);
+}
+EXPORT_SYMBOL(vringh_notify_enable_iotlb);
+
+/**
+ * vringh_notify_disable_iotlb - don't tell us if something changes.
+ * @vrh: the vring.
+ *
+ * This is our normal running state: we disable and then only enable when
+ * we're going to sleep.
+ */
+void vringh_notify_disable_iotlb(struct vringh *vrh)
+{
+	__vringh_notify_disable(vrh, putu16_iotlb);
+}
+EXPORT_SYMBOL(vringh_notify_disable_iotlb);
+
+/**
+ * vringh_need_notify_iotlb - must we tell the other side about used buffers?
+ * @vrh: the vring we've called vringh_complete_iotlb() on.
+ *
+ * Returns -errno or 0 if we don't need to tell the other side, 1 if we do.
+ */
+int vringh_need_notify_iotlb(struct vringh *vrh)
+{
+	return __vringh_need_notify(vrh, getu16_iotlb);
+}
+EXPORT_SYMBOL(vringh_need_notify_iotlb);
+
+#endif
+
 MODULE_LICENSE("GPL");
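
Usage note: the hunks above thread a vringh pointer through every copy helper and add an optional vhost IOTLB translation layer, so an in-kernel consumer can serve a ring whose addresses need translation. The sketch below is a minimal, hypothetical example of such a consumer, not code from this tree: my_service_ring(), the fixed 8-entry kvec arrays and the 64-byte request buffer are illustrative assumptions, and the vringh is assumed to have already been prepared with vringh_init_iotlb() and bound to a vhost_iotlb with vringh_set_iotlb().

#include <linux/vringh.h>

/* Hypothetical sketch, assuming vrh was set up with vringh_init_iotlb()
 * and vringh_set_iotlb(); names below are illustrative only. */
static int my_service_ring(struct vringh *vrh)
{
	struct kvec rkvec[8], wkvec[8];
	struct vringh_kiov riov = { .iov = rkvec, .max_num = ARRAY_SIZE(rkvec) };
	struct vringh_kiov wiov = { .iov = wkvec, .max_num = ARRAY_SIZE(wkvec) };
	u8 req[64], status = 0;
	ssize_t got;
	u16 head;
	int err;

	/* Fetch the next available descriptor chain; 0 means the ring is
	 * empty. Chains longer than 8 entries may cause riov/wiov to be
	 * reallocated, in which case a real consumer must clean them up
	 * (see the vringh_getdesc_iotlb() kernel-doc above). */
	err = vringh_getdesc_iotlb(vrh, &riov, &wiov, &head, GFP_ATOMIC);
	if (err <= 0)
		return err;

	/* Guest addresses are translated through the IOTLB on each copy. */
	got = vringh_iov_pull_iotlb(vrh, &riov, req, sizeof(req));
	if (got < 0)
		return got;

	/* ... handle the request, then write back a one-byte status ... */
	got = vringh_iov_push_iotlb(vrh, &wiov, &status, sizeof(status));
	if (got < 0)
		return got;

	/* Publish the used entry and ask whether the guest needs a kick. */
	err = vringh_complete_iotlb(vrh, head, sizeof(status));
	if (err)
		return err;

	return vringh_need_notify_iotlb(vrh); /* 1: notify, 0: skip, <0: error */
}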