hc
2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
kernel/include/xen/grant_table.h
....@@ -97,16 +97,31 @@
9797 * access has been ended, free the given page too. Access will be ended
9898 * immediately iff the grant entry is not in use, otherwise it will happen
9999 * some time later. page may be 0, in which case no freeing will occur.
100
+ * Note that the granted page might still be accessed (read or write) by the
101
+ * other side after gnttab_end_foreign_access() returns, so even if page was
102
+ * specified as 0 it is not allowed to just reuse the page for other
103
+ * purposes immediately. gnttab_end_foreign_access() will take an additional
104
+ * reference to the granted page in this case, which is dropped only after
105
+ * the grant is no longer in use.
106
+ * This requires that multi page allocations for areas subject to
107
+ * gnttab_end_foreign_access() are done via alloc_pages_exact() (and freeing
108
+ * via free_pages_exact()) in order to avoid high order pages.
100109 */
101110 void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
102111 unsigned long page);
112
+
113
+/*
114
+ * End access through the given grant reference, iff the grant entry is
115
+ * no longer in use. If foreign access is successfully ended, the
116
+ * grant reference is deallocated.
117
+ * Return 1 if the grant entry was freed, 0 if it is still in use.
118
+ */
119
+int gnttab_try_end_foreign_access(grant_ref_t ref);
103120
104121 int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn);
105122
106123 unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref);
107124 unsigned long gnttab_end_foreign_transfer(grant_ref_t ref);
108
-
109
-int gnttab_query_foreign_access(grant_ref_t ref);
110125
111126 /*
112127 * operations on reserved batches of grant references
....@@ -199,6 +214,23 @@
199214 int gnttab_alloc_pages(int nr_pages, struct page **pages);
200215 void gnttab_free_pages(int nr_pages, struct page **pages);
201216
217
+struct gnttab_page_cache {
218
+ spinlock_t lock;
219
+#ifdef CONFIG_XEN_UNPOPULATED_ALLOC
220
+ struct page *pages;
221
+#else
222
+ struct list_head pages;
223
+#endif
224
+ unsigned int num_pages;
225
+};
226
+
227
+void gnttab_page_cache_init(struct gnttab_page_cache *cache);
228
+int gnttab_page_cache_get(struct gnttab_page_cache *cache, struct page **page);
229
+void gnttab_page_cache_put(struct gnttab_page_cache *cache, struct page **page,
230
+ unsigned int num);
231
+void gnttab_page_cache_shrink(struct gnttab_page_cache *cache,
232
+ unsigned int num);
233
+
202234 #ifdef CONFIG_XEN_GRANT_DMA_ALLOC
203235 struct gnttab_dma_alloc_args {
204236 /* Device for which DMA memory will be/was allocated. */