2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/drivers/xen/xlate_mmu.c
@@ -93,8 +93,7 @@
 	info->fgfn++;
 }
 
-static int remap_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr,
-			void *data)
+static int remap_pte_fn(pte_t *ptep, unsigned long addr, void *data)
 {
 	struct remap_data *info = data;
 	struct page *page = info->pages[info->index++];
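
Note: this hunk follows the upstream change to the apply_to_page_range() callback type, pte_fn_t, which no longer passes a pgtable_t token to the callback. A minimal sketch of a callback and caller against the new signature (illustrative only; the names below are hypothetical and not part of xlate_mmu.c):

#include <linux/mm.h>

/*
 * Sketch: pte_fn_t is now int (*)(pte_t *ptep, unsigned long addr, void *data),
 * so a callback sees only the PTE slot, the virtual address and its private
 * cookie; the page-table page ("token") argument is gone.
 */
static int example_pte_fn(pte_t *ptep, unsigned long addr, void *data)
{
	unsigned long *count = data;	/* hypothetical private state */

	(*count)++;			/* e.g. count the PTE slots visited */
	return 0;			/* non-zero aborts the walk */
}

/* Hypothetical caller: walk [addr, addr + len) of mm with the callback. */
static int example_walk(struct mm_struct *mm, unsigned long addr,
			unsigned long len, unsigned long *count)
{
	return apply_to_page_range(mm, addr, len, example_pte_fn, count);
}
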
@@ -233,7 +232,7 @@
 		kfree(pages);
 		return -ENOMEM;
 	}
-	rc = alloc_xenballooned_pages(nr_pages, pages);
+	rc = xen_alloc_unpopulated_pages(nr_pages, pages);
 	if (rc) {
 		pr_warn("%s Couldn't balloon alloc %ld pages rc:%d\n", __func__,
 			nr_pages, rc);
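
Note: alloc_xenballooned_pages() is replaced by xen_alloc_unpopulated_pages(), which hands back struct pages backed by unpopulated memory for use as foreign-mapping scratch space (falling back to balloon-based allocation when XEN_UNPOPULATED_ALLOC is not configured). A hedged sketch of the allocation pattern this hunk uses (helper name and message text are illustrative):

#include <linux/slab.h>
#include <xen/xen.h>	/* xen_alloc_unpopulated_pages() */

/* Sketch: allocate nr_pages scratch pages plus the pointer array. */
static struct page **example_alloc_scratch(unsigned long nr_pages)
{
	struct page **pages;
	int rc;

	pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return NULL;

	rc = xen_alloc_unpopulated_pages(nr_pages, pages);
	if (rc) {
		pr_warn("%s: could not allocate %lu unpopulated pages: %d\n",
			__func__, nr_pages, rc);
		kfree(pages);
		return NULL;
	}

	return pages;
}
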
@@ -250,7 +249,7 @@
 	if (!vaddr) {
 		pr_warn("%s Couldn't map %ld pages rc:%d\n", __func__,
 			nr_pages, rc);
-		free_xenballooned_pages(nr_pages, pages);
+		xen_free_unpopulated_pages(nr_pages, pages);
 		kfree(pages);
 		kfree(pfns);
 		return -ENOMEM;
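
Note: the error path likewise switches to xen_free_unpopulated_pages(), which must pair with xen_alloc_unpopulated_pages() above. A minimal teardown sketch under that assumption (hypothetical helper, mirroring the allocation sketch in the previous note):

#include <linux/slab.h>
#include <xen/xen.h>	/* xen_free_unpopulated_pages() */

/* Sketch: undo example_alloc_scratch(): return the pages, then free the array. */
static void example_free_scratch(struct page **pages, unsigned long nr_pages)
{
	xen_free_unpopulated_pages(nr_pages, pages);
	kfree(pages);
}
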
@@ -262,4 +261,35 @@
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(xen_xlate_map_ballooned_pages);
+
+struct remap_pfn {
+	struct mm_struct *mm;
+	struct page **pages;
+	pgprot_t prot;
+	unsigned long i;
+};
+
+static int remap_pfn_fn(pte_t *ptep, unsigned long addr, void *data)
+{
+	struct remap_pfn *r = data;
+	struct page *page = r->pages[r->i];
+	pte_t pte = pte_mkspecial(pfn_pte(page_to_pfn(page), r->prot));
+
+	set_pte_at(r->mm, addr, ptep, pte);
+	r->i++;
+
+	return 0;
+}
+
+/* Used by the privcmd module, but has to be built-in on ARM */
+int xen_remap_vma_range(struct vm_area_struct *vma, unsigned long addr, unsigned long len)
+{
+	struct remap_pfn r = {
+		.mm = vma->vm_mm,
+		.pages = vma->vm_private_data,
+		.prot = vma->vm_page_prot,
+	};
+
+	return apply_to_page_range(vma->vm_mm, addr, len, remap_pfn_fn, &r);
+}
+EXPORT_SYMBOL_GPL(xen_remap_vma_range);
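
Note: the new xen_remap_vma_range() inserts the pages stashed in vma->vm_private_data into a user address range through apply_to_page_range(), marking each PTE with pte_mkspecial() so the mapping is treated as a special, non-refcounted one; it is exported because the privcmd module calls it while this file is built in on ARM. A hedged sketch of a privcmd-style caller (the helper below and its mmap plumbing are hypothetical; the xen/xen-ops.h location of the declaration is an assumption):

#include <linux/mm.h>
#include <xen/xen-ops.h>	/* assumed home of the xen_remap_vma_range() declaration */

/*
 * Sketch: once the backing pages have been stored in vma->vm_private_data
 * (as privcmd does for foreign mappings), wire them into the whole VMA.
 */
static int example_finish_mmap(struct vm_area_struct *vma, struct page **pages)
{
	vma->vm_private_data = pages;

	return xen_remap_vma_range(vma, vma->vm_start,
				   vma->vm_end - vma->vm_start);
}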