2023-12-11 6778948f9de86c3cfaf36725a7c87dcff9ba247f
kernel/arch/x86/xen/mmu_pv.c
@@ -1,3 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
+
 /*
  * Xen mmu operations
  *
@@ -49,13 +51,13 @@
 #include <linux/memblock.h>
 #include <linux/seq_file.h>
 #include <linux/crash_dump.h>
+#include <linux/pgtable.h>
 #ifdef CONFIG_KEXEC_CORE
 #include <linux/kexec.h>
 #endif

 #include <trace/events/xen.h>

-#include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 #include <asm/fixmap.h>
 #include <asm/mmu_context.h>
@@ -65,7 +67,7 @@
 #include <asm/linkage.h>
 #include <asm/page.h>
 #include <asm/init.h>
-#include <asm/pat.h>
+#include <asm/memtype.h>
 #include <asm/smp.h>
 #include <asm/tlb.h>

@@ -84,19 +86,14 @@
 #include "mmu.h"
 #include "debugfs.h"

-#ifdef CONFIG_X86_32
-/*
- * Identity map, in addition to plain kernel map. This needs to be
- * large enough to allocate page table pages to allocate the rest.
- * Each page can map 2MB.
- */
-#define LEVEL1_IDENT_ENTRIES (PTRS_PER_PTE * 4)
-static RESERVE_BRK_ARRAY(pte_t, level1_ident_pgt, LEVEL1_IDENT_ENTRIES);
-#endif
-#ifdef CONFIG_X86_64
 /* l3 pud for userspace vsyscall mapping */
 static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
-#endif /* CONFIG_X86_64 */
+
+/*
+ * Protects atomic reservation decrease/increase against concurrent increases.
+ * Also protects non-atomic updates of current_pages and balloon lists.
+ */
+static DEFINE_SPINLOCK(xen_reservation_lock);

 /*
  * Note about cr3 (pagetable base) values:
@@ -272,10 +269,7 @@
         if (!xen_batched_set_pte(ptep, pteval)) {
                 /*
                  * Could call native_set_pte() here and trap and
-                 * emulate the PTE write but with 32-bit guests this
-                 * needs two traps (one for each of the two 32-bit
-                 * words in the PTE) so do one hypercall directly
-                 * instead.
+                 * emulate the PTE write, but a hypercall is much cheaper.
                  */
                 struct mmu_update u;

@@ -291,27 +285,20 @@
         __xen_set_pte(ptep, pteval);
 }

-static void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
-                           pte_t *ptep, pte_t pteval)
-{
-        trace_xen_mmu_set_pte_at(mm, addr, ptep, pteval);
-        __xen_set_pte(ptep, pteval);
-}
-
-pte_t xen_ptep_modify_prot_start(struct mm_struct *mm,
+pte_t xen_ptep_modify_prot_start(struct vm_area_struct *vma,
                                  unsigned long addr, pte_t *ptep)
 {
         /* Just return the pte as-is. We preserve the bits on commit */
-        trace_xen_mmu_ptep_modify_prot_start(mm, addr, ptep, *ptep);
+        trace_xen_mmu_ptep_modify_prot_start(vma->vm_mm, addr, ptep, *ptep);
         return *ptep;
 }

-void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
+void xen_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
                                  pte_t *ptep, pte_t pte)
 {
         struct mmu_update u;

-        trace_xen_mmu_ptep_modify_prot_commit(mm, addr, ptep, pte);
+        trace_xen_mmu_ptep_modify_prot_commit(vma->vm_mm, addr, ptep, pte);
         xen_mc_batch();

         u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
@@ -431,26 +418,6 @@
         xen_set_pud_hyper(ptr, val);
 }

-#ifdef CONFIG_X86_PAE
-static void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
-{
-        trace_xen_mmu_set_pte_atomic(ptep, pte);
-        __xen_set_pte(ptep, pte);
-}
-
-static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
-{
-        trace_xen_mmu_pte_clear(mm, addr, ptep);
-        __xen_set_pte(ptep, native_make_pte(0));
-}
-
-static void xen_pmd_clear(pmd_t *pmdp)
-{
-        trace_xen_mmu_pmd_clear(pmdp);
-        set_pmd(pmdp, __pmd(0));
-}
-#endif /* CONFIG_X86_PAE */
-
 __visible pmd_t xen_make_pmd(pmdval_t pmd)
 {
         pmd = pte_pfn_to_mfn(pmd);
@@ -458,7 +425,6 @@
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);

-#ifdef CONFIG_X86_64
 __visible pudval_t xen_pud_val(pud_t pud)
 {
         return pte_mfn_to_pfn(pud.pud);
@@ -563,27 +529,27 @@
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_make_p4d);
 #endif /* CONFIG_PGTABLE_LEVELS >= 5 */
-#endif /* CONFIG_X86_64 */

-static int xen_pmd_walk(struct mm_struct *mm, pmd_t *pmd,
-                int (*func)(struct mm_struct *mm, struct page *, enum pt_level),
-                bool last, unsigned long limit)
+static void xen_pmd_walk(struct mm_struct *mm, pmd_t *pmd,
+                         void (*func)(struct mm_struct *mm, struct page *,
+                                      enum pt_level),
+                         bool last, unsigned long limit)
 {
-        int i, nr, flush = 0;
+        int i, nr;

         nr = last ? pmd_index(limit) + 1 : PTRS_PER_PMD;
         for (i = 0; i < nr; i++) {
                 if (!pmd_none(pmd[i]))
-                        flush |= (*func)(mm, pmd_page(pmd[i]), PT_PTE);
+                        (*func)(mm, pmd_page(pmd[i]), PT_PTE);
         }
-        return flush;
 }

-static int xen_pud_walk(struct mm_struct *mm, pud_t *pud,
-                int (*func)(struct mm_struct *mm, struct page *, enum pt_level),
-                bool last, unsigned long limit)
+static void xen_pud_walk(struct mm_struct *mm, pud_t *pud,
+                         void (*func)(struct mm_struct *mm, struct page *,
+                                      enum pt_level),
+                         bool last, unsigned long limit)
 {
-        int i, nr, flush = 0;
+        int i, nr;

         nr = last ? pud_index(limit) + 1 : PTRS_PER_PUD;
         for (i = 0; i < nr; i++) {
@@ -594,29 +560,26 @@

                 pmd = pmd_offset(&pud[i], 0);
                 if (PTRS_PER_PMD > 1)
-                        flush |= (*func)(mm, virt_to_page(pmd), PT_PMD);
-                flush |= xen_pmd_walk(mm, pmd, func,
-                                      last && i == nr - 1, limit);
+                        (*func)(mm, virt_to_page(pmd), PT_PMD);
+                xen_pmd_walk(mm, pmd, func, last && i == nr - 1, limit);
         }
-        return flush;
 }

-static int xen_p4d_walk(struct mm_struct *mm, p4d_t *p4d,
-                int (*func)(struct mm_struct *mm, struct page *, enum pt_level),
-                bool last, unsigned long limit)
+static void xen_p4d_walk(struct mm_struct *mm, p4d_t *p4d,
+                         void (*func)(struct mm_struct *mm, struct page *,
+                                      enum pt_level),
+                         bool last, unsigned long limit)
 {
-        int flush = 0;
         pud_t *pud;


         if (p4d_none(*p4d))
-                return flush;
+                return;

         pud = pud_offset(p4d, 0);
         if (PTRS_PER_PUD > 1)
-                flush |= (*func)(mm, virt_to_page(pud), PT_PUD);
-        flush |= xen_pud_walk(mm, pud, func, last, limit);
-        return flush;
+                (*func)(mm, virt_to_page(pud), PT_PUD);
+        xen_pud_walk(mm, pud, func, last, limit);
 }

 /*
@@ -628,32 +591,27 @@
  * will be STACK_TOP_MAX, but at boot we need to pin up to
  * FIXADDR_TOP.
  *
- * For 32-bit the important bit is that we don't pin beyond there,
- * because then we start getting into Xen's ptes.
- *
- * For 64-bit, we must skip the Xen hole in the middle of the address
- * space, just after the big x86-64 virtual hole.
+ * We must skip the Xen hole in the middle of the address space, just after
+ * the big x86-64 virtual hole.
  */
-static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
-                          int (*func)(struct mm_struct *mm, struct page *,
-                                      enum pt_level),
-                          unsigned long limit)
+static void __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
+                           void (*func)(struct mm_struct *mm, struct page *,
+                                        enum pt_level),
+                           unsigned long limit)
 {
-        int i, nr, flush = 0;
+        int i, nr;
         unsigned hole_low = 0, hole_high = 0;

         /* The limit is the last byte to be touched */
         limit--;
         BUG_ON(limit >= FIXADDR_TOP);

-#ifdef CONFIG_X86_64
         /*
          * 64-bit has a great big hole in the middle of the address
          * space, which contains the Xen mappings.
          */
         hole_low = pgd_index(GUARD_HOLE_BASE_ADDR);
         hole_high = pgd_index(GUARD_HOLE_END_ADDR);
-#endif

         nr = pgd_index(limit) + 1;
         for (i = 0; i < nr; i++) {
@@ -666,22 +624,20 @@
                         continue;

                 p4d = p4d_offset(&pgd[i], 0);
-                flush |= xen_p4d_walk(mm, p4d, func, i == nr - 1, limit);
+                xen_p4d_walk(mm, p4d, func, i == nr - 1, limit);
         }

         /* Do the top level last, so that the callbacks can use it as
            a cue to do final things like tlb flushes. */
-        flush |= (*func)(mm, virt_to_page(pgd), PT_PGD);
-
-        return flush;
+        (*func)(mm, virt_to_page(pgd), PT_PGD);
 }

-static int xen_pgd_walk(struct mm_struct *mm,
-                        int (*func)(struct mm_struct *mm, struct page *,
-                                    enum pt_level),
-                        unsigned long limit)
+static void xen_pgd_walk(struct mm_struct *mm,
+                         void (*func)(struct mm_struct *mm, struct page *,
+                                      enum pt_level),
+                         unsigned long limit)
 {
-        return __xen_pgd_walk(mm, mm->pgd, func, limit);
+        __xen_pgd_walk(mm, mm->pgd, func, limit);
 }

 /* If we're using split pte locks, then take the page's lock and
@@ -714,25 +670,16 @@
         xen_extend_mmuext_op(&op);
 }

-static int xen_pin_page(struct mm_struct *mm, struct page *page,
-                        enum pt_level level)
+static void xen_pin_page(struct mm_struct *mm, struct page *page,
+                         enum pt_level level)
 {
         unsigned pgfl = TestSetPagePinned(page);
-        int flush;

-        if (pgfl)
-                flush = 0; /* already pinned */
-        else if (PageHighMem(page))
-                /* kmaps need flushing if we found an unpinned
-                   highpage */
-                flush = 1;
-        else {
+        if (!pgfl) {
                 void *pt = lowmem_page_address(page);
                 unsigned long pfn = page_to_pfn(page);
                 struct multicall_space mcs = __xen_mc_entry(0);
                 spinlock_t *ptl;
-
-                flush = 0;

                 /*
                  * We need to hold the pagetable lock between the time
@@ -770,8 +717,6 @@
                         xen_mc_callback(xen_pte_unlock, ptl);
                 }
         }
-
-        return flush;
 }

 /* This is called just after a mm has been created, but it has not
@@ -779,39 +724,22 @@
    read-only, and can be pinned. */
 static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
 {
+        pgd_t *user_pgd = xen_get_user_pgd(pgd);
+
         trace_xen_mmu_pgd_pin(mm, pgd);

         xen_mc_batch();

-        if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) {
-                /* re-enable interrupts for flushing */
-                xen_mc_issue(0);
+        __xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT);

-                kmap_flush_unused();
+        xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd)));

-                xen_mc_batch();
+        if (user_pgd) {
+                xen_pin_page(mm, virt_to_page(user_pgd), PT_PGD);
+                xen_do_pin(MMUEXT_PIN_L4_TABLE,
+                           PFN_DOWN(__pa(user_pgd)));
         }

-#ifdef CONFIG_X86_64
-        {
-                pgd_t *user_pgd = xen_get_user_pgd(pgd);
-
-                xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd)));
-
-                if (user_pgd) {
-                        xen_pin_page(mm, virt_to_page(user_pgd), PT_PGD);
-                        xen_do_pin(MMUEXT_PIN_L4_TABLE,
-                                   PFN_DOWN(__pa(user_pgd)));
-                }
-        }
-#else /* CONFIG_X86_32 */
-#ifdef CONFIG_X86_PAE
-        /* Need to make sure unshared kernel PMD is pinnable */
-        xen_pin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
-                     PT_PMD);
-#endif
-        xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
-#endif /* CONFIG_X86_64 */
         xen_mc_issue(0);
 }

@@ -846,34 +774,31 @@
         spin_unlock(&pgd_lock);
 }

-static int __init xen_mark_pinned(struct mm_struct *mm, struct page *page,
-                                  enum pt_level level)
+static void __init xen_mark_pinned(struct mm_struct *mm, struct page *page,
+                                   enum pt_level level)
 {
         SetPagePinned(page);
-        return 0;
 }

 /*
  * The init_mm pagetable is really pinned as soon as its created, but
  * that's before we have page structures to store the bits. So do all
  * the book-keeping now once struct pages for allocated pages are
- * initialized. This happens only after free_all_bootmem() is called.
+ * initialized. This happens only after memblock_free_all() is called.
  */
 static void __init xen_after_bootmem(void)
 {
         static_branch_enable(&xen_struct_pages_ready);
-#ifdef CONFIG_X86_64
         SetPagePinned(virt_to_page(level3_user_vsyscall));
-#endif
         xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP);
 }

-static int xen_unpin_page(struct mm_struct *mm, struct page *page,
-                          enum pt_level level)
+static void xen_unpin_page(struct mm_struct *mm, struct page *page,
+                           enum pt_level level)
 {
         unsigned pgfl = TestClearPagePinned(page);

-        if (pgfl && !PageHighMem(page)) {
+        if (pgfl) {
                 void *pt = lowmem_page_address(page);
                 unsigned long pfn = page_to_pfn(page);
                 spinlock_t *ptl = NULL;
@@ -904,36 +829,24 @@
                         xen_mc_callback(xen_pte_unlock, ptl);
                 }
         }
-
-        return 0; /* never need to flush on unpin */
 }

 /* Release a pagetables pages back as normal RW */
 static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
 {
+        pgd_t *user_pgd = xen_get_user_pgd(pgd);
+
         trace_xen_mmu_pgd_unpin(mm, pgd);

         xen_mc_batch();

         xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));

-#ifdef CONFIG_X86_64
-        {
-                pgd_t *user_pgd = xen_get_user_pgd(pgd);
-
-                if (user_pgd) {
-                        xen_do_pin(MMUEXT_UNPIN_TABLE,
-                                   PFN_DOWN(__pa(user_pgd)));
-                        xen_unpin_page(mm, virt_to_page(user_pgd), PT_PGD);
-                }
+        if (user_pgd) {
+                xen_do_pin(MMUEXT_UNPIN_TABLE,
+                           PFN_DOWN(__pa(user_pgd)));
+                xen_unpin_page(mm, virt_to_page(user_pgd), PT_PGD);
         }
-#endif
-
-#ifdef CONFIG_X86_PAE
-        /* Need to make sure unshared kernel PMD is unpinned */
-        xen_unpin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
-                       PT_PMD);
-#endif

         __xen_pgd_walk(mm, pgd, xen_unpin_page, USER_LIMIT);

@@ -1081,7 +994,6 @@
                 BUG();
 }

-#ifdef CONFIG_X86_64
 static void __init xen_cleanhighmap(unsigned long vaddr,
                                     unsigned long vaddr_end)
 {
@@ -1230,7 +1142,7 @@
          * We could be in __ka space.
          * We roundup to the PMD, which means that if anybody at this stage is
          * using the __ka address of xen_start_info or
-         * xen_start_info->shared_info they are in going to crash. Fortunatly
+         * xen_start_info->shared_info they are in going to crash. Fortunately
          * we have already revectored in xen_setup_kernel_pagetable.
          */
         size = roundup(size, PMD_SIZE);
@@ -1265,17 +1177,15 @@
         xen_cleanhighmap(addr, roundup(addr + size, PMD_SIZE * 2));
         xen_start_info->pt_base = (unsigned long)__va(__pa(xen_start_info->pt_base));
 }
-#endif

 static void __init xen_pagetable_p2m_setup(void)
 {
         xen_vmalloc_p2m_tree();

-#ifdef CONFIG_X86_64
         xen_pagetable_p2m_free();

         xen_pagetable_cleanhighmap();
-#endif
+
         /* And revector! Bye bye old array */
         xen_start_info->mfn_list = (unsigned long)xen_p2m_addr;
 }
@@ -1297,16 +1207,6 @@
 static void xen_write_cr2(unsigned long cr2)
 {
         this_cpu_read(xen_vcpu)->arch.cr2 = cr2;
-}
-
-static unsigned long xen_read_cr2(void)
-{
-        return this_cpu_read(xen_vcpu)->arch.cr2;
-}
-
-unsigned long xen_read_cr2_direct(void)
-{
-        return this_cpu_read(xen_vcpu_info.arch.cr2);
 }

 static noinline void xen_flush_tlb(void)
@@ -1422,6 +1322,8 @@
 }
 static void xen_write_cr3(unsigned long cr3)
 {
+        pgd_t *user_pgd = xen_get_user_pgd(__va(cr3));
+
         BUG_ON(preemptible());

         xen_mc_batch(); /* disables interrupts */
@@ -1432,20 +1334,14 @@

         __xen_write_cr3(true, cr3);

-#ifdef CONFIG_X86_64
-        {
-                pgd_t *user_pgd = xen_get_user_pgd(__va(cr3));
-                if (user_pgd)
-                        __xen_write_cr3(false, __pa(user_pgd));
-                else
-                        __xen_write_cr3(false, 0);
-        }
-#endif
+        if (user_pgd)
+                __xen_write_cr3(false, __pa(user_pgd));
+        else
+                __xen_write_cr3(false, 0);

         xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */
 }

-#ifdef CONFIG_X86_64
 /*
  * At the start of the day - when Xen launches a guest, it has already
  * built pagetables for the guest. We diligently look over them
@@ -1480,49 +1376,39 @@

         xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */
 }
-#endif

 static int xen_pgd_alloc(struct mm_struct *mm)
 {
         pgd_t *pgd = mm->pgd;
-        int ret = 0;
+        struct page *page = virt_to_page(pgd);
+        pgd_t *user_pgd;
+        int ret = -ENOMEM;

         BUG_ON(PagePinned(virt_to_page(pgd)));
+        BUG_ON(page->private != 0);

-#ifdef CONFIG_X86_64
-        {
-                struct page *page = virt_to_page(pgd);
-                pgd_t *user_pgd;
+        user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+        page->private = (unsigned long)user_pgd;

-                BUG_ON(page->private != 0);
-
-                ret = -ENOMEM;
-
-                user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
-                page->private = (unsigned long)user_pgd;
-
-                if (user_pgd != NULL) {
+        if (user_pgd != NULL) {
 #ifdef CONFIG_X86_VSYSCALL_EMULATION
-                        user_pgd[pgd_index(VSYSCALL_ADDR)] =
-                                __pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE);
+                user_pgd[pgd_index(VSYSCALL_ADDR)] =
+                        __pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE);
 #endif
-                        ret = 0;
-                }
-
-                BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd))));
+                ret = 0;
         }
-#endif
+
+        BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd))));
+
         return ret;
 }

 static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
 {
-#ifdef CONFIG_X86_64
         pgd_t *user_pgd = xen_get_user_pgd(pgd);

         if (user_pgd)
                 free_page((unsigned long)user_pgd);
-#endif
 }

 /*
@@ -1541,7 +1427,6 @@
  */
 __visible pte_t xen_make_pte_init(pteval_t pte)
 {
-#ifdef CONFIG_X86_64
         unsigned long pfn;

         /*
@@ -1555,7 +1440,7 @@
             pfn >= xen_start_info->first_p2m_pfn &&
             pfn < xen_start_info->first_p2m_pfn + xen_start_info->nr_p2m_frames)
                 pte &= ~_PAGE_RW;
-#endif
+
         pte = pte_pfn_to_mfn(pte);
         return native_make_pte(pte);
 }
@@ -1563,13 +1448,6 @@

 static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
 {
-#ifdef CONFIG_X86_32
-        /* If there's an existing pte, then don't allow _PAGE_RW to be set */
-        if (pte_mfn(pte) != INVALID_P2M_ENTRY
-            && pte_val_ma(*ptep) & _PAGE_PRESENT)
-                pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
-                               pte_val_ma(pte));
-#endif
         __xen_set_pte(ptep, pte);
 }

@@ -1644,20 +1522,14 @@
                 if (static_branch_likely(&xen_struct_pages_ready))
                         SetPagePinned(page);

-                if (!PageHighMem(page)) {
-                        xen_mc_batch();
+                xen_mc_batch();

-                        __set_pfn_prot(pfn, PAGE_KERNEL_RO);
+                __set_pfn_prot(pfn, PAGE_KERNEL_RO);

-                        if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
-                                __pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
+                if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
+                        __pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);

-                        xen_mc_issue(PARAVIRT_LAZY_MMU);
-                } else {
-                        /* make sure there are no stray mappings of
-                           this page */
-                        kmap_flush_unused();
-                }
+                xen_mc_issue(PARAVIRT_LAZY_MMU);
         }
 }

@@ -1680,16 +1552,15 @@
         trace_xen_mmu_release_ptpage(pfn, level, pinned);

         if (pinned) {
-                if (!PageHighMem(page)) {
-                        xen_mc_batch();
+                xen_mc_batch();

-                        if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
-                                __pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
+                if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS)
+                        __pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);

-                        __set_pfn_prot(pfn, PAGE_KERNEL);
+                __set_pfn_prot(pfn, PAGE_KERNEL);

-                        xen_mc_issue(PARAVIRT_LAZY_MMU);
-                }
+                xen_mc_issue(PARAVIRT_LAZY_MMU);
+
                 ClearPagePinned(page);
         }
 }
@@ -1704,7 +1575,6 @@
         xen_release_ptpage(pfn, PT_PMD);
 }

-#ifdef CONFIG_X86_64
 static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
 {
         xen_alloc_ptpage(mm, pfn, PT_PUD);
@@ -1714,20 +1584,6 @@
 {
         xen_release_ptpage(pfn, PT_PUD);
 }
-#endif
-
-void __init xen_reserve_top(void)
-{
-#ifdef CONFIG_X86_32
-        unsigned long top = HYPERVISOR_VIRT_START;
-        struct xen_platform_parameters pp;
-
-        if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0)
-                top = pp.virt_start;
-
-        reserve_top_address(-top);
-#endif /* CONFIG_X86_32 */
-}

 /*
  * Like __va(), but returns address in the kernel mapping (which is
@@ -1735,11 +1591,7 @@
  */
 static void * __init __ka(phys_addr_t paddr)
 {
-#ifdef CONFIG_X86_64
         return (void *)(paddr + __START_KERNEL_map);
-#else
-        return __va(paddr);
-#endif
 }

 /* Convert a machine address to physical address */
@@ -1773,56 +1625,7 @@
 {
         return set_page_prot_flags(addr, prot, UVMF_NONE);
 }
-#ifdef CONFIG_X86_32
-static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
-{
-        unsigned pmdidx, pteidx;
-        unsigned ident_pte;
-        unsigned long pfn;

-        level1_ident_pgt = extend_brk(sizeof(pte_t) * LEVEL1_IDENT_ENTRIES,
-                                      PAGE_SIZE);
-
-        ident_pte = 0;
-        pfn = 0;
-        for (pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) {
-                pte_t *pte_page;
-
-                /* Reuse or allocate a page of ptes */
-                if (pmd_present(pmd[pmdidx]))
-                        pte_page = m2v(pmd[pmdidx].pmd);
-                else {
-                        /* Check for free pte pages */
-                        if (ident_pte == LEVEL1_IDENT_ENTRIES)
-                                break;
-
-                        pte_page = &level1_ident_pgt[ident_pte];
-                        ident_pte += PTRS_PER_PTE;
-
-                        pmd[pmdidx] = __pmd(__pa(pte_page) | _PAGE_TABLE);
-                }
-
-                /* Install mappings */
-                for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
-                        pte_t pte;
-
-                        if (pfn > max_pfn_mapped)
-                                max_pfn_mapped = pfn;
-
-                        if (!pte_none(pte_page[pteidx]))
-                                continue;
-
-                        pte = pfn_pte(pfn, PAGE_KERNEL_EXEC);
-                        pte_page[pteidx] = pte;
-                }
-        }
-
-        for (pteidx = 0; pteidx < ident_pte; pteidx += PTRS_PER_PTE)
-                set_page_prot(&level1_ident_pgt[pteidx], PAGE_KERNEL_RO);
-
-        set_page_prot(pmd, PAGE_KERNEL_RO);
-}
-#endif
 void __init xen_setup_machphys_mapping(void)
 {
         struct xen_machphys_mapping mapping;
@@ -1833,13 +1636,8 @@
         } else {
                 machine_to_phys_nr = MACH2PHYS_NR_ENTRIES;
         }
-#ifdef CONFIG_X86_32
-        WARN_ON((machine_to_phys_mapping + (machine_to_phys_nr - 1))
-                < machine_to_phys_mapping);
-#endif
 }

-#ifdef CONFIG_X86_64
 static void __init convert_pfn_mfn(void *v)
 {
         pte_t *pte = v;
21701968 xen_start_info->nr_p2m_frames = n_frames;
21711969 }
21721970
2173
-#else /* !CONFIG_X86_64 */
2174
-static RESERVE_BRK_ARRAY(pmd_t, initial_kernel_pmd, PTRS_PER_PMD);
2175
-static RESERVE_BRK_ARRAY(pmd_t, swapper_kernel_pmd, PTRS_PER_PMD);
2176
-RESERVE_BRK(fixup_kernel_pmd, PAGE_SIZE);
2177
-RESERVE_BRK(fixup_kernel_pte, PAGE_SIZE);
2178
-
2179
-static void __init xen_write_cr3_init(unsigned long cr3)
2180
-{
2181
- unsigned long pfn = PFN_DOWN(__pa(swapper_pg_dir));
2182
-
2183
- BUG_ON(read_cr3_pa() != __pa(initial_page_table));
2184
- BUG_ON(cr3 != __pa(swapper_pg_dir));
2185
-
2186
- /*
2187
- * We are switching to swapper_pg_dir for the first time (from
2188
- * initial_page_table) and therefore need to mark that page
2189
- * read-only and then pin it.
2190
- *
2191
- * Xen disallows sharing of kernel PMDs for PAE
2192
- * guests. Therefore we must copy the kernel PMD from
2193
- * initial_page_table into a new kernel PMD to be used in
2194
- * swapper_pg_dir.
2195
- */
2196
- swapper_kernel_pmd =
2197
- extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
2198
- copy_page(swapper_kernel_pmd, initial_kernel_pmd);
2199
- swapper_pg_dir[KERNEL_PGD_BOUNDARY] =
2200
- __pgd(__pa(swapper_kernel_pmd) | _PAGE_PRESENT);
2201
- set_page_prot(swapper_kernel_pmd, PAGE_KERNEL_RO);
2202
-
2203
- set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO);
2204
- xen_write_cr3(cr3);
2205
- pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, pfn);
2206
-
2207
- pin_pagetable_pfn(MMUEXT_UNPIN_TABLE,
2208
- PFN_DOWN(__pa(initial_page_table)));
2209
- set_page_prot(initial_page_table, PAGE_KERNEL);
2210
- set_page_prot(initial_kernel_pmd, PAGE_KERNEL);
2211
-
2212
- pv_mmu_ops.write_cr3 = &xen_write_cr3;
2213
-}
2214
-
2215
-/*
2216
- * For 32 bit domains xen_start_info->pt_base is the pgd address which might be
2217
- * not the first page table in the page table pool.
2218
- * Iterate through the initial page tables to find the real page table base.
2219
- */
2220
-static phys_addr_t __init xen_find_pt_base(pmd_t *pmd)
2221
-{
2222
- phys_addr_t pt_base, paddr;
2223
- unsigned pmdidx;
2224
-
2225
- pt_base = min(__pa(xen_start_info->pt_base), __pa(pmd));
2226
-
2227
- for (pmdidx = 0; pmdidx < PTRS_PER_PMD; pmdidx++)
2228
- if (pmd_present(pmd[pmdidx]) && !pmd_large(pmd[pmdidx])) {
2229
- paddr = m2p(pmd[pmdidx].pmd);
2230
- pt_base = min(pt_base, paddr);
2231
- }
2232
-
2233
- return pt_base;
2234
-}
2235
-
2236
-void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
2237
-{
2238
- pmd_t *kernel_pmd;
2239
-
2240
- kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
2241
-
2242
- xen_pt_base = xen_find_pt_base(kernel_pmd);
2243
- xen_pt_size = xen_start_info->nr_pt_frames * PAGE_SIZE;
2244
-
2245
- initial_kernel_pmd =
2246
- extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
2247
-
2248
- max_pfn_mapped = PFN_DOWN(xen_pt_base + xen_pt_size + 512 * 1024);
2249
-
2250
- copy_page(initial_kernel_pmd, kernel_pmd);
2251
-
2252
- xen_map_identity_early(initial_kernel_pmd, max_pfn);
2253
-
2254
- copy_page(initial_page_table, pgd);
2255
- initial_page_table[KERNEL_PGD_BOUNDARY] =
2256
- __pgd(__pa(initial_kernel_pmd) | _PAGE_PRESENT);
2257
-
2258
- set_page_prot(initial_kernel_pmd, PAGE_KERNEL_RO);
2259
- set_page_prot(initial_page_table, PAGE_KERNEL_RO);
2260
- set_page_prot(empty_zero_page, PAGE_KERNEL_RO);
2261
-
2262
- pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
2263
-
2264
- pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE,
2265
- PFN_DOWN(__pa(initial_page_table)));
2266
- xen_write_cr3(__pa(initial_page_table));
2267
-
2268
- memblock_reserve(xen_pt_base, xen_pt_size);
2269
-}
2270
-#endif /* CONFIG_X86_64 */
2271
-
22721971 void __init xen_reserve_special_pages(void)
22731972 {
22741973 phys_addr_t paddr;
....@@ -2302,16 +2001,9 @@
23022001
23032002 switch (idx) {
23042003 case FIX_BTMAP_END ... FIX_BTMAP_BEGIN:
2305
-#ifdef CONFIG_X86_32
2306
- case FIX_WP_TEST:
2307
-# ifdef CONFIG_HIGHMEM
2308
- case FIX_KMAP_BEGIN ... FIX_KMAP_END:
2309
-# endif
2310
-#elif defined(CONFIG_X86_VSYSCALL_EMULATION)
2004
+#ifdef CONFIG_X86_VSYSCALL_EMULATION
23112005 case VSYSCALL_PAGE:
23122006 #endif
2313
- case FIX_TEXT_POKE0:
2314
- case FIX_TEXT_POKE1:
23152007 /* All local page mappings */
23162008 pte = pfn_pte(phys, prot);
23172009 break;
....@@ -2358,28 +2050,22 @@
23582050
23592051 static void __init xen_post_allocator_init(void)
23602052 {
2361
- pv_mmu_ops.set_pte = xen_set_pte;
2362
- pv_mmu_ops.set_pmd = xen_set_pmd;
2363
- pv_mmu_ops.set_pud = xen_set_pud;
2364
-#ifdef CONFIG_X86_64
2365
- pv_mmu_ops.set_p4d = xen_set_p4d;
2366
-#endif
2053
+ pv_ops.mmu.set_pte = xen_set_pte;
2054
+ pv_ops.mmu.set_pmd = xen_set_pmd;
2055
+ pv_ops.mmu.set_pud = xen_set_pud;
2056
+ pv_ops.mmu.set_p4d = xen_set_p4d;
23672057
23682058 /* This will work as long as patching hasn't happened yet
23692059 (which it hasn't) */
2370
- pv_mmu_ops.alloc_pte = xen_alloc_pte;
2371
- pv_mmu_ops.alloc_pmd = xen_alloc_pmd;
2372
- pv_mmu_ops.release_pte = xen_release_pte;
2373
- pv_mmu_ops.release_pmd = xen_release_pmd;
2374
-#ifdef CONFIG_X86_64
2375
- pv_mmu_ops.alloc_pud = xen_alloc_pud;
2376
- pv_mmu_ops.release_pud = xen_release_pud;
2377
-#endif
2378
- pv_mmu_ops.make_pte = PV_CALLEE_SAVE(xen_make_pte);
2060
+ pv_ops.mmu.alloc_pte = xen_alloc_pte;
2061
+ pv_ops.mmu.alloc_pmd = xen_alloc_pmd;
2062
+ pv_ops.mmu.release_pte = xen_release_pte;
2063
+ pv_ops.mmu.release_pmd = xen_release_pmd;
2064
+ pv_ops.mmu.alloc_pud = xen_alloc_pud;
2065
+ pv_ops.mmu.release_pud = xen_release_pud;
2066
+ pv_ops.mmu.make_pte = PV_CALLEE_SAVE(xen_make_pte);
23792067
2380
-#ifdef CONFIG_X86_64
2381
- pv_mmu_ops.write_cr3 = &xen_write_cr3;
2382
-#endif
2068
+ pv_ops.mmu.write_cr3 = &xen_write_cr3;
23832069 }
23842070
23852071 static void xen_leave_lazy_mmu(void)
....@@ -2391,7 +2077,7 @@
23912077 }
23922078
23932079 static const struct pv_mmu_ops xen_mmu_ops __initconst = {
2394
- .read_cr2 = xen_read_cr2,
2080
+ .read_cr2 = __PV_IS_CALLEE_SAVE(xen_read_cr2),
23952081 .write_cr2 = xen_write_cr2,
23962082
23972083 .read_cr3 = xen_read_cr3,
....@@ -2412,7 +2098,6 @@
24122098 .release_pmd = xen_release_pmd_init,
24132099
24142100 .set_pte = xen_set_pte_init,
2415
- .set_pte_at = xen_set_pte_at,
24162101 .set_pmd = xen_set_pmd_hyper,
24172102
24182103 .ptep_modify_prot_start = __ptep_modify_prot_start,
....@@ -2424,17 +2109,11 @@
24242109 .make_pte = PV_CALLEE_SAVE(xen_make_pte_init),
24252110 .make_pgd = PV_CALLEE_SAVE(xen_make_pgd),
24262111
2427
-#ifdef CONFIG_X86_PAE
2428
- .set_pte_atomic = xen_set_pte_atomic,
2429
- .pte_clear = xen_pte_clear,
2430
- .pmd_clear = xen_pmd_clear,
2431
-#endif /* CONFIG_X86_PAE */
24322112 .set_pud = xen_set_pud_hyper,
24332113
24342114 .make_pmd = PV_CALLEE_SAVE(xen_make_pmd),
24352115 .pmd_val = PV_CALLEE_SAVE(xen_pmd_val),
24362116
2437
-#ifdef CONFIG_X86_64
24382117 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
24392118 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
24402119 .set_p4d = xen_set_p4d_hyper,
....@@ -2446,7 +2125,6 @@
24462125 .p4d_val = PV_CALLEE_SAVE(xen_p4d_val),
24472126 .make_p4d = PV_CALLEE_SAVE(xen_make_p4d),
24482127 #endif
2449
-#endif /* CONFIG_X86_64 */
24502128
24512129 .activate_mm = xen_activate_mm,
24522130 .dup_mmap = xen_dup_mmap,
....@@ -2466,7 +2144,7 @@
24662144 x86_init.paging.pagetable_init = xen_pagetable_init;
24672145 x86_init.hyper.init_after_bootmem = xen_after_bootmem;
24682146
2469
- pv_mmu_ops = xen_mmu_ops;
2147
+ pv_ops.mmu = xen_mmu_ops;
24702148
24712149 memset(dummy_mapping, 0xff, PAGE_SIZE);
24722150 }
....@@ -2629,7 +2307,6 @@
26292307 *dma_handle = virt_to_machine(vstart).maddr;
26302308 return success ? 0 : -ENOMEM;
26312309 }
2632
-EXPORT_SYMBOL_GPL(xen_create_contiguous_region);
26332310
26342311 void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
26352312 {
@@ -2664,7 +2341,137 @@

         spin_unlock_irqrestore(&xen_reservation_lock, flags);
 }
-EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
+
+static noinline void xen_flush_tlb_all(void)
+{
+        struct mmuext_op *op;
+        struct multicall_space mcs;
+
+        preempt_disable();
+
+        mcs = xen_mc_entry(sizeof(*op));
+
+        op = mcs.args;
+        op->cmd = MMUEXT_TLB_FLUSH_ALL;
+        MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
+
+        xen_mc_issue(PARAVIRT_LAZY_MMU);
+
+        preempt_enable();
+}
+
+#define REMAP_BATCH_SIZE 16
+
+struct remap_data {
+        xen_pfn_t *pfn;
+        bool contiguous;
+        bool no_translate;
+        pgprot_t prot;
+        struct mmu_update *mmu_update;
+};
+
+static int remap_area_pfn_pte_fn(pte_t *ptep, unsigned long addr, void *data)
+{
+        struct remap_data *rmd = data;
+        pte_t pte = pte_mkspecial(mfn_pte(*rmd->pfn, rmd->prot));
+
+        /*
+         * If we have a contiguous range, just update the pfn itself,
+         * else update pointer to be "next pfn".
+         */
+        if (rmd->contiguous)
+                (*rmd->pfn)++;
+        else
+                rmd->pfn++;
+
+        rmd->mmu_update->ptr = virt_to_machine(ptep).maddr;
+        rmd->mmu_update->ptr |= rmd->no_translate ?
+                MMU_PT_UPDATE_NO_TRANSLATE :
+                MMU_NORMAL_PT_UPDATE;
+        rmd->mmu_update->val = pte_val_ma(pte);
+        rmd->mmu_update++;
+
+        return 0;
+}
+
+int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
+                  xen_pfn_t *pfn, int nr, int *err_ptr, pgprot_t prot,
+                  unsigned int domid, bool no_translate, struct page **pages)
+{
+        int err = 0;
+        struct remap_data rmd;
+        struct mmu_update mmu_update[REMAP_BATCH_SIZE];
+        unsigned long range;
+        int mapped = 0;
+
+        BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO)));
+
+        rmd.pfn = pfn;
+        rmd.prot = prot;
+        /*
+         * We use the err_ptr to indicate if there we are doing a contiguous
+         * mapping or a discontigious mapping.
+         */
+        rmd.contiguous = !err_ptr;
+        rmd.no_translate = no_translate;
+
+        while (nr) {
+                int index = 0;
+                int done = 0;
+                int batch = min(REMAP_BATCH_SIZE, nr);
+                int batch_left = batch;
+
+                range = (unsigned long)batch << PAGE_SHIFT;
+
+                rmd.mmu_update = mmu_update;
+                err = apply_to_page_range(vma->vm_mm, addr, range,
+                                          remap_area_pfn_pte_fn, &rmd);
+                if (err)
+                        goto out;
+
+                /*
+                 * We record the error for each page that gives an error, but
+                 * continue mapping until the whole set is done
+                 */
+                do {
+                        int i;
+
+                        err = HYPERVISOR_mmu_update(&mmu_update[index],
+                                                    batch_left, &done, domid);
+
+                        /*
+                         * @err_ptr may be the same buffer as @gfn, so
+                         * only clear it after each chunk of @gfn is
+                         * used.
+                         */
+                        if (err_ptr) {
+                                for (i = index; i < index + done; i++)
+                                        err_ptr[i] = 0;
+                        }
+                        if (err < 0) {
+                                if (!err_ptr)
+                                        goto out;
+                                err_ptr[i] = err;
+                                done++; /* Skip failed frame. */
+                        } else
+                                mapped += done;
+                        batch_left -= done;
+                        index += done;
+                } while (batch_left);
+
+                nr -= batch;
+                addr += range;
+                if (err_ptr)
+                        err_ptr += batch;
+                cond_resched();
+        }
+out:
+
+        xen_flush_tlb_all();
+
+        return err < 0 ? err : mapped;
+}
+EXPORT_SYMBOL_GPL(xen_remap_pfn);

 #ifdef CONFIG_KEXEC_CORE
 phys_addr_t paddr_vmcoreinfo_note(void)