@@ -6,9 +6,13 @@
 #include <linux/mm.h>		/* for struct page */
 #include <linux/pagemap.h>
 
+#define __HAVE_ARCH_PTE_ALLOC_ONE
+#define __HAVE_ARCH_PGD_FREE
+#include <asm-generic/pgalloc.h>
+
 static inline int  __paravirt_pgd_alloc(struct mm_struct *mm) { return 0; }
 
-#ifdef CONFIG_PARAVIRT
+#ifdef CONFIG_PARAVIRT_XXL
 #include <asm/paravirt.h>
 #else
 #define paravirt_pgd_alloc(mm)	__paravirt_pgd_alloc(mm)
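The three added lines opt x86 into the shared page-table allocators: defining __HAVE_ARCH_PTE_ALLOC_ONE and __HAVE_ARCH_PGD_FREE before including <asm-generic/pgalloc.h> tells the generic header to skip its fallback pte_alloc_one() and pgd_free(), which x86 keeps providing itself, while every helper that is not overridden (pte_alloc_one_kernel(), pte_free_kernel(), pte_free(), pmd_alloc_one(), pud_free(), ...) now comes from the generic header and can be deleted from this file in the hunks below. The switch from CONFIG_PARAVIRT to CONFIG_PARAVIRT_XXL reflects that only the full (Xen-PV-style) paravirt mode overrides these page-table hooks. The opt-out pattern in the generic header looks roughly like the sketch below (simplified and illustrative only; the real header layers this through its own GFP constants and worker functions):

```c
/* Simplified sketch of the opt-out pattern used by <asm-generic/pgalloc.h>;
 * illustrative only, not the verbatim generic code. */
#ifndef __HAVE_ARCH_PTE_ALLOC_ONE
static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
{
	struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);

	if (!page)
		return NULL;
	if (!pgtable_page_ctor(page)) {	/* init the per-page ptl etc. */
		__free_page(page);
		return NULL;
	}
	return page;
}
#endif /* __HAVE_ARCH_PTE_ALLOC_ONE */
```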
@@ -47,23 +51,7 @@
 extern pgd_t *pgd_alloc(struct mm_struct *);
 extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
 
-extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
-extern pgtable_t pte_alloc_one(struct mm_struct *, unsigned long);
-
-/* Should really implement gc for free page table pages. This could be
-   done with a reference count in struct page. */
-
-static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
-{
-	BUG_ON((unsigned long)pte & (PAGE_SIZE-1));
-	free_page((unsigned long)pte);
-}
-
-static inline void pte_free(struct mm_struct *mm, struct page *pte)
-{
-	pgtable_page_dtor(pte);
-	__free_page(pte);
-}
+extern pgtable_t pte_alloc_one(struct mm_struct *);
 
 extern void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte);
 
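Only the pte_alloc_one() prototype survives here, and it drops the unused address argument that the old prototypes carried; the kernel variant and the free paths now come from the generic header. x86 keeps overriding pte_alloc_one(), presumably so it can keep choosing the GFP flags for user page-table pages itself. The remaining arch override can then be very small; a hypothetical shape (the actual body lives out of line in arch/x86/mm/pgtable.c, and the exact flags are an assumption here) might be:

```c
/* Hypothetical shape of the remaining arch override; __pte_alloc_one()
 * is the generic worker from <asm-generic/pgalloc.h>, and the GFP flags
 * shown are illustrative, not the real x86 choice. */
pgtable_t pte_alloc_one(struct mm_struct *mm)
{
	return __pte_alloc_one(mm, GFP_KERNEL | __GFP_ZERO | __GFP_ACCOUNT);
}
```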
@@ -80,6 +68,13 @@
 	set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
 }
 
+static inline void pmd_populate_kernel_safe(struct mm_struct *mm,
+	pmd_t *pmd, pte_t *pte)
+{
+	paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
+	set_pmd_safe(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
+}
+
 static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
 				struct page *pte)
 {
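pmd_populate_kernel_safe() mirrors pmd_populate_kernel() above but installs the entry with set_pmd_safe() instead of set_pmd(). The _safe setters are intended for code that constructs kernel page tables (direct map, identity map), where finding a different entry already present indicates a bug: they warn rather than silently overwrite. A sketch of the idea, assuming a definition modelled on the generic set_*_safe() macros rather than quoting them:

```c
/* Assumed shape of a _safe setter: complain if a present entry would be
 * replaced by a different one, then install the new entry as usual. */
#define set_pmd_safe(pmdp, pmd)						\
({									\
	WARN_ON_ONCE(pmd_present(*(pmdp)) && !pmd_same(*(pmdp), pmd));	\
	set_pmd(pmdp, pmd);						\
})
```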
@@ -92,30 +87,6 @@
 #define pmd_pgtable(pmd) pmd_page(pmd)
 
 #if CONFIG_PGTABLE_LEVELS > 2
-static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
-{
-	struct page *page;
-	gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO;
-
-	if (mm == &init_mm)
-		gfp &= ~__GFP_ACCOUNT;
-	page = alloc_pages(gfp, 0);
-	if (!page)
-		return NULL;
-	if (!pgtable_pmd_page_ctor(page)) {
-		__free_pages(page, 0);
-		return NULL;
-	}
-	return (pmd_t *)page_address(page);
-}
-
-static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
-{
-	BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
-	pgtable_pmd_page_dtor(virt_to_page(pmd));
-	free_page((unsigned long)pmd);
-}
-
 extern void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd);
 
 static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
@@ -132,6 +103,12 @@
 	paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
 	set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
 }
+
+static inline void pud_populate_safe(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+{
+	paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
+	set_pud_safe(pud, __pud(_PAGE_TABLE | __pa(pmd)));
+}
 #endif	/* CONFIG_X86_PAE */
 
 #if CONFIG_PGTABLE_LEVELS > 3
@@ -141,19 +118,10 @@
 	set_p4d(p4d, __p4d(_PAGE_TABLE | __pa(pud)));
 }
 
-static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
+static inline void p4d_populate_safe(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
 {
-	gfp_t gfp = GFP_KERNEL_ACCOUNT;
-
-	if (mm == &init_mm)
-		gfp &= ~__GFP_ACCOUNT;
-	return (pud_t *)get_zeroed_page(gfp);
-}
-
-static inline void pud_free(struct mm_struct *mm, pud_t *pud)
-{
-	BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
-	free_page((unsigned long)pud);
+	paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
+	set_p4d_safe(p4d, __p4d(_PAGE_TABLE | __pa(pud)));
 }
 
 extern void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud);
@@ -173,6 +141,14 @@
 	set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(p4d)));
 }
 
+static inline void pgd_populate_safe(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d)
+{
+	if (!pgtable_l5_enabled())
+		return;
+	paravirt_alloc_p4d(mm, __pa(p4d) >> PAGE_SHIFT);
+	set_pgd_safe(pgd, __pgd(_PAGE_TABLE | __pa(p4d)));
+}
+
 static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
 	gfp_t gfp = GFP_KERNEL_ACCOUNT;
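Taken together, the new *_populate_safe() helpers let page-table construction code wire up a whole branch while catching accidental overwrites at every level. pgd_populate_safe() additionally returns early when 5-level paging is disabled, because the p4d level is then folded into the pgd and there is no separate pgd entry to install. A hypothetical caller (the function name and argument list below are invented purely for illustration) might look like:

```c
/* Hypothetical usage sketch: populate one branch of a kernel page table
 * with the _safe variants, so a pre-existing conflicting entry at any
 * level triggers a warning instead of being silently overwritten. */
static void __init example_wire_branch(struct mm_struct *mm, pgd_t *pgd,
				       p4d_t *p4d, pud_t *pud,
				       pmd_t *pmd, pte_t *pte)
{
	pgd_populate_safe(mm, pgd, p4d);	/* no-op without 5-level paging */
	p4d_populate_safe(mm, p4d, pud);
	pud_populate_safe(mm, pud, pmd);
	pmd_populate_kernel_safe(mm, pmd, pte);
}
```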