@@ -10,44 +10,26 @@
 
 #include <asm/cache.h>
 
-/* Allocate the top level pgd (page directory)
- *
- * Here (for 64 bit kernels) we implement a Hybrid L2/L3 scheme: we
- * allocate the first pmd adjacent to the pgd. This means that we can
- * subtract a constant offset to get to it. The pmd and pgd sizes are
- * arranged so that a single pmd covers 4GB (giving a full 64-bit
- * process access to 8TB) so our lookups are effectively L2 for the
- * first 4GB of the kernel (i.e. for all ILP32 processes and all the
- * kernel for machines with under 4GB of memory) */
+#define __HAVE_ARCH_PMD_ALLOC_ONE
+#define __HAVE_ARCH_PMD_FREE
+#define __HAVE_ARCH_PGD_FREE
+#include <asm-generic/pgalloc.h>
+
+/* Allocate the top level pgd (page directory) */
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
-	pgd_t *pgd = (pgd_t *)__get_free_pages(GFP_KERNEL,
-					       PGD_ALLOC_ORDER);
-	pgd_t *actual_pgd = pgd;
+	pgd_t *pgd;
 
-	if (likely(pgd != NULL)) {
-		memset(pgd, 0, PAGE_SIZE<<PGD_ALLOC_ORDER);
-#if CONFIG_PGTABLE_LEVELS == 3
-		actual_pgd += PTRS_PER_PGD;
-		/* Populate first pmd with allocated memory. We mark it
-		 * with PxD_FLAG_ATTACHED as a signal to the system that this
-		 * pmd entry may not be cleared. */
-		__pgd_val_set(*actual_pgd, (PxD_FLAG_PRESENT |
-					PxD_FLAG_VALID |
-					PxD_FLAG_ATTACHED)
-			+ (__u32)(__pa((unsigned long)pgd) >> PxD_VALUE_SHIFT));
-		/* The first pmd entry also is marked with PxD_FLAG_ATTACHED as
-		 * a signal that this pmd may not be freed */
-		__pgd_val_set(*pgd, PxD_FLAG_ATTACHED);
-#endif
-	}
-	return actual_pgd;
+	pgd = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER);
+	if (unlikely(pgd == NULL))
+		return NULL;
+
+	memset(pgd, 0, PAGE_SIZE << PGD_ORDER);
+
+	return pgd;
 }
 
 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 {
-#if CONFIG_PGTABLE_LEVELS == 3
-	pgd -= PTRS_PER_PGD;
-#endif
-	free_pages((unsigned long)pgd, PGD_ALLOC_ORDER);
+	free_pages((unsigned long)pgd, PGD_ORDER);
 }
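The three `__HAVE_ARCH_*` defines are what make the new `#include <asm-generic/pgalloc.h>` safe: the generic header wraps each helper it supplies in an `#ifndef __HAVE_ARCH_...` guard, so an architecture can keep its own version of individual helpers (here the pmd and pgd routines, whose allocations may span more than one page) while inheriting the rest, notably the pte allocators. A minimal sketch of that opt-out pattern, paraphrased from memory of `include/asm-generic/pgalloc.h` rather than quoted, so verify against the tree you build:

```c
/* Paraphrased sketch of the guard pattern in <asm-generic/pgalloc.h>.
 * An arch that #defines __HAVE_ARCH_PMD_FREE before including the
 * header suppresses this generic pmd_free() and supplies its own. */
#ifndef __HAVE_ARCH_PMD_FREE
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	BUG_ON((unsigned long)pmd & (PAGE_SIZE - 1));	/* must be page aligned */
	pgtable_pmd_page_dtor(virt_to_page(pmd));	/* undo ctor accounting */
	free_page((unsigned long)pmd);			/* a single page */
}
#endif
```

The generic version frees exactly one page, which is why parisc opts out: its pgd and pmd are `PGD_ORDER`/`PMD_ORDER` allocations that need `free_pages()`.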
@@ -54,10 +36,10 @@
 
 #if CONFIG_PGTABLE_LEVELS == 3
 
 /* Three Level Page Table Support for pmd's */
 
-static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
+static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
 {
-	__pgd_val_set(*pgd, (PxD_FLAG_PRESENT | PxD_FLAG_VALID) +
-		(__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
+	set_pud(pud, __pud((PxD_FLAG_PRESENT | PxD_FLAG_VALID) +
+			(__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT)));
 }
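With the generic page-table level folding, a three-level tree attaches its pmd tables at the pud level, so the old `pgd_populate()` becomes `pud_populate()` and the entry is written through the typed `set_pud()`/`__pud()` accessors instead of the arch-private `__pgd_val_set()` macro. Those accessors usually boil down to a typed store; the following is a hypothetical minimal shape for illustration only (parisc's real `pud_t`, `__pud()` and `set_pud()` definitions live in its `asm/page.h` and `asm/pgtable.h` and may differ):

```c
/* Hypothetical minimal shape of the accessors; not parisc's code. */
typedef struct { unsigned long pud; } pud_t;	/* opaque entry type */

#define __pud(x)	((pud_t) { (x) })	/* raw value -> pud_t */

static inline void set_pud(pud_t *pudp, pud_t pud)
{
	*pudp = pud;	/* a plain store publishes the new entry */
}
```

The same `(PxD_FLAG_PRESENT | PxD_FLAG_VALID) + (paddr >> PxD_VALUE_SHIFT)` value is stored either way; the typed wrappers simply turn a mismatched level into a compile error.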
@@ -64,38 +46,16 @@
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
 {
-	pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL, PMD_ORDER);
-	if (pmd)
-		memset(pmd, 0, PAGE_SIZE<<PMD_ORDER);
+	pmd_t *pmd;
+
+	pmd = (pmd_t *)__get_free_pages(GFP_PGTABLE_KERNEL, PMD_ORDER);
+	if (likely(pmd))
+		memset ((void *)pmd, 0, PAGE_SIZE << PMD_ORDER);
 	return pmd;
 }
 
 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
 {
-	if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED) {
-		/*
-		 * This is the permanent pmd attached to the pgd;
-		 * cannot free it.
-		 * Increment the counter to compensate for the decrement
-		 * done by generic mm code.
-		 */
-		mm_inc_nr_pmds(mm);
-		return;
-	}
 	free_pages((unsigned long)pmd, PMD_ORDER);
 }
-
-#else
-
-/* Two Level Page Table Support for pmd's */
-
-/*
- * allocating and freeing a pmd is trivial: the 1-entry pmd is
- * inside the pgd, so has no extra memory associated with it.
- */
-
-#define pmd_alloc_one(mm, addr)		({ BUG(); ((pmd_t *)2); })
-#define pmd_free(mm, x)			do { } while (0)
-#define pgd_populate(mm, pmd, pte)	BUG()
-
 #endif
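Two things happen in this hunk. First, with the attached pmd gone from `pgd_alloc()`, `pmd_free()` no longer needs the `PxD_FLAG_ATTACHED` escape hatch or the `mm_inc_nr_pmds()` compensation, and the two-level `#else` stubs can go because the generic pgtable-nopmd folding already provides trivial stubs for that configuration. Second, `pmd_alloc_one()` switches from `GFP_KERNEL` to `GFP_PGTABLE_KERNEL`. As best I recall, the generic header defines the page-table GFP flags like this (verify against your tree):

```c
/* Paraphrased from include/asm-generic/pgalloc.h: page tables come
 * back pre-zeroed; user page tables are additionally memcg-charged. */
#define GFP_PGTABLE_KERNEL	(GFP_KERNEL | __GFP_ZERO)
#define GFP_PGTABLE_USER	(GFP_PGTABLE_KERNEL | __GFP_ACCOUNT)
```

Since `__GFP_ZERO` already returns zeroed pages, the explicit `memset()` kept above appears redundant, though harmless.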
@@ -102,56 +62,13 @@
 
 static inline void
 pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
 {
-#if CONFIG_PGTABLE_LEVELS == 3
-	/* preserve the gateway marker if this is the beginning of
-	 * the permanent pmd */
-	if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
-		__pmd_val_set(*pmd, (PxD_FLAG_PRESENT |
-				PxD_FLAG_VALID |
-				PxD_FLAG_ATTACHED)
-			+ (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT));
-	else
-#endif
-		__pmd_val_set(*pmd, (PxD_FLAG_PRESENT | PxD_FLAG_VALID)
-			+ (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT));
+	set_pmd(pmd, __pmd((PxD_FLAG_PRESENT | PxD_FLAG_VALID)
+		+ (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT)));
 }
 
 #define pmd_populate(mm, pmd, pte_page) \
 	pmd_populate_kernel(mm, pmd, page_address(pte_page))
 #define pmd_pgtable(pmd) pmd_page(pmd)
-
-static inline pgtable_t
-pte_alloc_one(struct mm_struct *mm, unsigned long address)
-{
-	struct page *page = alloc_page(GFP_KERNEL|__GFP_ZERO);
-	if (!page)
-		return NULL;
-	if (!pgtable_page_ctor(page)) {
-		__free_page(page);
-		return NULL;
-	}
-	return page;
-}
-
-static inline pte_t *
-pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
-{
-	pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
-	return pte;
-}
-
-static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
-{
-	free_page((unsigned long)pte);
-}
-
-static inline void pte_free(struct mm_struct *mm, struct page *pte)
-{
-	pgtable_page_dtor(pte);
-	pte_free_kernel(mm, page_address(pte));
-}
-
-#define check_pgt_cache()	do { } while (0)
 
 #endif
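`pmd_populate_kernel()` loses the gateway-marker special case along with the rest of the attached-pmd machinery and becomes a single typed `set_pmd()` store. The deleted `pte_*` helpers are the ones `<asm-generic/pgalloc.h>` now supplies, since parisc defines no `__HAVE_ARCH_PTE_*` overrides; note that the generic prototypes dropped the unused `address` argument, and `check_pgt_cache()` had already been removed from core mm some releases earlier. From memory, the generic replacements look roughly like this (names match upstream, but check the header in your tree for the exact bodies):

```c
/* Rough paraphrase of the pte helpers in <asm-generic/pgalloc.h>. */
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
{
	/* GFP_PGTABLE_KERNEL includes __GFP_ZERO, so no memset needed */
	return (pte_t *)__get_free_page(GFP_PGTABLE_KERNEL);
}

static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	free_page((unsigned long)pte);
}

static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
{
	struct page *page = alloc_page(GFP_PGTABLE_USER);

	if (!page)
		return NULL;
	if (!pgtable_pte_page_ctor(page)) {	/* split ptlock init */
		__free_page(page);
		return NULL;
	}
	return page;
}
```

The net effect is behavior-preserving for the pte path while the parisc-private copies are deleted.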