@@ -16,12 +16,29 @@
 #include <asm/cachectl.h>
 #include <asm/fixmap.h>
 
-#define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopmd.h>
 
 #ifdef CONFIG_HIGHMEM
 #include <asm/highmem.h>
 #endif
+
+/*
+ * Regarding 32-bit MIPS huge page support (and the tradeoff it entails):
+ *
+ *  We use the same huge page sizes as 64-bit MIPS. Assuming a 4KB page size,
+ * our 2-level table layout would normally have a PGD entry cover a contiguous
+ * 4MB virtual address region (pointing to a 4KB PTE page of 1,024 32-bit pte_t
+ * pointers, each pointing to a 4KB physical page). The problem is that 4MB,
+ * spanning both halves of a TLB EntryLo0,1 pair, requires 2MB hardware page
+ * support, not one of the standard supported sizes (1MB,4MB,16MB,...).
+ *  To correct for this, when huge pages are enabled, we halve the number of
+ * pointers a PTE page holds, making its last half go to waste. Correspondingly,
+ * we double the number of PGD pages. Overall, page table memory overhead
+ * increases to match 64-bit MIPS, but PTE lookups remain CPU cache-friendly.
+ *
+ * NOTE: We don't yet support huge pages if extended-addressing is enabled
+ *       (i.e. EVA, XPA, 36-bit Alchemy/Netlogic).
+ */
 
 extern int temp_tlb_entry;
 
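To make the tradeoff described in the new comment concrete, here is a small worked sketch (commentary added for this writeup, not part of the patch). It assumes the usual 4KB base page size and a 32-bit pte_t; the DEMO_* names are hypothetical stand-ins, and the only point is the arithmetic: the normal 4MB-per-PGD-entry layout would need an unsupported 2MB page in each EntryLo half, while the halved layout needs the supported 1MB size.

/* Worked example only; assumes 4KB pages and sizeof(pte_t) == 4. */
#include <assert.h>

#define DEMO_PAGE_SIZE      4096u
#define DEMO_PTES_PER_PAGE  (DEMO_PAGE_SIZE / 4u)	/* 1,024 pte_t per PTE page */

/* Normal layout: one PGD entry covers 1,024 * 4KB = 4MB, so a PMD-level huge
 * page would be 4MB, i.e. 2MB per EntryLo0/EntryLo1 half - not a standard
 * MIPS TLB page size (..., 1MB, 4MB, 16MB, ...). */
static_assert(DEMO_PTES_PER_PAGE * DEMO_PAGE_SIZE == 4u << 20, "4MB region");
static_assert((4u << 20) / 2 == 2u << 20, "needs 2MB per EntryLo half");

/* Halved layout: 512 * 4KB = 2MB per PGD entry, so a huge page is 2MB and
 * each EntryLo half maps 1MB, which the hardware does support. */
static_assert((DEMO_PTES_PER_PAGE / 2) * DEMO_PAGE_SIZE == 2u << 20, "2MB region");
static_assert((2u << 20) / 2 == 1u << 20, "1MB per EntryLo half");

int main(void) { return 0; }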
---|
@@ -44,7 +61,12 @@
  */
 
 /* PGDIR_SHIFT determines what a third-level page table entry can map */
-#define PGDIR_SHIFT (2 * PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2)
+#if defined(CONFIG_MIPS_HUGE_TLB_SUPPORT) && !defined(CONFIG_PHYS_ADDR_T_64BIT)
+# define PGDIR_SHIFT (2 * PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2 - 1)
+#else
+# define PGDIR_SHIFT (2 * PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2)
+#endif
+
 #define PGDIR_SIZE (1UL << PGDIR_SHIFT)
 #define PGDIR_MASK (~(PGDIR_SIZE-1))
 
@@ -52,14 +74,23 @@
  * Entries per page directory level: we use two-level, so
  * we don't really have any PUD/PMD directory physically.
  */
-#define __PGD_ORDER (32 - 3 * PAGE_SHIFT + PGD_T_LOG2 + PTE_T_LOG2)
+#if defined(CONFIG_MIPS_HUGE_TLB_SUPPORT) && !defined(CONFIG_PHYS_ADDR_T_64BIT)
+# define __PGD_ORDER (32 - 3 * PAGE_SHIFT + PGD_T_LOG2 + PTE_T_LOG2 + 1)
+#else
+# define __PGD_ORDER (32 - 3 * PAGE_SHIFT + PGD_T_LOG2 + PTE_T_LOG2)
+#endif
+
 #define PGD_ORDER (__PGD_ORDER >= 0 ? __PGD_ORDER : 0)
 #define PUD_ORDER aieeee_attempt_to_allocate_pud
-#define PMD_ORDER 1
+#define PMD_ORDER aieeee_attempt_to_allocate_pmd
 #define PTE_ORDER 0
 
 #define PTRS_PER_PGD (USER_PTRS_PER_PGD * 2)
-#define PTRS_PER_PTE ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))
+#if defined(CONFIG_MIPS_HUGE_TLB_SUPPORT) && !defined(CONFIG_PHYS_ADDR_T_64BIT)
+# define PTRS_PER_PTE ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t) / 2)
+#else
+# define PTRS_PER_PTE ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))
+#endif
 
 #define USER_PTRS_PER_PGD (0x80000000UL/PGDIR_SIZE)
 #define FIRST_USER_ADDRESS 0UL
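As a sanity check on the conditional definitions above (again illustrative commentary, not part of the patch), plugging in the typical 32-bit values PAGE_SHIFT = 12, PTE_ORDER = 0, PTE_T_LOG2 = 2 and PGD_T_LOG2 = 2 reproduces the numbers the earlier comment promises; note that the PGD still covers the full 4GB either way.

/* Illustrative check of the formulas above with PAGE_SHIFT = 12, PTE_ORDER = 0,
 * PTE_T_LOG2 = PGD_T_LOG2 = 2 (4-byte pte_t/pgd_t). */
#include <assert.h>

/* Without CONFIG_MIPS_HUGE_TLB_SUPPORT */
static_assert(2 * 12 + 0 - 2 == 22,         "PGDIR_SHIFT 22 -> 4MB PGDIR_SIZE");
static_assert(32 - 3 * 12 + 2 + 2 == 0,     "__PGD_ORDER 0 -> one 4KB PGD page");
static_assert((4096 << 0) / 4 == 1024,      "PTRS_PER_PTE 1024");

/* With CONFIG_MIPS_HUGE_TLB_SUPPORT && !CONFIG_PHYS_ADDR_T_64BIT */
static_assert(2 * 12 + 0 - 2 - 1 == 21,     "PGDIR_SHIFT 21 -> 2MB PGDIR_SIZE");
static_assert(32 - 3 * 12 + 2 + 2 + 1 == 1, "__PGD_ORDER 1 -> two 4KB PGD pages");
static_assert((4096 << 0) / 4 / 2 == 512,   "PTRS_PER_PTE 512, upper half unused");

/* Total coverage is unchanged: 1024 entries * 4MB == 2048 entries * 2MB == 4GB. */
static_assert(1024ULL * (4 << 20) == 2048ULL * (2 << 20), "PGD still spans 4GB");

int main(void) { return 0; }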
---|
@@ -87,7 +118,7 @@
 
 extern void load_pgd(unsigned long pg_dir);
 
-extern pte_t invalid_pte_table[PAGE_SIZE/sizeof(pte_t)];
+extern pte_t invalid_pte_table[PTRS_PER_PTE];
 
 /*
  * Empty pgd/pmd entries point to the invalid_pte_table.
@@ -97,7 +128,19 @@
 	return pmd_val(pmd) == (unsigned long) invalid_pte_table;
 }
 
-#define pmd_bad(pmd)	(pmd_val(pmd) & ~PAGE_MASK)
+static inline int pmd_bad(pmd_t pmd)
+{
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
+	/* pmd_huge(pmd) but inline */
+	if (unlikely(pmd_val(pmd) & _PAGE_HUGE))
+		return 0;
+#endif
+
+	if (unlikely(pmd_val(pmd) & ~PAGE_MASK))
+		return 1;
+
+	return 0;
+}
 
 static inline int pmd_present(pmd_t pmd)
 {
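For context on the pmd_bad() change (a hedged aside, not part of the patch): a huge-page PMD is a leaf entry whose bits below PAGE_MASK carry protection and PFN information rather than a page-aligned pointer to a PTE page, so the old test pmd_val(pmd) & ~PAGE_MASK would report every huge mapping as corrupt. A minimal, hypothetical walker sketch, assuming the usual MIPS kernel pgtable definitions, shows the ordering such code relies on:

/* Hypothetical sketch (kernel context assumed): treat a huge PMD as a leaf,
 * and only descend into a PTE page when the entry really points at one. */
static void demo_walk_pmd(pmd_t *pmd, unsigned long addr)
{
	pte_t *ptep;

	if (pmd_none(*pmd))
		return;		/* nothing mapped at this PMD */

	if (pmd_val(*pmd) & _PAGE_HUGE)
		return;		/* leaf huge mapping: no PTE page to walk */

	if (pmd_bad(*pmd))
		return;		/* genuinely corrupt: low bits set but not huge */

	ptep = pte_offset_kernel(pmd, addr);	/* *pmd points at a PTE page */
	(void)ptep;		/* ...inspect or modify the PTE here... */
}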
---|
@@ -149,36 +192,13 @@
 #else
 #define pte_pfn(x)		((unsigned long)((x).pte >> _PFN_SHIFT))
 #define pfn_pte(pfn, prot)	__pte(((unsigned long long)(pfn) << _PFN_SHIFT) | pgprot_val(prot))
+#define pfn_pmd(pfn, prot)	__pmd(((unsigned long long)(pfn) << _PFN_SHIFT) | pgprot_val(prot))
 #endif
 #endif /* defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) */
 
 #define pte_page(x)		pfn_to_page(pte_pfn(x))
 
-#define __pgd_offset(address)	pgd_index(address)
-#define __pud_offset(address)	(((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
-#define __pmd_offset(address)	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
-
-/* to find an entry in a kernel page-table-directory */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-
-#define pgd_index(address)	(((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
-
-/* to find an entry in a page-table-directory */
-#define pgd_offset(mm, addr)	((mm)->pgd + pgd_index(addr))
-
-/* Find an entry in the third-level page table.. */
-#define __pte_offset(address)						\
-	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-#define pte_offset(dir, address)					\
-	((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
-#define pte_offset_kernel(dir, address)					\
-	((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
-
-#define pte_offset_map(dir, address)					\
-	((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
-#define pte_unmap(pte) ((void)(pte))
-
-#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
+#if defined(CONFIG_CPU_R3K_TLB)
 
 /* Swap entries must have VALID bit cleared. */
 #define __swp_type(x)		(((x).val >> 10) & 0x1f)
@@ -223,6 +243,6 @@
 
 #endif /* defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) */
 
-#endif /* defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) */
+#endif /* defined(CONFIG_CPU_R3K_TLB) */
 
 #endif /* _ASM_PGTABLE_32_H */