@@ -35,8 +35,6 @@
 #define HPAGE_SHIFT 22
 #elif defined(CONFIG_HUGETLB_PAGE_SIZE_64MB)
 #define HPAGE_SHIFT 26
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512MB)
-#define HPAGE_SHIFT 29
 #endif
 
 #ifdef CONFIG_HUGETLB_PAGE
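The two lines dropped above are the CONFIG_HUGETLB_PAGE_SIZE_512MB case; that size was only offered for the sh5 parts whose support is being removed throughout this diff, so the branch is dead. HPAGE_SHIFT is simply log2 of the huge page size, which is easy to sanity-check in a standalone sketch (the _4MB/_64MB/_512MB macro names below are illustrative, not from the header):

#include <stdio.h>

/* Illustrative names; the real header defines a single HPAGE_SHIFT,
 * selected by Kconfig. */
#define HPAGE_SHIFT_4MB   22
#define HPAGE_SHIFT_64MB  26
#define HPAGE_SHIFT_512MB 29  /* the case removed above */

int main(void)
{
	/* A huge page spans 1 << HPAGE_SHIFT bytes. */
	printf("shift 22 -> %lu MB\n", (1UL << HPAGE_SHIFT_4MB) >> 20);
	printf("shift 26 -> %lu MB\n", (1UL << HPAGE_SHIFT_64MB) >> 20);
	printf("shift 29 -> %lu MB\n", (1UL << HPAGE_SHIFT_512MB) >> 20);
	return 0;
}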
@@ -82,15 +80,9 @@
 	((x).pte_low | ((unsigned long long)(x).pte_high << 32))
 #define __pte(x) \
 	({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
-#elif defined(CONFIG_SUPERH32)
+#else
 typedef struct { unsigned long pte_low; } pte_t;
 typedef struct { unsigned long pgprot; } pgprot_t;
-typedef struct { unsigned long pgd; } pgd_t;
-#define pte_val(x) ((x).pte_low)
-#define __pte(x) ((pte_t) { (x) } )
-#else
-typedef struct { unsigned long long pte_low; } pte_t;
-typedef struct { unsigned long long pgprot; } pgprot_t;
 typedef struct { unsigned long pgd; } pgd_t;
 #define pte_val(x) ((x).pte_low)
 #define __pte(x) ((pte_t) { (x) } )
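With sh5 gone, every remaining SH build is 32-bit, so the explicit CONFIG_SUPERH32 test collapses to a plain #else and the now-unreachable third branch (an sh5-style pte_t holding a single unsigned long long) is deleted; the pgd_t, pte_val() and __pte() definitions it duplicated survive unchanged in the merged branch. The first branch (only partly visible above) keeps a two-word pte_t for the 64-bit PTE format. A minimal user-space sketch of how that two-word variant splits and reassembles a 64-bit PTE value, with uint32_t standing in for the 32-bit kernel's unsigned long (needs GCC/Clang for the statement expression, as in the header):

#include <stdint.h>
#include <stdio.h>

/* uint32_t models the 32-bit kernel's unsigned long here. */
typedef struct { uint32_t pte_low, pte_high; } pte_t;

#define pte_val(x) \
	((x).pte_low | ((unsigned long long)(x).pte_high << 32))
#define __pte(x) \
	({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })

int main(void)
{
	/* Round-trip a 64-bit PTE value through the low/high pair. */
	pte_t pte = __pte(0x123456789abcdef0ULL);

	printf("low=%#x high=%#x val=%#llx\n",
	       (unsigned)pte.pte_low, (unsigned)pte.pte_high, pte_val(pte));
	return 0;
}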
@@ -182,9 +174,6 @@
 #endif
 #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
 
-#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
-			       VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-
 #include <asm-generic/memory_model.h>
 #include <asm-generic/getorder.h>
 
@@ -193,16 +182,5 @@
  * and so we have to increase the kmalloc minalign for this.
  */
 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
-
-#ifdef CONFIG_SUPERH64
-/*
- * While BYTES_PER_WORD == 4 on the current sh64 ABI, GCC will still
- * happily generate {ld/st}.q pairs, requiring us to have 8-byte
- * alignment to avoid traps. The kmalloc alignment is guaranteed by
- * virtue of L1_CACHE_BYTES, requiring this to only be special cased
- * for slab caches.
- */
-#define ARCH_SLAB_MINALIGN 8
-#endif
 
 #endif /* __ASM_SH_PAGE_H */
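The block removed in this last hunk was sh64's slab-alignment override; its comment records why it existed (sh64 GCC emits paired 8-byte ld.q/st.q accesses, so slab objects needed 8-byte alignment). What remains is the ARCH_DMA_MINALIGN rule just above it: drivers may DMA into kmalloc'ed buffers on SH, and a non-coherent DMA buffer must own its cache lines outright, hence a minimum alignment of L1_CACHE_BYTES. A small sketch of that constraint, using a hypothetical 32-byte line size (the real value varies by SH core):

#include <assert.h>
#include <stdint.h>

/* Hypothetical value for this sketch; on SH, L1_CACHE_BYTES is per-core. */
#define L1_CACHE_BYTES    32
#define ARCH_DMA_MINALIGN L1_CACHE_BYTES

/*
 * A non-coherent DMA buffer must not share a cache line with
 * neighbouring data: writing back or invalidating a shared line
 * around the transfer would corrupt one side or the other.
 */
struct dma_buffer {
	unsigned char data[64];
} __attribute__((aligned(ARCH_DMA_MINALIGN)));

int main(void)
{
	struct dma_buffer buf;

	/* Start address and size are both multiples of the line size. */
	assert((uintptr_t)&buf % ARCH_DMA_MINALIGN == 0);
	assert(sizeof(buf) % ARCH_DMA_MINALIGN == 0);
	return 0;
}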