hc
2024-10-16 50a212ec906f7524620675f0c57357691c26c81f
kernel/arch/sh/include/asm/page.h
@@ -35,8 +35,6 @@
 #define HPAGE_SHIFT 22
 #elif defined(CONFIG_HUGETLB_PAGE_SIZE_64MB)
 #define HPAGE_SHIFT 26
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512MB)
-#define HPAGE_SHIFT 29
 #endif
 
 #ifdef CONFIG_HUGETLB_PAGE
@@ -82,15 +80,9 @@
 	((x).pte_low | ((unsigned long long)(x).pte_high << 32))
 #define __pte(x) \
 	({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
-#elif defined(CONFIG_SUPERH32)
+#else
 typedef struct { unsigned long pte_low; } pte_t;
 typedef struct { unsigned long pgprot; } pgprot_t;
-typedef struct { unsigned long pgd; } pgd_t;
-#define pte_val(x) ((x).pte_low)
-#define __pte(x) ((pte_t) { (x) } )
-#else
-typedef struct { unsigned long long pte_low; } pte_t;
-typedef struct { unsigned long long pgprot; } pgprot_t;
 typedef struct { unsigned long pgd; } pgd_t;
 #define pte_val(x) ((x).pte_low)
 #define __pte(x) ((pte_t) { (x) } )
@@ -182,9 +174,6 @@
 #endif
 #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
 
-#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
-				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-
 #include <asm-generic/memory_model.h>
 #include <asm-generic/getorder.h>
 
@@ -193,16 +182,5 @@
  * and so we have to increase the kmalloc minalign for this.
  */
 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
-
-#ifdef CONFIG_SUPERH64
-/*
- * While BYTES_PER_WORD == 4 on the current sh64 ABI, GCC will still
- * happily generate {ld/st}.q pairs, requiring us to have 8-byte
- * alignment to avoid traps. The kmalloc alignment is guaranteed by
- * virtue of L1_CACHE_BYTES, requiring this to only be special cased
- * for slab caches.
- */
-#define ARCH_SLAB_MINALIGN 8
-#endif
 
 #endif /* __ASM_SH_PAGE_H */
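
The second hunk above collapses the previous three-way pte_t/pgprot_t definition (CONFIG_X2TLB, CONFIG_SUPERH32, and the sh64 fallback) into a single 32-bit #else branch. As a rough illustration of the shape that survives, the standalone program below mirrors the remaining typedefs and accessor macros from the "+" and context lines of that hunk; the main()/printf scaffolding and the example bit values are invented for this sketch and are not part of the patch or of the kernel header.

/*
 * Illustration only, not kernel code: after this patch the non-X2TLB
 * branch of arch/sh/include/asm/page.h is the sole fallback, so a pte
 * is a single unsigned long wrapped in a struct.
 */
#include <stdio.h>

typedef struct { unsigned long pte_low; } pte_t;
typedef struct { unsigned long pgprot; } pgprot_t;
typedef struct { unsigned long pgd; } pgd_t;

#define pte_val(x) ((x).pte_low)
#define __pte(x) ((pte_t) { (x) })

int main(void)
{
	/* made-up frame address and protection bits, purely for the demo */
	pte_t pte = __pte(0x1000UL | 0x7UL);

	printf("pte_val = %#lx\n", pte_val(pte));
	return 0;
}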