forked from ~ljy/RK356X_SDK_RELEASE

hc · 2023-12-11 · commit 6778948f9de86c3cfaf36725a7c87dcff9ba247f
kernel/arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -2,13 +2,104 @@
 #ifndef _ASM_POWERPC_BOOK3S_32_PGTABLE_H
 #define _ASM_POWERPC_BOOK3S_32_PGTABLE_H
 
-#define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopmd.h>
 
 #include <asm/book3s/32/hash.h>
 
 /* And here we include common definitions */
-#include <asm/pte-common.h>
+
+#define _PAGE_KERNEL_RO 0
+#define _PAGE_KERNEL_ROX (_PAGE_EXEC)
+#define _PAGE_KERNEL_RW (_PAGE_DIRTY | _PAGE_RW)
+#define _PAGE_KERNEL_RWX (_PAGE_DIRTY | _PAGE_RW | _PAGE_EXEC)
+
+#define _PAGE_HPTEFLAGS _PAGE_HASHPTE
+
+#ifndef __ASSEMBLY__
+
+static inline bool pte_user(pte_t pte)
+{
+        return pte_val(pte) & _PAGE_USER;
+}
+#endif /* __ASSEMBLY__ */
+
+/*
+ * Location of the PFN in the PTE. Most 32-bit platforms use the same
+ * as _PAGE_SHIFT here (ie, naturally aligned).
+ * Platform who don't just pre-define the value so we don't override it here.
+ */
+#define PTE_RPN_SHIFT (PAGE_SHIFT)
+
+/*
+ * The mask covered by the RPN must be a ULL on 32-bit platforms with
+ * 64-bit PTEs.
+ */
+#ifdef CONFIG_PTE_64BIT
+#define PTE_RPN_MASK (~((1ULL << PTE_RPN_SHIFT) - 1))
+#define MAX_POSSIBLE_PHYSMEM_BITS 36
+#else
+#define PTE_RPN_MASK (~((1UL << PTE_RPN_SHIFT) - 1))
+#define MAX_POSSIBLE_PHYSMEM_BITS 32
+#endif
+
+/*
+ * _PAGE_CHG_MASK masks of bits that are to be preserved across
+ * pgprot changes.
+ */
+#define _PAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HASHPTE | _PAGE_DIRTY | \
+                        _PAGE_ACCESSED | _PAGE_SPECIAL)
+
+/*
+ * We define 2 sets of base prot bits, one for basic pages (ie,
+ * cacheable kernel and user pages) and one for non cacheable
+ * pages. We always set _PAGE_COHERENT when SMP is enabled or
+ * the processor might need it for DMA coherency.
+ */
+#define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED)
+#define _PAGE_BASE (_PAGE_BASE_NC | _PAGE_COHERENT)
+
+/*
+ * Permission masks used to generate the __P and __S table.
+ *
+ * Note:__pgprot is defined in arch/powerpc/include/asm/page.h
+ *
+ * Write permissions imply read permissions for now.
+ */
+#define PAGE_NONE __pgprot(_PAGE_BASE)
+#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
+#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
+#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER)
+#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
+#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER)
+#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
+
+/* Permission masks used for kernel mappings */
+#define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
+#define PAGE_KERNEL_NC __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE)
+#define PAGE_KERNEL_NCG __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
+                                 _PAGE_NO_CACHE | _PAGE_GUARDED)
+#define PAGE_KERNEL_X __pgprot(_PAGE_BASE | _PAGE_KERNEL_RWX)
+#define PAGE_KERNEL_RO __pgprot(_PAGE_BASE | _PAGE_KERNEL_RO)
+#define PAGE_KERNEL_ROX __pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX)
+
+/*
+ * Protection used for kernel text. We want the debuggers to be able to
+ * set breakpoints anywhere, so don't write protect the kernel text
+ * on platforms where such control is possible.
+ */
+#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\
+        defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE)
+#define PAGE_KERNEL_TEXT PAGE_KERNEL_X
+#else
+#define PAGE_KERNEL_TEXT PAGE_KERNEL_ROX
+#endif
+
+/* Make modules code happy. We don't set RO yet */
+#define PAGE_KERNEL_EXEC PAGE_KERNEL_X
+
+/* Advertise special mapping type for AGP */
+#define PAGE_AGP (PAGE_KERNEL_NC)
+#define HAVE_PAGE_AGP
 
 #define PTE_INDEX_SIZE PTE_SHIFT
 #define PMD_INDEX_SIZE 0
@@ -23,6 +114,9 @@
 #define PMD_TABLE_SIZE 0
 #define PUD_TABLE_SIZE 0
 #define PGD_TABLE_SIZE (sizeof(pgd_t) << PGD_INDEX_SIZE)
+
+/* Bits to mask out from a PMD to get to the PTE page */
+#define PMD_MASKED_BITS (PTE_TABLE_SIZE - 1)
 #endif /* __ASSEMBLY__ */
 
 #define PTRS_PER_PTE (1 << PTE_INDEX_SIZE)
@@ -44,27 +138,35 @@
 #define PGDIR_MASK (~(PGDIR_SIZE-1))
 
 #define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
+
+#ifndef __ASSEMBLY__
+
+int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
+void unmap_kernel_page(unsigned long va);
+
+#endif /* !__ASSEMBLY__ */
+
 /*
  * This is the bottom of the PKMAP area with HIGHMEM or an arbitrary
  * value (for now) on others, from where we can start layout kernel
  * virtual space that goes below PKMAP and FIXMAP
  */
-#ifdef CONFIG_HIGHMEM
-#define KVIRT_TOP PKMAP_BASE
-#else
-#define KVIRT_TOP (0xfe000000UL) /* for now, could be FIXMAP_BASE ? */
-#endif
+#include <asm/fixmap.h>
 
 /*
  * ioremap_bot starts at that address. Early ioremaps move down from there,
  * until mem_init() at which point this becomes the top of the vmalloc
  * and ioremap space
 */
-#ifdef CONFIG_NOT_COHERENT_CACHE
-#define IOREMAP_TOP ((KVIRT_TOP - CONFIG_CONSISTENT_SIZE) & PAGE_MASK)
+#ifdef CONFIG_HIGHMEM
+#define IOREMAP_TOP PKMAP_BASE
 #else
-#define IOREMAP_TOP KVIRT_TOP
+#define IOREMAP_TOP FIXADDR_START
 #endif
+
+/* PPC32 shares vmalloc area with ioremap */
+#define IOREMAP_START VMALLOC_START
+#define IOREMAP_END VMALLOC_END
 
 /*
  * Just any arbitrary offset to the start of the vmalloc VM area: the
@@ -84,14 +186,23 @@
  * of RAM. -- Cort
  */
 #define VMALLOC_OFFSET (0x1000000) /* 16M */
+
 #define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
+
+#ifdef CONFIG_KASAN_VMALLOC
+#define VMALLOC_END ALIGN_DOWN(ioremap_bot, PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
+#else
 #define VMALLOC_END ioremap_bot
+#endif
+
+#ifdef CONFIG_STRICT_KERNEL_RWX
+#define MODULES_END ALIGN_DOWN(PAGE_OFFSET, SZ_256M)
+#define MODULES_VADDR (MODULES_END - SZ_256M)
+#endif
 
 #ifndef __ASSEMBLY__
 #include <linux/sched.h>
 #include <linux/threads.h>
-
-extern unsigned long ioremap_bot;
 
 /* Bits to mask out from a PGD to get to the PUD page */
 #define PGD_MASKED_BITS 0
@@ -107,7 +218,7 @@
  */
 
 #define pte_clear(mm, addr, ptep) \
-        do { pte_update(ptep, ~_PAGE_HASHPTE, 0); } while (0)
+        do { pte_update(mm, addr, ptep, ~_PAGE_HASHPTE, 0, 0); } while (0)
 
 #define pmd_none(pmd) (!pmd_val(pmd))
 #define pmd_bad(pmd) (pmd_val(pmd) & _PMD_BAD)
@@ -142,91 +253,69 @@
  * and the PTE may be either 32 or 64 bit wide. In the later case,
  * when using atomic updates, only the low part of the PTE is
  * accessed atomically.
- *
- * In addition, on 44x, we also maintain a global flag indicating
- * that an executable user mapping was modified, which is needed
- * to properly flush the virtually tagged instruction cache of
- * those implementations.
 */
-#ifndef CONFIG_PTE_64BIT
-static inline unsigned long pte_update(pte_t *p,
-                                       unsigned long clr,
-                                       unsigned long set)
+static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
+                                     unsigned long clr, unsigned long set, int huge)
 {
-        unsigned long old, tmp;
-
-        __asm__ __volatile__("\
-1: lwarx %0,0,%3\n\
- andc %1,%0,%4\n\
- or %1,%1,%5\n"
-" stwcx. %1,0,%3\n\
- bne- 1b"
-        : "=&r" (old), "=&r" (tmp), "=m" (*p)
-        : "r" (p), "r" (clr), "r" (set), "m" (*p)
-        : "cc" );
-
-        return old;
-}
-#else /* CONFIG_PTE_64BIT */
-static inline unsigned long long pte_update(pte_t *p,
-                                            unsigned long clr,
-                                            unsigned long set)
-{
-        unsigned long long old;
+        pte_basic_t old;
         unsigned long tmp;
 
-        __asm__ __volatile__("\
-1: lwarx %L0,0,%4\n\
- lwzx %0,0,%3\n\
- andc %1,%L0,%5\n\
- or %1,%1,%6\n"
-" stwcx. %1,0,%4\n\
- bne- 1b"
+        __asm__ __volatile__(
+#ifndef CONFIG_PTE_64BIT
+"1: lwarx %0, 0, %3\n"
+" andc %1, %0, %4\n"
+#else
+"1: lwarx %L0, 0, %3\n"
+" lwz %0, -4(%3)\n"
+" andc %1, %L0, %4\n"
+#endif
+" or %1, %1, %5\n"
+" stwcx. %1, 0, %3\n"
+" bne- 1b"
         : "=&r" (old), "=&r" (tmp), "=m" (*p)
-        : "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p)
+#ifndef CONFIG_PTE_64BIT
+        : "r" (p),
+#else
+        : "b" ((unsigned long)(p) + 4),
+#endif
+          "r" (clr), "r" (set), "m" (*p)
         : "cc" );
 
         return old;
 }
-#endif /* CONFIG_PTE_64BIT */
 
 /*
  * 2.6 calls this without flushing the TLB entry; this is wrong
  * for our hash-based implementation, we fix that up here.
 */
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-static inline int __ptep_test_and_clear_young(unsigned int context, unsigned long addr, pte_t *ptep)
+static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
+                                              unsigned long addr, pte_t *ptep)
 {
         unsigned long old;
-        old = pte_update(ptep, _PAGE_ACCESSED, 0);
+        old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
         if (old & _PAGE_HASHPTE) {
                 unsigned long ptephys = __pa(ptep) & PAGE_MASK;
-                flush_hash_pages(context, addr, ptephys, 1);
+                flush_hash_pages(mm->context.id, addr, ptephys, 1);
         }
         return (old & _PAGE_ACCESSED) != 0;
 }
 #define ptep_test_and_clear_young(__vma, __addr, __ptep) \
-        __ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep)
+        __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep)
 
 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
 static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
                                        pte_t *ptep)
 {
-        return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0));
+        return __pte(pte_update(mm, addr, ptep, ~_PAGE_HASHPTE, 0, 0));
 }
 
 #define __HAVE_ARCH_PTEP_SET_WRPROTECT
 static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
                                       pte_t *ptep)
 {
-        pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), _PAGE_RO);
+        pte_update(mm, addr, ptep, _PAGE_RW, 0, 0);
 }
-static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
-                                           unsigned long addr, pte_t *ptep)
-{
-        ptep_set_wrprotect(mm, addr, ptep);
-}
-
 
 static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
                                            pte_t *ptep, pte_t entry,
@@ -235,9 +324,8 @@
 {
         unsigned long set = pte_val(entry) &
                 (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
-        unsigned long clr = ~pte_val(entry) & _PAGE_RO;
 
-        pte_update(ptep, clr, set);
+        pte_update(vma->vm_mm, address, ptep, 0, set, 0);
 
         flush_tlb_page(vma, address);
 }
@@ -245,40 +333,8 @@
 #define __HAVE_ARCH_PTE_SAME
 #define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)
 
-/*
- * Note that on Book E processors, the pmd contains the kernel virtual
- * (lowmem) address of the pte page. The physical address is less useful
- * because everything runs with translation enabled (even the TLB miss
- * handler). On everything else the pmd contains the physical address
- * of the pte page. -- paulus
- */
-#ifndef CONFIG_BOOKE
-#define pmd_page_vaddr(pmd) \
-        ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
 #define pmd_page(pmd) \
         pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
-#else
-#define pmd_page_vaddr(pmd) \
-        ((unsigned long) (pmd_val(pmd) & PAGE_MASK))
-#define pmd_page(pmd) \
-        pfn_to_page((__pa(pmd_val(pmd)) >> PAGE_SHIFT))
-#endif
-
-/* to find an entry in a kernel page-table-directory */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-
-/* to find an entry in a page-table-directory */
-#define pgd_index(address) ((address) >> PGDIR_SHIFT)
-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
-
-/* Find an entry in the third-level page table.. */
-#define pte_index(address) \
-        (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-#define pte_offset_kernel(dir, addr) \
-        ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
-#define pte_offset_map(dir, addr) \
-        ((pte_t *) kmap_atomic(pmd_page(*(dir))) + pte_index(addr))
-#define pte_unmap(pte) kunmap_atomic(pte)
 
 /*
  * Encode and decode a swap entry.
@@ -292,8 +348,6 @@
 #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 3 })
 #define __swp_entry_to_pte(x) ((pte_t) { (x).val << 3 })
 
-int map_kernel_page(unsigned long va, phys_addr_t pa, int flags);
-
 /* Generic accessors to PTE bits */
 static inline int pte_write(pte_t pte) { return !!(pte_val(pte) & _PAGE_RW);}
 static inline int pte_read(pte_t pte) { return 1; }
@@ -301,11 +355,26 @@
 static inline int pte_young(pte_t pte) { return !!(pte_val(pte) & _PAGE_ACCESSED); }
 static inline int pte_special(pte_t pte) { return !!(pte_val(pte) & _PAGE_SPECIAL); }
 static inline int pte_none(pte_t pte) { return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
-static inline pgprot_t pte_pgprot(pte_t pte) { return __pgprot(pte_val(pte) & PAGE_PROT_BITS); }
+static inline bool pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_EXEC; }
 
 static inline int pte_present(pte_t pte)
 {
         return pte_val(pte) & _PAGE_PRESENT;
+}
+
+static inline bool pte_hw_valid(pte_t pte)
+{
+        return pte_val(pte) & _PAGE_PRESENT;
+}
+
+static inline bool pte_hashpte(pte_t pte)
+{
+        return !!(pte_val(pte) & _PAGE_HASHPTE);
+}
+
+static inline bool pte_ci(pte_t pte)
+{
+        return !!(pte_val(pte) & _PAGE_NO_CACHE);
 }
 
 /*
@@ -315,17 +384,14 @@
 #define pte_access_permitted pte_access_permitted
 static inline bool pte_access_permitted(pte_t pte, bool write)
 {
-        unsigned long pteval = pte_val(pte);
         /*
          * A read-only access is controlled by _PAGE_USER bit.
          * We have _PAGE_READ set for WRITE and EXECUTE
          */
-        unsigned long need_pte_bits = _PAGE_PRESENT | _PAGE_USER;
+        if (!pte_present(pte) || !pte_user(pte) || !pte_read(pte))
+                return false;
 
-        if (write)
-                need_pte_bits |= _PAGE_WRITE;
-
-        if ((pteval & need_pte_bits) != need_pte_bits)
+        if (write && !pte_write(pte))
                 return false;
 
         return true;
@@ -354,6 +420,11 @@
         return __pte(pte_val(pte) & ~_PAGE_RW);
 }
 
+static inline pte_t pte_exprotect(pte_t pte)
+{
+        return __pte(pte_val(pte) & ~_PAGE_EXEC);
+}
+
 static inline pte_t pte_mkclean(pte_t pte)
 {
         return __pte(pte_val(pte) & ~_PAGE_DIRTY);
@@ -362,6 +433,16 @@
 static inline pte_t pte_mkold(pte_t pte)
 {
         return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
+}
+
+static inline pte_t pte_mkexec(pte_t pte)
+{
+        return __pte(pte_val(pte) | _PAGE_EXEC);
+}
+
+static inline pte_t pte_mkpte(pte_t pte)
+{
+        return pte;
 }
 
 static inline pte_t pte_mkwrite(pte_t pte)
@@ -389,6 +470,16 @@
         return pte;
 }
 
+static inline pte_t pte_mkprivileged(pte_t pte)
+{
+        return __pte(pte_val(pte) & ~_PAGE_USER);
+}
+
+static inline pte_t pte_mkuser(pte_t pte)
+{
+        return __pte(pte_val(pte) | _PAGE_USER);
+}
+
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
         return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
@@ -404,7 +495,7 @@
 static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
                                 pte_t *ptep, pte_t pte, int percpu)
 {
-#if defined(CONFIG_PPC_STD_MMU_32) && defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
+#if defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
         /* First case is 32-bit Hash MMU in SMP mode with 32-bit PTEs. We use the
          * helper pte_update() which does an atomic update. We need to do that
          * because a concurrent invalidation can clear _PAGE_HASHPTE. If it's a
@@ -415,9 +506,9 @@
                 *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
                               | (pte_val(pte) & ~_PAGE_HASHPTE));
         else
-                pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte));
+                pte_update(mm, addr, ptep, ~_PAGE_HASHPTE, pte_val(pte), 0);
 
-#elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
+#elif defined(CONFIG_PTE_64BIT)
         /* Second case is 32-bit with 64-bit PTE. In this case, we
          * can just store as long as we do the two halves in the right order
          * with a barrier in between. This is possible because we take care,
@@ -440,7 +531,7 @@
                              : "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
                              : "r" (pte) : "memory");
 
-#elif defined(CONFIG_PPC_STD_MMU_32)
+#else
         /* Third case is 32-bit hash table in UP mode, we need to preserve
          * the _PAGE_HASHPTE bit since we may not have invalidated the previous
          * translation in the hash yet (done in a subsequent flush_tlb_xxx())
@@ -448,9 +539,6 @@
          */
         *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
                       | (pte_val(pte) & ~_PAGE_HASHPTE));
-
-#else
-#error "Not supported "
 #endif
 }
 
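
For reference, a minimal sketch (not part of the commit) of how a caller adapts to the widened pte_update() interface introduced above. The wrapper name clear_accessed_bit() is hypothetical; it simply mirrors the call pattern already used by __ptep_test_and_clear_young() in this header.

/* Old form:  old = pte_update(ptep, _PAGE_ACCESSED, 0);              */
/* New form:  old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0); */
static inline int clear_accessed_bit(struct mm_struct *mm, unsigned long addr,
                                     pte_t *ptep)
{
        /* Clear _PAGE_ACCESSED, set no bits, huge = 0 (regular page size). */
        pte_basic_t old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);

        return (old & _PAGE_ACCESSED) != 0;
}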