.. | .. |
---|
| 1 | +/* SPDX-License-Identifier: GPL-2.0-or-later */ |
---|
1 | 2 | /* |
---|
2 | | - * This program is free software; you can redistribute it and/or |
---|
3 | | - * modify it under the terms of the GNU General Public License |
---|
4 | | - * as published by the Free Software Foundation; either version |
---|
5 | | - * 2 of the License, or (at your option) any later version. |
---|
6 | 3 | */ |
---|
7 | 4 | #ifndef _ASM_POWERPC_CACHEFLUSH_H |
---|
8 | 5 | #define _ASM_POWERPC_CACHEFLUSH_H |
---|
9 | 6 | |
---|
10 | | -#ifdef __KERNEL__ |
---|
11 | | - |
---|
12 | 7 | #include <linux/mm.h> |
---|
13 | 8 | #include <asm/cputable.h> |
---|
14 | | - |
---|
15 | | -/* |
---|
16 | | - * No cache flushing is required when address mappings are changed, |
---|
17 | | - * because the caches on PowerPCs are physically addressed. |
---|
18 | | - */ |
---|
19 | | -#define flush_cache_all() do { } while (0) |
---|
20 | | -#define flush_cache_mm(mm) do { } while (0) |
---|
21 | | -#define flush_cache_dup_mm(mm) do { } while (0) |
---|
22 | | -#define flush_cache_range(vma, start, end) do { } while (0) |
---|
23 | | -#define flush_cache_page(vma, vmaddr, pfn) do { } while (0) |
---|
24 | | -#define flush_icache_page(vma, page) do { } while (0) |
---|
25 | | -#define flush_cache_vunmap(start, end) do { } while (0) |
---|
| 9 | +#include <asm/cpu_has_feature.h> |
---|
26 | 10 | |
---|
27 | 11 | #ifdef CONFIG_PPC_BOOK3S_64 |
---|
28 | 12 | /* |
---|
.. | .. |
---|
36 | 20 | { |
---|
37 | 21 | asm volatile("ptesync" ::: "memory"); |
---|
38 | 22 | } |
---|
39 | | -#else |
---|
40 | | -static inline void flush_cache_vmap(unsigned long start, unsigned long end) { } |
---|
41 | | -#endif |
---|
| 23 | +#define flush_cache_vmap flush_cache_vmap |
---|
| 24 | +#endif /* CONFIG_PPC_BOOK3S_64 */ |
---|
42 | 25 | |
---|
43 | 26 | #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 |
---|
44 | 27 | extern void flush_dcache_page(struct page *page); |
---|
45 | | -#define flush_dcache_mmap_lock(mapping) do { } while (0) |
---|
46 | | -#define flush_dcache_mmap_unlock(mapping) do { } while (0) |
---|
47 | 28 | |
---|
48 | | -extern void flush_icache_range(unsigned long, unsigned long); |
---|
49 | | -extern void flush_icache_user_range(struct vm_area_struct *vma, |
---|
50 | | - struct page *page, unsigned long addr, |
---|
51 | | - int len); |
---|
52 | | -extern void __flush_dcache_icache(void *page_va); |
---|
53 | | -extern void flush_dcache_icache_page(struct page *page); |
---|
54 | | -#if defined(CONFIG_PPC32) && !defined(CONFIG_BOOKE) |
---|
55 | | -extern void __flush_dcache_icache_phys(unsigned long physaddr); |
---|
56 | | -#else |
---|
57 | | -static inline void __flush_dcache_icache_phys(unsigned long physaddr) |
---|
58 | | -{ |
---|
59 | | - BUG(); |
---|
60 | | -} |
---|
61 | | -#endif |
---|
| 29 | +void flush_icache_range(unsigned long start, unsigned long stop); |
---|
| 30 | +#define flush_icache_range flush_icache_range |
---|
62 | 31 | |
---|
63 | | -#ifdef CONFIG_PPC32 |
---|
64 | | -/* |
---|
65 | | - * Write any modified data cache blocks out to memory and invalidate them. |
---|
66 | | - * Does not invalidate the corresponding instruction cache blocks. |
---|
| 32 | +void flush_icache_user_page(struct vm_area_struct *vma, struct page *page, |
---|
| 33 | + unsigned long addr, int len); |
---|
| 34 | +#define flush_icache_user_page flush_icache_user_page |
---|
| 35 | + |
---|
| 36 | +void flush_dcache_icache_page(struct page *page); |
---|
| 37 | +void __flush_dcache_icache(void *page); |
---|
| 38 | + |
---|
| 39 | +/** |
---|
| 40 | + * flush_dcache_range(): Write any modified data cache blocks out to memory and |
---|
| 41 | + * invalidate them. Does not invalidate the corresponding instruction cache |
---|
| 42 | + * blocks. |
---|
| 43 | + * |
---|
| 44 | + * @start: the start address |
---|
| 45 | + * @stop: the stop address (exclusive) |
---|
67 | 46 | */ |
---|
68 | 47 | static inline void flush_dcache_range(unsigned long start, unsigned long stop) |
---|
69 | 48 | { |
---|
70 | | - void *addr = (void *)(start & ~(L1_CACHE_BYTES - 1)); |
---|
71 | | - unsigned long size = stop - (unsigned long)addr + (L1_CACHE_BYTES - 1); |
---|
| 49 | + unsigned long shift = l1_dcache_shift(); |
---|
| 50 | + unsigned long bytes = l1_dcache_bytes(); |
---|
| 51 | + void *addr = (void *)(start & ~(bytes - 1)); |
---|
| 52 | + unsigned long size = stop - (unsigned long)addr + (bytes - 1); |
---|
72 | 53 | unsigned long i; |
---|
73 | 54 | |
---|
74 | | - for (i = 0; i < size >> L1_CACHE_SHIFT; i++, addr += L1_CACHE_BYTES) |
---|
| 55 | + if (IS_ENABLED(CONFIG_PPC64)) |
---|
| 56 | + mb(); /* sync */ |
---|
| 57 | + |
---|
| 58 | + for (i = 0; i < size >> shift; i++, addr += bytes) |
---|
75 | 59 | dcbf(addr); |
---|
76 | 60 | mb(); /* sync */ |
---|
| 61 | + |
---|
77 | 62 | } |
---|
78 | 63 | |
---|
79 | 64 | /* |
---|
.. | .. |
---|
83 | 68 | */ |
---|
static inline void clean_dcache_range(unsigned long start, unsigned long stop)
{
	unsigned long line_shift = l1_dcache_shift();
	unsigned long line_size = l1_dcache_bytes();
	void *p = (void *)(start & ~(line_size - 1));
	unsigned long len = stop - (unsigned long)p + (line_size - 1);
	unsigned long nr = len >> line_shift;	/* lines to clean (ceil) */

	/* dcbst writes each dirty line back without invalidating it */
	while (nr--) {
		dcbst(p);
		p += line_size;
	}
	mb();	/* sync */
}
---|
.. | .. |
---|
static inline void invalidate_dcache_range(unsigned long start,
					   unsigned long stop)
{
	unsigned long line_shift = l1_dcache_shift();
	unsigned long line_size = l1_dcache_bytes();
	void *p = (void *)(start & ~(line_size - 1));
	unsigned long len = stop - (unsigned long)p + (line_size - 1);
	unsigned long nr = len >> line_shift;	/* lines to invalidate (ceil) */

	/* dcbi discards each line; modified data in the range is lost */
	while (nr--) {
		dcbi(p);
		p += line_size;
	}
	mb();	/* sync */
}
---|
111 | 100 | |
---|
112 | | -#endif /* CONFIG_PPC32 */ |
---|
113 | | -#ifdef CONFIG_PPC64 |
---|
114 | | -extern void flush_dcache_range(unsigned long start, unsigned long stop); |
---|
115 | | -extern void flush_inval_dcache_range(unsigned long start, unsigned long stop); |
---|
#ifdef CONFIG_4xx
/*
 * Invalidate the instruction cache.
 *
 * NOTE(review): on 4xx a single iccci appears to invalidate the whole
 * I-cache, with the operand only needing to be a valid kernel address —
 * confirm against the 4xx core manual. isync then discards any
 * prefetched instructions.
 */
static inline void flush_instruction_cache(void)
{
	iccci((void *)KERNELBASE);
	isync();
}
#else
/* Non-4xx platforms provide an out-of-line implementation. */
void flush_instruction_cache(void);
#endif
---|
117 | 110 | |
---|
118 | | -#define copy_to_user_page(vma, page, vaddr, dst, src, len) \ |
---|
119 | | - do { \ |
---|
120 | | - memcpy(dst, src, len); \ |
---|
121 | | - flush_icache_user_range(vma, page, vaddr, len); \ |
---|
122 | | - } while (0) |
---|
123 | | -#define copy_from_user_page(vma, page, vaddr, dst, src, len) \ |
---|
124 | | - memcpy(dst, src, len) |
---|
125 | | - |
---|
126 | | -#endif /* __KERNEL__ */ |
---|
| 111 | +#include <asm-generic/cacheflush.h> |
---|
127 | 112 | |
---|
128 | 113 | #endif /* _ASM_POWERPC_CACHEFLUSH_H */ |
---|