2024-05-10 23fa18eaa71266feff7ba8d83022d9e1cc83c65a
kernel/include/asm-generic/cacheflush.h
@@ -1,35 +1,118 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_CACHEFLUSH_H
-#define __ASM_CACHEFLUSH_H
+#ifndef _ASM_GENERIC_CACHEFLUSH_H
+#define _ASM_GENERIC_CACHEFLUSH_H
 
-/* Keep includes the same across arches. */
-#include <linux/mm.h>
+struct mm_struct;
+struct vm_area_struct;
+struct page;
+struct address_space;
 
 /*
  * The cache doesn't need to be flushed when TLB entries change when
  * the cache is mapped to physical memory, not virtual memory
  */
-#define flush_cache_all() do { } while (0)
-#define flush_cache_mm(mm) do { } while (0)
-#define flush_cache_dup_mm(mm) do { } while (0)
-#define flush_cache_range(vma, start, end) do { } while (0)
-#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
-#define flush_dcache_page(page) do { } while (0)
-#define flush_dcache_mmap_lock(mapping) do { } while (0)
-#define flush_dcache_mmap_unlock(mapping) do { } while (0)
-#define flush_icache_range(start, end) do { } while (0)
-#define flush_icache_page(vma,pg) do { } while (0)
-#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
-#define flush_cache_vmap(start, end) do { } while (0)
-#define flush_cache_vunmap(start, end) do { } while (0)
+#ifndef flush_cache_all
+static inline void flush_cache_all(void)
+{
+}
+#endif
 
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
+#ifndef flush_cache_mm
+static inline void flush_cache_mm(struct mm_struct *mm)
+{
+}
+#endif
+
+#ifndef flush_cache_dup_mm
+static inline void flush_cache_dup_mm(struct mm_struct *mm)
+{
+}
+#endif
+
+#ifndef flush_cache_range
+static inline void flush_cache_range(struct vm_area_struct *vma,
+				     unsigned long start,
+				     unsigned long end)
+{
+}
+#endif
+
+#ifndef flush_cache_page
+static inline void flush_cache_page(struct vm_area_struct *vma,
+				    unsigned long vmaddr,
+				    unsigned long pfn)
+{
+}
+#endif
+
+#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
+static inline void flush_dcache_page(struct page *page)
+{
+}
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
+#endif
+
+
+#ifndef flush_dcache_mmap_lock
+static inline void flush_dcache_mmap_lock(struct address_space *mapping)
+{
+}
+#endif
+
+#ifndef flush_dcache_mmap_unlock
+static inline void flush_dcache_mmap_unlock(struct address_space *mapping)
+{
+}
+#endif
+
+#ifndef flush_icache_range
+static inline void flush_icache_range(unsigned long start, unsigned long end)
+{
+}
+#endif
+
+#ifndef flush_icache_user_range
+#define flush_icache_user_range flush_icache_range
+#endif
+
+#ifndef flush_icache_page
+static inline void flush_icache_page(struct vm_area_struct *vma,
+				     struct page *page)
+{
+}
+#endif
+
+#ifndef flush_icache_user_page
+static inline void flush_icache_user_page(struct vm_area_struct *vma,
+					  struct page *page,
+					  unsigned long addr, int len)
+{
+}
+#endif
+
+#ifndef flush_cache_vmap
+static inline void flush_cache_vmap(unsigned long start, unsigned long end)
+{
+}
+#endif
+
+#ifndef flush_cache_vunmap
+static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
+{
+}
+#endif
+
+#ifndef copy_to_user_page
+#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
 	do { \
 		memcpy(dst, src, len); \
-		flush_icache_user_range(vma, page, vaddr, len); \
+		flush_icache_user_page(vma, page, vaddr, len); \
 	} while (0)
+#endif
+
+#ifndef copy_from_user_page
 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
 	memcpy(dst, src, len)
+#endif
 
-#endif /* __ASM_CACHEFLUSH_H */
+#endif /* _ASM_GENERIC_CACHEFLUSH_H */
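
The #ifndef guards give each hook an override point: an architecture defines a macro of the same name in its own asm/cacheflush.h before including the generic header, and inherits the no-op stubs for every hook it leaves undefined. Below is a minimal sketch of the consuming side; the arch name and flush routine are hypothetical, not taken from any real port.

/* arch/example/include/asm/cacheflush.h -- hypothetical example port */
#ifndef _ASM_EXAMPLE_CACHEFLUSH_H
#define _ASM_EXAMPLE_CACHEFLUSH_H

/* Real icache maintenance for this hypothetical machine. */
extern void example_flush_icache_range(unsigned long start,
				       unsigned long end);

/*
 * Defining the name first makes the "#ifndef flush_icache_range"
 * guard in asm-generic/cacheflush.h skip its empty stub; every hook
 * left undefined here still gets its no-op default from the generic
 * header.
 */
#define flush_icache_range example_flush_icache_range

#include <asm-generic/cacheflush.h>

#endif /* _ASM_EXAMPLE_CACHEFLUSH_H */

One consequence of the diff worth noting: copy_to_user_page() now ends in flush_icache_user_page(), which has its own empty default, so overriding flush_icache_range alone does not cover user-page copies; a port that needs icache maintenance there must also define flush_icache_user_page.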