forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-09-20 a36159eec6ca17402b0e146b86efaf76568dc353
kernel/arch/arm64/include/asm/barrier.h
@@ -1,24 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Based on arch/arm/include/asm/barrier.h
  *
  * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
  */
 #ifndef __ASM_BARRIER_H
 #define __ASM_BARRIER_H
 
 #ifndef __ASSEMBLY__
+
+#include <linux/kasan-checks.h>
 
 #define __nops(n)       ".rept " #n "\nnop\n.endr\n"
 #define nops(n)         asm volatile(__nops(n))
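
The __nops()/nops() pair above works purely at the preprocessor and assembler level: #n stringizes the count into a .rept/.endr block, so the requested number of NOPs is emitted at build time with no run-time loop. A standalone sketch of the expansion, not part of the patch (the macros are copied from the hunk above; assumes a GCC/Clang arm64 toolchain):

/* Illustration only: inspect the output with `gcc -S -O2` on arm64. */
#define __nops(n)       ".rept " #n "\nnop\n.endr\n"
#define nops(n)         asm volatile(__nops(n))

int main(void)
{
        /* __nops(3) stringizes to ".rept 3\nnop\n.endr\n", so the
         * assembler emits exactly three NOP instructions here. */
        nops(3);
        return 0;
}
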
@@ -32,14 +23,46 @@
 #define dsb(opt)        asm volatile("dsb " #opt : : : "memory")
 
 #define psb_csync()     asm volatile("hint #17" : : : "memory")
+#define __tsb_csync()   asm volatile("hint #18" : : : "memory")
 #define csdb()          asm volatile("hint #20" : : : "memory")
+
+#define spec_bar()      asm volatile(ALTERNATIVE("dsb nsh\nisb\n",      \
+                                                 SB_BARRIER_INSN"nop\n", \
+                                                 ARM64_HAS_SB))
+
+#ifdef CONFIG_ARM64_PSEUDO_NMI
+#define pmr_sync()                                              \
+       do {                                                     \
+               extern struct static_key_false gic_pmr_sync;     \
+                                                                \
+               if (static_branch_unlikely(&gic_pmr_sync))       \
+                       dsb(sy);                                 \
+       } while(0)
+#else
+#define pmr_sync()      do {} while (0)
+#endif
 
 #define mb()            dsb(sy)
 #define rmb()           dsb(ld)
 #define wmb()           dsb(st)
 
+#define dma_mb()        dmb(osh)
 #define dma_rmb()       dmb(oshld)
 #define dma_wmb()       dmb(oshst)
+
+
+#define tsb_csync()                                                     \
+       do {                                                             \
+               /*                                                       \
+                * CPUs affected by Arm Erratum 2054223 or 2067961 needs \
+                * another TSB to ensure the trace is flushed. The barriers \
+                * don't have to be strictly back to back, as long as the \
+                * CPU is in trace prohibited state.                     \
+                */                                                      \
+               if (cpus_have_final_cap(ARM64_WORKAROUND_TSB_FLUSH_FAILURE)) \
+                       __tsb_csync();                                   \
+               __tsb_csync();                                           \
+       } while (0)
 
 /*
  * Generate a mask for array_index__nospec() that is ~0UL when 0 <= idx < sz
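
The dma_*() barriers in this hunk use the outer-shareable DMB variants, so ordering is enforced against a DMA-capable observer rather than just other CPUs. A hypothetical, standalone sketch of the usual pattern, not part of the patch (the descriptor layout and function name are invented for illustration; dma_wmb() is written out as the dmb(oshst) it expands to above):

/* Publish a descriptor to a device: make the payload visible before the
 * ownership flag the device polls.  arm64 GCC/Clang, illustration only. */
#include <stdint.h>

#define dma_wmb()       asm volatile("dmb oshst" : : : "memory")

struct fake_desc {
        uint64_t buf_addr;
        uint32_t buf_len;
        volatile uint32_t owned_by_device;      /* device polls this field */
};

static void publish_desc(struct fake_desc *d, uint64_t addr, uint32_t len)
{
        d->buf_addr = addr;
        d->buf_len = len;
        /* Order the payload writes before the ownership write as seen by
         * the outer-shareable (DMA) observer. */
        dma_wmb();
        d->owned_by_device = 1;
}

int main(void)
{
        struct fake_desc d = { 0 };

        publish_desc(&d, 0x1000, 64);   /* the device side is imaginary here */
        return 0;
}
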
@@ -62,37 +85,58 @@
        return mask;
 }
 
+/*
+ * Ensure that reads of the counter are treated the same as memory reads
+ * for the purposes of ordering by subsequent memory barriers.
+ *
+ * This insanity brought to you by speculative system register reads,
+ * out-of-order memory accesses, sequence locks and Thomas Gleixner.
+ *
+ * http://lists.infradead.org/pipermail/linux-arm-kernel/2019-February/631195.html
+ */
+#define arch_counter_enforce_ordering(val) do {                         \
+       u64 tmp, _val = (val);                                           \
+                                                                        \
+       asm volatile(                                                    \
+       "       eor     %0, %1, %1\n"                                    \
+       "       add     %0, sp, %0\n"                                    \
+       "       ldr     xzr, [%0]"                                       \
+       : "=r" (tmp) : "r" (_val));                                      \
+} while (0)
+
 #define __smp_mb()      dmb(ish)
 #define __smp_rmb()     dmb(ishld)
 #define __smp_wmb()     dmb(ishst)
 
 #define __smp_store_release(p, v)                                       \
 do {                                                                    \
-       union { typeof(*p) __val; char __c[1]; } __u =                   \
-               { .__val = (__force typeof(*p)) (v) };                   \
+       typeof(p) __p = (p);                                             \
+       union { __unqual_scalar_typeof(*p) __val; char __c[1]; } __u =   \
+               { .__val = (__force __unqual_scalar_typeof(*p)) (v) };   \
        compiletime_assert_atomic_type(*p);                              \
+       kasan_check_write(__p, sizeof(*p));                              \
        switch (sizeof(*p)) {                                            \
        case 1:                                                          \
                asm volatile ("stlrb %w1, %0"                            \
-                               : "=Q" (*p)                              \
+                               : "=Q" (*__p)                            \
                                : "r" (*(__u8 *)__u.__c)                 \
                                : "memory");                             \
                break;                                                   \
        case 2:                                                          \
                asm volatile ("stlrh %w1, %0"                            \
-                               : "=Q" (*p)                              \
+                               : "=Q" (*__p)                            \
                                : "r" (*(__u16 *)__u.__c)                \
                                : "memory");                             \
                break;                                                   \
        case 4:                                                          \
                asm volatile ("stlr %w1, %0"                             \
-                               : "=Q" (*p)                              \
+                               : "=Q" (*__p)                            \
                                : "r" (*(__u32 *)__u.__c)                \
                                : "memory");                             \
                break;                                                   \
        case 8:                                                          \
                asm volatile ("stlr %1, %0"                              \
-                               : "=Q" (*p)                              \
+                               : "=Q" (*__p)                            \
                                : "r" (*(__u64 *)__u.__c)                \
                                : "memory");                             \
                break;                                                   \
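
The arch_counter_enforce_ordering() sequence added in this hunk turns a counter read into something later memory barriers can order: eor of the value with itself yields zero but carries a data dependency on the counter, adding that zero to sp produces a valid address, and the dummy load into xzr gives subsequent barriers a memory access that cannot issue before the counter value is available. A user-space sketch of the same trick, not part of the patch (assumes an arm64 Linux target where EL0 access to CNTVCT_EL0 is enabled, as it normally is):

/* Illustration only: read the virtual counter and apply the same
 * dependency trick so the read is ordered like a load. */
#include <stdint.h>
#include <stdio.h>

static inline uint64_t read_cntvct_ordered(void)
{
        uint64_t cnt, tmp;

        asm volatile("isb" : : : "memory");     /* no speculative read past here */
        asm volatile("mrs %0, cntvct_el0" : "=r" (cnt));
        /* Same sequence as arch_counter_enforce_ordering(): a dummy load
         * from an address that depends on cnt. */
        asm volatile(
        "       eor     %0, %1, %1\n"
        "       add     %0, sp, %0\n"
        "       ldr     xzr, [%0]"
        : "=r" (tmp) : "r" (cnt));
        (void)tmp;
        return cnt;
}

int main(void)
{
        printf("cntvct: %llu\n", (unsigned long long)read_cntvct_ordered());
        return 0;
}
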
@@ -101,57 +145,59 @@
 
 #define __smp_load_acquire(p)                                           \
 ({                                                                      \
-       union { typeof(*p) __val; char __c[1]; } __u;                    \
+       union { __unqual_scalar_typeof(*p) __val; char __c[1]; } __u;    \
+       typeof(p) __p = (p);                                             \
        compiletime_assert_atomic_type(*p);                              \
+       kasan_check_read(__p, sizeof(*p));                               \
        switch (sizeof(*p)) {                                            \
        case 1:                                                          \
                asm volatile ("ldarb %w0, %1"                            \
                        : "=r" (*(__u8 *)__u.__c)                        \
-                       : "Q" (*p) : "memory");                          \
+                       : "Q" (*__p) : "memory");                        \
                break;                                                   \
        case 2:                                                          \
                asm volatile ("ldarh %w0, %1"                            \
                        : "=r" (*(__u16 *)__u.__c)                       \
-                       : "Q" (*p) : "memory");                          \
+                       : "Q" (*__p) : "memory");                        \
                break;                                                   \
        case 4:                                                          \
                asm volatile ("ldar %w0, %1"                             \
                        : "=r" (*(__u32 *)__u.__c)                       \
-                       : "Q" (*p) : "memory");                          \
+                       : "Q" (*__p) : "memory");                        \
                break;                                                   \
        case 8:                                                          \
                asm volatile ("ldar %0, %1"                              \
                        : "=r" (*(__u64 *)__u.__c)                       \
-                       : "Q" (*p) : "memory");                          \
+                       : "Q" (*__p) : "memory");                        \
                break;                                                   \
        }                                                                \
-       __u.__val;                                                       \
+       (typeof(*p))__u.__val;                                           \
 })
 
 #define smp_cond_load_relaxed(ptr, cond_expr)                           \
 ({                                                                      \
        typeof(ptr) __PTR = (ptr);                                       \
-       typeof(*ptr) VAL;                                                \
+       __unqual_scalar_typeof(*ptr) VAL;                                \
        for (;;) {                                                       \
                VAL = READ_ONCE(*__PTR);                                 \
                if (cond_expr)                                           \
                        break;                                           \
                __cmpwait_relaxed(__PTR, VAL);                           \
        }                                                                \
-       VAL;                                                             \
+       (typeof(*ptr))VAL;                                               \
 })
 
 #define smp_cond_load_acquire(ptr, cond_expr)                           \
 ({                                                                      \
        typeof(ptr) __PTR = (ptr);                                       \
-       typeof(*ptr) VAL;                                                \
+       __unqual_scalar_typeof(*ptr) VAL;                                \
        for (;;) {                                                       \
                VAL = smp_load_acquire(__PTR);                           \
                if (cond_expr)                                           \
                        break;                                           \
                __cmpwait_relaxed(__PTR, VAL);                           \
        }                                                                \
-       VAL;                                                             \
+       (typeof(*ptr))VAL;                                               \
 })
 
 #include <asm-generic/barrier.h>
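
For context on the __smp_store_release()/__smp_load_acquire() and smp_cond_load_*() changes: the STLR/LDAR pairing they emit is the standard release/acquire message-passing idiom, and smp_cond_load_acquire() is the same wait loop with __cmpwait_relaxed() letting the CPU idle in WFE instead of busy-spinning. A standalone C11 sketch of that idiom, not part of the patch (thread structure and names are illustrative; on arm64 the release store and acquire load compile to the same stlr/ldar instructions used above):

/* Build with: cc -O2 -pthread acqrel.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static int payload;             /* plain data published by the writer */
static atomic_int ready;        /* flag carrying the release/acquire edge */

static void *writer(void *arg)
{
        (void)arg;
        payload = 42;                                   /* 1: write the data */
        atomic_store_explicit(&ready, 1,
                              memory_order_release);    /* 2: stlr on arm64 */
        return NULL;
}

static void *reader(void *arg)
{
        (void)arg;
        while (!atomic_load_explicit(&ready,
                                     memory_order_acquire))  /* ldar on arm64 */
                ;                                       /* spin until published */
        printf("payload = %d\n", payload);              /* guaranteed to see 42 */
        return NULL;
}

int main(void)
{
        pthread_t w, r;

        pthread_create(&r, NULL, reader, NULL);
        pthread_create(&w, NULL, writer, NULL);
        pthread_join(w, NULL);
        pthread_join(r, NULL);
        return 0;
}
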