+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Based on arch/arm/include/asm/barrier.h
  *
  * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
  */
 #ifndef __ASM_BARRIER_H
 #define __ASM_BARRIER_H
 
 #ifndef __ASSEMBLY__
+
+#include <linux/kasan-checks.h>
 
 #define __nops(n)	".rept " #n "\nnop\n.endr\n"
 #define nops(n)		asm volatile(__nops(n))
---|
...
 #define dsb(opt)	asm volatile("dsb " #opt : : : "memory")
 
 #define psb_csync()	asm volatile("hint #17" : : : "memory")
+#define __tsb_csync()	asm volatile("hint #18" : : : "memory")
 #define csdb()		asm volatile("hint #20" : : : "memory")
+
+#define spec_bar()	asm volatile(ALTERNATIVE("dsb nsh\nisb\n",		\
+				     SB_BARRIER_INSN"nop\n",			\
+				     ARM64_HAS_SB))
+
+#ifdef CONFIG_ARM64_PSEUDO_NMI
+#define pmr_sync()						\
+	do {							\
+		extern struct static_key_false gic_pmr_sync;	\
+								\
+		if (static_branch_unlikely(&gic_pmr_sync))	\
+			dsb(sy);				\
+	} while(0)
+#else
+#define pmr_sync()	do {} while (0)
+#endif
 
 #define mb()		dsb(sy)
 #define rmb()		dsb(ld)
 #define wmb()		dsb(st)
 
+#define dma_mb()	dmb(osh)
 #define dma_rmb()	dmb(oshld)
 #define dma_wmb()	dmb(oshst)
+
+
+#define tsb_csync()								\
+	do {									\
+		/*								\
+		 * CPUs affected by Arm Erratum 2054223 or 2067961 needs	\
+		 * another TSB to ensure the trace is flushed. The barriers	\
+		 * don't have to be strictly back to back, as long as the	\
+		 * CPU is in trace prohibited state.				\
+		 */								\
+		if (cpus_have_final_cap(ARM64_WORKAROUND_TSB_FLUSH_FAILURE))	\
+			__tsb_csync();						\
+		__tsb_csync();							\
+	} while (0)
 
 /*
  * Generate a mask for array_index__nospec() that is ~0UL when 0 <= idx < sz
---|
...
 	return mask;
 }
 
+/*
+ * Ensure that reads of the counter are treated the same as memory reads
+ * for the purposes of ordering by subsequent memory barriers.
+ *
+ * This insanity brought to you by speculative system register reads,
+ * out-of-order memory accesses, sequence locks and Thomas Gleixner.
+ *
+ * http://lists.infradead.org/pipermail/linux-arm-kernel/2019-February/631195.html
+ */
+#define arch_counter_enforce_ordering(val) do {				\
+	u64 tmp, _val = (val);						\
+									\
+	asm volatile(							\
+	"	eor	%0, %1, %1\n"					\
+	"	add	%0, sp, %0\n"					\
+	"	ldr	xzr, [%0]"					\
+	: "=r" (tmp) : "r" (_val));					\
+} while (0)
+
 #define __smp_mb()	dmb(ish)
 #define __smp_rmb()	dmb(ishld)
 #define __smp_wmb()	dmb(ishst)
 
 #define __smp_store_release(p, v)					\
 do {									\
-	union { typeof(*p) __val; char __c[1]; } __u =			\
-		{ .__val = (__force typeof(*p)) (v) };			\
+	typeof(p) __p = (p);						\
+	union { __unqual_scalar_typeof(*p) __val; char __c[1]; } __u =	\
+		{ .__val = (__force __unqual_scalar_typeof(*p)) (v) };	\
 	compiletime_assert_atomic_type(*p);				\
+	kasan_check_write(__p, sizeof(*p));				\
 	switch (sizeof(*p)) {						\
 	case 1:								\
 		asm volatile ("stlrb %w1, %0"				\
-				: "=Q" (*p)				\
+				: "=Q" (*__p)				\
 				: "r" (*(__u8 *)__u.__c)		\
 				: "memory");				\
 		break;							\
 	case 2:								\
 		asm volatile ("stlrh %w1, %0"				\
-				: "=Q" (*p)				\
+				: "=Q" (*__p)				\
 				: "r" (*(__u16 *)__u.__c)		\
 				: "memory");				\
 		break;							\
 	case 4:								\
 		asm volatile ("stlr %w1, %0"				\
-				: "=Q" (*p)				\
+				: "=Q" (*__p)				\
 				: "r" (*(__u32 *)__u.__c)		\
 				: "memory");				\
 		break;							\
 	case 8:								\
 		asm volatile ("stlr %1, %0"				\
-				: "=Q" (*p)				\
+				: "=Q" (*__p)				\
 				: "r" (*(__u64 *)__u.__c)		\
 				: "memory");				\
 		break;							\
---|
...
 
 #define __smp_load_acquire(p)						\
 ({									\
-	union { typeof(*p) __val; char __c[1]; } __u;			\
+	union { __unqual_scalar_typeof(*p) __val; char __c[1]; } __u;	\
+	typeof(p) __p = (p);						\
 	compiletime_assert_atomic_type(*p);				\
+	kasan_check_read(__p, sizeof(*p));				\
 	switch (sizeof(*p)) {						\
 	case 1:								\
 		asm volatile ("ldarb %w0, %1"				\
 			: "=r" (*(__u8 *)__u.__c)			\
-			: "Q" (*p) : "memory");				\
+			: "Q" (*__p) : "memory");			\
 		break;							\
 	case 2:								\
 		asm volatile ("ldarh %w0, %1"				\
 			: "=r" (*(__u16 *)__u.__c)			\
-			: "Q" (*p) : "memory");				\
+			: "Q" (*__p) : "memory");			\
 		break;							\
 	case 4:								\
 		asm volatile ("ldar %w0, %1"				\
 			: "=r" (*(__u32 *)__u.__c)			\
-			: "Q" (*p) : "memory");				\
+			: "Q" (*__p) : "memory");			\
 		break;							\
 	case 8:								\
 		asm volatile ("ldar %0, %1"				\
 			: "=r" (*(__u64 *)__u.__c)			\
-			: "Q" (*p) : "memory");				\
+			: "Q" (*__p) : "memory");			\
 		break;							\
 	}								\
-	__u.__val;							\
+	(typeof(*p))__u.__val;						\
 })
 
 #define smp_cond_load_relaxed(ptr, cond_expr)				\
 ({									\
 	typeof(ptr) __PTR = (ptr);					\
-	typeof(*ptr) VAL;						\
+	__unqual_scalar_typeof(*ptr) VAL;				\
 	for (;;) {							\
 		VAL = READ_ONCE(*__PTR);				\
 		if (cond_expr)						\
 			break;						\
 		__cmpwait_relaxed(__PTR, VAL);				\
 	}								\
-	VAL;								\
+	(typeof(*ptr))VAL;						\
 })
 
 #define smp_cond_load_acquire(ptr, cond_expr)				\
 ({									\
 	typeof(ptr) __PTR = (ptr);					\
-	typeof(*ptr) VAL;						\
+	__unqual_scalar_typeof(*ptr) VAL;				\
 	for (;;) {							\
 		VAL = smp_load_acquire(__PTR);				\
 		if (cond_expr)						\
 			break;						\
 		__cmpwait_relaxed(__PTR, VAL);				\
 	}								\
-	VAL;								\
+	(typeof(*ptr))VAL;						\
 })
 
 #include <asm-generic/barrier.h>
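
The acquire/release macros patched above are normally reached through the generic smp_store_release()/smp_load_acquire()/smp_cond_load_acquire() wrappers pulled in by the asm-generic/barrier.h include at the end of the file. As a minimal, illustrative sketch of the message-passing pattern these STLR/LDAR-based definitions serve (msg_data, msg_ready, publish() and consume() are hypothetical names, not part of this patch):

#include <asm/barrier.h>

static int msg_data;
static int msg_ready;

/* Writer: publish the payload, then release-store the flag. */
static void publish(int v)
{
	msg_data = v;				/* plain store */
	smp_store_release(&msg_ready, 1);	/* STLR: the store above cannot be reordered after this */
}

/* Reader: acquire-load the flag; the data load cannot be hoisted above it. */
static int consume(void)
{
	/* Spins on an LDAR-based load (via __cmpwait_relaxed) until the flag is set. */
	smp_cond_load_acquire(&msg_ready, VAL != 0);
	return msg_data;
}

Note how the hunks above only swap typeof(*p) for __unqual_scalar_typeof(*p) on the temporaries and cast back to typeof(*p)/typeof(*ptr) on the way out, so the qualifiers of the pointed-to object are dropped only for the duration of the access.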
---|