.. | .. |
| 1 | +/* SPDX-License-Identifier: GPL-2.0-only */ |
1 | 2 | /* |
2 | 3 | * Copyright (C) 2013 ARM Ltd. |
3 | | - * |
4 | | - * This program is free software; you can redistribute it and/or modify |
5 | | - * it under the terms of the GNU General Public License version 2 as |
6 | | - * published by the Free Software Foundation. |
7 | | - * |
8 | | - * This program is distributed in the hope that it will be useful, |
9 | | - * but WITHOUT ANY WARRANTY; without even the implied warranty of |
10 | | - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
11 | | - * GNU General Public License for more details. |
12 | | - * |
13 | | - * You should have received a copy of the GNU General Public License |
14 | | - * along with this program. If not, see <http://www.gnu.org/licenses/>. |
15 | 4 | */ |
16 | 5 | #ifndef __ASM_PERCPU_H |
17 | 6 | #define __ASM_PERCPU_H |
.. | .. |
21 | 10 | #include <asm/alternative.h> |
22 | 11 | #include <asm/cmpxchg.h> |
23 | 12 | #include <asm/stack_pointer.h> |
| 13 | +#include <asm/sysreg.h> |
24 | 14 | |
25 | 15 | static inline void set_my_cpu_offset(unsigned long off) |
26 | 16 | { |
.. | .. |
30 | 20 | :: "r" (off) : "memory"); |
31 | 21 | } |
32 | 22 | |
33 | | -static inline unsigned long __my_cpu_offset(void) |
| 23 | +static inline unsigned long __hyp_my_cpu_offset(void) |
| 24 | +{ |
| 25 | + /* |
| 26 | + * Non-VHE hyp code runs with preemption disabled. No need to hazard |
| 27 | + * the register access against barrier() as in __kern_my_cpu_offset. |
| 28 | + */ |
| 29 | + return read_sysreg(tpidr_el2); |
| 30 | +} |
| 31 | + |
| 32 | +static inline unsigned long __kern_my_cpu_offset(void) |
34 | 33 | { |
35 | 34 | unsigned long off; |
36 | 35 | |
.. | .. |
46 | 45 | |
47 | 46 | return off; |
48 | 47 | } |
49 | | -#define __my_cpu_offset __my_cpu_offset() |
50 | 48 | |
51 | | -#define PERCPU_OP(op, asm_op) \ |
52 | | -static inline unsigned long __percpu_##op(void *ptr, \ |
53 | | - unsigned long val, int size) \ |
| 49 | +#ifdef __KVM_NVHE_HYPERVISOR__ |
| 50 | +#define __my_cpu_offset __hyp_my_cpu_offset() |
| 51 | +#else |
| 52 | +#define __my_cpu_offset __kern_my_cpu_offset() |
| 53 | +#endif |
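
The net effect of the hunk above: each execution context sources its per-CPU offset from the thread-pointer register it owns, behind the single __my_cpu_offset macro. A minimal illustrative probe (the __kern_my_cpu_offset body is elided in the hunk; upstream it reads TPIDR_EL1, with an alternative patching in TPIDR_EL2 on VHE hosts):

static inline unsigned long show_my_offset(void)
{
	/* nVHE hyp object: __hyp_my_cpu_offset() -> TPIDR_EL2.
	 * Kernel proper:   __kern_my_cpu_offset() -> TPIDR_EL1
	 * (or TPIDR_EL2 on VHE, via the patched alternative).
	 */
	return __my_cpu_offset;
}
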
| 54 | + |
| 55 | +#define PERCPU_RW_OPS(sz) \ |
| 56 | +static inline unsigned long __percpu_read_##sz(void *ptr) \ |
54 | 57 | { \ |
55 | | - unsigned long loop, ret; \ |
| 58 | + return READ_ONCE(*(u##sz *)ptr); \ |
| 59 | +} \ |
56 | 60 | \ |
57 | | - switch (size) { \ |
58 | | - case 1: \ |
59 | | - asm ("//__per_cpu_" #op "_1\n" \ |
60 | | - "1: ldxrb %w[ret], %[ptr]\n" \ |
61 | | - #asm_op " %w[ret], %w[ret], %w[val]\n" \ |
62 | | - " stxrb %w[loop], %w[ret], %[ptr]\n" \ |
63 | | - " cbnz %w[loop], 1b" \ |
64 | | - : [loop] "=&r" (loop), [ret] "=&r" (ret), \ |
65 | | - [ptr] "+Q"(*(u8 *)ptr) \ |
66 | | - : [val] "Ir" (val)); \ |
67 | | - break; \ |
68 | | - case 2: \ |
69 | | - asm ("//__per_cpu_" #op "_2\n" \ |
70 | | - "1: ldxrh %w[ret], %[ptr]\n" \ |
71 | | - #asm_op " %w[ret], %w[ret], %w[val]\n" \ |
72 | | - " stxrh %w[loop], %w[ret], %[ptr]\n" \ |
73 | | - " cbnz %w[loop], 1b" \ |
74 | | - : [loop] "=&r" (loop), [ret] "=&r" (ret), \ |
75 | | - [ptr] "+Q"(*(u16 *)ptr) \ |
76 | | - : [val] "Ir" (val)); \ |
77 | | - break; \ |
78 | | - case 4: \ |
79 | | - asm ("//__per_cpu_" #op "_4\n" \ |
80 | | - "1: ldxr %w[ret], %[ptr]\n" \ |
81 | | - #asm_op " %w[ret], %w[ret], %w[val]\n" \ |
82 | | - " stxr %w[loop], %w[ret], %[ptr]\n" \ |
83 | | - " cbnz %w[loop], 1b" \ |
84 | | - : [loop] "=&r" (loop), [ret] "=&r" (ret), \ |
85 | | - [ptr] "+Q"(*(u32 *)ptr) \ |
86 | | - : [val] "Ir" (val)); \ |
87 | | - break; \ |
88 | | - case 8: \ |
89 | | - asm ("//__per_cpu_" #op "_8\n" \ |
90 | | - "1: ldxr %[ret], %[ptr]\n" \ |
91 | | - #asm_op " %[ret], %[ret], %[val]\n" \ |
92 | | - " stxr %w[loop], %[ret], %[ptr]\n" \ |
93 | | - " cbnz %w[loop], 1b" \ |
94 | | - : [loop] "=&r" (loop), [ret] "=&r" (ret), \ |
95 | | - [ptr] "+Q"(*(u64 *)ptr) \ |
96 | | - : [val] "Ir" (val)); \ |
97 | | - break; \ |
98 | | - default: \ |
99 | | - ret = 0; \ |
100 | | - BUILD_BUG(); \ |
101 | | - } \ |
| 61 | +static inline void __percpu_write_##sz(void *ptr, unsigned long val) \ |
| 62 | +{ \ |
| 63 | + WRITE_ONCE(*(u##sz *)ptr, (u##sz)val); \ |
| 64 | +} |
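
For reference, PERCPU_RW_OPS(32) (instantiated further down) expands to the following pair; plain READ_ONCE/WRITE_ONCE accesses suffice here because the preemption-off window in the this_cpu_* wrappers makes the data CPU-local for the duration:

static inline unsigned long __percpu_read_32(void *ptr)
{
	return READ_ONCE(*(u32 *)ptr);
}

static inline void __percpu_write_32(void *ptr, unsigned long val)
{
	WRITE_ONCE(*(u32 *)ptr, (u32)val);
}
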
| 65 | + |
| 66 | +#define __PERCPU_OP_CASE(w, sfx, name, sz, op_llsc, op_lse) \ |
| 67 | +static inline void \ |
| 68 | +__percpu_##name##_case_##sz(void *ptr, unsigned long val) \ |
| 69 | +{ \ |
| 70 | + unsigned int loop; \ |
| 71 | + u##sz tmp; \ |
| 72 | + \ |
| 73 | + asm volatile (ARM64_LSE_ATOMIC_INSN( \ |
| 74 | + /* LL/SC */ \ |
| 75 | + "1: ldxr" #sfx "\t%" #w "[tmp], %[ptr]\n" \ |
| 76 | + #op_llsc "\t%" #w "[tmp], %" #w "[tmp], %" #w "[val]\n" \ |
| 77 | + " stxr" #sfx "\t%w[loop], %" #w "[tmp], %[ptr]\n" \ |
| 78 | + " cbnz %w[loop], 1b", \ |
| 79 | + /* LSE atomics */ \ |
| 80 | + #op_lse "\t%" #w "[val], %[ptr]\n" \ |
| 81 | + __nops(3)) \ |
| 82 | + : [loop] "=&r" (loop), [tmp] "=&r" (tmp), \ |
| 83 | + [ptr] "+Q"(*(u##sz *)ptr) \ |
| 84 | + : [val] "r" ((u##sz)(val))); \ |
| 85 | +} |
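
Concretely, __PERCPU_OP_CASE(w, , add, 32, add, stadd) generates roughly the function below. ARM64_LSE_ATOMIC_INSN emits the LL/SC sequence and, on CPUs with the LSE atomics, patches in the second sequence at boot; __nops(3) pads the one-instruction LSE form to the same length as the four-instruction LL/SC loop:

static inline void __percpu_add_case_32(void *ptr, unsigned long val)
{
	unsigned int loop;
	u32 tmp;

	asm volatile (ARM64_LSE_ATOMIC_INSN(
	/* LL/SC: load-exclusive, add, store-exclusive, retry on failure */
	"1:	ldxr	%w[tmp], %[ptr]\n"
	"	add	%w[tmp], %w[tmp], %w[val]\n"
	"	stxr	%w[loop], %w[tmp], %[ptr]\n"
	"	cbnz	%w[loop], 1b",
	/* LSE: a single atomic store-add, padded to the same length */
	"	stadd	%w[val], %[ptr]\n"
	__nops(3))
	: [loop] "=&r" (loop), [tmp] "=&r" (tmp),
	  [ptr] "+Q"(*(u32 *)ptr)
	: [val] "r" ((u32)(val)));
}
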
| 86 | + |
| 87 | +#define __PERCPU_RET_OP_CASE(w, sfx, name, sz, op_llsc, op_lse) \ |
| 88 | +static inline u##sz \ |
| 89 | +__percpu_##name##_return_case_##sz(void *ptr, unsigned long val) \ |
| 90 | +{ \ |
| 91 | + unsigned int loop; \ |
| 92 | + u##sz ret; \ |
| 93 | + \ |
| 94 | + asm volatile (ARM64_LSE_ATOMIC_INSN( \ |
| 95 | + /* LL/SC */ \ |
| 96 | + "1: ldxr" #sfx "\t%" #w "[ret], %[ptr]\n" \ |
| 97 | + #op_llsc "\t%" #w "[ret], %" #w "[ret], %" #w "[val]\n" \ |
| 98 | + " stxr" #sfx "\t%w[loop], %" #w "[ret], %[ptr]\n" \ |
| 99 | + " cbnz %w[loop], 1b", \ |
| 100 | + /* LSE atomics */ \ |
| 101 | + #op_lse "\t%" #w "[val], %" #w "[ret], %[ptr]\n" \ |
| 102 | + #op_llsc "\t%" #w "[ret], %" #w "[ret], %" #w "[val]\n" \ |
| 103 | + __nops(2)) \ |
| 104 | + : [loop] "=&r" (loop), [ret] "=&r" (ret), \ |
| 105 | + [ptr] "+Q"(*(u##sz *)ptr) \ |
| 106 | + : [val] "r" ((u##sz)(val))); \ |
102 | 107 | \ |
103 | 108 | return ret; \ |
104 | 109 | } |
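
The return variant has one subtlety in its LSE branch: ldadd deposits the *old* memory value in the destination register, so a second ALU instruction (op_llsc reused as a plain add) recomputes the post-operation value. In C terms, __percpu_add_return_case_64 behaves like this sketch (an illustrative model, not the generated code):

/* C model of the add-return semantics on a CPU-local u64. */
static inline u64 model_percpu_add_return_64(u64 *ptr, u64 val)
{
	u64 old = *ptr;		/* ldadd returns the pre-op value...       */

	*ptr = old + val;	/* ...while memory receives the new one... */
	return old + val;	/* ...so the trailing add recomputes it.   */
}
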
105 | 110 | |
106 | | -PERCPU_OP(add, add) |
107 | | -PERCPU_OP(and, and) |
108 | | -PERCPU_OP(or, orr) |
| 111 | +#define PERCPU_OP(name, op_llsc, op_lse) \ |
| 112 | + __PERCPU_OP_CASE(w, b, name, 8, op_llsc, op_lse) \ |
| 113 | + __PERCPU_OP_CASE(w, h, name, 16, op_llsc, op_lse) \ |
| 114 | + __PERCPU_OP_CASE(w, , name, 32, op_llsc, op_lse) \ |
| 115 | + __PERCPU_OP_CASE( , , name, 64, op_llsc, op_lse) |
| 116 | + |
| 117 | +#define PERCPU_RET_OP(name, op_llsc, op_lse) \ |
| 118 | + __PERCPU_RET_OP_CASE(w, b, name, 8, op_llsc, op_lse) \ |
| 119 | + __PERCPU_RET_OP_CASE(w, h, name, 16, op_llsc, op_lse) \ |
| 120 | + __PERCPU_RET_OP_CASE(w, , name, 32, op_llsc, op_lse) \ |
| 121 | + __PERCPU_RET_OP_CASE( , , name, 64, op_llsc, op_lse) |
| 122 | + |
| 123 | +PERCPU_RW_OPS(8) |
| 124 | +PERCPU_RW_OPS(16) |
| 125 | +PERCPU_RW_OPS(32) |
| 126 | +PERCPU_RW_OPS(64) |
| 127 | +PERCPU_OP(add, add, stadd) |
| 128 | +PERCPU_OP(andnot, bic, stclr) |
| 129 | +PERCPU_OP(or, orr, stset) |
| 130 | +PERCPU_RET_OP(add, add, ldadd) |
| 131 | + |
| 132 | +#undef PERCPU_RW_OPS |
| 133 | +#undef __PERCPU_OP_CASE |
| 134 | +#undef __PERCPU_RET_OP_CASE |
109 | 135 | #undef PERCPU_OP |
| 136 | +#undef PERCPU_RET_OP |
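
Note PERCPU_OP(andnot, bic, stclr): the LSE instruction set offers an atomic bit-clear (stclr) rather than a plain AND, so AND is expressed as bit-clear of the complement. The this_cpu_and_* wrappers below pass ~val, relying on the identity in this small sketch:

/* BIC/STCLR semantics: clear the bits set in mask. */
static inline u64 bic64(u64 x, u64 mask)
{
	return x & ~mask;
}

/* Hence this_cpu_and(pcp, val) == andnot(pcp, ~val), since
 * bic64(x, ~val) == x & ~~val == x & val.
 */
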
110 | 137 | |
111 | | -static inline unsigned long __percpu_read(void *ptr, int size) |
112 | | -{ |
113 | | - unsigned long ret; |
114 | | - |
115 | | - switch (size) { |
116 | | - case 1: |
117 | | - ret = READ_ONCE(*(u8 *)ptr); |
118 | | - break; |
119 | | - case 2: |
120 | | - ret = READ_ONCE(*(u16 *)ptr); |
121 | | - break; |
122 | | - case 4: |
123 | | - ret = READ_ONCE(*(u32 *)ptr); |
124 | | - break; |
125 | | - case 8: |
126 | | - ret = READ_ONCE(*(u64 *)ptr); |
127 | | - break; |
128 | | - default: |
129 | | - ret = 0; |
130 | | - BUILD_BUG(); |
131 | | - } |
132 | | - |
133 | | - return ret; |
134 | | -} |
135 | | - |
136 | | -static inline void __percpu_write(void *ptr, unsigned long val, int size) |
137 | | -{ |
138 | | - switch (size) { |
139 | | - case 1: |
140 | | - WRITE_ONCE(*(u8 *)ptr, (u8)val); |
141 | | - break; |
142 | | - case 2: |
143 | | - WRITE_ONCE(*(u16 *)ptr, (u16)val); |
144 | | - break; |
145 | | - case 4: |
146 | | - WRITE_ONCE(*(u32 *)ptr, (u32)val); |
147 | | - break; |
148 | | - case 8: |
149 | | - WRITE_ONCE(*(u64 *)ptr, (u64)val); |
150 | | - break; |
151 | | - default: |
152 | | - BUILD_BUG(); |
153 | | - } |
154 | | -} |
155 | | - |
156 | | -static inline unsigned long __percpu_xchg(void *ptr, unsigned long val, |
157 | | - int size) |
158 | | -{ |
159 | | - unsigned long ret, loop; |
160 | | - |
161 | | - switch (size) { |
162 | | - case 1: |
163 | | - asm ("//__percpu_xchg_1\n" |
164 | | - "1: ldxrb %w[ret], %[ptr]\n" |
165 | | - " stxrb %w[loop], %w[val], %[ptr]\n" |
166 | | - " cbnz %w[loop], 1b" |
167 | | - : [loop] "=&r"(loop), [ret] "=&r"(ret), |
168 | | - [ptr] "+Q"(*(u8 *)ptr) |
169 | | - : [val] "r" (val)); |
170 | | - break; |
171 | | - case 2: |
172 | | - asm ("//__percpu_xchg_2\n" |
173 | | - "1: ldxrh %w[ret], %[ptr]\n" |
174 | | - " stxrh %w[loop], %w[val], %[ptr]\n" |
175 | | - " cbnz %w[loop], 1b" |
176 | | - : [loop] "=&r"(loop), [ret] "=&r"(ret), |
177 | | - [ptr] "+Q"(*(u16 *)ptr) |
178 | | - : [val] "r" (val)); |
179 | | - break; |
180 | | - case 4: |
181 | | - asm ("//__percpu_xchg_4\n" |
182 | | - "1: ldxr %w[ret], %[ptr]\n" |
183 | | - " stxr %w[loop], %w[val], %[ptr]\n" |
184 | | - " cbnz %w[loop], 1b" |
185 | | - : [loop] "=&r"(loop), [ret] "=&r"(ret), |
186 | | - [ptr] "+Q"(*(u32 *)ptr) |
187 | | - : [val] "r" (val)); |
188 | | - break; |
189 | | - case 8: |
190 | | - asm ("//__percpu_xchg_8\n" |
191 | | - "1: ldxr %[ret], %[ptr]\n" |
192 | | - " stxr %w[loop], %[val], %[ptr]\n" |
193 | | - " cbnz %w[loop], 1b" |
194 | | - : [loop] "=&r"(loop), [ret] "=&r"(ret), |
195 | | - [ptr] "+Q"(*(u64 *)ptr) |
196 | | - : [val] "r" (val)); |
197 | | - break; |
198 | | - default: |
199 | | - ret = 0; |
200 | | - BUILD_BUG(); |
201 | | - } |
202 | | - |
203 | | - return ret; |
204 | | -} |
205 | | - |
206 | | -/* this_cpu_cmpxchg */ |
207 | | -#define _protect_cmpxchg_local(pcp, o, n) \ |
208 | | -({ \ |
209 | | - typeof(*raw_cpu_ptr(&(pcp))) __ret; \ |
210 | | - preempt_disable(); \ |
211 | | - __ret = cmpxchg_local(raw_cpu_ptr(&(pcp)), o, n); \ |
212 | | - preempt_enable(); \ |
213 | | - __ret; \ |
214 | | -}) |
215 | | - |
216 | | -#define this_cpu_cmpxchg_1(ptr, o, n) _protect_cmpxchg_local(ptr, o, n) |
217 | | -#define this_cpu_cmpxchg_2(ptr, o, n) _protect_cmpxchg_local(ptr, o, n) |
218 | | -#define this_cpu_cmpxchg_4(ptr, o, n) _protect_cmpxchg_local(ptr, o, n) |
219 | | -#define this_cpu_cmpxchg_8(ptr, o, n) _protect_cmpxchg_local(ptr, o, n) |
220 | | - |
| 138 | +/* |
| 139 | + * It would be nice to avoid the conditional call into the scheduler when |
| 140 | + * re-enabling preemption for preemptible kernels, but doing that in a way |
| 141 | + * which builds inside a module would mean messing directly with the preempt |
| 142 | + * count. If you do this, peterz and tglx will hunt you down. |
| 143 | + */ |
221 | 144 | #define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2) \ |
222 | 145 | ({ \ |
223 | 146 | int __ret; \ |
224 | | - preempt_disable(); \ |
| 147 | + preempt_disable_notrace(); \ |
225 | 148 | __ret = cmpxchg_double_local( raw_cpu_ptr(&(ptr1)), \ |
226 | 149 | raw_cpu_ptr(&(ptr2)), \ |
227 | 150 | o1, o2, n1, n2); \ |
228 | | - preempt_enable(); \ |
| 151 | + preempt_enable_notrace(); \ |
229 | 152 | __ret; \ |
230 | 153 | }) |
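
this_cpu_cmpxchg_double_8 operates on two adjacent doubleword slots; the best-known user is the slub allocator's freelist/tid pair. A hypothetical usage sketch (the struct and variable names are illustrative; the two fields must be doubleword-sized and suitably aligned for the double-word compare-and-swap):

struct two_slots {
	unsigned long first;
	unsigned long second;
} __aligned(16);	/* keep the pair in one naturally aligned 16-byte unit */

static DEFINE_PER_CPU(struct two_slots, slots);

static bool try_update(unsigned long o1, unsigned long o2,
		       unsigned long n1, unsigned long n2)
{
	/* Succeeds (returns 1) only if both slots still hold o1/o2. */
	return this_cpu_cmpxchg_double(slots.first, slots.second,
				       o1, o2, n1, n2);
}
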
231 | 154 | |
232 | | -#define _percpu_read(pcp) \ |
| 155 | +#define _pcp_protect(op, pcp, ...) \ |
| 156 | +({ \ |
| 157 | + preempt_disable_notrace(); \ |
| 158 | + op(raw_cpu_ptr(&(pcp)), __VA_ARGS__); \ |
| 159 | + preempt_enable_notrace(); \ |
| 160 | +}) |
| 161 | + |
| 162 | +#define _pcp_protect_return(op, pcp, args...) \ |
233 | 163 | ({ \ |
234 | 164 | typeof(pcp) __retval; \ |
235 | 165 | preempt_disable_notrace(); \ |
236 | | - __retval = (typeof(pcp))__percpu_read(raw_cpu_ptr(&(pcp)), \ |
237 | | - sizeof(pcp)); \ |
| 166 | + __retval = (typeof(pcp))op(raw_cpu_ptr(&(pcp)), ##args); \ |
238 | 167 | preempt_enable_notrace(); \ |
239 | 168 | __retval; \ |
240 | 169 | }) |
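
Putting _pcp_protect_return together with a reader, this_cpu_read_4(pcp) (defined just below) expands to roughly:

({
	typeof(pcp) __retval;
	preempt_disable_notrace();
	__retval = (typeof(pcp))__percpu_read_32(raw_cpu_ptr(&(pcp)));
	preempt_enable_notrace();
	__retval;
})

The _notrace variants matter because these accessors are used from the tracing machinery itself; a traced preempt_enable() could recurse.
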
241 | 170 | |
242 | | -#define _percpu_write(pcp, val) \ |
243 | | -do { \ |
244 | | - preempt_disable_notrace(); \ |
245 | | - __percpu_write(raw_cpu_ptr(&(pcp)), (unsigned long)(val), \ |
246 | | - sizeof(pcp)); \ |
247 | | - preempt_enable_notrace(); \ |
248 | | -} while(0) \ |
| 171 | +#define this_cpu_read_1(pcp) \ |
| 172 | + _pcp_protect_return(__percpu_read_8, pcp) |
| 173 | +#define this_cpu_read_2(pcp) \ |
| 174 | + _pcp_protect_return(__percpu_read_16, pcp) |
| 175 | +#define this_cpu_read_4(pcp) \ |
| 176 | + _pcp_protect_return(__percpu_read_32, pcp) |
| 177 | +#define this_cpu_read_8(pcp) \ |
| 178 | + _pcp_protect_return(__percpu_read_64, pcp) |
249 | 179 | |
250 | | -#define _pcp_protect(operation, pcp, val) \ |
251 | | -({ \ |
252 | | - typeof(pcp) __retval; \ |
253 | | - preempt_disable(); \ |
254 | | - __retval = (typeof(pcp))operation(raw_cpu_ptr(&(pcp)), \ |
255 | | - (val), sizeof(pcp)); \ |
256 | | - preempt_enable(); \ |
257 | | - __retval; \ |
258 | | -}) |
| 180 | +#define this_cpu_write_1(pcp, val) \ |
| 181 | + _pcp_protect(__percpu_write_8, pcp, (unsigned long)val) |
| 182 | +#define this_cpu_write_2(pcp, val) \ |
| 183 | + _pcp_protect(__percpu_write_16, pcp, (unsigned long)val) |
| 184 | +#define this_cpu_write_4(pcp, val) \ |
| 185 | + _pcp_protect(__percpu_write_32, pcp, (unsigned long)val) |
| 186 | +#define this_cpu_write_8(pcp, val) \ |
| 187 | + _pcp_protect(__percpu_write_64, pcp, (unsigned long)val) |
259 | 188 | |
260 | | -#define _percpu_add(pcp, val) \ |
261 | | - _pcp_protect(__percpu_add, pcp, val) |
| 189 | +#define this_cpu_add_1(pcp, val) \ |
| 190 | + _pcp_protect(__percpu_add_case_8, pcp, val) |
| 191 | +#define this_cpu_add_2(pcp, val) \ |
| 192 | + _pcp_protect(__percpu_add_case_16, pcp, val) |
| 193 | +#define this_cpu_add_4(pcp, val) \ |
| 194 | + _pcp_protect(__percpu_add_case_32, pcp, val) |
| 195 | +#define this_cpu_add_8(pcp, val) \ |
| 196 | + _pcp_protect(__percpu_add_case_64, pcp, val) |
262 | 197 | |
263 | | -#define _percpu_add_return(pcp, val) _percpu_add(pcp, val) |
| 198 | +#define this_cpu_add_return_1(pcp, val) \ |
| 199 | + _pcp_protect_return(__percpu_add_return_case_8, pcp, val) |
| 200 | +#define this_cpu_add_return_2(pcp, val) \ |
| 201 | + _pcp_protect_return(__percpu_add_return_case_16, pcp, val) |
| 202 | +#define this_cpu_add_return_4(pcp, val) \ |
| 203 | + _pcp_protect_return(__percpu_add_return_case_32, pcp, val) |
| 204 | +#define this_cpu_add_return_8(pcp, val) \ |
| 205 | + _pcp_protect_return(__percpu_add_return_case_64, pcp, val) |
264 | 206 | |
265 | | -#define _percpu_and(pcp, val) \ |
266 | | - _pcp_protect(__percpu_and, pcp, val) |
| 207 | +#define this_cpu_and_1(pcp, val) \ |
| 208 | + _pcp_protect(__percpu_andnot_case_8, pcp, ~val) |
| 209 | +#define this_cpu_and_2(pcp, val) \ |
| 210 | + _pcp_protect(__percpu_andnot_case_16, pcp, ~val) |
| 211 | +#define this_cpu_and_4(pcp, val) \ |
| 212 | + _pcp_protect(__percpu_andnot_case_32, pcp, ~val) |
| 213 | +#define this_cpu_and_8(pcp, val) \ |
| 214 | + _pcp_protect(__percpu_andnot_case_64, pcp, ~val) |
267 | 215 | |
268 | | -#define _percpu_or(pcp, val) \ |
269 | | - _pcp_protect(__percpu_or, pcp, val) |
| 216 | +#define this_cpu_or_1(pcp, val) \ |
| 217 | + _pcp_protect(__percpu_or_case_8, pcp, val) |
| 218 | +#define this_cpu_or_2(pcp, val) \ |
| 219 | + _pcp_protect(__percpu_or_case_16, pcp, val) |
| 220 | +#define this_cpu_or_4(pcp, val) \ |
| 221 | + _pcp_protect(__percpu_or_case_32, pcp, val) |
| 222 | +#define this_cpu_or_8(pcp, val) \ |
| 223 | + _pcp_protect(__percpu_or_case_64, pcp, val) |
270 | 224 | |
271 | | -#define _percpu_xchg(pcp, val) (typeof(pcp)) \ |
272 | | - _pcp_protect(__percpu_xchg, pcp, (unsigned long)(val)) |
| 225 | +#define this_cpu_xchg_1(pcp, val) \ |
| 226 | + _pcp_protect_return(xchg_relaxed, pcp, val) |
| 227 | +#define this_cpu_xchg_2(pcp, val) \ |
| 228 | + _pcp_protect_return(xchg_relaxed, pcp, val) |
| 229 | +#define this_cpu_xchg_4(pcp, val) \ |
| 230 | + _pcp_protect_return(xchg_relaxed, pcp, val) |
| 231 | +#define this_cpu_xchg_8(pcp, val) \ |
| 232 | + _pcp_protect_return(xchg_relaxed, pcp, val) |
273 | 233 | |
274 | | -#define this_cpu_add_1(pcp, val) _percpu_add(pcp, val) |
275 | | -#define this_cpu_add_2(pcp, val) _percpu_add(pcp, val) |
276 | | -#define this_cpu_add_4(pcp, val) _percpu_add(pcp, val) |
277 | | -#define this_cpu_add_8(pcp, val) _percpu_add(pcp, val) |
| 234 | +#define this_cpu_cmpxchg_1(pcp, o, n) \ |
| 235 | + _pcp_protect_return(cmpxchg_relaxed, pcp, o, n) |
| 236 | +#define this_cpu_cmpxchg_2(pcp, o, n) \ |
| 237 | + _pcp_protect_return(cmpxchg_relaxed, pcp, o, n) |
| 238 | +#define this_cpu_cmpxchg_4(pcp, o, n) \ |
| 239 | + _pcp_protect_return(cmpxchg_relaxed, pcp, o, n) |
| 240 | +#define this_cpu_cmpxchg_8(pcp, o, n) \ |
| 241 | + _pcp_protect_return(cmpxchg_relaxed, pcp, o, n) |
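
The xchg/cmpxchg families above now route through the generic xchg_relaxed/cmpxchg_relaxed from <asm/cmpxchg.h>, which do their own LL/SC vs. LSE selection; relaxed ordering suffices because the variable is CPU-local for the duration of the preemption-off window. Typical use, with a hypothetical per-CPU counter:

static DEFINE_PER_CPU(unsigned long, demo_counter);

static void demo(void)
{
	unsigned long old;

	this_cpu_write(demo_counter, 0);
	this_cpu_add(demo_counter, 3);
	old = this_cpu_cmpxchg(demo_counter, 3, 5);	/* old == 3, counter == 5 */
	old = this_cpu_xchg(demo_counter, 0);		/* old == 5, counter == 0 */
}
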
278 | 242 | |
279 | | -#define this_cpu_add_return_1(pcp, val) _percpu_add_return(pcp, val) |
280 | | -#define this_cpu_add_return_2(pcp, val) _percpu_add_return(pcp, val) |
281 | | -#define this_cpu_add_return_4(pcp, val) _percpu_add_return(pcp, val) |
282 | | -#define this_cpu_add_return_8(pcp, val) _percpu_add_return(pcp, val) |
283 | | - |
284 | | -#define this_cpu_and_1(pcp, val) _percpu_and(pcp, val) |
285 | | -#define this_cpu_and_2(pcp, val) _percpu_and(pcp, val) |
286 | | -#define this_cpu_and_4(pcp, val) _percpu_and(pcp, val) |
287 | | -#define this_cpu_and_8(pcp, val) _percpu_and(pcp, val) |
288 | | - |
289 | | -#define this_cpu_or_1(pcp, val) _percpu_or(pcp, val) |
290 | | -#define this_cpu_or_2(pcp, val) _percpu_or(pcp, val) |
291 | | -#define this_cpu_or_4(pcp, val) _percpu_or(pcp, val) |
292 | | -#define this_cpu_or_8(pcp, val) _percpu_or(pcp, val) |
293 | | - |
294 | | -#define this_cpu_read_1(pcp) _percpu_read(pcp) |
295 | | -#define this_cpu_read_2(pcp) _percpu_read(pcp) |
296 | | -#define this_cpu_read_4(pcp) _percpu_read(pcp) |
297 | | -#define this_cpu_read_8(pcp) _percpu_read(pcp) |
298 | | - |
299 | | -#define this_cpu_write_1(pcp, val) _percpu_write(pcp, val) |
300 | | -#define this_cpu_write_2(pcp, val) _percpu_write(pcp, val) |
301 | | -#define this_cpu_write_4(pcp, val) _percpu_write(pcp, val) |
302 | | -#define this_cpu_write_8(pcp, val) _percpu_write(pcp, val) |
303 | | - |
304 | | -#define this_cpu_xchg_1(pcp, val) _percpu_xchg(pcp, val) |
305 | | -#define this_cpu_xchg_2(pcp, val) _percpu_xchg(pcp, val) |
306 | | -#define this_cpu_xchg_4(pcp, val) _percpu_xchg(pcp, val) |
307 | | -#define this_cpu_xchg_8(pcp, val) _percpu_xchg(pcp, val) |
| 243 | +#ifdef __KVM_NVHE_HYPERVISOR__ |
| 244 | +extern unsigned long __hyp_per_cpu_offset(unsigned int cpu); |
| 245 | +#define __per_cpu_offset |
| 246 | +#define per_cpu_offset(cpu) __hyp_per_cpu_offset((cpu)) |
| 247 | +#endif |
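
The empty #define __per_cpu_offset stops <asm-generic/percpu.h> from defining per_cpu_offset() in terms of the kernel's __per_cpu_offset[] array, which is not mapped at hyp; the nVHE hyp object supplies its own lookup instead. A sketch of the shape such an implementation takes (names and details illustrative; the real one lives in the nVHE hyp code under arch/arm64/kvm/):

extern char __per_cpu_start[];		/* linker-provided section base */
extern unsigned long hyp_percpu_base[];	/* hyp VAs, filled at hyp init  */

unsigned long __hyp_per_cpu_offset(unsigned int cpu)
{
	/* Offset = this CPU's per-cpu copy minus the ELF template base. */
	return hyp_percpu_base[cpu] - (unsigned long)__per_cpu_start;
}
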
308 | 248 | |
309 | 249 | #include <asm-generic/percpu.h> |
310 | 250 | |
| 251 | +/* Redefine macros for nVHE hyp under DEBUG_PREEMPT to avoid its dependencies. */ |
| 252 | +#if defined(__KVM_NVHE_HYPERVISOR__) && defined(CONFIG_DEBUG_PREEMPT) |
| 253 | +#undef this_cpu_ptr |
| 254 | +#define this_cpu_ptr raw_cpu_ptr |
| 255 | +#undef __this_cpu_read |
| 256 | +#define __this_cpu_read raw_cpu_read |
| 257 | +#undef __this_cpu_write |
| 258 | +#define __this_cpu_write raw_cpu_write |
| 259 | +#endif |
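
This final block exists because, under CONFIG_DEBUG_PREEMPT, the generic this_cpu_ptr() family pulls in preemption-debugging helpers that are not linked into the nVHE hyp object. Remapping to the raw_ variants is safe there since hyp code runs with preemption disabled throughout. Roughly, the difference being avoided:

/* With CONFIG_DEBUG_PREEMPT, the generic form expands along the lines
 * of (a sketch; helper names from the generic percpu code):
 *
 *	__this_cpu_preempt_check("read");	// not linked into hyp
 *	raw_cpu_read(pcp);
 *
 * whereas raw_cpu_read(pcp) performs only the access itself.
 */
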
| 260 | + |
311 | 261 | #endif /* __ASM_PERCPU_H */ |
---|