...
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * FP/SIMD context switching and fault handling
  *
  * Copyright (C) 2012 ARM Ltd.
  * Author: Catalin Marinas <catalin.marinas@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

 #include <linux/bitmap.h>
+#include <linux/bitops.h>
 #include <linux/bottom_half.h>
 #include <linux/bug.h>
 #include <linux/cache.h>
 #include <linux/compat.h>
+#include <linux/compiler.h>
 #include <linux/cpu.h>
 #include <linux/cpu_pm.h>
 #include <linux/kernel.h>
...
 #include <linux/slab.h>
 #include <linux/stddef.h>
 #include <linux/sysctl.h>
+#include <linux/swab.h>

 #include <asm/esr.h>
+#include <asm/exception.h>
 #include <asm/fpsimd.h>
 #include <asm/cpufeature.h>
 #include <asm/cputype.h>
+#include <asm/neon.h>
 #include <asm/processor.h>
 #include <asm/simd.h>
 #include <asm/sigcontext.h>
 #include <asm/sysreg.h>
 #include <asm/traps.h>
+#include <asm/virt.h>

 #define FPEXC_IOF	(1 << 0)
 #define FPEXC_DZF	(1 << 1)
...
  * To prevent this from racing with the manipulation of the task's FPSIMD state
  * from task context and thereby corrupting the state, it is necessary to
  * protect any manipulation of a task's fpsimd_state or TIF_FOREIGN_FPSTATE
- * flag with local_bh_disable() unless softirqs are already masked.
+ * flag with {, __}get_cpu_fpsimd_context(). This will still allow softirqs to
+ * run but prevent them from using FPSIMD.
  *
  * For a certain task, the sequence may look something like this:
  * - the task gets scheduled in; if both the task's fpsimd_cpu field
...
  */
 struct fpsimd_last_state_struct {
 	struct user_fpsimd_state *st;
+	void *sve_state;
+	unsigned int sve_vl;
 };

 static DEFINE_PER_CPU(struct fpsimd_last_state_struct, fpsimd_last_state);

 /* Default VL for tasks that don't set it explicitly: */
-static int sve_default_vl = -1;
+static int __sve_default_vl = -1;
+
+static int get_sve_default_vl(void)
+{
+	return READ_ONCE(__sve_default_vl);
+}

 #ifdef CONFIG_ARM64_SVE

+static void set_sve_default_vl(int val)
+{
+	WRITE_ONCE(__sve_default_vl, val);
+}
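The accessors above are a hedge against torn accesses: __sve_default_vl is written from the sysctl handler while other CPUs may read it locklessly at exec time. A minimal sketch of the pattern (illustrative only, not part of the patch):

	/* Writer (sysctl) and reader (fpsimd_flush_thread()) take no lock: */
	WRITE_ONCE(__sve_default_vl, new_vl);	/* untorn store */
	...
	int vl = READ_ONCE(__sve_default_vl);	/* consistent snapshot */

READ_ONCE()/WRITE_ONCE() stop the compiler from tearing or caching the access; they are not memory barriers, which is fine here because reading a slightly stale default VL is harmless.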
+
 /* Maximum supported vector length across all CPUs (initially poisoned) */
 int __ro_after_init sve_max_vl = SVE_VL_MIN;
-/* Set of available vector lengths, as vq_to_bit(vq): */
-static __ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX);
+int __ro_after_init sve_max_virtualisable_vl = SVE_VL_MIN;
+
+/*
+ * Set of available vector lengths,
+ * where length vq encoded as bit __vq_to_bit(vq):
+ */
+__ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX);
+/* Set of vector lengths present on at least one cpu: */
+static __ro_after_init DECLARE_BITMAP(sve_vq_partial_map, SVE_VQ_MAX);
+
 static void __percpu *efi_sve_state;

 #else /* ! CONFIG_ARM64_SVE */

 /* Dummy declaration for code that will be optimised out: */
 extern __ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX);
+extern __ro_after_init DECLARE_BITMAP(sve_vq_partial_map, SVE_VQ_MAX);
 extern void __percpu *efi_sve_state;

 #endif /* ! CONFIG_ARM64_SVE */

-/*
- * Call __sve_free() directly only if you know task can't be scheduled
- * or preempted.
- */
-static void __sve_free(struct task_struct *task)
+DEFINE_PER_CPU(bool, fpsimd_context_busy);
+EXPORT_PER_CPU_SYMBOL(fpsimd_context_busy);
+
+static void __get_cpu_fpsimd_context(void)
 {
-	kfree(task->thread.sve_state);
-	task->thread.sve_state = NULL;
+	bool busy = __this_cpu_xchg(fpsimd_context_busy, true);
+
+	WARN_ON(busy);
 }

-static void sve_free(struct task_struct *task)
+/*
+ * Claim ownership of the CPU FPSIMD context for use by the calling context.
+ *
+ * The caller may freely manipulate the FPSIMD context metadata until
+ * put_cpu_fpsimd_context() is called.
+ *
+ * The double-underscore version must only be called if you know the task
+ * can't be preempted.
+ */
+static void get_cpu_fpsimd_context(void)
 {
+	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+		local_bh_disable();
+	else
+		preempt_disable();
+	__get_cpu_fpsimd_context();
+}
+
+static void __put_cpu_fpsimd_context(void)
+{
+	bool busy = __this_cpu_xchg(fpsimd_context_busy, false);
+
+	WARN_ON(!busy);	/* No matching get_cpu_fpsimd_context()? */
+}
+
+/*
+ * Release the CPU FPSIMD context.
+ *
+ * Must be called from a context in which get_cpu_fpsimd_context() was
+ * previously called, with no call to put_cpu_fpsimd_context() in the
+ * meantime.
+ */
+static void put_cpu_fpsimd_context(void)
+{
+	__put_cpu_fpsimd_context();
+	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+		local_bh_enable();
+	else
+		preempt_enable();
+}
+
+static bool have_cpu_fpsimd_context(void)
+{
+	return !preemptible() && __this_cpu_read(fpsimd_context_busy);
+}
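The pair above implements the locking discipline described in the comment at the top of the file. A sketch of a typical task-context user (the function name is hypothetical; the calls are the ones introduced here):

	static void frob_current_fpsimd_state(void)
	{
		get_cpu_fpsimd_context();	/* mark this CPU's context busy */

		fpsimd_save();			/* we own the context: safe */
		/* ... manipulate current->thread.uw.fpsimd_state ... */

		put_cpu_fpsimd_context();	/* drop ownership */
	}

Softirqs keep running while the context is held (on PREEMPT_RT they are not masked at all); the exported fpsimd_context_busy flag is what may_use_simd() is expected to consult, so a softirq that finds it set must take a non-SIMD fallback path.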
+
+static void *sve_free_atomic(struct task_struct *task)
+{
+	void *sve_state = task->thread.sve_state;
+
 	WARN_ON(test_tsk_thread_flag(task, TIF_SVE));

-	__sve_free(task);
+	task->thread.sve_state = NULL;
+	return sve_state;
 }

 /*
...
  * This function should be called only when the FPSIMD/SVE state in
  * thread_struct is known to be up to date, when preparing to enter
  * userspace.
- *
- * Softirqs (and preemption) must be disabled.
  */
 static void task_fpsimd_load(void)
 {
-	WARN_ON(!in_softirq() && !irqs_disabled());
 	WARN_ON(!system_supports_fpsimd());
+	WARN_ON(!have_cpu_fpsimd_context());

 	if (system_supports_sve() && test_thread_flag(TIF_SVE))
 		sve_load_state(sve_pffr(&current->thread),
...
 /*
  * Ensure FPSIMD/SVE storage in memory for the loaded context is up to
  * date with respect to the CPU registers.
- *
- * Softirqs (and preemption) must be disabled.
  */
-void fpsimd_save(void)
+static void fpsimd_save(void)
 {
-	struct user_fpsimd_state *st = __this_cpu_read(fpsimd_last_state.st);
+	struct fpsimd_last_state_struct const *last =
+		this_cpu_ptr(&fpsimd_last_state);
 	/* set by fpsimd_bind_task_to_cpu() or fpsimd_bind_state_to_cpu() */

 	WARN_ON(!system_supports_fpsimd());
-	WARN_ON(!in_softirq() && !irqs_disabled());
+	WARN_ON(!have_cpu_fpsimd_context());

 	if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) {
 		if (system_supports_sve() && test_thread_flag(TIF_SVE)) {
-			if (WARN_ON(sve_get_vl() != current->thread.sve_vl)) {
+			if (WARN_ON(sve_get_vl() != last->sve_vl)) {
 				/*
 				 * Can't save the user regs, so current would
 				 * re-enter user with corrupt state.
 				 * There's no way to recover, so kill it:
 				 */
-				force_signal_inject(SIGKILL, SI_KERNEL, 0);
+				force_signal_inject(SIGKILL, SI_KERNEL, 0, 0);
 				return;
 			}

-			sve_save_state(sve_pffr(&current->thread), &st->fpsr);
+			sve_save_state((char *)last->sve_state +
+				       sve_ffr_offset(last->sve_vl),
+				       &last->st->fpsr);
 		} else
-			fpsimd_save_state(st);
+			fpsimd_save_state(last->st);
 	}
-}
-
-/*
- * Helpers to translate bit indices in sve_vq_map to VQ values (and
- * vice versa).  This allows find_next_bit() to be used to find the
- * _maximum_ VQ not exceeding a certain value.
- */
-
-static unsigned int vq_to_bit(unsigned int vq)
-{
-	return SVE_VQ_MAX - vq;
-}
-
-static unsigned int bit_to_vq(unsigned int bit)
-{
-	if (WARN_ON(bit >= SVE_VQ_MAX))
-		bit = SVE_VQ_MAX - 1;
-
-	return SVE_VQ_MAX - bit;
-}
 }
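The vq_to_bit()/bit_to_vq() helpers are removed from this file, but the callers below now use __vq_to_bit()/__bit_to_vq(), so the same inverted mapping presumably moves to a header in double-underscore form. For reference, a sketch of what that mapping does:

	/*
	 * Bit 0 represents the *largest* VQ. Hence find_next_bit() starting
	 * at __vq_to_bit(vq) yields the largest supported VQ <= vq, and
	 * find_last_bit() yields the smallest VQ present in a map.
	 */
	static inline unsigned int __vq_to_bit(unsigned int vq)
	{
		return SVE_VQ_MAX - vq;
	}

	static inline unsigned int __bit_to_vq(unsigned int bit)
	{
		return SVE_VQ_MAX - bit;
	}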

 /*
...
 		vl = max_vl;

 	bit = find_next_bit(sve_vq_map, SVE_VQ_MAX,
-			    vq_to_bit(sve_vq_from_vl(vl)));
-	return sve_vl_from_vq(bit_to_vq(bit));
+			    __vq_to_bit(sve_vq_from_vl(vl)));
+	return sve_vl_from_vq(__bit_to_vq(bit));
 }

 #if defined(CONFIG_ARM64_SVE) && defined(CONFIG_SYSCTL)

 static int sve_proc_do_default_vl(struct ctl_table *table, int write,
-				  void __user *buffer, size_t *lenp,
-				  loff_t *ppos)
+				  void *buffer, size_t *lenp, loff_t *ppos)
 {
 	int ret;
-	int vl = sve_default_vl;
+	int vl = get_sve_default_vl();
 	struct ctl_table tmp_table = {
 		.data = &vl,
 		.maxlen = sizeof(vl),
...
 	if (!sve_vl_valid(vl))
 		return -EINVAL;

-	sve_default_vl = find_supported_vector_length(vl);
+	set_sve_default_vl(find_supported_vector_length(vl));
 	return 0;
 }

...
 #define ZREG(sve_state, vq, n) ((char *)(sve_state) +		\
 	(SVE_SIG_ZREG_OFFSET(vq, n) - SVE_SIG_REGS_OFFSET))

+#ifdef CONFIG_CPU_BIG_ENDIAN
+static __uint128_t arm64_cpu_to_le128(__uint128_t x)
+{
+	u64 a = swab64(x);
+	u64 b = swab64(x >> 64);
+
+	return ((__uint128_t)a << 64) | b;
+}
+#else
+static __uint128_t arm64_cpu_to_le128(__uint128_t x)
+{
+	return x;
+}
+#endif
+
+#define arm64_le128_to_cpu(x) arm64_cpu_to_le128(x)
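The in-memory Z-register layout is fixed little-endian, so on big-endian CPUs each 128-bit quantity must be fully byte-reversed: swab64() reverses the bytes within each half, and exchanging a and b reverses the halves, flipping all 16 bytes. The conversion is its own inverse, which is why one function can serve both directions; an illustrative check (not part of the patch):

	/* Sketch: round-tripping through the conversion is the identity. */
	static void check_le128_roundtrip(__uint128_t x)
	{
		WARN_ON(arm64_le128_to_cpu(arm64_cpu_to_le128(x)) != x);
	}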
+
+static void __fpsimd_to_sve(void *sst, struct user_fpsimd_state const *fst,
+			    unsigned int vq)
+{
+	unsigned int i;
+	__uint128_t *p;
+
+	for (i = 0; i < SVE_NUM_ZREGS; ++i) {
+		p = (__uint128_t *)ZREG(sst, vq, i);
+		*p = arm64_cpu_to_le128(fst->vregs[i]);
+	}
+}
+
 /*
  * Transfer the FPSIMD state in task->thread.uw.fpsimd_state to
  * task->thread.sve_state.
  *
  * Task can be a non-runnable task, or current.  In the latter case,
- * softirqs (and preemption) must be disabled.
+ * the caller must have ownership of the cpu FPSIMD context before calling
+ * this function.
  * task->thread.sve_state must point to at least sve_state_size(task)
  * bytes of allocated kernel memory.
  * task->thread.uw.fpsimd_state must be up to date before calling this
...
 	unsigned int vq;
 	void *sst = task->thread.sve_state;
 	struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state;
-	unsigned int i;

 	if (!system_supports_sve())
 		return;

 	vq = sve_vq_from_vl(task->thread.sve_vl);
-	for (i = 0; i < 32; ++i)
-		memcpy(ZREG(sst, vq, i), &fst->vregs[i],
-		       sizeof(fst->vregs[i]));
+	__fpsimd_to_sve(sst, fst, vq);
 }

 /*
...
  * task->thread.uw.fpsimd_state.
  *
  * Task can be a non-runnable task, or current.  In the latter case,
- * softirqs (and preemption) must be disabled.
+ * the caller must have ownership of the cpu FPSIMD context before calling
+ * this function.
  * task->thread.sve_state must point to at least sve_state_size(task)
  * bytes of allocated kernel memory.
  * task->thread.sve_state must be up to date before calling this function.
...
 	void const *sst = task->thread.sve_state;
 	struct user_fpsimd_state *fst = &task->thread.uw.fpsimd_state;
 	unsigned int i;
+	__uint128_t const *p;

 	if (!system_supports_sve())
 		return;

 	vq = sve_vq_from_vl(task->thread.sve_vl);
-	for (i = 0; i < 32; ++i)
-		memcpy(&fst->vregs[i], ZREG(sst, vq, i),
-		       sizeof(fst->vregs[i]));
+	for (i = 0; i < SVE_NUM_ZREGS; ++i) {
+		p = (__uint128_t const *)ZREG(sst, vq, i);
+		fst->vregs[i] = arm64_le128_to_cpu(*p);
+	}
 }

 #ifdef CONFIG_ARM64_SVE
...
 	unsigned int vq;
 	void *sst = task->thread.sve_state;
 	struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state;
-	unsigned int i;

 	if (!test_tsk_thread_flag(task, TIF_SVE))
 		return;
...
 	vq = sve_vq_from_vl(task->thread.sve_vl);

 	memset(sst, 0, SVE_SIG_REGS_SIZE(vq));
-
-	for (i = 0; i < 32; ++i)
-		memcpy(ZREG(sst, vq, i), &fst->vregs[i],
-		       sizeof(fst->vregs[i]));
+	__fpsimd_to_sve(sst, fst, vq);
 }

 int sve_set_vector_length(struct task_struct *task,
 			  unsigned long vl, unsigned long flags)
 {
+	void *mem = NULL;
 	if (flags & ~(unsigned long)(PR_SVE_VL_INHERIT |
 				     PR_SVE_SET_VL_ONEXEC))
 		return -EINVAL;
...
 	 * non-SVE thread.
 	 */
 	if (task == current) {
-		local_bh_disable();
+		get_cpu_fpsimd_context();

 		fpsimd_save();
-		set_thread_flag(TIF_FOREIGN_FPSTATE);
 	}

 	fpsimd_flush_task_state(task);
...
 		sve_to_fpsimd(task);

 	if (task == current)
-		local_bh_enable();
+		put_cpu_fpsimd_context();

 	/*
 	 * Force reallocation of task SVE state to the correct size
 	 * on next use:
 	 */
-	sve_free(task);
+	mem = sve_free_atomic(task);

 	task->thread.sve_vl = vl;
+	kfree(mem);

 out:
 	update_tsk_thread_flag(task, TIF_SVE_VL_INHERIT,
...
 	vl = arg & PR_SVE_VL_LEN_MASK;
 	flags = arg & ~vl;

-	if (!system_supports_sve())
+	if (!system_supports_sve() || is_compat_task())
 		return -EINVAL;

 	ret = sve_set_vector_length(current, vl, flags);
...
 /* PR_SVE_GET_VL */
 int sve_get_current_vl(void)
 {
-	if (!system_supports_sve())
+	if (!system_supports_sve() || is_compat_task())
 		return -EINVAL;

 	return sve_prctl_status(0);
 }
-
-/*
- * Bitmap for temporary storage of the per-CPU set of supported vector lengths
- * during secondary boot.
- */
-static DECLARE_BITMAP(sve_secondary_vq_map, SVE_VQ_MAX);

 static void sve_probe_vqs(DECLARE_BITMAP(map, SVE_VQ_MAX))
 {
...
 		write_sysreg_s(zcr | (vq - 1), SYS_ZCR_EL1); /* self-syncing */
 		vl = sve_get_vl();
 		vq = sve_vq_from_vl(vl); /* skip intervening lengths */
-		set_bit(vq_to_bit(vq), map);
+		set_bit(__vq_to_bit(vq), map);
 	}
 }

+/*
+ * Initialise the set of known supported VQs for the boot CPU.
+ * This is called during kernel boot, before secondary CPUs are brought up.
+ */
 void __init sve_init_vq_map(void)
 {
 	sve_probe_vqs(sve_vq_map);
+	bitmap_copy(sve_vq_partial_map, sve_vq_map, SVE_VQ_MAX);
 }

 /*
  * If we haven't committed to the set of supported VQs yet, filter out
  * those not supported by the current CPU.
+ * This function is called during the bring-up of early secondary CPUs only.
  */
 void sve_update_vq_map(void)
 {
-	sve_probe_vqs(sve_secondary_vq_map);
-	bitmap_and(sve_vq_map, sve_vq_map, sve_secondary_vq_map, SVE_VQ_MAX);
+	DECLARE_BITMAP(tmp_map, SVE_VQ_MAX);
+
+	sve_probe_vqs(tmp_map);
+	bitmap_and(sve_vq_map, sve_vq_map, tmp_map, SVE_VQ_MAX);
+	bitmap_or(sve_vq_partial_map, sve_vq_partial_map, tmp_map, SVE_VQ_MAX);
 }

-/* Check whether the current CPU supports all VQs in the committed set */
+/*
+ * Check whether the current CPU supports all VQs in the committed set.
+ * This function is called during the bring-up of late secondary CPUs only.
+ */
 int sve_verify_vq_map(void)
 {
-	int ret = 0;
+	DECLARE_BITMAP(tmp_map, SVE_VQ_MAX);
+	unsigned long b;

-	sve_probe_vqs(sve_secondary_vq_map);
-	bitmap_andnot(sve_secondary_vq_map, sve_vq_map, sve_secondary_vq_map,
-		      SVE_VQ_MAX);
-	if (!bitmap_empty(sve_secondary_vq_map, SVE_VQ_MAX)) {
+	sve_probe_vqs(tmp_map);
+
+	bitmap_complement(tmp_map, tmp_map, SVE_VQ_MAX);
+	if (bitmap_intersects(tmp_map, sve_vq_map, SVE_VQ_MAX)) {
 		pr_warn("SVE: cpu%d: Required vector length(s) missing\n",
 			smp_processor_id());
-		ret = -EINVAL;
+		return -EINVAL;
 	}

-	return ret;
+	if (!IS_ENABLED(CONFIG_KVM) || !is_hyp_mode_available())
+		return 0;
+
+	/*
+	 * For KVM, it is necessary to ensure that this CPU doesn't
+	 * support any vector length that guests may have probed as
+	 * unsupported.
+	 */
+
+	/* Recover the set of supported VQs: */
+	bitmap_complement(tmp_map, tmp_map, SVE_VQ_MAX);
+	/* Find VQs supported that are not globally supported: */
+	bitmap_andnot(tmp_map, tmp_map, sve_vq_map, SVE_VQ_MAX);
+
+	/* Find the lowest such VQ, if any: */
+	b = find_last_bit(tmp_map, SVE_VQ_MAX);
+	if (b >= SVE_VQ_MAX)
+		return 0; /* no mismatches */
+
+	/*
+	 * Mismatches above sve_max_virtualisable_vl are fine, since
+	 * no guest is allowed to configure ZCR_EL2.LEN to exceed this:
+	 */
+	if (sve_vl_from_vq(__bit_to_vq(b)) <= sve_max_virtualisable_vl) {
+		pr_warn("SVE: cpu%d: Unsupported vector length(s) present\n",
+			smp_processor_id());
+		return -EINVAL;
+	}

+	return 0;
 }
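The rewritten "missing VQ" test is a reformulation of the old andnot/empty check that lets the complemented tmp_map be reused for the KVM check. A sketch of the equivalence (hypothetical helpers, not part of the patch):

	static bool vqs_missing_old(const unsigned long *cpu_map)
	{
		DECLARE_BITMAP(t, SVE_VQ_MAX);

		bitmap_andnot(t, sve_vq_map, cpu_map, SVE_VQ_MAX);
		return !bitmap_empty(t, SVE_VQ_MAX);	/* required & ~supported */
	}

	static bool vqs_missing_new(const unsigned long *cpu_map)
	{
		DECLARE_BITMAP(t, SVE_VQ_MAX);

		bitmap_complement(t, cpu_map, SVE_VQ_MAX);
		return bitmap_intersects(t, sve_vq_map, SVE_VQ_MAX);
	}

Both return true exactly when some globally committed VQ is absent from this CPU's supported set.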

 static void __init sve_efi_setup(void)
...
 void __init sve_setup(void)
 {
 	u64 zcr;
+	DECLARE_BITMAP(tmp_map, SVE_VQ_MAX);
+	unsigned long b;

 	if (!system_supports_sve())
 		return;
...
 	 * so sve_vq_map must have at least SVE_VQ_MIN set.
 	 * If something went wrong, at least try to patch it up:
 	 */
-	if (WARN_ON(!test_bit(vq_to_bit(SVE_VQ_MIN), sve_vq_map)))
-		set_bit(vq_to_bit(SVE_VQ_MIN), sve_vq_map);
+	if (WARN_ON(!test_bit(__vq_to_bit(SVE_VQ_MIN), sve_vq_map)))
+		set_bit(__vq_to_bit(SVE_VQ_MIN), sve_vq_map);

 	zcr = read_sanitised_ftr_reg(SYS_ZCR_EL1);
 	sve_max_vl = sve_vl_from_vq((zcr & ZCR_ELx_LEN_MASK) + 1);
...
 	 * For the default VL, pick the maximum supported value <= 64.
 	 * VL == 64 is guaranteed not to grow the signal frame.
 	 */
-	sve_default_vl = find_supported_vector_length(64);
+	set_sve_default_vl(find_supported_vector_length(64));
+
+	bitmap_andnot(tmp_map, sve_vq_partial_map, sve_vq_map,
+		      SVE_VQ_MAX);
+
+	b = find_last_bit(tmp_map, SVE_VQ_MAX);
+	if (b >= SVE_VQ_MAX)
+		/* No non-virtualisable VLs found */
+		sve_max_virtualisable_vl = SVE_VQ_MAX;
+	else if (WARN_ON(b == SVE_VQ_MAX - 1))
+		/* No virtualisable VLs?  This is architecturally forbidden. */
+		sve_max_virtualisable_vl = SVE_VQ_MIN;
+	else /* b + 1 < SVE_VQ_MAX */
+		sve_max_virtualisable_vl = sve_vl_from_vq(__bit_to_vq(b + 1));
+
+	if (sve_max_virtualisable_vl > sve_max_vl)
+		sve_max_virtualisable_vl = sve_max_vl;

 	pr_info("SVE: maximum available vector length %u bytes per vector\n",
 		sve_max_vl);
 	pr_info("SVE: default vector length %u bytes per vector\n",
-		sve_default_vl);
+		get_sve_default_vl());
+
+	/* KVM decides whether to support mismatched systems. Just warn here: */
+	if (sve_max_virtualisable_vl < sve_max_vl)
+		pr_warn("SVE: unvirtualisable vector lengths present\n");

 	sve_efi_setup();
 }
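Because of the inverted bit numbering, find_last_bit() yields the smallest VQ present only on some CPUs, and b + 1 maps to the VQ just below it. A worked example with made-up numbers (illustrative only):

	/*
	 * Suppose every CPU supports VQ {1, 2, 4} but one early CPU also
	 * reported VQ 3: sve_vq_partial_map & ~sve_vq_map = {3}, so b is
	 * the bit for VQ 3 (the smallest non-virtualisable VQ),
	 * __bit_to_vq(b + 1) = 2, and
	 * sve_max_virtualisable_vl = sve_vl_from_vq(2) = 32 bytes.
	 */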
...
  */
 void fpsimd_release_task(struct task_struct *dead_task)
 {
-	__sve_free(dead_task);
+	void *mem = NULL;
+	mem = sve_free_atomic(dead_task);
+	kfree(mem);
 }

 #endif /* CONFIG_ARM64_SVE */
...
  * the SVE access trap will be disabled the next time this task
  * reaches ret_to_user.
  *
- * TIF_SVE should be clear on entry: otherwise, task_fpsimd_load()
+ * TIF_SVE should be clear on entry: otherwise, fpsimd_restore_current_state()
  * would have disabled the SVE access trap for userspace during
  * ret_to_user, making an SVE access trap impossible in that case.
  */
-asmlinkage void do_sve_acc(unsigned int esr, struct pt_regs *regs)
+void do_sve_acc(unsigned int esr, struct pt_regs *regs)
 {
 	/* Even if we chose not to use SVE, the hardware could still trap: */
 	if (unlikely(!system_supports_sve()) || WARN_ON(is_compat_task())) {
-		force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc);
+		force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
 		return;
 	}

 	sve_alloc(current);

-	local_bh_disable();
+	get_cpu_fpsimd_context();

 	fpsimd_save();
-	fpsimd_to_sve(current);

 	/* Force ret_to_user to reload the registers: */
 	fpsimd_flush_task_state(current);
-	set_thread_flag(TIF_FOREIGN_FPSTATE);

+	fpsimd_to_sve(current);
 	if (test_and_set_thread_flag(TIF_SVE))
 		WARN_ON(1); /* SVE access shouldn't have trapped */

-	local_bh_enable();
+	put_cpu_fpsimd_context();
 }

 /*
  * Trapped FP/ASIMD access.
  */
-asmlinkage void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs)
+void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs)
 {
 	/* TODO: implement lazy context saving/restoring */
 	WARN_ON(1);
...
 /*
  * Raise a SIGFPE for the current process.
  */
-asmlinkage void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs)
+void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs)
 {
-	siginfo_t info;
 	unsigned int si_code = FPE_FLTUNK;

 	if (esr & ESR_ELx_FP_EXC_TFV) {
...
 			si_code = FPE_FLTRES;
 	}

-	clear_siginfo(&info);
-	info.si_signo = SIGFPE;
-	info.si_code = si_code;
-	info.si_addr = (void __user *)instruction_pointer(regs);
-
-	send_sig_info(SIGFPE, &info, current);
+	send_sig_fault(SIGFPE, si_code,
+		       (void __user *)instruction_pointer(regs),
+		       current);
 }

 void fpsimd_thread_switch(struct task_struct *next)
...

 	if (!system_supports_fpsimd())
 		return;
+
+	__get_cpu_fpsimd_context();

 	/* Save unsaved fpsimd state, if any: */
 	fpsimd_save();
...

 	update_tsk_thread_flag(next, TIF_FOREIGN_FPSTATE,
 			       wrong_task || wrong_cpu);
+
+	__put_cpu_fpsimd_context();
 }

 void fpsimd_flush_thread(void)
 {
 	int vl, supported_vl;
+	void *mem = NULL;

 	if (!system_supports_fpsimd())
 		return;

-	local_bh_disable();
+	get_cpu_fpsimd_context();

+	fpsimd_flush_task_state(current);
 	memset(&current->thread.uw.fpsimd_state, 0,
 	       sizeof(current->thread.uw.fpsimd_state));
-	fpsimd_flush_task_state(current);

 	if (system_supports_sve()) {
 		clear_thread_flag(TIF_SVE);
-		sve_free(current);
+		mem = sve_free_atomic(current);

 		/*
 		 * Reset the task vector length as required.
...
 		 * vector length configured: no kernel task can become a user
 		 * task without an exec and hence a call to this function.
 		 * By the time the first call to this function is made, all
-		 * early hardware probing is complete, so sve_default_vl
+		 * early hardware probing is complete, so __sve_default_vl
 		 * should be valid.
 		 * If a bug causes this to go wrong, we make some noise and
 		 * try to fudge thread.sve_vl to a safe value here.
 		 */
 		vl = current->thread.sve_vl_onexec ?
-			current->thread.sve_vl_onexec : sve_default_vl;
+			current->thread.sve_vl_onexec : get_sve_default_vl();

 		if (WARN_ON(!sve_vl_valid(vl)))
 			vl = SVE_VL_MIN;
...
 		current->thread.sve_vl_onexec = 0;
 	}

-	set_thread_flag(TIF_FOREIGN_FPSTATE);
-
-	local_bh_enable();
+	put_cpu_fpsimd_context();
+	kfree(mem);
 }

 /*
...
 	if (!system_supports_fpsimd())
 		return;

-	local_bh_disable();
+	get_cpu_fpsimd_context();
 	fpsimd_save();
-	local_bh_enable();
+	put_cpu_fpsimd_context();
 }

 /*
...

 /*
  * Associate current's FPSIMD context with this cpu
- * Preemption must be disabled when calling this function.
+ * The caller must have ownership of the cpu FPSIMD context before calling
+ * this function.
  */
 void fpsimd_bind_task_to_cpu(void)
 {
...

 	WARN_ON(!system_supports_fpsimd());
 	last->st = &current->thread.uw.fpsimd_state;
+	last->sve_state = current->thread.sve_state;
+	last->sve_vl = current->thread.sve_vl;
 	current->thread.fpsimd_cpu = smp_processor_id();

 	if (system_supports_sve()) {
...
 	}
 }

-void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st)
+void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state,
+			      unsigned int sve_vl)
 {
 	struct fpsimd_last_state_struct *last =
 		this_cpu_ptr(&fpsimd_last_state);
...
 	WARN_ON(!in_softirq() && !irqs_disabled());

 	last->st = st;
+	last->sve_state = sve_state;
+	last->sve_vl = sve_vl;
 }

 /*
...
 		return;
 	}

-	local_bh_disable();
+	get_cpu_fpsimd_context();

 	if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) {
 		task_fpsimd_load();
 		fpsimd_bind_task_to_cpu();
 	}

-	local_bh_enable();
+	put_cpu_fpsimd_context();
 }

 /*
...
 	if (WARN_ON(!system_supports_fpsimd()))
 		return;

-	local_bh_disable();
+	get_cpu_fpsimd_context();

 	current->thread.uw.fpsimd_state = *state;
 	if (system_supports_sve() && test_thread_flag(TIF_SVE))
...

 	clear_thread_flag(TIF_FOREIGN_FPSTATE);

-	local_bh_enable();
+	put_cpu_fpsimd_context();
 }

 /*
  * Invalidate live CPU copies of task t's FPSIMD state
+ *
+ * This function may be called with preemption enabled.  The barrier()
+ * ensures that the assignment to fpsimd_cpu is visible to any
+ * preemption/softirq that could race with set_tsk_thread_flag(), so
+ * that TIF_FOREIGN_FPSTATE cannot be spuriously re-cleared.
+ *
+ * The final barrier ensures that TIF_FOREIGN_FPSTATE is seen set by any
+ * subsequent code.
  */
 void fpsimd_flush_task_state(struct task_struct *t)
 {
 	t->thread.fpsimd_cpu = NR_CPUS;
+	/*
+	 * If we don't support fpsimd, bail out after we have
+	 * reset the fpsimd_cpu for this task and clear the
+	 * FPSTATE.
+	 */
+	if (!system_supports_fpsimd())
+		return;
+	barrier();
+	set_tsk_thread_flag(t, TIF_FOREIGN_FPSTATE);
+
+	barrier();
 }

-void fpsimd_flush_cpu_state(void)
+/*
+ * Invalidate any task's FPSIMD state that is present on this cpu.
+ * The FPSIMD context should be acquired with get_cpu_fpsimd_context()
+ * before calling this function.
+ */
+static void fpsimd_flush_cpu_state(void)
 {
 	WARN_ON(!system_supports_fpsimd());
 	__this_cpu_write(fpsimd_last_state.st, NULL);
 	set_thread_flag(TIF_FOREIGN_FPSTATE);
 }

-#ifdef CONFIG_KERNEL_MODE_NEON
+/*
+ * Save the FPSIMD state to memory and invalidate cpu view.
+ * This function must be called with preemption disabled.
+ */
+void fpsimd_save_and_flush_cpu_state(void)
+{
+	if (!system_supports_fpsimd())
+		return;
+	WARN_ON(preemptible());
+	__get_cpu_fpsimd_context();
+	fpsimd_save();
+	fpsimd_flush_cpu_state();
+	__put_cpu_fpsimd_context();
+}
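A sketch of how a caller is expected to use the new helper (the wrapper is hypothetical; the CPU PM notifier further down is the in-tree user):

	static void example_park_fpsimd(void)
	{
		preempt_disable();
		fpsimd_save_and_flush_cpu_state();	/* regs now live only in memory */
		preempt_enable();
	}

The double-underscore context helpers are safe here precisely because preemption is already disabled, matching the rule documented for them above.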

-DEFINE_PER_CPU(bool, kernel_neon_busy);
-EXPORT_PER_CPU_SYMBOL(kernel_neon_busy);
+#ifdef CONFIG_KERNEL_MODE_NEON

 /*
  * Kernel-side NEON support functions
...

 	BUG_ON(!may_use_simd());

-	local_bh_disable();
-
-	__this_cpu_write(kernel_neon_busy, true);
+	get_cpu_fpsimd_context();

 	/* Save unsaved fpsimd state, if any: */
 	fpsimd_save();

 	/* Invalidate any task state remaining in the fpsimd regs: */
 	fpsimd_flush_cpu_state();
-
-	preempt_disable();
-
-	local_bh_enable();
 }
 EXPORT_SYMBOL(kernel_neon_begin);

...
  */
 void kernel_neon_end(void)
 {
-	bool busy;
-
 	if (!system_supports_fpsimd())
 		return;

-	busy = __this_cpu_xchg(kernel_neon_busy, false);
-	WARN_ON(!busy);	/* No matching kernel_neon_begin()? */
-
-	preempt_enable();
+	put_cpu_fpsimd_context();
 }
 EXPORT_SYMBOL(kernel_neon_end);
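With kernel_neon_busy folded into fpsimd_context_busy, the public API is unchanged. A sketch of a typical kernel-mode NEON user (hypothetical function; may_use_simd(), kernel_neon_begin() and kernel_neon_end() are the existing API):

	static void do_block(u8 *dst, const u8 *src, size_t len)
	{
		if (may_use_simd()) {
			kernel_neon_begin();	/* claims the CPU FPSIMD context */
			/* ... NEON-accelerated path ... */
			kernel_neon_end();
		} else {
			/* ... scalar fallback, e.g. softirq finds context busy ... */
		}
	}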
1145 | 1307 | |
---|
.. | .. |
---|
1231 | 1393 | { |
---|
1232 | 1394 | switch (cmd) { |
---|
1233 | 1395 | case CPU_PM_ENTER: |
---|
1234 | | - fpsimd_save(); |
---|
1235 | | - fpsimd_flush_cpu_state(); |
---|
| 1396 | + fpsimd_save_and_flush_cpu_state(); |
---|
1236 | 1397 | break; |
---|
1237 | 1398 | case CPU_PM_EXIT: |
---|
1238 | 1399 | break; |
---|
.. | .. |
---|
1278 | 1439 | */ |
---|
1279 | 1440 | static int __init fpsimd_init(void) |
---|
1280 | 1441 | { |
---|
1281 | | - if (elf_hwcap & HWCAP_FP) { |
---|
| 1442 | + if (cpu_have_named_feature(FP)) { |
---|
1282 | 1443 | fpsimd_pm_init(); |
---|
1283 | 1444 | fpsimd_hotplug_init(); |
---|
1284 | 1445 | } else { |
---|
1285 | 1446 | pr_notice("Floating-point is not implemented\n"); |
---|
1286 | 1447 | } |
---|
1287 | 1448 | |
---|
1288 | | - if (!(elf_hwcap & HWCAP_ASIMD)) |
---|
| 1449 | + if (!cpu_have_named_feature(ASIMD)) |
---|
1289 | 1450 | pr_notice("Advanced SIMD is not implemented\n"); |
---|
1290 | 1451 | |
---|
1291 | 1452 | return sve_sysctl_init(); |
---|