.. | .. |
---|
| 1 | +// SPDX-License-Identifier: GPL-2.0-only |
---|
1 | 2 | /* |
---|
2 | 3 | * FP/SIMD context switching and fault handling |
---|
3 | 4 | * |
---|
4 | 5 | * Copyright (C) 2012 ARM Ltd. |
---|
5 | 6 | * Author: Catalin Marinas <catalin.marinas@arm.com> |
---|
6 | | - * |
---|
7 | | - * This program is free software; you can redistribute it and/or modify |
---|
8 | | - * it under the terms of the GNU General Public License version 2 as |
---|
9 | | - * published by the Free Software Foundation. |
---|
10 | | - * |
---|
11 | | - * This program is distributed in the hope that it will be useful, |
---|
12 | | - * but WITHOUT ANY WARRANTY; without even the implied warranty of |
---|
13 | | - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
---|
14 | | - * GNU General Public License for more details. |
---|
15 | | - * |
---|
16 | | - * You should have received a copy of the GNU General Public License |
---|
17 | | - * along with this program. If not, see <http://www.gnu.org/licenses/>. |
---|
18 | 7 | */ |
---|
19 | 8 | |
---|
20 | 9 | #include <linux/bitmap.h> |
---|
| 10 | +#include <linux/bitops.h> |
---|
21 | 11 | #include <linux/bottom_half.h> |
---|
22 | 12 | #include <linux/bug.h> |
---|
23 | 13 | #include <linux/cache.h> |
---|
24 | 14 | #include <linux/compat.h> |
---|
| 15 | +#include <linux/compiler.h> |
---|
25 | 16 | #include <linux/cpu.h> |
---|
26 | 17 | #include <linux/cpu_pm.h> |
---|
27 | 18 | #include <linux/kernel.h> |
---|
.. | .. |
---|
38 | 29 | #include <linux/slab.h> |
---|
39 | 30 | #include <linux/stddef.h> |
---|
40 | 31 | #include <linux/sysctl.h> |
---|
| 32 | +#include <linux/swab.h> |
---|
41 | 33 | |
---|
42 | 34 | #include <asm/esr.h> |
---|
| 35 | +#include <asm/exception.h> |
---|
43 | 36 | #include <asm/fpsimd.h> |
---|
44 | 37 | #include <asm/cpufeature.h> |
---|
45 | 38 | #include <asm/cputype.h> |
---|
| 39 | +#include <asm/neon.h> |
---|
46 | 40 | #include <asm/processor.h> |
---|
47 | 41 | #include <asm/simd.h> |
---|
48 | 42 | #include <asm/sigcontext.h> |
---|
49 | 43 | #include <asm/sysreg.h> |
---|
50 | 44 | #include <asm/traps.h> |
---|
| 45 | +#include <asm/virt.h> |
---|
51 | 46 | |
---|
52 | 47 | #define FPEXC_IOF (1 << 0) |
---|
53 | 48 | #define FPEXC_DZF (1 << 1) |
---|
.. | .. |
---|
90 | 85 | * To prevent this from racing with the manipulation of the task's FPSIMD state |
---|
91 | 86 | * from task context and thereby corrupting the state, it is necessary to |
---|
92 | 87 | * protect any manipulation of a task's fpsimd_state or TIF_FOREIGN_FPSTATE |
---|
93 | | - * flag with local_bh_disable() unless softirqs are already masked. |
---|
| 88 | + * flag with {, __}get_cpu_fpsimd_context(). This will still allow softirqs to |
---|
| 89 | + * run but prevent them from using FPSIMD. |
---|
94 | 90 | * |
---|
95 | 91 | * For a certain task, the sequence may look something like this: |
---|
96 | 92 | * - the task gets scheduled in; if both the task's fpsimd_cpu field |
---|
.. | .. |
---|
119 | 115 | */ |
---|
120 | 116 | struct fpsimd_last_state_struct { |
---|
121 | 117 | struct user_fpsimd_state *st; |
---|
| 118 | + void *sve_state; |
---|
| 119 | + unsigned int sve_vl; |
---|
122 | 120 | }; |
---|
123 | 121 | |
---|
124 | 122 | static DEFINE_PER_CPU(struct fpsimd_last_state_struct, fpsimd_last_state); |
---|
125 | 123 | |
---|
126 | 124 | /* Default VL for tasks that don't set it explicitly: */ |
---|
127 | | -static int sve_default_vl = -1; |
---|
| 125 | +static int __sve_default_vl = -1; |
---|
| 126 | + |
---|
| 127 | +static int get_sve_default_vl(void) |
---|
| 128 | +{ |
---|
| 129 | + return READ_ONCE(__sve_default_vl); |
---|
| 130 | +} |
---|
128 | 131 | |
---|
129 | 132 | #ifdef CONFIG_ARM64_SVE |
---|
130 | 133 | |
---|
| 134 | +static void set_sve_default_vl(int val) |
---|
| 135 | +{ |
---|
| 136 | + WRITE_ONCE(__sve_default_vl, val); |
---|
| 137 | +} |
---|
| 138 | + |
---|
131 | 139 | /* Maximum supported vector length across all CPUs (initially poisoned) */ |
---|
132 | 140 | int __ro_after_init sve_max_vl = SVE_VL_MIN; |
---|
133 | | -/* Set of available vector lengths, as vq_to_bit(vq): */ |
---|
134 | | -static __ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX); |
---|
| 141 | +int __ro_after_init sve_max_virtualisable_vl = SVE_VL_MIN; |
---|
| 142 | + |
---|
| 143 | +/* |
---|
| 144 | + * Set of available vector lengths, |
---|
| 145 | + * where length vq is encoded as bit __vq_to_bit(vq): |
---|
| 146 | + */ |
---|
| 147 | +__ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX); |
---|
| 148 | +/* Set of vector lengths present on at least one cpu: */ |
---|
| 149 | +static __ro_after_init DECLARE_BITMAP(sve_vq_partial_map, SVE_VQ_MAX); |
---|
| 150 | + |
---|
135 | 151 | static void __percpu *efi_sve_state; |
---|
136 | 152 | |
---|
137 | 153 | #else /* ! CONFIG_ARM64_SVE */ |
---|
138 | 154 | |
---|
139 | 155 | /* Dummy declaration for code that will be optimised out: */ |
---|
140 | 156 | extern __ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX); |
---|
| 157 | +extern __ro_after_init DECLARE_BITMAP(sve_vq_partial_map, SVE_VQ_MAX); |
---|
141 | 158 | extern void __percpu *efi_sve_state; |
---|
142 | 159 | |
---|
143 | 160 | #endif /* ! CONFIG_ARM64_SVE */ |
---|
| 161 | + |
---|
| 162 | +DEFINE_PER_CPU(bool, fpsimd_context_busy); |
---|
| 163 | +EXPORT_PER_CPU_SYMBOL(fpsimd_context_busy); |
---|
| 164 | + |
---|
| 165 | +static void __get_cpu_fpsimd_context(void) |
---|
| 166 | +{ |
---|
| 167 | + bool busy = __this_cpu_xchg(fpsimd_context_busy, true); |
---|
| 168 | + |
---|
| 169 | + WARN_ON(busy); |
---|
| 170 | +} |
---|
| 171 | + |
---|
| 172 | +/* |
---|
| 173 | + * Claim ownership of the CPU FPSIMD context for use by the calling context. |
---|
| 174 | + * |
---|
| 175 | + * The caller may freely manipulate the FPSIMD context metadata until |
---|
| 176 | + * put_cpu_fpsimd_context() is called. |
---|
| 177 | + * |
---|
| 178 | + * The double-underscore version must only be called if you know the task |
---|
| 179 | + * can't be preempted. |
---|
| 180 | + */ |
---|
| 181 | +static void get_cpu_fpsimd_context(void) |
---|
| 182 | +{ |
---|
| 183 | + local_bh_disable(); |
---|
| 184 | + __get_cpu_fpsimd_context(); |
---|
| 185 | +} |
---|
| 186 | + |
---|
| 187 | +static void __put_cpu_fpsimd_context(void) |
---|
| 188 | +{ |
---|
| 189 | + bool busy = __this_cpu_xchg(fpsimd_context_busy, false); |
---|
| 190 | + |
---|
| 191 | + WARN_ON(!busy); /* No matching get_cpu_fpsimd_context()? */ |
---|
| 192 | +} |
---|
| 193 | + |
---|
| 194 | +/* |
---|
| 195 | + * Release the CPU FPSIMD context. |
---|
| 196 | + * |
---|
| 197 | + * Must be called from a context in which get_cpu_fpsimd_context() was |
---|
| 198 | + * previously called, with no call to put_cpu_fpsimd_context() in the |
---|
| 199 | + * meantime. |
---|
| 200 | + */ |
---|
| 201 | +static void put_cpu_fpsimd_context(void) |
---|
| 202 | +{ |
---|
| 203 | + __put_cpu_fpsimd_context(); |
---|
| 204 | + local_bh_enable(); |
---|
| 205 | +} |
---|
| 206 | + |
---|
| 207 | +static bool have_cpu_fpsimd_context(void) |
---|
| 208 | +{ |
---|
| 209 | + return !preemptible() && __this_cpu_read(fpsimd_context_busy); |
---|
| 210 | +} |
---|
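
The helpers above replace the bare local_bh_disable()/local_bh_enable() pairs used elsewhere in this file. A minimal sketch of the discipline described in the earlier comment block follows; the function name is hypothetical and, since fpsimd_save() becomes static in this patch, such a caller would have to live in this file. It mirrors what sve_set_vector_length() does further down.

```c
/* Hypothetical caller, for illustration only -- not part of the patch. */
static void example_update_current_fpsimd_state(void)
{
	/*
	 * Claim the CPU FPSIMD context: softirqs keep running but may not
	 * use FPSIMD while fpsimd_context_busy is set.
	 */
	get_cpu_fpsimd_context();

	fpsimd_save();				/* sync live registers back to memory */
	/* ... manipulate current->thread FPSIMD/SVE state here ... */
	fpsimd_flush_task_state(current);	/* force a reload on ret_to_user */

	put_cpu_fpsimd_context();
}
```

The double-underscore variants are for callers that already run with preemption disabled, such as fpsimd_thread_switch() later in the patch.
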
144 | 211 | |
---|
145 | 212 | /* |
---|
146 | 213 | * Call __sve_free() directly only if you know task can't be scheduled |
---|
.. | .. |
---|
212 | 279 | * This function should be called only when the FPSIMD/SVE state in |
---|
213 | 280 | * thread_struct is known to be up to date, when preparing to enter |
---|
214 | 281 | * userspace. |
---|
215 | | - * |
---|
216 | | - * Softirqs (and preemption) must be disabled. |
---|
217 | 282 | */ |
---|
218 | 283 | static void task_fpsimd_load(void) |
---|
219 | 284 | { |
---|
220 | | - WARN_ON(!in_softirq() && !irqs_disabled()); |
---|
221 | 285 | WARN_ON(!system_supports_fpsimd()); |
---|
| 286 | + WARN_ON(!have_cpu_fpsimd_context()); |
---|
222 | 287 | |
---|
223 | 288 | if (system_supports_sve() && test_thread_flag(TIF_SVE)) |
---|
224 | 289 | sve_load_state(sve_pffr(¤t->thread), |
---|
.. | .. |
---|
231 | 296 | /* |
---|
232 | 297 | * Ensure FPSIMD/SVE storage in memory for the loaded context is up to |
---|
233 | 298 | * date with respect to the CPU registers. |
---|
234 | | - * |
---|
235 | | - * Softirqs (and preemption) must be disabled. |
---|
236 | 299 | */ |
---|
237 | | -void fpsimd_save(void) |
---|
| 300 | +static void fpsimd_save(void) |
---|
238 | 301 | { |
---|
239 | | - struct user_fpsimd_state *st = __this_cpu_read(fpsimd_last_state.st); |
---|
| 302 | + struct fpsimd_last_state_struct const *last = |
---|
| 303 | + this_cpu_ptr(&fpsimd_last_state); |
---|
240 | 304 | /* set by fpsimd_bind_task_to_cpu() or fpsimd_bind_state_to_cpu() */ |
---|
241 | 305 | |
---|
242 | 306 | WARN_ON(!system_supports_fpsimd()); |
---|
243 | | - WARN_ON(!in_softirq() && !irqs_disabled()); |
---|
| 307 | + WARN_ON(!have_cpu_fpsimd_context()); |
---|
244 | 308 | |
---|
245 | 309 | if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) { |
---|
246 | 310 | if (system_supports_sve() && test_thread_flag(TIF_SVE)) { |
---|
247 | | - if (WARN_ON(sve_get_vl() != current->thread.sve_vl)) { |
---|
| 311 | + if (WARN_ON(sve_get_vl() != last->sve_vl)) { |
---|
248 | 312 | /* |
---|
249 | 313 | * Can't save the user regs, so current would |
---|
250 | 314 | * re-enter user with corrupt state. |
---|
251 | 315 | * There's no way to recover, so kill it: |
---|
252 | 316 | */ |
---|
253 | | - force_signal_inject(SIGKILL, SI_KERNEL, 0); |
---|
| 317 | + force_signal_inject(SIGKILL, SI_KERNEL, 0, 0); |
---|
254 | 318 | return; |
---|
255 | 319 | } |
---|
256 | 320 | |
---|
257 | | - sve_save_state(sve_pffr(¤t->thread), &st->fpsr); |
---|
| 321 | + sve_save_state((char *)last->sve_state + |
---|
| 322 | + sve_ffr_offset(last->sve_vl), |
---|
| 323 | + &last->st->fpsr); |
---|
258 | 324 | } else |
---|
259 | | - fpsimd_save_state(st); |
---|
| 325 | + fpsimd_save_state(last->st); |
---|
260 | 326 | } |
---|
261 | | -} |
---|
262 | | - |
---|
263 | | -/* |
---|
264 | | - * Helpers to translate bit indices in sve_vq_map to VQ values (and |
---|
265 | | - * vice versa). This allows find_next_bit() to be used to find the |
---|
266 | | - * _maximum_ VQ not exceeding a certain value. |
---|
267 | | - */ |
---|
268 | | - |
---|
269 | | -static unsigned int vq_to_bit(unsigned int vq) |
---|
270 | | -{ |
---|
271 | | - return SVE_VQ_MAX - vq; |
---|
272 | | -} |
---|
273 | | - |
---|
274 | | -static unsigned int bit_to_vq(unsigned int bit) |
---|
275 | | -{ |
---|
276 | | - if (WARN_ON(bit >= SVE_VQ_MAX)) |
---|
277 | | - bit = SVE_VQ_MAX - 1; |
---|
278 | | - |
---|
279 | | - return SVE_VQ_MAX - bit; |
---|
280 | 327 | } |
---|
281 | 328 | |
---|
282 | 329 | /* |
---|
.. | .. |
---|
300 | 347 | vl = max_vl; |
---|
301 | 348 | |
---|
302 | 349 | bit = find_next_bit(sve_vq_map, SVE_VQ_MAX, |
---|
303 | | - vq_to_bit(sve_vq_from_vl(vl))); |
---|
304 | | - return sve_vl_from_vq(bit_to_vq(bit)); |
---|
| 350 | + __vq_to_bit(sve_vq_from_vl(vl))); |
---|
| 351 | + return sve_vl_from_vq(__bit_to_vq(bit)); |
---|
305 | 352 | } |
---|
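
The vq_to_bit()/bit_to_vq() helpers removed above encode a vector length as bit SVE_VQ_MAX - vq, and the renamed __vq_to_bit()/__bit_to_vq() used here presumably keep the same encoding from a header. A small illustration of why that reversal lets find_next_bit() perform a "largest value not exceeding the request" search; the function below is illustrative only, not part of the patch.

```c
/*
 * Illustrative only: the reversed encoding used by sve_vq_map.
 * VQ 1 (128-bit vectors) lives at bit SVE_VQ_MAX - 1, VQ 2 at
 * bit SVE_VQ_MAX - 2, and so on, so larger vector lengths sit at
 * smaller bit indices.
 */
static unsigned int example_vq_to_bit(unsigned int vq)
{
	return SVE_VQ_MAX - vq;
}

/*
 * With VQ 1, 2 and 4 supported, the map has bits SVE_VQ_MAX - 1,
 * SVE_VQ_MAX - 2 and SVE_VQ_MAX - 4 set.  A request for VQ 3 starts
 * the search at bit SVE_VQ_MAX - 3; find_next_bit() scans towards
 * higher bit indices and lands on SVE_VQ_MAX - 2, i.e. VQ 2 -- the
 * largest supported length not exceeding the request, which is what
 * find_supported_vector_length() above relies on.
 */
```
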
306 | 353 | |
---|
307 | 354 | #if defined(CONFIG_ARM64_SVE) && defined(CONFIG_SYSCTL) |
---|
308 | 355 | |
---|
309 | 356 | static int sve_proc_do_default_vl(struct ctl_table *table, int write, |
---|
310 | | - void __user *buffer, size_t *lenp, |
---|
311 | | - loff_t *ppos) |
---|
| 357 | + void *buffer, size_t *lenp, loff_t *ppos) |
---|
312 | 358 | { |
---|
313 | 359 | int ret; |
---|
314 | | - int vl = sve_default_vl; |
---|
| 360 | + int vl = get_sve_default_vl(); |
---|
315 | 361 | struct ctl_table tmp_table = { |
---|
316 | 362 | .data = &vl, |
---|
317 | 363 | .maxlen = sizeof(vl), |
---|
.. | .. |
---|
328 | 374 | if (!sve_vl_valid(vl)) |
---|
329 | 375 | return -EINVAL; |
---|
330 | 376 | |
---|
331 | | - sve_default_vl = find_supported_vector_length(vl); |
---|
| 377 | + set_sve_default_vl(find_supported_vector_length(vl)); |
---|
332 | 378 | return 0; |
---|
333 | 379 | } |
---|
334 | 380 | |
---|
.. | .. |
---|
357 | 403 | #define ZREG(sve_state, vq, n) ((char *)(sve_state) + \ |
---|
358 | 404 | (SVE_SIG_ZREG_OFFSET(vq, n) - SVE_SIG_REGS_OFFSET)) |
---|
359 | 405 | |
---|
| 406 | +#ifdef CONFIG_CPU_BIG_ENDIAN |
---|
| 407 | +static __uint128_t arm64_cpu_to_le128(__uint128_t x) |
---|
| 408 | +{ |
---|
| 409 | + u64 a = swab64(x); |
---|
| 410 | + u64 b = swab64(x >> 64); |
---|
| 411 | + |
---|
| 412 | + return ((__uint128_t)a << 64) | b; |
---|
| 413 | +} |
---|
| 414 | +#else |
---|
| 415 | +static __uint128_t arm64_cpu_to_le128(__uint128_t x) |
---|
| 416 | +{ |
---|
| 417 | + return x; |
---|
| 418 | +} |
---|
| 419 | +#endif |
---|
| 420 | + |
---|
| 421 | +#define arm64_le128_to_cpu(x) arm64_cpu_to_le128(x) |
---|
| 422 | + |
---|
| 423 | +static void __fpsimd_to_sve(void *sst, struct user_fpsimd_state const *fst, |
---|
| 424 | + unsigned int vq) |
---|
| 425 | +{ |
---|
| 426 | + unsigned int i; |
---|
| 427 | + __uint128_t *p; |
---|
| 428 | + |
---|
| 429 | + for (i = 0; i < SVE_NUM_ZREGS; ++i) { |
---|
| 430 | + p = (__uint128_t *)ZREG(sst, vq, i); |
---|
| 431 | + *p = arm64_cpu_to_le128(fst->vregs[i]); |
---|
| 432 | + } |
---|
| 433 | +} |
---|
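
The arm64_le128_to_cpu() alias above is not a typo: a 128-bit byte swap is its own inverse, so converting to and from little-endian is the same operation (and on little-endian kernels both directions are the identity). A two-line sketch, illustrative only:

```c
/* Illustrative only: the conversion round-trips because the swap is an involution. */
static __uint128_t example_le128_roundtrip(__uint128_t x)
{
	return arm64_le128_to_cpu(arm64_cpu_to_le128(x));	/* always equals x */
}
```
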
| 434 | + |
---|
360 | 435 | /* |
---|
361 | 436 | * Transfer the FPSIMD state in task->thread.uw.fpsimd_state to |
---|
362 | 437 | * task->thread.sve_state. |
---|
363 | 438 | * |
---|
364 | 439 | * Task can be a non-runnable task, or current. In the latter case, |
---|
365 | | - * softirqs (and preemption) must be disabled. |
---|
| 440 | + * the caller must have ownership of the cpu FPSIMD context before calling |
---|
| 441 | + * this function. |
---|
366 | 442 | * task->thread.sve_state must point to at least sve_state_size(task) |
---|
367 | 443 | * bytes of allocated kernel memory. |
---|
368 | 444 | * task->thread.uw.fpsimd_state must be up to date before calling this |
---|
.. | .. |
---|
373 | 449 | unsigned int vq; |
---|
374 | 450 | void *sst = task->thread.sve_state; |
---|
375 | 451 | struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state; |
---|
376 | | - unsigned int i; |
---|
377 | 452 | |
---|
378 | 453 | if (!system_supports_sve()) |
---|
379 | 454 | return; |
---|
380 | 455 | |
---|
381 | 456 | vq = sve_vq_from_vl(task->thread.sve_vl); |
---|
382 | | - for (i = 0; i < 32; ++i) |
---|
383 | | - memcpy(ZREG(sst, vq, i), &fst->vregs[i], |
---|
384 | | - sizeof(fst->vregs[i])); |
---|
| 457 | + __fpsimd_to_sve(sst, fst, vq); |
---|
385 | 458 | } |
---|
386 | 459 | |
---|
387 | 460 | /* |
---|
.. | .. |
---|
389 | 462 | * task->thread.uw.fpsimd_state. |
---|
390 | 463 | * |
---|
391 | 464 | * Task can be a non-runnable task, or current. In the latter case, |
---|
392 | | - * softirqs (and preemption) must be disabled. |
---|
| 465 | + * the caller must have ownership of the cpu FPSIMD context before calling |
---|
| 466 | + * this function. |
---|
393 | 467 | * task->thread.sve_state must point to at least sve_state_size(task) |
---|
394 | 468 | * bytes of allocated kernel memory. |
---|
395 | 469 | * task->thread.sve_state must be up to date before calling this function. |
---|
.. | .. |
---|
400 | 474 | void const *sst = task->thread.sve_state; |
---|
401 | 475 | struct user_fpsimd_state *fst = &task->thread.uw.fpsimd_state; |
---|
402 | 476 | unsigned int i; |
---|
| 477 | + __uint128_t const *p; |
---|
403 | 478 | |
---|
404 | 479 | if (!system_supports_sve()) |
---|
405 | 480 | return; |
---|
406 | 481 | |
---|
407 | 482 | vq = sve_vq_from_vl(task->thread.sve_vl); |
---|
408 | | - for (i = 0; i < 32; ++i) |
---|
409 | | - memcpy(&fst->vregs[i], ZREG(sst, vq, i), |
---|
410 | | - sizeof(fst->vregs[i])); |
---|
| 483 | + for (i = 0; i < SVE_NUM_ZREGS; ++i) { |
---|
| 484 | + p = (__uint128_t const *)ZREG(sst, vq, i); |
---|
| 485 | + fst->vregs[i] = arm64_le128_to_cpu(*p); |
---|
| 486 | + } |
---|
411 | 487 | } |
---|
412 | 488 | |
---|
413 | 489 | #ifdef CONFIG_ARM64_SVE |
---|
.. | .. |
---|
495 | 571 | unsigned int vq; |
---|
496 | 572 | void *sst = task->thread.sve_state; |
---|
497 | 573 | struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state; |
---|
498 | | - unsigned int i; |
---|
499 | 574 | |
---|
500 | 575 | if (!test_tsk_thread_flag(task, TIF_SVE)) |
---|
501 | 576 | return; |
---|
.. | .. |
---|
503 | 578 | vq = sve_vq_from_vl(task->thread.sve_vl); |
---|
504 | 579 | |
---|
505 | 580 | memset(sst, 0, SVE_SIG_REGS_SIZE(vq)); |
---|
506 | | - |
---|
507 | | - for (i = 0; i < 32; ++i) |
---|
508 | | - memcpy(ZREG(sst, vq, i), &fst->vregs[i], |
---|
509 | | - sizeof(fst->vregs[i])); |
---|
| 581 | + __fpsimd_to_sve(sst, fst, vq); |
---|
510 | 582 | } |
---|
511 | 583 | |
---|
512 | 584 | int sve_set_vector_length(struct task_struct *task, |
---|
.. | .. |
---|
549 | 621 | * non-SVE thread. |
---|
550 | 622 | */ |
---|
551 | 623 | if (task == current) { |
---|
552 | | - local_bh_disable(); |
---|
| 624 | + get_cpu_fpsimd_context(); |
---|
553 | 625 | |
---|
554 | 626 | fpsimd_save(); |
---|
555 | | - set_thread_flag(TIF_FOREIGN_FPSTATE); |
---|
556 | 627 | } |
---|
557 | 628 | |
---|
558 | 629 | fpsimd_flush_task_state(task); |
---|
.. | .. |
---|
560 | 631 | sve_to_fpsimd(task); |
---|
561 | 632 | |
---|
562 | 633 | if (task == current) |
---|
563 | | - local_bh_enable(); |
---|
| 634 | + put_cpu_fpsimd_context(); |
---|
564 | 635 | |
---|
565 | 636 | /* |
---|
566 | 637 | * Force reallocation of task SVE state to the correct size |
---|
.. | .. |
---|
607 | 678 | vl = arg & PR_SVE_VL_LEN_MASK; |
---|
608 | 679 | flags = arg & ~vl; |
---|
609 | 680 | |
---|
610 | | - if (!system_supports_sve()) |
---|
| 681 | + if (!system_supports_sve() || is_compat_task()) |
---|
611 | 682 | return -EINVAL; |
---|
612 | 683 | |
---|
613 | 684 | ret = sve_set_vector_length(current, vl, flags); |
---|
.. | .. |
---|
620 | 691 | /* PR_SVE_GET_VL */ |
---|
621 | 692 | int sve_get_current_vl(void) |
---|
622 | 693 | { |
---|
623 | | - if (!system_supports_sve()) |
---|
| 694 | + if (!system_supports_sve() || is_compat_task()) |
---|
624 | 695 | return -EINVAL; |
---|
625 | 696 | |
---|
626 | 697 | return sve_prctl_status(0); |
---|
627 | 698 | } |
---|
628 | | - |
---|
629 | | -/* |
---|
630 | | - * Bitmap for temporary storage of the per-CPU set of supported vector lengths |
---|
631 | | - * during secondary boot. |
---|
632 | | - */ |
---|
633 | | -static DECLARE_BITMAP(sve_secondary_vq_map, SVE_VQ_MAX); |
---|
634 | 699 | |
---|
635 | 700 | static void sve_probe_vqs(DECLARE_BITMAP(map, SVE_VQ_MAX)) |
---|
636 | 701 | { |
---|
.. | .. |
---|
646 | 711 | write_sysreg_s(zcr | (vq - 1), SYS_ZCR_EL1); /* self-syncing */ |
---|
647 | 712 | vl = sve_get_vl(); |
---|
648 | 713 | vq = sve_vq_from_vl(vl); /* skip intervening lengths */ |
---|
649 | | - set_bit(vq_to_bit(vq), map); |
---|
| 714 | + set_bit(__vq_to_bit(vq), map); |
---|
650 | 715 | } |
---|
651 | 716 | } |
---|
652 | 717 | |
---|
| 718 | +/* |
---|
| 719 | + * Initialise the set of known supported VQs for the boot CPU. |
---|
| 720 | + * This is called during kernel boot, before secondary CPUs are brought up. |
---|
| 721 | + */ |
---|
653 | 722 | void __init sve_init_vq_map(void) |
---|
654 | 723 | { |
---|
655 | 724 | sve_probe_vqs(sve_vq_map); |
---|
| 725 | + bitmap_copy(sve_vq_partial_map, sve_vq_map, SVE_VQ_MAX); |
---|
656 | 726 | } |
---|
657 | 727 | |
---|
658 | 728 | /* |
---|
659 | 729 | * If we haven't committed to the set of supported VQs yet, filter out |
---|
660 | 730 | * those not supported by the current CPU. |
---|
| 731 | + * This function is called during the bring-up of early secondary CPUs only. |
---|
661 | 732 | */ |
---|
662 | 733 | void sve_update_vq_map(void) |
---|
663 | 734 | { |
---|
664 | | - sve_probe_vqs(sve_secondary_vq_map); |
---|
665 | | - bitmap_and(sve_vq_map, sve_vq_map, sve_secondary_vq_map, SVE_VQ_MAX); |
---|
| 735 | + DECLARE_BITMAP(tmp_map, SVE_VQ_MAX); |
---|
| 736 | + |
---|
| 737 | + sve_probe_vqs(tmp_map); |
---|
| 738 | + bitmap_and(sve_vq_map, sve_vq_map, tmp_map, SVE_VQ_MAX); |
---|
| 739 | + bitmap_or(sve_vq_partial_map, sve_vq_partial_map, tmp_map, SVE_VQ_MAX); |
---|
666 | 740 | } |
---|
667 | 741 | |
---|
668 | | -/* Check whether the current CPU supports all VQs in the committed set */ |
---|
| 742 | +/* |
---|
| 743 | + * Check whether the current CPU supports all VQs in the committed set. |
---|
| 744 | + * This function is called during the bring-up of late secondary CPUs only. |
---|
| 745 | + */ |
---|
669 | 746 | int sve_verify_vq_map(void) |
---|
670 | 747 | { |
---|
671 | | - int ret = 0; |
---|
| 748 | + DECLARE_BITMAP(tmp_map, SVE_VQ_MAX); |
---|
| 749 | + unsigned long b; |
---|
672 | 750 | |
---|
673 | | - sve_probe_vqs(sve_secondary_vq_map); |
---|
674 | | - bitmap_andnot(sve_secondary_vq_map, sve_vq_map, sve_secondary_vq_map, |
---|
675 | | - SVE_VQ_MAX); |
---|
676 | | - if (!bitmap_empty(sve_secondary_vq_map, SVE_VQ_MAX)) { |
---|
| 751 | + sve_probe_vqs(tmp_map); |
---|
| 752 | + |
---|
| 753 | + bitmap_complement(tmp_map, tmp_map, SVE_VQ_MAX); |
---|
| 754 | + if (bitmap_intersects(tmp_map, sve_vq_map, SVE_VQ_MAX)) { |
---|
677 | 755 | pr_warn("SVE: cpu%d: Required vector length(s) missing\n", |
---|
678 | 756 | smp_processor_id()); |
---|
679 | | - ret = -EINVAL; |
---|
| 757 | + return -EINVAL; |
---|
680 | 758 | } |
---|
681 | 759 | |
---|
682 | | - return ret; |
---|
| 760 | + if (!IS_ENABLED(CONFIG_KVM) || !is_hyp_mode_available()) |
---|
| 761 | + return 0; |
---|
| 762 | + |
---|
| 763 | + /* |
---|
| 764 | + * For KVM, it is necessary to ensure that this CPU doesn't |
---|
| 765 | + * support any vector length that guests may have probed as |
---|
| 766 | + * unsupported. |
---|
| 767 | + */ |
---|
| 768 | + |
---|
| 769 | + /* Recover the set of supported VQs: */ |
---|
| 770 | + bitmap_complement(tmp_map, tmp_map, SVE_VQ_MAX); |
---|
| 771 | + /* Find VQs supported that are not globally supported: */ |
---|
| 772 | + bitmap_andnot(tmp_map, tmp_map, sve_vq_map, SVE_VQ_MAX); |
---|
| 773 | + |
---|
| 774 | + /* Find the lowest such VQ, if any: */ |
---|
| 775 | + b = find_last_bit(tmp_map, SVE_VQ_MAX); |
---|
| 776 | + if (b >= SVE_VQ_MAX) |
---|
| 777 | + return 0; /* no mismatches */ |
---|
| 778 | + |
---|
| 779 | + /* |
---|
| 780 | + * Mismatches above sve_max_virtualisable_vl are fine, since |
---|
| 781 | + * no guest is allowed to configure ZCR_EL2.LEN to exceed this: |
---|
| 782 | + */ |
---|
| 783 | + if (sve_vl_from_vq(__bit_to_vq(b)) <= sve_max_virtualisable_vl) { |
---|
| 784 | + pr_warn("SVE: cpu%d: Unsupported vector length(s) present\n", |
---|
| 785 | + smp_processor_id()); |
---|
| 786 | + return -EINVAL; |
---|
| 787 | + } |
---|
| 788 | + |
---|
| 789 | + return 0; |
---|
683 | 790 | } |
---|
684 | 791 | |
---|
685 | 792 | static void __init sve_efi_setup(void) |
---|
.. | .. |
---|
746 | 853 | void __init sve_setup(void) |
---|
747 | 854 | { |
---|
748 | 855 | u64 zcr; |
---|
| 856 | + DECLARE_BITMAP(tmp_map, SVE_VQ_MAX); |
---|
| 857 | + unsigned long b; |
---|
749 | 858 | |
---|
750 | 859 | if (!system_supports_sve()) |
---|
751 | 860 | return; |
---|
.. | .. |
---|
755 | 864 | * so sve_vq_map must have at least SVE_VQ_MIN set. |
---|
756 | 865 | * If something went wrong, at least try to patch it up: |
---|
757 | 866 | */ |
---|
758 | | - if (WARN_ON(!test_bit(vq_to_bit(SVE_VQ_MIN), sve_vq_map))) |
---|
759 | | - set_bit(vq_to_bit(SVE_VQ_MIN), sve_vq_map); |
---|
| 867 | + if (WARN_ON(!test_bit(__vq_to_bit(SVE_VQ_MIN), sve_vq_map))) |
---|
| 868 | + set_bit(__vq_to_bit(SVE_VQ_MIN), sve_vq_map); |
---|
760 | 869 | |
---|
761 | 870 | zcr = read_sanitised_ftr_reg(SYS_ZCR_EL1); |
---|
762 | 871 | sve_max_vl = sve_vl_from_vq((zcr & ZCR_ELx_LEN_MASK) + 1); |
---|
.. | .. |
---|
772 | 881 | * For the default VL, pick the maximum supported value <= 64. |
---|
773 | 882 | * VL == 64 is guaranteed not to grow the signal frame. |
---|
774 | 883 | */ |
---|
775 | | - sve_default_vl = find_supported_vector_length(64); |
---|
| 884 | + set_sve_default_vl(find_supported_vector_length(64)); |
---|
| 885 | + |
---|
| 886 | + bitmap_andnot(tmp_map, sve_vq_partial_map, sve_vq_map, |
---|
| 887 | + SVE_VQ_MAX); |
---|
| 888 | + |
---|
| 889 | + b = find_last_bit(tmp_map, SVE_VQ_MAX); |
---|
| 890 | + if (b >= SVE_VQ_MAX) |
---|
| 891 | + /* No non-virtualisable VLs found */ |
---|
| 892 | + sve_max_virtualisable_vl = SVE_VQ_MAX; |
---|
| 893 | + else if (WARN_ON(b == SVE_VQ_MAX - 1)) |
---|
| 894 | + /* No virtualisable VLs? This is architecturally forbidden. */ |
---|
| 895 | + sve_max_virtualisable_vl = SVE_VQ_MIN; |
---|
| 896 | + else /* b + 1 < SVE_VQ_MAX */ |
---|
| 897 | + sve_max_virtualisable_vl = sve_vl_from_vq(__bit_to_vq(b + 1)); |
---|
| 898 | + |
---|
| 899 | + if (sve_max_virtualisable_vl > sve_max_vl) |
---|
| 900 | + sve_max_virtualisable_vl = sve_max_vl; |
---|
776 | 901 | |
---|
777 | 902 | pr_info("SVE: maximum available vector length %u bytes per vector\n", |
---|
778 | 903 | sve_max_vl); |
---|
779 | 904 | pr_info("SVE: default vector length %u bytes per vector\n", |
---|
780 | | - sve_default_vl); |
---|
| 905 | + get_sve_default_vl()); |
---|
| 906 | + |
---|
| 907 | + /* KVM decides whether to support mismatched systems. Just warn here: */ |
---|
| 908 | + if (sve_max_virtualisable_vl < sve_max_vl) |
---|
| 909 | + pr_warn("SVE: unvirtualisable vector lengths present\n"); |
---|
781 | 910 | |
---|
782 | 911 | sve_efi_setup(); |
---|
783 | 912 | } |
---|
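
The sve_max_virtualisable_vl computation in sve_setup() above is compact, so a worked example may help; the figures are purely illustrative, and the reasoning is the same reversed __vq_to_bit() encoding used elsewhere in this file.

```c
/*
 * Worked example (illustrative): suppose all CPUs support VQ 1, 2 and 4,
 * but only some early CPUs also support VQ 3.  Then:
 *
 *   sve_vq_map         = { 1, 2, 4 }      supported everywhere
 *   sve_vq_partial_map = { 1, 2, 3, 4 }   supported somewhere
 *   tmp_map            = { 3 }            supported somewhere, not everywhere
 *
 * With the reversed bit encoding, find_last_bit() picks the smallest such
 * VQ, here VQ 3, and b + 1 corresponds to VQ 2, so
 * sve_max_virtualisable_vl becomes sve_vl_from_vq(2) == 32 bytes: guests
 * must not be offered any vector length at or beyond the first mismatch,
 * even though VQ 4 itself is supported everywhere.  sve_max_vl is 64
 * bytes in this example, so the pr_warn() about unvirtualisable vector
 * lengths fires.
 */
```
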
.. | .. |
---|
801 | 930 | * the SVE access trap will be disabled the next time this task |
---|
802 | 931 | * reaches ret_to_user. |
---|
803 | 932 | * |
---|
804 | | - * TIF_SVE should be clear on entry: otherwise, task_fpsimd_load() |
---|
| 933 | + * TIF_SVE should be clear on entry: otherwise, fpsimd_restore_current_state() |
---|
805 | 934 | * would have disabled the SVE access trap for userspace during |
---|
806 | 935 | * ret_to_user, making an SVE access trap impossible in that case. |
---|
807 | 936 | */ |
---|
808 | | -asmlinkage void do_sve_acc(unsigned int esr, struct pt_regs *regs) |
---|
| 937 | +void do_sve_acc(unsigned int esr, struct pt_regs *regs) |
---|
809 | 938 | { |
---|
810 | 939 | /* Even if we chose not to use SVE, the hardware could still trap: */ |
---|
811 | 940 | if (unlikely(!system_supports_sve()) || WARN_ON(is_compat_task())) { |
---|
812 | | - force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc); |
---|
| 941 | + force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0); |
---|
813 | 942 | return; |
---|
814 | 943 | } |
---|
815 | 944 | |
---|
816 | 945 | sve_alloc(current); |
---|
817 | 946 | |
---|
818 | | - local_bh_disable(); |
---|
| 947 | + get_cpu_fpsimd_context(); |
---|
819 | 948 | |
---|
820 | 949 | fpsimd_save(); |
---|
821 | | - fpsimd_to_sve(current); |
---|
822 | 950 | |
---|
823 | 951 | /* Force ret_to_user to reload the registers: */ |
---|
824 | 952 | fpsimd_flush_task_state(current); |
---|
825 | | - set_thread_flag(TIF_FOREIGN_FPSTATE); |
---|
826 | 953 | |
---|
| 954 | + fpsimd_to_sve(current); |
---|
827 | 955 | if (test_and_set_thread_flag(TIF_SVE)) |
---|
828 | 956 | WARN_ON(1); /* SVE access shouldn't have trapped */ |
---|
829 | 957 | |
---|
830 | | - local_bh_enable(); |
---|
| 958 | + put_cpu_fpsimd_context(); |
---|
831 | 959 | } |
---|
832 | 960 | |
---|
833 | 961 | /* |
---|
834 | 962 | * Trapped FP/ASIMD access. |
---|
835 | 963 | */ |
---|
836 | | -asmlinkage void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs) |
---|
| 964 | +void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs) |
---|
837 | 965 | { |
---|
838 | 966 | /* TODO: implement lazy context saving/restoring */ |
---|
839 | 967 | WARN_ON(1); |
---|
.. | .. |
---|
842 | 970 | /* |
---|
843 | 971 | * Raise a SIGFPE for the current process. |
---|
844 | 972 | */ |
---|
845 | | -asmlinkage void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs) |
---|
| 973 | +void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs) |
---|
846 | 974 | { |
---|
847 | | - siginfo_t info; |
---|
848 | 975 | unsigned int si_code = FPE_FLTUNK; |
---|
849 | 976 | |
---|
850 | 977 | if (esr & ESR_ELx_FP_EXC_TFV) { |
---|
.. | .. |
---|
860 | 987 | si_code = FPE_FLTRES; |
---|
861 | 988 | } |
---|
862 | 989 | |
---|
863 | | - clear_siginfo(&info); |
---|
864 | | - info.si_signo = SIGFPE; |
---|
865 | | - info.si_code = si_code; |
---|
866 | | - info.si_addr = (void __user *)instruction_pointer(regs); |
---|
867 | | - |
---|
868 | | - send_sig_info(SIGFPE, &info, current); |
---|
| 990 | + send_sig_fault(SIGFPE, si_code, |
---|
| 991 | + (void __user *)instruction_pointer(regs), |
---|
| 992 | + current); |
---|
869 | 993 | } |
---|
870 | 994 | |
---|
871 | 995 | void fpsimd_thread_switch(struct task_struct *next) |
---|
.. | .. |
---|
874 | 998 | |
---|
875 | 999 | if (!system_supports_fpsimd()) |
---|
876 | 1000 | return; |
---|
| 1001 | + |
---|
| 1002 | + __get_cpu_fpsimd_context(); |
---|
877 | 1003 | |
---|
878 | 1004 | /* Save unsaved fpsimd state, if any: */ |
---|
879 | 1005 | fpsimd_save(); |
---|
.. | .. |
---|
889 | 1015 | |
---|
890 | 1016 | update_tsk_thread_flag(next, TIF_FOREIGN_FPSTATE, |
---|
891 | 1017 | wrong_task || wrong_cpu); |
---|
| 1018 | + |
---|
| 1019 | + __put_cpu_fpsimd_context(); |
---|
892 | 1020 | } |
---|
893 | 1021 | |
---|
894 | 1022 | void fpsimd_flush_thread(void) |
---|
.. | .. |
---|
898 | 1026 | if (!system_supports_fpsimd()) |
---|
899 | 1027 | return; |
---|
900 | 1028 | |
---|
901 | | - local_bh_disable(); |
---|
| 1029 | + get_cpu_fpsimd_context(); |
---|
902 | 1030 | |
---|
| 1031 | + fpsimd_flush_task_state(current); |
---|
903 | 1032 | memset(¤t->thread.uw.fpsimd_state, 0, |
---|
904 | 1033 | sizeof(current->thread.uw.fpsimd_state)); |
---|
905 | | - fpsimd_flush_task_state(current); |
---|
906 | 1034 | |
---|
907 | 1035 | if (system_supports_sve()) { |
---|
908 | 1036 | clear_thread_flag(TIF_SVE); |
---|
.. | .. |
---|
914 | 1042 | * vector length configured: no kernel task can become a user |
---|
915 | 1043 | * task without an exec and hence a call to this function. |
---|
916 | 1044 | * By the time the first call to this function is made, all |
---|
917 | | - * early hardware probing is complete, so sve_default_vl |
---|
| 1045 | + * early hardware probing is complete, so __sve_default_vl |
---|
918 | 1046 | * should be valid. |
---|
919 | 1047 | * If a bug causes this to go wrong, we make some noise and |
---|
920 | 1048 | * try to fudge thread.sve_vl to a safe value here. |
---|
921 | 1049 | */ |
---|
922 | 1050 | vl = current->thread.sve_vl_onexec ? |
---|
923 | | - current->thread.sve_vl_onexec : sve_default_vl; |
---|
| 1051 | + current->thread.sve_vl_onexec : get_sve_default_vl(); |
---|
924 | 1052 | |
---|
925 | 1053 | if (WARN_ON(!sve_vl_valid(vl))) |
---|
926 | 1054 | vl = SVE_VL_MIN; |
---|
.. | .. |
---|
939 | 1067 | current->thread.sve_vl_onexec = 0; |
---|
940 | 1068 | } |
---|
941 | 1069 | |
---|
942 | | - set_thread_flag(TIF_FOREIGN_FPSTATE); |
---|
943 | | - |
---|
944 | | - local_bh_enable(); |
---|
| 1070 | + put_cpu_fpsimd_context(); |
---|
945 | 1071 | } |
---|
946 | 1072 | |
---|
947 | 1073 | /* |
---|
.. | .. |
---|
953 | 1079 | if (!system_supports_fpsimd()) |
---|
954 | 1080 | return; |
---|
955 | 1081 | |
---|
956 | | - local_bh_disable(); |
---|
| 1082 | + get_cpu_fpsimd_context(); |
---|
957 | 1083 | fpsimd_save(); |
---|
958 | | - local_bh_enable(); |
---|
| 1084 | + put_cpu_fpsimd_context(); |
---|
959 | 1085 | } |
---|
960 | 1086 | |
---|
961 | 1087 | /* |
---|
.. | .. |
---|
972 | 1098 | |
---|
973 | 1099 | /* |
---|
974 | 1100 | * Associate current's FPSIMD context with this cpu |
---|
975 | | - * Preemption must be disabled when calling this function. |
---|
| 1101 | + * The caller must have ownership of the cpu FPSIMD context before calling |
---|
| 1102 | + * this function. |
---|
976 | 1103 | */ |
---|
977 | 1104 | void fpsimd_bind_task_to_cpu(void) |
---|
978 | 1105 | { |
---|
.. | .. |
---|
981 | 1108 | |
---|
982 | 1109 | WARN_ON(!system_supports_fpsimd()); |
---|
983 | 1110 | last->st = ¤t->thread.uw.fpsimd_state; |
---|
| 1111 | + last->sve_state = current->thread.sve_state; |
---|
| 1112 | + last->sve_vl = current->thread.sve_vl; |
---|
984 | 1113 | current->thread.fpsimd_cpu = smp_processor_id(); |
---|
985 | 1114 | |
---|
986 | 1115 | if (system_supports_sve()) { |
---|
.. | .. |
---|
994 | 1123 | } |
---|
995 | 1124 | } |
---|
996 | 1125 | |
---|
997 | | -void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st) |
---|
| 1126 | +void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state, |
---|
| 1127 | + unsigned int sve_vl) |
---|
998 | 1128 | { |
---|
999 | 1129 | struct fpsimd_last_state_struct *last = |
---|
1000 | 1130 | this_cpu_ptr(&fpsimd_last_state); |
---|
.. | .. |
---|
1003 | 1133 | WARN_ON(!in_softirq() && !irqs_disabled()); |
---|
1004 | 1134 | |
---|
1005 | 1135 | last->st = st; |
---|
| 1136 | + last->sve_state = sve_state; |
---|
| 1137 | + last->sve_vl = sve_vl; |
---|
1006 | 1138 | } |
---|
1007 | 1139 | |
---|
1008 | 1140 | /* |
---|
.. | .. |
---|
1026 | 1158 | return; |
---|
1027 | 1159 | } |
---|
1028 | 1160 | |
---|
1029 | | - local_bh_disable(); |
---|
| 1161 | + get_cpu_fpsimd_context(); |
---|
1030 | 1162 | |
---|
1031 | 1163 | if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) { |
---|
1032 | 1164 | task_fpsimd_load(); |
---|
1033 | 1165 | fpsimd_bind_task_to_cpu(); |
---|
1034 | 1166 | } |
---|
1035 | 1167 | |
---|
1036 | | - local_bh_enable(); |
---|
| 1168 | + put_cpu_fpsimd_context(); |
---|
1037 | 1169 | } |
---|
1038 | 1170 | |
---|
1039 | 1171 | /* |
---|
.. | .. |
---|
1046 | 1178 | if (WARN_ON(!system_supports_fpsimd())) |
---|
1047 | 1179 | return; |
---|
1048 | 1180 | |
---|
1049 | | - local_bh_disable(); |
---|
| 1181 | + get_cpu_fpsimd_context(); |
---|
1050 | 1182 | |
---|
1051 | 1183 | current->thread.uw.fpsimd_state = *state; |
---|
1052 | 1184 | if (system_supports_sve() && test_thread_flag(TIF_SVE)) |
---|
.. | .. |
---|
1057 | 1189 | |
---|
1058 | 1190 | clear_thread_flag(TIF_FOREIGN_FPSTATE); |
---|
1059 | 1191 | |
---|
1060 | | - local_bh_enable(); |
---|
| 1192 | + put_cpu_fpsimd_context(); |
---|
1061 | 1193 | } |
---|
1062 | 1194 | |
---|
1063 | 1195 | /* |
---|
1064 | 1196 | * Invalidate live CPU copies of task t's FPSIMD state |
---|
| 1197 | + * |
---|
| 1198 | + * This function may be called with preemption enabled. The barrier() |
---|
| 1199 | + * ensures that the assignment to fpsimd_cpu is visible to any |
---|
| 1200 | + * preemption/softirq that could race with set_tsk_thread_flag(), so |
---|
| 1201 | + * that TIF_FOREIGN_FPSTATE cannot be spuriously re-cleared. |
---|
| 1202 | + * |
---|
| 1203 | + * The final barrier ensures that TIF_FOREIGN_FPSTATE is seen set by any |
---|
| 1204 | + * subsequent code. |
---|
1065 | 1205 | */ |
---|
1066 | 1206 | void fpsimd_flush_task_state(struct task_struct *t) |
---|
1067 | 1207 | { |
---|
1068 | 1208 | t->thread.fpsimd_cpu = NR_CPUS; |
---|
| 1209 | + /* |
---|
| 1210 | + * If we don't support fpsimd, bail out after we have |
---|
| 1211 | + * reset the fpsimd_cpu for this task and clear the |
---|
| 1212 | + * FPSTATE. |
---|
| 1213 | + */ |
---|
| 1214 | + if (!system_supports_fpsimd()) |
---|
| 1215 | + return; |
---|
| 1216 | + barrier(); |
---|
| 1217 | + set_tsk_thread_flag(t, TIF_FOREIGN_FPSTATE); |
---|
| 1218 | + |
---|
| 1219 | + barrier(); |
---|
1069 | 1220 | } |
---|
1070 | 1221 | |
---|
1071 | | -void fpsimd_flush_cpu_state(void) |
---|
| 1222 | +/* |
---|
| 1223 | + * Invalidate any task's FPSIMD state that is present on this cpu. |
---|
| 1224 | + * The FPSIMD context should be acquired with get_cpu_fpsimd_context() |
---|
| 1225 | + * before calling this function. |
---|
| 1226 | + */ |
---|
| 1227 | +static void fpsimd_flush_cpu_state(void) |
---|
1072 | 1228 | { |
---|
1073 | 1229 | WARN_ON(!system_supports_fpsimd()); |
---|
1074 | 1230 | __this_cpu_write(fpsimd_last_state.st, NULL); |
---|
1075 | 1231 | set_thread_flag(TIF_FOREIGN_FPSTATE); |
---|
1076 | 1232 | } |
---|
1077 | 1233 | |
---|
1078 | | -#ifdef CONFIG_KERNEL_MODE_NEON |
---|
| 1234 | +/* |
---|
| 1235 | + * Save the FPSIMD state to memory and invalidate cpu view. |
---|
| 1236 | + * This function must be called with preemption disabled. |
---|
| 1237 | + */ |
---|
| 1238 | +void fpsimd_save_and_flush_cpu_state(void) |
---|
| 1239 | +{ |
---|
| 1240 | + if (!system_supports_fpsimd()) |
---|
| 1241 | + return; |
---|
| 1242 | + WARN_ON(preemptible()); |
---|
| 1243 | + __get_cpu_fpsimd_context(); |
---|
| 1244 | + fpsimd_save(); |
---|
| 1245 | + fpsimd_flush_cpu_state(); |
---|
| 1246 | + __put_cpu_fpsimd_context(); |
---|
| 1247 | +} |
---|
1079 | 1248 | |
---|
1080 | | -DEFINE_PER_CPU(bool, kernel_neon_busy); |
---|
1081 | | -EXPORT_PER_CPU_SYMBOL(kernel_neon_busy); |
---|
| 1249 | +#ifdef CONFIG_KERNEL_MODE_NEON |
---|
1082 | 1250 | |
---|
1083 | 1251 | /* |
---|
1084 | 1252 | * Kernel-side NEON support functions |
---|
.. | .. |
---|
1104 | 1272 | |
---|
1105 | 1273 | BUG_ON(!may_use_simd()); |
---|
1106 | 1274 | |
---|
1107 | | - local_bh_disable(); |
---|
1108 | | - |
---|
1109 | | - __this_cpu_write(kernel_neon_busy, true); |
---|
| 1275 | + get_cpu_fpsimd_context(); |
---|
1110 | 1276 | |
---|
1111 | 1277 | /* Save unsaved fpsimd state, if any: */ |
---|
1112 | 1278 | fpsimd_save(); |
---|
1113 | 1279 | |
---|
1114 | 1280 | /* Invalidate any task state remaining in the fpsimd regs: */ |
---|
1115 | 1281 | fpsimd_flush_cpu_state(); |
---|
1116 | | - |
---|
1117 | | - preempt_disable(); |
---|
1118 | | - |
---|
1119 | | - local_bh_enable(); |
---|
1120 | 1282 | } |
---|
1121 | 1283 | EXPORT_SYMBOL(kernel_neon_begin); |
---|
1122 | 1284 | |
---|
.. | .. |
---|
1131 | 1293 | */ |
---|
1132 | 1294 | void kernel_neon_end(void) |
---|
1133 | 1295 | { |
---|
1134 | | - bool busy; |
---|
1135 | | - |
---|
1136 | 1296 | if (!system_supports_fpsimd()) |
---|
1137 | 1297 | return; |
---|
1138 | 1298 | |
---|
1139 | | - busy = __this_cpu_xchg(kernel_neon_busy, false); |
---|
1140 | | - WARN_ON(!busy); /* No matching kernel_neon_begin()? */ |
---|
1141 | | - |
---|
1142 | | - preempt_enable(); |
---|
| 1299 | + put_cpu_fpsimd_context(); |
---|
1143 | 1300 | } |
---|
1144 | 1301 | EXPORT_SYMBOL(kernel_neon_end); |
---|
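
kernel_neon_begin()/kernel_neon_end() keep their external contract; only the bookkeeping underneath moves from the old kernel_neon_busy flag to the shared fpsimd_context_busy flag. For reference, a typical caller still follows the usual <asm/neon.h>/<asm/simd.h> pattern; the sketch below is illustrative only, not part of the patch.

```c
#include <asm/neon.h>
#include <asm/simd.h>

static void example_simd_accelerated_op(void)
{
	if (!may_use_simd()) {
		/* SIMD not usable in this context: take a scalar fallback */
		return;
	}

	kernel_neon_begin();	/* claims the CPU FPSIMD context, saving any user state */
	/* ... NEON/ASIMD-using code runs here ... */
	kernel_neon_end();	/* releases the context again */
}
```

may_use_simd() is what keeps such callers out of contexts where the FPSIMD context is unavailable, which is presumably why fpsimd_context_busy is exported earlier in the patch.
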
1145 | 1302 | |
---|
.. | .. |
---|
1231 | 1388 | { |
---|
1232 | 1389 | switch (cmd) { |
---|
1233 | 1390 | case CPU_PM_ENTER: |
---|
1234 | | - fpsimd_save(); |
---|
1235 | | - fpsimd_flush_cpu_state(); |
---|
| 1391 | + fpsimd_save_and_flush_cpu_state(); |
---|
1236 | 1392 | break; |
---|
1237 | 1393 | case CPU_PM_EXIT: |
---|
1238 | 1394 | break; |
---|
.. | .. |
---|
1278 | 1434 | */ |
---|
1279 | 1435 | static int __init fpsimd_init(void) |
---|
1280 | 1436 | { |
---|
1281 | | - if (elf_hwcap & HWCAP_FP) { |
---|
| 1437 | + if (cpu_have_named_feature(FP)) { |
---|
1282 | 1438 | fpsimd_pm_init(); |
---|
1283 | 1439 | fpsimd_hotplug_init(); |
---|
1284 | 1440 | } else { |
---|
1285 | 1441 | pr_notice("Floating-point is not implemented\n"); |
---|
1286 | 1442 | } |
---|
1287 | 1443 | |
---|
1288 | | - if (!(elf_hwcap & HWCAP_ASIMD)) |
---|
| 1444 | + if (!cpu_have_named_feature(ASIMD)) |
---|
1289 | 1445 | pr_notice("Advanced SIMD is not implemented\n"); |
---|
1290 | 1446 | |
---|
1291 | 1447 | return sve_sysctl_init(); |
---|