```diff
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * FP/SIMD context switching and fault handling
  *
  * Copyright (C) 2012 ARM Ltd.
  * Author: Catalin Marinas <catalin.marinas@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
 #include <linux/bitmap.h>
+#include <linux/bitops.h>
 #include <linux/bottom_half.h>
 #include <linux/bug.h>
 #include <linux/cache.h>
 #include <linux/compat.h>
+#include <linux/compiler.h>
 #include <linux/cpu.h>
 #include <linux/cpu_pm.h>
 #include <linux/kernel.h>
```

```diff
 #include <linux/slab.h>
 #include <linux/stddef.h>
 #include <linux/sysctl.h>
+#include <linux/swab.h>
 
 #include <asm/esr.h>
+#include <asm/exception.h>
 #include <asm/fpsimd.h>
 #include <asm/cpufeature.h>
 #include <asm/cputype.h>
+#include <asm/neon.h>
 #include <asm/processor.h>
 #include <asm/simd.h>
 #include <asm/sigcontext.h>
 #include <asm/sysreg.h>
 #include <asm/traps.h>
+#include <asm/virt.h>
 
 #define FPEXC_IOF	(1 << 0)
 #define FPEXC_DZF	(1 << 1)
```
```diff
  * To prevent this from racing with the manipulation of the task's FPSIMD state
  * from task context and thereby corrupting the state, it is necessary to
  * protect any manipulation of a task's fpsimd_state or TIF_FOREIGN_FPSTATE
- * flag with local_bh_disable() unless softirqs are already masked.
+ * flag with {, __}get_cpu_fpsimd_context(). This will still allow softirqs to
+ * run but prevent them from using FPSIMD.
  *
  * For a certain task, the sequence may look something like this:
  * - the task gets scheduled in; if both the task's fpsimd_cpu field
```
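The rule above is the core of the patch. As a hedged sketch (the function name is invented; the pattern is the one the hunks below apply to real call sites), a caller now brackets any FPSIMD-state manipulation like this:

```c
/*
 * Illustrative only: a hypothetical call site following the new rule.
 * get_cpu_fpsimd_context() disables softirqs locally and marks the
 * CPU's FPSIMD context busy; the bare __get_cpu_fpsimd_context() (used
 * where preemption is already disabled) leaves softirqs running, but
 * any softirq that checks may_use_simd() sees the busy flag and takes
 * a non-SIMD fallback path instead of racing on the registers.
 */
static void example_manipulate_fpsimd_state(void)
{
	get_cpu_fpsimd_context();	/* was: local_bh_disable() */

	fpsimd_save();			/* safely touch the task's state */

	put_cpu_fpsimd_context();	/* was: local_bh_enable() */
}
```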
```diff
  */
 struct fpsimd_last_state_struct {
 	struct user_fpsimd_state *st;
+	void *sve_state;
+	unsigned int sve_vl;
 };
 
 static DEFINE_PER_CPU(struct fpsimd_last_state_struct, fpsimd_last_state);
 
 /* Default VL for tasks that don't set it explicitly: */
-static int sve_default_vl = -1;
+static int __sve_default_vl = -1;
+
+static int get_sve_default_vl(void)
+{
+	return READ_ONCE(__sve_default_vl);
+}
 
 #ifdef CONFIG_ARM64_SVE
 
+static void set_sve_default_vl(int val)
+{
+	WRITE_ONCE(__sve_default_vl, val);
+}
+
 /* Maximum supported vector length across all CPUs (initially poisoned) */
 int __ro_after_init sve_max_vl = SVE_VL_MIN;
-/* Set of available vector lengths, as vq_to_bit(vq): */
-static __ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX);
+int __ro_after_init sve_max_virtualisable_vl = SVE_VL_MIN;
+
+/*
+ * Set of available vector lengths,
+ * where length vq encoded as bit __vq_to_bit(vq):
+ */
+__ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX);
+/* Set of vector lengths present on at least one cpu: */
+static __ro_after_init DECLARE_BITMAP(sve_vq_partial_map, SVE_VQ_MAX);
+
 static void __percpu *efi_sve_state;
 
 #else /* ! CONFIG_ARM64_SVE */
 
 /* Dummy declaration for code that will be optimised out: */
 extern __ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX);
+extern __ro_after_init DECLARE_BITMAP(sve_vq_partial_map, SVE_VQ_MAX);
 extern void __percpu *efi_sve_state;
 
 #endif /* ! CONFIG_ARM64_SVE */
+
+DEFINE_PER_CPU(bool, fpsimd_context_busy);
+EXPORT_PER_CPU_SYMBOL(fpsimd_context_busy);
+
+static void __get_cpu_fpsimd_context(void)
+{
+	bool busy = __this_cpu_xchg(fpsimd_context_busy, true);
+
+	WARN_ON(busy);
+}
+
+/*
+ * Claim ownership of the CPU FPSIMD context for use by the calling context.
+ *
+ * The caller may freely manipulate the FPSIMD context metadata until
+ * put_cpu_fpsimd_context() is called.
+ *
+ * The double-underscore version must only be called if you know the task
+ * can't be preempted.
+ */
+static void get_cpu_fpsimd_context(void)
+{
+	local_bh_disable();
+	__get_cpu_fpsimd_context();
+}
+
+static void __put_cpu_fpsimd_context(void)
+{
+	bool busy = __this_cpu_xchg(fpsimd_context_busy, false);
+
+	WARN_ON(!busy); /* No matching get_cpu_fpsimd_context()? */
+}
+
+/*
+ * Release the CPU FPSIMD context.
+ *
+ * Must be called from a context in which get_cpu_fpsimd_context() was
+ * previously called, with no call to put_cpu_fpsimd_context() in the
+ * meantime.
+ */
+static void put_cpu_fpsimd_context(void)
+{
+	__put_cpu_fpsimd_context();
+	local_bh_enable();
+}
+
+static bool have_cpu_fpsimd_context(void)
+{
+	return !preemptible() && __this_cpu_read(fpsimd_context_busy);
+}
 
 /*
  * Call __sve_free() directly only if you know task can't be scheduled
```
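The claim/release pair above is essentially a non-sleeping per-CPU lock built on an atomic exchange. A minimal userspace model of the protocol, assuming a single CPU and using a plain variable plus assert() in place of __this_cpu_xchg() and WARN_ON():

```c
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Models the per-CPU fpsimd_context_busy flag for a single CPU. */
static bool fpsimd_context_busy;

/* Models __this_cpu_xchg(): store the new value, return the old one. */
static bool this_cpu_xchg(bool *flag, bool val)
{
	bool old = *flag;
	*flag = val;
	return old;
}

static void get_ctx(void)
{
	/* Nested claims are a bug: the old value must have been false. */
	assert(!this_cpu_xchg(&fpsimd_context_busy, true));
}

static void put_ctx(void)
{
	/* Releasing without a matching claim is a bug. */
	assert(this_cpu_xchg(&fpsimd_context_busy, false));
}

int main(void)
{
	get_ctx();
	/* ... touch FPSIMD state; a softirq seeing busy==true must defer ... */
	put_ctx();
	printf("claim/release protocol ok\n");
	return 0;
}
```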
```diff
 	WARN_ON(test_tsk_thread_flag(task, TIF_SVE));
 
 	__sve_free(task);
-}
-
-static void *sve_free_atomic(struct task_struct *task)
-{
-	void *sve_state = task->thread.sve_state;
-
-	WARN_ON(test_tsk_thread_flag(task, TIF_SVE));
-
-	task->thread.sve_state = NULL;
-	return sve_state;
 }
 
 /*
```

```diff
  * This function should be called only when the FPSIMD/SVE state in
  * thread_struct is known to be up to date, when preparing to enter
  * userspace.
- *
- * Softirqs (and preemption) must be disabled.
  */
 static void task_fpsimd_load(void)
 {
-	WARN_ON(!in_softirq() && !irqs_disabled());
 	WARN_ON(!system_supports_fpsimd());
+	WARN_ON(!have_cpu_fpsimd_context());
 
 	if (system_supports_sve() && test_thread_flag(TIF_SVE))
 		sve_load_state(sve_pffr(&current->thread),
```

```diff
 /*
  * Ensure FPSIMD/SVE storage in memory for the loaded context is up to
  * date with respect to the CPU registers.
- *
- * Softirqs (and preemption) must be disabled.
  */
-void fpsimd_save(void)
+static void fpsimd_save(void)
 {
-	struct user_fpsimd_state *st = __this_cpu_read(fpsimd_last_state.st);
+	struct fpsimd_last_state_struct const *last =
+		this_cpu_ptr(&fpsimd_last_state);
 	/* set by fpsimd_bind_task_to_cpu() or fpsimd_bind_state_to_cpu() */
 
 	WARN_ON(!system_supports_fpsimd());
-	WARN_ON(!in_softirq() && !irqs_disabled());
+	WARN_ON(!have_cpu_fpsimd_context());
 
 	if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) {
 		if (system_supports_sve() && test_thread_flag(TIF_SVE)) {
-			if (WARN_ON(sve_get_vl() != current->thread.sve_vl)) {
+			if (WARN_ON(sve_get_vl() != last->sve_vl)) {
 				/*
 				 * Can't save the user regs, so current would
 				 * re-enter user with corrupt state.
 				 * There's no way to recover, so kill it:
 				 */
-				force_signal_inject(SIGKILL, SI_KERNEL, 0);
+				force_signal_inject(SIGKILL, SI_KERNEL, 0, 0);
 				return;
 			}
 
-			sve_save_state(sve_pffr(&current->thread), &st->fpsr);
+			sve_save_state((char *)last->sve_state +
+						sve_ffr_offset(last->sve_vl),
+				       &last->st->fpsr);
 		} else
-			fpsimd_save_state(st);
+			fpsimd_save_state(last->st);
 	}
-}
-
-/*
- * Helpers to translate bit indices in sve_vq_map to VQ values (and
- * vice versa).  This allows find_next_bit() to be used to find the
- * _maximum_ VQ not exceeding a certain value.
- */
-
-static unsigned int vq_to_bit(unsigned int vq)
-{
-	return SVE_VQ_MAX - vq;
-}
-
-static unsigned int bit_to_vq(unsigned int bit)
-{
-	if (WARN_ON(bit >= SVE_VQ_MAX))
-		bit = SVE_VQ_MAX - 1;
-
-	return SVE_VQ_MAX - bit;
-}
 }
 
 /*
```
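The removed vq_to_bit()/bit_to_vq() helpers are not gone: call sites below use the renamed __vq_to_bit()/__bit_to_vq(), which this patch relocates out of this file. The encoding stores VQ vq at bit SVE_VQ_MAX - vq, so a forward find_next_bit() search walks VQs in decreasing order and the first set bit it meets is the largest supported VQ not exceeding the starting one. A standalone illustration of the arithmetic:

```c
#include <stdio.h>

#define SVE_VQ_MIN 1
#define SVE_VQ_MAX 512	/* same value as the kernel's sigcontext headers */

/* Same arithmetic as the kernel's __vq_to_bit()/__bit_to_vq(). */
static unsigned int vq_to_bit(unsigned int vq)  { return SVE_VQ_MAX - vq; }
static unsigned int bit_to_vq(unsigned int bit) { return SVE_VQ_MAX - bit; }

int main(void)
{
	/*
	 * Larger VQs map to smaller bit indices, so a forward bit search
	 * starting at vq_to_bit(vq) visits VQs in decreasing order: the
	 * first set bit found is the maximum supported VQ <= vq.
	 */
	printf("VQ %u -> bit %u\n", SVE_VQ_MAX, vq_to_bit(SVE_VQ_MAX)); /* 0 */
	printf("VQ %u -> bit %u\n", SVE_VQ_MIN, vq_to_bit(SVE_VQ_MIN)); /* 511 */
	printf("bit 0 -> VQ %u\n", bit_to_vq(0));			/* 512 */
	return 0;
}
```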
```diff
 		vl = max_vl;
 
 	bit = find_next_bit(sve_vq_map, SVE_VQ_MAX,
-			    vq_to_bit(sve_vq_from_vl(vl)));
-	return sve_vl_from_vq(bit_to_vq(bit));
+			    __vq_to_bit(sve_vq_from_vl(vl)));
+	return sve_vl_from_vq(__bit_to_vq(bit));
 }
 
 #if defined(CONFIG_ARM64_SVE) && defined(CONFIG_SYSCTL)
 
 static int sve_proc_do_default_vl(struct ctl_table *table, int write,
-				  void __user *buffer, size_t *lenp,
-				  loff_t *ppos)
+				  void *buffer, size_t *lenp, loff_t *ppos)
 {
 	int ret;
-	int vl = sve_default_vl;
+	int vl = get_sve_default_vl();
 	struct ctl_table tmp_table = {
 		.data = &vl,
 		.maxlen = sizeof(vl),
```

```diff
 	if (!sve_vl_valid(vl))
 		return -EINVAL;
 
-	sve_default_vl = find_supported_vector_length(vl);
+	set_sve_default_vl(find_supported_vector_length(vl));
 	return 0;
 }
 
```

```diff
 #define ZREG(sve_state, vq, n) ((char *)(sve_state) +		\
 	(SVE_SIG_ZREG_OFFSET(vq, n) - SVE_SIG_REGS_OFFSET))
 
+#ifdef CONFIG_CPU_BIG_ENDIAN
+static __uint128_t arm64_cpu_to_le128(__uint128_t x)
+{
+	u64 a = swab64(x);
+	u64 b = swab64(x >> 64);
+
+	return ((__uint128_t)a << 64) | b;
+}
+#else
+static __uint128_t arm64_cpu_to_le128(__uint128_t x)
+{
+	return x;
+}
+#endif
+
+#define arm64_le128_to_cpu(x) arm64_cpu_to_le128(x)
+
+static void __fpsimd_to_sve(void *sst, struct user_fpsimd_state const *fst,
+			    unsigned int vq)
+{
+	unsigned int i;
+	__uint128_t *p;
+
+	for (i = 0; i < SVE_NUM_ZREGS; ++i) {
+		p = (__uint128_t *)ZREG(sst, vq, i);
+		*p = arm64_cpu_to_le128(fst->vregs[i]);
+	}
+}
+
 /*
  * Transfer the FPSIMD state in task->thread.uw.fpsimd_state to
  * task->thread.sve_state.
  *
  * Task can be a non-runnable task, or current.  In the latter case,
- * softirqs (and preemption) must be disabled.
+ * the caller must have ownership of the cpu FPSIMD context before calling
+ * this function.
  * task->thread.sve_state must point to at least sve_state_size(task)
  * bytes of allocated kernel memory.
  * task->thread.uw.fpsimd_state must be up to date before calling this
```
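__fpsimd_to_sve() above writes each 128-bit V-register into the Z-register array, whose in-memory format is little-endian; on a big-endian kernel the two 64-bit halves are each byte-swapped and exchanged. A standalone sanity check of that construction, assuming a GCC/Clang host with __int128 and using __builtin_bswap64() in place of the kernel's swab64():

```c
#include <stdint.h>
#include <stdio.h>

typedef unsigned __int128 u128;

/* Same construction as the kernel's arm64_cpu_to_le128() on big-endian. */
static u128 cpu_to_le128(u128 x)
{
	uint64_t a = __builtin_bswap64((uint64_t)x);		/* swab64(x) */
	uint64_t b = __builtin_bswap64((uint64_t)(x >> 64));	/* swab64(x >> 64) */

	return ((u128)a << 64) | b;
}

int main(void)
{
	u128 x = ((u128)0x0123456789abcdefULL << 64) | 0xfedcba9876543210ULL;
	u128 y = cpu_to_le128(cpu_to_le128(x));	/* the swap is an involution */

	printf("round-trip %s\n", x == y ? "ok" : "BROKEN");
	return 0;
}
```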
```diff
 	unsigned int vq;
 	void *sst = task->thread.sve_state;
 	struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state;
-	unsigned int i;
 
 	if (!system_supports_sve())
 		return;
 
 	vq = sve_vq_from_vl(task->thread.sve_vl);
-	for (i = 0; i < 32; ++i)
-		memcpy(ZREG(sst, vq, i), &fst->vregs[i],
-		       sizeof(fst->vregs[i]));
+	__fpsimd_to_sve(sst, fst, vq);
 }
 
 /*
```

```diff
  * task->thread.uw.fpsimd_state.
  *
  * Task can be a non-runnable task, or current.  In the latter case,
- * softirqs (and preemption) must be disabled.
+ * the caller must have ownership of the cpu FPSIMD context before calling
+ * this function.
  * task->thread.sve_state must point to at least sve_state_size(task)
  * bytes of allocated kernel memory.
  * task->thread.sve_state must be up to date before calling this function.
```

```diff
 	void const *sst = task->thread.sve_state;
 	struct user_fpsimd_state *fst = &task->thread.uw.fpsimd_state;
 	unsigned int i;
+	__uint128_t const *p;
 
 	if (!system_supports_sve())
 		return;
 
 	vq = sve_vq_from_vl(task->thread.sve_vl);
-	for (i = 0; i < 32; ++i)
-		memcpy(&fst->vregs[i], ZREG(sst, vq, i),
-		       sizeof(fst->vregs[i]));
+	for (i = 0; i < SVE_NUM_ZREGS; ++i) {
+		p = (__uint128_t const *)ZREG(sst, vq, i);
+		fst->vregs[i] = arm64_le128_to_cpu(*p);
+	}
 }
 
 #ifdef CONFIG_ARM64_SVE
```

```diff
 	unsigned int vq;
 	void *sst = task->thread.sve_state;
 	struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state;
-	unsigned int i;
 
 	if (!test_tsk_thread_flag(task, TIF_SVE))
 		return;
```

```diff
 	vq = sve_vq_from_vl(task->thread.sve_vl);
 
 	memset(sst, 0, SVE_SIG_REGS_SIZE(vq));
-
-	for (i = 0; i < 32; ++i)
-		memcpy(ZREG(sst, vq, i), &fst->vregs[i],
-		       sizeof(fst->vregs[i]));
+	__fpsimd_to_sve(sst, fst, vq);
 }
 
 int sve_set_vector_length(struct task_struct *task,
```

```diff
 	 * non-SVE thread.
 	 */
 	if (task == current) {
-		preempt_disable();
-		local_bh_disable();
+		get_cpu_fpsimd_context();
 
 		fpsimd_save();
-		set_thread_flag(TIF_FOREIGN_FPSTATE);
 	}
 
 	fpsimd_flush_task_state(task);
 	if (test_and_clear_tsk_thread_flag(task, TIF_SVE))
 		sve_to_fpsimd(task);
 
-	if (task == current) {
-		local_bh_enable();
-		preempt_enable();
-	}
+	if (task == current)
+		put_cpu_fpsimd_context();
 
 	/*
 	 * Force reallocation of task SVE state to the correct size
```

```diff
 	vl = arg & PR_SVE_VL_LEN_MASK;
 	flags = arg & ~vl;
 
-	if (!system_supports_sve())
+	if (!system_supports_sve() || is_compat_task())
 		return -EINVAL;
 
 	ret = sve_set_vector_length(current, vl, flags);
```

```diff
 /* PR_SVE_GET_VL */
 int sve_get_current_vl(void)
 {
-	if (!system_supports_sve())
+	if (!system_supports_sve() || is_compat_task())
 		return -EINVAL;
 
 	return sve_prctl_status(0);
 }
-
-/*
- * Bitmap for temporary storage of the per-CPU set of supported vector lengths
- * during secondary boot.
- */
-static DECLARE_BITMAP(sve_secondary_vq_map, SVE_VQ_MAX);
 
 static void sve_probe_vqs(DECLARE_BITMAP(map, SVE_VQ_MAX))
 {
```

```diff
 		write_sysreg_s(zcr | (vq - 1), SYS_ZCR_EL1); /* self-syncing */
 		vl = sve_get_vl();
 		vq = sve_vq_from_vl(vl); /* skip intervening lengths */
-		set_bit(vq_to_bit(vq), map);
+		set_bit(__vq_to_bit(vq), map);
 	}
 }
 
+/*
+ * Initialise the set of known supported VQs for the boot CPU.
+ * This is called during kernel boot, before secondary CPUs are brought up.
+ */
 void __init sve_init_vq_map(void)
 {
 	sve_probe_vqs(sve_vq_map);
+	bitmap_copy(sve_vq_partial_map, sve_vq_map, SVE_VQ_MAX);
 }
 
 /*
  * If we haven't committed to the set of supported VQs yet, filter out
  * those not supported by the current CPU.
+ * This function is called during the bring-up of early secondary CPUs only.
  */
 void sve_update_vq_map(void)
 {
-	sve_probe_vqs(sve_secondary_vq_map);
-	bitmap_and(sve_vq_map, sve_vq_map, sve_secondary_vq_map, SVE_VQ_MAX);
+	DECLARE_BITMAP(tmp_map, SVE_VQ_MAX);
+
+	sve_probe_vqs(tmp_map);
+	bitmap_and(sve_vq_map, sve_vq_map, tmp_map, SVE_VQ_MAX);
+	bitmap_or(sve_vq_partial_map, sve_vq_partial_map, tmp_map, SVE_VQ_MAX);
 }
 
-/* Check whether the current CPU supports all VQs in the committed set */
+/*
+ * Check whether the current CPU supports all VQs in the committed set.
+ * This function is called during the bring-up of late secondary CPUs only.
+ */
 int sve_verify_vq_map(void)
 {
-	int ret = 0;
+	DECLARE_BITMAP(tmp_map, SVE_VQ_MAX);
+	unsigned long b;
 
-	sve_probe_vqs(sve_secondary_vq_map);
-	bitmap_andnot(sve_secondary_vq_map, sve_vq_map, sve_secondary_vq_map,
-		      SVE_VQ_MAX);
-	if (!bitmap_empty(sve_secondary_vq_map, SVE_VQ_MAX)) {
+	sve_probe_vqs(tmp_map);
+
+	bitmap_complement(tmp_map, tmp_map, SVE_VQ_MAX);
+	if (bitmap_intersects(tmp_map, sve_vq_map, SVE_VQ_MAX)) {
 		pr_warn("SVE: cpu%d: Required vector length(s) missing\n",
 			smp_processor_id());
-		ret = -EINVAL;
+		return -EINVAL;
 	}
 
-	return ret;
+	if (!IS_ENABLED(CONFIG_KVM) || !is_hyp_mode_available())
+		return 0;
+
+	/*
+	 * For KVM, it is necessary to ensure that this CPU doesn't
+	 * support any vector length that guests may have probed as
+	 * unsupported.
+	 */
+
+	/* Recover the set of supported VQs: */
+	bitmap_complement(tmp_map, tmp_map, SVE_VQ_MAX);
+	/* Find VQs supported that are not globally supported: */
+	bitmap_andnot(tmp_map, tmp_map, sve_vq_map, SVE_VQ_MAX);
+
+	/* Find the lowest such VQ, if any: */
+	b = find_last_bit(tmp_map, SVE_VQ_MAX);
+	if (b >= SVE_VQ_MAX)
+		return 0; /* no mismatches */
+
+	/*
+	 * Mismatches above sve_max_virtualisable_vl are fine, since
+	 * no guest is allowed to configure ZCR_EL2.LEN to exceed this:
+	 */
+	if (sve_vl_from_vq(__bit_to_vq(b)) <= sve_max_virtualisable_vl) {
+		pr_warn("SVE: cpu%d: Unsupported vector length(s) present\n",
+			smp_processor_id());
+		return -EINVAL;
+	}
+
+	return 0;
 }
 
 static void __init sve_efi_setup(void)
```
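The reworked sve_verify_vq_map() above runs two bitmap tests: complementing the probed map gives the VQs this CPU lacks, and any intersection with the committed sve_vq_map is fatal; for KVM it then looks for VQs this CPU supports that are not global. A toy model with 16-bit words standing in for the SVE_VQ_MAX-bit bitmaps (all values invented):

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Bit n set => the VQ encoded at bit n is supported. */
	uint16_t sve_vq_map = 0x0030;	/* VQs every early CPU supported */
	uint16_t probed     = 0x0071;	/* VQs this late CPU supports    */

	/* bitmap_complement(): the VQs this CPU does *not* support. */
	uint16_t unsupported = (uint16_t)~probed;

	/* bitmap_intersects(): any committed VQ missing here is fatal. */
	if (unsupported & sve_vq_map)
		printf("required vector length(s) missing -> -EINVAL\n");
	else
		printf("all committed vector lengths present\n");

	/* bitmap_andnot(): VQs supported here but not globally (KVM check). */
	uint16_t extra = probed & (uint16_t)~sve_vq_map;

	printf("locally-only VQ bits: 0x%04x\n", extra);	/* 0x0041 */
	return 0;
}
```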
```diff
 void __init sve_setup(void)
 {
 	u64 zcr;
+	DECLARE_BITMAP(tmp_map, SVE_VQ_MAX);
+	unsigned long b;
 
 	if (!system_supports_sve())
 		return;
```

```diff
 	 * so sve_vq_map must have at least SVE_VQ_MIN set.
 	 * If something went wrong, at least try to patch it up:
 	 */
-	if (WARN_ON(!test_bit(vq_to_bit(SVE_VQ_MIN), sve_vq_map)))
-		set_bit(vq_to_bit(SVE_VQ_MIN), sve_vq_map);
+	if (WARN_ON(!test_bit(__vq_to_bit(SVE_VQ_MIN), sve_vq_map)))
+		set_bit(__vq_to_bit(SVE_VQ_MIN), sve_vq_map);
 
 	zcr = read_sanitised_ftr_reg(SYS_ZCR_EL1);
 	sve_max_vl = sve_vl_from_vq((zcr & ZCR_ELx_LEN_MASK) + 1);
```

```diff
 	 * For the default VL, pick the maximum supported value <= 64.
 	 * VL == 64 is guaranteed not to grow the signal frame.
 	 */
-	sve_default_vl = find_supported_vector_length(64);
+	set_sve_default_vl(find_supported_vector_length(64));
+
+	bitmap_andnot(tmp_map, sve_vq_partial_map, sve_vq_map,
+		      SVE_VQ_MAX);
+
+	b = find_last_bit(tmp_map, SVE_VQ_MAX);
+	if (b >= SVE_VQ_MAX)
+		/* No non-virtualisable VLs found */
+		sve_max_virtualisable_vl = SVE_VQ_MAX;
+	else if (WARN_ON(b == SVE_VQ_MAX - 1))
+		/* No virtualisable VLs?  This is architecturally forbidden. */
+		sve_max_virtualisable_vl = SVE_VQ_MIN;
+	else /* b + 1 < SVE_VQ_MAX */
+		sve_max_virtualisable_vl = sve_vl_from_vq(__bit_to_vq(b + 1));
+
+	if (sve_max_virtualisable_vl > sve_max_vl)
+		sve_max_virtualisable_vl = sve_max_vl;
 
 	pr_info("SVE: maximum available vector length %u bytes per vector\n",
 		sve_max_vl);
 	pr_info("SVE: default vector length %u bytes per vector\n",
-		sve_default_vl);
+		get_sve_default_vl());
+
+	/* KVM decides whether to support mismatched systems. Just warn here: */
+	if (sve_max_virtualisable_vl < sve_max_vl)
+		pr_warn("SVE: unvirtualisable vector lengths present\n");
 
 	sve_efi_setup();
 }
```
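The new logic in sve_setup() above computes sve_max_virtualisable_vl: bits set in sve_vq_partial_map but not in sve_vq_map are lengths only some CPUs have. Since larger VQs sit at smaller bit indices, find_last_bit() returns the smallest mismatched VQ, and bit b + 1 encodes the next smaller VQ, i.e. the largest length below every mismatch. A toy recreation (find_last_bit() emulated on one word; values invented):

```c
#include <stdint.h>
#include <stdio.h>

#define VQ_BITS 16	/* toy stand-in for SVE_VQ_MAX */

static unsigned int bit_to_vq(unsigned int bit) { return VQ_BITS - bit; }

/* Emulates find_last_bit(): highest set bit, or the size if none set. */
static unsigned int find_last_bit16(uint16_t map)
{
	for (int b = VQ_BITS - 1; b >= 0; b--)
		if (map & (1u << b))
			return (unsigned int)b;
	return VQ_BITS;
}

int main(void)
{
	uint16_t vq_map         = 0x9000;	/* VQs on every CPU (toy) */
	uint16_t vq_partial_map = 0x9400;	/* VQs on at least one CPU */

	uint16_t tmp = vq_partial_map & (uint16_t)~vq_map; /* mismatches */
	unsigned int b = find_last_bit16(tmp);

	if (b >= VQ_BITS)
		printf("no mismatches: all lengths virtualisable\n");
	else
		printf("max virtualisable VQ is %u (bit %u + 1)\n",
		       bit_to_vq(b + 1), b);	/* prints VQ 5, bit 10 */
	return 0;
}
```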
```diff
  * the SVE access trap will be disabled the next time this task
  * reaches ret_to_user.
  *
- * TIF_SVE should be clear on entry: otherwise, task_fpsimd_load()
+ * TIF_SVE should be clear on entry: otherwise, fpsimd_restore_current_state()
  * would have disabled the SVE access trap for userspace during
  * ret_to_user, making an SVE access trap impossible in that case.
  */
-asmlinkage void do_sve_acc(unsigned int esr, struct pt_regs *regs)
+void do_sve_acc(unsigned int esr, struct pt_regs *regs)
 {
 	/* Even if we chose not to use SVE, the hardware could still trap: */
 	if (unlikely(!system_supports_sve()) || WARN_ON(is_compat_task())) {
-		force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc);
+		force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
 		return;
 	}
 
 	sve_alloc(current);
 
-	preempt_disable();
-	local_bh_disable();
+	get_cpu_fpsimd_context();
 
 	fpsimd_save();
-	fpsimd_to_sve(current);
 
 	/* Force ret_to_user to reload the registers: */
 	fpsimd_flush_task_state(current);
-	set_thread_flag(TIF_FOREIGN_FPSTATE);
 
+	fpsimd_to_sve(current);
 	if (test_and_set_thread_flag(TIF_SVE))
 		WARN_ON(1); /* SVE access shouldn't have trapped */
 
-	local_bh_enable();
-	preempt_enable();
+	put_cpu_fpsimd_context();
 }
 
 /*
  * Trapped FP/ASIMD access.
  */
-asmlinkage void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs)
+void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs)
 {
 	/* TODO: implement lazy context saving/restoring */
 	WARN_ON(1);
```

```diff
 /*
  * Raise a SIGFPE for the current process.
  */
-asmlinkage void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs)
+void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs)
 {
-	siginfo_t info;
 	unsigned int si_code = FPE_FLTUNK;
 
 	if (esr & ESR_ELx_FP_EXC_TFV) {
```

```diff
 			si_code = FPE_FLTRES;
 	}
 
-	clear_siginfo(&info);
-	info.si_signo = SIGFPE;
-	info.si_code = si_code;
-	info.si_addr = (void __user *)instruction_pointer(regs);
-
-	send_sig_info(SIGFPE, &info, current);
+	send_sig_fault(SIGFPE, si_code,
+		       (void __user *)instruction_pointer(regs),
+		       current);
 }
 
 void fpsimd_thread_switch(struct task_struct *next)
```

```diff
 
 	if (!system_supports_fpsimd())
 		return;
+
+	__get_cpu_fpsimd_context();
 
 	/* Save unsaved fpsimd state, if any: */
 	fpsimd_save();
```

```diff
 
 	update_tsk_thread_flag(next, TIF_FOREIGN_FPSTATE,
 			       wrong_task || wrong_cpu);
+
+	__put_cpu_fpsimd_context();
 }
 
 void fpsimd_flush_thread(void)
 {
 	int vl, supported_vl;
-	void *mem = NULL;
 
 	if (!system_supports_fpsimd())
 		return;
 
-	preempt_disable();
-	local_bh_disable();
+	get_cpu_fpsimd_context();
 
+	fpsimd_flush_task_state(current);
 	memset(&current->thread.uw.fpsimd_state, 0,
 	       sizeof(current->thread.uw.fpsimd_state));
-	fpsimd_flush_task_state(current);
 
 	if (system_supports_sve()) {
 		clear_thread_flag(TIF_SVE);
-		mem = sve_free_atomic(current);
+		sve_free(current);
 
 		/*
 		 * Reset the task vector length as required.
```

```diff
 		 * vector length configured: no kernel task can become a user
 		 * task without an exec and hence a call to this function.
 		 * By the time the first call to this function is made, all
-		 * early hardware probing is complete, so sve_default_vl
+		 * early hardware probing is complete, so __sve_default_vl
 		 * should be valid.
 		 * If a bug causes this to go wrong, we make some noise and
 		 * try to fudge thread.sve_vl to a safe value here.
 		 */
 		vl = current->thread.sve_vl_onexec ?
-			current->thread.sve_vl_onexec : sve_default_vl;
+			current->thread.sve_vl_onexec : get_sve_default_vl();
 
 		if (WARN_ON(!sve_vl_valid(vl)))
 			vl = SVE_VL_MIN;
```

```diff
 		current->thread.sve_vl_onexec = 0;
 	}
 
-	set_thread_flag(TIF_FOREIGN_FPSTATE);
-
-	local_bh_enable();
-	preempt_enable();
-	kfree(mem);
+	put_cpu_fpsimd_context();
 }
 
 /*
```

```diff
 	if (!system_supports_fpsimd())
 		return;
 
-	preempt_disable();
-	local_bh_disable();
+	get_cpu_fpsimd_context();
 	fpsimd_save();
-	local_bh_enable();
-	preempt_enable();
+	put_cpu_fpsimd_context();
 }
 
 /*
```

```diff
 
 /*
  * Associate current's FPSIMD context with this cpu
- * Preemption must be disabled when calling this function.
+ * The caller must have ownership of the cpu FPSIMD context before calling
+ * this function.
  */
 void fpsimd_bind_task_to_cpu(void)
 {
```

```diff
 
 	WARN_ON(!system_supports_fpsimd());
 	last->st = &current->thread.uw.fpsimd_state;
+	last->sve_state = current->thread.sve_state;
+	last->sve_vl = current->thread.sve_vl;
 	current->thread.fpsimd_cpu = smp_processor_id();
 
 	if (system_supports_sve()) {
```

```diff
 	}
 }
 
-void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st)
+void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state,
+			      unsigned int sve_vl)
 {
 	struct fpsimd_last_state_struct *last =
 		this_cpu_ptr(&fpsimd_last_state);
```

```diff
 	WARN_ON(!in_softirq() && !irqs_disabled());
 
 	last->st = st;
+	last->sve_state = sve_state;
+	last->sve_vl = sve_vl;
 }
 
 /*
```

```diff
 		return;
 	}
 
-	preempt_disable();
-	local_bh_disable();
+	get_cpu_fpsimd_context();
 
 	if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) {
 		task_fpsimd_load();
 		fpsimd_bind_task_to_cpu();
 	}
 
-	local_bh_enable();
-	preempt_enable();
+	put_cpu_fpsimd_context();
 }
 
```

```diff
 	if (WARN_ON(!system_supports_fpsimd()))
 		return;
 
-	preempt_disable();
-	local_bh_disable();
+	get_cpu_fpsimd_context();
 
 	current->thread.uw.fpsimd_state = *state;
 	if (system_supports_sve() && test_thread_flag(TIF_SVE))
```

```diff
 
 	clear_thread_flag(TIF_FOREIGN_FPSTATE);
 
-	local_bh_enable();
-	preempt_enable();
+	put_cpu_fpsimd_context();
 }
 
 /*
  * Invalidate live CPU copies of task t's FPSIMD state
+ *
+ * This function may be called with preemption enabled.  The barrier()
+ * ensures that the assignment to fpsimd_cpu is visible to any
+ * preemption/softirq that could race with set_tsk_thread_flag(), so
+ * that TIF_FOREIGN_FPSTATE cannot be spuriously re-cleared.
+ *
+ * The final barrier ensures that TIF_FOREIGN_FPSTATE is seen set by any
+ * subsequent code.
  */
 void fpsimd_flush_task_state(struct task_struct *t)
 {
 	t->thread.fpsimd_cpu = NR_CPUS;
+	/*
+	 * If we don't support fpsimd, bail out after we have
+	 * reset the fpsimd_cpu for this task and clear the
+	 * FPSTATE.
+	 */
+	if (!system_supports_fpsimd())
+		return;
+	barrier();
+	set_tsk_thread_flag(t, TIF_FOREIGN_FPSTATE);
+
+	barrier();
 }
 
-void fpsimd_flush_cpu_state(void)
+/*
+ * Invalidate any task's FPSIMD state that is present on this cpu.
+ * The FPSIMD context should be acquired with get_cpu_fpsimd_context()
+ * before calling this function.
+ */
+static void fpsimd_flush_cpu_state(void)
 {
 	WARN_ON(!system_supports_fpsimd());
 	__this_cpu_write(fpsimd_last_state.st, NULL);
 	set_thread_flag(TIF_FOREIGN_FPSTATE);
 }
 
-#ifdef CONFIG_KERNEL_MODE_NEON
+/*
+ * Save the FPSIMD state to memory and invalidate cpu view.
+ * This function must be called with preemption disabled.
+ */
+void fpsimd_save_and_flush_cpu_state(void)
+{
+	if (!system_supports_fpsimd())
+		return;
+	WARN_ON(preemptible());
+	__get_cpu_fpsimd_context();
+	fpsimd_save();
+	fpsimd_flush_cpu_state();
+	__put_cpu_fpsimd_context();
+}
 
-DEFINE_PER_CPU(bool, kernel_neon_busy);
-EXPORT_PER_CPU_SYMBOL(kernel_neon_busy);
+#ifdef CONFIG_KERNEL_MODE_NEON
 
 /*
  * Kernel-side NEON support functions
```

```diff
 
 	BUG_ON(!may_use_simd());
 
-	preempt_disable();
-	local_bh_disable();
-
-	__this_cpu_write(kernel_neon_busy, true);
+	get_cpu_fpsimd_context();
 
 	/* Save unsaved fpsimd state, if any: */
 	fpsimd_save();
 
 	/* Invalidate any task state remaining in the fpsimd regs: */
 	fpsimd_flush_cpu_state();
-
-	preempt_disable();
-
-	local_bh_enable();
-	preempt_enable();
 }
 EXPORT_SYMBOL(kernel_neon_begin);
 
```
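kernel_neon_begin() now rides on the generic FPSIMD-context claim instead of its own kernel_neon_busy flag, but the contract for callers is unchanged. For reference, the usual in-kernel call pattern looks like this sketch (the worker function is hypothetical and its body elided; this is not a standalone program):

```c
#include <asm/neon.h>
#include <asm/simd.h>

/* Hypothetical kernel-mode NEON user following the documented pattern. */
static void example_neon_work(u8 *dst, const u8 *src, int len)
{
	if (may_use_simd()) {
		kernel_neon_begin();	/* claims the CPU FPSIMD context */
		/* ... NEON-accelerated processing of src into dst ... */
		kernel_neon_end();	/* releases the context again */
	} else {
		/* ... scalar fallback: SIMD unusable in this context ... */
	}
}
```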
```diff
  */
 void kernel_neon_end(void)
 {
-	bool busy;
-
 	if (!system_supports_fpsimd())
 		return;
 
-	busy = __this_cpu_xchg(kernel_neon_busy, false);
-	WARN_ON(!busy);	/* No matching kernel_neon_begin()? */
-
-	preempt_enable();
+	put_cpu_fpsimd_context();
 }
 EXPORT_SYMBOL(kernel_neon_end);
 
```

```diff
 {
 	switch (cmd) {
 	case CPU_PM_ENTER:
-		fpsimd_save();
-		fpsimd_flush_cpu_state();
+		fpsimd_save_and_flush_cpu_state();
 		break;
 	case CPU_PM_EXIT:
 		break;
```

```diff
  */
 static int __init fpsimd_init(void)
 {
-	if (elf_hwcap & HWCAP_FP) {
+	if (cpu_have_named_feature(FP)) {
 		fpsimd_pm_init();
 		fpsimd_hotplug_init();
 	} else {
 		pr_notice("Floating-point is not implemented\n");
 	}
 
-	if (!(elf_hwcap & HWCAP_ASIMD))
+	if (!cpu_have_named_feature(ASIMD))
 		pr_notice("Advanced SIMD is not implemented\n");
 
 	return sve_sysctl_init();
```