@@ -1,27 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * FP/SIMD context switching and fault handling
  *
  * Copyright (C) 2012 ARM Ltd.
  * Author: Catalin Marinas <catalin.marinas@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
 
 #include <linux/bitmap.h>
+#include <linux/bitops.h>
 #include <linux/bottom_half.h>
 #include <linux/bug.h>
 #include <linux/cache.h>
 #include <linux/compat.h>
+#include <linux/compiler.h>
 #include <linux/cpu.h>
 #include <linux/cpu_pm.h>
 #include <linux/kernel.h>
@@ -38,16 +29,20 @@
 #include <linux/slab.h>
 #include <linux/stddef.h>
 #include <linux/sysctl.h>
+#include <linux/swab.h>
 
 #include <asm/esr.h>
+#include <asm/exception.h>
 #include <asm/fpsimd.h>
 #include <asm/cpufeature.h>
 #include <asm/cputype.h>
+#include <asm/neon.h>
 #include <asm/processor.h>
 #include <asm/simd.h>
 #include <asm/sigcontext.h>
 #include <asm/sysreg.h>
 #include <asm/traps.h>
+#include <asm/virt.h>
 
 #define FPEXC_IOF	(1 << 0)
 #define FPEXC_DZF	(1 << 1)
@@ -90,7 +85,8 @@
  * To prevent this from racing with the manipulation of the task's FPSIMD state
  * from task context and thereby corrupting the state, it is necessary to
  * protect any manipulation of a task's fpsimd_state or TIF_FOREIGN_FPSTATE
- * flag with local_bh_disable() unless softirqs are already masked.
+ * flag with {, __}get_cpu_fpsimd_context(). This will still allow softirqs to
+ * run, but prevent them from using FPSIMD.
  *
  * For a certain task, the sequence may look something like this:
  * - the task gets scheduled in; if both the task's fpsimd_cpu field
@@ -119,44 +115,104 @@
  */
 struct fpsimd_last_state_struct {
	struct user_fpsimd_state *st;
+	void *sve_state;
+	unsigned int sve_vl;
 };
 
 static DEFINE_PER_CPU(struct fpsimd_last_state_struct, fpsimd_last_state);
 
 /* Default VL for tasks that don't set it explicitly: */
-static int sve_default_vl = -1;
+static int __sve_default_vl = -1;
+
+static int get_sve_default_vl(void)
+{
+	return READ_ONCE(__sve_default_vl);
+}
 
 #ifdef CONFIG_ARM64_SVE
 
+static void set_sve_default_vl(int val)
+{
+	WRITE_ONCE(__sve_default_vl, val);
+}
+
 /* Maximum supported vector length across all CPUs (initially poisoned) */
 int __ro_after_init sve_max_vl = SVE_VL_MIN;
-/* Set of available vector lengths, as vq_to_bit(vq): */
-static __ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX);
+int __ro_after_init sve_max_virtualisable_vl = SVE_VL_MIN;
+
+/*
+ * Set of available vector lengths,
+ * where length vq encoded as bit __vq_to_bit(vq):
+ */
+__ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX);
+/* Set of vector lengths present on at least one cpu: */
+static __ro_after_init DECLARE_BITMAP(sve_vq_partial_map, SVE_VQ_MAX);
+
 static void __percpu *efi_sve_state;
 
 #else /* ! CONFIG_ARM64_SVE */
 
 /* Dummy declaration for code that will be optimised out: */
 extern __ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX);
+extern __ro_after_init DECLARE_BITMAP(sve_vq_partial_map, SVE_VQ_MAX);
 extern void __percpu *efi_sve_state;
 
 #endif /* ! CONFIG_ARM64_SVE */
 
-/*
- * Call __sve_free() directly only if you know task can't be scheduled
- * or preempted.
- */
-static void __sve_free(struct task_struct *task)
+DEFINE_PER_CPU(bool, fpsimd_context_busy);
+EXPORT_PER_CPU_SYMBOL(fpsimd_context_busy);
+
+static void __get_cpu_fpsimd_context(void)
 {
-	kfree(task->thread.sve_state);
-	task->thread.sve_state = NULL;
+	bool busy = __this_cpu_xchg(fpsimd_context_busy, true);
+
+	WARN_ON(busy);
 }
 
-static void sve_free(struct task_struct *task)
+/*
+ * Claim ownership of the CPU FPSIMD context for use by the calling context.
+ *
+ * The caller may freely manipulate the FPSIMD context metadata until
+ * put_cpu_fpsimd_context() is called.
+ *
+ * The double-underscore version must only be called if you know the task
+ * can't be preempted.
+ */
+static void get_cpu_fpsimd_context(void)
 {
-	WARN_ON(test_tsk_thread_flag(task, TIF_SVE));
+	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+		local_bh_disable();
+	else
+		preempt_disable();
+	__get_cpu_fpsimd_context();
+}
 
-	__sve_free(task);
+static void __put_cpu_fpsimd_context(void)
+{
+	bool busy = __this_cpu_xchg(fpsimd_context_busy, false);
+
+	WARN_ON(!busy); /* No matching get_cpu_fpsimd_context()? */
+}
+
+/*
+ * Release the CPU FPSIMD context.
+ *
+ * Must be called from a context in which get_cpu_fpsimd_context() was
+ * previously called, with no call to put_cpu_fpsimd_context() in the
+ * meantime.
+ */
+static void put_cpu_fpsimd_context(void)
+{
+	__put_cpu_fpsimd_context();
+	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+		local_bh_enable();
+	else
+		preempt_enable();
+}
+
+static bool have_cpu_fpsimd_context(void)
+{
+	return !preemptible() && __this_cpu_read(fpsimd_context_busy);
 }
 
 static void *sve_free_atomic(struct task_struct *task)
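Ownership of the CPU's FPSIMD context is tracked by the per-CPU fpsimd_context_busy flag, flipped with an exchange so that a nested or unmatched claim trips a WARN_ON(). A minimal userspace model of that discipline (hypothetical names, C11 atomics standing in for the kernel's per-CPU __this_cpu_xchg()):

```c
#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the kernel's per-CPU fpsimd_context_busy flag. */
static atomic_bool context_busy;

/* Claim the context: the previous value must have been "free". */
static void get_context(void)
{
	bool busy = atomic_exchange(&context_busy, true);
	assert(!busy);			/* kernel: WARN_ON(busy) */
}

/* Release the context: a matching get_context() must have happened. */
static void put_context(void)
{
	bool busy = atomic_exchange(&context_busy, false);
	assert(busy);			/* kernel: WARN_ON(!busy) */
}

int main(void)
{
	get_context();
	/* ...exclusive manipulation of FPSIMD context metadata... */
	put_context();
	puts("claim/release pairing OK");
	return 0;
}
```

In the kernel the exclusion itself comes from local_bh_disable() (or preempt_disable() on PREEMPT_RT); the flag does not provide mutual exclusion, it only catches misuse, exactly like the WARN_ON()s above.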
@@ -222,13 +278,11 @@
  * This function should be called only when the FPSIMD/SVE state in
  * thread_struct is known to be up to date, when preparing to enter
  * userspace.
- *
- * Softirqs (and preemption) must be disabled.
  */
 static void task_fpsimd_load(void)
 {
-	WARN_ON(!in_softirq() && !irqs_disabled());
 	WARN_ON(!system_supports_fpsimd());
+	WARN_ON(!have_cpu_fpsimd_context());
 
 	if (system_supports_sve() && test_thread_flag(TIF_SVE))
 		sve_load_state(sve_pffr(&current->thread),
@@ -241,52 +295,34 @@
 /*
  * Ensure FPSIMD/SVE storage in memory for the loaded context is up to
  * date with respect to the CPU registers.
- *
- * Softirqs (and preemption) must be disabled.
 */
-void fpsimd_save(void)
+static void fpsimd_save(void)
 {
-	struct user_fpsimd_state *st = __this_cpu_read(fpsimd_last_state.st);
+	struct fpsimd_last_state_struct const *last =
+		this_cpu_ptr(&fpsimd_last_state);
	/* set by fpsimd_bind_task_to_cpu() or fpsimd_bind_state_to_cpu() */
 
	WARN_ON(!system_supports_fpsimd());
-	WARN_ON(!in_softirq() && !irqs_disabled());
+	WARN_ON(!have_cpu_fpsimd_context());
 
	if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) {
		if (system_supports_sve() && test_thread_flag(TIF_SVE)) {
-			if (WARN_ON(sve_get_vl() != current->thread.sve_vl)) {
+			if (WARN_ON(sve_get_vl() != last->sve_vl)) {
				/*
				 * Can't save the user regs, so current would
				 * re-enter user with corrupt state.
				 * There's no way to recover, so kill it:
				 */
-				force_signal_inject(SIGKILL, SI_KERNEL, 0);
+				force_signal_inject(SIGKILL, SI_KERNEL, 0, 0);
				return;
			}
 
-			sve_save_state(sve_pffr(&current->thread), &st->fpsr);
+			sve_save_state((char *)last->sve_state +
						sve_ffr_offset(last->sve_vl),
+				       &last->st->fpsr);
		} else
-			fpsimd_save_state(st);
+			fpsimd_save_state(last->st);
	}
-}
-
-/*
- * Helpers to translate bit indices in sve_vq_map to VQ values (and
- * vice versa).  This allows find_next_bit() to be used to find the
- * _maximum_ VQ not exceeding a certain value.
- */
-
-static unsigned int vq_to_bit(unsigned int vq)
-{
-	return SVE_VQ_MAX - vq;
-}
-
-static unsigned int bit_to_vq(unsigned int bit)
-{
-	if (WARN_ON(bit >= SVE_VQ_MAX))
-		bit = SVE_VQ_MAX - 1;
-
-	return SVE_VQ_MAX - bit;
-}
 }
 
 /*
@@ -310,18 +346,17 @@
		vl = max_vl;
 
	bit = find_next_bit(sve_vq_map, SVE_VQ_MAX,
-			    vq_to_bit(sve_vq_from_vl(vl)));
-	return sve_vl_from_vq(bit_to_vq(bit));
+			    __vq_to_bit(sve_vq_from_vl(vl)));
+	return sve_vl_from_vq(__bit_to_vq(bit));
 }
 
 #if defined(CONFIG_ARM64_SVE) && defined(CONFIG_SYSCTL)
 
 static int sve_proc_do_default_vl(struct ctl_table *table, int write,
-				  void __user *buffer, size_t *lenp,
-				  loff_t *ppos)
+				  void *buffer, size_t *lenp, loff_t *ppos)
 {
	int ret;
-	int vl = sve_default_vl;
+	int vl = get_sve_default_vl();
	struct ctl_table tmp_table = {
		.data = &vl,
		.maxlen = sizeof(vl),
@@ -338,7 +373,7 @@
	if (!sve_vl_valid(vl))
		return -EINVAL;
 
-	sve_default_vl = find_supported_vector_length(vl);
+	set_sve_default_vl(find_supported_vector_length(vl));
	return 0;
 }
 
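The find_next_bit() call in find_supported_vector_length() depends on the reversed encoding noted at sve_vq_map: VQ vq lives at bit SVE_VQ_MAX - vq (the former vq_to_bit()/bit_to_vq() helpers, now __vq_to_bit()/__bit_to_vq()), so larger vector lengths sit at smaller bit indices and a forward scan finds the largest supported VQ not exceeding the request. A small userspace illustration of the trick (assumed example values, a plain array standing in for the bitmap):

```c
#include <stdio.h>

#define SVE_VQ_MAX 512			/* architectural maximum */

/* Local restatement of the renamed helpers (formerly vq_to_bit/bit_to_vq): */
static unsigned int __vq_to_bit(unsigned int vq)  { return SVE_VQ_MAX - vq; }
static unsigned int __bit_to_vq(unsigned int bit) { return SVE_VQ_MAX - bit; }

int main(void)
{
	/* Assume hardware supports VQ 1, 2 and 4 (VL 16, 32 and 64 bytes). */
	unsigned char map[SVE_VQ_MAX] = { 0 };
	map[__vq_to_bit(1)] = map[__vq_to_bit(2)] = map[__vq_to_bit(4)] = 1;

	/*
	 * Ask for VQ 3: larger VQs sit at *smaller* bit indices, so a
	 * forward scan (find_next_bit() in the kernel) visits VQ 3, 2, 1...
	 */
	unsigned int bit;
	for (bit = __vq_to_bit(3); bit < SVE_VQ_MAX; bit++)
		if (map[bit])
			break;

	printf("largest supported VQ <= 3: %u\n", __bit_to_vq(bit)); /* 2 */
	return 0;
}
```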
@@ -367,12 +402,42 @@
 #define ZREG(sve_state, vq, n) ((char *)(sve_state) +		\
	(SVE_SIG_ZREG_OFFSET(vq, n) - SVE_SIG_REGS_OFFSET))
 
+#ifdef CONFIG_CPU_BIG_ENDIAN
+static __uint128_t arm64_cpu_to_le128(__uint128_t x)
+{
+	u64 a = swab64(x);
+	u64 b = swab64(x >> 64);
+
+	return ((__uint128_t)a << 64) | b;
+}
+#else
+static __uint128_t arm64_cpu_to_le128(__uint128_t x)
+{
+	return x;
+}
+#endif
+
+#define arm64_le128_to_cpu(x) arm64_cpu_to_le128(x)
+
+static void __fpsimd_to_sve(void *sst, struct user_fpsimd_state const *fst,
+			    unsigned int vq)
+{
+	unsigned int i;
+	__uint128_t *p;
+
+	for (i = 0; i < SVE_NUM_ZREGS; ++i) {
+		p = (__uint128_t *)ZREG(sst, vq, i);
+		*p = arm64_cpu_to_le128(fst->vregs[i]);
+	}
+}
+
 /*
  * Transfer the FPSIMD state in task->thread.uw.fpsimd_state to
  * task->thread.sve_state.
  *
  * Task can be a non-runnable task, or current.  In the latter case,
- * softirqs (and preemption) must be disabled.
+ * the caller must have ownership of the cpu FPSIMD context before calling
+ * this function.
  * task->thread.sve_state must point to at least sve_state_size(task)
  * bytes of allocated kernel memory.
  * task->thread.uw.fpsimd_state must be up to date before calling this
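In sve_state the Z-registers are kept in the signal-frame layout, defined as a stream of little-endian 128-bit chunks, so the big-endian variant of arm64_cpu_to_le128() above byte-reverses each 64-bit half and swaps the halves, which amounts to a full 16-byte reversal and is therefore its own inverse (hence the arm64_le128_to_cpu() alias). A userspace check of both properties, with __builtin_bswap64() standing in for the kernel's swab64():

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef __uint128_t u128;

/* Userspace stand-in for the kernel's swab64(). */
static uint64_t swab64(uint64_t x) { return __builtin_bswap64(x); }

/* Same shape as the big-endian arm64_cpu_to_le128() above. */
static u128 cpu_to_le128(u128 x)
{
	uint64_t a = swab64(x);		/* low half, byte-reversed  */
	uint64_t b = swab64(x >> 64);	/* high half, byte-reversed */

	return ((u128)a << 64) | b;
}

int main(void)
{
	u128 x = ((u128)0x0123456789abcdefULL << 64) | 0xfedcba9876543210ULL;

	/* Half-swap plus per-half bswap equals a full 16-byte reversal... */
	u128 le = cpu_to_le128(x);
	assert((uint8_t)(le >> 120) == 0x10);	/* old lowest byte now highest */

	/* ...and the transform is its own inverse (the le128_to_cpu alias). */
	assert(cpu_to_le128(le) == x);
	puts("128-bit byte swap round-trips");
	return 0;
}
```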
@@ -383,15 +448,12 @@
	unsigned int vq;
	void *sst = task->thread.sve_state;
	struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state;
-	unsigned int i;
 
	if (!system_supports_sve())
		return;
 
	vq = sve_vq_from_vl(task->thread.sve_vl);
-	for (i = 0; i < 32; ++i)
-		memcpy(ZREG(sst, vq, i), &fst->vregs[i],
-		       sizeof(fst->vregs[i]));
+	__fpsimd_to_sve(sst, fst, vq);
 }
 
 /*
@@ -399,7 +461,8 @@
  * task->thread.uw.fpsimd_state.
  *
  * Task can be a non-runnable task, or current.  In the latter case,
- * softirqs (and preemption) must be disabled.
+ * the caller must have ownership of the cpu FPSIMD context before calling
+ * this function.
  * task->thread.sve_state must point to at least sve_state_size(task)
  * bytes of allocated kernel memory.
  * task->thread.sve_state must be up to date before calling this function.
@@ -410,14 +473,16 @@
	void const *sst = task->thread.sve_state;
	struct user_fpsimd_state *fst = &task->thread.uw.fpsimd_state;
	unsigned int i;
+	__uint128_t const *p;
 
	if (!system_supports_sve())
		return;
 
	vq = sve_vq_from_vl(task->thread.sve_vl);
-	for (i = 0; i < 32; ++i)
-		memcpy(&fst->vregs[i], ZREG(sst, vq, i),
-		       sizeof(fst->vregs[i]));
+	for (i = 0; i < SVE_NUM_ZREGS; ++i) {
+		p = (__uint128_t const *)ZREG(sst, vq, i);
+		fst->vregs[i] = arm64_le128_to_cpu(*p);
+	}
 }
 
 #ifdef CONFIG_ARM64_SVE
@@ -505,7 +570,6 @@
	unsigned int vq;
	void *sst = task->thread.sve_state;
	struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state;
-	unsigned int i;
 
	if (!test_tsk_thread_flag(task, TIF_SVE))
		return;
@@ -513,15 +577,13 @@
	vq = sve_vq_from_vl(task->thread.sve_vl);
 
	memset(sst, 0, SVE_SIG_REGS_SIZE(vq));
-
-	for (i = 0; i < 32; ++i)
-		memcpy(ZREG(sst, vq, i), &fst->vregs[i],
-		       sizeof(fst->vregs[i]));
+	__fpsimd_to_sve(sst, fst, vq);
 }
 
 int sve_set_vector_length(struct task_struct *task,
			  unsigned long vl, unsigned long flags)
 {
+	void *mem = NULL;
	if (flags & ~(unsigned long)(PR_SVE_VL_INHERIT |
				     PR_SVE_SET_VL_ONEXEC))
		return -EINVAL;
@@ -559,29 +621,26 @@
	 * non-SVE thread.
	 */
	if (task == current) {
-		preempt_disable();
-		local_bh_disable();
+		get_cpu_fpsimd_context();
 
		fpsimd_save();
-		set_thread_flag(TIF_FOREIGN_FPSTATE);
	}
 
	fpsimd_flush_task_state(task);
	if (test_and_clear_tsk_thread_flag(task, TIF_SVE))
		sve_to_fpsimd(task);
 
-	if (task == current) {
-		local_bh_enable();
-		preempt_enable();
-	}
+	if (task == current)
+		put_cpu_fpsimd_context();
 
	/*
	 * Force reallocation of task SVE state to the correct size
	 * on next use:
	 */
-	sve_free(task);
+	mem = sve_free_atomic(task);
 
	task->thread.sve_vl = vl;
+	kfree(mem);
 
 out:
	update_tsk_thread_flag(task, TIF_SVE_VL_INHERIT,
@@ -620,7 +679,7 @@
	vl = arg & PR_SVE_VL_LEN_MASK;
	flags = arg & ~vl;
 
-	if (!system_supports_sve())
+	if (!system_supports_sve() || is_compat_task())
		return -EINVAL;
 
	ret = sve_set_vector_length(current, vl, flags);
@@ -633,17 +692,11 @@
 /* PR_SVE_GET_VL */
 int sve_get_current_vl(void)
 {
-	if (!system_supports_sve())
+	if (!system_supports_sve() || is_compat_task())
		return -EINVAL;
 
	return sve_prctl_status(0);
 }
-
-/*
- * Bitmap for temporary storage of the per-CPU set of supported vector lengths
- * during secondary boot.
- */
-static DECLARE_BITMAP(sve_secondary_vq_map, SVE_VQ_MAX);
 
 static void sve_probe_vqs(DECLARE_BITMAP(map, SVE_VQ_MAX))
 {
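sve_set_current_vl() and sve_get_current_vl() back the PR_SVE_SET_VL and PR_SVE_GET_VL prctls. A sketch of the userspace side, using the constants of the upstream prctl ABI (fallback defines for older headers; the error handling is illustrative only):

```c
#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SVE_SET_VL		/* provided by newer <sys/prctl.h> */
#define PR_SVE_SET_VL		50
#define PR_SVE_GET_VL		51
#define PR_SVE_VL_LEN_MASK	0xffff
#define PR_SVE_VL_INHERIT	(1 << 17)
#endif

int main(void)
{
	/* Request a 32-byte VL; the kernel picks a supported length <= it. */
	if (prctl(PR_SVE_SET_VL, 32 | PR_SVE_VL_INHERIT) < 0) {
		perror("PR_SVE_SET_VL");	/* EINVAL: no SVE or compat task */
		return 1;
	}

	int ret = prctl(PR_SVE_GET_VL);
	if (ret < 0) {
		perror("PR_SVE_GET_VL");
		return 1;
	}

	/* The low 16 bits carry the VL; flag bits live above them. */
	printf("current vector length: %d bytes\n", ret & PR_SVE_VL_LEN_MASK);
	return 0;
}
```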
@@ -659,40 +712,82 @@
		write_sysreg_s(zcr | (vq - 1), SYS_ZCR_EL1); /* self-syncing */
		vl = sve_get_vl();
		vq = sve_vq_from_vl(vl); /* skip intervening lengths */
-		set_bit(vq_to_bit(vq), map);
+		set_bit(__vq_to_bit(vq), map);
	}
 }
 
+/*
+ * Initialise the set of known supported VQs for the boot CPU.
+ * This is called during kernel boot, before secondary CPUs are brought up.
+ */
 void __init sve_init_vq_map(void)
 {
	sve_probe_vqs(sve_vq_map);
+	bitmap_copy(sve_vq_partial_map, sve_vq_map, SVE_VQ_MAX);
 }
 
 /*
  * If we haven't committed to the set of supported VQs yet, filter out
  * those not supported by the current CPU.
+ * This function is called during the bring-up of early secondary CPUs only.
  */
 void sve_update_vq_map(void)
 {
-	sve_probe_vqs(sve_secondary_vq_map);
-	bitmap_and(sve_vq_map, sve_vq_map, sve_secondary_vq_map, SVE_VQ_MAX);
+	DECLARE_BITMAP(tmp_map, SVE_VQ_MAX);
+
+	sve_probe_vqs(tmp_map);
+	bitmap_and(sve_vq_map, sve_vq_map, tmp_map, SVE_VQ_MAX);
+	bitmap_or(sve_vq_partial_map, sve_vq_partial_map, tmp_map, SVE_VQ_MAX);
 }
 
-/* Check whether the current CPU supports all VQs in the committed set */
+/*
+ * Check whether the current CPU supports all VQs in the committed set.
+ * This function is called during the bring-up of late secondary CPUs only.
+ */
 int sve_verify_vq_map(void)
 {
-	int ret = 0;
+	DECLARE_BITMAP(tmp_map, SVE_VQ_MAX);
+	unsigned long b;
 
-	sve_probe_vqs(sve_secondary_vq_map);
-	bitmap_andnot(sve_secondary_vq_map, sve_vq_map, sve_secondary_vq_map,
-		      SVE_VQ_MAX);
-	if (!bitmap_empty(sve_secondary_vq_map, SVE_VQ_MAX)) {
+	sve_probe_vqs(tmp_map);
+
+	bitmap_complement(tmp_map, tmp_map, SVE_VQ_MAX);
+	if (bitmap_intersects(tmp_map, sve_vq_map, SVE_VQ_MAX)) {
		pr_warn("SVE: cpu%d: Required vector length(s) missing\n",
			smp_processor_id());
-		ret = -EINVAL;
+		return -EINVAL;
	}
 
-	return ret;
+	if (!IS_ENABLED(CONFIG_KVM) || !is_hyp_mode_available())
+		return 0;
+
+	/*
+	 * For KVM, it is necessary to ensure that this CPU doesn't
+	 * support any vector length that guests may have probed as
+	 * unsupported.
+	 */
+
+	/* Recover the set of supported VQs: */
+	bitmap_complement(tmp_map, tmp_map, SVE_VQ_MAX);
+	/* Find VQs supported that are not globally supported: */
+	bitmap_andnot(tmp_map, tmp_map, sve_vq_map, SVE_VQ_MAX);
+
+	/* Find the lowest such VQ, if any: */
+	b = find_last_bit(tmp_map, SVE_VQ_MAX);
+	if (b >= SVE_VQ_MAX)
+		return 0; /* no mismatches */
+
+	/*
+	 * Mismatches above sve_max_virtualisable_vl are fine, since
+	 * no guest is allowed to configure ZCR_EL2.LEN to exceed this:
+	 */
+	if (sve_vl_from_vq(__bit_to_vq(b)) <= sve_max_virtualisable_vl) {
+		pr_warn("SVE: cpu%d: Unsupported vector length(s) present\n",
+			smp_processor_id());
+		return -EINVAL;
+	}
+
+	return 0;
 }
 
 static void __init sve_efi_setup(void)
@@ -759,6 +854,8 @@
 void __init sve_setup(void)
 {
	u64 zcr;
+	DECLARE_BITMAP(tmp_map, SVE_VQ_MAX);
+	unsigned long b;
 
	if (!system_supports_sve())
		return;
@@ -768,8 +865,8 @@
	 * so sve_vq_map must have at least SVE_VQ_MIN set.
	 * If something went wrong, at least try to patch it up:
	 */
-	if (WARN_ON(!test_bit(vq_to_bit(SVE_VQ_MIN), sve_vq_map)))
-		set_bit(vq_to_bit(SVE_VQ_MIN), sve_vq_map);
+	if (WARN_ON(!test_bit(__vq_to_bit(SVE_VQ_MIN), sve_vq_map)))
+		set_bit(__vq_to_bit(SVE_VQ_MIN), sve_vq_map);
 
	zcr = read_sanitised_ftr_reg(SYS_ZCR_EL1);
	sve_max_vl = sve_vl_from_vq((zcr & ZCR_ELx_LEN_MASK) + 1);
@@ -785,12 +882,32 @@
	 * For the default VL, pick the maximum supported value <= 64.
	 * VL == 64 is guaranteed not to grow the signal frame.
	 */
-	sve_default_vl = find_supported_vector_length(64);
+	set_sve_default_vl(find_supported_vector_length(64));
+
+	bitmap_andnot(tmp_map, sve_vq_partial_map, sve_vq_map,
+		      SVE_VQ_MAX);
+
+	b = find_last_bit(tmp_map, SVE_VQ_MAX);
+	if (b >= SVE_VQ_MAX)
+		/* No non-virtualisable VLs found */
+		sve_max_virtualisable_vl = SVE_VQ_MAX;
+	else if (WARN_ON(b == SVE_VQ_MAX - 1))
+		/* No virtualisable VLs?  This is architecturally forbidden. */
+		sve_max_virtualisable_vl = SVE_VQ_MIN;
+	else /* b + 1 < SVE_VQ_MAX */
+		sve_max_virtualisable_vl = sve_vl_from_vq(__bit_to_vq(b + 1));
+
+	if (sve_max_virtualisable_vl > sve_max_vl)
+		sve_max_virtualisable_vl = sve_max_vl;
 
	pr_info("SVE: maximum available vector length %u bytes per vector\n",
		sve_max_vl);
	pr_info("SVE: default vector length %u bytes per vector\n",
-		sve_default_vl);
+		get_sve_default_vl());
+
+	/* KVM decides whether to support mismatched systems. Just warn here: */
+	if (sve_max_virtualisable_vl < sve_max_vl)
+		pr_warn("SVE: unvirtualisable vector lengths present\n");
 
	sve_efi_setup();
 }
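The sve_max_virtualisable_vl logic above uses the same reversed encoding again: find_last_bit() over partial & ~committed yields the smallest VQ that only some CPUs support, and bit b + 1 encodes one VQ below it, the largest value a guest's ZCR_EL2.LEN cap can allow while every CPU presents a consistent set. A userspace walkthrough under assumed per-CPU sets (plain arrays in place of bitmaps, a linear scan in place of find_last_bit()):

```c
#include <stdio.h>

#define SVE_VQ_MAX 512			/* architectural maximum */

static unsigned int __vq_to_bit(unsigned int vq)  { return SVE_VQ_MAX - vq; }
static unsigned int __bit_to_vq(unsigned int bit) { return SVE_VQ_MAX - bit; }

int main(void)
{
	/* Committed set (all CPUs): VQ 1, 2.  One early CPU also had VQ 4. */
	unsigned char committed[SVE_VQ_MAX] = { 0 }, partial[SVE_VQ_MAX] = { 0 };
	committed[__vq_to_bit(1)] = committed[__vq_to_bit(2)] = 1;
	partial[__vq_to_bit(1)] = partial[__vq_to_bit(2)] = 1;
	partial[__vq_to_bit(4)] = 1;

	/* tmp = partial & ~committed; b = find_last_bit(tmp): */
	unsigned int b = SVE_VQ_MAX;		/* "not found" sentinel */
	for (unsigned int bit = 0; bit < SVE_VQ_MAX; bit++)
		if (partial[bit] && !committed[bit])
			b = bit;

	if (b >= SVE_VQ_MAX) {
		printf("every vector length is virtualisable\n");
	} else {
		/*
		 * The highest set bit is the smallest mismatched VQ (4 here),
		 * so b + 1 encodes one VQ below it: guests capped at that VQ
		 * (3 here) observe a consistent set on every CPU.
		 */
		printf("max virtualisable VQ: %u\n", __bit_to_vq(b + 1));
	}
	return 0;
}
```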
@@ -801,7 +918,9 @@
 */
 void fpsimd_release_task(struct task_struct *dead_task)
 {
-	__sve_free(dead_task);
+	void *mem = NULL;
+	mem = sve_free_atomic(dead_task);
+	kfree(mem);
 }
 
 #endif /* CONFIG_ARM64_SVE */
@@ -814,41 +933,38 @@
  * the SVE access trap will be disabled the next time this task
  * reaches ret_to_user.
  *
- * TIF_SVE should be clear on entry: otherwise, task_fpsimd_load()
+ * TIF_SVE should be clear on entry: otherwise, fpsimd_restore_current_state()
  * would have disabled the SVE access trap for userspace during
  * ret_to_user, making an SVE access trap impossible in that case.
  */
-asmlinkage void do_sve_acc(unsigned int esr, struct pt_regs *regs)
+void do_sve_acc(unsigned int esr, struct pt_regs *regs)
 {
	/* Even if we chose not to use SVE, the hardware could still trap: */
	if (unlikely(!system_supports_sve()) || WARN_ON(is_compat_task())) {
-		force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc);
+		force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
		return;
	}
 
	sve_alloc(current);
 
-	preempt_disable();
-	local_bh_disable();
+	get_cpu_fpsimd_context();
 
	fpsimd_save();
-	fpsimd_to_sve(current);
 
	/* Force ret_to_user to reload the registers: */
	fpsimd_flush_task_state(current);
-	set_thread_flag(TIF_FOREIGN_FPSTATE);
 
+	fpsimd_to_sve(current);
	if (test_and_set_thread_flag(TIF_SVE))
		WARN_ON(1); /* SVE access shouldn't have trapped */
 
-	local_bh_enable();
-	preempt_enable();
+	put_cpu_fpsimd_context();
 }
 
 /*
 * Trapped FP/ASIMD access.
 */
-asmlinkage void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs)
+void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs)
 {
	/* TODO: implement lazy context saving/restoring */
	WARN_ON(1);
@@ -857,9 +973,8 @@
 /*
 * Raise a SIGFPE for the current process.
 */
-asmlinkage void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs)
+void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs)
 {
-	siginfo_t info;
	unsigned int si_code = FPE_FLTUNK;
 
	if (esr & ESR_ELx_FP_EXC_TFV) {
@@ -875,12 +990,9 @@
			si_code = FPE_FLTRES;
	}
 
-	clear_siginfo(&info);
-	info.si_signo = SIGFPE;
-	info.si_code = si_code;
-	info.si_addr = (void __user *)instruction_pointer(regs);
-
-	send_sig_info(SIGFPE, &info, current);
+	send_sig_fault(SIGFPE, si_code,
+		       (void __user *)instruction_pointer(regs),
+		       current);
 }
 
 void fpsimd_thread_switch(struct task_struct *next)
@@ -889,6 +1001,8 @@
 
	if (!system_supports_fpsimd())
		return;
+
+	__get_cpu_fpsimd_context();
 
	/* Save unsaved fpsimd state, if any: */
	fpsimd_save();
@@ -904,6 +1018,8 @@
 
	update_tsk_thread_flag(next, TIF_FOREIGN_FPSTATE,
			       wrong_task || wrong_cpu);
+
+	__put_cpu_fpsimd_context();
 }
 
 void fpsimd_flush_thread(void)
@@ -914,12 +1030,11 @@
	if (!system_supports_fpsimd())
		return;
 
-	preempt_disable();
-	local_bh_disable();
+	get_cpu_fpsimd_context();
 
+	fpsimd_flush_task_state(current);
	memset(&current->thread.uw.fpsimd_state, 0,
	       sizeof(current->thread.uw.fpsimd_state));
-	fpsimd_flush_task_state(current);
 
	if (system_supports_sve()) {
		clear_thread_flag(TIF_SVE);
@@ -931,13 +1046,13 @@
	 * vector length configured: no kernel task can become a user
	 * task without an exec and hence a call to this function.
	 * By the time the first call to this function is made, all
-	 * early hardware probing is complete, so sve_default_vl
+	 * early hardware probing is complete, so __sve_default_vl
	 * should be valid.
	 * If a bug causes this to go wrong, we make some noise and
	 * try to fudge thread.sve_vl to a safe value here.
	 */
	vl = current->thread.sve_vl_onexec ?
-		current->thread.sve_vl_onexec : sve_default_vl;
+		current->thread.sve_vl_onexec : get_sve_default_vl();
 
	if (WARN_ON(!sve_vl_valid(vl)))
		vl = SVE_VL_MIN;
@@ -956,10 +1071,7 @@
		current->thread.sve_vl_onexec = 0;
	}
 
-	set_thread_flag(TIF_FOREIGN_FPSTATE);
-
-	local_bh_enable();
-	preempt_enable();
+	put_cpu_fpsimd_context();
	kfree(mem);
 }
 
@@ -972,11 +1084,9 @@
	if (!system_supports_fpsimd())
		return;
 
-	preempt_disable();
-	local_bh_disable();
+	get_cpu_fpsimd_context();
	fpsimd_save();
-	local_bh_enable();
-	preempt_enable();
+	put_cpu_fpsimd_context();
 }
 
 /*
@@ -993,7 +1103,8 @@
 
 /*
 * Associate current's FPSIMD context with this cpu
- * Preemption must be disabled when calling this function.
+ * The caller must have ownership of the cpu FPSIMD context before calling
+ * this function.
 */
 void fpsimd_bind_task_to_cpu(void)
 {
@@ -1002,6 +1113,8 @@
 
	WARN_ON(!system_supports_fpsimd());
	last->st = &current->thread.uw.fpsimd_state;
+	last->sve_state = current->thread.sve_state;
+	last->sve_vl = current->thread.sve_vl;
	current->thread.fpsimd_cpu = smp_processor_id();
 
	if (system_supports_sve()) {
@@ -1015,7 +1128,8 @@
	}
 }
 
-void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st)
+void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state,
+			      unsigned int sve_vl)
 {
	struct fpsimd_last_state_struct *last =
		this_cpu_ptr(&fpsimd_last_state);
@@ -1024,6 +1138,8 @@
	WARN_ON(!in_softirq() && !irqs_disabled());
 
	last->st = st;
+	last->sve_state = sve_state;
+	last->sve_vl = sve_vl;
 }
 
 /*
@@ -1047,16 +1163,14 @@
		return;
	}
 
-	preempt_disable();
-	local_bh_disable();
+	get_cpu_fpsimd_context();
 
	if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) {
		task_fpsimd_load();
		fpsimd_bind_task_to_cpu();
	}
 
-	local_bh_enable();
-	preempt_enable();
+	put_cpu_fpsimd_context();
 }
 
 /*
@@ -1069,8 +1183,7 @@
	if (WARN_ON(!system_supports_fpsimd()))
		return;
 
-	preempt_disable();
-	local_bh_disable();
+	get_cpu_fpsimd_context();
 
	current->thread.uw.fpsimd_state = *state;
	if (system_supports_sve() && test_thread_flag(TIF_SVE))
@@ -1081,29 +1194,64 @@
 
	clear_thread_flag(TIF_FOREIGN_FPSTATE);
 
-	local_bh_enable();
-	preempt_enable();
+	put_cpu_fpsimd_context();
 }
 
 /*
 * Invalidate live CPU copies of task t's FPSIMD state
+ *
+ * This function may be called with preemption enabled.  The barrier()
+ * ensures that the assignment to fpsimd_cpu is visible to any
+ * preemption/softirq that could race with set_tsk_thread_flag(), so
+ * that TIF_FOREIGN_FPSTATE cannot be spuriously re-cleared.
+ *
+ * The final barrier ensures that TIF_FOREIGN_FPSTATE is seen set by any
+ * subsequent code.
 */
 void fpsimd_flush_task_state(struct task_struct *t)
 {
	t->thread.fpsimd_cpu = NR_CPUS;
+	/*
+	 * If we don't support fpsimd, bail out after we have
+	 * reset the fpsimd_cpu for this task and cleared the
+	 * FPSTATE.
+	 */
+	if (!system_supports_fpsimd())
+		return;
+	barrier();
+	set_tsk_thread_flag(t, TIF_FOREIGN_FPSTATE);
+
+	barrier();
 }
 
-void fpsimd_flush_cpu_state(void)
+/*
+ * Invalidate any task's FPSIMD state that is present on this cpu.
+ * The FPSIMD context should be acquired with get_cpu_fpsimd_context()
+ * before calling this function.
+ */
+static void fpsimd_flush_cpu_state(void)
 {
	WARN_ON(!system_supports_fpsimd());
	__this_cpu_write(fpsimd_last_state.st, NULL);
	set_thread_flag(TIF_FOREIGN_FPSTATE);
 }
 
-#ifdef CONFIG_KERNEL_MODE_NEON
+/*
+ * Save the FPSIMD state to memory and invalidate cpu view.
+ * This function must be called with preemption disabled.
+ */
+void fpsimd_save_and_flush_cpu_state(void)
+{
+	if (!system_supports_fpsimd())
+		return;
+	WARN_ON(preemptible());
+	__get_cpu_fpsimd_context();
+	fpsimd_save();
+	fpsimd_flush_cpu_state();
+	__put_cpu_fpsimd_context();
+}
 
-DEFINE_PER_CPU(bool, kernel_neon_busy);
-EXPORT_PER_CPU_SYMBOL(kernel_neon_busy);
+#ifdef CONFIG_KERNEL_MODE_NEON
 
 /*
 * Kernel-side NEON support functions
@@ -1129,21 +1277,13 @@
 
	BUG_ON(!may_use_simd());
 
-	preempt_disable();
-	local_bh_disable();
-
-	__this_cpu_write(kernel_neon_busy, true);
+	get_cpu_fpsimd_context();
 
	/* Save unsaved fpsimd state, if any: */
	fpsimd_save();
 
	/* Invalidate any task state remaining in the fpsimd regs: */
	fpsimd_flush_cpu_state();
-
-	preempt_disable();
-
-	local_bh_enable();
-	preempt_enable();
 }
 EXPORT_SYMBOL(kernel_neon_begin);
 
@@ -1158,15 +1298,10 @@
 */
 void kernel_neon_end(void)
 {
-	bool busy;
-
	if (!system_supports_fpsimd())
		return;
 
-	busy = __this_cpu_xchg(kernel_neon_busy, false);
-	WARN_ON(!busy);	/* No matching kernel_neon_begin()? */
-
-	preempt_enable();
+	put_cpu_fpsimd_context();
 }
 EXPORT_SYMBOL(kernel_neon_end);
 
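kernel_neon_begin()/kernel_neon_end() keep their calling convention, now implemented on top of get_cpu_fpsimd_context()/put_cpu_fpsimd_context(). A sketch of a typical in-kernel user under the documented rules (hypothetical function; the NEON code itself is elided, and no sleeping is allowed between begin and end):

```c
#include <linux/kernel.h>
#include <linux/types.h>

#include <asm/neon.h>
#include <asm/simd.h>

/*
 * Hypothetical worker: run a NEON-accelerated routine if the FPSIMD
 * registers may be used in this context, otherwise fall back to scalar.
 */
static void crunch(u8 *buf, size_t len)
{
	if (may_use_simd()) {
		kernel_neon_begin();	/* claims the CPU FPSIMD context */
		/* ... NEON/ASIMD code operating on buf[0..len) ... */
		kernel_neon_end();	/* releases it again */
	} else {
		/* ... scalar fallback ... */
	}
}
```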
@@ -1258,8 +1393,7 @@
 {
	switch (cmd) {
	case CPU_PM_ENTER:
-		fpsimd_save();
-		fpsimd_flush_cpu_state();
+		fpsimd_save_and_flush_cpu_state();
		break;
	case CPU_PM_EXIT:
		break;
@@ -1305,14 +1439,14 @@
 */
 static int __init fpsimd_init(void)
 {
-	if (elf_hwcap & HWCAP_FP) {
+	if (cpu_have_named_feature(FP)) {
		fpsimd_pm_init();
		fpsimd_hotplug_init();
	} else {
		pr_notice("Floating-point is not implemented\n");
	}
 
-	if (!(elf_hwcap & HWCAP_ASIMD))
+	if (!cpu_have_named_feature(ASIMD))
		pr_notice("Advanced SIMD is not implemented\n");
 
	return sve_sysctl_init();
|---|