| .. | .. |
|---|
| 1 | +/* SPDX-License-Identifier: GPL-2.0-only */ |
|---|
| 1 | 2 | /* |
|---|
| 2 | 3 | * FP/SIMD state saving and restoring macros |
|---|
| 3 | 4 | * |
|---|
| 4 | 5 | * Copyright (C) 2012 ARM Ltd. |
|---|
| 5 | 6 | * Author: Catalin Marinas <catalin.marinas@arm.com> |
|---|
| 6 | | - * |
|---|
| 7 | | - * This program is free software; you can redistribute it and/or modify |
|---|
| 8 | | - * it under the terms of the GNU General Public License version 2 as |
|---|
| 9 | | - * published by the Free Software Foundation. |
|---|
| 10 | | - * |
|---|
| 11 | | - * This program is distributed in the hope that it will be useful, |
|---|
| 12 | | - * but WITHOUT ANY WARRANTY; without even the implied warranty of |
|---|
| 13 | | - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
|---|
| 14 | | - * GNU General Public License for more details. |
|---|
| 15 | | - * |
|---|
| 16 | | - * You should have received a copy of the GNU General Public License |
|---|
| 17 | | - * along with this program. If not, see <http://www.gnu.org/licenses/>. |
|---|
| 18 | 7 | */ |
|---|
| 8 | + |
|---|
| 9 | +#include <asm/assembler.h> |
|---|
| 19 | 10 | |
|---|
| 20 | 11 | .macro fpsimd_save state, tmpnr |
|---|
| 21 | 12 | stp q0, q1, [\state, #16 * 0] |
|---|
| .. | .. |
|---|
| 175 | 166 | | ((\np) << 5) |
|---|
| 176 | 167 | .endm |
|---|
| 177 | 168 | |
|---|
| 169 | +/* PFALSE P\np.B */ |
|---|
| 170 | +.macro _sve_pfalse np |
|---|
| 171 | + _sve_check_preg \np |
|---|
| 172 | + .inst 0x2518e400 \ |
|---|
| 173 | + | (\np) |
|---|
| 174 | +.endm |
|---|
| 175 | + |
|---|
| 178 | 176 | .macro __for from:req, to:req |
|---|
| 179 | 177 | .if (\from) == (\to) |
|---|
| 180 | | - _for__body \from |
|---|
| 178 | + _for__body %\from |
|---|
| 181 | 179 | .else |
|---|
| 182 | | - __for \from, (\from) + ((\to) - (\from)) / 2 |
|---|
| 183 | | - __for (\from) + ((\to) - (\from)) / 2 + 1, \to |
|---|
| 180 | + __for %\from, %((\from) + ((\to) - (\from)) / 2) |
|---|
| 181 | + __for %((\from) + ((\to) - (\from)) / 2 + 1), %\to |
|---|
| 184 | 182 | .endif |
|---|
| 185 | 183 | .endm |
|---|
| 186 | 184 | |
|---|
| 187 | 185 | .macro _for var:req, from:req, to:req, insn:vararg |
|---|
| 188 | 186 | .macro _for__body \var:req |
|---|
| 187 | + .noaltmacro |
|---|
| 189 | 188 | \insn |
|---|
| 189 | + .altmacro |
|---|
| 190 | 190 | .endm |
|---|
| 191 | 191 | |
|---|
| 192 | + .altmacro |
|---|
| 192 | 193 | __for \from, \to |
|---|
| 194 | + .noaltmacro |
|---|
| 193 | 195 | |
|---|
| 194 | 196 | .purgem _for__body |
|---|
| 197 | +.endm |
|---|
| 198 | + |
|---|
| 199 | +/* Update ZCR_EL1.LEN with the new VQ */ |
|---|
| 200 | +.macro sve_load_vq xvqminus1, xtmp, xtmp2 |
|---|
| 201 | + mrs_s \xtmp, SYS_ZCR_EL1 |
|---|
| 202 | + bic \xtmp2, \xtmp, ZCR_ELx_LEN_MASK |
|---|
| 203 | + orr \xtmp2, \xtmp2, \xvqminus1 |
|---|
| 204 | + cmp \xtmp2, \xtmp |
|---|
| 205 | + b.eq 921f |
|---|
| 206 | + msr_s SYS_ZCR_EL1, \xtmp2 // self-synchronising |
|---|
| 207 | +921: |
|---|
| 208 | +.endm |
|---|
| 209 | + |
|---|
| 210 | +/* Preserve the first 128 bits of Znz and zero the rest. */ |
|---|
| 211 | +.macro _sve_flush_z nz |
|---|
| 212 | + _sve_check_zreg \nz |
|---|
| 213 | + mov v\nz\().16b, v\nz\().16b |
|---|
| 214 | +.endm |
|---|
| 215 | + |
|---|
| 216 | +.macro sve_flush |
|---|
| 217 | + _for n, 0, 31, _sve_flush_z \n |
|---|
| 218 | + _for n, 0, 15, _sve_pfalse \n |
|---|
| 219 | + _sve_wrffr 0 |
|---|
| 195 | 220 | .endm |
|---|
| 196 | 221 | |
|---|
| 197 | 222 | .macro sve_save nxbase, xpfpsr, nxtmp |
|---|
| .. | .. |
|---|
| 207 | 232 | str w\nxtmp, [\xpfpsr, #4] |
|---|
| 208 | 233 | .endm |
|---|
| 209 | 234 | |
|---|
| 210 | | -.macro sve_load nxbase, xpfpsr, xvqminus1, nxtmp, xtmp2 |
|---|
| 211 | | - mrs_s x\nxtmp, SYS_ZCR_EL1 |
|---|
| 212 | | - bic \xtmp2, x\nxtmp, ZCR_ELx_LEN_MASK |
|---|
| 213 | | - orr \xtmp2, \xtmp2, \xvqminus1 |
|---|
| 214 | | - cmp \xtmp2, x\nxtmp |
|---|
| 215 | | - b.eq 921f |
|---|
| 216 | | - msr_s SYS_ZCR_EL1, \xtmp2 // self-synchronising |
|---|
| 217 | | -921: |
|---|
| 235 | +.macro __sve_load nxbase, xpfpsr, nxtmp |
|---|
| 218 | 236 | _for n, 0, 31, _sve_ldr_v \n, \nxbase, \n - 34 |
|---|
| 219 | 237 | _sve_ldr_p 0, \nxbase |
|---|
| 220 | 238 | _sve_wrffr 0 |
|---|
| .. | .. |
|---|
| 225 | 243 | ldr w\nxtmp, [\xpfpsr, #4] |
|---|
| 226 | 244 | msr fpcr, x\nxtmp |
|---|
| 227 | 245 | .endm |
|---|
| 246 | + |
|---|
| 247 | +.macro sve_load nxbase, xpfpsr, xvqminus1, nxtmp, xtmp2 |
|---|
| 248 | + sve_load_vq \xvqminus1, x\nxtmp, \xtmp2 |
|---|
| 249 | + __sve_load \nxbase, \xpfpsr, \nxtmp |
|---|
| 250 | +.endm |
|---|