.. | .. |
---|
169 | 169 | WARN_ON(busy); |
---|
170 | 170 | } |
---|
171 | 171 | |
---|
| 172 | +static void __put_cpu_fpsimd_context(void) |
---|
| 173 | +{ |
---|
| 174 | + bool busy = __this_cpu_xchg(fpsimd_context_busy, false); |
---|
| 175 | + |
---|
| 176 | + WARN_ON(!busy); /* No matching get_cpu_fpsimd_context()? */ |
---|
| 177 | +} |
---|
| 178 | + |
---|
| 179 | +#ifdef CONFIG_DOVETAIL |
---|
| 180 | + |
---|
| 181 | +#define get_cpu_fpsimd_context(__flags) \ |
---|
| 182 | + do { \ |
---|
| 183 | + (__flags) = hard_preempt_disable(); \ |
---|
| 184 | + __get_cpu_fpsimd_context(); \ |
---|
| 185 | + } while (0) |
---|
| 186 | + |
---|
| 187 | +#define put_cpu_fpsimd_context(__flags) \ |
---|
| 188 | + do { \ |
---|
| 189 | + __put_cpu_fpsimd_context(); \ |
---|
| 190 | + hard_preempt_enable(__flags); \ |
---|
| 191 | + } while (0) |
---|
| 192 | + |
---|
| 193 | +void fpsimd_restore_current_oob(void) |
---|
| 194 | +{ |
---|
| 195 | + /* |
---|
| 196 | + * Restore the fpsimd context for the current task as it |
---|
 | 197 | + * resumes from dovetail_context_switch(), which always happens |
---|
| 198 | + * on the out-of-band stage. Skip this for kernel threads |
---|
| 199 | + * which have no such context but always bear |
---|
| 200 | + * TIF_FOREIGN_FPSTATE. |
---|
| 201 | + */ |
---|
| 202 | + if (current->mm) |
---|
| 203 | + fpsimd_restore_current_state(); |
---|
| 204 | +} |
---|
| 205 | + |
---|
| 206 | +#else |
---|
| 207 | + |
---|
172 | 208 | /* |
---|
173 | 209 | * Claim ownership of the CPU FPSIMD context for use by the calling context. |
---|
174 | 210 | * |
---|
.. | .. |
---|
178 | 214 | * The double-underscore version must only be called if you know the task |
---|
179 | 215 | * can't be preempted. |
---|
180 | 216 | */ |
---|
181 | | -static void get_cpu_fpsimd_context(void) |
---|
182 | | -{ |
---|
183 | | - local_bh_disable(); |
---|
184 | | - __get_cpu_fpsimd_context(); |
---|
185 | | -} |
---|
186 | | - |
---|
187 | | -static void __put_cpu_fpsimd_context(void) |
---|
188 | | -{ |
---|
189 | | - bool busy = __this_cpu_xchg(fpsimd_context_busy, false); |
---|
190 | | - |
---|
191 | | - WARN_ON(!busy); /* No matching get_cpu_fpsimd_context()? */ |
---|
192 | | -} |
---|
193 | | - |
---|
| 217 | +#define get_cpu_fpsimd_context(__flags) \ |
---|
| 218 | + do { \ |
---|
| 219 | + preempt_disable(); \ |
---|
| 220 | + __get_cpu_fpsimd_context(); \ |
---|
| 221 | + (void)(__flags); \ |
---|
| 222 | + } while (0) |
---|
194 | 223 | /* |
---|
195 | 224 | * Release the CPU FPSIMD context. |
---|
196 | 225 | * |
---|
.. | .. |
---|
198 | 227 | * previously called, with no call to put_cpu_fpsimd_context() in the |
---|
199 | 228 | * meantime. |
---|
200 | 229 | */ |
---|
201 | | -static void put_cpu_fpsimd_context(void) |
---|
202 | | -{ |
---|
203 | | - __put_cpu_fpsimd_context(); |
---|
204 | | - local_bh_enable(); |
---|
205 | | -} |
---|
| 230 | +#define put_cpu_fpsimd_context(__flags) \ |
---|
| 231 | + do { \ |
---|
| 232 | + __put_cpu_fpsimd_context(); \ |
---|
| 233 | + preempt_enable(); \ |
---|
| 234 | + (void)(__flags); \ |
---|
| 235 | + } while (0) |
---|
206 | 236 | |
---|
| 237 | +#endif /* !CONFIG_DOVETAIL */ |
---|
207 | 238 | static bool have_cpu_fpsimd_context(void) |
---|
208 | 239 | { |
---|
209 | 240 | return !preemptible() && __this_cpu_read(fpsimd_context_busy); |
---|
.. | .. |
---|
283 | 314 | static void task_fpsimd_load(void) |
---|
284 | 315 | { |
---|
285 | 316 | WARN_ON(!system_supports_fpsimd()); |
---|
286 | | - WARN_ON(!have_cpu_fpsimd_context()); |
---|
| 317 | + WARN_ON(!hard_irqs_disabled() && !have_cpu_fpsimd_context()); |
---|
287 | 318 | |
---|
288 | 319 | if (system_supports_sve() && test_thread_flag(TIF_SVE)) |
---|
289 | 320 | sve_load_state(sve_pffr(¤t->thread), |
---|
.. | .. |
---|
297 | 328 | * Ensure FPSIMD/SVE storage in memory for the loaded context is up to |
---|
298 | 329 | * date with respect to the CPU registers. |
---|
299 | 330 | */ |
---|
300 | | -static void fpsimd_save(void) |
---|
| 331 | +static void __fpsimd_save(void) |
---|
301 | 332 | { |
---|
302 | 333 | struct fpsimd_last_state_struct const *last = |
---|
303 | 334 | this_cpu_ptr(&fpsimd_last_state); |
---|
304 | 335 | /* set by fpsimd_bind_task_to_cpu() or fpsimd_bind_state_to_cpu() */ |
---|
305 | 336 | |
---|
306 | 337 | WARN_ON(!system_supports_fpsimd()); |
---|
307 | | - WARN_ON(!have_cpu_fpsimd_context()); |
---|
| 338 | + WARN_ON(!hard_irqs_disabled() && !have_cpu_fpsimd_context()); |
---|
308 | 339 | |
---|
309 | 340 | if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) { |
---|
310 | 341 | if (system_supports_sve() && test_thread_flag(TIF_SVE)) { |
---|
.. | .. |
---|
324 | 355 | } else |
---|
325 | 356 | fpsimd_save_state(last->st); |
---|
326 | 357 | } |
---|
| 358 | +} |
---|
| 359 | + |
---|
| 360 | +void fpsimd_save(void) |
---|
| 361 | +{ |
---|
| 362 | + unsigned long flags; |
---|
| 363 | + |
---|
| 364 | + flags = hard_cond_local_irq_save(); |
---|
| 365 | + __fpsimd_save(); |
---|
| 366 | + hard_cond_local_irq_restore(flags); |
---|
327 | 367 | } |
---|
328 | 368 | |
---|
329 | 369 | /* |
---|
.. | .. |
---|
444 | 484 | * task->thread.uw.fpsimd_state must be up to date before calling this |
---|
445 | 485 | * function. |
---|
446 | 486 | */ |
---|
447 | | -static void fpsimd_to_sve(struct task_struct *task) |
---|
| 487 | +static void _fpsimd_to_sve(struct task_struct *task) |
---|
448 | 488 | { |
---|
449 | 489 | unsigned int vq; |
---|
450 | 490 | void *sst = task->thread.sve_state; |
---|
.. | .. |
---|
455 | 495 | |
---|
456 | 496 | vq = sve_vq_from_vl(task->thread.sve_vl); |
---|
457 | 497 | __fpsimd_to_sve(sst, fst, vq); |
---|
| 498 | +} |
---|
| 499 | + |
---|
| 500 | +static void fpsimd_to_sve(struct task_struct *task) |
---|
| 501 | +{ |
---|
| 502 | + unsigned long flags; |
---|
| 503 | + |
---|
| 504 | + flags = hard_cond_local_irq_save(); |
---|
| 505 | + _fpsimd_to_sve(task); |
---|
| 506 | + hard_cond_local_irq_restore(flags); |
---|
458 | 507 | } |
---|
459 | 508 | |
---|
460 | 509 | /* |
---|
.. | .. |
---|
475 | 524 | struct user_fpsimd_state *fst = &task->thread.uw.fpsimd_state; |
---|
476 | 525 | unsigned int i; |
---|
477 | 526 | __uint128_t const *p; |
---|
| 527 | + unsigned long flags; |
---|
478 | 528 | |
---|
479 | 529 | if (!system_supports_sve()) |
---|
480 | 530 | return; |
---|
| 531 | + |
---|
| 532 | + flags = hard_cond_local_irq_save(); |
---|
481 | 533 | |
---|
482 | 534 | vq = sve_vq_from_vl(task->thread.sve_vl); |
---|
483 | 535 | for (i = 0; i < SVE_NUM_ZREGS; ++i) { |
---|
484 | 536 | p = (__uint128_t const *)ZREG(sst, vq, i); |
---|
485 | 537 | fst->vregs[i] = arm64_le128_to_cpu(*p); |
---|
486 | 538 | } |
---|
| 539 | + |
---|
| 540 | + hard_cond_local_irq_restore(flags); |
---|
487 | 541 | } |
---|
488 | 542 | |
---|
489 | 543 | #ifdef CONFIG_ARM64_SVE |
---|
.. | .. |
---|
584 | 638 | int sve_set_vector_length(struct task_struct *task, |
---|
585 | 639 | unsigned long vl, unsigned long flags) |
---|
586 | 640 | { |
---|
| 641 | + unsigned long irqflags = 0; |
---|
| 642 | + |
---|
587 | 643 | if (flags & ~(unsigned long)(PR_SVE_VL_INHERIT | |
---|
588 | 644 | PR_SVE_SET_VL_ONEXEC)) |
---|
589 | 645 | return -EINVAL; |
---|
.. | .. |
---|
621 | 677 | * non-SVE thread. |
---|
622 | 678 | */ |
---|
623 | 679 | if (task == current) { |
---|
624 | | - get_cpu_fpsimd_context(); |
---|
| 680 | + get_cpu_fpsimd_context(irqflags); |
---|
625 | 681 | |
---|
626 | | - fpsimd_save(); |
---|
| 682 | + __fpsimd_save(); |
---|
627 | 683 | } |
---|
628 | 684 | |
---|
629 | 685 | fpsimd_flush_task_state(task); |
---|
.. | .. |
---|
631 | 687 | sve_to_fpsimd(task); |
---|
632 | 688 | |
---|
633 | 689 | if (task == current) |
---|
634 | | - put_cpu_fpsimd_context(); |
---|
| 690 | + put_cpu_fpsimd_context(irqflags); |
---|
635 | 691 | |
---|
636 | 692 | /* |
---|
637 | 693 | * Force reallocation of task SVE state to the correct size |
---|
.. | .. |
---|
936 | 992 | */ |
---|
937 | 993 | void do_sve_acc(unsigned int esr, struct pt_regs *regs) |
---|
938 | 994 | { |
---|
| 995 | + unsigned long flags; |
---|
| 996 | + |
---|
| 997 | + mark_trap_entry(ARM64_TRAP_SVE, regs); |
---|
| 998 | + |
---|
939 | 999 | /* Even if we chose not to use SVE, the hardware could still trap: */ |
---|
940 | 1000 | if (unlikely(!system_supports_sve()) || WARN_ON(is_compat_task())) { |
---|
941 | 1001 | force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0); |
---|
942 | | - return; |
---|
| 1002 | + goto out; |
---|
943 | 1003 | } |
---|
944 | 1004 | |
---|
945 | 1005 | sve_alloc(current); |
---|
946 | 1006 | |
---|
947 | | - get_cpu_fpsimd_context(); |
---|
| 1007 | + get_cpu_fpsimd_context(flags); |
---|
948 | 1008 | |
---|
949 | | - fpsimd_save(); |
---|
| 1009 | + __fpsimd_save(); |
---|
950 | 1010 | |
---|
951 | 1011 | /* Force ret_to_user to reload the registers: */ |
---|
952 | 1012 | fpsimd_flush_task_state(current); |
---|
.. | .. |
---|
955 | 1015 | if (test_and_set_thread_flag(TIF_SVE)) |
---|
956 | 1016 | WARN_ON(1); /* SVE access shouldn't have trapped */ |
---|
957 | 1017 | |
---|
958 | | - put_cpu_fpsimd_context(); |
---|
| 1018 | + put_cpu_fpsimd_context(flags); |
---|
| 1019 | +out: |
---|
| 1020 | + mark_trap_exit(ARM64_TRAP_SVE, regs); |
---|
959 | 1021 | } |
---|
960 | 1022 | |
---|
961 | 1023 | /* |
---|
.. | .. |
---|
974 | 1036 | { |
---|
975 | 1037 | unsigned int si_code = FPE_FLTUNK; |
---|
976 | 1038 | |
---|
| 1039 | + if (!mark_cond_trap_entry(ARM64_TRAP_FPE, regs)) |
---|
| 1040 | + return; |
---|
| 1041 | + |
---|
977 | 1042 | if (esr & ESR_ELx_FP_EXC_TFV) { |
---|
978 | 1043 | if (esr & FPEXC_IOF) |
---|
979 | 1044 | si_code = FPE_FLTINV; |
---|
.. | .. |
---|
990 | 1055 | send_sig_fault(SIGFPE, si_code, |
---|
991 | 1056 | (void __user *)instruction_pointer(regs), |
---|
992 | 1057 | current); |
---|
| 1058 | + |
---|
| 1059 | + mark_trap_exit(ARM64_TRAP_FPE, regs); |
---|
993 | 1060 | } |
---|
994 | 1061 | |
---|
995 | 1062 | void fpsimd_thread_switch(struct task_struct *next) |
---|
996 | 1063 | { |
---|
997 | 1064 | bool wrong_task, wrong_cpu; |
---|
| 1065 | + unsigned long flags; |
---|
998 | 1066 | |
---|
999 | 1067 | if (!system_supports_fpsimd()) |
---|
1000 | 1068 | return; |
---|
1001 | 1069 | |
---|
| 1070 | + flags = hard_cond_local_irq_save(); |
---|
| 1071 | + |
---|
1002 | 1072 | __get_cpu_fpsimd_context(); |
---|
1003 | 1073 | |
---|
1004 | 1074 | /* Save unsaved fpsimd state, if any: */ |
---|
1005 | | - fpsimd_save(); |
---|
| 1075 | + __fpsimd_save(); |
---|
1006 | 1076 | |
---|
1007 | 1077 | /* |
---|
1008 | 1078 | * Fix up TIF_FOREIGN_FPSTATE to correctly describe next's |
---|
.. | .. |
---|
1017 | 1087 | wrong_task || wrong_cpu); |
---|
1018 | 1088 | |
---|
1019 | 1089 | __put_cpu_fpsimd_context(); |
---|
| 1090 | + |
---|
| 1091 | + hard_cond_local_irq_restore(flags); |
---|
1020 | 1092 | } |
---|
1021 | 1093 | |
---|
1022 | 1094 | void fpsimd_flush_thread(void) |
---|
1023 | 1095 | { |
---|
1024 | 1096 | int vl, supported_vl; |
---|
| 1097 | + unsigned long flags; |
---|
1025 | 1098 | |
---|
1026 | 1099 | if (!system_supports_fpsimd()) |
---|
1027 | 1100 | return; |
---|
1028 | 1101 | |
---|
1029 | | - get_cpu_fpsimd_context(); |
---|
| 1102 | + get_cpu_fpsimd_context(flags); |
---|
1030 | 1103 | |
---|
1031 | 1104 | fpsimd_flush_task_state(current); |
---|
1032 | 1105 | memset(¤t->thread.uw.fpsimd_state, 0, |
---|
.. | .. |
---|
1067 | 1140 | current->thread.sve_vl_onexec = 0; |
---|
1068 | 1141 | } |
---|
1069 | 1142 | |
---|
1070 | | - put_cpu_fpsimd_context(); |
---|
| 1143 | + put_cpu_fpsimd_context(flags); |
---|
1071 | 1144 | } |
---|
1072 | 1145 | |
---|
1073 | 1146 | /* |
---|
.. | .. |
---|
1076 | 1149 | */ |
---|
1077 | 1150 | void fpsimd_preserve_current_state(void) |
---|
1078 | 1151 | { |
---|
| 1152 | + unsigned long flags; |
---|
| 1153 | + |
---|
1079 | 1154 | if (!system_supports_fpsimd()) |
---|
1080 | 1155 | return; |
---|
1081 | 1156 | |
---|
1082 | | - get_cpu_fpsimd_context(); |
---|
1083 | | - fpsimd_save(); |
---|
1084 | | - put_cpu_fpsimd_context(); |
---|
| 1157 | + get_cpu_fpsimd_context(flags); |
---|
| 1158 | + __fpsimd_save(); |
---|
| 1159 | + put_cpu_fpsimd_context(flags); |
---|
1085 | 1160 | } |
---|
1086 | 1161 | |
---|
1087 | 1162 | /* |
---|
.. | .. |
---|
1123 | 1198 | } |
---|
1124 | 1199 | } |
---|
1125 | 1200 | |
---|
1126 | | -void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state, |
---|
| 1201 | +static void __fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state, |
---|
1127 | 1202 | unsigned int sve_vl) |
---|
1128 | 1203 | { |
---|
1129 | 1204 | struct fpsimd_last_state_struct *last = |
---|
.. | .. |
---|
1137 | 1212 | last->sve_vl = sve_vl; |
---|
1138 | 1213 | } |
---|
1139 | 1214 | |
---|
| 1215 | +void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state, |
---|
| 1216 | + unsigned int sve_vl) |
---|
| 1217 | +{ |
---|
| 1218 | + unsigned long flags; |
---|
| 1219 | + |
---|
| 1220 | + WARN_ON(!in_softirq() && !irqs_disabled()); |
---|
| 1221 | + |
---|
| 1222 | + flags = hard_cond_local_irq_save(); |
---|
| 1223 | + __fpsimd_bind_state_to_cpu(st, sve_state, sve_vl); |
---|
| 1224 | + hard_cond_local_irq_restore(flags); |
---|
| 1225 | +} |
---|
| 1226 | + |
---|
1140 | 1227 | /* |
---|
1141 | 1228 | * Load the userland FPSIMD state of 'current' from memory, but only if the |
---|
1142 | 1229 | * FPSIMD state already held in the registers is /not/ the most recent FPSIMD |
---|
.. | .. |
---|
1144 | 1231 | */ |
---|
1145 | 1232 | void fpsimd_restore_current_state(void) |
---|
1146 | 1233 | { |
---|
| 1234 | + unsigned long flags; |
---|
| 1235 | + |
---|
1147 | 1236 | /* |
---|
1148 | 1237 | * For the tasks that were created before we detected the absence of |
---|
1149 | 1238 | * FP/SIMD, the TIF_FOREIGN_FPSTATE could be set via fpsimd_thread_switch(), |
---|
.. | .. |
---|
1158 | 1247 | return; |
---|
1159 | 1248 | } |
---|
1160 | 1249 | |
---|
1161 | | - get_cpu_fpsimd_context(); |
---|
| 1250 | + get_cpu_fpsimd_context(flags); |
---|
1162 | 1251 | |
---|
1163 | 1252 | if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) { |
---|
1164 | 1253 | task_fpsimd_load(); |
---|
1165 | 1254 | fpsimd_bind_task_to_cpu(); |
---|
1166 | 1255 | } |
---|
1167 | 1256 | |
---|
1168 | | - put_cpu_fpsimd_context(); |
---|
| 1257 | + put_cpu_fpsimd_context(flags); |
---|
1169 | 1258 | } |
---|
1170 | 1259 | |
---|
1171 | 1260 | /* |
---|
.. | .. |
---|
1175 | 1264 | */ |
---|
1176 | 1265 | void fpsimd_update_current_state(struct user_fpsimd_state const *state) |
---|
1177 | 1266 | { |
---|
| 1267 | + unsigned long flags; |
---|
| 1268 | + |
---|
1178 | 1269 | if (WARN_ON(!system_supports_fpsimd())) |
---|
1179 | 1270 | return; |
---|
1180 | 1271 | |
---|
1181 | | - get_cpu_fpsimd_context(); |
---|
| 1272 | + get_cpu_fpsimd_context(flags); |
---|
1182 | 1273 | |
---|
1183 | 1274 | current->thread.uw.fpsimd_state = *state; |
---|
1184 | 1275 | if (system_supports_sve() && test_thread_flag(TIF_SVE)) |
---|
1185 | | - fpsimd_to_sve(current); |
---|
| 1276 | + _fpsimd_to_sve(current); |
---|
1186 | 1277 | |
---|
1187 | 1278 | task_fpsimd_load(); |
---|
1188 | 1279 | fpsimd_bind_task_to_cpu(); |
---|
1189 | 1280 | |
---|
1190 | 1281 | clear_thread_flag(TIF_FOREIGN_FPSTATE); |
---|
1191 | 1282 | |
---|
1192 | | - put_cpu_fpsimd_context(); |
---|
| 1283 | + put_cpu_fpsimd_context(flags); |
---|
1193 | 1284 | } |
---|
1194 | 1285 | |
---|
1195 | 1286 | /* |
---|
.. | .. |
---|
1239 | 1330 | { |
---|
1240 | 1331 | if (!system_supports_fpsimd()) |
---|
1241 | 1332 | return; |
---|
1242 | | - WARN_ON(preemptible()); |
---|
| 1333 | + WARN_ON(!hard_irqs_disabled() && preemptible()); |
---|
1243 | 1334 | __get_cpu_fpsimd_context(); |
---|
1244 | | - fpsimd_save(); |
---|
| 1335 | + __fpsimd_save(); |
---|
1245 | 1336 | fpsimd_flush_cpu_state(); |
---|
1246 | 1337 | __put_cpu_fpsimd_context(); |
---|
1247 | 1338 | } |
---|
.. | .. |
---|
1267 | 1358 | */ |
---|
1268 | 1359 | void kernel_neon_begin(void) |
---|
1269 | 1360 | { |
---|
| 1361 | + unsigned long flags; |
---|
| 1362 | + |
---|
1270 | 1363 | if (WARN_ON(!system_supports_fpsimd())) |
---|
1271 | 1364 | return; |
---|
1272 | 1365 | |
---|
1273 | 1366 | BUG_ON(!may_use_simd()); |
---|
1274 | 1367 | |
---|
1275 | | - get_cpu_fpsimd_context(); |
---|
| 1368 | + get_cpu_fpsimd_context(flags); |
---|
1276 | 1369 | |
---|
1277 | 1370 | /* Save unsaved fpsimd state, if any: */ |
---|
1278 | | - fpsimd_save(); |
---|
| 1371 | + __fpsimd_save(); |
---|
1279 | 1372 | |
---|
1280 | 1373 | /* Invalidate any task state remaining in the fpsimd regs: */ |
---|
1281 | 1374 | fpsimd_flush_cpu_state(); |
---|
| 1375 | + |
---|
| 1376 | + if (dovetailing()) |
---|
| 1377 | + hard_cond_local_irq_restore(flags); |
---|
1282 | 1378 | } |
---|
1283 | 1379 | EXPORT_SYMBOL(kernel_neon_begin); |
---|
1284 | 1380 | |
---|
.. | .. |
---|
1293 | 1389 | */ |
---|
1294 | 1390 | void kernel_neon_end(void) |
---|
1295 | 1391 | { |
---|
| 1392 | + unsigned long flags = hard_local_save_flags(); |
---|
| 1393 | + |
---|
1296 | 1394 | if (!system_supports_fpsimd()) |
---|
1297 | 1395 | return; |
---|
1298 | 1396 | |
---|
1299 | | - put_cpu_fpsimd_context(); |
---|
| 1397 | + put_cpu_fpsimd_context(flags); |
---|
1300 | 1398 | } |
---|
1301 | 1399 | EXPORT_SYMBOL(kernel_neon_end); |
---|
1302 | 1400 | |
---|
.. | .. |
---|
1386 | 1484 | static int fpsimd_cpu_pm_notifier(struct notifier_block *self, |
---|
1387 | 1485 | unsigned long cmd, void *v) |
---|
1388 | 1486 | { |
---|
| 1487 | + unsigned long flags; |
---|
| 1488 | + |
---|
1389 | 1489 | switch (cmd) { |
---|
1390 | 1490 | case CPU_PM_ENTER: |
---|
| 1491 | + flags = hard_cond_local_irq_save(); |
---|
1391 | 1492 | fpsimd_save_and_flush_cpu_state(); |
---|
| 1493 | + hard_cond_local_irq_restore(flags); |
---|
1392 | 1494 | break; |
---|
1393 | 1495 | case CPU_PM_EXIT: |
---|
1394 | 1496 | break; |
---|