@@ -14,10 +14,12 @@
 #include <linux/signal.h>
 #include <linux/sched/signal.h>
 #include <linux/smp.h>
+#include <linux/dovetail.h>
 #include <linux/init.h>
 #include <linux/uaccess.h>
 #include <linux/user.h>
 #include <linux/export.h>
+#include <linux/smp.h>
 
 #include <asm/cp15.h>
 #include <asm/cputype.h>
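
Note: <linux/dovetail.h> brings in the interrupt-pipeline interfaces used
throughout this patch (the hard_* IRQ helpers and the trap marks). The
second #include <linux/smp.h> is redundant with file line 16 above but
harmless thanks to include guards; presumably it is meant to spell out the
dependency on hard_get_cpu()/hard_put_cpu(), which a Dovetail tree is
expected to define in that header. A hedged sketch of what those helpers
are assumed to expand to (an assumption, not taken from this patch):

    /* Assumed Dovetail helpers: pair CPU pinning with hard, i.e.
     * pipeline-level, interrupt disabling, unlike get_cpu(), which
     * only disables in-band preemption. */
    #define hard_get_cpu(flags)                             \
            ({                                              \
                    (flags) = hard_preempt_disable();       \
                    raw_smp_processor_id();                 \
            })
    #define hard_put_cpu(flags)     hard_preempt_enable(flags)
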
@@ -90,6 +92,7 @@
 static void vfp_thread_flush(struct thread_info *thread)
 {
         union vfp_state *vfp = &thread->vfpstate;
+        unsigned long flags;
         unsigned int cpu;
 
         /*
@@ -100,11 +103,11 @@
          * Do this first to ensure that preemption won't overwrite our
          * state saving should access to the VFP be enabled at this point.
          */
-        cpu = get_cpu();
+        cpu = hard_get_cpu(flags);
         if (vfp_current_hw_state[cpu] == vfp)
                 vfp_current_hw_state[cpu] = NULL;
         fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
-        put_cpu();
+        hard_put_cpu(flags);
 
         memset(vfp, 0, sizeof(union vfp_state));
 
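
Note: get_cpu()/put_cpu() only disables in-band preemption, which no longer
suffices here: with Dovetail, an out-of-band interrupt can preempt this code
even while preemption is off and hand the FPU to a companion core. Switching
to the hard variants makes the vfp_current_hw_state[] update and the FPEXC
write atomic with respect to the out-of-band stage as well. The distilled
pattern, with explanatory comments added (a sketch of the rationale, not
authoritative):

    unsigned long flags;
    unsigned int cpu;

    cpu = hard_get_cpu(flags);          /* pin CPU and hard-disable IRQs */
    if (vfp_current_hw_state[cpu] == vfp)       /* do we own the hw state? */
            vfp_current_hw_state[cpu] = NULL;   /* then drop ownership */
    fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);       /* turn the VFP unit off */
    hard_put_cpu(flags);                /* restore previous hard IRQ state */
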
@@ -119,11 +122,12 @@
 {
         /* release case: Per-thread VFP cleanup. */
         union vfp_state *vfp = &thread->vfpstate;
-        unsigned int cpu = get_cpu();
+        unsigned long flags;
+        unsigned int cpu = hard_get_cpu(flags);
 
         if (vfp_current_hw_state[cpu] == vfp)
                 vfp_current_hw_state[cpu] = NULL;
-        put_cpu();
+        hard_put_cpu(flags);
 }
 
 static void vfp_thread_copy(struct thread_info *thread)
@@ -159,6 +163,7 @@
 static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
 {
         struct thread_info *thread = v;
+        unsigned long flags;
         u32 fpexc;
 #ifdef CONFIG_SMP
         unsigned int cpu;
@@ -166,6 +171,7 @@
 
         switch (cmd) {
         case THREAD_NOTIFY_SWITCH:
+                flags = hard_cond_local_irq_save();
                 fpexc = fmrx(FPEXC);
 
 #ifdef CONFIG_SMP
@@ -185,6 +191,7 @@
                  * old state.
                  */
                 fmxr(FPEXC, fpexc & ~FPEXC_EN);
+                hard_cond_local_irq_restore(flags);
                 break;
 
         case THREAD_NOTIFY_FLUSH:
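
Note: the context-switch path now brackets its FPEXC update with the
conditional hard-IRQ helpers. The _cond_ variants presumably compile away
when the interrupt pipeline is not built in, since the vanilla kernel
already serializes this notifier adequately; with the pipeline enabled they
hard-disable IRQs so the out-of-band stage cannot touch the FPU between the
fmrx() and the fmxr(). A hedged sketch of the assumed definitions (an
assumption, not taken from this patch):

    #ifdef CONFIG_IRQ_PIPELINE
    #define hard_cond_local_irq_save()          hard_local_irq_save()
    #define hard_cond_local_irq_restore(flags)  hard_local_irq_restore(flags)
    #else   /* pipeline off: nothing extra to protect against */
    #define hard_cond_local_irq_save()          ({ (unsigned long)0; })
    #define hard_cond_local_irq_restore(flags)  do { (void)(flags); } while (0)
    #endif
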
@@ -248,7 +255,10 @@
 
         if (exceptions == VFP_EXCEPTION_ERROR) {
                 vfp_panic("unhandled bounce", inst);
-                vfp_raise_sigfpe(FPE_FLTINV, regs);
+                if (mark_cond_trap_entry(ARM_TRAP_VFP, regs)) {
+                        vfp_raise_sigfpe(FPE_FLTINV, regs);
+                        mark_trap_exit(ARM_TRAP_VFP, regs);
+                }
                 return;
         }
 
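
Note: raising SIGFPE is in-band-only work (it takes locks and may
reschedule), so it must not run on behalf of an out-of-band context. The
trap marks presumably implement Dovetail's protocol for this:
mark_cond_trap_entry() announces that an in-band fault handler is starting
and, in this conditional flavor, is assumed to fail when the trap cannot be
attributed to the in-band stage, in which case the signal is silently
skipped; mark_trap_exit() closes the bracket. Annotated pattern (the
comments state inferred semantics, not confirmed by this hunk):

    if (mark_cond_trap_entry(ARM_TRAP_VFP, regs)) {
            /* in-band context confirmed: raising a signal is safe */
            vfp_raise_sigfpe(FPE_FLTINV, regs);
            mark_trap_exit(ARM_TRAP_VFP, regs); /* end of in-band handling */
    }
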
@@ -322,7 +332,7 @@
  */
 void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
 {
-        u32 fpscr, orig_fpscr, fpsid, exceptions;
+        u32 fpscr, orig_fpscr, fpsid, exceptions, next_trigger = 0;
 
         pr_debug("VFP: bounce: trigger %08x fpexc %08x\n", trigger, fpexc);
 
@@ -352,6 +362,7 @@
                 /*
                  * Synchronous exception, emulate the trigger instruction
                  */
+                hard_cond_local_irq_enable();
                 goto emulate;
         }
 
@@ -364,7 +375,18 @@
                 trigger = fmrx(FPINST);
                 regs->ARM_pc -= 4;
 #endif
-        } else if (!(fpexc & FPEXC_DEX)) {
+                if (fpexc & FPEXC_FP2V) {
+                        /*
+                         * The barrier() here prevents fpinst2 being read
+                         * before the condition above.
+                         */
+                        barrier();
+                        next_trigger = fmrx(FPINST2);
+                }
+        }
+        hard_cond_local_irq_enable();
+
+        if (!(fpexc & (FPEXC_EX | FPEXC_DEX))) {
                 /*
                  * Illegal combination of bits. It can be caused by an
                  * unallocated VFP instruction but with FPSCR.IXE set and not
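
Note: two coupled changes in this hunk. First, the FPINST2 read moves into
the FPEXC_EX branch and is latched into next_trigger while hard IRQs are
still off: once hard_cond_local_irq_enable() runs, an out-of-band context
may reclaim the FPU and clobber FPINST2, so every hardware register the
handler needs has to be sampled before that point. Second, the test is no
longer the else-arm of the FPEXC_EX check, so the old
"else if (!(fpexc & FPEXC_DEX))" becomes a standalone
"if (!(fpexc & (FPEXC_EX | FPEXC_DEX)))"; both mean "neither EX nor DEX
set", so behavior is preserved. Condensed control flow (a sketch;
next_trigger is consumed further down in place of the late FPINST2 read):

    if (fpexc & FPEXC_EX) {
            trigger = fmrx(FPINST);     /* sample while still protected */
            if (fpexc & FPEXC_FP2V) {
                    barrier();          /* order the read after the test */
                    next_trigger = fmrx(FPINST2);
            }
    }
    hard_cond_local_irq_enable();       /* oob may take the FPU from here */

    if (!(fpexc & (FPEXC_EX | FPEXC_DEX))) {
            /* illegal bit combination, handled as before */
    }
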
@@ -404,18 +426,14 @@
         if ((fpexc & (FPEXC_EX | FPEXC_FP2V)) != (FPEXC_EX | FPEXC_FP2V))
                 goto exit;
 
-        /*
-         * The barrier() here prevents fpinst2 being read
-         * before the condition above.
-         */
-        barrier();
-        trigger = fmrx(FPINST2);
+        trigger = next_trigger;
 
 emulate:
         exceptions = vfp_emulate_instruction(trigger, orig_fpscr, regs);
         if (exceptions)
                 vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);
 exit:
+        hard_cond_local_irq_enable();
         preempt_enable();
 }
 
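
Note: VFP_bounce() is presumably entered with hard IRQs disabled, coming
straight from the exception path, so every way out must re-enable them. The
enable added at the exit label is assumed to cover bail-outs taken before
either of the earlier hard_cond_local_irq_enable() calls (for instance in
the part of the function elided above); on paths that already enabled, a
second enable is harmless, since these are unconditional enables rather
than saved-flags restores. Consistent with the sketch given earlier, the
enable flavor is assumed to be:

    #ifdef CONFIG_IRQ_PIPELINE
    #define hard_cond_local_irq_enable()        hard_local_irq_enable()
    #else
    #define hard_cond_local_irq_enable()        do { } while (0)
    #endif
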
@@ -515,7 +533,8 @@
  */
 void vfp_sync_hwstate(struct thread_info *thread)
 {
-        unsigned int cpu = get_cpu();
+        unsigned long flags;
+        unsigned int cpu = hard_get_cpu(flags);
 
         if (vfp_state_in_hw(cpu, thread)) {
                 u32 fpexc = fmrx(FPEXC);
@@ -528,17 +547,18 @@
                 fmxr(FPEXC, fpexc);
         }
 
-        put_cpu();
+        hard_put_cpu(flags);
 }
 
 /* Ensure that the thread reloads the hardware VFP state on the next use. */
 void vfp_flush_hwstate(struct thread_info *thread)
 {
-        unsigned int cpu = get_cpu();
+        unsigned long flags;
+        unsigned int cpu = hard_get_cpu(flags);
 
         vfp_force_reload(cpu, thread);
 
-        put_cpu();
+        hard_put_cpu(flags);
 }
 
 /*