2024-11-01  commit 2f529f9b558ca1c1bd74be7437a84e4711743404
--- a/kernel/arch/arm/vfp/vfpmodule.c
+++ b/kernel/arch/arm/vfp/vfpmodule.c
@@ -14,10 +14,12 @@
 #include <linux/signal.h>
 #include <linux/sched/signal.h>
 #include <linux/smp.h>
+#include <linux/dovetail.h>
 #include <linux/init.h>
 #include <linux/uaccess.h>
 #include <linux/user.h>
 #include <linux/export.h>
+#include <linux/smp.h>
 
 #include <asm/cp15.h>
 #include <asm/cputype.h>
@@ -90,6 +92,7 @@
 static void vfp_thread_flush(struct thread_info *thread)
 {
         union vfp_state *vfp = &thread->vfpstate;
+        unsigned long flags;
         unsigned int cpu;
 
         /*
@@ -100,11 +103,11 @@
          * Do this first to ensure that preemption won't overwrite our
          * state saving should access to the VFP be enabled at this point.
          */
-        cpu = get_cpu();
+        cpu = hard_get_cpu(flags);
         if (vfp_current_hw_state[cpu] == vfp)
                 vfp_current_hw_state[cpu] = NULL;
         fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
-        put_cpu();
+        hard_put_cpu(flags);
 
         memset(vfp, 0, sizeof(union vfp_state));
 
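Note: hard_get_cpu()/hard_put_cpu() are Dovetail (IRQ pipeline) primitives, not mainline API. Where get_cpu()/put_cpu() only disable kernel preemption, the hard_ variants are also expected to mask interrupts at the CPU level, so that not even an out-of-band context can preempt this section and grab the VFP unit mid-flush. A plausible sketch, assuming the helpers follow the usual Dovetail pattern and live in <linux/smp.h> (which would also explain the otherwise redundant second include added above); this is an illustration, not the tree's actual definition:

        #ifdef CONFIG_IRQ_PIPELINE
        /* Disable hard IRQs and preemption, return the current CPU id. */
        #define hard_get_cpu(flags)                             \
                ({                                              \
                        (flags) = hard_preempt_disable();       \
                        raw_smp_processor_id();                 \
                })
        /* Re-enable, restoring the hard IRQ state saved in flags. */
        #define hard_put_cpu(flags)     hard_preempt_enable(flags)
        #endif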
@@ -119,11 +122,12 @@
 {
         /* release case: Per-thread VFP cleanup. */
         union vfp_state *vfp = &thread->vfpstate;
-        unsigned int cpu = get_cpu();
+        unsigned long flags;
+        unsigned int cpu = hard_get_cpu(flags);
 
         if (vfp_current_hw_state[cpu] == vfp)
                 vfp_current_hw_state[cpu] = NULL;
-        put_cpu();
+        hard_put_cpu(flags);
 }
 
 static void vfp_thread_copy(struct thread_info *thread)
@@ -159,6 +163,7 @@
 static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
 {
         struct thread_info *thread = v;
+        unsigned long flags;
         u32 fpexc;
 #ifdef CONFIG_SMP
         unsigned int cpu;
@@ -166,6 +171,7 @@
 
         switch (cmd) {
         case THREAD_NOTIFY_SWITCH:
+                flags = hard_cond_local_irq_save();
                 fpexc = fmrx(FPEXC);
 
 #ifdef CONFIG_SMP
@@ -185,6 +191,7 @@
                  * old state.
                  */
                 fmxr(FPEXC, fpexc & ~FPEXC_EN);
+                hard_cond_local_irq_restore(flags);
                 break;
 
         case THREAD_NOTIFY_FLUSH:
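Note: the THREAD_NOTIFY_SWITCH path now manipulates FPEXC under hard_cond_local_irq_save()/hard_cond_local_irq_restore(). Following the Dovetail naming convention, the cond variants should really mask CPU interrupts only when CONFIG_IRQ_PIPELINE is enabled, and cost (almost) nothing otherwise, since a non-pipelined kernel already runs this context-switch notifier with interrupts off. A hedged sketch of the assumed mapping:

        #ifdef CONFIG_IRQ_PIPELINE
        /* Pipeline on: actually mask interrupts in the CPU. */
        #define hard_cond_local_irq_save()              hard_local_irq_save()
        #define hard_cond_local_irq_restore(flags)      hard_local_irq_restore(flags)
        #else
        /* Pipeline off: regular IRQ disabling already protects us here. */
        #define hard_cond_local_irq_save()              (0UL)
        #define hard_cond_local_irq_restore(flags)      ((void)(flags))
        #endif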
@@ -248,7 +255,10 @@
 
         if (exceptions == VFP_EXCEPTION_ERROR) {
                 vfp_panic("unhandled bounce", inst);
-                vfp_raise_sigfpe(FPE_FLTINV, regs);
+                if (mark_cond_trap_entry(ARM_TRAP_VFP, regs)) {
+                        vfp_raise_sigfpe(FPE_FLTINV, regs);
+                        mark_trap_exit(ARM_TRAP_VFP, regs);
+                }
                 return;
         }
 
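Note: delivering SIGFPE is in-band work, while this fault may have been taken over the out-of-band stage. mark_cond_trap_entry()/mark_trap_exit() appear to implement the Dovetail trap notification protocol: tell the core a trap handler is about to run so it can demote the faulting thread to the in-band stage first, and skip in-band handling when that is not possible. A hypothetical caller following the same pattern (the return convention is an assumption, and force_sig_fault() merely stands in for the signal path):

        static void handle_vfp_fault(struct pt_regs *regs, int sig, int code)
        {
                /* Assumed: returns false when in-band handling must be skipped. */
                if (!mark_cond_trap_entry(ARM_TRAP_VFP, regs))
                        return;

                force_sig_fault(sig, code,
                                (void __user *)instruction_pointer(regs));

                mark_trap_exit(ARM_TRAP_VFP, regs);
        }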
@@ -322,7 +332,7 @@
  */
 void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
 {
-        u32 fpscr, orig_fpscr, fpsid, exceptions;
+        u32 fpscr, orig_fpscr, fpsid, exceptions, next_trigger = 0;
 
         pr_debug("VFP: bounce: trigger %08x fpexc %08x\n", trigger, fpexc);
 
@@ -352,6 +362,7 @@
                 /*
                  * Synchronous exception, emulate the trigger instruction
                  */
+                hard_cond_local_irq_enable();
                 goto emulate;
         }
 
@@ -364,7 +375,18 @@
                 trigger = fmrx(FPINST);
                 regs->ARM_pc -= 4;
 #endif
-        } else if (!(fpexc & FPEXC_DEX)) {
+                if (fpexc & FPEXC_FP2V) {
+                        /*
+                         * The barrier() here prevents fpinst2 being read
+                         * before the condition above.
+                         */
+                        barrier();
+                        next_trigger = fmrx(FPINST2);
+                }
+        }
+        hard_cond_local_irq_enable();
+
+        if (!(fpexc & (FPEXC_EX | FPEXC_DEX))) {
                 /*
                  * Illegal combination of bits. It can be caused by an
                  * unallocated VFP instruction but with FPSCR.IXE set and not
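Note: this hunk is the heart of the change to VFP_bounce(). Once hard_cond_local_irq_enable() runs (both here and in the synchronous-exception hunk above), an out-of-band thread may preempt the handler and take over the VFP unit, so FPINST2 can no longer be read where the original code read it (see the lines removed from the next hunk). The value is therefore snapshotted into next_trigger while hard interrupts are still off, with barrier() keeping its original job of ordering the read after the FPEXC_FP2V test; the follow-on test also turns from an else-if into a standalone if on !(FPEXC_EX | FPEXC_DEX), which preserves the original meaning now that the brace structure changed. Schematically:

        /* Sketch of the resulting ordering; names as in the diff. */
        u32 next_trigger = 0;

        if (fpexc & FPEXC_EX) {
                trigger = fmrx(FPINST);         /* we still own the VFP unit */
                if (fpexc & FPEXC_FP2V) {
                        barrier();              /* read FPINST2 after the test */
                        next_trigger = fmrx(FPINST2);
                }
        }
        hard_cond_local_irq_enable();           /* oob preemption possible below */
        /* ...the slow emulation path later consumes next_trigger... */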
@@ -404,18 +426,14 @@
         if ((fpexc & (FPEXC_EX | FPEXC_FP2V)) != (FPEXC_EX | FPEXC_FP2V))
                 goto exit;
 
-        /*
-         * The barrier() here prevents fpinst2 being read
-         * before the condition above.
-         */
-        barrier();
-        trigger = fmrx(FPINST2);
+        trigger = next_trigger;
 
  emulate:
         exceptions = vfp_emulate_instruction(trigger, orig_fpscr, regs);
         if (exceptions)
                 vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);
  exit:
+        hard_cond_local_irq_enable();
         preempt_enable();
 }
 
@@ -515,7 +533,8 @@
  */
 void vfp_sync_hwstate(struct thread_info *thread)
 {
-        unsigned int cpu = get_cpu();
+        unsigned long flags;
+        unsigned int cpu = hard_get_cpu(flags);
 
         if (vfp_state_in_hw(cpu, thread)) {
                 u32 fpexc = fmrx(FPEXC);
@@ -528,17 +547,18 @@
                 fmxr(FPEXC, fpexc);
         }
 
-        put_cpu();
+        hard_put_cpu(flags);
 }
 
 /* Ensure that the thread reloads the hardware VFP state on the next use. */
 void vfp_flush_hwstate(struct thread_info *thread)
 {
-        unsigned int cpu = get_cpu();
+        unsigned long flags;
+        unsigned int cpu = hard_get_cpu(flags);
 
         vfp_force_reload(cpu, thread);
 
-        put_cpu();
+        hard_put_cpu(flags);
 }
 
 /*
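Taken together, the conversion is mechanical: every section that touches vfp_current_hw_state[] or the VFP hardware trades get_cpu()/put_cpu() for the hard_ variants, and the bounce path keeps hard interrupts off until all per-CPU VFP registers have been sampled. With CONFIG_IRQ_PIPELINE disabled, the patched code is presumably meant to collapse back to the original mainline behavior; a hypothetical set of fallback stubs illustrating that equivalence (for reading the diff only, not lifted from the tree):

        #ifndef CONFIG_IRQ_PIPELINE
        #define hard_get_cpu(flags)             ({ (void)(flags); get_cpu(); })
        #define hard_put_cpu(flags)             put_cpu()
        #define hard_cond_local_irq_enable()    do { } while (0)
        #define mark_cond_trap_entry(nr, regs)  1
        #define mark_trap_exit(nr, regs)        do { } while (0)
        #endif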