@@ -42,7 +42,6 @@
 #include <asm/irq.h>
 #include <asm/mips-cps.h>
 #include <asm/msa.h>
-#include <asm/pgtable.h>
 #include <asm/mipsregs.h>
 #include <asm/processor.h>
 #include <asm/reg.h>
@@ -53,6 +52,7 @@
 #include <asm/inst.h>
 #include <asm/stacktrace.h>
 #include <asm/irq_regs.h>
+#include <asm/exec.h>
 
 #ifdef CONFIG_HOTPLUG_CPU
 void arch_cpu_idle_dead(void)
@@ -69,13 +69,15 @@
         unsigned long status;
 
         /* New thread loses kernel privileges. */
-        status = regs->cp0_status & ~(ST0_CU0|ST0_CU1|ST0_FR|KU_MASK);
+        status = regs->cp0_status & ~(ST0_CU0|ST0_CU1|ST0_CU2|ST0_FR|KU_MASK);
         status |= KU_USER;
         regs->cp0_status = status;
         lose_fpu(0);
         clear_thread_flag(TIF_MSA_CTX_LIVE);
         clear_used_math();
+#ifdef CONFIG_MIPS_FP_SUPPORT
         atomic_set(&current->thread.bd_emu_frame, BD_EMUFRAME_NONE);
+#endif
         init_dsp();
         regs->cp0_epc = pc;
         regs->regs[29] = sp;
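
The hunk above now also strips ST0_CU2, so a freshly started user thread begins with coprocessor 0, 1 and 2 access disabled before user mode is forced, and the branch-delay emulation frame reset is compiled only when CONFIG_MIPS_FP_SUPPORT is set. Below is a minimal, userspace-compilable sketch of the same mask-then-set pattern; the DEMO_* bit positions are illustrative stand-ins, not the architectural CP0 Status encodings.

    /*
     * Illustrative sketch only: the bit positions below are stand-ins,
     * not the real CP0 Status layout.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define DEMO_CU0      (1u << 28)
    #define DEMO_CU1      (1u << 29)
    #define DEMO_CU2      (1u << 30)
    #define DEMO_FR       (1u << 26)
    #define DEMO_KU_MASK  (3u << 3)
    #define DEMO_KU_USER  (2u << 3)

    /* Rewrite a status word the way the hunk does: clear every
     * coprocessor-usable bit plus the old privilege bits, then force
     * user mode. */
    static uint32_t new_thread_status(uint32_t status)
    {
            status &= ~(DEMO_CU0 | DEMO_CU1 | DEMO_CU2 | DEMO_FR | DEMO_KU_MASK);
            status |= DEMO_KU_USER;
            return status;
    }

    int main(void)
    {
            printf("%08x\n", new_thread_status(0xffffffffu));
            return 0;
    }
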
@@ -118,8 +120,9 @@
 /*
  * Copy architecture-specific thread state
  */
-int copy_thread_tls(unsigned long clone_flags, unsigned long usp,
-        unsigned long kthread_arg, struct task_struct *p, unsigned long tls)
+int copy_thread(unsigned long clone_flags, unsigned long usp,
+        unsigned long kthread_arg, struct task_struct *p,
+        unsigned long tls)
 {
         struct thread_info *ti = task_thread_info(p);
         struct pt_regs *childregs, *regs = current_pt_regs();
@@ -131,8 +134,8 @@
         childregs = (struct pt_regs *) childksp - 1;
         /* Put the stack after the struct pt_regs. */
         childksp = (unsigned long) childregs;
-        p->thread.cp0_status = read_c0_status() & ~(ST0_CU2|ST0_CU1);
-        if (unlikely(p->flags & PF_KTHREAD)) {
+        p->thread.cp0_status = (read_c0_status() & ~(ST0_CU2|ST0_CU1)) | ST0_KERNEL_CUMASK;
+        if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
                 /* kernel thread */
                 unsigned long status = p->thread.cp0_status;
                 memset(childregs, 0, sizeof(struct pt_regs));
@@ -176,7 +179,9 @@
         clear_tsk_thread_flag(p, TIF_FPUBOUND);
 #endif /* CONFIG_MIPS_MT_FPAFF */
 
+#ifdef CONFIG_MIPS_FP_SUPPORT
         atomic_set(&p->thread.bd_emu_frame, BD_EMUFRAME_NONE);
+#endif
 
         if (clone_flags & CLONE_SETTLS)
                 ti->tp_value = tls;
@@ -275,7 +280,21 @@
                 *poff = ip->i_format.simmediate / sizeof(ulong);
                 return 1;
         }
-
+#ifdef CONFIG_CPU_LOONGSON64
+        if ((ip->loongson3_lswc2_format.opcode == swc2_op) &&
+            (ip->loongson3_lswc2_format.ls == 1) &&
+            (ip->loongson3_lswc2_format.fr == 0) &&
+            (ip->loongson3_lswc2_format.base == 29)) {
+                if (ip->loongson3_lswc2_format.rt == 31) {
+                        *poff = ip->loongson3_lswc2_format.offset << 1;
+                        return 1;
+                }
+                if (ip->loongson3_lswc2_format.rq == 31) {
+                        *poff = (ip->loongson3_lswc2_format.offset << 1) + 1;
+                        return 1;
+                }
+        }
+#endif
         return 0;
 #endif
 }
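
The CONFIG_CPU_LOONGSON64 branch added above decodes Loongson-3's overridden swc2 encoding, a store that covers two GPRs per instruction: its offset field counts 16-byte units, so doubling it gives an index in sizeof(unsigned long) slots, with the register in the rq field landing one slot above the one in rt. The sketch below uses a hypothetical helper (gssq_ra_slot, not kernel code) to mirror only that offset arithmetic.

    /*
     * Hypothetical helper, not kernel code: it mirrors the offset
     * arithmetic of the Loongson-3 hunk above. The store covers two
     * 64-bit GPRs; the offset field counts 16-byte units, while the
     * caller wants the $ra slot in sizeof(unsigned long) units.
     */
    #include <stdbool.h>
    #include <stdio.h>

    static bool gssq_ra_slot(unsigned int rt, unsigned int rq,
                             unsigned int quad_offset, unsigned long *poff)
    {
            if (rt == 31) {                 /* $ra carried in the rt field */
                    *poff = quad_offset << 1;
                    return true;
            }
            if (rq == 31) {                 /* $ra carried in the rq field */
                    *poff = (quad_offset << 1) + 1;
                    return true;
            }
            return false;                   /* this store does not save $ra */
    }

    int main(void)
    {
            unsigned long off;

            /* a store with rq == 31 ($ra), two 16-byte units up the stack */
            if (gssq_ra_slot(16, 31, 2, &off))
                    printf("ra saved %lu longs above the base register\n", off);
            return 0;
    }
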
@@ -650,8 +669,10 @@
 {
         unsigned long top = TASK_SIZE & PAGE_MASK;
 
-        /* One page for branch delay slot "emulation" */
-        top -= PAGE_SIZE;
+        if (IS_ENABLED(CONFIG_MIPS_FP_SUPPORT)) {
+                /* One page for branch delay slot "emulation" */
+                top -= PAGE_SIZE;
+        }
 
         /* Space for the VDSO, data page & GIC user page */
         top -= PAGE_ALIGN(current->thread.abi->vdso->size);
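
In the hunk above, the page kept free for branch delay slot "emulation" is now reserved only when CONFIG_MIPS_FP_SUPPORT is enabled, since the emulation frames it backs exist only in FP-enabled builds. A rough, userspace-compilable sketch of the visible part of that calculation follows; DEMO_TASK_SIZE, DEMO_PAGE_SIZE and the vdso size are stand-in values, and the function's remaining reservations are omitted.

    /*
     * Stand-in sketch of the reservation logic shown above; the constants
     * are demo values, not the kernel's, and only the part visible in the
     * hunk is mirrored.
     */
    #include <stdbool.h>
    #include <stdio.h>

    #define DEMO_PAGE_SIZE  4096UL
    #define DEMO_PAGE_MASK  (~(DEMO_PAGE_SIZE - 1))
    #define DEMO_TASK_SIZE  0x80000000UL

    static unsigned long demo_page_align(unsigned long x)
    {
            return (x + DEMO_PAGE_SIZE - 1) & DEMO_PAGE_MASK;
    }

    static unsigned long demo_stack_top(bool fp_support, unsigned long vdso_size)
    {
            unsigned long top = DEMO_TASK_SIZE & DEMO_PAGE_MASK;

            /* One page for branch delay slot "emulation", FP builds only */
            if (fp_support)
                    top -= DEMO_PAGE_SIZE;

            /* Space for the VDSO image (further reservations omitted here) */
            top -= demo_page_align(vdso_size);

            return top;
    }

    int main(void)
    {
            printf("with FP:    %#lx\n", demo_stack_top(true, 2 * DEMO_PAGE_SIZE));
            printf("without FP: %#lx\n", demo_stack_top(false, 2 * DEMO_PAGE_SIZE));
            return 0;
    }
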
@@ -736,10 +757,9 @@
         /*
          * This is icky, but we use this to simply ensure that all CPUs have
          * context switched, regardless of whether they were previously running
-         * kernel or user code. This ensures that no CPU currently has its FPU
-         * enabled, or is about to attempt to enable it through any path other
-         * than enable_restore_fp_context() which will wait appropriately for
-         * fp_mode_switching to be zero.
+         * kernel or user code. This ensures that no CPU that a mode-switching
+         * program may execute on keeps its FPU enabled (& in the old mode)
+         * throughout the mode switch.
          */
         return 0;
 }
@@ -827,8 +847,6 @@
         for_each_cpu_and(cpu, &process_cpus, cpu_online_mask)
                 work_on_cpu(cpu, prepare_for_fp_mode_switch, NULL);
         put_online_cpus();
-
-        wake_up_var(&task->mm->context.fp_mode_switching);
 
         return 0;
 }