.. | .. |
---|
9 | 9 | #include <linux/uaccess.h> |
---|
10 | 10 | #include <asm/elf.h> |
---|
11 | 11 | #include <asm/proc-fns.h> |
---|
| 12 | +#include <asm/fpu.h> |
---|
12 | 13 | #include <linux/ptrace.h> |
---|
13 | 14 | #include <linux/reboot.h> |
---|
14 | 15 | |
---|
15 | | -extern void setup_mm_for_reboot(char mode); |
---|
16 | | -#ifdef CONFIG_PROC_FS |
---|
17 | | -struct proc_dir_entry *proc_dir_cpu; |
---|
18 | | -EXPORT_SYMBOL(proc_dir_cpu); |
---|
| 16 | +#if IS_ENABLED(CONFIG_LAZY_FPU) |
---|
| 17 | +struct task_struct *last_task_used_math; |
---|
19 | 18 | #endif |
---|
| 19 | + |
---|
| 20 | +extern void setup_mm_for_reboot(char mode); |
---|
20 | 21 | |
---|
21 | 22 | extern inline void arch_reset(char mode) |
---|
22 | 23 | { |
---|
.. | .. |
---|
120 | 121 | regs->uregs[3], regs->uregs[2], regs->uregs[1], regs->uregs[0]); |
---|
121 | 122 | pr_info(" IRQs o%s Segment %s\n", |
---|
122 | 123 | interrupts_enabled(regs) ? "n" : "ff", |
---|
123 | | - segment_eq(get_fs(), get_ds())? "kernel" : "user"); |
---|
| 124 | + uaccess_kernel() ? "kernel" : "user"); |
---|
124 | 125 | } |
---|
125 | 126 | |
---|
126 | 127 | EXPORT_SYMBOL(show_regs); |
---|
127 | 128 | |
---|
/*
 * Arch hook run when a task exits.  With lazy FPU switching the FPU
 * hardware may still be owned by the exiting task; drop the ownership
 * marker so a later context switch does not save state into a dead
 * task_struct.
 */
void exit_thread(struct task_struct *tsk)
{
#if defined(CONFIG_FPU) && defined(CONFIG_LAZY_FPU)
	if (last_task_used_math == tsk)
		last_task_used_math = NULL;
#endif
}
---|
| 136 | + |
---|
/*
 * Reset the current task's FPU state (called e.g. across exec).
 * Clears the FPU-enable state recorded in the task's saved user
 * registers and the used-math flag; under lazy FPU switching it also
 * relinquishes FPU ownership if this task currently holds it.
 *
 * NOTE(review): this block tests defined(CONFIG_FPU) while the
 * neighbouring hunks use IS_ENABLED(CONFIG_FPU) — equivalent for a
 * bool Kconfig option, but inconsistent in style.
 */
void flush_thread(void)
{
#if defined(CONFIG_FPU)
	clear_fpu(task_pt_regs(current));
	clear_used_math();
# ifdef CONFIG_LAZY_FPU
	if (last_task_used_math == current)
		last_task_used_math = NULL;
# endif
#endif
}
---|
131 | 148 | |
---|
132 | 149 | DEFINE_PER_CPU(struct task_struct *, __entry_task); |
---|
133 | 150 | |
---|
134 | 151 | asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); |
---|
135 | 152 | int copy_thread(unsigned long clone_flags, unsigned long stack_start, |
---|
136 | | - unsigned long stk_sz, struct task_struct *p) |
---|
| 153 | + unsigned long stk_sz, struct task_struct *p, unsigned long tls) |
---|
137 | 154 | { |
---|
138 | 155 | struct pt_regs *childregs = task_pt_regs(p); |
---|
139 | 156 | |
---|
140 | 157 | memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context)); |
---|
141 | 158 | |
---|
142 | | - if (unlikely(p->flags & PF_KTHREAD)) { |
---|
| 159 | + if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) { |
---|
143 | 160 | memset(childregs, 0, sizeof(struct pt_regs)); |
---|
144 | 161 | /* kernel thread fn */ |
---|
145 | 162 | p->thread.cpu_context.r6 = stack_start; |
---|
.. | .. |
---|
153 | 170 | childregs->uregs[0] = 0; |
---|
154 | 171 | childregs->osp = 0; |
---|
155 | 172 | if (clone_flags & CLONE_SETTLS) |
---|
156 | | - childregs->uregs[25] = childregs->uregs[3]; |
---|
| 173 | + childregs->uregs[25] = tls; |
---|
157 | 174 | } |
---|
158 | 175 | /* cpu context switching */ |
---|
159 | 176 | p->thread.cpu_context.pc = (unsigned long)ret_from_fork; |
---|
160 | 177 | p->thread.cpu_context.sp = (unsigned long)childregs; |
---|
| 178 | + |
---|
| 179 | +#if IS_ENABLED(CONFIG_FPU) |
---|
| 180 | + if (used_math()) { |
---|
| 181 | +# if !IS_ENABLED(CONFIG_LAZY_FPU) |
---|
| 182 | + unlazy_fpu(current); |
---|
| 183 | +# else |
---|
| 184 | + preempt_disable(); |
---|
| 185 | + if (last_task_used_math == current) |
---|
| 186 | + save_fpu(current); |
---|
| 187 | + preempt_enable(); |
---|
| 188 | +# endif |
---|
| 189 | + p->thread.fpu = current->thread.fpu; |
---|
| 190 | + clear_fpu(task_pt_regs(p)); |
---|
| 191 | + set_stopped_child_used_math(p); |
---|
| 192 | + } |
---|
| 193 | +#endif |
---|
161 | 194 | |
---|
162 | 195 | #ifdef CONFIG_HWZOL |
---|
163 | 196 | childregs->lb = 0; |
---|
.. | .. |
---|
168 | 201 | return 0; |
---|
169 | 202 | } |
---|
170 | 203 | |
---|
#if IS_ENABLED(CONFIG_FPU)
/*
 * FPU bookkeeping on the context-switch path.
 *
 * In eager (non-lazy) mode the outgoing task's FPU state is saved via
 * unlazy_fpu() here.  A user task being switched in has its FPU-enable
 * bit cleared in its saved registers — presumably so its first FPU
 * instruction traps and the state is (re)loaded on demand; confirm
 * against the FPU exception handler.  Kernel threads (PF_KTHREAD) have
 * no user register frame to touch and are skipped.
 *
 * Returns @prev unchanged so the caller can keep it live across the
 * switch.
 */
struct task_struct *_switch_fpu(struct task_struct *prev, struct task_struct *next)
{
#if !IS_ENABLED(CONFIG_LAZY_FPU)
	unlazy_fpu(prev);
#endif
	if (!(next->flags & PF_KTHREAD))
		clear_fpu(task_pt_regs(next));
	return prev;
}
#endif
---|
| 215 | + |
---|
/*
 * Fill in the FPU register set for an ELF core dump.
 *
 * Returns nonzero iff the current task has used the FPU; in that case
 * the task's saved FPU context is copied into @fpu.  A zero return
 * leaves @fpu untouched (also the unconditional result when CONFIG_FPU
 * is disabled).  @regs is unused here.
 */
int dump_fpu(struct pt_regs *regs, elf_fpregset_t * fpu)
{
	int fpvalid = 0;
#if IS_ENABLED(CONFIG_FPU)
	struct task_struct *tsk = current;

	fpvalid = tsk_used_math(tsk);
	if (fpvalid) {
		/* presumably flushes live FPU registers into
		 * tsk->thread.fpu before the copy — confirm against
		 * lose_fpu()'s definition */
		lose_fpu();
		memcpy(fpu, &tsk->thread.fpu, sizeof(*fpu));
	}
#endif
	return fpvalid;
}
---|
179 | 233 | |
---|