.. | ..
38 | 38 | #include <linux/io.h>
39 | 39 | #include <linux/kdebug.h>
40 | 40 | #include <linux/syscalls.h>
41 | | -#include <linux/highmem.h>
42 | 41 |
43 | | -#include <asm/pgtable.h>
44 | 42 | #include <asm/ldt.h>
45 | 43 | #include <asm/processor.h>
46 | 44 | #include <asm/fpu/internal.h>
47 | 45 | #include <asm/desc.h>
48 | | -#ifdef CONFIG_MATH_EMULATION
49 | | -#include <asm/math_emu.h>
50 | | -#endif
51 | 46 |
52 | 47 | #include <linux/err.h>
53 | 48 |
54 | 49 | #include <asm/tlbflush.h>
55 | 50 | #include <asm/cpu.h>
56 | | -#include <asm/syscalls.h>
57 | 51 | #include <asm/debugreg.h>
58 | 52 | #include <asm/switch_to.h>
59 | 53 | #include <asm/vm86.h>
60 | | -#include <asm/intel_rdt_sched.h>
| 54 | +#include <asm/resctrl.h>
61 | 55 | #include <asm/proto.h>
62 | 56 |
63 | 57 | #include "process.h"
64 | 58 |
65 | | -void __show_regs(struct pt_regs *regs, enum show_regs_mode mode)
| 59 | +void __show_regs(struct pt_regs *regs, enum show_regs_mode mode,
| 60 | +		 const char *log_lvl)
66 | 61 | {
67 | 62 | 	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
68 | 63 | 	unsigned long d0, d1, d2, d3, d6, d7;
69 | | -	unsigned long sp;
70 | | -	unsigned short ss, gs;
| 64 | +	unsigned short gs;
71 | 65 |
72 | | -	if (user_mode(regs)) {
73 | | -		sp = regs->sp;
74 | | -		ss = regs->ss;
| 66 | +	if (user_mode(regs))
75 | 67 | 		gs = get_user_gs(regs);
76 | | -	} else {
77 | | -		sp = kernel_stack_pointer(regs);
78 | | -		savesegment(ss, ss);
| 68 | +	else
79 | 69 | 		savesegment(gs, gs);
80 | | -	}
81 | 70 |
82 | | -	show_ip(regs, KERN_DEFAULT);
| 71 | +	show_ip(regs, log_lvl);
83 | 72 |
84 | | -	printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
85 | | -		regs->ax, regs->bx, regs->cx, regs->dx);
86 | | -	printk(KERN_DEFAULT "ESI: %08lx EDI: %08lx EBP: %08lx ESP: %08lx\n",
87 | | -		regs->si, regs->di, regs->bp, sp);
88 | | -	printk(KERN_DEFAULT "DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x EFLAGS: %08lx\n",
89 | | -	       (u16)regs->ds, (u16)regs->es, (u16)regs->fs, gs, ss, regs->flags);
| 73 | +	printk("%sEAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
| 74 | +		log_lvl, regs->ax, regs->bx, regs->cx, regs->dx);
| 75 | +	printk("%sESI: %08lx EDI: %08lx EBP: %08lx ESP: %08lx\n",
| 76 | +		log_lvl, regs->si, regs->di, regs->bp, regs->sp);
| 77 | +	printk("%sDS: %04x ES: %04x FS: %04x GS: %04x SS: %04x EFLAGS: %08lx\n",
| 78 | +		log_lvl, (u16)regs->ds, (u16)regs->es, (u16)regs->fs, gs, regs->ss, regs->flags);
90 | 79 |
91 | 80 | 	if (mode != SHOW_REGS_ALL)
92 | 81 | 		return;
.. | ..
95 | 84 | 	cr2 = read_cr2();
96 | 85 | 	cr3 = __read_cr3();
97 | 86 | 	cr4 = __read_cr4();
98 | | -	printk(KERN_DEFAULT "CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n",
99 | | -			cr0, cr2, cr3, cr4);
| 87 | +	printk("%sCR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n",
| 88 | +		log_lvl, cr0, cr2, cr3, cr4);
100 | 89 |
101 | 90 | 	get_debugreg(d0, 0);
102 | 91 | 	get_debugreg(d1, 1);
.. | ..
110 | 99 | 	    (d6 == DR6_RESERVED) && (d7 == 0x400))
111 | 100 | 		return;
112 | 101 |
113 | | -	printk(KERN_DEFAULT "DR0: %08lx DR1: %08lx DR2: %08lx DR3: %08lx\n",
114 | | -			d0, d1, d2, d3);
115 | | -	printk(KERN_DEFAULT "DR6: %08lx DR7: %08lx\n",
116 | | -			d6, d7);
| 102 | +	printk("%sDR0: %08lx DR1: %08lx DR2: %08lx DR3: %08lx\n",
| 103 | +		log_lvl, d0, d1, d2, d3);
| 104 | +	printk("%sDR6: %08lx DR7: %08lx\n",
| 105 | +		log_lvl, d6, d7);
117 | 106 | }
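
The hunk above threads a caller-supplied log-level prefix (log_lvl) through __show_regs() instead of hard-coding KERN_DEFAULT into each printk(), and drops the sp/ss special-casing now that regs->sp and regs->ss hold valid values for kernel-mode register sets as well. A minimal userspace sketch of the prefix-threading pattern; the names here (show_regs_demo, LOG_INFO, LOG_WARN) are illustrative stand-ins, not kernel API:

#include <stdio.h>

#define LOG_INFO "[info] "	/* stand-ins for KERN_INFO / KERN_WARNING */
#define LOG_WARN "[warn] "

/* Each output line carries the caller-chosen prefix, so one dump helper
 * can serve both routine and emergency paths. */
static void show_regs_demo(const char *log_lvl,
			   unsigned long ax, unsigned long bx)
{
	printf("%sEAX: %08lx EBX: %08lx\n", log_lvl, ax, bx);
}

int main(void)
{
	show_regs_demo(LOG_INFO, 0x1234, 0x5678);	/* routine dump */
	show_regs_demo(LOG_WARN, 0xdead, 0xbeef);	/* urgent dump  */
	return 0;
}
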
118 | 107 |
119 | 108 | void release_thread(struct task_struct *dead_task)
120 | 109 | {
121 | 110 | 	BUG_ON(dead_task->mm);
122 | 111 | 	release_vm86_irqs(dead_task);
123 | | -}
124 | | -
125 | | -int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
126 | | -	unsigned long arg, struct task_struct *p, unsigned long tls)
127 | | -{
128 | | -	struct pt_regs *childregs = task_pt_regs(p);
129 | | -	struct fork_frame *fork_frame = container_of(childregs, struct fork_frame, regs);
130 | | -	struct inactive_task_frame *frame = &fork_frame->frame;
131 | | -	struct task_struct *tsk;
132 | | -	int err;
133 | | -
134 | | -	/*
135 | | -	 * For a new task use the RESET flags value since there is no before.
136 | | -	 * All the status flags are zero; DF and all the system flags must also
137 | | -	 * be 0, specifically IF must be 0 because we context switch to the new
138 | | -	 * task with interrupts disabled.
139 | | -	 */
140 | | -	frame->flags = X86_EFLAGS_FIXED;
141 | | -	frame->bp = 0;
142 | | -	frame->ret_addr = (unsigned long) ret_from_fork;
143 | | -	p->thread.sp = (unsigned long) fork_frame;
144 | | -	p->thread.sp0 = (unsigned long) (childregs+1);
145 | | -	memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
146 | | -
147 | | -	if (unlikely(p->flags & PF_KTHREAD)) {
148 | | -		/* kernel thread */
149 | | -		memset(childregs, 0, sizeof(struct pt_regs));
150 | | -		frame->bx = sp;		/* function */
151 | | -		frame->di = arg;
152 | | -		p->thread.io_bitmap_ptr = NULL;
153 | | -		return 0;
154 | | -	}
155 | | -	frame->bx = 0;
156 | | -	*childregs = *current_pt_regs();
157 | | -	childregs->ax = 0;
158 | | -	if (sp)
159 | | -		childregs->sp = sp;
160 | | -
161 | | -	task_user_gs(p) = get_user_gs(current_pt_regs());
162 | | -
163 | | -	p->thread.io_bitmap_ptr = NULL;
164 | | -	tsk = current;
165 | | -	err = -ENOMEM;
166 | | -
167 | | -	if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) {
168 | | -		p->thread.io_bitmap_ptr = kmemdup(tsk->thread.io_bitmap_ptr,
169 | | -						IO_BITMAP_BYTES, GFP_KERNEL);
170 | | -		if (!p->thread.io_bitmap_ptr) {
171 | | -			p->thread.io_bitmap_max = 0;
172 | | -			return -ENOMEM;
173 | | -		}
174 | | -		set_tsk_thread_flag(p, TIF_IO_BITMAP);
175 | | -	}
176 | | -
177 | | -	err = 0;
178 | | -
179 | | -	/*
180 | | -	 * Set a new TLS for the child thread?
181 | | -	 */
182 | | -	if (clone_flags & CLONE_SETTLS)
183 | | -		err = do_set_thread_area(p, -1,
184 | | -			(struct user_desc __user *)tls, 0);
185 | | -
186 | | -	if (err && p->thread.io_bitmap_ptr) {
187 | | -		kfree(p->thread.io_bitmap_ptr);
188 | | -		p->thread.io_bitmap_max = 0;
189 | | -	}
190 | | -	return err;
191 | 112 | }
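
The 32-bit copy_thread_tls() is deleted above; the diff itself only shows the removal, presumably because the 32- and 64-bit variants were unified in shared code outside this hunk. One idiom worth noting from the removed body is container_of(), which recovered the enclosing fork_frame from its embedded pt_regs member. A minimal, self-contained userspace sketch of that idiom; struct demo_frame and struct demo_regs are invented for illustration:

#include <stddef.h>
#include <stdio.h>

/* Same idea as the kernel's macro, minus its type-checking extras. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_regs  { unsigned long ip, sp; };

struct demo_frame {
	unsigned long ret_addr;
	struct demo_regs regs;	/* embedded member, like fork_frame.regs */
};

int main(void)
{
	struct demo_frame frame = { .ret_addr = 0x1000 };
	struct demo_regs *childregs = &frame.regs;

	/* Walk back from the member pointer to the enclosing frame, as
	 * container_of(childregs, struct fork_frame, regs) did above. */
	struct demo_frame *f = container_of(childregs, struct demo_frame, regs);

	printf("ret_addr = %#lx\n", f->ret_addr);	/* prints 0x1000 */
	return 0;
}
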
192 | 113 |
193 | 114 | void
.. | ..
202 | 123 | 	regs->ip = new_ip;
203 | 124 | 	regs->sp = new_sp;
204 | 125 | 	regs->flags = X86_EFLAGS_IF;
205 | | -	force_iret();
206 | 126 | }
207 | 127 | EXPORT_SYMBOL_GPL(start_thread);
208 | | -
209 | | -#ifdef CONFIG_PREEMPT_RT_FULL
210 | | -static void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p)
211 | | -{
212 | | -	int i;
213 | | -
214 | | -	/*
215 | | -	 * Clear @prev's kmap_atomic mappings
216 | | -	 */
217 | | -	for (i = 0; i < prev_p->kmap_idx; i++) {
218 | | -		int idx = i + KM_TYPE_NR * smp_processor_id();
219 | | -		pte_t *ptep = kmap_pte - idx;
220 | | -
221 | | -		kpte_clear_flush(ptep, __fix_to_virt(FIX_KMAP_BEGIN + idx));
222 | | -	}
223 | | -	/*
224 | | -	 * Restore @next_p's kmap_atomic mappings
225 | | -	 */
226 | | -	for (i = 0; i < next_p->kmap_idx; i++) {
227 | | -		int idx = i + KM_TYPE_NR * smp_processor_id();
228 | | -
229 | | -		if (!pte_none(next_p->kmap_pte[i]))
230 | | -			set_pte(kmap_pte - idx, next_p->kmap_pte[i]);
231 | | -	}
232 | | -}
233 | | -#else
234 | | -static inline void
235 | | -switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) { }
236 | | -#endif
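
The deleted block above is the PREEMPT_RT-only switch_kmaps(), which cleared the outgoing task's kmap_atomic mappings and re-established the incoming task's on every context switch; it is simply gone from this version of the file. The slot arithmetic it used gives each CPU a private window of KM_TYPE_NR fixmap entries. A minimal sketch of that arithmetic; the KM_TYPE_NR value and function name below are illustrative:

#include <stdio.h>

#define KM_TYPE_NR 20	/* slots reserved per CPU; value is illustrative */

/* A task's i-th atomic kmap lands in the current CPU's private window,
 * mirroring: idx = i + KM_TYPE_NR * smp_processor_id(); */
static int kmap_slot(int i, int cpu)
{
	return i + KM_TYPE_NR * cpu;
}

int main(void)
{
	/* CPU 0 owns slots 0..19, CPU 1 owns 20..39, so the same map
	 * index never collides across CPUs. */
	printf("cpu0, map 2 -> slot %d\n", kmap_slot(2, 0));	/* 2  */
	printf("cpu1, map 2 -> slot %d\n", kmap_slot(2, 1));	/* 22 */
	return 0;
}
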
237 | 128 |
238 | 129 |
239 | 130 | /*
.. | ..
268 | 159 | {
269 | 160 | 	struct thread_struct *prev = &prev_p->thread,
270 | 161 | 			     *next = &next_p->thread;
271 | | -	struct fpu *prev_fpu = &prev->fpu;
272 | | -	struct fpu *next_fpu = &next->fpu;
273 | 162 | 	int cpu = smp_processor_id();
274 | 163 |
275 | 164 | 	/* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
276 | 165 |
277 | | -	switch_fpu_prepare(prev_fpu, cpu);
| 166 | +	if (!test_thread_flag(TIF_NEED_FPU_LOAD))
| 167 | +		switch_fpu_prepare(prev_p, cpu);
278 | 168 |
279 | 169 | 	/*
280 | 170 | 	 * Save away %gs. No need to save %fs, as it was saved on the
.. | ..
293 | 183 | 	 */
294 | 184 | 	load_TLS(next, cpu);
295 | 185 |
296 | | -	/*
297 | | -	 * Restore IOPL if needed. In normal use, the flags restore
298 | | -	 * in the switch assembly will handle this. But if the kernel
299 | | -	 * is running virtualized at a non-zero CPL, the popf will
300 | | -	 * not restore flags, so it must be done in a separate step.
301 | | -	 */
302 | | -	if (get_kernel_rpl() && unlikely(prev->iopl != next->iopl))
303 | | -		set_iopl_mask(next->iopl);
304 | | -
305 | 186 | 	switch_to_extra(prev_p, next_p);
306 | | -
307 | | -	switch_kmaps(prev_p, next_p);
308 | 187 |
309 | 188 | 	/*
310 | 189 | 	 * Leave lazy mode, flushing any hypercalls made here.
311 | 190 | 	 * This must be done before restoring TLS segments so
312 | | -	 * the GDT and LDT are properly updated, and must be
313 | | -	 * done before fpu__restore(), so the TS bit is up
314 | | -	 * to date.
| 191 | +	 * the GDT and LDT are properly updated.
315 | 192 | 	 */
316 | 193 | 	arch_end_context_switch(next_p);
317 | 194 |
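
Two removals in the hunk above: the per-task IOPL restore for kernels running paravirtualized at non-zero CPL (IOPL handling leaves this switch path in this version), and the call to the switch_kmaps() helper whose definition was deleted earlier. For orientation only, IOPL is the same I/O privilege level userspace can request through the real iopl(2) syscall; a minimal sketch (x86 Linux with glibc's <sys/io.h>, typically needs root):

#include <stdio.h>
#include <sys/io.h>

int main(void)
{
	/* Raise the I/O privilege level to 3 (all ports accessible). */
	if (iopl(3) != 0) {
		perror("iopl");		/* usually fails without root */
		return 1;
	}
	puts("IOPL raised to 3");

	return iopl(0);			/* drop the privilege again */
}
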
.. | ..
332 | 209 | 	if (prev->gs | next->gs)
333 | 210 | 		lazy_load_gs(next->gs);
334 | 211 |
335 | | -	switch_fpu_finish(next_fpu, cpu);
336 | | -
337 | 212 | 	this_cpu_write(current_task, next_p);
338 | 213 |
| 214 | +	switch_fpu_finish(next_p);
| 215 | +
339 | 216 | 	/* Load the Intel cache allocation PQR MSR. */
340 | | -	intel_rdt_sched_in();
| 217 | +	resctrl_sched_in(next_p);
341 | 218 |
342 | 219 | 	return prev_p;
343 | 220 | }
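
The final hunk reworks the FPU hand-off: switch_fpu_prepare() now runs only when TIF_NEED_FPU_LOAD is clear (the outgoing task's FPU state is still live in registers and must be saved), and switch_fpu_finish() moves after the current_task update, takes the task instead of a struct fpu plus cpu, and defers the actual register load until the task heads back to userspace. The intel_rdt_sched_in() call also becomes resctrl_sched_in(next_p), following the intel_rdt to resctrl rename seen in the include hunk. A minimal sketch of the deferral idea; all names below are illustrative stand-ins, not the kernel's FPU internals:

#include <stdbool.h>
#include <stdio.h>

struct demo_task {
	bool need_state_load;	/* analogue of TIF_NEED_FPU_LOAD */
	int  state;		/* analogue of the FPU register image */
};

/* Save only when the outgoing task's state is still live in "registers". */
static void switch_prepare(const struct demo_task *prev)
{
	if (!prev->need_state_load)
		printf("saving live state %d\n", prev->state);
	/* else: the state already sits in memory, nothing to save */
}

/* Don't load anything now; just mark the reload as pending. */
static void switch_finish(struct demo_task *next)
{
	next->need_state_load = true;
	printf("reload of state %d deferred\n", next->state);
}

int main(void)
{
	struct demo_task a = { .need_state_load = false, .state = 1 };
	struct demo_task b = { .need_state_load = true,  .state = 2 };

	switch_prepare(&a);	/* a's state is live: save it       */
	switch_finish(&b);	/* b's reload happens lazily later  */
	return 0;
}
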
---|