2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
kernel/arch/x86/kernel/process_32.c
@@ -38,55 +38,44 @@
 #include <linux/io.h>
 #include <linux/kdebug.h>
 #include <linux/syscalls.h>
-#include <linux/highmem.h>
 
-#include <asm/pgtable.h>
 #include <asm/ldt.h>
 #include <asm/processor.h>
 #include <asm/fpu/internal.h>
 #include <asm/desc.h>
-#ifdef CONFIG_MATH_EMULATION
-#include <asm/math_emu.h>
-#endif
 
 #include <linux/err.h>
 
 #include <asm/tlbflush.h>
 #include <asm/cpu.h>
-#include <asm/syscalls.h>
 #include <asm/debugreg.h>
 #include <asm/switch_to.h>
 #include <asm/vm86.h>
-#include <asm/intel_rdt_sched.h>
+#include <asm/resctrl.h>
 #include <asm/proto.h>
 
 #include "process.h"
 
-void __show_regs(struct pt_regs *regs, enum show_regs_mode mode)
+void __show_regs(struct pt_regs *regs, enum show_regs_mode mode,
+		 const char *log_lvl)
 {
 	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
 	unsigned long d0, d1, d2, d3, d6, d7;
-	unsigned long sp;
-	unsigned short ss, gs;
+	unsigned short gs;
 
-	if (user_mode(regs)) {
-		sp = regs->sp;
-		ss = regs->ss;
+	if (user_mode(regs))
 		gs = get_user_gs(regs);
-	} else {
-		sp = kernel_stack_pointer(regs);
-		savesegment(ss, ss);
+	else
 		savesegment(gs, gs);
-	}
 
-	show_ip(regs, KERN_DEFAULT);
+	show_ip(regs, log_lvl);
 
-	printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
-		regs->ax, regs->bx, regs->cx, regs->dx);
-	printk(KERN_DEFAULT "ESI: %08lx EDI: %08lx EBP: %08lx ESP: %08lx\n",
-		regs->si, regs->di, regs->bp, sp);
-	printk(KERN_DEFAULT "DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x EFLAGS: %08lx\n",
-	       (u16)regs->ds, (u16)regs->es, (u16)regs->fs, gs, ss, regs->flags);
+	printk("%sEAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
+	       log_lvl, regs->ax, regs->bx, regs->cx, regs->dx);
+	printk("%sESI: %08lx EDI: %08lx EBP: %08lx ESP: %08lx\n",
+	       log_lvl, regs->si, regs->di, regs->bp, regs->sp);
+	printk("%sDS: %04x ES: %04x FS: %04x GS: %04x SS: %04x EFLAGS: %08lx\n",
+	       log_lvl, (u16)regs->ds, (u16)regs->es, (u16)regs->fs, gs, regs->ss, regs->flags);
 
 	if (mode != SHOW_REGS_ALL)
 		return;
@@ -95,8 +84,8 @@
 	cr2 = read_cr2();
 	cr3 = __read_cr3();
 	cr4 = __read_cr4();
-	printk(KERN_DEFAULT "CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n",
-		cr0, cr2, cr3, cr4);
+	printk("%sCR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n",
+	       log_lvl, cr0, cr2, cr3, cr4);
 
 	get_debugreg(d0, 0);
 	get_debugreg(d1, 1);
@@ -110,84 +99,16 @@
 	    (d6 == DR6_RESERVED) && (d7 == 0x400))
 		return;
 
-	printk(KERN_DEFAULT "DR0: %08lx DR1: %08lx DR2: %08lx DR3: %08lx\n",
-		d0, d1, d2, d3);
-	printk(KERN_DEFAULT "DR6: %08lx DR7: %08lx\n",
-		d6, d7);
+	printk("%sDR0: %08lx DR1: %08lx DR2: %08lx DR3: %08lx\n",
+	       log_lvl, d0, d1, d2, d3);
+	printk("%sDR6: %08lx DR7: %08lx\n",
+	       log_lvl, d6, d7);
 }
 
 void release_thread(struct task_struct *dead_task)
 {
 	BUG_ON(dead_task->mm);
 	release_vm86_irqs(dead_task);
-}
-
-int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
-		    unsigned long arg, struct task_struct *p, unsigned long tls)
-{
-	struct pt_regs *childregs = task_pt_regs(p);
-	struct fork_frame *fork_frame = container_of(childregs, struct fork_frame, regs);
-	struct inactive_task_frame *frame = &fork_frame->frame;
-	struct task_struct *tsk;
-	int err;
-
-	/*
-	 * For a new task use the RESET flags value since there is no before.
-	 * All the status flags are zero; DF and all the system flags must also
-	 * be 0, specifically IF must be 0 because we context switch to the new
-	 * task with interrupts disabled.
-	 */
-	frame->flags = X86_EFLAGS_FIXED;
-	frame->bp = 0;
-	frame->ret_addr = (unsigned long) ret_from_fork;
-	p->thread.sp = (unsigned long) fork_frame;
-	p->thread.sp0 = (unsigned long) (childregs+1);
-	memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
-
-	if (unlikely(p->flags & PF_KTHREAD)) {
-		/* kernel thread */
-		memset(childregs, 0, sizeof(struct pt_regs));
-		frame->bx = sp;		/* function */
-		frame->di = arg;
-		p->thread.io_bitmap_ptr = NULL;
-		return 0;
-	}
-	frame->bx = 0;
-	*childregs = *current_pt_regs();
-	childregs->ax = 0;
-	if (sp)
-		childregs->sp = sp;
-
-	task_user_gs(p) = get_user_gs(current_pt_regs());
-
-	p->thread.io_bitmap_ptr = NULL;
-	tsk = current;
-	err = -ENOMEM;
-
-	if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) {
-		p->thread.io_bitmap_ptr = kmemdup(tsk->thread.io_bitmap_ptr,
-						IO_BITMAP_BYTES, GFP_KERNEL);
-		if (!p->thread.io_bitmap_ptr) {
-			p->thread.io_bitmap_max = 0;
-			return -ENOMEM;
-		}
-		set_tsk_thread_flag(p, TIF_IO_BITMAP);
-	}
-
-	err = 0;
-
-	/*
-	 * Set a new TLS for the child thread?
-	 */
-	if (clone_flags & CLONE_SETTLS)
-		err = do_set_thread_area(p, -1,
-			(struct user_desc __user *)tls, 0);
-
-	if (err && p->thread.io_bitmap_ptr) {
-		kfree(p->thread.io_bitmap_ptr);
-		p->thread.io_bitmap_max = 0;
-	}
-	return err;
 }
 
 void
@@ -202,38 +123,8 @@
 	regs->ip = new_ip;
 	regs->sp = new_sp;
 	regs->flags = X86_EFLAGS_IF;
-	force_iret();
 }
 EXPORT_SYMBOL_GPL(start_thread);
-
-#ifdef CONFIG_PREEMPT_RT_FULL
-static void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p)
-{
-	int i;
-
-	/*
-	 * Clear @prev's kmap_atomic mappings
-	 */
-	for (i = 0; i < prev_p->kmap_idx; i++) {
-		int idx = i + KM_TYPE_NR * smp_processor_id();
-		pte_t *ptep = kmap_pte - idx;
-
-		kpte_clear_flush(ptep, __fix_to_virt(FIX_KMAP_BEGIN + idx));
-	}
-	/*
-	 * Restore @next_p's kmap_atomic mappings
-	 */
-	for (i = 0; i < next_p->kmap_idx; i++) {
-		int idx = i + KM_TYPE_NR * smp_processor_id();
-
-		if (!pte_none(next_p->kmap_pte[i]))
-			set_pte(kmap_pte - idx, next_p->kmap_pte[i]);
-	}
-}
-#else
-static inline void
-switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) { }
-#endif
 
 
 /*
@@ -268,13 +159,12 @@
 {
 	struct thread_struct *prev = &prev_p->thread,
			     *next = &next_p->thread;
-	struct fpu *prev_fpu = &prev->fpu;
-	struct fpu *next_fpu = &next->fpu;
 	int cpu = smp_processor_id();
 
 	/* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
 
-	switch_fpu_prepare(prev_fpu, cpu);
+	if (!test_thread_flag(TIF_NEED_FPU_LOAD))
+		switch_fpu_prepare(prev_p, cpu);
 
 	/*
	 * Save away %gs. No need to save %fs, as it was saved on the
@@ -293,25 +183,12 @@
	 */
 	load_TLS(next, cpu);
 
-	/*
-	 * Restore IOPL if needed. In normal use, the flags restore
-	 * in the switch assembly will handle this. But if the kernel
-	 * is running virtualized at a non-zero CPL, the popf will
-	 * not restore flags, so it must be done in a separate step.
-	 */
-	if (get_kernel_rpl() && unlikely(prev->iopl != next->iopl))
-		set_iopl_mask(next->iopl);
-
 	switch_to_extra(prev_p, next_p);
-
-	switch_kmaps(prev_p, next_p);
 
 	/*
	 * Leave lazy mode, flushing any hypercalls made here.
	 * This must be done before restoring TLS segments so
-	 * the GDT and LDT are properly updated, and must be
-	 * done before fpu__restore(), so the TS bit is up
-	 * to date.
+	 * the GDT and LDT are properly updated.
	 */
 	arch_end_context_switch(next_p);
 
@@ -332,12 +209,12 @@
 	if (prev->gs | next->gs)
 		lazy_load_gs(next->gs);
 
-	switch_fpu_finish(next_fpu, cpu);
-
 	this_cpu_write(current_task, next_p);
 
+	switch_fpu_finish(next_p);
+
 	/* Load the Intel cache allocation PQR MSR. */
-	intel_rdt_sched_in();
+	resctrl_sched_in(next_p);
 
 	return prev_p;
 }
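
Note on the recurring pattern in the __show_regs() hunks above: instead of hard-coding KERN_DEFAULT into each printk(), the function now receives a log_lvl prefix from its caller and emits every line as printk("%s...", log_lvl, ...), so the whole multi-line register dump comes out at whatever severity the call site chooses. Below is a minimal standalone sketch of that pattern in plain C; fake_regs, show_fake_regs, and the DEMO_* prefix strings are illustrative stand-ins, not kernel API.

/*
 * Userspace illustration of the log_lvl threading applied to
 * __show_regs() in this diff: one caller-supplied prefix string
 * is prepended to every line of a multi-line dump.
 */
#include <stdio.h>

/* Stand-ins for the kernel's log-level prefixes (assumed for the demo). */
#define DEMO_DEFAULT ""
#define DEMO_EMERG   "<0>"

struct fake_regs {
	unsigned long ax, bx, cx, dx;
};

/*
 * Mirrors the diff's printk("%s...", log_lvl, ...) form: the dump
 * inherits the severity chosen once at the call site.
 */
static void show_fake_regs(const struct fake_regs *regs, const char *log_lvl)
{
	printf("%sEAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
	       log_lvl, regs->ax, regs->bx, regs->cx, regs->dx);
}

int main(void)
{
	struct fake_regs regs = { 0x1, 0x2, 0x3, 0x4 };

	show_fake_regs(&regs, DEMO_DEFAULT);	/* ordinary dump */
	show_fake_regs(&regs, DEMO_EMERG);	/* same dump, urgent level */
	return 0;
}

The payoff of the change is exactly this: callers pick the level once (e.g. KERN_DEFAULT for a routine dump, something more urgent on an oops path) and the dump code itself no longer bakes one in.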