2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
--- a/kernel/arch/x86/xen/smp_pv.c
+++ b/kernel/arch/x86/xen/smp_pv.c
@@ -22,11 +22,15 @@
 #include <linux/tick.h>
 #include <linux/nmi.h>
 #include <linux/cpuhotplug.h>
+#include <linux/stackprotector.h>
+#include <linux/pgtable.h>
 
 #include <asm/paravirt.h>
+#include <asm/idtentry.h>
 #include <asm/desc.h>
-#include <asm/pgtable.h>
 #include <asm/cpu.h>
+#include <asm/io_apic.h>
+#include <asm/fpu/internal.h>
 
 #include <xen/interface/xen.h>
 #include <xen/interface/vcpu.h>
@@ -52,12 +56,15 @@
 static DEFINE_PER_CPU(struct xen_common_irq, xen_pmu_irq) = { .irq = -1 };
 
 static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id);
+void asm_cpu_bringup_and_idle(void);
 
 static void cpu_bringup(void)
 {
 	int cpu;
 
+	cr4_init();
 	cpu_init();
+	fpu__init_cpu();
 	touch_softlockup_watchdog();
 	preempt_disable();
 
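Note on the two calls added to cpu_bringup(): the fresh vCPU arrives with hypervisor-provided register state, so cr4_init() re-establishes the kernel's per-CPU cached CR4 before cpu_init() runs, and fpu__init_cpu() brings up the per-CPU FPU state that native boot code would otherwise have initialized. A simplified sketch of the shadow-priming idea behind cr4_init() — cpu_cr4_shadow and cr4_init_sketch() are illustrative names, not the kernel's actual identifiers (the real implementation lives in arch/x86/kernel/cpu/common.c):

	/* Hypothetical sketch: prime a per-CPU CR4 cache from hardware so
	 * later cr4_set_bits()/cr4_clear_bits() can read-modify-write the
	 * cached value instead of re-reading the register. */
	static DEFINE_PER_CPU(unsigned long, cpu_cr4_shadow);	/* illustrative */

	static void cr4_init_sketch(void)
	{
		unsigned long cr4 = __read_cr4();	/* read hardware once */

		this_cpu_write(cpu_cr4_shadow, cr4);	/* prime the cache */
		__write_cr4(cr4);
	}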
@@ -89,23 +96,22 @@
 {
 	cpu_bringup();
 	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
-	prevent_tail_call_optimization();
 }
 
 void xen_smp_intr_free_pv(unsigned int cpu)
 {
+	kfree(per_cpu(xen_irq_work, cpu).name);
+	per_cpu(xen_irq_work, cpu).name = NULL;
 	if (per_cpu(xen_irq_work, cpu).irq >= 0) {
 		unbind_from_irqhandler(per_cpu(xen_irq_work, cpu).irq, NULL);
 		per_cpu(xen_irq_work, cpu).irq = -1;
-		kfree(per_cpu(xen_irq_work, cpu).name);
-		per_cpu(xen_irq_work, cpu).name = NULL;
 	}
 
+	kfree(per_cpu(xen_pmu_irq, cpu).name);
+	per_cpu(xen_pmu_irq, cpu).name = NULL;
 	if (per_cpu(xen_pmu_irq, cpu).irq >= 0) {
 		unbind_from_irqhandler(per_cpu(xen_pmu_irq, cpu).irq, NULL);
 		per_cpu(xen_pmu_irq, cpu).irq = -1;
-		kfree(per_cpu(xen_pmu_irq, cpu).name);
-		per_cpu(xen_pmu_irq, cpu).name = NULL;
 	}
 }
 
@@ -115,6 +121,7 @@
 	char *callfunc_name, *pmu_name;
 
 	callfunc_name = kasprintf(GFP_KERNEL, "irqwork%d", cpu);
+	per_cpu(xen_irq_work, cpu).name = callfunc_name;
 	rc = bind_ipi_to_irqhandler(XEN_IRQ_WORK_VECTOR,
 				    cpu,
 				    xen_irq_work_interrupt,
@@ -124,10 +131,10 @@
 	if (rc < 0)
 		goto fail;
 	per_cpu(xen_irq_work, cpu).irq = rc;
-	per_cpu(xen_irq_work, cpu).name = callfunc_name;
 
-	if (is_xen_pmu(cpu)) {
+	if (is_xen_pmu) {
 		pmu_name = kasprintf(GFP_KERNEL, "pmu%d", cpu);
+		per_cpu(xen_pmu_irq, cpu).name = pmu_name;
 		rc = bind_virq_to_irqhandler(VIRQ_XENPMU, cpu,
 					     xen_pmu_irq_handler,
 					     IRQF_PERCPU|IRQF_NOBALANCING,
@@ -135,7 +142,6 @@
 		if (rc < 0)
 			goto fail;
 		per_cpu(xen_pmu_irq, cpu).irq = rc;
-		per_cpu(xen_pmu_irq, cpu).name = pmu_name;
 	}
 
 	return 0;
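The four hunks above fix a leak of the kasprintf()-allocated irq names: the name is now stored in its per-CPU slot right after allocation, before the bind attempt, and xen_smp_intr_free_pv() frees it unconditionally instead of only inside the .irq >= 0 branch. Previously a failing bind_ipi_to_irqhandler()/bind_virq_to_irqhandler() left .irq at -1, so the teardown path never reached the kfree() and the name leaked. Condensed flow after this change (the trailing arguments and the fail label belong to the surrounding function, which this diff elides):

	callfunc_name = kasprintf(GFP_KERNEL, "irqwork%d", cpu);
	per_cpu(xen_irq_work, cpu).name = callfunc_name;	/* record before binding */
	rc = bind_ipi_to_irqhandler(XEN_IRQ_WORK_VECTOR, cpu,
				    xen_irq_work_interrupt,
				    IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name, NULL);
	if (rc < 0)
		goto fail;	/* xen_smp_intr_free_pv() still kfree()s .name */
	per_cpu(xen_irq_work, cpu).irq = rc;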
@@ -145,28 +151,12 @@
 	return rc;
 }
 
-static void __init xen_fill_possible_map(void)
-{
-	int i, rc;
-
-	if (xen_initial_domain())
-		return;
-
-	for (i = 0; i < nr_cpu_ids; i++) {
-		rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
-		if (rc >= 0) {
-			num_processors++;
-			set_cpu_possible(i, true);
-		}
-	}
-}
-
-static void __init xen_filter_cpu_maps(void)
+static void __init _get_smp_config(unsigned int early)
 {
 	int i, rc;
 	unsigned int subtract = 0;
 
-	if (!xen_initial_domain())
+	if (early)
 		return;
 
 	num_processors = 0;
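xen_fill_possible_map() and xen_filter_cpu_maps() collapse here into one enumerator, _get_smp_config(), hung off the x86 init hooks (see the registration at the end of this diff). Its early parameter matches how the generic x86 boot code drives the hook twice, via the wrappers in arch/x86/include/asm/mpspec.h, so returning on the early pass simply defers vCPU enumeration to the late one:

	static inline void get_smp_config(void)
	{
		x86_init.mpparse.get_smp_config(0);	/* late pass: enumerate */
	}

	static inline void early_get_smp_config(void)
	{
		x86_init.mpparse.get_smp_config(1);	/* early pass: returns immediately */
	}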
@@ -207,16 +197,6 @@
 	 * sure the old memory can be recycled. */
 	make_lowmem_page_readwrite(xen_initial_gdt);
 
-#ifdef CONFIG_X86_32
-	/*
-	 * Xen starts us with XEN_FLAT_RING1_DS, but linux code
-	 * expects __USER_DS
-	 */
-	loadsegment(ds, __USER_DS);
-	loadsegment(es, __USER_DS);
-#endif
-
-	xen_filter_cpu_maps();
 	xen_setup_vcpu_info_placement();
 
 	/*
@@ -250,6 +230,7 @@
 	for_each_possible_cpu(i) {
 		zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
 		zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
+		zalloc_cpumask_var(&per_cpu(cpu_die_map, i), GFP_KERNEL);
 		zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
 	}
 	set_cpu_sibling_map(0);
@@ -295,10 +276,6 @@
 
 	gdt = get_cpu_gdt_rw(cpu);
 
-#ifdef CONFIG_X86_32
-	ctxt->user_regs.fs = __KERNEL_PERCPU;
-	ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
-#endif
 	memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));
 
 	/*
@@ -306,7 +283,7 @@
 	 * pointing just below where pt_regs would be if it were a normal
 	 * kernel entry.
 	 */
-	ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
+	ctxt->user_regs.eip = (unsigned long)asm_cpu_bringup_and_idle;
 	ctxt->flags = VGCF_IN_KERNEL;
 	ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
 	ctxt->user_regs.ds = __USER_DS;
@@ -336,14 +313,9 @@
 	ctxt->kernel_ss = __KERNEL_DS;
 	ctxt->kernel_sp = task_top_of_stack(idle);
 
-#ifdef CONFIG_X86_32
-	ctxt->event_callback_cs = __KERNEL_CS;
-	ctxt->failsafe_callback_cs = __KERNEL_CS;
-#else
 	ctxt->gs_base_kernel = per_cpu_offset(cpu);
-#endif
 	ctxt->event_callback_eip =
-		(unsigned long)xen_hypervisor_callback;
+		(unsigned long)xen_asm_exc_xen_hypervisor_callback;
 	ctxt->failsafe_callback_eip =
 		(unsigned long)xen_failsafe_callback;
 	per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);
@@ -360,7 +332,9 @@
 {
 	int rc;
 
-	common_cpu_up(cpu, idle);
+	rc = common_cpu_up(cpu, idle);
+	if (rc)
+		return rc;
 
 	xen_setup_runstate_info(cpu);
 
@@ -502,5 +476,8 @@
 void __init xen_smp_init(void)
 {
 	smp_ops = xen_smp_ops;
-	xen_fill_possible_map();
+
+	/* Avoid searching for BIOS MP tables */
+	x86_init.mpparse.find_smp_config = x86_init_noop;
+	x86_init.mpparse.get_smp_config = _get_smp_config;
 }
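find_smp_config is pointed at x86_init_noop because a PV guest has no BIOS MP table to scan; the stub, as defined in arch/x86/kernel/x86_init.c, is literally an empty function:

	void __init x86_init_noop(void) { }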