@@ -22,11 +22,15 @@
 #include <linux/tick.h>
 #include <linux/nmi.h>
 #include <linux/cpuhotplug.h>
+#include <linux/stackprotector.h>
+#include <linux/pgtable.h>
 
 #include <asm/paravirt.h>
+#include <asm/idtentry.h>
 #include <asm/desc.h>
-#include <asm/pgtable.h>
 #include <asm/cpu.h>
+#include <asm/io_apic.h>
+#include <asm/fpu/internal.h>
 
 #include <xen/interface/xen.h>
 #include <xen/interface/vcpu.h>
@@ -52,12 +56,15 @@
 static DEFINE_PER_CPU(struct xen_common_irq, xen_pmu_irq) = { .irq = -1 };
 
 static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id);
+void asm_cpu_bringup_and_idle(void);
 
 static void cpu_bringup(void)
 {
 	int cpu;
 
+	cr4_init();
 	cpu_init();
+	fpu__init_cpu();
 	touch_softlockup_watchdog();
 	preempt_disable();
 
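
The two calls added at the top of cpu_bringup() run before anything else on a freshly started PV vCPU: cr4_init() brings the per-CPU CR4 shadow in sync with the real register, and fpu__init_cpu() performs the per-CPU FPU setup that the native bring-up path does for each AP. A minimal sketch of the resulting ordering; the rationale comments are my reading of the change, not text from the patch:

	static void cpu_bringup_order_sketch(void)
	{
		cr4_init();		/* CR4 shadow must be valid before any
					 * cr4_set_bits()/cr4_clear_bits() user */
		cpu_init();		/* generic per-CPU state: GDT, TSS, ... */
		fpu__init_cpu();	/* FPU/XSTATE enable bits in CR0/CR4 */
		touch_softlockup_watchdog();
	}
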
@@ -89,23 +96,22 @@
 {
 	cpu_bringup();
 	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
-	prevent_tail_call_optimization();
 }
 
 void xen_smp_intr_free_pv(unsigned int cpu)
 {
+	kfree(per_cpu(xen_irq_work, cpu).name);
+	per_cpu(xen_irq_work, cpu).name = NULL;
 	if (per_cpu(xen_irq_work, cpu).irq >= 0) {
 		unbind_from_irqhandler(per_cpu(xen_irq_work, cpu).irq, NULL);
 		per_cpu(xen_irq_work, cpu).irq = -1;
-		kfree(per_cpu(xen_irq_work, cpu).name);
-		per_cpu(xen_irq_work, cpu).name = NULL;
 	}
 
+	kfree(per_cpu(xen_pmu_irq, cpu).name);
+	per_cpu(xen_pmu_irq, cpu).name = NULL;
 	if (per_cpu(xen_pmu_irq, cpu).irq >= 0) {
 		unbind_from_irqhandler(per_cpu(xen_pmu_irq, cpu).irq, NULL);
 		per_cpu(xen_pmu_irq, cpu).irq = -1;
-		kfree(per_cpu(xen_pmu_irq, cpu).name);
-		per_cpu(xen_pmu_irq, cpu).name = NULL;
 	}
 }
 
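
Two separate fixes land in this hunk. The dropped prevent_tail_call_optimization() was a workaround to keep the compiler from tail-calling out of cpu_bringup_and_idle(); it is presumably unnecessary once secondary vCPUs enter through the asm_cpu_bringup_and_idle stub declared above, which calls the C function from assembly with a proper frame. Independently, xen_smp_intr_free_pv() now frees each irq name unconditionally, before the irq check: previously, if kasprintf() succeeded but the bind failed (or never ran), the name leaked. A minimal sketch of the resulting teardown pattern, assuming the two-field struct xen_common_irq defined earlier in this file; the helper name is hypothetical:

	static void common_irq_free_sketch(struct xen_common_irq *ci)
	{
		/* kfree(NULL) is a no-op, so this is safe whether or not
		 * the name was ever allocated or the bind succeeded. */
		kfree(ci->name);
		ci->name = NULL;

		if (ci->irq >= 0) {
			unbind_from_irqhandler(ci->irq, NULL);
			ci->irq = -1;
		}
	}
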
@@ -115,6 +121,7 @@
 	char *callfunc_name, *pmu_name;
 
 	callfunc_name = kasprintf(GFP_KERNEL, "irqwork%d", cpu);
+	per_cpu(xen_irq_work, cpu).name = callfunc_name;
 	rc = bind_ipi_to_irqhandler(XEN_IRQ_WORK_VECTOR,
 				    cpu,
 				    xen_irq_work_interrupt,
@@ -124,10 +131,10 @@
 	if (rc < 0)
 		goto fail;
 	per_cpu(xen_irq_work, cpu).irq = rc;
-	per_cpu(xen_irq_work, cpu).name = callfunc_name;
 
-	if (is_xen_pmu(cpu)) {
+	if (is_xen_pmu) {
 		pmu_name = kasprintf(GFP_KERNEL, "pmu%d", cpu);
+		per_cpu(xen_pmu_irq, cpu).name = pmu_name;
 		rc = bind_virq_to_irqhandler(VIRQ_XENPMU, cpu,
 					     xen_pmu_irq_handler,
 					     IRQF_PERCPU|IRQF_NOBALANCING,
@@ -135,7 +142,6 @@
 		if (rc < 0)
 			goto fail;
 		per_cpu(xen_pmu_irq, cpu).irq = rc;
-		per_cpu(xen_pmu_irq, cpu).name = pmu_name;
 	}
 
 	return 0;
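
The allocation side mirrors that teardown change: the kasprintf() result is stored in the per-CPU slot immediately, before the bind that can fail, so the fail: path (which ends in xen_smp_intr_free_pv()) can release it. Note also that the PMU check is now a plain is_xen_pmu flag rather than a per-CPU predicate call. A sketch of the fixed ordering, reusing the hypothetical helper shape from above:

	static int common_irq_bind_sketch(struct xen_common_irq *ci,
					  unsigned int cpu)
	{
		int rc;

		/* Record the name first: on failure the caller's error
		 * path frees it via the unconditional kfree() above. */
		ci->name = kasprintf(GFP_KERNEL, "irqwork%d", cpu);
		rc = bind_ipi_to_irqhandler(XEN_IRQ_WORK_VECTOR, cpu,
					    xen_irq_work_interrupt,
					    IRQF_PERCPU | IRQF_NOBALANCING,
					    ci->name, NULL);
		if (rc < 0)
			return rc;
		ci->irq = rc;
		return 0;
	}
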
@@ -145,28 +151,12 @@
 	return rc;
 }
 
-static void __init xen_fill_possible_map(void)
-{
-	int i, rc;
-
-	if (xen_initial_domain())
-		return;
-
-	for (i = 0; i < nr_cpu_ids; i++) {
-		rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
-		if (rc >= 0) {
-			num_processors++;
-			set_cpu_possible(i, true);
-		}
-	}
-}
-
-static void __init xen_filter_cpu_maps(void)
+static void __init _get_smp_config(unsigned int early)
 {
 	int i, rc;
 	unsigned int subtract = 0;
 
-	if (!xen_initial_domain())
+	if (early)
 		return;
 
 	num_processors = 0;
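
xen_fill_possible_map() and xen_filter_cpu_maps() are folded into a single _get_smp_config() callback, registered at the bottom of this diff as the mpparse get_smp_config hook. Generic x86 boot code invokes that hook twice, early (early=1) and late (early=0), and the Xen implementation only does work on the late call. The vCPU probing that the deleted xen_fill_possible_map() performed looks like this sketch, modeled on the removed lines (VCPUOP_is_up returns >= 0 for every vCPU id the hypervisor knows, whether currently up or not):

	static void __init xen_count_vcpus_sketch(void)
	{
		int i, rc;

		for (i = 0; i < nr_cpu_ids; i++) {
			rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
			if (rc >= 0) {
				num_processors++;
				set_cpu_possible(i, true);
			}
		}
	}
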
@@ -207,16 +197,6 @@
 	 * sure the old memory can be recycled. */
 	make_lowmem_page_readwrite(xen_initial_gdt);
 
-#ifdef CONFIG_X86_32
-	/*
-	 * Xen starts us with XEN_FLAT_RING1_DS, but linux code
-	 * expects __USER_DS
-	 */
-	loadsegment(ds, __USER_DS);
-	loadsegment(es, __USER_DS);
-#endif
-
-	xen_filter_cpu_maps();
 	xen_setup_vcpu_info_placement();
 
 	/*
@@ -250,6 +230,7 @@
 	for_each_possible_cpu(i) {
 		zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
 		zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
+		zalloc_cpumask_var(&per_cpu(cpu_die_map, i), GFP_KERNEL);
 		zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
 	}
 	set_cpu_sibling_map(0);
@@ -295,10 +276,6 @@
 
 	gdt = get_cpu_gdt_rw(cpu);
 
-#ifdef CONFIG_X86_32
-	ctxt->user_regs.fs = __KERNEL_PERCPU;
-	ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
-#endif
 	memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));
 
 	/*
@@ -306,7 +283,7 @@
 	 * pointing just below where pt_regs would be if it were a normal
 	 * kernel entry.
 	 */
-	ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
+	ctxt->user_regs.eip = (unsigned long)asm_cpu_bringup_and_idle;
 	ctxt->flags = VGCF_IN_KERNEL;
 	ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
 	ctxt->user_regs.ds = __USER_DS;
@@ -336,14 +313,9 @@
 	ctxt->kernel_ss = __KERNEL_DS;
 	ctxt->kernel_sp = task_top_of_stack(idle);
 
-#ifdef CONFIG_X86_32
-	ctxt->event_callback_cs = __KERNEL_CS;
-	ctxt->failsafe_callback_cs = __KERNEL_CS;
-#else
 	ctxt->gs_base_kernel = per_cpu_offset(cpu);
-#endif
 	ctxt->event_callback_eip =
-		(unsigned long)xen_hypervisor_callback;
+		(unsigned long)xen_asm_exc_xen_hypervisor_callback;
 	ctxt->failsafe_callback_eip =
 		(unsigned long)xen_failsafe_callback;
 	per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);
@@ -360,7 +332,9 @@
 {
 	int rc;
 
-	common_cpu_up(cpu, idle);
+	rc = common_cpu_up(cpu, idle);
+	if (rc)
+		return rc;
 
 	xen_setup_runstate_info(cpu);
 
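
common_cpu_up() can fail (on x86 it allocates per-CPU interrupt stacks, among other things), and the surrounding function, xen_pv_cpu_up() in this file, now propagates that failure instead of pressing on with a half-initialized CPU. The hardened shape, abbreviated:

	static int xen_pv_cpu_up_sketch(unsigned int cpu, struct task_struct *idle)
	{
		int rc;

		rc = common_cpu_up(cpu, idle);
		if (rc)
			return rc;	/* abort bring-up, report to caller */

		xen_setup_runstate_info(cpu);
		/* ... remainder as in xen_pv_cpu_up() ... */
		return 0;
	}
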
@@ -502,5 +476,8 @@
 void __init xen_smp_init(void)
 {
 	smp_ops = xen_smp_ops;
-	xen_fill_possible_map();
+
+	/* Avoid searching for BIOS MP tables */
+	x86_init.mpparse.find_smp_config = x86_init_noop;
+	x86_init.mpparse.get_smp_config = _get_smp_config;
 }
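
For reference, the hooks overridden here are how generic x86 boot code discovers CPUs; a PV guest has no BIOS MP tables to scan, so find_smp_config becomes the stock x86_init_noop() stub and the vCPU query above supplies the count. The generic side reaches the hook through wrappers along these lines (a sketch of the asm/mpspec.h wrappers of this era; the exact form may differ):

	static inline void get_smp_config(void)
	{
		x86_init.mpparse.get_smp_config(0);	/* late: Xen counts vCPUs */
	}

	static inline void early_get_smp_config(void)
	{
		x86_init.mpparse.get_smp_config(1);	/* early: Xen returns */
	}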