| .. | .. |
|---|
| 1 | 1 | // SPDX-License-Identifier: GPL-2.0 |
|---|
| 2 | +#include <linux/thread_info.h> |
|---|
| 2 | 3 | #include <asm/smp.h> |
|---|
| 3 | 4 | |
|---|
| 4 | 5 | #include <xen/events.h> |
|---|
| .. | .. |
|---|
| 19 | 20 | xen_vcpu_setup(0); |
|---|
| 20 | 21 | |
|---|
| 21 | 22 | /* |
|---|
| 23 | + * Called again in case the kernel boots on vcpu >= MAX_VIRT_CPUS. |
|---|
| 24 | + * Refer to comments in xen_hvm_init_time_ops(). |
|---|
| 25 | + */ |
|---|
| 26 | + xen_hvm_init_time_ops(); |
|---|
| 27 | + |
|---|
| 28 | + /* |
|---|
| 22 | 29 | * The alternative logic (which patches the unlock/lock) runs before |
|---|
| 23 | 30 | * the smp bootup code is activated. Hence we need to set this up |
|---|
| 24 | 31 | * before the core kernel is patched. Otherwise we will have only |
|---|
| .. | .. |
|---|
| 32 | 39 | int cpu; |
|---|
| 33 | 40 | |
|---|
| 34 | 41 | native_smp_prepare_cpus(max_cpus); |
|---|
| 35 | | - WARN_ON(xen_smp_intr_init(0)); |
|---|
| 36 | 42 | |
|---|
| 37 | | - xen_init_lock_cpu(0); |
|---|
| 43 | + if (xen_have_vector_callback) { |
|---|
| 44 | + WARN_ON(xen_smp_intr_init(0)); |
|---|
| 45 | + xen_init_lock_cpu(0); |
|---|
| 46 | + } |
|---|
| 38 | 47 | |
|---|
| 39 | 48 | for_each_possible_cpu(cpu) { |
|---|
| 40 | 49 | if (cpu == 0) |
|---|
| .. | .. |
|---|
| 49 | 58 | static void xen_hvm_cpu_die(unsigned int cpu) |
|---|
| 50 | 59 | { |
|---|
| 51 | 60 | if (common_cpu_die(cpu) == 0) { |
|---|
| 52 | | - xen_smp_intr_free(cpu); |
|---|
| 53 | | - xen_uninit_lock_cpu(cpu); |
|---|
| 54 | | - xen_teardown_timer(cpu); |
|---|
| 61 | + if (xen_have_vector_callback) { |
|---|
| 62 | + xen_smp_intr_free(cpu); |
|---|
| 63 | + xen_uninit_lock_cpu(cpu); |
|---|
| 64 | + xen_teardown_timer(cpu); |
|---|
| 65 | + } |
|---|
| 55 | 66 | } |
|---|
| 56 | 67 | } |
|---|
| 57 | 68 | #else |
|---|
| .. | .. |
|---|
| 63 | 74 | |
|---|
| 64 | 75 | void __init xen_hvm_smp_init(void) |
|---|
| 65 | 76 | { |
|---|
| 66 | | - if (!xen_have_vector_callback) |
|---|
| 67 | | - return; |
|---|
| 68 | | - |
|---|
| 77 | + smp_ops.smp_prepare_boot_cpu = xen_hvm_smp_prepare_boot_cpu; |
|---|
| 69 | 78 | smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus; |
|---|
| 70 | | - smp_ops.smp_send_reschedule = xen_smp_send_reschedule; |
|---|
| 79 | + smp_ops.smp_cpus_done = xen_smp_cpus_done; |
|---|
| 71 | 80 | smp_ops.cpu_die = xen_hvm_cpu_die; |
|---|
| 81 | + |
|---|
| 82 | + if (!xen_have_vector_callback) { |
|---|
| 83 | +#ifdef CONFIG_PARAVIRT_SPINLOCKS |
|---|
| 84 | + nopvspin = true; |
|---|
| 85 | +#endif |
|---|
| 86 | + return; |
|---|
| 87 | + } |
|---|
| 88 | + |
|---|
| 89 | + smp_ops.smp_send_reschedule = xen_smp_send_reschedule; |
|---|
| 72 | 90 | smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi; |
|---|
| 73 | 91 | smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi; |
|---|
| 74 | | - smp_ops.smp_prepare_boot_cpu = xen_hvm_smp_prepare_boot_cpu; |
|---|
| 75 | | - smp_ops.smp_cpus_done = xen_smp_cpus_done; |
|---|
| 76 | 92 | } |
|---|