forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-11 297b60346df8beafee954a0fd7c2d64f33f3b9bc
kernel/arch/x86/xen/spinlock.c
@@ -3,22 +3,17 @@
  * Split spinlock implementation out into its own file, so it can be
  * compiled in a FTRACE-compatible way.
  */
-#include <linux/kernel_stat.h>
+#include <linux/kernel.h>
 #include <linux/spinlock.h>
-#include <linux/debugfs.h>
-#include <linux/log2.h>
-#include <linux/gfp.h>
 #include <linux/slab.h>
 #include <linux/atomic.h>
 
 #include <asm/paravirt.h>
 #include <asm/qspinlock.h>
 
-#include <xen/interface/xen.h>
 #include <xen/events.h>
 
 #include "xen-ops.h"
-#include "debugfs.h"
 
 static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
 static DEFINE_PER_CPU(char *, irq_name);
@@ -73,16 +68,14 @@
 	int irq;
 	char *name;
 
-	if (!xen_pvspin) {
-		if (cpu == 0)
-			static_branch_disable(&virt_spin_lock_key);
+	if (!xen_pvspin)
 		return;
-	}
 
 	WARN(per_cpu(lock_kicker_irq, cpu) >= 0, "spinlock on CPU%d exists on IRQ%d!\n",
 	     cpu, per_cpu(lock_kicker_irq, cpu));
 
 	name = kasprintf(GFP_KERNEL, "spinlock%d", cpu);
+	per_cpu(irq_name, cpu) = name;
 	irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR,
				     cpu,
				     dummy_handler,
@@ -93,7 +86,6 @@
 	if (irq >= 0) {
 		disable_irq(irq); /* make sure it's never delivered */
 		per_cpu(lock_kicker_irq, cpu) = irq;
-		per_cpu(irq_name, cpu) = name;
 	}
 
 	printk("cpu %d spinlock event irq %d\n", cpu, irq);
@@ -106,6 +98,8 @@
 	if (!xen_pvspin)
 		return;
 
+	kfree(per_cpu(irq_name, cpu));
+	per_cpu(irq_name, cpu) = NULL;
 	/*
	 * When booting the kernel with 'mitigations=auto,nosmt', the secondary
	 * CPUs are not activated, and lock_kicker_irq is not initialized.
@@ -116,8 +110,6 @@
 
 	unbind_from_irqhandler(irq, NULL);
 	per_cpu(lock_kicker_irq, cpu) = -1;
-	kfree(per_cpu(irq_name, cpu));
-	per_cpu(irq_name, cpu) = NULL;
 }
 
 PV_CALLEE_SAVE_REGS_THUNK(xen_vcpu_stolen);
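Note: the four hunks above change who owns the kasprintf() buffer. The name is now recorded in per_cpu(irq_name, cpu) immediately after allocation, and freed unconditionally at the top of the uninit path, so the string no longer leaks when bind_ipi_to_irqhandler() fails or when the kicker IRQ was never set up. A minimal standalone sketch of that pattern, using hypothetical stand-in names rather than the kernel's APIs:

/* Sketch only: record the allocation before the step that can fail,
 * free unconditionally at teardown. Names are stand-ins, not kernel code. */
#include <stdio.h>
#include <stdlib.h>

static char *irq_name;              /* stands in for per_cpu(irq_name, cpu) */
static int   lock_kicker_irq = -1;  /* stands in for the per-CPU IRQ slot */

static int bind_ipi(void) { return -1; }  /* assume binding can fail */

static void init_lock_cpu(void)
{
	char *name = malloc(16);
	if (!name)
		return;
	snprintf(name, 16, "spinlock%d", 0);
	irq_name = name;                /* recorded before the fallible bind */
	lock_kicker_irq = bind_ipi();   /* may fail; name is still tracked */
}

static void uninit_lock_cpu(void)
{
	free(irq_name);                 /* freed even if the bind failed */
	irq_name = NULL;
	if (lock_kicker_irq == -1)      /* early exit no longer leaks */
		return;
	lock_kicker_irq = -1;
}

int main(void)
{
	init_lock_cpu();
	uninit_lock_cpu();
	return 0;
}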
@@ -132,27 +124,29 @@
  */
 void __init xen_init_spinlocks(void)
 {
-
 	/* Don't need to use pvqspinlock code if there is only 1 vCPU. */
-	if (num_possible_cpus() == 1)
+	if (num_possible_cpus() == 1 || nopvspin)
 		xen_pvspin = false;
 
 	if (!xen_pvspin) {
 		printk(KERN_DEBUG "xen: PV spinlocks disabled\n");
+		static_branch_disable(&virt_spin_lock_key);
 		return;
 	}
 	printk(KERN_DEBUG "xen: PV spinlocks enabled\n");
 
 	__pv_init_lock_hash();
-	pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
-	pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
-	pv_lock_ops.wait = xen_qlock_wait;
-	pv_lock_ops.kick = xen_qlock_kick;
-	pv_lock_ops.vcpu_is_preempted = PV_CALLEE_SAVE(xen_vcpu_stolen);
+	pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
+	pv_ops.lock.queued_spin_unlock =
+		PV_CALLEE_SAVE(__pv_queued_spin_unlock);
+	pv_ops.lock.wait = xen_qlock_wait;
+	pv_ops.lock.kick = xen_qlock_kick;
+	pv_ops.lock.vcpu_is_preempted = PV_CALLEE_SAVE(xen_vcpu_stolen);
 }
 
 static __init int xen_parse_nopvspin(char *arg)
 {
+	pr_notice("\"xen_nopvspin\" is deprecated, please use \"nopvspin\" instead\n");
	xen_pvspin = false;
	return 0;
 }
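Note: this last hunk folds the generic nopvspin switch into the single-vCPU check, disables virt_spin_lock_key once at init time (instead of per-CPU in xen_init_lock_cpu()), and moves the callbacks from the old pv_lock_ops table into the consolidated pv_ops.lock structure. A minimal sketch of the init-time decision, with a plain bool standing in for the kernel's static key:

/* Sketch only: the enable/disable decision made once at init,
 * using hypothetical stand-ins for the static key and flags. */
#include <stdbool.h>
#include <stdio.h>

static bool virt_spin_lock_key = true;  /* stands in for the static key */

static void init_spinlocks(unsigned int ncpus, bool nopvspin)
{
	bool pvspin = !(ncpus == 1 || nopvspin);

	if (!pvspin) {
		/* cleared once here, instead of per-CPU as in the old code */
		virt_spin_lock_key = false;
		printf("xen: PV spinlocks disabled\n");
		return;
	}
	printf("xen: PV spinlocks enabled\n");
}

int main(void)
{
	init_spinlocks(1, false);  /* single vCPU: PV spinlocks off */
	return 0;
}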