forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-10 37f49e37ab4cb5d0bc4c60eb5c6d4dd57db767bb
kernel/arch/x86/kernel/apic/ipi.c
....@@ -1,24 +1,114 @@
11 // SPDX-License-Identifier: GPL-2.0
2
+
23 #include <linux/cpumask.h>
3
-#include <linux/interrupt.h>
4
+#include <linux/smp.h>
5
+#include <asm/io_apic.h>
46
5
-#include <linux/mm.h>
6
-#include <linux/delay.h>
7
-#include <linux/spinlock.h>
8
-#include <linux/kernel_stat.h>
9
-#include <linux/mc146818rtc.h>
10
-#include <linux/cache.h>
11
-#include <linux/cpu.h>
7
+#include "local.h"
128
13
-#include <asm/smp.h>
14
-#include <asm/mtrr.h>
15
-#include <asm/tlbflush.h>
16
-#include <asm/mmu_context.h>
17
-#include <asm/apic.h>
18
-#include <asm/proto.h>
19
-#include <asm/ipi.h>
9
+DEFINE_STATIC_KEY_FALSE(apic_use_ipi_shorthand);
2010
21
-void __default_send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int dest)
11
+#ifdef CONFIG_SMP
12
+static int apic_ipi_shorthand_off __ro_after_init;
13
+
14
+static __init int apic_ipi_shorthand(char *str)
15
+{
16
+ get_option(&str, &apic_ipi_shorthand_off);
17
+ return 1;
18
+}
19
+__setup("no_ipi_broadcast=", apic_ipi_shorthand);
20
+
21
+static int __init print_ipi_mode(void)
22
+{
23
+ pr_info("IPI shorthand broadcast: %s\n",
24
+ apic_ipi_shorthand_off ? "disabled" : "enabled");
25
+ return 0;
26
+}
27
+late_initcall(print_ipi_mode);
28
+
29
+void apic_smt_update(void)
30
+{
31
+ /*
32
+ * Do not switch to broadcast mode if:
33
+ * - Disabled on the command line
34
+ * - Only a single CPU is online
35
+ * - Not all present CPUs have been at least booted once
36
+ *
37
+ * The latter is important as the local APIC might be in some
38
+ * random state and a broadcast might cause havoc. That's
39
+ * especially true for NMI broadcasting.
40
+ */
41
+ if (apic_ipi_shorthand_off || num_online_cpus() == 1 ||
42
+ !cpumask_equal(cpu_present_mask, &cpus_booted_once_mask)) {
43
+ static_branch_disable(&apic_use_ipi_shorthand);
44
+ } else {
45
+ static_branch_enable(&apic_use_ipi_shorthand);
46
+ }
47
+}
48
+
49
+void apic_send_IPI_allbutself(unsigned int vector)
50
+{
51
+ if (num_online_cpus() < 2)
52
+ return;
53
+
54
+ if (static_branch_likely(&apic_use_ipi_shorthand))
55
+ apic->send_IPI_allbutself(vector);
56
+ else
57
+ apic->send_IPI_mask_allbutself(cpu_online_mask, vector);
58
+}
59
+
60
+/*
61
+ * Send a 'reschedule' IPI to another CPU. It goes straight through and
62
+ * wastes no time serializing anything. Worst case is that we lose a
63
+ * reschedule ...
64
+ */
65
+void native_smp_send_reschedule(int cpu)
66
+{
67
+ if (unlikely(cpu_is_offline(cpu))) {
68
+ WARN(1, "sched: Unexpected reschedule of offline CPU#%d!\n", cpu);
69
+ return;
70
+ }
71
+ apic->send_IPI(cpu, RESCHEDULE_VECTOR);
72
+}
73
+
74
+void native_send_call_func_single_ipi(int cpu)
75
+{
76
+ apic->send_IPI(cpu, CALL_FUNCTION_SINGLE_VECTOR);
77
+}
78
+
79
+void native_send_call_func_ipi(const struct cpumask *mask)
80
+{
81
+ if (static_branch_likely(&apic_use_ipi_shorthand)) {
82
+ unsigned int cpu = smp_processor_id();
83
+
84
+ if (!cpumask_or_equal(mask, cpumask_of(cpu), cpu_online_mask))
85
+ goto sendmask;
86
+
87
+ if (cpumask_test_cpu(cpu, mask))
88
+ apic->send_IPI_all(CALL_FUNCTION_VECTOR);
89
+ else if (num_online_cpus() > 1)
90
+ apic->send_IPI_allbutself(CALL_FUNCTION_VECTOR);
91
+ return;
92
+ }
93
+
94
+sendmask:
95
+ apic->send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
96
+}
97
+
98
+#endif /* CONFIG_SMP */
99
+
100
/* Encode @mask into the ICR2 destination field format. */
static inline int __prepare_ICR2(unsigned int mask)
{
	return SET_APIC_DEST_FIELD(mask);
}
104
+
105
+static inline void __xapic_wait_icr_idle(void)
106
+{
107
+ while (native_apic_mem_read(APIC_ICR) & APIC_ICR_BUSY)
108
+ cpu_relax();
109
+}
110
+
111
+void __default_send_IPI_shortcut(unsigned int shortcut, int vector)
22112 {
23113 /*
24114 * Subtle. In the case of the 'never do double writes' workaround
....@@ -32,12 +122,16 @@
32122 /*
33123 * Wait for idle.
34124 */
35
- __xapic_wait_icr_idle();
125
+ if (unlikely(vector == NMI_VECTOR))
126
+ safe_apic_wait_icr_idle();
127
+ else
128
+ __xapic_wait_icr_idle();
36129
37130 /*
38
- * No need to touch the target chip field
131
+ * No need to touch the target chip field. Also the destination
132
+ * mode is ignored when a shorthand is used.
39133 */
40
- cfg = __prepare_ICR(shortcut, vector, dest);
134
+ cfg = __prepare_ICR(shortcut, vector, 0);
41135
42136 /*
43137 * Send the IPI. The write to APIC_ICR fires this off.
....@@ -133,6 +227,21 @@
133227 apic->send_IPI_mask(cpumask_of(cpu), vector);
134228 }
135229
230
+void default_send_IPI_allbutself(int vector)
231
+{
232
+ __default_send_IPI_shortcut(APIC_DEST_ALLBUT, vector);
233
+}
234
+
235
+void default_send_IPI_all(int vector)
236
+{
237
+ __default_send_IPI_shortcut(APIC_DEST_ALLINC, vector);
238
+}
239
+
240
+void default_send_IPI_self(int vector)
241
+{
242
+ __default_send_IPI_shortcut(APIC_DEST_SELF, vector);
243
+}
244
+
136245 #ifdef CONFIG_X86_32
137246
138247 void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
....@@ -190,28 +299,6 @@
190299 WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]);
191300 __default_send_IPI_dest_field(mask, vector, apic->dest_logical);
192301 local_irq_restore(flags);
193
-}
194
-
195
-void default_send_IPI_allbutself(int vector)
196
-{
197
- /*
198
- * if there are no other CPUs in the system then we get an APIC send
199
- * error if we try to broadcast, thus avoid sending IPIs in this case.
200
- */
201
- if (!(num_online_cpus() > 1))
202
- return;
203
-
204
- __default_local_send_IPI_allbutself(vector);
205
-}
206
-
207
-void default_send_IPI_all(int vector)
208
-{
209
- __default_local_send_IPI_all(vector);
210
-}
211
-
212
-void default_send_IPI_self(int vector)
213
-{
214
- __default_send_IPI_shortcut(APIC_DEST_SELF, vector, apic->dest_logical);
215302 }
216303
217304 /* must come after the send_IPI functions above for inlining */