2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/arch/riscv/kernel/smp.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * SMP initialisation and IPI support
  * Based on arch/arm64/kernel/smp.c
@@ -5,39 +6,71 @@
  * Copyright (C) 2012 ARM Ltd.
  * Copyright (C) 2015 Regents of the University of California
  * Copyright (C) 2017 SiFive
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/cpu.h>
 #include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/profile.h>
 #include <linux/smp.h>
 #include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/delay.h>
+#include <linux/irq_work.h>
 
 #include <asm/sbi.h>
 #include <asm/tlbflush.h>
 #include <asm/cacheflush.h>
 
-/* A collection of single bit ipi messages. */
-static struct {
-	unsigned long bits ____cacheline_aligned;
-} ipi_data[NR_CPUS] __cacheline_aligned;
-
 enum ipi_message_type {
 	IPI_RESCHEDULE,
 	IPI_CALL_FUNC,
+	IPI_CPU_STOP,
+	IPI_IRQ_WORK,
 	IPI_MAX
 };
 
+unsigned long __cpuid_to_hartid_map[NR_CPUS] = {
+	[0 ... NR_CPUS-1] = INVALID_HARTID
+};
+
+void __init smp_setup_processor_id(void)
+{
+	cpuid_to_hartid_map(0) = boot_cpu_hartid;
+}
+
+/* A collection of single bit ipi messages. */
+static struct {
+	unsigned long stats[IPI_MAX] ____cacheline_aligned;
+	unsigned long bits ____cacheline_aligned;
+} ipi_data[NR_CPUS] __cacheline_aligned;
+
+int riscv_hartid_to_cpuid(int hartid)
+{
+	int i;
+
+	for (i = 0; i < NR_CPUS; i++)
+		if (cpuid_to_hartid_map(i) == hartid)
+			return i;
+
+	pr_err("Couldn't find cpu id for hartid [%d]\n", hartid);
+	return -ENOENT;
+}
+
+void riscv_cpuid_to_hartid_mask(const struct cpumask *in, struct cpumask *out)
+{
+	int cpu;
+
+	cpumask_clear(out);
+	for_each_cpu(cpu, in)
+		cpumask_set_cpu(cpuid_to_hartid_map(cpu), out);
+}
+EXPORT_SYMBOL_GPL(riscv_cpuid_to_hartid_mask);
+
+bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
+{
+	return phys_id == cpuid_to_hartid_map(cpu);
+}
 
 /* Unsupported */
 int setup_profiling_timer(unsigned int multiplier)
@@ -45,12 +78,73 @@
 	return -EINVAL;
 }
 
-void riscv_software_interrupt(void)
+static void ipi_stop(void)
 {
-	unsigned long *pending_ipis = &ipi_data[smp_processor_id()].bits;
+	set_cpu_online(smp_processor_id(), false);
+	while (1)
+		wait_for_interrupt();
+}
 
-	/* Clear pending IPI */
-	csr_clear(sip, SIE_SSIE);
+static struct riscv_ipi_ops *ipi_ops;
+
+void riscv_set_ipi_ops(struct riscv_ipi_ops *ops)
+{
+	ipi_ops = ops;
+}
+EXPORT_SYMBOL_GPL(riscv_set_ipi_ops);
+
+void riscv_clear_ipi(void)
+{
+	if (ipi_ops && ipi_ops->ipi_clear)
+		ipi_ops->ipi_clear();
+
+	csr_clear(CSR_IP, IE_SIE);
+}
+EXPORT_SYMBOL_GPL(riscv_clear_ipi);
+
+static void send_ipi_mask(const struct cpumask *mask, enum ipi_message_type op)
+{
+	int cpu;
+
+	smp_mb__before_atomic();
+	for_each_cpu(cpu, mask)
+		set_bit(op, &ipi_data[cpu].bits);
+	smp_mb__after_atomic();
+
+	if (ipi_ops && ipi_ops->ipi_inject)
+		ipi_ops->ipi_inject(mask);
+	else
+		pr_warn("SMP: IPI inject method not available\n");
+}
+
+static void send_ipi_single(int cpu, enum ipi_message_type op)
+{
+	smp_mb__before_atomic();
+	set_bit(op, &ipi_data[cpu].bits);
+	smp_mb__after_atomic();
+
+	if (ipi_ops && ipi_ops->ipi_inject)
+		ipi_ops->ipi_inject(cpumask_of(cpu));
+	else
+		pr_warn("SMP: IPI inject method not available\n");
+}
+
+#ifdef CONFIG_IRQ_WORK
+void arch_irq_work_raise(void)
+{
+	send_ipi_single(smp_processor_id(), IPI_IRQ_WORK);
+}
+#endif
+
+void handle_IPI(struct pt_regs *regs)
+{
+	struct pt_regs *old_regs = set_irq_regs(regs);
+	unsigned long *pending_ipis = &ipi_data[smp_processor_id()].bits;
+	unsigned long *stats = ipi_data[smp_processor_id()].stats;
+
+	irq_enter();
+
+	riscv_clear_ipi();
 
 	while (true) {
 		unsigned long ops;
@@ -60,104 +154,96 @@
 
 		ops = xchg(pending_ipis, 0);
 		if (ops == 0)
-			return;
+			goto done;
 
-		if (ops & (1 << IPI_RESCHEDULE))
+		if (ops & (1 << IPI_RESCHEDULE)) {
+			stats[IPI_RESCHEDULE]++;
 			scheduler_ipi();
+		}
 
-		if (ops & (1 << IPI_CALL_FUNC))
+		if (ops & (1 << IPI_CALL_FUNC)) {
+			stats[IPI_CALL_FUNC]++;
 			generic_smp_call_function_interrupt();
+		}
+
+		if (ops & (1 << IPI_CPU_STOP)) {
+			stats[IPI_CPU_STOP]++;
+			ipi_stop();
+		}
+
+		if (ops & (1 << IPI_IRQ_WORK)) {
+			stats[IPI_IRQ_WORK]++;
+			irq_work_run();
+		}
 
 		BUG_ON((ops >> IPI_MAX) != 0);
 
 		/* Order data access and bit testing. */
 		mb();
 	}
+
+done:
+	irq_exit();
+	set_irq_regs(old_regs);
 }
 
-static void
-send_ipi_message(const struct cpumask *to_whom, enum ipi_message_type operation)
+static const char * const ipi_names[] = {
+	[IPI_RESCHEDULE]	= "Rescheduling interrupts",
+	[IPI_CALL_FUNC]		= "Function call interrupts",
+	[IPI_CPU_STOP]		= "CPU stop interrupts",
+	[IPI_IRQ_WORK]		= "IRQ work interrupts",
+};
+
+void show_ipi_stats(struct seq_file *p, int prec)
 {
-	int i;
+	unsigned int cpu, i;
 
-	mb();
-	for_each_cpu(i, to_whom)
-		set_bit(operation, &ipi_data[i].bits);
-
-	mb();
-	sbi_send_ipi(cpumask_bits(to_whom));
+	for (i = 0; i < IPI_MAX; i++) {
+		seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
+			   prec >= 4 ? " " : "");
+		for_each_online_cpu(cpu)
+			seq_printf(p, "%10lu ", ipi_data[cpu].stats[i]);
+		seq_printf(p, " %s\n", ipi_names[i]);
+	}
 }
 
 void arch_send_call_function_ipi_mask(struct cpumask *mask)
 {
-	send_ipi_message(mask, IPI_CALL_FUNC);
+	send_ipi_mask(mask, IPI_CALL_FUNC);
 }
 
 void arch_send_call_function_single_ipi(int cpu)
 {
-	send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC);
-}
-
-static void ipi_stop(void *unused)
-{
-	while (1)
-		wait_for_interrupt();
+	send_ipi_single(cpu, IPI_CALL_FUNC);
 }
 
 void smp_send_stop(void)
 {
-	on_each_cpu(ipi_stop, NULL, 1);
+	unsigned long timeout;
+
+	if (num_online_cpus() > 1) {
+		cpumask_t mask;
+
+		cpumask_copy(&mask, cpu_online_mask);
+		cpumask_clear_cpu(smp_processor_id(), &mask);
+
+		if (system_state <= SYSTEM_RUNNING)
+			pr_crit("SMP: stopping secondary CPUs\n");
+		send_ipi_mask(&mask, IPI_CPU_STOP);
+	}
+
+	/* Wait up to one second for other CPUs to stop */
+	timeout = USEC_PER_SEC;
+	while (num_online_cpus() > 1 && timeout--)
+		udelay(1);
+
+	if (num_online_cpus() > 1)
+		pr_warn("SMP: failed to stop secondary CPUs %*pbl\n",
+			cpumask_pr_args(cpu_online_mask));
 }
 
 void smp_send_reschedule(int cpu)
 {
-	send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
+	send_ipi_single(cpu, IPI_RESCHEDULE);
 }
-
-/*
- * Performs an icache flush for the given MM context.  RISC-V has no direct
- * mechanism for instruction cache shoot downs, so instead we send an IPI that
- * informs the remote harts they need to flush their local instruction caches.
- * To avoid pathologically slow behavior in a common case (a bunch of
- * single-hart processes on a many-hart machine, ie 'make -j') we avoid the
- * IPIs for harts that are not currently executing a MM context and instead
- * schedule a deferred local instruction cache flush to be performed before
- * execution resumes on each hart.
- */
-void flush_icache_mm(struct mm_struct *mm, bool local)
-{
-	unsigned int cpu;
-	cpumask_t others, *mask;
-
-	preempt_disable();
-
-	/* Mark every hart's icache as needing a flush for this MM. */
-	mask = &mm->context.icache_stale_mask;
-	cpumask_setall(mask);
-	/* Flush this hart's I$ now, and mark it as flushed. */
-	cpu = smp_processor_id();
-	cpumask_clear_cpu(cpu, mask);
-	local_flush_icache_all();
-
-	/*
-	 * Flush the I$ of other harts concurrently executing, and mark them as
-	 * flushed.
-	 */
-	cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
-	local |= cpumask_empty(&others);
-	if (mm != current->active_mm || !local)
-		sbi_remote_fence_i(others.bits);
-	else {
-		/*
-		 * It's assumed that at least one strongly ordered operation is
-		 * performed on this hart between setting a hart's cpumask bit
-		 * and scheduling this MM context on that hart.  Sending an SBI
-		 * remote message will do this, but in the case where no
-		 * messages are sent we still need to order this hart's writes
-		 * with flush_icache_deferred().
-		 */
-		smp_mb();
-	}
-
-	preempt_enable();
-}
+EXPORT_SYMBOL_GPL(smp_send_reschedule);
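
Note: the diff above stops calling sbi_send_ipi() directly and instead routes all IPI injection through a pluggable riscv_ipi_ops backend registered via riscv_set_ipi_ops(). A minimal sketch of how a backend might plug in follows, assuming riscv_ipi_ops exposes the ipi_inject/ipi_clear hooks used above; the function names sbi_ipi_setup and sbi_send_cpumask_ipi are illustrative, not taken from this diff:

/*
 * Hypothetical SBI-backed IPI provider registering itself with the
 * riscv_set_ipi_ops() interface introduced above.
 */
static void sbi_send_cpumask_ipi(const struct cpumask *target)
{
	struct cpumask hartid_mask;

	/* Firmware addresses harts, not logical cpus: translate first. */
	riscv_cpuid_to_hartid_mask(target, &hartid_mask);
	sbi_send_ipi(cpumask_bits(&hartid_mask));
}

static struct riscv_ipi_ops sbi_ipi_ops = {
	.ipi_inject = sbi_send_cpumask_ipi,
};

void __init sbi_ipi_setup(void)
{
	/* After this, send_ipi_mask()/send_ipi_single() can deliver IPIs. */
	riscv_set_ipi_ops(&sbi_ipi_ops);
}

The indirection lets platforms without a usable SBI (or with an IPI-capable interrupt controller) supply their own injection method, while the generic code keeps the pending-bit bookkeeping and per-type statistics in one place.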