2023-12-11 6778948f9de86c3cfaf36725a7c87dcff9ba247f
kernel/include/linux/smp.h
@@ -12,19 +12,38 @@
 #include <linux/list.h>
 #include <linux/cpumask.h>
 #include <linux/init.h>
-#include <linux/llist.h>
+#include <linux/smp_types.h>
 
 typedef void (*smp_call_func_t)(void *info);
+typedef bool (*smp_cond_func_t)(int cpu, void *info);
+
+/*
+ * structure shares (partial) layout with struct irq_work
+ */
 struct __call_single_data {
-	struct llist_node llist;
+	union {
+		struct __call_single_node node;
+		struct {
+			struct llist_node llist;
+			unsigned int flags;
+#ifdef CONFIG_64BIT
+			u16 src, dst;
+#endif
+		};
+	};
 	smp_call_func_t func;
 	void *info;
-	unsigned int flags;
 };
 
 /* Use __aligned() to avoid to use 2 cache lines for 1 csd */
 typedef struct __call_single_data call_single_data_t
 	__aligned(sizeof(struct __call_single_data));
+
+/*
+ * Enqueue a llist_node on the call_single_queue; be very careful, read
+ * flush_smp_call_function_queue() in detail.
+ */
+extern void __smp_call_single_queue(int cpu, struct llist_node *node);
 
 /* total number of cpus in this system (may exceed NR_CPUS) */
 extern unsigned int total_cpus;
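Reviewer's note, not part of the patch: even with the csd layout now sharing struct __call_single_node, callers still only fill func/info; the list node and flags are managed by the smp core. A minimal sketch of asynchronous single-CPU usage follows; my_csd_func(), my_csd, kick_remote_cpu() and target_cpu are hypothetical names chosen for illustration.

#include <linux/smp.h>

static void my_csd_func(void *info)
{
	/* runs on the target CPU, from the call-function IPI path */
}

static call_single_data_t my_csd = {
	.func	= my_csd_func,
	.info	= NULL,
};

static void kick_remote_cpu(int target_cpu)
{
	/* queue without blocking; my_csd must not be reused until my_csd_func() has run */
	smp_call_function_single_async(target_cpu, &my_csd);
}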
@@ -35,7 +54,7 @@
 /*
  * Call a function on all processors
  */
-int on_each_cpu(smp_call_func_t func, void *info, int wait);
+void on_each_cpu(smp_call_func_t func, void *info, int wait);
 
 /*
  * Call a function on processors specified by mask, which might include
@@ -49,9 +68,11 @@
  * cond_func returns a positive value. This may include the local
  * processor.
  */
-void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
-		smp_call_func_t func, void *info, bool wait,
-		gfp_t gfp_flags);
+void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
+		      void *info, bool wait);
+
+void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
+			   void *info, bool wait, const struct cpumask *mask);
 
 int smp_call_function_single_async(int cpu, struct __call_single_data *csd);
 
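Reviewer's note on the reworked conditional API (a sketch, not from the patch): the condition callback now matches smp_cond_func_t, the gfp_t argument is gone, and the new _mask variant restricts the call to a cpumask. The helper names cpu_needs_flush(), do_flush() and flush_everywhere() below are hypothetical; cpu_online_mask is the standard kernel mask.

#include <linux/smp.h>
#include <linux/cpumask.h>

static bool cpu_needs_flush(int cpu, void *info)
{
	/* per-CPU predicate; return true to run the function on 'cpu' */
	return true;
}

static void do_flush(void *info)
{
	/* runs on every CPU for which cpu_needs_flush() returned true */
}

static void flush_everywhere(void)
{
	/* wait == true: return only after do_flush() has finished on all selected CPUs */
	on_each_cpu_cond_mask(cpu_needs_flush, do_flush, NULL, true, cpu_online_mask);
}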
@@ -97,7 +118,7 @@
 /*
  * Call a function on all other processors
  */
-int smp_call_function(smp_call_func_t func, void *info, int wait);
+void smp_call_function(smp_call_func_t func, void *info, int wait);
 void smp_call_function_many(const struct cpumask *mask,
 			    smp_call_func_t func, void *info, bool wait);
 
@@ -106,6 +127,7 @@
 
 void kick_all_cpus_sync(void);
 void wake_up_all_idle_cpus(void);
+void wake_up_all_online_idle_cpus(void);
 
 /*
  * Generic and arch helpers
@@ -140,9 +162,8 @@
  * These macros fold the SMP functionality into a single CPU system
  */
 #define raw_smp_processor_id()			0
-static inline int up_smp_call_function(smp_call_func_t func, void *info)
+static inline void up_smp_call_function(smp_call_func_t func, void *info)
 {
-	return 0;
 }
 #define smp_call_function(func, info, wait) \
 			(up_smp_call_function(func, info))
@@ -162,6 +183,7 @@
 
 static inline void kick_all_cpus_sync(void) { }
 static inline void wake_up_all_idle_cpus(void) { }
+static inline void wake_up_all_online_idle_cpus(void) { }
 
 #ifdef CONFIG_UP_LATE_INIT
 extern void __init up_late_init(void);
@@ -177,29 +199,46 @@
 
 #endif /* !SMP */
 
-/*
- * smp_processor_id(): get the current CPU ID.
+/**
+ * raw_processor_id() - get the current (unstable) CPU id
  *
- * if DEBUG_PREEMPT is enabled then we check whether it is
- * used in a preemption-safe way. (smp_processor_id() is safe
- * if it's used in a preemption-off critical section, or in
- * a thread that is bound to the current CPU.)
- *
- * NOTE: raw_smp_processor_id() is for internal use only
- * (smp_processor_id() is the preferred variant), but in rare
- * instances it might also be used to turn off false positives
- * (i.e. smp_processor_id() use that the debugging code reports but
- * which use for some reason is legal). Don't use this to hack around
- * the warning message, as your code might not work under PREEMPT.
+ * For then you know what you are doing and need an unstable
+ * CPU id.
  */
+
+/**
+ * smp_processor_id() - get the current (stable) CPU id
+ *
+ * This is the normal accessor to the CPU id and should be used
+ * whenever possible.
+ *
+ * The CPU id is stable when:
+ *
+ *  - IRQs are disabled;
+ *  - preemption is disabled;
+ *  - the task is CPU affine.
+ *
+ * When CONFIG_DEBUG_PREEMPT; we verify these assumption and WARN
+ * when smp_processor_id() is used when the CPU id is not stable.
+ */
+
+/*
+ * Allow the architecture to differentiate between a stable and unstable read.
+ * For example, x86 uses an IRQ-safe asm-volatile read for the unstable but a
+ * regular asm read for the stable.
+ */
+#ifndef __smp_processor_id
+#define __smp_processor_id(x) raw_smp_processor_id(x)
+#endif
+
 #ifdef CONFIG_DEBUG_PREEMPT
   extern unsigned int debug_smp_processor_id(void);
 # define smp_processor_id() debug_smp_processor_id()
 #else
-# define smp_processor_id() raw_smp_processor_id()
+# define smp_processor_id() __smp_processor_id()
 #endif
 
-#define get_cpu()		({ preempt_disable(); smp_processor_id(); })
+#define get_cpu()		({ preempt_disable(); __smp_processor_id(); })
 #define put_cpu()		preempt_enable()
 
 /*
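Reviewer's sketch (not from the patch): with get_cpu() now expanding to __smp_processor_id(), the usual caller pattern for a stable CPU id is unchanged. The function name touch_this_cpu_data() below is hypothetical.

#include <linux/smp.h>
#include <linux/printk.h>

static void touch_this_cpu_data(void)
{
	int cpu = get_cpu();	/* disables preemption; 'cpu' is stable from here on */

	/* work that must stay on this CPU */
	pr_info("running on CPU %d\n", cpu);

	put_cpu();		/* re-enables preemption */
}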
@@ -208,8 +247,8 @@
  */
 extern void arch_disable_smp_support(void);
 
-extern void arch_enable_nonboot_cpus_begin(void);
-extern void arch_enable_nonboot_cpus_end(void);
+extern void arch_thaw_secondary_cpus_begin(void);
+extern void arch_thaw_secondary_cpus_end(void);
 void smp_setup_processor_id(void);
 
 /*