2024-12-19 9370bb92b2d16684ee45cf24e879c93c509162da
kernel/kernel/stop_machine.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * kernel/stop_machine.c
  *
@@ -5,9 +6,8 @@
  * Copyright (C) 2008, 2005	Rusty Russell rusty@rustcorp.com.au
  * Copyright (C) 2010		SUSE Linux Products GmbH
  * Copyright (C) 2010		Tejun Heo <tj@kernel.org>
- *
- * This file is released under the GPLv2 and any later version.
  */
+#include <linux/compiler.h>
 #include <linux/completion.h>
 #include <linux/cpu.h>
 #include <linux/init.h>
@@ -22,16 +22,7 @@
 #include <linux/atomic.h>
 #include <linux/nmi.h>
 #include <linux/sched/wake_q.h>
-
-/*
- * Structure to determine completion condition and record errors. May
- * be shared by works on different cpus.
- */
-struct cpu_stop_done {
-	atomic_t		nr_todo;	/* nr left to execute */
-	int			ret;		/* collected return value */
-	struct completion	completion;	/* fired if nr_todo reaches 0 */
-};
+#include <linux/slab.h>
 
 /* the actual stopper, one per every possible cpu, enabled on online cpus */
 struct cpu_stopper {
@@ -86,11 +77,8 @@
 	enabled = stopper->enabled;
 	if (enabled)
 		__cpu_stop_queue_work(stopper, work, &wakeq);
-	else {
-		work->disabled = true;
-		if (work->done)
-			cpu_stop_signal_done(work->done);
-	}
+	else if (work->done)
+		cpu_stop_signal_done(work->done);
 	raw_spin_unlock_irqrestore(&stopper->lock, flags);
 
 	wake_up_q(&wakeq);
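
With the `disabled` flag gone, cpu_stop_queue_work()'s boolean return carries the whole contract: true means @fn will run on the target CPU, false means the stopper was disabled and any pending done was already signalled. A minimal sketch of a synchronous caller built on that contract, modelled on upstream stop_one_cpu() (for illustration; not part of this patch):

    int stop_one_cpu_sketch(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
    {
    	struct cpu_stop_done done;
    	struct cpu_stop_work work = { .fn = fn, .arg = arg, .done = &done };
    
    	cpu_stop_init_done(&done, 1);		/* one work left to execute */
    	if (!cpu_stop_queue_work(cpu, &work))	/* stopper disabled */
    		return -ENOENT;
    	wait_for_completion(&done.completion);	/* fired when nr_todo hits 0 */
    	return done.ret;			/* collected return value */
    }
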
@@ -171,7 +159,7 @@
 	/* Reset ack counter. */
 	atomic_set(&msdata->thread_ack, msdata->num_threads);
 	smp_wmb();
-	msdata->state = newstate;
+	WRITE_ONCE(msdata->state, newstate);
 }
 
 /* Last one to ack a state moves to the next state. */
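
The plain store to msdata->state becomes WRITE_ONCE() so the compiler can neither tear nor freely reorder it; it pairs with the READ_ONCE() added to the reader loop in the next hunk, while the existing smp_wmb() still orders the thread_ack reset before the new state is published. A compressed view of the resulting writer/reader pairing (fragment, for illustration):

    /* writer: set_state() */
    atomic_set(&msdata->thread_ack, msdata->num_threads);
    smp_wmb();				/* reset visible before new state */
    WRITE_ONCE(msdata->state, newstate);
    
    /* reader: multi_cpu_stop() loop, one fresh tear-free load per pass */
    newstate = READ_ONCE(msdata->state);
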
@@ -181,12 +169,18 @@
 	set_state(msdata, msdata->state + 1);
 }
 
+notrace void __weak stop_machine_yield(const struct cpumask *cpumask)
+{
+	cpu_relax();
+}
+
 /* This is the cpu_stop function which stops the CPU. */
 static int multi_cpu_stop(void *data)
 {
 	struct multi_stop_data *msdata = data;
-	enum multi_stop_state curstate = MULTI_STOP_NONE;
+	enum multi_stop_state newstate, curstate = MULTI_STOP_NONE;
 	int cpu = smp_processor_id(), err = 0;
+	const struct cpumask *cpumask;
 	unsigned long flags;
 	bool is_active;
 
@@ -196,17 +190,21 @@
 	 */
 	local_save_flags(flags);
 
-	if (!msdata->active_cpus)
-		is_active = cpu == cpumask_first(cpu_online_mask);
-	else
-		is_active = cpumask_test_cpu(cpu, msdata->active_cpus);
+	if (!msdata->active_cpus) {
+		cpumask = cpu_online_mask;
+		is_active = cpu == cpumask_first(cpumask);
+	} else {
+		cpumask = msdata->active_cpus;
+		is_active = cpumask_test_cpu(cpu, cpumask);
+	}
 
 	/* Simple state machine */
 	do {
 		/* Chill out and ensure we re-read multi_stop_state. */
-		cpu_relax_yield();
-		if (msdata->state != curstate) {
-			curstate = msdata->state;
+		stop_machine_yield(cpumask);
+		newstate = READ_ONCE(msdata->state);
+		if (newstate != curstate) {
+			curstate = newstate;
 			switch (curstate) {
 			case MULTI_STOP_DISABLE_IRQ:
 				local_irq_disable();
@@ -228,6 +226,7 @@
 			 */
 			touch_nmi_watchdog();
 		}
+		rcu_momentary_dyntick_idle();
 	} while (curstate != MULTI_STOP_EXIT);
 
 	local_irq_restore(flags);
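
stop_machine_yield() is deliberately __weak so an architecture can replace the plain cpu_relax() spin with something smarter (s390, for instance, directs spare cycles at the CPUs it is waiting on), and the new rcu_momentary_dyntick_idle() call reports a quiescent state so that a long spin with IRQs off does not trigger RCU stall warnings. A hypothetical override of the weak hook, where smp_yield_to() is a made-up stand-in for an arch-specific directed-yield primitive:

    notrace void stop_machine_yield(const struct cpumask *cpumask)
    {
    	int this_cpu = smp_processor_id();
    	int cpu = cpumask_next_wrap(this_cpu, cpumask, this_cpu, false);
    
    	if (cpu < nr_cpu_ids)
    		smp_yield_to(cpu);	/* hypothetical: yield to a lagging stopper */
    	else
    		cpu_relax();
    }
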
@@ -362,6 +361,55 @@
 	*work_buf = (struct cpu_stop_work){ .fn = fn, .arg = arg, };
 	return cpu_stop_queue_work(cpu, work_buf);
 }
+EXPORT_SYMBOL_GPL(stop_one_cpu_nowait);
+
+/**
+ * stop_one_cpu_async - stop a cpu asynchronously; wait for completion
+ *			in a separate function: cpu_stop_work_wait()
+ * @cpu: cpu to stop
+ * @fn: function to execute
+ * @arg: argument to @fn
+ * @work_buf: pointer to cpu_stop_work structure
+ *
+ * CONTEXT:
+ * Might sleep.
+ *
+ * RETURNS:
+ * 0 if cpu_stop_work was queued successfully and @fn will be called.
+ * -ENOENT if @fn(@arg) was not executed because @cpu was offline.
+ */
+int stop_one_cpu_async(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
+		       struct cpu_stop_work *work_buf,
+		       struct cpu_stop_done *done)
+{
+	cpu_stop_init_done(done, 1);
+
+	work_buf->done = done;
+	work_buf->fn = fn;
+	work_buf->arg = arg;
+
+	if (cpu_stop_queue_work(cpu, work_buf))
+		return 0;
+
+	work_buf->done = NULL;
+
+	return -ENOENT;
+}
+
+/**
+ * cpu_stop_work_wait - wait for a stop initiated by stop_one_cpu_async().
+ * @work_buf: pointer to cpu_stop_work structure
+ *
+ * CONTEXT:
+ * Might sleep.
+ */
+void cpu_stop_work_wait(struct cpu_stop_work *work_buf)
+{
+	struct cpu_stop_done *done = work_buf->done;
+
+	wait_for_completion(&done->completion);
+	work_buf->done = NULL;
+}
 
 static bool queue_stop_cpus_work(const struct cpumask *cpumask,
 				 cpu_stop_fn_t fn, void *arg,
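
The two additions form an async/wait pair: stop_one_cpu_async() queues the work and returns immediately, and cpu_stop_work_wait() later sleeps on the embedded completion. A sketch of the intended calling pattern (caller-side names are illustrative only; @done must stay alive until the wait returns, since work.done points into it):

    static int example_stop_fn(void *arg)
    {
    	/* runs on the target CPU in stopper context */
    	return 0;
    }
    
    static int example_caller(unsigned int cpu)
    {
    	struct cpu_stop_work work;
    	struct cpu_stop_done done;
    	int ret;
    
    	ret = stop_one_cpu_async(cpu, example_stop_fn, NULL, &work, &done);
    	if (ret)
    		return ret;		/* -ENOENT: cpu was offline */
    
    	/* ... do other setup while the stopper runs ... */
    
    	cpu_stop_work_wait(&work);	/* sleeps until example_stop_fn is done */
    	return done.ret;		/* collected return value */
    }
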
@@ -378,6 +426,7 @@
 	 */
 	preempt_disable();
 	stop_cpus_in_progress = true;
+	barrier();
 	for_each_cpu(cpu, cpumask) {
 		work = &per_cpu(cpu_stopper.stop_work, cpu);
 		work->fn = fn;
@@ -386,6 +435,7 @@
 		if (cpu_stop_queue_work(cpu, work))
 			queued = true;
 	}
+	barrier();
 	stop_cpus_in_progress = false;
 	preempt_enable();
 
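
stop_cpus_in_progress is read locklessly by cpu_stop_queue_two_works() so it can back off and retry instead of deadlocking against a concurrent stop_cpus(); the two barrier() calls keep the compiler from sliding the flag's set/clear into or across the queueing loop. Roughly the reader shape these compiler barriers pair with (simplified from the upstream retry path, for illustration):

    	if (unlikely(err == -EDEADLK)) {
    		while (stop_cpus_in_progress)	/* flag pinned by barrier() above */
    			cpu_relax();
    		goto retry;
    	}
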
@@ -432,42 +482,12 @@
  * @cpumask were offline; otherwise, 0 if all executions of @fn
  * returned 0, any non zero return value if any returned non zero.
  */
-int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
+static int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
 {
 	int ret;
 
 	/* static works are used, process one request at a time */
 	mutex_lock(&stop_cpus_mutex);
-	ret = __stop_cpus(cpumask, fn, arg);
-	mutex_unlock(&stop_cpus_mutex);
-	return ret;
-}
-
-/**
- * try_stop_cpus - try to stop multiple cpus
- * @cpumask: cpus to stop
- * @fn: function to execute
- * @arg: argument to @fn
- *
- * Identical to stop_cpus() except that it fails with -EAGAIN if
- * someone else is already using the facility.
- *
- * CONTEXT:
- * Might sleep.
- *
- * RETURNS:
- * -EAGAIN if someone else is already stopping cpus, -ENOENT if
- * @fn(@arg) was not executed at all because all cpus in @cpumask were
- * offline; otherwise, 0 if all executions of @fn returned 0, any non
- * zero return value if any returned non zero.
- */
-int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
-{
-	int ret;
-
-	/* static works are used, process one request at a time */
-	if (!mutex_trylock(&stop_cpus_mutex))
-		return -EAGAIN;
 	ret = __stop_cpus(cpumask, fn, arg);
 	mutex_unlock(&stop_cpus_mutex);
 	return ret;
@@ -516,7 +536,7 @@
 		}
 		preempt_count_dec();
 		WARN_ONCE(preempt_count(),
-			  "cpu_stop: %pf(%p) leaked preempt count\n", fn, arg);
+			  "cpu_stop: %ps(%p) leaked preempt count\n", fn, arg);
 		goto repeat;
 	}
 }
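
%pf was the old function-descriptor form of the symbol specifier; once %ps learned to dereference function descriptors itself, %pf became redundant and was removed from vsprintf, so the WARN is converted to the surviving spelling. Both print the symbol name of the stop function, e.g. (illustrative output):

    pr_warn("cpu_stop: %ps(%p) leaked preempt count\n", fn, arg);
    /* -> "cpu_stop: multi_cpu_stop(0x...) leaked preempt count" */
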