hc
2024-10-22 8ac6c7a54ed1b98d142dce24b11c6de6a1e239a5
kernel/kernel/stop_machine.c
....@@ -1,3 +1,4 @@
1
+// SPDX-License-Identifier: GPL-2.0-or-later
12 /*
23 * kernel/stop_machine.c
34 *
....@@ -5,9 +6,8 @@
56 * Copyright (C) 2008, 2005 Rusty Russell rusty@rustcorp.com.au
67 * Copyright (C) 2010 SUSE Linux Products GmbH
78 * Copyright (C) 2010 Tejun Heo <tj@kernel.org>
8
- *
9
- * This file is released under the GPLv2 and any later version.
109 */
10
+#include <linux/compiler.h>
1111 #include <linux/completion.h>
1212 #include <linux/cpu.h>
1313 #include <linux/init.h>
....@@ -22,16 +22,7 @@
2222 #include <linux/atomic.h>
2323 #include <linux/nmi.h>
2424 #include <linux/sched/wake_q.h>
25
-
26
-/*
27
- * Structure to determine completion condition and record errors. May
28
- * be shared by works on different cpus.
29
- */
30
-struct cpu_stop_done {
31
- atomic_t nr_todo; /* nr left to execute */
32
- int ret; /* collected return value */
33
- struct completion completion; /* fired if nr_todo reaches 0 */
34
-};
25
+#include <linux/slab.h>
3526
3627 /* the actual stopper, one per every possible cpu, enabled on online cpus */
3728 struct cpu_stopper {
....@@ -168,7 +159,7 @@
168159 /* Reset ack counter. */
169160 atomic_set(&msdata->thread_ack, msdata->num_threads);
170161 smp_wmb();
171
- msdata->state = newstate;
162
+ WRITE_ONCE(msdata->state, newstate);
172163 }
173164
174165 /* Last one to ack a state moves to the next state. */
....@@ -178,12 +169,18 @@
178169 set_state(msdata, msdata->state + 1);
179170 }
180171
172
+notrace void __weak stop_machine_yield(const struct cpumask *cpumask)
173
+{
174
+ cpu_relax();
175
+}
176
+
181177 /* This is the cpu_stop function which stops the CPU. */
182178 static int multi_cpu_stop(void *data)
183179 {
184180 struct multi_stop_data *msdata = data;
185
- enum multi_stop_state curstate = MULTI_STOP_NONE;
181
+ enum multi_stop_state newstate, curstate = MULTI_STOP_NONE;
186182 int cpu = smp_processor_id(), err = 0;
183
+ const struct cpumask *cpumask;
187184 unsigned long flags;
188185 bool is_active;
189186
....@@ -193,17 +190,21 @@
193190 */
194191 local_save_flags(flags);
195192
196
- if (!msdata->active_cpus)
197
- is_active = cpu == cpumask_first(cpu_online_mask);
198
- else
199
- is_active = cpumask_test_cpu(cpu, msdata->active_cpus);
193
+ if (!msdata->active_cpus) {
194
+ cpumask = cpu_online_mask;
195
+ is_active = cpu == cpumask_first(cpumask);
196
+ } else {
197
+ cpumask = msdata->active_cpus;
198
+ is_active = cpumask_test_cpu(cpu, cpumask);
199
+ }
200200
201201 /* Simple state machine */
202202 do {
203203 /* Chill out and ensure we re-read multi_stop_state. */
204
- cpu_relax_yield();
205
- if (msdata->state != curstate) {
206
- curstate = msdata->state;
204
+ stop_machine_yield(cpumask);
205
+ newstate = READ_ONCE(msdata->state);
206
+ if (newstate != curstate) {
207
+ curstate = newstate;
207208 switch (curstate) {
208209 case MULTI_STOP_DISABLE_IRQ:
209210 local_irq_disable();
....@@ -225,6 +226,7 @@
225226 */
226227 touch_nmi_watchdog();
227228 }
229
+ rcu_momentary_dyntick_idle();
228230 } while (curstate != MULTI_STOP_EXIT);
229231
230232 local_irq_restore(flags);
....@@ -359,6 +361,55 @@
359361 *work_buf = (struct cpu_stop_work){ .fn = fn, .arg = arg, };
360362 return cpu_stop_queue_work(cpu, work_buf);
361363 }
364
+EXPORT_SYMBOL_GPL(stop_one_cpu_nowait);
365
+
366
+/**
367
+ * stop_one_cpu_async - stop a cpu and wait for completion in a separate
368
+ * function: stop_wait_work()
369
+ * @cpu: cpu to stop
370
+ * @fn: function to execute
371
+ * @arg: argument to @fn
372
+ * @work_buf: pointer to cpu_stop_work structure
373
+ *
374
+ * CONTEXT:
375
+ * Might sleep.
376
+ *
377
+ * RETURNS:
378
+ * 0 if cpu_stop_work was queued successfully and @fn will be called.
379
+ * -ENOENT if @fn(@arg) was not executed because @cpu was offline.
380
+ */
381
+int stop_one_cpu_async(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
382
+ struct cpu_stop_work *work_buf,
383
+ struct cpu_stop_done *done)
384
+{
385
+ cpu_stop_init_done(done, 1);
386
+
387
+ work_buf->done = done;
388
+ work_buf->fn = fn;
389
+ work_buf->arg = arg;
390
+
391
+ if (cpu_stop_queue_work(cpu, work_buf))
392
+ return 0;
393
+
394
+ work_buf->done = NULL;
395
+
396
+ return -ENOENT;
397
+}
398
+
399
+/**
400
+ * cpu_stop_work_wait - wait for a stop initiated by stop_one_cpu_async().
401
+ * @work_buf: pointer to cpu_stop_work structure
402
+ *
403
+ * CONTEXT:
404
+ * Might sleep.
405
+ */
406
+void cpu_stop_work_wait(struct cpu_stop_work *work_buf)
407
+{
408
+ struct cpu_stop_done *done = work_buf->done;
409
+
410
+ wait_for_completion(&done->completion);
411
+ work_buf->done = NULL;
412
+}
362413
363414 static bool queue_stop_cpus_work(const struct cpumask *cpumask,
364415 cpu_stop_fn_t fn, void *arg,
....@@ -375,6 +426,7 @@
375426 */
376427 preempt_disable();
377428 stop_cpus_in_progress = true;
429
+ barrier();
378430 for_each_cpu(cpu, cpumask) {
379431 work = &per_cpu(cpu_stopper.stop_work, cpu);
380432 work->fn = fn;
....@@ -383,6 +435,7 @@
383435 if (cpu_stop_queue_work(cpu, work))
384436 queued = true;
385437 }
438
+ barrier();
386439 stop_cpus_in_progress = false;
387440 preempt_enable();
388441
....@@ -429,42 +482,12 @@
429482 * @cpumask were offline; otherwise, 0 if all executions of @fn
430483 * returned 0, any non zero return value if any returned non zero.
431484 */
432
-int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
485
+static int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
433486 {
434487 int ret;
435488
436489 /* static works are used, process one request at a time */
437490 mutex_lock(&stop_cpus_mutex);
438
- ret = __stop_cpus(cpumask, fn, arg);
439
- mutex_unlock(&stop_cpus_mutex);
440
- return ret;
441
-}
442
-
443
-/**
444
- * try_stop_cpus - try to stop multiple cpus
445
- * @cpumask: cpus to stop
446
- * @fn: function to execute
447
- * @arg: argument to @fn
448
- *
449
- * Identical to stop_cpus() except that it fails with -EAGAIN if
450
- * someone else is already using the facility.
451
- *
452
- * CONTEXT:
453
- * Might sleep.
454
- *
455
- * RETURNS:
456
- * -EAGAIN if someone else is already stopping cpus, -ENOENT if
457
- * @fn(@arg) was not executed at all because all cpus in @cpumask were
458
- * offline; otherwise, 0 if all executions of @fn returned 0, any non
459
- * zero return value if any returned non zero.
460
- */
461
-int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
462
-{
463
- int ret;
464
-
465
- /* static works are used, process one request at a time */
466
- if (!mutex_trylock(&stop_cpus_mutex))
467
- return -EAGAIN;
468491 ret = __stop_cpus(cpumask, fn, arg);
469492 mutex_unlock(&stop_cpus_mutex);
470493 return ret;
....@@ -513,7 +536,7 @@
513536 }
514537 preempt_count_dec();
515538 WARN_ONCE(preempt_count(),
516
- "cpu_stop: %pf(%p) leaked preempt count\n", fn, arg);
539
+ "cpu_stop: %ps(%p) leaked preempt count\n", fn, arg);
517540 goto repeat;
518541 }
519542 }