2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/kernel/stop_machine.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * kernel/stop_machine.c
  *
@@ -5,9 +6,8 @@
  * Copyright (C) 2008, 2005	Rusty Russell rusty@rustcorp.com.au
  * Copyright (C) 2010		SUSE Linux Products GmbH
  * Copyright (C) 2010		Tejun Heo <tj@kernel.org>
- *
- * This file is released under the GPLv2 and any later version.
  */
+#include <linux/compiler.h>
 #include <linux/completion.h>
 #include <linux/cpu.h>
 #include <linux/init.h>
@@ -22,16 +22,7 @@
 #include <linux/atomic.h>
 #include <linux/nmi.h>
 #include <linux/sched/wake_q.h>
-
-/*
- * Structure to determine completion condition and record errors. May
- * be shared by works on different cpus.
- */
-struct cpu_stop_done {
-	atomic_t		nr_todo;	/* nr left to execute */
-	int			ret;		/* collected return value */
-	struct completion	completion;	/* fired if nr_todo reaches 0 */
-};
+#include <linux/slab.h>
 
 /* the actual stopper, one per every possible cpu, enabled on online cpus */
 struct cpu_stopper {
@@ -42,10 +33,26 @@
 	struct list_head	works;		/* list of pending works */
 
 	struct cpu_stop_work	stop_work;	/* for stop_cpus */
+	unsigned long		caller;
+	cpu_stop_fn_t		fn;
 };
 
 static DEFINE_PER_CPU(struct cpu_stopper, cpu_stopper);
 static bool stop_machine_initialized = false;
+
+void print_stop_info(const char *log_lvl, struct task_struct *task)
+{
+	/*
+	 * If @task is a stopper task, it cannot migrate and task_cpu() is
+	 * stable.
+	 */
+	struct cpu_stopper *stopper = per_cpu_ptr(&cpu_stopper, task_cpu(task));
+
+	if (task != stopper->thread)
+		return;
+
+	printk("%sStopper: %pS <- %pS\n", log_lvl, stopper->fn, (void *)stopper->caller);
+}
 
 /* static data for stop_cpus */
 static DEFINE_MUTEX(stop_cpus_mutex);
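Note: the new print_stop_info() lets a debug path report what a stopper thread is currently running; for any non-stopper task it returns silently, so callers need no guard. A minimal usage sketch, not part of this patch (the helper name show_blocking_task() is illustrative):

	/* Hypothetical debug helper: safe to call on any task. */
	static void show_blocking_task(struct task_struct *p)
	{
		/* Prints "Stopper: <fn> <- <caller>" only if p is a stopper thread. */
		print_stop_info(KERN_INFO, p);
	}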
@@ -86,11 +93,8 @@
 	enabled = stopper->enabled;
 	if (enabled)
 		__cpu_stop_queue_work(stopper, work, &wakeq);
-	else {
-		work->disabled = true;
-		if (work->done)
-			cpu_stop_signal_done(work->done);
-	}
+	else if (work->done)
+		cpu_stop_signal_done(work->done);
 	raw_spin_unlock_irqrestore(&stopper->lock, flags);
 
 	wake_up_q(&wakeq);
@@ -126,7 +130,7 @@
 int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
 {
 	struct cpu_stop_done done;
-	struct cpu_stop_work work = { .fn = fn, .arg = arg, .done = &done };
+	struct cpu_stop_work work = { .fn = fn, .arg = arg, .done = &done, .caller = _RET_IP_ };
 
 	cpu_stop_init_done(&done, 1);
 	if (!cpu_stop_queue_work(cpu, &work))
@@ -171,7 +175,7 @@
 	/* Reset ack counter. */
 	atomic_set(&msdata->thread_ack, msdata->num_threads);
 	smp_wmb();
-	msdata->state = newstate;
+	WRITE_ONCE(msdata->state, newstate);
 }
 
 /* Last one to ack a state moves to the next state. */
@@ -181,12 +185,18 @@
 		set_state(msdata, msdata->state + 1);
 }
 
+notrace void __weak stop_machine_yield(const struct cpumask *cpumask)
+{
+	cpu_relax();
+}
+
 /* This is the cpu_stop function which stops the CPU. */
 static int multi_cpu_stop(void *data)
 {
 	struct multi_stop_data *msdata = data;
-	enum multi_stop_state curstate = MULTI_STOP_NONE;
+	enum multi_stop_state newstate, curstate = MULTI_STOP_NONE;
 	int cpu = smp_processor_id(), err = 0;
+	const struct cpumask *cpumask;
 	unsigned long flags;
 	bool is_active;
 
@@ -196,17 +206,21 @@
 	 */
 	local_save_flags(flags);
 
-	if (!msdata->active_cpus)
-		is_active = cpu == cpumask_first(cpu_online_mask);
-	else
-		is_active = cpumask_test_cpu(cpu, msdata->active_cpus);
+	if (!msdata->active_cpus) {
+		cpumask = cpu_online_mask;
+		is_active = cpu == cpumask_first(cpumask);
+	} else {
+		cpumask = msdata->active_cpus;
+		is_active = cpumask_test_cpu(cpu, cpumask);
+	}
 
 	/* Simple state machine */
 	do {
 		/* Chill out and ensure we re-read multi_stop_state. */
-		cpu_relax_yield();
-		if (msdata->state != curstate) {
-			curstate = msdata->state;
+		stop_machine_yield(cpumask);
+		newstate = READ_ONCE(msdata->state);
+		if (newstate != curstate) {
+			curstate = newstate;
 			switch (curstate) {
 			case MULTI_STOP_DISABLE_IRQ:
 				local_irq_disable();
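Note: the switch from plain loads/stores on msdata->state to READ_ONCE()/WRITE_ONCE() keeps the compiler from tearing the access or hoisting the load out of the busy-wait loop. A distilled sketch of the pattern, separate from the patch itself:

	static int flag;	/* shared between two CPUs */

	void waiter(void)
	{
		while (!READ_ONCE(flag))	/* forces a fresh load each pass */
			cpu_relax();
	}

	void signaller(void)
	{
		WRITE_ONCE(flag, 1);		/* single, untorn store */
	}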
@@ -228,6 +242,7 @@
 			 */
 			touch_nmi_watchdog();
 		}
+		rcu_momentary_dyntick_idle();
 	} while (curstate != MULTI_STOP_EXIT);
 
 	local_irq_restore(flags);
323338 work1 = work2 = (struct cpu_stop_work){
324339 .fn = multi_cpu_stop,
325340 .arg = &msdata,
326
- .done = &done
341
+ .done = &done,
342
+ .caller = _RET_IP_,
327343 };
328344
329345 cpu_stop_init_done(&done, 2);
@@ -359,8 +375,57 @@
 bool stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
 			struct cpu_stop_work *work_buf)
 {
-	*work_buf = (struct cpu_stop_work){ .fn = fn, .arg = arg, };
+	*work_buf = (struct cpu_stop_work){ .fn = fn, .arg = arg, .caller = _RET_IP_, };
 	return cpu_stop_queue_work(cpu, work_buf);
+}
+EXPORT_SYMBOL_GPL(stop_one_cpu_nowait);
+
+/**
+ * stop_one_cpu_async - stop a cpu, with completion waited on in a separate
+ *			function: cpu_stop_work_wait()
+ * @cpu: cpu to stop
+ * @fn: function to execute
+ * @arg: argument to @fn
+ * @work_buf: pointer to cpu_stop_work structure
+ *
+ * CONTEXT:
+ * Might sleep.
+ *
+ * RETURNS:
+ * 0 if cpu_stop_work was queued successfully and @fn will be called.
+ * -ENOENT if @fn(@arg) was not executed because @cpu was offline.
+ */
+int stop_one_cpu_async(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
+		       struct cpu_stop_work *work_buf,
+		       struct cpu_stop_done *done)
+{
+	cpu_stop_init_done(done, 1);
+
+	work_buf->done = done;
+	work_buf->fn = fn;
+	work_buf->arg = arg;
+
+	if (cpu_stop_queue_work(cpu, work_buf))
+		return 0;
+
+	work_buf->done = NULL;
+
+	return -ENOENT;
+}
+
+/**
+ * cpu_stop_work_wait - wait for a stop initiated by stop_one_cpu_async().
+ * @work_buf: pointer to cpu_stop_work structure
+ *
+ * CONTEXT:
+ * Might sleep.
+ */
+void cpu_stop_work_wait(struct cpu_stop_work *work_buf)
+{
+	struct cpu_stop_done *done = work_buf->done;
+
+	wait_for_completion(&done->completion);
+	work_buf->done = NULL;
 }
 
 static bool queue_stop_cpus_work(const struct cpumask *cpumask,
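Note: the new stop_one_cpu_async()/cpu_stop_work_wait() pair splits stop_one_cpu() into a queueing step and a waiting step, so several CPUs can be stopped in parallel and reaped afterwards. A hedged usage sketch (my_fn and the local names are illustrative, not from the patch):

	static int my_fn(void *arg)
	{
		return 0;	/* runs with the target CPU stopped */
	}

	static void stop_two_cpus_in_parallel(void)
	{
		struct cpu_stop_work w0, w1;
		struct cpu_stop_done d0, d1;
		int q0, q1;

		/* Queue both stops before waiting on either. */
		q0 = stop_one_cpu_async(0, my_fn, NULL, &w0, &d0);
		q1 = stop_one_cpu_async(1, my_fn, NULL, &w1, &d1);

		/* Only wait for work that was actually queued (0 = queued). */
		if (!q0)
			cpu_stop_work_wait(&w0);
		if (!q1)
			cpu_stop_work_wait(&w1);
	}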
@@ -378,6 +443,7 @@
 	 */
 	preempt_disable();
 	stop_cpus_in_progress = true;
+	barrier();
 	for_each_cpu(cpu, cpumask) {
 		work = &per_cpu(cpu_stopper.stop_work, cpu);
 		work->fn = fn;
@@ -386,6 +452,7 @@
 		if (cpu_stop_queue_work(cpu, work))
 			queued = true;
 	}
+	barrier();
 	stop_cpus_in_progress = false;
 	preempt_enable();
 
@@ -432,42 +499,12 @@
  * @cpumask were offline; otherwise, 0 if all executions of @fn
  * returned 0, any non zero return value if any returned non zero.
  */
-int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
+static int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
 {
 	int ret;
 
 	/* static works are used, process one request at a time */
 	mutex_lock(&stop_cpus_mutex);
-	ret = __stop_cpus(cpumask, fn, arg);
-	mutex_unlock(&stop_cpus_mutex);
-	return ret;
-}
-
-/**
- * try_stop_cpus - try to stop multiple cpus
- * @cpumask: cpus to stop
- * @fn: function to execute
- * @arg: argument to @fn
- *
- * Identical to stop_cpus() except that it fails with -EAGAIN if
- * someone else is already using the facility.
- *
- * CONTEXT:
- * Might sleep.
- *
- * RETURNS:
- * -EAGAIN if someone else is already stopping cpus, -ENOENT if
- * @fn(@arg) was not executed at all because all cpus in @cpumask were
- * offline; otherwise, 0 if all executions of @fn returned 0, any non
- * zero return value if any returned non zero.
- */
-int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
-{
-	int ret;
-
-	/* static works are used, process one request at a time */
-	if (!mutex_trylock(&stop_cpus_mutex))
-		return -EAGAIN;
 	ret = __stop_cpus(cpumask, fn, arg);
 	mutex_unlock(&stop_cpus_mutex);
 	return ret;
@@ -507,6 +544,8 @@
 		int ret;
 
 		/* cpu stop callbacks must not sleep, make in_atomic() == T */
+		stopper->caller = work->caller;
+		stopper->fn = fn;
 		preempt_count_inc();
 		ret = fn(arg);
 		if (done) {
@@ -515,8 +554,10 @@
 			cpu_stop_signal_done(done);
 		}
 		preempt_count_dec();
+		stopper->fn = NULL;
+		stopper->caller = 0;
 		WARN_ONCE(preempt_count(),
-			  "cpu_stop: %pf(%p) leaked preempt count\n", fn, arg);
+			  "cpu_stop: %ps(%p) leaked preempt count\n", fn, arg);
 		goto repeat;
 	}
 }