--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * kernel/stop_machine.c
  *
@@ -5,9 +6,8 @@
  * Copyright (C) 2008, 2005 Rusty Russell rusty@rustcorp.com.au
  * Copyright (C) 2010 SUSE Linux Products GmbH
  * Copyright (C) 2010 Tejun Heo <tj@kernel.org>
- *
- * This file is released under the GPLv2 and any later version.
  */
+#include <linux/compiler.h>
 #include <linux/completion.h>
 #include <linux/cpu.h>
 #include <linux/init.h>
@@ -22,16 +22,7 @@
 #include <linux/atomic.h>
 #include <linux/nmi.h>
 #include <linux/sched/wake_q.h>
-
-/*
- * Structure to determine completion condition and record errors. May
- * be shared by works on different cpus.
- */
-struct cpu_stop_done {
-	atomic_t		nr_todo;	/* nr left to execute */
-	int			ret;		/* collected return value */
-	struct completion	completion;	/* fired if nr_todo reaches 0 */
-};
+#include <linux/slab.h>
 
 /* the actual stopper, one per every possible cpu, enabled on online cpus */
 struct cpu_stopper {
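The completion bookkeeping does not go away here: a later hunk adds stop_one_cpu_async(), whose callers must allocate a struct cpu_stop_done themselves, so the definition presumably moves into a shared header (likely include/linux/stop_machine.h) rather than staying private to this file.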
@@ -86,11 +77,8 @@ static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
 	enabled = stopper->enabled;
 	if (enabled)
 		__cpu_stop_queue_work(stopper, work, &wakeq);
-	else {
-		work->disabled = true;
-		if (work->done)
-			cpu_stop_signal_done(work->done);
-	}
+	else if (work->done)
+		cpu_stop_signal_done(work->done);
 	raw_spin_unlock_irqrestore(&stopper->lock, flags);
 
 	wake_up_q(&wakeq);
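Note that nothing sets work->disabled any more: callers learn about a disabled stopper from cpu_stop_queue_work()'s return value instead, so the flag can presumably be dropped from struct cpu_stop_work in the header as well.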
@@ -171,7 +159,7 @@ static void set_state(struct multi_stop_data *msdata,
 	/* Reset ack counter. */
 	atomic_set(&msdata->thread_ack, msdata->num_threads);
 	smp_wmb();
-	msdata->state = newstate;
+	WRITE_ONCE(msdata->state, newstate);
 }
 
 /* Last one to ack a state moves to the next state. */
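The smp_wmb()/WRITE_ONCE() pair on the writer side matches the READ_ONCE() added to the reader in multi_cpu_stop() below: the ack counter must be visibly reset before the new state is published, and the state word itself must be stored and loaded without tearing. Here is a minimal userspace analogue of that publish/consume handshake, expressed with C11 release/acquire atomics (at least as strong as the kernel primitives used above); all names in it are illustrative and not from the patch:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static int payload;		/* plays the role of thread_ack etc. */
static atomic_int state;	/* plays the role of msdata->state */

static void *writer(void *unused)
{
	(void)unused;
	payload = 42;		/* prepare everything the new state needs */
	/* release store: publishes the payload along with the state */
	atomic_store_explicit(&state, 1, memory_order_release);
	return NULL;
}

static void *reader(void *unused)
{
	(void)unused;
	/* acquire load: once we observe state == 1, payload is visible */
	while (atomic_load_explicit(&state, memory_order_acquire) != 1)
		;				/* spin, as multi_cpu_stop() does */
	printf("payload = %d\n", payload);	/* always prints 42 */
	return NULL;
}

int main(void)
{
	pthread_t w, r;

	pthread_create(&r, NULL, reader, NULL);
	pthread_create(&w, NULL, writer, NULL);
	pthread_join(w, NULL);
	pthread_join(r, NULL);
	return 0;
}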
@@ -181,12 +169,18 @@ static void ack_state(struct multi_stop_data *msdata)
 		set_state(msdata, msdata->state + 1);
 }
 
+notrace void __weak stop_machine_yield(const struct cpumask *cpumask)
+{
+	cpu_relax();
+}
+
 /* This is the cpu_stop function which stops the CPU. */
 static int multi_cpu_stop(void *data)
 {
 	struct multi_stop_data *msdata = data;
-	enum multi_stop_state curstate = MULTI_STOP_NONE;
+	enum multi_stop_state newstate, curstate = MULTI_STOP_NONE;
 	int cpu = smp_processor_id(), err = 0;
+	const struct cpumask *cpumask;
 	unsigned long flags;
 	bool is_active;
 
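stop_machine_yield() replaces the old cpu_relax_yield() hook: the generic __weak version just relaxes the CPU, but because it now receives the active cpumask, an architecture can override it and direct the yield at the sibling CPUs it is spinning on (useful under virtualization). A hypothetical sketch of such an override; smp_yield_to() is an assumed arch-specific primitive, not part of this patch:

/*
 * Hypothetical arch override (sketch only): stop_machine_yield() is
 * declared __weak above, so an architecture may supply its own
 * definition. smp_yield_to() is an assumed primitive that donates the
 * current virtual CPU's timeslice to @cpu.
 */
notrace void stop_machine_yield(const struct cpumask *cpumask)
{
	int this_cpu = smp_processor_id();
	int cpu;

	/* Find another CPU participating in this stop_machine run. */
	cpu = cpumask_next_wrap(this_cpu, cpumask, this_cpu, false);
	if (cpu < nr_cpu_ids)
		smp_yield_to(cpu);	/* assumed primitive, see above */
	else
		cpu_relax();
}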
@@ -196,17 +190,21 @@ static int multi_cpu_stop(void *data)
 	 */
 	local_save_flags(flags);
 
-	if (!msdata->active_cpus)
-		is_active = cpu == cpumask_first(cpu_online_mask);
-	else
-		is_active = cpumask_test_cpu(cpu, msdata->active_cpus);
+	if (!msdata->active_cpus) {
+		cpumask = cpu_online_mask;
+		is_active = cpu == cpumask_first(cpumask);
+	} else {
+		cpumask = msdata->active_cpus;
+		is_active = cpumask_test_cpu(cpu, cpumask);
+	}
 
 	/* Simple state machine */
 	do {
 		/* Chill out and ensure we re-read multi_stop_state. */
-		cpu_relax_yield();
-		if (msdata->state != curstate) {
-			curstate = msdata->state;
+		stop_machine_yield(cpumask);
+		newstate = READ_ONCE(msdata->state);
+		if (newstate != curstate) {
+			curstate = newstate;
 			switch (curstate) {
 			case MULTI_STOP_DISABLE_IRQ:
 				local_irq_disable();
@@ -228,6 +226,7 @@ static int multi_cpu_stop(void *data)
 			 */
 			touch_nmi_watchdog();
 		}
+		rcu_momentary_dyntick_idle();
 	} while (curstate != MULTI_STOP_EXIT);
 
 	local_irq_restore(flags);
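The rcu_momentary_dyntick_idle() call addresses a real hazard: every participating CPU spins in this loop with interrupts disabled, potentially for a long time, which would otherwise stall RCU grace periods. Invoking it once per iteration reports a momentary quiescent state on each pass.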
@@ -362,6 +361,55 @@ int stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
 	*work_buf = (struct cpu_stop_work){ .fn = fn, .arg = arg, };
 	return cpu_stop_queue_work(cpu, work_buf);
 }
+EXPORT_SYMBOL_GPL(stop_one_cpu_nowait);
+
+/**
+ * stop_one_cpu_async - stop a cpu and wait for completion in a separate
+ *			function: cpu_stop_work_wait()
+ * @cpu: cpu to stop
+ * @fn: function to execute
+ * @arg: argument to @fn
+ * @work_buf: pointer to cpu_stop_work structure
+ *
+ * CONTEXT:
+ * Might sleep.
+ *
+ * RETURNS:
+ * 0 if cpu_stop_work was queued successfully and @fn will be called.
+ * -ENOENT if @fn(@arg) was not executed because @cpu was offline.
+ */
+int stop_one_cpu_async(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
+		       struct cpu_stop_work *work_buf,
+		       struct cpu_stop_done *done)
+{
+	cpu_stop_init_done(done, 1);
+
+	work_buf->done = done;
+	work_buf->fn = fn;
+	work_buf->arg = arg;
+
+	if (cpu_stop_queue_work(cpu, work_buf))
+		return 0;
+
+	work_buf->done = NULL;
+
+	return -ENOENT;
+}
+
+/**
+ * cpu_stop_work_wait - wait for a stop initiated by stop_one_cpu_async().
+ * @work_buf: pointer to cpu_stop_work structure
+ *
+ * CONTEXT:
+ * Might sleep.
+ */
+void cpu_stop_work_wait(struct cpu_stop_work *work_buf)
+{
+	struct cpu_stop_done *done = work_buf->done;
+
+	wait_for_completion(&done->completion);
+	work_buf->done = NULL;
+}
 
 static bool queue_stop_cpus_work(const struct cpumask *cpumask,
 				 cpu_stop_fn_t fn, void *arg,
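A hedged usage sketch for the new pair: the caller supplies both the work and done structures (which is why struct cpu_stop_done had to leave this file), kicks off the stopper, can overlap other work, and reaps the result later. my_stop_fn() and example_user() are illustrative names, not part of the patch:

static int my_stop_fn(void *arg)	/* runs on the stopper thread */
{
	/* the target CPU is "stopped": runs with preemption disabled */
	return 0;
}

static int example_user(unsigned int cpu)
{
	struct cpu_stop_work work;
	struct cpu_stop_done done;
	int ret;

	ret = stop_one_cpu_async(cpu, my_stop_fn, NULL, &work, &done);
	if (ret)
		return ret;	/* -ENOENT: @cpu was offline */

	/* ... overlap other work here while my_stop_fn() runs ... */

	cpu_stop_work_wait(&work);	/* sleeps until my_stop_fn() completes */
	return done.ret;		/* collected return value */
}

Note that both structures must stay alive until cpu_stop_work_wait() returns, which the stack allocation above satisfies because the wait happens in the same frame.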
@@ -378,6 +426,7 @@ static bool queue_stop_cpus_work(const struct cpumask *cpumask,
 	 */
 	preempt_disable();
 	stop_cpus_in_progress = true;
+	barrier();
 	for_each_cpu(cpu, cpumask) {
 		work = &per_cpu(cpu_stopper.stop_work, cpu);
 		work->fn = fn;
@@ -386,6 +435,7 @@ static bool queue_stop_cpus_work(const struct cpumask *cpumask,
 		if (cpu_stop_queue_work(cpu, work))
 			queued = true;
 	}
+	barrier();
 	stop_cpus_in_progress = false;
 	preempt_enable();
 
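The two barrier() calls are compiler barriers only: stop_cpus_in_progress is a plain variable consulted as an advisory flag by the retry loop in cpu_stop_queue_two_works(), and without them the compiler would be free to move the flag updates across the queueing loop. No CPU-level ordering is needed here; preemption is disabled and the per-stopper lock provides the real synchronization.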
@@ -432,42 +482,12 @@
  * @cpumask were offline; otherwise, 0 if all executions of @fn
  * returned 0, any non zero return value if any returned non zero.
  */
-int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
+static int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
 {
 	int ret;
 
 	/* static works are used, process one request at a time */
 	mutex_lock(&stop_cpus_mutex);
-	ret = __stop_cpus(cpumask, fn, arg);
-	mutex_unlock(&stop_cpus_mutex);
-	return ret;
-}
-
-/**
- * try_stop_cpus - try to stop multiple cpus
- * @cpumask: cpus to stop
- * @fn: function to execute
- * @arg: argument to @fn
- *
- * Identical to stop_cpus() except that it fails with -EAGAIN if
- * someone else is already using the facility.
- *
- * CONTEXT:
- * Might sleep.
- *
- * RETURNS:
- * -EAGAIN if someone else is already stopping cpus, -ENOENT if
- * @fn(@arg) was not executed at all because all cpus in @cpumask were
- * offline; otherwise, 0 if all executions of @fn returned 0, any non
- * zero return value if any returned non zero.
- */
-int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
-{
-	int ret;
-
-	/* static works are used, process one request at a time */
-	if (!mutex_trylock(&stop_cpus_mutex))
-		return -EAGAIN;
 	ret = __stop_cpus(cpumask, fn, arg);
 	mutex_unlock(&stop_cpus_mutex);
 	return ret;
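stop_cpus() loses its external linkage and try_stop_cpus() disappears outright, presumably because their last outside callers are gone; after this change, stop_cpus() is only reachable through stop_machine_cpuslocked() in this file.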
@@ -516,7 +536,7 @@ static void cpu_stopper_thread(unsigned int cpu)
 		}
 		preempt_count_dec();
 		WARN_ONCE(preempt_count(),
-			  "cpu_stop: %pf(%p) leaked preempt count\n", fn, arg);
+			  "cpu_stop: %ps(%p) leaked preempt count\n", fn, arg);
 		goto repeat;
 	}
 }
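The last hunk is part of the kernel-wide removal of the %pf printk format: %ps prints the symbol name for a plain function pointer, and the two specifiers only ever differed on architectures with function descriptors (such as ia64 and ppc64 ELFv1), so %pf is dropped in favor of %ps.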