From 244b2c5ca8b14627e4a17755e5922221e121c771 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Wed, 09 Oct 2024 06:15:07 +0000
Subject: [PATCH] stop_machine: add stop_one_cpu_async()/cpu_stop_work_wait()

Add stop_one_cpu_async() and cpu_stop_work_wait() so callers can queue
stop work on a CPU and wait for its completion separately. Also switch
multi_cpu_stop() to stop_machine_yield() with READ_ONCE()/WRITE_ONCE()
on the state, report an RCU quiescent state in its loop, make
stop_cpus() static and drop try_stop_cpus(), add an SPDX license tag,
and use %ps instead of the deprecated %pf.
---
kernel/kernel/stop_machine.c | 136 ++++++++++++++++++++++++++-------------------
1 file changed, 78 insertions(+), 58 deletions(-)
diff --git a/kernel/kernel/stop_machine.c b/kernel/kernel/stop_machine.c
index 2d15c0d..c65cfb7 100644
--- a/kernel/kernel/stop_machine.c
+++ b/kernel/kernel/stop_machine.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* kernel/stop_machine.c
*
@@ -5,9 +6,8 @@
* Copyright (C) 2008, 2005 Rusty Russell rusty@rustcorp.com.au
* Copyright (C) 2010 SUSE Linux Products GmbH
* Copyright (C) 2010 Tejun Heo <tj@kernel.org>
- *
- * This file is released under the GPLv2 and any later version.
*/
+#include <linux/compiler.h>
#include <linux/completion.h>
#include <linux/cpu.h>
#include <linux/init.h>
@@ -22,16 +22,7 @@
#include <linux/atomic.h>
#include <linux/nmi.h>
#include <linux/sched/wake_q.h>
-
-/*
- * Structure to determine completion condition and record errors. May
- * be shared by works on different cpus.
- */
-struct cpu_stop_done {
- atomic_t nr_todo; /* nr left to execute */
- int ret; /* collected return value */
- struct completion completion; /* fired if nr_todo reaches 0 */
-};
+#include <linux/slab.h>
/* the actual stopper, one per every possible cpu, enabled on online cpus */
struct cpu_stopper {
@@ -86,11 +77,8 @@
enabled = stopper->enabled;
if (enabled)
__cpu_stop_queue_work(stopper, work, &wakeq);
- else {
- work->disabled = true;
- if (work->done)
- cpu_stop_signal_done(work->done);
- }
+ else if (work->done)
+ cpu_stop_signal_done(work->done);
raw_spin_unlock_irqrestore(&stopper->lock, flags);
wake_up_q(&wakeq);
@@ -171,7 +159,7 @@
/* Reset ack counter. */
atomic_set(&msdata->thread_ack, msdata->num_threads);
smp_wmb();
- msdata->state = newstate;
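+ /*
+ * Pairs with the READ_ONCE() in multi_cpu_stop(); the smp_wmb() above
+ * makes the ack-counter reset visible before the new state is published.
+ */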
+ WRITE_ONCE(msdata->state, newstate);
}
/* Last one to ack a state moves to the next state. */
@@ -181,12 +169,18 @@
set_state(msdata, msdata->state + 1);
}
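+/*
+ * Default yield for CPUs spinning in multi_cpu_stop(); architectures can
+ * override this weak helper (e.g. to yield to the hypervisor on
+ * virtualized systems).
+ */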
+notrace void __weak stop_machine_yield(const struct cpumask *cpumask)
+{
+ cpu_relax();
+}
+
/* This is the cpu_stop function which stops the CPU. */
static int multi_cpu_stop(void *data)
{
struct multi_stop_data *msdata = data;
- enum multi_stop_state curstate = MULTI_STOP_NONE;
+ enum multi_stop_state newstate, curstate = MULTI_STOP_NONE;
int cpu = smp_processor_id(), err = 0;
+ const struct cpumask *cpumask;
unsigned long flags;
bool is_active;
@@ -196,17 +190,21 @@
*/
local_save_flags(flags);
- if (!msdata->active_cpus)
- is_active = cpu == cpumask_first(cpu_online_mask);
- else
- is_active = cpumask_test_cpu(cpu, msdata->active_cpus);
+ if (!msdata->active_cpus) {
+ cpumask = cpu_online_mask;
+ is_active = cpu == cpumask_first(cpumask);
+ } else {
+ cpumask = msdata->active_cpus;
+ is_active = cpumask_test_cpu(cpu, cpumask);
+ }
/* Simple state machine */
do {
/* Chill out and ensure we re-read multi_stop_state. */
- cpu_relax_yield();
- if (msdata->state != curstate) {
- curstate = msdata->state;
+ stop_machine_yield(cpumask);
+ newstate = READ_ONCE(msdata->state);
+ if (newstate != curstate) {
+ curstate = newstate;
switch (curstate) {
case MULTI_STOP_DISABLE_IRQ:
local_irq_disable();
@@ -228,6 +226,7 @@
*/
touch_nmi_watchdog();
}
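+ /*
+ * Report a momentary quiescent state to RCU on each pass so that
+ * long stop_machine operations do not trigger RCU stall warnings.
+ */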
+ rcu_momentary_dyntick_idle();
} while (curstate != MULTI_STOP_EXIT);
local_irq_restore(flags);
@@ -362,6 +361,55 @@
*work_buf = (struct cpu_stop_work){ .fn = fn, .arg = arg, };
return cpu_stop_queue_work(cpu, work_buf);
}
+EXPORT_SYMBOL_GPL(stop_one_cpu_nowait);
+
+/**
+ * stop_one_cpu_async - stop a cpu and wait for completion in a separate
+ * function: cpu_stop_work_wait()
+ * @cpu: cpu to stop
+ * @fn: function to execute
+ * @arg: argument to @fn
+ * @work_buf: pointer to cpu_stop_work structure
+ * @done: pointer to cpu_stop_done structure, signalled when @fn completes
+ *
+ * CONTEXT:
+ * Might sleep.
+ *
+ * RETURNS:
+ * 0 if cpu_stop_work was queued successfully and @fn will be called.
+ * -ENOENT if @fn(@arg) was not executed because @cpu was offline.
+ */
+int stop_one_cpu_async(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
+ struct cpu_stop_work *work_buf,
+ struct cpu_stop_done *done)
+{
+ cpu_stop_init_done(done, 1);
+
+ work_buf->done = done;
+ work_buf->fn = fn;
+ work_buf->arg = arg;
+
+ if (cpu_stop_queue_work(cpu, work_buf))
+ return 0;
+
+ work_buf->done = NULL;
+
+ return -ENOENT;
+}
+
+/**
+ * cpu_stop_work_wait - wait for a stop initiated by stop_one_cpu_async().
+ * @work_buf: pointer to cpu_stop_work structure
+ *
+ * CONTEXT:
+ * Might sleep.
+ */
+void cpu_stop_work_wait(struct cpu_stop_work *work_buf)
+{
+ struct cpu_stop_done *done = work_buf->done;
+
+ wait_for_completion(&done->completion);
+ work_buf->done = NULL;
+}
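+
+/*
+ * Example pairing of the two helpers above (a minimal sketch; @cpu, @fn and
+ * @arg are caller-provided, and both structures must stay valid until the
+ * wait returns):
+ *
+ *	struct cpu_stop_work work;
+ *	struct cpu_stop_done done;
+ *
+ *	if (!stop_one_cpu_async(cpu, fn, arg, &work, &done))
+ *		cpu_stop_work_wait(&work);
+ *
+ * Only call cpu_stop_work_wait() when stop_one_cpu_async() returned 0.
+ */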
static bool queue_stop_cpus_work(const struct cpumask *cpumask,
cpu_stop_fn_t fn, void *arg,
@@ -378,6 +426,7 @@
*/
preempt_disable();
stop_cpus_in_progress = true;
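+ /*
+ * barrier() on both sides keeps the compiler from moving the
+ * stop_cpus_in_progress updates across the queueing loop.
+ */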
+ barrier();
for_each_cpu(cpu, cpumask) {
work = &per_cpu(cpu_stopper.stop_work, cpu);
work->fn = fn;
@@ -386,6 +435,7 @@
if (cpu_stop_queue_work(cpu, work))
queued = true;
}
+ barrier();
stop_cpus_in_progress = false;
preempt_enable();
@@ -432,42 +482,12 @@
* @cpumask were offline; otherwise, 0 if all executions of @fn
* returned 0, any non zero return value if any returned non zero.
*/
-int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
+static int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
{
int ret;
/* static works are used, process one request at a time */
mutex_lock(&stop_cpus_mutex);
- ret = __stop_cpus(cpumask, fn, arg);
- mutex_unlock(&stop_cpus_mutex);
- return ret;
-}
-
-/**
- * try_stop_cpus - try to stop multiple cpus
- * @cpumask: cpus to stop
- * @fn: function to execute
- * @arg: argument to @fn
- *
- * Identical to stop_cpus() except that it fails with -EAGAIN if
- * someone else is already using the facility.
- *
- * CONTEXT:
- * Might sleep.
- *
- * RETURNS:
- * -EAGAIN if someone else is already stopping cpus, -ENOENT if
- * @fn(@arg) was not executed at all because all cpus in @cpumask were
- * offline; otherwise, 0 if all executions of @fn returned 0, any non
- * zero return value if any returned non zero.
- */
-int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
-{
- int ret;
-
- /* static works are used, process one request at a time */
- if (!mutex_trylock(&stop_cpus_mutex))
- return -EAGAIN;
ret = __stop_cpus(cpumask, fn, arg);
mutex_unlock(&stop_cpus_mutex);
return ret;
@@ -516,7 +536,7 @@
}
preempt_count_dec();
WARN_ONCE(preempt_count(),
- "cpu_stop: %pf(%p) leaked preempt count\n", fn, arg);
+ "cpu_stop: %ps(%p) leaked preempt count\n", fn, arg);
goto repeat;
}
}
--
Gitblit v1.6.2