From d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Mon, 11 Dec 2023 02:45:28 +0000
Subject: [PATCH] add boot partition size

---
 kernel/kernel/stop_machine.c | 156 +++++++++++++++++++++++++++++++++------------
 1 files changed, 100 insertions(+), 56 deletions(-)

diff --git a/kernel/kernel/stop_machine.c b/kernel/kernel/stop_machine.c
index 067cb83..30395a6 100644
--- a/kernel/kernel/stop_machine.c
+++ b/kernel/kernel/stop_machine.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * kernel/stop_machine.c
  *
@@ -5,9 +6,8 @@
  * Copyright (C) 2008, 2005	Rusty Russell rusty@rustcorp.com.au
  * Copyright (C) 2010		SUSE Linux Products GmbH
  * Copyright (C) 2010		Tejun Heo <tj@kernel.org>
- *
- * This file is released under the GPLv2 and any later version.
  */
+#include <linux/compiler.h>
 #include <linux/completion.h>
 #include <linux/cpu.h>
 #include <linux/init.h>
@@ -22,16 +22,7 @@
 #include <linux/atomic.h>
 #include <linux/nmi.h>
 #include <linux/sched/wake_q.h>
-
-/*
- * Structure to determine completion condition and record errors. May
- * be shared by works on different cpus.
- */
-struct cpu_stop_done {
-	atomic_t		nr_todo;	/* nr left to execute */
-	int			ret;		/* collected return value */
-	struct completion	completion;	/* fired if nr_todo reaches 0 */
-};
+#include <linux/slab.h>
 
 /* the actual stopper, one per every possible cpu, enabled on online cpus */
 struct cpu_stopper {
@@ -42,10 +33,26 @@
 	struct list_head	works;		/* list of pending works */
 
 	struct cpu_stop_work	stop_work;	/* for stop_cpus */
+	unsigned long		caller;
+	cpu_stop_fn_t		fn;
 };
 
 static DEFINE_PER_CPU(struct cpu_stopper, cpu_stopper);
 static bool stop_machine_initialized = false;
+
+void print_stop_info(const char *log_lvl, struct task_struct *task)
+{
+	/*
+	 * If @task is a stopper task, it cannot migrate and task_cpu() is
+	 * stable.
+	 */
+	struct cpu_stopper *stopper = per_cpu_ptr(&cpu_stopper, task_cpu(task));
+
+	if (task != stopper->thread)
+		return;
+
+	printk("%sStopper: %pS <- %pS\n", log_lvl, stopper->fn, (void *)stopper->caller);
+}
 
 /* static data for stop_cpus */
 static DEFINE_MUTEX(stop_cpus_mutex);
@@ -123,7 +130,7 @@
 int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
 {
 	struct cpu_stop_done done;
-	struct cpu_stop_work work = { .fn = fn, .arg = arg, .done = &done };
+	struct cpu_stop_work work = { .fn = fn, .arg = arg, .done = &done, .caller = _RET_IP_ };
 
 	cpu_stop_init_done(&done, 1);
 	if (!cpu_stop_queue_work(cpu, &work))
@@ -168,7 +175,7 @@
 	/* Reset ack counter. */
 	atomic_set(&msdata->thread_ack, msdata->num_threads);
 	smp_wmb();
-	msdata->state = newstate;
+	WRITE_ONCE(msdata->state, newstate);
 }
 
 /* Last one to ack a state moves to the next state. */
@@ -178,12 +185,18 @@
 		set_state(msdata, msdata->state + 1);
 }
 
+notrace void __weak stop_machine_yield(const struct cpumask *cpumask)
+{
+	cpu_relax();
+}
+
 /* This is the cpu_stop function which stops the CPU.
  */
 static int multi_cpu_stop(void *data)
 {
 	struct multi_stop_data *msdata = data;
-	enum multi_stop_state curstate = MULTI_STOP_NONE;
+	enum multi_stop_state newstate, curstate = MULTI_STOP_NONE;
 	int cpu = smp_processor_id(), err = 0;
+	const struct cpumask *cpumask;
 	unsigned long flags;
 	bool is_active;
@@ -193,17 +206,21 @@
 	 */
 	local_save_flags(flags);
 
-	if (!msdata->active_cpus)
-		is_active = cpu == cpumask_first(cpu_online_mask);
-	else
-		is_active = cpumask_test_cpu(cpu, msdata->active_cpus);
+	if (!msdata->active_cpus) {
+		cpumask = cpu_online_mask;
+		is_active = cpu == cpumask_first(cpumask);
+	} else {
+		cpumask = msdata->active_cpus;
+		is_active = cpumask_test_cpu(cpu, cpumask);
+	}
 
 	/* Simple state machine */
 	do {
 		/* Chill out and ensure we re-read multi_stop_state. */
-		cpu_relax_yield();
-		if (msdata->state != curstate) {
-			curstate = msdata->state;
+		stop_machine_yield(cpumask);
+		newstate = READ_ONCE(msdata->state);
+		if (newstate != curstate) {
+			curstate = newstate;
 			switch (curstate) {
 			case MULTI_STOP_DISABLE_IRQ:
 				local_irq_disable();
@@ -225,6 +242,7 @@
 			 */
 			touch_nmi_watchdog();
 		}
+		rcu_momentary_dyntick_idle();
 	} while (curstate != MULTI_STOP_EXIT);
 
 	local_irq_restore(flags);
@@ -320,7 +338,8 @@
 	work1 = work2 = (struct cpu_stop_work){
 		.fn = multi_cpu_stop,
 		.arg = &msdata,
-		.done = &done
+		.done = &done,
+		.caller = _RET_IP_,
 	};
 
 	cpu_stop_init_done(&done, 2);
@@ -356,8 +375,57 @@
 bool stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
 			struct cpu_stop_work *work_buf)
 {
-	*work_buf = (struct cpu_stop_work){ .fn = fn, .arg = arg, };
+	*work_buf = (struct cpu_stop_work){ .fn = fn, .arg = arg, .caller = _RET_IP_, };
 	return cpu_stop_queue_work(cpu, work_buf);
+}
+EXPORT_SYMBOL_GPL(stop_one_cpu_nowait);
+
+/**
+ * stop_one_cpu_async - stop a cpu and wait for completion in a separate
+ *			function: cpu_stop_work_wait()
+ * @cpu: cpu to stop
+ * @fn: function to execute
+ * @arg: argument to @fn
+ * @work_buf: pointer to cpu_stop_work structure
+ *
+ * CONTEXT:
+ * Might sleep.
+ *
+ * RETURNS:
+ * 0 if cpu_stop_work was queued successfully and @fn will be called.
+ * -ENOENT if @fn(@arg) was not executed because @cpu was offline.
+ */
+int stop_one_cpu_async(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
+		       struct cpu_stop_work *work_buf,
+		       struct cpu_stop_done *done)
+{
+	cpu_stop_init_done(done, 1);
+
+	work_buf->done = done;
+	work_buf->fn = fn;
+	work_buf->arg = arg;
+
+	if (cpu_stop_queue_work(cpu, work_buf))
+		return 0;
+
+	work_buf->done = NULL;
+
+	return -ENOENT;
+}
+
+/**
+ * cpu_stop_work_wait - wait for a stop initiated by stop_one_cpu_async().
+ * @work_buf: pointer to cpu_stop_work structure
+ *
+ * CONTEXT:
+ * Might sleep.
+ */
+void cpu_stop_work_wait(struct cpu_stop_work *work_buf)
+{
+	struct cpu_stop_done *done = work_buf->done;
+
+	wait_for_completion(&done->completion);
+	work_buf->done = NULL;
 }
 
 static bool queue_stop_cpus_work(const struct cpumask *cpumask,
@@ -375,6 +443,7 @@
 	 */
 	preempt_disable();
 	stop_cpus_in_progress = true;
+	barrier();
 	for_each_cpu(cpu, cpumask) {
 		work = &per_cpu(cpu_stopper.stop_work, cpu);
 		work->fn = fn;
@@ -383,6 +452,7 @@
 		if (cpu_stop_queue_work(cpu, work))
 			queued = true;
 	}
+	barrier();
 	stop_cpus_in_progress = false;
 	preempt_enable();
 
@@ -429,42 +499,12 @@
  * @cpumask were offline; otherwise, 0 if all executions of @fn
  * returned 0, any non zero return value if any returned non zero.
  */
-int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
+static int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
 {
 	int ret;
 
 	/* static works are used, process one request at a time */
 	mutex_lock(&stop_cpus_mutex);
-	ret = __stop_cpus(cpumask, fn, arg);
-	mutex_unlock(&stop_cpus_mutex);
-	return ret;
-}
-
-/**
- * try_stop_cpus - try to stop multiple cpus
- * @cpumask: cpus to stop
- * @fn: function to execute
- * @arg: argument to @fn
- *
- * Identical to stop_cpus() except that it fails with -EAGAIN if
- * someone else is already using the facility.
- *
- * CONTEXT:
- * Might sleep.
- *
- * RETURNS:
- * -EAGAIN if someone else is already stopping cpus, -ENOENT if
- * @fn(@arg) was not executed at all because all cpus in @cpumask were
- * offline; otherwise, 0 if all executions of @fn returned 0, any non
- * zero return value if any returned non zero.
- */
-int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
-{
-	int ret;
-
-	/* static works are used, process one request at a time */
-	if (!mutex_trylock(&stop_cpus_mutex))
-		return -EAGAIN;
 	ret = __stop_cpus(cpumask, fn, arg);
 	mutex_unlock(&stop_cpus_mutex);
 	return ret;
@@ -504,6 +544,8 @@
 		int ret;
 
 		/* cpu stop callbacks must not sleep, make in_atomic() == T */
+		stopper->caller = work->caller;
+		stopper->fn = fn;
 		preempt_count_inc();
 		ret = fn(arg);
 		if (done) {
@@ -512,8 +554,10 @@
 			cpu_stop_signal_done(done);
 		}
 		preempt_count_dec();
+		stopper->fn = NULL;
+		stopper->caller = 0;
 		WARN_ONCE(preempt_count(),
-			  "cpu_stop: %pf(%p) leaked preempt count\n", fn, arg);
+			  "cpu_stop: %ps(%p) leaked preempt count\n", fn, arg);
 		goto repeat;
 	}
 }
-- 
Gitblit v1.6.2
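
Reviewer's note, not part of the patch: the new asynchronous pair stop_one_cpu_async()/cpu_stop_work_wait() splits queueing a stop work from waiting for it. The sketch below is a minimal, hypothetical usage example rather than code from this tree: example_stop_fn() and example_async_stop() are invented names, and it assumes struct cpu_stop_done is visible to callers through <linux/stop_machine.h>, which the removal of its local definition from stop_machine.c in this patch suggests.

/*
 * Illustrative sketch only: run a callback on a remote CPU via
 * stop_one_cpu_async(), overlap other work, then block in
 * cpu_stop_work_wait() until the callback has completed.
 */
#include <linux/errno.h>
#include <linux/smp.h>
#include <linux/stop_machine.h>

static int example_stop_fn(void *arg)
{
	int *ran_on = arg;

	/* Runs in the per-CPU stopper thread on the target CPU, non-preemptibly. */
	*ran_on = smp_processor_id();
	return 0;
}

static int example_async_stop(unsigned int cpu)
{
	struct cpu_stop_work work;
	struct cpu_stop_done done;
	int ran_on = -1;
	int ret;

	/* Queue the callback; -ENOENT means @cpu was offline. */
	ret = stop_one_cpu_async(cpu, example_stop_fn, &ran_on, &work, &done);
	if (ret)
		return ret;

	/* ... other preparation can overlap with the remote stop here ... */

	/* Block until example_stop_fn() has finished on @cpu. */
	cpu_stop_work_wait(&work);

	return ran_on == cpu ? 0 : -EIO;
}

Splitting queue and wait lets the caller overlap its own preparation with the remote callback, which the synchronous stop_one_cpu() cannot do.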