From 10ebd8556b7990499c896a550e3d416b444211e6 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Fri, 10 May 2024 02:23:07 +0000
Subject: [PATCH] add led

---
 kernel/arch/arm/kernel/smp.c | 207 +++++++++++++++++++++++++++++++--------------------
 1 files changed, 125 insertions(+), 82 deletions(-)

diff --git a/kernel/arch/arm/kernel/smp.c b/kernel/arch/arm/kernel/smp.c
index bc6e518..123432b 100644
--- a/kernel/arch/arm/kernel/smp.c
+++ b/kernel/arch/arm/kernel/smp.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * linux/arch/arm/kernel/smp.c
  *
  * Copyright (C) 2002 ARM Limited, All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
  */
 #include <linux/module.h>
 #include <linux/delay.h>
@@ -29,6 +26,7 @@
 #include <linux/completion.h>
 #include <linux/cpufreq.h>
 #include <linux/irq_work.h>
+#include <linux/kernel_stat.h>

 #include <linux/atomic.h>
 #include <asm/bugs.h>
@@ -40,8 +38,6 @@
 #include <asm/idmap.h>
 #include <asm/topology.h>
 #include <asm/mmu_context.h>
-#include <asm/pgtable.h>
-#include <asm/pgalloc.h>
 #include <asm/procinfo.h>
 #include <asm/processor.h>
 #include <asm/sections.h>
@@ -55,18 +51,16 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/ipi.h>

+EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_raise);
+EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_entry);
+EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_exit);
+
 /*
  * as from 2.5, kernels no longer have an init_tasks structure
  * so we need some other way of telling a new secondary core
  * where to place its SVC stack
  */
 struct secondary_data secondary_data;
-
-/*
- * control for which core is the next to come out of the secondary
- * boot "holding pen"
- */
-volatile int pen_release = -1;

 enum ipi_msg_type {
 	IPI_WAKEUP,
@@ -76,17 +70,25 @@
 	IPI_CPU_STOP,
 	IPI_IRQ_WORK,
 	IPI_COMPLETION,
+	NR_IPI,
 	/*
 	 * CPU_BACKTRACE is special and not included in NR_IPI
 	 * or tracable with trace_ipi_*
 	 */
-	IPI_CPU_BACKTRACE,
+	IPI_CPU_BACKTRACE = NR_IPI,
 	/*
 	 * SGI8-15 can be reserved by secure firmware, and thus may
 	 * not be usable by the kernel. Please keep the above limited
 	 * to at most 8 entries.
 	 */
+	MAX_IPI
 };
+
+static int ipi_irq_base __read_mostly;
+static int nr_ipi __read_mostly = NR_IPI;
+static struct irq_desc *ipi_desc[MAX_IPI] __read_mostly;
+
+static void ipi_setup(int cpu);

 static DECLARE_COMPLETION(cpu_running);
@@ -237,6 +239,17 @@
 	return cpu != 0;
 }

+static void ipi_teardown(int cpu)
+{
+	int i;
+
+	if (WARN_ON_ONCE(!ipi_irq_base))
+		return;
+
+	for (i = 0; i < nr_ipi; i++)
+		disable_percpu_irq(ipi_irq_base + i);
+}
+
 /*
  * __cpu_disable runs on the processor to be shutdown.
  */
@@ -249,11 +262,16 @@
 	if (ret)
 		return ret;

+#ifdef CONFIG_GENERIC_ARCH_TOPOLOGY
+	remove_cpu_topology(cpu);
+#endif
+
 	/*
 	 * Take this CPU offline. Once we clear this, we can't return,
 	 * and we must not schedule until we're ready to give up the cpu.
 	 */
 	set_cpu_online(cpu, false);
+	ipi_teardown(cpu);

 	/*
 	 * OK - migrate IRQs away from this CPU
@@ -273,15 +291,13 @@
 	return 0;
 }

-static DECLARE_COMPLETION(cpu_died);
-
 /*
  * called on the thread which is asking for a CPU to be shutdown -
  * waits until shutdown has completed, or it is timed out.
  */
 void __cpu_die(unsigned int cpu)
 {
-	if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
+	if (!cpu_wait_death(cpu, 5)) {
 		pr_err("CPU%u: cpu didn't die\n", cpu);
 		return;
 	}
@@ -328,7 +344,7 @@
 	 * this returns, power and/or clocks can be removed at any point
 	 * from this CPU and its cache by platform_cpu_kill().
 	 */
-	complete(&cpu_died);
+	(void)cpu_report_death();

 	/*
 	 * Ensure that the cache lines associated with that completion are
@@ -381,6 +397,7 @@
 	cpu_info->cpuid = read_cpuid_id();

 	store_cpu_topology(cpuid);
+	check_cpu_icache_size(cpuid);
 }

 /*
@@ -419,7 +436,6 @@
 #endif
 	pr_debug("CPU%u: Booted secondary processor\n", cpu);

-	preempt_disable();
 	trace_hardirqs_off();

 	/*
@@ -429,6 +445,8 @@
 	smp_ops.smp_secondary_init(cpu);

 	notify_cpu_starting(cpu);
+
+	ipi_setup(cpu);

 	calibrate_delay();
@@ -508,22 +526,6 @@
 	}
 }

-static void (*__smp_cross_call)(const struct cpumask *, unsigned int);
-
-void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
-{
-	if (!__smp_cross_call)
-		__smp_cross_call = fn;
-}
-
-static void (*__smp_update_ipi_history_cb)(int cpu);
-
-void set_update_ipi_history_callback(void (*fn)(int))
-{
-	__smp_update_ipi_history_cb = fn;
-}
-EXPORT_SYMBOL_GPL(set_update_ipi_history_callback);
-
 static const char *ipi_types[NR_IPI] __tracepoint_string = {
 #define S(x,s)	[x] = s
 	S(IPI_WAKEUP, "CPU wakeup interrupts"),
@@ -535,36 +537,26 @@
 	S(IPI_COMPLETION, "completion interrupts"),
 };

-static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
-{
-	trace_ipi_raise_rcuidle(target, ipi_types[ipinr]);
-	__smp_cross_call(target, ipinr);
-}
+static void smp_cross_call(const struct cpumask *target, unsigned int ipinr);

 void show_ipi_list(struct seq_file *p, int prec)
 {
 	unsigned int cpu, i;

 	for (i = 0; i < NR_IPI; i++) {
+		unsigned int irq;
+
+		if (!ipi_desc[i])
+			continue;
+
+		irq = irq_desc_get_irq(ipi_desc[i]);
 		seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);

 		for_each_online_cpu(cpu)
-			seq_printf(p, "%10u ",
-				   __get_irq_stat(cpu, ipi_irqs[i]));
+			seq_printf(p, "%10u ", kstat_irqs_cpu(irq, cpu));

 		seq_printf(p, " %s\n", ipi_types[i]);
 	}
-}
-
-u64 smp_irq_stat_cpu(unsigned int cpu)
-{
-	u64 sum = 0;
-	int i;
-
-	for (i = 0; i < NR_IPI; i++)
-		sum += __get_irq_stat(cpu, ipi_irqs[i]);
-
-	return sum;
 }

 void arch_send_call_function_ipi_mask(const struct cpumask *mask)
@@ -643,15 +635,12 @@
 	handle_IPI(ipinr, regs);
 }

-void handle_IPI(int ipinr, struct pt_regs *regs)
+static void do_handle_IPI(int ipinr)
 {
 	unsigned int cpu = smp_processor_id();
-	struct pt_regs *old_regs = set_irq_regs(regs);

-	if ((unsigned)ipinr < NR_IPI) {
+	if ((unsigned)ipinr < NR_IPI)
 		trace_ipi_entry_rcuidle(ipi_types[ipinr]);
-		__inc_irq_stat(cpu, ipi_irqs[ipinr]);
-	}

 	switch (ipinr) {
 	case IPI_WAKEUP:
@@ -659,9 +648,7 @@
 		break;

 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 	case IPI_TIMER:
-		irq_enter();
 		tick_receive_broadcast();
-		irq_exit();
 		break;
 #endif
@@ -670,36 +657,26 @@
 		break;

 	case IPI_CALL_FUNC:
-		irq_enter();
 		generic_smp_call_function_interrupt();
-		irq_exit();
 		break;

 	case IPI_CPU_STOP:
-		irq_enter();
 		ipi_cpu_stop(cpu);
-		irq_exit();
 		break;

 #ifdef CONFIG_IRQ_WORK
 	case IPI_IRQ_WORK:
-		irq_enter();
 		irq_work_run();
-		irq_exit();
 		break;
 #endif

 	case IPI_COMPLETION:
-		irq_enter();
 		ipi_complete(cpu);
-		irq_exit();
 		break;

 	case IPI_CPU_BACKTRACE:
 		printk_nmi_enter();
-		irq_enter();
-		nmi_cpu_backtrace(regs);
-		irq_exit();
+		nmi_cpu_backtrace(get_irq_regs());
 		printk_nmi_exit();
 		break;
@@ -711,13 +688,73 @@

 	if ((unsigned)ipinr < NR_IPI)
 		trace_ipi_exit_rcuidle(ipi_types[ipinr]);
+}
+
+/* Legacy version, should go away once all irqchips have been converted */
+void handle_IPI(int ipinr, struct pt_regs *regs)
+{
+	struct pt_regs *old_regs = set_irq_regs(regs);
+
+	irq_enter();
+	do_handle_IPI(ipinr);
+	irq_exit();
+
+	set_irq_regs(old_regs);
+}
+
+static irqreturn_t ipi_handler(int irq, void *data)
+{
+	do_handle_IPI(irq - ipi_irq_base);
+	return IRQ_HANDLED;
+}
+
+static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
+{
+	trace_ipi_raise_rcuidle(target, ipi_types[ipinr]);
+	__ipi_send_mask(ipi_desc[ipinr], target);
+}
+
+static void ipi_setup(int cpu)
+{
+	int i;
+
+	if (WARN_ON_ONCE(!ipi_irq_base))
+		return;
+
+	for (i = 0; i < nr_ipi; i++)
+		enable_percpu_irq(ipi_irq_base + i, 0);
+}
+
+void __init set_smp_ipi_range(int ipi_base, int n)
+{
+	int i;
+
+	WARN_ON(n < MAX_IPI);
+	nr_ipi = min(n, MAX_IPI);
+
+	for (i = 0; i < nr_ipi; i++) {
+		int err;
+
+		err = request_percpu_irq(ipi_base + i, ipi_handler,
+					 "IPI", &irq_stat);
+		WARN_ON(err);
+
+		ipi_desc[i] = irq_to_desc(ipi_base + i);
+		irq_set_status_flags(ipi_base + i, IRQ_HIDDEN);
+
+		/* The rescheduling IPI is special... */
+		if (i == IPI_RESCHEDULE)
+			__irq_modify_status(ipi_base + i, 0, IRQ_RAW, ~0);
+	}
+
+	ipi_irq_base = ipi_base;
+
+	/* Setup the boot CPU immediately */
+	ipi_setup(smp_processor_id());
 }

 void smp_send_reschedule(int cpu)
 {
-	if (__smp_update_ipi_history_cb)
-		__smp_update_ipi_history_cb(cpu);
 	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
 }
@@ -774,15 +811,20 @@
 					unsigned long val, void *data)
 {
 	struct cpufreq_freqs *freq = data;
-	int cpu = freq->cpu;
+	struct cpumask *cpus = freq->policy->cpus;
+	int cpu, first = cpumask_first(cpus);
+	unsigned int lpj;

 	if (freq->flags & CPUFREQ_CONST_LOOPS)
 		return NOTIFY_OK;

-	if (!per_cpu(l_p_j_ref, cpu)) {
-		per_cpu(l_p_j_ref, cpu) =
-			per_cpu(cpu_data, cpu).loops_per_jiffy;
-		per_cpu(l_p_j_ref_freq, cpu) = freq->old;
+	if (!per_cpu(l_p_j_ref, first)) {
+		for_each_cpu(cpu, cpus) {
+			per_cpu(l_p_j_ref, cpu) =
+				per_cpu(cpu_data, cpu).loops_per_jiffy;
+			per_cpu(l_p_j_ref_freq, cpu) = freq->old;
+		}
+
 		if (!global_l_p_j_ref) {
 			global_l_p_j_ref = loops_per_jiffy;
 			global_l_p_j_ref_freq = freq->old;
@@ -794,10 +836,11 @@
 		loops_per_jiffy = cpufreq_scale(global_l_p_j_ref,
 						global_l_p_j_ref_freq,
 						freq->new);
-		per_cpu(cpu_data, cpu).loops_per_jiffy =
-			cpufreq_scale(per_cpu(l_p_j_ref, cpu),
-				      per_cpu(l_p_j_ref_freq, cpu),
-				      freq->new);
+
+		lpj = cpufreq_scale(per_cpu(l_p_j_ref, first),
+				    per_cpu(l_p_j_ref_freq, first), freq->new);
+		for_each_cpu(cpu, cpus)
+			per_cpu(cpu_data, cpu).loops_per_jiffy = lpj;
 	}
 	return NOTIFY_OK;
 }
@@ -817,7 +860,7 @@

 static void raise_nmi(cpumask_t *mask)
 {
-	__smp_cross_call(mask, IPI_CPU_BACKTRACE);
+	__ipi_send_mask(ipi_desc[IPI_CPU_BACKTRACE], mask);
 }

 void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
--
Gitblit v1.6.2
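
Editor's note: the hunks above replace ARM's private __smp_cross_call() hook with IPIs that are ordinary per-CPU interrupts owned by the IRQ core. The sketch below illustrates how an irqchip driver is expected to use the new entry point; it is not part of the patch, and the "example_" names and the virq numbers are made up. set_smp_ipi_range(), request_percpu_irq() and enable_percpu_irq() are the interfaces the patch itself uses.

/*
 * Illustrative sketch only: an irqchip driver hands its SGI block to
 * the SMP core once during boot, instead of registering a cross-call
 * callback via the removed set_smp_cross_call().
 */
static void __init example_irqchip_smp_init(void)
{
	/*
	 * Assume the driver mapped SGI0..SGI7 to a contiguous block of
	 * eight Linux interrupts starting at this (hypothetical) virq.
	 */
	int example_sgi_base = 16;

	/*
	 * The SMP core then request_percpu_irq()s each of them with
	 * ipi_handler(), hides them from /proc/interrupts listing via
	 * IRQ_HIDDEN, and enables them on the boot CPU. Secondary CPUs
	 * enable their copies through ipi_setup() in
	 * secondary_start_kernel(); __cpu_disable() masks them again
	 * through ipi_teardown().
	 */
	set_smp_ipi_range(example_sgi_base, 8);
}

With this in place, smp_cross_call() reduces to __ipi_send_mask() on the matching irq_desc, and per-CPU IPI counts come from the generic kstat_irqs_cpu() accounting shown in show_ipi_list() rather than the old ipi_irqs[] counters.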