From 6778948f9de86c3cfaf36725a7c87dcff9ba247f Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Mon, 11 Dec 2023 08:20:59 +0000
Subject: [PATCH] kernel_5.10 no rt
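
Replace the old spinwait bringup (__cpu_up_stack_pointer /
__cpu_up_task_pointer) with the cpu_ops based boot framework: harts are
probed from the devicetree with for_each_of_cpu_node(), logical cpu ids
are mapped to hartids, and secondaries are started through
cpu_ops[cpu]->cpu_start(). __cpu_up() now waits on a completion with a
one-second timeout instead of spinning on cpu_online(), smp_callin()
initialises CPU topology and drops the explicit preempt_disable(), and
the GPL boilerplate is replaced by an SPDX tag.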
---
kernel/arch/riscv/kernel/smpboot.c | 144 ++++++++++++++++++++++++++++++++++--------------
1 file changed, 102 insertions(+), 42 deletions(-)
diff --git a/kernel/arch/riscv/kernel/smpboot.c b/kernel/arch/riscv/kernel/smpboot.c
index 56abab6..0e0aed3 100644
--- a/kernel/arch/riscv/kernel/smpboot.c
+++ b/kernel/arch/riscv/kernel/smpboot.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* SMP initialisation and IPI support
* Based on arch/arm64/kernel/smp.c
@@ -5,17 +6,9 @@
* Copyright (C) 2012 ARM Ltd.
* Copyright (C) 2015 Regents of the University of California
* Copyright (C) 2017 SiFive
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
+#include <linux/arch_topology.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -30,62 +23,117 @@
#include <linux/irq.h>
#include <linux/of.h>
#include <linux/sched/task_stack.h>
+#include <linux/sched/mm.h>
+#include <asm/cpu_ops.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/sbi.h>
+#include <asm/smp.h>
-void *__cpu_up_stack_pointer[NR_CPUS];
-void *__cpu_up_task_pointer[NR_CPUS];
+#include "head.h"
+
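+/*
+ * Completed by each secondary hart in smp_callin() once it is online;
+ * __cpu_up() waits on it with a one-second timeout.
+ */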
+static DECLARE_COMPLETION(cpu_running);
void __init smp_prepare_boot_cpu(void)
{
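+	/* Parse the devicetree topology and initialise the topology masks. */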
+ init_cpu_topology();
}
void __init smp_prepare_cpus(unsigned int max_cpus)
{
+ int cpuid;
+ int ret;
+
+ store_cpu_topology(smp_processor_id());
+
+	/* This covers the non-SMP use case mandated by the "nosmp" option */
+ if (max_cpus == 0)
+ return;
+
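+	/*
+	 * Let each hart's boot method prepare it; only harts whose
+	 * cpu_prepare() call succeeds are marked present.
+	 */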
+ for_each_possible_cpu(cpuid) {
+ if (cpuid == smp_processor_id())
+ continue;
+ if (cpu_ops[cpuid]->cpu_prepare) {
+ ret = cpu_ops[cpuid]->cpu_prepare(cpuid);
+ if (ret)
+ continue;
+ }
+ set_cpu_present(cpuid, true);
+ }
}
void __init setup_smp(void)
{
- struct device_node *dn = NULL;
- int hart, im_okay_therefore_i_am = 0;
+ struct device_node *dn;
+ int hart;
+ bool found_boot_cpu = false;
+ int cpuid = 1;
- while ((dn = of_find_node_by_type(dn, "cpu"))) {
- hart = riscv_of_processor_hart(dn);
- if (hart >= 0) {
- set_cpu_possible(hart, true);
- set_cpu_present(hart, true);
- if (hart == smp_processor_id()) {
- BUG_ON(im_okay_therefore_i_am);
- im_okay_therefore_i_am = 1;
- }
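+	/* Register boot-method ops for the boot hart (logical CPU 0) first. */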
+ cpu_set_ops(0);
+
+ for_each_of_cpu_node(dn) {
+ hart = riscv_of_processor_hartid(dn);
+ if (hart < 0)
+ continue;
+
+ if (hart == cpuid_to_hartid_map(0)) {
+ BUG_ON(found_boot_cpu);
+ found_boot_cpu = 1;
+ continue;
}
+ if (cpuid >= NR_CPUS) {
+ pr_warn("Invalid cpuid [%d] for hartid [%d]\n",
+ cpuid, hart);
+ break;
+ }
+
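+	/* Record the logical cpu id to hartid mapping for this DT node. */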
+ cpuid_to_hartid_map(cpuid) = hart;
+ cpuid++;
}
- BUG_ON(!im_okay_therefore_i_am);
+ BUG_ON(!found_boot_cpu);
+
+ if (cpuid > nr_cpu_ids)
+ pr_warn("Total number of cpus [%d] is greater than nr_cpus option value [%d]\n",
+ cpuid, nr_cpu_ids);
+
+ for (cpuid = 1; cpuid < nr_cpu_ids; cpuid++) {
+ if (cpuid_to_hartid_map(cpuid) != INVALID_HARTID) {
+ cpu_set_ops(cpuid);
+ set_cpu_possible(cpuid, true);
+ }
+ }
+}
+
+static int start_secondary_cpu(int cpu, struct task_struct *tidle)
+{
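+	/* Hand off to whichever boot method registered ops for this hart. */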
+ if (cpu_ops[cpu]->cpu_start)
+ return cpu_ops[cpu]->cpu_start(cpu, tidle);
+
+ return -EOPNOTSUPP;
}
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
+ int ret = 0;
tidle->thread_info.cpu = cpu;
- /*
- * On RISC-V systems, all harts boot on their own accord. Our _start
- * selects the first hart to boot the kernel and causes the remainder
- * of the harts to spin in a loop waiting for their stack pointer to be
- * setup by that main hart. Writing __cpu_up_stack_pointer signals to
- * the spinning harts that they can continue the boot process.
- */
- smp_mb();
- __cpu_up_stack_pointer[cpu] = task_stack_page(tidle) + THREAD_SIZE;
- __cpu_up_task_pointer[cpu] = tidle;
+ ret = start_secondary_cpu(cpu, tidle);
+ if (!ret) {
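+		/* Give the new hart up to one second to mark itself online. */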
+ wait_for_completion_timeout(&cpu_running,
+ msecs_to_jiffies(1000));
- while (!cpu_online(cpu))
- cpu_relax();
+ if (!cpu_online(cpu)) {
+ pr_crit("CPU%u: failed to come online\n", cpu);
+ ret = -EIO;
+ }
+ } else {
+ pr_crit("CPU%u: failed to start\n", cpu);
+ }
- return 0;
+ return ret;
}
void __init smp_cpus_done(unsigned int max_cpus)
@@ -95,19 +143,31 @@
/*
* C entry point for a secondary processor.
*/
-asmlinkage void __init smp_callin(void)
+asmlinkage __visible void smp_callin(void)
{
struct mm_struct *mm = &init_mm;
+ unsigned int curr_cpuid = smp_processor_id();
+
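+	/* Clear any IPI that may still be pending on this hart. */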
+ riscv_clear_ipi();
/* All kernel threads share the same mm context. */
- atomic_inc(&mm->mm_count);
+ mmgrab(mm);
current->active_mm = mm;
- trap_init();
- notify_cpu_starting(smp_processor_id());
- set_cpu_online(smp_processor_id(), 1);
+ store_cpu_topology(curr_cpuid);
+ notify_cpu_starting(curr_cpuid);
+ set_cpu_online(curr_cpuid, 1);
+
+ /*
+ * Remote TLB flushes are ignored while the CPU is offline, so emit
+ * a local TLB flush right now just in case.
+ */
local_flush_tlb_all();
+ complete(&cpu_running);
+ /*
+ * Disable preemption before enabling interrupts, so we don't try to
+ * schedule a CPU that hasn't actually started yet.
+ */
local_irq_enable();
- preempt_disable();
cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
--
Gitblit v1.6.2