From b22da3d8526a935aa31e086e63f60ff3246cb61c Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Sat, 09 Dec 2023 07:24:11 +0000
Subject: [PATCH] MIPS: smp: use request_irq() for IPIs and MMID-based TLB flushes
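
This consolidates several mainline MIPS updates into one patch:

- IPI setup: drop the static struct irqaction definitions and pass a
  name and handler to smp_ipi_init_one() directly, using request_irq()
  and free_irq() in place of setup_irq() and remove_irq(). Callers
  change from

	smp_ipi_init_one(call_virq, &irq_call);

  to

	smp_ipi_init_one(call_virq, "IPI call", ipi_call_interrupt);

- Secondary bringup: build the sibling and core maps before marking
  the CPU online instead of after, and drop the redundant
  preempt_disable() (start_secondary() runs with preemption disabled
  throughout early bringup).

- smp_prepare_boot_cpu(): call the platform's prepare_boot_cpu() hook
  when one is provided.

- TLB flushes: on cores with MemoryMapIDs (cpu_has_mmid), use the
  globalized invalidate instructions (ginvt_full(), ginvt_va_mmid())
  from <asm/ginvt.h> instead of cross-CPU flush IPIs in
  flush_tlb_all(), flush_tlb_mm(), flush_tlb_range() and
  flush_tlb_page(). flush_tlb_range() rounds the range to
  PAGE_SIZE * 2 because MIPS TLB entries map even/odd page pairs.
  The non-MMID paths switch from assigning cpu_context() directly to
  set_cpu_context(), and from local_flush_tlb_mm() to
  drop_mmu_context(). The local flush in flush_tlb_range() and
  flush_tlb_page() moves into the non-MMID branches, since a
  globalized ginvt already covers the local CPU.

- Tick broadcast: drop the per-CPU tick_broadcast_count and send the
  broadcast CSD unconditionally, so tick_broadcast_callee() no longer
  needs to clear a counter.

The GPL notice in the file header is replaced by an SPDX identifier.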
---
kernel/arch/mips/kernel/smp.c | 132 +++++++++++++++++++++++++------------------
 1 file changed, 76 insertions(+), 56 deletions(-)
diff --git a/kernel/arch/mips/kernel/smp.c b/kernel/arch/mips/kernel/smp.c
index d84b906..14db66d 100644
--- a/kernel/arch/mips/kernel/smp.c
+++ b/kernel/arch/mips/kernel/smp.c
@@ -1,17 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* Copyright (C) 2000, 2001 Kanoj Sarcar
* Copyright (C) 2000, 2001 Ralf Baechle
@@ -39,6 +27,7 @@
#include <linux/atomic.h>
#include <asm/cpu.h>
+#include <asm/ginvt.h>
#include <asm/processor.h>
#include <asm/idle.h>
#include <asm/r4k-timer.h>
@@ -218,25 +207,13 @@
return IRQ_HANDLED;
}
-static struct irqaction irq_resched = {
- .handler = ipi_resched_interrupt,
- .flags = IRQF_PERCPU,
- .name = "IPI resched"
-};
-
-static struct irqaction irq_call = {
- .handler = ipi_call_interrupt,
- .flags = IRQF_PERCPU,
- .name = "IPI call"
-};
-
-static void smp_ipi_init_one(unsigned int virq,
- struct irqaction *action)
+static void smp_ipi_init_one(unsigned int virq, const char *name,
+ irq_handler_t handler)
{
int ret;
irq_set_handler(virq, handle_percpu_irq);
- ret = setup_irq(virq, action);
+ ret = request_irq(virq, handler, IRQF_PERCPU, name, NULL);
BUG_ON(ret);
}
@@ -289,12 +266,15 @@
int cpu;
for_each_cpu(cpu, mask) {
- smp_ipi_init_one(call_virq + cpu, &irq_call);
- smp_ipi_init_one(sched_virq + cpu, &irq_resched);
+ smp_ipi_init_one(call_virq + cpu, "IPI call",
+ ipi_call_interrupt);
+ smp_ipi_init_one(sched_virq + cpu, "IPI resched",
+ ipi_resched_interrupt);
}
} else {
- smp_ipi_init_one(call_virq, &irq_call);
- smp_ipi_init_one(sched_virq, &irq_resched);
+ smp_ipi_init_one(call_virq, "IPI call", ipi_call_interrupt);
+ smp_ipi_init_one(sched_virq, "IPI resched",
+ ipi_resched_interrupt);
}
return 0;
@@ -322,8 +302,8 @@
int cpu;
for_each_cpu(cpu, mask) {
- remove_irq(call_virq + cpu, &irq_call);
- remove_irq(sched_virq + cpu, &irq_resched);
+ free_irq(call_virq + cpu, NULL);
+ free_irq(sched_virq + cpu, NULL);
}
}
irq_destroy_ipi(call_virq, mask);
@@ -368,9 +348,11 @@
*/
calibrate_delay();
- preempt_disable();
cpu = smp_processor_id();
cpu_data[cpu].udelay_val = loops_per_jiffy;
+
+ set_cpu_sibling_map(cpu);
+ set_cpu_core_map(cpu);
cpumask_set_cpu(cpu, &cpu_coherent_mask);
notify_cpu_starting(cpu);
@@ -382,9 +364,6 @@
/* The CPU is running and counters synchronised, now mark it online */
set_cpu_online(cpu, true);
-
- set_cpu_sibling_map(cpu);
- set_cpu_core_map(cpu);
calculate_cpu_foreign_map();
@@ -443,6 +422,8 @@
/* preload SMP state for boot cpu */
void smp_prepare_boot_cpu(void)
{
+ if (mp_ops->prepare_boot_cpu)
+ mp_ops->prepare_boot_cpu();
set_cpu_possible(0, true);
set_cpu_online(0, true);
}
@@ -482,12 +463,21 @@
void flush_tlb_all(void)
{
+ if (cpu_has_mmid) {
+ htw_stop();
+ ginvt_full();
+ sync_ginv();
+ instruction_hazard();
+ htw_start();
+ return;
+ }
+
on_each_cpu(flush_tlb_all_ipi, NULL, 1);
}
static void flush_tlb_mm_ipi(void *mm)
{
- local_flush_tlb_mm((struct mm_struct *)mm);
+ drop_mmu_context((struct mm_struct *)mm);
}
/*
@@ -530,17 +520,22 @@
{
preempt_disable();
- if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
+ if (cpu_has_mmid) {
+ /*
+ * No need to worry about other CPUs - the ginvt in
+ * drop_mmu_context() will be globalized.
+ */
+ } else if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
smp_on_other_tlbs(flush_tlb_mm_ipi, mm);
} else {
unsigned int cpu;
for_each_online_cpu(cpu) {
if (cpu != smp_processor_id() && cpu_context(cpu, mm))
- cpu_context(cpu, mm) = 0;
+ set_cpu_context(cpu, mm, 0);
}
}
- local_flush_tlb_mm(mm);
+ drop_mmu_context(mm);
preempt_enable();
}
@@ -561,9 +556,26 @@
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
struct mm_struct *mm = vma->vm_mm;
+ unsigned long addr;
+ u32 old_mmid;
preempt_disable();
- if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
+ if (cpu_has_mmid) {
+ htw_stop();
+ old_mmid = read_c0_memorymapid();
+ write_c0_memorymapid(cpu_asid(0, mm));
+ mtc0_tlbw_hazard();
+ addr = round_down(start, PAGE_SIZE * 2);
+ end = round_up(end, PAGE_SIZE * 2);
+ do {
+ ginvt_va_mmid(addr);
+ sync_ginv();
+ addr += PAGE_SIZE * 2;
+ } while (addr < end);
+ write_c0_memorymapid(old_mmid);
+ instruction_hazard();
+ htw_start();
+ } else if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
struct flush_tlb_data fd = {
.vma = vma,
.addr1 = start,
@@ -571,6 +583,7 @@
};
smp_on_other_tlbs(flush_tlb_range_ipi, &fd);
+ local_flush_tlb_range(vma, start, end);
} else {
unsigned int cpu;
int exec = vma->vm_flags & VM_EXEC;
@@ -583,10 +596,10 @@
* mm has been completely unused by that CPU.
*/
if (cpu != smp_processor_id() && cpu_context(cpu, mm))
- cpu_context(cpu, mm) = !exec;
+ set_cpu_context(cpu, mm, !exec);
}
+ local_flush_tlb_range(vma, start, end);
}
- local_flush_tlb_range(vma, start, end);
preempt_enable();
}
@@ -616,14 +629,28 @@
void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
+ u32 old_mmid;
+
preempt_disable();
- if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
+ if (cpu_has_mmid) {
+ htw_stop();
+ old_mmid = read_c0_memorymapid();
+ write_c0_memorymapid(cpu_asid(0, vma->vm_mm));
+ mtc0_tlbw_hazard();
+ ginvt_va_mmid(page);
+ sync_ginv();
+ write_c0_memorymapid(old_mmid);
+ instruction_hazard();
+ htw_start();
+ } else if ((atomic_read(&vma->vm_mm->mm_users) != 1) ||
+ (current->mm != vma->vm_mm)) {
struct flush_tlb_data fd = {
.vma = vma,
.addr1 = page,
};
smp_on_other_tlbs(flush_tlb_page_ipi, &fd);
+ local_flush_tlb_page(vma, page);
} else {
unsigned int cpu;
@@ -635,10 +662,10 @@
* by that CPU.
*/
if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
- cpu_context(cpu, vma->vm_mm) = 1;
+ set_cpu_context(cpu, vma->vm_mm, 1);
}
+ local_flush_tlb_page(vma, page);
}
- local_flush_tlb_page(vma, page);
preempt_enable();
}
@@ -659,29 +686,22 @@
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
-static DEFINE_PER_CPU(atomic_t, tick_broadcast_count);
static DEFINE_PER_CPU(call_single_data_t, tick_broadcast_csd);
void tick_broadcast(const struct cpumask *mask)
{
- atomic_t *count;
call_single_data_t *csd;
int cpu;
for_each_cpu(cpu, mask) {
- count = &per_cpu(tick_broadcast_count, cpu);
csd = &per_cpu(tick_broadcast_csd, cpu);
-
- if (atomic_inc_return(count) == 1)
- smp_call_function_single_async(cpu, csd);
+ smp_call_function_single_async(cpu, csd);
}
}
static void tick_broadcast_callee(void *info)
{
- int cpu = smp_processor_id();
tick_receive_broadcast();
- atomic_set(&per_cpu(tick_broadcast_count, cpu), 0);
}
static int __init tick_broadcast_init(void)
--
Gitblit v1.6.2