From 071106ecf68c401173c58808b1cf5f68cc50d390 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Fri, 05 Jan 2024 08:39:27 +0000
Subject: [PATCH] MIPS: traps: sync exception handling with newer upstream kernels
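
Sync arch/mips/kernel/traps.c with newer upstream kernel code:

- Plumb a printk log level (loglvl) through show_stack(),
  show_backtrace(), show_raw_backtrace() and show_stacktrace() so
  that callers pass a KERN_* string explicitly.
- Replace the removed bootmem allocator with memblock when reserving
  or allocating the exception vector area, and flush vec_size bytes
  instead of a hard-coded 0x400.
- Drop the task argument from force_sig()/force_sig_fault(), use
  force_sig_fault_to_task() where a specific task is targeted, and
  call make_task_dead() instead of do_exit() when dying.
- Add Loongson-3 CPUCFG instruction emulation and a handler for the
  Loongson GSExc exception; follow the CPU_LOONGSON32/CPU_LOONGSON64
  renames.
- Gate FP/MSA paths behind CONFIG_MIPS_FP_SUPPORT, restore MSACSR and
  the MSA upper halves when taking ownership of the FPU, and switch
  to cpus_mask, mmap_read_lock()/mmap_read_unlock() and the
  fallthrough keyword.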
---
kernel/arch/mips/kernel/traps.c | 422 +++++++++++++++++++++++++++++++++-------------------
1 file changed, 266 insertions(+), 156 deletions(-)
diff --git a/kernel/arch/mips/kernel/traps.c b/kernel/arch/mips/kernel/traps.c
index b9da2ce..ebd0101 100644
--- a/kernel/arch/mips/kernel/traps.c
+++ b/kernel/arch/mips/kernel/traps.c
@@ -28,7 +28,7 @@
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/kgdb.h>
@@ -50,13 +50,13 @@
#include <asm/fpu.h>
#include <asm/fpu_emulator.h>
#include <asm/idle.h>
+#include <asm/isa-rev.h>
#include <asm/mips-cps.h>
#include <asm/mips-r2-to-r6-emul.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/module.h>
#include <asm/msa.h>
-#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/siginfo.h>
@@ -69,6 +69,8 @@
#include <asm/stacktrace.h>
#include <asm/tlbex.h>
#include <asm/uasm.h>
+
+#include <asm/mach-loongson64/cpucfg-emul.h>
extern void check_wait(void);
extern asmlinkage void rollback_handle_int(void);
@@ -88,6 +90,7 @@
extern asmlinkage void handle_msa_fpe(void);
extern asmlinkage void handle_fpe(void);
extern asmlinkage void handle_ftlb(void);
+extern asmlinkage void handle_gsexc(void);
extern asmlinkage void handle_msa(void);
extern asmlinkage void handle_mdmx(void);
extern asmlinkage void handle_watch(void);
@@ -105,26 +108,26 @@
void (*board_ebase_setup)(void);
void(*board_cache_error_setup)(void);
-static void show_raw_backtrace(unsigned long reg29)
+static void show_raw_backtrace(unsigned long reg29, const char *loglvl)
{
unsigned long *sp = (unsigned long *)(reg29 & ~3);
unsigned long addr;
- printk("Call Trace:");
+ printk("%sCall Trace:", loglvl);
#ifdef CONFIG_KALLSYMS
- printk("\n");
+ printk("%s\n", loglvl);
#endif
while (!kstack_end(sp)) {
unsigned long __user *p =
(unsigned long __user *)(unsigned long)sp++;
if (__get_user(addr, p)) {
- printk(" (Bad stack address)");
+ printk("%s (Bad stack address)", loglvl);
break;
}
if (__kernel_text_address(addr))
- print_ip_sym(addr);
+ print_ip_sym(loglvl, addr);
}
- printk("\n");
+ printk("%s\n", loglvl);
}
#ifdef CONFIG_KALLSYMS
@@ -137,7 +140,8 @@
__setup("raw_show_trace", set_raw_show_trace);
#endif
-static void show_backtrace(struct task_struct *task, const struct pt_regs *regs)
+static void show_backtrace(struct task_struct *task, const struct pt_regs *regs,
+ const char *loglvl)
{
unsigned long sp = regs->regs[29];
unsigned long ra = regs->regs[31];
@@ -147,12 +151,12 @@
task = current;
if (raw_show_trace || user_mode(regs) || !__kernel_text_address(pc)) {
- show_raw_backtrace(sp);
+ show_raw_backtrace(sp, loglvl);
return;
}
- printk("Call Trace:\n");
+ printk("%sCall Trace:\n", loglvl);
do {
- print_ip_sym(pc);
+ print_ip_sym(loglvl, pc);
pc = unwind_stack(task, &sp, pc, &ra);
} while (pc);
pr_cont("\n");
@@ -163,19 +167,19 @@
* with at least a bit of error checking ...
*/
static void show_stacktrace(struct task_struct *task,
- const struct pt_regs *regs)
+ const struct pt_regs *regs, const char *loglvl)
{
const int field = 2 * sizeof(unsigned long);
long stackdata;
int i;
unsigned long __user *sp = (unsigned long __user *)regs->regs[29];
- printk("Stack :");
+ printk("%sStack :", loglvl);
i = 0;
while ((unsigned long) sp & (PAGE_SIZE - 1)) {
if (i && ((i % (64 / field)) == 0)) {
pr_cont("\n");
- printk(" ");
+ printk("%s ", loglvl);
}
if (i > 39) {
pr_cont(" ...");
@@ -191,10 +195,10 @@
i++;
}
pr_cont("\n");
- show_backtrace(task, regs);
+ show_backtrace(task, regs, loglvl);
}
-void show_stack(struct task_struct *task, unsigned long *sp)
+void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
{
struct pt_regs regs;
mm_segment_t old_fs = get_fs();
@@ -209,11 +213,6 @@
regs.regs[29] = task->thread.reg29;
regs.regs[31] = 0;
regs.cp0_epc = task->thread.reg31;
-#ifdef CONFIG_KGDB_KDB
- } else if (atomic_read(&kgdb_active) != -1 &&
- kdb_current_regs) {
- memcpy(&regs, kdb_current_regs, sizeof(regs));
-#endif /* CONFIG_KGDB_KDB */
} else {
prepare_frametrace(&regs);
}
@@ -223,7 +222,7 @@
* the stack in the kernel (not user) address space.
*/
set_fs(KERNEL_DS);
- show_stacktrace(task, &regs);
+ show_stacktrace(task, &regs, loglvl);
set_fs(old_fs);
}
@@ -277,8 +276,10 @@
#ifdef CONFIG_CPU_HAS_SMARTMIPS
printk("Acx : %0*lx\n", field, regs->acx);
#endif
- printk("Hi : %0*lx\n", field, regs->hi);
- printk("Lo : %0*lx\n", field, regs->lo);
+ if (MIPS_ISA_REV < 6) {
+ printk("Hi : %0*lx\n", field, regs->hi);
+ printk("Lo : %0*lx\n", field, regs->lo);
+ }
/*
* Saved cp0 registers
@@ -348,7 +349,7 @@
*/
void show_regs(struct pt_regs *regs)
{
- __show_regs((struct pt_regs *)regs);
+ __show_regs(regs);
dump_stack();
}
@@ -373,7 +374,7 @@
if (!user_mode(regs))
/* Necessary for getting the correct stack content */
set_fs(KERNEL_DS);
- show_stacktrace(current, regs);
+ show_stacktrace(current, regs, KERN_DEFAULT);
show_code((unsigned int __user *) regs->cp0_epc);
printk("\n");
set_fs(old_fs);
@@ -412,7 +413,7 @@
if (regs && kexec_should_crash(current))
crash_kexec(regs);
- do_exit(sig);
+ make_task_dead(sig);
}
extern struct exception_table_entry __start___dbe_table[];
@@ -479,7 +480,7 @@
goto out;
die_if_kernel("Oops", regs);
- force_sig(SIGBUS, current);
+ force_sig(SIGBUS);
out:
exception_exit(prev_state);
@@ -695,6 +696,50 @@
return -1; /* Must be something else ... */
}
+/*
+ * Loongson-3 CSR instructions emulation
+ */
+
+#ifdef CONFIG_CPU_LOONGSON3_CPUCFG_EMULATION
+
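+/*
+ * CPUCFG is encoded in the LWC2 major opcode space. Match the major
+ * opcode, the fixed sub-opcode bits and the CPUCFG function code so
+ * that unrelated LWC2 instructions are not intercepted.
+ */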
+#define LWC2 0xc8000000
+#define RS BASE
+#define CSR_OPCODE2 0x00000118
+#define CSR_OPCODE2_MASK 0x000007ff
+#define CSR_FUNC_MASK RT
+#define CSR_FUNC_CPUCFG 0x8
+
+static int simulate_loongson3_cpucfg(struct pt_regs *regs,
+ unsigned int opcode)
+{
+ int op = opcode & OPCODE;
+ int op2 = opcode & CSR_OPCODE2_MASK;
+ int csr_func = (opcode & CSR_FUNC_MASK) >> 16;
+
+ if (op == LWC2 && op2 == CSR_OPCODE2 && csr_func == CSR_FUNC_CPUCFG) {
+ int rd = (opcode & RD) >> 11;
+ int rs = (opcode & RS) >> 21;
+ __u64 sel = regs->regs[rs];
+
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
+
+ /* Do not emulate on unsupported core models. */
+ preempt_disable();
+ if (!loongson3_cpucfg_emulation_enabled(&current_cpu_data)) {
+ preempt_enable();
+ return -1;
+ }
+ regs->regs[rd] = loongson3_cpucfg_read_synthesized(
+ &current_cpu_data, sel);
+ preempt_enable();
+ return 0;
+ }
+
+ /* Not ours. */
+ return -1;
+}
+#endif /* CONFIG_CPU_LOONGSON3_CPUCFG_EMULATION */
+
asmlinkage void do_ov(struct pt_regs *regs)
{
enum ctx_state prev_state;
@@ -702,9 +747,11 @@
prev_state = exception_enter();
die_if_kernel("Integer overflow", regs);
- force_sig_fault(SIGFPE, FPE_INTOVF, (void __user *)regs->cp0_epc, current);
+ force_sig_fault(SIGFPE, FPE_INTOVF, (void __user *)regs->cp0_epc);
exception_exit(prev_state);
}
+
+#ifdef CONFIG_MIPS_FP_SUPPORT
/*
* Send SIGFPE according to FCSR Cause bits, which must have already
@@ -728,7 +775,7 @@
else if (fcr31 & FPU_CSR_INE_X)
si_code = FPE_FLTRES;
- force_sig_fault(SIGFPE, si_code, fault_addr, tsk);
+ force_sig_fault_to_task(SIGFPE, si_code, fault_addr, tsk);
}
int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31)
@@ -745,22 +792,22 @@
return 1;
case SIGBUS:
- force_sig_fault(SIGBUS, BUS_ADRERR, fault_addr, current);
+ force_sig_fault(SIGBUS, BUS_ADRERR, fault_addr);
return 1;
case SIGSEGV:
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
vma = find_vma(current->mm, (unsigned long)fault_addr);
if (vma && (vma->vm_start <= (unsigned long)fault_addr))
si_code = SEGV_ACCERR;
else
si_code = SEGV_MAPERR;
- up_read(&current->mm->mmap_sem);
- force_sig_fault(SIGSEGV, si_code, fault_addr, current);
+ mmap_read_unlock(current->mm);
+ force_sig_fault(SIGSEGV, si_code, fault_addr);
return 1;
default:
- force_sig(sig, current);
+ force_sig(sig);
return 1;
}
}
@@ -793,9 +840,6 @@
*/
regs->cp0_epc = old_epc;
regs->regs[31] = old_ra;
-
- /* Save the FP context to struct thread_struct */
- lose_fpu(1);
/* Run the emulator */
sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
@@ -848,8 +892,6 @@
* register operands before invoking the emulator, which seems
* a bit extreme for what should be an infrequent event.
*/
- /* Ensure 'resume' not overwrite saved fp context again. */
- lose_fpu(1);
/* Run the emulator */
sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
@@ -875,6 +917,45 @@
out:
exception_exit(prev_state);
}
+
+/*
+ * MIPS MT processors may have fewer FPU contexts than CPU threads. If we've
+ * emulated more than some threshold number of instructions, force migration to
+ * a "CPU" that has FP support.
+ */
+static void mt_ase_fp_affinity(void)
+{
+#ifdef CONFIG_MIPS_MT_FPAFF
+ if (mt_fpemul_threshold > 0 &&
+ ((current->thread.emulated_fp++ > mt_fpemul_threshold))) {
+ /*
+ * If there's no FPU present, or if the application has already
+ * restricted the allowed set to exclude any CPUs with FPUs,
+ * we'll skip the procedure.
+ */
+ if (cpumask_intersects(&current->cpus_mask, &mt_fpu_cpumask)) {
+ cpumask_t tmask;
+
+ current->thread.user_cpus_allowed
+ = current->cpus_mask;
+ cpumask_and(&tmask, &current->cpus_mask,
+ &mt_fpu_cpumask);
+ set_cpus_allowed_ptr(current, &tmask);
+ set_thread_flag(TIF_FPUBOUND);
+ }
+ }
+#endif /* CONFIG_MIPS_MT_FPAFF */
+}
+
+#else /* !CONFIG_MIPS_FP_SUPPORT */
+
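+/*
+ * Stub for kernels built without FP support: returning -1 means
+ * "not ours", letting the caller try the remaining simulators.
+ */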
+static int simulate_fp(struct pt_regs *regs, unsigned int opcode,
+ unsigned long old_epc, unsigned long old_ra)
+{
+ return -1;
+}
+
+#endif /* !CONFIG_MIPS_FP_SUPPORT */
void do_trap_or_bp(struct pt_regs *regs, unsigned int code, int si_code,
const char *str)
@@ -904,11 +985,11 @@
die_if_kernel(b, regs);
force_sig_fault(SIGFPE,
code == BRK_DIVZERO ? FPE_INTDIV : FPE_INTOVF,
- (void __user *) regs->cp0_epc, current);
+ (void __user *) regs->cp0_epc);
break;
case BRK_BUG:
die_if_kernel("Kernel bug detected", regs);
- force_sig(SIGTRAP, current);
+ force_sig(SIGTRAP);
break;
case BRK_MEMU:
/*
@@ -923,15 +1004,15 @@
return;
die_if_kernel("Math emu break/trap", regs);
- force_sig(SIGTRAP, current);
+ force_sig(SIGTRAP);
break;
default:
scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
die_if_kernel(b, regs);
if (si_code) {
- force_sig_fault(SIGTRAP, si_code, NULL, current);
+ force_sig_fault(SIGTRAP, si_code, NULL);
} else {
- force_sig(SIGTRAP, current);
+ force_sig(SIGTRAP);
}
}
}
@@ -1024,7 +1105,7 @@
return;
out_sigsegv:
- force_sig(SIGSEGV, current);
+ force_sig(SIGSEGV);
goto out;
}
@@ -1038,7 +1119,7 @@
seg = get_fs();
if (!user_mode(regs))
- set_fs(get_ds());
+ set_fs(KERNEL_DS);
prev_state = exception_enter();
current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
@@ -1066,7 +1147,7 @@
return;
out_sigsegv:
- force_sig(SIGSEGV, current);
+ force_sig(SIGSEGV);
goto out;
}
@@ -1132,6 +1213,11 @@
if (status < 0)
status = simulate_fp(regs, opcode, old_epc, old31);
+
+#ifdef CONFIG_CPU_LOONGSON3_CPUCFG_EMULATION
+ if (status < 0)
+ status = simulate_loongson3_cpucfg(regs, opcode);
+#endif
} else if (cpu_has_mmips) {
unsigned short mmop[2] = { 0 };
@@ -1152,40 +1238,11 @@
if (unlikely(status > 0)) {
regs->cp0_epc = old_epc; /* Undo skip-over. */
regs->regs[31] = old31;
- force_sig(status, current);
+ force_sig(status);
}
out:
exception_exit(prev_state);
-}
-
-/*
- * MIPS MT processors may have fewer FPU contexts than CPU threads. If we've
- * emulated more than some threshold number of instructions, force migration to
- * a "CPU" that has FP support.
- */
-static void mt_ase_fp_affinity(void)
-{
-#ifdef CONFIG_MIPS_MT_FPAFF
- if (mt_fpemul_threshold > 0 &&
- ((current->thread.emulated_fp++ > mt_fpemul_threshold))) {
- /*
- * If there's no FPU present, or if the application has already
- * restricted the allowed set to exclude any CPUs with FPUs,
- * we'll skip the procedure.
- */
- if (cpumask_intersects(&current->cpus_allowed, &mt_fpu_cpumask)) {
- cpumask_t tmask;
-
- current->thread.user_cpus_allowed
- = current->cpus_allowed;
- cpumask_and(&tmask, &current->cpus_allowed,
- &mt_fpu_cpumask);
- set_cpus_allowed_ptr(current, &tmask);
- set_thread_flag(TIF_FPUBOUND);
- }
- }
-#endif /* CONFIG_MIPS_MT_FPAFF */
}
/*
@@ -1210,28 +1267,42 @@
die_if_kernel("COP2: Unhandled kernel unaligned access or invalid "
"instruction", regs);
- force_sig(SIGILL, current);
+ force_sig(SIGILL);
return NOTIFY_OK;
}
+#ifdef CONFIG_MIPS_FP_SUPPORT
+
static int enable_restore_fp_context(int msa)
{
int err, was_fpu_owner, prior_msa;
+ bool first_fp;
- if (!used_math()) {
- /* First time FP context user. */
+ /* Initialize context if it hasn't been used already */
+ first_fp = init_fp_ctx(current);
+
+ if (first_fp) {
preempt_disable();
- err = init_fpu();
+ err = own_fpu_inatomic(1);
if (msa && !err) {
enable_msa();
+ /*
+ * With MSA enabled, user space can see MSACSR and the MSA
+ * registers, but they still hold the values left by the
+ * previous MSA user; restore them from the saved FP/MSA
+ * context.
+ */
+ write_msa_csr(current->thread.fpu.msacsr);
+ /*
+ * own_fpu_inatomic(1) only restores the low 64 bits of each
+ * vector register; initialize the upper 64 bits here.
+ */
init_msa_upper();
set_thread_flag(TIF_USEDMSA);
set_thread_flag(TIF_MSA_CTX_LIVE);
}
preempt_enable();
- if (!err)
- set_used_math();
return err;
}
@@ -1322,17 +1393,23 @@
return 0;
}
+#else /* !CONFIG_MIPS_FP_SUPPORT */
+
+static int enable_restore_fp_context(int msa)
+{
+ return SIGILL;
+}
+
+#endif /* CONFIG_MIPS_FP_SUPPORT */
+
asmlinkage void do_cpu(struct pt_regs *regs)
{
enum ctx_state prev_state;
unsigned int __user *epc;
unsigned long old_epc, old31;
- void __user *fault_addr;
unsigned int opcode;
- unsigned long fcr31;
unsigned int cpid;
- int status, err;
- int sig;
+ int status;
prev_state = exception_enter();
cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;
@@ -1365,11 +1442,12 @@
if (unlikely(status > 0)) {
regs->cp0_epc = old_epc; /* Undo skip-over. */
regs->regs[31] = old31;
- force_sig(status, current);
+ force_sig(status);
}
break;
+#ifdef CONFIG_MIPS_FP_SUPPORT
case 3:
/*
* The COP3 opcode space and consequently the CP0.Status.CU3
@@ -1384,12 +1462,15 @@
* emulator too.
*/
if (raw_cpu_has_fpu || !cpu_has_mips_4_5_64_r2_r6) {
- force_sig(SIGILL, current);
+ force_sig(SIGILL);
break;
}
- /* Fall through. */
+ fallthrough;
+ case 1: {
+ void __user *fault_addr;
+ unsigned long fcr31;
+ int err, sig;
- case 1:
err = enable_restore_fp_context(0);
if (raw_cpu_has_fpu && !err)
@@ -1410,6 +1491,13 @@
mt_ase_fp_affinity();
break;
+ }
+#else /* CONFIG_MIPS_FP_SUPPORT */
+ case 1:
+ case 3:
+ force_sig(SIGILL);
+ break;
+#endif /* CONFIG_MIPS_FP_SUPPORT */
case 2:
raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs);
@@ -1434,7 +1522,7 @@
local_irq_enable();
die_if_kernel("do_msa_fpe invoked from kernel context!", regs);
- force_sig(SIGFPE, current);
+ force_sig(SIGFPE);
out:
exception_exit(prev_state);
}
@@ -1447,7 +1535,7 @@
prev_state = exception_enter();
if (!cpu_has_msa || test_thread_flag(TIF_32BIT_FPREGS)) {
- force_sig(SIGILL, current);
+ force_sig(SIGILL);
goto out;
}
@@ -1455,7 +1543,7 @@
err = enable_restore_fp_context(1);
if (err)
- force_sig(SIGILL, current);
+ force_sig(SIGILL);
out:
exception_exit(prev_state);
}
@@ -1465,7 +1553,7 @@
enum ctx_state prev_state;
prev_state = exception_enter();
- force_sig(SIGILL, current);
+ force_sig(SIGILL);
exception_exit(prev_state);
}
@@ -1491,7 +1579,7 @@
if (test_tsk_thread_flag(current, TIF_LOAD_WATCH)) {
mips_read_watch_registers();
local_irq_enable();
- force_sig_fault(SIGTRAP, TRAP_HWBKPT, NULL, current);
+ force_sig_fault(SIGTRAP, TRAP_HWBKPT, NULL);
} else {
mips_clear_watch_registers();
local_irq_enable();
@@ -1562,7 +1650,7 @@
}
die_if_kernel("MIPS MT Thread exception in kernel", regs);
- force_sig(SIGILL, current);
+ force_sig(SIGILL);
}
@@ -1571,7 +1659,7 @@
if (cpu_has_dsp)
panic("Unexpected DSP exception");
- force_sig(SIGILL, current);
+ force_sig(SIGILL);
}
asmlinkage void do_reserved(struct pt_regs *regs)
@@ -1605,7 +1693,7 @@
* Some MIPS CPUs can enable/disable for cache parity detection, but do
* it different ways.
*/
-static inline void parity_protection_init(void)
+static inline __init void parity_protection_init(void)
{
#define ERRCTL_PE 0x80000000
#define ERRCTL_L2P 0x00800000
@@ -1731,7 +1819,7 @@
case CPU_5KC:
case CPU_5KE:
- case CPU_LOONGSON1:
+ case CPU_LOONGSON32:
write_c0_ecc(0x80000000);
back_to_back_c0_hazard();
/* Set the PE bit (bit 31) in the c0_errctl register. */
@@ -1825,6 +1913,37 @@
}
/* Just print the cacheerr bits for now */
cache_parity_error();
+}
+
+asmlinkage void do_gsexc(struct pt_regs *regs, u32 diag1)
+{
+ u32 exccode = (diag1 & LOONGSON_DIAG1_EXCCODE) >>
+ LOONGSON_DIAG1_EXCCODE_SHIFT;
+ enum ctx_state prev_state;
+
+ prev_state = exception_enter();
+
+ switch (exccode) {
+ case 0x08:
+ /* Undocumented exception, will trigger on certain
+ * also-undocumented instructions accessible from userspace.
+ * Processor state is not otherwise corrupted, but currently
+ * we don't know how to proceed. Maybe there is some
+ * undocumented control flag to enable the instructions?
+ */
+ force_sig(SIGILL);
+ break;
+
+ default:
+ /* None of the other exceptions, documented or not, have
+ * further details given; none are encountered in the wild
+ * either. Panic in case some of them turn out to be fatal.
+ */
+ show_regs(regs);
+ panic("Unhandled Loongson exception - GSCause = %08x", diag1);
+ }
+
+ exception_exit(prev_state);
}
/*
@@ -1978,19 +2097,19 @@
* If no shadow set is selected then use the default handler
* that does normal register saving and standard interrupt exit
*/
- extern char except_vec_vi, except_vec_vi_lui;
- extern char except_vec_vi_ori, except_vec_vi_end;
- extern char rollback_except_vec_vi;
- char *vec_start = using_rollback_handler() ?
- &rollback_except_vec_vi : &except_vec_vi;
+ extern const u8 except_vec_vi[], except_vec_vi_lui[];
+ extern const u8 except_vec_vi_ori[], except_vec_vi_end[];
+ extern const u8 rollback_except_vec_vi[];
+ const u8 *vec_start = using_rollback_handler() ?
+ rollback_except_vec_vi : except_vec_vi;
#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
- const int lui_offset = &except_vec_vi_lui - vec_start + 2;
- const int ori_offset = &except_vec_vi_ori - vec_start + 2;
+ const int lui_offset = except_vec_vi_lui - vec_start + 2;
+ const int ori_offset = except_vec_vi_ori - vec_start + 2;
#else
- const int lui_offset = &except_vec_vi_lui - vec_start;
- const int ori_offset = &except_vec_vi_ori - vec_start;
+ const int lui_offset = except_vec_vi_lui - vec_start;
+ const int ori_offset = except_vec_vi_ori - vec_start;
#endif
- const int handler_len = &except_vec_vi_end - vec_start;
+ const int handler_len = except_vec_vi_end - vec_start;
if (handler_len > VECTORSPACING) {
/*
@@ -2085,7 +2204,7 @@
* flag that some firmware may have left set and the TS bit (for
* IP27). Set XX for ISA IV code to work.
*/
- unsigned int status_set = ST0_CU0;
+ unsigned int status_set = ST0_KERNEL_CUMASK;
#ifdef CONFIG_64BIT
status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX;
#endif
@@ -2122,7 +2241,7 @@
static void configure_exception_vector(void)
{
- if (cpu_has_veic || cpu_has_vint) {
+ if (cpu_has_mips_r2_r6) {
unsigned long sr = set_c0_status(ST0_BEV);
/* If available, use WG to set top bits of EBASE */
if (cpu_has_ebase_wg) {
@@ -2134,6 +2253,8 @@
}
write_c0_ebase(ebase);
write_c0_status(sr);
+ }
+ if (cpu_has_veic || cpu_has_vint) {
/* Setting vector spacing enables EI/VI mode */
change_c0_intctl(0x3e0, VECTORSPACING);
}
@@ -2164,22 +2285,6 @@
* o read IntCtl.IPFDC to determine the fast debug channel interrupt
*/
if (cpu_has_mips_r2_r6) {
- /*
- * We shouldn't trust a secondary core has a sane EBASE register
- * so use the one calculated by the boot CPU.
- */
- if (!is_boot_cpu) {
- /* If available, use WG to set top bits of EBASE */
- if (cpu_has_ebase_wg) {
-#ifdef CONFIG_64BIT
- write_c0_ebase_64(ebase | MIPS_EBASE_WG);
-#else
- write_c0_ebase(ebase | MIPS_EBASE_WG);
-#endif
- }
- write_c0_ebase(ebase);
- }
-
cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP;
cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7;
cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7;
@@ -2194,7 +2299,9 @@
cp0_fdc_irq = -1;
}
- if (!cpu_data[cpu].asid_cache)
+ if (cpu_has_mmid)
+ cpu_data[cpu].asid_cache = 0;
+ else if (!cpu_data[cpu].asid_cache)
cpu_data[cpu].asid_cache = asid_first_version(cpu);
mmgrab(&init_mm);
@@ -2210,7 +2317,7 @@
}
/* Install CPU exception handler */
-void set_handler(unsigned long offset, void *addr, unsigned long size)
+void set_handler(unsigned long offset, const void *addr, unsigned long size)
{
#ifdef CONFIG_CPU_MICROMIPS
memcpy((void *)(ebase + offset), ((unsigned char *)addr - 1), size);
@@ -2253,16 +2360,27 @@
extern char except_vec3_generic;
extern char except_vec4;
extern char except_vec3_r4000;
- unsigned long i;
+ unsigned long i, vec_size;
+ phys_addr_t ebase_pa;
check_wait();
- if (cpu_has_veic || cpu_has_vint) {
- unsigned long size = 0x200 + VECTORSPACING*64;
- phys_addr_t ebase_pa;
+ if (!cpu_has_mips_r2_r6) {
+ ebase = CAC_BASE;
+ ebase_pa = virt_to_phys((void *)ebase);
+ vec_size = 0x400;
- ebase = (unsigned long)
- __alloc_bootmem(size, 1 << fls(size), 0);
+ memblock_reserve(ebase_pa, vec_size);
+ } else {
+ if (cpu_has_veic || cpu_has_vint)
+ vec_size = 0x200 + VECTORSPACING*64;
+ else
+ vec_size = PAGE_SIZE;
+
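+ /*
+ * 1 << fls(vec_size) is a power of two at least as large as
+ * vec_size, so the vector area is naturally aligned.
+ */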
+ ebase_pa = memblock_phys_alloc(vec_size, 1 << fls(vec_size));
+ if (!ebase_pa)
+ panic("%s: Failed to allocate %lu bytes align=0x%x\n",
+ __func__, vec_size, 1 << fls(vec_size));
/*
* Try to ensure ebase resides in KSeg0 if possible.
@@ -2275,23 +2393,10 @@
* EVA is special though as it allows segments to be rearranged
* and to become uncached during cache error handling.
*/
- ebase_pa = __pa(ebase);
if (!IS_ENABLED(CONFIG_EVA) && !WARN_ON(ebase_pa >= 0x20000000))
ebase = CKSEG0ADDR(ebase_pa);
- } else {
- ebase = CAC_BASE;
-
- if (cpu_has_mips_r2_r6) {
- if (cpu_has_ebase_wg) {
-#ifdef CONFIG_64BIT
- ebase = (read_c0_ebase_64() & ~0xfff);
-#else
- ebase = (read_c0_ebase() & ~0xfff);
-#endif
- } else {
- ebase += (read_c0_ebase() & 0x3ffff000);
- }
- }
+ else
+ ebase = (unsigned long)phys_to_virt(ebase_pa);
}
if (cpu_has_mmips) {
@@ -2306,6 +2411,7 @@
if (board_ebase_setup)
board_ebase_setup();
per_cpu_trap_init(true);
+ memblock_set_bottom_up(false);
/*
* Copy the generic exception handlers to their final destination.
@@ -2378,7 +2484,7 @@
else {
if (cpu_has_vtag_icache)
set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp);
- else if (current_cpu_type() == CPU_LOONGSON3)
+ else if (current_cpu_type() == CPU_LOONGSON64)
set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp);
else
set_except_vector(EXCCODE_RI, handle_ri_rdhwr);
@@ -2395,7 +2501,11 @@
if (cpu_has_fpu && !cpu_has_nofpuex)
set_except_vector(EXCCODE_FPE, handle_fpe);
- set_except_vector(MIPS_EXCCODE_TLBPAR, handle_ftlb);
+ if (cpu_has_ftlbparex)
+ set_except_vector(MIPS_EXCCODE_TLBPAR, handle_ftlb);
+
+ if (cpu_has_gsexcex)
+ set_except_vector(LOONGSON_EXCCODE_GSEXC, handle_gsexc);
if (cpu_has_rixiex) {
set_except_vector(EXCCODE_TLBRI, tlb_do_page_fault_0);
@@ -2424,7 +2534,7 @@
else
set_handler(0x080, &except_vec3_generic, 0x80);
- local_flush_icache_range(ebase, ebase + 0x400);
+ local_flush_icache_range(ebase, ebase + vec_size);
sort_extable(__start___dbe_table, __stop___dbe_table);
--
Gitblit v1.6.2