2024-12-19 9370bb92b2d16684ee45cf24e879c93c509162da
kernel/arch/mips/kernel/traps.c
@@ -28,7 +28,7 @@
 #include <linux/smp.h>
 #include <linux/spinlock.h>
 #include <linux/kallsyms.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/interrupt.h>
 #include <linux/ptrace.h>
 #include <linux/kgdb.h>
@@ -50,13 +50,13 @@
 #include <asm/fpu.h>
 #include <asm/fpu_emulator.h>
 #include <asm/idle.h>
+#include <asm/isa-rev.h>
 #include <asm/mips-cps.h>
 #include <asm/mips-r2-to-r6-emul.h>
 #include <asm/mipsregs.h>
 #include <asm/mipsmtregs.h>
 #include <asm/module.h>
 #include <asm/msa.h>
-#include <asm/pgtable.h>
 #include <asm/ptrace.h>
 #include <asm/sections.h>
 #include <asm/siginfo.h>
@@ -69,6 +69,8 @@
 #include <asm/stacktrace.h>
 #include <asm/tlbex.h>
 #include <asm/uasm.h>
+
+#include <asm/mach-loongson64/cpucfg-emul.h>
 
 extern void check_wait(void);
 extern asmlinkage void rollback_handle_int(void);
@@ -88,6 +90,7 @@
 extern asmlinkage void handle_msa_fpe(void);
 extern asmlinkage void handle_fpe(void);
 extern asmlinkage void handle_ftlb(void);
+extern asmlinkage void handle_gsexc(void);
 extern asmlinkage void handle_msa(void);
 extern asmlinkage void handle_mdmx(void);
 extern asmlinkage void handle_watch(void);
@@ -105,26 +108,26 @@
 void (*board_ebase_setup)(void);
 void(*board_cache_error_setup)(void);
 
-static void show_raw_backtrace(unsigned long reg29)
+static void show_raw_backtrace(unsigned long reg29, const char *loglvl)
 {
 	unsigned long *sp = (unsigned long *)(reg29 & ~3);
 	unsigned long addr;
 
-	printk("Call Trace:");
+	printk("%sCall Trace:", loglvl);
 #ifdef CONFIG_KALLSYMS
-	printk("\n");
+	printk("%s\n", loglvl);
 #endif
 	while (!kstack_end(sp)) {
 		unsigned long __user *p =
			(unsigned long __user *)(unsigned long)sp++;
 		if (__get_user(addr, p)) {
-			printk(" (Bad stack address)");
+			printk("%s (Bad stack address)", loglvl);
 			break;
 		}
 		if (__kernel_text_address(addr))
-			print_ip_sym(addr);
+			print_ip_sym(loglvl, addr);
 	}
-	printk("\n");
+	printk("%s\n", loglvl);
 }
 
 #ifdef CONFIG_KALLSYMS
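
Annotation: the loglvl parameter threaded through show_raw_backtrace() here (and through show_backtrace(), show_stacktrace() and show_stack() in the hunks that follow) mirrors the kernel-wide show_stack() rework: each printk() has the caller's severity prepended via "%s" instead of a hard-coded level. A minimal sketch of the resulting calling convention; show_stack() and KERN_EMERG are real kernel symbols, dump_before_panic() is a hypothetical caller:

	/* Sketch: the caller now chooses the severity of the whole dump. */
	static void dump_before_panic(void)
	{
		show_stack(NULL, NULL, KERN_EMERG);	/* current task, emergency level */
	}
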
@@ -137,7 +140,8 @@
 __setup("raw_show_trace", set_raw_show_trace);
 #endif
 
-static void show_backtrace(struct task_struct *task, const struct pt_regs *regs)
+static void show_backtrace(struct task_struct *task, const struct pt_regs *regs,
+			   const char *loglvl)
 {
 	unsigned long sp = regs->regs[29];
 	unsigned long ra = regs->regs[31];
@@ -147,12 +151,12 @@
 		task = current;
 
 	if (raw_show_trace || user_mode(regs) || !__kernel_text_address(pc)) {
-		show_raw_backtrace(sp);
+		show_raw_backtrace(sp, loglvl);
 		return;
 	}
-	printk("Call Trace:\n");
+	printk("%sCall Trace:\n", loglvl);
 	do {
-		print_ip_sym(pc);
+		print_ip_sym(loglvl, pc);
 		pc = unwind_stack(task, &sp, pc, &ra);
 	} while (pc);
 	pr_cont("\n");
@@ -163,19 +167,19 @@
  * with at least a bit of error checking ...
  */
 static void show_stacktrace(struct task_struct *task,
-	const struct pt_regs *regs)
+	const struct pt_regs *regs, const char *loglvl)
 {
 	const int field = 2 * sizeof(unsigned long);
 	long stackdata;
 	int i;
 	unsigned long __user *sp = (unsigned long __user *)regs->regs[29];
 
-	printk("Stack :");
+	printk("%sStack :", loglvl);
 	i = 0;
 	while ((unsigned long) sp & (PAGE_SIZE - 1)) {
 		if (i && ((i % (64 / field)) == 0)) {
 			pr_cont("\n");
-			printk(" ");
+			printk("%s ", loglvl);
 		}
 		if (i > 39) {
 			pr_cont(" ...");
@@ -191,10 +195,10 @@
 		i++;
 	}
 	pr_cont("\n");
-	show_backtrace(task, regs);
+	show_backtrace(task, regs, loglvl);
 }
 
-void show_stack(struct task_struct *task, unsigned long *sp)
+void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
 {
 	struct pt_regs regs;
 	mm_segment_t old_fs = get_fs();
@@ -209,11 +213,6 @@
 		regs.regs[29] = task->thread.reg29;
 		regs.regs[31] = 0;
 		regs.cp0_epc = task->thread.reg31;
-#ifdef CONFIG_KGDB_KDB
-	} else if (atomic_read(&kgdb_active) != -1 &&
-		   kdb_current_regs) {
-		memcpy(&regs, kdb_current_regs, sizeof(regs));
-#endif /* CONFIG_KGDB_KDB */
 	} else {
 		prepare_frametrace(&regs);
 	}
@@ -223,7 +222,7 @@
 	 * the stack in the kernel (not user) address space.
 	 */
 	set_fs(KERNEL_DS);
-	show_stacktrace(task, &regs);
+	show_stacktrace(task, &regs, loglvl);
 	set_fs(old_fs);
 }
 
@@ -277,8 +276,10 @@
 #ifdef CONFIG_CPU_HAS_SMARTMIPS
 	printk("Acx : %0*lx\n", field, regs->acx);
 #endif
-	printk("Hi : %0*lx\n", field, regs->hi);
-	printk("Lo : %0*lx\n", field, regs->lo);
+	if (MIPS_ISA_REV < 6) {
+		printk("Hi : %0*lx\n", field, regs->hi);
+		printk("Lo : %0*lx\n", field, regs->lo);
+	}
 
 	/*
 	 * Saved cp0 registers
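
Annotation: MIPS R6 dropped the architectural HI/LO accumulator pair (multiply/divide results go to GPRs instead), so regs->hi and regs->lo are only worth printing on pre-R6 cores. MIPS_ISA_REV comes from the newly included <asm/isa-rev.h> and is a compile-time constant, so the guard above folds away entirely on an R6 build. A hedged sketch of the same idiom:

	/* Sketch: compile-time ISA gating. On a -march=mips64r6 build
	 * MIPS_ISA_REV is 6 and this branch is discarded as dead code. */
	if (MIPS_ISA_REV < 6)
		pr_info("Hi: %0*lx Lo: %0*lx\n", field, regs->hi, regs->lo);
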
@@ -348,7 +349,7 @@
  */
 void show_regs(struct pt_regs *regs)
 {
-	__show_regs((struct pt_regs *)regs);
+	__show_regs(regs);
 	dump_stack();
 }
 
@@ -373,7 +374,7 @@
 	if (!user_mode(regs))
 		/* Necessary for getting the correct stack content */
 		set_fs(KERNEL_DS);
-	show_stacktrace(current, regs);
+	show_stacktrace(current, regs, KERN_DEFAULT);
 	show_code((unsigned int __user *) regs->cp0_epc);
 	printk("\n");
 	set_fs(old_fs);
@@ -412,7 +413,7 @@
 	if (regs && kexec_should_crash(current))
 		crash_kexec(regs);
 
-	do_exit(sig);
+	make_task_dead(sig);
 }
 
 extern struct exception_table_entry __start___dbe_table[];
@@ -479,7 +480,7 @@
 		goto out;
 
 	die_if_kernel("Oops", regs);
-	force_sig(SIGBUS, current);
+	force_sig(SIGBUS);
 
 out:
 	exception_exit(prev_state);
@@ -695,6 +696,50 @@
 	return -1;	/* Must be something else ... */
 }
 
+/*
+ * Loongson-3 CSR instructions emulation
+ */
+
+#ifdef CONFIG_CPU_LOONGSON3_CPUCFG_EMULATION
+
+#define LWC2             0xc8000000
+#define RS               BASE
+#define CSR_OPCODE2      0x00000118
+#define CSR_OPCODE2_MASK 0x000007ff
+#define CSR_FUNC_MASK    RT
+#define CSR_FUNC_CPUCFG  0x8
+
+static int simulate_loongson3_cpucfg(struct pt_regs *regs,
+				     unsigned int opcode)
+{
+	int op = opcode & OPCODE;
+	int op2 = opcode & CSR_OPCODE2_MASK;
+	int csr_func = (opcode & CSR_FUNC_MASK) >> 16;
+
+	if (op == LWC2 && op2 == CSR_OPCODE2 && csr_func == CSR_FUNC_CPUCFG) {
+		int rd = (opcode & RD) >> 11;
+		int rs = (opcode & RS) >> 21;
+		__u64 sel = regs->regs[rs];
+
+		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
+
+		/* Do not emulate on unsupported core models. */
+		preempt_disable();
+		if (!loongson3_cpucfg_emulation_enabled(&current_cpu_data)) {
+			preempt_enable();
+			return -1;
+		}
+		regs->regs[rd] = loongson3_cpucfg_read_synthesized(
			&current_cpu_data, sel);
+		preempt_enable();
+		return 0;
+	}
+
+	/* Not ours. */
+	return -1;
+}
+#endif /* CONFIG_CPU_LOONGSON3_CPUCFG_EMULATION */
+
 asmlinkage void do_ov(struct pt_regs *regs)
 {
 	enum ctx_state prev_state;
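
Annotation: simulate_loongson3_cpucfg() pattern-matches the Loongson CPUCFG encoding, which lives in the LWC2 major-opcode space (bits 31:26 = 0x32, hence the 0xc8000000 value): rs (bits 25:21) names the register holding the CPUCFG selector, the function field (bits 20:16) must be 0x8, rd (bits 15:11) receives the synthesized result, and the low 11 bits must equal 0x118. A standalone, runnable sketch of the same decode with the field masks written out explicitly; the kernel expresses the same bit ranges through its OPCODE/BASE/RT/RD macros:

	#include <stdint.h>
	#include <stdio.h>

	/* Sketch: decode a Loongson-3 CPUCFG instruction the way the
	 * emulation path above does. Returns 0 and fills rs/rd on match. */
	static int decode_cpucfg(uint32_t insn, unsigned *rs, unsigned *rd)
	{
		if ((insn & 0xfc000000) != 0xc8000000)	/* LWC2 major opcode */
			return -1;
		if ((insn & 0x000007ff) != 0x00000118)	/* CSR opcode2 */
			return -1;
		if (((insn >> 16) & 0x1f) != 0x8)	/* CSR function: CPUCFG */
			return -1;
		*rs = (insn >> 21) & 0x1f;		/* selector register */
		*rd = (insn >> 11) & 0x1f;		/* destination register */
		return 0;
	}

	int main(void)
	{
		unsigned rs, rd;

		/* 0xc8080118 encodes a CPUCFG with rs = rd = $0 under the
		 * field layout above. */
		if (decode_cpucfg(0xc8080118, &rs, &rd) == 0)
			printf("CPUCFG rs=$%u rd=$%u\n", rs, rd);
		return 0;
	}
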
@@ -702,9 +747,11 @@
 	prev_state = exception_enter();
 	die_if_kernel("Integer overflow", regs);
 
-	force_sig_fault(SIGFPE, FPE_INTOVF, (void __user *)regs->cp0_epc, current);
+	force_sig_fault(SIGFPE, FPE_INTOVF, (void __user *)regs->cp0_epc);
 	exception_exit(prev_state);
 }
+
+#ifdef CONFIG_MIPS_FP_SUPPORT
 
 /*
  * Send SIGFPE according to FCSR Cause bits, which must have already
@@ -728,7 +775,7 @@
 	else if (fcr31 & FPU_CSR_INE_X)
 		si_code = FPE_FLTRES;
 
-	force_sig_fault(SIGFPE, si_code, fault_addr, tsk);
+	force_sig_fault_to_task(SIGFPE, si_code, fault_addr, tsk);
 }
 
 int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31)
@@ -745,22 +792,22 @@
 		return 1;
 
 	case SIGBUS:
-		force_sig_fault(SIGBUS, BUS_ADRERR, fault_addr, current);
+		force_sig_fault(SIGBUS, BUS_ADRERR, fault_addr);
 		return 1;
 
 	case SIGSEGV:
-		down_read(&current->mm->mmap_sem);
+		mmap_read_lock(current->mm);
 		vma = find_vma(current->mm, (unsigned long)fault_addr);
 		if (vma && (vma->vm_start <= (unsigned long)fault_addr))
 			si_code = SEGV_ACCERR;
 		else
 			si_code = SEGV_MAPERR;
-		up_read(&current->mm->mmap_sem);
-		force_sig_fault(SIGSEGV, si_code, fault_addr, current);
+		mmap_read_unlock(current->mm);
+		force_sig_fault(SIGSEGV, si_code, fault_addr);
 		return 1;
 
 	default:
-		force_sig(sig, current);
+		force_sig(sig);
 		return 1;
 	}
 }
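
Annotation: two API migrations meet in this hunk. The mmap semaphore is now taken through the mmap_read_lock()/mmap_read_unlock() wrappers (the member was renamed from mmap_sem to mmap_lock), and force_sig()/force_sig_fault() lost their task argument because they only ever act on current; the one caller in this file that really targets another task uses force_sig_fault_to_task() (previous hunk). The locking pattern, condensed into one sketch assuming process context:

	/* Sketch: find_vma() must run under mmap_lock; the signal is
	 * raised only after the lock is dropped, exactly as above. */
	mmap_read_lock(current->mm);
	vma = find_vma(current->mm, (unsigned long)fault_addr);
	si_code = (vma && vma->vm_start <= (unsigned long)fault_addr)
			? SEGV_ACCERR : SEGV_MAPERR;
	mmap_read_unlock(current->mm);
	force_sig_fault(SIGSEGV, si_code, fault_addr);
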
@@ -793,9 +840,6 @@
 	 */
 	regs->cp0_epc = old_epc;
 	regs->regs[31] = old_ra;
-
-	/* Save the FP context to struct thread_struct */
-	lose_fpu(1);
 
 	/* Run the emulator */
 	sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
@@ -848,8 +892,6 @@
 	 * register operands before invoking the emulator, which seems
 	 * a bit extreme for what should be an infrequent event.
 	 */
-	/* Ensure 'resume' not overwrite saved fp context again. */
-	lose_fpu(1);
 
 	/* Run the emulator */
 	sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
@@ -875,6 +917,45 @@
 out:
 	exception_exit(prev_state);
 }
+
+/*
+ * MIPS MT processors may have fewer FPU contexts than CPU threads. If we've
+ * emulated more than some threshold number of instructions, force migration to
+ * a "CPU" that has FP support.
+ */
+static void mt_ase_fp_affinity(void)
+{
+#ifdef CONFIG_MIPS_MT_FPAFF
+	if (mt_fpemul_threshold > 0 &&
+	    ((current->thread.emulated_fp++ > mt_fpemul_threshold))) {
+		/*
+		 * If there's no FPU present, or if the application has already
+		 * restricted the allowed set to exclude any CPUs with FPUs,
+		 * we'll skip the procedure.
+		 */
+		if (cpumask_intersects(&current->cpus_mask, &mt_fpu_cpumask)) {
+			cpumask_t tmask;
+
+			current->thread.user_cpus_allowed
+				= current->cpus_mask;
+			cpumask_and(&tmask, &current->cpus_mask,
+				    &mt_fpu_cpumask);
+			set_cpus_allowed_ptr(current, &tmask);
+			set_thread_flag(TIF_FPUBOUND);
+		}
+	}
+#endif /* CONFIG_MIPS_MT_FPAFF */
+}
+
+#else /* !CONFIG_MIPS_FP_SUPPORT */
+
+static int simulate_fp(struct pt_regs *regs, unsigned int opcode,
+		       unsigned long old_epc, unsigned long old_ra)
+{
+	return -1;
+}
+
+#endif /* !CONFIG_MIPS_FP_SUPPORT */
 
 void do_trap_or_bp(struct pt_regs *regs, unsigned int code, int si_code,
 		   const char *str)
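
Annotation: the relocated mt_ase_fp_affinity() and the new simulate_fp() stub sit on the two sides of a CONFIG_MIPS_FP_SUPPORT split. When FP support is compiled out, callers such as do_ri() still link against a simulate_fp() that simply reports "not handled" (-1), so call sites stay free of #ifdefs. A generic sketch of the idiom; CONFIG_MY_FEATURE and try_feature() are hypothetical names, not symbols from this file:

	#ifdef CONFIG_MY_FEATURE
	int try_feature(unsigned int opcode);	/* real implementation elsewhere */
	#else
	static inline int try_feature(unsigned int opcode)
	{
		return -1;	/* same "not mine" result the real path returns on a miss */
	}
	#endif
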
@@ -904,11 +985,11 @@
 		die_if_kernel(b, regs);
 		force_sig_fault(SIGFPE,
 				code == BRK_DIVZERO ? FPE_INTDIV : FPE_INTOVF,
-				(void __user *) regs->cp0_epc, current);
+				(void __user *) regs->cp0_epc);
 		break;
 	case BRK_BUG:
 		die_if_kernel("Kernel bug detected", regs);
-		force_sig(SIGTRAP, current);
+		force_sig(SIGTRAP);
 		break;
 	case BRK_MEMU:
 		/*
@@ -923,15 +1004,15 @@
 			return;
 
 		die_if_kernel("Math emu break/trap", regs);
-		force_sig(SIGTRAP, current);
+		force_sig(SIGTRAP);
 		break;
 	default:
 		scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
 		die_if_kernel(b, regs);
 		if (si_code) {
-			force_sig_fault(SIGTRAP, si_code, NULL, current);
+			force_sig_fault(SIGTRAP, si_code, NULL);
 		} else {
-			force_sig(SIGTRAP, current);
+			force_sig(SIGTRAP);
 		}
 	}
 }
@@ -1024,7 +1105,7 @@
 	return;
 
 out_sigsegv:
-	force_sig(SIGSEGV, current);
+	force_sig(SIGSEGV);
 	goto out;
 }
 
@@ -1038,7 +1119,7 @@
 
 	seg = get_fs();
 	if (!user_mode(regs))
-		set_fs(get_ds());
+		set_fs(KERNEL_DS);
 
 	prev_state = exception_enter();
 	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
@@ -1066,7 +1147,7 @@
 	return;
 
 out_sigsegv:
-	force_sig(SIGSEGV, current);
+	force_sig(SIGSEGV);
 	goto out;
 }
 
@@ -1132,6 +1213,11 @@
 
 		if (status < 0)
 			status = simulate_fp(regs, opcode, old_epc, old31);
+
+#ifdef CONFIG_CPU_LOONGSON3_CPUCFG_EMULATION
+		if (status < 0)
+			status = simulate_loongson3_cpucfg(regs, opcode);
+#endif
 	} else if (cpu_has_mmips) {
 		unsigned short mmop[2] = { 0 };
 
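
Annotation: the new hook slots into do_ri()'s simulator chain, which relies on a single return convention: 0 means the instruction was emulated and the trap is swallowed, a positive value is a signal number to deliver, and -1 means "not my opcode", letting the next candidate inspect it. That is why the Loongson call is guarded only by status < 0. The chain, condensed:

	/* Convention: 0 = handled, >0 = signal to raise, -1 = try next. */
	if (status < 0)
		status = simulate_llsc(regs, opcode);
	if (status < 0)
		status = simulate_rdhwr_normal(regs, opcode);
	if (status < 0)
		status = simulate_fp(regs, opcode, old_epc, old31);
	if (status < 0)
		status = simulate_loongson3_cpucfg(regs, opcode);	/* new, config-gated */
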
@@ -1152,40 +1238,11 @@
 	if (unlikely(status > 0)) {
 		regs->cp0_epc = old_epc;	/* Undo skip-over. */
 		regs->regs[31] = old31;
-		force_sig(status, current);
+		force_sig(status);
 	}
 
 out:
 	exception_exit(prev_state);
-}
-
-/*
- * MIPS MT processors may have fewer FPU contexts than CPU threads. If we've
- * emulated more than some threshold number of instructions, force migration to
- * a "CPU" that has FP support.
- */
-static void mt_ase_fp_affinity(void)
-{
-#ifdef CONFIG_MIPS_MT_FPAFF
-	if (mt_fpemul_threshold > 0 &&
-	    ((current->thread.emulated_fp++ > mt_fpemul_threshold))) {
-		/*
-		 * If there's no FPU present, or if the application has already
-		 * restricted the allowed set to exclude any CPUs with FPUs,
-		 * we'll skip the procedure.
-		 */
-		if (cpumask_intersects(&current->cpus_allowed, &mt_fpu_cpumask)) {
-			cpumask_t tmask;
-
-			current->thread.user_cpus_allowed
-				= current->cpus_allowed;
-			cpumask_and(&tmask, &current->cpus_allowed,
-				    &mt_fpu_cpumask);
-			set_cpus_allowed_ptr(current, &tmask);
-			set_thread_flag(TIF_FPUBOUND);
-		}
-	}
-#endif /* CONFIG_MIPS_MT_FPAFF */
 }
 
 /*
@@ -1210,28 +1267,42 @@
 
 	die_if_kernel("COP2: Unhandled kernel unaligned access or invalid "
 		      "instruction", regs);
-	force_sig(SIGILL, current);
+	force_sig(SIGILL);
 
 	return NOTIFY_OK;
 }
 
+#ifdef CONFIG_MIPS_FP_SUPPORT
+
 static int enable_restore_fp_context(int msa)
 {
 	int err, was_fpu_owner, prior_msa;
+	bool first_fp;
 
-	if (!used_math()) {
-		/* First time FP context user. */
+	/* Initialize context if it hasn't been used already */
+	first_fp = init_fp_ctx(current);
+
+	if (first_fp) {
 		preempt_disable();
-		err = init_fpu();
+		err = own_fpu_inatomic(1);
 		if (msa && !err) {
 			enable_msa();
+			/*
+			 * with MSA enabled, userspace can see MSACSR
+			 * and MSA regs, but the values in them are from
+			 * other task before current task, restore them
+			 * from saved fp/msa context
+			 */
+			write_msa_csr(current->thread.fpu.msacsr);
+			/*
+			 * own_fpu_inatomic(1) just restore low 64bit,
+			 * fix the high 64bit
+			 */
 			init_msa_upper();
 			set_thread_flag(TIF_USEDMSA);
 			set_thread_flag(TIF_MSA_CTX_LIVE);
 		}
 		preempt_enable();
-		if (!err)
-			set_used_math();
 		return err;
 	}
 
@@ -1322,17 +1393,23 @@
 	return 0;
 }
 
+#else /* !CONFIG_MIPS_FP_SUPPORT */
+
+static int enable_restore_fp_context(int msa)
+{
+	return SIGILL;
+}
+
+#endif /* CONFIG_MIPS_FP_SUPPORT */
+
 asmlinkage void do_cpu(struct pt_regs *regs)
 {
 	enum ctx_state prev_state;
 	unsigned int __user *epc;
 	unsigned long old_epc, old31;
-	void __user *fault_addr;
 	unsigned int opcode;
-	unsigned long fcr31;
 	unsigned int cpid;
-	int status, err;
-	int sig;
+	int status;
 
 	prev_state = exception_enter();
 	cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;
@@ -1365,11 +1442,12 @@
 		if (unlikely(status > 0)) {
 			regs->cp0_epc = old_epc;	/* Undo skip-over. */
 			regs->regs[31] = old31;
-			force_sig(status, current);
+			force_sig(status);
 		}
 
 		break;
 
+#ifdef CONFIG_MIPS_FP_SUPPORT
 	case 3:
 		/*
 		 * The COP3 opcode space and consequently the CP0.Status.CU3
@@ -1384,12 +1462,15 @@
 		 * emulator too.
 		 */
 		if (raw_cpu_has_fpu || !cpu_has_mips_4_5_64_r2_r6) {
-			force_sig(SIGILL, current);
+			force_sig(SIGILL);
 			break;
 		}
-		/* Fall through. */
+		fallthrough;
+	case 1: {
+		void __user *fault_addr;
+		unsigned long fcr31;
+		int err, sig;
 
-	case 1:
 		err = enable_restore_fp_context(0);
 
 		if (raw_cpu_has_fpu && !err)
@@ -1410,6 +1491,13 @@
 			mt_ase_fp_affinity();
 
 		break;
+	}
+#else /* CONFIG_MIPS_FP_SUPPORT */
+	case 1:
+	case 3:
+		force_sig(SIGILL);
+		break;
+#endif /* CONFIG_MIPS_FP_SUPPORT */
 
 	case 2:
 		raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs);
@@ -1434,7 +1522,7 @@
 	local_irq_enable();
 
 	die_if_kernel("do_msa_fpe invoked from kernel context!", regs);
-	force_sig(SIGFPE, current);
+	force_sig(SIGFPE);
 out:
 	exception_exit(prev_state);
 }
@@ -1447,7 +1535,7 @@
 	prev_state = exception_enter();
 
 	if (!cpu_has_msa || test_thread_flag(TIF_32BIT_FPREGS)) {
-		force_sig(SIGILL, current);
+		force_sig(SIGILL);
 		goto out;
 	}
 
@@ -1455,7 +1543,7 @@
 
 	err = enable_restore_fp_context(1);
 	if (err)
-		force_sig(SIGILL, current);
+		force_sig(SIGILL);
 out:
 	exception_exit(prev_state);
 }
@@ -1465,7 +1553,7 @@
 	enum ctx_state prev_state;
 
 	prev_state = exception_enter();
-	force_sig(SIGILL, current);
+	force_sig(SIGILL);
 	exception_exit(prev_state);
 }
 
@@ -1491,7 +1579,7 @@
 	if (test_tsk_thread_flag(current, TIF_LOAD_WATCH)) {
 		mips_read_watch_registers();
 		local_irq_enable();
-		force_sig_fault(SIGTRAP, TRAP_HWBKPT, NULL, current);
+		force_sig_fault(SIGTRAP, TRAP_HWBKPT, NULL);
 	} else {
 		mips_clear_watch_registers();
 		local_irq_enable();
@@ -1562,7 +1650,7 @@
 	}
 	die_if_kernel("MIPS MT Thread exception in kernel", regs);
 
-	force_sig(SIGILL, current);
+	force_sig(SIGILL);
 }
 
 
@@ -1571,7 +1659,7 @@
 	if (cpu_has_dsp)
 		panic("Unexpected DSP exception");
 
-	force_sig(SIGILL, current);
+	force_sig(SIGILL);
 }
 
 asmlinkage void do_reserved(struct pt_regs *regs)
@@ -1605,7 +1693,7 @@
 * Some MIPS CPUs can enable/disable for cache parity detection, but do
 * it different ways.
 */
-static inline void parity_protection_init(void)
+static inline __init void parity_protection_init(void)
 {
#define ERRCTL_PE	0x80000000
#define ERRCTL_L2P	0x00800000
 
@@ -1731,7 +1819,7 @@
 
 	case CPU_5KC:
 	case CPU_5KE:
-	case CPU_LOONGSON1:
+	case CPU_LOONGSON32:
 		write_c0_ecc(0x80000000);
 		back_to_back_c0_hazard();
 		/* Set the PE bit (bit 31) in the c0_errctl register. */
@@ -1825,6 +1913,37 @@
 	}
 	/* Just print the cacheerr bits for now */
 	cache_parity_error();
+}
+
+asmlinkage void do_gsexc(struct pt_regs *regs, u32 diag1)
+{
+	u32 exccode = (diag1 & LOONGSON_DIAG1_EXCCODE) >>
+		      LOONGSON_DIAG1_EXCCODE_SHIFT;
+	enum ctx_state prev_state;
+
+	prev_state = exception_enter();
+
+	switch (exccode) {
+	case 0x08:
+		/* Undocumented exception, will trigger on certain
+		 * also-undocumented instructions accessible from userspace.
+		 * Processor state is not otherwise corrupted, but currently
+		 * we don't know how to proceed. Maybe there is some
+		 * undocumented control flag to enable the instructions?
+		 */
+		force_sig(SIGILL);
+		break;
+
+	default:
+		/* None of the other exceptions, documented or not, have
+		 * further details given; none are encountered in the wild
+		 * either. Panic in case some of them turn out to be fatal.
+		 */
+		show_regs(regs);
+		panic("Unhandled Loongson exception - GSCause = %08x", diag1);
+	}
+
+	exception_exit(prev_state);
 }
 
 /*
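
Annotation: do_gsexc() is the C-level handler reached through the new handle_gsexc vector. Loongson cores deliver a vendor "GSExc" exception whose sub-code must be read out of the GSCause/Diag1 value passed in as diag1, so the handler is plain field extraction plus a dispatch switch. A self-contained sketch of the extraction; the shift and mask here are placeholders, the real LOONGSON_DIAG1_EXCCODE definition lives in the Loongson register headers, not in this hunk:

	#include <stdint.h>

	/* Sketch: pull a sub-exception code out of a cause-style register.
	 * 16 and 0x3f are placeholder field parameters, not the real
	 * LOONGSON_DIAG1_EXCCODE values. */
	static inline uint32_t gsexc_code(uint32_t diag1)
	{
		return (diag1 >> 16) & 0x3f;
	}
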
@@ -1978,19 +2097,19 @@
 	 * If no shadow set is selected then use the default handler
 	 * that does normal register saving and standard interrupt exit
 	 */
-	extern char except_vec_vi, except_vec_vi_lui;
-	extern char except_vec_vi_ori, except_vec_vi_end;
-	extern char rollback_except_vec_vi;
-	char *vec_start = using_rollback_handler() ?
-		&rollback_except_vec_vi : &except_vec_vi;
+	extern const u8 except_vec_vi[], except_vec_vi_lui[];
+	extern const u8 except_vec_vi_ori[], except_vec_vi_end[];
+	extern const u8 rollback_except_vec_vi[];
+	const u8 *vec_start = using_rollback_handler() ?
+		rollback_except_vec_vi : except_vec_vi;
 #if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
-	const int lui_offset = &except_vec_vi_lui - vec_start + 2;
-	const int ori_offset = &except_vec_vi_ori - vec_start + 2;
+	const int lui_offset = except_vec_vi_lui - vec_start + 2;
+	const int ori_offset = except_vec_vi_ori - vec_start + 2;
 #else
-	const int lui_offset = &except_vec_vi_lui - vec_start;
-	const int ori_offset = &except_vec_vi_ori - vec_start;
+	const int lui_offset = except_vec_vi_lui - vec_start;
+	const int ori_offset = except_vec_vi_ori - vec_start;
 #endif
-	const int handler_len = &except_vec_vi_end - vec_start;
+	const int handler_len = except_vec_vi_end - vec_start;
 
 	if (handler_len > VECTORSPACING) {
 		/*
@@ -2085,7 +2204,7 @@
 	 * flag that some firmware may have left set and the TS bit (for
 	 * IP27). Set XX for ISA IV code to work.
 	 */
-	unsigned int status_set = ST0_CU0;
+	unsigned int status_set = ST0_KERNEL_CUMASK;
 #ifdef CONFIG_64BIT
 	status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX;
 #endif
@@ -2122,7 +2241,7 @@
 
 static void configure_exception_vector(void)
 {
-	if (cpu_has_veic || cpu_has_vint) {
+	if (cpu_has_mips_r2_r6) {
 		unsigned long sr = set_c0_status(ST0_BEV);
 		/* If available, use WG to set top bits of EBASE */
 		if (cpu_has_ebase_wg) {
@@ -2134,6 +2253,8 @@
 		}
 		write_c0_ebase(ebase);
 		write_c0_status(sr);
+	}
+	if (cpu_has_veic || cpu_has_vint) {
 		/* Setting vector spacing enables EI/VI mode */
 		change_c0_intctl(0x3e0, VECTORSPACING);
 	}
@@ -2164,22 +2285,6 @@
 	 *  o read IntCtl.IPFDC to determine the fast debug channel interrupt
 	 */
 	if (cpu_has_mips_r2_r6) {
-		/*
-		 * We shouldn't trust a secondary core has a sane EBASE register
-		 * so use the one calculated by the boot CPU.
-		 */
-		if (!is_boot_cpu) {
-			/* If available, use WG to set top bits of EBASE */
-			if (cpu_has_ebase_wg) {
-#ifdef CONFIG_64BIT
-				write_c0_ebase_64(ebase | MIPS_EBASE_WG);
-#else
-				write_c0_ebase(ebase | MIPS_EBASE_WG);
-#endif
-			}
-			write_c0_ebase(ebase);
-		}
-
 		cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP;
 		cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7;
 		cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7;
@@ -2194,7 +2299,9 @@
 		cp0_fdc_irq = -1;
 	}
 
-	if (!cpu_data[cpu].asid_cache)
+	if (cpu_has_mmid)
+		cpu_data[cpu].asid_cache = 0;
+	else if (!cpu_data[cpu].asid_cache)
 		cpu_data[cpu].asid_cache = asid_first_version(cpu);
 
 	mmgrab(&init_mm);
@@ -2210,7 +2317,7 @@
 }
 
 /* Install CPU exception handler */
-void set_handler(unsigned long offset, void *addr, unsigned long size)
+void set_handler(unsigned long offset, const void *addr, unsigned long size)
 {
 #ifdef CONFIG_CPU_MICROMIPS
 	memcpy((void *)(ebase + offset), ((unsigned char *)addr - 1), size);
@@ -2253,16 +2360,27 @@
 	extern char except_vec3_generic;
 	extern char except_vec4;
 	extern char except_vec3_r4000;
-	unsigned long i;
+	unsigned long i, vec_size;
+	phys_addr_t ebase_pa;
 
 	check_wait();
 
-	if (cpu_has_veic || cpu_has_vint) {
-		unsigned long size = 0x200 + VECTORSPACING*64;
-		phys_addr_t ebase_pa;
+	if (!cpu_has_mips_r2_r6) {
+		ebase = CAC_BASE;
+		ebase_pa = virt_to_phys((void *)ebase);
+		vec_size = 0x400;
 
-		ebase = (unsigned long)
-			__alloc_bootmem(size, 1 << fls(size), 0);
+		memblock_reserve(ebase_pa, vec_size);
+	} else {
+		if (cpu_has_veic || cpu_has_vint)
+			vec_size = 0x200 + VECTORSPACING*64;
+		else
+			vec_size = PAGE_SIZE;
+
+		ebase_pa = memblock_phys_alloc(vec_size, 1 << fls(vec_size));
+		if (!ebase_pa)
+			panic("%s: Failed to allocate %lu bytes align=0x%x\n",
+			      __func__, vec_size, 1 << fls(vec_size));
 
 		/*
 		 * Try to ensure ebase resides in KSeg0 if possible.
@@ -2275,23 +2393,10 @@
 		 * EVA is special though as it allows segments to be rearranged
 		 * and to become uncached during cache error handling.
 		 */
-		ebase_pa = __pa(ebase);
 		if (!IS_ENABLED(CONFIG_EVA) && !WARN_ON(ebase_pa >= 0x20000000))
 			ebase = CKSEG0ADDR(ebase_pa);
-	} else {
-		ebase = CAC_BASE;
-
-		if (cpu_has_mips_r2_r6) {
-			if (cpu_has_ebase_wg) {
-#ifdef CONFIG_64BIT
-				ebase = (read_c0_ebase_64() & ~0xfff);
-#else
-				ebase = (read_c0_ebase() & ~0xfff);
-#endif
-			} else {
-				ebase += (read_c0_ebase() & 0x3ffff000);
-			}
-		}
+		else
+			ebase = (unsigned long)phys_to_virt(ebase_pa);
 	}
 
 	if (cpu_has_mmips) {
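
Annotation: the two hunks above finish the bootmem-to-memblock conversion in trap_init(). Legacy (pre-R2) CPUs keep their fixed vector base at CAC_BASE, so the region is merely reserved; R2+ CPUs allocate a suitably aligned vector area with memblock_phys_alloc(), which returns a physical address (0 on failure) that is then mapped back through CKSEG0ADDR() or phys_to_virt(). A hedged sketch of that allocate-or-panic idiom; alloc_vectors() is hypothetical and ebase stands in for this file's global:

	static void __init alloc_vectors(unsigned long vec_size)
	{
		/* memblock_phys_alloc() hands back a physical address,
		 * or 0 when early-boot memory is exhausted. */
		phys_addr_t pa = memblock_phys_alloc(vec_size, 1 << fls(vec_size));

		if (!pa)
			panic("%s: failed to allocate %lu bytes\n", __func__, vec_size);
		ebase = (unsigned long)phys_to_virt(pa);
	}
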
@@ -2306,6 +2411,7 @@
 	if (board_ebase_setup)
 		board_ebase_setup();
 	per_cpu_trap_init(true);
+	memblock_set_bottom_up(false);
 
 	/*
 	 * Copy the generic exception handlers to their final destination.
@@ -2378,7 +2484,7 @@
 	else {
 		if (cpu_has_vtag_icache)
 			set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp);
-		else if (current_cpu_type() == CPU_LOONGSON3)
+		else if (current_cpu_type() == CPU_LOONGSON64)
 			set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp);
 		else
 			set_except_vector(EXCCODE_RI, handle_ri_rdhwr);
@@ -2395,7 +2501,11 @@
 	if (cpu_has_fpu && !cpu_has_nofpuex)
 		set_except_vector(EXCCODE_FPE, handle_fpe);
 
-	set_except_vector(MIPS_EXCCODE_TLBPAR, handle_ftlb);
+	if (cpu_has_ftlbparex)
+		set_except_vector(MIPS_EXCCODE_TLBPAR, handle_ftlb);
+
+	if (cpu_has_gsexcex)
+		set_except_vector(LOONGSON_EXCCODE_GSEXC, handle_gsexc);
 
 	if (cpu_has_rixiex) {
 		set_except_vector(EXCCODE_TLBRI, tlb_do_page_fault_0);
@@ -2424,7 +2534,7 @@
 	else
 		set_handler(0x080, &except_vec3_generic, 0x80);
 
-	local_flush_icache_range(ebase, ebase + 0x400);
+	local_flush_icache_range(ebase, ebase + vec_size);
 
 	sort_extable(__start___dbe_table, __stop___dbe_table);
 