.. | .. |
28 | 28 | #include <linux/smp.h> |
29 | 29 | #include <linux/spinlock.h> |
30 | 30 | #include <linux/kallsyms.h> |
31 | | -#include <linux/bootmem.h> |
| 31 | +#include <linux/memblock.h> |
32 | 32 | #include <linux/interrupt.h> |
33 | 33 | #include <linux/ptrace.h> |
34 | 34 | #include <linux/kgdb.h> |
.. | .. |
50 | 50 | #include <asm/fpu.h> |
51 | 51 | #include <asm/fpu_emulator.h> |
52 | 52 | #include <asm/idle.h> |
| 53 | +#include <asm/isa-rev.h> |
53 | 54 | #include <asm/mips-cps.h> |
54 | 55 | #include <asm/mips-r2-to-r6-emul.h> |
55 | 56 | #include <asm/mipsregs.h> |
56 | 57 | #include <asm/mipsmtregs.h> |
57 | 58 | #include <asm/module.h> |
58 | 59 | #include <asm/msa.h> |
59 | | -#include <asm/pgtable.h> |
60 | 60 | #include <asm/ptrace.h> |
61 | 61 | #include <asm/sections.h> |
62 | 62 | #include <asm/siginfo.h> |
.. | .. |
69 | 69 | #include <asm/stacktrace.h> |
70 | 70 | #include <asm/tlbex.h> |
71 | 71 | #include <asm/uasm.h> |
| 72 | + |
| 73 | +#include <asm/mach-loongson64/cpucfg-emul.h> |
72 | 74 | |
73 | 75 | extern void check_wait(void); |
74 | 76 | extern asmlinkage void rollback_handle_int(void); |
.. | .. |
88 | 90 | extern asmlinkage void handle_msa_fpe(void); |
89 | 91 | extern asmlinkage void handle_fpe(void); |
90 | 92 | extern asmlinkage void handle_ftlb(void); |
| 93 | +extern asmlinkage void handle_gsexc(void); |
91 | 94 | extern asmlinkage void handle_msa(void); |
92 | 95 | extern asmlinkage void handle_mdmx(void); |
93 | 96 | extern asmlinkage void handle_watch(void); |
.. | .. |
105 | 108 | void (*board_ebase_setup)(void); |
106 | 109 | void (*board_cache_error_setup)(void); |
107 | 110 | |
108 | | -static void show_raw_backtrace(unsigned long reg29) |
| 111 | +static void show_raw_backtrace(unsigned long reg29, const char *loglvl) |
109 | 112 | { |
110 | 113 | unsigned long *sp = (unsigned long *)(reg29 & ~3); |
111 | 114 | unsigned long addr; |
112 | 115 | |
113 | | - printk("Call Trace:"); |
| 116 | + printk("%sCall Trace:", loglvl); |
114 | 117 | #ifdef CONFIG_KALLSYMS |
115 | | - printk("\n"); |
| 118 | + printk("%s\n", loglvl); |
116 | 119 | #endif |
117 | 120 | while (!kstack_end(sp)) { |
118 | 121 | unsigned long __user *p = |
119 | 122 | (unsigned long __user *)(unsigned long)sp++; |
120 | 123 | if (__get_user(addr, p)) { |
121 | | - printk(" (Bad stack address)"); |
| 124 | + printk("%s (Bad stack address)", loglvl); |
122 | 125 | break; |
123 | 126 | } |
124 | 127 | if (__kernel_text_address(addr)) |
125 | | - print_ip_sym(addr); |
| 128 | + print_ip_sym(loglvl, addr); |
126 | 129 | } |
127 | | - printk("\n"); |
| 130 | + printk("%s\n", loglvl); |
128 | 131 | } |
129 | 132 | |
130 | 133 | #ifdef CONFIG_KALLSYMS |
.. | .. |
137 | 140 | __setup("raw_show_trace", set_raw_show_trace); |
138 | 141 | #endif |
139 | 142 | |
140 | | -static void show_backtrace(struct task_struct *task, const struct pt_regs *regs) |
| 143 | +static void show_backtrace(struct task_struct *task, const struct pt_regs *regs, |
| 144 | + const char *loglvl) |
141 | 145 | { |
142 | 146 | unsigned long sp = regs->regs[29]; |
143 | 147 | unsigned long ra = regs->regs[31]; |
.. | .. |
147 | 151 | task = current; |
148 | 152 | |
149 | 153 | if (raw_show_trace || user_mode(regs) || !__kernel_text_address(pc)) { |
150 | | - show_raw_backtrace(sp); |
| 154 | + show_raw_backtrace(sp, loglvl); |
151 | 155 | return; |
152 | 156 | } |
153 | | - printk("Call Trace:\n"); |
| 157 | + printk("%sCall Trace:\n", loglvl); |
154 | 158 | do { |
155 | | - print_ip_sym(pc); |
| 159 | + print_ip_sym(loglvl, pc); |
156 | 160 | pc = unwind_stack(task, &sp, pc, &ra); |
157 | 161 | } while (pc); |
158 | 162 | pr_cont("\n"); |
.. | .. |
163 | 167 | * with at least a bit of error checking ... |
164 | 168 | */ |
165 | 169 | static void show_stacktrace(struct task_struct *task, |
166 | | - const struct pt_regs *regs) |
| 170 | + const struct pt_regs *regs, const char *loglvl) |
167 | 171 | { |
168 | 172 | const int field = 2 * sizeof(unsigned long); |
169 | 173 | long stackdata; |
170 | 174 | int i; |
171 | 175 | unsigned long __user *sp = (unsigned long __user *)regs->regs[29]; |
172 | 176 | |
173 | | - printk("Stack :"); |
| 177 | + printk("%sStack :", loglvl); |
174 | 178 | i = 0; |
175 | 179 | while ((unsigned long) sp & (PAGE_SIZE - 1)) { |
176 | 180 | if (i && ((i % (64 / field)) == 0)) { |
177 | 181 | pr_cont("\n"); |
178 | | - printk(" "); |
| 182 | + printk("%s ", loglvl); |
179 | 183 | } |
180 | 184 | if (i > 39) { |
181 | 185 | pr_cont(" ..."); |
.. | .. |
191 | 195 | i++; |
192 | 196 | } |
193 | 197 | pr_cont("\n"); |
194 | | - show_backtrace(task, regs); |
| 198 | + show_backtrace(task, regs, loglvl); |
195 | 199 | } |
196 | 200 | |
197 | | -void show_stack(struct task_struct *task, unsigned long *sp) |
| 201 | +void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl) |
198 | 202 | { |
199 | 203 | struct pt_regs regs; |
200 | 204 | mm_segment_t old_fs = get_fs(); |
.. | .. |
209 | 213 | regs.regs[29] = task->thread.reg29; |
210 | 214 | regs.regs[31] = 0; |
211 | 215 | regs.cp0_epc = task->thread.reg31; |
212 | | -#ifdef CONFIG_KGDB_KDB |
213 | | - } else if (atomic_read(&kgdb_active) != -1 && |
214 | | - kdb_current_regs) { |
215 | | - memcpy(&regs, kdb_current_regs, sizeof(regs)); |
216 | | -#endif /* CONFIG_KGDB_KDB */ |
217 | 216 | } else { |
218 | 217 | prepare_frametrace(&regs); |
219 | 218 | } |
.. | .. |
223 | 222 | * the stack in the kernel (not user) address space. |
224 | 223 | */ |
225 | 224 | set_fs(KERNEL_DS); |
226 | | - show_stacktrace(task, &regs); |
| 225 | + show_stacktrace(task, &regs, loglvl); |
227 | 226 | set_fs(old_fs); |
228 | 227 | } |
229 | 228 | |
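
The hunks above thread a caller-chosen log level (loglvl) through the backtrace helpers instead of hard-coding one. This works because printk log levels such as KERN_INFO are ordinary string prefixes, so a level supplied at run time can be spliced in with %s. A minimal sketch of the pattern; the helper name here is hypothetical:

    #include <linux/printk.h>

    /* Hypothetical helper: dump a few words at a caller-chosen log level. */
    static void dump_words(const unsigned long *p, int n, const char *loglvl)
    {
            int i;

            for (i = 0; i < n; i++)
                    printk("%s %0*lx\n", loglvl, 2 * (int)sizeof(long), p[i]);
    }

With the new signature each call site picks its own level, e.g. show_stack(NULL, NULL, KERN_EMERG) on a panic path versus KERN_INFO for an informational dump.
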
.. | .. |
277 | 276 | #ifdef CONFIG_CPU_HAS_SMARTMIPS |
278 | 277 | printk("Acx : %0*lx\n", field, regs->acx); |
279 | 278 | #endif |
280 | | - printk("Hi : %0*lx\n", field, regs->hi); |
281 | | - printk("Lo : %0*lx\n", field, regs->lo); |
| 279 | + if (MIPS_ISA_REV < 6) { |
| 280 | + printk("Hi : %0*lx\n", field, regs->hi); |
| 281 | + printk("Lo : %0*lx\n", field, regs->lo); |
| 282 | + } |
282 | 283 | |
283 | 284 | /* |
284 | 285 | * Saved cp0 registers |
.. | .. |
348 | 349 | */ |
349 | 350 | void show_regs(struct pt_regs *regs) |
350 | 351 | { |
351 | | - __show_regs((struct pt_regs *)regs); |
| 352 | + __show_regs(regs); |
352 | 353 | dump_stack(); |
353 | 354 | } |
354 | 355 | |
.. | .. |
373 | 374 | if (!user_mode(regs)) |
374 | 375 | /* Necessary for getting the correct stack content */ |
375 | 376 | set_fs(KERNEL_DS); |
376 | | - show_stacktrace(current, regs); |
| 377 | + show_stacktrace(current, regs, KERN_DEFAULT); |
377 | 378 | show_code((unsigned int __user *) regs->cp0_epc); |
378 | 379 | printk("\n"); |
379 | 380 | set_fs(old_fs); |
.. | .. |
412 | 413 | if (regs && kexec_should_crash(current)) |
413 | 414 | crash_kexec(regs); |
414 | 415 | |
415 | | - do_exit(sig); |
| 416 | + make_task_dead(sig); |
416 | 417 | } |
417 | 418 | |
418 | 419 | extern struct exception_table_entry __start___dbe_table[]; |
.. | .. |
479 | 480 | goto out; |
480 | 481 | |
481 | 482 | die_if_kernel("Oops", regs); |
482 | | - force_sig(SIGBUS, current); |
| 483 | + force_sig(SIGBUS); |
483 | 484 | |
484 | 485 | out: |
485 | 486 | exception_exit(prev_state); |
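
A convention note for the many force_sig()/force_sig_fault() hunks that follow: both helpers lost their final task argument because every caller passed current, while the explicit-target case survives as force_sig_fault_to_task() (visible in the FCSR hunk below, which passes tsk). A short sketch of the resulting calling convention; the helper name here is hypothetical:

    #include <linux/sched/signal.h>

    /* Hypothetical helper contrasting the two remaining forms. */
    static void report_bus_error(void __user *fault_addr, struct task_struct *tsk)
    {
            /* common case: the target is implicitly current */
            force_sig_fault(SIGBUS, BUS_ADRERR, fault_addr);

            /* explicit-target case keeps the task argument */
            force_sig_fault_to_task(SIGBUS, BUS_ADRERR, fault_addr, tsk);
    }
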
.. | .. |
695 | 696 | return -1; /* Must be something else ... */ |
696 | 697 | } |
697 | 698 | |
| 699 | +/* |
| 700 | + * Loongson-3 CSR instructions emulation |
| 701 | + */ |
| 702 | + |
| 703 | +#ifdef CONFIG_CPU_LOONGSON3_CPUCFG_EMULATION |
| 704 | + |
| 705 | +#define LWC2 0xc8000000 |
| 706 | +#define RS BASE |
| 707 | +#define CSR_OPCODE2 0x00000118 |
| 708 | +#define CSR_OPCODE2_MASK 0x000007ff |
| 709 | +#define CSR_FUNC_MASK RT |
| 710 | +#define CSR_FUNC_CPUCFG 0x8 |
| 711 | + |
| 712 | +static int simulate_loongson3_cpucfg(struct pt_regs *regs, |
| 713 | + unsigned int opcode) |
| 714 | +{ |
| 715 | + int op = opcode & OPCODE; |
| 716 | + int op2 = opcode & CSR_OPCODE2_MASK; |
| 717 | + int csr_func = (opcode & CSR_FUNC_MASK) >> 16; |
| 718 | + |
| 719 | + if (op == LWC2 && op2 == CSR_OPCODE2 && csr_func == CSR_FUNC_CPUCFG) { |
| 720 | + int rd = (opcode & RD) >> 11; |
| 721 | + int rs = (opcode & RS) >> 21; |
| 722 | + __u64 sel = regs->regs[rs]; |
| 723 | + |
| 724 | + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0); |
| 725 | + |
| 726 | + /* Do not emulate on unsupported core models. */ |
| 727 | + preempt_disable(); |
| 728 | + if (!loongson3_cpucfg_emulation_enabled(&current_cpu_data)) { |
| 729 | + preempt_enable(); |
| 730 | + return -1; |
| 731 | + } |
| 732 | + regs->regs[rd] = loongson3_cpucfg_read_synthesized( |
| 733 | + &current_cpu_data, sel); |
| 734 | + preempt_enable(); |
| 735 | + return 0; |
| 736 | + } |
| 737 | + |
| 738 | + /* Not ours. */ |
| 739 | + return -1; |
| 740 | +} |
| 741 | +#endif /* CONFIG_CPU_LOONGSON3_CPUCFG_EMULATION */ |
| 742 | + |
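
To make the CPUCFG decode concrete: OPCODE, BASE, RT and RD are the standard MIPS instruction field masks defined earlier in traps.c (0xfc000000, 0x03e00000, 0x001f0000 and 0x0000f800 respectively), so the word unpacks as below. A worked example with hypothetical register choices rd = 2 and rs = 4:

    /* 0xc8881118 == LWC2 | (4 << 21) | (8 << 16) | (2 << 11) | 0x118 */
    static void cpucfg_decode_example(void)
    {
            unsigned int opcode = 0xc8881118;

            unsigned int op  = opcode & 0xfc000000;          /* OPCODE: 0xc8000000 == LWC2 */
            unsigned int rs  = (opcode & 0x03e00000) >> 21;  /* BASE:   4, GPR holding the CPUCFG sel */
            unsigned int fn  = (opcode & 0x001f0000) >> 16;  /* RT:     8 == CSR_FUNC_CPUCFG */
            unsigned int rd  = (opcode & 0x0000f800) >> 11;  /* RD:     2, destination GPR */
            unsigned int op2 = opcode & 0x000007ff;          /* CSR_OPCODE2_MASK: 0x118 == CSR_OPCODE2 */

            (void)op; (void)rs; (void)fn; (void)rd; (void)op2;
    }
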
698 | 743 | asmlinkage void do_ov(struct pt_regs *regs) |
699 | 744 | { |
700 | 745 | enum ctx_state prev_state; |
.. | .. |
702 | 747 | prev_state = exception_enter(); |
703 | 748 | die_if_kernel("Integer overflow", regs); |
704 | 749 | |
705 | | - force_sig_fault(SIGFPE, FPE_INTOVF, (void __user *)regs->cp0_epc, current); |
| 750 | + force_sig_fault(SIGFPE, FPE_INTOVF, (void __user *)regs->cp0_epc); |
706 | 751 | exception_exit(prev_state); |
707 | 752 | } |
| 753 | + |
| 754 | +#ifdef CONFIG_MIPS_FP_SUPPORT |
708 | 755 | |
709 | 756 | /* |
710 | 757 | * Send SIGFPE according to FCSR Cause bits, which must have already |
.. | .. |
728 | 775 | else if (fcr31 & FPU_CSR_INE_X) |
729 | 776 | si_code = FPE_FLTRES; |
730 | 777 | |
731 | | - force_sig_fault(SIGFPE, si_code, fault_addr, tsk); |
| 778 | + force_sig_fault_to_task(SIGFPE, si_code, fault_addr, tsk); |
732 | 779 | } |
733 | 780 | |
734 | 781 | int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31) |
.. | .. |
745 | 792 | return 1; |
746 | 793 | |
747 | 794 | case SIGBUS: |
748 | | - force_sig_fault(SIGBUS, BUS_ADRERR, fault_addr, current); |
| 795 | + force_sig_fault(SIGBUS, BUS_ADRERR, fault_addr); |
749 | 796 | return 1; |
750 | 797 | |
751 | 798 | case SIGSEGV: |
752 | | - down_read(&current->mm->mmap_sem); |
| 799 | + mmap_read_lock(current->mm); |
753 | 800 | vma = find_vma(current->mm, (unsigned long)fault_addr); |
754 | 801 | if (vma && (vma->vm_start <= (unsigned long)fault_addr)) |
755 | 802 | si_code = SEGV_ACCERR; |
756 | 803 | else |
757 | 804 | si_code = SEGV_MAPERR; |
758 | | - up_read(&current->mm->mmap_sem); |
759 | | - force_sig_fault(SIGSEGV, si_code, fault_addr, current); |
| 805 | + mmap_read_unlock(current->mm); |
| 806 | + force_sig_fault(SIGSEGV, si_code, fault_addr); |
760 | 807 | return 1; |
761 | 808 | |
762 | 809 | default: |
763 | | - force_sig(sig, current); |
| 810 | + force_sig(sig); |
764 | 811 | return 1; |
765 | 812 | } |
766 | 813 | } |
.. | .. |
793 | 840 | */ |
794 | 841 | regs->cp0_epc = old_epc; |
795 | 842 | regs->regs[31] = old_ra; |
796 | | - |
797 | | - /* Save the FP context to struct thread_struct */ |
798 | | - lose_fpu(1); |
799 | 843 | |
800 | 844 | /* Run the emulator */ |
801 | 845 | sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1, |
.. | .. |
848 | 892 | * register operands before invoking the emulator, which seems |
849 | 893 | * a bit extreme for what should be an infrequent event. |
850 | 894 | */ |
851 | | - /* Ensure 'resume' not overwrite saved fp context again. */ |
852 | | - lose_fpu(1); |
853 | 895 | |
854 | 896 | /* Run the emulator */ |
855 | 897 | sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1, |
.. | .. |
875 | 917 | out: |
876 | 918 | exception_exit(prev_state); |
877 | 919 | } |
| 920 | + |
| 921 | +/* |
| 922 | + * MIPS MT processors may have fewer FPU contexts than CPU threads. If we've |
| 923 | + * emulated more than some threshold number of instructions, force migration to |
| 924 | + * a "CPU" that has FP support. |
| 925 | + */ |
| 926 | +static void mt_ase_fp_affinity(void) |
| 927 | +{ |
| 928 | +#ifdef CONFIG_MIPS_MT_FPAFF |
| 929 | + if (mt_fpemul_threshold > 0 && |
| 930 | + ((current->thread.emulated_fp++ > mt_fpemul_threshold))) { |
| 931 | + /* |
| 932 | + * If there's no FPU present, or if the application has already |
| 933 | + * restricted the allowed set to exclude any CPUs with FPUs, |
| 934 | + * we'll skip the procedure. |
| 935 | + */ |
| 936 | + if (cpumask_intersects(&current->cpus_mask, &mt_fpu_cpumask)) { |
| 937 | + cpumask_t tmask; |
| 938 | + |
| 939 | + current->thread.user_cpus_allowed |
| 940 | + = current->cpus_mask; |
| 941 | + cpumask_and(&tmask, &current->cpus_mask, |
| 942 | + &mt_fpu_cpumask); |
| 943 | + set_cpus_allowed_ptr(current, &tmask); |
| 944 | + set_thread_flag(TIF_FPUBOUND); |
| 945 | + } |
| 946 | + } |
| 947 | +#endif /* CONFIG_MIPS_MT_FPAFF */ |
| 948 | +} |
| 949 | + |
| 950 | +#else /* !CONFIG_MIPS_FP_SUPPORT */ |
| 951 | + |
| 952 | +static int simulate_fp(struct pt_regs *regs, unsigned int opcode, |
| 953 | + unsigned long old_epc, unsigned long old_ra) |
| 954 | +{ |
| 955 | + return -1; |
| 956 | +} |
| 957 | + |
| 958 | +#endif /* !CONFIG_MIPS_FP_SUPPORT */ |
878 | 959 | |
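
Whether real or stubbed out as above, every simulate_*() helper follows the same return convention: a negative value means "not ours, let the next simulator look at the opcode", zero means the instruction was emulated successfully, and a positive value is a signal number for the caller to raise. A condensed sketch of how do_ri() consumes that convention (not the full function):

    int status = -1;

    if (status < 0)
            status = simulate_fp(regs, opcode, old_epc, old_ra);
    if (status < 0)
            status = SIGILL;                /* nobody claimed the opcode */

    if (unlikely(status > 0)) {
            regs->cp0_epc = old_epc;        /* undo the skip-over */
            regs->regs[31] = old_ra;
            force_sig(status);
    }
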
879 | 960 | void do_trap_or_bp(struct pt_regs *regs, unsigned int code, int si_code, |
880 | 961 | const char *str) |
.. | .. |
904 | 985 | die_if_kernel(b, regs); |
905 | 986 | force_sig_fault(SIGFPE, |
906 | 987 | code == BRK_DIVZERO ? FPE_INTDIV : FPE_INTOVF, |
907 | | - (void __user *) regs->cp0_epc, current); |
| 988 | + (void __user *) regs->cp0_epc); |
908 | 989 | break; |
909 | 990 | case BRK_BUG: |
910 | 991 | die_if_kernel("Kernel bug detected", regs); |
911 | | - force_sig(SIGTRAP, current); |
| 992 | + force_sig(SIGTRAP); |
912 | 993 | break; |
913 | 994 | case BRK_MEMU: |
914 | 995 | /* |
.. | .. |
923 | 1004 | return; |
924 | 1005 | |
925 | 1006 | die_if_kernel("Math emu break/trap", regs); |
926 | | - force_sig(SIGTRAP, current); |
| 1007 | + force_sig(SIGTRAP); |
927 | 1008 | break; |
928 | 1009 | default: |
929 | 1010 | scnprintf(b, sizeof(b), "%s instruction in kernel code", str); |
930 | 1011 | die_if_kernel(b, regs); |
931 | 1012 | if (si_code) { |
932 | | - force_sig_fault(SIGTRAP, si_code, NULL, current); |
| 1013 | + force_sig_fault(SIGTRAP, si_code, NULL); |
933 | 1014 | } else { |
934 | | - force_sig(SIGTRAP, current); |
| 1015 | + force_sig(SIGTRAP); |
935 | 1016 | } |
936 | 1017 | } |
937 | 1018 | } |
.. | .. |
1024 | 1105 | return; |
1025 | 1106 | |
1026 | 1107 | out_sigsegv: |
1027 | | - force_sig(SIGSEGV, current); |
| 1108 | + force_sig(SIGSEGV); |
1028 | 1109 | goto out; |
1029 | 1110 | } |
1030 | 1111 | |
.. | .. |
1038 | 1119 | |
1039 | 1120 | seg = get_fs(); |
1040 | 1121 | if (!user_mode(regs)) |
1041 | | - set_fs(get_ds()); |
| 1122 | + set_fs(KERNEL_DS); |
1042 | 1123 | |
1043 | 1124 | prev_state = exception_enter(); |
1044 | 1125 | current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f; |
.. | .. |
1066 | 1147 | return; |
1067 | 1148 | |
1068 | 1149 | out_sigsegv: |
1069 | | - force_sig(SIGSEGV, current); |
| 1150 | + force_sig(SIGSEGV); |
1070 | 1151 | goto out; |
1071 | 1152 | } |
1072 | 1153 | |
.. | .. |
1132 | 1213 | |
1133 | 1214 | if (status < 0) |
1134 | 1215 | status = simulate_fp(regs, opcode, old_epc, old31); |
| 1216 | + |
| 1217 | +#ifdef CONFIG_CPU_LOONGSON3_CPUCFG_EMULATION |
| 1218 | + if (status < 0) |
| 1219 | + status = simulate_loongson3_cpucfg(regs, opcode); |
| 1220 | +#endif |
1135 | 1221 | } else if (cpu_has_mmips) { |
1136 | 1222 | unsigned short mmop[2] = { 0 }; |
1137 | 1223 | |
.. | .. |
1152 | 1238 | if (unlikely(status > 0)) { |
1153 | 1239 | regs->cp0_epc = old_epc; /* Undo skip-over. */ |
1154 | 1240 | regs->regs[31] = old31; |
1155 | | - force_sig(status, current); |
| 1241 | + force_sig(status); |
1156 | 1242 | } |
1157 | 1243 | |
1158 | 1244 | out: |
1159 | 1245 | exception_exit(prev_state); |
1160 | | -} |
1161 | | - |
1162 | | -/* |
1163 | | - * MIPS MT processors may have fewer FPU contexts than CPU threads. If we've |
1164 | | - * emulated more than some threshold number of instructions, force migration to |
1165 | | - * a "CPU" that has FP support. |
1166 | | - */ |
1167 | | -static void mt_ase_fp_affinity(void) |
1168 | | -{ |
1169 | | -#ifdef CONFIG_MIPS_MT_FPAFF |
1170 | | - if (mt_fpemul_threshold > 0 && |
1171 | | - ((current->thread.emulated_fp++ > mt_fpemul_threshold))) { |
1172 | | - /* |
1173 | | - * If there's no FPU present, or if the application has already |
1174 | | - * restricted the allowed set to exclude any CPUs with FPUs, |
1175 | | - * we'll skip the procedure. |
1176 | | - */ |
1177 | | - if (cpumask_intersects(&current->cpus_mask, &mt_fpu_cpumask)) { |
1178 | | - cpumask_t tmask; |
1179 | | - |
1180 | | - current->thread.user_cpus_allowed |
1181 | | - = current->cpus_mask; |
1182 | | - cpumask_and(&tmask, &current->cpus_mask, |
1183 | | - &mt_fpu_cpumask); |
1184 | | - set_cpus_allowed_ptr(current, &tmask); |
1185 | | - set_thread_flag(TIF_FPUBOUND); |
1186 | | - } |
1187 | | - } |
1188 | | -#endif /* CONFIG_MIPS_MT_FPAFF */ |
1189 | 1246 | } |
1190 | 1247 | |
1191 | 1248 | /* |
.. | .. |
1210 | 1267 | |
1211 | 1268 | die_if_kernel("COP2: Unhandled kernel unaligned access or invalid " |
1212 | 1269 | "instruction", regs); |
1213 | | - force_sig(SIGILL, current); |
| 1270 | + force_sig(SIGILL); |
1214 | 1271 | |
1215 | 1272 | return NOTIFY_OK; |
1216 | 1273 | } |
1217 | 1274 | |
| 1275 | +#ifdef CONFIG_MIPS_FP_SUPPORT |
| 1276 | + |
1218 | 1277 | static int enable_restore_fp_context(int msa) |
1219 | 1278 | { |
1220 | 1279 | int err, was_fpu_owner, prior_msa; |
| 1280 | + bool first_fp; |
1221 | 1281 | |
1222 | | - if (!used_math()) { |
1223 | | - /* First time FP context user. */ |
| 1282 | + /* Initialize context if it hasn't been used already */ |
| 1283 | + first_fp = init_fp_ctx(current); |
| 1284 | + |
| 1285 | + if (first_fp) { |
1224 | 1286 | preempt_disable(); |
1225 | | - err = init_fpu(); |
| 1287 | + err = own_fpu_inatomic(1); |
1226 | 1288 | if (msa && !err) { |
1227 | 1289 | enable_msa(); |
| 1290 | + /* |
| 1291 | + * With MSA enabled, userspace can see the MSACSR |
| 1292 | + * and MSA registers, but the values in them are |
| 1293 | + * stale, left over from whichever task used MSA |
| 1294 | + * last, so restore them from the saved FP/MSA context. |
| 1295 | + */ |
| 1296 | + write_msa_csr(current->thread.fpu.msacsr); |
| 1297 | + /* |
| 1298 | + * own_fpu_inatomic(1) only restored the low 64 bits of |
| 1299 | + * each vector register; fix up the high 64 bits. |
| 1300 | + */ |
1228 | 1301 | init_msa_upper(); |
1229 | 1302 | set_thread_flag(TIF_USEDMSA); |
1230 | 1303 | set_thread_flag(TIF_MSA_CTX_LIVE); |
1231 | 1304 | } |
1232 | 1305 | preempt_enable(); |
1233 | | - if (!err) |
1234 | | - set_used_math(); |
1235 | 1306 | return err; |
1236 | 1307 | } |
1237 | 1308 | |
.. | .. |
1322 | 1393 | return 0; |
1323 | 1394 | } |
1324 | 1395 | |
| 1396 | +#else /* !CONFIG_MIPS_FP_SUPPORT */ |
| 1397 | + |
| 1398 | +static int enable_restore_fp_context(int msa) |
| 1399 | +{ |
| 1400 | + return SIGILL; |
| 1401 | +} |
| 1402 | + |
| 1403 | +#endif /* CONFIG_MIPS_FP_SUPPORT */ |
| 1404 | + |
1325 | 1405 | asmlinkage void do_cpu(struct pt_regs *regs) |
1326 | 1406 | { |
1327 | 1407 | enum ctx_state prev_state; |
1328 | 1408 | unsigned int __user *epc; |
1329 | 1409 | unsigned long old_epc, old31; |
1330 | | - void __user *fault_addr; |
1331 | 1410 | unsigned int opcode; |
1332 | | - unsigned long fcr31; |
1333 | 1411 | unsigned int cpid; |
1334 | | - int status, err; |
1335 | | - int sig; |
| 1412 | + int status; |
1336 | 1413 | |
1337 | 1414 | prev_state = exception_enter(); |
1338 | 1415 | cpid = (regs->cp0_cause >> CAUSEB_CE) & 3; |
.. | .. |
1365 | 1442 | if (unlikely(status > 0)) { |
1366 | 1443 | regs->cp0_epc = old_epc; /* Undo skip-over. */ |
1367 | 1444 | regs->regs[31] = old31; |
1368 | | - force_sig(status, current); |
| 1445 | + force_sig(status); |
1369 | 1446 | } |
1370 | 1447 | |
1371 | 1448 | break; |
1372 | 1449 | |
| 1450 | +#ifdef CONFIG_MIPS_FP_SUPPORT |
1373 | 1451 | case 3: |
1374 | 1452 | /* |
1375 | 1453 | * The COP3 opcode space and consequently the CP0.Status.CU3 |
.. | .. |
1384 | 1462 | * emulator too. |
1385 | 1463 | */ |
1386 | 1464 | if (raw_cpu_has_fpu || !cpu_has_mips_4_5_64_r2_r6) { |
1387 | | - force_sig(SIGILL, current); |
| 1465 | + force_sig(SIGILL); |
1388 | 1466 | break; |
1389 | 1467 | } |
1390 | | - /* Fall through. */ |
| 1468 | + fallthrough; |
| 1469 | + case 1: { |
| 1470 | + void __user *fault_addr; |
| 1471 | + unsigned long fcr31; |
| 1472 | + int err, sig; |
1391 | 1473 | |
1392 | | - case 1: |
1393 | 1474 | err = enable_restore_fp_context(0); |
1394 | 1475 | |
1395 | 1476 | if (raw_cpu_has_fpu && !err) |
.. | .. |
1410 | 1491 | mt_ase_fp_affinity(); |
1411 | 1492 | |
1412 | 1493 | break; |
| 1494 | + } |
| 1495 | +#else /* CONFIG_MIPS_FP_SUPPORT */ |
| 1496 | + case 1: |
| 1497 | + case 3: |
| 1498 | + force_sig(SIGILL); |
| 1499 | + break; |
| 1500 | +#endif /* CONFIG_MIPS_FP_SUPPORT */ |
1413 | 1501 | |
1414 | 1502 | case 2: |
1415 | 1503 | raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs); |
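
The fallthrough; statement replacing the old /* Fall through. */ comment above is the kernel-wide pseudo-keyword from <linux/compiler_attributes.h>; it lets -Wimplicit-fallthrough verify that the fall-through is intentional. Its definition is roughly:

    #if __has_attribute(__fallthrough__)
    # define fallthrough    __attribute__((__fallthrough__))
    #else
    # define fallthrough    do {} while (0)  /* fallthrough */
    #endif

The braces added around the merged case 1 body exist because the block now declares its own locals (fault_addr, fcr31, err, sig), which this diff hoists out of do_cpu()'s top-level scope.
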
.. | .. |
1434 | 1522 | local_irq_enable(); |
1435 | 1523 | |
1436 | 1524 | die_if_kernel("do_msa_fpe invoked from kernel context!", regs); |
1437 | | - force_sig(SIGFPE, current); |
| 1525 | + force_sig(SIGFPE); |
1438 | 1526 | out: |
1439 | 1527 | exception_exit(prev_state); |
1440 | 1528 | } |
.. | .. |
1447 | 1535 | prev_state = exception_enter(); |
1448 | 1536 | |
1449 | 1537 | if (!cpu_has_msa || test_thread_flag(TIF_32BIT_FPREGS)) { |
1450 | | - force_sig(SIGILL, current); |
| 1538 | + force_sig(SIGILL); |
1451 | 1539 | goto out; |
1452 | 1540 | } |
1453 | 1541 | |
.. | .. |
1455 | 1543 | |
1456 | 1544 | err = enable_restore_fp_context(1); |
1457 | 1545 | if (err) |
1458 | | - force_sig(SIGILL, current); |
| 1546 | + force_sig(SIGILL); |
1459 | 1547 | out: |
1460 | 1548 | exception_exit(prev_state); |
1461 | 1549 | } |
.. | .. |
1465 | 1553 | enum ctx_state prev_state; |
1466 | 1554 | |
1467 | 1555 | prev_state = exception_enter(); |
1468 | | - force_sig(SIGILL, current); |
| 1556 | + force_sig(SIGILL); |
1469 | 1557 | exception_exit(prev_state); |
1470 | 1558 | } |
1471 | 1559 | |
.. | .. |
1491 | 1579 | if (test_tsk_thread_flag(current, TIF_LOAD_WATCH)) { |
1492 | 1580 | mips_read_watch_registers(); |
1493 | 1581 | local_irq_enable(); |
1494 | | - force_sig_fault(SIGTRAP, TRAP_HWBKPT, NULL, current); |
| 1582 | + force_sig_fault(SIGTRAP, TRAP_HWBKPT, NULL); |
1495 | 1583 | } else { |
1496 | 1584 | mips_clear_watch_registers(); |
1497 | 1585 | local_irq_enable(); |
.. | .. |
1562 | 1650 | } |
1563 | 1651 | die_if_kernel("MIPS MT Thread exception in kernel", regs); |
1564 | 1652 | |
1565 | | - force_sig(SIGILL, current); |
| 1653 | + force_sig(SIGILL); |
1566 | 1654 | } |
1567 | 1655 | |
1568 | 1656 | |
.. | .. |
1571 | 1659 | if (cpu_has_dsp) |
1572 | 1660 | panic("Unexpected DSP exception"); |
1573 | 1661 | |
1574 | | - force_sig(SIGILL, current); |
| 1662 | + force_sig(SIGILL); |
1575 | 1663 | } |
1576 | 1664 | |
1577 | 1665 | asmlinkage void do_reserved(struct pt_regs *regs) |
.. | .. |
1605 | 1693 | * Some MIPS CPUs can enable/disable cache parity detection, but they |
1606 | 1694 | * do it in different ways. |
1607 | 1695 | */ |
1608 | | -static inline void parity_protection_init(void) |
| 1696 | +static inline __init void parity_protection_init(void) |
1609 | 1697 | { |
1610 | 1698 | #define ERRCTL_PE 0x80000000 |
1611 | 1699 | #define ERRCTL_L2P 0x00800000 |
.. | .. |
1731 | 1819 | |
1732 | 1820 | case CPU_5KC: |
1733 | 1821 | case CPU_5KE: |
1734 | | - case CPU_LOONGSON1: |
| 1822 | + case CPU_LOONGSON32: |
1735 | 1823 | write_c0_ecc(0x80000000); |
1736 | 1824 | back_to_back_c0_hazard(); |
1737 | 1825 | /* Set the PE bit (bit 31) in the c0_errctl register. */ |
.. | .. |
1825 | 1913 | } |
1826 | 1914 | /* Just print the cacheerr bits for now */ |
1827 | 1915 | cache_parity_error(); |
| 1916 | +} |
| 1917 | + |
| 1918 | +asmlinkage void do_gsexc(struct pt_regs *regs, u32 diag1) |
| 1919 | +{ |
| 1920 | + u32 exccode = (diag1 & LOONGSON_DIAG1_EXCCODE) >> |
| 1921 | + LOONGSON_DIAG1_EXCCODE_SHIFT; |
| 1922 | + enum ctx_state prev_state; |
| 1923 | + |
| 1924 | + prev_state = exception_enter(); |
| 1925 | + |
| 1926 | + switch (exccode) { |
| 1927 | + case 0x08: |
| 1928 | + /* Undocumented exception, will trigger on certain |
| 1929 | + * also-undocumented instructions accessible from userspace. |
| 1930 | + * Processor state is not otherwise corrupted, but currently |
| 1931 | + * we don't know how to proceed. Maybe there is some |
| 1932 | + * undocumented control flag to enable the instructions? |
| 1933 | + */ |
| 1934 | + force_sig(SIGILL); |
| 1935 | + break; |
| 1936 | + |
| 1937 | + default: |
| 1938 | + /* None of the other exceptions, documented or not, have |
| 1939 | + * further details given; none are encountered in the wild |
| 1940 | + * either. Panic in case some of them turn out to be fatal. |
| 1941 | + */ |
| 1942 | + show_regs(regs); |
| 1943 | + panic("Unhandled Loongson exception - GSCause = %08x", diag1); |
| 1944 | + } |
| 1945 | + |
| 1946 | + exception_exit(prev_state); |
1828 | 1947 | } |
1829 | 1948 | |
1830 | 1949 | /* |
.. | .. |
1978 | 2097 | * If no shadow set is selected then use the default handler |
1979 | 2098 | * that does normal register saving and standard interrupt exit |
1980 | 2099 | */ |
1981 | | - extern char except_vec_vi, except_vec_vi_lui; |
1982 | | - extern char except_vec_vi_ori, except_vec_vi_end; |
1983 | | - extern char rollback_except_vec_vi; |
1984 | | - char *vec_start = using_rollback_handler() ? |
1985 | | - &rollback_except_vec_vi : &except_vec_vi; |
| 2100 | + extern const u8 except_vec_vi[], except_vec_vi_lui[]; |
| 2101 | + extern const u8 except_vec_vi_ori[], except_vec_vi_end[]; |
| 2102 | + extern const u8 rollback_except_vec_vi[]; |
| 2103 | + const u8 *vec_start = using_rollback_handler() ? |
| 2104 | + rollback_except_vec_vi : except_vec_vi; |
1986 | 2105 | #if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN) |
1987 | | - const int lui_offset = &except_vec_vi_lui - vec_start + 2; |
1988 | | - const int ori_offset = &except_vec_vi_ori - vec_start + 2; |
| 2106 | + const int lui_offset = except_vec_vi_lui - vec_start + 2; |
| 2107 | + const int ori_offset = except_vec_vi_ori - vec_start + 2; |
1989 | 2108 | #else |
1990 | | - const int lui_offset = &except_vec_vi_lui - vec_start; |
1991 | | - const int ori_offset = &except_vec_vi_ori - vec_start; |
| 2109 | + const int lui_offset = except_vec_vi_lui - vec_start; |
| 2110 | + const int ori_offset = except_vec_vi_ori - vec_start; |
1992 | 2111 | #endif |
1993 | | - const int handler_len = &except_vec_vi_end - vec_start; |
| 2112 | + const int handler_len = except_vec_vi_end - vec_start; |
1994 | 2113 | |
1995 | 2114 | if (handler_len > VECTORSPACING) { |
1996 | 2115 | /* |
.. | .. |
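
The retyping in the hunk above, from extern char plus address-of to extern const u8 [], is a C idiom for labels exported from assembly: the symbol has no real C object type, the array form decays directly to a pointer (so the offset arithmetic sheds its & clutter), and const documents that the handler stub is only ever read and copied. A minimal sketch with hypothetical symbol names:

    #include <linux/types.h>

    extern char vec_old;                 /* old style: must write &vec_old */
    extern const u8 vec_start[];         /* new style: the name is already a pointer */
    extern const u8 vec_end[];

    static long vec_len_example(void)
    {
            return vec_end - vec_start;  /* plain pointer subtraction */
    }
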
2085 | 2204 | * flag that some firmware may have left set and the TS bit (for |
2086 | 2205 | * IP27). Set XX for ISA IV code to work. |
2087 | 2206 | */ |
2088 | | - unsigned int status_set = ST0_CU0; |
| 2207 | + unsigned int status_set = ST0_KERNEL_CUMASK; |
2089 | 2208 | #ifdef CONFIG_64BIT |
2090 | 2209 | status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX; |
2091 | 2210 | #endif |
.. | .. |
2122 | 2241 | |
2123 | 2242 | static void configure_exception_vector(void) |
2124 | 2243 | { |
2125 | | - if (cpu_has_veic || cpu_has_vint) { |
| 2244 | + if (cpu_has_mips_r2_r6) { |
2126 | 2245 | unsigned long sr = set_c0_status(ST0_BEV); |
2127 | 2246 | /* If available, use WG to set top bits of EBASE */ |
2128 | 2247 | if (cpu_has_ebase_wg) { |
.. | .. |
2134 | 2253 | } |
2135 | 2254 | write_c0_ebase(ebase); |
2136 | 2255 | write_c0_status(sr); |
| 2256 | + } |
| 2257 | + if (cpu_has_veic || cpu_has_vint) { |
2137 | 2258 | /* Setting vector spacing enables EI/VI mode */ |
2138 | 2259 | change_c0_intctl(0x3e0, VECTORSPACING); |
2139 | 2260 | } |
.. | .. |
2164 | 2285 | * o read IntCtl.IPFDC to determine the fast debug channel interrupt |
2165 | 2286 | */ |
2166 | 2287 | if (cpu_has_mips_r2_r6) { |
2167 | | - /* |
2168 | | - * We shouldn't trust a secondary core has a sane EBASE register |
2169 | | - * so use the one calculated by the boot CPU. |
2170 | | - */ |
2171 | | - if (!is_boot_cpu) { |
2172 | | - /* If available, use WG to set top bits of EBASE */ |
2173 | | - if (cpu_has_ebase_wg) { |
2174 | | -#ifdef CONFIG_64BIT |
2175 | | - write_c0_ebase_64(ebase | MIPS_EBASE_WG); |
2176 | | -#else |
2177 | | - write_c0_ebase(ebase | MIPS_EBASE_WG); |
2178 | | -#endif |
2179 | | - } |
2180 | | - write_c0_ebase(ebase); |
2181 | | - } |
2182 | | - |
2183 | 2288 | cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP; |
2184 | 2289 | cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7; |
2185 | 2290 | cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7; |
.. | .. |
2194 | 2299 | cp0_fdc_irq = -1; |
2195 | 2300 | } |
2196 | 2301 | |
2197 | | - if (!cpu_data[cpu].asid_cache) |
| 2302 | + if (cpu_has_mmid) |
| 2303 | + cpu_data[cpu].asid_cache = 0; |
| 2304 | + else if (!cpu_data[cpu].asid_cache) |
2198 | 2305 | cpu_data[cpu].asid_cache = asid_first_version(cpu); |
2199 | 2306 | |
2200 | 2307 | mmgrab(&init_mm); |
.. | .. |
2210 | 2317 | } |
2211 | 2318 | |
2212 | 2319 | /* Install CPU exception handler */ |
2213 | | -void set_handler(unsigned long offset, void *addr, unsigned long size) |
| 2320 | +void set_handler(unsigned long offset, const void *addr, unsigned long size) |
2214 | 2321 | { |
2215 | 2322 | #ifdef CONFIG_CPU_MICROMIPS |
2216 | 2323 | memcpy((void *)(ebase + offset), ((unsigned char *)addr - 1), size); |
.. | .. |
2253 | 2360 | extern char except_vec3_generic; |
2254 | 2361 | extern char except_vec4; |
2255 | 2362 | extern char except_vec3_r4000; |
2256 | | - unsigned long i; |
| 2363 | + unsigned long i, vec_size; |
| 2364 | + phys_addr_t ebase_pa; |
2257 | 2365 | |
2258 | 2366 | check_wait(); |
2259 | 2367 | |
2260 | | - if (cpu_has_veic || cpu_has_vint) { |
2261 | | - unsigned long size = 0x200 + VECTORSPACING*64; |
2262 | | - phys_addr_t ebase_pa; |
| 2368 | + if (!cpu_has_mips_r2_r6) { |
| 2369 | + ebase = CAC_BASE; |
| 2370 | + ebase_pa = virt_to_phys((void *)ebase); |
| 2371 | + vec_size = 0x400; |
2263 | 2372 | |
2264 | | - ebase = (unsigned long) |
2265 | | - __alloc_bootmem(size, 1 << fls(size), 0); |
| 2373 | + memblock_reserve(ebase_pa, vec_size); |
| 2374 | + } else { |
| 2375 | + if (cpu_has_veic || cpu_has_vint) |
| 2376 | + vec_size = 0x200 + VECTORSPACING*64; |
| 2377 | + else |
| 2378 | + vec_size = PAGE_SIZE; |
| 2379 | + |
| 2380 | + ebase_pa = memblock_phys_alloc(vec_size, 1 << fls(vec_size)); |
| 2381 | + if (!ebase_pa) |
| 2382 | + panic("%s: Failed to allocate %lu bytes align=0x%x\n", |
| 2383 | + __func__, vec_size, 1 << fls(vec_size)); |
2266 | 2384 | |
2267 | 2385 | /* |
2268 | 2386 | * Try to ensure ebase resides in KSeg0 if possible. |
.. | .. |
2275 | 2393 | * EVA is special though as it allows segments to be rearranged |
2276 | 2394 | * and to become uncached during cache error handling. |
2277 | 2395 | */ |
2278 | | - ebase_pa = __pa(ebase); |
2279 | 2396 | if (!IS_ENABLED(CONFIG_EVA) && !WARN_ON(ebase_pa >= 0x20000000)) |
2280 | 2397 | ebase = CKSEG0ADDR(ebase_pa); |
2281 | | - } else { |
2282 | | - ebase = CAC_BASE; |
2283 | | - |
2284 | | - if (cpu_has_mips_r2_r6) { |
2285 | | - if (cpu_has_ebase_wg) { |
2286 | | -#ifdef CONFIG_64BIT |
2287 | | - ebase = (read_c0_ebase_64() & ~0xfff); |
2288 | | -#else |
2289 | | - ebase = (read_c0_ebase() & ~0xfff); |
2290 | | -#endif |
2291 | | - } else { |
2292 | | - ebase += (read_c0_ebase() & 0x3ffff000); |
2293 | | - } |
2294 | | - } |
| 2398 | + else |
| 2399 | + ebase = (unsigned long)phys_to_virt(ebase_pa); |
2295 | 2400 | } |
2296 | 2401 | |
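
On the alignment request in the memblock allocation above: fls() returns the 1-based index of the most significant set bit, so 1 << fls(x) is the smallest power of two strictly greater than x. The vector region is thus padded out to a power-of-two boundary large enough to contain it, which comfortably covers the base-address alignment the exception vectors need. A sketch of the arithmetic:

    #include <linux/bitops.h>

    /* e.g. vec_size = 0x4200 -> fls() = 15 -> alignment = 0x8000 */
    static unsigned long vec_align(unsigned long vec_size)
    {
            return 1UL << fls(vec_size);
    }
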
2297 | 2402 | if (cpu_has_mmips) { |
.. | .. |
2306 | 2411 | if (board_ebase_setup) |
2307 | 2412 | board_ebase_setup(); |
2308 | 2413 | per_cpu_trap_init(true); |
| 2414 | + memblock_set_bottom_up(false); |
2309 | 2415 | |
2310 | 2416 | /* |
2311 | 2417 | * Copy the generic exception handlers to their final destination. |
.. | .. |
2378 | 2484 | else { |
2379 | 2485 | if (cpu_has_vtag_icache) |
2380 | 2486 | set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp); |
2381 | | - else if (current_cpu_type() == CPU_LOONGSON3) |
| 2487 | + else if (current_cpu_type() == CPU_LOONGSON64) |
2382 | 2488 | set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp); |
2383 | 2489 | else |
2384 | 2490 | set_except_vector(EXCCODE_RI, handle_ri_rdhwr); |
.. | .. |
2395 | 2501 | if (cpu_has_fpu && !cpu_has_nofpuex) |
2396 | 2502 | set_except_vector(EXCCODE_FPE, handle_fpe); |
2397 | 2503 | |
2398 | | - set_except_vector(MIPS_EXCCODE_TLBPAR, handle_ftlb); |
| 2504 | + if (cpu_has_ftlbparex) |
| 2505 | + set_except_vector(MIPS_EXCCODE_TLBPAR, handle_ftlb); |
| 2506 | + |
| 2507 | + if (cpu_has_gsexcex) |
| 2508 | + set_except_vector(LOONGSON_EXCCODE_GSEXC, handle_gsexc); |
2399 | 2509 | |
2400 | 2510 | if (cpu_has_rixiex) { |
2401 | 2511 | set_except_vector(EXCCODE_TLBRI, tlb_do_page_fault_0); |
.. | .. |
2424 | 2534 | else |
2425 | 2535 | set_handler(0x080, &except_vec3_generic, 0x80); |
2426 | 2536 | |
2427 | | - local_flush_icache_range(ebase, ebase + 0x400); |
| 2537 | + local_flush_icache_range(ebase, ebase + vec_size); |
2428 | 2538 | |
2429 | 2539 | sort_extable(__start___dbe_table, __stop___dbe_table); |
2430 | 2540 | |
---|