```diff
@@ -42,7 +42,7 @@
  * inline to try to keep the overhead down. If we have been forced to run on
  * a "CPU" with an FPU because of a previous high level of FP computation,
  * but did not actually use the FPU during the most recent time-slice (CU1
- * isn't set), we undo the restriction on cpus_allowed.
+ * isn't set), we undo the restriction on cpus_mask.
  *
  * We're not calling set_cpus_allowed() here, because we have no need to
  * force prompt migration - we're already switching the current CPU to a
@@ -57,7 +57,7 @@
 	    test_ti_thread_flag(__prev_ti, TIF_FPUBOUND) &&		\
 	    (!(KSTK_STATUS(prev) & ST0_CU1))) {				\
 		clear_ti_thread_flag(__prev_ti, TIF_FPUBOUND);		\
-		prev->cpus_allowed = prev->thread.user_cpus_allowed;	\
+		prev->cpus_mask = prev->thread.user_cpus_allowed;	\
 	}								\
 	next->thread.emulated_fp = 0;					\
 } while(0)
@@ -67,11 +67,11 @@
 #endif
 
 /*
- * Clear LLBit during context switches on MIPSr6 such that eretnc can be used
+ * Clear LLBit during context switches on MIPSr5+ such that eretnc can be used
  * unconditionally when returning to userland in entry.S.
  */
-#define __clear_r6_hw_ll_bit() do {					\
-	if (cpu_has_mips_r6)						\
+#define __clear_r5_hw_ll_bit() do {					\
+	if (cpu_has_mips_r5 || cpu_has_mips_r6)				\
 		write_c0_lladdr(0);					\
 } while (0)
 
@@ -84,7 +84,8 @@
  * Check FCSR for any unmasked exceptions pending set with `ptrace',
  * clear them and send a signal.
  */
-#define __sanitize_fcr31(next)						\
+#ifdef CONFIG_MIPS_FP_SUPPORT
+# define __sanitize_fcr31(next)					\
 do {									\
 	unsigned long fcr31 = mask_fcr31_x(next->thread.fpu.fcr31);	\
 	void __user *pc;						\
@@ -95,6 +96,9 @@
 		force_fcr31_sig(fcr31, pc, next);			\
 	}								\
 } while (0)
+#else
+# define __sanitize_fcr31(next)
+#endif
 
 /*
  * For newly created kernel threads switch_to() will return to
@@ -113,6 +117,8 @@
 		__restore_dsp(next);					\
 	}								\
 	if (cop2_present) {						\
+		u32 status = read_c0_status();				\
+									\
 		set_c0_status(ST0_CU2);					\
 		if ((KSTK_STATUS(prev) & ST0_CU2)) {			\
 			if (cop2_lazy_restore)				\
@@ -123,9 +129,9 @@
 		    !cop2_lazy_restore) {				\
 			cop2_restore(next);				\
 		}							\
-		clear_c0_status(ST0_CU2);				\
+		write_c0_status(status);				\
 	}								\
-	__clear_r6_hw_ll_bit();						\
+	__clear_r5_hw_ll_bit();						\
 	__clear_software_ll_bit();					\
 	if (cpu_has_userlocal)						\
 		write_c0_userlocal(task_thread_info(next)->tp_value);	\
```
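For the first two hunks: `cpus_allowed` was renamed to `cpus_mask` when the scheduler split the field into a directly-owned mask plus a `cpus_ptr` alias that readers dereference. Below is a minimal standalone sketch of that shape and of the "undo the restriction" assignment the hunk performs; the toy `cpumask_t`, the `struct task` layout, and the field placement are simplified stand-ins for illustration, not the real kernel definitions:

```c
#include <stdio.h>

typedef unsigned long cpumask_t;	/* toy stand-in for the kernel type */

struct thread_struct { cpumask_t user_cpus_allowed; };

struct task {
	const cpumask_t *cpus_ptr;	/* readers go through this alias */
	cpumask_t cpus_mask;		/* writers update the mask itself */
	struct thread_struct thread;
};

int main(void)
{
	struct task prev = {
		.cpus_mask = 0x1,			/* pinned to CPU 0 while FPU-bound */
		.thread.user_cpus_allowed = 0xf,	/* user asked for CPUs 0-3 */
	};
	prev.cpus_ptr = &prev.cpus_mask;

	/* Mirror of the hunk: the FPU-affinity restriction is undone by
	 * restoring the user-visible mask. */
	prev.cpus_mask = prev.thread.user_cpus_allowed;

	printf("allowed mask now %#lx\n", *prev.cpus_ptr);
	return 0;
}
```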
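The LLBit hunk widens the explicit clear from MIPSr6-only to MIPSr5 and r6: as the updated comment says, `eretnc` (available from r5) returns to userland without clearing LLBit, so the kernel clears it itself via `write_c0_lladdr(0)` on every context switch. The point is to keep a link established by the previous task from letting the next task's SC succeed spuriously. A toy model of the mechanism, not MIPS semantics in any detail; `ll()`/`sc()` here are simplified stand-ins:

```c
#include <stdbool.h>
#include <stdio.h>

/* Toy model of the LL/SC link bit ("LLBit"). */
static bool llbit;
static int memory;

static int ll(void)
{
	llbit = true;		/* LL establishes the link */
	return memory;
}

static bool sc(int v)
{
	if (!llbit)
		return false;	/* link broken: store fails */
	memory = v;
	llbit = false;
	return true;
}

/* What __clear_r5_hw_ll_bit() achieves: no SC can succeed off a
 * link established by a different task. */
static void context_switch(void)
{
	llbit = false;
}

int main(void)
{
	(void)ll();		/* prev task starts an LL/SC sequence */
	context_switch();	/* kernel clears LLBit on switch */
	printf("sc after switch: %s\n", sc(1) ? "succeeded" : "failed");
	return 0;
}
```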
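The `CONFIG_MIPS_FP_SUPPORT` hunks use the common kernel idiom of letting a macro expand to nothing when a feature is configured out, so the call site (`__sanitize_fcr31(next)` inside `switch_to()`) needs no `#ifdef` of its own. A small self-contained illustration of the idiom; `CONFIG_FEATURE_X`, `do_sanitize()`, and `struct task_state` are invented names for the sketch:

```c
#include <stdio.h>

#define CONFIG_FEATURE_X 1	/* comment out to mimic the feature being disabled */

struct task_state { int pending; };

static void do_sanitize(struct task_state *t) { t->pending = 0; }

#ifdef CONFIG_FEATURE_X
# define sanitize_state(t)	do_sanitize(t)
#else
# define sanitize_state(t)	/* expands to an empty statement */
#endif

int main(void)
{
	struct task_state next = { .pending = 42 };

	/* Compiles either way; no #ifdef needed at the call site. */
	sanitize_state(&next);
	printf("pending = %d\n", next.pending);
	return 0;
}
```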
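Finally, the cop2 hunks change how `Status.CU2` is handled around the coprocessor 2 save/restore: instead of unconditionally clearing CU2 afterwards, the macro now snapshots the whole Status register up front and writes that snapshot back, so any CU2 state that was live before the switch survives it. A standalone before/after sketch; the accessors here are trivial stubs standing in for the kernel's real CP0 helpers:

```c
#include <stdint.h>
#include <stdio.h>

#define ST0_CU2 0x40000000u

/* Stub "CP0 Status register" and accessors for the sketch. */
static uint32_t c0_status;
static uint32_t read_c0_status(void)		{ return c0_status; }
static void write_c0_status(uint32_t v)		{ c0_status = v; }
static void set_c0_status(uint32_t bits)	{ c0_status |= bits; }
static void clear_c0_status(uint32_t bits)	{ c0_status &= ~bits; }

/* Old pattern: always clears CU2 on the way out, even if it was
 * already set when we entered. */
static void cop2_switch_old(void)
{
	set_c0_status(ST0_CU2);
	/* ... cop2_save(prev) / cop2_restore(next) would run here ... */
	clear_c0_status(ST0_CU2);
}

/* New pattern: restore exactly the Status value we started with. */
static void cop2_switch_new(void)
{
	uint32_t status = read_c0_status();

	set_c0_status(ST0_CU2);
	/* ... cop2_save(prev) / cop2_restore(next) would run here ... */
	write_c0_status(status);
}

int main(void)
{
	c0_status = ST0_CU2;	/* CU2 was enabled before the switch */
	cop2_switch_old();
	printf("old: CU2 %s\n", (c0_status & ST0_CU2) ? "kept" : "lost");

	c0_status = ST0_CU2;
	cop2_switch_new();
	printf("new: CU2 %s\n", (c0_status & ST0_CU2) ? "kept" : "lost");
	return 0;
}
```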