forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-09-20 a36159eec6ca17402b0e146b86efaf76568dc353
kernel/arch/mips/include/asm/switch_to.h
@@ -42,7 +42,7 @@
  * inline to try to keep the overhead down. If we have been forced to run on
  * a "CPU" with an FPU because of a previous high level of FP computation,
  * but did not actually use the FPU during the most recent time-slice (CU1
- * isn't set), we undo the restriction on cpus_allowed.
+ * isn't set), we undo the restriction on cpus_mask.
  *
  * We're not calling set_cpus_allowed() here, because we have no need to
  * force prompt migration - we're already switching the current CPU to a
@@ -57,7 +57,7 @@
 	    test_ti_thread_flag(__prev_ti, TIF_FPUBOUND) &&		\
 	    (!(KSTK_STATUS(prev) & ST0_CU1))) {				\
 		clear_ti_thread_flag(__prev_ti, TIF_FPUBOUND);		\
-		prev->cpus_allowed = prev->thread.user_cpus_allowed;	\
+		prev->cpus_mask = prev->thread.user_cpus_allowed;	\
 	}								\
 	next->thread.emulated_fp = 0;					\
 } while(0)
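
These two hunks follow the upstream scheduler rename of task_struct::cpus_allowed to cpus_mask (the scheduler now normally reaches the mask through a cpus_ptr pointer). The MIPS FP-affinity code assigns the field directly rather than going through set_cpus_allowed_ptr(), so the rename has to be mirrored here by hand. A sketch of the fields involved, reconstructed from memory of kernels around that rename and not taken from this patch:

	/* Illustrative only: rough shape of the fields this hunk touches. */
	struct task_struct_sketch {
		cpumask_t	cpus_mask;	/* was "cpus_allowed" before the rename */
		const cpumask_t	*cpus_ptr;	/* scheduler reads affinity through this */
		struct {
			cpumask_t user_cpus_allowed;	/* affinity the user asked for */
		} thread;	/* stands in for the MIPS thread_struct member */
	};

Writing prev->cpus_mask directly widens the mask again without forcing an immediate migration, which is exactly the behaviour the comment above the first hunk asks for.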
@@ -67,11 +67,11 @@
 #endif
 
 /*
- * Clear LLBit during context switches on MIPSr6 such that eretnc can be used
+ * Clear LLBit during context switches on MIPSr5+ such that eretnc can be used
  * unconditionally when returning to userland in entry.S.
  */
-#define __clear_r6_hw_ll_bit() do {					\
-	if (cpu_has_mips_r6)						\
+#define __clear_r5_hw_ll_bit() do {					\
+	if (cpu_has_mips_r5 || cpu_has_mips_r6)				\
 		write_c0_lladdr(0);					\
 } while (0)
 
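
Extending the clear from r6-only to r5+ matters because eretnc is available from MIPSr5 onward and, unlike eret, does not clear LLBit itself; the switch path must therefore kill the link bit explicitly (writing CP0 LLAddr does that) so an LL/SC pair straddling a context switch cannot complete against stale data. A generic illustration of the kind of loop being protected, not code from this patch:

	/* Illustrative only: the classic MIPS LL/SC retry loop. If the task
	 * is switched out between ll and sc, LLBit must already be clear so
	 * the sc stores nothing and writes 0 to its register, forcing a
	 * retry against the fresh memory value. */
	static inline void atomic_inc_sketch(int *counter)
	{
		int tmp;

		__asm__ __volatile__(
		"1:	ll	%0, %1		\n"	/* load-linked: sets LLBit        */
		"	addiu	%0, %0, 1	\n"
		"	sc	%0, %1		\n"	/* store-conditional: needs LLBit */
		"	beqz	%0, 1b		\n"	/* sc wrote 0 => link lost, retry */
		"	 nop			\n"	/* branch delay slot              */
		: "=&r" (tmp), "+m" (*counter));
	}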
@@ -84,7 +84,8 @@
  * Check FCSR for any unmasked exceptions pending set with `ptrace',
  * clear them and send a signal.
  */
-#define __sanitize_fcr31(next)						\
+#ifdef CONFIG_MIPS_FP_SUPPORT
+# define __sanitize_fcr31(next)					\
 do {									\
 	unsigned long fcr31 = mask_fcr31_x(next->thread.fpu.fcr31);	\
 	void __user *pc;						\
@@ -95,6 +96,9 @@
 		force_fcr31_sig(fcr31, pc, next);			\
 	}								\
 } while (0)
+#else
+# define __sanitize_fcr31(next)
+#endif
 
 /*
  * For newly created kernel threads switch_to() will return to
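
Guarding __sanitize_fcr31() behind CONFIG_MIPS_FP_SUPPORT is needed because kernels built without FP support have no thread.fpu member to read fcr31 from; with the #else stub, `__sanitize_fcr31(next);` in the switch path compiles down to a bare semicolon. For context, the mask_fcr31_x() helper used above plausibly looks like the following (reconstructed from memory of asm/fpu.h, shown standalone; the constants are the standard FCSR field positions):

	/* FCSR layout: Enable bits sit at 11:7 and Cause bits at 17:12, so
	 * shifting the Enable field left by 5 lines it up with Cause. A
	 * Cause bit is only actionable if its Enable bit is set, except
	 * Unimplemented Operation (bit 17), which always traps. */
	#define FPU_CSR_UNI_X	0x00020000	/* unimplemented-operation Cause bit */
	#define FPU_CSR_ALL_E	0x00000f80	/* the five exception Enable bits    */

	static inline unsigned long mask_fcr31_x_sketch(unsigned long fcr31)
	{
		return fcr31 & (FPU_CSR_UNI_X | ((fcr31 & FPU_CSR_ALL_E) << 5));
	}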
@@ -113,6 +117,8 @@
 		__restore_dsp(next);					\
 	}								\
 	if (cop2_present) {						\
+		u32 status = read_c0_status();				\
+									\
 		set_c0_status(ST0_CU2);					\
 		if ((KSTK_STATUS(prev) & ST0_CU2)) {			\
 			if (cop2_lazy_restore)				\
@@ -123,9 +129,9 @@
 		    !cop2_lazy_restore) {				\
 			cop2_restore(next);				\
 		}							\
-		clear_c0_status(ST0_CU2);				\
+		write_c0_status(status);				\
 	}								\
-	__clear_r6_hw_ll_bit();						\
+	__clear_r5_hw_ll_bit();						\
 	__clear_software_ll_bit();					\
 	if (cpu_has_userlocal)						\
 		write_c0_userlocal(task_thread_info(next)->tp_value);	\
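
The last two hunks stop the COP2 path from clobbering Status.CU2: the old code unconditionally ran clear_c0_status(ST0_CU2) after the lazy save/restore, turning COP2 off even when it had been enabled on entry to the switch. Pieced together from the context lines above, the cop2_present branch now expands to roughly:

	/* Snapshot Status, force CU2 on only for the save/restore window,
	 * then write the snapshot back so any pre-existing CU2 setting
	 * survives the context switch. */
	u32 status = read_c0_status();		/* CU2 may already be set     */

	set_c0_status(ST0_CU2);			/* enable COP2 for the window */
	if (KSTK_STATUS(prev) & ST0_CU2) {
		if (cop2_lazy_restore)
			KSTK_STATUS(prev) &= ~ST0_CU2;
		cop2_save(prev);
	}
	if ((KSTK_STATUS(next) & ST0_CU2) && !cop2_lazy_restore)
		cop2_restore(next);
	write_c0_status(status);		/* restore Status as found    */

The final hunk then swaps in __clear_r5_hw_ll_bit() at the bottom of the switch sequence, matching the macro rename earlier in the file.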