hc
2023-12-11 6778948f9de86c3cfaf36725a7c87dcff9ba247f
kernel/arch/arm64/kernel/fpsimd.c
@@ -180,10 +180,7 @@
  */
 static void get_cpu_fpsimd_context(void)
 {
-        if (!IS_ENABLED(CONFIG_PREEMPT_RT))
-                local_bh_disable();
-        else
-                preempt_disable();
+        local_bh_disable();
         __get_cpu_fpsimd_context();
 }
 
@@ -204,10 +201,7 @@
 static void put_cpu_fpsimd_context(void)
 {
         __put_cpu_fpsimd_context();
-        if (!IS_ENABLED(CONFIG_PREEMPT_RT))
-                local_bh_enable();
-        else
-                preempt_enable();
+        local_bh_enable();
 }
 
 static bool have_cpu_fpsimd_context(void)
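For readability, a minimal sketch of how the get/put pair reads once the two hunks above are applied. This is assembled from the '+' lines only and is not taken verbatim from any one tree; __get_cpu_fpsimd_context() and __put_cpu_fpsimd_context() are untouched by this patch and merely referenced here.

/* Sketch only: the PREEMPT_RT special case is dropped and
 * local_bh_disable()/local_bh_enable() are used on all preemption models. */
static void get_cpu_fpsimd_context(void)
{
        local_bh_disable();
        __get_cpu_fpsimd_context();
}

static void put_cpu_fpsimd_context(void)
{
        __put_cpu_fpsimd_context();
        local_bh_enable();
}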
@@ -215,14 +209,21 @@
         return !preemptible() && __this_cpu_read(fpsimd_context_busy);
 }
 
-static void *sve_free_atomic(struct task_struct *task)
+/*
+ * Call __sve_free() directly only if you know task can't be scheduled
+ * or preempted.
+ */
+static void __sve_free(struct task_struct *task)
 {
-        void *sve_state = task->thread.sve_state;
+        kfree(task->thread.sve_state);
+        task->thread.sve_state = NULL;
+}
 
+static void sve_free(struct task_struct *task)
+{
         WARN_ON(test_tsk_thread_flag(task, TIF_SVE));
 
-        task->thread.sve_state = NULL;
-        return sve_state;
+        __sve_free(task);
 }
 
 /*
@@ -583,7 +584,6 @@
 int sve_set_vector_length(struct task_struct *task,
                           unsigned long vl, unsigned long flags)
 {
-        void *mem = NULL;
         if (flags & ~(unsigned long)(PR_SVE_VL_INHERIT |
                                      PR_SVE_SET_VL_ONEXEC))
                 return -EINVAL;
@@ -637,10 +637,9 @@
          * Force reallocation of task SVE state to the correct size
          * on next use:
          */
-        mem = sve_free_atomic(task);
+        sve_free(task);
 
         task->thread.sve_vl = vl;
-        kfree(mem);
 
 out:
         update_tsk_thread_flag(task, TIF_SVE_VL_INHERIT,
@@ -918,9 +917,7 @@
  */
 void fpsimd_release_task(struct task_struct *dead_task)
 {
-        void *mem = NULL;
-        mem = sve_free_atomic(dead_task);
-        kfree(mem);
+        __sve_free(dead_task);
 }
 
 #endif /* CONFIG_ARM64_SVE */
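A minimal sketch, assembled from the added lines in the hunks above, of the freeing helpers this patch restores and of one caller that uses the unchecked variant. The old sve_free_atomic(), which handed the buffer back to its caller for a deferred kfree(), is removed; freeing happens inline again.

/* Sketch only: taken from the '+' lines above. */
static void __sve_free(struct task_struct *task)
{
        kfree(task->thread.sve_state);
        task->thread.sve_state = NULL;
}

static void sve_free(struct task_struct *task)
{
        WARN_ON(test_tsk_thread_flag(task, TIF_SVE));

        __sve_free(task);
}

/* Per the __sve_free() comment above, the unchecked variant is usable here
 * because a task being released can no longer be scheduled. */
void fpsimd_release_task(struct task_struct *dead_task)
{
        __sve_free(dead_task);
}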
@@ -1025,7 +1022,6 @@
 void fpsimd_flush_thread(void)
 {
         int vl, supported_vl;
-        void *mem = NULL;
 
         if (!system_supports_fpsimd())
                 return;
@@ -1038,7 +1034,7 @@
 
         if (system_supports_sve()) {
                 clear_thread_flag(TIF_SVE);
-                mem = sve_free_atomic(current);
+                sve_free(current);
 
                 /*
                  * Reset the task vector length as required.
@@ -1072,7 +1068,6 @@
         }
 
         put_cpu_fpsimd_context();
-        kfree(mem);
 }
 
 /*