From a46a1ad097419aeea7350987dd95230f50d90392 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Fri, 15 Nov 2024 08:53:41 +0000
Subject: [PATCH] arm64: fpsimd: add Dovetail out-of-band FPSIMD context handling
---
kernel/arch/arm64/kernel/fpsimd.c | 194 +++++++++++++++++++++++++++++++++++++-----------
 1 file changed, 148 insertions(+), 46 deletions(-)
diff --git a/kernel/arch/arm64/kernel/fpsimd.c b/kernel/arch/arm64/kernel/fpsimd.c
index 5335a6b..175353e 100644
--- a/kernel/arch/arm64/kernel/fpsimd.c
+++ b/kernel/arch/arm64/kernel/fpsimd.c
@@ -169,6 +169,42 @@
WARN_ON(busy);
}
+static void __put_cpu_fpsimd_context(void)
+{
+ bool busy = __this_cpu_xchg(fpsimd_context_busy, false);
+
+ WARN_ON(!busy); /* No matching get_cpu_fpsimd_context()? */
+}
+
+#ifdef CONFIG_DOVETAIL
+
+#define get_cpu_fpsimd_context(__flags) \
+ do { \
+ (__flags) = hard_preempt_disable(); \
+ __get_cpu_fpsimd_context(); \
+ } while (0)
+
+#define put_cpu_fpsimd_context(__flags) \
+ do { \
+ __put_cpu_fpsimd_context(); \
+ hard_preempt_enable(__flags); \
+ } while (0)
+
+void fpsimd_restore_current_oob(void)
+{
+ /*
+ * Restore the fpsimd context for the current task as it
+ * resumes from dovetail_context_switch(), which always happen
+ * on the out-of-band stage. Skip this for kernel threads
+ * which have no such context but always bear
+ * TIF_FOREIGN_FPSTATE.
+ */
+ if (current->mm)
+ fpsimd_restore_current_state();
+}
+
+#else
+
/*
* Claim ownership of the CPU FPSIMD context for use by the calling context.
*
@@ -178,19 +214,12 @@
* The double-underscore version must only be called if you know the task
* can't be preempted.
*/
-static void get_cpu_fpsimd_context(void)
-{
- local_bh_disable();
- __get_cpu_fpsimd_context();
-}
-
-static void __put_cpu_fpsimd_context(void)
-{
- bool busy = __this_cpu_xchg(fpsimd_context_busy, false);
-
- WARN_ON(!busy); /* No matching get_cpu_fpsimd_context()? */
-}
-
+#define get_cpu_fpsimd_context(__flags) \
+ do { \
+ preempt_disable(); \
+ __get_cpu_fpsimd_context(); \
+ (void)(__flags); \
+ } while (0)
/*
* Release the CPU FPSIMD context.
*
@@ -198,12 +227,14 @@
* previously called, with no call to put_cpu_fpsimd_context() in the
* meantime.
*/
-static void put_cpu_fpsimd_context(void)
-{
- __put_cpu_fpsimd_context();
- local_bh_enable();
-}
+#define put_cpu_fpsimd_context(__flags) \
+ do { \
+ __put_cpu_fpsimd_context(); \
+ preempt_enable(); \
+ (void)(__flags); \
+ } while (0)
+#endif /* !CONFIG_DOVETAIL */
static bool have_cpu_fpsimd_context(void)
{
return !preemptible() && __this_cpu_read(fpsimd_context_busy);
@@ -283,7 +314,7 @@
static void task_fpsimd_load(void)
{
WARN_ON(!system_supports_fpsimd());
- WARN_ON(!have_cpu_fpsimd_context());
+ WARN_ON(!hard_irqs_disabled() && !have_cpu_fpsimd_context());
if (system_supports_sve() && test_thread_flag(TIF_SVE))
sve_load_state(sve_pffr(¤t->thread),
@@ -297,14 +328,14 @@
* Ensure FPSIMD/SVE storage in memory for the loaded context is up to
* date with respect to the CPU registers.
*/
-static void fpsimd_save(void)
+static void __fpsimd_save(void)
{
struct fpsimd_last_state_struct const *last =
this_cpu_ptr(&fpsimd_last_state);
/* set by fpsimd_bind_task_to_cpu() or fpsimd_bind_state_to_cpu() */
WARN_ON(!system_supports_fpsimd());
- WARN_ON(!have_cpu_fpsimd_context());
+ WARN_ON(!hard_irqs_disabled() && !have_cpu_fpsimd_context());
if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) {
if (system_supports_sve() && test_thread_flag(TIF_SVE)) {
@@ -324,6 +355,15 @@
} else
fpsimd_save_state(last->st);
}
+}
+
+void fpsimd_save(void)
+{
+ unsigned long flags;
+
+ flags = hard_cond_local_irq_save();
+ __fpsimd_save();
+ hard_cond_local_irq_restore(flags);
}
/*
@@ -444,7 +484,7 @@
* task->thread.uw.fpsimd_state must be up to date before calling this
* function.
*/
-static void fpsimd_to_sve(struct task_struct *task)
+static void _fpsimd_to_sve(struct task_struct *task)
{
unsigned int vq;
void *sst = task->thread.sve_state;
@@ -455,6 +495,15 @@
vq = sve_vq_from_vl(task->thread.sve_vl);
__fpsimd_to_sve(sst, fst, vq);
+}
+
+static void fpsimd_to_sve(struct task_struct *task)
+{
+ unsigned long flags;
+
+ flags = hard_cond_local_irq_save();
+ _fpsimd_to_sve(task);
+ hard_cond_local_irq_restore(flags);
}
/*
@@ -475,15 +524,20 @@
struct user_fpsimd_state *fst = &task->thread.uw.fpsimd_state;
unsigned int i;
__uint128_t const *p;
+ unsigned long flags;
if (!system_supports_sve())
return;
+
+ flags = hard_cond_local_irq_save();
vq = sve_vq_from_vl(task->thread.sve_vl);
for (i = 0; i < SVE_NUM_ZREGS; ++i) {
p = (__uint128_t const *)ZREG(sst, vq, i);
fst->vregs[i] = arm64_le128_to_cpu(*p);
}
+
+ hard_cond_local_irq_restore(flags);
}
#ifdef CONFIG_ARM64_SVE
@@ -584,6 +638,8 @@
int sve_set_vector_length(struct task_struct *task,
unsigned long vl, unsigned long flags)
{
+ unsigned long irqflags = 0;
+
if (flags & ~(unsigned long)(PR_SVE_VL_INHERIT |
PR_SVE_SET_VL_ONEXEC))
return -EINVAL;
@@ -621,9 +677,9 @@
* non-SVE thread.
*/
if (task == current) {
- get_cpu_fpsimd_context();
+ get_cpu_fpsimd_context(irqflags);
- fpsimd_save();
+ __fpsimd_save();
}
fpsimd_flush_task_state(task);
@@ -631,7 +687,7 @@
sve_to_fpsimd(task);
if (task == current)
- put_cpu_fpsimd_context();
+ put_cpu_fpsimd_context(irqflags);
/*
* Force reallocation of task SVE state to the correct size
@@ -936,17 +992,21 @@
*/
void do_sve_acc(unsigned int esr, struct pt_regs *regs)
{
+ unsigned long flags;
+
+ mark_trap_entry(ARM64_TRAP_SVE, regs);
+
/* Even if we chose not to use SVE, the hardware could still trap: */
if (unlikely(!system_supports_sve()) || WARN_ON(is_compat_task())) {
force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
- return;
+ goto out;
}
sve_alloc(current);
- get_cpu_fpsimd_context();
+ get_cpu_fpsimd_context(flags);
- fpsimd_save();
+ __fpsimd_save();
/* Force ret_to_user to reload the registers: */
fpsimd_flush_task_state(current);
@@ -955,7 +1015,9 @@
if (test_and_set_thread_flag(TIF_SVE))
WARN_ON(1); /* SVE access shouldn't have trapped */
- put_cpu_fpsimd_context();
+ put_cpu_fpsimd_context(flags);
+out:
+ mark_trap_exit(ARM64_TRAP_SVE, regs);
}
/*
@@ -974,6 +1036,9 @@
{
unsigned int si_code = FPE_FLTUNK;
+ if (!mark_cond_trap_entry(ARM64_TRAP_FPE, regs))
+ return;
+
if (esr & ESR_ELx_FP_EXC_TFV) {
if (esr & FPEXC_IOF)
si_code = FPE_FLTINV;
@@ -990,19 +1055,24 @@
send_sig_fault(SIGFPE, si_code,
(void __user *)instruction_pointer(regs),
current);
+
+ mark_trap_exit(ARM64_TRAP_FPE, regs);
}
void fpsimd_thread_switch(struct task_struct *next)
{
bool wrong_task, wrong_cpu;
+ unsigned long flags;
if (!system_supports_fpsimd())
return;
+ flags = hard_cond_local_irq_save();
+
__get_cpu_fpsimd_context();
/* Save unsaved fpsimd state, if any: */
- fpsimd_save();
+ __fpsimd_save();
/*
* Fix up TIF_FOREIGN_FPSTATE to correctly describe next's
@@ -1017,16 +1087,19 @@
wrong_task || wrong_cpu);
__put_cpu_fpsimd_context();
+
+ hard_cond_local_irq_restore(flags);
}
void fpsimd_flush_thread(void)
{
int vl, supported_vl;
+ unsigned long flags;
if (!system_supports_fpsimd())
return;
- get_cpu_fpsimd_context();
+ get_cpu_fpsimd_context(flags);
fpsimd_flush_task_state(current);
memset(¤t->thread.uw.fpsimd_state, 0,
@@ -1067,7 +1140,7 @@
current->thread.sve_vl_onexec = 0;
}
- put_cpu_fpsimd_context();
+ put_cpu_fpsimd_context(flags);
}
/*
@@ -1076,12 +1149,14 @@
*/
void fpsimd_preserve_current_state(void)
{
+ unsigned long flags;
+
if (!system_supports_fpsimd())
return;
- get_cpu_fpsimd_context();
- fpsimd_save();
- put_cpu_fpsimd_context();
+ get_cpu_fpsimd_context(flags);
+ __fpsimd_save();
+ put_cpu_fpsimd_context(flags);
}
/*
@@ -1123,7 +1198,7 @@
}
}
-void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state,
+static void __fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state,
unsigned int sve_vl)
{
struct fpsimd_last_state_struct *last =
@@ -1137,6 +1212,18 @@
last->sve_vl = sve_vl;
}
+void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state,
+ unsigned int sve_vl)
+{
+ unsigned long flags;
+
+ WARN_ON(!in_softirq() && !irqs_disabled());
+
+ flags = hard_cond_local_irq_save();
+ __fpsimd_bind_state_to_cpu(st, sve_state, sve_vl);
+ hard_cond_local_irq_restore(flags);
+}
+
/*
* Load the userland FPSIMD state of 'current' from memory, but only if the
* FPSIMD state already held in the registers is /not/ the most recent FPSIMD
@@ -1144,6 +1231,8 @@
*/
void fpsimd_restore_current_state(void)
{
+ unsigned long flags;
+
/*
* For the tasks that were created before we detected the absence of
* FP/SIMD, the TIF_FOREIGN_FPSTATE could be set via fpsimd_thread_switch(),
@@ -1158,14 +1247,14 @@
return;
}
- get_cpu_fpsimd_context();
+ get_cpu_fpsimd_context(flags);
if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) {
task_fpsimd_load();
fpsimd_bind_task_to_cpu();
}
- put_cpu_fpsimd_context();
+ put_cpu_fpsimd_context(flags);
}
/*
@@ -1175,21 +1264,23 @@
*/
void fpsimd_update_current_state(struct user_fpsimd_state const *state)
{
+ unsigned long flags;
+
if (WARN_ON(!system_supports_fpsimd()))
return;
- get_cpu_fpsimd_context();
+ get_cpu_fpsimd_context(flags);
current->thread.uw.fpsimd_state = *state;
if (system_supports_sve() && test_thread_flag(TIF_SVE))
- fpsimd_to_sve(current);
+ _fpsimd_to_sve(current);
task_fpsimd_load();
fpsimd_bind_task_to_cpu();
clear_thread_flag(TIF_FOREIGN_FPSTATE);
- put_cpu_fpsimd_context();
+ put_cpu_fpsimd_context(flags);
}
/*
@@ -1239,9 +1330,9 @@
{
if (!system_supports_fpsimd())
return;
- WARN_ON(preemptible());
+ WARN_ON(!hard_irqs_disabled() && preemptible());
__get_cpu_fpsimd_context();
- fpsimd_save();
+ __fpsimd_save();
fpsimd_flush_cpu_state();
__put_cpu_fpsimd_context();
}
@@ -1267,18 +1358,23 @@
*/
void kernel_neon_begin(void)
{
+ unsigned long flags;
+
if (WARN_ON(!system_supports_fpsimd()))
return;
BUG_ON(!may_use_simd());
- get_cpu_fpsimd_context();
+ get_cpu_fpsimd_context(flags);
/* Save unsaved fpsimd state, if any: */
- fpsimd_save();
+ __fpsimd_save();
/* Invalidate any task state remaining in the fpsimd regs: */
fpsimd_flush_cpu_state();
+
+ if (dovetailing())
+ hard_cond_local_irq_restore(flags);
}
EXPORT_SYMBOL(kernel_neon_begin);
@@ -1293,10 +1389,12 @@
*/
void kernel_neon_end(void)
{
+ unsigned long flags = hard_local_save_flags();
+
if (!system_supports_fpsimd())
return;
- put_cpu_fpsimd_context();
+ put_cpu_fpsimd_context(flags);
}
EXPORT_SYMBOL(kernel_neon_end);
@@ -1386,9 +1484,13 @@
static int fpsimd_cpu_pm_notifier(struct notifier_block *self,
unsigned long cmd, void *v)
{
+ unsigned long flags;
+
switch (cmd) {
case CPU_PM_ENTER:
+ flags = hard_cond_local_irq_save();
fpsimd_save_and_flush_cpu_state();
+ hard_cond_local_irq_restore(flags);
break;
case CPU_PM_EXIT:
break;
--
Gitblit v1.6.2