From a01b5c9f91adaee088a817861603a5dbe14775c2 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Fri, 1 Nov 2024 02:40:28 +0000
Subject: [PATCH] x86/mm/tlb: protect TLB state against out-of-band preemption (Dovetail)
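
When the kernel is pipelined (CONFIG_DOVETAIL), local_irq_save() only
stalls the in-band interrupt stage: the CPU can still be preempted by
out-of-band code, which may observe or update the per-CPU TLB state
(cpu_tlbstate) being manipulated. Serialize mm switches and the TLB
flush callbacks (flush_tlb_func_*) with
protect_inband_mm()/unprotect_inband_mm(), switch the CR4/CR3
twiddling in native_flush_tlb_global() and native_flush_tlb_local()
to the hard_local_irq_*() accessors, warn on !hard_irqs_disabled()
instead of !irqs_disabled() when Dovetail is enabled, and make
nmi_uaccess_okay() return false when the preempted task was running
out-of-band, since the companion core has no way to switch such a
task back in-band to handle an access fault taken over NMI.

The serialization pattern applied throughout is, as a sketch (it is
assumed, as in Dovetail, that protect_inband_mm() disables hard IRQs
for the duration of the in-band mm update):

	unsigned long flags;

	protect_inband_mm(flags);	/* hard IRQs off: no out-of-band preemption */
	/* ... read/update cpu_tlbstate, switch the mm ... */
	unprotect_inband_mm(flags);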
---
kernel/arch/x86/mm/tlb.c | 50 ++++++++++++++++++++++++++++++++++++++++++--------
1 file changed, 42 insertions(+), 8 deletions(-)
diff --git a/kernel/arch/x86/mm/tlb.c b/kernel/arch/x86/mm/tlb.c
index 569ac1d..b720da2 100644
--- a/kernel/arch/x86/mm/tlb.c
+++ b/kernel/arch/x86/mm/tlb.c
@@ -5,6 +5,7 @@
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
+#include <linux/irq_pipeline.h>
#include <linux/export.h>
#include <linux/cpu.h>
#include <linux/debugfs.h>
@@ -309,10 +310,12 @@
void switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
- unsigned long flags;
+ unsigned long flags, _flags;
local_irq_save(flags);
+ protect_inband_mm(_flags);
switch_mm_irqs_off(prev, next, tsk);
+ unprotect_inband_mm(_flags);
local_irq_restore(flags);
}
@@ -440,7 +443,9 @@
*/
/* We don't want flush_tlb_func_* to run concurrently with us. */
- if (IS_ENABLED(CONFIG_PROVE_LOCKING))
+ if (IS_ENABLED(CONFIG_DOVETAIL))
+ WARN_ON_ONCE(!hard_irqs_disabled());
+ else if (IS_ENABLED(CONFIG_PROVE_LOCKING))
WARN_ON_ONCE(!irqs_disabled());
/*
@@ -666,15 +671,24 @@
* wants us to catch up to.
*/
struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
- u32 loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
- u64 mm_tlb_gen = atomic64_read(&loaded_mm->context.tlb_gen);
- u64 local_tlb_gen = this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen);
+ u32 loaded_mm_asid;
+ u64 mm_tlb_gen;
+ u64 local_tlb_gen;
+ unsigned long flags;
/* This code cannot presently handle being reentered. */
VM_WARN_ON(!irqs_disabled());
- if (unlikely(loaded_mm == &init_mm))
+ protect_inband_mm(flags);
+
+ loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
+ mm_tlb_gen = atomic64_read(&loaded_mm->context.tlb_gen);
+ local_tlb_gen = this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen);
+
+ if (unlikely(loaded_mm == &init_mm)) {
+ unprotect_inband_mm(flags);
return;
+ }
VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].ctx_id) !=
loaded_mm->context.ctx_id);
@@ -690,6 +704,7 @@
* IPIs to lazy TLB mode CPUs.
*/
switch_mm_irqs_off(NULL, &init_mm, NULL);
+ unprotect_inband_mm(flags);
return;
}
@@ -700,12 +715,15 @@
* be handled can catch us all the way up, leaving no work for
* the second flush.
*/
+ unprotect_inband_mm(flags);
trace_tlb_flush(reason, 0);
return;
}
WARN_ON_ONCE(local_tlb_gen > mm_tlb_gen);
WARN_ON_ONCE(f->new_tlb_gen > mm_tlb_gen);
+
+ unprotect_inband_mm(flags);
/*
* If we get to this point, we know that our TLB is out of date.
@@ -1063,7 +1081,7 @@
* from interrupts. (Use the raw variant because this code can
* be called from deep inside debugging code.)
*/
- raw_local_irq_save(flags);
+ flags = hard_local_irq_save();
cr4 = this_cpu_read(cpu_tlbstate.cr4);
/* toggle PGE */
@@ -1071,7 +1089,7 @@
/* write old PGE again and flush TLBs */
native_write_cr4(cr4);
- raw_local_irq_restore(flags);
+ hard_local_irq_restore(flags);
}
/*
@@ -1079,6 +1097,8 @@
*/
STATIC_NOPV void native_flush_tlb_local(void)
{
+ unsigned long flags;
+
/*
* Preemption or interrupts must be disabled to protect the access
* to the per CPU variable and to prevent being preempted between
@@ -1086,10 +1106,14 @@
*/
WARN_ON_ONCE(preemptible());
+ flags = hard_cond_local_irq_save();
+
invalidate_user_asid(this_cpu_read(cpu_tlbstate.loaded_mm_asid));
/* If current->mm == NULL then the read_cr3() "borrows" an mm */
native_write_cr3(__native_read_cr3());
+
+ hard_cond_local_irq_restore(flags);
}
void flush_tlb_local(void)
@@ -1165,6 +1189,16 @@
VM_WARN_ON_ONCE(!loaded_mm);
/*
+ * There would be no way for the companion core to switch an
+ * out-of-band task back in-band in order to handle an access
+ * fault over NMI safely. Tell the caller that uaccess from
+ * NMI is NOT ok if the preempted task was running
+ * out-of-band.
+ */
+ if (running_oob())
+ return false;
+
+ /*
* The condition we want to check is
* current_mm->pgd == __va(read_cr3_pa()). This may be slow, though,
* if we're running in a VM with shadow paging, and nmi_uaccess_okay()
--