From 10ebd8556b7990499c896a550e3d416b444211e6 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Fri, 10 May 2024 02:23:07 +0000
Subject: [PATCH] ARC: mm: add fixmap-based kmap_atomic() support

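Allocate a dedicated PTE page for the fixmap region in kmap_init() and
implement the kmap_atomic_high_prot()/kunmap_atomic_high() arch hooks,
which map a highmem page into a per-CPU fixmap slot (KM_TYPE_NR slots
per CPU) and tear the mapping down again with a local TLB flush on
unmap.

For context, a minimal sketch of a caller of the API these hooks back
(illustrative only: copy_to_page() is a hypothetical helper, and a real
file would need linux/highmem.h and linux/string.h):

	/*
	 * kmap_atomic() disables preemption and pagefaults; for a
	 * highmem page it ends up in kmap_atomic_high_prot() below,
	 * for a lowmem page it just returns page_address().
	 */
	static void copy_to_page(struct page *page, const void *src, size_t len)
	{
		void *vaddr = kmap_atomic(page);

		memcpy(vaddr, src, len);
		kunmap_atomic(vaddr);	/* ends up in kunmap_atomic_high() */
	}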
---
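Note for reviewers (this section is discarded by git-am): the per-CPU
slot arithmetic in kmap_atomic_high_prot() is sketched below as a
hypothetical standalone helper, assuming ARC's FIXMAP_ADDR(nr) expands
to FIXMAP_BASE + ((nr) << PAGE_SHIFT):

	static unsigned long kmap_slot_vaddr(int cpu, int cpu_idx)
	{
		/* each CPU owns a window of KM_TYPE_NR consecutive slots */
		int idx = cpu_idx + KM_TYPE_NR * cpu;

		return FIXMAP_BASE + ((unsigned long)idx << PAGE_SHIFT);
	}

With KM_TYPE_NR == 20 and 8K pages, the first mapping on CPU 1
(cpu_idx == 0) lands in slot 20, i.e. FIXMAP_BASE + 20 * 8K; the 256
PTE slots of the 2M fixmap window leave plenty of headroom.
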
 kernel/arch/arc/mm/highmem.c |   54 +++++++++++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 49 insertions(+), 5 deletions(-)
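
Also note that nested atomic mappings must be torn down in LIFO order
(see the comment in kunmap_atomic_high(): the per-CPU index stack only
pops the most recent slot, so an out-of-order unmap trips the WARN_ON
and clears the wrong PTE). A hypothetical two-mapping example:

	static void copy_page_atomic(struct page *dst, struct page *src)
	{
		void *d = kmap_atomic(dst);
		void *s = kmap_atomic(src);	/* pushes the next slot */

		memcpy(d, s, PAGE_SIZE);

		kunmap_atomic(s);	/* unmap in reverse (LIFO) order */
		kunmap_atomic(d);
	}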

diff --git a/kernel/arch/arc/mm/highmem.c b/kernel/arch/arc/mm/highmem.c
index c79912a..1b9f473 100644
--- a/kernel/arch/arc/mm/highmem.c
+++ b/kernel/arch/arc/mm/highmem.c
@@ -36,8 +36,9 @@
  *   This means each only has 1 PGDIR_SIZE worth of kvaddr mappings, which means
  *   2M of kvaddr space for typical config (8K page and 11:8:13 traversal split)
  *
- * - The fixed KMAP slots for kmap_local/atomic() require KM_MAX_IDX slots per
- *   CPU. So the number of CPUs sharing a single PTE page is limited.
+ * - fixmap anyhow needs a limited number of mappings. So 2M kvaddr == 256 PTE
+ *   slots across NR_CPUS would be more than sufficient (generic code defines
+ *   KM_TYPE_NR as 20).
  *
  * - pkmap being preemptible, in theory could do with more than 256 concurrent
  *   mappings. However, generic pkmap code: map_new_virtual(), doesn't traverse
@@ -46,6 +47,48 @@
  */
 
 extern pte_t * pkmap_page_table;
+static pte_t * fixmap_page_table;
+
+void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
+{
+	int idx, cpu_idx;
+	unsigned long vaddr;
+
+	cpu_idx = kmap_atomic_idx_push();
+	idx = cpu_idx + KM_TYPE_NR * smp_processor_id();
+	vaddr = FIXMAP_ADDR(idx);
+
+	set_pte_at(&init_mm, vaddr, fixmap_page_table + idx,
+		   mk_pte(page, prot));
+
+	return (void *)vaddr;
+}
+EXPORT_SYMBOL(kmap_atomic_high_prot);
+
+void kunmap_atomic_high(void *kv)
+{
+	unsigned long kvaddr = (unsigned long)kv;
+
+	if (kvaddr >= FIXMAP_BASE && kvaddr < (FIXMAP_BASE + FIXMAP_SIZE)) {
+
+		/*
+		 * Because preemption is disabled, this vaddr can be associated
+		 * with the current allocated index.
+		 * But with multiple live kmap_atomic() mappings, it still
+		 * relies on callers to unmap in the right order.
+		 */
+		int cpu_idx = kmap_atomic_idx();
+		int idx = cpu_idx + KM_TYPE_NR * smp_processor_id();
+
+		WARN_ON(kvaddr != FIXMAP_ADDR(idx));
+
+		pte_clear(&init_mm, kvaddr, fixmap_page_table + idx);
+		local_flush_tlb_kernel_range(kvaddr, kvaddr + PAGE_SIZE);
+
+		kmap_atomic_idx_pop();
+	}
+}
+EXPORT_SYMBOL(kunmap_atomic_high);
 
 static noinline pte_t * __init alloc_kmap_pgtable(unsigned long kvaddr)
 {
@@ -65,9 +108,10 @@
 {
 	/* Due to recursive include hell, we can't do this in processor.h */
 	BUILD_BUG_ON(PAGE_OFFSET < (VMALLOC_END + FIXMAP_SIZE + PKMAP_SIZE));
-	BUILD_BUG_ON(LAST_PKMAP > PTRS_PER_PTE);
-	BUILD_BUG_ON(FIX_KMAP_SLOTS > PTRS_PER_PTE);
 
+	BUILD_BUG_ON(KM_TYPE_NR > PTRS_PER_PTE);
 	pkmap_page_table = alloc_kmap_pgtable(PKMAP_BASE);
-	alloc_kmap_pgtable(FIXMAP_BASE);
+
+	BUILD_BUG_ON(LAST_PKMAP > PTRS_PER_PTE);
+	fixmap_page_table = alloc_kmap_pgtable(FIXMAP_BASE);
 }

--
Gitblit v1.6.2