hc
2023-12-11 6778948f9de86c3cfaf36725a7c87dcff9ba247f
kernel/arch/arc/mm/highmem.c
@@ -36,8 +36,9 @@
  * This means each only has 1 PGDIR_SIZE worth of kvaddr mappings, which means
  * 2M of kvaddr space for typical config (8K page and 11:8:13 traversal split)
  *
- * - The fixed KMAP slots for kmap_local/atomic() require KM_MAX_IDX slots per
- *   CPU. So the number of CPUs sharing a single PTE page is limited.
+ * - fixmap anyhow needs a limited number of mappings. So 2M kvaddr == 256 PTE
+ *   slots across NR_CPUS would be more than sufficient (generic code defines
+ *   KM_TYPE_NR as 20).
  *
  * - pkmap being preemptible, in theory could do with more than 256 concurrent
  *   mappings. However, generic pkmap code: map_new_virtual(), doesn't traverse
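
Side note on the arithmetic behind the comment reworked in the hunk above, assuming the typical config it cites (8K pages, one PTE page spanning 2M of kvaddr). The SKETCH_* names below are made up for this illustration and are not part of the patch:

/* Illustrative only: why 256 PTE slots comfortably cover NR_CPUS * KM_TYPE_NR. */
enum {
	SKETCH_PAGE_SZ   = 8 << 10,                            /* 8K page size */
	SKETCH_KVADDR    = 2 << 20,                            /* 2M of fixmap kvaddr */
	SKETCH_PTE_SLOTS = SKETCH_KVADDR / SKETCH_PAGE_SZ,     /* 256 PTE slots in one page */
	SKETCH_PER_CPU   = 20,                                 /* generic KM_TYPE_NR */
	SKETCH_MAX_CPUS  = SKETCH_PTE_SLOTS / SKETCH_PER_CPU,  /* up to 12 CPUs share the page */
};

The BUILD_BUG_ON(KM_TYPE_NR > PTRS_PER_PTE) added further down only asserts the weaker condition that a single CPU's slots fit in one PTE page.
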
@@ -46,6 +47,48 @@
  */
 
 extern pte_t * pkmap_page_table;
+static pte_t * fixmap_page_table;
+
+void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
+{
+	int idx, cpu_idx;
+	unsigned long vaddr;
+
+	cpu_idx = kmap_atomic_idx_push();
+	idx = cpu_idx + KM_TYPE_NR * smp_processor_id();
+	vaddr = FIXMAP_ADDR(idx);
+
+	set_pte_at(&init_mm, vaddr, fixmap_page_table + idx,
+		   mk_pte(page, prot));
+
+	return (void *)vaddr;
+}
+EXPORT_SYMBOL(kmap_atomic_high_prot);
+
+void kunmap_atomic_high(void *kv)
+{
+	unsigned long kvaddr = (unsigned long)kv;
+
+	if (kvaddr >= FIXMAP_BASE && kvaddr < (FIXMAP_BASE + FIXMAP_SIZE)) {
+
+		/*
+		 * Because preemption is disabled, this vaddr can be associated
+		 * with the current allocated index.
+		 * But in case of multiple live kmap_atomic(), it still relies on
+		 * callers to unmap in right order.
+		 */
+		int cpu_idx = kmap_atomic_idx();
+		int idx = cpu_idx + KM_TYPE_NR * smp_processor_id();
+
+		WARN_ON(kvaddr != FIXMAP_ADDR(idx));
+
+		pte_clear(&init_mm, kvaddr, fixmap_page_table + idx);
+		local_flush_tlb_kernel_range(kvaddr, kvaddr + PAGE_SIZE);
+
+		kmap_atomic_idx_pop();
+	}
+}
+EXPORT_SYMBOL(kunmap_atomic_high);
 
 static noinline pte_t * __init alloc_kmap_pgtable(unsigned long kvaddr)
 {
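
For context on how the two functions added in the hunk above are reached: callers do not invoke the arch hooks directly but go through the generic kmap_atomic()/kunmap_atomic() wrappers, which disable preemption and, in the generic highmem code this patch targets, drop into kmap_atomic_high_prot()/kunmap_atomic_high() only for highmem pages. A minimal usage sketch under that assumption; read_word_from_page() is a hypothetical helper, not something in this file:

#include <linux/highmem.h>

/* Hypothetical helper (illustration only): read one long from a possibly-highmem page. */
static unsigned long read_word_from_page(struct page *page, unsigned int offset)
{
	unsigned long *vaddr;
	unsigned long val;

	vaddr = kmap_atomic(page);	/* preemption off; a highmem page lands in kmap_atomic_high_prot() */
	val = vaddr[offset / sizeof(*vaddr)];
	kunmap_atomic(vaddr);		/* tears the fixmap slot down via kunmap_atomic_high() */

	return val;
}
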
@@ -65,9 +108,10 @@
 {
 	/* Due to recursive include hell, we can't do this in processor.h */
 	BUILD_BUG_ON(PAGE_OFFSET < (VMALLOC_END + FIXMAP_SIZE + PKMAP_SIZE));
-	BUILD_BUG_ON(LAST_PKMAP > PTRS_PER_PTE);
-	BUILD_BUG_ON(FIX_KMAP_SLOTS > PTRS_PER_PTE);
 
+	BUILD_BUG_ON(KM_TYPE_NR > PTRS_PER_PTE);
 	pkmap_page_table = alloc_kmap_pgtable(PKMAP_BASE);
-	alloc_kmap_pgtable(FIXMAP_BASE);
+
+	BUILD_BUG_ON(LAST_PKMAP > PTRS_PER_PTE);
+	fixmap_page_table = alloc_kmap_pgtable(FIXMAP_BASE);
 }