2023-12-11 6778948f9de86c3cfaf36725a7c87dcff9ba247f
kernel/arch/mips/mm/highmem.c
@@ -8,6 +8,8 @@
 #include <asm/fixmap.h>
 #include <asm/tlbflush.h>
 
+static pte_t *kmap_pte;
+
 unsigned long highstart_pfn, highend_pfn;
 
 void kmap_flush_tlb(unsigned long addr)
@@ -15,3 +17,78 @@
 	flush_tlb_one(addr);
 }
 EXPORT_SYMBOL(kmap_flush_tlb);
+
+void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
+{
+	unsigned long vaddr;
+	int idx, type;
+
+	type = kmap_atomic_idx_push();
+	idx = type + KM_TYPE_NR*smp_processor_id();
+	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+#ifdef CONFIG_DEBUG_HIGHMEM
+	BUG_ON(!pte_none(*(kmap_pte - idx)));
+#endif
+	set_pte(kmap_pte-idx, mk_pte(page, prot));
+	local_flush_tlb_one((unsigned long)vaddr);
+
+	return (void*) vaddr;
+}
+EXPORT_SYMBOL(kmap_atomic_high_prot);
+
+void kunmap_atomic_high(void *kvaddr)
+{
+	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
+	int type __maybe_unused;
+
+	if (vaddr < FIXADDR_START)
+		return;
+
+	type = kmap_atomic_idx();
+#ifdef CONFIG_DEBUG_HIGHMEM
+	{
+		int idx = type + KM_TYPE_NR * smp_processor_id();
+
+		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+
+		/*
+		 * force other mappings to Oops if they'll try to access
+		 * this pte without first remap it
+		 */
+		pte_clear(&init_mm, vaddr, kmap_pte-idx);
+		local_flush_tlb_one(vaddr);
+	}
+#endif
+	kmap_atomic_idx_pop();
+}
+EXPORT_SYMBOL(kunmap_atomic_high);
+
+/*
+ * This is the same as kmap_atomic() but can map memory that doesn't
+ * have a struct page associated with it.
+ */
+void *kmap_atomic_pfn(unsigned long pfn)
+{
+	unsigned long vaddr;
+	int idx, type;
+
+	preempt_disable();
+	pagefault_disable();
+
+	type = kmap_atomic_idx_push();
+	idx = type + KM_TYPE_NR*smp_processor_id();
+	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+	set_pte(kmap_pte-idx, pfn_pte(pfn, PAGE_KERNEL));
+	flush_tlb_one(vaddr);
+
+	return (void*) vaddr;
+}
+
+void __init kmap_init(void)
+{
+	unsigned long kmap_vstart;
+
+	/* cache the first kmap pte */
+	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
+	kmap_pte = virt_to_kpte(kmap_vstart);
+}
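
A minimal usage sketch (not part of the patch above), assuming the generic <linux/highmem.h> entry points kmap_atomic()/kunmap_atomic() dispatch to the arch hooks added here when the page is a highmem page; the helper zero_highmem_page() is a hypothetical caller, not something from this commit:

#include <linux/highmem.h>
#include <linux/string.h>

/* Zero a (possibly highmem) page through a temporary per-CPU fixmap mapping. */
static void zero_highmem_page(struct page *page)
{
	/* for highmem pages this should end up in kmap_atomic_high_prot() */
	void *vaddr = kmap_atomic(page);

	memset(vaddr, 0, PAGE_SIZE);

	/* pairs with kmap_atomic(); should end up in kunmap_atomic_high() */
	kunmap_atomic(vaddr);
}

Because the mapping lives in a per-CPU fixmap slot with page faults and preemption disabled, the caller must not sleep between the two calls.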