2023-12-11 6778948f9de86c3cfaf36725a7c87dcff9ba247f
kernel/arch/x86/mm/highmem_32.c
@@ -4,6 +4,65 @@
 #include <linux/swap.h> /* for totalram_pages */
 #include <linux/memblock.h>
 
+void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
+{
+	unsigned long vaddr;
+	int idx, type;
+
+	type = kmap_atomic_idx_push();
+	idx = type + KM_TYPE_NR*smp_processor_id();
+	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+	BUG_ON(!pte_none(*(kmap_pte-idx)));
+	set_pte(kmap_pte-idx, mk_pte(page, prot));
+	arch_flush_lazy_mmu_mode();
+
+	return (void *)vaddr;
+}
+EXPORT_SYMBOL(kmap_atomic_high_prot);
+
+/*
+ * This is the same as kmap_atomic() but can map memory that doesn't
+ * have a struct page associated with it.
+ */
+void *kmap_atomic_pfn(unsigned long pfn)
+{
+	return kmap_atomic_prot_pfn(pfn, kmap_prot);
+}
+EXPORT_SYMBOL_GPL(kmap_atomic_pfn);
+
+void kunmap_atomic_high(void *kvaddr)
+{
+	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
+
+	if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
+	    vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
+		int idx, type;
+
+		type = kmap_atomic_idx();
+		idx = type + KM_TYPE_NR * smp_processor_id();
+
+#ifdef CONFIG_DEBUG_HIGHMEM
+		WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+#endif
+		/*
+		 * Force other mappings to Oops if they'll try to access this
+		 * pte without first remap it. Keeping stale mappings around
+		 * is a bad idea also, in case the page changes cacheability
+		 * attributes or becomes a protected page in a hypervisor.
+		 */
+		kpte_clear_flush(kmap_pte-idx, vaddr);
+		kmap_atomic_idx_pop();
+		arch_flush_lazy_mmu_mode();
+	}
+#ifdef CONFIG_DEBUG_HIGHMEM
+	else {
+		BUG_ON(vaddr < PAGE_OFFSET);
+		BUG_ON(vaddr >= (unsigned long)high_memory);
+	}
+#endif
+}
+EXPORT_SYMBOL(kunmap_atomic_high);
+
 void __init set_highmem_pages_init(void)
 {
 	struct zone *zone;
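
For context, callers do not normally invoke these arch hooks directly: in trees where this hook split exists, the generic kmap_atomic()/kunmap_atomic() wrappers in mm/highmem.c dispatch to kmap_atomic_high_prot() and kunmap_atomic_high() when the page is in high memory. Below is a minimal illustrative sketch (not part of the patch; the helper name zero_page_atomic is hypothetical, and the real kernel already provides clear_highpage() for this job):

/* Illustrative sketch only: zero a possibly-highmem page through a
 * temporary atomic mapping.  kmap_atomic() disables preemption and
 * pagefaults and, for a highmem page, ends up in kmap_atomic_high_prot()
 * above; kunmap_atomic() tears the per-CPU fixmap slot down via
 * kunmap_atomic_high().
 */
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/string.h>

static void zero_page_atomic(struct page *page)
{
	void *vaddr = kmap_atomic(page);	/* no sleeping until unmapped */

	memset(vaddr, 0, PAGE_SIZE);
	kunmap_atomic(vaddr);			/* clears and flushes the pte */
}

Because the slot index above is computed as type + KM_TYPE_NR * smp_processor_id(), the mapping is strictly per-CPU: the caller must not sleep or migrate between the map and unmap calls, which is why kmap_atomic() keeps preemption disabled for the lifetime of the mapping.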