2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/arch/x86/mm/highmem_32.c
@@ -1,115 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
 #include <linux/highmem.h>
 #include <linux/export.h>
 #include <linux/swap.h> /* for totalram_pages */
-#include <linux/bootmem.h>
-
-void *kmap(struct page *page)
-{
-	might_sleep();
-	if (!PageHighMem(page))
-		return page_address(page);
-	return kmap_high(page);
-}
-EXPORT_SYMBOL(kmap);
-
-void kunmap(struct page *page)
-{
-	if (in_interrupt())
-		BUG();
-	if (!PageHighMem(page))
-		return;
-	kunmap_high(page);
-}
-EXPORT_SYMBOL(kunmap);
-
-/*
- * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
- * no global lock is needed and because the kmap code must perform a global TLB
- * invalidation when the kmap pool wraps.
- *
- * However when holding an atomic kmap it is not legal to sleep, so atomic
- * kmaps are appropriate for short, tight code paths only.
- */
-void *kmap_atomic_prot(struct page *page, pgprot_t prot)
-{
-	pte_t pte = mk_pte(page, prot);
-	unsigned long vaddr;
-	int idx, type;
-
-	preempt_disable_nort();
-	pagefault_disable();
-
-	if (!PageHighMem(page))
-		return page_address(page);
-
-	type = kmap_atomic_idx_push();
-	idx = type + KM_TYPE_NR*smp_processor_id();
-	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-	BUG_ON(!pte_none(*(kmap_pte-idx)));
-#ifdef CONFIG_PREEMPT_RT_FULL
-	current->kmap_pte[type] = pte;
-#endif
-	set_pte(kmap_pte-idx, pte);
-	arch_flush_lazy_mmu_mode();
-
-	return (void *)vaddr;
-}
-EXPORT_SYMBOL(kmap_atomic_prot);
-
-void *kmap_atomic(struct page *page)
-{
-	return kmap_atomic_prot(page, kmap_prot);
-}
-EXPORT_SYMBOL(kmap_atomic);
-
-/*
- * This is the same as kmap_atomic() but can map memory that doesn't
- * have a struct page associated with it.
- */
-void *kmap_atomic_pfn(unsigned long pfn)
-{
-	return kmap_atomic_prot_pfn(pfn, kmap_prot);
-}
-EXPORT_SYMBOL_GPL(kmap_atomic_pfn);
-
-void __kunmap_atomic(void *kvaddr)
-{
-	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-
-	if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
-	    vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
-		int idx, type;
-
-		type = kmap_atomic_idx();
-		idx = type + KM_TYPE_NR * smp_processor_id();
-
-#ifdef CONFIG_DEBUG_HIGHMEM
-		WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
-#endif
-		/*
-		 * Force other mappings to Oops if they'll try to access this
-		 * pte without first remap it. Keeping stale mappings around
-		 * is a bad idea also, in case the page changes cacheability
-		 * attributes or becomes a protected page in a hypervisor.
-		 */
-#ifdef CONFIG_PREEMPT_RT_FULL
-		current->kmap_pte[type] = __pte(0);
-#endif
-		kpte_clear_flush(kmap_pte-idx, vaddr);
-		kmap_atomic_idx_pop();
-		arch_flush_lazy_mmu_mode();
-	}
-#ifdef CONFIG_DEBUG_HIGHMEM
-	else {
-		BUG_ON(vaddr < PAGE_OFFSET);
-		BUG_ON(vaddr >= (unsigned long)high_memory);
-	}
-#endif
-
-	pagefault_enable();
-	preempt_enable_nort();
-}
-EXPORT_SYMBOL(__kunmap_atomic);
+#include <linux/memblock.h>
 
 void __init set_highmem_pages_init(void)
 {
@@ -118,7 +11,7 @@
 
 	/*
	 * Explicitly reset zone->managed_pages because set_highmem_pages_init()
-	 * is invoked before free_all_bootmem()
+	 * is invoked before memblock_free_all()
	 */
	reset_all_zones_managed_pages();
	for_each_zone(zone) {
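
The removed comment above states the contract for the atomic kmap path: it is faster than kmap/kunmap because no global lock is taken, but the mapping may only be held across a short, non-sleeping section. Below is a minimal usage sketch under that contract, not part of this patch; the helper name and destination buffer are hypothetical, and it assumes only the kmap_atomic()/kunmap_atomic() pair exposed through <linux/highmem.h>.

#include <linux/highmem.h>
#include <linux/string.h>

/*
 * Hypothetical caller: copy one (possibly highmem) page into a regular
 * buffer through a short-lived atomic mapping. Nothing between
 * kmap_atomic() and kunmap_atomic() may sleep.
 */
static void copy_page_to_buf(struct page *page, void *dst)
{
	void *src = kmap_atomic(page);	/* maps via a per-CPU fixmap slot */

	memcpy(dst, src, PAGE_SIZE);	/* short, tight, non-sleeping section */
	kunmap_atomic(src);		/* clears the slot and re-enables pagefaults */
}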