2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/arch/x86/mm/highmem_32.c
@@ -1,108 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
 #include <linux/highmem.h>
 #include <linux/export.h>
 #include <linux/swap.h> /* for totalram_pages */
-#include <linux/bootmem.h>
-
-void *kmap(struct page *page)
-{
-	might_sleep();
-	if (!PageHighMem(page))
-		return page_address(page);
-	return kmap_high(page);
-}
-EXPORT_SYMBOL(kmap);
-
-void kunmap(struct page *page)
-{
-	if (in_interrupt())
-		BUG();
-	if (!PageHighMem(page))
-		return;
-	kunmap_high(page);
-}
-EXPORT_SYMBOL(kunmap);
-
-/*
- * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
- * no global lock is needed and because the kmap code must perform a global TLB
- * invalidation when the kmap pool wraps.
- *
- * However when holding an atomic kmap it is not legal to sleep, so atomic
- * kmaps are appropriate for short, tight code paths only.
- */
-void *kmap_atomic_prot(struct page *page, pgprot_t prot)
-{
-	unsigned long vaddr;
-	int idx, type;
-
-	preempt_disable();
-	pagefault_disable();
-
-	if (!PageHighMem(page))
-		return page_address(page);
-
-	type = kmap_atomic_idx_push();
-	idx = type + KM_TYPE_NR*smp_processor_id();
-	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-	BUG_ON(!pte_none(*(kmap_pte-idx)));
-	set_pte(kmap_pte-idx, mk_pte(page, prot));
-	arch_flush_lazy_mmu_mode();
-
-	return (void *)vaddr;
-}
-EXPORT_SYMBOL(kmap_atomic_prot);
-
-void *kmap_atomic(struct page *page)
-{
-	return kmap_atomic_prot(page, kmap_prot);
-}
-EXPORT_SYMBOL(kmap_atomic);
-
-/*
- * This is the same as kmap_atomic() but can map memory that doesn't
- * have a struct page associated with it.
- */
-void *kmap_atomic_pfn(unsigned long pfn)
-{
-	return kmap_atomic_prot_pfn(pfn, kmap_prot);
-}
-EXPORT_SYMBOL_GPL(kmap_atomic_pfn);
-
-void __kunmap_atomic(void *kvaddr)
-{
-	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
-
-	if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
-	    vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
-		int idx, type;
-
-		type = kmap_atomic_idx();
-		idx = type + KM_TYPE_NR * smp_processor_id();
-
-#ifdef CONFIG_DEBUG_HIGHMEM
-		WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
-#endif
-		/*
-		 * Force other mappings to Oops if they'll try to access this
-		 * pte without first remap it. Keeping stale mappings around
-		 * is a bad idea also, in case the page changes cacheability
-		 * attributes or becomes a protected page in a hypervisor.
-		 */
-		kpte_clear_flush(kmap_pte-idx, vaddr);
-		kmap_atomic_idx_pop();
-		arch_flush_lazy_mmu_mode();
-	}
-#ifdef CONFIG_DEBUG_HIGHMEM
-	else {
-		BUG_ON(vaddr < PAGE_OFFSET);
-		BUG_ON(vaddr >= (unsigned long)high_memory);
-	}
-#endif
-
-	pagefault_enable();
-	preempt_enable();
-}
-EXPORT_SYMBOL(__kunmap_atomic);
+#include <linux/memblock.h>
 
 void __init set_highmem_pages_init(void)
 {
@@ -111,7 +11,7 @@
 
 	/*
 	 * Explicitly reset zone->managed_pages because set_highmem_pages_init()
-	 * is invoked before free_all_bootmem()
+	 * is invoked before memblock_free_all()
	 */
 	reset_all_zones_managed_pages();
 	for_each_zone(zone) {
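
Note (not part of the diff): the block comment removed above states the atomic-kmap contract — the mapping is cheap because it is per-CPU and lock-free, but the caller must not sleep while it is held. A minimal caller sketch following that rule with the kmap_atomic()/kunmap_atomic() pair from <linux/highmem.h>; zero_page_range() and its parameters are illustrative names, not kernel API:

#include <linux/highmem.h>
#include <linux/string.h>

/*
 * Illustrative helper (hypothetical, not from this patch): clear a byte
 * range inside a possibly-highmem page.  kmap_atomic() disables pagefaults
 * and preemption, so nothing between the map and the unmap may sleep; keep
 * the critical section short, as the removed comment advises.
 */
static void zero_page_range(struct page *page, unsigned int offset,
			    unsigned int len)
{
	char *vaddr = kmap_atomic(page);

	memset(vaddr + offset, 0, len);
	kunmap_atomic(vaddr);	/* drop the mapping before anything that may sleep */
}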