forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-11 297b60346df8beafee954a0fd7c2d64f33f3b9bc
kernel/arch/x86/mm/highmem_32.c
@@ -1,66 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0-only
 #include <linux/highmem.h>
 #include <linux/export.h>
 #include <linux/swap.h> /* for totalram_pages */
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
 
-void *kmap(struct page *page)
+void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
 {
-        might_sleep();
-        if (!PageHighMem(page))
-                return page_address(page);
-        return kmap_high(page);
-}
-EXPORT_SYMBOL(kmap);
-
-void kunmap(struct page *page)
-{
-        if (in_interrupt())
-                BUG();
-        if (!PageHighMem(page))
-                return;
-        kunmap_high(page);
-}
-EXPORT_SYMBOL(kunmap);
-
-/*
- * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
- * no global lock is needed and because the kmap code must perform a global TLB
- * invalidation when the kmap pool wraps.
- *
- * However when holding an atomic kmap it is not legal to sleep, so atomic
- * kmaps are appropriate for short, tight code paths only.
- */
-void *kmap_atomic_prot(struct page *page, pgprot_t prot)
-{
-        pte_t pte = mk_pte(page, prot);
         unsigned long vaddr;
         int idx, type;
-
-        preempt_disable_nort();
-        pagefault_disable();
-
-        if (!PageHighMem(page))
-                return page_address(page);
 
         type = kmap_atomic_idx_push();
         idx = type + KM_TYPE_NR*smp_processor_id();
         vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
         BUG_ON(!pte_none(*(kmap_pte-idx)));
-#ifdef CONFIG_PREEMPT_RT_FULL
-        current->kmap_pte[type] = pte;
-#endif
-        set_pte(kmap_pte-idx, pte);
+        set_pte(kmap_pte-idx, mk_pte(page, prot));
         arch_flush_lazy_mmu_mode();
 
         return (void *)vaddr;
 }
-EXPORT_SYMBOL(kmap_atomic_prot);
-
-void *kmap_atomic(struct page *page)
-{
-        return kmap_atomic_prot(page, kmap_prot);
-}
-EXPORT_SYMBOL(kmap_atomic);
+EXPORT_SYMBOL(kmap_atomic_high_prot);
 
 /*
  * This is the same as kmap_atomic() but can map memory that doesn't
@@ -72,7 +30,7 @@
 }
 EXPORT_SYMBOL_GPL(kmap_atomic_pfn);
 
-void __kunmap_atomic(void *kvaddr)
+void kunmap_atomic_high(void *kvaddr)
 {
         unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
 
@@ -92,9 +50,6 @@
                  * is a bad idea also, in case the page changes cacheability
                  * attributes or becomes a protected page in a hypervisor.
                  */
-#ifdef CONFIG_PREEMPT_RT_FULL
-                current->kmap_pte[type] = __pte(0);
-#endif
                 kpte_clear_flush(kmap_pte-idx, vaddr);
                 kmap_atomic_idx_pop();
                 arch_flush_lazy_mmu_mode();
@@ -105,11 +60,8 @@
                 BUG_ON(vaddr >= (unsigned long)high_memory);
         }
 #endif
-
-        pagefault_enable();
-        preempt_enable_nort();
 }
-EXPORT_SYMBOL(__kunmap_atomic);
+EXPORT_SYMBOL(kunmap_atomic_high);
 
 void __init set_highmem_pages_init(void)
 {
@@ -118,7 +70,7 @@
 
         /*
          * Explicitly reset zone->managed_pages because set_highmem_pages_init()
-         * is invoked before free_all_bootmem()
+         * is invoked before memblock_free_all()
          */
         reset_all_zones_managed_pages();
         for_each_zone(zone) {
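
Note on the removals above: kmap()/kunmap(), kmap_atomic()/kmap_atomic_prot() and the preempt/pagefault bookkeeping are not lost, they are provided once by generic code in this kernel generation, which then calls the two arch hooks this file now exports. A rough, paraphrased sketch of that call path (assumed from the generic include/linux/highmem.h API of this era, not copied from this tree):

/*
 * Paraphrased sketch of the generic wrappers (CONFIG_HIGHMEM=y), for
 * orientation only -- not verbatim kernel source. They keep the lowmem
 * fast path and the preempt/pagefault handling dropped from this file,
 * and call into the arch hooks added above.
 */
static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
        preempt_disable();
        pagefault_disable();
        if (!PageHighMem(page))                 /* lowmem is permanently mapped */
                return page_address(page);
        return kmap_atomic_high_prot(page, prot);      /* arch hook from this file */
}
#define kmap_atomic(page)       kmap_atomic_prot(page, kmap_prot)

#define kunmap_atomic(addr)                                             \
do {                                                                    \
        BUILD_BUG_ON(__same_type((addr), struct page *));               \
        kunmap_atomic_high(addr);       /* arch hook from this file */  \
        pagefault_enable();                                             \
        preempt_enable();                                               \
} while (0)

Callers therefore keep using kmap_atomic()/kunmap_atomic() exactly as before; only the x86 fixmap PTE setup and teardown remain in highmem_32.c.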