forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-09 95099d4622f8cb224d94e314c7a8e0df60b13f87
kernel/arch/arc/mm/highmem.c
@@ -1,17 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (C) 2015 Synopsys, Inc. (www.synopsys.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
  */
 
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/export.h>
 #include <linux/highmem.h>
+#include <linux/pgtable.h>
 #include <asm/processor.h>
-#include <asm/pgtable.h>
 #include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
 
@@ -40,9 +36,8 @@
  * This means each only has 1 PGDIR_SIZE worth of kvaddr mappings, which means
  * 2M of kvaddr space for typical config (8K page and 11:8:13 traversal split)
  *
- * - fixmap anyhow needs a limited number of mappings. So 2M kvaddr == 256 PTE
- *   slots across NR_CPUS would be more than sufficient (generic code defines
- *   KM_TYPE_NR as 20).
+ * - The fixed KMAP slots for kmap_local/atomic() require KM_MAX_IDX slots per
+ *   CPU. So the number of CPUs sharing a single PTE page is limited.
+ *
 *
 * - pkmap being preemptible, in theory could do with more than 256 concurrent
 *   mappings. However, generic pkmap code: map_new_virtual(), doesn't traverse
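
(A quick check of the arithmetic behind the reworded comment, using values assumed from the generic headers rather than from this diff: the 2M fixmap window with 8K pages gives 2M / 8K = 256 PTE slots in the single PTE page; with KM_MAX_IDX = 16 slots per CPU (asm-generic/kmap_size.h; 33 when CONFIG_DEBUG_KMAP_LOCAL inserts guard pages), at most 256 / 16 = 16 CPUs' worth of kmap_local/atomic slots fit, which is the limit the new wording refers to.)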
@@ -51,79 +46,17 @@
  */
 
 extern pte_t * pkmap_page_table;
-static pte_t * fixmap_page_table;
-
-void *kmap(struct page *page)
-{
-	BUG_ON(in_interrupt());
-	if (!PageHighMem(page))
-		return page_address(page);
-
-	return kmap_high(page);
-}
-EXPORT_SYMBOL(kmap);
-
-void *kmap_atomic(struct page *page)
-{
-	int idx, cpu_idx;
-	unsigned long vaddr;
-
-	preempt_disable();
-	pagefault_disable();
-	if (!PageHighMem(page))
-		return page_address(page);
-
-	cpu_idx = kmap_atomic_idx_push();
-	idx = cpu_idx + KM_TYPE_NR * smp_processor_id();
-	vaddr = FIXMAP_ADDR(idx);
-
-	set_pte_at(&init_mm, vaddr, fixmap_page_table + idx,
-		   mk_pte(page, kmap_prot));
-
-	return (void *)vaddr;
-}
-EXPORT_SYMBOL(kmap_atomic);
-
-void __kunmap_atomic(void *kv)
-{
-	unsigned long kvaddr = (unsigned long)kv;
-
-	if (kvaddr >= FIXMAP_BASE && kvaddr < (FIXMAP_BASE + FIXMAP_SIZE)) {
-
-		/*
-		 * Because preemption is disabled, this vaddr can be associated
-		 * with the current allocated index.
-		 * But in case of multiple live kmap_atomic(), it still relies on
-		 * callers to unmap in right order.
-		 */
-		int cpu_idx = kmap_atomic_idx();
-		int idx = cpu_idx + KM_TYPE_NR * smp_processor_id();
-
-		WARN_ON(kvaddr != FIXMAP_ADDR(idx));
-
-		pte_clear(&init_mm, kvaddr, fixmap_page_table + idx);
-		local_flush_tlb_kernel_range(kvaddr, kvaddr + PAGE_SIZE);
-
-		kmap_atomic_idx_pop();
-	}
-
-	pagefault_enable();
-	preempt_enable();
-}
-EXPORT_SYMBOL(__kunmap_atomic);
 
 static noinline pte_t * __init alloc_kmap_pgtable(unsigned long kvaddr)
 {
-	pgd_t *pgd_k;
-	pud_t *pud_k;
-	pmd_t *pmd_k;
+	pmd_t *pmd_k = pmd_off_k(kvaddr);
 	pte_t *pte_k;
 
-	pgd_k = pgd_offset_k(kvaddr);
-	pud_k = pud_offset(pgd_k, kvaddr);
-	pmd_k = pmd_offset(pud_k, kvaddr);
+	pte_k = (pte_t *)memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
+	if (!pte_k)
+		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+		      __func__, PAGE_SIZE, PAGE_SIZE);
 
-	pte_k = (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
 	pmd_populate_kernel(&init_mm, pmd_k, pte_k);
 	return pte_k;
 }
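
(Note on the deleted functions: kmap(), kmap_atomic() and __kunmap_atomic() are not lost; they are now supplied by the generic highmem code, and the per-CPU slot bookkeeping and fixmap PTE handling that the removed bodies did by hand happen inside mm/highmem.c. As an illustrative sketch only, with a hypothetical helper name, a caller on this kernel maps a highmem page like this:

    #include <linux/highmem.h>
    #include <linux/string.h>

    /* Hypothetical example: zero one (possibly highmem) page. */
    static void zero_one_page(struct page *page)
    {
            void *vaddr;

            /* The generic kmap_local code picks a free per-CPU fixmap slot
             * and installs the PTE for us. */
            vaddr = kmap_local_page(page);
            memset(vaddr, 0, PAGE_SIZE);
            kunmap_local(vaddr);    /* undoes the temporary mapping */
    }

kmap_local_page()/kunmap_local() are the preferred generic API; kmap() and kmap_atomic() are still available from the generic header for existing callers.)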
@@ -132,10 +65,9 @@
 {
 	/* Due to recursive include hell, we can't do this in processor.h */
 	BUILD_BUG_ON(PAGE_OFFSET < (VMALLOC_END + FIXMAP_SIZE + PKMAP_SIZE));
-
-	BUILD_BUG_ON(KM_TYPE_NR > PTRS_PER_PTE);
-	pkmap_page_table = alloc_kmap_pgtable(PKMAP_BASE);
-
 	BUILD_BUG_ON(LAST_PKMAP > PTRS_PER_PTE);
-	fixmap_page_table = alloc_kmap_pgtable(FIXMAP_BASE);
+	BUILD_BUG_ON(FIX_KMAP_SLOTS > PTRS_PER_PTE);
+
+	pkmap_page_table = alloc_kmap_pgtable(PKMAP_BASE);
+	alloc_kmap_pgtable(FIXMAP_BASE);
 }
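
(The new BUILD_BUG_ON(FIX_KMAP_SLOTS > PTRS_PER_PTE) guards the same property the old KM_TYPE_NR check did: every per-CPU kmap slot must land inside the single PTE page that alloc_kmap_pgtable(FIXMAP_BASE) wires up. A minimal sketch of the definitions it relies on, assuming the usual shape of arch/arc/include/asm/highmem.h and asm-generic/kmap_size.h rather than quoting this tree:

    /* Assumed, for illustration only: */
    #define KM_MAX_IDX      16                      /* 33 with CONFIG_DEBUG_KMAP_LOCAL guard pages */
    #define FIX_KMAP_SLOTS  (KM_MAX_IDX * NR_CPUS)  /* one run of slots per possible CPU */

The generic kmap_local code finds the fixmap PTE by walking the kernel page tables itself, which is why the old file-static fixmap_page_table pointer is gone and the return value of alloc_kmap_pgtable(FIXMAP_BASE) is simply dropped.)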