hc
2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/arch/xtensa/mm/highmem.c
@@ -12,8 +12,6 @@
 #include <linux/highmem.h>
 #include <asm/tlbflush.h>
 
-static pte_t *kmap_pte;
-
 #if DCACHE_WAY_SIZE > PAGE_SIZE
 unsigned int last_pkmap_nr_arr[DCACHE_N_COLORS];
 wait_queue_head_t pkmap_map_wait_arr[DCACHE_N_COLORS];
@@ -33,63 +31,25 @@
 
 static inline enum fixed_addresses kmap_idx(int type, unsigned long color)
 {
-	return (type + KM_TYPE_NR * smp_processor_id()) * DCACHE_N_COLORS +
+	return (type + KM_MAX_IDX * smp_processor_id()) * DCACHE_N_COLORS +
 		color;
 }
 
-void *kmap_atomic(struct page *page)
+enum fixed_addresses kmap_local_map_idx(int type, unsigned long pfn)
 {
-	enum fixed_addresses idx;
-	unsigned long vaddr;
-
-	preempt_disable();
-	pagefault_disable();
-	if (!PageHighMem(page))
-		return page_address(page);
-
-	idx = kmap_idx(kmap_atomic_idx_push(),
-		       DCACHE_ALIAS(page_to_phys(page)));
-	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-#ifdef CONFIG_DEBUG_HIGHMEM
-	BUG_ON(!pte_none(*(kmap_pte + idx)));
-#endif
-	set_pte(kmap_pte + idx, mk_pte(page, PAGE_KERNEL_EXEC));
-
-	return (void *)vaddr;
+	return kmap_idx(type, DCACHE_ALIAS(pfn << PAGE_SHIFT));
 }
-EXPORT_SYMBOL(kmap_atomic);
 
-void __kunmap_atomic(void *kvaddr)
+enum fixed_addresses kmap_local_unmap_idx(int type, unsigned long addr)
 {
-	if (kvaddr >= (void *)FIXADDR_START &&
-	    kvaddr < (void *)FIXADDR_TOP) {
-		int idx = kmap_idx(kmap_atomic_idx(),
-				   DCACHE_ALIAS((unsigned long)kvaddr));
-
-		/*
-		 * Force other mappings to Oops if they'll try to access this
-		 * pte without first remap it. Keeping stale mappings around
-		 * is a bad idea also, in case the page changes cacheability
-		 * attributes or becomes a protected page in a hypervisor.
-		 */
-		pte_clear(&init_mm, kvaddr, kmap_pte + idx);
-		local_flush_tlb_kernel_range((unsigned long)kvaddr,
-					     (unsigned long)kvaddr + PAGE_SIZE);
-
-		kmap_atomic_idx_pop();
-	}
-
-	pagefault_enable();
-	preempt_enable();
+	return kmap_idx(type, DCACHE_ALIAS(addr));
 }
-EXPORT_SYMBOL(__kunmap_atomic);
 
 void __init kmap_init(void)
 {
-	unsigned long kmap_vstart;
-
-	/* cache the first kmap pte */
-	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
-	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
+	/* Check if this memory layout is broken because PKMAP overlaps
+	 * page table.
+	 */
+	BUILD_BUG_ON(PKMAP_BASE < TLBTEMP_BASE_1 + TLBTEMP_SIZE);
 	kmap_waitqueues_init();
 }
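
With this conversion, xtensa relies on the generic kmap_local infrastructure: the
arch now only supplies the index hooks above (kmap_local_map_idx() and
kmap_local_unmap_idx(), which fold the per-CPU slot and the dcache alias color
into one fixmap index), while the mapping, unmapping, TLB flushing and debug
checks move to common code. On the caller side, kmap_local_page()/kunmap_local()
replace the removed kmap_atomic()/__kunmap_atomic(). A minimal caller-side
sketch follows; the helper name is illustrative and not part of this patch:

/*
 * Hypothetical helper, for illustration only: map two highmem pages
 * through the generic kmap_local API and copy one page of data.
 * kmap_local mappings are strictly nested, so unmap in reverse order.
 */
#include <linux/highmem.h>
#include <linux/string.h>

static void copy_page_contents(struct page *dst, struct page *src)
{
	void *vdst = kmap_local_page(dst);
	void *vsrc = kmap_local_page(src);

	memcpy(vdst, vsrc, PAGE_SIZE);

	kunmap_local(vsrc);	/* last mapped, first unmapped */
	kunmap_local(vdst);
}

Unlike kmap_atomic(), kmap_local_page() does not disable pagefaults or
preemption; the generic code preserves local mappings across a context switch,
which is why kmap_idx() only needs the slot type, the CPU id and the cache
color to pick a fixmap slot (e.g. with DCACHE_N_COLORS == 4, type 2 on CPU 1
with color 3 selects slot (2 + KM_MAX_IDX) * 4 + 3).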