.. | .. |
---|
12 | 12 | #include <linux/highmem.h> |
---|
13 | 13 | #include <asm/tlbflush.h> |
---|
14 | 14 | |
---|
| 15 | +static pte_t *kmap_pte; |
---|
| 16 | + |
---|
15 | 17 | #if DCACHE_WAY_SIZE > PAGE_SIZE |
---|
16 | 18 | unsigned int last_pkmap_nr_arr[DCACHE_N_COLORS]; |
---|
17 | 19 | wait_queue_head_t pkmap_map_wait_arr[DCACHE_N_COLORS]; |
---|
.. | .. |
---|
31 | 33 | |
---|
/*
 * Map a (kmap type, cache color) pair to a unique fixmap slot index.
 * Each CPU gets KM_TYPE_NR slots per D-cache color, so concurrent
 * atomic kmaps on different CPUs never collide and each cache color
 * gets a distinct virtual slot (needed on xtensa, where the D-cache
 * can be virtually aliased when DCACHE_WAY_SIZE > PAGE_SIZE).
 */
static inline enum fixed_addresses kmap_idx(int type, unsigned long color)
{
	return (type + KM_TYPE_NR * smp_processor_id()) * DCACHE_N_COLORS +
		color;
}
---|
37 | 39 | |
---|
/*
 * Establish a temporary atomic kernel mapping for a highmem page.
 *
 * The fixmap slot is chosen via kmap_idx() so that the mapping's virtual
 * address has the same D-cache color as the page's physical address
 * (DCACHE_ALIAS(page_to_phys(page))), avoiding cache aliasing problems.
 * Returns the kernel virtual address of the new mapping; the caller must
 * undo it with kunmap_atomic_high().
 */
void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	idx = kmap_idx(kmap_atomic_idx_push(),
		       DCACHE_ALIAS(page_to_phys(page)));
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	/* The slot must be empty; a live pte here means a missing kunmap. */
	BUG_ON(!pte_none(*(kmap_pte + idx)));
#endif
	set_pte(kmap_pte + idx, mk_pte(page, prot));

	return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_atomic_high_prot);
---|
| 56 | + |
---|
/*
 * Tear down an atomic kmap set up by kmap_atomic_high_prot().
 *
 * Addresses outside the fixmap window [FIXADDR_START, FIXADDR_TOP)
 * (i.e. lowmem pages that were never actually remapped) are silently
 * ignored; only real fixmap mappings are cleared and flushed.
 */
void kunmap_atomic_high(void *kvaddr)
{
	if (kvaddr >= (void *)FIXADDR_START &&
	    kvaddr < (void *)FIXADDR_TOP) {
		int idx = kmap_idx(kmap_atomic_idx(),
				   DCACHE_ALIAS((unsigned long)kvaddr));

		/*
		 * Force other mappings to Oops if they'll try to access this
		 * pte without first remapping it. Keeping stale mappings
		 * around is a bad idea also, in case the page changes
		 * cacheability attributes or becomes a protected page in a
		 * hypervisor.
		 */
		pte_clear(&init_mm, kvaddr, kmap_pte + idx);
		local_flush_tlb_kernel_range((unsigned long)kvaddr,
					     (unsigned long)kvaddr + PAGE_SIZE);

		kmap_atomic_idx_pop();
	}
}
EXPORT_SYMBOL(kunmap_atomic_high);
---|
47 | 78 | |
---|
/*
 * One-time highmem setup: sanity-check the memory layout and cache the
 * pte pointer for the first kmap fixmap slot (kmap_pte), which the
 * atomic kmap/kunmap paths index into directly.
 */
void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/* Check if this memory layout is broken because PKMAP overlaps
	 * page table.
	 */
	BUILD_BUG_ON(PKMAP_BASE < TLBTEMP_BASE_1 + TLBTEMP_SIZE);
	/* cache the first kmap pte */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = virt_to_kpte(kmap_vstart);
	kmap_waitqueues_init();
}
---|