...
  * This means each only has 1 PGDIR_SIZE worth of kvaddr mappings, which means
  * 2M of kvaddr space for typical config (8K page and 11:8:13 traversal split)
  *
- * - The fixed KMAP slots for kmap_local/atomic() require KM_MAX_IDX slots per
- *   CPU. So the number of CPUs sharing a single PTE page is limited.
+ * - fixmap anyhow needs a limited number of mappings. So 2M kvaddr == 256 PTE
+ *   slots across NR_CPUS would be more than sufficient (generic code defines
+ *   KM_TYPE_NR as 20).
  *
  * - pkmap being preemptible, in theory could do with more than 256 concurrent
  *   mappings. However, generic pkmap code: map_new_virtual(), doesn't traverse
...
  */
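
The "2M of kvaddr" figure in the comment above follows directly from the 11:8:13 traversal split: 13 offset bits give 8K pages, the 8 PTE-index bits give 256 entries per table, so one PGD entry (and hence one PTE page) covers 2MB. A stand-alone sketch of that arithmetic; the EX_* names are illustrative stand-ins, not the kernel's macros:

#define EX_PAGE_SHIFT   13                                      /* 8K pages            */
#define EX_PTE_BITS     8                                       /* 256 PTEs per table  */
#define EX_PGDIR_SIZE   (1UL << (EX_PAGE_SHIFT + EX_PTE_BITS))  /* 2MB per PGD entry   */
#define EX_NR_SLOTS     (EX_PGDIR_SIZE >> EX_PAGE_SHIFT)        /* 256 page-size slots */

_Static_assert(EX_PGDIR_SIZE == 2UL * 1024 * 1024, "one PGD entry maps 2MB of kvaddr");
_Static_assert(EX_NR_SLOTS == 256, "2MB window / 8K page = 256 PTE slots");
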
 
  extern pte_t * pkmap_page_table;
+static pte_t * fixmap_page_table;
+
+void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
+{
+        int idx, cpu_idx;
+        unsigned long vaddr;
+
+        cpu_idx = kmap_atomic_idx_push();
+        idx = cpu_idx + KM_TYPE_NR * smp_processor_id();
+        vaddr = FIXMAP_ADDR(idx);
+
+        set_pte_at(&init_mm, vaddr, fixmap_page_table + idx,
+                   mk_pte(page, prot));
+
+        return (void *)vaddr;
+}
+EXPORT_SYMBOL(kmap_atomic_high_prot);
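
kmap_atomic_high_prot() above carves the single fixmap PTE page into per-CPU banks of KM_TYPE_NR page-sized slots. A minimal user-space model of that index-to-address arithmetic, with EX_* placeholders standing in for FIXMAP_BASE, KM_TYPE_NR and the page shift (the values are assumptions for illustration, not ARC's actual layout):

#include <stdio.h>

#define EX_FIXMAP_BASE  0x7f200000UL    /* made-up base of the fixmap window */
#define EX_KM_TYPE_NR   20              /* slots reserved per CPU            */
#define EX_PAGE_SHIFT   13              /* 8K pages                          */

/* Mirrors: idx = cpu_idx + KM_TYPE_NR * smp_processor_id(); FIXMAP_ADDR(idx) */
static unsigned long ex_fixmap_addr(int cpu, int cpu_idx)
{
        int idx = cpu_idx + EX_KM_TYPE_NR * cpu;

        return EX_FIXMAP_BASE + ((unsigned long)idx << EX_PAGE_SHIFT);
}

int main(void)
{
        /* CPU 1, second nested mapping -> slot 21 -> base + 21 * 8K */
        printf("%#lx\n", ex_fixmap_addr(1, 1));
        return 0;
}
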
+
+void kunmap_atomic_high(void *kv)
+{
+        unsigned long kvaddr = (unsigned long)kv;
+
+        if (kvaddr >= FIXMAP_BASE && kvaddr < (FIXMAP_BASE + FIXMAP_SIZE)) {
+
+                /*
+                 * Because preemption is disabled, this vaddr can be associated
+                 * with the current allocated index.
+                 * But in case of multiple live kmap_atomic(), it still relies on
+                 * callers to unmap in right order.
+                 */
+                int cpu_idx = kmap_atomic_idx();
+                int idx = cpu_idx + KM_TYPE_NR * smp_processor_id();
+
+                WARN_ON(kvaddr != FIXMAP_ADDR(idx));
+
+                pte_clear(&init_mm, kvaddr, fixmap_page_table + idx);
+                local_flush_tlb_kernel_range(kvaddr, kvaddr + PAGE_SIZE);
+
+                kmap_atomic_idx_pop();
+        }
+}
+EXPORT_SYMBOL(kunmap_atomic_high);
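
The comment in kunmap_atomic_high() about unmapping "in right order" is a consequence of the kmap_atomic_idx_push()/_pop() stack: with several live mappings on one CPU, the most recently mapped page must be released first. A hypothetical caller (not part of this file) showing the expected LIFO pattern:

#include <linux/highmem.h>

/* Copy one highmem page to another; both mappings land in per-CPU fixmap
 * slots, and the second one mapped is the first one torn down. */
static void ex_copy_highpage(struct page *dst_page, struct page *src_page)
{
        void *src = kmap_atomic(src_page);      /* disables preemption */
        void *dst = kmap_atomic(dst_page);      /* second live mapping */

        memcpy(dst, src, PAGE_SIZE);

        kunmap_atomic(dst);                     /* last mapped, first unmapped */
        kunmap_atomic(src);
}
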
 
 static noinline pte_t * __init alloc_kmap_pgtable(unsigned long kvaddr)
 {
...
 {
         /* Due to recursive include hell, we can't do this in processor.h */
         BUILD_BUG_ON(PAGE_OFFSET < (VMALLOC_END + FIXMAP_SIZE + PKMAP_SIZE));
-        BUILD_BUG_ON(LAST_PKMAP > PTRS_PER_PTE);
-        BUILD_BUG_ON(FIX_KMAP_SLOTS > PTRS_PER_PTE);
 
+        BUILD_BUG_ON(KM_TYPE_NR > PTRS_PER_PTE);
         pkmap_page_table = alloc_kmap_pgtable(PKMAP_BASE);
-        alloc_kmap_pgtable(FIXMAP_BASE);
+
+        BUILD_BUG_ON(LAST_PKMAP > PTRS_PER_PTE);
+        fixmap_page_table = alloc_kmap_pgtable(FIXMAP_BASE);
 }
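
kmap_init() hands each window exactly one PTE page, so the BUILD_BUG_ON()s only need to show that the slot counts fit within PTRS_PER_PTE. A stand-alone sketch of that sizing argument, again with illustrative EX_* values rather than the real macros (with 8K pages a PTE page holds 256 entries, LAST_PKMAP is 256, and generic code defines KM_TYPE_NR as 20):

#define EX_PTRS_PER_PTE 256                     /* entries in one 8K-page PTE table */
#define EX_LAST_PKMAP   256                     /* one 2MB pkmap window / 8K page   */
#define EX_KM_TYPE_NR   20                      /* generic kmap_types slot count    */

_Static_assert(EX_LAST_PKMAP <= EX_PTRS_PER_PTE, "pkmap window fits one PTE page");
_Static_assert(EX_KM_TYPE_NR <= EX_PTRS_PER_PTE, "atomic kmap slots fit one PTE page");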