```diff
@@ -34,6 +34,11 @@
 	return *ptep;
 }
 
+static unsigned int fixmap_idx(int type)
+{
+	return FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
+}
+
 void *kmap(struct page *page)
 {
 	might_sleep();
```
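The new fixmap_idx() helper only centralizes the per-CPU fixmap slot calculation that was previously open-coded at each call site. The slot index (`type`) still comes from kmap_atomic_idx_push(), which on PREEMPT_RT_FULL is assumed to track the nesting depth per task in current->kmap_idx rather than in a per-CPU counter; that per-task depth is what switch_kmaps() at the end of this patch walks. A minimal sketch of what those helpers are assumed to look like in the RT tree (the real definitions live in include/linux/highmem.h and may differ):

```c
/* Sketch only: assumed RT variant of the kmap_atomic index helpers.
 * On PREEMPT_RT_FULL the nesting depth lives in the task, so it
 * survives preemption and CPU migration; non-RT builds keep the
 * stock per-CPU counter. */
#ifdef CONFIG_PREEMPT_RT_FULL
static inline int kmap_atomic_idx_push(void)
{
	current->kmap_idx++;
	BUG_ON(current->kmap_idx > KM_TYPE_NR);
	return current->kmap_idx - 1;
}

static inline int kmap_atomic_idx(void)
{
	return current->kmap_idx - 1;
}

static inline void kmap_atomic_idx_pop(void)
{
	current->kmap_idx--;
	BUG_ON(current->kmap_idx < 0);
}
#endif
```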
```diff
@@ -54,12 +59,13 @@
 
 void *kmap_atomic(struct page *page)
 {
+	pte_t pte = mk_pte(page, kmap_prot);
 	unsigned int idx;
 	unsigned long vaddr;
 	void *kmap;
 	int type;
 
-	preempt_disable();
+	preempt_disable_nort();
 	pagefault_disable();
 	if (!PageHighMem(page))
 		return page_address(page);
```
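kmap_atomic() no longer disables preemption on RT: preempt_disable_nort() and preempt_enable_nort() are the RT tree's "not on RT" helpers, which compile to nothing under PREEMPT_RT_FULL and fall back to the regular preemption controls otherwise. A sketch of the assumed definitions (include/linux/preempt.h in the RT series):

```c
/* Assumed RT-tree helpers: disable preemption only when the full RT
 * preemption model is not in use; on PREEMPT_RT_FULL the section
 * stays preemptible and only a compiler barrier remains. */
#ifdef CONFIG_PREEMPT_RT_FULL
# define preempt_disable_nort()		barrier()
# define preempt_enable_nort()		barrier()
#else
# define preempt_disable_nort()		preempt_disable()
# define preempt_enable_nort()		preempt_enable()
#endif
```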
```diff
@@ -79,7 +85,7 @@
 
 	type = kmap_atomic_idx_push();
 
-	idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
+	idx = fixmap_idx(type);
 	vaddr = __fix_to_virt(idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
 	/*
@@ -93,7 +99,10 @@
 	 * in place, so the contained TLB flush ensures the TLB is updated
 	 * with the new mapping.
 	 */
-	set_fixmap_pte(idx, mk_pte(page, kmap_prot));
+#ifdef CONFIG_PREEMPT_RT_FULL
+	current->kmap_pte[type] = pte;
+#endif
+	set_fixmap_pte(idx, pte);
 
 	return (void *)vaddr;
 }
```
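Caching the newly installed PTE in current->kmap_pte[type] is what makes a preempted kmap_atomic section recoverable: when the task is scheduled back in, switch_kmaps() (added at the end of this patch) can replay the mapping into the fixmap slots of whichever CPU the task now runs on. This relies on per-task storage that the wider RT series is assumed to add to struct task_struct, roughly as sketched below (field names taken from their use in this patch; the real declaration lives elsewhere in the series):

```c
/* Sketch of the assumed task_struct additions used by this patch. */
#if defined CONFIG_PREEMPT_RT_FULL && defined CONFIG_HIGHMEM
	int	kmap_idx;		/* kmap_atomic nesting depth */
	pte_t	kmap_pte[KM_TYPE_NR];	/* PTEs to replay after a switch */
#endif
```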
```diff
@@ -106,44 +115,75 @@
 
 	if (kvaddr >= (void *)FIXADDR_START) {
 		type = kmap_atomic_idx();
-		idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
+		idx = fixmap_idx(type);
 
 		if (cache_is_vivt())
 			__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
+#ifdef CONFIG_PREEMPT_RT_FULL
+		current->kmap_pte[type] = __pte(0);
+#endif
 #ifdef CONFIG_DEBUG_HIGHMEM
 		BUG_ON(vaddr != __fix_to_virt(idx));
-		set_fixmap_pte(idx, __pte(0));
 #else
 		(void) idx; /* to kill a warning */
 #endif
+		set_fixmap_pte(idx, __pte(0));
 		kmap_atomic_idx_pop();
 	} else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
 		/* this address was obtained through kmap_high_get() */
 		kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
 	}
 	pagefault_enable();
-	preempt_enable();
+	preempt_enable_nort();
 }
 EXPORT_SYMBOL(__kunmap_atomic);
 
 void *kmap_atomic_pfn(unsigned long pfn)
 {
+	pte_t pte = pfn_pte(pfn, kmap_prot);
 	unsigned long vaddr;
 	int idx, type;
 	struct page *page = pfn_to_page(pfn);
 
-	preempt_disable();
+	preempt_disable_nort();
 	pagefault_disable();
 	if (!PageHighMem(page))
 		return page_address(page);
 
 	type = kmap_atomic_idx_push();
-	idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
+	idx = fixmap_idx(type);
 	vaddr = __fix_to_virt(idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
 	BUG_ON(!pte_none(get_fixmap_pte(vaddr)));
 #endif
-	set_fixmap_pte(idx, pfn_pte(pfn, kmap_prot));
+#ifdef CONFIG_PREEMPT_RT_FULL
+	current->kmap_pte[type] = pte;
+#endif
+	set_fixmap_pte(idx, pte);
 
 	return (void *)vaddr;
 }
+#if defined CONFIG_PREEMPT_RT_FULL
+void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p)
+{
+	int i;
+
+	/*
+	 * Clear @prev's kmap_atomic mappings
+	 */
+	for (i = 0; i < prev_p->kmap_idx; i++) {
+		int idx = fixmap_idx(i);
+
+		set_fixmap_pte(idx, __pte(0));
+	}
+	/*
+	 * Restore @next_p's kmap_atomic mappings
+	 */
+	for (i = 0; i < next_p->kmap_idx; i++) {
+		int idx = fixmap_idx(i);
+
+		if (!pte_none(next_p->kmap_pte[i]))
+			set_fixmap_pte(idx, next_p->kmap_pte[i]);
+	}
+}
+#endif
```
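switch_kmaps() tears down the outgoing task's atomic kmaps and replays the incoming task's saved PTEs, so the current CPU's fixmap slots always reflect the task that is actually running. For this to take effect it has to be called from the ARM context-switch path; a hedged sketch of the expected hookup in arch/arm/include/asm/switch_to.h follows (the exact macro in the RT series may differ):

```c
/* Sketch: call switch_kmaps() just before the low-level register
 * switch. The empty stub keeps non-RT/non-HIGHMEM builds unchanged. */
#if defined CONFIG_PREEMPT_RT_FULL && defined CONFIG_HIGHMEM
void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p);
#else
static inline void
switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) { }
#endif

#define switch_to(prev, next, last)					\
do {									\
	switch_kmaps(prev, next);					\
	last = __switch_to(prev, task_thread_info(prev),		\
			   task_thread_info(next));			\
} while (0)
```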