@@ -32,10 +32,11 @@
  */
 void *kmap_atomic_prot(struct page *page, pgprot_t prot)
 {
+	pte_t pte = mk_pte(page, prot);
 	unsigned long vaddr;
 	int idx, type;
 
-	preempt_disable();
+	preempt_disable_nort();
 	pagefault_disable();
 
 	if (!PageHighMem(page))
@@ -45,7 +46,10 @@
 	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 	BUG_ON(!pte_none(*(kmap_pte-idx)));
-	set_pte(kmap_pte-idx, mk_pte(page, prot));
+#ifdef CONFIG_PREEMPT_RT_FULL
+	current->kmap_pte[type] = pte;
+#endif
+	set_pte(kmap_pte-idx, pte);
 	arch_flush_lazy_mmu_mode();
 
 	return (void *)vaddr;
@@ -88,6 +92,9 @@
 		 * is a bad idea also, in case the page changes cacheability
 		 * attributes or becomes a protected page in a hypervisor.
 		 */
+#ifdef CONFIG_PREEMPT_RT_FULL
+		current->kmap_pte[type] = __pte(0);
+#endif
 		kpte_clear_flush(kmap_pte-idx, vaddr);
 		kmap_atomic_idx_pop();
 		arch_flush_lazy_mmu_mode();
@@ -100,7 +107,7 @@
 #endif
 
 	pagefault_enable();
-	preempt_enable();
+	preempt_enable_nort();
 }
 EXPORT_SYMBOL(__kunmap_atomic);
 
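Taken together, the hunks do two things. The preempt_disable_nort()/preempt_enable_nort() pair maps to the ordinary preempt_disable()/preempt_enable() when PREEMPT_RT_FULL is off, and to nothing when it is on, so under RT a task can be preempted while it holds an atomic kmap. To make that safe, the mapping's pte is stashed in current->kmap_pte[type] on map and reset to __pte(0) on unmap, which gives the scheduler enough state to tear down and re-establish the per-CPU fixmap slots across a context switch. A minimal sketch of that switch-time hook follows, assuming task_struct grows a kmap_pte[] array and a per-task kmap depth counter; the names switch_kmaps and kmap_idx are illustrative and not taken from this hunk.

#if defined(CONFIG_PREEMPT_RT_FULL) && defined(CONFIG_HIGHMEM)
/*
 * Illustrative sketch only: assumes task_struct has gained a
 * pte_t kmap_pte[KM_TYPE_NR] array (as the hunks above imply) and a
 * kmap_idx depth counter; a hook like this would run from the
 * architecture's __switch_to() path.
 */
static void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p)
{
	int i;

	/* Tear down the outgoing task's atomic kmap slots on this CPU. */
	for (i = 0; i < prev_p->kmap_idx; i++) {
		int idx = i + KM_TYPE_NR * smp_processor_id();

		kpte_clear_flush(kmap_pte - idx,
				 __fix_to_virt(FIX_KMAP_BEGIN + idx));
	}

	/* Re-establish the incoming task's saved mappings. */
	for (i = 0; i < next_p->kmap_idx; i++) {
		int idx = i + KM_TYPE_NR * smp_processor_id();

		/* Slots the kunmap path reset to __pte(0) stay empty. */
		if (!pte_none(next_p->kmap_pte[i]))
			set_pte(kmap_pte - idx, next_p->kmap_pte[i]);
	}
}
#endif

The pte_none() check mirrors the __pte(0) reset in the kunmap hunk above, so only mappings that are still live when the task is switched back in get restored.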