@@ -59,6 +59,7 @@
 
 void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
 {
+	pte_t pte = pfn_pte(pfn, prot);
 	unsigned long vaddr;
 	int idx, type;
 
@@ -68,7 +69,12 @@
 	type = kmap_atomic_idx_push();
 	idx = type + KM_TYPE_NR * smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-	set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
+	WARN_ON(!pte_none(*(kmap_pte - idx)));
+
+#ifdef CONFIG_PREEMPT_RT_FULL
+	current->kmap_pte[type] = pte;
+#endif
+	set_pte(kmap_pte - idx, pte);
 	arch_flush_lazy_mmu_mode();
 
 	return (void *)vaddr;
@@ -119,6 +125,9 @@
 	 * is a bad idea also, in case the page changes cacheability
 	 * attributes or becomes a protected page in a hypervisor.
 	 */
+#ifdef CONFIG_PREEMPT_RT_FULL
+	current->kmap_pte[type] = __pte(0);
+#endif
 	kpte_clear_flush(kmap_pte-idx, vaddr);
 	kmap_atomic_idx_pop();
 }
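
The point of stashing the pte in current->kmap_pte[type] is that under CONFIG_PREEMPT_RT_FULL a task can be preempted, and even migrated, while it holds an atomic kmap, so the fixmap slots have to follow the task instead of staying pinned to one CPU. The consumer of these saved entries is the context-switch path, which is not part of the hunks above. The sketch below shows what such a restore could look like; the switch_kmaps() name and the per-task kmap_idx counter are assumptions for illustration, while the other identifiers are the ones visible in the diff.

/*
 * Sketch only, not the actual patch: re-point this CPU's fixmap
 * slots at context switch. kmap_idx is an assumed per-task count
 * of live atomic kmaps; kmap_pte[] is the array the diff above
 * fills in kmap_atomic_prot_pfn() and clears back to __pte(0)
 * on unmap.
 */
static void switch_kmaps(struct task_struct *prev_p,
			 struct task_struct *next_p)
{
	int i;

	/* Tear down whatever the outgoing task had installed here. */
	for (i = 0; i < prev_p->kmap_idx; i++) {
		int idx = i + KM_TYPE_NR * smp_processor_id();

		kpte_clear_flush(kmap_pte - idx,
				 __fix_to_virt(FIX_KMAP_BEGIN + idx));
	}

	/*
	 * Re-install the incoming task's saved ptes, skipping slots
	 * that were already cleared to __pte(0) on unmap.
	 */
	for (i = 0; i < next_p->kmap_idx; i++) {
		int idx = i + KM_TYPE_NR * smp_processor_id();

		if (!pte_none(next_p->kmap_pte[i]))
			set_pte(kmap_pte - idx, next_p->kmap_pte[i]);
	}
}

A hook like this would plausibly be called from __switch_to(), so that a task's atomic-kmap state travels with it across CPUs rather than being lost on preemption.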