2023-11-06 e3e12f52b214121840b44c91de5b3e5af5d3eb84
kernel/arch/x86/mm/highmem_32.c
@@ -32,10 +32,11 @@
  */
 void *kmap_atomic_prot(struct page *page, pgprot_t prot)
 {
+	pte_t pte = mk_pte(page, prot);
 	unsigned long vaddr;
 	int idx, type;

-	preempt_disable();
+	preempt_disable_nort();
 	pagefault_disable();

 	if (!PageHighMem(page))
@@ -45,7 +46,10 @@
 	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 	BUG_ON(!pte_none(*(kmap_pte-idx)));
-	set_pte(kmap_pte-idx, mk_pte(page, prot));
+#ifdef CONFIG_PREEMPT_RT_FULL
+	current->kmap_pte[type] = pte;
+#endif
+	set_pte(kmap_pte-idx, pte);
 	arch_flush_lazy_mmu_mode();

 	return (void *)vaddr;
@@ -88,6 +92,9 @@
 	 * is a bad idea also, in case the page changes cacheability
 	 * attributes or becomes a protected page in a hypervisor.
 	 */
+#ifdef CONFIG_PREEMPT_RT_FULL
+	current->kmap_pte[type] = __pte(0);
+#endif
 	kpte_clear_flush(kmap_pte-idx, vaddr);
 	kmap_atomic_idx_pop();
 	arch_flush_lazy_mmu_mode();
@@ -100,7 +107,7 @@
 #endif

 	pagefault_enable();
-	preempt_enable();
+	preempt_enable_nort();
 }
 EXPORT_SYMBOL(__kunmap_atomic);

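For context: with CONFIG_PREEMPT_RT_FULL the _nort variants leave preemption enabled, so a task may be scheduled away while it holds an atomic kmap; stashing the pte in current->kmap_pte[type] (and clearing it in __kunmap_atomic()) is what lets the scheduler re-instantiate the task's kmap slots after a context switch. A sketch of how the RT tree defines these helpers, based on the RT patchset's <linux/preempt.h> changes rather than anything in this diff:

/*
 * Sketch of the _nort helpers as the RT patchset adds them to
 * <linux/preempt.h>; shown for reference, not part of this diff.
 */
#ifdef CONFIG_PREEMPT_RT_FULL
# define preempt_disable_nort()		barrier()		/* stays preemptible on RT */
# define preempt_enable_nort()		barrier()
#else
# define preempt_disable_nort()		preempt_disable()	/* unchanged behaviour off RT */
# define preempt_enable_nort()		preempt_enable()
#endif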
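And a minimal caller-side sketch, assuming ordinary kernel context (zero_page_example is a hypothetical helper, not part of the patch): on 32-bit x86, kmap_atomic() is a thin wrapper around kmap_atomic_prot(page, kmap_prot), and kunmap_atomic() ends in the __kunmap_atomic() patched above.

#include <linux/highmem.h>
#include <linux/string.h>

/* Hypothetical helper for illustration only: zero one (possibly highmem) page. */
static void zero_page_example(struct page *page)
{
	void *vaddr = kmap_atomic(page);	/* -> kmap_atomic_prot(page, kmap_prot) */

	memset(vaddr, 0, PAGE_SIZE);
	kunmap_atomic(vaddr);			/* -> __kunmap_atomic(vaddr) */
}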