2023-11-06 e3e12f52b214121840b44c91de5b3e5af5d3eb84
kernel/arch/x86/mm/iomap_32.c
@@ -59,6 +59,7 @@
 
 void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
 {
+	pte_t pte = pfn_pte(pfn, prot);
 	unsigned long vaddr;
 	int idx, type;
 
@@ -68,7 +69,12 @@
 	type = kmap_atomic_idx_push();
 	idx = type + KM_TYPE_NR * smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-	set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
+	WARN_ON(!pte_none(*(kmap_pte - idx)));
+
+#ifdef CONFIG_PREEMPT_RT_FULL
+	current->kmap_pte[type] = pte;
+#endif
+	set_pte(kmap_pte - idx, pte);
 	arch_flush_lazy_mmu_mode();
 
 	return (void *)vaddr;
@@ -119,6 +125,9 @@
 	 * is a bad idea also, in case the page changes cacheability
 	 * attributes or becomes a protected page in a hypervisor.
 	 */
+#ifdef CONFIG_PREEMPT_RT_FULL
+	current->kmap_pte[type] = __pte(0);
+#endif
 	kpte_clear_flush(kmap_pte-idx, vaddr);
 	kmap_atomic_idx_pop();
 }
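
The hunks above record the atomic kmap pte in the owning task (current->kmap_pte[type]) when CONFIG_PREEMPT_RT_FULL is set, and clear that record again on unmap, so a preempted task's fixmap mappings can be re-established when it is scheduled back in, possibly on another CPU. Below is a minimal sketch of what that restore step could look like; the helper name restore_kmap_ptes and the per-task kmap_idx counter are assumptions for illustration only and are not part of the diff.

/*
 * Sketch only: re-install the kmap ptes recorded for the incoming task.
 * Assumes a per-task kmap_idx counter tracking how many slots are live;
 * only current->kmap_pte[] itself appears in the diff above.
 */
static inline void restore_kmap_ptes(struct task_struct *next_p)
{
	int i;

	for (i = 0; i < next_p->kmap_idx; i++) {
		/* Same fixmap slot computation as kmap_atomic_prot_pfn(). */
		int idx = i + KM_TYPE_NR * smp_processor_id();

		set_pte(kmap_pte - idx, next_p->kmap_pte[i]);
	}
	arch_flush_lazy_mmu_mode();
}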