2023-11-06 e3e12f52b214121840b44c91de5b3e5af5d3eb84
kernel/arch/x86/kernel/process_32.c
@@ -38,6 +38,7 @@
 #include <linux/io.h>
 #include <linux/kdebug.h>
 #include <linux/syscalls.h>
+#include <linux/highmem.h>
 
 #include <asm/pgtable.h>
 #include <asm/ldt.h>
@@ -205,6 +206,35 @@
 }
 EXPORT_SYMBOL_GPL(start_thread);
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+static void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p)
+{
+	int i;
+
+	/*
+	 * Clear @prev's kmap_atomic mappings
+	 */
+	for (i = 0; i < prev_p->kmap_idx; i++) {
+		int idx = i + KM_TYPE_NR * smp_processor_id();
+		pte_t *ptep = kmap_pte - idx;
+
+		kpte_clear_flush(ptep, __fix_to_virt(FIX_KMAP_BEGIN + idx));
+	}
+	/*
+	 * Restore @next_p's kmap_atomic mappings
+	 */
+	for (i = 0; i < next_p->kmap_idx; i++) {
+		int idx = i + KM_TYPE_NR * smp_processor_id();
+
+		if (!pte_none(next_p->kmap_pte[i]))
+			set_pte(kmap_pte - idx, next_p->kmap_pte[i]);
+	}
+}
+#else
+static inline void
+switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) { }
+#endif
+
 
 /*
  * switch_to(x,y) should switch tasks from x to y.
@@ -274,6 +304,8 @@
 
 	switch_to_extra(prev_p, next_p);
 
+	switch_kmaps(prev_p, next_p);
+
 	/*
 	 * Leave lazy mode, flushing any hypercalls made here.
 	 * This must be done before restoring TLS segments so
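
The hunks above only show the scheduler side of the change. Judging from the fields the diff touches (prev_p->kmap_idx, next_p->kmap_pte[i]), kmap_atomic() under CONFIG_PREEMPT_RT_FULL is expected to record each atomic mapping in the owning task_struct, so that switch_kmaps() can tear down the outgoing task's fixmap slots and replay the incoming task's slots on every context switch. The short userspace sketch below only models that save/restore pattern; fixmap[], struct task and task_switch() are hypothetical stand-ins, and the per-CPU KM_TYPE_NR offset and TLB flush from the real code are omitted.

/*
 * kmap_switch_demo.c - illustrative userspace analogy of switch_kmaps().
 * Build: cc -std=c99 -o kmap_switch_demo kmap_switch_demo.c
 */
#include <stdio.h>

#define KM_SLOTS 4			/* stand-in for KM_TYPE_NR */

static int fixmap[KM_SLOTS];		/* stand-in for the per-CPU fixmap PTEs */

struct task {				/* stand-in for task_struct's kmap state */
	int kmap_idx;			/* slots this task currently uses */
	int kmap_pte[KM_SLOTS];		/* the task's saved mappings */
};

/* Clear @prev's slots from the shared fixmap, then replay @next's slots. */
static void task_switch(struct task *prev, struct task *next)
{
	int i;

	for (i = 0; i < prev->kmap_idx; i++)
		fixmap[i] = 0;			/* kpte_clear_flush() in the real code */

	for (i = 0; i < next->kmap_idx; i++)
		if (next->kmap_pte[i] != 0)	/* !pte_none() in the real code */
			fixmap[i] = next->kmap_pte[i];	/* set_pte() */
}

int main(void)
{
	struct task a = { 2, { 0xa1, 0xa2 } };	/* task A holds two mappings */
	struct task b = { 1, { 0xb1 } };	/* task B holds one mapping */

	task_switch(&b, &a);	/* switch to A: fixmap = a1 a2 0 0 */
	task_switch(&a, &b);	/* switch to B: fixmap = b1 0 0 0 */

	for (int i = 0; i < KM_SLOTS; i++)
		printf("slot %d: %#x\n", i, fixmap[i]);
	return 0;
}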