2024-10-12 a5969cabbb4660eab42b6ef0412cbbd1200cf14d
kernel/arch/x86/include/asm/paravirt.h
@@ -17,16 +17,84 @@
 #include <linux/cpumask.h>
 #include <asm/frame.h>
 
+static inline unsigned long long paravirt_sched_clock(void)
+{
+        return PVOP_CALL0(unsigned long long, time.sched_clock);
+}
+
+struct static_key;
+extern struct static_key paravirt_steal_enabled;
+extern struct static_key paravirt_steal_rq_enabled;
+
+__visible void __native_queued_spin_unlock(struct qspinlock *lock);
+bool pv_is_native_spin_unlock(void);
+__visible bool __native_vcpu_is_preempted(long cpu);
+bool pv_is_native_vcpu_is_preempted(void);
+
+static inline u64 paravirt_steal_clock(int cpu)
+{
+        return PVOP_CALL1(u64, time.steal_clock, cpu);
+}
+
+/* The paravirtualized I/O functions */
+static inline void slow_down_io(void)
+{
+        pv_ops.cpu.io_delay();
+#ifdef REALLY_SLOW_IO
+        pv_ops.cpu.io_delay();
+        pv_ops.cpu.io_delay();
+        pv_ops.cpu.io_delay();
+#endif
+}
+
+void native_flush_tlb_local(void);
+void native_flush_tlb_global(void);
+void native_flush_tlb_one_user(unsigned long addr);
+void native_flush_tlb_others(const struct cpumask *cpumask,
+                             const struct flush_tlb_info *info);
+
+static inline void __flush_tlb_local(void)
+{
+        PVOP_VCALL0(mmu.flush_tlb_user);
+}
+
+static inline void __flush_tlb_global(void)
+{
+        PVOP_VCALL0(mmu.flush_tlb_kernel);
+}
+
+static inline void __flush_tlb_one_user(unsigned long addr)
+{
+        PVOP_VCALL1(mmu.flush_tlb_one_user, addr);
+}
+
+static inline void __flush_tlb_others(const struct cpumask *cpumask,
+                                      const struct flush_tlb_info *info)
+{
+        PVOP_VCALL2(mmu.flush_tlb_others, cpumask, info);
+}
+
+static inline void paravirt_tlb_remove_table(struct mmu_gather *tlb, void *table)
+{
+        PVOP_VCALL2(mmu.tlb_remove_table, tlb, table);
+}
+
+static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
+{
+        PVOP_VCALL1(mmu.exit_mmap, mm);
+}
+
+#ifdef CONFIG_PARAVIRT_XXL
 static inline void load_sp0(unsigned long sp0)
 {
-        PVOP_VCALL1(pv_cpu_ops.load_sp0, sp0);
+        PVOP_VCALL1(cpu.load_sp0, sp0);
 }
 
 /* The paravirtualized CPUID instruction. */
 static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
                            unsigned int *ecx, unsigned int *edx)
 {
-        PVOP_VCALL4(pv_cpu_ops.cpuid, eax, ebx, ecx, edx);
+        PVOP_VCALL4(cpu.cpuid, eax, ebx, ecx, edx);
 }
 
 /*
@@ -34,98 +102,84 @@
  */
 static inline unsigned long paravirt_get_debugreg(int reg)
 {
-        return PVOP_CALL1(unsigned long, pv_cpu_ops.get_debugreg, reg);
+        return PVOP_CALL1(unsigned long, cpu.get_debugreg, reg);
 }
 #define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
 static inline void set_debugreg(unsigned long val, int reg)
 {
-        PVOP_VCALL2(pv_cpu_ops.set_debugreg, reg, val);
+        PVOP_VCALL2(cpu.set_debugreg, reg, val);
 }
 
 static inline unsigned long read_cr0(void)
 {
-        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr0);
+        return PVOP_CALL0(unsigned long, cpu.read_cr0);
 }
 
 static inline void write_cr0(unsigned long x)
 {
-        PVOP_VCALL1(pv_cpu_ops.write_cr0, x);
+        PVOP_VCALL1(cpu.write_cr0, x);
 }
 
 static inline unsigned long read_cr2(void)
 {
-        return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2);
+        return PVOP_CALLEE0(unsigned long, mmu.read_cr2);
 }
 
 static inline void write_cr2(unsigned long x)
 {
-        PVOP_VCALL1(pv_mmu_ops.write_cr2, x);
+        PVOP_VCALL1(mmu.write_cr2, x);
 }
 
 static inline unsigned long __read_cr3(void)
 {
-        return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr3);
+        return PVOP_CALL0(unsigned long, mmu.read_cr3);
 }
 
 static inline void write_cr3(unsigned long x)
 {
-        PVOP_VCALL1(pv_mmu_ops.write_cr3, x);
+        PVOP_VCALL1(mmu.write_cr3, x);
 }
 
 static inline void __write_cr4(unsigned long x)
 {
-        PVOP_VCALL1(pv_cpu_ops.write_cr4, x);
+        PVOP_VCALL1(cpu.write_cr4, x);
 }
-
-#ifdef CONFIG_X86_64
-static inline unsigned long read_cr8(void)
-{
-        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr8);
-}
-
-static inline void write_cr8(unsigned long x)
-{
-        PVOP_VCALL1(pv_cpu_ops.write_cr8, x);
-}
-#endif
 
 static inline void arch_safe_halt(void)
 {
-        PVOP_VCALL0(pv_irq_ops.safe_halt);
+        PVOP_VCALL0(irq.safe_halt);
 }
 
 static inline void halt(void)
 {
-        PVOP_VCALL0(pv_irq_ops.halt);
+        PVOP_VCALL0(irq.halt);
 }
 
 static inline void wbinvd(void)
 {
-        PVOP_VCALL0(pv_cpu_ops.wbinvd);
+        PVOP_VCALL0(cpu.wbinvd);
 }
-
-#define get_kernel_rpl() (pv_info.kernel_rpl)
 
 static inline u64 paravirt_read_msr(unsigned msr)
 {
-        return PVOP_CALL1(u64, pv_cpu_ops.read_msr, msr);
+        return PVOP_CALL1(u64, cpu.read_msr, msr);
 }
 
 static inline void paravirt_write_msr(unsigned msr,
                                       unsigned low, unsigned high)
 {
-        PVOP_VCALL3(pv_cpu_ops.write_msr, msr, low, high);
+        PVOP_VCALL3(cpu.write_msr, msr, low, high);
 }
 
 static inline u64 paravirt_read_msr_safe(unsigned msr, int *err)
 {
-        return PVOP_CALL2(u64, pv_cpu_ops.read_msr_safe, msr, err);
+        return PVOP_CALL2(u64, cpu.read_msr_safe, msr, err);
 }
 
 static inline int paravirt_write_msr_safe(unsigned msr,
                                           unsigned low, unsigned high)
 {
-        return PVOP_CALL3(int, pv_cpu_ops.write_msr_safe, msr, low, high);
+        return PVOP_CALL3(int, cpu.write_msr_safe, msr, low, high);
 }
 
 #define rdmsr(msr, val1, val2) \
@@ -170,23 +224,9 @@
         return err;
 }
 
-static inline unsigned long long paravirt_sched_clock(void)
-{
-        return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock);
-}
-
-struct static_key;
-extern struct static_key paravirt_steal_enabled;
-extern struct static_key paravirt_steal_rq_enabled;
-
-static inline u64 paravirt_steal_clock(int cpu)
-{
-        return PVOP_CALL1(u64, pv_time_ops.steal_clock, cpu);
-}
-
 static inline unsigned long long paravirt_read_pmc(int counter)
 {
-        return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter);
+        return PVOP_CALL1(u64, cpu.read_pmc, counter);
 }
 
 #define rdpmc(counter, low, high) \
@@ -200,382 +240,241 @@
 
 static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
 {
-        PVOP_VCALL2(pv_cpu_ops.alloc_ldt, ldt, entries);
+        PVOP_VCALL2(cpu.alloc_ldt, ldt, entries);
 }
 
 static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
 {
-        PVOP_VCALL2(pv_cpu_ops.free_ldt, ldt, entries);
+        PVOP_VCALL2(cpu.free_ldt, ldt, entries);
 }
 
 static inline void load_TR_desc(void)
 {
-        PVOP_VCALL0(pv_cpu_ops.load_tr_desc);
+        PVOP_VCALL0(cpu.load_tr_desc);
 }
 static inline void load_gdt(const struct desc_ptr *dtr)
 {
-        PVOP_VCALL1(pv_cpu_ops.load_gdt, dtr);
+        PVOP_VCALL1(cpu.load_gdt, dtr);
 }
 static inline void load_idt(const struct desc_ptr *dtr)
 {
-        PVOP_VCALL1(pv_cpu_ops.load_idt, dtr);
+        PVOP_VCALL1(cpu.load_idt, dtr);
 }
 static inline void set_ldt(const void *addr, unsigned entries)
 {
-        PVOP_VCALL2(pv_cpu_ops.set_ldt, addr, entries);
+        PVOP_VCALL2(cpu.set_ldt, addr, entries);
 }
 static inline unsigned long paravirt_store_tr(void)
 {
-        return PVOP_CALL0(unsigned long, pv_cpu_ops.store_tr);
+        return PVOP_CALL0(unsigned long, cpu.store_tr);
 }
+
 #define store_tr(tr) ((tr) = paravirt_store_tr())
 static inline void load_TLS(struct thread_struct *t, unsigned cpu)
 {
-        PVOP_VCALL2(pv_cpu_ops.load_tls, t, cpu);
+        PVOP_VCALL2(cpu.load_tls, t, cpu);
 }
 
-#ifdef CONFIG_X86_64
 static inline void load_gs_index(unsigned int gs)
 {
-        PVOP_VCALL1(pv_cpu_ops.load_gs_index, gs);
+        PVOP_VCALL1(cpu.load_gs_index, gs);
 }
-#endif
 
 static inline void write_ldt_entry(struct desc_struct *dt, int entry,
                                    const void *desc)
 {
-        PVOP_VCALL3(pv_cpu_ops.write_ldt_entry, dt, entry, desc);
+        PVOP_VCALL3(cpu.write_ldt_entry, dt, entry, desc);
 }
 
 static inline void write_gdt_entry(struct desc_struct *dt, int entry,
                                    void *desc, int type)
 {
-        PVOP_VCALL4(pv_cpu_ops.write_gdt_entry, dt, entry, desc, type);
+        PVOP_VCALL4(cpu.write_gdt_entry, dt, entry, desc, type);
 }
 
 static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
 {
-        PVOP_VCALL3(pv_cpu_ops.write_idt_entry, dt, entry, g);
-}
-static inline void set_iopl_mask(unsigned mask)
-{
-        PVOP_VCALL1(pv_cpu_ops.set_iopl_mask, mask);
+        PVOP_VCALL3(cpu.write_idt_entry, dt, entry, g);
 }
 
-/* The paravirtualized I/O functions */
-static inline void slow_down_io(void)
+#ifdef CONFIG_X86_IOPL_IOPERM
+static inline void tss_invalidate_io_bitmap(void)
 {
-        pv_cpu_ops.io_delay();
-#ifdef REALLY_SLOW_IO
-        pv_cpu_ops.io_delay();
-        pv_cpu_ops.io_delay();
-        pv_cpu_ops.io_delay();
-#endif
+        PVOP_VCALL0(cpu.invalidate_io_bitmap);
 }
+
+static inline void tss_update_io_bitmap(void)
+{
+        PVOP_VCALL0(cpu.update_io_bitmap);
+}
+#endif
 
 static inline void paravirt_activate_mm(struct mm_struct *prev,
                                         struct mm_struct *next)
 {
-        PVOP_VCALL2(pv_mmu_ops.activate_mm, prev, next);
+        PVOP_VCALL2(mmu.activate_mm, prev, next);
 }
 
 static inline void paravirt_arch_dup_mmap(struct mm_struct *oldmm,
                                           struct mm_struct *mm)
 {
-        PVOP_VCALL2(pv_mmu_ops.dup_mmap, oldmm, mm);
-}
-
-static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
-{
-        PVOP_VCALL1(pv_mmu_ops.exit_mmap, mm);
-}
-
-static inline void __flush_tlb(void)
-{
-        PVOP_VCALL0(pv_mmu_ops.flush_tlb_user);
-}
-static inline void __flush_tlb_global(void)
-{
-        PVOP_VCALL0(pv_mmu_ops.flush_tlb_kernel);
-}
-static inline void __flush_tlb_one_user(unsigned long addr)
-{
-        PVOP_VCALL1(pv_mmu_ops.flush_tlb_one_user, addr);
-}
-
-static inline void flush_tlb_others(const struct cpumask *cpumask,
-                                    const struct flush_tlb_info *info)
-{
-        PVOP_VCALL2(pv_mmu_ops.flush_tlb_others, cpumask, info);
-}
-
-static inline void paravirt_tlb_remove_table(struct mmu_gather *tlb, void *table)
-{
-        PVOP_VCALL2(pv_mmu_ops.tlb_remove_table, tlb, table);
+        PVOP_VCALL2(mmu.dup_mmap, oldmm, mm);
 }
 
 static inline int paravirt_pgd_alloc(struct mm_struct *mm)
 {
-        return PVOP_CALL1(int, pv_mmu_ops.pgd_alloc, mm);
+        return PVOP_CALL1(int, mmu.pgd_alloc, mm);
 }
 
 static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
 {
-        PVOP_VCALL2(pv_mmu_ops.pgd_free, mm, pgd);
+        PVOP_VCALL2(mmu.pgd_free, mm, pgd);
 }
 
 static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
 {
-        PVOP_VCALL2(pv_mmu_ops.alloc_pte, mm, pfn);
+        PVOP_VCALL2(mmu.alloc_pte, mm, pfn);
 }
 static inline void paravirt_release_pte(unsigned long pfn)
 {
-        PVOP_VCALL1(pv_mmu_ops.release_pte, pfn);
+        PVOP_VCALL1(mmu.release_pte, pfn);
 }
 
 static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
 {
-        PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn);
+        PVOP_VCALL2(mmu.alloc_pmd, mm, pfn);
 }
 
 static inline void paravirt_release_pmd(unsigned long pfn)
 {
-        PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn);
+        PVOP_VCALL1(mmu.release_pmd, pfn);
 }
 
 static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
 {
-        PVOP_VCALL2(pv_mmu_ops.alloc_pud, mm, pfn);
+        PVOP_VCALL2(mmu.alloc_pud, mm, pfn);
 }
 static inline void paravirt_release_pud(unsigned long pfn)
 {
-        PVOP_VCALL1(pv_mmu_ops.release_pud, pfn);
+        PVOP_VCALL1(mmu.release_pud, pfn);
 }
 
 static inline void paravirt_alloc_p4d(struct mm_struct *mm, unsigned long pfn)
 {
-        PVOP_VCALL2(pv_mmu_ops.alloc_p4d, mm, pfn);
+        PVOP_VCALL2(mmu.alloc_p4d, mm, pfn);
 }
 
 static inline void paravirt_release_p4d(unsigned long pfn)
 {
-        PVOP_VCALL1(pv_mmu_ops.release_p4d, pfn);
+        PVOP_VCALL1(mmu.release_p4d, pfn);
 }
 
 static inline pte_t __pte(pteval_t val)
 {
-        pteval_t ret;
-
-        if (sizeof(pteval_t) > sizeof(long))
-                ret = PVOP_CALLEE2(pteval_t,
-                                   pv_mmu_ops.make_pte,
-                                   val, (u64)val >> 32);
-        else
-                ret = PVOP_CALLEE1(pteval_t,
-                                   pv_mmu_ops.make_pte,
-                                   val);
-
-        return (pte_t) { .pte = ret };
+        return (pte_t) { PVOP_CALLEE1(pteval_t, mmu.make_pte, val) };
 }
 
 static inline pteval_t pte_val(pte_t pte)
 {
-        pteval_t ret;
-
-        if (sizeof(pteval_t) > sizeof(long))
-                ret = PVOP_CALLEE2(pteval_t, pv_mmu_ops.pte_val,
-                                   pte.pte, (u64)pte.pte >> 32);
-        else
-                ret = PVOP_CALLEE1(pteval_t, pv_mmu_ops.pte_val,
-                                   pte.pte);
-
-        return ret;
+        return PVOP_CALLEE1(pteval_t, mmu.pte_val, pte.pte);
 }
 
 static inline pgd_t __pgd(pgdval_t val)
 {
-        pgdval_t ret;
-
-        if (sizeof(pgdval_t) > sizeof(long))
-                ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.make_pgd,
-                                   val, (u64)val >> 32);
-        else
-                ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.make_pgd,
-                                   val);
-
-        return (pgd_t) { ret };
+        return (pgd_t) { PVOP_CALLEE1(pgdval_t, mmu.make_pgd, val) };
 }
 
 static inline pgdval_t pgd_val(pgd_t pgd)
 {
-        pgdval_t ret;
-
-        if (sizeof(pgdval_t) > sizeof(long))
-                ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.pgd_val,
-                                   pgd.pgd, (u64)pgd.pgd >> 32);
-        else
-                ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.pgd_val,
-                                   pgd.pgd);
-
-        return ret;
+        return PVOP_CALLEE1(pgdval_t, mmu.pgd_val, pgd.pgd);
 }
 
 #define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
-static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
+static inline pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr,
                                            pte_t *ptep)
 {
         pteval_t ret;
 
-        ret = PVOP_CALL3(pteval_t, pv_mmu_ops.ptep_modify_prot_start,
-                         mm, addr, ptep);
+        ret = PVOP_CALL3(pteval_t, mmu.ptep_modify_prot_start, vma, addr, ptep);
 
         return (pte_t) { .pte = ret };
 }
 
-static inline void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
-                                           pte_t *ptep, pte_t pte)
+static inline void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
+                                           pte_t *ptep, pte_t old_pte, pte_t pte)
 {
-        if (sizeof(pteval_t) > sizeof(long))
-                /* 5 arg words */
-                pv_mmu_ops.ptep_modify_prot_commit(mm, addr, ptep, pte);
-        else
-                PVOP_VCALL4(pv_mmu_ops.ptep_modify_prot_commit,
-                            mm, addr, ptep, pte.pte);
+
+        PVOP_VCALL4(mmu.ptep_modify_prot_commit, vma, addr, ptep, pte.pte);
 }
 
 static inline void set_pte(pte_t *ptep, pte_t pte)
 {
-        if (sizeof(pteval_t) > sizeof(long))
-                PVOP_VCALL3(pv_mmu_ops.set_pte, ptep,
-                            pte.pte, (u64)pte.pte >> 32);
-        else
-                PVOP_VCALL2(pv_mmu_ops.set_pte, ptep,
-                            pte.pte);
-}
-
-static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
-                              pte_t *ptep, pte_t pte)
-{
-        if (sizeof(pteval_t) > sizeof(long))
-                /* 5 arg words */
-                pv_mmu_ops.set_pte_at(mm, addr, ptep, pte);
-        else
-                PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pte.pte);
+        PVOP_VCALL2(mmu.set_pte, ptep, pte.pte);
 }
 
 static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
 {
-        pmdval_t val = native_pmd_val(pmd);
-
-        if (sizeof(pmdval_t) > sizeof(long))
-                PVOP_VCALL3(pv_mmu_ops.set_pmd, pmdp, val, (u64)val >> 32);
-        else
-                PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, val);
+        PVOP_VCALL2(mmu.set_pmd, pmdp, native_pmd_val(pmd));
 }
 
-#if CONFIG_PGTABLE_LEVELS >= 3
 static inline pmd_t __pmd(pmdval_t val)
 {
-        pmdval_t ret;
-
-        if (sizeof(pmdval_t) > sizeof(long))
-                ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.make_pmd,
-                                   val, (u64)val >> 32);
-        else
-                ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.make_pmd,
-                                   val);
-
-        return (pmd_t) { ret };
+        return (pmd_t) { PVOP_CALLEE1(pmdval_t, mmu.make_pmd, val) };
 }
 
 static inline pmdval_t pmd_val(pmd_t pmd)
 {
-        pmdval_t ret;
-
-        if (sizeof(pmdval_t) > sizeof(long))
-                ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.pmd_val,
-                                   pmd.pmd, (u64)pmd.pmd >> 32);
-        else
-                ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.pmd_val,
-                                   pmd.pmd);
-
-        return ret;
+        return PVOP_CALLEE1(pmdval_t, mmu.pmd_val, pmd.pmd);
 }
 
 static inline void set_pud(pud_t *pudp, pud_t pud)
 {
-        pudval_t val = native_pud_val(pud);
-
-        if (sizeof(pudval_t) > sizeof(long))
-                PVOP_VCALL3(pv_mmu_ops.set_pud, pudp,
-                            val, (u64)val >> 32);
-        else
-                PVOP_VCALL2(pv_mmu_ops.set_pud, pudp,
-                            val);
+        PVOP_VCALL2(mmu.set_pud, pudp, native_pud_val(pud));
 }
-#if CONFIG_PGTABLE_LEVELS >= 4
+
 static inline pud_t __pud(pudval_t val)
 {
         pudval_t ret;
 
-        if (sizeof(pudval_t) > sizeof(long))
-                ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.make_pud,
-                                   val, (u64)val >> 32);
-        else
-                ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.make_pud,
-                                   val);
+        ret = PVOP_CALLEE1(pudval_t, mmu.make_pud, val);
 
         return (pud_t) { ret };
 }
 
 static inline pudval_t pud_val(pud_t pud)
 {
-        pudval_t ret;
-
-        if (sizeof(pudval_t) > sizeof(long))
-                ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.pud_val,
-                                   pud.pud, (u64)pud.pud >> 32);
-        else
-                ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.pud_val,
-                                   pud.pud);
-
-        return ret;
+        return PVOP_CALLEE1(pudval_t, mmu.pud_val, pud.pud);
 }
 
 static inline void pud_clear(pud_t *pudp)
 {
-        set_pud(pudp, __pud(0));
+        set_pud(pudp, native_make_pud(0));
 }
 
 static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
 {
         p4dval_t val = native_p4d_val(p4d);
 
-        if (sizeof(p4dval_t) > sizeof(long))
-                PVOP_VCALL3(pv_mmu_ops.set_p4d, p4dp,
-                            val, (u64)val >> 32);
-        else
-                PVOP_VCALL2(pv_mmu_ops.set_p4d, p4dp,
-                            val);
+        PVOP_VCALL2(mmu.set_p4d, p4dp, val);
 }
 
 #if CONFIG_PGTABLE_LEVELS >= 5
 
 static inline p4d_t __p4d(p4dval_t val)
 {
-        p4dval_t ret = PVOP_CALLEE1(p4dval_t, pv_mmu_ops.make_p4d, val);
+        p4dval_t ret = PVOP_CALLEE1(p4dval_t, mmu.make_p4d, val);
 
         return (p4d_t) { ret };
 }
 
 static inline p4dval_t p4d_val(p4d_t p4d)
 {
-        return PVOP_CALLEE1(p4dval_t, pv_mmu_ops.p4d_val, p4d.p4d);
+        return PVOP_CALLEE1(p4dval_t, mmu.p4d_val, p4d.p4d);
 }
 
 static inline void __set_pgd(pgd_t *pgdp, pgd_t pgd)
 {
-        PVOP_VCALL2(pv_mmu_ops.set_pgd, pgdp, native_pgd_val(pgd));
+        PVOP_VCALL2(mmu.set_pgd, pgdp, native_pgd_val(pgd));
 }
 
 #define set_pgd(pgdp, pgdval) do { \
@@ -586,41 +485,17 @@
 } while (0)
 
 #define pgd_clear(pgdp) do { \
-        if (pgtable_l5_enabled()) \
-                set_pgd(pgdp, __pgd(0)); \
+        if (pgtable_l5_enabled()) \
+                set_pgd(pgdp, native_make_pgd(0)); \
 } while (0)
 
 #endif /* CONFIG_PGTABLE_LEVELS == 5 */
 
 static inline void p4d_clear(p4d_t *p4dp)
 {
-        set_p4d(p4dp, __p4d(0));
+        set_p4d(p4dp, native_make_p4d(0));
 }
 
-#endif /* CONFIG_PGTABLE_LEVELS == 4 */
-
-#endif /* CONFIG_PGTABLE_LEVELS >= 3 */
-
-#ifdef CONFIG_X86_PAE
-/* Special-case pte-setting operations for PAE, which can't update a
-   64-bit pte atomically */
-static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
-{
-        PVOP_VCALL3(pv_mmu_ops.set_pte_atomic, ptep,
-                    pte.pte, pte.pte >> 32);
-}
-
-static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
-                             pte_t *ptep)
-{
-        PVOP_VCALL3(pv_mmu_ops.pte_clear, mm, addr, ptep);
-}
-
-static inline void pmd_clear(pmd_t *pmdp)
-{
-        PVOP_VCALL1(pv_mmu_ops.pmd_clear, pmdp);
-}
-#else /* !CONFIG_X86_PAE */
 static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
 {
         set_pte(ptep, pte);
@@ -629,89 +504,85 @@
 static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep)
 {
-        set_pte_at(mm, addr, ptep, __pte(0));
+        set_pte(ptep, native_make_pte(0));
 }
 
 static inline void pmd_clear(pmd_t *pmdp)
 {
-        set_pmd(pmdp, __pmd(0));
+        set_pmd(pmdp, native_make_pmd(0));
 }
-#endif /* CONFIG_X86_PAE */
 
 #define __HAVE_ARCH_START_CONTEXT_SWITCH
 static inline void arch_start_context_switch(struct task_struct *prev)
 {
-        PVOP_VCALL1(pv_cpu_ops.start_context_switch, prev);
+        PVOP_VCALL1(cpu.start_context_switch, prev);
 }
 
 static inline void arch_end_context_switch(struct task_struct *next)
 {
-        PVOP_VCALL1(pv_cpu_ops.end_context_switch, next);
+        PVOP_VCALL1(cpu.end_context_switch, next);
 }
 
 #define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
 static inline void arch_enter_lazy_mmu_mode(void)
 {
-        PVOP_VCALL0(pv_mmu_ops.lazy_mode.enter);
+        PVOP_VCALL0(mmu.lazy_mode.enter);
 }
 
 static inline void arch_leave_lazy_mmu_mode(void)
 {
-        PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
+        PVOP_VCALL0(mmu.lazy_mode.leave);
 }
 
 static inline void arch_flush_lazy_mmu_mode(void)
 {
-        PVOP_VCALL0(pv_mmu_ops.lazy_mode.flush);
+        PVOP_VCALL0(mmu.lazy_mode.flush);
 }
 
 static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
                                 phys_addr_t phys, pgprot_t flags)
 {
-        pv_mmu_ops.set_fixmap(idx, phys, flags);
+        pv_ops.mmu.set_fixmap(idx, phys, flags);
 }
+#endif
 
 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
 
 static __always_inline void pv_queued_spin_lock_slowpath(struct qspinlock *lock,
                                                          u32 val)
 {
-        PVOP_VCALL2(pv_lock_ops.queued_spin_lock_slowpath, lock, val);
+        PVOP_VCALL2(lock.queued_spin_lock_slowpath, lock, val);
 }
 
 static __always_inline void pv_queued_spin_unlock(struct qspinlock *lock)
 {
-        PVOP_VCALLEE1(pv_lock_ops.queued_spin_unlock, lock);
+        PVOP_VCALLEE1(lock.queued_spin_unlock, lock);
 }
 
 static __always_inline void pv_wait(u8 *ptr, u8 val)
 {
-        PVOP_VCALL2(pv_lock_ops.wait, ptr, val);
+        PVOP_VCALL2(lock.wait, ptr, val);
 }
 
 static __always_inline void pv_kick(int cpu)
 {
-        PVOP_VCALL1(pv_lock_ops.kick, cpu);
+        PVOP_VCALL1(lock.kick, cpu);
 }
 
 static __always_inline bool pv_vcpu_is_preempted(long cpu)
 {
-        return PVOP_CALLEE1(bool, pv_lock_ops.vcpu_is_preempted, cpu);
+        return PVOP_CALLEE1(bool, lock.vcpu_is_preempted, cpu);
 }
+
+void __raw_callee_save___native_queued_spin_unlock(struct qspinlock *lock);
+bool __raw_callee_save___native_vcpu_is_preempted(long cpu);
 
 #endif /* SMP && PARAVIRT_SPINLOCKS */
 
 #ifdef CONFIG_X86_32
-#define PV_SAVE_REGS "pushl %ecx; pushl %edx;"
-#define PV_RESTORE_REGS "popl %edx; popl %ecx;"
-
 /* save and restore all caller-save registers, except return value */
 #define PV_SAVE_ALL_CALLER_REGS "pushl %ecx;"
 #define PV_RESTORE_ALL_CALLER_REGS "popl %ecx;"
-
-#define PV_FLAGS_ARG "0"
-#define PV_EXTRA_CLOBBERS
-#define PV_VEXTRA_CLOBBERS
 #else
 /* save and restore all caller-save registers, except return value */
 #define PV_SAVE_ALL_CALLER_REGS \
@@ -767,7 +630,7 @@
         "pop %rsi;" \
         "pop %rdx;" \
         "pop %rcx;"
-
-/* We save some registers, but all of them, that's too much. We clobber all
- * caller saved registers but the argument parameter */
-#define PV_SAVE_REGS "pushq %%rdi;"
-#define PV_RESTORE_REGS "popq %%rdi;"
-#define PV_EXTRA_CLOBBERS EXTRA_CLOBBERS, "rcx" , "rdx", "rsi"
-#define PV_VEXTRA_CLOBBERS EXTRA_CLOBBERS, "rdi", "rcx" , "rdx", "rsi"
-#define PV_FLAGS_ARG "D"
 #endif
 
 /*
@@ -779,24 +642,25 @@
         "call " #func ";" \
         PV_RESTORE_ALL_CALLER_REGS \
         FRAME_END \
-        "ret;" \
+        ASM_RET \
         ".size " PV_THUNK_NAME(func) ", .-" PV_THUNK_NAME(func) ";" \
         ".popsection")
 
 #define __PV_IS_CALLEE_SAVE(func) \
         ((struct paravirt_callee_save) { func })
 
+#ifdef CONFIG_PARAVIRT_XXL
 static inline notrace unsigned long arch_local_save_flags(void)
 {
-        return PVOP_CALLEE0(unsigned long, pv_irq_ops.save_fl);
+        return PVOP_CALLEE0(unsigned long, irq.save_fl);
 }
 
 static inline notrace void arch_local_irq_restore(unsigned long f)
 {
-        PVOP_VCALLEE1(pv_irq_ops.restore_fl, f);
+        PVOP_VCALLEE1(irq.restore_fl, f);
 }
 
 static inline notrace void arch_local_irq_disable(void)
 {
-        PVOP_VCALLEE0(pv_irq_ops.irq_disable);
+        PVOP_VCALLEE0(irq.irq_disable);
 }
 
 static inline notrace void arch_local_irq_enable(void)
 {
-        PVOP_VCALLEE0(pv_irq_ops.irq_enable);
+        PVOP_VCALLEE0(irq.irq_enable);
 }
 
 static inline notrace unsigned long arch_local_irq_save(void)
@@ -807,6 +671,7 @@
         arch_local_irq_disable();
         return f;
 }
+#endif
 
 
 /* Make sure as little as possible of this mess escapes. */
@@ -828,7 +693,7 @@
 
 #else /* __ASSEMBLY__ */
 
-#define _PVSITE(ptype, clobbers, ops, word, algn) \
+#define _PVSITE(ptype, ops, word, algn) \
 771:; \
         ops; \
 772:; \
@@ -837,7 +702,6 @@
          word 771b; \
          .byte ptype; \
          .byte 772b-771b; \
-        .short clobbers; \
         .popsection
 
 
@@ -869,8 +733,8 @@
         COND_POP(set, CLBR_RCX, rcx); \
         COND_POP(set, CLBR_RAX, rax)
 
-#define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 8)
-#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8)
+#define PARA_PATCH(off) ((off) / 8)
+#define PARA_SITE(ptype, ops) _PVSITE(ptype, ops, .quad, 8)
 #define PARA_INDIRECT(addr) *addr(%rip)
 #else
 #define PV_SAVE_REGS(set) \
@@ -884,92 +748,78 @@
         COND_POP(set, CLBR_EDI, edi); \
         COND_POP(set, CLBR_EAX, eax)
 
-#define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
-#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
+#define PARA_PATCH(off) ((off) / 4)
+#define PARA_SITE(ptype, ops) _PVSITE(ptype, ops, .long, 4)
 #define PARA_INDIRECT(addr) *%cs:addr
 #endif
 
+#ifdef CONFIG_PARAVIRT_XXL
 #define INTERRUPT_RETURN \
-        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE, \
-                  ANNOTATE_RETPOLINE_SAFE; \
-                  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret);)
+        PARA_SITE(PARA_PATCH(PV_CPU_iret), \
+                  ANNOTATE_RETPOLINE_SAFE; \
+                  jmp PARA_INDIRECT(pv_ops+PV_CPU_iret);)
 
 #define DISABLE_INTERRUPTS(clobbers) \
-        PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \
+        PARA_SITE(PARA_PATCH(PV_IRQ_irq_disable), \
                   PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \
-                  ANNOTATE_RETPOLINE_SAFE; \
-                  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable); \
+                  ANNOTATE_RETPOLINE_SAFE; \
+                  call PARA_INDIRECT(pv_ops+PV_IRQ_irq_disable); \
                   PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
 
 #define ENABLE_INTERRUPTS(clobbers) \
-        PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers, \
+        PARA_SITE(PARA_PATCH(PV_IRQ_irq_enable), \
                   PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \
-                  ANNOTATE_RETPOLINE_SAFE; \
-                  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable); \
-                  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
-
-#ifdef CONFIG_X86_32
-#define GET_CR0_INTO_EAX \
-        push %ecx; push %edx; \
-        ANNOTATE_RETPOLINE_SAFE; \
-        call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
-        pop %edx; pop %ecx
-#else /* !CONFIG_X86_32 */
-
-/*
- * If swapgs is used while the userspace stack is still current,
- * there's no way to call a pvop. The PV replacement *must* be
- * inlined, or the swapgs instruction must be trapped and emulated.
- */
-#define SWAPGS_UNSAFE_STACK \
-        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE, \
-                  swapgs)
-
-/*
- * Note: swapgs is very special, and in practise is either going to be
- * implemented with a single "swapgs" instruction or something very
- * special. Either way, we don't need to save any registers for
- * it.
- */
-#define SWAPGS \
-        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE, \
-                  ANNOTATE_RETPOLINE_SAFE; \
-                  call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs); \
-                 )
-
-#define GET_CR2_INTO_RAX \
-        ANNOTATE_RETPOLINE_SAFE; \
-        call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2);
-
-#define USERGS_SYSRET64 \
-        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64), \
-                  CLBR_NONE, \
-                  ANNOTATE_RETPOLINE_SAFE; \
-                  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64);)
-
-#ifdef CONFIG_DEBUG_ENTRY
-#define SAVE_FLAGS(clobbers) \
-        PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_save_fl), clobbers, \
-                  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \
-                  ANNOTATE_RETPOLINE_SAFE; \
-                  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_save_fl); \
+                  ANNOTATE_RETPOLINE_SAFE; \
+                  call PARA_INDIRECT(pv_ops+PV_IRQ_irq_enable); \
                   PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
 #endif
 
-#endif /* CONFIG_X86_32 */
+#ifdef CONFIG_X86_64
+#ifdef CONFIG_PARAVIRT_XXL
+#define USERGS_SYSRET64 \
+        PARA_SITE(PARA_PATCH(PV_CPU_usergs_sysret64), \
+                  ANNOTATE_RETPOLINE_SAFE; \
+                  jmp PARA_INDIRECT(pv_ops+PV_CPU_usergs_sysret64);)
+
+#ifdef CONFIG_DEBUG_ENTRY
+#define SAVE_FLAGS(clobbers) \
+        PARA_SITE(PARA_PATCH(PV_IRQ_save_fl), \
+                  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE); \
+                  ANNOTATE_RETPOLINE_SAFE; \
+                  call PARA_INDIRECT(pv_ops+PV_IRQ_save_fl); \
+                  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
+#endif
+#endif /* CONFIG_PARAVIRT_XXL */
+#endif /* CONFIG_X86_64 */
+
+#ifdef CONFIG_PARAVIRT_XXL
+
+#define GET_CR2_INTO_AX \
+        PARA_SITE(PARA_PATCH(PV_MMU_read_cr2), \
+                  ANNOTATE_RETPOLINE_SAFE; \
+                  call PARA_INDIRECT(pv_ops+PV_MMU_read_cr2); \
+                 )
+
+#endif /* CONFIG_PARAVIRT_XXL */
+
 
 #endif /* __ASSEMBLY__ */
 #else /* CONFIG_PARAVIRT */
 # define default_banner x86_init_noop
+#endif /* !CONFIG_PARAVIRT */
+
 #ifndef __ASSEMBLY__
+#ifndef CONFIG_PARAVIRT_XXL
 static inline void paravirt_arch_dup_mmap(struct mm_struct *oldmm,
                                           struct mm_struct *mm)
 {
 }
+#endif
 
+#ifndef CONFIG_PARAVIRT
 static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
 {
 }
+#endif
 #endif /* __ASSEMBLY__ */
-
-#endif /* !CONFIG_PARAVIRT */
 #endif /* _ASM_X86_PARAVIRT_H */
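
Taken together, the hunks above replace the old per-area tables (pv_cpu_ops, pv_mmu_ops, pv_irq_ops, pv_lock_ops, pv_time_ops) with fields of a single pv_ops structure and move the CPU/MMU/IRQ wrappers under CONFIG_PARAVIRT_XXL. For readers unfamiliar with the pattern, here is a minimal user-space sketch of the function-pointer indirection the PVOP_CALL*/PVOP_VCALL* call sites rely on; the *_stub names below are invented for illustration and are not the kernel's actual definitions.

#include <stdio.h>

/* Simplified stand-in for the kernel's nested pv_ops table. */
struct pv_cpu_ops_stub { void (*io_delay)(void); };
struct pv_ops_stub     { struct pv_cpu_ops_stub cpu; };

static void native_io_delay_stub(void) { puts("native: dummy port write"); }
static void xen_io_delay_stub(void)    { puts("xen: skip the delay"); }

/* Slots start out pointing at the bare-metal implementation. */
static struct pv_ops_stub pv_ops_stub = {
        .cpu = { .io_delay = native_io_delay_stub },
};

/* Call sites always go through the table, like slow_down_io() in the patch. */
static void slow_down_io_stub(void)
{
        pv_ops_stub.cpu.io_delay();
}

int main(void)
{
        slow_down_io_stub();                            /* native path */
        pv_ops_stub.cpu.io_delay = xen_io_delay_stub;   /* hypervisor overrides the slot at boot */
        slow_down_io_stub();                            /* paravirtualized path */
        return 0;
}

In the kernel, the PVOP macros additionally record each call site in the .parainstructions section so the indirect call can later be patched to a direct call or inline code; the sketch only models the function-pointer shape, not the patching.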