@@ -17 +17 @@
 #include <linux/cpumask.h>
 #include <asm/frame.h>
 
+static inline unsigned long long paravirt_sched_clock(void)
+{
+	return PVOP_CALL0(unsigned long long, time.sched_clock);
+}
+
+struct static_key;
+extern struct static_key paravirt_steal_enabled;
+extern struct static_key paravirt_steal_rq_enabled;
+
+__visible void __native_queued_spin_unlock(struct qspinlock *lock);
+bool pv_is_native_spin_unlock(void);
+__visible bool __native_vcpu_is_preempted(long cpu);
+bool pv_is_native_vcpu_is_preempted(void);
+
+static inline u64 paravirt_steal_clock(int cpu)
+{
+	return PVOP_CALL1(u64, time.steal_clock, cpu);
+}
+
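The PVOP_CALL* macros above compile to patchable call sites that dispatch through the global pv_ops structure. As a rough mental model, this is what such a call boils down to semantically; a minimal userspace analogue with invented names, not the kernel's actual patching machinery:

```c
#include <stdio.h>

/* Invented stand-ins for pv_ops and its time sub-ops. */
struct demo_pv_ops {
	struct {
		unsigned long long (*sched_clock)(void);
	} time;
};

static unsigned long long native_sched_clock_demo(void)
{
	return 42;	/* a real kernel would read the TSC here */
}

/* Boot code (or a hypervisor guest's init) fills in the table once. */
static struct demo_pv_ops pv_ops_demo = {
	.time.sched_clock = native_sched_clock_demo,
};

/* Semantically, PVOP_CALL0(unsigned long long, time.sched_clock) is one
 * indirect call through the ops table; the kernel additionally records
 * the call site so it can be patched to a direct call at boot. */
static unsigned long long demo_sched_clock(void)
{
	return pv_ops_demo.time.sched_clock();
}

int main(void)
{
	printf("%llu\n", demo_sched_clock());
	return 0;
}
```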
|---|
+/* The paravirtualized I/O functions */
+static inline void slow_down_io(void)
+{
+	pv_ops.cpu.io_delay();
+#ifdef REALLY_SLOW_IO
+	pv_ops.cpu.io_delay();
+	pv_ops.cpu.io_delay();
+	pv_ops.cpu.io_delay();
+#endif
+}
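slow_down_io() exists for legacy devices that need a pause between port accesses; the native io_delay op traditionally performs a dummy write to the unused "POST code" port 0x80, and REALLY_SLOW_IO quadruples that. A hedged userspace sketch of the same trick (x86 Linux only, needs root for ioperm):

```c
#include <stdio.h>
#include <sys/io.h>	/* ioperm()/outb(); glibc, x86 Linux */

int main(void)
{
	/* Ask for access to the legacy POST-code port. */
	if (ioperm(0x80, 1, 1)) {
		perror("ioperm");
		return 1;
	}
	/* One dummy write imposes roughly a microsecond of bus delay,
	 * which is what the kernel's native io_delay op relies on. */
	outb(0, 0x80);
	puts("delayed");
	return 0;
}
```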
|---|
+
+void native_flush_tlb_local(void);
+void native_flush_tlb_global(void);
+void native_flush_tlb_one_user(unsigned long addr);
+void native_flush_tlb_others(const struct cpumask *cpumask,
+			     const struct flush_tlb_info *info);
+
+static inline void __flush_tlb_local(void)
+{
+	PVOP_VCALL0(mmu.flush_tlb_user);
+}
+
+static inline void __flush_tlb_global(void)
+{
+	PVOP_VCALL0(mmu.flush_tlb_kernel);
+}
+
+static inline void __flush_tlb_one_user(unsigned long addr)
+{
+	PVOP_VCALL1(mmu.flush_tlb_one_user, addr);
+}
+
+static inline void __flush_tlb_others(const struct cpumask *cpumask,
+				      const struct flush_tlb_info *info)
+{
+	PVOP_VCALL2(mmu.flush_tlb_others, cpumask, info);
+}
+
+static inline void paravirt_tlb_remove_table(struct mmu_gather *tlb, void *table)
+{
+	PVOP_VCALL2(mmu.tlb_remove_table, tlb, table);
+}
+
+static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
+{
+	PVOP_VCALL1(mmu.exit_mmap, mm);
+}
+
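Everything hoisted above this point stays available to any CONFIG_PARAVIRT kernel; the #ifdef that follows fences off the full register-level overrides. A note on the split, based on how the Kconfig symbols are wired up (paraphrased, not taken from this hunk):

```c
/*
 * CONFIG_PARAVIRT: lightweight hooks only — sched/steal clock, TLB
 * shootdown and (with PARAVIRT_SPINLOCKS) lock ops.  Enough for e.g.
 * KVM guests.
 *
 * CONFIG_PARAVIRT_XXL: additionally virtualizes privileged state
 * (control registers, MSRs, descriptor tables, IRQ flags).  Selected
 * by Xen PV guests, which cannot touch that state directly.
 */
```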
|---|
+#ifdef CONFIG_PARAVIRT_XXL
 static inline void load_sp0(unsigned long sp0)
 {
-	PVOP_VCALL1(pv_cpu_ops.load_sp0, sp0);
+	PVOP_VCALL1(cpu.load_sp0, sp0);
 }
 
 /* The paravirtualized CPUID instruction. */
 static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
			   unsigned int *ecx, unsigned int *edx)
 {
-	PVOP_VCALL4(pv_cpu_ops.cpuid, eax, ebx, ecx, edx);
+	PVOP_VCALL4(cpu.cpuid, eax, ebx, ecx, edx);
 }
 
 /*
@@ -34 +102 @@
  */
 static inline unsigned long paravirt_get_debugreg(int reg)
 {
-	return PVOP_CALL1(unsigned long, pv_cpu_ops.get_debugreg, reg);
+	return PVOP_CALL1(unsigned long, cpu.get_debugreg, reg);
 }
 #define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
 static inline void set_debugreg(unsigned long val, int reg)
 {
-	PVOP_VCALL2(pv_cpu_ops.set_debugreg, reg, val);
+	PVOP_VCALL2(cpu.set_debugreg, reg, val);
 }
 
 static inline unsigned long read_cr0(void)
 {
-	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr0);
+	return PVOP_CALL0(unsigned long, cpu.read_cr0);
 }
 
 static inline void write_cr0(unsigned long x)
 {
-	PVOP_VCALL1(pv_cpu_ops.write_cr0, x);
+	PVOP_VCALL1(cpu.write_cr0, x);
 }
 
 static inline unsigned long read_cr2(void)
 {
-	return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2);
+	return PVOP_CALLEE0(unsigned long, mmu.read_cr2);
 }
 
 static inline void write_cr2(unsigned long x)
 {
-	PVOP_VCALL1(pv_mmu_ops.write_cr2, x);
+	PVOP_VCALL1(mmu.write_cr2, x);
 }
 
 static inline unsigned long __read_cr3(void)
 {
-	return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr3);
+	return PVOP_CALL0(unsigned long, mmu.read_cr3);
 }
 
 static inline void write_cr3(unsigned long x)
 {
-	PVOP_VCALL1(pv_mmu_ops.write_cr3, x);
+	PVOP_VCALL1(mmu.write_cr3, x);
 }
 
 static inline void __write_cr4(unsigned long x)
 {
-	PVOP_VCALL1(pv_cpu_ops.write_cr4, x);
+	PVOP_VCALL1(cpu.write_cr4, x);
 }
-
-#ifdef CONFIG_X86_64
-static inline unsigned long read_cr8(void)
-{
-	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr8);
-}
-
-static inline void write_cr8(unsigned long x)
-{
-	PVOP_VCALL1(pv_cpu_ops.write_cr8, x);
-}
-#endif
 
 static inline void arch_safe_halt(void)
 {
-	PVOP_VCALL0(pv_irq_ops.safe_halt);
+	PVOP_VCALL0(irq.safe_halt);
 }
 
 static inline void halt(void)
 {
-	PVOP_VCALL0(pv_irq_ops.halt);
+	PVOP_VCALL0(irq.halt);
 }
 
 static inline void wbinvd(void)
 {
-	PVOP_VCALL0(pv_cpu_ops.wbinvd);
+	PVOP_VCALL0(cpu.wbinvd);
 }
-
-#define get_kernel_rpl() (pv_info.kernel_rpl)
 
 static inline u64 paravirt_read_msr(unsigned msr)
 {
-	return PVOP_CALL1(u64, pv_cpu_ops.read_msr, msr);
+	return PVOP_CALL1(u64, cpu.read_msr, msr);
 }
 
 static inline void paravirt_write_msr(unsigned msr,
				      unsigned low, unsigned high)
 {
-	PVOP_VCALL3(pv_cpu_ops.write_msr, msr, low, high);
+	PVOP_VCALL3(cpu.write_msr, msr, low, high);
 }
 
 static inline u64 paravirt_read_msr_safe(unsigned msr, int *err)
 {
-	return PVOP_CALL2(u64, pv_cpu_ops.read_msr_safe, msr, err);
+	return PVOP_CALL2(u64, cpu.read_msr_safe, msr, err);
 }
 
 static inline int paravirt_write_msr_safe(unsigned msr,
					  unsigned low, unsigned high)
 {
-	return PVOP_CALL3(int, pv_cpu_ops.write_msr_safe, msr, low, high);
+	return PVOP_CALL3(int, cpu.write_msr_safe, msr, low, high);
 }
 
 #define rdmsr(msr, val1, val2)			\
@@ -170 +224 @@
 	return err;
 }
 
-static inline unsigned long long paravirt_sched_clock(void)
-{
-	return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock);
-}
-
-struct static_key;
-extern struct static_key paravirt_steal_enabled;
-extern struct static_key paravirt_steal_rq_enabled;
-
-static inline u64 paravirt_steal_clock(int cpu)
-{
-	return PVOP_CALL1(u64, pv_time_ops.steal_clock, cpu);
-}
-
 static inline unsigned long long paravirt_read_pmc(int counter)
 {
-	return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter);
+	return PVOP_CALL1(u64, cpu.read_pmc, counter);
 }
 
 #define rdpmc(counter, low, high)		\
@@ -200 +240 @@
 
 static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
 {
-	PVOP_VCALL2(pv_cpu_ops.alloc_ldt, ldt, entries);
+	PVOP_VCALL2(cpu.alloc_ldt, ldt, entries);
 }
 
 static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
 {
-	PVOP_VCALL2(pv_cpu_ops.free_ldt, ldt, entries);
+	PVOP_VCALL2(cpu.free_ldt, ldt, entries);
 }
 
 static inline void load_TR_desc(void)
 {
-	PVOP_VCALL0(pv_cpu_ops.load_tr_desc);
+	PVOP_VCALL0(cpu.load_tr_desc);
 }
 static inline void load_gdt(const struct desc_ptr *dtr)
 {
-	PVOP_VCALL1(pv_cpu_ops.load_gdt, dtr);
+	PVOP_VCALL1(cpu.load_gdt, dtr);
 }
 static inline void load_idt(const struct desc_ptr *dtr)
 {
-	PVOP_VCALL1(pv_cpu_ops.load_idt, dtr);
+	PVOP_VCALL1(cpu.load_idt, dtr);
 }
 static inline void set_ldt(const void *addr, unsigned entries)
 {
-	PVOP_VCALL2(pv_cpu_ops.set_ldt, addr, entries);
+	PVOP_VCALL2(cpu.set_ldt, addr, entries);
 }
 static inline unsigned long paravirt_store_tr(void)
 {
-	return PVOP_CALL0(unsigned long, pv_cpu_ops.store_tr);
+	return PVOP_CALL0(unsigned long, cpu.store_tr);
 }
+
 #define store_tr(tr)	((tr) = paravirt_store_tr())
 static inline void load_TLS(struct thread_struct *t, unsigned cpu)
 {
-	PVOP_VCALL2(pv_cpu_ops.load_tls, t, cpu);
+	PVOP_VCALL2(cpu.load_tls, t, cpu);
 }
 
-#ifdef CONFIG_X86_64
 static inline void load_gs_index(unsigned int gs)
 {
-	PVOP_VCALL1(pv_cpu_ops.load_gs_index, gs);
+	PVOP_VCALL1(cpu.load_gs_index, gs);
 }
-#endif
 
 static inline void write_ldt_entry(struct desc_struct *dt, int entry,
				   const void *desc)
 {
-	PVOP_VCALL3(pv_cpu_ops.write_ldt_entry, dt, entry, desc);
+	PVOP_VCALL3(cpu.write_ldt_entry, dt, entry, desc);
 }
 
 static inline void write_gdt_entry(struct desc_struct *dt, int entry,
				   void *desc, int type)
 {
-	PVOP_VCALL4(pv_cpu_ops.write_gdt_entry, dt, entry, desc, type);
+	PVOP_VCALL4(cpu.write_gdt_entry, dt, entry, desc, type);
 }
 
 static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
 {
-	PVOP_VCALL3(pv_cpu_ops.write_idt_entry, dt, entry, g);
-}
-static inline void set_iopl_mask(unsigned mask)
-{
-	PVOP_VCALL1(pv_cpu_ops.set_iopl_mask, mask);
+	PVOP_VCALL3(cpu.write_idt_entry, dt, entry, g);
 }
 
-/* The paravirtualized I/O functions */
-static inline void slow_down_io(void)
+#ifdef CONFIG_X86_IOPL_IOPERM
+static inline void tss_invalidate_io_bitmap(void)
 {
-	pv_cpu_ops.io_delay();
-#ifdef REALLY_SLOW_IO
-	pv_cpu_ops.io_delay();
-	pv_cpu_ops.io_delay();
-	pv_cpu_ops.io_delay();
-#endif
+	PVOP_VCALL0(cpu.invalidate_io_bitmap);
 }
+
+static inline void tss_update_io_bitmap(void)
+{
+	PVOP_VCALL0(cpu.update_io_bitmap);
+}
+#endif
 
 static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
 {
-	PVOP_VCALL2(pv_mmu_ops.activate_mm, prev, next);
+	PVOP_VCALL2(mmu.activate_mm, prev, next);
 }
 
 static inline void paravirt_arch_dup_mmap(struct mm_struct *oldmm,
					  struct mm_struct *mm)
 {
-	PVOP_VCALL2(pv_mmu_ops.dup_mmap, oldmm, mm);
-}
-
-static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
-{
-	PVOP_VCALL1(pv_mmu_ops.exit_mmap, mm);
-}
-
-static inline void __flush_tlb(void)
-{
-	PVOP_VCALL0(pv_mmu_ops.flush_tlb_user);
-}
-static inline void __flush_tlb_global(void)
-{
-	PVOP_VCALL0(pv_mmu_ops.flush_tlb_kernel);
-}
-static inline void __flush_tlb_one_user(unsigned long addr)
-{
-	PVOP_VCALL1(pv_mmu_ops.flush_tlb_one_user, addr);
-}
-
-static inline void flush_tlb_others(const struct cpumask *cpumask,
-				    const struct flush_tlb_info *info)
-{
-	PVOP_VCALL2(pv_mmu_ops.flush_tlb_others, cpumask, info);
-}
-
-static inline void paravirt_tlb_remove_table(struct mmu_gather *tlb, void *table)
-{
-	PVOP_VCALL2(pv_mmu_ops.tlb_remove_table, tlb, table);
+	PVOP_VCALL2(mmu.dup_mmap, oldmm, mm);
 }
 
 static inline int paravirt_pgd_alloc(struct mm_struct *mm)
 {
-	return PVOP_CALL1(int, pv_mmu_ops.pgd_alloc, mm);
+	return PVOP_CALL1(int, mmu.pgd_alloc, mm);
 }
 
 static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
 {
-	PVOP_VCALL2(pv_mmu_ops.pgd_free, mm, pgd);
+	PVOP_VCALL2(mmu.pgd_free, mm, pgd);
 }
 
 static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
 {
-	PVOP_VCALL2(pv_mmu_ops.alloc_pte, mm, pfn);
+	PVOP_VCALL2(mmu.alloc_pte, mm, pfn);
 }
 static inline void paravirt_release_pte(unsigned long pfn)
 {
-	PVOP_VCALL1(pv_mmu_ops.release_pte, pfn);
+	PVOP_VCALL1(mmu.release_pte, pfn);
 }
 
 static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
 {
-	PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn);
+	PVOP_VCALL2(mmu.alloc_pmd, mm, pfn);
 }
 
 static inline void paravirt_release_pmd(unsigned long pfn)
 {
-	PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn);
+	PVOP_VCALL1(mmu.release_pmd, pfn);
 }
 
 static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
 {
-	PVOP_VCALL2(pv_mmu_ops.alloc_pud, mm, pfn);
+	PVOP_VCALL2(mmu.alloc_pud, mm, pfn);
 }
 static inline void paravirt_release_pud(unsigned long pfn)
 {
-	PVOP_VCALL1(pv_mmu_ops.release_pud, pfn);
+	PVOP_VCALL1(mmu.release_pud, pfn);
 }
 
 static inline void paravirt_alloc_p4d(struct mm_struct *mm, unsigned long pfn)
 {
-	PVOP_VCALL2(pv_mmu_ops.alloc_p4d, mm, pfn);
+	PVOP_VCALL2(mmu.alloc_p4d, mm, pfn);
 }
 
 static inline void paravirt_release_p4d(unsigned long pfn)
 {
-	PVOP_VCALL1(pv_mmu_ops.release_p4d, pfn);
+	PVOP_VCALL1(mmu.release_p4d, pfn);
 }
 
 static inline pte_t __pte(pteval_t val)
 {
-	pteval_t ret;
-
-	if (sizeof(pteval_t) > sizeof(long))
-		ret = PVOP_CALLEE2(pteval_t,
-				   pv_mmu_ops.make_pte,
-				   val, (u64)val >> 32);
-	else
-		ret = PVOP_CALLEE1(pteval_t,
-				   pv_mmu_ops.make_pte,
-				   val);
-
-	return (pte_t) { .pte = ret };
+	return (pte_t) { PVOP_CALLEE1(pteval_t, mmu.make_pte, val) };
 }
 
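The deleted sizeof(pteval_t) > sizeof(long) branches date from 32-bit PAE, where a 64-bit PTE had to be handed to a pvop as two register-sized halves. With PARAVIRT_XXL now 64-bit only, a single argument always suffices. For the record, this is all the two-word convention did; a runnable demo with made-up values:

```c
#include <stdint.h>
#include <stdio.h>

/* Reassemble a PTE passed as (low, high) halves, as the removed
 * PVOP_CALLEE2(..., val, (u64)val >> 32) convention required. */
static uint64_t make_pte_from_halves(uint32_t lo, uint32_t hi)
{
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	uint64_t val = 0x8000000012345067ULL;	/* made-up PTE bits */
	uint32_t lo = (uint32_t)val;
	uint32_t hi = (uint32_t)(val >> 32);

	printf("round-trip ok: %d\n",
	       make_pte_from_halves(lo, hi) == val);
	return 0;
}
```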
|---|
 static inline pteval_t pte_val(pte_t pte)
 {
-	pteval_t ret;
-
-	if (sizeof(pteval_t) > sizeof(long))
-		ret = PVOP_CALLEE2(pteval_t, pv_mmu_ops.pte_val,
-				   pte.pte, (u64)pte.pte >> 32);
-	else
-		ret = PVOP_CALLEE1(pteval_t, pv_mmu_ops.pte_val,
-				   pte.pte);
-
-	return ret;
+	return PVOP_CALLEE1(pteval_t, mmu.pte_val, pte.pte);
 }
 
 static inline pgd_t __pgd(pgdval_t val)
 {
-	pgdval_t ret;
-
-	if (sizeof(pgdval_t) > sizeof(long))
-		ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.make_pgd,
-				   val, (u64)val >> 32);
-	else
-		ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.make_pgd,
-				   val);
-
-	return (pgd_t) { ret };
+	return (pgd_t) { PVOP_CALLEE1(pgdval_t, mmu.make_pgd, val) };
 }
 
 static inline pgdval_t pgd_val(pgd_t pgd)
 {
-	pgdval_t ret;
-
-	if (sizeof(pgdval_t) > sizeof(long))
-		ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.pgd_val,
-				   pgd.pgd, (u64)pgd.pgd >> 32);
-	else
-		ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.pgd_val,
-				   pgd.pgd);
-
-	return ret;
+	return PVOP_CALLEE1(pgdval_t, mmu.pgd_val, pgd.pgd);
 }
 
 #define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
-static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
+static inline pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr,
					   pte_t *ptep)
 {
 	pteval_t ret;
 
-	ret = PVOP_CALL3(pteval_t, pv_mmu_ops.ptep_modify_prot_start,
-			 mm, addr, ptep);
+	ret = PVOP_CALL3(pteval_t, mmu.ptep_modify_prot_start, vma, addr, ptep);
 
 	return (pte_t) { .pte = ret };
 }
 
-static inline void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
-					   pte_t *ptep, pte_t pte)
+static inline void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
+					   pte_t *ptep, pte_t old_pte, pte_t pte)
 {
-	if (sizeof(pteval_t) > sizeof(long))
-		/* 5 arg words */
-		pv_mmu_ops.ptep_modify_prot_commit(mm, addr, ptep, pte);
-	else
-		PVOP_VCALL4(pv_mmu_ops.ptep_modify_prot_commit,
-			    mm, addr, ptep, pte.pte);
+
+	PVOP_VCALL4(mmu.ptep_modify_prot_commit, vma, addr, ptep, pte.pte);
 }
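The start/commit pair brackets a read-modify-write of a live PTE so a hypervisor, or the hardware accessed/dirty-bit walker, cannot update the entry behind the caller's back: start reads and logically claims the entry, commit publishes the new value alongside the old one it was derived from (hence the added old_pte parameter). A userspace analogue of that transaction shape, with invented names and C11 atomics standing in for the real synchronization:

```c
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t pte_slot;	/* stand-in for a live PTE */

int main(void)
{
	atomic_store(&pte_slot, 0x12345067ULL);

	/* "start": atomically take the entry out of service, so updates
	 * made concurrently (e.g. a hardware dirty-bit set) cannot be
	 * lost while the new value is computed. */
	uint64_t old_pte = atomic_exchange(&pte_slot, 0);

	/* modify: e.g. clear a hypothetical writable bit (bit 1) */
	uint64_t new_pte = old_pte & ~(uint64_t)0x2;

	/* "commit": publish the new entry; the real hook also receives
	 * old_pte so the backend can validate the transition. */
	atomic_store(&pte_slot, new_pte);

	printf("%#llx -> %#llx\n",
	       (unsigned long long)old_pte, (unsigned long long)new_pte);
	return 0;
}
```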
|---|
 
 static inline void set_pte(pte_t *ptep, pte_t pte)
 {
-	if (sizeof(pteval_t) > sizeof(long))
-		PVOP_VCALL3(pv_mmu_ops.set_pte, ptep,
-			    pte.pte, (u64)pte.pte >> 32);
-	else
-		PVOP_VCALL2(pv_mmu_ops.set_pte, ptep,
-			    pte.pte);
-}
-
-static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
-			      pte_t *ptep, pte_t pte)
-{
-	if (sizeof(pteval_t) > sizeof(long))
-		/* 5 arg words */
-		pv_mmu_ops.set_pte_at(mm, addr, ptep, pte);
-	else
-		PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pte.pte);
+	PVOP_VCALL2(mmu.set_pte, ptep, pte.pte);
 }
 
 static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
 {
-	pmdval_t val = native_pmd_val(pmd);
-
-	if (sizeof(pmdval_t) > sizeof(long))
-		PVOP_VCALL3(pv_mmu_ops.set_pmd, pmdp, val, (u64)val >> 32);
-	else
-		PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, val);
+	PVOP_VCALL2(mmu.set_pmd, pmdp, native_pmd_val(pmd));
 }
 
-#if CONFIG_PGTABLE_LEVELS >= 3
 static inline pmd_t __pmd(pmdval_t val)
 {
-	pmdval_t ret;
-
-	if (sizeof(pmdval_t) > sizeof(long))
-		ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.make_pmd,
-				   val, (u64)val >> 32);
-	else
-		ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.make_pmd,
-				   val);
-
-	return (pmd_t) { ret };
+	return (pmd_t) { PVOP_CALLEE1(pmdval_t, mmu.make_pmd, val) };
 }
 
 static inline pmdval_t pmd_val(pmd_t pmd)
 {
-	pmdval_t ret;
-
-	if (sizeof(pmdval_t) > sizeof(long))
-		ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.pmd_val,
-				   pmd.pmd, (u64)pmd.pmd >> 32);
-	else
-		ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.pmd_val,
-				   pmd.pmd);
-
-	return ret;
+	return PVOP_CALLEE1(pmdval_t, mmu.pmd_val, pmd.pmd);
 }
 
 static inline void set_pud(pud_t *pudp, pud_t pud)
 {
-	pudval_t val = native_pud_val(pud);
-
-	if (sizeof(pudval_t) > sizeof(long))
-		PVOP_VCALL3(pv_mmu_ops.set_pud, pudp,
-			    val, (u64)val >> 32);
-	else
-		PVOP_VCALL2(pv_mmu_ops.set_pud, pudp,
-			    val);
+	PVOP_VCALL2(mmu.set_pud, pudp, native_pud_val(pud));
 }
-#if CONFIG_PGTABLE_LEVELS >= 4
+
 static inline pud_t __pud(pudval_t val)
 {
 	pudval_t ret;
 
-	if (sizeof(pudval_t) > sizeof(long))
-		ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.make_pud,
-				   val, (u64)val >> 32);
-	else
-		ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.make_pud,
-				   val);
+	ret = PVOP_CALLEE1(pudval_t, mmu.make_pud, val);
 
 	return (pud_t) { ret };
 }
 
 static inline pudval_t pud_val(pud_t pud)
 {
-	pudval_t ret;
-
-	if (sizeof(pudval_t) > sizeof(long))
-		ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.pud_val,
-				   pud.pud, (u64)pud.pud >> 32);
-	else
-		ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.pud_val,
-				   pud.pud);
-
-	return ret;
+	return PVOP_CALLEE1(pudval_t, mmu.pud_val, pud.pud);
 }
 
 static inline void pud_clear(pud_t *pudp)
 {
-	set_pud(pudp, __pud(0));
+	set_pud(pudp, native_make_pud(0));
 }
 
 static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
 {
 	p4dval_t val = native_p4d_val(p4d);
 
-	if (sizeof(p4dval_t) > sizeof(long))
-		PVOP_VCALL3(pv_mmu_ops.set_p4d, p4dp,
-			    val, (u64)val >> 32);
-	else
-		PVOP_VCALL2(pv_mmu_ops.set_p4d, p4dp,
-			    val);
+	PVOP_VCALL2(mmu.set_p4d, p4dp, val);
 }
 
 #if CONFIG_PGTABLE_LEVELS >= 5
 
 static inline p4d_t __p4d(p4dval_t val)
 {
-	p4dval_t ret = PVOP_CALLEE1(p4dval_t, pv_mmu_ops.make_p4d, val);
+	p4dval_t ret = PVOP_CALLEE1(p4dval_t, mmu.make_p4d, val);
 
 	return (p4d_t) { ret };
 }
 
 static inline p4dval_t p4d_val(p4d_t p4d)
 {
-	return PVOP_CALLEE1(p4dval_t, pv_mmu_ops.p4d_val, p4d.p4d);
+	return PVOP_CALLEE1(p4dval_t, mmu.p4d_val, p4d.p4d);
 }
 
 static inline void __set_pgd(pgd_t *pgdp, pgd_t pgd)
 {
-	PVOP_VCALL2(pv_mmu_ops.set_pgd, pgdp, native_pgd_val(pgd));
+	PVOP_VCALL2(mmu.set_pgd, pgdp, native_pgd_val(pgd));
 }
 
 #define set_pgd(pgdp, pgdval) do {					\
@@ -586 +485 @@
 } while (0)
 
 #define pgd_clear(pgdp) do {						\
-	if (pgtable_l5_enabled())					\
-		set_pgd(pgdp, __pgd(0));				\
+	if (pgtable_l5_enabled())					\
+		set_pgd(pgdp, native_make_pgd(0));			\
 } while (0)
 
 #endif  /* CONFIG_PGTABLE_LEVELS == 5 */
 
 static inline void p4d_clear(p4d_t *p4dp)
 {
-	set_p4d(p4dp, __p4d(0));
+	set_p4d(p4dp, native_make_p4d(0));
 }
 
-#endif	/* CONFIG_PGTABLE_LEVELS == 4 */
-
-#endif	/* CONFIG_PGTABLE_LEVELS >= 3 */
-
-#ifdef CONFIG_X86_PAE
-/* Special-case pte-setting operations for PAE, which can't update a
-   64-bit pte atomically */
-static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
-{
-	PVOP_VCALL3(pv_mmu_ops.set_pte_atomic, ptep,
-		    pte.pte, pte.pte >> 32);
-}
-
-static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
-			     pte_t *ptep)
-{
-	PVOP_VCALL3(pv_mmu_ops.pte_clear, mm, addr, ptep);
-}
-
-static inline void pmd_clear(pmd_t *pmdp)
-{
-	PVOP_VCALL1(pv_mmu_ops.pmd_clear, pmdp);
-}
-#else  /* !CONFIG_X86_PAE */
 static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
 {
 	set_pte(ptep, pte);
@@ -629 +504 @@
 static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
 {
-	set_pte_at(mm, addr, ptep, __pte(0));
+	set_pte(ptep, native_make_pte(0));
 }
 
 static inline void pmd_clear(pmd_t *pmdp)
 {
-	set_pmd(pmdp, __pmd(0));
+	set_pmd(pmdp, native_make_pmd(0));
 }
-#endif	/* CONFIG_X86_PAE */
 
 #define __HAVE_ARCH_START_CONTEXT_SWITCH
 static inline void arch_start_context_switch(struct task_struct *prev)
 {
-	PVOP_VCALL1(pv_cpu_ops.start_context_switch, prev);
+	PVOP_VCALL1(cpu.start_context_switch, prev);
 }
 
 static inline void arch_end_context_switch(struct task_struct *next)
 {
-	PVOP_VCALL1(pv_cpu_ops.end_context_switch, next);
+	PVOP_VCALL1(cpu.end_context_switch, next);
 }
 
 #define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
 static inline void arch_enter_lazy_mmu_mode(void)
 {
-	PVOP_VCALL0(pv_mmu_ops.lazy_mode.enter);
+	PVOP_VCALL0(mmu.lazy_mode.enter);
 }
 
 static inline void arch_leave_lazy_mmu_mode(void)
 {
-	PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
+	PVOP_VCALL0(mmu.lazy_mode.leave);
 }
 
 static inline void arch_flush_lazy_mmu_mode(void)
 {
-	PVOP_VCALL0(pv_mmu_ops.lazy_mode.flush);
+	PVOP_VCALL0(mmu.lazy_mode.flush);
 }
 
 static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
				phys_addr_t phys, pgprot_t flags)
 {
-	pv_mmu_ops.set_fixmap(idx, phys, flags);
+	pv_ops.mmu.set_fixmap(idx, phys, flags);
 }
+#endif
 
 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
 
 static __always_inline void pv_queued_spin_lock_slowpath(struct qspinlock *lock,
							u32 val)
 {
-	PVOP_VCALL2(pv_lock_ops.queued_spin_lock_slowpath, lock, val);
+	PVOP_VCALL2(lock.queued_spin_lock_slowpath, lock, val);
 }
 
 static __always_inline void pv_queued_spin_unlock(struct qspinlock *lock)
 {
-	PVOP_VCALLEE1(pv_lock_ops.queued_spin_unlock, lock);
+	PVOP_VCALLEE1(lock.queued_spin_unlock, lock);
 }
 
 static __always_inline void pv_wait(u8 *ptr, u8 val)
 {
-	PVOP_VCALL2(pv_lock_ops.wait, ptr, val);
+	PVOP_VCALL2(lock.wait, ptr, val);
 }
 
 static __always_inline void pv_kick(int cpu)
 {
-	PVOP_VCALL1(pv_lock_ops.kick, cpu);
+	PVOP_VCALL1(lock.kick, cpu);
 }
 
 static __always_inline bool pv_vcpu_is_preempted(long cpu)
 {
-	return PVOP_CALLEE1(bool, pv_lock_ops.vcpu_is_preempted, cpu);
+	return PVOP_CALLEE1(bool, lock.vcpu_is_preempted, cpu);
 }
+
+void __raw_callee_save___native_queued_spin_unlock(struct qspinlock *lock);
+bool __raw_callee_save___native_vcpu_is_preempted(long cpu);
 
 #endif /* SMP && PARAVIRT_SPINLOCKS */
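For reference, a hypervisor guest wires these hooks up by filling pv_ops.lock at boot. The sketch below shows the approximate shape of that wiring (modeled from memory on KVM's spinlock init in arch/x86/kernel/kvm.c; treat the exact helper names as an approximation, not verbatim kernel code):

```c
/* Approximate shape of a guest's spinlock-op registration: */
pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
pv_ops.lock.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
pv_ops.lock.wait = kvm_wait;		/* halt the vCPU until kicked */
pv_ops.lock.kick = kvm_kick_cpu;	/* hypercall: wake the waiter */
pv_ops.lock.vcpu_is_preempted = PV_CALLEE_SAVE(__kvm_vcpu_is_preempted);
```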
|---|
 
 #ifdef CONFIG_X86_32
-#define PV_SAVE_REGS "pushl %ecx; pushl %edx;"
-#define PV_RESTORE_REGS "popl %edx; popl %ecx;"
-
 /* save and restore all caller-save registers, except return value */
 #define PV_SAVE_ALL_CALLER_REGS		"pushl %ecx;"
 #define PV_RESTORE_ALL_CALLER_REGS	"popl %ecx;"
-
-#define PV_FLAGS_ARG "0"
-#define PV_EXTRA_CLOBBERS
-#define PV_VEXTRA_CLOBBERS
 #else
 /* save and restore all caller-save registers, except return value */
 #define PV_SAVE_ALL_CALLER_REGS					\
@@ -732 +603 @@
 	"pop %rsi;"						\
 	"pop %rdx;"						\
 	"pop %rcx;"
-
-/* We save some registers, but all of them, that's too much. We clobber all
- * caller saved registers but the argument parameter */
-#define PV_SAVE_REGS "pushq %%rdi;"
-#define PV_RESTORE_REGS "popq %%rdi;"
-#define PV_EXTRA_CLOBBERS EXTRA_CLOBBERS, "rcx" , "rdx", "rsi"
-#define PV_VEXTRA_CLOBBERS EXTRA_CLOBBERS, "rdi", "rcx" , "rdx", "rsi"
-#define PV_FLAGS_ARG "D"
 #endif
 
 /*
@@ -767 +630 @@
 	"call " #func ";"					\
 	PV_RESTORE_ALL_CALLER_REGS				\
 	FRAME_END						\
-	"ret;"							\
+	ASM_RET							\
 	".size " PV_THUNK_NAME(func) ", .-" PV_THUNK_NAME(func) ";" \
 	".popsection")
 
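The "ret;" to ASM_RET change in the callee-save thunk follows the tree-wide rule that asm may no longer emit a bare ret, so return-path mitigations can substitute a safe sequence. The macro has roughly this shape (paraphrasing arch/x86/include/asm/linkage.h from memory, not verbatim):

```c
/* Paraphrased: a bare "ret" is emitted only when no mitigation is on. */
#ifdef CONFIG_RETHUNK
# define ASM_RET	"jmp __x86_return_thunk\n\t"
#elif defined(CONFIG_SLS)
# define ASM_RET	"ret; int3\n\t"	/* stop straight-line speculation */
#else
# define ASM_RET	"ret\n\t"
#endif
```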
|---|
@@ -779 +642 @@
 #define __PV_IS_CALLEE_SAVE(func)			\
 	((struct paravirt_callee_save) { func })
 
+#ifdef CONFIG_PARAVIRT_XXL
 static inline notrace unsigned long arch_local_save_flags(void)
 {
-	return PVOP_CALLEE0(unsigned long, pv_irq_ops.save_fl);
+	return PVOP_CALLEE0(unsigned long, irq.save_fl);
 }
 
 static inline notrace void arch_local_irq_restore(unsigned long f)
 {
-	PVOP_VCALLEE1(pv_irq_ops.restore_fl, f);
+	PVOP_VCALLEE1(irq.restore_fl, f);
 }
 
 static inline notrace void arch_local_irq_disable(void)
 {
-	PVOP_VCALLEE0(pv_irq_ops.irq_disable);
+	PVOP_VCALLEE0(irq.irq_disable);
 }
 
 static inline notrace void arch_local_irq_enable(void)
 {
-	PVOP_VCALLEE0(pv_irq_ops.irq_enable);
+	PVOP_VCALLEE0(irq.irq_enable);
 }
 
 static inline notrace unsigned long arch_local_irq_save(void)
@@ -807 +671 @@
 	arch_local_irq_disable();
 	return f;
 }
+#endif
 
 
 /* Make sure as little as possible of this mess escapes. */
@@ -828 +693 @@
 
 #else  /* __ASSEMBLY__ */
 
-#define _PVSITE(ptype, clobbers, ops, word, algn)	\
+#define _PVSITE(ptype, ops, word, algn)		\
 771:;						\
 	ops;					\
 772:;						\
@@ -837 +702 @@
 	word 771b;				\
 	.byte ptype;				\
 	.byte 772b-771b;			\
-	.short clobbers;			\
 	.popsection
 
 
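Each _PVSITE expansion drops a record into a dedicated section so the boot-time patcher can find every call site; removing the clobbers operand is what shrank that record by a u16. Its C-side counterpart looks like this (shape of struct paravirt_patch_site as of this rework; the field comments are mine):

```c
struct paravirt_patch_site {
	u8 *instr;	/* address of the patchable instructions */
	u8 type;	/* index of the pv op being called */
	u8 len;		/* length of the site, so it can be patched over */
};
```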
|---|
@@ -869 +733 @@
 	COND_POP(set, CLBR_RCX, rcx);		\
 	COND_POP(set, CLBR_RAX, rax)
 
-#define PARA_PATCH(struct, off)	((PARAVIRT_PATCH_##struct + (off)) / 8)
-#define PARA_SITE(ptype, clobbers, ops)	_PVSITE(ptype, clobbers, ops, .quad, 8)
+#define PARA_PATCH(off)		((off) / 8)
+#define PARA_SITE(ptype, ops)	_PVSITE(ptype, ops, .quad, 8)
 #define PARA_INDIRECT(addr)	*addr(%rip)
 #else
 #define PV_SAVE_REGS(set)			\
@@ -884 +748 @@
 	COND_POP(set, CLBR_EDI, edi);		\
 	COND_POP(set, CLBR_EAX, eax)
 
-#define PARA_PATCH(struct, off)	((PARAVIRT_PATCH_##struct + (off)) / 4)
-#define PARA_SITE(ptype, clobbers, ops)	_PVSITE(ptype, clobbers, ops, .long, 4)
+#define PARA_PATCH(off)		((off) / 4)
+#define PARA_SITE(ptype, ops)	_PVSITE(ptype, ops, .long, 4)
 #define PARA_INDIRECT(addr)	*%cs:addr
 #endif
 
+#ifdef CONFIG_PARAVIRT_XXL
 #define INTERRUPT_RETURN						\
-	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE,	\
-		  ANNOTATE_RETPOLINE_SAFE;				\
-		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret);)
+	PARA_SITE(PARA_PATCH(PV_CPU_iret),				\
+		  ANNOTATE_RETPOLINE_SAFE;				\
+		  jmp PARA_INDIRECT(pv_ops+PV_CPU_iret);)
 
 #define DISABLE_INTERRUPTS(clobbers)					\
-	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers,	\
+	PARA_SITE(PARA_PATCH(PV_IRQ_irq_disable),			\
 		  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);		\
-		  ANNOTATE_RETPOLINE_SAFE;				\
-		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable);	\
+		  ANNOTATE_RETPOLINE_SAFE;				\
+		  call PARA_INDIRECT(pv_ops+PV_IRQ_irq_disable);	\
 		  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
 
 #define ENABLE_INTERRUPTS(clobbers)					\
-	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers,	\
+	PARA_SITE(PARA_PATCH(PV_IRQ_irq_enable),			\
 		  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);		\
-		  ANNOTATE_RETPOLINE_SAFE;				\
-		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable);	\
-		  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
-
-#ifdef CONFIG_X86_32
-#define GET_CR0_INTO_EAX				\
-	push %ecx; push %edx;				\
-	ANNOTATE_RETPOLINE_SAFE;			\
-	call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);	\
-	pop %edx; pop %ecx
-#else	/* !CONFIG_X86_32 */
-
-/*
- * If swapgs is used while the userspace stack is still current,
- * there's no way to call a pvop.  The PV replacement *must* be
- * inlined, or the swapgs instruction must be trapped and emulated.
- */
-#define SWAPGS_UNSAFE_STACK						\
-	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,	\
-		  swapgs)
-
-/*
- * Note: swapgs is very special, and in practise is either going to be
- * implemented with a single "swapgs" instruction or something very
- * special.  Either way, we don't need to save any registers for
- * it.
- */
-#define SWAPGS								\
-	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,	\
-		  ANNOTATE_RETPOLINE_SAFE;				\
-		  call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs);		\
-		  )
-
-#define GET_CR2_INTO_RAX				\
-	ANNOTATE_RETPOLINE_SAFE;			\
-	call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2);
-
-#define USERGS_SYSRET64						\
-	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64),	\
-		  CLBR_NONE,						\
-		  ANNOTATE_RETPOLINE_SAFE;				\
-		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64);)
-
-#ifdef CONFIG_DEBUG_ENTRY
-#define SAVE_FLAGS(clobbers)					\
-	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_save_fl), clobbers, \
-		  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);	\
-		  ANNOTATE_RETPOLINE_SAFE;			\
-		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_save_fl);	\
+		  ANNOTATE_RETPOLINE_SAFE;				\
+		  call PARA_INDIRECT(pv_ops+PV_IRQ_irq_enable);		\
 		  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
 #endif
 
-#endif	/* CONFIG_X86_32 */
+#ifdef CONFIG_X86_64
+#ifdef CONFIG_PARAVIRT_XXL
+#define USERGS_SYSRET64						\
+	PARA_SITE(PARA_PATCH(PV_CPU_usergs_sysret64),		\
+		  ANNOTATE_RETPOLINE_SAFE;			\
+		  jmp PARA_INDIRECT(pv_ops+PV_CPU_usergs_sysret64);)
+
+#ifdef CONFIG_DEBUG_ENTRY
+#define SAVE_FLAGS(clobbers)					\
+	PARA_SITE(PARA_PATCH(PV_IRQ_save_fl),			\
+		  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);	\
+		  ANNOTATE_RETPOLINE_SAFE;			\
+		  call PARA_INDIRECT(pv_ops+PV_IRQ_save_fl);	\
+		  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
+#endif
+#endif /* CONFIG_PARAVIRT_XXL */
+#endif /* CONFIG_X86_64 */
+
+#ifdef CONFIG_PARAVIRT_XXL
+
+#define GET_CR2_INTO_AX						\
+	PARA_SITE(PARA_PATCH(PV_MMU_read_cr2),			\
+		  ANNOTATE_RETPOLINE_SAFE;			\
+		  call PARA_INDIRECT(pv_ops+PV_MMU_read_cr2);	\
+		  )
+
+#endif /* CONFIG_PARAVIRT_XXL */
+
 
 #endif /* __ASSEMBLY__ */
 #else  /* CONFIG_PARAVIRT */
 # define default_banner x86_init_noop
+#endif /* !CONFIG_PARAVIRT */
+
 #ifndef __ASSEMBLY__
+#ifndef CONFIG_PARAVIRT_XXL
 static inline void paravirt_arch_dup_mmap(struct mm_struct *oldmm,
					  struct mm_struct *mm)
 {
 }
+#endif
 
+#ifndef CONFIG_PARAVIRT
 static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
 {
 }
+#endif
 #endif /* __ASSEMBLY__ */
-#endif /* !CONFIG_PARAVIRT */
 #endif /* _ASM_X86_PARAVIRT_H */
|---|