2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/arch/powerpc/include/asm/asm-prototypes.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 #ifndef _ASM_POWERPC_ASM_PROTOTYPES_H
 #define _ASM_POWERPC_ASM_PROTOTYPES_H
 /*
@@ -5,11 +6,6 @@
  * from asm, and any associated variables.
  *
  * Copyright 2016, Daniel Axtens, IBM Corporation.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
  */
 
 #include <linux/threads.h>
@@ -19,12 +15,13 @@
 #include <asm/epapr_hcalls.h>
 #include <asm/dcr.h>
 #include <asm/mmu_context.h>
+#include <asm/ultravisor-api.h>
 
 #include <uapi/asm/ucontext.h>
 
 /* SMP */
-extern struct thread_info *current_set[NR_CPUS];
-extern struct thread_info *secondary_ti;
+extern struct task_struct *current_set[NR_CPUS];
+extern struct task_struct *secondary_current;
 void start_secondary(void *unused);
 
 /* kexec */
@@ -37,13 +34,21 @@
 extern struct static_key hcall_tracepoint_key;
 void __trace_hcall_entry(unsigned long opcode, unsigned long *args);
 void __trace_hcall_exit(long opcode, long retval, unsigned long *retbuf);
-/* OPAL tracing */
-#ifdef CONFIG_JUMP_LABEL
-extern struct static_key opal_tracepoint_key;
+
+/* Ultravisor */
+#if defined(CONFIG_PPC_POWERNV) || defined(CONFIG_PPC_SVM)
+long ucall_norets(unsigned long opcode, ...);
+#else
+static inline long ucall_norets(unsigned long opcode, ...)
+{
+	return U_NOT_AVAILABLE;
+}
 #endif
 
-void __trace_opal_entry(unsigned long opcode, unsigned long *args);
-void __trace_opal_exit(long opcode, unsigned long retval);
+/* OPAL */
+int64_t __opal_call(int64_t a0, int64_t a1, int64_t a2, int64_t a3,
+		    int64_t a4, int64_t a5, int64_t a6, int64_t a7,
+		    int64_t opcode, uint64_t msr);
 
 /* VMX copying */
 int enter_vmx_usercopy(void);
@@ -61,9 +66,8 @@
 void single_step_exception(struct pt_regs *regs);
 void program_check_exception(struct pt_regs *regs);
 void alignment_exception(struct pt_regs *regs);
-void slb_miss_bad_addr(struct pt_regs *regs);
 void StackOverflow(struct pt_regs *regs);
-void nonrecoverable_exception(struct pt_regs *regs);
+void stack_overflow_exception(struct pt_regs *regs);
 void kernel_fp_unavailable_exception(struct pt_regs *regs);
 void altivec_unavailable_exception(struct pt_regs *regs);
 void vsx_unavailable_exception(struct pt_regs *regs);
@@ -78,6 +82,8 @@
 void system_reset_exception(struct pt_regs *regs);
 void machine_check_exception(struct pt_regs *regs);
 void emulation_assist_interrupt(struct pt_regs *regs);
+long do_slb_fault(struct pt_regs *regs, unsigned long ea);
+void do_bad_slb_fault(struct pt_regs *regs, unsigned long ea, long err);
 
 /* signals, syscalls and interrupts */
 long sys_swapcontext(struct ucontext __user *old_ctx,
@@ -87,24 +93,21 @@
 long sys_debug_setcontext(struct ucontext __user *ctx,
 			  int ndbg, struct sig_dbg_op __user *dbg);
 int
-ppc_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp, struct timeval __user *tvp);
+ppc_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp,
+	   struct __kernel_old_timeval __user *tvp);
 unsigned long __init early_init(unsigned long dt_ptr);
 void __init machine_init(u64 dt_ptr);
 #endif
+long system_call_exception(long r3, long r4, long r5, long r6, long r7, long r8, unsigned long r0, struct pt_regs *regs);
+notrace unsigned long syscall_exit_prepare(unsigned long r3, struct pt_regs *regs, long scv);
+notrace unsigned long interrupt_exit_user_prepare(struct pt_regs *regs, unsigned long msr);
+notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs, unsigned long msr);
 
 long ppc_fadvise64_64(int fd, int advice, u32 offset_high, u32 offset_low,
 		      u32 len_high, u32 len_low);
 long sys_switch_endian(void);
 notrace unsigned int __check_irq_replay(void);
 void notrace restore_interrupts(void);
-
-/* ptrace */
-long do_syscall_trace_enter(struct pt_regs *regs);
-void do_syscall_trace_leave(struct pt_regs *regs);
-
-/* process */
-void restore_math(struct pt_regs *regs);
-void restore_tm_state(struct pt_regs *regs);
 
 /* prom_init (OpenFirmware) */
 unsigned long __init prom_init(unsigned long r3, unsigned long r4,
@@ -116,9 +119,6 @@
 void __init early_setup(unsigned long dt_ptr);
 void early_setup_secondary(void);
 
-/* time */
-void accumulate_stolen_time(void);
-
 /* misc runtime */
 extern u64 __bswapdi2(u64);
 extern s64 __lshrdi3(s64, int);
@@ -129,7 +129,8 @@
 
 /* tracing */
 void _mcount(void);
-unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip);
+unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip,
+				    unsigned long sp);
 
 void pnv_power9_force_smt4_catch(void);
 void pnv_power9_force_smt4_release(void);
@@ -144,13 +145,36 @@
 void _kvmppc_save_tm_pr(struct kvm_vcpu *vcpu, u64 guest_msr);
 
 /* Patch sites */
-extern s32 patch__call_flush_count_cache;
+extern s32 patch__call_flush_branch_caches1;
+extern s32 patch__call_flush_branch_caches2;
+extern s32 patch__call_flush_branch_caches3;
 extern s32 patch__flush_count_cache_return;
 extern s32 patch__flush_link_stack_return;
 extern s32 patch__call_kvm_flush_link_stack;
 extern s32 patch__memset_nocache, patch__memcpy_nocache;
 
-extern long flush_count_cache;
+extern long flush_branch_caches;
 extern long kvm_flush_link_stack;
 
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+void kvmppc_save_tm_hv(struct kvm_vcpu *vcpu, u64 msr, bool preserve_nv);
+void kvmppc_restore_tm_hv(struct kvm_vcpu *vcpu, u64 msr, bool preserve_nv);
+#else
+static inline void kvmppc_save_tm_hv(struct kvm_vcpu *vcpu, u64 msr,
+				     bool preserve_nv) { }
+static inline void kvmppc_restore_tm_hv(struct kvm_vcpu *vcpu, u64 msr,
+					bool preserve_nv) { }
+#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
+
+void kvmhv_save_host_pmu(void);
+void kvmhv_load_host_pmu(void);
+void kvmhv_save_guest_pmu(struct kvm_vcpu *vcpu, bool pmu_in_use);
+void kvmhv_load_guest_pmu(struct kvm_vcpu *vcpu);
+
+int __kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu);
+
+long kvmppc_h_set_dabr(struct kvm_vcpu *vcpu, unsigned long dabr);
+long kvmppc_h_set_xdabr(struct kvm_vcpu *vcpu, unsigned long dabr,
+			unsigned long dabrx);
+
 #endif /* _ASM_POWERPC_ASM_PROTOTYPES_H */
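
Note: two of the additions above (ucall_norets() and the kvmppc_{save,restore}_tm_hv() pair) use the same kernel idiom: declare the real symbol only when the backing code is configured in, and fall back to a static inline stub otherwise, so call sites need no #ifdefs of their own. Below is a minimal standalone sketch of that idiom; HAVE_FEATURE, feature_call() and E_NOT_AVAILABLE are hypothetical stand-ins for illustration, not names from this header.

#include <stdio.h>

#define E_NOT_AVAILABLE (-1)	/* stand-in for a code like U_NOT_AVAILABLE */

#ifdef HAVE_FEATURE
long feature_call(long opcode);	/* real implementation linked in elsewhere */
#else
/* Stub keeps callers #ifdef-free; they test the return code at runtime. */
static inline long feature_call(long opcode)
{
	(void)opcode;
	return E_NOT_AVAILABLE;
}
#endif

int main(void)
{
	/* Built without -DHAVE_FEATURE, this prints -1 ("not available"). */
	printf("feature_call(0) = %ld\n", feature_call(0));
	return 0;
}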