  ..   ..
  37   37           unsigned short __esh;
  38   38           unsigned short fs;
  39   39           unsigned short __fsh;
       40  +        /* On interrupt, gs and __gsh store the vector number. */
  40   41           unsigned short gs;
  41   42           unsigned short __gsh;
       43  +        /* On interrupt, this is the error code. */
  42   44           unsigned long orig_ax;
  43   45           unsigned long ip;
  44   46           unsigned short cs;
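The two new comments document a 32-bit entry-code convention: on an interrupt or exception, the entry stub stores the vector number in the gs/__gsh slot and the error code in orig_ax. A minimal sketch (not part of the patch; the function name is illustrative) of a 32-bit handler reading those fields back:

#include <linux/printk.h>
#include <linux/ptrace.h>

#ifdef CONFIG_X86_32
/* Sketch: recover what the 32-bit entry stub stashed in pt_regs. */
static void example_report_exception(struct pt_regs *regs)
{
        unsigned short vector = regs->gs;       /* vector number, per the comment above */
        unsigned long error_code = regs->orig_ax;

        pr_warn("exception vector %u, error code %#lx\n", vector, error_code);
}
#endif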
  ..   ..
  92   94   #include <asm/paravirt_types.h>
  93   95   #endif
  94   96
       97  +#include <asm/proto.h>
       98  +
  95   99   struct cpuinfo_x86;
  96  100   struct task_struct;
  97  101
  98  102   extern unsigned long profile_pc(struct pt_regs *regs);
  99       -#define profile_pc profile_pc
 100  103
 101  104   extern unsigned long
 102  105   convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs);
 103       -extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
 104       -                         int error_code, int si_code);
      106  +extern void send_sigtrap(struct pt_regs *regs, int error_code, int si_code);
 105  107
 106  108
 107  109   static inline unsigned long regs_return_value(struct pt_regs *regs)
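send_sigtrap() drops its task_struct argument and now always signals current. A hedged sketch of what a caller in a trap path could look like after this change (example_notify_debug is an illustrative name, not kernel code):

#include <linux/ptrace.h>
#include <linux/signal.h>

/* Sketch: report a user-mode breakpoint trap against current. */
static void example_notify_debug(struct pt_regs *regs, int error_code)
{
        if (user_mode(regs))
                send_sigtrap(regs, error_code, TRAP_BRKPT);
}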
  ..   ..
 123  125    * On x86_64, vm86 mode is mercifully nonexistent, and we don't need
 124  126    * the extra check.
 125  127    */
 126       -static inline int user_mode(struct pt_regs *regs)
      128  +static __always_inline int user_mode(struct pt_regs *regs)
 127  129   {
 128  130   #ifdef CONFIG_X86_32
 129  131           return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >= USER_RPL;
  ..   ..
 144  146   static inline bool user_64bit_mode(struct pt_regs *regs)
 145  147   {
 146  148   #ifdef CONFIG_X86_64
 147       -#ifndef CONFIG_PARAVIRT
      149  +#ifndef CONFIG_PARAVIRT_XXL
 148  150           /*
 149  151            * On non-paravirt systems, this is the only long mode CPL 3
 150  152            * selector. We do not allow long mode selectors in the LDT.
  ..   ..
 159  161   #endif
 160  162   }
 161  163
      164  +/*
      165  + * Determine whether the register set came from any context that is running in
      166  + * 64-bit mode.
      167  + */
      168  +static inline bool any_64bit_mode(struct pt_regs *regs)
      169  +{
      170  +#ifdef CONFIG_X86_64
      171  +        return !user_mode(regs) || user_64bit_mode(regs);
      172  +#else
      173  +        return false;
      174  +#endif
      175  +}
      176  +
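any_64bit_mode() answers "did these registers come from 64-bit code, kernel or user?", folding the kernel case in with user_64bit_mode(). A hedged sketch of the kind of consumer this enables, such as an instruction decoder choosing an address width (the function name is illustrative):

/* Sketch: pick the address size used to decode the faulting instruction. */
static int example_insn_addr_bytes(struct pt_regs *regs)
{
        if (any_64bit_mode(regs))
                return 8;       /* 64-bit kernel or 64-bit user code */
        return 4;               /* 32-bit kernel, 32-bit or compat user code */
}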
 162  177   #ifdef CONFIG_X86_64
 163  178   #define current_user_stack_pointer()    current_pt_regs()->sp
 164  179   #define compat_user_stack_pointer()     current_pt_regs()->sp
      180  +
      181  +static inline bool ip_within_syscall_gap(struct pt_regs *regs)
      182  +{
      183  +        bool ret = (regs->ip >= (unsigned long)entry_SYSCALL_64 &&
      184  +                    regs->ip <  (unsigned long)entry_SYSCALL_64_safe_stack);
      185  +
      186  +#ifdef CONFIG_IA32_EMULATION
      187  +        ret = ret || (regs->ip >= (unsigned long)entry_SYSCALL_compat &&
      188  +                      regs->ip <  (unsigned long)entry_SYSCALL_compat_safe_stack);
 165  189   #endif
 166  190
 167       -#ifdef CONFIG_X86_32
 168       -extern unsigned long kernel_stack_pointer(struct pt_regs *regs);
 169       -#else
      191  +        return ret;
      192  +}
      193  +#endif
      194  +
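ip_within_syscall_gap() reports whether regs->ip still lies inside the SYSCALL entry window, before the entry code has switched to a kernel stack; the entry_SYSCALL_* symbols it compares against are why <asm/proto.h> is now included above. A minimal sketch, assuming an NMI-like path that must decide whether regs->sp can be trusted (the function name is illustrative):

/* Sketch: only trust regs->sp as a kernel stack pointer outside the gap. */
static bool example_regs_sp_is_kernel_stack(struct pt_regs *regs)
{
        if (user_mode(regs))
                return false;   /* sp is a user stack pointer */
        if (ip_within_syscall_gap(regs))
                return false;   /* entry code has not switched stacks yet */
        return true;
}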
 170  195   static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
 171  196   {
 172  197           return regs->sp;
 173  198   }
 174       -#endif
 175  199
 176       -#define GET_IP(regs) ((regs)->ip)
 177       -#define GET_FP(regs) ((regs)->bp)
 178       -#define GET_USP(regs) ((regs)->sp)
      200  +static inline unsigned long instruction_pointer(struct pt_regs *regs)
      201  +{
      202  +        return regs->ip;
      203  +}
 179  204
 180       -#include <asm-generic/ptrace.h>
      205  +static inline void instruction_pointer_set(struct pt_regs *regs,
      206  +                                           unsigned long val)
      207  +{
      208  +        regs->ip = val;
      209  +}
      210  +
      211  +static inline unsigned long frame_pointer(struct pt_regs *regs)
      212  +{
      213  +        return regs->bp;
      214  +}
      215  +
      216  +static inline unsigned long user_stack_pointer(struct pt_regs *regs)
      217  +{
      218  +        return regs->sp;
      219  +}
      220  +
      221  +static inline void user_stack_pointer_set(struct pt_regs *regs,
      222  +                                          unsigned long val)
      223  +{
      224  +        regs->sp = val;
      225  +}
      226  +
      227  +static __always_inline bool regs_irqs_disabled(struct pt_regs *regs)
      228  +{
      229  +        return !(regs->flags & X86_EFLAGS_IF);
      230  +}
 181  231
 182  232   /* Query offset/name of register from its name/offset */
 183  233   extern int regs_query_register_offset(const char *name);
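The GET_IP/GET_FP/GET_USP macros and the <asm-generic/ptrace.h> wrappers built on them give way to explicit inline accessors. A hedged sketch of a consumer (the function name and message format are illustrative):

#include <linux/printk.h>
#include <linux/ptrace.h>

/* Sketch: dump the interesting parts of a register snapshot. */
static void example_dump_regs(struct pt_regs *regs)
{
        pr_info("ip=%lx bp=%lx sp=%lx irqs %s\n",
                instruction_pointer(regs),
                frame_pointer(regs),
                user_stack_pointer(regs),
                regs_irqs_disabled(regs) ? "off" : "on");
}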
  ..   ..
 199  249           if (unlikely(offset > MAX_REG_OFFSET))
 200  250                   return 0;
 201  251   #ifdef CONFIG_X86_32
 202       -        /*
 203       -         * Traps from the kernel do not save sp and ss.
 204       -         * Use the helper function to retrieve sp.
 205       -         */
 206       -        if (offset == offsetof(struct pt_regs, sp) &&
 207       -            regs->cs == __KERNEL_CS)
 208       -                return kernel_stack_pointer(regs);
 209       -
 210  252           /* The selector fields are 16-bit. */
 211  253           if (offset == offsetof(struct pt_regs, cs) ||
 212  254               offset == offsetof(struct pt_regs, ss) ||
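The 32-bit special case for sp disappears, presumably because the reworked 32-bit entry code now records sp and ss even for traps from the kernel, so regs_get_register() can treat sp like any other pt_regs slot. An illustrative one-liner under that assumption:

/* Sketch: sp can now be read by offset without the __KERNEL_CS special case. */
static unsigned long example_read_sp(struct pt_regs *regs)
{
        return regs_get_register(regs, offsetof(struct pt_regs, sp));
}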
  ..   ..
 232  274   static inline int regs_within_kernel_stack(struct pt_regs *regs,
 233  275                                              unsigned long addr)
 234  276   {
 235       -        return ((addr & ~(THREAD_SIZE - 1)) ==
 236       -                (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1)));
      277  +        return ((addr & ~(THREAD_SIZE - 1)) == (regs->sp & ~(THREAD_SIZE - 1)));
 237  278   }
 238  279
 239  280   /**
  ..   ..
 247  288    */
 248  289   static inline unsigned long *regs_get_kernel_stack_nth_addr(struct pt_regs *regs, unsigned int n)
 249  290   {
 250       -        unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);
      291  +        unsigned long *addr = (unsigned long *)regs->sp;
 251  292
 252  293           addr += n;
 253  294           if (regs_within_kernel_stack(regs, (unsigned long)addr))
  ..   ..
 257  298   }
 258  299
 259  300   /* To avoid include hell, we can't include uaccess.h */
 260       -extern long probe_kernel_read(void *dst, const void *src, size_t size);
      301  +extern long copy_from_kernel_nofault(void *dst, const void *src, size_t size);
 261  302
 262  303   /**
 263  304    * regs_get_kernel_stack_nth() - get Nth entry of the stack
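probe_kernel_read() is declared here under its newer name, copy_from_kernel_nofault(); it still returns 0 on success and a negative error if the kernel address cannot be read without faulting. A minimal hedged sketch:

/* Sketch: peek at a possibly-unmapped kernel address without oopsing. */
static unsigned long example_peek_kernel_word(const void *addr)
{
        unsigned long val;

        if (copy_from_kernel_nofault(&val, addr, sizeof(val)))
                return 0;       /* address was not readable */
        return val;
}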
  ..   ..
 277  318
 278  319           addr = regs_get_kernel_stack_nth_addr(regs, n);
 279  320           if (addr) {
 280       -                ret = probe_kernel_read(&val, addr, sizeof(val));
      321  +                ret = copy_from_kernel_nofault(&val, addr, sizeof(val));
 281  322                   if (!ret)
 282  323                           return val;
 283  324           }
 284  325           return 0;
      326  +}
      327  +
      328  +/**
      329  + * regs_get_kernel_argument() - get Nth function argument in kernel
      330  + * @regs:       pt_regs of that context
      331  + * @n:          function argument number (starting from 0)
      332  + *
      333  + * regs_get_kernel_argument() returns the @n th argument of the function call.
      334  + * Note that this chooses the most likely assignment; in some cases
      335  + * it can be incorrect.
      336  + * This is expected to be called from kprobes or ftrace with regs
      337  + * where the top of the stack is the return address.
      338  + */
      339  +static inline unsigned long regs_get_kernel_argument(struct pt_regs *regs,
      340  +                                                      unsigned int n)
      341  +{
      342  +        static const unsigned int argument_offs[] = {
      343  +#ifdef __i386__
      344  +                offsetof(struct pt_regs, ax),
      345  +                offsetof(struct pt_regs, dx),
      346  +                offsetof(struct pt_regs, cx),
      347  +#define NR_REG_ARGUMENTS 3
      348  +#else
      349  +                offsetof(struct pt_regs, di),
      350  +                offsetof(struct pt_regs, si),
      351  +                offsetof(struct pt_regs, dx),
      352  +                offsetof(struct pt_regs, cx),
      353  +                offsetof(struct pt_regs, r8),
      354  +                offsetof(struct pt_regs, r9),
      355  +#define NR_REG_ARGUMENTS 6
      356  +#endif
      357  +        };
      358  +
      359  +        if (n >= NR_REG_ARGUMENTS) {
      360  +                n -= NR_REG_ARGUMENTS - 1;
      361  +                return regs_get_kernel_stack_nth(regs, n);
      362  +        } else
      363  +                return regs_get_register(regs, argument_offs[n]);
 285  364   }
 286  365
 287  366   #define arch_has_single_step()  (1)
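regs_get_kernel_argument() maps arguments 0..NR_REG_ARGUMENTS-1 onto the calling-convention registers and takes the rest from the stack; the n -= NR_REG_ARGUMENTS - 1 step accounts for the return address occupying stack slot 0, which is why the kernel-doc requires regs where the top of the stack is the return address. A hedged usage sketch from a kprobe pre-handler (the probed symbol and all example_* names are illustrative):

#include <linux/kprobes.h>
#include <linux/module.h>

/* Sketch: log the first two arguments of a probed kernel function. */
static int example_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
        unsigned long arg0 = regs_get_kernel_argument(regs, 0);
        unsigned long arg1 = regs_get_kernel_argument(regs, 1);

        pr_info("%s(0x%lx, 0x%lx, ...)\n", p->symbol_name, arg0, arg1);
        return 0;
}

static struct kprobe example_kp = {
        .symbol_name    = "do_sys_open",        /* illustrative probe target */
        .pre_handler    = example_pre_handler,
};

static int __init example_init(void)
{
        return register_kprobe(&example_kp);
}

static void __exit example_exit(void)
{
        unregister_kprobe(&example_kp);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");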
  ..   ..
 291  370   #define arch_has_block_step()   (boot_cpu_data.x86 >= 6)
 292  371   #endif
 293  372
 294       -#define ARCH_HAS_USER_SINGLE_STEP_INFO
 295       -
 296       -/*
 297       - * When hitting ptrace_stop(), we cannot return using SYSRET because
 298       - * that does not restore the full CPU state, only a minimal set. The
 299       - * ptracer can change arbitrary register values, which is usually okay
 300       - * because the usual ptrace stops run off the signal delivery path which
 301       - * forces IRET; however, ptrace_event() stops happen in arbitrary places
 302       - * in the kernel and don't force IRET path.
 303       - *
 304       - * So force IRET path after a ptrace stop.
 305       - */
 306       -#define arch_ptrace_stop_needed(code, info)     \
 307       -({                                              \
 308       -        force_iret();                           \
 309       -        false;                                  \
 310       -})
      373  +#define ARCH_HAS_USER_SINGLE_STEP_REPORT
 311  374
 312  375   struct user_desc;
 313  376   extern int do_get_thread_area(struct task_struct *p, int idx,
  ..   ..
 315  378   extern int do_set_thread_area(struct task_struct *p, int idx,
 316  379                                 struct user_desc __user *info, int can_allocate);
 317  380
      381  +#ifdef CONFIG_X86_64
      382  +# define do_set_thread_area_64(p, s, t) do_arch_prctl_64(p, s, t)
      383  +#else
      384  +# define do_set_thread_area_64(p, s, t) (0)
      385  +#endif
      386  +
 318  387   #endif /* !__ASSEMBLY__ */
 319  388   #endif /* _ASM_X86_PTRACE_H */
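The new do_set_thread_area_64() wrapper forwards to do_arch_prctl_64() on 64-bit builds and compiles away to 0 on 32-bit ones. A hedged sketch of a register-writing path using it (ARCH_SET_FS comes from <asm/prctl.h>; the function name is illustrative):

#include <asm/prctl.h>
#include <linux/sched.h>

/* Sketch: set a traced child's FS base via the new wrapper. */
static int example_set_fs_base(struct task_struct *child, unsigned long base)
{
        return do_set_thread_area_64(child, ARCH_SET_FS, base);
}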