```diff
@@ -12,27 +12,6 @@
 __visible struct task_struct *__switch_to(struct task_struct *prev,
 					  struct task_struct *next);
 
-/* This runs runs on the previous thread's stack. */
-static inline void prepare_switch_to(struct task_struct *next)
-{
-#ifdef CONFIG_VMAP_STACK
-	/*
-	 * If we switch to a stack that has a top-level paging entry
-	 * that is not present in the current mm, the resulting #PF will
-	 * will be promoted to a double-fault and we'll panic.  Probe
-	 * the new stack now so that vmalloc_fault can fix up the page
-	 * tables if needed.  This can only happen if we use a stack
-	 * in vmap space.
-	 *
-	 * We assume that the stack is aligned so that it never spans
-	 * more than one top-level paging entry.
-	 *
-	 * To minimize cache pollution, just follow the stack pointer.
-	 */
-	READ_ONCE(*(unsigned char *)next->thread.sp);
-#endif
-}
-
 asmlinkage void ret_from_fork(void);
 
 /*
```
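The helper deleted above probed the incoming thread's stack before the actual switch. Under CONFIG_VMAP_STACK the stack lives in vmalloc space, whose top-level page-table entries can be populated lazily; a fault taken on the stack during the switch itself cannot be serviced and escalates to a double fault. A minimal sketch of the probe pattern follows, with placeholder struct definitions and a simplified READ_ONCE() standing in for the real kernel ones:

```c
/* Placeholder types; only the probe itself mirrors the hunk above. */
struct thread_struct { unsigned long sp; };
struct task_struct   { struct thread_struct thread; };

/* Simplified stand-in for the kernel's READ_ONCE(). */
#define READ_ONCE(x) (*(const volatile __typeof__(x) *)&(x))

static inline void probe_new_stack(struct task_struct *next)
{
	/*
	 * A single volatile byte read at the new stack pointer takes any
	 * pending page fault now, while vmalloc_fault() can still repair
	 * the page tables, instead of mid-switch, where a fault on the
	 * stack would escalate into a double fault. One byte suffices
	 * because the stack never spans more than one top-level entry.
	 */
	READ_ONCE(*(unsigned char *)next->thread.sp);
}
```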
```diff
@@ -40,13 +19,13 @@
  * order of the fields must match the code in __switch_to_asm().
  */
 struct inactive_task_frame {
-	unsigned long flags;
 #ifdef CONFIG_X86_64
 	unsigned long r15;
 	unsigned long r14;
 	unsigned long r13;
 	unsigned long r12;
 #else
+	unsigned long flags;
 	unsigned long si;
 	unsigned long di;
 #endif
```
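This hunk moves `flags` into the 32-bit branch, so the flags register is saved and restored across a context switch only on 32-bit kernels; on 64-bit the field disappears and __switch_to_asm() stops pushing it. Because the struct mirrors the exact push order in __switch_to_asm(), each field's offset is effectively an ABI between C and assembly. A hedged sketch that makes the ordering constraint concrete; the struct tail (bx, bp, ret_addr in the real header) is elided above and assumed here, and the demo name is invented:

```c
#include <stddef.h>
#include <stdio.h>

/* Assumed 64-bit layout after this change (tail fields from context). */
struct inactive_task_frame_demo {
	unsigned long r15;	/* last register pushed: lands at thread.sp */
	unsigned long r14;
	unsigned long r13;
	unsigned long r12;
	unsigned long bx;
	unsigned long bp;	/* first register pushed */
	unsigned long ret_addr;	/* pushed implicitly by the call */
};

int main(void)
{
	/*
	 * These offsets must match, in reverse, the order of pushes in
	 * __switch_to_asm(); a mismatch corrupts the resumed thread's
	 * callee-saved registers.
	 */
	printf("r15=%zu r12=%zu bx=%zu bp=%zu ret=%zu\n",
	       offsetof(struct inactive_task_frame_demo, r15),
	       offsetof(struct inactive_task_frame_demo, r12),
	       offsetof(struct inactive_task_frame_demo, bx),
	       offsetof(struct inactive_task_frame_demo, bp),
	       offsetof(struct inactive_task_frame_demo, ret_addr));
	return 0;
}
```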
```diff
@@ -67,8 +46,6 @@
 
 #define switch_to(prev, next, last)					\
 do {									\
-	prepare_switch_to(next);					\
-									\
 	((last) = __switch_to_asm((prev), (next)));			\
 } while (0)
 
```
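With the probe gone, switch_to() collapses to the bare __switch_to_asm() call. The macro takes three arguments because, by the time `prev` is scheduled again, the CPU may be switching to it from some other task entirely; `last` captures that task for the caller. A hedged sketch of the caller side, where context_switch_sketch() is invented for illustration and only the macro itself comes from the hunk:

```c
struct task_struct;

/* Assembly entry point declared in the real header. */
extern struct task_struct *__switch_to_asm(struct task_struct *prev,
					   struct task_struct *next);

#define switch_to(prev, next, last)					\
do {									\
	((last) = __switch_to_asm((prev), (next)));			\
} while (0)

static struct task_struct *context_switch_sketch(struct task_struct *prev,
						 struct task_struct *next)
{
	struct task_struct *last;

	/*
	 * Control returns here only when this thread runs again,
	 * possibly much later and on another CPU. At that point `last`
	 * names the task we just switched away from, which the
	 * scheduler needs to finish the previous switch.
	 */
	switch_to(prev, next, last);
	return last;
}
```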
```diff
@@ -103,7 +80,17 @@
 	if (static_cpu_has(X86_FEATURE_XENPV))
 		load_sp0(task_top_of_stack(task));
 #endif
+}
 
+static inline void kthread_frame_init(struct inactive_task_frame *frame,
+				      unsigned long fun, unsigned long arg)
+{
+	frame->bx = fun;
+#ifdef CONFIG_X86_32
+	frame->di = arg;
+#else
+	frame->r12 = arg;
+#endif
 }
 
 #endif /* _ASM_X86_SWITCH_TO_H */
```
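The new kthread_frame_init() seeds a freshly built inactive_task_frame for a kernel thread: bx carries the entry function, and the argument goes in a callee-saved register (di on 32-bit, r12 on 64-bit) so that __switch_to_asm() restores it before ret_from_fork() runs. A hedged sketch of the intended call site; setup_kthread(), demo_thread_fn(), and the trimmed demo struct are invented, while kthread_frame_init() and the field names come from the hunk above:

```c
/* Demo frame holding only the fields this sketch touches. */
struct inactive_task_frame {
	unsigned long r12;
	unsigned long bx;
	unsigned long di;
};

/* Mirrors the helper added by the patch. */
static inline void kthread_frame_init(struct inactive_task_frame *frame,
				      unsigned long fun, unsigned long arg)
{
	frame->bx = fun;
#ifdef CONFIG_X86_32
	frame->di = arg;
#else
	frame->r12 = arg;
#endif
}

static int demo_thread_fn(void *data)
{
	(void)data;
	return 0;
}

/* Hypothetical caller, loosely modeled on creating a kernel thread. */
static void setup_kthread(struct inactive_task_frame *frame, void *data)
{
	/*
	 * On first schedule-in, ret_from_fork() sees a non-NULL saved
	 * bx, treats the task as a kernel thread, and calls bx with the
	 * saved argument register as its parameter.
	 */
	kthread_frame_init(frame, (unsigned long)demo_thread_fn,
			   (unsigned long)data);
}
```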