@@ -14,6 +14,7 @@
 #include <linux/sched.h>
 #include <linux/mm_types.h>
 #include <linux/preempt.h>
+#include <linux/irq_pipeline.h>
 
 #include <asm/cacheflush.h>
 #include <asm/cachetype.h>
@@ -72,6 +73,7 @@
 static inline void finish_arch_post_lock_switch(void)
 {
 	struct mm_struct *mm = current->mm;
+	unsigned long flags;
 
 	if (mm && mm->context.switch_pending) {
 		/*
@@ -83,7 +85,9 @@
 		preempt_disable();
 		if (mm->context.switch_pending) {
 			mm->context.switch_pending = 0;
+			protect_inband_mm(flags);
 			cpu_switch_mm(mm->pgd, mm);
+			unprotect_inband_mm(flags);
 		}
 		preempt_enable_no_resched();
 	}
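Note: protect_inband_mm() and unprotect_inband_mm() come from the <linux/irq_pipeline.h> header added above. As a minimal sketch of their assumed semantics on an IRQ-pipelined (Dovetail-style) kernel, not the actual implementation, they hard-disable interrupts at the CPU level so that no pipelined out-of-band IRQ can preempt the in-band stage in the middle of the page-table switch:

/*
 * Sketch only: assumed semantics of the inband-MM guards; the real
 * definitions live in <linux/irq_pipeline.h>. hard_local_irq_save()
 * and hard_local_irq_restore() mask interrupts in the CPU itself,
 * beneath the virtualized in-band interrupt state.
 */
#define protect_inband_mm(flags)			\
	do {						\
		(flags) = hard_local_irq_save();	\
	} while (0)

#define unprotect_inband_mm(flags)			\
	do {						\
		hard_local_irq_restore(flags);		\
	} while (0)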
@@ -102,7 +106,7 @@
 #endif	/* CONFIG_CPU_HAS_ASID */
 
 #define destroy_context(mm)		do { } while(0)
-#define activate_mm(prev,next)	switch_mm(prev, next, NULL)
+#define activate_mm(prev,next)	__switch_mm(prev, next, NULL)
 
 /*
  * This is called when "tsk" is about to enter lazy TLB mode.
@@ -118,15 +122,9 @@
 {
 }
 
-/*
- * This is the actual mm switch as far as the scheduler
- * is concerned. No registers are touched. We avoid
- * calling the CPU specific function when the mm hasn't
- * actually changed.
- */
 static inline void
-switch_mm(struct mm_struct *prev, struct mm_struct *next,
-	  struct task_struct *tsk)
+__switch_mm(struct mm_struct *prev, struct mm_struct *next,
+	    struct task_struct *tsk)
 {
 #ifdef CONFIG_MMU
 	unsigned int cpu = smp_processor_id();
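The old switch_mm() body is kept unchanged but renamed to __switch_mm(), leaving an unprotected inner helper; its descriptive comment moves along with the guarded switch_mm() wrapper that the next hunk reintroduces for the scheduler's benefit.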
@@ -149,6 +147,30 @@
 #endif
 }
 
+/*
+ * This is the actual mm switch as far as the scheduler
+ * is concerned. No registers are touched. We avoid
+ * calling the CPU specific function when the mm hasn't
+ * actually changed.
+ */
+static inline void
+switch_mm(struct mm_struct *prev, struct mm_struct *next,
+	  struct task_struct *tsk)
+{
+	unsigned long flags;
+
+	protect_inband_mm(flags);
+	__switch_mm(prev, next, tsk);
+	unprotect_inband_mm(flags);
+}
+
 #define deactivate_mm(tsk,mm)	do { } while (0)
 
+static inline void
+switch_oob_mm(struct mm_struct *prev, struct mm_struct *next,
+	      struct task_struct *tsk)
+{
+	__switch_mm(prev, next, tsk);
+}
+
 #endif
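switch_oob_mm() intentionally skips the guards. The assumption here, not stated in the hunk itself, is that callers on the out-of-band stage already run with hard interrupts disabled, so re-protecting the switch would be redundant. A hypothetical caller, purely for illustration; oob_resume_mm() is made up, and hard_irqs_disabled() is assumed to be the pipeline-side predicate for the hard interrupt state:

/*
 * Hypothetical out-of-band caller, illustration only: an oob
 * scheduling core switching address spaces. It is expected to run
 * with hard IRQs already off, so it may call switch_oob_mm() and
 * skip the inband protection that switch_mm() applies.
 */
static void oob_resume_mm(struct mm_struct *prev, struct mm_struct *next,
			  struct task_struct *tsk)
{
	WARN_ON_ONCE(!hard_irqs_disabled());
	switch_oob_mm(prev, next, tsk);
}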