@@ -51,7 +51,11 @@
 #define HARDIRQ_OFFSET	(1UL << HARDIRQ_SHIFT)
 #define NMI_OFFSET	(1UL << NMI_SHIFT)
 
-#define SOFTIRQ_DISABLE_OFFSET	(2 * SOFTIRQ_OFFSET)
+#ifndef CONFIG_PREEMPT_RT_FULL
+# define SOFTIRQ_DISABLE_OFFSET	(2 * SOFTIRQ_OFFSET)
+#else
+# define SOFTIRQ_DISABLE_OFFSET	(0)
+#endif
 
 /* We use the MSB mostly because its available */
 #define PREEMPT_NEED_RESCHED	0x80000000
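
On PREEMPT_RT_FULL the bottom-half disable depth is no longer tracked in preempt_count, so the offset that local_bh_disable() would normally contribute collapses to 0. A minimal sketch of the consequence for callers, under that assumption (the helper name is hypothetical):

	/* Sketch, assuming RT_FULL semantics: local_bh_disable() still keeps
	 * local softirq handlers from running against this section, but it no
	 * longer adds SOFTIRQ_DISABLE_OFFSET to preempt_count, so the section
	 * itself stays preemptible on RT.
	 */
	static void touch_bh_protected_data(void)	/* hypothetical helper */
	{
		local_bh_disable();
		/* access data shared with a softirq handler on this CPU */
		local_bh_enable();
	}
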
@@ -81,9 +85,15 @@
 #include <asm/preempt.h>
 
 #define hardirq_count()	(preempt_count() & HARDIRQ_MASK)
-#define softirq_count()	(preempt_count() & SOFTIRQ_MASK)
 #define irq_count()	(preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
 				 | NMI_MASK))
+#ifndef CONFIG_PREEMPT_RT_FULL
+# define softirq_count()	(preempt_count() & SOFTIRQ_MASK)
+# define in_serving_softirq()	(softirq_count() & SOFTIRQ_OFFSET)
+#else
+# define softirq_count()	((unsigned long)current->softirq_nestcnt)
+extern int in_serving_softirq(void);
+#endif
 
 /*
  * Are we doing bottom half or hardware interrupt processing?
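
With RT_FULL, softirqs run in task context, so softirq_count() moves from the shared preempt_count to a per-task nesting counter (current->softirq_nestcnt) and in_serving_softirq() becomes a real function provided by the softirq code. A hedged sketch of a caller that works unchanged in either configuration (the helper is hypothetical):

	static bool handling_softirq(void)	/* hypothetical helper */
	{
		/* nonzero while a softirq handler runs: interrupt context on
		 * !RT, the threaded/ksoftirqd path on RT */
		return in_serving_softirq() != 0;
	}
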
@@ -101,7 +111,6 @@
 #define in_irq()		(hardirq_count())
 #define in_softirq()		(softirq_count())
 #define in_interrupt()		(irq_count())
-#define in_serving_softirq()	(softirq_count() & SOFTIRQ_OFFSET)
 #define in_nmi()		(preempt_count() & NMI_MASK)
 #define in_task()		(!(preempt_count() & \
 				   (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
@@ -118,7 +127,11 @@
 /*
  * The preempt_count offset after spin_lock()
  */
+#if !defined(CONFIG_PREEMPT_RT_FULL)
 #define PREEMPT_LOCK_OFFSET	PREEMPT_DISABLE_OFFSET
+#else
+#define PREEMPT_LOCK_OFFSET	0
+#endif
 
 /*
  * The preempt_count offset needed for things like:
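
PREEMPT_LOCK_OFFSET describes what preempt_count() is expected to read while one spin_lock() is held. On RT, spinlocks become sleeping locks that leave preemption enabled, so the expected offset is 0. A small sketch under that assumption (the helper is hypothetical):

	static bool only_one_spinlock_held(void)	/* hypothetical helper */
	{
		/* true with exactly one spin_lock() held and nothing else
		 * disabling preemption: preempt_count() == 1 on !RT, == 0 on RT */
		return preempt_count() == PREEMPT_LOCK_OFFSET;
	}
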
@@ -167,11 +180,31 @@
 #define preempt_count_inc() preempt_count_add(1)
 #define preempt_count_dec() preempt_count_sub(1)
 
+#ifdef CONFIG_PREEMPT_LAZY
+#define add_preempt_lazy_count(val)	do { preempt_lazy_count() += (val); } while (0)
+#define sub_preempt_lazy_count(val)	do { preempt_lazy_count() -= (val); } while (0)
+#define inc_preempt_lazy_count()	add_preempt_lazy_count(1)
+#define dec_preempt_lazy_count()	sub_preempt_lazy_count(1)
+#define preempt_lazy_count()		(current_thread_info()->preempt_lazy_count)
+#else
+#define add_preempt_lazy_count(val)	do { } while (0)
+#define sub_preempt_lazy_count(val)	do { } while (0)
+#define inc_preempt_lazy_count()	do { } while (0)
+#define dec_preempt_lazy_count()	do { } while (0)
+#define preempt_lazy_count()		(0)
+#endif
+
 #ifdef CONFIG_PREEMPT_COUNT
 
 #define preempt_disable() \
 do { \
 	preempt_count_inc(); \
+	barrier(); \
+} while (0)
+
+#define preempt_lazy_disable() \
+do { \
+	inc_preempt_lazy_count(); \
 	barrier(); \
 } while (0)
 
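
CONFIG_PREEMPT_LAZY adds a second, per-thread counter (thread_info->preempt_lazy_count) so the scheduler can defer preemption of normal tasks while still preempting for real-time tasks immediately; with the option off, all of these helpers compile to nothing. A hedged sketch of the pairing, mirroring preempt_disable()/preempt_enable():

	static void lazy_protected_section(void)	/* hypothetical helper */
	{
		preempt_lazy_disable();
		/* work during which non-RT preemption may be deferred */
		preempt_lazy_enable();
	}
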
@@ -181,9 +214,40 @@
 	preempt_count_dec(); \
 } while (0)
 
-#define preempt_enable_no_resched() sched_preempt_enable_no_resched()
+#ifdef CONFIG_PREEMPT_RT_BASE
+# define preempt_enable_no_resched() sched_preempt_enable_no_resched()
+# define preempt_check_resched_rt() preempt_check_resched()
+#else
+# define preempt_enable_no_resched() preempt_enable()
+# define preempt_check_resched_rt() barrier();
+#endif
 
 #define preemptible()	(preempt_count() == 0 && !irqs_disabled())
+
+#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
+
+extern void migrate_disable(void);
+extern void migrate_enable(void);
+
+int __migrate_disabled(struct task_struct *p);
+
+#elif !defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
+
+extern void migrate_disable(void);
+extern void migrate_enable(void);
+static inline int __migrate_disabled(struct task_struct *p)
+{
+	return 0;
+}
+
+#else
+#define migrate_disable()		preempt_disable()
+#define migrate_enable()		preempt_enable()
+static inline int __migrate_disabled(struct task_struct *p)
+{
+	return 0;
+}
+#endif
 
 #ifdef CONFIG_PREEMPT
 #define preempt_enable() \
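
On RT, migrate_disable() pins the current task to its CPU without disabling preemption, which is what per-CPU accessors need once spinlocks can sleep; on non-RT builds it simply falls back to preempt_disable(). A usage sketch with a hypothetical per-CPU variable (note that this only prevents migration; exclusion against other tasks on the same CPU still needs a lock):

	static DEFINE_PER_CPU(int, demo_counter);	/* hypothetical per-CPU data */

	static int read_and_bump(void)			/* hypothetical helper */
	{
		int val;

		migrate_disable();	/* stay on this CPU; still preemptible on RT */
		val = __this_cpu_read(demo_counter);
		__this_cpu_write(demo_counter, val + 1);
		migrate_enable();

		return val;
	}
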
@@ -206,11 +270,24 @@
 		__preempt_schedule(); \
 } while (0)
 
+#define preempt_lazy_enable() \
+do { \
+	dec_preempt_lazy_count(); \
+	barrier(); \
+	preempt_check_resched(); \
+} while (0)
+
 #else /* !CONFIG_PREEMPT */
 #define preempt_enable() \
 do { \
 	barrier(); \
 	preempt_count_dec(); \
+} while (0)
+
+#define preempt_lazy_enable() \
+do { \
+	dec_preempt_lazy_count(); \
+	barrier(); \
 } while (0)
 
 #define preempt_enable_notrace() \
@@ -251,8 +328,16 @@
 #define preempt_disable_notrace()		barrier()
 #define preempt_enable_no_resched_notrace()	barrier()
 #define preempt_enable_notrace()		barrier()
+#define preempt_check_resched_rt()		barrier()
 #define preemptible()				0
 
+#define migrate_disable()			barrier()
+#define migrate_enable()			barrier()
+
+static inline int __migrate_disabled(struct task_struct *p)
+{
+	return 0;
+}
 #endif /* CONFIG_PREEMPT_COUNT */
 
 #ifdef MODULE
@@ -271,10 +356,22 @@
 } while (0)
 #define preempt_fold_need_resched() \
 do { \
-	if (tif_need_resched()) \
+	if (tif_need_resched_now()) \
 		set_preempt_need_resched(); \
 } while (0)
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+# define preempt_disable_rt()		preempt_disable()
+# define preempt_enable_rt()		preempt_enable()
+# define preempt_disable_nort()		barrier()
+# define preempt_enable_nort()		barrier()
+#else
+# define preempt_disable_rt()		barrier()
+# define preempt_enable_rt()		barrier()
+# define preempt_disable_nort()		preempt_disable()
+# define preempt_enable_nort()		preempt_enable()
+#endif
+
 #ifdef CONFIG_PREEMPT_NOTIFIERS
 
 struct preempt_notifier;
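
The _rt/_nort pair lets shared code say "disable preemption only on RT" or "only on !RT" without scattering #ifdef CONFIG_PREEMPT_RT_FULL; whichever side is not needed compiles to a plain barrier(). A hedged sketch of the non-RT-only flavour (the helper is hypothetical):

	static void nort_only_section(void)	/* hypothetical helper */
	{
		/* preemption disabled on !RT; on RT the caller is assumed to
		 * already hold a sleeping lock that provides the protection */
		preempt_disable_nort();
		/* ... short critical work ... */
		preempt_enable_nort();
	}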