```diff
@@ -26,13 +26,13 @@
  * PREEMPT_MASK: 0x000000ff
  * SOFTIRQ_MASK: 0x0000ff00
  * HARDIRQ_MASK: 0x000f0000
- * NMI_MASK: 0x00100000
+ * NMI_MASK: 0x00f00000
  * PREEMPT_NEED_RESCHED: 0x80000000
  */
 #define PREEMPT_BITS 8
 #define SOFTIRQ_BITS 8
 #define HARDIRQ_BITS 4
-#define NMI_BITS 1
+#define NMI_BITS 4
 
 #define PREEMPT_SHIFT 0
 #define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS)
```
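The hunk above widens the NMI field of preempt_count from one bit to four, which is why NMI_MASK grows from 0x00100000 to 0x00f00000: NMI-class entries can now be counted nested up to 15 deep instead of 1. A minimal userspace sketch that rederives the masks from the BITS/SHIFT scheme (the `MASK()` helper and `main()` are mine for illustration; the header derives its masks the same way via its `__IRQ_MASK()` macro):

```c
/* Userspace illustration only, not kernel code: rederive the
 * preempt_count masks from the BITS/SHIFT definitions above and
 * check them against the values in the comment block. */
#include <assert.h>
#include <stdio.h>

#define PREEMPT_BITS 8
#define SOFTIRQ_BITS 8
#define HARDIRQ_BITS 4
#define NMI_BITS 4 /* was 1 before this change */

#define PREEMPT_SHIFT 0
#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
#define NMI_SHIFT     (HARDIRQ_SHIFT + HARDIRQ_BITS)

#define MASK(bits, shift) (((1UL << (bits)) - 1) << (shift))

int main(void)
{
	assert(MASK(PREEMPT_BITS, PREEMPT_SHIFT) == 0x000000ffUL);
	assert(MASK(SOFTIRQ_BITS, SOFTIRQ_SHIFT) == 0x0000ff00UL);
	assert(MASK(HARDIRQ_BITS, HARDIRQ_SHIFT) == 0x000f0000UL);
	assert(MASK(NMI_BITS, NMI_SHIFT) == 0x00f00000UL); /* new value */
	printf("mask layout matches the comment block\n");
	return 0;
}
```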
```diff
@@ -51,14 +51,7 @@
 #define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
 #define NMI_OFFSET (1UL << NMI_SHIFT)
 
-#ifndef CONFIG_PREEMPT_RT_FULL
-# define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
-#else
-# define SOFTIRQ_DISABLE_OFFSET (0)
-#endif
-
-/* We use the MSB mostly because its available */
-#define PREEMPT_NEED_RESCHED 0x80000000
+#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
 
 #define PREEMPT_DISABLED (PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)
 
```
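With the PREEMPT_RT_FULL special case gone, SOFTIRQ_DISABLE_OFFSET is unconditionally twice SOFTIRQ_OFFSET. The softirq byte does double duty: the lowest softirq bit (SOFTIRQ_OFFSET) marks "currently serving a softirq", while local_bh_disable() nesting is accounted in steps of 2 * SOFTIRQ_OFFSET, so both states coexist in one field; this is why the in_serving_softirq() definition restored later in this diff masks with SOFTIRQ_OFFSET only. A userspace sketch of that convention as I understand it (`pc` stands in for preempt_count(); the scenario is illustrative):

```c
/* Userspace sketch: how one byte distinguishes "serving a softirq"
 * (the SOFTIRQ_OFFSET bit) from "softirqs disabled" (local_bh_disable()
 * nesting, counted in SOFTIRQ_DISABLE_OFFSET steps). */
#include <stdio.h>

#define SOFTIRQ_SHIFT          8
#define SOFTIRQ_MASK           0x0000ff00UL
#define SOFTIRQ_OFFSET         (1UL << SOFTIRQ_SHIFT) /* 0x100 */
#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)   /* 0x200 */

static unsigned long pc; /* stand-in for preempt_count() */

int main(void)
{
	pc += SOFTIRQ_DISABLE_OFFSET; /* as in local_bh_disable() */
	pc += SOFTIRQ_DISABLE_OFFSET; /* nested local_bh_disable() */
	pc += SOFTIRQ_OFFSET;         /* as when a softirq handler runs */

	printf("softirq_count():      %#lx\n", pc & SOFTIRQ_MASK);   /* 0x500 */
	printf("in_serving_softirq(): %lu\n", pc & SOFTIRQ_OFFSET);  /* nonzero */
	printf("in_softirq():         %lu\n", pc & SOFTIRQ_MASK);    /* nonzero */
	return 0;
}
```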
```diff
@@ -85,13 +78,7 @@
 #include <asm/preempt.h>
 
 #define hardirq_count() (preempt_count() & HARDIRQ_MASK)
+#define softirq_count() (preempt_count() & SOFTIRQ_MASK)
 #define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
 			| NMI_MASK))
-#ifndef CONFIG_PREEMPT_RT_FULL
-# define softirq_count() (preempt_count() & SOFTIRQ_MASK)
-# define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
-#else
-# define softirq_count() ((unsigned long)current->softirq_nestcnt)
-extern int in_serving_softirq(void);
-#endif
 
 /*
  * Are we doing bottom half or hardware interrupt processing?
@@ -111,6 +98,7 @@
 #define in_irq() (hardirq_count())
 #define in_softirq() (softirq_count())
 #define in_interrupt() (irq_count())
+#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
 #define in_nmi() (preempt_count() & NMI_MASK)
 #define in_task() (!(preempt_count() & \
 			(NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
```
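in_serving_softirq() is now an unconditional macro next to the other context predicates, and softirq_count() again reads straight from preempt_count() rather than from the RT-only current->softirq_nestcnt. Together the predicates decode one preempt_count word into a context. A hedged userspace sketch of that decode (the sample values and the `context()` helper are mine):

```c
/* Userspace sketch: classify a preempt_count value the way the
 * in_nmi()/in_irq()/in_serving_softirq()/in_task() predicates do. */
#include <stdio.h>

#define SOFTIRQ_OFFSET 0x00000100UL
#define HARDIRQ_MASK   0x000f0000UL
#define NMI_MASK       0x00f00000UL

static const char *context(unsigned long pc)
{
	if (pc & NMI_MASK)
		return "nmi";
	if (pc & HARDIRQ_MASK)
		return "hardirq";
	if (pc & SOFTIRQ_OFFSET)
		return "serving softirq";
	/* in_task(): none of the NMI/hardirq/serving-softirq bits set */
	return "task";
}

int main(void)
{
	printf("%s\n", context(0x00000000)); /* task */
	printf("%s\n", context(0x00010000)); /* hardirq */
	printf("%s\n", context(0x00100100)); /* nmi, nested over a softirq */
	return 0;
}
```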
```diff
@@ -127,11 +115,7 @@
 /*
  * The preempt_count offset after spin_lock()
  */
-#if !defined(CONFIG_PREEMPT_RT_FULL)
 #define PREEMPT_LOCK_OFFSET PREEMPT_DISABLE_OFFSET
-#else
-#define PREEMPT_LOCK_OFFSET 0
-#endif
 
 /*
  * The preempt_count offset needed for things like:
@@ -180,31 +164,11 @@
 #define preempt_count_inc() preempt_count_add(1)
 #define preempt_count_dec() preempt_count_sub(1)
 
-#ifdef CONFIG_PREEMPT_LAZY
-#define add_preempt_lazy_count(val) do { preempt_lazy_count() += (val); } while (0)
-#define sub_preempt_lazy_count(val) do { preempt_lazy_count() -= (val); } while (0)
-#define inc_preempt_lazy_count() add_preempt_lazy_count(1)
-#define dec_preempt_lazy_count() sub_preempt_lazy_count(1)
-#define preempt_lazy_count() (current_thread_info()->preempt_lazy_count)
-#else
-#define add_preempt_lazy_count(val) do { } while (0)
-#define sub_preempt_lazy_count(val) do { } while (0)
-#define inc_preempt_lazy_count() do { } while (0)
-#define dec_preempt_lazy_count() do { } while (0)
-#define preempt_lazy_count() (0)
-#endif
-
 #ifdef CONFIG_PREEMPT_COUNT
 
 #define preempt_disable() \
 do { \
 	preempt_count_inc(); \
-	barrier(); \
-} while (0)
-
-#define preempt_lazy_disable() \
-do { \
-	inc_preempt_lazy_count(); \
 	barrier(); \
 } while (0)
 
```
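With the PREEMPT_LAZY machinery dropped, preempt_disable() is back to its minimal form: bump the count, then a compiler barrier so protected accesses cannot be hoisted above the increment. A hedged kernel-style fragment of the classic pattern it enables (compiles only in-tree; `my_counters` and `my_count_event()` are hypothetical names):

```c
/* Kernel-style sketch, not part of this patch: preempt_disable()
 * pins the task to the current CPU for a short critical section,
 * here protecting a per-CPU counter. */
#include <linux/percpu.h>
#include <linux/preempt.h>

DEFINE_PER_CPU(unsigned long, my_counters);

static void my_count_event(void)
{
	preempt_disable();           /* no preemption, hence no migration */
	__this_cpu_inc(my_counters); /* safe: we stay on this CPU */
	preempt_enable();            /* may trigger a reschedule check */
}
```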
```diff
@@ -214,42 +178,11 @@
 	preempt_count_dec(); \
 } while (0)
 
-#ifdef CONFIG_PREEMPT_RT_BASE
-# define preempt_enable_no_resched() sched_preempt_enable_no_resched()
-# define preempt_check_resched_rt() preempt_check_resched()
-#else
-# define preempt_enable_no_resched() preempt_enable()
-# define preempt_check_resched_rt() barrier();
-#endif
+#define preempt_enable_no_resched() sched_preempt_enable_no_resched()
 
 #define preemptible() (preempt_count() == 0 && !irqs_disabled())
 
-#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
-
-extern void migrate_disable(void);
-extern void migrate_enable(void);
-
-int __migrate_disabled(struct task_struct *p);
-
-#elif !defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
-
-extern void migrate_disable(void);
-extern void migrate_enable(void);
-static inline int __migrate_disabled(struct task_struct *p)
-{
-	return 0;
-}
-
-#else
-#define migrate_disable() preempt_disable()
-#define migrate_enable() preempt_enable()
-static inline int __migrate_disabled(struct task_struct *p)
-{
-	return 0;
-}
-#endif
-
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
 #define preempt_enable() \
 do { \
 	barrier(); \
```
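Two things happen in this hunk: preempt_enable_no_resched() unconditionally becomes sched_preempt_enable_no_resched(), and the preemption guard switches from CONFIG_PREEMPT to CONFIG_PREEMPTION, the symbol selected by both PREEMPT and PREEMPT_RT. The RT-specific migrate_disable()/migrate_enable() declarations disappear here; inline replacements come back at the bottom of the file. As for preemptible(), it holds only when the count is zero and interrupts are on. A hedged sketch of what that permits (`my_alloc()` is an illustrative helper; see the caveat after the code):

```c
/* Kernel-style sketch, not part of this patch: preemptible() is the
 * precondition for blocking, so a helper might fall back to an atomic
 * allocation when it does not hold. */
#include <linux/preempt.h>
#include <linux/slab.h>

static void *my_alloc(size_t size)
{
	if (preemptible())
		return kmalloc(size, GFP_KERNEL); /* may sleep */
	return kmalloc(size, GFP_ATOMIC);         /* never sleeps */
}
```

Caveat: on !CONFIG_PREEMPT_COUNT kernels preemptible() is hardwired to 0 (see the stub later in this diff), so the GFP_KERNEL branch would never run there; real callers are expected to know their context rather than probe for it.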
```diff
@@ -270,24 +203,11 @@
 		__preempt_schedule(); \
 } while (0)
 
-#define preempt_lazy_enable() \
-do { \
-	dec_preempt_lazy_count(); \
-	barrier(); \
-	preempt_check_resched(); \
-} while (0)
-
-#else /* !CONFIG_PREEMPT */
+#else /* !CONFIG_PREEMPTION */
 #define preempt_enable() \
 do { \
 	barrier(); \
 	preempt_count_dec(); \
-} while (0)
-
-#define preempt_lazy_enable() \
-do { \
-	dec_preempt_lazy_count(); \
-	barrier(); \
 } while (0)
 
 #define preempt_enable_notrace() \
@@ -297,7 +217,7 @@
 } while (0)
 
 #define preempt_check_resched() do { } while (0)
-#endif /* CONFIG_PREEMPT */
+#endif /* CONFIG_PREEMPTION */
 
 #define preempt_disable_notrace() \
 do { \
@@ -328,16 +248,8 @@
 #define preempt_disable_notrace() barrier()
 #define preempt_enable_no_resched_notrace() barrier()
 #define preempt_enable_notrace() barrier()
-#define preempt_check_resched_rt() barrier()
 #define preemptible() 0
 
-#define migrate_disable() barrier()
-#define migrate_enable() barrier()
-
-static inline int __migrate_disabled(struct task_struct *p)
-{
-	return 0;
-}
 #endif /* CONFIG_PREEMPT_COUNT */
 
 #ifdef MODULE
@@ -356,21 +268,9 @@
 } while (0)
 #define preempt_fold_need_resched() \
 do { \
-	if (tif_need_resched_now()) \
+	if (tif_need_resched()) \
 		set_preempt_need_resched(); \
 } while (0)
-
-#ifdef CONFIG_PREEMPT_RT_FULL
-# define preempt_disable_rt() preempt_disable()
-# define preempt_enable_rt() preempt_enable()
-# define preempt_disable_nort() barrier()
-# define preempt_enable_nort() barrier()
-#else
-# define preempt_disable_rt() barrier()
-# define preempt_enable_rt() barrier()
-# define preempt_disable_nort() preempt_disable()
-# define preempt_enable_nort() preempt_enable()
-#endif
 
 #ifdef CONFIG_PREEMPT_NOTIFIERS
 
```
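preempt_fold_need_resched() now tests plain tif_need_resched() instead of the RT-lazy tif_need_resched_now(). The fold copies TIF_NEED_RESCHED into the PREEMPT_NEED_RESCHED bit (the reserved MSB, 0x80000000, from the layout comment at the top) so the preempt_enable() fast path can decide "can and should we reschedule?" with a single comparison; x86, for instance, stores the bit inverted so that the test becomes a plain compare against zero. A userspace sketch of the generic, non-inverted idea (all names here are stand-ins):

```c
/* Userspace sketch of folding TIF_NEED_RESCHED into the MSB of the
 * preemption count; illustration of the idea, not any arch's code. */
#include <stdbool.h>
#include <stdio.h>

#define PREEMPT_NEED_RESCHED 0x80000000UL

static unsigned long pc;       /* stand-in for preempt_count() */
static bool need_resched_flag; /* stand-in for TIF_NEED_RESCHED */

static void fold_need_resched(void)
{
	if (need_resched_flag)
		pc |= PREEMPT_NEED_RESCHED; /* fold the flag into the count */
}

static bool should_resched(void)
{
	/* One compare covers "no disables left" and "resched requested". */
	return pc == PREEMPT_NEED_RESCHED;
}

int main(void)
{
	pc = 1;                   /* inside preempt_disable() */
	need_resched_flag = true; /* a wakeup requested rescheduling */
	fold_need_resched();
	pc--;                     /* outermost preempt_enable() */
	printf("should_resched: %d\n", should_resched()); /* 1 */
	return 0;
}
```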
```diff
@@ -422,4 +322,34 @@
 
 #endif
 
+/**
+ * migrate_disable - Prevent migration of the current task
+ *
+ * Maps to preempt_disable() which also disables preemption. Use
+ * migrate_disable() to annotate that the intent is to prevent migration,
+ * but not necessarily preemption.
+ *
+ * Can be invoked nested like preempt_disable() and needs the corresponding
+ * number of migrate_enable() invocations.
+ */
+static __always_inline void migrate_disable(void)
+{
+	preempt_disable();
+}
+
+/**
+ * migrate_enable - Allow migration of the current task
+ *
+ * Counterpart to migrate_disable().
+ *
+ * As migrate_disable() can be invoked nested, only the outermost invocation
+ * reenables migration.
+ *
+ * Currently mapped to preempt_enable().
+ */
+static __always_inline void migrate_enable(void)
+{
+	preempt_enable();
+}
+
 #endif /* __LINUX_PREEMPT_H */
```
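The new migrate_disable()/migrate_enable() inlines are deliberately thin wrappers: today they map to preempt_disable()/preempt_enable(), but, per the kernel-doc above, callers should pick them when the requirement is "do not migrate off this CPU" rather than "do not preempt", since the mapping can later be relaxed (on PREEMPT_RT, disabling migration does not disable preemption). A hedged kernel-style usage sketch (`my_percpu_state` and `my_read_state()` are hypothetical):

```c
/* Kernel-style sketch, not part of this patch: annotate intent with
 * migrate_disable() even while it still implies preempt_disable(). */
#include <linux/percpu.h>
#include <linux/preempt.h>

DEFINE_PER_CPU(int, my_percpu_state);

static int my_read_state(void)
{
	int val;

	migrate_disable(); /* requirement: stay on this CPU */
	val = this_cpu_read(my_percpu_state);
	migrate_enable();  /* outermost call allows migration again */
	return val;
}
```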