```diff
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -26,13 +26,13 @@
  * PREEMPT_MASK: 0x000000ff
  * SOFTIRQ_MASK: 0x0000ff00
  * HARDIRQ_MASK: 0x000f0000
- * NMI_MASK:     0x00100000
+ * NMI_MASK:     0x00f00000
  * PREEMPT_NEED_RESCHED: 0x80000000
  */
 #define PREEMPT_BITS	8
 #define SOFTIRQ_BITS	8
 #define HARDIRQ_BITS	4
-#define NMI_BITS	1
+#define NMI_BITS	4
 
 #define PREEMPT_SHIFT	0
 #define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)
```
```diff
@@ -52,9 +52,6 @@
 #define NMI_OFFSET	(1UL << NMI_SHIFT)
 
 #define SOFTIRQ_DISABLE_OFFSET	(2 * SOFTIRQ_OFFSET)
-
-/* We use the MSB mostly because its available */
-#define PREEMPT_NEED_RESCHED	0x80000000
 
 #define PREEMPT_DISABLED	(PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)
 
```
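The bare `PREEMPT_NEED_RESCHED` define disappears from this generic header, although the bit is still documented in the comment block above. Architectures that fold the need-resched state into `preempt_count` (x86 keeps its own copy in `asm/preempt.h`) store the flag *inverted* in the MSB, so a single decrement-and-test-for-zero answers both "did the preempt depth hit zero?" and "is a reschedule pending?". A toy user-space model of that trick (all `model_*` names are hypothetical, not kernel API):

```c
#include <stdbool.h>
#include <stdio.h>

/* MSB holds the inverted need-resched flag: bit set = no resched wanted. */
#define PREEMPT_NEED_RESCHED 0x80000000u

static unsigned int count = PREEMPT_NEED_RESCHED; /* enabled, no resched */

static void set_need_resched(void)   { count &= ~PREEMPT_NEED_RESCHED; }
static void clear_need_resched(void) { count |=  PREEMPT_NEED_RESCHED; }

static void model_preempt_disable(void) { count++; }

/* One decrement-and-test replaces two separate checks. */
static bool model_preempt_enable_should_resched(void)
{
	return --count == 0;
}

int main(void)
{
	model_preempt_disable();
	set_need_resched();          /* e.g. a timer tick fired */
	printf("%d\n", model_preempt_enable_should_resched()); /* 1: reschedule */

	clear_need_resched();
	model_preempt_disable();
	printf("%d\n", model_preempt_enable_should_resched()); /* 0: nothing to do */
	return 0;
}
```

The inversion is the point: the counter reads as exactly zero only when both conditions hold, which is what lets such architectures fold the check in `preempt_enable()` into one instruction.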
```diff
@@ -185,7 +182,7 @@
 
 #define preemptible()	(preempt_count() == 0 && !irqs_disabled())
 
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
 #define preempt_enable() \
 do { \
 	barrier(); \
@@ -206,7 +203,7 @@
 		__preempt_schedule(); \
 } while (0)
 
-#else /* !CONFIG_PREEMPT */
+#else /* !CONFIG_PREEMPTION */
 #define preempt_enable() \
 do { \
 	barrier(); \
@@ -220,7 +217,7 @@
 } while (0)
 
 #define preempt_check_resched() do { } while (0)
-#endif /* CONFIG_PREEMPT */
+#endif /* CONFIG_PREEMPTION */
 
 #define preempt_disable_notrace() \
 do { \
```
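The three hunks above are a mechanical rename: `CONFIG_PREEMPTION` is the umbrella symbol selected by both `CONFIG_PREEMPT` and `CONFIG_PREEMPT_RT`, so guarding the rescheduling flavour of `preempt_enable()` on it lets the same code serve preempt-RT kernels too. The behavioural difference between the two branches, modelled as a toy in plain C (all names here are mine, not the kernel's):

```c
#include <stdio.h>

static int preempt_count_v = 1; /* depth 1: inside a critical section */
static int need_resched_v  = 1; /* a reschedule request is pending    */

/* CONFIG_PREEMPTION branch: dropping to zero may reschedule right here. */
static void preempt_enable_preemption(void)
{
	if (--preempt_count_v == 0 && need_resched_v)
		puts("__preempt_schedule()");
}

/* !CONFIG_PREEMPTION branch: only the counter moves; a pending
 * reschedule waits for the next explicit scheduling point. */
static void preempt_enable_voluntary(void)
{
	--preempt_count_v;
}

int main(void)
{
	preempt_enable_preemption(); /* prints: __preempt_schedule() */

	preempt_count_v = 1;
	preempt_enable_voluntary();  /* silent, even though need_resched_v is set */
	return 0;
}
```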
```diff
@@ -325,4 +322,34 @@
 
 #endif
 
+/**
+ * migrate_disable - Prevent migration of the current task
+ *
+ * Maps to preempt_disable() which also disables preemption. Use
+ * migrate_disable() to annotate that the intent is to prevent migration,
+ * but not necessarily preemption.
+ *
+ * Can be invoked nested like preempt_disable() and needs the corresponding
+ * number of migrate_enable() invocations.
+ */
+static __always_inline void migrate_disable(void)
+{
+	preempt_disable();
+}
+
+/**
+ * migrate_enable - Allow migration of the current task
+ *
+ * Counterpart to migrate_disable().
+ *
+ * As migrate_disable() can be invoked nested, only the outermost invocation
+ * reenables migration.
+ *
+ * Currently mapped to preempt_enable().
+ */
+static __always_inline void migrate_enable(void)
+{
+	preempt_enable();
+}
+
 #endif /* __LINUX_PREEMPT_H */
```
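Since `migrate_disable()` is documented to nest, a short kernel-style usage sketch may help (the `demo_*` functions are hypothetical, not part of the patch): a task pins itself to its current CPU, and the pin survives a nested disable/enable pair until the outermost `migrate_enable()`.

```c
#include <linux/preempt.h>
#include <linux/printk.h>
#include <linux/smp.h>

static void demo_inner(void)
{
	/* Nested call: migration stays disabled on return, because only
	 * the outermost migrate_enable() lifts the pin. */
	migrate_disable();
	pr_info("inner on CPU %d\n", smp_processor_id());
	migrate_enable();
}

static void demo_outer(void)
{
	migrate_disable();
	demo_inner();
	/* Still pinned: guaranteed to be the same CPU as printed above. */
	pr_info("outer on CPU %d\n", smp_processor_id());
	migrate_enable();
}
```

In this header the mapping to `preempt_disable()` makes the pin trivially hold; the annotation matters because, as the kernel-doc says, the intent is only to prevent migration, which keeps callers correct on kernels where migration control and preemption control diverge (as they do on PREEMPT_RT).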