```diff
@@ -8,6 +8,9 @@
 
 DECLARE_PER_CPU(int, __preempt_count);
 
+/* We use the MSB mostly because its available */
+#define PREEMPT_NEED_RESCHED	0x80000000
+
 /*
  * We use the PREEMPT_NEED_RESCHED bit as an inverted NEED_RESCHED such
  * that a decrement hitting 0 means we can and should reschedule.
```
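The new `PREEMPT_NEED_RESCHED` definition is the key to the scheme described in the comment: the per-CPU counter keeps the real preempt count in the low bits and an *inverted* NEED_RESCHED flag in the MSB, so a single decrement that reaches zero means both "preemption is enabled again" and "a reschedule is pending". Below is a minimal user-space sketch of that encoding; the flat `preempt_count_raw` variable and the `*_sketch` helper names are illustrative stand-ins for the real per-CPU machinery, not kernel code.

```c
/* Minimal user-space sketch of the inverted NEED_RESCHED encoding. */
#include <stdbool.h>
#include <stdio.h>

#define PREEMPT_NEED_RESCHED	0x80000000u

/* MSB set means "no reschedule needed"; the count lives in the low bits. */
static unsigned int preempt_count_raw = PREEMPT_NEED_RESCHED;

static void set_need_resched(void)   { preempt_count_raw &= ~PREEMPT_NEED_RESCHED; }
static void clear_need_resched(void) { preempt_count_raw |=  PREEMPT_NEED_RESCHED; }
static void preempt_disable_sketch(void) { preempt_count_raw++; }

/*
 * One decrement-and-test replaces two checks: the result is zero only
 * when the count part reaches zero *and* the MSB is already clear,
 * i.e. preemption is enabled again and a reschedule is pending.
 */
static bool preempt_enable_needs_resched(void)
{
	return --preempt_count_raw == 0;
}

int main(void)
{
	preempt_disable_sketch();
	set_need_resched();
	printf("%d\n", preempt_enable_needs_resched());	/* 1: count hit 0, resched pending */

	clear_need_resched();
	preempt_disable_sketch();
	printf("%d\n", preempt_enable_needs_resched());	/* 0: MSB still set, nothing to do */
	return 0;
}
```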
```diff
@@ -40,7 +43,7 @@
 #define init_task_preempt_count(p) do { } while (0)
 
 #define init_idle_preempt_count(p, cpu) do { \
-	per_cpu(__preempt_count, (cpu)) = PREEMPT_ENABLED; \
+	per_cpu(__preempt_count, (cpu)) = PREEMPT_DISABLED; \
 } while (0)
 
 /*
```
```diff
@@ -86,24 +89,9 @@
  * a decrement which hits zero means we have no preempt_count and should
  * reschedule.
  */
-static __always_inline bool ____preempt_count_dec_and_test(void)
-{
-	GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), e);
-}
-
 static __always_inline bool __preempt_count_dec_and_test(void)
 {
-	if (____preempt_count_dec_and_test())
-		return true;
-#ifdef CONFIG_PREEMPT_LAZY
-	if (preempt_count())
-		return false;
-	if (current_thread_info()->preempt_lazy_count)
-		return false;
-	return test_thread_flag(TIF_NEED_RESCHED_LAZY);
-#else
-	return false;
-#endif
+	return GEN_UNARY_RMWcc("decl", __preempt_count, e, __percpu_arg([var]));
 }
 
 /*
```
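The rewritten `__preempt_count_dec_and_test()` goes back to a single read-modify-write: `GEN_UNARY_RMWcc()` emits one `decl` on the per-CPU word and turns the resulting zero flag into the boolean return value, replacing the lazy-preempt checks removed above. As a rough approximation of what that boils down to (minus the `%gs`-relative per-CPU addressing and the macro's asm-goto/flag-output plumbing), here is a hedged sketch using a GCC flag-output constraint on an ordinary variable; `fake_preempt_count` and `dec_and_test_sketch` are illustrative names, not kernel symbols.

```c
#include <stdbool.h>

static unsigned int fake_preempt_count;	/* stand-in for the per-CPU word */

/* Decrement and report "hit zero" straight from the CPU's zero flag. */
static inline bool dec_and_test_sketch(void)
{
	bool zero;

	asm volatile ("decl %[var]"
		      : [var] "+m" (fake_preempt_count), "=@ccz" (zero)
		      : : "memory");
	return zero;
}
```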
```diff
@@ -111,34 +99,18 @@
  */
 static __always_inline bool should_resched(int preempt_offset)
 {
-#ifdef CONFIG_PREEMPT_LAZY
-	u32 tmp;
-
-	tmp = raw_cpu_read_4(__preempt_count);
-	if (tmp == preempt_offset)
-		return true;
-
-	/* preempt count == 0 ? */
-	tmp &= ~PREEMPT_NEED_RESCHED;
-	if (tmp != preempt_offset)
-		return false;
-	if (current_thread_info()->preempt_lazy_count)
-		return false;
-	return test_thread_flag(TIF_NEED_RESCHED_LAZY);
-#else
 	return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset);
-#endif
 }
 
-#ifdef CONFIG_PREEMPT
-  extern asmlinkage void ___preempt_schedule(void);
+#ifdef CONFIG_PREEMPTION
+  extern asmlinkage void preempt_schedule_thunk(void);
 # define __preempt_schedule() \
-	asm volatile ("call ___preempt_schedule" : ASM_CALL_CONSTRAINT)
+	asm volatile ("call preempt_schedule_thunk" : ASM_CALL_CONSTRAINT)
 
   extern asmlinkage void preempt_schedule(void);
-  extern asmlinkage void ___preempt_schedule_notrace(void);
+  extern asmlinkage void preempt_schedule_notrace_thunk(void);
 # define __preempt_schedule_notrace() \
-	asm volatile ("call ___preempt_schedule_notrace" : ASM_CALL_CONSTRAINT)
+	asm volatile ("call preempt_schedule_notrace_thunk" : ASM_CALL_CONSTRAINT)
 
   extern asmlinkage void preempt_schedule_notrace(void);
 #endif
```
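With the lazy-preempt branches gone, `should_resched()` is again a single per-CPU load and compare. That one compare is sufficient precisely because of the inverted bit: the raw value can only equal `preempt_offset` when the count part matches the offset *and* the MSB is clear, i.e. a reschedule is actually pending. The small sketch below uses hypothetical values (not taken from the patch) to make the three cases explicit; it mirrors only the compare, not the `raw_cpu_read_4()` access or the thunk-based `__preempt_schedule()` call sites.

```c
#include <assert.h>
#include <stdbool.h>

#define PREEMPT_NEED_RESCHED	0x80000000u

/* Mirror of the single-compare fast path, on a plain value for illustration. */
static bool should_resched_sketch(unsigned int raw, unsigned int preempt_offset)
{
	return raw == preempt_offset;
}

int main(void)
{
	/* count 0, no reschedule pending: MSB set, so no preemption */
	assert(!should_resched_sketch(0u | PREEMPT_NEED_RESCHED, 0));

	/* count 0 and reschedule pending: MSB clear, compare succeeds */
	assert(should_resched_sketch(0u, 0));

	/* count still elevated, reschedule pending: compare fails */
	assert(!should_resched_sketch(1u, 0));
	return 0;
}
```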