```diff
@@ -8,6 +8,9 @@
 
 DECLARE_PER_CPU(int, __preempt_count);
 
+/* We use the MSB mostly because its available */
+#define PREEMPT_NEED_RESCHED	0x80000000
+
 /*
  * We use the PREEMPT_NEED_RESCHED bit as an inverted NEED_RESCHED such
  * that a decrement hitting 0 means we can and should reschedule.
```
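The comment above is the key to the whole file: the reschedule bit is stored *inverted*, so the raw per-CPU word reads zero only when the preempt count has dropped to zero and a reschedule has been requested, and a single decrement-plus-zero-test covers both conditions. Below is a minimal user-space sketch of that invariant; the helpers here (set_need_resched(), preempt_enable_and_test()) are illustrative stand-ins, not the kernel API, which uses roughly set_preempt_need_resched() and __preempt_count_dec_and_test().

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PREEMPT_NEED_RESCHED	0x80000000u

/* The per-CPU count starts "enabled": count 0, inverted resched bit set. */
static uint32_t preempt_count_raw = PREEMPT_NEED_RESCHED;

static void set_need_resched(void)   { preempt_count_raw &= ~PREEMPT_NEED_RESCHED; }
static void clear_need_resched(void) { preempt_count_raw |=  PREEMPT_NEED_RESCHED; }

static void preempt_disable(void)    { preempt_count_raw++; }

/* One decrement plus a zero test answers both questions at once:
 * "was that the outermost preempt_disable()?" and "is a resched pending?" */
static bool preempt_enable_and_test(void)
{
	return --preempt_count_raw == 0;
}

int main(void)
{
	preempt_disable();
	printf("no resched pending -> reschedule? %d\n", preempt_enable_and_test());

	preempt_disable();
	set_need_resched();
	printf("resched pending    -> reschedule? %d\n", preempt_enable_and_test());

	clear_need_resched();
	return 0;
}
```

Running the sketch prints 0 for the first case and 1 for the second, which is exactly the behaviour the `decl`-based test further down relies on.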
```diff
@@ -40,7 +43,7 @@
 #define init_task_preempt_count(p) do { } while (0)
 
 #define init_idle_preempt_count(p, cpu) do { \
-	per_cpu(__preempt_count, (cpu)) = PREEMPT_ENABLED; \
+	per_cpu(__preempt_count, (cpu)) = PREEMPT_DISABLED; \
 } while (0)
 
 /*
```
```diff
@@ -86,9 +89,24 @@
  * a decrement which hits zero means we have no preempt_count and should
  * reschedule.
  */
+static __always_inline bool ____preempt_count_dec_and_test(void)
+{
+	return GEN_UNARY_RMWcc("decl", __preempt_count, e, __percpu_arg([var]));
+}
+
 static __always_inline bool __preempt_count_dec_and_test(void)
 {
-	GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), e);
+	if (____preempt_count_dec_and_test())
+		return true;
+#ifdef CONFIG_PREEMPT_LAZY
+	if (preempt_count())
+		return false;
+	if (current_thread_info()->preempt_lazy_count)
+		return false;
+	return test_thread_flag(TIF_NEED_RESCHED_LAZY);
+#else
+	return false;
+#endif
 }
 
 /*
```
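This hunk splits the old single-instruction test in two: ____preempt_count_dec_and_test() keeps the `decl`-and-test-zero fast path (which already folds in the hard NEED_RESCHED bit), and __preempt_count_dec_and_test() falls back to the lazy checks only when that fails. A standalone C model of the decision, assuming CONFIG_PREEMPT_LAZY and with the per-CPU word, the thread_info preempt_lazy_count and TIF_NEED_RESCHED_LAZY replaced by plain globals (a sketch, not the real implementations):

```c
#include <stdbool.h>
#include <stdint.h>

#define PREEMPT_NEED_RESCHED	0x80000000u

/* Fake kernel state: per-CPU count word, thread_info lazy counter,
 * TIF_NEED_RESCHED_LAZY flag. */
static uint32_t __preempt_count = PREEMPT_NEED_RESCHED;
static int      preempt_lazy_count;
static bool     tif_need_resched_lazy;

/* Stand-in for GEN_UNARY_RMWcc("decl", ...): decrement, test for zero. */
static bool ____preempt_count_dec_and_test(void)
{
	return --__preempt_count == 0;
}

/* preempt_count() masks out the inverted NEED_RESCHED bit. */
static uint32_t preempt_count(void)
{
	return __preempt_count & ~PREEMPT_NEED_RESCHED;
}

static bool __preempt_count_dec_and_test(void)
{
	/* Fast path: count hit zero, i.e. a "hard" reschedule is pending. */
	if (____preempt_count_dec_and_test())
		return true;
	/* Lazy path: honour TIF_NEED_RESCHED_LAZY only if neither the
	 * ordinary count nor the lazy counter holds preemption off. */
	if (preempt_count())
		return false;
	if (preempt_lazy_count)
		return false;
	return tif_need_resched_lazy;
}

int main(void)
{
	__preempt_count++;		/* models preempt_disable() */
	tif_need_resched_lazy = true;	/* a lazy reschedule was requested */
	return __preempt_count_dec_and_test() ? 0 : 1;	/* exits 0: reschedule */
}
```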
```diff
@@ -96,18 +114,37 @@
  */
 static __always_inline bool should_resched(int preempt_offset)
 {
+#ifdef CONFIG_PREEMPT_LAZY
+	u32 tmp;
+	tmp = raw_cpu_read_4(__preempt_count);
+	if (tmp == preempt_offset)
+		return true;
+
+	/* preempt count == 0 ? */
+	tmp &= ~PREEMPT_NEED_RESCHED;
+	if (tmp != preempt_offset)
+		return false;
+	/* XXX PREEMPT_LOCK_OFFSET */
+	if (current_thread_info()->preempt_lazy_count)
+		return false;
+	return test_thread_flag(TIF_NEED_RESCHED_LAZY);
+#else
 	return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset);
+#endif
 }
 
-#ifdef CONFIG_PREEMPT
-  extern asmlinkage void ___preempt_schedule(void);
+#ifdef CONFIG_PREEMPTION
+#ifdef CONFIG_PREEMPT_RT
+  extern void preempt_schedule_lock(void);
+#endif
+  extern asmlinkage void preempt_schedule_thunk(void);
 # define __preempt_schedule() \
-	asm volatile ("call ___preempt_schedule" : ASM_CALL_CONSTRAINT)
+	asm volatile ("call preempt_schedule_thunk" : ASM_CALL_CONSTRAINT)
 
   extern asmlinkage void preempt_schedule(void);
-  extern asmlinkage void ___preempt_schedule_notrace(void);
+  extern asmlinkage void preempt_schedule_notrace_thunk(void);
 # define __preempt_schedule_notrace() \
-	asm volatile ("call ___preempt_schedule_notrace" : ASM_CALL_CONSTRAINT)
+	asm volatile ("call preempt_schedule_notrace_thunk" : ASM_CALL_CONSTRAINT)
 
   extern asmlinkage void preempt_schedule_notrace(void);
 #endif
```
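The should_resched() change follows the same pattern: the existing count == preempt_offset test stays as the fast path, and the lazy path first masks off PREEMPT_NEED_RESCHED to check that nothing beyond the expected offset holds preemption, then defers to preempt_lazy_count and TIF_NEED_RESCHED_LAZY. The rest of the hunk tracks the CONFIG_PREEMPT to CONFIG_PREEMPTION and ___preempt_schedule to preempt_schedule_thunk renames and adds the RT-only preempt_schedule_lock() declaration. Below is a standalone sketch of the lazy should_resched() decision under the same fake-globals assumptions as above; in the kernel this test typically backs cond_resched()-style checks rather than being called directly.

```c
#include <stdbool.h>
#include <stdint.h>

#define PREEMPT_NEED_RESCHED	0x80000000u

/* Fake kernel state, as in the previous sketch. */
static uint32_t __preempt_count = PREEMPT_NEED_RESCHED;
static int      preempt_lazy_count;
static bool     tif_need_resched_lazy;

static bool should_resched(uint32_t preempt_offset)
{
	uint32_t tmp = __preempt_count;

	/* Count equals the expected offset and the inverted NEED_RESCHED
	 * bit is clear: an ordinary reschedule is due. */
	if (tmp == preempt_offset)
		return true;

	/* Otherwise only the lazy flag can ask for a reschedule, and only
	 * if nothing beyond the expected offset holds preemption off. */
	tmp &= ~PREEMPT_NEED_RESCHED;
	if (tmp != preempt_offset)
		return false;
	if (preempt_lazy_count)
		return false;
	return tif_need_resched_lazy;
}

int main(void)
{
	tif_need_resched_lazy = true;
	return should_resched(0) ? 0 : 1;	/* exits 0: a yield point would schedule */
}
```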