..
  * a decrement which hits zero means we have no preempt_count and should
  * reschedule.
  */
-static __always_inline bool ____preempt_count_dec_and_test(void)
-{
-	return GEN_UNARY_RMWcc("decl", __preempt_count, e, __percpu_arg([var]));
-}
-
 static __always_inline bool __preempt_count_dec_and_test(void)
 {
-	if (____preempt_count_dec_and_test())
-		return true;
-#ifdef CONFIG_PREEMPT_LAZY
-	if (preempt_count())
-		return false;
-	if (current_thread_info()->preempt_lazy_count)
-		return false;
-	return test_thread_flag(TIF_NEED_RESCHED_LAZY);
-#else
-	return false;
-#endif
+	return GEN_UNARY_RMWcc("decl", __preempt_count, e, __percpu_arg([var]));
 }

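With CONFIG_PREEMPT_LAZY gone, the split between the raw asm helper (____preempt_count_dec_and_test()) and the C wrapper that consulted preempt_lazy_count and TIF_NEED_RESCHED_LAZY is no longer needed, and the wrapper collapses back to a single GEN_UNARY_RMWcc: decrement the per-CPU __preempt_count and report whether it hit zero. On x86 the need-resched hint is folded into __preempt_count as an inverted high bit, so "hit zero" means both "preempt count is zero" and "a reschedule is pending". A minimal stand-alone C model of that semantic (the model_*/MODEL_* names are illustrative, not kernel API):

#include <stdbool.h>
#include <stdint.h>

/* Illustrative only: on x86 the need-resched hint is the top bit of the
 * per-CPU preempt count and is stored inverted (cleared when a resched
 * is requested), so the whole word reads 0 exactly when the count is
 * zero and a reschedule is due. */
#define MODEL_PREEMPT_NEED_RESCHED	0x80000000u

static inline bool model_preempt_count_dec_and_test(uint32_t *pc)
{
	/* Stands in for the "decl" plus Zero Flag test that
	 * GEN_UNARY_RMWcc("decl", ..., e, ...) emits. */
	return --(*pc) == 0;
}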
 /*
..
 */
 static __always_inline bool should_resched(int preempt_offset)
 {
-#ifdef CONFIG_PREEMPT_LAZY
-	u32 tmp;
-	tmp = raw_cpu_read_4(__preempt_count);
-	if (tmp == preempt_offset)
-		return true;
-
-	/* preempt count == 0 ? */
-	tmp &= ~PREEMPT_NEED_RESCHED;
-	if (tmp != preempt_offset)
-		return false;
-	/* XXX PREEMPT_LOCK_OFFSET */
-	if (current_thread_info()->preempt_lazy_count)
-		return false;
-	return test_thread_flag(TIF_NEED_RESCHED_LAZY);
-#else
 	return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset);
-#endif
 }

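The deleted lazy path needed two comparisons: one against preempt_offset with the need-resched bit included (an eager reschedule is due right now), and one with PREEMPT_NEED_RESCHED masked off (the count is at the allowed offset, so only the lazy flag could still ask for one). What survives is the stock x86 test, where a single 32-bit compare covers both "count equals the offset" and "reschedule requested" at once, again because the bit is stored inverted. A self-contained sketch of that trick, reusing the illustrative MODEL_* names from above:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define MODEL_PREEMPT_NEED_RESCHED	0x80000000u

static bool model_should_resched(uint32_t pc, uint32_t preempt_offset)
{
	/* Equality can only hold when the inverted need-resched bit is
	 * clear (reschedule requested) and the count bits match the
	 * offset the caller is willing to tolerate. */
	return pc == preempt_offset;
}

int main(void)
{
	uint32_t pc = MODEL_PREEMPT_NEED_RESCHED;	/* count 0, no resched */

	assert(!model_should_resched(pc, 0));
	pc &= ~MODEL_PREEMPT_NEED_RESCHED;		/* resched requested */
	assert(model_should_resched(pc, 0));
	return 0;
}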
 #ifdef CONFIG_PREEMPTION
-#ifdef CONFIG_PREEMPT_RT
-	extern void preempt_schedule_lock(void);
-#endif
 extern asmlinkage void preempt_schedule_thunk(void);
 # define __preempt_schedule() \
 	asm volatile ("call preempt_schedule_thunk" : ASM_CALL_CONSTRAINT)
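The dropped preempt_schedule_lock() declaration was the PREEMPT_RT-specific scheduler entry used by the sleeping-lock slowpaths in the RT tree; with the lazy/RT support removed from this header it has no user here. For context on where __preempt_schedule() lands, a sketch paraphrasing (and slightly simplifying) the generic wiring in <linux/preempt.h>: releasing the last preempt count reference while a reschedule is pending calls straight into the scheduler through the thunk.

/* Sketch only; the real preempt_enable() in <linux/preempt.h> also has
 * debug and !CONFIG_PREEMPTION variants. */
#define sketch_preempt_enable() \
do { \
	barrier(); \
	if (unlikely(__preempt_count_dec_and_test())) \
		__preempt_schedule(); \
} while (0)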