  * a decrement which hits zero means we have no preempt_count and should
  * reschedule.
  */
-static __always_inline bool __preempt_count_dec_and_test(void)
+static __always_inline bool ____preempt_count_dec_and_test(void)
 {
	GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), e);
+}
+
+static __always_inline bool __preempt_count_dec_and_test(void)
+{
+	if (____preempt_count_dec_and_test())
+		return true;
+#ifdef CONFIG_PREEMPT_LAZY
+	if (preempt_count())
+		return false;
+	if (current_thread_info()->preempt_lazy_count)
+		return false;
+	return test_thread_flag(TIF_NEED_RESCHED_LAZY);
+#else
+	return false;
+#endif
 }

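As an aside, not part of the patch itself: the decision the new wrapper encodes can be modelled in plain user-space C. The sketch below is only an illustration, built on the fact that x86 folds PREEMPT_NEED_RESCHED into the per-CPU preempt count as an inverted bit, so the raw count can only decrement to zero when a hard reschedule is already pending; the lazy branch then covers the case where only TIF_NEED_RESCHED_LAZY is set. The names `cpu_model`, `model_dec_and_test` and `MODEL_NEED_RESCHED` are invented for the sketch.

```c
/*
 * Illustration only -- a user-space model of the lazy dec-and-test
 * decision above, not kernel code.  MODEL_NEED_RESCHED stands in for
 * the inverted PREEMPT_NEED_RESCHED bit x86 keeps in the per-CPU count.
 */
#include <stdbool.h>
#include <stdio.h>

#define MODEL_NEED_RESCHED	0x80000000u	/* inverted: set == no hard resched pending */

struct cpu_model {
	unsigned int raw_count;		/* nesting count | inverted resched bit */
	unsigned int lazy_count;	/* models ti->preempt_lazy_count */
	bool need_resched_lazy;		/* models TIF_NEED_RESCHED_LAZY */
};

static bool model_dec_and_test(struct cpu_model *c)
{
	if (--c->raw_count == 0)			/* the "decl; sete" fast path */
		return true;
	if (c->raw_count & ~MODEL_NEED_RESCHED)		/* still inside a preempt-off region */
		return false;
	if (c->lazy_count)				/* lazy sections still held */
		return false;
	return c->need_resched_lazy;			/* only the lazy flag is pending */
}

int main(void)
{
	/* one nesting level, no hard resched pending, lazy resched pending */
	struct cpu_model c = { 1 | MODEL_NEED_RESCHED, 0, true };

	printf("reschedule after preempt_enable(): %s\n",
	       model_dec_and_test(&c) ? "yes" : "no");
	return 0;
}
```

Running the model prints "yes": the nesting level drops to zero and, although no hard reschedule is pending, the lazy flag still lets the scheduler in.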
 /*
..
  */
 static __always_inline bool should_resched(int preempt_offset)
 {
+#ifdef CONFIG_PREEMPT_LAZY
+	u32 tmp;
+
+	tmp = raw_cpu_read_4(__preempt_count);
+	if (tmp == preempt_offset)
+		return true;
+
+	/* preempt count == 0 ? */
+	tmp &= ~PREEMPT_NEED_RESCHED;
+	if (tmp != preempt_offset)
+		return false;
+	if (current_thread_info()->preempt_lazy_count)
+		return false;
+	return test_thread_flag(TIF_NEED_RESCHED_LAZY);
+#else
	return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset);
+#endif
 }

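Again purely as an illustration rather than part of the patch, the same style of user-space model can show how the lazy variant of should_resched() treats the caller-supplied preempt_offset (for example 0 on the cond_resched() path, or a lock offset on the cond_resched_lock() path). `model_should_resched` and the other names below are made up for the sketch.

```c
/*
 * Illustration only -- a user-space model of the lazy should_resched()
 * decision above, not kernel code.
 */
#include <stdbool.h>
#include <stdio.h>

#define MODEL_NEED_RESCHED	0x80000000u	/* inverted: set == no hard resched pending */

struct cpu_model {
	unsigned int raw_count;		/* nesting count | inverted resched bit */
	unsigned int lazy_count;	/* models ti->preempt_lazy_count */
	bool need_resched_lazy;		/* models TIF_NEED_RESCHED_LAZY */
};

static bool model_should_resched(const struct cpu_model *c,
				 unsigned int preempt_offset)
{
	unsigned int tmp = c->raw_count;

	if (tmp == preempt_offset)		/* hard resched pending at the expected depth */
		return true;

	tmp &= ~MODEL_NEED_RESCHED;		/* ignore the hard-resched bit */
	if (tmp != preempt_offset)		/* nested deeper than the caller expects */
		return false;
	if (c->lazy_count)			/* lazy sections still held */
		return false;
	return c->need_resched_lazy;		/* only the lazy flag is pending */
}

int main(void)
{
	/* depth 0, no hard resched pending, lazy resched pending */
	struct cpu_model c = { MODEL_NEED_RESCHED, 0, true };

	printf("should_resched(0): %s\n",
	       model_should_resched(&c, 0) ? "yes" : "no");
	return 0;
}
```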
 #ifdef CONFIG_PREEMPT