@@ -70,43 +70,17 @@
  * interrupt occurring between the non-atomic READ_ONCE/WRITE_ONCE
  * pair.
  */
-	if (!pc || !READ_ONCE(ti->preempt_count))
-		return true;
-#ifdef CONFIG_PREEMPT_LAZY
-	if ((pc & ~PREEMPT_NEED_RESCHED))
-		return false;
-	if (current_thread_info()->preempt_lazy_count)
-		return false;
-	return test_thread_flag(TIF_NEED_RESCHED_LAZY);
-#else
-	return false;
-#endif
+	return !pc || !READ_ONCE(ti->preempt_count);
 }
 
 static inline bool should_resched(int preempt_offset)
 {
-#ifdef CONFIG_PREEMPT_LAZY
-	u64 pc = READ_ONCE(current_thread_info()->preempt_count);
-	if (pc == preempt_offset)
-		return true;
-
-	if ((pc & ~PREEMPT_NEED_RESCHED) != preempt_offset)
-		return false;
-
-	if (current_thread_info()->preempt_lazy_count)
-		return false;
-	return test_thread_flag(TIF_NEED_RESCHED_LAZY);
-#else
 	u64 pc = READ_ONCE(current_thread_info()->preempt_count);
 	return pc == preempt_offset;
-#endif
 }
 
 #ifdef CONFIG_PREEMPTION
 void preempt_schedule(void);
-#ifdef CONFIG_PREEMPT_RT
-void preempt_schedule_lock(void);
-#endif
 #define __preempt_schedule() preempt_schedule()
 void preempt_schedule_notrace(void);
 #define __preempt_schedule_notrace() preempt_schedule_notrace()
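
The surviving one-line body of should_resched() only works because of how this header folds the need-resched state into preempt_count. The u64 type suggests this hunk comes from arm64's asm/preempt.h, where thread_info carries a 64-bit preempt_count whose low word is the nesting count and whose high word is an inverted need-resched flag (zero means a reschedule is pending), so a single 64-bit compare against preempt_offset checks both conditions at once. The sketch below is a minimal user-space model of that trick under those assumptions; the struct and function names are illustrative, not the kernel's, and little-endian layout is assumed.

/* Build with: cc -std=c11 -o folded folded.c */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct fake_thread_info {
	union {
		uint64_t preempt_count;		/* 0 => preemptible */
		struct {
			uint32_t count;		/* nesting depth + offsets */
			uint32_t need_resched;	/* 0 => resched pending (inverted) */
		} preempt;
	};
};

/* Mirrors the simplified should_resched(): a single 64-bit compare. */
static bool fake_should_resched(struct fake_thread_info *ti, int preempt_offset)
{
	uint64_t pc = ti->preempt_count;

	/*
	 * True only when BOTH halves match: the low word equals the
	 * expected offset AND the high word is zero, i.e. a reschedule
	 * is pending.
	 */
	return pc == (uint64_t)preempt_offset;
}

int main(void)
{
	struct fake_thread_info ti = {
		.preempt.count = 1,
		.preempt.need_resched = 1,	/* no resched pending */
	};

	/* Count matches, but the high word is non-zero: no resched. */
	printf("%d\n", fake_should_resched(&ti, 1));	/* 0 */

	ti.preempt.need_resched = 0;	/* set_preempt_need_resched() analogue */
	printf("%d\n", fake_should_resched(&ti, 1));	/* 1 */

	ti.preempt.count = 2;		/* nested inside another preempt_disable() */
	printf("%d\n", fake_should_resched(&ti, 1));	/* 0 */

	return 0;
}

The removed CONFIG_PREEMPT_LAZY branches had to mask PREEMPT_NEED_RESCHED out and consult a separate lazy count and TIF flag; dropping them restores the single-compare fast path shown above.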