2023-12-11 6778948f9de86c3cfaf36725a7c87dcff9ba247f
kernel/arch/x86/include/asm/preempt.h
@@ -89,24 +89,9 @@
  * a decrement which hits zero means we have no preempt_count and should
  * reschedule.
  */
-static __always_inline bool ____preempt_count_dec_and_test(void)
-{
-	return GEN_UNARY_RMWcc("decl", __preempt_count, e, __percpu_arg([var]));
-}
-
 static __always_inline bool __preempt_count_dec_and_test(void)
 {
-	if (____preempt_count_dec_and_test())
-		return true;
-#ifdef CONFIG_PREEMPT_LAZY
-	if (preempt_count())
-		return false;
-	if (current_thread_info()->preempt_lazy_count)
-		return false;
-	return test_thread_flag(TIF_NEED_RESCHED_LAZY);
-#else
-	return false;
-#endif
+	return GEN_UNARY_RMWcc("decl", __preempt_count, e, __percpu_arg([var]));
 }
 
 /*
@@ -114,29 +99,10 @@
  */
 static __always_inline bool should_resched(int preempt_offset)
 {
-#ifdef CONFIG_PREEMPT_LAZY
-	u32 tmp;
-	tmp = raw_cpu_read_4(__preempt_count);
-	if (tmp == preempt_offset)
-		return true;
-
-	/* preempt count == 0 ? */
-	tmp &= ~PREEMPT_NEED_RESCHED;
-	if (tmp != preempt_offset)
-		return false;
-	/* XXX PREEMPT_LOCK_OFFSET */
-	if (current_thread_info()->preempt_lazy_count)
-		return false;
-	return test_thread_flag(TIF_NEED_RESCHED_LAZY);
-#else
 	return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset);
-#endif
 }
 
 #ifdef CONFIG_PREEMPTION
-#ifdef CONFIG_PREEMPT_RT
-	extern void preempt_schedule_lock(void);
-#endif
 extern asmlinkage void preempt_schedule_thunk(void);
 # define __preempt_schedule() \
 	asm volatile ("call preempt_schedule_thunk" : ASM_CALL_CONSTRAINT)
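Not part of the patch above, only a sketch: with the LAZY/RT branches removed, __preempt_count_dec_and_test() is again just the GEN_UNARY_RMWcc() line, which emits a "decl" on the per-CPU __preempt_count and returns the "e" (zero-flag) condition. The plain-C illustration below assumes, as on x86, that the inverted PREEMPT_NEED_RESCHED bit is folded into the count; "pcpu_preempt_count" is an invented stand-in for the real per-CPU variable, not a kernel symbol.

#include <stdbool.h>

/* Illustrative only: decrement the (stand-in) per-CPU preempt count and
 * report whether it hit zero.  Because the inverted need-resched bit lives
 * in the same word, zero means "preempt count dropped to zero and a
 * reschedule is pending", matching the comment above the real function. */
static inline bool sketch_preempt_count_dec_and_test(unsigned int *pcpu_preempt_count)
{
	return --(*pcpu_preempt_count) == 0;
}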