2024-10-12 a5969cabbb4660eab42b6ef0412cbbd1200cf14d
kernel/arch/x86/include/asm/preempt.h
@@ -8,6 +8,9 @@
 
 DECLARE_PER_CPU(int, __preempt_count);
 
+/* We use the MSB mostly because its available */
+#define PREEMPT_NEED_RESCHED	0x80000000
+
 /*
  * We use the PREEMPT_NEED_RESCHED bit as an inverted NEED_RESCHED such
  * that a decrement hitting 0 means we can and should reschedule.
@@ -40,7 +43,7 @@
 #define init_task_preempt_count(p) do { } while (0)
 
 #define init_idle_preempt_count(p, cpu) do { \
-	per_cpu(__preempt_count, (cpu)) = PREEMPT_ENABLED; \
+	per_cpu(__preempt_count, (cpu)) = PREEMPT_DISABLED; \
 } while (0)
 
 /*
@@ -86,24 +89,9 @@
  * a decrement which hits zero means we have no preempt_count and should
  * reschedule.
  */
-static __always_inline bool ____preempt_count_dec_and_test(void)
-{
-	GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), e);
-}
-
 static __always_inline bool __preempt_count_dec_and_test(void)
 {
-	if (____preempt_count_dec_and_test())
-		return true;
-#ifdef CONFIG_PREEMPT_LAZY
-	if (preempt_count())
-		return false;
-	if (current_thread_info()->preempt_lazy_count)
-		return false;
-	return test_thread_flag(TIF_NEED_RESCHED_LAZY);
-#else
-	return false;
-#endif
+	return GEN_UNARY_RMWcc("decl", __preempt_count, e, __percpu_arg([var]));
 }
 
 /*
@@ -111,34 +99,18 @@
  */
 static __always_inline bool should_resched(int preempt_offset)
 {
-#ifdef CONFIG_PREEMPT_LAZY
-	u32 tmp;
-
-	tmp = raw_cpu_read_4(__preempt_count);
-	if (tmp == preempt_offset)
-		return true;
-
-	/* preempt count == 0 ? */
-	tmp &= ~PREEMPT_NEED_RESCHED;
-	if (tmp != preempt_offset)
-		return false;
-	if (current_thread_info()->preempt_lazy_count)
-		return false;
-	return test_thread_flag(TIF_NEED_RESCHED_LAZY);
-#else
 	return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset);
-#endif
 }
 
-#ifdef CONFIG_PREEMPT
-  extern asmlinkage void ___preempt_schedule(void);
+#ifdef CONFIG_PREEMPTION
+  extern asmlinkage void preempt_schedule_thunk(void);
 # define __preempt_schedule() \
-	asm volatile ("call ___preempt_schedule" : ASM_CALL_CONSTRAINT)
+	asm volatile ("call preempt_schedule_thunk" : ASM_CALL_CONSTRAINT)
 
   extern asmlinkage void preempt_schedule(void);
-  extern asmlinkage void ___preempt_schedule_notrace(void);
+  extern asmlinkage void preempt_schedule_notrace_thunk(void);
 # define __preempt_schedule_notrace() \
-	asm volatile ("call ___preempt_schedule_notrace" : ASM_CALL_CONSTRAINT)
+	asm volatile ("call preempt_schedule_notrace_thunk" : ASM_CALL_CONSTRAINT)
 
   extern asmlinkage void preempt_schedule_notrace(void);
 #endif
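
For context, the comment kept by this patch ("an inverted NEED_RESCHED such that a decrement hitting 0 means we can and should reschedule") can be illustrated with a minimal user-space sketch. This is an assumption-laden model, not kernel code: the helper names and the plain counter below are made up for illustration; only the PREEMPT_NEED_RESCHED value and the decrement-to-zero idea come from the hunk above.

/*
 * Illustrative user-space sketch (NOT the kernel implementation).
 * PREEMPT_NEED_RESCHED is kept *set* while no reschedule is pending,
 * so a single decrement that reaches zero simultaneously means
 * "preempt count dropped to zero" and "a reschedule is wanted".
 */
#include <stdbool.h>
#include <stdio.h>

#define PREEMPT_NEED_RESCHED	0x80000000u	/* MSB, as in the hunk above */

/* Preemption enabled, no reschedule pending: count 0 with the bit set. */
static unsigned int preempt_count = PREEMPT_NEED_RESCHED;

static void preempt_disable(void)     { preempt_count++; }
static void set_need_resched(void)    { preempt_count &= ~PREEMPT_NEED_RESCHED; }
static void clear_need_resched(void)  { preempt_count |= PREEMPT_NEED_RESCHED; }

/*
 * Model of the dec-and-test: true only when the decrement leaves the whole
 * word at 0, i.e. no nesting left and NEED_RESCHED pending.
 */
static bool preempt_enable_and_test(void)
{
	return --preempt_count == 0;
}

int main(void)
{
	preempt_disable();
	set_need_resched();
	printf("reschedule: %d\n", preempt_enable_and_test());	/* prints 1 */

	preempt_disable();
	clear_need_resched();
	printf("reschedule: %d\n", preempt_enable_and_test());	/* prints 0 */
	return 0;
}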