2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/arch/x86/include/asm/preempt.h
@@ -8,6 +8,9 @@
 
 DECLARE_PER_CPU(int, __preempt_count);
 
+/* We use the MSB mostly because its available */
+#define PREEMPT_NEED_RESCHED	0x80000000
+
 /*
  * We use the PREEMPT_NEED_RESCHED bit as an inverted NEED_RESCHED such
  * that a decrement hitting 0 means we can and should reschedule.
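
The new PREEMPT_NEED_RESCHED constant is the key to the whole scheme: the per-CPU __preempt_count keeps the nesting count in the low bits and uses the MSB as an inverted need-resched flag, so a single decrement that hits zero simultaneously proves the count is zero and a reschedule is pending. A minimal standalone sketch of that trick (not kernel code; the demo_* names are invented for illustration):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PREEMPT_NEED_RESCHED	0x80000000u

/* "enabled, nothing pending": count 0 with the inverted bit set */
static uint32_t demo_preempt_count = PREEMPT_NEED_RESCHED;

static void demo_preempt_disable(void)  { demo_preempt_count++; }
static void demo_set_need_resched(void) { demo_preempt_count &= ~PREEMPT_NEED_RESCHED; }

/* Mirrors the "decl hits zero" idea: true only when the nesting count
 * is zero AND the inverted bit has been cleared (resched requested). */
static bool demo_preempt_enable_and_test(void)
{
	return --demo_preempt_count == 0;
}

int main(void)
{
	demo_preempt_disable();
	demo_set_need_resched();
	printf("reschedule: %d\n", demo_preempt_enable_and_test());	/* prints 1 */
	return 0;
}
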
@@ -40,7 +43,7 @@
 #define init_task_preempt_count(p) do { } while (0)
 
 #define init_idle_preempt_count(p, cpu) do { \
-	per_cpu(__preempt_count, (cpu)) = PREEMPT_ENABLED; \
+	per_cpu(__preempt_count, (cpu)) = PREEMPT_DISABLED; \
 } while (0)
 
 /*
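
Switching the idle CPU's initial value from PREEMPT_ENABLED to PREEMPT_DISABLED means idle now boots with one disable level already accounted for. A rough restatement of the constants involved (the real definitions live in <linux/preempt.h> and this header; treat the exact expressions here as an assumption rather than a quote):

#include <stdio.h>

#define PREEMPT_NEED_RESCHED	0x80000000u
#define PREEMPT_ENABLED		(0 + PREEMPT_NEED_RESCHED)	/* count 0, inverted bit set */
#define PREEMPT_DISABLED	(1 + PREEMPT_ENABLED)		/* one disable level on top */

int main(void)
{
	/* idle now starts with a non-zero nesting count */
	printf("enabled=%#x disabled=%#x\n", PREEMPT_ENABLED, PREEMPT_DISABLED);
	return 0;
}
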
@@ -86,9 +89,24 @@
  * a decrement which hits zero means we have no preempt_count and should
  * reschedule.
  */
+static __always_inline bool ____preempt_count_dec_and_test(void)
+{
+	return GEN_UNARY_RMWcc("decl", __preempt_count, e, __percpu_arg([var]));
+}
+
 static __always_inline bool __preempt_count_dec_and_test(void)
 {
-	return GEN_UNARY_RMWcc("decl", __preempt_count, e, __percpu_arg([var]));
+	if (____preempt_count_dec_and_test())
+		return true;
+#ifdef CONFIG_PREEMPT_LAZY
+	if (preempt_count())
+		return false;
+	if (current_thread_info()->preempt_lazy_count)
+		return false;
+	return test_thread_flag(TIF_NEED_RESCHED_LAZY);
+#else
+	return false;
+#endif
 }
 
 /*
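
Reading the new __preempt_count_dec_and_test() as a decision cascade: the original decrement-hits-zero fast path still handles ordinary preemption, and only when it fails does the CONFIG_PREEMPT_LAZY branch check that both the regular and the lazy disable counts have drained before honouring TIF_NEED_RESCHED_LAZY. A standalone sketch of that ordering (again not kernel code; the demo_* state stands in for the per-CPU count and the thread_info fields):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PREEMPT_NEED_RESCHED	0x80000000u

struct demo_state {
	uint32_t preempt_count;		/* per-CPU count analogue, MSB = inverted NEED_RESCHED */
	uint32_t preempt_lazy_count;	/* thread_info->preempt_lazy_count analogue */
	bool	 need_resched_lazy;	/* TIF_NEED_RESCHED_LAZY analogue */
};

static bool demo_dec_and_test(struct demo_state *s)
{
	if (--s->preempt_count == 0)			/* count 0 and a hard resched pending */
		return true;
	if (s->preempt_count & ~PREEMPT_NEED_RESCHED)	/* still inside a preempt-disabled region */
		return false;
	if (s->preempt_lazy_count)			/* lazy sections still open */
		return false;
	return s->need_resched_lazy;			/* fall back to the lazy request */
}

int main(void)
{
	/* one disable level, no hard resched request, but a lazy one pending */
	struct demo_state s = { PREEMPT_NEED_RESCHED + 1, 0, true };

	printf("reschedule: %d\n", demo_dec_and_test(&s));	/* prints 1 */
	return 0;
}
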
@@ -96,18 +114,37 @@
  */
 static __always_inline bool should_resched(int preempt_offset)
 {
+#ifdef CONFIG_PREEMPT_LAZY
+	u32 tmp;
+	tmp = raw_cpu_read_4(__preempt_count);
+	if (tmp == preempt_offset)
+		return true;
+
+	/* preempt count == 0 ? */
+	tmp &= ~PREEMPT_NEED_RESCHED;
+	if (tmp != preempt_offset)
+		return false;
+	/* XXX PREEMPT_LOCK_OFFSET */
+	if (current_thread_info()->preempt_lazy_count)
+		return false;
+	return test_thread_flag(TIF_NEED_RESCHED_LAZY);
+#else
 	return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset);
+#endif
 }
 
-#ifdef CONFIG_PREEMPT
-  extern asmlinkage void ___preempt_schedule(void);
+#ifdef CONFIG_PREEMPTION
+#ifdef CONFIG_PREEMPT_RT
+  extern void preempt_schedule_lock(void);
+#endif
+  extern asmlinkage void preempt_schedule_thunk(void);
 # define __preempt_schedule() \
-	asm volatile ("call ___preempt_schedule" : ASM_CALL_CONSTRAINT)
+	asm volatile ("call preempt_schedule_thunk" : ASM_CALL_CONSTRAINT)
 
   extern asmlinkage void preempt_schedule(void);
-  extern asmlinkage void ___preempt_schedule_notrace(void);
+  extern asmlinkage void preempt_schedule_notrace_thunk(void);
 # define __preempt_schedule_notrace() \
-	asm volatile ("call ___preempt_schedule_notrace" : ASM_CALL_CONSTRAINT)
+	asm volatile ("call preempt_schedule_notrace_thunk" : ASM_CALL_CONSTRAINT)
 
   extern asmlinkage void preempt_schedule_notrace(void);
 #endif
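
The lazy variant of should_resched() leans on the same bit layout: an exact match of the raw word against preempt_offset can only happen when the inverted bit is clear, i.e. a hard reschedule is pending, while masking PREEMPT_NEED_RESCHED away compares the nesting count alone before consulting the lazy state. A small standalone demonstration of that masking (invented demo_ helper, not the kernel function):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PREEMPT_NEED_RESCHED	0x80000000u

static bool demo_should_resched(uint32_t count, uint32_t offset,
				uint32_t lazy_count, bool need_resched_lazy)
{
	if (count == offset)		/* count == offset and a hard resched pending */
		return true;
	if ((count & ~PREEMPT_NEED_RESCHED) != offset)
		return false;		/* nested deeper than the caller allows */
	if (lazy_count)
		return false;		/* lazy-disabled sections still open */
	return need_resched_lazy;	/* only the lazy request is left to honour */
}

int main(void)
{
	/* fully enabled (count 0, inverted bit set), lazy resched requested */
	printf("%d\n", demo_should_resched(PREEMPT_NEED_RESCHED, 0, 0, true));	/* 1 */
	/* same, but one lazy-disabled section is still open */
	printf("%d\n", demo_should_resched(PREEMPT_NEED_RESCHED, 0, 1, true));	/* 0 */
	return 0;
}
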