2023-11-06 e3e12f52b214121840b44c91de5b3e5af5d3eb84
kernel/arch/x86/include/asm/preempt.h
@@ -86,9 +86,24 @@
  * a decrement which hits zero means we have no preempt_count and should
  * reschedule.
  */
-static __always_inline bool __preempt_count_dec_and_test(void)
+static __always_inline bool ____preempt_count_dec_and_test(void)
 {
 	GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), e);
+}
+
+static __always_inline bool __preempt_count_dec_and_test(void)
+{
+	if (____preempt_count_dec_and_test())
+		return true;
+#ifdef CONFIG_PREEMPT_LAZY
+	if (preempt_count())
+		return false;
+	if (current_thread_info()->preempt_lazy_count)
+		return false;
+	return test_thread_flag(TIF_NEED_RESCHED_LAZY);
+#else
+	return false;
+#endif
 }
 
 /*
@@ -96,7 +111,23 @@
  */
 static __always_inline bool should_resched(int preempt_offset)
 {
+#ifdef CONFIG_PREEMPT_LAZY
+	u32 tmp;
+
+	tmp = raw_cpu_read_4(__preempt_count);
+	if (tmp == preempt_offset)
+		return true;
+
+	/* preempt count == 0 ? */
+	tmp &= ~PREEMPT_NEED_RESCHED;
+	if (tmp != preempt_offset)
+		return false;
+	if (current_thread_info()->preempt_lazy_count)
+		return false;
+	return test_thread_flag(TIF_NEED_RESCHED_LAZY);
+#else
 	return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset);
+#endif
 }
 
 #ifdef CONFIG_PREEMPT
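
As a reading aid (not part of the patch), here is a minimal user-space sketch of the decision the CONFIG_PREEMPT_LAZY branch of should_resched() makes above, assuming x86's inverted PREEMPT_NEED_RESCHED encoding (the bit being set means "no immediate reschedule needed"). struct fake_task, lazy_should_resched() and need_resched_lazy are hypothetical stand-ins for the per-CPU preempt count, current_thread_info()->preempt_lazy_count and TIF_NEED_RESCHED_LAZY; they are illustrative only, not kernel APIs.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in: on x86 the NEED_RESCHED bit is folded into the
 * preempt count inverted, so the bit being SET means "no immediate resched". */
#define PREEMPT_NEED_RESCHED 0x80000000u

/* Illustrative stand-in for the per-task state consulted by the lazy path. */
struct fake_task {
	unsigned int preempt_lazy_count; /* lazy-preemption disable depth */
	bool need_resched_lazy;          /* stands in for TIF_NEED_RESCHED_LAZY */
};

static bool lazy_should_resched(uint32_t preempt_count, uint32_t preempt_offset,
				const struct fake_task *t)
{
	/* Fast path: count equals the offset with the inverted NEED_RESCHED
	 * bit clear, i.e. an immediate reschedule is already requested. */
	if (preempt_count == preempt_offset)
		return true;

	/* Otherwise ignore the NEED_RESCHED bit and look at the raw count. */
	preempt_count &= ~PREEMPT_NEED_RESCHED;
	if (preempt_count != preempt_offset)
		return false;        /* still inside a preempt-disabled region */

	if (t->preempt_lazy_count)
		return false;        /* lazy preemption is disabled for this task */

	return t->need_resched_lazy; /* resched only if a lazy request is pending */
}

int main(void)
{
	struct fake_task t = { .preempt_lazy_count = 0, .need_resched_lazy = true };

	/* Preempt count is 0, no immediate NEED_RESCHED (inverted bit set),
	 * but a lazy request is pending: prints 1. */
	printf("%d\n", lazy_should_resched(PREEMPT_NEED_RESCHED, 0, &t));
	return 0;
}

The sketch mirrors the hunk's order of checks: a pending hard reschedule wins immediately, a non-zero preempt count or lazy-disable depth blocks preemption, and only then does the lazy flag decide.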