hc
2024-05-13 9d77db3c730780c8ef5ccd4b66403ff5675cfe4e
kernel/arch/s390/include/asm/preempt.h
....@@ -8,6 +8,8 @@
88
99 #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
1010
11
+/* We use the MSB mostly because it's available */
12
+#define PREEMPT_NEED_RESCHED 0x80000000
1113 #define PREEMPT_ENABLED (0 + PREEMPT_NEED_RESCHED)
1214
1315 static inline int preempt_count(void)
....@@ -27,12 +29,6 @@
2729 old, new) != old);
2830 }
2931
30
-#define init_task_preempt_count(p) do { } while (0)
31
-
32
-#define init_idle_preempt_count(p, cpu) do { \
33
- S390_lowcore.preempt_count = PREEMPT_ENABLED; \
34
-} while (0)
35
-
3632 static inline void set_preempt_need_resched(void)
3733 {
3834 __atomic_and(~PREEMPT_NEED_RESCHED, &S390_lowcore.preempt_count);
....@@ -50,10 +46,17 @@
5046
5147 static inline void __preempt_count_add(int val)
5248 {
53
- if (__builtin_constant_p(val) && (val >= -128) && (val <= 127))
54
- __atomic_add_const(val, &S390_lowcore.preempt_count);
55
- else
56
- __atomic_add(val, &S390_lowcore.preempt_count);
49
+ /*
50
+ * With some obscure config options and CONFIG_PROFILE_ALL_BRANCHES
51
+ * enabled, gcc 12 fails to handle __builtin_constant_p().
52
+ */
53
+ if (!IS_ENABLED(CONFIG_PROFILE_ALL_BRANCHES)) {
54
+ if (__builtin_constant_p(val) && (val >= -128) && (val <= 127)) {
55
+ __atomic_add_const(val, &S390_lowcore.preempt_count);
56
+ return;
57
+ }
58
+ }
59
+ __atomic_add(val, &S390_lowcore.preempt_count);
5760 }
5861
5962 static inline void __preempt_count_sub(int val)
....@@ -85,12 +88,6 @@
8588 {
8689 S390_lowcore.preempt_count = pc;
8790 }
88
-
89
-#define init_task_preempt_count(p) do { } while (0)
90
-
91
-#define init_idle_preempt_count(p, cpu) do { \
92
- S390_lowcore.preempt_count = PREEMPT_ENABLED; \
93
-} while (0)
9491
9592 static inline void set_preempt_need_resched(void)
9693 {
....@@ -128,11 +125,15 @@
128125
129126 #endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
130127
131
-#ifdef CONFIG_PREEMPT
128
+#define init_task_preempt_count(p) do { } while (0)
129
+/* Deferred to CPU bringup time */
130
+#define init_idle_preempt_count(p, cpu) do { } while (0)
131
+
132
+#ifdef CONFIG_PREEMPTION
132133 extern asmlinkage void preempt_schedule(void);
133134 #define __preempt_schedule() preempt_schedule()
134135 extern asmlinkage void preempt_schedule_notrace(void);
135136 #define __preempt_schedule_notrace() preempt_schedule_notrace()
136
-#endif /* CONFIG_PREEMPT */
137
+#endif /* CONFIG_PREEMPTION */
137138
138139 #endif /* __ASM_PREEMPT_H */