2023-11-07 f45e756958099c35d6afb746df1d40a1c6302cfc
--- a/kernel/include/linux/preempt.h
+++ b/kernel/include/linux/preempt.h
@@ -51,7 +51,11 @@
 #define HARDIRQ_OFFSET	(1UL << HARDIRQ_SHIFT)
 #define NMI_OFFSET	(1UL << NMI_SHIFT)
 
-#define SOFTIRQ_DISABLE_OFFSET	(2 * SOFTIRQ_OFFSET)
+#ifndef CONFIG_PREEMPT_RT_FULL
+# define SOFTIRQ_DISABLE_OFFSET	(2 * SOFTIRQ_OFFSET)
+#else
+# define SOFTIRQ_DISABLE_OFFSET	(0)
+#endif
 
 /* We use the MSB mostly because its available */
 #define PREEMPT_NEED_RESCHED	0x80000000
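On PREEMPT_RT_FULL softirqs run in thread context, so a bottom-half-disabled region no longer needs to be reflected in preempt_count; defining SOFTIRQ_DISABLE_OFFSET as 0 makes local_bh_disable() leave the counter untouched and keeps the region preemptible. For context (not part of this patch), mainline's bottom_half.h of this era consumes the constant roughly as follows:

    static inline void local_bh_disable(void)
    {
    	/* cnt is 0 on RT_FULL: preempt_count() is left alone */
    	__local_bh_disable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET);
    }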
@@ -81,9 +85,15 @@
 #include <asm/preempt.h>
 
 #define hardirq_count()	(preempt_count() & HARDIRQ_MASK)
-#define softirq_count()	(preempt_count() & SOFTIRQ_MASK)
 #define irq_count()	(preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
				 | NMI_MASK))
+#ifndef CONFIG_PREEMPT_RT_FULL
+# define softirq_count()	(preempt_count() & SOFTIRQ_MASK)
+# define in_serving_softirq()	(softirq_count() & SOFTIRQ_OFFSET)
+#else
+# define softirq_count()	((unsigned long)current->softirq_nestcnt)
+extern int in_serving_softirq(void);
+#endif
 
 /*
  * Are we doing bottom half or hardware interrupt processing?
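With softirqs handled by threads, "in softirq" becomes a per-task property: softirq_count() reads current->softirq_nestcnt instead of preempt_count() bits, and in_serving_softirq() turns into an out-of-line function whose body lives in the -rt softirq code. Callers written against the generic predicates keep working on both configurations; an illustrative helper (the function name here is hypothetical, not from the patch):

    static bool may_use_fastpath(void)
    {
    	/* correct on RT too: the state comes from the task,
    	 * not from preempt_count() bits */
    	return !in_irq() && !in_serving_softirq();
    }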
@@ -101,7 +111,6 @@
 #define in_irq()		(hardirq_count())
 #define in_softirq()		(softirq_count())
 #define in_interrupt()		(irq_count())
-#define in_serving_softirq()	(softirq_count() & SOFTIRQ_OFFSET)
 #define in_nmi()		(preempt_count() & NMI_MASK)
 #define in_task()		(!(preempt_count() & \
				   (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
@@ -118,7 +127,11 @@
 /*
  * The preempt_count offset after spin_lock()
  */
+#if !defined(CONFIG_PREEMPT_RT_FULL)
 #define PREEMPT_LOCK_OFFSET	PREEMPT_DISABLE_OFFSET
+#else
+#define PREEMPT_LOCK_OFFSET	0
+#endif
 
 /*
  * The preempt_count offset needed for things like:
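On RT_FULL, spin_lock() maps to a sleeping rtmutex and does not raise preempt_count, so the count expected while holding a spinlock is 0. The main consumer is the might-sleep annotation in cond_resched_lock(); abridged from sched.h of this era (shown for context, not part of the patch):

    #define cond_resched_lock(lock) ({				\
    	___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
    	__cond_resched_lock(lock);				\
    })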
@@ -167,11 +180,31 @@
 #define preempt_count_inc() preempt_count_add(1)
 #define preempt_count_dec() preempt_count_sub(1)
 
+#ifdef CONFIG_PREEMPT_LAZY
+#define add_preempt_lazy_count(val)	do { preempt_lazy_count() += (val); } while (0)
+#define sub_preempt_lazy_count(val)	do { preempt_lazy_count() -= (val); } while (0)
+#define inc_preempt_lazy_count()	add_preempt_lazy_count(1)
+#define dec_preempt_lazy_count()	sub_preempt_lazy_count(1)
+#define preempt_lazy_count()		(current_thread_info()->preempt_lazy_count)
+#else
+#define add_preempt_lazy_count(val)	do { } while (0)
+#define sub_preempt_lazy_count(val)	do { } while (0)
+#define inc_preempt_lazy_count()	do { } while (0)
+#define dec_preempt_lazy_count()	do { } while (0)
+#define preempt_lazy_count()		(0)
+#endif
+
 #ifdef CONFIG_PREEMPT_COUNT
 
 #define preempt_disable() \
 do { \
 	preempt_count_inc(); \
+	barrier(); \
+} while (0)
+
+#define preempt_lazy_disable() \
+do { \
+	inc_preempt_lazy_count(); \
 	barrier(); \
 } while (0)
 
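CONFIG_PREEMPT_LAZY (an -rt feature) adds a second counter in thread_info. Wakeups of SCHED_OTHER tasks set a separate TIF_NEED_RESCHED_LAZY flag, which is honoured only once the lazy counter drops to zero, so fair tasks defer their preemption while realtime tasks still use the immediate flag. A hedged sketch of the kind of test the -rt scheduler paths make (TIF_NEED_RESCHED_LAZY is from the -rt series; the helper name is illustrative):

    /* Illustrative, not the patch's exact code */
    static inline bool need_resched_lazy_now(void)
    {
    	return test_thread_flag(TIF_NEED_RESCHED_LAZY) &&
    	       !preempt_lazy_count();
    }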
@@ -181,9 +214,40 @@
 	preempt_count_dec(); \
 } while (0)
 
-#define preempt_enable_no_resched() sched_preempt_enable_no_resched()
+#ifdef CONFIG_PREEMPT_RT_BASE
+# define preempt_enable_no_resched() sched_preempt_enable_no_resched()
+# define preempt_check_resched_rt() preempt_check_resched()
+#else
+# define preempt_enable_no_resched() preempt_enable()
+# define preempt_check_resched_rt() barrier();
+#endif
 
 #define preemptible()	(preempt_count() == 0 && !irqs_disabled())
 
+#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
+
+extern void migrate_disable(void);
+extern void migrate_enable(void);
+
+int __migrate_disabled(struct task_struct *p);
+
+#elif !defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
+
+extern void migrate_disable(void);
+extern void migrate_enable(void);
+static inline int __migrate_disabled(struct task_struct *p)
+{
+	return 0;
+}
+
+#else
+#define migrate_disable()		preempt_disable()
+#define migrate_enable()		preempt_enable()
+static inline int __migrate_disabled(struct task_struct *p)
+{
+	return 0;
+}
+#endif
 
 #ifdef CONFIG_PREEMPT
 #define preempt_enable() \
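migrate_disable()/migrate_enable() pin a task to its current CPU without disabling preemption; on RT this is how code keeps smp_processor_id() stable while remaining schedulable, and on !RT_BASE builds the #else branch collapses the pair to plain preempt_disable()/preempt_enable(). An illustrative use (the function name is hypothetical):

    static void report_cpu(void)
    {
    	migrate_disable();	/* CPU binding stable, still preemptible on RT */
    	pr_info("running on CPU %d\n", smp_processor_id());
    	migrate_enable();
    }

Note that pinning is not mutual exclusion: other tasks can still run on the same CPU, so shared per-CPU data still needs a lock on RT.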
@@ -206,11 +270,24 @@
 		__preempt_schedule(); \
 } while (0)
 
+#define preempt_lazy_enable() \
+do { \
+	dec_preempt_lazy_count(); \
+	barrier(); \
+	preempt_check_resched(); \
+} while (0)
+
 #else /* !CONFIG_PREEMPT */
 #define preempt_enable() \
 do { \
 	barrier(); \
 	preempt_count_dec(); \
+} while (0)
+
+#define preempt_lazy_enable() \
+do { \
+	dec_preempt_lazy_count(); \
+	barrier(); \
 } while (0)
 
 #define preempt_enable_notrace() \
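preempt_lazy_disable(), introduced earlier in this patch, pairs with the two preempt_lazy_enable() variants above: the CONFIG_PREEMPT flavour folds in a resched check on release, while the !CONFIG_PREEMPT one merely drops the counter. Illustrative pairing:

    preempt_lazy_disable();
    /* ... SCHED_OTHER tasks will not preempt this section;
     *     realtime tasks still can ... */
    preempt_lazy_enable();	/* may reschedule here on CONFIG_PREEMPT */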
@@ -251,8 +328,16 @@
 #define preempt_disable_notrace()		barrier()
 #define preempt_enable_no_resched_notrace()	barrier()
 #define preempt_enable_notrace()		barrier()
+#define preempt_check_resched_rt()		barrier()
 #define preemptible()				0
 
+#define migrate_disable()			barrier()
+#define migrate_enable()			barrier()
+
+static inline int __migrate_disabled(struct task_struct *p)
+{
+	return 0;
+}
 #endif /* CONFIG_PREEMPT_COUNT */
 
 #ifdef MODULE
@@ -271,10 +356,22 @@
 } while (0)
 #define preempt_fold_need_resched() \
 do { \
-	if (tif_need_resched()) \
+	if (tif_need_resched_now()) \
 		set_preempt_need_resched(); \
 } while (0)
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+# define preempt_disable_rt()		preempt_disable()
+# define preempt_enable_rt()		preempt_enable()
+# define preempt_disable_nort()	barrier()
+# define preempt_enable_nort()		barrier()
+#else
+# define preempt_disable_rt()		barrier()
+# define preempt_enable_rt()		barrier()
+# define preempt_disable_nort()	preempt_disable()
+# define preempt_enable_nort()		preempt_enable()
+#endif
+
 #ifdef CONFIG_PREEMPT_NOTIFIERS
 
 struct preempt_notifier;
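The _rt/_nort pairs let shared code say "disable preemption only on RT" and "only on !RT" without inline #ifdefs; together with the switch to tif_need_resched_now() above (the immediate, non-lazy flag), they let one source tree serve both preemption models. An illustrative use of the _nort flavour (the surrounding function is hypothetical):

    static void touch_percpu_state(void)
    {
    	/*
    	 * !RT: preemption off, per-CPU access is implicitly exclusive.
    	 * RT:  compiles to barrier(); exclusion must come from a
    	 *      sleeping lock taken by the caller instead.
    	 */
    	preempt_disable_nort();
    	/* ... per-CPU accesses ... */
    	preempt_enable_nort();
    }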