2023-11-20 2e7bd41e4e8ab3d1efdabd9e263a2f7fe79bff8c
kernel/include/linux/interrupt.h
@@ -61,6 +61,7 @@
  * interrupt handler after suspending interrupts. For system
  * wakeup devices users need to implement wakeup detection in
  * their interrupt handlers.
+ * IRQF_NO_SOFTIRQ_CALL - Do not process softirqs in the irq thread context (RT)
  */
 #define IRQF_SHARED		0x00000080
 #define IRQF_PROBE_SHARED	0x00000100
@@ -74,6 +75,7 @@
 #define IRQF_NO_THREAD		0x00010000
 #define IRQF_EARLY_RESUME	0x00020000
 #define IRQF_COND_SUSPEND	0x00040000
+#define IRQF_NO_SOFTIRQ_CALL	0x00080000
 
 #define IRQF_TIMER		(__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
 
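The two hunks above introduce IRQF_NO_SOFTIRQ_CALL, a request_irq() flag telling the forced-threaded IRQ path not to drain pending softirqs when the irq thread finishes its handler, which bounds the thread's execution time on PREEMPT_RT. A minimal, hypothetical driver sketch of how the flag would be passed (the foo_* names are illustrative, not part of this patch):

#include <linux/interrupt.h>

/* Hypothetical example: the hardirq handler defers to the irq thread;
 * IRQF_NO_SOFTIRQ_CALL keeps that thread from also running pending
 * softirqs before it goes back to sleep (RT kernels only). */
static irqreturn_t foo_hardirq(int irq, void *dev)
{
	return IRQ_WAKE_THREAD;
}

static irqreturn_t foo_thread_fn(int irq, void *dev)
{
	/* latency-critical device work goes here */
	return IRQ_HANDLED;
}

static int foo_setup_irq(unsigned int irq, void *dev)
{
	return request_threaded_irq(irq, foo_hardirq, foo_thread_fn,
				    IRQF_NO_SOFTIRQ_CALL, "foo", dev);
}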
@@ -427,7 +429,11 @@
 			   bool state);
 
 #ifdef CONFIG_IRQ_FORCED_THREADING
+# ifdef CONFIG_PREEMPT_RT_BASE
+#  define force_irqthreads	(true)
+# else
 extern bool force_irqthreads;
+# endif
 #else
 #define force_irqthreads	(0)
 #endif
@@ -493,9 +499,10 @@
 	void	(*action)(struct softirq_action *);
 };
 
+#ifndef CONFIG_PREEMPT_RT_FULL
 asmlinkage void do_softirq(void);
 asmlinkage void __do_softirq(void);
-
+static inline void thread_do_softirq(void) { do_softirq(); }
 #ifdef __ARCH_HAS_DO_SOFTIRQ
 void do_softirq_own_stack(void);
 #else
@@ -504,13 +511,25 @@
 	__do_softirq();
 }
 #endif
+#else
+extern void thread_do_softirq(void);
+#endif
 
 extern void open_softirq(int nr, void (*action)(struct softirq_action *));
 extern void softirq_init(void);
 extern void __raise_softirq_irqoff(unsigned int nr);
+#ifdef CONFIG_PREEMPT_RT_FULL
+extern void __raise_softirq_irqoff_ksoft(unsigned int nr);
+#else
+static inline void __raise_softirq_irqoff_ksoft(unsigned int nr)
+{
+	__raise_softirq_irqoff(nr);
+}
+#endif
 
 extern void raise_softirq_irqoff(unsigned int nr);
 extern void raise_softirq(unsigned int nr);
+extern void softirq_check_pending_idle(void);
 
 DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
 
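On PREEMPT_RT_FULL the new __raise_softirq_irqoff_ksoft() defers the softirq to the ksoftirqd thread rather than processing it in the current context; on other configurations it falls back to plain __raise_softirq_irqoff(). A hedged usage sketch (the wrapper function is hypothetical; as with __raise_softirq_irqoff(), the caller must have interrupts disabled):

#include <linux/interrupt.h>

/* Illustrative only: queue NET_RX work, letting RT kernels push it
 * to ksoftirqd instead of running it from this context. */
static void foo_queue_rx_work(void)
{
	unsigned long flags;

	local_irq_save(flags);
	__raise_softirq_irqoff_ksoft(NET_RX_SOFTIRQ);
	local_irq_restore(flags);
}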
@@ -532,8 +551,9 @@
      to be executed on some cpu at least once after this.
    * If the tasklet is already scheduled, but its execution is still not
      started, it will be executed only once.
-   * If this tasklet is already running on another CPU (or schedule is called
-     from tasklet itself), it is rescheduled for later.
+   * If this tasklet is already running on another CPU, it is rescheduled
+     for later.
+   * Schedule must not be called from the tasklet itself (a lockup occurs)
    * Tasklet is strictly serialized wrt itself, but not
      wrt another tasklets. If client needs some intertask synchronization,
      he makes it with spinlocks.
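The reworded comment tightens the tasklet contract: rescheduling a tasklet from inside its own function is no longer merely deferred, it can lock up. A hypothetical sketch of the pattern this forbids and a safe alternative (the foo_* names are illustrative):

#include <linux/interrupt.h>

/* Forbidden by the new rule: re-arming from inside the tasklet
 * function while it is still in the RUN state risks a lockup. */
static void bad_tasklet_func(unsigned long data)
{
	struct tasklet_struct *self = (struct tasklet_struct *)data;

	/* ... work ... */
	tasklet_schedule(self);		/* lockup risk; don't do this */
}

/* Safe alternative: let the interrupt handler re-arm the tasklet. */
static irqreturn_t foo_irq(int irq, void *dev_id)
{
	struct tasklet_struct *t = dev_id;

	tasklet_schedule(t);
	return IRQ_HANDLED;
}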
@@ -558,13 +578,26 @@
 enum
 {
 	TASKLET_STATE_SCHED,	/* Tasklet is scheduled for execution */
-	TASKLET_STATE_RUN	/* Tasklet is running (SMP only) */
+	TASKLET_STATE_RUN,	/* Tasklet is running (SMP only) */
+	TASKLET_STATE_PENDING,	/* Tasklet is pending */
+	TASKLET_STATE_CHAINED	/* Tasklet is chained */
 };
 
-#ifdef CONFIG_SMP
+#define TASKLET_STATEF_SCHED	(1 << TASKLET_STATE_SCHED)
+#define TASKLET_STATEF_RUN	(1 << TASKLET_STATE_RUN)
+#define TASKLET_STATEF_PENDING	(1 << TASKLET_STATE_PENDING)
+#define TASKLET_STATEF_CHAINED	(1 << TASKLET_STATE_CHAINED)
+#define TASKLET_STATEF_RC	(TASKLET_STATEF_RUN | TASKLET_STATEF_CHAINED)
+
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
 static inline int tasklet_trylock(struct tasklet_struct *t)
 {
 	return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
+}
+
+static inline int tasklet_tryunlock(struct tasklet_struct *t)
+{
+	return cmpxchg(&t->state, TASKLET_STATEF_RUN, 0) == TASKLET_STATEF_RUN;
 }
 
 static inline void tasklet_unlock(struct tasklet_struct *t)
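tasklet_tryunlock() succeeds only when RUN is the sole bit in t->state, so the cmpxchg fails whenever SCHED (or PENDING/CHAINED) was set while the function ran, and the runner can loop instead of losing a wakeup. A deliberately simplified, illustrative runner loop (the real one lives in kernel/softirq.c in the RT patch; run_one_tasklet() is hypothetical and ignores the PENDING/CHAINED and disable-count handling):

#include <linux/interrupt.h>

/* Simplified sketch: execute a tasklet, then retry for as long as
 * tasklet_tryunlock() fails, i.e. while it was re-scheduled during
 * execution. */
static void run_one_tasklet(struct tasklet_struct *t)
{
	if (!tasklet_trylock(t))	/* running on another CPU */
		return;

	do {
		if (test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
			t->func(t->data);
		/* tryunlock drops RUN only if no other bit appeared */
	} while (!tasklet_tryunlock(t));
}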
@@ -573,12 +606,11 @@
 	clear_bit(TASKLET_STATE_RUN, &(t)->state);
 }
 
-static inline void tasklet_unlock_wait(struct tasklet_struct *t)
-{
-	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
-}
+extern void tasklet_unlock_wait(struct tasklet_struct *t);
+
 #else
 #define tasklet_trylock(t) 1
+#define tasklet_tryunlock(t)	1
 #define tasklet_unlock_wait(t) do { } while (0)
 #define tasklet_unlock(t) do { } while (0)
 #endif
@@ -612,17 +644,18 @@
 	smp_mb();
 }
 
-static inline void tasklet_enable(struct tasklet_struct *t)
-{
-	smp_mb__before_atomic();
-	atomic_dec(&t->count);
-}
-
+extern void tasklet_enable(struct tasklet_struct *t);
 extern void tasklet_kill(struct tasklet_struct *t);
 extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
 extern void tasklet_init(struct tasklet_struct *t,
 			 void (*func)(unsigned long), unsigned long data);
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+extern void softirq_early_init(void);
+#else
+static inline void softirq_early_init(void) { }
+#endif
+
 struct tasklet_hrtimer {
 	struct hrtimer		timer;
 	struct tasklet_struct	tasklet;
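With tasklet_enable() and tasklet_unlock_wait() moved out of line, an RT kernel is free to implement the wait by sleeping rather than spinning. A hypothetical driver pattern that exercises the disable/enable pair (struct foo_dev and the function are illustrative):

#include <linux/interrupt.h>

struct foo_dev {
	struct tasklet_struct	rx_tasklet;
	/* ... state the tasklet reads ... */
};

/* Illustrative: fence the tasklet out while changing state it
 * depends on.  tasklet_disable() waits for a running instance to
 * finish; with this patch that wait may sleep on RT. */
static void foo_reconfigure(struct foo_dev *fd)
{
	tasklet_disable(&fd->rx_tasklet);
	/* ... update fields the tasklet uses ... */
	tasklet_enable(&fd->rx_tasklet);
}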