From 244b2c5ca8b14627e4a17755e5922221e121c771 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Wed, 09 Oct 2024 06:15:07 +0000
Subject: [PATCH] softirq: drop PREEMPT_RT_FULL handling, sync with upstream
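
Replace the PREEMPT_RT_FULL softirq implementation in kernel/softirq.c
with the upstream-style one:

- Remove the per-softirq local locks, the softirq runner tracking, the
  ktimersoftd thread and softirq_check_pending_idle(), along with the
  RT tasklet chaining/locking workarounds.
- Track in-flight softirqs in a per-CPU active_softirqs mask and, when
  CONFIG_RT_SOFTINT_OPTIMIZATION is set, defer LONG_SOFTIRQ_MASK
  softirqs to ksoftirqd while cpupri_check_rt() reports RT load
  (softirq_deferred_for_rt()).
- Switch the lockdep annotations to lockdep_softirqs_on/off() and
  lockdep_hardirq_enter/exit(), and split irq_enter()/irq_exit() into
  irq_enter_rcu()/irq_exit_rcu() plus the RCU-updating wrappers.
- Add tasklet_setup() with callback-based tasklets and the
  trace_tasklet_entry/exit tracepoints; drop tasklet_hrtimer and stop
  skipping do_softirq() while ksoftirqd is running.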
---
kernel/kernel/softirq.c | 954 +++++++++++++----------------------------------------------
 1 file changed, 210 insertions(+), 744 deletions(-)
diff --git a/kernel/kernel/softirq.c b/kernel/kernel/softirq.c
index 9bad7a1..d59412b 100644
--- a/kernel/kernel/softirq.c
+++ b/kernel/kernel/softirq.c
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/kernel/softirq.c
*
* Copyright (C) 1992 Linus Torvalds
- *
- * Distribute under GPLv2.
*
* Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
*/
@@ -21,17 +20,17 @@
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
-#include <linux/delay.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/smpboot.h>
#include <linux/tick.h>
-#include <linux/locallock.h>
#include <linux/irq.h>
-#include <linux/sched/types.h>
#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(irq_handler_entry);
+EXPORT_TRACEPOINT_SYMBOL_GPL(irq_handler_exit);
/*
- No shared variables, all the data are CPU local.
@@ -59,135 +58,19 @@
static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
-#ifdef CONFIG_PREEMPT_RT_FULL
-#define TIMER_SOFTIRQS ((1 << TIMER_SOFTIRQ) | (1 << HRTIMER_SOFTIRQ))
-DEFINE_PER_CPU(struct task_struct *, ktimer_softirqd);
-#endif
+EXPORT_PER_CPU_SYMBOL_GPL(ksoftirqd);
+
+/*
+ * active_softirqs -- per cpu, a mask of softirqs that are being handled,
+ * with the expectation that approximate answers are acceptable and therefore
+ * no synchronization.
+ */
+DEFINE_PER_CPU(__u32, active_softirqs);
const char * const softirq_to_name[NR_SOFTIRQS] = {
"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
"TASKLET", "SCHED", "HRTIMER", "RCU"
};
-
-#ifdef CONFIG_NO_HZ_COMMON
-# ifdef CONFIG_PREEMPT_RT_FULL
-
-struct softirq_runner {
- struct task_struct *runner[NR_SOFTIRQS];
-};
-
-static DEFINE_PER_CPU(struct softirq_runner, softirq_runners);
-
-static inline void softirq_set_runner(unsigned int sirq)
-{
- struct softirq_runner *sr = this_cpu_ptr(&softirq_runners);
-
- sr->runner[sirq] = current;
-}
-
-static inline void softirq_clr_runner(unsigned int sirq)
-{
- struct softirq_runner *sr = this_cpu_ptr(&softirq_runners);
-
- sr->runner[sirq] = NULL;
-}
-
-static bool softirq_check_runner_tsk(struct task_struct *tsk,
- unsigned int *pending)
-{
- bool ret = false;
-
- if (!tsk)
- return ret;
-
- /*
- * The wakeup code in rtmutex.c wakes up the task
- * _before_ it sets pi_blocked_on to NULL under
- * tsk->pi_lock. So we need to check for both: state
- * and pi_blocked_on.
- * The test against UNINTERRUPTIBLE + ->sleeping_lock is in case the
- * task does cpu_chill().
- */
- raw_spin_lock(&tsk->pi_lock);
- if (tsk->pi_blocked_on || tsk->state == TASK_RUNNING ||
- (tsk->state == TASK_UNINTERRUPTIBLE && tsk->sleeping_lock)) {
- /* Clear all bits pending in that task */
- *pending &= ~(tsk->softirqs_raised);
- ret = true;
- }
- raw_spin_unlock(&tsk->pi_lock);
-
- return ret;
-}
-
-/*
- * On preempt-rt a softirq running context might be blocked on a
- * lock. There might be no other runnable task on this CPU because the
- * lock owner runs on some other CPU. So we have to go into idle with
- * the pending bit set. Therefor we need to check this otherwise we
- * warn about false positives which confuses users and defeats the
- * whole purpose of this test.
- *
- * This code is called with interrupts disabled.
- */
-void softirq_check_pending_idle(void)
-{
- struct task_struct *tsk;
- static int rate_limit;
- struct softirq_runner *sr = this_cpu_ptr(&softirq_runners);
- u32 warnpending;
- int i;
-
- if (rate_limit >= 10)
- return;
-
- warnpending = local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK;
- if (!warnpending)
- return;
- for (i = 0; i < NR_SOFTIRQS; i++) {
- tsk = sr->runner[i];
-
- if (softirq_check_runner_tsk(tsk, &warnpending))
- warnpending &= ~(1 << i);
- }
-
- if (warnpending) {
- tsk = __this_cpu_read(ksoftirqd);
- softirq_check_runner_tsk(tsk, &warnpending);
- }
-
- if (warnpending) {
- tsk = __this_cpu_read(ktimer_softirqd);
- softirq_check_runner_tsk(tsk, &warnpending);
- }
-
- if (warnpending) {
- printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
- warnpending);
- rate_limit++;
- }
-}
-# else
-/*
- * On !PREEMPT_RT we just printk rate limited:
- */
-void softirq_check_pending_idle(void)
-{
- static int rate_limit;
-
- if (rate_limit < 10 &&
- (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) {
- printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
- local_softirq_pending());
- rate_limit++;
- }
-}
-# endif
-
-#else /* !CONFIG_NO_HZ_COMMON */
-static inline void softirq_set_runner(unsigned int sirq) { }
-static inline void softirq_clr_runner(unsigned int sirq) { }
-#endif
/*
* we cannot loop indefinitely here to avoid userspace starvation,
@@ -202,94 +85,6 @@
if (tsk && tsk->state != TASK_RUNNING)
wake_up_process(tsk);
-}
-
-#ifdef CONFIG_PREEMPT_RT_FULL
-static void wakeup_timer_softirqd(void)
-{
- /* Interrupts are disabled: no need to stop preemption */
- struct task_struct *tsk = __this_cpu_read(ktimer_softirqd);
-
- if (tsk && tsk->state != TASK_RUNNING)
- wake_up_process(tsk);
-}
-#endif
-
-static void handle_softirq(unsigned int vec_nr)
-{
- struct softirq_action *h = softirq_vec + vec_nr;
- int prev_count;
-
- prev_count = preempt_count();
-
- kstat_incr_softirqs_this_cpu(vec_nr);
-
- trace_softirq_entry(vec_nr);
- h->action(h);
- trace_softirq_exit(vec_nr);
- if (unlikely(prev_count != preempt_count())) {
- pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
- vec_nr, softirq_to_name[vec_nr], h->action,
- prev_count, preempt_count());
- preempt_count_set(prev_count);
- }
-}
-
-#ifndef CONFIG_PREEMPT_RT_FULL
-/*
- * If ksoftirqd is scheduled, we do not want to process pending softirqs
- * right now. Let ksoftirqd handle this at its own rate, to get fairness,
- * unless we're doing some of the synchronous softirqs.
- */
-#define SOFTIRQ_NOW_MASK ((1 << HI_SOFTIRQ) | (1 << TASKLET_SOFTIRQ))
-static bool ksoftirqd_running(unsigned long pending)
-{
- struct task_struct *tsk = __this_cpu_read(ksoftirqd);
-
- if (pending & SOFTIRQ_NOW_MASK)
- return false;
- return tsk && (tsk->state == TASK_RUNNING);
-}
-
-static inline int ksoftirqd_softirq_pending(void)
-{
- return local_softirq_pending();
-}
-
-static void handle_pending_softirqs(u32 pending)
-{
- struct softirq_action *h = softirq_vec;
- int softirq_bit;
-
- local_irq_enable();
-
- h = softirq_vec;
-
- while ((softirq_bit = ffs(pending))) {
- unsigned int vec_nr;
-
- h += softirq_bit - 1;
- vec_nr = h - softirq_vec;
- handle_softirq(vec_nr);
-
- h++;
- pending >>= softirq_bit;
- }
-
- rcu_bh_qs();
- local_irq_disable();
-}
-
-static void run_ksoftirqd(unsigned int cpu)
-{
- local_irq_disable();
- if (ksoftirqd_softirq_pending()) {
- __do_softirq();
- local_irq_enable();
- cond_resched();
- return;
- }
- local_irq_enable();
}
/*
@@ -307,6 +102,12 @@
* where hardirqs are disabled legitimately:
*/
#ifdef CONFIG_TRACE_IRQFLAGS
+
+DEFINE_PER_CPU(int, hardirqs_enabled);
+DEFINE_PER_CPU(int, hardirq_context);
+EXPORT_PER_CPU_SYMBOL_GPL(hardirqs_enabled);
+EXPORT_PER_CPU_SYMBOL_GPL(hardirq_context);
+
void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
{
unsigned long flags;
@@ -326,7 +127,7 @@
* Were softirqs turned off above:
*/
if (softirq_count() == (cnt & SOFTIRQ_MASK))
- trace_softirqs_off(ip);
+ lockdep_softirqs_off(ip);
raw_local_irq_restore(flags);
if (preempt_count() == cnt) {
@@ -347,7 +148,7 @@
trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
if (softirq_count() == (cnt & SOFTIRQ_MASK))
- trace_softirqs_on(_RET_IP_);
+ lockdep_softirqs_on(_RET_IP_);
__preempt_count_sub(cnt);
}
@@ -374,7 +175,7 @@
* Are softirqs going to be turned on now:
*/
if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
- trace_softirqs_on(ip);
+ lockdep_softirqs_on(ip);
/*
* Keep preemption disabled until we are done with
* softirq processing:
@@ -424,9 +225,9 @@
{
bool in_hardirq = false;
- if (trace_hardirq_context(current)) {
+ if (lockdep_hardirq_context()) {
in_hardirq = true;
- trace_hardirq_exit();
+ lockdep_hardirq_exit();
}
lockdep_softirq_enter();
@@ -439,49 +240,102 @@
lockdep_softirq_exit();
if (in_hardirq)
- trace_hardirq_enter();
+ lockdep_hardirq_enter();
}
#else
static inline bool lockdep_softirq_start(void) { return false; }
static inline void lockdep_softirq_end(bool in_hardirq) { }
#endif
+#define softirq_deferred_for_rt(pending) \
+({ \
+ __u32 deferred = 0; \
+ if (cpupri_check_rt()) { \
+ deferred = pending & LONG_SOFTIRQ_MASK; \
+ pending &= ~LONG_SOFTIRQ_MASK; \
+ } \
+ deferred; \
+})
+
asmlinkage __visible void __softirq_entry __do_softirq(void)
{
unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
unsigned long old_flags = current->flags;
int max_restart = MAX_SOFTIRQ_RESTART;
+ struct softirq_action *h;
bool in_hardirq;
+ __u32 deferred;
__u32 pending;
+ int softirq_bit;
/*
- * Mask out PF_MEMALLOC s current task context is borrowed for the
- * softirq. A softirq handled such as network RX might set PF_MEMALLOC
- * again if the socket is related to swap
+ * Mask out PF_MEMALLOC as the current task context is borrowed for the
+ * softirq. A softirq handled, such as network RX, might set PF_MEMALLOC
+ * again if the socket is related to swapping.
*/
current->flags &= ~PF_MEMALLOC;
pending = local_softirq_pending();
+ deferred = softirq_deferred_for_rt(pending);
account_irq_enter_time(current);
-
__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
in_hardirq = lockdep_softirq_start();
restart:
/* Reset the pending bitmask before enabling irqs */
- set_softirq_pending(0);
+ set_softirq_pending(deferred);
+ __this_cpu_write(active_softirqs, pending);
- handle_pending_softirqs(pending);
+ local_irq_enable();
+
+ h = softirq_vec;
+
+ while ((softirq_bit = ffs(pending))) {
+ unsigned int vec_nr;
+ int prev_count;
+
+ h += softirq_bit - 1;
+
+ vec_nr = h - softirq_vec;
+ prev_count = preempt_count();
+
+ kstat_incr_softirqs_this_cpu(vec_nr);
+
+ trace_softirq_entry(vec_nr);
+ h->action(h);
+ trace_softirq_exit(vec_nr);
+ if (unlikely(prev_count != preempt_count())) {
+ pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
+ vec_nr, softirq_to_name[vec_nr], h->action,
+ prev_count, preempt_count());
+ preempt_count_set(prev_count);
+ }
+ h++;
+ pending >>= softirq_bit;
+ }
+
+ __this_cpu_write(active_softirqs, 0);
+ if (__this_cpu_read(ksoftirqd) == current)
+ rcu_softirq_qs();
+ local_irq_disable();
pending = local_softirq_pending();
+ deferred = softirq_deferred_for_rt(pending);
+
if (pending) {
if (time_before(jiffies, end) && !need_resched() &&
--max_restart)
goto restart;
+#ifndef CONFIG_RT_SOFTINT_OPTIMIZATION
wakeup_softirqd();
+#endif
}
+#ifdef CONFIG_RT_SOFTINT_OPTIMIZATION
+ if (pending | deferred)
+ wakeup_softirqd();
+#endif
lockdep_softirq_end(in_hardirq);
account_irq_exit_time(current);
__local_bh_enable(SOFTIRQ_OFFSET);
@@ -501,340 +355,40 @@
pending = local_softirq_pending();
- if (pending && !ksoftirqd_running(pending))
+ if (pending)
do_softirq_own_stack();
local_irq_restore(flags);
}
-/*
- * This function must run with irqs disabled!
+/**
+ * irq_enter_rcu - Enter an interrupt context with RCU watching
*/
-void raise_softirq_irqoff(unsigned int nr)
+void irq_enter_rcu(void)
{
- __raise_softirq_irqoff(nr);
-
- /*
- * If we're in an interrupt or softirq, we're done
- * (this also catches softirq-disabled code). We will
- * actually run the softirq once we return from
- * the irq or softirq.
- *
- * Otherwise we wake up ksoftirqd to make sure we
- * schedule the softirq soon.
- */
- if (!in_interrupt())
- wakeup_softirqd();
-}
-
-void __raise_softirq_irqoff(unsigned int nr)
-{
- trace_softirq_raise(nr);
- or_softirq_pending(1UL << nr);
-}
-
-static inline void local_bh_disable_nort(void) { local_bh_disable(); }
-static inline void _local_bh_enable_nort(void) { _local_bh_enable(); }
-static void ksoftirqd_set_sched_params(unsigned int cpu) { }
-
-#else /* !PREEMPT_RT_FULL */
-
-/*
- * On RT we serialize softirq execution with a cpu local lock per softirq
- */
-static DEFINE_PER_CPU(struct local_irq_lock [NR_SOFTIRQS], local_softirq_locks);
-
-void __init softirq_early_init(void)
-{
- int i;
-
- for (i = 0; i < NR_SOFTIRQS; i++)
- local_irq_lock_init(local_softirq_locks[i]);
-}
-
-static void lock_softirq(int which)
-{
- local_lock(local_softirq_locks[which]);
-}
-
-static void unlock_softirq(int which)
-{
- local_unlock(local_softirq_locks[which]);
-}
-
-static void do_single_softirq(int which)
-{
- unsigned long old_flags = current->flags;
-
- current->flags &= ~PF_MEMALLOC;
- vtime_account_irq_enter(current);
- current->flags |= PF_IN_SOFTIRQ;
- lockdep_softirq_enter();
- local_irq_enable();
- handle_softirq(which);
- local_irq_disable();
- lockdep_softirq_exit();
- current->flags &= ~PF_IN_SOFTIRQ;
- vtime_account_irq_enter(current);
- current_restore_flags(old_flags, PF_MEMALLOC);
-}
-
-/*
- * Called with interrupts disabled. Process softirqs which were raised
- * in current context (or on behalf of ksoftirqd).
- */
-static void do_current_softirqs(void)
-{
- while (current->softirqs_raised) {
- int i = __ffs(current->softirqs_raised);
- unsigned int pending, mask = (1U << i);
-
- current->softirqs_raised &= ~mask;
- local_irq_enable();
-
- /*
- * If the lock is contended, we boost the owner to
- * process the softirq or leave the critical section
- * now.
- */
- lock_softirq(i);
- local_irq_disable();
- softirq_set_runner(i);
- /*
- * Check with the local_softirq_pending() bits,
- * whether we need to process this still or if someone
- * else took care of it.
- */
- pending = local_softirq_pending();
- if (pending & mask) {
- set_softirq_pending(pending & ~mask);
- do_single_softirq(i);
- }
- softirq_clr_runner(i);
- WARN_ON(current->softirq_nestcnt != 1);
- local_irq_enable();
- unlock_softirq(i);
- local_irq_disable();
- }
-}
-
-void __local_bh_disable(void)
-{
- if (++current->softirq_nestcnt == 1)
- migrate_disable();
-}
-EXPORT_SYMBOL(__local_bh_disable);
-
-void __local_bh_enable(void)
-{
- if (WARN_ON(current->softirq_nestcnt == 0))
- return;
-
- local_irq_disable();
- if (current->softirq_nestcnt == 1 && current->softirqs_raised)
- do_current_softirqs();
- local_irq_enable();
-
- if (--current->softirq_nestcnt == 0)
- migrate_enable();
-}
-EXPORT_SYMBOL(__local_bh_enable);
-
-void _local_bh_enable(void)
-{
- if (WARN_ON(current->softirq_nestcnt == 0))
- return;
- if (--current->softirq_nestcnt == 0)
- migrate_enable();
-}
-EXPORT_SYMBOL(_local_bh_enable);
-
-int in_serving_softirq(void)
-{
- return current->flags & PF_IN_SOFTIRQ;
-}
-EXPORT_SYMBOL(in_serving_softirq);
-
-/* Called with preemption disabled */
-static void run_ksoftirqd(unsigned int cpu)
-{
- local_irq_disable();
- current->softirq_nestcnt++;
-
- do_current_softirqs();
- current->softirq_nestcnt--;
- local_irq_enable();
- cond_resched();
-}
-
-/*
- * Called from netif_rx_ni(). Preemption enabled, but migration
- * disabled. So the cpu can't go away under us.
- */
-void thread_do_softirq(void)
-{
- if (!in_serving_softirq() && current->softirqs_raised) {
- current->softirq_nestcnt++;
- do_current_softirqs();
- current->softirq_nestcnt--;
- }
-}
-
-static void do_raise_softirq_irqoff(unsigned int nr)
-{
- unsigned int mask;
-
- mask = 1UL << nr;
-
- trace_softirq_raise(nr);
- or_softirq_pending(mask);
-
- /*
- * If we are not in a hard interrupt and inside a bh disabled
- * region, we simply raise the flag on current. local_bh_enable()
- * will make sure that the softirq is executed. Otherwise we
- * delegate it to ksoftirqd.
- */
- if (!in_irq() && current->softirq_nestcnt)
- current->softirqs_raised |= mask;
- else if (!__this_cpu_read(ksoftirqd) || !__this_cpu_read(ktimer_softirqd))
- return;
-
- if (mask & TIMER_SOFTIRQS)
- __this_cpu_read(ktimer_softirqd)->softirqs_raised |= mask;
- else
- __this_cpu_read(ksoftirqd)->softirqs_raised |= mask;
-}
-
-static void wakeup_proper_softirq(unsigned int nr)
-{
- if ((1UL << nr) & TIMER_SOFTIRQS)
- wakeup_timer_softirqd();
- else
- wakeup_softirqd();
-}
-
-void __raise_softirq_irqoff(unsigned int nr)
-{
- do_raise_softirq_irqoff(nr);
- if (!in_irq() && !current->softirq_nestcnt)
- wakeup_proper_softirq(nr);
-}
-
-/*
- * Same as __raise_softirq_irqoff() but will process them in ksoftirqd
- */
-void __raise_softirq_irqoff_ksoft(unsigned int nr)
-{
- unsigned int mask;
-
- if (WARN_ON_ONCE(!__this_cpu_read(ksoftirqd) ||
- !__this_cpu_read(ktimer_softirqd)))
- return;
- mask = 1UL << nr;
-
- trace_softirq_raise(nr);
- or_softirq_pending(mask);
- if (mask & TIMER_SOFTIRQS)
- __this_cpu_read(ktimer_softirqd)->softirqs_raised |= mask;
- else
- __this_cpu_read(ksoftirqd)->softirqs_raised |= mask;
- wakeup_proper_softirq(nr);
-}
-
-/*
- * This function must run with irqs disabled!
- */
-void raise_softirq_irqoff(unsigned int nr)
-{
- do_raise_softirq_irqoff(nr);
-
- /*
- * If we're in an hard interrupt we let irq return code deal
- * with the wakeup of ksoftirqd.
- */
- if (in_irq())
- return;
- /*
- * If we are in thread context but outside of a bh disabled
- * region, we need to wake ksoftirqd as well.
- *
- * CHECKME: Some of the places which do that could be wrapped
- * into local_bh_disable/enable pairs. Though it's unclear
- * whether this is worth the effort. To find those places just
- * raise a WARN() if the condition is met.
- */
- if (!current->softirq_nestcnt)
- wakeup_proper_softirq(nr);
-}
-
-static inline int ksoftirqd_softirq_pending(void)
-{
- return current->softirqs_raised;
-}
-
-static inline void local_bh_disable_nort(void) { }
-static inline void _local_bh_enable_nort(void) { }
-
-static inline void ksoftirqd_set_sched_params(unsigned int cpu)
-{
- /* Take over all but timer pending softirqs when starting */
- local_irq_disable();
- current->softirqs_raised = local_softirq_pending() & ~TIMER_SOFTIRQS;
- local_irq_enable();
-}
-
-static inline void ktimer_softirqd_set_sched_params(unsigned int cpu)
-{
- struct sched_param param = { .sched_priority = 1 };
-
- sched_setscheduler(current, SCHED_FIFO, &param);
-
- /* Take over timer pending softirqs when starting */
- local_irq_disable();
- current->softirqs_raised = local_softirq_pending() & TIMER_SOFTIRQS;
- local_irq_enable();
-}
-
-static inline void ktimer_softirqd_clr_sched_params(unsigned int cpu,
- bool online)
-{
- struct sched_param param = { .sched_priority = 0 };
-
- sched_setscheduler(current, SCHED_NORMAL, &param);
-}
-
-static int ktimer_softirqd_should_run(unsigned int cpu)
-{
- return current->softirqs_raised;
-}
-
-#endif /* PREEMPT_RT_FULL */
-/*
- * Enter an interrupt context.
- */
-void irq_enter(void)
-{
- rcu_irq_enter();
if (is_idle_task(current) && !in_interrupt()) {
/*
* Prevent raise_softirq from needlessly waking up ksoftirqd
* here, as softirq will be serviced on return from interrupt.
*/
- local_bh_disable_nort();
+ local_bh_disable();
tick_irq_enter();
- _local_bh_enable_nort();
+ _local_bh_enable();
}
-
__irq_enter();
+}
+
+/**
+ * irq_enter - Enter an interrupt context including RCU update
+ */
+void irq_enter(void)
+{
+ rcu_irq_enter();
+ irq_enter_rcu();
}
static inline void invoke_softirq(void)
{
-#ifndef CONFIG_PREEMPT_RT_FULL
- if (ksoftirqd_running(local_softirq_pending()))
- return;
-
if (!force_irqthreads) {
#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
/*
@@ -854,18 +408,6 @@
} else {
wakeup_softirqd();
}
-#else /* PREEMPT_RT_FULL */
- unsigned long flags;
-
- local_irq_save(flags);
- if (__this_cpu_read(ksoftirqd) &&
- __this_cpu_read(ksoftirqd)->softirqs_raised)
- wakeup_softirqd();
- if (__this_cpu_read(ktimer_softirqd) &&
- __this_cpu_read(ktimer_softirqd)->softirqs_raised)
- wakeup_timer_softirqd();
- local_irq_restore(flags);
-#endif
}
static inline void tick_irq_exit(void)
@@ -881,10 +423,7 @@
#endif
}
-/*
- * Exit an interrupt context. Process softirqs if needed and possible:
- */
-void irq_exit(void)
+static inline void __irq_exit_rcu(void)
{
#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
local_irq_disable();
@@ -897,8 +436,51 @@
invoke_softirq();
tick_irq_exit();
+}
+
+/**
+ * irq_exit_rcu() - Exit an interrupt context without updating RCU
+ *
+ * Also processes softirqs if needed and possible.
+ */
+void irq_exit_rcu(void)
+{
+ __irq_exit_rcu();
+ /* must be last! */
+ lockdep_hardirq_exit();
+}
+
+/**
+ * irq_exit - Exit an interrupt context, update RCU and lockdep
+ *
+ * Also processes softirqs if needed and possible.
+ */
+void irq_exit(void)
+{
+ __irq_exit_rcu();
rcu_irq_exit();
- trace_hardirq_exit(); /* must be last! */
+ /* must be last! */
+ lockdep_hardirq_exit();
+}
+
+/*
+ * This function must run with irqs disabled!
+ */
+inline void raise_softirq_irqoff(unsigned int nr)
+{
+ __raise_softirq_irqoff(nr);
+
+ /*
+ * If we're in an interrupt or softirq, we're done
+ * (this also catches softirq-disabled code). We will
+ * actually run the softirq once we return from
+ * the irq or softirq.
+ *
+ * Otherwise we wake up ksoftirqd to make sure we
+ * schedule the softirq soon.
+ */
+ if (!in_interrupt())
+ wakeup_softirqd();
}
void raise_softirq(unsigned int nr)
@@ -908,6 +490,13 @@
local_irq_save(flags);
raise_softirq_irqoff(nr);
local_irq_restore(flags);
+}
+
+void __raise_softirq_irqoff(unsigned int nr)
+{
+ lockdep_assert_irqs_disabled();
+ trace_softirq_raise(nr);
+ or_softirq_pending(1UL << nr);
}
void open_softirq(int nr, void (*action)(struct softirq_action *))
@@ -934,44 +523,11 @@
unsigned long flags;
local_irq_save(flags);
- if (!tasklet_trylock(t)) {
- local_irq_restore(flags);
- return;
- }
-
head = this_cpu_ptr(headp);
-again:
- /* We may have been preempted before tasklet_trylock
- * and __tasklet_action may have already run.
- * So double check the sched bit while the takslet
- * is locked before adding it to the list.
- */
- if (test_bit(TASKLET_STATE_SCHED, &t->state)) {
-#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
- if (test_and_set_bit(TASKLET_STATE_CHAINED, &t->state)) {
- tasklet_unlock(t);
- return;
- }
-#endif
- t->next = NULL;
- *head->tail = t;
- head->tail = &(t->next);
- raise_softirq_irqoff(softirq_nr);
- tasklet_unlock(t);
- } else {
- /* This is subtle. If we hit the corner case above
- * It is possible that we get preempted right here,
- * and another task has successfully called
- * tasklet_schedule(), then this function, and
- * failed on the trylock. Thus we must be sure
- * before releasing the tasklet lock, that the
- * SCHED_BIT is clear. Otherwise the tasklet
- * may get its SCHED_BIT set, but not added to the
- * list
- */
- if (!tasklet_tryunlock(t))
- goto again;
- }
+ t->next = NULL;
+ *head->tail = t;
+ head->tail = &(t->next);
+ raise_softirq_irqoff(softirq_nr);
local_irq_restore(flags);
}
@@ -989,21 +545,11 @@
}
EXPORT_SYMBOL(__tasklet_hi_schedule);
-void tasklet_enable(struct tasklet_struct *t)
-{
- if (!atomic_dec_and_test(&t->count))
- return;
- if (test_and_clear_bit(TASKLET_STATE_PENDING, &t->state))
- tasklet_schedule(t);
-}
-EXPORT_SYMBOL(tasklet_enable);
-
static void tasklet_action_common(struct softirq_action *a,
struct tasklet_head *tl_head,
unsigned int softirq_nr)
{
struct tasklet_struct *list;
- int loops = 1000000;
local_irq_disable();
list = tl_head->head;
@@ -1015,60 +561,33 @@
struct tasklet_struct *t = list;
list = list->next;
- /*
- * Should always succeed - after a tasklist got on the
- * list (after getting the SCHED bit set from 0 to 1),
- * nothing but the tasklet softirq it got queued to can
- * lock it:
- */
- if (!tasklet_trylock(t)) {
- WARN_ON(1);
- continue;
- }
- t->next = NULL;
-
- if (unlikely(atomic_read(&t->count))) {
-out_disabled:
- /* implicit unlock: */
- wmb();
- t->state = TASKLET_STATEF_PENDING;
- continue;
- }
- /*
- * After this point on the tasklet might be rescheduled
- * on another CPU, but it can only be added to another
- * CPU's tasklet list if we unlock the tasklet (which we
- * dont do yet).
- */
- if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
- WARN_ON(1);
-again:
- t->func(t->data);
-
-#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
- while (cmpxchg(&t->state, TASKLET_STATEF_RC, 0) != TASKLET_STATEF_RC) {
-#else
- while (!tasklet_tryunlock(t)) {
-#endif
- /*
- * If it got disabled meanwhile, bail out:
- */
- if (atomic_read(&t->count))
- goto out_disabled;
- /*
- * If it got scheduled meanwhile, re-execute
- * the tasklet function:
- */
- if (test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
- goto again;
- if (!--loops) {
- printk("hm, tasklet state: %08lx\n", t->state);
- WARN_ON(1);
+ if (tasklet_trylock(t)) {
+ if (!atomic_read(&t->count)) {
+ if (!test_and_clear_bit(TASKLET_STATE_SCHED,
+ &t->state))
+ BUG();
+ if (t->use_callback) {
+ trace_tasklet_entry(t->callback);
+ t->callback(t);
+ trace_tasklet_exit(t->callback);
+ } else {
+ trace_tasklet_entry(t->func);
+ t->func(t->data);
+ trace_tasklet_exit(t->func);
+ }
tasklet_unlock(t);
- break;
+ continue;
}
+ tasklet_unlock(t);
}
+
+ local_irq_disable();
+ t->next = NULL;
+ *tl_head->tail = t;
+ tl_head->tail = &t->next;
+ __raise_softirq_irqoff(softirq_nr);
+ local_irq_enable();
}
}
@@ -1082,6 +601,18 @@
tasklet_action_common(a, this_cpu_ptr(&tasklet_hi_vec), HI_SOFTIRQ);
}
+void tasklet_setup(struct tasklet_struct *t,
+ void (*callback)(struct tasklet_struct *))
+{
+ t->next = NULL;
+ t->state = 0;
+ atomic_set(&t->count, 0);
+ t->callback = callback;
+ t->use_callback = true;
+ t->data = 0;
+}
+EXPORT_SYMBOL(tasklet_setup);
+
void tasklet_init(struct tasklet_struct *t,
void (*func)(unsigned long), unsigned long data)
{
@@ -1089,6 +620,7 @@
t->state = 0;
atomic_set(&t->count, 0);
t->func = func;
+ t->use_callback = false;
t->data = data;
}
EXPORT_SYMBOL(tasklet_init);
@@ -1100,64 +632,13 @@
while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
do {
- msleep(1);
+ yield();
} while (test_bit(TASKLET_STATE_SCHED, &t->state));
}
tasklet_unlock_wait(t);
clear_bit(TASKLET_STATE_SCHED, &t->state);
}
EXPORT_SYMBOL(tasklet_kill);
-
-/*
- * tasklet_hrtimer
- */
-
-/*
- * The trampoline is called when the hrtimer expires. It schedules a tasklet
- * to run __tasklet_hrtimer_trampoline() which in turn will call the intended
- * hrtimer callback, but from softirq context.
- */
-static enum hrtimer_restart __hrtimer_tasklet_trampoline(struct hrtimer *timer)
-{
- struct tasklet_hrtimer *ttimer =
- container_of(timer, struct tasklet_hrtimer, timer);
-
- tasklet_hi_schedule(&ttimer->tasklet);
- return HRTIMER_NORESTART;
-}
-
-/*
- * Helper function which calls the hrtimer callback from
- * tasklet/softirq context
- */
-static void __tasklet_hrtimer_trampoline(unsigned long data)
-{
- struct tasklet_hrtimer *ttimer = (void *)data;
- enum hrtimer_restart restart;
-
- restart = ttimer->function(&ttimer->timer);
- if (restart != HRTIMER_NORESTART)
- hrtimer_restart(&ttimer->timer);
-}
-
-/**
- * tasklet_hrtimer_init - Init a tasklet/hrtimer combo for softirq callbacks
- * @ttimer: tasklet_hrtimer which is initialized
- * @function: hrtimer callback function which gets called from softirq context
- * @which_clock: clock id (CLOCK_MONOTONIC/CLOCK_REALTIME)
- * @mode: hrtimer mode (HRTIMER_MODE_ABS/HRTIMER_MODE_REL)
- */
-void tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
- enum hrtimer_restart (*function)(struct hrtimer *),
- clockid_t which_clock, enum hrtimer_mode mode)
-{
- hrtimer_init(&ttimer->timer, which_clock, mode);
- ttimer->timer.function = __hrtimer_tasklet_trampoline;
- tasklet_init(&ttimer->tasklet, __tasklet_hrtimer_trampoline,
- (unsigned long)ttimer);
- ttimer->function = function;
-}
-EXPORT_SYMBOL_GPL(tasklet_hrtimer_init);
void __init softirq_init(void)
{
@@ -1174,26 +655,25 @@
open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}
-#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
-void tasklet_unlock_wait(struct tasklet_struct *t)
-{
- while (test_bit(TASKLET_STATE_RUN, &(t)->state)) {
- /*
- * Hack for now to avoid this busy-loop:
- */
-#ifdef CONFIG_PREEMPT_RT_FULL
- msleep(1);
-#else
- barrier();
-#endif
- }
-}
-EXPORT_SYMBOL(tasklet_unlock_wait);
-#endif
-
static int ksoftirqd_should_run(unsigned int cpu)
{
- return ksoftirqd_softirq_pending();
+ return local_softirq_pending();
+}
+
+static void run_ksoftirqd(unsigned int cpu)
+{
+ local_irq_disable();
+ if (local_softirq_pending()) {
+ /*
+ * We can safely run softirq on inline stack, as we are not deep
+ * in the task stack here.
+ */
+ __do_softirq();
+ local_irq_enable();
+ cond_resched();
+ return;
+ }
+ local_irq_enable();
}
#ifdef CONFIG_HOTPLUG_CPU
@@ -1237,7 +717,7 @@
/* Find end, append list for that CPU. */
if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
*__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
- this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
+ __this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
per_cpu(tasklet_vec, cpu).head = NULL;
per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
}
@@ -1260,31 +740,17 @@
static struct smp_hotplug_thread softirq_threads = {
.store = &ksoftirqd,
- .setup = ksoftirqd_set_sched_params,
.thread_should_run = ksoftirqd_should_run,
.thread_fn = run_ksoftirqd,
.thread_comm = "ksoftirqd/%u",
};
-
-#ifdef CONFIG_PREEMPT_RT_FULL
-static struct smp_hotplug_thread softirq_timer_threads = {
- .store = &ktimer_softirqd,
- .setup = ktimer_softirqd_set_sched_params,
- .cleanup = ktimer_softirqd_clr_sched_params,
- .thread_should_run = ktimer_softirqd_should_run,
- .thread_fn = run_ksoftirqd,
- .thread_comm = "ktimersoftd/%u",
-};
-#endif
static __init int spawn_ksoftirqd(void)
{
cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
takeover_tasklets);
BUG_ON(smpboot_register_percpu_thread(&softirq_threads));
-#ifdef CONFIG_PREEMPT_RT_FULL
- BUG_ON(smpboot_register_percpu_thread(&softirq_timer_threads));
-#endif
+
return 0;
}
early_initcall(spawn_ksoftirqd);
--
Gitblit v1.6.2