From 071106ecf68c401173c58808b1cf5f68cc50d390 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Fri, 05 Jan 2024 08:39:27 +0000
Subject: [PATCH] irq_work: drop PREEMPT_RT lazy-list threading

Remove the PREEMPT_RT-specific irq_work handling: the per-CPU irq_workd
thread, the IRQ_WORK_HARD_IRQ hard/lazy split on the remote-queue path,
and the rcuwait-based irq_work_sync(), restoring the plain
raised_list/lazy_list behaviour where all work runs in hardirq context.

---
 kernel/kernel/irq_work.c | 135 ++++----------------------------------
 1 file changed, 14 insertions(+), 121 deletions(-)

diff --git a/kernel/kernel/irq_work.c b/kernel/kernel/irq_work.c
index 820798c..e0ed16d 100644
--- a/kernel/kernel/irq_work.c
+++ b/kernel/kernel/irq_work.c
@@ -18,37 +18,11 @@
 #include <linux/cpu.h>
 #include <linux/notifier.h>
 #include <linux/smp.h>
-#include <linux/smpboot.h>
-#include <linux/interrupt.h>
 #include <asm/processor.h>
 
 static DEFINE_PER_CPU(struct llist_head, raised_list);
 static DEFINE_PER_CPU(struct llist_head, lazy_list);
-static DEFINE_PER_CPU(struct task_struct *, irq_workd);
-
-static void wake_irq_workd(void)
-{
-	struct task_struct *tsk = __this_cpu_read(irq_workd);
-
-	if (!llist_empty(this_cpu_ptr(&lazy_list)) && tsk)
-		wake_up_process(tsk);
-}
-
-#ifdef CONFIG_SMP
-static void irq_work_wake(struct irq_work *entry)
-{
-	wake_irq_workd();
-}
-
-static DEFINE_PER_CPU(struct irq_work, irq_work_wakeup) =
-	IRQ_WORK_INIT_HARD(irq_work_wake);
-#endif
-
-static int irq_workd_should_run(unsigned int cpu)
-{
-	return !llist_empty(this_cpu_ptr(&lazy_list));
-}
 
 /*
  * Claim the entry so that no one else will poke at it.
  */
@@ -78,29 +52,15 @@
 /* Enqueue on current CPU, work must already be claimed and preempt disabled */
 static void __irq_work_queue_local(struct irq_work *work)
 {
-	struct llist_head *list;
-	bool rt_lazy_work = false;
-	bool lazy_work = false;
-	int work_flags;
-
-	work_flags = atomic_read(&work->flags);
-	if (work_flags & IRQ_WORK_LAZY)
-		lazy_work = true;
-	else if (IS_ENABLED(CONFIG_PREEMPT_RT) &&
-		 !(work_flags & IRQ_WORK_HARD_IRQ))
-		rt_lazy_work = true;
-
-	if (lazy_work || rt_lazy_work)
-		list = this_cpu_ptr(&lazy_list);
-	else
-		list = this_cpu_ptr(&raised_list);
-
-	if (!llist_add(&work->llnode, list))
-		return;
-
 	/* If the work is "lazy", handle it from next tick if any */
-	if (!lazy_work || tick_nohz_tick_stopped())
-		arch_irq_work_raise();
+	if (atomic_read(&work->flags) & IRQ_WORK_LAZY) {
+		if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
+		    tick_nohz_tick_stopped())
+			arch_irq_work_raise();
+	} else {
+		if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
+			arch_irq_work_raise();
+	}
 }
 
 /* Enqueue the irq work @work on the current CPU */
@@ -142,28 +102,10 @@
 	if (cpu != smp_processor_id()) {
 		/* Arch remote IPI send/receive backend aren't NMI safe */
 		WARN_ON_ONCE(in_nmi());
-
-		/*
-		 * On PREEMPT_RT the items which are not marked as
-		 * IRQ_WORK_HARD_IRQ are added to the lazy list and a HARD work
-		 * item is used on the remote CPU to wake the thread.
-		 */
-		if (IS_ENABLED(CONFIG_PREEMPT_RT) &&
-		    !(atomic_read(&work->flags) & IRQ_WORK_HARD_IRQ)) {
-
-			if (!llist_add(&work->llnode, &per_cpu(lazy_list, cpu)))
-				goto out;
-
-			work = &per_cpu(irq_work_wakeup, cpu);
-			if (!irq_work_claim(work))
-				goto out;
-		}
-
 		__smp_call_single_queue(cpu, &work->llnode);
 	} else {
 		__irq_work_queue_local(work);
 	}
-out:
 	preempt_enable();
 
 	return true;
@@ -178,8 +120,9 @@
 	raised = this_cpu_ptr(&raised_list);
 	lazy = this_cpu_ptr(&lazy_list);
 
-	if (llist_empty(raised) && llist_empty(lazy))
-		return false;
+	if (llist_empty(raised) || arch_irq_work_has_interrupt())
+		if (llist_empty(lazy))
+			return false;
 
 	/* All work should have been flushed before going offline */
 	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
@@ -210,10 +153,6 @@
 	 */
 	flags &= ~IRQ_WORK_PENDING;
 	(void)atomic_cmpxchg(&work->flags, flags, flags & ~IRQ_WORK_BUSY);
-
-	if ((IS_ENABLED(CONFIG_PREEMPT_RT) && !irq_work_is_hard(work)) ||
-	    !arch_irq_work_has_interrupt())
-		rcuwait_wake_up(&work->irqwait);
 }
 
 static void irq_work_run_list(struct llist_head *list)
@@ -221,12 +160,7 @@
 	struct irq_work *work, *tmp;
 	struct llist_node *llnode;
 
-	/*
-	 * On PREEMPT_RT IRQ-work which is not marked as HARD will be processed
-	 * in a per-CPU thread in preemptible context. Only the items which are
-	 * marked as IRQ_WORK_HARD_IRQ will be processed in hardirq context.
-	 */
-	BUG_ON(!irqs_disabled() && !IS_ENABLED(CONFIG_PREEMPT_RT));
+	BUG_ON(!irqs_disabled());
 
 	if (llist_empty(list))
 		return;
@@ -243,10 +177,7 @@
 void irq_work_run(void)
 {
 	irq_work_run_list(this_cpu_ptr(&raised_list));
-	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
-		irq_work_run_list(this_cpu_ptr(&lazy_list));
-	else
-		wake_irq_workd();
+	irq_work_run_list(this_cpu_ptr(&lazy_list));
 }
 EXPORT_SYMBOL_GPL(irq_work_run);
 
@@ -256,11 +187,7 @@
 
 	if (!llist_empty(raised) && !arch_irq_work_has_interrupt())
 		irq_work_run_list(raised);
-
-	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
-		irq_work_run_list(this_cpu_ptr(&lazy_list));
-	else
-		wake_irq_workd();
+	irq_work_run_list(this_cpu_ptr(&lazy_list));
 }
 
 /*
@@ -270,42 +197,8 @@
 void irq_work_sync(struct irq_work *work)
 {
 	lockdep_assert_irqs_enabled();
-	might_sleep();
-
-	if ((IS_ENABLED(CONFIG_PREEMPT_RT) && !irq_work_is_hard(work)) ||
-	    !arch_irq_work_has_interrupt()) {
-		rcuwait_wait_event(&work->irqwait, !irq_work_is_busy(work),
-				   TASK_UNINTERRUPTIBLE);
-		return;
-	}
 
 	while (atomic_read(&work->flags) & IRQ_WORK_BUSY)
 		cpu_relax();
 }
 EXPORT_SYMBOL_GPL(irq_work_sync);
-
-static void run_irq_workd(unsigned int cpu)
-{
-	irq_work_run_list(this_cpu_ptr(&lazy_list));
-}
-
-static void irq_workd_setup(unsigned int cpu)
-{
-	sched_set_fifo_low(current);
-}
-
-static struct smp_hotplug_thread irqwork_threads = {
-	.store = &irq_workd,
-	.setup = irq_workd_setup,
-	.thread_should_run = irq_workd_should_run,
-	.thread_fn = run_irq_workd,
-	.thread_comm = "irq_work/%u",
-};
-
-static __init int irq_work_init_threads(void)
-{
-	if (IS_ENABLED(CONFIG_PREEMPT_RT))
-		BUG_ON(smpboot_register_percpu_thread(&irqwork_threads));
-	return 0;
-}
-early_initcall(irq_work_init_threads);
--
Gitblit v1.6.2
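
Usage sketch, not part of the diff: with the lazy-list thread gone, every
queued item runs in hardirq context; raised_list work from the arch IPI,
IRQ_WORK_LAZY work from the next tick. A minimal caller against the
5.10-era irq_work API that this file implements (atomic_t flags) could
look as follows; the example_* names are hypothetical:

#include <linux/init.h>
#include <linux/irq_work.h>
#include <linux/printk.h>

static struct irq_work example_work;

/* Callback: after this patch it always runs with IRQs disabled. */
static void example_cb(struct irq_work *work)
{
	pr_info("irq_work callback ran\n");
}

static int __init example_init(void)
{
	init_irq_work(&example_work, example_cb);
	/* NMI-safe: the queue path is llist_add() plus, for non-lazy
	 * work, arch_irq_work_raise(); no sleeping locks are taken. */
	irq_work_queue(&example_work);
	return 0;
}
late_initcall(example_init);

Before freeing such an item, call irq_work_sync() with IRQs enabled;
after this patch it simply spins until IRQ_WORK_BUSY clears rather than
sleeping on the removed rcuwait.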