From 9370bb92b2d16684ee45cf24e879c93c509162da Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Thu, 19 Dec 2024 01:47:39 +0000
Subject: [PATCH] add wifi6 8852be driver: backport irq_work atomic flags and call_single_queue support

---
 kernel/kernel/irq_work.c |  150 ++++++++++++++++++-------------------------------
 1 file changed, 55 insertions(+), 95 deletions(-)
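This extract covers only kernel/kernel/irq_work.c. The hunks backport the
upstream irq_work changes this driver's expectations rely on: irq_work::flags
converted from unsigned long to atomic_t, the cmpxchg() retry loop in
irq_work_claim() collapsed into a single atomic_fetch_or(), remote queueing
routed through the generic call_single_queue via __smp_call_single_queue(),
and the PREEMPT_RT_FULL lazy-list special cases dropped.

The claim/run protocol is easiest to see outside the kernel. Below is a
minimal standalone sketch modeled with C11 atomics; the flag values mirror
the kernel's, but the helper names are illustrative only:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define IRQ_WORK_PENDING  (1 << 0)
    #define IRQ_WORK_BUSY     (1 << 1)
    #define IRQ_WORK_CLAIMED  (IRQ_WORK_PENDING | IRQ_WORK_BUSY)

    static atomic_int flags;

    /* One fetch_or replaces the old cmpxchg() loop: whoever sees
     * PENDING clear in the returned old value owns the (re)queue. */
    static bool claim(void)
    {
        int oflags = atomic_fetch_or(&flags, IRQ_WORK_CLAIMED);

        return !(oflags & IRQ_WORK_PENDING);
    }

    static void run(void)
    {
        /* Drop PENDING first, so the work can be claimed again
         * while the callback is still running. */
        int f = atomic_fetch_and(&flags, ~IRQ_WORK_PENDING)
                & ~IRQ_WORK_PENDING;

        puts("work->func(work)");

        /* Drop BUSY only if nobody re-claimed the work meanwhile. */
        atomic_compare_exchange_strong(&flags, &f, f & ~IRQ_WORK_BUSY);
    }

    int main(void)
    {
        if (claim())
            run();          /* callback runs once */
        claim();            /* re-claim: queues again */
        if (!claim())       /* a second claim while pending is rejected */
            puts("already pending");
        return 0;
    }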

diff --git a/kernel/kernel/irq_work.c b/kernel/kernel/irq_work.c
index b6d9d35..e0ed16d 100644
--- a/kernel/kernel/irq_work.c
+++ b/kernel/kernel/irq_work.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra
  *
@@ -17,7 +18,6 @@
 #include <linux/cpu.h>
 #include <linux/notifier.h>
 #include <linux/smp.h>
-#include <linux/interrupt.h>
 #include <asm/processor.h>
 
 
@@ -29,24 +29,16 @@
  */
 static bool irq_work_claim(struct irq_work *work)
 {
-	unsigned long flags, oflags, nflags;
+	int oflags;
 
+	oflags = atomic_fetch_or(IRQ_WORK_CLAIMED | CSD_TYPE_IRQ_WORK, &work->flags);
 	/*
-	 * Start with our best wish as a premise but only trust any
-	 * flag value after cmpxchg() result.
+	 * If the work is already pending, no need to raise the IPI.
+	 * The pairing atomic_fetch_andnot() in irq_work_run() makes sure
+	 * everything we did before is visible.
 	 */
-	flags = work->flags & ~IRQ_WORK_PENDING;
-	for (;;) {
-		nflags = flags | IRQ_WORK_CLAIMED;
-		oflags = cmpxchg(&work->flags, flags, nflags);
-		if (oflags == flags)
-			break;
-		if (oflags & IRQ_WORK_PENDING)
-			return false;
-		flags = oflags;
-		cpu_relax();
-	}
-
+	if (oflags & IRQ_WORK_PENDING)
+		return false;
 	return true;
 }
 
@@ -58,40 +50,29 @@
 }
 
 /* Enqueue on current CPU, work must already be claimed and preempt disabled */
-static void __irq_work_queue_local(struct irq_work *work, struct llist_head *list)
+static void __irq_work_queue_local(struct irq_work *work)
 {
-	bool empty;
-
-	empty = llist_add(&work->llnode, list);
-
-	if (empty &&
-	    (!(work->flags & IRQ_WORK_LAZY) ||
-	     tick_nohz_tick_stopped()))
-		arch_irq_work_raise();
-}
-
-static inline bool use_lazy_list(struct irq_work *work)
-{
-	return (IS_ENABLED(CONFIG_PREEMPT_RT_FULL) && !(work->flags & IRQ_WORK_HARD_IRQ))
-		|| (work->flags & IRQ_WORK_LAZY);
+	/* If the work is "lazy", handle it from next tick if any */
+	if (atomic_read(&work->flags) & IRQ_WORK_LAZY) {
+		if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
+		    tick_nohz_tick_stopped())
+			arch_irq_work_raise();
+	} else {
+		if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
+			arch_irq_work_raise();
+	}
 }
 
 /* Enqueue the irq work @work on the current CPU */
 bool irq_work_queue(struct irq_work *work)
 {
-	struct llist_head *list;
-
 	/* Only queue if not already pending */
 	if (!irq_work_claim(work))
 		return false;
 
 	/* Queue the entry and raise the IPI if needed. */
 	preempt_disable();
-	if (use_lazy_list(work))
-		list = this_cpu_ptr(&lazy_list);
-	else
-		list = this_cpu_ptr(&raised_list);
-	__irq_work_queue_local(work, list);
+	__irq_work_queue_local(work);
 	preempt_enable();
 
 	return true;
@@ -110,8 +91,6 @@
 	return irq_work_queue(work);
 
 #else /* CONFIG_SMP: */
-	struct llist_head *list;
-
 	/* All work should have been flushed before going offline */
 	WARN_ON_ONCE(cpu_is_offline(cpu));
 
@@ -120,25 +99,19 @@
 		return false;
 
 	preempt_disable();
-	if (use_lazy_list(work))
-		list = &per_cpu(lazy_list, cpu);
-	else
-		list = &per_cpu(raised_list, cpu);
-
 	if (cpu != smp_processor_id()) {
 		/* Arch remote IPI send/receive backend aren't NMI safe */
 		WARN_ON_ONCE(in_nmi());
-		if (llist_add(&work->llnode, list))
-			arch_send_call_function_single_ipi(cpu);
+		__smp_call_single_queue(cpu, &work->llnode);
 	} else {
-		__irq_work_queue_local(work, list);
+		__irq_work_queue_local(work);
 	}
 	preempt_enable();
 
 	return true;
 #endif /* CONFIG_SMP */
 }
-
+EXPORT_SYMBOL_GPL(irq_work_queue_on);
 
 bool irq_work_needs_cpu(void)
 {
@@ -147,8 +120,9 @@
 	raised = this_cpu_ptr(&raised_list);
 	lazy = this_cpu_ptr(&lazy_list);
 
-	if (llist_empty(raised) && llist_empty(lazy))
-		return false;
+	if (llist_empty(raised) || arch_irq_work_has_interrupt())
+		if (llist_empty(lazy))
+			return false;
 
 	/* All work should have been flushed before going offline */
 	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
@@ -156,40 +130,44 @@
 	return true;
 }
 
+void irq_work_single(void *arg)
+{
+	struct irq_work *work = arg;
+	int flags;
+
+	/*
+	 * Clear the PENDING bit, after this point the @work
+	 * can be re-used.
+	 * Make it immediately visible so that other CPUs trying
+	 * to claim that work don't rely on us to handle their data
+	 * while we are in the middle of the func.
+	 */
+	flags = atomic_fetch_andnot(IRQ_WORK_PENDING, &work->flags);
+
+	lockdep_irq_work_enter(work);
+	work->func(work);
+	lockdep_irq_work_exit(work);
+	/*
+	 * Clear the BUSY bit and return to the free state if
+	 * no-one else claimed it meanwhile.
+	 */
+	flags &= ~IRQ_WORK_PENDING;
+	(void)atomic_cmpxchg(&work->flags, flags, flags & ~IRQ_WORK_BUSY);
+}
+
 static void irq_work_run_list(struct llist_head *list)
 {
 	struct irq_work *work, *tmp;
 	struct llist_node *llnode;
-	unsigned long flags;
 
-#ifndef CONFIG_PREEMPT_RT_FULL
-	/*
-	 * nort: On RT IRQ-work may run in SOFTIRQ context.
-	 */
 	BUG_ON(!irqs_disabled());
-#endif
+
 	if (llist_empty(list))
 		return;
 
 	llnode = llist_del_all(list);
-	llist_for_each_entry_safe(work, tmp, llnode, llnode) {
-		/*
-		 * Clear the PENDING bit, after this point the @work
-		 * can be re-used.
-		 * Make it immediately visible so that other CPUs trying
-		 * to claim that work don't rely on us to handle their data
-		 * while we are in the middle of the func.
-		 */
-		flags = work->flags & ~IRQ_WORK_PENDING;
-		xchg(&work->flags, flags);
-
-		work->func(work);
-		/*
-		 * Clear the BUSY bit and return to the free state if
-		 * no-one else claimed it meanwhile.
-		 */
-		(void)cmpxchg(&work->flags, flags, flags & ~IRQ_WORK_BUSY);
-	}
+	llist_for_each_entry_safe(work, tmp, llnode, llnode)
+		irq_work_single(work);
 }
 
 /*
@@ -199,16 +177,7 @@
 void irq_work_run(void)
 {
 	irq_work_run_list(this_cpu_ptr(&raised_list));
-	if (IS_ENABLED(CONFIG_PREEMPT_RT_FULL)) {
-		/*
-		 * NOTE: we raise softirq via IPI for safety,
-		 * and execute in irq_work_tick() to move the
-		 * overhead from hard to soft irq context.
-		 */
-		if (!llist_empty(this_cpu_ptr(&lazy_list)))
-			raise_softirq(TIMER_SOFTIRQ);
-	} else
-		irq_work_run_list(this_cpu_ptr(&lazy_list));
+	irq_work_run_list(this_cpu_ptr(&lazy_list));
 }
 EXPORT_SYMBOL_GPL(irq_work_run);
 
@@ -218,17 +187,8 @@
 
 	if (!llist_empty(raised) && !arch_irq_work_has_interrupt())
 		irq_work_run_list(raised);
-
-	if (!IS_ENABLED(CONFIG_PREEMPT_RT_FULL))
-		irq_work_run_list(this_cpu_ptr(&lazy_list));
-}
-
-#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_PREEMPT_RT_FULL)
-void irq_work_tick_soft(void)
-{
 	irq_work_run_list(this_cpu_ptr(&lazy_list));
 }
-#endif
 
 /*
  * Synchronize against the irq_work @entry, ensures the entry is not
@@ -238,7 +198,7 @@
 {
 	lockdep_assert_irqs_enabled();
 
-	while (work->flags & IRQ_WORK_BUSY)
+	while (atomic_read(&work->flags) & IRQ_WORK_BUSY)
 		cpu_relax();
 }
 EXPORT_SYMBOL_GPL(irq_work_sync);

--
Gitblit v1.6.2
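The patch also exports irq_work_queue_on() to modules, presumably because the
driver queues work on specific CPUs. A minimal module-side sketch of the
post-patch API, assuming this tree's headers; the rtw_* names are
hypothetical, not the actual driver code:

    #include <linux/interrupt.h>
    #include <linux/irq_work.h>

    static void rtw_bottom_half(struct irq_work *work)
    {
        /* Runs in hard-IRQ context soon after being raised. */
    }

    static struct irq_work rtw_work = { .func = rtw_bottom_half };

    static irqreturn_t rtw_isr(int irq, void *dev)
    {
        /* Defer heavy work; queueing is safe from NMI/IRQ context. */
        irq_work_queue(&rtw_work);          /* current CPU */
        irq_work_queue_on(&rtw_work, 0);    /* CPU 0; export added above */
        return IRQ_HANDLED;
    }

    static void rtw_teardown(void)
    {
        /* Wait for a pending callback to finish; needs IRQs enabled. */
        irq_work_sync(&rtw_work);
    }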