From 7e970c18f85f99acc678d90128b6e01dce1bf273 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Fri, 01 Nov 2024 02:40:12 +0000
Subject: [PATCH] lockdep: irq_pipeline: adapt lock debugging to interrupt pipelining
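
The in-band stall bit does not reflect the hardware interrupt state
when interrupt pipelining (CONFIG_IRQ_PIPELINE) is enabled, so the
plain irqs_disabled() checks and raw_local_irq_save()/restore()
bracketing that lockdep relies on are no longer sufficient. Therefore:

- add lockdep_stage_disabled(), lockdep_stage_disable(),
  lockdep_stage_test_and_disable() and lockdep_stage_restore()
  helpers, which disable interrupts for both stages when pipelining
  is enabled and fall back to raw_local_irq_save()/restore()
  otherwise;
- require hard interrupts to be disabled around the internal lockdep
  lock (hard_irqs_disabled());
- bracket the hardirq/softirq tracing hooks with
  hard_cond_local_irq_save()/restore();
- skip the irq-state consistency check in check_flags() on the
  pipeline entry and oob stage contexts, where the inband stall bit
  is meaningless.
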
---
kernel/kernel/locking/lockdep.c | 213 +++++++++++++++++++++++++++++++++++++----------------
1 file changed, 148 insertions(+), 65 deletions(-)
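
Note (reviewer illustration, not part of the commit): the conversion
below repeatedly replaces raw_local_irq_save()/raw_local_irq_restore()
pairs with the stage-aware helpers introduced at the top of the file.
The sketch that follows shows that calling pattern in isolation. It
mimics only the !CONFIG_IRQ_PIPELINE fallback and compiles as ordinary
user-space C; irqflags_t, fake_irqs_masked and the stub_* functions
are made-up stand-ins for the kernel primitives, not real APIs.

#include <stdio.h>

typedef unsigned long irqflags_t;	/* stand-in for the kernel flags word */

static int fake_irqs_masked;		/* pretend local-IRQ mask state */

static irqflags_t stub_raw_local_irq_save(void)
{
	irqflags_t old = fake_irqs_masked;	/* remember previous state */
	fake_irqs_masked = 1;			/* "mask" local interrupts */
	return old;
}

static void stub_raw_local_irq_restore(irqflags_t flags)
{
	fake_irqs_masked = (int)flags;		/* put the previous state back */
}

/*
 * Fallback shape of the helpers added by the patch when pipelining is
 * disabled: "disabling the stage" degenerates to masking local IRQs,
 * and *irqsoff reports whether they were already masked on entry.
 */
static irqflags_t lockdep_stage_test_and_disable(int *irqsoff)
{
	irqflags_t flags = stub_raw_local_irq_save();

	*irqsoff = (flags != 0);
	return flags;
}

static irqflags_t lockdep_stage_disable(void)
{
	return stub_raw_local_irq_save();
}

static void lockdep_stage_restore(irqflags_t flags)
{
	stub_raw_local_irq_restore(flags);
}

int main(void)
{
	irqflags_t flags;
	int irqsoff;

	/* Bracketing pattern lock_acquire() uses after the patch. */
	flags = lockdep_stage_test_and_disable(&irqsoff);
	printf("irqs were %s before entry\n", irqsoff ? "off" : "on");
	/* ... lockdep bookkeeping would run here ... */
	lockdep_stage_restore(flags);

	/* Simpler pattern used by lock_release(), lock_contended(), etc. */
	flags = lockdep_stage_disable();
	/* ... */
	lockdep_stage_restore(flags);

	return 0;
}
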
diff --git a/kernel/kernel/locking/lockdep.c b/kernel/kernel/locking/lockdep.c
index 6cbd2b4..48f5a6b 100644
--- a/kernel/kernel/locking/lockdep.c
+++ b/kernel/kernel/locking/lockdep.c
@@ -42,6 +42,7 @@
#include <linux/stacktrace.h>
#include <linux/debug_locks.h>
#include <linux/irqflags.h>
+#include <linux/irqstage.h>
#include <linux/utsname.h>
#include <linux/hash.h>
#include <linux/ftrace.h>
@@ -104,9 +105,56 @@
static arch_spinlock_t __lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
static struct task_struct *__owner;
+static __always_inline bool lockdep_stage_disabled(void)
+{
+ return stage_disabled();
+}
+
+#ifdef CONFIG_IRQ_PIPELINE
+/*
+ * If LOCKDEP is enabled, we want irqs to be disabled for both stages
+ * when traversing the lockdep code for hard and mutable locks (at the
+ * expense of massive latency overhead though).
+ */
+static __always_inline unsigned long lockdep_stage_test_and_disable(int *irqsoff)
+{
+ return test_and_lock_stage(irqsoff);
+}
+
+static __always_inline unsigned long lockdep_stage_disable(void)
+{
+ return lockdep_stage_test_and_disable(NULL);
+}
+
+static __always_inline void lockdep_stage_restore(unsigned long flags)
+{
+ unlock_stage(flags);
+}
+
+#else
+
+#define lockdep_stage_test_and_disable(__irqsoff) \
+ ({ \
+ unsigned long __flags; \
+ raw_local_irq_save(__flags); \
+ *(__irqsoff) = irqs_disabled_flags(__flags); \
+ __flags; \
+ })
+
+#define lockdep_stage_disable() \
+ ({ \
+ unsigned long __flags; \
+ raw_local_irq_save(__flags); \
+ __flags; \
+ })
+
+#define lockdep_stage_restore(__flags) raw_local_irq_restore(__flags)
+
+#endif /* !CONFIG_IRQ_PIPELINE */
+
static inline void lockdep_lock(void)
{
- DEBUG_LOCKS_WARN_ON(!irqs_disabled());
+ DEBUG_LOCKS_WARN_ON(!hard_irqs_disabled());
__this_cpu_inc(lockdep_recursion);
arch_spin_lock(&__lock);
@@ -115,7 +163,7 @@
static inline void lockdep_unlock(void)
{
- DEBUG_LOCKS_WARN_ON(!irqs_disabled());
+ DEBUG_LOCKS_WARN_ON(!hard_irqs_disabled());
if (debug_locks && DEBUG_LOCKS_WARN_ON(__owner != current))
return;
@@ -882,7 +930,7 @@
/*
* We do an RCU walk of the hash, see lockdep_free_key_range().
*/
- if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
+ if (DEBUG_LOCKS_WARN_ON(!lockdep_stage_disabled()))
return NULL;
hlist_for_each_entry_rcu_notrace(class, hash_head, hash_entry) {
@@ -1179,7 +1227,7 @@
return;
hash_head = keyhashentry(key);
- raw_local_irq_save(flags);
+ flags = lockdep_stage_disable();
if (!graph_lock())
goto restore_irqs;
hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
@@ -1190,7 +1238,7 @@
out_unlock:
graph_unlock();
restore_irqs:
- raw_local_irq_restore(flags);
+ lockdep_stage_restore(flags);
}
EXPORT_SYMBOL_GPL(lockdep_register_key);
@@ -1239,7 +1287,7 @@
struct lock_class *class;
int idx;
- DEBUG_LOCKS_WARN_ON(!irqs_disabled());
+ DEBUG_LOCKS_WARN_ON(!lockdep_stage_disabled());
class = look_up_lock_class(lock, subclass);
if (likely(class))
@@ -2035,11 +2083,11 @@
__bfs_init_root(&this, class);
- raw_local_irq_save(flags);
+ flags = lockdep_stage_disable();
lockdep_lock();
ret = __lockdep_count_forward_deps(&this);
lockdep_unlock();
- raw_local_irq_restore(flags);
+ lockdep_stage_restore(flags);
return ret;
}
@@ -2061,11 +2109,11 @@
__bfs_init_root(&this, class);
- raw_local_irq_save(flags);
+ flags = lockdep_stage_disable();
lockdep_lock();
ret = __lockdep_count_backward_deps(&this);
lockdep_unlock();
- raw_local_irq_restore(flags);
+ lockdep_stage_restore(flags);
return ret;
}
@@ -4170,6 +4218,8 @@
*/
void lockdep_hardirqs_on_prepare(unsigned long ip)
{
+ unsigned long flags;
+
if (unlikely(!debug_locks))
return;
@@ -4192,38 +4242,43 @@
return;
}
+ flags = hard_cond_local_irq_save();
+
/*
* We're enabling irqs and according to our state above irqs weren't
* already enabled, yet we find the hardware thinks they are in fact
* enabled.. someone messed up their IRQ state tracing.
*/
- if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
- return;
+ if (DEBUG_LOCKS_WARN_ON(!lockdep_stage_disabled()))
+ goto out;
/*
* See the fine text that goes along with this variable definition.
*/
if (DEBUG_LOCKS_WARN_ON(early_boot_irqs_disabled))
- return;
+ goto out;
/*
* Can't allow enabling interrupts while in an interrupt handler,
* that's general bad form and such. Recursion, limited stack etc..
*/
- if (DEBUG_LOCKS_WARN_ON(lockdep_hardirq_context()))
- return;
+ if (DEBUG_LOCKS_WARN_ON(running_inband() && lockdep_hardirq_context()))
+ goto out;
current->hardirq_chain_key = current->curr_chain_key;
lockdep_recursion_inc();
__trace_hardirqs_on_caller();
lockdep_recursion_finish();
+out:
+ hard_cond_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lockdep_hardirqs_on_prepare);
void noinstr lockdep_hardirqs_on(unsigned long ip)
{
struct irqtrace_events *trace = &current->irqtrace;
+ unsigned long flags;
if (unlikely(!debug_locks))
return;
@@ -4261,13 +4316,15 @@
return;
}
+ flags = hard_cond_local_irq_save();
+
/*
* We're enabling irqs and according to our state above irqs weren't
* already enabled, yet we find the hardware thinks they are in fact
* enabled.. someone messed up their IRQ state tracing.
*/
- if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
- return;
+ if (DEBUG_LOCKS_WARN_ON(!lockdep_stage_disabled()))
+ goto out;
/*
* Ensure the lock stack remained unchanged between
@@ -4282,6 +4339,8 @@
trace->hardirq_enable_ip = ip;
trace->hardirq_enable_event = ++trace->irq_events;
debug_atomic_inc(hardirqs_on_events);
+out:
+ hard_cond_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lockdep_hardirqs_on);
@@ -4290,6 +4349,8 @@
*/
void noinstr lockdep_hardirqs_off(unsigned long ip)
{
+ unsigned long flags;
+
if (unlikely(!debug_locks))
return;
@@ -4304,12 +4365,14 @@
} else if (__this_cpu_read(lockdep_recursion))
return;
+ flags = hard_cond_local_irq_save();
+
/*
* So we're supposed to get called after you mask local IRQs, but for
* some reason the hardware doesn't quite think you did a proper job.
*/
- if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
- return;
+ if (DEBUG_LOCKS_WARN_ON(!lockdep_stage_disabled()))
+ goto out;
if (lockdep_hardirqs_enabled()) {
struct irqtrace_events *trace = &current->irqtrace;
@@ -4324,6 +4387,8 @@
} else {
debug_atomic_inc(redundant_hardirqs_off);
}
+out:
+ hard_cond_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(lockdep_hardirqs_off);
@@ -4333,20 +4398,23 @@
void lockdep_softirqs_on(unsigned long ip)
{
struct irqtrace_events *trace = &current->irqtrace;
+ unsigned long flags;
if (unlikely(!lockdep_enabled()))
return;
+
+ flags = hard_cond_local_irq_save();
/*
* We fancy IRQs being disabled here, see softirq.c, avoids
* funny state and nesting things.
*/
- if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
- return;
+ if (DEBUG_LOCKS_WARN_ON(!lockdep_stage_disabled()))
+ goto out;
if (current->softirqs_enabled) {
debug_atomic_inc(redundant_softirqs_on);
- return;
+ goto out;
}
lockdep_recursion_inc();
@@ -4365,6 +4433,8 @@
if (lockdep_hardirqs_enabled())
mark_held_locks(current, LOCK_ENABLED_SOFTIRQ);
lockdep_recursion_finish();
+out:
+ hard_cond_local_irq_restore(flags);
}
/*
@@ -4372,14 +4442,18 @@
*/
void lockdep_softirqs_off(unsigned long ip)
{
+ unsigned long flags;
+
if (unlikely(!lockdep_enabled()))
return;
+
+ flags = hard_cond_local_irq_save();
/*
* We fancy IRQs being disabled here, see softirq.c
*/
- if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
- return;
+ if (DEBUG_LOCKS_WARN_ON(!lockdep_stage_disabled()))
+ goto out;
if (current->softirqs_enabled) {
struct irqtrace_events *trace = &current->irqtrace;
@@ -4397,6 +4471,8 @@
DEBUG_LOCKS_WARN_ON(!softirq_count());
} else
debug_atomic_inc(redundant_softirqs_off);
+out:
+ hard_cond_local_irq_restore(flags);
}
static int
@@ -4751,11 +4827,11 @@
if (DEBUG_LOCKS_WARN_ON(!lockdep_enabled()))
return;
- raw_local_irq_save(flags);
+ flags = lockdep_stage_disable();
lockdep_recursion_inc();
register_lock_class(lock, subclass, 1);
lockdep_recursion_finish();
- raw_local_irq_restore(flags);
+ lockdep_stage_restore(flags);
}
}
EXPORT_SYMBOL_GPL(lockdep_init_map_type);
@@ -5085,7 +5161,7 @@
struct held_lock *hlock;
int first_idx = idx;
- if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
+ if (DEBUG_LOCKS_WARN_ON(!lockdep_stage_disabled()))
return 0;
for (hlock = curr->held_locks + idx; idx < depth; idx++, hlock++) {
@@ -5397,7 +5473,13 @@
static noinstr void check_flags(unsigned long flags)
{
#if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP)
- if (!debug_locks)
+ /*
+ * irq_pipeline: we can't and don't want to check the
+ * consistency of the irq tracer when running over the
+ * pipeline entry or oob stage contexts, since the inband
+ * stall bit does not reflect the current irq state there.
+ */
+ if (on_pipeline_entry() || running_oob() || !debug_locks)
return;
/* Get the warning out.. */
@@ -5444,13 +5526,13 @@
if (unlikely(!lockdep_enabled()))
return;
- raw_local_irq_save(flags);
+ flags = lockdep_stage_disable();
lockdep_recursion_inc();
check_flags(flags);
if (__lock_set_class(lock, name, key, subclass, ip))
check_chain_key(current);
lockdep_recursion_finish();
- raw_local_irq_restore(flags);
+ lockdep_stage_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_set_class);
@@ -5461,13 +5543,13 @@
if (unlikely(!lockdep_enabled()))
return;
- raw_local_irq_save(flags);
+ flags = lockdep_stage_disable();
lockdep_recursion_inc();
check_flags(flags);
if (__lock_downgrade(lock, ip))
check_chain_key(current);
lockdep_recursion_finish();
- raw_local_irq_restore(flags);
+ lockdep_stage_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_downgrade);
@@ -5532,6 +5614,7 @@
struct lockdep_map *nest_lock, unsigned long ip)
{
unsigned long flags;
+ int irqsoff;
trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
@@ -5558,14 +5641,14 @@
return;
}
- raw_local_irq_save(flags);
+ flags = lockdep_stage_test_and_disable(&irqsoff);
check_flags(flags);
lockdep_recursion_inc();
__lock_acquire(lock, subclass, trylock, read, check,
- irqs_disabled_flags(flags), nest_lock, ip, 0, 0);
+ irqsoff, nest_lock, ip, 0, 0);
lockdep_recursion_finish();
- raw_local_irq_restore(flags);
+ lockdep_stage_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_acquire);
@@ -5578,14 +5661,14 @@
if (unlikely(!lockdep_enabled()))
return;
- raw_local_irq_save(flags);
+ flags = lockdep_stage_disable();
check_flags(flags);
lockdep_recursion_inc();
if (__lock_release(lock, ip))
check_chain_key(current);
lockdep_recursion_finish();
- raw_local_irq_restore(flags);
+ lockdep_stage_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_release);
@@ -5597,13 +5680,13 @@
if (unlikely(!lockdep_enabled()))
return 1; /* avoid false negative lockdep_assert_held() */
- raw_local_irq_save(flags);
+ flags = lockdep_stage_disable();
check_flags(flags);
lockdep_recursion_inc();
ret = __lock_is_held(lock, read);
lockdep_recursion_finish();
- raw_local_irq_restore(flags);
+ lockdep_stage_restore(flags);
return ret;
}
@@ -5618,13 +5701,13 @@
if (unlikely(!lockdep_enabled()))
return cookie;
- raw_local_irq_save(flags);
+ flags = lockdep_stage_disable();
check_flags(flags);
lockdep_recursion_inc();
cookie = __lock_pin_lock(lock);
lockdep_recursion_finish();
- raw_local_irq_restore(flags);
+ lockdep_stage_restore(flags);
return cookie;
}
@@ -5637,13 +5720,13 @@
if (unlikely(!lockdep_enabled()))
return;
- raw_local_irq_save(flags);
+ flags = lockdep_stage_disable();
check_flags(flags);
lockdep_recursion_inc();
__lock_repin_lock(lock, cookie);
lockdep_recursion_finish();
- raw_local_irq_restore(flags);
+ lockdep_stage_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_repin_lock);
@@ -5654,13 +5737,13 @@
if (unlikely(!lockdep_enabled()))
return;
- raw_local_irq_save(flags);
+ flags = lockdep_stage_disable();
check_flags(flags);
lockdep_recursion_inc();
__lock_unpin_lock(lock, cookie);
lockdep_recursion_finish();
- raw_local_irq_restore(flags);
+ lockdep_stage_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_unpin_lock);
@@ -5790,12 +5873,12 @@
if (unlikely(!lock_stat || !lockdep_enabled()))
return;
- raw_local_irq_save(flags);
+ flags = lockdep_stage_disable();
check_flags(flags);
lockdep_recursion_inc();
__lock_contended(lock, ip);
lockdep_recursion_finish();
- raw_local_irq_restore(flags);
+ lockdep_stage_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_contended);
@@ -5808,12 +5891,12 @@
if (unlikely(!lock_stat || !lockdep_enabled()))
return;
- raw_local_irq_save(flags);
+ flags = lockdep_stage_disable();
check_flags(flags);
lockdep_recursion_inc();
__lock_acquired(lock, ip);
lockdep_recursion_finish();
- raw_local_irq_restore(flags);
+ lockdep_stage_restore(flags);
}
EXPORT_SYMBOL_GPL(lock_acquired);
#endif
@@ -5828,7 +5911,7 @@
unsigned long flags;
int i;
- raw_local_irq_save(flags);
+ flags = lockdep_stage_disable();
lockdep_init_task(current);
memset(current->held_locks, 0, MAX_LOCK_DEPTH*sizeof(struct held_lock));
nr_hardirq_chains = 0;
@@ -5837,7 +5920,7 @@
debug_locks = 1;
for (i = 0; i < CHAINHASH_SIZE; i++)
INIT_HLIST_HEAD(chainhash_table + i);
- raw_local_irq_restore(flags);
+ lockdep_stage_restore(flags);
}
/* Remove a class from a lock chain. Must be called with the graph lock held. */
@@ -6014,7 +6097,7 @@
if (WARN_ON_ONCE(ch != &delayed_free.rcu_head))
return;
- raw_local_irq_save(flags);
+ flags = lockdep_stage_disable();
lockdep_lock();
/* closed head */
@@ -6028,7 +6111,7 @@
call_rcu_zapped(delayed_free.pf + delayed_free.index);
lockdep_unlock();
- raw_local_irq_restore(flags);
+ lockdep_stage_restore(flags);
}
/*
@@ -6071,13 +6154,13 @@
init_data_structures_once();
- raw_local_irq_save(flags);
+ flags = lockdep_stage_disable();
lockdep_lock();
pf = get_pending_free();
__lockdep_free_key_range(pf, start, size);
call_rcu_zapped(pf);
lockdep_unlock();
- raw_local_irq_restore(flags);
+ lockdep_stage_restore(flags);
/*
* Wait for any possible iterators from look_up_lock_class() to pass
@@ -6097,12 +6180,12 @@
init_data_structures_once();
- raw_local_irq_save(flags);
+ flags = lockdep_stage_disable();
lockdep_lock();
__lockdep_free_key_range(pf, start, size);
__free_zapped_classes(pf);
lockdep_unlock();
- raw_local_irq_restore(flags);
+ lockdep_stage_restore(flags);
}
void lockdep_free_key_range(void *start, unsigned long size)
@@ -6173,7 +6256,7 @@
unsigned long flags;
int locked;
- raw_local_irq_save(flags);
+ flags = lockdep_stage_disable();
locked = graph_lock();
if (!locked)
goto out_irq;
@@ -6184,7 +6267,7 @@
graph_unlock();
out_irq:
- raw_local_irq_restore(flags);
+ lockdep_stage_restore(flags);
}
/*
@@ -6196,12 +6279,12 @@
struct pending_free *pf = delayed_free.pf;
unsigned long flags;
- raw_local_irq_save(flags);
+ flags = lockdep_stage_disable();
lockdep_lock();
__lockdep_reset_lock(pf, lock);
__free_zapped_classes(pf);
lockdep_unlock();
- raw_local_irq_restore(flags);
+ lockdep_stage_restore(flags);
}
void lockdep_reset_lock(struct lockdep_map *lock)
@@ -6234,7 +6317,7 @@
if (WARN_ON_ONCE(static_obj(key)))
return;
- raw_local_irq_save(flags);
+ flags = lockdep_stage_disable();
lockdep_lock();
hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
@@ -6251,7 +6334,7 @@
call_rcu_zapped(pf);
}
lockdep_unlock();
- raw_local_irq_restore(flags);
+ lockdep_stage_restore(flags);
/* Wait until is_dynamic_key() has finished accessing k->hash_entry. */
synchronize_rcu();
@@ -6342,7 +6425,7 @@
if (unlikely(!debug_locks))
return;
- raw_local_irq_save(flags);
+ flags = lockdep_stage_disable();
for (i = 0; i < curr->lockdep_depth; i++) {
hlock = curr->held_locks + i;
@@ -6353,7 +6436,7 @@
print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock);
break;
}
- raw_local_irq_restore(flags);
+ lockdep_stage_restore(flags);
}
EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
--
Gitblit v1.6.2