From 37f49e37ab4cb5d0bc4c60eb5c6d4dd57db767bb Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Fri, 10 May 2024 07:44:59 +0000
Subject: [PATCH] gmac get mac from eeprom

---
 kernel/lib/percpu-refcount.c | 177 ++++++++++++++++++++++++++++++++++++++++++++++------------
 1 files changed, 139 insertions(+), 38 deletions(-)

diff --git a/kernel/lib/percpu-refcount.c b/kernel/lib/percpu-refcount.c
index 9f96fa7..493093b 100644
--- a/kernel/lib/percpu-refcount.c
+++ b/kernel/lib/percpu-refcount.c
@@ -1,8 +1,10 @@
-#define pr_fmt(fmt) "%s: " fmt "\n", __func__
+// SPDX-License-Identifier: GPL-2.0-only
+#define pr_fmt(fmt) "%s: " fmt, __func__
 
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/wait.h>
+#include <linux/slab.h>
 #include <linux/percpu-refcount.h>
 
 /*
@@ -49,9 +51,10 @@
  * @flags: PERCPU_REF_INIT_* flags
  * @gfp: allocation mask to use
  *
- * Initializes @ref.  If @flags is zero, @ref starts in percpu mode with a
- * refcount of 1; analagous to atomic_long_set(ref, 1).  See the
- * definitions of PERCPU_REF_INIT_* flags for flag behaviors.
+ * Initializes @ref.  @ref starts out in percpu mode with a refcount of 1 unless
+ * @flags contains PERCPU_REF_INIT_ATOMIC or PERCPU_REF_INIT_DEAD.  These flags
+ * change the start state to atomic with the latter setting the initial refcount
+ * to 0.  See the definitions of PERCPU_REF_INIT_* flags for flag behaviors.
  *
  * Note that @release must not sleep - it may potentially be called from RCU
  * callback context by percpu_ref_kill().
@@ -62,31 +65,56 @@
 	size_t align = max_t(size_t, 1 << __PERCPU_REF_FLAG_BITS,
 			     __alignof__(unsigned long));
 	unsigned long start_count = 0;
+	struct percpu_ref_data *data;
 
 	ref->percpu_count_ptr = (unsigned long)
 		__alloc_percpu_gfp(sizeof(unsigned long), align, gfp);
 	if (!ref->percpu_count_ptr)
 		return -ENOMEM;
 
-	ref->force_atomic = flags & PERCPU_REF_INIT_ATOMIC;
+	data = kzalloc(sizeof(*ref->data), gfp);
+	if (!data) {
+		free_percpu((void __percpu *)ref->percpu_count_ptr);
+		ref->percpu_count_ptr = 0;
+		return -ENOMEM;
+	}
 
-	if (flags & (PERCPU_REF_INIT_ATOMIC | PERCPU_REF_INIT_DEAD))
+	data->force_atomic = flags & PERCPU_REF_INIT_ATOMIC;
+	data->allow_reinit = flags & PERCPU_REF_ALLOW_REINIT;
+
+	if (flags & (PERCPU_REF_INIT_ATOMIC | PERCPU_REF_INIT_DEAD)) {
 		ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;
-	else
+		data->allow_reinit = true;
+	} else {
 		start_count += PERCPU_COUNT_BIAS;
+	}
 
 	if (flags & PERCPU_REF_INIT_DEAD)
 		ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
 	else
 		start_count++;
 
-	atomic_long_set(&ref->count, start_count);
+	atomic_long_set(&data->count, start_count);
 
-	ref->release = release;
-	ref->confirm_switch = NULL;
+	data->release = release;
+	data->confirm_switch = NULL;
+	data->ref = ref;
+	ref->data = data;
 	return 0;
 }
 EXPORT_SYMBOL_GPL(percpu_ref_init);
+
+static void __percpu_ref_exit(struct percpu_ref *ref)
+{
+	unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
+
+	if (percpu_count) {
+		/* non-NULL confirm_switch indicates switching in progress */
+		WARN_ON_ONCE(ref->data && ref->data->confirm_switch);
+		free_percpu(percpu_count);
+		ref->percpu_count_ptr = __PERCPU_REF_ATOMIC_DEAD;
+	}
+}
 
 /**
  * percpu_ref_exit - undo percpu_ref_init()
@@ -100,24 +128,36 @@
  */
 void percpu_ref_exit(struct percpu_ref *ref)
 {
-	unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
+	struct percpu_ref_data *data = ref->data;
+	unsigned long flags;
 
-	if (percpu_count) {
-		/* non-NULL confirm_switch indicates switching in progress */
-		WARN_ON_ONCE(ref->confirm_switch);
-		free_percpu(percpu_count);
-		ref->percpu_count_ptr = __PERCPU_REF_ATOMIC_DEAD;
-	}
+	__percpu_ref_exit(ref);
+
+	if (!data)
+		return;
+
+	spin_lock_irqsave(&percpu_ref_switch_lock, flags);
+	ref->percpu_count_ptr |= atomic_long_read(&ref->data->count) <<
+		__PERCPU_REF_FLAG_BITS;
+	ref->data = NULL;
+	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
+
+	kfree(data);
 }
 EXPORT_SYMBOL_GPL(percpu_ref_exit);
 
 static void percpu_ref_call_confirm_rcu(struct rcu_head *rcu)
 {
-	struct percpu_ref *ref = container_of(rcu, struct percpu_ref, rcu);
+	struct percpu_ref_data *data = container_of(rcu,
+			struct percpu_ref_data, rcu);
+	struct percpu_ref *ref = data->ref;
 
-	ref->confirm_switch(ref);
-	ref->confirm_switch = NULL;
+	data->confirm_switch(ref);
+	data->confirm_switch = NULL;
 	wake_up_all(&percpu_ref_switch_waitq);
+
+	if (!data->allow_reinit)
+		__percpu_ref_exit(ref);
 
 	/* drop ref from percpu_ref_switch_to_atomic() */
 	percpu_ref_put(ref);
@@ -125,7 +165,9 @@
 
 static void percpu_ref_switch_to_atomic_rcu(struct rcu_head *rcu)
 {
-	struct percpu_ref *ref = container_of(rcu, struct percpu_ref, rcu);
+	struct percpu_ref_data *data = container_of(rcu,
+			struct percpu_ref_data, rcu);
+	struct percpu_ref *ref = data->ref;
 	unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
 	unsigned long count = 0;
 	int cpu;
@@ -133,8 +175,8 @@
 	for_each_possible_cpu(cpu)
 		count += *per_cpu_ptr(percpu_count, cpu);
 
-	pr_debug("global %ld percpu %ld",
-		 atomic_long_read(&ref->count), (long)count);
+	pr_debug("global %lu percpu %lu\n",
+		 atomic_long_read(&data->count), count);
 
 	/*
 	 * It's crucial that we sum the percpu counters _before_ adding the sum
@@ -148,11 +190,11 @@
 	 * reaching 0 before we add the percpu counts. But doing it at the same
 	 * time is equivalent and saves us atomic operations:
 	 */
-	atomic_long_add((long)count - PERCPU_COUNT_BIAS, &ref->count);
+	atomic_long_add((long)count - PERCPU_COUNT_BIAS, &data->count);
 
-	WARN_ONCE(atomic_long_read(&ref->count) <= 0,
-		  "percpu ref (%pf) <= 0 (%ld) after switching to atomic",
-		  ref->release, atomic_long_read(&ref->count));
+	WARN_ONCE(atomic_long_read(&data->count) <= 0,
+		  "percpu ref (%ps) <= 0 (%ld) after switching to atomic",
+		  data->release, atomic_long_read(&data->count));
 
 	/* @ref is viewed as dead on all CPUs, send out switch confirmation */
 	percpu_ref_call_confirm_rcu(rcu);
@@ -178,10 +220,11 @@
 	 * Non-NULL ->confirm_switch is used to indicate that switching is
 	 * in progress.  Use noop one if unspecified.
 	 */
-	ref->confirm_switch = confirm_switch ?: percpu_ref_noop_confirm_switch;
+	ref->data->confirm_switch = confirm_switch ?:
+		percpu_ref_noop_confirm_switch;
 
 	percpu_ref_get(ref);	/* put after confirmation */
-	call_rcu_sched(&ref->rcu, percpu_ref_switch_to_atomic_rcu);
+	call_rcu(&ref->data->rcu, percpu_ref_switch_to_atomic_rcu);
 }
 
 static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
@@ -194,7 +237,10 @@
 	if (!(ref->percpu_count_ptr & __PERCPU_REF_ATOMIC))
 		return;
 
-	atomic_long_add(PERCPU_COUNT_BIAS, &ref->count);
+	if (WARN_ON_ONCE(!ref->data->allow_reinit))
+		return;
+
+	atomic_long_add(PERCPU_COUNT_BIAS, &ref->data->count);
 
 	/*
 	 * Restore per-cpu operation. smp_store_release() is paired
@@ -212,6 +258,8 @@
 static void __percpu_ref_switch_mode(struct percpu_ref *ref,
 				     percpu_ref_func_t *confirm_switch)
 {
+	struct percpu_ref_data *data = ref->data;
+
 	lockdep_assert_held(&percpu_ref_switch_lock);
 
 	/*
@@ -219,10 +267,10 @@
 	 * its completion.  If the caller ensures that ATOMIC switching
 	 * isn't in progress, this function can be called from any context.
 	 */
-	wait_event_lock_irq(percpu_ref_switch_waitq, !ref->confirm_switch,
+	wait_event_lock_irq(percpu_ref_switch_waitq, !data->confirm_switch,
 			    percpu_ref_switch_lock);
 
-	if (ref->force_atomic || (ref->percpu_count_ptr & __PERCPU_REF_DEAD))
+	if (data->force_atomic || (ref->percpu_count_ptr & __PERCPU_REF_DEAD))
 		__percpu_ref_switch_to_atomic(ref, confirm_switch);
 	else
 		__percpu_ref_switch_to_percpu(ref);
@@ -255,7 +303,7 @@
 
 	spin_lock_irqsave(&percpu_ref_switch_lock, flags);
 
-	ref->force_atomic = true;
+	ref->data->force_atomic = true;
 	__percpu_ref_switch_mode(ref, confirm_switch);
 
 	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
@@ -273,7 +321,7 @@
 void percpu_ref_switch_to_atomic_sync(struct percpu_ref *ref)
 {
 	percpu_ref_switch_to_atomic(ref, NULL);
-	wait_event(percpu_ref_switch_waitq, !ref->confirm_switch);
+	wait_event(percpu_ref_switch_waitq, !ref->data->confirm_switch);
 }
 EXPORT_SYMBOL_GPL(percpu_ref_switch_to_atomic_sync);
 
@@ -301,7 +349,7 @@
 
 	spin_lock_irqsave(&percpu_ref_switch_lock, flags);
 
-	ref->force_atomic = false;
+	ref->data->force_atomic = false;
 	__percpu_ref_switch_mode(ref, NULL);
 
 	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
@@ -333,7 +381,8 @@
 	spin_lock_irqsave(&percpu_ref_switch_lock, flags);
 
 	WARN_ONCE(ref->percpu_count_ptr & __PERCPU_REF_DEAD,
-		  "%s called more than once on %pf!", __func__, ref->release);
+		  "%s called more than once on %ps!", __func__,
+		  ref->data->release);
 
 	ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
 	__percpu_ref_switch_mode(ref, confirm_kill);
@@ -342,6 +391,34 @@
 	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
 }
 EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm);
+
+/**
+ * percpu_ref_is_zero - test whether a percpu refcount reached zero
+ * @ref: percpu_ref to test
+ *
+ * Returns %true if @ref reached zero.
+ *
+ * This function is safe to call as long as @ref is between init and exit.
+ */
+bool percpu_ref_is_zero(struct percpu_ref *ref)
+{
+	unsigned long __percpu *percpu_count;
+	unsigned long count, flags;
+
+	if (__ref_is_percpu(ref, &percpu_count))
+		return false;
+
+	/* protect us from being destroyed */
+	spin_lock_irqsave(&percpu_ref_switch_lock, flags);
+	if (ref->data)
+		count = atomic_long_read(&ref->data->count);
+	else
+		count = ref->percpu_count_ptr >> __PERCPU_REF_FLAG_BITS;
+	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
+
+	return count == 0;
+}
+EXPORT_SYMBOL_GPL(percpu_ref_is_zero);
 
 /**
  * percpu_ref_reinit - re-initialize a percpu refcount
@@ -356,11 +433,35 @@
  */
 void percpu_ref_reinit(struct percpu_ref *ref)
 {
+	WARN_ON_ONCE(!percpu_ref_is_zero(ref));
+
+	percpu_ref_resurrect(ref);
+}
+EXPORT_SYMBOL_GPL(percpu_ref_reinit);
+
+/**
+ * percpu_ref_resurrect - modify a percpu refcount from dead to live
+ * @ref: perpcu_ref to resurrect
+ *
+ * Modify @ref so that it's in the same state as before percpu_ref_kill() was
+ * called. @ref must be dead but must not yet have exited.
+ *
+ * If @ref->release() frees @ref then the caller is responsible for
+ * guaranteeing that @ref->release() does not get called while this
+ * function is in progress.
+ *
+ * Note that percpu_ref_tryget[_live]() are safe to perform on @ref while
+ * this function is in progress.
+ */
+void percpu_ref_resurrect(struct percpu_ref *ref)
+{
+	unsigned long __percpu *percpu_count;
 	unsigned long flags;
 
 	spin_lock_irqsave(&percpu_ref_switch_lock, flags);
 
-	WARN_ON_ONCE(!percpu_ref_is_zero(ref));
+	WARN_ON_ONCE(!(ref->percpu_count_ptr & __PERCPU_REF_DEAD));
+	WARN_ON_ONCE(__ref_is_percpu(ref, &percpu_count));
 
 	ref->percpu_count_ptr &= ~__PERCPU_REF_DEAD;
 	percpu_ref_get(ref);
@@ -368,4 +469,4 @@
 
 	spin_unlock_irqrestore(&percpu_ref_switch_lock, flags);
 }
-EXPORT_SYMBOL_GPL(percpu_ref_reinit);
+EXPORT_SYMBOL_GPL(percpu_ref_resurrect);
-- 
Gitblit v1.6.2
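
For reviewers who do not use this API day to day, the sketch below shows the caller-side lifecycle that the refactoring above has to preserve: init in percpu mode, cheap get/put on the fast path, kill during teardown, then exit once the count has reached zero. It is illustrative only and not part of the patch; struct my_device, my_dev_release() and the other my_* names are invented for the example.

/* Hypothetical usage sketch -- not part of the patch. */
#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/percpu-refcount.h>
#include <linux/slab.h>

struct my_device {
	struct percpu_ref ref;		/* percpu_ref_init() allocates its percpu_ref_data */
	struct completion ref_done;	/* signalled by the release callback */
};

/* Runs once the count hits zero after percpu_ref_kill(); must not sleep. */
static void my_dev_release(struct percpu_ref *ref)
{
	struct my_device *dev = container_of(ref, struct my_device, ref);

	complete(&dev->ref_done);
}

static struct my_device *my_dev_create(void)
{
	struct my_device *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

	if (!dev)
		return NULL;

	init_completion(&dev->ref_done);
	/* Starts in percpu mode with a refcount of 1. */
	if (percpu_ref_init(&dev->ref, my_dev_release, 0, GFP_KERNEL)) {
		kfree(dev);
		return NULL;
	}
	return dev;
}

/* Per-request fast path: cheap percpu get/put while the ref is live. */
static int my_dev_io(struct my_device *dev)
{
	if (!percpu_ref_tryget_live(&dev->ref))
		return -ENODEV;		/* already killed */
	/* ... do the work ... */
	percpu_ref_put(&dev->ref);
	return 0;
}

static void my_dev_destroy(struct my_device *dev)
{
	percpu_ref_kill(&dev->ref);		/* switch to atomic, drop the initial ref */
	wait_for_completion(&dev->ref_done);	/* release ran, count reached zero */
	percpu_ref_exit(&dev->ref);		/* frees the percpu counter and percpu_ref_data */
	kfree(dev);
}

With the patch applied, percpu_ref_exit() is also what frees the separately allocated struct percpu_ref_data, so the pairing of percpu_ref_init() and percpu_ref_exit() shown here remains mandatory.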