From 093a6c67005148ae32a5c9e4553491b9f5c2457b Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Fri, 10 May 2024 07:40:51 +0000
Subject: [PATCH] disable kernel build warning

---
kernel/include/linux/percpu-refcount.h | 119 +++++++++++++++++++++++++++++++++++------------------------
1 file changed, 70 insertions(+), 49 deletions(-)
diff --git a/kernel/include/linux/percpu-refcount.h b/kernel/include/linux/percpu-refcount.h
index 009cdf3..16c35a7 100644
--- a/kernel/include/linux/percpu-refcount.h
+++ b/kernel/include/linux/percpu-refcount.h
@@ -75,27 +75,47 @@
* operation using percpu_ref_switch_to_percpu(). If initialized
* with this flag, the ref will stay in atomic mode until
* percpu_ref_switch_to_percpu() is invoked on it.
+ * Implies ALLOW_REINIT.
*/
PERCPU_REF_INIT_ATOMIC = 1 << 0,
/*
* Start dead w/ ref == 0 in atomic mode. Must be revived with
- * percpu_ref_reinit() before used. Implies INIT_ATOMIC.
+ * percpu_ref_reinit() before used. Implies INIT_ATOMIC and
+ * ALLOW_REINIT.
*/
PERCPU_REF_INIT_DEAD = 1 << 1,
+
+ /*
+ * Allow switching from atomic mode to percpu mode.
+ */
+ PERCPU_REF_ALLOW_REINIT = 1 << 2,
+};
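As a rough caller-side illustration of how these flags combine (a sketch only,
not part of this patch; my_release(), my_ref and my_init() are hypothetical
names):

        #include <linux/percpu-refcount.h>

        /* Start in atomic mode but keep the right to switch to percpu mode
         * later; PERCPU_REF_ALLOW_REINIT is what makes that switch legal. */
        static void my_release(struct percpu_ref *ref)
        {
                /* last reference dropped: tear down the containing object */
        }

        static struct percpu_ref my_ref;

        static int my_init(void)
        {
                int ret;

                ret = percpu_ref_init(&my_ref, my_release,
                                      PERCPU_REF_INIT_ATOMIC | PERCPU_REF_ALLOW_REINIT,
                                      GFP_KERNEL);
                if (ret)
                        return ret;

                /* later, once the hot path starts: */
                percpu_ref_switch_to_percpu(&my_ref);
                return 0;
        }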
+
+struct percpu_ref_data {
+ atomic_long_t count;
+ percpu_ref_func_t *release;
+ percpu_ref_func_t *confirm_switch;
+ bool force_atomic:1;
+ bool allow_reinit:1;
+ struct rcu_head rcu;
+ struct percpu_ref *ref;
};
struct percpu_ref {
- atomic_long_t count;
/*
* The low bit of the pointer indicates whether the ref is in percpu
* mode; if set, then get/put will manipulate the atomic_t.
*/
unsigned long percpu_count_ptr;
- percpu_ref_func_t *release;
- percpu_ref_func_t *confirm_switch;
- bool force_atomic:1;
- struct rcu_head rcu;
+
+ /*
+ * 'percpu_ref' is often embedded into a user structure, and only
+ * 'percpu_count_ptr' is needed in the fast path.  Moving the other
+ * fields into 'percpu_ref_data' therefore reduces the memory
+ * footprint of the fast path.
+ */
+ struct percpu_ref_data *data;
};
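For scale (again illustrative, not taken from the patch): after this split an
embedded ref costs its container only two words, so a hypothetical user
structure might look like:

        struct my_queue {
                struct percpu_ref refs; /* percpu_count_ptr + data pointer,
                                         * i.e. 16 bytes on 64-bit */
                /* other hot-path fields can share the cache line */
        };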
int __must_check percpu_ref_init(struct percpu_ref *ref,
@@ -108,7 +128,9 @@
void percpu_ref_switch_to_percpu(struct percpu_ref *ref);
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
percpu_ref_func_t *confirm_kill);
+void percpu_ref_resurrect(struct percpu_ref *ref);
void percpu_ref_reinit(struct percpu_ref *ref);
+bool percpu_ref_is_zero(struct percpu_ref *ref);
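A hedged sketch of how the newly exported helpers can be combined in a
shutdown/restart cycle (hypothetical caller code; 'struct my_queue', its
->refs and ->drain_wq members and the my_*() functions are not part of this
patch):

        static void my_freeze(struct my_queue *q)
        {
                percpu_ref_kill(&q->refs);      /* percpu_ref_tryget_live() now fails */
                wait_event(q->drain_wq, percpu_ref_is_zero(&q->refs));
        }

        static void my_unfreeze(struct my_queue *q)
        {
                percpu_ref_resurrect(&q->refs); /* back to the pre-kill state */
        }

The release callback would be expected to wake_up(&q->drain_wq) so that the
wait_event() above can complete.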
/**
* percpu_ref_kill - drop the initial ref
@@ -146,7 +168,7 @@
* between contaminating the pointer value, meaning that
* READ_ONCE() is required when fetching it.
*
- * The smp_read_barrier_depends() implied by READ_ONCE() pairs
+ * The dependency ordering from the READ_ONCE() pairs
* with smp_store_release() in __percpu_ref_switch_to_percpu().
*/
percpu_ptr = READ_ONCE(ref->percpu_count_ptr);
@@ -177,14 +199,14 @@
{
unsigned long __percpu *percpu_count;
- rcu_read_lock_sched();
+ rcu_read_lock();
if (__ref_is_percpu(ref, &percpu_count))
this_cpu_add(*percpu_count, nr);
else
- atomic_long_add(nr, &ref->count);
+ atomic_long_add(nr, &ref->data->count);
- rcu_read_unlock_sched();
+ rcu_read_unlock();
}
/**
@@ -201,6 +223,36 @@
}
/**
+ * percpu_ref_tryget_many - try to increment a percpu refcount
+ * @ref: percpu_ref to try-get
+ * @nr: number of references to get
+ *
+ * Increment a percpu refcount by @nr unless its count already reached zero.
+ * Returns %true on success; %false on failure.
+ *
+ * This function is safe to call as long as @ref is between init and exit.
+ */
+static inline bool percpu_ref_tryget_many(struct percpu_ref *ref,
+ unsigned long nr)
+{
+ unsigned long __percpu *percpu_count;
+ bool ret;
+
+ rcu_read_lock();
+
+ if (__ref_is_percpu(ref, &percpu_count)) {
+ this_cpu_add(*percpu_count, nr);
+ ret = true;
+ } else {
+ ret = atomic_long_add_unless(&ref->data->count, nr, 0);
+ }
+
+ rcu_read_unlock();
+
+ return ret;
+}
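One plausible use of the batched variant (illustrative only; my_submit_burst()
and the 'q' structure are hypothetical):

        static bool my_submit_burst(struct my_queue *q, unsigned long nr)
        {
                /* one reference per request, all taken in a single operation */
                if (!percpu_ref_tryget_many(&q->refs, nr))
                        return false;           /* queue already shut down */

                /* ... issue the nr requests ... */

                /* once the whole burst has completed: */
                percpu_ref_put_many(&q->refs, nr);
                return true;
        }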
+
+/**
* percpu_ref_tryget - try to increment a percpu refcount
* @ref: percpu_ref to try-get
*
@@ -211,21 +263,7 @@
*/
static inline bool percpu_ref_tryget(struct percpu_ref *ref)
{
- unsigned long __percpu *percpu_count;
- bool ret;
-
- rcu_read_lock_sched();
-
- if (__ref_is_percpu(ref, &percpu_count)) {
- this_cpu_inc(*percpu_count);
- ret = true;
- } else {
- ret = atomic_long_inc_not_zero(&ref->count);
- }
-
- rcu_read_unlock_sched();
-
- return ret;
+ return percpu_ref_tryget_many(ref, 1);
}
/**
@@ -248,16 +286,16 @@
unsigned long __percpu *percpu_count;
bool ret = false;
- rcu_read_lock_sched();
+ rcu_read_lock();
if (__ref_is_percpu(ref, &percpu_count)) {
this_cpu_inc(*percpu_count);
ret = true;
} else if (!(ref->percpu_count_ptr & __PERCPU_REF_DEAD)) {
- ret = atomic_long_inc_not_zero(&ref->count);
+ ret = atomic_long_inc_not_zero(&ref->data->count);
}
- rcu_read_unlock_sched();
+ rcu_read_unlock();
return ret;
}
@@ -276,14 +314,14 @@
{
unsigned long __percpu *percpu_count;
- rcu_read_lock_sched();
+ rcu_read_lock();
if (__ref_is_percpu(ref, &percpu_count))
this_cpu_sub(*percpu_count, nr);
- else if (unlikely(atomic_long_sub_and_test(nr, &ref->count)))
- ref->release(ref);
+ else if (unlikely(atomic_long_sub_and_test(nr, &ref->data->count)))
+ ref->data->release(ref);
- rcu_read_unlock_sched();
+ rcu_read_unlock();
}
/**
@@ -312,23 +350,6 @@
static inline bool percpu_ref_is_dying(struct percpu_ref *ref)
{
return ref->percpu_count_ptr & __PERCPU_REF_DEAD;
-}
-
-/**
- * percpu_ref_is_zero - test whether a percpu refcount reached zero
- * @ref: percpu_ref to test
- *
- * Returns %true if @ref reached zero.
- *
- * This function is safe to call as long as @ref is between init and exit.
- */
-static inline bool percpu_ref_is_zero(struct percpu_ref *ref)
-{
- unsigned long __percpu *percpu_count;
-
- if (__ref_is_percpu(ref, &percpu_count))
- return false;
- return !atomic_long_read(&ref->count);
}
#endif
--
Gitblit v1.6.2