2023-12-11 6778948f9de86c3cfaf36725a7c87dcff9ba247f
kernel/include/linux/mutex.h
@@ -19,19 +19,9 @@
 #include <asm/processor.h>
 #include <linux/osq_lock.h>
 #include <linux/debug_locks.h>
+#include <linux/android_vendor.h>
 
 struct ww_acquire_ctx;
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
-		, .dep_map = { .name = #lockname }
-#else
-# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
-#endif
-
-#ifdef CONFIG_PREEMPT_RT_FULL
-# include <linux/mutex_rt.h>
-#else
 
 /*
  * Simple, straightforward mutexes with strict semantics:
@@ -74,17 +64,19 @@
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 	struct lockdep_map	dep_map;
 #endif
+	ANDROID_OEM_DATA_ARRAY(1, 2);
 };
 
-/*
- * Internal helper function; C doesn't allow us to hide it :/
- *
- * DO NOT USE (outside of mutex code).
- */
-static inline struct task_struct *__mutex_owner(struct mutex *lock)
-{
-	return (struct task_struct *)(atomic_long_read(&lock->owner) & ~0x07);
-}
+struct ww_class;
+struct ww_acquire_ctx;
+
+struct ww_mutex {
+	struct mutex		base;
+	struct ww_acquire_ctx	*ctx;
+#ifdef CONFIG_DEBUG_MUTEXES
+	struct ww_class		*ww_class;
+#endif
+};
 
 /*
  * This is the control structure for tasks blocked on mutex,
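
For context on the ww_mutex made visible in the hunk above: a wound/wait mutex is always acquired under a ww_acquire_ctx tied to its ww_class. The sketch below is a minimal, hypothetical usage example (example_ww_class, example_lock, example_setup and example_use are made-up names), assuming the standard wound/wait helpers declared in <linux/ww_mutex.h>; it is illustrative only and not part of this patch.

#include <linux/ww_mutex.h>

static DEFINE_WW_CLASS(example_ww_class);	/* hypothetical lock class */
static struct ww_mutex example_lock;		/* hypothetical ww_mutex */

static void example_setup(void)
{
	/* one-time initialisation: bind the lock to its class */
	ww_mutex_init(&example_lock, &example_ww_class);
}

static int example_use(void)
{
	struct ww_acquire_ctx ctx;
	int ret;

	ww_acquire_init(&ctx, &example_ww_class);
	ret = ww_mutex_lock(&example_lock, &ctx);	/* 0, or -EDEADLK if wounded */
	if (ret == 0) {
		/* ... touch data protected by example_lock ... */
		ww_mutex_unlock(&example_lock);
	}
	ww_acquire_fini(&ctx);
	return ret;
}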
@@ -129,6 +121,16 @@
 	__mutex_init((mutex), #mutex, &__key);				\
 } while (0)
 
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define __DEP_MAP_MUTEX_INITIALIZER(lockname)			\
+		, .dep_map = {					\
+			.name = #lockname,			\
+			.wait_type_inner = LD_WAIT_SLEEP,	\
+		}
+#else
+# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
+#endif
+
 #define __MUTEX_INITIALIZER(lockname) \
 		{ .owner = ATOMIC_LONG_INIT(0) \
 		, .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
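
For context on the relocated __DEP_MAP_MUTEX_INITIALIZER above: with CONFIG_DEBUG_LOCK_ALLOC enabled it now also records .wait_type_inner = LD_WAIT_SLEEP in the lockdep map, marking the mutex as a sleeping lock for lockdep's wait-type checks. Below is a minimal, hypothetical example of a statically initialized mutex that picks this up (example_mutex and example_update are made-up names); it is not part of this patch.

#include <linux/mutex.h>

static DEFINE_MUTEX(example_mutex);	/* expands via __MUTEX_INITIALIZER() */

static void example_update(void)
{
	mutex_lock(&example_mutex);	/* sleeping lock: not usable in atomic context */
	/* ... modify data protected by example_mutex ... */
	mutex_unlock(&example_mutex);
}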
@@ -148,14 +150,11 @@
  *
  * Returns true if the mutex is locked, false if unlocked.
  */
-static inline bool mutex_is_locked(struct mutex *lock)
-{
-	return __mutex_owner(lock) != NULL;
-}
+extern bool mutex_is_locked(struct mutex *lock);
 
 /*
  * See kernel/locking/mutex.c for detailed documentation of these APIs.
- * Also see Documentation/locking/mutex-design.txt.
+ * Also see Documentation/locking/mutex-design.rst.
  */
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
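
For context on the mutex_is_locked() declaration above: the helper reports whether anyone holds the mutex and is typically used for sanity checks rather than locking decisions. A tiny hypothetical sketch (example_assert_held is a made-up name), not part of this patch:

static void example_assert_held(struct mutex *lock)
{
	/* warn if nobody holds the mutex here (does not check which task owns it) */
	WARN_ON_ONCE(!mutex_is_locked(lock));
}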
@@ -224,15 +223,7 @@
  * - MUTEX_TRYLOCK_SUCCESS - lock acquired,
  * - MUTEX_TRYLOCK_RECURSIVE - we already owned the lock.
  */
-static inline /* __deprecated */ __must_check enum mutex_trylock_recursive_enum
-mutex_trylock_recursive(struct mutex *lock)
-{
-	if (unlikely(__mutex_owner(lock) == current))
-		return MUTEX_TRYLOCK_RECURSIVE;
-
-	return mutex_trylock(lock);
-}
-
-#endif /* !PREEMPT_RT_FULL */
+extern /* __deprecated */ __must_check enum mutex_trylock_recursive_enum
+mutex_trylock_recursive(struct mutex *lock);
 
 #endif /* __LINUX_MUTEX_H */
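
For context on the mutex_trylock_recursive() declaration above: callers must distinguish the three enum mutex_trylock_recursive_enum results, because MUTEX_TRYLOCK_RECURSIVE means this code path did not take a new reference and must not unlock. A hedged, hypothetical sketch (example_try_enter is a made-up name) assuming the usual MUTEX_TRYLOCK_FAILED/SUCCESS/RECURSIVE values; it is not part of this patch, and the helper itself is already flagged as deprecated.

static bool example_try_enter(struct mutex *lock, bool *newly_acquired)
{
	switch (mutex_trylock_recursive(lock)) {
	case MUTEX_TRYLOCK_SUCCESS:
		*newly_acquired = true;		/* caller must mutex_unlock() later */
		return true;
	case MUTEX_TRYLOCK_RECURSIVE:
		*newly_acquired = false;	/* already owned: do not unlock here */
		return true;
	case MUTEX_TRYLOCK_FAILED:
	default:
		return false;			/* held by someone else */
	}
}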