...
 #ifdef CONFIG_RWSEM_SPIN_ON_OWNER
 #include <linux/osq_lock.h>
 #endif
+#include <linux/android_vendor.h>

-struct rw_semaphore;
-
-#ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
-#include <linux/rwsem-spinlock.h> /* use a generic implementation */
-#define __RWSEM_INIT_COUNT(name)	.count = RWSEM_UNLOCKED_VALUE
-#else
-/* All arch specific implementations share the same struct */
+/*
+ * For an uncontended rwsem, count and owner are the only fields a task
+ * needs to touch when acquiring the rwsem. So they are put next to each
+ * other to increase the chance that they will share the same cacheline.
+ *
+ * In a contended rwsem, the owner is likely the most frequently accessed
+ * field in the structure as the optimistic waiter that holds the osq lock
+ * will spin on owner. For an embedded rwsem, other hot fields in the
+ * containing structure should be moved further away from the rwsem to
+ * reduce the chance that they will share the same cacheline, causing
+ * a cacheline bouncing problem.
+ */
 struct rw_semaphore {
 	atomic_long_t count;
-	struct list_head wait_list;
-	raw_spinlock_t wait_lock;
+	/*
+	 * Write owner or one of the read owners as well as flags regarding
+	 * the current state of the rwsem. Can be used as a speculative
+	 * check to see if the write owner is running on the cpu.
+	 */
+	atomic_long_t owner;
 #ifdef CONFIG_RWSEM_SPIN_ON_OWNER
 	struct optimistic_spin_queue osq; /* spinner MCS lock */
-	/*
-	 * Write owner. Used as a speculative check to see
-	 * if the owner is running on the cpu.
-	 */
-	struct task_struct *owner;
+#endif
+	raw_spinlock_t wait_lock;
+	struct list_head wait_list;
+#ifdef CONFIG_DEBUG_RWSEMS
+	void *magic;
 #endif
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 	struct lockdep_map dep_map;
 #endif
-	/* NOTICE: m_count is a vendor variable used for the config
-	 * CONFIG_RWSEM_PRIO_AWARE. This is included here to maintain ABI
-	 * compatibility with our vendors */
-	/* count for waiters preempt to queue in wait list */
-	long m_count;
+	ANDROID_VENDOR_DATA(1);
+	ANDROID_OEM_DATA_ARRAY(1, 2);
 };
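
The layout comment above carries a practical rule for code that embeds an rwsem: keep the containing object's hot fields off the rwsem's cacheline. A minimal sketch of that advice (the struct and field names are hypothetical, not part of this patch; ____cacheline_aligned is the stock helper from <linux/cache.h>):

	struct my_object {
		struct rw_semaphore sem;	/* count and owner share a line */
		/*
		 * Hot fields touched on every lookup. Starting them on a
		 * fresh cacheline keeps waiters spinning on sem->owner from
		 * bouncing the line these fields live on.
		 */
		unsigned long hits ____cacheline_aligned;
		atomic_t refs;
	};
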

-/*
- * Setting bit 0 of the owner field with other non-zero bits will indicate
- * that the rwsem is writer-owned with an unknown owner.
- */
-#define RWSEM_OWNER_UNKNOWN	((struct task_struct *)-1L)
+enum rwsem_waiter_type {
+	RWSEM_WAITING_FOR_WRITE,
+	RWSEM_WAITING_FOR_READ
+};

-extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_down_read_failed_killable(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_down_write_failed_killable(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
-extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
-
-/* Include the arch specific part */
-#include <asm/rwsem.h>
+struct rwsem_waiter {
+	struct list_head list;
+	struct task_struct *task;
+	enum rwsem_waiter_type type;
+	unsigned long timeout;
+	unsigned long last_rowner;
+};
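
For context on how these fields are consumed: the slowpath in kernel/locking/rwsem.c stacks one of these per blocked task and links it into wait_list, roughly as sketched below. This is a simplified paraphrase of the implementation, not part of this header; RWSEM_WAIT_TIMEOUT is defined in rwsem.c, and the real code does more work under wait_lock.

	struct rwsem_waiter waiter;

	waiter.task = current;
	waiter.type = RWSEM_WAITING_FOR_READ;
	waiter.timeout = jiffies + RWSEM_WAIT_TIMEOUT;	/* spin/handoff cutoff */

	raw_spin_lock_irq(&sem->wait_lock);
	list_add_tail(&waiter.list, &sem->wait_list);
	raw_spin_unlock_irq(&sem->wait_lock);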

 /* In all implementations count != 0 means locked */
 static inline int rwsem_is_locked(struct rw_semaphore *sem)
...
 	return atomic_long_read(&sem->count) != 0;
 }
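
rwsem_is_locked() only reports that count is non-zero; it cannot say which task holds the lock or in which mode, so it is best reserved for sanity checks. A small sketch (the function is a hypothetical caller):

	static void update_protected_state(struct rw_semaphore *sem)
	{
		/* The caller must already hold sem in some mode. */
		WARN_ON_ONCE(!rwsem_is_locked(sem));
		/* ... touch data protected by sem ... */
	}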

-#define __RWSEM_INIT_COUNT(name)	.count = ATOMIC_LONG_INIT(RWSEM_UNLOCKED_VALUE)
-#endif
+#define RWSEM_UNLOCKED_VALUE		0L
+#define __RWSEM_COUNT_INIT(name)	.count = ATOMIC_LONG_INIT(RWSEM_UNLOCKED_VALUE)

 /* Common initializer macros and functions */

 #ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define __RWSEM_DEP_MAP_INIT(lockname)	, .dep_map = { .name = #lockname }
+# define __RWSEM_DEP_MAP_INIT(lockname)			\
+	.dep_map = {					\
+		.name = #lockname,			\
+		.wait_type_inner = LD_WAIT_SLEEP,	\
+	},
 #else
 # define __RWSEM_DEP_MAP_INIT(lockname)
 #endif

+#ifdef CONFIG_DEBUG_RWSEMS
+# define __RWSEM_DEBUG_INIT(lockname)	.magic = &lockname,
+#else
+# define __RWSEM_DEBUG_INIT(lockname)
+#endif
+
 #ifdef CONFIG_RWSEM_SPIN_ON_OWNER
-#define __RWSEM_OPT_INIT(lockname)	, .osq = OSQ_LOCK_UNLOCKED, .owner = NULL
+#define __RWSEM_OPT_INIT(lockname)	.osq = OSQ_LOCK_UNLOCKED,
 #else
 #define __RWSEM_OPT_INIT(lockname)
 #endif

 #define __RWSEM_INITIALIZER(name)				\
-	{ __RWSEM_INIT_COUNT(name),				\
-	  .wait_list = LIST_HEAD_INIT((name).wait_list),	\
-	  .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock)	\
+	{ __RWSEM_COUNT_INIT(name),				\
+	  .owner = ATOMIC_LONG_INIT(0),				\
 	  __RWSEM_OPT_INIT(name)				\
+	  .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock),\
+	  .wait_list = LIST_HEAD_INIT((name).wait_list),	\
+	  __RWSEM_DEBUG_INIT(name)				\
 	  __RWSEM_DEP_MAP_INIT(name) }

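With the initializer reordered to match the new field layout, a compile-time rwsem can be defined directly, or through the DECLARE_RWSEM() wrapper defined just below (my_sem/my_sem2 are hypothetical names):

	struct rw_semaphore my_sem = __RWSEM_INITIALIZER(my_sem);

	/* Equivalent, via the wrapper: */
	static DECLARE_RWSEM(my_sem2);
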
 #define DECLARE_RWSEM(name) \
...
  * lock for reading
  */
 extern void down_read(struct rw_semaphore *sem);
+extern int __must_check down_read_interruptible(struct rw_semaphore *sem);
 extern int __must_check down_read_killable(struct rw_semaphore *sem);
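
down_read_interruptible() is new in this version and, like down_read_killable(), is __must_check: it returns 0 on success and -EINTR when the sleep is broken by a signal (any signal for the interruptible variant, only fatal ones for the killable one). A typical call pattern, assuming a hypothetical my_sem:

	int err;

	err = down_read_interruptible(&my_sem);
	if (err)
		return err;	/* -EINTR: woken by a signal */

	/* ... read state protected by my_sem ... */

	up_read(&my_sem);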

 /*
...
  * static then another method for expressing nested locking is
  * the explicit definition of lock class keys and the use of
  * lockdep_set_class() at lock initialization time.
- * See Documentation/locking/lockdep-design.txt for more details.)
+ * See Documentation/locking/lockdep-design.rst for more details.)
  */
 extern void down_read_nested(struct rw_semaphore *sem, int subclass);
+extern int __must_check down_read_killable_nested(struct rw_semaphore *sem, int subclass);
 extern void down_write_nested(struct rw_semaphore *sem, int subclass);
 extern int down_write_killable_nested(struct rw_semaphore *sem, int subclass);
 extern void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest_lock);
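
For the common case of taking two rwsems of the same lock class in a fixed order, SINGLE_DEPTH_NESTING (from <linux/lockdep.h>) is the usual subclass; parent/child below are hypothetical objects embedding an rwsem:

	down_read(&parent->sem);
	down_read_nested(&child->sem, SINGLE_DEPTH_NESTING);
	/* ... */
	up_read(&child->sem);
	up_read(&parent->sem);
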
...
 extern void up_read_non_owner(struct rw_semaphore *sem);
 #else
 # define down_read_nested(sem, subclass)		down_read(sem)
+# define down_read_killable_nested(sem, subclass)	down_read_killable(sem)
 # define down_write_nest_lock(sem, nest_lock)	down_write(sem)
 # define down_write_nested(sem, subclass)	down_write(sem)
 # define down_write_killable_nested(sem, subclass)	down_write_killable(sem)
---|