2023-12-11 6778948f9de86c3cfaf36725a7c87dcff9ba247f
kernel/kernel/locking/rtmutex.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * RT-Mutexes: simple blocking mutual exclusion locks with PI support
  *
@@ -7,13 +8,8 @@
  * Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
  * Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
  * Copyright (C) 2006 Esben Nielsen
- * Adaptive Spinlocks:
- * Copyright (C) 2008 Novell, Inc., Gregory Haskins, Sven Dietrich,
- * and Peter Morreale,
- * Adaptive Spinlocks simplification:
- * Copyright (C) 2008 Red Hat, Inc., Steven Rostedt <srostedt@redhat.com>
 *
- * See Documentation/locking/rt-mutex-design.txt for details.
+ * See Documentation/locking/rt-mutex-design.rst for details.
 */
 #include <linux/spinlock.h>
 #include <linux/export.h>
@@ -23,8 +19,7 @@
 #include <linux/sched/wake_q.h>
 #include <linux/sched/debug.h>
 #include <linux/timer.h>
-#include <linux/ww_mutex.h>
-#include <linux/blkdev.h>
+#include <trace/hooks/dtask.h>

 #include "rtmutex_common.h"

@@ -63,7 +58,7 @@
 if (rt_mutex_has_waiters(lock))
 val |= RT_MUTEX_HAS_WAITERS;

- lock->owner = (struct task_struct *)val;
+ WRITE_ONCE(lock->owner, (struct task_struct *)val);
 }

 static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
@@ -142,18 +137,11 @@
 WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS);
 }

-static int rt_mutex_real_waiter(struct rt_mutex_waiter *waiter)
-{
- return waiter && waiter != PI_WAKEUP_INPROGRESS &&
- waiter != PI_REQUEUE_INPROGRESS;
-}
-
 /*
 * We can speed up the acquire/release, if there's no debugging state to be
 * set up.
 */
 #ifndef CONFIG_DEBUG_RT_MUTEXES
-# define rt_mutex_cmpxchg_relaxed(l,c,n) (cmpxchg_relaxed(&l->owner, c, n) == c)
 # define rt_mutex_cmpxchg_acquire(l,c,n) (cmpxchg_acquire(&l->owner, c, n) == c)
 # define rt_mutex_cmpxchg_release(l,c,n) (cmpxchg_release(&l->owner, c, n) == c)

@@ -214,7 +202,6 @@
 }

 #else
-# define rt_mutex_cmpxchg_relaxed(l,c,n) (0)
 # define rt_mutex_cmpxchg_acquire(l,c,n) (0)
 # define rt_mutex_cmpxchg_release(l,c,n) (0)

@@ -241,7 +228,7 @@
 * Only use with rt_mutex_waiter_{less,equal}()
 */
 #define task_to_waiter(p) \
- &(struct rt_mutex_waiter){ .prio = (p)->prio, .deadline = (p)->dl.deadline, .task = (p) }
+ &(struct rt_mutex_waiter){ .prio = (p)->prio, .deadline = (p)->dl.deadline }

 static inline int
 rt_mutex_waiter_less(struct rt_mutex_waiter *left,
@@ -279,27 +266,6 @@
 return left->deadline == right->deadline;

 return 1;
-}
-
-#define STEAL_NORMAL 0
-#define STEAL_LATERAL 1
-
-static inline int
-rt_mutex_steal(struct rt_mutex *lock, struct rt_mutex_waiter *waiter, int mode)
-{
- struct rt_mutex_waiter *top_waiter = rt_mutex_top_waiter(lock);
-
- if (waiter == top_waiter || rt_mutex_waiter_less(waiter, top_waiter))
- return 1;
-
- /*
- * Note that RT tasks are excluded from lateral-steals
- * to prevent the introduction of an unbounded latency.
- */
- if (mode == STEAL_NORMAL || rt_task(waiter->task))
- return 0;
-
- return rt_mutex_waiter_equal(waiter, top_waiter);
 }

 static void
@@ -406,14 +372,6 @@
 return debug_rt_mutex_detect_deadlock(waiter, chwalk);
 }

-static void rt_mutex_wake_waiter(struct rt_mutex_waiter *waiter)
-{
- if (waiter->savestate)
- wake_up_lock_sleeper(waiter->task);
- else
- wake_up_process(waiter->task);
-}
-
 /*
 * Max number of times we'll walk the boosting chain:
 */
@@ -421,8 +379,7 @@

 static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p)
 {
- return rt_mutex_real_waiter(p->pi_blocked_on) ?
- p->pi_blocked_on->lock : NULL;
+ return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL;
 }

 /*
@@ -558,7 +515,7 @@
 * reached or the state of the chain has changed while we
 * dropped the locks.
 */
- if (!rt_mutex_real_waiter(waiter))
+ if (!waiter)
 goto out_unlock_pi;

 /*
@@ -670,8 +627,7 @@
 }

 /* [10] Grab the next task, i.e. owner of @lock */
- task = rt_mutex_owner(lock);
- get_task_struct(task);
+ task = get_task_struct(rt_mutex_owner(lock));
 raw_spin_lock(&task->pi_lock);

 /*
@@ -739,23 +695,19 @@
 * follow here. This is the end of the chain we are walking.
 */
 if (!rt_mutex_owner(lock)) {
- struct rt_mutex_waiter *lock_top_waiter;
-
 /*
 * If the requeue [7] above changed the top waiter,
 * then we need to wake the new top waiter up to try
 * to get the lock.
 */
- lock_top_waiter = rt_mutex_top_waiter(lock);
- if (prerequeue_top_waiter != lock_top_waiter)
- rt_mutex_wake_waiter(lock_top_waiter);
+ if (prerequeue_top_waiter != rt_mutex_top_waiter(lock))
+ wake_up_process(rt_mutex_top_waiter(lock)->task);
 raw_spin_unlock_irq(&lock->wait_lock);
 return 0;
 }

 /* [10] Grab the next task, i.e. the owner of @lock */
- task = rt_mutex_owner(lock);
- get_task_struct(task);
+ task = get_task_struct(rt_mutex_owner(lock));
 raw_spin_lock(&task->pi_lock);

 /* [11] requeue the pi waiters if necessary */
@@ -850,11 +802,9 @@
 * @task: The task which wants to acquire the lock
 * @waiter: The waiter that is queued to the lock's wait tree if the
 * callsite called task_blocked_on_lock(), otherwise NULL
- * @mode: Lock steal mode (STEAL_NORMAL, STEAL_LATERAL)
 */
-static int __try_to_take_rt_mutex(struct rt_mutex *lock,
- struct task_struct *task,
- struct rt_mutex_waiter *waiter, int mode)
+static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
+ struct rt_mutex_waiter *waiter)
 {
 lockdep_assert_held(&lock->wait_lock);

@@ -890,11 +840,12 @@
 */
 if (waiter) {
 /*
- * If waiter is not the highest priority waiter of @lock,
- * or its peer when lateral steal is allowed, give up.
+ * If waiter is not the highest priority waiter of
+ * @lock, give up.
 */
- if (!rt_mutex_steal(lock, waiter, mode))
+ if (waiter != rt_mutex_top_waiter(lock))
 return 0;
+
 /*
 * We can acquire the lock. Remove the waiter from the
 * lock waiters tree.
@@ -912,12 +863,14 @@
 */
 if (rt_mutex_has_waiters(lock)) {
 /*
- * If @task->prio is greater than the top waiter
- * priority (kernel view), or equal to it when a
- * lateral steal is forbidden, @task lost.
+ * If @task->prio is greater than or equal to
+ * the top waiter priority (kernel view),
+ * @task lost.
 */
- if (!rt_mutex_steal(lock, task_to_waiter(task), mode))
+ if (!rt_mutex_waiter_less(task_to_waiter(task),
+ rt_mutex_top_waiter(lock)))
 return 0;
+
 /*
 * The current top waiter stays enqueued. We
 * don't have to change anything in the lock
@@ -964,344 +917,6 @@
 return 1;
 }

-#ifdef CONFIG_PREEMPT_RT_FULL
-/*
- * preemptible spin_lock functions:
- */
-static inline void rt_spin_lock_fastlock(struct rt_mutex *lock,
- void (*slowfn)(struct rt_mutex *lock))
-{
- might_sleep_no_state_check();
-
- if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
- return;
- else
- slowfn(lock);
-}
-
-static inline void rt_spin_lock_fastunlock(struct rt_mutex *lock,
- void (*slowfn)(struct rt_mutex *lock))
-{
- if (likely(rt_mutex_cmpxchg_release(lock, current, NULL)))
- return;
- else
- slowfn(lock);
-}
-#ifdef CONFIG_SMP
-/*
- * Note that owner is a speculative pointer and dereferencing relies
- * on rcu_read_lock() and the check against the lock owner.
- */
-static int adaptive_wait(struct rt_mutex *lock,
- struct task_struct *owner)
-{
- int res = 0;
-
- rcu_read_lock();
- for (;;) {
- if (owner != rt_mutex_owner(lock))
- break;
- /*
- * Ensure that owner->on_cpu is dereferenced _after_
- * checking the above to be valid.
- */
- barrier();
- if (!owner->on_cpu) {
- res = 1;
- break;
- }
- cpu_relax();
- }
- rcu_read_unlock();
- return res;
-}
-#else
-static int adaptive_wait(struct rt_mutex *lock,
- struct task_struct *orig_owner)
-{
- return 1;
-}
-#endif
-
-static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
- struct rt_mutex_waiter *waiter,
- struct task_struct *task,
- enum rtmutex_chainwalk chwalk);
-/*
- * Slow path lock function spin_lock style: this variant is very
- * careful not to miss any non-lock wakeups.
- *
- * We store the current state under p->pi_lock in p->saved_state and
- * the try_to_wake_up() code handles this accordingly.
- */
-void __sched rt_spin_lock_slowlock_locked(struct rt_mutex *lock,
- struct rt_mutex_waiter *waiter,
- unsigned long flags)
-{
- struct task_struct *lock_owner, *self = current;
- struct rt_mutex_waiter *top_waiter;
- int ret;
-
- if (__try_to_take_rt_mutex(lock, self, NULL, STEAL_LATERAL))
- return;
-
- BUG_ON(rt_mutex_owner(lock) == self);
-
- /*
- * We save whatever state the task is in and we'll restore it
- * after acquiring the lock taking real wakeups into account
- * as well. We are serialized via pi_lock against wakeups. See
- * try_to_wake_up().
- */
- raw_spin_lock(&self->pi_lock);
- self->saved_state = self->state;
- __set_current_state_no_track(TASK_UNINTERRUPTIBLE);
- raw_spin_unlock(&self->pi_lock);
-
- ret = task_blocks_on_rt_mutex(lock, waiter, self, RT_MUTEX_MIN_CHAINWALK);
- BUG_ON(ret);
-
- for (;;) {
- /* Try to acquire the lock again. */
- if (__try_to_take_rt_mutex(lock, self, waiter, STEAL_LATERAL))
- break;
-
- top_waiter = rt_mutex_top_waiter(lock);
- lock_owner = rt_mutex_owner(lock);
-
- raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
-
- debug_rt_mutex_print_deadlock(waiter);
-
- if (top_waiter != waiter || adaptive_wait(lock, lock_owner))
- schedule();
-
- raw_spin_lock_irqsave(&lock->wait_lock, flags);
-
- raw_spin_lock(&self->pi_lock);
- __set_current_state_no_track(TASK_UNINTERRUPTIBLE);
- raw_spin_unlock(&self->pi_lock);
- }
-
- /*
- * Restore the task state to current->saved_state. We set it
- * to the original state above and the try_to_wake_up() code
- * has possibly updated it when a real (non-rtmutex) wakeup
- * happened while we were blocked. Clear saved_state so
- * try_to_wakeup() does not get confused.
- */
- raw_spin_lock(&self->pi_lock);
- __set_current_state_no_track(self->saved_state);
- self->saved_state = TASK_RUNNING;
- raw_spin_unlock(&self->pi_lock);
-
- /*
- * try_to_take_rt_mutex() sets the waiter bit
- * unconditionally. We might have to fix that up:
- */
- fixup_rt_mutex_waiters(lock);
-
- BUG_ON(rt_mutex_has_waiters(lock) && waiter == rt_mutex_top_waiter(lock));
- BUG_ON(!RB_EMPTY_NODE(&waiter->tree_entry));
-}
-
-static void noinline __sched rt_spin_lock_slowlock(struct rt_mutex *lock)
-{
- struct rt_mutex_waiter waiter;
- unsigned long flags;
-
- rt_mutex_init_waiter(&waiter, true);
-
- raw_spin_lock_irqsave(&lock->wait_lock, flags);
- rt_spin_lock_slowlock_locked(lock, &waiter, flags);
- raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
- debug_rt_mutex_free_waiter(&waiter);
-}
-
-static bool __sched __rt_mutex_unlock_common(struct rt_mutex *lock,
- struct wake_q_head *wake_q,
- struct wake_q_head *wq_sleeper);
-/*
- * Slow path to release a rt_mutex spin_lock style
- */
-void __sched rt_spin_lock_slowunlock(struct rt_mutex *lock)
-{
- unsigned long flags;
- DEFINE_WAKE_Q(wake_q);
- DEFINE_WAKE_Q(wake_sleeper_q);
- bool postunlock;
-
- raw_spin_lock_irqsave(&lock->wait_lock, flags);
- postunlock = __rt_mutex_unlock_common(lock, &wake_q, &wake_sleeper_q);
- raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
-
- if (postunlock)
- rt_mutex_postunlock(&wake_q, &wake_sleeper_q);
-}
-
-void __lockfunc rt_spin_lock(spinlock_t *lock)
-{
- sleeping_lock_inc();
- rcu_read_lock();
- migrate_disable();
- spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
-}
-EXPORT_SYMBOL(rt_spin_lock);
-
-void __lockfunc __rt_spin_lock(struct rt_mutex *lock)
-{
- rt_spin_lock_fastlock(lock, rt_spin_lock_slowlock);
-}
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass)
-{
- sleeping_lock_inc();
- rcu_read_lock();
- migrate_disable();
- spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
- rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
-}
-EXPORT_SYMBOL(rt_spin_lock_nested);
-#endif
-
-void __lockfunc rt_spin_unlock(spinlock_t *lock)
-{
- /* NOTE: we always pass in '1' for nested, for simplicity */
- spin_release(&lock->dep_map, 1, _RET_IP_);
- rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock);
- migrate_enable();
- rcu_read_unlock();
- sleeping_lock_dec();
-}
-EXPORT_SYMBOL(rt_spin_unlock);
-
-void __lockfunc __rt_spin_unlock(struct rt_mutex *lock)
-{
- rt_spin_lock_fastunlock(lock, rt_spin_lock_slowunlock);
-}
-EXPORT_SYMBOL(__rt_spin_unlock);
-
-/*
- * Wait for the lock to get unlocked: instead of polling for an unlock
- * (like raw spinlocks do), we lock and unlock, to force the kernel to
- * schedule if there's contention:
- */
-void __lockfunc rt_spin_unlock_wait(spinlock_t *lock)
-{
- spin_lock(lock);
- spin_unlock(lock);
-}
-EXPORT_SYMBOL(rt_spin_unlock_wait);
-
-int __lockfunc rt_spin_trylock(spinlock_t *lock)
-{
- int ret;
-
- sleeping_lock_inc();
- migrate_disable();
- ret = __rt_mutex_trylock(&lock->lock);
- if (ret) {
- spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
- rcu_read_lock();
- } else {
- migrate_enable();
- sleeping_lock_dec();
- }
- return ret;
-}
-EXPORT_SYMBOL(rt_spin_trylock);
-
-int __lockfunc rt_spin_trylock_bh(spinlock_t *lock)
-{
- int ret;
-
- local_bh_disable();
- ret = __rt_mutex_trylock(&lock->lock);
- if (ret) {
- sleeping_lock_inc();
- rcu_read_lock();
- migrate_disable();
- spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
- } else
- local_bh_enable();
- return ret;
-}
-EXPORT_SYMBOL(rt_spin_trylock_bh);
-
-int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags)
-{
- int ret;
-
- *flags = 0;
- ret = __rt_mutex_trylock(&lock->lock);
- if (ret) {
- sleeping_lock_inc();
- rcu_read_lock();
- migrate_disable();
- spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
- }
- return ret;
-}
-EXPORT_SYMBOL(rt_spin_trylock_irqsave);
-
-void
-__rt_spin_lock_init(spinlock_t *lock, const char *name, struct lock_class_key *key)
-{
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- /*
- * Make sure we are not reinitializing a held lock:
- */
- debug_check_no_locks_freed((void *)lock, sizeof(*lock));
- lockdep_init_map(&lock->dep_map, name, key, 0);
-#endif
-}
-EXPORT_SYMBOL(__rt_spin_lock_init);
-
-#endif /* PREEMPT_RT_FULL */
-
-#ifdef CONFIG_PREEMPT_RT_FULL
- static inline int __sched
-__mutex_lock_check_stamp(struct rt_mutex *lock, struct ww_acquire_ctx *ctx)
-{
- struct ww_mutex *ww = container_of(lock, struct ww_mutex, base.lock);
- struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
-
- if (!hold_ctx)
- return 0;
-
- if (unlikely(ctx == hold_ctx))
- return -EALREADY;
-
- if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
- (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
-#ifdef CONFIG_DEBUG_MUTEXES
- DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
- ctx->contending_lock = ww;
-#endif
- return -EDEADLK;
- }
-
- return 0;
-}
-#else
- static inline int __sched
-__mutex_lock_check_stamp(struct rt_mutex *lock, struct ww_acquire_ctx *ctx)
-{
- BUG();
- return 0;
-}
-
-#endif
-
-static inline int
-try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
- struct rt_mutex_waiter *waiter)
-{
- return __try_to_take_rt_mutex(lock, task, waiter, STEAL_NORMAL);
-}
-
 /*
 * Task blocks on lock.
 *
@@ -1334,22 +949,6 @@
 return -EDEADLK;

 raw_spin_lock(&task->pi_lock);
- /*
- * In the case of futex requeue PI, this will be a proxy
- * lock. The task will wake unaware that it is enqueueed on
- * this lock. Avoid blocking on two locks and corrupting
- * pi_blocked_on via the PI_WAKEUP_INPROGRESS
- * flag. futex_wait_requeue_pi() sets this when it wakes up
- * before requeue (due to a signal or timeout). Do not enqueue
- * the task if PI_WAKEUP_INPROGRESS is set.
- */
- if (task != current && task->pi_blocked_on == PI_WAKEUP_INPROGRESS) {
- raw_spin_unlock(&task->pi_lock);
- return -EAGAIN;
- }
-
- BUG_ON(rt_mutex_real_waiter(task->pi_blocked_on));
-
 waiter->task = task;
 waiter->lock = lock;
 waiter->prio = task->prio;
@@ -1373,7 +972,7 @@
 rt_mutex_enqueue_pi(owner, waiter);

 rt_mutex_adjust_prio(owner);
- if (rt_mutex_real_waiter(owner->pi_blocked_on))
+ if (owner->pi_blocked_on)
 chain_walk = 1;
 } else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) {
 chain_walk = 1;
@@ -1415,7 +1014,6 @@
 * Called with lock->wait_lock held and interrupts disabled.
 */
 static void mark_wakeup_next_waiter(struct wake_q_head *wake_q,
- struct wake_q_head *wake_sleeper_q,
 struct rt_mutex *lock)
 {
 struct rt_mutex_waiter *waiter;
@@ -1455,10 +1053,7 @@
 * Pairs with preempt_enable() in rt_mutex_postunlock();
 */
 preempt_disable();
- if (waiter->savestate)
- wake_q_add_sleeper(wake_sleeper_q, waiter->task);
- else
- wake_q_add(wake_q, waiter->task);
+ wake_q_add(wake_q, waiter->task);
 raw_spin_unlock(&current->pi_lock);
 }

@@ -1473,7 +1068,7 @@
 {
 bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock));
 struct task_struct *owner = rt_mutex_owner(lock);
- struct rt_mutex *next_lock = NULL;
+ struct rt_mutex *next_lock;

 lockdep_assert_held(&lock->wait_lock);

@@ -1499,8 +1094,7 @@
 rt_mutex_adjust_prio(owner);

 /* Store the lock on which owner is blocked or NULL */
- if (rt_mutex_real_waiter(owner->pi_blocked_on))
- next_lock = task_blocked_on_lock(owner);
+ next_lock = task_blocked_on_lock(owner);

 raw_spin_unlock(&owner->pi_lock);

@@ -1536,28 +1130,26 @@
 raw_spin_lock_irqsave(&task->pi_lock, flags);

 waiter = task->pi_blocked_on;
- if (!rt_mutex_real_waiter(waiter) ||
- rt_mutex_waiter_equal(waiter, task_to_waiter(task))) {
+ if (!waiter || rt_mutex_waiter_equal(waiter, task_to_waiter(task))) {
 raw_spin_unlock_irqrestore(&task->pi_lock, flags);
 return;
 }
 next_lock = waiter->lock;
+ raw_spin_unlock_irqrestore(&task->pi_lock, flags);

 /* gets dropped in rt_mutex_adjust_prio_chain()! */
 get_task_struct(task);

- raw_spin_unlock_irqrestore(&task->pi_lock, flags);
 rt_mutex_adjust_prio_chain(task, RT_MUTEX_MIN_CHAINWALK, NULL,
 next_lock, NULL, task);
 }

-void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter, bool savestate)
+void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)
 {
 debug_rt_mutex_init_waiter(waiter);
 RB_CLEAR_NODE(&waiter->pi_tree_entry);
 RB_CLEAR_NODE(&waiter->tree_entry);
 waiter->task = NULL;
- waiter->savestate = savestate;
 }

 /**
@@ -1573,27 +1165,26 @@
 static int __sched
 __rt_mutex_slowlock(struct rt_mutex *lock, int state,
 struct hrtimer_sleeper *timeout,
- struct rt_mutex_waiter *waiter,
- struct ww_acquire_ctx *ww_ctx)
+ struct rt_mutex_waiter *waiter)
 {
 int ret = 0;

+ trace_android_vh_rtmutex_wait_start(lock);
 for (;;) {
 /* Try to acquire the lock: */
 if (try_to_take_rt_mutex(lock, current, waiter))
 break;

- if (timeout && !timeout->task) {
- ret = -ETIMEDOUT;
- break;
- }
- if (signal_pending_state(state, current)) {
- ret = -EINTR;
- break;
- }
-
- if (ww_ctx && ww_ctx->acquired > 0) {
- ret = __mutex_lock_check_stamp(lock, ww_ctx);
+ /*
+ * TASK_INTERRUPTIBLE checks for signals and
+ * timeout. Ignored otherwise.
+ */
+ if (likely(state == TASK_INTERRUPTIBLE)) {
+ /* Signal pending? */
+ if (signal_pending(current))
+ ret = -EINTR;
+ if (timeout && !timeout->task)
+ ret = -ETIMEDOUT;
 if (ret)
 break;
 }
@@ -1608,6 +1199,7 @@
 set_current_state(state);
 }

+ trace_android_vh_rtmutex_wait_finish(lock);
 __set_current_state(TASK_RUNNING);
 return ret;
 }
@@ -1632,157 +1224,19 @@
 }
 }

-static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
- struct ww_acquire_ctx *ww_ctx)
-{
-#ifdef CONFIG_DEBUG_MUTEXES
- /*
- * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
- * but released with a normal mutex_unlock in this call.
- *
- * This should never happen, always use ww_mutex_unlock.
- */
- DEBUG_LOCKS_WARN_ON(ww->ctx);
-
- /*
- * Not quite done after calling ww_acquire_done() ?
- */
- DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);
-
- if (ww_ctx->contending_lock) {
- /*
- * After -EDEADLK you tried to
- * acquire a different ww_mutex? Bad!
- */
- DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);
-
- /*
- * You called ww_mutex_lock after receiving -EDEADLK,
- * but 'forgot' to unlock everything else first?
- */
- DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
- ww_ctx->contending_lock = NULL;
- }
-
- /*
- * Naughty, using a different class will lead to undefined behavior!
- */
- DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
-#endif
- ww_ctx->acquired++;
-}
-
-#ifdef CONFIG_PREEMPT_RT_FULL
-static void ww_mutex_account_lock(struct rt_mutex *lock,
- struct ww_acquire_ctx *ww_ctx)
-{
- struct ww_mutex *ww = container_of(lock, struct ww_mutex, base.lock);
- struct rt_mutex_waiter *waiter, *n;
-
- /*
- * This branch gets optimized out for the common case,
- * and is only important for ww_mutex_lock.
- */
- ww_mutex_lock_acquired(ww, ww_ctx);
- ww->ctx = ww_ctx;
-
- /*
- * Give any possible sleeping processes the chance to wake up,
- * so they can recheck if they have to back off.
- */
- rbtree_postorder_for_each_entry_safe(waiter, n, &lock->waiters.rb_root,
- tree_entry) {
- /* XXX debug rt mutex waiter wakeup */
-
- BUG_ON(waiter->lock != lock);
- rt_mutex_wake_waiter(waiter);
- }
-}
-
-#else
-
-static void ww_mutex_account_lock(struct rt_mutex *lock,
- struct ww_acquire_ctx *ww_ctx)
-{
- BUG();
-}
-#endif
-
-int __sched rt_mutex_slowlock_locked(struct rt_mutex *lock, int state,
- struct hrtimer_sleeper *timeout,
- enum rtmutex_chainwalk chwalk,
- struct ww_acquire_ctx *ww_ctx,
- struct rt_mutex_waiter *waiter)
-{
- int ret;
-
-#ifdef CONFIG_PREEMPT_RT_FULL
- if (ww_ctx) {
- struct ww_mutex *ww;
-
- ww = container_of(lock, struct ww_mutex, base.lock);
- if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
- return -EALREADY;
- }
-#endif
-
- /* Try to acquire the lock again: */
- if (try_to_take_rt_mutex(lock, current, NULL)) {
- if (ww_ctx)
- ww_mutex_account_lock(lock, ww_ctx);
- return 0;
- }
-
- set_current_state(state);
-
- /* Setup the timer, when timeout != NULL */
- if (unlikely(timeout))
- hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
-
- ret = task_blocks_on_rt_mutex(lock, waiter, current, chwalk);
-
- if (likely(!ret)) {
- /* sleep on the mutex */
- ret = __rt_mutex_slowlock(lock, state, timeout, waiter,
- ww_ctx);
- } else if (ww_ctx) {
- /* ww_mutex received EDEADLK, let it become EALREADY */
- ret = __mutex_lock_check_stamp(lock, ww_ctx);
- BUG_ON(!ret);
- }
-
- if (unlikely(ret)) {
- __set_current_state(TASK_RUNNING);
- remove_waiter(lock, waiter);
- /* ww_mutex wants to report EDEADLK/EALREADY, let it */
- if (!ww_ctx)
- rt_mutex_handle_deadlock(ret, chwalk, waiter);
- } else if (ww_ctx) {
- ww_mutex_account_lock(lock, ww_ctx);
- }
-
- /*
- * try_to_take_rt_mutex() sets the waiter bit
- * unconditionally. We might have to fix that up.
- */
- fixup_rt_mutex_waiters(lock);
- return ret;
-}
-
 /*
 * Slow path lock function:
 */
 static int __sched
 rt_mutex_slowlock(struct rt_mutex *lock, int state,
 struct hrtimer_sleeper *timeout,
- enum rtmutex_chainwalk chwalk,
- struct ww_acquire_ctx *ww_ctx)
+ enum rtmutex_chainwalk chwalk)
 {
 struct rt_mutex_waiter waiter;
 unsigned long flags;
 int ret = 0;

- rt_mutex_init_waiter(&waiter, false);
+ rt_mutex_init_waiter(&waiter);

 /*
 * Technically we could use raw_spin_[un]lock_irq() here, but this can
@@ -1794,8 +1248,35 @@
 */
 raw_spin_lock_irqsave(&lock->wait_lock, flags);

- ret = rt_mutex_slowlock_locked(lock, state, timeout, chwalk, ww_ctx,
- &waiter);
+ /* Try to acquire the lock again: */
+ if (try_to_take_rt_mutex(lock, current, NULL)) {
+ raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
+ return 0;
+ }
+
+ set_current_state(state);
+
+ /* Setup the timer, when timeout != NULL */
+ if (unlikely(timeout))
+ hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
+
+ ret = task_blocks_on_rt_mutex(lock, &waiter, current, chwalk);
+
+ if (likely(!ret))
+ /* sleep on the mutex */
+ ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);
+
+ if (unlikely(ret)) {
+ __set_current_state(TASK_RUNNING);
+ remove_waiter(lock, &waiter);
+ rt_mutex_handle_deadlock(ret, chwalk, &waiter);
+ }
+
+ /*
+ * try_to_take_rt_mutex() sets the waiter bit
+ * unconditionally. We might have to fix that up.
+ */
+ fixup_rt_mutex_waiters(lock);

 raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

@@ -1856,8 +1337,7 @@
 * Return whether the current task needs to call rt_mutex_postunlock().
 */
 static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
- struct wake_q_head *wake_q,
- struct wake_q_head *wake_sleeper_q)
+ struct wake_q_head *wake_q)
 {
 unsigned long flags;

@@ -1911,7 +1391,7 @@
 *
 * Queue the next waiter for wakeup once we release the wait_lock.
 */
- mark_wakeup_next_waiter(wake_q, wake_sleeper_q, lock);
+ mark_wakeup_next_waiter(wake_q, lock);
 raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

 return true; /* call rt_mutex_postunlock() */
@@ -1925,45 +1405,29 @@
 */
 static inline int
 rt_mutex_fastlock(struct rt_mutex *lock, int state,
- struct ww_acquire_ctx *ww_ctx,
 int (*slowfn)(struct rt_mutex *lock, int state,
 struct hrtimer_sleeper *timeout,
- enum rtmutex_chainwalk chwalk,
- struct ww_acquire_ctx *ww_ctx))
+ enum rtmutex_chainwalk chwalk))
 {
 if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
 return 0;

- /*
- * If rt_mutex blocks, the function sched_submit_work will not call
- * blk_schedule_flush_plug (because tsk_is_pi_blocked would be true).
- * We must call blk_schedule_flush_plug here, if we don't call it,
- * a deadlock in I/O may happen.
- */
- if (unlikely(blk_needs_flush_plug(current)))
- blk_schedule_flush_plug(current);
-
- return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK, ww_ctx);
+ return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK);
 }

 static inline int
 rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
 struct hrtimer_sleeper *timeout,
 enum rtmutex_chainwalk chwalk,
- struct ww_acquire_ctx *ww_ctx,
 int (*slowfn)(struct rt_mutex *lock, int state,
 struct hrtimer_sleeper *timeout,
- enum rtmutex_chainwalk chwalk,
- struct ww_acquire_ctx *ww_ctx))
+ enum rtmutex_chainwalk chwalk))
 {
 if (chwalk == RT_MUTEX_MIN_CHAINWALK &&
 likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
 return 0;

- if (unlikely(blk_needs_flush_plug(current)))
- blk_schedule_flush_plug(current);
-
- return slowfn(lock, state, timeout, chwalk, ww_ctx);
+ return slowfn(lock, state, timeout, chwalk);
 }

 static inline int
@@ -1977,13 +1441,11 @@
 }

 /*
- * Performs the wakeup of the the top-waiter and re-enables preemption.
+ * Performs the wakeup of the top-waiter and re-enables preemption.
 */
-void rt_mutex_postunlock(struct wake_q_head *wake_q,
- struct wake_q_head *wake_sleeper_q)
+void rt_mutex_postunlock(struct wake_q_head *wake_q)
 {
 wake_up_q(wake_q);
- wake_up_q_sleeper(wake_sleeper_q);

 /* Pairs with preempt_disable() in rt_mutex_slowunlock() */
 preempt_enable();
@@ -1992,46 +1454,24 @@
 static inline void
 rt_mutex_fastunlock(struct rt_mutex *lock,
 bool (*slowfn)(struct rt_mutex *lock,
- struct wake_q_head *wqh,
- struct wake_q_head *wq_sleeper))
+ struct wake_q_head *wqh))
 {
 DEFINE_WAKE_Q(wake_q);
- DEFINE_WAKE_Q(wake_sleeper_q);

 if (likely(rt_mutex_cmpxchg_release(lock, current, NULL)))
 return;

- if (slowfn(lock, &wake_q, &wake_sleeper_q))
- rt_mutex_postunlock(&wake_q, &wake_sleeper_q);
-}
-
-int __sched __rt_mutex_lock_state(struct rt_mutex *lock, int state)
-{
- might_sleep();
- return rt_mutex_fastlock(lock, state, NULL, rt_mutex_slowlock);
-}
-
-/**
- * rt_mutex_lock_state - lock a rt_mutex with a given state
- *
- * @lock: The rt_mutex to be locked
- * @state: The state to set when blocking on the rt_mutex
- */
-static inline int __sched rt_mutex_lock_state(struct rt_mutex *lock,
- unsigned int subclass, int state)
-{
- int ret;
-
- mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
- ret = __rt_mutex_lock_state(lock, state);
- if (ret)
- mutex_release(&lock->dep_map, 1, _RET_IP_);
- return ret;
+ if (slowfn(lock, &wake_q))
+ rt_mutex_postunlock(&wake_q);
 }

 static inline void __rt_mutex_lock(struct rt_mutex *lock, unsigned int subclass)
 {
- rt_mutex_lock_state(lock, subclass, TASK_UNINTERRUPTIBLE);
+ might_sleep();
+
+ mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
+ rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock);
+ trace_android_vh_record_rtmutex_lock_starttime(current, jiffies);
 }

 #ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -2046,9 +1486,9 @@
 __rt_mutex_lock(lock, subclass);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_lock_nested);
-#endif

-#ifndef CONFIG_DEBUG_LOCK_ALLOC
+#else /* !CONFIG_DEBUG_LOCK_ALLOC */
+
 /**
 * rt_mutex_lock - lock a rt_mutex
 *
@@ -2072,7 +1512,18 @@
 */
 int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
 {
- return rt_mutex_lock_state(lock, 0, TASK_INTERRUPTIBLE);
+ int ret;
+
+ might_sleep();
+
+ mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+ ret = rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, rt_mutex_slowlock);
+ if (ret)
+ mutex_release(&lock->dep_map, _RET_IP_);
+ else
+ trace_android_vh_record_rtmutex_lock_starttime(current, jiffies);
+
+ return ret;
 }
 EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);

@@ -2088,22 +1539,6 @@
 {
 return __rt_mutex_slowtrylock(lock);
 }
-
-/**
- * rt_mutex_lock_killable - lock a rt_mutex killable
- *
- * @lock: the rt_mutex to be locked
- * @detect_deadlock: deadlock detection on/off
- *
- * Returns:
- * 0 on success
- * -EINTR when interrupted by a signal
- */
-int __sched rt_mutex_lock_killable(struct rt_mutex *lock)
-{
- return rt_mutex_lock_state(lock, 0, TASK_KILLABLE);
-}
-EXPORT_SYMBOL_GPL(rt_mutex_lock_killable);

 /**
 * rt_mutex_timed_lock - lock a rt_mutex interruptible
@@ -2128,26 +1563,15 @@
 mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
 ret = rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
 RT_MUTEX_MIN_CHAINWALK,
- NULL,
 rt_mutex_slowlock);
 if (ret)
- mutex_release(&lock->dep_map, 1, _RET_IP_);
+ mutex_release(&lock->dep_map, _RET_IP_);
+ else
+ trace_android_vh_record_rtmutex_lock_starttime(current, jiffies);

 return ret;
 }
 EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
-
-int __sched __rt_mutex_trylock(struct rt_mutex *lock)
-{
-#ifdef CONFIG_PREEMPT_RT_FULL
- if (WARN_ON_ONCE(in_irq() || in_nmi()))
-#else
- if (WARN_ON_ONCE(in_irq() || in_nmi() || in_serving_softirq()))
-#endif
- return 0;
-
- return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
-}

 /**
 * rt_mutex_trylock - try to lock a rt_mutex
@@ -2164,18 +1588,18 @@
 {
 int ret;

- ret = __rt_mutex_trylock(lock);
+ if (WARN_ON_ONCE(in_irq() || in_nmi() || in_serving_softirq()))
+ return 0;
+
+ ret = rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
 if (ret)
 mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+ else
+ trace_android_vh_record_rtmutex_lock_starttime(current, jiffies);

 return ret;
 }
 EXPORT_SYMBOL_GPL(rt_mutex_trylock);
-
-void __sched __rt_mutex_unlock(struct rt_mutex *lock)
-{
- rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
-}

 /**
 * rt_mutex_unlock - unlock a rt_mutex
@@ -2184,14 +1608,18 @@
 */
 void __sched rt_mutex_unlock(struct rt_mutex *lock)
 {
- mutex_release(&lock->dep_map, 1, _RET_IP_);
- __rt_mutex_unlock(lock);
+ mutex_release(&lock->dep_map, _RET_IP_);
+ rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
+ trace_android_vh_record_rtmutex_lock_starttime(current, 0);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_unlock);

-static bool __sched __rt_mutex_unlock_common(struct rt_mutex *lock,
- struct wake_q_head *wake_q,
- struct wake_q_head *wq_sleeper)
+/**
+ * Futex variant, that since futex variants do not use the fast-path, can be
+ * simple and will not need to retry.
+ */
+bool __sched __rt_mutex_futex_unlock(struct rt_mutex *lock,
+ struct wake_q_head *wake_q)
 {
 lockdep_assert_held(&lock->wait_lock);

@@ -2208,35 +1636,23 @@
 * avoid inversion prior to the wakeup. preempt_disable()
 * therein pairs with rt_mutex_postunlock().
 */
- mark_wakeup_next_waiter(wake_q, wq_sleeper, lock);
+ mark_wakeup_next_waiter(wake_q, lock);

 return true; /* call postunlock() */
-}
-
-/**
- * Futex variant, that since futex variants do not use the fast-path, can be
- * simple and will not need to retry.
- */
-bool __sched __rt_mutex_futex_unlock(struct rt_mutex *lock,
- struct wake_q_head *wake_q,
- struct wake_q_head *wq_sleeper)
-{
- return __rt_mutex_unlock_common(lock, wake_q, wq_sleeper);
 }

 void __sched rt_mutex_futex_unlock(struct rt_mutex *lock)
 {
 DEFINE_WAKE_Q(wake_q);
- DEFINE_WAKE_Q(wake_sleeper_q);
 unsigned long flags;
 bool postunlock;

 raw_spin_lock_irqsave(&lock->wait_lock, flags);
- postunlock = __rt_mutex_futex_unlock(lock, &wake_q, &wake_sleeper_q);
+ postunlock = __rt_mutex_futex_unlock(lock, &wake_q);
 raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

 if (postunlock)
- rt_mutex_postunlock(&wake_q, &wake_sleeper_q);
+ rt_mutex_postunlock(&wake_q);
 }

 /**
@@ -2275,7 +1691,7 @@
 if (name && key)
 debug_rt_mutex_init(lock, name, key);
 }
-EXPORT_SYMBOL(__rt_mutex_init);
+EXPORT_SYMBOL_GPL(__rt_mutex_init);

 /**
 * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
@@ -2295,14 +1711,6 @@
 struct task_struct *proxy_owner)
 {
 __rt_mutex_init(lock, NULL, NULL);
-#ifdef CONFIG_DEBUG_SPINLOCK
- /*
- * get another key class for the wait_lock. LOCK_PI and UNLOCK_PI is
- * holding the ->wait_lock of the proxy_lock while unlocking a sleeping
- * lock.
- */
- raw_spin_lock_init(&lock->wait_lock);
-#endif
 debug_rt_mutex_proxy_lock(lock, proxy_owner);
 rt_mutex_set_owner(lock, proxy_owner);
 }
@@ -2323,26 +1731,6 @@
 {
 debug_rt_mutex_proxy_unlock(lock);
 rt_mutex_set_owner(lock, NULL);
-}
-
-static void fixup_rt_mutex_blocked(struct rt_mutex *lock)
-{
- struct task_struct *tsk = current;
- /*
- * RT has a problem here when the wait got interrupted by a timeout
- * or a signal. task->pi_blocked_on is still set. The task must
- * acquire the hash bucket lock when returning from this function.
- *
- * If the hash bucket lock is contended then the
- * BUG_ON(rt_mutex_real_waiter(task->pi_blocked_on)) in
- * task_blocks_on_rt_mutex() will trigger. This can be avoided by
- * clearing task->pi_blocked_on which removes the task from the
- * boosting chain of the rtmutex. That's correct because the task
- * is not longer blocked on it.
- */
- raw_spin_lock(&tsk->pi_lock);
- tsk->pi_blocked_on = NULL;
- raw_spin_unlock(&tsk->pi_lock);
 }

 /**
@@ -2375,34 +1763,6 @@
 if (try_to_take_rt_mutex(lock, task, NULL))
 return 1;

-#ifdef CONFIG_PREEMPT_RT_FULL
- /*
- * In PREEMPT_RT there's an added race.
- * If the task, that we are about to requeue, times out,
- * it can set the PI_WAKEUP_INPROGRESS. This tells the requeue
- * to skip this task. But right after the task sets
- * its pi_blocked_on to PI_WAKEUP_INPROGRESS it can then
- * block on the spin_lock(&hb->lock), which in RT is an rtmutex.
- * This will replace the PI_WAKEUP_INPROGRESS with the actual
- * lock that it blocks on. We *must not* place this task
- * on this proxy lock in that case.
- *
- * To prevent this race, we first take the task's pi_lock
- * and check if it has updated its pi_blocked_on. If it has,
- * we assume that it woke up and we return -EAGAIN.
- * Otherwise, we set the task's pi_blocked_on to
- * PI_REQUEUE_INPROGRESS, so that if the task is waking up
- * it will know that we are in the process of requeuing it.
- */
- raw_spin_lock(&task->pi_lock);
- if (task->pi_blocked_on) {
- raw_spin_unlock(&task->pi_lock);
- return -EAGAIN;
- }
- task->pi_blocked_on = PI_REQUEUE_INPROGRESS;
- raw_spin_unlock(&task->pi_lock);
-#endif
-
 /* We enforce deadlock detection for futexes */
 ret = task_blocks_on_rt_mutex(lock, waiter, task,
 RT_MUTEX_FULL_CHAINWALK);
@@ -2416,9 +1776,6 @@
 */
 ret = 0;
 }
-
- if (ret)
- fixup_rt_mutex_blocked(lock);

 debug_rt_mutex_print_deadlock(waiter);

@@ -2486,7 +1843,7 @@
 * been started.
 * @waiter: the pre-initialized rt_mutex_waiter
 *
- * Wait for the the lock acquisition started on our behalf by
+ * Wait for the lock acquisition started on our behalf by
 * rt_mutex_start_proxy_lock(). Upon failure, the caller must call
 * rt_mutex_cleanup_proxy_lock().
 *
@@ -2505,15 +1862,12 @@
 raw_spin_lock_irq(&lock->wait_lock);
 /* sleep on the mutex */
 set_current_state(TASK_INTERRUPTIBLE);
- ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter, NULL);
+ ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);
 /*
 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
 * have to fix that up.
 */
 fixup_rt_mutex_waiters(lock);
- if (ret)
- fixup_rt_mutex_blocked(lock);
-
 raw_spin_unlock_irq(&lock->wait_lock);

 return ret;
@@ -2575,99 +1929,3 @@

 return cleanup;
 }
-
-static inline int
-ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
-{
-#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
- unsigned tmp;
-
- if (ctx->deadlock_inject_countdown-- == 0) {
- tmp = ctx->deadlock_inject_interval;
- if (tmp > UINT_MAX/4)
- tmp = UINT_MAX;
- else
- tmp = tmp*2 + tmp + tmp/2;
-
- ctx->deadlock_inject_interval = tmp;
- ctx->deadlock_inject_countdown = tmp;
- ctx->contending_lock = lock;
-
- ww_mutex_unlock(lock);
-
- return -EDEADLK;
- }
-#endif
-
- return 0;
-}
-
-#ifdef CONFIG_PREEMPT_RT_FULL
-int __sched
-ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
-{
- int ret;
-
- might_sleep();
-
- mutex_acquire_nest(&lock->base.dep_map, 0, 0,
- ctx ? &ctx->dep_map : NULL, _RET_IP_);
- ret = rt_mutex_slowlock(&lock->base.lock, TASK_INTERRUPTIBLE, NULL, 0,
- ctx);
- if (ret)
- mutex_release(&lock->base.dep_map, 1, _RET_IP_);
- else if (!ret && ctx && ctx->acquired > 1)
- return ww_mutex_deadlock_injection(lock, ctx);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible);
-
-int __sched
-ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
-{
- int ret;
-
- might_sleep();
-
- mutex_acquire_nest(&lock->base.dep_map, 0, 0,
- ctx ? &ctx->dep_map : NULL, _RET_IP_);
- ret = rt_mutex_slowlock(&lock->base.lock, TASK_UNINTERRUPTIBLE, NULL, 0,
- ctx);
- if (ret)
- mutex_release(&lock->base.dep_map, 1, _RET_IP_);
- else if (!ret && ctx && ctx->acquired > 1)
- return ww_mutex_deadlock_injection(lock, ctx);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(ww_mutex_lock);
-
-void __sched ww_mutex_unlock(struct ww_mutex *lock)
-{
- int nest = !!lock->ctx;
-
- /*
- * The unlocking fastpath is the 0->1 transition from 'locked'
- * into 'unlocked' state:
- */
- if (nest) {
-#ifdef CONFIG_DEBUG_MUTEXES
- DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
-#endif
- if (lock->ctx->acquired > 0)
- lock->ctx->acquired--;
- lock->ctx = NULL;
- }
-
- mutex_release(&lock->base.dep_map, nest, _RET_IP_);
- __rt_mutex_unlock(&lock->base.lock);
-}
-EXPORT_SYMBOL(ww_mutex_unlock);
-
-int __rt_mutex_owner_current(struct rt_mutex *lock)
-{
- return rt_mutex_owner(lock) == current;
-}
-EXPORT_SYMBOL(__rt_mutex_owner_current);
-#endif