.. | .. |
---|
| 1 | +// SPDX-License-Identifier: GPL-2.0-or-later |
---|
1 | 2 | /* |
---|
2 | 3 | * Fast Userspace Mutexes (which I call "Futexes!"). |
---|
3 | 4 | * (C) Rusty Russell, IBM 2002 |
---|
.. | .. |
---|
29 | 30 | * |
---|
30 | 31 | * "The futexes are also cursed." |
---|
31 | 32 | * "But they come in a choice of three flavours!" |
---|
32 | | - * |
---|
33 | | - * This program is free software; you can redistribute it and/or modify |
---|
34 | | - * it under the terms of the GNU General Public License as published by |
---|
35 | | - * the Free Software Foundation; either version 2 of the License, or |
---|
36 | | - * (at your option) any later version. |
---|
37 | | - * |
---|
38 | | - * This program is distributed in the hope that it will be useful, |
---|
39 | | - * but WITHOUT ANY WARRANTY; without even the implied warranty of |
---|
40 | | - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
---|
41 | | - * GNU General Public License for more details. |
---|
42 | | - * |
---|
43 | | - * You should have received a copy of the GNU General Public License |
---|
44 | | - * along with this program; if not, write to the Free Software |
---|
45 | | - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
---|
46 | 33 | */ |
---|
47 | 34 | #include <linux/compat.h> |
---|
48 | | -#include <linux/slab.h> |
---|
49 | | -#include <linux/poll.h> |
---|
50 | | -#include <linux/fs.h> |
---|
51 | | -#include <linux/file.h> |
---|
52 | 35 | #include <linux/jhash.h> |
---|
53 | | -#include <linux/init.h> |
---|
54 | | -#include <linux/futex.h> |
---|
55 | | -#include <linux/mount.h> |
---|
56 | 36 | #include <linux/pagemap.h> |
---|
57 | 37 | #include <linux/syscalls.h> |
---|
58 | | -#include <linux/signal.h> |
---|
59 | | -#include <linux/export.h> |
---|
60 | | -#include <linux/magic.h> |
---|
61 | | -#include <linux/pid.h> |
---|
62 | | -#include <linux/nsproxy.h> |
---|
63 | | -#include <linux/ptrace.h> |
---|
64 | | -#include <linux/sched/rt.h> |
---|
65 | | -#include <linux/sched/wake_q.h> |
---|
66 | | -#include <linux/sched/mm.h> |
---|
67 | | -#include <linux/hugetlb.h> |
---|
68 | 38 | #include <linux/freezer.h> |
---|
69 | | -#include <linux/bootmem.h> |
---|
| 39 | +#include <linux/memblock.h> |
---|
70 | 40 | #include <linux/fault-inject.h> |
---|
| 41 | +#include <linux/time_namespace.h> |
---|
71 | 42 | |
---|
72 | 43 | #include <asm/futex.h> |
---|
73 | 44 | |
---|
74 | 45 | #include "locking/rtmutex_common.h" |
---|
| 46 | +#include <trace/hooks/futex.h> |
---|
75 | 47 | |
---|
76 | 48 | /* |
---|
77 | 49 | * READ this before attempting to hack on futexes! |
---|
.. | .. |
---|
147 | 119 | * |
---|
148 | 120 | * Where (A) orders the waiters increment and the futex value read through |
---|
149 | 121 | * atomic operations (see hb_waiters_inc) and where (B) orders the write |
---|
150 | | - * to futex and the waiters read -- this is done by the barriers for both |
---|
151 | | - * shared and private futexes in get_futex_key_refs(). |
---|
| 122 | + * to futex and the waiters read (see hb_waiters_pending()). |
---|
152 | 123 | * |
---|
153 | 124 | * This yields the following case (where X:=waiters, Y:=futex): |
---|
154 | 125 | * |
---|
.. | .. |
---|
212 | 183 | struct rt_mutex pi_mutex; |
---|
213 | 184 | |
---|
214 | 185 | struct task_struct *owner; |
---|
215 | | - atomic_t refcount; |
---|
| 186 | + refcount_t refcount; |
---|
216 | 187 | |
---|
217 | 188 | union futex_key key; |
---|
218 | 189 | } __randomize_layout; |
---|
.. | .. |
---|
321 | 292 | if (IS_ERR(dir)) |
---|
322 | 293 | return PTR_ERR(dir); |
---|
323 | 294 | |
---|
324 | | - if (!debugfs_create_bool("ignore-private", mode, dir, |
---|
325 | | - &fail_futex.ignore_private)) { |
---|
326 | | - debugfs_remove_recursive(dir); |
---|
327 | | - return -ENOMEM; |
---|
328 | | - } |
---|
329 | | - |
---|
| 295 | + debugfs_create_bool("ignore-private", mode, dir, |
---|
| 296 | + &fail_futex.ignore_private); |
---|
330 | 297 | return 0; |
---|
331 | 298 | } |
---|
332 | 299 | |
---|
.. | .. |
---|
346 | 313 | #else |
---|
347 | 314 | static inline void compat_exit_robust_list(struct task_struct *curr) { } |
---|
348 | 315 | #endif |
---|
349 | | - |
---|
350 | | -static inline void futex_get_mm(union futex_key *key) |
---|
351 | | -{ |
---|
352 | | - mmgrab(key->private.mm); |
---|
353 | | - /* |
---|
354 | | - * Ensure futex_get_mm() implies a full barrier such that |
---|
355 | | - * get_futex_key() implies a full barrier. This is relied upon |
---|
356 | | - * as smp_mb(); (B), see the ordering comment above. |
---|
357 | | - */ |
---|
358 | | - smp_mb__after_atomic(); |
---|
359 | | -} |
---|
360 | 316 | |
---|
361 | 317 | /* |
---|
362 | 318 | * Reflects a new waiter being added to the waitqueue. |
---|
.. | .. |
---|
386 | 342 | static inline int hb_waiters_pending(struct futex_hash_bucket *hb) |
---|
387 | 343 | { |
---|
388 | 344 | #ifdef CONFIG_SMP |
---|
| 345 | + /* |
---|
| 346 | + * Full barrier (B), see the ordering comment above. |
---|
| 347 | + */ |
---|
| 348 | + smp_mb(); |
---|
389 | 349 | return atomic_read(&hb->waiters); |
---|
390 | 350 | #else |
---|
391 | 351 | return 1; |
---|
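The smp_mb() added here is barrier (B) from the ordering comment earlier in this file: the waker must store the futex value before reading hb->waiters, while a waiter increments hb->waiters (A) before re-reading the futex value, so at least one side always observes the other. Below is a minimal userspace sketch of that pairing using C11 atomics in place of the kernel's smp_mb(); all names are illustrative and none of this is kernel API.

```c
#include <stdatomic.h>
#include <stdbool.h>

static atomic_int waiters;     /* stands in for hb->waiters     */
static atomic_int futex_word;  /* stands in for the futex value */

/* Waiter side: advertise yourself, then re-check the value. */
static bool waiter_should_sleep(int expected)
{
	atomic_fetch_add(&waiters, 1);               /* like hb_waiters_inc()   */
	atomic_thread_fence(memory_order_seq_cst);   /* barrier (A)             */
	return atomic_load(&futex_word) == expected; /* sleep only if unchanged */
}

/* Waker side: publish the new value, then check for waiters. */
static bool waker_needs_wakeup(int newval)
{
	atomic_store(&futex_word, newval);
	atomic_thread_fence(memory_order_seq_cst);   /* barrier (B), as above   */
	return atomic_load(&waiters) != 0;           /* only a 0 here may skip the wakeup */
}

int main(void)
{
	(void)waiter_should_sleep(0);
	(void)waker_needs_wakeup(1);
	return 0;
}
```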
.. | .. |
---|
423 | 383 | && key1->both.offset == key2->both.offset); |
---|
424 | 384 | } |
---|
425 | 385 | |
---|
426 | | -/* |
---|
427 | | - * Take a reference to the resource addressed by a key. |
---|
428 | | - * Can be called while holding spinlocks. |
---|
| 386 | +enum futex_access { |
---|
| 387 | + FUTEX_READ, |
---|
| 388 | + FUTEX_WRITE |
---|
| 389 | +}; |
---|
| 390 | + |
---|
| 391 | +/** |
---|
| 392 | + * futex_setup_timer - set up the sleeping hrtimer. |
---|
| 393 | + * @time: ptr to the given timeout value |
---|
| 394 | + * @timeout: the hrtimer_sleeper structure to be set up |
---|
| 395 | + * @flags: futex flags |
---|
| 396 | + * @range_ns: optional range in ns |
---|
429 | 397 | * |
---|
| 398 | + * Return: Initialized hrtimer_sleeper structure or NULL if no timeout |
---|
| 399 | + * value given |
---|
430 | 400 | */ |
---|
431 | | -static void get_futex_key_refs(union futex_key *key) |
---|
| 401 | +static inline struct hrtimer_sleeper * |
---|
| 402 | +futex_setup_timer(ktime_t *time, struct hrtimer_sleeper *timeout, |
---|
| 403 | + int flags, u64 range_ns) |
---|
432 | 404 | { |
---|
433 | | - if (!key->both.ptr) |
---|
434 | | - return; |
---|
| 405 | + if (!time) |
---|
| 406 | + return NULL; |
---|
435 | 407 | |
---|
| 408 | + hrtimer_init_sleeper_on_stack(timeout, (flags & FLAGS_CLOCKRT) ? |
---|
| 409 | + CLOCK_REALTIME : CLOCK_MONOTONIC, |
---|
| 410 | + HRTIMER_MODE_ABS); |
---|
436 | 411 | /* |
---|
437 | | - * On MMU less systems futexes are always "private" as there is no per |
---|
438 | | - * process address space. We need the smp wmb nevertheless - yes, |
---|
439 | | - * arch/blackfin has MMU less SMP ... |
---|
| 412 | + * If range_ns is 0, calling hrtimer_set_expires_range_ns() is |
---|
| 413 | + * effectively the same as calling hrtimer_set_expires(). |
---|
440 | 414 | */ |
---|
441 | | - if (!IS_ENABLED(CONFIG_MMU)) { |
---|
442 | | - smp_mb(); /* explicit smp_mb(); (B) */ |
---|
443 | | - return; |
---|
444 | | - } |
---|
| 415 | + hrtimer_set_expires_range_ns(&timeout->timer, *time, range_ns); |
---|
445 | 416 | |
---|
446 | | - switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) { |
---|
447 | | - case FUT_OFF_INODE: |
---|
448 | | - smp_mb(); /* explicit smp_mb(); (B) */ |
---|
449 | | - break; |
---|
450 | | - case FUT_OFF_MMSHARED: |
---|
451 | | - futex_get_mm(key); /* implies smp_mb(); (B) */ |
---|
452 | | - break; |
---|
453 | | - default: |
---|
454 | | - /* |
---|
455 | | - * Private futexes do not hold reference on an inode or |
---|
456 | | - * mm, therefore the only purpose of calling get_futex_key_refs |
---|
457 | | - * is because we need the barrier for the lockless waiter check. |
---|
458 | | - */ |
---|
459 | | - smp_mb(); /* explicit smp_mb(); (B) */ |
---|
460 | | - } |
---|
461 | | -} |
---|
462 | | - |
---|
463 | | -/* |
---|
464 | | - * Drop a reference to the resource addressed by a key. |
---|
465 | | - * The hash bucket spinlock must not be held. This is |
---|
466 | | - * a no-op for private futexes, see comment in the get |
---|
467 | | - * counterpart. |
---|
468 | | - */ |
---|
469 | | -static void drop_futex_key_refs(union futex_key *key) |
---|
470 | | -{ |
---|
471 | | - if (!key->both.ptr) { |
---|
472 | | - /* If we're here then we tried to put a key we failed to get */ |
---|
473 | | - WARN_ON_ONCE(1); |
---|
474 | | - return; |
---|
475 | | - } |
---|
476 | | - |
---|
477 | | - if (!IS_ENABLED(CONFIG_MMU)) |
---|
478 | | - return; |
---|
479 | | - |
---|
480 | | - switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) { |
---|
481 | | - case FUT_OFF_INODE: |
---|
482 | | - break; |
---|
483 | | - case FUT_OFF_MMSHARED: |
---|
484 | | - mmdrop(key->private.mm); |
---|
485 | | - break; |
---|
486 | | - } |
---|
| 417 | + return timeout; |
---|
487 | 418 | } |
---|
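For context, this is the call pattern that later hunks in this patch convert futex_wait(), futex_lock_pi() and futex_wait_requeue_pi() to: set up the sleeper, block, then cancel and destroy the on-stack timer. A kernel-context sketch only, assuming the includes already present in this file; example_wait() and do_the_actual_wait() are hypothetical names, not futex code.

```c
static int example_wait(ktime_t *abs_time, unsigned int flags)
{
	struct hrtimer_sleeper timeout, *to;
	int ret;

	/* A NULL @abs_time means "no timeout": @to stays NULL. */
	to = futex_setup_timer(abs_time, &timeout, flags,
			       current->timer_slack_ns);

	ret = do_the_actual_wait(to);	/* hypothetical: block, honouring @to */

	if (to) {
		hrtimer_cancel(&to->timer);
		destroy_hrtimer_on_stack(&to->timer);
	}
	return ret;
}
```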
488 | 419 | |
---|
489 | 420 | /* |
---|
.. | .. |
---|
529 | 460 | /** |
---|
530 | 461 | * get_futex_key() - Get parameters which are the keys for a futex |
---|
531 | 462 | * @uaddr: virtual address of the futex |
---|
532 | | - * @fshared: 0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED |
---|
| 463 | + * @fshared: false for a PROCESS_PRIVATE futex, true for PROCESS_SHARED |
---|
533 | 464 | * @key: address where result is stored. |
---|
534 | | - * @rw: mapping needs to be read/write (values: VERIFY_READ, |
---|
535 | | - * VERIFY_WRITE) |
---|
| 465 | + * @rw: mapping needs to be read/write (values: FUTEX_READ, |
---|
| 466 | + * FUTEX_WRITE) |
---|
536 | 467 | * |
---|
537 | 468 | * Return: a negative error code or 0 |
---|
538 | 469 | * |
---|
539 | 470 | * The key words are stored in @key on success. |
---|
540 | 471 | * |
---|
541 | 472 | * For shared mappings (when @fshared), the key is: |
---|
| 473 | + * |
---|
542 | 474 | * ( inode->i_sequence, page->index, offset_within_page ) |
---|
| 475 | + * |
---|
543 | 476 | * [ also see get_inode_sequence_number() ] |
---|
544 | 477 | * |
---|
545 | 478 | * For private mappings (or when !@fshared), the key is: |
---|
| 479 | + * |
---|
546 | 480 | * ( current->mm, address, 0 ) |
---|
547 | 481 | * |
---|
548 | 482 | * This allows (cross process, where applicable) identification of the futex |
---|
.. | .. |
---|
550 | 484 | * |
---|
551 | 485 | * lock_page() might sleep, the caller should not hold a spinlock. |
---|
552 | 486 | */ |
---|
553 | | -static int |
---|
554 | | -get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw) |
---|
| 487 | +static int get_futex_key(u32 __user *uaddr, bool fshared, union futex_key *key, |
---|
| 488 | + enum futex_access rw) |
---|
555 | 489 | { |
---|
556 | 490 | unsigned long address = (unsigned long)uaddr; |
---|
557 | 491 | struct mm_struct *mm = current->mm; |
---|
.. | .. |
---|
567 | 501 | return -EINVAL; |
---|
568 | 502 | address -= key->both.offset; |
---|
569 | 503 | |
---|
570 | | - if (unlikely(!access_ok(rw, uaddr, sizeof(u32)))) |
---|
| 504 | + if (unlikely(!access_ok(uaddr, sizeof(u32)))) |
---|
571 | 505 | return -EFAULT; |
---|
572 | 506 | |
---|
573 | 507 | if (unlikely(should_fail_futex(fshared))) |
---|
.. | .. |
---|
583 | 517 | if (!fshared) { |
---|
584 | 518 | key->private.mm = mm; |
---|
585 | 519 | key->private.address = address; |
---|
586 | | - get_futex_key_refs(key); /* implies smp_mb(); (B) */ |
---|
587 | 520 | return 0; |
---|
588 | 521 | } |
---|
589 | 522 | |
---|
590 | 523 | again: |
---|
591 | 524 | /* Ignore any VERIFY_READ mapping (futex common case) */ |
---|
592 | | - if (unlikely(should_fail_futex(fshared))) |
---|
| 525 | + if (unlikely(should_fail_futex(true))) |
---|
593 | 526 | return -EFAULT; |
---|
594 | 527 | |
---|
595 | | - err = get_user_pages_fast(address, 1, 1, &page); |
---|
| 528 | + err = get_user_pages_fast(address, 1, FOLL_WRITE, &page); |
---|
596 | 529 | /* |
---|
597 | 530 | * If write access is not required (eg. FUTEX_WAIT), try |
---|
598 | 531 | * and get read-only access. |
---|
599 | 532 | */ |
---|
600 | | - if (err == -EFAULT && rw == VERIFY_READ) { |
---|
| 533 | + if (err == -EFAULT && rw == FUTEX_READ) { |
---|
601 | 534 | err = get_user_pages_fast(address, 1, 0, &page); |
---|
602 | 535 | ro = 1; |
---|
603 | 536 | } |
---|
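A userspace view of the private/shared split documented in the get_futex_key() comment above: a private futex is keyed purely on (current->mm, address) and needs no page lookup at all, while a futex in a shared mapping is keyed through the backing inode so any process mapping the same page reaches the same futex. Illustrative sketch only, error handling omitted.

```c
#define _GNU_SOURCE
#include <linux/futex.h>
#include <sys/syscall.h>
#include <sys/mman.h>
#include <unistd.h>
#include <stdint.h>
#include <stddef.h>

static long futex(uint32_t *uaddr, int op, uint32_t val)
{
	return syscall(SYS_futex, uaddr, op, val, NULL, NULL, 0);
}

int main(void)
{
	/* Private: keyed as (current->mm, address, 0). */
	static uint32_t private_word;
	futex(&private_word, FUTEX_WAKE_PRIVATE, 1);

	/*
	 * Shared: lives in a MAP_SHARED mapping, so other processes that map
	 * the same page can wait on and wake the very same futex.
	 */
	uint32_t *shared_word = mmap(NULL, sizeof(*shared_word),
				     PROT_READ | PROT_WRITE,
				     MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	futex(shared_word, FUTEX_WAKE, 1);

	munmap(shared_word, sizeof(*shared_word));
	return 0;
}
```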
.. | .. |
---|
654 | 587 | lock_page(page); |
---|
655 | 588 | shmem_swizzled = PageSwapCache(page) || page->mapping; |
---|
656 | 589 | unlock_page(page); |
---|
657 | | - put_page(page); |
---|
| 590 | + put_user_page(page); |
---|
658 | 591 | |
---|
659 | 592 | if (shmem_swizzled) |
---|
660 | 593 | goto again; |
---|
.. | .. |
---|
677 | 610 | * A RO anonymous page will never change and thus doesn't make |
---|
678 | 611 | * sense for futex operations. |
---|
679 | 612 | */ |
---|
680 | | - if (unlikely(should_fail_futex(fshared)) || ro) { |
---|
| 613 | + if (unlikely(should_fail_futex(true)) || ro) { |
---|
681 | 614 | err = -EFAULT; |
---|
682 | 615 | goto out; |
---|
683 | 616 | } |
---|
.. | .. |
---|
704 | 637 | |
---|
705 | 638 | if (READ_ONCE(page->mapping) != mapping) { |
---|
706 | 639 | rcu_read_unlock(); |
---|
707 | | - put_page(page); |
---|
| 640 | + put_user_page(page); |
---|
708 | 641 | |
---|
709 | 642 | goto again; |
---|
710 | 643 | } |
---|
.. | .. |
---|
712 | 645 | inode = READ_ONCE(mapping->host); |
---|
713 | 646 | if (!inode) { |
---|
714 | 647 | rcu_read_unlock(); |
---|
715 | | - put_page(page); |
---|
| 648 | + put_user_page(page); |
---|
716 | 649 | |
---|
717 | 650 | goto again; |
---|
718 | 651 | } |
---|
.. | .. |
---|
723 | 656 | rcu_read_unlock(); |
---|
724 | 657 | } |
---|
725 | 658 | |
---|
726 | | - get_futex_key_refs(key); /* implies smp_mb(); (B) */ |
---|
727 | | - |
---|
728 | 659 | out: |
---|
729 | | - put_page(page); |
---|
| 660 | + put_user_page(page); |
---|
730 | 661 | return err; |
---|
731 | | -} |
---|
732 | | - |
---|
733 | | -static inline void put_futex_key(union futex_key *key) |
---|
734 | | -{ |
---|
735 | | - drop_futex_key_refs(key); |
---|
736 | 662 | } |
---|
737 | 663 | |
---|
738 | 664 | /** |
---|
.. | .. |
---|
752 | 678 | struct mm_struct *mm = current->mm; |
---|
753 | 679 | int ret; |
---|
754 | 680 | |
---|
755 | | - down_read(&mm->mmap_sem); |
---|
756 | | - ret = fixup_user_fault(current, mm, (unsigned long)uaddr, |
---|
| 681 | + mmap_read_lock(mm); |
---|
| 682 | + ret = fixup_user_fault(mm, (unsigned long)uaddr, |
---|
757 | 683 | FAULT_FLAG_WRITE, NULL); |
---|
758 | | - up_read(&mm->mmap_sem); |
---|
| 684 | + mmap_read_unlock(mm); |
---|
759 | 685 | |
---|
760 | 686 | return ret < 0 ? ret : 0; |
---|
761 | 687 | } |
---|
.. | .. |
---|
821 | 747 | INIT_LIST_HEAD(&pi_state->list); |
---|
822 | 748 | /* pi_mutex gets initialized later */ |
---|
823 | 749 | pi_state->owner = NULL; |
---|
824 | | - atomic_set(&pi_state->refcount, 1); |
---|
| 750 | + refcount_set(&pi_state->refcount, 1); |
---|
825 | 751 | pi_state->key = FUTEX_KEY_INIT; |
---|
826 | 752 | |
---|
827 | 753 | current->pi_state_cache = pi_state; |
---|
.. | .. |
---|
864 | 790 | |
---|
865 | 791 | static void get_pi_state(struct futex_pi_state *pi_state) |
---|
866 | 792 | { |
---|
867 | | - WARN_ON_ONCE(!atomic_inc_not_zero(&pi_state->refcount)); |
---|
| 793 | + WARN_ON_ONCE(!refcount_inc_not_zero(&pi_state->refcount)); |
---|
868 | 794 | } |
---|
869 | 795 | |
---|
870 | 796 | /* |
---|
.. | .. |
---|
876 | 802 | if (!pi_state) |
---|
877 | 803 | return; |
---|
878 | 804 | |
---|
879 | | - if (!atomic_dec_and_test(&pi_state->refcount)) |
---|
| 805 | + if (!refcount_dec_and_test(&pi_state->refcount)) |
---|
880 | 806 | return; |
---|
881 | 807 | |
---|
882 | 808 | /* |
---|
.. | .. |
---|
901 | 827 | * refcount is at 0 - put it back to 1. |
---|
902 | 828 | */ |
---|
903 | 829 | pi_state->owner = NULL; |
---|
904 | | - atomic_set(&pi_state->refcount, 1); |
---|
| 830 | + refcount_set(&pi_state->refcount, 1); |
---|
905 | 831 | current->pi_state_cache = pi_state; |
---|
906 | 832 | } |
---|
907 | 833 | } |
---|
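The atomic_t to refcount_t conversion in this and the surrounding hunks keeps the usage pattern identical while gaining the saturation and underflow checks refcount_t provides. A stripped-down kernel-context sketch of that pattern; struct foo and the foo_* helpers are made-up names.

```c
#include <linux/refcount.h>
#include <linux/slab.h>

struct foo {
	refcount_t refcount;
	/* payload ... */
};

static struct foo *foo_create(void)
{
	struct foo *p = kzalloc(sizeof(*p), GFP_KERNEL);

	if (p)
		refcount_set(&p->refcount, 1);	/* creator holds the first reference */
	return p;
}

/*
 * Succeeds only while at least one reference is still held elsewhere,
 * which is the guarantee the refcount_inc_not_zero() retry in the next
 * hunk relies on.
 */
static bool foo_tryget(struct foo *p)
{
	return refcount_inc_not_zero(&p->refcount);
}

static void foo_put(struct foo *p)
{
	if (refcount_dec_and_test(&p->refcount))
		kfree(p);	/* last reference gone: free the object */
}
```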
.. | .. |
---|
944 | 870 | * In that case; drop the locks to let put_pi_state() make |
---|
945 | 871 | * progress and retry the loop. |
---|
946 | 872 | */ |
---|
947 | | - if (!atomic_inc_not_zero(&pi_state->refcount)) { |
---|
| 873 | + if (!refcount_inc_not_zero(&pi_state->refcount)) { |
---|
948 | 874 | raw_spin_unlock_irq(&curr->pi_lock); |
---|
949 | 875 | cpu_relax(); |
---|
950 | 876 | raw_spin_lock_irq(&curr->pi_lock); |
---|
.. | .. |
---|
1009 | 935 | * [10] Found | Found | task | !=taskTID | 0/1 | Invalid |
---|
1010 | 936 | * |
---|
1011 | 937 | * [1] Indicates that the kernel can acquire the futex atomically. We |
---|
1012 | | - * came came here due to a stale FUTEX_WAITERS/FUTEX_OWNER_DIED bit. |
---|
| 938 | + * came here due to a stale FUTEX_WAITERS/FUTEX_OWNER_DIED bit. |
---|
1013 | 939 | * |
---|
1014 | 940 | * [2] Valid, if TID does not belong to a kernel thread. If no matching |
---|
1015 | 941 | * thread is found then it indicates that the owner TID has died. |
---|
.. | .. |
---|
1102 | 1028 | * and futex_wait_requeue_pi() as it cannot go to 0 and consequently |
---|
1103 | 1029 | * free pi_state before we can take a reference ourselves. |
---|
1104 | 1030 | */ |
---|
1105 | | - WARN_ON(!atomic_read(&pi_state->refcount)); |
---|
| 1031 | + WARN_ON(!refcount_read(&pi_state->refcount)); |
---|
1106 | 1032 | |
---|
1107 | 1033 | /* |
---|
1108 | 1034 | * Now that we have a pi_state, we can acquire wait_lock |
---|
.. | .. |
---|
1196 | 1122 | |
---|
1197 | 1123 | /** |
---|
1198 | 1124 | * wait_for_owner_exiting - Block until the owner has exited |
---|
| 1125 | + * @ret: owner's current futex lock status |
---|
1199 | 1126 | * @exiting: Pointer to the exiting task |
---|
1200 | 1127 | * |
---|
1201 | 1128 | * Caller must hold a refcount on @exiting. |
---|
.. | .. |
---|
1398 | 1325 | static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval) |
---|
1399 | 1326 | { |
---|
1400 | 1327 | int err; |
---|
1401 | | - u32 uninitialized_var(curval); |
---|
| 1328 | + u32 curval; |
---|
1402 | 1329 | |
---|
1403 | 1330 | if (unlikely(should_fail_futex(true))) |
---|
1404 | 1331 | return -EFAULT; |
---|
.. | .. |
---|
1523 | 1450 | { |
---|
1524 | 1451 | struct futex_hash_bucket *hb; |
---|
1525 | 1452 | |
---|
1526 | | - if (WARN_ON_SMP(!q->lock_ptr || !spin_is_locked(q->lock_ptr)) |
---|
1527 | | - || WARN_ON(plist_node_empty(&q->list))) |
---|
| 1453 | + if (WARN_ON_SMP(!q->lock_ptr) || WARN_ON(plist_node_empty(&q->list))) |
---|
1528 | 1454 | return; |
---|
| 1455 | + lockdep_assert_held(q->lock_ptr); |
---|
1529 | 1456 | |
---|
1530 | 1457 | hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock); |
---|
1531 | 1458 | plist_del(&q->list, &hb->chain); |
---|
.. | .. |
---|
1558 | 1485 | |
---|
1559 | 1486 | /* |
---|
1560 | 1487 | * Queue the task for later wakeup for after we've released |
---|
1561 | | - * the hb->lock. wake_q_add() grabs reference to p. |
---|
| 1488 | + * the hb->lock. |
---|
1562 | 1489 | */ |
---|
1563 | | - wake_q_add(wake_q, p); |
---|
1564 | | - put_task_struct(p); |
---|
| 1490 | + wake_q_add_safe(wake_q, p); |
---|
1565 | 1491 | } |
---|
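wake_q_add_safe() differs from wake_q_add() in that it consumes the task reference the caller already holds, which is why the explicit put_task_struct() above can be dropped. A minimal kernel-context sketch of the deferred-wakeup pattern, assuming nothing beyond the wake_q API; defer_wakeup() is a made-up name.

```c
#include <linux/sched/task.h>
#include <linux/sched/wake_q.h>
#include <linux/spinlock.h>

static void defer_wakeup(struct task_struct *p, spinlock_t *lock)
{
	DEFINE_WAKE_Q(wake_q);

	spin_lock(lock);
	get_task_struct(p);		/* take a reference under the lock     */
	wake_q_add_safe(&wake_q, p);	/* the wake_q now owns that reference  */
	spin_unlock(lock);

	wake_up_q(&wake_q);		/* perform the wakeup after unlocking  */
}
```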
1566 | 1492 | |
---|
1567 | 1493 | /* |
---|
.. | .. |
---|
1569 | 1495 | */ |
---|
1570 | 1496 | static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_state) |
---|
1571 | 1497 | { |
---|
1572 | | - u32 uninitialized_var(curval), newval; |
---|
| 1498 | + u32 curval, newval; |
---|
1573 | 1499 | struct task_struct *new_owner; |
---|
1574 | 1500 | bool postunlock = false; |
---|
1575 | 1501 | DEFINE_WAKE_Q(wake_q); |
---|
.. | .. |
---|
1668 | 1594 | struct futex_q *this, *next; |
---|
1669 | 1595 | union futex_key key = FUTEX_KEY_INIT; |
---|
1670 | 1596 | int ret; |
---|
| 1597 | + int target_nr; |
---|
1671 | 1598 | DEFINE_WAKE_Q(wake_q); |
---|
1672 | 1599 | |
---|
1673 | 1600 | if (!bitset) |
---|
1674 | 1601 | return -EINVAL; |
---|
1675 | 1602 | |
---|
1676 | | - ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, VERIFY_READ); |
---|
| 1603 | + ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, FUTEX_READ); |
---|
1677 | 1604 | if (unlikely(ret != 0)) |
---|
1678 | | - goto out; |
---|
| 1605 | + return ret; |
---|
1679 | 1606 | |
---|
1680 | 1607 | hb = hash_futex(&key); |
---|
1681 | 1608 | |
---|
1682 | 1609 | /* Make sure we really have tasks to wakeup */ |
---|
1683 | 1610 | if (!hb_waiters_pending(hb)) |
---|
1684 | | - goto out_put_key; |
---|
| 1611 | + return ret; |
---|
1685 | 1612 | |
---|
1686 | 1613 | spin_lock(&hb->lock); |
---|
1687 | 1614 | |
---|
| 1615 | + trace_android_vh_futex_wake_traverse_plist(&hb->chain, &target_nr, key, bitset); |
---|
1688 | 1616 | plist_for_each_entry_safe(this, next, &hb->chain, list) { |
---|
1689 | 1617 | if (match_futex (&this->key, &key)) { |
---|
1690 | 1618 | if (this->pi_state || this->rt_waiter) { |
---|
.. | .. |
---|
1696 | 1624 | if (!(this->bitset & bitset)) |
---|
1697 | 1625 | continue; |
---|
1698 | 1626 | |
---|
| 1627 | + trace_android_vh_futex_wake_this(ret, nr_wake, target_nr, this->task); |
---|
1699 | 1628 | mark_wake_futex(&wake_q, this); |
---|
1700 | 1629 | if (++ret >= nr_wake) |
---|
1701 | 1630 | break; |
---|
.. | .. |
---|
1704 | 1633 | |
---|
1705 | 1634 | spin_unlock(&hb->lock); |
---|
1706 | 1635 | wake_up_q(&wake_q); |
---|
1707 | | -out_put_key: |
---|
1708 | | - put_futex_key(&key); |
---|
1709 | | -out: |
---|
| 1636 | + trace_android_vh_futex_wake_up_q_finish(nr_wake, target_nr); |
---|
1710 | 1637 | return ret; |
---|
1711 | 1638 | } |
---|
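The this->bitset & bitset test above is what implements the FUTEX_WAIT_BITSET/FUTEX_WAKE_BITSET filtering: a waiter tags itself with a bitset when it sleeps and a waker only wakes waiters whose tags intersect its own (plain FUTEX_WAIT/FUTEX_WAKE use FUTEX_BITSET_MATCH_ANY, as seen in do_futex() later in this patch). A small userspace illustration, purely for reference.

```c
#define _GNU_SOURCE
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdint.h>
#include <stddef.h>

static uint32_t word;

/* Sleep on @word while it still reads 0, tagged with @tag (no timeout). */
static long wait_tagged(uint32_t tag)
{
	return syscall(SYS_futex, &word, FUTEX_WAIT_BITSET_PRIVATE, 0,
		       NULL, NULL, tag);
}

/* Wake up to @nr_wake waiters whose tag intersects @tag. */
static long wake_tagged(uint32_t tag, int nr_wake)
{
	return syscall(SYS_futex, &word, FUTEX_WAKE_BITSET_PRIVATE, nr_wake,
		       NULL, NULL, tag);
}

int main(void)
{
	(void)wait_tagged;	/* a real program would call this from waiter threads */

	/* No waiter is queued here, so this wakes nobody and returns 0. */
	return (int)wake_tagged(0x1, 1);
}
```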
1712 | 1639 | |
---|
.. | .. |
---|
1732 | 1659 | oparg = 1 << oparg; |
---|
1733 | 1660 | } |
---|
1734 | 1661 | |
---|
1735 | | - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) |
---|
1736 | | - return -EFAULT; |
---|
1737 | | - |
---|
| 1662 | + pagefault_disable(); |
---|
1738 | 1663 | ret = arch_futex_atomic_op_inuser(op, oparg, &oldval, uaddr); |
---|
| 1664 | + pagefault_enable(); |
---|
1739 | 1665 | if (ret) |
---|
1740 | 1666 | return ret; |
---|
1741 | 1667 | |
---|
.. | .. |
---|
1772 | 1698 | DEFINE_WAKE_Q(wake_q); |
---|
1773 | 1699 | |
---|
1774 | 1700 | retry: |
---|
1775 | | - ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ); |
---|
| 1701 | + ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, FUTEX_READ); |
---|
1776 | 1702 | if (unlikely(ret != 0)) |
---|
1777 | | - goto out; |
---|
1778 | | - ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE); |
---|
| 1703 | + return ret; |
---|
| 1704 | + ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, FUTEX_WRITE); |
---|
1779 | 1705 | if (unlikely(ret != 0)) |
---|
1780 | | - goto out_put_key1; |
---|
| 1706 | + return ret; |
---|
1781 | 1707 | |
---|
1782 | 1708 | hb1 = hash_futex(&key1); |
---|
1783 | 1709 | hb2 = hash_futex(&key2); |
---|
.. | .. |
---|
1795 | 1721 | * an MMU, but we might get them from range checking |
---|
1796 | 1722 | */ |
---|
1797 | 1723 | ret = op_ret; |
---|
1798 | | - goto out_put_keys; |
---|
| 1724 | + return ret; |
---|
1799 | 1725 | } |
---|
1800 | 1726 | |
---|
1801 | 1727 | if (op_ret == -EFAULT) { |
---|
1802 | 1728 | ret = fault_in_user_writeable(uaddr2); |
---|
1803 | 1729 | if (ret) |
---|
1804 | | - goto out_put_keys; |
---|
| 1730 | + return ret; |
---|
1805 | 1731 | } |
---|
1806 | 1732 | |
---|
1807 | 1733 | if (!(flags & FLAGS_SHARED)) { |
---|
.. | .. |
---|
1809 | 1735 | goto retry_private; |
---|
1810 | 1736 | } |
---|
1811 | 1737 | |
---|
1812 | | - put_futex_key(&key2); |
---|
1813 | | - put_futex_key(&key1); |
---|
1814 | 1738 | cond_resched(); |
---|
1815 | 1739 | goto retry; |
---|
1816 | 1740 | } |
---|
.. | .. |
---|
1846 | 1770 | out_unlock: |
---|
1847 | 1771 | double_unlock_hb(hb1, hb2); |
---|
1848 | 1772 | wake_up_q(&wake_q); |
---|
1849 | | -out_put_keys: |
---|
1850 | | - put_futex_key(&key2); |
---|
1851 | | -out_put_key1: |
---|
1852 | | - put_futex_key(&key1); |
---|
1853 | | -out: |
---|
1854 | 1773 | return ret; |
---|
1855 | 1774 | } |
---|
1856 | 1775 | |
---|
.. | .. |
---|
1877 | 1796 | plist_add(&q->list, &hb2->chain); |
---|
1878 | 1797 | q->lock_ptr = &hb2->lock; |
---|
1879 | 1798 | } |
---|
1880 | | - get_futex_key_refs(key2); |
---|
1881 | 1799 | q->key = *key2; |
---|
1882 | 1800 | } |
---|
1883 | 1801 | |
---|
.. | .. |
---|
1899 | 1817 | void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key, |
---|
1900 | 1818 | struct futex_hash_bucket *hb) |
---|
1901 | 1819 | { |
---|
1902 | | - get_futex_key_refs(key); |
---|
1903 | 1820 | q->key = *key; |
---|
1904 | 1821 | |
---|
1905 | 1822 | __unqueue_futex(q); |
---|
.. | .. |
---|
2010 | 1927 | u32 *cmpval, int requeue_pi) |
---|
2011 | 1928 | { |
---|
2012 | 1929 | union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT; |
---|
2013 | | - int drop_count = 0, task_count = 0, ret; |
---|
| 1930 | + int task_count = 0, ret; |
---|
2014 | 1931 | struct futex_pi_state *pi_state = NULL; |
---|
2015 | 1932 | struct futex_hash_bucket *hb1, *hb2; |
---|
2016 | 1933 | struct futex_q *this, *next; |
---|
.. | .. |
---|
2057 | 1974 | } |
---|
2058 | 1975 | |
---|
2059 | 1976 | retry: |
---|
2060 | | - ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ); |
---|
| 1977 | + ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, FUTEX_READ); |
---|
2061 | 1978 | if (unlikely(ret != 0)) |
---|
2062 | | - goto out; |
---|
| 1979 | + return ret; |
---|
2063 | 1980 | ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, |
---|
2064 | | - requeue_pi ? VERIFY_WRITE : VERIFY_READ); |
---|
| 1981 | + requeue_pi ? FUTEX_WRITE : FUTEX_READ); |
---|
2065 | 1982 | if (unlikely(ret != 0)) |
---|
2066 | | - goto out_put_key1; |
---|
| 1983 | + return ret; |
---|
2067 | 1984 | |
---|
2068 | 1985 | /* |
---|
2069 | 1986 | * The check above which compares uaddrs is not sufficient for |
---|
2070 | 1987 | * shared futexes. We need to compare the keys: |
---|
2071 | 1988 | */ |
---|
2072 | | - if (requeue_pi && match_futex(&key1, &key2)) { |
---|
2073 | | - ret = -EINVAL; |
---|
2074 | | - goto out_put_keys; |
---|
2075 | | - } |
---|
| 1989 | + if (requeue_pi && match_futex(&key1, &key2)) |
---|
| 1990 | + return -EINVAL; |
---|
2076 | 1991 | |
---|
2077 | 1992 | hb1 = hash_futex(&key1); |
---|
2078 | 1993 | hb2 = hash_futex(&key2); |
---|
.. | .. |
---|
2092 | 2007 | |
---|
2093 | 2008 | ret = get_user(curval, uaddr1); |
---|
2094 | 2009 | if (ret) |
---|
2095 | | - goto out_put_keys; |
---|
| 2010 | + return ret; |
---|
2096 | 2011 | |
---|
2097 | 2012 | if (!(flags & FLAGS_SHARED)) |
---|
2098 | 2013 | goto retry_private; |
---|
2099 | 2014 | |
---|
2100 | | - put_futex_key(&key2); |
---|
2101 | | - put_futex_key(&key1); |
---|
2102 | 2015 | goto retry; |
---|
2103 | 2016 | } |
---|
2104 | 2017 | if (curval != *cmpval) { |
---|
.. | .. |
---|
2131 | 2044 | */ |
---|
2132 | 2045 | if (ret > 0) { |
---|
2133 | 2046 | WARN_ON(pi_state); |
---|
2134 | | - drop_count++; |
---|
2135 | 2047 | task_count++; |
---|
2136 | 2048 | /* |
---|
2137 | 2049 | * If we acquired the lock, then the user space value |
---|
.. | .. |
---|
2158 | 2070 | case -EFAULT: |
---|
2159 | 2071 | double_unlock_hb(hb1, hb2); |
---|
2160 | 2072 | hb_waiters_dec(hb2); |
---|
2161 | | - put_futex_key(&key2); |
---|
2162 | | - put_futex_key(&key1); |
---|
2163 | 2073 | ret = fault_in_user_writeable(uaddr2); |
---|
2164 | 2074 | if (!ret) |
---|
2165 | 2075 | goto retry; |
---|
2166 | | - goto out; |
---|
| 2076 | + return ret; |
---|
2167 | 2077 | case -EBUSY: |
---|
2168 | 2078 | case -EAGAIN: |
---|
2169 | 2079 | /* |
---|
.. | .. |
---|
2174 | 2084 | */ |
---|
2175 | 2085 | double_unlock_hb(hb1, hb2); |
---|
2176 | 2086 | hb_waiters_dec(hb2); |
---|
2177 | | - put_futex_key(&key2); |
---|
2178 | | - put_futex_key(&key1); |
---|
2179 | 2087 | /* |
---|
2180 | 2088 | * Handle the case where the owner is in the middle of |
---|
2181 | 2089 | * exiting. Wait for the exit to complete otherwise |
---|
.. | .. |
---|
2251 | 2159 | * doing so. |
---|
2252 | 2160 | */ |
---|
2253 | 2161 | requeue_pi_wake_futex(this, &key2, hb2); |
---|
2254 | | - drop_count++; |
---|
2255 | 2162 | continue; |
---|
2256 | 2163 | } else if (ret) { |
---|
2257 | 2164 | /* |
---|
.. | .. |
---|
2272 | 2179 | } |
---|
2273 | 2180 | } |
---|
2274 | 2181 | requeue_futex(this, hb1, hb2, &key2); |
---|
2275 | | - drop_count++; |
---|
2276 | 2182 | } |
---|
2277 | 2183 | |
---|
2278 | 2184 | /* |
---|
.. | .. |
---|
2286 | 2192 | double_unlock_hb(hb1, hb2); |
---|
2287 | 2193 | wake_up_q(&wake_q); |
---|
2288 | 2194 | hb_waiters_dec(hb2); |
---|
2289 | | - |
---|
2290 | | - /* |
---|
2291 | | - * drop_futex_key_refs() must be called outside the spinlocks. During |
---|
2292 | | - * the requeue we moved futex_q's from the hash bucket at key1 to the |
---|
2293 | | - * one at key2 and updated their key pointer. We no longer need to |
---|
2294 | | - * hold the references to key1. |
---|
2295 | | - */ |
---|
2296 | | - while (--drop_count >= 0) |
---|
2297 | | - drop_futex_key_refs(&key1); |
---|
2298 | | - |
---|
2299 | | -out_put_keys: |
---|
2300 | | - put_futex_key(&key2); |
---|
2301 | | -out_put_key1: |
---|
2302 | | - put_futex_key(&key1); |
---|
2303 | | -out: |
---|
2304 | 2195 | return ret ? ret : task_count; |
---|
2305 | 2196 | } |
---|
2306 | 2197 | |
---|
.. | .. |
---|
2320 | 2211 | * decrement the counter at queue_unlock() when some error has |
---|
2321 | 2212 | * occurred and we don't end up adding the task to the list. |
---|
2322 | 2213 | */ |
---|
2323 | | - hb_waiters_inc(hb); |
---|
| 2214 | + hb_waiters_inc(hb); /* implies smp_mb(); (A) */ |
---|
2324 | 2215 | |
---|
2325 | 2216 | q->lock_ptr = &hb->lock; |
---|
2326 | 2217 | |
---|
2327 | | - spin_lock(&hb->lock); /* implies smp_mb(); (A) */ |
---|
| 2218 | + spin_lock(&hb->lock); |
---|
2328 | 2219 | return hb; |
---|
2329 | 2220 | } |
---|
2330 | 2221 | |
---|
.. | .. |
---|
2339 | 2230 | static inline void __queue_me(struct futex_q *q, struct futex_hash_bucket *hb) |
---|
2340 | 2231 | { |
---|
2341 | 2232 | int prio; |
---|
| 2233 | + bool already_on_hb = false; |
---|
2342 | 2234 | |
---|
2343 | 2235 | /* |
---|
2344 | 2236 | * The priority used to register this element is |
---|
.. | .. |
---|
2351 | 2243 | prio = min(current->normal_prio, MAX_RT_PRIO); |
---|
2352 | 2244 | |
---|
2353 | 2245 | plist_node_init(&q->list, prio); |
---|
2354 | | - plist_add(&q->list, &hb->chain); |
---|
| 2246 | + trace_android_vh_alter_futex_plist_add(&q->list, &hb->chain, &already_on_hb); |
---|
| 2247 | + if (!already_on_hb) |
---|
| 2248 | + plist_add(&q->list, &hb->chain); |
---|
2355 | 2249 | q->task = current; |
---|
2356 | 2250 | } |
---|
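The plist keeps each hash bucket's waiters sorted by ascending priority value, so the prio = min(current->normal_prio, MAX_RT_PRIO) computed above makes real-time waiters come first during wakeup traversal. A minimal kernel-context sketch of that ordering behaviour; struct waiter and plist_demo() are made-up names.

```c
#include <linux/plist.h>
#include <linux/printk.h>

struct waiter {
	struct plist_node node;
	int id;
};

static void plist_demo(void)
{
	struct plist_head chain;
	struct waiter a = { .id = 1 }, b = { .id = 2 };
	struct waiter *pos;

	plist_head_init(&chain);

	plist_node_init(&a.node, 20);	/* larger value == lower priority */
	plist_node_init(&b.node, 10);
	plist_add(&a.node, &chain);
	plist_add(&b.node, &chain);

	/* Iteration visits b (prio 10) before a (prio 20). */
	plist_for_each_entry(pos, &chain, node)
		pr_info("waiter %d\n", pos->id);
}
```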
2357 | 2251 | |
---|
.. | .. |
---|
2425 | 2319 | ret = 1; |
---|
2426 | 2320 | } |
---|
2427 | 2321 | |
---|
2428 | | - drop_futex_key_refs(&q->key); |
---|
2429 | 2322 | return ret; |
---|
2430 | 2323 | } |
---|
2431 | 2324 | |
---|
.. | .. |
---|
2449 | 2342 | static int __fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, |
---|
2450 | 2343 | struct task_struct *argowner) |
---|
2451 | 2344 | { |
---|
2452 | | - u32 uval, uninitialized_var(curval), newval, newtid; |
---|
2453 | 2345 | struct futex_pi_state *pi_state = q->pi_state; |
---|
2454 | 2346 | struct task_struct *oldowner, *newowner; |
---|
| 2347 | + u32 uval, curval, newval, newtid; |
---|
2455 | 2348 | int err = 0; |
---|
2456 | 2349 | |
---|
2457 | 2350 | oldowner = pi_state->owner; |
---|
.. | .. |
---|
2706 | 2599 | |
---|
2707 | 2600 | /* Arm the timer */ |
---|
2708 | 2601 | if (timeout) |
---|
2709 | | - hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS); |
---|
| 2602 | + hrtimer_sleeper_start_expires(timeout, HRTIMER_MODE_ABS); |
---|
2710 | 2603 | |
---|
2711 | 2604 | /* |
---|
2712 | 2605 | * If we have been removed from the hash list, then another task |
---|
.. | .. |
---|
2718 | 2611 | * flagged for rescheduling. Only call schedule if there |
---|
2719 | 2612 | * is no timeout, or if it has yet to expire. |
---|
2720 | 2613 | */ |
---|
2721 | | - if (!timeout || timeout->task) |
---|
| 2614 | + if (!timeout || timeout->task) { |
---|
| 2615 | + trace_android_vh_futex_sleep_start(current); |
---|
2722 | 2616 | freezable_schedule(); |
---|
| 2617 | + } |
---|
2723 | 2618 | } |
---|
2724 | 2619 | __set_current_state(TASK_RUNNING); |
---|
2725 | 2620 | } |
---|
.. | .. |
---|
2766 | 2661 | * while the syscall executes. |
---|
2767 | 2662 | */ |
---|
2768 | 2663 | retry: |
---|
2769 | | - ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q->key, VERIFY_READ); |
---|
| 2664 | + ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q->key, FUTEX_READ); |
---|
2770 | 2665 | if (unlikely(ret != 0)) |
---|
2771 | 2666 | return ret; |
---|
2772 | 2667 | |
---|
.. | .. |
---|
2780 | 2675 | |
---|
2781 | 2676 | ret = get_user(uval, uaddr); |
---|
2782 | 2677 | if (ret) |
---|
2783 | | - goto out; |
---|
| 2678 | + return ret; |
---|
2784 | 2679 | |
---|
2785 | 2680 | if (!(flags & FLAGS_SHARED)) |
---|
2786 | 2681 | goto retry_private; |
---|
2787 | 2682 | |
---|
2788 | | - put_futex_key(&q->key); |
---|
2789 | 2683 | goto retry; |
---|
2790 | 2684 | } |
---|
2791 | 2685 | |
---|
.. | .. |
---|
2794 | 2688 | ret = -EWOULDBLOCK; |
---|
2795 | 2689 | } |
---|
2796 | 2690 | |
---|
2797 | | -out: |
---|
2798 | | - if (ret) |
---|
2799 | | - put_futex_key(&q->key); |
---|
2800 | 2691 | return ret; |
---|
2801 | 2692 | } |
---|
2802 | 2693 | |
---|
2803 | 2694 | static int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val, |
---|
2804 | 2695 | ktime_t *abs_time, u32 bitset) |
---|
2805 | 2696 | { |
---|
2806 | | - struct hrtimer_sleeper timeout, *to = NULL; |
---|
| 2697 | + struct hrtimer_sleeper timeout, *to; |
---|
2807 | 2698 | struct restart_block *restart; |
---|
2808 | 2699 | struct futex_hash_bucket *hb; |
---|
2809 | 2700 | struct futex_q q = futex_q_init; |
---|
.. | .. |
---|
2812 | 2703 | if (!bitset) |
---|
2813 | 2704 | return -EINVAL; |
---|
2814 | 2705 | q.bitset = bitset; |
---|
| 2706 | + trace_android_vh_futex_wait_start(flags, bitset); |
---|
2815 | 2707 | |
---|
2816 | | - if (abs_time) { |
---|
2817 | | - to = &timeout; |
---|
2818 | | - |
---|
2819 | | - hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ? |
---|
2820 | | - CLOCK_REALTIME : CLOCK_MONOTONIC, |
---|
2821 | | - HRTIMER_MODE_ABS); |
---|
2822 | | - hrtimer_init_sleeper(to, current); |
---|
2823 | | - hrtimer_set_expires_range_ns(&to->timer, *abs_time, |
---|
2824 | | - current->timer_slack_ns); |
---|
2825 | | - } |
---|
2826 | | - |
---|
| 2708 | + to = futex_setup_timer(abs_time, &timeout, flags, |
---|
| 2709 | + current->timer_slack_ns); |
---|
2827 | 2710 | retry: |
---|
2828 | 2711 | /* |
---|
2829 | 2712 | * Prepare to wait on uaddr. On success, holds hb lock and increments |
---|
.. | .. |
---|
2870 | 2753 | hrtimer_cancel(&to->timer); |
---|
2871 | 2754 | destroy_hrtimer_on_stack(&to->timer); |
---|
2872 | 2755 | } |
---|
| 2756 | + trace_android_vh_futex_wait_end(flags, bitset); |
---|
2873 | 2757 | return ret; |
---|
2874 | 2758 | } |
---|
2875 | 2759 | |
---|
.. | .. |
---|
2902 | 2786 | static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, |
---|
2903 | 2787 | ktime_t *time, int trylock) |
---|
2904 | 2788 | { |
---|
2905 | | - struct hrtimer_sleeper timeout, *to = NULL; |
---|
| 2789 | + struct hrtimer_sleeper timeout, *to; |
---|
2906 | 2790 | struct task_struct *exiting = NULL; |
---|
2907 | 2791 | struct rt_mutex_waiter rt_waiter; |
---|
2908 | 2792 | struct futex_hash_bucket *hb; |
---|
.. | .. |
---|
2915 | 2799 | if (refill_pi_state_cache()) |
---|
2916 | 2800 | return -ENOMEM; |
---|
2917 | 2801 | |
---|
2918 | | - if (time) { |
---|
2919 | | - to = &timeout; |
---|
2920 | | - hrtimer_init_on_stack(&to->timer, CLOCK_REALTIME, |
---|
2921 | | - HRTIMER_MODE_ABS); |
---|
2922 | | - hrtimer_init_sleeper(to, current); |
---|
2923 | | - hrtimer_set_expires(&to->timer, *time); |
---|
2924 | | - } |
---|
| 2802 | + to = futex_setup_timer(time, &timeout, FLAGS_CLOCKRT, 0); |
---|
2925 | 2803 | |
---|
2926 | 2804 | retry: |
---|
2927 | | - ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q.key, VERIFY_WRITE); |
---|
| 2805 | + ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q.key, FUTEX_WRITE); |
---|
2928 | 2806 | if (unlikely(ret != 0)) |
---|
2929 | 2807 | goto out; |
---|
2930 | 2808 | |
---|
.. | .. |
---|
2954 | 2832 | * - EAGAIN: The user space value changed. |
---|
2955 | 2833 | */ |
---|
2956 | 2834 | queue_unlock(hb); |
---|
2957 | | - put_futex_key(&q.key); |
---|
2958 | 2835 | /* |
---|
2959 | 2836 | * Handle the case where the owner is in the middle of |
---|
2960 | 2837 | * exiting. Wait for the exit to complete otherwise |
---|
.. | .. |
---|
3014 | 2891 | } |
---|
3015 | 2892 | |
---|
3016 | 2893 | if (unlikely(to)) |
---|
3017 | | - hrtimer_start_expires(&to->timer, HRTIMER_MODE_ABS); |
---|
| 2894 | + hrtimer_sleeper_start_expires(to, HRTIMER_MODE_ABS); |
---|
3018 | 2895 | |
---|
3019 | 2896 | ret = rt_mutex_wait_proxy_lock(&q.pi_state->pi_mutex, to, &rt_waiter); |
---|
3020 | 2897 | |
---|
.. | .. |
---|
3047 | 2924 | |
---|
3048 | 2925 | /* Unqueue and drop the lock */ |
---|
3049 | 2926 | unqueue_me_pi(&q); |
---|
3050 | | - |
---|
3051 | | - goto out_put_key; |
---|
| 2927 | + goto out; |
---|
3052 | 2928 | |
---|
3053 | 2929 | out_unlock_put_key: |
---|
3054 | 2930 | queue_unlock(hb); |
---|
3055 | 2931 | |
---|
3056 | | -out_put_key: |
---|
3057 | | - put_futex_key(&q.key); |
---|
3058 | 2932 | out: |
---|
3059 | 2933 | if (to) { |
---|
3060 | 2934 | hrtimer_cancel(&to->timer); |
---|
.. | .. |
---|
3067 | 2941 | |
---|
3068 | 2942 | ret = fault_in_user_writeable(uaddr); |
---|
3069 | 2943 | if (ret) |
---|
3070 | | - goto out_put_key; |
---|
| 2944 | + goto out; |
---|
3071 | 2945 | |
---|
3072 | 2946 | if (!(flags & FLAGS_SHARED)) |
---|
3073 | 2947 | goto retry_private; |
---|
3074 | 2948 | |
---|
3075 | | - put_futex_key(&q.key); |
---|
3076 | 2949 | goto retry; |
---|
3077 | 2950 | } |
---|
3078 | 2951 | |
---|
.. | .. |
---|
3083 | 2956 | */ |
---|
3084 | 2957 | static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags) |
---|
3085 | 2958 | { |
---|
3086 | | - u32 uninitialized_var(curval), uval, vpid = task_pid_vnr(current); |
---|
| 2959 | + u32 curval, uval, vpid = task_pid_vnr(current); |
---|
3087 | 2960 | union futex_key key = FUTEX_KEY_INIT; |
---|
3088 | 2961 | struct futex_hash_bucket *hb; |
---|
3089 | 2962 | struct futex_q *top_waiter; |
---|
.. | .. |
---|
3101 | 2974 | if ((uval & FUTEX_TID_MASK) != vpid) |
---|
3102 | 2975 | return -EPERM; |
---|
3103 | 2976 | |
---|
3104 | | - ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, VERIFY_WRITE); |
---|
| 2977 | + ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, FUTEX_WRITE); |
---|
3105 | 2978 | if (ret) |
---|
3106 | 2979 | return ret; |
---|
3107 | 2980 | |
---|
.. | .. |
---|
3201 | 3074 | out_unlock: |
---|
3202 | 3075 | spin_unlock(&hb->lock); |
---|
3203 | 3076 | out_putkey: |
---|
3204 | | - put_futex_key(&key); |
---|
3205 | 3077 | return ret; |
---|
3206 | 3078 | |
---|
3207 | 3079 | pi_retry: |
---|
3208 | | - put_futex_key(&key); |
---|
3209 | 3080 | cond_resched(); |
---|
3210 | 3081 | goto retry; |
---|
3211 | 3082 | |
---|
3212 | 3083 | pi_faulted: |
---|
3213 | | - put_futex_key(&key); |
---|
3214 | 3084 | |
---|
3215 | 3085 | ret = fault_in_user_writeable(uaddr); |
---|
3216 | 3086 | if (!ret) |
---|
.. | .. |
---|
3312 | 3182 | u32 val, ktime_t *abs_time, u32 bitset, |
---|
3313 | 3183 | u32 __user *uaddr2) |
---|
3314 | 3184 | { |
---|
3315 | | - struct hrtimer_sleeper timeout, *to = NULL; |
---|
| 3185 | + struct hrtimer_sleeper timeout, *to; |
---|
3316 | 3186 | struct rt_mutex_waiter rt_waiter; |
---|
3317 | 3187 | struct futex_hash_bucket *hb; |
---|
3318 | 3188 | union futex_key key2 = FUTEX_KEY_INIT; |
---|
.. | .. |
---|
3328 | 3198 | if (!bitset) |
---|
3329 | 3199 | return -EINVAL; |
---|
3330 | 3200 | |
---|
3331 | | - if (abs_time) { |
---|
3332 | | - to = &timeout; |
---|
3333 | | - hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ? |
---|
3334 | | - CLOCK_REALTIME : CLOCK_MONOTONIC, |
---|
3335 | | - HRTIMER_MODE_ABS); |
---|
3336 | | - hrtimer_init_sleeper(to, current); |
---|
3337 | | - hrtimer_set_expires_range_ns(&to->timer, *abs_time, |
---|
3338 | | - current->timer_slack_ns); |
---|
3339 | | - } |
---|
| 3201 | + to = futex_setup_timer(abs_time, &timeout, flags, |
---|
| 3202 | + current->timer_slack_ns); |
---|
3340 | 3203 | |
---|
3341 | 3204 | /* |
---|
3342 | 3205 | * The waiter is allocated on our stack, manipulated by the requeue |
---|
.. | .. |
---|
3344 | 3207 | */ |
---|
3345 | 3208 | rt_mutex_init_waiter(&rt_waiter); |
---|
3346 | 3209 | |
---|
3347 | | - ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE); |
---|
| 3210 | + ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, FUTEX_WRITE); |
---|
3348 | 3211 | if (unlikely(ret != 0)) |
---|
3349 | 3212 | goto out; |
---|
3350 | 3213 | |
---|
.. | .. |
---|
3358 | 3221 | */ |
---|
3359 | 3222 | ret = futex_wait_setup(uaddr, val, flags, &q, &hb); |
---|
3360 | 3223 | if (ret) |
---|
3361 | | - goto out_key2; |
---|
| 3224 | + goto out; |
---|
3362 | 3225 | |
---|
3363 | 3226 | /* |
---|
3364 | 3227 | * The check above which compares uaddrs is not sufficient for |
---|
.. | .. |
---|
3367 | 3230 | if (match_futex(&q.key, &key2)) { |
---|
3368 | 3231 | queue_unlock(hb); |
---|
3369 | 3232 | ret = -EINVAL; |
---|
3370 | | - goto out_put_keys; |
---|
| 3233 | + goto out; |
---|
3371 | 3234 | } |
---|
3372 | 3235 | |
---|
3373 | 3236 | /* Queue the futex_q, drop the hb lock, wait for wakeup. */ |
---|
.. | .. |
---|
3377 | 3240 | ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to); |
---|
3378 | 3241 | spin_unlock(&hb->lock); |
---|
3379 | 3242 | if (ret) |
---|
3380 | | - goto out_put_keys; |
---|
| 3243 | + goto out; |
---|
3381 | 3244 | |
---|
3382 | 3245 | /* |
---|
3383 | 3246 | * In order for us to be here, we know our q.key == key2, and since |
---|
.. | .. |
---|
3452 | 3315 | */ |
---|
3453 | 3316 | ret = -EWOULDBLOCK; |
---|
3454 | 3317 | } |
---|
3455 | | - |
---|
3456 | | -out_put_keys: |
---|
3457 | | - put_futex_key(&q.key); |
---|
3458 | | -out_key2: |
---|
3459 | | - put_futex_key(&key2); |
---|
3460 | 3318 | |
---|
3461 | 3319 | out: |
---|
3462 | 3320 | if (to) { |
---|
.. | .. |
---|
3558 | 3416 | static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, |
---|
3559 | 3417 | bool pi, bool pending_op) |
---|
3560 | 3418 | { |
---|
3561 | | - u32 uval, uninitialized_var(nval), mval; |
---|
| 3419 | + u32 uval, nval, mval; |
---|
3562 | 3420 | int err; |
---|
3563 | 3421 | |
---|
3564 | 3422 | /* Futex address must be 32bit aligned */ |
---|
.. | .. |
---|
3688 | 3546 | struct robust_list_head __user *head = curr->robust_list; |
---|
3689 | 3547 | struct robust_list __user *entry, *next_entry, *pending; |
---|
3690 | 3548 | unsigned int limit = ROBUST_LIST_LIMIT, pi, pip; |
---|
3691 | | - unsigned int uninitialized_var(next_pi); |
---|
| 3549 | + unsigned int next_pi; |
---|
3692 | 3550 | unsigned long futex_offset; |
---|
3693 | 3551 | int rc; |
---|
3694 | 3552 | |
---|
.. | .. |
---|
3881 | 3739 | return -ENOSYS; |
---|
3882 | 3740 | } |
---|
3883 | 3741 | |
---|
| 3742 | + trace_android_vh_do_futex(cmd, &flags, uaddr2); |
---|
3884 | 3743 | switch (cmd) { |
---|
3885 | 3744 | case FUTEX_WAIT: |
---|
3886 | 3745 | val3 = FUTEX_BITSET_MATCH_ANY; |
---|
3887 | | - /* fall through */ |
---|
| 3746 | + fallthrough; |
---|
3888 | 3747 | case FUTEX_WAIT_BITSET: |
---|
3889 | 3748 | return futex_wait(uaddr, flags, val, timeout, val3); |
---|
3890 | 3749 | case FUTEX_WAKE: |
---|
3891 | 3750 | val3 = FUTEX_BITSET_MATCH_ANY; |
---|
3892 | | - /* fall through */ |
---|
| 3751 | + fallthrough; |
---|
3893 | 3752 | case FUTEX_WAKE_BITSET: |
---|
3894 | 3753 | return futex_wake(uaddr, flags, val, val3); |
---|
3895 | 3754 | case FUTEX_REQUEUE: |
---|
.. | .. |
---|
3916 | 3775 | |
---|
3917 | 3776 | |
---|
3918 | 3777 | SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val, |
---|
3919 | | - struct timespec __user *, utime, u32 __user *, uaddr2, |
---|
| 3778 | + struct __kernel_timespec __user *, utime, u32 __user *, uaddr2, |
---|
3920 | 3779 | u32, val3) |
---|
3921 | 3780 | { |
---|
3922 | | - struct timespec ts; |
---|
| 3781 | + struct timespec64 ts; |
---|
3923 | 3782 | ktime_t t, *tp = NULL; |
---|
3924 | 3783 | u32 val2 = 0; |
---|
3925 | 3784 | int cmd = op & FUTEX_CMD_MASK; |
---|
.. | .. |
---|
3929 | 3788 | cmd == FUTEX_WAIT_REQUEUE_PI)) { |
---|
3930 | 3789 | if (unlikely(should_fail_futex(!(op & FUTEX_PRIVATE_FLAG)))) |
---|
3931 | 3790 | return -EFAULT; |
---|
3932 | | - if (copy_from_user(&ts, utime, sizeof(ts)) != 0) |
---|
| 3791 | + if (get_timespec64(&ts, utime)) |
---|
3933 | 3792 | return -EFAULT; |
---|
3934 | | - if (!timespec_valid(&ts)) |
---|
| 3793 | + if (!timespec64_valid(&ts)) |
---|
3935 | 3794 | return -EINVAL; |
---|
3936 | 3795 | |
---|
3937 | | - t = timespec_to_ktime(ts); |
---|
| 3796 | + t = timespec64_to_ktime(ts); |
---|
3938 | 3797 | if (cmd == FUTEX_WAIT) |
---|
3939 | 3798 | t = ktime_add_safe(ktime_get(), t); |
---|
| 3799 | + else if (cmd != FUTEX_LOCK_PI && !(op & FUTEX_CLOCK_REALTIME)) |
---|
| 3800 | + t = timens_ktime_to_host(CLOCK_MONOTONIC, t); |
---|
3940 | 3801 | tp = &t; |
---|
3941 | 3802 | } |
---|
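The conversion to timespec64 above preserves the long-standing asymmetry in how utime is interpreted: FUTEX_WAIT takes a relative timeout, which ktime_add_safe() turns into an absolute expiry, while FUTEX_WAIT_BITSET and the requeue-PI operation take an absolute expiry directly (FUTEX_LOCK_PI is always absolute CLOCK_REALTIME, hence its exclusion), and that absolute, non-CLOCK_REALTIME case is the one the new timens_ktime_to_host() conversion applies to. A userspace illustration; both calls here simply time out since nobody wakes them.

```c
#define _GNU_SOURCE
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdint.h>
#include <stddef.h>
#include <time.h>

static uint32_t word;

int main(void)
{
	/* FUTEX_WAIT: the timeout is relative (here, 100ms). */
	struct timespec rel = { .tv_sec = 0, .tv_nsec = 100 * 1000 * 1000 };
	syscall(SYS_futex, &word, FUTEX_WAIT_PRIVATE, 0, &rel, NULL, 0);

	/*
	 * FUTEX_WAIT_BITSET: the timeout is an absolute CLOCK_MONOTONIC
	 * expiry (CLOCK_REALTIME if FUTEX_CLOCK_REALTIME is set in the op).
	 */
	struct timespec abs;
	clock_gettime(CLOCK_MONOTONIC, &abs);
	abs.tv_sec += 1;
	syscall(SYS_futex, &word, FUTEX_WAIT_BITSET_PRIVATE, 0, &abs, NULL,
		FUTEX_BITSET_MATCH_ANY);
	return 0;
}
```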
3942 | 3803 | /* |
---|
.. | .. |
---|
3987 | 3848 | struct compat_robust_list_head __user *head = curr->compat_robust_list; |
---|
3988 | 3849 | struct robust_list __user *entry, *next_entry, *pending; |
---|
3989 | 3850 | unsigned int limit = ROBUST_LIST_LIMIT, pi, pip; |
---|
3990 | | - unsigned int uninitialized_var(next_pi); |
---|
| 3851 | + unsigned int next_pi; |
---|
3991 | 3852 | compat_uptr_t uentry, next_uentry, upending; |
---|
3992 | 3853 | compat_long_t futex_offset; |
---|
3993 | 3854 | int rc; |
---|
.. | .. |
---|
4106 | 3967 | |
---|
4107 | 3968 | return ret; |
---|
4108 | 3969 | } |
---|
| 3970 | +#endif /* CONFIG_COMPAT */ |
---|
4109 | 3971 | |
---|
4110 | | -COMPAT_SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val, |
---|
| 3972 | +#ifdef CONFIG_COMPAT_32BIT_TIME |
---|
| 3973 | +SYSCALL_DEFINE6(futex_time32, u32 __user *, uaddr, int, op, u32, val, |
---|
4111 | 3974 | struct old_timespec32 __user *, utime, u32 __user *, uaddr2, |
---|
4112 | 3975 | u32, val3) |
---|
4113 | 3976 | { |
---|
4114 | | - struct timespec ts; |
---|
| 3977 | + struct timespec64 ts; |
---|
4115 | 3978 | ktime_t t, *tp = NULL; |
---|
4116 | 3979 | int val2 = 0; |
---|
4117 | 3980 | int cmd = op & FUTEX_CMD_MASK; |
---|
.. | .. |
---|
4119 | 3982 | if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI || |
---|
4120 | 3983 | cmd == FUTEX_WAIT_BITSET || |
---|
4121 | 3984 | cmd == FUTEX_WAIT_REQUEUE_PI)) { |
---|
4122 | | - if (compat_get_timespec(&ts, utime)) |
---|
| 3985 | + if (get_old_timespec32(&ts, utime)) |
---|
4123 | 3986 | return -EFAULT; |
---|
4124 | | - if (!timespec_valid(&ts)) |
---|
| 3987 | + if (!timespec64_valid(&ts)) |
---|
4125 | 3988 | return -EINVAL; |
---|
4126 | 3989 | |
---|
4127 | | - t = timespec_to_ktime(ts); |
---|
| 3990 | + t = timespec64_to_ktime(ts); |
---|
4128 | 3991 | if (cmd == FUTEX_WAIT) |
---|
4129 | 3992 | t = ktime_add_safe(ktime_get(), t); |
---|
| 3993 | + else if (cmd != FUTEX_LOCK_PI && !(op & FUTEX_CLOCK_REALTIME)) |
---|
| 3994 | + t = timens_ktime_to_host(CLOCK_MONOTONIC, t); |
---|
4130 | 3995 | tp = &t; |
---|
4131 | 3996 | } |
---|
4132 | 3997 | if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE || |
---|
.. | .. |
---|
4135 | 4000 | |
---|
4136 | 4001 | return do_futex(uaddr, op, val, tp, uaddr2, val2, val3); |
---|
4137 | 4002 | } |
---|
4138 | | -#endif /* CONFIG_COMPAT */ |
---|
| 4003 | +#endif /* CONFIG_COMPAT_32BIT_TIME */ |
---|
4139 | 4004 | |
---|
4140 | 4005 | static void __init futex_detect_cmpxchg(void) |
---|
4141 | 4006 | { |
---|