.. | .. |
| 1 | +// SPDX-License-Identifier: GPL-2.0+
1 | 2 | /*
2 | | - * linux/kernel/posix-timers.c
3 | | - *
4 | | - *
5 | 3 | * 2002-10-15 Posix Clocks & timers
6 | 4 | * by George Anzinger george@mvista.com
7 | | - *
8 | 5 | * Copyright (C) 2002 2003 by MontaVista Software.
9 | 6 | *
10 | 7 | * 2004-06-01 Fix CLOCK_REALTIME clock/timer TIMER_ABSTIME bug.
11 | 8 | * Copyright (C) 2004 Boris Hu
12 | 9 | *
13 | | - * This program is free software; you can redistribute it and/or modify
14 | | - * it under the terms of the GNU General Public License as published by
15 | | - * the Free Software Foundation; either version 2 of the License, or (at
16 | | - * your option) any later version.
17 | | - *
18 | | - * This program is distributed in the hope that it will be useful, but
19 | | - * WITHOUT ANY WARRANTY; without even the implied warranty of
20 | | - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 | | - * General Public License for more details.
22 | | -
23 | | - * You should have received a copy of the GNU General Public License
24 | | - * along with this program; if not, write to the Free Software
25 | | - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
26 | | - *
27 | | - * MontaVista Software | 1237 East Arques Avenue | Sunnyvale | CA 94085 | USA
28 | | - */
29 | | -
30 | | -/* These are all the functions necessary to implement
31 | | - * POSIX clocks & timers
| 10 | + * These are all the functions necessary to implement POSIX clocks & timers
32 | 11 | */
33 | 12 | #include <linux/mm.h>
34 | 13 | #include <linux/interrupt.h>
.. | .. |
51 | 30 | #include <linux/hashtable.h>
52 | 31 | #include <linux/compat.h>
53 | 32 | #include <linux/nospec.h>
| 33 | +#include <linux/time_namespace.h>
54 | 34 |
55 | 35 | #include "timekeeping.h"
56 | 36 | #include "posix-timers.h"
.. | .. |
141 | 121 | {
142 | 122 | struct k_itimer *timer;
143 | 123 |
144 | | - hlist_for_each_entry_rcu(timer, head, t_hash) {
| 124 | + hlist_for_each_entry_rcu(timer, head, t_hash,
| 125 | + lockdep_is_held(&hash_lock)) {
145 | 126 | if ((timer->it_signal == sig) && (timer->it_id == id))
146 | 127 | return timer;
147 | 128 | }
.. | .. |
186 | 167 | }
187 | 168 |
188 | 169 | /* Get clock_realtime */
189 | | -static int posix_clock_realtime_get(clockid_t which_clock, struct timespec64 *tp)
| 170 | +static int posix_get_realtime_timespec(clockid_t which_clock, struct timespec64 *tp)
190 | 171 | {
191 | 172 | ktime_get_real_ts64(tp);
192 | 173 | return 0;
| 174 | +}
| 175 | +
| 176 | +static ktime_t posix_get_realtime_ktime(clockid_t which_clock)
| 177 | +{
| 178 | + return ktime_get_real();
193 | 179 | }
194 | 180 |
195 | 181 | /* Set clock_realtime */
.. | .. |
200 | 186 | }
201 | 187 |
202 | 188 | static int posix_clock_realtime_adj(const clockid_t which_clock,
203 | | - struct timex *t)
| 189 | + struct __kernel_timex *t)
204 | 190 | {
205 | 191 | return do_adjtimex(t);
206 | 192 | }
.. | .. |
208 | 194 | /*
209 | 195 | * Get monotonic time for posix timers
210 | 196 | */
211 | | -static int posix_ktime_get_ts(clockid_t which_clock, struct timespec64 *tp)
| 197 | +static int posix_get_monotonic_timespec(clockid_t which_clock, struct timespec64 *tp)
212 | 198 | {
213 | 199 | ktime_get_ts64(tp);
| 200 | + timens_add_monotonic(tp);
214 | 201 | return 0;
| 202 | +}
| 203 | +
| 204 | +static ktime_t posix_get_monotonic_ktime(clockid_t which_clock)
| 205 | +{
| 206 | + return ktime_get();
215 | 207 | }
216 | 208 |
217 | 209 | /*
.. | .. |
220 | 212 | static int posix_get_monotonic_raw(clockid_t which_clock, struct timespec64 *tp)
221 | 213 | {
222 | 214 | ktime_get_raw_ts64(tp);
| 215 | + timens_add_monotonic(tp);
223 | 216 | return 0;
224 | 217 | }
225 | 218 |
.. | .. |
234 | 227 | struct timespec64 *tp)
235 | 228 | {
236 | 229 | ktime_get_coarse_ts64(tp);
| 230 | + timens_add_monotonic(tp);
237 | 231 | return 0;
238 | 232 | }
239 | 233 |
.. | .. |
243 | 237 | return 0;
244 | 238 | }
245 | 239 |
246 | | -static int posix_get_boottime(const clockid_t which_clock, struct timespec64 *tp)
| 240 | +static int posix_get_boottime_timespec(const clockid_t which_clock, struct timespec64 *tp)
247 | 241 | {
248 | 242 | ktime_get_boottime_ts64(tp);
| 243 | + timens_add_boottime(tp);
249 | 244 | return 0;
250 | 245 | }
251 | 246 |
252 | | -static int posix_get_tai(clockid_t which_clock, struct timespec64 *tp)
| 247 | +static ktime_t posix_get_boottime_ktime(const clockid_t which_clock)
| 248 | +{
| 249 | + return ktime_get_boottime();
| 250 | +}
| 251 | +
| 252 | +static int posix_get_tai_timespec(clockid_t which_clock, struct timespec64 *tp)
253 | 253 | {
254 | 254 | ktime_get_clocktai_ts64(tp);
255 | 255 | return 0;
| 256 | +}
| 257 | +
| 258 | +static ktime_t posix_get_tai_ktime(clockid_t which_clock)
| 259 | +{
| 260 | + return ktime_get_clocktai();
256 | 261 | }
257 | 262 |
258 | 263 | static int posix_get_hrtimer_res(clockid_t which_clock, struct timespec64 *tp)
.. | .. |
305 | 310 | * To protect against the timer going away while the interrupt is queued,
306 | 311 | * we require that the it_requeue_pending flag be set.
307 | 312 | */
308 | | -void posixtimer_rearm(struct siginfo *info)
| 313 | +void posixtimer_rearm(struct kernel_siginfo *info)
309 | 314 | {
310 | 315 | struct k_itimer *timr;
311 | 316 | unsigned long flags;
.. | .. |
434 | 439 | rtn = pid_task(pid, PIDTYPE_PID);
435 | 440 | if (!rtn || !same_thread_group(rtn, current))
436 | 441 | return NULL;
437 | | - /* FALLTHRU */
| 442 | + fallthrough;
438 | 443 | case SIGEV_SIGNAL:
439 | 444 | case SIGEV_THREAD:
440 | 445 | if (event->sigev_signo <= 0 || event->sigev_signo > SIGRTMAX)
441 | 446 | return NULL;
442 | | - /* FALLTHRU */
| 447 | + fallthrough;
443 | 448 | case SIGEV_NONE:
444 | 449 | return pid;
445 | 450 | default:
.. | .. |
666 | 671 | {
667 | 672 | const struct k_clock *kc = timr->kclock;
668 | 673 | ktime_t now, remaining, iv;
669 | | - struct timespec64 ts64;
670 | 674 | bool sig_none;
671 | 675 |
672 | 676 | sig_none = timr->it_sigev_notify == SIGEV_NONE;
.. | .. |
684 | 688 | return;
685 | 689 | }
686 | 690 |
687 | | - /*
688 | | - * The timespec64 based conversion is suboptimal, but it's not
689 | | - * worth to implement yet another callback.
690 | | - */
691 | | - kc->clock_get(timr->it_clock, &ts64);
692 | | - now = timespec64_to_ktime(ts64);
| 691 | + now = kc->clock_get_ktime(timr->it_clock);
693 | 692 |
694 | 693 | /*
695 | 694 | * When a requeue is pending or this is a SIGEV_NONE timer move the
.. | .. |
751 | 750 |
752 | 751 | #ifdef CONFIG_COMPAT_32BIT_TIME
753 | 752 |
754 | | -COMPAT_SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
755 | | - struct compat_itimerspec __user *, setting)
| 753 | +SYSCALL_DEFINE2(timer_gettime32, timer_t, timer_id,
| 754 | + struct old_itimerspec32 __user *, setting)
756 | 755 | {
757 | 756 | struct itimerspec64 cur_setting;
758 | 757 |
759 | 758 | int ret = do_timer_gettime(timer_id, &cur_setting);
760 | 759 | if (!ret) {
761 | | - if (put_compat_itimerspec64(&cur_setting, setting))
| 760 | + if (put_old_itimerspec32(&cur_setting, setting))
762 | 761 | ret = -EFAULT;
763 | 762 | }
764 | 763 | return ret;
.. | .. |
802 | 801 | * Posix magic: Relative CLOCK_REALTIME timers are not affected by
803 | 802 | * clock modifications, so they become CLOCK_MONOTONIC based under the
804 | 803 | * hood. See hrtimer_init(). Update timr->kclock, so the generic
805 | | - * functions which use timr->kclock->clock_get() work.
| 804 | + * functions which use timr->kclock->clock_get_*() work.
806 | 805 | *
807 | 806 | * Note: it_clock stays unmodified, because the next timer_set() might
808 | 807 | * use ABSTIME, so it needs to switch back.
.. | .. |
826 | 825 | return hrtimer_try_to_cancel(&timr->it.real.timer);
827 | 826 | }
828 | 827 |
829 | | -static void timer_wait_for_callback(const struct k_clock *kc, struct k_itimer *timer)
| 828 | +static void common_timer_wait_running(struct k_itimer *timer)
830 | 829 | {
831 | | - if (kc->timer_arm == common_hrtimer_arm)
832 | | - hrtimer_grab_expiry_lock(&timer->it.real.timer);
833 | | - else if (kc == &alarm_clock)
834 | | - hrtimer_grab_expiry_lock(&timer->it.alarm.alarmtimer.timer);
835 | | - else
836 | | - /* posix-cpu-timers */
837 | | - cpu_timers_grab_expiry_lock(timer);
| 830 | + hrtimer_cancel_wait_running(&timer->it.real.timer);
| 831 | +}
| 832 | +
| 833 | +/*
| 834 | + * On PREEMPT_RT this prevents priority inversion against the softirq kthread in
| 835 | + * case it gets preempted while executing a timer callback. See comments in
| 836 | + * hrtimer_cancel_wait_running. For PREEMPT_RT=n this just results in a
| 837 | + * cpu_relax().
| 838 | + */
| 839 | +static struct k_itimer *timer_wait_running(struct k_itimer *timer,
| 840 | + unsigned long *flags)
| 841 | +{
| 842 | + const struct k_clock *kc = READ_ONCE(timer->kclock);
| 843 | + timer_t timer_id = READ_ONCE(timer->it_id);
| 844 | +
| 845 | + /* Prevent kfree(timer) after dropping the lock */
| 846 | + rcu_read_lock();
| 847 | + unlock_timer(timer, *flags);
| 848 | +
| 849 | + /*
| 850 | + * kc->timer_wait_running() might drop RCU lock. So @timer
| 851 | + * cannot be touched anymore after the function returns!
| 852 | + */
| 853 | + if (!WARN_ON_ONCE(!kc->timer_wait_running))
| 854 | + kc->timer_wait_running(timer);
| 855 | +
| 856 | + rcu_read_unlock();
| 857 | + /* Relock the timer. It might no longer be hashed. */
| 858 | + return lock_timer(timer_id, flags);
838 | 859 | }
839 | 860 |
840 | 861 | /* Set a POSIX.1b interval timer. */
.. | .. |
869 | 890 |
870 | 891 | timr->it_interval = timespec64_to_ktime(new_setting->it_interval);
871 | 892 | expires = timespec64_to_ktime(new_setting->it_value);
| 893 | + if (flags & TIMER_ABSTIME)
| 894 | + expires = timens_ktime_to_host(timr->it_clock, expires);
872 | 895 | sigev_none = timr->it_sigev_notify == SIGEV_NONE;
873 | 896 |
874 | 897 | kc->timer_arm(timr, expires, flags & TIMER_ABSTIME, sigev_none);
.. | .. |
876 | 899 | return 0;
877 | 900 | }
878 | 901 |
879 | | -static int do_timer_settime(timer_t timer_id, int flags,
| 902 | +static int do_timer_settime(timer_t timer_id, int tmr_flags,
880 | 903 | struct itimerspec64 *new_spec64,
881 | 904 | struct itimerspec64 *old_spec64)
882 | 905 | {
883 | 906 | const struct k_clock *kc;
884 | 907 | struct k_itimer *timr;
885 | | - unsigned long flag;
| 908 | + unsigned long flags;
886 | 909 | int error = 0;
887 | 910 |
888 | 911 | if (!timespec64_valid(&new_spec64->it_interval) ||
.. | .. |
891 | 914 |
892 | 915 | if (old_spec64)
893 | 916 | memset(old_spec64, 0, sizeof(*old_spec64));
| 917 | +
| 918 | + timr = lock_timer(timer_id, &flags);
894 | 919 | retry:
895 | | - timr = lock_timer(timer_id, &flag);
896 | 920 | if (!timr)
897 | 921 | return -EINVAL;
898 | 922 |
.. | .. |
900 | 924 | if (WARN_ON_ONCE(!kc || !kc->timer_set))
901 | 925 | error = -EINVAL;
902 | 926 | else
903 | | - error = kc->timer_set(timr, flags, new_spec64, old_spec64);
| 927 | + error = kc->timer_set(timr, tmr_flags, new_spec64, old_spec64);
904 | 928 |
905 | 929 | if (error == TIMER_RETRY) {
906 | | - rcu_read_lock();
907 | | - unlock_timer(timr, flag);
908 | | - timer_wait_for_callback(kc, timr);
909 | | - rcu_read_unlock();
910 | | - old_spec64 = NULL; // We already got the old time...
| 930 | + // We already got the old time...
| 931 | + old_spec64 = NULL;
| 932 | + /* Unlocks and relocks the timer if it still exists */
| 933 | + timr = timer_wait_running(timr, &flags);
911 | 934 | goto retry;
912 | 935 | }
913 | | - unlock_timer(timr, flag);
| 936 | + unlock_timer(timr, flags);
914 | 937 |
915 | 938 | return error;
916 | 939 | }
.. | .. |
939 | 962 | }
940 | 963 |
941 | 964 | #ifdef CONFIG_COMPAT_32BIT_TIME
942 | | -COMPAT_SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
943 | | - struct compat_itimerspec __user *, new,
944 | | - struct compat_itimerspec __user *, old)
| 965 | +SYSCALL_DEFINE4(timer_settime32, timer_t, timer_id, int, flags,
| 966 | + struct old_itimerspec32 __user *, new,
| 967 | + struct old_itimerspec32 __user *, old)
945 | 968 | {
946 | 969 | struct itimerspec64 new_spec, old_spec;
947 | 970 | struct itimerspec64 *rtn = old ? &old_spec : NULL;
.. | .. |
949 | 972 |
950 | 973 | if (!new)
951 | 974 | return -EINVAL;
952 | | - if (get_compat_itimerspec64(&new_spec, new))
| 975 | + if (get_old_itimerspec32(&new_spec, new))
953 | 976 | return -EFAULT;
954 | 977 |
955 | 978 | error = do_timer_settime(timer_id, flags, &new_spec, rtn);
956 | 979 | if (!error && old) {
957 | | - if (put_compat_itimerspec64(&old_spec, old))
| 980 | + if (put_old_itimerspec32(&old_spec, old))
958 | 981 | error = -EFAULT;
959 | 982 | }
960 | 983 | return error;
.. | .. |
972 | 995 | return 0;
973 | 996 | }
974 | 997 |
975 | | -static int timer_delete_hook(struct k_itimer *timer)
| 998 | +static inline int timer_delete_hook(struct k_itimer *timer)
976 | 999 | {
977 | 1000 | const struct k_clock *kc = timer->kclock;
978 | | - int ret;
979 | 1001 |
980 | 1002 | if (WARN_ON_ONCE(!kc || !kc->timer_del))
981 | 1003 | return -EINVAL;
982 | | - ret = kc->timer_del(timer);
983 | | - if (ret == TIMER_RETRY) {
984 | | - rcu_read_lock();
985 | | - spin_unlock_irq(&timer->it_lock);
986 | | - timer_wait_for_callback(kc, timer);
987 | | - rcu_read_unlock();
988 | | - }
989 | | - return ret;
| 1004 | + return kc->timer_del(timer);
990 | 1005 | }
991 | 1006 |
992 | 1007 | /* Delete a POSIX.1b interval timer. */
.. | .. |
995 | 1010 | struct k_itimer *timer;
996 | 1011 | unsigned long flags;
997 | 1012 |
998 | | -retry_delete:
999 | 1013 | timer = lock_timer(timer_id, &flags);
| 1014 | +
| 1015 | +retry_delete:
1000 | 1016 | if (!timer)
1001 | 1017 | return -EINVAL;
1002 | 1018 |
1003 | | - if (timer_delete_hook(timer) == TIMER_RETRY)
| 1019 | + if (unlikely(timer_delete_hook(timer) == TIMER_RETRY)) {
| 1020 | + /* Unlocks and relocks the timer if it still exists */
| 1021 | + timer = timer_wait_running(timer, &flags);
1004 | 1022 | goto retry_delete;
| 1023 | + }
1005 | 1024 |
1006 | 1025 | spin_lock(&current->sighand->siglock);
1007 | 1026 | list_del(&timer->list);
.. | .. |
1018 | 1037 | }
1019 | 1038 |
1020 | 1039 | /*
1021 | | - * return timer owned by the process, used by exit_itimers
| 1040 | + * Delete a timer if it is armed, remove it from the hash and schedule it
| 1041 | + * for RCU freeing.
1022 | 1042 | */
1023 | 1043 | static void itimer_delete(struct k_itimer *timer)
1024 | 1044 | {
1025 | 1045 | unsigned long flags;
1026 | 1046 |
1027 | | -retry_delete:
| 1047 | + /*
| 1048 | + * irqsave is required to make timer_wait_running() work.
| 1049 | + */
1028 | 1050 | spin_lock_irqsave(&timer->it_lock, flags);
1029 | 1051 |
1030 | | - if (timer_delete_hook(timer) == TIMER_RETRY)
1031 | | - goto retry_delete;
1032 | | -
1033 | | - list_del(&timer->list);
| 1052 | +retry_delete:
1034 | 1053 | /*
1035 | | - * This keeps any tasks waiting on the spin lock from thinking
1036 | | - * they got something (see the lock code above).
| 1054 | + * Even if the timer is no longer accessible from other tasks
| 1055 | + * it still might be armed and queued in the underlying timer
| 1056 | + * mechanism. Worse, that timer mechanism might run the expiry
| 1057 | + * function concurrently.
1037 | 1058 | */
1038 | | - timer->it_signal = NULL;
| 1059 | + if (timer_delete_hook(timer) == TIMER_RETRY) {
| 1060 | + /*
| 1061 | + * The timer is being expired concurrently; prevent livelocks
| 1062 | + * and pointless spinning on RT.
| 1063 | + *
| 1064 | + * timer_wait_running() drops timer::it_lock, which opens
| 1065 | + * the possibility for another task to delete the timer.
| 1066 | + *
| 1067 | + * That's not possible here because this is invoked from
| 1068 | + * do_exit() only for the last thread of the thread group.
| 1069 | + * So no other task can access and delete that timer.
| 1070 | + */
| 1071 | + if (WARN_ON_ONCE(timer_wait_running(timer, &flags) != timer))
| 1072 | + return;
1039 | 1073 |
1040 | | - unlock_timer(timer, flags);
| 1074 | + goto retry_delete;
| 1075 | + }
| 1076 | + list_del(&timer->list);
| 1077 | +
| 1078 | + spin_unlock_irqrestore(&timer->it_lock, flags);
1041 | 1079 | release_posix_timer(timer, IT_ID_SET);
1042 | 1080 | }
1043 | 1081 |
1044 | 1082 | /*
1045 | | - * This is called by do_exit or de_thread, only when there are no more
1046 | | - * references to the shared signal_struct.
| 1083 | + * Invoked from do_exit() when the last thread of a thread group exits.
| 1084 | + * At that point no other task can access the timers of the dying
| 1085 | + * task anymore.
1047 | 1086 | */
1048 | | -void exit_itimers(struct signal_struct *sig)
| 1087 | +void exit_itimers(struct task_struct *tsk)
1049 | 1088 | {
| 1089 | + struct list_head timers;
1050 | 1090 | struct k_itimer *tmr;
1051 | 1091 |
1052 | | - while (!list_empty(&sig->posix_timers)) {
1053 | | - tmr = list_entry(sig->posix_timers.next, struct k_itimer, list);
| 1092 | + if (list_empty(&tsk->signal->posix_timers))
| 1093 | + return;
| 1094 | +
| 1095 | + /* Protect against concurrent read via /proc/$PID/timers */
| 1096 | + spin_lock_irq(&tsk->sighand->siglock);
| 1097 | + list_replace_init(&tsk->signal->posix_timers, &timers);
| 1098 | + spin_unlock_irq(&tsk->sighand->siglock);
| 1099 | +
| 1100 | + /* The timers are no longer accessible via tsk::signal */
| 1101 | + while (!list_empty(&timers)) {
| 1102 | + tmr = list_first_entry(&timers, struct k_itimer, list);
1054 | 1103 | itimer_delete(tmr);
1055 | 1104 | }
1056 | 1105 | }
.. | .. |
1080 | 1129 | if (!kc)
1081 | 1130 | return -EINVAL;
1082 | 1131 |
1083 | | - error = kc->clock_get(which_clock, &kernel_tp);
| 1132 | + error = kc->clock_get_timespec(which_clock, &kernel_tp);
1084 | 1133 |
1085 | 1134 | if (!error && put_timespec64(&kernel_tp, tp))
1086 | 1135 | error = -EFAULT;
.. | .. |
1088 | 1137 | return error;
1089 | 1138 | }
1090 | 1139 |
1091 | | -SYSCALL_DEFINE2(clock_adjtime, const clockid_t, which_clock,
1092 | | - struct timex __user *, utx)
| 1140 | +int do_clock_adjtime(const clockid_t which_clock, struct __kernel_timex * ktx)
1093 | 1141 | {
1094 | 1142 | const struct k_clock *kc = clockid_to_kclock(which_clock);
1095 | | - struct timex ktx;
1096 | | - int err;
1097 | 1143 |
1098 | 1144 | if (!kc)
1099 | 1145 | return -EINVAL;
1100 | 1146 | if (!kc->clock_adj)
1101 | 1147 | return -EOPNOTSUPP;
1102 | 1148 |
| 1149 | + return kc->clock_adj(which_clock, ktx);
| 1150 | +}
| 1151 | +
| 1152 | +SYSCALL_DEFINE2(clock_adjtime, const clockid_t, which_clock,
| 1153 | + struct __kernel_timex __user *, utx)
| 1154 | +{
| 1155 | + struct __kernel_timex ktx;
| 1156 | + int err;
| 1157 | +
1103 | 1158 | if (copy_from_user(&ktx, utx, sizeof(ktx)))
1104 | 1159 | return -EFAULT;
1105 | 1160 |
1106 | | - err = kc->clock_adj(which_clock, &ktx);
| 1161 | + err = do_clock_adjtime(which_clock, &ktx);
1107 | 1162 |
1108 | 1163 | if (err >= 0 && copy_to_user(utx, &ktx, sizeof(ktx)))
1109 | 1164 | return -EFAULT;
.. | .. |
1131 | 1186 |
1132 | 1187 | #ifdef CONFIG_COMPAT_32BIT_TIME
1133 | 1188 |
1134 | | -COMPAT_SYSCALL_DEFINE2(clock_settime, clockid_t, which_clock,
1135 | | - struct compat_timespec __user *, tp)
| 1189 | +SYSCALL_DEFINE2(clock_settime32, clockid_t, which_clock,
| 1190 | + struct old_timespec32 __user *, tp)
1136 | 1191 | {
1137 | 1192 | const struct k_clock *kc = clockid_to_kclock(which_clock);
1138 | 1193 | struct timespec64 ts;
.. | .. |
1140 | 1195 | if (!kc || !kc->clock_set)
1141 | 1196 | return -EINVAL;
1142 | 1197 |
1143 | | - if (compat_get_timespec64(&ts, tp))
| 1198 | + if (get_old_timespec32(&ts, tp))
1144 | 1199 | return -EFAULT;
1145 | 1200 |
1146 | 1201 | return kc->clock_set(which_clock, &ts);
1147 | 1202 | }
1148 | 1203 |
1149 | | -COMPAT_SYSCALL_DEFINE2(clock_gettime, clockid_t, which_clock,
1150 | | - struct compat_timespec __user *, tp)
| 1204 | +SYSCALL_DEFINE2(clock_gettime32, clockid_t, which_clock,
| 1205 | + struct old_timespec32 __user *, tp)
1151 | 1206 | {
1152 | 1207 | const struct k_clock *kc = clockid_to_kclock(which_clock);
1153 | 1208 | struct timespec64 ts;
.. | .. |
1156 | 1211 | if (!kc)
1157 | 1212 | return -EINVAL;
1158 | 1213 |
1159 | | - err = kc->clock_get(which_clock, &ts);
| 1214 | + err = kc->clock_get_timespec(which_clock, &ts);
1160 | 1215 |
1161 | | - if (!err && compat_put_timespec64(&ts, tp))
| 1216 | + if (!err && put_old_timespec32(&ts, tp))
1162 | 1217 | err = -EFAULT;
1163 | 1218 |
1164 | 1219 | return err;
1165 | 1220 | }
1166 | 1221 |
1167 | | -#endif
1168 | | -
1169 | | -#ifdef CONFIG_COMPAT
1170 | | -
1171 | | -COMPAT_SYSCALL_DEFINE2(clock_adjtime, clockid_t, which_clock,
1172 | | - struct compat_timex __user *, utp)
| 1222 | +SYSCALL_DEFINE2(clock_adjtime32, clockid_t, which_clock,
| 1223 | + struct old_timex32 __user *, utp)
1173 | 1224 | {
1174 | | - const struct k_clock *kc = clockid_to_kclock(which_clock);
1175 | | - struct timex ktx;
| 1225 | + struct __kernel_timex ktx;
1176 | 1226 | int err;
1177 | 1227 |
1178 | | - if (!kc)
1179 | | - return -EINVAL;
1180 | | - if (!kc->clock_adj)
1181 | | - return -EOPNOTSUPP;
1182 | | -
1183 | | - err = compat_get_timex(&ktx, utp);
| 1228 | + err = get_old_timex32(&ktx, utp);
1184 | 1229 | if (err)
1185 | 1230 | return err;
1186 | 1231 |
1187 | | - err = kc->clock_adj(which_clock, &ktx);
| 1232 | + err = do_clock_adjtime(which_clock, &ktx);
1188 | 1233 |
1189 | | - if (err >= 0 && compat_put_timex(utp, &ktx))
| 1234 | + if (err >= 0 && put_old_timex32(utp, &ktx))
1190 | 1235 | return -EFAULT;
1191 | 1236 |
1192 | 1237 | return err;
1193 | 1238 | }
1194 | 1239 |
1195 | | -#endif
1196 | | -
1197 | | -#ifdef CONFIG_COMPAT_32BIT_TIME
1198 | | -
1199 | | -COMPAT_SYSCALL_DEFINE2(clock_getres, clockid_t, which_clock,
1200 | | - struct compat_timespec __user *, tp)
| 1240 | +SYSCALL_DEFINE2(clock_getres_time32, clockid_t, which_clock,
| 1241 | + struct old_timespec32 __user *, tp)
1201 | 1242 | {
1202 | 1243 | const struct k_clock *kc = clockid_to_kclock(which_clock);
1203 | 1244 | struct timespec64 ts;
.. | .. |
1207 | 1248 | return -EINVAL;
1208 | 1249 |
1209 | 1250 | err = kc->clock_getres(which_clock, &ts);
1210 | | - if (!err && tp && compat_put_timespec64(&ts, tp))
| 1251 | + if (!err && tp && put_old_timespec32(&ts, tp))
1211 | 1252 | return -EFAULT;
1212 | 1253 |
1213 | 1254 | return err;
.. | .. |
1221 | 1262 | static int common_nsleep(const clockid_t which_clock, int flags,
1222 | 1263 | const struct timespec64 *rqtp)
1223 | 1264 | {
1224 | | - return hrtimer_nanosleep(rqtp, flags & TIMER_ABSTIME ?
| 1265 | + ktime_t texp = timespec64_to_ktime(*rqtp);
| 1266 | +
| 1267 | + return hrtimer_nanosleep(texp, flags & TIMER_ABSTIME ?
| 1268 | + HRTIMER_MODE_ABS : HRTIMER_MODE_REL,
| 1269 | + which_clock);
| 1270 | +}
| 1271 | +
| 1272 | +static int common_nsleep_timens(const clockid_t which_clock, int flags,
| 1273 | + const struct timespec64 *rqtp)
| 1274 | +{
| 1275 | + ktime_t texp = timespec64_to_ktime(*rqtp);
| 1276 | +
| 1277 | + if (flags & TIMER_ABSTIME)
| 1278 | + texp = timens_ktime_to_host(which_clock, texp);
| 1279 | +
| 1280 | + return hrtimer_nanosleep(texp, flags & TIMER_ABSTIME ?
1225 | 1281 | HRTIMER_MODE_ABS : HRTIMER_MODE_REL,
1226 | 1282 | which_clock);
1227 | 1283 | }
.. | .. |
1245 | 1301 | return -EINVAL;
1246 | 1302 | if (flags & TIMER_ABSTIME)
1247 | 1303 | rmtp = NULL;
| 1304 | + current->restart_block.fn = do_no_restart_syscall;
1248 | 1305 | current->restart_block.nanosleep.type = rmtp ? TT_NATIVE : TT_NONE;
1249 | 1306 | current->restart_block.nanosleep.rmtp = rmtp;
1250 | 1307 |
.. | .. |
1253 | 1310 |
1254 | 1311 | #ifdef CONFIG_COMPAT_32BIT_TIME
1255 | 1312 |
1256 | | -COMPAT_SYSCALL_DEFINE4(clock_nanosleep, clockid_t, which_clock, int, flags,
1257 | | - struct compat_timespec __user *, rqtp,
1258 | | - struct compat_timespec __user *, rmtp)
| 1313 | +SYSCALL_DEFINE4(clock_nanosleep_time32, clockid_t, which_clock, int, flags,
| 1314 | + struct old_timespec32 __user *, rqtp,
| 1315 | + struct old_timespec32 __user *, rmtp)
1259 | 1316 | {
1260 | 1317 | const struct k_clock *kc = clockid_to_kclock(which_clock);
1261 | 1318 | struct timespec64 t;
.. | .. |
1265 | 1322 | if (!kc->nsleep)
1266 | 1323 | return -EOPNOTSUPP;
1267 | 1324 |
1268 | | - if (compat_get_timespec64(&t, rqtp))
| 1325 | + if (get_old_timespec32(&t, rqtp))
1269 | 1326 | return -EFAULT;
1270 | 1327 |
1271 | 1328 | if (!timespec64_valid(&t))
1272 | 1329 | return -EINVAL;
1273 | 1330 | if (flags & TIMER_ABSTIME)
1274 | 1331 | rmtp = NULL;
| 1332 | + current->restart_block.fn = do_no_restart_syscall;
1275 | 1333 | current->restart_block.nanosleep.type = rmtp ? TT_COMPAT : TT_NONE;
1276 | 1334 | current->restart_block.nanosleep.compat_rmtp = rmtp;
1277 | 1335 |
.. | .. |
1282 | 1340 |
1283 | 1341 | static const struct k_clock clock_realtime = {
1284 | 1342 | .clock_getres = posix_get_hrtimer_res,
1285 | | - .clock_get = posix_clock_realtime_get,
| 1343 | + .clock_get_timespec = posix_get_realtime_timespec,
| 1344 | + .clock_get_ktime = posix_get_realtime_ktime,
1286 | 1345 | .clock_set = posix_clock_realtime_set,
1287 | 1346 | .clock_adj = posix_clock_realtime_adj,
1288 | 1347 | .nsleep = common_nsleep,
.. | .. |
1294 | 1353 | .timer_forward = common_hrtimer_forward,
1295 | 1354 | .timer_remaining = common_hrtimer_remaining,
1296 | 1355 | .timer_try_to_cancel = common_hrtimer_try_to_cancel,
| 1356 | + .timer_wait_running = common_timer_wait_running,
1297 | 1357 | .timer_arm = common_hrtimer_arm,
1298 | 1358 | };
1299 | 1359 |
1300 | 1360 | static const struct k_clock clock_monotonic = {
1301 | 1361 | .clock_getres = posix_get_hrtimer_res,
1302 | | - .clock_get = posix_ktime_get_ts,
1303 | | - .nsleep = common_nsleep,
| 1362 | + .clock_get_timespec = posix_get_monotonic_timespec,
| 1363 | + .clock_get_ktime = posix_get_monotonic_ktime,
| 1364 | + .nsleep = common_nsleep_timens,
1304 | 1365 | .timer_create = common_timer_create,
1305 | 1366 | .timer_set = common_timer_set,
1306 | 1367 | .timer_get = common_timer_get,
.. | .. |
1309 | 1370 | .timer_forward = common_hrtimer_forward,
1310 | 1371 | .timer_remaining = common_hrtimer_remaining,
1311 | 1372 | .timer_try_to_cancel = common_hrtimer_try_to_cancel,
| 1373 | + .timer_wait_running = common_timer_wait_running,
1312 | 1374 | .timer_arm = common_hrtimer_arm,
1313 | 1375 | };
1314 | 1376 |
1315 | 1377 | static const struct k_clock clock_monotonic_raw = {
1316 | 1378 | .clock_getres = posix_get_hrtimer_res,
1317 | | - .clock_get = posix_get_monotonic_raw,
| 1379 | + .clock_get_timespec = posix_get_monotonic_raw,
1318 | 1380 | };
1319 | 1381 |
1320 | 1382 | static const struct k_clock clock_realtime_coarse = {
1321 | 1383 | .clock_getres = posix_get_coarse_res,
1322 | | - .clock_get = posix_get_realtime_coarse,
| 1384 | + .clock_get_timespec = posix_get_realtime_coarse,
1323 | 1385 | };
1324 | 1386 |
1325 | 1387 | static const struct k_clock clock_monotonic_coarse = {
1326 | 1388 | .clock_getres = posix_get_coarse_res,
1327 | | - .clock_get = posix_get_monotonic_coarse,
| 1389 | + .clock_get_timespec = posix_get_monotonic_coarse,
1328 | 1390 | };
1329 | 1391 |
1330 | 1392 | static const struct k_clock clock_tai = {
1331 | 1393 | .clock_getres = posix_get_hrtimer_res,
1332 | | - .clock_get = posix_get_tai,
| 1394 | + .clock_get_ktime = posix_get_tai_ktime,
| 1395 | + .clock_get_timespec = posix_get_tai_timespec,
1333 | 1396 | .nsleep = common_nsleep,
1334 | 1397 | .timer_create = common_timer_create,
1335 | 1398 | .timer_set = common_timer_set,
.. | .. |
1339 | 1402 | .timer_forward = common_hrtimer_forward,
1340 | 1403 | .timer_remaining = common_hrtimer_remaining,
1341 | 1404 | .timer_try_to_cancel = common_hrtimer_try_to_cancel,
| 1405 | + .timer_wait_running = common_timer_wait_running,
1342 | 1406 | .timer_arm = common_hrtimer_arm,
1343 | 1407 | };
1344 | 1408 |
1345 | 1409 | static const struct k_clock clock_boottime = {
1346 | 1410 | .clock_getres = posix_get_hrtimer_res,
1347 | | - .clock_get = posix_get_boottime,
1348 | | - .nsleep = common_nsleep,
| 1411 | + .clock_get_ktime = posix_get_boottime_ktime,
| 1412 | + .clock_get_timespec = posix_get_boottime_timespec,
| 1413 | + .nsleep = common_nsleep_timens,
1349 | 1414 | .timer_create = common_timer_create,
1350 | 1415 | .timer_set = common_timer_set,
1351 | 1416 | .timer_get = common_timer_get,
.. | .. |
1354 | 1419 | .timer_forward = common_hrtimer_forward,
1355 | 1420 | .timer_remaining = common_hrtimer_remaining,
1356 | 1421 | .timer_try_to_cancel = common_hrtimer_try_to_cancel,
| 1422 | + .timer_wait_running = common_timer_wait_running,
1357 | 1423 | .timer_arm = common_hrtimer_arm,
1358 | 1424 | };
1359 | 1425 |
---|