2024-12-19 9370bb92b2d16684ee45cf24e879c93c509162da
kernel/kernel/time/posix-timers.c
@@ -1,34 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
- * linux/kernel/posix-timers.c
- *
- *
  * 2002-10-15 Posix Clocks & timers
  *            by George Anzinger george@mvista.com
- *
  *            Copyright (C) 2002 2003 by MontaVista Software.
  *
  * 2004-06-01 Fix CLOCK_REALTIME clock/timer TIMER_ABSTIME bug.
  *            Copyright (C) 2004 Boris Hu
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
-
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * MontaVista Software | 1237 East Arques Avenue | Sunnyvale | CA 94085 | USA
- */
-
-/* These are all the functions necessary to implement
- * POSIX clocks & timers
+ * These are all the functions necessary to implement POSIX clocks & timers
  */
 #include <linux/mm.h>
 #include <linux/interrupt.h>
@@ -51,6 +30,7 @@
 #include <linux/hashtable.h>
 #include <linux/compat.h>
 #include <linux/nospec.h>
+#include <linux/time_namespace.h>
 
 #include "timekeeping.h"
 #include "posix-timers.h"
@@ -141,7 +121,8 @@
 {
         struct k_itimer *timer;
 
-        hlist_for_each_entry_rcu(timer, head, t_hash) {
+        hlist_for_each_entry_rcu(timer, head, t_hash,
+                                 lockdep_is_held(&hash_lock)) {
                 if ((timer->it_signal == sig) && (timer->it_id == id))
                         return timer;
         }
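The extra argument added to hlist_for_each_entry_rcu() above is a lockdep expression: it tells RCU-lockdep that walking this hash bucket is also legitimate while hash_lock is held, not only under rcu_read_lock(). A minimal sketch of the same pattern, with made-up names (demo_lock, demo_head, demo_find), assuming a bucket that is read both ways:

static DEFINE_SPINLOCK(demo_lock);
static HLIST_HEAD(demo_head);

struct demo_entry {
        struct hlist_node node;
        int id;
};

/* Called either under rcu_read_lock() or with demo_lock held. */
static struct demo_entry *demo_find(int id)
{
        struct demo_entry *e;

        hlist_for_each_entry_rcu(e, &demo_head, node,
                                 lockdep_is_held(&demo_lock)) {
                if (e->id == id)        /* valid under either protection */
                        return e;
        }
        return NULL;
}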
@@ -186,10 +167,15 @@
 }
 
 /* Get clock_realtime */
-static int posix_clock_realtime_get(clockid_t which_clock, struct timespec64 *tp)
+static int posix_get_realtime_timespec(clockid_t which_clock, struct timespec64 *tp)
 {
         ktime_get_real_ts64(tp);
         return 0;
+}
+
+static ktime_t posix_get_realtime_ktime(clockid_t which_clock)
+{
+        return ktime_get_real();
 }
 
 /* Set clock_realtime */
@@ -200,7 +186,7 @@
 }
 
 static int posix_clock_realtime_adj(const clockid_t which_clock,
-                                    struct timex *t)
+                                    struct __kernel_timex *t)
 {
         return do_adjtimex(t);
 }
@@ -208,10 +194,16 @@
 /*
  * Get monotonic time for posix timers
  */
-static int posix_ktime_get_ts(clockid_t which_clock, struct timespec64 *tp)
+static int posix_get_monotonic_timespec(clockid_t which_clock, struct timespec64 *tp)
 {
         ktime_get_ts64(tp);
+        timens_add_monotonic(tp);
         return 0;
+}
+
+static ktime_t posix_get_monotonic_ktime(clockid_t which_clock)
+{
+        return ktime_get();
 }
 
 /*
@@ -220,6 +212,7 @@
 static int posix_get_monotonic_raw(clockid_t which_clock, struct timespec64 *tp)
 {
         ktime_get_raw_ts64(tp);
+        timens_add_monotonic(tp);
         return 0;
 }
 
@@ -234,6 +227,7 @@
                                      struct timespec64 *tp)
 {
         ktime_get_coarse_ts64(tp);
+        timens_add_monotonic(tp);
         return 0;
 }
 
@@ -243,16 +237,27 @@
         return 0;
 }
 
-static int posix_get_boottime(const clockid_t which_clock, struct timespec64 *tp)
+static int posix_get_boottime_timespec(const clockid_t which_clock, struct timespec64 *tp)
 {
         ktime_get_boottime_ts64(tp);
+        timens_add_boottime(tp);
         return 0;
 }
 
-static int posix_get_tai(clockid_t which_clock, struct timespec64 *tp)
+static ktime_t posix_get_boottime_ktime(const clockid_t which_clock)
+{
+        return ktime_get_boottime();
+}
+
+static int posix_get_tai_timespec(clockid_t which_clock, struct timespec64 *tp)
 {
         ktime_get_clocktai_ts64(tp);
         return 0;
+}
+
+static ktime_t posix_get_tai_ktime(clockid_t which_clock)
+{
+        return ktime_get_clocktai();
 }
 
 static int posix_get_hrtimer_res(clockid_t which_clock, struct timespec64 *tp)
@@ -305,7 +310,7 @@
  * To protect against the timer going away while the interrupt is queued,
  * we require that the it_requeue_pending flag be set.
  */
-void posixtimer_rearm(struct siginfo *info)
+void posixtimer_rearm(struct kernel_siginfo *info)
 {
         struct k_itimer *timr;
         unsigned long flags;
@@ -434,12 +439,12 @@
                 rtn = pid_task(pid, PIDTYPE_PID);
                 if (!rtn || !same_thread_group(rtn, current))
                         return NULL;
-                /* FALLTHRU */
+                fallthrough;
         case SIGEV_SIGNAL:
         case SIGEV_THREAD:
                 if (event->sigev_signo <= 0 || event->sigev_signo > SIGRTMAX)
                         return NULL;
-                /* FALLTHRU */
+                fallthrough;
         case SIGEV_NONE:
                 return pid;
         default:
@@ -666,7 +671,6 @@
 {
         const struct k_clock *kc = timr->kclock;
         ktime_t now, remaining, iv;
-        struct timespec64 ts64;
         bool sig_none;
 
         sig_none = timr->it_sigev_notify == SIGEV_NONE;
@@ -684,12 +688,7 @@
                 return;
         }
 
-        /*
-         * The timespec64 based conversion is suboptimal, but it's not
-         * worth to implement yet another callback.
-         */
-        kc->clock_get(timr->it_clock, &ts64);
-        now = timespec64_to_ktime(ts64);
+        now = kc->clock_get_ktime(timr->it_clock);
 
         /*
          * When a requeue is pending or this is a SIGEV_NONE timer move the
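common_timer_get() can now ask the clock for a ktime_t directly because the old single clock_get() callback is split into clock_get_timespec() and clock_get_ktime(). A rough before/after sketch of what that saves; the helper names (now_old_way, now_new_way) are made up:

/* Old shape: ktime_t consumers had to round-trip through timespec64. */
static ktime_t now_old_way(const struct k_clock *kc, clockid_t which)
{
        struct timespec64 ts;

        kc->clock_get(which, &ts);              /* former callback */
        return timespec64_to_ktime(ts);         /* extra conversion */
}

/* New shape: fetch the ktime_t in one call. */
static ktime_t now_new_way(const struct k_clock *kc, clockid_t which)
{
        return kc->clock_get_ktime(which);
}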
@@ -751,14 +750,14 @@
 
 #ifdef CONFIG_COMPAT_32BIT_TIME
 
-COMPAT_SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
-                       struct compat_itimerspec __user *, setting)
+SYSCALL_DEFINE2(timer_gettime32, timer_t, timer_id,
+                struct old_itimerspec32 __user *, setting)
 {
         struct itimerspec64 cur_setting;
 
         int ret = do_timer_gettime(timer_id, &cur_setting);
         if (!ret) {
-                if (put_compat_itimerspec64(&cur_setting, setting))
+                if (put_old_itimerspec32(&cur_setting, setting))
                         ret = -EFAULT;
         }
         return ret;
@@ -802,7 +801,7 @@
          * Posix magic: Relative CLOCK_REALTIME timers are not affected by
          * clock modifications, so they become CLOCK_MONOTONIC based under the
          * hood. See hrtimer_init(). Update timr->kclock, so the generic
-         * functions which use timr->kclock->clock_get() work.
+         * functions which use timr->kclock->clock_get_*() work.
          *
          * Note: it_clock stays unmodified, because the next timer_set() might
          * use ABSTIME, so it needs to switch back.
@@ -826,15 +825,37 @@
         return hrtimer_try_to_cancel(&timr->it.real.timer);
 }
 
-static void timer_wait_for_callback(const struct k_clock *kc, struct k_itimer *timer)
+static void common_timer_wait_running(struct k_itimer *timer)
 {
-        if (kc->timer_arm == common_hrtimer_arm)
-                hrtimer_grab_expiry_lock(&timer->it.real.timer);
-        else if (kc == &alarm_clock)
-                hrtimer_grab_expiry_lock(&timer->it.alarm.alarmtimer.timer);
-        else
-                /* posix-cpu-timers */
-                cpu_timers_grab_expiry_lock(timer);
+        hrtimer_cancel_wait_running(&timer->it.real.timer);
+}
+
+/*
+ * On PREEMPT_RT this prevent priority inversion against softirq kthread in
+ * case it gets preempted while executing a timer callback. See comments in
+ * hrtimer_cancel_wait_running. For PREEMPT_RT=n this just results in a
+ * cpu_relax().
+ */
+static struct k_itimer *timer_wait_running(struct k_itimer *timer,
+                                           unsigned long *flags)
+{
+        const struct k_clock *kc = READ_ONCE(timer->kclock);
+        timer_t timer_id = READ_ONCE(timer->it_id);
+
+        /* Prevent kfree(timer) after dropping the lock */
+        rcu_read_lock();
+        unlock_timer(timer, *flags);
+
+        /*
+         * kc->timer_wait_running() might drop RCU lock. So @timer
+         * cannot be touched anymore after the function returns!
+         */
+        if (!WARN_ON_ONCE(!kc->timer_wait_running))
+                kc->timer_wait_running(timer);
+
+        rcu_read_unlock();
+        /* Relock the timer. It might be not longer hashed. */
+        return lock_timer(timer_id, flags);
 }
 
 /* Set a POSIX.1b interval timer. */
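timer_wait_running() has to drop timer::it_lock before waiting out the running expiry callback, and while the lock is dropped a concurrent timer_delete() may free the timer; holding rcu_read_lock() across that window works because the k_itimer is freed via RCU. The caller-side retry pattern (used below by do_timer_settime() and the deletion paths) looks roughly like this sketch, where modify_timer() and try_modify_locked() are hypothetical stand-ins for a TIMER_RETRY-returning operation:

static int modify_timer(timer_t id)
{
        struct k_itimer *timr;
        unsigned long flags;
        int ret;

        timr = lock_timer(id, &flags);
retry:
        if (!timr)
                return -EINVAL;

        ret = try_modify_locked(timr);          /* may return TIMER_RETRY */
        if (ret == TIMER_RETRY) {
                /* Drops and reacquires it_lock; may return NULL */
                timr = timer_wait_running(timr, &flags);
                goto retry;
        }
        unlock_timer(timr, flags);
        return ret;
}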
@@ -869,6 +890,8 @@
 
         timr->it_interval = timespec64_to_ktime(new_setting->it_interval);
         expires = timespec64_to_ktime(new_setting->it_value);
+        if (flags & TIMER_ABSTIME)
+                expires = timens_ktime_to_host(timr->it_clock, expires);
         sigev_none = timr->it_sigev_notify == SIGEV_NONE;
 
         kc->timer_arm(timr, expires, flags & TIMER_ABSTIME, sigev_none);
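An absolute expiry arrives here as a value on the caller's (possibly namespace-offset) clock, while the hrtimer core arms against host time, hence the timens_ktime_to_host() translation for TIMER_ABSTIME. From userspace, the case this covers looks like the sketch below (error handling omitted, link with -lrt on older glibc; arm_abs_timer_2s is a made-up helper name):

#include <signal.h>
#include <time.h>

/* Arm an absolute CLOCK_MONOTONIC timer 2s from "now" as seen inside
 * the caller's time namespace; the kernel converts it to host time. */
int arm_abs_timer_2s(timer_t *out)
{
        struct sigevent sev = { .sigev_notify = SIGEV_SIGNAL,
                                .sigev_signo  = SIGRTMIN };
        struct itimerspec its = { 0 };
        struct timespec now;

        if (timer_create(CLOCK_MONOTONIC, &sev, out))
                return -1;
        clock_gettime(CLOCK_MONOTONIC, &now);   /* namespace-adjusted */
        its.it_value = now;
        its.it_value.tv_sec += 2;
        return timer_settime(*out, TIMER_ABSTIME, &its, NULL);
}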
@@ -876,13 +899,13 @@
         return 0;
 }
 
-static int do_timer_settime(timer_t timer_id, int flags,
+static int do_timer_settime(timer_t timer_id, int tmr_flags,
                             struct itimerspec64 *new_spec64,
                             struct itimerspec64 *old_spec64)
 {
         const struct k_clock *kc;
         struct k_itimer *timr;
-        unsigned long flag;
+        unsigned long flags;
         int error = 0;
 
         if (!timespec64_valid(&new_spec64->it_interval) ||
@@ -891,8 +914,9 @@
 
         if (old_spec64)
                 memset(old_spec64, 0, sizeof(*old_spec64));
+
+        timr = lock_timer(timer_id, &flags);
 retry:
-        timr = lock_timer(timer_id, &flag);
         if (!timr)
                 return -EINVAL;
 
@@ -900,17 +924,16 @@
         if (WARN_ON_ONCE(!kc || !kc->timer_set))
                 error = -EINVAL;
         else
-                error = kc->timer_set(timr, flags, new_spec64, old_spec64);
+                error = kc->timer_set(timr, tmr_flags, new_spec64, old_spec64);
 
         if (error == TIMER_RETRY) {
-                rcu_read_lock();
-                unlock_timer(timr, flag);
-                timer_wait_for_callback(kc, timr);
-                rcu_read_unlock();
-                old_spec64 = NULL;      // We already got the old time...
+                // We already got the old time...
+                old_spec64 = NULL;
+                /* Unlocks and relocks the timer if it still exists */
+                timr = timer_wait_running(timr, &flags);
                 goto retry;
         }
-        unlock_timer(timr, flag);
+        unlock_timer(timr, flags);
 
         return error;
 }
@@ -939,9 +962,9 @@
 }
 
 #ifdef CONFIG_COMPAT_32BIT_TIME
-COMPAT_SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
-                       struct compat_itimerspec __user *, new,
-                       struct compat_itimerspec __user *, old)
+SYSCALL_DEFINE4(timer_settime32, timer_t, timer_id, int, flags,
+                struct old_itimerspec32 __user *, new,
+                struct old_itimerspec32 __user *, old)
 {
         struct itimerspec64 new_spec, old_spec;
         struct itimerspec64 *rtn = old ? &old_spec : NULL;
@@ -949,12 +972,12 @@
 
         if (!new)
                 return -EINVAL;
-        if (get_compat_itimerspec64(&new_spec, new))
+        if (get_old_itimerspec32(&new_spec, new))
                 return -EFAULT;
 
         error = do_timer_settime(timer_id, flags, &new_spec, rtn);
         if (!error && old) {
-                if (put_compat_itimerspec64(&old_spec, old))
+                if (put_old_itimerspec32(&old_spec, old))
                         error = -EFAULT;
         }
         return error;
@@ -972,21 +995,13 @@
         return 0;
 }
 
-static int timer_delete_hook(struct k_itimer *timer)
+static inline int timer_delete_hook(struct k_itimer *timer)
 {
         const struct k_clock *kc = timer->kclock;
-        int ret;
 
         if (WARN_ON_ONCE(!kc || !kc->timer_del))
                 return -EINVAL;
-        ret = kc->timer_del(timer);
-        if (ret == TIMER_RETRY) {
-                rcu_read_lock();
-                spin_unlock_irq(&timer->it_lock);
-                timer_wait_for_callback(kc, timer);
-                rcu_read_unlock();
-        }
-        return ret;
+        return kc->timer_del(timer);
 }
 
 /* Delete a POSIX.1b interval timer. */
@@ -995,13 +1010,17 @@
         struct k_itimer *timer;
         unsigned long flags;
 
-retry_delete:
         timer = lock_timer(timer_id, &flags);
+
+retry_delete:
         if (!timer)
                 return -EINVAL;
 
-        if (timer_delete_hook(timer) == TIMER_RETRY)
+        if (unlikely(timer_delete_hook(timer) == TIMER_RETRY)) {
+                /* Unlocks and relocks the timer if it still exists */
+                timer = timer_wait_running(timer, &flags);
                 goto retry_delete;
+        }
 
         spin_lock(&current->sighand->siglock);
         list_del(&timer->list);
@@ -1018,39 +1037,69 @@
 }
 
 /*
- * return timer owned by the process, used by exit_itimers
+ * Delete a timer if it is armed, remove it from the hash and schedule it
+ * for RCU freeing.
  */
 static void itimer_delete(struct k_itimer *timer)
 {
         unsigned long flags;
 
-retry_delete:
+        /*
+         * irqsave is required to make timer_wait_running() work.
+         */
         spin_lock_irqsave(&timer->it_lock, flags);
 
-        if (timer_delete_hook(timer) == TIMER_RETRY)
-                goto retry_delete;
-
-        list_del(&timer->list);
+retry_delete:
         /*
-         * This keeps any tasks waiting on the spin lock from thinking
-         * they got something (see the lock code above).
+         * Even if the timer is not longer accessible from other tasks
+         * it still might be armed and queued in the underlying timer
+         * mechanism. Worse, that timer mechanism might run the expiry
+         * function concurrently.
          */
-        timer->it_signal = NULL;
+        if (timer_delete_hook(timer) == TIMER_RETRY) {
+                /*
+                 * Timer is expired concurrently, prevent livelocks
+                 * and pointless spinning on RT.
+                 *
+                 * timer_wait_running() drops timer::it_lock, which opens
+                 * the possibility for another task to delete the timer.
+                 *
+                 * That's not possible here because this is invoked from
+                 * do_exit() only for the last thread of the thread group.
+                 * So no other task can access and delete that timer.
+                 */
+                if (WARN_ON_ONCE(timer_wait_running(timer, &flags) != timer))
+                        return;
 
-        unlock_timer(timer, flags);
+                goto retry_delete;
+        }
+        list_del(&timer->list);
+
+        spin_unlock_irqrestore(&timer->it_lock, flags);
         release_posix_timer(timer, IT_ID_SET);
 }
 
 /*
- * This is called by do_exit or de_thread, only when there are no more
- * references to the shared signal_struct.
+ * Invoked from do_exit() when the last thread of a thread group exits.
+ * At that point no other task can access the timers of the dying
+ * task anymore.
  */
-void exit_itimers(struct signal_struct *sig)
+void exit_itimers(struct task_struct *tsk)
 {
+        struct list_head timers;
         struct k_itimer *tmr;
 
-        while (!list_empty(&sig->posix_timers)) {
-                tmr = list_entry(sig->posix_timers.next, struct k_itimer, list);
+        if (list_empty(&tsk->signal->posix_timers))
+                return;
+
+        /* Protect against concurrent read via /proc/$PID/timers */
+        spin_lock_irq(&tsk->sighand->siglock);
+        list_replace_init(&tsk->signal->posix_timers, &timers);
+        spin_unlock_irq(&tsk->sighand->siglock);
+
+        /* The timers are not longer accessible via tsk::signal */
+        while (!list_empty(&timers)) {
+                tmr = list_first_entry(&timers, struct k_itimer, list);
                 itimer_delete(tmr);
         }
 }
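exit_itimers() now detaches the whole posix_timers list under sighand->siglock with list_replace_init() and then drains a private copy, so concurrent readers of /proc/$PID/timers never see a half-torn-down list and each itimer_delete() runs without siglock held. The same detach-then-drain idiom in isolation, with made-up names (demo_list, demo_item, demo_drain):

static LIST_HEAD(demo_list);
static DEFINE_SPINLOCK(demo_list_lock);

struct demo_item {
        struct list_head list;
};

static void demo_drain(void)
{
        struct list_head local;
        struct demo_item *it;

        spin_lock_irq(&demo_list_lock);
        list_replace_init(&demo_list, &local);  /* demo_list is now empty */
        spin_unlock_irq(&demo_list_lock);

        /* Free the entries without holding the lock */
        while (!list_empty(&local)) {
                it = list_first_entry(&local, struct demo_item, list);
                list_del(&it->list);
                kfree(it);
        }
}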
@@ -1080,7 +1129,7 @@
         if (!kc)
                 return -EINVAL;
 
-        error = kc->clock_get(which_clock, &kernel_tp);
+        error = kc->clock_get_timespec(which_clock, &kernel_tp);
 
         if (!error && put_timespec64(&kernel_tp, tp))
                 error = -EFAULT;
@@ -1088,22 +1137,28 @@
         return error;
 }
 
-SYSCALL_DEFINE2(clock_adjtime, const clockid_t, which_clock,
-                struct timex __user *, utx)
+int do_clock_adjtime(const clockid_t which_clock, struct __kernel_timex * ktx)
 {
         const struct k_clock *kc = clockid_to_kclock(which_clock);
-        struct timex ktx;
-        int err;
 
         if (!kc)
                 return -EINVAL;
         if (!kc->clock_adj)
                 return -EOPNOTSUPP;
 
+        return kc->clock_adj(which_clock, ktx);
+}
+
+SYSCALL_DEFINE2(clock_adjtime, const clockid_t, which_clock,
+                struct __kernel_timex __user *, utx)
+{
+        struct __kernel_timex ktx;
+        int err;
+
         if (copy_from_user(&ktx, utx, sizeof(ktx)))
                 return -EFAULT;
 
-        err = kc->clock_adj(which_clock, &ktx);
+        err = do_clock_adjtime(which_clock, &ktx);
 
         if (err >= 0 && copy_to_user(utx, &ktx, sizeof(ktx)))
                 return -EFAULT;
@@ -1131,8 +1186,8 @@
 
 #ifdef CONFIG_COMPAT_32BIT_TIME
 
-COMPAT_SYSCALL_DEFINE2(clock_settime, clockid_t, which_clock,
-                       struct compat_timespec __user *, tp)
+SYSCALL_DEFINE2(clock_settime32, clockid_t, which_clock,
+                struct old_timespec32 __user *, tp)
 {
         const struct k_clock *kc = clockid_to_kclock(which_clock);
         struct timespec64 ts;
@@ -1140,14 +1195,14 @@
         if (!kc || !kc->clock_set)
                 return -EINVAL;
 
-        if (compat_get_timespec64(&ts, tp))
+        if (get_old_timespec32(&ts, tp))
                 return -EFAULT;
 
         return kc->clock_set(which_clock, &ts);
 }
 
-COMPAT_SYSCALL_DEFINE2(clock_gettime, clockid_t, which_clock,
-                       struct compat_timespec __user *, tp)
+SYSCALL_DEFINE2(clock_gettime32, clockid_t, which_clock,
+                struct old_timespec32 __user *, tp)
 {
         const struct k_clock *kc = clockid_to_kclock(which_clock);
         struct timespec64 ts;
@@ -1156,48 +1211,34 @@
         if (!kc)
                 return -EINVAL;
 
-        err = kc->clock_get(which_clock, &ts);
+        err = kc->clock_get_timespec(which_clock, &ts);
 
-        if (!err && compat_put_timespec64(&ts, tp))
+        if (!err && put_old_timespec32(&ts, tp))
                 err = -EFAULT;
 
         return err;
 }
 
-#endif
-
-#ifdef CONFIG_COMPAT
-
-COMPAT_SYSCALL_DEFINE2(clock_adjtime, clockid_t, which_clock,
-                       struct compat_timex __user *, utp)
+SYSCALL_DEFINE2(clock_adjtime32, clockid_t, which_clock,
+                struct old_timex32 __user *, utp)
 {
-        const struct k_clock *kc = clockid_to_kclock(which_clock);
-        struct timex ktx;
+        struct __kernel_timex ktx;
         int err;
 
-        if (!kc)
-                return -EINVAL;
-        if (!kc->clock_adj)
-                return -EOPNOTSUPP;
-
-        err = compat_get_timex(&ktx, utp);
+        err = get_old_timex32(&ktx, utp);
         if (err)
                 return err;
 
-        err = kc->clock_adj(which_clock, &ktx);
+        err = do_clock_adjtime(which_clock, &ktx);
 
-        if (err >= 0 && compat_put_timex(utp, &ktx))
+        if (err >= 0 && put_old_timex32(utp, &ktx))
                 return -EFAULT;
 
         return err;
 }
 
-#endif
-
-#ifdef CONFIG_COMPAT_32BIT_TIME
-
-COMPAT_SYSCALL_DEFINE2(clock_getres, clockid_t, which_clock,
-                       struct compat_timespec __user *, tp)
+SYSCALL_DEFINE2(clock_getres_time32, clockid_t, which_clock,
+                struct old_timespec32 __user *, tp)
 {
         const struct k_clock *kc = clockid_to_kclock(which_clock);
         struct timespec64 ts;
@@ -1207,7 +1248,7 @@
                 return -EINVAL;
 
         err = kc->clock_getres(which_clock, &ts);
-        if (!err && tp && compat_put_timespec64(&ts, tp))
+        if (!err && tp && put_old_timespec32(&ts, tp))
                 return -EFAULT;
 
         return err;
@@ -1221,7 +1262,22 @@
 static int common_nsleep(const clockid_t which_clock, int flags,
                          const struct timespec64 *rqtp)
 {
-        return hrtimer_nanosleep(rqtp, flags & TIMER_ABSTIME ?
+        ktime_t texp = timespec64_to_ktime(*rqtp);
+
+        return hrtimer_nanosleep(texp, flags & TIMER_ABSTIME ?
+                                 HRTIMER_MODE_ABS : HRTIMER_MODE_REL,
+                                 which_clock);
+}
+
+static int common_nsleep_timens(const clockid_t which_clock, int flags,
+                                const struct timespec64 *rqtp)
+{
+        ktime_t texp = timespec64_to_ktime(*rqtp);
+
+        if (flags & TIMER_ABSTIME)
+                texp = timens_ktime_to_host(which_clock, texp);
+
+        return hrtimer_nanosleep(texp, flags & TIMER_ABSTIME ?
                                  HRTIMER_MODE_ABS : HRTIMER_MODE_REL,
                                  which_clock);
 }
@@ -1245,6 +1301,7 @@
                 return -EINVAL;
         if (flags & TIMER_ABSTIME)
                 rmtp = NULL;
+        current->restart_block.fn = do_no_restart_syscall;
         current->restart_block.nanosleep.type = rmtp ? TT_NATIVE : TT_NONE;
         current->restart_block.nanosleep.rmtp = rmtp;
 
@@ -1253,9 +1310,9 @@
 
 #ifdef CONFIG_COMPAT_32BIT_TIME
 
-COMPAT_SYSCALL_DEFINE4(clock_nanosleep, clockid_t, which_clock, int, flags,
-                       struct compat_timespec __user *, rqtp,
-                       struct compat_timespec __user *, rmtp)
+SYSCALL_DEFINE4(clock_nanosleep_time32, clockid_t, which_clock, int, flags,
+                struct old_timespec32 __user *, rqtp,
+                struct old_timespec32 __user *, rmtp)
 {
         const struct k_clock *kc = clockid_to_kclock(which_clock);
         struct timespec64 t;
@@ -1265,13 +1322,14 @@
         if (!kc->nsleep)
                 return -EOPNOTSUPP;
 
-        if (compat_get_timespec64(&t, rqtp))
+        if (get_old_timespec32(&t, rqtp))
                 return -EFAULT;
 
         if (!timespec64_valid(&t))
                 return -EINVAL;
         if (flags & TIMER_ABSTIME)
                 rmtp = NULL;
+        current->restart_block.fn = do_no_restart_syscall;
         current->restart_block.nanosleep.type = rmtp ? TT_COMPAT : TT_NONE;
         current->restart_block.nanosleep.compat_rmtp = rmtp;
 
@@ -1282,7 +1340,8 @@
 
 static const struct k_clock clock_realtime = {
         .clock_getres = posix_get_hrtimer_res,
-        .clock_get = posix_clock_realtime_get,
+        .clock_get_timespec = posix_get_realtime_timespec,
+        .clock_get_ktime = posix_get_realtime_ktime,
         .clock_set = posix_clock_realtime_set,
         .clock_adj = posix_clock_realtime_adj,
         .nsleep = common_nsleep,
@@ -1294,13 +1353,15 @@
         .timer_forward = common_hrtimer_forward,
         .timer_remaining = common_hrtimer_remaining,
         .timer_try_to_cancel = common_hrtimer_try_to_cancel,
+        .timer_wait_running = common_timer_wait_running,
         .timer_arm = common_hrtimer_arm,
 };
 
 static const struct k_clock clock_monotonic = {
         .clock_getres = posix_get_hrtimer_res,
-        .clock_get = posix_ktime_get_ts,
-        .nsleep = common_nsleep,
+        .clock_get_timespec = posix_get_monotonic_timespec,
+        .clock_get_ktime = posix_get_monotonic_ktime,
+        .nsleep = common_nsleep_timens,
         .timer_create = common_timer_create,
         .timer_set = common_timer_set,
         .timer_get = common_timer_get,
@@ -1309,27 +1370,29 @@
         .timer_forward = common_hrtimer_forward,
         .timer_remaining = common_hrtimer_remaining,
         .timer_try_to_cancel = common_hrtimer_try_to_cancel,
+        .timer_wait_running = common_timer_wait_running,
         .timer_arm = common_hrtimer_arm,
 };
 
 static const struct k_clock clock_monotonic_raw = {
         .clock_getres = posix_get_hrtimer_res,
-        .clock_get = posix_get_monotonic_raw,
+        .clock_get_timespec = posix_get_monotonic_raw,
 };
 
 static const struct k_clock clock_realtime_coarse = {
         .clock_getres = posix_get_coarse_res,
-        .clock_get = posix_get_realtime_coarse,
+        .clock_get_timespec = posix_get_realtime_coarse,
 };
 
 static const struct k_clock clock_monotonic_coarse = {
         .clock_getres = posix_get_coarse_res,
-        .clock_get = posix_get_monotonic_coarse,
+        .clock_get_timespec = posix_get_monotonic_coarse,
 };
 
 static const struct k_clock clock_tai = {
         .clock_getres = posix_get_hrtimer_res,
-        .clock_get = posix_get_tai,
+        .clock_get_ktime = posix_get_tai_ktime,
+        .clock_get_timespec = posix_get_tai_timespec,
         .nsleep = common_nsleep,
         .timer_create = common_timer_create,
         .timer_set = common_timer_set,
@@ -1339,13 +1402,15 @@
         .timer_forward = common_hrtimer_forward,
         .timer_remaining = common_hrtimer_remaining,
         .timer_try_to_cancel = common_hrtimer_try_to_cancel,
+        .timer_wait_running = common_timer_wait_running,
         .timer_arm = common_hrtimer_arm,
 };
 
 static const struct k_clock clock_boottime = {
         .clock_getres = posix_get_hrtimer_res,
-        .clock_get = posix_get_boottime,
-        .nsleep = common_nsleep,
+        .clock_get_ktime = posix_get_boottime_ktime,
+        .clock_get_timespec = posix_get_boottime_timespec,
+        .nsleep = common_nsleep_timens,
         .timer_create = common_timer_create,
         .timer_set = common_timer_set,
         .timer_get = common_timer_get,
@@ -1354,6 +1419,7 @@
         .timer_forward = common_hrtimer_forward,
         .timer_remaining = common_hrtimer_remaining,
         .timer_try_to_cancel = common_hrtimer_try_to_cancel,
+        .timer_wait_running = common_timer_wait_running,
         .timer_arm = common_hrtimer_arm,
 };
 