2024-05-10 748e4f3d702def1a4bff191e0cf93b6a05340f01
kernel/arch/sparc/vdso/vclock_gettime.c
@@ -1,6 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright 2006 Andi Kleen, SUSE Labs.
- * Subject to the GNU Public License, v.2
  *
  * Fast user context implementation of clock_gettime, gettimeofday, and time.
  *
@@ -12,11 +12,6 @@
  * Copyright (c) 2017 Oracle and/or its affiliates. All rights reserved.
  */
 
-/* Disable profiling for userspace code: */
-#ifndef DISABLE_BRANCH_PROFILING
-#define DISABLE_BRANCH_PROFILING
-#endif
-
 #include <linux/kernel.h>
 #include <linux/time.h>
 #include <linux/string.h>
@@ -25,13 +20,6 @@
 #include <asm/timex.h>
 #include <asm/clocksource.h>
 #include <asm/vvar.h>
-
-#undef TICK_PRIV_BIT
-#ifdef CONFIG_SPARC64
-#define TICK_PRIV_BIT	(1UL << 63)
-#else
-#define TICK_PRIV_BIT	(1ULL << 63)
-#endif
 
 #ifdef CONFIG_SPARC64
 #define SYSCALL_STRING \
@@ -60,24 +48,22 @@
  * Compute the vvar page's address in the process address space, and return it
  * as a pointer to the vvar_data.
  */
-static notrace noinline struct vvar_data *
-get_vvar_data(void)
+notrace static __always_inline struct vvar_data *get_vvar_data(void)
 {
 	unsigned long ret;
 
 	/*
-	 * vdso data page is the first vDSO page so grab the return address
+	 * vdso data page is the first vDSO page so grab the PC
 	 * and move up a page to get to the data page.
 	 */
-	ret = (unsigned long)__builtin_return_address(0);
+	__asm__("rd %%pc, %0" : "=r" (ret));
 	ret &= ~(8192 - 1);
 	ret -= 8192;
 
 	return (struct vvar_data *) ret;
 }
 
-static notrace long
-vdso_fallback_gettime(long clock, struct timespec *ts)
+notrace static long vdso_fallback_gettime(long clock, struct __kernel_old_timespec *ts)
 {
 	register long num __asm__("g1") = __NR_clock_gettime;
 	register long o0 __asm__("o0") = clock;
@@ -88,8 +74,7 @@
 	return o0;
 }
 
-static notrace __always_inline long
-vdso_fallback_gettimeofday(struct timeval *tv, struct timezone *tz)
+notrace static long vdso_fallback_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz)
 {
 	register long num __asm__("g1") = __NR_gettimeofday;
 	register long o0 __asm__("o0") = (long) tv;
@@ -101,38 +86,44 @@
 }
 
 #ifdef CONFIG_SPARC64
-static notrace noinline u64
-vread_tick(void) {
+notrace static __always_inline u64 vread_tick(void)
+{
 	u64 ret;
 
-	__asm__ __volatile__("rd %%asr24, %0 \n"
-			     ".section .vread_tick_patch, \"ax\" \n"
-			     "rd %%tick, %0 \n"
-			     ".previous \n"
-			     : "=&r" (ret));
-	return ret & ~TICK_PRIV_BIT;
+	__asm__ __volatile__("rd %%tick, %0" : "=r" (ret));
+	return ret;
+}
+
+notrace static __always_inline u64 vread_tick_stick(void)
+{
+	u64 ret;
+
+	__asm__ __volatile__("rd %%asr24, %0" : "=r" (ret));
+	return ret;
 }
 #else
-static notrace noinline u64
-vread_tick(void)
+notrace static __always_inline u64 vread_tick(void)
 {
-	unsigned int lo, hi;
+	register unsigned long long ret asm("o4");
 
-	__asm__ __volatile__("rd %%asr24, %%g1\n\t"
-			     "srlx %%g1, 32, %1\n\t"
-			     "srl %%g1, 0, %0\n"
-			     ".section .vread_tick_patch, \"ax\" \n"
-			     "rd %%tick, %%g1\n"
-			     ".previous \n"
-			     : "=&r" (lo), "=&r" (hi)
-			     :
-			     : "g1");
-	return lo | ((u64)hi << 32);
+	__asm__ __volatile__("rd %%tick, %L0\n\t"
+			     "srlx %L0, 32, %H0"
+			     : "=r" (ret));
+	return ret;
+}
+
+notrace static __always_inline u64 vread_tick_stick(void)
+{
+	register unsigned long long ret asm("o4");
+
+	__asm__ __volatile__("rd %%asr24, %L0\n\t"
+			     "srlx %L0, 32, %H0"
+			     : "=r" (ret));
+	return ret;
 }
 #endif
 
-static notrace inline u64
-vgetsns(struct vvar_data *vvar)
+notrace static __always_inline u64 vgetsns(struct vvar_data *vvar)
 {
 	u64 v;
 	u64 cycles;
@@ -142,13 +133,22 @@
 	return v * vvar->clock.mult;
 }
 
-static notrace noinline int
-do_realtime(struct vvar_data *vvar, struct timespec *ts)
+notrace static __always_inline u64 vgetsns_stick(struct vvar_data *vvar)
+{
+	u64 v;
+	u64 cycles;
+
+	cycles = vread_tick_stick();
+	v = (cycles - vvar->clock.cycle_last) & vvar->clock.mask;
+	return v * vvar->clock.mult;
+}
+
+notrace static __always_inline int do_realtime(struct vvar_data *vvar,
+	struct __kernel_old_timespec *ts)
 {
 	unsigned long seq;
 	u64 ns;
 
-	ts->tv_nsec = 0;
 	do {
 		seq = vvar_read_begin(vvar);
 		ts->tv_sec = vvar->wall_time_sec;
@@ -157,18 +157,38 @@
 		ns >>= vvar->clock.shift;
 	} while (unlikely(vvar_read_retry(vvar, seq)));
 
-	timespec_add_ns(ts, ns);
+	ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
+	ts->tv_nsec = ns;
 
 	return 0;
 }
 
-static notrace noinline int
-do_monotonic(struct vvar_data *vvar, struct timespec *ts)
+notrace static __always_inline int do_realtime_stick(struct vvar_data *vvar,
+	struct __kernel_old_timespec *ts)
 {
 	unsigned long seq;
 	u64 ns;
 
-	ts->tv_nsec = 0;
+	do {
+		seq = vvar_read_begin(vvar);
+		ts->tv_sec = vvar->wall_time_sec;
+		ns = vvar->wall_time_snsec;
+		ns += vgetsns_stick(vvar);
+		ns >>= vvar->clock.shift;
+	} while (unlikely(vvar_read_retry(vvar, seq)));
+
+	ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
+	ts->tv_nsec = ns;
+
+	return 0;
+}
+
+notrace static __always_inline int do_monotonic(struct vvar_data *vvar,
+	struct __kernel_old_timespec *ts)
+{
+	unsigned long seq;
+	u64 ns;
+
 	do {
 		seq = vvar_read_begin(vvar);
 		ts->tv_sec = vvar->monotonic_time_sec;
@@ -177,13 +197,34 @@
 		ns >>= vvar->clock.shift;
 	} while (unlikely(vvar_read_retry(vvar, seq)));
 
-	timespec_add_ns(ts, ns);
+	ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
+	ts->tv_nsec = ns;
 
 	return 0;
 }
 
-static notrace noinline int
-do_realtime_coarse(struct vvar_data *vvar, struct timespec *ts)
+notrace static __always_inline int do_monotonic_stick(struct vvar_data *vvar,
+	struct __kernel_old_timespec *ts)
+{
+	unsigned long seq;
+	u64 ns;
+
+	do {
+		seq = vvar_read_begin(vvar);
+		ts->tv_sec = vvar->monotonic_time_sec;
+		ns = vvar->monotonic_time_snsec;
+		ns += vgetsns_stick(vvar);
+		ns >>= vvar->clock.shift;
+	} while (unlikely(vvar_read_retry(vvar, seq)));
+
+	ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
+	ts->tv_nsec = ns;
+
+	return 0;
+}
+
+notrace static int do_realtime_coarse(struct vvar_data *vvar,
+	struct __kernel_old_timespec *ts)
 {
 	unsigned long seq;
 
@@ -195,8 +236,8 @@
 	return 0;
 }
 
-static notrace noinline int
-do_monotonic_coarse(struct vvar_data *vvar, struct timespec *ts)
+notrace static int do_monotonic_coarse(struct vvar_data *vvar,
+	struct __kernel_old_timespec *ts)
 {
 	unsigned long seq;
 
@@ -210,7 +251,7 @@
 }
 
 notrace int
-__vdso_clock_gettime(clockid_t clock, struct timespec *ts)
+__vdso_clock_gettime(clockid_t clock, struct __kernel_old_timespec *ts)
 {
 	struct vvar_data *vvd = get_vvar_data();
 
@@ -234,19 +275,44 @@
 	return vdso_fallback_gettime(clock, ts);
 }
 int
-clock_gettime(clockid_t, struct timespec *)
+clock_gettime(clockid_t, struct __kernel_old_timespec *)
 	__attribute__((weak, alias("__vdso_clock_gettime")));
 
 notrace int
-__vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
+__vdso_clock_gettime_stick(clockid_t clock, struct __kernel_old_timespec *ts)
+{
+	struct vvar_data *vvd = get_vvar_data();
+
+	switch (clock) {
+	case CLOCK_REALTIME:
+		if (unlikely(vvd->vclock_mode == VCLOCK_NONE))
+			break;
+		return do_realtime_stick(vvd, ts);
+	case CLOCK_MONOTONIC:
+		if (unlikely(vvd->vclock_mode == VCLOCK_NONE))
+			break;
+		return do_monotonic_stick(vvd, ts);
+	case CLOCK_REALTIME_COARSE:
+		return do_realtime_coarse(vvd, ts);
+	case CLOCK_MONOTONIC_COARSE:
+		return do_monotonic_coarse(vvd, ts);
+	}
+	/*
+	 * Unknown clock ID ? Fall back to the syscall.
+	 */
+	return vdso_fallback_gettime(clock, ts);
+}
+
+notrace int
+__vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz)
 {
 	struct vvar_data *vvd = get_vvar_data();
 
 	if (likely(vvd->vclock_mode != VCLOCK_NONE)) {
 		if (likely(tv != NULL)) {
 			union tstv_t {
-				struct timespec ts;
-				struct timeval tv;
+				struct __kernel_old_timespec ts;
+				struct __kernel_old_timeval tv;
 			} *tstv = (union tstv_t *) tv;
 			do_realtime(vvd, &tstv->ts);
 			/*
@@ -270,5 +336,38 @@
 	return vdso_fallback_gettimeofday(tv, tz);
 }
 int
-gettimeofday(struct timeval *, struct timezone *)
+gettimeofday(struct __kernel_old_timeval *, struct timezone *)
 	__attribute__((weak, alias("__vdso_gettimeofday")));
+
+notrace int
+__vdso_gettimeofday_stick(struct __kernel_old_timeval *tv, struct timezone *tz)
+{
+	struct vvar_data *vvd = get_vvar_data();
+
+	if (likely(vvd->vclock_mode != VCLOCK_NONE)) {
+		if (likely(tv != NULL)) {
+			union tstv_t {
+				struct __kernel_old_timespec ts;
+				struct __kernel_old_timeval tv;
+			} *tstv = (union tstv_t *) tv;
+			do_realtime_stick(vvd, &tstv->ts);
+			/*
+			 * Assign before dividing to ensure that the division is
+			 * done in the type of tv_usec, not tv_nsec.
+			 *
+			 * There cannot be > 1 billion usec in a second:
+			 * do_realtime() has already distributed such overflow
+			 * into tv_sec. So we can assign it to an int safely.
+			 */
+			tstv->tv.tv_usec = tstv->ts.tv_nsec;
+			tstv->tv.tv_usec /= 1000;
+		}
+		if (unlikely(tz != NULL)) {
+			/* Avoid memcpy. Some old compilers fail to inline it */
+			tz->tz_minuteswest = vvd->tz_minuteswest;
+			tz->tz_dsttime = vvd->tz_dsttime;
+		}
+		return 0;
+	}
+	return vdso_fallback_gettimeofday(tv, tz);
+}