2024-05-14 bedbef8ad3e75a304af6361af235302bcc61d06b
kernel/kernel/time/clocksource.c
@@ -1,26 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
- * linux/kernel/time/clocksource.c
- *
  * This file contains the functions which manage clocksource drivers.
  *
  * Copyright (C) 2004, 2005 IBM, John Stultz (johnstul@us.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * TODO WishList:
- *   o Allow clocksource drivers to be unregistered
 */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -123,12 +105,12 @@
 static int watchdog_running;
 static atomic_t watchdog_reset_pending;
 
-static void inline clocksource_watchdog_lock(unsigned long *flags)
+static inline void clocksource_watchdog_lock(unsigned long *flags)
 {
 	spin_lock_irqsave(&watchdog_lock, *flags);
 }
 
-static void inline clocksource_watchdog_unlock(unsigned long *flags)
+static inline void clocksource_watchdog_unlock(unsigned long *flags)
 {
 	spin_unlock_irqrestore(&watchdog_lock, *flags);
 }
@@ -240,6 +222,67 @@
 	pr_warn("timekeeping watchdog on CPU%d: %s read-back delay of %lldns, attempt %d, marking unstable\n",
 		smp_processor_id(), watchdog->name, wd_delay, nretries);
 	return false;
+}
+
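+/* State used by clocksource_verify_percpu() and its cross-CPU helper. */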
+static u64 csnow_mid;
+static cpumask_t cpus_ahead;
+static cpumask_t cpus_behind;
+
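+/* Called via smp_call_function_single(): read the clocksource on that CPU. */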
+static void clocksource_verify_one_cpu(void *csin)
+{
+	struct clocksource *cs = (struct clocksource *)csin;
+
+	csnow_mid = cs->read(cs);
+}
+
+static void clocksource_verify_percpu(struct clocksource *cs)
+{
+	int64_t cs_nsec, cs_nsec_max = 0, cs_nsec_min = LLONG_MAX;
+	u64 csnow_begin, csnow_end;
+	int cpu, testcpu;
+	s64 delta;
+
+	cpumask_clear(&cpus_ahead);
+	cpumask_clear(&cpus_behind);
+	preempt_disable();
+	testcpu = smp_processor_id();
+	pr_warn("Checking clocksource %s synchronization from CPU %d.\n", cs->name, testcpu);
+	for_each_online_cpu(cpu) {
+		if (cpu == testcpu)
+			continue;
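+		/*
+		 * Sandwich the remote read between two local reads; a
+		 * remote value outside csnow_begin..csnow_end (masked to
+		 * handle counter wrap) flags that CPU as behind or ahead.
+		 */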
+		csnow_begin = cs->read(cs);
+		smp_call_function_single(cpu, clocksource_verify_one_cpu, cs, 1);
+		csnow_end = cs->read(cs);
+		delta = (s64)((csnow_mid - csnow_begin) & cs->mask);
+		if (delta < 0)
+			cpumask_set_cpu(cpu, &cpus_behind);
+		delta = (csnow_end - csnow_mid) & cs->mask;
+		if (delta < 0)
+			cpumask_set_cpu(cpu, &cpus_ahead);
+		delta = clocksource_delta(csnow_end, csnow_begin, cs->mask);
+		cs_nsec = clocksource_cyc2ns(delta, cs->mult, cs->shift);
+		if (cs_nsec > cs_nsec_max)
+			cs_nsec_max = cs_nsec;
+		if (cs_nsec < cs_nsec_min)
+			cs_nsec_min = cs_nsec;
+	}
+	preempt_enable();
+	if (!cpumask_empty(&cpus_ahead))
+		pr_warn("        CPUs %*pbl ahead of CPU %d for clocksource %s.\n",
+			cpumask_pr_args(&cpus_ahead), testcpu, cs->name);
+	if (!cpumask_empty(&cpus_behind))
+		pr_warn("        CPUs %*pbl behind CPU %d for clocksource %s.\n",
+			cpumask_pr_args(&cpus_behind), testcpu, cs->name);
+	if (!cpumask_empty(&cpus_ahead) || !cpumask_empty(&cpus_behind))
+		pr_warn("        CPU %d check durations %lldns - %lldns for clocksource %s.\n",
+			testcpu, cs_nsec_min, cs_nsec_max, cs->name);
 }
 
 static void clocksource_watchdog(struct timer_list *unused)
@@ -465,6 +501,13 @@
 	struct clocksource *cs, *tmp;
 	unsigned long flags;
 	int select = 0;
+
+	/* Do any required per-CPU skew verification. */
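+	/* (Uses cross-CPU calls, so not run from the watchdog timer itself.) */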
+	if (curr_clocksource &&
+	    curr_clocksource->flags & CLOCK_SOURCE_UNSTABLE &&
+	    curr_clocksource->flags & CLOCK_SOURCE_VERIFY_PERCPU)
+		clocksource_verify_percpu(curr_clocksource);
 
 	spin_lock_irqsave(&watchdog_lock, flags);
 	list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) {
@@ -883,11 +925,7 @@
 	mutex_unlock(&clocksource_mutex);
 	return 0;
 }
-#ifdef CONFIG_ROCKCHIP_THUNDER_BOOT
-pure_initcall(clocksource_done_booting);
-#else
 fs_initcall(clocksource_done_booting);
-#endif
 
 /*
  * Enqueue the clocksource sorted by rating
@@ -989,6 +1027,16 @@
 {
 	unsigned long flags;
 
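+	/* Arch-specific setup first; cs->vdso_clock_mode is then sanity-checked. */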
+	clocksource_arch_init(cs);
+
+	if (cs->vdso_clock_mode < 0 ||
+	    cs->vdso_clock_mode >= VDSO_CLOCKMODE_MAX) {
+		pr_warn("clocksource %s registered with invalid VDSO mode %d. Disabling VDSO support.\n",
+			cs->name, cs->vdso_clock_mode);
+		cs->vdso_clock_mode = VDSO_CLOCKMODE_NONE;
+	}
+
 	/* Initialize mult/shift and max_idle_ns */
 	__clocksource_update_freq_scale(cs, scale, freq);
 