@@ -30,6 +30,7 @@
 };
 
 static DEFINE_PER_CPU(struct tsc_adjust, tsc_adjust);
+static struct timer_list tsc_sync_check_timer;
 
 /*
  * TSC's on different sockets may be reset asynchronously.
@@ -76,6 +77,46 @@
 		adj->warned = true;
 	}
 }
+
+/*
+ * Normally tsc_sync is checked every time the system enters an idle
+ * state, but there is still a caveat: a system may never enter idle,
+ * either because it is too busy or because it is deliberately
+ * configured not to enter idle.
+ *
+ * So set up a periodic timer (every 10 minutes) to make sure the
+ * check is always on.
+ */
+
+#define SYNC_CHECK_INTERVAL		(HZ * 600)
+
+static void tsc_sync_check_timer_fn(struct timer_list *unused)
+{
+	int next_cpu;
+
+	tsc_verify_tsc_adjust(false);
+
+	/* Run the check for all online CPUs in turn */
+	next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
+	if (next_cpu >= nr_cpu_ids)
+		next_cpu = cpumask_first(cpu_online_mask);
+
+	tsc_sync_check_timer.expires += SYNC_CHECK_INTERVAL;
+	add_timer_on(&tsc_sync_check_timer, next_cpu);
+}
+
+static int __init start_sync_check_timer(void)
+{
+	if (!cpu_feature_enabled(X86_FEATURE_TSC_ADJUST) || tsc_clocksource_reliable)
+		return 0;
+
+	timer_setup(&tsc_sync_check_timer, tsc_sync_check_timer_fn, 0);
+	tsc_sync_check_timer.expires = jiffies + SYNC_CHECK_INTERVAL;
+	add_timer(&tsc_sync_check_timer);
+
+	return 0;
+}
+late_initcall(start_sync_check_timer);
 
 static void tsc_sanitize_first_cpu(struct tsc_adjust *cur, s64 bootval,
 				   unsigned int cpu, bool bootcpu)
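Review note (not part of the patch): the hunk above is a self-rearming timer that migrates across the online CPUs, so every CPU periodically runs the check even if it never enters idle. A minimal, self-contained sketch of the same pattern as a hypothetical out-of-tree module (all names illustrative):

/* demo_roundrobin_timer.c - sketch of a timer that hops across online CPUs */
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/jiffies.h>

#define CHECK_INTERVAL	(HZ * 600)	/* 10 minutes, as in the patch */

static struct timer_list demo_timer;

static void demo_timer_fn(struct timer_list *unused)
{
	int next_cpu;

	/* Per-CPU work goes here (tsc_verify_tsc_adjust(false) in the patch). */

	/* Pick the next online CPU, wrapping around at the end of the mask. */
	next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
	if (next_cpu >= nr_cpu_ids)
		next_cpu = cpumask_first(cpu_online_mask);

	/* Re-arm relative to the previous expiry and migrate the timer. */
	demo_timer.expires += CHECK_INTERVAL;
	add_timer_on(&demo_timer, next_cpu);
}

static int __init demo_init(void)
{
	timer_setup(&demo_timer, demo_timer_fn, 0);
	demo_timer.expires = jiffies + CHECK_INTERVAL;
	add_timer(&demo_timer);
	return 0;
}

static void __exit demo_exit(void)
{
	del_timer_sync(&demo_timer);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Re-arming from the previous expires value rather than from jiffies keeps the period stable even when the callback runs late, and add_timer_on() queues the timer on the chosen CPU so the callback, and hence the check, executes there.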
@@ -233,7 +274,6 @@
 	 * The measurement runs for 'timeout' msecs:
 	 */
 	end = start + (cycles_t) tsc_khz * timeout;
-	now = start;
 
 	for (i = 0; ; i++) {
 		/*
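Review note on the hunk above: tsc_khz is the TSC frequency in kHz, which is exactly the number of TSC cycles per millisecond (1 kHz = 1000 cycles/s = 1 cycle/ms), and timeout is in msecs, so start + tsc_khz * timeout marks the end of a timeout-millisecond window. For example, a 3 GHz TSC gives tsc_khz = 3,000,000, and with timeout = 20 the window is 3,000,000 * 20 = 60,000,000 cycles. The deleted "now = start;" appears to be dead code, since now is reassigned inside the loop before it is read.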
@@ -296,7 +336,7 @@
  * But as the TSC is per-logical CPU and can potentially be modified wrongly
  * by the bios, TSC sync test for smaller duration should be able
  * to catch such errors. Also this will catch the condition where all the
- * cores in the socket doesn't get reset at the same time.
+ * cores in the socket don't get reset at the same time.
  */
 static inline unsigned int loop_timeout(int cpu)
 {
@@ -364,12 +404,12 @@
 		/* Force it to 0 if random warps brought us here */
 		atomic_set(&test_runs, 0);
 
-		pr_warning("TSC synchronization [CPU#%d -> CPU#%d]:\n",
+		pr_warn("TSC synchronization [CPU#%d -> CPU#%d]:\n",
 			smp_processor_id(), cpu);
-		pr_warning("Measured %Ld cycles TSC warp between CPUs, "
-			   "turning off TSC clock.\n", max_warp);
+		pr_warn("Measured %Ld cycles TSC warp between CPUs, "
+			"turning off TSC clock.\n", max_warp);
 		if (random_warps)
-			pr_warning("TSC warped randomly between CPUs\n");
+			pr_warn("TSC warped randomly between CPUs\n");
 		mark_tsc_unstable("check_tsc_sync_source failed");
 	}
 
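Review note (not part of the patch): pr_warning() was a deprecated alias of pr_warn() and has since been removed from the kernel, so the hunk above is a mechanical rename with no change in behavior. Both expand to the same call, roughly as defined in include/linux/printk.h:

#define pr_warn(fmt, ...) \
	printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)

The warning text stays split across two adjacent string literals, which the C compiler concatenates into one string at compile time; checkpatch generally prefers user-visible strings on a single line, but the patch keeps the split as-is.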