```diff
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * Common time routines among all ppc machines.
  *
@@ -24,11 +25,6 @@
  *
  * 1997-09-10  Updated NTP code according to technical memorandum Jan '96
  *             "A Kernel Model for Precision Timekeeping" by Dave Mills
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
  */
 
 #include <linux/errno.h>
@@ -43,7 +39,6 @@
 #include <linux/timex.h>
 #include <linux/kernel_stat.h>
 #include <linux/time.h>
-#include <linux/clockchips.h>
 #include <linux/init.h>
 #include <linux/profile.h>
 #include <linux/cpu.h>
@@ -55,10 +50,10 @@
 #include <linux/irq.h>
 #include <linux/delay.h>
 #include <linux/irq_work.h>
-#include <linux/clk-provider.h>
+#include <linux/of_clk.h>
 #include <linux/suspend.h>
-#include <linux/rtc.h>
 #include <linux/sched/cputime.h>
+#include <linux/sched/clock.h>
 #include <linux/processor.h>
 #include <asm/trace.h>
 
@@ -81,15 +76,6 @@
 #include <linux/clockchips.h>
 #include <linux/timekeeper_internal.h>
 
-static u64 rtc_read(struct clocksource *);
-static struct clocksource clocksource_rtc = {
-        .name = "rtc",
-        .rating = 400,
-        .flags = CLOCK_SOURCE_IS_CONTINUOUS,
-        .mask = CLOCKSOURCE_MASK(64),
-        .read = rtc_read,
-};
-
 static u64 timebase_read(struct clocksource *);
 static struct clocksource clocksource_timebase = {
         .name = "timebase",
@@ -111,6 +97,7 @@
         .rating = 200,
         .irq = 0,
         .set_next_event = decrementer_set_next_event,
+        .set_state_oneshot_stopped = decrementer_shutdown,
         .set_state_shutdown = decrementer_shutdown,
         .tick_resume = decrementer_shutdown,
         .features = CLOCK_EVT_FEAT_ONESHOT |
@@ -151,6 +138,8 @@
 unsigned long ppc_tb_freq;
 EXPORT_SYMBOL_GPL(ppc_tb_freq);
 
+bool tb_invalid;
+
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 /*
  * Factor for converting from cputime_t (timebase ticks) to
@@ -175,7 +164,7 @@
  * Read the SPURR on systems that have it, otherwise the PURR,
  * or if that doesn't exist return the timebase value passed in.
  */
-static unsigned long read_spurr(unsigned long tb)
+static inline unsigned long read_spurr(unsigned long tb)
 {
         if (cpu_has_feature(CPU_FTR_SPURR))
                 return mfspr(SPRN_SPURR);
@@ -185,6 +174,8 @@
 }
 
 #ifdef CONFIG_PPC_SPLPAR
+
+#include <asm/dtl.h>
 
 /*
  * Scan the dispatch trace log and count up the stolen time.
@@ -281,26 +272,17 @@
  * Account time for a transition between system, hard irq
  * or soft irq state.
  */
-static unsigned long vtime_delta(struct task_struct *tsk,
-                                 unsigned long *stime_scaled,
-                                 unsigned long *steal_time)
+static unsigned long vtime_delta_scaled(struct cpu_accounting_data *acct,
+                                        unsigned long now, unsigned long stime)
 {
-        unsigned long now, nowscaled, deltascaled;
-        unsigned long stime;
+        unsigned long stime_scaled = 0;
+#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
+        unsigned long nowscaled, deltascaled;
         unsigned long utime, utime_scaled;
-        struct cpu_accounting_data *acct = get_accounting(tsk);
 
-        WARN_ON_ONCE(!irqs_disabled());
-
-        now = mftb();
         nowscaled = read_spurr(now);
-        stime = now - acct->starttime;
-        acct->starttime = now;
         deltascaled = nowscaled - acct->startspurr;
         acct->startspurr = nowscaled;
-
-        *steal_time = calculate_stolen_time(now);
-
         utime = acct->utime - acct->utime_sspurr;
         acct->utime_sspurr = acct->utime;
 
@@ -314,22 +296,43 @@
  * the user ticks get saved up in paca->user_time_scaled to be
  * used by account_process_tick.
  */
-        *stime_scaled = stime;
+        stime_scaled = stime;
         utime_scaled = utime;
         if (deltascaled != stime + utime) {
                 if (utime) {
-                        *stime_scaled = deltascaled * stime / (stime + utime);
-                        utime_scaled = deltascaled - *stime_scaled;
+                        stime_scaled = deltascaled * stime / (stime + utime);
+                        utime_scaled = deltascaled - stime_scaled;
                 } else {
-                        *stime_scaled = deltascaled;
+                        stime_scaled = deltascaled;
                 }
         }
         acct->utime_scaled += utime_scaled;
+#endif
+
+        return stime_scaled;
+}
+
+static unsigned long vtime_delta(struct task_struct *tsk,
+                                 unsigned long *stime_scaled,
+                                 unsigned long *steal_time)
+{
+        unsigned long now, stime;
+        struct cpu_accounting_data *acct = get_accounting(tsk);
+
+        WARN_ON_ONCE(!irqs_disabled());
+
+        now = mftb();
+        stime = now - acct->starttime;
+        acct->starttime = now;
+
+        *stime_scaled = vtime_delta_scaled(acct, now, stime);
+
+        *steal_time = calculate_stolen_time(now);
 
         return stime;
 }
 
-void vtime_account_system(struct task_struct *tsk)
+void vtime_account_kernel(struct task_struct *tsk)
 {
         unsigned long stime, stime_scaled, steal_time;
         struct cpu_accounting_data *acct = get_accounting(tsk);
```
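The proration that the refactored `vtime_delta_scaled()` keeps behind `CONFIG_ARCH_HAS_SCALED_CPUTIME` splits the SPURR/PURR delta between system and user time in the same ratio as the raw timebase ticks each consumed. A minimal userspace sketch of that arithmetic (the function name and sample values are illustrative, not from the kernel):

```c
#include <stdio.h>

/* Split a scaled-time delta between system and user in proportion to
 * their raw timebase shares, mirroring vtime_delta_scaled(). */
static void prorate(unsigned long deltascaled,
                    unsigned long stime, unsigned long utime,
                    unsigned long *stime_scaled, unsigned long *utime_scaled)
{
        /* Default: no frequency scaling happened, pass both through. */
        *stime_scaled = stime;
        *utime_scaled = utime;
        if (deltascaled != stime + utime) {
                if (utime) {
                        *stime_scaled = deltascaled * stime / (stime + utime);
                        *utime_scaled = deltascaled - *stime_scaled;
                } else {
                        /* utime == 0, so all of the delta is system time */
                        *stime_scaled = deltascaled;
                }
        }
}

int main(void)
{
        unsigned long ss, us;

        /* SPURR advanced 800 ticks while TB advanced 600 + 400 = 1000:
         * the CPU ran throttled, so both components shrink by 20%. */
        prorate(800, 600, 400, &ss, &us);
        printf("stime_scaled=%lu utime_scaled=%lu\n", ss, us); /* 480 320 */
        return 0;
}
```

On CPUs without SPURR or PURR, `read_spurr()` falls back to the timebase value, so `deltascaled` normally equals `stime + utime` and the proration branch is skipped.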
```diff
@@ -341,7 +344,9 @@
 
         if ((tsk->flags & PF_VCPU) && !irq_count()) {
                 acct->gtime += stime;
+#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
                 acct->utime_scaled += stime_scaled;
+#endif
         } else {
                 if (hardirq_count())
                         acct->hardirq_time += stime;
@@ -350,10 +355,12 @@
                 else
                         acct->stime += stime;
 
+#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
                 acct->stime_scaled += stime_scaled;
+#endif
         }
 }
-EXPORT_SYMBOL_GPL(vtime_account_system);
+EXPORT_SYMBOL_GPL(vtime_account_kernel);
 
 void vtime_account_idle(struct task_struct *tsk)
 {
@@ -364,10 +371,25 @@
         acct->idle_time += stime + steal_time;
 }
 
+static void vtime_flush_scaled(struct task_struct *tsk,
+                               struct cpu_accounting_data *acct)
+{
+#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
+        if (acct->utime_scaled)
+                tsk->utimescaled += cputime_to_nsecs(acct->utime_scaled);
+        if (acct->stime_scaled)
+                tsk->stimescaled += cputime_to_nsecs(acct->stime_scaled);
+
+        acct->utime_scaled = 0;
+        acct->utime_sspurr = 0;
+        acct->stime_scaled = 0;
+#endif
+}
+
 /*
  * Account the whole cputime accumulated in the paca
  * Must be called with interrupts disabled.
- * Assumes that vtime_account_system/idle() has been called
+ * Assumes that vtime_account_kernel/idle() has been called
  * recently (i.e. since the last entry from usermode) so that
  * get_paca()->user_time_scaled is up to date.
  */
@@ -378,14 +400,13 @@
         if (acct->utime)
                 account_user_time(tsk, cputime_to_nsecs(acct->utime));
 
-        if (acct->utime_scaled)
-                tsk->utimescaled += cputime_to_nsecs(acct->utime_scaled);
-
         if (acct->gtime)
                 account_guest_time(tsk, cputime_to_nsecs(acct->gtime));
 
-        if (acct->steal_time)
+        if (IS_ENABLED(CONFIG_PPC_SPLPAR) && acct->steal_time) {
                 account_steal_time(cputime_to_nsecs(acct->steal_time));
+                acct->steal_time = 0;
+        }
 
         if (acct->idle_time)
                 account_idle_time(cputime_to_nsecs(acct->idle_time));
```
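Guarding the steal-time flush with `IS_ENABLED(CONFIG_PPC_SPLPAR)` instead of an `#ifdef` keeps the branch visible to the compiler on every configuration (so it is always type-checked) while letting it be discarded as dead code when the option is off; steal time is only ever accumulated on shared-processor LPARs. A simplified stand-in for the idiom; the real macro lives in `<linux/kconfig.h>` and is considerably more elaborate:

```c
#include <stdio.h>

/* Stand-in for the kconfig machinery: a real build would define the
 * *_ENABLED macro to 0 or 1 based on the .config. Hypothetical names. */
#define CONFIG_PPC_SPLPAR_ENABLED 0
#define IS_ENABLED(option) (option##_ENABLED)

static unsigned long steal_time = 1234;

int main(void)
{
        /* Always parsed and type-checked; folded away when disabled. */
        if (IS_ENABLED(CONFIG_PPC_SPLPAR) && steal_time) {
                printf("accounting %lu ticks of steal time\n", steal_time);
                steal_time = 0;
        }
        return 0;
}
```

Note that the `acct->steal_time = 0` reset also moves into this branch, matching the removal of the unconditional reset from the flush tail below.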
```diff
@@ -393,8 +414,6 @@
         if (acct->stime)
                 account_system_index_time(tsk, cputime_to_nsecs(acct->stime),
                                           CPUTIME_SYSTEM);
-        if (acct->stime_scaled)
-                tsk->stimescaled += cputime_to_nsecs(acct->stime_scaled);
 
         if (acct->hardirq_time)
                 account_system_index_time(tsk, cputime_to_nsecs(acct->hardirq_time),
@@ -403,14 +422,12 @@
                 account_system_index_time(tsk, cputime_to_nsecs(acct->softirq_time),
                                           CPUTIME_SOFTIRQ);
 
+        vtime_flush_scaled(tsk, acct);
+
         acct->utime = 0;
-        acct->utime_scaled = 0;
-        acct->utime_sspurr = 0;
         acct->gtime = 0;
-        acct->steal_time = 0;
         acct->idle_time = 0;
         acct->stime = 0;
-        acct->stime_scaled = 0;
         acct->hardirq_time = 0;
         acct->softirq_time = 0;
 }
@@ -419,31 +436,28 @@
 #define calc_cputime_factors()
 #endif
 
-void __delay(unsigned long loops)
+void __no_kcsan __delay(unsigned long loops)
 {
         unsigned long start;
-        int diff;
 
         spin_begin();
-        if (__USE_RTC()) {
-                start = get_rtcl();
-                do {
-                        /* the RTCL register wraps at 1000000000 */
-                        diff = get_rtcl() - start;
-                        if (diff < 0)
-                                diff += 1000000000;
-                        spin_cpu_relax();
-                } while (diff < loops);
+        if (tb_invalid) {
+                /*
+                 * TB is in error state and isn't ticking anymore.
+                 * HMI handler was unable to recover from TB error.
+                 * Return immediately, so that kernel won't get stuck here.
+                 */
+                spin_cpu_relax();
         } else {
-                start = get_tbl();
-                while (get_tbl() - start < loops)
+                start = mftb();
+                while (mftb() - start < loops)
                         spin_cpu_relax();
         }
         spin_end();
 }
 EXPORT_SYMBOL(__delay);
 
-void udelay(unsigned long usecs)
+void __no_kcsan udelay(unsigned long usecs)
 {
         __delay(tb_ticks_per_usec * usecs);
 }
```
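With the PPC601's RTCL register gone, `__delay()` collapses to a single timebase loop. The condition `mftb() - start < loops` stays correct even if the counter wraps mid-delay, because unsigned subtraction is modular; the old signed `diff` bookkeeping existed only because RTCL wrapped at 10^9 rather than at a power of two. A userspace sketch with a stand-in counter placed just before the wrap point:

```c
#include <stdio.h>

/* Stand-in timebase counter, started just below the wrap point so the
 * demo crosses zero. The real code reads the TB register via mftb(). */
static unsigned long fake_tb = ~0UL - 5;
static unsigned long mftb(void) { return fake_tb++; }

/* Mirrors the new __delay() timebase loop (minus spin_begin/spin_end). */
static void delay_ticks(unsigned long loops)
{
        unsigned long start = mftb();

        /* (now - start) is the elapsed tick count even after `now`
         * wraps, because unsigned subtraction is modulo the word size. */
        while (mftb() - start < loops)
                ;
}

int main(void)
{
        delay_ticks(20);   /* crosses the wrap without hanging */
        printf("done, tb is now %lu\n", fake_tb);
        return 0;
}
```

The new `tb_invalid` path covers the case where the HMI handler could not recover the timebase: a dead counter never advances, so the function yields once with `spin_cpu_relax()` and returns rather than spinning forever.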
```diff
@@ -539,14 +553,11 @@
         struct pt_regs *old_regs;
         u64 now;
 
-        /* Some implementations of hotplug will get timer interrupts while
-         * offline, just ignore these and we also need to set
-         * decrementers_next_tb as MAX to make sure __check_irq_replay
-         * don't replay timer interrupt when return, otherwise we'll trap
-         * here infinitely :(
+        /*
+         * Some implementations of hotplug will get timer interrupts while
+         * offline, just ignore these.
          */
         if (unlikely(!cpu_online(smp_processor_id()))) {
-                *next_tb = ~(u64)0;
                 set_dec(decrementer_max);
                 return;
         }
@@ -582,7 +593,7 @@
                 irq_work_run();
         }
 
-        now = get_tb_or_rtc();
+        now = get_tb();
         if (now >= *next_tb) {
                 *next_tb = ~(u64)0;
                 if (evt->event_handler)
@@ -614,15 +625,6 @@
         __this_cpu_inc(irq_stat.broadcast_irqs_event);
 }
 #endif
-
-/*
- * Hypervisor decrementer interrupts shouldn't occur but are sometimes
- * left pending on exit from a KVM guest. We don't need to do anything
- * to clear them, as they are edge-triggered.
- */
-void hdec_interrupt(struct pt_regs *regs)
-{
-}
 
 #ifdef CONFIG_SUSPEND
 static void generic_suspend_disable_irqs(void)
@@ -673,8 +675,6 @@
  */
 notrace unsigned long long sched_clock(void)
 {
-        if (__USE_RTC())
-                return get_rtc();
         return mulhdu(get_tb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift;
 }
 
```
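`sched_clock()` now unconditionally converts timebase ticks to nanoseconds as `mulhdu(tb, tb_to_ns_scale) << tb_to_ns_shift`, where `mulhdu` is the PowerPC "multiply high doubleword unsigned" operation: the top 64 bits of a 64x64-bit product. A hedged userspace model using `__uint128_t`; the 512 MHz frequency and the shift of 1 are illustrative, not what any particular machine uses:

```c
#include <stdio.h>
#include <stdint.h>

/* High 64 bits of a 64x64-bit multiply, i.e. what the PowerPC mulhdu
 * instruction returns. __uint128_t is a GCC/Clang extension. */
static uint64_t mulhdu(uint64_t a, uint64_t b)
{
        return (uint64_t)(((__uint128_t)a * b) >> 64);
}

int main(void)
{
        uint64_t tb_freq = 512000000;   /* illustrative 512 MHz timebase */
        unsigned int shift = 1;         /* chosen so scale fits in 64 bits */
        /* Fixed-point factor: ns = ((tb * scale) >> 64) << shift,
         * with scale = (1e9 << (64 - shift)) / tb_freq. */
        uint64_t scale = (uint64_t)(((__uint128_t)1000000000 << (64 - shift))
                                    / tb_freq);

        uint64_t tb = 5 * tb_freq;      /* five seconds' worth of ticks */
        uint64_t ns = mulhdu(tb, scale) << shift;

        printf("%llu ns (expect about 5000000000)\n", (unsigned long long)ns);
        return 0;
}
```

Roughly speaking, `time_init()` (further down) picks the largest `scale` that still fits in 64 bits and compensates with `tb_to_ns_shift`, which preserves precision for slow timebases.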
```diff
@@ -824,11 +824,6 @@
 }
 
 /* clocksource code */
-static notrace u64 rtc_read(struct clocksource *cs)
-{
-        return (u64)get_rtc();
-}
-
 static notrace u64 timebase_read(struct clocksource *cs)
 {
         return (u64)get_tb();
@@ -837,7 +832,7 @@
 
 void update_vsyscall(struct timekeeper *tk)
 {
-        struct timespec xt;
+        struct timespec64 xt;
         struct clocksource *clock = tk->tkr_mono.clock;
         u32 mult = tk->tkr_mono.mult;
         u32 shift = tk->tkr_mono.shift;
@@ -909,7 +904,8 @@
         vdso_data->tb_to_xs = new_tb_to_xs;
         vdso_data->wtom_clock_sec = tk->wall_to_monotonic.tv_sec;
         vdso_data->wtom_clock_nsec = tk->wall_to_monotonic.tv_nsec;
-        vdso_data->stamp_xtime = xt;
+        vdso_data->stamp_xtime_sec = xt.tv_sec;
+        vdso_data->stamp_xtime_nsec = xt.tv_nsec;
         vdso_data->stamp_sec_fraction = frac_sec;
         vdso_data->hrtimer_res = hrtimer_resolution;
         smp_wmb();
```
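Two things happen here: `xt` becomes a `timespec64` (the y2038-safe type), and the vDSO copy of it is split into separate `stamp_xtime_sec`/`stamp_xtime_nsec` fields rather than an embedded struct. The `smp_wmb()` is one half of the usual seqcount publication protocol that lets userspace read these fields locklessly. A generic sketch of that pattern; the field names are invented, and since the demo is single-threaded the real barriers appear only as comments:

```c
#include <stdio.h>

/* Sketch of seqcount publication. Odd sequence = update in flight. */
struct vdso_snapshot {
        unsigned int seq;
        long stamp_sec;
        long stamp_nsec;
};

static void writer_update(struct vdso_snapshot *s, long sec, long nsec)
{
        s->seq++;              /* now odd: concurrent readers will retry */
        /* smp_wmb() in real code: order the flag before the data */
        s->stamp_sec  = sec;
        s->stamp_nsec = nsec;
        /* smp_wmb() in real code: order the data before the flag */
        s->seq++;              /* even again: snapshot is consistent */
}

static void reader_read(const struct vdso_snapshot *s, long *sec, long *nsec)
{
        unsigned int start;

        do {
                start = s->seq;
                /* smp_rmb() in real code */
                *sec  = s->stamp_sec;
                *nsec = s->stamp_nsec;
                /* smp_rmb() in real code */
        } while ((start & 1) || start != s->seq);
}

int main(void)
{
        struct vdso_snapshot s = { 0, 0, 0 };
        long sec, nsec;

        writer_update(&s, 1700000000L, 500L);
        reader_read(&s, &sec, &nsec);
        printf("%ld.%09ld\n", sec, nsec);
        return 0;
}
```

Readers loop until they observe the same even sequence number before and after the copy, which guarantees a consistent snapshot without taking a lock.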
```diff
@@ -924,12 +920,7 @@
 
 static void __init clocksource_init(void)
 {
-        struct clocksource *clock;
-
-        if (__USE_RTC())
-                clock = &clocksource_rtc;
-        else
-                clock = &clocksource_timebase;
+        struct clocksource *clock = &clocksource_timebase;
 
         if (clocksource_register_hz(clock, tb_ticks_per_sec)) {
                 printk(KERN_ERR "clocksource: %s is already registered\n",
```
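With the 601 path removed, `clocksource_init()` registers only the timebase. `clocksource_register_hz()` derives the mult/shift pair the timekeeping core uses to turn a cycle delta into nanoseconds via `ns = (cycles * mult) >> shift`. An ad-hoc userspace illustration of that relationship; the kernel derives the pair with proper overflow headroom, so the numbers here are only for demonstration:

```c
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t freq_hz = 512000000;   /* illustrative timebase frequency */
        uint32_t shift = 22;            /* ad hoc; the kernel picks its own */
        /* mult such that (cycles * mult) >> shift ~= cycles * 1e9 / freq */
        uint32_t mult = (uint32_t)(((uint64_t)1000000000 << shift) / freq_hz);

        uint64_t cycles = 512;          /* one microsecond of ticks */
        uint64_t ns = (cycles * mult) >> shift;

        printf("mult=%u shift=%u -> %llu ns (expect 1000)\n",
               (unsigned)mult, (unsigned)shift, (unsigned long long)ns);
        return 0;
}
```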
```diff
@@ -944,7 +935,7 @@
 static int decrementer_set_next_event(unsigned long evt,
                                       struct clock_event_device *dev)
 {
-        __this_cpu_write(decrementers_next_tb, get_tb_or_rtc() + evt);
+        __this_cpu_write(decrementers_next_tb, get_tb() + evt);
         set_dec(evt);
 
         /* We may have raced with new irq work */
@@ -1047,17 +1038,12 @@
         u64 scale;
         unsigned shift;
 
-        if (__USE_RTC()) {
-                /* 601 processor: dec counts down by 128 every 128ns */
-                ppc_tb_freq = 1000000000;
-        } else {
-                /* Normal PowerPC with timebase register */
-                ppc_md.calibrate_decr();
-                printk(KERN_DEBUG "time_init: decrementer frequency = %lu.%.6lu MHz\n",
-                       ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
-                printk(KERN_DEBUG "time_init: processor frequency   = %lu.%.6lu MHz\n",
-                       ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);
-        }
+        /* Normal PowerPC with timebase register */
+        ppc_md.calibrate_decr();
+        printk(KERN_DEBUG "time_init: decrementer frequency = %lu.%.6lu MHz\n",
+               ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
+        printk(KERN_DEBUG "time_init: processor frequency   = %lu.%.6lu MHz\n",
+               ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);
 
         tb_ticks_per_jiffy = ppc_tb_freq / HZ;
         tb_ticks_per_sec = ppc_tb_freq;
```
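From the calibrated `ppc_tb_freq`, `time_init()` derives the tick constants the tick and delay code rely on (including the `tb_ticks_per_usec` that `udelay()` multiplies by), and the debug printk renders the frequency in MHz with the `%lu.%.6lu` idiom. Worked numbers for an assumed 512 MHz timebase; the real value comes from `ppc_md.calibrate_decr()`:

```c
#include <stdio.h>

int main(void)
{
        /* Illustrative only: calibrate_decr() supplies the real frequency. */
        unsigned long ppc_tb_freq = 512000000;
        unsigned long hz = 250;                 /* stands in for CONFIG_HZ */

        unsigned long tb_ticks_per_jiffy = ppc_tb_freq / hz;      /* 2048000 */
        unsigned long tb_ticks_per_usec = ppc_tb_freq / 1000000;  /* 512 */

        /* Same formatting as the time_init printk: integer MHz, then the
         * six-digit remainder as the fractional part -> "512.000000 MHz". */
        printf("time_init: decrementer frequency = %lu.%.6lu MHz\n",
               ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
        printf("ticks per jiffy = %lu, per usec = %lu\n",
               tb_ticks_per_jiffy, tb_ticks_per_usec);
        return 0;
}
```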
```diff
@@ -1083,7 +1069,7 @@
         tb_to_ns_scale = scale;
         tb_to_ns_shift = shift;
         /* Save the current timebase to pretty up CONFIG_PRINTK_TIME */
-        boot_tb = get_tb_or_rtc();
+        boot_tb = get_tb();
 
         /* If platform provided a timezone (pmac), we correct the time */
         if (timezone_offset) {
@@ -1109,9 +1095,8 @@
         init_decrementer_clockevent();
         tick_setup_hrtimer_broadcast();
 
-#ifdef CONFIG_COMMON_CLK
         of_clk_init(NULL);
-#endif
+        enable_sched_clock_irqtime();
 }
 
 /*
```
|---|