commit 01573e231f18eb2d99162747186f59511f56b64d
Author: hc
Date:   2023-12-08

--- a/kernel/arch/powerpc/kernel/time.c
+++ b/kernel/arch/powerpc/kernel/time.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * Common time routines among all ppc machines.
  *
@@ -24,11 +25,6 @@
  *
  * 1997-09-10  Updated NTP code according to technical memorandum Jan '96
  *             "A Kernel Model for Precision Timekeeping" by Dave Mills
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
  */
 
 #include <linux/errno.h>
@@ -43,7 +39,6 @@
 #include <linux/timex.h>
 #include <linux/kernel_stat.h>
 #include <linux/time.h>
-#include <linux/clockchips.h>
 #include <linux/init.h>
 #include <linux/profile.h>
 #include <linux/cpu.h>
@@ -55,10 +50,10 @@
 #include <linux/irq.h>
 #include <linux/delay.h>
 #include <linux/irq_work.h>
-#include <linux/clk-provider.h>
+#include <linux/of_clk.h>
 #include <linux/suspend.h>
-#include <linux/rtc.h>
 #include <linux/sched/cputime.h>
+#include <linux/sched/clock.h>
 #include <linux/processor.h>
 #include <asm/trace.h>
 
@@ -81,15 +76,6 @@
 #include <linux/clockchips.h>
 #include <linux/timekeeper_internal.h>
 
-static u64 rtc_read(struct clocksource *);
-static struct clocksource clocksource_rtc = {
-	.name         = "rtc",
-	.rating       = 400,
-	.flags        = CLOCK_SOURCE_IS_CONTINUOUS,
-	.mask         = CLOCKSOURCE_MASK(64),
-	.read         = rtc_read,
-};
-
 static u64 timebase_read(struct clocksource *);
 static struct clocksource clocksource_timebase = {
 	.name         = "timebase",
@@ -111,6 +97,7 @@
 	.rating			= 200,
 	.irq			= 0,
 	.set_next_event		= decrementer_set_next_event,
+	.set_state_oneshot_stopped = decrementer_shutdown,
 	.set_state_shutdown	= decrementer_shutdown,
 	.tick_resume		= decrementer_shutdown,
 	.features		= CLOCK_EVT_FEAT_ONESHOT |
@@ -151,6 +138,8 @@
 unsigned long ppc_tb_freq;
 EXPORT_SYMBOL_GPL(ppc_tb_freq);
 
+bool tb_invalid;
+
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 /*
  * Factor for converting from cputime_t (timebase ticks) to
@@ -175,7 +164,7 @@
  * Read the SPURR on systems that have it, otherwise the PURR,
  * or if that doesn't exist return the timebase value passed in.
  */
-static unsigned long read_spurr(unsigned long tb)
+static inline unsigned long read_spurr(unsigned long tb)
 {
 	if (cpu_has_feature(CPU_FTR_SPURR))
 		return mfspr(SPRN_SPURR);
@@ -185,6 +174,8 @@
 }
 
 #ifdef CONFIG_PPC_SPLPAR
+
+#include <asm/dtl.h>
 
 /*
  * Scan the dispatch trace log and count up the stolen time.
@@ -281,26 +272,17 @@
  * Account time for a transition between system, hard irq
  * or soft irq state.
  */
-static unsigned long vtime_delta(struct task_struct *tsk,
-				 unsigned long *stime_scaled,
-				 unsigned long *steal_time)
+static unsigned long vtime_delta_scaled(struct cpu_accounting_data *acct,
+					unsigned long now, unsigned long stime)
 {
-	unsigned long now, nowscaled, deltascaled;
-	unsigned long stime;
+	unsigned long stime_scaled = 0;
+#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
+	unsigned long nowscaled, deltascaled;
 	unsigned long utime, utime_scaled;
-	struct cpu_accounting_data *acct = get_accounting(tsk);
 
-	WARN_ON_ONCE(!irqs_disabled());
-
-	now = mftb();
 	nowscaled = read_spurr(now);
-	stime = now - acct->starttime;
-	acct->starttime = now;
 	deltascaled = nowscaled - acct->startspurr;
 	acct->startspurr = nowscaled;
-
-	*steal_time = calculate_stolen_time(now);
-
 	utime = acct->utime - acct->utime_sspurr;
 	acct->utime_sspurr = acct->utime;
 
@@ -314,60 +296,124 @@
 	 * the user ticks get saved up in paca->user_time_scaled to be
 	 * used by account_process_tick.
 	 */
-	*stime_scaled = stime;
+	stime_scaled = stime;
 	utime_scaled = utime;
 	if (deltascaled != stime + utime) {
 		if (utime) {
-			*stime_scaled = deltascaled * stime / (stime + utime);
-			utime_scaled = deltascaled - *stime_scaled;
+			stime_scaled = deltascaled * stime / (stime + utime);
+			utime_scaled = deltascaled - stime_scaled;
 		} else {
-			*stime_scaled = deltascaled;
+			stime_scaled = deltascaled;
 		}
 	}
 	acct->utime_scaled += utime_scaled;
+#endif
+
+	return stime_scaled;
+}
+
+static unsigned long vtime_delta(struct cpu_accounting_data *acct,
+				 unsigned long *stime_scaled,
+				 unsigned long *steal_time)
+{
+	unsigned long now, stime;
+
+	WARN_ON_ONCE(!irqs_disabled());
+
+	now = mftb();
+	stime = now - acct->starttime;
+	acct->starttime = now;
+
+	*stime_scaled = vtime_delta_scaled(acct, now, stime);
+
+	*steal_time = calculate_stolen_time(now);
 
 	return stime;
 }
 
-void vtime_account_system(struct task_struct *tsk)
+static void vtime_delta_kernel(struct cpu_accounting_data *acct,
+			       unsigned long *stime, unsigned long *stime_scaled)
 {
-	unsigned long stime, stime_scaled, steal_time;
-	struct cpu_accounting_data *acct = get_accounting(tsk);
+	unsigned long steal_time;
 
-	stime = vtime_delta(tsk, &stime_scaled, &steal_time);
-
-	stime -= min(stime, steal_time);
+	*stime = vtime_delta(acct, stime_scaled, &steal_time);
+	*stime -= min(*stime, steal_time);
 	acct->steal_time += steal_time;
+}
 
-	if ((tsk->flags & PF_VCPU) && !irq_count()) {
+void vtime_account_kernel(struct task_struct *tsk)
+{
+	struct cpu_accounting_data *acct = get_accounting(tsk);
+	unsigned long stime, stime_scaled;
+
+	vtime_delta_kernel(acct, &stime, &stime_scaled);
+
+	if (tsk->flags & PF_VCPU) {
 		acct->gtime += stime;
+#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
 		acct->utime_scaled += stime_scaled;
+#endif
 	} else {
-		if (hardirq_count())
-			acct->hardirq_time += stime;
-		else if (in_serving_softirq())
-			acct->softirq_time += stime;
-		else
-			acct->stime += stime;
-
+		acct->stime += stime;
+#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
 		acct->stime_scaled += stime_scaled;
+#endif
 	}
 }
-EXPORT_SYMBOL_GPL(vtime_account_system);
+EXPORT_SYMBOL_GPL(vtime_account_kernel);
 
 void vtime_account_idle(struct task_struct *tsk)
 {
 	unsigned long stime, stime_scaled, steal_time;
 	struct cpu_accounting_data *acct = get_accounting(tsk);
 
-	stime = vtime_delta(tsk, &stime_scaled, &steal_time);
+	stime = vtime_delta(acct, &stime_scaled, &steal_time);
 	acct->idle_time += stime + steal_time;
+}
+
+static void vtime_account_irq_field(struct cpu_accounting_data *acct,
+				    unsigned long *field)
+{
+	unsigned long stime, stime_scaled;
+
+	vtime_delta_kernel(acct, &stime, &stime_scaled);
+	*field += stime;
+#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
+	acct->stime_scaled += stime_scaled;
+#endif
+}
+
+void vtime_account_softirq(struct task_struct *tsk)
+{
+	struct cpu_accounting_data *acct = get_accounting(tsk);
+	vtime_account_irq_field(acct, &acct->softirq_time);
+}
+
+void vtime_account_hardirq(struct task_struct *tsk)
+{
+	struct cpu_accounting_data *acct = get_accounting(tsk);
+	vtime_account_irq_field(acct, &acct->hardirq_time);
+}
+
+static void vtime_flush_scaled(struct task_struct *tsk,
+			       struct cpu_accounting_data *acct)
+{
+#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
+	if (acct->utime_scaled)
+		tsk->utimescaled += cputime_to_nsecs(acct->utime_scaled);
+	if (acct->stime_scaled)
+		tsk->stimescaled += cputime_to_nsecs(acct->stime_scaled);
+
+	acct->utime_scaled = 0;
+	acct->utime_sspurr = 0;
+	acct->stime_scaled = 0;
+#endif
 }
 
 /*
  * Account the whole cputime accumulated in the paca
  * Must be called with interrupts disabled.
- * Assumes that vtime_account_system/idle() has been called
+ * Assumes that vtime_account_kernel/idle() has been called
  * recently (i.e. since the last entry from usermode) so that
  * get_paca()->user_time_scaled is up to date.
  */
@@ -378,14 +424,13 @@
 	if (acct->utime)
 		account_user_time(tsk, cputime_to_nsecs(acct->utime));
 
-	if (acct->utime_scaled)
-		tsk->utimescaled += cputime_to_nsecs(acct->utime_scaled);
-
 	if (acct->gtime)
 		account_guest_time(tsk, cputime_to_nsecs(acct->gtime));
 
-	if (acct->steal_time)
+	if (IS_ENABLED(CONFIG_PPC_SPLPAR) && acct->steal_time) {
 		account_steal_time(cputime_to_nsecs(acct->steal_time));
+		acct->steal_time = 0;
+	}
 
 	if (acct->idle_time)
 		account_idle_time(cputime_to_nsecs(acct->idle_time));
@@ -393,8 +438,6 @@
 	if (acct->stime)
 		account_system_index_time(tsk, cputime_to_nsecs(acct->stime),
 					  CPUTIME_SYSTEM);
-	if (acct->stime_scaled)
-		tsk->stimescaled += cputime_to_nsecs(acct->stime_scaled);
 
 	if (acct->hardirq_time)
 		account_system_index_time(tsk, cputime_to_nsecs(acct->hardirq_time),
@@ -403,14 +446,12 @@
 		account_system_index_time(tsk, cputime_to_nsecs(acct->softirq_time),
 					  CPUTIME_SOFTIRQ);
 
+	vtime_flush_scaled(tsk, acct);
+
 	acct->utime = 0;
-	acct->utime_scaled = 0;
-	acct->utime_sspurr = 0;
 	acct->gtime = 0;
-	acct->steal_time = 0;
 	acct->idle_time = 0;
 	acct->stime = 0;
-	acct->stime_scaled = 0;
 	acct->hardirq_time = 0;
 	acct->softirq_time = 0;
 }
@@ -422,21 +463,18 @@
 void __delay(unsigned long loops)
 {
 	unsigned long start;
-	int diff;
 
 	spin_begin();
-	if (__USE_RTC()) {
-		start = get_rtcl();
-		do {
-			/* the RTCL register wraps at 1000000000 */
-			diff = get_rtcl() - start;
-			if (diff < 0)
-				diff += 1000000000;
-			spin_cpu_relax();
-		} while (diff < loops);
+	if (tb_invalid) {
+		/*
+		 * TB is in error state and isn't ticking anymore.
+		 * HMI handler was unable to recover from TB error.
+		 * Return immediately, so that kernel won't get stuck here.
+		 */
+		spin_cpu_relax();
 	} else {
-		start = get_tbl();
-		while (get_tbl() - start < loops)
+		start = mftb();
+		while (mftb() - start < loops)
 			spin_cpu_relax();
 	}
 	spin_end();
@@ -539,14 +577,11 @@
 	struct pt_regs *old_regs;
 	u64 now;
 
-	/* Some implementations of hotplug will get timer interrupts while
-	 * offline, just ignore these and we also need to set
-	 * decrementers_next_tb as MAX to make sure __check_irq_replay
-	 * don't replay timer interrupt when return, otherwise we'll trap
-	 * here infinitely :(
+	/*
+	 * Some implementations of hotplug will get timer interrupts while
+	 * offline, just ignore these.
 	 */
 	if (unlikely(!cpu_online(smp_processor_id()))) {
-		*next_tb = ~(u64)0;
 		set_dec(decrementer_max);
 		return;
 	}
@@ -582,7 +617,7 @@
 		irq_work_run();
 	}
 
-	now = get_tb_or_rtc();
+	now = get_tb();
 	if (now >= *next_tb) {
 		*next_tb = ~(u64)0;
 		if (evt->event_handler)
@@ -614,15 +649,6 @@
 	__this_cpu_inc(irq_stat.broadcast_irqs_event);
 }
 #endif
-
-/*
- * Hypervisor decrementer interrupts shouldn't occur but are sometimes
- * left pending on exit from a KVM guest. We don't need to do anything
- * to clear them, as they are edge-triggered.
- */
-void hdec_interrupt(struct pt_regs *regs)
-{
-}
 
 #ifdef CONFIG_SUSPEND
 static void generic_suspend_disable_irqs(void)
@@ -673,8 +699,6 @@
  */
 notrace unsigned long long sched_clock(void)
 {
-	if (__USE_RTC())
-		return get_rtc();
 	return mulhdu(get_tb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift;
 }
 
@@ -824,11 +848,6 @@
 }
 
 /* clocksource code */
-static notrace u64 rtc_read(struct clocksource *cs)
-{
-	return (u64)get_rtc();
-}
-
 static notrace u64 timebase_read(struct clocksource *cs)
 {
 	return (u64)get_tb();
@@ -837,7 +856,7 @@
 
 void update_vsyscall(struct timekeeper *tk)
 {
-	struct timespec xt;
+	struct timespec64 xt;
 	struct clocksource *clock = tk->tkr_mono.clock;
 	u32 mult = tk->tkr_mono.mult;
 	u32 shift = tk->tkr_mono.shift;
@@ -909,7 +928,8 @@
 	vdso_data->tb_to_xs = new_tb_to_xs;
 	vdso_data->wtom_clock_sec = tk->wall_to_monotonic.tv_sec;
 	vdso_data->wtom_clock_nsec = tk->wall_to_monotonic.tv_nsec;
-	vdso_data->stamp_xtime = xt;
+	vdso_data->stamp_xtime_sec = xt.tv_sec;
+	vdso_data->stamp_xtime_nsec = xt.tv_nsec;
 	vdso_data->stamp_sec_fraction = frac_sec;
 	vdso_data->hrtimer_res = hrtimer_resolution;
 	smp_wmb();
@@ -924,12 +944,7 @@
 
 static void __init clocksource_init(void)
 {
-	struct clocksource *clock;
-
-	if (__USE_RTC())
-		clock = &clocksource_rtc;
-	else
-		clock = &clocksource_timebase;
+	struct clocksource *clock = &clocksource_timebase;
 
 	if (clocksource_register_hz(clock, tb_ticks_per_sec)) {
 		printk(KERN_ERR "clocksource: %s is already registered\n",
@@ -944,7 +959,7 @@
 static int decrementer_set_next_event(unsigned long evt,
 				      struct clock_event_device *dev)
 {
-	__this_cpu_write(decrementers_next_tb, get_tb_or_rtc() + evt);
+	__this_cpu_write(decrementers_next_tb, get_tb() + evt);
 	set_dec(evt);
 
 	/* We may have raced with new irq work */
@@ -1047,17 +1062,12 @@
 	u64 scale;
 	unsigned shift;
 
-	if (__USE_RTC()) {
-		/* 601 processor: dec counts down by 128 every 128ns */
-		ppc_tb_freq = 1000000000;
-	} else {
-		/* Normal PowerPC with timebase register */
-		ppc_md.calibrate_decr();
-		printk(KERN_DEBUG "time_init: decrementer frequency = %lu.%.6lu MHz\n",
-		       ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
-		printk(KERN_DEBUG "time_init: processor frequency = %lu.%.6lu MHz\n",
-		       ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);
-	}
+	/* Normal PowerPC with timebase register */
+	ppc_md.calibrate_decr();
+	printk(KERN_DEBUG "time_init: decrementer frequency = %lu.%.6lu MHz\n",
+	       ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
+	printk(KERN_DEBUG "time_init: processor frequency = %lu.%.6lu MHz\n",
+	       ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);
 
 	tb_ticks_per_jiffy = ppc_tb_freq / HZ;
 	tb_ticks_per_sec = ppc_tb_freq;
@@ -1083,7 +1093,7 @@
 	tb_to_ns_scale = scale;
 	tb_to_ns_shift = shift;
 	/* Save the current timebase to pretty up CONFIG_PRINTK_TIME */
-	boot_tb = get_tb_or_rtc();
+	boot_tb = get_tb();
 
 	/* If platform provided a timezone (pmac), we correct the time */
 	if (timezone_offset) {
11091119 init_decrementer_clockevent();
11101120 tick_setup_hrtimer_broadcast();
11111121
1112
-#ifdef CONFIG_COMMON_CLK
11131122 of_clk_init(NULL);
1114
-#endif
1123
+ enable_sched_clock_irqtime();
11151124 }
11161125
11171126 /*