2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
kernel/arch/s390/kernel/time.c
@@ -41,6 +41,9 @@
 #include <linux/gfp.h>
 #include <linux/kprobes.h>
 #include <linux/uaccess.h>
+#include <vdso/vsyscall.h>
+#include <vdso/clocksource.h>
+#include <vdso/helpers.h>
 #include <asm/facility.h>
 #include <asm/delay.h>
 #include <asm/div64.h>
@@ -84,7 +87,7 @@
 
 	/* Initialize TOD steering parameters */
 	tod_steering_end = *(unsigned long long *) &tod_clock_base[1];
-	vdso_data->ts_end = tod_steering_end;
+	vdso_data->arch_data.tod_steering_end = tod_steering_end;
 
 	if (!test_facility(28))
 		return;
@@ -109,15 +112,6 @@
 	return tod_to_ns(get_tod_clock_monotonic());
 }
 NOKPROBE_SYMBOL(sched_clock);
-
-/*
- * Monotonic_clock - returns # of nanoseconds passed since time_init()
- */
-unsigned long long monotonic_clock(void)
-{
-	return sched_clock();
-}
-EXPORT_SYMBOL(monotonic_clock);
 
 static void ext_to_timespec64(unsigned char *clk, struct timespec64 *xt)
 {
@@ -246,7 +240,7 @@
 	preempt_disable(); /* protect from changes to steering parameters */
 	now = get_tod_clock();
 	adj = tod_steering_end - now;
-	if (unlikely((s64) adj >= 0))
+	if (unlikely((s64) adj > 0))
 		/*
 		 * manually steer by 1 cycle every 2^16 cycles. This
 		 * corresponds to shifting the tod delta by 15. 1s is
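
Note on the hunk above: steering is treated as active only while tod_steering_end still lies in the future, and during that window every raw TOD read is corrected by the remaining distance shifted right by 15, i.e. one clock unit per 2^16 units, signed by tod_steering_delta. A minimal standalone sketch of that adjustment, with hypothetical names rather than the kernel's helpers:

    /*
     * Illustrative sketch, not the kernel implementation: apply the TOD
     * steering correction described in the comment above. While steering
     * is in progress (adj > 0) the raw value is nudged by adj >> 15; with
     * a negative steering delta the steered value reads ahead of the raw
     * TOD, with a positive delta it reads behind.
     */
    #include <stdint.h>
    #include <stdio.h>

    uint64_t steer_tod(uint64_t now, uint64_t steering_end, int64_t steering_delta)
    {
        uint64_t adj = steering_end - now;

        if ((int64_t)adj > 0)   /* steering window not yet over */
            now += (steering_delta < 0) ? (adj >> 15) : -(adj >> 15);
        return now;
    }

    int main(void)
    {
        uint64_t end = 1ULL << 40;

        /* 2^20 units before the end of steering, a negative delta still
         * adds 2^20 >> 15 = 32 units to the raw reading: */
        printf("%llu\n",
               (unsigned long long)steer_tod(end - (1ULL << 20), end, -1000));
        return 0;
    }
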
@@ -262,65 +256,16 @@
 	.name		= "tod",
 	.rating		= 400,
 	.read		= read_tod_clock,
-	.mask		= -1ULL,
+	.mask		= CLOCKSOURCE_MASK(64),
 	.mult		= 1000,
 	.shift		= 12,
 	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
+	.vdso_clock_mode = VDSO_CLOCKMODE_TOD,
 };
 
 struct clocksource * __init clocksource_default_clock(void)
 {
 	return &clocksource_tod;
-}
-
-void update_vsyscall(struct timekeeper *tk)
-{
-	u64 nsecps;
-
-	if (tk->tkr_mono.clock != &clocksource_tod)
-		return;
-
-	/* Make userspace gettimeofday spin until we're done. */
-	++vdso_data->tb_update_count;
-	smp_wmb();
-	vdso_data->xtime_tod_stamp = tk->tkr_mono.cycle_last;
-	vdso_data->xtime_clock_sec = tk->xtime_sec;
-	vdso_data->xtime_clock_nsec = tk->tkr_mono.xtime_nsec;
-	vdso_data->wtom_clock_sec =
-		tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
-	vdso_data->wtom_clock_nsec = tk->tkr_mono.xtime_nsec +
-		+ ((u64) tk->wall_to_monotonic.tv_nsec << tk->tkr_mono.shift);
-	nsecps = (u64) NSEC_PER_SEC << tk->tkr_mono.shift;
-	while (vdso_data->wtom_clock_nsec >= nsecps) {
-		vdso_data->wtom_clock_nsec -= nsecps;
-		vdso_data->wtom_clock_sec++;
-	}
-
-	vdso_data->xtime_coarse_sec = tk->xtime_sec;
-	vdso_data->xtime_coarse_nsec =
-		(long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
-	vdso_data->wtom_coarse_sec =
-		vdso_data->xtime_coarse_sec + tk->wall_to_monotonic.tv_sec;
-	vdso_data->wtom_coarse_nsec =
-		vdso_data->xtime_coarse_nsec + tk->wall_to_monotonic.tv_nsec;
-	while (vdso_data->wtom_coarse_nsec >= NSEC_PER_SEC) {
-		vdso_data->wtom_coarse_nsec -= NSEC_PER_SEC;
-		vdso_data->wtom_coarse_sec++;
-	}
-
-	vdso_data->tk_mult = tk->tkr_mono.mult;
-	vdso_data->tk_shift = tk->tkr_mono.shift;
-	vdso_data->hrtimer_res = hrtimer_resolution;
-	smp_wmb();
-	++vdso_data->tb_update_count;
-}
-
-extern struct timezone sys_tz;
-
-void update_vsyscall_tz(void)
-{
-	vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
-	vdso_data->tz_dsttime = sys_tz.tz_dsttime;
 }
 
 /*
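
With .vdso_clock_mode set to VDSO_CLOCKMODE_TOD, the hand-rolled update_vsyscall()/update_vsyscall_tz() above go away and the generic timekeeping code fills the common vdso data pages; only the s390-specific steering fields in arch_data are still maintained here (see the surrounding hunks). The lockless reader side that the generic vDSO relies on follows the usual sequence-count retry pattern; below is a simplified, self-contained sketch of that idea only. The struct layout and names are invented for illustration and do not match the real vdso data:

    #include <stdint.h>
    #include <stdio.h>

    struct vdso_snapshot {
        uint32_t seq;        /* odd while an update is in progress */
        uint32_t mult, shift;
        uint64_t cycle_last;
        uint64_t sec;
        uint64_t nsec;       /* stored shifted left by 'shift' */
    };

    uint64_t vdso_read_ns(const volatile struct vdso_snapshot *vd,
                          uint64_t (*read_cycles)(void))
    {
        uint32_t seq;
        uint64_t ns;

        do {
            while ((seq = vd->seq) & 1)
                ;    /* writer active: spin until it finishes */
            ns = vd->nsec + (read_cycles() - vd->cycle_last) * vd->mult;
            ns = vd->sec * 1000000000ULL + (ns >> vd->shift);
        } while (vd->seq != seq);    /* retry if a writer slipped in */
        return ns;
    }

    static uint64_t fake_cycles(void) { static uint64_t c = 1000; return c += 7; }

    int main(void)
    {
        struct vdso_snapshot snap = {
            .seq = 0, .mult = 4, .shift = 2,
            .cycle_last = 1000, .sec = 1600000000, .nsec = 0,
        };

        /* prints monotonically increasing nanosecond values */
        for (int i = 0; i < 3; i++)
            printf("%llu\n", (unsigned long long)vdso_read_ns(&snap, fake_cycles));
        return 0;
    }
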
@@ -351,7 +296,7 @@
 }
 
 static DEFINE_PER_CPU(atomic_t, clock_sync_word);
-static DEFINE_MUTEX(clock_sync_mutex);
+static DEFINE_MUTEX(stp_mutex);
 static unsigned long clock_sync_flags;
 
 #define CLOCK_SYNC_HAS_STP	0
@@ -441,7 +386,6 @@
 		/* Epoch overflow */
 		tod_clock_base[0]++;
 	/* Adjust TOD steering parameters. */
-	vdso_data->tb_update_count++;
 	now = get_tod_clock();
 	adj = tod_steering_end - now;
 	if (unlikely((s64) adj >= 0))
@@ -453,9 +397,9 @@
 		panic("TOD clock sync offset %lli is too large to drift\n",
 		      tod_steering_delta);
 	tod_steering_end = now + (abs(tod_steering_delta) << 15);
-	vdso_data->ts_dir = (tod_steering_delta < 0) ? 0 : 1;
-	vdso_data->ts_end = tod_steering_end;
-	vdso_data->tb_update_count++;
+	vdso_data->arch_data.tod_steering_end = tod_steering_end;
+	vdso_data->arch_data.tod_steering_delta = tod_steering_delta;
+
 	/* Update LPAR offset. */
 	if (ptff_query(PTFF_QTO) && ptff(&qto, sizeof(qto), PTFF_QTO) == 0)
 		lpar_offset = qto.tod_epoch_difference;
@@ -502,7 +446,6 @@
 static void *stp_page;
 
 static void stp_work_fn(struct work_struct *work);
-static DEFINE_MUTEX(stp_work_mutex);
 static DECLARE_WORK(stp_work, stp_work_fn);
 static struct timer_list stp_timer;
 
@@ -612,7 +555,7 @@
 static int stp_sync_clock(void *data)
 {
 	struct clock_sync_data *sync = data;
-	unsigned long long clock_delta;
+	unsigned long long clock_delta, flags;
 	static int first;
 	int rc;
 
@@ -625,6 +568,7 @@
 		if (stp_info.todoff[0] || stp_info.todoff[1] ||
 		    stp_info.todoff[2] || stp_info.todoff[3] ||
 		    stp_info.tmd != 2) {
+			flags = vdso_update_begin();
 			rc = chsc_sstpc(stp_page, STP_OP_SYNC, 0,
 					&clock_delta);
 			if (rc == 0) {
@@ -634,6 +578,7 @@
 				if (rc == 0 && stp_info.tmd != 2)
 					rc = -EAGAIN;
 			}
+			vdso_update_end(flags);
 		}
 		sync->in_sync = rc ? -EAGAIN : 1;
 		xchg(&first, 0);
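
The vdso_update_begin()/vdso_update_end() pair added above brackets the clock step so that vDSO readers spin on the sequence count instead of observing a half-updated time; conceptually it is the write side of the retry loop sketched earlier. A minimal illustration of that write-side pattern, again with invented names rather than the kernel helpers (the real vdso_update_begin/end additionally take the timekeeper lock and disable interrupts):

    #include <stdint.h>
    #include <stdio.h>

    struct snapshot {
        uint32_t seq;               /* odd while an update is in progress */
        uint64_t sec, nsec, cycle_last;
    };

    void snapshot_update(struct snapshot *s, uint64_t sec, uint64_t nsec,
                         uint64_t cycle_last)
    {
        s->seq++;                   /* odd: readers spin or retry */
        __sync_synchronize();       /* order the flag before the data */
        s->sec = sec;
        s->nsec = nsec;
        s->cycle_last = cycle_last;
        __sync_synchronize();       /* order the data before the flag */
        s->seq++;                   /* even again: snapshot is consistent */
    }

    int main(void)
    {
        struct snapshot s = { 0 };

        snapshot_update(&s, 1600000000, 0, 12345);
        printf("seq=%u sec=%llu\n", s.seq, (unsigned long long)s.sec);
        return 0;
    }
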
@@ -653,6 +598,81 @@
 	return 0;
 }
 
+static int stp_clear_leap(void)
+{
+	struct __kernel_timex txc;
+	int ret;
+
+	memset(&txc, 0, sizeof(txc));
+
+	ret = do_adjtimex(&txc);
+	if (ret < 0)
+		return ret;
+
+	txc.modes = ADJ_STATUS;
+	txc.status &= ~(STA_INS|STA_DEL);
+	return do_adjtimex(&txc);
+}
+
+static void stp_check_leap(void)
+{
+	struct stp_stzi stzi;
+	struct stp_lsoib *lsoib = &stzi.lsoib;
+	struct __kernel_timex txc;
+	int64_t timediff;
+	int leapdiff, ret;
+
+	if (!stp_info.lu || !check_sync_clock()) {
+		/*
+		 * Either a scheduled leap second was removed by the operator,
+		 * or STP is out of sync. In both cases, clear the leap second
+		 * kernel flags.
+		 */
+		if (stp_clear_leap() < 0)
+			pr_err("failed to clear leap second flags\n");
+		return;
+	}
+
+	if (chsc_stzi(stp_page, &stzi, sizeof(stzi))) {
+		pr_err("stzi failed\n");
+		return;
+	}
+
+	timediff = tod_to_ns(lsoib->nlsout - get_tod_clock()) / NSEC_PER_SEC;
+	leapdiff = lsoib->nlso - lsoib->also;
+
+	if (leapdiff != 1 && leapdiff != -1) {
+		pr_err("Cannot schedule %d leap seconds\n", leapdiff);
+		return;
+	}
+
+	if (timediff < 0) {
+		if (stp_clear_leap() < 0)
+			pr_err("failed to clear leap second flags\n");
+	} else if (timediff < 7200) {
+		memset(&txc, 0, sizeof(txc));
+		ret = do_adjtimex(&txc);
+		if (ret < 0)
+			return;
+
+		txc.modes = ADJ_STATUS;
+		if (leapdiff > 0)
+			txc.status |= STA_INS;
+		else
+			txc.status |= STA_DEL;
+		ret = do_adjtimex(&txc);
+		if (ret < 0)
+			pr_err("failed to set leap second flags\n");
+		/* arm Timer to clear leap second flags */
+		mod_timer(&stp_timer, jiffies + msecs_to_jiffies(14400 * MSEC_PER_SEC));
+	} else {
+		/* The day the leap second is scheduled for hasn't been reached. Retry
+		 * in one hour.
+		 */
+		mod_timer(&stp_timer, jiffies + msecs_to_jiffies(3600 * MSEC_PER_SEC));
+	}
+}
+
 /*
  * STP work. Check for the STP state and take over the clock
  * synchronization if the STP clock source is usable.
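
stp_check_leap() above feeds the STP-announced leap second into the kernel's normal NTP machinery: within 7200 seconds (two hours) of the event it arms STA_INS or STA_DEL through do_adjtimex(), re-arms the timer for 14400 seconds (four hours) so the flag gets cleared again afterwards, and otherwise retries in an hour. The same ADJ_STATUS interface is reachable from userspace through adjtimex(2); a small standalone example (needs root, shown purely for illustration):

    #include <stdio.h>
    #include <sys/timex.h>

    int main(void)
    {
        struct timex txc = { 0 };

        if (adjtimex(&txc) < 0) {       /* modes == 0: read current state */
            perror("adjtimex");
            return 1;
        }
        txc.modes = ADJ_STATUS;
        txc.status |= STA_INS;          /* insert a leap second at the next UTC midnight */
        if (adjtimex(&txc) < 0) {
            perror("adjtimex(ADJ_STATUS)");
            return 1;
        }
        printf("STA_INS armed, status=0x%x\n", txc.status);
        return 0;
    }
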
@@ -663,7 +683,7 @@
 	int rc;
 
 	/* prevent multiple execution. */
-	mutex_lock(&stp_work_mutex);
+	mutex_lock(&stp_mutex);
 
 	if (!stp_online) {
 		chsc_sstpc(stp_page, STP_OP_CTRL, 0x0000, NULL);
@@ -671,7 +691,7 @@
 		goto out_unlock;
 	}
 
-	rc = chsc_sstpc(stp_page, STP_OP_CTRL, 0xb0e0, NULL);
+	rc = chsc_sstpc(stp_page, STP_OP_CTRL, 0xf0e0, NULL);
 	if (rc)
 		goto out_unlock;
 
@@ -680,24 +700,25 @@
 		goto out_unlock;
 
 	/* Skip synchronization if the clock is already in sync. */
-	if (check_sync_clock())
-		goto out_unlock;
-
-	memset(&stp_sync, 0, sizeof(stp_sync));
-	cpus_read_lock();
-	atomic_set(&stp_sync.cpus, num_online_cpus() - 1);
-	stop_machine_cpuslocked(stp_sync_clock, &stp_sync, cpu_online_mask);
-	cpus_read_unlock();
+	if (!check_sync_clock()) {
+		memset(&stp_sync, 0, sizeof(stp_sync));
+		cpus_read_lock();
+		atomic_set(&stp_sync.cpus, num_online_cpus() - 1);
+		stop_machine_cpuslocked(stp_sync_clock, &stp_sync, cpu_online_mask);
+		cpus_read_unlock();
+	}
 
 	if (!check_sync_clock())
 		/*
 		 * There is a usable clock but the synchonization failed.
 		 * Retry after a second.
 		 */
-		mod_timer(&stp_timer, jiffies + HZ);
+		mod_timer(&stp_timer, jiffies + msecs_to_jiffies(MSEC_PER_SEC));
+	else if (stp_info.lu)
+		stp_check_leap();
 
 out_unlock:
-	mutex_unlock(&stp_work_mutex);
+	mutex_unlock(&stp_mutex);
 }
 
 /*
@@ -708,151 +729,178 @@
 	.dev_name	= "stp",
 };
 
-static ssize_t stp_ctn_id_show(struct device *dev,
+static ssize_t ctn_id_show(struct device *dev,
 				struct device_attribute *attr,
 				char *buf)
 {
 	ssize_t ret = -ENODATA;
 
-	mutex_lock(&stp_work_mutex);
+	mutex_lock(&stp_mutex);
 	if (stpinfo_valid())
 		ret = sprintf(buf, "%016llx\n",
 			      *(unsigned long long *) stp_info.ctnid);
-	mutex_unlock(&stp_work_mutex);
+	mutex_unlock(&stp_mutex);
 	return ret;
 }
 
-static DEVICE_ATTR(ctn_id, 0400, stp_ctn_id_show, NULL);
+static DEVICE_ATTR_RO(ctn_id);
 
-static ssize_t stp_ctn_type_show(struct device *dev,
+static ssize_t ctn_type_show(struct device *dev,
 				struct device_attribute *attr,
 				char *buf)
 {
 	ssize_t ret = -ENODATA;
 
-	mutex_lock(&stp_work_mutex);
+	mutex_lock(&stp_mutex);
 	if (stpinfo_valid())
 		ret = sprintf(buf, "%i\n", stp_info.ctn);
-	mutex_unlock(&stp_work_mutex);
+	mutex_unlock(&stp_mutex);
 	return ret;
 }
 
-static DEVICE_ATTR(ctn_type, 0400, stp_ctn_type_show, NULL);
+static DEVICE_ATTR_RO(ctn_type);
 
-static ssize_t stp_dst_offset_show(struct device *dev,
+static ssize_t dst_offset_show(struct device *dev,
 				struct device_attribute *attr,
 				char *buf)
 {
 	ssize_t ret = -ENODATA;
 
-	mutex_lock(&stp_work_mutex);
+	mutex_lock(&stp_mutex);
 	if (stpinfo_valid() && (stp_info.vbits & 0x2000))
 		ret = sprintf(buf, "%i\n", (int)(s16) stp_info.dsto);
-	mutex_unlock(&stp_work_mutex);
+	mutex_unlock(&stp_mutex);
 	return ret;
 }
 
-static DEVICE_ATTR(dst_offset, 0400, stp_dst_offset_show, NULL);
+static DEVICE_ATTR_RO(dst_offset);
 
-static ssize_t stp_leap_seconds_show(struct device *dev,
+static ssize_t leap_seconds_show(struct device *dev,
 				struct device_attribute *attr,
 				char *buf)
 {
 	ssize_t ret = -ENODATA;
 
-	mutex_lock(&stp_work_mutex);
+	mutex_lock(&stp_mutex);
 	if (stpinfo_valid() && (stp_info.vbits & 0x8000))
 		ret = sprintf(buf, "%i\n", (int)(s16) stp_info.leaps);
-	mutex_unlock(&stp_work_mutex);
+	mutex_unlock(&stp_mutex);
 	return ret;
 }
 
-static DEVICE_ATTR(leap_seconds, 0400, stp_leap_seconds_show, NULL);
+static DEVICE_ATTR_RO(leap_seconds);
 
-static ssize_t stp_stratum_show(struct device *dev,
+static ssize_t leap_seconds_scheduled_show(struct device *dev,
+						struct device_attribute *attr,
+						char *buf)
+{
+	struct stp_stzi stzi;
+	ssize_t ret;
+
+	mutex_lock(&stp_mutex);
+	if (!stpinfo_valid() || !(stp_info.vbits & 0x8000) || !stp_info.lu) {
+		mutex_unlock(&stp_mutex);
+		return -ENODATA;
+	}
+
+	ret = chsc_stzi(stp_page, &stzi, sizeof(stzi));
+	mutex_unlock(&stp_mutex);
+	if (ret < 0)
+		return ret;
+
+	if (!stzi.lsoib.p)
+		return sprintf(buf, "0,0\n");
+
+	return sprintf(buf, "%llu,%d\n",
+		       tod_to_ns(stzi.lsoib.nlsout - TOD_UNIX_EPOCH) / NSEC_PER_SEC,
+		       stzi.lsoib.nlso - stzi.lsoib.also);
+}
+
+static DEVICE_ATTR_RO(leap_seconds_scheduled);
+
+static ssize_t stratum_show(struct device *dev,
 				struct device_attribute *attr,
 				char *buf)
 {
 	ssize_t ret = -ENODATA;
 
-	mutex_lock(&stp_work_mutex);
+	mutex_lock(&stp_mutex);
 	if (stpinfo_valid())
 		ret = sprintf(buf, "%i\n", (int)(s16) stp_info.stratum);
-	mutex_unlock(&stp_work_mutex);
+	mutex_unlock(&stp_mutex);
 	return ret;
 }
 
-static DEVICE_ATTR(stratum, 0400, stp_stratum_show, NULL);
+static DEVICE_ATTR_RO(stratum);
 
-static ssize_t stp_time_offset_show(struct device *dev,
+static ssize_t time_offset_show(struct device *dev,
 				struct device_attribute *attr,
 				char *buf)
 {
 	ssize_t ret = -ENODATA;
 
-	mutex_lock(&stp_work_mutex);
+	mutex_lock(&stp_mutex);
 	if (stpinfo_valid() && (stp_info.vbits & 0x0800))
 		ret = sprintf(buf, "%i\n", (int) stp_info.tto);
-	mutex_unlock(&stp_work_mutex);
+	mutex_unlock(&stp_mutex);
 	return ret;
 }
 
-static DEVICE_ATTR(time_offset, 0400, stp_time_offset_show, NULL);
+static DEVICE_ATTR_RO(time_offset);
 
-static ssize_t stp_time_zone_offset_show(struct device *dev,
+static ssize_t time_zone_offset_show(struct device *dev,
 				struct device_attribute *attr,
 				char *buf)
 {
 	ssize_t ret = -ENODATA;
 
-	mutex_lock(&stp_work_mutex);
+	mutex_lock(&stp_mutex);
 	if (stpinfo_valid() && (stp_info.vbits & 0x4000))
 		ret = sprintf(buf, "%i\n", (int)(s16) stp_info.tzo);
-	mutex_unlock(&stp_work_mutex);
+	mutex_unlock(&stp_mutex);
 	return ret;
 }
 
-static DEVICE_ATTR(time_zone_offset, 0400,
-			 stp_time_zone_offset_show, NULL);
+static DEVICE_ATTR_RO(time_zone_offset);
 
-static ssize_t stp_timing_mode_show(struct device *dev,
+static ssize_t timing_mode_show(struct device *dev,
 				struct device_attribute *attr,
 				char *buf)
 {
 	ssize_t ret = -ENODATA;
 
-	mutex_lock(&stp_work_mutex);
+	mutex_lock(&stp_mutex);
 	if (stpinfo_valid())
 		ret = sprintf(buf, "%i\n", stp_info.tmd);
-	mutex_unlock(&stp_work_mutex);
+	mutex_unlock(&stp_mutex);
 	return ret;
 }
 
-static DEVICE_ATTR(timing_mode, 0400, stp_timing_mode_show, NULL);
+static DEVICE_ATTR_RO(timing_mode);
 
-static ssize_t stp_timing_state_show(struct device *dev,
+static ssize_t timing_state_show(struct device *dev,
 				struct device_attribute *attr,
 				char *buf)
 {
 	ssize_t ret = -ENODATA;
 
-	mutex_lock(&stp_work_mutex);
+	mutex_lock(&stp_mutex);
 	if (stpinfo_valid())
 		ret = sprintf(buf, "%i\n", stp_info.tst);
-	mutex_unlock(&stp_work_mutex);
+	mutex_unlock(&stp_mutex);
 	return ret;
 }
 
-static DEVICE_ATTR(timing_state, 0400, stp_timing_state_show, NULL);
+static DEVICE_ATTR_RO(timing_state);
 
-static ssize_t stp_online_show(struct device *dev,
+static ssize_t online_show(struct device *dev,
 				struct device_attribute *attr,
 				char *buf)
 {
 	return sprintf(buf, "%i\n", stp_online);
 }
 
-static ssize_t stp_online_store(struct device *dev,
+static ssize_t online_store(struct device *dev,
 				struct device_attribute *attr,
 				const char *buf, size_t count)
 {
@@ -863,14 +911,14 @@
 		return -EINVAL;
 	if (!test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags))
 		return -EOPNOTSUPP;
-	mutex_lock(&clock_sync_mutex);
+	mutex_lock(&stp_mutex);
 	stp_online = value;
 	if (stp_online)
 		set_bit(CLOCK_SYNC_STP, &clock_sync_flags);
 	else
 		clear_bit(CLOCK_SYNC_STP, &clock_sync_flags);
 	queue_work(time_sync_wq, &stp_work);
-	mutex_unlock(&clock_sync_mutex);
+	mutex_unlock(&stp_mutex);
 	return count;
 }
 
....@@ -878,18 +926,15 @@
878926 * Can't use DEVICE_ATTR because the attribute should be named
879927 * stp/online but dev_attr_online already exists in this file ..
880928 */
881
-static struct device_attribute dev_attr_stp_online = {
882
- .attr = { .name = "online", .mode = 0600 },
883
- .show = stp_online_show,
884
- .store = stp_online_store,
885
-};
929
+static DEVICE_ATTR_RW(online);
886930
887931 static struct device_attribute *stp_attributes[] = {
888932 &dev_attr_ctn_id,
889933 &dev_attr_ctn_type,
890934 &dev_attr_dst_offset,
891935 &dev_attr_leap_seconds,
892
- &dev_attr_stp_online,
936
+ &dev_attr_online,
937
+ &dev_attr_leap_seconds_scheduled,
893938 &dev_attr_stratum,
894939 &dev_attr_time_offset,
895940 &dev_attr_time_zone_offset,