.. | .. |
| 1 | +// SPDX-License-Identifier: GPL-2.0 |
1 | 2 | /* |
2 | | - * linux/kernel/time/timekeeping.c |
3 | | - * |
4 | | - * Kernel timekeeping code and accessor functions |
5 | | - * |
6 | | - * This code was moved from linux/kernel/timer.c. |
7 | | - * Please see that file for copyright and history logs. |
8 | | - * |
| 3 | + * Kernel timekeeping code and accessor functions. Based on code from |
| 4 | + * timer.c, moved in commit 8524070b7982. |
9 | 5 | */ |
10 | | - |
11 | 6 | #include <linux/timekeeper_internal.h> |
12 | 7 | #include <linux/module.h> |
13 | 8 | #include <linux/interrupt.h> |
.. | .. |
22 | 17 | #include <linux/clocksource.h> |
23 | 18 | #include <linux/jiffies.h> |
24 | 19 | #include <linux/time.h> |
| 20 | +#include <linux/timex.h> |
25 | 21 | #include <linux/tick.h> |
26 | 22 | #include <linux/stop_machine.h> |
27 | 23 | #include <linux/pvclock_gtod.h> |
28 | 24 | #include <linux/compiler.h> |
| 25 | +#include <linux/audit.h> |
| 26 | +#include <linux/random.h> |
29 | 27 | |
30 | 28 | #include "tick-internal.h" |
31 | 29 | #include "ntp_internal.h" |
.. | .. |
43 | 41 | TK_ADV_FREQ |
44 | 42 | }; |
45 | 43 | |
| 44 | +DEFINE_RAW_SPINLOCK(timekeeper_lock); |
| 45 | + |
46 | 46 | /* |
47 | 47 | * The most important data for readout fits into a single 64 byte |
48 | 48 | * cache line. |
49 | 49 | */ |
50 | 50 | static struct { |
51 | | - seqcount_t seq; |
| 51 | + seqcount_raw_spinlock_t seq; |
52 | 52 | struct timekeeper timekeeper; |
53 | 53 | } tk_core ____cacheline_aligned = { |
54 | | - .seq = SEQCNT_ZERO(tk_core.seq), |
| 54 | + .seq = SEQCNT_RAW_SPINLOCK_ZERO(tk_core.seq, &timekeeper_lock), |
55 | 55 | }; |
56 | 56 | |
57 | | -static DEFINE_RAW_SPINLOCK(timekeeper_lock); |
58 | 57 | static struct timekeeper shadow_timekeeper; |
| 58 | + |
| 59 | +/* flag for if timekeeping is suspended */ |
| 60 | +int __read_mostly timekeeping_suspended; |
59 | 61 | |
60 | 62 | /** |
61 | 63 | * struct tk_fast - NMI safe timekeeper |
.. | .. |
67 | 69 | * See @update_fast_timekeeper() below. |
68 | 70 | */ |
69 | 71 | struct tk_fast { |
70 | | - seqcount_t seq; |
| 72 | + seqcount_latch_t seq; |
71 | 73 | struct tk_read_base base[2]; |
72 | 74 | }; |
73 | 75 | |
.. | .. |
76 | 78 | |
77 | 79 | static u64 dummy_clock_read(struct clocksource *cs) |
78 | 80 | { |
79 | | - return cycles_at_suspend; |
| 81 | + if (timekeeping_suspended) |
| 82 | + return cycles_at_suspend; |
| 83 | + return local_clock(); |
80 | 84 | } |
81 | 85 | |
82 | 86 | static struct clocksource dummy_clock = { |
83 | 87 | .read = dummy_clock_read, |
84 | 88 | }; |
85 | 89 | |
| 90 | +/* |
| 91 | + * Boot time initialization which allows local_clock() to be utilized |
| 92 | + * during early boot when clocksources are not available. local_clock() |
| 93 | + * returns nanoseconds already so no conversion is required, hence mult=1 |
| 94 | + * and shift=0. When the first proper clocksource is installed then |
| 95 | + * the fast time keepers are updated with the correct values. |
| 96 | + */ |
| 97 | +#define FAST_TK_INIT \ |
| 98 | + { \ |
| 99 | + .clock = &dummy_clock, \ |
| 100 | + .mask = CLOCKSOURCE_MASK(64), \ |
| 101 | + .mult = 1, \ |
| 102 | + .shift = 0, \ |
| 103 | + } |
| 104 | + |
86 | 105 | static struct tk_fast tk_fast_mono ____cacheline_aligned = { |
87 | | - .base[0] = { .clock = &dummy_clock, }, |
88 | | - .base[1] = { .clock = &dummy_clock, }, |
| 106 | + .seq = SEQCNT_LATCH_ZERO(tk_fast_mono.seq), |
| 107 | + .base[0] = FAST_TK_INIT, |
| 108 | + .base[1] = FAST_TK_INIT, |
89 | 109 | }; |
90 | 110 | |
91 | 111 | static struct tk_fast tk_fast_raw ____cacheline_aligned = { |
92 | | - .base[0] = { .clock = &dummy_clock, }, |
93 | | - .base[1] = { .clock = &dummy_clock, }, |
| 112 | + .seq = SEQCNT_LATCH_ZERO(tk_fast_raw.seq), |
| 113 | + .base[0] = FAST_TK_INIT, |
| 114 | + .base[1] = FAST_TK_INIT, |
94 | 115 | }; |
95 | | - |
96 | | -/* flag for if timekeeping is suspended */ |
97 | | -int __read_mostly timekeeping_suspended; |
98 | 116 | |
99 | 117 | static inline void tk_normalize_xtime(struct timekeeper *tk) |
100 | 118 | { |
.. | .. |
161 | 179 | * tk_clock_read - atomic clocksource read() helper |
162 | 180 | * |
163 | 181 | * This helper is necessary to use in the read paths because, while the |
164 | | - * seqlock ensures we don't return a bad value while structures are updated, |
| 182 | + * seqcount ensures we don't return a bad value while structures are updated, |
165 | 183 | * it doesn't protect from potential crashes. There is the possibility that |
166 | 184 | * the tkr's clocksource may change between the read reference, and the |
167 | 185 | * clock reference passed to the read function. This can cause crashes if |
.. | .. |
226 | 244 | unsigned int seq; |
227 | 245 | |
228 | 246 | /* |
229 | | - * Since we're called holding a seqlock, the data may shift |
| 247 | + * Since we're called holding a seqcount, the data may shift |
230 | 248 | * under us while we're doing the calculation. This can cause |
231 | 249 | * false positives, since we'd note a problem but throw the |
232 | | - * results away. So nest another seqlock here to atomically |
| 250 | + * results away. So nest another seqcount here to atomically |
233 | 251 | * grab the points we are checking with. |
234 | 252 | */ |
235 | 253 | do { |
.. | .. |
468 | 486 | tk_clock_read(tkr), |
469 | 487 | tkr->cycle_last, |
470 | 488 | tkr->mask)); |
471 | | - } while (read_seqcount_retry(&tkf->seq, seq)); |
| 489 | + } while (read_seqcount_latch_retry(&tkf->seq, seq)); |
472 | 490 | |
473 | 491 | return now; |
474 | 492 | } |
.. | .. |
490 | 508 | * |
491 | 509 | * To keep it NMI safe since we're accessing from tracing, we're not using a |
492 | 510 | * separate timekeeper with updates to monotonic clock and boot offset |
493 | | - * protected with seqlocks. This has the following minor side effects: |
| 511 | + * protected with seqcounts. This has the following minor side effects: |
494 | 512 | * |
495 | 513 | * (1) Its possible that a timestamp be taken after the boot offset is updated |
496 | 514 | * but before the timekeeper is updated. If this happens, the new boot offset |
.. | .. |
514 | 532 | } |
515 | 533 | EXPORT_SYMBOL_GPL(ktime_get_boot_fast_ns); |
516 | 534 | |
517 | | - |
518 | 535 | /* |
519 | 536 | * See comment for __ktime_get_fast_ns() vs. timestamp ordering |
520 | 537 | */ |
521 | | -static __always_inline u64 __ktime_get_real_fast_ns(struct tk_fast *tkf) |
| 538 | +static __always_inline u64 __ktime_get_real_fast(struct tk_fast *tkf, u64 *mono) |
522 | 539 | { |
523 | 540 | struct tk_read_base *tkr; |
| 541 | + u64 basem, baser, delta; |
524 | 542 | unsigned int seq; |
525 | | - u64 now; |
526 | 543 | |
527 | 544 | do { |
528 | 545 | seq = raw_read_seqcount_latch(&tkf->seq); |
529 | 546 | tkr = tkf->base + (seq & 0x01); |
530 | | - now = ktime_to_ns(tkr->base_real); |
| 547 | + basem = ktime_to_ns(tkr->base); |
| 548 | + baser = ktime_to_ns(tkr->base_real); |
531 | 549 | |
532 | | - now += timekeeping_delta_to_ns(tkr, |
533 | | - clocksource_delta( |
534 | | - tk_clock_read(tkr), |
535 | | - tkr->cycle_last, |
536 | | - tkr->mask)); |
537 | | - } while (read_seqcount_retry(&tkf->seq, seq)); |
| 550 | + delta = timekeeping_delta_to_ns(tkr, |
| 551 | + clocksource_delta(tk_clock_read(tkr), |
| 552 | + tkr->cycle_last, tkr->mask)); |
| 553 | + } while (read_seqcount_latch_retry(&tkf->seq, seq)); |
538 | 554 | |
539 | | - return now; |
| 555 | + if (mono) |
| 556 | + *mono = basem + delta; |
| 557 | + return baser + delta; |
540 | 558 | } |
541 | 559 | |
542 | 560 | /** |
.. | .. |
544 | 562 | */ |
545 | 563 | u64 ktime_get_real_fast_ns(void) |
546 | 564 | { |
547 | | - return __ktime_get_real_fast_ns(&tk_fast_mono); |
| 565 | + return __ktime_get_real_fast(&tk_fast_mono, NULL); |
548 | 566 | } |
549 | 567 | EXPORT_SYMBOL_GPL(ktime_get_real_fast_ns); |
| 568 | + |
| 569 | +/** |
| 570 | + * ktime_get_fast_timestamps: - NMI safe timestamps |
| 571 | + * @snapshot: Pointer to timestamp storage |
| 572 | + * |
| 573 | + * Stores clock monotonic, boottime and realtime timestamps. |
| 574 | + * |
| 575 | + * Boot time is a racy access on 32bit systems if the sleep time injection |
| 576 | + * happens late during resume and not in timekeeping_resume(). That could |
| 577 | + * be avoided by expanding struct tk_read_base with boot offset for 32bit |
| 578 | + * and adding more overhead to the update. As this is a hard to observe |
| 579 | + * once per resume event which can be filtered with reasonable effort using |
| 580 | + * the accurate mono/real timestamps, it's probably not worth the trouble. |
| 581 | + * |
| 582 | + * Aside of that it might be possible on 32 and 64 bit to observe the |
| 583 | + * following when the sleep time injection happens late: |
| 584 | + * |
| 585 | + * CPU 0 CPU 1 |
| 586 | + * timekeeping_resume() |
| 587 | + * ktime_get_fast_timestamps() |
| 588 | + * mono, real = __ktime_get_real_fast() |
| 589 | + * inject_sleep_time() |
| 590 | + * update boot offset |
| 591 | + * boot = mono + bootoffset; |
| 592 | + * |
| 593 | + * That means that boot time already has the sleep time adjustment, but |
| 594 | + * real time does not. On the next readout both are in sync again. |
| 595 | + * |
| 596 | + * Preventing this for 64bit is not really feasible without destroying the |
| 597 | + * careful cache layout of the timekeeper because the sequence count and |
| 598 | + * struct tk_read_base would then need two cache lines instead of one. |
| 599 | + * |
| 600 | + * Access to the time keeper clock source is disabled accross the innermost |
| 601 | + * steps of suspend/resume. The accessors still work, but the timestamps |
| 602 | + * are frozen until time keeping is resumed which happens very early. |
| 603 | + * |
| 604 | + * For regular suspend/resume there is no observable difference vs. sched |
| 605 | + * clock, but it might affect some of the nasty low level debug printks. |
| 606 | + * |
| 607 | + * OTOH, access to sched clock is not guaranteed accross suspend/resume on |
| 608 | + * all systems either so it depends on the hardware in use. |
| 609 | + * |
| 610 | + * If that turns out to be a real problem then this could be mitigated by |
| 611 | + * using sched clock in a similar way as during early boot. But it's not as |
| 612 | + * trivial as on early boot because it needs some careful protection |
| 613 | + * against the clock monotonic timestamp jumping backwards on resume. |
| 614 | + */ |
| 615 | +void ktime_get_fast_timestamps(struct ktime_timestamps *snapshot) |
| 616 | +{ |
| 617 | + struct timekeeper *tk = &tk_core.timekeeper; |
| 618 | + |
| 619 | + snapshot->real = __ktime_get_real_fast(&tk_fast_mono, &snapshot->mono); |
| 620 | + snapshot->boot = snapshot->mono + ktime_to_ns(data_race(tk->offs_boot)); |
| 621 | +} |
550 | 622 | |
551 | 623 | /** |
552 | 624 | * halt_fast_timekeeper - Prevent fast timekeeper from accessing clocksource. |
.. | .. |
730 | 802 | void ktime_get_real_ts64(struct timespec64 *ts) |
731 | 803 | { |
732 | 804 | struct timekeeper *tk = &tk_core.timekeeper; |
733 | | - unsigned long seq; |
| 805 | + unsigned int seq; |
734 | 806 | u64 nsecs; |
735 | 807 | |
736 | 808 | WARN_ON(timekeeping_suspended); |
.. | .. |
840 | 912 | ktime_t ktime_mono_to_any(ktime_t tmono, enum tk_offsets offs) |
841 | 913 | { |
842 | 914 | ktime_t *offset = offsets[offs]; |
843 | | - unsigned long seq; |
| 915 | + unsigned int seq; |
844 | 916 | ktime_t tconv; |
845 | 917 | |
846 | 918 | do { |
.. | .. |
957 | 1029 | * but without the sequence counter protect. This internal function |
958 | 1030 | * is called just when timekeeping lock is already held. |
959 | 1031 | */ |
960 | | -time64_t __ktime_get_real_seconds(void) |
| 1032 | +noinstr time64_t __ktime_get_real_seconds(void) |
961 | 1033 | { |
962 | 1034 | struct timekeeper *tk = &tk_core.timekeeper; |
963 | 1035 | |
.. | .. |
971 | 1043 | void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot) |
972 | 1044 | { |
973 | 1045 | struct timekeeper *tk = &tk_core.timekeeper; |
974 | | - unsigned long seq; |
| 1046 | + unsigned int seq; |
975 | 1047 | ktime_t base_raw; |
976 | 1048 | ktime_t base_real; |
977 | 1049 | u64 nsec_raw; |
.. | .. |
1132 | 1204 | ktime_t base_real, base_raw; |
1133 | 1205 | u64 nsec_real, nsec_raw; |
1134 | 1206 | u8 cs_was_changed_seq; |
1135 | | - unsigned long seq; |
| 1207 | + unsigned int seq; |
1136 | 1208 | bool do_interp; |
1137 | 1209 | int ret; |
1138 | 1210 | |
.. | .. |
1258 | 1330 | |
1259 | 1331 | /* signal hrtimers about time change */ |
1260 | 1332 | clock_was_set(); |
| 1333 | + |
| 1334 | + if (!ret) { |
| 1335 | + audit_tk_injoffset(ts_delta); |
| 1336 | + add_device_randomness(ts, sizeof(*ts)); |
| 1337 | + } |
1261 | 1338 | |
1262 | 1339 | return ret; |
1263 | 1340 | } |
.. | .. |
1418 | 1495 | void ktime_get_raw_ts64(struct timespec64 *ts) |
1419 | 1496 | { |
1420 | 1497 | struct timekeeper *tk = &tk_core.timekeeper; |
1421 | | - unsigned long seq; |
| 1498 | + unsigned int seq; |
1422 | 1499 | u64 nsecs; |
1423 | 1500 | |
1424 | 1501 | do { |
.. | .. |
1440 | 1517 | int timekeeping_valid_for_hres(void) |
1441 | 1518 | { |
1442 | 1519 | struct timekeeper *tk = &tk_core.timekeeper; |
1443 | | - unsigned long seq; |
| 1520 | + unsigned int seq; |
1444 | 1521 | int ret; |
1445 | 1522 | |
1446 | 1523 | do { |
.. | .. |
1459 | 1536 | u64 timekeeping_max_deferment(void) |
1460 | 1537 | { |
1461 | 1538 | struct timekeeper *tk = &tk_core.timekeeper; |
1462 | | - unsigned long seq; |
| 1539 | + unsigned int seq; |
1463 | 1540 | u64 ret; |
1464 | 1541 | |
1465 | 1542 | do { |
.. | .. |
1473 | 1550 | } |
1474 | 1551 | |
1475 | 1552 | /** |
1476 | | - * read_persistent_clock - Return time from the persistent clock. |
| 1553 | + * read_persistent_clock64 - Return time from the persistent clock. |
1477 | 1554 | * |
1478 | 1555 | * Weak dummy function for arches that do not yet support it. |
1479 | 1556 | * Reads the time from the battery backed persistent clock. |
.. | .. |
1481 | 1558 | * |
1482 | 1559 | * XXX - Do be sure to remove it once all arches implement it. |
1483 | 1560 | */ |
1484 | | -void __weak read_persistent_clock(struct timespec *ts) |
| 1561 | +void __weak read_persistent_clock64(struct timespec64 *ts) |
1485 | 1562 | { |
1486 | 1563 | ts->tv_sec = 0; |
1487 | 1564 | ts->tv_nsec = 0; |
1488 | | -} |
1489 | | - |
1490 | | -void __weak read_persistent_clock64(struct timespec64 *ts64) |
1491 | | -{ |
1492 | | - struct timespec ts; |
1493 | | - |
1494 | | - read_persistent_clock(&ts); |
1495 | | - *ts64 = timespec_to_timespec64(ts); |
1496 | 1565 | } |
1497 | 1566 | |
1498 | 1567 | /** |
.. | .. |
2009 | 2078 | * logarithmic_accumulation - shifted accumulation of cycles |
2010 | 2079 | * |
2011 | 2080 | * This functions accumulates a shifted interval of cycles into |
2012 | | - * into a shifted interval nanoseconds. Allows for O(log) accumulation |
| 2081 | + * a shifted interval nanoseconds. Allows for O(log) accumulation |
2013 | 2082 | * loop. |
2014 | 2083 | * |
2015 | 2084 | * Returns the unconsumed cycles. |
.. | .. |
2167 | 2236 | void ktime_get_coarse_real_ts64(struct timespec64 *ts) |
2168 | 2237 | { |
2169 | 2238 | struct timekeeper *tk = &tk_core.timekeeper; |
2170 | | - unsigned long seq; |
| 2239 | + unsigned int seq; |
2171 | 2240 | |
2172 | 2241 | do { |
2173 | 2242 | seq = read_seqcount_begin(&tk_core.seq); |
.. | .. |
2181 | 2250 | { |
2182 | 2251 | struct timekeeper *tk = &tk_core.timekeeper; |
2183 | 2252 | struct timespec64 now, mono; |
2184 | | - unsigned long seq; |
| 2253 | + unsigned int seq; |
2185 | 2254 | |
2186 | 2255 | do { |
2187 | 2256 | seq = read_seqcount_begin(&tk_core.seq); |
.. | .. |
2201 | 2270 | void do_timer(unsigned long ticks) |
2202 | 2271 | { |
2203 | 2272 | jiffies_64 += ticks; |
2204 | | - calc_global_load(ticks); |
| 2273 | + calc_global_load(); |
2205 | 2274 | } |
2206 | 2275 | |
2207 | 2276 | /** |
.. | .. |
2251 | 2320 | /** |
2252 | 2321 | * timekeeping_validate_timex - Ensures the timex is ok for use in do_adjtimex |
2253 | 2322 | */ |
2254 | | -static int timekeeping_validate_timex(const struct timex *txc) |
| 2323 | +static int timekeeping_validate_timex(const struct __kernel_timex *txc) |
2255 | 2324 | { |
2256 | 2325 | if (txc->modes & ADJ_ADJTIME) { |
2257 | 2326 | /* singleshot must not be used with any other mode bits */ |
.. | .. |
2313 | 2382 | return 0; |
2314 | 2383 | } |
2315 | 2384 | |
| 2385 | +/** |
| 2386 | + * random_get_entropy_fallback - Returns the raw clock source value, |
| 2387 | + * used by random.c for platforms with no valid random_get_entropy(). |
| 2388 | + */ |
| 2389 | +unsigned long random_get_entropy_fallback(void) |
| 2390 | +{ |
| 2391 | + struct tk_read_base *tkr = &tk_core.timekeeper.tkr_mono; |
| 2392 | + struct clocksource *clock = READ_ONCE(tkr->clock); |
| 2393 | + |
| 2394 | + if (unlikely(timekeeping_suspended || !clock)) |
| 2395 | + return 0; |
| 2396 | + return clock->read(clock); |
| 2397 | +} |
| 2398 | +EXPORT_SYMBOL_GPL(random_get_entropy_fallback); |
2316 | 2399 | |
2317 | 2400 | /** |
2318 | 2401 | * do_adjtimex() - Accessor function to NTP __do_adjtimex function |
2319 | 2402 | */ |
2320 | | -int do_adjtimex(struct timex *txc) |
| 2403 | +int do_adjtimex(struct __kernel_timex *txc) |
2321 | 2404 | { |
2322 | 2405 | struct timekeeper *tk = &tk_core.timekeeper; |
| 2406 | + struct audit_ntp_data ad; |
2323 | 2407 | unsigned long flags; |
2324 | 2408 | struct timespec64 ts; |
2325 | 2409 | s32 orig_tai, tai; |
.. | .. |
2329 | 2413 | ret = timekeeping_validate_timex(txc); |
2330 | 2414 | if (ret) |
2331 | 2415 | return ret; |
| 2416 | + add_device_randomness(txc, sizeof(*txc)); |
2332 | 2417 | |
2333 | 2418 | if (txc->modes & ADJ_SETOFFSET) { |
2334 | 2419 | struct timespec64 delta; |
.. | .. |
2339 | 2424 | ret = timekeeping_inject_offset(&delta); |
2340 | 2425 | if (ret) |
2341 | 2426 | return ret; |
| 2427 | + |
| 2428 | + audit_tk_injoffset(delta); |
2342 | 2429 | } |
2343 | 2430 | |
| 2431 | + audit_ntp_init(&ad); |
| 2432 | + |
2344 | 2433 | ktime_get_real_ts64(&ts); |
| 2434 | + add_device_randomness(&ts, sizeof(ts)); |
2345 | 2435 | |
2346 | 2436 | raw_spin_lock_irqsave(&timekeeper_lock, flags); |
2347 | 2437 | write_seqcount_begin(&tk_core.seq); |
2348 | 2438 | |
2349 | 2439 | orig_tai = tai = tk->tai_offset; |
2350 | | - ret = __do_adjtimex(txc, &ts, &tai); |
| 2440 | + ret = __do_adjtimex(txc, &ts, &tai, &ad); |
2351 | 2441 | |
2352 | 2442 | if (tai != orig_tai) { |
2353 | 2443 | __timekeeping_set_tai_offset(tk, tai); |
.. | .. |
2357 | 2447 | |
2358 | 2448 | write_seqcount_end(&tk_core.seq); |
2359 | 2449 | raw_spin_unlock_irqrestore(&timekeeper_lock, flags); |
| 2450 | + |
| 2451 | + audit_ntp_log(&ad); |
2360 | 2452 | |
2361 | 2453 | /* Update the multiplier immediately if frequency was set directly */ |
2362 | 2454 | if (txc->modes & (ADJ_FREQUENCY | ADJ_TICK)) |
.. | .. |
2397 | 2489 | */ |
2398 | 2490 | void xtime_update(unsigned long ticks) |
2399 | 2491 | { |
2400 | | - write_seqlock(&jiffies_lock); |
| 2492 | + raw_spin_lock(&jiffies_lock); |
| 2493 | + write_seqcount_begin(&jiffies_seq); |
2401 | 2494 | do_timer(ticks); |
2402 | | - write_sequnlock(&jiffies_lock); |
| 2495 | + write_seqcount_end(&jiffies_seq); |
| 2496 | + raw_spin_unlock(&jiffies_lock); |
2403 | 2497 | update_wall_time(); |
2404 | 2498 | } |
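
Among other things, the hunks above add ktime_get_fast_timestamps(), an NMI-safe helper that fills in monotonic, boot and real timestamps in a single read. The sketch below is not part of the patch; it is a minimal, hypothetical example of how a caller might use the new helper, assuming the struct ktime_timestamps layout (mono, boot, real, in nanoseconds) declared in <linux/timekeeping.h>.

#include <linux/kernel.h>
#include <linux/timekeeping.h>

/*
 * Hypothetical debug helper, not part of the diff above. The timestamp
 * read itself is NMI safe; the values are nanoseconds and carry the
 * accuracy caveats spelled out in the kerneldoc comment added above.
 */
static void dump_timestamps(void)
{
	struct ktime_timestamps snap;

	ktime_get_fast_timestamps(&snap);
	pr_info("mono=%llu boot=%llu real=%llu\n",
		snap.mono, snap.boot, snap.real);
}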
---|