@@ -13,6 +13,8 @@
 #include <vdso/helpers.h>
 #include <vdso/vsyscall.h>
 
+#include "timekeeping_internal.h"
+
 static inline void update_vdso_data(struct vdso_data *vdata,
 				    struct timekeeper *tk)
 {
@@ -27,11 +29,6 @@
 	vdata[CS_RAW].mask = tk->tkr_raw.mask;
 	vdata[CS_RAW].mult = tk->tkr_raw.mult;
 	vdata[CS_RAW].shift = tk->tkr_raw.shift;
-
-	/* CLOCK_REALTIME */
-	vdso_ts = &vdata[CS_HRES_COARSE].basetime[CLOCK_REALTIME];
-	vdso_ts->sec = tk->xtime_sec;
-	vdso_ts->nsec = tk->tkr_mono.xtime_nsec;
 
 	/* CLOCK_MONOTONIC */
 	vdso_ts = &vdata[CS_HRES_COARSE].basetime[CLOCK_MONOTONIC];
@@ -70,33 +67,26 @@
 	vdso_ts = &vdata[CS_HRES_COARSE].basetime[CLOCK_TAI];
 	vdso_ts->sec = tk->xtime_sec + (s64)tk->tai_offset;
 	vdso_ts->nsec = tk->tkr_mono.xtime_nsec;
-
-	/*
-	 * Read without the seqlock held by clock_getres().
-	 * Note: No need to have a second copy.
-	 */
-	WRITE_ONCE(vdata[CS_HRES_COARSE].hrtimer_res, hrtimer_resolution);
 }
 
 void update_vsyscall(struct timekeeper *tk)
 {
 	struct vdso_data *vdata = __arch_get_k_vdso_data();
 	struct vdso_timestamp *vdso_ts;
+	s32 clock_mode;
 	u64 nsec;
-
-	if (__arch_update_vdso_data()) {
-		/*
-		 * Some architectures might want to skip the update of the
-		 * data page.
-		 */
-		return;
-	}
 
 	/* copy vsyscall data */
 	vdso_write_begin(vdata);
 
-	vdata[CS_HRES_COARSE].clock_mode = __arch_get_clock_mode(tk);
-	vdata[CS_RAW].clock_mode = __arch_get_clock_mode(tk);
+	clock_mode = tk->tkr_mono.clock->vdso_clock_mode;
+	vdata[CS_HRES_COARSE].clock_mode = clock_mode;
+	vdata[CS_RAW].clock_mode = clock_mode;
+
+	/* CLOCK_REALTIME also required for time() */
+	vdso_ts = &vdata[CS_HRES_COARSE].basetime[CLOCK_REALTIME];
+	vdso_ts->sec = tk->xtime_sec;
+	vdso_ts->nsec = tk->tkr_mono.xtime_nsec;
 
 	/* CLOCK_REALTIME_COARSE */
 	vdso_ts = &vdata[CS_HRES_COARSE].basetime[CLOCK_REALTIME_COARSE];
@@ -110,7 +100,18 @@
 	nsec = nsec + tk->wall_to_monotonic.tv_nsec;
 	vdso_ts->sec += __iter_div_u64_rem(nsec, NSEC_PER_SEC, &vdso_ts->nsec);
 
-	update_vdso_data(vdata, tk);
+	/*
+	 * Read without the seqlock held by clock_getres().
+	 * Note: No need to have a second copy.
+	 */
+	WRITE_ONCE(vdata[CS_HRES_COARSE].hrtimer_res, hrtimer_resolution);
+
+	/*
+	 * If the current clocksource is not VDSO capable, then spare the
+	 * update of the high resolution parts.
+	 */
+	if (clock_mode != VDSO_CLOCKMODE_NONE)
+		update_vdso_data(vdata, tk);
 
 	__arch_update_vsyscall(vdata, tk);
 
@@ -128,3 +129,42 @@
 
 	__arch_sync_vdso_data(vdata);
 }
+
+/**
+ * vdso_update_begin - Start of a VDSO update section
+ *
+ * Allows architecture code to safely update the architecture specific VDSO
+ * data. Disables interrupts, acquires timekeeper lock to serialize against
+ * concurrent updates from timekeeping and invalidates the VDSO data
+ * sequence counter to prevent concurrent readers from accessing
+ * inconsistent data.
+ *
+ * Returns: Saved interrupt flags which need to be handed in to
+ * vdso_update_end().
+ */
+unsigned long vdso_update_begin(void)
+{
+	struct vdso_data *vdata = __arch_get_k_vdso_data();
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&timekeeper_lock, flags);
+	vdso_write_begin(vdata);
+	return flags;
+}
+
+/**
+ * vdso_update_end - End of a VDSO update section
+ * @flags: Interrupt flags as returned from vdso_update_begin()
+ *
+ * Pairs with vdso_update_begin(). Marks vdso data consistent, invokes data
+ * synchronization if the architecture requires it, drops timekeeper lock
+ * and restores interrupt flags.
+ */
+void vdso_update_end(unsigned long flags)
+{
+	struct vdso_data *vdata = __arch_get_k_vdso_data();
+
+	vdso_write_end(vdata);
+	__arch_sync_vdso_data(vdata);
+	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
+}
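
The two helpers added at the end give architecture code a safe way to update its own part of the VDSO data page: vdso_update_begin() blocks concurrent timekeeping updates via timekeeper_lock (with interrupts disabled) and invalidates the data sequence count, and vdso_update_end() revalidates the count and performs the architecture-specific synchronization. A minimal write-side sketch, assuming a hypothetical architecture-private member arch_tod_delta (the generic struct vdso_data has no such field; the function name is likewise made up for illustration):

	/* Sketch only: publish an architecture-private value into the
	 * VDSO data page. arch_tod_delta is a hypothetical member. */
	static void arch_vdso_publish_tod_delta(u64 delta)
	{
		struct vdso_data *vdata = __arch_get_k_vdso_data();
		unsigned long flags;

		flags = vdso_update_begin();	/* irqs off, seq count odd */
		vdata->arch_tod_delta = delta;	/* hypothetical field */
		vdso_update_end(flags);		/* seq count even again */
	}

Because timekeeper_lock is held with interrupts disabled, the section between the two calls must stay short and must not sleep. On the read side, the generic vDSO code pairs with this through vdso_read_begin()/vdso_read_retry() from <vdso/helpers.h>: readers wait for an even sequence count, snapshot the data and retry if the count changed underneath them:

	do {
		seq = vdso_read_begin(vd);
		/* ... snapshot vd-> fields ... */
	} while (unlikely(vdso_read_retry(vd, seq)));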