2024-01-05 071106ecf68c401173c58808b1cf5f68cc50d390
kernel/kernel/time/vsyscall.c
@@ -13,6 +13,8 @@
 #include <vdso/helpers.h>
 #include <vdso/vsyscall.h>
 
+#include "timekeeping_internal.h"
+
 static inline void update_vdso_data(struct vdso_data *vdata,
				    struct timekeeper *tk)
 {
@@ -27,11 +29,6 @@
 	vdata[CS_RAW].mask = tk->tkr_raw.mask;
 	vdata[CS_RAW].mult = tk->tkr_raw.mult;
 	vdata[CS_RAW].shift = tk->tkr_raw.shift;
-
-	/* CLOCK_REALTIME */
-	vdso_ts = &vdata[CS_HRES_COARSE].basetime[CLOCK_REALTIME];
-	vdso_ts->sec = tk->xtime_sec;
-	vdso_ts->nsec = tk->tkr_mono.xtime_nsec;
 
 	/* CLOCK_MONOTONIC */
 	vdso_ts = &vdata[CS_HRES_COARSE].basetime[CLOCK_MONOTONIC];
@@ -70,33 +67,26 @@
 	vdso_ts = &vdata[CS_HRES_COARSE].basetime[CLOCK_TAI];
 	vdso_ts->sec = tk->xtime_sec + (s64)tk->tai_offset;
 	vdso_ts->nsec = tk->tkr_mono.xtime_nsec;
-
-	/*
-	 * Read without the seqlock held by clock_getres().
-	 * Note: No need to have a second copy.
-	 */
-	WRITE_ONCE(vdata[CS_HRES_COARSE].hrtimer_res, hrtimer_resolution);
 }
 
 void update_vsyscall(struct timekeeper *tk)
 {
 	struct vdso_data *vdata = __arch_get_k_vdso_data();
 	struct vdso_timestamp *vdso_ts;
+	s32 clock_mode;
 	u64 nsec;
-
-	if (__arch_update_vdso_data()) {
-		/*
-		 * Some architectures might want to skip the update of the
-		 * data page.
-		 */
-		return;
-	}
 
 	/* copy vsyscall data */
 	vdso_write_begin(vdata);
 
-	vdata[CS_HRES_COARSE].clock_mode = __arch_get_clock_mode(tk);
-	vdata[CS_RAW].clock_mode = __arch_get_clock_mode(tk);
+	clock_mode = tk->tkr_mono.clock->vdso_clock_mode;
+	vdata[CS_HRES_COARSE].clock_mode = clock_mode;
+	vdata[CS_RAW].clock_mode = clock_mode;
+
+	/* CLOCK_REALTIME also required for time() */
+	vdso_ts = &vdata[CS_HRES_COARSE].basetime[CLOCK_REALTIME];
+	vdso_ts->sec = tk->xtime_sec;
+	vdso_ts->nsec = tk->tkr_mono.xtime_nsec;
 
 	/* CLOCK_REALTIME_COARSE */
 	vdso_ts = &vdata[CS_HRES_COARSE].basetime[CLOCK_REALTIME_COARSE];
@@ -110,7 +100,18 @@
 	nsec = nsec + tk->wall_to_monotonic.tv_nsec;
 	vdso_ts->sec += __iter_div_u64_rem(nsec, NSEC_PER_SEC, &vdso_ts->nsec);
 
-	update_vdso_data(vdata, tk);
+	/*
+	 * Read without the seqlock held by clock_getres().
+	 * Note: No need to have a second copy.
+	 */
+	WRITE_ONCE(vdata[CS_HRES_COARSE].hrtimer_res, hrtimer_resolution);
+
+	/*
+	 * If the current clocksource is not VDSO capable, then spare the
+	 * update of the high resolution parts.
+	 */
+	if (clock_mode != VDSO_CLOCKMODE_NONE)
+		update_vdso_data(vdata, tk);
 
 	__arch_update_vsyscall(vdata, tk);
 
@@ -128,3 +129,42 @@
 
 	__arch_sync_vdso_data(vdata);
 }
+
+/**
+ * vdso_update_begin - Start of a VDSO update section
+ *
+ * Allows architecture code to safely update the architecture specific VDSO
+ * data. Disables interrupts, acquires timekeeper lock to serialize against
+ * concurrent updates from timekeeping and invalidates the VDSO data
+ * sequence counter to prevent concurrent readers from accessing
+ * inconsistent data.
+ *
+ * Returns: Saved interrupt flags which need to be handed in to
+ * vdso_update_end().
+ */
+unsigned long vdso_update_begin(void)
+{
+	struct vdso_data *vdata = __arch_get_k_vdso_data();
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&timekeeper_lock, flags);
+	vdso_write_begin(vdata);
+	return flags;
+}
+
+/**
+ * vdso_update_end - End of a VDSO update section
+ * @flags:	Interrupt flags as returned from vdso_update_begin()
+ *
+ * Pairs with vdso_update_begin(). Marks vdso data consistent, invokes data
+ * synchronization if the architecture requires it, drops timekeeper lock
+ * and restores interrupt flags.
+ */
+void vdso_update_end(unsigned long flags)
+{
+	struct vdso_data *vdata = __arch_get_k_vdso_data();
+
+	vdso_write_end(vdata);
+	__arch_sync_vdso_data(vdata);
+	raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
+}
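
For reference, a minimal sketch of how architecture code might use the new update section. Only vdso_update_begin(), vdso_update_end() and __arch_get_k_vdso_data() come from the patch above; arch_refresh_vdso_data() and arch_fill_vdso_data() are hypothetical names used purely for illustration.

/*
 * Hypothetical usage sketch, not part of the patch:
 * arch_refresh_vdso_data() and arch_fill_vdso_data() are made-up helpers.
 */
static void arch_refresh_vdso_data(void)
{
	struct vdso_data *vdata = __arch_get_k_vdso_data();
	unsigned long flags;

	/* Invalidate the vDSO sequence count and lock out timekeeping updates */
	flags = vdso_update_begin();

	/* Architecture specific vDSO data can be updated safely here */
	arch_fill_vdso_data(vdata);	/* hypothetical helper */

	/* Mark the data consistent again and restore the saved interrupt flags */
	vdso_update_end(flags);
}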