| Old | New | Code |
|---|---|---|
| .. | .. |  |
| 18 | 18 | #include <asm/unistd.h> |
| 19 | 19 | #include <asm/msr.h> |
| 20 | 20 | #include <asm/pvclock.h> |
| 21 |  | -#include <asm/mshyperv.h> |
| 22 |  | -#include <linux/compat_time.h> |
|  | 21 | +#include <clocksource/hyperv_timer.h> |
| 23 | 22 |  |
| 24 | 23 | #define __vdso_data (VVAR(_vdso_data)) |
|  | 24 | +#define __timens_vdso_data (TIMENS(_vdso_data)) |
| 25 | 25 |  |
| 26 | 26 | #define VDSO_HAS_TIME 1 |
| 27 | 27 |  |
| .. | .. |  |
| 52 | 52 | 	__attribute__((visibility("hidden"))); |
| 53 | 53 | #endif |
| 54 | 54 |  |
| 55 |  | -#ifdef CONFIG_HYPERV_TSCPAGE |
|  | 55 | +#ifdef CONFIG_HYPERV_TIMER |
| 56 | 56 | extern struct ms_hyperv_tsc_page hvclock_page |
| 57 | 57 | 	__attribute__((visibility("hidden"))); |
|  | 58 | +#endif |
|  | 59 | + |
|  | 60 | +#ifdef CONFIG_TIME_NS |
|  | 61 | +static __always_inline const struct vdso_data *__arch_get_timens_vdso_data(void) |
|  | 62 | +{ |
|  | 63 | +	return __timens_vdso_data; |
|  | 64 | +} |
| 58 | 65 | #endif |
| 59 | 66 |  |
| 60 | 67 | #ifndef BUILD_VDSO32 |
| .. | .. |  |
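The new CONFIG_TIME_NS hook only exposes the time-namespace copy of the vDSO data page; deciding when to use it is left to the generic code. Below is a minimal sketch of that selection, assuming the generic VDSO_CLOCKMODE_TIMENS marker; the helper name example_select_vdso_data() is invented for illustration and is not part of this patch.

```c
/*
 * Illustrative sketch only, not the actual lib/vdso code: pick the
 * time-namespace view of the vDSO data when the VVAR page marks the
 * task as running inside a time namespace, otherwise keep the normal
 * page.  example_select_vdso_data() is a made-up name.
 */
#ifdef CONFIG_TIME_NS
static __always_inline const struct vdso_data *
example_select_vdso_data(const struct vdso_data *vd)
{
	if (vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
		return __arch_get_timens_vdso_data();
	return vd;
}
#endif
```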
| Old | New | Code |
|---|---|---|
| 97 | 104 |  |
| 98 | 105 | #else |
| 99 | 106 |  |
| 100 |  | -#define VDSO_HAS_32BIT_FALLBACK	1 |
| 101 |  | - |
| 102 | 107 | static __always_inline |
| 103 | 108 | long clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts) |
| 104 | 109 | { |
| .. | .. |  |
| 110 | 115 | 		"call __kernel_vsyscall \n" |
| 111 | 116 | 		"mov %%edx, %%ebx \n" |
| 112 | 117 | 		: "=a" (ret), "=m" (*_ts) |
| 113 |  | -		: "0" (__NR_clock_gettime), [clock] "g" (_clkid), "c" (_ts) |
|  | 118 | +		: "0" (__NR_clock_gettime64), [clock] "g" (_clkid), "c" (_ts) |
| 114 | 119 | 		: "edx"); |
| 115 | 120 |  |
| 116 | 121 | 	return ret; |
| .. | .. |  |
| 162 | 167 | 		"call __kernel_vsyscall \n" |
| 163 | 168 | 		"mov %%edx, %%ebx \n" |
| 164 | 169 | 		: "=a" (ret), "=m" (*_ts) |
| 165 |  | -		: "0" (__NR_clock_getres), [clock] "g" (_clkid), "c" (_ts) |
|  | 170 | +		: "0" (__NR_clock_getres_time64), [clock] "g" (_clkid), "c" (_ts) |
| 166 | 171 | 		: "edx"); |
| 167 | 172 |  |
| 168 | 173 | 	return ret; |
| .. | .. |  |
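With these two hunks the 32-bit fallbacks invoke the time64 syscalls, so both the vDSO path and the syscall path fill the same 64-bit struct __kernel_timespec. A rough userspace equivalent of that fallback, assuming glibc's syscall() wrapper and the UAPI headers named below, looks like this:

```c
/*
 * Hedged userspace sketch: a direct clock_gettime64 syscall filling a
 * 64-bit __kernel_timespec, which is what the 32-bit fallback above
 * boils down to.  Not wired to the vDSO; it only shows the ABI the
 * fallback targets.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <time.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/time_types.h>

int main(void)
{
	struct __kernel_timespec ts;
	long ret;

#ifdef __NR_clock_gettime64
	/* 32-bit ABIs: the dedicated time64 syscall used by the fallback. */
	ret = syscall(__NR_clock_gettime64, CLOCK_MONOTONIC, &ts);
#else
	/* 64-bit ABIs: plain clock_gettime already returns 64-bit time. */
	ret = syscall(__NR_clock_gettime, CLOCK_MONOTONIC, &ts);
#endif
	if (ret)
		return 1;
	printf("%lld.%09lld\n", (long long)ts.tv_sec, (long long)ts.tv_nsec);
	return 0;
}
```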
| Old | New | Code |
|---|---|---|
| 229 | 234 | } |
| 230 | 235 | #endif |
| 231 | 236 |  |
| 232 |  | -#ifdef CONFIG_HYPERV_TSCPAGE |
|  | 237 | +#ifdef CONFIG_HYPERV_TIMER |
| 233 | 238 | static u64 vread_hvclock(void) |
| 234 | 239 | { |
| 235 | 240 | 	return hv_read_tsc_page(&hvclock_page); |
| 236 | 241 | } |
| 237 | 242 | #endif |
| 238 | 243 |  |
| 239 |  | -static inline u64 __arch_get_hw_counter(s32 clock_mode) |
|  | 244 | +static inline u64 __arch_get_hw_counter(s32 clock_mode, |
|  | 245 | +					const struct vdso_data *vd) |
| 240 | 246 | { |
| 241 |  | -	if (clock_mode == VCLOCK_TSC) |
|  | 247 | +	if (likely(clock_mode == VDSO_CLOCKMODE_TSC)) |
| 242 | 248 | 		return (u64)rdtsc_ordered(); |
| 243 | 249 | 	/* |
| 244 | 250 | 	 * For any memory-mapped vclock type, we need to make sure that gcc |
| .. | .. |  |
| 247 | 253 | 	 * question isn't enabled, which will segfault.  Hence the barriers. |
| 248 | 254 | 	 */ |
| 249 | 255 | #ifdef CONFIG_PARAVIRT_CLOCK |
| 250 |  | -	if (clock_mode == VCLOCK_PVCLOCK) { |
|  | 256 | +	if (clock_mode == VDSO_CLOCKMODE_PVCLOCK) { |
| 251 | 257 | 		barrier(); |
| 252 | 258 | 		return vread_pvclock(); |
| 253 | 259 | 	} |
| 254 | 260 | #endif |
| 255 |  | -#ifdef CONFIG_HYPERV_TSCPAGE |
| 256 |  | -	if (clock_mode == VCLOCK_HVCLOCK) { |
|  | 261 | +#ifdef CONFIG_HYPERV_TIMER |
|  | 262 | +	if (clock_mode == VDSO_CLOCKMODE_HVCLOCK) { |
| 257 | 263 | 		barrier(); |
| 258 | 264 | 		return vread_hvclock(); |
| 259 | 265 | 	} |
| .. | .. |  |
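vread_hvclock() above defers to hv_read_tsc_page(), which turns a raw TSC value into Hyper-V's 100 ns reference time using the scale and offset published in the TSC page. A simplified sketch of that conversion follows; the real helper also loops on the page's sequence number and bails out when the page is marked invalid, and the function name below is invented:

```c
#include <linux/math64.h>

/*
 * Simplified sketch of the reference-TSC-page conversion performed by
 * hv_read_tsc_page():  ref_time = ((tsc * tsc_scale) >> 64) + tsc_offset.
 * The sequence/validity handling of the real helper is omitted here.
 */
static inline u64 example_hv_tsc_to_ref_time(const struct ms_hyperv_tsc_page *tsc_pg,
					     u64 tsc)
{
	return mul_u64_u64_shr(tsc, tsc_pg->tsc_scale, 64) + tsc_pg->tsc_offset;
}
```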
| Old | New | Code |
|---|---|---|
| 266 | 272 | 	return __vdso_data; |
| 267 | 273 | } |
| 268 | 274 |  |
|  | 275 | +static inline bool arch_vdso_clocksource_ok(const struct vdso_data *vd) |
|  | 276 | +{ |
|  | 277 | +	return true; |
|  | 278 | +} |
|  | 279 | +#define vdso_clocksource_ok arch_vdso_clocksource_ok |
|  | 280 | + |
|  | 281 | +/* |
|  | 282 | + * Clocksource read value validation to handle PV and HyperV clocksources |
|  | 283 | + * which can be invalidated asynchronously and indicate invalidation by |
|  | 284 | + * returning U64_MAX, which can be effectively tested by checking for a |
|  | 285 | + * negative value after casting it to s64. |
|  | 286 | + */ |
|  | 287 | +static inline bool arch_vdso_cycles_ok(u64 cycles) |
|  | 288 | +{ |
|  | 289 | +	return (s64)cycles >= 0; |
|  | 290 | +} |
|  | 291 | +#define vdso_cycles_ok arch_vdso_cycles_ok |
|  | 292 | + |
| 269 | 293 | /* |
| 270 | 294 |  * x86 specific delta calculation. |
| 271 | 295 |  * |
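arch_vdso_cycles_ok() lets the generic read loop reject a counter value that a PV or Hyper-V clocksource invalidated mid-read (U64_MAX reads as negative once cast to s64). The sketch below shows how such a loop can consume __arch_get_hw_counter() and vdso_cycles_ok(); the seqcount helpers and the vdso_data field names are assumptions for illustration, not the exact lib/vdso code:

```c
/*
 * Illustrative sketch of a lockless high-resolution read: sample the
 * counter inside a sequence-retry loop and bail out to the syscall
 * fallback when vdso_cycles_ok() rejects the value.
 * example_read_begin()/example_read_retry() and the basetime/cycle_last/
 * mask/mult/shift fields stand in for the generic implementation.
 */
static __always_inline int example_do_hres(const struct vdso_data *vd, clockid_t clk,
					   struct __kernel_timespec *ts)
{
	u64 cycles, ns, sec;
	u32 seq;

	do {
		seq = example_read_begin(vd);
		cycles = __arch_get_hw_counter(vd->clock_mode, vd);
		if (unlikely(!vdso_cycles_ok(cycles)))
			return -1;	/* caller falls back to the syscall */
		ns = vd->basetime[clk].nsec;
		ns += ((cycles - vd->cycle_last) & vd->mask) * vd->mult;
		ns >>= vd->shift;
		sec = vd->basetime[clk].sec;
	} while (unlikely(example_read_retry(vd, seq)));

	ts->tv_sec = sec + __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
	ts->tv_nsec = ns;
	return 0;
}
```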