| .. | .. |
|  | 1 | +// SPDX-License-Identifier: GPL-2.0
| 1 | 2 |  /*
| 2 |  | - * sched_clock.c: Generic sched_clock() support, to extend low level
| 3 |  | - *                hardware time counters to full 64-bit ns values.
| 4 |  | - *
| 5 |  | - * This program is free software; you can redistribute it and/or modify
| 6 |  | - * it under the terms of the GNU General Public License version 2 as
| 7 |  | - * published by the Free Software Foundation.
|  | 3 | + * Generic sched_clock() support, to extend low level hardware time
|  | 4 | + * counters to full 64-bit ns values.
| 8 | 5 |   */
| 9 | 6 |  #include <linux/clocksource.h>
| 10 | 7 |  #include <linux/init.h>
| .. | .. |
| 19 | 16 |  #include <linux/sched_clock.h>
| 20 | 17 |  #include <linux/seqlock.h>
| 21 | 18 |  #include <linux/bitops.h>
|  | 19 | +#include <trace/hooks/epoch.h>
| 22 | 20 |
| 23 |  | -/**
| 24 |  | - * struct clock_read_data - data required to read from sched_clock()
| 25 |  | - *
| 26 |  | - * @epoch_ns:		sched_clock() value at last update
| 27 |  | - * @epoch_cyc:		Clock cycle value at last update.
| 28 |  | - * @sched_clock_mask:   Bitmask for two's complement subtraction of non 64bit
| 29 |  | - *			clocks.
| 30 |  | - * @read_sched_clock:	Current clock source (or dummy source when suspended).
| 31 |  | - * @mult:		Multipler for scaled math conversion.
| 32 |  | - * @shift:		Shift value for scaled math conversion.
| 33 |  | - *
| 34 |  | - * Care must be taken when updating this structure; it is read by
| 35 |  | - * some very hot code paths. It occupies <=40 bytes and, when combined
| 36 |  | - * with the seqcount used to synchronize access, comfortably fits into
| 37 |  | - * a 64 byte cache line.
| 38 |  | - */
| 39 |  | -struct clock_read_data {
| 40 |  | -	u64 epoch_ns;
| 41 |  | -	u64 epoch_cyc;
| 42 |  | -	u64 sched_clock_mask;
| 43 |  | -	u64 (*read_sched_clock)(void);
| 44 |  | -	u32 mult;
| 45 |  | -	u32 shift;
| 46 |  | -};
|  | 21 | +#include "timekeeping.h"
| 47 | 22 |
| 48 | 23 |  /**
| 49 | 24 |   * struct clock_data - all data needed for sched_clock() (including
| .. | .. |
| 61 | 36 |   * into a single 64-byte cache line.
| 62 | 37 |   */
| 63 | 38 |  struct clock_data {
| 64 |  | -	seqcount_t		seq;
|  | 39 | +	seqcount_latch_t	seq;
| 65 | 40 |  	struct clock_read_data	read_data[2];
| 66 | 41 |  	ktime_t			wrap_kt;
| 67 | 42 |  	unsigned long		rate;
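The switch from `seqcount_t` to `seqcount_latch_t` above pairs the read side with a latched update scheme in which writers flip between the two `read_data` copies. A minimal sketch of that write-side pattern, assuming a pointer to the clock data (this is a generic illustration of the latch technique, not the file's actual update helper, which is outside this hunk; the function name is hypothetical):

```c
/*
 * Illustrative sketch of the seqcount-latch write pattern assumed by the
 * readers below; names are hypothetical and not taken from this file.
 */
static void example_latch_update(struct clock_data *cd,
				 const struct clock_read_data *rd_new)
{
	cd->read_data[1] = *rd_new;		/* update the odd (backup) copy */
	raw_write_seqcount_latch(&cd->seq);	/* steer readers to the odd copy */
	cd->read_data[0] = *rd_new;		/* now update the even copy */
	raw_write_seqcount_latch(&cd->seq);	/* switch readers back to even */
}
```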
| .. | .. |
| 94 | 69 |  	return (cyc * mult) >> shift;
| 95 | 70 |  }
| 96 | 71 |
|  | 72 | +notrace struct clock_read_data *sched_clock_read_begin(unsigned int *seq)
|  | 73 | +{
|  | 74 | +	*seq = raw_read_seqcount_latch(&cd.seq);
|  | 75 | +	return cd.read_data + (*seq & 1);
|  | 76 | +}
|  | 77 | +
|  | 78 | +notrace int sched_clock_read_retry(unsigned int seq)
|  | 79 | +{
|  | 80 | +	return read_seqcount_latch_retry(&cd.seq, seq);
|  | 81 | +}
|  | 82 | +
| 97 | 83 |  unsigned long long notrace sched_clock(void)
| 98 | 84 |  {
| 99 | 85 |  	u64 cyc, res;
| 100 |  | -	unsigned long seq;
|  | 86 | +	unsigned int seq;
| 101 | 87 |  	struct clock_read_data *rd;
| 102 | 88 |
| 103 | 89 |  	do {
| 104 |  | -		seq = raw_read_seqcount(&cd.seq);
| 105 |  | -		rd = cd.read_data + (seq & 1);
|  | 90 | +		rd = sched_clock_read_begin(&seq);
| 106 | 91 |
| 107 | 92 |  		cyc = (rd->read_sched_clock() - rd->epoch_cyc) &
| 108 | 93 |  		      rd->sched_clock_mask;
| 109 | 94 |  		res = rd->epoch_ns + cyc_to_ns(cyc, rd->mult, rd->shift);
| 110 |  | -	} while (read_seqcount_retry(&cd.seq, seq));
|  | 95 | +	} while (sched_clock_read_retry(seq));
| 111 | 96 |
| 112 | 97 |  	return res;
| 113 | 98 |  }
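sched_clock_read_begin()/sched_clock_read_retry() expose the same latch read loop that sched_clock() uses, so code outside this file can sample the clock parameters consistently. A hedged sketch of such a caller (the function name is hypothetical; the real users are elsewhere in the tree):

```c
/*
 * Hypothetical out-of-file consumer of the helpers added above: takes a
 * consistent snapshot of the current epoch values under the latch seqcount.
 */
static u64 example_epoch_snapshot(u64 *epoch_cyc)
{
	struct clock_read_data *rd;
	unsigned int seq;
	u64 epoch_ns;

	do {
		rd = sched_clock_read_begin(&seq);
		epoch_ns   = rd->epoch_ns;
		*epoch_cyc = rd->epoch_cyc;
	} while (sched_clock_read_retry(seq));

	return epoch_ns;
}
```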
| .. | .. |
| 165 | 150 |  	return HRTIMER_RESTART;
| 166 | 151 |  }
| 167 | 152 |
| 168 |  | -void __init
| 169 |  | -sched_clock_register(u64 (*read)(void), int bits, unsigned long rate)
|  | 153 | +void sched_clock_register(u64 (*read)(void), int bits, unsigned long rate)
| 170 | 154 |  {
| 171 | 155 |  	u64 res, wrap, new_mask, new_epoch, cyc, ns;
| 172 | 156 |  	u32 new_mult, new_shift;
| 173 |  | -	unsigned long r;
|  | 157 | +	unsigned long r, flags;
| 174 | 158 |  	char r_unit;
| 175 | 159 |  	struct clock_read_data rd;
| 176 | 160 |
| 177 | 161 |  	if (cd.rate > rate)
| 178 | 162 |  		return;
| 179 | 163 |
| 180 |  | -	WARN_ON(!irqs_disabled());
|  | 164 | +	/* Cannot register a sched_clock with interrupts on */
|  | 165 | +	local_irq_save(flags);
| 181 | 166 |
| 182 | 167 |  	/* Calculate the mult/shift to convert counter ticks to ns. */
| 183 | 168 |  	clocks_calc_mult_shift(&new_mult, &new_shift, rate, NSEC_PER_SEC, 3600);
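clocks_calc_mult_shift() picks a mult/shift pair so that cyc_to_ns()'s fixed-point expression `(cyc * mult) >> shift` approximates `cyc * NSEC_PER_SEC / rate`. A worked example with made-up values, assuming a 1 MHz counter (1 tick = 1000 ns); real values come from clocks_calc_mult_shift():

```c
/*
 * Illustrative only: a mult/shift pair for a hypothetical 1 MHz counter,
 * showing how the scaled multiply-shift recovers nanoseconds.
 */
static inline u64 example_cyc_to_ns_1mhz(u64 cyc)
{
	const u32 mult = 1000 << 10;	/* 1000 ns/tick, scaled by 2^10 */
	const u32 shift = 10;

	return (cyc * mult) >> shift;	/* == cyc * 1000 */
}
```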
| .. | .. |
| 208 | 193 |
| 209 | 194 |  	if (sched_clock_timer.function != NULL) {
| 210 | 195 |  		/* update timeout for clock wrap */
| 211 |  | -		hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL);
|  | 196 | +		hrtimer_start(&sched_clock_timer, cd.wrap_kt,
|  | 197 | +			      HRTIMER_MODE_REL_HARD);
| 212 | 198 |  	}
| 213 | 199 |
| 214 | 200 |  	r = rate;
| .. | .. |
| 234 | 220 |  	if (irqtime > 0 || (irqtime == -1 && rate >= 1000000))
| 235 | 221 |  		enable_sched_clock_irqtime();
| 236 | 222 |
| 237 |  | -	pr_debug("Registered %pF as sched_clock source\n", read);
|  | 223 | +	local_irq_restore(flags);
|  | 224 | +
|  | 225 | +	pr_debug("Registered %pS as sched_clock source\n", read);
| 238 | 226 |  }
|  | 227 | +EXPORT_SYMBOL_GPL(sched_clock_register);
| 239 | 228 |
| 240 | 229 |  void __init generic_sched_clock_init(void)
| 241 | 230 |  {
| 242 | 231 |  	/*
| 243 | 232 |  	 * If no sched_clock() function has been provided at that point,
| 244 |  | -	 * make it the final one one.
|  | 233 | +	 * make it the final one.
| 245 | 234 |  	 */
| 246 | 235 |  	if (cd.actual_read_sched_clock == jiffy_sched_clock_read)
| 247 | 236 |  		sched_clock_register(jiffy_sched_clock_read, BITS_PER_LONG, HZ);
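With sched_clock_register() no longer __init and now exported, a loadable driver can register its counter as the sched_clock source. A minimal sketch under assumed conditions (the MMIO mapping, 32-bit counter width, and rate are assumptions for illustration, not part of this patch):

```c
#include <linux/io.h>
#include <linux/sched_clock.h>

static void __iomem *my_counter_base;	/* hypothetical free-running counter */

static u64 notrace my_counter_read(void)
{
	return readl_relaxed(my_counter_base);	/* 32-bit up-counter */
}

static void my_counter_setup(void __iomem *base, unsigned long rate_hz)
{
	my_counter_base = base;
	/* 32 valid bits; wrap is handled by the sched_clock hrtimer */
	sched_clock_register(my_counter_read, 32, rate_hz);
}
```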
| .. | .. |
| 252 | 241 |  	 * Start the timer to keep sched_clock() properly updated and
| 253 | 242 |  	 * sets the initial epoch.
| 254 | 243 |  	 */
| 255 |  | -	hrtimer_init(&sched_clock_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
|  | 244 | +	hrtimer_init(&sched_clock_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
| 256 | 245 |  	sched_clock_timer.function = sched_clock_poll;
| 257 |  | -	hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL);
|  | 246 | +	hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL_HARD);
| 258 | 247 |  }
| 259 | 248 |
| 260 | 249 |  /*
| .. | .. |
| 270 | 259 |   */
| 271 | 260 |  static u64 notrace suspended_sched_clock_read(void)
| 272 | 261 |  {
| 273 |  | -	unsigned long seq = raw_read_seqcount(&cd.seq);
|  | 262 | +	unsigned int seq = raw_read_seqcount_latch(&cd.seq);
| 274 | 263 |
| 275 | 264 |  	return cd.read_data[seq & 1].epoch_cyc;
| 276 | 265 |  }
| .. | .. |
| 282 | 271 |  	update_sched_clock();
| 283 | 272 |  	hrtimer_cancel(&sched_clock_timer);
| 284 | 273 |  	rd->read_sched_clock = suspended_sched_clock_read;
|  | 274 | +	trace_android_vh_show_suspend_epoch_val(rd->epoch_ns, rd->epoch_cyc);
| 285 | 275 |
| 286 | 276 |  	return 0;
| 287 | 277 |  }
| .. | .. |
| 291 | 281 |  	struct clock_read_data *rd = &cd.read_data[0];
| 292 | 282 |
| 293 | 283 |  	rd->epoch_cyc = cd.actual_read_sched_clock();
| 294 |  | -	hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL);
|  | 284 | +	hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL_HARD);
| 295 | 285 |  	rd->read_sched_clock = cd.actual_read_sched_clock;
|  | 286 | +	trace_android_vh_show_resume_epoch_val(rd->epoch_cyc);
| 296 | 287 |  }
| 297 | 288 |
| 298 | 289 |  static struct syscore_ops sched_clock_ops = {