+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * menu.c - the menu idle governor
  *
..
  * Copyright (C) 2009 Intel Corporation
  * Author:
  *        Arjan van de Ven <arjan@linux.intel.com>
- *
- * This code is licenced under the GPL version 2 as described
- * in the COPYING file that acompanies the Linux Kernel.
  */

 #include <linux/kernel.h>
..
 #include <linux/sched/stat.h>
 #include <linux/math64.h>

---|
-/*
- * Please note when changing the tuning values:
- * If (MAX_INTERESTING-1) * RESOLUTION > UINT_MAX, the result of
- * a scaling operation multiplication may overflow on 32 bit platforms.
- * In that case, #define RESOLUTION as ULL to get 64 bit result:
- * #define RESOLUTION 1024ULL
- *
- * The default values do not overflow.
- */
 #define BUCKETS 12
 #define INTERVAL_SHIFT 3
 #define INTERVALS (1UL << INTERVAL_SHIFT)
 #define RESOLUTION 1024
 #define DECAY 8
-#define MAX_INTERESTING 50000
-
+#define MAX_INTERESTING (50000 * NSEC_PER_USEC)

 /*
  * Concepts and ideas behind the menu governor
..
  */

 struct menu_device {
-        int last_state_idx;
         int needs_update;
         int tick_wakeup;

-        unsigned int next_timer_us;
-        unsigned int predicted_us;
+        u64 next_timer_ns;
         unsigned int bucket;
         unsigned int correction_factor[BUCKETS];
         unsigned int intervals[INTERVALS];
         int interval_ptr;
 };

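The intervals[] ring buffer above feeds get_typical_interval(), which trusts the average of the last eight observed idle intervals only when they cluster tightly around that average. A simplified standalone sketch of that idea (illustrative values, no outlier-discarding pass, not the kernel's exact code):

```c
#include <stdint.h>
#include <stdio.h>

#define INTERVALS 8

static unsigned int typical_interval(const unsigned int intervals[INTERVALS])
{
	uint64_t sum = 0, variance = 0;
	unsigned int avg;
	int i;

	/* Average of the last INTERVALS observed idle durations (us). */
	for (i = 0; i < INTERVALS; i++)
		sum += intervals[i];
	avg = sum / INTERVALS;

	/* Variance of the samples around that average. */
	for (i = 0; i < INTERVALS; i++) {
		int64_t diff = (int64_t)intervals[i] - avg;

		variance += (uint64_t)(diff * diff);
	}
	variance /= INTERVALS;

	/*
	 * Accept the average only if the standard deviation stays within
	 * roughly avg / 6 (the factor 36 mirrors the governor's check).
	 */
	if ((uint64_t)avg * avg > variance * 36)
		return avg;		/* looks like a repeating pattern */

	return UINT32_MAX;		/* no usable pattern */
}

int main(void)
{
	unsigned int regular[INTERVALS] = { 500, 510, 495, 505, 500, 498, 502, 500 };
	unsigned int noisy[INTERVALS]   = { 10, 9000, 50, 300, 7, 12000, 80, 640 };

	printf("regular: %u us\n", typical_interval(regular));
	printf("noisy:   %u us\n", typical_interval(noisy));
	return 0;
}
```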
---|
-static inline int get_loadavg(unsigned long load)
-{
-        return LOAD_INT(load) * 10 + LOAD_FRAC(load) / 10;
-}
-
-static inline int which_bucket(unsigned int duration, unsigned long nr_iowaiters)
+static inline int which_bucket(u64 duration_ns, unsigned long nr_iowaiters)
 {
         int bucket = 0;

..
         if (nr_iowaiters)
                 bucket = BUCKETS/2;

-        if (duration < 10)
+        if (duration_ns < 10ULL * NSEC_PER_USEC)
                 return bucket;
-        if (duration < 100)
+        if (duration_ns < 100ULL * NSEC_PER_USEC)
                 return bucket + 1;
-        if (duration < 1000)
+        if (duration_ns < 1000ULL * NSEC_PER_USEC)
                 return bucket + 2;
-        if (duration < 10000)
+        if (duration_ns < 10000ULL * NSEC_PER_USEC)
                 return bucket + 3;
-        if (duration < 100000)
+        if (duration_ns < 100000ULL * NSEC_PER_USEC)
                 return bucket + 4;
         return bucket + 5;
 }
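The thresholds above are the same decade boundaries as before, only expressed in nanoseconds. A small standalone illustration of the mapping (constants copied from the patch; NSEC_PER_USEC defined locally for the example):

```c
#include <stdint.h>
#include <stdio.h>

#define BUCKETS		12
#define NSEC_PER_USEC	1000ULL

static int which_bucket(uint64_t duration_ns, unsigned long nr_iowaiters)
{
	int bucket = nr_iowaiters ? BUCKETS / 2 : 0;

	if (duration_ns < 10 * NSEC_PER_USEC)
		return bucket;
	if (duration_ns < 100 * NSEC_PER_USEC)
		return bucket + 1;
	if (duration_ns < 1000 * NSEC_PER_USEC)
		return bucket + 2;
	if (duration_ns < 10000 * NSEC_PER_USEC)
		return bucket + 3;
	if (duration_ns < 100000 * NSEC_PER_USEC)
		return bucket + 4;
	return bucket + 5;
}

int main(void)
{
	/* 250 us until the next expected wakeup, no I/O waiters -> bucket 2 */
	printf("%d\n", which_bucket(250 * NSEC_PER_USEC, 0));
	/* the same sleep length with I/O waiters pending -> bucket 8 */
	printf("%d\n", which_bucket(250 * NSEC_PER_USEC, 2));
	return 0;
}
```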
---|
..
  * to be, the higher this multiplier, and thus the higher
  * the barrier to go to an expensive C state.
  */
-static inline int performance_multiplier(unsigned long nr_iowaiters, unsigned long load)
+static inline int performance_multiplier(unsigned long nr_iowaiters)
 {
-        int mult = 1;
-
-        /* for higher loadavg, we are more reluctant */
-
-        mult += 2 * get_loadavg(load);
-
-        /* for IO wait tasks (per cpu!) we add 5x each */
-        mult += 10 * nr_iowaiters;
-
-        return mult;
+        /* for IO wait tasks (per cpu!) we add 10x each */
+        return 1 + 10 * nr_iowaiters;
 }
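The multiplier is applied later in menu_select(): the predicted idle time divided by it caps the acceptable exit latency, so pending I/O waiters steer the governor toward shallower, lower-latency states. A rough standalone sketch of that effect (values illustrative, not the patch's code):

```c
#include <stdint.h>
#include <stdio.h>

static int performance_multiplier(unsigned long nr_iowaiters)
{
	return 1 + 10 * nr_iowaiters;
}

int main(void)
{
	uint64_t predicted_ns = 400 * 1000;	/* 400 us of predicted idle time */
	unsigned long iowaiters;

	for (iowaiters = 0; iowaiters <= 2; iowaiters++)
		printf("%lu iowaiters -> exit latency capped at %llu ns\n",
		       iowaiters,
		       (unsigned long long)(predicted_ns /
					    performance_multiplier(iowaiters)));
	return 0;
}
```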
---|

 static DEFINE_PER_CPU(struct menu_device, menu_devices);
..
  * of points is below a threshold. If it is... then use the
  * average of these 8 points as the estimated value.
  */
-static unsigned int get_typical_interval(struct menu_device *data)
+static unsigned int get_typical_interval(struct menu_device *data,
+                                         unsigned int predicted_us)
 {
         int i, divisor;
-        unsigned int max, thresh, avg;
+        unsigned int min, max, thresh, avg;
         uint64_t sum, variance;

-        thresh = UINT_MAX; /* Discard outliers above this value */
+        thresh = INT_MAX; /* Discard outliers above this value */

 again:

         /* First calculate the average of past intervals */
+        min = UINT_MAX;
         max = 0;
         sum = 0;
         divisor = 0;
..
                         divisor++;
                         if (value > max)
                                 max = value;
+
+                        if (value < min)
+                                min = value;
                 }
         }
+
+        /*
+         * If the result of the computation is going to be discarded anyway,
+         * avoid the computation altogether.
+         */
+        if (min >= predicted_us)
+                return UINT_MAX;
+
         if (divisor == INTERVALS)
                 avg = sum >> INTERVAL_SHIFT;
         else
..
---|
                        bool *stop_tick)
 {
         struct menu_device *data = this_cpu_ptr(&menu_devices);
-        int latency_req = cpuidle_governor_latency_req(dev->cpu);
-        int i;
-        int first_idx;
-        int idx;
-        unsigned int interactivity_req;
-        unsigned int expected_interval;
-        unsigned long nr_iowaiters, cpu_load;
+        s64 latency_req = cpuidle_governor_latency_req(dev->cpu);
+        unsigned int predicted_us;
+        u64 predicted_ns;
+        u64 interactivity_req;
+        unsigned long nr_iowaiters;
         ktime_t delta_next;
+        int i, idx;

         if (data->needs_update) {
                 menu_update(drv, dev);
                 data->needs_update = 0;
         }

-        /* Special case when user has set very strict latency requirement */
-        if (unlikely(latency_req == 0)) {
-                *stop_tick = false;
+        /* determine the expected residency time, round up */
+        data->next_timer_ns = tick_nohz_get_sleep_length(&delta_next);
+
+        nr_iowaiters = nr_iowait_cpu(dev->cpu);
+        data->bucket = which_bucket(data->next_timer_ns, nr_iowaiters);
+
+        if (unlikely(drv->state_count <= 1 || latency_req == 0) ||
+            ((data->next_timer_ns < drv->states[1].target_residency_ns ||
+              latency_req < drv->states[1].exit_latency_ns) &&
+             !dev->states_usage[0].disable)) {
+                /*
+                 * In this case state[0] will be used no matter what, so return
+                 * it right away and keep the tick running if state[0] is a
+                 * polling one.
+                 */
+                *stop_tick = !(drv->states[0].flags & CPUIDLE_FLAG_POLLING);
                 return 0;
         }

-        /* determine the expected residency time, round up */
-        data->next_timer_us = ktime_to_us(tick_nohz_get_sleep_length(&delta_next));
-
-        get_iowait_load(&nr_iowaiters, &cpu_load);
-        data->bucket = which_bucket(data->next_timer_us, nr_iowaiters);
-
-        /*
-         * Force the result of multiplication to be 64 bits even if both
-         * operands are 32 bits.
-         * Make sure to round up for half microseconds.
-         */
-        data->predicted_us = DIV_ROUND_CLOSEST_ULL((uint64_t)data->next_timer_us *
-                                           data->correction_factor[data->bucket],
-                                           RESOLUTION * DECAY);
-
-        expected_interval = get_typical_interval(data);
-        expected_interval = min(expected_interval, data->next_timer_us);
-
-        first_idx = 0;
-        if (drv->states[0].flags & CPUIDLE_FLAG_POLLING) {
-                struct cpuidle_state *s = &drv->states[1];
-                unsigned int polling_threshold;
-
-                /*
-                 * Default to a physical idle state, not to busy polling, unless
-                 * a timer is going to trigger really really soon.
-                 */
-                polling_threshold = max_t(unsigned int, 20, s->target_residency);
-                if (data->next_timer_us > polling_threshold &&
-                    latency_req > s->exit_latency && !s->disabled &&
-                    !dev->states_usage[1].disable)
-                        first_idx = 1;
-        }
-
-        /*
-         * Use the lowest expected idle interval to pick the idle state.
-         */
-        data->predicted_us = min(data->predicted_us, expected_interval);
+        /* Round up the result for half microseconds. */
+        predicted_us = div_u64(data->next_timer_ns *
+                               data->correction_factor[data->bucket] +
+                               (RESOLUTION * DECAY * NSEC_PER_USEC) / 2,
+                               RESOLUTION * DECAY * NSEC_PER_USEC);
+        /* Use the lowest expected idle interval to pick the idle state. */
+        predicted_ns = (u64)min(predicted_us,
+                                get_typical_interval(data, predicted_us)) *
+                                NSEC_PER_USEC;

         if (tick_nohz_tick_stopped()) {
                 /*
..
                  * the known time till the closest timer event for the idle
                  * state selection.
                  */
-                if (data->predicted_us < TICK_USEC)
-                        data->predicted_us = ktime_to_us(delta_next);
+                if (predicted_ns < TICK_NSEC)
+                        predicted_ns = delta_next;
         } else {
                 /*
                  * Use the performance multiplier and the user-configurable
                  * latency_req to determine the maximum exit latency.
                  */
-                interactivity_req = data->predicted_us / performance_multiplier(nr_iowaiters, cpu_load);
+                interactivity_req = div64_u64(predicted_ns,
+                                              performance_multiplier(nr_iowaiters));
                 if (latency_req > interactivity_req)
                         latency_req = interactivity_req;
         }

-        expected_interval = data->predicted_us;
         /*
          * Find the idle state with the lowest power while satisfying
          * our constraints.
          */
         idx = -1;
-        for (i = first_idx; i < drv->state_count; i++) {
+        for (i = 0; i < drv->state_count; i++) {
                 struct cpuidle_state *s = &drv->states[i];
-                struct cpuidle_state_usage *su = &dev->states_usage[i];

-                if (s->disabled || su->disable)
+                if (dev->states_usage[i].disable)
                         continue;
+
                 if (idx == -1)
                         idx = i; /* first enabled state */
-                if (s->target_residency > data->predicted_us) {
-                        if (data->predicted_us < TICK_USEC)
+
+                if (s->target_residency_ns > predicted_ns) {
+                        /*
+                         * Use a physical idle state, not busy polling, unless
+                         * a timer is going to trigger soon enough.
+                         */
+                        if ((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) &&
+                            s->exit_latency_ns <= latency_req &&
+                            s->target_residency_ns <= data->next_timer_ns) {
+                                predicted_ns = s->target_residency_ns;
+                                idx = i;
+                                break;
+                        }
+                        if (predicted_ns < TICK_NSEC)
                                 break;

                         if (!tick_nohz_tick_stopped()) {
..
                                  * tick in that case and let the governor run
                                  * again in the next iteration of the loop.
                                  */
-                                expected_interval = drv->states[idx].target_residency;
+                                predicted_ns = drv->states[idx].target_residency_ns;
                                 break;
                         }

..
                          * closest timer event, select this one to avoid getting
                          * stuck in the shallow one for too long.
                          */
-                        if (drv->states[idx].target_residency < TICK_USEC &&
-                            s->target_residency <= ktime_to_us(delta_next))
+                        if (drv->states[idx].target_residency_ns < TICK_NSEC &&
+                            s->target_residency_ns <= delta_next)
                                 idx = i;

-                        goto out;
+                        return idx;
                 }
-                if (s->exit_latency > latency_req) {
-                        /*
-                         * If we break out of the loop for latency reasons, use
-                         * the target residency of the selected state as the
-                         * expected idle duration so that the tick is retained
-                         * as long as that target residency is low enough.
-                         */
-                        expected_interval = drv->states[idx].target_residency;
+                if (s->exit_latency_ns > latency_req)
                         break;
-                }
+
                 idx = i;
         }

..
          * expected idle duration is shorter than the tick period length.
          */
         if (((drv->states[idx].flags & CPUIDLE_FLAG_POLLING) ||
-             expected_interval < TICK_USEC) && !tick_nohz_tick_stopped()) {
-                unsigned int delta_next_us = ktime_to_us(delta_next);
-
+             predicted_ns < TICK_NSEC) && !tick_nohz_tick_stopped()) {
                 *stop_tick = false;

-                if (idx > 0 && drv->states[idx].target_residency > delta_next_us) {
+                if (idx > 0 && drv->states[idx].target_residency_ns > delta_next) {
                         /*
                          * The tick is not going to be stopped and the target
                          * residency of the state to be returned is not within
..
                          * tick, so try to correct that.
                          */
                         for (i = idx - 1; i >= 0; i--) {
-                                if (drv->states[i].disabled ||
-                                    dev->states_usage[i].disable)
+                                if (dev->states_usage[i].disable)
                                         continue;

                                 idx = i;
-                                if (drv->states[i].target_residency <= delta_next_us)
+                                if (drv->states[i].target_residency_ns <= delta_next)
                                         break;
                         }
                 }
         }

-out:
-        data->last_state_idx = idx;
-
-        return data->last_state_idx;
+        return idx;
 }
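The open-coded rounding in the predicted_us computation above replaces DIV_ROUND_CLOSEST_ULL(): correction_factor is a fixed-point fraction with RESOLUTION * DECAY as unity, and adding half the divisor before div_u64() rounds to the nearest microsecond instead of truncating. A worked standalone example with illustrative values:

```c
#include <stdint.h>
#include <stdio.h>

#define RESOLUTION	1024
#define DECAY		8
#define NSEC_PER_USEC	1000ULL

int main(void)
{
	uint64_t next_timer_ns = 1500 * NSEC_PER_USEC;	/* 1.5 ms to the next timer */
	unsigned int correction = RESOLUTION * DECAY / 2;	/* factor of 0.5 */
	uint64_t divisor = RESOLUTION * DECAY * NSEC_PER_USEC;
	uint64_t predicted_us;

	/* multiply by the fixed-point factor, round to the nearest microsecond */
	predicted_us = (next_timer_ns * correction + divisor / 2) / divisor;
	printf("predicted idle: %llu us\n", (unsigned long long)predicted_us);	/* 750 */
	return 0;
}
```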
---|

 /**
..
 {
         struct menu_device *data = this_cpu_ptr(&menu_devices);

-        data->last_state_idx = index;
+        dev->last_state_idx = index;
         data->needs_update = 1;
         data->tick_wakeup = tick_nohz_idle_got_tick();
 }
..
---|
 static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
 {
         struct menu_device *data = this_cpu_ptr(&menu_devices);
-        int last_idx = data->last_state_idx;
+        int last_idx = dev->last_state_idx;
         struct cpuidle_state *target = &drv->states[last_idx];
-        unsigned int measured_us;
+        u64 measured_ns;
         unsigned int new_factor;

         /*
..
          * assume the state was never reached and the exit latency is 0.
          */

-        if (data->tick_wakeup && data->next_timer_us > TICK_USEC) {
+        if (data->tick_wakeup && data->next_timer_ns > TICK_NSEC) {
                 /*
                  * The nohz code said that there wouldn't be any events within
                  * the tick boundary (if the tick was stopped), but the idle
..
                  * have been idle long (but not forever) to help the idle
                  * duration predictor do a better job next time.
                  */
-                measured_us = 9 * MAX_INTERESTING / 10;
+                measured_ns = 9 * MAX_INTERESTING / 10;
         } else if ((drv->states[last_idx].flags & CPUIDLE_FLAG_POLLING) &&
                    dev->poll_time_limit) {
                 /*
..
                  * the CPU might have been woken up from idle by the next timer.
                  * Assume that to be the case.
                  */
-                measured_us = data->next_timer_us;
+                measured_ns = data->next_timer_ns;
         } else {
                 /* measured value */
-                measured_us = cpuidle_get_last_residency(dev);
+                measured_ns = dev->last_residency_ns;

                 /* Deduct exit latency */
-                if (measured_us > 2 * target->exit_latency)
-                        measured_us -= target->exit_latency;
+                if (measured_ns > 2 * target->exit_latency_ns)
+                        measured_ns -= target->exit_latency_ns;
                 else
-                        measured_us /= 2;
+                        measured_ns /= 2;
         }

         /* Make sure our coefficients do not exceed unity */
-        if (measured_us > data->next_timer_us)
-                measured_us = data->next_timer_us;
+        if (measured_ns > data->next_timer_ns)
+                measured_ns = data->next_timer_ns;

         /* Update our correction ratio */
         new_factor = data->correction_factor[data->bucket];
         new_factor -= new_factor / DECAY;

-        if (data->next_timer_us > 0 && measured_us < MAX_INTERESTING)
-                new_factor += RESOLUTION * measured_us / data->next_timer_us;
+        if (data->next_timer_ns > 0 && measured_ns < MAX_INTERESTING)
+                new_factor += div64_u64(RESOLUTION * measured_ns,
+                                        data->next_timer_ns);
         else
                 /*
                  * we were idle so long that we count it as a perfect
..
         data->correction_factor[data->bucket] = new_factor;

         /* update the repeating-pattern data */
-        data->intervals[data->interval_ptr++] = measured_us;
+        data->intervals[data->interval_ptr++] = ktime_to_us(measured_ns);
         if (data->interval_ptr >= INTERVALS)
                 data->interval_ptr = 0;
 }
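The correction-factor update at the end of menu_update() is a decaying average: each wakeup removes 1/DECAY of the old factor and adds the observed ratio of measured to announced sleep time, scaled by RESOLUTION. A standalone sketch (not the patch's code) showing how the factor converges when the CPU consistently wakes at half the announced timer distance:

```c
#include <stdint.h>
#include <stdio.h>

#define RESOLUTION	1024
#define DECAY		8

int main(void)
{
	unsigned int factor = RESOLUTION * DECAY;	/* start at unity */
	uint64_t next_timer_us = 1000, measured_us = 500;
	int i;

	for (i = 0; i < 32; i++) {
		/* decay the old estimate, then add the new observation */
		factor -= factor / DECAY;
		factor += RESOLUTION * measured_us / next_timer_us;
		printf("iteration %2d: factor = %u\n", i, factor);
	}
	/* converges toward RESOLUTION * DECAY / 2, i.e. "expect about half" */
	return 0;
}
```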
---|