2024-05-10 cde9070d9970eef1f7ec2360586c802a16230ad8
kernel/kernel/time/tick-sched.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
- * linux/kernel/time/tick-sched.c
- *
  * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
  * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
  * Copyright(C) 2006-2007  Timesys Corp., Thomas Gleixner
@@ -8,8 +7,6 @@
  * No idle tick implementation for low and high resolution timers
  *
  * Started by: Thomas Gleixner and Ingo Molnar
- *
- * Distribute under GPLv2.
  */
 #include <linux/cpu.h>
 #include <linux/err.h>
@@ -26,9 +23,9 @@
 #include <linux/module.h>
 #include <linux/irq_work.h>
 #include <linux/posix-timers.h>
-#include <linux/timer.h>
 #include <linux/context_tracking.h>
 #include <linux/mm.h>
+#include <trace/hooks/sched.h>
 
 #include <asm/irq_regs.h>
 
@@ -57,48 +54,69 @@
  */
 static void tick_do_update_jiffies64(ktime_t now)
 {
-	unsigned long ticks = 0;
+	unsigned long ticks = 1;
 	ktime_t delta;
 
 	/*
-	 * Do a quick check without holding jiffies_lock:
-	 * The READ_ONCE() pairs with two updates done later in this function.
+	 * Do a quick check without holding jiffies_lock. The READ_ONCE()
+	 * pairs with the update done later in this function.
+	 *
+	 * This is also an intentional data race which is even safe on
+	 * 32bit in theory. If there is a concurrent update then the check
+	 * might give a random answer. It does not matter because if it
+	 * returns then the concurrent update is already taking care, if it
+	 * falls through then it will pointlessly contend on jiffies_lock.
+	 *
+	 * Though there is one nasty case on 32bit due to store tearing of
+	 * the 64bit value. If the first 32bit store makes the quick check
+	 * return on all other CPUs and the writing CPU context gets
+	 * delayed to complete the second store (scheduled out on virt)
+	 * then jiffies can become stale for up to ~2^32 nanoseconds
+	 * without noticing. After that point all CPUs will wait for
+	 * jiffies lock.
+	 *
+	 * OTOH, this is not any different than the situation with NOHZ=off
+	 * where one CPU is responsible for updating jiffies and
+	 * timekeeping. If that CPU goes out for lunch then all other CPUs
+	 * will operate on stale jiffies until it decides to come back.
 	 */
-	delta = ktime_sub(now, READ_ONCE(last_jiffies_update));
-	if (delta < tick_period)
+	if (ktime_before(now, READ_ONCE(tick_next_period)))
 		return;
 
 	/* Reevaluate with jiffies_lock held */
-	write_seqlock(&jiffies_lock);
-
-	delta = ktime_sub(now, last_jiffies_update);
-	if (delta >= tick_period) {
-
-		delta = ktime_sub(delta, tick_period);
-		/* Pairs with the lockless read in this function. */
-		WRITE_ONCE(last_jiffies_update,
-			   ktime_add(last_jiffies_update, tick_period));
-
-		/* Slow path for long timeouts */
-		if (unlikely(delta >= tick_period)) {
-			s64 incr = ktime_to_ns(tick_period);
-
-			ticks = ktime_divns(delta, incr);
-
-			/* Pairs with the lockless read in this function. */
-			WRITE_ONCE(last_jiffies_update,
-				   ktime_add_ns(last_jiffies_update,
-						incr * ticks));
-		}
-		do_timer(++ticks);
-
-		/* Keep the tick_next_period variable up to date */
-		tick_next_period = ktime_add(last_jiffies_update, tick_period);
-	} else {
-		write_sequnlock(&jiffies_lock);
+	raw_spin_lock(&jiffies_lock);
+	if (ktime_before(now, tick_next_period)) {
+		raw_spin_unlock(&jiffies_lock);
 		return;
 	}
-	write_sequnlock(&jiffies_lock);
+
+	write_seqcount_begin(&jiffies_seq);
+
+	delta = ktime_sub(now, tick_next_period);
+	if (unlikely(delta >= TICK_NSEC)) {
+		/* Slow path for long idle sleep times */
+		s64 incr = TICK_NSEC;
+
+		ticks += ktime_divns(delta, incr);
+
+		last_jiffies_update = ktime_add_ns(last_jiffies_update,
+						   incr * ticks);
+	} else {
+		last_jiffies_update = ktime_add_ns(last_jiffies_update,
+						   TICK_NSEC);
+	}
+
+	do_timer(ticks);
+
+	/*
+	 * Keep the tick_next_period variable up to date. WRITE_ONCE()
+	 * pairs with the READ_ONCE() in the lockless quick check above.
+	 */
+	WRITE_ONCE(tick_next_period,
+		   ktime_add_ns(last_jiffies_update, TICK_NSEC));
+
+	write_seqcount_end(&jiffies_seq);
+	raw_spin_unlock(&jiffies_lock);
 	update_wall_time();
 }
 
@@ -109,14 +127,29 @@
 {
 	ktime_t period;
 
-	write_seqlock(&jiffies_lock);
+	raw_spin_lock(&jiffies_lock);
+	write_seqcount_begin(&jiffies_seq);
 	/* Did we start the jiffies update yet ? */
-	if (last_jiffies_update == 0)
+	if (last_jiffies_update == 0) {
+		u32 rem;
+
+		/*
+		 * Ensure that the tick is aligned to a multiple of
+		 * TICK_NSEC.
+		 */
+		div_u64_rem(tick_next_period, TICK_NSEC, &rem);
+		if (rem)
+			tick_next_period += TICK_NSEC - rem;
+
 		last_jiffies_update = tick_next_period;
+	}
 	period = last_jiffies_update;
-	write_sequnlock(&jiffies_lock);
+	write_seqcount_end(&jiffies_seq);
+	raw_spin_unlock(&jiffies_lock);
 	return period;
 }
+
+#define MAX_STALLED_JIFFIES 5
 
 static void tick_sched_do_timer(struct tick_sched *ts, ktime_t now)
 {
@@ -129,15 +162,38 @@
 	 * into a long sleep. If two CPUs happen to assign themselves to
 	 * this duty, then the jiffies update is still serialized by
 	 * jiffies_lock.
+	 *
+	 * If nohz_full is enabled, this should not happen because the
+	 * tick_do_timer_cpu never relinquishes.
 	 */
-	if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE)
-	    && !tick_nohz_full_cpu(cpu))
+	if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE)) {
+#ifdef CONFIG_NO_HZ_FULL
+		WARN_ON_ONCE(tick_nohz_full_running);
+#endif
 		tick_do_timer_cpu = cpu;
+	}
 #endif
 
 	/* Check, if the jiffies need an update */
-	if (tick_do_timer_cpu == cpu)
+	if (tick_do_timer_cpu == cpu) {
 		tick_do_update_jiffies64(now);
+		trace_android_vh_jiffies_update(NULL);
+	}
+
+	/*
+	 * If jiffies update stalled for too long (timekeeper in stop_machine()
+	 * or VMEXIT'ed for several msecs), force an update.
+	 */
+	if (ts->last_tick_jiffies != jiffies) {
+		ts->stalled_jiffies = 0;
+		ts->last_tick_jiffies = READ_ONCE(jiffies);
+	} else {
+		if (++ts->stalled_jiffies == MAX_STALLED_JIFFIES) {
+			tick_do_update_jiffies64(now);
+			ts->stalled_jiffies = 0;
+			ts->last_tick_jiffies = READ_ONCE(jiffies);
+		}
+	}
 
 	if (ts->inidle)
 		ts->got_idle_tick = 1;
@@ -174,6 +230,7 @@
 #ifdef CONFIG_NO_HZ_FULL
 cpumask_var_t tick_nohz_full_mask;
 bool tick_nohz_full_running;
+EXPORT_SYMBOL_GPL(tick_nohz_full_running);
 static atomic_t tick_dep_mask;
 
 static bool check_tick_dependency(atomic_t *dep)
@@ -197,6 +254,16 @@
 
 	if (val & TICK_DEP_MASK_CLOCK_UNSTABLE) {
 		trace_tick_stop(0, TICK_DEP_MASK_CLOCK_UNSTABLE);
+		return true;
+	}
+
+	if (val & TICK_DEP_MASK_RCU) {
+		trace_tick_stop(0, TICK_DEP_MASK_RCU);
+		return true;
+	}
+
+	if (val & TICK_DEP_MASK_RCU_EXP) {
+		trace_tick_stop(0, TICK_DEP_MASK_RCU_EXP);
 		return true;
 	}
 
@@ -232,6 +299,7 @@
 
 static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = {
 	.func = nohz_full_kick_func,
+	.flags = ATOMIC_INIT(IRQ_WORK_HARD_IRQ),
 };
 
 /*
@@ -326,6 +394,7 @@
 		preempt_enable();
 	}
 }
+EXPORT_SYMBOL_GPL(tick_nohz_dep_set_cpu);
 
 void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit)
 {
@@ -333,24 +402,35 @@
 
 	atomic_andnot(BIT(bit), &ts->tick_dep_mask);
 }
+EXPORT_SYMBOL_GPL(tick_nohz_dep_clear_cpu);
 
 /*
- * Set a per-task tick dependency. Posix CPU timers need this in order to elapse
- * per task timers.
+ * Set a per-task tick dependency. RCU need this. Also posix CPU timers
+ * in order to elapse per task timers.
  */
 void tick_nohz_dep_set_task(struct task_struct *tsk, enum tick_dep_bits bit)
 {
-	/*
-	 * We could optimize this with just kicking the target running the task
-	 * if that noise matters for nohz full users.
-	 */
-	tick_nohz_dep_set_all(&tsk->tick_dep_mask, bit);
+	if (!atomic_fetch_or(BIT(bit), &tsk->tick_dep_mask)) {
+		if (tsk == current) {
+			preempt_disable();
+			tick_nohz_full_kick();
+			preempt_enable();
+		} else {
+			/*
+			 * Some future tick_nohz_full_kick_task()
+			 * should optimize this.
+			 */
+			tick_nohz_full_kick_all();
+		}
+	}
 }
+EXPORT_SYMBOL_GPL(tick_nohz_dep_set_task);
 
 void tick_nohz_dep_clear_task(struct task_struct *tsk, enum tick_dep_bits bit)
 {
 	atomic_andnot(BIT(bit), &tsk->tick_dep_mask);
 }
+EXPORT_SYMBOL_GPL(tick_nohz_dep_clear_task);
 
 /*
  * Set a per-taskgroup tick dependency. Posix CPU timers need this in order to elapse
@@ -400,16 +480,21 @@
 	tick_nohz_full_running = true;
 }
 
-static int tick_nohz_cpu_down(unsigned int cpu)
+bool tick_nohz_cpu_hotpluggable(unsigned int cpu)
 {
 	/*
-	 * The boot CPU handles housekeeping duty (unbound timers,
-	 * workqueues, timekeeping, ...) on behalf of full dynticks
+	 * The tick_do_timer_cpu CPU handles housekeeping duty (unbound
+	 * timers, workqueues, timekeeping, ...) on behalf of full dynticks
 	 * CPUs. It must remain online when nohz full is enabled.
 	 */
 	if (tick_nohz_full_running && tick_do_timer_cpu == cpu)
-		return -EBUSY;
-	return 0;
+		return false;
+	return true;
+}
+
+static int tick_nohz_cpu_down(unsigned int cpu)
+{
+	return tick_nohz_cpu_hotpluggable(cpu) ? 0 : -EBUSY;
 }
 
 void __init tick_nohz_init(void)
@@ -431,12 +516,15 @@
 		return;
 	}
 
-	cpu = smp_processor_id();
+	if (IS_ENABLED(CONFIG_PM_SLEEP_SMP) &&
+	    !IS_ENABLED(CONFIG_PM_SLEEP_SMP_NONZERO_CPU)) {
+		cpu = smp_processor_id();
 
-	if (cpumask_test_cpu(cpu, tick_nohz_full_mask)) {
-		pr_warn("NO_HZ: Clearing %d from nohz_full range for timekeeping\n",
-			cpu);
-		cpumask_clear_cpu(cpu, tick_nohz_full_mask);
+		if (cpumask_test_cpu(cpu, tick_nohz_full_mask)) {
+			pr_warn("NO_HZ: Clearing %d from nohz_full range "
+				"for timekeeping\n", cpu);
+			cpumask_clear_cpu(cpu, tick_nohz_full_mask);
+		}
 	}
 
 	for_each_cpu(cpu, tick_nohz_full_mask)
@@ -631,12 +719,14 @@
 	hrtimer_set_expires(&ts->sched_timer, ts->last_tick);
 
 	/* Forward the time to expire in the future */
-	hrtimer_forward(&ts->sched_timer, now, tick_period);
+	hrtimer_forward(&ts->sched_timer, now, TICK_NSEC);
 
-	if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
-		hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED);
-	else
+	if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
+		hrtimer_start_expires(&ts->sched_timer,
+				      HRTIMER_MODE_ABS_PINNED_HARD);
+	} else {
 		tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
+	}
 
 	/*
 	 * Reset to make sure next tick stop doesn't get fooled by past
@@ -653,14 +743,15 @@
 static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu)
 {
 	u64 basemono, next_tick, next_tmr, next_rcu, delta, expires;
-	unsigned long seq, basejiff;
+	unsigned long basejiff;
+	unsigned int seq;
 
 	/* Read jiffies and the time when jiffies were updated last */
 	do {
-		seq = read_seqbegin(&jiffies_lock);
+		seq = read_seqcount_begin(&jiffies_seq);
 		basemono = last_jiffies_update;
 		basejiff = jiffies;
-	} while (read_seqretry(&jiffies_lock, seq));
+	} while (read_seqcount_retry(&jiffies_seq, seq));
 	ts->last_jiffies = basejiff;
 	ts->timer_expires_base = basemono;
 
@@ -780,7 +871,6 @@
 	 */
 	if (!ts->tick_stopped) {
 		calc_load_nohz_start();
-		cpu_load_update_nohz_start();
 		quiet_vmstat();
 
 		ts->last_tick = hrtimer_get_expires(&ts->sched_timer);
@@ -797,11 +887,14 @@
 	if (unlikely(expires == KTIME_MAX)) {
 		if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
 			hrtimer_cancel(&ts->sched_timer);
+		else
+			tick_program_event(KTIME_MAX, 1);
 		return;
 	}
 
 	if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
-		hrtimer_start(&ts->sched_timer, tick, HRTIMER_MODE_ABS_PINNED);
+		hrtimer_start(&ts->sched_timer, tick,
+			      HRTIMER_MODE_ABS_PINNED_HARD);
 	} else {
 		hrtimer_set_expires(&ts->sched_timer, tick);
 		tick_program_event(tick, 1);
@@ -827,7 +920,6 @@
 {
 	/* Update jiffies first */
 	tick_do_update_jiffies64(now);
-	cpu_load_update_nohz_stop();
 
 	/*
 	 * Clear the timer idle flag, so we avoid IPIs on remote queueing and
@@ -890,12 +982,12 @@
 	if (need_resched())
 		return false;
 
-	if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
+	if (unlikely(local_softirq_pending())) {
 		static int ratelimit;
 
 		if (ratelimit < 10 &&
 		    (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) {
-			pr_warn("NOHZ: local_softirq_pending %02x\n",
+			pr_warn("NOHZ tick-stop error: Non-RCU local softirq work is pending, handler #%02x!!!\n",
				(unsigned int) local_softirq_pending());
			ratelimit++;
		}
@@ -909,11 +1001,9 @@
 	 */
 	if (tick_do_timer_cpu == cpu)
 		return false;
-	/*
-	 * Boot safety: make sure the timekeeping duty has been
-	 * assigned before entering dyntick-idle mode,
-	 */
-	if (tick_do_timer_cpu == TICK_DO_TIMER_NONE)
+
+	/* Should not happen for nohz-full */
+	if (WARN_ON_ONCE(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
 		return false;
 	}
 
@@ -1031,6 +1121,18 @@
 }
 
 /**
+ * tick_nohz_get_next_hrtimer - return the next expiration time for the hrtimer
+ * or the tick, whatever that expires first. Note that, if the tick has been
+ * stopped, it returns the next hrtimer.
+ *
+ * Called from power state control code with interrupts disabled
+ */
+ktime_t tick_nohz_get_next_hrtimer(void)
+{
+	return __this_cpu_read(tick_cpu_device.evtdev)->next_event;
+}
+
+/**
  * tick_nohz_get_sleep_length - return the expected length of the current sleep
  * @delta_next: duration until the next event if the tick cannot be stopped
  *
@@ -1082,6 +1184,7 @@
 
 	return ts->idle_calls;
 }
+EXPORT_SYMBOL_GPL(tick_nohz_get_idle_calls_cpu);
 
 /**
  * tick_nohz_get_idle_calls - return the current idle calls counter value
@@ -1100,7 +1203,7 @@
 #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 	unsigned long ticks;
 
-	if (vtime_accounting_cpu_enabled())
+	if (vtime_accounting_enabled_this_cpu())
 		return;
 	/*
 	 * We stopped the tick in idle. Update process times would miss the
@@ -1178,11 +1281,17 @@
 	tick_sched_do_timer(ts, now);
 	tick_sched_handle(ts, regs);
 
-	/* No need to reprogram if we are running tickless */
-	if (unlikely(ts->tick_stopped))
+	if (unlikely(ts->tick_stopped)) {
+		/*
+		 * The clockevent device is not reprogrammed, so change the
+		 * clock event device to ONESHOT_STOPPED to avoid spurious
+		 * interrupts on devices which might not be truly one shot.
+		 */
+		tick_program_event(KTIME_MAX, 1);
 		return;
+	}
 
-	hrtimer_forward(&ts->sched_timer, now, tick_period);
+	hrtimer_forward(&ts->sched_timer, now, TICK_NSEC);
 	tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
 }
 
@@ -1214,12 +1323,12 @@
 	 * Recycle the hrtimer in ts, so we can share the
 	 * hrtimer_forward with the highres code.
 	 */
-	hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+	hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
 	/* Get the next period */
 	next = tick_init_jiffy_update();
 
 	hrtimer_set_expires(&ts->sched_timer, next);
-	hrtimer_forward_now(&ts->sched_timer, tick_period);
+	hrtimer_forward_now(&ts->sched_timer, TICK_NSEC);
 	tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
 	tick_nohz_activate(ts, NOHZ_MODE_LOWRES);
 }
@@ -1259,18 +1368,6 @@
  * High resolution timer specific code
  */
 #ifdef CONFIG_HIGH_RES_TIMERS
-
-static void (*wake_callback)(void);
-
-void register_tick_sched_wakeup_callback(void (*cb)(void))
-{
-	if (!wake_callback)
-		wake_callback = cb;
-	else
-		pr_warn("tick-sched wake cb already exists; skipping.\n");
-}
-EXPORT_SYMBOL_GPL(register_tick_sched_wakeup_callback);
-
 /*
  * We rearm the timer until we get disabled by the idle code.
  * Called with interrupts disabled.
@@ -1288,15 +1385,8 @@
 	 * Do not call, when we are not in irq context and have
 	 * no valid regs pointer
 	 */
-	if (regs) {
+	if (regs)
 		tick_sched_handle(ts, regs);
-		if (wake_callback && tick_do_timer_cpu == smp_processor_id()) {
-			/*
-			 * wakeup user if needed
-			 */
-			wake_callback();
-		}
-	}
 	else
 		ts->next_tick = 0;
 
@@ -1304,7 +1394,7 @@
 	if (unlikely(ts->tick_stopped))
 		return HRTIMER_NORESTART;
 
-	hrtimer_forward(timer, now, tick_period);
+	hrtimer_forward(timer, now, TICK_NSEC);
 
 	return HRTIMER_RESTART;
 }
@@ -1330,7 +1420,7 @@
 	/*
 	 * Emulate tick processing via per-CPU hrtimers:
 	 */
-	hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+	hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
 	ts->sched_timer.function = tick_sched_timer;
 
 	/* Get the next period (per-CPU) */
@@ -1338,14 +1428,14 @@
 
 	/* Offset the tick to avert jiffies_lock contention. */
 	if (sched_skew_tick) {
-		u64 offset = ktime_to_ns(tick_period) >> 1;
+		u64 offset = TICK_NSEC >> 1;
 		do_div(offset, num_possible_cpus());
 		offset *= smp_processor_id();
 		hrtimer_add_expires_ns(&ts->sched_timer, offset);
 	}
 
-	hrtimer_forward(&ts->sched_timer, now, tick_period);
-	hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED);
+	hrtimer_forward(&ts->sched_timer, now, TICK_NSEC);
+	hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED_HARD);
 	tick_nohz_activate(ts, NOHZ_MODE_HIGHRES);
 }
 #endif /* HIGH_RES_TIMERS */
@@ -1412,9 +1502,3 @@
 	tick_nohz_switch_to_nohz();
 	return 0;
 }
-
-ktime_t *get_next_event_cpu(unsigned int cpu)
-{
-	return &(per_cpu(tick_cpu_device, cpu).evtdev->next_event);
-}
-EXPORT_SYMBOL_GPL(get_next_event_cpu);