hc
2023-12-09 b22da3d8526a935aa31e086e63f60ff3246cb61c
kernel/kernel/time/tick-broadcast.c
@@ -1,15 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
- * linux/kernel/time/tick-broadcast.c
- *
  * This file contains functions which emulate a local clock-event
  * device via a broadcast event source.
  *
  * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
  * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
  * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
- *
- * This code is licenced under the GPL version 2. For details see
- * kernel-base/COPYING.
  */
 #include <linux/cpu.h>
 #include <linux/err.h>
@@ -37,13 +33,21 @@
 static __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(tick_broadcast_lock);
 
 #ifdef CONFIG_TICK_ONESHOT
+static DEFINE_PER_CPU(struct clock_event_device *, tick_oneshot_wakeup_device);
+
 static void tick_broadcast_setup_oneshot(struct clock_event_device *bc);
 static void tick_broadcast_clear_oneshot(int cpu);
 static void tick_resume_broadcast_oneshot(struct clock_event_device *bc);
+# ifdef CONFIG_HOTPLUG_CPU
+static void tick_broadcast_oneshot_offline(unsigned int cpu);
+# endif
 #else
 static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc) { BUG(); }
 static inline void tick_broadcast_clear_oneshot(int cpu) { }
 static inline void tick_resume_broadcast_oneshot(struct clock_event_device *bc) { }
+# ifdef CONFIG_HOTPLUG_CPU
+static inline void tick_broadcast_oneshot_offline(unsigned int cpu) { }
+# endif
 #endif
 
 /*
@@ -57,6 +61,13 @@
 struct cpumask *tick_get_broadcast_mask(void)
 {
 	return tick_broadcast_mask;
+}
+
+static struct clock_event_device *tick_get_oneshot_wakeup_device(int cpu);
+
+const struct clock_event_device *tick_get_wakeup_device(int cpu)
+{
+	return tick_get_oneshot_wakeup_device(cpu);
 }
 
 /*
@@ -86,12 +97,74 @@
 	return !curdev || newdev->rating > curdev->rating;
 }
 
+#ifdef CONFIG_TICK_ONESHOT
+static struct clock_event_device *tick_get_oneshot_wakeup_device(int cpu)
+{
+	return per_cpu(tick_oneshot_wakeup_device, cpu);
+}
+
+static void tick_oneshot_wakeup_handler(struct clock_event_device *wd)
+{
+	/*
+	 * If we woke up early and the tick was reprogrammed in the
+	 * meantime then this may be spurious but harmless.
+	 */
+	tick_receive_broadcast();
+}
+
+static bool tick_set_oneshot_wakeup_device(struct clock_event_device *newdev,
+					   int cpu)
+{
+	struct clock_event_device *curdev = tick_get_oneshot_wakeup_device(cpu);
+
+	if (!newdev)
+		goto set_device;
+
+	if ((newdev->features & CLOCK_EVT_FEAT_DUMMY) ||
+	    (newdev->features & CLOCK_EVT_FEAT_C3STOP))
+		return false;
+
+	if (!(newdev->features & CLOCK_EVT_FEAT_PERCPU) ||
+	    !(newdev->features & CLOCK_EVT_FEAT_ONESHOT))
+		return false;
+
+	if (!cpumask_equal(newdev->cpumask, cpumask_of(cpu)))
+		return false;
+
+	if (curdev && newdev->rating <= curdev->rating)
+		return false;
+
+	if (!try_module_get(newdev->owner))
+		return false;
+
+	newdev->event_handler = tick_oneshot_wakeup_handler;
+set_device:
+	clockevents_exchange_device(curdev, newdev);
+	per_cpu(tick_oneshot_wakeup_device, cpu) = newdev;
+	return true;
+}
+#else
+static struct clock_event_device *tick_get_oneshot_wakeup_device(int cpu)
+{
+	return NULL;
+}
+
+static bool tick_set_oneshot_wakeup_device(struct clock_event_device *newdev,
+					   int cpu)
+{
+	return false;
+}
+#endif
+
 /*
  * Conditionally install/replace broadcast device
  */
-void tick_install_broadcast_device(struct clock_event_device *dev)
+void tick_install_broadcast_device(struct clock_event_device *dev, int cpu)
{
 	struct clock_event_device *cur = tick_broadcast_device.evtdev;
+
+	if (tick_set_oneshot_wakeup_device(dev, cpu))
+		return;
 
 	if (!tick_check_broadcast_device(cur, dev))
 		return;
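
The hunk above introduces a per-CPU "oneshot wakeup" timer class: a candidate is accepted only if it keeps running in deep idle (neither CLOCK_EVT_FEAT_DUMMY nor CLOCK_EVT_FEAT_C3STOP), is per-CPU and oneshot-capable, is affine to exactly this CPU, and outrates any incumbent. A minimal userspace sketch of those acceptance checks follows; the struct and the FEAT_* flag values are illustrative stand-ins, not the kernel's definitions from include/linux/clockchips.h.

#include <stdbool.h>
#include <stdio.h>

#define FEAT_ONESHOT	0x01	/* hypothetical stand-in flag values */
#define FEAT_PERCPU	0x02
#define FEAT_C3STOP	0x04
#define FEAT_DUMMY	0x08

struct model_ced {
	unsigned int features;
	int rating;
};

/* Mirrors the feature/rating checks in tick_set_oneshot_wakeup_device() */
static bool suitable_wakeup_device(const struct model_ced *newdev,
				   const struct model_ced *curdev)
{
	if (newdev->features & (FEAT_DUMMY | FEAT_C3STOP))
		return false;	/* must keep running in deep idle states */
	if (!(newdev->features & FEAT_PERCPU) ||
	    !(newdev->features & FEAT_ONESHOT))
		return false;	/* must be a per-CPU, oneshot-capable timer */
	if (curdev && newdev->rating <= curdev->rating)
		return false;	/* only replace a lower-rated incumbent */
	return true;
}

int main(void)
{
	struct model_ced timer  = { FEAT_PERCPU | FEAT_ONESHOT, 100 };
	struct model_ced better = { FEAT_PERCPU | FEAT_ONESHOT, 200 };

	printf("%d\n", suitable_wakeup_device(&timer, NULL));	 /* 1 */
	printf("%d\n", suitable_wakeup_device(&timer, &better)); /* 0 */
	return 0;
}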
@@ -105,6 +178,19 @@
 	tick_broadcast_device.evtdev = dev;
 	if (!cpumask_empty(tick_broadcast_mask))
 		tick_broadcast_start_periodic(dev);
+
+	if (!(dev->features & CLOCK_EVT_FEAT_ONESHOT))
+		return;
+
+	/*
+	 * If the system already runs in oneshot mode, switch the newly
+	 * registered broadcast device to oneshot mode explicitly.
+	 */
+	if (tick_broadcast_oneshot_active()) {
+		tick_broadcast_switch_to_oneshot();
+		return;
+	}
+
 	/*
 	 * Inform all cpus about this. We might be in a situation
 	 * where we did not switch to oneshot mode because the per cpu
@@ -113,8 +199,7 @@
 	 * notification the systems stays stuck in periodic mode
 	 * forever.
 	 */
-	if (dev->features & CLOCK_EVT_FEAT_ONESHOT)
-		tick_clock_notify();
+	tick_clock_notify();
 }
 
 /*
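
With the new cpu argument, a registering device is first offered to the per-CPU wakeup path; only devices rejected there compete for the system-wide broadcast slot, and a freshly installed oneshot-capable broadcast device is switched to oneshot mode on the spot if the system is already running in that mode. A compilable sketch of that decision order; the two predicates are hypothetical stand-ins for the kernel helpers, not real API:

#include <stdbool.h>
#include <stdio.h>

enum install_result { WAKEUP_DEVICE, REJECTED, BROADCAST_DEVICE };

/* Assumed stand-ins for the kernel predicates used by the real function. */
static bool claim_as_wakeup_device(int cpu)	{ return cpu == 0; }
static bool wins_broadcast_rating_contest(void)	{ return true; }

/* Mirrors the decision order of tick_install_broadcast_device() above. */
static enum install_result install(int cpu)
{
	if (claim_as_wakeup_device(cpu))
		return WAKEUP_DEVICE;		/* consumed per CPU, done */
	if (!wins_broadcast_rating_contest())
		return REJECTED;
	return BROADCAST_DEVICE;		/* installed system-wide */
}

int main(void)
{
	printf("cpu0 -> %d, cpu1 -> %d\n", install(0), install(1));
	return 0;
}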
@@ -239,7 +324,6 @@
 	return ret;
 }
 
-#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 int tick_receive_broadcast(void)
 {
 	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
@@ -254,7 +338,6 @@
 	evt->event_handler(evt);
 	return 0;
 }
-#endif
 
 /*
  * Broadcast the event to the cpus, which are set in the mask (mangled).
@@ -379,6 +462,7 @@
 	switch (mode) {
 	case TICK_BROADCAST_FORCE:
 		tick_broadcast_forced = 1;
+		fallthrough;
 	case TICK_BROADCAST_ON:
 		cpumask_set_cpu(cpu, tick_broadcast_on);
 		if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_mask)) {
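
The added fallthrough; marks the drop from TICK_BROADCAST_FORCE into TICK_BROADCAST_ON as deliberate, keeping -Wimplicit-fallthrough quiet; the kernel defines it in include/linux/compiler_attributes.h as __attribute__((__fallthrough__)) where the compiler supports it. A standalone illustration of the same pattern:

#include <stdio.h>

/* Same expansion the kernel uses for compilers that support the attribute. */
#define fallthrough	__attribute__((__fallthrough__))

static void set_mode(int force)
{
	switch (force) {
	case 1:
		puts("forcing broadcast");
		fallthrough;	/* deliberate: "force" implies "on" */
	case 0:
		puts("broadcast on");
	}
}

int main(void)
{
	set_mode(1);	/* prints both lines */
	return 0;
}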
@@ -400,8 +484,6 @@
 		if (tick_broadcast_forced)
 			break;
 		cpumask_clear_cpu(cpu, tick_broadcast_on);
-		if (!tick_device_is_functional(dev))
-			break;
 		if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_mask)) {
 			if (tick_broadcast_device.mode ==
 			    TICKDEV_MODE_PERIODIC)
@@ -438,27 +520,29 @@
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-/*
- * Remove a CPU from broadcasting
- */
-void tick_shutdown_broadcast(unsigned int cpu)
+static void tick_shutdown_broadcast(void)
 {
-	struct clock_event_device *bc;
-	unsigned long flags;
-
-	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
-
-	bc = tick_broadcast_device.evtdev;
-	cpumask_clear_cpu(cpu, tick_broadcast_mask);
-	cpumask_clear_cpu(cpu, tick_broadcast_on);
+	struct clock_event_device *bc = tick_broadcast_device.evtdev;
 
 	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
 		if (bc && cpumask_empty(tick_broadcast_mask))
 			clockevents_shutdown(bc);
 	}
-
-	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
 }
+
+/*
+ * Remove a CPU from broadcasting
+ */
+void tick_broadcast_offline(unsigned int cpu)
+{
+	raw_spin_lock(&tick_broadcast_lock);
+	cpumask_clear_cpu(cpu, tick_broadcast_mask);
+	cpumask_clear_cpu(cpu, tick_broadcast_on);
+	tick_broadcast_oneshot_offline(cpu);
+	tick_shutdown_broadcast();
+	raw_spin_unlock(&tick_broadcast_lock);
+}
+
 #endif
 
 void tick_suspend_broadcast(void)
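
The rework above replaces the irqsave locking of the old tick_shutdown_broadcast() with a plain raw_spin_lock() in tick_broadcast_offline(), presumably because the hotplug offline path already runs with interrupts disabled; mask clearing, the oneshot cleanup, and the periodic shutdown now happen under a single lock section. A toy bitmask model of that sequence (the kernel uses struct cpumask, not a word-sized mask):

#include <stdint.h>
#include <stdio.h>

/* Toy cpumask: one bit per CPU. */
static uint64_t broadcast_mask = 0x6;	/* CPUs 1 and 2 use broadcast */
static uint64_t broadcast_on   = 0x6;

static void broadcast_offline(unsigned int cpu)
{
	/* Caller context is assumed to have interrupts already disabled,
	 * which is why the real code can take the raw spinlock without
	 * the irqsave variant. */
	broadcast_mask &= ~(1ULL << cpu);
	broadcast_on   &= ~(1ULL << cpu);
	/* ...oneshot masks cleared and device shut down here... */
}

int main(void)
{
	broadcast_offline(2);
	printf("mask=%#llx on=%#llx\n",
	       (unsigned long long)broadcast_mask,
	       (unsigned long long)broadcast_on);	/* mask=0x2 on=0x2 */
	return 0;
}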
@@ -704,24 +788,16 @@
 	clockevents_switch_state(dev, CLOCK_EVT_STATE_SHUTDOWN);
 }
 
-int __tick_broadcast_oneshot_control(enum tick_broadcast_state state)
+static int ___tick_broadcast_oneshot_control(enum tick_broadcast_state state,
+					     struct tick_device *td,
+					     int cpu)
 {
-	struct clock_event_device *bc, *dev;
-	int cpu, ret = 0;
+	struct clock_event_device *bc, *dev = td->evtdev;
+	int ret = 0;
 	ktime_t now;
-
-	/*
-	 * If there is no broadcast device, tell the caller not to go
-	 * into deep idle.
-	 */
-	if (!tick_broadcast_device.evtdev)
-		return -EBUSY;
-
-	dev = this_cpu_ptr(&tick_cpu_device)->evtdev;
 
 	raw_spin_lock(&tick_broadcast_lock);
 	bc = tick_broadcast_device.evtdev;
-	cpu = smp_processor_id();
 
 	if (state == TICK_BROADCAST_ENTER) {
 		/*
@@ -806,13 +882,13 @@
 		 * either the CPU handling the broadcast
 		 * interrupt or we got woken by something else.
 		 *
-		 * We are not longer in the broadcast mask, so
+		 * We are no longer in the broadcast mask, so
 		 * if the cpu local expiry time is already
 		 * reached, we would reprogram the cpu local
 		 * timer with an already expired event.
 		 *
 		 * This can lead to a ping-pong when we return
-		 * to idle and therefor rearm the broadcast
+		 * to idle and therefore rearm the broadcast
 		 * timer before the cpu local timer was able
 		 * to fire. This happens because the forced
 		 * reprogramming makes sure that the event
@@ -848,6 +924,53 @@
 out:
 	raw_spin_unlock(&tick_broadcast_lock);
 	return ret;
+}
+
+static int tick_oneshot_wakeup_control(enum tick_broadcast_state state,
+				       struct tick_device *td,
+				       int cpu)
+{
+	struct clock_event_device *dev, *wd;
+
+	dev = td->evtdev;
+	if (td->mode != TICKDEV_MODE_ONESHOT)
+		return -EINVAL;
+
+	wd = tick_get_oneshot_wakeup_device(cpu);
+	if (!wd)
+		return -ENODEV;
+
+	switch (state) {
+	case TICK_BROADCAST_ENTER:
+		clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT_STOPPED);
+		clockevents_switch_state(wd, CLOCK_EVT_STATE_ONESHOT);
+		clockevents_program_event(wd, dev->next_event, 1);
+		break;
+	case TICK_BROADCAST_EXIT:
+		/* We may have transitioned to oneshot mode while idle */
+		if (clockevent_get_state(wd) != CLOCK_EVT_STATE_ONESHOT)
+			return -ENODEV;
+	}
+
+	return 0;
+}
+
+int __tick_broadcast_oneshot_control(enum tick_broadcast_state state)
+{
+	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
+	int cpu = smp_processor_id();
+
+	if (!tick_oneshot_wakeup_control(state, td, cpu))
+		return 0;
+
+	if (tick_broadcast_device.evtdev)
+		return ___tick_broadcast_oneshot_control(state, td, cpu);
+
+	/*
+	 * If there is no broadcast or wakeup device, tell the caller not
+	 * to go into deep idle.
+	 */
+	return -EBUSY;
 }
 
 /*
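
__tick_broadcast_oneshot_control() now tries the per-CPU wakeup device first and only falls back to the broadcast machinery, or to -EBUSY (telling cpuidle not to enter a state in which the local timer stops) when neither device exists. A compilable model of that dispatch; the helper names and errno values are illustrative, not the kernel's:

#include <stdio.h>

#define MODEL_EBUSY	16	/* illustrative errno magnitudes */
#define MODEL_ENODEV	19

/* Assumed outcome of the per-CPU wakeup path for this sketch. */
static int wakeup_control(int has_wakeup_dev)
{
	return has_wakeup_dev ? 0 : -MODEL_ENODEV;
}

static int broadcast_control(void) { return 0; }

/* Mirrors the dispatch order in __tick_broadcast_oneshot_control(). */
static int oneshot_control(int has_wakeup_dev, int has_broadcast_dev)
{
	if (!wakeup_control(has_wakeup_dev))
		return 0;			/* wakeup timer handled it */
	if (has_broadcast_dev)
		return broadcast_control();	/* legacy broadcast path */
	return -MODEL_EBUSY;			/* deep idle is not safe */
}

int main(void)
{
	printf("%d %d %d\n",
	       oneshot_control(1, 0),	/*  0: per-CPU wakeup device */
	       oneshot_control(0, 1),	/*  0: broadcast fallback */
	       oneshot_control(0, 0));	/* -16: tell cpuidle to back off */
	return 0;
}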
@@ -955,13 +1078,12 @@
 }
 
 /*
- * Remove a dead CPU from broadcasting
+ * Remove a dying CPU from broadcasting
  */
-void tick_shutdown_broadcast_oneshot(unsigned int cpu)
+static void tick_broadcast_oneshot_offline(unsigned int cpu)
 {
-	unsigned long flags;
-
-	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
+	if (tick_get_oneshot_wakeup_device(cpu))
+		tick_set_oneshot_wakeup_device(NULL, cpu);
 
 	/*
 	 * Clear the broadcast masks for the dead cpu, but do not stop
@@ -970,8 +1092,6 @@
 	cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
 	cpumask_clear_cpu(cpu, tick_broadcast_pending_mask);
 	cpumask_clear_cpu(cpu, tick_broadcast_force_mask);
-
-	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
 }
 #endif
 
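
On the oneshot offline path, passing NULL to tick_set_oneshot_wakeup_device() takes the set_device shortcut: the incumbent is handed back through clockevents_exchange_device() and the per-CPU slot is cleared. A minimal sketch of that release-by-NULL idiom; the struct and slot array are toy models, not kernel types:

#include <stdio.h>

struct model_dev { const char *name; };

static struct model_dev *wakeup_slot[4];	/* toy per-CPU slot array */

/* Models the set/release idiom: passing NULL releases the incumbent,
 * mirroring how the NULL case above jumps straight to set_device. */
static void set_wakeup_device(struct model_dev *newdev, int cpu)
{
	struct model_dev *curdev = wakeup_slot[cpu];

	if (curdev)	/* kernel: clockevents_exchange_device(curdev, newdev) */
		printf("releasing %s on cpu%d\n", curdev->name, cpu);
	wakeup_slot[cpu] = newdev;
}

int main(void)
{
	struct model_dev timer = { "percpu-timer" };

	set_wakeup_device(&timer, 0);	/* registration */
	set_wakeup_device(NULL, 0);	/* hotplug offline: release */
	return 0;
}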