--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -1,15 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
- * linux/kernel/time/tick-broadcast.c
- *
  * This file contains functions which emulate a local clock-event
  * device via a broadcast event source.
  *
  * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
  * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
  * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
- *
- * This code is licenced under the GPL version 2. For details see
- * kernel-base/COPYING.
  */
 #include <linux/cpu.h>
 #include <linux/err.h>
@@ -37,13 +33,21 @@
 static __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(tick_broadcast_lock);
 
 #ifdef CONFIG_TICK_ONESHOT
+static DEFINE_PER_CPU(struct clock_event_device *, tick_oneshot_wakeup_device);
+
 static void tick_broadcast_setup_oneshot(struct clock_event_device *bc);
 static void tick_broadcast_clear_oneshot(int cpu);
 static void tick_resume_broadcast_oneshot(struct clock_event_device *bc);
+# ifdef CONFIG_HOTPLUG_CPU
+static void tick_broadcast_oneshot_offline(unsigned int cpu);
+# endif
 #else
 static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc) { BUG(); }
 static inline void tick_broadcast_clear_oneshot(int cpu) { }
 static inline void tick_resume_broadcast_oneshot(struct clock_event_device *bc) { }
+# ifdef CONFIG_HOTPLUG_CPU
+static inline void tick_broadcast_oneshot_offline(unsigned int cpu) { }
+# endif
 #endif
 
 /*
@@ -57,6 +61,13 @@
 struct cpumask *tick_get_broadcast_mask(void)
 {
 	return tick_broadcast_mask;
+}
+
+static struct clock_event_device *tick_get_oneshot_wakeup_device(int cpu);
+
+const struct clock_event_device *tick_get_wakeup_device(int cpu)
+{
+	return tick_get_oneshot_wakeup_device(cpu);
 }
 
 /*
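
The accessor added above returns a const pointer on purpose: it lets read-only consumers (diagnostic output such as a per-CPU timer-device dump) report which wakeup device, if any, is installed, without being able to modify it. A minimal sketch of such a consumer, assuming nothing beyond the declaration shown above; the helper name dump_wakeup_devices() is hypothetical:

#include <linux/clockchips.h>
#include <linux/cpumask.h>
#include <linux/printk.h>

/* Hypothetical diagnostic helper; only tick_get_wakeup_device() is real. */
static void dump_wakeup_devices(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct clock_event_device *wd = tick_get_wakeup_device(cpu);

		/* NULL until a per-CPU oneshot wakeup device is accepted */
		pr_info("cpu%d: wakeup device: %s\n",
			cpu, wd ? wd->name : "<NULL>");
	}
}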
@@ -86,12 +97,74 @@
 	return !curdev || newdev->rating > curdev->rating;
 }
 
+#ifdef CONFIG_TICK_ONESHOT
+static struct clock_event_device *tick_get_oneshot_wakeup_device(int cpu)
+{
+	return per_cpu(tick_oneshot_wakeup_device, cpu);
+}
+
+static void tick_oneshot_wakeup_handler(struct clock_event_device *wd)
+{
+	/*
+	 * If we woke up early and the tick was reprogrammed in the
+	 * meantime then this may be spurious but harmless.
+	 */
+	tick_receive_broadcast();
+}
+
+static bool tick_set_oneshot_wakeup_device(struct clock_event_device *newdev,
+					   int cpu)
+{
+	struct clock_event_device *curdev = tick_get_oneshot_wakeup_device(cpu);
+
+	if (!newdev)
+		goto set_device;
+
+	if ((newdev->features & CLOCK_EVT_FEAT_DUMMY) ||
+	    (newdev->features & CLOCK_EVT_FEAT_C3STOP))
+		return false;
+
+	if (!(newdev->features & CLOCK_EVT_FEAT_PERCPU) ||
+	    !(newdev->features & CLOCK_EVT_FEAT_ONESHOT))
+		return false;
+
+	if (!cpumask_equal(newdev->cpumask, cpumask_of(cpu)))
+		return false;
+
+	if (curdev && newdev->rating <= curdev->rating)
+		return false;
+
+	if (!try_module_get(newdev->owner))
+		return false;
+
+	newdev->event_handler = tick_oneshot_wakeup_handler;
+set_device:
+	clockevents_exchange_device(curdev, newdev);
+	per_cpu(tick_oneshot_wakeup_device, cpu) = newdev;
+	return true;
+}
+#else
+static struct clock_event_device *tick_get_oneshot_wakeup_device(int cpu)
+{
+	return NULL;
+}
+
+static bool tick_set_oneshot_wakeup_device(struct clock_event_device *newdev,
+					   int cpu)
+{
+	return false;
+}
+#endif
+
 /*
  * Conditionally install/replace broadcast device
  */
-void tick_install_broadcast_device(struct clock_event_device *dev)
+void tick_install_broadcast_device(struct clock_event_device *dev, int cpu)
 {
 	struct clock_event_device *cur = tick_broadcast_device.evtdev;
+
+	if (tick_set_oneshot_wakeup_device(dev, cpu))
+		return;
 
 	if (!tick_check_broadcast_device(cur, dev))
 		return;
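
tick_set_oneshot_wakeup_device() only accepts a clockevent that is not a dummy, keeps running in deep idle (no CLOCK_EVT_FEAT_C3STOP), is both per-CPU and oneshot capable, is affine to exactly one CPU, and beats the rating of any incumbent. A sketch of how a driver might register such a device; the device name, rating, frequency and callback body are illustrative assumptions, while the feature flags and clockevents_config_and_register() are the real API:

#include <linux/clockchips.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(struct clock_event_device, example_wakeup_evt);

static int example_set_next_event(unsigned long delta,
				  struct clock_event_device *evt)
{
	/* Program the always-on per-CPU wakeup timer hardware here. */
	return 0;
}

static void example_register_wakeup(unsigned int cpu)
{
	struct clock_event_device *evt = per_cpu_ptr(&example_wakeup_evt, cpu);

	evt->name		= "example-wakeup";	/* made-up name */
	evt->rating		= 350;	/* must exceed any current device */
	/* No C3STOP: the whole point is to keep ticking in deep idle */
	evt->features		= CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERCPU;
	evt->cpumask		= cpumask_of(cpu);	/* exactly one CPU */
	evt->set_next_event	= example_set_next_event;

	/* Reaches tick_install_broadcast_device() via the clockevents core */
	clockevents_config_and_register(evt, 32768, 1, 0x7fffffff);
}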
@@ -105,6 +178,19 @@
 	tick_broadcast_device.evtdev = dev;
 	if (!cpumask_empty(tick_broadcast_mask))
 		tick_broadcast_start_periodic(dev);
+
+	if (!(dev->features & CLOCK_EVT_FEAT_ONESHOT))
+		return;
+
+	/*
+	 * If the system already runs in oneshot mode, switch the newly
+	 * registered broadcast device to oneshot mode explicitly.
+	 */
+	if (tick_broadcast_oneshot_active()) {
+		tick_broadcast_switch_to_oneshot();
+		return;
+	}
+
 	/*
 	 * Inform all cpus about this. We might be in a situation
 	 * where we did not switch to oneshot mode because the per cpu
@@ -113,8 +199,7 @@
 	 * notification the systems stays stuck in periodic mode
 	 * forever.
 	 */
-	if (dev->features & CLOCK_EVT_FEAT_ONESHOT)
-		tick_clock_notify();
+	tick_clock_notify();
 }
 
 /*
@@ -239,7 +324,6 @@
 	return ret;
 }
 
-#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 int tick_receive_broadcast(void)
 {
 	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
@@ -254,7 +338,6 @@
 	evt->event_handler(evt);
 	return 0;
 }
-#endif
 
 /*
  * Broadcast the event to the cpus, which are set in the mask (mangled).
@@ -379,6 +462,7 @@
 	switch (mode) {
 	case TICK_BROADCAST_FORCE:
 		tick_broadcast_forced = 1;
+		fallthrough;
 	case TICK_BROADCAST_ON:
 		cpumask_set_cpu(cpu, tick_broadcast_on);
 		if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_mask)) {
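
The added fallthrough; is the pseudo-keyword from include/linux/compiler_attributes.h (it expands to __attribute__((__fallthrough__)) on compilers that support it). It documents that TICK_BROADCAST_FORCE is deliberately handled as a superset of TICK_BROADCAST_ON and keeps the switch warning-clean under -Wimplicit-fallthrough. A minimal standalone illustration of the pattern, with made-up case values:

#include <linux/compiler_attributes.h>

static int classify(int mode)
{
	int forced = 0;

	switch (mode) {
	case 2:			/* "force": everything "on" does, plus a flag */
		forced = 1;
		fallthrough;	/* deliberate: no break before the next case */
	case 1:			/* "on" */
		return 1 + forced;
	default:
		return 0;
	}
}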
@@ -400,8 +484,6 @@
 		if (tick_broadcast_forced)
 			break;
 		cpumask_clear_cpu(cpu, tick_broadcast_on);
-		if (!tick_device_is_functional(dev))
-			break;
 		if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_mask)) {
 			if (tick_broadcast_device.mode ==
 					TICKDEV_MODE_PERIODIC)
@@ -438,27 +520,29 @@
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-/*
- * Remove a CPU from broadcasting
- */
-void tick_shutdown_broadcast(unsigned int cpu)
+static void tick_shutdown_broadcast(void)
 {
-	struct clock_event_device *bc;
-	unsigned long flags;
-
-	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
-
-	bc = tick_broadcast_device.evtdev;
-	cpumask_clear_cpu(cpu, tick_broadcast_mask);
-	cpumask_clear_cpu(cpu, tick_broadcast_on);
+	struct clock_event_device *bc = tick_broadcast_device.evtdev;
 
 	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
 		if (bc && cpumask_empty(tick_broadcast_mask))
 			clockevents_shutdown(bc);
 	}
-
-	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
 }
+
+/*
+ * Remove a CPU from broadcasting
+ */
+void tick_broadcast_offline(unsigned int cpu)
+{
+	raw_spin_lock(&tick_broadcast_lock);
+	cpumask_clear_cpu(cpu, tick_broadcast_mask);
+	cpumask_clear_cpu(cpu, tick_broadcast_on);
+	tick_broadcast_oneshot_offline(cpu);
+	tick_shutdown_broadcast();
+	raw_spin_unlock(&tick_broadcast_lock);
+}
+
 #endif
 
 void tick_suspend_broadcast(void)
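
Note the switch from raw_spin_lock_irqsave() to plain raw_spin_lock(): the new tick_broadcast_offline() is only reachable from the CPU hotplug teardown path, where the outgoing CPU already runs with interrupts hard-disabled, so saving and restoring flags would be redundant. A sketch of the assumed calling context; the teardown function shown is an illustration of that flow, not part of this diff:

#include <linux/irqflags.h>
#include <linux/smp.h>

/* Hypothetical hotplug teardown callback running on the dying CPU. */
static int example_cpu_teardown(void *param)
{
	unsigned int cpu = smp_processor_id();

	WARN_ON_ONCE(!irqs_disabled());	/* hotplug core disabled them */

	tick_broadcast_offline(cpu);	/* clear this CPU from all masks */

	return 0;
}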
@@ -704,24 +788,16 @@
 		clockevents_switch_state(dev, CLOCK_EVT_STATE_SHUTDOWN);
 }
 
-int __tick_broadcast_oneshot_control(enum tick_broadcast_state state)
+static int ___tick_broadcast_oneshot_control(enum tick_broadcast_state state,
+					     struct tick_device *td,
+					     int cpu)
 {
-	struct clock_event_device *bc, *dev;
-	int cpu, ret = 0;
+	struct clock_event_device *bc, *dev = td->evtdev;
+	int ret = 0;
 	ktime_t now;
-
-	/*
-	 * If there is no broadcast device, tell the caller not to go
-	 * into deep idle.
-	 */
-	if (!tick_broadcast_device.evtdev)
-		return -EBUSY;
-
-	dev = this_cpu_ptr(&tick_cpu_device)->evtdev;
 
 	raw_spin_lock(&tick_broadcast_lock);
 	bc = tick_broadcast_device.evtdev;
-	cpu = smp_processor_id();
 
 	if (state == TICK_BROADCAST_ENTER) {
 		/*
@@ -806,13 +882,13 @@
 			 * either the CPU handling the broadcast
 			 * interrupt or we got woken by something else.
 			 *
-			 * We are not longer in the broadcast mask, so
+			 * We are no longer in the broadcast mask, so
 			 * if the cpu local expiry time is already
 			 * reached, we would reprogram the cpu local
 			 * timer with an already expired event.
 			 *
 			 * This can lead to a ping-pong when we return
-			 * to idle and therefor rearm the broadcast
+			 * to idle and therefore rearm the broadcast
 			 * timer before the cpu local timer was able
 			 * to fire. This happens because the forced
 			 * reprogramming makes sure that the event
@@ -848,6 +924,53 @@
 out:
 	raw_spin_unlock(&tick_broadcast_lock);
 	return ret;
+}
+
+static int tick_oneshot_wakeup_control(enum tick_broadcast_state state,
+				       struct tick_device *td,
+				       int cpu)
+{
+	struct clock_event_device *dev, *wd;
+
+	dev = td->evtdev;
+	if (td->mode != TICKDEV_MODE_ONESHOT)
+		return -EINVAL;
+
+	wd = tick_get_oneshot_wakeup_device(cpu);
+	if (!wd)
+		return -ENODEV;
+
+	switch (state) {
+	case TICK_BROADCAST_ENTER:
+		clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT_STOPPED);
+		clockevents_switch_state(wd, CLOCK_EVT_STATE_ONESHOT);
+		clockevents_program_event(wd, dev->next_event, 1);
+		break;
+	case TICK_BROADCAST_EXIT:
+		/* We may have transitioned to oneshot mode while idle */
+		if (clockevent_get_state(wd) != CLOCK_EVT_STATE_ONESHOT)
+			return -ENODEV;
+	}
+
+	return 0;
+}
+
+int __tick_broadcast_oneshot_control(enum tick_broadcast_state state)
+{
+	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
+	int cpu = smp_processor_id();
+
+	if (!tick_oneshot_wakeup_control(state, td, cpu))
+		return 0;
+
+	if (tick_broadcast_device.evtdev)
+		return ___tick_broadcast_oneshot_control(state, td, cpu);
+
+	/*
+	 * If there is no broadcast or wakeup device, tell the caller not
+	 * to go into deep idle.
+	 */
+	return -EBUSY;
 }
 
 /*
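
With this split, __tick_broadcast_oneshot_control() first tries the per-CPU oneshot wakeup device, falls back to the system-wide broadcast device, and only returns -EBUSY when neither exists. Its caller is the idle path, via the tick_broadcast_enter()/tick_broadcast_exit() wrappers in include/linux/tick.h. A sketch of how a cpuidle-style deep-idle entry consumes that return value; the enter_deep_idle() and cpu_do_idle_shallow() hooks are hypothetical:

#include <linux/tick.h>

static void example_deep_idle(void)
{
	/*
	 * TICK_BROADCAST_ENTER: hand the next tick over to the wakeup
	 * or broadcast device. A non-zero return means nothing can wake
	 * us, so the deep (timer-stopping) state must be avoided.
	 */
	if (tick_broadcast_enter()) {
		cpu_do_idle_shallow();	/* hypothetical shallow fallback */
		return;
	}

	enter_deep_idle();		/* hypothetical: local timer may stop */

	/* TICK_BROADCAST_EXIT: take the tick back after wakeup. */
	tick_broadcast_exit();
}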
@@ -955,13 +1078,12 @@
 }
 
 /*
- * Remove a dead CPU from broadcasting
+ * Remove a dying CPU from broadcasting
 */
-void tick_shutdown_broadcast_oneshot(unsigned int cpu)
+static void tick_broadcast_oneshot_offline(unsigned int cpu)
 {
-	unsigned long flags;
-
-	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
+	if (tick_get_oneshot_wakeup_device(cpu))
+		tick_set_oneshot_wakeup_device(NULL, cpu);
 
 	/*
 	 * Clear the broadcast masks for the dead cpu, but do not stop
@@ -970,8 +1092,6 @@
 	cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
 	cpumask_clear_cpu(cpu, tick_broadcast_pending_mask);
 	cpumask_clear_cpu(cpu, tick_broadcast_force_mask);
-
-	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
 }
 #endif
 
---|