.. | .. |
---|
15 | 15 | # include <linux/cpumask.h> |
---|
16 | 16 | # include <linux/ktime.h> |
---|
17 | 17 | # include <linux/notifier.h> |
---|
| 18 | +# include <linux/irqstage.h> |
---|
18 | 19 | |
---|
19 | 20 | struct clock_event_device; |
---|
20 | 21 | struct module; |
---|
.. | .. |
---|
31 | 32 | * from DETACHED or SHUTDOWN. |
---|
32 | 33 | * ONESHOT_STOPPED: Device was programmed in ONESHOT mode and is temporarily |
---|
33 | 34 | * stopped. |
---|
| 35 | + * RESERVED: Device is controlled by an out-of-band core via a proxy. |
---|
34 | 36 | */ |
---|
35 | 37 | enum clock_event_state { |
---|
36 | 38 | CLOCK_EVT_STATE_DETACHED, |
---|
.. | .. |
---|
38 | 40 | CLOCK_EVT_STATE_PERIODIC, |
---|
39 | 41 | CLOCK_EVT_STATE_ONESHOT, |
---|
40 | 42 | CLOCK_EVT_STATE_ONESHOT_STOPPED, |
---|
| 43 | + CLOCK_EVT_STATE_RESERVED, |
---|
41 | 44 | }; |
---|
42 | 45 | |
---|
43 | 46 | /* |
---|
.. | .. |
---|
67 | 70 | */ |
---|
68 | 71 | # define CLOCK_EVT_FEAT_HRTIMER 0x000080 |
---|
69 | 72 | |
---|
| 73 | +/* |
---|
| 74 | + * Interrupt pipeline support: |
---|
| 75 | + * |
---|
| 76 | + * - Clockevent device can work with pipelined timer events (i.e. proxied). |
---|
| 77 | + * - Device currently delivers high-precision events via out-of-band interrupts. |
---|
| 78 | + * - Device acts as a proxy for timer interrupt pipelining. |
---|
| 79 | + */ |
---|
| 80 | +# define CLOCK_EVT_FEAT_PIPELINE 0x000100 |
---|
| 81 | +# define CLOCK_EVT_FEAT_OOB 0x000200 |
---|
| 82 | +# define CLOCK_EVT_FEAT_PROXY 0x000400 |
---|
| 83 | + |
---|
70 | 84 | /** |
---|
71 | 85 | * struct clock_event_device - clock event device descriptor |
---|
72 | 86 | * @event_handler: Assigned by the framework to be called by the low |
---|
.. | .. |
---|
91 | 105 | * @max_delta_ticks: maximum delta value in ticks stored for reconfiguration |
---|
92 | 106 | * @name: ptr to clock event name |
---|
93 | 107 | * @rating: variable to rate clock event devices |
---|
94 | | - * @irq: IRQ number (only for non CPU local devices) |
---|
| 108 | + * @irq: IRQ number (only for non CPU local devices, or pipelined timers) |
---|
95 | 109 | * @bound_on: Bound on CPU |
---|
96 | 110 | * @cpumask: cpumask to indicate for which CPUs this device works |
---|
97 | 111 | * @list: list head for the management code |
---|
.. | .. |
---|
137 | 151 | return dev->state_use_accessors == CLOCK_EVT_STATE_DETACHED; |
---|
138 | 152 | } |
---|
139 | 153 | |
---|
| 154 | +static inline bool clockevent_state_reserved(struct clock_event_device *dev) |
---|
| 155 | +{ |
---|
| 156 | + return dev->state_use_accessors == CLOCK_EVT_STATE_RESERVED; |
---|
| 157 | +} |
---|
| 158 | + |
---|
140 | 159 | static inline bool clockevent_state_shutdown(struct clock_event_device *dev) |
---|
141 | 160 | { |
---|
142 | 161 | return dev->state_use_accessors == CLOCK_EVT_STATE_SHUTDOWN; |
---|
.. | .. |
---|
155 | 174 | static inline bool clockevent_state_oneshot_stopped(struct clock_event_device *dev) |
---|
156 | 175 | { |
---|
157 | 176 | return dev->state_use_accessors == CLOCK_EVT_STATE_ONESHOT_STOPPED; |
---|
| 177 | +} |
---|
| 178 | + |
---|
| 179 | +static inline bool clockevent_is_oob(struct clock_event_device *dev) |
---|
| 180 | +{ |
---|
| 181 | + return !!(dev->features & CLOCK_EVT_FEAT_OOB); |
---|
158 | 182 | } |
---|
159 | 183 | |
---|
160 | 184 | /* |
---|
.. | .. |
---|
186 | 210 | extern void clockevents_config_and_register(struct clock_event_device *dev, |
---|
187 | 211 | u32 freq, unsigned long min_delta, |
---|
188 | 212 | unsigned long max_delta); |
---|
| 213 | +extern void clockevents_switch_state(struct clock_event_device *dev, |
---|
| 214 | + enum clock_event_state state); |
---|
189 | 215 | |
---|
190 | 216 | extern int clockevents_update_freq(struct clock_event_device *ce, u32 freq); |
---|
191 | 217 | |
---|
.. | .. |
---|
215 | 241 | static inline void tick_setup_hrtimer_broadcast(void) { } |
---|
216 | 242 | # endif |
---|
217 | 243 | |
---|
| 244 | +#ifdef CONFIG_IRQ_PIPELINE |
---|
| 245 | + |
---|
/*
 * Descriptor for a proxied clock event device, used by timer
 * interrupt pipelining: ties the proxy device exposed to the in-band
 * kernel to the real hardware device it stands in for.
 */
struct clock_proxy_device {
	/* Proxy exposed in place of the real clock event device. */
	struct clock_event_device proxy_device;
	/* Underlying hardware clock event device being proxied. */
	struct clock_event_device *real_device;
	/* Handler invoked for events delivered out-of-band. */
	void (*handle_oob_event)(struct clock_event_device *dev);
	/*
	 * Internal hooks (double-underscore prefix marks them as
	 * implementation details): presumably used by the core to
	 * install the proxy handler and to save/restore the device's
	 * original event handler — TODO confirm against the proxy
	 * tick core.
	 */
	void (*__setup_handler)(struct clock_proxy_device *dev);
	void (*__original_handler)(struct clock_event_device *dev);
};
---|
| 253 | + |
---|
| 254 | +void tick_notify_proxy(void); |
---|
| 255 | + |
---|
| 256 | +static inline |
---|
| 257 | +void clockevents_handle_event(struct clock_event_device *ced) |
---|
| 258 | +{ |
---|
| 259 | + /* |
---|
| 260 | + * If called from the in-band stage, or for delivering a |
---|
| 261 | + * high-precision timer event to the out-of-band stage, call |
---|
| 262 | + * the event handler immediately. |
---|
| 263 | + * |
---|
| 264 | + * Otherwise, ced is still the in-band tick device for the |
---|
| 265 | + * current CPU, so just relay the incoming tick to the in-band |
---|
| 266 | + * stage via tick_notify_proxy(). This situation can happen |
---|
| 267 | + * when all CPUs receive the same out-of-band IRQ from a given |
---|
| 268 | + * clock event device, but only a subset of the online CPUs has |
---|
| 269 | + * enabled a proxy. |
---|
| 270 | + */ |
---|
| 271 | + if (clockevent_is_oob(ced) || running_inband()) |
---|
| 272 | + ced->event_handler(ced); |
---|
| 273 | + else |
---|
| 274 | + tick_notify_proxy(); |
---|
| 275 | +} |
---|
| 276 | + |
---|
| 277 | +#else |
---|
| 278 | + |
---|
/*
 * !CONFIG_IRQ_PIPELINE fallback: there is no out-of-band stage, so
 * simply run the registered event handler for @ced.
 */
static inline
void clockevents_handle_event(struct clock_event_device *ced)
{
	ced->event_handler(ced);
}
---|
| 284 | + |
---|
| 285 | +#endif /* !CONFIG_IRQ_PIPELINE */ |
---|
| 286 | + |
---|
218 | 287 | #else /* !CONFIG_GENERIC_CLOCKEVENTS: */ |
---|
219 | 288 | |
---|
220 | 289 | static inline void clockevents_suspend(void) { } |
---|