hc
2024-11-01 2f529f9b558ca1c1bd74be7437a84e4711743404
kernel/include/linux/clockchips.h
....@@ -15,6 +15,7 @@
1515 # include <linux/cpumask.h>
1616 # include <linux/ktime.h>
1717 # include <linux/notifier.h>
18
+# include <linux/irqstage.h>
1819
1920 struct clock_event_device;
2021 struct module;
....@@ -31,6 +32,7 @@
3132 * from DETACHED or SHUTDOWN.
3233 * ONESHOT_STOPPED: Device was programmed in ONESHOT mode and is temporarily
3334 * stopped.
35
+ * RESERVED: Device is controlled by an out-of-band core via a proxy.
3436 */
3537 enum clock_event_state {
3638 CLOCK_EVT_STATE_DETACHED,
....@@ -38,6 +40,7 @@
3840 CLOCK_EVT_STATE_PERIODIC,
3941 CLOCK_EVT_STATE_ONESHOT,
4042 CLOCK_EVT_STATE_ONESHOT_STOPPED,
43
+ CLOCK_EVT_STATE_RESERVED,
4144 };
4245
4346 /*
....@@ -67,6 +70,17 @@
6770 */
6871 # define CLOCK_EVT_FEAT_HRTIMER 0x000080
6972
73
+/*
74
+ * Interrupt pipeline support:
75
+ *
76
+ * - Clockevent device can work with pipelined timer events (i.e. it may be proxied).
77
+ * - Device currently delivers high-precision events via out-of-band interrupts.
78
+ * - Device acts as a proxy for timer interrupt pipelining.
79
+ */
80
+# define CLOCK_EVT_FEAT_PIPELINE 0x000100
81
+# define CLOCK_EVT_FEAT_OOB 0x000200
82
+# define CLOCK_EVT_FEAT_PROXY 0x000400
83
+
7084 /**
7185 * struct clock_event_device - clock event device descriptor
7286 * @event_handler: Assigned by the framework to be called by the low
....@@ -91,7 +105,7 @@
91105 * @max_delta_ticks: maximum delta value in ticks stored for reconfiguration
92106 * @name: ptr to clock event name
93107 * @rating: variable to rate clock event devices
94
- * @irq: IRQ number (only for non CPU local devices)
108
+ * @irq: IRQ number (only for non CPU local devices, or pipelined timers)
95109 * @bound_on: Bound on CPU
96110 * @cpumask: cpumask to indicate for which CPUs this device works
97111 * @list: list head for the management code
....@@ -137,6 +151,11 @@
137151 return dev->state_use_accessors == CLOCK_EVT_STATE_DETACHED;
138152 }
139153
154
+static inline bool clockevent_state_reserved(struct clock_event_device *dev)
155
+{
156
+ return dev->state_use_accessors == CLOCK_EVT_STATE_RESERVED;
157
+}
158
+
140159 static inline bool clockevent_state_shutdown(struct clock_event_device *dev)
141160 {
142161 return dev->state_use_accessors == CLOCK_EVT_STATE_SHUTDOWN;
....@@ -155,6 +174,11 @@
155174 static inline bool clockevent_state_oneshot_stopped(struct clock_event_device *dev)
156175 {
157176 return dev->state_use_accessors == CLOCK_EVT_STATE_ONESHOT_STOPPED;
177
+}
178
+
179
+static inline bool clockevent_is_oob(struct clock_event_device *dev)
180
+{
181
+ return !!(dev->features & CLOCK_EVT_FEAT_OOB);
158182 }
159183
160184 /*
....@@ -186,6 +210,8 @@
186210 extern void clockevents_config_and_register(struct clock_event_device *dev,
187211 u32 freq, unsigned long min_delta,
188212 unsigned long max_delta);
213
+extern void clockevents_switch_state(struct clock_event_device *dev,
214
+ enum clock_event_state state);
189215
190216 extern int clockevents_update_freq(struct clock_event_device *ce, u32 freq);
191217
....@@ -215,6 +241,49 @@
215241 static inline void tick_setup_hrtimer_broadcast(void) { }
216242 # endif
217243
244
+#ifdef CONFIG_IRQ_PIPELINE
245
+
246
+struct clock_proxy_device {
247
+ struct clock_event_device proxy_device;
248
+ struct clock_event_device *real_device;
249
+ void (*handle_oob_event)(struct clock_event_device *dev);
250
+ void (*__setup_handler)(struct clock_proxy_device *dev);
251
+ void (*__original_handler)(struct clock_event_device *dev);
252
+};
253
+
254
+void tick_notify_proxy(void);
255
+
256
+static inline
257
+void clockevents_handle_event(struct clock_event_device *ced)
258
+{
259
+ /*
260
+ * If called from the in-band stage, or for delivering a
261
+ * high-precision timer event to the out-of-band stage, call
262
+ * the event handler immediately.
263
+ *
264
+ * Otherwise, ced is still the in-band tick device for the
265
+ * current CPU, so just relay the incoming tick to the in-band
266
+ * stage via tick_notify_proxy(). This situation can happen
267
+ * when all CPUs receive the same out-of-band IRQ from a given
268
+ * clock event device, but only a subset of the online CPUs has
269
+ * enabled a proxy.
270
+ */
271
+ if (clockevent_is_oob(ced) || running_inband())
272
+ ced->event_handler(ced);
273
+ else
274
+ tick_notify_proxy();
275
+}
276
+
277
+#else
278
+
279
+static inline
280
+void clockevents_handle_event(struct clock_event_device *ced)
281
+{
282
+ ced->event_handler(ced);
283
+}
284
+
285
+#endif /* !CONFIG_IRQ_PIPELINE */
286
+
218287 #else /* !CONFIG_GENERIC_CLOCKEVENTS: */
219288
220289 static inline void clockevents_suspend(void) { }