2023-12-09 b22da3d8526a935aa31e086e63f60ff3246cb61c
kernel/include/linux/cpuidle.h
@@ -14,6 +14,7 @@
 #include <linux/percpu.h>
 #include <linux/list.h>
 #include <linux/hrtimer.h>
+#include <linux/android_kabi.h>
 
 #define CPUIDLE_STATE_MAX	10
 #define CPUIDLE_NAME_LEN	16
@@ -29,10 +30,16 @@
  * CPUIDLE DEVICE INTERFACE *
  ****************************/
 
+#define CPUIDLE_STATE_DISABLED_BY_USER		BIT(0)
+#define CPUIDLE_STATE_DISABLED_BY_DRIVER	BIT(1)
+
 struct cpuidle_state_usage {
 	unsigned long long	disable;
 	unsigned long long	usage;
-	unsigned long long	time; /* in US */
+	u64			time_ns;
+	unsigned long long	above; /* Number of times it's been too deep */
+	unsigned long long	below; /* Number of times it's been too shallow */
+	unsigned long long	rejected; /* Number of times idle entry was rejected */
 #ifdef CONFIG_SUSPEND
 	unsigned long long	s2idle_usage;
 	unsigned long long	s2idle_time; /* in US */
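The two new CPUIDLE_STATE_DISABLED_BY_* bits suggest that the per-state disable field is treated as a bitmask, so a state can be kept out of use independently by sysfs and by the driver. A minimal sketch of how such a check might look; the helper name below is hypothetical, not part of this patch:

#include <linux/cpuidle.h>

/* Hypothetical helper: true if state "idx" may be used on this device. */
static bool example_state_usable(struct cpuidle_device *dev, int idx)
{
	struct cpuidle_state_usage *su = &dev->states_usage[idx];

	/* Either disable bit keeps the state out of the selection. */
	return !(su->disable & (CPUIDLE_STATE_DISABLED_BY_USER |
				CPUIDLE_STATE_DISABLED_BY_DRIVER));
}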
@@ -43,11 +50,12 @@
 	char		name[CPUIDLE_NAME_LEN];
 	char		desc[CPUIDLE_DESC_LEN];
 
+	u64		exit_latency_ns;
+	u64		target_residency_ns;
 	unsigned int	flags;
 	unsigned int	exit_latency; /* in US */
 	int		power_usage; /* in mW */
 	unsigned int	target_residency; /* in US */
-	bool		disabled; /* disabled on all CPUs */
 
 	int (*enter)	(struct cpuidle_device *dev,
 			struct cpuidle_driver *drv,
@@ -59,19 +67,24 @@
 	 * CPUs execute ->enter_s2idle with the local tick or entire timekeeping
 	 * suspended, so it must not re-enable interrupts at any point (even
 	 * temporarily) or attempt to change states of clock event devices.
+	 *
+	 * This callback may point to the same function as ->enter if all of
+	 * the above requirements are met by it.
 	 */
-	void (*enter_s2idle) (struct cpuidle_device *dev,
-			      struct cpuidle_driver *drv,
-			      int index);
+	int (*enter_s2idle)(struct cpuidle_device *dev,
+			    struct cpuidle_driver *drv,
+			    int index);
 };
 
 /* Idle State Flags */
-#define CPUIDLE_FLAG_NONE	(0x00)
-#define CPUIDLE_FLAG_POLLING	(0x01) /* polling state */
-#define CPUIDLE_FLAG_COUPLED	(0x02) /* state applies to multiple cpus */
-#define CPUIDLE_FLAG_TIMER_STOP	(0x04) /* timer is stopped on this state */
-
-#define CPUIDLE_DRIVER_FLAGS_MASK	(0xFFFF0000)
+#define CPUIDLE_FLAG_NONE		(0x00)
+#define CPUIDLE_FLAG_POLLING		BIT(0) /* polling state */
+#define CPUIDLE_FLAG_COUPLED		BIT(1) /* state applies to multiple cpus */
+#define CPUIDLE_FLAG_TIMER_STOP		BIT(2) /* timer is stopped on this state */
+#define CPUIDLE_FLAG_UNUSABLE		BIT(3) /* avoid using this state */
+#define CPUIDLE_FLAG_OFF		BIT(4) /* disable this state by default */
+#define CPUIDLE_FLAG_TLB_FLUSHED	BIT(5) /* idle-state flushes TLBs */
+#define CPUIDLE_FLAG_RCU_IDLE		BIT(6) /* idle-state takes care of RCU */
 
 struct cpuidle_device_kobj;
 struct cpuidle_state_kobj;
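Since ->enter_s2idle now returns int with the same prototype as ->enter, a driver whose regular enter path already satisfies the s2idle constraints can point both callbacks at the same function, as the new comment describes. A sketch under that assumption; the driver name, function name and numbers are invented for the example:

#include <linux/cpuidle.h>
#include <linux/module.h>

static int example_enter_idle(struct cpuidle_device *dev,
			      struct cpuidle_driver *drv, int index)
{
	/* ... program the hardware for state "index", no IRQ re-enabling ... */
	return index;
}

static struct cpuidle_driver example_idle_driver = {
	.name	= "example_idle",
	.owner	= THIS_MODULE,
	/* State 0 (a simple WFI-type state) omitted for brevity. */
	.states[1] = {
		.name			= "cpu-sleep",
		.desc			= "example retention state",
		.flags			= CPUIDLE_FLAG_TIMER_STOP,
		.exit_latency		= 100,	/* us */
		.target_residency	= 1000,	/* us */
		.enter			= example_enter_idle,
		/* Same constraints met, so reuse the regular callback. */
		.enter_s2idle		= example_enter_idle,
	},
};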
@@ -80,11 +93,14 @@
 struct cpuidle_device {
 	unsigned int		registered:1;
 	unsigned int		enabled:1;
-	unsigned int		use_deepest_state:1;
 	unsigned int		poll_time_limit:1;
 	unsigned int		cpu;
+	ktime_t			next_hrtimer;
 
-	int			last_residency;
+	int			last_state_idx;
+	u64			last_residency_ns;
+	u64			poll_limit_ns;
+	u64			forced_idle_latency_limit_ns;
 	struct cpuidle_state_usage	states_usage[CPUIDLE_STATE_MAX];
 	struct cpuidle_state_kobj *kobjs[CPUIDLE_STATE_MAX];
 	struct cpuidle_driver_kobj *kobj_driver;
@@ -95,20 +111,12 @@
 	cpumask_t		coupled_cpus;
 	struct cpuidle_coupled	*coupled;
 #endif
+
+	ANDROID_KABI_RESERVE(1);
 };
 
 DECLARE_PER_CPU(struct cpuidle_device *, cpuidle_devices);
 DECLARE_PER_CPU(struct cpuidle_device, cpuidle_dev);
-
-/**
- * cpuidle_get_last_residency - retrieves the last state's residency time
- * @dev: the target CPU
- */
-static inline int cpuidle_get_last_residency(struct cpuidle_device *dev)
-{
-	return dev->last_residency;
-}
-
 
 /****************************
  * CPUIDLE DRIVER INTERFACE *
@@ -117,7 +125,6 @@
 struct cpuidle_driver {
 	const char		*name;
 	struct module		*owner;
-	int			refcnt;
 
 	/* used by the cpuidle framework to setup the broadcast timer */
 	unsigned int		bctimer:1;
@@ -128,6 +135,11 @@
 
 	/* the driver handles the cpus in cpumask */
 	struct cpumask		*cpumask;
+
+	/* preferred governor to switch at register time */
+	const char		*governor;
+
+	ANDROID_KABI_RESERVE(1);
 };
 
 #ifdef CONFIG_CPU_IDLE
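The new governor string lets a driver ask the core to switch to a preferred governor when the driver is registered (provided that governor is available). A sketch of that usage; the driver and init function names are invented for the example:

#include <linux/cpuidle.h>
#include <linux/module.h>

static struct cpuidle_driver example_pm_driver = {
	.name		= "example_pm",
	.owner		= THIS_MODULE,
	/* Request the "teo" governor at registration time, if it is built in. */
	.governor	= "teo",
};

static int __init example_pm_init(void)
{
	/* The governor switch, if any, happens as part of registration. */
	return cpuidle_register_driver(&example_pm_driver);
}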
@@ -141,11 +153,13 @@
 extern int cpuidle_enter(struct cpuidle_driver *drv,
			 struct cpuidle_device *dev, int index);
 extern void cpuidle_reflect(struct cpuidle_device *dev, int index);
+extern u64 cpuidle_poll_time(struct cpuidle_driver *drv,
+			     struct cpuidle_device *dev);
 
 extern int cpuidle_register_driver(struct cpuidle_driver *drv);
 extern struct cpuidle_driver *cpuidle_get_driver(void);
-extern struct cpuidle_driver *cpuidle_driver_ref(void);
-extern void cpuidle_driver_unref(void);
+extern void cpuidle_driver_state_disabled(struct cpuidle_driver *drv, int idx,
+					bool disable);
 extern void cpuidle_unregister_driver(struct cpuidle_driver *drv);
 extern int cpuidle_register_device(struct cpuidle_device *dev);
 extern void cpuidle_unregister_device(struct cpuidle_device *dev);
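Alongside the removal of cpuidle_driver_ref()/cpuidle_driver_unref(), the header now exposes cpuidle_driver_state_disabled(), which lets a driver mark one of its states unusable (or usable again) on the CPUs it handles. A hedged sketch, with the quirk condition and function name invented for the example:

#include <linux/cpuidle.h>

/* Disable a (hypothetically broken) deep state on affected hardware. */
static void example_apply_idle_quirk(struct cpuidle_driver *drv, bool broken)
{
	/*
	 * Sets or clears the driver-side disable bit for state 2 on all
	 * CPUs handled by this driver.
	 */
	cpuidle_driver_state_disabled(drv, 2, broken);
}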
@@ -175,11 +189,14 @@
 					struct cpuidle_device *dev, int index)
 {return -ENODEV; }
 static inline void cpuidle_reflect(struct cpuidle_device *dev, int index) { }
+static inline u64 cpuidle_poll_time(struct cpuidle_driver *drv,
+			     struct cpuidle_device *dev)
+{return 0; }
 static inline int cpuidle_register_driver(struct cpuidle_driver *drv)
 {return -ENODEV; }
 static inline struct cpuidle_driver *cpuidle_get_driver(void) {return NULL; }
-static inline struct cpuidle_driver *cpuidle_driver_ref(void) {return NULL; }
-static inline void cpuidle_driver_unref(void) {}
+static inline void cpuidle_driver_state_disabled(struct cpuidle_driver *drv,
+					       int idx, bool disable) { }
 static inline void cpuidle_unregister_driver(struct cpuidle_driver *drv) { }
 static inline int cpuidle_register_device(struct cpuidle_device *dev)
 {return -ENODEV; }
@@ -203,24 +220,26 @@
 
 #ifdef CONFIG_CPU_IDLE
 extern int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
-				      struct cpuidle_device *dev);
+				      struct cpuidle_device *dev,
+				      u64 latency_limit_ns);
 extern int cpuidle_enter_s2idle(struct cpuidle_driver *drv,
				struct cpuidle_device *dev);
-extern void cpuidle_use_deepest_state(bool enable);
+extern void cpuidle_use_deepest_state(u64 latency_limit_ns);
 #else
 static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
-					     struct cpuidle_device *dev)
+					     struct cpuidle_device *dev,
+					     u64 latency_limit_ns)
 {return -ENODEV; }
 static inline int cpuidle_enter_s2idle(struct cpuidle_driver *drv,
				       struct cpuidle_device *dev)
 {return -ENODEV; }
-static inline void cpuidle_use_deepest_state(bool enable)
+static inline void cpuidle_use_deepest_state(u64 latency_limit_ns)
 {
 }
 #endif
 
 /* kernel/sched/idle.c */
-extern void sched_idle_set_state(struct cpuidle_state *idle_state, int index);
+extern void sched_idle_set_state(struct cpuidle_state *idle_state);
 extern void default_idle_call(void);
 
 #ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
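cpuidle_use_deepest_state() now takes a latency limit in nanoseconds rather than a bool, so forced-idle users can bound how deep the selected state may be; passing 0 appears to restore normal governor-driven selection. A sketch under that reading, with the helper name invented for the example:

#include <linux/cpuidle.h>

/* Illustrative idle-injection step bounded by an exit-latency budget. */
static void example_forced_idle_section(u64 latency_limit_ns)
{
	/* Only states whose exit latency fits the budget may be picked. */
	cpuidle_use_deepest_state(latency_limit_ns);

	/* ... the CPU is sent idle here by the caller's injection loop ... */

	/* Return to normal governor-based state selection. */
	cpuidle_use_deepest_state(0);
}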
@@ -257,15 +276,13 @@
 	void (*reflect)		(struct cpuidle_device *dev, int index);
 };
 
-#ifdef CONFIG_CPU_IDLE
 extern int cpuidle_register_governor(struct cpuidle_governor *gov);
-extern int cpuidle_governor_latency_req(unsigned int cpu);
-#else
-static inline int cpuidle_register_governor(struct cpuidle_governor *gov)
-{return 0;}
-#endif
+extern s64 cpuidle_governor_latency_req(unsigned int cpu);
 
-#define __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, is_retention) \
+#define __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter,			\
+				idx,					\
+				state,					\
+				is_retention)				\
 ({									\
 	int __ret = 0;							\
 									\
@@ -277,7 +294,7 @@
 	if (!is_retention)						\
 		__ret = cpu_pm_enter();					\
 	if (!__ret) {							\
-		__ret = low_level_idle_enter(idx);			\
+		__ret = low_level_idle_enter(state);			\
 		if (!is_retention)					\
 			cpu_pm_exit();					\
 	}								\
@@ -286,9 +303,15 @@
 })
 
 #define CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx)		\
-	__CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, 0)
+	__CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, idx, 0)
 
 #define CPU_PM_CPU_IDLE_ENTER_RETENTION(low_level_idle_enter, idx)	\
-	__CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, 1)
+	__CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, idx, 1)
+
+#define CPU_PM_CPU_IDLE_ENTER_PARAM(low_level_idle_enter, idx, state)	\
+	__CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, state, 0)
+
+#define CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM(low_level_idle_enter, idx, state)	\
+	__CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, state, 1)
 
 #endif /* _LINUX_CPUIDLE_H */
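The added *_PARAM macro variants let the value handed to the low-level call (state) differ from the cpuidle index (idx) reported back to the core, which the extra state argument to __CPU_PM_CPU_IDLE_ENTER makes possible. A sketch of how a platform driver might use this; the firmware hook and parameter table are invented for the example:

#include <linux/cpuidle.h>
#include <linux/cpu_pm.h>

/* Hypothetical firmware entry point taking a hardware state parameter. */
static int example_fw_cpu_suspend(u32 param);

/* Hypothetical per-index parameters (e.g. packed platform power states). */
static u32 example_state_param[CPUIDLE_STATE_MAX];

static int example_enter_deep(struct cpuidle_device *dev,
			      struct cpuidle_driver *drv, int idx)
{
	/*
	 * The non-retention variant brackets the firmware call with
	 * cpu_pm_enter()/cpu_pm_exit() and evaluates to idx on success,
	 * -1 on failure, while the firmware sees the per-index parameter
	 * rather than the index itself.
	 */
	return CPU_PM_CPU_IDLE_ENTER_PARAM(example_fw_cpu_suspend, idx,
					   example_state_param[idx]);
}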