.. | ..
14 | 14 | #include <linux/percpu.h>
15 | 15 | #include <linux/list.h>
16 | 16 | #include <linux/hrtimer.h>
| 17 | +#include <linux/android_kabi.h>
17 | 18 |
18 | 19 | #define CPUIDLE_STATE_MAX 10
19 | 20 | #define CPUIDLE_NAME_LEN 16
.. | ..
29 | 30 | * CPUIDLE DEVICE INTERFACE *
30 | 31 | ****************************/
31 | 32 |
| 33 | +#define CPUIDLE_STATE_DISABLED_BY_USER BIT(0)
| 34 | +#define CPUIDLE_STATE_DISABLED_BY_DRIVER BIT(1)
| 35 | +
32 | 36 | struct cpuidle_state_usage {
33 | 37 | unsigned long long disable;
34 | 38 | unsigned long long usage;
35 | | - unsigned long long time; /* in US */
| 39 | + u64 time_ns;
| 40 | + unsigned long long above; /* Number of times it's been too deep */
| 41 | + unsigned long long below; /* Number of times it's been too shallow */
| 42 | + unsigned long long rejected; /* Number of times idle entry was rejected */
36 | 43 | #ifdef CONFIG_SUSPEND
37 | 44 | unsigned long long s2idle_usage;
38 | 45 | unsigned long long s2idle_time; /* in US */
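The new `above`/`below` counters record how often the selected state turned out to be too deep or too shallow for the idle period that actually elapsed. A minimal sketch of that bookkeeping, simplified from what the core does after a wakeup; it assumes the driver's `states[]` table and `state_count` field, which are not shown in this hunk:

```c
/* Sketch only: simplified accounting once the measured idle duration is known. */
static void account_hit_or_miss(struct cpuidle_driver *drv,
                                struct cpuidle_device *dev,
                                int entered, u64 measured_ns)
{
        struct cpuidle_state_usage *u = &dev->states_usage[entered];

        if (measured_ns < drv->states[entered].target_residency_ns)
                u->above++;     /* the state was too deep for this sleep */
        else if (entered + 1 < drv->state_count &&
                 measured_ns >= drv->states[entered + 1].target_residency_ns)
                u->below++;     /* a deeper state would have paid off */
}
```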
.. | ..
43 | 50 | char name[CPUIDLE_NAME_LEN];
44 | 51 | char desc[CPUIDLE_DESC_LEN];
45 | 52 |
| 53 | + u64 exit_latency_ns;
| 54 | + u64 target_residency_ns;
46 | 55 | unsigned int flags;
47 | 56 | unsigned int exit_latency; /* in US */
48 | 57 | int power_usage; /* in mW */
49 | 58 | unsigned int target_residency; /* in US */
50 | | - bool disabled; /* disabled on all CPUs */
51 | 59 |
52 | 60 | int (*enter) (struct cpuidle_device *dev,
53 | 61 | struct cpuidle_driver *drv,
.. | ..
59 | 67 | * CPUs execute ->enter_s2idle with the local tick or entire timekeeping
60 | 68 | * suspended, so it must not re-enable interrupts at any point (even
61 | 69 | * temporarily) or attempt to change states of clock event devices.
| 70 | + *
| 71 | + * This callback may point to the same function as ->enter if all of
| 72 | + * the above requirements are met by it.
62 | 73 | */
63 | | - void (*enter_s2idle) (struct cpuidle_device *dev,
64 | | - struct cpuidle_driver *drv,
65 | | - int index);
| 74 | + int (*enter_s2idle)(struct cpuidle_device *dev,
| 75 | + struct cpuidle_driver *drv,
| 76 | + int index);
66 | 77 | };
67 | 78 |
68 | 79 | /* Idle State Flags */
69 | | -#define CPUIDLE_FLAG_NONE (0x00)
70 | | -#define CPUIDLE_FLAG_POLLING (0x01) /* polling state */
71 | | -#define CPUIDLE_FLAG_COUPLED (0x02) /* state applies to multiple cpus */
72 | | -#define CPUIDLE_FLAG_TIMER_STOP (0x04) /* timer is stopped on this state */
73 | | -
74 | | -#define CPUIDLE_DRIVER_FLAGS_MASK (0xFFFF0000)
| 80 | +#define CPUIDLE_FLAG_NONE (0x00)
| 81 | +#define CPUIDLE_FLAG_POLLING BIT(0) /* polling state */
| 82 | +#define CPUIDLE_FLAG_COUPLED BIT(1) /* state applies to multiple cpus */
| 83 | +#define CPUIDLE_FLAG_TIMER_STOP BIT(2) /* timer is stopped on this state */
| 84 | +#define CPUIDLE_FLAG_UNUSABLE BIT(3) /* avoid using this state */
| 85 | +#define CPUIDLE_FLAG_OFF BIT(4) /* disable this state by default */
| 86 | +#define CPUIDLE_FLAG_TLB_FLUSHED BIT(5) /* idle-state flushes TLBs */
| 87 | +#define CPUIDLE_FLAG_RCU_IDLE BIT(6) /* idle-state takes care of RCU */
75 | 88 |
76 | 89 | struct cpuidle_device_kobj;
77 | 90 | struct cpuidle_state_kobj;
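Taken together with the int-returning ->enter_s2idle and the new flags, a driver's state description might look like the hypothetical entry below. Names and numbers are invented; my understanding is that the core derives the new *_ns fields from the microsecond values when they are left at zero:

```c
/* Hypothetical state entry; the same handler serves ->enter and
 * ->enter_s2idle because it satisfies the s2idle constraints above. */
static int my_c2_enter(struct cpuidle_device *dev,
                       struct cpuidle_driver *drv, int index)
{
        /* enter the low-power state without re-enabling interrupts */
        return index;
}

static struct cpuidle_state my_c2_state = {
        .name                   = "C2",
        .desc                   = "hypothetical deep state",
        .exit_latency           = 200,  /* us; exit_latency_ns filled in by the core */
        .target_residency       = 800,  /* us */
        .flags                  = CPUIDLE_FLAG_TIMER_STOP | CPUIDLE_FLAG_OFF,
        .enter                  = my_c2_enter,
        .enter_s2idle           = my_c2_enter,
};
```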
.. | ..
80 | 93 | struct cpuidle_device {
81 | 94 | unsigned int registered:1;
82 | 95 | unsigned int enabled:1;
83 | | - unsigned int use_deepest_state:1;
84 | 96 | unsigned int poll_time_limit:1;
85 | 97 | unsigned int cpu;
| 98 | + ktime_t next_hrtimer;
86 | 99 |
87 | | - int last_residency;
| 100 | + int last_state_idx;
| 101 | + u64 last_residency_ns;
| 102 | + u64 poll_limit_ns;
| 103 | + u64 forced_idle_latency_limit_ns;
88 | 104 | struct cpuidle_state_usage states_usage[CPUIDLE_STATE_MAX];
89 | 105 | struct cpuidle_state_kobj *kobjs[CPUIDLE_STATE_MAX];
90 | 106 | struct cpuidle_driver_kobj *kobj_driver;
.. | ..
95 | 111 | cpumask_t coupled_cpus;
96 | 112 | struct cpuidle_coupled *coupled;
97 | 113 | #endif
| 114 | +
| 115 | + ANDROID_KABI_RESERVE(1);
98 | 116 | };
99 | 117 |
100 | 118 | DECLARE_PER_CPU(struct cpuidle_device *, cpuidle_devices);
101 | 119 | DECLARE_PER_CPU(struct cpuidle_device, cpuidle_dev);
102 | | -
103 | | -/**
104 | | - * cpuidle_get_last_residency - retrieves the last state's residency time
105 | | - * @dev: the target CPU
106 | | - */
107 | | -static inline int cpuidle_get_last_residency(struct cpuidle_device *dev)
108 | | -{
109 | | - return dev->last_residency;
110 | | -}
111 | | -
112 | 120 |
113 | 121 | /****************************
114 | 122 | * CPUIDLE DRIVER INTERFACE *
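With cpuidle_get_last_residency() gone, the measured residency lives on the device in nanoseconds; a governor hook would read it directly, roughly like this hypothetical ->reflect()-style sketch:

```c
/* Hypothetical governor hook: last_residency_ns replaces the removed
 * microsecond helper cpuidle_get_last_residency(). */
static void my_reflect(struct cpuidle_device *dev, int index)
{
        u64 measured_ns = dev->last_residency_ns;

        /* ... feed measured_ns and index into the governor's statistics ... */
        (void)measured_ns;
        (void)index;
}
```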
.. | ..
117 | 125 | struct cpuidle_driver {
118 | 126 | const char *name;
119 | 127 | struct module *owner;
120 | | - int refcnt;
121 | 128 |
122 | 129 | /* used by the cpuidle framework to setup the broadcast timer */
123 | 130 | unsigned int bctimer:1;
.. | ..
128 | 135 |
129 | 136 | /* the driver handles the cpus in cpumask */
130 | 137 | struct cpumask *cpumask;
| 138 | +
| 139 | + /* preferred governor to switch at register time */
| 140 | + const char *governor;
| 141 | +
| 142 | + ANDROID_KABI_RESERVE(1);
131 | 143 | };
132 | 144 |
133 | 145 | #ifdef CONFIG_CPU_IDLE
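The new governor field lets a driver name the governor it would like the core to switch to when the driver registers. A hypothetical declaration (the state table and other driver fields are omitted):

```c
/* Hypothetical driver; .states[] and .state_count omitted for brevity. */
static struct cpuidle_driver my_idle_driver = {
        .name           = "my_idle",
        .owner          = THIS_MODULE,
        .governor       = "menu",       /* preferred governor, looked up by name */
};
```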
.. | ..
141 | 153 | extern int cpuidle_enter(struct cpuidle_driver *drv,
142 | 154 | struct cpuidle_device *dev, int index);
143 | 155 | extern void cpuidle_reflect(struct cpuidle_device *dev, int index);
| 156 | +extern u64 cpuidle_poll_time(struct cpuidle_driver *drv,
| 157 | + struct cpuidle_device *dev);
144 | 158 |
145 | 159 | extern int cpuidle_register_driver(struct cpuidle_driver *drv);
146 | 160 | extern struct cpuidle_driver *cpuidle_get_driver(void);
147 | | -extern struct cpuidle_driver *cpuidle_driver_ref(void);
148 | | -extern void cpuidle_driver_unref(void);
| 161 | +extern void cpuidle_driver_state_disabled(struct cpuidle_driver *drv, int idx,
| 162 | + bool disable);
149 | 163 | extern void cpuidle_unregister_driver(struct cpuidle_driver *drv);
150 | 164 | extern int cpuidle_register_device(struct cpuidle_device *dev);
151 | 165 | extern void cpuidle_unregister_device(struct cpuidle_device *dev);
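cpuidle_driver_state_disabled() takes over the role of the dropped per-state disabled field: the driver asks the core to veto one state on every CPU it handles. A hedged usage sketch; the erratum check is invented:

```c
/* Sketch: after registration, keep state 2 out of use on hardware with a
 * hypothetical erratum.  Passing false would allow the state again. */
static void my_apply_idle_quirk(struct cpuidle_driver *drv)
{
        if (my_has_idle_erratum())      /* hypothetical platform check */
                cpuidle_driver_state_disabled(drv, 2, true);
}
```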
.. | ..
175 | 189 | struct cpuidle_device *dev, int index)
176 | 190 | {return -ENODEV; }
177 | 191 | static inline void cpuidle_reflect(struct cpuidle_device *dev, int index) { }
| 192 | +static inline u64 cpuidle_poll_time(struct cpuidle_driver *drv,
| 193 | + struct cpuidle_device *dev)
| 194 | +{return 0; }
178 | 195 | static inline int cpuidle_register_driver(struct cpuidle_driver *drv)
179 | 196 | {return -ENODEV; }
180 | 197 | static inline struct cpuidle_driver *cpuidle_get_driver(void) {return NULL; }
181 | | -static inline struct cpuidle_driver *cpuidle_driver_ref(void) {return NULL; }
182 | | -static inline void cpuidle_driver_unref(void) {}
| 198 | +static inline void cpuidle_driver_state_disabled(struct cpuidle_driver *drv,
| 199 | + int idx, bool disable) { }
183 | 200 | static inline void cpuidle_unregister_driver(struct cpuidle_driver *drv) { }
184 | 201 | static inline int cpuidle_register_device(struct cpuidle_device *dev)
185 | 202 | {return -ENODEV; }
.. | ..
203 | 220 |
204 | 221 | #ifdef CONFIG_CPU_IDLE
205 | 222 | extern int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
206 | | - struct cpuidle_device *dev);
| 223 | + struct cpuidle_device *dev,
| 224 | + u64 latency_limit_ns);
207 | 225 | extern int cpuidle_enter_s2idle(struct cpuidle_driver *drv,
208 | 226 | struct cpuidle_device *dev);
209 | | -extern void cpuidle_use_deepest_state(bool enable);
| 227 | +extern void cpuidle_use_deepest_state(u64 latency_limit_ns);
210 | 228 | #else
211 | 229 | static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
212 | | - struct cpuidle_device *dev)
| 230 | + struct cpuidle_device *dev,
| 231 | + u64 latency_limit_ns)
213 | 232 | {return -ENODEV; }
214 | 233 | static inline int cpuidle_enter_s2idle(struct cpuidle_driver *drv,
215 | 234 | struct cpuidle_device *dev)
216 | 235 | {return -ENODEV; }
217 | | -static inline void cpuidle_use_deepest_state(bool enable)
| 236 | +static inline void cpuidle_use_deepest_state(u64 latency_limit_ns)
218 | 237 | {
219 | 238 | }
220 | 239 | #endif
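cpuidle_use_deepest_state() now takes an exit-latency limit in nanoseconds instead of a bool; passing 0 turns the forced deepest-state mode back off, which lines up with the forced_idle_latency_limit_ns field added to the device above. A sketch of a forced-idle window, loosely modeled on the idle-injection path in kernel/sched/idle.c (the function and its parameters are invented):

```c
/* Sketch of an idle-injection window bounded by a latency constraint. */
static void my_forced_idle(u64 duration_ns, u64 latency_limit_ns)
{
        cpuidle_use_deepest_state(latency_limit_ns);    /* non-zero enables */

        /* ... let the idle loop run for roughly duration_ns ... */

        cpuidle_use_deepest_state(0);                   /* back to normal */
}
```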
221 | 240 |
222 | 241 | /* kernel/sched/idle.c */
223 | | -extern void sched_idle_set_state(struct cpuidle_state *idle_state, int index);
| 242 | +extern void sched_idle_set_state(struct cpuidle_state *idle_state);
224 | 243 | extern void default_idle_call(void);
225 | 244 |
226 | 245 | #ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
.. | ..
257 | 276 | void (*reflect) (struct cpuidle_device *dev, int index);
258 | 277 | };
259 | 278 |
260 | | -#ifdef CONFIG_CPU_IDLE
261 | 279 | extern int cpuidle_register_governor(struct cpuidle_governor *gov);
262 | | -extern int cpuidle_governor_latency_req(unsigned int cpu);
263 | | -#else
264 | | -static inline int cpuidle_register_governor(struct cpuidle_governor *gov)
265 | | -{return 0;}
266 | | -#endif
| 280 | +extern s64 cpuidle_governor_latency_req(unsigned int cpu);
267 | 281 |
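cpuidle_governor_latency_req() now returns an s64, which I read as the CPU latency QoS limit expressed in nanoseconds to match the *_ns state fields above. A sketch of how a governor's selection path might cap its search with it; drv->states[] and state_count are driver fields not shown in these hunks, and the helper is hypothetical:

```c
/* Sketch: skip states whose exit latency would violate the QoS limit. */
static int my_pick_state(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
        s64 latency_req = cpuidle_governor_latency_req(dev->cpu);
        int i, pick = 0;

        for (i = 1; i < drv->state_count; i++) {
                if (drv->states[i].exit_latency_ns > latency_req)
                        break;          /* deeper states are too slow to exit */
                pick = i;
        }
        return pick;
}
```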
268 | | -#define __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, is_retention) \
| 282 | +#define __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, \
| 283 | + idx, \
| 284 | + state, \
| 285 | + is_retention) \
269 | 286 | ({ \
270 | 287 | int __ret = 0; \
271 | 288 | \
.. | ..
277 | 294 | if (!is_retention) \
278 | 295 | __ret = cpu_pm_enter(); \
279 | 296 | if (!__ret) { \
280 | | - __ret = low_level_idle_enter(idx); \
| 297 | + __ret = low_level_idle_enter(state); \
281 | 298 | if (!is_retention) \
282 | 299 | cpu_pm_exit(); \
283 | 300 | } \
.. | ..
286 | 303 | })
287 | 304 |
288 | 305 | #define CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx) \
289 | | - __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, 0)
| 306 | + __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, idx, 0)
290 | 307 |
291 | 308 | #define CPU_PM_CPU_IDLE_ENTER_RETENTION(low_level_idle_enter, idx) \
292 | | - __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, 1)
| 309 | + __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, idx, 1)
| 310 | +
| 311 | +#define CPU_PM_CPU_IDLE_ENTER_PARAM(low_level_idle_enter, idx, state) \
| 312 | + __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, state, 0)
| 313 | +
| 314 | +#define CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM(low_level_idle_enter, idx, state) \
| 315 | + __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, state, 1)
293 | 316 |
294 | 317 | #endif /* _LINUX_CPUIDLE_H */
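The *_PARAM variants let the value handed to the low-level entry function differ from the cpuidle state index, while the plain macros simply pass the index twice. A hedged sketch of an ->enter callback using the non-retention variant; the per-state parameter table and the firmware hook are invented:

```c
/* Hypothetical per-state power-state parameters understood by firmware. */
static u32 my_state_params[CPUIDLE_STATE_MAX];

static int my_cpu_suspend(u32 param);  /* hypothetical low-level entry hook */

static int my_enter_idle_state(struct cpuidle_device *dev,
                               struct cpuidle_driver *drv, int idx)
{
        /* Wraps the call in cpu_pm_enter()/cpu_pm_exit() and hands the
         * translated parameter, not the index, to the low-level function. */
        return CPU_PM_CPU_IDLE_ENTER_PARAM(my_cpu_suspend,
                                           idx, my_state_params[idx]);
}
```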