@@ -1,9 +1,10 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Energy Model of CPUs
+ * Energy Model of devices
  *
- * Copyright (c) 2018, Arm ltd.
+ * Copyright (c) 2018-2020, Arm ltd.
  * Written by: Quentin Perret, Arm ltd.
+ * Improvements provided by: Lukasz Luba, Arm ltd.
  */
 
 #define pr_fmt(fmt) "energy_model: " fmt
@@ -15,30 +16,32 @@
 #include <linux/sched/topology.h>
 #include <linux/slab.h>
 
-/* Mapping of each CPU to the performance domain to which it belongs. */
-static DEFINE_PER_CPU(struct em_perf_domain *, em_data);
-
 /*
  * Mutex serializing the registrations of performance domains and letting
  * callbacks defined by drivers sleep.
  */
 static DEFINE_MUTEX(em_pd_mutex);
 
+static bool _is_cpu_device(struct device *dev)
+{
+        return (dev->bus == &cpu_subsys);
+}
+
 #ifdef CONFIG_DEBUG_FS
 static struct dentry *rootdir;
 
-static void em_debug_create_cs(struct em_cap_state *cs, struct dentry *pd)
+static void em_debug_create_ps(struct em_perf_state *ps, struct dentry *pd)
 {
         struct dentry *d;
         char name[24];
 
-        snprintf(name, sizeof(name), "cs:%lu", cs->frequency);
+        snprintf(name, sizeof(name), "ps:%lu", ps->frequency);
 
-        /* Create per-cs directory */
+        /* Create per-ps directory */
         d = debugfs_create_dir(name, pd);
-        debugfs_create_ulong("frequency", 0444, d, &cs->frequency);
-        debugfs_create_ulong("power", 0444, d, &cs->power);
-        debugfs_create_ulong("cost", 0444, d, &cs->cost);
+        debugfs_create_ulong("frequency", 0444, d, &ps->frequency);
+        debugfs_create_ulong("power", 0444, d, &ps->power);
+        debugfs_create_ulong("cost", 0444, d, &ps->cost);
 }
 
 static int em_debug_cpus_show(struct seq_file *s, void *unused)
@@ -49,22 +52,43 @@
 }
 DEFINE_SHOW_ATTRIBUTE(em_debug_cpus);
 
-static void em_debug_create_pd(struct em_perf_domain *pd, int cpu)
+static int em_debug_units_show(struct seq_file *s, void *unused)
+{
+        struct em_perf_domain *pd = s->private;
+        char *units = pd->milliwatts ? "milliWatts" : "bogoWatts";
+
+        seq_printf(s, "%s\n", units);
+
+        return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(em_debug_units);
+
+static void em_debug_create_pd(struct device *dev)
 {
         struct dentry *d;
-        char name[8];
         int i;
 
-        snprintf(name, sizeof(name), "pd%d", cpu);
-
         /* Create the directory of the performance domain */
-        d = debugfs_create_dir(name, rootdir);
+        d = debugfs_create_dir(dev_name(dev), rootdir);
 
-        debugfs_create_file("cpus", 0444, d, pd->cpus, &em_debug_cpus_fops);
+        if (_is_cpu_device(dev))
+                debugfs_create_file("cpus", 0444, d, dev->em_pd->cpus,
+                                    &em_debug_cpus_fops);
 
-        /* Create a sub-directory for each capacity state */
-        for (i = 0; i < pd->nr_cap_states; i++)
-                em_debug_create_cs(&pd->table[i], d);
+        debugfs_create_file("units", 0444, d, dev->em_pd, &em_debug_units_fops);
+
+        /* Create a sub-directory for each performance state */
+        for (i = 0; i < dev->em_pd->nr_perf_states; i++)
+                em_debug_create_ps(&dev->em_pd->table[i], d);
+
+}
+
+static void em_debug_remove_pd(struct device *dev)
+{
+        struct dentry *debug_dir;
+
+        debug_dir = debugfs_lookup(dev_name(dev), rootdir);
+        debugfs_remove_recursive(debug_dir);
 }
 
 static int __init em_debug_init(void)
@@ -74,140 +98,189 @@
 
         return 0;
 }
-#ifdef CONFIG_ROCKCHIP_THUNDER_BOOT
-core_initcall_sync(em_debug_init);
-#else
-core_initcall(em_debug_init);
-#endif
+fs_initcall(em_debug_init);
 #else /* CONFIG_DEBUG_FS */
-static void em_debug_create_pd(struct em_perf_domain *pd, int cpu) {}
+static void em_debug_create_pd(struct device *dev) {}
+static void em_debug_remove_pd(struct device *dev) {}
 #endif
-static struct em_perf_domain *em_create_pd(cpumask_t *span, int nr_states,
-                                           struct em_data_callback *cb)
+
+static int em_create_perf_table(struct device *dev, struct em_perf_domain *pd,
+                                int nr_states, struct em_data_callback *cb)
 {
-        unsigned long opp_eff, prev_opp_eff = ULONG_MAX;
-        unsigned long power, freq, prev_freq = 0;
-        int i, ret, cpu = cpumask_first(span);
-        struct em_cap_state *table;
-        struct em_perf_domain *pd;
+        unsigned long power, freq, prev_freq = 0, prev_cost = ULONG_MAX;
+        struct em_perf_state *table;
+        int i, ret;
         u64 fmax;
-
-        if (!cb->active_power)
-                return NULL;
-
-        pd = kzalloc(sizeof(*pd) + cpumask_size(), GFP_KERNEL);
-        if (!pd)
-                return NULL;
 
         table = kcalloc(nr_states, sizeof(*table), GFP_KERNEL);
         if (!table)
-                goto free_pd;
+                return -ENOMEM;
 
-        /* Build the list of capacity states for this performance domain */
+        /* Build the list of performance states for this performance domain */
        for (i = 0, freq = 0; i < nr_states; i++, freq++) {
                /*
                 * active_power() is a driver callback which ceils 'freq' to
-                * lowest capacity state of 'cpu' above 'freq' and updates
+                * lowest performance state of 'dev' above 'freq' and updates
                 * 'power' and 'freq' accordingly.
                 */
-               ret = cb->active_power(&power, &freq, cpu);
+               ret = cb->active_power(&power, &freq, dev);
                if (ret) {
-                       pr_err("pd%d: invalid cap. state: %d\n", cpu, ret);
-                       goto free_cs_table;
+                       dev_err(dev, "EM: invalid perf. state: %d\n",
+                               ret);
+                       goto free_ps_table;
                }
 
                /*
                 * We expect the driver callback to increase the frequency for
-                * higher capacity states.
+                * higher performance states.
                 */
                if (freq <= prev_freq) {
-                       pr_err("pd%d: non-increasing freq: %lu\n", cpu, freq);
-                       goto free_cs_table;
+                       dev_err(dev, "EM: non-increasing freq: %lu\n",
+                               freq);
+                       goto free_ps_table;
                }
 
---|
               /*
                * The power returned by active_power() is expected to be
                * positive, in milli-watts and to fit into 16 bits.
                */
---|
-               if (!power || power > EM_CPU_MAX_POWER) {
-                       pr_err("pd%d: invalid power: %lu\n", cpu, power);
-                       goto free_cs_table;
+               if (!power || power > EM_MAX_POWER) {
+                       dev_err(dev, "EM: invalid power: %lu\n",
+                               power);
+                       goto free_ps_table;
                }
 
                table[i].power = power;
                table[i].frequency = prev_freq = freq;
-
-               /*
-                * The hertz/watts efficiency ratio should decrease as the
-                * frequency grows on sane platforms. But this isn't always
-                * true in practice so warn the user if a higher OPP is more
-                * power efficient than a lower one.
-                */
-               opp_eff = freq / power;
-               if (opp_eff >= prev_opp_eff)
-                       pr_warn("pd%d: hertz/watts ratio non-monotonically decreasing: em_cap_state %d >= em_cap_state%d\n",
-                               cpu, i, i - 1);
-               prev_opp_eff = opp_eff;
        }
 
-        /* Compute the cost of each capacity_state. */
+        /* Compute the cost of each performance state. */
         fmax = (u64) table[nr_states - 1].frequency;
-        for (i = 0; i < nr_states; i++) {
-                table[i].cost = div64_u64(fmax * table[i].power,
+        for (i = nr_states - 1; i >= 0; i--) {
+                unsigned long power_res = em_scale_power(table[i].power);
+
+                table[i].cost = div64_u64(fmax * power_res,
                                           table[i].frequency);
+                if (table[i].cost >= prev_cost) {
+                        dev_dbg(dev, "EM: OPP:%lu is inefficient\n",
+                                table[i].frequency);
+                } else {
+                        prev_cost = table[i].cost;
+                }
         }
 
         pd->table = table;
-        pd->nr_cap_states = nr_states;
-        cpumask_copy(to_cpumask(pd->cpus), span);
+        pd->nr_perf_states = nr_states;
 
-        em_debug_create_pd(pd, cpu);
+        return 0;
 
-        return pd;
-
-free_cs_table:
+free_ps_table:
         kfree(table);
-free_pd:
-        kfree(pd);
-
-        return NULL;
+        return -EINVAL;
 }
+
+static int em_create_pd(struct device *dev, int nr_states,
+                        struct em_data_callback *cb, cpumask_t *cpus)
+{
+        struct em_perf_domain *pd;
+        struct device *cpu_dev;
+        int cpu, ret;
+
+        if (_is_cpu_device(dev)) {
+                pd = kzalloc(sizeof(*pd) + cpumask_size(), GFP_KERNEL);
+                if (!pd)
+                        return -ENOMEM;
+
+                cpumask_copy(em_span_cpus(pd), cpus);
+        } else {
+                pd = kzalloc(sizeof(*pd), GFP_KERNEL);
+                if (!pd)
+                        return -ENOMEM;
+        }
+
+        ret = em_create_perf_table(dev, pd, nr_states, cb);
+        if (ret) {
+                kfree(pd);
+                return ret;
+        }
+
+        if (_is_cpu_device(dev))
+                for_each_cpu(cpu, cpus) {
+                        cpu_dev = get_cpu_device(cpu);
+                        cpu_dev->em_pd = pd;
+                }
+
+        dev->em_pd = pd;
+
+        return 0;
+}
+
+/**
+ * em_pd_get() - Return the performance domain for a device
+ * @dev : Device to find the performance domain for
+ *
+ * Returns the performance domain to which @dev belongs, or NULL if it doesn't
+ * exist.
+ */
+struct em_perf_domain *em_pd_get(struct device *dev)
+{
+        if (IS_ERR_OR_NULL(dev))
+                return NULL;
+
+        return dev->em_pd;
+}
+EXPORT_SYMBOL_GPL(em_pd_get);
 
 /**
  * em_cpu_get() - Return the performance domain for a CPU
  * @cpu : CPU to find the performance domain for
  *
- * Return: the performance domain to which 'cpu' belongs, or NULL if it doesn't
+ * Returns the performance domain to which @cpu belongs, or NULL if it doesn't
  * exist.
  */
 struct em_perf_domain *em_cpu_get(int cpu)
 {
-        return READ_ONCE(per_cpu(em_data, cpu));
+        struct device *cpu_dev;
+
+        cpu_dev = get_cpu_device(cpu);
+        if (!cpu_dev)
+                return NULL;
+
+        return em_pd_get(cpu_dev);
 }
 EXPORT_SYMBOL_GPL(em_cpu_get);
 
---|
 /**
- * em_register_perf_domain() - Register the Energy Model of a performance domain
- * @span : Mask of CPUs in the performance domain
- * @nr_states : Number of capacity states to register
+ * em_dev_register_perf_domain() - Register the Energy Model (EM) for a device
+ * @dev : Device for which the EM is to be registered
+ * @nr_states : Number of performance states to register
  * @cb : Callback functions providing the data of the Energy Model
+ * @cpus : Pointer to cpumask_t, which is obligatory for a CPU device and
+ *         can be taken from e.g. 'policy->cpus'. For other types of devices
+ *         it should be set to NULL.
+ * @milliwatts : Flag indicating whether the power values are in milliWatts
+ *         or in some other scale. It must be set properly.
 *
 * Create Energy Model tables for a performance domain using the callbacks
 * defined in cb.
+ *
+ * It is important to set @milliwatts correctly. Some kernel sub-systems
+ * might rely on this flag and check whether all devices in the EM use the
+ * same scale.
 *
 * If multiple clients register the same performance domain, all but the first
 * registration will be ignored.
 *
 * Return 0 on success
 */
---|
-int em_register_perf_domain(cpumask_t *span, unsigned int nr_states,
-                            struct em_data_callback *cb)
+int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states,
+                                struct em_data_callback *cb, cpumask_t *cpus,
+                                bool milliwatts)
 {
         unsigned long cap, prev_cap = 0;
-        struct em_perf_domain *pd;
-        int cpu, ret = 0;
+        int cpu, ret;
 
-        if (!span || !nr_states || !cb)
+        if (!dev || !nr_states || !cb)
                 return -EINVAL;
 
         /*
---|
@@ -216,47 +289,81 @@
          */
         mutex_lock(&em_pd_mutex);
 
-        for_each_cpu(cpu, span) {
-                /* Make sure we don't register again an existing domain. */
-                if (READ_ONCE(per_cpu(em_data, cpu))) {
-                        ret = -EEXIST;
-                        goto unlock;
-                }
-
-                /*
-                 * All CPUs of a domain must have the same micro-architecture
-                 * since they all share the same table.
-                 */
-                cap = arch_scale_cpu_capacity(NULL, cpu);
-                if (prev_cap && prev_cap != cap) {
-                        pr_err("CPUs of %*pbl must have the same capacity\n",
-                               cpumask_pr_args(span));
-                        ret = -EINVAL;
-                        goto unlock;
-                }
-                prev_cap = cap;
-        }
-
-        /* Create the performance domain and add it to the Energy Model. */
-        pd = em_create_pd(span, nr_states, cb);
-        if (!pd) {
-                ret = -EINVAL;
+        if (dev->em_pd) {
+                ret = -EEXIST;
                 goto unlock;
         }
 
-        for_each_cpu(cpu, span) {
-                /*
-                 * The per-cpu array can be read concurrently from em_cpu_get().
-                 * The barrier enforces the ordering needed to make sure readers
-                 * can only access well formed em_perf_domain structs.
-                 */
-                smp_store_release(per_cpu_ptr(&em_data, cpu), pd);
+        if (_is_cpu_device(dev)) {
+                if (!cpus) {
+                        dev_err(dev, "EM: invalid CPU mask\n");
+                        ret = -EINVAL;
+                        goto unlock;
+                }
+
+                for_each_cpu(cpu, cpus) {
+                        if (em_cpu_get(cpu)) {
+                                dev_err(dev, "EM: exists for CPU%d\n", cpu);
+                                ret = -EEXIST;
+                                goto unlock;
+                        }
+                        /*
+                         * All CPUs of a domain must have the same
+                         * micro-architecture since they all share the same
+                         * table.
+                         */
+                        cap = arch_scale_cpu_capacity(cpu);
+                        if (prev_cap && prev_cap != cap) {
+                                dev_err(dev, "EM: CPUs of %*pbl must have the same capacity\n",
+                                        cpumask_pr_args(cpus));
+
+                                ret = -EINVAL;
+                                goto unlock;
+                        }
+                        prev_cap = cap;
+                }
         }
 
-        pr_debug("Created perf domain %*pbl\n", cpumask_pr_args(span));
+        ret = em_create_pd(dev, nr_states, cb, cpus);
+        if (ret)
+                goto unlock;
+
+        dev->em_pd->milliwatts = milliwatts;
+
+        em_debug_create_pd(dev);
+        dev_info(dev, "EM: created perf domain\n");
+
 unlock:
         mutex_unlock(&em_pd_mutex);
-
         return ret;
 }
-EXPORT_SYMBOL_GPL(em_register_perf_domain);
+EXPORT_SYMBOL_GPL(em_dev_register_perf_domain);
+
+/**
+ * em_dev_unregister_perf_domain() - Unregister Energy Model (EM) for a device
+ * @dev : Device for which the EM is registered
+ *
+ * Unregister the EM for the specified @dev (but not a CPU device).
+ */
+void em_dev_unregister_perf_domain(struct device *dev)
+{
+        if (IS_ERR_OR_NULL(dev) || !dev->em_pd)
+                return;
+
+        if (_is_cpu_device(dev))
+                return;
+
---|
+        /*
+         * The mutex separates all register/unregister requests and protects
+         * from potential clean-up/setup issues in the debugfs directories.
+         * The debugfs directory name is the same as the device's name.
+         */
---|
+        mutex_lock(&em_pd_mutex);
+        em_debug_remove_pd(dev);
+
+        kfree(dev->em_pd->table);
+        kfree(dev->em_pd);
+        dev->em_pd = NULL;
+        mutex_unlock(&em_pd_mutex);
+}
+EXPORT_SYMBOL_GPL(em_dev_unregister_perf_domain);
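
Usage note (not part of the patch): a minimal sketch of how a non-CPU device driver might register an Energy Model with the reworked API above. The identifiers prefixed demo_, the probe hook, and the frequency/power table are illustrative assumptions; only em_dev_register_perf_domain(), EM_DATA_CB() and the active_power() contract (ceil *freq to the lowest supported frequency at or above it and report its power) come from the code in this patch. The per-state cost consumed later is derived above as cost = fmax * power / freq.

#include <linux/device.h>
#include <linux/energy_model.h>
#include <linux/kernel.h>

/* Hypothetical performance states: frequency in kHz, power in bogoWatts. */
static const unsigned long demo_freq[]  = { 200000, 400000, 800000 };
static const unsigned long demo_power[] = {     30,     80,    220 };

/* Ceil *freq to the next supported state and report the matching power. */
static int demo_active_power(unsigned long *power, unsigned long *freq,
                             struct device *dev)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(demo_freq); i++) {
                if (demo_freq[i] >= *freq) {
                        *freq = demo_freq[i];
                        *power = demo_power[i];
                        return 0;
                }
        }

        return -EINVAL;
}

static struct em_data_callback demo_em_cb = EM_DATA_CB(demo_active_power);

static int demo_probe(struct device *dev)
{
        /* Non-CPU device: no cpumask; power values are not real milliWatts. */
        return em_dev_register_perf_domain(dev, ARRAY_SIZE(demo_freq),
                                           &demo_em_cb, NULL, false);
}

A CPU device would instead pass its policy cpumask (e.g. 'policy->cpus') and set the milliwatts flag only when the callback reports calibrated milliWatt values; the registered domain can later be looked up with em_pd_get(dev) or, for CPUs, em_cpu_get(cpu).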
---|