2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
--- a/kernel/kernel/power/energy_model.c
+++ b/kernel/kernel/power/energy_model.c
@@ -1,9 +1,10 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Energy Model of CPUs
+ * Energy Model of devices
  *
- * Copyright (c) 2018, Arm ltd.
+ * Copyright (c) 2018-2020, Arm ltd.
  * Written by: Quentin Perret, Arm ltd.
+ * Improvements provided by: Lukasz Luba, Arm ltd.
  */

 #define pr_fmt(fmt) "energy_model: " fmt
@@ -15,30 +16,32 @@
 #include <linux/sched/topology.h>
 #include <linux/slab.h>

-/* Mapping of each CPU to the performance domain to which it belongs. */
-static DEFINE_PER_CPU(struct em_perf_domain *, em_data);
-
 /*
  * Mutex serializing the registrations of performance domains and letting
  * callbacks defined by drivers sleep.
  */
 static DEFINE_MUTEX(em_pd_mutex);

+static bool _is_cpu_device(struct device *dev)
+{
+	return (dev->bus == &cpu_subsys);
+}
+
 #ifdef CONFIG_DEBUG_FS
 static struct dentry *rootdir;

-static void em_debug_create_cs(struct em_cap_state *cs, struct dentry *pd)
+static void em_debug_create_ps(struct em_perf_state *ps, struct dentry *pd)
 {
 	struct dentry *d;
 	char name[24];

-	snprintf(name, sizeof(name), "cs:%lu", cs->frequency);
+	snprintf(name, sizeof(name), "ps:%lu", ps->frequency);

-	/* Create per-cs directory */
+	/* Create per-ps directory */
 	d = debugfs_create_dir(name, pd);
-	debugfs_create_ulong("frequency", 0444, d, &cs->frequency);
-	debugfs_create_ulong("power", 0444, d, &cs->power);
-	debugfs_create_ulong("cost", 0444, d, &cs->cost);
+	debugfs_create_ulong("frequency", 0444, d, &ps->frequency);
+	debugfs_create_ulong("power", 0444, d, &ps->power);
+	debugfs_create_ulong("cost", 0444, d, &ps->cost);
 }

 static int em_debug_cpus_show(struct seq_file *s, void *unused)
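
Note on the hunk above: the old per-CPU em_data array is gone, and a performance domain is now reached through struct device itself; _is_cpu_device() (dev->bus == &cpu_subsys) tells CPU devices apart from other devices. A minimal sketch of the lookup before and after this patch ('cpu' is a hypothetical CPU number; both expressions are taken from the hunks further down):

	/* Before: per-CPU pointer array, removed above. */
	pd = READ_ONCE(per_cpu(em_data, cpu));

	/* After: the domain hangs off the device structure; em_cpu_get()
	 * wraps this with a NULL check on get_cpu_device().
	 */
	pd = get_cpu_device(cpu)->em_pd;
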
@@ -49,22 +52,40 @@
 }
 DEFINE_SHOW_ATTRIBUTE(em_debug_cpus);

-static void em_debug_create_pd(struct em_perf_domain *pd, int cpu)
+static int em_debug_units_show(struct seq_file *s, void *unused)
+{
+	struct em_perf_domain *pd = s->private;
+	char *units = pd->milliwatts ? "milliWatts" : "bogoWatts";
+
+	seq_printf(s, "%s\n", units);
+
+	return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(em_debug_units);
+
+static void em_debug_create_pd(struct device *dev)
 {
 	struct dentry *d;
-	char name[8];
 	int i;

-	snprintf(name, sizeof(name), "pd%d", cpu);
-
 	/* Create the directory of the performance domain */
-	d = debugfs_create_dir(name, rootdir);
+	d = debugfs_create_dir(dev_name(dev), rootdir);

-	debugfs_create_file("cpus", 0444, d, pd->cpus, &em_debug_cpus_fops);
+	if (_is_cpu_device(dev))
+		debugfs_create_file("cpus", 0444, d, dev->em_pd->cpus,
+				    &em_debug_cpus_fops);

-	/* Create a sub-directory for each capacity state */
-	for (i = 0; i < pd->nr_cap_states; i++)
-		em_debug_create_cs(&pd->table[i], d);
+	debugfs_create_file("units", 0444, d, dev->em_pd, &em_debug_units_fops);
+
+	/* Create a sub-directory for each performance state */
+	for (i = 0; i < dev->em_pd->nr_perf_states; i++)
+		em_debug_create_ps(&dev->em_pd->table[i], d);
+
+}
+
+static void em_debug_remove_pd(struct device *dev)
+{
+	debugfs_lookup_and_remove(dev_name(dev), rootdir);
 }

 static int __init em_debug_init(void)
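
For reference, the debugfs tree that em_debug_create_pd() builds looks like the sketch below (assuming the root directory created in em_debug_init() is named "energy_model", as in mainline; <dev_name> stands for the result of dev_name(dev)):

	/sys/kernel/debug/energy_model/<dev_name>/
		cpus			(CPU devices only, see _is_cpu_device())
		units			("milliWatts" or "bogoWatts")
		ps:<frequency>/
			frequency
			power
			cost

em_debug_remove_pd() later removes the whole <dev_name> directory again via debugfs_lookup_and_remove().
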
@@ -74,140 +95,189 @@

 	return 0;
 }
-#ifdef CONFIG_ROCKCHIP_THUNDER_BOOT
-core_initcall_sync(em_debug_init);
-#else
-core_initcall(em_debug_init);
-#endif
+fs_initcall(em_debug_init);
 #else /* CONFIG_DEBUG_FS */
-static void em_debug_create_pd(struct em_perf_domain *pd, int cpu) {}
+static void em_debug_create_pd(struct device *dev) {}
+static void em_debug_remove_pd(struct device *dev) {}
 #endif
-static struct em_perf_domain *em_create_pd(cpumask_t *span, int nr_states,
-					   struct em_data_callback *cb)
+
+static int em_create_perf_table(struct device *dev, struct em_perf_domain *pd,
+				int nr_states, struct em_data_callback *cb)
 {
-	unsigned long opp_eff, prev_opp_eff = ULONG_MAX;
-	unsigned long power, freq, prev_freq = 0;
-	int i, ret, cpu = cpumask_first(span);
-	struct em_cap_state *table;
-	struct em_perf_domain *pd;
+	unsigned long power, freq, prev_freq = 0, prev_cost = ULONG_MAX;
+	struct em_perf_state *table;
+	int i, ret;
 	u64 fmax;
-
-	if (!cb->active_power)
-		return NULL;
-
-	pd = kzalloc(sizeof(*pd) + cpumask_size(), GFP_KERNEL);
-	if (!pd)
-		return NULL;

 	table = kcalloc(nr_states, sizeof(*table), GFP_KERNEL);
 	if (!table)
-		goto free_pd;
+		return -ENOMEM;

-	/* Build the list of capacity states for this performance domain */
+	/* Build the list of performance states for this performance domain */
 	for (i = 0, freq = 0; i < nr_states; i++, freq++) {
 		/*
 		 * active_power() is a driver callback which ceils 'freq' to
-		 * lowest capacity state of 'cpu' above 'freq' and updates
+		 * lowest performance state of 'dev' above 'freq' and updates
 		 * 'power' and 'freq' accordingly.
 		 */
-		ret = cb->active_power(&power, &freq, cpu);
+		ret = cb->active_power(&power, &freq, dev);
 		if (ret) {
-			pr_err("pd%d: invalid cap. state: %d\n", cpu, ret);
-			goto free_cs_table;
+			dev_err(dev, "EM: invalid perf. state: %d\n",
+				ret);
+			goto free_ps_table;
 		}

 		/*
 		 * We expect the driver callback to increase the frequency for
-		 * higher capacity states.
+		 * higher performance states.
 		 */
 		if (freq <= prev_freq) {
-			pr_err("pd%d: non-increasing freq: %lu\n", cpu, freq);
-			goto free_cs_table;
+			dev_err(dev, "EM: non-increasing freq: %lu\n",
+				freq);
+			goto free_ps_table;
 		}

 		/*
 		 * The power returned by active_state() is expected to be
 		 * positive, in milli-watts and to fit into 16 bits.
 		 */
-		if (!power || power > EM_CPU_MAX_POWER) {
-			pr_err("pd%d: invalid power: %lu\n", cpu, power);
-			goto free_cs_table;
+		if (!power || power > EM_MAX_POWER) {
+			dev_err(dev, "EM: invalid power: %lu\n",
+				power);
+			goto free_ps_table;
 		}

 		table[i].power = power;
 		table[i].frequency = prev_freq = freq;
-
-		/*
-		 * The hertz/watts efficiency ratio should decrease as the
-		 * frequency grows on sane platforms. But this isn't always
-		 * true in practice so warn the user if a higher OPP is more
-		 * power efficient than a lower one.
-		 */
-		opp_eff = freq / power;
-		if (opp_eff >= prev_opp_eff)
-			pr_warn("pd%d: hertz/watts ratio non-monotonically decreasing: em_cap_state %d >= em_cap_state%d\n",
-					cpu, i, i - 1);
-		prev_opp_eff = opp_eff;
 	}

-	/* Compute the cost of each capacity_state. */
+	/* Compute the cost of each performance state. */
 	fmax = (u64) table[nr_states - 1].frequency;
-	for (i = 0; i < nr_states; i++) {
-		table[i].cost = div64_u64(fmax * table[i].power,
+	for (i = nr_states - 1; i >= 0; i--) {
+		unsigned long power_res = em_scale_power(table[i].power);
+
+		table[i].cost = div64_u64(fmax * power_res,
 					  table[i].frequency);
+		if (table[i].cost >= prev_cost) {
+			dev_dbg(dev, "EM: OPP:%lu is inefficient\n",
+				table[i].frequency);
+		} else {
+			prev_cost = table[i].cost;
+		}
 	}

 	pd->table = table;
-	pd->nr_cap_states = nr_states;
-	cpumask_copy(to_cpumask(pd->cpus), span);
+	pd->nr_perf_states = nr_states;

-	em_debug_create_pd(pd, cpu);
+	return 0;

-	return pd;
-
-free_cs_table:
+free_ps_table:
 	kfree(table);
-free_pd:
-	kfree(pd);
-
-	return NULL;
+	return -EINVAL;
 }
+
+static int em_create_pd(struct device *dev, int nr_states,
+			struct em_data_callback *cb, cpumask_t *cpus)
+{
+	struct em_perf_domain *pd;
+	struct device *cpu_dev;
+	int cpu, ret;
+
+	if (_is_cpu_device(dev)) {
+		pd = kzalloc(sizeof(*pd) + cpumask_size(), GFP_KERNEL);
+		if (!pd)
+			return -ENOMEM;
+
+		cpumask_copy(em_span_cpus(pd), cpus);
+	} else {
+		pd = kzalloc(sizeof(*pd), GFP_KERNEL);
+		if (!pd)
+			return -ENOMEM;
+	}
+
+	ret = em_create_perf_table(dev, pd, nr_states, cb);
+	if (ret) {
+		kfree(pd);
+		return ret;
+	}
+
+	if (_is_cpu_device(dev))
+		for_each_cpu(cpu, cpus) {
+			cpu_dev = get_cpu_device(cpu);
+			cpu_dev->em_pd = pd;
+		}
+
+	dev->em_pd = pd;
+
+	return 0;
+}
+
+/**
+ * em_pd_get() - Return the performance domain for a device
+ * @dev : Device to find the performance domain for
+ *
+ * Returns the performance domain to which @dev belongs, or NULL if it doesn't
+ * exist.
+ */
+struct em_perf_domain *em_pd_get(struct device *dev)
+{
+	if (IS_ERR_OR_NULL(dev))
+		return NULL;
+
+	return dev->em_pd;
+}
+EXPORT_SYMBOL_GPL(em_pd_get);

 /**
  * em_cpu_get() - Return the performance domain for a CPU
  * @cpu : CPU to find the performance domain for
  *
- * Return: the performance domain to which 'cpu' belongs, or NULL if it doesn't
+ * Returns the performance domain to which @cpu belongs, or NULL if it doesn't
  * exist.
  */
 struct em_perf_domain *em_cpu_get(int cpu)
 {
-	return READ_ONCE(per_cpu(em_data, cpu));
+	struct device *cpu_dev;
+
+	cpu_dev = get_cpu_device(cpu);
+	if (!cpu_dev)
+		return NULL;
+
+	return em_pd_get(cpu_dev);
 }
 EXPORT_SYMBOL_GPL(em_cpu_get);

 /**
- * em_register_perf_domain() - Register the Energy Model of a performance domain
- * @span	: Mask of CPUs in the performance domain
- * @nr_states	: Number of capacity states to register
+ * em_dev_register_perf_domain() - Register the Energy Model (EM) for a device
+ * @dev		: Device for which the EM is to register
+ * @nr_states	: Number of performance states to register
  * @cb		: Callback functions providing the data of the Energy Model
+ * @cpus	: Pointer to cpumask_t, which in case of a CPU device is
+ *		obligatory. It can be taken from i.e. 'policy->cpus'. For other
+ *		type of devices this should be set to NULL.
+ * @milliwatts	: Flag indicating that the power values are in milliWatts or
+ *		in some other scale. It must be set properly.
  *
  * Create Energy Model tables for a performance domain using the callbacks
  * defined in cb.
+ *
+ * The @milliwatts is important to set with correct value. Some kernel
+ * sub-systems might rely on this flag and check if all devices in the EM are
+ * using the same scale.
  *
  * If multiple clients register the same performance domain, all but the first
  * registration will be ignored.
  *
  * Return 0 on success
  */
-int em_register_perf_domain(cpumask_t *span, unsigned int nr_states,
-						struct em_data_callback *cb)
+int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states,
+				struct em_data_callback *cb, cpumask_t *cpus,
+				bool milliwatts)
 {
 	unsigned long cap, prev_cap = 0;
-	struct em_perf_domain *pd;
-	int cpu, ret = 0;
+	int cpu, ret;

-	if (!span || !nr_states || !cb)
+	if (!dev || !nr_states || !cb)
 		return -EINVAL;

 	/*
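
The cost values computed in em_create_perf_table() above follow cost = fmax * em_scale_power(power) / frequency, and since the loop now walks from the highest state downwards, any state whose cost is not strictly below the cheapest higher state is logged as an inefficient OPP. A worked example with made-up numbers, treating em_scale_power() as the identity (an assumption here):

	freq (kHz)	power (mW)	cost = 1500000 * power / freq
	  500000	   100		300
	 1000000	   300		450
	 1500000	   700		700

Walking down from fmax, each cost falls below the running minimum (700, then 450), so nothing is flagged. Had the 500000 kHz state drawn 250 mW instead, its cost of 750 would not be below the running minimum of 450 and the "EM: OPP:500000 is inefficient" dev_dbg() message would fire.
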
@@ -216,47 +286,81 @@
 	 */
 	mutex_lock(&em_pd_mutex);

-	for_each_cpu(cpu, span) {
-		/* Make sure we don't register again an existing domain. */
-		if (READ_ONCE(per_cpu(em_data, cpu))) {
-			ret = -EEXIST;
-			goto unlock;
-		}
-
-		/*
-		 * All CPUs of a domain must have the same micro-architecture
-		 * since they all share the same table.
-		 */
-		cap = arch_scale_cpu_capacity(NULL, cpu);
-		if (prev_cap && prev_cap != cap) {
-			pr_err("CPUs of %*pbl must have the same capacity\n",
-			       cpumask_pr_args(span));
-			ret = -EINVAL;
-			goto unlock;
-		}
-		prev_cap = cap;
-	}
-
-	/* Create the performance domain and add it to the Energy Model. */
-	pd = em_create_pd(span, nr_states, cb);
-	if (!pd) {
-		ret = -EINVAL;
+	if (dev->em_pd) {
+		ret = -EEXIST;
 		goto unlock;
 	}

-	for_each_cpu(cpu, span) {
-		/*
-		 * The per-cpu array can be read concurrently from em_cpu_get().
-		 * The barrier enforces the ordering needed to make sure readers
-		 * can only access well formed em_perf_domain structs.
-		 */
-		smp_store_release(per_cpu_ptr(&em_data, cpu), pd);
+	if (_is_cpu_device(dev)) {
+		if (!cpus) {
+			dev_err(dev, "EM: invalid CPU mask\n");
+			ret = -EINVAL;
+			goto unlock;
+		}
+
+		for_each_cpu(cpu, cpus) {
+			if (em_cpu_get(cpu)) {
+				dev_err(dev, "EM: exists for CPU%d\n", cpu);
+				ret = -EEXIST;
+				goto unlock;
+			}
+			/*
+			 * All CPUs of a domain must have the same
+			 * micro-architecture since they all share the same
+			 * table.
+			 */
+			cap = arch_scale_cpu_capacity(cpu);
+			if (prev_cap && prev_cap != cap) {
+				dev_err(dev, "EM: CPUs of %*pbl must have the same capacity\n",
+					cpumask_pr_args(cpus));
+
+				ret = -EINVAL;
+				goto unlock;
+			}
+			prev_cap = cap;
+		}
 	}

-	pr_debug("Created perf domain %*pbl\n", cpumask_pr_args(span));
+	ret = em_create_pd(dev, nr_states, cb, cpus);
+	if (ret)
+		goto unlock;
+
+	dev->em_pd->milliwatts = milliwatts;
+
+	em_debug_create_pd(dev);
+	dev_info(dev, "EM: created perf domain\n");
+
 unlock:
 	mutex_unlock(&em_pd_mutex);
-
 	return ret;
 }
-EXPORT_SYMBOL_GPL(em_register_perf_domain);
+EXPORT_SYMBOL_GPL(em_dev_register_perf_domain);
+
+/**
+ * em_dev_unregister_perf_domain() - Unregister Energy Model (EM) for a device
+ * @dev	: Device for which the EM is registered
+ *
+ * Unregister the EM for the specified @dev (but not a CPU device).
+ */
+void em_dev_unregister_perf_domain(struct device *dev)
+{
+	if (IS_ERR_OR_NULL(dev) || !dev->em_pd)
+		return;
+
+	if (_is_cpu_device(dev))
+		return;
+
+	/*
+	 * The mutex separates all register/unregister requests and protects
+	 * from potential clean-up/setup issues in the debugfs directories.
+	 * The debugfs directory name is the same as device's name.
+	 */
+	mutex_lock(&em_pd_mutex);
+	em_debug_remove_pd(dev);
+
+	kfree(dev->em_pd->table);
+	kfree(dev->em_pd);
+	dev->em_pd = NULL;
+	mutex_unlock(&em_pd_mutex);
+}
+EXPORT_SYMBOL_GPL(em_dev_unregister_perf_domain);
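
A minimal driver-side sketch of the new registration API (all foo_* names and the OPP numbers are hypothetical; EM_DATA_CB() is the callback-wrapping macro from include/linux/energy_model.h):

	#include <linux/energy_model.h>

	/* Imaginary 3-OPP device: frequencies in kHz, active power in mW. */
	static const unsigned long foo_freqs[] = {  500000, 1000000, 1500000 };
	static const unsigned long foo_power[] = {     100,     300,     700 };

	/* Driver callback: ceil *freq to the next supported state and report
	 * its active power (must be positive and <= EM_MAX_POWER).
	 */
	static int foo_active_power(unsigned long *power, unsigned long *freq,
				    struct device *dev)
	{
		int i;

		for (i = 0; i < ARRAY_SIZE(foo_freqs); i++) {
			if (foo_freqs[i] >= *freq) {
				*freq = foo_freqs[i];
				*power = foo_power[i];
				return 0;
			}
		}
		return -EINVAL;
	}

	static struct em_data_callback foo_em_cb = EM_DATA_CB(foo_active_power);

	/* In probe(): cpus is NULL for a non-CPU device; milliwatts is true
	 * because the table above is in real milliwatts.
	 */
	ret = em_dev_register_perf_domain(dev, ARRAY_SIZE(foo_freqs),
					  &foo_em_cb, NULL, true);

	/* In remove(): permitted for non-CPU devices only. */
	em_dev_unregister_perf_domain(dev);

For a CPU device the same call would instead pass the policy's cpumask (e.g. policy->cpus) and rely on em_cpu_get()/em_pd_get() for lookups; tear-down via em_dev_unregister_perf_domain() is deliberately refused for CPU devices, as the function above shows.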