2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
kernel/drivers/opp/of.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Generic OPP OF helpers
  *
@@ -5,10 +6,6 @@
  * Nishanth Menon
  * Romit Dasgupta
  * Kevin Hilman
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -20,14 +17,36 @@
 #include <linux/pm_domain.h>
 #include <linux/slab.h>
 #include <linux/export.h>
+#include <linux/energy_model.h>
 
 #include "opp.h"
 
-static struct opp_table *_managed_opp(const struct device_node *np)
+/*
+ * Returns opp descriptor node for a device node, caller must
+ * do of_node_put().
+ */
+static struct device_node *_opp_of_get_opp_desc_node(struct device_node *np,
+						     int index)
+{
+	/* "operating-points-v2" can be an array for power domain providers */
+	return of_parse_phandle(np, "operating-points-v2", index);
+}
+
+/* Returns opp descriptor node for a device, caller must do of_node_put() */
+struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev)
+{
+	return _opp_of_get_opp_desc_node(dev->of_node, 0);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_opp_desc_node);
+
+struct opp_table *_managed_opp(struct device *dev, int index)
 {
 	struct opp_table *opp_table, *managed_table = NULL;
+	struct device_node *np;
 
-	mutex_lock(&opp_table_lock);
+	np = _opp_of_get_opp_desc_node(dev->of_node, index);
+	if (!np)
+		return NULL;
 
 	list_for_each_entry(opp_table, &opp_tables, node) {
 		if (opp_table->np == np) {
@@ -47,37 +66,369 @@
 		}
 	}
 
-	mutex_unlock(&opp_table_lock);
+	of_node_put(np);
 
 	return managed_table;
 }
 
-void _of_init_opp_table(struct opp_table *opp_table, struct device *dev)
+/* The caller must call dev_pm_opp_put() after the OPP is used */
+static struct dev_pm_opp *_find_opp_of_np(struct opp_table *opp_table,
+					  struct device_node *opp_np)
 {
-	struct device_node *np;
+	struct dev_pm_opp *opp;
+
+	mutex_lock(&opp_table->lock);
+
+	list_for_each_entry(opp, &opp_table->opp_list, node) {
+		if (opp->np == opp_np) {
+			dev_pm_opp_get(opp);
+			mutex_unlock(&opp_table->lock);
+			return opp;
+		}
+	}
+
+	mutex_unlock(&opp_table->lock);
+
+	return NULL;
+}
+
+static struct device_node *of_parse_required_opp(struct device_node *np,
+						 int index)
+{
+	return of_parse_phandle(np, "required-opps", index);
+}
+
+/* The caller must call dev_pm_opp_put_opp_table() after the table is used */
+static struct opp_table *_find_table_of_opp_np(struct device_node *opp_np)
+{
+	struct opp_table *opp_table;
+	struct device_node *opp_table_np;
+
+	lockdep_assert_held(&opp_table_lock);
+
+	opp_table_np = of_get_parent(opp_np);
+	if (!opp_table_np)
+		goto err;
+
+	/* It is safe to put the node now as all we need now is its address */
+	of_node_put(opp_table_np);
+
+	list_for_each_entry(opp_table, &opp_tables, node) {
+		if (opp_table_np == opp_table->np) {
+			_get_opp_table_kref(opp_table);
+			return opp_table;
+		}
+	}
+
+err:
+	return ERR_PTR(-ENODEV);
+}
+
+/* Free resources previously acquired by _opp_table_alloc_required_tables() */
+static void _opp_table_free_required_tables(struct opp_table *opp_table)
+{
+	struct opp_table **required_opp_tables = opp_table->required_opp_tables;
+	int i;
+
+	if (!required_opp_tables)
+		return;
+
+	for (i = 0; i < opp_table->required_opp_count; i++) {
+		if (IS_ERR_OR_NULL(required_opp_tables[i]))
+			break;
+
+		dev_pm_opp_put_opp_table(required_opp_tables[i]);
+	}
+
+	kfree(required_opp_tables);
+
+	opp_table->required_opp_count = 0;
+	opp_table->required_opp_tables = NULL;
+}
+
+/*
+ * Populate all devices and opp tables which are part of "required-opps" list.
+ * Checking only the first OPP node should be enough.
+ */
+static void _opp_table_alloc_required_tables(struct opp_table *opp_table,
+					     struct device *dev,
+					     struct device_node *opp_np)
+{
+	struct opp_table **required_opp_tables;
+	struct device_node *required_np, *np;
+	int count, i;
+
+	/* Traversing the first OPP node is all we need */
+	np = of_get_next_available_child(opp_np, NULL);
+	if (!np) {
+		dev_err(dev, "Empty OPP table\n");
+		return;
+	}
+
+	count = of_count_phandle_with_args(np, "required-opps", NULL);
+	if (!count)
+		goto put_np;
+
+	required_opp_tables = kcalloc(count, sizeof(*required_opp_tables),
+				      GFP_KERNEL);
+	if (!required_opp_tables)
+		goto put_np;
+
+	opp_table->required_opp_tables = required_opp_tables;
+	opp_table->required_opp_count = count;
+
+	for (i = 0; i < count; i++) {
+		required_np = of_parse_required_opp(np, i);
+		if (!required_np)
+			goto free_required_tables;
+
+		required_opp_tables[i] = _find_table_of_opp_np(required_np);
+		of_node_put(required_np);
+
+		if (IS_ERR(required_opp_tables[i]))
+			goto free_required_tables;
+
+		/*
+		 * We only support genpd's OPPs in the "required-opps" for now,
+		 * as we don't know how much about other cases. Error out if the
+		 * required OPP doesn't belong to a genpd.
+		 */
+		if (!required_opp_tables[i]->is_genpd) {
+			dev_err(dev, "required-opp doesn't belong to genpd: %pOF\n",
+				required_np);
+			goto free_required_tables;
+		}
+	}
+
+	goto put_np;
+
+free_required_tables:
+	_opp_table_free_required_tables(opp_table);
+put_np:
+	of_node_put(np);
+}
+
+void _of_init_opp_table(struct opp_table *opp_table, struct device *dev,
+			int index)
+{
+	struct device_node *np, *opp_np;
+	u32 val;
 
 	/*
 	 * Only required for backward compatibility with v1 bindings, but isn't
 	 * harmful for other cases. And so we do it unconditionally.
 	 */
 	np = of_node_get(dev->of_node);
-	if (np) {
-		u32 val;
+	if (!np)
+		return;
 
-		if (!of_property_read_u32(np, "clock-latency", &val))
-			opp_table->clock_latency_ns_max = val;
-		of_property_read_u32(np, "voltage-tolerance",
-				     &opp_table->voltage_tolerance_v1);
-		of_node_put(np);
-	}
+	if (!of_property_read_u32(np, "clock-latency", &val))
+		opp_table->clock_latency_ns_max = val;
+	of_property_read_u32(np, "voltage-tolerance",
+			     &opp_table->voltage_tolerance_v1);
+
+	if (of_find_property(np, "#power-domain-cells", NULL))
+		opp_table->is_genpd = true;
+
+	/* Get OPP table node */
+	opp_np = _opp_of_get_opp_desc_node(np, index);
+	of_node_put(np);
+
+	if (!opp_np)
+		return;
+
+	if (of_property_read_bool(opp_np, "opp-shared"))
+		opp_table->shared_opp = OPP_TABLE_ACCESS_SHARED;
+	else
+		opp_table->shared_opp = OPP_TABLE_ACCESS_EXCLUSIVE;
+
+	opp_table->np = opp_np;
+
+	_opp_table_alloc_required_tables(opp_table, dev, opp_np);
+	of_node_put(opp_np);
 }
+
+void _of_clear_opp_table(struct opp_table *opp_table)
+{
+	_opp_table_free_required_tables(opp_table);
+}
+
+/*
+ * Release all resources previously acquired with a call to
+ * _of_opp_alloc_required_opps().
+ */
+void _of_opp_free_required_opps(struct opp_table *opp_table,
+				struct dev_pm_opp *opp)
+{
+	struct dev_pm_opp **required_opps = opp->required_opps;
+	int i;
+
+	if (!required_opps)
+		return;
+
+	for (i = 0; i < opp_table->required_opp_count; i++) {
+		if (!required_opps[i])
+			break;
+
+		/* Put the reference back */
+		dev_pm_opp_put(required_opps[i]);
+	}
+
+	kfree(required_opps);
+	opp->required_opps = NULL;
+}
+
+/* Populate all required OPPs which are part of "required-opps" list */
+static int _of_opp_alloc_required_opps(struct opp_table *opp_table,
+				       struct dev_pm_opp *opp)
+{
+	struct dev_pm_opp **required_opps;
+	struct opp_table *required_table;
+	struct device_node *np;
+	int i, ret, count = opp_table->required_opp_count;
+
+	if (!count)
+		return 0;
+
+	required_opps = kcalloc(count, sizeof(*required_opps), GFP_KERNEL);
+	if (!required_opps)
+		return -ENOMEM;
+
+	opp->required_opps = required_opps;
+
+	for (i = 0; i < count; i++) {
+		required_table = opp_table->required_opp_tables[i];
+
+		np = of_parse_required_opp(opp->np, i);
+		if (unlikely(!np)) {
+			ret = -ENODEV;
+			goto free_required_opps;
+		}
+
+		required_opps[i] = _find_opp_of_np(required_table, np);
+		of_node_put(np);
+
+		if (!required_opps[i]) {
+			pr_err("%s: Unable to find required OPP node: %pOF (%d)\n",
+			       __func__, opp->np, i);
+			ret = -ENODEV;
+			goto free_required_opps;
+		}
+	}
+
+	return 0;
+
+free_required_opps:
+	_of_opp_free_required_opps(opp_table, opp);
+
+	return ret;
+}
+
+static int _bandwidth_supported(struct device *dev, struct opp_table *opp_table)
+{
+	struct device_node *np, *opp_np;
+	struct property *prop;
+
+	if (!opp_table) {
+		np = of_node_get(dev->of_node);
+		if (!np)
+			return -ENODEV;
+
+		opp_np = _opp_of_get_opp_desc_node(np, 0);
+		of_node_put(np);
+	} else {
+		opp_np = of_node_get(opp_table->np);
+	}
+
+	/* Lets not fail in case we are parsing opp-v1 bindings */
+	if (!opp_np)
+		return 0;
+
+	/* Checking only first OPP is sufficient */
+	np = of_get_next_available_child(opp_np, NULL);
+	of_node_put(opp_np);
+	if (!np) {
+		dev_err(dev, "OPP table empty\n");
+		return -EINVAL;
+	}
+
+	prop = of_find_property(np, "opp-peak-kBps", NULL);
+	of_node_put(np);
+
+	if (!prop || !prop->length)
+		return 0;
+
+	return 1;
+}
+
+int dev_pm_opp_of_find_icc_paths(struct device *dev,
+				 struct opp_table *opp_table)
+{
+	struct device_node *np;
+	int ret, i, count, num_paths;
+	struct icc_path **paths;
+
+	ret = _bandwidth_supported(dev, opp_table);
+	if (ret <= 0)
+		return ret;
+
+	ret = 0;
+
+	np = of_node_get(dev->of_node);
+	if (!np)
+		return 0;
+
+	count = of_count_phandle_with_args(np, "interconnects",
+					   "#interconnect-cells");
+	of_node_put(np);
+	if (count < 0)
+		return 0;
+
+	/* two phandles when #interconnect-cells = <1> */
+	if (count % 2) {
+		dev_err(dev, "%s: Invalid interconnects values\n", __func__);
+		return -EINVAL;
+	}
+
+	num_paths = count / 2;
+	paths = kcalloc(num_paths, sizeof(*paths), GFP_KERNEL);
+	if (!paths)
+		return -ENOMEM;
+
+	for (i = 0; i < num_paths; i++) {
+		paths[i] = of_icc_get_by_index(dev, i);
+		if (IS_ERR(paths[i])) {
+			ret = PTR_ERR(paths[i]);
+			if (ret != -EPROBE_DEFER) {
+				dev_err(dev, "%s: Unable to get path%d: %d\n",
+					__func__, i, ret);
+			}
+			goto err;
+		}
+	}
+
+	if (opp_table) {
+		opp_table->paths = paths;
+		opp_table->path_count = num_paths;
+		return 0;
+	}
+
+err:
+	while (i--)
+		icc_put(paths[i]);
+
+	kfree(paths);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_of_find_icc_paths);
 
 static bool _opp_is_supported(struct device *dev, struct opp_table *opp_table,
 			      struct device_node *np)
 {
-	unsigned int count = opp_table->supported_hw_count;
-	u32 version;
-	int ret;
+	unsigned int levels = opp_table->supported_hw_count;
+	int count, versions, ret, i, j;
+	u32 val;
 
 	if (!opp_table->supported_hw) {
 		/*
@@ -92,21 +443,40 @@
 		return true;
 	}
 
-	while (count--) {
-		ret = of_property_read_u32_index(np, "opp-supported-hw", count,
-						 &version);
-		if (ret) {
-			dev_warn(dev, "%s: failed to read opp-supported-hw property at index %d: %d\n",
-				 __func__, count, ret);
-			return false;
-		}
-
-		/* Both of these are bitwise masks of the versions */
-		if (!(version & opp_table->supported_hw[count]))
-			return false;
+	count = of_property_count_u32_elems(np, "opp-supported-hw");
+	if (count <= 0 || count % levels) {
+		dev_err(dev, "%s: Invalid opp-supported-hw property (%d)\n",
+			__func__, count);
+		return false;
 	}
 
-	return true;
+	versions = count / levels;
+
+	/* All levels in at least one of the versions should match */
+	for (i = 0; i < versions; i++) {
+		bool supported = true;
+
+		for (j = 0; j < levels; j++) {
+			ret = of_property_read_u32_index(np, "opp-supported-hw",
+							 i * levels + j, &val);
+			if (ret) {
+				dev_warn(dev, "%s: failed to read opp-supported-hw property at index %d: %d\n",
+					 __func__, i * levels + j, ret);
+				return false;
+			}
+
+			/* Check if the level is supported */
+			if (!(val & opp_table->supported_hw[j])) {
+				supported = false;
+				break;
+			}
+		}
+
+		if (supported)
+			return true;
+	}
+
+	return false;
 }
 
 static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev,
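
Aside, not part of the patch: the rewritten _opp_is_supported() above generalizes the "opp-supported-hw" matching. The property is now read as a flat u32 array carrying one or more versions, each opp_table->supported_hw_count ("levels") words long, and an OPP is kept when every word of at least one version shares a bit with the driver-supplied masks. A minimal standalone sketch of that rule, using plain C types in place of the kernel ones:

#include <stdbool.h>
#include <stdint.h>

/*
 * prop: flattened "opp-supported-hw" words, count elements long.
 * mask: driver-supplied masks, levels elements long.
 * Mirrors the matching logic of the patched _opp_is_supported().
 */
static bool supported_hw_matches(const uint32_t *prop, int count,
				 const uint32_t *mask, int levels)
{
	if (count <= 0 || count % levels)
		return false;			/* malformed property */

	for (int i = 0; i < count / levels; i++) {	/* each version */
		bool supported = true;

		for (int j = 0; j < levels; j++) {	/* each level word */
			if (!(prop[i * levels + j] & mask[j])) {
				supported = false;
				break;
			}
		}
		if (supported)
			return true;	/* all levels of one version matched */
	}
	return false;
}

With levels = 2, for instance, a four-word property describes two versions, and the OPP survives if either pair matches both masks.
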
@@ -257,25 +627,93 @@
  */
 void dev_pm_opp_of_remove_table(struct device *dev)
 {
-	_dev_pm_opp_find_and_remove_table(dev, false);
+	dev_pm_opp_remove_table(dev);
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table);
 
-/* Returns opp descriptor node for a device node, caller must
- * do of_node_put() */
-static struct device_node *_opp_of_get_opp_desc_node(struct device_node *np,
-						     int index)
+static int _read_bw(struct dev_pm_opp *new_opp, struct opp_table *table,
+		    struct device_node *np, bool peak)
 {
-	/* "operating-points-v2" can be an array for power domain providers */
-	return of_parse_phandle(np, "operating-points-v2", index);
+	const char *name = peak ? "opp-peak-kBps" : "opp-avg-kBps";
+	struct property *prop;
+	int i, count, ret;
+	u32 *bw;
+
+	prop = of_find_property(np, name, NULL);
+	if (!prop)
+		return -ENODEV;
+
+	count = prop->length / sizeof(u32);
+	if (table->path_count != count) {
+		pr_err("%s: Mismatch between %s and paths (%d %d)\n",
+		       __func__, name, count, table->path_count);
+		return -EINVAL;
+	}
+
+	bw = kmalloc_array(count, sizeof(*bw), GFP_KERNEL);
+	if (!bw)
+		return -ENOMEM;
+
+	ret = of_property_read_u32_array(np, name, bw, count);
+	if (ret) {
+		pr_err("%s: Error parsing %s: %d\n", __func__, name, ret);
+		goto out;
+	}
+
+	for (i = 0; i < count; i++) {
+		if (peak)
+			new_opp->bandwidth[i].peak = kBps_to_icc(bw[i]);
+		else
+			new_opp->bandwidth[i].avg = kBps_to_icc(bw[i]);
+	}
+
+out:
+	kfree(bw);
+	return ret;
 }
 
-/* Returns opp descriptor node for a device, caller must do of_node_put() */
-struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev)
+static int _read_opp_key(struct dev_pm_opp *new_opp, struct opp_table *table,
+			 struct device_node *np, bool *rate_not_available)
 {
-	return _opp_of_get_opp_desc_node(dev->of_node, 0);
+	bool found = false;
+	u64 rate;
+	int ret;
+
+	ret = of_property_read_u64(np, "opp-hz", &rate);
+	if (!ret) {
+		/*
+		 * Rate is defined as an unsigned long in clk API, and so
+		 * casting explicitly to its type. Must be fixed once rate is 64
+		 * bit guaranteed in clk API.
+		 */
+		new_opp->rate = (unsigned long)rate;
+		found = true;
+	}
+	*rate_not_available = !!ret;
+
+	/*
+	 * Bandwidth consists of peak and average (optional) values:
+	 *     opp-peak-kBps = <path1_value path2_value>;
+	 *     opp-avg-kBps = <path1_value path2_value>;
+	 */
+	ret = _read_bw(new_opp, table, np, true);
+	if (!ret) {
+		found = true;
+		ret = _read_bw(new_opp, table, np, false);
+	}
+
+	/* The properties were found but we failed to parse them */
+	if (ret && ret != -ENODEV)
+		return ret;
+
+	if (!of_property_read_u32(np, "opp-level", &new_opp->level))
+		found = true;
+
+	if (found)
+		return 0;
+
+	return ret;
 }
-EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_opp_desc_node);
 
 /**
  * _opp_add_static_v2() - Allocate static OPPs (As per 'v2' DT bindings)
@@ -288,48 +726,41 @@
  * removed by dev_pm_opp_remove.
  *
  * Return:
- * 0		On success OR
+ * Valid OPP pointer:
+ *		On success
+ * NULL:
  *		Duplicate OPPs (both freq and volt are same) and opp->available
- * -EEXIST	Freq are same and volt are different OR
+ *		OR if the OPP is not supported by hardware.
+ * ERR_PTR(-EEXIST):
+ *		Freq are same and volt are different OR
  *		Duplicate OPPs (both freq and volt are same) and !opp->available
- * -ENOMEM	Memory allocation failure
- * -EINVAL	Failed parsing the OPP node
+ * ERR_PTR(-ENOMEM):
+ *		Memory allocation failure
+ * ERR_PTR(-EINVAL):
+ *		Failed parsing the OPP node
  */
-static int _opp_add_static_v2(struct opp_table *opp_table, struct device *dev,
-			      struct device_node *np)
+static struct dev_pm_opp *_opp_add_static_v2(struct opp_table *opp_table,
+		struct device *dev, struct device_node *np)
 {
 	struct dev_pm_opp *new_opp;
-	u64 rate = 0;
 	u32 val;
 	int ret;
 	bool rate_not_available = false;
 
 	new_opp = _opp_allocate(opp_table);
 	if (!new_opp)
-		return -ENOMEM;
+		return ERR_PTR(-ENOMEM);
 
-	ret = of_property_read_u64(np, "opp-hz", &rate);
-	if (ret < 0) {
-		/* "opp-hz" is optional for devices like power domains. */
-		if (!of_find_property(dev->of_node, "#power-domain-cells",
-				      NULL)) {
-			dev_err(dev, "%s: opp-hz not found\n", __func__);
-			goto free_opp;
-		}
-
-		rate_not_available = true;
-	} else {
-		/*
-		 * Rate is defined as an unsigned long in clk API, and so
-		 * casting explicitly to its type. Must be fixed once rate is 64
-		 * bit guaranteed in clk API.
-		 */
-		new_opp->rate = (unsigned long)rate;
+	ret = _read_opp_key(new_opp, opp_table, np, &rate_not_available);
+	if (ret < 0 && !opp_table->is_genpd) {
+		dev_err(dev, "%s: opp key field not found\n", __func__);
+		goto free_opp;
 	}
 
 	/* Check if the OPP supports hardware's hierarchy of versions or not */
 	if (!_opp_is_supported(dev, opp_table, np)) {
-		dev_dbg(dev, "OPP not supported by hardware: %llu\n", rate);
+		dev_dbg(dev, "OPP not supported by hardware: %lu\n",
+			new_opp->rate);
 		goto free_opp;
 	}
 
@@ -339,29 +770,37 @@
 	new_opp->dynamic = false;
 	new_opp->available = true;
 
+	ret = _of_opp_alloc_required_opps(opp_table, new_opp);
+	if (ret)
+		goto free_opp;
+
 	if (!of_property_read_u32(np, "clock-latency-ns", &val))
 		new_opp->clock_latency_ns = val;
 
-	new_opp->pstate = of_genpd_opp_to_performance_state(dev, np);
-
 	ret = opp_parse_supplies(new_opp, dev, opp_table);
 	if (ret)
-		goto free_opp;
+		goto free_required_opps;
+
+	if (opp_table->is_genpd)
+		new_opp->pstate = pm_genpd_opp_to_performance_state(dev, new_opp);
 
 	ret = _opp_add(dev, new_opp, opp_table, rate_not_available);
 	if (ret) {
 		/* Don't return error for duplicate OPPs */
 		if (ret == -EBUSY)
 			ret = 0;
-		goto free_opp;
+		goto free_required_opps;
 	}
 
 	/* OPP to select on device suspend */
 	if (of_property_read_bool(np, "opp-suspend")) {
 		if (opp_table->suspend_opp) {
-			dev_warn(dev, "%s: Multiple suspend OPPs found (%lu %lu)\n",
-				 __func__, opp_table->suspend_opp->rate,
-				 new_opp->rate);
+			/* Pick the OPP with higher rate as suspend OPP */
+			if (new_opp->rate > opp_table->suspend_opp->rate) {
+				opp_table->suspend_opp->suspend = false;
+				new_opp->suspend = true;
+				opp_table->suspend_opp = new_opp;
+			}
 		} else {
 			new_opp->suspend = true;
 			opp_table->suspend_opp = new_opp;
@@ -381,45 +820,45 @@
 	 * frequency/voltage list.
 	 */
 	blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ADD, new_opp);
-	return 0;
+	return new_opp;
 
+free_required_opps:
+	_of_opp_free_required_opps(opp_table, new_opp);
 free_opp:
 	_opp_free(new_opp);
 
-	return ret;
+	return ret ? ERR_PTR(ret) : NULL;
 }
 
 /* Initializes OPP tables based on new bindings */
-static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np)
+static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table)
 {
 	struct device_node *np;
-	struct opp_table *opp_table;
-	int ret = 0, count = 0, pstate_count = 0;
+	int ret, count = 0;
 	struct dev_pm_opp *opp;
 
-	opp_table = _managed_opp(opp_np);
-	if (opp_table) {
-		/* OPPs are already managed */
-		if (!_add_opp_dev(dev, opp_table))
-			ret = -ENOMEM;
-		goto put_opp_table;
+	/* OPP table is already initialized for the device */
+	mutex_lock(&opp_table->lock);
+	if (opp_table->parsed_static_opps) {
+		opp_table->parsed_static_opps++;
+		mutex_unlock(&opp_table->lock);
+		return 0;
 	}
 
-	opp_table = dev_pm_opp_get_opp_table(dev);
-	if (!opp_table)
-		return -ENOMEM;
+	opp_table->parsed_static_opps = 1;
+	mutex_unlock(&opp_table->lock);
 
 	/* We have opp-table node now, iterate over it and add OPPs */
-	for_each_available_child_of_node(opp_np, np) {
-		count++;
-
-		ret = _opp_add_static_v2(opp_table, dev, np);
-		if (ret) {
+	for_each_available_child_of_node(opp_table->np, np) {
+		opp = _opp_add_static_v2(opp_table, dev, np);
+		if (IS_ERR(opp)) {
+			ret = PTR_ERR(opp);
 			dev_err(dev, "%s: Failed to add OPP, %d\n", __func__,
 				ret);
-			_dev_pm_opp_remove_table(opp_table, dev, false);
 			of_node_put(np);
-			goto put_opp_table;
+			goto remove_static_opp;
+		} else if (opp) {
+			count++;
 		}
 	}
 
@@ -427,49 +866,51 @@
 	if (!count) {
 		dev_err(dev, "%s: no supported OPPs", __func__);
 		ret = -ENOENT;
-		goto put_opp_table;
+		goto remove_static_opp;
 	}
 
-	list_for_each_entry(opp, &opp_table->opp_list, node)
-		pstate_count += !!opp->pstate;
-
-	/* Either all or none of the nodes shall have performance state set */
-	if (pstate_count && pstate_count != count) {
-		dev_err(dev, "Not all nodes have performance state set (%d: %d)\n",
-			count, pstate_count);
-		ret = -ENOENT;
-		_dev_pm_opp_remove_table(opp_table, dev, false);
-		goto put_opp_table;
+	list_for_each_entry(opp, &opp_table->opp_list, node) {
+		/* Any non-zero performance state would enable the feature */
+		if (opp->pstate) {
+			opp_table->genpd_performance_state = true;
+			break;
+		}
 	}
 
-	if (pstate_count)
-		opp_table->genpd_performance_state = true;
+	return 0;
 
-	opp_table->np = opp_np;
-	if (of_property_read_bool(opp_np, "opp-shared"))
-		opp_table->shared_opp = OPP_TABLE_ACCESS_SHARED;
-	else
-		opp_table->shared_opp = OPP_TABLE_ACCESS_EXCLUSIVE;
-
-put_opp_table:
-	dev_pm_opp_put_opp_table(opp_table);
+remove_static_opp:
+	_opp_remove_all_static(opp_table);
 
 	return ret;
 }
 
 /* Initializes OPP tables based on old-deprecated bindings */
-static int _of_add_opp_table_v1(struct device *dev)
+static int _of_add_opp_table_v1(struct device *dev, struct opp_table *opp_table)
 {
-	struct opp_table *opp_table;
 	const struct property *prop;
 	const __be32 *val;
 	int nr, ret = 0;
 
+	mutex_lock(&opp_table->lock);
+	if (opp_table->parsed_static_opps) {
+		opp_table->parsed_static_opps++;
+		mutex_unlock(&opp_table->lock);
+		return 0;
+	}
+
+	opp_table->parsed_static_opps = 1;
+	mutex_unlock(&opp_table->lock);
+
 	prop = of_find_property(dev->of_node, "operating-points", NULL);
-	if (!prop)
-		return -ENODEV;
-	if (!prop->value)
-		return -ENODATA;
+	if (!prop) {
+		ret = -ENODEV;
+		goto remove_static_opp;
+	}
+	if (!prop->value) {
+		ret = -ENODATA;
+		goto remove_static_opp;
	}
 
 	/*
 	 * Each OPP is a set of tuples consisting of frequency and
@@ -478,12 +919,9 @@
 	nr = prop->length / sizeof(u32);
 	if (nr % 2) {
 		dev_err(dev, "%s: Invalid OPP table\n", __func__);
-		return -EINVAL;
+		ret = -EINVAL;
+		goto remove_static_opp;
 	}
-
-	opp_table = dev_pm_opp_get_opp_table(dev);
-	if (!opp_table)
-		return -ENOMEM;
 
 	val = prop->value;
 	while (nr) {
@@ -494,13 +932,16 @@
 		if (ret) {
 			dev_err(dev, "%s: Failed to add OPP %ld (%d)\n",
 				__func__, freq, ret);
-			_dev_pm_opp_remove_table(opp_table, dev, false);
-			break;
+			goto remove_static_opp;
 		}
 		nr -= 2;
 	}
 
-	dev_pm_opp_put_opp_table(opp_table);
+	return 0;
+
+remove_static_opp:
+	_opp_remove_all_static(opp_table);
+
 	return ret;
 }
 
@@ -523,24 +964,24 @@
  */
 int dev_pm_opp_of_add_table(struct device *dev)
 {
-	struct device_node *opp_np;
+	struct opp_table *opp_table;
 	int ret;
 
-	/*
-	 * OPPs have two version of bindings now. The older one is deprecated,
-	 * try for the new binding first.
-	 */
-	opp_np = dev_pm_opp_of_get_opp_desc_node(dev);
-	if (!opp_np) {
-		/*
-		 * Try old-deprecated bindings for backward compatibility with
-		 * older dtbs.
-		 */
-		return _of_add_opp_table_v1(dev);
-	}
+	opp_table = dev_pm_opp_get_opp_table_indexed(dev, 0);
+	if (IS_ERR(opp_table))
+		return PTR_ERR(opp_table);
 
-	ret = _of_add_opp_table_v2(dev, opp_np);
-	of_node_put(opp_np);
+	/*
+	 * OPPs have two version of bindings now. Also try the old (v1)
+	 * bindings for backward compatibility with older dtbs.
+	 */
+	if (opp_table->np)
+		ret = _of_add_opp_table_v2(dev, opp_table);
+	else
+		ret = _of_add_opp_table_v1(dev, opp_table);
+
+	if (ret)
+		dev_pm_opp_put_opp_table(opp_table);
 
 	return ret;
 }
@@ -567,28 +1008,27 @@
  */
 int dev_pm_opp_of_add_table_indexed(struct device *dev, int index)
 {
-	struct device_node *opp_np;
+	struct opp_table *opp_table;
 	int ret, count;
 
-again:
-	opp_np = _opp_of_get_opp_desc_node(dev->of_node, index);
-	if (!opp_np) {
+	if (index) {
 		/*
 		 * If only one phandle is present, then the same OPP table
 		 * applies for all index requests.
 		 */
 		count = of_count_phandle_with_args(dev->of_node,
						   "operating-points-v2", NULL);
-		if (count == 1 && index) {
+		if (count == 1)
 			index = 0;
-			goto again;
-		}
-
-		return -ENODEV;
 	}
 
-	ret = _of_add_opp_table_v2(dev, opp_np);
-	of_node_put(opp_np);
+	opp_table = dev_pm_opp_get_opp_table_indexed(dev, index);
+	if (IS_ERR(opp_table))
+		return PTR_ERR(opp_table);
+
+	ret = _of_add_opp_table_v2(dev, opp_table);
+	if (ret)
+		dev_pm_opp_put_opp_table(opp_table);
 
 	return ret;
 }
@@ -605,7 +1045,7 @@
  */
 void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask)
 {
-	_dev_pm_opp_cpumask_remove_table(cpumask, true);
+	_dev_pm_opp_cpumask_remove_table(cpumask, -1);
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_remove_table);
 
@@ -618,16 +1058,18 @@
 int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask)
 {
 	struct device *cpu_dev;
-	int cpu, ret = 0;
+	int cpu, ret;
 
-	WARN_ON(cpumask_empty(cpumask));
+	if (WARN_ON(cpumask_empty(cpumask)))
+		return -ENODEV;
 
 	for_each_cpu(cpu, cpumask) {
 		cpu_dev = get_cpu_device(cpu);
 		if (!cpu_dev) {
 			pr_err("%s: failed to get cpu%d device\n", __func__,
 			       cpu);
-			continue;
+			ret = -ENODEV;
+			goto remove_table;
 		}
 
 		ret = dev_pm_opp_of_add_table(cpu_dev);
@@ -639,11 +1081,15 @@
 			pr_debug("%s: couldn't find opp table for cpu:%d, %d\n",
 				 __func__, cpu, ret);
 
-			/* Free all other OPPs */
-			dev_pm_opp_of_cpumask_remove_table(cpumask);
-			break;
+			goto remove_table;
 		}
 	}
+
+	return 0;
+
+remove_table:
+	/* Free all other OPPs */
+	_dev_pm_opp_cpumask_remove_table(cpumask, cpu);
 
 	return ret;
 }
@@ -720,58 +1166,48 @@
 EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_sharing_cpus);
 
 /**
- * of_dev_pm_opp_find_required_opp() - Search for required OPP.
- * @dev: The device whose OPP node is referenced by the 'np' DT node.
+ * of_get_required_opp_performance_state() - Search for required OPP and return its performance state.
  * @np: Node that contains the "required-opps" property.
+ * @index: Index of the phandle to parse.
  *
- * Returns the OPP of the device 'dev', whose phandle is present in the "np"
- * node. Although the "required-opps" property supports having multiple
- * phandles, this helper routine only parses the very first phandle in the list.
+ * Returns the performance state of the OPP pointed out by the "required-opps"
+ * property at @index in @np.
 *
- * Return: Matching opp, else returns ERR_PTR in case of error and should be
- * handled using IS_ERR.
- *
- * The callers are required to call dev_pm_opp_put() for the returned OPP after
- * use.
+ * Return: Zero or positive performance state on success, otherwise negative
+ * value on errors.
 */
-struct dev_pm_opp *of_dev_pm_opp_find_required_opp(struct device *dev,
-						   struct device_node *np)
+int of_get_required_opp_performance_state(struct device_node *np, int index)
 {
-	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ENODEV);
+	struct dev_pm_opp *opp;
 	struct device_node *required_np;
 	struct opp_table *opp_table;
+	int pstate = -EINVAL;
 
-	opp_table = _find_opp_table(dev);
-	if (IS_ERR(opp_table))
-		return ERR_CAST(opp_table);
+	required_np = of_parse_required_opp(np, index);
+	if (!required_np)
+		return -ENODEV;
 
-	required_np = of_parse_phandle(np, "required-opps", 0);
-	if (unlikely(!required_np)) {
-		dev_err(dev, "Unable to parse required-opps\n");
-		goto put_opp_table;
+	opp_table = _find_table_of_opp_np(required_np);
+	if (IS_ERR(opp_table)) {
+		pr_err("%s: Failed to find required OPP table %pOF: %ld\n",
+		       __func__, np, PTR_ERR(opp_table));
+		goto put_required_np;
 	}
 
-	mutex_lock(&opp_table->lock);
-
-	list_for_each_entry(temp_opp, &opp_table->opp_list, node) {
-		if (temp_opp->available && temp_opp->np == required_np) {
-			opp = temp_opp;
-
-			/* Increment the reference count of OPP */
-			dev_pm_opp_get(opp);
-			break;
-		}
+	opp = _find_opp_of_np(opp_table, required_np);
+	if (opp) {
+		pstate = opp->pstate;
+		dev_pm_opp_put(opp);
 	}
 
-	mutex_unlock(&opp_table->lock);
-
-	of_node_put(required_np);
-put_opp_table:
 	dev_pm_opp_put_opp_table(opp_table);
 
-	return opp;
+put_required_np:
+	of_node_put(required_np);
+
+	return pstate;
 }
-EXPORT_SYMBOL_GPL(of_dev_pm_opp_find_required_opp);
+EXPORT_SYMBOL_GPL(of_get_required_opp_performance_state);
 
 /**
  * dev_pm_opp_get_of_node() - Gets the DT node corresponding to an opp
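
Aside, not part of the patch: of_get_required_opp_performance_state() above replaces the OPP-pointer interface of of_dev_pm_opp_find_required_opp() with a plain integer one. A hedged usage sketch, where foo_opp_to_pstate() is a hypothetical helper a consumer could write to map one of its own OPPs to the genpd performance state that OPP's first "required-opps" phandle points at:

#include <linux/of.h>
#include <linux/pm_opp.h>

static int foo_opp_to_pstate(struct dev_pm_opp *opp)
{
	struct device_node *np;
	int pstate;

	/* OPP node carrying the "required-opps" property, refcounted */
	np = dev_pm_opp_get_of_node(opp);
	if (!np)
		return -ENOENT;

	pstate = of_get_required_opp_performance_state(np, 0);
	of_node_put(np);

	return pstate;	/* >= 0: performance state, < 0: errno */
}
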
@@ -792,37 +1228,29 @@
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_get_of_node);
 
-/**
- * of_dev_pm_opp_get_cpu_power() - Estimates the power of a CPU
- * @mW: pointer to the power estimate in milli-watts
- * @KHz: pointer to the OPP's frequency, in kilo-hertz
- * @cpu: CPU for which power needs to be estimated
- *
- * Computes the power estimated by @CPU at the first OPP above @KHz (ceil),
- * and updates @KHz and @mW accordingly.
- *
- * The power is estimated as P = C * V^2 * f, with C the CPU's capacitance
- * (read from the 'dynamic-power-coefficient' devicetree binding) and V and f
+/*
+ * Callback function provided to the Energy Model framework upon registration.
+ * This computes the power estimated by @dev at @kHz if it is the frequency
+ * of an existing OPP, or at the frequency of the first OPP above @kHz otherwise
+ * (see dev_pm_opp_find_freq_ceil()). This function updates @kHz to the ceiled
+ * frequency and @mW to the associated power. The power is estimated as
+ * P = C * V^2 * f with C being the device's capacitance and V and f
 * respectively the voltage and frequency of the OPP.
 *
- * Return: -ENODEV if the CPU device cannot be found, -EINVAL if the power
- * calculation failed because of missing parameters, 0 otherwise.
+ * Returns -EINVAL if the power calculation failed because of missing
+ * parameters, 0 otherwise.
 */
-int of_dev_pm_opp_get_cpu_power(unsigned long *mW, unsigned long *KHz, int cpu)
+static int __maybe_unused _get_power(unsigned long *mW, unsigned long *kHz,
+				     struct device *dev)
 {
-	unsigned long mV, Hz, MHz;
-	struct device *cpu_dev;
 	struct dev_pm_opp *opp;
 	struct device_node *np;
+	unsigned long mV, Hz;
 	u32 cap;
 	u64 tmp;
 	int ret;
 
-	cpu_dev = get_cpu_device(cpu);
-	if (!cpu_dev)
-		return -ENODEV;
-
-	np = of_node_get(cpu_dev->of_node);
+	np = of_node_get(dev->of_node);
 	if (!np)
 		return -EINVAL;
 
@@ -831,8 +1259,8 @@
 	if (ret)
 		return -EINVAL;
 
-	Hz = *KHz * 1000;
-	opp = dev_pm_opp_find_freq_ceil(cpu_dev, &Hz);
+	Hz = *kHz * 1000;
+	opp = dev_pm_opp_find_freq_ceil(dev, &Hz);
 	if (IS_ERR(opp))
 		return -EINVAL;
 
@@ -841,13 +1269,73 @@
 	if (!mV)
 		return -EINVAL;
 
-	MHz = Hz / 1000000;
-	tmp = (u64)cap * mV * mV * MHz;
+	tmp = (u64)cap * mV * mV * (Hz / 1000000);
 	do_div(tmp, 1000000000);
 
 	*mW = (unsigned long)tmp;
-	*KHz = Hz / 1000;
+	*kHz = Hz / 1000;
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(of_dev_pm_opp_get_cpu_power);
+
+/**
+ * dev_pm_opp_of_register_em() - Attempt to register an Energy Model
+ * @dev		: Device for which an Energy Model has to be registered
+ * @cpus	: CPUs for which an Energy Model has to be registered. For
+ *		other type of devices it should be set to NULL.
+ *
+ * This checks whether the "dynamic-power-coefficient" devicetree property has
+ * been specified, and tries to register an Energy Model with it if it has.
+ * Having this property means the voltages are known for OPPs and the EM
+ * might be calculated.
+ */
+int dev_pm_opp_of_register_em(struct device *dev, struct cpumask *cpus)
+{
+	struct em_data_callback em_cb = EM_DATA_CB(_get_power);
+	struct device_node *np;
+	int ret, nr_opp;
+	u32 cap;
+
+	if (IS_ERR_OR_NULL(dev)) {
+		ret = -EINVAL;
+		goto failed;
+	}
+
+	nr_opp = dev_pm_opp_get_opp_count(dev);
+	if (nr_opp <= 0) {
+		ret = -EINVAL;
+		goto failed;
+	}
+
+	np = of_node_get(dev->of_node);
+	if (!np) {
+		ret = -EINVAL;
+		goto failed;
+	}
+
+	/*
+	 * Register an EM only if the 'dynamic-power-coefficient' property is
+	 * set in devicetree. It is assumed the voltage values are known if that
+	 * property is set since it is useless otherwise. If voltages are not
+	 * known, just let the EM registration fail with an error to alert the
+	 * user about the inconsistent configuration.
+	 */
+	ret = of_property_read_u32(np, "dynamic-power-coefficient", &cap);
+	of_node_put(np);
+	if (ret || !cap) {
+		dev_dbg(dev, "Couldn't find proper 'dynamic-power-coefficient' in DT\n");
+		ret = -EINVAL;
+		goto failed;
+	}
+
+	ret = em_dev_register_perf_domain(dev, nr_opp, &em_cb, cpus, true);
+	if (ret)
+		goto failed;
+
+	return 0;
+
+failed:
+	dev_dbg(dev, "Couldn't register Energy Model %d\n", ret);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_of_register_em);
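
Aside, not part of the patch: dev_pm_opp_of_register_em() is meant to be called once a device's OPP table has been parsed, since it sizes the Energy Model from dev_pm_opp_get_opp_count(). A hedged sketch of that call order from a cpufreq driver's init path; foo_cpufreq_init() is hypothetical:

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/pm_opp.h>

static int foo_cpufreq_init(struct cpufreq_policy *policy)
{
	struct device *cpu_dev = get_cpu_device(policy->cpu);
	int ret;

	if (!cpu_dev)
		return -ENODEV;

	/* Parse the OPP table first: v2 bindings, with a v1 fallback */
	ret = dev_pm_opp_of_add_table(cpu_dev);
	if (ret)
		return ret;

	/* Best-effort: a missing 'dynamic-power-coefficient' just means no EM */
	dev_pm_opp_of_register_em(cpu_dev, policy->cpus);

	return 0;
}

The EM call is deliberately tolerated to fail here: per the function's own error path, a device without the property is simply left without an Energy Model.
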