2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
kernel/drivers/of/base.c
@@ -16,6 +16,7 @@

 #define pr_fmt(fmt) "OF: " fmt

+#include <linux/bitmap.h>
 #include <linux/console.h>
 #include <linux/ctype.h>
 #include <linux/cpu.h>
@@ -78,33 +79,52 @@
 }
 EXPORT_SYMBOL(of_node_name_prefix);

-int of_n_addr_cells(struct device_node *np)
+static bool __of_node_is_type(const struct device_node *np, const char *type)
+{
+	const char *match = __of_get_property(np, "device_type", NULL);
+
+	return np && match && type && !strcmp(match, type);
+}
+
+int of_bus_n_addr_cells(struct device_node *np)
 {
 	u32 cells;

-	do {
-		if (np->parent)
-			np = np->parent;
+	for (; np; np = np->parent)
 		if (!of_property_read_u32(np, "#address-cells", &cells))
 			return cells;
-	} while (np->parent);
+
 	/* No #address-cells property for the root node */
 	return OF_ROOT_NODE_ADDR_CELLS_DEFAULT;
 }
+
+int of_n_addr_cells(struct device_node *np)
+{
+	if (np->parent)
+		np = np->parent;
+
+	return of_bus_n_addr_cells(np);
+}
 EXPORT_SYMBOL(of_n_addr_cells);

-int of_n_size_cells(struct device_node *np)
+int of_bus_n_size_cells(struct device_node *np)
 {
 	u32 cells;

-	do {
-		if (np->parent)
-			np = np->parent;
+	for (; np; np = np->parent)
 		if (!of_property_read_u32(np, "#size-cells", &cells))
 			return cells;
-	} while (np->parent);
+
 	/* No #size-cells property for the root node */
 	return OF_ROOT_NODE_SIZE_CELLS_DEFAULT;
+}
+
+int of_n_size_cells(struct device_node *np)
+{
+	if (np->parent)
+		np = np->parent;
+
+	return of_bus_n_size_cells(np);
 }
 EXPORT_SYMBOL(of_n_size_cells);

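A quick sketch of the split introduced above (illustrative only, not part of this change; the "/soc" path is hypothetical): of_n_addr_cells() keeps its historical behaviour of starting the "#address-cells" lookup at the parent, while the new of_bus_n_addr_cells() starts at the node itself, so a bus node that carries its own "#address-cells" answers for its children.

	/* hypothetical: "soc" carries #address-cells = <2> in its own node */
	struct device_node *soc = of_find_node_by_path("/soc");

	of_bus_n_addr_cells(soc);	/* 2: checks "soc" itself first */
	of_n_addr_cells(soc);		/* starts at soc->parent, i.e. the root's value */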
@@ -115,122 +135,38 @@
 }
 #endif

-/*
- * Assumptions behind phandle_cache implementation:
- *   - phandle property values are in a contiguous range of 1..n
- *
- * If the assumptions do not hold, then
- *   - the phandle lookup overhead reduction provided by the cache
- *     will likely be less
- */
+#define OF_PHANDLE_CACHE_BITS	7
+#define OF_PHANDLE_CACHE_SZ	BIT(OF_PHANDLE_CACHE_BITS)

-static struct device_node **phandle_cache;
-static u32 phandle_cache_mask;
+static struct device_node *phandle_cache[OF_PHANDLE_CACHE_SZ];
+
+static u32 of_phandle_cache_hash(phandle handle)
+{
+	return hash_32(handle, OF_PHANDLE_CACHE_BITS);
+}

 /*
  * Caller must hold devtree_lock.
  */
-static struct device_node** __of_free_phandle_cache(void)
+void __of_phandle_cache_inv_entry(phandle handle)
 {
-	u32 cache_entries = phandle_cache_mask + 1;
-	u32 k;
-	struct device_node **shadow;
-
-	if (!phandle_cache)
-		return NULL;
-
-	for (k = 0; k < cache_entries; k++)
-		of_node_put(phandle_cache[k]);
-
-	shadow = phandle_cache;
-	phandle_cache = NULL;
-	return shadow;
-}
-
-int of_free_phandle_cache(void)
-{
-	unsigned long flags;
-	struct device_node **shadow;
-
-	raw_spin_lock_irqsave(&devtree_lock, flags);
-
-	shadow = __of_free_phandle_cache();
-
-	raw_spin_unlock_irqrestore(&devtree_lock, flags);
-	kfree(shadow);
-	return 0;
-}
-#if !defined(CONFIG_MODULES)
-late_initcall_sync(of_free_phandle_cache);
-#endif
-
-/*
- * Caller must hold devtree_lock.
- */
-void __of_free_phandle_cache_entry(phandle handle)
-{
-	phandle masked_handle;
+	u32 handle_hash;
 	struct device_node *np;

 	if (!handle)
 		return;

-	masked_handle = handle & phandle_cache_mask;
+	handle_hash = of_phandle_cache_hash(handle);

-	if (phandle_cache) {
-		np = phandle_cache[masked_handle];
-		if (np && handle == np->phandle) {
-			of_node_put(np);
-			phandle_cache[masked_handle] = NULL;
-		}
-	}
-}
-
-void of_populate_phandle_cache(void)
-{
-	unsigned long flags;
-	u32 cache_entries;
-	struct device_node *np;
-	u32 phandles = 0;
-	struct device_node **shadow;
-
-	raw_spin_lock_irqsave(&devtree_lock, flags);
-
-	shadow = __of_free_phandle_cache();
-
-	for_each_of_allnodes(np)
-		if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL)
-			phandles++;
-
-	if (!phandles)
-		goto out;
-	raw_spin_unlock_irqrestore(&devtree_lock, flags);
-
-	cache_entries = roundup_pow_of_two(phandles);
-	phandle_cache_mask = cache_entries - 1;
-
-	phandle_cache = kcalloc(cache_entries, sizeof(*phandle_cache),
-				GFP_ATOMIC);
-	raw_spin_lock_irqsave(&devtree_lock, flags);
-	if (!phandle_cache)
-		goto out;
-
-	for_each_of_allnodes(np)
-		if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL) {
-			of_node_get(np);
-			phandle_cache[np->phandle & phandle_cache_mask] = np;
-		}
-
-out:
-	raw_spin_unlock_irqrestore(&devtree_lock, flags);
-	kfree(shadow);
+	np = phandle_cache[handle_hash];
+	if (np && handle == np->phandle)
+		phandle_cache[handle_hash] = NULL;
 }

 void __init of_core_init(void)
 {
 	struct device_node *np;

-	of_populate_phandle_cache();

 	/* Create the kset, and register existing nodes */
 	mutex_lock(&of_mutex);
@@ -240,8 +176,11 @@
 		pr_err("failed to register existing nodes\n");
 		return;
 	}
-	for_each_of_allnodes(np)
+	for_each_of_allnodes(np) {
 		__of_attach_node_sysfs(np);
+		if (np->phandle && !phandle_cache[of_phandle_cache_hash(np->phandle)])
+			phandle_cache[of_phandle_cache_hash(np->phandle)] = np;
+	}
 	mutex_unlock(&of_mutex);

 	/* Symlink in /proc as required by userspace ABI */
@@ -380,6 +319,8 @@

 	ac = of_n_addr_cells(cpun);
 	cell = of_get_property(cpun, prop_name, &prop_len);
+	if (!cell && !ac && arch_match_cpu_phys_id(cpu, 0))
+		return true;
 	if (!cell || !ac)
 		return false;
 	prop_len /= sizeof(*cell) * ac;
@@ -440,7 +381,7 @@
 {
 	struct device_node *cpun;

-	for_each_node_by_type(cpun, "cpu") {
+	for_each_of_cpu_node(cpun) {
 		if (arch_find_n_match_cpu_physical_id(cpun, cpu, thread))
 			return cpun;
 	}
@@ -473,6 +414,42 @@
 	return -ENODEV;
 }
 EXPORT_SYMBOL(of_cpu_node_to_id);
+
+/**
+ * of_get_cpu_state_node - Get CPU's idle state node at the given index
+ *
+ * @cpu_node: The device node for the CPU
+ * @index: The index in the list of the idle states
+ *
+ * Two generic methods can be used to describe a CPU's idle states, either via
+ * a flattened description through the "cpu-idle-states" binding or via the
+ * hierarchical layout, using the "power-domains" and the "domain-idle-states"
+ * bindings. This function checks for both and returns the idle state node for
+ * the requested index.
+ *
+ * In case an idle state node is found at @index, the refcount is incremented
+ * for it, so call of_node_put() on it when done. Returns NULL if not found.
+ */
+struct device_node *of_get_cpu_state_node(struct device_node *cpu_node,
+					  int index)
+{
+	struct of_phandle_args args;
+	int err;
+
+	err = of_parse_phandle_with_args(cpu_node, "power-domains",
+					 "#power-domain-cells", 0, &args);
+	if (!err) {
+		struct device_node *state_node =
+			of_parse_phandle(args.np, "domain-idle-states", index);
+
+		of_node_put(args.np);
+		if (state_node)
+			return state_node;
+	}
+
+	return of_parse_phandle(cpu_node, "cpu-idle-states", index);
+}
+EXPORT_SYMBOL(of_get_cpu_state_node);

 /**
  * __of_device_is_compatible() - Check if the node matches given constraints
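A minimal usage sketch for the new of_get_cpu_state_node() helper above (illustrative only, not part of this change; the pr_info() is hypothetical): walk a CPU's idle state nodes regardless of which binding describes them, dropping each returned reference when done.

	struct device_node *cpu_node = of_get_cpu_node(0, NULL);
	struct device_node *state;
	int i = 0;

	while ((state = of_get_cpu_state_node(cpu_node, i))) {
		pr_info("cpu0 idle state %d: %pOF\n", i, state);
		of_node_put(state);	/* the helper returned a held reference */
		i++;
	}
	of_node_put(cpu_node);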
@@ -527,14 +504,14 @@

 	/* Matching type is better than matching name */
 	if (type && type[0]) {
-		if (!device->type || of_node_cmp(type, device->type))
+		if (!__of_node_is_type(device, type))
 			return 0;
 		score += 2;
 	}

 	/* Matching name is a bit better than not */
 	if (name && name[0]) {
-		if (!device->name || of_node_cmp(name, device->name))
+		if (!of_node_name_eq(device, name))
 			return 0;
 		score++;
 	}
@@ -649,6 +626,28 @@

 }
 EXPORT_SYMBOL(of_device_is_available);
+
+/**
+ * __of_device_is_fail - check if a device has status "fail" or "fail-..."
+ *
+ * @device: Node to check status for, with locks already held
+ *
+ * Return: True if the status property is set to "fail" or "fail-..." (for any
+ * error code suffix), false otherwise
+ */
+static bool __of_device_is_fail(const struct device_node *device)
+{
+	const char *status;
+
+	if (!device)
+		return false;
+
+	status = __of_get_property(device, "status", NULL);
+	if (status == NULL)
+		return false;
+
+	return !strcmp(status, "fail") || !strncmp(status, "fail-", 5);
+}

 /**
  * of_device_is_big_endian - check if a device has BE registers
@@ -795,6 +794,48 @@
 EXPORT_SYMBOL(of_get_next_available_child);

 /**
+ * of_get_next_cpu_node - Iterate on cpu nodes
+ * @prev:	previous child of the /cpus node, or NULL to get first
+ *
+ * Unusable CPUs (those with the status property set to "fail" or "fail-...")
+ * will be skipped.
+ *
+ * Returns a cpu node pointer with refcount incremented, use of_node_put()
+ * on it when done. Returns NULL when prev is the last child. Decrements
+ * the refcount of prev.
+ */
+struct device_node *of_get_next_cpu_node(struct device_node *prev)
+{
+	struct device_node *next = NULL;
+	unsigned long flags;
+	struct device_node *node;
+
+	if (!prev)
+		node = of_find_node_by_path("/cpus");
+
+	raw_spin_lock_irqsave(&devtree_lock, flags);
+	if (prev)
+		next = prev->sibling;
+	else if (node) {
+		next = node->child;
+		of_node_put(node);
+	}
+	for (; next; next = next->sibling) {
+		if (__of_device_is_fail(next))
+			continue;
+		if (!(of_node_name_eq(next, "cpu") ||
+		      __of_node_is_type(next, "cpu")))
+			continue;
+		if (of_node_get(next))
+			break;
+	}
+	of_node_put(prev);
+	raw_spin_unlock_irqrestore(&devtree_lock, flags);
+	return next;
+}
+EXPORT_SYMBOL(of_get_next_cpu_node);
+
+/**
  * of_get_compatible_child - Find compatible child node
  * @parent:	parent node
  * @compatible:	compatible string
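An illustrative sketch of how the new CPU-node iterator above is typically consumed (not part of this change): for_each_of_cpu_node(), the of.h wrapper around of_get_next_cpu_node() that this patch also starts using in of_get_cpu_node(), never yields nodes whose status is "fail" or "fail-...".

	struct device_node *cpun;
	int n = 0;

	for_each_of_cpu_node(cpun)	/* wraps of_get_next_cpu_node() */
		n++;			/* "fail"/"fail-..." CPUs are skipped */
	pr_debug("%d usable cpu nodes\n", n);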
@@ -836,7 +877,7 @@
 	struct device_node *child;

 	for_each_child_of_node(node, child)
-		if (child->name && (of_node_cmp(child->name, name) == 0))
+		if (of_node_name_eq(child, name))
 			break;
 	return child;
 }
@@ -962,8 +1003,7 @@

 	raw_spin_lock_irqsave(&devtree_lock, flags);
 	for_each_of_allnodes_from(from, np)
-		if (np->name && (of_node_cmp(np->name, name) == 0)
-		    && of_node_get(np))
+		if (of_node_name_eq(np, name) && of_node_get(np))
 			break;
 	of_node_put(from);
 	raw_spin_unlock_irqrestore(&devtree_lock, flags);
@@ -991,8 +1031,7 @@

 	raw_spin_lock_irqsave(&devtree_lock, flags);
 	for_each_of_allnodes_from(from, np)
-		if (np->type && (of_node_cmp(np->type, type) == 0)
-		    && of_node_get(np))
+		if (__of_node_is_type(np, type) && of_node_get(np))
 			break;
 	of_node_put(from);
 	raw_spin_unlock_irqrestore(&devtree_lock, flags);
@@ -1185,36 +1224,24 @@
 {
 	struct device_node *np = NULL;
 	unsigned long flags;
-	phandle masked_handle;
+	u32 handle_hash;

 	if (!handle)
 		return NULL;

+	handle_hash = of_phandle_cache_hash(handle);
+
 	raw_spin_lock_irqsave(&devtree_lock, flags);

-	masked_handle = handle & phandle_cache_mask;
-
-	if (phandle_cache) {
-		if (phandle_cache[masked_handle] &&
-		    handle == phandle_cache[masked_handle]->phandle)
-			np = phandle_cache[masked_handle];
-		if (np && of_node_check_flag(np, OF_DETACHED)) {
-			WARN_ON(1); /* did not uncache np on node removal */
-			of_node_put(np);
-			phandle_cache[masked_handle] = NULL;
-			np = NULL;
-		}
-	}
+	if (phandle_cache[handle_hash] &&
+	    handle == phandle_cache[handle_hash]->phandle)
+		np = phandle_cache[handle_hash];

 	if (!np) {
 		for_each_of_allnodes(np)
 			if (np->phandle == handle &&
 			    !of_node_check_flag(np, OF_DETACHED)) {
-				if (phandle_cache) {
-					/* will put when removed from cache */
-					of_node_get(np);
-					phandle_cache[masked_handle] = np;
-				}
+				phandle_cache[handle_hash] = np;
 				break;
 			}
 	}
@@ -1247,6 +1274,13 @@
 	int size;

 	memset(it, 0, sizeof(*it));
+
+	/*
+	 * one of cell_count or cells_name must be provided to determine the
+	 * argument length.
+	 */
+	if (cell_count < 0 && !cells_name)
+		return -EINVAL;

 	list = of_get_property(np, list_name, &size);
 	if (!list)
@@ -1297,11 +1331,20 @@

 		if (of_property_read_u32(it->node, it->cells_name,
 					 &count)) {
-			pr_err("%pOF: could not get %s for %pOF\n",
-			       it->parent,
-			       it->cells_name,
-			       it->node);
-			goto err;
+			/*
+			 * If both cell_count and cells_name are given,
+			 * fall back to cell_count in the absence
+			 * of the cells_name property
+			 */
+			if (it->cell_count >= 0) {
+				count = it->cell_count;
+			} else {
+				pr_err("%pOF: could not get %s for %pOF\n",
+				       it->parent,
+				       it->cells_name,
+				       it->node);
+				goto err;
+			}
 		}
 	} else {
 		count = it->cell_count;
@@ -1312,8 +1355,14 @@
 		 * property data length
 		 */
 		if (it->cur + count > it->list_end) {
-			pr_err("%pOF: arguments longer than property\n",
-			       it->parent);
+			if (it->cells_name)
+				pr_err("%pOF: %s = %d found %td\n",
+				       it->parent, it->cells_name,
+				       count, it->list_end - it->cur);
+			else
+				pr_err("%pOF: phandle %s needs %d, found %td\n",
+				       it->parent, of_node_full_name(it->node),
+				       count, it->list_end - it->cur);
 			goto err;
 		}
 	}
@@ -1349,7 +1398,6 @@

 	return count;
 }
-EXPORT_SYMBOL_GPL(of_phandle_iterator_args);

 static int __of_parse_phandle_with_args(const struct device_node *np,
 					const char *list_name,
@@ -1465,10 +1513,17 @@
 				const char *cells_name, int index,
 				struct of_phandle_args *out_args)
 {
+	int cell_count = -1;
+
 	if (index < 0)
 		return -EINVAL;
-	return __of_parse_phandle_with_args(np, list_name, cells_name, 0,
-					    index, out_args);
+
+	/* If cells_name is NULL we assume a cell count of 0 */
+	if (!cells_name)
+		cell_count = 0;
+
+	return __of_parse_phandle_with_args(np, list_name, cells_name,
+					    cell_count, index, out_args);
 }
 EXPORT_SYMBOL(of_parse_phandle_with_args);

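To make the cell_count convention above concrete, a hedged sketch (illustrative only, not part of this change; np and the "hypothetical-links" property are made up): with a cells_name the argument count comes from the provider's "#...-cells" property, while a NULL cells_name now explicitly means a bare phandle list with zero argument cells.

	struct of_phandle_args args;

	/* entries sized by the clock provider's "#clock-cells" */
	of_parse_phandle_with_args(np, "clocks", "#clock-cells", 0, &args);

	/* bare phandle list: NULL cells_name implies a cell count of 0 */
	of_parse_phandle_with_args(np, "hypothetical-links", NULL, 0, &args);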
@@ -1550,7 +1605,7 @@
 	if (!pass_name)
 		goto free;

-	ret = __of_parse_phandle_with_args(np, list_name, cells_name, 0, index,
+	ret = __of_parse_phandle_with_args(np, list_name, cells_name, -1, index,
 					   out_args);
 	if (ret)
 		goto free;
@@ -1718,7 +1773,24 @@
 	struct of_phandle_iterator it;
 	int rc, cur_index = 0;

-	rc = of_phandle_iterator_init(&it, np, list_name, cells_name, 0);
+	/*
+	 * If cells_name is NULL we assume a cell count of 0. This makes
+	 * counting the phandles trivial as each 32bit word in the list is a
+	 * phandle and no arguments need to be considered. So we don't iterate
+	 * through the list but just use the length to determine the phandle
+	 * count.
+	 */
+	if (!cells_name) {
+		const __be32 *list;
+		int size;
+
+		list = of_get_property(np, list_name, &size);
+		if (!list)
+			return -ENOENT;
+
+		return size / sizeof(*list);
+	}
+
+	rc = of_phandle_iterator_init(&it, np, list_name, cells_name, -1);
 	if (rc)
 		return rc;

@@ -1777,6 +1849,7 @@

 	return rc;
 }
+EXPORT_SYMBOL_GPL(of_add_property);

 int __of_remove_property(struct device_node *np, struct property *prop)
 {
@@ -1829,6 +1902,7 @@

 	return rc;
 }
+EXPORT_SYMBOL_GPL(of_remove_property);

 int __of_update_property(struct device_node *np, struct property *newprop,
 			 struct property **oldpropp)
@@ -2004,6 +2078,59 @@
 EXPORT_SYMBOL_GPL(of_alias_get_id);

 /**
+ * of_alias_get_alias_list - Get alias list for the given device driver
+ * @matches:	Array of OF device match structures to search in
+ * @stem:	Alias stem of the given device_node
+ * @bitmap:	Bitmap field pointer
+ * @nbits:	Maximum number of alias IDs which can be recorded in bitmap
+ *
+ * The function walks the alias lookup table to record the alias ids for the
+ * given device match structures and alias stem.
+ *
+ * Return:	0 or -ENOSYS when !CONFIG_OF or
+ *		-EOVERFLOW if alias ID is greater than allocated nbits
+ */
+int of_alias_get_alias_list(const struct of_device_id *matches,
+			     const char *stem, unsigned long *bitmap,
+			     unsigned int nbits)
+{
+	struct alias_prop *app;
+	int ret = 0;
+
+	/* Zero the bitmap field so it starts out clean */
+	bitmap_zero(bitmap, nbits);
+
+	mutex_lock(&of_mutex);
+	pr_debug("%s: Looking for stem: %s\n", __func__, stem);
+	list_for_each_entry(app, &aliases_lookup, link) {
+		pr_debug("%s: stem: %s, id: %d\n",
+			 __func__, app->stem, app->id);
+
+		if (strcmp(app->stem, stem) != 0) {
+			pr_debug("%s: stem comparison didn't pass %s\n",
+				 __func__, app->stem);
+			continue;
+		}
+
+		if (of_match_node(matches, app->np)) {
+			pr_debug("%s: Allocated ID %d\n", __func__, app->id);
+
+			if (app->id >= nbits) {
+				pr_warn("%s: ID %d >= than bitmap field %d\n",
+					__func__, app->id, nbits);
+				ret = -EOVERFLOW;
+			} else {
+				set_bit(app->id, bitmap);
+			}
+		}
+	}
+	mutex_unlock(&of_mutex);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(of_alias_get_alias_list);
+
+/**
  * of_alias_get_highest_id - Get highest alias id for the given stem
  * @stem:	Alias stem to be examined
  *
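A brief usage sketch for the new alias-list helper above (illustrative only, not part of this change; the match table name is hypothetical): collect the alias IDs already claimed for a stem so a driver can avoid them when assigning dynamic IDs.

	DECLARE_BITMAP(used, 8);	/* room for aliases 0..7 */
	int ret;

	ret = of_alias_get_alias_list(hypothetical_serial_of_match, "serial",
				      used, 8);
	if (ret == -EOVERFLOW)
		pr_warn("some \"serial\" aliases exceed 8 and were not recorded\n");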
@@ -2074,9 +2201,9 @@
 	/* OF on pmac has nodes instead of properties named "l2-cache"
 	 * beneath CPU nodes.
 	 */
-	if (IS_ENABLED(CONFIG_PPC_PMAC) && !strcmp(np->type, "cpu"))
+	if (IS_ENABLED(CONFIG_PPC_PMAC) && of_node_is_type(np, "cpu"))
 		for_each_child_of_node(np, child)
-			if (!strcmp(child->type, "cache"))
+			if (of_node_is_type(child, "cache"))
 				return child;

 	return NULL;
@@ -2106,3 +2233,109 @@

 	return cache_level;
 }
+
+/**
+ * of_map_id - Translate an ID through a downstream mapping.
+ * @np: root complex device node.
+ * @id: device ID to map.
+ * @map_name: property name of the map to use.
+ * @map_mask_name: optional property name of the mask to use.
+ * @target: optional pointer to a target device node.
+ * @id_out: optional pointer to receive the translated ID.
+ *
+ * Given a device ID, look up the appropriate implementation-defined
+ * platform ID and/or the target device which receives transactions on that
+ * ID, as per the "iommu-map" and "msi-map" bindings. Either of @target or
+ * @id_out may be NULL if only the other is required. If @target points to
+ * a non-NULL device node pointer, only entries targeting that node will be
+ * matched; if it points to a NULL value, it will receive the device node of
+ * the first matching target phandle, with a reference held.
+ *
+ * Return: 0 on success or a standard error code on failure.
+ */
+int of_map_id(struct device_node *np, u32 id,
+	      const char *map_name, const char *map_mask_name,
+	      struct device_node **target, u32 *id_out)
+{
+	u32 map_mask, masked_id;
+	int map_len;
+	const __be32 *map = NULL;
+
+	if (!np || !map_name || (!target && !id_out))
+		return -EINVAL;
+
+	map = of_get_property(np, map_name, &map_len);
+	if (!map) {
+		if (target)
+			return -ENODEV;
+		/* Otherwise, no map implies no translation */
+		*id_out = id;
+		return 0;
+	}
+
+	if (!map_len || map_len % (4 * sizeof(*map))) {
+		pr_err("%pOF: Error: Bad %s length: %d\n", np,
+			map_name, map_len);
+		return -EINVAL;
+	}
+
+	/* The default is to select all bits. */
+	map_mask = 0xffffffff;
+
+	/*
+	 * Can be overridden by "{iommu,msi}-map-mask" property.
+	 * If of_property_read_u32() fails, the default is used.
+	 */
+	if (map_mask_name)
+		of_property_read_u32(np, map_mask_name, &map_mask);
+
+	masked_id = map_mask & id;
+	for ( ; map_len > 0; map_len -= 4 * sizeof(*map), map += 4) {
+		struct device_node *phandle_node;
+		u32 id_base = be32_to_cpup(map + 0);
+		u32 phandle = be32_to_cpup(map + 1);
+		u32 out_base = be32_to_cpup(map + 2);
+		u32 id_len = be32_to_cpup(map + 3);
+
+		if (id_base & ~map_mask) {
+			pr_err("%pOF: Invalid %s translation - %s-mask (0x%x) ignores id-base (0x%x)\n",
+				np, map_name, map_name,
+				map_mask, id_base);
+			return -EFAULT;
+		}
+
+		if (masked_id < id_base || masked_id >= id_base + id_len)
+			continue;
+
+		phandle_node = of_find_node_by_phandle(phandle);
+		if (!phandle_node)
+			return -ENODEV;
+
+		if (target) {
+			if (*target)
+				of_node_put(phandle_node);
+			else
+				*target = phandle_node;
+
+			if (*target != phandle_node)
+				continue;
+		}
+
+		if (id_out)
+			*id_out = masked_id - id_base + out_base;
+
+		pr_debug("%pOF: %s, using mask %08x, id-base: %08x, out-base: %08x, length: %08x, id: %08x -> %08x\n",
+			np, map_name, map_mask, id_base, out_base,
+			id_len, id, masked_id - id_base + out_base);
+		return 0;
+	}
+
+	pr_info("%pOF: no %s translation for id 0x%x on %pOF\n", np, map_name,
+		id, target && *target ? *target : NULL);
+
+	/* Bypasses translation */
+	if (id_out)
+		*id_out = id;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(of_map_id);
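Closing sketch of how the new of_map_id() above is typically driven (illustrative only, not part of this change; bridge_np and rid stand in for a caller's host-bridge node and requester ID): resolve the MSI parent node and the translated device ID through an "msi-map".

	struct device_node *msi_np = NULL;
	u32 msi_id;
	int err;

	err = of_map_id(bridge_np, rid, "msi-map", "msi-map-mask",
			&msi_np, &msi_id);
	if (!err)
		pr_debug("rid 0x%x -> %pOF / 0x%x\n", rid, msi_np, msi_id);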