forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-08 01573e231f18eb2d99162747186f59511f56b64d
kernel/drivers/of/base.c
@@ -16,6 +16,7 @@
 
 #define pr_fmt(fmt) "OF: " fmt
 
+#include <linux/bitmap.h>
 #include <linux/console.h>
 #include <linux/ctype.h>
 #include <linux/cpu.h>
@@ -78,33 +79,52 @@
 }
 EXPORT_SYMBOL(of_node_name_prefix);
 
-int of_n_addr_cells(struct device_node *np)
+static bool __of_node_is_type(const struct device_node *np, const char *type)
+{
+	const char *match = __of_get_property(np, "device_type", NULL);
+
+	return np && match && type && !strcmp(match, type);
+}
+
+int of_bus_n_addr_cells(struct device_node *np)
 {
 	u32 cells;
 
-	do {
-		if (np->parent)
-			np = np->parent;
+	for (; np; np = np->parent)
 		if (!of_property_read_u32(np, "#address-cells", &cells))
 			return cells;
-	} while (np->parent);
+
 	/* No #address-cells property for the root node */
 	return OF_ROOT_NODE_ADDR_CELLS_DEFAULT;
 }
+
+int of_n_addr_cells(struct device_node *np)
+{
+	if (np->parent)
+		np = np->parent;
+
+	return of_bus_n_addr_cells(np);
+}
 EXPORT_SYMBOL(of_n_addr_cells);
 
-int of_n_size_cells(struct device_node *np)
+int of_bus_n_size_cells(struct device_node *np)
 {
 	u32 cells;
 
-	do {
-		if (np->parent)
-			np = np->parent;
+	for (; np; np = np->parent)
 		if (!of_property_read_u32(np, "#size-cells", &cells))
 			return cells;
-	} while (np->parent);
+
 	/* No #size-cells property for the root node */
 	return OF_ROOT_NODE_SIZE_CELLS_DEFAULT;
+}
+
+int of_n_size_cells(struct device_node *np)
+{
+	if (np->parent)
+		np = np->parent;
+
+	return of_bus_n_size_cells(np);
 }
 EXPORT_SYMBOL(of_n_size_cells);
 
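For orientation (an editor's sketch, not part of the patch): the split above separates two questions. of_n_addr_cells() still answers "how are this device's own addresses encoded", so it starts the #address-cells walk at the parent, while the new of_bus_n_addr_cells() starts at the node itself, i.e. it describes the children of a bus node. The helper name below is hypothetical.

#include <linux/of.h>
#include <linux/printk.h>

/* Illustrative sketch only; example_report_addr_cells() is made up. */
static void example_report_addr_cells(struct device_node *dev_np)
{
	/* Cells encoding dev_np's own "reg" addresses (walk starts at the parent). */
	int dev_cells = of_n_addr_cells(dev_np);

	/* Cells a bus node prescribes for its children (walk starts at the node itself). */
	int bus_cells = of_bus_n_addr_cells(dev_np->parent);

	pr_info("%pOF: %d address cells (parent bus reports %d)\n",
		dev_np, dev_cells, bus_cells);
}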
@@ -115,122 +135,38 @@
 }
 #endif
 
-/*
- * Assumptions behind phandle_cache implementation:
- *   - phandle property values are in a contiguous range of 1..n
- *
- * If the assumptions do not hold, then
- *   - the phandle lookup overhead reduction provided by the cache
- *     will likely be less
- */
+#define OF_PHANDLE_CACHE_BITS	7
+#define OF_PHANDLE_CACHE_SZ	BIT(OF_PHANDLE_CACHE_BITS)
 
-static struct device_node **phandle_cache;
-static u32 phandle_cache_mask;
+static struct device_node *phandle_cache[OF_PHANDLE_CACHE_SZ];
+
+static u32 of_phandle_cache_hash(phandle handle)
+{
+	return hash_32(handle, OF_PHANDLE_CACHE_BITS);
+}
 
 /*
  * Caller must hold devtree_lock.
  */
-static struct device_node** __of_free_phandle_cache(void)
+void __of_phandle_cache_inv_entry(phandle handle)
 {
-	u32 cache_entries = phandle_cache_mask + 1;
-	u32 k;
-	struct device_node **shadow;
-
-	if (!phandle_cache)
-		return NULL;
-
-	for (k = 0; k < cache_entries; k++)
-		of_node_put(phandle_cache[k]);
-
-	shadow = phandle_cache;
-	phandle_cache = NULL;
-	return shadow;
-}
-
-int of_free_phandle_cache(void)
-{
-	unsigned long flags;
-	struct device_node **shadow;
-
-	raw_spin_lock_irqsave(&devtree_lock, flags);
-
-	shadow = __of_free_phandle_cache();
-
-	raw_spin_unlock_irqrestore(&devtree_lock, flags);
-	kfree(shadow);
-	return 0;
-}
-#if !defined(CONFIG_MODULES)
-late_initcall_sync(of_free_phandle_cache);
-#endif
-
-/*
- * Caller must hold devtree_lock.
- */
-void __of_free_phandle_cache_entry(phandle handle)
-{
-	phandle masked_handle;
+	u32 handle_hash;
 	struct device_node *np;
 
 	if (!handle)
 		return;
 
-	masked_handle = handle & phandle_cache_mask;
+	handle_hash = of_phandle_cache_hash(handle);
 
-	if (phandle_cache) {
-		np = phandle_cache[masked_handle];
-		if (np && handle == np->phandle) {
-			of_node_put(np);
-			phandle_cache[masked_handle] = NULL;
-		}
-	}
-}
-
-void of_populate_phandle_cache(void)
-{
-	unsigned long flags;
-	u32 cache_entries;
-	struct device_node *np;
-	u32 phandles = 0;
-	struct device_node **shadow;
-
-	raw_spin_lock_irqsave(&devtree_lock, flags);
-
-	shadow = __of_free_phandle_cache();
-
-	for_each_of_allnodes(np)
-		if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL)
-			phandles++;
-
-	if (!phandles)
-		goto out;
-	raw_spin_unlock_irqrestore(&devtree_lock, flags);
-
-	cache_entries = roundup_pow_of_two(phandles);
-	phandle_cache_mask = cache_entries - 1;
-
-	phandle_cache = kcalloc(cache_entries, sizeof(*phandle_cache),
-				GFP_ATOMIC);
-	raw_spin_lock_irqsave(&devtree_lock, flags);
-	if (!phandle_cache)
-		goto out;
-
-	for_each_of_allnodes(np)
-		if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL) {
-			of_node_get(np);
-			phandle_cache[np->phandle & phandle_cache_mask] = np;
-		}
-
-out:
-	raw_spin_unlock_irqrestore(&devtree_lock, flags);
-	kfree(shadow);
+	np = phandle_cache[handle_hash];
+	if (np && handle == np->phandle)
+		phandle_cache[handle_hash] = NULL;
 }
 
 void __init of_core_init(void)
 {
 	struct device_node *np;
 
-	of_populate_phandle_cache();
 
 	/* Create the kset, and register existing nodes */
 	mutex_lock(&of_mutex);
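As a reading aid (editor's sketch, not part of the patch): the rework above replaces the dynamically sized, mask-indexed cache with a fixed array of OF_PHANDLE_CACHE_SZ = 2^7 = 128 slots indexed by hash_32(), and cache entries no longer pin nodes with an extra reference. Callers of of_find_node_by_phandle() are unchanged; the function name below is hypothetical.

#include <linux/of.h>
#include <linux/printk.h>

/* Illustrative sketch only: the hashed cache is consulted internally. */
static void example_resolve_phandle(phandle ph)
{
	struct device_node *np = of_find_node_by_phandle(ph);

	if (!np)
		return;

	pr_info("phandle 0x%x resolves to %pOF\n", ph, np);
	of_node_put(np);	/* the lookup still returns a counted reference */
}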
@@ -240,8 +176,11 @@
 		pr_err("failed to register existing nodes\n");
 		return;
 	}
-	for_each_of_allnodes(np)
+	for_each_of_allnodes(np) {
 		__of_attach_node_sysfs(np);
+		if (np->phandle && !phandle_cache[of_phandle_cache_hash(np->phandle)])
+			phandle_cache[of_phandle_cache_hash(np->phandle)] = np;
+	}
 	mutex_unlock(&of_mutex);
 
 	/* Symlink in /proc as required by userspace ABI */
@@ -380,6 +319,8 @@
 
 	ac = of_n_addr_cells(cpun);
 	cell = of_get_property(cpun, prop_name, &prop_len);
+	if (!cell && !ac && arch_match_cpu_phys_id(cpu, 0))
+		return true;
 	if (!cell || !ac)
 		return false;
 	prop_len /= sizeof(*cell) * ac;
@@ -440,7 +381,7 @@
 {
 	struct device_node *cpun;
 
-	for_each_node_by_type(cpun, "cpu") {
+	for_each_of_cpu_node(cpun) {
 		if (arch_find_n_match_cpu_physical_id(cpun, cpu, thread))
 			return cpun;
 	}
@@ -473,6 +414,42 @@
 	return -ENODEV;
 }
 EXPORT_SYMBOL(of_cpu_node_to_id);
+
+/**
+ * of_get_cpu_state_node - Get CPU's idle state node at the given index
+ *
+ * @cpu_node: The device node for the CPU
+ * @index: The index in the list of the idle states
+ *
+ * Two generic methods can be used to describe a CPU's idle states, either via
+ * a flattened description through the "cpu-idle-states" binding or via the
+ * hierarchical layout, using the "power-domains" and the "domain-idle-states"
+ * bindings. This function check for both and returns the idle state node for
+ * the requested index.
+ *
+ * In case an idle state node is found at @index, the refcount is incremented
+ * for it, so call of_node_put() on it when done. Returns NULL if not found.
+ */
+struct device_node *of_get_cpu_state_node(struct device_node *cpu_node,
+					  int index)
+{
+	struct of_phandle_args args;
+	int err;
+
+	err = of_parse_phandle_with_args(cpu_node, "power-domains",
+					 "#power-domain-cells", 0, &args);
+	if (!err) {
+		struct device_node *state_node =
+			of_parse_phandle(args.np, "domain-idle-states", index);
+
+		of_node_put(args.np);
+		if (state_node)
+			return state_node;
+	}
+
+	return of_parse_phandle(cpu_node, "cpu-idle-states", index);
+}
+EXPORT_SYMBOL(of_get_cpu_state_node);
 
 /**
  * __of_device_is_compatible() - Check if the node matches given constraints
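A possible consumer of the new helper (editor's sketch, not part of the patch; the function name is made up): enumerate all idle states of a CPU node regardless of whether they are described flat via "cpu-idle-states" or hierarchically via "power-domains"/"domain-idle-states".

#include <linux/of.h>
#include <linux/printk.h>

/* Illustrative sketch only. */
static void example_dump_cpu_idle_states(struct device_node *cpu_node)
{
	struct device_node *state;
	int i = 0;

	while ((state = of_get_cpu_state_node(cpu_node, i))) {
		pr_info("%pOF: idle state %d is %pOF\n", cpu_node, i, state);
		of_node_put(state);	/* the helper returned a counted reference */
		i++;
	}
}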
@@ -527,14 +504,14 @@
 
 	/* Matching type is better than matching name */
 	if (type && type[0]) {
-		if (!device->type || of_node_cmp(type, device->type))
+		if (!__of_node_is_type(device, type))
 			return 0;
 		score += 2;
 	}
 
 	/* Matching name is a bit better than not */
 	if (name && name[0]) {
-		if (!device->name || of_node_cmp(name, device->name))
+		if (!of_node_name_eq(device, name))
 			return 0;
 		score++;
 	}
@@ -795,6 +772,43 @@
 EXPORT_SYMBOL(of_get_next_available_child);
 
 /**
+ * of_get_next_cpu_node - Iterate on cpu nodes
+ * @prev:	previous child of the /cpus node, or NULL to get first
+ *
+ * Returns a cpu node pointer with refcount incremented, use of_node_put()
+ * on it when done. Returns NULL when prev is the last child. Decrements
+ * the refcount of prev.
+ */
+struct device_node *of_get_next_cpu_node(struct device_node *prev)
+{
+	struct device_node *next = NULL;
+	unsigned long flags;
+	struct device_node *node;
+
+	if (!prev)
+		node = of_find_node_by_path("/cpus");
+
+	raw_spin_lock_irqsave(&devtree_lock, flags);
+	if (prev)
+		next = prev->sibling;
+	else if (node) {
+		next = node->child;
+		of_node_put(node);
+	}
+	for (; next; next = next->sibling) {
+		if (!(of_node_name_eq(next, "cpu") ||
+		      __of_node_is_type(next, "cpu")))
+			continue;
+		if (of_node_get(next))
+			break;
+	}
+	of_node_put(prev);
+	raw_spin_unlock_irqrestore(&devtree_lock, flags);
+	return next;
+}
+EXPORT_SYMBOL(of_get_next_cpu_node);
+
+/**
  * of_get_compatible_child - Find compatible child node
 * @parent:	parent node
 * @compatible:	compatible string
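For context (editor's sketch, not part of the patch): of_get_next_cpu_node() is the backend of the for_each_of_cpu_node() iterator declared in <linux/of.h>, which an earlier hunk already uses in of_get_cpu_node(). A caller might count CPU nodes like this; the function name is made up.

#include <linux/of.h>
#include <linux/printk.h>

/* Illustrative sketch only. */
static unsigned int example_count_cpu_nodes(void)
{
	struct device_node *cpu;
	unsigned int n = 0;

	for_each_of_cpu_node(cpu)	/* the iterator handles per-node refcounting */
		n++;

	pr_info("device tree describes %u cpu nodes\n", n);
	return n;
}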
@@ -836,7 +850,7 @@
 	struct device_node *child;
 
 	for_each_child_of_node(node, child)
-		if (child->name && (of_node_cmp(child->name, name) == 0))
+		if (of_node_name_eq(child, name))
 			break;
 	return child;
 }
@@ -962,8 +976,7 @@
 
 	raw_spin_lock_irqsave(&devtree_lock, flags);
 	for_each_of_allnodes_from(from, np)
-		if (np->name && (of_node_cmp(np->name, name) == 0)
-		    && of_node_get(np))
+		if (of_node_name_eq(np, name) && of_node_get(np))
 			break;
 	of_node_put(from);
 	raw_spin_unlock_irqrestore(&devtree_lock, flags);
@@ -991,8 +1004,7 @@
 
 	raw_spin_lock_irqsave(&devtree_lock, flags);
 	for_each_of_allnodes_from(from, np)
-		if (np->type && (of_node_cmp(np->type, type) == 0)
-		    && of_node_get(np))
+		if (__of_node_is_type(np, type) && of_node_get(np))
 			break;
 	of_node_put(from);
 	raw_spin_unlock_irqrestore(&devtree_lock, flags);
@@ -1185,36 +1197,24 @@
 {
 	struct device_node *np = NULL;
 	unsigned long flags;
-	phandle masked_handle;
+	u32 handle_hash;
 
 	if (!handle)
 		return NULL;
 
+	handle_hash = of_phandle_cache_hash(handle);
+
 	raw_spin_lock_irqsave(&devtree_lock, flags);
 
-	masked_handle = handle & phandle_cache_mask;
-
-	if (phandle_cache) {
-		if (phandle_cache[masked_handle] &&
-		    handle == phandle_cache[masked_handle]->phandle)
-			np = phandle_cache[masked_handle];
-		if (np && of_node_check_flag(np, OF_DETACHED)) {
-			WARN_ON(1); /* did not uncache np on node removal */
-			of_node_put(np);
-			phandle_cache[masked_handle] = NULL;
-			np = NULL;
-		}
-	}
+	if (phandle_cache[handle_hash] &&
+	    handle == phandle_cache[handle_hash]->phandle)
+		np = phandle_cache[handle_hash];
 
 	if (!np) {
 		for_each_of_allnodes(np)
 			if (np->phandle == handle &&
 			    !of_node_check_flag(np, OF_DETACHED)) {
-				if (phandle_cache) {
-					/* will put when removed from cache */
-					of_node_get(np);
-					phandle_cache[masked_handle] = np;
-				}
+				phandle_cache[handle_hash] = np;
 				break;
 			}
 	}
@@ -1247,6 +1247,13 @@
 	int size;
 
 	memset(it, 0, sizeof(*it));
+
+	/*
+	 * one of cell_count or cells_name must be provided to determine the
+	 * argument length.
+	 */
+	if (cell_count < 0 && !cells_name)
+		return -EINVAL;
 
 	list = of_get_property(np, list_name, &size);
 	if (!list)
@@ -1297,11 +1304,20 @@
 
 		if (of_property_read_u32(it->node, it->cells_name,
 					 &count)) {
-			pr_err("%pOF: could not get %s for %pOF\n",
-			       it->parent,
-			       it->cells_name,
-			       it->node);
-			goto err;
+			/*
+			 * If both cell_count and cells_name is given,
+			 * fall back to cell_count in absence
+			 * of the cells_name property
+			 */
+			if (it->cell_count >= 0) {
+				count = it->cell_count;
+			} else {
+				pr_err("%pOF: could not get %s for %pOF\n",
+				       it->parent,
+				       it->cells_name,
+				       it->node);
+				goto err;
+			}
 		}
 	} else {
 		count = it->cell_count;
@@ -1312,8 +1328,14 @@
 			 * property data length
 			 */
 			if (it->cur + count > it->list_end) {
-				pr_err("%pOF: arguments longer than property\n",
-				       it->parent);
+				if (it->cells_name)
+					pr_err("%pOF: %s = %d found %td\n",
+					       it->parent, it->cells_name,
+					       count, it->list_end - it->cur);
+				else
+					pr_err("%pOF: phandle %s needs %d, found %td\n",
+					       it->parent, of_node_full_name(it->node),
+					       count, it->list_end - it->cur);
 				goto err;
 			}
 		}
@@ -1349,7 +1371,6 @@
 
 	return count;
 }
-EXPORT_SYMBOL_GPL(of_phandle_iterator_args);
 
 static int __of_parse_phandle_with_args(const struct device_node *np,
 					const char *list_name,
@@ -1465,10 +1486,17 @@
 				const char *cells_name, int index,
 				struct of_phandle_args *out_args)
 {
+	int cell_count = -1;
+
 	if (index < 0)
 		return -EINVAL;
-	return __of_parse_phandle_with_args(np, list_name, cells_name, 0,
-					    index, out_args);
+
+	/* If cells_name is NULL we assume a cell count of 0 */
+	if (!cells_name)
+		cell_count = 0;
+
+	return __of_parse_phandle_with_args(np, list_name, cells_name,
+					    cell_count, index, out_args);
 }
 EXPORT_SYMBOL(of_parse_phandle_with_args);
 
@@ -1550,7 +1578,7 @@
 	if (!pass_name)
 		goto free;
 
-	ret = __of_parse_phandle_with_args(np, list_name, cells_name, 0, index,
+	ret = __of_parse_phandle_with_args(np, list_name, cells_name, -1, index,
 					   out_args);
 	if (ret)
 		goto free;
@@ -1718,7 +1746,24 @@
 	struct of_phandle_iterator it;
 	int rc, cur_index = 0;
 
-	rc = of_phandle_iterator_init(&it, np, list_name, cells_name, 0);
+	/*
+	 * If cells_name is NULL we assume a cell count of 0. This makes
+	 * counting the phandles trivial as each 32bit word in the list is a
+	 * phandle and no arguments are to consider. So we don't iterate through
+	 * the list but just use the length to determine the phandle count.
+	 */
+	if (!cells_name) {
+		const __be32 *list;
+		int size;
+
+		list = of_get_property(np, list_name, &size);
+		if (!list)
+			return -ENOENT;
+
+		return size / sizeof(*list);
+	}
+
+	rc = of_phandle_iterator_init(&it, np, list_name, cells_name, -1);
 	if (rc)
 		return rc;
 
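To illustrate the NULL-cells_name fast path above (editor's sketch, not part of the patch; the property name is hypothetical): a list of bare phandles such as example-links = <&a &b &c>; is three 32-bit words, so the call below returns 3 without walking the entries.

#include <linux/of.h>

/* Illustrative sketch only. */
static int example_count_bare_phandles(struct device_node *np)
{
	/* No #...-cells to parse: the result is simply prop_len / 4. */
	return of_count_phandle_with_args(np, "example-links", NULL);
}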
@@ -1777,6 +1822,7 @@
 
 	return rc;
 }
+EXPORT_SYMBOL_GPL(of_add_property);
 
 int __of_remove_property(struct device_node *np, struct property *prop)
 {
@@ -1829,6 +1875,7 @@
 
 	return rc;
 }
+EXPORT_SYMBOL_GPL(of_remove_property);
 
 int __of_update_property(struct device_node *np, struct property *newprop,
 			 struct property **oldpropp)
@@ -2004,6 +2051,59 @@
 EXPORT_SYMBOL_GPL(of_alias_get_id);
 
 /**
+ * of_alias_get_alias_list - Get alias list for the given device driver
+ * @matches:	Array of OF device match structures to search in
+ * @stem:	Alias stem of the given device_node
+ * @bitmap:	Bitmap field pointer
+ * @nbits:	Maximum number of alias IDs which can be recorded in bitmap
+ *
+ * The function travels the lookup table to record alias ids for the given
+ * device match structures and alias stem.
+ *
+ * Return:	0 or -ENOSYS when !CONFIG_OF or
+ *		-EOVERFLOW if alias ID is greater then allocated nbits
+ */
+int of_alias_get_alias_list(const struct of_device_id *matches,
+			    const char *stem, unsigned long *bitmap,
+			    unsigned int nbits)
+{
+	struct alias_prop *app;
+	int ret = 0;
+
+	/* Zero bitmap field to make sure that all the time it is clean */
+	bitmap_zero(bitmap, nbits);
+
+	mutex_lock(&of_mutex);
+	pr_debug("%s: Looking for stem: %s\n", __func__, stem);
+	list_for_each_entry(app, &aliases_lookup, link) {
+		pr_debug("%s: stem: %s, id: %d\n",
+			 __func__, app->stem, app->id);
+
+		if (strcmp(app->stem, stem) != 0) {
+			pr_debug("%s: stem comparison didn't pass %s\n",
+				 __func__, app->stem);
+			continue;
+		}
+
+		if (of_match_node(matches, app->np)) {
+			pr_debug("%s: Allocated ID %d\n", __func__, app->id);
+
+			if (app->id >= nbits) {
+				pr_warn("%s: ID %d >= than bitmap field %d\n",
+					__func__, app->id, nbits);
+				ret = -EOVERFLOW;
+			} else {
+				set_bit(app->id, bitmap);
+			}
+		}
+	}
+	mutex_unlock(&of_mutex);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(of_alias_get_alias_list);
+
+/**
  * of_alias_get_highest_id - Get highest alias id for the given stem
  * @stem:	Alias stem to be examined
 *
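A plausible user of of_alias_get_alias_list() (editor's sketch, not part of the patch; the match table, "serial" stem and port limit are assumptions): a serial driver can pre-mark the alias IDs fixed in the device tree so dynamically assigned ports avoid them.

#include <linux/bitmap.h>
#include <linux/of.h>
#include <linux/printk.h>

#define EXAMPLE_MAX_PORTS	8	/* hypothetical driver limit */

static const struct of_device_id example_uart_of_match[] = {
	{ .compatible = "vendor,example-uart" },	/* hypothetical binding */
	{ /* sentinel */ }
};

/* Illustrative sketch only; in_use must hold at least EXAMPLE_MAX_PORTS bits. */
static void example_reserve_dt_aliases(unsigned long *in_use)
{
	/* Sets one bit per "serialN" alias whose node matches the table. */
	if (of_alias_get_alias_list(example_uart_of_match, "serial",
				    in_use, EXAMPLE_MAX_PORTS) == -EOVERFLOW)
		pr_warn("some serial aliases exceed %d ports\n",
			EXAMPLE_MAX_PORTS);
}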
@@ -2074,9 +2174,9 @@
 	/* OF on pmac has nodes instead of properties named "l2-cache"
 	 * beneath CPU nodes.
 	 */
-	if (IS_ENABLED(CONFIG_PPC_PMAC) && !strcmp(np->type, "cpu"))
+	if (IS_ENABLED(CONFIG_PPC_PMAC) && of_node_is_type(np, "cpu"))
 		for_each_child_of_node(np, child)
-			if (!strcmp(child->type, "cache"))
+			if (of_node_is_type(child, "cache"))
 				return child;
 
 	return NULL;
@@ -2106,3 +2206,109 @@
 
 	return cache_level;
 }
+
+/**
+ * of_map_id - Translate an ID through a downstream mapping.
+ * @np: root complex device node.
+ * @id: device ID to map.
+ * @map_name: property name of the map to use.
+ * @map_mask_name: optional property name of the mask to use.
+ * @target: optional pointer to a target device node.
+ * @id_out: optional pointer to receive the translated ID.
+ *
+ * Given a device ID, look up the appropriate implementation-defined
+ * platform ID and/or the target device which receives transactions on that
+ * ID, as per the "iommu-map" and "msi-map" bindings. Either of @target or
+ * @id_out may be NULL if only the other is required. If @target points to
+ * a non-NULL device node pointer, only entries targeting that node will be
+ * matched; if it points to a NULL value, it will receive the device node of
+ * the first matching target phandle, with a reference held.
+ *
+ * Return: 0 on success or a standard error code on failure.
+ */
+int of_map_id(struct device_node *np, u32 id,
+	      const char *map_name, const char *map_mask_name,
+	      struct device_node **target, u32 *id_out)
+{
+	u32 map_mask, masked_id;
+	int map_len;
+	const __be32 *map = NULL;
+
+	if (!np || !map_name || (!target && !id_out))
+		return -EINVAL;
+
+	map = of_get_property(np, map_name, &map_len);
+	if (!map) {
+		if (target)
+			return -ENODEV;
+		/* Otherwise, no map implies no translation */
+		*id_out = id;
+		return 0;
+	}
+
+	if (!map_len || map_len % (4 * sizeof(*map))) {
+		pr_err("%pOF: Error: Bad %s length: %d\n", np,
+			map_name, map_len);
+		return -EINVAL;
+	}
+
+	/* The default is to select all bits. */
+	map_mask = 0xffffffff;
+
+	/*
+	 * Can be overridden by "{iommu,msi}-map-mask" property.
+	 * If of_property_read_u32() fails, the default is used.
+	 */
+	if (map_mask_name)
+		of_property_read_u32(np, map_mask_name, &map_mask);
+
+	masked_id = map_mask & id;
+	for ( ; map_len > 0; map_len -= 4 * sizeof(*map), map += 4) {
+		struct device_node *phandle_node;
+		u32 id_base = be32_to_cpup(map + 0);
+		u32 phandle = be32_to_cpup(map + 1);
+		u32 out_base = be32_to_cpup(map + 2);
+		u32 id_len = be32_to_cpup(map + 3);
+
+		if (id_base & ~map_mask) {
+			pr_err("%pOF: Invalid %s translation - %s-mask (0x%x) ignores id-base (0x%x)\n",
+				np, map_name, map_name,
+				map_mask, id_base);
+			return -EFAULT;
+		}
+
+		if (masked_id < id_base || masked_id >= id_base + id_len)
+			continue;
+
+		phandle_node = of_find_node_by_phandle(phandle);
+		if (!phandle_node)
+			return -ENODEV;
+
+		if (target) {
+			if (*target)
+				of_node_put(phandle_node);
+			else
+				*target = phandle_node;
+
+			if (*target != phandle_node)
+				continue;
+		}
+
+		if (id_out)
+			*id_out = masked_id - id_base + out_base;
+
+		pr_debug("%pOF: %s, using mask %08x, id-base: %08x, out-base: %08x, length: %08x, id: %08x -> %08x\n",
+			np, map_name, map_mask, id_base, out_base,
+			id_len, id, masked_id - id_base + out_base);
+		return 0;
+	}
+
+	pr_info("%pOF: no %s translation for id 0x%x on %pOF\n", np, map_name,
+		id, target && *target ? *target : NULL);
+
+	/* Bypasses translation */
+	if (id_out)
+		*id_out = id;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(of_map_id);
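Finally, a sketch of how of_map_id() is typically consumed (editor's illustration, not part of the patch; the function name is made up): translating a PCI requester ID through a root complex's "msi-map" to find the MSI controller node and the ID it will observe.

#include <linux/of.h>
#include <linux/printk.h>

/* Illustrative sketch only. */
static void example_map_requester_id(struct device_node *rc_node, u32 rid)
{
	struct device_node *msi_np = NULL;
	u32 msi_id;

	if (of_map_id(rc_node, rid, "msi-map", "msi-map-mask",
		      &msi_np, &msi_id))
		return;

	if (msi_np) {
		pr_info("RID 0x%x maps to %pOF as ID 0x%x\n",
			rid, msi_np, msi_id);
		of_node_put(msi_np);	/* of_map_id() took a reference */
	}
}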