2023-12-09 b22da3d8526a935aa31e086e63f60ff3246cb61c
kernel/drivers/of/base.c
@@ -16,6 +16,7 @@
 
 #define pr_fmt(fmt) "OF: " fmt
 
+#include <linux/bitmap.h>
 #include <linux/console.h>
 #include <linux/ctype.h>
 #include <linux/cpu.h>
....@@ -78,33 +79,52 @@
7879 }
7980 EXPORT_SYMBOL(of_node_name_prefix);
8081
81
-int of_n_addr_cells(struct device_node *np)
82
+static bool __of_node_is_type(const struct device_node *np, const char *type)
83
+{
84
+ const char *match = __of_get_property(np, "device_type", NULL);
85
+
86
+ return np && match && type && !strcmp(match, type);
87
+}
88
+
89
+int of_bus_n_addr_cells(struct device_node *np)
8290 {
8391 u32 cells;
8492
85
- do {
86
- if (np->parent)
87
- np = np->parent;
93
+ for (; np; np = np->parent)
8894 if (!of_property_read_u32(np, "#address-cells", &cells))
8995 return cells;
90
- } while (np->parent);
96
+
9197 /* No #address-cells property for the root node */
9298 return OF_ROOT_NODE_ADDR_CELLS_DEFAULT;
9399 }
100
+
101
+int of_n_addr_cells(struct device_node *np)
102
+{
103
+ if (np->parent)
104
+ np = np->parent;
105
+
106
+ return of_bus_n_addr_cells(np);
107
+}
94108 EXPORT_SYMBOL(of_n_addr_cells);
95109
96
-int of_n_size_cells(struct device_node *np)
110
+int of_bus_n_size_cells(struct device_node *np)
97111 {
98112 u32 cells;
99113
100
- do {
101
- if (np->parent)
102
- np = np->parent;
114
+ for (; np; np = np->parent)
103115 if (!of_property_read_u32(np, "#size-cells", &cells))
104116 return cells;
105
- } while (np->parent);
117
+
106118 /* No #size-cells property for the root node */
107119 return OF_ROOT_NODE_SIZE_CELLS_DEFAULT;
120
+}
121
+
122
+int of_n_size_cells(struct device_node *np)
123
+{
124
+ if (np->parent)
125
+ np = np->parent;
126
+
127
+ return of_bus_n_size_cells(np);
108128 }
109129 EXPORT_SYMBOL(of_n_size_cells);
110130
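As a rough usage sketch (not part of the patch): the refactor splits the old parent-relative lookup into a bus-relative helper that starts the "#address-cells"/"#size-cells" walk at the node itself. The "/soc" path below is a hypothetical example node.

	struct device_node *soc_bus = of_find_node_by_path("/soc");

	if (soc_bus) {
		/* cells that apply to children of /soc, starting at /soc itself */
		int ac = of_bus_n_addr_cells(soc_bus);
		int sc = of_bus_n_size_cells(soc_bus);

		pr_info("/soc: #address-cells=%d #size-cells=%d\n", ac, sc);
		of_node_put(soc_bus);
	}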
@@ -115,115 +135,38 @@
 }
 #endif
 
-/*
- * Assumptions behind phandle_cache implementation:
- *   - phandle property values are in a contiguous range of 1..n
- *
- * If the assumptions do not hold, then
- *   - the phandle lookup overhead reduction provided by the cache
- *     will likely be less
- */
+#define OF_PHANDLE_CACHE_BITS	7
+#define OF_PHANDLE_CACHE_SZ	BIT(OF_PHANDLE_CACHE_BITS)
 
-static struct device_node **phandle_cache;
-static u32 phandle_cache_mask;
+static struct device_node *phandle_cache[OF_PHANDLE_CACHE_SZ];
+
+static u32 of_phandle_cache_hash(phandle handle)
+{
+	return hash_32(handle, OF_PHANDLE_CACHE_BITS);
+}
 
 /*
  * Caller must hold devtree_lock.
  */
-static void __of_free_phandle_cache(void)
+void __of_phandle_cache_inv_entry(phandle handle)
 {
-	u32 cache_entries = phandle_cache_mask + 1;
-	u32 k;
-
-	if (!phandle_cache)
-		return;
-
-	for (k = 0; k < cache_entries; k++)
-		of_node_put(phandle_cache[k]);
-
-	kfree(phandle_cache);
-	phandle_cache = NULL;
-}
-
-int of_free_phandle_cache(void)
-{
-	unsigned long flags;
-
-	raw_spin_lock_irqsave(&devtree_lock, flags);
-
-	__of_free_phandle_cache();
-
-	raw_spin_unlock_irqrestore(&devtree_lock, flags);
-
-	return 0;
-}
-#if !defined(CONFIG_MODULES)
-late_initcall_sync(of_free_phandle_cache);
-#endif
-
-/*
- * Caller must hold devtree_lock.
- */
-void __of_free_phandle_cache_entry(phandle handle)
-{
-	phandle masked_handle;
+	u32 handle_hash;
 	struct device_node *np;
 
 	if (!handle)
 		return;
 
-	masked_handle = handle & phandle_cache_mask;
+	handle_hash = of_phandle_cache_hash(handle);
 
-	if (phandle_cache) {
-		np = phandle_cache[masked_handle];
-		if (np && handle == np->phandle) {
-			of_node_put(np);
-			phandle_cache[masked_handle] = NULL;
-		}
-	}
-}
-
-void of_populate_phandle_cache(void)
-{
-	unsigned long flags;
-	u32 cache_entries;
-	struct device_node *np;
-	u32 phandles = 0;
-
-	raw_spin_lock_irqsave(&devtree_lock, flags);
-
-	__of_free_phandle_cache();
-
-	for_each_of_allnodes(np)
-		if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL)
-			phandles++;
-
-	if (!phandles)
-		goto out;
-
-	cache_entries = roundup_pow_of_two(phandles);
-	phandle_cache_mask = cache_entries - 1;
-
-	phandle_cache = kcalloc(cache_entries, sizeof(*phandle_cache),
-				GFP_ATOMIC);
-	if (!phandle_cache)
-		goto out;
-
-	for_each_of_allnodes(np)
-		if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL) {
-			of_node_get(np);
-			phandle_cache[np->phandle & phandle_cache_mask] = np;
-		}
-
-out:
-	raw_spin_unlock_irqrestore(&devtree_lock, flags);
+	np = phandle_cache[handle_hash];
+	if (np && handle == np->phandle)
+		phandle_cache[handle_hash] = NULL;
 }
 
 void __init of_core_init(void)
 {
 	struct device_node *np;
 
-	of_populate_phandle_cache();
 
 	/* Create the kset, and register existing nodes */
 	mutex_lock(&of_mutex);
@@ -233,8 +176,11 @@
 		pr_err("failed to register existing nodes\n");
 		return;
 	}
-	for_each_of_allnodes(np)
+	for_each_of_allnodes(np) {
 		__of_attach_node_sysfs(np);
+		if (np->phandle && !phandle_cache[of_phandle_cache_hash(np->phandle)])
+			phandle_cache[of_phandle_cache_hash(np->phandle)] = np;
+	}
 	mutex_unlock(&of_mutex);
 
 	/* Symlink in /proc as required by userspace ABI */
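The replacement cache is a fixed, 128-slot, direct-mapped table: no allocation, no refcounting, and a hash collision simply overwrites the previous occupant, which is why entries must be re-validated on lookup. A minimal sketch of the lookup discipline (illustrative only; it restates the code above using the patch's own symbols rather than adding new behavior):

	static struct device_node *cache_lookup_sketch(phandle ph)
	{
		/* hash_32() spreads arbitrary phandle values across the slots */
		struct device_node *np = phandle_cache[of_phandle_cache_hash(ph)];

		/* a slot may hold a colliding entry, so verify the phandle */
		return (np && np->phandle == ph) ? np : NULL;
	}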
@@ -373,6 +319,8 @@
 
 	ac = of_n_addr_cells(cpun);
 	cell = of_get_property(cpun, prop_name, &prop_len);
+	if (!cell && !ac && arch_match_cpu_phys_id(cpu, 0))
+		return true;
 	if (!cell || !ac)
 		return false;
 	prop_len /= sizeof(*cell) * ac;
@@ -433,7 +381,7 @@
 {
 	struct device_node *cpun;
 
-	for_each_node_by_type(cpun, "cpu") {
+	for_each_of_cpu_node(cpun) {
 		if (arch_find_n_match_cpu_physical_id(cpun, cpu, thread))
 			return cpun;
 	}
@@ -466,6 +414,42 @@
 	return -ENODEV;
 }
 EXPORT_SYMBOL(of_cpu_node_to_id);
+
+/**
+ * of_get_cpu_state_node - Get CPU's idle state node at the given index
+ *
+ * @cpu_node: The device node for the CPU
+ * @index: The index in the list of the idle states
+ *
+ * Two generic methods can be used to describe a CPU's idle states, either via
+ * a flattened description through the "cpu-idle-states" binding or via the
+ * hierarchical layout, using the "power-domains" and the "domain-idle-states"
+ * bindings. This function checks for both and returns the idle state node for
+ * the requested index.
+ *
+ * In case an idle state node is found at @index, the refcount is incremented
+ * for it, so call of_node_put() on it when done. Returns NULL if not found.
+ */
+struct device_node *of_get_cpu_state_node(struct device_node *cpu_node,
+					  int index)
+{
+	struct of_phandle_args args;
+	int err;
+
+	err = of_parse_phandle_with_args(cpu_node, "power-domains",
+					 "#power-domain-cells", 0, &args);
+	if (!err) {
+		struct device_node *state_node =
+			of_parse_phandle(args.np, "domain-idle-states", index);
+
+		of_node_put(args.np);
+		if (state_node)
+			return state_node;
+	}
+
+	return of_parse_phandle(cpu_node, "cpu-idle-states", index);
+}
+EXPORT_SYMBOL(of_get_cpu_state_node);
 
 /**
  * __of_device_is_compatible() - Check if the node matches given constraints
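A hedged usage sketch (not part of the patch): enumerating the idle states of CPU 0, which works for both the flat and the hierarchical layouts because the helper tries the power-domain path first and falls back to "cpu-idle-states".

	struct device_node *cpu_node = of_get_cpu_node(0, NULL);
	struct device_node *state;
	int i = 0;

	while (cpu_node && (state = of_get_cpu_state_node(cpu_node, i))) {
		pr_info("cpu0 idle state %d: %pOF\n", i, state);
		of_node_put(state);	/* the helper returned a held reference */
		i++;
	}
	of_node_put(cpu_node);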
@@ -520,14 +504,14 @@
 
 	/* Matching type is better than matching name */
 	if (type && type[0]) {
-		if (!device->type || of_node_cmp(type, device->type))
+		if (!__of_node_is_type(device, type))
 			return 0;
 		score += 2;
 	}
 
 	/* Matching name is a bit better than not */
 	if (name && name[0]) {
-		if (!device->name || of_node_cmp(name, device->name))
+		if (!of_node_name_eq(device, name))
 			return 0;
 		score++;
 	}
@@ -788,6 +772,43 @@
 EXPORT_SYMBOL(of_get_next_available_child);
 
 /**
+ * of_get_next_cpu_node - Iterate on cpu nodes
+ * @prev: previous child of the /cpus node, or NULL to get first
+ *
+ * Returns a cpu node pointer with refcount incremented, use of_node_put()
+ * on it when done. Returns NULL when prev is the last child. Decrements
+ * the refcount of prev.
+ */
+struct device_node *of_get_next_cpu_node(struct device_node *prev)
+{
+	struct device_node *next = NULL;
+	unsigned long flags;
+	struct device_node *node;
+
+	if (!prev)
+		node = of_find_node_by_path("/cpus");
+
+	raw_spin_lock_irqsave(&devtree_lock, flags);
+	if (prev)
+		next = prev->sibling;
+	else if (node) {
+		next = node->child;
+		of_node_put(node);
+	}
+	for (; next; next = next->sibling) {
+		if (!(of_node_name_eq(next, "cpu") ||
+		      __of_node_is_type(next, "cpu")))
+			continue;
+		if (of_node_get(next))
+			break;
+	}
+	of_node_put(prev);
+	raw_spin_unlock_irqrestore(&devtree_lock, flags);
+	return next;
+}
+EXPORT_SYMBOL(of_get_next_cpu_node);
+
+/**
  * of_get_compatible_child - Find compatible child node
 * @parent:	parent node
 * @compatible:	compatible string
@@ -829,7 +850,7 @@
 	struct device_node *child;
 
 	for_each_child_of_node(node, child)
-		if (child->name && (of_node_cmp(child->name, name) == 0))
+		if (of_node_name_eq(child, name))
 			break;
 	return child;
 }
@@ -955,8 +976,7 @@
 
 	raw_spin_lock_irqsave(&devtree_lock, flags);
 	for_each_of_allnodes_from(from, np)
-		if (np->name && (of_node_cmp(np->name, name) == 0)
-		    && of_node_get(np))
+		if (of_node_name_eq(np, name) && of_node_get(np))
 			break;
 	of_node_put(from);
 	raw_spin_unlock_irqrestore(&devtree_lock, flags);
@@ -984,8 +1004,7 @@
 
 	raw_spin_lock_irqsave(&devtree_lock, flags);
 	for_each_of_allnodes_from(from, np)
-		if (np->type && (of_node_cmp(np->type, type) == 0)
-		    && of_node_get(np))
+		if (__of_node_is_type(np, type) && of_node_get(np))
 			break;
 	of_node_put(from);
 	raw_spin_unlock_irqrestore(&devtree_lock, flags);
@@ -1178,36 +1197,24 @@
 {
 	struct device_node *np = NULL;
 	unsigned long flags;
-	phandle masked_handle;
+	u32 handle_hash;
 
 	if (!handle)
 		return NULL;
 
+	handle_hash = of_phandle_cache_hash(handle);
+
 	raw_spin_lock_irqsave(&devtree_lock, flags);
 
-	masked_handle = handle & phandle_cache_mask;
-
-	if (phandle_cache) {
-		if (phandle_cache[masked_handle] &&
-		    handle == phandle_cache[masked_handle]->phandle)
-			np = phandle_cache[masked_handle];
-		if (np && of_node_check_flag(np, OF_DETACHED)) {
-			WARN_ON(1); /* did not uncache np on node removal */
-			of_node_put(np);
-			phandle_cache[masked_handle] = NULL;
-			np = NULL;
-		}
-	}
+	if (phandle_cache[handle_hash] &&
+	    handle == phandle_cache[handle_hash]->phandle)
		np = phandle_cache[handle_hash];
 
 	if (!np) {
 		for_each_of_allnodes(np)
 			if (np->phandle == handle &&
 			    !of_node_check_flag(np, OF_DETACHED)) {
-				if (phandle_cache) {
-					/* will put when removed from cache */
-					of_node_get(np);
-					phandle_cache[masked_handle] = np;
-				}
+				phandle_cache[handle_hash] = np;
 				break;
 			}
 	}
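A hedged usage sketch (illustrative property name): resolving a phandle stored in a property. On a cache miss the function above falls back to a full tree walk and repopulates the slot, so repeated lookups of hot phandles stay O(1).

	const __be32 *prop = of_get_property(np, "interrupt-parent", NULL);

	if (prop) {
		struct device_node *parent =
			of_find_node_by_phandle(be32_to_cpup(prop));

		if (parent) {
			/* ... use parent ... */
			of_node_put(parent);	/* returned with a reference */
		}
	}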
@@ -1240,6 +1247,13 @@
 	int size;
 
 	memset(it, 0, sizeof(*it));
+
+	/*
+	 * One of cell_count or cells_name must be provided to determine the
+	 * argument length.
+	 */
+	if (cell_count < 0 && !cells_name)
+		return -EINVAL;
 
 	list = of_get_property(np, list_name, &size);
 	if (!list)
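A hedged sketch of the new contract (the node and "gpios"/"#gpio-cells" names below are placeholders): at least one of the two ways to size each entry's arguments must be usable.

	struct of_phandle_iterator it;
	int err;

	/* OK: argument length read from the provider's "#gpio-cells" */
	err = of_phandle_iterator_init(&it, node, "gpios", "#gpio-cells", -1);

	/* OK: fixed length of two cells per entry, no cells property needed */
	err = of_phandle_iterator_init(&it, node, "gpios", NULL, 2);

	/* -EINVAL: neither a cells_name nor a usable cell_count */
	err = of_phandle_iterator_init(&it, node, "gpios", NULL, -1);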
@@ -1290,11 +1304,20 @@
 
 		if (of_property_read_u32(it->node, it->cells_name,
 					 &count)) {
-			pr_err("%pOF: could not get %s for %pOF\n",
-			       it->parent,
-			       it->cells_name,
-			       it->node);
-			goto err;
+			/*
+			 * If both cell_count and cells_name are given,
+			 * fall back to cell_count in the absence
+			 * of the cells_name property
+			 */
+			if (it->cell_count >= 0) {
+				count = it->cell_count;
+			} else {
+				pr_err("%pOF: could not get %s for %pOF\n",
+				       it->parent,
+				       it->cells_name,
+				       it->node);
+				goto err;
+			}
 		}
 	} else {
 		count = it->cell_count;
@@ -1305,8 +1328,14 @@
 	 * property data length
 	 */
 	if (it->cur + count > it->list_end) {
-		pr_err("%pOF: arguments longer than property\n",
-		       it->parent);
+		if (it->cells_name)
+			pr_err("%pOF: %s = %d found %td\n",
+			       it->parent, it->cells_name,
+			       count, it->list_end - it->cur);
+		else
+			pr_err("%pOF: phandle %s needs %d, found %td\n",
+			       it->parent, of_node_full_name(it->node),
+			       count, it->list_end - it->cur);
 		goto err;
 	}
@@ -1342,7 +1371,6 @@
 
 	return count;
 }
-EXPORT_SYMBOL_GPL(of_phandle_iterator_args);
 
 static int __of_parse_phandle_with_args(const struct device_node *np,
 					const char *list_name,
@@ -1458,10 +1486,17 @@
 			       const char *cells_name, int index,
 			       struct of_phandle_args *out_args)
 {
+	int cell_count = -1;
+
 	if (index < 0)
 		return -EINVAL;
-	return __of_parse_phandle_with_args(np, list_name, cells_name, 0,
-					    index, out_args);
+
+	/* If cells_name is NULL we assume a cell count of 0 */
+	if (!cells_name)
+		cell_count = 0;
+
+	return __of_parse_phandle_with_args(np, list_name, cells_name,
+					    cell_count, index, out_args);
 }
 EXPORT_SYMBOL(of_parse_phandle_with_args);
 
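A hedged usage sketch (property names are illustrative; "interrupts-extended" is one common phandle+args binding): parsing the entry at index 0 of a list.

	struct of_phandle_args args;
	int ret;

	ret = of_parse_phandle_with_args(np, "interrupts-extended",
					 "#interrupt-cells", 0, &args);
	if (!ret) {
		pr_debug("provider %pOF, %d argument cells\n",
			 args.np, args.args_count);
		of_node_put(args.np);	/* args.np carries a reference */
	}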
@@ -1543,7 +1578,7 @@
 	if (!pass_name)
 		goto free;
 
-	ret = __of_parse_phandle_with_args(np, list_name, cells_name, 0, index,
+	ret = __of_parse_phandle_with_args(np, list_name, cells_name, -1, index,
 					   out_args);
 	if (ret)
 		goto free;
@@ -1711,7 +1746,24 @@
 	struct of_phandle_iterator it;
 	int rc, cur_index = 0;
 
-	rc = of_phandle_iterator_init(&it, np, list_name, cells_name, 0);
+	/*
+	 * If cells_name is NULL we assume a cell count of 0. This makes
+	 * counting the phandles trivial as each 32bit word in the list is a
+	 * phandle and there are no arguments to consider. So we don't iterate
+	 * through the list but just use the length to determine the count.
+	 */
+	if (!cells_name) {
+		const __be32 *list;
+		int size;
+
+		list = of_get_property(np, list_name, &size);
+		if (!list)
+			return -ENOENT;
+
+		return size / sizeof(*list);
+	}
+
+	rc = of_phandle_iterator_init(&it, np, list_name, cells_name, -1);
 	if (rc)
 		return rc;
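A hedged sketch of the two counting modes ("clocks" and "idle-states" are illustrative property names, not tied to this patch): with a cells_name each entry's length is read from the provider, while a NULL cells_name means one u32 per entry, so the count is just the property length divided by 4.

	int n = of_count_phandle_with_args(np, "clocks", "#clock-cells");
	int m = of_count_phandle_with_args(np, "idle-states", NULL);

	/* e.g. an 8-byte bare phandle list yields m == 2 */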
@@ -1770,6 +1822,7 @@
 
 	return rc;
 }
+EXPORT_SYMBOL_GPL(of_add_property);
 
 int __of_remove_property(struct device_node *np, struct property *prop)
 {
@@ -1822,6 +1875,7 @@
 
 	return rc;
 }
+EXPORT_SYMBOL_GPL(of_remove_property);
 
 int __of_update_property(struct device_node *np, struct property *newprop,
 			 struct property **oldpropp)
@@ -1997,6 +2051,59 @@
 EXPORT_SYMBOL_GPL(of_alias_get_id);
 
 /**
+ * of_alias_get_alias_list - Get alias list for the given device driver
+ * @matches:	Array of OF device match structures to search in
+ * @stem:	Alias stem of the given device_node
+ * @bitmap:	Bitmap field pointer
+ * @nbits:	Maximum number of alias IDs which can be recorded in bitmap
+ *
+ * The function traverses the lookup table to record alias ids for the given
+ * device match structures and alias stem.
+ *
+ * Return:	0 or -ENOSYS when !CONFIG_OF or
+ *		-EOVERFLOW if alias ID is greater than allocated nbits
+ */
+int of_alias_get_alias_list(const struct of_device_id *matches,
+			    const char *stem, unsigned long *bitmap,
+			    unsigned int nbits)
+{
+	struct alias_prop *app;
+	int ret = 0;
+
+	/* Zero bitmap field to make sure that all the time it is clean */
+	bitmap_zero(bitmap, nbits);
+
+	mutex_lock(&of_mutex);
+	pr_debug("%s: Looking for stem: %s\n", __func__, stem);
+	list_for_each_entry(app, &aliases_lookup, link) {
+		pr_debug("%s: stem: %s, id: %d\n",
+			 __func__, app->stem, app->id);
+
+		if (strcmp(app->stem, stem) != 0) {
+			pr_debug("%s: stem comparison didn't pass %s\n",
+				 __func__, app->stem);
+			continue;
+		}
+
+		if (of_match_node(matches, app->np)) {
+			pr_debug("%s: Allocated ID %d\n", __func__, app->id);
+
+			if (app->id >= nbits) {
+				pr_warn("%s: ID %d >= than bitmap field %d\n",
					__func__, app->id, nbits);
+				ret = -EOVERFLOW;
+			} else {
+				set_bit(app->id, bitmap);
+			}
+		}
+	}
+	mutex_unlock(&of_mutex);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(of_alias_get_alias_list);
+
+/**
  * of_alias_get_highest_id - Get highest alias id for the given stem
 * @stem:	Alias stem to be examined
 *
@@ -2067,9 +2174,9 @@
 	/* OF on pmac has nodes instead of properties named "l2-cache"
 	 * beneath CPU nodes.
 	 */
-	if (IS_ENABLED(CONFIG_PPC_PMAC) && !strcmp(np->type, "cpu"))
+	if (IS_ENABLED(CONFIG_PPC_PMAC) && of_node_is_type(np, "cpu"))
 		for_each_child_of_node(np, child)
-			if (!strcmp(child->type, "cache"))
+			if (of_node_is_type(child, "cache"))
 				return child;
 
 	return NULL;
@@ -2099,3 +2206,109 @@
 
 	return cache_level;
 }
+
+/**
+ * of_map_id - Translate an ID through a downstream mapping.
+ * @np: root complex device node.
+ * @id: device ID to map.
+ * @map_name: property name of the map to use.
+ * @map_mask_name: optional property name of the mask to use.
+ * @target: optional pointer to a target device node.
+ * @id_out: optional pointer to receive the translated ID.
+ *
+ * Given a device ID, look up the appropriate implementation-defined
+ * platform ID and/or the target device which receives transactions on that
+ * ID, as per the "iommu-map" and "msi-map" bindings. Either of @target or
+ * @id_out may be NULL if only the other is required. If @target points to
+ * a non-NULL device node pointer, only entries targeting that node will be
+ * matched; if it points to a NULL value, it will receive the device node of
+ * the first matching target phandle, with a reference held.
+ *
+ * Return: 0 on success or a standard error code on failure.
+ */
+int of_map_id(struct device_node *np, u32 id,
+	      const char *map_name, const char *map_mask_name,
+	      struct device_node **target, u32 *id_out)
+{
+	u32 map_mask, masked_id;
+	int map_len;
+	const __be32 *map = NULL;
+
+	if (!np || !map_name || (!target && !id_out))
+		return -EINVAL;
+
+	map = of_get_property(np, map_name, &map_len);
+	if (!map) {
+		if (target)
+			return -ENODEV;
+		/* Otherwise, no map implies no translation */
+		*id_out = id;
+		return 0;
+	}
+
+	if (!map_len || map_len % (4 * sizeof(*map))) {
+		pr_err("%pOF: Error: Bad %s length: %d\n", np,
+		       map_name, map_len);
+		return -EINVAL;
+	}
+
+	/* The default is to select all bits. */
+	map_mask = 0xffffffff;
+
+	/*
+	 * Can be overridden by "{iommu,msi}-map-mask" property.
+	 * If of_property_read_u32() fails, the default is used.
+	 */
+	if (map_mask_name)
+		of_property_read_u32(np, map_mask_name, &map_mask);
+
+	masked_id = map_mask & id;
+	for ( ; map_len > 0; map_len -= 4 * sizeof(*map), map += 4) {
+		struct device_node *phandle_node;
+		u32 id_base = be32_to_cpup(map + 0);
+		u32 phandle = be32_to_cpup(map + 1);
+		u32 out_base = be32_to_cpup(map + 2);
+		u32 id_len = be32_to_cpup(map + 3);
+
+		if (id_base & ~map_mask) {
+			pr_err("%pOF: Invalid %s translation - %s-mask (0x%x) ignores id-base (0x%x)\n",
+			       np, map_name, map_name,
+			       map_mask, id_base);
+			return -EFAULT;
+		}
+
+		if (masked_id < id_base || masked_id >= id_base + id_len)
+			continue;
+
+		phandle_node = of_find_node_by_phandle(phandle);
+		if (!phandle_node)
+			return -ENODEV;
+
+		if (target) {
+			if (*target)
+				of_node_put(phandle_node);
+			else
+				*target = phandle_node;
+
+			if (*target != phandle_node)
+				continue;
+		}
+
+		if (id_out)
+			*id_out = masked_id - id_base + out_base;
+
+		pr_debug("%pOF: %s, using mask %08x, id-base: %08x, out-base: %08x, length: %08x, id: %08x -> %08x\n",
+			 np, map_name, map_mask, id_base, out_base,
+			 id_len, id, masked_id - id_base + out_base);
+		return 0;
+	}
+
+	pr_info("%pOF: no %s translation for id 0x%x on %pOF\n", np, map_name,
+		id, target && *target ? *target : NULL);
+
+	/* Bypasses translation */
+	if (id_out)
+		*id_out = id;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(of_map_id);
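A hedged worked example with illustrative values: given an "msi-map" entry of <0x0100 &its 0x10000 0x800> and no "msi-map-mask" property (so the mask defaults to 0xffffffff), a Requester ID of 0x0142 translates as:

	masked_id = 0xffffffff & 0x0142       = 0x0142
	in range? 0x0100 <= 0x0142 < 0x0900   -> yes
	out_id    = 0x0142 - 0x0100 + 0x10000 = 0x10042

so of_map_id(np, 0x0142, "msi-map", "msi-map-mask", &target, &out) returns 0 with out == 0x10042 and, if target was initialized to NULL, a held reference to the &its node stored in target.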