2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
kernel/drivers/of/base.c
@@ -16,6 +16,7 @@
 
 #define pr_fmt(fmt) "OF: " fmt
 
+#include <linux/bitmap.h>
 #include <linux/console.h>
 #include <linux/ctype.h>
 #include <linux/cpu.h>
@@ -78,33 +79,52 @@
 }
 EXPORT_SYMBOL(of_node_name_prefix);
 
-int of_n_addr_cells(struct device_node *np)
+static bool __of_node_is_type(const struct device_node *np, const char *type)
+{
+	const char *match = __of_get_property(np, "device_type", NULL);
+
+	return np && match && type && !strcmp(match, type);
+}
+
+int of_bus_n_addr_cells(struct device_node *np)
 {
 	u32 cells;
 
-	do {
-		if (np->parent)
-			np = np->parent;
+	for (; np; np = np->parent)
 		if (!of_property_read_u32(np, "#address-cells", &cells))
 			return cells;
-	} while (np->parent);
+
 	/* No #address-cells property for the root node */
 	return OF_ROOT_NODE_ADDR_CELLS_DEFAULT;
 }
+
+int of_n_addr_cells(struct device_node *np)
+{
+	if (np->parent)
+		np = np->parent;
+
+	return of_bus_n_addr_cells(np);
+}
 EXPORT_SYMBOL(of_n_addr_cells);
 
-int of_n_size_cells(struct device_node *np)
+int of_bus_n_size_cells(struct device_node *np)
 {
 	u32 cells;
 
-	do {
-		if (np->parent)
-			np = np->parent;
+	for (; np; np = np->parent)
 		if (!of_property_read_u32(np, "#size-cells", &cells))
 			return cells;
-	} while (np->parent);
+
 	/* No #size-cells property for the root node */
 	return OF_ROOT_NODE_SIZE_CELLS_DEFAULT;
+}
+
+int of_n_size_cells(struct device_node *np)
+{
+	if (np->parent)
+		np = np->parent;
+
+	return of_bus_n_size_cells(np);
 }
 EXPORT_SYMBOL(of_n_size_cells);
 
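Note on the split: of_bus_n_addr_cells() starts the "#address-cells" walk at the node itself, while of_n_addr_cells() keeps the historical behaviour of starting at the parent, which is what "reg" parsing needs. A minimal usage sketch; the helper name is illustrative, not part of this patch:

/* Illustrative: the cell counts governing a node's "reg" property come
 * from the parent bus, so the of_n_*_cells() pair is the right choice.
 */
static void show_reg_geometry(struct device_node *np)
{
	int ac = of_n_addr_cells(np);	/* parent's #address-cells */
	int sc = of_n_size_cells(np);	/* parent's #size-cells */

	pr_info("%pOF: reg uses %d address + %d size cells\n", np, ac, sc);
}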
@@ -115,115 +135,38 @@
 }
 #endif
 
-/*
- * Assumptions behind phandle_cache implementation:
- *   - phandle property values are in a contiguous range of 1..n
- *
- * If the assumptions do not hold, then
- *   - the phandle lookup overhead reduction provided by the cache
- *     will likely be less
- */
+#define OF_PHANDLE_CACHE_BITS	7
+#define OF_PHANDLE_CACHE_SZ	BIT(OF_PHANDLE_CACHE_BITS)
 
-static struct device_node **phandle_cache;
-static u32 phandle_cache_mask;
+static struct device_node *phandle_cache[OF_PHANDLE_CACHE_SZ];
+
+static u32 of_phandle_cache_hash(phandle handle)
+{
+	return hash_32(handle, OF_PHANDLE_CACHE_BITS);
+}
 
 /*
  * Caller must hold devtree_lock.
  */
-static void __of_free_phandle_cache(void)
+void __of_phandle_cache_inv_entry(phandle handle)
 {
-	u32 cache_entries = phandle_cache_mask + 1;
-	u32 k;
-
-	if (!phandle_cache)
-		return;
-
-	for (k = 0; k < cache_entries; k++)
-		of_node_put(phandle_cache[k]);
-
-	kfree(phandle_cache);
-	phandle_cache = NULL;
-}
-
-int of_free_phandle_cache(void)
-{
-	unsigned long flags;
-
-	raw_spin_lock_irqsave(&devtree_lock, flags);
-
-	__of_free_phandle_cache();
-
-	raw_spin_unlock_irqrestore(&devtree_lock, flags);
-
-	return 0;
-}
-#if !defined(CONFIG_MODULES)
-late_initcall_sync(of_free_phandle_cache);
-#endif
-
-/*
- * Caller must hold devtree_lock.
- */
-void __of_free_phandle_cache_entry(phandle handle)
-{
-	phandle masked_handle;
+	u32 handle_hash;
 	struct device_node *np;
 
 	if (!handle)
 		return;
 
-	masked_handle = handle & phandle_cache_mask;
+	handle_hash = of_phandle_cache_hash(handle);
 
-	if (phandle_cache) {
-		np = phandle_cache[masked_handle];
-		if (np && handle == np->phandle) {
-			of_node_put(np);
-			phandle_cache[masked_handle] = NULL;
-		}
-	}
-}
-
-void of_populate_phandle_cache(void)
-{
-	unsigned long flags;
-	u32 cache_entries;
-	struct device_node *np;
-	u32 phandles = 0;
-
-	raw_spin_lock_irqsave(&devtree_lock, flags);
-
-	__of_free_phandle_cache();
-
-	for_each_of_allnodes(np)
-		if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL)
-			phandles++;
-
-	if (!phandles)
-		goto out;
-
-	cache_entries = roundup_pow_of_two(phandles);
-	phandle_cache_mask = cache_entries - 1;
-
-	phandle_cache = kcalloc(cache_entries, sizeof(*phandle_cache),
-				GFP_ATOMIC);
-	if (!phandle_cache)
-		goto out;
-
-	for_each_of_allnodes(np)
-		if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL) {
-			of_node_get(np);
-			phandle_cache[np->phandle & phandle_cache_mask] = np;
-		}
-
-out:
-	raw_spin_unlock_irqrestore(&devtree_lock, flags);
+	np = phandle_cache[handle_hash];
+	if (np && handle == np->phandle)
+		phandle_cache[handle_hash] = NULL;
 }
 
 void __init of_core_init(void)
 {
 	struct device_node *np;
 
-	of_populate_phandle_cache();
 
 	/* Create the kset, and register existing nodes */
 	mutex_lock(&of_mutex);
@@ -233,8 +176,11 @@
 		pr_err("failed to register existing nodes\n");
 		return;
 	}
-	for_each_of_allnodes(np)
+	for_each_of_allnodes(np) {
 		__of_attach_node_sysfs(np);
+		if (np->phandle && !phandle_cache[of_phandle_cache_hash(np->phandle)])
+			phandle_cache[of_phandle_cache_hash(np->phandle)] = np;
+	}
 	mutex_unlock(&of_mutex);
 
 	/* Symlink in /proc as required by userspace ABI */
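The rework replaces the dynamically sized, refcounted cache with a fixed 128-entry, direct-mapped table keyed by hash_32() of the phandle; a collision simply overwrites the slot, and removed nodes are handled by invalidating their entry instead of holding references. A minimal sketch of the same pattern, with illustrative names rather than the patch's API:

#include <linux/hash.h>

#define CACHE_BITS	7
#define CACHE_SZ	(1u << CACHE_BITS)	/* 128 slots */

static struct device_node *cache[CACHE_SZ];

/* Direct-mapped lookup: check one slot, fall back to a full scan on a
 * miss, then repopulate the slot. No reference is held by the cache.
 */
static struct device_node *cache_lookup(phandle handle)
{
	u32 slot = hash_32(handle, CACHE_BITS);
	struct device_node *np = cache[slot];

	if (np && np->phandle == handle)
		return np;		/* hit */
	return NULL;			/* miss: caller scans and refills */
}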
@@ -373,6 +319,8 @@
 
 	ac = of_n_addr_cells(cpun);
 	cell = of_get_property(cpun, prop_name, &prop_len);
+	if (!cell && !ac && arch_match_cpu_phys_id(cpu, 0))
+		return true;
 	if (!cell || !ac)
 		return false;
 	prop_len /= sizeof(*cell) * ac;
@@ -433,7 +381,7 @@
 {
 	struct device_node *cpun;
 
-	for_each_node_by_type(cpun, "cpu") {
+	for_each_of_cpu_node(cpun) {
 		if (arch_find_n_match_cpu_physical_id(cpun, cpu, thread))
 			return cpun;
 	}
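for_each_of_cpu_node(), backed by of_get_next_cpu_node() added further down, matches /cpus children by node name or device_type and skips "fail"-status nodes. A hedged usage sketch:

/* Illustrative: count usable cpu nodes. The iterator manages the node
 * references itself; breaking out early would require an of_node_put().
 */
static unsigned int count_usable_cpus(void)
{
	struct device_node *cpun;
	unsigned int n = 0;

	for_each_of_cpu_node(cpun)
		n++;

	return n;
}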
@@ -466,6 +414,42 @@
 	return -ENODEV;
 }
 EXPORT_SYMBOL(of_cpu_node_to_id);
+
+/**
+ * of_get_cpu_state_node - Get CPU's idle state node at the given index
+ *
+ * @cpu_node: The device node for the CPU
+ * @index: The index in the list of the idle states
+ *
+ * Two generic methods can be used to describe a CPU's idle states, either via
+ * a flattened description through the "cpu-idle-states" binding or via the
+ * hierarchical layout, using the "power-domains" and the "domain-idle-states"
+ * bindings. This function checks for both and returns the idle state node for
+ * the requested index.
+ *
+ * In case an idle state node is found at @index, the refcount is incremented
+ * for it, so call of_node_put() on it when done. Returns NULL if not found.
+ */
+struct device_node *of_get_cpu_state_node(struct device_node *cpu_node,
+					  int index)
+{
+	struct of_phandle_args args;
+	int err;
+
+	err = of_parse_phandle_with_args(cpu_node, "power-domains",
+					 "#power-domain-cells", 0, &args);
+	if (!err) {
+		struct device_node *state_node =
+			of_parse_phandle(args.np, "domain-idle-states", index);
+
+		of_node_put(args.np);
+		if (state_node)
+			return state_node;
+	}
+
+	return of_parse_phandle(cpu_node, "cpu-idle-states", index);
+}
+EXPORT_SYMBOL(of_get_cpu_state_node);
 
 /**
  * __of_device_is_compatible() - Check if the node matches given constraints
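A sketch of how a caller might walk a CPU's idle states regardless of which binding describes them; purely illustrative:

/* Illustrative: each returned state node carries a reference that the
 * caller must drop with of_node_put().
 */
static void list_idle_states(struct device_node *cpu_node)
{
	struct device_node *state;
	int i = 0;

	while ((state = of_get_cpu_state_node(cpu_node, i++)) != NULL) {
		pr_debug("idle state %d: %pOF\n", i - 1, state);
		of_node_put(state);
	}
}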
@@ -520,14 +504,14 @@
 
 	/* Matching type is better than matching name */
 	if (type && type[0]) {
-		if (!device->type || of_node_cmp(type, device->type))
+		if (!__of_node_is_type(device, type))
 			return 0;
 		score += 2;
 	}
 
 	/* Matching name is a bit better than not */
 	if (name && name[0]) {
-		if (!device->name || of_node_cmp(name, device->name))
+		if (!of_node_name_eq(device, name))
 			return 0;
 		score++;
 	}
@@ -642,6 +626,28 @@
 
 }
 EXPORT_SYMBOL(of_device_is_available);
+
+/**
+ * __of_device_is_fail - check if a device has status "fail" or "fail-..."
+ *
+ * @device: Node to check status for, with locks already held
+ *
+ * Return: True if the status property is set to "fail" or "fail-..." (for any
+ * error code suffix), false otherwise
+ */
+static bool __of_device_is_fail(const struct device_node *device)
+{
+	const char *status;
+
+	if (!device)
+		return false;
+
+	status = __of_get_property(device, "status", NULL);
+	if (status == NULL)
+		return false;
+
+	return !strcmp(status, "fail") || !strncmp(status, "fail-", 5);
+}
 
 /**
  * of_device_is_big_endian - check if a device has BE registers
@@ -788,6 +794,48 @@
 EXPORT_SYMBOL(of_get_next_available_child);
 
 /**
+ * of_get_next_cpu_node - Iterate on cpu nodes
+ * @prev: previous child of the /cpus node, or NULL to get first
+ *
+ * Unusable CPUs (those with the status property set to "fail" or "fail-...")
+ * will be skipped.
+ *
+ * Returns a cpu node pointer with refcount incremented; use of_node_put()
+ * on it when done. Returns NULL when prev is the last child. Decrements
+ * the refcount of prev.
+ */
+struct device_node *of_get_next_cpu_node(struct device_node *prev)
+{
+	struct device_node *next = NULL;
+	unsigned long flags;
+	struct device_node *node;
+
+	if (!prev)
+		node = of_find_node_by_path("/cpus");
+
+	raw_spin_lock_irqsave(&devtree_lock, flags);
+	if (prev)
+		next = prev->sibling;
+	else if (node) {
+		next = node->child;
+		of_node_put(node);
+	}
+	for (; next; next = next->sibling) {
+		if (__of_device_is_fail(next))
+			continue;
+		if (!(of_node_name_eq(next, "cpu") ||
+		      __of_node_is_type(next, "cpu")))
+			continue;
+		if (of_node_get(next))
+			break;
+	}
+	of_node_put(prev);
+	raw_spin_unlock_irqrestore(&devtree_lock, flags);
+	return next;
+}
+EXPORT_SYMBOL(of_get_next_cpu_node);
+
+/**
 * of_get_compatible_child - Find compatible child node
 * @parent: parent node
 * @compatible: compatible string
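For reference, the hand-rolled equivalent of the for_each_of_cpu_node() loop built on this helper; a sketch, not part of the patch:

/* Illustrative: of_get_next_cpu_node() drops the reference on @prev,
 * so no explicit of_node_put() is needed unless the loop exits early.
 */
static void walk_cpu_nodes(void)
{
	struct device_node *cpun = NULL;

	while ((cpun = of_get_next_cpu_node(cpun)) != NULL)
		pr_debug("cpu node: %pOF\n", cpun);
}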
@@ -829,7 +877,7 @@
 	struct device_node *child;
 
 	for_each_child_of_node(node, child)
-		if (child->name && (of_node_cmp(child->name, name) == 0))
+		if (of_node_name_eq(child, name))
 			break;
 	return child;
 }
@@ -955,8 +1003,7 @@
 
 	raw_spin_lock_irqsave(&devtree_lock, flags);
 	for_each_of_allnodes_from(from, np)
-		if (np->name && (of_node_cmp(np->name, name) == 0)
-		    && of_node_get(np))
+		if (of_node_name_eq(np, name) && of_node_get(np))
 			break;
 	of_node_put(from);
 	raw_spin_unlock_irqrestore(&devtree_lock, flags);
@@ -984,8 +1031,7 @@
 
 	raw_spin_lock_irqsave(&devtree_lock, flags);
 	for_each_of_allnodes_from(from, np)
-		if (np->type && (of_node_cmp(np->type, type) == 0)
-		    && of_node_get(np))
+		if (__of_node_is_type(np, type) && of_node_get(np))
 			break;
 	of_node_put(from);
 	raw_spin_unlock_irqrestore(&devtree_lock, flags);
@@ -1178,36 +1224,24 @@
 {
 	struct device_node *np = NULL;
 	unsigned long flags;
-	phandle masked_handle;
+	u32 handle_hash;
 
 	if (!handle)
 		return NULL;
 
+	handle_hash = of_phandle_cache_hash(handle);
+
 	raw_spin_lock_irqsave(&devtree_lock, flags);
 
-	masked_handle = handle & phandle_cache_mask;
-
-	if (phandle_cache) {
-		if (phandle_cache[masked_handle] &&
-		    handle == phandle_cache[masked_handle]->phandle)
-			np = phandle_cache[masked_handle];
-		if (np && of_node_check_flag(np, OF_DETACHED)) {
-			WARN_ON(1); /* did not uncache np on node removal */
-			of_node_put(np);
-			phandle_cache[masked_handle] = NULL;
-			np = NULL;
-		}
-	}
+	if (phandle_cache[handle_hash] &&
+	    handle == phandle_cache[handle_hash]->phandle)
+		np = phandle_cache[handle_hash];
 
 	if (!np) {
 		for_each_of_allnodes(np)
 			if (np->phandle == handle &&
 			    !of_node_check_flag(np, OF_DETACHED)) {
-				if (phandle_cache) {
-					/* will put when removed from cache */
-					of_node_get(np);
-					phandle_cache[masked_handle] = np;
-				}
+				phandle_cache[handle_hash] = np;
 				break;
 			}
 	}
@@ -1240,6 +1274,13 @@
 	int size;
 
 	memset(it, 0, sizeof(*it));
+
+	/*
+	 * one of cell_count or cells_name must be provided to determine the
+	 * argument length.
+	 */
+	if (cell_count < 0 && !cells_name)
+		return -EINVAL;
 
 	list = of_get_property(np, list_name, &size);
 	if (!list)
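With this check, a caller must supply either a "#...-cells" property name or a non-negative fixed cell count. A minimal sketch of the fixed-count case; the "foos" property is hypothetical:

/* Illustrative: iterate a list where every entry is a phandle followed
 * by exactly two argument cells and no "#foo-cells" property exists.
 */
static void walk_foo_list(struct device_node *np)
{
	struct of_phandle_iterator it;
	int err;

	of_for_each_phandle(&it, err, np, "foos", NULL, 2)
		pr_debug("target %pOF, %d args\n", it.node, it.cur_count);
}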
@@ -1290,11 +1331,20 @@
 
 		if (of_property_read_u32(it->node, it->cells_name,
 					 &count)) {
-			pr_err("%pOF: could not get %s for %pOF\n",
-			       it->parent,
-			       it->cells_name,
-			       it->node);
-			goto err;
+			/*
+			 * If both cell_count and cells_name are given,
+			 * fall back to cell_count in the absence of
+			 * the cells_name property
+			 */
+			if (it->cell_count >= 0) {
+				count = it->cell_count;
+			} else {
+				pr_err("%pOF: could not get %s for %pOF\n",
+				       it->parent,
+				       it->cells_name,
+				       it->node);
+				goto err;
+			}
 		}
 	} else {
 		count = it->cell_count;
@@ -1305,8 +1355,14 @@
 	 * property data length
 	 */
 	if (it->cur + count > it->list_end) {
-		pr_err("%pOF: arguments longer than property\n",
-		       it->parent);
+		if (it->cells_name)
+			pr_err("%pOF: %s = %d found %td\n",
+			       it->parent, it->cells_name,
+			       count, it->list_end - it->cur);
+		else
+			pr_err("%pOF: phandle %s needs %d, found %td\n",
+			       it->parent, of_node_full_name(it->node),
+			       count, it->list_end - it->cur);
 		goto err;
 	}
@@ -1342,7 +1398,6 @@
 
 	return count;
 }
-EXPORT_SYMBOL_GPL(of_phandle_iterator_args);
 
 static int __of_parse_phandle_with_args(const struct device_node *np,
 					const char *list_name,
@@ -1458,10 +1513,17 @@
 			       const char *cells_name, int index,
 			       struct of_phandle_args *out_args)
 {
+	int cell_count = -1;
+
 	if (index < 0)
 		return -EINVAL;
-	return __of_parse_phandle_with_args(np, list_name, cells_name, 0,
-					    index, out_args);
+
+	/* If cells_name is NULL we assume a cell count of 0 */
+	if (!cells_name)
+		cell_count = 0;
+
+	return __of_parse_phandle_with_args(np, list_name, cells_name,
+					    cell_count, index, out_args);
 }
 EXPORT_SYMBOL(of_parse_phandle_with_args);
 
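Usage is unchanged for well-formed bindings; a hedged sketch against the standard "clocks"/"#clock-cells" pair:

/* Illustrative: resolve entry @index of a "clocks" list. args.np is
 * returned with a reference held.
 */
static int get_clock_provider(struct device_node *np, int index)
{
	struct of_phandle_args args;
	int ret;

	ret = of_parse_phandle_with_args(np, "clocks", "#clock-cells",
					 index, &args);
	if (ret)
		return ret;

	pr_debug("provider %pOF, %d args\n", args.np, args.args_count);
	of_node_put(args.np);
	return 0;
}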
@@ -1543,7 +1605,7 @@
 	if (!pass_name)
 		goto free;
 
-	ret = __of_parse_phandle_with_args(np, list_name, cells_name, 0, index,
+	ret = __of_parse_phandle_with_args(np, list_name, cells_name, -1, index,
 					   out_args);
 	if (ret)
 		goto free;
@@ -1711,7 +1773,24 @@
 	struct of_phandle_iterator it;
 	int rc, cur_index = 0;
 
-	rc = of_phandle_iterator_init(&it, np, list_name, cells_name, 0);
+	/*
+	 * If cells_name is NULL we assume a cell count of 0. This makes
+	 * counting the phandles trivial as each 32bit word in the list is a
+	 * phandle and no arguments need to be considered. So we don't iterate
+	 * through the list but just use the length to determine the phandle
+	 * count.
+	 */
+	if (!cells_name) {
+		const __be32 *list;
+		int size;
+
+		list = of_get_property(np, list_name, &size);
+		if (!list)
+			return -ENOENT;
+
+		return size / sizeof(*list);
+	}
+
+	rc = of_phandle_iterator_init(&it, np, list_name, cells_name, -1);
 	if (rc)
 		return rc;
 
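As the new comment says, the fast path is pure arithmetic: a 12-byte property with no argument cells holds 12 / sizeof(__be32) = 3 phandles. A sketch; the "foo-list" property is hypothetical:

/* Illustrative: counting a bare phandle list takes no iteration. */
static int count_foo_targets(struct device_node *np)
{
	return of_count_phandle_with_args(np, "foo-list", NULL);
}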
@@ -1770,6 +1849,7 @@
 
 	return rc;
 }
+EXPORT_SYMBOL_GPL(of_add_property);
 
 int __of_remove_property(struct device_node *np, struct property *prop)
 {
@@ -1822,6 +1902,7 @@
 
 	return rc;
 }
+EXPORT_SYMBOL_GPL(of_remove_property);
 
 int __of_update_property(struct device_node *np, struct property *newprop,
 			 struct property **oldpropp)
@@ -1997,6 +2078,59 @@
 EXPORT_SYMBOL_GPL(of_alias_get_id);
 
 /**
+ * of_alias_get_alias_list - Get alias list for the given device driver
+ * @matches:	Array of OF device match structures to search in
+ * @stem:	Alias stem of the given device_node
+ * @bitmap:	Bitmap field pointer
+ * @nbits:	Maximum number of alias IDs which can be recorded in bitmap
+ *
+ * The function walks the alias lookup table and records the alias IDs found
+ * for the given device match structures and alias stem.
+ *
+ * Return:	0 or -ENOSYS when !CONFIG_OF or
+ *		-EOVERFLOW if alias ID is greater than allocated nbits
+ */
+int of_alias_get_alias_list(const struct of_device_id *matches,
+			    const char *stem, unsigned long *bitmap,
+			    unsigned int nbits)
+{
+	struct alias_prop *app;
+	int ret = 0;
+
+	/* Zero bitmap field to make sure that all the time it is clean */
+	bitmap_zero(bitmap, nbits);
+
+	mutex_lock(&of_mutex);
+	pr_debug("%s: Looking for stem: %s\n", __func__, stem);
+	list_for_each_entry(app, &aliases_lookup, link) {
+		pr_debug("%s: stem: %s, id: %d\n",
+			 __func__, app->stem, app->id);
+
+		if (strcmp(app->stem, stem) != 0) {
+			pr_debug("%s: stem comparison didn't pass %s\n",
+				 __func__, app->stem);
+			continue;
+		}
+
+		if (of_match_node(matches, app->np)) {
+			pr_debug("%s: Allocated ID %d\n", __func__, app->id);
+
+			if (app->id >= nbits) {
+				pr_warn("%s: ID %d >= than bitmap field %d\n",
+					__func__, app->id, nbits);
+				ret = -EOVERFLOW;
+			} else {
+				set_bit(app->id, bitmap);
+			}
+		}
+	}
+	mutex_unlock(&of_mutex);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(of_alias_get_alias_list);
+
+/**
 * of_alias_get_highest_id - Get highest alias id for the given stem
 * @stem:	Alias stem to be examined
 *
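A hedged sketch of how a driver might consume the bitmap, e.g. to find the first alias ID not already claimed; names and the ID range are illustrative:

/* Illustrative: record alias IDs 0..7 already used for "serial" nodes
 * matched by this driver, then pick the first free one.
 */
static int first_free_serial_id(const struct of_device_id *matches)
{
	DECLARE_BITMAP(used, 8);
	int ret;

	ret = of_alias_get_alias_list(matches, "serial", used, 8);
	if (ret && ret != -EOVERFLOW)
		return ret;

	return find_first_zero_bit(used, 8);	/* == 8 if none free */
}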
@@ -2067,9 +2201,9 @@
 	/* OF on pmac has nodes instead of properties named "l2-cache"
 	 * beneath CPU nodes.
 	 */
-	if (IS_ENABLED(CONFIG_PPC_PMAC) && !strcmp(np->type, "cpu"))
+	if (IS_ENABLED(CONFIG_PPC_PMAC) && of_node_is_type(np, "cpu"))
 		for_each_child_of_node(np, child)
-			if (!strcmp(child->type, "cache"))
+			if (of_node_is_type(child, "cache"))
 				return child;
 
 	return NULL;
@@ -2099,3 +2233,109 @@
 
 	return cache_level;
 }
+
+/**
+ * of_map_id - Translate an ID through a downstream mapping.
+ * @np: root complex device node.
+ * @id: device ID to map.
+ * @map_name: property name of the map to use.
+ * @map_mask_name: optional property name of the mask to use.
+ * @target: optional pointer to a target device node.
+ * @id_out: optional pointer to receive the translated ID.
+ *
+ * Given a device ID, look up the appropriate implementation-defined
+ * platform ID and/or the target device which receives transactions on that
+ * ID, as per the "iommu-map" and "msi-map" bindings. Either of @target or
+ * @id_out may be NULL if only the other is required. If @target points to
+ * a non-NULL device node pointer, only entries targeting that node will be
+ * matched; if it points to a NULL value, it will receive the device node of
+ * the first matching target phandle, with a reference held.
+ *
+ * Return: 0 on success or a standard error code on failure.
+ */
+int of_map_id(struct device_node *np, u32 id,
+	      const char *map_name, const char *map_mask_name,
+	      struct device_node **target, u32 *id_out)
+{
+	u32 map_mask, masked_id;
+	int map_len;
+	const __be32 *map = NULL;
+
+	if (!np || !map_name || (!target && !id_out))
+		return -EINVAL;
+
+	map = of_get_property(np, map_name, &map_len);
+	if (!map) {
+		if (target)
+			return -ENODEV;
+		/* Otherwise, no map implies no translation */
+		*id_out = id;
+		return 0;
+	}
+
+	if (!map_len || map_len % (4 * sizeof(*map))) {
+		pr_err("%pOF: Error: Bad %s length: %d\n", np,
+		       map_name, map_len);
+		return -EINVAL;
+	}
+
+	/* The default is to select all bits. */
+	map_mask = 0xffffffff;
+
+	/*
+	 * Can be overridden by "{iommu,msi}-map-mask" property.
+	 * If of_property_read_u32() fails, the default is used.
+	 */
+	if (map_mask_name)
+		of_property_read_u32(np, map_mask_name, &map_mask);
+
+	masked_id = map_mask & id;
+	for ( ; map_len > 0; map_len -= 4 * sizeof(*map), map += 4) {
+		struct device_node *phandle_node;
+		u32 id_base = be32_to_cpup(map + 0);
+		u32 phandle = be32_to_cpup(map + 1);
+		u32 out_base = be32_to_cpup(map + 2);
+		u32 id_len = be32_to_cpup(map + 3);
+
+		if (id_base & ~map_mask) {
+			pr_err("%pOF: Invalid %s translation - %s-mask (0x%x) ignores id-base (0x%x)\n",
+			       np, map_name, map_name,
+			       map_mask, id_base);
+			return -EFAULT;
+		}
+
+		if (masked_id < id_base || masked_id >= id_base + id_len)
+			continue;
+
+		phandle_node = of_find_node_by_phandle(phandle);
+		if (!phandle_node)
+			return -ENODEV;
+
+		if (target) {
+			if (*target)
+				of_node_put(phandle_node);
+			else
+				*target = phandle_node;
+
+			if (*target != phandle_node)
+				continue;
+		}
+
+		if (id_out)
+			*id_out = masked_id - id_base + out_base;
+
+		pr_debug("%pOF: %s, using mask %08x, id-base: %08x, out-base: %08x, length: %08x, id: %08x -> %08x\n",
+			 np, map_name, map_mask, id_base, out_base,
+			 id_len, id, masked_id - id_base + out_base);
+		return 0;
+	}
+
+	pr_info("%pOF: no %s translation for id 0x%x on %pOF\n", np, map_name,
+		id, target && *target ? *target : NULL);
+
+	/* Bypasses translation */
+	if (id_out)
+		*id_out = id;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(of_map_id);
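Worked example: with msi-map = <0x0 &its 0x10000 0x10000> and the default all-ones mask, RID 0x1234 falls in [0x0, 0x10000) and maps to 0x1234 - 0x0 + 0x10000 = 0x11234. A hedged call sketch:

/* Illustrative: translate a PCI requester ID through "msi-map".
 * @target must point to a NULL pointer to receive the MSI parent.
 */
static int translate_rid(struct device_node *root, u32 rid)
{
	struct device_node *msi_parent = NULL;
	u32 out_id;
	int ret;

	ret = of_map_id(root, rid, "msi-map", "msi-map-mask",
			&msi_parent, &out_id);
	if (ret)
		return ret;

	pr_debug("0x%x -> %pOF / 0x%x\n", rid, msi_parent, out_id);
	of_node_put(msi_parent);
	return 0;
}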