.. | .. |
---|
16 | 16 | |
---|
17 | 17 | #define pr_fmt(fmt) "OF: " fmt |
---|
18 | 18 | |
---|
| 19 | +#include <linux/bitmap.h> |
---|
19 | 20 | #include <linux/console.h> |
---|
20 | 21 | #include <linux/ctype.h> |
---|
21 | 22 | #include <linux/cpu.h> |
---|
.. | .. |
---|
78 | 79 | } |
---|
79 | 80 | EXPORT_SYMBOL(of_node_name_prefix); |
---|
80 | 81 | |
---|
/*
 * __of_node_is_type - check a node's "device_type" property against @type
 * @np:   node to test (may be NULL)
 * @type: expected device_type string (may be NULL)
 *
 * Returns true only when @np has a "device_type" property whose value
 * string-compares equal to @type. Guard the NULL cases up front so the
 * property lookup is skipped entirely for NULL inputs instead of relying
 * on __of_get_property() tolerating a NULL node.
 */
static bool __of_node_is_type(const struct device_node *np, const char *type)
{
	const char *match;

	if (!np || !type)
		return false;

	match = __of_get_property(np, "device_type", NULL);
	return match && !strcmp(match, type);
}
---|
| 88 | + |
---|
| 89 | +int of_bus_n_addr_cells(struct device_node *np) |
---|
82 | 90 | { |
---|
83 | 91 | u32 cells; |
---|
84 | 92 | |
---|
85 | | - do { |
---|
86 | | - if (np->parent) |
---|
87 | | - np = np->parent; |
---|
| 93 | + for (; np; np = np->parent) |
---|
88 | 94 | if (!of_property_read_u32(np, "#address-cells", &cells)) |
---|
89 | 95 | return cells; |
---|
90 | | - } while (np->parent); |
---|
| 96 | + |
---|
91 | 97 | /* No #address-cells property for the root node */ |
---|
92 | 98 | return OF_ROOT_NODE_ADDR_CELLS_DEFAULT; |
---|
93 | 99 | } |
---|
| 100 | + |
---|
| 101 | +int of_n_addr_cells(struct device_node *np) |
---|
| 102 | +{ |
---|
| 103 | + if (np->parent) |
---|
| 104 | + np = np->parent; |
---|
| 105 | + |
---|
| 106 | + return of_bus_n_addr_cells(np); |
---|
| 107 | +} |
---|
94 | 108 | EXPORT_SYMBOL(of_n_addr_cells); |
---|
95 | 109 | |
---|
96 | | -int of_n_size_cells(struct device_node *np) |
---|
| 110 | +int of_bus_n_size_cells(struct device_node *np) |
---|
97 | 111 | { |
---|
98 | 112 | u32 cells; |
---|
99 | 113 | |
---|
100 | | - do { |
---|
101 | | - if (np->parent) |
---|
102 | | - np = np->parent; |
---|
| 114 | + for (; np; np = np->parent) |
---|
103 | 115 | if (!of_property_read_u32(np, "#size-cells", &cells)) |
---|
104 | 116 | return cells; |
---|
105 | | - } while (np->parent); |
---|
| 117 | + |
---|
106 | 118 | /* No #size-cells property for the root node */ |
---|
107 | 119 | return OF_ROOT_NODE_SIZE_CELLS_DEFAULT; |
---|
| 120 | +} |
---|
| 121 | + |
---|
| 122 | +int of_n_size_cells(struct device_node *np) |
---|
| 123 | +{ |
---|
| 124 | + if (np->parent) |
---|
| 125 | + np = np->parent; |
---|
| 126 | + |
---|
| 127 | + return of_bus_n_size_cells(np); |
---|
108 | 128 | } |
---|
109 | 129 | EXPORT_SYMBOL(of_n_size_cells); |
---|
110 | 130 | |
---|
.. | .. |
---|
115 | 135 | } |
---|
116 | 136 | #endif |
---|
117 | 137 | |
---|
118 | | -/* |
---|
119 | | - * Assumptions behind phandle_cache implementation: |
---|
120 | | - * - phandle property values are in a contiguous range of 1..n |
---|
121 | | - * |
---|
122 | | - * If the assumptions do not hold, then |
---|
123 | | - * - the phandle lookup overhead reduction provided by the cache |
---|
124 | | - * will likely be less |
---|
125 | | - */ |
---|
| 138 | +#define OF_PHANDLE_CACHE_BITS 7 |
---|
| 139 | +#define OF_PHANDLE_CACHE_SZ BIT(OF_PHANDLE_CACHE_BITS) |
---|
126 | 140 | |
---|
127 | | -static struct device_node **phandle_cache; |
---|
128 | | -static u32 phandle_cache_mask; |
---|
| 141 | +static struct device_node *phandle_cache[OF_PHANDLE_CACHE_SZ]; |
---|
| 142 | + |
---|
| 143 | +static u32 of_phandle_cache_hash(phandle handle) |
---|
| 144 | +{ |
---|
| 145 | + return hash_32(handle, OF_PHANDLE_CACHE_BITS); |
---|
| 146 | +} |
---|
129 | 147 | |
---|
130 | 148 | /* |
---|
131 | 149 | * Caller must hold devtree_lock. |
---|
132 | 150 | */ |
---|
133 | | -static struct device_node** __of_free_phandle_cache(void) |
---|
| 151 | +void __of_phandle_cache_inv_entry(phandle handle) |
---|
134 | 152 | { |
---|
135 | | - u32 cache_entries = phandle_cache_mask + 1; |
---|
136 | | - u32 k; |
---|
137 | | - struct device_node **shadow; |
---|
138 | | - |
---|
139 | | - if (!phandle_cache) |
---|
140 | | - return NULL; |
---|
141 | | - |
---|
142 | | - for (k = 0; k < cache_entries; k++) |
---|
143 | | - of_node_put(phandle_cache[k]); |
---|
144 | | - |
---|
145 | | - shadow = phandle_cache; |
---|
146 | | - phandle_cache = NULL; |
---|
147 | | - return shadow; |
---|
148 | | -} |
---|
149 | | - |
---|
150 | | -int of_free_phandle_cache(void) |
---|
151 | | -{ |
---|
152 | | - unsigned long flags; |
---|
153 | | - struct device_node **shadow; |
---|
154 | | - |
---|
155 | | - raw_spin_lock_irqsave(&devtree_lock, flags); |
---|
156 | | - |
---|
157 | | - shadow = __of_free_phandle_cache(); |
---|
158 | | - |
---|
159 | | - raw_spin_unlock_irqrestore(&devtree_lock, flags); |
---|
160 | | - kfree(shadow); |
---|
161 | | - return 0; |
---|
162 | | -} |
---|
163 | | -#if !defined(CONFIG_MODULES) |
---|
164 | | -late_initcall_sync(of_free_phandle_cache); |
---|
165 | | -#endif |
---|
166 | | - |
---|
167 | | -/* |
---|
168 | | - * Caller must hold devtree_lock. |
---|
169 | | - */ |
---|
170 | | -void __of_free_phandle_cache_entry(phandle handle) |
---|
171 | | -{ |
---|
172 | | - phandle masked_handle; |
---|
| 153 | + u32 handle_hash; |
---|
173 | 154 | struct device_node *np; |
---|
174 | 155 | |
---|
175 | 156 | if (!handle) |
---|
176 | 157 | return; |
---|
177 | 158 | |
---|
178 | | - masked_handle = handle & phandle_cache_mask; |
---|
| 159 | + handle_hash = of_phandle_cache_hash(handle); |
---|
179 | 160 | |
---|
180 | | - if (phandle_cache) { |
---|
181 | | - np = phandle_cache[masked_handle]; |
---|
182 | | - if (np && handle == np->phandle) { |
---|
183 | | - of_node_put(np); |
---|
184 | | - phandle_cache[masked_handle] = NULL; |
---|
185 | | - } |
---|
186 | | - } |
---|
187 | | -} |
---|
188 | | - |
---|
189 | | -void of_populate_phandle_cache(void) |
---|
190 | | -{ |
---|
191 | | - unsigned long flags; |
---|
192 | | - u32 cache_entries; |
---|
193 | | - struct device_node *np; |
---|
194 | | - u32 phandles = 0; |
---|
195 | | - struct device_node **shadow; |
---|
196 | | - |
---|
197 | | - raw_spin_lock_irqsave(&devtree_lock, flags); |
---|
198 | | - |
---|
199 | | - shadow = __of_free_phandle_cache(); |
---|
200 | | - |
---|
201 | | - for_each_of_allnodes(np) |
---|
202 | | - if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL) |
---|
203 | | - phandles++; |
---|
204 | | - |
---|
205 | | - if (!phandles) |
---|
206 | | - goto out; |
---|
207 | | - raw_spin_unlock_irqrestore(&devtree_lock, flags); |
---|
208 | | - |
---|
209 | | - cache_entries = roundup_pow_of_two(phandles); |
---|
210 | | - phandle_cache_mask = cache_entries - 1; |
---|
211 | | - |
---|
212 | | - phandle_cache = kcalloc(cache_entries, sizeof(*phandle_cache), |
---|
213 | | - GFP_ATOMIC); |
---|
214 | | - raw_spin_lock_irqsave(&devtree_lock, flags); |
---|
215 | | - if (!phandle_cache) |
---|
216 | | - goto out; |
---|
217 | | - |
---|
218 | | - for_each_of_allnodes(np) |
---|
219 | | - if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL) { |
---|
220 | | - of_node_get(np); |
---|
221 | | - phandle_cache[np->phandle & phandle_cache_mask] = np; |
---|
222 | | - } |
---|
223 | | - |
---|
224 | | -out: |
---|
225 | | - raw_spin_unlock_irqrestore(&devtree_lock, flags); |
---|
226 | | - kfree(shadow); |
---|
| 161 | + np = phandle_cache[handle_hash]; |
---|
| 162 | + if (np && handle == np->phandle) |
---|
| 163 | + phandle_cache[handle_hash] = NULL; |
---|
227 | 164 | } |
---|
228 | 165 | |
---|
229 | 166 | void __init of_core_init(void) |
---|
230 | 167 | { |
---|
231 | 168 | struct device_node *np; |
---|
232 | 169 | |
---|
233 | | - of_populate_phandle_cache(); |
---|
234 | 170 | |
---|
235 | 171 | /* Create the kset, and register existing nodes */ |
---|
236 | 172 | mutex_lock(&of_mutex); |
---|
.. | .. |
---|
240 | 176 | pr_err("failed to register existing nodes\n"); |
---|
241 | 177 | return; |
---|
242 | 178 | } |
---|
243 | | - for_each_of_allnodes(np) |
---|
| 179 | + for_each_of_allnodes(np) { |
---|
244 | 180 | __of_attach_node_sysfs(np); |
---|
| 181 | + if (np->phandle && !phandle_cache[of_phandle_cache_hash(np->phandle)]) |
---|
| 182 | + phandle_cache[of_phandle_cache_hash(np->phandle)] = np; |
---|
| 183 | + } |
---|
245 | 184 | mutex_unlock(&of_mutex); |
---|
246 | 185 | |
---|
247 | 186 | /* Symlink in /proc as required by userspace ABI */ |
---|
.. | .. |
---|
380 | 319 | |
---|
381 | 320 | ac = of_n_addr_cells(cpun); |
---|
382 | 321 | cell = of_get_property(cpun, prop_name, &prop_len); |
---|
| 322 | + if (!cell && !ac && arch_match_cpu_phys_id(cpu, 0)) |
---|
| 323 | + return true; |
---|
383 | 324 | if (!cell || !ac) |
---|
384 | 325 | return false; |
---|
385 | 326 | prop_len /= sizeof(*cell) * ac; |
---|
.. | .. |
---|
440 | 381 | { |
---|
441 | 382 | struct device_node *cpun; |
---|
442 | 383 | |
---|
443 | | - for_each_node_by_type(cpun, "cpu") { |
---|
| 384 | + for_each_of_cpu_node(cpun) { |
---|
444 | 385 | if (arch_find_n_match_cpu_physical_id(cpun, cpu, thread)) |
---|
445 | 386 | return cpun; |
---|
446 | 387 | } |
---|
.. | .. |
---|
473 | 414 | return -ENODEV; |
---|
474 | 415 | } |
---|
475 | 416 | EXPORT_SYMBOL(of_cpu_node_to_id); |
---|
| 417 | + |
---|
| 418 | +/** |
---|
| 419 | + * of_get_cpu_state_node - Get CPU's idle state node at the given index |
---|
| 420 | + * |
---|
| 421 | + * @cpu_node: The device node for the CPU |
---|
| 422 | + * @index: The index in the list of the idle states |
---|
| 423 | + * |
---|
| 424 | + * Two generic methods can be used to describe a CPU's idle states, either via |
---|
| 425 | + * a flattened description through the "cpu-idle-states" binding or via the |
---|
| 426 | + * hierarchical layout, using the "power-domains" and the "domain-idle-states" |
---|
| 427 | + * bindings. This function check for both and returns the idle state node for |
---|
| 428 | + * the requested index. |
---|
| 429 | + * |
---|
| 430 | + * In case an idle state node is found at @index, the refcount is incremented |
---|
| 431 | + * for it, so call of_node_put() on it when done. Returns NULL if not found. |
---|
| 432 | + */ |
---|
| 433 | +struct device_node *of_get_cpu_state_node(struct device_node *cpu_node, |
---|
| 434 | + int index) |
---|
| 435 | +{ |
---|
| 436 | + struct of_phandle_args args; |
---|
| 437 | + int err; |
---|
| 438 | + |
---|
| 439 | + err = of_parse_phandle_with_args(cpu_node, "power-domains", |
---|
| 440 | + "#power-domain-cells", 0, &args); |
---|
| 441 | + if (!err) { |
---|
| 442 | + struct device_node *state_node = |
---|
| 443 | + of_parse_phandle(args.np, "domain-idle-states", index); |
---|
| 444 | + |
---|
| 445 | + of_node_put(args.np); |
---|
| 446 | + if (state_node) |
---|
| 447 | + return state_node; |
---|
| 448 | + } |
---|
| 449 | + |
---|
| 450 | + return of_parse_phandle(cpu_node, "cpu-idle-states", index); |
---|
| 451 | +} |
---|
| 452 | +EXPORT_SYMBOL(of_get_cpu_state_node); |
---|
476 | 453 | |
---|
477 | 454 | /** |
---|
478 | 455 | * __of_device_is_compatible() - Check if the node matches given constraints |
---|
.. | .. |
---|
527 | 504 | |
---|
528 | 505 | /* Matching type is better than matching name */ |
---|
529 | 506 | if (type && type[0]) { |
---|
530 | | - if (!device->type || of_node_cmp(type, device->type)) |
---|
| 507 | + if (!__of_node_is_type(device, type)) |
---|
531 | 508 | return 0; |
---|
532 | 509 | score += 2; |
---|
533 | 510 | } |
---|
534 | 511 | |
---|
535 | 512 | /* Matching name is a bit better than not */ |
---|
536 | 513 | if (name && name[0]) { |
---|
537 | | - if (!device->name || of_node_cmp(name, device->name)) |
---|
| 514 | + if (!of_node_name_eq(device, name)) |
---|
538 | 515 | return 0; |
---|
539 | 516 | score++; |
---|
540 | 517 | } |
---|
.. | .. |
---|
649 | 626 | |
---|
650 | 627 | } |
---|
651 | 628 | EXPORT_SYMBOL(of_device_is_available); |
---|
| 629 | + |
---|
/**
 * __of_device_is_fail - check if a device has status "fail" or "fail-..."
 *
 * @device: Node to check status for, with locks already held
 *
 * Return: True if the status property is set to "fail" or "fail-..." (for any
 * error code suffix), false otherwise
 */
static bool __of_device_is_fail(const struct device_node *device)
{
	const char *status = device ?
		__of_get_property(device, "status", NULL) : NULL;

	/* Absent node or absent property both mean "not failed". */
	if (!status)
		return false;

	return strcmp(status, "fail") == 0 || strncmp(status, "fail-", 5) == 0;
}
---|
652 | 651 | |
---|
653 | 652 | /** |
---|
654 | 653 | * of_device_is_big_endian - check if a device has BE registers |
---|
.. | .. |
---|
795 | 794 | EXPORT_SYMBOL(of_get_next_available_child); |
---|
796 | 795 | |
---|
797 | 796 | /** |
---|
| 797 | + * of_get_next_cpu_node - Iterate on cpu nodes |
---|
| 798 | + * @prev: previous child of the /cpus node, or NULL to get first |
---|
| 799 | + * |
---|
| 800 | + * Unusable CPUs (those with the status property set to "fail" or "fail-...") |
---|
| 801 | + * will be skipped. |
---|
| 802 | + * |
---|
| 803 | + * Returns a cpu node pointer with refcount incremented, use of_node_put() |
---|
| 804 | + * on it when done. Returns NULL when prev is the last child. Decrements |
---|
| 805 | + * the refcount of prev. |
---|
| 806 | + */ |
---|
| 807 | +struct device_node *of_get_next_cpu_node(struct device_node *prev) |
---|
| 808 | +{ |
---|
| 809 | + struct device_node *next = NULL; |
---|
| 810 | + unsigned long flags; |
---|
| 811 | + struct device_node *node; |
---|
| 812 | + |
---|
| 813 | + if (!prev) |
---|
| 814 | + node = of_find_node_by_path("/cpus"); |
---|
| 815 | + |
---|
| 816 | + raw_spin_lock_irqsave(&devtree_lock, flags); |
---|
| 817 | + if (prev) |
---|
| 818 | + next = prev->sibling; |
---|
| 819 | + else if (node) { |
---|
| 820 | + next = node->child; |
---|
| 821 | + of_node_put(node); |
---|
| 822 | + } |
---|
| 823 | + for (; next; next = next->sibling) { |
---|
| 824 | + if (__of_device_is_fail(next)) |
---|
| 825 | + continue; |
---|
| 826 | + if (!(of_node_name_eq(next, "cpu") || |
---|
| 827 | + __of_node_is_type(next, "cpu"))) |
---|
| 828 | + continue; |
---|
| 829 | + if (of_node_get(next)) |
---|
| 830 | + break; |
---|
| 831 | + } |
---|
| 832 | + of_node_put(prev); |
---|
| 833 | + raw_spin_unlock_irqrestore(&devtree_lock, flags); |
---|
| 834 | + return next; |
---|
| 835 | +} |
---|
| 836 | +EXPORT_SYMBOL(of_get_next_cpu_node); |
---|
| 837 | + |
---|
| 838 | +/** |
---|
798 | 839 | * of_get_compatible_child - Find compatible child node |
---|
799 | 840 | * @parent: parent node |
---|
800 | 841 | * @compatible: compatible string |
---|
.. | .. |
---|
836 | 877 | struct device_node *child; |
---|
837 | 878 | |
---|
838 | 879 | for_each_child_of_node(node, child) |
---|
839 | | - if (child->name && (of_node_cmp(child->name, name) == 0)) |
---|
| 880 | + if (of_node_name_eq(child, name)) |
---|
840 | 881 | break; |
---|
841 | 882 | return child; |
---|
842 | 883 | } |
---|
.. | .. |
---|
962 | 1003 | |
---|
963 | 1004 | raw_spin_lock_irqsave(&devtree_lock, flags); |
---|
964 | 1005 | for_each_of_allnodes_from(from, np) |
---|
965 | | - if (np->name && (of_node_cmp(np->name, name) == 0) |
---|
966 | | - && of_node_get(np)) |
---|
| 1006 | + if (of_node_name_eq(np, name) && of_node_get(np)) |
---|
967 | 1007 | break; |
---|
968 | 1008 | of_node_put(from); |
---|
969 | 1009 | raw_spin_unlock_irqrestore(&devtree_lock, flags); |
---|
.. | .. |
---|
991 | 1031 | |
---|
992 | 1032 | raw_spin_lock_irqsave(&devtree_lock, flags); |
---|
993 | 1033 | for_each_of_allnodes_from(from, np) |
---|
994 | | - if (np->type && (of_node_cmp(np->type, type) == 0) |
---|
995 | | - && of_node_get(np)) |
---|
| 1034 | + if (__of_node_is_type(np, type) && of_node_get(np)) |
---|
996 | 1035 | break; |
---|
997 | 1036 | of_node_put(from); |
---|
998 | 1037 | raw_spin_unlock_irqrestore(&devtree_lock, flags); |
---|
.. | .. |
---|
1185 | 1224 | { |
---|
1186 | 1225 | struct device_node *np = NULL; |
---|
1187 | 1226 | unsigned long flags; |
---|
1188 | | - phandle masked_handle; |
---|
| 1227 | + u32 handle_hash; |
---|
1189 | 1228 | |
---|
1190 | 1229 | if (!handle) |
---|
1191 | 1230 | return NULL; |
---|
1192 | 1231 | |
---|
| 1232 | + handle_hash = of_phandle_cache_hash(handle); |
---|
| 1233 | + |
---|
1193 | 1234 | raw_spin_lock_irqsave(&devtree_lock, flags); |
---|
1194 | 1235 | |
---|
1195 | | - masked_handle = handle & phandle_cache_mask; |
---|
1196 | | - |
---|
1197 | | - if (phandle_cache) { |
---|
1198 | | - if (phandle_cache[masked_handle] && |
---|
1199 | | - handle == phandle_cache[masked_handle]->phandle) |
---|
1200 | | - np = phandle_cache[masked_handle]; |
---|
1201 | | - if (np && of_node_check_flag(np, OF_DETACHED)) { |
---|
1202 | | - WARN_ON(1); /* did not uncache np on node removal */ |
---|
1203 | | - of_node_put(np); |
---|
1204 | | - phandle_cache[masked_handle] = NULL; |
---|
1205 | | - np = NULL; |
---|
1206 | | - } |
---|
1207 | | - } |
---|
| 1236 | + if (phandle_cache[handle_hash] && |
---|
| 1237 | + handle == phandle_cache[handle_hash]->phandle) |
---|
| 1238 | + np = phandle_cache[handle_hash]; |
---|
1208 | 1239 | |
---|
1209 | 1240 | if (!np) { |
---|
1210 | 1241 | for_each_of_allnodes(np) |
---|
1211 | 1242 | if (np->phandle == handle && |
---|
1212 | 1243 | !of_node_check_flag(np, OF_DETACHED)) { |
---|
1213 | | - if (phandle_cache) { |
---|
1214 | | - /* will put when removed from cache */ |
---|
1215 | | - of_node_get(np); |
---|
1216 | | - phandle_cache[masked_handle] = np; |
---|
1217 | | - } |
---|
| 1244 | + phandle_cache[handle_hash] = np; |
---|
1218 | 1245 | break; |
---|
1219 | 1246 | } |
---|
1220 | 1247 | } |
---|
.. | .. |
---|
1247 | 1274 | int size; |
---|
1248 | 1275 | |
---|
1249 | 1276 | memset(it, 0, sizeof(*it)); |
---|
| 1277 | + |
---|
| 1278 | + /* |
---|
| 1279 | + * one of cell_count or cells_name must be provided to determine the |
---|
| 1280 | + * argument length. |
---|
| 1281 | + */ |
---|
| 1282 | + if (cell_count < 0 && !cells_name) |
---|
| 1283 | + return -EINVAL; |
---|
1250 | 1284 | |
---|
1251 | 1285 | list = of_get_property(np, list_name, &size); |
---|
1252 | 1286 | if (!list) |
---|
.. | .. |
---|
1297 | 1331 | |
---|
1298 | 1332 | if (of_property_read_u32(it->node, it->cells_name, |
---|
1299 | 1333 | &count)) { |
---|
1300 | | - pr_err("%pOF: could not get %s for %pOF\n", |
---|
1301 | | - it->parent, |
---|
1302 | | - it->cells_name, |
---|
1303 | | - it->node); |
---|
1304 | | - goto err; |
---|
| 1334 | + /* |
---|
| 1335 | + * If both cell_count and cells_name are given, |
---|
| 1336 | + * fall back to cell_count in absence |
---|
| 1337 | + * of the cells_name property |
---|
| 1338 | + */ |
---|
| 1339 | + if (it->cell_count >= 0) { |
---|
| 1340 | + count = it->cell_count; |
---|
| 1341 | + } else { |
---|
| 1342 | + pr_err("%pOF: could not get %s for %pOF\n", |
---|
| 1343 | + it->parent, |
---|
| 1344 | + it->cells_name, |
---|
| 1345 | + it->node); |
---|
| 1346 | + goto err; |
---|
| 1347 | + } |
---|
1305 | 1348 | } |
---|
1306 | 1349 | } else { |
---|
1307 | 1350 | count = it->cell_count; |
---|
.. | .. |
---|
1312 | 1355 | * property data length |
---|
1313 | 1356 | */ |
---|
1314 | 1357 | if (it->cur + count > it->list_end) { |
---|
1315 | | - pr_err("%pOF: arguments longer than property\n", |
---|
1316 | | - it->parent); |
---|
| 1358 | + if (it->cells_name) |
---|
| 1359 | + pr_err("%pOF: %s = %d found %td\n", |
---|
| 1360 | + it->parent, it->cells_name, |
---|
| 1361 | + count, it->list_end - it->cur); |
---|
| 1362 | + else |
---|
| 1363 | + pr_err("%pOF: phandle %s needs %d, found %td\n", |
---|
| 1364 | + it->parent, of_node_full_name(it->node), |
---|
| 1365 | + count, it->list_end - it->cur); |
---|
1317 | 1366 | goto err; |
---|
1318 | 1367 | } |
---|
1319 | 1368 | } |
---|
.. | .. |
---|
1349 | 1398 | |
---|
1350 | 1399 | return count; |
---|
1351 | 1400 | } |
---|
1352 | | -EXPORT_SYMBOL_GPL(of_phandle_iterator_args); |
---|
1353 | 1401 | |
---|
1354 | 1402 | static int __of_parse_phandle_with_args(const struct device_node *np, |
---|
1355 | 1403 | const char *list_name, |
---|
.. | .. |
---|
1465 | 1513 | const char *cells_name, int index, |
---|
1466 | 1514 | struct of_phandle_args *out_args) |
---|
1467 | 1515 | { |
---|
| 1516 | + int cell_count = -1; |
---|
| 1517 | + |
---|
1468 | 1518 | if (index < 0) |
---|
1469 | 1519 | return -EINVAL; |
---|
1470 | | - return __of_parse_phandle_with_args(np, list_name, cells_name, 0, |
---|
1471 | | - index, out_args); |
---|
| 1520 | + |
---|
| 1521 | + /* If cells_name is NULL we assume a cell count of 0 */ |
---|
| 1522 | + if (!cells_name) |
---|
| 1523 | + cell_count = 0; |
---|
| 1524 | + |
---|
| 1525 | + return __of_parse_phandle_with_args(np, list_name, cells_name, |
---|
| 1526 | + cell_count, index, out_args); |
---|
1472 | 1527 | } |
---|
1473 | 1528 | EXPORT_SYMBOL(of_parse_phandle_with_args); |
---|
1474 | 1529 | |
---|
.. | .. |
---|
1550 | 1605 | if (!pass_name) |
---|
1551 | 1606 | goto free; |
---|
1552 | 1607 | |
---|
1553 | | - ret = __of_parse_phandle_with_args(np, list_name, cells_name, 0, index, |
---|
| 1608 | + ret = __of_parse_phandle_with_args(np, list_name, cells_name, -1, index, |
---|
1554 | 1609 | out_args); |
---|
1555 | 1610 | if (ret) |
---|
1556 | 1611 | goto free; |
---|
.. | .. |
---|
1718 | 1773 | struct of_phandle_iterator it; |
---|
1719 | 1774 | int rc, cur_index = 0; |
---|
1720 | 1775 | |
---|
1721 | | - rc = of_phandle_iterator_init(&it, np, list_name, cells_name, 0); |
---|
| 1776 | + /* |
---|
| 1777 | + * If cells_name is NULL we assume a cell count of 0. This makes |
---|
| 1778 | + * counting the phandles trivial as each 32bit word in the list is a |
---|
| 1779 | + * phandle and no arguments are to consider. So we don't iterate through |
---|
| 1780 | + * the list but just use the length to determine the phandle count. |
---|
| 1781 | + */ |
---|
| 1782 | + if (!cells_name) { |
---|
| 1783 | + const __be32 *list; |
---|
| 1784 | + int size; |
---|
| 1785 | + |
---|
| 1786 | + list = of_get_property(np, list_name, &size); |
---|
| 1787 | + if (!list) |
---|
| 1788 | + return -ENOENT; |
---|
| 1789 | + |
---|
| 1790 | + return size / sizeof(*list); |
---|
| 1791 | + } |
---|
| 1792 | + |
---|
| 1793 | + rc = of_phandle_iterator_init(&it, np, list_name, cells_name, -1); |
---|
1722 | 1794 | if (rc) |
---|
1723 | 1795 | return rc; |
---|
1724 | 1796 | |
---|
.. | .. |
---|
1777 | 1849 | |
---|
1778 | 1850 | return rc; |
---|
1779 | 1851 | } |
---|
| 1852 | +EXPORT_SYMBOL_GPL(of_add_property); |
---|
1780 | 1853 | |
---|
1781 | 1854 | int __of_remove_property(struct device_node *np, struct property *prop) |
---|
1782 | 1855 | { |
---|
.. | .. |
---|
1829 | 1902 | |
---|
1830 | 1903 | return rc; |
---|
1831 | 1904 | } |
---|
| 1905 | +EXPORT_SYMBOL_GPL(of_remove_property); |
---|
1832 | 1906 | |
---|
1833 | 1907 | int __of_update_property(struct device_node *np, struct property *newprop, |
---|
1834 | 1908 | struct property **oldpropp) |
---|
.. | .. |
---|
2004 | 2078 | EXPORT_SYMBOL_GPL(of_alias_get_id); |
---|
2005 | 2079 | |
---|
2006 | 2080 | /** |
---|
| 2081 | + * of_alias_get_alias_list - Get alias list for the given device driver |
---|
| 2082 | + * @matches: Array of OF device match structures to search in |
---|
| 2083 | + * @stem: Alias stem of the given device_node |
---|
| 2084 | + * @bitmap: Bitmap field pointer |
---|
| 2085 | + * @nbits: Maximum number of alias IDs which can be recorded in bitmap |
---|
| 2086 | + * |
---|
| 2087 | + * The function travels the lookup table to record alias ids for the given |
---|
| 2088 | + * device match structures and alias stem. |
---|
| 2089 | + * |
---|
| 2090 | + * Return: 0 or -ENOSYS when !CONFIG_OF or |
---|
| 2091 | + * -EOVERFLOW if alias ID is greater then allocated nbits |
---|
| 2092 | + */ |
---|
| 2093 | +int of_alias_get_alias_list(const struct of_device_id *matches, |
---|
| 2094 | + const char *stem, unsigned long *bitmap, |
---|
| 2095 | + unsigned int nbits) |
---|
| 2096 | +{ |
---|
| 2097 | + struct alias_prop *app; |
---|
| 2098 | + int ret = 0; |
---|
| 2099 | + |
---|
| 2100 | + /* Zero bitmap field to make sure that all the time it is clean */ |
---|
| 2101 | + bitmap_zero(bitmap, nbits); |
---|
| 2102 | + |
---|
| 2103 | + mutex_lock(&of_mutex); |
---|
| 2104 | + pr_debug("%s: Looking for stem: %s\n", __func__, stem); |
---|
| 2105 | + list_for_each_entry(app, &aliases_lookup, link) { |
---|
| 2106 | + pr_debug("%s: stem: %s, id: %d\n", |
---|
| 2107 | + __func__, app->stem, app->id); |
---|
| 2108 | + |
---|
| 2109 | + if (strcmp(app->stem, stem) != 0) { |
---|
| 2110 | + pr_debug("%s: stem comparison didn't pass %s\n", |
---|
| 2111 | + __func__, app->stem); |
---|
| 2112 | + continue; |
---|
| 2113 | + } |
---|
| 2114 | + |
---|
| 2115 | + if (of_match_node(matches, app->np)) { |
---|
| 2116 | + pr_debug("%s: Allocated ID %d\n", __func__, app->id); |
---|
| 2117 | + |
---|
| 2118 | + if (app->id >= nbits) { |
---|
| 2119 | + pr_warn("%s: ID %d >= than bitmap field %d\n", |
---|
| 2120 | + __func__, app->id, nbits); |
---|
| 2121 | + ret = -EOVERFLOW; |
---|
| 2122 | + } else { |
---|
| 2123 | + set_bit(app->id, bitmap); |
---|
| 2124 | + } |
---|
| 2125 | + } |
---|
| 2126 | + } |
---|
| 2127 | + mutex_unlock(&of_mutex); |
---|
| 2128 | + |
---|
| 2129 | + return ret; |
---|
| 2130 | +} |
---|
| 2131 | +EXPORT_SYMBOL_GPL(of_alias_get_alias_list); |
---|
| 2132 | + |
---|
| 2133 | +/** |
---|
2007 | 2134 | * of_alias_get_highest_id - Get highest alias id for the given stem |
---|
2008 | 2135 | * @stem: Alias stem to be examined |
---|
2009 | 2136 | * |
---|
.. | .. |
---|
2074 | 2201 | /* OF on pmac has nodes instead of properties named "l2-cache" |
---|
2075 | 2202 | * beneath CPU nodes. |
---|
2076 | 2203 | */ |
---|
2077 | | - if (IS_ENABLED(CONFIG_PPC_PMAC) && !strcmp(np->type, "cpu")) |
---|
| 2204 | + if (IS_ENABLED(CONFIG_PPC_PMAC) && of_node_is_type(np, "cpu")) |
---|
2078 | 2205 | for_each_child_of_node(np, child) |
---|
2079 | | - if (!strcmp(child->type, "cache")) |
---|
| 2206 | + if (of_node_is_type(child, "cache")) |
---|
2080 | 2207 | return child; |
---|
2081 | 2208 | |
---|
2082 | 2209 | return NULL; |
---|
.. | .. |
---|
2106 | 2233 | |
---|
2107 | 2234 | return cache_level; |
---|
2108 | 2235 | } |
---|
| 2236 | + |
---|
/**
 * of_map_id - Translate an ID through a downstream mapping.
 * @np: root complex device node.
 * @id: device ID to map.
 * @map_name: property name of the map to use.
 * @map_mask_name: optional property name of the mask to use.
 * @target: optional pointer to a target device node.
 * @id_out: optional pointer to receive the translated ID.
 *
 * Given a device ID, look up the appropriate implementation-defined
 * platform ID and/or the target device which receives transactions on that
 * ID, as per the "iommu-map" and "msi-map" bindings. Either of @target or
 * @id_out may be NULL if only the other is required. If @target points to
 * a non-NULL device node pointer, only entries targeting that node will be
 * matched; if it points to a NULL value, it will receive the device node of
 * the first matching target phandle, with a reference held.
 *
 * Return: 0 on success or a standard error code on failure.
 */
int of_map_id(struct device_node *np, u32 id,
	       const char *map_name, const char *map_mask_name,
	       struct device_node **target, u32 *id_out)
{
	u32 map_mask, masked_id;
	int map_len;
	const __be32 *map = NULL;

	/* At least one output (node or translated ID) must be requested. */
	if (!np || !map_name || (!target && !id_out))
		return -EINVAL;

	map = of_get_property(np, map_name, &map_len);
	if (!map) {
		/* A caller wanting the target node needs a map to find it. */
		if (target)
			return -ENODEV;
		/* Otherwise, no map implies no translation */
		*id_out = id;
		return 0;
	}

	/* Each map entry is 4 cells: (id-base, phandle, out-base, length). */
	if (!map_len || map_len % (4 * sizeof(*map))) {
		pr_err("%pOF: Error: Bad %s length: %d\n", np,
			map_name, map_len);
		return -EINVAL;
	}

	/* The default is to select all bits. */
	map_mask = 0xffffffff;

	/*
	 * Can be overridden by "{iommu,msi}-map-mask" property.
	 * If of_property_read_u32() fails, the default is used.
	 */
	if (map_mask_name)
		of_property_read_u32(np, map_mask_name, &map_mask);

	masked_id = map_mask & id;
	for ( ; map_len > 0; map_len -= 4 * sizeof(*map), map += 4) {
		struct device_node *phandle_node;
		u32 id_base = be32_to_cpup(map + 0);
		u32 phandle = be32_to_cpup(map + 1);
		u32 out_base = be32_to_cpup(map + 2);
		u32 id_len = be32_to_cpup(map + 3);

		/* An id-base outside the mask can never match a masked ID. */
		if (id_base & ~map_mask) {
			pr_err("%pOF: Invalid %s translation - %s-mask (0x%x) ignores id-base (0x%x)\n",
				np, map_name, map_name,
				map_mask, id_base);
			return -EFAULT;
		}

		if (masked_id < id_base || masked_id >= id_base + id_len)
			continue;

		phandle_node = of_find_node_by_phandle(phandle);
		if (!phandle_node)
			return -ENODEV;

		if (target) {
			/*
			 * Keep only the first matching target's reference;
			 * later matches must hit that same node or be skipped.
			 */
			if (*target)
				of_node_put(phandle_node);
			else
				*target = phandle_node;

			if (*target != phandle_node)
				continue;
		}

		if (id_out)
			*id_out = masked_id - id_base + out_base;

		pr_debug("%pOF: %s, using mask %08x, id-base: %08x, out-base: %08x, length: %08x, id: %08x -> %08x\n",
			np, map_name, map_mask, id_base, out_base,
			id_len, id, masked_id - id_base + out_base);
		return 0;
	}

	pr_info("%pOF: no %s translation for id 0x%x on %pOF\n", np, map_name,
		id, target && *target ? *target : NULL);

	/* Bypasses translation */
	if (id_out)
		*id_out = id;
	return 0;
}
EXPORT_SYMBOL_GPL(of_map_id);
---|