| .. | .. |
|---|
| 16 | 16 | |
|---|
| 17 | 17 | #define pr_fmt(fmt) "OF: " fmt |
|---|
| 18 | 18 | |
|---|
| 19 | +#include <linux/bitmap.h> |
|---|
| 19 | 20 | #include <linux/console.h> |
|---|
| 20 | 21 | #include <linux/ctype.h> |
|---|
| 21 | 22 | #include <linux/cpu.h> |
|---|
| .. | .. |
|---|
| 78 | 79 | } |
|---|
| 79 | 80 | EXPORT_SYMBOL(of_node_name_prefix); |
|---|
| 80 | 81 | |
|---|
| 81 | | -int of_n_addr_cells(struct device_node *np) |
|---|
| 82 | +static bool __of_node_is_type(const struct device_node *np, const char *type) |
|---|
| 83 | +{ |
|---|
| 84 | + const char *match = __of_get_property(np, "device_type", NULL); |
|---|
| 85 | + |
|---|
| 86 | + return np && match && type && !strcmp(match, type); |
|---|
| 87 | +} |
|---|
| 88 | + |
|---|
| 89 | +int of_bus_n_addr_cells(struct device_node *np) |
|---|
| 82 | 90 | { |
|---|
| 83 | 91 | u32 cells; |
|---|
| 84 | 92 | |
|---|
| 85 | | - do { |
|---|
| 86 | | - if (np->parent) |
|---|
| 87 | | - np = np->parent; |
|---|
| 93 | + for (; np; np = np->parent) |
|---|
| 88 | 94 | if (!of_property_read_u32(np, "#address-cells", &cells)) |
|---|
| 89 | 95 | return cells; |
|---|
| 90 | | - } while (np->parent); |
|---|
| 96 | + |
|---|
| 91 | 97 | /* No #address-cells property for the root node */ |
|---|
| 92 | 98 | return OF_ROOT_NODE_ADDR_CELLS_DEFAULT; |
|---|
| 93 | 99 | } |
|---|
| 100 | + |
|---|
| 101 | +int of_n_addr_cells(struct device_node *np) |
|---|
| 102 | +{ |
|---|
| 103 | + if (np->parent) |
|---|
| 104 | + np = np->parent; |
|---|
| 105 | + |
|---|
| 106 | + return of_bus_n_addr_cells(np); |
|---|
| 107 | +} |
|---|
| 94 | 108 | EXPORT_SYMBOL(of_n_addr_cells); |
|---|
| 95 | 109 | |
|---|
| 96 | | -int of_n_size_cells(struct device_node *np) |
|---|
| 110 | +int of_bus_n_size_cells(struct device_node *np) |
|---|
| 97 | 111 | { |
|---|
| 98 | 112 | u32 cells; |
|---|
| 99 | 113 | |
|---|
| 100 | | - do { |
|---|
| 101 | | - if (np->parent) |
|---|
| 102 | | - np = np->parent; |
|---|
| 114 | + for (; np; np = np->parent) |
|---|
| 103 | 115 | if (!of_property_read_u32(np, "#size-cells", &cells)) |
|---|
| 104 | 116 | return cells; |
|---|
| 105 | | - } while (np->parent); |
|---|
| 117 | + |
|---|
| 106 | 118 | /* No #size-cells property for the root node */ |
|---|
| 107 | 119 | return OF_ROOT_NODE_SIZE_CELLS_DEFAULT; |
|---|
| 120 | +} |
|---|
| 121 | + |
|---|
| 122 | +int of_n_size_cells(struct device_node *np) |
|---|
| 123 | +{ |
|---|
| 124 | + if (np->parent) |
|---|
| 125 | + np = np->parent; |
|---|
| 126 | + |
|---|
| 127 | + return of_bus_n_size_cells(np); |
|---|
| 108 | 128 | } |
|---|
| 109 | 129 | EXPORT_SYMBOL(of_n_size_cells); |
|---|
| 110 | 130 | |
|---|
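A minimal usage sketch of the distinction introduced above, assuming an illustrative `show_cells()` helper that is not part of the patch: the new `of_bus_n_addr_cells()`/`of_bus_n_size_cells()` start the lookup at the node itself, while `of_n_addr_cells()`/`of_n_size_cells()` keep the historical behaviour of starting at the parent.

```c
#include <linux/of.h>
#include <linux/printk.h>

/*
 * Illustrative helper: of_bus_n_addr_cells()/of_bus_n_size_cells() honour
 * the node's own "#address-cells"/"#size-cells" first (how its children
 * are addressed), while of_n_addr_cells()/of_n_size_cells() begin at the
 * parent (how the node itself is addressed within its bus).
 */
static void show_cells(struct device_node *bus_np)
{
	pr_info("%pOF: children use %d/%d cells, node itself uses %d/%d cells\n",
		bus_np,
		of_bus_n_addr_cells(bus_np), of_bus_n_size_cells(bus_np),
		of_n_addr_cells(bus_np), of_n_size_cells(bus_np));
}
```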
| .. | .. |
|---|
| 115 | 135 | } |
|---|
| 116 | 136 | #endif |
|---|
| 117 | 137 | |
|---|
| 118 | | -/* |
|---|
| 119 | | - * Assumptions behind phandle_cache implementation: |
|---|
| 120 | | - * - phandle property values are in a contiguous range of 1..n |
|---|
| 121 | | - * |
|---|
| 122 | | - * If the assumptions do not hold, then |
|---|
| 123 | | - * - the phandle lookup overhead reduction provided by the cache |
|---|
| 124 | | - * will likely be less |
|---|
| 125 | | - */ |
|---|
| 138 | +#define OF_PHANDLE_CACHE_BITS 7 |
|---|
| 139 | +#define OF_PHANDLE_CACHE_SZ BIT(OF_PHANDLE_CACHE_BITS) |
|---|
| 126 | 140 | |
|---|
| 127 | | -static struct device_node **phandle_cache; |
|---|
| 128 | | -static u32 phandle_cache_mask; |
|---|
| 141 | +static struct device_node *phandle_cache[OF_PHANDLE_CACHE_SZ]; |
|---|
| 142 | + |
|---|
| 143 | +static u32 of_phandle_cache_hash(phandle handle) |
|---|
| 144 | +{ |
|---|
| 145 | + return hash_32(handle, OF_PHANDLE_CACHE_BITS); |
|---|
| 146 | +} |
|---|
| 129 | 147 | |
|---|
| 130 | 148 | /* |
|---|
| 131 | 149 | * Caller must hold devtree_lock. |
|---|
| 132 | 150 | */ |
|---|
| 133 | | -static void __of_free_phandle_cache(void) |
|---|
| 151 | +void __of_phandle_cache_inv_entry(phandle handle) |
|---|
| 134 | 152 | { |
|---|
| 135 | | - u32 cache_entries = phandle_cache_mask + 1; |
|---|
| 136 | | - u32 k; |
|---|
| 137 | | - |
|---|
| 138 | | - if (!phandle_cache) |
|---|
| 139 | | - return; |
|---|
| 140 | | - |
|---|
| 141 | | - for (k = 0; k < cache_entries; k++) |
|---|
| 142 | | - of_node_put(phandle_cache[k]); |
|---|
| 143 | | - |
|---|
| 144 | | - kfree(phandle_cache); |
|---|
| 145 | | - phandle_cache = NULL; |
|---|
| 146 | | -} |
|---|
| 147 | | - |
|---|
| 148 | | -int of_free_phandle_cache(void) |
|---|
| 149 | | -{ |
|---|
| 150 | | - unsigned long flags; |
|---|
| 151 | | - |
|---|
| 152 | | - raw_spin_lock_irqsave(&devtree_lock, flags); |
|---|
| 153 | | - |
|---|
| 154 | | - __of_free_phandle_cache(); |
|---|
| 155 | | - |
|---|
| 156 | | - raw_spin_unlock_irqrestore(&devtree_lock, flags); |
|---|
| 157 | | - |
|---|
| 158 | | - return 0; |
|---|
| 159 | | -} |
|---|
| 160 | | -#if !defined(CONFIG_MODULES) |
|---|
| 161 | | -late_initcall_sync(of_free_phandle_cache); |
|---|
| 162 | | -#endif |
|---|
| 163 | | - |
|---|
| 164 | | -/* |
|---|
| 165 | | - * Caller must hold devtree_lock. |
|---|
| 166 | | - */ |
|---|
| 167 | | -void __of_free_phandle_cache_entry(phandle handle) |
|---|
| 168 | | -{ |
|---|
| 169 | | - phandle masked_handle; |
|---|
| 153 | + u32 handle_hash; |
|---|
| 170 | 154 | struct device_node *np; |
|---|
| 171 | 155 | |
|---|
| 172 | 156 | if (!handle) |
|---|
| 173 | 157 | return; |
|---|
| 174 | 158 | |
|---|
| 175 | | - masked_handle = handle & phandle_cache_mask; |
|---|
| 159 | + handle_hash = of_phandle_cache_hash(handle); |
|---|
| 176 | 160 | |
|---|
| 177 | | - if (phandle_cache) { |
|---|
| 178 | | - np = phandle_cache[masked_handle]; |
|---|
| 179 | | - if (np && handle == np->phandle) { |
|---|
| 180 | | - of_node_put(np); |
|---|
| 181 | | - phandle_cache[masked_handle] = NULL; |
|---|
| 182 | | - } |
|---|
| 183 | | - } |
|---|
| 184 | | -} |
|---|
| 185 | | - |
|---|
| 186 | | -void of_populate_phandle_cache(void) |
|---|
| 187 | | -{ |
|---|
| 188 | | - unsigned long flags; |
|---|
| 189 | | - u32 cache_entries; |
|---|
| 190 | | - struct device_node *np; |
|---|
| 191 | | - u32 phandles = 0; |
|---|
| 192 | | - |
|---|
| 193 | | - raw_spin_lock_irqsave(&devtree_lock, flags); |
|---|
| 194 | | - |
|---|
| 195 | | - __of_free_phandle_cache(); |
|---|
| 196 | | - |
|---|
| 197 | | - for_each_of_allnodes(np) |
|---|
| 198 | | - if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL) |
|---|
| 199 | | - phandles++; |
|---|
| 200 | | - |
|---|
| 201 | | - if (!phandles) |
|---|
| 202 | | - goto out; |
|---|
| 203 | | - |
|---|
| 204 | | - cache_entries = roundup_pow_of_two(phandles); |
|---|
| 205 | | - phandle_cache_mask = cache_entries - 1; |
|---|
| 206 | | - |
|---|
| 207 | | - phandle_cache = kcalloc(cache_entries, sizeof(*phandle_cache), |
|---|
| 208 | | - GFP_ATOMIC); |
|---|
| 209 | | - if (!phandle_cache) |
|---|
| 210 | | - goto out; |
|---|
| 211 | | - |
|---|
| 212 | | - for_each_of_allnodes(np) |
|---|
| 213 | | - if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL) { |
|---|
| 214 | | - of_node_get(np); |
|---|
| 215 | | - phandle_cache[np->phandle & phandle_cache_mask] = np; |
|---|
| 216 | | - } |
|---|
| 217 | | - |
|---|
| 218 | | -out: |
|---|
| 219 | | - raw_spin_unlock_irqrestore(&devtree_lock, flags); |
|---|
| 161 | + np = phandle_cache[handle_hash]; |
|---|
| 162 | + if (np && handle == np->phandle) |
|---|
| 163 | + phandle_cache[handle_hash] = NULL; |
|---|
| 220 | 164 | } |
|---|
| 221 | 165 | |
|---|
| 222 | 166 | void __init of_core_init(void) |
|---|
| 223 | 167 | { |
|---|
| 224 | 168 | struct device_node *np; |
|---|
| 225 | 169 | |
|---|
| 226 | | - of_populate_phandle_cache(); |
|---|
| 227 | 170 | |
|---|
| 228 | 171 | /* Create the kset, and register existing nodes */ |
|---|
| 229 | 172 | mutex_lock(&of_mutex); |
|---|
| .. | .. |
|---|
| 233 | 176 | pr_err("failed to register existing nodes\n"); |
|---|
| 234 | 177 | return; |
|---|
| 235 | 178 | } |
|---|
| 236 | | - for_each_of_allnodes(np) |
|---|
| 179 | + for_each_of_allnodes(np) { |
|---|
| 237 | 180 | __of_attach_node_sysfs(np); |
|---|
| 181 | + if (np->phandle && !phandle_cache[of_phandle_cache_hash(np->phandle)]) |
|---|
| 182 | + phandle_cache[of_phandle_cache_hash(np->phandle)] = np; |
|---|
| 183 | + } |
|---|
| 238 | 184 | mutex_unlock(&of_mutex); |
|---|
| 239 | 185 | |
|---|
| 240 | 186 | /* Symlink in /proc as required by userspace ABI */ |
|---|
| .. | .. |
|---|
| 373 | 319 | |
|---|
| 374 | 320 | ac = of_n_addr_cells(cpun); |
|---|
| 375 | 321 | cell = of_get_property(cpun, prop_name, &prop_len); |
|---|
| 322 | + if (!cell && !ac && arch_match_cpu_phys_id(cpu, 0)) |
|---|
| 323 | + return true; |
|---|
| 376 | 324 | if (!cell || !ac) |
|---|
| 377 | 325 | return false; |
|---|
| 378 | 326 | prop_len /= sizeof(*cell) * ac; |
|---|
| .. | .. |
|---|
| 433 | 381 | { |
|---|
| 434 | 382 | struct device_node *cpun; |
|---|
| 435 | 383 | |
|---|
| 436 | | - for_each_node_by_type(cpun, "cpu") { |
|---|
| 384 | + for_each_of_cpu_node(cpun) { |
|---|
| 437 | 385 | if (arch_find_n_match_cpu_physical_id(cpun, cpu, thread)) |
|---|
| 438 | 386 | return cpun; |
|---|
| 439 | 387 | } |
|---|
| .. | .. |
|---|
| 466 | 414 | return -ENODEV; |
|---|
| 467 | 415 | } |
|---|
| 468 | 416 | EXPORT_SYMBOL(of_cpu_node_to_id); |
|---|
| 417 | + |
|---|
| 418 | +/** |
|---|
| 419 | + * of_get_cpu_state_node - Get CPU's idle state node at the given index |
|---|
| 420 | + * |
|---|
| 421 | + * @cpu_node: The device node for the CPU |
|---|
| 422 | + * @index: The index in the list of the idle states |
|---|
| 423 | + * |
|---|
| 424 | + * Two generic methods can be used to describe a CPU's idle states, either via |
|---|
| 425 | + * a flattened description through the "cpu-idle-states" binding or via the |
|---|
| 426 | + * hierarchical layout, using the "power-domains" and the "domain-idle-states" |
|---|
| 427 | + * bindings. This function checks for both and returns the idle state node for |
|---|
| 428 | + * the requested index. |
|---|
| 429 | + * |
|---|
| 430 | + * In case an idle state node is found at @index, the refcount is incremented |
|---|
| 431 | + * for it, so call of_node_put() on it when done. Returns NULL if not found. |
|---|
| 432 | + */ |
|---|
| 433 | +struct device_node *of_get_cpu_state_node(struct device_node *cpu_node, |
|---|
| 434 | + int index) |
|---|
| 435 | +{ |
|---|
| 436 | + struct of_phandle_args args; |
|---|
| 437 | + int err; |
|---|
| 438 | + |
|---|
| 439 | + err = of_parse_phandle_with_args(cpu_node, "power-domains", |
|---|
| 440 | + "#power-domain-cells", 0, &args); |
|---|
| 441 | + if (!err) { |
|---|
| 442 | + struct device_node *state_node = |
|---|
| 443 | + of_parse_phandle(args.np, "domain-idle-states", index); |
|---|
| 444 | + |
|---|
| 445 | + of_node_put(args.np); |
|---|
| 446 | + if (state_node) |
|---|
| 447 | + return state_node; |
|---|
| 448 | + } |
|---|
| 449 | + |
|---|
| 450 | + return of_parse_phandle(cpu_node, "cpu-idle-states", index); |
|---|
| 451 | +} |
|---|
| 452 | +EXPORT_SYMBOL(of_get_cpu_state_node); |
|---|
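A hedged usage sketch for the new helper; only `of_get_cpu_state_node()` itself comes from the patch, the `count_cpu_idle_states()` wrapper is illustrative.

```c
#include <linux/of.h>

/*
 * Illustrative only: count the idle states described for one CPU node,
 * whichever of the two bindings ("cpu-idle-states" or the hierarchical
 * "power-domains"/"domain-idle-states" layout) the DT uses.
 */
static int count_cpu_idle_states(struct device_node *cpu_node)
{
	struct device_node *state;
	int i = 0;

	while ((state = of_get_cpu_state_node(cpu_node, i))) {
		of_node_put(state);	/* the helper returns a counted reference */
		i++;
	}

	return i;
}
```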
| 469 | 453 | |
|---|
| 470 | 454 | /** |
|---|
| 471 | 455 | * __of_device_is_compatible() - Check if the node matches given constraints |
|---|
| .. | .. |
|---|
| 520 | 504 | |
|---|
| 521 | 505 | /* Matching type is better than matching name */ |
|---|
| 522 | 506 | if (type && type[0]) { |
|---|
| 523 | | - if (!device->type || of_node_cmp(type, device->type)) |
|---|
| 507 | + if (!__of_node_is_type(device, type)) |
|---|
| 524 | 508 | return 0; |
|---|
| 525 | 509 | score += 2; |
|---|
| 526 | 510 | } |
|---|
| 527 | 511 | |
|---|
| 528 | 512 | /* Matching name is a bit better than not */ |
|---|
| 529 | 513 | if (name && name[0]) { |
|---|
| 530 | | - if (!device->name || of_node_cmp(name, device->name)) |
|---|
| 514 | + if (!of_node_name_eq(device, name)) |
|---|
| 531 | 515 | return 0; |
|---|
| 532 | 516 | score++; |
|---|
| 533 | 517 | } |
|---|
| .. | .. |
|---|
| 788 | 772 | EXPORT_SYMBOL(of_get_next_available_child); |
|---|
| 789 | 773 | |
|---|
| 790 | 774 | /** |
|---|
| 775 | + * of_get_next_cpu_node - Iterate on cpu nodes |
|---|
| 776 | + * @prev: previous child of the /cpus node, or NULL to get first |
|---|
| 777 | + * |
|---|
| 778 | + * Returns a cpu node pointer with refcount incremented, use of_node_put() |
|---|
| 779 | + * on it when done. Returns NULL when prev is the last child. Decrements |
|---|
| 780 | + * the refcount of prev. |
|---|
| 781 | + */ |
|---|
| 782 | +struct device_node *of_get_next_cpu_node(struct device_node *prev) |
|---|
| 783 | +{ |
|---|
| 784 | + struct device_node *next = NULL; |
|---|
| 785 | + unsigned long flags; |
|---|
| 786 | + struct device_node *node; |
|---|
| 787 | + |
|---|
| 788 | + if (!prev) |
|---|
| 789 | + node = of_find_node_by_path("/cpus"); |
|---|
| 790 | + |
|---|
| 791 | + raw_spin_lock_irqsave(&devtree_lock, flags); |
|---|
| 792 | + if (prev) |
|---|
| 793 | + next = prev->sibling; |
|---|
| 794 | + else if (node) { |
|---|
| 795 | + next = node->child; |
|---|
| 796 | + of_node_put(node); |
|---|
| 797 | + } |
|---|
| 798 | + for (; next; next = next->sibling) { |
|---|
| 799 | + if (!(of_node_name_eq(next, "cpu") || |
|---|
| 800 | + __of_node_is_type(next, "cpu"))) |
|---|
| 801 | + continue; |
|---|
| 802 | + if (of_node_get(next)) |
|---|
| 803 | + break; |
|---|
| 804 | + } |
|---|
| 805 | + of_node_put(prev); |
|---|
| 806 | + raw_spin_unlock_irqrestore(&devtree_lock, flags); |
|---|
| 807 | + return next; |
|---|
| 808 | +} |
|---|
| 809 | +EXPORT_SYMBOL(of_get_next_cpu_node); |
|---|
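The new iterator is typically consumed through a `for_each_of_cpu_node()` macro, assumed here to come from the matching `<linux/of.h>` change wrapping `of_get_next_cpu_node()`; a minimal sketch:

```c
#include <linux/of.h>

/*
 * Illustrative only: count the CPU nodes found under /cpus, matched either
 * by node name ("cpu@N") or by device_type = "cpu". The iterator macro
 * handles the of_node_put() of the previous node on each step.
 */
static unsigned int count_dt_cpus(void)
{
	struct device_node *cpu;
	unsigned int n = 0;

	for_each_of_cpu_node(cpu)
		n++;

	return n;
}
```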
| 810 | + |
|---|
| 811 | +/** |
|---|
| 791 | 812 | * of_get_compatible_child - Find compatible child node |
|---|
| 792 | 813 | * @parent: parent node |
|---|
| 793 | 814 | * @compatible: compatible string |
|---|
| .. | .. |
|---|
| 829 | 850 | struct device_node *child; |
|---|
| 830 | 851 | |
|---|
| 831 | 852 | for_each_child_of_node(node, child) |
|---|
| 832 | | - if (child->name && (of_node_cmp(child->name, name) == 0)) |
|---|
| 853 | + if (of_node_name_eq(child, name)) |
|---|
| 833 | 854 | break; |
|---|
| 834 | 855 | return child; |
|---|
| 835 | 856 | } |
|---|
| .. | .. |
|---|
| 955 | 976 | |
|---|
| 956 | 977 | raw_spin_lock_irqsave(&devtree_lock, flags); |
|---|
| 957 | 978 | for_each_of_allnodes_from(from, np) |
|---|
| 958 | | - if (np->name && (of_node_cmp(np->name, name) == 0) |
|---|
| 959 | | - && of_node_get(np)) |
|---|
| 979 | + if (of_node_name_eq(np, name) && of_node_get(np)) |
|---|
| 960 | 980 | break; |
|---|
| 961 | 981 | of_node_put(from); |
|---|
| 962 | 982 | raw_spin_unlock_irqrestore(&devtree_lock, flags); |
|---|
| .. | .. |
|---|
| 984 | 1004 | |
|---|
| 985 | 1005 | raw_spin_lock_irqsave(&devtree_lock, flags); |
|---|
| 986 | 1006 | for_each_of_allnodes_from(from, np) |
|---|
| 987 | | - if (np->type && (of_node_cmp(np->type, type) == 0) |
|---|
| 988 | | - && of_node_get(np)) |
|---|
| 1007 | + if (__of_node_is_type(np, type) && of_node_get(np)) |
|---|
| 989 | 1008 | break; |
|---|
| 990 | 1009 | of_node_put(from); |
|---|
| 991 | 1010 | raw_spin_unlock_irqrestore(&devtree_lock, flags); |
|---|
| .. | .. |
|---|
| 1178 | 1197 | { |
|---|
| 1179 | 1198 | struct device_node *np = NULL; |
|---|
| 1180 | 1199 | unsigned long flags; |
|---|
| 1181 | | - phandle masked_handle; |
|---|
| 1200 | + u32 handle_hash; |
|---|
| 1182 | 1201 | |
|---|
| 1183 | 1202 | if (!handle) |
|---|
| 1184 | 1203 | return NULL; |
|---|
| 1185 | 1204 | |
|---|
| 1205 | + handle_hash = of_phandle_cache_hash(handle); |
|---|
| 1206 | + |
|---|
| 1186 | 1207 | raw_spin_lock_irqsave(&devtree_lock, flags); |
|---|
| 1187 | 1208 | |
|---|
| 1188 | | - masked_handle = handle & phandle_cache_mask; |
|---|
| 1189 | | - |
|---|
| 1190 | | - if (phandle_cache) { |
|---|
| 1191 | | - if (phandle_cache[masked_handle] && |
|---|
| 1192 | | - handle == phandle_cache[masked_handle]->phandle) |
|---|
| 1193 | | - np = phandle_cache[masked_handle]; |
|---|
| 1194 | | - if (np && of_node_check_flag(np, OF_DETACHED)) { |
|---|
| 1195 | | - WARN_ON(1); /* did not uncache np on node removal */ |
|---|
| 1196 | | - of_node_put(np); |
|---|
| 1197 | | - phandle_cache[masked_handle] = NULL; |
|---|
| 1198 | | - np = NULL; |
|---|
| 1199 | | - } |
|---|
| 1200 | | - } |
|---|
| 1209 | + if (phandle_cache[handle_hash] && |
|---|
| 1210 | + handle == phandle_cache[handle_hash]->phandle) |
|---|
| 1211 | + np = phandle_cache[handle_hash]; |
|---|
| 1201 | 1212 | |
|---|
| 1202 | 1213 | if (!np) { |
|---|
| 1203 | 1214 | for_each_of_allnodes(np) |
|---|
| 1204 | 1215 | if (np->phandle == handle && |
|---|
| 1205 | 1216 | !of_node_check_flag(np, OF_DETACHED)) { |
|---|
| 1206 | | - if (phandle_cache) { |
|---|
| 1207 | | - /* will put when removed from cache */ |
|---|
| 1208 | | - of_node_get(np); |
|---|
| 1209 | | - phandle_cache[masked_handle] = np; |
|---|
| 1210 | | - } |
|---|
| 1217 | + phandle_cache[handle_hash] = np; |
|---|
| 1211 | 1218 | break; |
|---|
| 1212 | 1219 | } |
|---|
| 1213 | 1220 | } |
|---|
| .. | .. |
|---|
| 1240 | 1247 | int size; |
|---|
| 1241 | 1248 | |
|---|
| 1242 | 1249 | memset(it, 0, sizeof(*it)); |
|---|
| 1250 | + |
|---|
| 1251 | + /* |
|---|
| 1252 | + * one of cell_count or cells_name must be provided to determine the |
|---|
| 1253 | + * argument length. |
|---|
| 1254 | + */ |
|---|
| 1255 | + if (cell_count < 0 && !cells_name) |
|---|
| 1256 | + return -EINVAL; |
|---|
| 1243 | 1257 | |
|---|
| 1244 | 1258 | list = of_get_property(np, list_name, &size); |
|---|
| 1245 | 1259 | if (!list) |
|---|
| .. | .. |
|---|
| 1290 | 1304 | |
|---|
| 1291 | 1305 | if (of_property_read_u32(it->node, it->cells_name, |
|---|
| 1292 | 1306 | &count)) { |
|---|
| 1293 | | - pr_err("%pOF: could not get %s for %pOF\n", |
|---|
| 1294 | | - it->parent, |
|---|
| 1295 | | - it->cells_name, |
|---|
| 1296 | | - it->node); |
|---|
| 1297 | | - goto err; |
|---|
| 1307 | + /* |
|---|
| 1308 | + * If both cell_count and cells_name are given, |
|---|
| 1309 | + * fall back to cell_count in absence |
|---|
| 1310 | + * of the cells_name property |
|---|
| 1311 | + */ |
|---|
| 1312 | + if (it->cell_count >= 0) { |
|---|
| 1313 | + count = it->cell_count; |
|---|
| 1314 | + } else { |
|---|
| 1315 | + pr_err("%pOF: could not get %s for %pOF\n", |
|---|
| 1316 | + it->parent, |
|---|
| 1317 | + it->cells_name, |
|---|
| 1318 | + it->node); |
|---|
| 1319 | + goto err; |
|---|
| 1320 | + } |
|---|
| 1298 | 1321 | } |
|---|
| 1299 | 1322 | } else { |
|---|
| 1300 | 1323 | count = it->cell_count; |
|---|
| .. | .. |
|---|
| 1305 | 1328 | * property data length |
|---|
| 1306 | 1329 | */ |
|---|
| 1307 | 1330 | if (it->cur + count > it->list_end) { |
|---|
| 1308 | | - pr_err("%pOF: arguments longer than property\n", |
|---|
| 1309 | | - it->parent); |
|---|
| 1331 | + if (it->cells_name) |
|---|
| 1332 | + pr_err("%pOF: %s = %d found %td\n", |
|---|
| 1333 | + it->parent, it->cells_name, |
|---|
| 1334 | + count, it->list_end - it->cur); |
|---|
| 1335 | + else |
|---|
| 1336 | + pr_err("%pOF: phandle %s needs %d, found %td\n", |
|---|
| 1337 | + it->parent, of_node_full_name(it->node), |
|---|
| 1338 | + count, it->list_end - it->cur); |
|---|
| 1310 | 1339 | goto err; |
|---|
| 1311 | 1340 | } |
|---|
| 1312 | 1341 | } |
|---|
| .. | .. |
|---|
| 1342 | 1371 | |
|---|
| 1343 | 1372 | return count; |
|---|
| 1344 | 1373 | } |
|---|
| 1345 | | -EXPORT_SYMBOL_GPL(of_phandle_iterator_args); |
|---|
| 1346 | 1374 | |
|---|
| 1347 | 1375 | static int __of_parse_phandle_with_args(const struct device_node *np, |
|---|
| 1348 | 1376 | const char *list_name, |
|---|
| .. | .. |
|---|
| 1458 | 1486 | const char *cells_name, int index, |
|---|
| 1459 | 1487 | struct of_phandle_args *out_args) |
|---|
| 1460 | 1488 | { |
|---|
| 1489 | + int cell_count = -1; |
|---|
| 1490 | + |
|---|
| 1461 | 1491 | if (index < 0) |
|---|
| 1462 | 1492 | return -EINVAL; |
|---|
| 1463 | | - return __of_parse_phandle_with_args(np, list_name, cells_name, 0, |
|---|
| 1464 | | - index, out_args); |
|---|
| 1493 | + |
|---|
| 1494 | + /* If cells_name is NULL we assume a cell count of 0 */ |
|---|
| 1495 | + if (!cells_name) |
|---|
| 1496 | + cell_count = 0; |
|---|
| 1497 | + |
|---|
| 1498 | + return __of_parse_phandle_with_args(np, list_name, cells_name, |
|---|
| 1499 | + cell_count, index, out_args); |
|---|
| 1465 | 1500 | } |
|---|
| 1466 | 1501 | EXPORT_SYMBOL(of_parse_phandle_with_args); |
|---|
| 1467 | 1502 | |
|---|
| .. | .. |
|---|
| 1543 | 1578 | if (!pass_name) |
|---|
| 1544 | 1579 | goto free; |
|---|
| 1545 | 1580 | |
|---|
| 1546 | | - ret = __of_parse_phandle_with_args(np, list_name, cells_name, 0, index, |
|---|
| 1581 | + ret = __of_parse_phandle_with_args(np, list_name, cells_name, -1, index, |
|---|
| 1547 | 1582 | out_args); |
|---|
| 1548 | 1583 | if (ret) |
|---|
| 1549 | 1584 | goto free; |
|---|
| .. | .. |
|---|
| 1711 | 1746 | struct of_phandle_iterator it; |
|---|
| 1712 | 1747 | int rc, cur_index = 0; |
|---|
| 1713 | 1748 | |
|---|
| 1714 | | - rc = of_phandle_iterator_init(&it, np, list_name, cells_name, 0); |
|---|
| 1749 | + /* |
|---|
| 1750 | + * If cells_name is NULL we assume a cell count of 0. This makes |
|---|
| 1751 | + * counting the phandles trivial as each 32bit word in the list is a |
|---|
| 1752 | + * phandle and there are no arguments to consider. So we don't iterate through |
|---|
| 1753 | + * the list but just use the length to determine the phandle count. |
|---|
| 1754 | + */ |
|---|
| 1755 | + if (!cells_name) { |
|---|
| 1756 | + const __be32 *list; |
|---|
| 1757 | + int size; |
|---|
| 1758 | + |
|---|
| 1759 | + list = of_get_property(np, list_name, &size); |
|---|
| 1760 | + if (!list) |
|---|
| 1761 | + return -ENOENT; |
|---|
| 1762 | + |
|---|
| 1763 | + return size / sizeof(*list); |
|---|
| 1764 | + } |
|---|
| 1765 | + |
|---|
| 1766 | + rc = of_phandle_iterator_init(&it, np, list_name, cells_name, -1); |
|---|
| 1715 | 1767 | if (rc) |
|---|
| 1716 | 1768 | return rc; |
|---|
| 1717 | 1769 | |
|---|
| .. | .. |
|---|
| 1770 | 1822 | |
|---|
| 1771 | 1823 | return rc; |
|---|
| 1772 | 1824 | } |
|---|
| 1825 | +EXPORT_SYMBOL_GPL(of_add_property); |
|---|
| 1773 | 1826 | |
|---|
| 1774 | 1827 | int __of_remove_property(struct device_node *np, struct property *prop) |
|---|
| 1775 | 1828 | { |
|---|
| .. | .. |
|---|
| 1822 | 1875 | |
|---|
| 1823 | 1876 | return rc; |
|---|
| 1824 | 1877 | } |
|---|
| 1878 | +EXPORT_SYMBOL_GPL(of_remove_property); |
|---|
| 1825 | 1879 | |
|---|
| 1826 | 1880 | int __of_update_property(struct device_node *np, struct property *newprop, |
|---|
| 1827 | 1881 | struct property **oldpropp) |
|---|
| .. | .. |
|---|
| 1997 | 2051 | EXPORT_SYMBOL_GPL(of_alias_get_id); |
|---|
| 1998 | 2052 | |
|---|
| 1999 | 2053 | /** |
|---|
| 2054 | + * of_alias_get_alias_list - Get alias list for the given device driver |
|---|
| 2055 | + * @matches: Array of OF device match structures to search in |
|---|
| 2056 | + * @stem: Alias stem of the given device_node |
|---|
| 2057 | + * @bitmap: Bitmap field pointer |
|---|
| 2058 | + * @nbits: Maximum number of alias IDs which can be recorded in bitmap |
|---|
| 2059 | + * |
|---|
| 2060 | + * The function traverses the lookup table to record alias IDs for the given |
|---|
| 2061 | + * device match structures and alias stem. |
|---|
| 2062 | + * |
|---|
| 2063 | + * Return: 0 or -ENOSYS when !CONFIG_OF or |
|---|
| 2064 | + * -EOVERFLOW if alias ID is greater than allocated nbits |
|---|
| 2065 | + */ |
|---|
| 2066 | +int of_alias_get_alias_list(const struct of_device_id *matches, |
|---|
| 2067 | + const char *stem, unsigned long *bitmap, |
|---|
| 2068 | + unsigned int nbits) |
|---|
| 2069 | +{ |
|---|
| 2070 | + struct alias_prop *app; |
|---|
| 2071 | + int ret = 0; |
|---|
| 2072 | + |
|---|
| 2073 | + /* Zero the bitmap field so that it always starts out clean */ |
|---|
| 2074 | + bitmap_zero(bitmap, nbits); |
|---|
| 2075 | + |
|---|
| 2076 | + mutex_lock(&of_mutex); |
|---|
| 2077 | + pr_debug("%s: Looking for stem: %s\n", __func__, stem); |
|---|
| 2078 | + list_for_each_entry(app, &aliases_lookup, link) { |
|---|
| 2079 | + pr_debug("%s: stem: %s, id: %d\n", |
|---|
| 2080 | + __func__, app->stem, app->id); |
|---|
| 2081 | + |
|---|
| 2082 | + if (strcmp(app->stem, stem) != 0) { |
|---|
| 2083 | + pr_debug("%s: stem comparison didn't pass %s\n", |
|---|
| 2084 | + __func__, app->stem); |
|---|
| 2085 | + continue; |
|---|
| 2086 | + } |
|---|
| 2087 | + |
|---|
| 2088 | + if (of_match_node(matches, app->np)) { |
|---|
| 2089 | + pr_debug("%s: Allocated ID %d\n", __func__, app->id); |
|---|
| 2090 | + |
|---|
| 2091 | + if (app->id >= nbits) { |
|---|
| 2092 | + pr_warn("%s: ID %d >= than bitmap field %d\n", |
|---|
| 2093 | + __func__, app->id, nbits); |
|---|
| 2094 | + ret = -EOVERFLOW; |
|---|
| 2095 | + } else { |
|---|
| 2096 | + set_bit(app->id, bitmap); |
|---|
| 2097 | + } |
|---|
| 2098 | + } |
|---|
| 2099 | + } |
|---|
| 2100 | + mutex_unlock(&of_mutex); |
|---|
| 2101 | + |
|---|
| 2102 | + return ret; |
|---|
| 2103 | +} |
|---|
| 2104 | +EXPORT_SYMBOL_GPL(of_alias_get_alias_list); |
|---|
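A hedged sketch of how a driver might consume this helper; the "serial" stem, the port limit, and the `of_device_id` table are placeholders, not taken from the patch.

```c
#include <linux/bitmap.h>
#include <linux/of.h>
#include <linux/printk.h>

#define MY_MAX_PORTS 32				/* illustrative limit */

static DECLARE_BITMAP(my_ports_in_use, MY_MAX_PORTS);

/*
 * Illustrative only: mark every "serialN" alias that resolves to one of
 * this driver's compatibles as taken, so dynamically assigned IDs can
 * avoid colliding with fixed aliases.
 */
static int my_reserve_alias_ids(const struct of_device_id *matches)
{
	int ret;

	ret = of_alias_get_alias_list(matches, "serial",
				      my_ports_in_use, MY_MAX_PORTS);
	if (ret == -EOVERFLOW)
		pr_warn("an alias id exceeds %d, ignoring the excess\n",
			MY_MAX_PORTS);

	return ret;
}
```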
| 2105 | + |
|---|
| 2106 | +/** |
|---|
| 2000 | 2107 | * of_alias_get_highest_id - Get highest alias id for the given stem |
|---|
| 2001 | 2108 | * @stem: Alias stem to be examined |
|---|
| 2002 | 2109 | * |
|---|
| .. | .. |
|---|
| 2067 | 2174 | /* OF on pmac has nodes instead of properties named "l2-cache" |
|---|
| 2068 | 2175 | * beneath CPU nodes. |
|---|
| 2069 | 2176 | */ |
|---|
| 2070 | | - if (IS_ENABLED(CONFIG_PPC_PMAC) && !strcmp(np->type, "cpu")) |
|---|
| 2177 | + if (IS_ENABLED(CONFIG_PPC_PMAC) && of_node_is_type(np, "cpu")) |
|---|
| 2071 | 2178 | for_each_child_of_node(np, child) |
|---|
| 2072 | | - if (!strcmp(child->type, "cache")) |
|---|
| 2179 | + if (of_node_is_type(child, "cache")) |
|---|
| 2073 | 2180 | return child; |
|---|
| 2074 | 2181 | |
|---|
| 2075 | 2182 | return NULL; |
|---|
| .. | .. |
|---|
| 2099 | 2206 | |
|---|
| 2100 | 2207 | return cache_level; |
|---|
| 2101 | 2208 | } |
|---|
| 2209 | + |
|---|
| 2210 | +/** |
|---|
| 2211 | + * of_map_id - Translate an ID through a downstream mapping. |
|---|
| 2212 | + * @np: root complex device node. |
|---|
| 2213 | + * @id: device ID to map. |
|---|
| 2214 | + * @map_name: property name of the map to use. |
|---|
| 2215 | + * @map_mask_name: optional property name of the mask to use. |
|---|
| 2216 | + * @target: optional pointer to a target device node. |
|---|
| 2217 | + * @id_out: optional pointer to receive the translated ID. |
|---|
| 2218 | + * |
|---|
| 2219 | + * Given a device ID, look up the appropriate implementation-defined |
|---|
| 2220 | + * platform ID and/or the target device which receives transactions on that |
|---|
| 2221 | + * ID, as per the "iommu-map" and "msi-map" bindings. Either of @target or |
|---|
| 2222 | + * @id_out may be NULL if only the other is required. If @target points to |
|---|
| 2223 | + * a non-NULL device node pointer, only entries targeting that node will be |
|---|
| 2224 | + * matched; if it points to a NULL value, it will receive the device node of |
|---|
| 2225 | + * the first matching target phandle, with a reference held. |
|---|
| 2226 | + * |
|---|
| 2227 | + * Return: 0 on success or a standard error code on failure. |
|---|
| 2228 | + */ |
|---|
| 2229 | +int of_map_id(struct device_node *np, u32 id, |
|---|
| 2230 | + const char *map_name, const char *map_mask_name, |
|---|
| 2231 | + struct device_node **target, u32 *id_out) |
|---|
| 2232 | +{ |
|---|
| 2233 | + u32 map_mask, masked_id; |
|---|
| 2234 | + int map_len; |
|---|
| 2235 | + const __be32 *map = NULL; |
|---|
| 2236 | + |
|---|
| 2237 | + if (!np || !map_name || (!target && !id_out)) |
|---|
| 2238 | + return -EINVAL; |
|---|
| 2239 | + |
|---|
| 2240 | + map = of_get_property(np, map_name, &map_len); |
|---|
| 2241 | + if (!map) { |
|---|
| 2242 | + if (target) |
|---|
| 2243 | + return -ENODEV; |
|---|
| 2244 | + /* Otherwise, no map implies no translation */ |
|---|
| 2245 | + *id_out = id; |
|---|
| 2246 | + return 0; |
|---|
| 2247 | + } |
|---|
| 2248 | + |
|---|
| 2249 | + if (!map_len || map_len % (4 * sizeof(*map))) { |
|---|
| 2250 | + pr_err("%pOF: Error: Bad %s length: %d\n", np, |
|---|
| 2251 | + map_name, map_len); |
|---|
| 2252 | + return -EINVAL; |
|---|
| 2253 | + } |
|---|
| 2254 | + |
|---|
| 2255 | + /* The default is to select all bits. */ |
|---|
| 2256 | + map_mask = 0xffffffff; |
|---|
| 2257 | + |
|---|
| 2258 | + /* |
|---|
| 2259 | + * Can be overridden by "{iommu,msi}-map-mask" property. |
|---|
| 2260 | + * If of_property_read_u32() fails, the default is used. |
|---|
| 2261 | + */ |
|---|
| 2262 | + if (map_mask_name) |
|---|
| 2263 | + of_property_read_u32(np, map_mask_name, &map_mask); |
|---|
| 2264 | + |
|---|
| 2265 | + masked_id = map_mask & id; |
|---|
| 2266 | + for ( ; map_len > 0; map_len -= 4 * sizeof(*map), map += 4) { |
|---|
| 2267 | + struct device_node *phandle_node; |
|---|
| 2268 | + u32 id_base = be32_to_cpup(map + 0); |
|---|
| 2269 | + u32 phandle = be32_to_cpup(map + 1); |
|---|
| 2270 | + u32 out_base = be32_to_cpup(map + 2); |
|---|
| 2271 | + u32 id_len = be32_to_cpup(map + 3); |
|---|
| 2272 | + |
|---|
| 2273 | + if (id_base & ~map_mask) { |
|---|
| 2274 | + pr_err("%pOF: Invalid %s translation - %s-mask (0x%x) ignores id-base (0x%x)\n", |
|---|
| 2275 | + np, map_name, map_name, |
|---|
| 2276 | + map_mask, id_base); |
|---|
| 2277 | + return -EFAULT; |
|---|
| 2278 | + } |
|---|
| 2279 | + |
|---|
| 2280 | + if (masked_id < id_base || masked_id >= id_base + id_len) |
|---|
| 2281 | + continue; |
|---|
| 2282 | + |
|---|
| 2283 | + phandle_node = of_find_node_by_phandle(phandle); |
|---|
| 2284 | + if (!phandle_node) |
|---|
| 2285 | + return -ENODEV; |
|---|
| 2286 | + |
|---|
| 2287 | + if (target) { |
|---|
| 2288 | + if (*target) |
|---|
| 2289 | + of_node_put(phandle_node); |
|---|
| 2290 | + else |
|---|
| 2291 | + *target = phandle_node; |
|---|
| 2292 | + |
|---|
| 2293 | + if (*target != phandle_node) |
|---|
| 2294 | + continue; |
|---|
| 2295 | + } |
|---|
| 2296 | + |
|---|
| 2297 | + if (id_out) |
|---|
| 2298 | + *id_out = masked_id - id_base + out_base; |
|---|
| 2299 | + |
|---|
| 2300 | + pr_debug("%pOF: %s, using mask %08x, id-base: %08x, out-base: %08x, length: %08x, id: %08x -> %08x\n", |
|---|
| 2301 | + np, map_name, map_mask, id_base, out_base, |
|---|
| 2302 | + id_len, id, masked_id - id_base + out_base); |
|---|
| 2303 | + return 0; |
|---|
| 2304 | + } |
|---|
| 2305 | + |
|---|
| 2306 | + pr_info("%pOF: no %s translation for id 0x%x on %pOF\n", np, map_name, |
|---|
| 2307 | + id, target && *target ? *target : NULL); |
|---|
| 2308 | + |
|---|
| 2309 | + /* Bypasses translation */ |
|---|
| 2310 | + if (id_out) |
|---|
| 2311 | + *id_out = id; |
|---|
| 2312 | + return 0; |
|---|
| 2313 | +} |
|---|
| 2314 | +EXPORT_SYMBOL_GPL(of_map_id); |
|---|
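Finally, a hedged sketch of a typical `of_map_id()` call, translating a PCI requester ID through a host bridge's "msi-map"; the bridge node and the requester ID are assumptions for illustration only.

```c
#include <linux/of.h>
#include <linux/printk.h>

/*
 * Illustrative only: map a PCI requester ID (rid) through the bridge's
 * "msi-map"/"msi-map-mask" properties and report the MSI controller node
 * plus the translated device ID to use on it.
 */
static int example_map_rid(struct device_node *bridge, u32 rid)
{
	struct device_node *msi_np = NULL;
	u32 msi_id;
	int err;

	err = of_map_id(bridge, rid, "msi-map", "msi-map-mask",
			&msi_np, &msi_id);
	if (err)
		return err;

	pr_info("rid 0x%x -> %pOF, device id 0x%x\n", rid, msi_np, msi_id);
	of_node_put(msi_np);	/* of_map_id took a reference when it matched */
	return 0;
}
```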