@@ -20,56 +20,33 @@
 #include <linux/of_reserved_mem.h>
 #include <linux/sort.h>
 #include <linux/slab.h>
-#include <linux/kmemleak.h>
+#include <linux/memblock.h>
 
-#define MAX_RESERVED_REGIONS	32
+#define MAX_RESERVED_REGIONS	64
 static struct reserved_mem reserved_mem[MAX_RESERVED_REGIONS];
 static int reserved_mem_count;
 
-#if defined(CONFIG_HAVE_MEMBLOCK)
-#include <linux/memblock.h>
-int __init __weak early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
+static int __init early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
 	phys_addr_t align, phys_addr_t start, phys_addr_t end, bool nomap,
 	phys_addr_t *res_base)
 {
 	phys_addr_t base;
-	/*
-	 * We use __memblock_alloc_base() because memblock_alloc_base()
-	 * panic()s on allocation failure.
-	 */
+
 	end = !end ? MEMBLOCK_ALLOC_ANYWHERE : end;
-	base = __memblock_alloc_base(size, align, end);
+	align = !align ? SMP_CACHE_BYTES : align;
+	base = memblock_find_in_range(start, end, size, align);
 	if (!base)
 		return -ENOMEM;
 
-	/*
-	 * Check if the allocated region fits in to start..end window
-	 */
-	if (base < start) {
-		memblock_free(base, size);
-		return -ENOMEM;
-	}
-
 	*res_base = base;
-	if (nomap) {
-		kmemleak_ignore_phys(base);
+	if (nomap)
 		return memblock_remove(base, size);
-	}
-	return 0;
+
+	return memblock_reserve(base, size);
 }
-#else
-int __init __weak early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
-	phys_addr_t align, phys_addr_t start, phys_addr_t end, bool nomap,
-	phys_addr_t *res_base)
-{
-	pr_err("Reserved memory not supported, ignoring region 0x%llx%s\n",
-	       size, nomap ? " (nomap)" : "");
-	return -ENOSYS;
-}
-#endif
 
 /**
- * res_mem_save_node() - save fdt node for second pass initialization
+ * fdt_reserved_mem_save_node() - save fdt node for second pass initialization
  */
 void __init fdt_reserved_mem_save_node(unsigned long node, const char *uname,
 			       phys_addr_t base, phys_addr_t size)
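The reworked allocator honors both ends of the 'alloc-ranges' window up front: memblock_find_in_range() only searches inside [start, end], which is why the old post-allocation `base < start` check and its memblock_free() fallback could be dropped. A minimal sketch of how a region's properties map onto the helper's arguments, with a hypothetical node (names and addresses are illustrative, and since the helper is now static this could only live in the same file):

```c
#include <linux/memblock.h>

/*
 * Hypothetical node feeding the helper:
 *
 *	vendor_pool: pool@0 {
 *		size = <0x400000>;			// 4 MiB
 *		alignment = <0x100000>;			// 1 MiB
 *		alloc-ranges = <0x40000000 0x10000000>;	// base, length
 *	};
 */
static int __init example_alloc(phys_addr_t *base)
{
	return early_init_dt_alloc_reserved_memory_arch(
			0x400000,	/* size */
			0x100000,	/* alignment */
			0x40000000,	/* start of the alloc-ranges window */
			0x50000000,	/* end of the window (base + length) */
			false,		/* node carries no "no-map" property */
			base);
}
```

On success the returned range is aligned, lies inside the window, and has already been claimed via memblock_reserve(), or carved out entirely with memblock_remove() for no-map regions.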
@@ -77,7 +54,7 @@
 	struct reserved_mem *rmem = &reserved_mem[reserved_mem_count];
 
 	if (reserved_mem_count == ARRAY_SIZE(reserved_mem)) {
-		pr_err("not enough space all defined regions.\n");
+		pr_err("not enough space for all defined regions.\n");
 		return;
 	}
 
@@ -91,8 +68,8 @@
 }
 
 /**
- * res_mem_alloc_size() - allocate reserved memory described by 'size', 'align'
- *			  and 'alloc-ranges' properties
+ * __reserved_mem_alloc_size() - allocate reserved memory described by
+ *	'size', 'alignment' and 'alloc-ranges' properties.
  */
 static int __init __reserved_mem_alloc_size(unsigned long node,
 	const char *uname, phys_addr_t *res_base, phys_addr_t *res_size)
@@ -102,7 +79,7 @@
 	phys_addr_t base = 0, align = 0, size;
 	int len;
 	const __be32 *prop;
-	int nomap;
+	bool nomap;
 	int ret;
 
 	prop = of_get_flat_dt_prop(node, "size", &len);
@@ -115,8 +92,6 @@
 	}
 	size = dt_mem_next_cell(dt_root_size_cells, &prop);
 
-	nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;
-
 	prop = of_get_flat_dt_prop(node, "alignment", &len);
 	if (prop) {
 		if (len != dt_root_addr_cells * sizeof(__be32)) {
@@ -127,11 +102,13 @@
 		align = dt_mem_next_cell(dt_root_addr_cells, &prop);
 	}
 
+	nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;
+
 	/* Need adjust the alignment to satisfy the CMA requirement */
 	if (IS_ENABLED(CONFIG_CMA)
 	    && of_flat_dt_is_compatible(node, "shared-dma-pool")
 	    && of_get_flat_dt_prop(node, "reusable", NULL)
-	    && !of_get_flat_dt_prop(node, "no-map", NULL)) {
+	    && !nomap) {
 		unsigned long order =
 			max_t(unsigned long, MAX_ORDER - 1, pageblock_order);
 
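Moving the 'no-map' lookup after the 'alignment' parse keeps all inputs in hand before the CMA fixup, which can now test the cached `nomap` flag instead of re-walking the FDT. As a worked instance of the order computation, assuming 4 KiB pages and the common MAX_ORDER of 11 (both values are configuration dependent):

```c
/*
 * order = max(MAX_ORDER - 1, pageblock_order)
 *       = max(10, pageblock_order)            -> 10 on such a config
 *
 * The unchanged line that follows this hunk then raises the alignment:
 *
 *	align = max(align, (phys_addr_t)PAGE_SIZE << order);
 *
 * i.e. 4 KiB << 10 = 4 MiB, the granularity CMA expects of a
 * reusable shared-dma-pool's base address.
 */
```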
@@ -185,15 +162,16 @@
 }
 
 static const struct of_device_id __rmem_of_table_sentinel
-	__used __section(__reservedmem_of_table_end);
+	__used __section("__reservedmem_of_table_end");
 
 /**
- * res_mem_init_node() - call region specific reserved memory init code
+ * __reserved_mem_init_node() - call region specific reserved memory init code
  */
 static int __init __reserved_mem_init_node(struct reserved_mem *rmem)
 {
 	extern const struct of_device_id __reservedmem_of_table[];
 	const struct of_device_id *i;
+	int ret = -ENOENT;
 
 	for (i = __reservedmem_of_table; i < &__rmem_of_table_sentinel; i++) {
 		reservedmem_of_init_fn initfn = i->data;
@@ -202,13 +180,14 @@
 		if (!of_flat_dt_is_compatible(rmem->fdt_node, compat))
 			continue;
 
-		if (initfn(rmem) == 0) {
+		ret = initfn(rmem);
+		if (ret == 0) {
 			pr_info("initialized node %s, compatible id %s\n",
 				rmem->name, compat);
-			return 0;
+			break;
 		}
 	}
-	return -ENOENT;
+	return ret;
 }
 
 static int __init __rmem_cmp(const void *a, const void *b)
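Initializing `ret` to -ENOENT and updating it on every matching attempt lets a failing setup hook report its real error instead of the catch-all -ENOENT, which fdt_init_reserved_mem() later uses to tell "no handler matched" apart from "the handler failed". A minimal sketch of such a hook, with a hypothetical driver and compatible string, registered through the existing RESERVEDMEM_OF_DECLARE() machinery that populates __reservedmem_of_table:

```c
#include <linux/of_reserved_mem.h>
#include <linux/sizes.h>

/* Hypothetical setup hook: a non-zero return now propagates out of
 * __reserved_mem_init_node() rather than being folded into -ENOENT. */
static int __init my_pool_setup(struct reserved_mem *rmem)
{
	if (rmem->size < SZ_1M)
		return -EINVAL;	/* reaches fdt_init_reserved_mem() */

	pr_info("my-pool at %pa, size %pa\n", &rmem->base, &rmem->size);
	return 0;
}
RESERVEDMEM_OF_DECLARE(my_pool, "vendor,my-pool", my_pool_setup);
```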
@@ -262,7 +241,7 @@
 }
 
 /**
- * fdt_init_reserved_mem - allocate and init all saved reserved memory regions
+ * fdt_init_reserved_mem() - allocate and init all saved reserved memory regions
  */
 void __init fdt_init_reserved_mem(void)
 {
@@ -277,7 +256,9 @@
 		int len;
 		const __be32 *prop;
 		int err = 0;
+		bool nomap;
 
+		nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;
 		prop = of_get_flat_dt_prop(node, "phandle", &len);
 		if (!prop)
 			prop = of_get_flat_dt_prop(node, "linux,phandle", &len);
@@ -287,8 +268,16 @@
 		if (rmem->size == 0)
 			err = __reserved_mem_alloc_size(node, rmem->name,
 						 &rmem->base, &rmem->size);
-		if (err == 0)
-			__reserved_mem_init_node(rmem);
+		if (err == 0) {
+			err = __reserved_mem_init_node(rmem);
+			if (err != 0 && err != -ENOENT) {
+				pr_info("node %s compatible matching fail\n",
+					rmem->name);
+				memblock_free(rmem->base, rmem->size);
+				if (nomap)
+					memblock_add(rmem->base, rmem->size);
+			}
+		}
 	}
 }
 
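When region setup fails with a real error, the memory is now handed back instead of staying claimed forever. The rollback mirrors the two ways early_init_dt_alloc_reserved_memory_arch() took the range; condensed, with the pairing spelled out (the comment is mine, the calls are the ones in the hunk above):

```c
/*
 * Claim at allocation time:            Undo on init failure:
 *   no-map: memblock_remove(base, sz)   -> memblock_add(base, sz)
 *   else:   memblock_reserve(base, sz)  -> memblock_free(base, sz)
 */
memblock_free(rmem->base, rmem->size);		/* drop the reservation */
if (nomap)
	memblock_add(rmem->base, rmem->size);	/* re-add a removed range */
```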
@@ -344,6 +333,11 @@
 	if (!target)
 		return -ENODEV;
 
+	if (!of_device_is_available(target)) {
+		of_node_put(target);
+		return 0;
+	}
+
 	rmem = __find_rmem(target);
 	of_node_put(target);
 
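The availability check makes assignment a quiet no-op for a region whose node is disabled, instead of binding the consumer to a dead pool or failing its probe. A hypothetical fragment showing the resulting behavior (node and property values are illustrative):

```c
/*
 * reserved-memory {
 *	pool: pool@58000000 {
 *		compatible = "shared-dma-pool";
 *		reg = <0x58000000 0x400000>;
 *		status = "disabled";
 *	};
 * };
 *
 * consumer { memory-region = <&pool>; };
 */
int ret = of_reserved_mem_device_init_by_idx(dev, dev->of_node, 0);
/* ret == 0 here: the disabled region was skipped and nothing was
 * assigned to the device. */
```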
@@ -362,10 +356,6 @@
 	mutex_lock(&of_rmem_assigned_device_mutex);
 	list_add(&rd->list, &of_rmem_assigned_device_list);
 	mutex_unlock(&of_rmem_assigned_device_mutex);
-	/* ensure that dma_ops is set for virtual devices
-	 * using reserved memory
-	 */
-	of_dma_configure(dev, np, true);
 
 	dev_info(dev, "assigned reserved memory node %s\n", rmem->name);
 	} else {
@@ -377,6 +367,25 @@
 EXPORT_SYMBOL_GPL(of_reserved_mem_device_init_by_idx);
 
 /**
+ * of_reserved_mem_device_init_by_name() - assign named reserved memory region
+ *					   to given device
+ * @dev: pointer to the device to configure
+ * @np: pointer to the device node with 'memory-region' property
+ * @name: name of the selected memory region
+ *
+ * Returns: 0 on success or a negative error-code on failure.
+ */
+int of_reserved_mem_device_init_by_name(struct device *dev,
+					struct device_node *np,
+					const char *name)
+{
+	int idx = of_property_match_string(np, "memory-region-names", name);
+
+	return of_reserved_mem_device_init_by_idx(dev, np, idx);
+}
+EXPORT_SYMBOL_GPL(of_reserved_mem_device_init_by_name);
+
+/**
  * of_reserved_mem_device_release() - release reserved memory device structures
  * @dev: Pointer to the device to deconfigure
  *
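The new wrapper resolves an index through the standard 'memory-region-names' property and defers everything else to the by-index path; if the name is absent, of_property_match_string() returns a negative errno, the by-index helper's of_parse_phandle() lookup comes back NULL, and the call fails with -ENODEV. A hypothetical probe-time usage (driver and names are illustrative):

```c
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>

/*
 * consumer {
 *	memory-region = <&fw_pool>, <&dma_pool>;
 *	memory-region-names = "firmware", "dma";
 * };
 */
static int my_probe(struct platform_device *pdev)
{
	/* Equivalent to ..._init_by_idx(dev, np, 1) for the node above. */
	return of_reserved_mem_device_init_by_name(&pdev->dev,
						   pdev->dev.of_node, "dma");
}
```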
@@ -385,24 +394,22 @@
  */
 void of_reserved_mem_device_release(struct device *dev)
 {
-	struct rmem_assigned_device *rd;
-	struct reserved_mem *rmem = NULL;
+	struct rmem_assigned_device *rd, *tmp;
+	LIST_HEAD(release_list);
 
 	mutex_lock(&of_rmem_assigned_device_mutex);
-	list_for_each_entry(rd, &of_rmem_assigned_device_list, list) {
-		if (rd->dev == dev) {
-			rmem = rd->rmem;
-			list_del(&rd->list);
-			kfree(rd);
-			break;
-		}
+	list_for_each_entry_safe(rd, tmp, &of_rmem_assigned_device_list, list) {
+		if (rd->dev == dev)
+			list_move_tail(&rd->list, &release_list);
 	}
 	mutex_unlock(&of_rmem_assigned_device_mutex);
 
-	if (!rmem || !rmem->ops || !rmem->ops->device_release)
-		return;
+	list_for_each_entry_safe(rd, tmp, &release_list, list) {
+		if (rd->rmem && rd->rmem->ops && rd->rmem->ops->device_release)
+			rd->rmem->ops->device_release(rd->rmem, dev);
 
-	rmem->ops->device_release(rmem, dev);
+		kfree(rd);
+	}
 }
 EXPORT_SYMBOL_GPL(of_reserved_mem_device_release);
 
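The rewritten release path fixes two limitations of the old loop, which unlinked only the first entry for the device: all regions assigned to the device are now released, and the device_release() callbacks run after the mutex is dropped. The collect-then-drain idiom used here is the standard one; a generic, self-contained sketch with hypothetical types (list_for_each_entry_safe() is required because entries are unlinked, and later freed, mid-walk):

```c
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct entry {
	struct list_head node;
	void *owner;
};

static LIST_HEAD(global_list);
static DEFINE_MUTEX(global_lock);

static void drain_for_owner(void *owner)
{
	struct entry *e, *tmp;
	LIST_HEAD(doomed);

	/* Phase 1: under the lock, move matches to a private list. */
	mutex_lock(&global_lock);
	list_for_each_entry_safe(e, tmp, &global_list, node)
		if (e->owner == owner)
			list_move_tail(&e->node, &doomed);
	mutex_unlock(&global_lock);

	/* Phase 2: lock-free teardown; release callbacks go here. */
	list_for_each_entry_safe(e, tmp, &doomed, node)
		kfree(e);
}
```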