2023-12-09 b22da3d8526a935aa31e086e63f60ff3246cb61c
kernel/drivers/of/of_reserved_mem.c
@@ -20,56 +20,33 @@
 #include <linux/of_reserved_mem.h>
 #include <linux/sort.h>
 #include <linux/slab.h>
-#include <linux/kmemleak.h>
+#include <linux/memblock.h>
 
-#define MAX_RESERVED_REGIONS	32
+#define MAX_RESERVED_REGIONS	64
 static struct reserved_mem reserved_mem[MAX_RESERVED_REGIONS];
 static int reserved_mem_count;
 
-#if defined(CONFIG_HAVE_MEMBLOCK)
-#include <linux/memblock.h>
-int __init __weak early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
+static int __init early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
 	phys_addr_t align, phys_addr_t start, phys_addr_t end, bool nomap,
 	phys_addr_t *res_base)
 {
 	phys_addr_t base;
-	/*
-	 * We use __memblock_alloc_base() because memblock_alloc_base()
-	 * panic()s on allocation failure.
-	 */
+
 	end = !end ? MEMBLOCK_ALLOC_ANYWHERE : end;
-	base = __memblock_alloc_base(size, align, end);
+	align = !align ? SMP_CACHE_BYTES : align;
+	base = memblock_find_in_range(start, end, size, align);
 	if (!base)
 		return -ENOMEM;
 
-	/*
-	 * Check if the allocated region fits in to start..end window
-	 */
-	if (base < start) {
-		memblock_free(base, size);
-		return -ENOMEM;
-	}
-
 	*res_base = base;
-	if (nomap) {
-		kmemleak_ignore_phys(base);
+	if (nomap)
 		return memblock_remove(base, size);
-	}
-	return 0;
+
+	return memblock_reserve(base, size);
 }
-#else
-int __init __weak early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
-	phys_addr_t align, phys_addr_t start, phys_addr_t end, bool nomap,
-	phys_addr_t *res_base)
-{
-	pr_err("Reserved memory not supported, ignoring region 0x%llx%s\n",
-	       size, nomap ? " (nomap)" : "");
-	return -ENOSYS;
-}
-#endif
 
 /**
- * res_mem_save_node() - save fdt node for second pass initialization
+ * fdt_reserved_mem_save_node() - save fdt node for second pass initialization
  */
 void __init fdt_reserved_mem_save_node(unsigned long node, const char *uname,
 				       phys_addr_t base, phys_addr_t size)
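
Review note: the rewritten helper swaps __memblock_alloc_base(), which both finds and reserves a range but only honors an upper bound, for memblock_find_in_range(), which merely locates a candidate range inside [start, end] and leaves claiming it to the caller. Honoring 'start' directly is what makes the old "base < start" fixup unnecessary. A minimal sketch of the resulting find-then-claim pattern, assuming the memblock API of this kernel version (values are hypothetical, SZ_* constants from linux/sizes.h):

	/* Sketch only: locate 1 MiB, 4 KiB-aligned, in the first 4 GiB. */
	bool nomap = false;
	phys_addr_t base = memblock_find_in_range(0, SZ_4G, SZ_1M, SZ_4K);

	if (base) {
		if (nomap)
			memblock_remove(base, SZ_1M);	/* drop from the memory map entirely */
		else
			memblock_reserve(base, SZ_1M);	/* keep mapped, never handed out */
	}
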
@@ -77,7 +54,7 @@
 	struct reserved_mem *rmem = &reserved_mem[reserved_mem_count];
 
 	if (reserved_mem_count == ARRAY_SIZE(reserved_mem)) {
-		pr_err("not enough space all defined regions.\n");
+		pr_err("not enough space for all defined regions.\n");
 		return;
 	}
 
@@ -91,8 +68,8 @@
 }
 
 /**
- * res_mem_alloc_size() - allocate reserved memory described by 'size', 'align'
- * and 'alloc-ranges' properties
+ * __reserved_mem_alloc_size() - allocate reserved memory described by
+ *	'size', 'alignment' and 'alloc-ranges' properties.
  */
 static int __init __reserved_mem_alloc_size(unsigned long node,
 	const char *uname, phys_addr_t *res_base, phys_addr_t *res_size)
@@ -102,7 +79,7 @@
 	phys_addr_t base = 0, align = 0, size;
 	int len;
 	const __be32 *prop;
-	int nomap;
+	bool nomap;
 	int ret;
 
 	prop = of_get_flat_dt_prop(node, "size", &len);
@@ -115,8 +92,6 @@
 	}
 	size = dt_mem_next_cell(dt_root_size_cells, &prop);
 
-	nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;
-
 	prop = of_get_flat_dt_prop(node, "alignment", &len);
 	if (prop) {
 		if (len != dt_root_addr_cells * sizeof(__be32)) {
@@ -127,11 +102,13 @@
 		align = dt_mem_next_cell(dt_root_addr_cells, &prop);
 	}
 
+	nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;
+
 	/* Need adjust the alignment to satisfy the CMA requirement */
 	if (IS_ENABLED(CONFIG_CMA)
 	    && of_flat_dt_is_compatible(node, "shared-dma-pool")
 	    && of_get_flat_dt_prop(node, "reusable", NULL)
-	    && !of_get_flat_dt_prop(node, "no-map", NULL)) {
+	    && !nomap) {
 		unsigned long order =
 			max_t(unsigned long, MAX_ORDER - 1, pageblock_order);
 
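
For context, not part of the patch: CMA can only hand regions back at pageblock/buddy-allocator granularity, so a 'reusable' pool's alignment is raised to match. A worked example under a hypothetical arm64 configuration with 4 KiB pages (the exact constants are config-dependent, and the line that actually bumps 'align' falls just outside this hunk):

	/*
	 * MAX_ORDER = 11       ->  MAX_ORDER - 1 = 10
	 * pageblock_order = 9  (2 MiB pageblocks)
	 * order = max(10, 9) = 10
	 * alignment >= PAGE_SIZE << order = 4 KiB << 10 = 4 MiB
	 */
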
@@ -185,15 +162,16 @@
 }
 
 static const struct of_device_id __rmem_of_table_sentinel
-	__used __section(__reservedmem_of_table_end);
+	__used __section("__reservedmem_of_table_end");
 
 /**
- * res_mem_init_node() - call region specific reserved memory init code
+ * __reserved_mem_init_node() - call region specific reserved memory init code
  */
 static int __init __reserved_mem_init_node(struct reserved_mem *rmem)
 {
 	extern const struct of_device_id __reservedmem_of_table[];
 	const struct of_device_id *i;
+	int ret = -ENOENT;
 
 	for (i = __reservedmem_of_table; i < &__rmem_of_table_sentinel; i++) {
 		reservedmem_of_init_fn initfn = i->data;
@@ -202,13 +180,14 @@
 		if (!of_flat_dt_is_compatible(rmem->fdt_node, compat))
 			continue;
 
-		if (initfn(rmem) == 0) {
+		ret = initfn(rmem);
+		if (ret == 0) {
 			pr_info("initialized node %s, compatible id %s\n",
 				rmem->name, compat);
-			return 0;
+			break;
 		}
 	}
-	return -ENOENT;
+	return ret;
 }
 
 static int __init __rmem_cmp(const void *a, const void *b)
@@ -262,7 +241,7 @@
 }
 
 /**
- * fdt_init_reserved_mem - allocate and init all saved reserved memory regions
+ * fdt_init_reserved_mem() - allocate and init all saved reserved memory regions
  */
 void __init fdt_init_reserved_mem(void)
 {
@@ -277,7 +256,9 @@
 		int len;
 		const __be32 *prop;
 		int err = 0;
+		bool nomap;
 
+		nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;
 		prop = of_get_flat_dt_prop(node, "phandle", &len);
 		if (!prop)
 			prop = of_get_flat_dt_prop(node, "linux,phandle", &len);
@@ -287,8 +268,16 @@
 		if (rmem->size == 0)
 			err = __reserved_mem_alloc_size(node, rmem->name,
 						&rmem->base, &rmem->size);
-		if (err == 0)
-			__reserved_mem_init_node(rmem);
+		if (err == 0) {
+			err = __reserved_mem_init_node(rmem);
+			if (err != 0 && err != -ENOENT) {
+				pr_info("node %s compatible matching fail\n",
+					rmem->name);
+				memblock_free(rmem->base, rmem->size);
+				if (nomap)
+					memblock_add(rmem->base, rmem->size);
+			}
+		}
 	}
 }
 
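
Review note: together with the __reserved_mem_init_node() change above, a region whose init callback genuinely fails (anything other than -ENOENT, which only means no compatible handler is registered) is now rolled back instead of being leaked. The rollback mirrors how the region was claimed:

	/*
	 * Claim/rollback pairing (sketch):
	 *   normal region :  memblock_reserve(base, size)  <->  memblock_free(base, size)
	 *   nomap region  :  memblock_remove(base, size)   <->  memblock_add(base, size)
	 * A nomap region was removed from memblock entirely, so on failure
	 * it must be added back, not merely freed.
	 */
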
@@ -344,6 +333,11 @@
 	if (!target)
 		return -ENODEV;
 
+	if (!of_device_is_available(target)) {
+		of_node_put(target);
+		return 0;
+	}
+
 	rmem = __find_rmem(target);
 	of_node_put(target);
 
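
Review note: with this check, a consumer whose phandle points at a status = "disabled" reserved-memory node gets a clean 0 ("nothing to attach") instead of an error. A hypothetical devicetree fragment, for illustration only:

	/*
	 *   reserved-memory {
	 *       fb_mem: framebuffer@80000000 {
	 *           reg = <0x0 0x80000000 0x0 0x800000>;
	 *           status = "disabled";
	 *       };
	 *   };
	 *
	 *   display@0 {
	 *       memory-region = <&fb_mem>;   // skipped cleanly, not a probe failure
	 *   };
	 */
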
@@ -362,10 +356,6 @@
 		mutex_lock(&of_rmem_assigned_device_mutex);
 		list_add(&rd->list, &of_rmem_assigned_device_list);
 		mutex_unlock(&of_rmem_assigned_device_mutex);
-		/* ensure that dma_ops is set for virtual devices
-		 * using reserved memory
-		 */
-		of_dma_configure(dev, np, true);
 
 		dev_info(dev, "assigned reserved memory node %s\n", rmem->name);
 	} else {
@@ -377,6 +367,25 @@
 EXPORT_SYMBOL_GPL(of_reserved_mem_device_init_by_idx);
 
 /**
+ * of_reserved_mem_device_init_by_name() - assign named reserved memory region
+ *					   to given device
+ * @dev: pointer to the device to configure
+ * @np: pointer to the device node with 'memory-region' property
+ * @name: name of the selected memory region
+ *
+ * Returns: 0 on success or a negative error-code on failure.
+ */
+int of_reserved_mem_device_init_by_name(struct device *dev,
+					struct device_node *np,
+					const char *name)
+{
+	int idx = of_property_match_string(np, "memory-region-names", name);
+
+	return of_reserved_mem_device_init_by_idx(dev, np, idx);
+}
+EXPORT_SYMBOL_GPL(of_reserved_mem_device_init_by_name);
+
+/**
  * of_reserved_mem_device_release() - release reserved memory device structures
  * @dev: Pointer to the device to deconfigure
  *
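
Usage sketch for the new export (hypothetical driver and region name, not from this patch); the consumer node is assumed to carry matching 'memory-region' and 'memory-region-names' properties:

	#include <linux/of_reserved_mem.h>
	#include <linux/platform_device.h>

	static int foo_probe(struct platform_device *pdev)
	{
		int ret;

		/* Attach the region listed as "framebuffer" in memory-region-names. */
		ret = of_reserved_mem_device_init_by_name(&pdev->dev,
							  pdev->dev.of_node,
							  "framebuffer");
		if (ret)
			return ret;

		/* DMA allocations for this device may now be served from the region. */
		return 0;
	}
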
@@ -385,24 +394,22 @@
  */
 void of_reserved_mem_device_release(struct device *dev)
 {
-	struct rmem_assigned_device *rd;
-	struct reserved_mem *rmem = NULL;
+	struct rmem_assigned_device *rd, *tmp;
+	LIST_HEAD(release_list);
 
 	mutex_lock(&of_rmem_assigned_device_mutex);
-	list_for_each_entry(rd, &of_rmem_assigned_device_list, list) {
-		if (rd->dev == dev) {
-			rmem = rd->rmem;
-			list_del(&rd->list);
-			kfree(rd);
-			break;
-		}
+	list_for_each_entry_safe(rd, tmp, &of_rmem_assigned_device_list, list) {
+		if (rd->dev == dev)
+			list_move_tail(&rd->list, &release_list);
 	}
 	mutex_unlock(&of_rmem_assigned_device_mutex);
 
-	if (!rmem || !rmem->ops || !rmem->ops->device_release)
-		return;
+	list_for_each_entry_safe(rd, tmp, &release_list, list) {
+		if (rd->rmem && rd->rmem->ops && rd->rmem->ops->device_release)
+			rd->rmem->ops->device_release(rd->rmem, dev);
 
-	rmem->ops->device_release(rmem, dev);
+		kfree(rd);
+	}
 }
 EXPORT_SYMBOL_GPL(of_reserved_mem_device_release);
 
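
Review note: the rework fixes two things at once. Every region assigned to the device is now released (the old loop stopped at the first match), and the device_release() callbacks run after the mutex is dropped. The underlying pattern, sketched with hypothetical names:

	/* Collect matches under the lock, act on them outside it. */
	mutex_lock(&lock);
	list_for_each_entry_safe(pos, tmp, &global_list, list)
		if (pos->dev == dev)
			list_move_tail(&pos->list, &private_list);
	mutex_unlock(&lock);

	list_for_each_entry_safe(pos, tmp, &private_list, list) {
		release(pos);	/* may sleep or re-enter; safe without the lock */
		kfree(pos);
	}

The _safe iterator variants are required because both list_move_tail() and kfree() modify the list mid-walk.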