.. | .. |
| 1 | +/* SPDX-License-Identifier: GPL-2.0-or-later */ |
1 | 2 | #ifndef _LINUX_MEMBLOCK_H |
2 | 3 | #define _LINUX_MEMBLOCK_H |
3 | 4 | #ifdef __KERNEL__ |
4 | 5 | |
5 | | -#ifdef CONFIG_HAVE_MEMBLOCK |
6 | 6 | /* |
7 | 7 | * Logical memory blocks. |
8 | 8 | * |
9 | 9 | * Copyright (C) 2001 Peter Bergner, IBM Corp. |
10 | | - * |
11 | | - * This program is free software; you can redistribute it and/or |
12 | | - * modify it under the terms of the GNU General Public License |
13 | | - * as published by the Free Software Foundation; either version |
14 | | - * 2 of the License, or (at your option) any later version. |
15 | 10 | */ |
16 | 11 | |
17 | 12 | #include <linux/init.h> |
18 | 13 | #include <linux/mm.h> |
| 14 | +#include <asm/dma.h> |
19 | 15 | |
20 | | -#define INIT_MEMBLOCK_REGIONS 128 |
21 | | -#define INIT_PHYSMEM_REGIONS 4 |
| 16 | +extern unsigned long max_low_pfn; |
| 17 | +extern unsigned long min_low_pfn; |
| 18 | + |
| 19 | +/* |
| 20 | + * highest page |
| 21 | + */ |
| 22 | +extern unsigned long max_pfn; |
| 23 | +/* |
| 24 | + * highest possible page |
| 25 | + */ |
| 26 | +extern unsigned long long max_possible_pfn; |
| 27 | + |
| 28 | +#ifdef CONFIG_ROCKCHIP_THUNDER_BOOT |
| 29 | +extern int defer_free_memblock(void *unused); |
| 30 | +#endif |
22 | 31 | |
23 | 32 | /** |
24 | 33 | * enum memblock_flags - definition of memory region attributes |
.. | .. |
36 | 45 | |
37 | 46 | /** |
38 | 47 | * struct memblock_region - represents a memory region |
39 | | - * @base: physical address of the region |
| 48 | + * @base: base address of the region |
40 | 49 | * @size: size of the region |
41 | 50 | * @flags: memory region attributes |
42 | 51 | * @nid: NUMA node id |
.. | .. |
45 | 54 | phys_addr_t base; |
46 | 55 | phys_addr_t size; |
47 | 56 | enum memblock_flags flags; |
48 | | -#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP |
| 57 | +#ifdef CONFIG_NEED_MULTIPLE_NODES |
49 | 58 | int nid; |
50 | 59 | #endif |
51 | 60 | }; |
.. | .. |
70 | 79 | * struct memblock - memblock allocator metadata |
71 | 80 | * @bottom_up: is bottom up direction? |
72 | 81 | * @current_limit: physical address of the current allocation limit |
73 | | - * @memory: usabe memory regions |
| 82 | + * @memory: usable memory regions |
74 | 83 | * @reserved: reserved memory regions |
75 | | - * @physmem: all physical memory |
76 | 84 | */ |
77 | 85 | struct memblock { |
78 | 86 | bool bottom_up; /* is bottom up direction? */ |
79 | 87 | phys_addr_t current_limit; |
80 | 88 | struct memblock_type memory; |
81 | 89 | struct memblock_type reserved; |
82 | | -#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP |
83 | | - struct memblock_type physmem; |
84 | | -#endif |
85 | 90 | }; |
86 | 91 | |
87 | 92 | extern struct memblock memblock; |
88 | | -extern int memblock_debug; |
89 | 93 | |
90 | | -#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK |
| 94 | +#ifndef CONFIG_ARCH_KEEP_MEMBLOCK |
91 | 95 | #define __init_memblock __meminit |
92 | 96 | #define __initdata_memblock __meminitdata |
93 | 97 | void memblock_discard(void); |
94 | 98 | #else |
95 | 99 | #define __init_memblock |
96 | 100 | #define __initdata_memblock |
| 101 | +static inline void memblock_discard(void) {} |
97 | 102 | #endif |
98 | 103 | |
99 | | -#define memblock_dbg(fmt, ...) \ |
100 | | - if (memblock_debug) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__) |
101 | | - |
102 | | -phys_addr_t memblock_find_in_range_node(phys_addr_t size, phys_addr_t align, |
103 | | - phys_addr_t start, phys_addr_t end, |
104 | | - int nid, enum memblock_flags flags); |
105 | 104 | phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end, |
106 | 105 | phys_addr_t size, phys_addr_t align); |
107 | 106 | void memblock_allow_resize(void); |
.. | .. |
110 | 109 | int memblock_remove(phys_addr_t base, phys_addr_t size); |
111 | 110 | int memblock_free(phys_addr_t base, phys_addr_t size); |
112 | 111 | int memblock_reserve(phys_addr_t base, phys_addr_t size); |
| 112 | +#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP |
| 113 | +int memblock_physmem_add(phys_addr_t base, phys_addr_t size); |
| 114 | +#endif |
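memblock_reserve() and its siblings above are the registration API: early platform code feeds detected RAM into memblock and carves out ranges that must not be handed to the allocator. A minimal sketch of reserving a firmware-handed region; fw_base, fw_size, and reserve_fw_region are hypothetical, and the query helpers used are declared further down in this header:

```c
/* Sketch: reserve a bootloader-provided region during early boot.
 * Returns 0 on success, like memblock_reserve() itself. */
static int __init reserve_fw_region(phys_addr_t fw_base, phys_addr_t fw_size)
{
	if (!memblock_is_region_memory(fw_base, fw_size))
		return -EINVAL;	/* not backed by registered RAM */
	if (memblock_is_region_reserved(fw_base, fw_size))
		return -EBUSY;	/* overlaps an existing reservation */
	return memblock_reserve(fw_base, fw_size);
}
```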
113 | 115 | void memblock_trim_memory(phys_addr_t align); |
114 | 116 | bool memblock_overlaps_region(struct memblock_type *type, |
115 | 117 | phys_addr_t base, phys_addr_t size); |
.. | .. |
118 | 120 | int memblock_mark_mirror(phys_addr_t base, phys_addr_t size); |
119 | 121 | int memblock_mark_nomap(phys_addr_t base, phys_addr_t size); |
120 | 122 | int memblock_clear_nomap(phys_addr_t base, phys_addr_t size); |
121 | | -enum memblock_flags choose_memblock_flags(void); |
| 123 | + |
| 124 | +unsigned long memblock_free_all(void); |
| 125 | +void reset_node_managed_pages(pg_data_t *pgdat); |
| 126 | +void reset_all_zones_managed_pages(void); |
122 | 127 | |
123 | 128 | /* Low level functions */ |
124 | | -int memblock_add_range(struct memblock_type *type, |
125 | | - phys_addr_t base, phys_addr_t size, |
126 | | - int nid, enum memblock_flags flags); |
127 | | - |
128 | 129 | void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags, |
129 | 130 | struct memblock_type *type_a, |
130 | 131 | struct memblock_type *type_b, phys_addr_t *out_start, |
.. | .. |
135 | 136 | struct memblock_type *type_b, phys_addr_t *out_start, |
136 | 137 | phys_addr_t *out_end, int *out_nid); |
137 | 138 | |
138 | | -void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start, |
139 | | - phys_addr_t *out_end); |
140 | | - |
141 | | -void __memblock_free_early(phys_addr_t base, phys_addr_t size); |
142 | 139 | void __memblock_free_late(phys_addr_t base, phys_addr_t size); |
143 | | -void create_pgtable_mapping(phys_addr_t start, phys_addr_t end); |
| 140 | + |
| 141 | +#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP |
| 142 | +static inline void __next_physmem_range(u64 *idx, struct memblock_type *type, |
| 143 | + phys_addr_t *out_start, |
| 144 | + phys_addr_t *out_end) |
| 145 | +{ |
| 146 | + extern struct memblock_type physmem; |
| 147 | + |
| 148 | + __next_mem_range(idx, NUMA_NO_NODE, MEMBLOCK_NONE, &physmem, type, |
| 149 | + out_start, out_end, NULL); |
| 150 | +} |
144 | 151 | |
145 | 152 | /** |
146 | | - * for_each_mem_range - iterate through memblock areas from type_a and not |
| 153 | + * for_each_physmem_range - iterate through physmem areas not included in type. |
| 154 | + * @i: u64 used as loop variable |
| 155 | + * @type: ptr to memblock_type which excludes from the iteration, can be %NULL |
| 156 | + * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL |
| 157 | + * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL |
| 158 | + */ |
| 159 | +#define for_each_physmem_range(i, type, p_start, p_end) \ |
| 160 | + for (i = 0, __next_physmem_range(&i, type, p_start, p_end); \ |
| 161 | + i != (u64)ULLONG_MAX; \ |
| 162 | + __next_physmem_range(&i, type, p_start, p_end)) |
| 163 | +#endif /* CONFIG_HAVE_MEMBLOCK_PHYS_MAP */ |
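The physmem type records memory as it was detected at boot and is never trimmed afterwards, so this walk still sees ranges that mem= or memory hot-remove took out of memblock.memory. A minimal sketch, assuming a kernel with CONFIG_HAVE_MEMBLOCK_PHYS_MAP (selected by s390); dump_physmem is a hypothetical helper:

```c
/* Sketch: print every physical range detected at boot; a NULL @type
 * excludes nothing from the walk. */
static void __init dump_physmem(void)
{
	phys_addr_t start, end;
	u64 i;

	for_each_physmem_range(i, NULL, &start, &end)
		pr_info("physmem: [%pa..%pa)\n", &start, &end);
}
```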
| 164 | + |
| 165 | +/** |
| 166 | + * __for_each_mem_range - iterate through memblock areas from type_a and not |
147 | 167 | * included in type_b. Or just type_a if type_b is NULL. |
148 | 168 | * @i: u64 used as loop variable |
149 | 169 | * @type_a: ptr to memblock_type to iterate |
.. | .. |
154 | 174 | * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL |
155 | 175 | * @p_nid: ptr to int for nid of the range, can be %NULL |
156 | 176 | */ |
157 | | -#define for_each_mem_range(i, type_a, type_b, nid, flags, \ |
| 177 | +#define __for_each_mem_range(i, type_a, type_b, nid, flags, \ |
158 | 178 | p_start, p_end, p_nid) \ |
159 | 179 | for (i = 0, __next_mem_range(&i, nid, flags, type_a, type_b, \ |
160 | 180 | p_start, p_end, p_nid); \ |
.. | .. |
163 | 183 | p_start, p_end, p_nid)) |
164 | 184 | |
165 | 185 | /** |
166 | | - * for_each_mem_range_rev - reverse iterate through memblock areas from |
| 186 | + * __for_each_mem_range_rev - reverse iterate through memblock areas from |
167 | 187 | * type_a and not included in type_b. Or just type_a if type_b is NULL. |
168 | 188 | * @i: u64 used as loop variable |
169 | 189 | * @type_a: ptr to memblock_type to iterate |
.. | .. |
174 | 194 | * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL |
175 | 195 | * @p_nid: ptr to int for nid of the range, can be %NULL |
176 | 196 | */ |
177 | | -#define for_each_mem_range_rev(i, type_a, type_b, nid, flags, \ |
178 | | - p_start, p_end, p_nid) \ |
| 197 | +#define __for_each_mem_range_rev(i, type_a, type_b, nid, flags, \ |
| 198 | + p_start, p_end, p_nid) \ |
179 | 199 | for (i = (u64)ULLONG_MAX, \ |
180 | | - __next_mem_range_rev(&i, nid, flags, type_a, type_b,\ |
| 200 | + __next_mem_range_rev(&i, nid, flags, type_a, type_b, \ |
181 | 201 | p_start, p_end, p_nid); \ |
182 | 202 | i != (u64)ULLONG_MAX; \ |
183 | 203 | __next_mem_range_rev(&i, nid, flags, type_a, type_b, \ |
184 | 204 | p_start, p_end, p_nid)) |
185 | 205 | |
186 | 206 | /** |
187 | | - * for_each_reserved_mem_region - iterate over all reserved memblock areas |
| 207 | + * for_each_mem_range - iterate through memory areas. |
| 208 | + * @i: u64 used as loop variable |
| 209 | + * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL |
| 210 | + * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL |
| 211 | + */ |
| 212 | +#define for_each_mem_range(i, p_start, p_end) \ |
| 213 | + __for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE, \ |
| 214 | + MEMBLOCK_HOTPLUG, p_start, p_end, NULL) |
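The new wrapper folds the type, nid, and flags arguments away, so callers only see start/end pairs; the reverse variant defined next walks the same set from the top of memory down. A minimal sketch of the common pattern, with total_mem as a hypothetical helper:

```c
/* Sketch: sum all memory registered with memblock. The wrapper passes
 * MEMBLOCK_HOTPLUG, so hotpluggable ranges are included; each range's
 * end address is exclusive. */
static phys_addr_t __init total_mem(void)
{
	phys_addr_t start, end, total = 0;
	u64 i;

	for_each_mem_range(i, &start, &end)
		total += end - start;
	return total;
}
```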
| 215 | + |
| 216 | +/** |
| 217 | + * for_each_mem_range_rev - reverse iterate through memory areas. |
| 218 | + * |
| 219 | + * @i: u64 used as loop variable |
| 220 | + * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL |
| 221 | + * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL |
| 222 | + */ |
| 223 | +#define for_each_mem_range_rev(i, p_start, p_end) \ |
| 224 | + __for_each_mem_range_rev(i, &memblock.memory, NULL, NUMA_NO_NODE, \ |
| 225 | + MEMBLOCK_HOTPLUG, p_start, p_end, NULL) |
| 226 | + |
| 227 | +/** |
| 228 | + * for_each_reserved_mem_range - iterate over all reserved memblock areas |
188 | 229 | * @i: u64 used as loop variable |
189 | 230 | * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL |
190 | 231 | * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL |
.. | .. |
192 | 233 | * Walks over reserved areas of memblock. Available as soon as memblock |
193 | 234 | * is initialized. |
194 | 235 | */ |
195 | | -#define for_each_reserved_mem_region(i, p_start, p_end) \ |
196 | | - for (i = 0UL, __next_reserved_mem_region(&i, p_start, p_end); \ |
197 | | - i != (u64)ULLONG_MAX; \ |
198 | | - __next_reserved_mem_region(&i, p_start, p_end)) |
| 236 | +#define for_each_reserved_mem_range(i, p_start, p_end) \ |
| 237 | + __for_each_mem_range(i, &memblock.reserved, NULL, NUMA_NO_NODE, \ |
| 238 | + MEMBLOCK_NONE, p_start, p_end, NULL) |
199 | 239 | |
200 | 240 | static inline bool memblock_is_hotpluggable(struct memblock_region *m) |
201 | 241 | { |
.. | .. |
212 | 252 | return m->flags & MEMBLOCK_NOMAP; |
213 | 253 | } |
214 | 254 | |
215 | | -#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP |
216 | 255 | int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn, |
217 | 256 | unsigned long *end_pfn); |
218 | 257 | void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn, |
.. | .. |
231 | 270 | #define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid) \ |
232 | 271 | for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \ |
233 | 272 | i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid)) |
234 | | -#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ |
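The PFN-based walk is what node and zone setup code uses. A minimal sketch that counts the pages spanned by one node; count_node_pfns is a hypothetical helper, and the trailing nid output may be NULL when the caller has already filtered by nid:

```c
/* Sketch: count the PFNs that memblock attributes to @nid. Note the
 * loop cursor is a plain int here, not the u64 of the other walks. */
static unsigned long __init count_node_pfns(int nid)
{
	unsigned long start_pfn, end_pfn, pages = 0;
	int i;

	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL)
		pages += end_pfn - start_pfn;
	return pages;
}
```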
| 273 | + |
| 274 | +#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT |
| 275 | +void __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone, |
| 276 | + unsigned long *out_spfn, |
| 277 | + unsigned long *out_epfn); |
| 278 | +/** |
| 279 | + * for_each_free_mem_pfn_range_in_zone - iterate through zone specific |
| 280 | + * free memblock areas |
| 281 | + * @i: u64 used as loop variable |
| 282 | + * @zone: zone in which all of the memory blocks reside |
| 283 | + * @p_start: ptr to ulong for start pfn of the range, can be %NULL |
| 284 | + * @p_end: ptr to ulong for end pfn of the range, can be %NULL |
| 285 | + * |
| 286 | + * Walks over free (memory && !reserved) areas of memblock in a specific |
| 287 | + * zone. Available once memblock and an empty zone are initialized. The main |
| 288 | + * assumption is that the zone start, end, and pgdat have been associated. |
| 289 | + * This way we can use the zone to determine NUMA node, and if a given part |
| 290 | + * of the memblock is valid for the zone. |
| 291 | + */ |
| 292 | +#define for_each_free_mem_pfn_range_in_zone(i, zone, p_start, p_end) \ |
| 293 | + for (i = 0, \ |
| 294 | + __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end); \ |
| 295 | + i != U64_MAX; \ |
| 296 | + __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end)) |
| 297 | + |
| 298 | +/** |
| 299 | + * for_each_free_mem_pfn_range_in_zone_from - iterate through zone specific |
| 300 | + * free memblock areas from a given point |
| 301 | + * @i: u64 used as loop variable |
| 302 | + * @zone: zone in which all of the memory blocks reside |
| 303 | + * @p_start: ptr to ulong for start pfn of the range, can be %NULL |
| 304 | + * @p_end: ptr to ulong for end pfn of the range, can be %NULL |
| 305 | + * |
| 306 | + * Walks over free (memory && !reserved) areas of memblock in a specific |
| 307 | + * zone, continuing from current position. Available as soon as memblock is |
| 308 | + * initialized. |
| 309 | + */ |
| 310 | +#define for_each_free_mem_pfn_range_in_zone_from(i, zone, p_start, p_end) \ |
| 311 | + for (; i != U64_MAX; \ |
| 312 | + __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end)) |
| 313 | + |
| 314 | +int __init deferred_page_init_max_threads(const struct cpumask *node_cpumask); |
| 315 | + |
| 316 | +#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ |
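Both iterators exist for deferred struct page initialization: deferred_init_memmap() walks a zone's free ranges, and the _from form lets each step continue from the previous position. A minimal sketch of the plain form, with zone_free_pfns as a hypothetical helper:

```c
/* Sketch (CONFIG_DEFERRED_STRUCT_PAGE_INIT): count a zone's free PFNs,
 * i.e. pages not covered by any memblock reservation. */
static unsigned long __init zone_free_pfns(struct zone *zone)
{
	unsigned long spfn, epfn, nr = 0;
	u64 i;

	for_each_free_mem_pfn_range_in_zone(i, zone, &spfn, &epfn)
		nr += epfn - spfn;
	return nr;
}
```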
235 | 317 | |
236 | 318 | /** |
237 | 319 | * for_each_free_mem_range - iterate through free memblock areas |
.. | .. |
246 | 328 | * soon as memblock is initialized. |
247 | 329 | */ |
248 | 330 | #define for_each_free_mem_range(i, nid, flags, p_start, p_end, p_nid) \ |
249 | | - for_each_mem_range(i, &memblock.memory, &memblock.reserved, \ |
250 | | - nid, flags, p_start, p_end, p_nid) |
| 331 | + __for_each_mem_range(i, &memblock.memory, &memblock.reserved, \ |
| 332 | + nid, flags, p_start, p_end, p_nid) |
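Free here means present in memblock.memory but not covered by memblock.reserved, which is the same view the allocator itself uses. A minimal sketch of a first-fit search; first_fit is a hypothetical helper and no flags or node filtering is applied:

```c
/* Sketch: return the base of the lowest free range that can hold
 * @size bytes, or 0 when nothing fits. */
static phys_addr_t __init first_fit(phys_addr_t size)
{
	phys_addr_t start, end;
	u64 i;

	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
				&start, &end, NULL)
		if (end - start >= size)
			return start;
	return 0;
}
```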
251 | 333 | |
252 | 334 | /** |
253 | 335 | * for_each_free_mem_range_reverse - rev-iterate through free memblock areas |
.. | .. |
263 | 345 | */ |
264 | 346 | #define for_each_free_mem_range_reverse(i, nid, flags, p_start, p_end, \ |
265 | 347 | p_nid) \ |
266 | | - for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved, \ |
267 | | - nid, flags, p_start, p_end, p_nid) |
| 348 | + __for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved, \ |
| 349 | + nid, flags, p_start, p_end, p_nid) |
268 | 350 | |
269 | | -static inline void memblock_set_region_flags(struct memblock_region *r, |
270 | | - enum memblock_flags flags) |
271 | | -{ |
272 | | - r->flags |= flags; |
273 | | -} |
274 | | - |
275 | | -static inline void memblock_clear_region_flags(struct memblock_region *r, |
276 | | - enum memblock_flags flags) |
277 | | -{ |
278 | | - r->flags &= ~flags; |
279 | | -} |
280 | | - |
281 | | -#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP |
282 | 351 | int memblock_set_node(phys_addr_t base, phys_addr_t size, |
283 | 352 | struct memblock_type *type, int nid); |
284 | 353 | |
| 354 | +#ifdef CONFIG_NEED_MULTIPLE_NODES |
285 | 355 | static inline void memblock_set_region_node(struct memblock_region *r, int nid) |
286 | 356 | { |
287 | 357 | r->nid = nid; |
.. | .. |
300 | 370 | { |
301 | 371 | return 0; |
302 | 372 | } |
303 | | -#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */ |
| 373 | +#endif /* CONFIG_NEED_MULTIPLE_NODES */ |
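memblock_set_node() is how NUMA initialization stamps node ids onto ranges before zones are built; passing &memblock.memory as the type is the usual choice. A minimal sketch, with numa_register_range as a hypothetical wrapper:

```c
/* Sketch: associate one firmware-reported range with a NUMA node. */
static int __init numa_register_range(int nid, phys_addr_t start,
				      phys_addr_t end)
{
	return memblock_set_node(start, end - start,
				 &memblock.memory, nid);
}
```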
304 | 374 | |
305 | | -phys_addr_t memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid); |
306 | | -phys_addr_t memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid); |
| 375 | +/* Flags for memblock allocation APIs */ |
| 376 | +#define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0) |
| 377 | +#define MEMBLOCK_ALLOC_ACCESSIBLE 0 |
| 378 | +#define MEMBLOCK_ALLOC_KASAN 1 |
307 | 379 | |
308 | | -phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align); |
| 380 | +/* We are using top down, so it is safe to use 0 here */ |
| 381 | +#define MEMBLOCK_LOW_LIMIT 0 |
| 382 | + |
| 383 | +#ifndef ARCH_LOW_ADDRESS_LIMIT |
| 384 | +#define ARCH_LOW_ADDRESS_LIMIT 0xffffffffUL |
| 385 | +#endif |
| 386 | + |
| 387 | +phys_addr_t memblock_phys_alloc_range(phys_addr_t size, phys_addr_t align, |
| 388 | + phys_addr_t start, phys_addr_t end); |
| 389 | +phys_addr_t memblock_alloc_range_nid(phys_addr_t size, |
| 390 | + phys_addr_t align, phys_addr_t start, |
| 391 | + phys_addr_t end, int nid, bool exact_nid); |
| 392 | +phys_addr_t memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid); |
| 393 | + |
| 394 | +static __always_inline phys_addr_t memblock_phys_alloc(phys_addr_t size, |
| 395 | + phys_addr_t align) |
| 396 | +{ |
| 397 | + return memblock_phys_alloc_range(size, align, 0, |
| 398 | + MEMBLOCK_ALLOC_ACCESSIBLE); |
| 399 | +} |
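The phys_* variants return a physical address and leave mapping to the caller. A minimal sketch that restricts the search to the low 4 GiB; alloc_low_buffer is a hypothetical helper and SZ_4G comes from linux/sizes.h:

```c
/* Sketch: page-aligned physical allocation restricted to the low 4G;
 * memblock_phys_alloc_range() returns 0 on failure. */
static phys_addr_t __init alloc_low_buffer(phys_addr_t size)
{
	return memblock_phys_alloc_range(size, PAGE_SIZE,
					 0, (phys_addr_t)SZ_4G);
}
```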
| 400 | + |
| 401 | +void *memblock_alloc_exact_nid_raw(phys_addr_t size, phys_addr_t align, |
| 402 | + phys_addr_t min_addr, phys_addr_t max_addr, |
| 403 | + int nid); |
| 404 | +void *memblock_alloc_try_nid_raw(phys_addr_t size, phys_addr_t align, |
| 405 | + phys_addr_t min_addr, phys_addr_t max_addr, |
| 406 | + int nid); |
| 407 | +void *memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, |
| 408 | + phys_addr_t min_addr, phys_addr_t max_addr, |
| 409 | + int nid); |
| 410 | + |
| 411 | +static __always_inline void *memblock_alloc(phys_addr_t size, phys_addr_t align) |
| 412 | +{ |
| 413 | + return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT, |
| 414 | + MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE); |
| 415 | +} |
| 416 | + |
| 417 | +static inline void *memblock_alloc_raw(phys_addr_t size, |
| 418 | + phys_addr_t align) |
| 419 | +{ |
| 420 | + return memblock_alloc_try_nid_raw(size, align, MEMBLOCK_LOW_LIMIT, |
| 421 | + MEMBLOCK_ALLOC_ACCESSIBLE, |
| 422 | + NUMA_NO_NODE); |
| 423 | +} |
| 424 | + |
| 425 | +static inline void *memblock_alloc_from(phys_addr_t size, |
| 426 | + phys_addr_t align, |
| 427 | + phys_addr_t min_addr) |
| 428 | +{ |
| 429 | + return memblock_alloc_try_nid(size, align, min_addr, |
| 430 | + MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE); |
| 431 | +} |
| 432 | + |
| 433 | +static inline void *memblock_alloc_low(phys_addr_t size, |
| 434 | + phys_addr_t align) |
| 435 | +{ |
| 436 | + return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT, |
| 437 | + ARCH_LOW_ADDRESS_LIMIT, NUMA_NO_NODE); |
| 438 | +} |
| 439 | + |
| 440 | +static inline void *memblock_alloc_node(phys_addr_t size, |
| 441 | + phys_addr_t align, int nid) |
| 442 | +{ |
| 443 | + return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT, |
| 444 | + MEMBLOCK_ALLOC_ACCESSIBLE, nid); |
| 445 | +} |
| 446 | + |
| 447 | +static inline void memblock_free_early(phys_addr_t base, |
| 448 | + phys_addr_t size) |
| 449 | +{ |
| 450 | + memblock_free(base, size); |
| 451 | +} |
| 452 | + |
| 453 | +static inline void memblock_free_early_nid(phys_addr_t base, |
| 454 | + phys_addr_t size, int nid) |
| 455 | +{ |
| 456 | + memblock_free(base, size); |
| 457 | +} |
| 458 | + |
| 459 | +static inline void memblock_free_late(phys_addr_t base, phys_addr_t size) |
| 460 | +{ |
| 461 | + __memblock_free_late(base, size); |
| 462 | +} |
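All of these void * wrappers funnel into memblock_alloc_try_nid(): the memory is zeroed (except for the _raw variants) and returned as a virtual address, with NULL on failure. A minimal sketch of the usual early-boot pattern; foo_table and foo_table_init are hypothetical:

```c
/* Sketch: allocate a zeroed, cache-line aligned table before the page
 * allocator is up; memblock_alloc() returns NULL on failure. */
static unsigned long *foo_table;	/* hypothetical consumer */

static void __init foo_table_init(unsigned int nr_entries)
{
	foo_table = memblock_alloc(nr_entries * sizeof(*foo_table),
				   SMP_CACHE_BYTES);
	if (!foo_table)
		panic("%s: failed to allocate %u entries\n",
		      __func__, nr_entries);
}
```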
309 | 463 | |
310 | 464 | /* |
311 | 465 | * Set the allocation direction to bottom-up or top-down. |
312 | 466 | */ |
313 | | -static inline void __init memblock_set_bottom_up(bool enable) |
| 467 | +static inline __init_memblock void memblock_set_bottom_up(bool enable) |
314 | 468 | { |
315 | 469 | memblock.bottom_up = enable; |
316 | 470 | } |
.. | .. |
320 | 474 | * if this is true, that said, memblock will allocate memory |
321 | 475 | * in bottom-up direction. |
322 | 476 | */ |
323 | | -static inline bool memblock_bottom_up(void) |
| 477 | +static inline __init_memblock bool memblock_bottom_up(void) |
324 | 478 | { |
325 | 479 | return memblock.bottom_up; |
326 | 480 | } |
327 | 481 | |
328 | | -/* Flags for memblock_alloc_base() amd __memblock_alloc_base() */ |
329 | | -#define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0) |
330 | | -#define MEMBLOCK_ALLOC_ACCESSIBLE 0 |
331 | | -#define MEMBLOCK_ALLOC_KASAN 1 |
332 | | - |
333 | | -phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align, |
334 | | - phys_addr_t start, phys_addr_t end, |
335 | | - enum memblock_flags flags); |
336 | | -phys_addr_t memblock_alloc_base_nid(phys_addr_t size, |
337 | | - phys_addr_t align, phys_addr_t max_addr, |
338 | | - int nid, enum memblock_flags flags); |
339 | | -phys_addr_t memblock_alloc_base(phys_addr_t size, phys_addr_t align, |
340 | | - phys_addr_t max_addr); |
341 | | -phys_addr_t __memblock_alloc_base(phys_addr_t size, phys_addr_t align, |
342 | | - phys_addr_t max_addr); |
343 | 482 | phys_addr_t memblock_phys_mem_size(void); |
344 | 483 | phys_addr_t memblock_reserved_size(void); |
345 | | -phys_addr_t memblock_mem_size(unsigned long limit_pfn); |
346 | 484 | phys_addr_t memblock_start_of_DRAM(void); |
347 | 485 | phys_addr_t memblock_end_of_DRAM(void); |
348 | 486 | void memblock_enforce_memory_limit(phys_addr_t memory_limit); |
.. | .. |
353 | 491 | bool memblock_is_region_memory(phys_addr_t base, phys_addr_t size); |
354 | 492 | bool memblock_is_reserved(phys_addr_t addr); |
355 | 493 | bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size); |
356 | | -bool memblock_overlaps_memory(phys_addr_t base, phys_addr_t size); |
| 494 | +bool memblock_is_nomap_remove(void); |
357 | 495 | |
358 | | -extern void __memblock_dump_all(void); |
359 | | - |
360 | | -static inline void memblock_dump_all(void) |
361 | | -{ |
362 | | - if (memblock_debug) |
363 | | - __memblock_dump_all(); |
364 | | -} |
| 496 | +void memblock_dump_all(void); |
365 | 497 | |
366 | 498 | /** |
367 | 499 | * memblock_set_current_limit - Set the current allocation limit to allow |
.. | .. |
426 | 558 | return PFN_UP(reg->base + reg->size); |
427 | 559 | } |
428 | 560 | |
429 | | -#define for_each_memblock(memblock_type, region) \ |
430 | | - for (region = memblock.memblock_type.regions; \ |
431 | | - region < (memblock.memblock_type.regions + memblock.memblock_type.cnt); \ |
| 561 | +/** |
| 562 | + * for_each_mem_region - iterate over memory regions |
| 563 | + * @region: loop variable |
| 564 | + */ |
| 565 | +#define for_each_mem_region(region) \ |
| 566 | + for (region = memblock.memory.regions; \ |
| 567 | + region < (memblock.memory.regions + memblock.memory.cnt); \ |
432 | 568 | region++) |
433 | 569 | |
434 | | -#define for_each_memblock_type(i, memblock_type, rgn) \ |
435 | | - for (i = 0, rgn = &memblock_type->regions[0]; \ |
436 | | - i < memblock_type->cnt; \ |
437 | | - i++, rgn = &memblock_type->regions[i]) |
| 570 | +/** |
| 571 | + * for_each_reserved_mem_region - iterate over reserved memory regions |
| 572 | + * @region: loop variable |
| 573 | + */ |
| 574 | +#define for_each_reserved_mem_region(region) \ |
| 575 | + for (region = memblock.reserved.regions; \ |
| 576 | + region < (memblock.reserved.regions + memblock.reserved.cnt); \ |
| 577 | + region++) |
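These wrappers replace the old for_each_memblock(memory, r) and for_each_memblock(reserved, r) spellings and expose the raw struct memblock_region, flags and nid included. A minimal sketch that skips MEMBLOCK_NOMAP ranges; report_mapped_regions is a hypothetical helper:

```c
/* Sketch: walk the raw regions when per-region flags matter, here
 * skipping ranges marked MEMBLOCK_NOMAP. */
static void __init report_mapped_regions(void)
{
	struct memblock_region *r;
	phys_addr_t end;

	for_each_mem_region(r) {
		if (memblock_is_nomap(r))
			continue;
		end = r->base + r->size;
		pr_info("mapped: [%pa..%pa)\n", &r->base, &end);
	}
}
```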
| 578 | + |
| 579 | +extern void *alloc_large_system_hash(const char *tablename, |
| 580 | + unsigned long bucketsize, |
| 581 | + unsigned long numentries, |
| 582 | + int scale, |
| 583 | + int flags, |
| 584 | + unsigned int *_hash_shift, |
| 585 | + unsigned int *_hash_mask, |
| 586 | + unsigned long low_limit, |
| 587 | + unsigned long high_limit); |
| 588 | + |
| 589 | +#define HASH_EARLY 0x00000001 /* Allocating during early boot? */ |
| 590 | +#define HASH_SMALL 0x00000002 /* sub-page allocation allowed, min |
| 591 | + * shift passed via *_hash_shift */ |
| 592 | +#define HASH_ZERO 0x00000004 /* Zero allocated hash table */ |
| 593 | + |
| 594 | +/* Only NUMA needs hash distribution. 64bit NUMA architectures have |
| 595 | + * sufficient vmalloc space. |
| 596 | + */ |
| 597 | +#ifdef CONFIG_NUMA |
| 598 | +#define HASHDIST_DEFAULT IS_ENABLED(CONFIG_64BIT) |
| 599 | +extern int hashdist; /* Distribute hashes across NUMA nodes? */ |
| 600 | +#else |
| 601 | +#define hashdist (0) |
| 602 | +#endif |
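alloc_large_system_hash() sizes boot-time hash tables (inode, dentry, and similar caches) from the amount of memory in the machine, honoring hashdist on NUMA. A minimal sketch modeled on those callers; the table name, scale value, and foo_* identifiers are illustrative:

```c
/* Sketch: allocate a zeroed hash table at early boot, auto-sized from
 * total memory (numentries == 0); the chosen log2 of the table size
 * comes back through foo_hash_shift. */
static struct hlist_head *foo_hash;
static unsigned int foo_hash_shift;

static void __init foo_hash_init(void)
{
	foo_hash = alloc_large_system_hash("foo-cache",
					   sizeof(struct hlist_head),
					   0,		/* auto-size */
					   14,		/* scale */
					   HASH_EARLY | HASH_ZERO,
					   &foo_hash_shift,
					   NULL,	/* mask not needed */
					   0, 0);	/* no pinned limits */
}
```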
438 | 603 | |
439 | 604 | #ifdef CONFIG_MEMTEST |
440 | 605 | extern void early_memtest(phys_addr_t start, phys_addr_t end); |
.. | .. |
443 | 608 | { |
444 | 609 | } |
445 | 610 | #endif |
446 | | -#else |
447 | | -static inline phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align) |
448 | | -{ |
449 | | - return 0; |
450 | | -} |
451 | | -#endif /* CONFIG_HAVE_MEMBLOCK */ |
452 | 611 | |
453 | 612 | #endif /* __KERNEL__ */ |
454 | 613 | |