2023-12-09  b22da3d8526a935aa31e086e63f60ff3246cb61c
--- a/kernel/include/linux/memblock.h
+++ b/kernel/include/linux/memblock.h
@@ -1,24 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 #ifndef _LINUX_MEMBLOCK_H
 #define _LINUX_MEMBLOCK_H
 #ifdef __KERNEL__
 
-#ifdef CONFIG_HAVE_MEMBLOCK
 /*
  * Logical memory blocks.
  *
  * Copyright (C) 2001 Peter Bergner, IBM Corp.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
  */
 
 #include <linux/init.h>
 #include <linux/mm.h>
+#include <asm/dma.h>
 
-#define INIT_MEMBLOCK_REGIONS	128
-#define INIT_PHYSMEM_REGIONS	4
+extern unsigned long max_low_pfn;
+extern unsigned long min_low_pfn;
+
+/*
+ * highest page
+ */
+extern unsigned long max_pfn;
+/*
+ * highest possible page
+ */
+extern unsigned long long max_possible_pfn;
+
+#ifdef CONFIG_ROCKCHIP_THUNDER_BOOT
+extern int defer_free_memblock(void *unused);
+#endif
 
 /**
  * enum memblock_flags - definition of memory region attributes
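Note: the pfn limits declared above (min_low_pfn, max_low_pfn, max_pfn, max_possible_pfn) appear to come over from the now-removed <linux/bootmem.h>, making memblock.h the single early-memory header. A minimal sketch of how arch code typically consumes them (illustrative only, not part of this patch):

        /* PFN_PHYS() is from <linux/pfn.h>; max_pfn is the highest page. */
        phys_addr_t end_of_ram = PFN_PHYS(max_pfn);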
@@ -36,7 +45,7 @@
 
 /**
  * struct memblock_region - represents a memory region
- * @base: physical address of the region
+ * @base: base address of the region
  * @size: size of the region
  * @flags: memory region attributes
  * @nid: NUMA node id
@@ -45,7 +54,7 @@
 	phys_addr_t base;
 	phys_addr_t size;
 	enum memblock_flags flags;
-#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
+#ifdef CONFIG_NEED_MULTIPLE_NODES
 	int nid;
 #endif
 };
@@ -70,38 +79,28 @@
  * struct memblock - memblock allocator metadata
  * @bottom_up: is bottom up direction?
  * @current_limit: physical address of the current allocation limit
- * @memory: usabe memory regions
+ * @memory: usable memory regions
  * @reserved: reserved memory regions
- * @physmem: all physical memory
  */
 struct memblock {
 	bool bottom_up;  /* is bottom up direction? */
 	phys_addr_t current_limit;
 	struct memblock_type memory;
 	struct memblock_type reserved;
-#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
-	struct memblock_type physmem;
-#endif
 };
 
 extern struct memblock memblock;
-extern int memblock_debug;
 
-#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
+#ifndef CONFIG_ARCH_KEEP_MEMBLOCK
 #define __init_memblock __meminit
 #define __initdata_memblock __meminitdata
 void memblock_discard(void);
 #else
 #define __init_memblock
 #define __initdata_memblock
+static inline void memblock_discard(void) {}
 #endif
 
-#define memblock_dbg(fmt, ...) \
-	if (memblock_debug) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
-
-phys_addr_t memblock_find_in_range_node(phys_addr_t size, phys_addr_t align,
-					phys_addr_t start, phys_addr_t end,
-					int nid, enum memblock_flags flags);
 phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
 				   phys_addr_t size, phys_addr_t align);
 void memblock_allow_resize(void);
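Note: with CONFIG_ARCH_DISCARD_MEMBLOCK inverted into CONFIG_ARCH_KEEP_MEMBLOCK, the #else branch now supplies an empty static inline stub, so call sites can invoke memblock_discard() unconditionally instead of wrapping it in #ifdef. A sketch of the pattern this enables (the caller below is hypothetical):

        /* No #ifdef needed at the call site any more: this compiles to
         * nothing when CONFIG_ARCH_KEEP_MEMBLOCK is set. */
        void __init arch_mem_cleanup(void)
        {
                memblock_discard();
        }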
@@ -110,6 +109,9 @@
 int memblock_remove(phys_addr_t base, phys_addr_t size);
 int memblock_free(phys_addr_t base, phys_addr_t size);
 int memblock_reserve(phys_addr_t base, phys_addr_t size);
+#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
+int memblock_physmem_add(phys_addr_t base, phys_addr_t size);
+#endif
 void memblock_trim_memory(phys_addr_t align);
 bool memblock_overlaps_region(struct memblock_type *type,
 			      phys_addr_t base, phys_addr_t size);
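Note: these are the core region-manipulation entry points; memblock_physmem_add() is new and feeds the separate physmem map kept under CONFIG_HAVE_MEMBLOCK_PHYS_MAP. A minimal sketch of typical early arch setup built on them; the addresses and sizes are invented for illustration:

        /* Register a bank of RAM, then carve out a firmware-owned chunk.
         * SZ_* constants come from <linux/sizes.h>. */
        memblock_add(0x80000000, SZ_512M);
        memblock_reserve(0x80000000, SZ_1M);    /* e.g. secure firmware */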
@@ -118,13 +120,12 @@
 int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
 int memblock_mark_nomap(phys_addr_t base, phys_addr_t size);
 int memblock_clear_nomap(phys_addr_t base, phys_addr_t size);
-enum memblock_flags choose_memblock_flags(void);
+
+unsigned long memblock_free_all(void);
+void reset_node_managed_pages(pg_data_t *pgdat);
+void reset_all_zones_managed_pages(void);
 
 /* Low level functions */
-int memblock_add_range(struct memblock_type *type,
-		       phys_addr_t base, phys_addr_t size,
-		       int nid, enum memblock_flags flags);
-
 void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags,
 		      struct memblock_type *type_a,
 		      struct memblock_type *type_b, phys_addr_t *out_start,
@@ -135,15 +136,34 @@
 		      struct memblock_type *type_b, phys_addr_t *out_start,
 		      phys_addr_t *out_end, int *out_nid);
 
-void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start,
-				phys_addr_t *out_end);
-
-void __memblock_free_early(phys_addr_t base, phys_addr_t size);
 void __memblock_free_late(phys_addr_t base, phys_addr_t size);
-void create_pgtable_mapping(phys_addr_t start, phys_addr_t end);
+
+#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
+static inline void __next_physmem_range(u64 *idx, struct memblock_type *type,
+					phys_addr_t *out_start,
+					phys_addr_t *out_end)
+{
+	extern struct memblock_type physmem;
+
+	__next_mem_range(idx, NUMA_NO_NODE, MEMBLOCK_NONE, &physmem, type,
+			 out_start, out_end, NULL);
+}
 
 /**
- * for_each_mem_range - iterate through memblock areas from type_a and not
+ * for_each_physmem_range - iterate through physmem areas not included in type.
+ * @i: u64 used as loop variable
+ * @type: ptr to memblock_type which excludes from the iteration, can be %NULL
+ * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
+ * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
+ */
+#define for_each_physmem_range(i, type, p_start, p_end)			\
+	for (i = 0, __next_physmem_range(&i, type, p_start, p_end);	\
+	     i != (u64)ULLONG_MAX;					\
+	     __next_physmem_range(&i, type, p_start, p_end))
+#endif /* CONFIG_HAVE_MEMBLOCK_PHYS_MAP */
+
+/**
+ * __for_each_mem_range - iterate through memblock areas from type_a and not
  * included in type_b. Or just type_a if type_b is NULL.
  * @i: u64 used as loop variable
  * @type_a: ptr to memblock_type to iterate
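Note: the physmem map (all detected physical memory) is only kept when CONFIG_HAVE_MEMBLOCK_PHYS_MAP is selected, which in practice is s390. A hedged usage sketch; passing a NULL type excludes nothing:

        u64 i;
        phys_addr_t start, end;

        for_each_physmem_range(i, NULL, &start, &end)
                pr_info("physmem: %pa..%pa\n", &start, &end);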
@@ -154,7 +174,7 @@
  * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
  * @p_nid: ptr to int for nid of the range, can be %NULL
  */
-#define for_each_mem_range(i, type_a, type_b, nid, flags,		\
+#define __for_each_mem_range(i, type_a, type_b, nid, flags,		\
 			   p_start, p_end, p_nid)			\
 	for (i = 0, __next_mem_range(&i, nid, flags, type_a, type_b,	\
 				     p_start, p_end, p_nid);		\
@@ -163,7 +183,7 @@
 			      p_start, p_end, p_nid))
 
 /**
- * for_each_mem_range_rev - reverse iterate through memblock areas from
+ * __for_each_mem_range_rev - reverse iterate through memblock areas from
  * type_a and not included in type_b. Or just type_a if type_b is NULL.
  * @i: u64 used as loop variable
  * @type_a: ptr to memblock_type to iterate
@@ -174,17 +194,38 @@
  * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
  * @p_nid: ptr to int for nid of the range, can be %NULL
  */
-#define for_each_mem_range_rev(i, type_a, type_b, nid, flags,		\
-			       p_start, p_end, p_nid)			\
+#define __for_each_mem_range_rev(i, type_a, type_b, nid, flags,	\
+				 p_start, p_end, p_nid)			\
 	for (i = (u64)ULLONG_MAX,					\
-		     __next_mem_range_rev(&i, nid, flags, type_a, type_b,\
+		     __next_mem_range_rev(&i, nid, flags, type_a, type_b, \
 				      p_start, p_end, p_nid);		\
 	     i != (u64)ULLONG_MAX;					\
 	     __next_mem_range_rev(&i, nid, flags, type_a, type_b,	\
 				  p_start, p_end, p_nid))
 
 /**
- * for_each_reserved_mem_region - iterate over all reserved memblock areas
+ * for_each_mem_range - iterate through memory areas.
+ * @i: u64 used as loop variable
+ * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
+ * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
+ */
+#define for_each_mem_range(i, p_start, p_end) \
+	__for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE,	\
+			     MEMBLOCK_HOTPLUG, p_start, p_end, NULL)
+
+/**
+ * for_each_mem_range_rev - reverse iterate through memblock areas from
+ * type_a and not included in type_b. Or just type_a if type_b is NULL.
+ * @i: u64 used as loop variable
+ * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
+ * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
+ */
+#define for_each_mem_range_rev(i, p_start, p_end) \
+	__for_each_mem_range_rev(i, &memblock.memory, NULL, NUMA_NO_NODE, \
+				 MEMBLOCK_HOTPLUG, p_start, p_end, NULL)
+
+/**
+ * for_each_reserved_mem_range - iterate over all reserved memblock areas
  * @i: u64 used as loop variable
  * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
  * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
@@ -192,10 +233,9 @@
  * Walks over reserved areas of memblock. Available as soon as memblock
  * is initialized.
  */
-#define for_each_reserved_mem_region(i, p_start, p_end)			\
-	for (i = 0UL, __next_reserved_mem_region(&i, p_start, p_end);	\
-	     i != (u64)ULLONG_MAX;					\
-	     __next_reserved_mem_region(&i, p_start, p_end))
+#define for_each_reserved_mem_range(i, p_start, p_end)			\
+	__for_each_mem_range(i, &memblock.reserved, NULL, NUMA_NO_NODE,	\
+			     MEMBLOCK_NONE, p_start, p_end, NULL)
 
 static inline bool memblock_is_hotpluggable(struct memblock_region *m)
 {
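Note: the old eight-argument iterators become double-underscore internals, and the common "walk all memory" case gets three-argument wrappers that hide &memblock.memory and the flag defaults. A before/after sketch of a call site (the caller is hypothetical):

        u64 i;
        phys_addr_t start, end;

        /* Before this patch a caller spelled everything out, e.g.:
         *   for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE,
         *                      MEMBLOCK_NONE, &start, &end, NULL)
         */

        /* After: the wrapper supplies the defaults. */
        for_each_mem_range(i, &start, &end)
                pr_info("memory:   %pa..%pa\n", &start, &end);

        for_each_reserved_mem_range(i, &start, &end)
                pr_info("reserved: %pa..%pa\n", &start, &end);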
@@ -212,7 +252,6 @@
 	return m->flags & MEMBLOCK_NOMAP;
 }
 
-#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
 int memblock_search_pfn_nid(unsigned long pfn, unsigned long *start_pfn,
 			    unsigned long *end_pfn);
 void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
@@ -231,7 +270,50 @@
 #define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid)		\
 	for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \
 	     i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))
-#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
+
+#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
+void __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
+				  unsigned long *out_spfn,
+				  unsigned long *out_epfn);
+/**
+ * for_each_free_mem_range_in_zone - iterate through zone specific free
+ * memblock areas
+ * @i: u64 used as loop variable
+ * @zone: zone in which all of the memory blocks reside
+ * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
+ * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
+ *
+ * Walks over free (memory && !reserved) areas of memblock in a specific
+ * zone. Available once memblock and an empty zone is initialized. The main
+ * assumption is that the zone start, end, and pgdat have been associated.
+ * This way we can use the zone to determine NUMA node, and if a given part
+ * of the memblock is valid for the zone.
+ */
+#define for_each_free_mem_pfn_range_in_zone(i, zone, p_start, p_end)	\
+	for (i = 0,							\
+	     __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end);	\
+	     i != U64_MAX;						\
+	     __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end))
+
+/**
+ * for_each_free_mem_range_in_zone_from - iterate through zone specific
+ * free memblock areas from a given point
+ * @i: u64 used as loop variable
+ * @zone: zone in which all of the memory blocks reside
+ * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
+ * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
+ *
+ * Walks over free (memory && !reserved) areas of memblock in a specific
+ * zone, continuing from current position. Available as soon as memblock is
+ * initialized.
+ */
+#define for_each_free_mem_pfn_range_in_zone_from(i, zone, p_start, p_end) \
+	for (; i != U64_MAX;						\
+	     __next_mem_pfn_range_in_zone(&i, zone, p_start, p_end))
+
+int __init deferred_page_init_max_threads(const struct cpumask *node_cpumask);
+
+#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
 
 /**
  * for_each_free_mem_range - iterate through free memblock areas
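Note: the CONFIG_HAVE_MEMBLOCK_NODE_MAP guard disappears because that option was removed upstream (the node map is now unconditional), and a new CONFIG_DEFERRED_STRUCT_PAGE_INIT block adds zone-scoped iterators so deferred struct-page initialization can walk only the free PFNs of one zone. A hedged sketch of the consuming loop, loosely modeled on mm/page_alloc.c and heavily simplified:

        u64 i;
        unsigned long spfn, epfn;

        for_each_free_mem_pfn_range_in_zone(i, zone, &spfn, &epfn) {
                /* Real code initializes struct pages for spfn..epfn here;
                 * this body is illustrative only. */
                pr_debug("zone free pfns: %lx-%lx\n", spfn, epfn);
        }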
@@ -246,8 +328,8 @@
  * soon as memblock is initialized.
  */
 #define for_each_free_mem_range(i, nid, flags, p_start, p_end, p_nid)	\
-	for_each_mem_range(i, &memblock.memory, &memblock.reserved,	\
-			   nid, flags, p_start, p_end, p_nid)
+	__for_each_mem_range(i, &memblock.memory, &memblock.reserved,	\
+			     nid, flags, p_start, p_end, p_nid)
 
 /**
  * for_each_free_mem_range_reverse - rev-iterate through free memblock areas
@@ -263,25 +345,13 @@
  */
 #define for_each_free_mem_range_reverse(i, nid, flags, p_start, p_end,	\
 					p_nid)				\
-	for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved,	\
-			       nid, flags, p_start, p_end, p_nid)
+	__for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved, \
+				 nid, flags, p_start, p_end, p_nid)
 
-static inline void memblock_set_region_flags(struct memblock_region *r,
-					     enum memblock_flags flags)
-{
-	r->flags |= flags;
-}
-
-static inline void memblock_clear_region_flags(struct memblock_region *r,
-					       enum memblock_flags flags)
-{
-	r->flags &= ~flags;
-}
-
-#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
 int memblock_set_node(phys_addr_t base, phys_addr_t size,
 		      struct memblock_type *type, int nid);
 
+#ifdef CONFIG_NEED_MULTIPLE_NODES
 static inline void memblock_set_region_node(struct memblock_region *r, int nid)
 {
 	r->nid = nid;
@@ -300,17 +370,101 @@
 {
 	return 0;
 }
-#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
+#endif /* CONFIG_NEED_MULTIPLE_NODES */
 
-phys_addr_t memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid);
-phys_addr_t memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);
+/* Flags for memblock allocation APIs */
+#define MEMBLOCK_ALLOC_ANYWHERE	(~(phys_addr_t)0)
+#define MEMBLOCK_ALLOC_ACCESSIBLE	0
+#define MEMBLOCK_ALLOC_KASAN		1
 
-phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align);
+/* We are using top down, so it is safe to use 0 here */
+#define MEMBLOCK_LOW_LIMIT 0
+
+#ifndef ARCH_LOW_ADDRESS_LIMIT
+#define ARCH_LOW_ADDRESS_LIMIT  0xffffffffUL
+#endif
+
+phys_addr_t memblock_phys_alloc_range(phys_addr_t size, phys_addr_t align,
+				      phys_addr_t start, phys_addr_t end);
+phys_addr_t memblock_alloc_range_nid(phys_addr_t size,
+				     phys_addr_t align, phys_addr_t start,
+				     phys_addr_t end, int nid, bool exact_nid);
+phys_addr_t memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid);
+
+static __always_inline phys_addr_t memblock_phys_alloc(phys_addr_t size,
+						       phys_addr_t align)
+{
+	return memblock_phys_alloc_range(size, align, 0,
+					 MEMBLOCK_ALLOC_ACCESSIBLE);
+}
+
+void *memblock_alloc_exact_nid_raw(phys_addr_t size, phys_addr_t align,
+				   phys_addr_t min_addr, phys_addr_t max_addr,
+				   int nid);
+void *memblock_alloc_try_nid_raw(phys_addr_t size, phys_addr_t align,
+				 phys_addr_t min_addr, phys_addr_t max_addr,
+				 int nid);
+void *memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align,
+			     phys_addr_t min_addr, phys_addr_t max_addr,
+			     int nid);
+
+static __always_inline void *memblock_alloc(phys_addr_t size, phys_addr_t align)
+{
+	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
+				      MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
+}
+
+static inline void *memblock_alloc_raw(phys_addr_t size,
+				       phys_addr_t align)
+{
+	return memblock_alloc_try_nid_raw(size, align, MEMBLOCK_LOW_LIMIT,
+					  MEMBLOCK_ALLOC_ACCESSIBLE,
+					  NUMA_NO_NODE);
+}
+
+static inline void *memblock_alloc_from(phys_addr_t size,
+					phys_addr_t align,
+					phys_addr_t min_addr)
+{
+	return memblock_alloc_try_nid(size, align, min_addr,
+				      MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
+}
+
+static inline void *memblock_alloc_low(phys_addr_t size,
+				       phys_addr_t align)
+{
+	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
+				      ARCH_LOW_ADDRESS_LIMIT, NUMA_NO_NODE);
+}
+
+static inline void *memblock_alloc_node(phys_addr_t size,
+					phys_addr_t align, int nid)
+{
+	return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
+				      MEMBLOCK_ALLOC_ACCESSIBLE, nid);
+}
+
+static inline void memblock_free_early(phys_addr_t base,
+				       phys_addr_t size)
+{
+	memblock_free(base, size);
+}
+
+static inline void memblock_free_early_nid(phys_addr_t base,
+					   phys_addr_t size, int nid)
+{
+	memblock_free(base, size);
+}
+
+static inline void memblock_free_late(phys_addr_t base, phys_addr_t size)
+{
+	__memblock_free_late(base, size);
+}
 
 /*
  * Set the allocation direction to bottom-up or top-down.
  */
-static inline void __init memblock_set_bottom_up(bool enable)
+static inline __init_memblock void memblock_set_bottom_up(bool enable)
 {
 	memblock.bottom_up = enable;
 }
320474 * if this is true, that said, memblock will allocate memory
321475 * in bottom-up direction.
322476 */
323
-static inline bool memblock_bottom_up(void)
477
+static inline __init_memblock bool memblock_bottom_up(void)
324478 {
325479 return memblock.bottom_up;
326480 }
327481
328
-/* Flags for memblock_alloc_base() amd __memblock_alloc_base() */
329
-#define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0)
330
-#define MEMBLOCK_ALLOC_ACCESSIBLE 0
331
-#define MEMBLOCK_ALLOC_KASAN 1
332
-
333
-phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
334
- phys_addr_t start, phys_addr_t end,
335
- enum memblock_flags flags);
336
-phys_addr_t memblock_alloc_base_nid(phys_addr_t size,
337
- phys_addr_t align, phys_addr_t max_addr,
338
- int nid, enum memblock_flags flags);
339
-phys_addr_t memblock_alloc_base(phys_addr_t size, phys_addr_t align,
340
- phys_addr_t max_addr);
341
-phys_addr_t __memblock_alloc_base(phys_addr_t size, phys_addr_t align,
342
- phys_addr_t max_addr);
343482 phys_addr_t memblock_phys_mem_size(void);
344483 phys_addr_t memblock_reserved_size(void);
345
-phys_addr_t memblock_mem_size(unsigned long limit_pfn);
346484 phys_addr_t memblock_start_of_DRAM(void);
347485 phys_addr_t memblock_end_of_DRAM(void);
348486 void memblock_enforce_memory_limit(phys_addr_t memory_limit);
@@ -353,15 +491,9 @@
 bool memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
 bool memblock_is_reserved(phys_addr_t addr);
 bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);
-bool memblock_overlaps_memory(phys_addr_t base, phys_addr_t size);
+bool memblock_is_nomap_remove(void);
 
-extern void __memblock_dump_all(void);
-
-static inline void memblock_dump_all(void)
-{
-	if (memblock_debug)
-		__memblock_dump_all();
-}
+void memblock_dump_all(void);
 
 /**
  * memblock_set_current_limit - Set the current allocation limit to allow
@@ -426,15 +558,48 @@
 	return PFN_UP(reg->base + reg->size);
 }
 
-#define for_each_memblock(memblock_type, region)				\
-	for (region = memblock.memblock_type.regions;				\
-	     region < (memblock.memblock_type.regions + memblock.memblock_type.cnt); \
+/**
+ * for_each_mem_region - iterate over memory regions
+ * @region: loop variable
+ */
+#define for_each_mem_region(region)					\
+	for (region = memblock.memory.regions;				\
+	     region < (memblock.memory.regions + memblock.memory.cnt);	\
 	     region++)
 
-#define for_each_memblock_type(i, memblock_type, rgn)			\
-	for (i = 0, rgn = &memblock_type->regions[0];			\
-	     i < memblock_type->cnt;					\
-	     i++, rgn = &memblock_type->regions[i])
+/**
+ * for_each_reserved_mem_region - iterate over reserved memory regions
+ * @region: loop variable
+ */
+#define for_each_reserved_mem_region(region)				\
+	for (region = memblock.reserved.regions;			\
+	     region < (memblock.reserved.regions + memblock.reserved.cnt); \
+	     region++)
+
+extern void *alloc_large_system_hash(const char *tablename,
+				     unsigned long bucketsize,
+				     unsigned long numentries,
+				     int scale,
+				     int flags,
+				     unsigned int *_hash_shift,
+				     unsigned int *_hash_mask,
+				     unsigned long low_limit,
+				     unsigned long high_limit);
+
+#define HASH_EARLY	0x00000001	/* Allocating during early boot? */
+#define HASH_SMALL	0x00000002	/* sub-page allocation allowed, min
+					 * shift passed via *_hash_shift */
+#define HASH_ZERO	0x00000004	/* Zero allocated hash table */
+
+/* Only NUMA needs hash distribution. 64bit NUMA architectures have
+ * sufficient vmalloc space.
+ */
+#ifdef CONFIG_NUMA
+#define HASHDIST_DEFAULT IS_ENABLED(CONFIG_64BIT)
+extern int hashdist;		/* Distribute hashes across NUMA nodes? */
+#else
+#define hashdist (0)
+#endif
 
 #ifdef CONFIG_MEMTEST
 extern void early_memtest(phys_addr_t start, phys_addr_t end);
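Note: alloc_large_system_hash() and its HASH_* flags (plus the hashdist knob) move into this header as part of the bootmem.h removal. A hedged sketch of the canonical call, loosely following how early system hash tables are sized; the table name and scale value are illustrative:

        static unsigned int example_shift __initdata;
        static unsigned int example_mask __initdata;

        void *table = alloc_large_system_hash("example-cache",
                                              sizeof(struct hlist_head),
                                              0,        /* numentries: auto-size from RAM */
                                              14,       /* scale: controls auto-sizing */
                                              HASH_EARLY | HASH_ZERO,
                                              &example_shift,
                                              &example_mask,
                                              0, 0);    /* no explicit limits */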
@@ -443,12 +608,6 @@
 {
 }
 #endif
-#else
-static inline phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align)
-{
-	return 0;
-}
-#endif /* CONFIG_HAVE_MEMBLOCK */
 
 #endif /* __KERNEL__ */
 