forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-10 23fa18eaa71266feff7ba8d83022d9e1cc83c65a
kernel/arch/ia64/mm/discontig.c
@@ -19,13 +19,11 @@
 #include <linux/mm.h>
 #include <linux/nmi.h>
 #include <linux/swap.h>
-#include <linux/bootmem.h>
 #include <linux/memblock.h>
 #include <linux/acpi.h>
 #include <linux/efi.h>
 #include <linux/nodemask.h>
 #include <linux/slab.h>
-#include <asm/pgalloc.h>
 #include <asm/tlb.h>
 #include <asm/meminit.h>
 #include <asm/numa.h>
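Note: <linux/bootmem.h> disappears because upstream folded the bootmem allocator into memblock and deleted the header (around v4.20); <asm/pgalloc.h> is simply no longer used by this file. A minimal sketch of the memblock-era replacement pattern for early-boot allocations (the function name is illustrative; memblock_alloc() and its NULL-on-failure contract are the real API, assuming a post-bootmem kernel):

    #include <linux/memblock.h>

    static void __init early_buf_sketch(unsigned long size)
    {
            /* memblock_alloc() returns zeroed memory, or NULL on failure;
             * unlike old bootmem it does not panic internally. */
            void *buf = memblock_alloc(size, SMP_CACHE_BYTES);

            if (!buf)
                    panic("%s: failed to allocate %lu bytes\n", __func__, size);
    }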
@@ -181,13 +179,13 @@
 void __init setup_per_cpu_areas(void)
 {
         struct pcpu_alloc_info *ai;
-        struct pcpu_group_info *uninitialized_var(gi);
+        struct pcpu_group_info *gi;
         unsigned int *cpu_map;
         void *base;
         unsigned long base_offset;
         unsigned int cpu;
         ssize_t static_size, reserved_size, dyn_size;
-        int node, prev_node, unit, nr_units, rc;
+        int node, prev_node, unit, nr_units;
 
         ai = pcpu_alloc_alloc_info(MAX_NUMNODES, nr_cpu_ids);
         if (!ai)
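Note: two declaration changes track upstream churn. uninitialized_var() was removed tree-wide (around v5.9) because it muted -Wmaybe-uninitialized without initializing anything, hiding real bugs; and rc is gone because pcpu_setup_first_chunk() no longer returns a value (see two hunks down). For context, the old macro was defined roughly as a self-assignment:

    /* Former definition (removed upstream): */
    #define uninitialized_var(x) x = x

    /* So the removed line expanded to "struct pcpu_group_info *gi = gi;",
     * which silenced the warning but left gi uninitialized all the same.
     * The plain declaration lets the compiler do honest flow analysis. */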
@@ -228,7 +226,7 @@
          * CPUs are put into groups according to node.  Walk cpu_map
          * and create new groups at node boundaries.
          */
-        prev_node = -1;
+        prev_node = NUMA_NO_NODE;
         ai->nr_groups = 0;
         for (unit = 0; unit < nr_units; unit++) {
                 cpu = cpu_map[unit];
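Note: here and in two later hunks, the bare -1 sentinel becomes NUMA_NO_NODE, which <linux/numa.h> defines as (-1). Behaviour is identical; the macro just states intent:

    #include <linux/numa.h>          /* #define NUMA_NO_NODE (-1) */

    int prev_node = NUMA_NO_NODE;    /* same value as the old "= -1" */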
@@ -246,10 +244,7 @@
                 gi->cpu_map = &cpu_map[unit];
         }
 
-        rc = pcpu_setup_first_chunk(ai, base);
-        if (rc)
-                panic("failed to setup percpu area (err=%d)", rc);
-
+        pcpu_setup_first_chunk(ai, base);
         pcpu_free_alloc_info(ai);
 }
 #endif
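Note: the rc/panic() sequence is dead code on current kernels because pcpu_setup_first_chunk() was changed upstream to return void, validating its inputs and panicking internally instead of reporting errors to the caller. The call site therefore reduces to (as in the hunk above):

    pcpu_setup_first_chunk(ai, base);   /* no return value to check */
    pcpu_free_alloc_info(ai);           /* ai is only needed during setup */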
@@ -397,8 +392,7 @@
  *
  * Each node's per-node area has a copy of the global pg_data_t list, so
  * we copy that to each node here, as well as setting the per-cpu pointer
- * to the local node data structure.  The active_cpus field of the per-node
- * structure gets setup by the platform_cpu_init() function later.
+ * to the local node data structure.
  */
 static void __init initialize_pernode_data(void)
 {
@@ -436,7 +430,7 @@
 {
         void *ptr = NULL;
         u8 best = 0xff;
-        int bestnode = -1, node, anynode = 0;
+        int bestnode = NUMA_NO_NODE, node, anynode = 0;
 
         for_each_online_node(node) {
                 if (node_isset(node, memory_less_mask))
@@ -448,11 +442,17 @@
                 anynode = node;
         }
 
-        if (bestnode == -1)
+        if (bestnode == NUMA_NO_NODE)
                 bestnode = anynode;
 
-        ptr = __alloc_bootmem_node(pgdat_list[bestnode], pernodesize,
-                PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
+        ptr = memblock_alloc_try_nid(pernodesize, PERCPU_PAGE_SIZE,
+                                     __pa(MAX_DMA_ADDRESS),
+                                     MEMBLOCK_ALLOC_ACCESSIBLE,
+                                     bestnode);
+        if (!ptr)
+                panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%lx\n",
+                      __func__, pernodesize, PERCPU_PAGE_SIZE, bestnode,
+                      __pa(MAX_DMA_ADDRESS));
 
         return ptr;
 }
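Note: the argument mapping from __alloc_bootmem_node(pgdat, size, align, goal) to memblock_alloc_try_nid(size, align, min_addr, max_addr, nid) is: goal becomes min_addr, MEMBLOCK_ALLOC_ACCESSIBLE means "no fixed upper bound", and the pgdat pointer becomes a plain node id. The one behavioural difference is that bootmem panicked internally on failure while memblock returns NULL, hence the explicit panic(). Annotated sketch of the call as used above:

    ptr = memblock_alloc_try_nid(pernodesize,                /* size */
                                 PERCPU_PAGE_SIZE,           /* align */
                                 __pa(MAX_DMA_ADDRESS),      /* min_addr (old "goal") */
                                 MEMBLOCK_ALLOC_ACCESSIBLE,  /* no max_addr cap */
                                 bestnode);                  /* preferred nid */
    if (!ptr)   /* failure is reported via NULL, not an internal panic */
            panic("pernode allocation failed");   /* message shortened for the sketch */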
@@ -600,7 +600,6 @@
 
         max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
 
-        sparse_memory_present_with_active_regions(MAX_NUMNODES);
         sparse_init();
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
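Note: the separate sparse_memory_present_with_active_regions() pass was dropped upstream (around v5.9) when sparse_init() was taught to walk memblock and mark present sections itself, so the single sparse_init() call is now sufficient.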
@@ -626,7 +625,7 @@
         max_zone_pfns[ZONE_DMA32] = max_dma;
 #endif
         max_zone_pfns[ZONE_NORMAL] = max_pfn;
-        free_area_init_nodes(max_zone_pfns);
+        free_area_init(max_zone_pfns);
 
         zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
 }
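Note: free_area_init_nodes() was consolidated into free_area_init() upstream (around v5.8); the new function takes the same per-zone maximum-PFN array and iterates over all online nodes itself, so the semantics here are unchanged.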
@@ -655,7 +654,7 @@
 int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
                                struct vmem_altmap *altmap)
 {
-        return vmemmap_populate_basepages(start, end, node);
+        return vmemmap_populate_basepages(start, end, node, NULL);
 }
 
 void vmemmap_free(unsigned long start, unsigned long end,
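Note: vmemmap_populate_basepages() gained a struct vmem_altmap * parameter upstream (around v5.10) so callers can back the vmemmap with device memory; passing NULL preserves the old page-allocator behaviour. The altmap argument that vmemmap_populate() itself receives is deliberately not forwarded, matching upstream ia64, which never wired up altmap support here.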