forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-08 01573e231f18eb2d99162747186f59511f56b64d
kernel/arch/microblaze/mm/init.c
@@ -7,10 +7,10 @@
  * for more details.
  */

-#include <linux/bootmem.h>
+#include <linux/dma-map-ops.h>
+#include <linux/memblock.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
-#include <linux/memblock.h>
 #include <linux/mm.h> /* mem_init */
 #include <linux/initrd.h>
 #include <linux/pagemap.h>
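Header changes track two upstream moves: <linux/bootmem.h> was removed when the bootmem allocator was folded into memblock, and dma_contiguous_reserve() (needed by the CMA hunk further down) is declared in <linux/dma-map-ops.h> on v5.10-era trees. A rough old-to-new mapping of the calls this file used, as a hedged reference rather than an exhaustive list:

#include <linux/memblock.h>	/* early (pre page-allocator) allocations */
#include <linux/dma-map-ops.h>	/* dma_contiguous_reserve() for CMA */

/*
 * Bootmem-to-memblock mapping applied by this patch (v5.10-era API, assumed):
 *   alloc_bootmem(size)             -> memblock_alloc(size, SMP_CACHE_BYTES)
 *   free_all_bootmem()              -> memblock_free_all()
 *   __va(memblock_alloc_base(...))  -> memblock_alloc_try_nid_raw(...)
 */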
@@ -46,26 +46,14 @@
 EXPORT_SYMBOL(memory_size);
 unsigned long lowmem_size;

-#ifdef CONFIG_HIGHMEM
-pte_t *kmap_pte;
-EXPORT_SYMBOL(kmap_pte);
-pgprot_t kmap_prot;
-EXPORT_SYMBOL(kmap_prot);
-
-static inline pte_t *virt_to_kpte(unsigned long vaddr)
-{
-	return pte_offset_kernel(pmd_offset(pgd_offset_k(vaddr),
-			vaddr), vaddr);
-}
+EXPORT_SYMBOL(min_low_pfn);
+EXPORT_SYMBOL(max_low_pfn);

 static void __init highmem_init(void)
 {
 	pr_debug("%x\n", (u32)PKMAP_BASE);
 	map_page(PKMAP_BASE, 0, 0);	/* XXX gross */
 	pkmap_page_table = virt_to_kpte(PKMAP_BASE);
-
-	kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
-	kmap_prot = PAGE_KERNEL;
 }

 static void highmem_setup(void)
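The kmap_pte/kmap_prot globals and the file-local virt_to_kpte() can go because generic code took over: kmap state now lives in common highmem code, and <linux/pgtable.h> provides virt_to_kpte(), which is why highmem_init() can still call it after the local definition is deleted. A sketch of what the generic helper does, assuming the v5.10-era definition:

#include <linux/pgtable.h>

/* Approximation of the generic virt_to_kpte() (v5.10-era, assumed):
 * walk the kernel page tables down to the PMD covering vaddr, then
 * return the PTE slot, or NULL if nothing is mapped at that level. */
static pte_t *virt_to_kpte_sketch(unsigned long vaddr)
{
	pmd_t *pmd = pmd_off_k(vaddr);	/* pgd -> p4d -> pud -> pmd */

	return pmd_none(*pmd) ? NULL : pte_offset_kernel(pmd, vaddr);
}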
@@ -109,20 +97,20 @@
 #endif

 	/* We don't have holes in memory map */
-	free_area_init_nodes(zones_size);
+	free_area_init(zones_size);
 }

 void __init setup_memory(void)
 {
-	struct memblock_region *reg;
-
 #ifndef CONFIG_MMU
 	u32 kernel_align_start, kernel_align_size;
+	phys_addr_t start, end;
+	u64 i;

 	/* Find main memory where is the kernel */
-	for_each_memblock(memory, reg) {
-		memory_start = (u32)reg->base;
-		lowmem_size = reg->size;
+	for_each_mem_range(i, &start, &end) {
+		memory_start = start;
+		lowmem_size = end - start;
 		if ((memory_start <= (u32)_text) &&
 			((u32)_text <= (memory_start + lowmem_size - 1))) {
 			memory_size = lowmem_size;
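for_each_memblock() handed out struct memblock_region pointers and was removed upstream in favor of iterators that expose only physical address ranges, which is why the loop variable changes from a region pointer to a u64 cursor plus phys_addr_t bounds. A minimal sketch of the new-style walk, assuming the v5.10 three-argument form of for_each_mem_range():

#include <linux/memblock.h>
#include <linux/printk.h>

static void __init dump_memory_ranges(void)
{
	phys_addr_t start, end;
	u64 i;

	/* Visit every registered memory range; 'end' is exclusive. */
	for_each_mem_range(i, &start, &end)
		pr_info("memory: %pa..%pa (%llu bytes)\n",
			&start, &end, (unsigned long long)(end - start));
}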
@@ -170,33 +158,7 @@
 	pr_info("%s: max_low_pfn: %#lx\n", __func__, max_low_pfn);
 	pr_info("%s: max_pfn: %#lx\n", __func__, max_pfn);

-	/* Add active regions with valid PFNs */
-	for_each_memblock(memory, reg) {
-		unsigned long start_pfn, end_pfn;
-
-		start_pfn = memblock_region_memory_base_pfn(reg);
-		end_pfn = memblock_region_memory_end_pfn(reg);
-		memblock_set_node(start_pfn << PAGE_SHIFT,
-				  (end_pfn - start_pfn) << PAGE_SHIFT,
-				  &memblock.memory, 0);
-	}
-
-	/* XXX need to clip this if using highmem? */
-	sparse_memory_present_with_active_regions(0);
-
 	paging_init();
-}
-
-#ifdef CONFIG_BLK_DEV_INITRD
-void free_initrd_mem(unsigned long start, unsigned long end)
-{
-	free_reserved_area((void *)start, (void *)end, -1, "initrd");
-}
-#endif
-
-void free_initmem(void)
-{
-	free_initmem_default(-1);
 }

 void __init mem_init(void)
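Most of this hunk's deletions became redundant upstream rather than being dropped outright: free_area_init() (the renamed free_area_init_nodes()) now derives node and zone extents straight from memblock, which obsoletes the manual memblock_set_node() loop and sparse_memory_present_with_active_regions(), and free_initmem()/free_initrd_mem() have generic __weak implementations in common code. A hedged sketch of the zone setup this feeds into; the zone chosen here is illustrative, not necessarily the one paging_init() uses in this file:

#include <linux/mm.h>

static void __init zone_sizes_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES] = { 0 };

	/* Only per-zone PFN limits are supplied here; node spans and
	 * holes are discovered by free_area_init() via memblock. */
	zones_size[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM
	zones_size[ZONE_HIGHMEM] = max_pfn;
#endif
	free_area_init(zones_size);
}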
@@ -204,24 +166,12 @@
 	high_memory = (void *)__va(memory_start + lowmem_size - 1);

 	/* this will put all memory onto the freelists */
-	free_all_bootmem();
+	memblock_free_all();
 #ifdef CONFIG_HIGHMEM
 	highmem_setup();
 #endif

 	mem_init_print_info(NULL);
-#ifdef CONFIG_MMU
-	pr_info("Kernel virtual memory layout:\n");
-	pr_info("  * 0x%08lx..0x%08lx  : fixmap\n", FIXADDR_START, FIXADDR_TOP);
-#ifdef CONFIG_HIGHMEM
-	pr_info("  * 0x%08lx..0x%08lx  : highmem PTEs\n",
-		PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
-#endif /* CONFIG_HIGHMEM */
-	pr_info("  * 0x%08lx..0x%08lx  : early ioremap\n",
-		ioremap_bot, ioremap_base);
-	pr_info("  * 0x%08lx..0x%08lx  : vmalloc & ioremap\n",
-		(unsigned long)VMALLOC_START, VMALLOC_END);
-#endif
 	mem_init_done = 1;
 }

@@ -355,6 +305,11 @@
 	/* This will also cause that unflatten device tree will be allocated
 	 * inside 768MB limit */
 	memblock_set_current_limit(memory_start + lowmem_size - 1);
+
+	parse_early_param();
+
+	/* CMA initialization */
+	dma_contiguous_reserve(memory_start + lowmem_size - 1);
 }

 /* This is only called until mem_init is done. */
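The ordering of the two added calls matters: dma_contiguous_reserve() sizes the default CMA area from the cma= boot parameter (or the Kconfig default), so parse_early_param() must have run first, and the reservation has to happen while memblock still owns free memory. A hedged sketch of the same hookup in isolation, with the limit made an explicit parameter:

#include <linux/init.h>
#include <linux/dma-map-ops.h>

static void __init reserve_cma_below(phys_addr_t lowmem_limit)
{
	/* Make sure "cma=size[@start]" from the command line is parsed. */
	parse_early_param();

	/* Carve the contiguous DMA region out of memblock, below the
	 * lowmem limit so the kernel can address it directly. */
	dma_contiguous_reserve(lowmem_limit);
}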
@@ -364,8 +319,9 @@
 	 * Mem start + kernel_tlb -> here is limit
 	 * because of mem mapping from head.S
 	 */
-	return __va(memblock_alloc_base(PAGE_SIZE, PAGE_SIZE,
-				memory_start + kernel_tlb));
+	return memblock_alloc_try_nid_raw(PAGE_SIZE, PAGE_SIZE,
+				MEMBLOCK_LOW_LIMIT, memory_start + kernel_tlb,
+				NUMA_NO_NODE);
 }

 #endif /* CONFIG_MMU */
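Two behavioral details hide in this substitution: memblock_alloc_base() returned a physical address (hence the old __va()) and panicked on failure, while memblock_alloc_try_nid_raw() returns a virtual address directly, leaves the memory uninitialized (the _raw suffix), and returns NULL instead of panicking. A sketch of an equivalent bounded early allocation, assuming the v5.10 signature:

#include <linux/memblock.h>

static void * __init alloc_page_below(phys_addr_t limit)
{
	/* One page, page-aligned, placed anywhere from the lowest usable
	 * address (MEMBLOCK_LOW_LIMIT) up to 'limit', no NUMA preference.
	 * Returns a virtual address, or NULL on failure; contents are
	 * NOT zeroed because of the _raw variant. */
	return memblock_alloc_try_nid_raw(PAGE_SIZE, PAGE_SIZE,
					  MEMBLOCK_LOW_LIMIT, limit,
					  NUMA_NO_NODE);
}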
@@ -374,12 +330,14 @@
 {
 	void *p;

-	if (mem_init_done)
+	if (mem_init_done) {
 		p = kzalloc(size, mask);
-	else {
-		p = alloc_bootmem(size);
-		if (p)
-			memset(p, 0, size);
+	} else {
+		p = memblock_alloc(size, SMP_CACHE_BYTES);
+		if (!p)
+			panic("%s: Failed to allocate %zu bytes\n",
+			      __func__, size);
 	}
+
 	return p;
 }
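Reassembled, the patched helper is the standard phase-aware allocation pattern: kzalloc() once the page allocator is live, memblock_alloc() before that. memblock_alloc() already zeroes its result, which is why the old memset() disappears, and it returns NULL on failure, which this caller escalates to a panic. The function's real name and signature sit above the hunk context, so the ones below are stand-ins:

#include <linux/memblock.h>
#include <linux/slab.h>

extern int mem_init_done;	/* set at the end of mem_init() above */

/* Hypothetical reassembly of the patched helper (name is a stand-in). */
static void * __ref zalloc_phase_aware(size_t size, gfp_t mask)
{
	void *p;

	if (mem_init_done) {
		p = kzalloc(size, mask);	/* normal path: slab is up */
	} else {
		p = memblock_alloc(size, SMP_CACHE_BYTES);	/* zeroed */
		if (!p)
			panic("%s: Failed to allocate %zu bytes\n",
			      __func__, size);
	}

	return p;
}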