forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-09-20 a36159eec6ca17402b0e146b86efaf76568dc353
kernel/arch/microblaze/mm/init.c
@@ -7,10 +7,10 @@
  * for more details.
  */
 
-#include <linux/bootmem.h>
+#include <linux/dma-map-ops.h>
+#include <linux/memblock.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
-#include <linux/memblock.h>
 #include <linux/mm.h> /* mem_init */
 #include <linux/initrd.h>
 #include <linux/pagemap.h>
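Note: <linux/bootmem.h> is gone upstream; the bootmem interface was folded into <linux/memblock.h>, which is why the old standalone memblock.h include below is dropped and the header moves up here. <linux/dma-map-ops.h> is added for dma_contiguous_reserve(), called in the setup_memory() hunk further down.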
@@ -46,17 +46,12 @@
 EXPORT_SYMBOL(memory_size);
 unsigned long lowmem_size;
 
+EXPORT_SYMBOL(min_low_pfn);
+EXPORT_SYMBOL(max_low_pfn);
+
 #ifdef CONFIG_HIGHMEM
 pte_t *kmap_pte;
 EXPORT_SYMBOL(kmap_pte);
-pgprot_t kmap_prot;
-EXPORT_SYMBOL(kmap_prot);
-
-static inline pte_t *virt_to_kpte(unsigned long vaddr)
-{
-	return pte_offset_kernel(pmd_offset(pgd_offset_k(vaddr),
-			vaddr), vaddr);
-}
 
 static void __init highmem_init(void)
 {
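Note: the arch-local virt_to_kpte() copy is removed because recent kernels ship a generic helper in <linux/pgtable.h>, and kmap_prot became a plain PAGE_KERNEL constant in the generic highmem code. A minimal sketch of the generic helper this diff now relies on (approximating the upstream definition):

static inline pte_t *virt_to_kpte(unsigned long vaddr)
{
	pmd_t *pmd = pmd_off_k(vaddr);	/* kernel PMD entry for vaddr */

	return pmd_none(*pmd) ? NULL : pte_offset_kernel(pmd, vaddr);
}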
@@ -65,7 +60,6 @@
 	pkmap_page_table = virt_to_kpte(PKMAP_BASE);
 
 	kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
-	kmap_prot = PAGE_KERNEL;
 }
 
 static void highmem_setup(void)
@@ -109,20 +103,20 @@
 #endif
 
 	/* We don't have holes in memory map */
-	free_area_init_nodes(zones_size);
+	free_area_init(zones_size);
 }
 
 void __init setup_memory(void)
 {
-	struct memblock_region *reg;
-
 #ifndef CONFIG_MMU
 	u32 kernel_align_start, kernel_align_size;
+	phys_addr_t start, end;
+	u64 i;
 
 	/* Find main memory where is the kernel */
-	for_each_memblock(memory, reg) {
-		memory_start = (u32)reg->base;
-		lowmem_size = reg->size;
+	for_each_mem_range(i, &start, &end) {
+		memory_start = start;
+		lowmem_size = end - start;
 		if ((memory_start <= (u32)_text) &&
 			((u32)_text <= (memory_start + lowmem_size - 1))) {
 			memory_size = lowmem_size;
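Note: for_each_memblock() no longer exists; for_each_mem_range() yields each usable physical range as an exclusive [start, end) pair of phys_addr_t, hence lowmem_size = end - start. A minimal sketch of the iteration pattern, assuming only the memblock API (the function name is illustrative):

static void __init dump_mem_ranges(void)
{
	phys_addr_t start, end;		/* end is exclusive */
	u64 i;				/* opaque iterator cookie */

	for_each_mem_range(i, &start, &end)
		pr_info("memory: %pa..%pa (%llu bytes)\n",
			&start, &end, (unsigned long long)(end - start));
}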
@@ -170,33 +164,7 @@
 	pr_info("%s: max_low_pfn: %#lx\n", __func__, max_low_pfn);
 	pr_info("%s: max_pfn: %#lx\n", __func__, max_pfn);
 
-	/* Add active regions with valid PFNs */
-	for_each_memblock(memory, reg) {
-		unsigned long start_pfn, end_pfn;
-
-		start_pfn = memblock_region_memory_base_pfn(reg);
-		end_pfn = memblock_region_memory_end_pfn(reg);
-		memblock_set_node(start_pfn << PAGE_SHIFT,
-				  (end_pfn - start_pfn) << PAGE_SHIFT,
-				  &memblock.memory, 0);
-	}
-
-	/* XXX need to clip this if using highmem? */
-	sparse_memory_present_with_active_regions(0);
-
 	paging_init();
-}
-
-#ifdef CONFIG_BLK_DEV_INITRD
-void free_initrd_mem(unsigned long start, unsigned long end)
-{
-	free_reserved_area((void *)start, (void *)end, -1, "initrd");
-}
-#endif
-
-void free_initmem(void)
-{
-	free_initmem_default(-1);
 }
 
 void __init mem_init(void)
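Note: the memblock_set_node() loop and sparse_memory_present_with_active_regions() go away because free_area_init() now derives node and zone extents from memblock itself. The arch-specific free_initrd_mem()/free_initmem() can be dropped because generic __weak versions exist (in init/initramfs.c and init/main.c respectively). Roughly, the fallback this now relies on looks like:

void __init __weak free_initmem(void)
{
	free_initmem_default(POISON_FREE_INITMEM);
}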
@@ -204,24 +172,12 @@
 	high_memory = (void *)__va(memory_start + lowmem_size - 1);
 
 	/* this will put all memory onto the freelists */
-	free_all_bootmem();
+	memblock_free_all();
 #ifdef CONFIG_HIGHMEM
 	highmem_setup();
 #endif
 
 	mem_init_print_info(NULL);
-#ifdef CONFIG_MMU
-	pr_info("Kernel virtual memory layout:\n");
-	pr_info("  * 0x%08lx..0x%08lx  : fixmap\n", FIXADDR_START, FIXADDR_TOP);
-#ifdef CONFIG_HIGHMEM
-	pr_info("  * 0x%08lx..0x%08lx  : highmem PTEs\n",
-		PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
-#endif /* CONFIG_HIGHMEM */
-	pr_info("  * 0x%08lx..0x%08lx  : early ioremap\n",
-		ioremap_bot, ioremap_base);
-	pr_info("  * 0x%08lx..0x%08lx  : vmalloc & ioremap\n",
-		(unsigned long)VMALLOC_START, VMALLOC_END);
-#endif
 	mem_init_done = 1;
 }
 
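Note: memblock_free_all() is the one-for-one replacement for free_all_bootmem(): it releases all remaining free memblock memory to the page allocator. The "Kernel virtual memory layout" banner was deleted upstream across architectures (the printed addresses expose kernel layout), so its removal here is intentional rather than a porting loss.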
@@ -355,6 +311,11 @@
 	/* This will also cause that unflatten device tree will be allocated
 	 * inside 768MB limit */
 	memblock_set_current_limit(memory_start + lowmem_size - 1);
+
+	parse_early_param();
+
+	/* CMA initialization */
+	dma_contiguous_reserve(memory_start + lowmem_size - 1);
 }
 
 /* This is only called until mem_init is done. */
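Note: the ordering here matters: dma_contiguous_reserve() sizes the default CMA area from the cma= early parameter, so parse_early_param() must run first for a command-line override to take effect. The limit argument keeps the reserved CMA region below the end of lowmem.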
@@ -364,8 +325,9 @@
 	 * Mem start + kernel_tlb -> here is limit
 	 * because of mem mapping from head.S
 	 */
-	return __va(memblock_alloc_base(PAGE_SIZE, PAGE_SIZE,
-				memory_start + kernel_tlb));
+	return memblock_alloc_try_nid_raw(PAGE_SIZE, PAGE_SIZE,
+				MEMBLOCK_LOW_LIMIT, memory_start + kernel_tlb,
+				NUMA_NO_NODE);
 }
 
 #endif /* CONFIG_MMU */
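Note: this conversion hides three behavioral details. memblock_alloc_base() returned a physical address (hence the old __va()) and panicked internally on failure; memblock_alloc_try_nid_raw() returns a virtual address directly, returns NULL on failure, and, being the _raw variant, does not zero the page, which matches the old non-zeroing behavior. MEMBLOCK_LOW_LIMIT (0) and NUMA_NO_NODE simply mean "no lower bound, any node".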
@@ -374,12 +336,14 @@
 {
 	void *p;
 
-	if (mem_init_done)
+	if (mem_init_done) {
 		p = kzalloc(size, mask);
-	else {
-		p = alloc_bootmem(size);
-		if (p)
-			memset(p, 0, size);
+	} else {
+		p = memblock_alloc(size, SMP_CACHE_BYTES);
+		if (!p)
+			panic("%s: Failed to allocate %zu bytes\n",
+			      __func__, size);
 	}
+
 	return p;
 }
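Note: memblock_alloc() returns zeroed memory, which is why the old "allocate then memset()" pair collapses into a single call, and SMP_CACHE_BYTES matches the default alignment alloc_bootmem() used. Unlike alloc_bootmem(), though, memblock_alloc() returns NULL on failure instead of panicking internally, so the explicit panic() preserves the old cannot-fail semantics of this early allocator.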