2023-12-09 b22da3d8526a935aa31e086e63f60ff3246cb61c
kernel/mm/sparse-vmemmap.c
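This patch makes three related changes to sparse-vmemmap.c: it finishes the bootmem-to-memblock allocator migration, threads struct vmem_altmap through the basepage population path so the vmemmap can be allocated from device-backed pages, and replaces the per-section sparse_mem_map_populate() with the subsection-granular __populate_section_memmap(). Notes and small standalone sketches follow the relevant hunks below.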
@@ -20,7 +20,7 @@
  */
 #include <linux/mm.h>
 #include <linux/mmzone.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/memremap.h>
 #include <linux/highmem.h>
 #include <linux/slab.h>
@@ -29,7 +29,6 @@
 #include <linux/sched.h>
 #include <asm/dma.h>
 #include <asm/pgalloc.h>
-#include <asm/pgtable.h>
 
 /*
  * Allocate a block of memory to be used to back the virtual memory map
@@ -42,8 +41,8 @@
                                 unsigned long align,
                                 unsigned long goal)
 {
-        return memblock_virt_alloc_try_nid_raw(size, align, goal,
-                                               BOOTMEM_ALLOC_ACCESSIBLE, node);
+        return memblock_alloc_try_nid_raw(size, align, goal,
+                                          MEMBLOCK_ALLOC_ACCESSIBLE, node);
 }
 
 void * __meminit vmemmap_alloc_block(unsigned long size, int node)
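The three hunks above are the mechanical half of the bootmem removal. <linux/bootmem.h> gives way to <linux/memblock.h>, and memblock_virt_alloc_try_nid_raw()/BOOTMEM_ALLOC_ACCESSIBLE become memblock_alloc_try_nid_raw()/MEMBLOCK_ALLOC_ACCESSIBLE with the same (size, align, min_addr, max_addr, nid) argument order. The direct <asm/pgtable.h> include can go because <linux/mm.h> already provides the page-table definitions this file needs. Note that the _raw variant still returns memory that has not been zeroed; that is deliberate here, since every struct page in the map is initialized later anyway.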
@@ -70,11 +69,19 @@
                                                 __pa(MAX_DMA_ADDRESS));
 }
 
-/* need to make sure size is all the same during early stage */
-void * __meminit vmemmap_alloc_block_buf(unsigned long size, int node)
-{
-        void *ptr = sparse_buffer_alloc(size);
+static void * __meminit altmap_alloc_block_buf(unsigned long size,
+                                               struct vmem_altmap *altmap);
 
+/* need to make sure size is all the same during early stage */
+void * __meminit vmemmap_alloc_block_buf(unsigned long size, int node,
+                                         struct vmem_altmap *altmap)
+{
+        void *ptr;
+
+        if (altmap)
+                return altmap_alloc_block_buf(size, altmap);
+
+        ptr = sparse_buffer_alloc(size);
         if (!ptr)
                 ptr = vmemmap_alloc_block(size, node);
         return ptr;
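This hunk makes vmemmap_alloc_block_buf() the single entry point for basepage-sized allocations: a device-provided altmap, if any, is tried first (and its failure is final), otherwise the old sparse-buffer-then-fresh-block fallback applies. The forward declaration is needed because altmap_alloc_block_buf() becomes static in the next hunk but is defined further down the file. Below is a minimal userspace model of the dispatch order; the stub allocators and their names are made up, and only the control flow mirrors the kernel function.

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for struct vmem_altmap: tracks whether device pages remain. */
struct altmap_stub { long free_pages; };

static void *altmap_alloc(struct altmap_stub *altmap, size_t size)
{
        return altmap->free_pages-- > 0 ? malloc(size) : NULL;
}

static void *sparse_buffer_alloc_stub(size_t size)
{
        (void)size;
        return NULL;            /* pretend the early buffer is exhausted */
}

static void *vmemmap_alloc_block_stub(size_t size)
{
        return malloc(size);    /* last-resort fresh block */
}

/* Models vmemmap_alloc_block_buf(size, node, altmap) after this hunk:
 * an altmap short-circuits both fallbacks, even when it fails. */
static void *alloc_block_buf(size_t size, struct altmap_stub *altmap)
{
        void *ptr;

        if (altmap)
                return altmap_alloc(altmap, size);

        ptr = sparse_buffer_alloc_stub(size);
        if (!ptr)
                ptr = vmemmap_alloc_block_stub(size);
        return ptr;
}

int main(void)
{
        struct altmap_stub altmap = { .free_pages = 1 };

        printf("altmap, page left:   %p\n", alloc_block_buf(4096, &altmap));
        printf("altmap, exhausted:   %p\n", alloc_block_buf(4096, &altmap));
        printf("no altmap, fallback: %p\n", alloc_block_buf(4096, NULL));
        return 0;
}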
@@ -95,15 +102,8 @@
         return 0;
 }
 
-/**
- * altmap_alloc_block_buf - allocate pages from the device page map
- * @altmap: device page map
- * @size: size (in bytes) of the allocation
- *
- * Allocations are aligned to the size of the request.
- */
-void * __meminit altmap_alloc_block_buf(unsigned long size,
-                struct vmem_altmap *altmap)
+static void * __meminit altmap_alloc_block_buf(unsigned long size,
+                                               struct vmem_altmap *altmap)
 {
         unsigned long pfn, nr_pfns, nr_align;
 
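With vmemmap_alloc_block_buf() as its only caller, altmap_alloc_block_buf() becomes static and its kernel-doc goes away. The removed comment carried the one behavioral detail worth remembering: allocations from the device page map are aligned to the size of the request. A runnable sketch of that align-to-request bump allocation follows; the field names loosely mirror struct vmem_altmap, the numbers in main() are made up, and the arithmetic assumes power-of-two request sizes.

#include <stdio.h>

struct altmap_model {
        unsigned long base_pfn; /* first pfn backing the device map */
        unsigned long free;     /* pages available in total */
        unsigned long alloc;    /* pages handed out so far */
        unsigned long align;    /* pages skipped for alignment so far */
};

/* Allocate nr_pfns pages aligned to nr_pfns (assumed a power of two);
 * returns the first pfn, or 0 when the range is exhausted. */
static unsigned long altmap_alloc_pfns(struct altmap_model *m,
                                       unsigned long nr_pfns)
{
        unsigned long pfn = m->base_pfn + m->alloc + m->align;
        unsigned long skip = (0UL - pfn) & (nr_pfns - 1);

        if (m->alloc + m->align + skip + nr_pfns > m->free)
                return 0;
        m->align += skip;
        m->alloc += nr_pfns;
        return pfn + skip;
}

int main(void)
{
        struct altmap_model m = { .base_pfn = 0x1000, .free = 1024 };

        printf("16 pages -> pfn %#lx\n", altmap_alloc_pfns(&m, 16));
        printf(" 1 page  -> pfn %#lx\n", altmap_alloc_pfns(&m, 1));
        printf("16 pages -> pfn %#lx (re-aligned)\n", altmap_alloc_pfns(&m, 16));
        return 0;
}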
@@ -140,12 +140,15 @@
                         start, end - 1);
 }
 
-pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node)
+pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node,
+                                       struct vmem_altmap *altmap)
 {
         pte_t *pte = pte_offset_kernel(pmd, addr);
         if (pte_none(*pte)) {
                 pte_t entry;
-                void *p = vmemmap_alloc_block_buf(PAGE_SIZE, node);
+                void *p;
+
+                p = vmemmap_alloc_block_buf(PAGE_SIZE, node, altmap);
                 if (!p)
                         return NULL;
                 entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
@@ -213,8 +216,8 @@
         return pgd;
 }
 
-int __meminit vmemmap_populate_basepages(unsigned long start,
-                                         unsigned long end, int node)
+int __meminit vmemmap_populate_basepages(unsigned long start, unsigned long end,
+                                         int node, struct vmem_altmap *altmap)
 {
         unsigned long addr = start;
         pgd_t *pgd;
@@ -236,7 +239,7 @@
                 pmd = vmemmap_pmd_populate(pud, addr, node);
                 if (!pmd)
                         return -ENOMEM;
-                pte = vmemmap_pte_populate(pmd, addr, node);
+                pte = vmemmap_pte_populate(pmd, addr, node, altmap);
                 if (!pte)
                         return -ENOMEM;
                 vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
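The two hunks above are plumbing: vmemmap_populate_basepages() and vmemmap_pte_populate() each gain a struct vmem_altmap * parameter and pass it straight down, so the chain becomes basepages -> pte_populate -> vmemmap_alloc_block_buf(PAGE_SIZE, node, altmap). Any architecture whose vmemmap_populate() is built on basepages therefore picks up altmap support without arch-specific changes.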
@@ -245,19 +248,18 @@
         return 0;
 }
 
-struct page * __meminit sparse_mem_map_populate(unsigned long pnum, int nid,
-                struct vmem_altmap *altmap)
+struct page * __meminit __populate_section_memmap(unsigned long pfn,
+                unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
 {
-        unsigned long start;
-        unsigned long end;
-        struct page *map;
+        unsigned long start = (unsigned long) pfn_to_page(pfn);
+        unsigned long end = start + nr_pages * sizeof(struct page);
 
-        map = pfn_to_page(pnum * PAGES_PER_SECTION);
-        start = (unsigned long)map;
-        end = (unsigned long)(map + PAGES_PER_SECTION);
+        if (WARN_ON_ONCE(!IS_ALIGNED(pfn, PAGES_PER_SUBSECTION) ||
+                         !IS_ALIGNED(nr_pages, PAGES_PER_SUBSECTION)))
+                return NULL;
 
         if (vmemmap_populate(start, end, nid, altmap))
                 return NULL;
 
-        return map;
+        return pfn_to_page(pfn);
 }
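The final hunk is the interface change behind subsection memory hotplug. __populate_section_memmap() takes an arbitrary (pfn, nr_pages) range instead of a section number, computes the memmap span directly from sizeof(struct page), and refuses ranges that are not subsection-aligned via the WARN_ON_ONCE() guard. A standalone sketch of that bounds computation, using illustrative constants (64-byte struct page, 512-page subsections, a made-up vmemmap base; the kernel derives the real values from PAGE_SHIFT, SUBSECTION_SHIFT and the arch's vmemmap layout):

#include <stdio.h>
#include <stdbool.h>

#define PAGES_PER_SUBSECTION    512UL   /* 2 MiB of memory with 4 KiB pages */
#define STRUCT_PAGE_SIZE        64UL    /* stands in for sizeof(struct page) */

#define IS_ALIGNED(x, a)        (((x) & ((a) - 1)) == 0)

/* Models the range check and memmap span computed by
 * __populate_section_memmap() after this hunk. */
static bool populate_section_memmap(unsigned long pfn, unsigned long nr_pages)
{
        unsigned long vmemmap_base = 0xffffea0000000000UL;      /* illustrative */
        unsigned long start, end;

        if (!IS_ALIGNED(pfn, PAGES_PER_SUBSECTION) ||
            !IS_ALIGNED(nr_pages, PAGES_PER_SUBSECTION))
                return false;   /* the kernel WARN_ON_ONCE()s and returns NULL */

        start = vmemmap_base + pfn * STRUCT_PAGE_SIZE;  /* pfn_to_page(pfn) */
        end = start + nr_pages * STRUCT_PAGE_SIZE;
        printf("memmap for pfns %#lx..%#lx -> [%#lx, %#lx)\n",
               pfn, pfn + nr_pages - 1, start, end);
        return true;
}

int main(void)
{
        populate_section_memmap(0x80000, PAGES_PER_SUBSECTION);
        if (!populate_section_memmap(0x80001, PAGES_PER_SUBSECTION))
                printf("rejected: pfn not subsection-aligned\n");
        return 0;
}

With the usual x86_64 values (2 MiB subsections inside 128 MiB sections) this lets ZONE_DEVICE ranges get a memmap without being padded out to a full section, which is why the old per-section sparse_mem_map_populate() interface had to go.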