@@ -20,7 +20,7 @@
  */
 #include <linux/mm.h>
 #include <linux/mmzone.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/memremap.h>
 #include <linux/highmem.h>
 #include <linux/slab.h>
@@ -29,7 +29,6 @@
 #include <linux/sched.h>
 #include <asm/dma.h>
 #include <asm/pgalloc.h>
-#include <asm/pgtable.h>
 
 /*
  * Allocate a block of memory to be used to back the virtual memory map
@@ -42,8 +41,8 @@
 				unsigned long align,
 				unsigned long goal)
 {
-	return memblock_virt_alloc_try_nid_raw(size, align, goal,
-					       BOOTMEM_ALLOC_ACCESSIBLE, node);
+	return memblock_alloc_try_nid_raw(size, align, goal,
+					  MEMBLOCK_ALLOC_ACCESSIBLE, node);
 }
 
 void * __meminit vmemmap_alloc_block(unsigned long size, int node)
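
The hunks above are the two mechanical halves of the bootmem-to-memblock conversion: the include swap and the allocator call. For orientation, a hedged sketch of the converted helper in context; the wrapper is named __earlyonly_bootmem_alloc in the upstream file, and the condensed caller below is hypothetical, so treat names outside the hunks as assumptions. The key semantic point is that the _raw variant returns uninitialized memory: the vmemmap code initializes every struct page itself, so skipping memblock's usual memset() is a deliberate boot-time saving.

```c
/*
 * Sketch, not a literal quote of the file. 'goal' is a minimum
 * physical address, and MEMBLOCK_ALLOC_ACCESSIBLE means "no upper
 * bound beyond currently accessible memory"; the returned block is
 * NOT zeroed (that is what _raw means).
 */
static void * __ref __earlyonly_bootmem_alloc(int node, unsigned long size,
					      unsigned long align,
					      unsigned long goal)
{
	return memblock_alloc_try_nid_raw(size, align, goal,
					  MEMBLOCK_ALLOC_ACCESSIBLE, node);
}

/*
 * Hypothetical condensation of the early branch of
 * vmemmap_alloc_block(): the __pa(MAX_DMA_ADDRESS) goal visible in
 * the next hunk keeps vmemmap backing pages out of the DMA zone.
 */
static void * __meminit vmemmap_alloc_block_early(unsigned long size, int node)
{
	return __earlyonly_bootmem_alloc(node, size, size,
					 __pa(MAX_DMA_ADDRESS));
}
```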
@@ -70,11 +69,19 @@
 			__pa(MAX_DMA_ADDRESS));
 }
 
-/* need to make sure size is all the same during early stage */
-void * __meminit vmemmap_alloc_block_buf(unsigned long size, int node)
-{
-	void *ptr = sparse_buffer_alloc(size);
+static void * __meminit altmap_alloc_block_buf(unsigned long size,
+					       struct vmem_altmap *altmap);
 
+/* need to make sure size is all the same during early stage */
+void * __meminit vmemmap_alloc_block_buf(unsigned long size, int node,
+					 struct vmem_altmap *altmap)
+{
+	void *ptr;
+
+	if (altmap)
+		return altmap_alloc_block_buf(size, altmap);
+
+	ptr = sparse_buffer_alloc(size);
 	if (!ptr)
 		ptr = vmemmap_alloc_block(size, node);
 	return ptr;
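
With this hunk, vmemmap_alloc_block_buf() becomes the single entry point for memmap backing storage and hides the altmap decision from its callers. The resulting fallback order, as a commented sketch; the function name here is hypothetical, the logic mirrors the hunk:

```c
/*
 * Hypothetical condensation of the dispatch added above; the name
 * alloc_memmap_backing() does not exist upstream. Callers pass a
 * possibly NULL altmap and the helper routes the request:
 *   1. device-backed pfns (altmap), e.g. for ZONE_DEVICE/pmem memmaps;
 *   2. the early per-node sparse buffer preallocated at boot;
 *   3. the generic block allocator (memblock early, buddy later).
 */
static void *alloc_memmap_backing(unsigned long size, int node,
				  struct vmem_altmap *altmap)
{
	void *p;

	if (altmap)
		return altmap_alloc_block_buf(size, altmap);

	p = sparse_buffer_alloc(size);
	if (!p)
		p = vmemmap_alloc_block(size, node);
	return p;
}
```

The practical win is at the call sites: architectures no longer need to open-code an `if (altmap)` branch before choosing an allocator.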
@@ -95,15 +102,8 @@
 	return 0;
 }
 
-/**
- * altmap_alloc_block_buf - allocate pages from the device page map
- * @altmap: device page map
- * @size: size (in bytes) of the allocation
- *
- * Allocations are aligned to the size of the request.
- */
-void * __meminit altmap_alloc_block_buf(unsigned long size,
-		struct vmem_altmap *altmap)
+static void * __meminit altmap_alloc_block_buf(unsigned long size,
+					       struct vmem_altmap *altmap)
 {
 	unsigned long pfn, nr_pfns, nr_align;
 
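
Making altmap_alloc_block_buf() static is consistent with the dispatch above: vmemmap_alloc_block_buf() is now its only caller, so the kernel-doc for an exported helper goes away with it. For readers new to altmap, a simplified sketch of the bookkeeping it performs; the field names follow the upstream struct vmem_altmap, but the helper names are ours and the natural-alignment handling (nr_align in the real code) is elided:

```c
/*
 * Simplified sketch of device-pfn accounting; helper names are
 * hypothetical. The device reserves a slice of its own pfn range to
 * hold the struct pages that describe it, instead of consuming
 * system RAM.
 */
static unsigned long altmap_next_pfn(const struct vmem_altmap *altmap)
{
	/* allocations grow upward from just past the reserved head */
	return altmap->base_pfn + altmap->reserve + altmap->alloc +
	       altmap->align;
}

static void *altmap_carve(unsigned long size, struct vmem_altmap *altmap)
{
	unsigned long pfn = altmap_next_pfn(altmap);
	unsigned long nr_pfns = size >> PAGE_SHIFT;

	if (altmap->alloc + altmap->align + nr_pfns > altmap->free)
		return NULL;			/* device slice exhausted */

	altmap->alloc += nr_pfns;
	return __va(__pfn_to_phys(pfn));	/* direct-map address */
}
```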
@@ -140,12 +140,15 @@
 			start, end - 1);
 }
 
-pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node)
+pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node,
+				       struct vmem_altmap *altmap)
 {
 	pte_t *pte = pte_offset_kernel(pmd, addr);
 	if (pte_none(*pte)) {
 		pte_t entry;
-		void *p = vmemmap_alloc_block_buf(PAGE_SIZE, node);
+		void *p;
+
+		p = vmemmap_alloc_block_buf(PAGE_SIZE, node, altmap);
 		if (!p)
 			return NULL;
 		entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
@@ -213,8 +216,8 @@
 	return pgd;
 }
 
-int __meminit vmemmap_populate_basepages(unsigned long start,
-					 unsigned long end, int node)
+int __meminit vmemmap_populate_basepages(unsigned long start, unsigned long end,
+					 int node, struct vmem_altmap *altmap)
 {
 	unsigned long addr = start;
 	pgd_t *pgd;
@@ -236,7 +239,7 @@
 		pmd = vmemmap_pmd_populate(pud, addr, node);
 		if (!pmd)
 			return -ENOMEM;
-		pte = vmemmap_pte_populate(pmd, addr, node);
+		pte = vmemmap_pte_populate(pmd, addr, node, altmap);
 		if (!pte)
 			return -ENOMEM;
 		vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
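
The last three hunks thread altmap from vmemmap_populate_basepages() down to the pte level, which is where the backing page is actually allocated. Reassembled from the hunks, the full walk looks roughly like this; the p4d step between pgd and pud exists upstream but is not shown in this diff, so treat that part as an assumption:

```c
/*
 * Reconstructed outline of the walk; the p4d level is assumed from
 * upstream and does not appear in the hunks above. Each PAGE_SIZE of
 * memmap virtual space gets one backing page, allocated through
 * vmemmap_alloc_block_buf() and therefore honouring the altmap.
 */
int __meminit vmemmap_populate_basepages(unsigned long start, unsigned long end,
					 int node, struct vmem_altmap *altmap)
{
	unsigned long addr = start;

	for (; addr < end; addr += PAGE_SIZE) {
		pgd_t *pgd = vmemmap_pgd_populate(addr, node);
		p4d_t *p4d;
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		if (!pgd)
			return -ENOMEM;
		p4d = vmemmap_p4d_populate(pgd, addr, node);
		if (!p4d)
			return -ENOMEM;
		pud = vmemmap_pud_populate(p4d, addr, node);
		if (!pud)
			return -ENOMEM;
		pmd = vmemmap_pmd_populate(pud, addr, node);
		if (!pmd)
			return -ENOMEM;
		pte = vmemmap_pte_populate(pmd, addr, node, altmap);
		if (!pte)
			return -ENOMEM;
		vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
	}
	return 0;
}
```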
@@ -245,19 +248,18 @@
 	return 0;
 }
 
-struct page * __meminit sparse_mem_map_populate(unsigned long pnum, int nid,
-		struct vmem_altmap *altmap)
+struct page * __meminit __populate_section_memmap(unsigned long pfn,
+		unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
 {
-	unsigned long start;
-	unsigned long end;
-	struct page *map;
+	unsigned long start = (unsigned long) pfn_to_page(pfn);
+	unsigned long end = start + nr_pages * sizeof(struct page);
 
-	map = pfn_to_page(pnum * PAGES_PER_SECTION);
-	start = (unsigned long)map;
-	end = (unsigned long)(map + PAGES_PER_SECTION);
+	if (WARN_ON_ONCE(!IS_ALIGNED(pfn, PAGES_PER_SUBSECTION) ||
+			 !IS_ALIGNED(nr_pages, PAGES_PER_SUBSECTION)))
+		return NULL;
 
 	if (vmemmap_populate(start, end, nid, altmap))
 		return NULL;
 
-	return map;
+	return pfn_to_page(pfn);
 }
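
This final hunk moves the interface from whole sections (a section number, pnum) to arbitrary pfn ranges, with the new WARN_ON_ONCE() enforcing subsection granularity. To make the sizes concrete: with typical x86-64 defaults (128MB sections, 2MB subsections, 4K pages, a 64-byte struct page) a section is 32768 pages and a subsection 512 pages, so one subsection's memmap costs 32KB. A runnable user-space check of that arithmetic and of the alignment rule; all constants here are assumptions mirroring those defaults, not values read from any particular kernel build:

```c
/*
 * Stand-alone sanity check of the subsection alignment rule and the
 * memmap sizing used by __populate_section_memmap(). Constants are
 * assumed x86-64 defaults, not taken from a real kernel config.
 */
#include <stdio.h>
#include <stdbool.h>

#define PAGE_SHIFT		12
#define SECTION_SIZE_BITS	27	/* 128 MiB sections */
#define SUBSECTION_SHIFT	21	/* 2 MiB subsections */
#define PAGES_PER_SECTION	(1UL << (SECTION_SIZE_BITS - PAGE_SHIFT))
#define PAGES_PER_SUBSECTION	(1UL << (SUBSECTION_SHIFT - PAGE_SHIFT))
#define STRUCT_PAGE_SIZE	64UL	/* typical sizeof(struct page) */

/* mirrors the WARN_ON_ONCE() guard added in the hunk above */
static bool subsection_aligned(unsigned long pfn, unsigned long nr_pages)
{
	return !(pfn % PAGES_PER_SUBSECTION) &&
	       !(nr_pages % PAGES_PER_SUBSECTION);
}

int main(void)
{
	unsigned long nr_pages = PAGES_PER_SUBSECTION;

	printf("section: %lu pages, subsection: %lu pages\n",
	       PAGES_PER_SECTION, PAGES_PER_SUBSECTION);
	printf("memmap for one subsection: %lu KiB\n",
	       nr_pages * STRUCT_PAGE_SIZE / 1024);
	printf("pfn 512, 512 pages aligned? %d\n",
	       subsection_aligned(512, 512));	/* 1: accepted */
	printf("pfn 100, 512 pages aligned? %d\n",
	       subsection_aligned(100, 512));	/* 0: would WARN and fail */
	return 0;
}
```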