@@ -10,14 +10,13 @@
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/swap.h>
 #include <linux/initrd.h>
 #include <linux/pfn.h>
 #include <linux/module.h>
 
 #include <asm/hwrpb.h>
-#include <asm/pgalloc.h>
 #include <asm/sections.h>
 
 pg_data_t node_data[MAX_NUMNODES];
@@ -59,12 +58,10 @@
         struct memclust_struct * cluster;
         struct memdesc_struct * memdesc;
         unsigned long start_kernel_pfn, end_kernel_pfn;
-        unsigned long bootmap_size, bootmap_pages, bootmap_start;
         unsigned long start, end;
         unsigned long node_pfn_start, node_pfn_end;
         unsigned long node_min_pfn, node_max_pfn;
         int i;
-        unsigned long node_datasz = PFN_UP(sizeof(pg_data_t));
         int show_init = 0;
 
         /* Find the bounds of current node */
@@ -134,116 +131,28 @@
         /* Cute trick to make sure our local node data is on local memory */
         node_data[nid] = (pg_data_t *)(__va(node_min_pfn << PAGE_SHIFT));
 #endif
-        /* Quasi-mark the pg_data_t as in-use */
-        node_min_pfn += node_datasz;
-        if (node_min_pfn >= node_max_pfn) {
-                printk(" not enough mem to reserve NODE_DATA");
-                return;
-        }
-        NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
-
         printk(" Detected node memory: start %8lu, end %8lu\n",
                node_min_pfn, node_max_pfn);
 
         DBGDCONT(" DISCONTIG: node_data[%d] is at 0x%p\n", nid, NODE_DATA(nid));
-        DBGDCONT(" DISCONTIG: NODE_DATA(%d)->bdata is at 0x%p\n", nid, NODE_DATA(nid)->bdata);
 
         /* Find the bounds of kernel memory. */
         start_kernel_pfn = PFN_DOWN(KERNEL_START_PHYS);
         end_kernel_pfn = PFN_UP(virt_to_phys(kernel_end));
-        bootmap_start = -1;
 
         if (!nid && (node_max_pfn < end_kernel_pfn || node_min_pfn > start_kernel_pfn))
                 panic("kernel loaded out of ram");
+
+        memblock_add_node(PFN_PHYS(node_min_pfn),
+                          (node_max_pfn - node_min_pfn) << PAGE_SHIFT, nid);
 
         /* Zone start phys-addr must be 2^(MAX_ORDER-1) aligned.
            Note that we round this down, not up - node memory
            has much larger alignment than 8Mb, so it's safe. */
         node_min_pfn &= ~((1UL << (MAX_ORDER-1))-1);
 
-        /* We need to know how many physically contiguous pages
-           we'll need for the bootmap. */
-        bootmap_pages = bootmem_bootmap_pages(node_max_pfn-node_min_pfn);
-
-        /* Now find a good region where to allocate the bootmap. */
-        for_each_mem_cluster(memdesc, cluster, i) {
-                if (cluster->usage & 3)
-                        continue;
-
-                start = cluster->start_pfn;
-                end = start + cluster->numpages;
-
-                if (start >= node_max_pfn || end <= node_min_pfn)
-                        continue;
-
-                if (end > node_max_pfn)
-                        end = node_max_pfn;
-                if (start < node_min_pfn)
-                        start = node_min_pfn;
-
-                if (start < start_kernel_pfn) {
-                        if (end > end_kernel_pfn
-                            && end - end_kernel_pfn >= bootmap_pages) {
-                                bootmap_start = end_kernel_pfn;
-                                break;
-                        } else if (end > start_kernel_pfn)
-                                end = start_kernel_pfn;
-                } else if (start < end_kernel_pfn)
-                        start = end_kernel_pfn;
-                if (end - start >= bootmap_pages) {
-                        bootmap_start = start;
-                        break;
-                }
-        }
-
-        if (bootmap_start == -1)
-                panic("couldn't find a contiguous place for the bootmap");
-
-        /* Allocate the bootmap and mark the whole MM as reserved. */
-        bootmap_size = init_bootmem_node(NODE_DATA(nid), bootmap_start,
-                                         node_min_pfn, node_max_pfn);
-        DBGDCONT(" bootmap_start %lu, bootmap_size %lu, bootmap_pages %lu\n",
-                 bootmap_start, bootmap_size, bootmap_pages);
-
-        /* Mark the free regions. */
-        for_each_mem_cluster(memdesc, cluster, i) {
-                if (cluster->usage & 3)
-                        continue;
-
-                start = cluster->start_pfn;
-                end = cluster->start_pfn + cluster->numpages;
-
-                if (start >= node_max_pfn || end <= node_min_pfn)
-                        continue;
-
-                if (end > node_max_pfn)
-                        end = node_max_pfn;
-                if (start < node_min_pfn)
-                        start = node_min_pfn;
-
-                if (start < start_kernel_pfn) {
-                        if (end > end_kernel_pfn) {
-                                free_bootmem_node(NODE_DATA(nid), PFN_PHYS(start),
-                                                  (PFN_PHYS(start_kernel_pfn)
-                                                   - PFN_PHYS(start)));
-                                printk(" freeing pages %ld:%ld\n",
-                                       start, start_kernel_pfn);
-                                start = end_kernel_pfn;
-                        } else if (end > start_kernel_pfn)
-                                end = start_kernel_pfn;
-                } else if (start < end_kernel_pfn)
-                        start = end_kernel_pfn;
-                if (start >= end)
-                        continue;
-
-                free_bootmem_node(NODE_DATA(nid), PFN_PHYS(start), PFN_PHYS(end) - PFN_PHYS(start));
-                printk(" freeing pages %ld:%ld\n", start, end);
-        }
-
-        /* Reserve the bootmap memory. */
-        reserve_bootmem_node(NODE_DATA(nid), PFN_PHYS(bootmap_start),
-                             bootmap_size, BOOTMEM_DEFAULT);
-        printk(" reserving pages %ld:%ld\n", bootmap_start, bootmap_start+PFN_UP(bootmap_size));
+        NODE_DATA(nid)->node_start_pfn = node_min_pfn;
+        NODE_DATA(nid)->node_present_pages = node_max_pfn - node_min_pfn;
 
         node_set_online(nid);
 }
@@ -251,6 +160,7 @@
 void __init
 setup_memory(void *kernel_end)
 {
+        unsigned long kernel_size;
         int nid;
 
         show_mem_layout();
@@ -261,6 +171,9 @@
         max_low_pfn = 0UL;
         for (nid = 0; nid < MAX_NUMNODES; nid++)
                 setup_memory_node(nid, kernel_end);
+
+        kernel_size = virt_to_phys(kernel_end) - KERNEL_START_PHYS;
+        memblock_reserve(KERNEL_START_PHYS, kernel_size);
 
 #ifdef CONFIG_BLK_DEV_INITRD
         initrd_start = INITRD_START;
@@ -279,9 +192,8 @@
                         phys_to_virt(PFN_PHYS(max_low_pfn)));
         } else {
                 nid = kvaddr_to_nid(initrd_start);
-                reserve_bootmem_node(NODE_DATA(nid),
-                                     virt_to_phys((void *)initrd_start),
-                                     INITRD_SIZE, BOOTMEM_DEFAULT);
+                memblock_reserve(virt_to_phys((void *)initrd_start),
+                                 INITRD_SIZE);
         }
 }
 #endif /* CONFIG_BLK_DEV_INITRD */
@@ -289,8 +201,7 @@
 
 void __init paging_init(void)
 {
-        unsigned int nid;
-        unsigned long zones_size[MAX_NR_ZONES] = {0, };
+        unsigned long max_zone_pfn[MAX_NR_ZONES] = {0, };
         unsigned long dma_local_pfn;
 
         /*
@@ -302,20 +213,10 @@
          */
         dma_local_pfn = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
 
-        for_each_online_node(nid) {
-                bootmem_data_t *bdata = &bootmem_node_data[nid];
-                unsigned long start_pfn = bdata->node_min_pfn;
-                unsigned long end_pfn = bdata->node_low_pfn;
+        max_zone_pfn[ZONE_DMA] = dma_local_pfn;
+        max_zone_pfn[ZONE_NORMAL] = max_pfn;
 
-                if (dma_local_pfn >= end_pfn - start_pfn)
-                        zones_size[ZONE_DMA] = end_pfn - start_pfn;
-                else {
-                        zones_size[ZONE_DMA] = dma_local_pfn;
-                        zones_size[ZONE_NORMAL] = (end_pfn - start_pfn) - dma_local_pfn;
-                }
-                node_set_state(nid, N_NORMAL_MEMORY);
-                free_area_init_node(nid, zones_size, start_pfn, NULL);
-        }
+        free_area_init(max_zone_pfn);
 
         /* Initialize the kernel's ZERO_PGE. */
         memset((void *)ZERO_PGE, 0, PAGE_SIZE);