@@ -29,7 +29,7 @@
 #include <linux/string.h>
 #include <linux/ioport.h>
 #include <linux/platform_device.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/pci.h>
 #include <linux/seq_file.h>
 #include <linux/root_dev.h>
@@ -55,7 +55,6 @@
 };
 
 #include <linux/uaccess.h>
-#include <asm/pgtable.h>
 #include <asm/hwrpb.h>
 #include <asm/dma.h>
 #include <asm/mmu_context.h>
@@ -254,7 +253,7 @@
 
	/* Fix up for the Jensen's queer RTC placement. */
	standard_io_resources[0].start = RTC_PORT(0);
-	standard_io_resources[0].end = RTC_PORT(0) + 0x10;
+	standard_io_resources[0].end = RTC_PORT(0) + 0x0f;
 
	for (i = 0; i < ARRAY_SIZE(standard_io_resources); ++i)
		request_resource(io, standard_io_resources+i);
@@ -293,7 +292,7 @@
	unsigned long size;
 
	size = initrd_end - initrd_start;
-	start = __alloc_bootmem(PAGE_ALIGN(size), PAGE_SIZE, 0);
+	start = memblock_alloc(PAGE_ALIGN(size), PAGE_SIZE);
	if (!start || __pa(start) + size > mem_limit) {
		initrd_start = initrd_end = 0;
		return NULL;
@@ -312,9 +311,7 @@
 {
	struct memclust_struct * cluster;
	struct memdesc_struct * memdesc;
-	unsigned long start_kernel_pfn, end_kernel_pfn;
-	unsigned long bootmap_size, bootmap_pages, bootmap_start;
-	unsigned long start, end;
+	unsigned long kernel_size;
	unsigned long i;
 
	/* Find free clusters, and init and free the bootmem accordingly. */
@@ -322,19 +319,25 @@
		(hwrpb->mddt_offset + (unsigned long) hwrpb);
 
	for_each_mem_cluster(memdesc, cluster, i) {
+		unsigned long end;
+
		printk("memcluster %lu, usage %01lx, start %8lu, end %8lu\n",
		       i, cluster->usage, cluster->start_pfn,
		       cluster->start_pfn + cluster->numpages);
+
+		end = cluster->start_pfn + cluster->numpages;
+		if (end > max_low_pfn)
+			max_low_pfn = end;
+
+		memblock_add(PFN_PHYS(cluster->start_pfn),
+			     cluster->numpages << PAGE_SHIFT);
 
		/* Bit 0 is console/PALcode reserved. Bit 1 is
		   non-volatile memory -- we might want to mark
		   this for later. */
		if (cluster->usage & 3)
-			continue;
-
-		end = cluster->start_pfn + cluster->numpages;
-		if (end > max_low_pfn)
-			max_low_pfn = end;
+			memblock_reserve(PFN_PHYS(cluster->start_pfn),
+					 cluster->numpages << PAGE_SHIFT);
	}
 
	/*
@@ -363,87 +366,9 @@
		max_low_pfn = mem_size_limit;
	}
 
-	/* Find the bounds of kernel memory. */
-	start_kernel_pfn = PFN_DOWN(KERNEL_START_PHYS);
-	end_kernel_pfn = PFN_UP(virt_to_phys(kernel_end));
-	bootmap_start = -1;
-
- try_again:
-	if (max_low_pfn <= end_kernel_pfn)
-		panic("not enough memory to boot");
-
-	/* We need to know how many physically contiguous pages
-	   we'll need for the bootmap. */
-	bootmap_pages = bootmem_bootmap_pages(max_low_pfn);
-
-	/* Now find a good region where to allocate the bootmap. */
-	for_each_mem_cluster(memdesc, cluster, i) {
-		if (cluster->usage & 3)
-			continue;
-
-		start = cluster->start_pfn;
-		end = start + cluster->numpages;
-		if (start >= max_low_pfn)
-			continue;
-		if (end > max_low_pfn)
-			end = max_low_pfn;
-		if (start < start_kernel_pfn) {
-			if (end > end_kernel_pfn
-			    && end - end_kernel_pfn >= bootmap_pages) {
-				bootmap_start = end_kernel_pfn;
-				break;
-			} else if (end > start_kernel_pfn)
-				end = start_kernel_pfn;
-		} else if (start < end_kernel_pfn)
-			start = end_kernel_pfn;
-		if (end - start >= bootmap_pages) {
-			bootmap_start = start;
-			break;
-		}
-	}
-
-	if (bootmap_start == ~0UL) {
-		max_low_pfn >>= 1;
-		goto try_again;
-	}
-
-	/* Allocate the bootmap and mark the whole MM as reserved. */
-	bootmap_size = init_bootmem(bootmap_start, max_low_pfn);
-
-	/* Mark the free regions. */
-	for_each_mem_cluster(memdesc, cluster, i) {
-		if (cluster->usage & 3)
-			continue;
-
-		start = cluster->start_pfn;
-		end = cluster->start_pfn + cluster->numpages;
-		if (start >= max_low_pfn)
-			continue;
-		if (end > max_low_pfn)
-			end = max_low_pfn;
-		if (start < start_kernel_pfn) {
-			if (end > end_kernel_pfn) {
-				free_bootmem(PFN_PHYS(start),
-					     (PFN_PHYS(start_kernel_pfn)
-					      - PFN_PHYS(start)));
-				printk("freeing pages %ld:%ld\n",
				       start, start_kernel_pfn);
-				start = end_kernel_pfn;
-			} else if (end > start_kernel_pfn)
-				end = start_kernel_pfn;
-		} else if (start < end_kernel_pfn)
-			start = end_kernel_pfn;
-		if (start >= end)
-			continue;
-
-		free_bootmem(PFN_PHYS(start), PFN_PHYS(end) - PFN_PHYS(start));
-		printk("freeing pages %ld:%ld\n", start, end);
-	}
-
-	/* Reserve the bootmap memory. */
-	reserve_bootmem(PFN_PHYS(bootmap_start), bootmap_size,
-			BOOTMEM_DEFAULT);
-	printk("reserving pages %ld:%ld\n", bootmap_start, bootmap_start+PFN_UP(bootmap_size));
+	/* Reserve the kernel memory. */
+	kernel_size = virt_to_phys(kernel_end) - KERNEL_START_PHYS;
+	memblock_reserve(KERNEL_START_PHYS, kernel_size);
 
 #ifdef CONFIG_BLK_DEV_INITRD
	initrd_start = INITRD_START;
@@ -459,8 +384,8 @@
			       initrd_end,
			       phys_to_virt(PFN_PHYS(max_low_pfn)));
		} else {
-			reserve_bootmem(virt_to_phys((void *)initrd_start),
-					INITRD_SIZE, BOOTMEM_DEFAULT);
+			memblock_reserve(virt_to_phys((void *)initrd_start),
+					 INITRD_SIZE);
		}
	}
 #endif /* CONFIG_BLK_DEV_INITRD */
@@ -469,8 +394,7 @@
 extern void setup_memory(void *);
 #endif /* !CONFIG_DISCONTIGMEM */
 
-int __init
-page_is_ram(unsigned long pfn)
+int page_is_ram(unsigned long pfn)
 {
	struct memclust_struct * cluster;
	struct memdesc_struct * memdesc;
@@ -504,6 +428,20 @@
 }
 
 arch_initcall(register_cpus);
+
+#ifdef CONFIG_MAGIC_SYSRQ
+static void sysrq_reboot_handler(int unused)
+{
+	machine_halt();
+}
+
+static const struct sysrq_key_op srm_sysrq_reboot_op = {
+	.handler	= sysrq_reboot_handler,
+	.help_msg	= "reboot(b)",
+	.action_msg	= "Resetting",
+	.enable_mask	= SYSRQ_ENABLE_BOOT,
+};
+#endif
 
 void __init
 setup_arch(char **cmdline_p)
@@ -541,7 +479,7 @@
 #ifndef alpha_using_srm
	/* Assume that we've booted from SRM if we haven't booted from MILO.
	   Detect the later by looking for "MILO" in the system serial nr. */
-	alpha_using_srm = strncmp((const char *)hwrpb->ssn, "MILO", 4) != 0;
+	alpha_using_srm = !str_has_prefix((const char *)hwrpb->ssn, "MILO");
 #endif
 #ifndef alpha_using_qemu
	/* Similarly, look for QEMU. */
@@ -625,8 +563,8 @@
	/* If we're using SRM, make sysrq-b halt back to the prom,
	   not auto-reboot. */
	if (alpha_using_srm) {
-		struct sysrq_key_op *op = __sysrq_get_key_op('b');
-		op->handler = (void *) machine_halt;
+		unregister_sysrq_key('b', __sysrq_reboot_op);
+		register_sysrq_key('b', &srm_sysrq_reboot_op);
	}
 #endif
 
@@ -709,6 +647,7 @@
 
	/* Find our memory. */
	setup_memory(kernel_end);
+	memblock_set_bottom_up(true);
 
	/* First guess at cpu cache sizes. Do this before init_arch. */
	determine_cpu_caches(cpu->type);
@@ -729,8 +668,6 @@
 #ifdef CONFIG_VT
 #if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
-#elif defined(CONFIG_DUMMY_CONSOLE)
-	conswitchp = &dummy_con;
 #endif
 #endif
 
@@ -1488,6 +1425,7 @@
 static void *
 c_next(struct seq_file *f, void *v, loff_t *pos)
 {
+	(*pos)++;
	return NULL;
 }
 