hc
2024-12-19 9370bb92b2d16684ee45cf24e879c93c509162da
kernel/arch/alpha/kernel/setup.c
@@ -29,7 +29,7 @@
 #include <linux/string.h>
 #include <linux/ioport.h>
 #include <linux/platform_device.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/pci.h>
 #include <linux/seq_file.h>
 #include <linux/root_dev.h>
@@ -55,7 +55,6 @@
 };
 
 #include <linux/uaccess.h>
-#include <asm/pgtable.h>
 #include <asm/hwrpb.h>
 #include <asm/dma.h>
 #include <asm/mmu_context.h>
@@ -254,7 +253,7 @@
 
 	/* Fix up for the Jensen's queer RTC placement. */
 	standard_io_resources[0].start = RTC_PORT(0);
-	standard_io_resources[0].end = RTC_PORT(0) + 0x10;
+	standard_io_resources[0].end = RTC_PORT(0) + 0x0f;
 
 	for (i = 0; i < ARRAY_SIZE(standard_io_resources); ++i)
 		request_resource(io, standard_io_resources+i);
@@ -293,7 +292,7 @@
 	unsigned long size;
 
 	size = initrd_end - initrd_start;
-	start = __alloc_bootmem(PAGE_ALIGN(size), PAGE_SIZE, 0);
+	start = memblock_alloc(PAGE_ALIGN(size), PAGE_SIZE);
 	if (!start || __pa(start) + size > mem_limit) {
 		initrd_start = initrd_end = 0;
 		return NULL;
@@ -312,9 +311,7 @@
 {
 	struct memclust_struct * cluster;
 	struct memdesc_struct * memdesc;
-	unsigned long start_kernel_pfn, end_kernel_pfn;
-	unsigned long bootmap_size, bootmap_pages, bootmap_start;
-	unsigned long start, end;
+	unsigned long kernel_size;
 	unsigned long i;
 
 	/* Find free clusters, and init and free the bootmem accordingly. */
@@ -322,19 +319,25 @@
 		(hwrpb->mddt_offset + (unsigned long) hwrpb);
 
 	for_each_mem_cluster(memdesc, cluster, i) {
+		unsigned long end;
+
 		printk("memcluster %lu, usage %01lx, start %8lu, end %8lu\n",
 		       i, cluster->usage, cluster->start_pfn,
 		       cluster->start_pfn + cluster->numpages);
+
+		end = cluster->start_pfn + cluster->numpages;
+		if (end > max_low_pfn)
+			max_low_pfn = end;
+
+		memblock_add(PFN_PHYS(cluster->start_pfn),
+			     cluster->numpages << PAGE_SHIFT);
 
 		/* Bit 0 is console/PALcode reserved.  Bit 1 is
 		   non-volatile memory -- we might want to mark
 		   this for later.  */
 		if (cluster->usage & 3)
-			continue;
-
-		end = cluster->start_pfn + cluster->numpages;
-		if (end > max_low_pfn)
-			max_low_pfn = end;
+			memblock_reserve(PFN_PHYS(cluster->start_pfn),
+					 cluster->numpages << PAGE_SHIFT);
 	}
 
 	/*
@@ -363,87 +366,9 @@
 		max_low_pfn = mem_size_limit;
 	}
 
-	/* Find the bounds of kernel memory.  */
-	start_kernel_pfn = PFN_DOWN(KERNEL_START_PHYS);
-	end_kernel_pfn = PFN_UP(virt_to_phys(kernel_end));
-	bootmap_start = -1;
-
- try_again:
-	if (max_low_pfn <= end_kernel_pfn)
-		panic("not enough memory to boot");
-
-	/* We need to know how many physically contiguous pages
-	   we'll need for the bootmap.  */
-	bootmap_pages = bootmem_bootmap_pages(max_low_pfn);
-
-	/* Now find a good region where to allocate the bootmap.  */
-	for_each_mem_cluster(memdesc, cluster, i) {
-		if (cluster->usage & 3)
-			continue;
-
-		start = cluster->start_pfn;
-		end = start + cluster->numpages;
-		if (start >= max_low_pfn)
-			continue;
-		if (end > max_low_pfn)
-			end = max_low_pfn;
-		if (start < start_kernel_pfn) {
-			if (end > end_kernel_pfn
-			    && end - end_kernel_pfn >= bootmap_pages) {
-				bootmap_start = end_kernel_pfn;
-				break;
-			} else if (end > start_kernel_pfn)
-				end = start_kernel_pfn;
-		} else if (start < end_kernel_pfn)
-			start = end_kernel_pfn;
-		if (end - start >= bootmap_pages) {
-			bootmap_start = start;
-			break;
-		}
-	}
-
-	if (bootmap_start == ~0UL) {
-		max_low_pfn >>= 1;
-		goto try_again;
-	}
-
-	/* Allocate the bootmap and mark the whole MM as reserved.  */
-	bootmap_size = init_bootmem(bootmap_start, max_low_pfn);
-
-	/* Mark the free regions.  */
-	for_each_mem_cluster(memdesc, cluster, i) {
-		if (cluster->usage & 3)
-			continue;
-
-		start = cluster->start_pfn;
-		end = cluster->start_pfn + cluster->numpages;
-		if (start >= max_low_pfn)
-			continue;
-		if (end > max_low_pfn)
-			end = max_low_pfn;
-		if (start < start_kernel_pfn) {
-			if (end > end_kernel_pfn) {
-				free_bootmem(PFN_PHYS(start),
-					     (PFN_PHYS(start_kernel_pfn)
-					      - PFN_PHYS(start)));
-				printk("freeing pages %ld:%ld\n",
-				       start, start_kernel_pfn);
-				start = end_kernel_pfn;
-			} else if (end > start_kernel_pfn)
-				end = start_kernel_pfn;
-		} else if (start < end_kernel_pfn)
-			start = end_kernel_pfn;
-		if (start >= end)
-			continue;
-
-		free_bootmem(PFN_PHYS(start), PFN_PHYS(end) - PFN_PHYS(start));
-		printk("freeing pages %ld:%ld\n", start, end);
-	}
-
-	/* Reserve the bootmap memory.  */
-	reserve_bootmem(PFN_PHYS(bootmap_start), bootmap_size,
-			BOOTMEM_DEFAULT);
-	printk("reserving pages %ld:%ld\n", bootmap_start, bootmap_start+PFN_UP(bootmap_size));
+	/* Reserve the kernel memory. */
+	kernel_size = virt_to_phys(kernel_end) - KERNEL_START_PHYS;
+	memblock_reserve(KERNEL_START_PHYS, kernel_size);
 
 #ifdef CONFIG_BLK_DEV_INITRD
 	initrd_start = INITRD_START;
@@ -459,8 +384,8 @@
 			       initrd_end,
 			       phys_to_virt(PFN_PHYS(max_low_pfn)));
 		} else {
-			reserve_bootmem(virt_to_phys((void *)initrd_start),
-					INITRD_SIZE, BOOTMEM_DEFAULT);
+			memblock_reserve(virt_to_phys((void *)initrd_start),
+					 INITRD_SIZE);
 		}
 	}
 #endif /* CONFIG_BLK_DEV_INITRD */
@@ -469,8 +394,7 @@
 extern void setup_memory(void *);
 #endif /* !CONFIG_DISCONTIGMEM */
 
-int __init
-page_is_ram(unsigned long pfn)
+int page_is_ram(unsigned long pfn)
 {
 	struct memclust_struct * cluster;
 	struct memdesc_struct * memdesc;
@@ -504,6 +428,20 @@
 }
 
 arch_initcall(register_cpus);
+
+#ifdef CONFIG_MAGIC_SYSRQ
+static void sysrq_reboot_handler(int unused)
+{
+	machine_halt();
+}
+
+static const struct sysrq_key_op srm_sysrq_reboot_op = {
+	.handler	= sysrq_reboot_handler,
+	.help_msg	= "reboot(b)",
+	.action_msg	= "Resetting",
+	.enable_mask	= SYSRQ_ENABLE_BOOT,
+};
+#endif
 
 void __init
 setup_arch(char **cmdline_p)
@@ -541,7 +479,7 @@
 #ifndef alpha_using_srm
 	/* Assume that we've booted from SRM if we haven't booted from MILO.
 	   Detect the later by looking for "MILO" in the system serial nr.  */
-	alpha_using_srm = strncmp((const char *)hwrpb->ssn, "MILO", 4) != 0;
+	alpha_using_srm = !str_has_prefix((const char *)hwrpb->ssn, "MILO");
 #endif
 #ifndef alpha_using_qemu
 	/* Similarly, look for QEMU.  */
@@ -625,8 +563,8 @@
 	/* If we're using SRM, make sysrq-b halt back to the prom,
 	   not auto-reboot.  */
 	if (alpha_using_srm) {
-		struct sysrq_key_op *op = __sysrq_get_key_op('b');
-		op->handler = (void *) machine_halt;
+		unregister_sysrq_key('b', __sysrq_reboot_op);
+		register_sysrq_key('b', &srm_sysrq_reboot_op);
 	}
 #endif
 
@@ -709,6 +647,7 @@
 
 	/* Find our memory.  */
 	setup_memory(kernel_end);
+	memblock_set_bottom_up(true);
 
 	/* First guess at cpu cache sizes.  Do this before init_arch.  */
 	determine_cpu_caches(cpu->type);
@@ -729,8 +668,6 @@
 #ifdef CONFIG_VT
 #if defined(CONFIG_VGA_CONSOLE)
 	conswitchp = &vga_con;
-#elif defined(CONFIG_DUMMY_CONSOLE)
-	conswitchp = &dummy_con;
 #endif
 #endif
 
@@ -1488,6 +1425,7 @@
 static void *
 c_next(struct seq_file *f, void *v, loff_t *pos)
 {
+	(*pos)++;
 	return NULL;
 }
 
14931431