@@ -14,7 +14,6 @@
 
 #include <linux/module.h>
 #include <linux/mm.h>
-#include <linux/bootmem.h>
 #include <linux/memblock.h>
 #include <linux/gfp.h>
 #include <linux/delay.h>
@@ -27,7 +26,6 @@
 #include <linux/compat.h>
 
 #include <asm/pgalloc.h>
-#include <asm/pgtable.h>
 #include <asm/tlb.h>
 #include <asm/pdc_chassis.h>
 #include <asm/mmzone.h>
@@ -39,16 +37,11 @@
 extern void parisc_kernel_start(void);	/* Kernel entry point in head.S */
 
 #if CONFIG_PGTABLE_LEVELS == 3
-/* NOTE: This layout exactly conforms to the hybrid L2/L3 page table layout
- * with the first pmd adjacent to the pgd and below it. gcc doesn't actually
- * guarantee that global objects will be laid out in memory in the same order
- * as the order of declaration, so put these in different sections and use
- * the linker script to order them. */
-pmd_t pmd0[PTRS_PER_PMD] __attribute__ ((__section__ (".data..vm0.pmd"), aligned(PAGE_SIZE)));
+pmd_t pmd0[PTRS_PER_PMD] __section(".data..vm0.pmd") __attribute__ ((aligned(PAGE_SIZE)));
 #endif
 
-pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__ ((__section__ (".data..vm0.pgd"), aligned(PAGE_SIZE)));
-pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __attribute__ ((__section__ (".data..vm0.pte"), aligned(PAGE_SIZE)));
+pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(".data..vm0.pgd") __attribute__ ((aligned(PAGE_SIZE)));
+pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __section(".data..vm0.pte") __attribute__ ((aligned(PAGE_SIZE)));
 
 static struct resource data_resource = {
 	.name	= "Kernel data",
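The pmd0/swapper_pg_dir/pg0 hunk above is purely a spelling change: the kernel's __section() helper (from include/linux/compiler_attributes.h) expands to the same section attribute that was previously open-coded, so object placement, and the linker-script ordering the deleted NOTE described, are unchanged. For reference:

/* __section() as defined in compiler_attributes.h */
#define __section(section)	__attribute__((__section__(section)))

/* so the new spelling ... */
pmd_t pmd0[PTRS_PER_PMD] __section(".data..vm0.pmd") __attribute__ ((aligned(PAGE_SIZE)));
/* ... preprocesses to exactly the old open-coded attribute form. */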
@@ -67,7 +60,7 @@
 	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM,
 };
 
-static struct resource sysram_resources[MAX_PHYSMEM_RANGES] __read_mostly;
+static struct resource sysram_resources[MAX_PHYSMEM_RANGES] __ro_after_init;
 
 /* The following array is initialized from the firmware specific
  * information retrieved in kernel/inventory.c.
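Several __read_mostly annotations in this file (sysram_resources here; parisc_vmalloc_start, pcxl_dma_start and empty_zero_page further down) become __ro_after_init. __read_mostly only groups a variable away from write-hot cache lines; __ro_after_init additionally places it in .data..ro_after_init, which the kernel write-protects once boot completes, so a stray post-boot write faults instead of corrupting state. A minimal sketch of the distinction (the variable and setup names are illustrative):

/* written once during boot, then enforced read-only */
static unsigned long boot_tuned_limit __ro_after_init;

/* read often, written rarely, but writable forever */
static unsigned long rare_counter __read_mostly;

static int __init example_setup(void)
{
	boot_tuned_limit = 42;	/* fine: init is still running */
	return 0;
}
early_initcall(example_setup);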
@@ -75,36 +68,6 @@
 
 physmem_range_t pmem_ranges[MAX_PHYSMEM_RANGES] __initdata;
 int npmem_ranges __initdata;
-
-/*
- * get_memblock() allocates pages via memblock.
- * We can't use memblock_find_in_range(0, KERNEL_INITIAL_SIZE) here since it
- * doesn't allocate from bottom to top which is needed because we only created
- * the initial mapping up to KERNEL_INITIAL_SIZE in the assembly bootup code.
- */
-static void * __init get_memblock(unsigned long size)
-{
-	static phys_addr_t search_addr __initdata;
-	phys_addr_t phys;
-
-	if (!search_addr)
-		search_addr = PAGE_ALIGN(__pa((unsigned long) &_end));
-	search_addr = ALIGN(search_addr, size);
-	while (!memblock_is_region_memory(search_addr, size) ||
-			memblock_is_region_reserved(search_addr, size)) {
-		search_addr += size;
-	}
-	phys = search_addr;
-
-	if (phys)
-		memblock_reserve(phys, size);
-	else
-		panic("get_memblock() failed.\n");
-
-	memset(__va(phys), 0, size);
-
-	return __va(phys);
-}
 
 #ifdef CONFIG_64BIT
 #define MAX_MEM	(1UL << MAX_PHYSMEM_BITS)
@@ -302,6 +265,13 @@
 		max_pfn = start_pfn + npages;
 	}
 
+	/*
+	 * We can't use memblock top-down allocations because we only
+	 * created the initial mapping up to KERNEL_INITIAL_SIZE in
+	 * the assembly bootup code.
+	 */
+	memblock_set_bottom_up(true);
+
 	/* IOMMU is always used to access "high mem" on those boxes
 	 * that can support enough mem that a PCI device couldn't
 	 * directly DMA to any physical addresses.
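The two hunks above are two halves of one cleanup: the hand-rolled get_memblock() allocator existed only to force bottom-up allocation (only the first KERNEL_INITIAL_SIZE bytes are mapped this early), and memblock_set_bottom_up(true) now expresses that policy directly. Callers switch to plain memblock_alloc(), which returns an already-zeroed virtual address or NULL, so each caller checks the result itself where get_memblock() used to panic centrally. A minimal sketch of the replacement pattern (the helper name is illustrative):

/* Sketch: early allocation after this patch (kernel context). */
static void * __init example_early_page(void)
{
	void *page;

	/* allocations must come from already-mapped low memory */
	memblock_set_bottom_up(true);

	page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);	/* zeroed, or NULL */
	if (!page)
		panic("early page allocation failed\n");
	return page;
}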
@@ -369,22 +339,12 @@
 	memblock_dump_all();
 }
 
-static int __init parisc_text_address(unsigned long vaddr)
+static bool kernel_set_to_readonly;
+
+static void __ref map_pages(unsigned long start_vaddr,
+		unsigned long start_paddr, unsigned long size,
+		pgprot_t pgprot, int force)
 {
-	static unsigned long head_ptr __initdata;
-
-	if (!head_ptr)
-		head_ptr = PAGE_MASK & (unsigned long)
-			dereference_function_descriptor(&parisc_kernel_start);
-
-	return core_kernel_text(vaddr) || vaddr == head_ptr;
-}
-
-static void __init map_pages(unsigned long start_vaddr,
-		unsigned long start_paddr, unsigned long size,
-		pgprot_t pgprot, int force)
-{
-	pgd_t *pg_dir;
 	pmd_t *pmd;
 	pte_t *pg_table;
 	unsigned long end_paddr;
@@ -396,86 +356,72 @@
 	unsigned long vaddr;
 	unsigned long ro_start;
 	unsigned long ro_end;
-	unsigned long kernel_end;
+	unsigned long kernel_start, kernel_end;
 
 	ro_start = __pa((unsigned long)_text);
 	ro_end   = __pa((unsigned long)&data_start);
+	kernel_start = __pa((unsigned long)&__init_begin);
 	kernel_end  = __pa((unsigned long)&_end);
 
 	end_paddr = start_paddr + size;
 
-	pg_dir = pgd_offset_k(start_vaddr);
-
-#if PTRS_PER_PMD == 1
-	start_pmd = 0;
-#else
+	/* for 2-level configuration PTRS_PER_PMD is 0 so start_pmd will be 0 */
 	start_pmd = ((start_vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
-#endif
 	start_pte = ((start_vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
 
 	address = start_paddr;
 	vaddr = start_vaddr;
 	while (address < end_paddr) {
-#if PTRS_PER_PMD == 1
-		pmd = (pmd_t *)__pa(pg_dir);
-#else
-		pmd = (pmd_t *)pgd_address(*pg_dir);
+		pgd_t *pgd = pgd_offset_k(vaddr);
+		p4d_t *p4d = p4d_offset(pgd, vaddr);
+		pud_t *pud = pud_offset(p4d, vaddr);
 
-		/*
-		 * pmd is physical at this point
-		 */
-
-		if (!pmd) {
-			pmd = (pmd_t *) get_memblock(PAGE_SIZE << PMD_ORDER);
-			pmd = (pmd_t *) __pa(pmd);
+#if CONFIG_PGTABLE_LEVELS == 3
+		if (pud_none(*pud)) {
+			pmd = memblock_alloc(PAGE_SIZE << PMD_ORDER,
+					     PAGE_SIZE << PMD_ORDER);
+			if (!pmd)
+				panic("pmd allocation failed.\n");
+			pud_populate(NULL, pud, pmd);
 		}
-
-		pgd_populate(NULL, pg_dir, __va(pmd));
 #endif
-		pg_dir++;
 
-		/* now change pmd to kernel virtual addresses */
-
-		pmd = (pmd_t *)__va(pmd) + start_pmd;
+		pmd = pmd_offset(pud, vaddr);
 		for (tmp1 = start_pmd; tmp1 < PTRS_PER_PMD; tmp1++, pmd++) {
-
-			/*
-			 * pg_table is physical at this point
-			 */
-
-			pg_table = (pte_t *)pmd_address(*pmd);
-			if (!pg_table) {
-				pg_table = (pte_t *) get_memblock(PAGE_SIZE);
-				pg_table = (pte_t *) __pa(pg_table);
+			if (pmd_none(*pmd)) {
+				pg_table = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+				if (!pg_table)
+					panic("page table allocation failed\n");
+				pmd_populate_kernel(NULL, pmd, pg_table);
 			}
 
-			pmd_populate_kernel(NULL, pmd, __va(pg_table));
-
-			/* now change pg_table to kernel virtual addresses */
-
-			pg_table = (pte_t *) __va(pg_table) + start_pte;
+			pg_table = pte_offset_kernel(pmd, vaddr);
 			for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) {
 				pte_t pte;
+				pgprot_t prot;
+				bool huge = false;
 
-				if (force)
-					pte = __mk_pte(address, pgprot);
-				else if (parisc_text_address(vaddr)) {
-					pte = __mk_pte(address, PAGE_KERNEL_EXEC);
-					if (address >= ro_start && address < kernel_end)
-						pte = pte_mkhuge(pte);
+				if (force) {
+					prot = pgprot;
+				} else if (address < kernel_start || address >= kernel_end) {
+					/* outside kernel memory */
+					prot = PAGE_KERNEL;
+				} else if (!kernel_set_to_readonly) {
+					/* still initializing, allow writing to RO memory */
+					prot = PAGE_KERNEL_RWX;
+					huge = true;
+				} else if (address >= ro_start) {
+					/* Code (ro) and Data areas */
+					prot = (address < ro_end) ?
+						PAGE_KERNEL_EXEC : PAGE_KERNEL;
+					huge = true;
+				} else {
+					prot = PAGE_KERNEL;
 				}
-				else
-#if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
-				if (address >= ro_start && address < ro_end) {
-					pte = __mk_pte(address, PAGE_KERNEL_EXEC);
+
+				pte = __mk_pte(address, prot);
+				if (huge)
 					pte = pte_mkhuge(pte);
-				} else
-#endif
-				{
-					pte = __mk_pte(address, pgprot);
-					if (address >= ro_start && address < kernel_end)
-						pte = pte_mkhuge(pte);
-				}
 
 				if (address >= end_paddr)
 					break;
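The rewritten map_pages() drops the old physical-address juggling (pmd and pg_table held as physical pointers and converted back and forth with __pa()/__va()), the PTRS_PER_PMD == 1 special case, and the parisc_text_address() helper, and instead walks the generic pgd -> p4d -> pud -> pmd -> pte accessors; on parisc the p4d and pud levels are folded, so those offset helpers are effectively no-op casts and one walk serves both the 2- and 3-level configurations. The protection choice now keys off kernel_set_to_readonly: the whole kernel image stays RWX (and huge-mapped) until free_initmem() flips the flag, after which text is executable/read-only and data plain PAGE_KERNEL. A condensed sketch of the walk for a single address (the helper name is illustrative):

/* Sketch: find (and populate, if needed) the PTE slot for vaddr. */
static pte_t * __init example_pte_slot(unsigned long vaddr)
{
	pgd_t *pgd = pgd_offset_k(vaddr);	/* kernel page tables */
	p4d_t *p4d = p4d_offset(pgd, vaddr);	/* folded on parisc */
	pud_t *pud = pud_offset(p4d, vaddr);	/* folded on 2-level */
	pmd_t *pmd;
	pte_t *pte;

#if CONFIG_PGTABLE_LEVELS == 3
	if (pud_none(*pud)) {
		pmd_t *new_pmd = memblock_alloc(PAGE_SIZE << PMD_ORDER,
						PAGE_SIZE << PMD_ORDER);
		if (!new_pmd)
			panic("pmd allocation failed\n");
		pud_populate(NULL, pud, new_pmd);
	}
#endif
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
		if (!pte)
			panic("pte table allocation failed\n");
		pmd_populate_kernel(NULL, pmd, pte);
	}
	return pte_offset_kernel(pmd, vaddr);
}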
@@ -494,15 +440,33 @@
 	}
 }
 
-void __ref free_initmem(void)
+void __init set_kernel_text_rw(int enable_read_write)
+{
+	unsigned long start = (unsigned long) __init_begin;
+	unsigned long end = (unsigned long) &data_start;
+
+	map_pages(start, __pa(start), end-start,
+		PAGE_KERNEL_RWX, enable_read_write ? 1:0);
+
+	/* force the kernel to see the new page table entries */
+	flush_cache_all();
+	flush_tlb_all();
+}
+
+void free_initmem(void)
 {
 	unsigned long init_begin = (unsigned long)__init_begin;
 	unsigned long init_end = (unsigned long)__init_end;
+	unsigned long kernel_end = (unsigned long)&_end;
+
+	/* Remap kernel text and data, but do not touch init section yet. */
+	kernel_set_to_readonly = true;
+	map_pages(init_end, __pa(init_end), kernel_end - init_end,
+		  PAGE_KERNEL, 0);
 
 	/* The init text pages are marked R-X. We have to
 	 * flush the icache and mark them RW-
 	 *
-	 * This is tricky, because map_pages is in the init section.
 	 * Do a dummy remap of the data section first (the data
 	 * section is already PAGE_KERNEL) to pull in the TLB entries
 	 * for map_kernel */
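free_initmem() now sets kernel_set_to_readonly before remapping text and data, so the map_pages() pass above applies the strict protections; set_kernel_text_rw() is the escape hatch that remaps the image RWX and back (with force=0 and the flag set, map_pages() re-derives the strict protections, and the function flushes caches and TLB itself). Note it is __init here, so only boot-time code can use it; a hypothetical init-phase caller (the patching helper is invented for illustration, not part of this patch) might look like:

/* Sketch: a made-up __init-time user of set_kernel_text_rw(). */
static void __init apply_boot_patch(u32 *insn, u32 new_insn)
{
	set_kernel_text_rw(1);	/* force kernel text/data RWX */
	*insn = new_insn;	/* patch while writable */
	set_kernel_text_rw(0);	/* restore strict protections */
}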
@@ -514,7 +478,7 @@
 		  PAGE_KERNEL, 1);
 
 	/* force the kernel to see the new TLB entries */
-	__flush_tlb_range(0, init_begin, init_end);
+	__flush_tlb_range(0, init_begin, kernel_end);
 
 	/* finally dump all the instructions which were cached, since the
 	 * pages are no-longer executable */
@@ -532,7 +496,8 @@
 {
 	/* rodata memory was already mapped with KERNEL_RO access rights by
 	   pagetable_init() and map_pages(). No need to do additional stuff here */
-	printk (KERN_INFO "Write protecting the kernel read-only data: %luk\n",
-		(unsigned long)(__end_rodata - __start_rodata) >> 10);
+	unsigned long roai_size = __end_ro_after_init - __start_ro_after_init;
+
+	pr_info("Write protected read-only-after-init data: %luk\n", roai_size >> 10);
 }
 #endif
 
@@ -559,11 +524,11 @@
 #define SET_MAP_OFFSET(x) ((void *)(((unsigned long)(x) + VM_MAP_OFFSET) \
 				     & ~(VM_MAP_OFFSET-1)))
 
-void *parisc_vmalloc_start __read_mostly;
+void *parisc_vmalloc_start __ro_after_init;
 EXPORT_SYMBOL(parisc_vmalloc_start);
 
 #ifdef CONFIG_PA11
-unsigned long pcxl_dma_start __read_mostly;
+unsigned long pcxl_dma_start __ro_after_init;
 #endif
 
 void __init mem_init(void)
@@ -588,10 +553,15 @@
 	BUILD_BUG_ON(PGD_ENTRY_SIZE != sizeof(pgd_t));
 	BUILD_BUG_ON(PAGE_SHIFT + BITS_PER_PTE + BITS_PER_PMD + BITS_PER_PGD
 			> BITS_PER_LONG);
+#if CONFIG_PGTABLE_LEVELS == 3
+	BUILD_BUG_ON(PT_INITIAL > PTRS_PER_PMD);
+#else
+	BUILD_BUG_ON(PT_INITIAL > PTRS_PER_PGD);
+#endif
 
 	high_memory = __va((max_pfn << PAGE_SHIFT));
 	set_max_mapnr(max_low_pfn);
-	free_all_bootmem();
+	memblock_free_all();
 
 #ifdef CONFIG_PA11
 	if (boot_cpu_data.cpu_type == pcxl2 || boot_cpu_data.cpu_type == pcxl) {
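The new BUILD_BUG_ON() lines turn an implicit layout assumption into a compile-time failure: the PT_INITIAL statically-allocated page tables in pg0 are hooked into consecutive slots of the initial table above them (pmd0 on 3-level, swapper_pg_dir on 2-level), so they must fit within that table's entries. BUILD_BUG_ON() is the kernel's compile-time assert; in plain C11 the same check reads (constants are placeholders, not parisc's real values):

/* Standalone illustration of the added compile-time check. */
#define PT_INITIAL	8	/* illustrative value */
#define PTRS_PER_PMD	512	/* illustrative value */

_Static_assert(PT_INITIAL <= PTRS_PER_PMD,
	       "initial page tables must fit in one pmd");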
@@ -610,14 +580,18 @@
 	 * But keep code for debugging purposes.
 	 */
 	printk("virtual kernel memory layout:\n"
-	       "    vmalloc : 0x%px - 0x%px   (%4ld MB)\n"
-	       "    memory  : 0x%px - 0x%px   (%4ld MB)\n"
-	       "      .init : 0x%px - 0x%px   (%4ld kB)\n"
-	       "      .data : 0x%px - 0x%px   (%4ld kB)\n"
-	       "      .text : 0x%px - 0x%px   (%4ld kB)\n",
+	       "     vmalloc : 0x%px - 0x%px   (%4ld MB)\n"
+	       "     fixmap  : 0x%px - 0x%px   (%4ld kB)\n"
+	       "     memory  : 0x%px - 0x%px   (%4ld MB)\n"
+	       "       .init : 0x%px - 0x%px   (%4ld kB)\n"
+	       "       .data : 0x%px - 0x%px   (%4ld kB)\n"
+	       "       .text : 0x%px - 0x%px   (%4ld kB)\n",
 
 	       (void*)VMALLOC_START, (void*)VMALLOC_END,
 	       (VMALLOC_END - VMALLOC_START) >> 20,
+
+	       (void *)FIXMAP_START, (void *)(FIXMAP_START + FIXMAP_SIZE),
+	       (unsigned long)(FIXMAP_SIZE / 1024),
 
 	       __va(0), high_memory,
 	       ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,
@@ -633,7 +607,7 @@
 #endif
 }
 
-unsigned long *empty_zero_page __read_mostly;
+unsigned long *empty_zero_page __ro_after_init;
 EXPORT_SYMBOL(empty_zero_page);
 
 /*
@@ -671,7 +645,10 @@
 	}
 #endif
 
-	empty_zero_page = get_memblock(PAGE_SIZE);
+	empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+	if (!empty_zero_page)
+		panic("zero page allocation failed.\n");
+
 }
 
 static void __init gateway_init(void)
@@ -696,27 +673,11 @@
 
 static void __init parisc_bootmem_free(void)
 {
-	unsigned long zones_size[MAX_NR_ZONES] = { 0, };
-	unsigned long holes_size[MAX_NR_ZONES] = { 0, };
-	unsigned long mem_start_pfn = ~0UL, mem_end_pfn = 0, mem_size_pfn = 0;
-	int i;
+	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0, };
 
-	for (i = 0; i < npmem_ranges; i++) {
-		unsigned long start = pmem_ranges[i].start_pfn;
-		unsigned long size = pmem_ranges[i].pages;
-		unsigned long end = start + size;
+	max_zone_pfn[0] = memblock_end_of_DRAM();
 
-		if (mem_start_pfn > start)
-			mem_start_pfn = start;
-		if (mem_end_pfn < end)
-			mem_end_pfn = end;
-		mem_size_pfn += size;
-	}
-
-	zones_size[0] = mem_end_pfn - mem_start_pfn;
-	holes_size[0] = zones_size[0] - mem_size_pfn;
-
-	free_area_init_node(0, zones_size, mem_start_pfn, holes_size);
+	free_area_init(max_zone_pfn);
 }
 
 void __init paging_init(void)
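parisc_bootmem_free() no longer sizes the single zone and its holes by hand: since every RAM range was registered with memblock in setup_bootmem(), free_area_init() can derive spans and holes from memblock itself and only needs an upper PFN bound per zone in max_zone_pfn[] (free_area_init_node() with explicit zones_size/holes_size is gone). A minimal sketch of the new contract, assuming a single flat zone as on parisc (the PFN_DOWN() conversion is how this array is normally filled; an over-large bound is clamped against the real memory map):

/* Sketch: zone setup derived from memblock data alone. */
static void __init example_bootmem_free(void)
{
	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0, };

	max_zone_pfn[ZONE_NORMAL] = PFN_DOWN(memblock_end_of_DRAM());

	free_area_init(max_zone_pfn);
}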
@@ -727,11 +688,6 @@
 	flush_cache_all_local(); /* start with known state */
 	flush_tlb_all_local(NULL);
 
-	/*
-	 * Mark all memblocks as present for sparsemem using
-	 * memory_present() and then initialize sparsemem.
-	 */
-	memblocks_present();
 	sparse_init();
 	parisc_bootmem_free();
 }
@@ -788,7 +744,7 @@
 	free_space_ids--;
 
 	index = find_next_zero_bit(space_id, NR_SPACE_IDS, space_id_index);
-	space_id[index >> SHIFT_PER_LONG] |= (1L << (index & (BITS_PER_LONG - 1)));
+	space_id[BIT_WORD(index)] |= BIT_MASK(index);
 	space_id_index = index;
 
 	spin_unlock(&sid_lock);
@@ -799,16 +755,16 @@
 void free_sid(unsigned long spaceid)
 {
 	unsigned long index = spaceid >> SPACEID_SHIFT;
-	unsigned long *dirty_space_offset;
+	unsigned long *dirty_space_offset, mask;
 
-	dirty_space_offset = dirty_space_id + (index >> SHIFT_PER_LONG);
-	index &= (BITS_PER_LONG - 1);
+	dirty_space_offset = &dirty_space_id[BIT_WORD(index)];
+	mask = BIT_MASK(index);
 
 	spin_lock(&sid_lock);
 
-	BUG_ON(*dirty_space_offset & (1L << index)); /* attempt to free space id twice */
+	BUG_ON(*dirty_space_offset & mask); /* attempt to free space id twice */
 
-	*dirty_space_offset |= (1L << index);
+	*dirty_space_offset |= mask;
 	dirty_space_ids++;
 
 	spin_unlock(&sid_lock);
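The alloc_sid()/free_sid() hunks swap the open-coded word-index/bit-mask arithmetic for BIT_WORD() and BIT_MASK() from linux/bits.h, defined as nr / BITS_PER_LONG and 1UL << (nr % BITS_PER_LONG) respectively. A self-contained demonstration that the two forms agree (compiles as ordinary userspace C):

/* Demo: BIT_WORD()/BIT_MASK() vs. the removed open-coded form. */
#include <assert.h>
#include <limits.h>
#include <stdio.h>

#define BITS_PER_LONG	(CHAR_BIT * sizeof(long))
#define SHIFT_PER_LONG	(BITS_PER_LONG == 64 ? 6 : 5)	/* log2(BITS_PER_LONG) */
#define BIT_WORD(nr)	((nr) / BITS_PER_LONG)
#define BIT_MASK(nr)	(1UL << ((nr) % BITS_PER_LONG))

int main(void)
{
	for (unsigned long idx = 0; idx < 4 * BITS_PER_LONG; idx++) {
		/* the open-coded form this patch removes */
		unsigned long word = idx >> SHIFT_PER_LONG;
		unsigned long mask = 1UL << (idx & (BITS_PER_LONG - 1));

		assert(word == BIT_WORD(idx));
		assert(mask == BIT_MASK(idx));
	}
	printf("open-coded and BIT_WORD()/BIT_MASK() forms agree\n");
	return 0;
}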
@@ -913,12 +869,5 @@
 	flush_tlb_all_local(NULL);
 	recycle_sids();
 	spin_unlock(&sid_lock);
-}
-#endif
-
-#ifdef CONFIG_BLK_DEV_INITRD
-void free_initrd_mem(unsigned long start, unsigned long end)
-{
-	free_reserved_area((void *)start, (void *)end, -1, "initrd");
 }
 #endif
---|