@@ -8,7 +8,8 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 
-#include <linux/bootmem.h>
+#include <linux/dma-map-ops.h>
+#include <linux/dmar.h>
 #include <linux/efi.h>
 #include <linux/elf.h>
 #include <linux/memblock.h>
---|
@@ -23,10 +24,10 @@
 #include <linux/proc_fs.h>
 #include <linux/bitops.h>
 #include <linux/kexec.h>
+#include <linux/swiotlb.h>
 
 #include <asm/dma.h>
 #include <asm/io.h>
-#include <asm/machvec.h>
 #include <asm/numa.h>
 #include <asm/patch.h>
 #include <asm/pgalloc.h>
---|
@@ -63,7 +64,7 @@
         if (test_bit(PG_arch_1, &page->flags))
                 return;                         /* i-cache is already coherent with d-cache */
 
-        flush_icache_range(addr, addr + (PAGE_SIZE << compound_order(page)));
+        flush_icache_range(addr, addr + page_size(page));
         set_bit(PG_arch_1, &page->flags);       /* mark page as clean */
 }
 
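
Note: page_size() was added to <linux/mm.h> precisely so callers stop open-coding the compound-page size, and for a base page it degenerates to PAGE_SIZE, so this hunk is behavior-preserving. A minimal sketch of the helper, paraphrased from include/linux/mm.h of this era:

        /* number of bytes in this (possibly compound) page */
        static inline unsigned long page_size(struct page *page)
        {
                return PAGE_SIZE << compound_order(page);
        }
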
---|
@@ -72,18 +73,13 @@
  * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
  * flush them when they get mapped into an executable vm-area.
  */
-void
-dma_mark_clean(void *addr, size_t size)
+void arch_dma_mark_clean(phys_addr_t paddr, size_t size)
 {
-        unsigned long pg_addr, end;
+        unsigned long pfn = PHYS_PFN(paddr);
 
-        pg_addr = PAGE_ALIGN((unsigned long) addr);
-        end = (unsigned long) addr + size;
-        while (pg_addr + PAGE_SIZE <= end) {
-                struct page *page = virt_to_page(pg_addr);
-                set_bit(PG_arch_1, &page->flags);
-                pg_addr += PAGE_SIZE;
-        }
+        do {
+                set_bit(PG_arch_1, &pfn_to_page(pfn)->flags);
+        } while (++pfn <= PHYS_PFN(paddr + size - 1));
 }
 
 inline void
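
Note: arch_dma_mark_clean() is the arch hook declared in <linux/dma-map-ops.h> (hence the new include in the first hunk); the generic DMA code now hands the architecture a physical address rather than a virtual one. The conversion also subtly widens coverage: the old loop marked only pages fully contained in [addr, addr + size), while the new do/while marks every page the range touches, partial head and tail included. An equivalent open-coded walk, as a sketch assuming PHYS_PFN(x) == ((x) >> PAGE_SHIFT) and size > 0:

        unsigned long first = paddr >> PAGE_SHIFT;
        unsigned long last = (paddr + size - 1) >> PAGE_SHIFT;  /* inclusive */
        unsigned long pfn;

        for (pfn = first; pfn <= last; pfn++)
                set_bit(PG_arch_1, &pfn_to_page(pfn)->flags);
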
---|
@@ -121,13 +117,13 @@
                 vma->vm_end = vma->vm_start + PAGE_SIZE;
                 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
                 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
-                down_write(&current->mm->mmap_sem);
+                mmap_write_lock(current->mm);
                 if (insert_vm_struct(current->mm, vma)) {
-                        up_write(&current->mm->mmap_sem);
+                        mmap_write_unlock(current->mm);
                         vm_area_free(vma);
                         return;
                 }
-                up_write(&current->mm->mmap_sem);
+                mmap_write_unlock(current->mm);
         }
 
         /* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
---|
@@ -139,13 +135,13 @@
                         vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
                         vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO |
                                         VM_DONTEXPAND | VM_DONTDUMP;
-                        down_write(&current->mm->mmap_sem);
+                        mmap_write_lock(current->mm);
                         if (insert_vm_struct(current->mm, vma)) {
-                                up_write(&current->mm->mmap_sem);
+                                mmap_write_unlock(current->mm);
                                 vm_area_free(vma);
                                 return;
                         }
-                        up_write(&current->mm->mmap_sem);
+                        mmap_write_unlock(current->mm);
                 }
         }
 }
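
Note: this hunk and the previous one are the mechanical half of the mmap locking API conversion; the helpers exist so the lock's name and type can change without touching every call site. A sketch of the wrappers, paraphrased from include/linux/mmap_lock.h:

        static inline void mmap_write_lock(struct mm_struct *mm)
        {
                down_write(&mm->mmap_lock);
        }

        static inline void mmap_write_unlock(struct mm_struct *mm)
        {
                up_write(&mm->mmap_lock);
        }
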
---|
@@ -211,6 +207,7 @@
 put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot)
 {
         pgd_t *pgd;
+        p4d_t *p4d;
         pud_t *pud;
         pmd_t *pmd;
         pte_t *pte;
---|
@@ -218,7 +215,10 @@
         pgd = pgd_offset_k(address);            /* note: this is NOT pgd_offset()! */
 
         {
-                pud = pud_alloc(&init_mm, pgd, address);
+                p4d = p4d_alloc(&init_mm, pgd, address);
+                if (!p4d)
+                        goto out;
+                pud = pud_alloc(&init_mm, p4d, address);
                 if (!pud)
                         goto out;
                 pmd = pmd_alloc(&init_mm, pud, address);
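
Note: the standard five-level page-table conversion. The canonical descent for a kernel address now reads as below (a sketch with error handling elided); on ia64 the p4d level is folded through <asm-generic/pgtable-nop4d.h>, so p4d_alloc() allocates nothing and hands back the pgd slot, and the !p4d check exists only to satisfy the generic API:

        pgd = pgd_offset_k(address);
        p4d = p4d_alloc(&init_mm, pgd, address);        /* no-op when folded */
        pud = pud_alloc(&init_mm, p4d, address);
        pmd = pmd_alloc(&init_mm, pud, address);
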
---|
@@ -385,6 +385,7 @@
 
         do {
                 pgd_t *pgd;
+                p4d_t *p4d;
                 pud_t *pud;
                 pmd_t *pmd;
                 pte_t *pte;
---|
@@ -395,7 +396,13 @@
                         continue;
                 }
 
-                pud = pud_offset(pgd, end_address);
+                p4d = p4d_offset(pgd, end_address);
+                if (p4d_none(*p4d)) {
+                        end_address += P4D_SIZE;
+                        continue;
+                }
+
+                pud = pud_offset(p4d, end_address);
                 if (pud_none(*pud)) {
                         end_address += PUD_SIZE;
                         continue;
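
Note: on configurations with a folded p4d (ia64 included), the new branch is dead code that the compiler removes, because the no-p4d stubs make the test constant false; P4D_SIZE also equals PGDIR_SIZE there, so the stride would be correct even if the branch were taken. Paraphrased from <asm-generic/pgtable-nop4d.h> (illustrative):

        #define P4D_SHIFT       PGDIR_SHIFT
        #define P4D_SIZE        (1UL << P4D_SHIFT)

        static inline int p4d_none(p4d_t p4d)   { return 0; }
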
---|
@@ -433,6 +440,7 @@
         struct page *map_start, *map_end;
         int node;
         pgd_t *pgd;
+        p4d_t *p4d;
         pud_t *pud;
         pmd_t *pmd;
         pte_t *pte;
---|
@@ -446,23 +454,53 @@
 
         for (address = start_page; address < end_page; address += PAGE_SIZE) {
                 pgd = pgd_offset_k(address);
-                if (pgd_none(*pgd))
-                        pgd_populate(&init_mm, pgd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
-                pud = pud_offset(pgd, address);
+                if (pgd_none(*pgd)) {
+                        p4d = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node);
+                        if (!p4d)
+                                goto err_alloc;
+                        pgd_populate(&init_mm, pgd, p4d);
+                }
+                p4d = p4d_offset(pgd, address);
 
-                if (pud_none(*pud))
-                        pud_populate(&init_mm, pud, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
+                if (p4d_none(*p4d)) {
+                        pud = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node);
+                        if (!pud)
+                                goto err_alloc;
+                        p4d_populate(&init_mm, p4d, pud);
+                }
+                pud = pud_offset(p4d, address);
+
+                if (pud_none(*pud)) {
+                        pmd = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node);
+                        if (!pmd)
+                                goto err_alloc;
+                        pud_populate(&init_mm, pud, pmd);
+                }
                 pmd = pmd_offset(pud, address);
 
-                if (pmd_none(*pmd))
-                        pmd_populate_kernel(&init_mm, pmd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
+                if (pmd_none(*pmd)) {
+                        pte = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node);
+                        if (!pte)
+                                goto err_alloc;
+                        pmd_populate_kernel(&init_mm, pmd, pte);
+                }
                 pte = pte_offset_kernel(pmd, address);
 
-                if (pte_none(*pte))
-                        set_pte(pte, pfn_pte(__pa(alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE)) >> PAGE_SHIFT,
+                if (pte_none(*pte)) {
+                        void *page = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE,
+                                                         node);
+                        if (!page)
+                                goto err_alloc;
+                        set_pte(pte, pfn_pte(__pa(page) >> PAGE_SHIFT,
                                      PAGE_KERNEL));
+                }
         }
         return 0;
+
+err_alloc:
+        panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d\n",
+              __func__, PAGE_SIZE, PAGE_SIZE, node);
+        return -ENOMEM;
 }
 
 struct memmap_init_callback_data {
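
Note: the old alloc_bootmem_pages_node() panicked internally on failure, whereas memblock_alloc_node() returns NULL, so the conversion must add the explicit err_alloc path to keep the old fail-hard boot semantics. The recurring pattern, as a standalone sketch:

        /* one zeroed, page-aligned page from node-local memory (boot only) */
        void *p = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node);

        if (!p)
                panic("%s: allocation failed\n", __func__);

The trailing return -ENOMEM is unreachable (panic() never returns) and is kept only for form, since the function returns int.
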
---|
@@ -498,18 +536,18 @@
 
         if (map_start < map_end)
                 memmap_init_zone((unsigned long)(map_end - map_start),
-                                 args->nid, args->zone, page_to_pfn(map_start),
-                                 MEMINIT_EARLY, NULL);
+                                 args->nid, args->zone, page_to_pfn(map_start), page_to_pfn(map_end),
+                                 MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
         return 0;
 }
 
 void __meminit
-memmap_init (unsigned long size, int nid, unsigned long zone,
+arch_memmap_init (unsigned long size, int nid, unsigned long zone,
              unsigned long start_pfn)
 {
         if (!vmem_map) {
-                memmap_init_zone(size, nid, zone, start_pfn,
-                                 MEMINIT_EARLY, NULL);
+                memmap_init_zone(size, nid, zone, start_pfn, start_pfn + size,
+                                 MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
         } else {
                 struct page *start;
                 struct memmap_init_callback_data args;
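
Note: memmap_init_zone() picked up two parameters across this range of releases: the pfn one past the end of the zone (needed so deferred struct-page initialization knows where the zone stops) and the migratetype with which pageblocks start out; boot-time callers pass MIGRATE_MOVABLE. Its prototype at this point, paraphrased with approximate parameter names:

        void __meminit memmap_init_zone(unsigned long size, int nid,
                        unsigned long zone, unsigned long start_pfn,
                        unsigned long zone_end_pfn,
                        enum meminit_context context,
                        struct vmem_altmap *altmap, int migratetype);
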
---|
@@ -522,6 +560,10 @@
 
                 efi_memmap_walk(virtual_memmap_init, &args);
         }
+}
+
+void __init memmap_init(void)
+{
 }
 
 int
---|
@@ -612,13 +654,17 @@
         BUG_ON(PTRS_PER_PMD * sizeof(pmd_t) != PAGE_SIZE);
         BUG_ON(PTRS_PER_PTE * sizeof(pte_t) != PAGE_SIZE);
 
-#ifdef CONFIG_PCI
         /*
-         * This needs to be called _after_ the command line has been parsed but _before_
-         * any drivers that may need the PCI DMA interface are initialized or bootmem has
-         * been freed.
+         * This needs to be called _after_ the command line has been parsed but
+         * _before_ any drivers that may need the PCI DMA interface are
+         * initialized or bootmem has been freed.
          */
-        platform_dma_init();
+#ifdef CONFIG_INTEL_IOMMU
+        detect_intel_iommu();
+        if (!iommu_detected)
+#endif
+#ifdef CONFIG_SWIOTLB
+        swiotlb_init(1);
 #endif
 
 #ifdef CONFIG_FLATMEM
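
Note: platform_dma_init() disappeared together with the ia64 machine-vector layer (see the removed <asm/machvec.h> include), so the generic DMA bring-up is now spelled out inline. With both options enabled, the preprocessor reduces the hunk to the following logic, where the swiotlb bounce buffer is only set up if no hardware IOMMU was found:

        detect_intel_iommu();
        if (!iommu_detected)
                swiotlb_init(1);        /* 1 = verbose: print the swiotlb banner */
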
---|
@@ -627,7 +673,7 @@
 
         set_max_mapnr(max_low_pfn);
         high_memory = __va(max_low_pfn * PAGE_SIZE);
-        free_all_bootmem();
+        memblock_free_all();
         mem_init_print_info(NULL);
 
         /*
---|
@@ -646,14 +692,17 @@
 }
 
 #ifdef CONFIG_MEMORY_HOTPLUG
-int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
-                bool want_memblock)
+int arch_add_memory(int nid, u64 start, u64 size,
+                    struct mhp_params *params)
 {
         unsigned long start_pfn = start >> PAGE_SHIFT;
         unsigned long nr_pages = size >> PAGE_SHIFT;
         int ret;
 
-        ret = __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
+        if (WARN_ON_ONCE(params->pgprot.pgprot != PAGE_KERNEL.pgprot))
+                return -EINVAL;
+
+        ret = __add_pages(nid, start_pfn, nr_pages, params);
         if (ret)
                 printk("%s: Problem encountered in __add_pages() as ret=%d\n",
                        __func__, ret);
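
Note: struct mhp_params bundles what used to be loose arguments along the hotplug path (the altmap moved inside it; the want_memblock flag was dropped along the way). The new WARN_ON_ONCE rejects requests to map hotplugged memory with any protection other than PAGE_KERNEL, which ia64 cannot honor. The structure, paraphrased from include/linux/memory_hotplug.h of this era (comments mine):

        struct mhp_params {
                struct vmem_altmap *altmap;     /* alternate memmap allocation */
                pgprot_t pgprot;                /* protection for the linear map */
        };
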