2024-10-22 8ac6c7a54ed1b98d142dce24b11c6de6a1e239a5
--- a/kernel/arch/ia64/mm/init.c
+++ b/kernel/arch/ia64/mm/init.c
@@ -8,7 +8,8 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 
-#include <linux/bootmem.h>
+#include <linux/dma-map-ops.h>
+#include <linux/dmar.h>
 #include <linux/efi.h>
 #include <linux/elf.h>
 #include <linux/memblock.h>
@@ -23,10 +24,10 @@
 #include <linux/proc_fs.h>
 #include <linux/bitops.h>
 #include <linux/kexec.h>
+#include <linux/swiotlb.h>
 
 #include <asm/dma.h>
 #include <asm/io.h>
-#include <asm/machvec.h>
 #include <asm/numa.h>
 #include <asm/patch.h>
 #include <asm/pgalloc.h>
@@ -63,7 +64,7 @@
 	if (test_bit(PG_arch_1, &page->flags))
 		return;				/* i-cache is already coherent with d-cache */
 
-	flush_icache_range(addr, addr + (PAGE_SIZE << compound_order(page)));
+	flush_icache_range(addr, addr + page_size(page));
 	set_bit(PG_arch_1, &page->flags);	/* mark page as clean */
 }
 
@@ -72,18 +73,13 @@
  * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
  * flush them when they get mapped into an executable vm-area.
  */
-void
-dma_mark_clean(void *addr, size_t size)
+void arch_dma_mark_clean(phys_addr_t paddr, size_t size)
 {
-	unsigned long pg_addr, end;
+	unsigned long pfn = PHYS_PFN(paddr);
 
-	pg_addr = PAGE_ALIGN((unsigned long) addr);
-	end = (unsigned long) addr + size;
-	while (pg_addr + PAGE_SIZE <= end) {
-		struct page *page = virt_to_page(pg_addr);
-		set_bit(PG_arch_1, &page->flags);
-		pg_addr += PAGE_SIZE;
-	}
+	do {
+		set_bit(PG_arch_1, &pfn_to_page(pfn)->flags);
+	} while (++pfn <= PHYS_PFN(paddr + size - 1));
 }
 
 inline void
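The dma_mark_clean() to arch_dma_mark_clean() conversion above is more than a rename: the function now takes a physical address, dropping the virt_to_page() dependency, and the pfn-based do/while also marks pages the buffer only partially covers, whereas the old loop aligned the start up and marked only pages wholly inside [addr, addr + size). A small userspace model of the two loops (PAGE_SIZE, PAGE_ALIGN and PHYS_PFN here are stand-ins for the kernel macros, assuming 4 KiB pages):

	#include <stdio.h>

	#define PAGE_SIZE	4096UL
	#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
	#define PHYS_PFN(x)	((x) / PAGE_SIZE)

	int main(void)
	{
		unsigned long addr = 0x1800, size = 0x2000;	/* buffer touches pfns 1..3 */

		/* old loop: only pages fully contained in the buffer */
		for (unsigned long pg = PAGE_ALIGN(addr);
		     pg + PAGE_SIZE <= addr + size; pg += PAGE_SIZE)
			printf("old marks pfn %lu\n", PHYS_PFN(pg));	/* prints: 2 */

		/* new loop: every page the buffer overlaps */
		unsigned long pfn = PHYS_PFN(addr);
		do {
			printf("new marks pfn %lu\n", pfn);		/* prints: 1, 2, 3 */
		} while (++pfn <= PHYS_PFN(addr + size - 1));
		return 0;
	}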
@@ -121,13 +117,13 @@
 		vma->vm_end = vma->vm_start + PAGE_SIZE;
 		vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
 		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
-		down_write(&current->mm->mmap_sem);
+		mmap_write_lock(current->mm);
 		if (insert_vm_struct(current->mm, vma)) {
-			up_write(&current->mm->mmap_sem);
+			mmap_write_unlock(current->mm);
 			vm_area_free(vma);
 			return;
 		}
-		up_write(&current->mm->mmap_sem);
+		mmap_write_unlock(current->mm);
 	}
 
 	/* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
@@ -139,13 +135,13 @@
 			vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
 			vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO |
 					VM_DONTEXPAND | VM_DONTDUMP;
-			down_write(&current->mm->mmap_sem);
+			mmap_write_lock(current->mm);
 			if (insert_vm_struct(current->mm, vma)) {
-				up_write(&current->mm->mmap_sem);
+				mmap_write_unlock(current->mm);
 				vm_area_free(vma);
 				return;
 			}
-			up_write(&current->mm->mmap_sem);
+			mmap_write_unlock(current->mm);
 		}
 	}
 }
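Both hunks above are the mechanical part of the mmap_sem to mmap_lock conversion: the rwsem inside mm_struct was renamed and hidden behind accessor functions. The wrappers are one-liners, approximately this shape at the time of the conversion (from include/linux/mmap_lock.h; later kernels add tracepoints in the same spot):

	static inline void mmap_write_lock(struct mm_struct *mm)
	{
		down_write(&mm->mmap_lock);
	}

	static inline void mmap_write_unlock(struct mm_struct *mm)
	{
		up_write(&mm->mmap_lock);
	}

Funneling every call site through one accessor layer is what allows such instrumentation to be added later without touching callers again.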
@@ -211,6 +207,7 @@
 put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot)
 {
 	pgd_t *pgd;
+	p4d_t *p4d;
 	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte;
@@ -218,7 +215,10 @@
 	pgd = pgd_offset_k(address);		/* note: this is NOT pgd_offset()! */
 
 	{
-		pud = pud_alloc(&init_mm, pgd, address);
+		p4d = p4d_alloc(&init_mm, pgd, address);
+		if (!p4d)
+			goto out;
+		pud = pud_alloc(&init_mm, p4d, address);
 		if (!pud)
 			goto out;
 		pmd = pmd_alloc(&init_mm, pud, address);
@@ -385,6 +385,7 @@
 
 	do {
 		pgd_t *pgd;
+		p4d_t *p4d;
 		pud_t *pud;
 		pmd_t *pmd;
 		pte_t *pte;
@@ -395,7 +396,13 @@
 			continue;
 		}
 
-		pud = pud_offset(pgd, end_address);
+		p4d = p4d_offset(pgd, end_address);
+		if (p4d_none(*p4d)) {
+			end_address += P4D_SIZE;
+			continue;
+		}
+
+		pud = pud_offset(p4d, end_address);
 		if (pud_none(*pud)) {
 			end_address += PUD_SIZE;
 			continue;
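These hunks are the ia64 share of the kernel-wide move to five-level page tables: a p4d step now sits between pgd and pud in every walk and allocation path. ia64 itself folds the level away (via asm-generic/pgtable-nop4d.h), so p4d_offset() collapses to a cast and p4d_none() is always false; the extra code costs nothing at runtime. The canonical descent for a kernel address now reads as follows (a sketch using the real accessor names, without the validity and huge-page checks a full walker needs):

	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d = p4d_offset(pgd, addr);	/* (p4d_t *)pgd when the level is folded */
	pud_t *pud = pud_offset(p4d, addr);
	pmd_t *pmd = pmd_offset(pud, addr);
	pte_t *pte = pte_offset_kernel(pmd, addr);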
@@ -433,6 +440,7 @@
 	struct page *map_start, *map_end;
 	int node;
 	pgd_t *pgd;
+	p4d_t *p4d;
 	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte;
@@ -446,23 +454,53 @@
 
 	for (address = start_page; address < end_page; address += PAGE_SIZE) {
 		pgd = pgd_offset_k(address);
-		if (pgd_none(*pgd))
-			pgd_populate(&init_mm, pgd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
-		pud = pud_offset(pgd, address);
+		if (pgd_none(*pgd)) {
+			p4d = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node);
+			if (!p4d)
+				goto err_alloc;
+			pgd_populate(&init_mm, pgd, p4d);
+		}
+		p4d = p4d_offset(pgd, address);
 
-		if (pud_none(*pud))
-			pud_populate(&init_mm, pud, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
+		if (p4d_none(*p4d)) {
+			pud = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node);
+			if (!pud)
+				goto err_alloc;
+			p4d_populate(&init_mm, p4d, pud);
+		}
+		pud = pud_offset(p4d, address);
+
+		if (pud_none(*pud)) {
+			pmd = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node);
+			if (!pmd)
+				goto err_alloc;
+			pud_populate(&init_mm, pud, pmd);
+		}
 		pmd = pmd_offset(pud, address);
 
-		if (pmd_none(*pmd))
-			pmd_populate_kernel(&init_mm, pmd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
+		if (pmd_none(*pmd)) {
+			pte = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node);
+			if (!pte)
+				goto err_alloc;
+			pmd_populate_kernel(&init_mm, pmd, pte);
+		}
 		pte = pte_offset_kernel(pmd, address);
 
-		if (pte_none(*pte))
-			set_pte(pte, pfn_pte(__pa(alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE)) >> PAGE_SHIFT,
+		if (pte_none(*pte)) {
+			void *page = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE,
+							 node);
+			if (!page)
+				goto err_alloc;
+			set_pte(pte, pfn_pte(__pa(page) >> PAGE_SHIFT,
 					     PAGE_KERNEL));
+		}
 	}
 	return 0;
+
+err_alloc:
+	panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d\n",
+	      __func__, PAGE_SIZE, PAGE_SIZE, node);
+	return -ENOMEM;
 }
 
 struct memmap_init_callback_data {
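alloc_bootmem_pages_node() went away with the bootmem allocator; memblock is the replacement. Two differences drive the shape of the new code: memblock_alloc_node(size, align, nid) returns a zeroed mapping or NULL instead of panicking internally, so each level now checks its result, and the single err_alloc label reproduces the old panic-on-failure behaviour in one place. The per-level pattern, condensed to a sketch (the real hunk repeats it for p4d, pud, pmd and pte):

	if (p4d_none(*p4d)) {
		/* zeroed, page-aligned table from the right node, or die at boot */
		pud = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node);
		if (!pud)
			goto err_alloc;
		p4d_populate(&init_mm, p4d, pud);
	}
	pud = pud_offset(p4d, address);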
@@ -498,18 +536,18 @@
 
 	if (map_start < map_end)
 		memmap_init_zone((unsigned long)(map_end - map_start),
-				 args->nid, args->zone, page_to_pfn(map_start),
-				 MEMINIT_EARLY, NULL);
+				 args->nid, args->zone, page_to_pfn(map_start), page_to_pfn(map_end),
+				 MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
 	return 0;
 }
 
 void __meminit
-memmap_init (unsigned long size, int nid, unsigned long zone,
+arch_memmap_init (unsigned long size, int nid, unsigned long zone,
 	     unsigned long start_pfn)
 {
 	if (!vmem_map) {
-		memmap_init_zone(size, nid, zone, start_pfn,
-				 MEMINIT_EARLY, NULL);
+		memmap_init_zone(size, nid, zone, start_pfn, start_pfn + size,
+				 MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
 	} else {
 		struct page *start;
 		struct memmap_init_callback_data args;
@@ -522,6 +560,10 @@
 
 		efi_memmap_walk(virtual_memmap_init, &args);
 	}
+}
+
+void __init memmap_init(void)
+{
 }
 
 int
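memmap_init_zone() grew two parameters in these hunks: an explicit zone end pfn, so a caller can initialize a subrange without walking past the zone, and an explicit migratetype instead of a MIGRATE_MOVABLE default buried inside the function. The target signature, approximately (the form from around v5.11; it was later renamed memmap_init_range()):

	void memmap_init_zone(unsigned long size, int nid, unsigned long zone,
			      unsigned long start_pfn, unsigned long zone_end_pfn,
			      enum meminit_context context,
			      struct vmem_altmap *altmap, int migratetype);

The hook rename to arch_memmap_init() plus the new empty memmap_init(void) track a core-mm reshuffle of the same era; judging from the hunk, core mm now invokes the per-zone hook under the arch_memmap_init() name, and memmap_init() remains as a one-shot entry point for which ia64 has no work.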
@@ -612,13 +654,17 @@
 	BUG_ON(PTRS_PER_PMD * sizeof(pmd_t) != PAGE_SIZE);
 	BUG_ON(PTRS_PER_PTE * sizeof(pte_t) != PAGE_SIZE);
 
-#ifdef CONFIG_PCI
 	/*
-	 * This needs to be called _after_ the command line has been parsed but _before_
-	 * any drivers that may need the PCI DMA interface are initialized or bootmem has
-	 * been freed.
+	 * This needs to be called _after_ the command line has been parsed but
+	 * _before_ any drivers that may need the PCI DMA interface are
+	 * initialized or bootmem has been freed.
 	 */
-	platform_dma_init();
+#ifdef CONFIG_INTEL_IOMMU
+	detect_intel_iommu();
+	if (!iommu_detected)
+#endif
+#ifdef CONFIG_SWIOTLB
+		swiotlb_init(1);
 #endif
 
 #ifdef CONFIG_FLATMEM
@@ -627,7 +673,7 @@
 
 	set_max_mapnr(max_low_pfn);
 	high_memory = __va(max_low_pfn * PAGE_SIZE);
-	free_all_bootmem();
+	memblock_free_all();
 	mem_init_print_info(NULL);
 
 	/*
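With the machine-vector layer gone (note the asm/machvec.h removal at the top), platform_dma_init() has no home, and mem_init() now picks the DMA bootstrap directly. The preprocessor stack is easy to misread: the unbraced `if (!iommu_detected)` deliberately guards the statement on the far side of the #endif/#ifdef pair. With both CONFIG_INTEL_IOMMU and CONFIG_SWIOTLB enabled, the preprocessed result is simply:

	detect_intel_iommu();
	if (!iommu_detected)		/* hardware IOMMU found: skip bounce buffers */
		swiotlb_init(1);	/* 1 = verbose: print the bounce-pool info */

With only CONFIG_SWIOTLB set, swiotlb_init(1) runs unconditionally. The layout presumes Kconfig never enables INTEL_IOMMU without SWIOTLB on ia64; otherwise the dangling `if` would capture whatever statement follows the #endif. The free_all_bootmem() to memblock_free_all() switch in the second hunk above is the hand-over of boot memory to the buddy allocator under its memblock name, completing the bootmem retirement begun by the include changes.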
@@ -646,14 +692,17 @@
 }
 
 #ifdef CONFIG_MEMORY_HOTPLUG
-int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
-		bool want_memblock)
+int arch_add_memory(int nid, u64 start, u64 size,
+		    struct mhp_params *params)
 {
 	unsigned long start_pfn = start >> PAGE_SHIFT;
 	unsigned long nr_pages = size >> PAGE_SHIFT;
 	int ret;
 
-	ret = __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
+	if (WARN_ON_ONCE(params->pgprot.pgprot != PAGE_KERNEL.pgprot))
+		return -EINVAL;
+
+	ret = __add_pages(nid, start_pfn, nr_pages, params);
 	if (ret)
 		printk("%s: Problem encountered in __add_pages() as ret=%d\n",
 		       __func__, ret);
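arch_add_memory() lost its (altmap, want_memblock) tail in favour of struct mhp_params, which bundles everything the memory-hotplug core hands down to the architecture. The struct is small, roughly this (from include/linux/memory_hotplug.h around the time the pgprot member was added):

	struct mhp_params {
		struct vmem_altmap *altmap;	/* alternative struct-page backing */
		pgprot_t pgprot;		/* protection for the linear mapping */
	};

The WARN_ON_ONCE guard is ia64's answer to the new pgprot member: the architecture can only map hotplugged memory as PAGE_KERNEL, so a caller asking for any other protection gets -EINVAL instead of a silently wrong mapping.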