From 95099d4622f8cb224d94e314c7a8e0df60b13f87 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Sat, 09 Dec 2023 08:38:01 +0000
Subject: [PATCH] enable docker ppp
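
Update kernel/arch/ia64/mm/init.c to the current in-kernel APIs:
replace bootmem allocations with memblock, thread the p4d page-table
level through the page-table walkers, switch mmap_sem callers to
mmap_write_lock()/mmap_write_unlock(), convert dma_mark_clean() to
arch_dma_mark_clean(), pass the zone-end PFN and migratetype to
memmap_init_zone(), initialize DMA via detect_intel_iommu() and
swiotlb_init(), and take struct mhp_params in arch_add_memory().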
---
kernel/arch/ia64/mm/init.c | 137 +++++++++++++++++++++++++++++++--------------
1 file changed, 93 insertions(+), 44 deletions(-)
diff --git a/kernel/arch/ia64/mm/init.c b/kernel/arch/ia64/mm/init.c
index 561e257..f316a83 100644
--- a/kernel/arch/ia64/mm/init.c
+++ b/kernel/arch/ia64/mm/init.c
@@ -8,7 +8,8 @@
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/bootmem.h>
+#include <linux/dma-map-ops.h>
+#include <linux/dmar.h>
#include <linux/efi.h>
#include <linux/elf.h>
#include <linux/memblock.h>
@@ -23,10 +24,10 @@
#include <linux/proc_fs.h>
#include <linux/bitops.h>
#include <linux/kexec.h>
+#include <linux/swiotlb.h>
#include <asm/dma.h>
#include <asm/io.h>
-#include <asm/machvec.h>
#include <asm/numa.h>
#include <asm/patch.h>
#include <asm/pgalloc.h>
@@ -63,7 +64,7 @@
if (test_bit(PG_arch_1, &page->flags))
return; /* i-cache is already coherent with d-cache */
- flush_icache_range(addr, addr + (PAGE_SIZE << compound_order(page)));
+ flush_icache_range(addr, addr + page_size(page));
set_bit(PG_arch_1, &page->flags); /* mark page as clean */
}
@@ -72,18 +73,13 @@
* DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
* flush them when they get mapped into an executable vm-area.
*/
-void
-dma_mark_clean(void *addr, size_t size)
+void arch_dma_mark_clean(phys_addr_t paddr, size_t size)
{
- unsigned long pg_addr, end;
+ unsigned long pfn = PHYS_PFN(paddr);
- pg_addr = PAGE_ALIGN((unsigned long) addr);
- end = (unsigned long) addr + size;
- while (pg_addr + PAGE_SIZE <= end) {
- struct page *page = virt_to_page(pg_addr);
- set_bit(PG_arch_1, &page->flags);
- pg_addr += PAGE_SIZE;
- }
+ do {
+ set_bit(PG_arch_1, &pfn_to_page(pfn)->flags);
+ } while (++pfn <= PHYS_PFN(paddr + size - 1));
}
inline void
@@ -121,13 +117,13 @@
vma->vm_end = vma->vm_start + PAGE_SIZE;
vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
- down_write(&current->mm->mmap_sem);
+ mmap_write_lock(current->mm);
if (insert_vm_struct(current->mm, vma)) {
- up_write(&current->mm->mmap_sem);
+ mmap_write_unlock(current->mm);
vm_area_free(vma);
return;
}
- up_write(&current->mm->mmap_sem);
+ mmap_write_unlock(current->mm);
}
/* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
@@ -139,13 +135,13 @@
vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO |
VM_DONTEXPAND | VM_DONTDUMP;
- down_write(&current->mm->mmap_sem);
+ mmap_write_lock(current->mm);
if (insert_vm_struct(current->mm, vma)) {
- up_write(&current->mm->mmap_sem);
+ mmap_write_unlock(current->mm);
vm_area_free(vma);
return;
}
- up_write(&current->mm->mmap_sem);
+ mmap_write_unlock(current->mm);
}
}
}
@@ -211,6 +207,7 @@
put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot)
{
pgd_t *pgd;
+ p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
@@ -218,7 +215,10 @@
pgd = pgd_offset_k(address); /* note: this is NOT pgd_offset()! */
{
- pud = pud_alloc(&init_mm, pgd, address);
+ p4d = p4d_alloc(&init_mm, pgd, address);
+ if (!p4d)
+ goto out;
+ pud = pud_alloc(&init_mm, p4d, address);
if (!pud)
goto out;
pmd = pmd_alloc(&init_mm, pud, address);
@@ -385,6 +385,7 @@
do {
pgd_t *pgd;
+ p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
@@ -395,7 +396,13 @@
continue;
}
- pud = pud_offset(pgd, end_address);
+ p4d = p4d_offset(pgd, end_address);
+ if (p4d_none(*p4d)) {
+ end_address += P4D_SIZE;
+ continue;
+ }
+
+ pud = pud_offset(p4d, end_address);
if (pud_none(*pud)) {
end_address += PUD_SIZE;
continue;
@@ -433,6 +440,7 @@
struct page *map_start, *map_end;
int node;
pgd_t *pgd;
+ p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
@@ -446,23 +454,53 @@
for (address = start_page; address < end_page; address += PAGE_SIZE) {
pgd = pgd_offset_k(address);
- if (pgd_none(*pgd))
- pgd_populate(&init_mm, pgd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
- pud = pud_offset(pgd, address);
+ if (pgd_none(*pgd)) {
+ p4d = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node);
+ if (!p4d)
+ goto err_alloc;
+ pgd_populate(&init_mm, pgd, p4d);
+ }
+ p4d = p4d_offset(pgd, address);
- if (pud_none(*pud))
- pud_populate(&init_mm, pud, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
+ if (p4d_none(*p4d)) {
+ pud = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node);
+ if (!pud)
+ goto err_alloc;
+ p4d_populate(&init_mm, p4d, pud);
+ }
+ pud = pud_offset(p4d, address);
+
+ if (pud_none(*pud)) {
+ pmd = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node);
+ if (!pmd)
+ goto err_alloc;
+ pud_populate(&init_mm, pud, pmd);
+ }
pmd = pmd_offset(pud, address);
- if (pmd_none(*pmd))
- pmd_populate_kernel(&init_mm, pmd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
+ if (pmd_none(*pmd)) {
+ pte = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node);
+ if (!pte)
+ goto err_alloc;
+ pmd_populate_kernel(&init_mm, pmd, pte);
+ }
pte = pte_offset_kernel(pmd, address);
- if (pte_none(*pte))
- set_pte(pte, pfn_pte(__pa(alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE)) >> PAGE_SHIFT,
+ if (pte_none(*pte)) {
+ void *page = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE,
+ node);
+ if (!page)
+ goto err_alloc;
+ set_pte(pte, pfn_pte(__pa(page) >> PAGE_SHIFT,
PAGE_KERNEL));
+ }
}
return 0;
+
+err_alloc:
+ panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d\n",
+ __func__, PAGE_SIZE, PAGE_SIZE, node);
+ return -ENOMEM;
}
struct memmap_init_callback_data {
@@ -498,18 +536,18 @@
if (map_start < map_end)
memmap_init_zone((unsigned long)(map_end - map_start),
- args->nid, args->zone, page_to_pfn(map_start),
- MEMINIT_EARLY, NULL);
+ args->nid, args->zone, page_to_pfn(map_start), page_to_pfn(map_end),
+ MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
return 0;
}
void __meminit
-memmap_init (unsigned long size, int nid, unsigned long zone,
+arch_memmap_init (unsigned long size, int nid, unsigned long zone,
unsigned long start_pfn)
{
if (!vmem_map) {
- memmap_init_zone(size, nid, zone, start_pfn,
- MEMINIT_EARLY, NULL);
+ memmap_init_zone(size, nid, zone, start_pfn, start_pfn + size,
+ MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
} else {
struct page *start;
struct memmap_init_callback_data args;
@@ -522,6 +560,10 @@
efi_memmap_walk(virtual_memmap_init, &args);
}
+}
+
+void __init memmap_init(void)
+{
}
int
@@ -612,13 +654,17 @@
BUG_ON(PTRS_PER_PMD * sizeof(pmd_t) != PAGE_SIZE);
BUG_ON(PTRS_PER_PTE * sizeof(pte_t) != PAGE_SIZE);
-#ifdef CONFIG_PCI
/*
- * This needs to be called _after_ the command line has been parsed but _before_
- * any drivers that may need the PCI DMA interface are initialized or bootmem has
- * been freed.
+ * This needs to be called _after_ the command line has been parsed but
+ * _before_ any drivers that may need the PCI DMA interface are
+ * initialized or bootmem has been freed.
*/
- platform_dma_init();
+#ifdef CONFIG_INTEL_IOMMU
+ detect_intel_iommu();
+ if (!iommu_detected)
+#endif
+#ifdef CONFIG_SWIOTLB
+ swiotlb_init(1);
#endif
#ifdef CONFIG_FLATMEM
@@ -627,7 +673,7 @@
set_max_mapnr(max_low_pfn);
high_memory = __va(max_low_pfn * PAGE_SIZE);
- free_all_bootmem();
+ memblock_free_all();
mem_init_print_info(NULL);
/*
@@ -646,14 +692,17 @@
}
#ifdef CONFIG_MEMORY_HOTPLUG
-int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
- bool want_memblock)
+int arch_add_memory(int nid, u64 start, u64 size,
+ struct mhp_params *params)
{
unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long nr_pages = size >> PAGE_SHIFT;
int ret;
- ret = __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
+ if (WARN_ON_ONCE(params->pgprot.pgprot != PAGE_KERNEL.pgprot))
+ return -EINVAL;
+
+ ret = __add_pages(nid, start_pfn, nr_pages, params);
if (ret)
printk("%s: Problem encountered in __add_pages() as ret=%d\n",
__func__, ret);
--
Gitblit v1.6.2