From 102a0743326a03cd1a1202ceda21e175b7d3575c Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Tue, 20 Feb 2024 01:20:52 +0000
Subject: [PATCH] mm/pagewalk: rework page table walker to use mm_walk_ops

---
 kernel/mm/pagewalk.c | 358 +++++++++++++++++++++++++++++++++++++++++++++++------------
 1 file changed, 282 insertions(+), 76 deletions(-)

diff --git a/kernel/mm/pagewalk.c b/kernel/mm/pagewalk.c
index 3c0930d..d11b6ba 100644
--- a/kernel/mm/pagewalk.c
+++ b/kernel/mm/pagewalk.c
@@ -1,18 +1,33 @@
 // SPDX-License-Identifier: GPL-2.0
-#include <linux/mm.h>
+#include <linux/pagewalk.h>
 #include <linux/highmem.h>
 #include <linux/sched.h>
 #include <linux/hugetlb.h>

-static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
-			  struct mm_walk *walk)
+/*
+ * We want to know the real level where an entry is located, ignoring any
+ * folding of levels which may be happening. For example, if p4d is folded
+ * then a missing entry found at level 1 (p4d) is actually at level 0 (pgd).
+ */
+static int real_depth(int depth)
 {
-	pte_t *pte;
+	if (depth == 3 && PTRS_PER_PMD == 1)
+		depth = 2;
+	if (depth == 2 && PTRS_PER_PUD == 1)
+		depth = 1;
+	if (depth == 1 && PTRS_PER_P4D == 1)
+		depth = 0;
+	return depth;
+}
+
+static int walk_pte_range_inner(pte_t *pte, unsigned long addr,
+				unsigned long end, struct mm_walk *walk)
+{
+	const struct mm_walk_ops *ops = walk->ops;
 	int err = 0;

-	pte = pte_offset_map(pmd, addr);
 	for (;;) {
-		err = walk->pte_entry(pte, addr, addr + PAGE_SIZE, walk);
+		err = ops->pte_entry(pte, addr, addr + PAGE_SIZE, walk);
 		if (err)
 			break;
 		if (addr >= end - PAGE_SIZE)
@@ -20,8 +35,26 @@
 		addr += PAGE_SIZE;
 		pte++;
 	}
+	return err;
+}

-	pte_unmap(pte);
+static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
+			  struct mm_walk *walk)
+{
+	pte_t *pte;
+	int err = 0;
+	spinlock_t *ptl;
+
+	if (walk->no_vma) {
+		pte = pte_offset_map(pmd, addr);
+		err = walk_pte_range_inner(pte, addr, end, walk);
+		pte_unmap(pte);
+	} else {
+		pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
+		err = walk_pte_range_inner(pte, addr, end, walk);
+		pte_unmap_unlock(pte, ptl);
+	}
+
+	return err;
 }

@@ -30,38 +63,51 @@
 {
 	pmd_t *pmd;
 	unsigned long next;
+	const struct mm_walk_ops *ops = walk->ops;
 	int err = 0;
+	int depth = real_depth(3);

 	pmd = pmd_offset(pud, addr);
 	do {
 again:
 		next = pmd_addr_end(addr, end);
-		if (pmd_none(*pmd) || !walk->vma) {
-			if (walk->pte_hole)
-				err = walk->pte_hole(addr, next, walk);
+		if (pmd_none(*pmd)) {
+			if (ops->pte_hole)
+				err = ops->pte_hole(addr, next, depth, walk);
 			if (err)
 				break;
 			continue;
 		}
+
+		walk->action = ACTION_SUBTREE;
+
 		/*
 		 * This implies that each ->pmd_entry() handler
 		 * needs to know about pmd_trans_huge() pmds
 		 */
-		if (walk->pmd_entry)
-			err = walk->pmd_entry(pmd, addr, next, walk);
+		if (ops->pmd_entry)
+			err = ops->pmd_entry(pmd, addr, next, walk);
 		if (err)
 			break;
+
+		if (walk->action == ACTION_AGAIN)
+			goto again;

 		/*
 		 * Check this here so we only break down trans_huge
 		 * pages when we _need_ to
 		 */
-		if (!walk->pte_entry)
+		if ((!walk->vma && (pmd_leaf(*pmd) || !pmd_present(*pmd))) ||
+		    walk->action == ACTION_CONTINUE ||
+		    !(ops->pte_entry))
 			continue;

-		split_huge_pmd(walk->vma, pmd, addr);
-		if (pmd_trans_unstable(pmd))
-			goto again;
+		if (walk->vma) {
+			split_huge_pmd(walk->vma, pmd, addr);
+			if (pmd_trans_unstable(pmd))
+				goto again;
+		}
+
 		err = walk_pte_range(pmd, addr, next, walk);
 		if (err)
 			break;
@@ -75,38 +121,43 @@
 {
 	pud_t *pud;
 	unsigned long next;
+	const struct mm_walk_ops *ops = walk->ops;
 	int err = 0;
+	int depth = real_depth(2);

 	pud = pud_offset(p4d, addr);
 	do {
 again:
 		next = pud_addr_end(addr, end);
-		if (pud_none(*pud) || !walk->vma) {
-			if (walk->pte_hole)
-				err = walk->pte_hole(addr, next, walk);
+		if (pud_none(*pud)) {
+			if (ops->pte_hole)
+				err = ops->pte_hole(addr, next, depth, walk);
 			if (err)
 				break;
 			continue;
 		}

-		if (walk->pud_entry) {
-			spinlock_t *ptl = pud_trans_huge_lock(pud, walk->vma);
+		walk->action = ACTION_SUBTREE;

-			if (ptl) {
-				err = walk->pud_entry(pud, addr, next, walk);
-				spin_unlock(ptl);
-				if (err)
-					break;
-				continue;
-			}
-		}
+		if (ops->pud_entry)
+			err = ops->pud_entry(pud, addr, next, walk);
+		if (err)
+			break;

-		split_huge_pud(walk->vma, pud, addr);
+		if (walk->action == ACTION_AGAIN)
+			goto again;
+
+		if ((!walk->vma && (pud_leaf(*pud) || !pud_present(*pud))) ||
+		    walk->action == ACTION_CONTINUE ||
+		    !(ops->pmd_entry || ops->pte_entry))
+			continue;
+
+		if (walk->vma)
+			split_huge_pud(walk->vma, pud, addr);
 		if (pud_none(*pud))
 			goto again;

-		if (walk->pmd_entry || walk->pte_entry)
-			err = walk_pmd_range(pud, addr, next, walk);
+		err = walk_pmd_range(pud, addr, next, walk);
 		if (err)
 			break;
 	} while (pud++, addr = next, addr != end);
@@ -119,19 +170,26 @@
 {
 	p4d_t *p4d;
 	unsigned long next;
+	const struct mm_walk_ops *ops = walk->ops;
 	int err = 0;
+	int depth = real_depth(1);

 	p4d = p4d_offset(pgd, addr);
 	do {
 		next = p4d_addr_end(addr, end);
 		if (p4d_none_or_clear_bad(p4d)) {
-			if (walk->pte_hole)
-				err = walk->pte_hole(addr, next, walk);
+			if (ops->pte_hole)
+				err = ops->pte_hole(addr, next, depth, walk);
 			if (err)
 				break;
 			continue;
 		}
-		if (walk->pmd_entry || walk->pte_entry)
+		if (ops->p4d_entry) {
+			err = ops->p4d_entry(p4d, addr, next, walk);
+			if (err)
+				break;
+		}
+		if (ops->pud_entry || ops->pmd_entry || ops->pte_entry)
 			err = walk_pud_range(p4d, addr, next, walk);
 		if (err)
 			break;
@@ -145,19 +203,29 @@
 {
 	pgd_t *pgd;
 	unsigned long next;
+	const struct mm_walk_ops *ops = walk->ops;
 	int err = 0;

-	pgd = pgd_offset(walk->mm, addr);
+	if (walk->pgd)
+		pgd = walk->pgd + pgd_index(addr);
+	else
+		pgd = pgd_offset(walk->mm, addr);
 	do {
 		next = pgd_addr_end(addr, end);
 		if (pgd_none_or_clear_bad(pgd)) {
-			if (walk->pte_hole)
-				err = walk->pte_hole(addr, next, walk);
+			if (ops->pte_hole)
+				err = ops->pte_hole(addr, next, 0, walk);
 			if (err)
 				break;
 			continue;
 		}
-		if (walk->pmd_entry || walk->pte_entry)
+		if (ops->pgd_entry) {
+			err = ops->pgd_entry(pgd, addr, next, walk);
+			if (err)
+				break;
+		}
+		if (ops->p4d_entry || ops->pud_entry || ops->pmd_entry ||
+		    ops->pte_entry)
 			err = walk_p4d_range(pgd, addr, next, walk);
 		if (err)
 			break;
@@ -183,6 +251,7 @@
 	unsigned long hmask = huge_page_mask(h);
 	unsigned long sz = huge_page_size(h);
 	pte_t *pte;
+	const struct mm_walk_ops *ops = walk->ops;
 	int err = 0;

 	do {
@@ -190,9 +259,9 @@

 		pte = huge_pte_offset(walk->mm, addr & hmask, sz);
 		if (pte)
-			err = walk->hugetlb_entry(pte, hmask, addr, next, walk);
-		else if (walk->pte_hole)
-			err = walk->pte_hole(addr, next, walk);
+			err = ops->hugetlb_entry(pte, hmask, addr, next, walk);
+		else if (ops->pte_hole)
+			err = ops->pte_hole(addr, next, -1, walk);
 		if (err)
 			break;
@@ -220,9 +289,10 @@
 		      struct mm_walk *walk)
 {
 	struct vm_area_struct *vma = walk->vma;
+	const struct mm_walk_ops *ops = walk->ops;

-	if (walk->test_walk)
-		return walk->test_walk(start, end, walk);
+	if (ops->test_walk)
+		return ops->test_walk(start, end, walk);

 	/*
 	 * vma(VM_PFNMAP) doesn't have any valid struct pages behind VM_PFNMAP
@@ -234,8 +304,8 @@
 	 */
 	if (vma->vm_flags & VM_PFNMAP) {
 		int err = 1;
-		if (walk->pte_hole)
-			err = walk->pte_hole(start, end, walk);
+		if (ops->pte_hole)
+			err = ops->pte_hole(start, end, -1, walk);
 		return err ? err : 1;
 	}
 	return 0;
@@ -246,23 +316,35 @@
 {
 	int err = 0;
 	struct vm_area_struct *vma = walk->vma;
+	const struct mm_walk_ops *ops = walk->ops;
+
+	if (ops->pre_vma) {
+		err = ops->pre_vma(start, end, walk);
+		if (err)
+			return err;
+	}

-	if (vma && is_vm_hugetlb_page(vma)) {
-		if (walk->hugetlb_entry)
+	if (is_vm_hugetlb_page(vma)) {
+		if (ops->hugetlb_entry)
 			err = walk_hugetlb_range(start, end, walk);
 	} else
 		err = walk_pgd_range(start, end, walk);

+	if (ops->post_vma)
+		ops->post_vma(walk);
+
 	return err;
 }

 /**
  * walk_page_range - walk page table with caller specific callbacks
- * @start: start address of the virtual address range
- * @end: end address of the virtual address range
- * @walk: mm_walk structure defining the callbacks and the target address space
+ * @mm: mm_struct representing the target process of page table walk
+ * @start: start address of the virtual address range
+ * @end: end address of the virtual address range
+ * @ops: operation to call during the walk
+ * @private: private data for callbacks' usage
  *
- * Recursively walk the page table tree of the process represented by @walk->mm
+ * Recursively walk the page table tree of the process represented by @mm
  * within the virtual address range [@start, @end). During walking, we can do
  * some caller-specific works for each entry, by setting up pmd_entry(),
  * pte_entry(), and/or hugetlb_entry(). If you don't set up for some of these
@@ -278,47 +360,61 @@
  *
  * Before starting to walk page table, some callers want to check whether
  * they really want to walk over the current vma, typically by checking
- * its vm_flags. walk_page_test() and @walk->test_walk() are used for this
+ * its vm_flags. walk_page_test() and @ops->test_walk() are used for this
  * purpose.
+ *
+ * If operations need to be staged before and committed after a vma is walked,
+ * there are two callbacks, pre_vma() and post_vma(). Note that post_vma(),
+ * since it is intended to handle commit-type operations, can't return any
+ * errors.
  *
  * struct mm_walk keeps current values of some common data like vma and pmd,
  * which are useful for the access from callbacks. If you want to pass some
- * caller-specific data to callbacks, @walk->private should be helpful.
+ * caller-specific data to callbacks, @private should be helpful.
  *
  * Locking:
- *   Callers of walk_page_range() and walk_page_vma() should hold
- *   @walk->mm->mmap_sem, because these function traverse vma list and/or
- *   access to vma's data.
+ *   Callers of walk_page_range() and walk_page_vma() should hold @mm->mmap_lock,
+ *   because these functions traverse the vma list and/or access the vma's data.
  */
-int walk_page_range(unsigned long start, unsigned long end,
-		    struct mm_walk *walk)
+int walk_page_range(struct mm_struct *mm, unsigned long start,
+		    unsigned long end, const struct mm_walk_ops *ops,
+		    void *private)
 {
 	int err = 0;
 	unsigned long next;
 	struct vm_area_struct *vma;
+	struct mm_walk walk = {
+		.ops		= ops,
+		.mm		= mm,
+		.private	= private,
+	};

 	if (start >= end)
 		return -EINVAL;

-	if (!walk->mm)
+	if (!walk.mm)
 		return -EINVAL;

-	VM_BUG_ON_MM(!rwsem_is_locked(&walk->mm->mmap_sem), walk->mm);
+	mmap_assert_locked(walk.mm);

-	vma = find_vma(walk->mm, start);
+	vma = find_vma(walk.mm, start);
 	do {
 		if (!vma) { /* after the last vma */
-			walk->vma = NULL;
+			walk.vma = NULL;
 			next = end;
+			if (ops->pte_hole)
+				err = ops->pte_hole(start, next, -1, &walk);
 		} else if (start < vma->vm_start) { /* outside vma */
-			walk->vma = NULL;
+			walk.vma = NULL;
 			next = min(end, vma->vm_start);
+			if (ops->pte_hole)
+				err = ops->pte_hole(start, next, -1, &walk);
 		} else { /* inside vma */
-			walk->vma = vma;
+			walk.vma = vma;
 			next = min(end, vma->vm_end);
 			vma = vma->vm_next;

-			err = walk_page_test(start, next, walk);
+			err = walk_page_test(start, next, &walk);
 			if (err > 0) {
 				/*
 				 * positive return values are purely for
@@ -330,29 +426,139 @@
 			}
 			if (err < 0)
 				break;
+			err = __walk_page_range(start, next, &walk);
 		}
-		if (walk->vma || walk->pte_hole)
-			err = __walk_page_range(start, next, walk);
 		if (err)
 			break;
 	} while (start = next, start < end);
 	return err;
 }
+EXPORT_SYMBOL_GPL(walk_page_range);

-int walk_page_vma(struct vm_area_struct *vma, struct mm_walk *walk)
+/*
+ * Similar to walk_page_range() but can walk any page tables even if they are
+ * not backed by VMAs. Because 'unusual' entries may be walked, this function
+ * will also not lock the PTEs for the pte_entry() callback. This is useful for
+ * walking the kernel page tables or page tables for firmware.
+ */
+int walk_page_range_novma(struct mm_struct *mm, unsigned long start,
+			  unsigned long end, const struct mm_walk_ops *ops,
+			  pgd_t *pgd,
+			  void *private)
 {
-	int err;
+	struct mm_walk walk = {
+		.ops		= ops,
+		.mm		= mm,
+		.pgd		= pgd,
+		.private	= private,
+		.no_vma		= true
+	};

-	if (!walk->mm)
+	if (start >= end || !walk.mm)
 		return -EINVAL;

-	VM_BUG_ON(!rwsem_is_locked(&walk->mm->mmap_sem));
-	VM_BUG_ON(!vma);
-	walk->vma = vma;
-	err = walk_page_test(vma->vm_start, vma->vm_end, walk);
+	mmap_assert_write_locked(walk.mm);
+
+	return walk_pgd_range(start, end, &walk);
+}
+
+int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops,
+		  void *private)
+{
+	struct mm_walk walk = {
+		.ops		= ops,
+		.mm		= vma->vm_mm,
+		.vma		= vma,
+		.private	= private,
+	};
+	int err;
+
+	if (!walk.mm)
+		return -EINVAL;
+
+	mmap_assert_locked(walk.mm);
+
+	err = walk_page_test(vma->vm_start, vma->vm_end, &walk);
 	if (err > 0)
 		return 0;
 	if (err < 0)
 		return err;
-	return __walk_page_range(vma->vm_start, vma->vm_end, walk);
+	return __walk_page_range(vma->vm_start, vma->vm_end, &walk);
+}
+
+/**
+ * walk_page_mapping - walk all memory areas mapped into a struct address_space.
+ * @mapping: Pointer to the struct address_space
+ * @first_index: First page offset in the address_space
+ * @nr: Number of incremental page offsets to cover
+ * @ops: operation to call during the walk
+ * @private: private data for callbacks' usage
+ *
+ * This function walks all memory areas mapped into a struct address_space.
+ * The walk is limited to only the given page-size index range, but if
+ * the index boundaries cross a huge page-table entry, that entry will be
+ * included.
+ *
+ * Also see walk_page_range() for additional information.
+ *
+ * Locking:
+ *   This function can't require that the struct mm_struct::mmap_lock is held,
+ *   since @mapping may be mapped by multiple processes. Instead
+ *   @mapping->i_mmap_rwsem must be held. This might have implications in the
+ *   callbacks, and it's up to the caller to ensure that the
+ *   struct mm_struct::mmap_lock is not needed.
+ *
+ *   Also this means that a caller can't rely on the struct
+ *   vm_area_struct::vm_flags to be constant across a call,
+ *   except for immutable flags. Callers requiring this shouldn't use
+ *   this function.
+ *
+ * Return: 0 on success, negative error code on failure, positive number on
+ * caller defined premature termination.
+ */
+int walk_page_mapping(struct address_space *mapping, pgoff_t first_index,
+		      pgoff_t nr, const struct mm_walk_ops *ops,
+		      void *private)
+{
+	struct mm_walk walk = {
+		.ops		= ops,
+		.private	= private,
+	};
+	struct vm_area_struct *vma;
+	pgoff_t vba, vea, cba, cea;
+	unsigned long start_addr, end_addr;
+	int err = 0;
+
+	lockdep_assert_held(&mapping->i_mmap_rwsem);
+	vma_interval_tree_foreach(vma, &mapping->i_mmap, first_index,
+				  first_index + nr - 1) {
+		/* Clip to the vma */
+		vba = vma->vm_pgoff;
+		vea = vba + vma_pages(vma);
+		cba = first_index;
+		cba = max(cba, vba);
+		cea = first_index + nr;
+		cea = min(cea, vea);
+
+		start_addr = ((cba - vba) << PAGE_SHIFT) + vma->vm_start;
+		end_addr = ((cea - vba) << PAGE_SHIFT) + vma->vm_start;
+		if (start_addr >= end_addr)
+			continue;
+
+		walk.vma = vma;
+		walk.mm = vma->vm_mm;
+
+		err = walk_page_test(vma->vm_start, vma->vm_end, &walk);
+		if (err > 0) {
+			err = 0;
+			break;
+		} else if (err < 0)
+			break;
+
+		err = __walk_page_range(start_addr, end_addr, &walk);
+		if (err)
+			break;
+	}
+
+	return err;
+}
-- 
Gitblit v1.6.2
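A usage sketch follows (not part of the patch): with this rework, callbacks live in a const struct mm_walk_ops instead of being assigned into the on-stack struct mm_walk, walk_page_range() takes the target mm and the private pointer explicitly, and pte_hole() gains a depth argument (-1 when the level is unknown). The counting callbacks and the count_range() wrapper below are hypothetical names; note also that for VMA-backed walks walk_pte_range() now takes the PTE spinlock around pte_entry().

#include <linux/pagewalk.h>
#include <linux/mm.h>

struct count_private {
	unsigned long present;		/* present PTEs seen */
	unsigned long hole_bytes;	/* bytes unmapped at any level */
};

static int count_pte_entry(pte_t *pte, unsigned long addr,
			   unsigned long next, struct mm_walk *walk)
{
	struct count_private *cp = walk->private;

	if (pte_present(*pte))
		cp->present++;
	return 0;		/* any non-zero value aborts the walk */
}

static int count_pte_hole(unsigned long addr, unsigned long next,
			  int depth, struct mm_walk *walk)
{
	struct count_private *cp = walk->private;

	/* @depth reports the page-table level the hole was found at */
	cp->hole_bytes += next - addr;
	return 0;
}

/* Callbacks are grouped in a const ops table; the walker never writes it. */
static const struct mm_walk_ops count_ops = {
	.pte_entry	= count_pte_entry,
	.pte_hole	= count_pte_hole,
};

static int count_range(struct mm_struct *mm, unsigned long start,
		       unsigned long end, struct count_private *cp)
{
	int err;

	mmap_read_lock(mm);	/* walk_page_range() asserts mmap_lock */
	err = walk_page_range(mm, start, end, &count_ops, cp);
	mmap_read_unlock(mm);
	return err;
}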
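The new walk->action protocol replaces the old rule that a present pte_entry() forces every transparent huge pmd to be split. A hedged sketch of that protocol (the callbacks are hypothetical, and locking is simplified: a production pmd_entry() would normally take pmd_trans_huge_lock() before inspecting the entry): pmd_entry() claims huge entries and sets ACTION_CONTINUE so the core neither splits them nor descends, the default ACTION_SUBTREE descends as before, and ACTION_AGAIN (unused here) would make the walker re-evaluate the same entry.

#include <linux/pagewalk.h>
#include <linux/huge_mm.h>
#include <linux/mm.h>

struct thp_stats {
	unsigned long huge_pmds;
	unsigned long ptes;
};

static int stats_pmd_entry(pmd_t *pmd, unsigned long addr,
			   unsigned long next, struct mm_walk *walk)
{
	struct thp_stats *stats = walk->private;

	if (pmd_trans_huge(*pmd)) {
		stats->huge_pmds++;
		/*
		 * The range is fully handled at this level: ACTION_CONTINUE
		 * stops the core from calling split_huge_pmd() and from
		 * descending to the PTE level for this entry.
		 */
		walk->action = ACTION_CONTINUE;
	}
	return 0;	/* otherwise ACTION_SUBTREE (the default) descends */
}

static int stats_pte_entry(pte_t *pte, unsigned long addr,
			   unsigned long next, struct mm_walk *walk)
{
	struct thp_stats *stats = walk->private;

	if (pte_present(*pte))
		stats->ptes++;
	return 0;
}

static const struct mm_walk_ops thp_stats_ops = {
	.pmd_entry	= stats_pmd_entry,
	.pte_entry	= stats_pte_entry,
};

With the mmap_lock held, a single VMA could then be walked with walk_page_vma(vma, &thp_stats_ops, &stats).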
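walk_page_range_novma() drops the old implicit assumption that every walked range is VMA-backed, which is what makes walking the kernel's own page tables possible. A minimal sketch, assuming init_mm as the target and that write-locking its mmap_lock is acceptable in the calling context (the function asserts the write lock); the helper name is made up:

#include <linux/pagewalk.h>
#include <linux/mm.h>
#include <linux/mm_types.h>

static int kernel_pte_entry(pte_t *pte, unsigned long addr,
			    unsigned long next, struct mm_walk *walk)
{
	unsigned long *count = walk->private;

	/* Called without the PTE lock because walk.no_vma is set */
	if (pte_present(*pte))
		(*count)++;
	return 0;
}

static const struct mm_walk_ops kernel_ops = {
	.pte_entry	= kernel_pte_entry,
};

/*
 * Count present kernel PTEs in [start, end). Leaf (huge) entries are
 * skipped by the core walker in the no_vma case rather than split.
 */
static unsigned long count_kernel_ptes(unsigned long start, unsigned long end)
{
	unsigned long count = 0;

	mmap_write_lock(&init_mm);
	/* NULL pgd means "use pgd_offset(mm, addr)" */
	walk_page_range_novma(&init_mm, start, end, &kernel_ops, NULL, &count);
	mmap_write_unlock(&init_mm);
	return count;
}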
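Finally, walk_page_mapping() inverts the usual starting point: instead of one mm, it visits every VMA of every process that maps a given file range, so the lock that matters is @mapping->i_mmap_rwsem rather than any single mmap_lock. A hypothetical sketch that counts PTEs currently mapping a pagecache range; i_mmap_lock_read() satisfies the lockdep assertion inside the function:

#include <linux/pagewalk.h>
#include <linux/fs.h>
#include <linux/mm.h>

static int mapping_pte_entry(pte_t *pte, unsigned long addr,
			     unsigned long next, struct mm_walk *walk)
{
	unsigned long *mapped = walk->private;

	if (pte_present(*pte))
		(*mapped)++;
	return 0;
}

static const struct mm_walk_ops mapping_ops = {
	.pte_entry	= mapping_pte_entry,
};

/*
 * Count PTEs that currently map the @nr pages starting at @first_index
 * of @mapping, across every process that has the file mmap()ed.
 */
static unsigned long count_file_mappings(struct address_space *mapping,
					 pgoff_t first_index, pgoff_t nr)
{
	unsigned long mapped = 0;

	i_mmap_lock_read(mapping);
	walk_page_mapping(mapping, first_index, nr, &mapping_ops, &mapped);
	i_mmap_unlock_read(mapping);
	return mapped;
}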