| .. | .. |
|---|
| 10 | 10 | */ |
|---|
| 11 | 11 | #include <linux/pagemap.h> |
|---|
| 12 | 12 | #include <linux/gfp.h> |
|---|
| 13 | | -#include <linux/mm.h> |
|---|
| 13 | +#include <linux/pagewalk.h> |
|---|
| 14 | 14 | #include <linux/mman.h> |
|---|
| 15 | 15 | #include <linux/syscalls.h> |
|---|
| 16 | 16 | #include <linux/swap.h> |
|---|
| 17 | 17 | #include <linux/swapops.h> |
|---|
| 18 | 18 | #include <linux/shmem_fs.h> |
|---|
| 19 | 19 | #include <linux/hugetlb.h> |
|---|
| 20 | +#include <linux/pgtable.h> |
|---|
| 20 | 21 | |
|---|
| 21 | 22 | #include <linux/uaccess.h> |
|---|
| 22 | | -#include <asm/pgtable.h> |
|---|
| 23 | 23 | |
|---|
| 24 | 24 | static int mincore_hugetlb(pte_t *pte, unsigned long hmask, unsigned long addr, |
|---|
| 25 | 25 | unsigned long end, struct mm_walk *walk) |
|---|
| .. | .. |
|---|
| 48 | 48 | * and is up to date; i.e. that no page-in operation would be required |
|---|
| 49 | 49 | * at this time if an application were to map and access this page. |
|---|
| 50 | 50 | */ |
|---|
| 51 | | -static unsigned char mincore_page(struct address_space *mapping, pgoff_t pgoff) |
|---|
| 51 | +static unsigned char mincore_page(struct address_space *mapping, pgoff_t index) |
|---|
| 52 | 52 | { |
|---|
| 53 | 53 | unsigned char present = 0; |
|---|
| 54 | 54 | struct page *page; |
|---|
| .. | .. |
|---|
| 59 | 59 | * any other file mapping (ie. marked !present and faulted in with |
|---|
| 60 | 60 | * tmpfs's .fault). So swapped out tmpfs mappings are tested here. |
|---|
| 61 | 61 | */ |
|---|
| 62 | | -#ifdef CONFIG_SWAP |
|---|
| 63 | | - if (shmem_mapping(mapping)) { |
|---|
| 64 | | - page = find_get_entry(mapping, pgoff); |
|---|
| 65 | | - /* |
|---|
| 66 | | - * shmem/tmpfs may return swap: account for swapcache |
|---|
| 67 | | - * page too. |
|---|
| 68 | | - */ |
|---|
| 69 | | - if (radix_tree_exceptional_entry(page)) { |
|---|
| 70 | | - swp_entry_t swp = radix_to_swp_entry(page); |
|---|
| 71 | | - page = find_get_page(swap_address_space(swp), |
|---|
| 72 | | - swp_offset(swp)); |
|---|
| 73 | | - } |
|---|
| 74 | | - } else |
|---|
| 75 | | - page = find_get_page(mapping, pgoff); |
|---|
| 76 | | -#else |
|---|
| 77 | | - page = find_get_page(mapping, pgoff); |
|---|
| 78 | | -#endif |
|---|
| 62 | + page = find_get_incore_page(mapping, index); |
|---|
| 79 | 63 | if (page) { |
|---|
| 80 | 64 | present = PageUptodate(page); |
|---|
| 81 | 65 | put_page(page); |
|---|
| .. | .. |
|---|
| 104 | 88 | } |
|---|
| 105 | 89 | |
|---|
| 106 | 90 | static int mincore_unmapped_range(unsigned long addr, unsigned long end, |
|---|
| 91 | + __always_unused int depth, |
|---|
| 107 | 92 | struct mm_walk *walk) |
|---|
| 108 | 93 | { |
|---|
| 109 | 94 | walk->private += __mincore_unmapped_range(addr, end, |
|---|
| .. | .. |
|---|
| 185 | 170 | inode_permission(file_inode(vma->vm_file), MAY_WRITE) == 0; |
|---|
| 186 | 171 | } |
|---|
| 187 | 172 | |
|---|
| 173 | +static const struct mm_walk_ops mincore_walk_ops = { |
|---|
| 174 | + .pmd_entry = mincore_pte_range, |
|---|
| 175 | + .pte_hole = mincore_unmapped_range, |
|---|
| 176 | + .hugetlb_entry = mincore_hugetlb, |
|---|
| 177 | +}; |
|---|
| 178 | + |
|---|
| 188 | 179 | /* |
|---|
| 189 | 180 | * Do a chunk of "sys_mincore()". We've already checked |
|---|
| 190 | 181 | * all the arguments, we hold the mmap semaphore: we should |
|---|
| .. | .. |
|---|
| 195 | 186 | struct vm_area_struct *vma; |
|---|
| 196 | 187 | unsigned long end; |
|---|
| 197 | 188 | int err; |
|---|
| 198 | | - struct mm_walk mincore_walk = { |
|---|
| 199 | | - .pmd_entry = mincore_pte_range, |
|---|
| 200 | | - .pte_hole = mincore_unmapped_range, |
|---|
| 201 | | - .hugetlb_entry = mincore_hugetlb, |
|---|
| 202 | | - .private = vec, |
|---|
| 203 | | - }; |
|---|
| 204 | 189 | |
|---|
| 205 | 190 | vma = find_vma(current->mm, addr); |
|---|
| 206 | 191 | if (!vma || addr < vma->vm_start) |
|---|
| .. | .. |
|---|
| 211 | 196 | memset(vec, 1, pages); |
|---|
| 212 | 197 | return pages; |
|---|
| 213 | 198 | } |
|---|
| 214 | | - mincore_walk.mm = vma->vm_mm; |
|---|
| 215 | | - err = walk_page_range(addr, end, &mincore_walk); |
|---|
| 199 | + err = walk_page_range(vma->vm_mm, addr, end, &mincore_walk_ops, vec); |
|---|
| 216 | 200 | if (err < 0) |
|---|
| 217 | 201 | return err; |
|---|
| 218 | 202 | return (end - addr) >> PAGE_SHIFT; |
|---|
| .. | .. |
|---|
| 256 | 240 | return -EINVAL; |
|---|
| 257 | 241 | |
|---|
| 258 | 242 | /* ..and we need to be passed a valid user-space range */ |
|---|
| 259 | | - if (!access_ok(VERIFY_READ, (void __user *) start, len)) |
|---|
| 243 | + if (!access_ok((void __user *) start, len)) |
|---|
| 260 | 244 | return -ENOMEM; |
|---|
| 261 | 245 | |
|---|
| 262 | 246 | /* This also avoids any overflows on PAGE_ALIGN */ |
|---|
| 263 | 247 | pages = len >> PAGE_SHIFT; |
|---|
| 264 | 248 | pages += (offset_in_page(len)) != 0; |
|---|
| 265 | 249 | |
|---|
| 266 | | - if (!access_ok(VERIFY_WRITE, vec, pages)) |
|---|
| 250 | + if (!access_ok(vec, pages)) |
|---|
| 267 | 251 | return -EFAULT; |
|---|
| 268 | 252 | |
|---|
| 269 | 253 | tmp = (void *) __get_free_page(GFP_USER); |
|---|
| .. | .. |
|---|
| 276 | 260 | * Do at most PAGE_SIZE entries per iteration, due to |
|---|
| 277 | 261 | * the temporary buffer size. |
|---|
| 278 | 262 | */ |
|---|
| 279 | | -down_read(&current->mm->mmap_sem); |
|---|
| 263 | + mmap_read_lock(current->mm); |
|---|
| 280 | 264 | retval = do_mincore(start, min(pages, PAGE_SIZE), tmp); |
|---|
| 281 | | -up_read(&current->mm->mmap_sem); |
|---|
| 265 | + mmap_read_unlock(current->mm); |
|---|
| 282 | 266 | |
|---|
| 283 | 267 | if (retval <= 0) |
|---|
| 284 | 268 | break; |
|---|