@@ -7,49 +7,78 @@
 
 #include <linux/fs.h> /* only for vma_is_dax() */
 
-extern vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf);
-extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
-			 pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
-			 struct vm_area_struct *vma);
-extern void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd);
-extern int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
-			 pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
-			 struct vm_area_struct *vma);
+vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf);
+int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
+		  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
+		  struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
+void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd);
+int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
+		  pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
+		  struct vm_area_struct *vma);
 
 #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
-extern void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud);
+void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud);
 #else
 static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
 {
 }
 #endif
 
-extern vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd);
-extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
-					  unsigned long addr,
-					  pmd_t *pmd,
-					  unsigned int flags);
-extern bool madvise_free_huge_pmd(struct mmu_gather *tlb,
-			struct vm_area_struct *vma,
-			pmd_t *pmd, unsigned long addr, unsigned long next);
-extern int zap_huge_pmd(struct mmu_gather *tlb,
-			struct vm_area_struct *vma,
-			pmd_t *pmd, unsigned long addr);
-extern int zap_huge_pud(struct mmu_gather *tlb,
-			struct vm_area_struct *vma,
-			pud_t *pud, unsigned long addr);
-extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
-			unsigned long addr, unsigned long end,
-			unsigned char *vec);
-extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
-			unsigned long new_addr, unsigned long old_end,
-			pmd_t *old_pmd, pmd_t *new_pmd);
-extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
-			unsigned long addr, pgprot_t newprot,
-			int prot_numa);
-vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write);
-vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write);
+vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd);
+struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
+				   unsigned long addr, pmd_t *pmd,
+				   unsigned int flags);
+bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
+			   pmd_t *pmd, unsigned long addr, unsigned long next);
+int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd,
+		 unsigned long addr);
+int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, pud_t *pud,
+		 unsigned long addr);
+bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
+		   unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd);
+int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr,
+		    pgprot_t newprot, unsigned long cp_flags);
+vm_fault_t vmf_insert_pfn_pmd_prot(struct vm_fault *vmf, pfn_t pfn,
+				   pgprot_t pgprot, bool write);
+
+/**
+ * vmf_insert_pfn_pmd - insert a pmd size pfn
+ * @vmf: Structure describing the fault
+ * @pfn: pfn to insert
+ * @pgprot: page protection to use
+ * @write: whether it's a write fault
+ *
+ * Insert a pmd size pfn. See vmf_insert_pfn() for additional info.
+ *
+ * Return: vm_fault_t value.
+ */
+static inline vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn,
+					    bool write)
+{
+	return vmf_insert_pfn_pmd_prot(vmf, pfn, vmf->vma->vm_page_prot, write);
+}
+vm_fault_t vmf_insert_pfn_pud_prot(struct vm_fault *vmf, pfn_t pfn,
+				   pgprot_t pgprot, bool write);
+
+/**
+ * vmf_insert_pfn_pud - insert a pud size pfn
+ * @vmf: Structure describing the fault
+ * @pfn: pfn to insert
+ * @pgprot: page protection to use
+ * @write: whether it's a write fault
+ *
+ * Insert a pud size pfn. See vmf_insert_pfn() for additional info.
+ *
+ * Return: vm_fault_t value.
+ */
+static inline vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn,
+					    bool write)
+{
+	return vmf_insert_pfn_pud_prot(vmf, pfn, vmf->vma->vm_page_prot, write);
+}
+
 enum transparent_hugepage_flag {
+	TRANSPARENT_HUGEPAGE_NEVER_DAX,
 	TRANSPARENT_HUGEPAGE_FLAG,
 	TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
 	TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
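The hunk above converts vmf_insert_pfn_pmd() and vmf_insert_pfn_pud() into static inline wrappers that pass the VMA's vm_page_prot to the new *_prot() variants. As a hedged sketch (not part of the patch), a device-DAX style ->huge_fault() handler might use the wrapper as below; my_dev_phys_base and the offset arithmetic are illustrative assumptions.

#include <linux/huge_mm.h>
#include <linux/mm.h>
#include <linux/pfn_t.h>

/* Hypothetical device base address, for illustration only. */
static phys_addr_t my_dev_phys_base;

static vm_fault_t my_dev_huge_fault(struct vm_fault *vmf,
				    enum page_entry_size pe_size)
{
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	pfn_t pfn;

	if (pe_size != PE_SIZE_PMD)
		return VM_FAULT_FALLBACK;

	/* Map the device memory backing this PMD-aligned range. */
	pfn = phys_to_pfn_t(my_dev_phys_base + (pmd_addr - vmf->vma->vm_start),
			    PFN_DEV | PFN_MAP);

	/* The new inline wrapper supplies vmf->vma->vm_page_prot itself. */
	return vmf_insert_pfn_pmd(vmf, pfn, vmf->flags & FAULT_FLAG_WRITE);
}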
@@ -66,13 +95,13 @@
 struct kobject;
 struct kobj_attribute;
 
-extern ssize_t single_hugepage_flag_store(struct kobject *kobj,
-					  struct kobj_attribute *attr,
-					  const char *buf, size_t count,
-					  enum transparent_hugepage_flag flag);
-extern ssize_t single_hugepage_flag_show(struct kobject *kobj,
-					 struct kobj_attribute *attr, char *buf,
-					 enum transparent_hugepage_flag flag);
+ssize_t single_hugepage_flag_store(struct kobject *kobj,
+				   struct kobj_attribute *attr,
+				   const char *buf, size_t count,
+				   enum transparent_hugepage_flag flag);
+ssize_t single_hugepage_flag_show(struct kobject *kobj,
+				  struct kobj_attribute *attr, char *buf,
+				  enum transparent_hugepage_flag flag);
 extern struct kobj_attribute shmem_enabled_attr;
 
 #define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
@@ -87,23 +116,50 @@
 #define HPAGE_PUD_SIZE	((1UL) << HPAGE_PUD_SHIFT)
 #define HPAGE_PUD_MASK	(~(HPAGE_PUD_SIZE - 1))
 
-extern bool is_vma_temporary_stack(struct vm_area_struct *vma);
-
 extern unsigned long transparent_hugepage_flags;
+
+static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
+		unsigned long haddr)
+{
+	/* Don't have to check pgoff for anonymous vma */
+	if (!vma_is_anonymous(vma)) {
+		if (!IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
+				HPAGE_PMD_NR))
+			return false;
+	}
+
+	if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
+		return false;
+	return true;
+}
+
+static inline bool transhuge_vma_enabled(struct vm_area_struct *vma,
+					  unsigned long vm_flags)
+{
+	/* Explicitly disabled through madvise. */
+	if ((vm_flags & VM_NOHUGEPAGE) ||
+	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
+		return false;
+	return true;
+}
 
 /*
  * to be used on vmas which are known to support THP.
- * Use transparent_hugepage_enabled otherwise
+ * Use transparent_hugepage_active otherwise
  */
 static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
 {
-	if (vma->vm_flags & VM_NOHUGEPAGE)
+
+	/*
+	 * If the hardware/firmware marked hugepage support disabled.
+	 */
+	if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_NEVER_DAX))
 		return false;
 
-	if (is_vma_temporary_stack(vma))
+	if (!transhuge_vma_enabled(vma, vma->vm_flags))
 		return false;
 
-	if (test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
+	if (vma_is_temporary_stack(vma))
 		return false;
 
 	if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_FLAG))
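transhuge_vma_suitable(), added in the hunk above, encodes the rule that a PMD mapping is only possible when the file offset and the virtual address agree modulo HPAGE_PMD_SIZE (the pgoff check is skipped for anonymous VMAs) and the whole huge page fits inside the VMA. A small illustration of that alignment check follows, with made-up numbers and assuming CONFIG_TRANSPARENT_HUGEPAGE=y; file_offsets_allow_pmd() is hypothetical.

#include <linux/huge_mm.h>
#include <linux/kernel.h>
#include <linux/mm.h>

/*
 * Illustrative only: with 4 KiB pages, HPAGE_PMD_NR is 512.  A mapping
 * starting at 2 MiB (page 512) with vm_pgoff == 0 passes
 * IS_ALIGNED(512 - 0, 512); the same mapping with vm_pgoff == 1 fails,
 * because a PMD-aligned virtual address would then map a file offset
 * that is not 2 MiB aligned, so the fault falls back to small pages.
 */
static bool file_offsets_allow_pmd(unsigned long vm_start, pgoff_t vm_pgoff)
{
	return IS_ALIGNED((vm_start >> PAGE_SHIFT) - vm_pgoff, HPAGE_PMD_NR);
}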
@@ -119,25 +175,18 @@
 	return false;
 }
 
-bool transparent_hugepage_enabled(struct vm_area_struct *vma);
+bool transparent_hugepage_active(struct vm_area_struct *vma);
 
 #define transparent_hugepage_use_zero_page()				\
 	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))
-#ifdef CONFIG_DEBUG_VM
-#define transparent_hugepage_debug_cow()				\
-	(transparent_hugepage_flags &					\
-	 (1<<TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG))
-#else /* CONFIG_DEBUG_VM */
-#define transparent_hugepage_debug_cow()	0
-#endif /* CONFIG_DEBUG_VM */
 
-extern unsigned long thp_get_unmapped_area(struct file *filp,
-		unsigned long addr, unsigned long len, unsigned long pgoff,
-		unsigned long flags);
+unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
+		unsigned long len, unsigned long pgoff, unsigned long flags);
 
-extern void prep_transhuge_page(struct page *page);
-extern void free_transhuge_page(struct page *page);
+void prep_transhuge_page(struct page *page);
+void free_transhuge_page(struct page *page);
+bool is_transparent_hugepage(struct page *page);
 
 bool can_split_huge_page(struct page *page, int *pextra_pins);
 int split_huge_page_to_list(struct page *page, struct list_head *list);
@@ -174,27 +223,22 @@
 		__split_huge_pud(__vma, __pud, __address);		\
 	}  while (0)
 
-extern int hugepage_madvise(struct vm_area_struct *vma,
-			    unsigned long *vm_flags, int advice);
-extern void vma_adjust_trans_huge(struct vm_area_struct *vma,
-				    unsigned long start,
-				    unsigned long end,
-				    long adjust_next);
-extern spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd,
-		struct vm_area_struct *vma);
-extern spinlock_t *__pud_trans_huge_lock(pud_t *pud,
-		struct vm_area_struct *vma);
+int hugepage_madvise(struct vm_area_struct *vma, unsigned long *vm_flags,
+		     int advice);
+void vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start,
+			   unsigned long end, long adjust_next);
+spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma);
+spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma);
 
 static inline int is_swap_pmd(pmd_t pmd)
 {
 	return !pmd_none(pmd) && !pmd_present(pmd);
 }
 
-/* mmap_sem must be held on entry */
+/* mmap_lock must be held on entry */
 static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
 		struct vm_area_struct *vma)
 {
-	VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
 	if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
 		return __pmd_trans_huge_lock(pmd, vma);
 	else
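The hunk above drops the VM_BUG_ON_VMA() assertion and keeps mmap_lock as a documented precondition of pmd_trans_huge_lock(). The usual calling pattern, sketched below under that assumption (not taken from the patch), treats a non-NULL return as "this PMD is a huge, swap or devmap entry and its page table lock is now held".

#include <linux/huge_mm.h>
#include <linux/mm.h>
#include <linux/spinlock.h>

/* Hedged sketch of one step of a page-table walker. */
static void example_walk_one_pmd(struct vm_area_struct *vma, pmd_t *pmd)
{
	spinlock_t *ptl;

	ptl = pmd_trans_huge_lock(pmd, vma);	/* mmap_lock held by caller */
	if (ptl) {
		/* Operate on the huge/swap/devmap entry as a single unit. */
		spin_unlock(ptl);
		return;
	}

	/* NULL means a regular page table: fall through to the PTE level. */
}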
@@ -203,25 +247,51 @@
 static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
 		struct vm_area_struct *vma)
 {
-	VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
 	if (pud_trans_huge(*pud) || pud_devmap(*pud))
 		return __pud_trans_huge_lock(pud, vma);
 	else
 		return NULL;
 }
-static inline int hpage_nr_pages(struct page *page)
+
+/**
+ * thp_head - Head page of a transparent huge page.
+ * @page: Any page (tail, head or regular) found in the page cache.
+ */
+static inline struct page *thp_head(struct page *page)
 {
-	if (unlikely(PageTransHuge(page)))
+	return compound_head(page);
+}
+
+/**
+ * thp_order - Order of a transparent huge page.
+ * @page: Head page of a transparent huge page.
+ */
+static inline unsigned int thp_order(struct page *page)
+{
+	VM_BUG_ON_PGFLAGS(PageTail(page), page);
+	if (PageHead(page))
+		return HPAGE_PMD_ORDER;
+	return 0;
+}
+
+/**
+ * thp_nr_pages - The number of regular pages in this huge page.
+ * @page: The head page of a huge page.
+ */
+static inline int thp_nr_pages(struct page *page)
+{
+	VM_BUG_ON_PGFLAGS(PageTail(page), page);
+	if (PageHead(page))
 		return HPAGE_PMD_NR;
 	return 1;
 }
 
 struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
-		pmd_t *pmd, int flags);
+		pmd_t *pmd, int flags, struct dev_pagemap **pgmap);
 struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
-		pud_t *pud, int flags);
+		pud_t *pud, int flags, struct dev_pagemap **pgmap);
 
-extern vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd);
+vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd);
 
 extern struct page *huge_zero_page;
 extern unsigned long huge_zero_pfn;
@@ -251,6 +321,15 @@
 	return IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION);
 }
 
+static inline struct list_head *page_deferred_list(struct page *page)
+{
+	/*
+	 * Global or memcg deferred list in the second tail pages is
+	 * occupied by compound_head.
+	 */
+	return &page[2].deferred_list;
+}
+
 #else /* CONFIG_TRANSPARENT_HUGEPAGE */
 #define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
 #define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
@@ -260,19 +339,52 @@
 #define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; })
 #define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; })
 
-#define hpage_nr_pages(x) 1
+static inline struct page *thp_head(struct page *page)
+{
+	VM_BUG_ON_PGFLAGS(PageTail(page), page);
+	return page;
+}
+
+static inline unsigned int thp_order(struct page *page)
+{
+	VM_BUG_ON_PGFLAGS(PageTail(page), page);
+	return 0;
+}
+
+static inline int thp_nr_pages(struct page *page)
+{
+	VM_BUG_ON_PGFLAGS(PageTail(page), page);
+	return 1;
+}
 
 static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
 {
 	return false;
 }
 
-static inline bool transparent_hugepage_enabled(struct vm_area_struct *vma)
+static inline bool transparent_hugepage_active(struct vm_area_struct *vma)
+{
+	return false;
+}
+
+static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
+		unsigned long haddr)
+{
+	return false;
+}
+
+static inline bool transhuge_vma_enabled(struct vm_area_struct *vma,
+					  unsigned long vm_flags)
 {
 	return false;
 }
 
 static inline void prep_transhuge_page(struct page *page) {}
+
+static inline bool is_transparent_hugepage(struct page *page)
+{
+	return false;
+}
 
 #define transparent_hugepage_flags 0UL
 
@@ -359,13 +471,13 @@
 }
 
 static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
-	unsigned long addr, pmd_t *pmd, int flags)
+	unsigned long addr, pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
 {
 	return NULL;
 }
 
 static inline struct page *follow_devmap_pud(struct vm_area_struct *vma,
-	unsigned long addr, pud_t *pud, int flags)
+	unsigned long addr, pud_t *pud, int flags, struct dev_pagemap **pgmap)
 {
 	return NULL;
 }
@@ -376,4 +488,15 @@
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
+/**
+ * thp_size - Size of a transparent huge page.
+ * @page: Head page of a transparent huge page.
+ *
+ * Return: Number of bytes in this page.
+ */
+static inline unsigned long thp_size(struct page *page)
+{
+	return PAGE_SIZE << thp_order(page);
+}
+
 #endif /* _LINUX_HUGE_MM_H */
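The thp_head()/thp_order()/thp_nr_pages()/thp_size() helpers introduced by this patch replace the old hpage_nr_pages() and give order-0 answers when CONFIG_TRANSPARENT_HUGEPAGE is off. A hedged sketch of typical accounting code using them follows; example_account() is hypothetical.

#include <linux/huge_mm.h>
#include <linux/mm.h>

/* Hypothetical accounting helper, illustrating the new THP helpers. */
static void example_account(struct page *page, unsigned long *nr_pages,
			    unsigned long *nr_bytes)
{
	struct page *head = thp_head(page);	/* tolerates tail pages */

	*nr_pages += thp_nr_pages(head);	/* 1 or HPAGE_PMD_NR */
	*nr_bytes += thp_size(head);		/* PAGE_SIZE << thp_order() */
}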