@@ -3,6 +3,7 @@
 #define _LINUX_KHUGEPAGED_H
 
 #include <linux/sched/coredump.h> /* MMF_VM_HUGEPAGE */
+#include <linux/shmem_fs.h>
 
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -16,6 +17,14 @@
 extern int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
                                       unsigned long vm_flags);
 extern void khugepaged_min_free_kbytes_update(void);
+#ifdef CONFIG_SHMEM
+extern void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr);
+#else
+static inline void collapse_pte_mapped_thp(struct mm_struct *mm,
+                                           unsigned long addr)
+{
+}
+#endif
 
 #define khugepaged_enabled()                                           \
         (transparent_hugepage_flags &                                  \
@@ -49,6 +58,7 @@
 {
         if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags))
                 if ((khugepaged_always() ||
+                     (shmem_file(vma->vm_file) && shmem_huge_enabled(vma)) ||
                      (khugepaged_req_madv() && (vm_flags & VM_HUGEPAGE))) &&
                     !(vm_flags & VM_NOHUGEPAGE) &&
                     !test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
@@ -74,6 +84,10 @@
 {
         return 0;
 }
+static inline void collapse_pte_mapped_thp(struct mm_struct *mm,
+                                           unsigned long addr)
+{
+}
 
 static inline void khugepaged_min_free_kbytes_update(void)
 {
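
For context on the pattern the new collapse_pte_mapped_thp() lines use: the real declaration is made under CONFIG_SHMEM, while the other configurations (the #else branch here and the !CONFIG_TRANSPARENT_HUGEPAGE stub in the last hunk) get an empty static inline, so call sites never need their own #ifdef. Below is a minimal, standalone sketch of that declaration/stub idiom; HAVE_FEATURE and do_collapse() are illustrative stand-ins, not part of this patch.

/*
 * Sketch of the ifdef/static-inline-stub idiom used above.
 * HAVE_FEATURE stands in for CONFIG_SHMEM; do_collapse() stands in for
 * collapse_pte_mapped_thp(). Names are hypothetical.
 */
#include <stdio.h>

#define HAVE_FEATURE 1

#if HAVE_FEATURE
/* Real declaration; the definition would normally live in a .c file. */
void do_collapse(unsigned long addr);

void do_collapse(unsigned long addr)
{
        printf("collapse at %#lx\n", addr);
}
#else
/* Feature compiled out: calls compile away to an empty inline. */
static inline void do_collapse(unsigned long addr)
{
}
#endif

int main(void)
{
        /* The caller is identical in both configurations, no #ifdef needed. */
        do_collapse(0x200000UL);
        return 0;
}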