2023-12-11 6778948f9de86c3cfaf36725a7c87dcff9ba247f
kernel/include/linux/hugetlb.h
@@ -9,35 +9,19 @@
 #include <linux/cgroup.h>
 #include <linux/list.h>
 #include <linux/kref.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
+#include <linux/gfp.h>
+#include <linux/userfaultfd_k.h>
 
 struct ctl_table;
 struct user_struct;
 struct mmu_gather;
 
 #ifndef is_hugepd
-/*
- * Some architectures requires a hugepage directory format that is
- * required to support multiple hugepage sizes. For example
- * a4fe3ce76 "powerpc/mm: Allow more flexible layouts for hugepage pagetables"
- * introduced the same on powerpc. This allows for a more flexible hugepage
- * pagetable layout.
- */
 typedef struct { unsigned long pd; } hugepd_t;
 #define is_hugepd(hugepd) (0)
 #define __hugepd(x) ((hugepd_t) { (x) })
-static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
-			      unsigned pdshift, unsigned long end,
-			      int write, struct page **pages, int *nr)
-{
-	return 0;
-}
-#else
-extern int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
-		       unsigned pdshift, unsigned long end,
-		       int write, struct page **pages, int *nr);
 #endif
-
 
 #ifdef CONFIG_HUGETLB_PAGE
 
@@ -64,7 +48,52 @@
 	long adds_in_progress;
 	struct list_head region_cache;
 	long region_cache_count;
+#ifdef CONFIG_CGROUP_HUGETLB
+	/*
+	 * On private mappings, the counter to uncharge reservations is stored
+	 * here. If these fields are 0, then either the mapping is shared, or
+	 * cgroup accounting is disabled for this resv_map.
+	 */
+	struct page_counter *reservation_counter;
+	unsigned long pages_per_hpage;
+	struct cgroup_subsys_state *css;
+#endif
 };
+
+/*
+ * Region tracking -- allows tracking of reservations and instantiated pages
+ * across the pages in a mapping.
+ *
+ * The region data structures are embedded into a resv_map and protected
+ * by a resv_map's lock. The set of regions within the resv_map represent
+ * reservations for huge pages, or huge pages that have already been
+ * instantiated within the map. The from and to elements are huge page
+ * indices into the associated mapping. from indicates the starting index
+ * of the region. to represents the first index past the end of the region.
+ *
+ * For example, a file region structure with from == 0 and to == 4 represents
+ * four huge pages in a mapping. It is important to note that the to element
+ * represents the first element past the end of the region. This is used in
+ * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
+ *
+ * Interval notation of the form [from, to) will be used to indicate that
+ * the endpoint from is inclusive and to is exclusive.
+ */
+struct file_region {
+	struct list_head link;
+	long from;
+	long to;
+#ifdef CONFIG_CGROUP_HUGETLB
+	/*
+	 * On shared mappings, each reserved region appears as a struct
+	 * file_region in resv_map. These fields hold the info needed to
+	 * uncharge each reservation.
+	 */
+	struct page_counter *reservation_counter;
+	struct cgroup_subsys_state *css;
+#endif
+};
+
 extern struct resv_map *resv_map_alloc(void);
 void resv_map_release(struct kref *ref);
 
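The comment above fixes the [from, to) half-open convention, so the page count of a region is simply to - from and adjacent regions never double-count a boundary index. A minimal user-space sketch of that arithmetic (the struct and helper below are illustrative stand-ins, not the kernel's file_region/list_head machinery):

#include <stdio.h>

/* Illustrative stand-in for struct file_region: huge page indices. */
struct region {
	long from;	/* first huge page index covered by the region */
	long to;	/* first index past the end of the region */
};

/* Half-open interval arithmetic: [0, 4) covers 4 huge pages. */
static long region_pages(const struct region *r)
{
	return r->to - r->from;
}

int main(void)
{
	const struct region regions[] = { { 0, 4 }, { 6, 9 } };
	long total = 0;
	unsigned long i;

	for (i = 0; i < sizeof(regions) / sizeof(regions[0]); i++)
		total += region_pages(&regions[i]);

	printf("%ld huge pages tracked\n", total);	/* prints 7 */
	return 0;
}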
@@ -78,14 +107,13 @@
 void hugepage_put_subpool(struct hugepage_subpool *spool);
 
 void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
-int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
-int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
-int hugetlb_treat_movable_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
-
-#ifdef CONFIG_NUMA
-int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int,
-					void __user *, size_t *, loff_t *);
-#endif
+int hugetlb_sysctl_handler(struct ctl_table *, int, void *, size_t *, loff_t *);
+int hugetlb_overcommit_handler(struct ctl_table *, int, void *, size_t *,
+		loff_t *);
+int hugetlb_treat_movable_handler(struct ctl_table *, int, void *, size_t *,
+		loff_t *);
+int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int, void *, size_t *,
+		loff_t *);
 
 int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
 long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
@@ -102,16 +130,19 @@
 			 unsigned long start, unsigned long end,
 			 struct page *ref_page);
 void hugetlb_report_meminfo(struct seq_file *);
-int hugetlb_report_node_meminfo(int, char *);
+int hugetlb_report_node_meminfo(char *buf, int len, int nid);
 void hugetlb_show_meminfo(void);
 unsigned long hugetlb_total_pages(void);
 vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 			unsigned long address, unsigned int flags);
+#ifdef CONFIG_USERFAULTFD
 int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
 				struct vm_area_struct *dst_vma,
 				unsigned long dst_addr,
 				unsigned long src_addr,
+				enum mcopy_atomic_mode mode,
 				struct page **pagep);
+#endif /* CONFIG_USERFAULTFD */
 int hugetlb_reserve_pages(struct inode *inode, long from, long to,
 				struct vm_area_struct *vma,
 				vm_flags_t vm_flags);
@@ -123,21 +154,24 @@
 void free_huge_page(struct page *page);
 void hugetlb_fix_reserve_counts(struct inode *inode);
 extern struct mutex *hugetlb_fault_mutex_table;
-u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
-				pgoff_t idx);
+u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx);
 
-pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
+pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
+		      unsigned long addr, pud_t *pud);
+
+struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage);
 
 extern int sysctl_hugetlb_shm_group;
 extern struct list_head huge_boot_pages;
 
 /* arch callbacks */
 
-pte_t *huge_pte_alloc(struct mm_struct *mm,
+pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
 			unsigned long addr, unsigned long sz);
 pte_t *huge_pte_offset(struct mm_struct *mm,
 			unsigned long addr, unsigned long sz);
-int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
+int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
+				unsigned long *addr, pte_t *ptep);
 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
 				unsigned long *start, unsigned long *end);
 struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
@@ -145,8 +179,8 @@
 struct page *follow_huge_pd(struct vm_area_struct *vma,
 			    unsigned long address, hugepd_t hpd,
 			    int flags, int pdshift);
-struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
-				pmd_t *pmd, int flags);
+struct page *follow_huge_pmd_pte(struct vm_area_struct *vma, unsigned long address,
+				 int flags);
 struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
 				pud_t *pud, int flags);
 struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
@@ -158,6 +192,7 @@
 		unsigned long address, unsigned long end, pgprot_t newprot);
 
 bool is_hugetlb_entry_migration(pte_t pte);
+void hugetlb_unshare_all_pmds(struct vm_area_struct *vma);
 
 #else /* !CONFIG_HUGETLB_PAGE */
 
@@ -170,8 +205,15 @@
 	return 0;
 }
 
-static inline int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr,
-					pte_t *ptep)
+static inline struct address_space *hugetlb_page_mapping_lock_write(
+					struct page *hpage)
+{
+	return NULL;
+}
+
+static inline int huge_pmd_unshare(struct mm_struct *mm,
+					struct vm_area_struct *vma,
+					unsigned long *addr, pte_t *ptep)
 {
 	return 0;
 }
@@ -182,39 +224,133 @@
 {
 }
 
-#define follow_hugetlb_page(m,v,p,vs,a,b,i,w,n)	({ BUG(); 0; })
-#define follow_huge_addr(mm, addr, write)	ERR_PTR(-EINVAL)
-#define copy_hugetlb_page_range(src, dst, vma)	({ BUG(); 0; })
+static inline long follow_hugetlb_page(struct mm_struct *mm,
+			struct vm_area_struct *vma, struct page **pages,
+			struct vm_area_struct **vmas, unsigned long *position,
+			unsigned long *nr_pages, long i, unsigned int flags,
+			int *nonblocking)
+{
+	BUG();
+	return 0;
+}
+
+static inline struct page *follow_huge_addr(struct mm_struct *mm,
+					unsigned long address, int write)
+{
+	return ERR_PTR(-EINVAL);
+}
+
+static inline int copy_hugetlb_page_range(struct mm_struct *dst,
+			struct mm_struct *src, struct vm_area_struct *vma)
+{
+	BUG();
+	return 0;
+}
+
 static inline void hugetlb_report_meminfo(struct seq_file *m)
 {
 }
-#define hugetlb_report_node_meminfo(n, buf)	0
+
+static inline int hugetlb_report_node_meminfo(char *buf, int len, int nid)
+{
+	return 0;
+}
+
 static inline void hugetlb_show_meminfo(void)
 {
 }
-#define follow_huge_pd(vma, addr, hpd, flags, pdshift)	NULL
-#define follow_huge_pmd(mm, addr, pmd, flags)	NULL
-#define follow_huge_pud(mm, addr, pud, flags)	NULL
-#define follow_huge_pgd(mm, addr, pgd, flags)	NULL
-#define prepare_hugepage_range(file, addr, len)	(-EINVAL)
-#define pmd_huge(x)	0
-#define pud_huge(x)	0
-#define is_hugepage_only_range(mm, addr, len)	0
-#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling)	({BUG(); 0; })
-#define hugetlb_fault(mm, vma, addr, flags)	({ BUG(); 0; })
-#define hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma, dst_addr, \
-				src_addr, pagep)	({ BUG(); 0; })
-#define huge_pte_offset(mm, address, sz)	0
+
+static inline struct page *follow_huge_pd(struct vm_area_struct *vma,
+				unsigned long address, hugepd_t hpd, int flags,
+				int pdshift)
+{
+	return NULL;
+}
+
+static inline struct page *follow_huge_pmd_pte(struct vm_area_struct *vma,
+				unsigned long address, int flags)
+{
+	return NULL;
+}
+
+static inline struct page *follow_huge_pud(struct mm_struct *mm,
+				unsigned long address, pud_t *pud, int flags)
+{
+	return NULL;
+}
+
+static inline struct page *follow_huge_pgd(struct mm_struct *mm,
+				unsigned long address, pgd_t *pgd, int flags)
+{
+	return NULL;
+}
+
+static inline int prepare_hugepage_range(struct file *file,
+				unsigned long addr, unsigned long len)
+{
+	return -EINVAL;
+}
+
+static inline int pmd_huge(pmd_t pmd)
+{
+	return 0;
+}
+
+static inline int pud_huge(pud_t pud)
+{
+	return 0;
+}
+
+static inline int is_hugepage_only_range(struct mm_struct *mm,
+				unsigned long addr, unsigned long len)
+{
+	return 0;
+}
+
+static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
+				unsigned long addr, unsigned long end,
+				unsigned long floor, unsigned long ceiling)
+{
+	BUG();
+}
+
+#ifdef CONFIG_USERFAULTFD
+static inline int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
+						pte_t *dst_pte,
+						struct vm_area_struct *dst_vma,
+						unsigned long dst_addr,
+						unsigned long src_addr,
+						enum mcopy_atomic_mode mode,
+						struct page **pagep)
+{
+	BUG();
+	return 0;
+}
+#endif /* CONFIG_USERFAULTFD */
+
+static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
+					unsigned long sz)
+{
+	return NULL;
+}
 
 static inline bool isolate_huge_page(struct page *page, struct list_head *list)
 {
 	return false;
 }
-#define putback_active_hugepage(p)	do {} while (0)
-#define move_hugetlb_state(old, new, reason)	do {} while (0)
 
-static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
-			unsigned long address, unsigned long end, pgprot_t newprot)
+static inline void putback_active_hugepage(struct page *page)
+{
+}
+
+static inline void move_hugetlb_state(struct page *oldpage,
+					struct page *newpage, int reason)
+{
+}
+
+static inline unsigned long hugetlb_change_protection(
+			struct vm_area_struct *vma, unsigned long address,
+			unsigned long end, pgprot_t newprot)
 {
 	return 0;
 }
@@ -232,6 +368,16 @@
 {
 	BUG();
 }
+
+static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
+			struct vm_area_struct *vma, unsigned long address,
+			unsigned int flags)
+{
+	BUG();
+	return 0;
+}
+
+static inline void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) { }
 
 #endif /* !CONFIG_HUGETLB_PAGE */
 /*
@@ -310,7 +456,10 @@
 	return is_file_shm_hugepages(file);
 }
 
-
+static inline struct hstate *hstate_inode(struct inode *i)
+{
+	return HUGETLBFS_SB(i->i_sb)->hstate;
+}
 #else /* !CONFIG_HUGETLBFS */
 
 #define is_file_hugepages(file)			false
@@ -322,6 +471,10 @@
 	return ERR_PTR(-ENOSYS);
 }
 
+static inline struct hstate *hstate_inode(struct inode *i)
+{
+	return NULL;
+}
 #endif /* !CONFIG_HUGETLBFS */
 
 #ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
@@ -352,7 +505,8 @@
 	unsigned int surplus_huge_pages_node[MAX_NUMNODES];
 #ifdef CONFIG_CGROUP_HUGETLB
 	/* cgroup control files */
-	struct cftype cgroup_files[5];
+	struct cftype cgroup_files_dfl[7];
+	struct cftype cgroup_files_legacy[9];
 #endif
 	char name[HSTATE_NAME_LEN];
 };
@@ -364,9 +518,8 @@
 
 struct page *alloc_huge_page(struct vm_area_struct *vma,
 				unsigned long addr, int avoid_reserve);
-struct page *alloc_huge_page_node(struct hstate *h, int nid);
 struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
-				nodemask_t *nmask);
+				nodemask_t *nmask, gfp_t gfp_mask);
 struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
 				unsigned long address);
 int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
@@ -376,8 +529,8 @@
 int __init __alloc_bootmem_huge_page(struct hstate *h);
 int __init alloc_bootmem_huge_page(struct hstate *h);
 
-void __init hugetlb_bad_size(void);
 void __init hugetlb_add_hstate(unsigned order);
+bool __init arch_hugetlb_valid_size(unsigned long size);
 struct hstate *size_to_hstate(unsigned long size);
 
 #ifndef HUGE_MAX_HSTATE
@@ -388,11 +541,6 @@
 extern unsigned int default_hstate_idx;
 
 #define default_hstate (hstates[default_hstate_idx])
-
-static inline struct hstate *hstate_inode(struct inode *i)
-{
-	return HUGETLBFS_SB(i->i_sb)->hstate;
-}
 
 static inline struct hstate *hstate_file(struct file *f)
 {
@@ -453,6 +601,20 @@
 
 #include <asm/hugetlb.h>
 
+#ifndef is_hugepage_only_range
+static inline int is_hugepage_only_range(struct mm_struct *mm,
+					unsigned long addr, unsigned long len)
+{
+	return 0;
+}
+#define is_hugepage_only_range is_hugepage_only_range
+#endif
+
+#ifndef arch_clear_hugepage_flags
+static inline void arch_clear_hugepage_flags(struct page *page) { }
+#define arch_clear_hugepage_flags arch_clear_hugepage_flags
+#endif
+
 #ifndef arch_make_huge_pte
 static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
 				       struct page *page, int writable)
@@ -464,7 +626,7 @@
 static inline struct hstate *page_hstate(struct page *page)
 {
 	VM_BUG_ON_PAGE(!PageHuge(page), page);
-	return size_to_hstate(PAGE_SIZE << compound_order(page));
+	return size_to_hstate(page_size(page));
 }
 
 static inline unsigned hstate_index_to_shift(unsigned index)
@@ -480,17 +642,75 @@
 extern int dissolve_free_huge_page(struct page *page);
 extern int dissolve_free_huge_pages(unsigned long start_pfn,
 				    unsigned long end_pfn);
-static inline bool hugepage_migration_supported(struct hstate *h)
-{
+
 #ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
+#ifndef arch_hugetlb_migration_supported
+static inline bool arch_hugetlb_migration_supported(struct hstate *h)
+{
 	if ((huge_page_shift(h) == PMD_SHIFT) ||
-		(huge_page_shift(h) == PGDIR_SHIFT))
+		(huge_page_shift(h) == PUD_SHIFT) ||
+		(huge_page_shift(h) == PGDIR_SHIFT))
 		return true;
 	else
 		return false;
-#else
-	return false;
+}
 #endif
+#else
+static inline bool arch_hugetlb_migration_supported(struct hstate *h)
+{
+	return false;
+}
+#endif
+
+static inline bool hugepage_migration_supported(struct hstate *h)
+{
+	return arch_hugetlb_migration_supported(h);
+}
+
+/*
+ * Movability check is different as compared to migration check.
+ * It determines whether or not a huge page should be placed on
+ * movable zone or not. Movability of any huge page should be
+ * required only if huge page size is supported for migration.
+ * There won't be any reason for the huge page to be movable if
+ * it is not migratable to start with. Also the size of the huge
+ * page should be large enough to be placed under a movable zone
+ * and still feasible enough to be migratable. Just the presence
+ * in movable zone does not make the migration feasible.
+ *
+ * So even though large huge page sizes like the gigantic ones
+ * are migratable they should not be movable because it's not
+ * feasible to migrate them from movable zone.
+ */
+static inline bool hugepage_movable_supported(struct hstate *h)
+{
+	if (!hugepage_migration_supported(h))
+		return false;
+
+	if (hstate_is_gigantic(h))
+		return false;
+	return true;
+}
+
+/* Movability of hugepages depends on migration support. */
+static inline gfp_t htlb_alloc_mask(struct hstate *h)
+{
+	if (hugepage_movable_supported(h))
+		return GFP_HIGHUSER_MOVABLE;
+	else
+		return GFP_HIGHUSER;
+}
+
+static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
+{
+	gfp_t modified_mask = htlb_alloc_mask(h);
+
+	/* Some callers might want to enforce node */
+	modified_mask |= (gfp_mask & __GFP_THISNODE);
+
+	modified_mask |= (gfp_mask & __GFP_NOWARN);
+
+	return modified_mask;
 }
 
 static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
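Taken together, the helpers above mean the allocation mask is derived from the hstate alone: a migratable, non-gigantic huge page size gets GFP_HIGHUSER_MOVABLE, anything else gets GFP_HIGHUSER, and a caller-supplied mask can only contribute __GFP_THISNODE and __GFP_NOWARN on top. A rough user-space sketch of that selection, with the gfp flags mocked as plain bit masks (the values below are illustrative, not the kernel's):

#include <stdio.h>

typedef unsigned int gfp_t;

/* Mocked flag values; the real ones live in <linux/gfp.h>. */
#define GFP_HIGHUSER		0x01u
#define GFP_HIGHUSER_MOVABLE	0x03u
#define __GFP_THISNODE		0x10u
#define __GFP_NOWARN		0x20u

/* Base mask depends only on whether this huge page size is movable. */
static gfp_t alloc_mask(int movable)
{
	return movable ? GFP_HIGHUSER_MOVABLE : GFP_HIGHUSER;
}

/* Callers may only layer __GFP_THISNODE and __GFP_NOWARN on top. */
static gfp_t modify_alloc_mask(int movable, gfp_t caller_mask)
{
	gfp_t mask = alloc_mask(movable);

	mask |= caller_mask & __GFP_THISNODE;
	mask |= caller_mask & __GFP_NOWARN;
	return mask;
}

int main(void)
{
	printf("movable, THISNODE requested: %#x\n",
	       modify_alloc_mask(1, __GFP_THISNODE));
	printf("gigantic (not movable), no extras: %#x\n",
	       modify_alloc_mask(0, 0));
	return 0;
}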
@@ -536,26 +756,106 @@
 }
 #endif
 
+#ifndef huge_ptep_modify_prot_start
+#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
+static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
+						unsigned long addr, pte_t *ptep)
+{
+	return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
+}
+#endif
+
+#ifndef huge_ptep_modify_prot_commit
+#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
+static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
+						unsigned long addr, pte_t *ptep,
+						pte_t old_pte, pte_t pte)
+{
+	set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
+}
+#endif
+
 void set_page_huge_active(struct page *page);
 
 #else /* CONFIG_HUGETLB_PAGE */
 struct hstate {};
-#define alloc_huge_page(v, a, r) NULL
-#define alloc_huge_page_node(h, nid) NULL
-#define alloc_huge_page_nodemask(h, preferred_nid, nmask) NULL
-#define alloc_huge_page_vma(h, vma, address) NULL
-#define alloc_bootmem_huge_page(h) NULL
-#define hstate_file(f) NULL
-#define hstate_sizelog(s) NULL
-#define hstate_vma(v) NULL
-#define hstate_inode(i) NULL
-#define page_hstate(page) NULL
-#define huge_page_size(h) PAGE_SIZE
-#define huge_page_mask(h) PAGE_MASK
-#define vma_kernel_pagesize(v) PAGE_SIZE
-#define vma_mmu_pagesize(v) PAGE_SIZE
-#define huge_page_order(h) 0
-#define huge_page_shift(h) PAGE_SHIFT
+
+static inline struct page *alloc_huge_page(struct vm_area_struct *vma,
+					   unsigned long addr,
+					   int avoid_reserve)
+{
+	return NULL;
+}
+
+static inline struct page *
+alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
+			nodemask_t *nmask, gfp_t gfp_mask)
+{
+	return NULL;
+}
+
+static inline struct page *alloc_huge_page_vma(struct hstate *h,
+					       struct vm_area_struct *vma,
+					       unsigned long address)
+{
+	return NULL;
+}
+
+static inline int __alloc_bootmem_huge_page(struct hstate *h)
+{
+	return 0;
+}
+
+static inline struct hstate *hstate_file(struct file *f)
+{
+	return NULL;
+}
+
+static inline struct hstate *hstate_sizelog(int page_size_log)
+{
+	return NULL;
+}
+
+static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
+{
+	return NULL;
+}
+
+static inline struct hstate *page_hstate(struct page *page)
+{
+	return NULL;
+}
+
+static inline unsigned long huge_page_size(struct hstate *h)
+{
+	return PAGE_SIZE;
+}
+
+static inline unsigned long huge_page_mask(struct hstate *h)
+{
+	return PAGE_MASK;
+}
+
+static inline unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
+{
+	return PAGE_SIZE;
+}
+
+static inline unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
+{
+	return PAGE_SIZE;
+}
+
+static inline unsigned int huge_page_order(struct hstate *h)
+{
+	return 0;
+}
+
+static inline unsigned int huge_page_shift(struct hstate *h)
+{
+	return PAGE_SHIFT;
+}
+
 static inline bool hstate_is_gigantic(struct hstate *h)
 {
 	return false;
@@ -592,6 +892,21 @@
 	return false;
 }
 
+static inline bool hugepage_movable_supported(struct hstate *h)
+{
+	return false;
+}
+
+static inline gfp_t htlb_alloc_mask(struct hstate *h)
+{
+	return 0;
+}
+
+static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
+{
+	return 0;
+}
+
 static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
 					struct mm_struct *mm, pte_t *pte)
 {
@@ -626,4 +941,26 @@
 	return ptl;
 }
 
+#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
+extern void __init hugetlb_cma_reserve(int order);
+extern void __init hugetlb_cma_check(void);
+#else
+static inline __init void hugetlb_cma_reserve(int order)
+{
+}
+static inline __init void hugetlb_cma_check(void)
+{
+}
+#endif
+
+bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr);
+
+#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
+/*
+ * ARCHes with special requirements for evicting HUGETLB backing TLB entries can
+ * implement this.
+ */
+#define flush_hugetlb_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
+#endif
+
 #endif /* _LINUX_HUGETLB_H */
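The #ifndef fallback at the end means an architecture overrides hugetlb TLB flushing by defining __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE and supplying its own flush_hugetlb_tlb_range() in its asm/hugetlb.h; everyone else falls through to the ordinary flush_tlb_range(). A hypothetical arch-side override might look roughly like this (the architecture name and the xyz_flush_tlb_range_psize() helper are made up for illustration):

/* arch/xyz/include/asm/hugetlb.h (hypothetical) */
#define __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE

/*
 * Pass the huge page shift as a stride hint so the hardware invalidates
 * one TLB entry per huge page rather than per base page.
 */
#define flush_hugetlb_tlb_range(vma, start, end)			\
	xyz_flush_tlb_range_psize(vma, start, end,			\
				  huge_page_shift(hstate_vma(vma)))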