2024-10-22 8ac6c7a54ed1b98d142dce24b11c6de6a1e239a5
kernel/include/linux/hugetlb.h
@@ -7,37 +7,22 @@
 #include <linux/fs.h>
 #include <linux/hugetlb_inline.h>
 #include <linux/cgroup.h>
+#include <linux/page_ref.h>
 #include <linux/list.h>
 #include <linux/kref.h>
-#include <asm/pgtable.h>
+#include <linux/pgtable.h>
+#include <linux/gfp.h>
+#include <linux/userfaultfd_k.h>
 
 struct ctl_table;
 struct user_struct;
 struct mmu_gather;
 
 #ifndef is_hugepd
-/*
- * Some architectures requires a hugepage directory format that is
- * required to support multiple hugepage sizes. For example
- * a4fe3ce76 "powerpc/mm: Allow more flexible layouts for hugepage pagetables"
- * introduced the same on powerpc. This allows for a more flexible hugepage
- * pagetable layout.
- */
 typedef struct { unsigned long pd; } hugepd_t;
 #define is_hugepd(hugepd) (0)
 #define __hugepd(x) ((hugepd_t) { (x) })
-static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
-                              unsigned pdshift, unsigned long end,
-                              int write, struct page **pages, int *nr)
-{
-        return 0;
-}
-#else
-extern int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
-                       unsigned pdshift, unsigned long end,
-                       int write, struct page **pages, int *nr);
 #endif
-
 
 #ifdef CONFIG_HUGETLB_PAGE
 
@@ -64,7 +49,52 @@
 	long adds_in_progress;
 	struct list_head region_cache;
 	long region_cache_count;
+#ifdef CONFIG_CGROUP_HUGETLB
+	/*
+	 * On private mappings, the counter to uncharge reservations is stored
+	 * here. If these fields are 0, then either the mapping is shared, or
+	 * cgroup accounting is disabled for this resv_map.
+	 */
+	struct page_counter *reservation_counter;
+	unsigned long pages_per_hpage;
+	struct cgroup_subsys_state *css;
+#endif
 };
+
+/*
+ * Region tracking -- allows tracking of reservations and instantiated pages
+ * across the pages in a mapping.
+ *
+ * The region data structures are embedded into a resv_map and protected
+ * by a resv_map's lock. The set of regions within the resv_map represent
+ * reservations for huge pages, or huge pages that have already been
+ * instantiated within the map. The from and to elements are huge page
+ * indices into the associated mapping. from indicates the starting index
+ * of the region. to represents the first index past the end of the region.
+ *
+ * For example, a file region structure with from == 0 and to == 4 represents
+ * four huge pages in a mapping. It is important to note that the to element
+ * represents the first element past the end of the region. This is used in
+ * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
+ *
+ * Interval notation of the form [from, to) will be used to indicate that
+ * the endpoint from is inclusive and to is exclusive.
+ */
+struct file_region {
+	struct list_head link;
+	long from;
+	long to;
+#ifdef CONFIG_CGROUP_HUGETLB
+	/*
+	 * On shared mappings, each reserved region appears as a struct
+	 * file_region in resv_map. These fields hold the info needed to
+	 * uncharge each reservation.
+	 */
+	struct page_counter *reservation_counter;
+	struct cgroup_subsys_state *css;
+#endif
+};
+
 extern struct resv_map *resv_map_alloc(void);
 void resv_map_release(struct kref *ref);
 
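
As an aside on the [from, to) convention documented for struct file_region above: each region is half-open, so it spans to - from huge pages, and the total reserved-page count of a resv_map is just the sum of those differences over its region list. A minimal sketch of that arithmetic, assuming only the fields added by this patch (count_file_regions is a hypothetical helper, not part of the patch):

/* Illustration only -- not part of the patch. */
static long count_file_regions(struct list_head *regions)
{
        struct file_region *rg;
        long pages = 0;

        /* [from, to) is half-open, so a region covers to - from pages. */
        list_for_each_entry(rg, regions, link)
                pages += rg->to - rg->from;

        return pages;
}
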
@@ -78,14 +108,13 @@
 void hugepage_put_subpool(struct hugepage_subpool *spool);
 
 void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
-int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
-int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
-int hugetlb_treat_movable_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
-
-#ifdef CONFIG_NUMA
-int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int,
-                                     void __user *, size_t *, loff_t *);
-#endif
+int hugetlb_sysctl_handler(struct ctl_table *, int, void *, size_t *, loff_t *);
+int hugetlb_overcommit_handler(struct ctl_table *, int, void *, size_t *,
+                loff_t *);
+int hugetlb_treat_movable_handler(struct ctl_table *, int, void *, size_t *,
+                loff_t *);
+int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int, void *, size_t *,
+                loff_t *);
 
 int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
 long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
@@ -102,42 +131,48 @@
                           unsigned long start, unsigned long end,
                           struct page *ref_page);
 void hugetlb_report_meminfo(struct seq_file *);
-int hugetlb_report_node_meminfo(int, char *);
+int hugetlb_report_node_meminfo(char *buf, int len, int nid);
 void hugetlb_show_meminfo(void);
 unsigned long hugetlb_total_pages(void);
 vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                         unsigned long address, unsigned int flags);
+#ifdef CONFIG_USERFAULTFD
 int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
                                 struct vm_area_struct *dst_vma,
                                 unsigned long dst_addr,
                                 unsigned long src_addr,
+                                enum mcopy_atomic_mode mode,
                                 struct page **pagep);
+#endif /* CONFIG_USERFAULTFD */
 int hugetlb_reserve_pages(struct inode *inode, long from, long to,
                                                 struct vm_area_struct *vma,
                                                 vm_flags_t vm_flags);
 long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
                                                 long freed);
-bool isolate_huge_page(struct page *page, struct list_head *list);
+int isolate_hugetlb(struct page *page, struct list_head *list);
 void putback_active_hugepage(struct page *page);
 void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
 void free_huge_page(struct page *page);
 void hugetlb_fix_reserve_counts(struct inode *inode);
 extern struct mutex *hugetlb_fault_mutex_table;
-u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
-                                pgoff_t idx);
+u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx);
 
-pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
+pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
+                      unsigned long addr, pud_t *pud);
+
+struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage);
 
 extern int sysctl_hugetlb_shm_group;
 extern struct list_head huge_boot_pages;
 
 /* arch callbacks */
 
-pte_t *huge_pte_alloc(struct mm_struct *mm,
+pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
                         unsigned long addr, unsigned long sz);
 pte_t *huge_pte_offset(struct mm_struct *mm,
                        unsigned long addr, unsigned long sz);
-int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
+int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
+                                unsigned long *addr, pte_t *ptep);
 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
                                 unsigned long *start, unsigned long *end);
 struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
@@ -145,8 +180,8 @@
 struct page *follow_huge_pd(struct vm_area_struct *vma,
                             unsigned long address, hugepd_t hpd,
                             int flags, int pdshift);
-struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
-                                pmd_t *pmd, int flags);
+struct page *follow_huge_pmd_pte(struct vm_area_struct *vma, unsigned long address,
+                                 int flags);
 struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
                                 pud_t *pud, int flags);
 struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
@@ -158,6 +193,7 @@
                 unsigned long address, unsigned long end, pgprot_t newprot);
 
 bool is_hugetlb_entry_migration(pte_t pte);
+void hugetlb_unshare_all_pmds(struct vm_area_struct *vma);
 
 #else /* !CONFIG_HUGETLB_PAGE */
 
@@ -170,8 +206,15 @@
         return 0;
 }
 
-static inline int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr,
-                                        pte_t *ptep)
+static inline struct address_space *hugetlb_page_mapping_lock_write(
+                                                        struct page *hpage)
+{
+        return NULL;
+}
+
+static inline int huge_pmd_unshare(struct mm_struct *mm,
+                                        struct vm_area_struct *vma,
+                                        unsigned long *addr, pte_t *ptep)
 {
         return 0;
 }
@@ -182,39 +225,133 @@
 {
 }
 
-#define follow_hugetlb_page(m,v,p,vs,a,b,i,w,n) ({ BUG(); 0; })
-#define follow_huge_addr(mm, addr, write) ERR_PTR(-EINVAL)
-#define copy_hugetlb_page_range(src, dst, vma) ({ BUG(); 0; })
+static inline long follow_hugetlb_page(struct mm_struct *mm,
+                        struct vm_area_struct *vma, struct page **pages,
+                        struct vm_area_struct **vmas, unsigned long *position,
+                        unsigned long *nr_pages, long i, unsigned int flags,
+                        int *nonblocking)
+{
+        BUG();
+        return 0;
+}
+
+static inline struct page *follow_huge_addr(struct mm_struct *mm,
+                                        unsigned long address, int write)
+{
+        return ERR_PTR(-EINVAL);
+}
+
+static inline int copy_hugetlb_page_range(struct mm_struct *dst,
+                        struct mm_struct *src, struct vm_area_struct *vma)
+{
+        BUG();
+        return 0;
+}
+
 static inline void hugetlb_report_meminfo(struct seq_file *m)
 {
 }
-#define hugetlb_report_node_meminfo(n, buf) 0
+
+static inline int hugetlb_report_node_meminfo(char *buf, int len, int nid)
+{
+        return 0;
+}
+
 static inline void hugetlb_show_meminfo(void)
 {
 }
-#define follow_huge_pd(vma, addr, hpd, flags, pdshift) NULL
-#define follow_huge_pmd(mm, addr, pmd, flags) NULL
-#define follow_huge_pud(mm, addr, pud, flags) NULL
-#define follow_huge_pgd(mm, addr, pgd, flags) NULL
-#define prepare_hugepage_range(file, addr, len) (-EINVAL)
-#define pmd_huge(x) 0
-#define pud_huge(x) 0
-#define is_hugepage_only_range(mm, addr, len) 0
-#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })
-#define hugetlb_fault(mm, vma, addr, flags) ({ BUG(); 0; })
-#define hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma, dst_addr, \
-                src_addr, pagep) ({ BUG(); 0; })
-#define huge_pte_offset(mm, address, sz) 0
 
-static inline bool isolate_huge_page(struct page *page, struct list_head *list)
+static inline struct page *follow_huge_pd(struct vm_area_struct *vma,
+                        unsigned long address, hugepd_t hpd, int flags,
+                        int pdshift)
 {
-        return false;
+        return NULL;
 }
-#define putback_active_hugepage(p) do {} while (0)
-#define move_hugetlb_state(old, new, reason) do {} while (0)
 
-static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
-                unsigned long address, unsigned long end, pgprot_t newprot)
+static inline struct page *follow_huge_pmd_pte(struct vm_area_struct *vma,
+                        unsigned long address, int flags)
+{
+        return NULL;
+}
+
+static inline struct page *follow_huge_pud(struct mm_struct *mm,
+                        unsigned long address, pud_t *pud, int flags)
+{
+        return NULL;
+}
+
+static inline struct page *follow_huge_pgd(struct mm_struct *mm,
+                        unsigned long address, pgd_t *pgd, int flags)
+{
+        return NULL;
+}
+
+static inline int prepare_hugepage_range(struct file *file,
+                        unsigned long addr, unsigned long len)
+{
+        return -EINVAL;
+}
+
+static inline int pmd_huge(pmd_t pmd)
+{
+        return 0;
+}
+
+static inline int pud_huge(pud_t pud)
+{
+        return 0;
+}
+
+static inline int is_hugepage_only_range(struct mm_struct *mm,
+                        unsigned long addr, unsigned long len)
+{
+        return 0;
+}
+
+static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
+                        unsigned long addr, unsigned long end,
+                        unsigned long floor, unsigned long ceiling)
+{
+        BUG();
+}
+
+#ifdef CONFIG_USERFAULTFD
+static inline int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
+                                                pte_t *dst_pte,
+                                                struct vm_area_struct *dst_vma,
+                                                unsigned long dst_addr,
+                                                unsigned long src_addr,
+                                                enum mcopy_atomic_mode mode,
+                                                struct page **pagep)
+{
+        BUG();
+        return 0;
+}
+#endif /* CONFIG_USERFAULTFD */
+
+static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
+                                        unsigned long sz)
+{
+        return NULL;
+}
+
+static inline int isolate_hugetlb(struct page *page, struct list_head *list)
+{
+        return -EBUSY;
+}
+
+static inline void putback_active_hugepage(struct page *page)
+{
+}
+
+static inline void move_hugetlb_state(struct page *oldpage,
+                                        struct page *newpage, int reason)
+{
+}
+
+static inline unsigned long hugetlb_change_protection(
+                        struct vm_area_struct *vma, unsigned long address,
+                        unsigned long end, pgprot_t newprot)
 {
         return 0;
 }
@@ -232,6 +369,16 @@
 {
         BUG();
 }
+
+static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
+                        struct vm_area_struct *vma, unsigned long address,
+                        unsigned int flags)
+{
+        BUG();
+        return 0;
+}
+
+static inline void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) { }
 
 #endif /* !CONFIG_HUGETLB_PAGE */
 /*
@@ -310,7 +457,10 @@
         return is_file_shm_hugepages(file);
 }
 
-
+static inline struct hstate *hstate_inode(struct inode *i)
+{
+        return HUGETLBFS_SB(i->i_sb)->hstate;
+}
 #else /* !CONFIG_HUGETLBFS */
 
 #define is_file_hugepages(file) false
@@ -322,6 +472,10 @@
         return ERR_PTR(-ENOSYS);
 }
 
+static inline struct hstate *hstate_inode(struct inode *i)
+{
+        return NULL;
+}
 #endif /* !CONFIG_HUGETLBFS */
 
 #ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
@@ -352,7 +506,8 @@
         unsigned int surplus_huge_pages_node[MAX_NUMNODES];
 #ifdef CONFIG_CGROUP_HUGETLB
         /* cgroup control files */
-        struct cftype cgroup_files[5];
+        struct cftype cgroup_files_dfl[7];
+        struct cftype cgroup_files_legacy[9];
 #endif
         char name[HSTATE_NAME_LEN];
 };
@@ -364,9 +519,8 @@
 
 struct page *alloc_huge_page(struct vm_area_struct *vma,
                                 unsigned long addr, int avoid_reserve);
-struct page *alloc_huge_page_node(struct hstate *h, int nid);
 struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
-                                nodemask_t *nmask);
+                                nodemask_t *nmask, gfp_t gfp_mask);
 struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
                                 unsigned long address);
 int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
@@ -376,8 +530,8 @@
 
 int __init __alloc_bootmem_huge_page(struct hstate *h);
 int __init alloc_bootmem_huge_page(struct hstate *h);
-void __init hugetlb_bad_size(void);
 void __init hugetlb_add_hstate(unsigned order);
+bool __init arch_hugetlb_valid_size(unsigned long size);
 struct hstate *size_to_hstate(unsigned long size);
 
 #ifndef HUGE_MAX_HSTATE
@@ -389,11 +543,6 @@
 
 #define default_hstate (hstates[default_hstate_idx])
 
-static inline struct hstate *hstate_inode(struct inode *i)
-{
-        return HUGETLBFS_SB(i->i_sb)->hstate;
-}
-
 static inline struct hstate *hstate_file(struct file *f)
 {
         return hstate_inode(file_inode(f));
@@ -404,7 +553,10 @@
         if (!page_size_log)
                 return &default_hstate;
 
-        return size_to_hstate(1UL << page_size_log);
+        if (page_size_log < BITS_PER_LONG)
+                return size_to_hstate(1UL << page_size_log);
+
+        return NULL;
 }
 
 static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
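
A note on the new check in hstate_sizelog() above: page_size_log comes from user-controlled bits (for instance the MAP_HUGE_SHIFT field of mmap() or shmget() flags), and shifting 1UL by BITS_PER_LONG or more is undefined behaviour, so an out-of-range value has to be rejected before the shift. A hedged sketch of the caller-side effect (pick_hstate_from_user_log is a hypothetical wrapper, not part of this patch):

/* Illustration only -- not part of the patch. */
static struct hstate *pick_hstate_from_user_log(int page_size_log)
{
        /*
         * 0 selects the default hstate; a sane log (e.g. 21 for 2 MB pages)
         * is looked up via size_to_hstate(); a bogus value such as 100,
         * which would make 1UL << page_size_log undefined, now yields NULL.
         */
        return hstate_sizelog(page_size_log);
}
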
@@ -453,6 +605,20 @@
 
 #include <asm/hugetlb.h>
 
+#ifndef is_hugepage_only_range
+static inline int is_hugepage_only_range(struct mm_struct *mm,
+                                        unsigned long addr, unsigned long len)
+{
+        return 0;
+}
+#define is_hugepage_only_range is_hugepage_only_range
+#endif
+
+#ifndef arch_clear_hugepage_flags
+static inline void arch_clear_hugepage_flags(struct page *page) { }
+#define arch_clear_hugepage_flags arch_clear_hugepage_flags
+#endif
+
 #ifndef arch_make_huge_pte
 static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
                                        struct page *page, int writable)
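
The #ifndef blocks added above follow the usual kernel override pattern: an architecture that wants its own is_hugepage_only_range() or arch_clear_hugepage_flags() defines the symbol as a macro of the same name in <asm/hugetlb.h> (included just before this point), and the generic fallback compiles out. A hypothetical arch-side override could look like this sketch (ARCH_HUGE_AREA_START/END are made-up symbols used only for illustration):

/* In a hypothetical arch's asm/hugetlb.h -- illustration only. */
static inline int is_hugepage_only_range(struct mm_struct *mm,
                                         unsigned long addr, unsigned long len)
{
        /* e.g. report a dedicated hugepage-only window in the address space */
        return addr >= ARCH_HUGE_AREA_START && addr + len <= ARCH_HUGE_AREA_END;
}
#define is_hugepage_only_range is_hugepage_only_range
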
@@ -464,7 +630,7 @@
 static inline struct hstate *page_hstate(struct page *page)
 {
         VM_BUG_ON_PAGE(!PageHuge(page), page);
-        return size_to_hstate(PAGE_SIZE << compound_order(page));
+        return size_to_hstate(page_size(page));
 }
 
 static inline unsigned hstate_index_to_shift(unsigned index)
@@ -480,17 +646,75 @@
 extern int dissolve_free_huge_page(struct page *page);
 extern int dissolve_free_huge_pages(unsigned long start_pfn,
                                     unsigned long end_pfn);
-static inline bool hugepage_migration_supported(struct hstate *h)
-{
+
 #ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
+#ifndef arch_hugetlb_migration_supported
+static inline bool arch_hugetlb_migration_supported(struct hstate *h)
+{
         if ((huge_page_shift(h) == PMD_SHIFT) ||
-                (huge_page_shift(h) == PGDIR_SHIFT))
+                (huge_page_shift(h) == PUD_SHIFT) ||
+                (huge_page_shift(h) == PGDIR_SHIFT))
                 return true;
         else
                 return false;
-#else
-        return false;
+}
 #endif
+#else
+static inline bool arch_hugetlb_migration_supported(struct hstate *h)
+{
+        return false;
+}
+#endif
+
+static inline bool hugepage_migration_supported(struct hstate *h)
+{
+        return arch_hugetlb_migration_supported(h);
+}
+
+/*
+ * Movability check is different as compared to migration check.
+ * It determines whether or not a huge page should be placed on
+ * movable zone or not. Movability of any huge page should be
+ * required only if huge page size is supported for migration.
+ * There won't be any reason for the huge page to be movable if
+ * it is not migratable to start with. Also the size of the huge
+ * page should be large enough to be placed under a movable zone
+ * and still feasible enough to be migratable. Just the presence
+ * in movable zone does not make the migration feasible.
+ *
+ * So even though large huge page sizes like the gigantic ones
+ * are migratable they should not be movable because it's not
+ * feasible to migrate them from movable zone.
+ */
+static inline bool hugepage_movable_supported(struct hstate *h)
+{
+        if (!hugepage_migration_supported(h))
+                return false;
+
+        if (hstate_is_gigantic(h))
+                return false;
+        return true;
+}
+
+/* Movability of hugepages depends on migration support. */
+static inline gfp_t htlb_alloc_mask(struct hstate *h)
+{
+        if (hugepage_movable_supported(h))
+                return GFP_HIGHUSER_MOVABLE;
+        else
+                return GFP_HIGHUSER;
+}
+
+static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
+{
+        gfp_t modified_mask = htlb_alloc_mask(h);
+
+        /* Some callers might want to enforce node */
+        modified_mask |= (gfp_mask & __GFP_THISNODE);
+
+        modified_mask |= (gfp_mask & __GFP_NOWARN);
+
+        return modified_mask;
 }
 
 static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
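
The helpers added in the hunk above tie allocation flags to movability: htlb_alloc_mask() returns GFP_HIGHUSER_MOVABLE only when the huge page could later be migrated back out of a movable zone, and htlb_modify_alloc_mask() folds in just __GFP_THISNODE and __GFP_NOWARN from a caller-supplied mask. A hedged sketch of how a migration-style caller might combine this with the gfp_mask parameter that this patch adds to alloc_huge_page_nodemask() (the wrapper function is hypothetical, not taken from the patch):

/* Illustration only -- not part of the patch. */
static struct page *alloc_hugetlb_migration_target(struct hstate *h, int nid,
                                                   nodemask_t *nmask)
{
        /* Keep only the node constraint from the caller's flags. */
        gfp_t gfp_mask = htlb_modify_alloc_mask(h, __GFP_THISNODE);

        return alloc_huge_page_nodemask(h, nid, nmask, gfp_mask);
}
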
@@ -536,26 +760,106 @@
 }
 #endif
 
+#ifndef huge_ptep_modify_prot_start
+#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
+static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
+                                                unsigned long addr, pte_t *ptep)
+{
+        return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
+}
+#endif
+
+#ifndef huge_ptep_modify_prot_commit
+#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
+static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
+                                                unsigned long addr, pte_t *ptep,
+                                                pte_t old_pte, pte_t pte)
+{
+        set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
+}
+#endif
+
 void set_page_huge_active(struct page *page);
 
 #else /* CONFIG_HUGETLB_PAGE */
 struct hstate {};
-#define alloc_huge_page(v, a, r) NULL
-#define alloc_huge_page_node(h, nid) NULL
-#define alloc_huge_page_nodemask(h, preferred_nid, nmask) NULL
-#define alloc_huge_page_vma(h, vma, address) NULL
-#define alloc_bootmem_huge_page(h) NULL
-#define hstate_file(f) NULL
-#define hstate_sizelog(s) NULL
-#define hstate_vma(v) NULL
-#define hstate_inode(i) NULL
-#define page_hstate(page) NULL
-#define huge_page_size(h) PAGE_SIZE
-#define huge_page_mask(h) PAGE_MASK
-#define vma_kernel_pagesize(v) PAGE_SIZE
-#define vma_mmu_pagesize(v) PAGE_SIZE
-#define huge_page_order(h) 0
-#define huge_page_shift(h) PAGE_SHIFT
+
+static inline struct page *alloc_huge_page(struct vm_area_struct *vma,
+                                           unsigned long addr,
+                                           int avoid_reserve)
+{
+        return NULL;
+}
+
+static inline struct page *
+alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
+                         nodemask_t *nmask, gfp_t gfp_mask)
+{
+        return NULL;
+}
+
+static inline struct page *alloc_huge_page_vma(struct hstate *h,
+                                               struct vm_area_struct *vma,
+                                               unsigned long address)
+{
+        return NULL;
+}
+
+static inline int __alloc_bootmem_huge_page(struct hstate *h)
+{
+        return 0;
+}
+
+static inline struct hstate *hstate_file(struct file *f)
+{
+        return NULL;
+}
+
+static inline struct hstate *hstate_sizelog(int page_size_log)
+{
+        return NULL;
+}
+
+static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
+{
+        return NULL;
+}
+
+static inline struct hstate *page_hstate(struct page *page)
+{
+        return NULL;
+}
+
+static inline unsigned long huge_page_size(struct hstate *h)
+{
+        return PAGE_SIZE;
+}
+
+static inline unsigned long huge_page_mask(struct hstate *h)
+{
+        return PAGE_MASK;
+}
+
+static inline unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
+{
+        return PAGE_SIZE;
+}
+
+static inline unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
+{
+        return PAGE_SIZE;
+}
+
+static inline unsigned int huge_page_order(struct hstate *h)
+{
+        return 0;
+}
+
+static inline unsigned int huge_page_shift(struct hstate *h)
+{
+        return PAGE_SHIFT;
+}
+
 static inline bool hstate_is_gigantic(struct hstate *h)
 {
         return false;
@@ -592,6 +896,21 @@
         return false;
 }
 
+static inline bool hugepage_movable_supported(struct hstate *h)
+{
+        return false;
+}
+
+static inline gfp_t htlb_alloc_mask(struct hstate *h)
+{
+        return 0;
+}
+
+static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
+{
+        return 0;
+}
+
 static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
                                            struct mm_struct *mm, pte_t *pte)
 {
@@ -626,4 +945,38 @@
         return ptl;
 }
 
+#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
+extern void __init hugetlb_cma_reserve(int order);
+extern void __init hugetlb_cma_check(void);
+#else
+static inline __init void hugetlb_cma_reserve(int order)
+{
+}
+static inline __init void hugetlb_cma_check(void)
+{
+}
+#endif
+
+bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr);
+
+#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
+/*
+ * ARCHes with special requirements for evicting HUGETLB backing TLB entries can
+ * implement this.
+ */
+#define flush_hugetlb_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
+#endif
+
+#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
+static inline bool hugetlb_pmd_shared(pte_t *pte)
+{
+        return page_count(virt_to_page(pte)) > 1;
+}
+#else
+static inline bool hugetlb_pmd_shared(pte_t *pte)
+{
+        return false;
+}
+#endif
+
 #endif /* _LINUX_HUGETLB_H */