2024-01-03 2f7c68cb55ecb7331f2381deb497c27155f32faf
kernel/arch/x86/mm/pgtable.c
@@ -3,7 +3,6 @@
 #include <linux/gfp.h>
 #include <linux/hugetlb.h>
 #include <asm/pgalloc.h>
-#include <asm/pgtable.h>
 #include <asm/tlb.h>
 #include <asm/fixmap.h>
 #include <asm/mtrr.h>
@@ -13,33 +12,25 @@
 EXPORT_SYMBOL(physical_mask);
 #endif

-#define PGALLOC_GFP (GFP_KERNEL_ACCOUNT | __GFP_ZERO)
-
 #ifdef CONFIG_HIGHPTE
-#define PGALLOC_USER_GFP __GFP_HIGHMEM
+#define PGTABLE_HIGHMEM __GFP_HIGHMEM
 #else
-#define PGALLOC_USER_GFP 0
+#define PGTABLE_HIGHMEM 0
 #endif

-gfp_t __userpte_alloc_gfp = PGALLOC_GFP | PGALLOC_USER_GFP;
-
-pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
+#ifndef CONFIG_PARAVIRT
+static inline
+void paravirt_tlb_remove_table(struct mmu_gather *tlb, void *table)
 {
-	return (pte_t *)__get_free_page(PGALLOC_GFP & ~__GFP_ACCOUNT);
+	tlb_remove_page(tlb, table);
 }
+#endif

-pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
+gfp_t __userpte_alloc_gfp = GFP_PGTABLE_USER | PGTABLE_HIGHMEM;
+
+pgtable_t pte_alloc_one(struct mm_struct *mm)
 {
-	struct page *pte;
-
-	pte = alloc_pages(__userpte_alloc_gfp, 0);
-	if (!pte)
-		return NULL;
-	if (!pgtable_page_ctor(pte)) {
-		__free_page(pte);
-		return NULL;
-	}
-	return pte;
+	return __pte_alloc_one(mm, __userpte_alloc_gfp);
 }

 static int __init setup_userpte(char *arg)
@@ -61,7 +52,7 @@

 void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
 {
-	pgtable_page_dtor(pte);
+	pgtable_pte_page_dtor(pte);
 	paravirt_release_pte(page_to_pfn(pte));
 	paravirt_tlb_remove_table(tlb, pte);
 }
@@ -190,7 +181,7 @@
  * when PTI is enabled. We need them to map the per-process LDT into the
  * user-space page-table.
  */
-#define PREALLOCATED_USER_PMDS	 (static_cpu_has(X86_FEATURE_PTI) ? \
+#define PREALLOCATED_USER_PMDS	 (boot_cpu_has(X86_FEATURE_PTI) ? \
					KERNEL_PGD_PTRS : 0)
 #define MAX_PREALLOCATED_USER_PMDS KERNEL_PGD_PTRS

@@ -235,7 +226,7 @@
 {
 	int i;
 	bool failed = false;
-	gfp_t gfp = PGALLOC_GFP;
+	gfp_t gfp = GFP_PGTABLE_USER;

 	if (mm == &init_mm)
 		gfp &= ~__GFP_ACCOUNT;
@@ -292,7 +283,7 @@

 #ifdef CONFIG_PAGE_TABLE_ISOLATION

-	if (!static_cpu_has(X86_FEATURE_PTI))
+	if (!boot_cpu_has(X86_FEATURE_PTI))
 		return;

 	pgdp = kernel_to_user_pgdp(pgdp);
@@ -373,14 +364,14 @@

 static struct kmem_cache *pgd_cache;

-static int __init pgd_cache_init(void)
+void __init pgtable_cache_init(void)
 {
 	/*
 	 * When PAE kernel is running as a Xen domain, it does not use
 	 * shared kernel pmd. And this requires a whole page for pgd.
 	 */
 	if (!SHARED_KERNEL_PMD)
-		return 0;
+		return;

 	/*
 	 * when PAE kernel is not running as a Xen domain, it uses
@@ -390,9 +381,7 @@
 	 */
 	pgd_cache = kmem_cache_create("pgd_cache", PGD_SIZE, PGD_ALIGN,
				      SLAB_PANIC, NULL);
-	return 0;
 }
-core_initcall(pgd_cache_init);

 static inline pgd_t *_pgd_alloc(void)
 {
@@ -401,14 +390,14 @@
 	 * We allocate one page for pgd.
 	 */
 	if (!SHARED_KERNEL_PMD)
-		return (pgd_t *)__get_free_pages(PGALLOC_GFP,
+		return (pgd_t *)__get_free_pages(GFP_PGTABLE_USER,
						 PGD_ALLOCATION_ORDER);

 	/*
 	 * Now PAE kernel is not running as a Xen domain. We can allocate
 	 * a 32-byte slab for pgd to save memory space.
 	 */
-	return kmem_cache_alloc(pgd_cache, PGALLOC_GFP);
+	return kmem_cache_alloc(pgd_cache, GFP_PGTABLE_USER);
 }

 static inline void _pgd_free(pgd_t *pgd)
@@ -422,7 +411,8 @@

 static inline pgd_t *_pgd_alloc(void)
 {
-	return (pgd_t *)__get_free_pages(PGALLOC_GFP, PGD_ALLOCATION_ORDER);
+	return (pgd_t *)__get_free_pages(GFP_PGTABLE_USER,
+					 PGD_ALLOCATION_ORDER);
 }

 static inline void _pgd_free(pgd_t *pgd)
@@ -723,11 +713,9 @@
 	if (pud_present(*pud) && !pud_huge(*pud))
 		return 0;

-	prot = pgprot_4k_2_large(prot);
-
 	set_pte((pte_t *)pud, pfn_pte(
 		(u64)addr >> PAGE_SHIFT,
-		__pgprot(pgprot_val(prot) | _PAGE_PSE)));
+		__pgprot(protval_4k_2_large(pgprot_val(prot)) | _PAGE_PSE)));

 	return 1;
 }
@@ -755,11 +743,9 @@
 	if (pmd_present(*pmd) && !pmd_huge(*pmd))
 		return 0;

-	prot = pgprot_4k_2_large(prot);
-
 	set_pte((pte_t *)pmd, pfn_pte(
 		(u64)addr >> PAGE_SHIFT,
-		__pgprot(pgprot_val(prot) | _PAGE_PSE)));
+		__pgprot(protval_4k_2_large(pgprot_val(prot)) | _PAGE_PSE)));

 	return 1;
 }
@@ -794,6 +780,14 @@
 	return 0;
 }

+/*
+ * Until we support 512GB pages, skip them in the vmap area.
+ */
+int p4d_free_pud_page(p4d_t *p4d, unsigned long addr)
+{
+	return 0;
+}
+
 #ifdef CONFIG_X86_64
 /**
  * pud_free_pmd_page - Clear pud entry and free pmd page.
@@ -811,10 +805,7 @@
 	pte_t *pte;
 	int i;

-	if (pud_none(*pud))
-		return 1;
-
-	pmd = (pmd_t *)pud_page_vaddr(*pud);
+	pmd = pud_pgtable(*pud);
 	pmd_sv = (pmd_t *)__get_free_page(GFP_KERNEL);
 	if (!pmd_sv)
 		return 0;
@@ -856,9 +847,6 @@
 int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
 {
 	pte_t *pte;
-
-	if (pmd_none(*pmd))
-		return 1;

 	pte = (pte_t *)pmd_page_vaddr(*pmd);
 	pmd_clear(pmd);
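
Note on the allocator change above: the file-local PGALLOC_GFP mask is dropped in favor of the generic GFP_PGTABLE_USER flags, and pte_alloc_one() now delegates to the common __pte_alloc_one() helper. As a rough sketch of what those generic definitions look like (modelled on include/asm-generic/pgalloc.h from this kernel era, simplified rather than quoted verbatim from this tree):

#define GFP_PGTABLE_KERNEL	(GFP_KERNEL | __GFP_ZERO)
#define GFP_PGTABLE_USER	(GFP_PGTABLE_KERNEL | __GFP_ACCOUNT)

/*
 * Allocate one zeroed page for a user PTE table and run the page-table
 * page constructor (which sets up the split page-table lock); if the
 * constructor fails, the page is freed again and NULL is returned.
 */
static inline pgtable_t __pte_alloc_one(struct mm_struct *mm, gfp_t gfp)
{
	struct page *pte;

	pte = alloc_page(gfp);
	if (!pte)
		return NULL;
	if (!pgtable_pte_page_ctor(pte)) {
		__free_page(pte);
		return NULL;
	}
	return pte;
}

Because GFP_PGTABLE_USER includes __GFP_ACCOUNT, the preallocation hunk above clears that bit for &init_mm, so kernel-owned page tables are not charged to a memory cgroup.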