```diff
@@ -3,7 +3,6 @@
 #include <linux/gfp.h>
 #include <linux/hugetlb.h>
 #include <asm/pgalloc.h>
-#include <asm/pgtable.h>
 #include <asm/tlb.h>
 #include <asm/fixmap.h>
 #include <asm/mtrr.h>
@@ -13,33 +12,25 @@
 EXPORT_SYMBOL(physical_mask);
 #endif
 
-#define PGALLOC_GFP (GFP_KERNEL_ACCOUNT | __GFP_ZERO)
-
 #ifdef CONFIG_HIGHPTE
-#define PGALLOC_USER_GFP __GFP_HIGHMEM
+#define PGTABLE_HIGHMEM __GFP_HIGHMEM
 #else
-#define PGALLOC_USER_GFP 0
+#define PGTABLE_HIGHMEM 0
 #endif
 
-gfp_t __userpte_alloc_gfp = PGALLOC_GFP | PGALLOC_USER_GFP;
-
-pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
+#ifndef CONFIG_PARAVIRT
+static inline
+void paravirt_tlb_remove_table(struct mmu_gather *tlb, void *table)
 {
-        return (pte_t *)__get_free_page(PGALLOC_GFP & ~__GFP_ACCOUNT);
+        tlb_remove_page(tlb, table);
 }
+#endif
 
-pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
+gfp_t __userpte_alloc_gfp = GFP_PGTABLE_USER | PGTABLE_HIGHMEM;
+
+pgtable_t pte_alloc_one(struct mm_struct *mm)
 {
-        struct page *pte;
-
-        pte = alloc_pages(__userpte_alloc_gfp, 0);
-        if (!pte)
-                return NULL;
-        if (!pgtable_page_ctor(pte)) {
-                __free_page(pte);
-                return NULL;
-        }
-        return pte;
+        return __pte_alloc_one(mm, __userpte_alloc_gfp);
 }
 
 static int __init setup_userpte(char *arg)
```
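The old PGALLOC_GFP / PGALLOC_USER_GFP pair and the open-coded pte_alloc_one() are replaced by the generic page-table allocation helpers. For orientation, here is a minimal sketch of what those helpers are assumed to provide, modelled on the <asm-generic/pgalloc.h> interface this conversion appears to target (the bodies below are an approximation, not part of the diff):

```c
/* Sketch of the assumed generic definitions; exact bodies vary by kernel version. */
#define GFP_PGTABLE_KERNEL      (GFP_KERNEL | __GFP_ZERO)
#define GFP_PGTABLE_USER        (GFP_PGTABLE_KERNEL | __GFP_ACCOUNT)

static inline pgtable_t __pte_alloc_one(struct mm_struct *mm, gfp_t gfp)
{
        struct page *pte;

        pte = alloc_page(gfp);                  /* one zeroed page per PTE table */
        if (!pte)
                return NULL;
        if (!pgtable_pte_page_ctor(pte)) {      /* split-ptlock and accounting setup */
                __free_page(pte);
                return NULL;
        }

        return pte;
}
```

With these definitions, clearing __GFP_ACCOUNT (as the pgd-allocation code does for &init_mm further down) reduces GFP_PGTABLE_USER back to the kernel variant, so kernel page tables are never charged to a memory cgroup.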
```diff
@@ -61,7 +52,7 @@
 
 void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
 {
-        pgtable_page_dtor(pte);
+        pgtable_pte_page_dtor(pte);
         paravirt_release_pte(page_to_pfn(pte));
         paravirt_tlb_remove_table(tlb, pte);
 }
```
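The free side follows the same rename as the constructor. A hedged sketch of what pgtable_pte_page_dtor() (formerly pgtable_page_dtor()) is assumed to undo; the exact body differs between kernel versions:

```c
/* Assumed counterpart to pgtable_pte_page_ctor(); approximate, for illustration. */
static inline void pgtable_pte_page_dtor(struct page *page)
{
        pte_lock_deinit(page);                          /* release the split PTE lock */
        __ClearPageTable(page);                         /* page is no longer a page table */
        dec_zone_page_state(page, NR_PAGETABLE);        /* undo the accounting */
}
```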
```diff
@@ -190,7 +181,7 @@
  * when PTI is enabled. We need them to map the per-process LDT into the
  * user-space page-table.
  */
-#define PREALLOCATED_USER_PMDS (static_cpu_has(X86_FEATURE_PTI) ? \
+#define PREALLOCATED_USER_PMDS (boot_cpu_has(X86_FEATURE_PTI) ? \
                         KERNEL_PGD_PTRS : 0)
 #define MAX_PREALLOCATED_USER_PMDS KERNEL_PGD_PTRS
 
```
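Switching from static_cpu_has() to boot_cpu_has() trades the alternatives-patched feature test for a plain bit test. Roughly, and as an assumption rather than a quote of the real cpufeature macros:

```c
/*
 * Assumed distinction (sketch):
 *   boot_cpu_has(bit)   - ordinary bit test against boot_cpu_data's
 *                         capability mask; no runtime patching needed.
 *   static_cpu_has(bit) - patched at boot via the alternatives mechanism;
 *                         only worth the complexity on hot paths.
 */
#define boot_cpu_has(bit)       cpu_has(&boot_cpu_data, bit)
```

Preallocating user PMDs happens at pgd-allocation time rather than on a hot path, so the simpler predicate suffices; the same substitution appears again below when the user half of the page table is cloned.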
```diff
@@ -235,7 +226,7 @@
 {
         int i;
         bool failed = false;
-        gfp_t gfp = PGALLOC_GFP;
+        gfp_t gfp = GFP_PGTABLE_USER;
 
         if (mm == &init_mm)
                 gfp &= ~__GFP_ACCOUNT;
@@ -292,7 +283,7 @@
 
 #ifdef CONFIG_PAGE_TABLE_ISOLATION
 
-        if (!static_cpu_has(X86_FEATURE_PTI))
+        if (!boot_cpu_has(X86_FEATURE_PTI))
                 return;
 
         pgdp = kernel_to_user_pgdp(pgdp);
@@ -373,14 +364,14 @@
 
 static struct kmem_cache *pgd_cache;
 
-static int __init pgd_cache_init(void)
+void __init pgtable_cache_init(void)
 {
         /*
          * When PAE kernel is running as a Xen domain, it does not use
          * shared kernel pmd. And this requires a whole page for pgd.
          */
         if (!SHARED_KERNEL_PMD)
-                return 0;
+                return;
 
         /*
          * when PAE kernel is not running as a Xen domain, it uses
@@ -390,9 +381,7 @@
          */
         pgd_cache = kmem_cache_create("pgd_cache", PGD_SIZE, PGD_ALIGN,
                                       SLAB_PANIC, NULL);
-        return 0;
 }
-core_initcall(pgd_cache_init);
 
 static inline pgd_t *_pgd_alloc(void)
 {
```
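pgd_cache_init() loses its int return and its core_initcall() registration and becomes pgtable_cache_init(), which suggests the function is now the arch hook invoked directly from generic early-init code. A purely illustrative sketch of the assumed call ordering (the surrounding function name is invented for illustration):

```c
/* Illustrative only: generic MM bring-up is assumed to call the arch hook directly. */
void __init example_mm_init(void)
{
        /* ... earlier memory-management bring-up ... */
        pgtable_cache_init();   /* void, no initcall; on x86 it creates pgd_cache
                                 * only when SHARED_KERNEL_PMD is set */
}
```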
```diff
@@ -401,14 +390,14 @@
          * We allocate one page for pgd.
          */
         if (!SHARED_KERNEL_PMD)
-                return (pgd_t *)__get_free_pages(PGALLOC_GFP,
+                return (pgd_t *)__get_free_pages(GFP_PGTABLE_USER,
                                                  PGD_ALLOCATION_ORDER);
 
         /*
          * Now PAE kernel is not running as a Xen domain. We can allocate
          * a 32-byte slab for pgd to save memory space.
          */
-        return kmem_cache_alloc(pgd_cache, PGALLOC_GFP);
+        return kmem_cache_alloc(pgd_cache, GFP_PGTABLE_USER);
 }
 
 static inline void _pgd_free(pgd_t *pgd)
@@ -422,7 +411,8 @@
 
 static inline pgd_t *_pgd_alloc(void)
 {
-        return (pgd_t *)__get_free_pages(PGALLOC_GFP, PGD_ALLOCATION_ORDER);
+        return (pgd_t *)__get_free_pages(GFP_PGTABLE_USER,
+                                         PGD_ALLOCATION_ORDER);
 }
 
 static inline void _pgd_free(pgd_t *pgd)
@@ -723,11 +713,9 @@
         if (pud_present(*pud) && !pud_huge(*pud))
                 return 0;
 
-        prot = pgprot_4k_2_large(prot);
-
         set_pte((pte_t *)pud, pfn_pte(
                 (u64)addr >> PAGE_SHIFT,
-                __pgprot(pgprot_val(prot) | _PAGE_PSE)));
+                __pgprot(protval_4k_2_large(pgprot_val(prot)) | _PAGE_PSE)));
 
         return 1;
 }
@@ -755,11 +743,9 @@
         if (pmd_present(*pmd) && !pmd_huge(*pmd))
                 return 0;
 
-        prot = pgprot_4k_2_large(prot);
-
         set_pte((pte_t *)pmd, pfn_pte(
                 (u64)addr >> PAGE_SHIFT,
-                __pgprot(pgprot_val(prot) | _PAGE_PSE)));
+                __pgprot(protval_4k_2_large(pgprot_val(prot)) | _PAGE_PSE)));
 
         return 1;
 }
```
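Both pud_set_huge() and pmd_set_huge() stop converting the whole pgprot_t up front and instead convert the raw protection value while building the PSE entry. A sketch of the assumed relationship between the old and new helpers (simplified; the real definitions live with the x86 page-table types):

```c
/*
 * Sketch, assuming the usual x86 PAT-bit handling: the PAT bit sits at
 * bit 7 in 4k entries and at bit 12 in large-page entries, so the
 * conversion just relocates that bit on the raw value.
 */
static inline pgprotval_t protval_4k_2_large(pgprotval_t val)
{
        return (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) |
               ((val & _PAGE_PAT) << (_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT));
}

/* The old helper is assumed to be the pgprot_t wrapper around the same operation. */
static inline pgprot_t pgprot_4k_2_large(pgprot_t prot)
{
        return __pgprot(protval_4k_2_large(pgprot_val(prot)));
}
```

Working on the raw pgprotval_t avoids wrapping the value into a pgprot_t only to unwrap it again on the next line.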
```diff
@@ -794,6 +780,14 @@
         return 0;
 }
 
+/*
+ * Until we support 512GB pages, skip them in the vmap area.
+ */
+int p4d_free_pud_page(p4d_t *p4d, unsigned long addr)
+{
+        return 0;
+}
+
 #ifdef CONFIG_X86_64
 /**
  * pud_free_pmd_page - Clear pud entry and free pmd page.
@@ -810,9 +804,6 @@
         pmd_t *pmd, *pmd_sv;
         pte_t *pte;
         int i;
-
-        if (pud_none(*pud))
-                return 1;
 
         pmd = (pmd_t *)pud_page_vaddr(*pud);
         pmd_sv = (pmd_t *)__get_free_page(GFP_KERNEL);
@@ -856,9 +847,6 @@
 int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
 {
         pte_t *pte;
-
-        if (pmd_none(*pmd))
-                return 1;
 
         pte = (pte_t *)pmd_page_vaddr(*pmd);
         pmd_clear(pmd);
```