| Old | New | Code |
|---|---|---|
| | 1 | +// SPDX-License-Identifier: GPL-2.0 |
| | 2 | + |
| 1 | 3 | /* |
| 2 | 4 | * Xen leaves the responsibility for maintaining p2m mappings to the |
| 3 | 5 | * guests themselves, but it must also access and update the p2m array |

| Old | New | Code |
|---|---|---|
| 65 | 67 | #include <linux/hash.h> |
| 66 | 68 | #include <linux/sched.h> |
| 67 | 69 | #include <linux/seq_file.h> |
| 68 | | -#include <linux/bootmem.h> |
| | 70 | +#include <linux/memblock.h> |
| 69 | 71 | #include <linux/slab.h> |
| 70 | 72 | #include <linux/vmalloc.h> |
| 71 | 73 | |

| Old | New | Code |
|---|---|---|
| 96 | 98 | unsigned long xen_max_p2m_pfn __read_mostly; |
| 97 | 99 | EXPORT_SYMBOL_GPL(xen_max_p2m_pfn); |
| 98 | 100 | |
| 99 | | -#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG_LIMIT |
| 100 | | -#define P2M_LIMIT CONFIG_XEN_BALLOON_MEMORY_HOTPLUG_LIMIT |
| | 101 | +#ifdef CONFIG_XEN_MEMORY_HOTPLUG_LIMIT |
| | 102 | +#define P2M_LIMIT CONFIG_XEN_MEMORY_HOTPLUG_LIMIT |
| 101 | 103 | #else |
| 102 | 104 | #define P2M_LIMIT 0 |
| 103 | 105 | #endif |

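Side note on P2M_LIMIT: the Kconfig option behind it expresses the hotplug limit in GiB, so code that sizes the p2m space from it first has to turn that value into a frame count. A minimal sketch of such a conversion (the helper name is illustrative only; the exact expression used in p2m.c may differ):

```c
#include <asm/page.h>	/* PAGE_SHIFT */

/* Illustrative only: convert a limit given in GiB into a number of p2m entries. */
static inline unsigned long p2m_limit_to_frames(unsigned long limit_gib)
{
	/* 1 GiB = 2^30 bytes; one p2m entry covers one 2^PAGE_SHIFT-byte page. */
	return limit_gib << (30 - PAGE_SHIFT);
}
```
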
| Old | New | Code |
|---|---|---|
| 179 | 181 | |
| 180 | 182 | static void * __ref alloc_p2m_page(void) |
| 181 | 183 | { |
| 182 | | - if (unlikely(!slab_is_available())) |
| 183 | | - return alloc_bootmem_align(PAGE_SIZE, PAGE_SIZE); |
| | 184 | + if (unlikely(!slab_is_available())) { |
| | 185 | + void *ptr = memblock_alloc(PAGE_SIZE, PAGE_SIZE); |
| | 186 | + |
| | 187 | + if (!ptr) |
| | 188 | + panic("%s: Failed to allocate %lu bytes align=0x%lx\n", |
| | 189 | + __func__, PAGE_SIZE, PAGE_SIZE); |
| | 190 | + |
| | 191 | + return ptr; |
| | 192 | + } |
| 184 | 193 | |
| 185 | 194 | return (void *)__get_free_page(GFP_KERNEL); |
| 186 | 195 | } |

| Old | New | Code |
|---|---|---|
| 188 | 197 | static void __ref free_p2m_page(void *p) |
| 189 | 198 | { |
| 190 | 199 | if (unlikely(!slab_is_available())) { |
| 191 | | - free_bootmem((unsigned long)p, PAGE_SIZE); |
| | 200 | + memblock_free((unsigned long)p, PAGE_SIZE); |
| 192 | 201 | return; |
| 193 | 202 | } |
| 194 | 203 | |

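Worth noting for the two hunks above: the old alloc_bootmem_align() panicked internally when no memory was left, while memblock_alloc() reports failure by returning NULL, which is why alloc_p2m_page() now carries an explicit check and panic() of its own. A minimal sketch of that shift in caller responsibility (the function name is made up for the example):

```c
#include <linux/memblock.h>

/*
 * Illustrative only: with memblock the caller owns the failure policy.
 * memblock_alloc() hands back a zeroed, directly mapped buffer or NULL.
 */
static void * __init early_p2m_page(void)
{
	void *page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);

	if (!page)
		panic("early allocation of a p2m page failed\n");

	return page;
}
```
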
| Old | New | Code |
|---|---|---|
| 370 | 379 | |
| 371 | 380 | if (type == P2M_TYPE_PFN || i < chunk) { |
| 372 | 381 | /* Use initial p2m page contents. */ |
| 373 | | -#ifdef CONFIG_X86_64 |
| 374 | 382 | mfns = alloc_p2m_page(); |
| 375 | 383 | copy_page(mfns, xen_p2m_addr + pfn); |
| 376 | | -#else |
| 377 | | - mfns = xen_p2m_addr + pfn; |
| 378 | | -#endif |
| 379 | 384 | ptep = populate_extra_pte((unsigned long)(p2m + pfn)); |
| 380 | 385 | set_pte(ptep, |
| 381 | 386 | pfn_pte(PFN_DOWN(__pa(mfns)), PAGE_KERNEL)); |

| Old | New | Code |
|---|---|---|
| 458 | 463 | * Allocate new pmd(s). It is checked whether the old pmd is still in place. |
| 459 | 464 | * If not, nothing is changed. This is okay as the only reason for allocating |
| 460 | 465 | * a new pmd is to replace p2m_missing_pte or p2m_identity_pte by a individual |
| 461 | | - * pmd. In case of PAE/x86-32 there are multiple pmds to allocate! |
| | 466 | + * pmd. |
| 462 | 467 | */ |
| 463 | 468 | static pte_t *alloc_p2m_pmd(unsigned long addr, pte_t *pte_pg) |
| 464 | 469 | { |

| Old | New | Code |
|---|---|---|
| 647 | 652 | pte_t *ptep; |
| 648 | 653 | unsigned int level; |
| 649 | 654 | |
| 650 | | - if (unlikely(pfn >= xen_p2m_size)) { |
| 651 | | - BUG_ON(mfn != INVALID_P2M_ENTRY); |
| 652 | | - return true; |
| 653 | | - } |
| | 655 | + /* Only invalid entries allowed above the highest p2m covered frame. */ |
| | 656 | + if (unlikely(pfn >= xen_p2m_size)) |
| | 657 | + return mfn == INVALID_P2M_ENTRY; |
| 654 | 658 | |
| 655 | 659 | /* |
| 656 | 660 | * The interface requires atomic updates on p2m elements. |
| 657 | | - * xen_safe_write_ulong() is using __put_user which does an atomic |
| 658 | | - * store via asm(). |
| | 661 | + * xen_safe_write_ulong() is using an atomic store via asm(). |
| 659 | 662 | */ |
| 660 | 663 | if (likely(!xen_safe_write_ulong(xen_p2m_addr + pfn, mfn))) |
| 661 | 664 | return true; |

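Why `return mfn == INVALID_P2M_ENTRY;` is enough here: the public wrapper set_phys_to_machine() already treats a false return from __set_phys_to_machine() as "allocate a real p2m page and try again", so such writes are reported to the caller, which either retries after allocating or propagates the failure, instead of hitting a BUG_ON(). Roughly as follows (quoted from the same file from memory, so the exact revision may differ):

```c
bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
	if (unlikely(!__set_phys_to_machine(pfn, mfn))) {
		/* Fast path failed: allocate a writable p2m page and retry once. */
		if (!alloc_p2m(pfn))
			return false;

		return __set_phys_to_machine(pfn, mfn);
	}

	return true;
}
```
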
| Old | New | Code |
|---|---|---|
| 845 | 848 | static int __init xen_p2m_debugfs(void) |
| 846 | 849 | { |
| 847 | 850 | struct dentry *d_xen = xen_init_debugfs(); |
| 848 | | - |
| 849 | | - if (d_xen == NULL) |
| 850 | | - return -ENOMEM; |
| 851 | 851 | |
| 852 | 852 | d_mmu_debug = debugfs_create_dir("mmu", d_xen); |
| 853 | 853 | |

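The dropped NULL check in the last hunk matches the general debugfs convention: callers are not expected to inspect what the creation helpers return, and later debugfs calls accept an error-valued parent without blowing up. A minimal sketch of that style (the file and variable names below are made up for the example, not taken from p2m.c):

```c
#include <linux/debugfs.h>

static bool p2m_checks_enabled;		/* made-up knob, for illustration only */
static struct dentry *d_mmu_debug;

static void mmu_debugfs_init(struct dentry *d_xen)
{
	/* No return-value checks: debugfs copes with a failed parent itself. */
	d_mmu_debug = debugfs_create_dir("mmu", d_xen);
	debugfs_create_bool("p2m_checks", 0600, d_mmu_debug, &p2m_checks_enabled);
}
```
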