
hc
2024-05-10 748e4f3d702def1a4bff191e0cf93b6a05340f01
kernel/arch/x86/xen/p2m.c
@@ -1,3 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
+
 /*
  * Xen leaves the responsibility for maintaining p2m mappings to the
  * guests themselves, but it must also access and update the p2m array
@@ -65,7 +67,7 @@
 #include <linux/hash.h>
 #include <linux/sched.h>
 #include <linux/seq_file.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 
@@ -96,8 +98,8 @@
 unsigned long xen_max_p2m_pfn __read_mostly;
 EXPORT_SYMBOL_GPL(xen_max_p2m_pfn);
 
-#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG_LIMIT
-#define P2M_LIMIT CONFIG_XEN_BALLOON_MEMORY_HOTPLUG_LIMIT
+#ifdef CONFIG_XEN_MEMORY_HOTPLUG_LIMIT
+#define P2M_LIMIT CONFIG_XEN_MEMORY_HOTPLUG_LIMIT
 #else
 #define P2M_LIMIT 0
 #endif
@@ -179,8 +181,15 @@
 
 static void * __ref alloc_p2m_page(void)
 {
-	if (unlikely(!slab_is_available()))
-		return alloc_bootmem_align(PAGE_SIZE, PAGE_SIZE);
+	if (unlikely(!slab_is_available())) {
+		void *ptr = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+
+		if (!ptr)
+			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+			      __func__, PAGE_SIZE, PAGE_SIZE);
+
+		return ptr;
+	}
 
 	return (void *)__get_free_page(GFP_KERNEL);
 }
@@ -188,7 +197,7 @@
 static void __ref free_p2m_page(void *p)
 {
 	if (unlikely(!slab_is_available())) {
-		free_bootmem((unsigned long)p, PAGE_SIZE);
+		memblock_free((unsigned long)p, PAGE_SIZE);
 		return;
 	}
 
@@ -370,12 +379,8 @@
 
 		if (type == P2M_TYPE_PFN || i < chunk) {
 			/* Use initial p2m page contents. */
-#ifdef CONFIG_X86_64
 			mfns = alloc_p2m_page();
 			copy_page(mfns, xen_p2m_addr + pfn);
-#else
-			mfns = xen_p2m_addr + pfn;
-#endif
 			ptep = populate_extra_pte((unsigned long)(p2m + pfn));
 			set_pte(ptep,
 				pfn_pte(PFN_DOWN(__pa(mfns)), PAGE_KERNEL));
@@ -458,7 +463,7 @@
  * Allocate new pmd(s). It is checked whether the old pmd is still in place.
  * If not, nothing is changed. This is okay as the only reason for allocating
  * a new pmd is to replace p2m_missing_pte or p2m_identity_pte by a individual
- * pmd. In case of PAE/x86-32 there are multiple pmds to allocate!
+ * pmd.
  */
 static pte_t *alloc_p2m_pmd(unsigned long addr, pte_t *pte_pg)
 {
@@ -647,15 +652,13 @@
 	pte_t *ptep;
 	unsigned int level;
 
-	if (unlikely(pfn >= xen_p2m_size)) {
-		BUG_ON(mfn != INVALID_P2M_ENTRY);
-		return true;
-	}
+	/* Only invalid entries allowed above the highest p2m covered frame. */
+	if (unlikely(pfn >= xen_p2m_size))
+		return mfn == INVALID_P2M_ENTRY;
 
 	/*
 	 * The interface requires atomic updates on p2m elements.
-	 * xen_safe_write_ulong() is using __put_user which does an atomic
-	 * store via asm().
+	 * xen_safe_write_ulong() is using an atomic store via asm().
 	 */
 	if (likely(!xen_safe_write_ulong(xen_p2m_addr + pfn, mfn)))
 		return true;
@@ -845,9 +848,6 @@
 static int __init xen_p2m_debugfs(void)
 {
 	struct dentry *d_xen = xen_init_debugfs();
-
-	if (d_xen == NULL)
-		return -ENOMEM;
 
 	d_mmu_debug = debugfs_create_dir("mmu", d_xen);
 