From 01573e231f18eb2d99162747186f59511f56b64d Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Fri, 08 Dec 2023 10:40:48 +0000
Subject: [PATCH] Remove rt

---
 kernel/arch/x86/xen/setup.c | 78 +++++++++++++--------------------------
 1 file changed, 26 insertions(+), 52 deletions(-)

diff --git a/kernel/arch/x86/xen/setup.c b/kernel/arch/x86/xen/setup.c
index 075ed47..1f80dd3 100644
--- a/kernel/arch/x86/xen/setup.c
+++ b/kernel/arch/x86/xen/setup.c
@@ -12,6 +12,7 @@
 #include <linux/memblock.h>
 #include <linux/cpuidle.h>
 #include <linux/cpufreq.h>
+#include <linux/memory_hotplug.h>
 
 #include <asm/elf.h>
 #include <asm/vdso.h>
@@ -19,6 +20,7 @@
 #include <asm/setup.h>
 #include <asm/acpi.h>
 #include <asm/numa.h>
+#include <asm/idtentry.h>
 #include <asm/xen/hypervisor.h>
 #include <asm/xen/hypercall.h>
 
@@ -30,7 +32,6 @@
 #include <xen/features.h>
 #include <xen/hvc-console.h>
 #include "xen-ops.h"
-#include "vdso.h"
 #include "mmu.h"
 
 #define GB(x) ((uint64_t)(x) * 1024 * 1024 * 1024)
@@ -58,13 +59,13 @@
 } xen_remap_buf __initdata __aligned(PAGE_SIZE);
 static unsigned long xen_remap_mfn __initdata = INVALID_P2M_ENTRY;
 
-/* 
+/*
  * The maximum amount of extra memory compared to the base size. The
  * main scaling factor is the size of struct page. At extreme ratios
  * of base:extra, all the base memory can be filled with page
  * structures for the extra memory, leaving no space for anything
  * else.
- * 
+ *
  * 10x seems like a reasonable balance between scaling flexibility and
  * leaving a practically usable system.
  */
@@ -411,7 +412,7 @@
 
 		remap_range_size = xen_find_pfn_range(&remap_pfn);
 		if (!remap_range_size) {
-			pr_warning("Unable to find available pfn range, not remapping identity pages\n");
+			pr_warn("Unable to find available pfn range, not remapping identity pages\n");
 			xen_set_identity_and_release_chunk(cur_pfn,
 						cur_pfn + left, nr_pages);
 			break;
@@ -493,7 +494,7 @@
  * The remap information (which mfn remap to which pfn) is contained in the
  * to be remapped memory itself in a linked list anchored at xen_remap_mfn.
  * This scheme allows to remap the different chunks in arbitrary order while
- * the resulting mapping will be independant from the order.
+ * the resulting mapping will be independent from the order.
  */
 void __init xen_remap_memory(void)
 {
@@ -543,13 +544,10 @@
 {
 	unsigned long limit;
 
-#ifdef CONFIG_X86_32
-	limit = GB(64) / PAGE_SIZE;
-#else
 	limit = MAXMEM / PAGE_SIZE;
 	if (!xen_initial_domain() && xen_512gb_limit)
 		limit = GB(512) / PAGE_SIZE;
-#endif
+
 	return limit;
 }
 
@@ -589,6 +587,14 @@
 	if (type == E820_TYPE_RAM) {
 		start = PAGE_ALIGN(start);
 		end &= ~((phys_addr_t)PAGE_SIZE - 1);
+#ifdef CONFIG_MEMORY_HOTPLUG
+		/*
+		 * Don't allow adding memory not in E820 map while booting the
+		 * system. Once the balloon driver is up it will remove that
+		 * restriction again.
+		 */
+		max_mem_size = end;
+#endif
 	}
 
 	e820__range_add(start, end - start, type);
@@ -712,17 +718,8 @@
 	if (!xen_is_e820_reserved(start, size))
 		return;
 
-#ifdef CONFIG_X86_32
-	/*
-	 * Relocating the p2m on 32 bit system to an arbitrary virtual address
-	 * is not supported, so just give up.
-	 */
-	xen_raw_console_write("Xen hypervisor allocated p2m list conflicts with E820 map\n");
-	BUG();
-#else
 	xen_relocate_p2m();
 	memblock_free(start, size);
-#endif
 }
 
 /**
@@ -747,6 +744,10 @@
 
 	memmap.nr_entries = ARRAY_SIZE(xen_e820_table.entries);
 	set_xen_guest_handle(memmap.buffer, xen_e820_table.entries);
+
+#if defined(CONFIG_MEMORY_HOTPLUG) && defined(CONFIG_XEN_BALLOON)
+	xen_saved_max_mem_size = max_mem_size;
+#endif
 
 	op = xen_initial_domain() ?
 		XENMEM_machine_memory_map :
@@ -790,17 +791,10 @@
 
 	/*
 	 * Clamp the amount of extra memory to a EXTRA_MEM_RATIO
-	 * factor the base size. On non-highmem systems, the base
-	 * size is the full initial memory allocation; on highmem it
-	 * is limited to the max size of lowmem, so that it doesn't
-	 * get completely filled.
+	 * factor the base size.
 	 *
 	 * Make sure we have no memory above max_pages, as this area
 	 * isn't handled by the p2m management.
-	 *
-	 * In principle there could be a problem in lowmem systems if
-	 * the initial memory is also very large with respect to
-	 * lowmem, but we won't try to deal with that here.
 	 */
 	extra_pages = min3(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
 			   extra_pages, max_pages - max_pfn);
@@ -907,20 +901,6 @@
 	return "Xen";
 }
 
-/*
- * Set the bit indicating "nosegneg" library variants should be used.
- * We only need to bother in pure 32-bit mode; compat 32-bit processes
- * can have un-truncated segments, so wrapping around is allowed.
- */
-static void __init fiddle_vdso(void)
-{
-#ifdef CONFIG_X86_32
-	u32 *mask = vdso_image_32.data +
-		vdso_image_32.sym_VDSO32_NOTE_MASK;
-	*mask |= 1 << VDSO_NOTE_NONEGSEG_BIT;
-#endif
-}
-
 static int register_callback(unsigned type, const void *func)
 {
 	struct callback_register callback = {
@@ -937,26 +917,21 @@
 	int ret;
 	unsigned sysenter_feature;
 
-#ifdef CONFIG_X86_32
-	sysenter_feature = X86_FEATURE_SEP;
-#else
 	sysenter_feature = X86_FEATURE_SYSENTER32;
-#endif
 
 	if (!boot_cpu_has(sysenter_feature))
 		return;
 
-	ret = register_callback(CALLBACKTYPE_sysenter, xen_sysenter_target);
+	ret = register_callback(CALLBACKTYPE_sysenter, xen_entry_SYSENTER_compat);
 	if(ret != 0)
 		setup_clear_cpu_cap(sysenter_feature);
 }
 
 void xen_enable_syscall(void)
 {
-#ifdef CONFIG_X86_64
 	int ret;
 
-	ret = register_callback(CALLBACKTYPE_syscall, xen_syscall_target);
+	ret = register_callback(CALLBACKTYPE_syscall, xen_entry_SYSCALL_64);
 	if (ret != 0) {
 		printk(KERN_ERR "Failed to set syscall callback: %d\n", ret);
 		/* Pretty fatal; 64-bit userspace has no other
@@ -965,14 +940,13 @@
 
 	if (boot_cpu_has(X86_FEATURE_SYSCALL32)) {
 		ret = register_callback(CALLBACKTYPE_syscall32,
-					xen_syscall32_target);
+					xen_entry_SYSCALL_compat);
 		if (ret != 0)
 			setup_clear_cpu_cap(X86_FEATURE_SYSCALL32);
 	}
-#endif /* CONFIG_X86_64 */
 }
 
-void __init xen_pvmmu_arch_setup(void)
+static void __init xen_pvmmu_arch_setup(void)
 {
 	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments);
 	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_writable_pagetables);
@@ -980,7 +954,8 @@
 	HYPERVISOR_vm_assist(VMASST_CMD_enable,
 			     VMASST_TYPE_pae_extended_cr3);
 
-	if (register_callback(CALLBACKTYPE_event, xen_hypervisor_callback) ||
+	if (register_callback(CALLBACKTYPE_event,
+			      xen_asm_exc_xen_hypervisor_callback) ||
 	    register_callback(CALLBACKTYPE_failsafe, xen_failsafe_callback))
 		BUG();
 
@@ -1009,7 +984,6 @@
 	disable_cpuidle();
 	disable_cpufreq();
 	WARN_ON(xen_set_default_idle());
-	fiddle_vdso();
 #ifdef CONFIG_NUMA
 	numa_off = 1;
 #endif
-- 
Gitblit v1.6.2