From 61598093bbdd283a7edc367d900f223070ead8d2 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Fri, 10 May 2024 07:43:03 +0000
Subject: [PATCH] add ax88772C AX88772C_eeprom_tools

---
 kernel/kernel/kexec_file.c | 282 ++++++++++++++++++++++++++++++++++++++-----------------
 1 files changed, 194 insertions(+), 88 deletions(-)

diff --git a/kernel/kernel/kexec_file.c b/kernel/kernel/kexec_file.c
index 89d41c0..804dc50 100644
--- a/kernel/kernel/kexec_file.c
+++ b/kernel/kernel/kexec_file.c
@@ -1,12 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * kexec: kexec_file_load system call
  *
  * Copyright (C) 2014 Red Hat Inc.
  * Authors:
  *      Vivek Goyal <vgoyal@redhat.com>
- *
- * This source code is licensed under the GNU General Public License,
- * Version 2. See the file COPYING for more details.
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -16,6 +14,7 @@
 #include <linux/file.h>
 #include <linux/slab.h>
 #include <linux/kexec.h>
+#include <linux/memblock.h>
 #include <linux/mutex.h>
 #include <linux/list.h>
 #include <linux/fs.h>
@@ -25,11 +24,19 @@
 #include <linux/elf.h>
 #include <linux/elfcore.h>
 #include <linux/kernel.h>
-#include <linux/kexec.h>
-#include <linux/slab.h>
+#include <linux/kernel_read_file.h>
 #include <linux/syscalls.h>
 #include <linux/vmalloc.h>
 #include "kexec_internal.h"
+
+#ifdef CONFIG_KEXEC_SIG
+static bool sig_enforce = IS_ENABLED(CONFIG_KEXEC_SIG_FORCE);
+
+void set_kexec_sig_enforced(void)
+{
+	sig_enforce = true;
+}
+#endif
 
 static int kexec_calculate_store_digests(struct kimage *image);
 
@@ -78,7 +85,7 @@
 	return kexec_image_load_default(image);
 }
 
-static int kexec_image_post_load_cleanup_default(struct kimage *image)
+int kexec_image_post_load_cleanup_default(struct kimage *image)
 {
 	if (!image->fops || !image->fops->cleanup)
 		return 0;
@@ -91,7 +98,7 @@
 	return kexec_image_post_load_cleanup_default(image);
 }
 
-#ifdef CONFIG_KEXEC_VERIFY_SIG
+#ifdef CONFIG_KEXEC_SIG
 static int kexec_image_verify_sig_default(struct kimage *image, void *buf,
 					  unsigned long buf_len)
 {
@@ -109,40 +116,6 @@
 	return kexec_image_verify_sig_default(image, buf, buf_len);
 }
 #endif
-
-/*
- * arch_kexec_apply_relocations_add - apply relocations of type RELA
- * @pi:		Purgatory to be relocated.
- * @section:	Section relocations applying to.
- * @relsec:	Section containing RELAs.
- * @symtab:	Corresponding symtab.
- *
- * Return: 0 on success, negative errno on error.
- */
-int __weak
-arch_kexec_apply_relocations_add(struct purgatory_info *pi, Elf_Shdr *section,
-				 const Elf_Shdr *relsec, const Elf_Shdr *symtab)
-{
-	pr_err("RELA relocation unsupported.\n");
-	return -ENOEXEC;
-}
-
-/*
- * arch_kexec_apply_relocations - apply relocations of type REL
- * @pi:		Purgatory to be relocated.
- * @section:	Section relocations applying to.
- * @relsec:	Section containing RELs.
- * @symtab:	Corresponding symtab.
- *
- * Return: 0 on success, negative errno on error.
- */
-int __weak
-arch_kexec_apply_relocations(struct purgatory_info *pi, Elf_Shdr *section,
-			     const Elf_Shdr *relsec, const Elf_Shdr *symtab)
-{
-	pr_err("REL relocation unsupported.\n");
-	return -ENOEXEC;
-}
 
 /*
  * Free up memory used by kernel, initrd, and command line. This is temporary
@@ -185,6 +158,37 @@
 	image->image_loader_data = NULL;
 }
 
+#ifdef CONFIG_KEXEC_SIG
+static int
+kimage_validate_signature(struct kimage *image)
+{
+	int ret;
+
+	ret = arch_kexec_kernel_verify_sig(image, image->kernel_buf,
+					   image->kernel_buf_len);
+	if (ret) {
+
+		if (sig_enforce) {
+			pr_notice("Enforced kernel signature verification failed (%d).\n", ret);
+			return ret;
+		}
+
+		/*
+		 * If IMA is guaranteed to appraise a signature on the kexec
+		 * image, permit it even if the kernel is otherwise locked
+		 * down.
+		 */
+		if (!ima_appraise_signature(READING_KEXEC_IMAGE) &&
+		    security_locked_down(LOCKDOWN_KEXEC))
+			return -EPERM;
+
+		pr_debug("kernel signature verification failed (%d).\n", ret);
+	}
+
+	return 0;
+}
+#endif
+
 /*
  * In file mode list of segments is prepared by kernel. Copy relevant
  * data from user space, do error checking, prepare segment list
@@ -194,18 +198,14 @@
 			     const char __user *cmdline_ptr,
 			     unsigned long cmdline_len, unsigned flags)
 {
-	int ret = 0;
+	int ret;
 	void *ldata;
-	loff_t size;
 
-	ret = kernel_read_file_from_fd(kernel_fd, &image->kernel_buf,
-				       &size, INT_MAX, READING_KEXEC_IMAGE);
-	if (ret)
+	ret = kernel_read_file_from_fd(kernel_fd, 0, &image->kernel_buf,
+				       INT_MAX, NULL, READING_KEXEC_IMAGE);
+	if (ret < 0)
 		return ret;
-	image->kernel_buf_len = size;
-
-	/* IMA needs to pass the measurement list to the next kernel. */
-	ima_add_kexec_buffer(image);
+	image->kernel_buf_len = ret;
 
 	/* Call arch image probe handlers */
 	ret = arch_kexec_kernel_image_probe(image, image->kernel_buf,
@@ -213,23 +213,21 @@
 	if (ret)
 		goto out;
 
-#ifdef CONFIG_KEXEC_VERIFY_SIG
-	ret = arch_kexec_kernel_verify_sig(image, image->kernel_buf,
-					   image->kernel_buf_len);
-	if (ret) {
-		pr_debug("kernel signature verification failed.\n");
+#ifdef CONFIG_KEXEC_SIG
+	ret = kimage_validate_signature(image);
+
+	if (ret)
 		goto out;
-	}
-	pr_debug("kernel signature verification successful.\n");
 #endif
 	/* It is possible that there no initramfs is being loaded */
 	if (!(flags & KEXEC_FILE_NO_INITRAMFS)) {
-		ret = kernel_read_file_from_fd(initrd_fd, &image->initrd_buf,
-					       &size, INT_MAX,
+		ret = kernel_read_file_from_fd(initrd_fd, 0, &image->initrd_buf,
+					       INT_MAX, NULL,
 					       READING_KEXEC_INITRAMFS);
-		if (ret)
+		if (ret < 0)
 			goto out;
-		image->initrd_buf_len = size;
+		image->initrd_buf_len = ret;
+		ret = 0;
 	}
 
 	if (cmdline_len) {
@@ -247,7 +245,13 @@
 			ret = -EINVAL;
 			goto out;
 		}
+
+		ima_kexec_cmdline(kernel_fd, image->cmdline_buf,
+				  image->cmdline_buf_len - 1);
 	}
+
+	/* IMA needs to pass the measurement list to the next kernel. */
+	ima_add_kexec_buffer(image);
 
 	/* Call arch image load handlers */
 	ldata = arch_kexec_kernel_image_load(image);
@@ -339,7 +343,7 @@
 
 	image = NULL;
 
-	if (!mutex_trylock(&kexec_mutex))
+	if (!kexec_trylock())
 		return -EBUSY;
 
 	dest_image = &kexec_image;
@@ -396,6 +400,10 @@
 
 	kimage_terminate(image);
 
+	ret = machine_kexec_post_load(image);
+	if (ret)
+		goto out;
+
 	/*
 	 * Free up any temporary buffers allocated which are not needed
 	 * after image has been loaded
@@ -407,7 +415,7 @@
 	if ((flags & KEXEC_FILE_ON_CRASH) && kexec_crash_image)
 		arch_kexec_protect_crashkres();
 
-	mutex_unlock(&kexec_mutex);
+	kexec_unlock();
 	kimage_free(image);
 	return ret;
 }
@@ -491,6 +499,11 @@
 	unsigned long sz = end - start + 1;
 
 	/* Returning 0 will take to next memory range */
+
+	/* Don't use memory that will be detected and handled by a driver. */
+	if (res->flags & IORESOURCE_SYSRAM_DRIVER_MANAGED)
+		return 0;
+
 	if (sz < kbuf->memsz)
 		return 0;
 
@@ -506,8 +519,60 @@
 	return locate_mem_hole_bottom_up(start, end, kbuf);
 }
 
+#ifdef CONFIG_ARCH_KEEP_MEMBLOCK
+static int kexec_walk_memblock(struct kexec_buf *kbuf,
+			       int (*func)(struct resource *, void *))
+{
+	int ret = 0;
+	u64 i;
+	phys_addr_t mstart, mend;
+	struct resource res = { };
+
+	if (kbuf->image->type == KEXEC_TYPE_CRASH)
+		return func(&crashk_res, kbuf);
+
+	if (kbuf->top_down) {
+		for_each_free_mem_range_reverse(i, NUMA_NO_NODE, MEMBLOCK_NONE,
+						&mstart, &mend, NULL) {
+			/*
+			 * In memblock, end points to the first byte after the
+			 * range while in kexec, end points to the last byte
+			 * in the range.
+			 */
+			res.start = mstart;
+			res.end = mend - 1;
+			ret = func(&res, kbuf);
+			if (ret)
+				break;
+		}
+	} else {
+		for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
+					&mstart, &mend, NULL) {
+			/*
+			 * In memblock, end points to the first byte after the
+			 * range while in kexec, end points to the last byte
+			 * in the range.
+			 */
+			res.start = mstart;
+			res.end = mend - 1;
+			ret = func(&res, kbuf);
+			if (ret)
+				break;
+		}
+	}
+
+	return ret;
+}
+#else
+static int kexec_walk_memblock(struct kexec_buf *kbuf,
+			       int (*func)(struct resource *, void *))
+{
+	return 0;
+}
+#endif
+
 /**
- * arch_kexec_walk_mem - call func(data) on free memory regions
+ * kexec_walk_resources - call func(data) on free memory regions
  * @kbuf:	Context info for the search. Also passed to @func.
  * @func:	Function to call for each memory region.
  *
@@ -515,8 +580,8 @@
  * and that value will be returned. If all free regions are visited without
  * func returning non-zero, then zero will be returned.
  */
-int __weak arch_kexec_walk_mem(struct kexec_buf *kbuf,
-			       int (*func)(struct resource *, void *))
+static int kexec_walk_resources(struct kexec_buf *kbuf,
+				int (*func)(struct resource *, void *))
 {
 	if (kbuf->image->type == KEXEC_TYPE_CRASH)
 		return walk_iomem_res_desc(crashk_res.desc,
@@ -539,9 +604,29 @@
 {
 	int ret;
 
-	ret = arch_kexec_walk_mem(kbuf, locate_mem_hole_callback);
+	/* Arch knows where to place */
+	if (kbuf->mem != KEXEC_BUF_MEM_UNKNOWN)
+		return 0;
+
+	if (!IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK))
+		ret = kexec_walk_resources(kbuf, locate_mem_hole_callback);
+	else
+		ret = kexec_walk_memblock(kbuf, locate_mem_hole_callback);
 
 	return ret == 1 ? 0 : -EADDRNOTAVAIL;
+}
+
+/**
+ * arch_kexec_locate_mem_hole - Find free memory to place the segments.
+ * @kbuf:                       Parameters for the memory search.
+ *
+ * On success, kbuf->mem will have the start address of the memory region found.
+ *
+ * Return: 0 on success, negative errno on error.
+ */
+int __weak arch_kexec_locate_mem_hole(struct kexec_buf *kbuf)
+{
+	return kexec_locate_mem_hole(kbuf);
 }
 
 /**
@@ -556,7 +641,6 @@
  */
 int kexec_add_buffer(struct kexec_buf *kbuf)
 {
-
 	struct kexec_segment *ksegment;
 	int ret;
 
@@ -584,7 +668,7 @@
 	kbuf->buf_align = max(kbuf->buf_align, PAGE_SIZE);
 
 	/* Walk the RAM ranges and allocate a suitable range for the buffer */
-	ret = kexec_locate_mem_hole(kbuf);
+	ret = arch_kexec_locate_mem_hole(kbuf);
 	if (ret)
 		return ret;
 
@@ -637,7 +721,6 @@
 	}
 
 	desc->tfm   = tfm;
-	desc->flags = 0;
 
 	ret = crypto_shash_init(desc);
 	if (ret < 0)
@@ -827,10 +910,22 @@
 		}
 
 		offset = ALIGN(offset, align);
+
+		/*
+		 * Check if the segment contains the entry point, if so,
+		 * calculate the value of image->start based on it.
+		 * If the compiler has produced more than one .text section
+		 * (Eg: .text.hot), they are generally after the main .text
+		 * section, and they shall not be used to calculate
+		 * image->start. So do not re-calculate image->start if it
+		 * is not set to the initial value, and warn the user so they
+		 * have a chance to fix their purgatory's linker script.
+		 */
 		if (sechdrs[i].sh_flags & SHF_EXECINSTR &&
 		    pi->ehdr->e_entry >= sechdrs[i].sh_addr &&
 		    pi->ehdr->e_entry < (sechdrs[i].sh_addr
-					 + sechdrs[i].sh_size)) {
+					 + sechdrs[i].sh_size) &&
+		    !WARN_ON(kbuf->image->start != pi->ehdr->e_entry)) {
 			kbuf->image->start -= sechdrs[i].sh_addr;
 			kbuf->image->start += kbuf->mem + offset;
 		}
@@ -1069,24 +1164,26 @@
 			    unsigned long long mstart, unsigned long long mend)
 {
 	int i, j;
-	unsigned long long start, end;
+	unsigned long long start, end, p_start, p_end;
 	struct crash_mem_range temp_range = {0, 0};
 
 	for (i = 0; i < mem->nr_ranges; i++) {
 		start = mem->ranges[i].start;
 		end = mem->ranges[i].end;
+		p_start = mstart;
+		p_end = mend;
 
 		if (mstart > end || mend < start)
 			continue;
 
 		/* Truncate any area outside of range */
 		if (mstart < start)
-			mstart = start;
+			p_start = start;
 		if (mend > end)
-			mend = end;
+			p_end = end;
 
 		/* Found completely overlapping range */
-		if (mstart == start && mend == end) {
+		if (p_start == start && p_end == end) {
 			mem->ranges[i].start = 0;
 			mem->ranges[i].end = 0;
 			if (i < mem->nr_ranges - 1) {
@@ -1097,20 +1194,29 @@
 					mem->ranges[j].end =
 							mem->ranges[j+1].end;
 				}
+
+				/*
+				 * Continue to check if there are another overlapping ranges
+				 * from the current position because of shifting the above
+				 * mem ranges.
+				 */
+				i--;
+				mem->nr_ranges--;
+				continue;
 			}
 			mem->nr_ranges--;
 			return 0;
 		}
 
-		if (mstart > start && mend < end) {
+		if (p_start > start && p_end < end) {
 			/* Split original range */
-			mem->ranges[i].end = mstart - 1;
-			temp_range.start = mend + 1;
+			mem->ranges[i].end = p_start - 1;
+			temp_range.start = p_end + 1;
 			temp_range.end = end;
-		} else if (mstart != start)
-			mem->ranges[i].end = mstart - 1;
+		} else if (p_start != start)
+			mem->ranges[i].end = p_start - 1;
 		else
-			mem->ranges[i].start = mend + 1;
+			mem->ranges[i].start = p_end + 1;
 		break;
 	}
 
@@ -1147,7 +1253,7 @@
 	unsigned long long notes_addr;
 	unsigned long mstart, mend;
 
-	/* extra phdr for vmcoreinfo elf note */
+	/* extra phdr for vmcoreinfo ELF note */
 	nr_phdr = nr_cpus + 1;
 	nr_phdr += mem->nr_ranges;
 
@@ -1155,7 +1261,7 @@
 	 * kexec-tools creates an extra PT_LOAD phdr for kernel text mapping
 	 * area (for example, ffffffff80000000 - ffffffffa0000000 on x86_64).
 	 * I think this is required by tools like gdb. So same physical
-	 * memory will be mapped in two elf headers. One will contain kernel
+	 * memory will be mapped in two ELF headers. One will contain kernel
 	 * text virtual addresses and other will have __va(physical) addresses.
 	 */
 
@@ -1182,7 +1288,7 @@
 	ehdr->e_ehsize = sizeof(Elf64_Ehdr);
 	ehdr->e_phentsize = sizeof(Elf64_Phdr);
 
-	/* Prepare one phdr of type PT_NOTE for each present cpu */
+	/* Prepare one phdr of type PT_NOTE for each present CPU */
 	for_each_present_cpu(cpu) {
 		phdr->p_type = PT_NOTE;
 		notes_addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpu));
@@ -1203,7 +1309,7 @@
 	if (kernel_map) {
 		phdr->p_type = PT_LOAD;
 		phdr->p_flags = PF_R|PF_W|PF_X;
-		phdr->p_vaddr = (Elf64_Addr)_text;
+		phdr->p_vaddr = (unsigned long) _text;
 		phdr->p_filesz = phdr->p_memsz = _end - _text;
 		phdr->p_offset = phdr->p_paddr = __pa_symbol(_text);
 		ehdr->e_phnum++;
@@ -1220,14 +1326,14 @@
 		phdr->p_offset  = mstart;
 
 		phdr->p_paddr = mstart;
-		phdr->p_vaddr = (unsigned long long) __va(mstart);
+		phdr->p_vaddr = (unsigned long) __va(mstart);
 		phdr->p_filesz = phdr->p_memsz = mend - mstart + 1;
 		phdr->p_align = 0;
 		ehdr->e_phnum++;
-		phdr++;
-		pr_debug("Crash PT_LOAD elf header. phdr=%p vaddr=0x%llx, paddr=0x%llx, sz=0x%llx e_phnum=%d p_offset=0x%llx\n",
+		pr_debug("Crash PT_LOAD ELF header. phdr=%p vaddr=0x%llx, paddr=0x%llx, sz=0x%llx e_phnum=%d p_offset=0x%llx\n",
 			phdr, phdr->p_vaddr, phdr->p_paddr, phdr->p_filesz,
 			ehdr->e_phnum, phdr->p_offset);
+		phdr++;
 	}
 
 	*addr = buf;
-- 
Gitblit v1.6.2
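
Note on the buffer-placement change above: with this patch, a caller that leaves kbuf.mem set to KEXEC_BUF_MEM_UNKNOWN gets the new kexec_locate_mem_hole() behaviour, which walks memblock when CONFIG_ARCH_KEEP_MEMBLOCK is set and the iomem resource tree otherwise. The sketch below is illustrative only and is not part of the patch; example_load_blob(), blob and blob_len are made-up names, while the struct kexec_buf fields and kexec_add_buffer() are used as they appear in this file.

#include <linux/kexec.h>

static int example_load_blob(struct kimage *image, void *blob,
			     unsigned long blob_len)
{
	struct kexec_buf kbuf = {
		.image     = image,
		.buffer    = blob,
		.bufsz     = blob_len,
		.memsz     = blob_len,
		.buf_align = PAGE_SIZE,
		.buf_min   = 0,
		.buf_max   = ULONG_MAX,
		.top_down  = true,
		/*
		 * KEXEC_BUF_MEM_UNKNOWN asks kexec_locate_mem_hole() to pick
		 * the address, calling locate_mem_hole_callback() on each
		 * free range it walks.
		 */
		.mem       = KEXEC_BUF_MEM_UNKNOWN,
	};
	int ret;

	ret = kexec_add_buffer(&kbuf);
	if (ret)
		return ret;

	/* On success, kbuf.mem holds the physical address that was chosen. */
	pr_debug("example blob placed at 0x%lx\n", kbuf.mem);
	return 0;
}

Architectures that need a fixed placement can instead preset kbuf.mem before calling kexec_add_buffer(), or override the weak arch_kexec_locate_mem_hole() introduced by this patch.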