From 102a0743326a03cd1a1202ceda21e175b7d3575c Mon Sep 17 00:00:00 2001 From: hc <hc@nodka.com> Date: Tue, 20 Feb 2024 01:20:52 +0000 Subject: [PATCH] kernel: module: update module loader with upstream fixes and Android vendor hooks --- kernel/kernel/module.c | 918 +++++++++++++++++++++++++++++++++++++++----------------- 1 file changed, 637 insertions(+), 281 deletions(-) diff --git a/kernel/kernel/module.c b/kernel/kernel/module.c index c273377..93fade9 100644 --- a/kernel/kernel/module.c +++ b/kernel/kernel/module.c @@ -1,24 +1,16 @@ +// SPDX-License-Identifier: GPL-2.0-or-later /* Copyright (C) 2002 Richard Henderson Copyright (C) 2001 Rusty Russell, 2002, 2010 Rusty Russell IBM. - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#define INCLUDE_VERMAGIC + #include <linux/export.h> #include <linux/extable.h> #include <linux/moduleloader.h> +#include <linux/module_signature.h> #include <linux/trace_events.h> #include <linux/init.h> #include <linux/kallsyms.h> @@ -26,6 +18,7 @@ #include <linux/fs.h> #include <linux/sysfs.h> #include <linux/kernel.h> +#include <linux/kernel_read_file.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/elf.h> @@ -70,15 +63,24 @@ #define CREATE_TRACE_POINTS #include <trace/events/module.h> +#undef CREATE_TRACE_POINTS +#include <trace/hooks/module.h> +#include <trace/hooks/memory.h> + #ifndef ARCH_SHF_SMALL #define ARCH_SHF_SMALL 0 #endif /* * Modules' sections will be aligned on page boundaries - * to ensure complete separation of code and data + * to ensure complete separation of code and data, but + * only when CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y */ +#ifdef CONFIG_ARCH_HAS_STRICT_MODULE_RWX # define debug_align(X) ALIGN(X, PAGE_SIZE) +#else +# define debug_align(X) (X) +#endif /* If this is set, the section belongs in the init part of the module */ #define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1)) @@ -90,8 +92,12 @@ * 3) module_addr_min/module_addr_max. * (delete and add uses RCU list operations). 
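 *
 * As a hedged reader-side sketch (illustrative only, not code added by
 * this patch): the lockdep_is_held() cond argument that the hunks below
 * pass to list_for_each_entry_rcu() lets one traversal be legal under
 * either module_mutex or the RCU sched read side:
 *
 *	struct module *mod;
 *
 *	rcu_read_lock_sched();
 *	list_for_each_entry_rcu(mod, &modules, list,
 *				lockdep_is_held(&module_mutex)) {
 *		if (mod->state == MODULE_STATE_UNFORMED)
 *			continue;
 *		pr_debug("%s\n", mod->name);
 *	}
 *	rcu_read_unlock_sched();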
*/ DEFINE_MUTEX(module_mutex); -EXPORT_SYMBOL_GPL(module_mutex); static LIST_HEAD(modules); + +/* Work queue for freeing init sections in success case */ +static void do_free_init(struct work_struct *w); +static DECLARE_WORK(init_free_wq, do_free_init); +static LLIST_HEAD(init_free_list); #ifdef CONFIG_MODULES_TREE_LOOKUP @@ -216,7 +222,8 @@ { struct module *mod; - list_for_each_entry_rcu(mod, &modules, list) { + list_for_each_entry_rcu(mod, &modules, list, + lockdep_is_held(&module_mutex)) { if (within_module(addr, mod)) return mod; } @@ -454,7 +461,8 @@ if (each_symbol_in_section(arr, ARRAY_SIZE(arr), NULL, fn, data)) return true; - list_for_each_entry_rcu(mod, &modules, list) { + list_for_each_entry_rcu(mod, &modules, list, + lockdep_is_held(&module_mutex)) { struct symsearch arr[] = { { mod->syms, mod->syms + mod->num_syms, mod->crcs, NOT_GPL_ONLY, false }, @@ -499,9 +507,9 @@ enum mod_license license; }; -static bool check_symbol(const struct symsearch *syms, - struct module *owner, - unsigned int symnum, void *data) +static bool check_exported_symbol(const struct symsearch *syms, + struct module *owner, + unsigned int symnum, void *data) { struct find_symbol_arg *fsa = data; @@ -552,17 +560,25 @@ #endif } -static int cmp_name(const void *va, const void *vb) +static const char *kernel_symbol_namespace(const struct kernel_symbol *sym) { - const char *a; - const struct kernel_symbol *b; - a = va; b = vb; - return strcmp(a, kernel_symbol_name(b)); +#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS + if (!sym->namespace_offset) + return NULL; + return offset_to_ptr(&sym->namespace_offset); +#else + return sym->namespace; +#endif } -static bool find_symbol_in_section(const struct symsearch *syms, - struct module *owner, - void *data) +static int cmp_name(const void *name, const void *sym) +{ + return strcmp(name, kernel_symbol_name(sym)); +} + +static bool find_exported_symbol_in_section(const struct symsearch *syms, + struct module *owner, + void *data) { struct find_symbol_arg *fsa = data; struct kernel_symbol *sym; @@ -570,13 +586,14 @@ sym = bsearch(fsa->name, syms->start, syms->stop - syms->start, sizeof(struct kernel_symbol), cmp_name); - if (sym != NULL && check_symbol(syms, owner, sym - syms->start, data)) + if (sym != NULL && check_exported_symbol(syms, owner, + sym - syms->start, data)) return true; return false; } -/* Find a symbol and return it, along with, (optional) crc and +/* Find an exported symbol and return it, along with, (optional) crc and * (optional) module which owns it. Needs preempt disabled or module_mutex. 
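 *
 * A minimal illustrative caller, mirroring __symbol_get() further down
 * rather than defining any new interface (the symbol name is an
 * arbitrary example):
 *
 *	const struct kernel_symbol *ks;
 *	struct module *owner;
 *
 *	preempt_disable();
 *	ks = find_symbol("printk", &owner, NULL, NULL, true, true);
 *	if (ks)
 *		pr_debug("found at %lx\n", kernel_symbol_value(ks));
 *	preempt_enable();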
*/ static const struct kernel_symbol *find_symbol(const char *name, struct module **owner, @@ -591,7 +608,7 @@ fsa.gplok = gplok; fsa.warn = warn; - if (each_symbol_section(find_symbol_in_section, &fsa)) { + if (each_symbol_section(find_exported_symbol_in_section, &fsa)) { if (owner) *owner = fsa.owner; if (crc) @@ -616,7 +633,8 @@ module_assert_mutex_or_preempt(); - list_for_each_entry_rcu(mod, &modules, list) { + list_for_each_entry_rcu(mod, &modules, list, + lockdep_is_held(&module_mutex)) { if (!even_unformed && mod->state == MODULE_STATE_UNFORMED) continue; if (strlen(mod->name) == len && !memcmp(mod->name, name, len)) @@ -630,7 +648,6 @@ module_assert_mutex(); return find_module_all(name, strlen(name), false); } -EXPORT_SYMBOL_GPL(find_module); #ifdef CONFIG_SMP @@ -796,6 +813,7 @@ MODINFO_ATTR(version); MODINFO_ATTR(srcversion); +MODINFO_ATTR(scmversion); static char last_unloaded_module[MODULE_NAME_LEN+1]; @@ -1258,6 +1276,7 @@ &module_uevent, &modinfo_version, &modinfo_srcversion, + &modinfo_scmversion, &modinfo_initstate, &modinfo_coresize, &modinfo_initsize, @@ -1388,6 +1407,59 @@ } #endif /* CONFIG_MODVERSIONS */ +static char *get_modinfo(const struct load_info *info, const char *tag); +static char *get_next_modinfo(const struct load_info *info, const char *tag, + char *prev); + +static int verify_namespace_is_imported(const struct load_info *info, + const struct kernel_symbol *sym, + struct module *mod) +{ + const char *namespace; + char *imported_namespace; + + namespace = kernel_symbol_namespace(sym); + if (namespace && namespace[0]) { + imported_namespace = get_modinfo(info, "import_ns"); + while (imported_namespace) { + if (strcmp(namespace, imported_namespace) == 0) + return 0; + imported_namespace = get_next_modinfo( + info, "import_ns", imported_namespace); + } +#ifdef CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS + pr_warn( +#else + pr_err( +#endif + "%s: module uses symbol (%s) from namespace %s, but does not import it.\n", + mod->name, kernel_symbol_name(sym), namespace); +#ifndef CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS + return -EINVAL; +#endif + } + return 0; +} + +static bool inherit_taint(struct module *mod, struct module *owner) +{ + if (!owner || !test_bit(TAINT_PROPRIETARY_MODULE, &owner->taints)) + return true; + + if (mod->using_gplonly_symbols) { + pr_err("%s: module using GPL-only symbols uses symbols from proprietary module %s.\n", + mod->name, owner->name); + return false; + } + + if (!test_bit(TAINT_PROPRIETARY_MODULE, &mod->taints)) { + pr_warn("%s: module uses symbols from proprietary module %s, inheriting taint.\n", + mod->name, owner->name); + set_bit(TAINT_PROPRIETARY_MODULE, &mod->taints); + } + return true; +} + /* Resolve a symbol for this module. I.e. if we find one, record usage. 
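 *
 * With the namespace enforcement added above, a module that consumes a
 * symbol exported via EXPORT_SYMBOL_NS_GPL() must carry a matching
 * "import_ns=" modinfo tag, normally generated by the standard macro
 * (SOME_NS is a placeholder namespace, not one defined by this patch):
 *
 *	MODULE_IMPORT_NS(SOME_NS);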
*/ static const struct kernel_symbol *resolve_symbol(struct module *mod, const struct load_info *info, @@ -1412,8 +1484,22 @@ if (!sym) goto unlock; + if (license == GPL_ONLY) + mod->using_gplonly_symbols = true; + + if (!inherit_taint(mod, owner)) { + sym = NULL; + goto getname; + } + if (!check_version(info, name, mod, crc)) { sym = ERR_PTR(-EINVAL); + goto getname; + } + + err = verify_namespace_is_imported(info, sym, mod); + if (err) { + sym = ERR_PTR(err); goto getname; } @@ -1469,7 +1555,7 @@ struct module_sect_attrs { struct attribute_group grp; unsigned int nsections; - struct module_sect_attr attrs[0]; + struct module_sect_attr attrs[]; }; #define MODULE_SECT_READ_SIZE (3 /* "0x", "\n" */ + (BITS_PER_LONG / 4)) @@ -1582,7 +1668,7 @@ struct module_notes_attrs { struct kobject *dir; unsigned int notes; - struct bin_attribute attrs[0]; + struct bin_attribute attrs[]; }; static ssize_t module_notes_read(struct file *filp, struct kobject *kobj, @@ -1915,7 +2001,6 @@ mod_sysfs_fini(mod); } -#ifdef CONFIG_ARCH_HAS_STRICT_MODULE_RWX /* * LKM RO/NX protection: protect module's text/ro-data * from modification and any data from execution. @@ -1929,6 +2014,14 @@ * * These values are always page-aligned (as is base) */ + +/* + * Since some arches are moving towards PAGE_KERNEL module allocations instead + * of PAGE_KERNEL_EXEC, keep frob_text() and module_enable_x() outside of the + * CONFIG_STRICT_MODULE_RWX block below because they are needed regardless of + * whether we are strict. + */ +#ifdef CONFIG_ARCH_HAS_STRICT_MODULE_RWX static void frob_text(const struct module_layout *layout, int (*set_memory)(unsigned long start, int num_pages)) { @@ -1937,6 +2030,15 @@ set_memory((unsigned long)layout->base, layout->text_size >> PAGE_SHIFT); } + +static void module_enable_x(const struct module *mod) +{ + frob_text(&mod->core_layout, set_memory_x); + frob_text(&mod->init_layout, set_memory_x); +} +#else /* !CONFIG_ARCH_HAS_STRICT_MODULE_RWX */ +static void module_enable_x(const struct module *mod) { } +#endif /* CONFIG_ARCH_HAS_STRICT_MODULE_RWX */ #ifdef CONFIG_STRICT_MODULE_RWX static void frob_rodata(const struct module_layout *layout, @@ -1969,24 +2071,13 @@ (layout->size - layout->ro_after_init_size) >> PAGE_SHIFT); } -/* livepatching wants to disable read-only so it can frob module. 
*/ -void module_disable_ro(const struct module *mod) +static void module_enable_ro(const struct module *mod, bool after_init) { if (!rodata_enabled) return; - frob_text(&mod->core_layout, set_memory_rw); - frob_rodata(&mod->core_layout, set_memory_rw); - frob_ro_after_init(&mod->core_layout, set_memory_rw); - frob_text(&mod->init_layout, set_memory_rw); - frob_rodata(&mod->init_layout, set_memory_rw); -} - -void module_enable_ro(const struct module *mod, bool after_init) -{ - if (!rodata_enabled) - return; - + set_vm_flush_reset_perms(mod->core_layout.base); + set_vm_flush_reset_perms(mod->init_layout.base); frob_text(&mod->core_layout, set_memory_ro); frob_rodata(&mod->core_layout, set_memory_ro); @@ -2006,88 +2097,32 @@ frob_writable_data(&mod->init_layout, set_memory_nx); } -static void module_disable_nx(const struct module *mod) +static int module_enforce_rwx_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs, + char *secstrings, struct module *mod) { - frob_rodata(&mod->core_layout, set_memory_x); - frob_ro_after_init(&mod->core_layout, set_memory_x); - frob_writable_data(&mod->core_layout, set_memory_x); - frob_rodata(&mod->init_layout, set_memory_x); - frob_writable_data(&mod->init_layout, set_memory_x); -} + const unsigned long shf_wx = SHF_WRITE|SHF_EXECINSTR; + int i; -/* Iterate through all modules and set each module's text as RW */ -void set_all_modules_text_rw(void) -{ - struct module *mod; - - if (!rodata_enabled) - return; - - mutex_lock(&module_mutex); - list_for_each_entry_rcu(mod, &modules, list) { - if (mod->state == MODULE_STATE_UNFORMED) - continue; - - frob_text(&mod->core_layout, set_memory_rw); - frob_text(&mod->init_layout, set_memory_rw); + for (i = 0; i < hdr->e_shnum; i++) { + if ((sechdrs[i].sh_flags & shf_wx) == shf_wx) { + pr_err("%s: section %s (index %d) has invalid WRITE|EXEC flags\n", + mod->name, secstrings + sechdrs[i].sh_name, i); + return -ENOEXEC; + } } - mutex_unlock(&module_mutex); -} -/* Iterate through all modules and set each module's text as RO */ -void set_all_modules_text_ro(void) -{ - struct module *mod; - - if (!rodata_enabled) - return; - - mutex_lock(&module_mutex); - list_for_each_entry_rcu(mod, &modules, list) { - /* - * Ignore going modules since it's possible that ro - * protection has already been disabled, otherwise we'll - * run into protection faults at module deallocation. 
- */ - if (mod->state == MODULE_STATE_UNFORMED || - mod->state == MODULE_STATE_GOING) - continue; - - frob_text(&mod->core_layout, set_memory_ro); - frob_text(&mod->init_layout, set_memory_ro); - } - mutex_unlock(&module_mutex); -} - -static void disable_ro_nx(const struct module_layout *layout) -{ - if (rodata_enabled) { - frob_text(layout, set_memory_rw); - frob_rodata(layout, set_memory_rw); - frob_ro_after_init(layout, set_memory_rw); - } - frob_rodata(layout, set_memory_x); - frob_ro_after_init(layout, set_memory_x); - frob_writable_data(layout, set_memory_x); + return 0; } #else /* !CONFIG_STRICT_MODULE_RWX */ -static void disable_ro_nx(const struct module_layout *layout) { } static void module_enable_nx(const struct module *mod) { } -static void module_disable_nx(const struct module *mod) { } -#endif /* CONFIG_STRICT_MODULE_RWX */ - -static void module_enable_x(const struct module *mod) +static void module_enable_ro(const struct module *mod, bool after_init) {} +static int module_enforce_rwx_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs, + char *secstrings, struct module *mod) { - frob_text(&mod->core_layout, set_memory_x); - frob_text(&mod->init_layout, set_memory_x); + return 0; } -#else /* !CONFIG_ARCH_HAS_STRICT_MODULE_RWX */ -static void disable_ro_nx(const struct module_layout *layout) { } -static void module_enable_nx(const struct module *mod) { } -static void module_disable_nx(const struct module *mod) { } -static void module_enable_x(const struct module *mod) { } -#endif /* CONFIG_ARCH_HAS_STRICT_MODULE_RWX */ +#endif /* CONFIG_STRICT_MODULE_RWX */ #ifdef CONFIG_LIVEPATCH /* @@ -2166,6 +2201,11 @@ void __weak module_memfree(void *module_region) { + /* + * This memory may be RO, and freeing RO memory in an interrupt is not + * supported by vmalloc. + */ + WARN_ON(in_interrupt()); vfree(module_region); } @@ -2215,16 +2255,18 @@ /* Remove this module from bug list, this uses list_del_rcu */ module_bug_cleanup(mod); /* Wait for RCU-sched synchronizing before releasing mod->list and buglist. */ - synchronize_sched(); + synchronize_rcu(); mutex_unlock(&module_mutex); - - /* This may be empty, but that's OK */ - disable_ro_nx(&mod->init_layout); /* Clean up CFI for the module. 
*/ cfi_cleanup(mod); + /* This may be empty, but that's OK */ module_arch_freeing_init(mod); + trace_android_vh_set_memory_rw((unsigned long)mod->init_layout.base, + (mod->init_layout.size)>>PAGE_SHIFT); + trace_android_vh_set_memory_nx((unsigned long)mod->init_layout.base, + (mod->init_layout.size)>>PAGE_SHIFT); module_memfree(mod->init_layout.base); kfree(mod->args); percpu_modfree(mod); @@ -2233,24 +2275,47 @@ lockdep_free_key_range(mod->core_layout.base, mod->core_layout.size); /* Finally, free the core (containing the module structure) */ - disable_ro_nx(&mod->core_layout); + trace_android_vh_set_memory_rw((unsigned long)mod->core_layout.base, + (mod->core_layout.size)>>PAGE_SHIFT); + trace_android_vh_set_memory_nx((unsigned long)mod->core_layout.base, + (mod->core_layout.size)>>PAGE_SHIFT); module_memfree(mod->core_layout.base); } void *__symbol_get(const char *symbol) { struct module *owner; + enum mod_license license; const struct kernel_symbol *sym; preempt_disable(); - sym = find_symbol(symbol, &owner, NULL, NULL, true, true); - if (sym && strong_try_module_get(owner)) + sym = find_symbol(symbol, &owner, NULL, &license, true, true); + if (!sym) + goto fail; + if (license != GPL_ONLY) { + pr_warn("failing symbol_get of non-GPLONLY symbol %s.\n", + symbol); + goto fail; + } + if (strong_try_module_get(owner)) sym = NULL; preempt_enable(); return sym ? (void *)kernel_symbol_value(sym) : NULL; +fail: + preempt_enable(); + return NULL; } EXPORT_SYMBOL_GPL(__symbol_get); + +bool module_init_layout_section(const char *sname) +{ +#ifndef CONFIG_MODULE_UNLOAD + if (module_exit_section(sname)) + return true; +#endif + return module_init_section(sname); +} /* * Ensure that an exported symbol [global namespace] does not already exist @@ -2258,7 +2323,7 @@ * * You must hold the module_mutex. */ -static int verify_export_symbols(struct module *mod) +static int verify_exported_symbols(struct module *mod) { unsigned int i; struct module *owner; @@ -2393,11 +2458,13 @@ if (!(info->sechdrs[infosec].sh_flags & SHF_ALLOC)) continue; - /* Livepatch relocation sections are applied by livepatch */ if (info->sechdrs[i].sh_flags & SHF_RELA_LIVEPATCH) - continue; - - if (info->sechdrs[i].sh_type == SHT_REL) + err = klp_apply_section_relocs(mod, info->sechdrs, + info->secstrings, + info->strtab, + info->index.sym, i, + NULL); + else if (info->sechdrs[i].sh_type == SHT_REL) err = apply_relocate(info->sechdrs, info->strtab, info->index.sym, i, mod); else if (info->sechdrs[i].sh_type == SHT_RELA) @@ -2459,7 +2526,7 @@ if ((s->sh_flags & masks[m][0]) != masks[m][0] || (s->sh_flags & masks[m][1]) || s->sh_entsize != ~0UL - || strstarts(sname, ".init")) + || module_init_layout_section(sname)) continue; s->sh_entsize = get_offset(mod, &mod->core_layout.size, s, i); pr_debug("\t%s\n", sname); @@ -2492,7 +2559,7 @@ if ((s->sh_flags & masks[m][0]) != masks[m][0] || (s->sh_flags & masks[m][1]) || s->sh_entsize != ~0UL - || !strstarts(sname, ".init")) + || !module_init_layout_section(sname)) continue; s->sh_entsize = (get_offset(mod, &mod->init_layout.size, s, i) | INIT_OFFSET_MASK); @@ -2554,7 +2621,8 @@ return string; } -static char *get_modinfo(struct load_info *info, const char *tag) +static char *get_next_modinfo(const struct load_info *info, const char *tag, + char *prev) { char *p; unsigned int taglen = strlen(tag); @@ -2565,11 +2633,23 @@ * get_modinfo() calls made before rewrite_section_headers() * must use sh_offset, as sh_addr isn't set! 
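 *
 * .modinfo is a sequence of NUL-terminated "tag=value" strings, so a
 * tag may repeat; a sketch of draining every "import_ns" entry with
 * the helper pair introduced here:
 *
 *	char *ns = get_modinfo(info, "import_ns");
 *
 *	while (ns) {
 *		pr_debug("%s imports %s\n", info->name, ns);
 *		ns = get_next_modinfo(info, "import_ns", ns);
 *	}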
*/ - for (p = (char *)info->hdr + infosec->sh_offset; p; p = next_string(p, &size)) { + char *modinfo = (char *)info->hdr + infosec->sh_offset; + + if (prev) { + size -= prev - modinfo; + modinfo = next_string(prev, &size); + } + + for (p = modinfo; p; p = next_string(p, &size)) { if (strncmp(p, tag, taglen) == 0 && p[taglen] == '=') return p + taglen + 1; } return NULL; +} + +static char *get_modinfo(const struct load_info *info, const char *tag) +{ + return get_next_modinfo(info, tag, NULL); } static void setup_modinfo(struct module *mod, struct load_info *info) @@ -2596,10 +2676,10 @@ #ifdef CONFIG_KALLSYMS -/* lookup symbol in given range of kernel_symbols */ -static const struct kernel_symbol *lookup_symbol(const char *name, - const struct kernel_symbol *start, - const struct kernel_symbol *stop) +/* Lookup exported symbol in given range of kernel_symbols */ +static const struct kernel_symbol *lookup_exported_symbol(const char *name, + const struct kernel_symbol *start, + const struct kernel_symbol *stop) { return bsearch(name, start, stop - start, sizeof(struct kernel_symbol), cmp_name); @@ -2610,9 +2690,10 @@ { const struct kernel_symbol *ks; if (!mod) - ks = lookup_symbol(name, __start___ksymtab, __stop___ksymtab); + ks = lookup_exported_symbol(name, __start___ksymtab, __stop___ksymtab); else - ks = lookup_symbol(name, mod->syms, mod->syms + mod->num_syms); + ks = lookup_exported_symbol(name, mod->syms, mod->syms + mod->num_syms); + return ks != NULL && kernel_symbol_value(ks) == value; } @@ -2720,6 +2801,8 @@ info->symoffs = ALIGN(mod->core_layout.size, symsect->sh_addralign ?: 1); info->stroffs = mod->core_layout.size = info->symoffs + ndst * sizeof(Elf_Sym); mod->core_layout.size += strtab_size; + info->core_typeoffs = mod->core_layout.size; + mod->core_layout.size += ndst * sizeof(char); mod->core_layout.size = debug_align(mod->core_layout.size); /* Put string table section at end of init part of module. */ @@ -2733,6 +2816,8 @@ __alignof__(struct mod_kallsyms)); info->mod_kallsyms_init_off = mod->init_layout.size; mod->init_layout.size += sizeof(struct mod_kallsyms); + info->init_typeoffs = mod->init_layout.size; + mod->init_layout.size += nsrc * sizeof(char); mod->init_layout.size = debug_align(mod->init_layout.size); } @@ -2756,20 +2841,23 @@ mod->kallsyms->num_symtab = symsec->sh_size / sizeof(Elf_Sym); /* Make sure we get permanent strtab: don't use info->strtab. */ mod->kallsyms->strtab = (void *)info->sechdrs[info->index.str].sh_addr; + mod->kallsyms->typetab = mod->init_layout.base + info->init_typeoffs; - /* Set types up while we still have access to sections. */ - for (i = 0; i < mod->kallsyms->num_symtab; i++) - mod->kallsyms->symtab[i].st_info - = elf_type(&mod->kallsyms->symtab[i], info); - - /* Now populate the cut down core kallsyms for after init. */ + /* + * Now populate the cut down core kallsyms for after init + * and set types up while we still have access to sections. 
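+ *
+ * The new typetab arrays keep one precomputed elf_type() character per
+ * symbol, so consumers such as module_get_kallsym() below can still
+ * report a symbol's type after the section headers are discarded,
+ * e.g. (single-line sketch):
+ *
+ *	char type = mod->core_kallsyms.typetab[symnum];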
+ */ mod->core_kallsyms.symtab = dst = mod->core_layout.base + info->symoffs; mod->core_kallsyms.strtab = s = mod->core_layout.base + info->stroffs; + mod->core_kallsyms.typetab = mod->core_layout.base + info->core_typeoffs; src = mod->kallsyms->symtab; for (ndst = i = 0; i < mod->kallsyms->num_symtab; i++) { + mod->kallsyms->typetab[i] = elf_type(src + i, info); if (i == 0 || is_livepatch_module(mod) || is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum, info->index.pcpu)) { + mod->core_kallsyms.typetab[ndst] = + mod->kallsyms->typetab[i]; dst[ndst] = src[i]; dst[ndst++].st_name = s - mod->core_kallsyms.strtab; s += strlcpy(s, &mod->kallsyms->strtab[src[i].st_name], @@ -2792,11 +2880,7 @@ { if (!debug) return; -#ifdef CONFIG_DYNAMIC_DEBUG - if (ddebug_add_module(debug, num, mod->name)) - pr_err("dynamic debug error adding module: %s\n", - debug->modname); -#endif + ddebug_add_module(debug, num, mod->name); } static void dynamic_debug_remove(struct module *mod, struct _ddebug *debug) @@ -2807,7 +2891,19 @@ void * __weak module_alloc(unsigned long size) { - return vmalloc_exec(size); + return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END, + GFP_KERNEL, PAGE_KERNEL_EXEC, VM_FLUSH_RESET_PERMS, + NUMA_NO_NODE, __builtin_return_address(0)); +} + +bool __weak module_init_section(const char *name) +{ + return strstarts(name, ".init"); +} + +bool __weak module_exit_section(const char *name) +{ + return strstarts(name, ".exit"); } #ifdef CONFIG_DEBUG_KMEMLEAK @@ -2840,8 +2936,9 @@ #ifdef CONFIG_MODULE_SIG static int module_sig_check(struct load_info *info, int flags) { - int err = -ENOKEY; + int err = -ENODATA; const unsigned long markerlen = sizeof(MODULE_SIG_STRING) - 1; + const char *reason; const void *mod = info->hdr; /* @@ -2856,16 +2953,39 @@ err = mod_verify_sig(mod, info); } - if (!err) { + switch (err) { + case 0: info->sig_ok = true; return 0; + + /* We don't permit modules to be loaded into trusted kernels + * without a valid signature on them, but if we're not + * enforcing, certain errors are non-fatal. + */ + case -ENODATA: + reason = "unsigned module"; + break; + case -ENOPKG: + reason = "module with unsupported crypto"; + break; + case -ENOKEY: + reason = "module with unavailable key"; + break; + + /* All other errors are fatal, including nomem, unparseable + * signatures and signature check failures - even if signatures + * aren't required. + */ + default: + return err; } - /* Not having a signature is only an error if we're strict. */ - if (err == -ENOKEY && !is_module_sig_enforced()) - err = 0; + if (is_module_sig_enforced()) { + pr_notice("Loading of %s is rejected\n", reason); + return -EKEYREJECTED; + } - return err; + return security_locked_down(LOCKDOWN_MODULE_SIGNATURE); } #else /* !CONFIG_MODULE_SIG */ static int module_sig_check(struct load_info *info, int flags) @@ -2874,9 +2994,33 @@ } #endif /* !CONFIG_MODULE_SIG */ -/* Sanity checks against invalid binaries, wrong arch, weird elf version. */ -static int elf_header_check(struct load_info *info) +static int validate_section_offset(struct load_info *info, Elf_Shdr *shdr) { + unsigned long secend; + + /* + * Check for both overflow and offset/size being + * too large. + */ + secend = shdr->sh_offset + shdr->sh_size; + if (secend < shdr->sh_offset || secend > info->len) + return -ENOEXEC; + + return 0; +} + +/* + * Sanity checks against invalid binaries, wrong arch, weird elf version. 
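+ *
+ * validate_section_offset() above is the classic overflow-safe bounds
+ * check; restated as a free-standing helper with illustrative names
+ * (not part of this patch):
+ *
+ *	static bool range_ok(unsigned long off, unsigned long size,
+ *			     unsigned long len)
+ *	{
+ *		unsigned long end = off + size;
+ *
+ *		return end >= off && end <= len;
+ *	}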
+ * + * Also do basic validity checks against section offsets and sizes, the + * section name string table, and the indices used for it (sh_name). + */ +static int elf_validity_check(struct load_info *info) +{ + unsigned int i; + Elf_Shdr *shdr, *strhdr; + int err; + if (info->len < sizeof(*(info->hdr))) return -ENOEXEC; @@ -2886,10 +3030,77 @@ || info->hdr->e_shentsize != sizeof(Elf_Shdr)) return -ENOEXEC; + /* + * e_shnum is 16 bits, and sizeof(Elf_Shdr) is + * known and small. So e_shnum * sizeof(Elf_Shdr) + * will not overflow unsigned long on any platform. + */ if (info->hdr->e_shoff >= info->len || (info->hdr->e_shnum * sizeof(Elf_Shdr) > info->len - info->hdr->e_shoff)) return -ENOEXEC; + + info->sechdrs = (void *)info->hdr + info->hdr->e_shoff; + + /* + * Verify if the section name table index is valid. + */ + if (info->hdr->e_shstrndx == SHN_UNDEF + || info->hdr->e_shstrndx >= info->hdr->e_shnum) + return -ENOEXEC; + + strhdr = &info->sechdrs[info->hdr->e_shstrndx]; + err = validate_section_offset(info, strhdr); + if (err < 0) + return err; + + /* + * The section name table must be NUL-terminated, as required + * by the spec. This makes strcmp and pr_* calls that access + * strings in the section safe. + */ + info->secstrings = (void *)info->hdr + strhdr->sh_offset; + if (info->secstrings[strhdr->sh_size - 1] != '\0') + return -ENOEXEC; + + /* + * The code assumes that section 0 has a length of zero and + * an addr of zero, so check for it. + */ + if (info->sechdrs[0].sh_type != SHT_NULL + || info->sechdrs[0].sh_size != 0 + || info->sechdrs[0].sh_addr != 0) + return -ENOEXEC; + + for (i = 1; i < info->hdr->e_shnum; i++) { + shdr = &info->sechdrs[i]; + switch (shdr->sh_type) { + case SHT_NULL: + case SHT_NOBITS: + continue; + case SHT_SYMTAB: + if (shdr->sh_link == SHN_UNDEF + || shdr->sh_link >= info->hdr->e_shnum) + return -ENOEXEC; + fallthrough; + default: + err = validate_section_offset(info, shdr); + if (err < 0) { + pr_err("Invalid ELF section in module (section %u type %u)\n", + i, shdr->sh_type); + return err; + } + + if (shdr->sh_flags & SHF_ALLOC) { + if (shdr->sh_name >= strhdr->sh_size) { + pr_err("Invalid ELF section name in module (section %u type %u)\n", + i, shdr->sh_type); + return -ENOEXEC; + } + } + break; + } + } return 0; } @@ -2955,22 +3166,27 @@ if (info->len < sizeof(*(info->hdr))) return -ENOEXEC; - err = security_kernel_load_data(LOADING_MODULE); + err = security_kernel_load_data(LOADING_MODULE, true); if (err) return err; /* Suck in entire file: we'll want most of it. */ - info->hdr = __vmalloc(info->len, - GFP_KERNEL | __GFP_NOWARN, PAGE_KERNEL); + info->hdr = __vmalloc(info->len, GFP_KERNEL | __GFP_NOWARN); if (!info->hdr) return -ENOMEM; if (copy_chunked_from_user(info->hdr, umod, info->len) != 0) { - vfree(info->hdr); - return -EFAULT; + err = -EFAULT; + goto out; } - return 0; + err = security_kernel_post_load_data((char *)info->hdr, info->len, + LOADING_MODULE, "init_module"); +out: + if (err) + vfree(info->hdr); + + return err; } static void free_copy(struct load_info *info) @@ -2987,21 +3203,11 @@ for (i = 1; i < info->hdr->e_shnum; i++) { Elf_Shdr *shdr = &info->sechdrs[i]; - if (shdr->sh_type != SHT_NOBITS - && info->len < shdr->sh_offset + shdr->sh_size) { - pr_err("Module len %lu truncated\n", info->len); - return -ENOEXEC; - } /* Mark all sections sh_addr with their address in the temporary image. 
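 *
 * Afterwards, later stages may dereference a section through sh_addr
 * for as long as the temporary image is alive; add_kallsyms() below
 * uses exactly this pattern:
 *
 *	mod->kallsyms->strtab =
 *		(void *)info->sechdrs[info->index.str].sh_addr;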
*/ shdr->sh_addr = (size_t)info->hdr + shdr->sh_offset; -#ifndef CONFIG_MODULE_UNLOAD - /* Don't load .exit sections */ - if (strstarts(info->secstrings+shdr->sh_name, ".exit")) - shdr->sh_flags &= ~(unsigned long)SHF_ALLOC; -#endif } /* Track but don't keep modinfo and version sections. */ @@ -3022,11 +3228,6 @@ static int setup_load_info(struct load_info *info, int flags) { unsigned int i; - - /* Set up the convenience variables */ - info->sechdrs = (void *)info->hdr + info->hdr->e_shoff; - info->secstrings = (void *)info->hdr - + info->sechdrs[info->hdr->e_shstrndx].sh_offset; /* Try to find a name early so we can log errors with a module name */ info->index.info = find_sec(info, ".modinfo"); @@ -3164,10 +3365,23 @@ } #endif + mod->noinstr_text_start = section_objs(info, ".noinstr.text", 1, + &mod->noinstr_text_size); + #ifdef CONFIG_TRACEPOINTS mod->tracepoints_ptrs = section_objs(info, "__tracepoints_ptrs", sizeof(*mod->tracepoints_ptrs), &mod->num_tracepoints); +#endif +#ifdef CONFIG_TREE_SRCU + mod->srcu_struct_ptrs = section_objs(info, "___srcu_struct_ptrs", + sizeof(*mod->srcu_struct_ptrs), + &mod->num_srcu_structs); +#endif +#ifdef CONFIG_BPF_EVENTS + mod->bpf_raw_events = section_objs(info, "__bpf_raw_tp_map", + sizeof(*mod->bpf_raw_events), + &mod->num_bpf_raw_events); #endif #ifdef CONFIG_JUMP_LABEL mod->jump_entries = section_objs(info, "__jump_table", @@ -3189,7 +3403,7 @@ #endif #ifdef CONFIG_FTRACE_MCOUNT_RECORD /* sechdrs[0].sh_size is always zero */ - mod->ftrace_callsites = section_objs(info, "__mcount_loc", + mod->ftrace_callsites = section_objs(info, FTRACE_CALLSITE_SECTION, sizeof(*mod->ftrace_callsites), &mod->num_ftrace_callsites); #endif @@ -3198,13 +3412,25 @@ sizeof(*mod->ei_funcs), &mod->num_ei_funcs); #endif +#ifdef CONFIG_KPROBES + mod->kprobes_text_start = section_objs(info, ".kprobes.text", 1, + &mod->kprobes_text_size); + mod->kprobe_blacklist = section_objs(info, "_kprobe_blacklist", + sizeof(unsigned long), + &mod->num_kprobe_blacklist); +#endif +#ifdef CONFIG_HAVE_STATIC_CALL_INLINE + mod->static_call_sites = section_objs(info, ".static_call_sites", + sizeof(*mod->static_call_sites), + &mod->num_static_call_sites); +#endif mod->extable = section_objs(info, "__ex_table", sizeof(*mod->extable), &mod->num_exentries); if (section_addr(info, "__obsparm")) pr_warn("%s: Ignoring obsolete parameters\n", mod->name); - info->debug = section_objs(info, "__verbose", + info->debug = section_objs(info, "__dyndbg", sizeof(*info->debug), &info->num_debug); return 0; @@ -3316,12 +3542,6 @@ static void flush_module_icache(const struct module *mod) { - mm_segment_t old_fs; - - /* flush the icache in correct context */ - old_fs = get_fs(); - set_fs(KERNEL_DS); - /* * Flush the instruction cache, since we've played with text. * Do it before processing of module parameters, so the module @@ -3333,8 +3553,6 @@ + mod->init_layout.size); flush_icache_range((unsigned long)mod->core_layout.base, (unsigned long)mod->core_layout.base + mod->core_layout.size); - - set_fs(old_fs); } int __weak module_frob_arch_sections(Elf_Ehdr *hdr, @@ -3382,6 +3600,11 @@ if (err < 0) return ERR_PTR(err); + err = module_enforce_rwx_sections(info->hdr, info->sechdrs, + info->secstrings, info->mod); + if (err < 0) + return ERR_PTR(err); + /* We will do a special allocation for per-cpu sections later. */ info->sechdrs[info->index.pcpu].sh_flags &= ~(unsigned long)SHF_ALLOC; @@ -3391,6 +3614,15 @@ * Note: ro_after_init sections also have SHF_{WRITE,ALLOC} set. 
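 *
 * For reference, module code places data there with the standard
 * (pre-existing) annotation, e.g.:
 *
 *	static unsigned long bounds[8] __ro_after_init;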
*/ ndx = find_sec(info, ".data..ro_after_init"); + if (ndx) + info->sechdrs[ndx].sh_flags |= SHF_RO_AFTER_INIT; + /* + * Mark the __jump_table section as ro_after_init as well: these data + * structures are never modified, with the exception of entries that + * refer to code in the __init section, which are annotated as such + * at module load time. + */ + ndx = find_sec(info, "__jump_table"); if (ndx) info->sechdrs[ndx].sh_flags |= SHF_RO_AFTER_INIT; @@ -3416,7 +3648,15 @@ { percpu_modfree(mod); module_arch_freeing_init(mod); + trace_android_vh_set_memory_rw((unsigned long)mod->init_layout.base, + (mod->init_layout.size)>>PAGE_SHIFT); + trace_android_vh_set_memory_nx((unsigned long)mod->init_layout.base, + (mod->init_layout.size)>>PAGE_SHIFT); module_memfree(mod->init_layout.base); + trace_android_vh_set_memory_rw((unsigned long)mod->core_layout.base, + (mod->core_layout.size)>>PAGE_SHIFT); + trace_android_vh_set_memory_nx((unsigned long)mod->core_layout.base, + (mod->core_layout.size)>>PAGE_SHIFT); module_memfree(mod->core_layout.base); } @@ -3426,8 +3666,6 @@ { return 0; } - -static void cfi_init(struct module *mod); static int post_relocation(struct module *mod, const struct load_info *info) { @@ -3440,9 +3678,6 @@ /* Setup kallsyms-specific fields. */ add_kallsyms(mod, info); - - /* Setup CFI for the module. */ - cfi_init(mod); /* Arch-specific module finalizing. */ return module_finalize(info->hdr, info->sechdrs, mod); @@ -3462,7 +3697,8 @@ sched_annotate_sleep(); mutex_lock(&module_mutex); mod = find_module_all(name, strlen(name), true); - ret = !mod || mod->state == MODULE_STATE_LIVE; + ret = !mod || mod->state == MODULE_STATE_LIVE + || mod->state == MODULE_STATE_GOING; mutex_unlock(&module_mutex); return ret; @@ -3481,15 +3717,24 @@ /* For freeing module_init on success, in case kallsyms traversing */ struct mod_initfree { - struct rcu_head rcu; + struct llist_node node; void *module_init; }; -static void do_free_init(struct rcu_head *head) +static void do_free_init(struct work_struct *w) { - struct mod_initfree *m = container_of(head, struct mod_initfree, rcu); - module_memfree(m->module_init); - kfree(m); + struct llist_node *pos, *n, *list; + struct mod_initfree *initfree; + + list = llist_del_all(&init_free_list); + + synchronize_rcu(); + + llist_for_each_safe(pos, n, list) { + initfree = container_of(pos, struct mod_initfree, node); + module_memfree(initfree->module_init); + kfree(initfree); + } } /* @@ -3555,9 +3800,13 @@ rcu_assign_pointer(mod->kallsyms, &mod->core_kallsyms); #endif module_enable_ro(mod, true); + trace_android_vh_set_module_permit_after_init(mod); mod_tree_remove_init(mod); - disable_ro_nx(&mod->init_layout); module_arch_freeing_init(mod); + trace_android_vh_set_memory_rw((unsigned long)mod->init_layout.base, + (mod->init_layout.size)>>PAGE_SHIFT); + trace_android_vh_set_memory_nx((unsigned long)mod->init_layout.base, + (mod->init_layout.size)>>PAGE_SHIFT); mod->init_layout.base = NULL; mod->init_layout.size = 0; mod->init_layout.ro_size = 0; @@ -3566,15 +3815,19 @@ /* * We want to free module_init, but be aware that kallsyms may be * walking this with preempt disabled. In all the failure paths, we - * call synchronize_sched(), but we don't want to slow down the success - * path, so use actual RCU here. + * call synchronize_rcu(), but we don't want to slow down the success + * path. module_memfree() cannot be called in an interrupt, so do the + * work and call synchronize_rcu() in a work queue. 
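+ *
+ * The producer half of that hand-off is the llist_add() and
+ * schedule_work() pair used a few lines below; llist_add() returns
+ * true only when the list was previously empty, so the worker is
+ * kicked at most once per batch:
+ *
+ *	if (llist_add(&freeinit->node, &init_free_list))
+ *		schedule_work(&init_free_wq);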
+ * * Note that module_alloc() on most architectures creates W+X page * mappings which won't be cleaned up until do_free_init() runs. Any * code such as mark_rodata_ro() which depends on those mappings to * be cleaned up needs to sync with the queued work - ie - * rcu_barrier_sched() + * rcu_barrier() */ - call_rcu_sched(&freeinit->rcu, do_free_init); + if (llist_add(&freeinit->node, &init_free_list)) + schedule_work(&init_free_wq); + mutex_unlock(&module_mutex); wake_up_all(&module_wq); @@ -3585,7 +3838,7 @@ fail: /* Try to protect us from buggy refcounters. */ mod->state = MODULE_STATE_GOING; - synchronize_sched(); + synchronize_rcu(); module_put(mod); blocking_notifier_call_chain(&module_notify_list, MODULE_STATE_GOING, mod); @@ -3616,20 +3869,35 @@ mod->state = MODULE_STATE_UNFORMED; -again: mutex_lock(&module_mutex); old = find_module_all(mod->name, strlen(mod->name), true); if (old != NULL) { - if (old->state != MODULE_STATE_LIVE) { + if (old->state == MODULE_STATE_COMING + || old->state == MODULE_STATE_UNFORMED) { /* Wait in case it fails to load. */ mutex_unlock(&module_mutex); err = wait_event_interruptible(module_wq, finished_loading(mod->name)); if (err) goto out_unlocked; - goto again; + + /* The module might have gone in the meantime. */ + mutex_lock(&module_mutex); + old = find_module_all(mod->name, strlen(mod->name), + true); } - err = -EEXIST; + + /* + * We are here only when the same module was being loaded. Do + * not try to load it again right now. It prevents long delays + * caused by serialized module load failures. It might happen + * when more devices of the same type trigger load of + * a particular module. + */ + if (old && old->state == MODULE_STATE_LIVE) + err = -EEXIST; + else + err = -EBUSY; goto out; } mod_update_bounds(mod); @@ -3650,7 +3918,7 @@ mutex_lock(&module_mutex); /* Find duplicate symbols (must be called under lock). */ - err = verify_export_symbols(mod); + err = verify_exported_symbols(mod); if (err < 0) goto out; @@ -3660,6 +3928,7 @@ module_enable_ro(mod, false); module_enable_nx(mod); module_enable_x(mod); + trace_android_vh_set_module_permit_before_init(mod); /* Mark state as coming so strong_try_module_get() ignores us, * but kallsyms etc. can see us. */ @@ -3682,9 +3951,13 @@ if (err) return err; - blocking_notifier_call_chain(&module_notify_list, - MODULE_STATE_COMING, mod); - return 0; + err = blocking_notifier_call_chain_robust(&module_notify_list, + MODULE_STATE_COMING, MODULE_STATE_GOING, mod); + err = notifier_to_errno(err); + if (err) + klp_module_going(mod); + + return err; } static int unknown_module_param_cb(char *param, char *val, const char *modname, @@ -3705,6 +3978,8 @@ return 0; } +static void cfi_init(struct module *mod); + /* Allocate and load the module: note that size of section 0 is always zero, and we rely on this for optional sections. */ static int load_module(struct load_info *info, const char __user *uargs, @@ -3714,22 +3989,49 @@ long err = 0; char *after_dashes; - err = elf_header_check(info); + /* + * Do the signature check (if any) first. All that + * the signature check needs is info->len, it does + * not need any of the section info. That can be + * set up later. This will minimize the chances + * of a corrupt module causing problems before + * we even get to the signature check. + * + * The check will also adjust info->len by stripping + * off the sig length at the end of the module, making + * checks against info->len more correct. 
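+ *
+ * Whether the non-fatal cases in module_sig_check() are tolerated is
+ * policy: enforcement comes from CONFIG_MODULE_SIG_FORCE=y at build
+ * time or, on a kernel built with CONFIG_MODULE_SIG, the existing
+ * boot parameter:
+ *
+ *	module.sig_enforce=1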
+ */ + err = module_sig_check(info, flags); if (err) goto free_copy; + /* + * Do basic sanity checks against the ELF header and + * sections. + */ + err = elf_validity_check(info); + if (err) { + pr_err("Module has invalid ELF structures\n"); + goto free_copy; + } + + /* + * Everything checks out, so set up the section info + * in the info structure. + */ err = setup_load_info(info, flags); if (err) goto free_copy; + /* + * Now that we know we have the correct module name, check + * if it's blacklisted. + */ if (blacklisted(info->name)) { err = -EPERM; + pr_err("Module %s is blacklisted\n", info->name); goto free_copy; } - - err = module_sig_check(info, flags); - if (err) - goto free_copy; err = rewrite_section_headers(info, flags); if (err) @@ -3805,6 +4107,9 @@ flush_module_icache(mod); + /* Setup CFI for the module. */ + cfi_init(mod); + /* Now copy in args */ mod->args = strndup_user(uargs, ~0UL >> 1); if (IS_ERR(mod->args)) { @@ -3872,16 +4177,13 @@ module_bug_cleanup(mod); mutex_unlock(&module_mutex); - /* we can't deallocate the module until we clear memory protection */ - module_disable_ro(mod); - module_disable_nx(mod); - ddebug_cleanup: ftrace_release_mod(mod); dynamic_debug_remove(mod, info->debug); - synchronize_sched(); + synchronize_rcu(); kfree(mod->args); free_arch_cleanup: + cfi_cleanup(mod); module_arch_cleanup(mod); free_modinfo: free_modinfo(mod); @@ -3894,7 +4196,7 @@ mod_tree_remove(mod); wake_up_all(&module_wq); /* Wait for RCU-sched synchronizing before releasing mod->list. */ - synchronize_sched(); + synchronize_rcu(); mutex_unlock(&module_mutex); free_module: /* Free lock-classes; relies on the preceding sync_rcu() */ @@ -3929,8 +4231,7 @@ SYSCALL_DEFINE3(finit_module, int, fd, const char __user *, uargs, int, flags) { struct load_info info = { }; - loff_t size; - void *hdr; + void *hdr = NULL; int err; err = may_init_module(); @@ -3943,12 +4244,12 @@ |MODULE_INIT_IGNORE_VERMAGIC)) return -EINVAL; - err = kernel_read_file_from_fd(fd, &hdr, &size, INT_MAX, + err = kernel_read_file_from_fd(fd, 0, &hdr, INT_MAX, NULL, READING_MODULE); - if (err) + if (err < 0) return err; info.hdr = hdr; - info.len = size; + info.len = err; return load_module(&info, uargs, flags); } @@ -3971,18 +4272,27 @@ && (str[2] == '\0' || str[2] == '.'); } -static const char *symname(struct mod_kallsyms *kallsyms, unsigned int symnum) +static inline int is_cfi_typeid_symbol(const char *str) +{ + return !strncmp(str, "__typeid__", 10); +} + +static const char *kallsyms_symbol_name(struct mod_kallsyms *kallsyms, unsigned int symnum) { return kallsyms->strtab + kallsyms->symtab[symnum].st_name; } -static const char *get_ksymbol(struct module *mod, - unsigned long addr, - unsigned long *size, - unsigned long *offset) +/* + * Given a module and address, find the corresponding symbol and return its name + * while providing its size and offset if needed. + */ +static const char *find_kallsyms_symbol(struct module *mod, + unsigned long addr, + unsigned long *size, + unsigned long *offset) { unsigned int i, best = 0; - unsigned long nextval; + unsigned long nextval, bestval; struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms); /* At worse, next value is at end of module */ @@ -3991,34 +4301,41 @@ else nextval = (unsigned long)mod->core_layout.base+mod->core_layout.text_size; + bestval = kallsyms_symbol_value(&kallsyms->symtab[best]); + /* Scan for closest preceding symbol, and next symbol. (ELF starts real symbols at 1). 
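 *
 * Index 0 is the mandatory ELF null symbol, which has neither name nor
 * value, so every symtab walk in this file begins at 1; a trivial
 * illustrative walk over the same table (reusing the locals already in
 * scope here):
 *
 *	for (i = 1; i < kallsyms->num_symtab; i++)
 *		pr_debug("%s\n", kallsyms_symbol_name(kallsyms, i));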
*/ for (i = 1; i < kallsyms->num_symtab; i++) { - if (kallsyms->symtab[i].st_shndx == SHN_UNDEF) + const Elf_Sym *sym = &kallsyms->symtab[i]; + unsigned long thisval = kallsyms_symbol_value(sym); + + if (sym->st_shndx == SHN_UNDEF) continue; /* We ignore unnamed symbols: they're uninformative * and inserted at a whim. */ - if (*symname(kallsyms, i) == '\0' - || is_arm_mapping_symbol(symname(kallsyms, i))) + if (*kallsyms_symbol_name(kallsyms, i) == '\0' + || is_arm_mapping_symbol(kallsyms_symbol_name(kallsyms, i)) + || is_cfi_typeid_symbol(kallsyms_symbol_name(kallsyms, i))) continue; - if (kallsyms->symtab[i].st_value <= addr - && kallsyms->symtab[i].st_value > kallsyms->symtab[best].st_value) + if (thisval <= addr && thisval > bestval) { best = i; - if (kallsyms->symtab[i].st_value > addr - && kallsyms->symtab[i].st_value < nextval) - nextval = kallsyms->symtab[i].st_value; + bestval = thisval; + } + if (thisval > addr && thisval < nextval) + nextval = thisval; } if (!best) return NULL; if (size) - *size = nextval - kallsyms->symtab[best].st_value; + *size = nextval - bestval; if (offset) - *offset = addr - kallsyms->symtab[best].st_value; - return symname(kallsyms, best); + *offset = addr - bestval; + + return kallsyms_symbol_name(kallsyms, best); } void * __weak dereference_module_function_descriptor(struct module *mod, @@ -4043,7 +4360,8 @@ if (mod) { if (modname) *modname = mod->name; - ret = get_ksymbol(mod, addr, size, offset); + + ret = find_kallsyms_symbol(mod, addr, size, offset); } /* Make a copy in here where it's safe */ if (ret) { @@ -4066,9 +4384,10 @@ if (within_module(addr, mod)) { const char *sym; - sym = get_ksymbol(mod, addr, NULL, NULL); + sym = find_kallsyms_symbol(mod, addr, NULL, NULL); if (!sym) goto out; + strlcpy(symname, sym, KSYM_NAME_LEN); preempt_enable(); return 0; @@ -4091,7 +4410,7 @@ if (within_module(addr, mod)) { const char *sym; - sym = get_ksymbol(mod, addr, size, offset); + sym = find_kallsyms_symbol(mod, addr, size, offset); if (!sym) goto out; if (modname) @@ -4120,9 +4439,11 @@ continue; kallsyms = rcu_dereference_sched(mod->kallsyms); if (symnum < kallsyms->num_symtab) { - *value = kallsyms->symtab[symnum].st_value; - *type = kallsyms->symtab[symnum].st_info; - strlcpy(name, symname(kallsyms, symnum), KSYM_NAME_LEN); + const Elf_Sym *sym = &kallsyms->symtab[symnum]; + + *value = kallsyms_symbol_value(sym); + *type = kallsyms->typetab[symnum]; + strlcpy(name, kallsyms_symbol_name(kallsyms, symnum), KSYM_NAME_LEN); strlcpy(module_name, mod->name, MODULE_NAME_LEN); *exported = is_exported(name, *value, mod); preempt_enable(); @@ -4134,15 +4455,19 @@ return -ERANGE; } -static unsigned long mod_find_symname(struct module *mod, const char *name) +/* Given a module and name of symbol, find and return the symbol's value */ +static unsigned long find_kallsyms_symbol_value(struct module *mod, const char *name) { unsigned int i; struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms); - for (i = 0; i < kallsyms->num_symtab; i++) - if (strcmp(name, symname(kallsyms, i)) == 0 && - kallsyms->symtab[i].st_shndx != SHN_UNDEF) - return kallsyms->symtab[i].st_value; + for (i = 0; i < kallsyms->num_symtab; i++) { + const Elf_Sym *sym = &kallsyms->symtab[i]; + + if (strcmp(name, kallsyms_symbol_name(kallsyms, i)) == 0 && + sym->st_shndx != SHN_UNDEF) + return kallsyms_symbol_value(sym); + } return 0; } @@ -4157,12 +4482,12 @@ preempt_disable(); if ((colon = strnchr(name, MODULE_NAME_LEN, ':')) != NULL) { if ((mod = find_module_all(name, colon - name, 
false)) != NULL) - ret = mod_find_symname(mod, colon+1); + ret = find_kallsyms_symbol_value(mod, colon+1); } else { list_for_each_entry_rcu(mod, &modules, list) { if (mod->state == MODULE_STATE_UNFORMED) continue; - if ((ret = mod_find_symname(mod, name)) != 0) + if ((ret = find_kallsyms_symbol_value(mod, name)) != 0) break; } } @@ -4187,12 +4512,13 @@ if (mod->state == MODULE_STATE_UNFORMED) continue; for (i = 0; i < kallsyms->num_symtab; i++) { + const Elf_Sym *sym = &kallsyms->symtab[i]; - if (kallsyms->symtab[i].st_shndx == SHN_UNDEF) + if (sym->st_shndx == SHN_UNDEF) continue; - ret = fn(data, symname(kallsyms, i), - mod, kallsyms->symtab[i].st_value); + ret = fn(data, kallsyms_symbol_name(kallsyms, i), + mod, kallsyms_symbol_value(sym)); if (ret != 0) return ret; } @@ -4204,18 +4530,30 @@ static void cfi_init(struct module *mod) { #ifdef CONFIG_CFI_CLANG - preempt_disable(); - mod->cfi_check = - (cfi_check_fn)mod_find_symname(mod, CFI_CHECK_FN_NAME); - preempt_enable(); - cfi_module_add(mod, module_addr_min, module_addr_max); + initcall_t *init; + exitcall_t *exit; + + rcu_read_lock_sched(); + mod->cfi_check = (cfi_check_fn) + find_kallsyms_symbol_value(mod, "__cfi_check"); + init = (initcall_t *) + find_kallsyms_symbol_value(mod, "__cfi_jt_init_module"); + exit = (exitcall_t *) + find_kallsyms_symbol_value(mod, "__cfi_jt_cleanup_module"); + rcu_read_unlock_sched(); + + /* Fix init/exit functions to point to the CFI jump table */ + if (init) mod->init = *init; + if (exit) mod->exit = *exit; + + cfi_module_add(mod, module_addr_min); #endif } static void cfi_cleanup(struct module *mod) { #ifdef CONFIG_CFI_CLANG - cfi_module_remove(mod, module_addr_min, module_addr_max); + cfi_module_remove(mod, module_addr_min); #endif } @@ -4326,16 +4664,17 @@ return err; } -static const struct file_operations proc_modules_operations = { - .open = modules_open, - .read = seq_read, - .llseek = seq_lseek, - .release = seq_release, +static const struct proc_ops modules_proc_ops = { + .proc_flags = PROC_ENTRY_PERMANENT, + .proc_open = modules_open, + .proc_read = seq_read, + .proc_lseek = seq_lseek, + .proc_release = seq_release, }; static int __init proc_modules_init(void) { - proc_create("modules", 0, NULL, &proc_modules_operations); + proc_create("modules", 0, NULL, &modules_proc_ops); return 0; } module_init(proc_modules_init); @@ -4469,6 +4808,23 @@ pr_cont("\n"); } +#ifdef CONFIG_ANDROID_DEBUG_SYMBOLS +void android_debug_for_each_module(int (*fn)(const char *mod_name, void *mod_addr, void *data), + void *data) +{ + struct module *module; + + preempt_disable(); + list_for_each_entry_rcu(module, &modules, list) { + if (fn(module->name, module->core_layout.base, data)) + goto out; + } +out: + preempt_enable(); +} +EXPORT_SYMBOL_GPL(android_debug_for_each_module); +#endif + #ifdef CONFIG_MODVERSIONS /* Generate the signature for all relevant module structures here. * If these change, we don't want to try to parse the module. */ -- Gitblit v1.6.2