2023-12-09 b22da3d8526a935aa31e086e63f60ff3246cb61c
kernel/kernel/module.c
....@@ -1,24 +1,16 @@
1
+// SPDX-License-Identifier: GPL-2.0-or-later
12 /*
23 Copyright (C) 2002 Richard Henderson
34 Copyright (C) 2001 Rusty Russell, 2002, 2010 Rusty Russell IBM.
45
5
- This program is free software; you can redistribute it and/or modify
6
- it under the terms of the GNU General Public License as published by
7
- the Free Software Foundation; either version 2 of the License, or
8
- (at your option) any later version.
9
-
10
- This program is distributed in the hope that it will be useful,
11
- but WITHOUT ANY WARRANTY; without even the implied warranty of
12
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13
- GNU General Public License for more details.
14
-
15
- You should have received a copy of the GNU General Public License
16
- along with this program; if not, write to the Free Software
17
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
186 */
7
+
8
+#define INCLUDE_VERMAGIC
9
+
1910 #include <linux/export.h>
2011 #include <linux/extable.h>
2112 #include <linux/moduleloader.h>
13
+#include <linux/module_signature.h>
2214 #include <linux/trace_events.h>
2315 #include <linux/init.h>
2416 #include <linux/kallsyms.h>
....@@ -26,6 +18,7 @@
2618 #include <linux/fs.h>
2719 #include <linux/sysfs.h>
2820 #include <linux/kernel.h>
21
+#include <linux/kernel_read_file.h>
2922 #include <linux/slab.h>
3023 #include <linux/vmalloc.h>
3124 #include <linux/elf.h>
....@@ -70,15 +63,24 @@
7063 #define CREATE_TRACE_POINTS
7164 #include <trace/events/module.h>
7265
66
+#undef CREATE_TRACE_POINTS
67
+#include <trace/hooks/module.h>
68
+#include <trace/hooks/memory.h>
69
+
7370 #ifndef ARCH_SHF_SMALL
7471 #define ARCH_SHF_SMALL 0
7572 #endif
7673
7774 /*
7875 * Modules' sections will be aligned on page boundaries
79
- * to ensure complete separation of code and data
76
+ * to ensure complete separation of code and data, but
77
+ * only when CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y
8078 */
79
+#ifdef CONFIG_ARCH_HAS_STRICT_MODULE_RWX
8180 # define debug_align(X) ALIGN(X, PAGE_SIZE)
81
+#else
82
+# define debug_align(X) (X)
83
+#endif
8284
8385 /* If this is set, the section belongs in the init part of the module */
8486 #define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1))
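For context, a stand-alone sketch of the page alignment that debug_align() now performs only when CONFIG_ARCH_HAS_STRICT_MODULE_RWX is set; ALIGN() is re-declared here purely so the example compiles in user space, and the 0x1234 size is a made-up value:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define ALIGN(x, a)    (((x) + ((a) - 1)) & ~((a) - 1))
#define debug_align(X) ALIGN(X, PAGE_SIZE)

int main(void)
{
	/* A 0x1234-byte layout is rounded up to the next page boundary,
	 * 0x2000, so text/rodata/data never end up sharing a page.
	 * Without strict module RWX, debug_align() leaves the size alone. */
	printf("%#lx -> %#lx\n", 0x1234UL, debug_align(0x1234UL));
	return 0;
}
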
....@@ -90,8 +92,12 @@
9092 * 3) module_addr_min/module_addr_max.
9193 * (delete and add uses RCU list operations). */
9294 DEFINE_MUTEX(module_mutex);
93
-EXPORT_SYMBOL_GPL(module_mutex);
9495 static LIST_HEAD(modules);
96
+
97
+/* Work queue for freeing init sections in success case */
98
+static void do_free_init(struct work_struct *w);
99
+static DECLARE_WORK(init_free_wq, do_free_init);
100
+static LLIST_HEAD(init_free_list);
95101
96102 #ifdef CONFIG_MODULES_TREE_LOOKUP
97103
....@@ -216,7 +222,8 @@
216222 {
217223 struct module *mod;
218224
219
- list_for_each_entry_rcu(mod, &modules, list) {
225
+ list_for_each_entry_rcu(mod, &modules, list,
226
+ lockdep_is_held(&module_mutex)) {
220227 if (within_module(addr, mod))
221228 return mod;
222229 }
....@@ -454,7 +461,8 @@
454461 if (each_symbol_in_section(arr, ARRAY_SIZE(arr), NULL, fn, data))
455462 return true;
456463
457
- list_for_each_entry_rcu(mod, &modules, list) {
464
+ list_for_each_entry_rcu(mod, &modules, list,
465
+ lockdep_is_held(&module_mutex)) {
458466 struct symsearch arr[] = {
459467 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
460468 NOT_GPL_ONLY, false },
....@@ -499,9 +507,9 @@
499507 enum mod_license license;
500508 };
501509
502
-static bool check_symbol(const struct symsearch *syms,
503
- struct module *owner,
504
- unsigned int symnum, void *data)
510
+static bool check_exported_symbol(const struct symsearch *syms,
511
+ struct module *owner,
512
+ unsigned int symnum, void *data)
505513 {
506514 struct find_symbol_arg *fsa = data;
507515
....@@ -552,17 +560,25 @@
552560 #endif
553561 }
554562
555
-static int cmp_name(const void *va, const void *vb)
563
+static const char *kernel_symbol_namespace(const struct kernel_symbol *sym)
556564 {
557
- const char *a;
558
- const struct kernel_symbol *b;
559
- a = va; b = vb;
560
- return strcmp(a, kernel_symbol_name(b));
565
+#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
566
+ if (!sym->namespace_offset)
567
+ return NULL;
568
+ return offset_to_ptr(&sym->namespace_offset);
569
+#else
570
+ return sym->namespace;
571
+#endif
561572 }
562573
563
-static bool find_symbol_in_section(const struct symsearch *syms,
564
- struct module *owner,
565
- void *data)
574
+static int cmp_name(const void *name, const void *sym)
575
+{
576
+ return strcmp(name, kernel_symbol_name(sym));
577
+}
578
+
579
+static bool find_exported_symbol_in_section(const struct symsearch *syms,
580
+ struct module *owner,
581
+ void *data)
566582 {
567583 struct find_symbol_arg *fsa = data;
568584 struct kernel_symbol *sym;
....@@ -570,13 +586,14 @@
570586 sym = bsearch(fsa->name, syms->start, syms->stop - syms->start,
571587 sizeof(struct kernel_symbol), cmp_name);
572588
573
- if (sym != NULL && check_symbol(syms, owner, sym - syms->start, data))
589
+ if (sym != NULL && check_exported_symbol(syms, owner,
590
+ sym - syms->start, data))
574591 return true;
575592
576593 return false;
577594 }
578595
579
-/* Find a symbol and return it, along with, (optional) crc and
596
+/* Find an exported symbol and return it, along with, (optional) crc and
580597 * (optional) module which owns it. Needs preempt disabled or module_mutex. */
581598 static const struct kernel_symbol *find_symbol(const char *name,
582599 struct module **owner,
....@@ -591,7 +608,7 @@
591608 fsa.gplok = gplok;
592609 fsa.warn = warn;
593610
594
- if (each_symbol_section(find_symbol_in_section, &fsa)) {
611
+ if (each_symbol_section(find_exported_symbol_in_section, &fsa)) {
595612 if (owner)
596613 *owner = fsa.owner;
597614 if (crc)
....@@ -616,7 +633,8 @@
616633
617634 module_assert_mutex_or_preempt();
618635
619
- list_for_each_entry_rcu(mod, &modules, list) {
636
+ list_for_each_entry_rcu(mod, &modules, list,
637
+ lockdep_is_held(&module_mutex)) {
620638 if (!even_unformed && mod->state == MODULE_STATE_UNFORMED)
621639 continue;
622640 if (strlen(mod->name) == len && !memcmp(mod->name, name, len))
....@@ -630,7 +648,6 @@
630648 module_assert_mutex();
631649 return find_module_all(name, strlen(name), false);
632650 }
633
-EXPORT_SYMBOL_GPL(find_module);
634651
635652 #ifdef CONFIG_SMP
636653
....@@ -796,6 +813,7 @@
796813
797814 MODINFO_ATTR(version);
798815 MODINFO_ATTR(srcversion);
816
+MODINFO_ATTR(scmversion);
799817
800818 static char last_unloaded_module[MODULE_NAME_LEN+1];
801819
....@@ -1258,6 +1276,7 @@
12581276 &module_uevent,
12591277 &modinfo_version,
12601278 &modinfo_srcversion,
1279
+ &modinfo_scmversion,
12611280 &modinfo_initstate,
12621281 &modinfo_coresize,
12631282 &modinfo_initsize,
....@@ -1388,6 +1407,59 @@
13881407 }
13891408 #endif /* CONFIG_MODVERSIONS */
13901409
1410
+static char *get_modinfo(const struct load_info *info, const char *tag);
1411
+static char *get_next_modinfo(const struct load_info *info, const char *tag,
1412
+ char *prev);
1413
+
1414
+static int verify_namespace_is_imported(const struct load_info *info,
1415
+ const struct kernel_symbol *sym,
1416
+ struct module *mod)
1417
+{
1418
+ const char *namespace;
1419
+ char *imported_namespace;
1420
+
1421
+ namespace = kernel_symbol_namespace(sym);
1422
+ if (namespace && namespace[0]) {
1423
+ imported_namespace = get_modinfo(info, "import_ns");
1424
+ while (imported_namespace) {
1425
+ if (strcmp(namespace, imported_namespace) == 0)
1426
+ return 0;
1427
+ imported_namespace = get_next_modinfo(
1428
+ info, "import_ns", imported_namespace);
1429
+ }
1430
+#ifdef CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS
1431
+ pr_warn(
1432
+#else
1433
+ pr_err(
1434
+#endif
1435
+ "%s: module uses symbol (%s) from namespace %s, but does not import it.\n",
1436
+ mod->name, kernel_symbol_name(sym), namespace);
1437
+#ifndef CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS
1438
+ return -EINVAL;
1439
+#endif
1440
+ }
1441
+ return 0;
1442
+}
1443
+
1444
+static bool inherit_taint(struct module *mod, struct module *owner)
1445
+{
1446
+ if (!owner || !test_bit(TAINT_PROPRIETARY_MODULE, &owner->taints))
1447
+ return true;
1448
+
1449
+ if (mod->using_gplonly_symbols) {
1450
+ pr_err("%s: module using GPL-only symbols uses symbols from proprietary module %s.\n",
1451
+ mod->name, owner->name);
1452
+ return false;
1453
+ }
1454
+
1455
+ if (!test_bit(TAINT_PROPRIETARY_MODULE, &mod->taints)) {
1456
+ pr_warn("%s: module uses symbols from proprietary module %s, inheriting taint.\n",
1457
+ mod->name, owner->name);
1458
+ set_bit(TAINT_PROPRIETARY_MODULE, &mod->taints);
1459
+ }
1460
+ return true;
1461
+}
1462
+
13911463 /* Resolve a symbol for this module. I.e. if we find one, record usage. */
13921464 static const struct kernel_symbol *resolve_symbol(struct module *mod,
13931465 const struct load_info *info,
....@@ -1412,8 +1484,22 @@
14121484 if (!sym)
14131485 goto unlock;
14141486
1487
+ if (license == GPL_ONLY)
1488
+ mod->using_gplonly_symbols = true;
1489
+
1490
+ if (!inherit_taint(mod, owner)) {
1491
+ sym = NULL;
1492
+ goto getname;
1493
+ }
1494
+
14151495 if (!check_version(info, name, mod, crc)) {
14161496 sym = ERR_PTR(-EINVAL);
1497
+ goto getname;
1498
+ }
1499
+
1500
+ err = verify_namespace_is_imported(info, sym, mod);
1501
+ if (err) {
1502
+ sym = ERR_PTR(err);
14171503 goto getname;
14181504 }
14191505
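For context on the verify_namespace_is_imported() call added above: a consumer module has to declare every symbol namespace it uses. A minimal sketch of such a consumer, where FOO_NS and foo_do_something() are hypothetical placeholders for a namespace and a symbol exported elsewhere with EXPORT_SYMBOL_NS_GPL():

#include <linux/module.h>
#include <linux/init.h>

/* Hypothetical symbol exported by another module into the FOO_NS namespace. */
extern int foo_do_something(void);

MODULE_IMPORT_NS(FOO_NS);

static int __init foo_consumer_init(void)
{
	/* Without the MODULE_IMPORT_NS(FOO_NS) line above, resolve_symbol()
	 * now fails with -EINVAL, or only warns when
	 * CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS=y. */
	return foo_do_something();
}

static void __exit foo_consumer_exit(void)
{
}

module_init(foo_consumer_init);
module_exit(foo_consumer_exit);
MODULE_LICENSE("GPL");
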
....@@ -1469,7 +1555,7 @@
14691555 struct module_sect_attrs {
14701556 struct attribute_group grp;
14711557 unsigned int nsections;
1472
- struct module_sect_attr attrs[0];
1558
+ struct module_sect_attr attrs[];
14731559 };
14741560
14751561 #define MODULE_SECT_READ_SIZE (3 /* "0x", "\n" */ + (BITS_PER_LONG / 4))
....@@ -1582,7 +1668,7 @@
15821668 struct module_notes_attrs {
15831669 struct kobject *dir;
15841670 unsigned int notes;
1585
- struct bin_attribute attrs[0];
1671
+ struct bin_attribute attrs[];
15861672 };
15871673
15881674 static ssize_t module_notes_read(struct file *filp, struct kobject *kobj,
....@@ -1915,7 +2001,6 @@
19152001 mod_sysfs_fini(mod);
19162002 }
19172003
1918
-#ifdef CONFIG_ARCH_HAS_STRICT_MODULE_RWX
19192004 /*
19202005 * LKM RO/NX protection: protect module's text/ro-data
19212006 * from modification and any data from execution.
....@@ -1929,6 +2014,14 @@
19292014 *
19302015 * These values are always page-aligned (as is base)
19312016 */
2017
+
2018
+/*
2019
+ * Since some arches are moving towards PAGE_KERNEL module allocations instead
2020
+ * of PAGE_KERNEL_EXEC, keep frob_text() and module_enable_x() outside of the
2021
+ * CONFIG_STRICT_MODULE_RWX block below because they are needed regardless of
2022
+ * whether we are strict.
2023
+ */
2024
+#ifdef CONFIG_ARCH_HAS_STRICT_MODULE_RWX
19322025 static void frob_text(const struct module_layout *layout,
19332026 int (*set_memory)(unsigned long start, int num_pages))
19342027 {
....@@ -1937,6 +2030,15 @@
19372030 set_memory((unsigned long)layout->base,
19382031 layout->text_size >> PAGE_SHIFT);
19392032 }
2033
+
2034
+static void module_enable_x(const struct module *mod)
2035
+{
2036
+ frob_text(&mod->core_layout, set_memory_x);
2037
+ frob_text(&mod->init_layout, set_memory_x);
2038
+}
2039
+#else /* !CONFIG_ARCH_HAS_STRICT_MODULE_RWX */
2040
+static void module_enable_x(const struct module *mod) { }
2041
+#endif /* CONFIG_ARCH_HAS_STRICT_MODULE_RWX */
19402042
19412043 #ifdef CONFIG_STRICT_MODULE_RWX
19422044 static void frob_rodata(const struct module_layout *layout,
....@@ -1969,24 +2071,13 @@
19692071 (layout->size - layout->ro_after_init_size) >> PAGE_SHIFT);
19702072 }
19712073
1972
-/* livepatching wants to disable read-only so it can frob module. */
1973
-void module_disable_ro(const struct module *mod)
2074
+static void module_enable_ro(const struct module *mod, bool after_init)
19742075 {
19752076 if (!rodata_enabled)
19762077 return;
19772078
1978
- frob_text(&mod->core_layout, set_memory_rw);
1979
- frob_rodata(&mod->core_layout, set_memory_rw);
1980
- frob_ro_after_init(&mod->core_layout, set_memory_rw);
1981
- frob_text(&mod->init_layout, set_memory_rw);
1982
- frob_rodata(&mod->init_layout, set_memory_rw);
1983
-}
1984
-
1985
-void module_enable_ro(const struct module *mod, bool after_init)
1986
-{
1987
- if (!rodata_enabled)
1988
- return;
1989
-
2079
+ set_vm_flush_reset_perms(mod->core_layout.base);
2080
+ set_vm_flush_reset_perms(mod->init_layout.base);
19902081 frob_text(&mod->core_layout, set_memory_ro);
19912082
19922083 frob_rodata(&mod->core_layout, set_memory_ro);
....@@ -2006,88 +2097,32 @@
20062097 frob_writable_data(&mod->init_layout, set_memory_nx);
20072098 }
20082099
2009
-static void module_disable_nx(const struct module *mod)
2100
+static int module_enforce_rwx_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
2101
+ char *secstrings, struct module *mod)
20102102 {
2011
- frob_rodata(&mod->core_layout, set_memory_x);
2012
- frob_ro_after_init(&mod->core_layout, set_memory_x);
2013
- frob_writable_data(&mod->core_layout, set_memory_x);
2014
- frob_rodata(&mod->init_layout, set_memory_x);
2015
- frob_writable_data(&mod->init_layout, set_memory_x);
2016
-}
2103
+ const unsigned long shf_wx = SHF_WRITE|SHF_EXECINSTR;
2104
+ int i;
20172105
2018
-/* Iterate through all modules and set each module's text as RW */
2019
-void set_all_modules_text_rw(void)
2020
-{
2021
- struct module *mod;
2022
-
2023
- if (!rodata_enabled)
2024
- return;
2025
-
2026
- mutex_lock(&module_mutex);
2027
- list_for_each_entry_rcu(mod, &modules, list) {
2028
- if (mod->state == MODULE_STATE_UNFORMED)
2029
- continue;
2030
-
2031
- frob_text(&mod->core_layout, set_memory_rw);
2032
- frob_text(&mod->init_layout, set_memory_rw);
2106
+ for (i = 0; i < hdr->e_shnum; i++) {
2107
+ if ((sechdrs[i].sh_flags & shf_wx) == shf_wx) {
2108
+ pr_err("%s: section %s (index %d) has invalid WRITE|EXEC flags\n",
2109
+ mod->name, secstrings + sechdrs[i].sh_name, i);
2110
+ return -ENOEXEC;
2111
+ }
20332112 }
2034
- mutex_unlock(&module_mutex);
2035
-}
20362113
2037
-/* Iterate through all modules and set each module's text as RO */
2038
-void set_all_modules_text_ro(void)
2039
-{
2040
- struct module *mod;
2041
-
2042
- if (!rodata_enabled)
2043
- return;
2044
-
2045
- mutex_lock(&module_mutex);
2046
- list_for_each_entry_rcu(mod, &modules, list) {
2047
- /*
2048
- * Ignore going modules since it's possible that ro
2049
- * protection has already been disabled, otherwise we'll
2050
- * run into protection faults at module deallocation.
2051
- */
2052
- if (mod->state == MODULE_STATE_UNFORMED ||
2053
- mod->state == MODULE_STATE_GOING)
2054
- continue;
2055
-
2056
- frob_text(&mod->core_layout, set_memory_ro);
2057
- frob_text(&mod->init_layout, set_memory_ro);
2058
- }
2059
- mutex_unlock(&module_mutex);
2060
-}
2061
-
2062
-static void disable_ro_nx(const struct module_layout *layout)
2063
-{
2064
- if (rodata_enabled) {
2065
- frob_text(layout, set_memory_rw);
2066
- frob_rodata(layout, set_memory_rw);
2067
- frob_ro_after_init(layout, set_memory_rw);
2068
- }
2069
- frob_rodata(layout, set_memory_x);
2070
- frob_ro_after_init(layout, set_memory_x);
2071
- frob_writable_data(layout, set_memory_x);
2114
+ return 0;
20722115 }
20732116
20742117 #else /* !CONFIG_STRICT_MODULE_RWX */
2075
-static void disable_ro_nx(const struct module_layout *layout) { }
20762118 static void module_enable_nx(const struct module *mod) { }
2077
-static void module_disable_nx(const struct module *mod) { }
2078
-#endif /* CONFIG_STRICT_MODULE_RWX */
2079
-
2080
-static void module_enable_x(const struct module *mod)
2119
+static void module_enable_ro(const struct module *mod, bool after_init) {}
2120
+static int module_enforce_rwx_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
2121
+ char *secstrings, struct module *mod)
20812122 {
2082
- frob_text(&mod->core_layout, set_memory_x);
2083
- frob_text(&mod->init_layout, set_memory_x);
2123
+ return 0;
20842124 }
2085
-#else /* !CONFIG_ARCH_HAS_STRICT_MODULE_RWX */
2086
-static void disable_ro_nx(const struct module_layout *layout) { }
2087
-static void module_enable_nx(const struct module *mod) { }
2088
-static void module_disable_nx(const struct module *mod) { }
2089
-static void module_enable_x(const struct module *mod) { }
2090
-#endif /* CONFIG_ARCH_HAS_STRICT_MODULE_RWX */
2125
+#endif /* CONFIG_STRICT_MODULE_RWX */
20912126
20922127 #ifdef CONFIG_LIVEPATCH
20932128 /*
....@@ -2166,6 +2201,11 @@
21662201
21672202 void __weak module_memfree(void *module_region)
21682203 {
2204
+ /*
2205
+ * This memory may be RO, and freeing RO memory in an interrupt is not
2206
+ * supported by vmalloc.
2207
+ */
2208
+ WARN_ON(in_interrupt());
21692209 vfree(module_region);
21702210 }
21712211
....@@ -2215,16 +2255,18 @@
22152255 /* Remove this module from bug list, this uses list_del_rcu */
22162256 module_bug_cleanup(mod);
22172257 /* Wait for RCU-sched synchronizing before releasing mod->list and buglist. */
2218
- synchronize_sched();
2258
+ synchronize_rcu();
22192259 mutex_unlock(&module_mutex);
2220
-
2221
- /* This may be empty, but that's OK */
2222
- disable_ro_nx(&mod->init_layout);
22232260
22242261 /* Clean up CFI for the module. */
22252262 cfi_cleanup(mod);
22262263
2264
+ /* This may be empty, but that's OK */
22272265 module_arch_freeing_init(mod);
2266
+ trace_android_vh_set_memory_rw((unsigned long)mod->init_layout.base,
2267
+ (mod->init_layout.size)>>PAGE_SHIFT);
2268
+ trace_android_vh_set_memory_nx((unsigned long)mod->init_layout.base,
2269
+ (mod->init_layout.size)>>PAGE_SHIFT);
22282270 module_memfree(mod->init_layout.base);
22292271 kfree(mod->args);
22302272 percpu_modfree(mod);
....@@ -2233,7 +2275,10 @@
22332275 lockdep_free_key_range(mod->core_layout.base, mod->core_layout.size);
22342276
22352277 /* Finally, free the core (containing the module structure) */
2236
- disable_ro_nx(&mod->core_layout);
2278
+ trace_android_vh_set_memory_rw((unsigned long)mod->core_layout.base,
2279
+ (mod->core_layout.size)>>PAGE_SHIFT);
2280
+ trace_android_vh_set_memory_nx((unsigned long)mod->core_layout.base,
2281
+ (mod->core_layout.size)>>PAGE_SHIFT);
22372282 module_memfree(mod->core_layout.base);
22382283 }
22392284
....@@ -2252,13 +2297,22 @@
22522297 }
22532298 EXPORT_SYMBOL_GPL(__symbol_get);
22542299
2300
+static bool module_init_layout_section(const char *sname)
2301
+{
2302
+#ifndef CONFIG_MODULE_UNLOAD
2303
+ if (module_exit_section(sname))
2304
+ return true;
2305
+#endif
2306
+ return module_init_section(sname);
2307
+}
2308
+
22552309 /*
22562310 * Ensure that an exported symbol [global namespace] does not already exist
22572311 * in the kernel or in some other module's exported symbol table.
22582312 *
22592313 * You must hold the module_mutex.
22602314 */
2261
-static int verify_export_symbols(struct module *mod)
2315
+static int verify_exported_symbols(struct module *mod)
22622316 {
22632317 unsigned int i;
22642318 struct module *owner;
....@@ -2393,11 +2447,13 @@
23932447 if (!(info->sechdrs[infosec].sh_flags & SHF_ALLOC))
23942448 continue;
23952449
2396
- /* Livepatch relocation sections are applied by livepatch */
23972450 if (info->sechdrs[i].sh_flags & SHF_RELA_LIVEPATCH)
2398
- continue;
2399
-
2400
- if (info->sechdrs[i].sh_type == SHT_REL)
2451
+ err = klp_apply_section_relocs(mod, info->sechdrs,
2452
+ info->secstrings,
2453
+ info->strtab,
2454
+ info->index.sym, i,
2455
+ NULL);
2456
+ else if (info->sechdrs[i].sh_type == SHT_REL)
24012457 err = apply_relocate(info->sechdrs, info->strtab,
24022458 info->index.sym, i, mod);
24032459 else if (info->sechdrs[i].sh_type == SHT_RELA)
....@@ -2459,7 +2515,7 @@
24592515 if ((s->sh_flags & masks[m][0]) != masks[m][0]
24602516 || (s->sh_flags & masks[m][1])
24612517 || s->sh_entsize != ~0UL
2462
- || strstarts(sname, ".init"))
2518
+ || module_init_layout_section(sname))
24632519 continue;
24642520 s->sh_entsize = get_offset(mod, &mod->core_layout.size, s, i);
24652521 pr_debug("\t%s\n", sname);
....@@ -2492,7 +2548,7 @@
24922548 if ((s->sh_flags & masks[m][0]) != masks[m][0]
24932549 || (s->sh_flags & masks[m][1])
24942550 || s->sh_entsize != ~0UL
2495
- || !strstarts(sname, ".init"))
2551
+ || !module_init_layout_section(sname))
24962552 continue;
24972553 s->sh_entsize = (get_offset(mod, &mod->init_layout.size, s, i)
24982554 | INIT_OFFSET_MASK);
....@@ -2554,7 +2610,8 @@
25542610 return string;
25552611 }
25562612
2557
-static char *get_modinfo(struct load_info *info, const char *tag)
2613
+static char *get_next_modinfo(const struct load_info *info, const char *tag,
2614
+ char *prev)
25582615 {
25592616 char *p;
25602617 unsigned int taglen = strlen(tag);
....@@ -2565,11 +2622,23 @@
25652622 * get_modinfo() calls made before rewrite_section_headers()
25662623 * must use sh_offset, as sh_addr isn't set!
25672624 */
2568
- for (p = (char *)info->hdr + infosec->sh_offset; p; p = next_string(p, &size)) {
2625
+ char *modinfo = (char *)info->hdr + infosec->sh_offset;
2626
+
2627
+ if (prev) {
2628
+ size -= prev - modinfo;
2629
+ modinfo = next_string(prev, &size);
2630
+ }
2631
+
2632
+ for (p = modinfo; p; p = next_string(p, &size)) {
25692633 if (strncmp(p, tag, taglen) == 0 && p[taglen] == '=')
25702634 return p + taglen + 1;
25712635 }
25722636 return NULL;
2637
+}
2638
+
2639
+static char *get_modinfo(const struct load_info *info, const char *tag)
2640
+{
2641
+ return get_next_modinfo(info, tag, NULL);
25732642 }
25742643
25752644 static void setup_modinfo(struct module *mod, struct load_info *info)
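For context, the .modinfo section that get_modinfo()/get_next_modinfo() walk is just a flat blob of NUL-terminated "tag=value" strings, and a tag such as import_ns may appear several times. A stand-alone user-space sketch of that iteration, using made-up modinfo contents:

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Hypothetical .modinfo contents: embedded NULs separate the entries. */
	const char modinfo[] = "license=GPL\0import_ns=FOO\0import_ns=BAR";
	size_t size = sizeof(modinfo);
	const char *p = modinfo;

	while (p < modinfo + size && *p) {
		if (strncmp(p, "import_ns=", 10) == 0)
			printf("imported namespace: %s\n", p + 10);
		p += strlen(p) + 1;	/* like next_string(): skip past the NUL */
	}
	return 0;
}
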
....@@ -2596,10 +2665,10 @@
25962665
25972666 #ifdef CONFIG_KALLSYMS
25982667
2599
-/* lookup symbol in given range of kernel_symbols */
2600
-static const struct kernel_symbol *lookup_symbol(const char *name,
2601
- const struct kernel_symbol *start,
2602
- const struct kernel_symbol *stop)
2668
+/* Lookup exported symbol in given range of kernel_symbols */
2669
+static const struct kernel_symbol *lookup_exported_symbol(const char *name,
2670
+ const struct kernel_symbol *start,
2671
+ const struct kernel_symbol *stop)
26032672 {
26042673 return bsearch(name, start, stop - start,
26052674 sizeof(struct kernel_symbol), cmp_name);
....@@ -2610,9 +2679,10 @@
26102679 {
26112680 const struct kernel_symbol *ks;
26122681 if (!mod)
2613
- ks = lookup_symbol(name, __start___ksymtab, __stop___ksymtab);
2682
+ ks = lookup_exported_symbol(name, __start___ksymtab, __stop___ksymtab);
26142683 else
2615
- ks = lookup_symbol(name, mod->syms, mod->syms + mod->num_syms);
2684
+ ks = lookup_exported_symbol(name, mod->syms, mod->syms + mod->num_syms);
2685
+
26162686 return ks != NULL && kernel_symbol_value(ks) == value;
26172687 }
26182688
....@@ -2720,6 +2790,8 @@
27202790 info->symoffs = ALIGN(mod->core_layout.size, symsect->sh_addralign ?: 1);
27212791 info->stroffs = mod->core_layout.size = info->symoffs + ndst * sizeof(Elf_Sym);
27222792 mod->core_layout.size += strtab_size;
2793
+ info->core_typeoffs = mod->core_layout.size;
2794
+ mod->core_layout.size += ndst * sizeof(char);
27232795 mod->core_layout.size = debug_align(mod->core_layout.size);
27242796
27252797 /* Put string table section at end of init part of module. */
....@@ -2733,6 +2805,8 @@
27332805 __alignof__(struct mod_kallsyms));
27342806 info->mod_kallsyms_init_off = mod->init_layout.size;
27352807 mod->init_layout.size += sizeof(struct mod_kallsyms);
2808
+ info->init_typeoffs = mod->init_layout.size;
2809
+ mod->init_layout.size += nsrc * sizeof(char);
27362810 mod->init_layout.size = debug_align(mod->init_layout.size);
27372811 }
27382812
....@@ -2756,20 +2830,23 @@
27562830 mod->kallsyms->num_symtab = symsec->sh_size / sizeof(Elf_Sym);
27572831 /* Make sure we get permanent strtab: don't use info->strtab. */
27582832 mod->kallsyms->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
2833
+ mod->kallsyms->typetab = mod->init_layout.base + info->init_typeoffs;
27592834
2760
- /* Set types up while we still have access to sections. */
2761
- for (i = 0; i < mod->kallsyms->num_symtab; i++)
2762
- mod->kallsyms->symtab[i].st_info
2763
- = elf_type(&mod->kallsyms->symtab[i], info);
2764
-
2765
- /* Now populate the cut down core kallsyms for after init. */
2835
+ /*
2836
+ * Now populate the cut down core kallsyms for after init
2837
+ * and set types up while we still have access to sections.
2838
+ */
27662839 mod->core_kallsyms.symtab = dst = mod->core_layout.base + info->symoffs;
27672840 mod->core_kallsyms.strtab = s = mod->core_layout.base + info->stroffs;
2841
+ mod->core_kallsyms.typetab = mod->core_layout.base + info->core_typeoffs;
27682842 src = mod->kallsyms->symtab;
27692843 for (ndst = i = 0; i < mod->kallsyms->num_symtab; i++) {
2844
+ mod->kallsyms->typetab[i] = elf_type(src + i, info);
27702845 if (i == 0 || is_livepatch_module(mod) ||
27712846 is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum,
27722847 info->index.pcpu)) {
2848
+ mod->core_kallsyms.typetab[ndst] =
2849
+ mod->kallsyms->typetab[i];
27732850 dst[ndst] = src[i];
27742851 dst[ndst++].st_name = s - mod->core_kallsyms.strtab;
27752852 s += strlcpy(s, &mod->kallsyms->strtab[src[i].st_name],
....@@ -2792,11 +2869,7 @@
27922869 {
27932870 if (!debug)
27942871 return;
2795
-#ifdef CONFIG_DYNAMIC_DEBUG
2796
- if (ddebug_add_module(debug, num, mod->name))
2797
- pr_err("dynamic debug error adding module: %s\n",
2798
- debug->modname);
2799
-#endif
2872
+ ddebug_add_module(debug, num, mod->name);
28002873 }
28012874
28022875 static void dynamic_debug_remove(struct module *mod, struct _ddebug *debug)
....@@ -2807,7 +2880,19 @@
28072880
28082881 void * __weak module_alloc(unsigned long size)
28092882 {
2810
- return vmalloc_exec(size);
2883
+ return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
2884
+ GFP_KERNEL, PAGE_KERNEL_EXEC, VM_FLUSH_RESET_PERMS,
2885
+ NUMA_NO_NODE, __builtin_return_address(0));
2886
+}
2887
+
2888
+bool __weak module_init_section(const char *name)
2889
+{
2890
+ return strstarts(name, ".init");
2891
+}
2892
+
2893
+bool __weak module_exit_section(const char *name)
2894
+{
2895
+ return strstarts(name, ".exit");
28112896 }
28122897
28132898 #ifdef CONFIG_DEBUG_KMEMLEAK
....@@ -2840,8 +2925,9 @@
28402925 #ifdef CONFIG_MODULE_SIG
28412926 static int module_sig_check(struct load_info *info, int flags)
28422927 {
2843
- int err = -ENOKEY;
2928
+ int err = -ENODATA;
28442929 const unsigned long markerlen = sizeof(MODULE_SIG_STRING) - 1;
2930
+ const char *reason;
28452931 const void *mod = info->hdr;
28462932
28472933 /*
....@@ -2856,16 +2942,39 @@
28562942 err = mod_verify_sig(mod, info);
28572943 }
28582944
2859
- if (!err) {
2945
+ switch (err) {
2946
+ case 0:
28602947 info->sig_ok = true;
28612948 return 0;
2949
+
2950
+ /* We don't permit modules to be loaded into trusted kernels
2951
+ * without a valid signature on them, but if we're not
2952
+ * enforcing, certain errors are non-fatal.
2953
+ */
2954
+ case -ENODATA:
2955
+ reason = "unsigned module";
2956
+ break;
2957
+ case -ENOPKG:
2958
+ reason = "module with unsupported crypto";
2959
+ break;
2960
+ case -ENOKEY:
2961
+ reason = "module with unavailable key";
2962
+ break;
2963
+
2964
+ /* All other errors are fatal, including nomem, unparseable
2965
+ * signatures and signature check failures - even if signatures
2966
+ * aren't required.
2967
+ */
2968
+ default:
2969
+ return err;
28622970 }
28632971
2864
- /* Not having a signature is only an error if we're strict. */
2865
- if (err == -ENOKEY && !is_module_sig_enforced())
2866
- err = 0;
2972
+ if (is_module_sig_enforced()) {
2973
+ pr_notice("Loading of %s is rejected\n", reason);
2974
+ return -EKEYREJECTED;
2975
+ }
28672976
2868
- return err;
2977
+ return security_locked_down(LOCKDOWN_MODULE_SIGNATURE);
28692978 }
28702979 #else /* !CONFIG_MODULE_SIG */
28712980 static int module_sig_check(struct load_info *info, int flags)
....@@ -2874,9 +2983,33 @@
28742983 }
28752984 #endif /* !CONFIG_MODULE_SIG */
28762985
2877
-/* Sanity checks against invalid binaries, wrong arch, weird elf version. */
2878
-static int elf_header_check(struct load_info *info)
2986
+static int validate_section_offset(struct load_info *info, Elf_Shdr *shdr)
28792987 {
2988
+ unsigned long secend;
2989
+
2990
+ /*
2991
+ * Check for both overflow and offset/size being
2992
+ * too large.
2993
+ */
2994
+ secend = shdr->sh_offset + shdr->sh_size;
2995
+ if (secend < shdr->sh_offset || secend > info->len)
2996
+ return -ENOEXEC;
2997
+
2998
+ return 0;
2999
+}
3000
+
3001
+/*
3002
+ * Sanity checks against invalid binaries, wrong arch, weird elf version.
3003
+ *
3004
+ * Also do basic validity checks against section offsets and sizes, the
3005
+ * section name string table, and the indices used for it (sh_name).
3006
+ */
3007
+static int elf_validity_check(struct load_info *info)
3008
+{
3009
+ unsigned int i;
3010
+ Elf_Shdr *shdr, *strhdr;
3011
+ int err;
3012
+
28803013 if (info->len < sizeof(*(info->hdr)))
28813014 return -ENOEXEC;
28823015
....@@ -2886,10 +3019,77 @@
28863019 || info->hdr->e_shentsize != sizeof(Elf_Shdr))
28873020 return -ENOEXEC;
28883021
3022
+ /*
3023
+ * e_shnum is 16 bits, and sizeof(Elf_Shdr) is
3024
+ * known and small. So e_shnum * sizeof(Elf_Shdr)
3025
+ * will not overflow unsigned long on any platform.
3026
+ */
28893027 if (info->hdr->e_shoff >= info->len
28903028 || (info->hdr->e_shnum * sizeof(Elf_Shdr) >
28913029 info->len - info->hdr->e_shoff))
28923030 return -ENOEXEC;
3031
+
3032
+ info->sechdrs = (void *)info->hdr + info->hdr->e_shoff;
3033
+
3034
+ /*
3035
+ * Verify if the section name table index is valid.
3036
+ */
3037
+ if (info->hdr->e_shstrndx == SHN_UNDEF
3038
+ || info->hdr->e_shstrndx >= info->hdr->e_shnum)
3039
+ return -ENOEXEC;
3040
+
3041
+ strhdr = &info->sechdrs[info->hdr->e_shstrndx];
3042
+ err = validate_section_offset(info, strhdr);
3043
+ if (err < 0)
3044
+ return err;
3045
+
3046
+ /*
3047
+ * The section name table must be NUL-terminated, as required
3048
+ * by the spec. This makes strcmp and pr_* calls that access
3049
+ * strings in the section safe.
3050
+ */
3051
+ info->secstrings = (void *)info->hdr + strhdr->sh_offset;
3052
+ if (info->secstrings[strhdr->sh_size - 1] != '\0')
3053
+ return -ENOEXEC;
3054
+
3055
+ /*
3056
+ * The code assumes that section 0 has a length of zero and
3057
+ * an addr of zero, so check for it.
3058
+ */
3059
+ if (info->sechdrs[0].sh_type != SHT_NULL
3060
+ || info->sechdrs[0].sh_size != 0
3061
+ || info->sechdrs[0].sh_addr != 0)
3062
+ return -ENOEXEC;
3063
+
3064
+ for (i = 1; i < info->hdr->e_shnum; i++) {
3065
+ shdr = &info->sechdrs[i];
3066
+ switch (shdr->sh_type) {
3067
+ case SHT_NULL:
3068
+ case SHT_NOBITS:
3069
+ continue;
3070
+ case SHT_SYMTAB:
3071
+ if (shdr->sh_link == SHN_UNDEF
3072
+ || shdr->sh_link >= info->hdr->e_shnum)
3073
+ return -ENOEXEC;
3074
+ fallthrough;
3075
+ default:
3076
+ err = validate_section_offset(info, shdr);
3077
+ if (err < 0) {
3078
+ pr_err("Invalid ELF section in module (section %u type %u)\n",
3079
+ i, shdr->sh_type);
3080
+ return err;
3081
+ }
3082
+
3083
+ if (shdr->sh_flags & SHF_ALLOC) {
3084
+ if (shdr->sh_name >= strhdr->sh_size) {
3085
+ pr_err("Invalid ELF section name in module (section %u type %u)\n",
3086
+ i, shdr->sh_type);
3087
+ return -ENOEXEC;
3088
+ }
3089
+ }
3090
+ break;
3091
+ }
3092
+ }
28933093
28943094 return 0;
28953095 }
....@@ -2955,22 +3155,27 @@
29553155 if (info->len < sizeof(*(info->hdr)))
29563156 return -ENOEXEC;
29573157
2958
- err = security_kernel_load_data(LOADING_MODULE);
3158
+ err = security_kernel_load_data(LOADING_MODULE, true);
29593159 if (err)
29603160 return err;
29613161
29623162 /* Suck in entire file: we'll want most of it. */
2963
- info->hdr = __vmalloc(info->len,
2964
- GFP_KERNEL | __GFP_NOWARN, PAGE_KERNEL);
3163
+ info->hdr = __vmalloc(info->len, GFP_KERNEL | __GFP_NOWARN);
29653164 if (!info->hdr)
29663165 return -ENOMEM;
29673166
29683167 if (copy_chunked_from_user(info->hdr, umod, info->len) != 0) {
2969
- vfree(info->hdr);
2970
- return -EFAULT;
3168
+ err = -EFAULT;
3169
+ goto out;
29713170 }
29723171
2973
- return 0;
3172
+ err = security_kernel_post_load_data((char *)info->hdr, info->len,
3173
+ LOADING_MODULE, "init_module");
3174
+out:
3175
+ if (err)
3176
+ vfree(info->hdr);
3177
+
3178
+ return err;
29743179 }
29753180
29763181 static void free_copy(struct load_info *info)
....@@ -2987,21 +3192,11 @@
29873192
29883193 for (i = 1; i < info->hdr->e_shnum; i++) {
29893194 Elf_Shdr *shdr = &info->sechdrs[i];
2990
- if (shdr->sh_type != SHT_NOBITS
2991
- && info->len < shdr->sh_offset + shdr->sh_size) {
2992
- pr_err("Module len %lu truncated\n", info->len);
2993
- return -ENOEXEC;
2994
- }
29953195
29963196 /* Mark all sections sh_addr with their address in the
29973197 temporary image. */
29983198 shdr->sh_addr = (size_t)info->hdr + shdr->sh_offset;
29993199
3000
-#ifndef CONFIG_MODULE_UNLOAD
3001
- /* Don't load .exit sections */
3002
- if (strstarts(info->secstrings+shdr->sh_name, ".exit"))
3003
- shdr->sh_flags &= ~(unsigned long)SHF_ALLOC;
3004
-#endif
30053200 }
30063201
30073202 /* Track but don't keep modinfo and version sections. */
....@@ -3022,11 +3217,6 @@
30223217 static int setup_load_info(struct load_info *info, int flags)
30233218 {
30243219 unsigned int i;
3025
-
3026
- /* Set up the convenience variables */
3027
- info->sechdrs = (void *)info->hdr + info->hdr->e_shoff;
3028
- info->secstrings = (void *)info->hdr
3029
- + info->sechdrs[info->hdr->e_shstrndx].sh_offset;
30303220
30313221 /* Try to find a name early so we can log errors with a module name */
30323222 info->index.info = find_sec(info, ".modinfo");
....@@ -3164,10 +3354,23 @@
31643354 }
31653355 #endif
31663356
3357
+ mod->noinstr_text_start = section_objs(info, ".noinstr.text", 1,
3358
+ &mod->noinstr_text_size);
3359
+
31673360 #ifdef CONFIG_TRACEPOINTS
31683361 mod->tracepoints_ptrs = section_objs(info, "__tracepoints_ptrs",
31693362 sizeof(*mod->tracepoints_ptrs),
31703363 &mod->num_tracepoints);
3364
+#endif
3365
+#ifdef CONFIG_TREE_SRCU
3366
+ mod->srcu_struct_ptrs = section_objs(info, "___srcu_struct_ptrs",
3367
+ sizeof(*mod->srcu_struct_ptrs),
3368
+ &mod->num_srcu_structs);
3369
+#endif
3370
+#ifdef CONFIG_BPF_EVENTS
3371
+ mod->bpf_raw_events = section_objs(info, "__bpf_raw_tp_map",
3372
+ sizeof(*mod->bpf_raw_events),
3373
+ &mod->num_bpf_raw_events);
31713374 #endif
31723375 #ifdef CONFIG_JUMP_LABEL
31733376 mod->jump_entries = section_objs(info, "__jump_table",
....@@ -3189,7 +3392,7 @@
31893392 #endif
31903393 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
31913394 /* sechdrs[0].sh_size is always zero */
3192
- mod->ftrace_callsites = section_objs(info, "__mcount_loc",
3395
+ mod->ftrace_callsites = section_objs(info, FTRACE_CALLSITE_SECTION,
31933396 sizeof(*mod->ftrace_callsites),
31943397 &mod->num_ftrace_callsites);
31953398 #endif
....@@ -3198,13 +3401,25 @@
31983401 sizeof(*mod->ei_funcs),
31993402 &mod->num_ei_funcs);
32003403 #endif
3404
+#ifdef CONFIG_KPROBES
3405
+ mod->kprobes_text_start = section_objs(info, ".kprobes.text", 1,
3406
+ &mod->kprobes_text_size);
3407
+ mod->kprobe_blacklist = section_objs(info, "_kprobe_blacklist",
3408
+ sizeof(unsigned long),
3409
+ &mod->num_kprobe_blacklist);
3410
+#endif
3411
+#ifdef CONFIG_HAVE_STATIC_CALL_INLINE
3412
+ mod->static_call_sites = section_objs(info, ".static_call_sites",
3413
+ sizeof(*mod->static_call_sites),
3414
+ &mod->num_static_call_sites);
3415
+#endif
32013416 mod->extable = section_objs(info, "__ex_table",
32023417 sizeof(*mod->extable), &mod->num_exentries);
32033418
32043419 if (section_addr(info, "__obsparm"))
32053420 pr_warn("%s: Ignoring obsolete parameters\n", mod->name);
32063421
3207
- info->debug = section_objs(info, "__verbose",
3422
+ info->debug = section_objs(info, "__dyndbg",
32083423 sizeof(*info->debug), &info->num_debug);
32093424
32103425 return 0;
....@@ -3316,12 +3531,6 @@
33163531
33173532 static void flush_module_icache(const struct module *mod)
33183533 {
3319
- mm_segment_t old_fs;
3320
-
3321
- /* flush the icache in correct context */
3322
- old_fs = get_fs();
3323
- set_fs(KERNEL_DS);
3324
-
33253534 /*
33263535 * Flush the instruction cache, since we've played with text.
33273536 * Do it before processing of module parameters, so the module
....@@ -3333,8 +3542,6 @@
33333542 + mod->init_layout.size);
33343543 flush_icache_range((unsigned long)mod->core_layout.base,
33353544 (unsigned long)mod->core_layout.base + mod->core_layout.size);
3336
-
3337
- set_fs(old_fs);
33383545 }
33393546
33403547 int __weak module_frob_arch_sections(Elf_Ehdr *hdr,
....@@ -3382,6 +3589,11 @@
33823589 if (err < 0)
33833590 return ERR_PTR(err);
33843591
3592
+ err = module_enforce_rwx_sections(info->hdr, info->sechdrs,
3593
+ info->secstrings, info->mod);
3594
+ if (err < 0)
3595
+ return ERR_PTR(err);
3596
+
33853597 /* We will do a special allocation for per-cpu sections later. */
33863598 info->sechdrs[info->index.pcpu].sh_flags &= ~(unsigned long)SHF_ALLOC;
33873599
....@@ -3391,6 +3603,15 @@
33913603 * Note: ro_after_init sections also have SHF_{WRITE,ALLOC} set.
33923604 */
33933605 ndx = find_sec(info, ".data..ro_after_init");
3606
+ if (ndx)
3607
+ info->sechdrs[ndx].sh_flags |= SHF_RO_AFTER_INIT;
3608
+ /*
3609
+ * Mark the __jump_table section as ro_after_init as well: these data
3610
+ * structures are never modified, with the exception of entries that
3611
+ * refer to code in the __init section, which are annotated as such
3612
+ * at module load time.
3613
+ */
3614
+ ndx = find_sec(info, "__jump_table");
33943615 if (ndx)
33953616 info->sechdrs[ndx].sh_flags |= SHF_RO_AFTER_INIT;
33963617
....@@ -3416,7 +3637,15 @@
34163637 {
34173638 percpu_modfree(mod);
34183639 module_arch_freeing_init(mod);
3640
+ trace_android_vh_set_memory_rw((unsigned long)mod->init_layout.base,
3641
+ (mod->init_layout.size)>>PAGE_SHIFT);
3642
+ trace_android_vh_set_memory_nx((unsigned long)mod->init_layout.base,
3643
+ (mod->init_layout.size)>>PAGE_SHIFT);
34193644 module_memfree(mod->init_layout.base);
3645
+ trace_android_vh_set_memory_rw((unsigned long)mod->core_layout.base,
3646
+ (mod->core_layout.size)>>PAGE_SHIFT);
3647
+ trace_android_vh_set_memory_nx((unsigned long)mod->core_layout.base,
3648
+ (mod->core_layout.size)>>PAGE_SHIFT);
34203649 module_memfree(mod->core_layout.base);
34213650 }
34223651
....@@ -3426,8 +3655,6 @@
34263655 {
34273656 return 0;
34283657 }
3429
-
3430
-static void cfi_init(struct module *mod);
34313658
34323659 static int post_relocation(struct module *mod, const struct load_info *info)
34333660 {
....@@ -3440,9 +3667,6 @@
34403667
34413668 /* Setup kallsyms-specific fields. */
34423669 add_kallsyms(mod, info);
3443
-
3444
- /* Setup CFI for the module. */
3445
- cfi_init(mod);
34463670
34473671 /* Arch-specific module finalizing. */
34483672 return module_finalize(info->hdr, info->sechdrs, mod);
....@@ -3481,15 +3705,24 @@
34813705
34823706 /* For freeing module_init on success, in case kallsyms traversing */
34833707 struct mod_initfree {
3484
- struct rcu_head rcu;
3708
+ struct llist_node node;
34853709 void *module_init;
34863710 };
34873711
3488
-static void do_free_init(struct rcu_head *head)
3712
+static void do_free_init(struct work_struct *w)
34893713 {
3490
- struct mod_initfree *m = container_of(head, struct mod_initfree, rcu);
3491
- module_memfree(m->module_init);
3492
- kfree(m);
3714
+ struct llist_node *pos, *n, *list;
3715
+ struct mod_initfree *initfree;
3716
+
3717
+ list = llist_del_all(&init_free_list);
3718
+
3719
+ synchronize_rcu();
3720
+
3721
+ llist_for_each_safe(pos, n, list) {
3722
+ initfree = container_of(pos, struct mod_initfree, node);
3723
+ module_memfree(initfree->module_init);
3724
+ kfree(initfree);
3725
+ }
34933726 }
34943727
34953728 /*
....@@ -3555,9 +3788,13 @@
35553788 rcu_assign_pointer(mod->kallsyms, &mod->core_kallsyms);
35563789 #endif
35573790 module_enable_ro(mod, true);
3791
+ trace_android_vh_set_module_permit_after_init(mod);
35583792 mod_tree_remove_init(mod);
3559
- disable_ro_nx(&mod->init_layout);
35603793 module_arch_freeing_init(mod);
3794
+ trace_android_vh_set_memory_rw((unsigned long)mod->init_layout.base,
3795
+ (mod->init_layout.size)>>PAGE_SHIFT);
3796
+ trace_android_vh_set_memory_nx((unsigned long)mod->init_layout.base,
3797
+ (mod->init_layout.size)>>PAGE_SHIFT);
35613798 mod->init_layout.base = NULL;
35623799 mod->init_layout.size = 0;
35633800 mod->init_layout.ro_size = 0;
....@@ -3566,15 +3803,19 @@
35663803 /*
35673804 * We want to free module_init, but be aware that kallsyms may be
35683805 * walking this with preempt disabled. In all the failure paths, we
3569
- * call synchronize_sched(), but we don't want to slow down the success
3570
- * path, so use actual RCU here.
3806
+ * call synchronize_rcu(), but we don't want to slow down the success
3807
+ * path. module_memfree() cannot be called in an interrupt, so do the
3808
+ * work and call synchronize_rcu() in a work queue.
3809
+ *
35713810 * Note that module_alloc() on most architectures creates W+X page
35723811 * mappings which won't be cleaned up until do_free_init() runs. Any
35733812 * code such as mark_rodata_ro() which depends on those mappings to
35743813 * be cleaned up needs to sync with the queued work - ie
3575
- * rcu_barrier_sched()
3814
+ * rcu_barrier()
35763815 */
3577
- call_rcu_sched(&freeinit->rcu, do_free_init);
3816
+ if (llist_add(&freeinit->node, &init_free_list))
3817
+ schedule_work(&init_free_wq);
3818
+
35783819 mutex_unlock(&module_mutex);
35793820 wake_up_all(&module_wq);
35803821
....@@ -3585,7 +3826,7 @@
35853826 fail:
35863827 /* Try to protect us from buggy refcounters. */
35873828 mod->state = MODULE_STATE_GOING;
3588
- synchronize_sched();
3829
+ synchronize_rcu();
35893830 module_put(mod);
35903831 blocking_notifier_call_chain(&module_notify_list,
35913832 MODULE_STATE_GOING, mod);
....@@ -3650,7 +3891,7 @@
36503891 mutex_lock(&module_mutex);
36513892
36523893 /* Find duplicate symbols (must be called under lock). */
3653
- err = verify_export_symbols(mod);
3894
+ err = verify_exported_symbols(mod);
36543895 if (err < 0)
36553896 goto out;
36563897
....@@ -3660,6 +3901,7 @@
36603901 module_enable_ro(mod, false);
36613902 module_enable_nx(mod);
36623903 module_enable_x(mod);
3904
+ trace_android_vh_set_module_permit_before_init(mod);
36633905
36643906 /* Mark state as coming so strong_try_module_get() ignores us,
36653907 * but kallsyms etc. can see us. */
....@@ -3682,9 +3924,13 @@
36823924 if (err)
36833925 return err;
36843926
3685
- blocking_notifier_call_chain(&module_notify_list,
3686
- MODULE_STATE_COMING, mod);
3687
- return 0;
3927
+ err = blocking_notifier_call_chain_robust(&module_notify_list,
3928
+ MODULE_STATE_COMING, MODULE_STATE_GOING, mod);
3929
+ err = notifier_to_errno(err);
3930
+ if (err)
3931
+ klp_module_going(mod);
3932
+
3933
+ return err;
36883934 }
36893935
36903936 static int unknown_module_param_cb(char *param, char *val, const char *modname,
....@@ -3705,6 +3951,8 @@
37053951 return 0;
37063952 }
37073953
3954
+static void cfi_init(struct module *mod);
3955
+
37083956 /* Allocate and load the module: note that size of section 0 is always
37093957 zero, and we rely on this for optional sections. */
37103958 static int load_module(struct load_info *info, const char __user *uargs,
....@@ -3714,22 +3962,49 @@
37143962 long err = 0;
37153963 char *after_dashes;
37163964
3717
- err = elf_header_check(info);
3965
+ /*
3966
+ * Do the signature check (if any) first. All that
3967
+ * the signature check needs is info->len, it does
3968
+ * not need any of the section info. That can be
3969
+ * set up later. This will minimize the chances
3970
+ * of a corrupt module causing problems before
3971
+ * we even get to the signature check.
3972
+ *
3973
+ * The check will also adjust info->len by stripping
3974
+ * off the sig length at the end of the module, making
3975
+ * checks against info->len more correct.
3976
+ */
3977
+ err = module_sig_check(info, flags);
37183978 if (err)
37193979 goto free_copy;
37203980
3981
+ /*
3982
+ * Do basic sanity checks against the ELF header and
3983
+ * sections.
3984
+ */
3985
+ err = elf_validity_check(info);
3986
+ if (err) {
3987
+ pr_err("Module has invalid ELF structures\n");
3988
+ goto free_copy;
3989
+ }
3990
+
3991
+ /*
3992
+ * Everything checks out, so set up the section info
3993
+ * in the info structure.
3994
+ */
37213995 err = setup_load_info(info, flags);
37223996 if (err)
37233997 goto free_copy;
37243998
3999
+ /*
4000
+ * Now that we know we have the correct module name, check
4001
+ * if it's blacklisted.
4002
+ */
37254003 if (blacklisted(info->name)) {
37264004 err = -EPERM;
4005
+ pr_err("Module %s is blacklisted\n", info->name);
37274006 goto free_copy;
37284007 }
3729
-
3730
- err = module_sig_check(info, flags);
3731
- if (err)
3732
- goto free_copy;
37334008
37344009 err = rewrite_section_headers(info, flags);
37354010 if (err)
....@@ -3805,6 +4080,9 @@
38054080
38064081 flush_module_icache(mod);
38074082
4083
+ /* Setup CFI for the module. */
4084
+ cfi_init(mod);
4085
+
38084086 /* Now copy in args */
38094087 mod->args = strndup_user(uargs, ~0UL >> 1);
38104088 if (IS_ERR(mod->args)) {
....@@ -3872,16 +4150,13 @@
38724150 module_bug_cleanup(mod);
38734151 mutex_unlock(&module_mutex);
38744152
3875
- /* we can't deallocate the module until we clear memory protection */
3876
- module_disable_ro(mod);
3877
- module_disable_nx(mod);
3878
-
38794153 ddebug_cleanup:
38804154 ftrace_release_mod(mod);
38814155 dynamic_debug_remove(mod, info->debug);
3882
- synchronize_sched();
4156
+ synchronize_rcu();
38834157 kfree(mod->args);
38844158 free_arch_cleanup:
4159
+ cfi_cleanup(mod);
38854160 module_arch_cleanup(mod);
38864161 free_modinfo:
38874162 free_modinfo(mod);
....@@ -3894,7 +4169,7 @@
38944169 mod_tree_remove(mod);
38954170 wake_up_all(&module_wq);
38964171 /* Wait for RCU-sched synchronizing before releasing mod->list. */
3897
- synchronize_sched();
4172
+ synchronize_rcu();
38984173 mutex_unlock(&module_mutex);
38994174 free_module:
39004175 /* Free lock-classes; relies on the preceding sync_rcu() */
....@@ -3929,8 +4204,7 @@
39294204 SYSCALL_DEFINE3(finit_module, int, fd, const char __user *, uargs, int, flags)
39304205 {
39314206 struct load_info info = { };
3932
- loff_t size;
3933
- void *hdr;
4207
+ void *hdr = NULL;
39344208 int err;
39354209
39364210 err = may_init_module();
....@@ -3943,12 +4217,12 @@
39434217 |MODULE_INIT_IGNORE_VERMAGIC))
39444218 return -EINVAL;
39454219
3946
- err = kernel_read_file_from_fd(fd, &hdr, &size, INT_MAX,
4220
+ err = kernel_read_file_from_fd(fd, 0, &hdr, INT_MAX, NULL,
39474221 READING_MODULE);
3948
- if (err)
4222
+ if (err < 0)
39494223 return err;
39504224 info.hdr = hdr;
3951
- info.len = size;
4225
+ info.len = err;
39524226
39534227 return load_module(&info, uargs, flags);
39544228 }
....@@ -3971,18 +4245,27 @@
39714245 && (str[2] == '\0' || str[2] == '.');
39724246 }
39734247
3974
-static const char *symname(struct mod_kallsyms *kallsyms, unsigned int symnum)
4248
+static inline int is_cfi_typeid_symbol(const char *str)
4249
+{
4250
+ return !strncmp(str, "__typeid__", 10);
4251
+}
4252
+
4253
+static const char *kallsyms_symbol_name(struct mod_kallsyms *kallsyms, unsigned int symnum)
39754254 {
39764255 return kallsyms->strtab + kallsyms->symtab[symnum].st_name;
39774256 }
39784257
3979
-static const char *get_ksymbol(struct module *mod,
3980
- unsigned long addr,
3981
- unsigned long *size,
3982
- unsigned long *offset)
4258
+/*
4259
+ * Given a module and address, find the corresponding symbol and return its name
4260
+ * while providing its size and offset if needed.
4261
+ */
4262
+static const char *find_kallsyms_symbol(struct module *mod,
4263
+ unsigned long addr,
4264
+ unsigned long *size,
4265
+ unsigned long *offset)
39834266 {
39844267 unsigned int i, best = 0;
3985
- unsigned long nextval;
4268
+ unsigned long nextval, bestval;
39864269 struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms);
39874270
39884271 /* At worse, next value is at end of module */
....@@ -3991,34 +4274,41 @@
39914274 else
39924275 nextval = (unsigned long)mod->core_layout.base+mod->core_layout.text_size;
39934276
4277
+ bestval = kallsyms_symbol_value(&kallsyms->symtab[best]);
4278
+
39944279 /* Scan for closest preceding symbol, and next symbol. (ELF
39954280 starts real symbols at 1). */
39964281 for (i = 1; i < kallsyms->num_symtab; i++) {
3997
- if (kallsyms->symtab[i].st_shndx == SHN_UNDEF)
4282
+ const Elf_Sym *sym = &kallsyms->symtab[i];
4283
+ unsigned long thisval = kallsyms_symbol_value(sym);
4284
+
4285
+ if (sym->st_shndx == SHN_UNDEF)
39984286 continue;
39994287
40004288 /* We ignore unnamed symbols: they're uninformative
40014289 * and inserted at a whim. */
4002
- if (*symname(kallsyms, i) == '\0'
4003
- || is_arm_mapping_symbol(symname(kallsyms, i)))
4290
+ if (*kallsyms_symbol_name(kallsyms, i) == '\0'
4291
+ || is_arm_mapping_symbol(kallsyms_symbol_name(kallsyms, i))
4292
+ || is_cfi_typeid_symbol(kallsyms_symbol_name(kallsyms, i)))
40044293 continue;
40054294
4006
- if (kallsyms->symtab[i].st_value <= addr
4007
- && kallsyms->symtab[i].st_value > kallsyms->symtab[best].st_value)
4295
+ if (thisval <= addr && thisval > bestval) {
40084296 best = i;
4009
- if (kallsyms->symtab[i].st_value > addr
4010
- && kallsyms->symtab[i].st_value < nextval)
4011
- nextval = kallsyms->symtab[i].st_value;
4297
+ bestval = thisval;
4298
+ }
4299
+ if (thisval > addr && thisval < nextval)
4300
+ nextval = thisval;
40124301 }
40134302
40144303 if (!best)
40154304 return NULL;
40164305
40174306 if (size)
4018
- *size = nextval - kallsyms->symtab[best].st_value;
4307
+ *size = nextval - bestval;
40194308 if (offset)
4020
- *offset = addr - kallsyms->symtab[best].st_value;
4021
- return symname(kallsyms, best);
4309
+ *offset = addr - bestval;
4310
+
4311
+ return kallsyms_symbol_name(kallsyms, best);
40224312 }
40234313
40244314 void * __weak dereference_module_function_descriptor(struct module *mod,
....@@ -4043,7 +4333,8 @@
40434333 if (mod) {
40444334 if (modname)
40454335 *modname = mod->name;
4046
- ret = get_ksymbol(mod, addr, size, offset);
4336
+
4337
+ ret = find_kallsyms_symbol(mod, addr, size, offset);
40474338 }
40484339 /* Make a copy in here where it's safe */
40494340 if (ret) {
....@@ -4066,9 +4357,10 @@
40664357 if (within_module(addr, mod)) {
40674358 const char *sym;
40684359
4069
- sym = get_ksymbol(mod, addr, NULL, NULL);
4360
+ sym = find_kallsyms_symbol(mod, addr, NULL, NULL);
40704361 if (!sym)
40714362 goto out;
4363
+
40724364 strlcpy(symname, sym, KSYM_NAME_LEN);
40734365 preempt_enable();
40744366 return 0;
....@@ -4091,7 +4383,7 @@
40914383 if (within_module(addr, mod)) {
40924384 const char *sym;
40934385
4094
- sym = get_ksymbol(mod, addr, size, offset);
4386
+ sym = find_kallsyms_symbol(mod, addr, size, offset);
40954387 if (!sym)
40964388 goto out;
40974389 if (modname)
....@@ -4120,9 +4412,11 @@
41204412 continue;
41214413 kallsyms = rcu_dereference_sched(mod->kallsyms);
41224414 if (symnum < kallsyms->num_symtab) {
4123
- *value = kallsyms->symtab[symnum].st_value;
4124
- *type = kallsyms->symtab[symnum].st_info;
4125
- strlcpy(name, symname(kallsyms, symnum), KSYM_NAME_LEN);
4415
+ const Elf_Sym *sym = &kallsyms->symtab[symnum];
4416
+
4417
+ *value = kallsyms_symbol_value(sym);
4418
+ *type = kallsyms->typetab[symnum];
4419
+ strlcpy(name, kallsyms_symbol_name(kallsyms, symnum), KSYM_NAME_LEN);
41264420 strlcpy(module_name, mod->name, MODULE_NAME_LEN);
41274421 *exported = is_exported(name, *value, mod);
41284422 preempt_enable();
....@@ -4134,15 +4428,19 @@
41344428 return -ERANGE;
41354429 }
41364430
4137
-static unsigned long mod_find_symname(struct module *mod, const char *name)
4431
+/* Given a module and name of symbol, find and return the symbol's value */
4432
+static unsigned long find_kallsyms_symbol_value(struct module *mod, const char *name)
41384433 {
41394434 unsigned int i;
41404435 struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms);
41414436
4142
- for (i = 0; i < kallsyms->num_symtab; i++)
4143
- if (strcmp(name, symname(kallsyms, i)) == 0 &&
4144
- kallsyms->symtab[i].st_shndx != SHN_UNDEF)
4145
- return kallsyms->symtab[i].st_value;
4437
+ for (i = 0; i < kallsyms->num_symtab; i++) {
4438
+ const Elf_Sym *sym = &kallsyms->symtab[i];
4439
+
4440
+ if (strcmp(name, kallsyms_symbol_name(kallsyms, i)) == 0 &&
4441
+ sym->st_shndx != SHN_UNDEF)
4442
+ return kallsyms_symbol_value(sym);
4443
+ }
41464444 return 0;
41474445 }
41484446
....@@ -4157,12 +4455,12 @@
41574455 preempt_disable();
41584456 if ((colon = strnchr(name, MODULE_NAME_LEN, ':')) != NULL) {
41594457 if ((mod = find_module_all(name, colon - name, false)) != NULL)
4160
- ret = mod_find_symname(mod, colon+1);
4458
+ ret = find_kallsyms_symbol_value(mod, colon+1);
41614459 } else {
41624460 list_for_each_entry_rcu(mod, &modules, list) {
41634461 if (mod->state == MODULE_STATE_UNFORMED)
41644462 continue;
4165
- if ((ret = mod_find_symname(mod, name)) != 0)
4463
+ if ((ret = find_kallsyms_symbol_value(mod, name)) != 0)
41664464 break;
41674465 }
41684466 }
....@@ -4187,12 +4485,13 @@
41874485 if (mod->state == MODULE_STATE_UNFORMED)
41884486 continue;
41894487 for (i = 0; i < kallsyms->num_symtab; i++) {
4488
+ const Elf_Sym *sym = &kallsyms->symtab[i];
41904489
4191
- if (kallsyms->symtab[i].st_shndx == SHN_UNDEF)
4490
+ if (sym->st_shndx == SHN_UNDEF)
41924491 continue;
41934492
4194
- ret = fn(data, symname(kallsyms, i),
4195
- mod, kallsyms->symtab[i].st_value);
4493
+ ret = fn(data, kallsyms_symbol_name(kallsyms, i),
4494
+ mod, kallsyms_symbol_value(sym));
41964495 if (ret != 0)
41974496 return ret;
41984497 }
....@@ -4204,18 +4503,30 @@
42044503 static void cfi_init(struct module *mod)
42054504 {
42064505 #ifdef CONFIG_CFI_CLANG
4207
- preempt_disable();
4208
- mod->cfi_check =
4209
- (cfi_check_fn)mod_find_symname(mod, CFI_CHECK_FN_NAME);
4210
- preempt_enable();
4211
- cfi_module_add(mod, module_addr_min, module_addr_max);
4506
+ initcall_t *init;
4507
+ exitcall_t *exit;
4508
+
4509
+ rcu_read_lock_sched();
4510
+ mod->cfi_check = (cfi_check_fn)
4511
+ find_kallsyms_symbol_value(mod, "__cfi_check");
4512
+ init = (initcall_t *)
4513
+ find_kallsyms_symbol_value(mod, "__cfi_jt_init_module");
4514
+ exit = (exitcall_t *)
4515
+ find_kallsyms_symbol_value(mod, "__cfi_jt_cleanup_module");
4516
+ rcu_read_unlock_sched();
4517
+
4518
+ /* Fix init/exit functions to point to the CFI jump table */
4519
+ if (init) mod->init = *init;
4520
+ if (exit) mod->exit = *exit;
4521
+
4522
+ cfi_module_add(mod, module_addr_min);
42124523 #endif
42134524 }
42144525
42154526 static void cfi_cleanup(struct module *mod)
42164527 {
42174528 #ifdef CONFIG_CFI_CLANG
4218
- cfi_module_remove(mod, module_addr_min, module_addr_max);
4529
+ cfi_module_remove(mod, module_addr_min);
42194530 #endif
42204531 }
42214532
....@@ -4326,16 +4637,17 @@
43264637 return err;
43274638 }
43284639
4329
-static const struct file_operations proc_modules_operations = {
4330
- .open = modules_open,
4331
- .read = seq_read,
4332
- .llseek = seq_lseek,
4333
- .release = seq_release,
4640
+static const struct proc_ops modules_proc_ops = {
4641
+ .proc_flags = PROC_ENTRY_PERMANENT,
4642
+ .proc_open = modules_open,
4643
+ .proc_read = seq_read,
4644
+ .proc_lseek = seq_lseek,
4645
+ .proc_release = seq_release,
43344646 };
43354647
43364648 static int __init proc_modules_init(void)
43374649 {
4338
- proc_create("modules", 0, NULL, &proc_modules_operations);
4650
+ proc_create("modules", 0, NULL, &modules_proc_ops);
43394651 return 0;
43404652 }
43414653 module_init(proc_modules_init);
....@@ -4469,6 +4781,23 @@
44694781 pr_cont("\n");
44704782 }
44714783
4784
+#ifdef CONFIG_ANDROID_DEBUG_SYMBOLS
4785
+void android_debug_for_each_module(int (*fn)(const char *mod_name, void *mod_addr, void *data),
4786
+ void *data)
4787
+{
4788
+ struct module *module;
4789
+
4790
+ preempt_disable();
4791
+ list_for_each_entry_rcu(module, &modules, list) {
4792
+ if (fn(module->name, module->core_layout.base, data))
4793
+ goto out;
4794
+ }
4795
+out:
4796
+ preempt_enable();
4797
+}
4798
+EXPORT_SYMBOL_GPL(android_debug_for_each_module);
4799
+#endif
4800
+
44724801 #ifdef CONFIG_MODVERSIONS
44734802 /* Generate the signature for all relevant module structures here.
44744803 * If these change, we don't want to try to parse the module. */
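For context, a hedged usage sketch of the android_debug_for_each_module() helper added above; the caller is hypothetical, and in the Android tree the prototype would normally come from the matching debug-symbols header rather than being declared locally:

#include <linux/module.h>
#include <linux/printk.h>

/* Declared here only to keep the sketch self-contained. */
void android_debug_for_each_module(int (*fn)(const char *mod_name,
					     void *mod_addr, void *data),
				   void *data);

static int dump_one_module(const char *mod_name, void *mod_addr, void *data)
{
	pr_info("module %s core base %px\n", mod_name, mod_addr);
	return 0;	/* returning non-zero stops the walk early */
}

static void dump_all_modules(void)
{
	android_debug_for_each_module(dump_one_module, NULL);
}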