2024-12-19 9370bb92b2d16684ee45cf24e879c93c509162da
kernel/kernel/module.c
....@@ -1,24 +1,16 @@
1
+// SPDX-License-Identifier: GPL-2.0-or-later
12 /*
23 Copyright (C) 2002 Richard Henderson
34 Copyright (C) 2001 Rusty Russell, 2002, 2010 Rusty Russell IBM.
45
5
- This program is free software; you can redistribute it and/or modify
6
- it under the terms of the GNU General Public License as published by
7
- the Free Software Foundation; either version 2 of the License, or
8
- (at your option) any later version.
9
-
10
- This program is distributed in the hope that it will be useful,
11
- but WITHOUT ANY WARRANTY; without even the implied warranty of
12
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13
- GNU General Public License for more details.
14
-
15
- You should have received a copy of the GNU General Public License
16
- along with this program; if not, write to the Free Software
17
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
186 */
7
+
8
+#define INCLUDE_VERMAGIC
9
+
1910 #include <linux/export.h>
2011 #include <linux/extable.h>
2112 #include <linux/moduleloader.h>
13
+#include <linux/module_signature.h>
2214 #include <linux/trace_events.h>
2315 #include <linux/init.h>
2416 #include <linux/kallsyms.h>
....@@ -26,6 +18,7 @@
2618 #include <linux/fs.h>
2719 #include <linux/sysfs.h>
2820 #include <linux/kernel.h>
21
+#include <linux/kernel_read_file.h>
2922 #include <linux/slab.h>
3023 #include <linux/vmalloc.h>
3124 #include <linux/elf.h>
....@@ -70,15 +63,24 @@
7063 #define CREATE_TRACE_POINTS
7164 #include <trace/events/module.h>
7265
66
+#undef CREATE_TRACE_POINTS
67
+#include <trace/hooks/module.h>
68
+#include <trace/hooks/memory.h>
69
+
7370 #ifndef ARCH_SHF_SMALL
7471 #define ARCH_SHF_SMALL 0
7572 #endif
7673
7774 /*
7875 * Modules' sections will be aligned on page boundaries
79
- * to ensure complete separation of code and data
76
+ * to ensure complete separation of code and data, but
77
+ * only when CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y
8078 */
79
+#ifdef CONFIG_ARCH_HAS_STRICT_MODULE_RWX
8180 # define debug_align(X) ALIGN(X, PAGE_SIZE)
81
+#else
82
+# define debug_align(X) (X)
83
+#endif
8284
8385 /* If this is set, the section belongs in the init part of the module */
8486 #define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1))
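The debug_align() change in the hunk above rounds section sizes up to a page boundary only when CONFIG_ARCH_HAS_STRICT_MODULE_RWX is set, so text, rodata and data can receive distinct page permissions. A minimal userspace sketch of that rounding (not part of the patch; PAGE_SIZE and the example size are assumptions):

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
        unsigned long text_size = 0x1234;       /* hypothetical section size */

        /* with strict module RWX: round up to the next page boundary */
        printf("debug_align(%#lx) = %#lx\n", text_size, ALIGN(text_size, PAGE_SIZE));
        /* without it, debug_align() is the identity */
        printf("identity(%#lx)    = %#lx\n", text_size, text_size);
        return 0;
}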
....@@ -90,8 +92,12 @@
9092 * 3) module_addr_min/module_addr_max.
9193 * (delete and add uses RCU list operations). */
9294 DEFINE_MUTEX(module_mutex);
93
-EXPORT_SYMBOL_GPL(module_mutex);
9495 static LIST_HEAD(modules);
96
+
97
+/* Work queue for freeing init sections in success case */
98
+static void do_free_init(struct work_struct *w);
99
+static DECLARE_WORK(init_free_wq, do_free_init);
100
+static LLIST_HEAD(init_free_list);
95101
96102 #ifdef CONFIG_MODULES_TREE_LOOKUP
97103
....@@ -216,7 +222,8 @@
216222 {
217223 struct module *mod;
218224
219
- list_for_each_entry_rcu(mod, &modules, list) {
225
+ list_for_each_entry_rcu(mod, &modules, list,
226
+ lockdep_is_held(&module_mutex)) {
220227 if (within_module(addr, mod))
221228 return mod;
222229 }
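The added lockdep_is_held() argument uses the four-parameter form of list_for_each_entry_rcu(), which tells lockdep that walking the list is legal while holding the writer-side mutex as well as under rcu_read_lock(). A kernel-style sketch of the same idiom with hypothetical names (demo_list, demo_mutex, demo_entry):

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>

static DEFINE_MUTEX(demo_mutex);
static LIST_HEAD(demo_list);

struct demo_entry {
        struct list_head list;
        int id;
};

/* Callers hold either rcu_read_lock() or demo_mutex. */
static struct demo_entry *demo_find(int id)
{
        struct demo_entry *e;

        list_for_each_entry_rcu(e, &demo_list, list,
                                lockdep_is_held(&demo_mutex)) {
                if (e->id == id)
                        return e;
        }
        return NULL;
}

/* Writers add under the mutex; readers may already be walking the list. */
static void demo_add(struct demo_entry *e)
{
        mutex_lock(&demo_mutex);
        list_add_rcu(&e->list, &demo_list);
        mutex_unlock(&demo_mutex);
}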
....@@ -454,7 +461,8 @@
454461 if (each_symbol_in_section(arr, ARRAY_SIZE(arr), NULL, fn, data))
455462 return true;
456463
457
- list_for_each_entry_rcu(mod, &modules, list) {
464
+ list_for_each_entry_rcu(mod, &modules, list,
465
+ lockdep_is_held(&module_mutex)) {
458466 struct symsearch arr[] = {
459467 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
460468 NOT_GPL_ONLY, false },
....@@ -499,9 +507,9 @@
499507 enum mod_license license;
500508 };
501509
502
-static bool check_symbol(const struct symsearch *syms,
503
- struct module *owner,
504
- unsigned int symnum, void *data)
510
+static bool check_exported_symbol(const struct symsearch *syms,
511
+ struct module *owner,
512
+ unsigned int symnum, void *data)
505513 {
506514 struct find_symbol_arg *fsa = data;
507515
....@@ -552,17 +560,25 @@
552560 #endif
553561 }
554562
555
-static int cmp_name(const void *va, const void *vb)
563
+static const char *kernel_symbol_namespace(const struct kernel_symbol *sym)
556564 {
557
- const char *a;
558
- const struct kernel_symbol *b;
559
- a = va; b = vb;
560
- return strcmp(a, kernel_symbol_name(b));
565
+#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
566
+ if (!sym->namespace_offset)
567
+ return NULL;
568
+ return offset_to_ptr(&sym->namespace_offset);
569
+#else
570
+ return sym->namespace;
571
+#endif
561572 }
562573
563
-static bool find_symbol_in_section(const struct symsearch *syms,
564
- struct module *owner,
565
- void *data)
574
+static int cmp_name(const void *name, const void *sym)
575
+{
576
+ return strcmp(name, kernel_symbol_name(sym));
577
+}
578
+
579
+static bool find_exported_symbol_in_section(const struct symsearch *syms,
580
+ struct module *owner,
581
+ void *data)
566582 {
567583 struct find_symbol_arg *fsa = data;
568584 struct kernel_symbol *sym;
....@@ -570,13 +586,14 @@
570586 sym = bsearch(fsa->name, syms->start, syms->stop - syms->start,
571587 sizeof(struct kernel_symbol), cmp_name);
572588
573
- if (sym != NULL && check_symbol(syms, owner, sym - syms->start, data))
589
+ if (sym != NULL && check_exported_symbol(syms, owner,
590
+ sym - syms->start, data))
574591 return true;
575592
576593 return false;
577594 }
578595
579
-/* Find a symbol and return it, along with, (optional) crc and
596
+/* Find an exported symbol and return it, along with, (optional) crc and
580597 * (optional) module which owns it. Needs preempt disabled or module_mutex. */
581598 static const struct kernel_symbol *find_symbol(const char *name,
582599 struct module **owner,
....@@ -591,7 +608,7 @@
591608 fsa.gplok = gplok;
592609 fsa.warn = warn;
593610
594
- if (each_symbol_section(find_symbol_in_section, &fsa)) {
611
+ if (each_symbol_section(find_exported_symbol_in_section, &fsa)) {
595612 if (owner)
596613 *owner = fsa.owner;
597614 if (crc)
....@@ -616,7 +633,8 @@
616633
617634 module_assert_mutex_or_preempt();
618635
619
- list_for_each_entry_rcu(mod, &modules, list) {
636
+ list_for_each_entry_rcu(mod, &modules, list,
637
+ lockdep_is_held(&module_mutex)) {
620638 if (!even_unformed && mod->state == MODULE_STATE_UNFORMED)
621639 continue;
622640 if (strlen(mod->name) == len && !memcmp(mod->name, name, len))
....@@ -630,7 +648,6 @@
630648 module_assert_mutex();
631649 return find_module_all(name, strlen(name), false);
632650 }
633
-EXPORT_SYMBOL_GPL(find_module);
634651
635652 #ifdef CONFIG_SMP
636653
....@@ -796,6 +813,7 @@
796813
797814 MODINFO_ATTR(version);
798815 MODINFO_ATTR(srcversion);
816
+MODINFO_ATTR(scmversion);
799817
800818 static char last_unloaded_module[MODULE_NAME_LEN+1];
801819
....@@ -1258,6 +1276,7 @@
12581276 &module_uevent,
12591277 &modinfo_version,
12601278 &modinfo_srcversion,
1279
+ &modinfo_scmversion,
12611280 &modinfo_initstate,
12621281 &modinfo_coresize,
12631282 &modinfo_initsize,
....@@ -1388,6 +1407,59 @@
13881407 }
13891408 #endif /* CONFIG_MODVERSIONS */
13901409
1410
+static char *get_modinfo(const struct load_info *info, const char *tag);
1411
+static char *get_next_modinfo(const struct load_info *info, const char *tag,
1412
+ char *prev);
1413
+
1414
+static int verify_namespace_is_imported(const struct load_info *info,
1415
+ const struct kernel_symbol *sym,
1416
+ struct module *mod)
1417
+{
1418
+ const char *namespace;
1419
+ char *imported_namespace;
1420
+
1421
+ namespace = kernel_symbol_namespace(sym);
1422
+ if (namespace && namespace[0]) {
1423
+ imported_namespace = get_modinfo(info, "import_ns");
1424
+ while (imported_namespace) {
1425
+ if (strcmp(namespace, imported_namespace) == 0)
1426
+ return 0;
1427
+ imported_namespace = get_next_modinfo(
1428
+ info, "import_ns", imported_namespace);
1429
+ }
1430
+#ifdef CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS
1431
+ pr_warn(
1432
+#else
1433
+ pr_err(
1434
+#endif
1435
+ "%s: module uses symbol (%s) from namespace %s, but does not import it.\n",
1436
+ mod->name, kernel_symbol_name(sym), namespace);
1437
+#ifndef CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS
1438
+ return -EINVAL;
1439
+#endif
1440
+ }
1441
+ return 0;
1442
+}
1443
+
1444
+static bool inherit_taint(struct module *mod, struct module *owner)
1445
+{
1446
+ if (!owner || !test_bit(TAINT_PROPRIETARY_MODULE, &owner->taints))
1447
+ return true;
1448
+
1449
+ if (mod->using_gplonly_symbols) {
1450
+ pr_err("%s: module using GPL-only symbols uses symbols from proprietary module %s.\n",
1451
+ mod->name, owner->name);
1452
+ return false;
1453
+ }
1454
+
1455
+ if (!test_bit(TAINT_PROPRIETARY_MODULE, &mod->taints)) {
1456
+ pr_warn("%s: module uses symbols from proprietary module %s, inheriting taint.\n",
1457
+ mod->name, owner->name);
1458
+ set_bit(TAINT_PROPRIETARY_MODULE, &mod->taints);
1459
+ }
1460
+ return true;
1461
+}
1462
+
13911463 /* Resolve a symbol for this module. I.e. if we find one, record usage. */
13921464 static const struct kernel_symbol *resolve_symbol(struct module *mod,
13931465 const struct load_info *info,
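verify_namespace_is_imported() above repeatedly calls get_next_modinfo() to visit every "import_ns=" entry in .modinfo, which is just a blob of NUL-terminated "tag=value" strings. A userspace sketch of that walk (buffer contents and the helper name are hypothetical):

#include <stdio.h>
#include <string.h>

static const char *next_modinfo(const char *buf, size_t len, const char *tag,
                                const char *prev)
{
        size_t taglen = strlen(tag);
        const char *p = prev ? prev + strlen(prev) + 1 : buf;
        const char *end = buf + len;

        for (; p < end; p += strlen(p) + 1)
                if (!strncmp(p, tag, taglen) && p[taglen] == '=')
                        return p + taglen + 1;      /* points at the value */
        return NULL;
}

int main(void)
{
        /* hypothetical .modinfo contents */
        static const char modinfo[] =
                "license=GPL\0import_ns=USB_STORAGE\0import_ns=CRYPTO_INTERNAL\0";
        const char *ns = NULL;

        while ((ns = next_modinfo(modinfo, sizeof(modinfo), "import_ns", ns)))
                printf("imports namespace: %s\n", ns);
        return 0;
}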
....@@ -1412,8 +1484,22 @@
14121484 if (!sym)
14131485 goto unlock;
14141486
1487
+ if (license == GPL_ONLY)
1488
+ mod->using_gplonly_symbols = true;
1489
+
1490
+ if (!inherit_taint(mod, owner)) {
1491
+ sym = NULL;
1492
+ goto getname;
1493
+ }
1494
+
14151495 if (!check_version(info, name, mod, crc)) {
14161496 sym = ERR_PTR(-EINVAL);
1497
+ goto getname;
1498
+ }
1499
+
1500
+ err = verify_namespace_is_imported(info, sym, mod);
1501
+ if (err) {
1502
+ sym = ERR_PTR(err);
14171503 goto getname;
14181504 }
14191505
....@@ -1469,7 +1555,7 @@
14691555 struct module_sect_attrs {
14701556 struct attribute_group grp;
14711557 unsigned int nsections;
1472
- struct module_sect_attr attrs[0];
1558
+ struct module_sect_attr attrs[];
14731559 };
14741560
14751561 #define MODULE_SECT_READ_SIZE (3 /* "0x", "\n" */ + (BITS_PER_LONG / 4))
....@@ -1582,7 +1668,7 @@
15821668 struct module_notes_attrs {
15831669 struct kobject *dir;
15841670 unsigned int notes;
1585
- struct bin_attribute attrs[0];
1671
+ struct bin_attribute attrs[];
15861672 };
15871673
15881674 static ssize_t module_notes_read(struct file *filp, struct kobject *kobj,
....@@ -1915,7 +2001,6 @@
19152001 mod_sysfs_fini(mod);
19162002 }
19172003
1918
-#ifdef CONFIG_ARCH_HAS_STRICT_MODULE_RWX
19192004 /*
19202005 * LKM RO/NX protection: protect module's text/ro-data
19212006 * from modification and any data from execution.
....@@ -1929,6 +2014,14 @@
19292014 *
19302015 * These values are always page-aligned (as is base)
19312016 */
2017
+
2018
+/*
2019
+ * Since some arches are moving towards PAGE_KERNEL module allocations instead
2020
+ * of PAGE_KERNEL_EXEC, keep frob_text() and module_enable_x() outside of the
2021
+ * CONFIG_STRICT_MODULE_RWX block below because they are needed regardless of
2022
+ * whether we are strict.
2023
+ */
2024
+#ifdef CONFIG_ARCH_HAS_STRICT_MODULE_RWX
19322025 static void frob_text(const struct module_layout *layout,
19332026 int (*set_memory)(unsigned long start, int num_pages))
19342027 {
....@@ -1937,6 +2030,15 @@
19372030 set_memory((unsigned long)layout->base,
19382031 layout->text_size >> PAGE_SHIFT);
19392032 }
2033
+
2034
+static void module_enable_x(const struct module *mod)
2035
+{
2036
+ frob_text(&mod->core_layout, set_memory_x);
2037
+ frob_text(&mod->init_layout, set_memory_x);
2038
+}
2039
+#else /* !CONFIG_ARCH_HAS_STRICT_MODULE_RWX */
2040
+static void module_enable_x(const struct module *mod) { }
2041
+#endif /* CONFIG_ARCH_HAS_STRICT_MODULE_RWX */
19402042
19412043 #ifdef CONFIG_STRICT_MODULE_RWX
19422044 static void frob_rodata(const struct module_layout *layout,
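frob_text() and module_enable_x() above only compute a page-aligned base plus a page count and hand them to a set_memory_*() helper. A rough userspace analogue using mprotect() on an anonymous mapping (not kernel code; the sizes are made up):

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        long page = sysconf(_SC_PAGESIZE);
        size_t text_size = 3 * page;    /* pretend this is layout->text_size */
        void *base = mmap(NULL, text_size, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (base == MAP_FAILED)
                return 1;

        /* module_enable_ro()-style: drop write permission on the "text" */
        if (mprotect(base, text_size, PROT_READ))
                perror("mprotect");

        printf("made %ld pages read-only at %p\n",
               (long)(text_size / page), base);
        munmap(base, text_size);
        return 0;
}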
....@@ -1969,24 +2071,13 @@
19692071 (layout->size - layout->ro_after_init_size) >> PAGE_SHIFT);
19702072 }
19712073
1972
-/* livepatching wants to disable read-only so it can frob module. */
1973
-void module_disable_ro(const struct module *mod)
2074
+static void module_enable_ro(const struct module *mod, bool after_init)
19742075 {
19752076 if (!rodata_enabled)
19762077 return;
19772078
1978
- frob_text(&mod->core_layout, set_memory_rw);
1979
- frob_rodata(&mod->core_layout, set_memory_rw);
1980
- frob_ro_after_init(&mod->core_layout, set_memory_rw);
1981
- frob_text(&mod->init_layout, set_memory_rw);
1982
- frob_rodata(&mod->init_layout, set_memory_rw);
1983
-}
1984
-
1985
-void module_enable_ro(const struct module *mod, bool after_init)
1986
-{
1987
- if (!rodata_enabled)
1988
- return;
1989
-
2079
+ set_vm_flush_reset_perms(mod->core_layout.base);
2080
+ set_vm_flush_reset_perms(mod->init_layout.base);
19902081 frob_text(&mod->core_layout, set_memory_ro);
19912082
19922083 frob_rodata(&mod->core_layout, set_memory_ro);
....@@ -2006,88 +2097,32 @@
20062097 frob_writable_data(&mod->init_layout, set_memory_nx);
20072098 }
20082099
2009
-static void module_disable_nx(const struct module *mod)
2100
+static int module_enforce_rwx_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
2101
+ char *secstrings, struct module *mod)
20102102 {
2011
- frob_rodata(&mod->core_layout, set_memory_x);
2012
- frob_ro_after_init(&mod->core_layout, set_memory_x);
2013
- frob_writable_data(&mod->core_layout, set_memory_x);
2014
- frob_rodata(&mod->init_layout, set_memory_x);
2015
- frob_writable_data(&mod->init_layout, set_memory_x);
2016
-}
2103
+ const unsigned long shf_wx = SHF_WRITE|SHF_EXECINSTR;
2104
+ int i;
20172105
2018
-/* Iterate through all modules and set each module's text as RW */
2019
-void set_all_modules_text_rw(void)
2020
-{
2021
- struct module *mod;
2022
-
2023
- if (!rodata_enabled)
2024
- return;
2025
-
2026
- mutex_lock(&module_mutex);
2027
- list_for_each_entry_rcu(mod, &modules, list) {
2028
- if (mod->state == MODULE_STATE_UNFORMED)
2029
- continue;
2030
-
2031
- frob_text(&mod->core_layout, set_memory_rw);
2032
- frob_text(&mod->init_layout, set_memory_rw);
2106
+ for (i = 0; i < hdr->e_shnum; i++) {
2107
+ if ((sechdrs[i].sh_flags & shf_wx) == shf_wx) {
2108
+ pr_err("%s: section %s (index %d) has invalid WRITE|EXEC flags\n",
2109
+ mod->name, secstrings + sechdrs[i].sh_name, i);
2110
+ return -ENOEXEC;
2111
+ }
20332112 }
2034
- mutex_unlock(&module_mutex);
2035
-}
20362113
2037
-/* Iterate through all modules and set each module's text as RO */
2038
-void set_all_modules_text_ro(void)
2039
-{
2040
- struct module *mod;
2041
-
2042
- if (!rodata_enabled)
2043
- return;
2044
-
2045
- mutex_lock(&module_mutex);
2046
- list_for_each_entry_rcu(mod, &modules, list) {
2047
- /*
2048
- * Ignore going modules since it's possible that ro
2049
- * protection has already been disabled, otherwise we'll
2050
- * run into protection faults at module deallocation.
2051
- */
2052
- if (mod->state == MODULE_STATE_UNFORMED ||
2053
- mod->state == MODULE_STATE_GOING)
2054
- continue;
2055
-
2056
- frob_text(&mod->core_layout, set_memory_ro);
2057
- frob_text(&mod->init_layout, set_memory_ro);
2058
- }
2059
- mutex_unlock(&module_mutex);
2060
-}
2061
-
2062
-static void disable_ro_nx(const struct module_layout *layout)
2063
-{
2064
- if (rodata_enabled) {
2065
- frob_text(layout, set_memory_rw);
2066
- frob_rodata(layout, set_memory_rw);
2067
- frob_ro_after_init(layout, set_memory_rw);
2068
- }
2069
- frob_rodata(layout, set_memory_x);
2070
- frob_ro_after_init(layout, set_memory_x);
2071
- frob_writable_data(layout, set_memory_x);
2114
+ return 0;
20722115 }
20732116
20742117 #else /* !CONFIG_STRICT_MODULE_RWX */
2075
-static void disable_ro_nx(const struct module_layout *layout) { }
20762118 static void module_enable_nx(const struct module *mod) { }
2077
-static void module_disable_nx(const struct module *mod) { }
2078
-#endif /* CONFIG_STRICT_MODULE_RWX */
2079
-
2080
-static void module_enable_x(const struct module *mod)
2119
+static void module_enable_ro(const struct module *mod, bool after_init) {}
2120
+static int module_enforce_rwx_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
2121
+ char *secstrings, struct module *mod)
20812122 {
2082
- frob_text(&mod->core_layout, set_memory_x);
2083
- frob_text(&mod->init_layout, set_memory_x);
2123
+ return 0;
20842124 }
2085
-#else /* !CONFIG_ARCH_HAS_STRICT_MODULE_RWX */
2086
-static void disable_ro_nx(const struct module_layout *layout) { }
2087
-static void module_enable_nx(const struct module *mod) { }
2088
-static void module_disable_nx(const struct module *mod) { }
2089
-static void module_enable_x(const struct module *mod) { }
2090
-#endif /* CONFIG_ARCH_HAS_STRICT_MODULE_RWX */
2125
+#endif /* CONFIG_STRICT_MODULE_RWX */
20912126
20922127 #ifdef CONFIG_LIVEPATCH
20932128 /*
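module_enforce_rwx_sections() above rejects any ELF section whose header carries both SHF_WRITE and SHF_EXECINSTR. The same test can be run from userspace against a module image; a sketch with error handling trimmed ("demo.ko" is a placeholder path):

#include <elf.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

static int enforce_rwx(const Elf64_Ehdr *ehdr)
{
        const Elf64_Shdr *shdrs =
                (const Elf64_Shdr *)((const char *)ehdr + ehdr->e_shoff);
        const unsigned long shf_wx = SHF_WRITE | SHF_EXECINSTR;

        for (int i = 0; i < ehdr->e_shnum; i++)
                if ((shdrs[i].sh_flags & shf_wx) == shf_wx) {
                        fprintf(stderr, "section %d has WRITE|EXEC flags\n", i);
                        return -1;
                }
        return 0;
}

int main(int argc, char **argv)
{
        struct stat st;
        int fd = open(argc > 1 ? argv[1] : "demo.ko", O_RDONLY);

        if (fd < 0 || fstat(fd, &st))
                return 1;
        void *img = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
        if (img == MAP_FAILED)
                return 1;
        return enforce_rwx(img) ? 2 : 0;
}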
....@@ -2166,6 +2201,11 @@
21662201
21672202 void __weak module_memfree(void *module_region)
21682203 {
2204
+ /*
2205
+ * This memory may be RO, and freeing RO memory in an interrupt is not
2206
+ * supported by vmalloc.
2207
+ */
2208
+ WARN_ON(in_interrupt());
21692209 vfree(module_region);
21702210 }
21712211
....@@ -2215,16 +2255,18 @@
22152255 /* Remove this module from bug list, this uses list_del_rcu */
22162256 module_bug_cleanup(mod);
22172257 /* Wait for RCU-sched synchronizing before releasing mod->list and buglist. */
2218
- synchronize_sched();
2258
+ synchronize_rcu();
22192259 mutex_unlock(&module_mutex);
2220
-
2221
- /* This may be empty, but that's OK */
2222
- disable_ro_nx(&mod->init_layout);
22232260
22242261 /* Clean up CFI for the module. */
22252262 cfi_cleanup(mod);
22262263
2264
+ /* This may be empty, but that's OK */
22272265 module_arch_freeing_init(mod);
2266
+ trace_android_vh_set_memory_rw((unsigned long)mod->init_layout.base,
2267
+ (mod->init_layout.size)>>PAGE_SHIFT);
2268
+ trace_android_vh_set_memory_nx((unsigned long)mod->init_layout.base,
2269
+ (mod->init_layout.size)>>PAGE_SHIFT);
22282270 module_memfree(mod->init_layout.base);
22292271 kfree(mod->args);
22302272 percpu_modfree(mod);
....@@ -2233,24 +2275,47 @@
22332275 lockdep_free_key_range(mod->core_layout.base, mod->core_layout.size);
22342276
22352277 /* Finally, free the core (containing the module structure) */
2236
- disable_ro_nx(&mod->core_layout);
2278
+ trace_android_vh_set_memory_rw((unsigned long)mod->core_layout.base,
2279
+ (mod->core_layout.size)>>PAGE_SHIFT);
2280
+ trace_android_vh_set_memory_nx((unsigned long)mod->core_layout.base,
2281
+ (mod->core_layout.size)>>PAGE_SHIFT);
22372282 module_memfree(mod->core_layout.base);
22382283 }
22392284
22402285 void *__symbol_get(const char *symbol)
22412286 {
22422287 struct module *owner;
2288
+ enum mod_license license;
22432289 const struct kernel_symbol *sym;
22442290
22452291 preempt_disable();
2246
- sym = find_symbol(symbol, &owner, NULL, NULL, true, true);
2247
- if (sym && strong_try_module_get(owner))
2292
+ sym = find_symbol(symbol, &owner, NULL, &license, true, true);
2293
+ if (!sym)
2294
+ goto fail;
2295
+ if (license != GPL_ONLY) {
2296
+ pr_warn("failing symbol_get of non-GPLONLY symbol %s.\n",
2297
+ symbol);
2298
+ goto fail;
2299
+ }
2300
+ if (strong_try_module_get(owner))
22482301 sym = NULL;
22492302 preempt_enable();
22502303
22512304 return sym ? (void *)kernel_symbol_value(sym) : NULL;
2305
+fail:
2306
+ preempt_enable();
2307
+ return NULL;
22522308 }
22532309 EXPORT_SYMBOL_GPL(__symbol_get);
2310
+
2311
+bool module_init_layout_section(const char *sname)
2312
+{
2313
+#ifndef CONFIG_MODULE_UNLOAD
2314
+ if (module_exit_section(sname))
2315
+ return true;
2316
+#endif
2317
+ return module_init_section(sname);
2318
+}
22542319
22552320 /*
22562321 * Ensure that an exported symbol [global namespace] does not already exist
....@@ -2258,7 +2323,7 @@
22582323 *
22592324 * You must hold the module_mutex.
22602325 */
2261
-static int verify_export_symbols(struct module *mod)
2326
+static int verify_exported_symbols(struct module *mod)
22622327 {
22632328 unsigned int i;
22642329 struct module *owner;
....@@ -2393,11 +2458,13 @@
23932458 if (!(info->sechdrs[infosec].sh_flags & SHF_ALLOC))
23942459 continue;
23952460
2396
- /* Livepatch relocation sections are applied by livepatch */
23972461 if (info->sechdrs[i].sh_flags & SHF_RELA_LIVEPATCH)
2398
- continue;
2399
-
2400
- if (info->sechdrs[i].sh_type == SHT_REL)
2462
+ err = klp_apply_section_relocs(mod, info->sechdrs,
2463
+ info->secstrings,
2464
+ info->strtab,
2465
+ info->index.sym, i,
2466
+ NULL);
2467
+ else if (info->sechdrs[i].sh_type == SHT_REL)
24012468 err = apply_relocate(info->sechdrs, info->strtab,
24022469 info->index.sym, i, mod);
24032470 else if (info->sechdrs[i].sh_type == SHT_RELA)
....@@ -2459,7 +2526,7 @@
24592526 if ((s->sh_flags & masks[m][0]) != masks[m][0]
24602527 || (s->sh_flags & masks[m][1])
24612528 || s->sh_entsize != ~0UL
2462
- || strstarts(sname, ".init"))
2529
+ || module_init_layout_section(sname))
24632530 continue;
24642531 s->sh_entsize = get_offset(mod, &mod->core_layout.size, s, i);
24652532 pr_debug("\t%s\n", sname);
....@@ -2492,7 +2559,7 @@
24922559 if ((s->sh_flags & masks[m][0]) != masks[m][0]
24932560 || (s->sh_flags & masks[m][1])
24942561 || s->sh_entsize != ~0UL
2495
- || !strstarts(sname, ".init"))
2562
+ || !module_init_layout_section(sname))
24962563 continue;
24972564 s->sh_entsize = (get_offset(mod, &mod->init_layout.size, s, i)
24982565 | INIT_OFFSET_MASK);
....@@ -2554,7 +2621,8 @@
25542621 return string;
25552622 }
25562623
2557
-static char *get_modinfo(struct load_info *info, const char *tag)
2624
+static char *get_next_modinfo(const struct load_info *info, const char *tag,
2625
+ char *prev)
25582626 {
25592627 char *p;
25602628 unsigned int taglen = strlen(tag);
....@@ -2565,11 +2633,23 @@
25652633 * get_modinfo() calls made before rewrite_section_headers()
25662634 * must use sh_offset, as sh_addr isn't set!
25672635 */
2568
- for (p = (char *)info->hdr + infosec->sh_offset; p; p = next_string(p, &size)) {
2636
+ char *modinfo = (char *)info->hdr + infosec->sh_offset;
2637
+
2638
+ if (prev) {
2639
+ size -= prev - modinfo;
2640
+ modinfo = next_string(prev, &size);
2641
+ }
2642
+
2643
+ for (p = modinfo; p; p = next_string(p, &size)) {
25692644 if (strncmp(p, tag, taglen) == 0 && p[taglen] == '=')
25702645 return p + taglen + 1;
25712646 }
25722647 return NULL;
2648
+}
2649
+
2650
+static char *get_modinfo(const struct load_info *info, const char *tag)
2651
+{
2652
+ return get_next_modinfo(info, tag, NULL);
25732653 }
25742654
25752655 static void setup_modinfo(struct module *mod, struct load_info *info)
....@@ -2596,10 +2676,10 @@
25962676
25972677 #ifdef CONFIG_KALLSYMS
25982678
2599
-/* lookup symbol in given range of kernel_symbols */
2600
-static const struct kernel_symbol *lookup_symbol(const char *name,
2601
- const struct kernel_symbol *start,
2602
- const struct kernel_symbol *stop)
2679
+/* Lookup exported symbol in given range of kernel_symbols */
2680
+static const struct kernel_symbol *lookup_exported_symbol(const char *name,
2681
+ const struct kernel_symbol *start,
2682
+ const struct kernel_symbol *stop)
26032683 {
26042684 return bsearch(name, start, stop - start,
26052685 sizeof(struct kernel_symbol), cmp_name);
....@@ -2610,9 +2690,10 @@
26102690 {
26112691 const struct kernel_symbol *ks;
26122692 if (!mod)
2613
- ks = lookup_symbol(name, __start___ksymtab, __stop___ksymtab);
2693
+ ks = lookup_exported_symbol(name, __start___ksymtab, __stop___ksymtab);
26142694 else
2615
- ks = lookup_symbol(name, mod->syms, mod->syms + mod->num_syms);
2695
+ ks = lookup_exported_symbol(name, mod->syms, mod->syms + mod->num_syms);
2696
+
26162697 return ks != NULL && kernel_symbol_value(ks) == value;
26172698 }
26182699
....@@ -2720,6 +2801,8 @@
27202801 info->symoffs = ALIGN(mod->core_layout.size, symsect->sh_addralign ?: 1);
27212802 info->stroffs = mod->core_layout.size = info->symoffs + ndst * sizeof(Elf_Sym);
27222803 mod->core_layout.size += strtab_size;
2804
+ info->core_typeoffs = mod->core_layout.size;
2805
+ mod->core_layout.size += ndst * sizeof(char);
27232806 mod->core_layout.size = debug_align(mod->core_layout.size);
27242807
27252808 /* Put string table section at end of init part of module. */
....@@ -2733,6 +2816,8 @@
27332816 __alignof__(struct mod_kallsyms));
27342817 info->mod_kallsyms_init_off = mod->init_layout.size;
27352818 mod->init_layout.size += sizeof(struct mod_kallsyms);
2819
+ info->init_typeoffs = mod->init_layout.size;
2820
+ mod->init_layout.size += nsrc * sizeof(char);
27362821 mod->init_layout.size = debug_align(mod->init_layout.size);
27372822 }
27382823
....@@ -2756,20 +2841,23 @@
27562841 mod->kallsyms->num_symtab = symsec->sh_size / sizeof(Elf_Sym);
27572842 /* Make sure we get permanent strtab: don't use info->strtab. */
27582843 mod->kallsyms->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
2844
+ mod->kallsyms->typetab = mod->init_layout.base + info->init_typeoffs;
27592845
2760
- /* Set types up while we still have access to sections. */
2761
- for (i = 0; i < mod->kallsyms->num_symtab; i++)
2762
- mod->kallsyms->symtab[i].st_info
2763
- = elf_type(&mod->kallsyms->symtab[i], info);
2764
-
2765
- /* Now populate the cut down core kallsyms for after init. */
2846
+ /*
2847
+ * Now populate the cut down core kallsyms for after init
2848
+ * and set types up while we still have access to sections.
2849
+ */
27662850 mod->core_kallsyms.symtab = dst = mod->core_layout.base + info->symoffs;
27672851 mod->core_kallsyms.strtab = s = mod->core_layout.base + info->stroffs;
2852
+ mod->core_kallsyms.typetab = mod->core_layout.base + info->core_typeoffs;
27682853 src = mod->kallsyms->symtab;
27692854 for (ndst = i = 0; i < mod->kallsyms->num_symtab; i++) {
2855
+ mod->kallsyms->typetab[i] = elf_type(src + i, info);
27702856 if (i == 0 || is_livepatch_module(mod) ||
27712857 is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum,
27722858 info->index.pcpu)) {
2859
+ mod->core_kallsyms.typetab[ndst] =
2860
+ mod->kallsyms->typetab[i];
27732861 dst[ndst] = src[i];
27742862 dst[ndst++].st_name = s - mod->core_kallsyms.strtab;
27752863 s += strlcpy(s, &mod->kallsyms->strtab[src[i].st_name],
....@@ -2792,11 +2880,7 @@
27922880 {
27932881 if (!debug)
27942882 return;
2795
-#ifdef CONFIG_DYNAMIC_DEBUG
2796
- if (ddebug_add_module(debug, num, mod->name))
2797
- pr_err("dynamic debug error adding module: %s\n",
2798
- debug->modname);
2799
-#endif
2883
+ ddebug_add_module(debug, num, mod->name);
28002884 }
28012885
28022886 static void dynamic_debug_remove(struct module *mod, struct _ddebug *debug)
....@@ -2807,7 +2891,19 @@
28072891
28082892 void * __weak module_alloc(unsigned long size)
28092893 {
2810
- return vmalloc_exec(size);
2894
+ return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
2895
+ GFP_KERNEL, PAGE_KERNEL_EXEC, VM_FLUSH_RESET_PERMS,
2896
+ NUMA_NO_NODE, __builtin_return_address(0));
2897
+}
2898
+
2899
+bool __weak module_init_section(const char *name)
2900
+{
2901
+ return strstarts(name, ".init");
2902
+}
2903
+
2904
+bool __weak module_exit_section(const char *name)
2905
+{
2906
+ return strstarts(name, ".exit");
28112907 }
28122908
28132909 #ifdef CONFIG_DEBUG_KMEMLEAK
....@@ -2840,8 +2936,9 @@
28402936 #ifdef CONFIG_MODULE_SIG
28412937 static int module_sig_check(struct load_info *info, int flags)
28422938 {
2843
- int err = -ENOKEY;
2939
+ int err = -ENODATA;
28442940 const unsigned long markerlen = sizeof(MODULE_SIG_STRING) - 1;
2941
+ const char *reason;
28452942 const void *mod = info->hdr;
28462943
28472944 /*
....@@ -2856,16 +2953,39 @@
28562953 err = mod_verify_sig(mod, info);
28572954 }
28582955
2859
- if (!err) {
2956
+ switch (err) {
2957
+ case 0:
28602958 info->sig_ok = true;
28612959 return 0;
2960
+
2961
+ /* We don't permit modules to be loaded into trusted kernels
2962
+ * without a valid signature on them, but if we're not
2963
+ * enforcing, certain errors are non-fatal.
2964
+ */
2965
+ case -ENODATA:
2966
+ reason = "unsigned module";
2967
+ break;
2968
+ case -ENOPKG:
2969
+ reason = "module with unsupported crypto";
2970
+ break;
2971
+ case -ENOKEY:
2972
+ reason = "module with unavailable key";
2973
+ break;
2974
+
2975
+ /* All other errors are fatal, including nomem, unparseable
2976
+ * signatures and signature check failures - even if signatures
2977
+ * aren't required.
2978
+ */
2979
+ default:
2980
+ return err;
28622981 }
28632982
2864
- /* Not having a signature is only an error if we're strict. */
2865
- if (err == -ENOKEY && !is_module_sig_enforced())
2866
- err = 0;
2983
+ if (is_module_sig_enforced()) {
2984
+ pr_notice("Loading of %s is rejected\n", reason);
2985
+ return -EKEYREJECTED;
2986
+ }
28672987
2868
- return err;
2988
+ return security_locked_down(LOCKDOWN_MODULE_SIGNATURE);
28692989 }
28702990 #else /* !CONFIG_MODULE_SIG */
28712991 static int module_sig_check(struct load_info *info, int flags)
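The reworked module_sig_check() above sorts errors into three buckets: success; the "missing or unverifiable signature" codes (-ENODATA, -ENOPKG, -ENOKEY), which are fatal only when signatures are enforced; and everything else, which is always fatal. A userspace model of that policy (hypothetical helper, no lockdown hook):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static int sig_check_result(int err, bool enforce)
{
        const char *reason;

        switch (err) {
        case 0:
                return 0;
        case -ENODATA: reason = "unsigned module"; break;
        case -ENOPKG:  reason = "module with unsupported crypto"; break;
        case -ENOKEY:  reason = "module with unavailable key"; break;
        default:
                return err;             /* bad signature, ENOMEM, ... */
        }

        if (enforce) {
                fprintf(stderr, "Loading of %s is rejected\n", reason);
                return -EKEYREJECTED;
        }
        return 0;                       /* permissive mode lets it through */
}

int main(void)
{
        printf("permissive: %d, enforcing: %d\n",
               sig_check_result(-ENOKEY, false),
               sig_check_result(-ENOKEY, true));
        return 0;
}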
....@@ -2874,9 +2994,33 @@
28742994 }
28752995 #endif /* !CONFIG_MODULE_SIG */
28762996
2877
-/* Sanity checks against invalid binaries, wrong arch, weird elf version. */
2878
-static int elf_header_check(struct load_info *info)
2997
+static int validate_section_offset(struct load_info *info, Elf_Shdr *shdr)
28792998 {
2999
+ unsigned long secend;
3000
+
3001
+ /*
3002
+ * Check for both overflow and offset/size being
3003
+ * too large.
3004
+ */
3005
+ secend = shdr->sh_offset + shdr->sh_size;
3006
+ if (secend < shdr->sh_offset || secend > info->len)
3007
+ return -ENOEXEC;
3008
+
3009
+ return 0;
3010
+}
3011
+
3012
+/*
3013
+ * Sanity checks against invalid binaries, wrong arch, weird elf version.
3014
+ *
3015
+ * Also do basic validity checks against section offsets and sizes, the
3016
+ * section name string table, and the indices used for it (sh_name).
3017
+ */
3018
+static int elf_validity_check(struct load_info *info)
3019
+{
3020
+ unsigned int i;
3021
+ Elf_Shdr *shdr, *strhdr;
3022
+ int err;
3023
+
28803024 if (info->len < sizeof(*(info->hdr)))
28813025 return -ENOEXEC;
28823026
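validate_section_offset() above checks both that the section end fits in the file and that the addition did not wrap: with attacker-controlled sh_offset and sh_size, off + size can overflow and land back inside the buffer length. A small demo of why both tests are needed:

#include <limits.h>
#include <stdio.h>

static int section_in_bounds(unsigned long off, unsigned long size,
                             unsigned long len)
{
        unsigned long end = off + size;

        /* "end <= len" alone would wrongly accept the wrapped case */
        return end >= off && end <= len;
}

int main(void)
{
        printf("ok section:      %d\n", section_in_bounds(0x100, 0x200, 0x1000));
        printf("wrapped section: %d\n",
               section_in_bounds(ULONG_MAX - 0xfff, 0x2000, 0x1000));
        return 0;
}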
....@@ -2886,10 +3030,77 @@
28863030 || info->hdr->e_shentsize != sizeof(Elf_Shdr))
28873031 return -ENOEXEC;
28883032
3033
+ /*
3034
+ * e_shnum is 16 bits, and sizeof(Elf_Shdr) is
3035
+ * known and small. So e_shnum * sizeof(Elf_Shdr)
3036
+ * will not overflow unsigned long on any platform.
3037
+ */
28893038 if (info->hdr->e_shoff >= info->len
28903039 || (info->hdr->e_shnum * sizeof(Elf_Shdr) >
28913040 info->len - info->hdr->e_shoff))
28923041 return -ENOEXEC;
3042
+
3043
+ info->sechdrs = (void *)info->hdr + info->hdr->e_shoff;
3044
+
3045
+ /*
3046
+ * Verify if the section name table index is valid.
3047
+ */
3048
+ if (info->hdr->e_shstrndx == SHN_UNDEF
3049
+ || info->hdr->e_shstrndx >= info->hdr->e_shnum)
3050
+ return -ENOEXEC;
3051
+
3052
+ strhdr = &info->sechdrs[info->hdr->e_shstrndx];
3053
+ err = validate_section_offset(info, strhdr);
3054
+ if (err < 0)
3055
+ return err;
3056
+
3057
+ /*
3058
+ * The section name table must be NUL-terminated, as required
3059
+ * by the spec. This makes strcmp and pr_* calls that access
3060
+ * strings in the section safe.
3061
+ */
3062
+ info->secstrings = (void *)info->hdr + strhdr->sh_offset;
3063
+ if (info->secstrings[strhdr->sh_size - 1] != '\0')
3064
+ return -ENOEXEC;
3065
+
3066
+ /*
3067
+ * The code assumes that section 0 has a length of zero and
3068
+ * an addr of zero, so check for it.
3069
+ */
3070
+ if (info->sechdrs[0].sh_type != SHT_NULL
3071
+ || info->sechdrs[0].sh_size != 0
3072
+ || info->sechdrs[0].sh_addr != 0)
3073
+ return -ENOEXEC;
3074
+
3075
+ for (i = 1; i < info->hdr->e_shnum; i++) {
3076
+ shdr = &info->sechdrs[i];
3077
+ switch (shdr->sh_type) {
3078
+ case SHT_NULL:
3079
+ case SHT_NOBITS:
3080
+ continue;
3081
+ case SHT_SYMTAB:
3082
+ if (shdr->sh_link == SHN_UNDEF
3083
+ || shdr->sh_link >= info->hdr->e_shnum)
3084
+ return -ENOEXEC;
3085
+ fallthrough;
3086
+ default:
3087
+ err = validate_section_offset(info, shdr);
3088
+ if (err < 0) {
3089
+ pr_err("Invalid ELF section in module (section %u type %u)\n",
3090
+ i, shdr->sh_type);
3091
+ return err;
3092
+ }
3093
+
3094
+ if (shdr->sh_flags & SHF_ALLOC) {
3095
+ if (shdr->sh_name >= strhdr->sh_size) {
3096
+ pr_err("Invalid ELF section name in module (section %u type %u)\n",
3097
+ i, shdr->sh_type);
3098
+ return -ENOEXEC;
3099
+ }
3100
+ }
3101
+ break;
3102
+ }
3103
+ }
28933104
28943105 return 0;
28953106 }
....@@ -2955,22 +3166,27 @@
29553166 if (info->len < sizeof(*(info->hdr)))
29563167 return -ENOEXEC;
29573168
2958
- err = security_kernel_load_data(LOADING_MODULE);
3169
+ err = security_kernel_load_data(LOADING_MODULE, true);
29593170 if (err)
29603171 return err;
29613172
29623173 /* Suck in entire file: we'll want most of it. */
2963
- info->hdr = __vmalloc(info->len,
2964
- GFP_KERNEL | __GFP_NOWARN, PAGE_KERNEL);
3174
+ info->hdr = __vmalloc(info->len, GFP_KERNEL | __GFP_NOWARN);
29653175 if (!info->hdr)
29663176 return -ENOMEM;
29673177
29683178 if (copy_chunked_from_user(info->hdr, umod, info->len) != 0) {
2969
- vfree(info->hdr);
2970
- return -EFAULT;
3179
+ err = -EFAULT;
3180
+ goto out;
29713181 }
29723182
2973
- return 0;
3183
+ err = security_kernel_post_load_data((char *)info->hdr, info->len,
3184
+ LOADING_MODULE, "init_module");
3185
+out:
3186
+ if (err)
3187
+ vfree(info->hdr);
3188
+
3189
+ return err;
29743190 }
29753191
29763192 static void free_copy(struct load_info *info)
....@@ -2987,21 +3203,11 @@
29873203
29883204 for (i = 1; i < info->hdr->e_shnum; i++) {
29893205 Elf_Shdr *shdr = &info->sechdrs[i];
2990
- if (shdr->sh_type != SHT_NOBITS
2991
- && info->len < shdr->sh_offset + shdr->sh_size) {
2992
- pr_err("Module len %lu truncated\n", info->len);
2993
- return -ENOEXEC;
2994
- }
29953206
29963207 /* Mark all sections sh_addr with their address in the
29973208 temporary image. */
29983209 shdr->sh_addr = (size_t)info->hdr + shdr->sh_offset;
29993210
3000
-#ifndef CONFIG_MODULE_UNLOAD
3001
- /* Don't load .exit sections */
3002
- if (strstarts(info->secstrings+shdr->sh_name, ".exit"))
3003
- shdr->sh_flags &= ~(unsigned long)SHF_ALLOC;
3004
-#endif
30053211 }
30063212
30073213 /* Track but don't keep modinfo and version sections. */
....@@ -3022,11 +3228,6 @@
30223228 static int setup_load_info(struct load_info *info, int flags)
30233229 {
30243230 unsigned int i;
3025
-
3026
- /* Set up the convenience variables */
3027
- info->sechdrs = (void *)info->hdr + info->hdr->e_shoff;
3028
- info->secstrings = (void *)info->hdr
3029
- + info->sechdrs[info->hdr->e_shstrndx].sh_offset;
30303231
30313232 /* Try to find a name early so we can log errors with a module name */
30323233 info->index.info = find_sec(info, ".modinfo");
....@@ -3164,10 +3365,23 @@
31643365 }
31653366 #endif
31663367
3368
+ mod->noinstr_text_start = section_objs(info, ".noinstr.text", 1,
3369
+ &mod->noinstr_text_size);
3370
+
31673371 #ifdef CONFIG_TRACEPOINTS
31683372 mod->tracepoints_ptrs = section_objs(info, "__tracepoints_ptrs",
31693373 sizeof(*mod->tracepoints_ptrs),
31703374 &mod->num_tracepoints);
3375
+#endif
3376
+#ifdef CONFIG_TREE_SRCU
3377
+ mod->srcu_struct_ptrs = section_objs(info, "___srcu_struct_ptrs",
3378
+ sizeof(*mod->srcu_struct_ptrs),
3379
+ &mod->num_srcu_structs);
3380
+#endif
3381
+#ifdef CONFIG_BPF_EVENTS
3382
+ mod->bpf_raw_events = section_objs(info, "__bpf_raw_tp_map",
3383
+ sizeof(*mod->bpf_raw_events),
3384
+ &mod->num_bpf_raw_events);
31713385 #endif
31723386 #ifdef CONFIG_JUMP_LABEL
31733387 mod->jump_entries = section_objs(info, "__jump_table",
....@@ -3189,7 +3403,7 @@
31893403 #endif
31903404 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
31913405 /* sechdrs[0].sh_size is always zero */
3192
- mod->ftrace_callsites = section_objs(info, "__mcount_loc",
3406
+ mod->ftrace_callsites = section_objs(info, FTRACE_CALLSITE_SECTION,
31933407 sizeof(*mod->ftrace_callsites),
31943408 &mod->num_ftrace_callsites);
31953409 #endif
....@@ -3198,13 +3412,25 @@
31983412 sizeof(*mod->ei_funcs),
31993413 &mod->num_ei_funcs);
32003414 #endif
3415
+#ifdef CONFIG_KPROBES
3416
+ mod->kprobes_text_start = section_objs(info, ".kprobes.text", 1,
3417
+ &mod->kprobes_text_size);
3418
+ mod->kprobe_blacklist = section_objs(info, "_kprobe_blacklist",
3419
+ sizeof(unsigned long),
3420
+ &mod->num_kprobe_blacklist);
3421
+#endif
3422
+#ifdef CONFIG_HAVE_STATIC_CALL_INLINE
3423
+ mod->static_call_sites = section_objs(info, ".static_call_sites",
3424
+ sizeof(*mod->static_call_sites),
3425
+ &mod->num_static_call_sites);
3426
+#endif
32013427 mod->extable = section_objs(info, "__ex_table",
32023428 sizeof(*mod->extable), &mod->num_exentries);
32033429
32043430 if (section_addr(info, "__obsparm"))
32053431 pr_warn("%s: Ignoring obsolete parameters\n", mod->name);
32063432
3207
- info->debug = section_objs(info, "__verbose",
3433
+ info->debug = section_objs(info, "__dyndbg",
32083434 sizeof(*info->debug), &info->num_debug);
32093435
32103436 return 0;
....@@ -3316,12 +3542,6 @@
33163542
33173543 static void flush_module_icache(const struct module *mod)
33183544 {
3319
- mm_segment_t old_fs;
3320
-
3321
- /* flush the icache in correct context */
3322
- old_fs = get_fs();
3323
- set_fs(KERNEL_DS);
3324
-
33253545 /*
33263546 * Flush the instruction cache, since we've played with text.
33273547 * Do it before processing of module parameters, so the module
....@@ -3333,8 +3553,6 @@
33333553 + mod->init_layout.size);
33343554 flush_icache_range((unsigned long)mod->core_layout.base,
33353555 (unsigned long)mod->core_layout.base + mod->core_layout.size);
3336
-
3337
- set_fs(old_fs);
33383556 }
33393557
33403558 int __weak module_frob_arch_sections(Elf_Ehdr *hdr,
....@@ -3382,6 +3600,11 @@
33823600 if (err < 0)
33833601 return ERR_PTR(err);
33843602
3603
+ err = module_enforce_rwx_sections(info->hdr, info->sechdrs,
3604
+ info->secstrings, info->mod);
3605
+ if (err < 0)
3606
+ return ERR_PTR(err);
3607
+
33853608 /* We will do a special allocation for per-cpu sections later. */
33863609 info->sechdrs[info->index.pcpu].sh_flags &= ~(unsigned long)SHF_ALLOC;
33873610
....@@ -3391,6 +3614,15 @@
33913614 * Note: ro_after_init sections also have SHF_{WRITE,ALLOC} set.
33923615 */
33933616 ndx = find_sec(info, ".data..ro_after_init");
3617
+ if (ndx)
3618
+ info->sechdrs[ndx].sh_flags |= SHF_RO_AFTER_INIT;
3619
+ /*
3620
+ * Mark the __jump_table section as ro_after_init as well: these data
3621
+ * structures are never modified, with the exception of entries that
3622
+ * refer to code in the __init section, which are annotated as such
3623
+ * at module load time.
3624
+ */
3625
+ ndx = find_sec(info, "__jump_table");
33943626 if (ndx)
33953627 info->sechdrs[ndx].sh_flags |= SHF_RO_AFTER_INIT;
33963628
....@@ -3416,7 +3648,15 @@
34163648 {
34173649 percpu_modfree(mod);
34183650 module_arch_freeing_init(mod);
3651
+ trace_android_vh_set_memory_rw((unsigned long)mod->init_layout.base,
3652
+ (mod->init_layout.size)>>PAGE_SHIFT);
3653
+ trace_android_vh_set_memory_nx((unsigned long)mod->init_layout.base,
3654
+ (mod->init_layout.size)>>PAGE_SHIFT);
34193655 module_memfree(mod->init_layout.base);
3656
+ trace_android_vh_set_memory_rw((unsigned long)mod->core_layout.base,
3657
+ (mod->core_layout.size)>>PAGE_SHIFT);
3658
+ trace_android_vh_set_memory_nx((unsigned long)mod->core_layout.base,
3659
+ (mod->core_layout.size)>>PAGE_SHIFT);
34203660 module_memfree(mod->core_layout.base);
34213661 }
34223662
....@@ -3426,8 +3666,6 @@
34263666 {
34273667 return 0;
34283668 }
3429
-
3430
-static void cfi_init(struct module *mod);
34313669
34323670 static int post_relocation(struct module *mod, const struct load_info *info)
34333671 {
....@@ -3440,9 +3678,6 @@
34403678
34413679 /* Setup kallsyms-specific fields. */
34423680 add_kallsyms(mod, info);
3443
-
3444
- /* Setup CFI for the module. */
3445
- cfi_init(mod);
34463681
34473682 /* Arch-specific module finalizing. */
34483683 return module_finalize(info->hdr, info->sechdrs, mod);
....@@ -3462,7 +3697,8 @@
34623697 sched_annotate_sleep();
34633698 mutex_lock(&module_mutex);
34643699 mod = find_module_all(name, strlen(name), true);
3465
- ret = !mod || mod->state == MODULE_STATE_LIVE;
3700
+ ret = !mod || mod->state == MODULE_STATE_LIVE
3701
+ || mod->state == MODULE_STATE_GOING;
34663702 mutex_unlock(&module_mutex);
34673703
34683704 return ret;
....@@ -3481,15 +3717,24 @@
34813717
34823718 /* For freeing module_init on success, in case kallsyms traversing */
34833719 struct mod_initfree {
3484
- struct rcu_head rcu;
3720
+ struct llist_node node;
34853721 void *module_init;
34863722 };
34873723
3488
-static void do_free_init(struct rcu_head *head)
3724
+static void do_free_init(struct work_struct *w)
34893725 {
3490
- struct mod_initfree *m = container_of(head, struct mod_initfree, rcu);
3491
- module_memfree(m->module_init);
3492
- kfree(m);
3726
+ struct llist_node *pos, *n, *list;
3727
+ struct mod_initfree *initfree;
3728
+
3729
+ list = llist_del_all(&init_free_list);
3730
+
3731
+ synchronize_rcu();
3732
+
3733
+ llist_for_each_safe(pos, n, list) {
3734
+ initfree = container_of(pos, struct mod_initfree, node);
3735
+ module_memfree(initfree->module_init);
3736
+ kfree(initfree);
3737
+ }
34933738 }
34943739
34953740 /*
....@@ -3555,9 +3800,13 @@
35553800 rcu_assign_pointer(mod->kallsyms, &mod->core_kallsyms);
35563801 #endif
35573802 module_enable_ro(mod, true);
3803
+ trace_android_vh_set_module_permit_after_init(mod);
35583804 mod_tree_remove_init(mod);
3559
- disable_ro_nx(&mod->init_layout);
35603805 module_arch_freeing_init(mod);
3806
+ trace_android_vh_set_memory_rw((unsigned long)mod->init_layout.base,
3807
+ (mod->init_layout.size)>>PAGE_SHIFT);
3808
+ trace_android_vh_set_memory_nx((unsigned long)mod->init_layout.base,
3809
+ (mod->init_layout.size)>>PAGE_SHIFT);
35613810 mod->init_layout.base = NULL;
35623811 mod->init_layout.size = 0;
35633812 mod->init_layout.ro_size = 0;
....@@ -3566,15 +3815,19 @@
35663815 /*
35673816 * We want to free module_init, but be aware that kallsyms may be
35683817 * walking this with preempt disabled. In all the failure paths, we
3569
- * call synchronize_sched(), but we don't want to slow down the success
3570
- * path, so use actual RCU here.
3818
+ * call synchronize_rcu(), but we don't want to slow down the success
3819
+ * path. module_memfree() cannot be called in an interrupt, so do the
3820
+ * work and call synchronize_rcu() in a work queue.
3821
+ *
35713822 * Note that module_alloc() on most architectures creates W+X page
35723823 * mappings which won't be cleaned up until do_free_init() runs. Any
35733824 * code such as mark_rodata_ro() which depends on those mappings to
35743825 * be cleaned up needs to sync with the queued work - ie
3575
- * rcu_barrier_sched()
3826
+ * rcu_barrier()
35763827 */
3577
- call_rcu_sched(&freeinit->rcu, do_free_init);
3828
+ if (llist_add(&freeinit->node, &init_free_list))
3829
+ schedule_work(&init_free_wq);
3830
+
35783831 mutex_unlock(&module_mutex);
35793832 wake_up_all(&module_wq);
35803833
....@@ -3585,7 +3838,7 @@
35853838 fail:
35863839 /* Try to protect us from buggy refcounters. */
35873840 mod->state = MODULE_STATE_GOING;
3588
- synchronize_sched();
3841
+ synchronize_rcu();
35893842 module_put(mod);
35903843 blocking_notifier_call_chain(&module_notify_list,
35913844 MODULE_STATE_GOING, mod);
....@@ -3616,20 +3869,35 @@
36163869
36173870 mod->state = MODULE_STATE_UNFORMED;
36183871
3619
-again:
36203872 mutex_lock(&module_mutex);
36213873 old = find_module_all(mod->name, strlen(mod->name), true);
36223874 if (old != NULL) {
3623
- if (old->state != MODULE_STATE_LIVE) {
3875
+ if (old->state == MODULE_STATE_COMING
3876
+ || old->state == MODULE_STATE_UNFORMED) {
36243877 /* Wait in case it fails to load. */
36253878 mutex_unlock(&module_mutex);
36263879 err = wait_event_interruptible(module_wq,
36273880 finished_loading(mod->name));
36283881 if (err)
36293882 goto out_unlocked;
3630
- goto again;
3883
+
3884
+ /* The module might have gone in the meantime. */
3885
+ mutex_lock(&module_mutex);
3886
+ old = find_module_all(mod->name, strlen(mod->name),
3887
+ true);
36313888 }
3632
- err = -EEXIST;
3889
+
3890
+ /*
3891
+ * We are here only when the same module was being loaded. Do
3892
+ * not try to load it again right now. It prevents long delays
3893
+ * caused by serialized module load failures. It might happen
3894
+ * when more devices of the same type trigger load of
3895
+ * a particular module.
3896
+ */
3897
+ if (old && old->state == MODULE_STATE_LIVE)
3898
+ err = -EEXIST;
3899
+ else
3900
+ err = -EBUSY;
36333901 goto out;
36343902 }
36353903 mod_update_bounds(mod);
....@@ -3650,7 +3918,7 @@
36503918 mutex_lock(&module_mutex);
36513919
36523920 /* Find duplicate symbols (must be called under lock). */
3653
- err = verify_export_symbols(mod);
3921
+ err = verify_exported_symbols(mod);
36543922 if (err < 0)
36553923 goto out;
36563924
....@@ -3660,6 +3928,7 @@
36603928 module_enable_ro(mod, false);
36613929 module_enable_nx(mod);
36623930 module_enable_x(mod);
3931
+ trace_android_vh_set_module_permit_before_init(mod);
36633932
36643933 /* Mark state as coming so strong_try_module_get() ignores us,
36653934 * but kallsyms etc. can see us. */
....@@ -3682,9 +3951,13 @@
36823951 if (err)
36833952 return err;
36843953
3685
- blocking_notifier_call_chain(&module_notify_list,
3686
- MODULE_STATE_COMING, mod);
3687
- return 0;
3954
+ err = blocking_notifier_call_chain_robust(&module_notify_list,
3955
+ MODULE_STATE_COMING, MODULE_STATE_GOING, mod);
3956
+ err = notifier_to_errno(err);
3957
+ if (err)
3958
+ klp_module_going(mod);
3959
+
3960
+ return err;
36883961 }
36893962
36903963 static int unknown_module_param_cb(char *param, char *val, const char *modname,
....@@ -3705,6 +3978,8 @@
37053978 return 0;
37063979 }
37073980
3981
+static void cfi_init(struct module *mod);
3982
+
37083983 /* Allocate and load the module: note that size of section 0 is always
37093984 zero, and we rely on this for optional sections. */
37103985 static int load_module(struct load_info *info, const char __user *uargs,
....@@ -3714,22 +3989,49 @@
37143989 long err = 0;
37153990 char *after_dashes;
37163991
3717
- err = elf_header_check(info);
3992
+ /*
3993
+ * Do the signature check (if any) first. All that
3994
+ * the signature check needs is info->len, it does
3995
+ * not need any of the section info. That can be
3996
+ * set up later. This will minimize the chances
3997
+ * of a corrupt module causing problems before
3998
+ * we even get to the signature check.
3999
+ *
4000
+ * The check will also adjust info->len by stripping
4001
+ * off the sig length at the end of the module, making
4002
+ * checks against info->len more correct.
4003
+ */
4004
+ err = module_sig_check(info, flags);
37184005 if (err)
37194006 goto free_copy;
37204007
4008
+ /*
4009
+ * Do basic sanity checks against the ELF header and
4010
+ * sections.
4011
+ */
4012
+ err = elf_validity_check(info);
4013
+ if (err) {
4014
+ pr_err("Module has invalid ELF structures\n");
4015
+ goto free_copy;
4016
+ }
4017
+
4018
+ /*
4019
+ * Everything checks out, so set up the section info
4020
+ * in the info structure.
4021
+ */
37214022 err = setup_load_info(info, flags);
37224023 if (err)
37234024 goto free_copy;
37244025
4026
+ /*
4027
+ * Now that we know we have the correct module name, check
4028
+ * if it's blacklisted.
4029
+ */
37254030 if (blacklisted(info->name)) {
37264031 err = -EPERM;
4032
+ pr_err("Module %s is blacklisted\n", info->name);
37274033 goto free_copy;
37284034 }
3729
-
3730
- err = module_sig_check(info, flags);
3731
- if (err)
3732
- goto free_copy;
37334035
37344036 err = rewrite_section_headers(info, flags);
37354037 if (err)
....@@ -3805,6 +4107,9 @@
38054107
38064108 flush_module_icache(mod);
38074109
4110
+ /* Setup CFI for the module. */
4111
+ cfi_init(mod);
4112
+
38084113 /* Now copy in args */
38094114 mod->args = strndup_user(uargs, ~0UL >> 1);
38104115 if (IS_ERR(mod->args)) {
....@@ -3872,16 +4177,13 @@
38724177 module_bug_cleanup(mod);
38734178 mutex_unlock(&module_mutex);
38744179
3875
- /* we can't deallocate the module until we clear memory protection */
3876
- module_disable_ro(mod);
3877
- module_disable_nx(mod);
3878
-
38794180 ddebug_cleanup:
38804181 ftrace_release_mod(mod);
38814182 dynamic_debug_remove(mod, info->debug);
3882
- synchronize_sched();
4183
+ synchronize_rcu();
38834184 kfree(mod->args);
38844185 free_arch_cleanup:
4186
+ cfi_cleanup(mod);
38854187 module_arch_cleanup(mod);
38864188 free_modinfo:
38874189 free_modinfo(mod);
....@@ -3894,7 +4196,7 @@
38944196 mod_tree_remove(mod);
38954197 wake_up_all(&module_wq);
38964198 /* Wait for RCU-sched synchronizing before releasing mod->list. */
3897
- synchronize_sched();
4199
+ synchronize_rcu();
38984200 mutex_unlock(&module_mutex);
38994201 free_module:
39004202 /* Free lock-classes; relies on the preceding sync_rcu() */
....@@ -3929,8 +4231,7 @@
39294231 SYSCALL_DEFINE3(finit_module, int, fd, const char __user *, uargs, int, flags)
39304232 {
39314233 struct load_info info = { };
3932
- loff_t size;
3933
- void *hdr;
4234
+ void *hdr = NULL;
39344235 int err;
39354236
39364237 err = may_init_module();
....@@ -3943,12 +4244,12 @@
39434244 |MODULE_INIT_IGNORE_VERMAGIC))
39444245 return -EINVAL;
39454246
3946
- err = kernel_read_file_from_fd(fd, &hdr, &size, INT_MAX,
4247
+ err = kernel_read_file_from_fd(fd, 0, &hdr, INT_MAX, NULL,
39474248 READING_MODULE);
3948
- if (err)
4249
+ if (err < 0)
39494250 return err;
39504251 info.hdr = hdr;
3951
- info.len = size;
4252
+ info.len = err;
39524253
39534254 return load_module(&info, uargs, flags);
39544255 }
....@@ -3971,18 +4272,27 @@
39714272 && (str[2] == '\0' || str[2] == '.');
39724273 }
39734274
3974
-static const char *symname(struct mod_kallsyms *kallsyms, unsigned int symnum)
4275
+static inline int is_cfi_typeid_symbol(const char *str)
4276
+{
4277
+ return !strncmp(str, "__typeid__", 10);
4278
+}
4279
+
4280
+static const char *kallsyms_symbol_name(struct mod_kallsyms *kallsyms, unsigned int symnum)
39754281 {
39764282 return kallsyms->strtab + kallsyms->symtab[symnum].st_name;
39774283 }
39784284
3979
-static const char *get_ksymbol(struct module *mod,
3980
- unsigned long addr,
3981
- unsigned long *size,
3982
- unsigned long *offset)
4285
+/*
4286
+ * Given a module and address, find the corresponding symbol and return its name
4287
+ * while providing its size and offset if needed.
4288
+ */
4289
+static const char *find_kallsyms_symbol(struct module *mod,
4290
+ unsigned long addr,
4291
+ unsigned long *size,
4292
+ unsigned long *offset)
39834293 {
39844294 unsigned int i, best = 0;
3985
- unsigned long nextval;
4295
+ unsigned long nextval, bestval;
39864296 struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms);
39874297
39884298 /* At worse, next value is at end of module */
....@@ -3991,34 +4301,41 @@
39914301 else
39924302 nextval = (unsigned long)mod->core_layout.base+mod->core_layout.text_size;
39934303
4304
+ bestval = kallsyms_symbol_value(&kallsyms->symtab[best]);
4305
+
39944306 /* Scan for closest preceding symbol, and next symbol. (ELF
39954307 starts real symbols at 1). */
39964308 for (i = 1; i < kallsyms->num_symtab; i++) {
3997
- if (kallsyms->symtab[i].st_shndx == SHN_UNDEF)
4309
+ const Elf_Sym *sym = &kallsyms->symtab[i];
4310
+ unsigned long thisval = kallsyms_symbol_value(sym);
4311
+
4312
+ if (sym->st_shndx == SHN_UNDEF)
39984313 continue;
39994314
40004315 /* We ignore unnamed symbols: they're uninformative
40014316 * and inserted at a whim. */
4002
- if (*symname(kallsyms, i) == '\0'
4003
- || is_arm_mapping_symbol(symname(kallsyms, i)))
4317
+ if (*kallsyms_symbol_name(kallsyms, i) == '\0'
4318
+ || is_arm_mapping_symbol(kallsyms_symbol_name(kallsyms, i))
4319
+ || is_cfi_typeid_symbol(kallsyms_symbol_name(kallsyms, i)))
40044320 continue;
40054321
4006
- if (kallsyms->symtab[i].st_value <= addr
4007
- && kallsyms->symtab[i].st_value > kallsyms->symtab[best].st_value)
4322
+ if (thisval <= addr && thisval > bestval) {
40084323 best = i;
4009
- if (kallsyms->symtab[i].st_value > addr
4010
- && kallsyms->symtab[i].st_value < nextval)
4011
- nextval = kallsyms->symtab[i].st_value;
4324
+ bestval = thisval;
4325
+ }
4326
+ if (thisval > addr && thisval < nextval)
4327
+ nextval = thisval;
40124328 }
40134329
40144330 if (!best)
40154331 return NULL;
40164332
40174333 if (size)
4018
- *size = nextval - kallsyms->symtab[best].st_value;
4334
+ *size = nextval - bestval;
40194335 if (offset)
4020
- *offset = addr - kallsyms->symtab[best].st_value;
4021
- return symname(kallsyms, best);
4336
+ *offset = addr - bestval;
4337
+
4338
+ return kallsyms_symbol_name(kallsyms, best);
40224339 }
40234340
40244341 void * __weak dereference_module_function_descriptor(struct module *mod,
....@@ -4043,7 +4360,8 @@
40434360 if (mod) {
40444361 if (modname)
40454362 *modname = mod->name;
4046
- ret = get_ksymbol(mod, addr, size, offset);
4363
+
4364
+ ret = find_kallsyms_symbol(mod, addr, size, offset);
40474365 }
40484366 /* Make a copy in here where it's safe */
40494367 if (ret) {
....@@ -4066,9 +4384,10 @@
40664384 if (within_module(addr, mod)) {
40674385 const char *sym;
40684386
4069
- sym = get_ksymbol(mod, addr, NULL, NULL);
4387
+ sym = find_kallsyms_symbol(mod, addr, NULL, NULL);
40704388 if (!sym)
40714389 goto out;
4390
+
40724391 strlcpy(symname, sym, KSYM_NAME_LEN);
40734392 preempt_enable();
40744393 return 0;
....@@ -4091,7 +4410,7 @@
40914410 if (within_module(addr, mod)) {
40924411 const char *sym;
40934412
4094
- sym = get_ksymbol(mod, addr, size, offset);
4413
+ sym = find_kallsyms_symbol(mod, addr, size, offset);
40954414 if (!sym)
40964415 goto out;
40974416 if (modname)
....@@ -4120,9 +4439,11 @@
41204439 continue;
41214440 kallsyms = rcu_dereference_sched(mod->kallsyms);
41224441 if (symnum < kallsyms->num_symtab) {
4123
- *value = kallsyms->symtab[symnum].st_value;
4124
- *type = kallsyms->symtab[symnum].st_info;
4125
- strlcpy(name, symname(kallsyms, symnum), KSYM_NAME_LEN);
4442
+ const Elf_Sym *sym = &kallsyms->symtab[symnum];
4443
+
4444
+ *value = kallsyms_symbol_value(sym);
4445
+ *type = kallsyms->typetab[symnum];
4446
+ strlcpy(name, kallsyms_symbol_name(kallsyms, symnum), KSYM_NAME_LEN);
41264447 strlcpy(module_name, mod->name, MODULE_NAME_LEN);
41274448 *exported = is_exported(name, *value, mod);
41284449 preempt_enable();
....@@ -4134,15 +4455,19 @@
41344455 return -ERANGE;
41354456 }
41364457
4137
-static unsigned long mod_find_symname(struct module *mod, const char *name)
4458
+/* Given a module and name of symbol, find and return the symbol's value */
4459
+static unsigned long find_kallsyms_symbol_value(struct module *mod, const char *name)
41384460 {
41394461 unsigned int i;
41404462 struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms);
41414463
4142
- for (i = 0; i < kallsyms->num_symtab; i++)
4143
- if (strcmp(name, symname(kallsyms, i)) == 0 &&
4144
- kallsyms->symtab[i].st_shndx != SHN_UNDEF)
4145
- return kallsyms->symtab[i].st_value;
4464
+ for (i = 0; i < kallsyms->num_symtab; i++) {
4465
+ const Elf_Sym *sym = &kallsyms->symtab[i];
4466
+
4467
+ if (strcmp(name, kallsyms_symbol_name(kallsyms, i)) == 0 &&
4468
+ sym->st_shndx != SHN_UNDEF)
4469
+ return kallsyms_symbol_value(sym);
4470
+ }
41464471 return 0;
41474472 }
41484473
....@@ -4157,12 +4482,12 @@
41574482 preempt_disable();
41584483 if ((colon = strnchr(name, MODULE_NAME_LEN, ':')) != NULL) {
41594484 if ((mod = find_module_all(name, colon - name, false)) != NULL)
4160
- ret = mod_find_symname(mod, colon+1);
4485
+ ret = find_kallsyms_symbol_value(mod, colon+1);
41614486 } else {
41624487 list_for_each_entry_rcu(mod, &modules, list) {
41634488 if (mod->state == MODULE_STATE_UNFORMED)
41644489 continue;
4165
- if ((ret = mod_find_symname(mod, name)) != 0)
4490
+ if ((ret = find_kallsyms_symbol_value(mod, name)) != 0)
41664491 break;
41674492 }
41684493 }
....@@ -4187,12 +4512,13 @@
41874512 if (mod->state == MODULE_STATE_UNFORMED)
41884513 continue;
41894514 for (i = 0; i < kallsyms->num_symtab; i++) {
4515
+ const Elf_Sym *sym = &kallsyms->symtab[i];
41904516
4191
- if (kallsyms->symtab[i].st_shndx == SHN_UNDEF)
4517
+ if (sym->st_shndx == SHN_UNDEF)
41924518 continue;
41934519
4194
- ret = fn(data, symname(kallsyms, i),
4195
- mod, kallsyms->symtab[i].st_value);
4520
+ ret = fn(data, kallsyms_symbol_name(kallsyms, i),
4521
+ mod, kallsyms_symbol_value(sym));
41964522 if (ret != 0)
41974523 return ret;
41984524 }
....@@ -4204,18 +4530,30 @@
42044530 static void cfi_init(struct module *mod)
42054531 {
42064532 #ifdef CONFIG_CFI_CLANG
4207
- preempt_disable();
4208
- mod->cfi_check =
4209
- (cfi_check_fn)mod_find_symname(mod, CFI_CHECK_FN_NAME);
4210
- preempt_enable();
4211
- cfi_module_add(mod, module_addr_min, module_addr_max);
4533
+ initcall_t *init;
4534
+ exitcall_t *exit;
4535
+
4536
+ rcu_read_lock_sched();
4537
+ mod->cfi_check = (cfi_check_fn)
4538
+ find_kallsyms_symbol_value(mod, "__cfi_check");
4539
+ init = (initcall_t *)
4540
+ find_kallsyms_symbol_value(mod, "__cfi_jt_init_module");
4541
+ exit = (exitcall_t *)
4542
+ find_kallsyms_symbol_value(mod, "__cfi_jt_cleanup_module");
4543
+ rcu_read_unlock_sched();
4544
+
4545
+ /* Fix init/exit functions to point to the CFI jump table */
4546
+ if (init) mod->init = *init;
4547
+ if (exit) mod->exit = *exit;
4548
+
4549
+ cfi_module_add(mod, module_addr_min);
42124550 #endif
42134551 }
42144552
42154553 static void cfi_cleanup(struct module *mod)
42164554 {
42174555 #ifdef CONFIG_CFI_CLANG
4218
- cfi_module_remove(mod, module_addr_min, module_addr_max);
4556
+ cfi_module_remove(mod, module_addr_min);
42194557 #endif
42204558 }
42214559
....@@ -4326,16 +4664,17 @@
43264664 return err;
43274665 }
43284666
4329
-static const struct file_operations proc_modules_operations = {
4330
- .open = modules_open,
4331
- .read = seq_read,
4332
- .llseek = seq_lseek,
4333
- .release = seq_release,
4667
+static const struct proc_ops modules_proc_ops = {
4668
+ .proc_flags = PROC_ENTRY_PERMANENT,
4669
+ .proc_open = modules_open,
4670
+ .proc_read = seq_read,
4671
+ .proc_lseek = seq_lseek,
4672
+ .proc_release = seq_release,
43344673 };
43354674
43364675 static int __init proc_modules_init(void)
43374676 {
4338
- proc_create("modules", 0, NULL, &proc_modules_operations);
4677
+ proc_create("modules", 0, NULL, &modules_proc_ops);
43394678 return 0;
43404679 }
43414680 module_init(proc_modules_init);
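The /proc/modules hunk above is part of the tree-wide file_operations to proc_ops conversion for procfs. A minimal, hypothetical out-of-tree module showing the same proc_ops plus seq_file wiring (the name "demo" is a placeholder):

#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int demo_show(struct seq_file *m, void *v)
{
        seq_puts(m, "hello from /proc/demo\n");
        return 0;
}

static int demo_open(struct inode *inode, struct file *file)
{
        return single_open(file, demo_show, NULL);
}

static const struct proc_ops demo_proc_ops = {
        .proc_open      = demo_open,
        .proc_read      = seq_read,
        .proc_lseek     = seq_lseek,
        .proc_release   = single_release,
};

static int __init demo_init(void)
{
        return proc_create("demo", 0, NULL, &demo_proc_ops) ? 0 : -ENOMEM;
}

static void __exit demo_exit(void)
{
        remove_proc_entry("demo", NULL);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");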
....@@ -4469,6 +4808,23 @@
44694808 pr_cont("\n");
44704809 }
44714810
4811
+#ifdef CONFIG_ANDROID_DEBUG_SYMBOLS
4812
+void android_debug_for_each_module(int (*fn)(const char *mod_name, void *mod_addr, void *data),
4813
+ void *data)
4814
+{
4815
+ struct module *module;
4816
+
4817
+ preempt_disable();
4818
+ list_for_each_entry_rcu(module, &modules, list) {
4819
+ if (fn(module->name, module->core_layout.base, data))
4820
+ goto out;
4821
+ }
4822
+out:
4823
+ preempt_enable();
4824
+}
4825
+EXPORT_SYMBOL_GPL(android_debug_for_each_module);
4826
+#endif
4827
+
44724828 #ifdef CONFIG_MODVERSIONS
44734829 /* Generate the signature for all relevant module structures here.
44744830 * If these change, we don't want to try to parse the module. */