@@ -64,11 +64,58 @@
 #include <linux/rcuwait.h>
 #include <linux/compat.h>
 #include <linux/io_uring.h>
+#include <linux/sysfs.h>
 
 #include <linux/uaccess.h>
 #include <asm/unistd.h>
 #include <asm/mmu_context.h>
 #include <trace/hooks/mm.h>
+
+/*
+ * The default value should be high enough to not crash a system that randomly
+ * crashes its kernel from time to time, but low enough to at least not permit
+ * overflowing 32-bit refcounts or the ldsem writer count.
+ */
+static unsigned int oops_limit = 10000;
+
+#ifdef CONFIG_SYSCTL
+static struct ctl_table kern_exit_table[] = {
+        {
+                .procname     = "oops_limit",
+                .data         = &oops_limit,
+                .maxlen       = sizeof(oops_limit),
+                .mode         = 0644,
+                .proc_handler = proc_douintvec,
+        },
+        { }
+};
+
+static __init int kernel_exit_sysctls_init(void)
+{
+        register_sysctl_init("kernel", kern_exit_table);
+        return 0;
+}
+late_initcall(kernel_exit_sysctls_init);
+#endif
+
+static atomic_t oops_count = ATOMIC_INIT(0);
+
+#ifdef CONFIG_SYSFS
+static ssize_t oops_count_show(struct kobject *kobj, struct kobj_attribute *attr,
+                               char *page)
+{
+        return sysfs_emit(page, "%d\n", atomic_read(&oops_count));
+}
+
+static struct kobj_attribute oops_count_attr = __ATTR_RO(oops_count);
+
+static __init int kernel_exit_sysfs_init(void)
+{
+        sysfs_add_file_to_group(kernel_kobj, &oops_count_attr.attr, NULL);
+        return 0;
+}
+late_initcall(kernel_exit_sysfs_init);
+#endif
 
 static void __unhash_process(struct task_struct *p, bool group_dead)
 {
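
Together, this hunk gives userspace a tunable and a counter: register_sysctl_init("kernel", kern_exit_table) exposes a writable kernel.oops_limit at /proc/sys/kernel/oops_limit, and sysfs_add_file_to_group(kernel_kobj, ...) exposes a read-only oops counter at /sys/kernel/oops_count. As a minimal sketch of how a monitoring tool might consume the two files (the paths follow from the registrations above; the program itself is illustrative and not part of the patch):

/* oopsstat.c - illustrative only; build with: cc -o oopsstat oopsstat.c */
#include <stdio.h>

/* Read a single non-negative integer from a procfs/sysfs file. */
static long read_long(const char *path)
{
        FILE *f = fopen(path, "r");
        long val = -1;

        if (f) {
                if (fscanf(f, "%ld", &val) != 1)
                        val = -1;
                fclose(f);
        }
        return val;
}

int main(void)
{
        long count = read_long("/sys/kernel/oops_count");
        long limit = read_long("/proc/sys/kernel/oops_limit");

        printf("oopses since boot: %ld\n", count);
        printf("kernel.oops_limit: %ld%s\n", limit,
               limit == 0 ? " (panic on oops disabled)" : "");
        return (count < 0 || limit < 0) ? 1 : 0;
}

Note the "&& limit" guard in make_task_dead() below: writing 0 to kernel.oops_limit (e.g. sysctl kernel.oops_limit=0) disables the panic escalation entirely, while any non-zero value caps how many oopses the kernel will tolerate.
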
@@ -865,6 +912,31 @@
 }
 EXPORT_SYMBOL_GPL(do_exit);
 
+void __noreturn make_task_dead(int signr)
+{
+        /*
+         * Take the task off the cpu after something catastrophic has
+         * happened.
+         */
+        unsigned int limit;
+
+        /*
+         * Every time the system oopses, if the oops happens while a reference
+         * to an object was held, the reference leaks.
+         * If the oops doesn't also leak memory, repeated oopsing can cause
+         * reference counters to wrap around (if they're not using refcount_t).
+         * This means that repeated oopsing can make unexploitable-looking bugs
+         * exploitable through repeated oopsing.
+         * To make sure this can't happen, place an upper bound on how often the
+         * kernel may oops without panic().
+         */
+        limit = READ_ONCE(oops_limit);
+        if (atomic_inc_return(&oops_count) >= limit && limit)
+                panic("Oopsed too often (kernel.oops_limit is %d)", limit);
+
+        do_exit(signr);
+}
+
 void complete_and_exit(struct completion *comp, long code)
 {
         if (comp)