2024-05-16 8d2a02b24d66aa359e83eebc1ed3c0f85367a1cb
kernel/kernel/exit.c
@@ -64,11 +64,58 @@
 #include <linux/rcuwait.h>
 #include <linux/compat.h>
 #include <linux/io_uring.h>
+#include <linux/sysfs.h>
 
 #include <linux/uaccess.h>
 #include <asm/unistd.h>
 #include <asm/mmu_context.h>
 #include <trace/hooks/mm.h>
+
+/*
+ * The default value should be high enough to not crash a system that randomly
+ * crashes its kernel from time to time, but low enough to at least not permit
+ * overflowing 32-bit refcounts or the ldsem writer count.
+ */
+static unsigned int oops_limit = 10000;
+
+#ifdef CONFIG_SYSCTL
+static struct ctl_table kern_exit_table[] = {
+	{
+		.procname	= "oops_limit",
+		.data		= &oops_limit,
+		.maxlen		= sizeof(oops_limit),
+		.mode		= 0644,
+		.proc_handler	= proc_douintvec,
+	},
+	{ }
+};
+
+static __init int kernel_exit_sysctls_init(void)
+{
+	register_sysctl_init("kernel", kern_exit_table);
+	return 0;
+}
+late_initcall(kernel_exit_sysctls_init);
+#endif
+
+static atomic_t oops_count = ATOMIC_INIT(0);
+
+#ifdef CONFIG_SYSFS
+static ssize_t oops_count_show(struct kobject *kobj, struct kobj_attribute *attr,
+			       char *page)
+{
+	return sysfs_emit(page, "%d\n", atomic_read(&oops_count));
+}
+
+static struct kobj_attribute oops_count_attr = __ATTR_RO(oops_count);
+
+static __init int kernel_exit_sysfs_init(void)
+{
+	sysfs_add_file_to_group(kernel_kobj, &oops_count_attr.attr, NULL);
+	return 0;
+}
+late_initcall(kernel_exit_sysfs_init);
+#endif
 
 static void __unhash_process(struct task_struct *p, bool group_dead)
 {
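The hunk above adds two knobs: a writable kernel.oops_limit sysctl (handled by proc_douintvec, so it appears at /proc/sys/kernel/oops_limit) and a read-only oops_count attribute added straight onto kernel_kobj (i.e. /sys/kernel/oops_count). A minimal userspace sketch for reading both, assuming a kernel built with CONFIG_SYSCTL and CONFIG_SYSFS:

#include <stdio.h>

int main(void)
{
	char buf[32];
	FILE *f;

	/* writable limit, registered under the "kernel" sysctl directory */
	f = fopen("/proc/sys/kernel/oops_limit", "r");
	if (f && fgets(buf, sizeof(buf), f))
		printf("oops_limit: %s", buf);
	if (f)
		fclose(f);

	/* read-only counter, added directly to kernel_kobj (group NULL) */
	f = fopen("/sys/kernel/oops_count", "r");
	if (f && fgets(buf, sizeof(buf), f))
		printf("oops_count: %s", buf);
	if (f)
		fclose(f);

	return 0;
}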
@@ -153,7 +200,7 @@
 	 * Do this under ->siglock, we can race with another thread
 	 * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
 	 */
-	flush_task_sigqueue(tsk);
+	flush_sigqueue(&tsk->pending);
 	tsk->sighand = NULL;
 	spin_unlock(&sighand->siglock);
 
....@@ -865,6 +912,31 @@
865912 }
866913 EXPORT_SYMBOL_GPL(do_exit);
867914
915
+void __noreturn make_task_dead(int signr)
916
+{
917
+ /*
918
+ * Take the task off the cpu after something catastrophic has
919
+ * happened.
920
+ */
921
+ unsigned int limit;
922
+
923
+ /*
924
+ * Every time the system oopses, if the oops happens while a reference
925
+ * to an object was held, the reference leaks.
926
+ * If the oops doesn't also leak memory, repeated oopsing can cause
927
+ * reference counters to wrap around (if they're not using refcount_t).
928
+ * This means that repeated oopsing can make unexploitable-looking bugs
929
+ * exploitable through repeated oopsing.
930
+ * To make sure this can't happen, place an upper bound on how often the
931
+ * kernel may oops without panic().
932
+ */
933
+ limit = READ_ONCE(oops_limit);
934
+ if (atomic_inc_return(&oops_count) >= limit && limit)
935
+ panic("Oopsed too often (kernel.oops_limit is %d)", limit);
936
+
937
+ do_exit(signr);
938
+}
939
+
868940 void complete_and_exit(struct completion *comp, long code)
869941 {
870942 if (comp)
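Note the order of operations in the check above: the counter is always bumped first via atomic_inc_return (so /sys/kernel/oops_count keeps counting regardless), and the trailing "&& limit" means a limit of 0 disables the panic entirely. A standalone sketch of just that predicate, where would_panic() and the plain unsigned counter are hypothetical stand-ins for the kernel's atomic_t logic:

#include <stdio.h>

static unsigned int oops_count;	/* stand-in for the atomic_t counter */

/* mirrors: atomic_inc_return(&oops_count) >= limit && limit */
static int would_panic(unsigned int limit)
{
	return ++oops_count >= limit && limit;
}

int main(void)
{
	oops_count = 0;
	printf("limit=0, oops 1: %d\n", would_panic(0));		/* 0: panic disabled */

	oops_count = 9998;
	printf("limit=10000, oops 9999: %d\n", would_panic(10000));	/* 0: still under the limit */
	printf("limit=10000, oops 10000: %d\n", would_panic(10000));	/* 1: threshold reached */

	return 0;
}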