2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
kernel/kernel/trace/bpf_trace.c
@@ -970,7 +970,6 @@
 u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
 		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
 {
-	int nest_level = this_cpu_inc_return(bpf_event_output_nest_level);
 	struct perf_raw_frag frag = {
		.copy		= ctx_copy,
		.size		= ctx_size,
@@ -987,7 +986,11 @@
 	};
 	struct perf_sample_data *sd;
 	struct pt_regs *regs;
+	int nest_level;
 	u64 ret;
+
+	preempt_disable();
+	nest_level = this_cpu_inc_return(bpf_event_output_nest_level);
 
 	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) {
 		ret = -EBUSY;
@@ -1003,6 +1006,7 @@
 	ret = __bpf_perf_event_output(regs, map, flags, sd);
 out:
 	this_cpu_dec(bpf_event_output_nest_level);
+	preempt_enable();
 	return ret;
 }
 
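
The three hunks above are a single fix. bpf_event_output() guards against recursion with a per-CPU nest counter, but on a preemptible kernel the task can be preempted and migrated between this_cpu_inc_return() and this_cpu_dec(), so the increment and decrement land on different CPUs' counters: one CPU's nest level stays elevated forever and later events on it fail with -EBUSY, and two tasks can also end up sharing the same per-CPU sample-data slot. Taking the increment only after preempt_disable(), and enabling preemption only after the decrement, pins the whole inc/use/dec sequence to one CPU. As a rough user-space sketch of the failure window (illustrative only; sched_getcpu() stands in for the kernel's per-CPU accessors and every name here is invented):

	#define _GNU_SOURCE
	#include <pthread.h>
	#include <sched.h>
	#include <stdatomic.h>
	#include <stdio.h>

	#define MAX_CPUS 1024
	#define LOOPS    200000

	static atomic_int nest[MAX_CPUS];	/* one "nest level" per CPU */
	static atomic_long mismatches;		/* inc and dec hit different CPUs */

	static void *worker(void *arg)
	{
		(void)arg;
		for (int i = 0; i < LOOPS; i++) {
			int enter_cpu = sched_getcpu();

			atomic_fetch_add(&nest[enter_cpu], 1);
			/* Without preempt_disable() the scheduler may move us
			 * to another CPU right here... */
			sched_yield();
			int exit_cpu = sched_getcpu();

			if (exit_cpu != enter_cpu)
				atomic_fetch_add(&mismatches, 1);
			/* ...so the decrement can land on the wrong CPU's
			 * counter, leaving enter_cpu's level stuck above zero. */
			atomic_fetch_sub(&nest[exit_cpu], 1);
		}
		return NULL;
	}

	int main(void)
	{
		pthread_t t[8];

		for (int i = 0; i < 8; i++)
			pthread_create(&t[i], NULL, worker, NULL);
		for (int i = 0; i < 8; i++)
			pthread_join(t[i], NULL);
		printf("inc/dec pairs split across CPUs: %ld\n",
		       atomic_load(&mismatches));
		return 0;
	}

On a multi-core machine the mismatch count is routinely non-zero; that is exactly the window the preempt_disable()/preempt_enable() pair closes.
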
@@ -1055,6 +1059,7 @@
 
 	work = container_of(entry, struct send_signal_irq_work, irq_work);
 	group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, work->type);
+	put_task_struct(work->task);
 }
 
 static int bpf_send_signal_common(u32 sig, enum pid_type type)
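
This put_task_struct() is one half of a lifetime fix; the matching get_task_struct() appears in the bpf_send_signal_common() hunk below. The irq_work handler runs asynchronously and may fire after the task that queued it has exited, so work->task must carry its own reference or the handler dereferences freed memory. A minimal user-space analog of the take-a-reference-before-deferring pattern (the struct task, get_task() and put_task() names are invented for illustration):

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct task {
		atomic_int refcount;
		int pid;
	};

	static struct task *get_task(struct task *t)
	{
		atomic_fetch_add(&t->refcount, 1);	/* mirrors get_task_struct() */
		return t;
	}

	static void put_task(struct task *t)
	{
		/* mirrors put_task_struct(): free on the last reference */
		if (atomic_fetch_sub(&t->refcount, 1) == 1) {
			printf("task %d freed\n", t->pid);
			free(t);
		}
	}

	/* plays the role of the deferred irq_work handler */
	static void *deferred_signal(void *arg)
	{
		struct task *t = arg;

		/* safe even if the queuer already dropped its reference */
		printf("delivering signal to task %d\n", t->pid);
		put_task(t);
		return NULL;
	}

	int main(void)
	{
		struct task *t = malloc(sizeof(*t));
		pthread_t worker;

		atomic_init(&t->refcount, 1);
		t->pid = 1234;

		/* take a reference *before* handing the object to deferred work */
		pthread_create(&worker, NULL, deferred_signal, get_task(t));
		put_task(t);	/* the queuing context goes away; object survives */
		pthread_join(worker, NULL);
		return 0;
	}

The convention mirrored here is the kernel's: whichever context queues deferred work that captures a pointer takes the reference, and the completion path drops it.
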
@@ -1072,6 +1077,9 @@
 		return -EPERM;
 	if (unlikely(!nmi_uaccess_okay()))
 		return -EPERM;
+	/* Task should not be pid=1 to avoid kernel panic. */
+	if (unlikely(is_global_init(current)))
+		return -EPERM;
 
 	if (irqs_disabled()) {
 		/* Do an early check on signal validity. Otherwise,
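
bpf_send_signal_common() delivers with SEND_SIG_PRIV, which bypasses the protections that normally keep a fatal signal away from init; if PID 1 dies, the kernel panics ("Attempted to kill init!"), hence the comment. The new check simply refuses to signal the global init task. For reference, is_global_init() in include/linux/sched.h reduces to a thread-group-ID test against PID 1 (quoted from memory, so treat it as a sketch rather than the authoritative source):

	static inline int is_global_init(struct task_struct *tsk)
	{
		return task_tgid_nr(tsk) == 1;
	}
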
@@ -1088,7 +1096,7 @@
 		 * to the irq_work. The current task may change when queued
 		 * irq works get executed.
 		 */
-		work->task = current;
+		work->task = get_task_struct(current);
 		work->sig = sig;
 		work->type = type;
 		irq_work_queue(&work->irq_work);
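
This is the other half of the fix started in the send_signal_irq_work hunk above: the reference taken on current keeps the task_struct valid across the window the comment describes, and the handler's new put_task_struct() releases it once the signal has been delivered.
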
@@ -1124,13 +1132,23 @@
 
 BPF_CALL_3(bpf_d_path, struct path *, path, char *, buf, u32, sz)
 {
+	struct path copy;
 	long len;
 	char *p;
 
 	if (!sz)
 		return 0;
 
-	p = d_path(path, buf, sz);
+	/*
+	 * The path pointer is verified as trusted and safe to use,
+	 * but let's double check it's valid anyway to workaround
+	 * potentially broken verifier.
+	 */
+	len = copy_from_kernel_nofault(&copy, path, sizeof(*path));
+	if (len < 0)
+		return len;
+
+	p = d_path(&copy, buf, sz);
 	if (IS_ERR(p)) {
 		len = PTR_ERR(p);
 	} else {
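
The d_path() hardening stops trusting the verifier-supplied pointer outright: the struct path is first copied into a local variable with copy_from_kernel_nofault(), which performs the copy with normal page-fault handling suppressed and returns 0 on success or a negative errno (such as -EFAULT) if the source address is bad. d_path() then operates on the validated local copy, so a bogus pointer fails the helper instead of oopsing the kernel. A loose user-space analog of a no-fault probe is to let the kernel do the access and report EFAULT, for instance by write()-ing the buffer into a pipe (illustrative only; probe_readable() is an invented helper, not a libc API):

	#include <errno.h>
	#include <stdio.h>
	#include <unistd.h>

	/* Probe `len` bytes at `p` without risking a SIGSEGV: have the
	 * kernel copy them. write() into a pipe fails with EFAULT when
	 * the source buffer is unmapped. */
	static int probe_readable(const void *p, size_t len)
	{
		int fds[2];
		int ret = 0;

		if (pipe(fds) < 0)
			return -errno;
		if (write(fds[1], p, len) < 0)
			ret = -errno;	/* -EFAULT for a bad pointer */
		close(fds[0]);
		close(fds[1]);
		return ret;
	}

	int main(void)
	{
		int x = 42;

		printf("valid pointer: %d\n", probe_readable(&x, sizeof(x)));
		printf("bad pointer:   %d\n", probe_readable((void *)1, sizeof(x)));
		return 0;
	}

In the kernel, the equivalent job is done by the exception-table fixups behind copy_from_kernel_nofault(); the point of the hunk is simply that an invalid pointer now surfaces as an error code.
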
@@ -2156,7 +2174,7 @@
 #ifdef CONFIG_UPROBE_EVENTS
 		if (flags & TRACE_EVENT_FL_UPROBE)
 			err = bpf_get_uprobe_info(event, fd_type, buf,
-						  probe_offset,
+						  probe_offset, probe_addr,
 						  event->attr.type == PERF_TYPE_TRACEPOINT);
 #endif
 	}
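
The final hunk plugs an information leak. The caller, bpf_task_fd_query(), copies a stack-allocated probe_addr back to user space, but the uprobe branch never wrote it, so user space received stale kernel stack bytes. Passing probe_addr into bpf_get_uprobe_info() lets the uprobe path set it (to zero, since uprobes are identified by file plus offset rather than a kernel address). A small self-contained demo of why an out-parameter must be written on every path (the struct and values are invented for illustration):

	#include <stdio.h>
	#include <string.h>

	struct query_reply { unsigned long probe_offset, probe_addr; };

	/* Before the fix: only probe_offset is written; probe_addr keeps
	 * whatever was on the stack, an infoleak once copied out. */
	static void fill_reply_buggy(struct query_reply *r)
	{
		r->probe_offset = 0x40;
		/* r->probe_addr intentionally left unset */
	}

	static void fill_reply_fixed(struct query_reply *r)
	{
		r->probe_offset = 0x40;
		r->probe_addr = 0;	/* uprobes have no meaningful address */
	}

	int main(void)
	{
		struct query_reply r;

		memset(&r, 0xAA, sizeof(r));	/* stand-in for stale stack bytes */
		fill_reply_buggy(&r);
		printf("buggy: probe_addr = %#lx (leaked garbage)\n", r.probe_addr);
		fill_reply_fixed(&r);
		printf("fixed: probe_addr = %#lx\n", r.probe_addr);
		return 0;
	}
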