@@ -970,7 +970,6 @@
 u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		      void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
 {
-	int nest_level = this_cpu_inc_return(bpf_event_output_nest_level);
 	struct perf_raw_frag frag = {
		.copy	= ctx_copy,
		.size	= ctx_size,
@@ -987,7 +986,11 @@
	};
	struct perf_sample_data *sd;
	struct pt_regs *regs;
+	int nest_level;
	u64 ret;
+
+	preempt_disable();
+	nest_level = this_cpu_inc_return(bpf_event_output_nest_level);

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) {
		ret = -EBUSY;
@@ -1003,6 +1006,7 @@
	ret = __bpf_perf_event_output(regs, map, flags, sd);
 out:
	this_cpu_dec(bpf_event_output_nest_level);
+	preempt_enable();
	return ret;
 }

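The three hunks above widen the critical section in bpf_event_output(): the nest-level increment used to happen without any preemption protection, so a preemptible caller could migrate to another CPU between picking a per-CPU scratch perf_sample_data slot and releasing it, leaving one CPU's counter over-incremented and another's underflowed. Moving the increment after an explicit preempt_disable(), and pairing the decrement with preempt_enable(), keeps the whole pick/use/release sequence on one CPU. A kernel-style sketch of the pattern in isolation (the demo_* names are invented for illustration, not kernel symbols):

#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/perf_event.h>

struct demo_scratch {
	struct perf_sample_data sds[3];	/* one slot per nesting level */
};
static DEFINE_PER_CPU(struct demo_scratch, demo_sds);
static DEFINE_PER_CPU(int, demo_nest_level);

static int demo_use_scratch(void)
{
	struct perf_sample_data *sd;
	int nest_level, ret = 0;

	preempt_disable();	/* pin this CPU for the whole sequence */
	nest_level = this_cpu_inc_return(demo_nest_level);
	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(demo_sds.sds))) {
		ret = -EBUSY;
		goto out;
	}
	sd = &this_cpu_ptr(&demo_sds)->sds[nest_level - 1];
	/* ... fill *sd and emit the event ... */
out:
	this_cpu_dec(demo_nest_level);	/* must hit the same CPU's counter */
	preempt_enable();
	return ret;
}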
@@ -1055,6 +1059,7 @@

	work = container_of(entry, struct send_signal_irq_work, irq_work);
	group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, work->type);
+	put_task_struct(work->task);
 }

 static int bpf_send_signal_common(u32 sig, enum pid_type type)
@@ -1072,6 +1077,9 @@
		return -EPERM;
	if (unlikely(!nmi_uaccess_okay()))
		return -EPERM;
+	/* Task should not be pid=1 to avoid kernel panic. */
+	if (unlikely(is_global_init(current)))
+		return -EPERM;

	if (irqs_disabled()) {
		/* Do an early check on signal validity. Otherwise,
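The is_global_init() check added above closes a system-stability hole rather than a memory-safety one: current is the init task (PID 1) when a BPF program fires in init's context, and delivering a fatal signal there takes the whole machine down, since the kernel panics ("Attempted to kill init!") when init exits. Returning -EPERM early keeps the helper from ever targeting PID 1.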
@@ -1088,7 +1096,7 @@
		 * to the irq_work. The current task may change when queued
		 * irq works get executed.
		 */
-		work->task = current;
+		work->task = get_task_struct(current);
		work->sig = sig;
		work->type = type;
		irq_work_queue(&work->irq_work);
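The get_task_struct()/put_task_struct() pair in this and the earlier irq_work hunk fixes a task lifetime bug: when the helper is invoked with IRQs disabled, the signal is delivered later from an irq_work, and a bare current pointer stashed in the work item can point at an already-freed task by the time the handler runs. Taking a reference when queuing and dropping it after delivery pins the task_struct across the deferral. A minimal sketch of the pairing, again with invented demo_* names:

#include <linux/irq_work.h>
#include <linux/sched/task.h>	/* get_task_struct()/put_task_struct() */

struct demo_sig_work {
	struct irq_work irq_work;
	struct task_struct *task;
	u32 sig;
};

static void demo_sig_handler(struct irq_work *entry)
{
	struct demo_sig_work *work =
		container_of(entry, struct demo_sig_work, irq_work);

	/* ... deliver work->sig to work->task ... */
	put_task_struct(work->task);	/* pairs with the get below */
}

static void demo_queue_signal(struct demo_sig_work *work, u32 sig)
{
	/* Hold a reference: the task may exit before the handler runs. */
	work->task = get_task_struct(current);
	work->sig = sig;
	irq_work_queue(&work->irq_work);
}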
@@ -1124,13 +1132,23 @@

 BPF_CALL_3(bpf_d_path, struct path *, path, char *, buf, u32, sz)
 {
+	struct path copy;
	long len;
	char *p;

	if (!sz)
		return 0;

-	p = d_path(path, buf, sz);
+	/*
+	 * The path pointer is verified as trusted and safe to use,
+	 * but let's double check it's valid anyway to workaround
+	 * potentially broken verifier.
+	 */
+	len = copy_from_kernel_nofault(&copy, path, sizeof(*path));
+	if (len < 0)
+		return len;
+
+	p = d_path(&copy, buf, sz);
	if (IS_ERR(p)) {
		len = PTR_ERR(p);
	} else {
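bpf_d_path() expects a trusted struct path pointer from the verifier, but, as the new comment says, the hunk guards against a broken verifier letting a bad pointer through: instead of handing the raw pointer to d_path(), it first snapshots the pointee with copy_from_kernel_nofault(), which returns -EFAULT instead of oopsing when the source address is unreadable, and then runs d_path() on the private copy. The same defensive-copy idiom in isolation (the demo_* name is illustrative):

#include <linux/uaccess.h>	/* copy_from_kernel_nofault() */
#include <linux/path.h>

/* Snapshot a possibly-bogus kernel pointer before dereferencing it. */
static long demo_snapshot_path(const struct path *maybe_bad, struct path *out)
{
	long err = copy_from_kernel_nofault(out, maybe_bad, sizeof(*out));

	if (err < 0)
		return err;	/* -EFAULT: unreadable address, no crash */
	return 0;		/* *out is now a readable private copy */
}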
@@ -2156,7 +2174,7 @@
 #ifdef CONFIG_UPROBE_EVENTS
		if (flags & TRACE_EVENT_FL_UPROBE)
			err = bpf_get_uprobe_info(event, fd_type, buf,
-						  probe_offset,
+						  probe_offset, probe_addr,
						  event->attr.type == PERF_TYPE_TRACEPOINT);
 #endif
	}
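The final hunk passes probe_addr through to bpf_get_uprobe_info() so the uprobe branch matches the helper's updated parameter list and can initialize the caller's probe_addr instead of leaving it stale. The declaration below is an assumed sketch of the updated prototype, mirroring the parameter order visible at the call site; check include/linux/trace_events.h for the authoritative signature:

/* Assumed prototype after the change (see the call site above): */
int bpf_get_uprobe_info(const struct perf_event *event, u32 *fd_type,
			const char **buf, u64 *probe_offset,
			u64 *probe_addr, bool perf_type_tracepoint);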