@@ -1,26 +1,30 @@
 #include <linux/ptrace.h>
 #include <linux/version.h>
 #include <uapi/linux/bpf.h>
-#include "bpf_helpers.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
 
-struct bpf_map_def SEC("maps") counters = {
-	.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
-	.key_size = sizeof(int),
-	.value_size = sizeof(u32),
-	.max_entries = 64,
-};
-struct bpf_map_def SEC("maps") values = {
-	.type = BPF_MAP_TYPE_HASH,
-	.key_size = sizeof(int),
-	.value_size = sizeof(u64),
-	.max_entries = 64,
-};
-struct bpf_map_def SEC("maps") values2 = {
-	.type = BPF_MAP_TYPE_HASH,
-	.key_size = sizeof(int),
-	.value_size = sizeof(struct bpf_perf_event_value),
-	.max_entries = 64,
-};
+struct {
+	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
+	__uint(key_size, sizeof(int));
+	__uint(value_size, sizeof(u32));
+	__uint(max_entries, 64);
+} counters SEC(".maps");
+
+struct {
+	__uint(type, BPF_MAP_TYPE_HASH);
+	__type(key, int);
+	__type(value, u64);
+	__uint(max_entries, 64);
+} values SEC(".maps");
+
+struct {
+	__uint(type, BPF_MAP_TYPE_HASH);
+	__type(key, int);
+	__type(value, struct bpf_perf_event_value);
+	__uint(max_entries, 64);
+} values2 SEC(".maps");
 
 SEC("kprobe/htab_map_get_next_key")
 int bpf_prog1(struct pt_regs *ctx)
@@ -43,13 +47,24 @@
 	return 0;
 }
 
-SEC("kprobe/htab_map_lookup_elem")
-int bpf_prog2(struct pt_regs *ctx)
+/*
+ * Since *_map_lookup_elem can't be expected to trigger bpf programs
+ * due to potential deadlocks (bpf_disable_instrumentation), this bpf
+ * program will be attached to bpf_map_copy_value (which is called
+ * from map_lookup_elem) and will only filter the hashtable type.
+ */
+SEC("kprobe/bpf_map_copy_value")
+int BPF_KPROBE(bpf_prog2, struct bpf_map *map)
 {
 	u32 key = bpf_get_smp_processor_id();
 	struct bpf_perf_event_value *val, buf;
+	enum bpf_map_type type;
 	int error;
 
+	type = BPF_CORE_READ(map, map_type);
+	if (type != BPF_MAP_TYPE_HASH)
+		return 0;
+
 	error = bpf_perf_event_read_value(&counters, key, &buf, sizeof(buf));
 	if (error)
 		return 0;
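
A note on the two libbpf macros this change introduces, for readers who have not used them (illustration only, not part of the patch): BPF_KPROBE() declares typed arguments for a kprobe handler and fetches them from the pt_regs context, and BPF_CORE_READ() performs a CO-RE-relocatable read of a kernel struct field. A rough open-coded sketch of what the new bpf_prog2 entry point does is shown below; the function name bpf_prog2_open_coded is hypothetical, and the sketch assumes the same headers and struct bpf_map visibility as the patched file above.

```c
/*
 * Illustrative sketch only: roughly what BPF_KPROBE()/BPF_CORE_READ()
 * expand to for the program above. Relies on the same includes as the
 * patched file (<bpf/bpf_tracing.h>, <bpf/bpf_core_read.h>).
 */
SEC("kprobe/bpf_map_copy_value")
int bpf_prog2_open_coded(struct pt_regs *ctx)
{
	/* First argument of the probed function, taken from the saved registers. */
	struct bpf_map *map = (struct bpf_map *)PT_REGS_PARM1(ctx);
	enum bpf_map_type type;

	/*
	 * CO-RE read of map->map_type: the field offset is recorded as a
	 * relocation and fixed up against the running kernel's BTF at load
	 * time, so the program keeps working if struct bpf_map's layout changes.
	 */
	if (bpf_core_read(&type, sizeof(type), &map->map_type))
		return 0;

	/* Only react to lookups on hashtable maps, as in bpf_prog2. */
	if (type != BPF_MAP_TYPE_HASH)
		return 0;

	/* ... same perf counter read and values2 update as in bpf_prog2 ... */
	return 0;
}
```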