@@ -4,6 +4,12 @@
 #include <bpf/bpf_helpers.h>
 #include <bpf/bpf_tracing.h>
 
+struct bpf_perf_event_value___local {
+	__u64 counter;
+	__u64 enabled;
+	__u64 running;
+} __attribute__((preserve_access_index));
+
 /* map of perf event fds, num_cpu * num_metric entries */
 struct {
 	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
@@ -15,14 +21,14 @@
 struct {
 	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
 	__uint(key_size, sizeof(u32));
-	__uint(value_size, sizeof(struct bpf_perf_event_value));
+	__uint(value_size, sizeof(struct bpf_perf_event_value___local));
 } fentry_readings SEC(".maps");
 
 /* accumulated readings */
 struct {
 	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
 	__uint(key_size, sizeof(u32));
-	__uint(value_size, sizeof(struct bpf_perf_event_value));
+	__uint(value_size, sizeof(struct bpf_perf_event_value___local));
 } accum_readings SEC(".maps");
 
 /* sample counts, one per cpu */
@@ -39,7 +45,7 @@
 SEC("fentry/XXX")
 int BPF_PROG(fentry_XXX)
 {
-	struct bpf_perf_event_value *ptrs[MAX_NUM_MATRICS];
+	struct bpf_perf_event_value___local *ptrs[MAX_NUM_MATRICS];
 	u32 key = bpf_get_smp_processor_id();
 	u32 i;
 
@@ -53,10 +59,10 @@
 	}
 
 	for (i = 0; i < num_metric && i < MAX_NUM_MATRICS; i++) {
-		struct bpf_perf_event_value reading;
+		struct bpf_perf_event_value___local reading;
 		int err;
 
-		err = bpf_perf_event_read_value(&events, key, &reading,
+		err = bpf_perf_event_read_value(&events, key, (void *)&reading,
 						sizeof(reading));
 		if (err)
 			return 0;
@@ -68,14 +74,14 @@
 }
 
 static inline void
-fexit_update_maps(u32 id, struct bpf_perf_event_value *after)
+fexit_update_maps(u32 id, struct bpf_perf_event_value___local *after)
 {
-	struct bpf_perf_event_value *before, diff;
+	struct bpf_perf_event_value___local *before, diff;
 
 	before = bpf_map_lookup_elem(&fentry_readings, &id);
 	/* only account samples with a valid fentry_reading */
 	if (before && before->counter) {
-		struct bpf_perf_event_value *accum;
+		struct bpf_perf_event_value___local *accum;
 
 		diff.counter = after->counter - before->counter;
 		diff.enabled = after->enabled - before->enabled;
@@ -93,7 +99,7 @@
 SEC("fexit/XXX")
 int BPF_PROG(fexit_XXX)
 {
-	struct bpf_perf_event_value readings[MAX_NUM_MATRICS];
+	struct bpf_perf_event_value___local readings[MAX_NUM_MATRICS];
 	u32 cpu = bpf_get_smp_processor_id();
 	u32 i, zero = 0;
 	int err;
@@ -102,7 +108,8 @@
 	/* read all events before updating the maps, to reduce error */
 	for (i = 0; i < num_metric && i < MAX_NUM_MATRICS; i++) {
 		err = bpf_perf_event_read_value(&events, cpu + i * num_cpu,
-						readings + i, sizeof(*readings));
+						(void *)(readings + i),
+						sizeof(*readings));
 		if (err)
 			return 0;
 	}
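For context, the pattern in this patch is a CO-RE "local type flavor": instead of depending on the kernel's struct bpf_perf_event_value being visible to the skeleton build, the program defines a trimmed local copy with the ___local suffix and marks it __attribute__((preserve_access_index)), so libbpf relocates its field accesses against the running kernel's layout at load time. The (void *) casts exist only because bpf_perf_event_read_value() is declared with the kernel struct type. Below is a minimal, self-contained sketch of the same pattern; the map name, program name, and fentry target (vfs_read) are illustrative assumptions, not part of the patch.

/* Hypothetical standalone example of the "___local" flavor pattern shown in
 * the diff above; names and the attach point are made up for illustration. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

/* trimmed local copy of the kernel's struct bpf_perf_event_value; CO-RE
 * relocates the field offsets against the kernel's real definition */
struct bpf_perf_event_value___local {
	__u64 counter;
	__u64 enabled;
	__u64 running;
} __attribute__((preserve_access_index));

/* one perf event fd per CPU, populated from user space */
struct {
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(int));
} events SEC(".maps");

SEC("fentry/vfs_read")
int BPF_PROG(read_counter)
{
	struct bpf_perf_event_value___local val = {};
	__u32 cpu = bpf_get_smp_processor_id();

	/* the helper prototype uses the kernel struct, hence the void * cast */
	if (bpf_perf_event_read_value(&events, cpu, (void *)&val, sizeof(val)))
		return 0;

	bpf_printk("counter=%llu enabled=%llu", val.counter, val.enabled);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";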