+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (C) 2011-2017, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
  *
  * Parts came from evlist.c builtin-{top,stat,record}.c, see those files for further
  * copyright notes.
- *
- * Released under the GPL v2. (and only v2, not any later version)
  */

 #include <sys/mman.h>
 #include <inttypes.h>
 #include <asm/bug.h>
+#include <linux/zalloc.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h> // sysconf()
+#include <perf/mmap.h>
+#ifdef HAVE_LIBNUMA_SUPPORT
+#include <numaif.h>
+#endif
+#include "cpumap.h"
 #include "debug.h"
 #include "event.h"
 #include "mmap.h"
-#include "util.h" /* page_size */
+#include "../perf.h"
+#include <internal/lib.h> /* page_size */
+#include <linux/bitmap.h>

-size_t perf_mmap__mmap_len(struct perf_mmap *map)
+#define MASK_SIZE 1023
+void mmap_cpu_mask__scnprintf(struct mmap_cpu_mask *mask, const char *tag)
 {
-	return map->mask + 1 + page_size;
+	char buf[MASK_SIZE + 1];
+	size_t len;
+
+	len = bitmap_scnprintf(mask->bits, mask->nbits, buf, MASK_SIZE);
+	buf[len] = '\0';
+	pr_debug("%p: %s mask[%zd]: %s\n", mask, tag, mask->nbits, buf);
 }

-/* When check_messup is true, 'end' must points to a good entry */
-static union perf_event *perf_mmap__read(struct perf_mmap *map,
-					 u64 *startp, u64 end)
+size_t mmap__mmap_len(struct mmap *map)
 {
-	unsigned char *data = map->base + page_size;
-	union perf_event *event = NULL;
-	int diff = end - *startp;
-
-	if (diff >= (int)sizeof(event->header)) {
-		size_t size;
-
-		event = (union perf_event *)&data[*startp & map->mask];
-		size = event->header.size;
-
-		if (size < sizeof(event->header) || diff < (int)size)
-			return NULL;
-
-		/*
-		 * Event straddles the mmap boundary -- header should always
-		 * be inside due to u64 alignment of output.
-		 */
-		if ((*startp & map->mask) + size != ((*startp + size) & map->mask)) {
-			unsigned int offset = *startp;
-			unsigned int len = min(sizeof(*event), size), cpy;
-			void *dst = map->event_copy;
-
-			do {
-				cpy = min(map->mask + 1 - (offset & map->mask), len);
-				memcpy(dst, &data[offset & map->mask], cpy);
-				offset += cpy;
-				dst += cpy;
-				len -= cpy;
-			} while (len);
-
-			event = (union perf_event *)map->event_copy;
-		}
-
-		*startp += size;
-	}
-
-	return event;
-}
-
-/*
- * Read event from ring buffer one by one.
- * Return one event for each call.
- *
- * Usage:
- * perf_mmap__read_init()
- * while(event = perf_mmap__read_event()) {
- *	//process the event
- *	perf_mmap__consume()
- * }
- * perf_mmap__read_done()
- */
-union perf_event *perf_mmap__read_event(struct perf_mmap *map)
-{
-	union perf_event *event;
-
-	/*
-	 * Check if event was unmapped due to a POLLHUP/POLLERR.
-	 */
-	if (!refcount_read(&map->refcnt))
-		return NULL;
-
-	/* non-overwirte doesn't pause the ringbuffer */
-	if (!map->overwrite)
-		map->end = perf_mmap__read_head(map);
-
-	event = perf_mmap__read(map, &map->start, map->end);
-
-	if (!map->overwrite)
-		map->prev = map->start;
-
-	return event;
-}
-
-static bool perf_mmap__empty(struct perf_mmap *map)
-{
-	return perf_mmap__read_head(map) == map->prev && !map->auxtrace_mmap.base;
-}
-
-void perf_mmap__get(struct perf_mmap *map)
-{
-	refcount_inc(&map->refcnt);
-}
-
-void perf_mmap__put(struct perf_mmap *map)
-{
-	BUG_ON(map->base && refcount_read(&map->refcnt) == 0);
-
-	if (refcount_dec_and_test(&map->refcnt))
-		perf_mmap__munmap(map);
-}
-
-void perf_mmap__consume(struct perf_mmap *map)
-{
-	if (!map->overwrite) {
-		u64 old = map->prev;
-
-		perf_mmap__write_tail(map, old);
-	}
-
-	if (refcount_read(&map->refcnt) == 1 && perf_mmap__empty(map))
-		perf_mmap__put(map);
+	return perf_mmap__mmap_len(&map->core);
 }

 int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused,

[...]

 }

 void __weak auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp __maybe_unused,
-					  struct perf_evlist *evlist __maybe_unused,
+					  struct evlist *evlist __maybe_unused,
 					  int idx __maybe_unused,
 					  bool per_cpu __maybe_unused)
 {
 }

-void perf_mmap__munmap(struct perf_mmap *map)
+#ifdef HAVE_AIO_SUPPORT
+static int perf_mmap__aio_enabled(struct mmap *map)
 {
-	if (map->base != NULL) {
-		munmap(map->base, perf_mmap__mmap_len(map));
-		map->base = NULL;
-		map->fd = -1;
-		refcount_set(&map->refcnt, 0);
+	return map->aio.nr_cblocks > 0;
+}
+
+#ifdef HAVE_LIBNUMA_SUPPORT
+static int perf_mmap__aio_alloc(struct mmap *map, int idx)
+{
+	map->aio.data[idx] = mmap(NULL, mmap__mmap_len(map), PROT_READ|PROT_WRITE,
+				  MAP_PRIVATE|MAP_ANONYMOUS, 0, 0);
+	if (map->aio.data[idx] == MAP_FAILED) {
+		map->aio.data[idx] = NULL;
+		return -1;
+	}
+
+	return 0;
+}
+
+static void perf_mmap__aio_free(struct mmap *map, int idx)
+{
+	if (map->aio.data[idx]) {
+		munmap(map->aio.data[idx], mmap__mmap_len(map));
+		map->aio.data[idx] = NULL;
+	}
+}
+
+static int perf_mmap__aio_bind(struct mmap *map, int idx, int cpu, int affinity)
+{
+	void *data;
+	size_t mmap_len;
+	unsigned long *node_mask;
+	unsigned long node_index;
+	int err = 0;
+
+	if (affinity != PERF_AFFINITY_SYS && cpu__max_node() > 1) {
+		data = map->aio.data[idx];
+		mmap_len = mmap__mmap_len(map);
+		node_index = cpu__get_node(cpu);
+		node_mask = bitmap_alloc(node_index + 1);
+		if (!node_mask) {
+			pr_err("Failed to allocate node mask for mbind: error %m\n");
+			return -1;
+		}
+		set_bit(node_index, node_mask);
+		if (mbind(data, mmap_len, MPOL_BIND, node_mask, node_index + 1 + 1, 0)) {
+			pr_err("Failed to bind [%p-%p] AIO buffer to node %lu: error %m\n",
+			       data, data + mmap_len, node_index);
+			err = -1;
+		}
+		bitmap_free(node_mask);
+	}
+
+	return err;
+}
+#else /* !HAVE_LIBNUMA_SUPPORT */
+static int perf_mmap__aio_alloc(struct mmap *map, int idx)
+{
+	map->aio.data[idx] = malloc(mmap__mmap_len(map));
+	if (map->aio.data[idx] == NULL)
+		return -1;
+
+	return 0;
+}
+
+static void perf_mmap__aio_free(struct mmap *map, int idx)
+{
+	zfree(&(map->aio.data[idx]));
+}
+
+static int perf_mmap__aio_bind(struct mmap *map __maybe_unused, int idx __maybe_unused,
+			       int cpu __maybe_unused, int affinity __maybe_unused)
+{
+	return 0;
+}
+#endif
+
+static int perf_mmap__aio_mmap(struct mmap *map, struct mmap_params *mp)
+{
+	int delta_max, i, prio, ret;
+
+	map->aio.nr_cblocks = mp->nr_cblocks;
+	if (map->aio.nr_cblocks) {
+		map->aio.aiocb = calloc(map->aio.nr_cblocks, sizeof(struct aiocb *));
+		if (!map->aio.aiocb) {
+			pr_debug2("failed to allocate aiocb for data buffer, error %m\n");
+			return -1;
+		}
+		map->aio.cblocks = calloc(map->aio.nr_cblocks, sizeof(struct aiocb));
+		if (!map->aio.cblocks) {
+			pr_debug2("failed to allocate cblocks for data buffer, error %m\n");
+			return -1;
+		}
+		map->aio.data = calloc(map->aio.nr_cblocks, sizeof(void *));
+		if (!map->aio.data) {
+			pr_debug2("failed to allocate data buffer, error %m\n");
+			return -1;
+		}
+		delta_max = sysconf(_SC_AIO_PRIO_DELTA_MAX);
+		for (i = 0; i < map->aio.nr_cblocks; ++i) {
+			ret = perf_mmap__aio_alloc(map, i);
+			if (ret == -1) {
+				pr_debug2("failed to allocate data buffer area, error %m");
+				return -1;
+			}
+			ret = perf_mmap__aio_bind(map, i, map->core.cpu, mp->affinity);
+			if (ret == -1)
+				return -1;
+			/*
+			 * Use cblock.aio_fildes value different from -1
+			 * to denote started aio write operation on the
+			 * cblock so it requires explicit record__aio_sync()
+			 * call prior the cblock may be reused again.
+			 */
+			map->aio.cblocks[i].aio_fildes = -1;
+			/*
+			 * Allocate cblocks with priority delta to have
+			 * faster aio write system calls because queued requests
+			 * are kept in separate per-prio queues and adding
+			 * a new request will iterate thru shorter per-prio
+			 * list. Blocks with numbers higher than
+			 * _SC_AIO_PRIO_DELTA_MAX go with priority 0.
+			 */
+			prio = delta_max - i;
+			map->aio.cblocks[i].aio_reqprio = prio >= 0 ? prio : 0;
+		}
+	}
+
+	return 0;
+}
+
+static void perf_mmap__aio_munmap(struct mmap *map)
+{
+	int i;
+
+	for (i = 0; i < map->aio.nr_cblocks; ++i)
+		perf_mmap__aio_free(map, i);
+	if (map->aio.data)
+		zfree(&map->aio.data);
+	zfree(&map->aio.cblocks);
+	zfree(&map->aio.aiocb);
+}
+#else /* !HAVE_AIO_SUPPORT */
+static int perf_mmap__aio_enabled(struct mmap *map __maybe_unused)
+{
+	return 0;
+}
+
+static int perf_mmap__aio_mmap(struct mmap *map __maybe_unused,
+			       struct mmap_params *mp __maybe_unused)
+{
+	return 0;
+}
+
+static void perf_mmap__aio_munmap(struct mmap *map __maybe_unused)
+{
+}
+#endif
+
+void mmap__munmap(struct mmap *map)
+{
+	bitmap_free(map->affinity_mask.bits);
+
+	perf_mmap__aio_munmap(map);
+	if (map->data != NULL) {
+		munmap(map->data, mmap__mmap_len(map));
+		map->data = NULL;
 	}
 	auxtrace_mmap__munmap(&map->auxtrace_mmap);
 }

-int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, int cpu)
+static void build_node_mask(int node, struct mmap_cpu_mask *mask)
 {
-	/*
-	 * The last one will be done at perf_mmap__consume(), so that we
-	 * make sure we don't prevent tools from consuming every last event in
-	 * the ring buffer.
-	 *
-	 * I.e. we can get the POLLHUP meaning that the fd doesn't exist
-	 * anymore, but the last events for it are still in the ring buffer,
-	 * waiting to be consumed.
-	 *
-	 * Tools can chose to ignore this at their own discretion, but the
-	 * evlist layer can't just drop it when filtering events in
-	 * perf_evlist__filter_pollfd().
-	 */
-	refcount_set(&map->refcnt, 2);
-	map->prev = 0;
-	map->mask = mp->mask;
-	map->base = mmap(NULL, perf_mmap__mmap_len(map), mp->prot,
-			 MAP_SHARED, fd, 0);
-	if (map->base == MAP_FAILED) {
+	int c, cpu, nr_cpus;
+	const struct perf_cpu_map *cpu_map = NULL;
+
+	cpu_map = cpu_map__online();
+	if (!cpu_map)
+		return;
+
+	nr_cpus = perf_cpu_map__nr(cpu_map);
+	for (c = 0; c < nr_cpus; c++) {
+		cpu = cpu_map->map[c]; /* map c index to online cpu index */
+		if (cpu__get_node(cpu) == node)
+			set_bit(cpu, mask->bits);
+	}
+}
+
+static int perf_mmap__setup_affinity_mask(struct mmap *map, struct mmap_params *mp)
+{
+	map->affinity_mask.nbits = cpu__max_cpu();
+	map->affinity_mask.bits = bitmap_alloc(map->affinity_mask.nbits);
+	if (!map->affinity_mask.bits)
+		return -1;
+
+	if (mp->affinity == PERF_AFFINITY_NODE && cpu__max_node() > 1)
+		build_node_mask(cpu__get_node(map->core.cpu), &map->affinity_mask);
+	else if (mp->affinity == PERF_AFFINITY_CPU)
+		set_bit(map->core.cpu, map->affinity_mask.bits);
+
+	return 0;
+}
+
+int mmap__mmap(struct mmap *map, struct mmap_params *mp, int fd, int cpu)
+{
+	if (perf_mmap__mmap(&map->core, &mp->core, fd, cpu)) {
 		pr_debug2("failed to mmap perf event ring buffer, error %d\n",
 			  errno);
-		map->base = NULL;
 		return -1;
 	}
-	map->fd = fd;
-	map->cpu = cpu;
+
+	if (mp->affinity != PERF_AFFINITY_SYS &&
+	    perf_mmap__setup_affinity_mask(map, mp)) {
+		pr_debug2("failed to alloc mmap affinity mask, error %d\n",
+			  errno);
+		return -1;
+	}
+
+	if (verbose == 2)
+		mmap_cpu_mask__scnprintf(&map->affinity_mask, "mmap");
+
+	map->core.flush = mp->flush;
+
+	map->comp_level = mp->comp_level;
+
+	if (map->comp_level && !perf_mmap__aio_enabled(map)) {
+		map->data = mmap(NULL, mmap__mmap_len(map), PROT_READ|PROT_WRITE,
+				 MAP_PRIVATE|MAP_ANONYMOUS, 0, 0);
+		if (map->data == MAP_FAILED) {
+			pr_debug2("failed to mmap data buffer, error %d\n",
+				  errno);
+			map->data = NULL;
+			return -1;
+		}
+	}

 	if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
-				&mp->auxtrace_mp, map->base, fd))
+				&mp->auxtrace_mp, map->core.base, fd))
 		return -1;

-	return 0;
+	return perf_mmap__aio_mmap(map, mp);
 }

-static int overwrite_rb_find_range(void *buf, int mask, u64 *start, u64 *end)
+int perf_mmap__push(struct mmap *md, void *to,
+		    int push(struct mmap *map, void *to, void *buf, size_t size))
 {
-	struct perf_event_header *pheader;
-	u64 evt_head = *start;
-	int size = mask + 1;
-
-	pr_debug2("%s: buf=%p, start=%"PRIx64"\n", __func__, buf, *start);
-	pheader = (struct perf_event_header *)(buf + (*start & mask));
-	while (true) {
-		if (evt_head - *start >= (unsigned int)size) {
-			pr_debug("Finished reading overwrite ring buffer: rewind\n");
-			if (evt_head - *start > (unsigned int)size)
-				evt_head -= pheader->size;
-			*end = evt_head;
-			return 0;
-		}
-
-		pheader = (struct perf_event_header *)(buf + (evt_head & mask));
-
-		if (pheader->size == 0) {
-			pr_debug("Finished reading overwrite ring buffer: get start\n");
-			*end = evt_head;
-			return 0;
-		}
-
-		evt_head += pheader->size;
-		pr_debug3("move evt_head: %"PRIx64"\n", evt_head);
-	}
-	WARN_ONCE(1, "Shouldn't get here\n");
-	return -1;
-}
-
-/*
- * Report the start and end of the available data in ringbuffer
- */
-static int __perf_mmap__read_init(struct perf_mmap *md)
-{
-	u64 head = perf_mmap__read_head(md);
-	u64 old = md->prev;
-	unsigned char *data = md->base + page_size;
-	unsigned long size;
-
-	md->start = md->overwrite ? head : old;
-	md->end = md->overwrite ? old : head;
-
-	if (md->start == md->end)
-		return -EAGAIN;
-
-	size = md->end - md->start;
-	if (size > (unsigned long)(md->mask) + 1) {
-		if (!md->overwrite) {
-			WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");
-
-			md->prev = head;
-			perf_mmap__consume(md);
-			return -EAGAIN;
-		}
-
-		/*
-		 * Backward ring buffer is full. We still have a chance to read
-		 * most of data from it.
-		 */
-		if (overwrite_rb_find_range(data, md->mask, &md->start, &md->end))
-			return -EINVAL;
-	}
-
-	return 0;
-}
-
-int perf_mmap__read_init(struct perf_mmap *map)
-{
-	/*
-	 * Check if event was unmapped due to a POLLHUP/POLLERR.
-	 */
-	if (!refcount_read(&map->refcnt))
-		return -ENOENT;
-
-	return __perf_mmap__read_init(map);
-}
-
-int perf_mmap__push(struct perf_mmap *md, void *to,
-		    int push(void *to, void *buf, size_t size))
-{
-	u64 head = perf_mmap__read_head(md);
-	unsigned char *data = md->base + page_size;
+	u64 head = perf_mmap__read_head(&md->core);
+	unsigned char *data = md->core.base + page_size;
 	unsigned long size;
 	void *buf;
 	int rc = 0;

-	rc = perf_mmap__read_init(md);
+	rc = perf_mmap__read_init(&md->core);
 	if (rc < 0)
-		return (rc == -EAGAIN) ? 0 : -1;
+		return (rc == -EAGAIN) ? 1 : -1;

-	size = md->end - md->start;
+	size = md->core.end - md->core.start;

-	if ((md->start & md->mask) + size != (md->end & md->mask)) {
-		buf = &data[md->start & md->mask];
-		size = md->mask + 1 - (md->start & md->mask);
-		md->start += size;
+	if ((md->core.start & md->core.mask) + size != (md->core.end & md->core.mask)) {
+		buf = &data[md->core.start & md->core.mask];
+		size = md->core.mask + 1 - (md->core.start & md->core.mask);
+		md->core.start += size;

-		if (push(to, buf, size) < 0) {
+		if (push(md, to, buf, size) < 0) {
 			rc = -1;
 			goto out;
 		}
 	}

-	buf = &data[md->start & md->mask];
-	size = md->end - md->start;
-	md->start += size;
+	buf = &data[md->core.start & md->core.mask];
+	size = md->core.end - md->core.start;
+	md->core.start += size;

-	if (push(to, buf, size) < 0) {
+	if (push(md, to, buf, size) < 0) {
 		rc = -1;
 		goto out;
 	}

-	md->prev = head;
-	perf_mmap__consume(md);
+	md->core.prev = head;
+	perf_mmap__consume(&md->core);
 out:
 	return rc;
-}
-
-/*
- * Mandatory for overwrite mode
- * The direction of overwrite mode is backward.
- * The last perf_mmap__read() will set tail to map->prev.
- * Need to correct the map->prev to head which is the end of next read.
- */
-void perf_mmap__read_done(struct perf_mmap *map)
-{
-	/*
-	 * Check if event was unmapped due to a POLLHUP/POLLERR.
-	 */
-	if (!refcount_read(&map->refcnt))
-		return;
-
-	map->prev = perf_mmap__read_head(map);
 }
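
For context only (not part of this patch): a minimal sketch of a caller that matches the
new perf_mmap__push() callback signature changed above. The extra "struct mmap *" argument
lets the callback reach per-map state (e.g. map->data or the compression level). The
write_to_file() helper, the FILE-based 'to' cookie and drain_one() are hypothetical names
used only for illustration; with this patch perf_mmap__push() returns 0 on success, 1 when
the ring buffer was empty (-EAGAIN from read_init) and -1 on error.

static int write_to_file(struct mmap *map __maybe_unused, void *to,
			 void *buf, size_t size)
{
	FILE *out = to;	/* 'to' is passed through opaquely by perf_mmap__push() */

	return fwrite(buf, 1, size, out) == size ? 0 : -1;
}

static int drain_one(struct mmap *map, FILE *out)
{
	int rc = perf_mmap__push(map, out, write_to_file);

	/* 0 = data pushed, 1 = nothing to read, < 0 = error */
	return rc < 0 ? -1 : 0;
}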
---|