2023-12-09 b22da3d8526a935aa31e086e63f60ff3246cb61c
kernel/tools/perf/util/ordered-events.c
@@ -8,6 +8,7 @@
 #include "session.h"
 #include "asm/bug.h"
 #include "debug.h"
+#include "ui/progress.h"
 
 #define pr_N(n, fmt, ...) \
 	eprintf(n, debug_ordered_events, fmt, ##__VA_ARGS__)
@@ -80,12 +81,18 @@
 	return oe->copy_on_queue ? __dup_event(oe, event) : event;
 }
 
-static void free_dup_event(struct ordered_events *oe, union perf_event *event)
+static void __free_dup_event(struct ordered_events *oe, union perf_event *event)
 {
-	if (event && oe->copy_on_queue) {
+	if (event) {
 		oe->cur_alloc_size -= event->header.size;
 		free(event);
 	}
+}
+
+static void free_dup_event(struct ordered_events *oe, union perf_event *event)
+{
+	if (oe->copy_on_queue)
+		__free_dup_event(oe, event);
 }
 
 #define MAX_SAMPLE_BUFFER	(64 * 1024 / sizeof(struct ordered_event))
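
The hunk above splits the helper in two: __free_dup_event() is the mechanism (always adjust cur_alloc_size and free), while free_dup_event() keeps the copy_on_queue policy check. Reassembled from the diff, the post-patch helpers read:

    /* Mechanism: unconditionally return a duplicated event's memory. */
    static void __free_dup_event(struct ordered_events *oe, union perf_event *event)
    {
        if (event) {
            oe->cur_alloc_size -= event->header.size;
            free(event);
        }
    }

    /* Policy: only events queued with copy_on_queue own their payload. */
    static void free_dup_event(struct ordered_events *oe, union perf_event *event)
    {
        if (oe->copy_on_queue)
            __free_dup_event(oe, event);
    }

The unconditional variant exists for the buffer teardown added at the end of this patch, which checks the policy once and then frees each slot directly.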
@@ -95,21 +102,49 @@
 	struct list_head *cache = &oe->cache;
 	struct ordered_event *new = NULL;
 	union perf_event *new_event;
+	size_t size;
 
 	new_event = dup_event(oe, event);
 	if (!new_event)
 		return NULL;
 
+	/*
+	 * We maintain the following scheme of buffers for ordered
+	 * event allocation:
+	 *
+	 *   to_free list -> buffer1 (64K)
+	 *                   buffer2 (64K)
+	 *                   ...
+	 *
+	 * Each buffer keeps an array of ordered events objects:
+	 *    buffer -> event[0]
+	 *              event[1]
+	 *              ...
+	 *
+	 * Each allocated ordered event is linked to one of
+	 * following lists:
+	 *   - time ordered list 'events'
+	 *   - list of currently removed events 'cache'
+	 *
+	 * Allocation of the ordered event uses the following order
+	 * to get the memory:
+	 *   - use recently removed object from 'cache' list
+	 *   - use available object in current allocation buffer
+	 *   - allocate new buffer if the current buffer is full
+	 *
+	 * Removal of ordered event object moves it from events to
+	 * the cache list.
+	 */
+	size = sizeof(*oe->buffer) + MAX_SAMPLE_BUFFER * sizeof(*new);
+
 	if (!list_empty(cache)) {
 		new = list_entry(cache->next, struct ordered_event, list);
-		list_del(&new->list);
+		list_del_init(&new->list);
 	} else if (oe->buffer) {
-		new = oe->buffer + oe->buffer_idx;
+		new = &oe->buffer->event[oe->buffer_idx];
 		if (++oe->buffer_idx == MAX_SAMPLE_BUFFER)
 			oe->buffer = NULL;
-	} else if (oe->cur_alloc_size < oe->max_alloc_size) {
-		size_t size = MAX_SAMPLE_BUFFER * sizeof(*new);
-
+	} else if ((oe->cur_alloc_size + size) < oe->max_alloc_size) {
		oe->buffer = malloc(size);
 		if (!oe->buffer) {
 			free_dup_event(oe, new_event);
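
The indexing change from oe->buffer + idx to &oe->buffer->event[idx], together with the size computation above, implies that oe->buffer is no longer a bare array of events but a small header followed by an inline array. A minimal sketch of the assumed companion definition (the ordered-events.h side of this change is not shown in this diff):

    /* Assumed layout in util/ordered-events.h: a list node followed by
     * the event slots, so the list linkage no longer squats in event[0]. */
    struct ordered_events_buffer {
        struct list_head     list;     /* linked into oe->to_free */
        struct ordered_event event[];  /* MAX_SAMPLE_BUFFER slots */
    };

This also explains the buffer_idx change in the next hunk: with a dedicated list_head, event[0] becomes usable and the first allocation can start at index 0.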
@@ -122,11 +157,11 @@
 		oe->cur_alloc_size += size;
 		list_add(&oe->buffer->list, &oe->to_free);
 
-		/* First entry is abused to maintain the to_free list. */
-		oe->buffer_idx = 2;
-		new = oe->buffer + 1;
+		oe->buffer_idx = 1;
+		new = &oe->buffer->event[0];
 	} else {
 		pr("allocation limit reached %" PRIu64 "B\n", oe->max_alloc_size);
+		return NULL;
 	}
 
 	new->event = new_event;
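
Two behavioral fixes ride along with the buffer restructuring here. First, the allocation-limit test now charges the buffer it is about to allocate, so max_alloc_size can no longer be overshot by nearly a full 64K buffer. Second, the old else branch fell through with new == NULL straight into the new->event assignment above; the added return NULL turns that NULL dereference into a clean failure. A worked instance of the accounting, with hypothetical sizes:

    /*
     * Hypothetical numbers: max_alloc_size = 128 KB, cur_alloc_size = 127 KB,
     * size = sizeof(*oe->buffer) + 64 KB of event slots.
     *
     *   old check:  127 KB < 128 KB          -> allocates, usage ends near 191 KB
     *   new check: (127 KB + size) < 128 KB  -> fails, alloc_event() returns NULL
     */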
@@ -185,13 +220,12 @@
 	return 0;
 }
 
-static int __ordered_events__flush(struct ordered_events *oe)
+static int do_flush(struct ordered_events *oe, bool show_progress)
 {
 	struct list_head *head = &oe->events;
 	struct ordered_event *tmp, *iter;
 	u64 limit = oe->next_flush;
 	u64 last_ts = oe->last ? oe->last->timestamp : 0ULL;
-	bool show_progress = limit == ULLONG_MAX;
 	struct ui_progress prog;
 	int ret;
 
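
Whether to draw a progress bar is now the caller's decision instead of being inferred from limit == ULLONG_MAX; that matters because the new TOP flush also uses ULLONG_MAX but should stay silent. A sketch of how do_flush() presumably consumes the flag, assuming the ui_progress API from ui/progress.h (which the first hunk starts including):

    if (show_progress)
        ui_progress__init(&prog, oe->nr_events,
                          "Processing time ordered events...");

(The title string is illustrative; only the init-when-requested shape is implied by this diff.)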
@@ -229,21 +263,28 @@
 	return 0;
 }
 
-int ordered_events__flush(struct ordered_events *oe, enum oe_flush how)
+static int __ordered_events__flush(struct ordered_events *oe, enum oe_flush how,
+				   u64 timestamp)
 {
 	static const char * const str[] = {
 		"NONE",
 		"FINAL",
 		"ROUND",
 		"HALF ",
+		"TOP ",
+		"TIME ",
 	};
 	int err;
+	bool show_progress = false;
 
 	if (oe->nr_events == 0)
 		return 0;
 
 	switch (how) {
 	case OE_FLUSH__FINAL:
+		show_progress = true;
+		__fallthrough;
+	case OE_FLUSH__TOP:
 		oe->next_flush = ULLONG_MAX;
 		break;
 
249290
....@@ -264,17 +305,22 @@
264305 break;
265306 }
266307
308
+ case OE_FLUSH__TIME:
309
+ oe->next_flush = timestamp;
310
+ show_progress = false;
311
+ break;
312
+
267313 case OE_FLUSH__ROUND:
268314 case OE_FLUSH__NONE:
269315 default:
270316 break;
271
- };
317
+ }
272318
273319 pr_oe_time(oe->next_flush, "next_flush - ordered_events__flush PRE %s, nr_events %u\n",
274320 str[how], oe->nr_events);
275321 pr_oe_time(oe->max_timestamp, "max_timestamp\n");
276322
277
- err = __ordered_events__flush(oe);
323
+ err = do_flush(oe, show_progress);
278324
279325 if (!err) {
280326 if (how == OE_FLUSH__ROUND)
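
The str[] table is indexed by enum oe_flush, so the two added names imply two new enumerators appended on the header side. A sketch of the assumed enum (ordered-events.h is outside this diff); __fallthrough is the tools-tree annotation documenting the intentional FINAL -> TOP fallthrough, so both share the ULLONG_MAX flush while only FINAL enables the progress bar:

    enum oe_flush {
        OE_FLUSH__NONE,
        OE_FLUSH__FINAL,
        OE_FLUSH__ROUND,
        OE_FLUSH__HALF,
        OE_FLUSH__TOP,   /* flush everything, no progress bar */
        OE_FLUSH__TIME,  /* flush entries up to a given timestamp */
    };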
@@ -290,7 +336,29 @@
 	return err;
 }
 
-void ordered_events__init(struct ordered_events *oe, ordered_events__deliver_t deliver)
+int ordered_events__flush(struct ordered_events *oe, enum oe_flush how)
+{
+	return __ordered_events__flush(oe, how, 0);
+}
+
+int ordered_events__flush_time(struct ordered_events *oe, u64 timestamp)
+{
+	return __ordered_events__flush(oe, OE_FLUSH__TIME, timestamp);
+}
+
+u64 ordered_events__first_time(struct ordered_events *oe)
+{
+	struct ordered_event *event;
+
+	if (list_empty(&oe->events))
+		return 0;
+
+	event = list_first_entry(&oe->events, struct ordered_event, list);
+	return event->timestamp;
+}
+
+void ordered_events__init(struct ordered_events *oe, ordered_events__deliver_t deliver,
+			  void *data)
 {
 	INIT_LIST_HEAD(&oe->events);
 	INIT_LIST_HEAD(&oe->cache);
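
ordered_events__flush() keeps its old signature as a thin wrapper, while ordered_events__flush_time() and ordered_events__first_time() expose timestamp-bounded draining. A hypothetical caller that drains everything queued up to ts (only the two new functions are real; the surrounding logic is invented):

    u64 first = ordered_events__first_time(oe);

    /* Nothing queued, or nothing old enough to bother flushing. */
    if (first == 0 || first > ts)
        return 0;

    return ordered_events__flush_time(oe, ts);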
@@ -298,17 +366,43 @@
 	oe->max_alloc_size = (u64) -1;
 	oe->cur_alloc_size = 0;
 	oe->deliver = deliver;
+	oe->data = data;
+}
+
+static void
+ordered_events_buffer__free(struct ordered_events_buffer *buffer,
+			    unsigned int max, struct ordered_events *oe)
+{
+	if (oe->copy_on_queue) {
+		unsigned int i;
+
+		for (i = 0; i < max; i++)
+			__free_dup_event(oe, buffer->event[i].event);
+	}
+
+	free(buffer);
 }
 
 void ordered_events__free(struct ordered_events *oe)
 {
-	while (!list_empty(&oe->to_free)) {
-		struct ordered_event *event;
+	struct ordered_events_buffer *buffer, *tmp;
 
-		event = list_entry(oe->to_free.next, struct ordered_event, list);
-		list_del(&event->list);
-		free_dup_event(oe, event->event);
-		free(event);
+	if (list_empty(&oe->to_free))
+		return;
+
+	/*
+	 * Current buffer might not have all the events allocated
+	 * yet, we need to free only allocated ones ...
+	 */
+	if (oe->buffer) {
+		list_del_init(&oe->buffer->list);
+		ordered_events_buffer__free(oe->buffer, oe->buffer_idx, oe);
+	}
+
+	/* ... and continue with the rest */
+	list_for_each_entry_safe(buffer, tmp, &oe->to_free, list) {
+		list_del_init(&buffer->list);
+		ordered_events_buffer__free(buffer, MAX_SAMPLE_BUFFER, oe);
 	}
 }
 
@@ -318,5 +412,5 @@
 
 	ordered_events__free(oe);
 	memset(oe, '\0', sizeof(*oe));
-	ordered_events__init(oe, old_deliver);
+	ordered_events__init(oe, old_deliver, oe->data);
 }
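
One caveat in this last hunk: the oe->data argument is evaluated after memset() has already zeroed the struct, so the cookie registered at init time does not survive a reinit. A hypothetical variant that would preserve it (function name invented for illustration):

    void ordered_events__reinit_keep_data(struct ordered_events *oe)
    {
        ordered_events__deliver_t old_deliver = oe->deliver;
        void *old_data = oe->data;  /* capture before the memset */

        ordered_events__free(oe);
        memset(oe, '\0', sizeof(*oe));
        ordered_events__init(oe, old_deliver, old_data);
    }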