2023-12-08 01573e231f18eb2d99162747186f59511f56b64d
kernel/tools/perf/util/env.c
@@ -1,17 +1,179 @@
 // SPDX-License-Identifier: GPL-2.0
 #include "cpumap.h"
+#include "debug.h"
 #include "env.h"
-#include "sane_ctype.h"
-#include "util.h"
+#include "util/header.h"
+#include <linux/ctype.h>
+#include <linux/zalloc.h>
+#include "bpf-event.h"
+#include "cgroup.h"
 #include <errno.h>
 #include <sys/utsname.h>
+#include <bpf/libbpf.h>
+#include <stdlib.h>
+#include <string.h>
 
 struct perf_env perf_env;
+
+void perf_env__insert_bpf_prog_info(struct perf_env *env,
+				    struct bpf_prog_info_node *info_node)
+{
+	__u32 prog_id = info_node->info_linear->info.id;
+	struct bpf_prog_info_node *node;
+	struct rb_node *parent = NULL;
+	struct rb_node **p;
+
+	down_write(&env->bpf_progs.lock);
+	p = &env->bpf_progs.infos.rb_node;
+
+	while (*p != NULL) {
+		parent = *p;
+		node = rb_entry(parent, struct bpf_prog_info_node, rb_node);
+		if (prog_id < node->info_linear->info.id) {
+			p = &(*p)->rb_left;
+		} else if (prog_id > node->info_linear->info.id) {
+			p = &(*p)->rb_right;
+		} else {
+			pr_debug("duplicated bpf prog info %u\n", prog_id);
+			goto out;
+		}
+	}
+
+	rb_link_node(&info_node->rb_node, parent, p);
+	rb_insert_color(&info_node->rb_node, &env->bpf_progs.infos);
+	env->bpf_progs.infos_cnt++;
+out:
+	up_write(&env->bpf_progs.lock);
+}
+
+struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
+							__u32 prog_id)
+{
+	struct bpf_prog_info_node *node = NULL;
+	struct rb_node *n;
+
+	down_read(&env->bpf_progs.lock);
+	n = env->bpf_progs.infos.rb_node;
+
+	while (n) {
+		node = rb_entry(n, struct bpf_prog_info_node, rb_node);
+		if (prog_id < node->info_linear->info.id)
+			n = n->rb_left;
+		else if (prog_id > node->info_linear->info.id)
+			n = n->rb_right;
+		else
+			goto out;
+	}
+	node = NULL;
+
+out:
+	up_read(&env->bpf_progs.lock);
+	return node;
+}
+
+bool perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
+{
+	struct rb_node *parent = NULL;
+	__u32 btf_id = btf_node->id;
+	struct btf_node *node;
+	struct rb_node **p;
+	bool ret = true;
+
+	down_write(&env->bpf_progs.lock);
+	p = &env->bpf_progs.btfs.rb_node;
+
+	while (*p != NULL) {
+		parent = *p;
+		node = rb_entry(parent, struct btf_node, rb_node);
+		if (btf_id < node->id) {
+			p = &(*p)->rb_left;
+		} else if (btf_id > node->id) {
+			p = &(*p)->rb_right;
+		} else {
+			pr_debug("duplicated btf %u\n", btf_id);
+			ret = false;
+			goto out;
+		}
+	}
+
+	rb_link_node(&btf_node->rb_node, parent, p);
+	rb_insert_color(&btf_node->rb_node, &env->bpf_progs.btfs);
+	env->bpf_progs.btfs_cnt++;
+out:
+	up_write(&env->bpf_progs.lock);
+	return ret;
+}
+
+struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id)
+{
+	struct btf_node *node = NULL;
+	struct rb_node *n;
+
+	down_read(&env->bpf_progs.lock);
+	n = env->bpf_progs.btfs.rb_node;
+
+	while (n) {
+		node = rb_entry(n, struct btf_node, rb_node);
+		if (btf_id < node->id)
+			n = n->rb_left;
+		else if (btf_id > node->id)
+			n = n->rb_right;
+		else
+			goto out;
+	}
+	node = NULL;
+
+out:
+	up_read(&env->bpf_progs.lock);
+	return node;
+}
+
+/* purge data in bpf_progs.infos tree */
+static void perf_env__purge_bpf(struct perf_env *env)
+{
+	struct rb_root *root;
+	struct rb_node *next;
+
+	down_write(&env->bpf_progs.lock);
+
+	root = &env->bpf_progs.infos;
+	next = rb_first(root);
+
+	while (next) {
+		struct bpf_prog_info_node *node;
+
+		node = rb_entry(next, struct bpf_prog_info_node, rb_node);
+		next = rb_next(&node->rb_node);
+		rb_erase(&node->rb_node, root);
+		free(node->info_linear);
+		free(node);
+	}
+
+	env->bpf_progs.infos_cnt = 0;
+
+	root = &env->bpf_progs.btfs;
+	next = rb_first(root);
+
+	while (next) {
+		struct btf_node *node;
+
+		node = rb_entry(next, struct btf_node, rb_node);
+		next = rb_next(&node->rb_node);
+		rb_erase(&node->rb_node, root);
+		free(node);
+	}
+
+	env->bpf_progs.btfs_cnt = 0;
+
+	up_write(&env->bpf_progs.lock);
+}
 
 void perf_env__exit(struct perf_env *env)
 {
 	int i;
 
+	perf_env__purge_bpf(env);
+	perf_env__purge_cgroups(env);
 	zfree(&env->hostname);
 	zfree(&env->os_release);
 	zfree(&env->version);
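Note on the two insert/find pairs above: both walk a tree keyed by id, taking bpf_progs.lock in write mode to insert (rejecting duplicates) and in read mode to look up. The kernel's rb_node/rwsem machinery isn't available in a standalone build, so the sketch below mirrors the same walk with a plain unbalanced BST and a pthread rwlock; every name in it is illustrative, not part of perf.

#include <pthread.h>
#include <stdio.h>

/* Illustrative stand-ins for bpf_prog_info_node and env->bpf_progs. */
struct prog_node {
	unsigned int id;			/* search key, like info.id */
	struct prog_node *left, *right;
};

struct prog_tree {
	struct prog_node *root;
	pthread_rwlock_t lock;			/* plays the role of the rwsem */
	int cnt;
};

/* Mirrors perf_env__insert_bpf_prog_info(): walk down by id, reject dups. */
static int prog_tree__insert(struct prog_tree *tree, struct prog_node *new)
{
	struct prog_node **p;
	int ret = 1;

	new->left = new->right = NULL;
	pthread_rwlock_wrlock(&tree->lock);
	p = &tree->root;
	while (*p != NULL) {
		if (new->id < (*p)->id) {
			p = &(*p)->left;
		} else if (new->id > (*p)->id) {
			p = &(*p)->right;
		} else {
			fprintf(stderr, "duplicated prog info %u\n", new->id);
			ret = 0;		/* duplicate: caller keeps ownership */
			goto out;
		}
	}
	*p = new;				/* rb_link_node + rb_insert_color, minus rebalancing */
	tree->cnt++;
out:
	pthread_rwlock_unlock(&tree->lock);
	return ret;
}

/* Mirrors perf_env__find_bpf_prog_info(): the same walk under the read lock. */
static struct prog_node *prog_tree__find(struct prog_tree *tree, unsigned int id)
{
	struct prog_node *n;

	pthread_rwlock_rdlock(&tree->lock);
	n = tree->root;
	while (n && n->id != id)
		n = id < n->id ? n->left : n->right;
	pthread_rwlock_unlock(&tree->lock);
	return n;
}

int main(void)
{
	struct prog_tree tree = { NULL, PTHREAD_RWLOCK_INITIALIZER, 0 };
	struct prog_node a = { .id = 42 };

	prog_tree__insert(&tree, &a);
	printf("found id 42: %s\n", prog_tree__find(&tree, 42) ? "yes" : "no");
	return 0;
}

The only thing the rbtree version adds on top of this walk is the recoloring/rotation that rb_insert_color() performs to keep lookups O(log n).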
@@ -20,13 +182,16 @@
 	zfree(&env->cpuid);
 	zfree(&env->cmdline);
 	zfree(&env->cmdline_argv);
+	zfree(&env->sibling_dies);
 	zfree(&env->sibling_cores);
 	zfree(&env->sibling_threads);
 	zfree(&env->pmu_mappings);
 	zfree(&env->cpu);
+	zfree(&env->cpu_pmu_caps);
+	zfree(&env->numa_map);
 
 	for (i = 0; i < env->nr_numa_nodes; i++)
-		cpu_map__put(env->numa_nodes[i].map);
+		perf_cpu_map__put(env->numa_nodes[i].map);
 	zfree(&env->numa_nodes);
 
 	for (i = 0; i < env->caches_cnt; i++)
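The cpu_map__put() -> perf_cpu_map__put() rename in this hunk follows the cpu map's move into libperf; the put() side of that API drops a reference and frees the map on the last one. Roughly this idiom, sketched with a simplified non-atomic counter (the real implementation uses a proper refcount type):

#include <stdlib.h>

/* Illustrative refcounted container, standing in for a libperf cpu map. */
struct refmap {
	int refcnt;
	int *cpus;
};

/* put() idiom akin to perf_cpu_map__put(): last reference frees the object. */
static void refmap__put(struct refmap *m)
{
	if (m && --m->refcnt == 0) {
		free(m->cpus);
		free(m);
	}
}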
@@ -34,8 +199,15 @@
 	zfree(&env->caches);
 
 	for (i = 0; i < env->nr_memory_nodes; i++)
-		free(env->memory_nodes[i].set);
+		zfree(&env->memory_nodes[i].set);
 	zfree(&env->memory_nodes);
+}
+
+void perf_env__init(struct perf_env *env)
+{
+	env->bpf_progs.infos = RB_ROOT;
+	env->bpf_progs.btfs = RB_ROOT;
+	init_rwsem(&env->bpf_progs.lock);
 }
 
 int perf_env__set_cmdline(struct perf_env *env, int argc, const char *argv[])
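Continuing the illustrative types from the earlier sketch, the userspace analogue of perf_env__init() above is nothing more than empty roots plus a freshly initialized lock:

/* Analogue of perf_env__init(): empty root (RB_ROOT equivalent), fresh lock. */
static void prog_tree__init(struct prog_tree *tree)
{
	tree->root = NULL;
	tree->cnt = 0;
	pthread_rwlock_init(&tree->lock, NULL);
}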
@@ -87,9 +259,25 @@
 	for (cpu = 0; cpu < nr_cpus; ++cpu) {
 		env->cpu[cpu].core_id = cpu_map__get_core_id(cpu);
 		env->cpu[cpu].socket_id = cpu_map__get_socket_id(cpu);
+		env->cpu[cpu].die_id = cpu_map__get_die_id(cpu);
 	}
 
 	env->nr_cpus_avail = nr_cpus;
+	return 0;
+}
+
+int perf_env__read_cpuid(struct perf_env *env)
+{
+	char cpuid[128];
+	int err = get_cpuid(cpuid, sizeof(cpuid));
+
+	if (err)
+		return err;
+
+	free(env->cpuid);
+	env->cpuid = strdup(cpuid);
+	if (env->cpuid == NULL)
+		return ENOMEM;
 	return 0;
 }
 
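perf_env__read_cpuid() above uses a common C pattern: fill a fixed stack buffer, then strdup() the result into the heap, freeing any previous value first so repeated calls don't leak. A generic standalone version of that pattern; get_cpuid() is a perf-internal helper, so a hypothetical fill_id() stands in for it here:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-in for get_cpuid(): fills buf, returns 0 on success. */
static int fill_id(char *buf, size_t sz)
{
	return snprintf(buf, sz, "GenuineExample-6-42") < (int)sz ? 0 : ENAMETOOLONG;
}

/* Same shape as perf_env__read_cpuid(): stack buffer -> owned heap copy. */
static int read_id(char **slot)
{
	char buf[128];
	int err = fill_id(buf, sizeof(buf));

	if (err)
		return err;

	free(*slot);				/* drop any previous value */
	*slot = strdup(buf);
	if (*slot == NULL)
		return ENOMEM;
	return 0;
}

int main(void)
{
	char *id = NULL;

	if (!read_id(&id))
		printf("id: %s\n", id);
	free(id);
	return 0;
}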
@@ -126,9 +314,9 @@
 
 void cpu_cache_level__free(struct cpu_cache_level *cache)
 {
-	free(cache->type);
-	free(cache->map);
-	free(cache->size);
+	zfree(&cache->type);
+	zfree(&cache->map);
+	zfree(&cache->size);
 }
 
 /*
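The free() -> zfree() conversions here and in the memory_nodes hunk are about poisoning the pointer after the free: tools/perf's zfree() (from linux/zalloc.h) frees *ptr and then sets it to NULL, so a stray double-free or use-after-free turns into a harmless no-op or a loud NULL deref instead of silent heap corruption. A minimal userspace equivalent, assuming nothing beyond libc:

#include <stdlib.h>

/* Minimal zfree() equivalent: free the pointee, then NULL the pointer. */
#define zfree(pp)				\
	do {					\
		free(*(pp));			\
		*(pp) = NULL;			\
	} while (0)

struct cache_level {
	char *type;
	char *map;
	char *size;
};

/* Mirrors cpu_cache_level__free() in the hunk above; calling it twice is
 * now safe, because free(NULL) is defined to do nothing. */
static void cache_level__free(struct cache_level *c)
{
	zfree(&c->type);
	zfree(&c->map);
	zfree(&c->size);
}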
@@ -163,11 +351,11 @@
 
 const char *perf_env__arch(struct perf_env *env)
 {
-	struct utsname uts;
 	char *arch_name;
 
 	if (!env || !env->arch) { /* Assume local operation */
-		if (uname(&uts) < 0)
+		static struct utsname uts = { .machine[0] = '\0', };
+		if (uts.machine[0] == '\0' && uname(&uts) < 0)
 			return NULL;
 		arch_name = uts.machine;
 	} else
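The perf_env__arch() change avoids re-running uname(2) on every call by caching the result in a function-local static; the sentinel is machine[0] == '\0', which only holds before the first successful call. The same trick in a standalone program (note the cache is not thread-safe, though concurrent callers would merely fill it with identical data):

#include <stdio.h>
#include <sys/utsname.h>

/* Returns the machine string, calling uname(2) at most once per process. */
static const char *cached_machine(void)
{
	static struct utsname uts = { .machine[0] = '\0', };

	if (uts.machine[0] == '\0' && uname(&uts) < 0)
		return NULL;
	return uts.machine;
}

int main(void)
{
	const char *m = cached_machine();

	printf("machine: %s\n", m ? m : "(unknown)");
	return 0;
}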
@@ -175,3 +363,42 @@
 
 	return normalize_arch(arch_name);
 }
+
+
+int perf_env__numa_node(struct perf_env *env, int cpu)
+{
+	if (!env->nr_numa_map) {
+		struct numa_node *nn;
+		int i, nr = 0;
+
+		for (i = 0; i < env->nr_numa_nodes; i++) {
+			nn = &env->numa_nodes[i];
+			nr = max(nr, perf_cpu_map__max(nn->map));
+		}
+
+		nr++;
+
+		/*
+		 * We initialize the numa_map array to prepare
+		 * it for missing cpus, which return node -1
+		 */
+		env->numa_map = malloc(nr * sizeof(int));
+		if (!env->numa_map)
+			return -1;
+
+		for (i = 0; i < nr; i++)
+			env->numa_map[i] = -1;
+
+		env->nr_numa_map = nr;
+
+		for (i = 0; i < env->nr_numa_nodes; i++) {
+			int tmp, j;
+
+			nn = &env->numa_nodes[i];
+			perf_cpu_map__for_each_cpu(j, tmp, nn->map)
+				env->numa_map[j] = i;
+		}
+	}
+
+	return cpu >= 0 && cpu < env->nr_numa_map ? env->numa_map[cpu] : -1;
+}
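perf_env__numa_node() builds its reverse map lazily on the first call: find the highest cpu number across all nodes, allocate max+1 slots, default every slot to -1, then stamp each node's cpus with the node index, so later lookups are a bounds check plus one array read. A self-contained sketch of the same construction, with the perf_cpu_map accessors replaced by plain arrays and loops:

#include <stdio.h>
#include <stdlib.h>

struct node { int id; int nr_cpus; const int *cpus; };

/* Build a cpu -> node lookup table; -1 means "cpu not in any node". */
static int *build_numa_map(const struct node *nodes, int nr_nodes, int *out_len)
{
	int *map, max_cpu = -1, i, j;

	for (i = 0; i < nr_nodes; i++)
		for (j = 0; j < nodes[i].nr_cpus; j++)
			if (nodes[i].cpus[j] > max_cpu)
				max_cpu = nodes[i].cpus[j];

	*out_len = max_cpu + 1;
	map = malloc(*out_len * sizeof(int));
	if (!map)
		return NULL;

	for (i = 0; i < *out_len; i++)
		map[i] = -1;			/* holes: cpus belonging to no node */
	for (i = 0; i < nr_nodes; i++)
		for (j = 0; j < nodes[i].nr_cpus; j++)
			map[nodes[i].cpus[j]] = nodes[i].id;
	return map;
}

int main(void)
{
	static const int n0[] = { 0, 1 }, n1[] = { 4, 5 };
	struct node nodes[] = { { 0, 2, n0 }, { 1, 2, n1 } };
	int len, cpu, *map = build_numa_map(nodes, 2, &len);

	if (!map)
		return 1;
	for (cpu = 0; cpu < len; cpu++)
		printf("cpu %d -> node %d\n", cpu, map[cpu]);
	free(map);
	return 0;
}

Trading one O(cpus) build for O(1) lookups makes sense here because the function is called per-sample in perf report/script paths, while the topology it indexes never changes within a session.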