2024-02-19 1c055e55a242a33e574e48be530e06770a210dcd
kernel/drivers/cpufreq/cpufreq_times.c
@@ -15,36 +15,15 @@
 
 #include <linux/cpufreq.h>
 #include <linux/cpufreq_times.h>
-#include <linux/hashtable.h>
-#include <linux/init.h>
 #include <linux/jiffies.h>
-#include <linux/proc_fs.h>
 #include <linux/sched.h>
 #include <linux/seq_file.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/threads.h>
-
-#define UID_HASH_BITS 10
-
-static DECLARE_HASHTABLE(uid_hash_table, UID_HASH_BITS);
+#include <trace/hooks/cpufreq.h>
 
 static DEFINE_SPINLOCK(task_time_in_state_lock); /* task->time_in_state */
-static DEFINE_SPINLOCK(uid_lock); /* uid_hash_table */
-
-struct concurrent_times {
-	atomic64_t active[NR_CPUS];
-	atomic64_t policy[NR_CPUS];
-};
-
-struct uid_entry {
-	uid_t uid;
-	unsigned int max_state;
-	struct hlist_node hash;
-	struct rcu_head rcu;
-	struct concurrent_times *concurrent_times;
-	u64 time_in_state[0];
-};
 
 /**
  * struct cpu_freqs - per-cpu frequency information
@@ -63,248 +42,6 @@
 static struct cpu_freqs *all_freqs[NR_CPUS];
 
 static unsigned int next_offset;
-
-
-/* Caller must hold rcu_read_lock() */
-static struct uid_entry *find_uid_entry_rcu(uid_t uid)
-{
-	struct uid_entry *uid_entry;
-
-	hash_for_each_possible_rcu(uid_hash_table, uid_entry, hash, uid) {
-		if (uid_entry->uid == uid)
-			return uid_entry;
-	}
-	return NULL;
-}
-
-/* Caller must hold uid lock */
-static struct uid_entry *find_uid_entry_locked(uid_t uid)
-{
-	struct uid_entry *uid_entry;
-
-	hash_for_each_possible(uid_hash_table, uid_entry, hash, uid) {
-		if (uid_entry->uid == uid)
-			return uid_entry;
-	}
-	return NULL;
-}
-
-/* Caller must hold uid lock */
-static struct uid_entry *find_or_register_uid_locked(uid_t uid)
-{
-	struct uid_entry *uid_entry, *temp;
-	struct concurrent_times *times;
-	unsigned int max_state = READ_ONCE(next_offset);
-	size_t alloc_size = sizeof(*uid_entry) + max_state *
-		sizeof(uid_entry->time_in_state[0]);
-
-	uid_entry = find_uid_entry_locked(uid);
-	if (uid_entry) {
-		if (uid_entry->max_state == max_state)
-			return uid_entry;
-		/* uid_entry->time_in_state is too small to track all freqs, so
-		 * expand it.
-		 */
-		temp = __krealloc(uid_entry, alloc_size, GFP_ATOMIC);
-		if (!temp)
-			return uid_entry;
-		temp->max_state = max_state;
-		memset(temp->time_in_state + uid_entry->max_state, 0,
-		       (max_state - uid_entry->max_state) *
-		       sizeof(uid_entry->time_in_state[0]));
-		if (temp != uid_entry) {
-			hlist_replace_rcu(&uid_entry->hash, &temp->hash);
-			kfree_rcu(uid_entry, rcu);
-		}
-		return temp;
-	}
-
-	uid_entry = kzalloc(alloc_size, GFP_ATOMIC);
-	if (!uid_entry)
-		return NULL;
-	times = kzalloc(sizeof(*times), GFP_ATOMIC);
-	if (!times) {
-		kfree(uid_entry);
-		return NULL;
-	}
-
-	uid_entry->uid = uid;
-	uid_entry->max_state = max_state;
-	uid_entry->concurrent_times = times;
-
-	hash_add_rcu(uid_hash_table, &uid_entry->hash, uid);
-
-	return uid_entry;
-}
-
-static int single_uid_time_in_state_show(struct seq_file *m, void *ptr)
-{
-	struct uid_entry *uid_entry;
-	unsigned int i;
-	uid_t uid = from_kuid_munged(current_user_ns(), *(kuid_t *)m->private);
-
-	if (uid == overflowuid)
-		return -EINVAL;
-
-	rcu_read_lock();
-
-	uid_entry = find_uid_entry_rcu(uid);
-	if (!uid_entry) {
-		rcu_read_unlock();
-		return 0;
-	}
-
-	for (i = 0; i < uid_entry->max_state; ++i) {
-		u64 time = nsec_to_clock_t(uid_entry->time_in_state[i]);
-		seq_write(m, &time, sizeof(time));
-	}
-
-	rcu_read_unlock();
-
-	return 0;
-}
-
-static void *uid_seq_start(struct seq_file *seq, loff_t *pos)
-{
-	if (*pos >= HASH_SIZE(uid_hash_table))
-		return NULL;
-
-	return &uid_hash_table[*pos];
-}
-
-static void *uid_seq_next(struct seq_file *seq, void *v, loff_t *pos)
-{
-	do {
-		(*pos)++;
-
-		if (*pos >= HASH_SIZE(uid_hash_table))
-			return NULL;
-	} while (hlist_empty(&uid_hash_table[*pos]));
-
-	return &uid_hash_table[*pos];
-}
-
-static void uid_seq_stop(struct seq_file *seq, void *v) { }
-
-static int uid_time_in_state_seq_show(struct seq_file *m, void *v)
-{
-	struct uid_entry *uid_entry;
-	struct cpu_freqs *freqs, *last_freqs = NULL;
-	int i, cpu;
-
-	if (v == uid_hash_table) {
-		seq_puts(m, "uid:");
-		for_each_possible_cpu(cpu) {
-			freqs = all_freqs[cpu];
-			if (!freqs || freqs == last_freqs)
-				continue;
-			last_freqs = freqs;
-			for (i = 0; i < freqs->max_state; i++) {
-				seq_put_decimal_ull(m, " ",
-						    freqs->freq_table[i]);
-			}
-		}
-		seq_putc(m, '\n');
-	}
-
-	rcu_read_lock();
-
-	hlist_for_each_entry_rcu(uid_entry, (struct hlist_head *)v, hash) {
-		if (uid_entry->max_state) {
-			seq_put_decimal_ull(m, "", uid_entry->uid);
-			seq_putc(m, ':');
-		}
-		for (i = 0; i < uid_entry->max_state; ++i) {
-			u64 time = nsec_to_clock_t(uid_entry->time_in_state[i]);
-			seq_put_decimal_ull(m, " ", time);
-		}
-		if (uid_entry->max_state)
-			seq_putc(m, '\n');
-	}
-
-	rcu_read_unlock();
-	return 0;
-}
-
-static int concurrent_time_seq_show(struct seq_file *m, void *v,
-	atomic64_t *(*get_times)(struct concurrent_times *))
-{
-	struct uid_entry *uid_entry;
-	int i, num_possible_cpus = num_possible_cpus();
-
-	rcu_read_lock();
-
-	hlist_for_each_entry_rcu(uid_entry, (struct hlist_head *)v, hash) {
-		atomic64_t *times = get_times(uid_entry->concurrent_times);
-
-		seq_put_decimal_ull(m, "", (u64)uid_entry->uid);
-		seq_putc(m, ':');
-
-		for (i = 0; i < num_possible_cpus; ++i) {
-			u64 time = nsec_to_clock_t(atomic64_read(&times[i]));
-
-			seq_put_decimal_ull(m, " ", time);
-		}
-		seq_putc(m, '\n');
-	}
-
-	rcu_read_unlock();
-
-	return 0;
-}
-
-static inline atomic64_t *get_active_times(struct concurrent_times *times)
-{
-	return times->active;
-}
-
-static int concurrent_active_time_seq_show(struct seq_file *m, void *v)
-{
-	if (v == uid_hash_table) {
-		seq_put_decimal_ull(m, "cpus: ", num_possible_cpus());
-		seq_putc(m, '\n');
-	}
-
-	return concurrent_time_seq_show(m, v, get_active_times);
-}
-
-static inline atomic64_t *get_policy_times(struct concurrent_times *times)
-{
-	return times->policy;
-}
-
-static int concurrent_policy_time_seq_show(struct seq_file *m, void *v)
-{
-	int i;
-	struct cpu_freqs *freqs, *last_freqs = NULL;
-
-	if (v == uid_hash_table) {
-		int cnt = 0;
-
-		for_each_possible_cpu(i) {
-			freqs = all_freqs[i];
-			if (!freqs)
-				continue;
-			if (freqs != last_freqs) {
-				if (last_freqs) {
-					seq_put_decimal_ull(m, ": ", cnt);
-					seq_putc(m, ' ');
-					cnt = 0;
-				}
-				seq_put_decimal_ull(m, "policy", i);
-
-				last_freqs = freqs;
-			}
-			cnt++;
-		}
-		if (last_freqs) {
-			seq_put_decimal_ull(m, ": ", cnt);
-			seq_putc(m, '\n');
-		}
-	}
-
-	return concurrent_time_seq_show(m, v, get_policy_times);
-}
 
 void cpufreq_task_times_init(struct task_struct *p)
 {
@@ -398,14 +135,7 @@
 {
 	unsigned long flags;
 	unsigned int state;
-	unsigned int active_cpu_cnt = 0;
-	unsigned int policy_cpu_cnt = 0;
-	unsigned int policy_first_cpu;
-	struct uid_entry *uid_entry;
 	struct cpu_freqs *freqs = all_freqs[task_cpu(p)];
-	struct cpufreq_policy *policy;
-	uid_t uid = from_kuid_munged(current_user_ns(), task_uid(p));
-	int cpu = 0;
 
 	if (!freqs || is_idle_task(p) || p->flags & PF_EXITING)
 		return;
@@ -418,47 +148,7 @@
 	p->time_in_state[state] += cputime;
 	spin_unlock_irqrestore(&task_time_in_state_lock, flags);
 
-	spin_lock_irqsave(&uid_lock, flags);
-	uid_entry = find_or_register_uid_locked(uid);
-	if (uid_entry && state < uid_entry->max_state)
-		uid_entry->time_in_state[state] += cputime;
-	spin_unlock_irqrestore(&uid_lock, flags);
-
-	rcu_read_lock();
-	uid_entry = find_uid_entry_rcu(uid);
-	if (!uid_entry) {
-		rcu_read_unlock();
-		return;
-	}
-
-	for_each_possible_cpu(cpu)
-		if (!idle_cpu(cpu))
-			++active_cpu_cnt;
-
-	atomic64_add(cputime,
-		     &uid_entry->concurrent_times->active[active_cpu_cnt - 1]);
-
-	policy = cpufreq_cpu_get(task_cpu(p));
-	if (!policy) {
-		/*
-		 * This CPU may have just come up and not have a cpufreq policy
-		 * yet.
-		 */
-		rcu_read_unlock();
-		return;
-	}
-
-	for_each_cpu(cpu, policy->related_cpus)
-		if (!idle_cpu(cpu))
-			++policy_cpu_cnt;
-
-	policy_first_cpu = cpumask_first(policy->related_cpus);
-	cpufreq_cpu_put(policy);
-
-	atomic64_add(cputime,
-		     &uid_entry->concurrent_times->policy[policy_first_cpu +
-							  policy_cpu_cnt - 1]);
-	rcu_read_unlock();
+	trace_android_vh_cpufreq_acct_update_power(cputime, p, state);
 }
 
 static int cpufreq_times_get_index(struct cpu_freqs *freqs, unsigned int freq)
@@ -510,36 +200,6 @@
 	all_freqs[cpu] = freqs;
 }
 
-static void uid_entry_reclaim(struct rcu_head *rcu)
-{
-	struct uid_entry *uid_entry = container_of(rcu, struct uid_entry, rcu);
-
-	kfree(uid_entry->concurrent_times);
-	kfree(uid_entry);
-}
-
-void cpufreq_task_times_remove_uids(uid_t uid_start, uid_t uid_end)
-{
-	struct uid_entry *uid_entry;
-	struct hlist_node *tmp;
-	unsigned long flags;
-	u64 uid;
-
-	spin_lock_irqsave(&uid_lock, flags);
-
-	for (uid = uid_start; uid <= uid_end; uid++) {
-		hash_for_each_possible_safe(uid_hash_table, uid_entry, tmp,
-					    hash, uid) {
-			if (uid == uid_entry->uid) {
-				hash_del_rcu(&uid_entry->hash);
-				call_rcu(&uid_entry->rcu, uid_entry_reclaim);
-			}
-		}
-	}
-
-	spin_unlock_irqrestore(&uid_lock, flags);
-}
-
 void cpufreq_times_record_transition(struct cpufreq_policy *policy,
 				     unsigned int new_freq)
 {
@@ -552,82 +212,3 @@
 	if (index >= 0)
 		WRITE_ONCE(freqs->last_index, index);
 }
-
-static const struct seq_operations uid_time_in_state_seq_ops = {
-	.start = uid_seq_start,
-	.next = uid_seq_next,
-	.stop = uid_seq_stop,
-	.show = uid_time_in_state_seq_show,
-};
-
-static int uid_time_in_state_open(struct inode *inode, struct file *file)
-{
-	return seq_open(file, &uid_time_in_state_seq_ops);
-}
-
-int single_uid_time_in_state_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, single_uid_time_in_state_show,
-			   &(inode->i_uid));
-}
-
-static const struct file_operations uid_time_in_state_fops = {
-	.open = uid_time_in_state_open,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = seq_release,
-};
-
-static const struct seq_operations concurrent_active_time_seq_ops = {
-	.start = uid_seq_start,
-	.next = uid_seq_next,
-	.stop = uid_seq_stop,
-	.show = concurrent_active_time_seq_show,
-};
-
-static int concurrent_active_time_open(struct inode *inode, struct file *file)
-{
-	return seq_open(file, &concurrent_active_time_seq_ops);
-}
-
-static const struct file_operations concurrent_active_time_fops = {
-	.open = concurrent_active_time_open,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = seq_release,
-};
-
-static const struct seq_operations concurrent_policy_time_seq_ops = {
-	.start = uid_seq_start,
-	.next = uid_seq_next,
-	.stop = uid_seq_stop,
-	.show = concurrent_policy_time_seq_show,
-};
-
-static int concurrent_policy_time_open(struct inode *inode, struct file *file)
-{
-	return seq_open(file, &concurrent_policy_time_seq_ops);
-}
-
-static const struct file_operations concurrent_policy_time_fops = {
-	.open = concurrent_policy_time_open,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = seq_release,
-};
-
-static int __init cpufreq_times_init(void)
-{
-	proc_create_data("uid_time_in_state", 0444, NULL,
-			 &uid_time_in_state_fops, NULL);
-
-	proc_create_data("uid_concurrent_active_time", 0444, NULL,
-			 &concurrent_active_time_fops, NULL);
-
-	proc_create_data("uid_concurrent_policy_time", 0444, NULL,
-			 &concurrent_policy_time_fops, NULL);
-
-	return 0;
-}
-
-early_initcall(cpufreq_times_init);
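
Note on the change: the patch strips the in-kernel per-UID bookkeeping out of cpufreq_times.c (the uid hash table, the concurrent-time counters, and the /proc/uid_time_in_state, /proc/uid_concurrent_active_time and /proc/uid_concurrent_policy_time interfaces), keeps only the per-task time_in_state accounting, and fires the vendor hook trace_android_vh_cpufreq_acct_update_power(cputime, p, state) from cpufreq_acct_update_power() instead. The sketch below is a hedged illustration, not part of the patch: it shows how an out-of-tree vendor module could attach to that hook and take over the accounting. The hook name comes from the call site added above; the register_trace_android_vh_*() form follows the usual API generated for Android vendor hooks, and the module, handler and file names here are hypothetical.

/*
 * Minimal sketch of a vendor module consuming the hook (assumed names:
 * cpufreq_times_vendor.c, acct_update_power_hook).
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <trace/hooks/cpufreq.h>

/* Vendor-hook handlers take a private data pointer first, then the hook's arguments. */
static void acct_update_power_hook(void *data, u64 cputime,
				   struct task_struct *p, unsigned int state)
{
	/*
	 * Vendor-specific accounting would go here, e.g. accumulating
	 * cputime per UID (task_uid(p)) and frequency index (state),
	 * replacing the tables this patch removed.
	 */
}

static int __init cpufreq_times_vendor_init(void)
{
	/* Register for the hook fired from cpufreq_acct_update_power(). */
	return register_trace_android_vh_cpufreq_acct_update_power(
			acct_update_power_hook, NULL);
}

static void __exit cpufreq_times_vendor_exit(void)
{
	unregister_trace_android_vh_cpufreq_acct_update_power(
			acct_update_power_hook, NULL);
}

module_init(cpufreq_times_vendor_init);
module_exit(cpufreq_times_vendor_exit);
MODULE_LICENSE("GPL v2");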