 .. |  .. |
  5 |   5 |  * Based on the work by Paul Menage (menage@google.com) and Balbir Singh
  6 |   6 |  * (balbir@in.ibm.com).
  7 |   7 |  */
    |   8 | +#include <asm/irq_regs.h>
  8 |   9 | #include "sched.h"
  9 |  10 |
 10 |  11 | /* Time spent by the tasks of the CPU accounting group executing in ... */
 .. |  .. |
 20 |  21 |         [CPUACCT_STAT_SYSTEM] = "system",
 21 |  22 | };
 22 |  23 |
 23 |     | -struct cpuacct_usage {
 24 |     | -        u64 usages[CPUACCT_STAT_NSTATS];
 25 |     | -};
 26 |     | -
 27 |  24 | /* track CPU usage of a group of tasks and its child groups */
 28 |  25 | struct cpuacct {
 29 |  26 |         struct cgroup_subsys_state css;
 30 |  27 |         /* cpuusage holds pointer to a u64-type object on every CPU */
 31 |     | -        struct cpuacct_usage __percpu *cpuusage;
    |  28 | +        u64 __percpu *cpuusage;
 32 |  29 |         struct kernel_cpustat __percpu *cpustat;
 33 |  30 | };
 34 |  31 |
 .. |  .. |
 48 |  45 |         return css_ca(ca->css.parent);
 49 |  46 | }
 50 |  47 |
 51 |     | -static DEFINE_PER_CPU(struct cpuacct_usage, root_cpuacct_cpuusage);
    |  48 | +static DEFINE_PER_CPU(u64, root_cpuacct_cpuusage);
 52 |  49 | static struct cpuacct root_cpuacct = {
 53 |  50 |         .cpustat = &kernel_cpustat,
 54 |  51 |         .cpuusage = &root_cpuacct_cpuusage,
 .. |  .. |
 67 |  64 |         if (!ca)
 68 |  65 |                 goto out;
 69 |  66 |
 70 |     | -        ca->cpuusage = alloc_percpu(struct cpuacct_usage);
    |  67 | +        ca->cpuusage = alloc_percpu(u64);
 71 |  68 |         if (!ca->cpuusage)
 72 |  69 |                 goto out_free_ca;
 73 |  70 |
 .. |  .. |
 98 |  95 | static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu,
 99 |  96 |                                  enum cpuacct_stat_index index)
100 |  97 | {
101 |     | -        struct cpuacct_usage *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
    |  98 | +        u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
    |  99 | +        u64 *cpustat = per_cpu_ptr(ca->cpustat, cpu)->cpustat;
102 | 100 |         u64 data;
103 | 101 |
104 | 102 |         /*
 .. |  .. |
114 | 112 |         raw_spin_lock_irq(&cpu_rq(cpu)->lock);
115 | 113 | #endif
116 | 114 |
117 |     | -        if (index == CPUACCT_STAT_NSTATS) {
118 |     | -                int i = 0;
119 |     | -
120 |     | -                data = 0;
121 |     | -                for (i = 0; i < CPUACCT_STAT_NSTATS; i++)
122 |     | -                        data += cpuusage->usages[i];
123 |     | -        } else {
124 |     | -                data = cpuusage->usages[index];
    | 115 | +        switch (index) {
    | 116 | +        case CPUACCT_STAT_USER:
    | 117 | +                data = cpustat[CPUTIME_USER] + cpustat[CPUTIME_NICE];
    | 118 | +                break;
    | 119 | +        case CPUACCT_STAT_SYSTEM:
    | 120 | +                data = cpustat[CPUTIME_SYSTEM] + cpustat[CPUTIME_IRQ] +
    | 121 | +                        cpustat[CPUTIME_SOFTIRQ];
    | 122 | +                break;
    | 123 | +        case CPUACCT_STAT_NSTATS:
    | 124 | +                data = *cpuusage;
    | 125 | +                break;
125 | 126 |         }
126 | 127 |
127 | 128 | #ifndef CONFIG_64BIT
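After this hunk, per-index reads come from the generic kernel_cpustat buckets rather than a private cpuacct_usage array: user time is the sum of the USER and NICE buckets, system time the sum of SYSTEM, IRQ and SOFTIRQ, and only the combined total (requested via CPUACCT_STAT_NSTATS) is still served from the per-CPU cpuusage counter. A minimal userspace sketch of that aggregation, with stand-in enum values and made-up sample numbers rather than real kernel data:

```c
#include <stdio.h>
#include <stdint.h>

/* Stand-in for the kernel's cputime buckets; values below are illustrative only. */
enum cputime_idx { CPUTIME_USER, CPUTIME_NICE, CPUTIME_SYSTEM,
                   CPUTIME_IRQ, CPUTIME_SOFTIRQ, CPUTIME_NR };

int main(void)
{
        /* Hypothetical per-CPU nanosecond totals for one CPU. */
        uint64_t cpustat[CPUTIME_NR] = {
                [CPUTIME_USER]    = 1200000000ULL,
                [CPUTIME_NICE]    =  300000000ULL,
                [CPUTIME_SYSTEM]  =  500000000ULL,
                [CPUTIME_IRQ]     =   40000000ULL,
                [CPUTIME_SOFTIRQ] =   60000000ULL,
        };

        /* Same aggregation the patched cpuacct_cpuusage_read() performs. */
        uint64_t user   = cpustat[CPUTIME_USER] + cpustat[CPUTIME_NICE];
        uint64_t system = cpustat[CPUTIME_SYSTEM] + cpustat[CPUTIME_IRQ] +
                          cpustat[CPUTIME_SOFTIRQ];

        printf("user:   %llu ns\n", (unsigned long long)user);
        printf("system: %llu ns\n", (unsigned long long)system);
        return 0;
}
```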
 .. |  .. |
131 | 132 |         return data;
132 | 133 | }
133 | 134 |
134 |     | -static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
    | 135 | +static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu)
135 | 136 | {
136 |     | -        struct cpuacct_usage *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
137 |     | -        int i;
    | 137 | +        u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
    | 138 | +        u64 *cpustat = per_cpu_ptr(ca->cpustat, cpu)->cpustat;
    | 139 | +
    | 140 | +        /* Don't allow to reset global kernel_cpustat */
    | 141 | +        if (ca == &root_cpuacct)
    | 142 | +                return;
138 | 143 |
139 | 144 | #ifndef CONFIG_64BIT
140 | 145 |         /*
 .. |  .. |
142 | 147 |          */
143 | 148 |         raw_spin_lock_irq(&cpu_rq(cpu)->lock);
144 | 149 | #endif
145 |     | -
146 |     | -        for (i = 0; i < CPUACCT_STAT_NSTATS; i++)
147 |     | -                cpuusage->usages[i] = val;
    | 150 | +        *cpuusage = 0;
    | 151 | +        cpustat[CPUTIME_USER] = cpustat[CPUTIME_NICE] = 0;
    | 152 | +        cpustat[CPUTIME_SYSTEM] = cpustat[CPUTIME_IRQ] = 0;
    | 153 | +        cpustat[CPUTIME_SOFTIRQ] = 0;
148 | 154 |
149 | 155 | #ifndef CONFIG_64BIT
150 | 156 |         raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
 .. |  .. |
195 | 201 |                 return -EINVAL;
196 | 202 |
197 | 203 |         for_each_possible_cpu(cpu)
198 |     | -                cpuacct_cpuusage_write(ca, cpu, 0);
    | 204 | +                cpuacct_cpuusage_write(ca, cpu);
199 | 205 |
200 | 206 |         return 0;
201 | 207 | }
 .. |  .. |
242 | 248 |         seq_puts(m, "\n");
243 | 249 |
244 | 250 |         for_each_possible_cpu(cpu) {
245 |     | -                struct cpuacct_usage *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
246 |     | -
247 | 251 |                 seq_printf(m, "%d", cpu);
248 |     | -
249 |     | -                for (index = 0; index < CPUACCT_STAT_NSTATS; index++) {
250 |     | -#ifndef CONFIG_64BIT
251 |     | -                        /*
252 |     | -                         * Take rq->lock to make 64-bit read safe on 32-bit
253 |     | -                         * platforms.
254 |     | -                         */
255 |     | -                        raw_spin_lock_irq(&cpu_rq(cpu)->lock);
256 |     | -#endif
257 |     | -
258 |     | -                        seq_printf(m, " %llu", cpuusage->usages[index]);
259 |     | -
260 |     | -#ifndef CONFIG_64BIT
261 |     | -                        raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
262 |     | -#endif
263 |     | -                }
    | 252 | +                for (index = 0; index < CPUACCT_STAT_NSTATS; index++)
    | 253 | +                        seq_printf(m, " %llu",
    | 254 | +                                   cpuacct_cpuusage_read(ca, cpu, index));
264 | 255 |                 seq_puts(m, "\n");
265 | 256 |         }
266 | 257 |         return 0;
 .. |  .. |
337 | 328 |  */
338 | 329 | void cpuacct_charge(struct task_struct *tsk, u64 cputime)
339 | 330 | {
    | 331 | +        unsigned int cpu = task_cpu(tsk);
340 | 332 |         struct cpuacct *ca;
341 |     | -        int index = CPUACCT_STAT_SYSTEM;
342 |     | -        struct pt_regs *regs = task_pt_regs(tsk);
343 | 333 |
344 |     | -        if (regs && user_mode(regs))
345 |     | -                index = CPUACCT_STAT_USER;
346 |     | -
347 |     | -        rcu_read_lock();
    | 334 | +        lockdep_assert_held(&cpu_rq(cpu)->lock);
348 | 335 |
349 | 336 |         for (ca = task_ca(tsk); ca; ca = parent_ca(ca))
350 |     | -                this_cpu_ptr(ca->cpuusage)->usages[index] += cputime;
351 |     | -
352 |     | -        rcu_read_unlock();
    | 337 | +                *per_cpu_ptr(ca->cpuusage, cpu) += cputime;
353 | 338 | }
354 | 339 |
355 | 340 | /*
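With this hunk, cpuacct_charge() adds to the per-CPU u64 for task_cpu(tsk) under the runqueue lock instead of indexing the old usages[] array under RCU; the accumulated totals still surface through the usual cgroup v1 cpuacct files. A small sketch that reads them from userspace, assuming the controller is mounted at /sys/fs/cgroup/cpuacct (the mount point varies by distribution):

```c
#include <stdio.h>

/* Assumed cgroup v1 mount point; adjust for your system. */
#define CPUACCT_ROOT "/sys/fs/cgroup/cpuacct"

static void dump(const char *name)
{
        char path[256];
        char line[4096];
        FILE *f;

        snprintf(path, sizeof(path), "%s/%s", CPUACCT_ROOT, name);
        f = fopen(path, "r");
        if (!f) {
                perror(path);
                return;
        }
        printf("%s:\n", name);
        while (fgets(line, sizeof(line), f))
                printf("  %s", line);
        fclose(f);
}

int main(void)
{
        dump("cpuacct.usage_percpu"); /* per-CPU totals, in nanoseconds */
        dump("cpuacct.stat");         /* user/system time, in USER_HZ ticks */
        return 0;
}
```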
 .. |  .. |
363 | 348 |
364 | 349 |         rcu_read_lock();
365 | 350 |         for (ca = task_ca(tsk); ca != &root_cpuacct; ca = parent_ca(ca))
366 |     | -                this_cpu_ptr(ca->cpustat)->cpustat[index] += val;
    | 351 | +                __this_cpu_add(ca->cpustat->cpustat[index], val);
367 | 352 |         rcu_read_unlock();
368 | 353 | }
369 | 354 |