@@ -47,13 +47,12 @@
 }
 
 /*
- * Called after incrementing preempt_count on {soft,}irq_enter
+ * Called before incrementing preempt_count on {soft,}irq_enter
  * and before decrementing preempt_count on {soft,}irq_exit.
  */
-void irqtime_account_irq(struct task_struct *curr, unsigned int offset)
+void irqtime_account_irq(struct task_struct *curr)
 {
 	struct irqtime *irqtime = this_cpu_ptr(&cpu_irqtime);
-	unsigned int pc;
 	s64 delta;
 	int cpu;
 
@@ -63,7 +62,6 @@
 	cpu = smp_processor_id();
 	delta = sched_clock_cpu(cpu) - irqtime->irq_start_time;
 	irqtime->irq_start_time += delta;
-	pc = irq_count() - offset;
 
 	/*
 	 * We do not account for softirq time from ksoftirqd here.
@@ -71,9 +69,9 @@
 	 * in that case, so as not to confuse scheduler with a special task
 	 * that do not consume any time, but still wants to run.
 	 */
-	if (pc & HARDIRQ_MASK)
+	if (hardirq_count())
 		irqtime_account_delta(irqtime, delta, CPUTIME_IRQ);
-	else if ((pc & SOFTIRQ_OFFSET) && curr != this_cpu_ksoftirqd())
+	else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
 		irqtime_account_delta(irqtime, delta, CPUTIME_SOFTIRQ);
 
 	trace_android_rvh_account_irq(curr, cpu, delta);
@@ -432,21 +430,24 @@
 }
 # endif
 
-void vtime_account_irq(struct task_struct *tsk, unsigned int offset)
+/*
+ * Archs that account the whole time spent in the idle task
+ * (outside irq) as idle time can rely on this and just implement
+ * vtime_account_kernel() and vtime_account_idle(). Archs that
+ * have other meaning of the idle time (s390 only includes the
+ * time spent by the CPU when it's in low power mode) must override
+ * vtime_account().
+ */
+#ifndef __ARCH_HAS_VTIME_ACCOUNT
+void vtime_account_irq_enter(struct task_struct *tsk)
 {
-	unsigned int pc = irq_count() - offset;
-
-	if (pc & HARDIRQ_OFFSET) {
-		vtime_account_hardirq(tsk);
-	} else if (pc & SOFTIRQ_OFFSET) {
-		vtime_account_softirq(tsk);
-	} else if (!IS_ENABLED(CONFIG_HAVE_VIRT_CPU_ACCOUNTING_IDLE) &&
-		   is_idle_task(tsk)) {
+	if (!in_interrupt() && is_idle_task(tsk))
 		vtime_account_idle(tsk);
-	} else {
+	else
 		vtime_account_kernel(tsk);
-	}
 }
+EXPORT_SYMBOL_GPL(vtime_account_irq_enter);
+#endif /* __ARCH_HAS_VTIME_ACCOUNT */
 
 void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,
 		    u64 *ut, u64 *st)