```diff
@@ -23,7 +23,7 @@
 #include <linux/threads.h>
 #include <trace/hooks/cpufreq.h>
 
-static DEFINE_RAW_SPINLOCK(task_time_in_state_lock); /* task->time_in_state */
+static DEFINE_SPINLOCK(task_time_in_state_lock); /* task->time_in_state */
 
 /**
  * struct cpu_freqs - per-cpu frequency information
@@ -47,9 +47,9 @@
 {
 	unsigned long flags;
 
-	raw_spin_lock_irqsave(&task_time_in_state_lock, flags);
+	spin_lock_irqsave(&task_time_in_state_lock, flags);
 	p->time_in_state = NULL;
-	raw_spin_unlock_irqrestore(&task_time_in_state_lock, flags);
+	spin_unlock_irqrestore(&task_time_in_state_lock, flags);
 	p->max_state = 0;
 }
 
@@ -64,9 +64,9 @@
 	if (!temp)
 		return;
 
-	raw_spin_lock_irqsave(&task_time_in_state_lock, flags);
+	spin_lock_irqsave(&task_time_in_state_lock, flags);
 	p->time_in_state = temp;
-	raw_spin_unlock_irqrestore(&task_time_in_state_lock, flags);
+	spin_unlock_irqrestore(&task_time_in_state_lock, flags);
 	p->max_state = max_state;
 }
 
@@ -94,10 +94,10 @@
 	if (!p->time_in_state)
 		return;
 
-	raw_spin_lock_irqsave(&task_time_in_state_lock, flags);
+	spin_lock_irqsave(&task_time_in_state_lock, flags);
 	temp = p->time_in_state;
 	p->time_in_state = NULL;
-	raw_spin_unlock_irqrestore(&task_time_in_state_lock, flags);
+	spin_unlock_irqrestore(&task_time_in_state_lock, flags);
 	kfree(temp);
 }
 
@@ -110,7 +110,7 @@
 	struct cpu_freqs *freqs;
 	struct cpu_freqs *last_freqs = NULL;
 
-	raw_spin_lock_irqsave(&task_time_in_state_lock, flags);
+	spin_lock_irqsave(&task_time_in_state_lock, flags);
 	for_each_possible_cpu(cpu) {
 		freqs = all_freqs[cpu];
 		if (!freqs || freqs == last_freqs)
@@ -127,7 +127,7 @@
 				   (unsigned long)nsec_to_clock_t(cputime));
 		}
 	}
-	raw_spin_unlock_irqrestore(&task_time_in_state_lock, flags);
+	spin_unlock_irqrestore(&task_time_in_state_lock, flags);
 	return 0;
 }
 
@@ -142,11 +142,11 @@
 
 	state = freqs->offset + READ_ONCE(freqs->last_index);
 
-	raw_spin_lock_irqsave(&task_time_in_state_lock, flags);
+	spin_lock_irqsave(&task_time_in_state_lock, flags);
 	if ((state < p->max_state || !cpufreq_task_times_realloc_locked(p)) &&
 	    p->time_in_state)
 		p->time_in_state[state] += cputime;
-	raw_spin_unlock_irqrestore(&task_time_in_state_lock, flags);
+	spin_unlock_irqrestore(&task_time_in_state_lock, flags);
 
 	trace_android_vh_cpufreq_acct_update_power(cputime, p, state);
 }
```
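For context: the patch swaps `task_time_in_state_lock` from `DEFINE_RAW_SPINLOCK`/`raw_spin_lock_irqsave()` to `DEFINE_SPINLOCK`/`spin_lock_irqsave()`. The two APIs are used identically at every call site shown; the relevant difference is that on PREEMPT_RT kernels `spinlock_t` is implemented as a sleeping (rt_mutex-based) lock, while `raw_spinlock_t` remains a true spinning lock. The commit message is not shown here, so the motivation is not asserted. Below is a minimal, self-contained sketch of the same publish/detach-under-lock idiom the patch keeps around `p->time_in_state`; the module and all `demo_*` names are hypothetical and only illustrate the locking pattern, they are not part of the patch.

```c
/* Sketch only: a private spinlock guarding a dynamically allocated buffer,
 * taken with the irqsave variants as in the patch above. Hypothetical names. */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);	/* guards demo_state */
static u64 *demo_state;

static int __init demo_init(void)
{
	unsigned long flags;
	u64 *temp = kcalloc(4, sizeof(*temp), GFP_KERNEL);

	if (!temp)
		return -ENOMEM;

	/* Publish the buffer under the lock, as the patch does for
	 * p->time_in_state in cpufreq_task_times_alloc(). */
	spin_lock_irqsave(&demo_lock, flags);
	demo_state = temp;
	spin_unlock_irqrestore(&demo_lock, flags);
	return 0;
}

static void __exit demo_exit(void)
{
	unsigned long flags;
	u64 *temp;

	/* Detach the pointer under the lock, free it after dropping the lock,
	 * mirroring cpufreq_task_times_exit(). */
	spin_lock_irqsave(&demo_lock, flags);
	temp = demo_state;
	demo_state = NULL;
	spin_unlock_irqrestore(&demo_lock, flags);
	kfree(temp);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
```

Detaching the pointer inside the critical section and calling `kfree()` only after releasing the lock keeps the hold time short and avoids freeing memory while interrupts are disabled, which is the same structure visible in the exit path of the diff.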