```diff
...
         raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_rstat_cpu_lock,
                                                cpu);
         struct cgroup *pos = NULL;
+        unsigned long flags;
 
-        raw_spin_lock(cpu_lock);
+        raw_spin_lock_irqsave(cpu_lock, flags);
         while ((pos = cgroup_rstat_cpu_pop_updated(pos, cgrp, cpu))) {
                 struct cgroup_subsys_state *css;
 
...
                         css->ss->css_rstat_flush(css, cpu);
                 rcu_read_unlock();
         }
-        raw_spin_unlock(cpu_lock);
+        raw_spin_unlock_irqrestore(cpu_lock, flags);
 
         /* if @may_sleep, play nice and yield if necessary */
         if (may_sleep && (need_resched() ||
```