```diff
 #define ASID_FIRST_VERSION (1ULL << ASID_BITS)
 #define NUM_USER_ASIDS ASID_FIRST_VERSION
 
-static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
+static DEFINE_HARD_SPINLOCK(cpu_asid_lock);
 static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
 static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
 
```
[...]

```diff
 void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
 {
 	unsigned long flags;
-	unsigned int cpu = smp_processor_id();
+	unsigned int cpu = raw_smp_processor_id();
+	bool need_flush;
 	u64 asid;
+
+	WARN_ON_ONCE(dovetail_debug() && !hard_irqs_disabled());
 
 	if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
 		__check_vmalloc_seq(mm);
```
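The new assertion fires only when Dovetail debug checks are enabled and hard interrupts are unexpectedly still on at this point. As a rough illustration of the warn-once pattern it relies on, here is a minimal user-space sketch; the real kernel `WARN_ON_ONCE()` is more elaborate, and the `dovetail_debug()`/`hard_irqs_disabled()` stubs below are placeholders rather than the actual helpers.

```c
/* User-space sketch of a WARN_ON_ONCE()-style check: complain once, keep going. */
#include <stdio.h>
#include <stdbool.h>

#define WARN_ON_ONCE(cond)						\
	({								\
		static bool __warned;					\
		bool __cond = (cond);					\
		if (__cond && !__warned) {				\
			__warned = true;				\
			fprintf(stderr, "WARNING at %s:%d: %s\n",	\
				__FILE__, __LINE__, #cond);		\
		}							\
		__cond;							\
	})

/* stand-ins for the real Dovetail helpers, hypothetical here */
static bool dovetail_debug(void)     { return true; }
static bool hard_irqs_disabled(void) { return false; }

int main(void)
{
	for (int i = 0; i < 3; i++)
		WARN_ON_ONCE(dovetail_debug() && !hard_irqs_disabled());
	/* the warning above is printed only on the first iteration */
	return 0;
}
```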
[...]

```diff
 		atomic64_set(&mm->context.id, asid);
 	}
 
-	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
-		local_flush_bp_all();
-		local_flush_tlb_all();
-	}
-
+	need_flush = cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending);
 	atomic64_set(&per_cpu(active_asids, cpu), asid);
 	cpumask_set_cpu(cpu, mm_cpumask(mm));
 	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
 
+	if (need_flush) {
+		local_flush_bp_all();
+		local_flush_tlb_all();
+	}
+
 switch_mm_fastpath:
 	cpu_switch_mm(mm->pgd, mm);
 }
```
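The second half of the hunk keeps the test-and-clear of `tlb_flush_pending` under `cpu_asid_lock`, but defers the actual `local_flush_bp_all()`/`local_flush_tlb_all()` calls until after the unlock, so the critical section stays short. A minimal user-space sketch of that pattern follows, with a pthread mutex standing in for the spinlock and made-up names (`tlb_flush_pending_bit`, `do_local_flush`); it is not the kernel code.

```c
/* Sketch: decide under the lock, do the expensive work after releasing it. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t asid_lock = PTHREAD_MUTEX_INITIALIZER;
static bool tlb_flush_pending_bit;	/* stands in for this CPU's pending bit */

static void do_local_flush(void)
{
	/* placeholder for local_flush_bp_all()/local_flush_tlb_all() */
	puts("flushing local TLB");
}

static void switch_context(void)
{
	bool need_flush;

	pthread_mutex_lock(&asid_lock);
	/* test-and-clear under the lock, but do not flush yet */
	need_flush = tlb_flush_pending_bit;
	tlb_flush_pending_bit = false;
	/* ... update active ASID, mm_cpumask, etc. ... */
	pthread_mutex_unlock(&asid_lock);

	/* the flush now runs outside the critical section */
	if (need_flush)
		do_local_flush();
}

int main(void)
{
	/* single-threaded demo for brevity */
	tlb_flush_pending_bit = true;
	switch_context();	/* flushes once */
	switch_context();	/* nothing pending, no flush */
	return 0;
}
```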