commit 2f529f9b558ca1c1bd74be7437a84e4711743404 (2024-11-01)

--- a/kernel/arch/arm/mm/context.c
+++ b/kernel/arch/arm/mm/context.c
@@ -39,7 +39,7 @@
 #define ASID_FIRST_VERSION	(1ULL << ASID_BITS)
 #define NUM_USER_ASIDS		ASID_FIRST_VERSION
 
-static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
+static DEFINE_HARD_SPINLOCK(cpu_asid_lock);
 static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
 static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
 
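
Note on the hunk above: a lock declared with DEFINE_HARD_SPINLOCK is still operated through the regular raw_spin_* calls (the unchanged raw_spin_unlock_irqrestore() in the last hunk confirms this), but under Dovetail acquiring it also disables interrupts at the hardware level, masking out-of-band activity as well. A minimal usage sketch, assuming those Dovetail semantics; example_lock and example_update() are hypothetical names, not part of the patch:

static DEFINE_HARD_SPINLOCK(example_lock);	/* hypothetical example */

static void example_update(void)
{
	unsigned long flags;

	/*
	 * Same API as a raw spinlock, but IRQs are disabled in the
	 * CPU for the whole section, so neither in-band nor
	 * out-of-band code can preempt it; keep it short.
	 */
	raw_spin_lock_irqsave(&example_lock, flags);
	/* ... update shared state ... */
	raw_spin_unlock_irqrestore(&example_lock, flags);
}
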
@@ -237,8 +237,11 @@
 void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
 {
 	unsigned long flags;
-	unsigned int cpu = smp_processor_id();
+	unsigned int cpu = raw_smp_processor_id();
+	bool need_flush;
 	u64 asid;
+
+	WARN_ON_ONCE(dovetail_debug() && !hard_irqs_disabled());
 
 	if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
 		__check_vmalloc_seq(mm);
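
The raw_smp_processor_id() switch and the new WARN_ON_ONCE() encode the entry contract: the function is now expected to run with interrupts hard-disabled, which also pins the task to its CPU and makes the raw_ variant safe, and Dovetail's debug mode asserts this on entry. A sketch of a call site honoring that contract, assuming Dovetail's hard_local_irq_save()/hard_local_irq_restore() helpers; switch_mm_hard() is a hypothetical caller, not part of the patch:

/* Hypothetical caller; illustrates the asserted entry state. */
static void switch_mm_hard(struct mm_struct *mm, struct task_struct *tsk)
{
	unsigned long flags;

	flags = hard_local_irq_save();	/* satisfies the WARN_ON_ONCE() */
	check_and_switch_context(mm, tsk);
	hard_local_irq_restore(flags);
}
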
@@ -263,15 +266,16 @@
 		atomic64_set(&mm->context.id, asid);
 	}
 
-	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
-		local_flush_bp_all();
-		local_flush_tlb_all();
-	}
-
+	need_flush = cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending);
 	atomic64_set(&per_cpu(active_asids, cpu), asid);
 	cpumask_set_cpu(cpu, mm_cpumask(mm));
 	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
 
+	if (need_flush) {
+		local_flush_bp_all();
+		local_flush_tlb_all();
+	}
+
 switch_mm_fastpath:
 	cpu_switch_mm(mm->pgd, mm);
 }
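
Taken together, the last two hunks defer the TLB and branch-predictor flush until after cpu_asid_lock is dropped: only the flush decision (cpumask_test_and_clear_cpu()) stays under the now-hard lock, so other CPUs no longer spin on it with interrupts hard-disabled while a potentially slow flush runs. A sketch of the resulting slow path, reconstructed from the hunks above plus the unchanged mainline lines around them (surrounding code elided):

	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
	/* Re-check the ASID generation under the (now hard) lock. */
	asid = atomic64_read(&mm->context.id);
	if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
		asid = new_context(mm, cpu);
		atomic64_set(&mm->context.id, asid);
	}

	/* Record the flush decision, but defer the flush itself. */
	need_flush = cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending);
	atomic64_set(&per_cpu(active_asids, cpu), asid);
	cpumask_set_cpu(cpu, mm_cpumask(mm));
	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);

	/* Flush outside the hard critical section. */
	if (need_flush) {
		local_flush_bp_all();
		local_flush_tlb_all();
	}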