2024-11-01 2f529f9b558ca1c1bd74be7437a84e4711743404
--- a/kernel/arch/arm/common/mcpm_entry.c
+++ b/kernel/arch/arm/common/mcpm_entry.c
@@ -206,7 +206,7 @@
 	 * Since this is called with IRQs enabled, and no arch_spin_lock_irq
 	 * variant exists, we need to disable IRQs manually here.
 	 */
-	local_irq_disable();
+	hard_local_irq_disable();
 	arch_spin_lock(&mcpm_lock);
 
 	cpu_is_down = !mcpm_cpu_use_count[cluster][cpu];
@@ -230,7 +230,7 @@
 	ret = platform_ops->cpu_powerup(cpu, cluster);
 
 	arch_spin_unlock(&mcpm_lock);
-	local_irq_enable();
+	hard_local_irq_enable();
 	return ret;
 }
 
@@ -349,7 +349,7 @@
 	mpidr = read_cpuid_mpidr();
 	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
 	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
-	local_irq_save(flags);
+	flags = hard_local_irq_save();
 	arch_spin_lock(&mcpm_lock);
 
 	cpu_was_down = !mcpm_cpu_use_count[cluster][cpu];
@@ -363,7 +363,7 @@
 	platform_ops->cpu_is_up(cpu, cluster);
 
 	arch_spin_unlock(&mcpm_lock);
-	local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 
 	return 0;
 }
@@ -402,7 +402,7 @@
 	 * infrastructure. Let's play it safe by using cpu_pm_enter()
 	 * in case the CPU init code path resets the VFP or similar.
 	 */
-	local_irq_disable();
+	hard_local_irq_disable();
 	local_fiq_disable();
 	ret = cpu_pm_enter();
 	if (!ret) {
@@ -410,7 +410,7 @@
 		cpu_pm_exit();
 	}
 	local_fiq_enable();
-	local_irq_enable();
+	hard_local_irq_enable();
 	if (ret)
 		pr_err("%s returned %d\n", __func__, ret);
 	return ret;
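
Every hunk above applies the same substitution: the stock local_irq_*() helpers are replaced by their hard_local_irq_*() counterparts, which mask interrupts in the CPU itself rather than only in the kernel's in-band/virtualized IRQ state, as is done in interrupt-pipeline (I-pipe/Dovetail-style) trees, which this kernel presumably carries. A minimal sketch of the resulting locking pattern, assuming such a tree; example_critical_section() is a hypothetical name for illustration, not a function in mcpm_entry.c:

/*
 * Hypothetical sketch only: hard_local_irq_save() masks IRQs at the CPU
 * level, so the section below cannot be preempted even by pipelined
 * out-of-band interrupt handlers, while the arch spinlock serializes
 * against other CPUs touching the shared MCPM state.
 */
static void example_critical_section(void)
{
	unsigned long flags;

	flags = hard_local_irq_save();	/* CPU-level IRQ masking, returns flags */
	arch_spin_lock(&mcpm_lock);

	/* ... operate on mcpm_cpu_use_count[][] or other shared MCPM state ... */

	arch_spin_unlock(&mcpm_lock);
	hard_local_irq_restore(flags);
}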