```diff
@@ -1,72 +1,144 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (C) 2017 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 #ifndef __ASM_DAIFFLAGS_H
 #define __ASM_DAIFFLAGS_H
 
 #include <linux/irqflags.h>
 
+#include <asm/arch_gicv3.h>
+#include <asm/barrier.h>
+#include <asm/cpufeature.h>
+#include <asm/ptrace.h>
+
 #define DAIF_PROCCTX		0
 #define DAIF_PROCCTX_NOIRQ	PSR_I_BIT
+#define DAIF_ERRCTX		(PSR_I_BIT | PSR_A_BIT)
+#define DAIF_MASK		(PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)
+
 
 /* mask/save/unmask/restore all exceptions, including interrupts. */
 static inline void local_daif_mask(void)
 {
+	WARN_ON(system_has_prio_mask_debugging() &&
+		(read_sysreg_s(SYS_ICC_PMR_EL1) == (GIC_PRIO_IRQOFF |
+						    GIC_PRIO_PSR_I_SET)));
+
 	asm volatile(
 		"msr	daifset, #0xf		// local_daif_mask\n"
 		:
 		:
 		: "memory");
+
+	/* Don't really care for a dsb here, we don't intend to enable IRQs */
+	if (system_uses_irq_prio_masking())
+		gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
+
 	trace_hardirqs_off();
+}
+
+static inline unsigned long local_daif_save_flags(void)
+{
+	unsigned long flags;
+
+	flags = read_sysreg(daif);
+
+	if (system_uses_irq_prio_masking()) {
+		/* If IRQs are masked with PMR, reflect it in the flags */
+		if (read_sysreg_s(SYS_ICC_PMR_EL1) != GIC_PRIO_IRQON)
+			flags |= PSR_I_BIT;
+	}
+
+	return flags;
 }
 
 static inline unsigned long local_daif_save(void)
 {
 	unsigned long flags;
 
-	asm volatile(
-		"mrs	%0, daif		// local_daif_save\n"
-		: "=r" (flags)
-		:
-		: "memory");
+	flags = local_daif_save_flags();
+
 	local_daif_mask();
 
 	return flags;
 }
 
-static inline void local_daif_unmask(void)
-{
-	trace_hardirqs_on();
-	asm volatile(
-		"msr	daifclr, #0xf		// local_daif_unmask"
-		:
-		:
-		: "memory");
-}
-
 static inline void local_daif_restore(unsigned long flags)
 {
-	if (!arch_irqs_disabled_flags(flags))
+	bool irq_disabled = flags & PSR_I_BIT;
+
+	WARN_ON(system_has_prio_mask_debugging() &&
+		!(read_sysreg(daif) & PSR_I_BIT));
+
+	if (!irq_disabled) {
 		trace_hardirqs_on();
-	asm volatile(
-		"msr	daif, %0		// local_daif_restore"
-		:
-		: "r" (flags)
-		: "memory");
-	if (arch_irqs_disabled_flags(flags))
+
+		if (system_uses_irq_prio_masking()) {
+			gic_write_pmr(GIC_PRIO_IRQON);
+			pmr_sync();
+		}
+	} else if (system_uses_irq_prio_masking()) {
+		u64 pmr;
+
+		if (!(flags & PSR_A_BIT)) {
+			/*
+			 * If interrupts are disabled but we can take
+			 * asynchronous errors, we can take NMIs
+			 */
+			flags &= ~PSR_I_BIT;
+			pmr = GIC_PRIO_IRQOFF;
+		} else {
+			pmr = GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET;
+		}
+
+		/*
+		 * There has been concern that the write to daif
+		 * might be reordered before this write to PMR.
+		 * From the ARM ARM DDI 0487D.a, section D1.7.1
+		 * "Accessing PSTATE fields":
+		 *   Writes to the PSTATE fields have side-effects on
+		 *   various aspects of the PE operation. All of these
+		 *   side-effects are guaranteed:
+		 *   - Not to be visible to earlier instructions in
+		 *     the execution stream.
+		 *   - To be visible to later instructions in the
+		 *     execution stream
+		 *
+		 * Also, writes to PMR are self-synchronizing, so no
+		 * interrupts with a lower priority than PMR is signaled
+		 * to the PE after the write.
+		 *
+		 * So we don't need additional synchronization here.
+		 */
+		gic_write_pmr(pmr);
+	}
+
+	write_sysreg(flags, daif);
+
+	if (irq_disabled)
 		trace_hardirqs_off();
 }
 
+/*
+ * Called by synchronous exception handlers to restore the DAIF bits that were
+ * modified by taking an exception.
+ */
+static inline void local_daif_inherit(struct pt_regs *regs)
+{
+	unsigned long flags = regs->pstate & DAIF_MASK;
+
+	if (interrupts_enabled(regs))
+		trace_hardirqs_on();
+
+	if (system_uses_irq_prio_masking())
+		gic_write_pmr(regs->pmr_save);
+
+	/*
+	 * We can't use local_daif_restore(regs->pstate) here as
+	 * system_has_prio_mask_debugging() won't restore the I bit if it can
+	 * use the pmr instead.
+	 */
+	write_sysreg(flags, daif);
+}
 #endif
```
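
For context, a minimal usage sketch of the helpers defined above. It is not part of the patch; the function names below are hypothetical, and only the symbols introduced by this header (`local_daif_save()`, `local_daif_restore()`, `DAIF_ERRCTX`) are real:

```c
/*
 * Illustrative only: hypothetical callers of the daifflags.h helpers.
 * example_critical_section() and example_error_entry() are made-up names,
 * not functions added by this patch.
 */
#include <asm/daifflags.h>

static void example_critical_section(void)
{
	unsigned long flags;

	/* Mask all exceptions (D, A, I, F) and remember the previous state */
	flags = local_daif_save();

	/* ... work that must not be interrupted by IRQs, SErrors or debug ... */

	/* Restore DAIF (and, with GIC priority masking, PMR) as they were */
	local_daif_restore(flags);
}

static void example_error_entry(void)
{
	/* Keep IRQs and SErrors masked, but allow debug exceptions and FIQ */
	local_daif_restore(DAIF_ERRCTX);
}
```

The save/restore pair is the intended pattern here: as the new PMR handling in `local_daif_restore()` above shows, the helper keeps ICC_PMR_EL1 consistent with the saved flags when GIC priority masking is in use, so callers never need to touch PMR directly.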