+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (C) 2012 Regents of the University of California
  * Copyright (C) 2017 SiFive
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
 */
 
 #include <linux/init.h>
...
 #include <asm/thread_info.h>
 #include <asm/asm-offsets.h>
 
-	.text
-	.altmacro
+#if !IS_ENABLED(CONFIG_PREEMPTION)
+.set resume_kernel, restore_all
+#endif
 
-/*
- * Prepares to enter a system call or exception by saving all registers to the
- * stack.
- */
-	.macro SAVE_ALL
-	LOCAL _restore_kernel_tpsp
-	LOCAL _save_context
-
+ENTRY(handle_exception)
 	/*
 	 * If coming from userspace, preserve the user thread pointer and load
-	 * the kernel thread pointer. If we came from the kernel, sscratch
-	 * will contain 0, and we should continue on the current TP.
+	 * the kernel thread pointer. If we came from the kernel, the scratch
+	 * register will contain 0, and we should continue on the current TP.
 	 */
-	csrrw tp, sscratch, tp
+	csrrw tp, CSR_SCRATCH, tp
 	bnez tp, _save_context
 
 _restore_kernel_tpsp:
-	csrr tp, sscratch
+	csrr tp, CSR_SCRATCH
 	REG_S sp, TASK_TI_KERNEL_SP(tp)
 _save_context:
 	REG_S sp, TASK_TI_USER_SP(tp)
...
 	li t0, SR_SUM | SR_FS
 
 	REG_L s0, TASK_TI_USER_SP(tp)
-	csrrc s1, sstatus, t0
-	csrr s2, sepc
-	csrr s3, sbadaddr
-	csrr s4, scause
-	csrr s5, sscratch
+	csrrc s1, CSR_STATUS, t0
+	csrr s2, CSR_EPC
+	csrr s3, CSR_TVAL
+	csrr s4, CSR_CAUSE
+	csrr s5, CSR_SCRATCH
 	REG_S s0, PT_SP(sp)
-	REG_S s1, PT_SSTATUS(sp)
-	REG_S s2, PT_SEPC(sp)
-	REG_S s3, PT_SBADADDR(sp)
-	REG_S s4, PT_SCAUSE(sp)
+	REG_S s1, PT_STATUS(sp)
+	REG_S s2, PT_EPC(sp)
+	REG_S s3, PT_BADADDR(sp)
+	REG_S s4, PT_CAUSE(sp)
 	REG_S s5, PT_TP(sp)
-	.endm
 
-/*
- * Prepares to return from a system call or exception by restoring all
- * registers from the stack.
- */
-	.macro RESTORE_ALL
-	REG_L a0, PT_SSTATUS(sp)
-	REG_L a2, PT_SEPC(sp)
-	csrw sstatus, a0
-	csrw sepc, a2
+	/*
+	 * Set the scratch register to 0, so that if a recursive exception
+	 * occurs, the exception vector knows it came from the kernel
+	 */
+	csrw CSR_SCRATCH, x0
+
+	/* Load the global pointer */
+.option push
+.option norelax
+	la gp, __global_pointer$
+.option pop
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+	call __trace_hardirqs_off
+#endif
+
+#ifdef CONFIG_CONTEXT_TRACKING
+	/* If previous state is in user mode, call context_tracking_user_exit. */
+	li a0, SR_PP
+	and a0, s1, a0
+	bnez a0, skip_context_tracking
+	call context_tracking_user_exit
+skip_context_tracking:
+#endif
+
+	/*
+	 * MSB of cause differentiates between
+	 * interrupts and exceptions
+	 */
+	bge s4, zero, 1f
+
+	la ra, ret_from_exception
+
+	/* Handle interrupts */
+	move a0, sp /* pt_regs */
+	la a1, handle_arch_irq
+	REG_L a1, (a1)
+	jr a1
+1:
+	/*
+	 * Exceptions run with interrupts enabled or disabled depending on the
+	 * state of SR_PIE in m/sstatus.
+	 */
+	andi t0, s1, SR_PIE
+	beqz t0, 1f
+#ifdef CONFIG_TRACE_IRQFLAGS
+	call __trace_hardirqs_on
+#endif
+	csrs CSR_STATUS, SR_IE
+
+1:
+	la ra, ret_from_exception
+	/* Handle syscalls */
+	li t0, EXC_SYSCALL
+	beq s4, t0, handle_syscall
+
+	/* Handle other exceptions */
+	slli t0, s4, RISCV_LGPTR
+	la t1, excp_vect_table
+	la t2, excp_vect_table_end
+	move a0, sp /* pt_regs */
+	add t0, t1, t0
+	/* Check if exception code lies within bounds */
+	bgeu t0, t2, 1f
+	REG_L t0, 0(t0)
+	jr t0
+1:
+	tail do_trap_unknown
+
+handle_syscall:
+#ifdef CONFIG_RISCV_M_MODE
+	/*
+	 * When running is M-Mode (no MMU config), MPIE does not get set.
+	 * As a result, we need to force enable interrupts here because
+	 * handle_exception did not do set SR_IE as it always sees SR_PIE
+	 * being cleared.
+	 */
+	csrs CSR_STATUS, SR_IE
+#endif
+#if defined(CONFIG_TRACE_IRQFLAGS) || defined(CONFIG_CONTEXT_TRACKING)
+	/* Recover a0 - a7 for system calls */
+	REG_L a0, PT_A0(sp)
+	REG_L a1, PT_A1(sp)
+	REG_L a2, PT_A2(sp)
+	REG_L a3, PT_A3(sp)
+	REG_L a4, PT_A4(sp)
+	REG_L a5, PT_A5(sp)
+	REG_L a6, PT_A6(sp)
+	REG_L a7, PT_A7(sp)
+#endif
+	/* save the initial A0 value (needed in signal handlers) */
+	REG_S a0, PT_ORIG_A0(sp)
+	/*
+	 * Advance SEPC to avoid executing the original
+	 * scall instruction on sret
+	 */
+	addi s2, s2, 0x4
+	REG_S s2, PT_EPC(sp)
+	/* Trace syscalls, but only if requested by the user. */
+	REG_L t0, TASK_TI_FLAGS(tp)
+	andi t0, t0, _TIF_SYSCALL_WORK
+	bnez t0, handle_syscall_trace_enter
+check_syscall_nr:
+	/* Check to make sure we don't jump to a bogus syscall number. */
+	li t0, __NR_syscalls
+	la s0, sys_ni_syscall
+	/*
+	 * Syscall number held in a7.
+	 * If syscall number is above allowed value, redirect to ni_syscall.
+	 */
+	bgeu a7, t0, 1f
+	/* Call syscall */
+	la s0, sys_call_table
+	slli t0, a7, RISCV_LGPTR
+	add s0, s0, t0
+	REG_L s0, 0(s0)
+1:
+	jalr s0
+
+ret_from_syscall:
+	/* Set user a0 to kernel a0 */
+	REG_S a0, PT_A0(sp)
+	/*
+	 * We didn't execute the actual syscall.
+	 * Seccomp already set return value for the current task pt_regs.
+	 * (If it was configured with SECCOMP_RET_ERRNO/TRACE)
+	 */
+ret_from_syscall_rejected:
+	/* Trace syscalls, but only if requested by the user. */
+	REG_L t0, TASK_TI_FLAGS(tp)
+	andi t0, t0, _TIF_SYSCALL_WORK
+	bnez t0, handle_syscall_trace_exit
+
+ret_from_exception:
+	REG_L s0, PT_STATUS(sp)
+	csrc CSR_STATUS, SR_IE
+#ifdef CONFIG_TRACE_IRQFLAGS
+	call __trace_hardirqs_off
+#endif
+#ifdef CONFIG_RISCV_M_MODE
+	/* the MPP value is too large to be used as an immediate arg for addi */
+	li t0, SR_MPP
+	and s0, s0, t0
+#else
+	andi s0, s0, SR_SPP
+#endif
+	bnez s0, resume_kernel
+
+resume_userspace:
+	/* Interrupts must be disabled here so flags are checked atomically */
+	REG_L s0, TASK_TI_FLAGS(tp) /* current_thread_info->flags */
+	andi s1, s0, _TIF_WORK_MASK
+	bnez s1, work_pending
+
+#ifdef CONFIG_CONTEXT_TRACKING
+	call context_tracking_user_enter
+#endif
+
+	/* Save unwound kernel stack pointer in thread_info */
+	addi s0, sp, PT_SIZE_ON_STACK
+	REG_S s0, TASK_TI_KERNEL_SP(tp)
+
+	/*
+	 * Save TP into the scratch register , so we can find the kernel data
+	 * structures again.
+	 */
+	csrw CSR_SCRATCH, tp
+
+restore_all:
+#ifdef CONFIG_TRACE_IRQFLAGS
+	REG_L s1, PT_STATUS(sp)
+	andi t0, s1, SR_PIE
+	beqz t0, 1f
+	call __trace_hardirqs_on
+	j 2f
+1:
+	call __trace_hardirqs_off
+2:
+#endif
+	REG_L a0, PT_STATUS(sp)
+	/*
+	 * The current load reservation is effectively part of the processor's
+	 * state, in the sense that load reservations cannot be shared between
+	 * different hart contexts. We can't actually save and restore a load
+	 * reservation, so instead here we clear any existing reservation --
+	 * it's always legal for implementations to clear load reservations at
+	 * any point (as long as the forward progress guarantee is kept, but
+	 * we'll ignore that here).
+	 *
+	 * Dangling load reservations can be the result of taking a trap in the
+	 * middle of an LR/SC sequence, but can also be the result of a taken
+	 * forward branch around an SC -- which is how we implement CAS. As a
+	 * result we need to clear reservations between the last CAS and the
+	 * jump back to the new context. While it is unlikely the store
+	 * completes, implementations are allowed to expand reservations to be
+	 * arbitrarily large.
+	 */
+	REG_L a2, PT_EPC(sp)
+	REG_SC x0, a2, PT_EPC(sp)
+
+	csrw CSR_STATUS, a0
+	csrw CSR_EPC, a2
 
 	REG_L x1, PT_RA(sp)
 	REG_L x3, PT_GP(sp)
...
 	REG_L x31, PT_T6(sp)
 
 	REG_L x2, PT_SP(sp)
-	.endm
 
-ENTRY(handle_exception)
-	SAVE_ALL
-
-	/*
-	 * Set sscratch register to 0, so that if a recursive exception
-	 * occurs, the exception vector knows it came from the kernel
-	 */
-	csrw sscratch, x0
-
-	/* Load the global pointer */
-.option push
-.option norelax
-	la gp, __global_pointer$
-.option pop
-
-	la ra, ret_from_exception
-	/*
-	 * MSB of cause differentiates between
-	 * interrupts and exceptions
-	 */
-	bge s4, zero, 1f
-
-	/* Handle interrupts */
-	move a0, sp /* pt_regs */
-	move a1, s4 /* scause */
-	tail do_IRQ
-1:
-	/* Exceptions run with interrupts enabled or disabled
-	   depending on the state of sstatus.SR_SPIE */
-	andi t0, s1, SR_SPIE
-	beqz t0, 1f
-	csrs sstatus, SR_SIE
-
-1:
-	/* Handle syscalls */
-	li t0, EXC_SYSCALL
-	beq s4, t0, handle_syscall
-
-	/* Handle other exceptions */
-	slli t0, s4, RISCV_LGPTR
-	la t1, excp_vect_table
-	la t2, excp_vect_table_end
-	move a0, sp /* pt_regs */
-	add t0, t1, t0
-	/* Check if exception code lies within bounds */
-	bgeu t0, t2, 1f
-	REG_L t0, 0(t0)
-	jr t0
-1:
-	tail do_trap_unknown
-
-handle_syscall:
-	/* save the initial A0 value (needed in signal handlers) */
-	REG_S a0, PT_ORIG_A0(sp)
-	/*
-	 * Advance SEPC to avoid executing the original
-	 * scall instruction on sret
-	 */
-	addi s2, s2, 0x4
-	REG_S s2, PT_SEPC(sp)
-	/* Trace syscalls, but only if requested by the user. */
-	REG_L t0, TASK_TI_FLAGS(tp)
-	andi t0, t0, _TIF_SYSCALL_TRACE
-	bnez t0, handle_syscall_trace_enter
-check_syscall_nr:
-	/* Check to make sure we don't jump to a bogus syscall number. */
-	li t0, __NR_syscalls
-	la s0, sys_ni_syscall
-	/* Syscall number held in a7 */
-	bgeu a7, t0, 1f
-	la s0, sys_call_table
-	slli t0, a7, RISCV_LGPTR
-	add s0, s0, t0
-	REG_L s0, 0(s0)
-1:
-	jalr s0
-
-ret_from_syscall:
-	/* Set user a0 to kernel a0 */
-	REG_S a0, PT_A0(sp)
-	/* Trace syscalls, but only if requested by the user. */
-	REG_L t0, TASK_TI_FLAGS(tp)
-	andi t0, t0, _TIF_SYSCALL_TRACE
-	bnez t0, handle_syscall_trace_exit
-
-ret_from_exception:
-	REG_L s0, PT_SSTATUS(sp)
-	csrc sstatus, SR_SIE
-	andi s0, s0, SR_SPP
-	bnez s0, restore_all
-
-resume_userspace:
-	/* Interrupts must be disabled here so flags are checked atomically */
-	REG_L s0, TASK_TI_FLAGS(tp) /* current_thread_info->flags */
-	andi s1, s0, _TIF_WORK_MASK
-	bnez s1, work_pending
-
-	/* Save unwound kernel stack pointer in thread_info */
-	addi s0, sp, PT_SIZE_ON_STACK
-	REG_S s0, TASK_TI_KERNEL_SP(tp)
-
-	/*
-	 * Save TP into sscratch, so we can find the kernel data structures
-	 * again.
-	 */
-	csrw sscratch, tp
-
-restore_all:
-	RESTORE_ALL
+#ifdef CONFIG_RISCV_M_MODE
+	mret
+#else
 	sret
+#endif
+
+#if IS_ENABLED(CONFIG_PREEMPTION)
+resume_kernel:
+	REG_L s0, TASK_TI_PREEMPT_COUNT(tp)
+	bnez s0, restore_all
+	REG_L s0, TASK_TI_FLAGS(tp)
+	andi s0, s0, _TIF_NEED_RESCHED
+	beqz s0, restore_all
+	call preempt_schedule_irq
+	j restore_all
+#endif
 
 work_pending:
 	/* Enter slow path for supplementary processing */
...
 	bnez s1, work_resched
 work_notifysig:
 	/* Handle pending signals and notify-resume requests */
-	csrs sstatus, SR_SIE /* Enable interrupts for do_notify_resume() */
+	csrs CSR_STATUS, SR_IE /* Enable interrupts for do_notify_resume() */
 	move a0, sp /* pt_regs */
 	move a1, s0 /* current_thread_info->flags */
 	tail do_notify_resume
...
 handle_syscall_trace_enter:
 	move a0, sp
 	call do_syscall_trace_enter
+	move t0, a0
 	REG_L a0, PT_A0(sp)
 	REG_L a1, PT_A1(sp)
 	REG_L a2, PT_A2(sp)
...
 	REG_L a5, PT_A5(sp)
 	REG_L a6, PT_A6(sp)
 	REG_L a7, PT_A7(sp)
+	bnez t0, ret_from_syscall_rejected
 	j check_syscall_nr
 handle_syscall_trace_exit:
 	move a0, sp
...
 	lw a4, TASK_TI_CPU(a1)
 	sw a3, TASK_TI_CPU(a1)
 	sw a4, TASK_TI_CPU(a0)
-#if TASK_TI != 0
-#error "TASK_TI != 0: tp will contain a 'struct thread_info', not a 'struct task_struct' so get_current() won't work."
-	addi tp, a1, TASK_TI
-#else
+	/* The offset of thread_info in task_struct is zero. */
 	move tp, a1
-#endif
 	ret
 ENDPROC(__switch_to)
 
-ENTRY(__fstate_save)
-	li a2, TASK_THREAD_F0
-	add a0, a0, a2
-	li t1, SR_FS
-	csrs sstatus, t1
-	frcsr t0
-	fsd f0, TASK_THREAD_F0_F0(a0)
-	fsd f1, TASK_THREAD_F1_F0(a0)
-	fsd f2, TASK_THREAD_F2_F0(a0)
-	fsd f3, TASK_THREAD_F3_F0(a0)
-	fsd f4, TASK_THREAD_F4_F0(a0)
-	fsd f5, TASK_THREAD_F5_F0(a0)
-	fsd f6, TASK_THREAD_F6_F0(a0)
-	fsd f7, TASK_THREAD_F7_F0(a0)
-	fsd f8, TASK_THREAD_F8_F0(a0)
-	fsd f9, TASK_THREAD_F9_F0(a0)
-	fsd f10, TASK_THREAD_F10_F0(a0)
-	fsd f11, TASK_THREAD_F11_F0(a0)
-	fsd f12, TASK_THREAD_F12_F0(a0)
-	fsd f13, TASK_THREAD_F13_F0(a0)
-	fsd f14, TASK_THREAD_F14_F0(a0)
-	fsd f15, TASK_THREAD_F15_F0(a0)
-	fsd f16, TASK_THREAD_F16_F0(a0)
-	fsd f17, TASK_THREAD_F17_F0(a0)
-	fsd f18, TASK_THREAD_F18_F0(a0)
-	fsd f19, TASK_THREAD_F19_F0(a0)
-	fsd f20, TASK_THREAD_F20_F0(a0)
-	fsd f21, TASK_THREAD_F21_F0(a0)
-	fsd f22, TASK_THREAD_F22_F0(a0)
-	fsd f23, TASK_THREAD_F23_F0(a0)
-	fsd f24, TASK_THREAD_F24_F0(a0)
-	fsd f25, TASK_THREAD_F25_F0(a0)
-	fsd f26, TASK_THREAD_F26_F0(a0)
-	fsd f27, TASK_THREAD_F27_F0(a0)
-	fsd f28, TASK_THREAD_F28_F0(a0)
-	fsd f29, TASK_THREAD_F29_F0(a0)
-	fsd f30, TASK_THREAD_F30_F0(a0)
-	fsd f31, TASK_THREAD_F31_F0(a0)
-	sw t0, TASK_THREAD_FCSR_F0(a0)
-	csrc sstatus, t1
-	ret
-ENDPROC(__fstate_save)
-
-ENTRY(__fstate_restore)
-	li a2, TASK_THREAD_F0
-	add a0, a0, a2
-	li t1, SR_FS
-	lw t0, TASK_THREAD_FCSR_F0(a0)
-	csrs sstatus, t1
-	fld f0, TASK_THREAD_F0_F0(a0)
-	fld f1, TASK_THREAD_F1_F0(a0)
-	fld f2, TASK_THREAD_F2_F0(a0)
-	fld f3, TASK_THREAD_F3_F0(a0)
-	fld f4, TASK_THREAD_F4_F0(a0)
-	fld f5, TASK_THREAD_F5_F0(a0)
-	fld f6, TASK_THREAD_F6_F0(a0)
-	fld f7, TASK_THREAD_F7_F0(a0)
-	fld f8, TASK_THREAD_F8_F0(a0)
-	fld f9, TASK_THREAD_F9_F0(a0)
-	fld f10, TASK_THREAD_F10_F0(a0)
-	fld f11, TASK_THREAD_F11_F0(a0)
-	fld f12, TASK_THREAD_F12_F0(a0)
-	fld f13, TASK_THREAD_F13_F0(a0)
-	fld f14, TASK_THREAD_F14_F0(a0)
-	fld f15, TASK_THREAD_F15_F0(a0)
-	fld f16, TASK_THREAD_F16_F0(a0)
-	fld f17, TASK_THREAD_F17_F0(a0)
-	fld f18, TASK_THREAD_F18_F0(a0)
-	fld f19, TASK_THREAD_F19_F0(a0)
-	fld f20, TASK_THREAD_F20_F0(a0)
-	fld f21, TASK_THREAD_F21_F0(a0)
-	fld f22, TASK_THREAD_F22_F0(a0)
-	fld f23, TASK_THREAD_F23_F0(a0)
-	fld f24, TASK_THREAD_F24_F0(a0)
-	fld f25, TASK_THREAD_F25_F0(a0)
-	fld f26, TASK_THREAD_F26_F0(a0)
-	fld f27, TASK_THREAD_F27_F0(a0)
-	fld f28, TASK_THREAD_F28_F0(a0)
-	fld f29, TASK_THREAD_F29_F0(a0)
-	fld f30, TASK_THREAD_F30_F0(a0)
-	fld f31, TASK_THREAD_F31_F0(a0)
-	fscsr t0
-	csrc sstatus, t1
-	ret
-ENDPROC(__fstate_restore)
-
+#ifndef CONFIG_MMU
+#define do_page_fault do_trap_unknown
+#endif
 
 	.section ".rodata"
 	.align LGREG
...
 	RISCV_PTR do_page_fault /* store page fault */
 excp_vect_table_end:
 END(excp_vect_table)
+
+#ifndef CONFIG_MMU
+ENTRY(__user_rt_sigreturn)
+	li a7, __NR_rt_sigreturn
+	scall
+END(__user_rt_sigreturn)
+#endif
|---|