From d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Mon, 11 Dec 2023 02:45:28 +0000
Subject: [PATCH] add boot partition size

---
 kernel/arch/arm/kernel/entry-armv.S | 127 ++++++++++++++++++++++++++++--------
 1 files changed, 85 insertions(+), 42 deletions(-)

diff --git a/kernel/arch/arm/kernel/entry-armv.S b/kernel/arch/arm/kernel/entry-armv.S
index 857c9cd..d608655 100644
--- a/kernel/arch/arm/kernel/entry-armv.S
+++ b/kernel/arch/arm/kernel/entry-armv.S
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  *  linux/arch/arm/kernel/entry-armv.S
  *
  *  Copyright (C) 1996,1997,1998 Russell King.
  *  ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
  *  nommu support by Hyok S. Choi (hyok.choi@samsung.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
  *
  *  Low-level vector interface routines
  *
@@ -207,7 +204,7 @@
 	svc_entry
 	irq_handler
 
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
 	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
 	teq	r8, #0				@ if preempt count != 0
 	bne	1f				@ return from exeption
@@ -229,7 +226,7 @@
 
 	.ltorg
 
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
 svc_preempt:
 	mov	r8, lr
 1:	bl	preempt_schedule_irq		@ irq en/disable is done inside
@@ -268,31 +265,10 @@
 #else
 	svc_entry
 #endif
-	@
-	@ call emulation code, which returns using r9 if it has emulated
-	@ the instruction, or the more conventional lr if we are to treat
-	@ this as a real undefined instruction
-	@
-	@  r0 - instruction
-	@
-#ifndef CONFIG_THUMB2_KERNEL
-	ldr	r0, [r4, #-4]
-#else
-	mov	r1, #2
-	ldrh	r0, [r4, #-2]			@ Thumb instruction at LR - 2
-	cmp	r0, #0xe800			@ 32-bit instruction if xx >= 0
-	blo	__und_svc_fault
-	ldrh	r9, [r4]			@ bottom 16 bits
-	add	r4, r4, #2
-	str	r4, [sp, #S_PC]
-	orr	r0, r9, r0, lsl #16
-#endif
-	badr	r9, __und_svc_finish
-	mov	r2, r4
-	bl	call_fpe
 
 	mov	r1, #4				@ PC correction to apply
-__und_svc_fault:
+ THUMB(	tst	r5, #PSR_T_BIT		)	@ exception taken in Thumb mode?
+ THUMB(	movne	r1, #2			)	@ if so, fix up PC correction
 	mov	r0, sp				@ struct pt_regs *regs
 	bl	__und_fault
 
@@ -640,7 +616,7 @@
 	@ Test if we need to give access to iWMMXt coprocessors
 	ldr	r5, [r10, #TI_FLAGS]
 	rsbs	r7, r8, #(1 << 8)		@ CP 0 or 1 only
-	movcss	r7, r5, lsr #(TIF_USING_IWMMXT + 1)
+	movscs	r7, r5, lsr #(TIF_USING_IWMMXT + 1)
 	bcs	iwmmxt_task_enable
 #endif
  ARM(	add	pc, pc, r8, lsr #6	)
@@ -833,7 +809,7 @@
  * existing ones. This mechanism should be used only for things that are
  * really small and justified, and not be abused freely.
  *
- * See Documentation/arm/kernel_user_helpers.txt for formal definitions.
+ * See Documentation/arm/kernel_user_helpers.rst for formal definitions.
  */
  THUMB(	.arm	)
 
@@ -876,7 +852,7 @@
 	smp_dmb	arm
 1:	ldrexd	r0, r1, [r2]			@ load current val
 	eors	r3, r0, r4			@ compare with oldval (1)
-	eoreqs	r3, r1, r5			@ compare with oldval (2)
+	eorseq	r3, r1, r5			@ compare with oldval (2)
 	strexdeq r3, r6, r7, [r2]		@ store newval if eq
 	teqeq	r3, #1				@ success?
 	beq	1b				@ if no then retry
@@ -900,8 +876,8 @@
 	ldmia	r1, {r6, lr}			@ load new val
 1:	ldmia	r2, {r0, r1}			@ load current val
 	eors	r3, r0, r4			@ compare with oldval (1)
-	eoreqs	r3, r1, r5			@ compare with oldval (2)
-2:	stmeqia	r2, {r6, lr}			@ store newval if eq
+	eorseq	r3, r1, r5			@ compare with oldval (2)
+2:	stmiaeq	r2, {r6, lr}			@ store newval if eq
 	rsbs	r0, r3, #0			@ set return val and C flag
 	ldmfd	sp!, {r4, r5, r6, pc}
 
@@ -915,7 +891,7 @@
 	mov	r7, #0xffff0fff
 	sub	r7, r7, #(0xffff0fff - (0xffff0f60 + (1b - __kuser_cmpxchg64)))
 	subs	r8, r4, r7
-	rsbcss	r8, r8, #(2b - 1b)
+	rsbscs	r8, r8, #(2b - 1b)
 	strcs	r7, [sp, #S_PC]
 #if __LINUX_ARM_ARCH__ < 6
 	bcc	kuser_cmpxchg32_fixup
@@ -973,7 +949,7 @@
 	mov	r7, #0xffff0fff
 	sub	r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg)))
 	subs	r8, r4, r7
-	rsbcss	r8, r8, #(2b - 1b)
+	rsbscs	r8, r8, #(2b - 1b)
 	strcs	r7, [sp, #S_PC]
 	ret	lr
 	.previous
@@ -1042,12 +1018,11 @@
 	sub	lr, lr, #\correction
 	.endif
 
-	@
-	@ Save r0, lr_<exception> (parent PC) and spsr_<exception>
-	@ (parent CPSR)
-	@
+	@ Save r0, lr_<exception> (parent PC)
 	stmia	sp, {r0, lr}		@ save r0, lr
-	mrs	lr, spsr
+
+	@ Save spsr_<exception> (parent CPSR)
+2:	mrs	lr, spsr
 	str	lr, [sp, #8]		@ save spsr
 
 	@
@@ -1068,6 +1043,44 @@
 	movs	pc, lr			@ branch to handler in SVC mode
 ENDPROC(vector_\name)
 
+#ifdef CONFIG_HARDEN_BRANCH_HISTORY
+	.subsection 1
+	.align 5
+vector_bhb_loop8_\name:
+	.if \correction
+	sub	lr, lr, #\correction
+	.endif
+
+	@ Save r0, lr_<exception> (parent PC)
+	stmia	sp, {r0, lr}
+
+	@ bhb workaround
+	mov	r0, #8
+3:	W(b)	. + 4
+	subs	r0, r0, #1
+	bne	3b
+	dsb
+	isb
+	b	2b
+ENDPROC(vector_bhb_loop8_\name)
+
+vector_bhb_bpiall_\name:
+	.if \correction
+	sub	lr, lr, #\correction
+	.endif
+
+	@ Save r0, lr_<exception> (parent PC)
+	stmia	sp, {r0, lr}
+
+	@ bhb workaround
+	mcr	p15, 0, r0, c7, c5, 6	@ BPIALL
+	@ isb not needed due to "movs pc, lr" in the vector stub
+	@ which gives a "context synchronisation".
+	b	2b
+ENDPROC(vector_bhb_bpiall_\name)
+	.previous
+#endif
+
 	.align	2
 	@ handler addresses follow this label
 1:
@@ -1076,6 +1089,10 @@
 	.section .stubs, "ax", %progbits
 	@ This must be the first word
 	.word vector_swi
+#ifdef CONFIG_HARDEN_BRANCH_HISTORY
+	.word vector_bhb_loop8_swi
+	.word vector_bhb_bpiall_swi
+#endif
 
 vector_rst:
  ARM(	swi	SYS_ERROR0	)
@@ -1190,8 +1207,10 @@
  * FIQ "NMI" handler
  *-----------------------------------------------------------------------------
  * Handle a FIQ using the SVC stack allowing FIQ act like NMI on x86
- * systems.
+ * systems. This must be the last vector stub, so lets place it in its own
+ * subsection.
  */
+	.subsection 2
 	vector_stub	fiq, FIQ_MODE, 4
 
 	.long	__fiq_usr			@  0  (USR_26 / USR_32)
@@ -1224,6 +1243,30 @@
 	W(b)	vector_irq
 	W(b)	vector_fiq
 
+#ifdef CONFIG_HARDEN_BRANCH_HISTORY
+	.section .vectors.bhb.loop8, "ax", %progbits
+.L__vectors_bhb_loop8_start:
+	W(b)	vector_rst
+	W(b)	vector_bhb_loop8_und
+	W(ldr)	pc, .L__vectors_bhb_loop8_start + 0x1004
+	W(b)	vector_bhb_loop8_pabt
+	W(b)	vector_bhb_loop8_dabt
+	W(b)	vector_addrexcptn
+	W(b)	vector_bhb_loop8_irq
+	W(b)	vector_bhb_loop8_fiq
+
+	.section .vectors.bhb.bpiall, "ax", %progbits
+.L__vectors_bhb_bpiall_start:
+	W(b)	vector_rst
+	W(b)	vector_bhb_bpiall_und
+	W(ldr)	pc, .L__vectors_bhb_bpiall_start + 0x1008
+	W(b)	vector_bhb_bpiall_pabt
+	W(b)	vector_bhb_bpiall_dabt
+	W(b)	vector_addrexcptn
+	W(b)	vector_bhb_bpiall_irq
+	W(b)	vector_bhb_bpiall_fiq
+#endif
+
 	.data
 	.align	2
 
--
Gitblit v1.6.2