2023-12-09 b22da3d8526a935aa31e086e63f60ff3246cb61c
kernel/arch/arm/boot/compressed/head.S
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * linux/arch/arm/boot/compressed/head.S
  *
  * Copyright (C) 1996-2002 Russell King
  * Copyright (C) 2004 Hyok S. Choi (MPU support)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
  */
 #include <linux/linkage.h>
 #include <asm/assembler.h>
@@ -31,19 +28,19 @@
 #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) || defined(CONFIG_CPU_V7)
		.macro	loadsp, rb, tmp1, tmp2
		.endm
-		.macro	writeb, ch, rb
+		.macro	writeb, ch, rb, tmp
		mcr	p14, 0, \ch, c0, c5, 0
		.endm
 #elif defined(CONFIG_CPU_XSCALE)
		.macro	loadsp, rb, tmp1, tmp2
		.endm
-		.macro	writeb, ch, rb
+		.macro	writeb, ch, rb, tmp
		mcr	p14, 0, \ch, c8, c0, 0
		.endm
 #else
		.macro	loadsp, rb, tmp1, tmp2
		.endm
-		.macro	writeb, ch, rb
+		.macro	writeb, ch, rb, tmp
		mcr	p14, 0, \ch, c1, c0, 0
		.endm
 #endif
@@ -52,8 +49,13 @@
 
 #include CONFIG_DEBUG_LL_INCLUDE
 
-		.macro	writeb, ch, rb
+		.macro	writeb, ch, rb, tmp
+#ifdef CONFIG_DEBUG_UART_FLOW_CONTROL
+		waituartcts \tmp, \rb
+#endif
+		waituarttxrdy \tmp, \rb
		senduart \ch, \rb
+		busyuart \tmp, \rb
		.endm
 
 #if defined(CONFIG_ARCH_SA1100)
@@ -84,37 +86,93 @@
		bl	phex
		.endm
 
-		.macro	debug_reloc_start
+		/*
+		 * Debug kernel copy by printing the memory addresses involved
+		 */
+		.macro	dbgkc, begin, end, cbegin, cend
 #ifdef DEBUG
-		kputc	#'\n'
-		kphex	r6, 8		/* processor id */
-		kputc	#':'
-		kphex	r7, 8		/* architecture id */
-#ifdef CONFIG_CPU_CP15
-		kputc	#':'
-		mrc	p15, 0, r0, c1, c0
-		kphex	r0, 8		/* control reg */
-#endif
-		kputc	#'\n'
-		kphex	r5, 8		/* decompressed kernel start */
+		kputc	#'C'
+		kputc	#':'
+		kputc	#'0'
+		kputc	#'x'
+		kphex	\begin, 8	/* Start of compressed kernel */
		kputc	#'-'
-		kphex	r9, 8		/* decompressed kernel end */
+		kputc	#'0'
+		kputc	#'x'
+		kphex	\end, 8		/* End of compressed kernel */
+		kputc	#'-'
		kputc	#'>'
-		kphex	r4, 8		/* kernel execution address */
+		kputc	#'0'
+		kputc	#'x'
+		kphex	\cbegin, 8	/* Start of kernel copy */
+		kputc	#'-'
+		kputc	#'0'
+		kputc	#'x'
+		kphex	\cend, 8	/* End of kernel copy */
		kputc	#'\n'
 #endif
		.endm
 
-		.macro	debug_reloc_end
+		/*
+		 * Debug print of the final appended DTB location
+		 */
+		.macro	dbgadtb, begin, size
 #ifdef DEBUG
-		kphex	r5, 8		/* end of kernel */
+		kputc	#'D'
+		kputc	#'T'
+		kputc	#'B'
+		kputc	#':'
+		kputc	#'0'
+		kputc	#'x'
+		kphex	\begin, 8	/* Start of appended DTB */
+		kputc	#' '
+		kputc	#'('
+		kputc	#'0'
+		kputc	#'x'
+		kphex	\size, 8	/* Size of appended DTB */
+		kputc	#')'
		kputc	#'\n'
-		mov	r0, r4
-		bl	memdump		/* dump 256 bytes at start of kernel */
 #endif
		.endm
 
-		.section ".start", #alloc, #execinstr
+		.macro	enable_cp15_barriers, reg
+		mrc	p15, 0, \reg, c1, c0, 0	@ read SCTLR
+		tst	\reg, #(1 << 5)		@ CP15BEN bit set?
+		bne	.L_\@
+		orr	\reg, \reg, #(1 << 5)	@ CP15 barrier instructions
+		mcr	p15, 0, \reg, c1, c0, 0	@ write SCTLR
+ ARM(		.inst	0xf57ff06f		@ v7+ isb	)
+ THUMB(		isb					)
+.L_\@:
+		.endm
+
+		/*
+		 * The kernel build system appends the size of the
+		 * decompressed kernel at the end of the compressed data
+		 * in little-endian form.
+		 */
+		.macro	get_inflated_image_size, res:req, tmp1:req, tmp2:req
+		adr	\res, .Linflated_image_size_offset
+		ldr	\tmp1, [\res]
+		add	\tmp1, \tmp1, \res	@ address of inflated image size
+
+		ldrb	\res, [\tmp1]		@ get_unaligned_le32
+		ldrb	\tmp2, [\tmp1, #1]
+		orr	\res, \res, \tmp2, lsl #8
+		ldrb	\tmp2, [\tmp1, #2]
+		ldrb	\tmp1, [\tmp1, #3]
+		orr	\res, \res, \tmp2, lsl #16
+		orr	\res, \res, \tmp1, lsl #24
+		.endm
+
+		.macro	be32tocpu, val, tmp
+#ifndef __ARMEB__
+		/* convert to little endian */
+		rev_l	\val, \tmp
+#endif
+		.endm
+
+		.section ".start", "ax"
 /*
  * sort out different calling conventions
  */
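The get_inflated_image_size macro added above open-codes an unaligned little-endian 32-bit load of the size word that the build system appends after the compressed payload. A minimal C sketch of the same byte-by-byte read, assuming nothing beyond the byte layout described in the comment (the helper name read_le32_unaligned is illustrative, not a kernel function):

#include <stdint.h>

/*
 * Illustrative only: C equivalent of the ldrb/orr sequence in
 * get_inflated_image_size. The size field may be unaligned, so it is
 * assembled byte by byte rather than read with one 32-bit load.
 */
static uint32_t read_le32_unaligned(const uint8_t *p)
{
	return (uint32_t)p[0] |
	       ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) |
	       ((uint32_t)p[3] << 24);
}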
@@ -127,11 +185,24 @@
 AR_CLASS(	.arm	)
 start:
		.type	start,#function
-		.rept	7
+		/*
+		 * These 7 nops along with the 1 nop immediately below for
+		 * !THUMB2 form 8 nops that make the compressed kernel bootable
+		 * on legacy ARM systems that were assuming the kernel in a.out
+		 * binary format. The boot loaders on these systems would
+		 * jump 32 bytes into the image to skip the a.out header.
+		 * with these 8 nops filling exactly 32 bytes, things still
+		 * work as expected on these legacy systems. Thumb2 mode keeps
+		 * 7 of the nops as it turns out that some boot loaders
+		 * were patching the initial instructions of the kernel, i.e
+		 * had started to exploit this "patch area".
+		 */
+		__initial_nops
+		.rept	5
		__nop
		.endr
 #ifndef CONFIG_THUMB2_KERNEL
-		mov	r0, r0
+		__nop
 #else
 AR_CLASS(	sub	pc, pc, #3	)	@ A/R: switch to Thumb2 mode
 M_CLASS(	nop.w			)	@ M: already in Thumb2 mode
@@ -220,41 +291,23 @@
  */
		mov	r0, pc
		cmp	r0, r4
-		ldrcc	r0, LC0+32
+		ldrcc	r0, .Lheadroom
		addcc	r0, r0, pc
		cmpcc	r4, r0
		orrcc	r4, r4, #1		@ remember we skipped cache_on
		blcs	cache_on
 
-restart:	adr	r0, LC0
-		ldmia	r0, {r1, r2, r3, r6, r10, r11, r12}
-		ldr	sp, [r0, #28]
+restart:	adr	r0, LC1
+		ldr	sp, [r0]
+		ldr	r6, [r0, #4]
+		add	sp, sp, r0
+		add	r6, r6, r0
 
-		/*
-		 * We might be running at a different address. We need
-		 * to fix up various pointers.
-		 */
-		sub	r0, r0, r1		@ calculate the delta offset
-		add	r6, r6, r0		@ _edata
-		add	r10, r10, r0		@ inflated kernel size location
-
-		/*
-		 * The kernel build system appends the size of the
-		 * decompressed kernel at the end of the compressed data
-		 * in little-endian form.
-		 */
-		ldrb	r9, [r10, #0]
-		ldrb	lr, [r10, #1]
-		orr	r9, r9, lr, lsl #8
-		ldrb	lr, [r10, #2]
-		ldrb	r10, [r10, #3]
-		orr	r9, r9, lr, lsl #16
-		orr	r9, r9, r10, lsl #24
+		get_inflated_image_size	r9, r10, lr
 
 #ifndef CONFIG_ZBOOT_ROM
		/* malloc space is above the relocated stack (64k max) */
-		add	sp, sp, r0
-		add	r10, sp, #0x10000
+		add	r10, sp, #MALLOC_SIZE
 #else
		/*
		 * With ZBOOT_ROM the bss/stack is non relocatable,
@@ -267,9 +320,6 @@
		mov	r5, #0			@ init dtb size to 0
 #ifdef CONFIG_ARM_APPENDED_DTB
 /*
- * r0 = delta
- * r2 = BSS start
- * r3 = BSS end
  * r4 = final kernel address (possibly with LSB set)
  * r5 = appended dtb size (still unknown)
  * r6 = _edata
@@ -277,8 +327,6 @@
  * r8 = atags/device tree pointer
  * r9 = size of decompressed image
  * r10 = end of this image, including bss/stack/malloc space if non XIP
- * r11 = GOT start
- * r12 = GOT end
  * sp = stack pointer
  *
  * if there are device trees (dtb) appended to zImage, advance r10 so that the
@@ -306,13 +354,8 @@
 
		/* Get the initial DTB size */
		ldr	r5, [r6, #4]
-#ifndef __ARMEB__
-		/* convert to little endian */
-		eor	r1, r5, r5, ror #16
-		bic	r1, r1, #0x00ff0000
-		mov	r5, r5, ror #8
-		eor	r5, r5, r1, lsr #8
-#endif
+		be32tocpu r5, r1
+		dbgadtb	r6, r5
		/* 50% DTB growth should be good enough */
		add	r5, r5, r5, lsr #1
		/* preserve 64-bit alignment */
@@ -326,7 +369,6 @@
		/* temporarily relocate the stack past the DTB work space */
		add	sp, sp, r5
 
-		stmfd	sp!, {r0-r3, ip, lr}
		mov	r0, r8
		mov	r1, r6
		mov	r2, r5
@@ -346,7 +388,6 @@
		mov	r2, r5
		bleq	atags_to_fdt
 
-		ldmfd	sp!, {r0-r3, ip, lr}
		sub	sp, sp, r5
 #endif
 
@@ -366,13 +407,7 @@
 
		/* Get the current DTB size */
		ldr	r5, [r6, #4]
-#ifndef __ARMEB__
-		/* convert r5 (dtb size) to little endian */
-		eor	r1, r5, r5, ror #16
-		bic	r1, r1, #0x00ff0000
-		mov	r5, r5, ror #8
-		eor	r5, r5, r1, lsr #8
-#endif
+		be32tocpu r5, r1
 
		/* preserve 64-bit alignment */
		add	r5, r5, #7
@@ -452,6 +487,20 @@
		add	r6, r9, r5
		add	r9, r9, r10
 
+#ifdef DEBUG
+		sub	r10, r6, r5
+		sub	r10, r9, r10
+		/*
+		 * We are about to copy the kernel to a new memory area.
+		 * The boundaries of the new memory area can be found in
+		 * r10 and r9, whilst r5 and r6 contain the boundaries
+		 * of the memory we are going to copy.
+		 * Calling dbgkc will help with the printing of this
+		 * information.
+		 */
+		dbgkc	r5, r6, r10, r9
+#endif
+
 1:		ldmdb	r6!, {r0 - r3, r10 - r12, lr}
		cmp	r6, r5
		stmdb	r9!, {r0 - r3, r10 - r12, lr}
@@ -460,11 +509,8 @@
		/* Preserve offset to relocated code. */
		sub	r6, r9, r6
 
-#ifndef CONFIG_ZBOOT_ROM
-		/* cache_clean_flush may use the stack, so relocate it */
-		add	sp, sp, r6
-#endif
-
+		mov	r0, r9			@ start of relocated zImage
+		add	r1, sp, r6		@ end of relocated zImage
		bl	cache_clean_flush
 
		badr	r0, restart
@@ -472,6 +518,10 @@
		mov	pc, r0
 
 wont_overwrite:
+		adr	r0, LC0
+		ldmia	r0, {r1, r2, r3, r11, r12}
+		sub	r0, r0, r1		@ calculate the delta offset
+
 /*
  * If delta is zero, we are running at the address we were linked at.
  * r0 = delta
@@ -558,9 +608,14 @@
  */
		mov	r0, r4
		mov	r1, sp			@ malloc space above stack
-		add	r2, sp, #0x10000	@ 64k max
+		add	r2, sp, #MALLOC_SIZE	@ 64k max
		mov	r3, r7
		bl	decompress_kernel
+
+		get_inflated_image_size	r1, r2, r3
+
+		mov	r0, r4			@ start of inflated image
+		add	r1, r1, r0		@ end of inflated image
		bl	cache_clean_flush
		bl	cache_off
 
@@ -590,13 +645,20 @@
 LC0:		.word	LC0			@ r1
		.word	__bss_start		@ r2
		.word	_end			@ r3
-		.word	_edata			@ r6
-		.word	input_data_end - 4	@ r10 (inflated size location)
		.word	_got_start		@ r11
		.word	_got_end		@ ip
-		.word	.L_user_stack_end	@ sp
-		.word	_end - restart + 16384 + 1024*1024
		.size	LC0, . - LC0
+
+		.type	LC1, #object
+LC1:		.word	.L_user_stack_end - LC1	@ sp
+		.word	_edata - LC1		@ r6
+		.size	LC1, . - LC1
+
+.Lheadroom:
+		.word	_end - restart + 16384 + 1024*1024
+
+.Linflated_image_size_offset:
+		.long	(input_data_end - 4) - .
 
 #ifdef CONFIG_ARCH_RPC
		.globl	params
@@ -605,6 +667,24 @@
		.ltorg
		.align
 #endif
+
+/*
+ * dcache_line_size - get the minimum D-cache line size from the CTR register
+ * on ARMv7.
+ */
+		.macro	dcache_line_size, reg, tmp
+#ifdef CONFIG_CPU_V7M
+		movw	\tmp, #:lower16:BASEADDR_V7M_SCB + V7M_SCB_CTR
+		movt	\tmp, #:upper16:BASEADDR_V7M_SCB + V7M_SCB_CTR
+		ldr	\tmp, [\tmp]
+#else
+		mrc	p15, 0, \tmp, c0, c0, 1	@ read ctr
+#endif
+		lsr	\tmp, \tmp, #16
+		and	\tmp, \tmp, #0xf	@ cache line size encoding
+		mov	\reg, #4		@ bytes per word
+		mov	\reg, \reg, lsl \tmp	@ actual cache line size
+		.endm
 
 /*
  * Turn on the cache. We need to setup some page tables so that we
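The dcache_line_size macro above derives the smallest D-cache line length from CTR[19:16] (DminLine), which encodes log2 of the line size in 4-byte words. A C sketch of that decoding, assuming only the bit layout used by the macro (the function name is illustrative, not a kernel helper):

#include <stdint.h>

/*
 * Illustrative only: the computation performed by dcache_line_size.
 * CTR.DminLine (bits [19:16]) is log2(words per line), so the line
 * size in bytes is 4 << DminLine.
 */
static uint32_t dcache_min_line_size(uint32_t ctr)
{
	uint32_t dminline = (ctr >> 16) & 0xf;	/* log2(words per line) */

	return 4u << dminline;			/* bytes per line */
}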
@@ -770,6 +850,7 @@
		mov	pc, r12
 
 __armv7_mmu_cache_on:
+		enable_cp15_barriers	r11
		mov	r12, lr
 #ifdef CONFIG_MMU
		mrc	p15, 0, r11, c0, c1, 4	@ read ID_MMFR0
@@ -1097,8 +1178,6 @@
		bic	r0, r0, #0x0004
 #endif
		mcr	p15, 0, r0, c1, c0	@ turn MMU and cache off
-		mov	r12, lr
-		bl	__armv7_mmu_cache_flush
		mov	r0, #0
 #ifdef CONFIG_MMU
		mcr	p15, 0, r0, c8, c7, 0	@ invalidate whole TLB
@@ -1106,11 +1185,14 @@
		mcr	p15, 0, r0, c7, c5, 6	@ invalidate BTC
		mcr	p15, 0, r0, c7, c10, 4	@ DSB
		mcr	p15, 0, r0, c7, c5, 4	@ ISB
-		mov	pc, r12
+		mov	pc, lr
 
 /*
  * Clean and flush the cache to maintain consistency.
  *
+ * On entry,
+ *  r0 = start address
+ *  r1 = end address (exclusive)
  * On exit,
  *  r1, r2, r3, r9, r10, r11, r12 corrupted
  * This routine must preserve:
@@ -1119,6 +1201,7 @@
		.align	5
 cache_clean_flush:
		mov	r3, #16
+		mov	r11, r1
		b	call_cache_fn
 
 __armv4_mpu_cache_flush:
@@ -1159,6 +1242,7 @@
		mov	pc, lr
 
 __armv7_mmu_cache_flush:
+		enable_cp15_barriers	r10
		tst	r4, #1
		bne	iflush
		mrc	p15, 0, r10, c0, c1, 5	@ read ID_MMFR1
@@ -1168,51 +1252,16 @@
		mcr	p15, 0, r10, c7, c14, 0	@ clean+invalidate D
		b	iflush
 hierarchical:
-		mcr	p15, 0, r10, c7, c10, 5	@ DMB
-		stmfd	sp!, {r0-r7, r9-r11}
-		mrc	p15, 1, r0, c0, c0, 1	@ read clidr
-		ands	r3, r0, #0x7000000	@ extract loc from clidr
-		mov	r3, r3, lsr #23		@ left align loc bit field
-		beq	finished		@ if loc is 0, then no need to clean
-		mov	r10, #0			@ start clean at cache level 0
-loop1:
-		add	r2, r10, r10, lsr #1	@ work out 3x current cache level
-		mov	r1, r0, lsr r2		@ extract cache type bits from clidr
-		and	r1, r1, #7		@ mask of the bits for current cache only
-		cmp	r1, #2			@ see what cache we have at this level
-		blt	skip			@ skip if no cache, or just i-cache
-		mcr	p15, 2, r10, c0, c0, 0	@ select current cache level in cssr
-		mcr	p15, 0, r10, c7, c5, 4	@ isb to sych the new cssr&csidr
-		mrc	p15, 1, r1, c0, c0, 0	@ read the new csidr
-		and	r2, r1, #7		@ extract the length of the cache lines
-		add	r2, r2, #4		@ add 4 (line length offset)
-		ldr	r4, =0x3ff
-		ands	r4, r4, r1, lsr #3	@ find maximum number on the way size
-		clz	r5, r4			@ find bit position of way size increment
-		ldr	r7, =0x7fff
-		ands	r7, r7, r1, lsr #13	@ extract max number of the index size
-loop2:
-		mov	r9, r4			@ create working copy of max way size
-loop3:
- ARM(		orr	r11, r10, r9, lsl r5	)	@ factor way and cache number into r11
- ARM(		orr	r11, r11, r7, lsl r2	)	@ factor index number into r11
- THUMB(		lsl	r6, r9, r5		)
- THUMB(		orr	r11, r10, r6		)	@ factor way and cache number into r11
- THUMB(		lsl	r6, r7, r2		)
- THUMB(		orr	r11, r11, r6		)	@ factor index number into r11
-		mcr	p15, 0, r11, c7, c14, 2	@ clean & invalidate by set/way
-		subs	r9, r9, #1		@ decrement the way
-		bge	loop3
-		subs	r7, r7, #1		@ decrement the index
-		bge	loop2
-skip:
-		add	r10, r10, #2		@ increment cache number
-		cmp	r3, r10
-		bgt	loop1
-finished:
-		ldmfd	sp!, {r0-r7, r9-r11}
-		mov	r10, #0			@ switch back to cache level 0
-		mcr	p15, 2, r10, c0, c0, 0	@ select current cache level in cssr
+		dcache_line_size r1, r2		@ r1 := dcache min line size
+		sub	r2, r1, #1		@ r2 := line size mask
+		bic	r0, r0, r2		@ round down start to line size
+		sub	r11, r11, #1		@ end address is exclusive
+		bic	r11, r11, r2		@ round down end to line size
+0:		cmp	r0, r11			@ finished?
+		bgt	iflush
+		mcr	p15, 0, r0, c7, c14, 1	@ Dcache clean/invalidate by VA
+		add	r0, r0, r1
+		b	0b
 iflush:
		mcr	p15, 0, r10, c7, c10, 4	@ DSB
		mcr	p15, 0, r10, c7, c5, 0	@ invalidate I+BTB
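The replacement cache_clean_flush above walks the [start, end) range one D-cache line at a time instead of cleaning the whole hierarchy by set/way. A runnable C sketch of just that traversal logic, under the assumption that line is a power of two; dccimvac() is a stand-in for the "mcr p15, 0, r0, c7, c14, 1" clean+invalidate by MVA, not a real kernel helper:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the per-line clean+invalidate; real code issues an MCR. */
static void dccimvac(uintptr_t addr)
{
	printf("clean+invalidate D-line at %#lx\n", (unsigned long)addr);
}

/*
 * Illustrative only: round start down to a line boundary, treat end as
 * exclusive, and touch every line up to and including the one holding
 * end - 1, mirroring the bic/cmp/add loop above.
 */
static void clean_dcache_range(uintptr_t start, uintptr_t end, uint32_t line)
{
	uintptr_t mask = (uintptr_t)line - 1;
	uintptr_t last = (end - 1) & ~mask;

	for (uintptr_t addr = start & ~mask; addr <= last; addr += line)
		dccimvac(addr);
}

int main(void)
{
	clean_dcache_range(0x8000f004u, 0x8001000cu, 64);
	return 0;
}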
@@ -1223,7 +1272,7 @@
 __armv5tej_mmu_cache_flush:
		tst	r4, #1
		movne	pc, lr
-1:		mrc	p15, 0, r15, c7, c14, 3	@ test,clean,invalidate D cache
+1:		mrc	p15, 0, APSR_nzcv, c7, c14, 3	@ test,clean,invalidate D cache
		bne	1b
		mcr	p15, 0, r0, c7, c5, 0	@ flush I cache
		mcr	p15, 0, r0, c7, c10, 4	@ drain WB
@@ -1301,7 +1350,7 @@
 1:		ldrb	r2, [r0], #1
		teq	r2, #0
		moveq	pc, lr
-2:		writeb	r2, r3
+2:		writeb	r2, r3, r1
		mov	r1, #0x00020000
 3:		subs	r1, r1, #1
		bne	3b
@@ -1355,7 +1404,11 @@
 __hyp_reentry_vectors:
		W(b)	.			@ reset
		W(b)	.			@ undef
+#ifdef CONFIG_EFI_STUB
+		W(b)	__enter_kernel_from_hyp	@ hvc from HYP
+#else
		W(b)	.			@ svc
+#endif
		W(b)	.			@ pabort
		W(b)	.			@ dabort
		W(b)	__enter_kernel		@ hyp
@@ -1374,64 +1427,87 @@
 reloc_code_end:
 
 #ifdef CONFIG_EFI_STUB
-		.align	2
-_start:	.long	start - .
+__enter_kernel_from_hyp:
+		mrc	p15, 4, r0, c1, c0, 0	@ read HSCTLR
+		bic	r0, r0, #0x5		@ disable MMU and caches
+		mcr	p15, 4, r0, c1, c0, 0	@ write HSCTLR
+		isb
+		b	__enter_kernel
 
-ENTRY(efi_stub_entry)
-		@ allocate space on stack for passing current zImage address
-		@ and for the EFI stub to return of new entry point of
-		@ zImage, as EFI stub may copy the kernel. Pointer address
-		@ is passed in r2. r0 and r1 are passed through from the
-		@ EFI firmware to efi_entry
-		adr	ip, _start
-		ldr	r3, [ip]
-		add	r3, r3, ip
-		stmfd	sp!, {r3, lr}
-		mov	r2, sp			@ pass zImage address in r2
-		bl	efi_entry
+ENTRY(efi_enter_kernel)
+		mov	r4, r0			@ preserve image base
+		mov	r8, r1			@ preserve DT pointer
 
-		@ Check for error return from EFI stub. r0 has FDT address
-		@ or error code.
-		cmn	r0, #1
-		beq	efi_load_fail
+		adr_l	r0, call_cache_fn
+		adr	r1, 0f			@ clean the region of code we
+		bl	cache_clean_flush	@ may run with the MMU off
 
-		@ Preserve return value of efi_entry() in r4
-		mov	r4, r0
+#ifdef CONFIG_ARM_VIRT_EXT
+		@
+		@ The EFI spec does not support booting on ARM in HYP mode,
+		@ since it mandates that the MMU and caches are on, with all
+		@ 32-bit addressable DRAM mapped 1:1 using short descriptors.
+		@
+		@ While the EDK2 reference implementation adheres to this,
+		@ U-Boot might decide to enter the EFI stub in HYP mode
+		@ anyway, with the MMU and caches either on or off.
+		@
+		mrs	r0, cpsr		@ get the current mode
+		msr	spsr_cxsf, r0		@ record boot mode
+		and	r0, r0, #MODE_MASK	@ are we running in HYP mode?
+		cmp	r0, #HYP_MODE
+		bne	.Lefi_svc
 
-		@ our cache maintenance code relies on CP15 barrier instructions
-		@ but since we arrived here with the MMU and caches configured
-		@ by UEFI, we must check that the CP15BEN bit is set in SCTLR.
-		@ Note that this bit is RAO/WI on v6 and earlier, so the ISB in
-		@ the enable path will be executed on v7+ only.
-		mrc	p15, 0, r1, c1, c0, 0	@ read SCTLR
-		tst	r1, #(1 << 5)		@ CP15BEN bit set?
-		bne	0f
-		orr	r1, r1, #(1 << 5)	@ CP15 barrier instructions
-		mcr	p15, 0, r1, c1, c0, 0	@ write SCTLR
- ARM(		.inst	0xf57ff06f		@ v7+ isb	)
- THUMB(		isb					)
+		mrc	p15, 4, r1, c1, c0, 0	@ read HSCTLR
+		tst	r1, #0x1		@ MMU enabled at HYP?
+		beq	1f
 
-0:		bl	cache_clean_flush
-		bl	cache_off
+		@
+		@ When running in HYP mode with the caches on, we're better
+		@ off just carrying on using the cached 1:1 mapping that the
+		@ firmware provided. Set up the HYP vectors so HVC instructions
+		@ issued from HYP mode take us to the correct handler code. We
+		@ will disable the MMU before jumping to the kernel proper.
+		@
+ ARM(		bic	r1, r1, #(1 << 30)	)	@ clear HSCTLR.TE
+ THUMB(		orr	r1, r1, #(1 << 30)	)	@ set HSCTLR.TE
+		mcr	p15, 4, r1, c1, c0, 0
+		adr	r0, __hyp_reentry_vectors
+		mcr	p15, 4, r0, c12, c0, 0	@ set HYP vector base (HVBAR)
+		isb
+		b	.Lefi_hyp
 
-		@ Set parameters for booting zImage according to boot protocol
-		@ put FDT address in r2, it was returned by efi_entry()
-		@ r1 is the machine type, and r0 needs to be 0
-		mov	r0, #0
-		mov	r1, #0xFFFFFFFF
-		mov	r2, r4
+		@
+		@ When running in HYP mode with the caches off, we need to drop
+		@ into SVC mode now, and let the decompressor set up its cached
+		@ 1:1 mapping as usual.
+		@
+1:		mov	r9, r4			@ preserve image base
+		bl	__hyp_stub_install	@ install HYP stub vectors
+		safe_svcmode_maskall	r1	@ drop to SVC mode
+		msr	spsr_cxsf, r0		@ record boot mode
+		orr	r4, r9, #1		@ restore image base and set LSB
+		b	.Lefi_hyp
+.Lefi_svc:
+#endif
+		mrc	p15, 0, r0, c1, c0, 0	@ read SCTLR
+		tst	r0, #0x1		@ MMU enabled?
+		orreq	r4, r4, #1		@ set LSB if not
 
-		@ Branch to (possibly) relocated zImage that is in [sp]
-		ldr	lr, [sp]
-		ldr	ip, =start_offset
-		add	lr, lr, ip
-		mov	pc, lr			@ no mode switch
+.Lefi_hyp:
+		mov	r0, r8			@ DT start
+		add	r1, r8, r2		@ DT end
+		bl	cache_clean_flush
 
-efi_load_fail:
-		@ Return EFI_LOAD_ERROR to EFI firmware on error.
-		ldr	r0, =0x80000001
-		ldmfd	sp!, {ip, pc}
-ENDPROC(efi_stub_entry)
+		adr	r0, 0f			@ switch to our stack
+		ldr	sp, [r0]
+		add	sp, sp, r0
+
+		mov	r5, #0			@ appended DTB size
+		mov	r7, #0xFFFFFFFF		@ machine ID
+		b	wont_overwrite
+ENDPROC(efi_enter_kernel)
+0:		.long	.L_user_stack_end - .
 #endif
 
		.align