2023-12-11 d2ccde1c8e90d38cee87a1b0309ad2827f3fd30d
kernel/arch/riscv/kernel/head.S
@@ -1,17 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Copyright (C) 2012 Regents of the University of California
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
  */
 
-#include <asm/thread_info.h>
 #include <asm/asm-offsets.h>
 #include <asm/asm.h>
 #include <linux/init.h>
@@ -19,11 +10,213 @@
 #include <asm/thread_info.h>
 #include <asm/page.h>
 #include <asm/csr.h>
+#include <asm/hwcap.h>
+#include <asm/image.h>
+#include "efi-header.S"
 
-__INIT
+__HEAD
 ENTRY(_start)
+	/*
+	 * Image header expected by Linux boot-loaders. The image header data
+	 * structure is described in asm/image.h.
+	 * Do not modify it without modifying the structure and all bootloaders
+	 * that expect this header format!
+	 */
+#ifdef CONFIG_EFI
+	/*
+	 * This instruction decodes to "MZ" ASCII required by UEFI.
+	 */
+	c.li s4,-13
+	j _start_kernel
+#else
+	/* jump to start kernel */
+	j _start_kernel
+	/* reserved */
+	.word 0
+#endif
+	.balign 8
+#ifdef CONFIG_RISCV_M_MODE
+	/* Image load offset (0MB) from start of RAM for M-mode */
+	.dword 0
+#else
+#if __riscv_xlen == 64
+	/* Image load offset (2MB) from start of RAM */
+	.dword 0x200000
+#else
+	/* Image load offset (4MB) from start of RAM */
+	.dword 0x400000
+#endif
+#endif
+	/* Effective size of kernel image */
+	.dword _end - _start
+	.dword __HEAD_FLAGS
+	.word RISCV_HEADER_VERSION
+	.word 0
+	.dword 0
+	.ascii RISCV_IMAGE_MAGIC
+	.balign 4
+	.ascii RISCV_IMAGE_MAGIC2
+#ifdef CONFIG_EFI
+	.word pe_head_start - _start
+pe_head_start:
+
+	__EFI_PE_HEADER
+#else
+	.word 0
+#endif
+
+.align 2
+#ifdef CONFIG_MMU
+relocate:
+	/* Relocate return address */
+	li a1, PAGE_OFFSET
+	la a2, _start
+	sub a1, a1, a2
+	add ra, ra, a1
+
+	/* Point stvec to virtual address of instruction after satp write */
+	la a2, 1f
+	add a2, a2, a1
+	csrw CSR_TVEC, a2
+
+	/* Compute satp for kernel page tables, but don't load it yet */
+	srl a2, a0, PAGE_SHIFT
+	li a1, SATP_MODE
+	or a2, a2, a1
+
+	/*
+	 * Load trampoline page directory, which will cause us to trap to
+	 * stvec if VA != PA, or simply fall through if VA == PA. We need a
+	 * full fence here because setup_vm() just wrote these PTEs and we need
+	 * to ensure the new translations are in use.
+	 */
+	la a0, trampoline_pg_dir
+	srl a0, a0, PAGE_SHIFT
+	or a0, a0, a1
+	sfence.vma
+	csrw CSR_SATP, a0
+.align 2
+1:
+	/* Set trap vector to spin forever to help debug */
+	la a0, .Lsecondary_park
+	csrw CSR_TVEC, a0
+
+	/* Reload the global pointer */
+.option push
+.option norelax
+	la gp, __global_pointer$
+.option pop
+
+	/*
+	 * Switch to kernel page tables. A full fence is necessary in order to
+	 * avoid using the trampoline translations, which are only correct for
+	 * the first superpage. Fetching the fence is guaranteed to work
+	 * because that first superpage is translated the same way.
+	 */
+	csrw CSR_SATP, a2
+	sfence.vma
+
+	ret
+#endif /* CONFIG_MMU */
+#ifdef CONFIG_SMP
+	.global secondary_start_sbi
+secondary_start_sbi:
 	/* Mask all interrupts */
-	csrw sie, zero
+	csrw CSR_IE, zero
+	csrw CSR_IP, zero
+
+	/* Load the global pointer */
+	.option push
+	.option norelax
+	la gp, __global_pointer$
+	.option pop
+
+	/*
+	 * Disable FPU to detect illegal usage of
+	 * floating point in kernel space
+	 */
+	li t0, SR_FS
+	csrc CSR_STATUS, t0
+
+	/* Set trap vector to spin forever to help debug */
+	la a3, .Lsecondary_park
+	csrw CSR_TVEC, a3
+
+	slli a3, a0, LGREG
+	la a4, __cpu_up_stack_pointer
+	la a5, __cpu_up_task_pointer
+	add a4, a3, a4
+	add a5, a3, a5
+	REG_L sp, (a4)
+	REG_L tp, (a5)
+
+	.global secondary_start_common
+secondary_start_common:
+
+#ifdef CONFIG_MMU
+	/* Enable virtual memory and relocate to virtual address */
+	la a0, swapper_pg_dir
+	call relocate
+#endif
+	call setup_trap_vector
+	tail smp_callin
+#endif /* CONFIG_SMP */
+
+.align 2
+setup_trap_vector:
+	/* Set trap vector to exception handler */
+	la a0, handle_exception
+	csrw CSR_TVEC, a0
+
+	/*
+	 * Set sup0 scratch register to 0, indicating to exception vector that
+	 * we are presently executing in kernel.
+	 */
+	csrw CSR_SCRATCH, zero
+	ret
+
+.align 2
+.Lsecondary_park:
+	/* We lack SMP support or have too many harts, so park this hart */
+	wfi
+	j .Lsecondary_park
+
+END(_start)
+
+	__INIT
+ENTRY(_start_kernel)
+	/* Mask all interrupts */
+	csrw CSR_IE, zero
+	csrw CSR_IP, zero
+
+#ifdef CONFIG_RISCV_M_MODE
+	/* flush the instruction cache */
+	fence.i
+
+	/* Reset all registers except ra, a0, a1 */
+	call reset_regs
+
+	/*
+	 * Setup a PMP to permit access to all of memory. Some machines may
+	 * not implement PMPs, so we set up a quick trap handler to just skip
+	 * touching the PMPs on any trap.
+	 */
+	la a0, pmp_done
+	csrw CSR_TVEC, a0
+
+	li a0, -1
+	csrw CSR_PMPADDR0, a0
+	li a0, (PMP_A_NAPOT | PMP_R | PMP_W | PMP_X)
+	csrw CSR_PMPCFG0, a0
+.align 2
+pmp_done:
+
+	/*
+	 * The hartid in a0 is expected later on, and we have no firmware
+	 * to hand it to us.
+	 */
+	csrr a0, CSR_MHARTID
+#endif /* CONFIG_RISCV_M_MODE */
 
 	/* Load the global pointer */
 .option push
@@ -36,7 +229,14 @@
 	 * floating point in kernel space
 	 */
 	li t0, SR_FS
-	csrc sstatus, t0
+	csrc CSR_STATUS, t0
+
+#ifdef CONFIG_SMP
+	li t0, CONFIG_NR_CPUS
+	blt a0, t0, .Lgood_cores
+	tail .Lsecondary_park
+.Lgood_cores:
+#endif
 
 	/* Pick one hart to run the main boot sequence */
 	la a3, hart_lottery
@@ -44,81 +244,50 @@
 	amoadd.w a3, a2, (a3)
 	bnez a3, .Lsecondary_start
 
+	/* Clear BSS for flat non-ELF images */
+	la a3, __bss_start
+	la a4, __bss_stop
+	ble a4, a3, clear_bss_done
+clear_bss:
+	REG_S zero, (a3)
+	add a3, a3, RISCV_SZPTR
+	blt a3, a4, clear_bss
+clear_bss_done:
+
 	/* Save hart ID and DTB physical address */
 	mv s0, a0
 	mv s1, a1
+	la a2, boot_cpu_hartid
+	REG_S a0, (a2)
 
 	/* Initialize page tables and relocate to virtual addresses */
+	la tp, init_task
 	la sp, init_thread_union + THREAD_SIZE
+	mv a0, s1
 	call setup_vm
+#ifdef CONFIG_MMU
+	la a0, early_pg_dir
 	call relocate
+#endif /* CONFIG_MMU */
 
+	call setup_trap_vector
 	/* Restore C environment */
 	la tp, init_task
-	sw s0, TASK_TI_CPU(tp)
+	sw zero, TASK_TI_CPU(tp)
+	la sp, init_thread_union + THREAD_SIZE
 
-	la sp, init_thread_union
-	li a0, ASM_THREAD_SIZE
-	add sp, sp, a0
-
+#ifdef CONFIG_KASAN
+	call kasan_early_init
+#endif
 	/* Start the kernel */
-	mv a0, s0
-	mv a1, s1
-	call parse_dtb
+	call soc_early_init
 	tail start_kernel
-
-relocate:
-	/* Relocate return address */
-	li a1, PAGE_OFFSET
-	la a0, _start
-	sub a1, a1, a0
-	add ra, ra, a1
-
-	/* Point stvec to virtual address of intruction after satp write */
-	la a0, 1f
-	add a0, a0, a1
-	csrw stvec, a0
-
-	/* Compute satp for kernel page tables, but don't load it yet */
-	la a2, swapper_pg_dir
-	srl a2, a2, PAGE_SHIFT
-	li a1, SATP_MODE
-	or a2, a2, a1
-
-	/*
-	 * Load trampoline page directory, which will cause us to trap to
-	 * stvec if VA != PA, or simply fall through if VA == PA
-	 */
-	la a0, trampoline_pg_dir
-	srl a0, a0, PAGE_SHIFT
-	or a0, a0, a1
-	sfence.vma
-	csrw sptbr, a0
-.align 2
-1:
-	/* Set trap vector to spin forever to help debug */
-	la a0, .Lsecondary_park
-	csrw stvec, a0
-
-	/* Reload the global pointer */
-.option push
-.option norelax
-	la gp, __global_pointer$
-.option pop
-
-	/* Switch to kernel page tables */
-	csrw sptbr, a2
-
-	ret
 
 .Lsecondary_start:
 #ifdef CONFIG_SMP
-	li a1, CONFIG_NR_CPUS
-	bgeu a0, a1, .Lsecondary_park
-
 	/* Set trap vector to spin forever to help debug */
 	la a3, .Lsecondary_park
-	csrw stvec, a3
+	csrw CSR_TVEC, a3
 
 	slli a3, a0, LGREG
 	la a1, __cpu_up_stack_pointer
@@ -138,18 +307,89 @@
 	beqz tp, .Lwait_for_cpu_up
 	fence
 
-	/* Enable virtual memory and relocate to virtual address */
-	call relocate
-
-	tail smp_callin
+	tail secondary_start_common
 #endif
 
-.align 2
-.Lsecondary_park:
-	/* We lack SMP support or have too many harts, so park this hart */
-	wfi
-	j .Lsecondary_park
-END(_start)
+END(_start_kernel)
+
+#ifdef CONFIG_RISCV_M_MODE
+ENTRY(reset_regs)
+	li sp, 0
+	li gp, 0
+	li tp, 0
+	li t0, 0
+	li t1, 0
+	li t2, 0
+	li s0, 0
+	li s1, 0
+	li a2, 0
+	li a3, 0
+	li a4, 0
+	li a5, 0
+	li a6, 0
+	li a7, 0
+	li s2, 0
+	li s3, 0
+	li s4, 0
+	li s5, 0
+	li s6, 0
+	li s7, 0
+	li s8, 0
+	li s9, 0
+	li s10, 0
+	li s11, 0
+	li t3, 0
+	li t4, 0
+	li t5, 0
+	li t6, 0
+	csrw CSR_SCRATCH, 0
+
+#ifdef CONFIG_FPU
+	csrr t0, CSR_MISA
+	andi t0, t0, (COMPAT_HWCAP_ISA_F | COMPAT_HWCAP_ISA_D)
+	beqz t0, .Lreset_regs_done
+
+	li t1, SR_FS
+	csrs CSR_STATUS, t1
+	fmv.s.x f0, zero
+	fmv.s.x f1, zero
+	fmv.s.x f2, zero
+	fmv.s.x f3, zero
+	fmv.s.x f4, zero
+	fmv.s.x f5, zero
+	fmv.s.x f6, zero
+	fmv.s.x f7, zero
+	fmv.s.x f8, zero
+	fmv.s.x f9, zero
+	fmv.s.x f10, zero
+	fmv.s.x f11, zero
+	fmv.s.x f12, zero
+	fmv.s.x f13, zero
+	fmv.s.x f14, zero
+	fmv.s.x f15, zero
+	fmv.s.x f16, zero
+	fmv.s.x f17, zero
+	fmv.s.x f18, zero
+	fmv.s.x f19, zero
+	fmv.s.x f20, zero
+	fmv.s.x f21, zero
+	fmv.s.x f22, zero
+	fmv.s.x f23, zero
+	fmv.s.x f24, zero
+	fmv.s.x f25, zero
+	fmv.s.x f26, zero
+	fmv.s.x f27, zero
+	fmv.s.x f28, zero
+	fmv.s.x f29, zero
+	fmv.s.x f30, zero
+	fmv.s.x f31, zero
+	csrw fcsr, 0
+	/* note that the caller must clear SR_FS */
+#endif /* CONFIG_FPU */
+.Lreset_regs_done:
+	ret
+END(reset_regs)
+#endif /* CONFIG_RISCV_M_MODE */
 
 __PAGE_ALIGNED_BSS
 /* Empty zero page */