2024-09-20 a36159eec6ca17402b0e146b86efaf76568dc353
kernel/arch/x86/kernel/vmlinux.lds.S
@@ -21,6 +21,10 @@
 #define LOAD_OFFSET __START_KERNEL_map
 #endif
 
+#define RUNTIME_DISCARD_EXIT
+#define EMITS_PT_NOTE
+#define RO_EXCEPTION_TABLE_ALIGN 16
+
 #include <asm-generic/vmlinux.lds.h>
 #include <asm/asm-offsets.h>
 #include <asm/thread_info.h>
@@ -31,7 +35,7 @@
 
 #undef i386 /* in case the preprocessor is a 32bit one */
 
-OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT)
+OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT)
 
 #ifdef CONFIG_X86_32
 OUTPUT_ARCH(i386)
@@ -129,43 +133,37 @@
 LOCK_TEXT
 KPROBES_TEXT
 ALIGN_ENTRY_TEXT_BEGIN
+#ifdef CONFIG_CPU_SRSO
+ *(.text..__x86.rethunk_untrain)
+#endif
+
 ENTRY_TEXT
- IRQENTRY_TEXT
+
+#ifdef CONFIG_CPU_SRSO
+ /*
+ * See the comment above srso_alias_untrain_ret()'s
+ * definition.
+ */
+ . = srso_alias_untrain_ret | (1 << 2) | (1 << 8) | (1 << 14) | (1 << 20);
+ *(.text..__x86.rethunk_safe)
+#endif
 ALIGN_ENTRY_TEXT_END
 SOFTIRQENTRY_TEXT
+ STATIC_CALL_TEXT
 *(.fixup)
 *(.gnu.warning)
 
-#ifdef CONFIG_X86_64
- . = ALIGN(PAGE_SIZE);
- __entry_trampoline_start = .;
- _entry_trampoline = .;
- *(.entry_trampoline)
- . = ALIGN(PAGE_SIZE);
- __entry_trampoline_end = .;
- ASSERT(. - _entry_trampoline == PAGE_SIZE, "entry trampoline is too big");
-#endif
-
 #ifdef CONFIG_RETPOLINE
 __indirect_thunk_start = .;
- *(.text.__x86.indirect_thunk)
+ *(.text..__x86.indirect_thunk)
+ *(.text..__x86.return_thunk)
 __indirect_thunk_end = .;
 #endif
-
-#ifdef CONFIG_CFI_CLANG
- . = ALIGN(PAGE_SIZE);
- __cfi_jt_start = .;
- *(.text..L.cfi.jumptable .text..L.cfi.jumptable.*)
- __cfi_jt_end = .;
-#endif
- } :text = 0x9090
-
- NOTES :text :note
-
- EXCEPTION_TABLE(16) :text = 0x9090
+ } :text =0xcccc
 
 /* End of text section, which should occupy whole number of pages */
 _etext = .;
+
 . = ALIGN(PAGE_SIZE);
 
 X86_ALIGN_RODATA_BEGIN
@@ -211,12 +209,10 @@
 __vvar_beginning_hack = .;
 
 /* Place all vvars at the offsets in asm/vvar.h. */
-#define EMIT_VVAR(name, offset) \
+#define EMIT_VVAR(name, offset) \
 . = __vvar_beginning_hack + offset; \
 *(.vvar_ ## name)
-#define __VVAR_KERNEL_LDS
 #include <asm/vvar.h>
-#undef __VVAR_KERNEL_LDS
 #undef EMIT_VVAR
 
 /*
@@ -291,6 +287,27 @@
 __parainstructions_end = .;
 }
 
+#ifdef CONFIG_RETPOLINE
+ /*
+ * List of instructions that call/jmp/jcc to retpoline thunks
+ * __x86_indirect_thunk_*(). These instructions can be patched along
+ * with alternatives, after which the section can be freed.
+ */
+ . = ALIGN(8);
+ .retpoline_sites : AT(ADDR(.retpoline_sites) - LOAD_OFFSET) {
+ __retpoline_sites = .;
+ *(.retpoline_sites)
+ __retpoline_sites_end = .;
+ }
+
+ . = ALIGN(8);
+ .return_sites : AT(ADDR(.return_sites) - LOAD_OFFSET) {
+ __return_sites = .;
+ *(.return_sites)
+ __return_sites_end = .;
+ }
+#endif
+
 /*
 * struct alt_inst entries. From the header (alternative.h):
 * "Alternative instructions for different CPU types or capabilities"
@@ -333,8 +350,8 @@
 
 . = ALIGN(8);
 /*
- * .exit.text is discard at runtime, not link time, to deal with
- * references from .altinstructions and .eh_frame
+ * .exit.text is discarded at runtime, not link time, to deal with
+ * references from .altinstructions
 */
 .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
 EXIT_TEXT
@@ -385,6 +402,14 @@
 __bss_stop = .;
 }
 
+ /*
+ * The memory occupied from _text to here, __end_of_kernel_reserve, is
+ * automatically reserved in setup_arch(). Anything after here must be
+ * explicitly reserved using memblock_reserve() or it will be discarded
+ * and treated as available memory.
+ */
+ __end_of_kernel_reserve = .;
+
 . = ALIGN(PAGE_SIZE);
 .brk : AT(ADDR(.brk) - LOAD_OFFSET) {
 __brk_base = .;
@@ -396,16 +421,74 @@
 . = ALIGN(PAGE_SIZE); /* keep VO_INIT_SIZE page aligned */
 _end = .;
 
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+ /*
+ * Early scratch/workarea section: Lives outside of the kernel proper
+ * (_text - _end).
+ *
+ * Resides after _end because even though the .brk section is after
+ * __end_of_kernel_reserve, the .brk section is later reserved as a
+ * part of the kernel. Since it is located after __end_of_kernel_reserve
+ * it will be discarded and become part of the available memory. As
+ * such, it can only be used by very early boot code and must not be
+ * needed afterwards.
+ *
+ * Currently used by SME for performing in-place encryption of the
+ * kernel during boot. Resides on a 2MB boundary to simplify the
+ * pagetable setup used for SME in-place encryption.
+ */
+ . = ALIGN(HPAGE_SIZE);
+ .init.scratch : AT(ADDR(.init.scratch) - LOAD_OFFSET) {
+ __init_scratch_begin = .;
+ *(.init.scratch)
+ . = ALIGN(HPAGE_SIZE);
+ __init_scratch_end = .;
+ }
+#endif
+
 STABS_DEBUG
 DWARF_DEBUG
+ ELF_DETAILS
 
- /* Sections to be discarded */
 DISCARDS
- /DISCARD/ : {
- *(.eh_frame)
- }
-}
 
+ /*
+ * Make sure that the .got.plt is either completely empty or it
+ * contains only the lazy dispatch entries.
+ */
+ .got.plt (INFO) : { *(.got.plt) }
+ ASSERT(SIZEOF(.got.plt) == 0 ||
+#ifdef CONFIG_X86_64
+ SIZEOF(.got.plt) == 0x18,
+#else
+ SIZEOF(.got.plt) == 0xc,
+#endif
+ "Unexpected GOT/PLT entries detected!")
+
+ /*
+ * Sections that should stay zero sized, which is safer to
+ * explicitly check instead of blindly discarding.
+ */
+ .got : {
+ *(.got) *(.igot.*)
+ }
+ ASSERT(SIZEOF(.got) == 0, "Unexpected GOT entries detected!")
+
+ .plt : {
+ *(.plt) *(.plt.*) *(.iplt)
+ }
+ ASSERT(SIZEOF(.plt) == 0, "Unexpected run-time procedure linkages detected!")
+
+ .rel.dyn : {
+ *(.rel.*) *(.rel_*)
+ }
+ ASSERT(SIZEOF(.rel.dyn) == 0, "Unexpected run-time relocations (.rel) detected!")
+
+ .rela.dyn : {
+ *(.rela.*) *(.rela_*)
+ }
+ ASSERT(SIZEOF(.rela.dyn) == 0, "Unexpected run-time relocations (.rela) detected!")
+}
 
 #ifdef CONFIG_X86_32
 /*
@@ -420,7 +503,8 @@
 */
 #define INIT_PER_CPU(x) init_per_cpu__##x = ABSOLUTE(x) + __per_cpu_load
 INIT_PER_CPU(gdt_page);
-INIT_PER_CPU(irq_stack_union);
+INIT_PER_CPU(fixed_percpu_data);
+INIT_PER_CPU(irq_stack_backing_store);
 
 /*
 * Build-time check on the image size:
@@ -429,8 +513,29 @@
 "kernel image bigger than KERNEL_IMAGE_SIZE");
 
 #ifdef CONFIG_SMP
-. = ASSERT((irq_stack_union == 0),
- "irq_stack_union is not at start of per-cpu area");
+. = ASSERT((fixed_percpu_data == 0),
+ "fixed_percpu_data is not at start of per-cpu area");
+#endif
+
+#ifdef CONFIG_RETHUNK
+. = ASSERT((retbleed_return_thunk & 0x3f) == 0, "retbleed_return_thunk not cacheline-aligned");
+. = ASSERT((srso_safe_ret & 0x3f) == 0, "srso_safe_ret not cacheline-aligned");
+#endif
+
+#ifdef CONFIG_CPU_SRSO
+/*
+ * GNU ld cannot do XOR until 2.41.
+ * https://sourceware.org/git/?p=binutils-gdb.git;a=commit;h=f6f78318fca803c4907fb8d7f6ded8295f1947b1
+ *
+ * LLVM lld cannot do XOR until lld-17.
+ * https://github.com/llvm/llvm-project/commit/fae96104d4378166cbe5c875ef8ed808a356f3fb
+ *
+ * Instead do: (A | B) - (A & B) in order to compute the XOR
+ * of the two function addresses:
+ */
+. = ASSERT(((ABSOLUTE(srso_alias_untrain_ret) | srso_alias_safe_ret) -
+ (ABSOLUTE(srso_alias_untrain_ret) & srso_alias_safe_ret)) == ((1 << 2) | (1 << 8) | (1 << 14) | (1 << 20)),
+ "SRSO function pair won't alias");
 #endif
 
 #endif /* CONFIG_X86_32 */
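
Note (not part of the diff above): the SRSO hunks place srso_alias_safe_ret (the .text..__x86.rethunk_safe input section) at srso_alias_untrain_ret | (1 << 2) | (1 << 8) | (1 << 14) | (1 << 20), and the final ASSERT re-checks that relationship at link time. Because, per the comment in the script, GNU ld before 2.41 and LLD before 17 have no XOR operator in script expressions, the ASSERT computes the XOR of the two symbol addresses as (A | B) - (A & B). The C sketch below only demonstrates that arithmetic; the numeric address in it is a made-up sample value, not the real kernel layout.

/*
 * Illustration only: verifies the arithmetic used by the SRSO ASSERT.
 * The address below is a hypothetical example, not actual kernel layout.
 */
#include <assert.h>
#include <stdio.h>

#define SRSO_ALIAS_BITS ((1ULL << 2) | (1ULL << 8) | (1ULL << 14) | (1ULL << 20))

int main(void)
{
	/* Hypothetical srso_alias_untrain_ret address with bits 2/8/14/20 clear. */
	unsigned long long untrain_ret = 0xffffffff82000040ULL;
	/* The linker script places srso_alias_safe_ret at untrain_ret | SRSO_ALIAS_BITS. */
	unsigned long long safe_ret = untrain_ret | SRSO_ALIAS_BITS;

	/* (A | B) - (A & B) equals A ^ B for any unsigned A and B ... */
	assert(((untrain_ret | safe_ret) - (untrain_ret & safe_ret)) ==
	       (untrain_ret ^ safe_ret));

	/* ... so the ASSERT requires the pair to differ in exactly bits
	 * 2, 8, 14 and 20, i.e. the two functions alias as intended. */
	assert((untrain_ret ^ safe_ret) == SRSO_ALIAS_BITS);

	printf("untrain_ret=%#llx safe_ret=%#llx xor=%#llx\n",
	       untrain_ret, safe_ret, untrain_ret ^ safe_ret);
	return 0;
}

The .got.plt ASSERT in the same diff follows the same spirit: it tolerates either an empty section or only the three reserved lazy-dispatch entries, which is 3 x 8 = 0x18 bytes on x86_64 and 3 x 4 = 0xc bytes on 32-bit.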