commit 2f7c68cb55ecb7331f2381deb497c27155f32faf
Date: 2024-01-03

--- a/kernel/arch/x86/mm/kaslr.c
+++ b/kernel/arch/x86/mm/kaslr.c
@@ -23,9 +23,9 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/random.h>
+#include <linux/memblock.h>
+#include <linux/pgtable.h>
 
-#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
 #include <asm/setup.h>
 #include <asm/kaslr.h>
 
@@ -58,15 +58,6 @@
 static inline unsigned long get_padding(struct kaslr_memory_region *region)
 {
 	return (region->size_tb << TB_SHIFT);
-}
-
-/*
- * Apply no randomization if KASLR was disabled at boot or if KASAN
- * is enabled. KASAN shadow mappings rely on regions being PGD aligned.
- */
-static inline bool kaslr_memory_enabled(void)
-{
-	return kaslr_enabled() && !IS_ENABLED(CONFIG_KASAN);
 }
 
 /* Initialize base and padding for each memory region randomized with KASLR */
@@ -133,10 +124,7 @@
 		 */
 		entropy = remain_entropy / (ARRAY_SIZE(kaslr_regions) - i);
 		prandom_bytes_state(&rand_state, &rand, sizeof(rand));
-		if (pgtable_l5_enabled())
-			entropy = (rand % (entropy + 1)) & P4D_MASK;
-		else
-			entropy = (rand % (entropy + 1)) & PUD_MASK;
+		entropy = (rand % (entropy + 1)) & PUD_MASK;
 		vaddr += entropy;
 		*kaslr_regions[i].base = vaddr;
 
@@ -145,84 +133,49 @@
 		 * randomization alignment.
 		 */
 		vaddr += get_padding(&kaslr_regions[i]);
-		if (pgtable_l5_enabled())
-			vaddr = round_up(vaddr + 1, P4D_SIZE);
-		else
-			vaddr = round_up(vaddr + 1, PUD_SIZE);
+		vaddr = round_up(vaddr + 1, PUD_SIZE);
 		remain_entropy -= entropy;
 	}
 }
 
-static void __meminit init_trampoline_pud(void)
+void __meminit init_trampoline_kaslr(void)
 {
-	unsigned long paddr, paddr_next;
+	pud_t *pud_page_tramp, *pud, *pud_tramp;
+	p4d_t *p4d_page_tramp, *p4d, *p4d_tramp;
+	unsigned long paddr, vaddr;
 	pgd_t *pgd;
-	pud_t *pud_page, *pud_page_tramp;
-	int i;
 
 	pud_page_tramp = alloc_low_page();
 
+	/*
+	 * There are two mappings for the low 1MB area, the direct mapping
+	 * and the 1:1 mapping for the real mode trampoline:
+	 *
+	 * Direct mapping: virt_addr = phys_addr + PAGE_OFFSET
+	 * 1:1 mapping:    virt_addr = phys_addr
+	 */
 	paddr = 0;
-	pgd = pgd_offset_k((unsigned long)__va(paddr));
-	pud_page = (pud_t *) pgd_page_vaddr(*pgd);
+	vaddr = (unsigned long)__va(paddr);
+	pgd = pgd_offset_k(vaddr);
 
-	for (i = pud_index(paddr); i < PTRS_PER_PUD; i++, paddr = paddr_next) {
-		pud_t *pud, *pud_tramp;
-		unsigned long vaddr = (unsigned long)__va(paddr);
+	p4d = p4d_offset(pgd, vaddr);
+	pud = pud_offset(p4d, vaddr);
 
-		pud_tramp = pud_page_tramp + pud_index(paddr);
-		pud = pud_page + pud_index(vaddr);
-		paddr_next = (paddr & PUD_MASK) + PUD_SIZE;
+	pud_tramp = pud_page_tramp + pud_index(paddr);
+	*pud_tramp = *pud;
 
-		*pud_tramp = *pud;
-	}
-
-	set_pgd(&trampoline_pgd_entry,
-		__pgd(_KERNPG_TABLE | __pa(pud_page_tramp)));
-}
-
-static void __meminit init_trampoline_p4d(void)
-{
-	unsigned long paddr, paddr_next;
-	pgd_t *pgd;
-	p4d_t *p4d_page, *p4d_page_tramp;
-	int i;
-
-	p4d_page_tramp = alloc_low_page();
-
-	paddr = 0;
-	pgd = pgd_offset_k((unsigned long)__va(paddr));
-	p4d_page = (p4d_t *) pgd_page_vaddr(*pgd);
-
-	for (i = p4d_index(paddr); i < PTRS_PER_P4D; i++, paddr = paddr_next) {
-		p4d_t *p4d, *p4d_tramp;
-		unsigned long vaddr = (unsigned long)__va(paddr);
+	if (pgtable_l5_enabled()) {
+		p4d_page_tramp = alloc_low_page();
 
 		p4d_tramp = p4d_page_tramp + p4d_index(paddr);
-		p4d = p4d_page + p4d_index(vaddr);
-		paddr_next = (paddr & P4D_MASK) + P4D_SIZE;
 
-		*p4d_tramp = *p4d;
+		set_p4d(p4d_tramp,
+			__p4d(_KERNPG_TABLE | __pa(pud_page_tramp)));
+
+		trampoline_pgd_entry =
+			__pgd(_KERNPG_TABLE | __pa(p4d_page_tramp));
+	} else {
+		trampoline_pgd_entry =
+			__pgd(_KERNPG_TABLE | __pa(pud_page_tramp));
 	}
-
-	set_pgd(&trampoline_pgd_entry,
-		__pgd(_KERNPG_TABLE | __pa(p4d_page_tramp)));
-}
-
-/*
- * Create PGD aligned trampoline table to allow real mode initialization
- * of additional CPUs. Consume only 1 low memory page.
- */
-void __meminit init_trampoline(void)
-{
-
-	if (!kaslr_memory_enabled()) {
-		init_trampoline_default();
-		return;
-	}
-
-	if (pgtable_l5_enabled())
-		init_trampoline_p4d();
-	else
-		init_trampoline_pud();
 }
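
Note on the kernel_randomize_memory() hunks above: with the pgtable_l5_enabled() branches dropped, the per-region random offset is always truncated to a PUD boundary (1 GiB on x86-64) and each region end is rounded up to PUD_SIZE. Below is a minimal standalone sketch of just the masking step; the fixed "rand" value and entropy budget are hypothetical stand-ins for the prandom_bytes_state() output and the real per-region accounting, not kernel code.

#include <stdint.h>
#include <stdio.h>

/* x86-64 values: one PUD entry maps 1 GiB; the mask shape mirrors the kernel's. */
#define PUD_SHIFT	30
#define PUD_SIZE	(1ULL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE - 1))

int main(void)
{
	/* Hypothetical stand-ins for the prandom output and the per-region budget. */
	uint64_t rand = 0x1234567890abcdefULL;
	uint64_t budget = 0x8000000000ULL;	/* 512 GiB of remaining entropy */

	uint64_t pick = rand % (budget + 1);	/* random offset within the budget */
	uint64_t entropy = pick & PUD_MASK;	/* kept PUD (1 GiB) aligned */

	printf("raw pick:    %#llx\n", (unsigned long long)pick);
	printf("PUD aligned: %#llx\n", (unsigned long long)entropy);
	return 0;
}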
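
And a toy model of what the new init_trampoline_kaslr() builds (plain C, not kernel code; wire_trampoline() and the struct names are made up for illustration): with 4-level paging the trampoline PGD entry points straight at the copied PUD page, while with 5-level paging an extra P4D page is interposed between them.

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-ins for one page-table page per level; not the kernel's types. */
struct pud_page { int dummy; };
struct p4d_page { struct pud_page *entry0; };

struct trampoline {
	void *pgd_target;	/* models what trampoline_pgd_entry ends up referencing */
};

static void wire_trampoline(struct trampoline *t, struct pud_page *pud_tramp,
			    struct p4d_page *p4d_tramp, bool l5_enabled)
{
	if (l5_enabled) {
		/* 5-level paging: PGD -> extra P4D page -> copied PUD page */
		p4d_tramp->entry0 = pud_tramp;
		t->pgd_target = p4d_tramp;
	} else {
		/* 4-level paging: PGD -> copied PUD page directly */
		t->pgd_target = pud_tramp;
	}
}

int main(void)
{
	struct pud_page pud = { 0 };
	struct p4d_page p4d = { 0 };
	struct trampoline t4 = { 0 }, t5 = { 0 };

	wire_trampoline(&t4, &pud, &p4d, false);
	wire_trampoline(&t5, &pud, &p4d, true);

	printf("4-level: pgd entry -> %s\n", t4.pgd_target == &pud ? "pud page" : "?");
	printf("5-level: pgd entry -> %s\n", t5.pgd_target == &p4d ? "p4d page" : "?");
	return 0;
}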