commit a5969cabbb4660eab42b6ef0412cbbd1200cf14d
Date:   2024-10-12

--- a/kernel/arch/x86/include/asm/efi.h
+++ b/kernel/arch/x86/include/asm/efi.h
@@ -3,11 +3,15 @@
 #define _ASM_X86_EFI_H
 
 #include <asm/fpu/api.h>
-#include <asm/pgtable.h>
 #include <asm/processor-flags.h>
 #include <asm/tlb.h>
 #include <asm/nospec-branch.h>
 #include <asm/mmu_context.h>
+#include <linux/build_bug.h>
+#include <linux/kernel.h>
+#include <linux/pgtable.h>
+
+extern unsigned long efi_fw_vendor, efi_config_table;
 
 /*
  * We map the EFI regions needed for runtime services non-contiguously,
@@ -18,26 +22,53 @@
  *
  * This is the main reason why we're doing stable VA mappings for RT
  * services.
- *
- * This flag is used in conjuction with a chicken bit called
- * "efi=old_map" which can be used as a fallback to the old runtime
- * services mapping method in case there's some b0rkage with a
- * particular EFI implementation (haha, it is hard to hold up the
- * sarcasm here...).
  */
-#define EFI_OLD_MEMMAP EFI_ARCH_1
 
 #define EFI32_LOADER_SIGNATURE "EL32"
 #define EFI64_LOADER_SIGNATURE "EL64"
 
-#define MAX_CMDLINE_ADDRESS UINT_MAX
-
 #define ARCH_EFI_IRQ_FLAGS_MASK X86_EFLAGS_IF
 
+/*
+ * The EFI services are called through variadic functions in many cases. These
+ * functions are implemented in assembler and support only a fixed number of
+ * arguments. The macros below allows us to check at build time that we don't
+ * try to call them with too many arguments.
+ *
+ * __efi_nargs() will return the number of arguments if it is 7 or less, and
+ * cause a BUILD_BUG otherwise. The limitations of the C preprocessor make it
+ * impossible to calculate the exact number of arguments beyond some
+ * pre-defined limit. The maximum number of arguments currently supported by
+ * any of the thunks is 7, so this is good enough for now and can be extended
+ * in the obvious way if we ever need more.
+ */
+
+#define __efi_nargs(...) __efi_nargs_(__VA_ARGS__)
+#define __efi_nargs_(...) __efi_nargs__(0, ##__VA_ARGS__, \
+        __efi_arg_sentinel(7), __efi_arg_sentinel(6), \
+        __efi_arg_sentinel(5), __efi_arg_sentinel(4), \
+        __efi_arg_sentinel(3), __efi_arg_sentinel(2), \
+        __efi_arg_sentinel(1), __efi_arg_sentinel(0))
+#define __efi_nargs__(_0, _1, _2, _3, _4, _5, _6, _7, n, ...) \
+        __take_second_arg(n, \
+                ({ BUILD_BUG_ON_MSG(1, "__efi_nargs limit exceeded"); 8; }))
+#define __efi_arg_sentinel(n) , n
+
+/*
+ * __efi_nargs_check(f, n, ...) will cause a BUILD_BUG if the ellipsis
+ * represents more than n arguments.
+ */
+
+#define __efi_nargs_check(f, n, ...) \
+        __efi_nargs_check_(f, __efi_nargs(__VA_ARGS__), n)
+#define __efi_nargs_check_(f, p, n) __efi_nargs_check__(f, p, n)
+#define __efi_nargs_check__(f, p, n) ({ \
+        BUILD_BUG_ON_MSG( \
+                (p) > (n), \
+                #f " called with too many arguments (" #p ">" #n ")"); \
+})
+
 #ifdef CONFIG_X86_32
-
-extern asmlinkage unsigned long efi_call_phys(void *, ...);
-
 #define arch_efi_call_virt_setup() \
 ({ \
         kernel_fpu_begin(); \
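
The argument counting above leans on __take_second_arg() from <linux/kconfig.h> and on each sentinel expanding to a leading comma plus its number: when the n slot lands on a sentinel it splits into two arguments, while a genuine eighth caller argument does not. A rough expansion sketch for a three-argument call, written purely as an illustration and not part of the header:

/*
 * Approximate preprocessor expansion of __efi_nargs(a, b, c):
 *
 *   __efi_nargs_(a, b, c)
 *   -> __efi_nargs__(0, a, b, c,
 *                    __efi_arg_sentinel(7), ..., __efi_arg_sentinel(0))
 *      with _0 = 0, _1 = a, _2 = b, _3 = c and n = __efi_arg_sentinel(3)
 *   -> __take_second_arg(, 3, ({ BUILD_BUG_ON_MSG(1, ...); 8; }))
 *   -> 3
 *
 * With eight or more caller arguments, n captures a plain argument with no
 * embedded comma, so __take_second_arg() selects the BUILD_BUG_ON_MSG()
 * branch instead and the build fails.
 */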
@@ -50,24 +81,18 @@
         kernel_fpu_end(); \
 })
 
-
-/*
- * Wrap all the virtual calls in a way that forces the parameters on the stack.
- */
-#define arch_efi_call_virt(p, f, args...) \
-({ \
-        ((efi_##f##_t __attribute__((regparm(0)))*) p->f)(args); \
-})
-
-#define efi_ioremap(addr, size, type, attr) ioremap_cache(addr, size)
+#define arch_efi_call_virt(p, f, args...) p->f(args)
 
 #else /* !CONFIG_X86_32 */
 
 #define EFI_LOADER_SIGNATURE "EL64"
 
-extern asmlinkage u64 efi_call(void *fp, ...);
+extern asmlinkage u64 __efi_call(void *fp, ...);
 
-#define efi_call_phys(f, args...) efi_call((f), args)
+#define efi_call(...) ({ \
+        __efi_nargs_check(efi_call, 7, __VA_ARGS__); \
+        __efi_call(__VA_ARGS__); \
+})
 
 /*
  * struct efi_scratch - Scratch space used while switching to/from efi_mm
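
On the 64-bit side, efi_call() is now a checked wrapper around the renamed assembly thunk rather than the thunk itself. A hypothetical call site, shown only to illustrate what the check adds (fn and a..g are made-up names):

/*
 * Illustration only:
 *
 *   efi_call(fn, a, b, c);              // 4 arguments: expands to __efi_call()
 *   efi_call(fn, a, b, c, d, e, f, g);  // 8 arguments: fails the build,
 *                                       // since the thunk marshals at most 7
 *
 * The bad call is rejected at compile time instead of silently passing more
 * arguments than the assembly thunk knows how to forward.
 */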
@@ -84,9 +109,7 @@
         efi_sync_low_kernel_mappings(); \
         kernel_fpu_begin(); \
         firmware_restrict_branch_speculation_start(); \
-        \
-        if (!efi_enabled(EFI_OLD_MEMMAP)) \
-                efi_switch_mm(&efi_mm); \
+        efi_switch_mm(&efi_mm); \
 })
 
 #define arch_efi_call_virt(p, f, args...) \
@@ -94,15 +117,10 @@
 
 #define arch_efi_call_virt_teardown() \
 ({ \
-        if (!efi_enabled(EFI_OLD_MEMMAP)) \
-                efi_switch_mm(efi_scratch.prev_mm); \
-        \
+        efi_switch_mm(efi_scratch.prev_mm); \
         firmware_restrict_branch_speculation_end(); \
         kernel_fpu_end(); \
 })
-
-extern void __iomem *__init efi_ioremap(unsigned long addr, unsigned long size,
-                                        u32 type, u64 attribute);
 
 #ifdef CONFIG_KASAN
 /*
@@ -119,29 +137,26 @@
 #endif /* CONFIG_X86_32 */
 
 extern struct efi_scratch efi_scratch;
-extern void __init efi_set_executable(efi_memory_desc_t *md, bool executable);
 extern int __init efi_memblock_x86_reserve_range(void);
-extern pgd_t * __init efi_call_phys_prolog(void);
-extern void __init efi_call_phys_epilog(pgd_t *save_pgd);
 extern void __init efi_print_memmap(void);
-extern void __init efi_memory_uc(u64 addr, unsigned long size);
 extern void __init efi_map_region(efi_memory_desc_t *md);
 extern void __init efi_map_region_fixed(efi_memory_desc_t *md);
 extern void efi_sync_low_kernel_mappings(void);
 extern int __init efi_alloc_page_tables(void);
 extern int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages);
-extern void __init old_map_region(efi_memory_desc_t *md);
-extern void __init runtime_code_page_mkexec(void);
 extern void __init efi_runtime_update_mappings(void);
 extern void __init efi_dump_pagetable(void);
 extern void __init efi_apply_memmap_quirks(void);
 extern int __init efi_reuse_config(u64 tables, int nr_tables);
 extern void efi_delete_dummy_variable(void);
 extern void efi_switch_mm(struct mm_struct *mm);
+extern void efi_recover_from_page_fault(unsigned long phys_addr);
+extern void efi_free_boot_services(void);
 
+/* kexec external ABI */
 struct efi_setup_data {
         u64 fw_vendor;
-        u64 runtime;
+        u64 __unused;
         u64 tables;
         u64 smbios;
         u64 reserved[8];
@@ -150,103 +165,221 @@
 extern u64 efi_setup;
 
 #ifdef CONFIG_EFI
+extern efi_status_t __efi64_thunk(u32, ...);
 
-static inline bool efi_is_native(void)
+#define efi64_thunk(...) ({ \
+        __efi_nargs_check(efi64_thunk, 6, __VA_ARGS__); \
+        __efi64_thunk(__VA_ARGS__); \
+})
+
+static inline bool efi_is_mixed(void)
 {
-        return IS_ENABLED(CONFIG_X86_64) == efi_enabled(EFI_64BIT);
+        if (!IS_ENABLED(CONFIG_EFI_MIXED))
+                return false;
+        return IS_ENABLED(CONFIG_X86_64) && !efi_enabled(EFI_64BIT);
 }
 
 static inline bool efi_runtime_supported(void)
 {
-        if (efi_is_native())
+        if (IS_ENABLED(CONFIG_X86_64) == efi_enabled(EFI_64BIT))
                 return true;
 
-        if (IS_ENABLED(CONFIG_EFI_MIXED) && !efi_enabled(EFI_OLD_MEMMAP))
-                return true;
-
-        return false;
+        return IS_ENABLED(CONFIG_EFI_MIXED);
 }
 
-extern struct console early_efi_console;
 extern void parse_efi_setup(u64 phys_addr, u32 data_len);
 
 extern void efifb_setup_from_dmi(struct screen_info *si, const char *opt);
 
-#ifdef CONFIG_EFI_MIXED
 extern void efi_thunk_runtime_setup(void);
-extern efi_status_t efi_thunk_set_virtual_address_map(
-        void *phys_set_virtual_address_map,
-        unsigned long memory_map_size,
-        unsigned long descriptor_size,
-        u32 descriptor_version,
-        efi_memory_desc_t *virtual_map);
-#else
-static inline void efi_thunk_runtime_setup(void) {}
-static inline efi_status_t efi_thunk_set_virtual_address_map(
-        void *phys_set_virtual_address_map,
-        unsigned long memory_map_size,
-        unsigned long descriptor_size,
-        u32 descriptor_version,
-        efi_memory_desc_t *virtual_map)
-{
-        return EFI_SUCCESS;
-}
-#endif /* CONFIG_EFI_MIXED */
-
+efi_status_t efi_set_virtual_address_map(unsigned long memory_map_size,
+                                         unsigned long descriptor_size,
+                                         u32 descriptor_version,
+                                         efi_memory_desc_t *virtual_map,
+                                         unsigned long systab_phys);
 
 /* arch specific definitions used by the stub code */
 
-struct efi_config {
-        u64 image_handle;
-        u64 table;
-        u64 runtime_services;
-        u64 boot_services;
-        u64 text_output;
-        efi_status_t (*call)(unsigned long, ...);
-        bool is64;
-} __packed;
+#ifdef CONFIG_EFI_MIXED
 
-__pure const struct efi_config *__efi_early(void);
+#define ARCH_HAS_EFISTUB_WRAPPERS
 
 static inline bool efi_is_64bit(void)
 {
-        if (!IS_ENABLED(CONFIG_X86_64))
-                return false;
+        extern const bool efi_is64;
 
-        if (!IS_ENABLED(CONFIG_EFI_MIXED))
-                return true;
-
-        return __efi_early()->is64;
+        return efi_is64;
 }
 
-#define efi_table_attr(table, attr, instance) \
-        (efi_is_64bit() ? \
-                ((table##_64_t *)(unsigned long)instance)->attr : \
-                ((table##_32_t *)(unsigned long)instance)->attr)
+static inline bool efi_is_native(void)
+{
+        if (!IS_ENABLED(CONFIG_X86_64))
+                return true;
+        return efi_is_64bit();
+}
 
-#define efi_call_proto(protocol, f, instance, ...) \
-        __efi_early()->call(efi_table_attr(protocol, f, instance), \
-                instance, ##__VA_ARGS__)
+#define efi_mixed_mode_cast(attr) \
+        __builtin_choose_expr( \
+                __builtin_types_compatible_p(u32, __typeof__(attr)), \
+                (unsigned long)(attr), (attr))
 
-#define efi_call_early(f, ...) \
-        __efi_early()->call(efi_table_attr(efi_boot_services, f, \
-                __efi_early()->boot_services), __VA_ARGS__)
+#define efi_table_attr(inst, attr) \
+        (efi_is_native() \
+                ? inst->attr \
+                : (__typeof__(inst->attr)) \
+                        efi_mixed_mode_cast(inst->mixed_mode.attr))
 
-#define __efi_call_early(f, ...) \
-        __efi_early()->call((unsigned long)f, __VA_ARGS__);
+/*
+ * The following macros allow translating arguments if necessary from native to
+ * mixed mode. The use case for this is to initialize the upper 32 bits of
+ * output parameters, and where the 32-bit method requires a 64-bit argument,
+ * which must be split up into two arguments to be thunked properly.
+ *
+ * As examples, the AllocatePool boot service returns the address of the
+ * allocation, but it will not set the high 32 bits of the address. To ensure
+ * that the full 64-bit address is initialized, we zero-init the address before
+ * calling the thunk.
+ *
+ * The FreePages boot service takes a 64-bit physical address even in 32-bit
+ * mode. For the thunk to work correctly, a native 64-bit call of
+ *      free_pages(addr, size)
+ * must be translated to
+ *      efi64_thunk(free_pages, addr & U32_MAX, addr >> 32, size)
+ * so that the two 32-bit halves of addr get pushed onto the stack separately.
+ */
 
-#define efi_call_runtime(f, ...) \
-        __efi_early()->call(efi_table_attr(efi_runtime_services, f, \
-                __efi_early()->runtime_services), __VA_ARGS__)
+static inline void *efi64_zero_upper(void *p)
+{
+        ((u32 *)p)[1] = 0;
+        return p;
+}
+
+static inline u32 efi64_convert_status(efi_status_t status)
+{
+        return (u32)(status | (u64)status >> 32);
+}
+
+#define __efi64_argmap_free_pages(addr, size) \
+        ((addr), 0, (size))
+
+#define __efi64_argmap_get_memory_map(mm_size, mm, key, size, ver) \
+        ((mm_size), (mm), efi64_zero_upper(key), efi64_zero_upper(size), (ver))
+
+#define __efi64_argmap_allocate_pool(type, size, buffer) \
+        ((type), (size), efi64_zero_upper(buffer))
+
+#define __efi64_argmap_create_event(type, tpl, f, c, event) \
+        ((type), (tpl), (f), (c), efi64_zero_upper(event))
+
+#define __efi64_argmap_set_timer(event, type, time) \
+        ((event), (type), lower_32_bits(time), upper_32_bits(time))
+
+#define __efi64_argmap_wait_for_event(num, event, index) \
+        ((num), (event), efi64_zero_upper(index))
+
+#define __efi64_argmap_handle_protocol(handle, protocol, interface) \
+        ((handle), (protocol), efi64_zero_upper(interface))
+
+#define __efi64_argmap_locate_protocol(protocol, reg, interface) \
+        ((protocol), (reg), efi64_zero_upper(interface))
+
+#define __efi64_argmap_locate_device_path(protocol, path, handle) \
+        ((protocol), (path), efi64_zero_upper(handle))
+
+#define __efi64_argmap_exit(handle, status, size, data) \
+        ((handle), efi64_convert_status(status), (size), (data))
+
+/* PCI I/O */
+#define __efi64_argmap_get_location(protocol, seg, bus, dev, func) \
+        ((protocol), efi64_zero_upper(seg), efi64_zero_upper(bus), \
+         efi64_zero_upper(dev), efi64_zero_upper(func))
+
+/* LoadFile */
+#define __efi64_argmap_load_file(protocol, path, policy, bufsize, buf) \
+        ((protocol), (path), (policy), efi64_zero_upper(bufsize), (buf))
+
+/* Graphics Output Protocol */
+#define __efi64_argmap_query_mode(gop, mode, size, info) \
+        ((gop), (mode), efi64_zero_upper(size), efi64_zero_upper(info))
+
+/*
+ * The macros below handle the plumbing for the argument mapping. To add a
+ * mapping for a specific EFI method, simply define a macro
+ * __efi64_argmap_<method name>, following the examples above.
+ */
+
+#define __efi64_thunk_map(inst, func, ...) \
+        efi64_thunk(inst->mixed_mode.func, \
+                __efi64_argmap(__efi64_argmap_ ## func(__VA_ARGS__), \
+                        (__VA_ARGS__)))
+
+#define __efi64_argmap(mapped, args) \
+        __PASTE(__efi64_argmap__, __efi_nargs(__efi_eat mapped))(mapped, args)
+#define __efi64_argmap__0(mapped, args) __efi_eval mapped
+#define __efi64_argmap__1(mapped, args) __efi_eval args
+
+#define __efi_eat(...)
+#define __efi_eval(...) __VA_ARGS__
+
+/* The three macros below handle dispatching via the thunk if needed */
+
+#define efi_call_proto(inst, func, ...) \
+        (efi_is_native() \
+                ? inst->func(inst, ##__VA_ARGS__) \
+                : __efi64_thunk_map(inst, func, inst, ##__VA_ARGS__))
+
+#define efi_bs_call(func, ...) \
+        (efi_is_native() \
+                ? efi_system_table->boottime->func(__VA_ARGS__) \
+                : __efi64_thunk_map(efi_table_attr(efi_system_table, \
+                        boottime), \
+                        func, __VA_ARGS__))
+
+#define efi_rt_call(func, ...) \
+        (efi_is_native() \
+                ? efi_system_table->runtime->func(__VA_ARGS__) \
+                : __efi64_thunk_map(efi_table_attr(efi_system_table, \
+                        runtime), \
+                        func, __VA_ARGS__))
+
+#else /* CONFIG_EFI_MIXED */
+
+static inline bool efi_is_64bit(void)
+{
+        return IS_ENABLED(CONFIG_X86_64);
+}
+
+#endif /* CONFIG_EFI_MIXED */
 
 extern bool efi_reboot_required(void);
+extern bool efi_is_table_address(unsigned long phys_addr);
 
+extern void efi_find_mirror(void);
+extern void efi_reserve_boot_services(void);
 #else
 static inline void parse_efi_setup(u64 phys_addr, u32 data_len) {}
 static inline bool efi_reboot_required(void)
 {
         return false;
 }
+static inline bool efi_is_table_address(unsigned long phys_addr)
+{
+        return false;
+}
+static inline void efi_find_mirror(void)
+{
+}
+static inline void efi_reserve_boot_services(void)
+{
+}
 #endif /* CONFIG_EFI */
 
+#ifdef CONFIG_EFI_FAKE_MEMMAP
+extern void __init efi_fake_memmap_early(void);
+#else
+static inline void efi_fake_memmap_early(void)
+{
+}
+#endif
+
 #endif /* _ASM_X86_EFI_H */
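
Taken together, the CONFIG_EFI_MIXED block above turns a single call site into either a direct native call or a thunked call whose arguments are rewritten by the matching __efi64_argmap_* macro. A rough expansion sketch for one boot service on a 64-bit kernel booted from 32-bit firmware (illustration only; size and addr are hypothetical locals, and the expansion is approximate):

/*
 * Approximate expansion of
 *
 *   efi_bs_call(allocate_pool, EFI_LOADER_DATA, size, &addr);
 *
 * when efi_is_native() is false:
 *
 *   __efi64_thunk_map(efi_table_attr(efi_system_table, boottime),
 *                     allocate_pool, EFI_LOADER_DATA, size, &addr)
 *
 * Because __efi64_argmap_allocate_pool() is defined, __efi64_argmap() picks
 * the rewritten argument list, and the thunk finally sees roughly
 *
 *   efi64_thunk(boottime->mixed_mode.allocate_pool,
 *               (EFI_LOADER_DATA), (size), efi64_zero_upper(&addr));
 *
 * i.e. the upper half of the output pointer is cleared before the 32-bit
 * firmware fills in only its low 32 bits. A service without an
 * __efi64_argmap_<name> macro falls through __efi64_argmap__1() and keeps
 * its original argument list.
 */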