2024-01-04 1543e317f1da31b75942316931e8f491a8920811
--- a/kernel/arch/x86/platform/efi/efi_64.c
+++ b/kernel/arch/x86/platform/efi/efi_64.c
@@ -23,7 +23,7 @@
 #include <linux/mm.h>
 #include <linux/types.h>
 #include <linux/spinlock.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/ioport.h>
 #include <linux/mc146818rtc.h>
 #include <linux/efi.h>
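
Note: the bootmem-to-memblock include swap above tracks the tree-wide removal of the bootmem allocator (v4.20); memblock is its replacement. Early allocations that used to go through bootmem now look roughly like this sketch (memblock_alloc() is the real replacement API; the surrounding function name is illustrative only):

	#include <linux/memblock.h>

	/* illustrative: an early, zeroed allocation via memblock */
	static void __init example_early_alloc(void)
	{
		void *buf = memblock_alloc(PAGE_SIZE, SMP_CACHE_BYTES);

		if (!buf)
			panic("%s: allocation failed\n", __func__);
	}
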
@@ -39,7 +39,6 @@
 #include <asm/setup.h>
 #include <asm/page.h>
 #include <asm/e820/api.h>
-#include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 #include <asm/proto.h>
 #include <asm/efi.h>
@@ -48,6 +47,7 @@
 #include <asm/realmode.h>
 #include <asm/time.h>
 #include <asm/pgalloc.h>
+#include <asm/sev-es.h>
 
 /*
  * We allocate runtime services regions top-down, starting from -4G, i.e.
@@ -56,139 +56,6 @@
 static u64 efi_va = EFI_VA_START;
 
 struct efi_scratch efi_scratch;
-
-static void __init early_code_mapping_set_exec(int executable)
-{
-	efi_memory_desc_t *md;
-
-	if (!(__supported_pte_mask & _PAGE_NX))
-		return;
-
-	/* Make EFI service code area executable */
-	for_each_efi_memory_desc(md) {
-		if (md->type == EFI_RUNTIME_SERVICES_CODE ||
-		    md->type == EFI_BOOT_SERVICES_CODE)
-			efi_set_executable(md, executable);
-	}
-}
-
-pgd_t * __init efi_call_phys_prolog(void)
-{
-	unsigned long vaddr, addr_pgd, addr_p4d, addr_pud;
-	pgd_t *save_pgd, *pgd_k, *pgd_efi;
-	p4d_t *p4d, *p4d_k, *p4d_efi;
-	pud_t *pud;
-
-	int pgd;
-	int n_pgds, i, j;
-
-	if (!efi_enabled(EFI_OLD_MEMMAP)) {
-		efi_switch_mm(&efi_mm);
-		return NULL;
-	}
-
-	early_code_mapping_set_exec(1);
-
-	n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE);
-	save_pgd = kmalloc_array(n_pgds, sizeof(*save_pgd), GFP_KERNEL);
-
-	/*
-	 * Build 1:1 identity mapping for efi=old_map usage. Note that
-	 * PAGE_OFFSET is PGDIR_SIZE aligned when KASLR is disabled, while
-	 * it is PUD_SIZE ALIGNED with KASLR enabled. So for a given physical
-	 * address X, the pud_index(X) != pud_index(__va(X)), we can only copy
-	 * PUD entry of __va(X) to fill in pud entry of X to build 1:1 mapping.
-	 * This means here we can only reuse the PMD tables of the direct mapping.
-	 */
-	for (pgd = 0; pgd < n_pgds; pgd++) {
-		addr_pgd = (unsigned long)(pgd * PGDIR_SIZE);
-		vaddr = (unsigned long)__va(pgd * PGDIR_SIZE);
-		pgd_efi = pgd_offset_k(addr_pgd);
-		save_pgd[pgd] = *pgd_efi;
-
-		p4d = p4d_alloc(&init_mm, pgd_efi, addr_pgd);
-		if (!p4d) {
-			pr_err("Failed to allocate p4d table!\n");
-			goto out;
-		}
-
-		for (i = 0; i < PTRS_PER_P4D; i++) {
-			addr_p4d = addr_pgd + i * P4D_SIZE;
-			p4d_efi = p4d + p4d_index(addr_p4d);
-
-			pud = pud_alloc(&init_mm, p4d_efi, addr_p4d);
-			if (!pud) {
-				pr_err("Failed to allocate pud table!\n");
-				goto out;
-			}
-
-			for (j = 0; j < PTRS_PER_PUD; j++) {
-				addr_pud = addr_p4d + j * PUD_SIZE;
-
-				if (addr_pud > (max_pfn << PAGE_SHIFT))
-					break;
-
-				vaddr = (unsigned long)__va(addr_pud);
-
-				pgd_k = pgd_offset_k(vaddr);
-				p4d_k = p4d_offset(pgd_k, vaddr);
-				pud[j] = *pud_offset(p4d_k, vaddr);
-			}
-		}
-		pgd_offset_k(pgd * PGDIR_SIZE)->pgd &= ~_PAGE_NX;
-	}
-
-out:
-	__flush_tlb_all();
-
-	return save_pgd;
-}
-
-void __init efi_call_phys_epilog(pgd_t *save_pgd)
-{
-	/*
-	 * After the lock is released, the original page table is restored.
-	 */
-	int pgd_idx, i;
-	int nr_pgds;
-	pgd_t *pgd;
-	p4d_t *p4d;
-	pud_t *pud;
-
-	if (!efi_enabled(EFI_OLD_MEMMAP)) {
-		efi_switch_mm(efi_scratch.prev_mm);
-		return;
-	}
-
-	nr_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT) , PGDIR_SIZE);
-
-	for (pgd_idx = 0; pgd_idx < nr_pgds; pgd_idx++) {
-		pgd = pgd_offset_k(pgd_idx * PGDIR_SIZE);
-		set_pgd(pgd_offset_k(pgd_idx * PGDIR_SIZE), save_pgd[pgd_idx]);
-
-		if (!pgd_present(*pgd))
-			continue;
-
-		for (i = 0; i < PTRS_PER_P4D; i++) {
-			p4d = p4d_offset(pgd,
-					 pgd_idx * PGDIR_SIZE + i * P4D_SIZE);
-
-			if (!p4d_present(*p4d))
-				continue;
-
-			pud = (pud_t *)p4d_page_vaddr(*p4d);
-			pud_free(&init_mm, pud);
-		}
-
-		p4d = (p4d_t *)pgd_page_vaddr(*pgd);
-		p4d_free(&init_mm, p4d);
-	}
-
-	kfree(save_pgd);
-
-	__flush_tlb_all();
-	early_code_mapping_set_exec(0);
-}
 
 EXPORT_SYMBOL_GPL(efi_mm);
 
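
Note: the deleted efi_call_phys_prolog() saved one swapper_pg_dir entry per PGDIR_SIZE slice of physical memory before building its temporary 1:1 map. A stand-alone model of that sizing arithmetic (plain C; DIV_ROUND_UP is the real kernel macro, the 512 GiB PGDIR_SIZE assumes 4-level paging, and the RAM size is made up):

	#include <stdio.h>
	#include <stdint.h>

	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))	/* as in the kernel */

	int main(void)
	{
		uint64_t pgdir_size = 1ULL << 39;	/* 512 GiB per PGD entry, 4-level paging */
		uint64_t max_phys   = 17ULL << 30;	/* pretend RAM tops out at 17 GiB */

		/* mirrors n_pgds = DIV_ROUND_UP(max_pfn << PAGE_SHIFT, PGDIR_SIZE) */
		printf("n_pgds = %llu\n",
		       (unsigned long long)DIV_ROUND_UP(max_phys, pgdir_size));
		return 0;	/* prints n_pgds = 1 */
	}
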
@@ -207,9 +74,6 @@
 	p4d_t *p4d;
 	pud_t *pud;
 	gfp_t gfp_mask;
-
-	if (efi_enabled(EFI_OLD_MEMMAP))
-		return 0;
 
 	gfp_mask = GFP_KERNEL | __GFP_ZERO;
 	efi_pgd = (pgd_t *)__get_free_pages(gfp_mask, PGD_ALLOCATION_ORDER);
@@ -251,33 +115,11 @@
 	pud_t *pud_k, *pud_efi;
 	pgd_t *efi_pgd = efi_mm.pgd;
 
-	if (efi_enabled(EFI_OLD_MEMMAP))
-		return;
-
-	/*
-	 * We can share all PGD entries apart from the one entry that
-	 * covers the EFI runtime mapping space.
-	 *
-	 * Make sure the EFI runtime region mappings are guaranteed to
-	 * only span a single PGD entry and that the entry also maps
-	 * other important kernel regions.
-	 */
-	MAYBE_BUILD_BUG_ON(pgd_index(EFI_VA_END) != pgd_index(MODULES_END));
-	MAYBE_BUILD_BUG_ON((EFI_VA_START & PGDIR_MASK) !=
-			(EFI_VA_END & PGDIR_MASK));
-
 	pgd_efi = efi_pgd + pgd_index(PAGE_OFFSET);
 	pgd_k = pgd_offset_k(PAGE_OFFSET);
 
 	num_entries = pgd_index(EFI_VA_END) - pgd_index(PAGE_OFFSET);
 	memcpy(pgd_efi, pgd_k, sizeof(pgd_t) * num_entries);
-
-	/*
-	 * As with PGDs, we share all P4D entries apart from the one entry
-	 * that covers the EFI runtime mapping space.
-	 */
-	BUILD_BUG_ON(p4d_index(EFI_VA_END) != p4d_index(MODULES_END));
-	BUILD_BUG_ON((EFI_VA_START & P4D_MASK) != (EFI_VA_END & P4D_MASK));
 
 	pgd_efi = efi_pgd + pgd_index(EFI_VA_END);
 	pgd_k = pgd_offset_k(EFI_VA_END);
@@ -337,13 +179,10 @@
 
 int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
 {
-	unsigned long pfn, text, pf;
+	unsigned long pfn, text, pf, rodata;
 	struct page *page;
 	unsigned npages;
 	pgd_t *pgd = efi_mm.pgd;
-
-	if (efi_enabled(EFI_OLD_MEMMAP))
-		return 0;
 
 	/*
	 * It can happen that the physical address of new_memmap lands in memory
@@ -369,12 +208,17 @@
 	 * as trim_bios_range() will reserve the first page and isolate it away
 	 * from memory allocators anyway.
 	 */
-	pf = _PAGE_RW;
-	if (sev_active())
-		pf |= _PAGE_ENC;
-
 	if (kernel_map_pages_in_pgd(pgd, 0x0, 0x0, 1, pf)) {
 		pr_err("Failed to create 1:1 mapping for the first page!\n");
+		return 1;
+	}
+
+	/*
+	 * When SEV-ES is active, the GHCB as set by the kernel will be used
+	 * by firmware. Create a 1:1 unencrypted mapping for each GHCB.
+	 */
+	if (sev_es_efi_map_ghcbs(pgd)) {
+		pr_err("Failed to create 1:1 mapping for the GHCBs!\n");
 		return 1;
 	}
 
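
Note: sev_es_efi_map_ghcbs() comes from the new <asm/sev-es.h> include; only its call site appears in this patch. A sketch of the shape such a helper takes (the per-CPU runtime_data structure is assumed here, it is not shown in this diff): every possible CPU's GHCB page gets a 1:1 mapping whose protection bits deliberately omit _PAGE_ENC, so the page stays shared and the firmware sees it unencrypted:

	int __init sev_es_efi_map_ghcbs(pgd_t *pgd)
	{
		struct sev_es_runtime_data *data;
		unsigned long address, pflags;
		int cpu;
		u64 pfn;

		if (!sev_es_active())
			return 0;

		pflags = _PAGE_NX | _PAGE_RW;	/* no _PAGE_ENC: firmware must see plaintext */

		for_each_possible_cpu(cpu) {
			data = per_cpu(runtime_data, cpu);

			address = __pa(&data->ghcb_page);
			pfn = address >> PAGE_SHIFT;

			if (kernel_map_pages_in_pgd(pgd, pfn, address, 1, pflags))
				return 1;
		}

		return 0;
	}
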
@@ -384,7 +228,7 @@
 	 * text and allocate a new stack because we can't rely on the
 	 * stack pointer being < 4GB.
 	 */
-	if (!IS_ENABLED(CONFIG_EFI_MIXED) || efi_is_native())
+	if (!efi_is_mixed())
 		return 0;
 
 	page = alloc_page(GFP_KERNEL|__GFP_DMA32);
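
Note: efi_is_mixed() replaces the open-coded CONFIG_EFI_MIXED/efi_is_native() test here and in the hunks below. The helper lives in <asm/efi.h>; a sketch of its likely definition, equivalent to the condition it replaces:

	static inline bool efi_is_mixed(void)
	{
		if (!IS_ENABLED(CONFIG_EFI_MIXED))
			return false;
		/* mixed mode: 64-bit kernel running on 32-bit firmware */
		return IS_ENABLED(CONFIG_X86_64) && !efi_enabled(EFI_64BIT);
	}
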
@@ -399,9 +243,19 @@
 	text = __pa(_text);
 	pfn = text >> PAGE_SHIFT;
 
-	pf = _PAGE_RW | _PAGE_ENC;
+	pf = _PAGE_ENC;
 	if (kernel_map_pages_in_pgd(pgd, pfn, text, npages, pf)) {
 		pr_err("Failed to map kernel text 1:1\n");
+		return 1;
+	}
+
+	npages = (__end_rodata - __start_rodata) >> PAGE_SHIFT;
+	rodata = __pa(__start_rodata);
+	pfn = rodata >> PAGE_SHIFT;
+
+	pf = _PAGE_NX | _PAGE_ENC;
+	if (kernel_map_pages_in_pgd(pgd, pfn, rodata, npages, pf)) {
+		pr_err("Failed to map kernel rodata 1:1\n");
 		return 1;
 	}
 
@@ -413,6 +267,22 @@
 	unsigned long flags = _PAGE_RW;
 	unsigned long pfn;
 	pgd_t *pgd = efi_mm.pgd;
+
+	/*
+	 * EFI_RUNTIME_SERVICES_CODE regions typically cover PE/COFF
+	 * executable images in memory that consist of both R-X and
+	 * RW- sections, so we cannot apply read-only or non-exec
+	 * permissions just yet. However, modern EFI systems provide
+	 * a memory attributes table that describes those sections
+	 * with the appropriate restricted permissions, which are
+	 * applied in efi_runtime_update_mappings() below. All other
+	 * regions can be mapped non-executable at this point, with
+	 * the exception of boot services code regions, but those will
+	 * be unmapped again entirely in efi_free_boot_services().
+	 */
+	if (md->type != EFI_BOOT_SERVICES_CODE &&
+	    md->type != EFI_RUNTIME_SERVICES_CODE)
+		flags |= _PAGE_NX;
 
 	if (!(md->attribute & EFI_MEMORY_WB))
 		flags |= _PAGE_PCD;
@@ -431,9 +301,6 @@
 	unsigned long size = md->num_pages << PAGE_SHIFT;
 	u64 pa = md->phys_addr;
 
-	if (efi_enabled(EFI_OLD_MEMMAP))
-		return old_map_region(md);
-
 	/*
 	 * Make sure the 1:1 mappings are present as a catch-all for b0rked
 	 * firmware which doesn't update all internal pointers after switching
@@ -446,7 +313,7 @@
 	 * booting in EFI mixed mode, because even though we may be
 	 * running a 64-bit kernel, the firmware may only be 32-bit.
 	 */
-	if (!efi_is_native() && IS_ENABLED(CONFIG_EFI_MIXED)) {
+	if (efi_is_mixed()) {
 		md->virt_addr = md->phys_addr;
 		return;
 	}
@@ -486,26 +353,6 @@
 {
 	__map_region(md, md->phys_addr);
 	__map_region(md, md->virt_addr);
-}
-
-void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size,
-				 u32 type, u64 attribute)
-{
-	unsigned long last_map_pfn;
-
-	if (type == EFI_MEMORY_MAPPED_IO)
-		return ioremap(phys_addr, size);
-
-	last_map_pfn = init_memory_mapping(phys_addr, phys_addr + size);
-	if ((last_map_pfn << PAGE_SHIFT) < phys_addr + size) {
-		unsigned long top = last_map_pfn << PAGE_SHIFT;
-		efi_ioremap(top, size - (top - phys_addr), type, attribute);
-	}
-
-	if (!(attribute & EFI_MEMORY_WB))
-		efi_memory_uc((u64)(unsigned long)__va(phys_addr), size);
-
-	return (void __iomem *)__va(phys_addr);
 }
 
 void __init parse_efi_setup(u64 phys_addr, u32 data_len)
@@ -556,12 +403,6 @@
 {
 	efi_memory_desc_t *md;
 
-	if (efi_enabled(EFI_OLD_MEMMAP)) {
-		if (__supported_pte_mask & _PAGE_NX)
-			runtime_code_page_mkexec();
-		return;
-	}
-
 	/*
 	 * Use the EFI Memory Attribute Table for mapping permissions if it
 	 * exists, since it is intended to supersede EFI_PROPERTIES_TABLE.
@@ -610,10 +451,7 @@
 void __init efi_dump_pagetable(void)
 {
 #ifdef CONFIG_EFI_PGT_DUMP
-	if (efi_enabled(EFI_OLD_MEMMAP))
-		ptdump_walk_pgd_level(NULL, swapper_pg_dir);
-	else
-		ptdump_walk_pgd_level(NULL, efi_mm.pgd);
+	ptdump_walk_pgd_level(NULL, &efi_mm);
 #endif
 }
 
@@ -631,63 +469,71 @@
 	switch_mm(efi_scratch.prev_mm, mm, NULL);
 }
 
-#ifdef CONFIG_EFI_MIXED
-extern efi_status_t efi64_thunk(u32, ...);
-
 static DEFINE_SPINLOCK(efi_runtime_lock);
 
-#define runtime_service32(func)						\
-({									\
-	u32 table = (u32)(unsigned long)efi.systab;			\
-	u32 *rt, *___f;							\
-									\
-	rt = (u32 *)(table + offsetof(efi_system_table_32_t, runtime)); \
-	___f = (u32 *)(*rt + offsetof(efi_runtime_services_32_t, func)); \
-	*___f;								\
+/*
+ * DS and ES contain user values. We need to save them.
+ * The 32-bit EFI code needs a valid DS, ES, and SS. There's no
+ * need to save the old SS: __KERNEL_DS is always acceptable.
+ */
+#define __efi_thunk(func, ...)						\
+({									\
+	unsigned short __ds, __es;					\
+	efi_status_t ____s;						\
+									\
+	savesegment(ds, __ds);						\
+	savesegment(es, __es);						\
+									\
+	loadsegment(ss, __KERNEL_DS);					\
+	loadsegment(ds, __KERNEL_DS);					\
+	loadsegment(es, __KERNEL_DS);					\
+									\
+	____s = efi64_thunk(efi.runtime->mixed_mode.func, __VA_ARGS__);	\
+									\
+	loadsegment(ds, __ds);						\
+	loadsegment(es, __es);						\
+									\
+	____s ^= (____s & BIT(31)) | (____s & BIT_ULL(31)) << 32;	\
+	____s;								\
 })
 
 /*
  * Switch to the EFI page tables early so that we can access the 1:1
  * runtime services mappings which are not mapped in any other page
- * tables. This function must be called before runtime_service32().
+ * tables.
  *
  * Also, disable interrupts because the IDT points to 64-bit handlers,
  * which aren't going to function correctly when we switch to 32-bit.
  */
-#define efi_thunk(f, ...)						\
+#define efi_thunk(func...)						\
 ({									\
 	efi_status_t __s;						\
-	u32 __func;							\
 									\
 	arch_efi_call_virt_setup();					\
 									\
-	__func = runtime_service32(f);					\
-	__s = efi64_thunk(__func, __VA_ARGS__);				\
+	__s = __efi_thunk(func);					\
 									\
 	arch_efi_call_virt_teardown();					\
 									\
 	__s;								\
 })
 
-efi_status_t efi_thunk_set_virtual_address_map(
-	void *phys_set_virtual_address_map,
-	unsigned long memory_map_size,
-	unsigned long descriptor_size,
-	u32 descriptor_version,
-	efi_memory_desc_t *virtual_map)
+static efi_status_t __init __no_sanitize_address
+efi_thunk_set_virtual_address_map(unsigned long memory_map_size,
+				  unsigned long descriptor_size,
+				  u32 descriptor_version,
+				  efi_memory_desc_t *virtual_map)
 {
 	efi_status_t status;
 	unsigned long flags;
-	u32 func;
 
 	efi_sync_low_kernel_mappings();
 	local_irq_save(flags);
 
 	efi_switch_mm(&efi_mm);
 
-	func = (u32)(unsigned long)phys_set_virtual_address_map;
-	status = efi64_thunk(func, memory_map_size, descriptor_size,
-			     descriptor_version, virtual_map);
+	status = __efi_thunk(set_virtual_address_map, memory_map_size,
+			     descriptor_size, descriptor_version, virtual_map);
 
 	efi_switch_mm(efi_scratch.prev_mm);
 	local_irq_restore(flags);
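
Note: the one non-obvious line in the new __efi_thunk() is the status fix-up. A 32-bit firmware returns a status whose error flag sits in bit 31, while the 64-bit efi_status_t keeps it in bit 63; the XOR clears bit 31 and sets bit 63 exactly when bit 31 was set. A stand-alone model (plain C, with BIT()/BIT_ULL() expanded and an illustrative value):

	#include <stdint.h>
	#include <stdio.h>

	static uint64_t widen_status(uint64_t s)	/* s = zero-extended 32-bit status */
	{
		/* same expression as in the macro above */
		s ^= (s & (1ULL << 31)) | ((s & (1ULL << 31)) << 32);
		return s;
	}

	int main(void)
	{
		uint64_t unsupported32 = (1ULL << 31) | 3;	/* 32-bit EFI_UNSUPPORTED */

		/* prints 0x80000003 -> 0x8000000000000003 */
		printf("%#llx -> %#llx\n",
		       (unsigned long long)unsupported32,
		       (unsigned long long)widen_status(unsupported32));
		return 0;
	}
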
@@ -697,85 +543,25 @@
 
 static efi_status_t efi_thunk_get_time(efi_time_t *tm, efi_time_cap_t *tc)
 {
-	efi_status_t status;
-	u32 phys_tm, phys_tc;
-	unsigned long flags;
-
-	spin_lock(&rtc_lock);
-	spin_lock_irqsave(&efi_runtime_lock, flags);
-
-	phys_tm = virt_to_phys_or_null(tm);
-	phys_tc = virt_to_phys_or_null(tc);
-
-	status = efi_thunk(get_time, phys_tm, phys_tc);
-
-	spin_unlock_irqrestore(&efi_runtime_lock, flags);
-	spin_unlock(&rtc_lock);
-
-	return status;
+	return EFI_UNSUPPORTED;
 }
 
 static efi_status_t efi_thunk_set_time(efi_time_t *tm)
 {
-	efi_status_t status;
-	u32 phys_tm;
-	unsigned long flags;
-
-	spin_lock(&rtc_lock);
-	spin_lock_irqsave(&efi_runtime_lock, flags);
-
-	phys_tm = virt_to_phys_or_null(tm);
-
-	status = efi_thunk(set_time, phys_tm);
-
-	spin_unlock_irqrestore(&efi_runtime_lock, flags);
-	spin_unlock(&rtc_lock);
-
-	return status;
+	return EFI_UNSUPPORTED;
 }
 
 static efi_status_t
 efi_thunk_get_wakeup_time(efi_bool_t *enabled, efi_bool_t *pending,
			  efi_time_t *tm)
 {
-	efi_status_t status;
-	u32 phys_enabled, phys_pending, phys_tm;
-	unsigned long flags;
-
-	spin_lock(&rtc_lock);
-	spin_lock_irqsave(&efi_runtime_lock, flags);
-
-	phys_enabled = virt_to_phys_or_null(enabled);
-	phys_pending = virt_to_phys_or_null(pending);
-	phys_tm = virt_to_phys_or_null(tm);
-
-	status = efi_thunk(get_wakeup_time, phys_enabled,
-			   phys_pending, phys_tm);
-
-	spin_unlock_irqrestore(&efi_runtime_lock, flags);
-	spin_unlock(&rtc_lock);
-
-	return status;
+	return EFI_UNSUPPORTED;
 }
 
 static efi_status_t
 efi_thunk_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
 {
-	efi_status_t status;
-	u32 phys_tm;
-	unsigned long flags;
-
-	spin_lock(&rtc_lock);
-	spin_lock_irqsave(&efi_runtime_lock, flags);
-
-	phys_tm = virt_to_phys_or_null(tm);
-
-	status = efi_thunk(set_wakeup_time, enabled, phys_tm);
-
-	spin_unlock_irqrestore(&efi_runtime_lock, flags);
-	spin_unlock(&rtc_lock);
-
-	return status;
+	return EFI_UNSUPPORTED;
 }
 
 static unsigned long efi_name_size(efi_char16_t *name)
@@ -909,18 +695,7 @@
 static efi_status_t
 efi_thunk_get_next_high_mono_count(u32 *count)
 {
-	efi_status_t status;
-	u32 phys_count;
-	unsigned long flags;
-
-	spin_lock_irqsave(&efi_runtime_lock, flags);
-
-	phys_count = virt_to_phys_or_null(count);
-	status = efi_thunk(get_next_high_mono_count, phys_count);
-
-	spin_unlock_irqrestore(&efi_runtime_lock, flags);
-
-	return status;
+	return EFI_UNSUPPORTED;
 }
 
 static void
@@ -1017,8 +792,11 @@
 	return EFI_UNSUPPORTED;
 }
 
-void efi_thunk_runtime_setup(void)
+void __init efi_thunk_runtime_setup(void)
 {
+	if (!IS_ENABLED(CONFIG_EFI_MIXED))
+		return;
+
 	efi.get_time = efi_thunk_get_time;
 	efi.set_time = efi_thunk_set_time;
 	efi.get_wakeup_time = efi_thunk_get_wakeup_time;
@@ -1034,4 +812,40 @@
 	efi.update_capsule = efi_thunk_update_capsule;
 	efi.query_capsule_caps = efi_thunk_query_capsule_caps;
 }
-#endif /* CONFIG_EFI_MIXED */
+
+efi_status_t __init __no_sanitize_address
+efi_set_virtual_address_map(unsigned long memory_map_size,
+			    unsigned long descriptor_size,
+			    u32 descriptor_version,
+			    efi_memory_desc_t *virtual_map,
+			    unsigned long systab_phys)
+{
+	const efi_system_table_t *systab = (efi_system_table_t *)systab_phys;
+	efi_status_t status;
+	unsigned long flags;
+
+	if (efi_is_mixed())
+		return efi_thunk_set_virtual_address_map(memory_map_size,
+							 descriptor_size,
+							 descriptor_version,
+							 virtual_map);
+	efi_switch_mm(&efi_mm);
+
+	kernel_fpu_begin();
+
+	/* Disable interrupts around EFI calls: */
+	local_irq_save(flags);
+	status = efi_call(efi.runtime->set_virtual_address_map,
+			  memory_map_size, descriptor_size,
+			  descriptor_version, virtual_map);
+	local_irq_restore(flags);
+
+	kernel_fpu_end();
+
+	/* grab the virtually remapped EFI runtime services table pointer */
+	efi.runtime = READ_ONCE(systab->runtime);
+
+	efi_switch_mm(efi_scratch.prev_mm);
+
+	return status;
+}
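
Note: efi.runtime->mixed_mode.func, used by __efi_thunk() above, relies on efi_runtime_services_t being a union of the native 64-bit table layout with the 32-bit one, so the same table pointer can be read either way depending on firmware bitness. A simplified stand-alone model of that overlay (types reduced to two slots; not the kernel's actual definition):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	struct rt32 { uint32_t get_time; uint32_t set_time; };	/* 32-bit firmware layout */
	struct rt64 { uint64_t get_time; uint64_t set_time; };	/* native 64-bit layout */

	union rt {
		struct rt64 native;
		struct rt32 mixed_mode;
	};

	int main(void)
	{
		union rt rt = { 0 };
		uint32_t fn = 0x12345678;	/* fake 32-bit entry point */

		/* pretend 32-bit firmware populated its get_time slot */
		memcpy(&rt, &fn, sizeof(fn));

		/* read through the 32-bit view: prints get_time = 0x12345678 */
		printf("get_time = %#x\n", rt.mixed_mode.get_time);
		return 0;
	}
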