2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
kernel/arch/powerpc/kernel/kvm.c
@@ -1,22 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
  * Copyright 2010-2011 Freescale Semiconductor, Inc.
  *
  * Authors:
  *     Alexander Graf <agraf@suse.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
  */
 
 #include <linux/kvm_host.h>
@@ -76,16 +64,17 @@
 #define KVM_INST_MTSRIN	0x7c0001e4
 
 static bool kvm_patching_worked = true;
-char kvm_tmp[1024 * 1024];
+extern char kvm_tmp[];
+extern char kvm_tmp_end[];
 static int kvm_tmp_index;
 
-static inline void kvm_patch_ins(u32 *inst, u32 new_inst)
+static void __init kvm_patch_ins(u32 *inst, u32 new_inst)
 {
 	*inst = new_inst;
 	flush_icache_range((ulong)inst, (ulong)inst + 4);
 }
 
-static void kvm_patch_ins_ll(u32 *inst, long addr, u32 rt)
+static void __init kvm_patch_ins_ll(u32 *inst, long addr, u32 rt)
 {
 #ifdef CONFIG_64BIT
 	kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc));
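Most of the hunks in this patch make the same change as the two function hunks above: the boot-time patching helpers are tagged __init, which places them in the kernel's .init.text section so their memory is reclaimed once booting (and therefore all instruction patching) is finished. A simplified stand-alone sketch of that mechanism, keeping only the section-placement part of the macro (the real definition in include/linux/init.h carries further attributes such as __cold):

/* Illustration only: a stand-in for the kernel's __init marker. */
#define my_init __attribute__((__section__(".init.text")))

static int my_init example_setup(void)
{
	/* Runs once at startup; in the kernel, .init.text is freed afterwards. */
	return 0;
}

int main(void)
{
	return example_setup();
}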
@@ -94,7 +83,7 @@
 #endif
 }
 
-static void kvm_patch_ins_ld(u32 *inst, long addr, u32 rt)
+static void __init kvm_patch_ins_ld(u32 *inst, long addr, u32 rt)
 {
 #ifdef CONFIG_64BIT
 	kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc));
@@ -103,12 +92,12 @@
 #endif
 }
 
-static void kvm_patch_ins_lwz(u32 *inst, long addr, u32 rt)
+static void __init kvm_patch_ins_lwz(u32 *inst, long addr, u32 rt)
 {
 	kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000ffff));
 }
 
-static void kvm_patch_ins_std(u32 *inst, long addr, u32 rt)
+static void __init kvm_patch_ins_std(u32 *inst, long addr, u32 rt)
 {
 #ifdef CONFIG_64BIT
 	kvm_patch_ins(inst, KVM_INST_STD | rt | (addr & 0x0000fffc));
@@ -117,17 +106,17 @@
 #endif
 }
 
-static void kvm_patch_ins_stw(u32 *inst, long addr, u32 rt)
+static void __init kvm_patch_ins_stw(u32 *inst, long addr, u32 rt)
 {
 	kvm_patch_ins(inst, KVM_INST_STW | rt | (addr & 0x0000fffc));
 }
 
-static void kvm_patch_ins_nop(u32 *inst)
+static void __init kvm_patch_ins_nop(u32 *inst)
 {
 	kvm_patch_ins(inst, KVM_INST_NOP);
 }
 
-static void kvm_patch_ins_b(u32 *inst, int addr)
+static void __init kvm_patch_ins_b(u32 *inst, int addr)
 {
 #if defined(CONFIG_RELOCATABLE) && defined(CONFIG_PPC_BOOK3S)
 	/* On relocatable kernels interrupts handlers and our code
@@ -140,11 +129,11 @@
 	kvm_patch_ins(inst, KVM_INST_B | (addr & KVM_INST_B_MASK));
 }
 
-static u32 *kvm_alloc(int len)
+static u32 * __init kvm_alloc(int len)
 {
 	u32 *p;
 
-	if ((kvm_tmp_index + len) > ARRAY_SIZE(kvm_tmp)) {
+	if ((kvm_tmp_index + len) > (kvm_tmp_end - kvm_tmp)) {
 		printk(KERN_ERR "KVM: No more space (%d + %d)\n",
 		       kvm_tmp_index, len);
 		kvm_patching_worked = false;
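kvm_tmp is no longer a 1 MiB array owned by this file, so its size cannot be taken with ARRAY_SIZE() any more; kvm_alloc() now bounds-checks against the distance between the externally provided kvm_tmp and kvm_tmp_end markers. A stand-alone sketch of that pattern, with hypothetical patch_buf/patch_buf_end markers standing in for the kernel symbols (which the patch only declares extern):

#include <stdio.h>
#include <stddef.h>

/* Hypothetical stand-ins for kvm_tmp/kvm_tmp_end. */
static char patch_buf[64 * 1024];
static char * const patch_buf_end = patch_buf + sizeof(patch_buf);

static size_t patch_buf_index;

/* Same shape as the patched kvm_alloc(): compare the running index
 * against the end-minus-start pointer difference. */
static char *patch_alloc(size_t len)
{
	char *p;

	if (patch_buf_index + len > (size_t)(patch_buf_end - patch_buf)) {
		fprintf(stderr, "no more space (%zu + %zu)\n",
			patch_buf_index, len);
		return NULL;
	}

	p = patch_buf + patch_buf_index;
	patch_buf_index += len;
	return p;
}

int main(void)
{
	printf("first chunk at %p\n", (void *)patch_alloc(256));
	return 0;
}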
@@ -163,7 +152,7 @@
 extern u32 kvm_emulate_mtmsrd_len;
 extern u32 kvm_emulate_mtmsrd[];
 
-static void kvm_patch_ins_mtmsrd(u32 *inst, u32 rt)
+static void __init kvm_patch_ins_mtmsrd(u32 *inst, u32 rt)
 {
 	u32 *p;
 	int distance_start;
@@ -216,7 +205,7 @@
 extern u32 kvm_emulate_mtmsr_len;
 extern u32 kvm_emulate_mtmsr[];
 
-static void kvm_patch_ins_mtmsr(u32 *inst, u32 rt)
+static void __init kvm_patch_ins_mtmsr(u32 *inst, u32 rt)
 {
 	u32 *p;
 	int distance_start;
@@ -277,7 +266,7 @@
 extern u32 kvm_emulate_wrtee_len;
 extern u32 kvm_emulate_wrtee[];
 
-static void kvm_patch_ins_wrtee(u32 *inst, u32 rt, int imm_one)
+static void __init kvm_patch_ins_wrtee(u32 *inst, u32 rt, int imm_one)
 {
 	u32 *p;
 	int distance_start;
@@ -334,7 +323,7 @@
 extern u32 kvm_emulate_wrteei_0_len;
 extern u32 kvm_emulate_wrteei_0[];
 
-static void kvm_patch_ins_wrteei_0(u32 *inst)
+static void __init kvm_patch_ins_wrteei_0(u32 *inst)
 {
 	u32 *p;
 	int distance_start;
@@ -375,7 +364,7 @@
 extern u32 kvm_emulate_mtsrin_len;
 extern u32 kvm_emulate_mtsrin[];
 
-static void kvm_patch_ins_mtsrin(u32 *inst, u32 rt, u32 rb)
+static void __init kvm_patch_ins_mtsrin(u32 *inst, u32 rt, u32 rb)
 {
 	u32 *p;
 	int distance_start;
@@ -411,7 +400,7 @@
 
 #endif
 
-static void kvm_map_magic_page(void *data)
+static void __init kvm_map_magic_page(void *data)
 {
 	u32 *features = data;
 
@@ -426,7 +415,7 @@
 	*features = out[0];
 }
 
-static void kvm_check_ins(u32 *inst, u32 features)
+static void __init kvm_check_ins(u32 *inst, u32 features)
 {
 	u32 _inst = *inst;
 	u32 inst_no_rt = _inst & ~KVM_MASK_RT;
@@ -670,7 +659,7 @@
 extern u32 kvm_template_start[];
 extern u32 kvm_template_end[];
 
-static void kvm_use_magic_page(void)
+static void __init kvm_use_magic_page(void)
 {
 	u32 *p;
 	u32 *start, *end;
@@ -680,7 +669,7 @@
 	on_each_cpu(kvm_map_magic_page, &features, 1);
 
 	/* Quick self-test to see if the mapping works */
-	if (!fault_in_pages_readable((const char *)KVM_MAGIC_PAGE, sizeof(u32))) {
+	if (fault_in_pages_readable((const char *)KVM_MAGIC_PAGE, sizeof(u32))) {
 		kvm_patching_worked = false;
 		return;
 	}
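Apart from the __init annotations and the kvm_tmp rework, the hunk above fixes an inverted test: fault_in_pages_readable() returns 0 when the access succeeds, so the old negated condition disabled patching exactly when the magic-page mapping worked. A small sketch of the corrected polarity, with a hypothetical probe_readable() standing in for the kernel helper:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for fault_in_pages_readable():
 * 0 on success, non-zero if the range cannot be read. */
static int probe_readable(const char *addr, unsigned long len)
{
	(void)addr;
	(void)len;
	return 0;	/* pretend the magic page is mapped */
}

int main(void)
{
	bool patching_worked = true;
	const char *magic = (const char *)0x1000;	/* placeholder address */

	/* Patched logic: only a failing probe disables patching; the old
	 * "if (!probe_readable(...))" took this branch on success instead. */
	if (probe_readable(magic, sizeof(unsigned int)))
		patching_worked = false;

	printf("magic page self-test %s\n",
	       patching_worked ? "worked" : "failed");
	return 0;
}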
@@ -711,25 +700,13 @@
 			 kvm_patching_worked ? "worked" : "failed");
 }
 
-static __init void kvm_free_tmp(void)
-{
-	/*
-	 * Inform kmemleak about the hole in the .bss section since the
-	 * corresponding pages will be unmapped with DEBUG_PAGEALLOC=y.
-	 */
-	kmemleak_free_part(&kvm_tmp[kvm_tmp_index],
-			   ARRAY_SIZE(kvm_tmp) - kvm_tmp_index);
-	free_reserved_area(&kvm_tmp[kvm_tmp_index],
-			   &kvm_tmp[ARRAY_SIZE(kvm_tmp)], -1, NULL);
-}
-
 static int __init kvm_guest_init(void)
 {
 	if (!kvm_para_available())
-		goto free_tmp;
+		return 0;
 
 	if (!epapr_paravirt_enabled)
-		goto free_tmp;
+		return 0;
 
 	if (kvm_para_has_feature(KVM_FEATURE_MAGIC_PAGE))
 		kvm_use_magic_page();
@@ -738,9 +715,6 @@
 	/* Enable napping */
 	powersave_nap = 1;
 #endif
-
-free_tmp:
-	kvm_free_tmp();
 
 	return 0;
 }