@@ -1,9 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * kexec.c - kexec system call core code.
  * Copyright (C) 2002-2004 Eric Biederman <ebiederm@xmission.com>
- *
- * This source code is licensed under the GNU General Public License,
- * Version 2. See the file COPYING for more details.
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -38,7 +36,7 @@
 #include <linux/syscore_ops.h>
 #include <linux/compiler.h>
 #include <linux/hugetlb.h>
-#include <linux/frame.h>
+#include <linux/objtool.h>
 
 #include <asm/page.h>
 #include <asm/sections.h>
@@ -47,7 +45,7 @@
 #include <crypto/sha.h>
 #include "kexec_internal.h"
 
-DEFINE_MUTEX(kexec_mutex);
+atomic_t __kexec_lock = ATOMIC_INIT(0);
 
 /* Per cpu memory for storing cpu states in case of system crash. */
 note_buf_t __percpu *crash_notes;
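Note: with kexec_mutex replaced by the __kexec_lock atomic, every acquisition below becomes a kexec_trylock()/kexec_unlock() pair. Those helpers are not shown in this excerpt; assuming they sit next to the lock declaration in kexec_internal.h, a minimal sketch looks like this:

extern atomic_t __kexec_lock;

/* Sketch only: the authoritative definitions live in kexec_internal.h. */
static inline bool kexec_trylock(void)
{
	/* only the first caller wins the 0 -> 1 transition */
	return atomic_cmpxchg_acquire(&__kexec_lock, 0, 1) == 0;
}

static inline void kexec_unlock(void)
{
	atomic_set_release(&__kexec_lock, 0);
}

A trylock on an atomic can be taken safely from the panic/crash path, where sleeping on a mutex (or waiting for an owner that will never run again) is not an option; that is why the conversion turns every lock site into a trylock.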
@@ -111,7 +109,7 @@
  * defined more restrictively in <asm/kexec.h>.
  *
  * The code for the transition from the current kernel to the
- * the new kernel is placed in the control_code_buffer, whose size
+ * new kernel is placed in the control_code_buffer, whose size
  * is given by KEXEC_CONTROL_PAGE_SIZE. In the best case only a single
  * page of memory is necessary, but some architectures require more.
  * Because this memory must be identity mapped in the transition from
@@ -152,6 +150,7 @@
 	int i;
 	unsigned long nr_segments = image->nr_segments;
 	unsigned long total_pages = 0;
+	unsigned long nr_pages = totalram_pages();
 
 	/*
 	 * Verify we have good destination addresses. The caller is
@@ -217,13 +216,13 @@
 	 * wasted allocating pages, which can cause a soft lockup.
 	 */
 	for (i = 0; i < nr_segments; i++) {
-		if (PAGE_COUNT(image->segment[i].memsz) > totalram_pages / 2)
+		if (PAGE_COUNT(image->segment[i].memsz) > nr_pages / 2)
 			return -EINVAL;
 
 		total_pages += PAGE_COUNT(image->segment[i].memsz);
 	}
 
-	if (total_pages > totalram_pages / 2)
+	if (total_pages > nr_pages / 2)
 		return -EINVAL;
 
 	/*
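Note: the two checks above stop dividing the bare totalram_pages symbol because, in current kernels, the total page count is kept in an atomic and read through an accessor; the value is therefore sampled once into nr_pages and reused for both comparisons. The accessor is roughly the following (paraphrased; see include/linux/mm.h for the real definition):

extern atomic_long_t _totalram_pages;

static inline unsigned long totalram_pages(void)
{
	return (unsigned long)atomic_long_read(&_totalram_pages);
}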
@@ -590,6 +589,12 @@
 	kimage_free_page_list(&image->unusable_pages);
 
 }
+
+int __weak machine_kexec_post_load(struct kimage *image)
+{
+	return 0;
+}
+
 void kimage_terminate(struct kimage *image)
 {
 	if (*image->entry != 0)
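Note: machine_kexec_post_load() is introduced as a __weak no-op so the generic loader can call an optional per-architecture hook once all segments are loaded; an architecture that needs post-load work simply supplies a strong definition, which overrides the stub at link time. A purely illustrative override (the body is hypothetical):

/* Hypothetical arch-side override of the __weak stub above. */
int machine_kexec_post_load(struct kimage *image)
{
	/*
	 * Typical uses: post-process the loaded segments, build
	 * translation tables or clean caches for the relocation code.
	 */
	return 0;
}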
@@ -938,7 +943,7 @@
  */
 void __noclone __crash_kexec(struct pt_regs *regs)
 {
-	/* Take the kexec_mutex here to prevent sys_kexec_load
+	/* Take the kexec_lock here to prevent sys_kexec_load
 	 * running on one cpu from replacing the crash kernel
 	 * we are using after a panic on a different cpu.
 	 *
@@ -946,7 +951,7 @@
 	 * of memory the xchg(&kexec_crash_image) would be
 	 * sufficient. But since I reuse the memory...
 	 */
-	if (mutex_trylock(&kexec_mutex)) {
+	if (kexec_trylock()) {
 		if (kexec_crash_image) {
 			struct pt_regs fixed_regs;
 
@@ -955,7 +960,7 @@
 			machine_crash_shutdown(&fixed_regs);
 			machine_kexec(kexec_crash_image);
 		}
-		mutex_unlock(&kexec_mutex);
+		kexec_unlock();
 	}
 }
 STACK_FRAME_NON_STANDARD(__crash_kexec);
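Note: STACK_FRAME_NON_STANDARD(__crash_kexec) is the user of the header renamed at the top of this diff; the macro moved from <linux/frame.h> to <linux/objtool.h> and marks a function whose stack layout objtool should not validate. Generic usage pattern (the function here is a made-up example):

#include <linux/objtool.h>

/* Hypothetical example: exempt a function whose hand-written asm or
 * never-returning jump confuses objtool's stack validation.
 */
static void example_odd_stack_func(void)
{
	/* ... inline asm / jump that never returns ... */
}
STACK_FRAME_NON_STANDARD(example_odd_stack_func);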
@@ -984,14 +989,17 @@
 	}
 }
 
-size_t crash_get_memory_size(void)
+ssize_t crash_get_memory_size(void)
 {
-	size_t size = 0;
+	ssize_t size = 0;
 
-	mutex_lock(&kexec_mutex);
+	if (!kexec_trylock())
+		return -EBUSY;
+
 	if (crashk_res.end != crashk_res.start)
 		size = resource_size(&crashk_res);
-	mutex_unlock(&kexec_mutex);
+
+	kexec_unlock();
 	return size;
 }
 
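Note: since crash_get_memory_size() can now fail with -EBUSY instead of sleeping on the mutex, its return type widens to ssize_t and callers must check for a negative value before treating it as a size. A caller-side sketch (function name invented for illustration; the real consumer is the kexec crash_size sysfs attribute):

static ssize_t crash_size_read_example(char *buf)
{
	ssize_t size = crash_get_memory_size();

	if (size < 0)	/* -EBUSY: the kexec lock is currently held */
		return size;

	return sprintf(buf, "%zd\n", size);
}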
@@ -1011,7 +1019,8 @@
 	unsigned long old_size;
 	struct resource *ram_res;
 
-	mutex_lock(&kexec_mutex);
+	if (!kexec_trylock())
+		return -EBUSY;
 
 	if (kexec_crash_image) {
 		ret = -ENOENT;
@@ -1020,6 +1029,7 @@
 	start = crashk_res.start;
 	end = crashk_res.end;
 	old_size = (end == 0) ? 0 : end - start + 1;
+	new_size = roundup(new_size, KEXEC_CRASH_MEM_ALIGN);
 	if (new_size >= old_size) {
 		ret = (new_size == old_size) ? 0 : -EINVAL;
 		goto unlock;
@@ -1031,9 +1041,7 @@
 		goto unlock;
 	}
 
-	start = roundup(start, KEXEC_CRASH_MEM_ALIGN);
-	end = roundup(start + new_size, KEXEC_CRASH_MEM_ALIGN);
-
+	end = start + new_size;
 	crash_free_reserved_phys_range(end, crashk_res.end);
 
 	if ((start == end) && (crashk_res.parent != NULL))
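Note: rounding new_size once, right after it is read (the line added two hunks above), replaces the removed per-boundary roundup of start and end here, so the shrunk region stays aligned to KEXEC_CRASH_MEM_ALIGN without moving start. For reference, roundup() behaves like the sketch below (equivalent to the kernel macro for positive values); e.g. with a hypothetical 4 KiB alignment, a request of 5000 bytes becomes 8192, while an already aligned 8192 stays 8192:

static inline unsigned long roundup_sketch(unsigned long x, unsigned long align)
{
	/* round x up to the next multiple of align */
	return ((x + align - 1) / align) * align;
}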
@@ -1049,7 +1057,7 @@
 	insert_resource(&iomem_resource, ram_res);
 
 unlock:
-	mutex_unlock(&kexec_mutex);
+	kexec_unlock();
 	return ret;
 }
 
@@ -1121,7 +1129,7 @@
 {
 	int error = 0;
 
-	if (!mutex_trylock(&kexec_mutex))
+	if (!kexec_trylock())
 		return -EBUSY;
 	if (!kexec_image) {
 		error = -EINVAL;
@@ -1150,7 +1158,7 @@
 		error = dpm_suspend_end(PMSG_FREEZE);
 		if (error)
 			goto Resume_devices;
-		error = disable_nonboot_cpus();
+		error = suspend_disable_secondary_cpus();
 		if (error)
 			goto Enable_cpus;
 		local_irq_disable();
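Note: disable_nonboot_cpus()/enable_nonboot_cpus() are replaced by the suspend_disable_secondary_cpus()/suspend_enable_secondary_cpus() wrappers with the same contract: take every CPU but one offline around the jump, then bring them back. Assuming the wrappers in include/linux/cpu.h keep their usual shape, they reduce to roughly:

/* Rough shape only; the exact wrappers live in include/linux/cpu.h. */
static inline int suspend_disable_secondary_cpus(void)
{
	return freeze_secondary_cpus(0);	/* keep one CPU online */
}

static inline void suspend_enable_secondary_cpus(void)
{
	thaw_secondary_cpus();
}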
@@ -1171,7 +1179,7 @@
 		 * CPU hotplug again; so re-enable it here.
 		 */
 		cpu_hotplug_enable();
-		pr_emerg("Starting new kernel\n");
+		pr_notice("Starting new kernel\n");
 		machine_shutdown();
 	}
 
@@ -1183,7 +1191,7 @@
  Enable_irqs:
 		local_irq_enable();
  Enable_cpus:
-		enable_nonboot_cpus();
+		suspend_enable_secondary_cpus();
 		dpm_resume_start(PMSG_RESTORE);
  Resume_devices:
 		dpm_resume_end(PMSG_RESTORE);
@@ -1196,6 +1204,6 @@
 #endif
 
  Unlock:
-	mutex_unlock(&kexec_mutex);
+	kexec_unlock();
 	return error;
 }
1202 | 1210 | |
---|