@@ -110,6 +110,14 @@
 	unsigned long i;
 	int ret;
 
+	/*
+	 * Because we write directly to the reserved memory region when loading
+	 * crash kernels we need a serialization here to prevent multiple crash
+	 * kernels from attempting to load simultaneously.
+	 */
+	if (!kexec_trylock())
+		return -EBUSY;
+
 	if (flags & KEXEC_ON_CRASH) {
 		dest_image = &kexec_crash_image;
 		if (kexec_crash_image)
@@ -121,7 +129,8 @@
 	if (nr_segments == 0) {
 		/* Uninstall image */
 		kimage_free(xchg(dest_image, NULL));
-		return 0;
+		ret = 0;
+		goto out_unlock;
 	}
 	if (flags & KEXEC_ON_CRASH) {
 		/*
@@ -134,7 +143,7 @@
 
 	ret = kimage_alloc_init(&image, entry, nr_segments, segments, flags);
 	if (ret)
-		return ret;
+		goto out_unlock;
 
 	if (flags & KEXEC_PRESERVE_CONTEXT)
 		image->preserve_context = 1;
@@ -171,6 +180,8 @@
 		arch_kexec_protect_crashkres();
 
 	kimage_free(image);
+out_unlock:
+	kexec_unlock();
 	return ret;
 }
 
@@ -247,20 +258,7 @@
 	    ((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT))
 		return -EINVAL;
 
-	/* Because we write directly to the reserved memory
-	 * region when loading crash kernels we need a mutex here to
-	 * prevent multiple crash kernels from attempting to load
-	 * simultaneously, and to prevent a crash kernel from loading
-	 * over the top of a in use crash kernel.
-	 *
-	 * KISS: always take the mutex.
-	 */
-	if (!mutex_trylock(&kexec_mutex))
-		return -EBUSY;
-
 	result = do_kexec_load(entry, nr_segments, segments, flags);
-
-	mutex_unlock(&kexec_mutex);
 
 	return result;
 }
@@ -301,20 +299,7 @@
 			return -EFAULT;
 	}
 
-	/* Because we write directly to the reserved memory
-	 * region when loading crash kernels we need a mutex here to
-	 * prevent multiple crash kernels from attempting to load
-	 * simultaneously, and to prevent a crash kernel from loading
-	 * over the top of a in use crash kernel.
-	 *
-	 * KISS: always take the mutex.
-	 */
-	if (!mutex_trylock(&kexec_mutex))
-		return -EBUSY;
-
 	result = do_kexec_load(entry, nr_segments, ksegments, flags);
-
-	mutex_unlock(&kexec_mutex);
 
 	return result;
 }
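
The serialization that this change moves into do_kexec_load() is provided by the
kexec_trylock()/kexec_unlock() pair, whose definitions are not part of the hunks
above. Purely as orientation, a non-sleeping trylock pair with this calling
convention could be built on an atomic flag along the lines of the sketch below;
the variable name __kexec_lock and the choice of primitives are assumptions here,
and the in-tree helpers (kept with the other kexec internals) may be implemented
differently, for example as thin wrappers around the existing kexec mutex.

	/* Sketch only: a non-sleeping trylock pair over an atomic flag. */
	#include <linux/atomic.h>
	#include <linux/types.h>

	static atomic_t __kexec_lock = ATOMIC_INIT(0);

	static inline bool kexec_trylock(void)
	{
		/* Succeeds only for the caller that flips the flag from 0 to 1. */
		return atomic_cmpxchg_acquire(&__kexec_lock, 0, 1) == 0;
	}

	static inline void kexec_unlock(void)
	{
		/* Pairs with the acquire ordering in kexec_trylock(). */
		atomic_set_release(&__kexec_lock, 0);
	}

Either way, the caller-visible behaviour matches the removed open-coded version:
a loader that cannot take the lock returns -EBUSY immediately instead of
blocking, and funnelling every exit of do_kexec_load() through the single
out_unlock label keeps the lock balanced on all paths while letting the two
syscall wrappers drop their own locking entirely.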