forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-09 b22da3d8526a935aa31e086e63f60ff3246cb61c
kernel/arch/mips/kernel/crash_dump.c
@@ -1,11 +1,9 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <linux/highmem.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/crash_dump.h>
 #include <linux/uaccess.h>
 #include <linux/slab.h>
-
-static void *kdump_buf_page;
 
 /**
  * copy_oldmem_page - copy one page from "oldmem"
@@ -17,51 +15,25 @@
  * @userbuf: if set, @buf is in user address space, use copy_to_user(),
  *      otherwise @buf is in kernel address space, use memcpy().
  *
- * Copy a page from "oldmem". For this page, there is no pte mapped
+ * Copy a page from "oldmem". For this page, there might be no pte mapped
  * in the current kernel.
- *
- * Calling copy_to_user() in atomic context is not desirable. Hence first
- * copying the data to a pre-allocated kernel page and then copying to user
- * space in non-atomic context.
  */
-ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
-                         size_t csize, unsigned long offset, int userbuf)
+ssize_t copy_oldmem_page(unsigned long pfn, char *buf, size_t csize,
+                         unsigned long offset, int userbuf)
 {
         void *vaddr;
 
         if (!csize)
                 return 0;
 
-        vaddr = kmap_atomic_pfn(pfn);
+        vaddr = kmap_local_pfn(pfn);
 
         if (!userbuf) {
-                memcpy(buf, (vaddr + offset), csize);
-                kunmap_atomic(vaddr);
+                memcpy(buf, vaddr + offset, csize);
         } else {
-                if (!kdump_buf_page) {
-                        pr_warn("Kdump: Kdump buffer page not allocated\n");
-
-                        return -EFAULT;
-                }
-                copy_page(kdump_buf_page, vaddr);
-                kunmap_atomic(vaddr);
-                if (copy_to_user(buf, (kdump_buf_page + offset), csize))
-                        return -EFAULT;
+                if (copy_to_user(buf, vaddr + offset, csize))
+                        csize = -EFAULT;
         }
 
         return csize;
 }
-
-static int __init kdump_buf_page_init(void)
-{
-        int ret = 0;
-
-        kdump_buf_page = kmalloc(PAGE_SIZE, GFP_KERNEL);
-        if (!kdump_buf_page) {
-                pr_warn("Kdump: Failed to allocate kdump buffer page\n");
-                ret = -ENOMEM;
-        }
-
-        return ret;
-}
-arch_initcall(kdump_buf_page_init);
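
Note (not part of the patch above): the removed comment explains why the old code needed a pre-allocated bounce page, i.e. copy_to_user() should not run in atomic context, which kmap_atomic_pfn() creates. kmap_local_pfn() mappings do not disable preemption or page faults, so user copies can target the mapped page directly. A minimal sketch of that pattern follows; the helper name copy_pfn_to_user_demo is made up for illustration and does not exist in the tree.

/* Sketch only: copy part of a page, given by pfn, straight to user space. */
#include <linux/highmem.h>
#include <linux/uaccess.h>

static ssize_t copy_pfn_to_user_demo(unsigned long pfn, char __user *ubuf,
                                     size_t csize, unsigned long offset)
{
        void *vaddr;
        ssize_t ret = csize;

        vaddr = kmap_local_pfn(pfn);    /* short-lived, CPU-local mapping */

        /* copy_to_user() may fault and sleep; that is allowed under kmap_local */
        if (copy_to_user(ubuf, vaddr + offset, csize))
                ret = -EFAULT;

        kunmap_local(vaddr);            /* always drop the mapping */

        return ret;
}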