...
 
 #include <linux/uaccess.h>
 
+static void *kdump_buf_page;
+
 static inline bool is_crashed_pfn_valid(unsigned long pfn)
 {
 #ifndef CONFIG_X86_PAE
...
  * @userbuf: if set, @buf is in user address space, use copy_to_user(),
  *	otherwise @buf is in kernel address space, use memcpy().
  *
- * Copy a page from "oldmem". For this page, there might be no pte mapped
- * in the current kernel.
+ * Copy a page from "oldmem". For this page, there is no pte mapped
+ * in the current kernel. We stitch up a pte, similar to kmap_atomic.
+ *
+ * Calling copy_to_user() in atomic context is not desirable. Hence first
+ * copying the data to a pre-allocated kernel page and then copying to user
+ * space in non-atomic context.
  */
-ssize_t copy_oldmem_page(unsigned long pfn, char *buf, size_t csize,
-			 unsigned long offset, int userbuf)
+ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
+			 size_t csize, unsigned long offset, int userbuf)
 {
 	void *vaddr;
 
...
 	if (!is_crashed_pfn_valid(pfn))
 		return -EFAULT;
 
-	vaddr = kmap_local_pfn(pfn);
+	vaddr = kmap_atomic_pfn(pfn);
 
 	if (!userbuf) {
-		memcpy(buf, vaddr + offset, csize);
+		memcpy(buf, (vaddr + offset), csize);
+		kunmap_atomic(vaddr);
 	} else {
-		if (copy_to_user(buf, vaddr + offset, csize))
-			csize = -EFAULT;
+		if (!kdump_buf_page) {
+			printk(KERN_WARNING "Kdump: Kdump buffer page not"
+				" allocated\n");
+			kunmap_atomic(vaddr);
+			return -EFAULT;
+		}
+		copy_page(kdump_buf_page, vaddr);
+		kunmap_atomic(vaddr);
+		if (copy_to_user(buf, (kdump_buf_page + offset), csize))
+			return -EFAULT;
 	}
-
-	kunmap_local(vaddr);
 
 	return csize;
 }
+
+static int __init kdump_buf_page_init(void)
+{
+	int ret = 0;
+
+	kdump_buf_page = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	if (!kdump_buf_page) {
+		printk(KERN_WARNING "Kdump: Failed to allocate kdump buffer"
+			" page\n");
+		ret = -ENOMEM;
+	}
+
+	return ret;
+}
+arch_initcall(kdump_buf_page_init);
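
For context, copy_oldmem_page() is the per-page primitive that the vmcore
read path drives in a loop: the bounce through the pre-allocated
kdump_buf_page lets the function drop the atomic mapping with
kunmap_atomic() before copy_to_user() can fault or sleep. Below is a
minimal sketch of such a caller, loosely modeled on read_from_oldmem() in
fs/proc/vmcore.c; the name read_oldmem_sketch() and its exact signature
are illustrative assumptions, not the kernel's API.

#include <linux/types.h>
#include <linux/mm.h>

/*
 * Sketch: read a byte range out of "oldmem" one page at a time, treating
 * *ppos as a linear physical position. Illustrative only.
 */
static ssize_t read_oldmem_sketch(char *buf, size_t count, u64 *ppos,
				  int userbuf)
{
	unsigned long pfn, offset;
	size_t nr_bytes;
	ssize_t tmp, read = 0;

	if (!count)
		return 0;

	/* Split the linear position into a page frame and an offset. */
	offset = (unsigned long)(*ppos % PAGE_SIZE);
	pfn = (unsigned long)(*ppos / PAGE_SIZE);

	do {
		/* Never cross a page boundary in a single copy. */
		if (count > PAGE_SIZE - offset)
			nr_bytes = PAGE_SIZE - offset;
		else
			nr_bytes = count;

		tmp = copy_oldmem_page(pfn, buf, nr_bytes, offset, userbuf);
		if (tmp < 0)
			return tmp;

		*ppos += nr_bytes;
		buf += nr_bytes;
		read += nr_bytes;
		count -= nr_bytes;
		pfn++;
		offset = 0;	/* Subsequent pages start at offset 0. */
	} while (count);

	return read;
}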