@@ -5,6 +5,8 @@
 #include <linux/uaccess.h>
 #include <linux/slab.h>
 
+static void *kdump_buf_page;
+
 /**
  * copy_oldmem_page - copy one page from "oldmem"
  * @pfn: page frame number to be copied
@@ -15,25 +17,51 @@
  * @userbuf: if set, @buf is in user address space, use copy_to_user(),
  * otherwise @buf is in kernel address space, use memcpy().
  *
- * Copy a page from "oldmem". For this page, there might be no pte mapped
+ * Copy a page from "oldmem". For this page, there is no pte mapped
  * in the current kernel.
+ *
+ * Calling copy_to_user() in atomic context is not desirable. Hence first
+ * copying the data to a pre-allocated kernel page and then copying to user
+ * space in non-atomic context.
  */
-ssize_t copy_oldmem_page(unsigned long pfn, char *buf, size_t csize,
-		unsigned long offset, int userbuf)
+ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
+		size_t csize, unsigned long offset, int userbuf)
 {
 	void *vaddr;
 
 	if (!csize)
 		return 0;
 
-	vaddr = kmap_local_pfn(pfn);
+	vaddr = kmap_atomic_pfn(pfn);
 
 	if (!userbuf) {
-		memcpy(buf, vaddr + offset, csize);
+		memcpy(buf, (vaddr + offset), csize);
+		kunmap_atomic(vaddr);
 	} else {
-		if (copy_to_user(buf, vaddr + offset, csize))
-			csize = -EFAULT;
+		if (!kdump_buf_page) {
+			pr_warn("Kdump: Kdump buffer page not allocated\n");
+
+			return -EFAULT;
+		}
+		copy_page(kdump_buf_page, vaddr);
+		kunmap_atomic(vaddr);
+		if (copy_to_user(buf, (kdump_buf_page + offset), csize))
+			return -EFAULT;
 	}
 
 	return csize;
 }
+
+static int __init kdump_buf_page_init(void)
+{
+	int ret = 0;
+
+	kdump_buf_page = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	if (!kdump_buf_page) {
+		pr_warn("Kdump: Failed to allocate kdump buffer page\n");
+		ret = -ENOMEM;
+	}
+
+	return ret;
+}
+arch_initcall(kdump_buf_page_init);
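For context, copy_oldmem_page() is the per-architecture hook the /proc/vmcore read path uses to pull the crashed kernel's memory out one page at a time, which is why @csize and @offset never cross a page boundary in a single call. The sketch below is a simplified, illustrative caller only; the function name read_oldmem_range and its locals are hypothetical, not the actual fs/proc/vmcore.c code.

/*
 * Illustrative only: a minimal page-by-page caller of copy_oldmem_page(),
 * loosely modelled on the /proc/vmcore read path.  Names are hypothetical.
 */
static ssize_t read_oldmem_range(char *buf, size_t count, u64 *ppos,
				 int userbuf)
{
	size_t read = 0;

	while (count) {
		unsigned long pfn = (unsigned long)(*ppos >> PAGE_SHIFT);
		unsigned long offset = (unsigned long)(*ppos & (PAGE_SIZE - 1));
		/* Stay within the current page on each call. */
		size_t nr_bytes = min_t(size_t, count, PAGE_SIZE - offset);
		ssize_t tmp = copy_oldmem_page(pfn, buf, nr_bytes, offset,
					       userbuf);

		if (tmp < 0)
			return tmp;
		*ppos += tmp;
		buf += tmp;
		read += tmp;
		count -= tmp;
	}

	return read;
}

Because kmap_atomic_pfn() puts the mapping in atomic context, the patched copy_oldmem_page() stages user-bound data through the pre-allocated kdump_buf_page and only calls copy_to_user() after kunmap_atomic(), which is the rationale spelled out in the updated kernel-doc comment.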