...
 #include <linux/uaccess.h>
 #include <linux/io.h>
 
+static ssize_t __copy_oldmem_page(unsigned long pfn, char *buf, size_t csize,
+				  unsigned long offset, int userbuf,
+				  bool encrypted)
+{
+	void *vaddr;
+
+	if (!csize)
+		return 0;
+
+	if (encrypted)
+		vaddr = (__force void *)ioremap_encrypted(pfn << PAGE_SHIFT, PAGE_SIZE);
+	else
+		vaddr = (__force void *)ioremap_cache(pfn << PAGE_SHIFT, PAGE_SIZE);
+
+	if (!vaddr)
+		return -ENOMEM;
+
+	if (userbuf) {
+		if (copy_to_user((void __user *)buf, vaddr + offset, csize)) {
+			iounmap((void __iomem *)vaddr);
+			return -EFAULT;
+		}
+	} else
+		memcpy(buf, vaddr + offset, csize);
+
+	set_iounmap_nonlazy();
+	iounmap((void __iomem *)vaddr);
+	return csize;
+}
+
 /**
- * copy_oldmem_page - copy one page from "oldmem"
+ * copy_oldmem_page - copy one page of memory
  * @pfn: page frame number to be copied
  * @buf: target memory address for the copy; this can be in kernel address
  *	space or user address space (see @userbuf)
...
  * @userbuf: if set, @buf is in user address space, use copy_to_user(),
  *	otherwise @buf is in kernel address space, use memcpy().
  *
- * Copy a page from "oldmem". For this page, there is no pte mapped
- * in the current kernel. We stitch up a pte, similar to kmap_atomic.
+ * Copy a page from the old kernel's memory. For this page, there is no pte
+ * mapped in the current kernel. We stitch up a pte, similar to kmap_atomic.
  */
-ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
-		size_t csize, unsigned long offset, int userbuf)
+ssize_t copy_oldmem_page(unsigned long pfn, char *buf, size_t csize,
+			 unsigned long offset, int userbuf)
 {
-	void *vaddr;
+	return __copy_oldmem_page(pfn, buf, csize, offset, userbuf, false);
+}
 
-	if (!csize)
-		return 0;
+/**
+ * copy_oldmem_page_encrypted - same as copy_oldmem_page() above but ioremap the
+ * memory with the encryption mask set to accommodate kdump on SME-enabled
+ * machines.
+ */
+ssize_t copy_oldmem_page_encrypted(unsigned long pfn, char *buf, size_t csize,
+				   unsigned long offset, int userbuf)
+{
+	return __copy_oldmem_page(pfn, buf, csize, offset, userbuf, true);
+}
 
-	vaddr = ioremap_cache(pfn << PAGE_SHIFT, PAGE_SIZE);
-	if (!vaddr)
-		return -ENOMEM;
-
-	if (userbuf) {
-		if (copy_to_user(buf, vaddr + offset, csize)) {
-			iounmap(vaddr);
-			return -EFAULT;
-		}
-	} else
-		memcpy(buf, vaddr + offset, csize);
-
-	set_iounmap_nonlazy();
-	iounmap(vaddr);
-	return csize;
+ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos)
+{
+	return read_from_oldmem(buf, count, ppos, 0, sev_active());
 }
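
For context, the new fifth argument passed to read_from_oldmem() in elfcorehdr_read() implies that a companion fs/proc/vmcore.c change threads an encrypted flag down to these per-page helpers. Below is a minimal sketch, not the actual vmcore code, of how such a caller could walk a region page by page and dispatch between copy_oldmem_page() and copy_oldmem_page_encrypted(); the function name and loop details are illustrative assumptions.

/*
 * Illustrative sketch only -- not the actual fs/proc/vmcore.c code.
 * Assumes read_from_oldmem() gained the bool 'encrypted' parameter
 * used by elfcorehdr_read() above.
 */
static ssize_t read_from_oldmem_sketch(char *buf, size_t count, u64 *ppos,
				       int userbuf, bool encrypted)
{
	unsigned long pfn, offset;
	size_t nr_bytes;
	ssize_t tmp, read = 0;

	if (!count)
		return 0;

	offset = (unsigned long)(*ppos % PAGE_SIZE);
	pfn = (unsigned long)(*ppos / PAGE_SIZE);

	do {
		/* Clamp each copy to the remainder of the current page. */
		nr_bytes = min_t(size_t, count, PAGE_SIZE - offset);

		/*
		 * Map the old page with or without the encryption mask,
		 * matching how the crashed kernel's memory was written.
		 */
		if (encrypted)
			tmp = copy_oldmem_page_encrypted(pfn, buf, nr_bytes,
							 offset, userbuf);
		else
			tmp = copy_oldmem_page(pfn, buf, nr_bytes,
					       offset, userbuf);
		if (tmp < 0)
			return tmp;

		*ppos += nr_bytes;
		buf += nr_bytes;
		count -= nr_bytes;
		read += nr_bytes;
		++pfn;
		offset = 0;
	} while (count);

	return read;
}

The design choice visible in the patch is to keep a single mapping-and-copy path (__copy_oldmem_page) and push the encrypted/plain decision up to the caller, so a reader such as elfcorehdr_read() can key it off sev_active() without duplicating any of the ioremap/copy/iounmap logic.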