Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

vmcore: convert copy_oldmem_page() to take an iov_iter

Patch series "Convert vmcore to use an iov_iter", v5.

For some reason several people have been sending bad patches to fix
compiler warnings in vmcore recently. Here's how it should be done.
Compile-tested only on x86. As noted in the first patch, s390 should take
this conversion a bit further, but I'm not inclined to do that work
myself.


This patch (of 3):

Instead of passing in a 'buf' and 'userbuf' argument, pass in an iov_iter.
s390 needs more work to pass the iov_iter down further, or refactor, but
I'd be more comfortable if someone who can test on s390 did that work.

It's more convenient to convert the whole of read_from_oldmem() to take an
iov_iter at the same time, so rename it to read_from_oldmem_iter() and add
a temporary read_from_oldmem() wrapper that creates an iov_iter.

Link: https://lkml.kernel.org/r/20220408090636.560886-1-bhe@redhat.com
Link: https://lkml.kernel.org/r/20220408090636.560886-2-bhe@redhat.com
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Baoquan He <bhe@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Cc: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

Authored by Matthew Wilcox (Oracle); committed by akpm.
5d8de293 04d168c6

+91 -260
+4 -23
arch/arm/kernel/crash_dump.c
··· 14 14 #include <linux/crash_dump.h> 15 15 #include <linux/uaccess.h> 16 16 #include <linux/io.h> 17 + #include <linux/uio.h> 17 18 18 - /** 19 - * copy_oldmem_page() - copy one page from old kernel memory 20 - * @pfn: page frame number to be copied 21 - * @buf: buffer where the copied page is placed 22 - * @csize: number of bytes to copy 23 - * @offset: offset in bytes into the page 24 - * @userbuf: if set, @buf is int he user address space 25 - * 26 - * This function copies one page from old kernel memory into buffer pointed by 27 - * @buf. If @buf is in userspace, set @userbuf to %1. Returns number of bytes 28 - * copied or negative error in case of failure. 29 - */ 30 - ssize_t copy_oldmem_page(unsigned long pfn, char *buf, 31 - size_t csize, unsigned long offset, 32 - int userbuf) 19 + ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn, 20 + size_t csize, unsigned long offset) 33 21 { 34 22 void *vaddr; 35 23 ··· 28 40 if (!vaddr) 29 41 return -ENOMEM; 30 42 31 - if (userbuf) { 32 - if (copy_to_user(buf, vaddr + offset, csize)) { 33 - iounmap(vaddr); 34 - return -EFAULT; 35 - } 36 - } else { 37 - memcpy(buf, vaddr + offset, csize); 38 - } 43 + csize = copy_to_iter(vaddr + offset, csize, iter); 39 44 40 45 iounmap(vaddr); 41 46 return csize;
+4 -25
arch/arm64/kernel/crash_dump.c
··· 9 9 #include <linux/crash_dump.h> 10 10 #include <linux/errno.h> 11 11 #include <linux/io.h> 12 - #include <linux/memblock.h> 13 - #include <linux/uaccess.h> 12 + #include <linux/uio.h> 14 13 #include <asm/memory.h> 15 14 16 - /** 17 - * copy_oldmem_page() - copy one page from old kernel memory 18 - * @pfn: page frame number to be copied 19 - * @buf: buffer where the copied page is placed 20 - * @csize: number of bytes to copy 21 - * @offset: offset in bytes into the page 22 - * @userbuf: if set, @buf is in a user address space 23 - * 24 - * This function copies one page from old kernel memory into buffer pointed by 25 - * @buf. If @buf is in userspace, set @userbuf to %1. Returns number of bytes 26 - * copied or negative error in case of failure. 27 - */ 28 - ssize_t copy_oldmem_page(unsigned long pfn, char *buf, 29 - size_t csize, unsigned long offset, 30 - int userbuf) 15 + ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn, 16 + size_t csize, unsigned long offset) 31 17 { 32 18 void *vaddr; 33 19 ··· 24 38 if (!vaddr) 25 39 return -ENOMEM; 26 40 27 - if (userbuf) { 28 - if (copy_to_user((char __user *)buf, vaddr + offset, csize)) { 29 - memunmap(vaddr); 30 - return -EFAULT; 31 - } 32 - } else { 33 - memcpy(buf, vaddr + offset, csize); 34 - } 41 + csize = copy_to_iter(vaddr + offset, csize, iter); 35 42 36 43 memunmap(vaddr); 37 44
+4 -28
arch/ia64/kernel/crash_dump.c
··· 10 10 #include <linux/errno.h> 11 11 #include <linux/types.h> 12 12 #include <linux/crash_dump.h> 13 - 13 + #include <linux/uio.h> 14 14 #include <asm/page.h> 15 - #include <linux/uaccess.h> 16 15 17 - /** 18 - * copy_oldmem_page - copy one page from "oldmem" 19 - * @pfn: page frame number to be copied 20 - * @buf: target memory address for the copy; this can be in kernel address 21 - * space or user address space (see @userbuf) 22 - * @csize: number of bytes to copy 23 - * @offset: offset in bytes into the page (based on pfn) to begin the copy 24 - * @userbuf: if set, @buf is in user address space, use copy_to_user(), 25 - * otherwise @buf is in kernel address space, use memcpy(). 26 - * 27 - * Copy a page from "oldmem". For this page, there is no pte mapped 28 - * in the current kernel. We stitch up a pte, similar to kmap_atomic. 29 - * 30 - * Calling copy_to_user() in atomic context is not desirable. Hence first 31 - * copying the data to a pre-allocated kernel page and then copying to user 32 - * space in non-atomic context. 33 - */ 34 - ssize_t 35 - copy_oldmem_page(unsigned long pfn, char *buf, 36 - size_t csize, unsigned long offset, int userbuf) 16 + ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn, 17 + size_t csize, unsigned long offset) 37 18 { 38 19 void *vaddr; 39 20 40 21 if (!csize) 41 22 return 0; 42 23 vaddr = __va(pfn<<PAGE_SHIFT); 43 - if (userbuf) { 44 - if (copy_to_user(buf, (vaddr + offset), csize)) { 45 - return -EFAULT; 46 - } 47 - } else 48 - memcpy(buf, (vaddr + offset), csize); 24 + csize = copy_to_iter(vaddr + offset, csize, iter); 49 25 return csize; 50 26 } 51 27
+4 -23
arch/mips/kernel/crash_dump.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 #include <linux/highmem.h> 3 3 #include <linux/crash_dump.h> 4 + #include <linux/uio.h> 4 5 5 - /** 6 - * copy_oldmem_page - copy one page from "oldmem" 7 - * @pfn: page frame number to be copied 8 - * @buf: target memory address for the copy; this can be in kernel address 9 - * space or user address space (see @userbuf) 10 - * @csize: number of bytes to copy 11 - * @offset: offset in bytes into the page (based on pfn) to begin the copy 12 - * @userbuf: if set, @buf is in user address space, use copy_to_user(), 13 - * otherwise @buf is in kernel address space, use memcpy(). 14 - * 15 - * Copy a page from "oldmem". For this page, there is no pte mapped 16 - * in the current kernel. 17 - */ 18 - ssize_t copy_oldmem_page(unsigned long pfn, char *buf, 19 - size_t csize, unsigned long offset, int userbuf) 6 + ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn, 7 + size_t csize, unsigned long offset) 20 8 { 21 9 void *vaddr; 22 10 ··· 12 24 return 0; 13 25 14 26 vaddr = kmap_local_pfn(pfn); 15 - 16 - if (!userbuf) { 17 - memcpy(buf, vaddr + offset, csize); 18 - } else { 19 - if (copy_to_user(buf, vaddr + offset, csize)) 20 - csize = -EFAULT; 21 - } 22 - 27 + csize = copy_to_iter(vaddr + offset, csize, iter); 23 28 kunmap_local(vaddr); 24 29 25 30 return csize;
+5 -30
arch/powerpc/kernel/crash_dump.c
··· 16 16 #include <asm/kdump.h> 17 17 #include <asm/prom.h> 18 18 #include <asm/firmware.h> 19 - #include <linux/uaccess.h> 19 + #include <linux/uio.h> 20 20 #include <asm/rtas.h> 21 21 #include <asm/inst.h> 22 22 ··· 68 68 } 69 69 #endif /* CONFIG_NONSTATIC_KERNEL */ 70 70 71 - static size_t copy_oldmem_vaddr(void *vaddr, char *buf, size_t csize, 72 - unsigned long offset, int userbuf) 73 - { 74 - if (userbuf) { 75 - if (copy_to_user((char __user *)buf, (vaddr + offset), csize)) 76 - return -EFAULT; 77 - } else 78 - memcpy(buf, (vaddr + offset), csize); 79 - 80 - return csize; 81 - } 82 - 83 - /** 84 - * copy_oldmem_page - copy one page from "oldmem" 85 - * @pfn: page frame number to be copied 86 - * @buf: target memory address for the copy; this can be in kernel address 87 - * space or user address space (see @userbuf) 88 - * @csize: number of bytes to copy 89 - * @offset: offset in bytes into the page (based on pfn) to begin the copy 90 - * @userbuf: if set, @buf is in user address space, use copy_to_user(), 91 - * otherwise @buf is in kernel address space, use memcpy(). 92 - * 93 - * Copy a page from "oldmem". For this page, there is no pte mapped 94 - * in the current kernel. We stitch up a pte, similar to kmap_atomic. 95 - */ 96 - ssize_t copy_oldmem_page(unsigned long pfn, char *buf, 97 - size_t csize, unsigned long offset, int userbuf) 71 + ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn, 72 + size_t csize, unsigned long offset) 98 73 { 99 74 void *vaddr; 100 75 phys_addr_t paddr; ··· 82 107 83 108 if (memblock_is_region_memory(paddr, csize)) { 84 109 vaddr = __va(paddr); 85 - csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf); 110 + csize = copy_to_iter(vaddr + offset, csize, iter); 86 111 } else { 87 112 vaddr = ioremap_cache(paddr, PAGE_SIZE); 88 - csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf); 113 + csize = copy_to_iter(vaddr + offset, csize, iter); 89 114 iounmap(vaddr); 90 115 } 91 116
+4 -22
arch/riscv/kernel/crash_dump.c
··· 7 7 8 8 #include <linux/crash_dump.h> 9 9 #include <linux/io.h> 10 + #include <linux/uio.h> 10 11 11 - /** 12 - * copy_oldmem_page() - copy one page from old kernel memory 13 - * @pfn: page frame number to be copied 14 - * @buf: buffer where the copied page is placed 15 - * @csize: number of bytes to copy 16 - * @offset: offset in bytes into the page 17 - * @userbuf: if set, @buf is in a user address space 18 - * 19 - * This function copies one page from old kernel memory into buffer pointed by 20 - * @buf. If @buf is in userspace, set @userbuf to %1. Returns number of bytes 21 - * copied or negative error in case of failure. 22 - */ 23 - ssize_t copy_oldmem_page(unsigned long pfn, char *buf, 24 - size_t csize, unsigned long offset, 25 - int userbuf) 12 + ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn, 13 + size_t csize, unsigned long offset) 26 14 { 27 15 void *vaddr; 28 16 ··· 21 33 if (!vaddr) 22 34 return -ENOMEM; 23 35 24 - if (userbuf) { 25 - if (copy_to_user((char __user *)buf, vaddr + offset, csize)) { 26 - memunmap(vaddr); 27 - return -EFAULT; 28 - } 29 - } else 30 - memcpy(buf, vaddr + offset, csize); 36 + csize = copy_to_iter(vaddr + offset, csize, iter); 31 37 32 38 memunmap(vaddr); 33 39 return csize;
+8 -5
arch/s390/kernel/crash_dump.c
··· 15 15 #include <linux/slab.h> 16 16 #include <linux/memblock.h> 17 17 #include <linux/elf.h> 18 + #include <linux/uio.h> 18 19 #include <asm/asm-offsets.h> 19 20 #include <asm/os_info.h> 20 21 #include <asm/elf.h> ··· 213 212 /* 214 213 * Copy one page from "oldmem" 215 214 */ 216 - ssize_t copy_oldmem_page(unsigned long pfn, char *buf, size_t csize, 217 - unsigned long offset, int userbuf) 215 + ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn, size_t csize, 216 + unsigned long offset) 218 217 { 219 218 unsigned long src; 220 219 int rc; ··· 222 221 if (!csize) 223 222 return 0; 224 223 src = pfn_to_phys(pfn) + offset; 225 - if (userbuf) 226 - rc = copy_oldmem_user((void __force __user *) buf, src, csize); 224 + 225 + /* XXX: pass the iov_iter down to a common function */ 226 + if (iter_is_iovec(iter)) 227 + rc = copy_oldmem_user(iter->iov->iov_base, src, csize); 227 228 else 228 - rc = copy_oldmem_kernel((void *) buf, src, csize); 229 + rc = copy_oldmem_kernel(iter->kvec->iov_base, src, csize); 229 230 return rc; 230 231 } 231 232
+5 -24
arch/sh/kernel/crash_dump.c
··· 8 8 #include <linux/errno.h> 9 9 #include <linux/crash_dump.h> 10 10 #include <linux/io.h> 11 + #include <linux/uio.h> 11 12 #include <linux/uaccess.h> 12 13 13 - /** 14 - * copy_oldmem_page - copy one page from "oldmem" 15 - * @pfn: page frame number to be copied 16 - * @buf: target memory address for the copy; this can be in kernel address 17 - * space or user address space (see @userbuf) 18 - * @csize: number of bytes to copy 19 - * @offset: offset in bytes into the page (based on pfn) to begin the copy 20 - * @userbuf: if set, @buf is in user address space, use copy_to_user(), 21 - * otherwise @buf is in kernel address space, use memcpy(). 22 - * 23 - * Copy a page from "oldmem". For this page, there is no pte mapped 24 - * in the current kernel. We stitch up a pte, similar to kmap_atomic. 25 - */ 26 - ssize_t copy_oldmem_page(unsigned long pfn, char *buf, 27 - size_t csize, unsigned long offset, int userbuf) 14 + ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn, 15 + size_t csize, unsigned long offset) 28 16 { 29 17 void __iomem *vaddr; 30 18 ··· 20 32 return 0; 21 33 22 34 vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE); 23 - 24 - if (userbuf) { 25 - if (copy_to_user((void __user *)buf, (vaddr + offset), csize)) { 26 - iounmap(vaddr); 27 - return -EFAULT; 28 - } 29 - } else 30 - memcpy(buf, (vaddr + offset), csize); 31 - 35 + csize = copy_to_iter(vaddr + offset, csize, iter); 32 36 iounmap(vaddr); 37 + 33 38 return csize; 34 39 }
+4 -25
arch/x86/kernel/crash_dump_32.c
··· 10 10 #include <linux/errno.h> 11 11 #include <linux/highmem.h> 12 12 #include <linux/crash_dump.h> 13 - 14 - #include <linux/uaccess.h> 13 + #include <linux/uio.h> 15 14 16 15 static inline bool is_crashed_pfn_valid(unsigned long pfn) 17 16 { ··· 28 29 #endif 29 30 } 30 31 31 - /** 32 - * copy_oldmem_page - copy one page from "oldmem" 33 - * @pfn: page frame number to be copied 34 - * @buf: target memory address for the copy; this can be in kernel address 35 - * space or user address space (see @userbuf) 36 - * @csize: number of bytes to copy 37 - * @offset: offset in bytes into the page (based on pfn) to begin the copy 38 - * @userbuf: if set, @buf is in user address space, use copy_to_user(), 39 - * otherwise @buf is in kernel address space, use memcpy(). 40 - * 41 - * Copy a page from "oldmem". For this page, there might be no pte mapped 42 - * in the current kernel. 43 - */ 44 - ssize_t copy_oldmem_page(unsigned long pfn, char *buf, size_t csize, 45 - unsigned long offset, int userbuf) 32 + ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn, size_t csize, 33 + unsigned long offset) 46 34 { 47 35 void *vaddr; 48 36 ··· 40 54 return -EFAULT; 41 55 42 56 vaddr = kmap_local_pfn(pfn); 43 - 44 - if (!userbuf) { 45 - memcpy(buf, vaddr + offset, csize); 46 - } else { 47 - if (copy_to_user(buf, vaddr + offset, csize)) 48 - csize = -EFAULT; 49 - } 50 - 57 + csize = copy_to_iter(vaddr + offset, csize, iter); 51 58 kunmap_local(vaddr); 52 59 53 60 return csize;
+11 -30
arch/x86/kernel/crash_dump_64.c
··· 8 8 9 9 #include <linux/errno.h> 10 10 #include <linux/crash_dump.h> 11 - #include <linux/uaccess.h> 11 + #include <linux/uio.h> 12 12 #include <linux/io.h> 13 13 #include <linux/cc_platform.h> 14 14 15 - static ssize_t __copy_oldmem_page(unsigned long pfn, char *buf, size_t csize, 16 - unsigned long offset, int userbuf, 15 + static ssize_t __copy_oldmem_page(struct iov_iter *iter, unsigned long pfn, 16 + size_t csize, unsigned long offset, 17 17 bool encrypted) 18 18 { 19 19 void *vaddr; ··· 29 29 if (!vaddr) 30 30 return -ENOMEM; 31 31 32 - if (userbuf) { 33 - if (copy_to_user((void __user *)buf, vaddr + offset, csize)) { 34 - iounmap((void __iomem *)vaddr); 35 - return -EFAULT; 36 - } 37 - } else 38 - memcpy(buf, vaddr + offset, csize); 32 + csize = copy_to_iter(vaddr + offset, csize, iter); 39 33 40 34 iounmap((void __iomem *)vaddr); 41 35 return csize; 42 36 } 43 37 44 - /** 45 - * copy_oldmem_page - copy one page of memory 46 - * @pfn: page frame number to be copied 47 - * @buf: target memory address for the copy; this can be in kernel address 48 - * space or user address space (see @userbuf) 49 - * @csize: number of bytes to copy 50 - * @offset: offset in bytes into the page (based on pfn) to begin the copy 51 - * @userbuf: if set, @buf is in user address space, use copy_to_user(), 52 - * otherwise @buf is in kernel address space, use memcpy(). 53 - * 54 - * Copy a page from the old kernel's memory. For this page, there is no pte 55 - * mapped in the current kernel. We stitch up a pte, similar to kmap_atomic. 
56 - */ 57 - ssize_t copy_oldmem_page(unsigned long pfn, char *buf, size_t csize, 58 - unsigned long offset, int userbuf) 38 + ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn, size_t csize, 39 + unsigned long offset) 59 40 { 60 - return __copy_oldmem_page(pfn, buf, csize, offset, userbuf, false); 41 + return __copy_oldmem_page(iter, pfn, csize, offset, false); 61 42 } 62 43 63 - /** 44 + /* 64 45 * copy_oldmem_page_encrypted - same as copy_oldmem_page() above but ioremap the 65 46 * memory with the encryption mask set to accommodate kdump on SME-enabled 66 47 * machines. 67 48 */ 68 - ssize_t copy_oldmem_page_encrypted(unsigned long pfn, char *buf, size_t csize, 69 - unsigned long offset, int userbuf) 49 + ssize_t copy_oldmem_page_encrypted(struct iov_iter *iter, unsigned long pfn, 50 + size_t csize, unsigned long offset) 70 51 { 71 - return __copy_oldmem_page(pfn, buf, csize, offset, userbuf, true); 52 + return __copy_oldmem_page(iter, pfn, csize, offset, true); 72 53 } 73 54 74 55 ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos)
+34 -20
fs/proc/vmcore.c
··· 26 26 #include <linux/vmalloc.h> 27 27 #include <linux/pagemap.h> 28 28 #include <linux/uaccess.h> 29 + #include <linux/uio.h> 29 30 #include <linux/cc_platform.h> 30 31 #include <asm/io.h> 31 32 #include "internal.h" ··· 129 128 } 130 129 131 130 /* Reads a page from the oldmem device from given offset. */ 132 - ssize_t read_from_oldmem(char *buf, size_t count, 133 - u64 *ppos, int userbuf, 134 - bool encrypted) 131 + static ssize_t read_from_oldmem_iter(struct iov_iter *iter, size_t count, 132 + u64 *ppos, bool encrypted) 135 133 { 136 134 unsigned long pfn, offset; 137 135 size_t nr_bytes; ··· 152 152 153 153 /* If pfn is not ram, return zeros for sparse dump files */ 154 154 if (!pfn_is_ram(pfn)) { 155 - tmp = 0; 156 - if (!userbuf) 157 - memset(buf, 0, nr_bytes); 158 - else if (clear_user(buf, nr_bytes)) 159 - tmp = -EFAULT; 155 + tmp = iov_iter_zero(nr_bytes, iter); 160 156 } else { 161 157 if (encrypted) 162 - tmp = copy_oldmem_page_encrypted(pfn, buf, 158 + tmp = copy_oldmem_page_encrypted(iter, pfn, 163 159 nr_bytes, 164 - offset, 165 - userbuf); 160 + offset); 166 161 else 167 - tmp = copy_oldmem_page(pfn, buf, nr_bytes, 168 - offset, userbuf); 162 + tmp = copy_oldmem_page(iter, pfn, nr_bytes, 163 + offset); 169 164 } 170 - if (tmp < 0) { 165 + if (tmp < nr_bytes) { 171 166 srcu_read_unlock(&vmcore_cb_srcu, idx); 172 - return tmp; 167 + return -EFAULT; 173 168 } 174 169 175 170 *ppos += nr_bytes; 176 171 count -= nr_bytes; 177 - buf += nr_bytes; 178 172 read += nr_bytes; 179 173 ++pfn; 180 174 offset = 0; ··· 176 182 srcu_read_unlock(&vmcore_cb_srcu, idx); 177 183 178 184 return read; 185 + } 186 + 187 + ssize_t read_from_oldmem(char *buf, size_t count, 188 + u64 *ppos, int userbuf, 189 + bool encrypted) 190 + { 191 + struct iov_iter iter; 192 + struct iovec iov; 193 + struct kvec kvec; 194 + 195 + if (userbuf) { 196 + iov.iov_base = (__force void __user *)buf; 197 + iov.iov_len = count; 198 + iov_iter_init(&iter, READ, &iov, 1, count); 199 + } else { 
200 + kvec.iov_base = buf; 201 + kvec.iov_len = count; 202 + iov_iter_kvec(&iter, READ, &kvec, 1, count); 203 + } 204 + 205 + return read_from_oldmem_iter(&iter, count, ppos, encrypted); 179 206 } 180 207 181 208 /* ··· 243 228 /* 244 229 * Architectures which support memory encryption override this. 245 230 */ 246 - ssize_t __weak 247 - copy_oldmem_page_encrypted(unsigned long pfn, char *buf, size_t csize, 248 - unsigned long offset, int userbuf) 231 + ssize_t __weak copy_oldmem_page_encrypted(struct iov_iter *iter, 232 + unsigned long pfn, size_t csize, unsigned long offset) 249 233 { 250 - return copy_oldmem_page(pfn, buf, csize, offset, userbuf); 234 + return copy_oldmem_page(iter, pfn, csize, offset); 251 235 } 252 236 253 237 /*
+4 -5
include/linux/crash_dump.h
··· 24 24 unsigned long from, unsigned long pfn, 25 25 unsigned long size, pgprot_t prot); 26 26 27 - extern ssize_t copy_oldmem_page(unsigned long, char *, size_t, 28 - unsigned long, int); 29 - extern ssize_t copy_oldmem_page_encrypted(unsigned long pfn, char *buf, 30 - size_t csize, unsigned long offset, 31 - int userbuf); 27 + ssize_t copy_oldmem_page(struct iov_iter *i, unsigned long pfn, size_t csize, 28 + unsigned long offset); 29 + ssize_t copy_oldmem_page_encrypted(struct iov_iter *iter, unsigned long pfn, 30 + size_t csize, unsigned long offset); 32 31 33 32 void vmcore_cleanup(void); 34 33