Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

vmcore: convert __read_vmcore to use an iov_iter

This gets rid of copy_to() and lets us use proc_read_iter() instead of
proc_read().

Link: https://lkml.kernel.org/r/20220408090636.560886-3-bhe@redhat.com
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Baoquan He <bhe@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

Authored by Matthew Wilcox (Oracle) and committed by akpm.
4a22fd20 5d8de293

+30 -52
+30 -52
fs/proc/vmcore.c
··· 249 249 return copy_oldmem_page(iter, pfn, csize, offset); 250 250 } 251 251 252 - /* 253 - * Copy to either kernel or user space 254 - */ 255 - static int copy_to(void *target, void *src, size_t size, int userbuf) 256 - { 257 - if (userbuf) { 258 - if (copy_to_user((char __user *) target, src, size)) 259 - return -EFAULT; 260 - } else { 261 - memcpy(target, src, size); 262 - } 263 - return 0; 264 - } 265 - 266 252 #ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP 267 - static int vmcoredd_copy_dumps(void *dst, u64 start, size_t size, int userbuf) 253 + static int vmcoredd_copy_dumps(struct iov_iter *iter, u64 start, size_t size) 268 254 { 269 255 struct vmcoredd_node *dump; 270 256 u64 offset = 0; ··· 263 277 if (start < offset + dump->size) { 264 278 tsz = min(offset + (u64)dump->size - start, (u64)size); 265 279 buf = dump->buf + start - offset; 266 - if (copy_to(dst, buf, tsz, userbuf)) { 280 + if (copy_to_iter(buf, tsz, iter) < tsz) { 267 281 ret = -EFAULT; 268 282 goto out_unlock; 269 283 } 270 284 271 285 size -= tsz; 272 286 start += tsz; 273 - dst += tsz; 274 287 275 288 /* Leave now if buffer filled already */ 276 289 if (!size) ··· 325 340 /* Read from the ELF header and then the crash dump. On error, negative value is 326 341 * returned otherwise number of bytes read are returned. 
327 342 */ 328 - static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos, 329 - int userbuf) 343 + static ssize_t __read_vmcore(struct iov_iter *iter, loff_t *fpos) 330 344 { 331 345 ssize_t acc = 0, tmp; 332 346 size_t tsz; 333 347 u64 start; 334 348 struct vmcore *m = NULL; 335 349 336 - if (buflen == 0 || *fpos >= vmcore_size) 350 + if (!iov_iter_count(iter) || *fpos >= vmcore_size) 337 351 return 0; 338 352 339 - /* trim buflen to not go beyond EOF */ 340 - if (buflen > vmcore_size - *fpos) 341 - buflen = vmcore_size - *fpos; 353 + iov_iter_truncate(iter, vmcore_size - *fpos); 342 354 343 355 /* Read ELF core header */ 344 356 if (*fpos < elfcorebuf_sz) { 345 - tsz = min(elfcorebuf_sz - (size_t)*fpos, buflen); 346 - if (copy_to(buffer, elfcorebuf + *fpos, tsz, userbuf)) 357 + tsz = min(elfcorebuf_sz - (size_t)*fpos, iov_iter_count(iter)); 358 + if (copy_to_iter(elfcorebuf + *fpos, tsz, iter) < tsz) 347 359 return -EFAULT; 348 - buflen -= tsz; 349 360 *fpos += tsz; 350 - buffer += tsz; 351 361 acc += tsz; 352 362 353 363 /* leave now if filled buffer already */ 354 - if (buflen == 0) 364 + if (!iov_iter_count(iter)) 355 365 return acc; 356 366 } 357 367 ··· 367 387 /* Read device dumps */ 368 388 if (*fpos < elfcorebuf_sz + vmcoredd_orig_sz) { 369 389 tsz = min(elfcorebuf_sz + vmcoredd_orig_sz - 370 - (size_t)*fpos, buflen); 390 + (size_t)*fpos, iov_iter_count(iter)); 371 391 start = *fpos - elfcorebuf_sz; 372 - if (vmcoredd_copy_dumps(buffer, start, tsz, userbuf)) 392 + if (vmcoredd_copy_dumps(iter, start, tsz)) 373 393 return -EFAULT; 374 394 375 - buflen -= tsz; 376 395 *fpos += tsz; 377 - buffer += tsz; 378 396 acc += tsz; 379 397 380 398 /* leave now if filled buffer already */ 381 - if (!buflen) 399 + if (!iov_iter_count(iter)) 382 400 return acc; 383 401 } 384 402 #endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */ 385 403 386 404 /* Read remaining elf notes */ 387 - tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)*fpos, buflen); 405 + tsz = 
min(elfcorebuf_sz + elfnotes_sz - (size_t)*fpos, 406 + iov_iter_count(iter)); 388 407 kaddr = elfnotes_buf + *fpos - elfcorebuf_sz - vmcoredd_orig_sz; 389 - if (copy_to(buffer, kaddr, tsz, userbuf)) 408 + if (copy_to_iter(kaddr, tsz, iter) < tsz) 390 409 return -EFAULT; 391 410 392 - buflen -= tsz; 393 411 *fpos += tsz; 394 - buffer += tsz; 395 412 acc += tsz; 396 413 397 414 /* leave now if filled buffer already */ 398 - if (buflen == 0) 415 + if (!iov_iter_count(iter)) 399 416 return acc; 400 417 } 401 418 ··· 400 423 if (*fpos < m->offset + m->size) { 401 424 tsz = (size_t)min_t(unsigned long long, 402 425 m->offset + m->size - *fpos, 403 - buflen); 426 + iov_iter_count(iter)); 404 427 start = m->paddr + *fpos - m->offset; 405 - tmp = read_from_oldmem(buffer, tsz, &start, 406 - userbuf, cc_platform_has(CC_ATTR_MEM_ENCRYPT)); 428 + tmp = read_from_oldmem_iter(iter, tsz, &start, 429 + cc_platform_has(CC_ATTR_MEM_ENCRYPT)); 407 430 if (tmp < 0) 408 431 return tmp; 409 - buflen -= tsz; 410 432 *fpos += tsz; 411 - buffer += tsz; 412 433 acc += tsz; 413 434 414 435 /* leave now if filled buffer already */ 415 - if (buflen == 0) 436 + if (!iov_iter_count(iter)) 416 437 return acc; 417 438 } 418 439 } ··· 418 443 return acc; 419 444 } 420 445 421 - static ssize_t read_vmcore(struct file *file, char __user *buffer, 422 - size_t buflen, loff_t *fpos) 446 + static ssize_t read_vmcore(struct kiocb *iocb, struct iov_iter *iter) 423 447 { 424 - return __read_vmcore((__force char *) buffer, buflen, fpos, 1); 448 + return __read_vmcore(iter, &iocb->ki_pos); 425 449 } 426 450 427 451 /* 428 452 * The vmcore fault handler uses the page cache and fills data using the 429 - * standard __vmcore_read() function. 453 + * standard __read_vmcore() function. 430 454 * 431 455 * On s390 the fault handler is used for memory regions that can't be mapped 432 456 * directly with remap_pfn_range(). 
··· 435 461 #ifdef CONFIG_S390 436 462 struct address_space *mapping = vmf->vma->vm_file->f_mapping; 437 463 pgoff_t index = vmf->pgoff; 464 + struct iov_iter iter; 465 + struct kvec kvec; 438 466 struct page *page; 439 467 loff_t offset; 440 - char *buf; 441 468 int rc; 442 469 443 470 page = find_or_create_page(mapping, index, GFP_KERNEL); ··· 446 471 return VM_FAULT_OOM; 447 472 if (!PageUptodate(page)) { 448 473 offset = (loff_t) index << PAGE_SHIFT; 449 - buf = __va((page_to_pfn(page) << PAGE_SHIFT)); 450 - rc = __read_vmcore(buf, PAGE_SIZE, &offset, 0); 474 + kvec.iov_base = page_address(page); 475 + kvec.iov_len = PAGE_SIZE; 476 + iov_iter_kvec(&iter, READ, &kvec, 1, PAGE_SIZE); 477 + 478 + rc = __read_vmcore(&iter, &offset); 451 479 if (rc < 0) { 452 480 unlock_page(page); 453 481 put_page(page); ··· 700 722 701 723 static const struct proc_ops vmcore_proc_ops = { 702 724 .proc_open = open_vmcore, 703 - .proc_read = read_vmcore, 725 + .proc_read_iter = read_vmcore, 704 726 .proc_lseek = default_llseek, 705 727 .proc_mmap = mmap_vmcore, 706 728 };