Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

fs/proc/kcore: convert read_kcore() to read_kcore_iter()

For the time being we still use a bounce buffer for vread(), however in
the next patch we will convert this to interact directly with the iterator
and eliminate the bounce buffer altogether.

Link: https://lkml.kernel.org/r/ebe12c8d70eebd71f487d80095605f3ad0d1489c.1679511146.git.lstoakes@gmail.com
Signed-off-by: Lorenzo Stoakes <lstoakes@gmail.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Baoquan He <bhe@redhat.com>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Liu Shixin <liushixin2@huawei.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Uladzislau Rezki (Sony) <urezki@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

Authored by Lorenzo Stoakes and committed by Andrew Morton
46c0d6d0 2e1c0170

+18 -18
fs/proc/kcore.c
··· 24 24 #include <linux/memblock.h> 25 25 #include <linux/init.h> 26 26 #include <linux/slab.h> 27 - #include <linux/uaccess.h> 27 + #include <linux/uio.h> 28 28 #include <asm/io.h> 29 29 #include <linux/list.h> 30 30 #include <linux/ioport.h> ··· 308 308 } 309 309 310 310 static ssize_t 311 - read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos) 311 + read_kcore_iter(struct kiocb *iocb, struct iov_iter *iter) 312 312 { 313 + struct file *file = iocb->ki_filp; 313 314 char *buf = file->private_data; 315 + loff_t *fpos = &iocb->ki_pos; 316 + 314 317 size_t phdrs_offset, notes_offset, data_offset; 315 318 size_t page_offline_frozen = 1; 316 319 size_t phdrs_len, notes_len; ··· 321 318 size_t tsz; 322 319 int nphdr; 323 320 unsigned long start; 321 + size_t buflen = iov_iter_count(iter); 324 322 size_t orig_buflen = buflen; 325 323 int ret = 0; 326 324 ··· 360 356 }; 361 357 362 358 tsz = min_t(size_t, buflen, sizeof(struct elfhdr) - *fpos); 363 - if (copy_to_user(buffer, (char *)&ehdr + *fpos, tsz)) { 359 + if (copy_to_iter((char *)&ehdr + *fpos, tsz, iter) != tsz) { 364 360 ret = -EFAULT; 365 361 goto out; 366 362 } 367 363 368 - buffer += tsz; 369 364 buflen -= tsz; 370 365 *fpos += tsz; 371 366 } ··· 401 398 } 402 399 403 400 tsz = min_t(size_t, buflen, phdrs_offset + phdrs_len - *fpos); 404 - if (copy_to_user(buffer, (char *)phdrs + *fpos - phdrs_offset, 405 - tsz)) { 401 + if (copy_to_iter((char *)phdrs + *fpos - phdrs_offset, tsz, 402 + iter) != tsz) { 406 403 kfree(phdrs); 407 404 ret = -EFAULT; 408 405 goto out; 409 406 } 410 407 kfree(phdrs); 411 408 412 - buffer += tsz; 413 409 buflen -= tsz; 414 410 *fpos += tsz; 415 411 } ··· 450 448 min(vmcoreinfo_size, notes_len - i)); 451 449 452 450 tsz = min_t(size_t, buflen, notes_offset + notes_len - *fpos); 453 - if (copy_to_user(buffer, notes + *fpos - notes_offset, tsz)) { 451 + if (copy_to_iter(notes + *fpos - notes_offset, tsz, iter) != tsz) { 454 452 kfree(notes); 455 453 ret = 
-EFAULT; 456 454 goto out; 457 455 } 458 456 kfree(notes); 459 457 460 - buffer += tsz; 461 458 buflen -= tsz; 462 459 *fpos += tsz; 463 460 } ··· 498 497 } 499 498 500 499 if (!m) { 501 - if (clear_user(buffer, tsz)) { 500 + if (iov_iter_zero(tsz, iter) != tsz) { 502 501 ret = -EFAULT; 503 502 goto out; 504 503 } ··· 509 508 case KCORE_VMALLOC: 510 509 vread(buf, (char *)start, tsz); 511 510 /* we have to zero-fill user buffer even if no read */ 512 - if (copy_to_user(buffer, buf, tsz)) { 511 + if (copy_to_iter(buf, tsz, iter) != tsz) { 513 512 ret = -EFAULT; 514 513 goto out; 515 514 } 516 515 break; 517 516 case KCORE_USER: 518 517 /* User page is handled prior to normal kernel page: */ 519 - if (copy_to_user(buffer, (char *)start, tsz)) { 518 + if (copy_to_iter((char *)start, tsz, iter) != tsz) { 520 519 ret = -EFAULT; 521 520 goto out; 522 521 } ··· 532 531 */ 533 532 if (!page || PageOffline(page) || 534 533 is_page_hwpoison(page) || !pfn_is_ram(pfn)) { 535 - if (clear_user(buffer, tsz)) { 534 + if (iov_iter_zero(tsz, iter) != tsz) { 536 535 ret = -EFAULT; 537 536 goto out; 538 537 } ··· 542 541 case KCORE_VMEMMAP: 543 542 case KCORE_TEXT: 544 543 /* 545 - * We use _copy_to_user() to bypass usermode hardening 544 + * We use _copy_to_iter() to bypass usermode hardening 546 545 * which would otherwise prevent this operation. 547 546 */ 548 - if (_copy_to_user(buffer, (char *)start, tsz)) { 547 + if (_copy_to_iter((char *)start, tsz, iter) != tsz) { 549 548 ret = -EFAULT; 550 549 goto out; 551 550 } 552 551 break; 553 552 default: 554 553 pr_warn_once("Unhandled KCORE type: %d\n", m->type); 555 - if (clear_user(buffer, tsz)) { 554 + if (iov_iter_zero(tsz, iter) != tsz) { 556 555 ret = -EFAULT; 557 556 goto out; 558 557 } ··· 560 559 skip: 561 560 buflen -= tsz; 562 561 *fpos += tsz; 563 - buffer += tsz; 564 562 start += tsz; 565 563 tsz = (buflen > PAGE_SIZE ? 
PAGE_SIZE : buflen); 566 564 } ··· 603 603 } 604 604 605 605 static const struct proc_ops kcore_proc_ops = { 606 - .proc_read = read_kcore, 606 + .proc_read_iter = read_kcore_iter, 607 607 .proc_open = open_kcore, 608 608 .proc_release = release_kcore, 609 609 .proc_lseek = default_llseek,