Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

iov_iter: saner checks for attempt to copy to/from iterator

Instead of the "don't do it to ITER_PIPE" special case, warn and bail
out when ->data_source is false on copying from an iterator. Likewise,
warn when ->data_source is true on copying to an iterator, while we
are at it.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>

Al Viro a41dad90 fc02f337

+17 -14
lib/iov_iter.c
··· 520 520 521 521 size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i) 522 522 { 523 + if (WARN_ON_ONCE(i->data_source)) 524 + return 0; 523 525 if (unlikely(iov_iter_is_pipe(i))) 524 526 return copy_pipe_to_iter(addr, bytes, i); 525 527 if (user_backed_iter(i)) ··· 608 606 */ 609 607 size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i) 610 608 { 609 + if (WARN_ON_ONCE(i->data_source)) 610 + return 0; 611 611 if (unlikely(iov_iter_is_pipe(i))) 612 612 return copy_mc_pipe_to_iter(addr, bytes, i); 613 613 if (user_backed_iter(i)) ··· 626 622 627 623 size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i) 628 624 { 629 - if (unlikely(iov_iter_is_pipe(i))) { 630 - WARN_ON(1); 625 + if (WARN_ON_ONCE(!i->data_source)) 631 626 return 0; 632 - } 627 + 633 628 if (user_backed_iter(i)) 634 629 might_fault(); 635 630 iterate_and_advance(i, bytes, base, len, off, ··· 642 639 643 640 size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i) 644 641 { 645 - if (unlikely(iov_iter_is_pipe(i))) { 646 - WARN_ON(1); 642 + if (WARN_ON_ONCE(!i->data_source)) 647 643 return 0; 648 - } 644 + 649 645 iterate_and_advance(i, bytes, base, len, off, 650 646 __copy_from_user_inatomic_nocache(addr + off, base, len), 651 647 memcpy(addr + off, base, len) ··· 673 671 */ 674 672 size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i) 675 673 { 676 - if (unlikely(iov_iter_is_pipe(i))) { 677 - WARN_ON(1); 674 + if (WARN_ON_ONCE(!i->data_source)) 678 675 return 0; 679 - } 676 + 680 677 iterate_and_advance(i, bytes, base, len, off, 681 678 __copy_from_user_flushcache(addr + off, base, len), 682 679 memcpy_flushcache(addr + off, base, len) ··· 714 713 { 715 714 size_t res = 0; 716 715 if (!page_copy_sane(page, offset, bytes)) 716 + return 0; 717 + if (WARN_ON_ONCE(i->data_source)) 717 718 return 0; 718 719 if (unlikely(iov_iter_is_pipe(i))) 719 720 return copy_page_to_iter_pipe(page, offset, bytes, i); 
··· 814 811 kunmap_atomic(kaddr); 815 812 return 0; 816 813 } 817 - if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) { 814 + if (WARN_ON_ONCE(!i->data_source)) { 818 815 kunmap_atomic(kaddr); 819 - WARN_ON(1); 820 816 return 0; 821 817 } 822 818 iterate_and_advance(i, bytes, base, len, off, ··· 1527 1525 { 1528 1526 __wsum sum, next; 1529 1527 sum = *csum; 1530 - if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) { 1531 - WARN_ON(1); 1528 + if (WARN_ON_ONCE(!i->data_source)) 1532 1529 return 0; 1533 - } 1530 + 1534 1531 iterate_and_advance(i, bytes, base, len, off, ({ 1535 1532 next = csum_and_copy_from_user(base, addr + off, len); 1536 1533 sum = csum_block_add(sum, next, off); ··· 1549 1548 struct csum_state *csstate = _csstate; 1550 1549 __wsum sum, next; 1551 1550 1551 + if (WARN_ON_ONCE(i->data_source)) 1552 + return 0; 1552 1553 if (unlikely(iov_iter_is_discard(i))) { 1553 1554 // can't use csum_memcpy() for that one - data is not copied 1554 1555 csstate->csum = csum_block_add(csstate->csum,