Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

xen/privcmd: Corrected error handling path

Previously, if lock_pages() ended up partially mapping pages, it would
return -ERRNO, due to which unlock_pages() had to go through each
pages[i] up to *nr_pages* to validate them. This can be avoided by
passing the correct number of partially mapped pages and the -ERRNO
separately when returning from lock_pages() on error.

With this fix, unlock_pages() no longer needs to validate pages[i] up to
*nr_pages* in the error scenario, and a few condition checks can be dropped.

Signed-off-by: Souptick Joarder <jrdr.linux@gmail.com>
Reviewed-by: Juergen Gross <jgross@suse.com>
Reviewed-by: Paul Durrant <paul@xen.org>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Paul Durrant <xadimgnik@gmail.com>
Link: https://lore.kernel.org/r/1594525195-28345-2-git-send-email-jrdr.linux@gmail.com
Signed-off-by: Juergen Gross <jgross@suse.com>

authored by

Souptick Joarder and committed by
Juergen Gross
e398fb4b bcf87687

+15 -16
+15 -16
drivers/xen/privcmd.c
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -580,13 +580,13 @@
 
 static int lock_pages(
 	struct privcmd_dm_op_buf kbufs[], unsigned int num,
-	struct page *pages[], unsigned int nr_pages)
+	struct page *pages[], unsigned int nr_pages, unsigned int *pinned)
 {
 	unsigned int i;
 
 	for (i = 0; i < num; i++) {
 		unsigned int requested;
-		int pinned;
+		int page_count;
 
 		requested = DIV_ROUND_UP(
 			offset_in_page(kbufs[i].uptr) + kbufs[i].size,
@@ -594,14 +594,15 @@
 		if (requested > nr_pages)
 			return -ENOSPC;
 
-		pinned = get_user_pages_fast(
+		page_count = get_user_pages_fast(
 			(unsigned long) kbufs[i].uptr,
 			requested, FOLL_WRITE, pages);
-		if (pinned < 0)
-			return pinned;
+		if (page_count < 0)
+			return page_count;
 
-		nr_pages -= pinned;
-		pages += pinned;
+		*pinned += page_count;
+		nr_pages -= page_count;
+		pages += page_count;
 	}
 
 	return 0;
@@ -612,13 +611,8 @@
 {
 	unsigned int i;
 
-	if (!pages)
-		return;
-
-	for (i = 0; i < nr_pages; i++) {
-		if (pages[i])
-			put_page(pages[i]);
-	}
+	for (i = 0; i < nr_pages; i++)
+		put_page(pages[i]);
 }
 
 static long privcmd_ioctl_dm_op(struct file *file, void __user *udata)
@@ -626,6 +630,7 @@
 	struct xen_dm_op_buf *xbufs = NULL;
 	unsigned int i;
 	long rc;
+	unsigned int pinned = 0;
 
 	if (copy_from_user(&kdata, udata, sizeof(kdata)))
 		return -EFAULT;
@@ -680,9 +683,11 @@
 		goto out;
 	}
 
-	rc = lock_pages(kbufs, kdata.num, pages, nr_pages);
-	if (rc)
+	rc = lock_pages(kbufs, kdata.num, pages, nr_pages, &pinned);
+	if (rc < 0) {
+		nr_pages = pinned;
 		goto out;
+	}
 
 	for (i = 0; i < kdata.num; i++) {
 		set_xen_guest_handle(xbufs[i].h, kbufs[i].uptr);