Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

xen/xenbus: avoid large structs and arrays on the stack

xenbus_map_ring_valloc() and its sub-functions are putting quite large
structs and arrays on the stack. This is problematic at runtime, but
might also result in build failures (e.g. with clang due to the option
-Werror,-Wframe-larger-than=... used).

Fix that by moving most of the data from the stack into a dynamically
allocated struct. Performance is no issue here, as
xenbus_map_ring_valloc() is used only when adding a new PV device to
a backend driver.

While at it move some duplicated code from pv/hvm specific mapping
functions to the single caller.

Reported-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Juergen Gross <jgross@suse.com>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Link: https://lore.kernel.org/r/20200701121638.19840-2-jgross@suse.com
Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>

Authored by Juergen Gross and committed by Boris Ostrovsky
3848e4e0 caef73cf

+83 -78
drivers/xen/xenbus/xenbus_client.c
··· 69 69 unsigned int nr_handles; 70 70 }; 71 71 72 + struct map_ring_valloc { 73 + struct xenbus_map_node *node; 74 + 75 + /* Why do we need two arrays? See comment of __xenbus_map_ring */ 76 + union { 77 + unsigned long addrs[XENBUS_MAX_RING_GRANTS]; 78 + pte_t *ptes[XENBUS_MAX_RING_GRANTS]; 79 + }; 80 + phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS]; 81 + 82 + struct gnttab_map_grant_ref map[XENBUS_MAX_RING_GRANTS]; 83 + struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS]; 84 + 85 + unsigned int idx; /* HVM only. */ 86 + }; 87 + 72 88 static DEFINE_SPINLOCK(xenbus_valloc_lock); 73 89 static LIST_HEAD(xenbus_valloc_pages); 74 90 75 91 struct xenbus_ring_ops { 76 - int (*map)(struct xenbus_device *dev, 92 + int (*map)(struct xenbus_device *dev, struct map_ring_valloc *info, 77 93 grant_ref_t *gnt_refs, unsigned int nr_grefs, 78 94 void **vaddr); 79 95 int (*unmap)(struct xenbus_device *dev, void *vaddr); ··· 465 449 unsigned int nr_grefs, void **vaddr) 466 450 { 467 451 int err; 452 + struct map_ring_valloc *info; 468 453 469 - err = ring_ops->map(dev, gnt_refs, nr_grefs, vaddr); 454 + *vaddr = NULL; 455 + 456 + if (nr_grefs > XENBUS_MAX_RING_GRANTS) 457 + return -EINVAL; 458 + 459 + info = kzalloc(sizeof(*info), GFP_KERNEL); 460 + if (!info) 461 + return -ENOMEM; 462 + 463 + info->node = kzalloc(sizeof(*info->node), GFP_KERNEL); 464 + if (!info->node) { 465 + err = -ENOMEM; 466 + goto out; 467 + } 468 + 469 + err = ring_ops->map(dev, info, gnt_refs, nr_grefs, vaddr); 470 + 470 471 /* Some hypervisors are buggy and can return 1. 
*/ 471 472 if (err > 0) 472 473 err = GNTST_general_error; 473 474 475 + out: 476 + kfree(info->node); 477 + kfree(info); 474 478 return err; 475 479 } 476 480 EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc); ··· 502 466 grant_ref_t *gnt_refs, 503 467 unsigned int nr_grefs, 504 468 grant_handle_t *handles, 505 - phys_addr_t *addrs, 469 + struct map_ring_valloc *info, 506 470 unsigned int flags, 507 471 bool *leaked) 508 472 { 509 - struct gnttab_map_grant_ref map[XENBUS_MAX_RING_GRANTS]; 510 - struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS]; 511 473 int i, j; 512 474 int err = GNTST_okay; 513 475 ··· 513 479 return -EINVAL; 514 480 515 481 for (i = 0; i < nr_grefs; i++) { 516 - memset(&map[i], 0, sizeof(map[i])); 517 - gnttab_set_map_op(&map[i], addrs[i], flags, gnt_refs[i], 518 - dev->otherend_id); 482 + gnttab_set_map_op(&info->map[i], info->phys_addrs[i], flags, 483 + gnt_refs[i], dev->otherend_id); 519 484 handles[i] = INVALID_GRANT_HANDLE; 520 485 } 521 486 522 - gnttab_batch_map(map, i); 487 + gnttab_batch_map(info->map, i); 523 488 524 489 for (i = 0; i < nr_grefs; i++) { 525 - if (map[i].status != GNTST_okay) { 526 - err = map[i].status; 527 - xenbus_dev_fatal(dev, map[i].status, 490 + if (info->map[i].status != GNTST_okay) { 491 + err = info->map[i].status; 492 + xenbus_dev_fatal(dev, info->map[i].status, 528 493 "mapping in shared page %d from domain %d", 529 494 gnt_refs[i], dev->otherend_id); 530 495 goto fail; 531 496 } else 532 - handles[i] = map[i].handle; 497 + handles[i] = info->map[i].handle; 533 498 } 534 499 535 500 return GNTST_okay; ··· 536 503 fail: 537 504 for (i = j = 0; i < nr_grefs; i++) { 538 505 if (handles[i] != INVALID_GRANT_HANDLE) { 539 - memset(&unmap[j], 0, sizeof(unmap[j])); 540 - gnttab_set_unmap_op(&unmap[j], (phys_addr_t)addrs[i], 506 + gnttab_set_unmap_op(&info->unmap[j], 507 + info->phys_addrs[i], 541 508 GNTMAP_host_map, handles[i]); 542 509 j++; 543 510 } 544 511 } 545 512 546 - if 
(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, j)) 513 + if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, info->unmap, j)) 547 514 BUG(); 548 515 549 516 *leaked = false; 550 517 for (i = 0; i < j; i++) { 551 - if (unmap[i].status != GNTST_okay) { 518 + if (info->unmap[i].status != GNTST_okay) { 552 519 *leaked = true; 553 520 break; 554 521 } ··· 599 566 return err; 600 567 } 601 568 602 - struct map_ring_valloc_hvm 603 - { 604 - unsigned int idx; 605 - 606 - /* Why do we need two arrays? See comment of __xenbus_map_ring */ 607 - phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS]; 608 - unsigned long addrs[XENBUS_MAX_RING_GRANTS]; 609 - }; 610 - 611 569 static void xenbus_map_ring_setup_grant_hvm(unsigned long gfn, 612 570 unsigned int goffset, 613 571 unsigned int len, 614 572 void *data) 615 573 { 616 - struct map_ring_valloc_hvm *info = data; 574 + struct map_ring_valloc *info = data; 617 575 unsigned long vaddr = (unsigned long)gfn_to_virt(gfn); 618 576 619 577 info->phys_addrs[info->idx] = vaddr; ··· 613 589 info->idx++; 614 590 } 615 591 616 - static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev, 617 - grant_ref_t *gnt_ref, 618 - unsigned int nr_grefs, 619 - void **vaddr) 592 + static int xenbus_map_ring_hvm(struct xenbus_device *dev, 593 + struct map_ring_valloc *info, 594 + grant_ref_t *gnt_ref, 595 + unsigned int nr_grefs, 596 + void **vaddr) 620 597 { 621 - struct xenbus_map_node *node; 598 + struct xenbus_map_node *node = info->node; 622 599 int err; 623 600 void *addr; 624 601 bool leaked = false; 625 - struct map_ring_valloc_hvm info = { 626 - .idx = 0, 627 - }; 628 602 unsigned int nr_pages = XENBUS_PAGES(nr_grefs); 629 - 630 - if (nr_grefs > XENBUS_MAX_RING_GRANTS) 631 - return -EINVAL; 632 - 633 - *vaddr = NULL; 634 - 635 - node = kzalloc(sizeof(*node), GFP_KERNEL); 636 - if (!node) 637 - return -ENOMEM; 638 603 639 604 err = alloc_xenballooned_pages(nr_pages, node->hvm.pages); 640 605 if (err) ··· 631 618 632 619 
gnttab_foreach_grant(node->hvm.pages, nr_grefs, 633 620 xenbus_map_ring_setup_grant_hvm, 634 - &info); 621 + info); 635 622 636 623 err = __xenbus_map_ring(dev, gnt_ref, nr_grefs, node->handles, 637 - info.phys_addrs, GNTMAP_host_map, &leaked); 624 + info, GNTMAP_host_map, &leaked); 638 625 node->nr_handles = nr_grefs; 639 626 640 627 if (err) ··· 654 641 spin_unlock(&xenbus_valloc_lock); 655 642 656 643 *vaddr = addr; 644 + info->node = NULL; 645 + 657 646 return 0; 658 647 659 648 out_xenbus_unmap_ring: 660 649 if (!leaked) 661 - xenbus_unmap_ring(dev, node->handles, nr_grefs, info.addrs); 650 + xenbus_unmap_ring(dev, node->handles, nr_grefs, info->addrs); 662 651 else 663 652 pr_alert("leaking %p size %u page(s)", 664 653 addr, nr_pages); ··· 668 653 if (!leaked) 669 654 free_xenballooned_pages(nr_pages, node->hvm.pages); 670 655 out_err: 671 - kfree(node); 672 656 return err; 673 657 } 674 658 ··· 690 676 EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree); 691 677 692 678 #ifdef CONFIG_XEN_PV 693 - static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev, 694 - grant_ref_t *gnt_refs, 695 - unsigned int nr_grefs, 696 - void **vaddr) 679 + static int xenbus_map_ring_pv(struct xenbus_device *dev, 680 + struct map_ring_valloc *info, 681 + grant_ref_t *gnt_refs, 682 + unsigned int nr_grefs, 683 + void **vaddr) 697 684 { 698 - struct xenbus_map_node *node; 685 + struct xenbus_map_node *node = info->node; 699 686 struct vm_struct *area; 700 - pte_t *ptes[XENBUS_MAX_RING_GRANTS]; 701 - phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS]; 702 687 int err = GNTST_okay; 703 688 int i; 704 689 bool leaked; 705 690 706 - *vaddr = NULL; 707 - 708 - if (nr_grefs > XENBUS_MAX_RING_GRANTS) 709 - return -EINVAL; 710 - 711 - node = kzalloc(sizeof(*node), GFP_KERNEL); 712 - if (!node) 713 - return -ENOMEM; 714 - 715 - area = alloc_vm_area(XEN_PAGE_SIZE * nr_grefs, ptes); 691 + area = alloc_vm_area(XEN_PAGE_SIZE * nr_grefs, info->ptes); 716 692 if (!area) { 717 693 kfree(node); 718 694 
return -ENOMEM; 719 695 } 720 696 721 697 for (i = 0; i < nr_grefs; i++) 722 - phys_addrs[i] = arbitrary_virt_to_machine(ptes[i]).maddr; 698 + info->phys_addrs[i] = 699 + arbitrary_virt_to_machine(info->ptes[i]).maddr; 723 700 724 701 err = __xenbus_map_ring(dev, gnt_refs, nr_grefs, node->handles, 725 - phys_addrs, 726 - GNTMAP_host_map | GNTMAP_contains_pte, 702 + info, GNTMAP_host_map | GNTMAP_contains_pte, 727 703 &leaked); 728 704 if (err) 729 705 goto failed; ··· 726 722 spin_unlock(&xenbus_valloc_lock); 727 723 728 724 *vaddr = area->addr; 725 + info->node = NULL; 726 + 729 727 return 0; 730 728 731 729 failed: ··· 736 730 else 737 731 pr_alert("leaking VM area %p size %u page(s)", area, nr_grefs); 738 732 739 - kfree(node); 740 733 return err; 741 734 } 742 735 743 - static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr) 736 + static int xenbus_unmap_ring_pv(struct xenbus_device *dev, void *vaddr) 744 737 { 745 738 struct xenbus_map_node *node; 746 739 struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS]; ··· 803 798 } 804 799 805 800 static const struct xenbus_ring_ops ring_ops_pv = { 806 - .map = xenbus_map_ring_valloc_pv, 807 - .unmap = xenbus_unmap_ring_vfree_pv, 801 + .map = xenbus_map_ring_pv, 802 + .unmap = xenbus_unmap_ring_pv, 808 803 }; 809 804 #endif 810 805 811 - struct unmap_ring_vfree_hvm 806 + struct unmap_ring_hvm 812 807 { 813 808 unsigned int idx; 814 809 unsigned long addrs[XENBUS_MAX_RING_GRANTS]; ··· 819 814 unsigned int len, 820 815 void *data) 821 816 { 822 - struct unmap_ring_vfree_hvm *info = data; 817 + struct unmap_ring_hvm *info = data; 823 818 824 819 info->addrs[info->idx] = (unsigned long)gfn_to_virt(gfn); 825 820 826 821 info->idx++; 827 822 } 828 823 829 - static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr) 824 + static int xenbus_unmap_ring_hvm(struct xenbus_device *dev, void *vaddr) 830 825 { 831 826 int rv; 832 827 struct xenbus_map_node *node; 833 828 void *addr; 
834 - struct unmap_ring_vfree_hvm info = { 829 + struct unmap_ring_hvm info = { 835 830 .idx = 0, 836 831 }; 837 832 unsigned int nr_pages; ··· 892 887 EXPORT_SYMBOL_GPL(xenbus_read_driver_state); 893 888 894 889 static const struct xenbus_ring_ops ring_ops_hvm = { 895 - .map = xenbus_map_ring_valloc_hvm, 896 - .unmap = xenbus_unmap_ring_vfree_hvm, 890 + .map = xenbus_map_ring_hvm, 891 + .unmap = xenbus_unmap_ring_hvm, 897 892 }; 898 893 899 894 void __init xenbus_ring_ops_init(void)