Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

xen/gntdev: remove struct gntdev_copy_batch from stack

When compiling the kernel with LLVM, the following warning was issued:

drivers/xen/gntdev.c:991: warning: stack frame size (1160) exceeds
limit (1024) in function 'gntdev_ioctl'

The main reason is struct gntdev_copy_batch, which is located on the
stack and has a size of nearly 1 KiB.

For performance reasons it shouldn't just be dynamically allocated and
freed on each use; instead, allocate a new instance when needed and,
rather than freeing it afterwards, put it into a list of free structs
anchored in struct gntdev_priv.

Fixes: a4cdb556cae0 ("xen/gntdev: add ioctl for grant copy")
Reported-by: Abinash Singh <abinashsinghlalotra@gmail.com>
Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
Signed-off-by: Juergen Gross <jgross@suse.com>
Message-ID: <20250703073259.17356-1-jgross@suse.com>

+54 -21
+4
drivers/xen/gntdev-common.h
··· 26 26 /* lock protects maps and freeable_maps. */ 27 27 struct mutex lock; 28 28 29 + /* Free instances of struct gntdev_copy_batch. */ 30 + struct gntdev_copy_batch *batch; 31 + struct mutex batch_lock; 32 + 29 33 #ifdef CONFIG_XEN_GRANT_DMA_ALLOC 30 34 /* Device for which DMA memory is allocated. */ 31 35 struct device *dma_dev;
+50 -21
drivers/xen/gntdev.c
··· 56 56 "Gerd Hoffmann <kraxel@redhat.com>"); 57 57 MODULE_DESCRIPTION("User-space granted page access driver"); 58 58 59 + #define GNTDEV_COPY_BATCH 16 60 + 61 + struct gntdev_copy_batch { 62 + struct gnttab_copy ops[GNTDEV_COPY_BATCH]; 63 + struct page *pages[GNTDEV_COPY_BATCH]; 64 + s16 __user *status[GNTDEV_COPY_BATCH]; 65 + unsigned int nr_ops; 66 + unsigned int nr_pages; 67 + bool writeable; 68 + struct gntdev_copy_batch *next; 69 + }; 70 + 59 71 static unsigned int limit = 64*1024; 60 72 module_param(limit, uint, 0644); 61 73 MODULE_PARM_DESC(limit, ··· 596 584 INIT_LIST_HEAD(&priv->maps); 597 585 mutex_init(&priv->lock); 598 586 587 + mutex_init(&priv->batch_lock); 588 + 599 589 #ifdef CONFIG_XEN_GNTDEV_DMABUF 600 590 priv->dmabuf_priv = gntdev_dmabuf_init(flip); 601 591 if (IS_ERR(priv->dmabuf_priv)) { ··· 622 608 { 623 609 struct gntdev_priv *priv = flip->private_data; 624 610 struct gntdev_grant_map *map; 611 + struct gntdev_copy_batch *batch; 625 612 626 613 pr_debug("priv %p\n", priv); 627 614 ··· 634 619 gntdev_put_map(NULL /* already removed */, map); 635 620 } 636 621 mutex_unlock(&priv->lock); 622 + 623 + mutex_lock(&priv->batch_lock); 624 + while (priv->batch) { 625 + batch = priv->batch; 626 + priv->batch = batch->next; 627 + kfree(batch); 628 + } 629 + mutex_unlock(&priv->batch_lock); 637 630 638 631 #ifdef CONFIG_XEN_GNTDEV_DMABUF 639 632 gntdev_dmabuf_fini(priv->dmabuf_priv); ··· 808 785 return rc; 809 786 } 810 787 811 - #define GNTDEV_COPY_BATCH 16 812 - 813 - struct gntdev_copy_batch { 814 - struct gnttab_copy ops[GNTDEV_COPY_BATCH]; 815 - struct page *pages[GNTDEV_COPY_BATCH]; 816 - s16 __user *status[GNTDEV_COPY_BATCH]; 817 - unsigned int nr_ops; 818 - unsigned int nr_pages; 819 - bool writeable; 820 - }; 821 - 822 788 static int gntdev_get_page(struct gntdev_copy_batch *batch, void __user *virt, 823 789 unsigned long *gfn) 824 790 { ··· 965 953 static long gntdev_ioctl_grant_copy(struct gntdev_priv *priv, void __user *u) 966 954 { 967 
955 struct ioctl_gntdev_grant_copy copy; 968 - struct gntdev_copy_batch batch; 956 + struct gntdev_copy_batch *batch; 969 957 unsigned int i; 970 958 int ret = 0; 971 959 972 960 if (copy_from_user(&copy, u, sizeof(copy))) 973 961 return -EFAULT; 974 962 975 - batch.nr_ops = 0; 976 - batch.nr_pages = 0; 963 + mutex_lock(&priv->batch_lock); 964 + if (!priv->batch) { 965 + batch = kmalloc(sizeof(*batch), GFP_KERNEL); 966 + } else { 967 + batch = priv->batch; 968 + priv->batch = batch->next; 969 + } 970 + mutex_unlock(&priv->batch_lock); 971 + if (!batch) 972 + return -ENOMEM; 973 + 974 + batch->nr_ops = 0; 975 + batch->nr_pages = 0; 977 976 978 977 for (i = 0; i < copy.count; i++) { 979 978 struct gntdev_grant_copy_segment seg; 980 979 981 980 if (copy_from_user(&seg, &copy.segments[i], sizeof(seg))) { 982 981 ret = -EFAULT; 982 + gntdev_put_pages(batch); 983 983 goto out; 984 984 } 985 985 986 - ret = gntdev_grant_copy_seg(&batch, &seg, &copy.segments[i].status); 987 - if (ret < 0) 986 + ret = gntdev_grant_copy_seg(batch, &seg, &copy.segments[i].status); 987 + if (ret < 0) { 988 + gntdev_put_pages(batch); 988 989 goto out; 990 + } 989 991 990 992 cond_resched(); 991 993 } 992 - if (batch.nr_ops) 993 - ret = gntdev_copy(&batch); 994 - return ret; 994 + if (batch->nr_ops) 995 + ret = gntdev_copy(batch); 995 996 996 - out: 997 - gntdev_put_pages(&batch); 997 + out: 998 + mutex_lock(&priv->batch_lock); 999 + batch->next = priv->batch; 1000 + priv->batch = batch; 1001 + mutex_unlock(&priv->batch_lock); 1002 + 998 1003 return ret; 999 1004 } 1000 1005