Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

xen-netback: fix guest-receive-side array sizes

The sizes chosen for the metadata and grant_copy_op arrays on the guest
receive side are wrong:

- The meta array is needlessly twice the ring size, when we only ever
consume a single array element per RX ring slot
- The grant_copy_op array is way too small. It's sized based on a bogus
assumption: that at most two copy ops will be used per ring slot. This
may have been true at some point in the past but it's clear from looking
at start_new_rx_buffer() that a new ring slot is only consumed if a frag
would overflow the current slot (plus some other conditions) so the actual
limit is MAX_SKB_FRAGS grant_copy_ops per ring slot.

This patch fixes those two sizing issues and, because grant_copy_ops grows
so much, it pulls it out into a separate chunk of vmalloc()ed memory.

Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
Acked-by: Wei Liu <wei.liu2@citrix.com>
Cc: Ian Campbell <ian.campbell@citrix.com>
Cc: David Vrabel <david.vrabel@citrix.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

Paul Durrant and committed by
David S. Miller
ac3d5ac2 7a399e3a

+24 -7
+13 -6
drivers/net/xen-netback/common.h
··· 101 101 102 102 #define MAX_PENDING_REQS 256 103 103 104 + /* It's possible for an skb to have a maximal number of frags 105 + * but still be less than MAX_BUFFER_OFFSET in size. Thus the 106 + * worst-case number of copy operations is MAX_SKB_FRAGS per 107 + * ring slot. 108 + */ 109 + #define MAX_GRANT_COPY_OPS (MAX_SKB_FRAGS * XEN_NETIF_RX_RING_SIZE) 110 + 104 111 struct xenvif { 105 112 /* Unique identifier for this interface. */ 106 113 domid_t domid; ··· 150 143 */ 151 144 RING_IDX rx_req_cons_peek; 152 145 153 - /* Given MAX_BUFFER_OFFSET of 4096 the worst case is that each 154 - * head/fragment page uses 2 copy operations because it 155 - * straddles two buffers in the frontend. 156 - */ 157 - struct gnttab_copy grant_copy_op[2*XEN_NETIF_RX_RING_SIZE]; 158 - struct xenvif_rx_meta meta[2*XEN_NETIF_RX_RING_SIZE]; 146 + /* This array is allocated seperately as it is large */ 147 + struct gnttab_copy *grant_copy_op; 159 148 149 + /* We create one meta structure per ring request we consume, so 150 + * the maximum number is the same as the ring size. 151 + */ 152 + struct xenvif_rx_meta meta[XEN_NETIF_RX_RING_SIZE]; 160 153 161 154 u8 fe_dev_addr[6]; 162 155
+10
drivers/net/xen-netback/interface.c
··· 307 307 SET_NETDEV_DEV(dev, parent); 308 308 309 309 vif = netdev_priv(dev); 310 + 311 + vif->grant_copy_op = vmalloc(sizeof(struct gnttab_copy) * 312 + MAX_GRANT_COPY_OPS); 313 + if (vif->grant_copy_op == NULL) { 314 + pr_warn("Could not allocate grant copy space for %s\n", name); 315 + free_netdev(dev); 316 + return ERR_PTR(-ENOMEM); 317 + } 318 + 310 319 vif->domid = domid; 311 320 vif->handle = handle; 312 321 vif->can_sg = 1; ··· 496 487 497 488 unregister_netdev(vif->dev); 498 489 490 + vfree(vif->grant_copy_op); 499 491 free_netdev(vif->dev); 500 492 501 493 module_put(THIS_MODULE);
+1 -1
drivers/net/xen-netback/netback.c
··· 608 608 if (!npo.copy_prod) 609 609 return; 610 610 611 - BUG_ON(npo.copy_prod > ARRAY_SIZE(vif->grant_copy_op)); 611 + BUG_ON(npo.copy_prod > MAX_GRANT_COPY_OPS); 612 612 gnttab_batch_copy(vif->grant_copy_op, npo.copy_prod); 613 613 614 614 while ((skb = __skb_dequeue(&rxq)) != NULL) {