Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

xen-netback: enable IPv6 TCP GSO to the guest

This patch adds code to handle SKB_GSO_TCPV6 skbs and to construct the
appropriate extra-info or GSO-prefix segments needed to pass such large
packets to the frontend. Two new xenstore flags, feature-gso-tcpv6 and
feature-gso-tcpv6-prefix, are sampled to determine whether the frontend is
capable of handling such packets.

Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
Cc: Wei Liu <wei.liu2@citrix.com>
Cc: David Vrabel <david.vrabel@citrix.com>
Cc: Ian Campbell <ian.campbell@citrix.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by Paul Durrant; committed by David S. Miller.
82cada22 a9468587

+77 -16
+7 -2
drivers/net/xen-netback/common.h
··· 87 87 struct xenvif_rx_meta { 88 88 int id; 89 89 int size; 90 + int gso_type; 90 91 int gso_size; 91 92 }; 93 + 94 + #define GSO_BIT(type) \ 95 + (1 << XEN_NETIF_GSO_TYPE_ ## type) 92 96 93 97 /* Discriminate from any valid pending_idx value. */ 94 98 #define INVALID_PENDING_IDX 0xFFFF ··· 154 150 u8 fe_dev_addr[6]; 155 151 156 152 /* Frontend feature information. */ 153 + int gso_mask; 154 + int gso_prefix_mask; 155 + 157 156 u8 can_sg:1; 158 - u8 gso:1; 159 - u8 gso_prefix:1; 160 157 u8 ip_csum:1; 161 158 u8 ipv6_csum:1; 162 159
+4 -2
drivers/net/xen-netback/interface.c
··· 214 214 215 215 if (!vif->can_sg) 216 216 features &= ~NETIF_F_SG; 217 - if (!vif->gso && !vif->gso_prefix) 217 + if (~(vif->gso_mask | vif->gso_prefix_mask) & GSO_BIT(TCPV4)) 218 218 features &= ~NETIF_F_TSO; 219 + if (~(vif->gso_mask | vif->gso_prefix_mask) & GSO_BIT(TCPV6)) 220 + features &= ~NETIF_F_TSO6; 219 221 if (!vif->ip_csum) 220 222 features &= ~NETIF_F_IP_CSUM; 221 223 if (!vif->ipv6_csum) ··· 322 320 dev->netdev_ops = &xenvif_netdev_ops; 323 321 dev->hw_features = NETIF_F_SG | 324 322 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 325 - NETIF_F_TSO; 323 + NETIF_F_TSO | NETIF_F_TSO6; 326 324 dev->features = dev->hw_features | NETIF_F_RXCSUM; 327 325 SET_ETHTOOL_OPS(dev, &xenvif_ethtool_ops); 328 326
+38 -10
drivers/net/xen-netback/netback.c
··· 142 142 int max = DIV_ROUND_UP(vif->dev->mtu, PAGE_SIZE); 143 143 144 144 /* XXX FIXME: RX path dependent on MAX_SKB_FRAGS */ 145 - if (vif->can_sg || vif->gso || vif->gso_prefix) 145 + if (vif->can_sg || vif->gso_mask || vif->gso_prefix_mask) 146 146 max += MAX_SKB_FRAGS + 1; /* extra_info + frags */ 147 147 148 148 return max; ··· 314 314 req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++); 315 315 316 316 meta = npo->meta + npo->meta_prod++; 317 + meta->gso_type = XEN_NETIF_GSO_TYPE_NONE; 317 318 meta->gso_size = 0; 318 319 meta->size = 0; 319 320 meta->id = req->id; ··· 337 336 struct gnttab_copy *copy_gop; 338 337 struct xenvif_rx_meta *meta; 339 338 unsigned long bytes; 339 + int gso_type; 340 340 341 341 /* Data must not cross a page boundary. */ 342 342 BUG_ON(size + offset > PAGE_SIZE<<compound_order(page)); ··· 396 394 } 397 395 398 396 /* Leave a gap for the GSO descriptor. */ 399 - if (*head && skb_shinfo(skb)->gso_size && !vif->gso_prefix) 397 + if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) 398 + gso_type = XEN_NETIF_GSO_TYPE_TCPV4; 399 + else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) 400 + gso_type = XEN_NETIF_GSO_TYPE_TCPV6; 401 + else 402 + gso_type = XEN_NETIF_GSO_TYPE_NONE; 403 + 404 + if (*head && ((1 << gso_type) & vif->gso_mask)) 400 405 vif->rx.req_cons++; 401 406 402 407 *head = 0; /* There must be something in this buffer now. 
*/ ··· 434 425 unsigned char *data; 435 426 int head = 1; 436 427 int old_meta_prod; 428 + int gso_type; 429 + int gso_size; 437 430 438 431 old_meta_prod = npo->meta_prod; 439 432 433 + if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) { 434 + gso_type = XEN_NETIF_GSO_TYPE_TCPV4; 435 + gso_size = skb_shinfo(skb)->gso_size; 436 + } else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) { 437 + gso_type = XEN_NETIF_GSO_TYPE_TCPV6; 438 + gso_size = skb_shinfo(skb)->gso_size; 439 + } else { 440 + gso_type = XEN_NETIF_GSO_TYPE_NONE; 441 + gso_size = 0; 442 + } 443 + 440 444 /* Set up a GSO prefix descriptor, if necessary */ 441 - if (skb_shinfo(skb)->gso_size && vif->gso_prefix) { 445 + if ((1 << skb_shinfo(skb)->gso_type) & vif->gso_prefix_mask) { 442 446 req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++); 443 447 meta = npo->meta + npo->meta_prod++; 444 - meta->gso_size = skb_shinfo(skb)->gso_size; 448 + meta->gso_type = gso_type; 449 + meta->gso_size = gso_size; 445 450 meta->size = 0; 446 451 meta->id = req->id; 447 452 } ··· 463 440 req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++); 464 441 meta = npo->meta + npo->meta_prod++; 465 442 466 - if (!vif->gso_prefix) 467 - meta->gso_size = skb_shinfo(skb)->gso_size; 468 - else 443 + if ((1 << gso_type) & vif->gso_mask) { 444 + meta->gso_type = gso_type; 445 + meta->gso_size = gso_size; 446 + } else { 447 + meta->gso_type = XEN_NETIF_GSO_TYPE_NONE; 469 448 meta->gso_size = 0; 449 + } 470 450 471 451 meta->size = 0; 472 452 meta->id = req->id; ··· 615 589 616 590 vif = netdev_priv(skb->dev); 617 591 618 - if (vif->meta[npo.meta_cons].gso_size && vif->gso_prefix) { 592 + if ((1 << vif->meta[npo.meta_cons].gso_type) & 593 + vif->gso_prefix_mask) { 619 594 resp = RING_GET_RESPONSE(&vif->rx, 620 595 vif->rx.rsp_prod_pvt++); 621 596 ··· 653 626 vif->meta[npo.meta_cons].size, 654 627 flags); 655 628 656 - if (vif->meta[npo.meta_cons].gso_size && !vif->gso_prefix) { 629 + if ((1 << vif->meta[npo.meta_cons].gso_type) & 630 + 
vif->gso_mask) { 657 631 struct xen_netif_extra_info *gso = 658 632 (struct xen_netif_extra_info *) 659 633 RING_GET_RESPONSE(&vif->rx, ··· 662 634 663 635 resp->flags |= XEN_NETRXF_extra_info; 664 636 637 + gso->u.gso.type = vif->meta[npo.meta_cons].gso_type; 665 638 gso->u.gso.size = vif->meta[npo.meta_cons].gso_size; 666 - gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4; 667 639 gso->u.gso.pad = 0; 668 640 gso->u.gso.features = 0; 669 641
+27 -2
drivers/net/xen-netback/xenbus.c
··· 577 577 val = 0; 578 578 vif->can_sg = !!val; 579 579 580 + vif->gso_mask = 0; 581 + vif->gso_prefix_mask = 0; 582 + 580 583 if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4", 581 584 "%d", &val) < 0) 582 585 val = 0; 583 - vif->gso = !!val; 586 + if (val) 587 + vif->gso_mask |= GSO_BIT(TCPV4); 584 588 585 589 if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4-prefix", 586 590 "%d", &val) < 0) 587 591 val = 0; 588 - vif->gso_prefix = !!val; 592 + if (val) 593 + vif->gso_prefix_mask |= GSO_BIT(TCPV4); 594 + 595 + if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv6", 596 + "%d", &val) < 0) 597 + val = 0; 598 + if (val) 599 + vif->gso_mask |= GSO_BIT(TCPV6); 600 + 601 + if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv6-prefix", 602 + "%d", &val) < 0) 603 + val = 0; 604 + if (val) 605 + vif->gso_prefix_mask |= GSO_BIT(TCPV6); 606 + 607 + if (vif->gso_mask & vif->gso_prefix_mask) { 608 + xenbus_dev_fatal(dev, err, 609 + "%s: gso and gso prefix flags are not " 610 + "mutually exclusive", 611 + dev->otherend); 612 + return -EOPNOTSUPP; 613 + } 589 614 590 615 if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-no-csum-offload", 591 616 "%d", &val) < 0)
+1
include/xen/interface/io/netif.h
··· 110 110 #define XEN_NETIF_EXTRA_FLAG_MORE (1U<<_XEN_NETIF_EXTRA_FLAG_MORE) 111 111 112 112 /* GSO types */ 113 + #define XEN_NETIF_GSO_TYPE_NONE (0) 113 114 #define XEN_NETIF_GSO_TYPE_TCPV4 (1) 114 115 #define XEN_NETIF_GSO_TYPE_TCPV6 (2) 115 116