Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

xen/netfront: force data bouncing when backend is untrusted

Bounce all data on the skbs to be transmitted into zeroed pages if the
backend is untrusted. This avoids leaking data present in the pages
shared with the backend but not part of the skb fragments. This
requires introducing a new helper in order to allocate skbs with a
size multiple of XEN_PAGE_SIZE so we don't leak contiguous data on the
granted pages.

Reporting whether the backend is to be trusted can be done using a
module parameter, or from the xenstore frontend path as set by the
toolstack when adding the device.

This is CVE-2022-33741, part of XSA-403.

Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
Reviewed-by: Juergen Gross <jgross@suse.com>
Signed-off-by: Juergen Gross <jgross@suse.com>

Authored by Roger Pau Monné and committed by Juergen Gross
4491001c 307c8de2

+47 -2
+47 -2
drivers/net/xen-netfront.c
··· 66 66 MODULE_PARM_DESC(max_queues, 67 67 "Maximum number of queues per virtual interface"); 68 68 69 + static bool __read_mostly xennet_trusted = true; 70 + module_param_named(trusted, xennet_trusted, bool, 0644); 71 + MODULE_PARM_DESC(trusted, "Is the backend trusted"); 72 + 69 73 #define XENNET_TIMEOUT (5 * HZ) 70 74 71 75 static const struct ethtool_ops xennet_ethtool_ops; ··· 176 172 177 173 /* Is device behaving sane? */ 178 174 bool broken; 175 + 176 + /* Should skbs be bounced into a zeroed buffer? */ 177 + bool bounce; 179 178 180 179 atomic_t rx_gso_checksum_fixup; 181 180 }; ··· 673 666 return nxmit; 674 667 } 675 668 669 + struct sk_buff *bounce_skb(const struct sk_buff *skb) 670 + { 671 + unsigned int headerlen = skb_headroom(skb); 672 + /* Align size to allocate full pages and avoid contiguous data leaks */ 673 + unsigned int size = ALIGN(skb_end_offset(skb) + skb->data_len, 674 + XEN_PAGE_SIZE); 675 + struct sk_buff *n = alloc_skb(size, GFP_ATOMIC | __GFP_ZERO); 676 + 677 + if (!n) 678 + return NULL; 679 + 680 + if (!IS_ALIGNED((uintptr_t)n->head, XEN_PAGE_SIZE)) { 681 + WARN_ONCE(1, "misaligned skb allocated\n"); 682 + kfree_skb(n); 683 + return NULL; 684 + } 685 + 686 + /* Set the data pointer */ 687 + skb_reserve(n, headerlen); 688 + /* Set the tail pointer and length */ 689 + skb_put(n, skb->len); 690 + 691 + BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len)); 692 + 693 + skb_copy_header(n, skb); 694 + return n; 695 + } 676 696 677 697 #define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1) 678 698 ··· 753 719 754 720 /* The first req should be at least ETH_HLEN size or the packet will be 755 721 * dropped by netback. 722 + * 723 + * If the backend is not trusted bounce all data to zeroed pages to 724 + * avoid exposing contiguous data on the granted page not belonging to 725 + * the skb. 
756 726 */ 757 - if (unlikely(PAGE_SIZE - offset < ETH_HLEN)) { 758 - nskb = skb_copy(skb, GFP_ATOMIC); 727 + if (np->bounce || unlikely(PAGE_SIZE - offset < ETH_HLEN)) { 728 + nskb = bounce_skb(skb); 759 729 if (!nskb) 760 730 goto drop; 761 731 dev_consume_skb_any(skb); ··· 2253 2215 2254 2216 info->netdev->irq = 0; 2255 2217 2218 + /* Check if backend is trusted. */ 2219 + info->bounce = !xennet_trusted || 2220 + !xenbus_read_unsigned(dev->nodename, "trusted", 1); 2221 + 2256 2222 /* Check if backend supports multiple queues */ 2257 2223 max_queues = xenbus_read_unsigned(info->xbdev->otherend, 2258 2224 "multi-queue-max-queues", 1); ··· 2424 2382 return err; 2425 2383 if (np->netback_has_xdp_headroom) 2426 2384 pr_info("backend supports XDP headroom\n"); 2385 + if (np->bounce) 2386 + dev_info(&np->xbdev->dev, 2387 + "bouncing transmitted data to zeroed pages\n"); 2427 2388 2428 2389 /* talk_to_netback() sets the correct number of queues */ 2429 2390 num_queues = dev->real_num_tx_queues;