/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef TUN_VNET_H
#define TUN_VNET_H

/* High bits in flags field are unused. */
#define TUN_VNET_LE 0x80000000
#define TUN_VNET_BE 0x40000000

#define TUN_VNET_TNL_SIZE sizeof(struct virtio_net_hdr_v1_hash_tunnel)

static inline bool tun_vnet_legacy_is_little_endian(unsigned int flags)
{
	bool be = IS_ENABLED(CONFIG_TUN_VNET_CROSS_LE) &&
		  (flags & TUN_VNET_BE);

	return !be && virtio_legacy_is_little_endian();
}

static inline long tun_get_vnet_be(unsigned int flags, int __user *argp)
{
	int be = !!(flags & TUN_VNET_BE);

	if (!IS_ENABLED(CONFIG_TUN_VNET_CROSS_LE))
		return -EINVAL;

	if (put_user(be, argp))
		return -EFAULT;

	return 0;
}

static inline long tun_set_vnet_be(unsigned int *flags, int __user *argp)
{
	int be;

	if (!IS_ENABLED(CONFIG_TUN_VNET_CROSS_LE))
		return -EINVAL;

	if (get_user(be, argp))
		return -EFAULT;

	if (be)
		*flags |= TUN_VNET_BE;
	else
		*flags &= ~TUN_VNET_BE;

	return 0;
}

static inline bool tun_vnet_is_little_endian(unsigned int flags)
{
	return flags & TUN_VNET_LE || tun_vnet_legacy_is_little_endian(flags);
}

static inline u16 tun_vnet16_to_cpu(unsigned int flags, __virtio16 val)
{
	return __virtio16_to_cpu(tun_vnet_is_little_endian(flags), val);
}

static inline __virtio16 cpu_to_tun_vnet16(unsigned int flags, u16 val)
{
	return __cpu_to_virtio16(tun_vnet_is_little_endian(flags), val);
}

static inline long tun_vnet_ioctl(int *vnet_hdr_sz, unsigned int *flags,
				  unsigned int cmd, int __user *sp)
{
	int s;

	switch (cmd) {
	case TUNGETVNETHDRSZ:
		s = *vnet_hdr_sz;
		if (put_user(s, sp))
			return -EFAULT;
		return 0;

	case TUNSETVNETHDRSZ:
		if (get_user(s, sp))
			return -EFAULT;
		if (s < (int)sizeof(struct virtio_net_hdr))
			return -EINVAL;

		*vnet_hdr_sz = s;
		return 0;

	case TUNGETVNETLE:
		s = !!(*flags & TUN_VNET_LE);
		if (put_user(s, sp))
			return -EFAULT;
		return 0;

	case TUNSETVNETLE:
		if (get_user(s, sp))
			return -EFAULT;
		if (s)
			*flags |= TUN_VNET_LE;
		else
			*flags &= ~TUN_VNET_LE;
		return 0;

	case TUNGETVNETBE:
		return tun_get_vnet_be(*flags, sp);

	case TUNSETVNETBE:
		return tun_set_vnet_be(flags, sp);

	default:
		return -EINVAL;
	}
}

static inline unsigned int tun_vnet_parse_size(netdev_features_t features)
{
	if (!(features & NETIF_F_GSO_UDP_TUNNEL))
		return sizeof(struct virtio_net_hdr);

	return TUN_VNET_TNL_SIZE;
}

static inline int __tun_vnet_hdr_get(int sz, unsigned int flags,
				     netdev_features_t features,
				     struct iov_iter *from,
				     struct virtio_net_hdr *hdr)
{
	unsigned int parsed_size = tun_vnet_parse_size(features);
	u16 hdr_len;

	if (iov_iter_count(from) < sz)
		return -EINVAL;

	if (!copy_from_iter_full(hdr, parsed_size, from))
		return -EFAULT;

	hdr_len = tun_vnet16_to_cpu(flags, hdr->hdr_len);

	if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
		hdr_len = max(tun_vnet16_to_cpu(flags, hdr->csum_start) +
			      tun_vnet16_to_cpu(flags, hdr->csum_offset) + 2,
			      hdr_len);
		hdr->hdr_len = cpu_to_tun_vnet16(flags, hdr_len);
	}

	if (hdr_len > iov_iter_count(from))
		return -EINVAL;

	iov_iter_advance(from, sz - parsed_size);

	return hdr_len;
}

static inline int tun_vnet_hdr_get(int sz, unsigned int flags,
				   struct iov_iter *from,
				   struct virtio_net_hdr *hdr)
{
	return __tun_vnet_hdr_get(sz, flags, 0, from, hdr);
}

static inline int __tun_vnet_hdr_put(int sz, netdev_features_t features,
				     struct iov_iter *iter,
				     const struct virtio_net_hdr *hdr)
{
	unsigned int parsed_size = tun_vnet_parse_size(features);

	if (unlikely(iov_iter_count(iter) < sz))
		return -EINVAL;

	if (unlikely(copy_to_iter(hdr, parsed_size, iter) != parsed_size))
		return -EFAULT;

	if (iov_iter_zero(sz - parsed_size, iter) != sz - parsed_size)
		return -EFAULT;

	return 0;
}

static inline int tun_vnet_hdr_put(int sz, struct iov_iter *iter,
				   const struct virtio_net_hdr *hdr)
{
	return __tun_vnet_hdr_put(sz, 0, iter, hdr);
}

static inline int tun_vnet_hdr_to_skb(unsigned int flags, struct sk_buff *skb,
				      const struct virtio_net_hdr *hdr)
{
	return virtio_net_hdr_to_skb(skb, hdr, tun_vnet_is_little_endian(flags));
}

/*
 * Tun is not aware of the negotiated guest features, guess them from the
 * virtio net hdr size
 */
static inline netdev_features_t tun_vnet_hdr_guest_features(int vnet_hdr_sz)
{
	if (vnet_hdr_sz >= TUN_VNET_TNL_SIZE)
		return NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM;
	return 0;
}

static inline int
tun_vnet_hdr_tnl_to_skb(unsigned int flags, netdev_features_t features,
			struct sk_buff *skb,
			const struct virtio_net_hdr_v1_hash_tunnel *hdr)
{
	return virtio_net_hdr_tnl_to_skb(skb, hdr,
					 features & NETIF_F_GSO_UDP_TUNNEL,
					 features & NETIF_F_GSO_UDP_TUNNEL_CSUM,
					 tun_vnet_is_little_endian(flags));
}

static inline int tun_vnet_hdr_from_skb(unsigned int flags,
					const struct net_device *dev,
					const struct sk_buff *skb,
					struct virtio_net_hdr *hdr)
{
	int vlan_hlen = skb_vlan_tag_present(skb) ? VLAN_HLEN : 0;

	if (virtio_net_hdr_from_skb(skb, hdr,
				    tun_vnet_is_little_endian(flags), true,
				    vlan_hlen)) {
		struct skb_shared_info *sinfo = skb_shinfo(skb);

		if (net_ratelimit()) {
			netdev_err(dev, "unexpected GSO type: 0x%x, gso_size %d, hdr_len %d\n",
				   sinfo->gso_type, tun_vnet16_to_cpu(flags, hdr->gso_size),
				   tun_vnet16_to_cpu(flags, hdr->hdr_len));
			print_hex_dump(KERN_ERR, "tun: ",
				       DUMP_PREFIX_NONE,
				       16, 1, skb->head,
				       min(tun_vnet16_to_cpu(flags, hdr->hdr_len), 64), true);
		}
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	return 0;
}

static inline int
tun_vnet_hdr_tnl_from_skb(unsigned int flags,
			  const struct net_device *dev,
			  const struct sk_buff *skb,
			  struct virtio_net_hdr_v1_hash_tunnel *tnl_hdr)
{
	bool has_tnl_offload = !!(dev->features & NETIF_F_GSO_UDP_TUNNEL);
	int vlan_hlen = skb_vlan_tag_present(skb) ? VLAN_HLEN : 0;

	if (virtio_net_hdr_tnl_from_skb(skb, tnl_hdr, has_tnl_offload,
					tun_vnet_is_little_endian(flags),
					vlan_hlen, true)) {
		struct virtio_net_hdr_v1 *hdr = &tnl_hdr->hash_hdr.hdr;
		struct skb_shared_info *sinfo = skb_shinfo(skb);

		if (net_ratelimit()) {
			int hdr_len = tun_vnet16_to_cpu(flags, hdr->hdr_len);

			netdev_err(dev, "unexpected GSO type: 0x%x, gso_size %d, hdr_len %d\n",
				   sinfo->gso_type,
				   tun_vnet16_to_cpu(flags, hdr->gso_size),
				   tun_vnet16_to_cpu(flags, hdr->hdr_len));
			print_hex_dump(KERN_ERR, "tun: ", DUMP_PREFIX_NONE,
				       16, 1, skb->head, min(hdr_len, 64),
				       true);
		}
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	return 0;
}

#endif /* TUN_VNET_H */