
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec-next

Steffen Klassert says:

====================
pull request (net-next): ipsec-next 2017-04-20

This adds the basic infrastructure for IPsec hardware
offloading: it creates a configuration API and adjusts
the packet path.

1) Add the needed netdev features to configure IPsec offloads.

2) Add the IPsec hardware offloading API.

3) Prepare the ESP packet path for hardware offloading.

4) Add GSO handlers for esp4 and esp6; this implements
the software fallback for GSO packets.

5) Add xfrm replay handler functions for offloading.

6) Change ESP to use a synchronous crypto algorithm on
offloading; we don't have the option of asynchronous
returns when we handle IPsec at layer 2.

7) Add an xfrm validate function to validate_xmit_skb. This
implements the software fallback for non-GSO packets.

8) Set the inner_network and inner_transport members of
the SKB, as well as encapsulation, to reflect the actual
positions of these headers, and remove them only once
encryption is done on the payload.
From Ilan Tayari.

9) Prepare the ESP GRO codepath for hardware offloading.

10) Fix incorrect null pointer check in esp6.
From Colin Ian King.

11) Fix the GSO software fallback path so that it detects
the fallback case correctly.
From Ilan Tayari.

Please pull or let me know if there are problems.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+1723 -376
+7 -1
include/linux/netdev_features.h
···
 	 */
 	NETIF_F_GSO_TUNNEL_REMCSUM_BIT,	/* ... TUNNEL with TSO & REMCSUM */
 	NETIF_F_GSO_SCTP_BIT,		/* ... SCTP fragmentation */
+	NETIF_F_GSO_ESP_BIT,		/* ... ESP with TSO */
 	/**/NETIF_F_GSO_LAST =		/* last bit, see GSO_MASK */
-		NETIF_F_GSO_SCTP_BIT,
+		NETIF_F_GSO_ESP_BIT,

 	NETIF_F_FCOE_CRC_BIT,		/* FCoE CRC32 */
 	NETIF_F_SCTP_CRC_BIT,		/* SCTP checksum offload */
···
 	NETIF_F_HW_L2FW_DOFFLOAD_BIT,	/* Allow L2 Forwarding in Hardware */

 	NETIF_F_HW_TC_BIT,		/* Offload TC infrastructure */
+	NETIF_F_HW_ESP_BIT,		/* Hardware ESP transformation offload */
+	NETIF_F_HW_ESP_TX_CSUM_BIT,	/* ESP with TX checksum offload */

 	/*
 	 * Add your fresh new feature above and remember to update
···
 #define NETIF_F_GSO_PARTIAL	__NETIF_F(GSO_PARTIAL)
 #define NETIF_F_GSO_TUNNEL_REMCSUM __NETIF_F(GSO_TUNNEL_REMCSUM)
 #define NETIF_F_GSO_SCTP	__NETIF_F(GSO_SCTP)
+#define NETIF_F_GSO_ESP		__NETIF_F(GSO_ESP)
 #define NETIF_F_HW_VLAN_STAG_FILTER __NETIF_F(HW_VLAN_STAG_FILTER)
 #define NETIF_F_HW_VLAN_STAG_RX	__NETIF_F(HW_VLAN_STAG_RX)
 #define NETIF_F_HW_VLAN_STAG_TX	__NETIF_F(HW_VLAN_STAG_TX)
 #define NETIF_F_HW_L2FW_DOFFLOAD __NETIF_F(HW_L2FW_DOFFLOAD)
 #define NETIF_F_HW_TC		__NETIF_F(HW_TC)
+#define NETIF_F_HW_ESP		__NETIF_F(HW_ESP)
+#define NETIF_F_HW_ESP_TX_CSUM	__NETIF_F(HW_ESP_TX_CSUM)

 #define for_each_netdev_feature(mask_addr, bit)	\
 	for_each_set_bit(bit, (unsigned long *)mask_addr, NETDEV_FEATURE_COUNT)
+15
include/linux/netdevice.h
···
 		};
 	};

+#ifdef CONFIG_XFRM_OFFLOAD
+struct xfrmdev_ops {
+	int	(*xdo_dev_state_add) (struct xfrm_state *x);
+	void	(*xdo_dev_state_delete) (struct xfrm_state *x);
+	void	(*xdo_dev_state_free) (struct xfrm_state *x);
+	bool	(*xdo_dev_offload_ok) (struct sk_buff *skb,
+				       struct xfrm_state *x);
+};
+#endif
+
 /*
  * This structure defines the management hooks for network devices.
  * The following hooks can be defined; unless noted otherwise, they are
···
 #endif
 #if IS_ENABLED(CONFIG_IPV6)
 	const struct ndisc_ops *ndisc_ops;
+#endif
+
+#ifdef CONFIG_XFRM
+	const struct xfrmdev_ops *xfrmdev_ops;
 #endif

 	const struct header_ops *header_ops;
···
 	BUILD_BUG_ON(SKB_GSO_PARTIAL != (NETIF_F_GSO_PARTIAL >> NETIF_F_GSO_SHIFT));
 	BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT));
 	BUILD_BUG_ON(SKB_GSO_SCTP != (NETIF_F_GSO_SCTP >> NETIF_F_GSO_SHIFT));
+	BUILD_BUG_ON(SKB_GSO_ESP != (NETIF_F_GSO_ESP >> NETIF_F_GSO_SHIFT));

 	return (features & feature) == feature;
 }
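The driver contract here is deliberately small: implement the four callbacks, point net_device->xfrmdev_ops at the table, and advertise NETIF_F_HW_ESP. A minimal sketch of the driver side; all foo_* helpers and FOO_MAX_SEG are hypothetical stand-ins for real hardware plumbing, not part of this patchset:

	/* Hypothetical NIC driver glue, sketched against the ops above. */
	static int foo_xdo_dev_state_add(struct xfrm_state *x)
	{
		/* reject what the hardware can't do, e.g. ESN or UDP encap */
		if ((x->props.flags & XFRM_STATE_ESN) || x->encap)
			return -EINVAL;

		/* program the SA into the NIC, remember a hardware handle */
		x->xso.offload_handle = foo_install_sa(x);
		return x->xso.offload_handle ? 0 : -ENOSPC;
	}

	static void foo_xdo_dev_state_delete(struct xfrm_state *x)
	{
		foo_remove_sa(x->xso.offload_handle);	/* stop using the SA */
	}

	static void foo_xdo_dev_state_free(struct xfrm_state *x)
	{
		foo_free_sa_resources(x->xso.offload_handle);
	}

	static bool foo_xdo_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
	{
		/* per-packet veto: false sends the packet to the SW fallback */
		return !skb_is_gso(skb) || skb_shinfo(skb)->gso_size <= FOO_MAX_SEG;
	}

	static const struct xfrmdev_ops foo_xfrmdev_ops = {
		.xdo_dev_state_add	= foo_xdo_dev_state_add,
		.xdo_dev_state_delete	= foo_xdo_dev_state_delete,
		.xdo_dev_state_free	= foo_xdo_dev_state_free,
		.xdo_dev_offload_ok	= foo_xdo_dev_offload_ok,
	};

	/* in the probe path: */
	netdev->xfrmdev_ops = &foo_xfrmdev_ops;
	netdev->hw_features |= NETIF_F_HW_ESP;
	netdev->features    |= NETIF_F_HW_ESP;

A false return from xdo_dev_offload_ok only vetoes offload for that packet; such packets then go through the software fallback paths added below.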
+2
include/linux/skbuff.h
···
 	SKB_GSO_TUNNEL_REMCSUM = 1 << 14,

 	SKB_GSO_SCTP = 1 << 15,
+
+	SKB_GSO_ESP = 1 << 16,
 };

 #if BITS_PER_LONG > 32
+19
include/net/esp.h
···
 	return (struct ip_esp_hdr *)skb_transport_header(skb);
 }

+struct esp_info {
+	struct	ip_esp_hdr *esph;
+	__be64	seqno;
+	int	tfclen;
+	int	tailen;
+	int	plen;
+	int	clen;
+	int	len;
+	int	nfrags;
+	__u8	proto;
+	bool	inplace;
+};
+
+int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp);
+int esp_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp);
+int esp_input_done2(struct sk_buff *skb, int err);
+int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp);
+int esp6_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp);
+int esp6_input_done2(struct sk_buff *skb, int err);
 #endif
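These exports split ESP output in two: esp_output_head() builds the trailer and ESP header around the payload, while esp_output_tail() runs the actual AEAD transform. A rough sketch of the intended call pattern, mirroring esp_output() and esp_xmit() in the diffs below; hw_can_encrypt is a hypothetical condition, not part of this API:

	struct esp_info esp = { .inplace = true };

	/* caller fills esp.proto, esp.tfclen, esp.clen, esp.plen and
	 * esp.tailen from the AEAD parameters before this point */
	esp.nfrags = esp_output_head(x, skb, &esp);	/* trailer + headers */
	if (esp.nfrags < 0)
		return esp.nfrags;

	if (hw_can_encrypt)	/* hypothetical: the NIC encrypts this packet */
		return 0;	/* stop after the headers */

	return esp_output_tail(x, skb, &esp);	/* software AEAD fallback */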
+102 -6
include/net/xfrm.h
···
 	struct xfrm_address_filter *filter;
 };

+struct xfrm_state_offload {
+	struct net_device	*dev;
+	unsigned long		offload_handle;
+	unsigned int		num_exthdrs;
+	u8			flags;
+};
+
 /* Full description of state of transformer. */
 struct xfrm_state {
 	possible_net_t		xs_net;
···
 	struct xfrm_lifetime_cur curlft;
 	struct tasklet_hrtimer	mtimer;

+	struct xfrm_state_offload xso;
+
 	/* used to fix curlft->add_time when changing date */
 	long		saved_tmo;
···
 	struct xfrm_mode	*inner_mode;
 	struct xfrm_mode	*inner_mode_iaf;
 	struct xfrm_mode	*outer_mode;
+
+	const struct xfrm_type_offload	*type_offload;

 	/* Security context */
 	struct xfrm_sec_ctx	*security;
···
 int __xfrm_state_delete(struct xfrm_state *x);

 struct xfrm_state_afinfo {
-	unsigned int		family;
-	unsigned int		proto;
-	__be16			eth_proto;
-	struct module		*owner;
-	const struct xfrm_type	*type_map[IPPROTO_MAX];
-	struct xfrm_mode	*mode_map[XFRM_MODE_MAX];
+	unsigned int			family;
+	unsigned int			proto;
+	__be16				eth_proto;
+	struct module			*owner;
+	const struct xfrm_type		*type_map[IPPROTO_MAX];
+	const struct xfrm_type_offload	*type_offload_map[IPPROTO_MAX];
+	struct xfrm_mode		*mode_map[XFRM_MODE_MAX];
+
 	int			(*init_flags)(struct xfrm_state *x);
 	void			(*init_tempsel)(struct xfrm_selector *sel,
 						const struct flowi *fl);
···
 int xfrm_register_type(const struct xfrm_type *type, unsigned short family);
 int xfrm_unregister_type(const struct xfrm_type *type, unsigned short family);

+struct xfrm_type_offload {
+	char		*description;
+	struct module	*owner;
+	u8		proto;
+	void		(*encap)(struct xfrm_state *, struct sk_buff *pskb);
+	int		(*input_tail)(struct xfrm_state *x, struct sk_buff *skb);
+	int		(*xmit)(struct xfrm_state *, struct sk_buff *pskb, netdev_features_t features);
+};
+
+int xfrm_register_type_offload(const struct xfrm_type_offload *type, unsigned short family);
+int xfrm_unregister_type_offload(const struct xfrm_type_offload *type, unsigned short family);
+
 struct xfrm_mode {
 	/*
 	 * Remove encapsulation header.
···
 	 * call output2.
 	 */
 	int (*output)(struct xfrm_state *x, struct sk_buff *skb);
+
+	/*
+	 * Adjust pointers into the packet and do GSO segmentation.
+	 */
+	struct sk_buff *(*gso_segment)(struct xfrm_state *x, struct sk_buff *skb, netdev_features_t features);
+
+	/*
+	 * Adjust pointers into the packet when IPsec is done at layer2.
+	 */
+	void (*xmit)(struct xfrm_state *x, struct sk_buff *skb);

 	struct xfrm_state_afinfo *afinfo;
 	struct module *owner;
···
 struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq);
 int xfrm_state_delete(struct xfrm_state *x);
 int xfrm_state_flush(struct net *net, u8 proto, bool task_valid);
+int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_valid);
 void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si);
 void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si);
 u32 xfrm_replay_seqhi(struct xfrm_state *x, __be32 net_seq);
···
 	return 0;
 }
 #endif
+
+struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos, int oif,
+				    const xfrm_address_t *saddr,
+				    const xfrm_address_t *daddr,
+				    int family);

 struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp);
···
 		return NULL;

 	return &sp->ovec[sp->olen - 1];
+}
+#endif
+
+#ifdef CONFIG_XFRM_OFFLOAD
+void __net_init xfrm_dev_init(void);
+int validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features);
+int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
+		       struct xfrm_user_offload *xuo);
+bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
+
+static inline void xfrm_dev_state_delete(struct xfrm_state *x)
+{
+	struct xfrm_state_offload *xso = &x->xso;
+
+	if (xso->dev)
+		xso->dev->xfrmdev_ops->xdo_dev_state_delete(x);
+}
+
+static inline void xfrm_dev_state_free(struct xfrm_state *x)
+{
+	struct xfrm_state_offload *xso = &x->xso;
+	struct net_device *dev = xso->dev;
+
+	if (dev && dev->xfrmdev_ops) {
+		dev->xfrmdev_ops->xdo_dev_state_free(x);
+		xso->dev = NULL;
+		dev_put(dev);
+	}
+}
+#else
+static inline void __net_init xfrm_dev_init(void)
+{
+}
+
+static inline int validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features)
+{
+	return 0;
+}
+
+static inline int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, struct xfrm_user_offload *xuo)
+{
+	return 0;
+}
+
+static inline void xfrm_dev_state_delete(struct xfrm_state *x)
+{
+}
+
+static inline void xfrm_dev_state_free(struct xfrm_state *x)
+{
+}
+
+static inline bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
+{
+	return false;
 }
 #endif
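Taken together, the new xfrm_type_offload callbacks map directly onto the adjusted packet path: ->encap runs while the GSO headers are built, ->xmit runs from validate_xmit_xfrm() or the GSO segment loop just before the packet reaches the driver, and ->input_tail finishes receive processing for packets the device has already decrypted.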
+8
include/uapi/linux/xfrm.h
···
 	XFRMA_PROTO,		/* __u8 */
 	XFRMA_ADDRESS_FILTER,	/* struct xfrm_address_filter */
 	XFRMA_PAD,
+	XFRMA_OFFLOAD_DEV,	/* struct xfrm_state_offload */
 	__XFRMA_MAX

 #define XFRMA_MAX (__XFRMA_MAX - 1)
···
 	__u8	splen;
 	__u8	dplen;
 };
+
+struct xfrm_user_offload {
+	int	ifindex;
+	__u8	flags;
+};
+#define XFRM_OFFLOAD_IPV6	1
+#define XFRM_OFFLOAD_INBOUND	2

 #ifndef __KERNEL__
 /* backwards compatibility for userspace */
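Userspace opts a state into offloading by attaching an XFRMA_OFFLOAD_DEV attribute when the SA is installed. A hypothetical fragment using the iproute2 libnetlink helper addattr_l(); the surrounding req message setup for the new-SA request is assumed and not shown:

	#include <net/if.h>		/* if_nametoindex() */
	#include <linux/xfrm.h>		/* XFRMA_OFFLOAD_DEV, struct xfrm_user_offload */

	struct xfrm_user_offload xuo = {
		.ifindex = if_nametoindex("eth0"),	/* offload device */
		.flags	 = XFRM_OFFLOAD_INBOUND,	/* receive-side SA; omit for TX */
	};

	addattr_l(&req.n, sizeof(req), XFRMA_OFFLOAD_DEV, &xuo, sizeof(xuo));

Recent iproute2 exposes the same attribute as "ip xfrm state add ... offload dev eth0 dir in"; the exact syntax depends on the iproute2 version.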
+3
net/core/dev.c
···
 	    __skb_linearize(skb))
 		goto out_kfree_skb;

+	if (validate_xmit_xfrm(skb, features))
+		goto out_kfree_skb;
+
 	/* If packet is not checksummed and device does not
 	 * support checksumming for this protocol, complete
 	 * checksumming here.
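This hook is item 7 of the series: every non-GSO packet passes through validate_xmit_xfrm() in validate_xmit_skb(), which performs the ESP transformation in software whenever the state is not (or cannot be) handled by the device. GSO packets are left alone here and take the gso_segment path instead.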
+3
net/core/ethtool.c
···
 	[NETIF_F_GSO_UDP_TUNNEL_CSUM_BIT] = "tx-udp_tnl-csum-segmentation",
 	[NETIF_F_GSO_PARTIAL_BIT] =	 "tx-gso-partial",
 	[NETIF_F_GSO_SCTP_BIT] =	 "tx-sctp-segmentation",
+	[NETIF_F_GSO_ESP_BIT] =		 "tx-esp-segmentation",

 	[NETIF_F_FCOE_CRC_BIT] =	 "tx-checksum-fcoe-crc",
 	[NETIF_F_SCTP_CRC_BIT] =	 "tx-checksum-sctp",
···
 	[NETIF_F_RXALL_BIT] =		 "rx-all",
 	[NETIF_F_HW_L2FW_DOFFLOAD_BIT] = "l2-fwd-offload",
 	[NETIF_F_HW_TC_BIT] =		 "hw-tc-offload",
+	[NETIF_F_HW_ESP_BIT] =		 "esp-hw-offload",
+	[NETIF_F_HW_ESP_TX_CSUM_BIT] =	 "esp-tx-csum-hw-offload",
 };

 static const char
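Once these strings are registered, the offloads show up in "ethtool -k <dev>" and can be toggled with, for example, "ethtool -K <dev> esp-hw-offload on", provided the driver advertises NETIF_F_HW_ESP in hw_features.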
+205 -165
net/ipv4/esp4.c
···
 }

 static struct ip_esp_hdr *esp_output_set_extra(struct sk_buff *skb,
+					       struct xfrm_state *x,
 					       struct ip_esp_hdr *esph,
 					       struct esp_output_extra *extra)
 {
-	struct xfrm_state *x = skb_dst(skb)->xfrm;
-
 	/* For ESN we move the header forward by 4 bytes to
 	 * accomodate the high bits.  We will move it back after
 	 * encryption.
 	 */
 	if ((x->props.flags & XFRM_STATE_ESN)) {
+		__u32 seqhi;
+		struct xfrm_offload *xo = xfrm_offload(skb);
+
+		if (xo)
+			seqhi = xo->seq.hi;
+		else
+			seqhi = XFRM_SKB_CB(skb)->seq.output.hi;
+
 		extra->esphoff = (unsigned char *)esph -
 				 skb_transport_header(skb);
 		esph = (struct ip_esp_hdr *)((unsigned char *)esph - 4);
 		extra->seqhi = esph->spi;
-		esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
+		esph->seq_no = htonl(seqhi);
 	}

 	esph->spi = x->id.spi;
···
 	tail[plen - 1] = proto;
 }

-static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
-{
-	struct esp_output_extra *extra;
-	int err = -ENOMEM;
-	struct ip_esp_hdr *esph;
-	struct crypto_aead *aead;
-	struct aead_request *req;
-	struct scatterlist *sg, *dsg;
-	struct sk_buff *trailer;
-	struct page *page;
-	void *tmp;
-	u8 *iv;
+static void esp_output_udp_encap(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
+{
+	int encap_type;
+	struct udphdr *uh;
+	__be32 *udpdata32;
+	__be16 sport, dport;
+	struct xfrm_encap_tmpl *encap = x->encap;
+	struct ip_esp_hdr *esph = esp->esph;
+
+	spin_lock_bh(&x->lock);
+	sport = encap->encap_sport;
+	dport = encap->encap_dport;
+	encap_type = encap->encap_type;
+	spin_unlock_bh(&x->lock);
+
+	uh = (struct udphdr *)esph;
+	uh->source = sport;
+	uh->dest = dport;
+	uh->len = htons(skb->len + esp->tailen
+			- skb_transport_offset(skb));
+	uh->check = 0;
+
+	switch (encap_type) {
+	default:
+	case UDP_ENCAP_ESPINUDP:
+		esph = (struct ip_esp_hdr *)(uh + 1);
+		break;
+	case UDP_ENCAP_ESPINUDP_NON_IKE:
+		udpdata32 = (__be32 *)(uh + 1);
+		udpdata32[0] = udpdata32[1] = 0;
+		esph = (struct ip_esp_hdr *)(udpdata32 + 2);
+		break;
+	}
+
+	*skb_mac_header(skb) = IPPROTO_UDP;
+	esp->esph = esph;
+}
+
+int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
+{
 	u8 *tail;
 	u8 *vaddr;
-	int blksize;
-	int clen;
-	int alen;
-	int plen;
-	int ivlen;
-	int tfclen;
 	int nfrags;
-	int assoclen;
-	int extralen;
-	int tailen;
-	__be64 seqno;
-	__u8 proto = *skb_mac_header(skb);
-
-	/* skb is pure payload to encrypt */
-
-	aead = x->data;
-	alen = crypto_aead_authsize(aead);
-	ivlen = crypto_aead_ivsize(aead);
-
-	tfclen = 0;
-	if (x->tfcpad) {
-		struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
-		u32 padto;
-
-		padto = min(x->tfcpad, esp4_get_mtu(x, dst->child_mtu_cached));
-		if (skb->len < padto)
-			tfclen = padto - skb->len;
-	}
-	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
-	clen = ALIGN(skb->len + 2 + tfclen, blksize);
-	plen = clen - skb->len - tfclen;
-	tailen = tfclen + plen + alen;
-	assoclen = sizeof(*esph);
-	extralen = 0;
-
-	if (x->props.flags & XFRM_STATE_ESN) {
-		extralen += sizeof(*extra);
-		assoclen += sizeof(__be32);
-	}
-
-	*skb_mac_header(skb) = IPPROTO_ESP;
-	esph = ip_esp_hdr(skb);
+	struct page *page;
+	struct sk_buff *trailer;
+	int tailen = esp->tailen;

 	/* this is non-NULL only with UDP Encapsulation */
-	if (x->encap) {
-		struct xfrm_encap_tmpl *encap = x->encap;
-		struct udphdr *uh;
-		__be32 *udpdata32;
-		__be16 sport, dport;
-		int encap_type;
-
-		spin_lock_bh(&x->lock);
-		sport = encap->encap_sport;
-		dport = encap->encap_dport;
-		encap_type = encap->encap_type;
-		spin_unlock_bh(&x->lock);
-
-		uh = (struct udphdr *)esph;
-		uh->source = sport;
-		uh->dest = dport;
-		uh->len = htons(skb->len + tailen
-				- skb_transport_offset(skb));
-		uh->check = 0;
-
-		switch (encap_type) {
-		default:
-		case UDP_ENCAP_ESPINUDP:
-			esph = (struct ip_esp_hdr *)(uh + 1);
-			break;
-		case UDP_ENCAP_ESPINUDP_NON_IKE:
-			udpdata32 = (__be32 *)(uh + 1);
-			udpdata32[0] = udpdata32[1] = 0;
-			esph = (struct ip_esp_hdr *)(udpdata32 + 2);
-			break;
-		}
-
-		*skb_mac_header(skb) = IPPROTO_UDP;
-	}
+	if (x->encap)
+		esp_output_udp_encap(x, skb, esp);

 	if (!skb_cloned(skb)) {
 		if (tailen <= skb_availroom(skb)) {
···
 			int allocsize;
 			struct sock *sk = skb->sk;
 			struct page_frag *pfrag = &x->xfrag;
+
+			esp->inplace = false;

 			allocsize = ALIGN(tailen, L1_CACHE_BYTES);
···
 			tail = vaddr + pfrag->offset;

-			esp_output_fill_trailer(tail, tfclen, plen, proto);
+			esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);

 			kunmap_atomic(vaddr);
+
+			spin_unlock_bh(&x->lock);

 			nfrags = skb_shinfo(skb)->nr_frags;
···
 			if (sk)
 				atomic_add(tailen, &sk->sk_wmem_alloc);

-			skb_push(skb, -skb_network_offset(skb));
-
-			esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
-			esph->spi = x->id.spi;
-
-			tmp = esp_alloc_tmp(aead, nfrags + 2, extralen);
-			if (!tmp) {
-				spin_unlock_bh(&x->lock);
-				err = -ENOMEM;
-				goto error;
-			}
-
-			extra = esp_tmp_extra(tmp);
-			iv = esp_tmp_iv(aead, tmp, extralen);
-			req = esp_tmp_req(aead, iv);
-			sg = esp_req_sg(aead, req);
-			dsg = &sg[nfrags];
-
-			esph = esp_output_set_extra(skb, esph, extra);
-
-			sg_init_table(sg, nfrags);
-			skb_to_sgvec(skb, sg,
-				     (unsigned char *)esph - skb->data,
-				     assoclen + ivlen + clen + alen);
-
-			allocsize = ALIGN(skb->data_len, L1_CACHE_BYTES);
-
-			if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
-				spin_unlock_bh(&x->lock);
-				err = -ENOMEM;
-				goto error;
-			}
-
-			skb_shinfo(skb)->nr_frags = 1;
-
-			page = pfrag->page;
-			get_page(page);
-			/* replace page frags in skb with new page */
-			__skb_fill_page_desc(skb, 0, page, pfrag->offset, skb->data_len);
-			pfrag->offset = pfrag->offset + allocsize;
-
-			sg_init_table(dsg, skb_shinfo(skb)->nr_frags + 1);
-			skb_to_sgvec(skb, dsg,
-				     (unsigned char *)esph - skb->data,
-				     assoclen + ivlen + clen + alen);
-
-			spin_unlock_bh(&x->lock);
-
-			goto skip_cow2;
+			goto out;
 		}
 	}

 cow:
-	err = skb_cow_data(skb, tailen, &trailer);
-	if (err < 0)
-		goto error;
-	nfrags = err;
+	nfrags = skb_cow_data(skb, tailen, &trailer);
+	if (nfrags < 0)
+		goto out;
 	tail = skb_tail_pointer(trailer);
-	esph = ip_esp_hdr(skb);

 skip_cow:
-	esp_output_fill_trailer(tail, tfclen, plen, proto);
+	esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);
+	pskb_put(skb, trailer, tailen);

-	pskb_put(skb, trailer, clen - skb->len + alen);
-	skb_push(skb, -skb_network_offset(skb));
-	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
-	esph->spi = x->id.spi;
+out:
+	return nfrags;
+}
+EXPORT_SYMBOL_GPL(esp_output_head);

-	tmp = esp_alloc_tmp(aead, nfrags, extralen);
+int esp_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
+{
+	u8 *iv;
+	int alen;
+	void *tmp;
+	int ivlen;
+	int assoclen;
+	int extralen;
+	struct page *page;
+	struct ip_esp_hdr *esph;
+	struct crypto_aead *aead;
+	struct aead_request *req;
+	struct scatterlist *sg, *dsg;
+	struct esp_output_extra *extra;
+	int err = -ENOMEM;
+
+	assoclen = sizeof(struct ip_esp_hdr);
+	extralen = 0;
+
+	if (x->props.flags & XFRM_STATE_ESN) {
+		extralen += sizeof(*extra);
+		assoclen += sizeof(__be32);
+	}
+
+	aead = x->data;
+	alen = crypto_aead_authsize(aead);
+	ivlen = crypto_aead_ivsize(aead);
+
+	tmp = esp_alloc_tmp(aead, esp->nfrags + 2, extralen);
 	if (!tmp) {
+		spin_unlock_bh(&x->lock);
 		err = -ENOMEM;
 		goto error;
 	}
···
 	extra = esp_tmp_extra(tmp);
 	iv = esp_tmp_iv(aead, tmp, extralen);
 	req = esp_tmp_req(aead, iv);
 	sg = esp_req_sg(aead, req);
-	dsg = sg;

-	esph = esp_output_set_extra(skb, esph, extra);
+	if (esp->inplace)
+		dsg = sg;
+	else
+		dsg = &sg[esp->nfrags];

-	sg_init_table(sg, nfrags);
+	esph = esp_output_set_extra(skb, x, esp->esph, extra);
+	esp->esph = esph;
+
+	sg_init_table(sg, esp->nfrags);
 	skb_to_sgvec(skb, sg,
 		     (unsigned char *)esph - skb->data,
-		     assoclen + ivlen + clen + alen);
+		     assoclen + ivlen + esp->clen + alen);

-skip_cow2:
+	if (!esp->inplace) {
+		int allocsize;
+		struct page_frag *pfrag = &x->xfrag;
+
+		allocsize = ALIGN(skb->data_len, L1_CACHE_BYTES);
+
+		spin_lock_bh(&x->lock);
+		if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
+			spin_unlock_bh(&x->lock);
+			err = -ENOMEM;
+			goto error;
+		}
+
+		skb_shinfo(skb)->nr_frags = 1;
+
+		page = pfrag->page;
+		get_page(page);
+		/* replace page frags in skb with new page */
+		__skb_fill_page_desc(skb, 0, page, pfrag->offset, skb->data_len);
+		pfrag->offset = pfrag->offset + allocsize;
+		spin_unlock_bh(&x->lock);
+
+		sg_init_table(dsg, skb_shinfo(skb)->nr_frags + 1);
+		skb_to_sgvec(skb, dsg,
+			     (unsigned char *)esph - skb->data,
+			     assoclen + ivlen + esp->clen + alen);
+	}
+
 	if ((x->props.flags & XFRM_STATE_ESN))
 		aead_request_set_callback(req, 0, esp_output_done_esn, skb);
 	else
 		aead_request_set_callback(req, 0, esp_output_done, skb);

-	aead_request_set_crypt(req, sg, dsg, ivlen + clen, iv);
+	aead_request_set_crypt(req, sg, dsg, ivlen + esp->clen, iv);
 	aead_request_set_ad(req, assoclen);

-	seqno = cpu_to_be64(XFRM_SKB_CB(skb)->seq.output.low +
-			    ((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32));
-
 	memset(iv, 0, ivlen);
-	memcpy(iv + ivlen - min(ivlen, 8), (u8 *)&seqno + 8 - min(ivlen, 8),
+	memcpy(iv + ivlen - min(ivlen, 8), (u8 *)&esp->seqno + 8 - min(ivlen, 8),
 	       min(ivlen, 8));

 	ESP_SKB_CB(skb)->tmp = tmp;
···
 error:
 	return err;
 }
+EXPORT_SYMBOL_GPL(esp_output_tail);

-static int esp_input_done2(struct sk_buff *skb, int err)
+static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
+{
+	int alen;
+	int blksize;
+	struct ip_esp_hdr *esph;
+	struct crypto_aead *aead;
+	struct esp_info esp;
+
+	esp.inplace = true;
+
+	esp.proto = *skb_mac_header(skb);
+	*skb_mac_header(skb) = IPPROTO_ESP;
+
+	/* skb is pure payload to encrypt */
+
+	aead = x->data;
+	alen = crypto_aead_authsize(aead);
+
+	esp.tfclen = 0;
+	if (x->tfcpad) {
+		struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
+		u32 padto;
+
+		padto = min(x->tfcpad, esp4_get_mtu(x, dst->child_mtu_cached));
+		if (skb->len < padto)
+			esp.tfclen = padto - skb->len;
+	}
+	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
+	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
+	esp.plen = esp.clen - skb->len - esp.tfclen;
+	esp.tailen = esp.tfclen + esp.plen + alen;
+
+	esp.esph = ip_esp_hdr(skb);
+
+	esp.nfrags = esp_output_head(x, skb, &esp);
+	if (esp.nfrags < 0)
+		return esp.nfrags;
+
+	esph = esp.esph;
+	esph->spi = x->id.spi;
+
+	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
+	esp.seqno = cpu_to_be64(XFRM_SKB_CB(skb)->seq.output.low +
+				((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32));
+
+	skb_push(skb, -skb_network_offset(skb));
+
+	return esp_output_tail(x, skb, &esp);
+}
+
+int esp_input_done2(struct sk_buff *skb, int err)
 {
 	const struct iphdr *iph;
 	struct xfrm_state *x = xfrm_input_state(skb);
+	struct xfrm_offload *xo = xfrm_offload(skb);
 	struct crypto_aead *aead = x->data;
 	int alen = crypto_aead_authsize(aead);
 	int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
···
 	u8 nexthdr[2];
 	int padlen;

-	kfree(ESP_SKB_CB(skb)->tmp);
+	if (!xo || (xo && !(xo->flags & CRYPTO_DONE)))
+		kfree(ESP_SKB_CB(skb)->tmp);

 	if (unlikely(err))
 		goto out;
···
 out:
 	return err;
 }
+EXPORT_SYMBOL_GPL(esp_input_done2);

 static void esp_input_done(struct crypto_async_request *base, int err)
 {
···
 	char aead_name[CRYPTO_MAX_ALG_NAME];
 	struct crypto_aead *aead;
 	int err;
+	u32 mask = 0;

 	err = -ENAMETOOLONG;
 	if (snprintf(aead_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
 		     x->geniv, x->aead->alg_name) >= CRYPTO_MAX_ALG_NAME)
 		goto error;

-	aead = crypto_alloc_aead(aead_name, 0, 0);
+	if (x->xso.offload_handle)
+		mask |= CRYPTO_ALG_ASYNC;
+
+	aead = crypto_alloc_aead(aead_name, 0, mask);
 	err = PTR_ERR(aead);
 	if (IS_ERR(aead))
 		goto error;
···
 	char authenc_name[CRYPTO_MAX_ALG_NAME];
 	unsigned int keylen;
 	int err;
+	u32 mask = 0;

 	err = -EINVAL;
 	if (!x->ealg)
···
 		goto error;
 	}

-	aead = crypto_alloc_aead(authenc_name, 0, 0);
+	if (x->xso.offload_handle)
+		mask |= CRYPTO_ALG_ASYNC;
+
+	aead = crypto_alloc_aead(authenc_name, 0, mask);
 	err = PTR_ERR(aead);
 	if (IS_ERR(aead))
 		goto error;
···
 	.destructor	= esp_destroy,
 	.get_mtu	= esp4_get_mtu,
 	.input		= esp_input,
-	.output		= esp_output
+	.output		= esp_output,
 };

 static struct xfrm4_protocol esp4_protocol = {
+218 -19
net/ipv4/esp4_offload.c
···
 	if ((err = xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq)) != 0)
 		goto out;

-	err = secpath_set(skb);
-	if (err)
-		goto out;
-
-	if (skb->sp->len == XFRM_MAX_DEPTH)
-		goto out;
-
-	x = xfrm_state_lookup(dev_net(skb->dev), skb->mark,
-			      (xfrm_address_t *)&ip_hdr(skb)->daddr,
-			      spi, IPPROTO_ESP, AF_INET);
-	if (!x)
-		goto out;
-
-	skb->sp->xvec[skb->sp->len++] = x;
-	skb->sp->olen++;
-
 	xo = xfrm_offload(skb);
-	if (!xo) {
-		xfrm_state_put(x);
-		goto out;
+	if (!xo || !(xo->flags & CRYPTO_DONE)) {
+		err = secpath_set(skb);
+		if (err)
+			goto out;
+
+		if (skb->sp->len == XFRM_MAX_DEPTH)
+			goto out;
+
+		x = xfrm_state_lookup(dev_net(skb->dev), skb->mark,
+				      (xfrm_address_t *)&ip_hdr(skb)->daddr,
+				      spi, IPPROTO_ESP, AF_INET);
+		if (!x)
+			goto out;
+
+		skb->sp->xvec[skb->sp->len++] = x;
+		skb->sp->olen++;
+
+		xo = xfrm_offload(skb);
+		if (!xo) {
+			xfrm_state_put(x);
+			goto out;
+		}
 	}
+
 	xo->flags |= XFRM_GRO;

 	XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
···
 	return NULL;
 }

+static void esp4_gso_encap(struct xfrm_state *x, struct sk_buff *skb)
+{
+	struct ip_esp_hdr *esph;
+	struct iphdr *iph = ip_hdr(skb);
+	struct xfrm_offload *xo = xfrm_offload(skb);
+	int proto = iph->protocol;
+
+	skb_push(skb, -skb_network_offset(skb));
+	esph = ip_esp_hdr(skb);
+	*skb_mac_header(skb) = IPPROTO_ESP;
+
+	esph->spi = x->id.spi;
+	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
+
+	xo->proto = proto;
+}
+
+static struct sk_buff *esp4_gso_segment(struct sk_buff *skb,
+					netdev_features_t features)
+{
+	__u32 seq;
+	int err = 0;
+	struct sk_buff *skb2;
+	struct xfrm_state *x;
+	struct ip_esp_hdr *esph;
+	struct crypto_aead *aead;
+	struct sk_buff *segs = ERR_PTR(-EINVAL);
+	netdev_features_t esp_features = features;
+	struct xfrm_offload *xo = xfrm_offload(skb);
+
+	if (!xo)
+		goto out;
+
+	seq = xo->seq.low;
+
+	x = skb->sp->xvec[skb->sp->len - 1];
+	aead = x->data;
+	esph = ip_esp_hdr(skb);
+
+	if (esph->spi != x->id.spi)
+		goto out;
+
+	if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
+		goto out;
+
+	__skb_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead));
+
+	skb->encap_hdr_csum = 1;
+
+	if (!(features & NETIF_F_HW_ESP))
+		esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);
+
+	segs = x->outer_mode->gso_segment(x, skb, esp_features);
+	if (IS_ERR_OR_NULL(segs))
+		goto out;
+
+	__skb_pull(skb, skb->data - skb_mac_header(skb));
+
+	skb2 = segs;
+	do {
+		struct sk_buff *nskb = skb2->next;
+
+		xo = xfrm_offload(skb2);
+		xo->flags |= XFRM_GSO_SEGMENT;
+		xo->seq.low = seq;
+		xo->seq.hi = xfrm_replay_seqhi(x, seq);
+
+		if (!(features & NETIF_F_HW_ESP))
+			xo->flags |= CRYPTO_FALLBACK;
+
+		x->outer_mode->xmit(x, skb2);
+
+		err = x->type_offload->xmit(x, skb2, esp_features);
+		if (err) {
+			kfree_skb_list(segs);
+			return ERR_PTR(err);
+		}
+
+		if (!skb_is_gso(skb2))
+			seq++;
+		else
+			seq += skb_shinfo(skb2)->gso_segs;
+
+		skb_push(skb2, skb2->mac_len);
+		skb2 = nskb;
+	} while (skb2);
+
+out:
+	return segs;
+}
+
+static int esp_input_tail(struct xfrm_state *x, struct sk_buff *skb)
+{
+	struct crypto_aead *aead = x->data;
+
+	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead)))
+		return -EINVAL;
+
+	skb->ip_summed = CHECKSUM_NONE;
+
+	return esp_input_done2(skb, 0);
+}
+
+static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_t features)
+{
+	int err;
+	int alen;
+	int blksize;
+	struct xfrm_offload *xo;
+	struct ip_esp_hdr *esph;
+	struct crypto_aead *aead;
+	struct esp_info esp;
+	bool hw_offload = true;
+
+	esp.inplace = true;
+
+	xo = xfrm_offload(skb);
+
+	if (!xo)
+		return -EINVAL;
+
+	if (!(features & NETIF_F_HW_ESP) || !x->xso.offload_handle ||
+	    (x->xso.dev != skb->dev)) {
+		xo->flags |= CRYPTO_FALLBACK;
+		hw_offload = false;
+	}
+
+	esp.proto = xo->proto;
+
+	/* skb is pure payload to encrypt */
+
+	aead = x->data;
+	alen = crypto_aead_authsize(aead);
+
+	esp.tfclen = 0;
+	/* XXX: Add support for tfc padding here. */
+
+	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
+	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
+	esp.plen = esp.clen - skb->len - esp.tfclen;
+	esp.tailen = esp.tfclen + esp.plen + alen;
+
+	esp.esph = ip_esp_hdr(skb);
+
+	if (!hw_offload || (hw_offload && !skb_is_gso(skb))) {
+		esp.nfrags = esp_output_head(x, skb, &esp);
+		if (esp.nfrags < 0)
+			return esp.nfrags;
+	}
+
+	esph = esp.esph;
+	esph->spi = x->id.spi;
+
+	skb_push(skb, -skb_network_offset(skb));
+
+	if (xo->flags & XFRM_GSO_SEGMENT) {
+		esph->seq_no = htonl(xo->seq.low);
+	} else {
+		ip_hdr(skb)->tot_len = htons(skb->len);
+		ip_send_check(ip_hdr(skb));
+	}
+
+	if (hw_offload)
+		return 0;
+
+	esp.seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));
+
+	err = esp_output_tail(x, skb, &esp);
+	if (err < 0)
+		return err;
+
+	secpath_reset(skb);
+
+	return 0;
+}
+
 static const struct net_offload esp4_offload = {
 	.callbacks = {
 		.gro_receive = esp4_gro_receive,
+		.gso_segment = esp4_gso_segment,
 	},
+};
+
+static const struct xfrm_type_offload esp_type_offload = {
+	.description	= "ESP4 OFFLOAD",
+	.owner		= THIS_MODULE,
+	.proto		= IPPROTO_ESP,
+	.input_tail	= esp_input_tail,
+	.xmit		= esp_xmit,
+	.encap		= esp4_gso_encap,
 };

 static int __init esp4_offload_init(void)
 {
+	if (xfrm_register_type_offload(&esp_type_offload, AF_INET) < 0) {
+		pr_info("%s: can't add xfrm type offload\n", __func__);
+		return -EAGAIN;
+	}
+
 	return inet_add_offload(&esp4_offload, IPPROTO_ESP);
 }

 static void __exit esp4_offload_exit(void)
 {
+	if (xfrm_unregister_type_offload(&esp_type_offload, AF_INET) < 0)
+		pr_info("%s: can't remove xfrm type offload\n", __func__);
+
 	inet_del_offload(&esp4_offload, IPPROTO_ESP);
 }
+34
net/ipv4/xfrm4_mode_transport.c
···
 #include <net/dst.h>
 #include <net/ip.h>
 #include <net/xfrm.h>
+#include <net/protocol.h>

 /* Add encapsulation header.
  *
···
 {
 	struct iphdr *iph = ip_hdr(skb);
 	int ihl = iph->ihl * 4;
+
+	skb_set_inner_transport_header(skb, skb_transport_offset(skb));

 	skb_set_network_header(skb, -x->props.header_len);
 	skb->mac_header = skb->network_header +
···
 	return 0;
 }

+static struct sk_buff *xfrm4_transport_gso_segment(struct xfrm_state *x,
+						   struct sk_buff *skb,
+						   netdev_features_t features)
+{
+	const struct net_offload *ops;
+	struct sk_buff *segs = ERR_PTR(-EINVAL);
+	struct xfrm_offload *xo = xfrm_offload(skb);
+
+	skb->transport_header += x->props.header_len;
+	ops = rcu_dereference(inet_offloads[xo->proto]);
+	if (likely(ops && ops->callbacks.gso_segment))
+		segs = ops->callbacks.gso_segment(skb, features);
+
+	return segs;
+}
+
+static void xfrm4_transport_xmit(struct xfrm_state *x, struct sk_buff *skb)
+{
+	struct xfrm_offload *xo = xfrm_offload(skb);
+
+	skb_reset_mac_len(skb);
+	pskb_pull(skb, skb->mac_len + sizeof(struct iphdr) + x->props.header_len);
+
+	if (xo->flags & XFRM_GSO_SEGMENT) {
+		skb_reset_transport_header(skb);
+		skb->transport_header -= x->props.header_len;
+	}
+}
+
 static struct xfrm_mode xfrm4_transport_mode = {
 	.input = xfrm4_transport_input,
 	.output = xfrm4_transport_output,
+	.gso_segment = xfrm4_transport_gso_segment,
+	.xmit = xfrm4_transport_xmit,
 	.owner = THIS_MODULE,
 	.encap = XFRM_MODE_TRANSPORT,
 };
+28
net/ipv4/xfrm4_mode_tunnel.c
···
 	struct iphdr *top_iph;
 	int flags;

+	skb_set_inner_network_header(skb, skb_network_offset(skb));
+	skb_set_inner_transport_header(skb, skb_transport_offset(skb));
+
 	skb_set_network_header(skb, -x->props.header_len);
 	skb->mac_header = skb->network_header +
 			  offsetof(struct iphdr, protocol);
···
 	return err;
 }

+static struct sk_buff *xfrm4_mode_tunnel_gso_segment(struct xfrm_state *x,
+						     struct sk_buff *skb,
+						     netdev_features_t features)
+{
+	__skb_push(skb, skb->mac_len);
+	return skb_mac_gso_segment(skb, features);
+}
+
+static void xfrm4_mode_tunnel_xmit(struct xfrm_state *x, struct sk_buff *skb)
+{
+	struct xfrm_offload *xo = xfrm_offload(skb);
+
+	if (xo->flags & XFRM_GSO_SEGMENT) {
+		skb->network_header = skb->network_header - x->props.header_len;
+		skb->transport_header = skb->network_header +
+					sizeof(struct iphdr);
+	}
+
+	skb_reset_mac_len(skb);
+	pskb_pull(skb, skb->mac_len + x->props.header_len);
+}
+
 static struct xfrm_mode xfrm4_tunnel_mode = {
 	.input2 = xfrm4_mode_tunnel_input,
 	.input = xfrm_prepare_input,
 	.output2 = xfrm4_mode_tunnel_output,
 	.output = xfrm4_prepare_output,
+	.gso_segment = xfrm4_mode_tunnel_gso_segment,
+	.xmit = xfrm4_mode_tunnel_xmit,
 	.owner = THIS_MODULE,
 	.encap = XFRM_MODE_TUNNEL,
 	.flags = XFRM_MODE_FLAG_TUNNEL,
+2 -1
net/ipv4/xfrm4_output.c
···
 		goto out;

 	mtu = dst_mtu(skb_dst(skb));
-	if (skb->len > mtu) {
+	if ((!skb_is_gso(skb) && skb->len > mtu) ||
+	    (skb_is_gso(skb) && skb_gso_network_seglen(skb) > ip_skb_dst_mtu(skb->sk, skb))) {
 		skb->protocol = htons(ETH_P_IP);

 		if (skb->sk)
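The reworked check avoids rejecting GSO packets on their raw length: a GSO skb is legitimately larger than the MTU, so the relevant comparison is the size of the segments it will be split into (skb_gso_network_seglen()) against the route MTU.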
+161 -131
net/ipv6/esp6.c
···
 }

 static struct ip_esp_hdr *esp_output_set_esn(struct sk_buff *skb,
+					     struct xfrm_state *x,
 					     struct ip_esp_hdr *esph,
 					     __be32 *seqhi)
 {
-	struct xfrm_state *x = skb_dst(skb)->xfrm;
-
 	/* For ESN we move the header forward by 4 bytes to
 	 * accomodate the high bits.  We will move it back after
 	 * encryption.
 	 */
 	if ((x->props.flags & XFRM_STATE_ESN)) {
+		struct xfrm_offload *xo = xfrm_offload(skb);
+
 		esph = (void *)(skb_transport_header(skb) - sizeof(__be32));
 		*seqhi = esph->spi;
-		esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
+		if (xo)
+			esph->seq_no = htonl(xo->seq.hi);
+		else
+			esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
 	}

 	esph->spi = x->id.spi;
···
 	tail[plen - 1] = proto;
 }

-static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
+int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
 {
-	int err;
-	struct ip_esp_hdr *esph;
-	struct crypto_aead *aead;
-	struct aead_request *req;
-	struct scatterlist *sg, *dsg;
-	struct sk_buff *trailer;
-	struct page *page;
-	void *tmp;
-	int blksize;
-	int clen;
-	int alen;
-	int plen;
-	int ivlen;
-	int tfclen;
-	int nfrags;
-	int assoclen;
-	int seqhilen;
-	int tailen;
-	u8 *iv;
 	u8 *tail;
 	u8 *vaddr;
-	__be32 *seqhi;
-	__be64 seqno;
-	__u8 proto = *skb_mac_header(skb);
+	int nfrags;
+	struct page *page;
+	struct ip_esp_hdr *esph;
+	struct sk_buff *trailer;
+	int tailen = esp->tailen;

-	/* skb is pure payload to encrypt */
-	aead = x->data;
-	alen = crypto_aead_authsize(aead);
-	ivlen = crypto_aead_ivsize(aead);
-
-	tfclen = 0;
-	if (x->tfcpad) {
-		struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
-		u32 padto;
-
-		padto = min(x->tfcpad, esp6_get_mtu(x, dst->child_mtu_cached));
-		if (skb->len < padto)
-			tfclen = padto - skb->len;
-	}
-	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
-	clen = ALIGN(skb->len + 2 + tfclen, blksize);
-	plen = clen - skb->len - tfclen;
-	tailen = tfclen + plen + alen;
-
-	assoclen = sizeof(*esph);
-	seqhilen = 0;
-
-	if (x->props.flags & XFRM_STATE_ESN) {
-		seqhilen += sizeof(__be32);
-		assoclen += seqhilen;
-	}
-
-	*skb_mac_header(skb) = IPPROTO_ESP;
 	esph = ip_esp_hdr(skb);

 	if (!skb_cloned(skb)) {
···
 			int allocsize;
 			struct sock *sk = skb->sk;
 			struct page_frag *pfrag = &x->xfrag;
+
+			esp->inplace = false;

 			allocsize = ALIGN(tailen, L1_CACHE_BYTES);
···
 			tail = vaddr + pfrag->offset;

-			esp_output_fill_trailer(tail, tfclen, plen, proto);
+			esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);

 			kunmap_atomic(vaddr);
+
+			spin_unlock_bh(&x->lock);

 			nfrags = skb_shinfo(skb)->nr_frags;
···
 			if (sk)
 				atomic_add(tailen, &sk->sk_wmem_alloc);

-			skb_push(skb, -skb_network_offset(skb));
-
-			esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
-			esph->spi = x->id.spi;
-
-			tmp = esp_alloc_tmp(aead, nfrags + 2, seqhilen);
-			if (!tmp) {
-				spin_unlock_bh(&x->lock);
-				err = -ENOMEM;
-				goto error;
-			}
-			seqhi = esp_tmp_seqhi(tmp);
-			iv = esp_tmp_iv(aead, tmp, seqhilen);
-			req = esp_tmp_req(aead, iv);
-			sg = esp_req_sg(aead, req);
-			dsg = &sg[nfrags];
-
-			esph = esp_output_set_esn(skb, esph, seqhi);
-
-			sg_init_table(sg, nfrags);
-			skb_to_sgvec(skb, sg,
-				     (unsigned char *)esph - skb->data,
-				     assoclen + ivlen + clen + alen);
-
-			allocsize = ALIGN(skb->data_len, L1_CACHE_BYTES);
-
-			if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
-				spin_unlock_bh(&x->lock);
-				err = -ENOMEM;
-				goto error;
-			}
-
-			skb_shinfo(skb)->nr_frags = 1;
-
-			page = pfrag->page;
-			get_page(page);
-			/* replace page frags in skb with new page */
-			__skb_fill_page_desc(skb, 0, page, pfrag->offset, skb->data_len);
-			pfrag->offset = pfrag->offset + allocsize;
-
-			sg_init_table(dsg, skb_shinfo(skb)->nr_frags + 1);
-			skb_to_sgvec(skb, dsg,
-				     (unsigned char *)esph - skb->data,
-				     assoclen + ivlen + clen + alen);
-
-			spin_unlock_bh(&x->lock);
-
-			goto skip_cow2;
+			goto out;
 		}
 	}

 cow:
-	err = skb_cow_data(skb, tailen, &trailer);
-	if (err < 0)
-		goto error;
-	nfrags = err;
-
+	nfrags = skb_cow_data(skb, tailen, &trailer);
+	if (nfrags < 0)
+		goto out;
 	tail = skb_tail_pointer(trailer);
-	esph = ip_esp_hdr(skb);

 skip_cow:
-	esp_output_fill_trailer(tail, tfclen, plen, proto);
+	esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);
+	pskb_put(skb, trailer, tailen);

-	pskb_put(skb, trailer, clen - skb->len + alen);
-	skb_push(skb, -skb_network_offset(skb));
+out:
+	return nfrags;
+}
+EXPORT_SYMBOL_GPL(esp6_output_head);

-	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
-	esph->spi = x->id.spi;
+int esp6_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
+{
+	u8 *iv;
+	int alen;
+	void *tmp;
+	int ivlen;
+	int assoclen;
+	int seqhilen;
+	__be32 *seqhi;
+	struct page *page;
+	struct ip_esp_hdr *esph;
+	struct aead_request *req;
+	struct crypto_aead *aead;
+	struct scatterlist *sg, *dsg;
+	int err = -ENOMEM;

-	tmp = esp_alloc_tmp(aead, nfrags, seqhilen);
+	assoclen = sizeof(struct ip_esp_hdr);
+	seqhilen = 0;
+
+	if (x->props.flags & XFRM_STATE_ESN) {
+		seqhilen += sizeof(__be32);
+		assoclen += sizeof(__be32);
+	}
+
+	aead = x->data;
+	alen = crypto_aead_authsize(aead);
+	ivlen = crypto_aead_ivsize(aead);
+
+	tmp = esp_alloc_tmp(aead, esp->nfrags + 2, seqhilen);
 	if (!tmp) {
+		spin_unlock_bh(&x->lock);
 		err = -ENOMEM;
 		goto error;
 	}
···
 	iv = esp_tmp_iv(aead, tmp, seqhilen);
 	req = esp_tmp_req(aead, iv);
 	sg = esp_req_sg(aead, req);
-	dsg = sg;

-	esph = esp_output_set_esn(skb, esph, seqhi);
+	if (esp->inplace)
+		dsg = sg;
+	else
+		dsg = &sg[esp->nfrags];

-	sg_init_table(sg, nfrags);
+	esph = esp_output_set_esn(skb, x, ip_esp_hdr(skb), seqhi);
+
+	sg_init_table(sg, esp->nfrags);
 	skb_to_sgvec(skb, sg,
 		     (unsigned char *)esph - skb->data,
-		     assoclen + ivlen + clen + alen);
+		     assoclen + ivlen + esp->clen + alen);

-skip_cow2:
+	if (!esp->inplace) {
+		int allocsize;
+		struct page_frag *pfrag = &x->xfrag;
+
+		allocsize = ALIGN(skb->data_len, L1_CACHE_BYTES);
+
+		spin_lock_bh(&x->lock);
+		if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
+			spin_unlock_bh(&x->lock);
+			err = -ENOMEM;
+			goto error;
+		}
+
+		skb_shinfo(skb)->nr_frags = 1;
+
+		page = pfrag->page;
+		get_page(page);
+		/* replace page frags in skb with new page */
+		__skb_fill_page_desc(skb, 0, page, pfrag->offset, skb->data_len);
+		pfrag->offset = pfrag->offset + allocsize;
+		spin_unlock_bh(&x->lock);
+
+		sg_init_table(dsg, skb_shinfo(skb)->nr_frags + 1);
+		skb_to_sgvec(skb, dsg,
+			     (unsigned char *)esph - skb->data,
+			     assoclen + ivlen + esp->clen + alen);
+	}
+
 	if ((x->props.flags & XFRM_STATE_ESN))
 		aead_request_set_callback(req, 0, esp_output_done_esn, skb);
 	else
 		aead_request_set_callback(req, 0, esp_output_done, skb);

-	aead_request_set_crypt(req, sg, dsg, ivlen + clen, iv);
+	aead_request_set_crypt(req, sg, dsg, ivlen + esp->clen, iv);
 	aead_request_set_ad(req, assoclen);

-	seqno = cpu_to_be64(XFRM_SKB_CB(skb)->seq.output.low +
-			    ((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32));
-
 	memset(iv, 0, ivlen);
-	memcpy(iv + ivlen - min(ivlen, 8), (u8 *)&seqno + 8 - min(ivlen, 8),
+	memcpy(iv + ivlen - min(ivlen, 8), (u8 *)&esp->seqno + 8 - min(ivlen, 8),
 	       min(ivlen, 8));

 	ESP_SKB_CB(skb)->tmp = tmp;
···
 error:
 	return err;
 }
+EXPORT_SYMBOL_GPL(esp6_output_tail);

-static int esp_input_done2(struct sk_buff *skb, int err)
+static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
+{
+	int alen;
+	int blksize;
+	struct ip_esp_hdr *esph;
+	struct crypto_aead *aead;
+	struct esp_info esp;
+
+	esp.inplace = true;
+
+	esp.proto = *skb_mac_header(skb);
+	*skb_mac_header(skb) = IPPROTO_ESP;
+
+	/* skb is pure payload to encrypt */
+
+	aead = x->data;
+	alen = crypto_aead_authsize(aead);
+
+	esp.tfclen = 0;
+	if (x->tfcpad) {
+		struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
+		u32 padto;
+
+		padto = min(x->tfcpad, esp6_get_mtu(x, dst->child_mtu_cached));
+		if (skb->len < padto)
+			esp.tfclen = padto - skb->len;
+	}
+	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
+	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
+	esp.plen = esp.clen - skb->len - esp.tfclen;
+	esp.tailen = esp.tfclen + esp.plen + alen;
+
+	esp.nfrags = esp6_output_head(x, skb, &esp);
+	if (esp.nfrags < 0)
+		return esp.nfrags;
+
+	esph = ip_esp_hdr(skb);
+	esph->spi = x->id.spi;
+
+	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
+	esp.seqno = cpu_to_be64(XFRM_SKB_CB(skb)->seq.output.low +
+				((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32));
+
+	skb_push(skb, -skb_network_offset(skb));
+
+	return esp6_output_tail(x, skb, &esp);
+}
+
+int esp6_input_done2(struct sk_buff *skb, int err)
 {
 	struct xfrm_state *x = xfrm_input_state(skb);
+	struct xfrm_offload *xo = xfrm_offload(skb);
 	struct crypto_aead *aead = x->data;
 	int alen = crypto_aead_authsize(aead);
 	int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
···
 	int padlen;
 	u8 nexthdr[2];

-	kfree(ESP_SKB_CB(skb)->tmp);
+	if (!xo || (xo && !(xo->flags & CRYPTO_DONE)))
+		kfree(ESP_SKB_CB(skb)->tmp);

 	if (unlikely(err))
 		goto out;
···
 out:
 	return err;
 }
+EXPORT_SYMBOL_GPL(esp6_input_done2);

 static void esp_input_done(struct crypto_async_request *base, int err)
 {
 	struct sk_buff *skb = base->data;

-	xfrm_input_resume(skb, esp_input_done2(skb, err));
+	xfrm_input_resume(skb, esp6_input_done2(skb, err));
 }

 static void esp_input_restore_header(struct sk_buff *skb)
···
 	if ((x->props.flags & XFRM_STATE_ESN))
 		esp_input_restore_header(skb);

-	ret = esp_input_done2(skb, ret);
+	ret = esp6_input_done2(skb, ret);

 out:
 	return ret;
···
 	char aead_name[CRYPTO_MAX_ALG_NAME];
 	struct crypto_aead *aead;
 	int err;
+	u32 mask = 0;

 	err = -ENAMETOOLONG;
 	if (snprintf(aead_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
 		     x->geniv, x->aead->alg_name) >= CRYPTO_MAX_ALG_NAME)
 		goto error;

-	aead = crypto_alloc_aead(aead_name, 0, 0);
+	if (x->xso.offload_handle)
+		mask |= CRYPTO_ALG_ASYNC;
+
+	aead = crypto_alloc_aead(aead_name, 0, mask);
 	err = PTR_ERR(aead);
 	if (IS_ERR(aead))
 		goto error;
···
 	char authenc_name[CRYPTO_MAX_ALG_NAME];
 	unsigned int keylen;
 	int err;
+	u32 mask = 0;

 	err = -EINVAL;
 	if (!x->ealg)
···
 		goto error;
 	}

-	aead = crypto_alloc_aead(authenc_name, 0, 0);
+	if (x->xso.offload_handle)
+		mask |= CRYPTO_ALG_ASYNC;
+
+	aead = crypto_alloc_aead(authenc_name, 0, mask);
 	err = PTR_ERR(aead);
 	if (IS_ERR(aead))
 		goto error;
+220 -19
net/ipv6/esp6_offload.c
···
 	if ((err = xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq)) != 0)
 		goto out;

-	err = secpath_set(skb);
-	if (err)
-		goto out;
-
-	if (skb->sp->len == XFRM_MAX_DEPTH)
-		goto out;
-
-	x = xfrm_state_lookup(dev_net(skb->dev), skb->mark,
-			      (xfrm_address_t *)&ipv6_hdr(skb)->daddr,
-			      spi, IPPROTO_ESP, AF_INET6);
-	if (!x)
-		goto out;
-
-	skb->sp->xvec[skb->sp->len++] = x;
-	skb->sp->olen++;
-
 	xo = xfrm_offload(skb);
-	if (!xo) {
-		xfrm_state_put(x);
-		goto out;
+	if (!xo || !(xo->flags & CRYPTO_DONE)) {
+		err = secpath_set(skb);
+		if (err)
+			goto out;
+
+		if (skb->sp->len == XFRM_MAX_DEPTH)
+			goto out;
+
+		x = xfrm_state_lookup(dev_net(skb->dev), skb->mark,
+				      (xfrm_address_t *)&ipv6_hdr(skb)->daddr,
+				      spi, IPPROTO_ESP, AF_INET6);
+		if (!x)
+			goto out;
+
+		skb->sp->xvec[skb->sp->len++] = x;
+		skb->sp->olen++;
+
+		xo = xfrm_offload(skb);
+		if (!xo) {
+			xfrm_state_put(x);
+			goto out;
+		}
 	}
+
 	xo->flags |= XFRM_GRO;

 	XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = NULL;
···
 	return NULL;
 }

+static void esp6_gso_encap(struct xfrm_state *x, struct sk_buff *skb)
+{
+	struct ip_esp_hdr *esph;
+	struct ipv6hdr *iph = ipv6_hdr(skb);
+	struct xfrm_offload *xo = xfrm_offload(skb);
+	int proto = iph->nexthdr;
+
+	skb_push(skb, -skb_network_offset(skb));
+	esph = ip_esp_hdr(skb);
+	*skb_mac_header(skb) = IPPROTO_ESP;
+
+	esph->spi = x->id.spi;
+	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
+
+	xo->proto = proto;
+}
+
+static struct sk_buff *esp6_gso_segment(struct sk_buff *skb,
+					netdev_features_t features)
+{
+	__u32 seq;
+	int err = 0;
+	struct sk_buff *skb2;
+	struct xfrm_state *x;
+	struct ip_esp_hdr *esph;
+	struct crypto_aead *aead;
+	struct sk_buff *segs = ERR_PTR(-EINVAL);
+	netdev_features_t esp_features = features;
+	struct xfrm_offload *xo = xfrm_offload(skb);
+
+	if (!xo)
+		goto out;
+
+	seq = xo->seq.low;
+
+	x = skb->sp->xvec[skb->sp->len - 1];
+	aead = x->data;
+	esph = ip_esp_hdr(skb);
+
+	if (esph->spi != x->id.spi)
+		goto out;
+
+	if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
+		goto out;
+
+	__skb_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead));
+
+	skb->encap_hdr_csum = 1;
+
+	if (!(features & NETIF_F_HW_ESP))
+		esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);
+
+	segs = x->outer_mode->gso_segment(x, skb, esp_features);
+	if (IS_ERR_OR_NULL(segs))
+		goto out;
+
+	__skb_pull(skb, skb->data - skb_mac_header(skb));
+
+	skb2 = segs;
+	do {
+		struct sk_buff *nskb = skb2->next;
+
+		xo = xfrm_offload(skb2);
+		xo->flags |= XFRM_GSO_SEGMENT;
+		xo->seq.low = seq;
+		xo->seq.hi = xfrm_replay_seqhi(x, seq);
+
+		if (!(features & NETIF_F_HW_ESP))
+			xo->flags |= CRYPTO_FALLBACK;
+
+		x->outer_mode->xmit(x, skb2);
+
+		err = x->type_offload->xmit(x, skb2, esp_features);
+		if (err) {
+			kfree_skb_list(segs);
+			return ERR_PTR(err);
+		}
+
+		if (!skb_is_gso(skb2))
+			seq++;
+		else
+			seq += skb_shinfo(skb2)->gso_segs;
+
+		skb_push(skb2, skb2->mac_len);
+		skb2 = nskb;
+	} while (skb2);
+
+out:
+	return segs;
+}
+
+static int esp6_input_tail(struct xfrm_state *x, struct sk_buff *skb)
+{
+	struct crypto_aead *aead = x->data;
+
+	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead)))
+		return -EINVAL;
+
+	skb->ip_summed = CHECKSUM_NONE;
+
+	return esp6_input_done2(skb, 0);
+}
+
+static int esp6_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_t features)
+{
+	int err;
+	int alen;
+	int blksize;
+	struct xfrm_offload *xo;
+	struct ip_esp_hdr *esph;
+	struct crypto_aead *aead;
+	struct esp_info esp;
+	bool hw_offload = true;
+
+	esp.inplace = true;
+
+	xo = xfrm_offload(skb);
+
+	if (!xo)
+		return -EINVAL;
+
+	if (!(features & NETIF_F_HW_ESP) || !x->xso.offload_handle ||
+	    (x->xso.dev != skb->dev)) {
+		xo->flags |= CRYPTO_FALLBACK;
+		hw_offload = false;
+	}
+
+	esp.proto = xo->proto;
+
+	/* skb is pure payload to encrypt */
+
+	aead = x->data;
+	alen = crypto_aead_authsize(aead);
+
+	esp.tfclen = 0;
+	/* XXX: Add support for tfc padding here. */
+
+	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
+	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
+	esp.plen = esp.clen - skb->len - esp.tfclen;
+	esp.tailen = esp.tfclen + esp.plen + alen;
+
+	if (!hw_offload || (hw_offload && !skb_is_gso(skb))) {
+		esp.nfrags = esp6_output_head(x, skb, &esp);
+		if (esp.nfrags < 0)
+			return esp.nfrags;
+	}
+
+	esph = ip_esp_hdr(skb);
+	esph->spi = x->id.spi;
+
+	skb_push(skb, -skb_network_offset(skb));
+
+	if (xo->flags & XFRM_GSO_SEGMENT) {
+		esph->seq_no = htonl(xo->seq.low);
+	} else {
+		int len;
+
+		len = skb->len - sizeof(struct ipv6hdr);
+		if (len > IPV6_MAXPLEN)
+			len = 0;
+
+		ipv6_hdr(skb)->payload_len = htons(len);
+	}
+
+	if (hw_offload)
+		return 0;
+
+	esp.seqno = cpu_to_be64(xo->seq.low + ((u64)xo->seq.hi << 32));
+
+	err = esp6_output_tail(x, skb, &esp);
+	if (err < 0)
+		return err;
+
+	secpath_reset(skb);
+
+	return 0;
+}
+
 static const struct net_offload esp6_offload = {
 	.callbacks = {
 		.gro_receive = esp6_gro_receive,
+		.gso_segment = esp6_gso_segment,
 	},
+};
+
+static const struct xfrm_type_offload esp6_type_offload = {
+	.description	= "ESP6 OFFLOAD",
+	.owner		= THIS_MODULE,
+	.proto		= IPPROTO_ESP,
+	.input_tail	= esp6_input_tail,
+	.xmit		= esp6_xmit,
+	.encap		= esp6_gso_encap,
 };

 static int __init esp6_offload_init(void)
 {
+	if (xfrm_register_type_offload(&esp6_type_offload, AF_INET6) < 0) {
+		pr_info("%s: can't add xfrm type offload\n", __func__);
+		return -EAGAIN;
+	}
+
 	return inet6_add_offload(&esp6_offload, IPPROTO_ESP);
 }

 static void __exit esp6_offload_exit(void)
 {
+	if (xfrm_unregister_type_offload(&esp6_type_offload, AF_INET6) < 0)
+		pr_info("%s: can't remove xfrm type offload\n", __func__);
+
 	inet6_del_offload(&esp6_offload, IPPROTO_ESP);
 }
+34
net/ipv6/xfrm6_mode_transport.c
···
 #include <net/dst.h>
 #include <net/ipv6.h>
 #include <net/xfrm.h>
+#include <net/protocol.h>

 /* Add encapsulation header.
  *
···
 	int hdr_len;

 	iph = ipv6_hdr(skb);
+	skb_set_inner_transport_header(skb, skb_transport_offset(skb));

 	hdr_len = x->type->hdr_offset(x, skb, &prevhdr);
 	skb_set_mac_header(skb, (prevhdr - x->props.header_len) - skb->data);
···
 	return 0;
 }

+static struct sk_buff *xfrm4_transport_gso_segment(struct xfrm_state *x,
+						   struct sk_buff *skb,
+						   netdev_features_t features)
+{
+	const struct net_offload *ops;
+	struct sk_buff *segs = ERR_PTR(-EINVAL);
+	struct xfrm_offload *xo = xfrm_offload(skb);
+
+	skb->transport_header += x->props.header_len;
+	ops = rcu_dereference(inet6_offloads[xo->proto]);
+	if (likely(ops && ops->callbacks.gso_segment))
+		segs = ops->callbacks.gso_segment(skb, features);
+
+	return segs;
+}
+
+static void xfrm6_transport_xmit(struct xfrm_state *x, struct sk_buff *skb)
+{
+	struct xfrm_offload *xo = xfrm_offload(skb);
+
+	skb_reset_mac_len(skb);
+	pskb_pull(skb, skb->mac_len + sizeof(struct ipv6hdr) + x->props.header_len);
+
+	if (xo->flags & XFRM_GSO_SEGMENT) {
+		skb_reset_transport_header(skb);
+		skb->transport_header -= x->props.header_len;
+	}
+}
+
 static struct xfrm_mode xfrm6_transport_mode = {
 	.input = xfrm6_transport_input,
 	.output = xfrm6_transport_output,
+	.gso_segment = xfrm4_transport_gso_segment,
+	.xmit = xfrm6_transport_xmit,
 	.owner = THIS_MODULE,
 	.encap = XFRM_MODE_TRANSPORT,
 };
+27
net/ipv6/xfrm6_mode_tunnel.c
··· 36 36 struct ipv6hdr *top_iph; 37 37 int dsfield; 38 38 39 + skb_set_inner_network_header(skb, skb_network_offset(skb)); 40 + skb_set_inner_transport_header(skb, skb_transport_offset(skb)); 41 + 39 42 skb_set_network_header(skb, -x->props.header_len); 40 43 skb->mac_header = skb->network_header + 41 44 offsetof(struct ipv6hdr, nexthdr); ··· 99 96 return err; 100 97 } 101 98 99 + static struct sk_buff *xfrm6_mode_tunnel_gso_segment(struct xfrm_state *x, 100 + struct sk_buff *skb, 101 + netdev_features_t features) 102 + { 103 + __skb_push(skb, skb->mac_len); 104 + return skb_mac_gso_segment(skb, features); 105 + 106 + } 107 + 108 + static void xfrm6_mode_tunnel_xmit(struct xfrm_state *x, struct sk_buff *skb) 109 + { 110 + struct xfrm_offload *xo = xfrm_offload(skb); 111 + 112 + if (xo->flags & XFRM_GSO_SEGMENT) { 113 + skb->network_header = skb->network_header - x->props.header_len; 114 + skb->transport_header = skb->network_header + sizeof(struct ipv6hdr); 115 + } 116 + 117 + skb_reset_mac_len(skb); 118 + pskb_pull(skb, skb->mac_len + x->props.header_len); 119 + } 120 + 102 121 static struct xfrm_mode xfrm6_tunnel_mode = { 103 122 .input2 = xfrm6_mode_tunnel_input, 104 123 .input = xfrm_prepare_input, 105 124 .output2 = xfrm6_mode_tunnel_output, 106 125 .output = xfrm6_prepare_output, 126 + .gso_segment = xfrm6_mode_tunnel_gso_segment, 127 + .xmit = xfrm6_mode_tunnel_xmit, 107 128 .owner = THIS_MODULE, 108 129 .encap = XFRM_MODE_TUNNEL, 109 130 .flags = XFRM_MODE_FLAG_TUNNEL,
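Taken together, the two mode hunks answer the same question for the GSO engine: once the ESP header is in place, where does segmentation restart? A rough picture (an annotation, not patch code):

/*
 * tunnel mode:    | MAC | outer IPv6 | ESP | inner IPv6 | TCP | data |
 *   The whole inner packet is ESP payload, so segmentation re-runs
 *   from the MAC header: xfrm6_mode_tunnel_gso_segment() pushes the
 *   MAC header back on and hands the skb to skb_mac_gso_segment().
 *
 * transport mode: | MAC | IPv6 | ESP | TCP | data |
 *   Only the transport payload is encapsulated, so the transport-mode
 *   handler advances the transport header past the ESP header and
 *   dispatches to the inner protocol's own gso_segment callback.
 */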
+7 -2
net/ipv6/xfrm6_output.c
··· 73 73 int mtu, ret = 0; 74 74 struct dst_entry *dst = skb_dst(skb); 75 75 76 + if (skb->ignore_df) 77 + goto out; 78 + 76 79 mtu = dst_mtu(dst); 77 80 if (mtu < IPV6_MIN_MTU) 78 81 mtu = IPV6_MIN_MTU; 79 82 80 - if (!skb->ignore_df && skb->len > mtu) { 83 + if ((!skb_is_gso(skb) && skb->len > mtu) || 84 + (skb_is_gso(skb) && 85 + skb_gso_network_seglen(skb) > ip6_skb_dst_mtu(skb))) { 81 86 skb->dev = dst->dev; 82 87 skb->protocol = htons(ETH_P_IPV6); 83 88 ··· 94 89 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); 95 90 ret = -EMSGSIZE; 96 91 } 97 - 92 + out: 98 93 return ret; 99 94 } 100 95
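The reworked check keeps oversized GSO skbs alive as long as every segment they will be split into fits. A minimal sketch of the predicate (kernel context assumed; simplified to a single MTU value where the patch uses ip6_skb_dst_mtu() for the GSO case):

#include <linux/skbuff.h>

/* would this skb, or any segment derived from it, exceed the MTU? */
static bool sketch_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
{
	if (!skb_is_gso(skb))
		return skb->len > mtu;

	/* a GSO skb may be far longer than the MTU; what matters is
	 * the layer-3 size of each segment it will produce */
	return skb_gso_network_seglen(skb) > mtu;
}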
+1
net/xfrm/Makefile
··· 5 5 obj-$(CONFIG_XFRM) := xfrm_policy.o xfrm_state.o xfrm_hash.o \ 6 6 xfrm_input.o xfrm_output.o \ 7 7 xfrm_sysctl.o xfrm_replay.o 8 + obj-$(CONFIG_XFRM_OFFLOAD) += xfrm_device.o 8 9 obj-$(CONFIG_XFRM_STATISTICS) += xfrm_proc.o 9 10 obj-$(CONFIG_XFRM_ALGO) += xfrm_algo.o 10 11 obj-$(CONFIG_XFRM_USER) += xfrm_user.o
+208
net/xfrm/xfrm_device.c
···
1 + /*
2 + * xfrm_device.c - IPsec device offloading code.
3 + *
4 + * Copyright (c) 2015 secunet Security Networks AG
5 + *
6 + * Author:
7 + * Steffen Klassert <steffen.klassert@secunet.com>
8 + *
9 + * This program is free software; you can redistribute it and/or
10 + * modify it under the terms of the GNU General Public License
11 + * as published by the Free Software Foundation; either version
12 + * 2 of the License, or (at your option) any later version.
13 + */
14 + 
15 + #include <linux/errno.h>
16 + #include <linux/module.h>
17 + #include <linux/netdevice.h>
18 + #include <linux/skbuff.h>
19 + #include <linux/slab.h>
20 + #include <linux/spinlock.h>
21 + #include <net/dst.h>
22 + #include <net/xfrm.h>
23 + #include <linux/notifier.h>
24 + 
25 + int validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features)
26 + {
27 + int err;
28 + struct xfrm_state *x;
29 + struct xfrm_offload *xo = xfrm_offload(skb);
30 + 
31 + if (skb_is_gso(skb))
32 + return 0;
33 + 
34 + if (xo) {
35 + x = skb->sp->xvec[skb->sp->len - 1];
36 + if (xo->flags & XFRM_GRO || x->xso.flags & XFRM_OFFLOAD_INBOUND)
37 + return 0;
38 + 
39 + x->outer_mode->xmit(x, skb);
40 + 
41 + err = x->type_offload->xmit(x, skb, features);
42 + if (err) {
43 + XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
44 + return err;
45 + }
46 + 
47 + skb_push(skb, skb->data - skb_mac_header(skb));
48 + }
49 + 
50 + return 0;
51 + }
52 + EXPORT_SYMBOL_GPL(validate_xmit_xfrm);
53 + 
54 + int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
55 + struct xfrm_user_offload *xuo)
56 + {
57 + int err;
58 + struct dst_entry *dst;
59 + struct net_device *dev;
60 + struct xfrm_state_offload *xso = &x->xso;
61 + xfrm_address_t *saddr;
62 + xfrm_address_t *daddr;
63 + 
64 + if (!x->type_offload)
65 + return 0;
66 + 
67 + /* We don't yet support UDP encapsulation, TFC padding and ESN. */
68 + if (x->encap || x->tfcpad || (x->props.flags & XFRM_STATE_ESN))
69 + return 0;
70 + 
71 + dev = dev_get_by_index(net, xuo->ifindex);
72 + if (!dev) {
73 + if (!(xuo->flags & XFRM_OFFLOAD_INBOUND)) {
74 + saddr = &x->props.saddr;
75 + daddr = &x->id.daddr;
76 + } else {
77 + saddr = &x->id.daddr;
78 + daddr = &x->props.saddr;
79 + }
80 + 
81 + dst = __xfrm_dst_lookup(net, 0, 0, saddr, daddr, x->props.family);
82 + if (IS_ERR(dst))
83 + return 0;
84 + 
85 + dev = dst->dev;
86 + 
87 + dev_hold(dev);
88 + dst_release(dst);
89 + }
90 + 
91 + if (!dev->xfrmdev_ops || !dev->xfrmdev_ops->xdo_dev_state_add) {
92 + dev_put(dev);
93 + return 0;
94 + }
95 + 
96 + xso->dev = dev;
97 + xso->num_exthdrs = 1;
98 + xso->flags = xuo->flags;
99 + 
100 + err = dev->xfrmdev_ops->xdo_dev_state_add(x);
101 + if (err) {
102 + dev_put(dev);
103 + return err;
104 + }
105 + 
106 + return 0;
107 + }
108 + EXPORT_SYMBOL_GPL(xfrm_dev_state_add);
109 + 
110 + bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
111 + {
112 + int mtu;
113 + struct dst_entry *dst = skb_dst(skb);
114 + struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
115 + struct net_device *dev = x->xso.dev;
116 + 
117 + if (!x->type_offload || x->encap)
118 + return false;
119 + 
120 + if ((x->xso.offload_handle && (dev == dst->path->dev)) &&
121 + !dst->child->xfrm && x->type->get_mtu) {
122 + mtu = x->type->get_mtu(x, xdst->child_mtu_cached);
123 + 
124 + if (skb->len <= mtu)
125 + goto ok;
126 + 
127 + if (skb_is_gso(skb) && skb_gso_validate_mtu(skb, mtu))
128 + goto ok;
129 + }
130 + 
131 + return false;
132 + 
133 + ok:
134 + if (dev && dev->xfrmdev_ops && dev->xfrmdev_ops->xdo_dev_offload_ok)
135 + return x->xso.dev->xfrmdev_ops->xdo_dev_offload_ok(skb, x);
136 + 
137 + return true;
138 + }
139 + EXPORT_SYMBOL_GPL(xfrm_dev_offload_ok);
140 + 
141 + int xfrm_dev_register(struct net_device *dev)
142 + {
143 + if ((dev->features & NETIF_F_HW_ESP) && !dev->xfrmdev_ops)
144 + return NOTIFY_BAD;
145 + if ((dev->features & NETIF_F_HW_ESP_TX_CSUM) &&
146 + !(dev->features & NETIF_F_HW_ESP))
147 + return NOTIFY_BAD;
148 + 
149 + return NOTIFY_DONE;
150 + }
151 + 
152 + static int xfrm_dev_unregister(struct net_device *dev)
153 + {
154 + return NOTIFY_DONE;
155 + }
156 + 
157 + static int xfrm_dev_feat_change(struct net_device *dev)
158 + {
159 + if ((dev->features & NETIF_F_HW_ESP) && !dev->xfrmdev_ops)
160 + return NOTIFY_BAD;
161 + else if (!(dev->features & NETIF_F_HW_ESP))
162 + dev->xfrmdev_ops = NULL;
163 + 
164 + if ((dev->features & NETIF_F_HW_ESP_TX_CSUM) &&
165 + !(dev->features & NETIF_F_HW_ESP))
166 + return NOTIFY_BAD;
167 + 
168 + return NOTIFY_DONE;
169 + }
170 + 
171 + static int xfrm_dev_down(struct net_device *dev)
172 + {
173 + if (dev->hw_features & NETIF_F_HW_ESP)
174 + xfrm_dev_state_flush(dev_net(dev), dev, true);
175 + 
176 + xfrm_garbage_collect(dev_net(dev));
177 + 
178 + return NOTIFY_DONE;
179 + }
180 + 
181 + static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
182 + {
183 + struct net_device *dev = netdev_notifier_info_to_dev(ptr);
184 + 
185 + switch (event) {
186 + case NETDEV_REGISTER:
187 + return xfrm_dev_register(dev);
188 + 
189 + case NETDEV_UNREGISTER:
190 + return xfrm_dev_unregister(dev);
191 + 
192 + case NETDEV_FEAT_CHANGE:
193 + return xfrm_dev_feat_change(dev);
194 + 
195 + case NETDEV_DOWN:
196 + return xfrm_dev_down(dev);
197 + }
198 + return NOTIFY_DONE;
199 + }
200 + 
201 + static struct notifier_block xfrm_dev_notifier = {
202 + .notifier_call = xfrm_dev_event,
203 + };
204 + 
205 + void __net_init xfrm_dev_init(void)
206 + {
207 + register_netdevice_notifier(&xfrm_dev_notifier);
208 + }
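From a driver author's point of view, the contract above comes down to implementing xfrmdev_ops and advertising NETIF_F_HW_ESP before registration (the NETDEV_REGISTER check rejects the feature without the ops). A hypothetical sketch; every foo_* identifier is invented and error handling is trimmed:

#include <linux/in.h>
#include <linux/netdevice.h>
#include <net/xfrm.h>

/* pretend hardware backend; a real driver would program its SA table */
static unsigned long foo_hw_install_sa(struct xfrm_state *x)
{
	return 1; /* nonzero handle on success */
}

static int foo_xdo_dev_state_add(struct xfrm_state *x)
{
	if (x->id.proto != IPPROTO_ESP) /* this hardware only does ESP */
		return -EINVAL;

	x->xso.offload_handle = foo_hw_install_sa(x);
	return x->xso.offload_handle ? 0 : -EBUSY;
}

static void foo_xdo_dev_state_delete(struct xfrm_state *x)
{
	/* tear down the hardware SA behind x->xso.offload_handle */
}

static void foo_xdo_dev_state_free(struct xfrm_state *x)
{
	/* release any driver-private SA resources */
}

static bool foo_xdo_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{
	return true; /* veto individual packets the hardware cannot handle */
}

static const struct xfrmdev_ops foo_xfrmdev_ops = {
	.xdo_dev_state_add	= foo_xdo_dev_state_add,
	.xdo_dev_state_delete	= foo_xdo_dev_state_delete,
	.xdo_dev_state_free	= foo_xdo_dev_state_free,
	.xdo_dev_offload_ok	= foo_xdo_dev_offload_ok,
};

/* in the probe path, before register_netdev() */
static void foo_setup_ipsec(struct net_device *netdev)
{
	netdev->xfrmdev_ops = &foo_xfrmdev_ops;
	netdev->features |= NETIF_F_HW_ESP;
	netdev->hw_features |= NETIF_F_HW_ESP;
}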
+39 -2
net/xfrm/xfrm_input.c
··· 107 107 sp->len = 0; 108 108 sp->olen = 0; 109 109 110 + memset(sp->ovec, 0, sizeof(sp->ovec[XFRM_MAX_OFFLOAD_DEPTH])); 111 + 110 112 if (src) { 111 113 int i; 112 114 ··· 209 207 unsigned int family; 210 208 int decaps = 0; 211 209 int async = 0; 212 - struct xfrm_offload *xo; 213 210 bool xfrm_gro = false; 211 + bool crypto_done = false; 212 + struct xfrm_offload *xo = xfrm_offload(skb); 214 213 215 214 if (encap_type < 0) { 216 215 x = xfrm_input_state(skb); ··· 223 220 seq = XFRM_SKB_CB(skb)->seq.input.low; 224 221 goto resume; 225 222 } 223 + 226 224 /* encap_type < -1 indicates a GRO call. */ 227 225 encap_type = 0; 228 226 seq = XFRM_SPI_SKB_CB(skb)->seq; 227 + 228 + if (xo && (xo->flags & CRYPTO_DONE)) { 229 + crypto_done = true; 230 + x = xfrm_input_state(skb); 231 + family = XFRM_SPI_SKB_CB(skb)->family; 232 + 233 + if (!(xo->status & CRYPTO_SUCCESS)) { 234 + if (xo->status & 235 + (CRYPTO_TRANSPORT_AH_AUTH_FAILED | 236 + CRYPTO_TRANSPORT_ESP_AUTH_FAILED | 237 + CRYPTO_TUNNEL_AH_AUTH_FAILED | 238 + CRYPTO_TUNNEL_ESP_AUTH_FAILED)) { 239 + 240 + xfrm_audit_state_icvfail(x, skb, 241 + x->type->proto); 242 + x->stats.integrity_failed++; 243 + XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR); 244 + goto drop; 245 + } 246 + 247 + XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR); 248 + goto drop; 249 + } 250 + 251 + if ((err = xfrm_parse_spi(skb, nexthdr, &spi, &seq)) != 0) { 252 + XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR); 253 + goto drop; 254 + } 255 + } 256 + 229 257 goto lock; 230 258 } 231 259 ··· 345 311 skb_dst_force(skb); 346 312 dev_hold(skb->dev); 347 313 348 - nexthdr = x->type->input(x, skb); 314 + if (crypto_done) 315 + nexthdr = x->type_offload->input_tail(x, skb); 316 + else 317 + nexthdr = x->type->input(x, skb); 349 318 350 319 if (nexthdr == -EINPROGRESS) 351 320 return 0;
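On receive the contract is inverted: a NIC that already decrypted and authenticated the packet tells xfrm_input() to skip the software crypto, which then calls the new input_tail handler instead of x->type->input(). A hypothetical driver fragment (foo_* is invented; secpath allocation and state lookup are omitted):

static void foo_rx_mark_decrypted(struct sk_buff *skb, bool icv_ok)
{
	struct xfrm_offload *xo = xfrm_offload(skb);

	/* the hardware already ran the crypto ... */
	xo->flags |= CRYPTO_DONE;

	/* ... and this is how it went */
	xo->status = icv_ok ? CRYPTO_SUCCESS :
			      CRYPTO_TUNNEL_ESP_AUTH_FAILED;
}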
+40 -6
net/xfrm/xfrm_output.c
··· 99 99 100 100 skb_dst_force(skb); 101 101 102 - /* Inner headers are invalid now. */ 103 - skb->encapsulation = 0; 104 - 105 - err = x->type->output(x, skb); 106 - if (err == -EINPROGRESS) 107 - goto out; 102 + if (xfrm_offload(skb)) { 103 + x->type_offload->encap(x, skb); 104 + } else { 105 + err = x->type->output(x, skb); 106 + if (err == -EINPROGRESS) 107 + goto out; 108 + } 108 109 109 110 resume: 110 111 if (err) { ··· 201 200 int xfrm_output(struct sock *sk, struct sk_buff *skb) 202 201 { 203 202 struct net *net = dev_net(skb_dst(skb)->dev); 203 + struct xfrm_state *x = skb_dst(skb)->xfrm; 204 204 int err; 205 + 206 + secpath_reset(skb); 207 + skb->encapsulation = 0; 208 + 209 + if (xfrm_dev_offload_ok(skb, x)) { 210 + struct sec_path *sp; 211 + 212 + sp = secpath_dup(skb->sp); 213 + if (!sp) { 214 + XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR); 215 + kfree_skb(skb); 216 + return -ENOMEM; 217 + } 218 + if (skb->sp) 219 + secpath_put(skb->sp); 220 + skb->sp = sp; 221 + skb->encapsulation = 1; 222 + 223 + sp->olen++; 224 + sp->xvec[skb->sp->len++] = x; 225 + xfrm_state_hold(x); 226 + 227 + if (skb_is_gso(skb)) { 228 + skb_shinfo(skb)->gso_type |= SKB_GSO_ESP; 229 + 230 + return xfrm_output2(net, sk, skb); 231 + } 232 + 233 + if (x->xso.dev && x->xso.dev->features & NETIF_F_HW_ESP_TX_CSUM) 234 + goto out; 235 + } 205 236 206 237 if (skb_is_gso(skb)) 207 238 return xfrm_output_gso(net, sk, skb); ··· 247 214 } 248 215 } 249 216 217 + out: 250 218 return xfrm_output2(net, sk, skb); 251 219 } 252 220 EXPORT_SYMBOL_GPL(xfrm_output);
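The net effect of the xfrm_output() rework is a three-way fork, summarized here as an annotation (not patch text):

/*
 * xfrm_dev_offload_ok(skb, x)?
 *   yes, GSO skb:  record x in skb->sp, tag SKB_GSO_ESP and defer all
 *                  ESP work to validate_xmit_xfrm() at layer 2
 *   yes, non-GSO:  record x in skb->sp; if the device also offers
 *                  NETIF_F_HW_ESP_TX_CSUM, skip the software checksum
 *                  fixup and go straight to xfrm_output2()
 *   no:            unchanged software path (GSO segmentation or
 *                  checksum completion, then xfrm_output2())
 */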
+6 -21
net/xfrm/xfrm_policy.c
··· 116 116 return afinfo; 117 117 } 118 118 119 - static inline struct dst_entry *__xfrm_dst_lookup(struct net *net, 120 - int tos, int oif, 121 - const xfrm_address_t *saddr, 122 - const xfrm_address_t *daddr, 123 - int family) 119 + struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos, int oif, 120 + const xfrm_address_t *saddr, 121 + const xfrm_address_t *daddr, 122 + int family) 124 123 { 125 124 const struct xfrm_policy_afinfo *afinfo; 126 125 struct dst_entry *dst; ··· 134 135 135 136 return dst; 136 137 } 138 + EXPORT_SYMBOL(__xfrm_dst_lookup); 137 139 138 140 static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x, 139 141 int tos, int oif, ··· 2929 2929 } 2930 2930 EXPORT_SYMBOL(xfrm_policy_unregister_afinfo); 2931 2931 2932 - static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr) 2933 - { 2934 - struct net_device *dev = netdev_notifier_info_to_dev(ptr); 2935 - 2936 - switch (event) { 2937 - case NETDEV_DOWN: 2938 - xfrm_garbage_collect(dev_net(dev)); 2939 - } 2940 - return NOTIFY_DONE; 2941 - } 2942 - 2943 - static struct notifier_block xfrm_dev_notifier = { 2944 - .notifier_call = xfrm_dev_event, 2945 - }; 2946 - 2947 2932 #ifdef CONFIG_XFRM_STATISTICS 2948 2933 static int __net_init xfrm_statistics_init(struct net *net) 2949 2934 { ··· 3005 3020 INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize); 3006 3021 INIT_WORK(&net->xfrm.policy_hthresh.work, xfrm_hash_rebuild); 3007 3022 if (net_eq(net, &init_net)) 3008 - register_netdevice_notifier(&xfrm_dev_notifier); 3023 + xfrm_dev_init(); 3009 3024 return 0; 3010 3025 3011 3026 out_bydst:
+159 -3
net/xfrm/xfrm_replay.c
··· 45 45 
46 46 return seq_hi;
47 47 }
48 - 
48 + EXPORT_SYMBOL(xfrm_replay_seqhi);
49 + ;
49 50 static void xfrm_replay_notify(struct xfrm_state *x, int event)
50 51 {
51 52 struct km_event c;
··· 559 558 x->repl->notify(x, XFRM_REPLAY_UPDATE);
560 559 }
561 560 
561 + #ifdef CONFIG_XFRM_OFFLOAD
562 + static int xfrm_replay_overflow_offload(struct xfrm_state *x, struct sk_buff *skb)
563 + {
564 + int err = 0;
565 + struct net *net = xs_net(x);
566 + struct xfrm_offload *xo = xfrm_offload(skb);
567 + __u32 oseq = x->replay.oseq;
568 + 
569 + if (!xo)
570 + return xfrm_replay_overflow(x, skb);
571 + 
572 + if (x->type->flags & XFRM_TYPE_REPLAY_PROT) {
573 + if (!skb_is_gso(skb)) {
574 + XFRM_SKB_CB(skb)->seq.output.low = ++oseq;
575 + xo->seq.low = oseq;
576 + } else {
577 + XFRM_SKB_CB(skb)->seq.output.low = oseq + 1;
578 + xo->seq.low = oseq + 1;
579 + oseq += skb_shinfo(skb)->gso_segs;
580 + }
581 + 
582 + XFRM_SKB_CB(skb)->seq.output.hi = 0;
583 + xo->seq.hi = 0;
584 + if (unlikely(oseq < x->replay.oseq)) {
585 + xfrm_audit_state_replay_overflow(x, skb);
586 + err = -EOVERFLOW;
587 + 
588 + return err;
589 + }
590 + 
591 + x->replay.oseq = oseq;
592 + 
593 + if (xfrm_aevent_is_on(net))
594 + x->repl->notify(x, XFRM_REPLAY_UPDATE);
595 + }
596 + 
597 + return err;
598 + }
599 + 
600 + static int xfrm_replay_overflow_offload_bmp(struct xfrm_state *x, struct sk_buff *skb)
601 + {
602 + int err = 0;
603 + struct xfrm_offload *xo = xfrm_offload(skb);
604 + struct xfrm_replay_state_esn *replay_esn = x->replay_esn;
605 + struct net *net = xs_net(x);
606 + __u32 oseq = replay_esn->oseq;
607 + 
608 + if (!xo)
609 + return xfrm_replay_overflow_bmp(x, skb);
610 + 
611 + if (x->type->flags & XFRM_TYPE_REPLAY_PROT) {
612 + if (!skb_is_gso(skb)) {
613 + XFRM_SKB_CB(skb)->seq.output.low = ++oseq;
614 + xo->seq.low = oseq;
615 + } else {
616 + XFRM_SKB_CB(skb)->seq.output.low = oseq + 1;
617 + xo->seq.low = oseq + 1;
618 + oseq += skb_shinfo(skb)->gso_segs;
619 + }
620 + 
621 + XFRM_SKB_CB(skb)->seq.output.hi = 0;
622 + xo->seq.hi = 0;
623 + if (unlikely(oseq < replay_esn->oseq)) {
624 + xfrm_audit_state_replay_overflow(x, skb);
625 + err = -EOVERFLOW;
626 + 
627 + return err;
628 + } else {
629 + replay_esn->oseq = oseq;
630 + }
631 + 
632 + if (xfrm_aevent_is_on(net))
633 + x->repl->notify(x, XFRM_REPLAY_UPDATE);
634 + }
635 + 
636 + return err;
637 + }
638 + 
639 + static int xfrm_replay_overflow_offload_esn(struct xfrm_state *x, struct sk_buff *skb)
640 + {
641 + int err = 0;
642 + struct xfrm_offload *xo = xfrm_offload(skb);
643 + struct xfrm_replay_state_esn *replay_esn = x->replay_esn;
644 + struct net *net = xs_net(x);
645 + __u32 oseq = replay_esn->oseq;
646 + __u32 oseq_hi = replay_esn->oseq_hi;
647 + 
648 + if (!xo)
649 + return xfrm_replay_overflow_esn(x, skb);
650 + 
651 + if (x->type->flags & XFRM_TYPE_REPLAY_PROT) {
652 + if (!skb_is_gso(skb)) {
653 + XFRM_SKB_CB(skb)->seq.output.low = ++oseq;
654 + XFRM_SKB_CB(skb)->seq.output.hi = oseq_hi;
655 + xo->seq.low = oseq;
656 + xo->seq.hi = oseq_hi;
657 + } else {
658 + XFRM_SKB_CB(skb)->seq.output.low = oseq + 1;
659 + XFRM_SKB_CB(skb)->seq.output.hi = oseq_hi;
660 + xo->seq.low = oseq = oseq + 1;
661 + xo->seq.hi = oseq_hi;
662 + oseq += skb_shinfo(skb)->gso_segs;
663 + }
664 + 
665 + if (unlikely(oseq < replay_esn->oseq)) {
666 + XFRM_SKB_CB(skb)->seq.output.hi = ++oseq_hi;
667 + xo->seq.hi = oseq_hi;
668 + 
669 + if (replay_esn->oseq_hi == 0) {
670 + replay_esn->oseq--;
671 + replay_esn->oseq_hi--;
672 + xfrm_audit_state_replay_overflow(x, skb);
673 + err = -EOVERFLOW;
674 + 
675 + return err;
676 + }
677 + }
678 + 
679 + replay_esn->oseq = oseq;
680 + replay_esn->oseq_hi = oseq_hi;
681 + 
682 + if (xfrm_aevent_is_on(net))
683 + x->repl->notify(x, XFRM_REPLAY_UPDATE);
684 + }
685 + 
686 + return err;
687 + }
688 + 
689 + static const struct xfrm_replay xfrm_replay_legacy = {
690 + .advance = xfrm_replay_advance,
691 + .check = xfrm_replay_check,
692 + .recheck = xfrm_replay_check,
693 + .notify = xfrm_replay_notify,
694 + .overflow = xfrm_replay_overflow_offload,
695 + };
696 + 
697 + static const struct xfrm_replay xfrm_replay_bmp = {
698 + .advance = xfrm_replay_advance_bmp,
699 + .check = xfrm_replay_check_bmp,
700 + .recheck = xfrm_replay_check_bmp,
701 + .notify = xfrm_replay_notify_bmp,
702 + .overflow = xfrm_replay_overflow_offload_bmp,
703 + };
704 + 
705 + static const struct xfrm_replay xfrm_replay_esn = {
706 + .advance = xfrm_replay_advance_esn,
707 + .check = xfrm_replay_check_esn,
708 + .recheck = xfrm_replay_recheck_esn,
709 + .notify = xfrm_replay_notify_esn,
710 + .overflow = xfrm_replay_overflow_offload_esn,
711 + };
712 + #else
562 713 static const struct xfrm_replay xfrm_replay_legacy = {
563 714 .advance = xfrm_replay_advance,
564 715 .check = xfrm_replay_check,
··· 734 581 .notify = xfrm_replay_notify_esn,
735 582 .overflow = xfrm_replay_overflow_esn,
736 583 };
584 + #endif
737 585 
738 586 int xfrm_init_replay(struct xfrm_state *x)
739 587 {
··· 749 595 if (replay_esn->replay_window == 0)
750 596 return -EINVAL;
751 597 x->repl = &xfrm_replay_esn;
752 - } else
598 + } else {
753 599 x->repl = &xfrm_replay_bmp;
754 - } else
600 + }
601 + } else {
755 602 x->repl = &xfrm_replay_legacy;
603 + }
756 604 
757 605 return 0;
758 606 }
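All three offload variants share the same GSO trick: hand the skb a whole block of sequence numbers, one per segment it will become. A worked toy example with invented values:

#include <stdio.h>

int main(void)
{
	unsigned int oseq = 100;     /* hypothetical output counter */
	unsigned int gso_segs = 3;   /* the skb will become 3 packets */

	unsigned int first = oseq + 1; /* what goes into xo->seq.low */
	oseq += gso_segs;              /* the new x->replay.oseq */

	/* prints: segments use 101..103, counter now 103 */
	printf("segments use %u..%u, counter now %u\n", first, oseq, oseq);
	return 0;
}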
+147
net/xfrm/xfrm_state.c
··· 251 251 module_put(type->owner); 252 252 } 253 253 254 + static DEFINE_SPINLOCK(xfrm_type_offload_lock); 255 + int xfrm_register_type_offload(const struct xfrm_type_offload *type, 256 + unsigned short family) 257 + { 258 + struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family); 259 + const struct xfrm_type_offload **typemap; 260 + int err = 0; 261 + 262 + if (unlikely(afinfo == NULL)) 263 + return -EAFNOSUPPORT; 264 + typemap = afinfo->type_offload_map; 265 + spin_lock_bh(&xfrm_type_offload_lock); 266 + 267 + if (likely(typemap[type->proto] == NULL)) 268 + typemap[type->proto] = type; 269 + else 270 + err = -EEXIST; 271 + spin_unlock_bh(&xfrm_type_offload_lock); 272 + rcu_read_unlock(); 273 + return err; 274 + } 275 + EXPORT_SYMBOL(xfrm_register_type_offload); 276 + 277 + int xfrm_unregister_type_offload(const struct xfrm_type_offload *type, 278 + unsigned short family) 279 + { 280 + struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family); 281 + const struct xfrm_type_offload **typemap; 282 + int err = 0; 283 + 284 + if (unlikely(afinfo == NULL)) 285 + return -EAFNOSUPPORT; 286 + typemap = afinfo->type_offload_map; 287 + spin_lock_bh(&xfrm_type_offload_lock); 288 + 289 + if (unlikely(typemap[type->proto] != type)) 290 + err = -ENOENT; 291 + else 292 + typemap[type->proto] = NULL; 293 + spin_unlock_bh(&xfrm_type_offload_lock); 294 + rcu_read_unlock(); 295 + return err; 296 + } 297 + EXPORT_SYMBOL(xfrm_unregister_type_offload); 298 + 299 + static const struct xfrm_type_offload *xfrm_get_type_offload(u8 proto, unsigned short family) 300 + { 301 + struct xfrm_state_afinfo *afinfo; 302 + const struct xfrm_type_offload **typemap; 303 + const struct xfrm_type_offload *type; 304 + 305 + afinfo = xfrm_state_get_afinfo(family); 306 + if (unlikely(afinfo == NULL)) 307 + return NULL; 308 + typemap = afinfo->type_offload_map; 309 + 310 + type = typemap[proto]; 311 + if ((type && !try_module_get(type->owner))) 312 + type = NULL; 313 + 314 + rcu_read_unlock(); 315 + return type; 316 + } 317 + 318 + static void xfrm_put_type_offload(const struct xfrm_type_offload *type) 319 + { 320 + module_put(type->owner); 321 + } 322 + 254 323 static DEFINE_SPINLOCK(xfrm_mode_lock); 255 324 int xfrm_register_mode(struct xfrm_mode *mode, int family) 256 325 { ··· 434 365 xfrm_put_mode(x->inner_mode_iaf); 435 366 if (x->outer_mode) 436 367 xfrm_put_mode(x->outer_mode); 368 + if (x->type_offload) 369 + xfrm_put_type_offload(x->type_offload); 437 370 if (x->type) { 438 371 x->type->destructor(x); 439 372 xfrm_put_type(x->type); 440 373 } 374 + xfrm_dev_state_free(x); 441 375 security_xfrm_state_free(x); 442 376 kfree(x); 443 377 } ··· 610 538 net->xfrm.state_num--; 611 539 spin_unlock(&net->xfrm.xfrm_state_lock); 612 540 541 + xfrm_dev_state_delete(x); 542 + 613 543 /* All xfrm_state objects are created by xfrm_state_alloc. 614 544 * The xfrm_state_alloc call gives a reference, and that 615 545 * is what we are dropping here. 
··· 656 582 657 583 return err; 658 584 } 585 + 586 + static inline int 587 + xfrm_dev_state_flush_secctx_check(struct net *net, struct net_device *dev, bool task_valid) 588 + { 589 + int i, err = 0; 590 + 591 + for (i = 0; i <= net->xfrm.state_hmask; i++) { 592 + struct xfrm_state *x; 593 + struct xfrm_state_offload *xso; 594 + 595 + hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) { 596 + xso = &x->xso; 597 + 598 + if (xso->dev == dev && 599 + (err = security_xfrm_state_delete(x)) != 0) { 600 + xfrm_audit_state_delete(x, 0, task_valid); 601 + return err; 602 + } 603 + } 604 + } 605 + 606 + return err; 607 + } 659 608 #else 660 609 static inline int 661 610 xfrm_state_flush_secctx_check(struct net *net, u8 proto, bool task_valid) 611 + { 612 + return 0; 613 + } 614 + 615 + static inline int 616 + xfrm_dev_state_flush_secctx_check(struct net *net, struct net_device *dev, bool task_valid) 662 617 { 663 618 return 0; 664 619 } ··· 732 629 return err; 733 630 } 734 631 EXPORT_SYMBOL(xfrm_state_flush); 632 + 633 + int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_valid) 634 + { 635 + int i, err = 0, cnt = 0; 636 + 637 + spin_lock_bh(&net->xfrm.xfrm_state_lock); 638 + err = xfrm_dev_state_flush_secctx_check(net, dev, task_valid); 639 + if (err) 640 + goto out; 641 + 642 + err = -ESRCH; 643 + for (i = 0; i <= net->xfrm.state_hmask; i++) { 644 + struct xfrm_state *x; 645 + struct xfrm_state_offload *xso; 646 + restart: 647 + hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) { 648 + xso = &x->xso; 649 + 650 + if (!xfrm_state_kern(x) && xso->dev == dev) { 651 + xfrm_state_hold(x); 652 + spin_unlock_bh(&net->xfrm.xfrm_state_lock); 653 + 654 + err = xfrm_state_delete(x); 655 + xfrm_audit_state_delete(x, err ? 0 : 1, 656 + task_valid); 657 + xfrm_state_put(x); 658 + if (!err) 659 + cnt++; 660 + 661 + spin_lock_bh(&net->xfrm.xfrm_state_lock); 662 + goto restart; 663 + } 664 + } 665 + } 666 + if (cnt) 667 + err = 0; 668 + 669 + out: 670 + spin_unlock_bh(&net->xfrm.xfrm_state_lock); 671 + return err; 672 + } 673 + EXPORT_SYMBOL(xfrm_dev_state_flush); 735 674 736 675 void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si) 737 676 { ··· 2221 2076 x->type = xfrm_get_type(x->id.proto, family); 2222 2077 if (x->type == NULL) 2223 2078 goto error; 2079 + 2080 + x->type_offload = xfrm_get_type_offload(x->id.proto, family); 2224 2081 2225 2082 err = x->type->init_state(x); 2226 2083 if (err)
+28
net/xfrm/xfrm_user.c
··· 595 595 goto error;
596 596 }
597 597 
598 + if (attrs[XFRMA_OFFLOAD_DEV] &&
599 + xfrm_dev_state_add(net, x, nla_data(attrs[XFRMA_OFFLOAD_DEV])))
600 + goto error;
601 + 
598 602 if ((err = xfrm_alloc_replay_state_esn(&x->replay_esn, &x->preplay_esn,
599 603 attrs[XFRMA_REPLAY_ESN_VAL])))
600 604 goto error;
··· 783 779 return 0;
784 780 }
785 781 
782 + static int copy_user_offload(struct xfrm_state_offload *xso, struct sk_buff *skb)
783 + {
784 + struct xfrm_user_offload *xuo;
785 + struct nlattr *attr;
786 + 
787 + attr = nla_reserve(skb, XFRMA_OFFLOAD_DEV, sizeof(*xuo));
788 + if (attr == NULL)
789 + return -EMSGSIZE;
790 + 
791 + xuo = nla_data(attr);
792 + 
793 + xuo->ifindex = xso->dev->ifindex;
794 + xuo->flags = xso->flags;
795 + 
796 + return 0;
797 + }
798 + 
786 799 static int copy_to_user_auth(struct xfrm_algo_auth *auth, struct sk_buff *skb)
787 800 {
788 801 struct xfrm_algo *algo;
··· 888 867 else
889 868 ret = nla_put(skb, XFRMA_REPLAY_VAL, sizeof(x->replay),
890 869 &x->replay);
870 + if (ret)
871 + goto out;
872 + if (x->xso.dev)
873 + ret = copy_user_offload(&x->xso, skb);
891 874 if (ret)
892 875 goto out;
893 876 if (x->security)
··· 2431 2406 [XFRMA_SA_EXTRA_FLAGS] = { .type = NLA_U32 },
2432 2407 [XFRMA_PROTO] = { .type = NLA_U8 },
2433 2408 [XFRMA_ADDRESS_FILTER] = { .len = sizeof(struct xfrm_address_filter) },
2409 + [XFRMA_OFFLOAD_DEV] = { .len = sizeof(struct xfrm_user_offload) },
2434 2410 };
2435 2411 
2436 2412 static const struct nla_policy xfrma_spd_policy[XFRMA_SPD_MAX+1] = {
··· 2649 2623 l += nla_total_size(sizeof(*x->coaddr));
2650 2624 if (x->props.extra_flags)
2651 2625 l += nla_total_size(sizeof(x->props.extra_flags));
2626 + if (x->xso.dev)
2627 + l += nla_total_size(sizeof(x->xso));
2652 2628 
2653 2629 /* Must count x->lastused as it may become non-zero behind our back. */
2654 2630 l += nla_total_size_64bit(sizeof(u64));
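Userspace opts in per SA through the new XFRMA_OFFLOAD_DEV attribute parsed above (iproute2 later gained a matching 'ip xfrm state add ... offload dev DEV dir in|out' syntax). A sketch of the attribute payload as xfrm_dev_state_add() consumes it; the ifindex and flag values are illustrative:

#include <linux/xfrm.h>

/* request inbound ESP offload on the device with ifindex 4 */
struct xfrm_user_offload xuo = {
	.ifindex	= 4,
	.flags		= XFRM_OFFLOAD_INBOUND, /* leave 0 for outbound */
};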