Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

esp: Add gso handlers for esp4 and esp6

This patch extends the xfrm_type by an encap function pointer
and implements esp4_gso_encap and esp6_gso_encap. These functions
do the basic ESP encapsulation for a GSO packet. In case the
GSO packet needs to be segmented in software, we add gso_segment
functions. This codepath is going to be used on esp hardware
offloads.

Signed-off-by: Steffen Klassert <steffen.klassert@secunet.com>

+203 -4
+9 -1
net/ipv4/esp4.c
··· 161 161 * encryption. 162 162 */ 163 163 if ((x->props.flags & XFRM_STATE_ESN)) { 164 + __u32 seqhi; 165 + struct xfrm_offload *xo = xfrm_offload(skb); 166 + 167 + if (xo) 168 + seqhi = xo->seq.hi; 169 + else 170 + seqhi = XFRM_SKB_CB(skb)->seq.output.hi; 171 + 164 172 extra->esphoff = (unsigned char *)esph - 165 173 skb_transport_header(skb); 166 174 esph = (struct ip_esp_hdr *)((unsigned char *)esph - 4); 167 175 extra->seqhi = esph->spi; 168 - esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.hi); 176 + esph->seq_no = htonl(seqhi); 169 177 } 170 178 171 179 esph->spi = x->id.spi;
+93
net/ipv4/esp4_offload.c
··· 84 84 return NULL; 85 85 } 86 86 87 + static void esp4_gso_encap(struct xfrm_state *x, struct sk_buff *skb) 88 + { 89 + struct ip_esp_hdr *esph; 90 + struct iphdr *iph = ip_hdr(skb); 91 + struct xfrm_offload *xo = xfrm_offload(skb); 92 + int proto = iph->protocol; 93 + 94 + skb_push(skb, -skb_network_offset(skb)); 95 + esph = ip_esp_hdr(skb); 96 + *skb_mac_header(skb) = IPPROTO_ESP; 97 + 98 + esph->spi = x->id.spi; 99 + esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low); 100 + 101 + xo->proto = proto; 102 + } 103 + 104 + static struct sk_buff *esp4_gso_segment(struct sk_buff *skb, 105 + netdev_features_t features) 106 + { 107 + __u32 seq; 108 + int err = 0; 109 + struct sk_buff *skb2; 110 + struct xfrm_state *x; 111 + struct ip_esp_hdr *esph; 112 + struct crypto_aead *aead; 113 + struct sk_buff *segs = ERR_PTR(-EINVAL); 114 + netdev_features_t esp_features = features; 115 + struct xfrm_offload *xo = xfrm_offload(skb); 116 + 117 + if (!xo) 118 + goto out; 119 + 120 + seq = xo->seq.low; 121 + 122 + x = skb->sp->xvec[skb->sp->len - 1]; 123 + aead = x->data; 124 + esph = ip_esp_hdr(skb); 125 + 126 + if (esph->spi != x->id.spi) 127 + goto out; 128 + 129 + if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead))) 130 + goto out; 131 + 132 + __skb_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)); 133 + 134 + skb->encap_hdr_csum = 1; 135 + 136 + if (!(features & NETIF_F_HW_ESP)) 137 + esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK); 138 + 139 + segs = x->outer_mode->gso_segment(x, skb, esp_features); 140 + if (IS_ERR_OR_NULL(segs)) 141 + goto out; 142 + 143 + __skb_pull(skb, skb->data - skb_mac_header(skb)); 144 + 145 + skb2 = segs; 146 + do { 147 + struct sk_buff *nskb = skb2->next; 148 + 149 + xo = xfrm_offload(skb2); 150 + xo->flags |= XFRM_GSO_SEGMENT; 151 + xo->seq.low = seq; 152 + xo->seq.hi = xfrm_replay_seqhi(x, seq); 153 + 154 + if(!(features & NETIF_F_HW_ESP)) 155 + xo->flags |= CRYPTO_FALLBACK; 156 + 157 + x->outer_mode->xmit(x, 
skb2); 158 + 159 + err = x->type_offload->xmit(x, skb2, esp_features); 160 + if (err) { 161 + kfree_skb_list(segs); 162 + return ERR_PTR(err); 163 + } 164 + 165 + if (!skb_is_gso(skb2)) 166 + seq++; 167 + else 168 + seq += skb_shinfo(skb2)->gso_segs; 169 + 170 + skb_push(skb2, skb2->mac_len); 171 + skb2 = nskb; 172 + } while (skb2); 173 + 174 + out: 175 + return segs; 176 + } 177 + 87 178 static int esp_input_tail(struct xfrm_state *x, struct sk_buff *skb) 88 179 { 89 180 struct crypto_aead *aead = x->data; ··· 264 173 static const struct net_offload esp4_offload = { 265 174 .callbacks = { 266 175 .gro_receive = esp4_gro_receive, 176 + .gso_segment = esp4_gso_segment, 267 177 }, 268 178 }; 269 179 ··· 274 182 .proto = IPPROTO_ESP, 275 183 .input_tail = esp_input_tail, 276 184 .xmit = esp_xmit, 185 + .encap = esp4_gso_encap, 277 186 }; 278 187 279 188 static int __init esp4_offload_init(void)
+6 -2
net/ipv6/esp6.c
··· 179 179 * encryption. 180 180 */ 181 181 if ((x->props.flags & XFRM_STATE_ESN)) { 182 + struct xfrm_offload *xo = xfrm_offload(skb); 183 + 182 184 esph = (void *)(skb_transport_header(skb) - sizeof(__be32)); 183 185 *seqhi = esph->spi; 184 - esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.hi); 186 + if (xo) 187 + esph->seq_no = htonl(xo->seq.hi); 188 + else 189 + esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.hi); 185 190 } 186 191 187 192 esph->spi = x->id.spi; ··· 228 223 struct sk_buff *trailer; 229 224 int tailen = esp->tailen; 230 225 231 - *skb_mac_header(skb) = IPPROTO_ESP; 232 226 esph = ip_esp_hdr(skb); 233 227 234 228 if (!skb_cloned(skb)) {
+93
net/ipv6/esp6_offload.c
··· 86 86 return NULL; 87 87 } 88 88 89 + static void esp6_gso_encap(struct xfrm_state *x, struct sk_buff *skb) 90 + { 91 + struct ip_esp_hdr *esph; 92 + struct ipv6hdr *iph = ipv6_hdr(skb); 93 + struct xfrm_offload *xo = xfrm_offload(skb); 94 + int proto = iph->nexthdr; 95 + 96 + skb_push(skb, -skb_network_offset(skb)); 97 + esph = ip_esp_hdr(skb); 98 + *skb_mac_header(skb) = IPPROTO_ESP; 99 + 100 + esph->spi = x->id.spi; 101 + esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low); 102 + 103 + xo->proto = proto; 104 + } 105 + 106 + static struct sk_buff *esp6_gso_segment(struct sk_buff *skb, 107 + netdev_features_t features) 108 + { 109 + __u32 seq; 110 + int err = 0; 111 + struct sk_buff *skb2; 112 + struct xfrm_state *x; 113 + struct ip_esp_hdr *esph; 114 + struct crypto_aead *aead; 115 + struct sk_buff *segs = ERR_PTR(-EINVAL); 116 + netdev_features_t esp_features = features; 117 + struct xfrm_offload *xo = xfrm_offload(skb); 118 + 119 + if (!xo) 120 + goto out; 121 + 122 + seq = xo->seq.low; 123 + 124 + x = skb->sp->xvec[skb->sp->len - 1]; 125 + aead = x->data; 126 + esph = ip_esp_hdr(skb); 127 + 128 + if (esph->spi != x->id.spi) 129 + goto out; 130 + 131 + if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead))) 132 + goto out; 133 + 134 + __skb_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)); 135 + 136 + skb->encap_hdr_csum = 1; 137 + 138 + if (!(features & NETIF_F_HW_ESP)) 139 + esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK); 140 + 141 + segs = x->outer_mode->gso_segment(x, skb, esp_features); 142 + if (IS_ERR_OR_NULL(segs)) 143 + goto out; 144 + 145 + __skb_pull(skb, skb->data - skb_mac_header(skb)); 146 + 147 + skb2 = segs; 148 + do { 149 + struct sk_buff *nskb = skb2->next; 150 + 151 + xo = xfrm_offload(skb2); 152 + xo->flags |= XFRM_GSO_SEGMENT; 153 + xo->seq.low = seq; 154 + xo->seq.hi = xfrm_replay_seqhi(x, seq); 155 + 156 + if(!(features & NETIF_F_HW_ESP)) 157 + xo->flags |= CRYPTO_FALLBACK; 158 + 159 + 
x->outer_mode->xmit(x, skb2); 160 + 161 + err = x->type_offload->xmit(x, skb2, esp_features); 162 + if (err) { 163 + kfree_skb_list(segs); 164 + return ERR_PTR(err); 165 + } 166 + 167 + if (!skb_is_gso(skb2)) 168 + seq++; 169 + else 170 + seq += skb_shinfo(skb2)->gso_segs; 171 + 172 + skb_push(skb2, skb2->mac_len); 173 + skb2 = nskb; 174 + } while (skb2); 175 + 176 + out: 177 + return segs; 178 + } 179 + 89 180 static int esp6_input_tail(struct xfrm_state *x, struct sk_buff *skb) 90 181 { 91 182 struct crypto_aead *aead = x->data; ··· 267 176 static const struct net_offload esp6_offload = { 268 177 .callbacks = { 269 178 .gro_receive = esp6_gro_receive, 179 + .gso_segment = esp6_gso_segment, 270 180 }, 271 181 }; 272 182 ··· 277 185 .proto = IPPROTO_ESP, 278 186 .input_tail = esp6_input_tail, 279 187 .xmit = esp6_xmit, 188 + .encap = esp6_gso_encap, 280 189 }; 281 190 282 191 static int __init esp6_offload_init(void)
+2 -1
net/xfrm/xfrm_replay.c
··· 45 45 46 46 return seq_hi; 47 47 } 48 - 48 + EXPORT_SYMBOL(xfrm_replay_seqhi); 49 + ; 49 50 static void xfrm_replay_notify(struct xfrm_state *x, int event) 50 51 { 51 52 struct km_event c;