Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

xfrm: add support for UDPv6 encapsulation of ESP

This patch adds support for encapsulation of ESP over UDPv6. The code
is very similar to the IPv4 encapsulation implementation, and makes it
easy to add espintcp on IPv6 as a follow-up.

Signed-off-by: Sabrina Dubroca <sd@queasysnail.net>
Signed-off-by: Steffen Klassert <steffen.klassert@secunet.com>

Authored by Sabrina Dubroca, committed by Steffen Klassert
0146dca7 e62905ae

+395 -37
+3
include/net/ipv6_stubs.h
··· 56 56 void (*ndisc_send_na)(struct net_device *dev, const struct in6_addr *daddr, 57 57 const struct in6_addr *solicited_addr, 58 58 bool router, bool solicited, bool override, bool inc_opt); 59 + #if IS_ENABLED(CONFIG_XFRM) 60 + int (*xfrm6_udp_encap_rcv)(struct sock *sk, struct sk_buff *skb); 61 + #endif 59 62 struct neigh_table *nd_tbl; 60 63 }; 61 64 extern const struct ipv6_stub *ipv6_stub __read_mostly;
+5
include/net/xfrm.h
··· 1406 1406 1407 1407 struct xfrm6_protocol { 1408 1408 int (*handler)(struct sk_buff *skb); 1409 + int (*input_handler)(struct sk_buff *skb, int nexthdr, __be32 spi, 1410 + int encap_type); 1409 1411 int (*cb_handler)(struct sk_buff *skb, int err); 1410 1412 int (*err_handler)(struct sk_buff *skb, struct inet6_skb_parm *opt, 1411 1413 u8 type, u8 code, int offset, __be32 info); ··· 1592 1590 int xfrm6_extract_input(struct xfrm_state *x, struct sk_buff *skb); 1593 1591 int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi, 1594 1592 struct ip6_tnl *t); 1593 + int xfrm6_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi, 1594 + int encap_type); 1595 1595 int xfrm6_transport_finish(struct sk_buff *skb, int async); 1596 1596 int xfrm6_rcv_tnl(struct sk_buff *skb, struct ip6_tnl *t); 1597 1597 int xfrm6_rcv(struct sk_buff *skb); ··· 1614 1610 1615 1611 #ifdef CONFIG_XFRM 1616 1612 int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb); 1613 + int xfrm6_udp_encap_rcv(struct sock *sk, struct sk_buff *skb); 1617 1614 int xfrm_user_policy(struct sock *sk, int optname, 1618 1615 u8 __user *optval, int optlen); 1619 1616 #else
+9 -1
net/ipv4/udp.c
··· 112 112 #include <net/sock_reuseport.h> 113 113 #include <net/addrconf.h> 114 114 #include <net/udp_tunnel.h> 115 + #if IS_ENABLED(CONFIG_IPV6) 116 + #include <net/ipv6_stubs.h> 117 + #endif 115 118 116 119 struct udp_table udp_table __read_mostly; 117 120 EXPORT_SYMBOL(udp_table); ··· 2566 2563 #ifdef CONFIG_XFRM 2567 2564 case UDP_ENCAP_ESPINUDP: 2568 2565 case UDP_ENCAP_ESPINUDP_NON_IKE: 2569 - up->encap_rcv = xfrm4_udp_encap_rcv; 2566 + #if IS_ENABLED(CONFIG_IPV6) 2567 + if (sk->sk_family == AF_INET6) 2568 + up->encap_rcv = ipv6_stub->xfrm6_udp_encap_rcv; 2569 + else 2570 + #endif 2571 + up->encap_rcv = xfrm4_udp_encap_rcv; 2570 2572 #endif 2571 2573 fallthrough; 2572 2574 case UDP_ENCAP_L2TPINUDP:
+4
net/ipv6/af_inet6.c
··· 60 60 #include <net/calipso.h> 61 61 #include <net/seg6.h> 62 62 #include <net/rpl.h> 63 + #include <net/xfrm.h> 63 64 64 65 #include <linux/uaccess.h> 65 66 #include <linux/mroute6.h> ··· 962 961 .ip6_del_rt = ip6_del_rt, 963 962 .udpv6_encap_enable = udpv6_encap_enable, 964 963 .ndisc_send_na = ndisc_send_na, 964 + #if IS_ENABLED(CONFIG_XFRM) 965 + .xfrm6_udp_encap_rcv = xfrm6_udp_encap_rcv, 966 + #endif 965 967 .nd_tbl = &nd_tbl, 966 968 }; 967 969
+1
net/ipv6/ah6.c
··· 767 767 768 768 static struct xfrm6_protocol ah6_protocol = { 769 769 .handler = xfrm6_rcv, 770 + .input_handler = xfrm_input, 770 771 .cb_handler = ah6_rcv_cb, 771 772 .err_handler = ah6_err, 772 773 .priority = 0,
+201 -25
net/ipv6/esp6.c
··· 26 26 #include <linux/random.h> 27 27 #include <linux/slab.h> 28 28 #include <linux/spinlock.h> 29 + #include <net/ip6_checksum.h> 29 30 #include <net/ip6_route.h> 30 31 #include <net/icmp.h> 31 32 #include <net/ipv6.h> 32 33 #include <net/protocol.h> 34 + #include <net/udp.h> 33 35 #include <linux/icmpv6.h> 34 36 35 37 #include <linux/highmem.h> ··· 39 37 struct esp_skb_cb { 40 38 struct xfrm_skb_cb xfrm; 41 39 void *tmp; 40 + }; 41 + 42 + struct esp_output_extra { 43 + __be32 seqhi; 44 + u32 esphoff; 42 45 }; 43 46 44 47 #define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0])) ··· 79 72 return kmalloc(len, GFP_ATOMIC); 80 73 } 81 74 82 - static inline __be32 *esp_tmp_seqhi(void *tmp) 75 + static inline void *esp_tmp_extra(void *tmp) 83 76 { 84 - return PTR_ALIGN((__be32 *)tmp, __alignof__(__be32)); 77 + return PTR_ALIGN(tmp, __alignof__(struct esp_output_extra)); 85 78 } 86 79 87 80 static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int seqhilen) ··· 111 104 112 105 static void esp_ssg_unref(struct xfrm_state *x, void *tmp) 113 106 { 107 + struct esp_output_extra *extra = esp_tmp_extra(tmp); 114 108 struct crypto_aead *aead = x->data; 115 - int seqhilen = 0; 109 + int extralen = 0; 116 110 u8 *iv; 117 111 struct aead_request *req; 118 112 struct scatterlist *sg; 119 113 120 114 if (x->props.flags & XFRM_STATE_ESN) 121 - seqhilen += sizeof(__be32); 115 + extralen += sizeof(*extra); 122 116 123 - iv = esp_tmp_iv(aead, tmp, seqhilen); 117 + iv = esp_tmp_iv(aead, tmp, extralen); 124 118 req = esp_tmp_req(aead, iv); 125 119 126 120 /* Unref skb_frag_pages in the src scatterlist if necessary. 
··· 130 122 if (req->src != req->dst) 131 123 for (sg = sg_next(req->src); sg; sg = sg_next(sg)) 132 124 put_page(sg_page(sg)); 125 + } 126 + 127 + static void esp_output_encap_csum(struct sk_buff *skb) 128 + { 129 + /* UDP encap with IPv6 requires a valid checksum */ 130 + if (*skb_mac_header(skb) == IPPROTO_UDP) { 131 + struct udphdr *uh = udp_hdr(skb); 132 + struct ipv6hdr *ip6h = ipv6_hdr(skb); 133 + int len = ntohs(uh->len); 134 + unsigned int offset = skb_transport_offset(skb); 135 + __wsum csum = skb_checksum(skb, offset, skb->len - offset, 0); 136 + 137 + uh->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, 138 + len, IPPROTO_UDP, csum); 139 + if (uh->check == 0) 140 + uh->check = CSUM_MANGLED_0; 141 + } 133 142 } 134 143 135 144 static void esp_output_done(struct crypto_async_request *base, int err) ··· 168 143 esp_ssg_unref(x, tmp); 169 144 kfree(tmp); 170 145 146 + esp_output_encap_csum(skb); 147 + 171 148 if (xo && (xo->flags & XFRM_DEV_RESUME)) { 172 149 if (err) { 173 150 XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR); ··· 190 163 { 191 164 struct ip_esp_hdr *esph = (void *)(skb->data + offset); 192 165 void *tmp = ESP_SKB_CB(skb)->tmp; 193 - __be32 *seqhi = esp_tmp_seqhi(tmp); 166 + __be32 *seqhi = esp_tmp_extra(tmp); 194 167 195 168 esph->seq_no = esph->spi; 196 169 esph->spi = *seqhi; ··· 198 171 199 172 static void esp_output_restore_header(struct sk_buff *skb) 200 173 { 201 - esp_restore_header(skb, skb_transport_offset(skb) - sizeof(__be32)); 174 + void *tmp = ESP_SKB_CB(skb)->tmp; 175 + struct esp_output_extra *extra = esp_tmp_extra(tmp); 176 + 177 + esp_restore_header(skb, skb_transport_offset(skb) + extra->esphoff - 178 + sizeof(__be32)); 202 179 } 203 180 204 181 static struct ip_esp_hdr *esp_output_set_esn(struct sk_buff *skb, 205 182 struct xfrm_state *x, 206 183 struct ip_esp_hdr *esph, 207 - __be32 *seqhi) 184 + struct esp_output_extra *extra) 208 185 { 209 186 /* For ESN we move the header forward by 4 bytes to 210 187 * 
accomodate the high bits. We will move it back after 211 188 * encryption. 212 189 */ 213 190 if ((x->props.flags & XFRM_STATE_ESN)) { 191 + __u32 seqhi; 214 192 struct xfrm_offload *xo = xfrm_offload(skb); 215 193 216 - esph = (void *)(skb_transport_header(skb) - sizeof(__be32)); 217 - *seqhi = esph->spi; 218 194 if (xo) 219 - esph->seq_no = htonl(xo->seq.hi); 195 + seqhi = xo->seq.hi; 220 196 else 221 - esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.hi); 197 + seqhi = XFRM_SKB_CB(skb)->seq.output.hi; 198 + 199 + extra->esphoff = (unsigned char *)esph - 200 + skb_transport_header(skb); 201 + esph = (struct ip_esp_hdr *)((unsigned char *)esph - 4); 202 + extra->seqhi = esph->spi; 203 + esph->seq_no = htonl(seqhi); 222 204 } 223 205 224 206 esph->spi = x->id.spi; ··· 243 207 esp_output_done(base, err); 244 208 } 245 209 210 + static struct ip_esp_hdr *esp6_output_udp_encap(struct sk_buff *skb, 211 + int encap_type, 212 + struct esp_info *esp, 213 + __be16 sport, 214 + __be16 dport) 215 + { 216 + struct udphdr *uh; 217 + __be32 *udpdata32; 218 + unsigned int len; 219 + 220 + len = skb->len + esp->tailen - skb_transport_offset(skb); 221 + if (len > U16_MAX) 222 + return ERR_PTR(-EMSGSIZE); 223 + 224 + uh = (struct udphdr *)esp->esph; 225 + uh->source = sport; 226 + uh->dest = dport; 227 + uh->len = htons(len); 228 + uh->check = 0; 229 + 230 + *skb_mac_header(skb) = IPPROTO_UDP; 231 + 232 + if (encap_type == UDP_ENCAP_ESPINUDP_NON_IKE) { 233 + udpdata32 = (__be32 *)(uh + 1); 234 + udpdata32[0] = udpdata32[1] = 0; 235 + return (struct ip_esp_hdr *)(udpdata32 + 2); 236 + } 237 + 238 + return (struct ip_esp_hdr *)(uh + 1); 239 + } 240 + 241 + static int esp6_output_encap(struct xfrm_state *x, struct sk_buff *skb, 242 + struct esp_info *esp) 243 + { 244 + struct xfrm_encap_tmpl *encap = x->encap; 245 + struct ip_esp_hdr *esph; 246 + __be16 sport, dport; 247 + int encap_type; 248 + 249 + spin_lock_bh(&x->lock); 250 + sport = encap->encap_sport; 251 + dport = 
encap->encap_dport; 252 + encap_type = encap->encap_type; 253 + spin_unlock_bh(&x->lock); 254 + 255 + switch (encap_type) { 256 + default: 257 + case UDP_ENCAP_ESPINUDP: 258 + case UDP_ENCAP_ESPINUDP_NON_IKE: 259 + esph = esp6_output_udp_encap(skb, encap_type, esp, sport, dport); 260 + break; 261 + } 262 + 263 + if (IS_ERR(esph)) 264 + return PTR_ERR(esph); 265 + 266 + esp->esph = esph; 267 + 268 + return 0; 269 + } 270 + 246 271 int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp) 247 272 { 248 273 u8 *tail; 249 274 u8 *vaddr; 250 275 int nfrags; 276 + int esph_offset; 251 277 struct page *page; 252 278 struct sk_buff *trailer; 253 279 int tailen = esp->tailen; 280 + 281 + if (x->encap) { 282 + int err = esp6_output_encap(x, skb, esp); 283 + 284 + if (err < 0) 285 + return err; 286 + } 254 287 255 288 if (!skb_cloned(skb)) { 256 289 if (tailen <= skb_tailroom(skb)) { ··· 379 274 } 380 275 381 276 cow: 277 + esph_offset = (unsigned char *)esp->esph - skb_transport_header(skb); 278 + 382 279 nfrags = skb_cow_data(skb, tailen, &trailer); 383 280 if (nfrags < 0) 384 281 goto out; 385 282 tail = skb_tail_pointer(trailer); 283 + esp->esph = (struct ip_esp_hdr *)(skb_transport_header(skb) + esph_offset); 386 284 387 285 skip_cow: 388 286 esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto); ··· 403 295 void *tmp; 404 296 int ivlen; 405 297 int assoclen; 406 - int seqhilen; 407 - __be32 *seqhi; 298 + int extralen; 408 299 struct page *page; 409 300 struct ip_esp_hdr *esph; 410 301 struct aead_request *req; 411 302 struct crypto_aead *aead; 412 303 struct scatterlist *sg, *dsg; 304 + struct esp_output_extra *extra; 413 305 int err = -ENOMEM; 414 306 415 307 assoclen = sizeof(struct ip_esp_hdr); 416 - seqhilen = 0; 308 + extralen = 0; 417 309 418 310 if (x->props.flags & XFRM_STATE_ESN) { 419 - seqhilen += sizeof(__be32); 311 + extralen += sizeof(*extra); 420 312 assoclen += sizeof(__be32); 421 313 } 422 314 ··· 424 316 alen = 
crypto_aead_authsize(aead); 425 317 ivlen = crypto_aead_ivsize(aead); 426 318 427 - tmp = esp_alloc_tmp(aead, esp->nfrags + 2, seqhilen); 319 + tmp = esp_alloc_tmp(aead, esp->nfrags + 2, extralen); 428 320 if (!tmp) 429 321 goto error; 430 322 431 - seqhi = esp_tmp_seqhi(tmp); 432 - iv = esp_tmp_iv(aead, tmp, seqhilen); 323 + extra = esp_tmp_extra(tmp); 324 + iv = esp_tmp_iv(aead, tmp, extralen); 433 325 req = esp_tmp_req(aead, iv); 434 326 sg = esp_req_sg(aead, req); 435 327 ··· 438 330 else 439 331 dsg = &sg[esp->nfrags]; 440 332 441 - esph = esp_output_set_esn(skb, x, ip_esp_hdr(skb), seqhi); 333 + esph = esp_output_set_esn(skb, x, esp->esph, extra); 334 + esp->esph = esph; 442 335 443 336 sg_init_table(sg, esp->nfrags); 444 337 err = skb_to_sgvec(skb, sg, ··· 503 394 case 0: 504 395 if ((x->props.flags & XFRM_STATE_ESN)) 505 396 esp_output_restore_header(skb); 397 + esp_output_encap_csum(skb); 506 398 } 507 399 508 400 if (sg != dsg) ··· 548 438 esp.plen = esp.clen - skb->len - esp.tfclen; 549 439 esp.tailen = esp.tfclen + esp.plen + alen; 550 440 441 + esp.esph = ip_esp_hdr(skb); 442 + 551 443 esp.nfrags = esp6_output_head(x, skb, &esp); 552 444 if (esp.nfrags < 0) 553 445 return esp.nfrags; 554 446 555 - esph = ip_esp_hdr(skb); 447 + esph = esp.esph; 556 448 esph->spi = x->id.spi; 557 449 558 450 esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low); ··· 628 516 err = esp_remove_trailer(skb); 629 517 if (unlikely(err < 0)) 630 518 goto out; 519 + 520 + if (x->encap) { 521 + const struct ipv6hdr *ip6h = ipv6_hdr(skb); 522 + struct xfrm_encap_tmpl *encap = x->encap; 523 + struct udphdr *uh = (void *)(skb_network_header(skb) + hdr_len); 524 + __be16 source; 525 + 526 + switch (x->encap->encap_type) { 527 + case UDP_ENCAP_ESPINUDP: 528 + case UDP_ENCAP_ESPINUDP_NON_IKE: 529 + source = uh->source; 530 + break; 531 + default: 532 + WARN_ON_ONCE(1); 533 + err = -EINVAL; 534 + goto out; 535 + } 536 + 537 + /* 538 + * 1) if the NAT-T peer's IP or port changed then 
539 + * advertize the change to the keying daemon. 540 + * This is an inbound SA, so just compare 541 + * SRC ports. 542 + */ 543 + if (!ipv6_addr_equal(&ip6h->saddr, &x->props.saddr.in6) || 544 + source != encap->encap_sport) { 545 + xfrm_address_t ipaddr; 546 + 547 + memcpy(&ipaddr.a6, &ip6h->saddr.s6_addr, sizeof(ipaddr.a6)); 548 + km_new_mapping(x, &ipaddr, source); 549 + 550 + /* XXX: perhaps add an extra 551 + * policy check here, to see 552 + * if we should allow or 553 + * reject a packet from a 554 + * different source 555 + * address/port. 556 + */ 557 + } 558 + 559 + /* 560 + * 2) ignore UDP/TCP checksums in case 561 + * of NAT-T in Transport Mode, or 562 + * perform other post-processing fixes 563 + * as per draft-ietf-ipsec-udp-encaps-06, 564 + * section 3.1.2 565 + */ 566 + if (x->props.mode == XFRM_MODE_TRANSPORT) 567 + skb->ip_summed = CHECKSUM_UNNECESSARY; 568 + } 631 569 632 570 skb_postpull_rcsum(skb, skb_network_header(skb), 633 571 skb_network_header_len(skb)); ··· 794 632 goto out; 795 633 796 634 ESP_SKB_CB(skb)->tmp = tmp; 797 - seqhi = esp_tmp_seqhi(tmp); 635 + seqhi = esp_tmp_extra(tmp); 798 636 iv = esp_tmp_iv(aead, tmp, seqhilen); 799 637 req = esp_tmp_req(aead, iv); 800 638 sg = esp_req_sg(aead, req); ··· 998 836 u32 align; 999 837 int err; 1000 838 1001 - if (x->encap) 1002 - return -EINVAL; 1003 - 1004 839 x->data = NULL; 1005 840 1006 841 if (x->aead) ··· 1026 867 break; 1027 868 } 1028 869 870 + if (x->encap) { 871 + struct xfrm_encap_tmpl *encap = x->encap; 872 + 873 + switch (encap->encap_type) { 874 + default: 875 + err = -EINVAL; 876 + goto error; 877 + case UDP_ENCAP_ESPINUDP: 878 + x->props.header_len += sizeof(struct udphdr); 879 + break; 880 + case UDP_ENCAP_ESPINUDP_NON_IKE: 881 + x->props.header_len += sizeof(struct udphdr) + 2 * sizeof(u32); 882 + break; 883 + } 884 + } 885 + 1029 886 align = ALIGN(crypto_aead_blocksize(aead), 4); 1030 887 x->props.trailer_len = align + 1 + crypto_aead_authsize(aead); 1031 888 ··· 1068 
893 1069 894 static struct xfrm6_protocol esp6_protocol = { 1070 895 .handler = xfrm6_rcv, 896 + .input_handler = xfrm_input, 1071 897 .cb_handler = esp6_rcv_cb, 1072 898 .err_handler = esp6_err, 1073 899 .priority = 0,
+3 -4
net/ipv6/esp6_offload.c
··· 271 271 int alen; 272 272 int blksize; 273 273 struct xfrm_offload *xo; 274 - struct ip_esp_hdr *esph; 275 274 struct crypto_aead *aead; 276 275 struct esp_info esp; 277 276 bool hw_offload = true; ··· 311 312 312 313 seq = xo->seq.low; 313 314 314 - esph = ip_esp_hdr(skb); 315 - esph->spi = x->id.spi; 315 + esp.esph = ip_esp_hdr(skb); 316 + esp.esph->spi = x->id.spi; 316 317 317 318 skb_push(skb, -skb_network_offset(skb)); 318 319 319 320 if (xo->flags & XFRM_GSO_SEGMENT) { 320 - esph->seq_no = htonl(seq); 321 + esp.esph->seq_no = htonl(seq); 321 322 322 323 if (!skb_is_gso(skb)) 323 324 xo->seq.low++;
+16 -2
net/ipv6/ip6_vti.c
··· 296 296 dev_put(dev); 297 297 } 298 298 299 - static int vti6_rcv(struct sk_buff *skb) 299 + static int vti6_input_proto(struct sk_buff *skb, int nexthdr, __be32 spi, 300 + int encap_type) 300 301 { 301 302 struct ip6_tnl *t; 302 303 const struct ipv6hdr *ipv6h = ipv6_hdr(skb); ··· 324 323 325 324 rcu_read_unlock(); 326 325 327 - return xfrm6_rcv_tnl(skb, t); 326 + XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = t; 327 + XFRM_SPI_SKB_CB(skb)->family = AF_INET6; 328 + XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct ipv6hdr, daddr); 329 + return xfrm_input(skb, nexthdr, spi, encap_type); 328 330 } 329 331 rcu_read_unlock(); 330 332 return -EINVAL; 331 333 discard: 332 334 kfree_skb(skb); 333 335 return 0; 336 + } 337 + 338 + static int vti6_rcv(struct sk_buff *skb) 339 + { 340 + int nexthdr = skb_network_header(skb)[IP6CB(skb)->nhoff]; 341 + 342 + return vti6_input_proto(skb, nexthdr, 0, 0); 334 343 } 335 344 336 345 static int vti6_rcv_cb(struct sk_buff *skb, int err) ··· 1196 1185 1197 1186 static struct xfrm6_protocol vti_esp6_protocol __read_mostly = { 1198 1187 .handler = vti6_rcv, 1188 + .input_handler = vti6_input_proto, 1199 1189 .cb_handler = vti6_rcv_cb, 1200 1190 .err_handler = vti6_err, 1201 1191 .priority = 100, ··· 1204 1192 1205 1193 static struct xfrm6_protocol vti_ah6_protocol __read_mostly = { 1206 1194 .handler = vti6_rcv, 1195 + .input_handler = vti6_input_proto, 1207 1196 .cb_handler = vti6_rcv_cb, 1208 1197 .err_handler = vti6_err, 1209 1198 .priority = 100, ··· 1212 1199 1213 1200 static struct xfrm6_protocol vti_ipcomp6_protocol __read_mostly = { 1214 1201 .handler = vti6_rcv, 1202 + .input_handler = vti6_input_proto, 1215 1203 .cb_handler = vti6_rcv_cb, 1216 1204 .err_handler = vti6_err, 1217 1205 .priority = 100,
+1
net/ipv6/ipcomp6.c
··· 183 183 184 184 static struct xfrm6_protocol ipcomp6_protocol = { 185 185 .handler = xfrm6_rcv, 186 + .input_handler = xfrm_input, 186 187 .cb_handler = ipcomp6_rcv_cb, 187 188 .err_handler = ipcomp6_err, 188 189 .priority = 0,
+101 -5
net/ipv6/xfrm6_input.c
··· 35 35 static int xfrm6_transport_finish2(struct net *net, struct sock *sk, 36 36 struct sk_buff *skb) 37 37 { 38 - if (xfrm_trans_queue(skb, ip6_rcv_finish)) 39 - __kfree_skb(skb); 40 - return -1; 38 + if (xfrm_trans_queue(skb, ip6_rcv_finish)) { 39 + kfree_skb(skb); 40 + return NET_RX_DROP; 41 + } 42 + 43 + return 0; 41 44 } 42 45 43 46 int xfrm6_transport_finish(struct sk_buff *skb, int async) ··· 63 60 if (xo && (xo->flags & XFRM_GRO)) { 64 61 skb_mac_header_rebuild(skb); 65 62 skb_reset_transport_header(skb); 66 - return -1; 63 + return 0; 67 64 } 68 65 69 66 NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, 70 67 dev_net(skb->dev), NULL, skb, skb->dev, NULL, 71 68 xfrm6_transport_finish2); 72 - return -1; 69 + return 0; 70 + } 71 + 72 + /* If it's a keepalive packet, then just eat it. 73 + * If it's an encapsulated packet, then pass it to the 74 + * IPsec xfrm input. 75 + * Returns 0 if skb passed to xfrm or was dropped. 76 + * Returns >0 if skb should be passed to UDP. 77 + * Returns <0 if skb should be resubmitted (-ret is protocol) 78 + */ 79 + int xfrm6_udp_encap_rcv(struct sock *sk, struct sk_buff *skb) 80 + { 81 + struct udp_sock *up = udp_sk(sk); 82 + struct udphdr *uh; 83 + struct ipv6hdr *ip6h; 84 + int len; 85 + int ip6hlen = sizeof(struct ipv6hdr); 86 + 87 + __u8 *udpdata; 88 + __be32 *udpdata32; 89 + __u16 encap_type = up->encap_type; 90 + 91 + /* if this is not encapsulated socket, then just return now */ 92 + if (!encap_type) 93 + return 1; 94 + 95 + /* If this is a paged skb, make sure we pull up 96 + * whatever data we need to look at. */ 97 + len = skb->len - sizeof(struct udphdr); 98 + if (!pskb_may_pull(skb, sizeof(struct udphdr) + min(len, 8))) 99 + return 1; 100 + 101 + /* Now we can get the pointers */ 102 + uh = udp_hdr(skb); 103 + udpdata = (__u8 *)uh + sizeof(struct udphdr); 104 + udpdata32 = (__be32 *)udpdata; 105 + 106 + switch (encap_type) { 107 + default: 108 + case UDP_ENCAP_ESPINUDP: 109 + /* Check if this is a keepalive packet. 
If so, eat it. */ 110 + if (len == 1 && udpdata[0] == 0xff) { 111 + goto drop; 112 + } else if (len > sizeof(struct ip_esp_hdr) && udpdata32[0] != 0) { 113 + /* ESP Packet without Non-ESP header */ 114 + len = sizeof(struct udphdr); 115 + } else 116 + /* Must be an IKE packet.. pass it through */ 117 + return 1; 118 + break; 119 + case UDP_ENCAP_ESPINUDP_NON_IKE: 120 + /* Check if this is a keepalive packet. If so, eat it. */ 121 + if (len == 1 && udpdata[0] == 0xff) { 122 + goto drop; 123 + } else if (len > 2 * sizeof(u32) + sizeof(struct ip_esp_hdr) && 124 + udpdata32[0] == 0 && udpdata32[1] == 0) { 125 + 126 + /* ESP Packet with Non-IKE marker */ 127 + len = sizeof(struct udphdr) + 2 * sizeof(u32); 128 + } else 129 + /* Must be an IKE packet.. pass it through */ 130 + return 1; 131 + break; 132 + } 133 + 134 + /* At this point we are sure that this is an ESPinUDP packet, 135 + * so we need to remove 'len' bytes from the packet (the UDP 136 + * header and optional ESP marker bytes) and then modify the 137 + * protocol to ESP, and then call into the transform receiver. 138 + */ 139 + if (skb_unclone(skb, GFP_ATOMIC)) 140 + goto drop; 141 + 142 + /* Now we can update and verify the packet length... */ 143 + ip6h = ipv6_hdr(skb); 144 + ip6h->payload_len = htons(ntohs(ip6h->payload_len) - len); 145 + if (skb->len < ip6hlen + len) { 146 + /* packet is too small!?! */ 147 + goto drop; 148 + } 149 + 150 + /* pull the data buffer up to the ESP header and set the 151 + * transport header to point to ESP. Keep UDP on the stack 152 + * for later. 153 + */ 154 + __skb_pull(skb, len); 155 + skb_reset_transport_header(skb); 156 + 157 + /* process ESP */ 158 + return xfrm6_rcv_encap(skb, IPPROTO_ESP, 0, encap_type); 159 + 160 + drop: 161 + kfree_skb(skb); 162 + return 0; 73 163 } 74 164 75 165 int xfrm6_rcv_tnl(struct sk_buff *skb, struct ip6_tnl *t)
+48
net/ipv6/xfrm6_protocol.c
··· 14 14 #include <linux/mutex.h> 15 15 #include <linux/skbuff.h> 16 16 #include <linux/icmpv6.h> 17 + #include <net/ip6_route.h> 17 18 #include <net/ipv6.h> 18 19 #include <net/protocol.h> 19 20 #include <net/xfrm.h> ··· 58 57 59 58 return 0; 60 59 } 60 + 61 + int xfrm6_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi, 62 + int encap_type) 63 + { 64 + int ret; 65 + struct xfrm6_protocol *handler; 66 + struct xfrm6_protocol __rcu **head = proto_handlers(nexthdr); 67 + 68 + XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = NULL; 69 + XFRM_SPI_SKB_CB(skb)->family = AF_INET6; 70 + XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct ipv6hdr, daddr); 71 + 72 + if (!head) 73 + goto out; 74 + 75 + if (!skb_dst(skb)) { 76 + const struct ipv6hdr *ip6h = ipv6_hdr(skb); 77 + int flags = RT6_LOOKUP_F_HAS_SADDR; 78 + struct dst_entry *dst; 79 + struct flowi6 fl6 = { 80 + .flowi6_iif = skb->dev->ifindex, 81 + .daddr = ip6h->daddr, 82 + .saddr = ip6h->saddr, 83 + .flowlabel = ip6_flowinfo(ip6h), 84 + .flowi6_mark = skb->mark, 85 + .flowi6_proto = ip6h->nexthdr, 86 + }; 87 + 88 + dst = ip6_route_input_lookup(dev_net(skb->dev), skb->dev, &fl6, 89 + skb, flags); 90 + if (dst->error) 91 + goto drop; 92 + skb_dst_set(skb, dst); 93 + } 94 + 95 + for_each_protocol_rcu(*head, handler) 96 + if ((ret = handler->input_handler(skb, nexthdr, spi, encap_type)) != -EINVAL) 97 + return ret; 98 + 99 + out: 100 + icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0); 101 + 102 + drop: 103 + kfree_skb(skb); 104 + return 0; 105 + } 106 + EXPORT_SYMBOL(xfrm6_rcv_encap); 61 107 62 108 static int xfrm6_esp_rcv(struct sk_buff *skb) 63 109 {
+3
net/xfrm/xfrm_interface.c
··· 755 755 756 756 static struct xfrm6_protocol xfrmi_esp6_protocol __read_mostly = { 757 757 .handler = xfrm6_rcv, 758 + .input_handler = xfrm_input, 758 759 .cb_handler = xfrmi_rcv_cb, 759 760 .err_handler = xfrmi6_err, 760 761 .priority = 10, ··· 763 762 764 763 static struct xfrm6_protocol xfrmi_ah6_protocol __read_mostly = { 765 764 .handler = xfrm6_rcv, 765 + .input_handler = xfrm_input, 766 766 .cb_handler = xfrmi_rcv_cb, 767 767 .err_handler = xfrmi6_err, 768 768 .priority = 10, ··· 771 769 772 770 static struct xfrm6_protocol xfrmi_ipcomp6_protocol __read_mostly = { 773 771 .handler = xfrm6_rcv, 772 + .input_handler = xfrm_input, 774 773 .cb_handler = xfrmi_rcv_cb, 775 774 .err_handler = xfrmi6_err, 776 775 .priority = 10,