Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ipvs: API change to avoid rescan of IPv6 exthdr

Reduce the number of times we scan/skip the IPv6 exthdrs.

This patch contains a lot of API changes. This is done to avoid
repeating the scan of finding the IPv6 headers, via ipv6_find_hdr(),
which is called by ip_vs_fill_iph_skb().

Finding the IPv6 headers is done as early as possible, and passed on
as a pointer "struct ip_vs_iphdr *" to the affected functions.

This patch reduces/removes 19 calls to ip_vs_fill_iph_skb().

Notice, I have chosen not to change the API of the function
pointer "(*schedule)" (in struct ip_vs_scheduler) as it can be
used by external schedulers, via {un,}register_ip_vs_scheduler.
Only 4 out of 10 schedulers use info from ip_vs_iphdr*, and when
they do, they are only interested in iph->{s,d}addr.

Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Acked-by: Julian Anastasov <ja@ssi.bg>
Signed-off-by: Simon Horman <horms@verge.net.au>

authored by

Jesper Dangaard Brouer and committed by
Simon Horman
d4383f04 2f74713d

+175 -229
+41 -40
include/net/ip_vs.h
··· 487 487 488 488 int (*conn_schedule)(int af, struct sk_buff *skb, 489 489 struct ip_vs_proto_data *pd, 490 - int *verdict, struct ip_vs_conn **cpp); 490 + int *verdict, struct ip_vs_conn **cpp, 491 + struct ip_vs_iphdr *iph); 491 492 492 493 struct ip_vs_conn * 493 494 (*conn_in_get)(int af, 494 495 const struct sk_buff *skb, 495 496 const struct ip_vs_iphdr *iph, 496 - unsigned int proto_off, 497 497 int inverse); 498 498 499 499 struct ip_vs_conn * 500 500 (*conn_out_get)(int af, 501 501 const struct sk_buff *skb, 502 502 const struct ip_vs_iphdr *iph, 503 - unsigned int proto_off, 504 503 int inverse); 505 504 506 - int (*snat_handler)(struct sk_buff *skb, 507 - struct ip_vs_protocol *pp, struct ip_vs_conn *cp); 505 + int (*snat_handler)(struct sk_buff *skb, struct ip_vs_protocol *pp, 506 + struct ip_vs_conn *cp, struct ip_vs_iphdr *iph); 508 507 509 - int (*dnat_handler)(struct sk_buff *skb, 510 - struct ip_vs_protocol *pp, struct ip_vs_conn *cp); 508 + int (*dnat_handler)(struct sk_buff *skb, struct ip_vs_protocol *pp, 509 + struct ip_vs_conn *cp, struct ip_vs_iphdr *iph); 511 510 512 511 int (*csum_check)(int af, struct sk_buff *skb, 513 512 struct ip_vs_protocol *pp); ··· 606 607 NF_ACCEPT can be returned when destination is local. 
607 608 */ 608 609 int (*packet_xmit)(struct sk_buff *skb, struct ip_vs_conn *cp, 609 - struct ip_vs_protocol *pp); 610 + struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph); 610 611 611 612 /* Note: we can group the following members into a structure, 612 613 in order to save more space, and the following members are ··· 857 858 858 859 struct ip_vs_conn * 859 860 (*conn_in_get)(const struct sk_buff *skb, struct ip_vs_app *app, 860 - const struct iphdr *iph, unsigned int proto_off, 861 - int inverse); 861 + const struct iphdr *iph, int inverse); 862 862 863 863 struct ip_vs_conn * 864 864 (*conn_out_get)(const struct sk_buff *skb, struct ip_vs_app *app, 865 - const struct iphdr *iph, unsigned int proto_off, 866 - int inverse); 865 + const struct iphdr *iph, int inverse); 867 866 868 867 int (*state_transition)(struct ip_vs_conn *cp, int direction, 869 868 const struct sk_buff *skb, ··· 1160 1163 1161 1164 struct ip_vs_conn * ip_vs_conn_in_get_proto(int af, const struct sk_buff *skb, 1162 1165 const struct ip_vs_iphdr *iph, 1163 - unsigned int proto_off, 1164 1166 int inverse); 1165 1167 1166 1168 struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p); 1167 1169 1168 1170 struct ip_vs_conn * ip_vs_conn_out_get_proto(int af, const struct sk_buff *skb, 1169 1171 const struct ip_vs_iphdr *iph, 1170 - unsigned int proto_off, 1171 1172 int inverse); 1172 1173 1173 1174 /* put back the conn without restarting its timer */ ··· 1338 1343 extern void ip_vs_scheduler_put(struct ip_vs_scheduler *scheduler); 1339 1344 extern struct ip_vs_conn * 1340 1345 ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb, 1341 - struct ip_vs_proto_data *pd, int *ignored); 1346 + struct ip_vs_proto_data *pd, int *ignored, 1347 + struct ip_vs_iphdr *iph); 1342 1348 extern int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb, 1343 - struct ip_vs_proto_data *pd); 1349 + struct ip_vs_proto_data *pd, struct ip_vs_iphdr *iph); 1344 1350 1345 1351 extern void 
ip_vs_scheduler_err(struct ip_vs_service *svc, const char *msg); 1346 1352 ··· 1400 1404 /* 1401 1405 * Various IPVS packet transmitters (from ip_vs_xmit.c) 1402 1406 */ 1403 - extern int ip_vs_null_xmit 1404 - (struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp); 1405 - extern int ip_vs_bypass_xmit 1406 - (struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp); 1407 - extern int ip_vs_nat_xmit 1408 - (struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp); 1409 - extern int ip_vs_tunnel_xmit 1410 - (struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp); 1411 - extern int ip_vs_dr_xmit 1412 - (struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp); 1413 - extern int ip_vs_icmp_xmit 1414 - (struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp, 1415 - int offset, unsigned int hooknum); 1407 + extern int ip_vs_null_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, 1408 + struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph); 1409 + extern int ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, 1410 + struct ip_vs_protocol *pp, 1411 + struct ip_vs_iphdr *iph); 1412 + extern int ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, 1413 + struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph); 1414 + extern int ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, 1415 + struct ip_vs_protocol *pp, 1416 + struct ip_vs_iphdr *iph); 1417 + extern int ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, 1418 + struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph); 1419 + extern int ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, 1420 + struct ip_vs_protocol *pp, int offset, 1421 + unsigned int hooknum, struct ip_vs_iphdr *iph); 1416 1422 extern void ip_vs_dst_reset(struct ip_vs_dest *dest); 1417 1423 1418 1424 #ifdef CONFIG_IP_VS_IPV6 1419 - extern int ip_vs_bypass_xmit_v6 1420 - (struct sk_buff *skb, struct ip_vs_conn *cp, struct 
ip_vs_protocol *pp); 1421 - extern int ip_vs_nat_xmit_v6 1422 - (struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp); 1423 - extern int ip_vs_tunnel_xmit_v6 1424 - (struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp); 1425 - extern int ip_vs_dr_xmit_v6 1426 - (struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp); 1427 - extern int ip_vs_icmp_xmit_v6 1428 - (struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp, 1429 - int offset, unsigned int hooknum); 1425 + extern int ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, 1426 + struct ip_vs_protocol *pp, 1427 + struct ip_vs_iphdr *iph); 1428 + extern int ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, 1429 + struct ip_vs_protocol *pp, 1430 + struct ip_vs_iphdr *iph); 1431 + extern int ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, 1432 + struct ip_vs_protocol *pp, 1433 + struct ip_vs_iphdr *iph); 1434 + extern int ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, 1435 + struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph); 1436 + extern int ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, 1437 + struct ip_vs_protocol *pp, int offset, 1438 + unsigned int hooknum, struct ip_vs_iphdr *iph); 1430 1439 #endif 1431 1440 1432 1441 #ifdef CONFIG_SYSCTL
+6 -9
net/netfilter/ipvs/ip_vs_conn.c
··· 308 308 static int 309 309 ip_vs_conn_fill_param_proto(int af, const struct sk_buff *skb, 310 310 const struct ip_vs_iphdr *iph, 311 - unsigned int proto_off, int inverse, 312 - struct ip_vs_conn_param *p) 311 + int inverse, struct ip_vs_conn_param *p) 313 312 { 314 313 __be16 _ports[2], *pptr; 315 314 struct net *net = skb_net(skb); 316 315 317 - pptr = frag_safe_skb_hp(skb, proto_off, sizeof(_ports), _ports, iph); 316 + pptr = frag_safe_skb_hp(skb, iph->len, sizeof(_ports), _ports, iph); 318 317 if (pptr == NULL) 319 318 return 1; 320 319 ··· 328 329 329 330 struct ip_vs_conn * 330 331 ip_vs_conn_in_get_proto(int af, const struct sk_buff *skb, 331 - const struct ip_vs_iphdr *iph, 332 - unsigned int proto_off, int inverse) 332 + const struct ip_vs_iphdr *iph, int inverse) 333 333 { 334 334 struct ip_vs_conn_param p; 335 335 336 - if (ip_vs_conn_fill_param_proto(af, skb, iph, proto_off, inverse, &p)) 336 + if (ip_vs_conn_fill_param_proto(af, skb, iph, inverse, &p)) 337 337 return NULL; 338 338 339 339 return ip_vs_conn_in_get(&p); ··· 430 432 431 433 struct ip_vs_conn * 432 434 ip_vs_conn_out_get_proto(int af, const struct sk_buff *skb, 433 - const struct ip_vs_iphdr *iph, 434 - unsigned int proto_off, int inverse) 435 + const struct ip_vs_iphdr *iph, int inverse) 435 436 { 436 437 struct ip_vs_conn_param p; 437 438 438 - if (ip_vs_conn_fill_param_proto(af, skb, iph, proto_off, inverse, &p)) 439 + if (ip_vs_conn_fill_param_proto(af, skb, iph, inverse, &p)) 439 440 return NULL; 440 441 441 442 return ip_vs_conn_out_get(&p);
+51 -65
net/netfilter/ipvs/ip_vs_core.c
··· 222 222 */ 223 223 static struct ip_vs_conn * 224 224 ip_vs_sched_persist(struct ip_vs_service *svc, 225 - struct sk_buff *skb, 226 - __be16 src_port, __be16 dst_port, int *ignored) 225 + struct sk_buff *skb, __be16 src_port, __be16 dst_port, 226 + int *ignored, struct ip_vs_iphdr *iph) 227 227 { 228 228 struct ip_vs_conn *cp = NULL; 229 - struct ip_vs_iphdr iph; 230 229 struct ip_vs_dest *dest; 231 230 struct ip_vs_conn *ct; 232 231 __be16 dport = 0; /* destination port to forward */ ··· 235 236 union nf_inet_addr snet; /* source network of the client, 236 237 after masking */ 237 238 238 - ip_vs_fill_iph_skb(svc->af, skb, &iph); 239 - 240 239 /* Mask saddr with the netmask to adjust template granularity */ 241 240 #ifdef CONFIG_IP_VS_IPV6 242 241 if (svc->af == AF_INET6) 243 - ipv6_addr_prefix(&snet.in6, &iph.saddr.in6, svc->netmask); 242 + ipv6_addr_prefix(&snet.in6, &iph->saddr.in6, svc->netmask); 244 243 else 245 244 #endif 246 - snet.ip = iph.saddr.ip & svc->netmask; 245 + snet.ip = iph->saddr.ip & svc->netmask; 247 246 248 247 IP_VS_DBG_BUF(6, "p-schedule: src %s:%u dest %s:%u " 249 248 "mnet %s\n", 250 - IP_VS_DBG_ADDR(svc->af, &iph.saddr), ntohs(src_port), 251 - IP_VS_DBG_ADDR(svc->af, &iph.daddr), ntohs(dst_port), 249 + IP_VS_DBG_ADDR(svc->af, &iph->saddr), ntohs(src_port), 250 + IP_VS_DBG_ADDR(svc->af, &iph->daddr), ntohs(dst_port), 252 251 IP_VS_DBG_ADDR(svc->af, &snet)); 253 252 254 253 /* ··· 263 266 * is created for other persistent services. 264 267 */ 265 268 { 266 - int protocol = iph.protocol; 267 - const union nf_inet_addr *vaddr = &iph.daddr; 269 + int protocol = iph->protocol; 270 + const union nf_inet_addr *vaddr = &iph->daddr; 268 271 __be16 vport = 0; 269 272 270 273 if (dst_port == svc->port) { ··· 339 342 dport = dest->port; 340 343 341 344 flags = (svc->flags & IP_VS_SVC_F_ONEPACKET 342 - && iph.protocol == IPPROTO_UDP)? 345 + && iph->protocol == IPPROTO_UDP) ? 
343 346 IP_VS_CONN_F_ONE_PACKET : 0; 344 347 345 348 /* 346 349 * Create a new connection according to the template 347 350 */ 348 - ip_vs_conn_fill_param(svc->net, svc->af, iph.protocol, &iph.saddr, 349 - src_port, &iph.daddr, dst_port, &param); 351 + ip_vs_conn_fill_param(svc->net, svc->af, iph->protocol, &iph->saddr, 352 + src_port, &iph->daddr, dst_port, &param); 350 353 351 354 cp = ip_vs_conn_new(&param, &dest->addr, dport, flags, dest, skb->mark); 352 355 if (cp == NULL) { ··· 389 392 */ 390 393 struct ip_vs_conn * 391 394 ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb, 392 - struct ip_vs_proto_data *pd, int *ignored) 395 + struct ip_vs_proto_data *pd, int *ignored, 396 + struct ip_vs_iphdr *iph) 393 397 { 394 398 struct ip_vs_protocol *pp = pd->pp; 395 399 struct ip_vs_conn *cp = NULL; 396 - struct ip_vs_iphdr iph; 397 400 struct ip_vs_dest *dest; 398 401 __be16 _ports[2], *pptr; 399 402 unsigned int flags; 400 403 401 404 *ignored = 1; 402 - 403 405 /* 404 406 * IPv6 frags, only the first hit here. 405 407 */ 406 - ip_vs_fill_iph_skb(svc->af, skb, &iph); 407 - pptr = frag_safe_skb_hp(skb, iph.len, sizeof(_ports), _ports, &iph); 408 + pptr = frag_safe_skb_hp(skb, iph->len, sizeof(_ports), _ports, iph); 408 409 if (pptr == NULL) 409 410 return NULL; 410 411 ··· 422 427 * Do not schedule replies from local real server. 
423 428 */ 424 429 if ((!skb->dev || skb->dev->flags & IFF_LOOPBACK) && 425 - (cp = pp->conn_in_get(svc->af, skb, &iph, iph.len, 1))) { 430 + (cp = pp->conn_in_get(svc->af, skb, iph, 1))) { 426 431 IP_VS_DBG_PKT(12, svc->af, pp, skb, 0, 427 432 "Not scheduling reply for existing connection"); 428 433 __ip_vs_conn_put(cp); ··· 433 438 * Persistent service 434 439 */ 435 440 if (svc->flags & IP_VS_SVC_F_PERSISTENT) 436 - return ip_vs_sched_persist(svc, skb, pptr[0], pptr[1], ignored); 441 + return ip_vs_sched_persist(svc, skb, pptr[0], pptr[1], ignored, 442 + iph); 437 443 438 444 *ignored = 0; 439 445 ··· 456 460 } 457 461 458 462 flags = (svc->flags & IP_VS_SVC_F_ONEPACKET 459 - && iph.protocol == IPPROTO_UDP)? 463 + && iph->protocol == IPPROTO_UDP) ? 460 464 IP_VS_CONN_F_ONE_PACKET : 0; 461 465 462 466 /* ··· 465 469 { 466 470 struct ip_vs_conn_param p; 467 471 468 - ip_vs_conn_fill_param(svc->net, svc->af, iph.protocol, 469 - &iph.saddr, pptr[0], &iph.daddr, pptr[1], 470 - &p); 472 + ip_vs_conn_fill_param(svc->net, svc->af, iph->protocol, 473 + &iph->saddr, pptr[0], &iph->daddr, 474 + pptr[1], &p); 471 475 cp = ip_vs_conn_new(&p, &dest->addr, 472 476 dest->port ? dest->port : pptr[1], 473 477 flags, dest, skb->mark); ··· 496 500 * no destination is available for a new connection. 
497 501 */ 498 502 int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb, 499 - struct ip_vs_proto_data *pd) 503 + struct ip_vs_proto_data *pd, struct ip_vs_iphdr *iph) 500 504 { 501 505 __be16 _ports[2], *pptr; 502 - struct ip_vs_iphdr iph; 503 506 #ifdef CONFIG_SYSCTL 504 507 struct net *net; 505 508 struct netns_ipvs *ipvs; 506 509 int unicast; 507 510 #endif 508 511 509 - ip_vs_fill_iph_skb(svc->af, skb, &iph); 510 - pptr = frag_safe_skb_hp(skb, iph.len, sizeof(_ports), _ports, &iph); 512 + pptr = frag_safe_skb_hp(skb, iph->len, sizeof(_ports), _ports, iph); 511 513 if (pptr == NULL) { 512 514 ip_vs_service_put(svc); 513 515 return NF_DROP; ··· 516 522 517 523 #ifdef CONFIG_IP_VS_IPV6 518 524 if (svc->af == AF_INET6) 519 - unicast = ipv6_addr_type(&iph.daddr.in6) & IPV6_ADDR_UNICAST; 525 + unicast = ipv6_addr_type(&iph->daddr.in6) & IPV6_ADDR_UNICAST; 520 526 else 521 527 #endif 522 - unicast = (inet_addr_type(net, iph.daddr.ip) == RTN_UNICAST); 528 + unicast = (inet_addr_type(net, iph->daddr.ip) == RTN_UNICAST); 523 529 524 530 /* if it is fwmark-based service, the cache_bypass sysctl is up 525 531 and the destination is a non-local unicast, then create ··· 529 535 int ret; 530 536 struct ip_vs_conn *cp; 531 537 unsigned int flags = (svc->flags & IP_VS_SVC_F_ONEPACKET && 532 - iph.protocol == IPPROTO_UDP)? 538 + iph->protocol == IPPROTO_UDP) ? 
533 539 IP_VS_CONN_F_ONE_PACKET : 0; 534 540 union nf_inet_addr daddr = { .all = { 0, 0, 0, 0 } }; 535 541 ··· 539 545 IP_VS_DBG(6, "%s(): create a cache_bypass entry\n", __func__); 540 546 { 541 547 struct ip_vs_conn_param p; 542 - ip_vs_conn_fill_param(svc->net, svc->af, iph.protocol, 543 - &iph.saddr, pptr[0], 544 - &iph.daddr, pptr[1], &p); 548 + ip_vs_conn_fill_param(svc->net, svc->af, iph->protocol, 549 + &iph->saddr, pptr[0], 550 + &iph->daddr, pptr[1], &p); 545 551 cp = ip_vs_conn_new(&p, &daddr, 0, 546 552 IP_VS_CONN_F_BYPASS | flags, 547 553 NULL, skb->mark); ··· 556 562 ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pd); 557 563 558 564 /* transmit the first SYN packet */ 559 - ret = cp->packet_xmit(skb, cp, pd->pp); 565 + ret = cp->packet_xmit(skb, cp, pd->pp, iph); 560 566 /* do not touch skb anymore */ 561 567 562 568 atomic_inc(&cp->in_pkts); ··· 902 908 ip_vs_fill_ip4hdr(cih, &ciph); 903 909 ciph.len += offset; 904 910 /* The embedded headers contain source and dest in reverse order */ 905 - cp = pp->conn_out_get(AF_INET, skb, &ciph, offset, 1); 911 + cp = pp->conn_out_get(AF_INET, skb, &ciph, 1); 906 912 if (!cp) 907 913 return NF_ACCEPT; 908 914 ··· 913 919 914 920 #ifdef CONFIG_IP_VS_IPV6 915 921 static int ip_vs_out_icmp_v6(struct sk_buff *skb, int *related, 916 - unsigned int hooknum) 922 + unsigned int hooknum, struct ip_vs_iphdr *ipvsh) 917 923 { 918 924 struct icmp6hdr _icmph, *ic; 919 925 struct ipv6hdr _ip6h, *ip6h; /* The ip header contained within ICMP */ ··· 922 928 struct ip_vs_protocol *pp; 923 929 union nf_inet_addr snet; 924 930 unsigned int writable; 925 - 926 - struct ip_vs_iphdr ipvsh_stack; 927 - struct ip_vs_iphdr *ipvsh = &ipvsh_stack; 928 - ip_vs_fill_iph_skb(AF_INET6, skb, ipvsh); 929 931 930 932 *related = 1; 931 933 ic = frag_safe_skb_hp(skb, ipvsh->len, sizeof(_icmph), &_icmph, ipvsh); ··· 966 976 return NF_ACCEPT; 967 977 968 978 /* The embedded headers contain source and dest in reverse order */ 969 - cp = 
pp->conn_out_get(AF_INET6, skb, &ciph, ciph.len, 1); 979 + cp = pp->conn_out_get(AF_INET6, skb, &ciph, 1); 970 980 if (!cp) 971 981 return NF_ACCEPT; 972 982 ··· 1006 1016 */ 1007 1017 static unsigned int 1008 1018 handle_response(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd, 1009 - struct ip_vs_conn *cp, int ihl) 1019 + struct ip_vs_conn *cp, struct ip_vs_iphdr *iph) 1010 1020 { 1011 1021 struct ip_vs_protocol *pp = pd->pp; 1012 1022 1013 1023 IP_VS_DBG_PKT(11, af, pp, skb, 0, "Outgoing packet"); 1014 1024 1015 - if (!skb_make_writable(skb, ihl)) 1025 + if (!skb_make_writable(skb, iph->len)) 1016 1026 goto drop; 1017 1027 1018 1028 /* mangle the packet */ 1019 - if (pp->snat_handler && !pp->snat_handler(skb, pp, cp)) 1029 + if (pp->snat_handler && !pp->snat_handler(skb, pp, cp, iph)) 1020 1030 goto drop; 1021 1031 1022 1032 #ifdef CONFIG_IP_VS_IPV6 ··· 1115 1125 if (unlikely(iph.protocol == IPPROTO_ICMPV6)) { 1116 1126 int related; 1117 1127 int verdict = ip_vs_out_icmp_v6(skb, &related, 1118 - hooknum); 1128 + hooknum, &iph); 1119 1129 1120 1130 if (related) 1121 1131 return verdict; ··· 1150 1160 /* 1151 1161 * Check if the packet belongs to an existing entry 1152 1162 */ 1153 - cp = pp->conn_out_get(af, skb, &iph, iph.len, 0); 1163 + cp = pp->conn_out_get(af, skb, &iph, 0); 1154 1164 1155 1165 if (likely(cp)) 1156 - return handle_response(af, skb, pd, cp, iph.len); 1166 + return handle_response(af, skb, pd, cp, &iph); 1157 1167 if (sysctl_nat_icmp_send(net) && 1158 1168 (pp->protocol == IPPROTO_TCP || 1159 1169 pp->protocol == IPPROTO_UDP || ··· 1365 1375 /* The embedded headers contain source and dest in reverse order. 1366 1376 * For IPIP this is error for request, not for reply. 1367 1377 */ 1368 - cp = pp->conn_in_get(AF_INET, skb, &ciph, offset, ipip ? 0 : 1); 1378 + cp = pp->conn_in_get(AF_INET, skb, &ciph, ipip ? 
0 : 1); 1369 1379 if (!cp) 1370 1380 return NF_ACCEPT; 1371 1381 ··· 1434 1444 ip_vs_in_stats(cp, skb); 1435 1445 if (IPPROTO_TCP == cih->protocol || IPPROTO_UDP == cih->protocol) 1436 1446 offset += 2 * sizeof(__u16); 1437 - verdict = ip_vs_icmp_xmit(skb, cp, pp, offset, hooknum); 1447 + verdict = ip_vs_icmp_xmit(skb, cp, pp, offset, hooknum, &ciph); 1438 1448 1439 1449 out: 1440 1450 __ip_vs_conn_put(cp); ··· 1443 1453 } 1444 1454 1445 1455 #ifdef CONFIG_IP_VS_IPV6 1446 - static int 1447 - ip_vs_in_icmp_v6(struct sk_buff *skb, int *related, unsigned int hooknum) 1456 + static int ip_vs_in_icmp_v6(struct sk_buff *skb, int *related, 1457 + unsigned int hooknum, struct ip_vs_iphdr *iph) 1448 1458 { 1449 1459 struct net *net = NULL; 1450 1460 struct ipv6hdr _ip6h, *ip6h; ··· 1454 1464 struct ip_vs_protocol *pp; 1455 1465 struct ip_vs_proto_data *pd; 1456 1466 unsigned int offs_ciph, writable, verdict; 1457 - 1458 - struct ip_vs_iphdr iph_stack; 1459 - struct ip_vs_iphdr *iph = &iph_stack; 1460 - ip_vs_fill_iph_skb(AF_INET6, skb, iph); 1461 1467 1462 1468 *related = 1; 1463 1469 ··· 1511 1525 /* The embedded headers contain source and dest in reverse order 1512 1526 * if not from localhost 1513 1527 */ 1514 - cp = pp->conn_in_get(AF_INET6, skb, &ciph, ciph.len, 1528 + cp = pp->conn_in_get(AF_INET6, skb, &ciph, 1515 1529 (hooknum == NF_INET_LOCAL_OUT) ? 
0 : 1); 1516 1530 1517 1531 if (!cp) ··· 1532 1546 IPPROTO_SCTP == ciph.protocol) 1533 1547 writable += 2 * sizeof(__u16); /* Also mangle ports */ 1534 1548 1535 - verdict = ip_vs_icmp_xmit_v6(skb, cp, pp, writable, hooknum); 1549 + verdict = ip_vs_icmp_xmit_v6(skb, cp, pp, writable, hooknum, &ciph); 1536 1550 1537 1551 __ip_vs_conn_put(cp); 1538 1552 ··· 1602 1616 } 1603 1617 if (unlikely(iph.protocol == IPPROTO_ICMPV6)) { 1604 1618 int related; 1605 - int verdict = ip_vs_in_icmp_v6(skb, &related, hooknum); 1619 + int verdict = ip_vs_in_icmp_v6(skb, &related, hooknum, 1620 + &iph); 1606 1621 1607 1622 if (related) 1608 1623 return verdict; ··· 1626 1639 /* 1627 1640 * Check if the packet belongs to an existing connection entry 1628 1641 */ 1629 - cp = pp->conn_in_get(af, skb, &iph, iph.len, 0); 1630 - 1642 + cp = pp->conn_in_get(af, skb, &iph, 0); 1631 1643 if (unlikely(!cp) && !iph.fragoffs) { 1632 1644 /* No (second) fragments need to enter here, as nf_defrag_ipv6 1633 1645 * replayed fragment zero will already have created the cp ··· 1634 1648 int v; 1635 1649 1636 1650 /* Schedule and create new connection entry into &cp */ 1637 - if (!pp->conn_schedule(af, skb, pd, &v, &cp)) 1651 + if (!pp->conn_schedule(af, skb, pd, &v, &cp, &iph)) 1638 1652 return v; 1639 1653 } 1640 1654 ··· 1672 1686 ip_vs_in_stats(cp, skb); 1673 1687 ip_vs_set_state(cp, IP_VS_DIR_INPUT, skb, pd); 1674 1688 if (cp->packet_xmit) 1675 - ret = cp->packet_xmit(skb, cp, pp); 1689 + ret = cp->packet_xmit(skb, cp, pp, &iph); 1676 1690 /* do not touch skb anymore */ 1677 1691 else { 1678 1692 IP_VS_DBG_RL("warning: packet_xmit is null"); ··· 1846 1860 if (!net_ipvs(net)->enable) 1847 1861 return NF_ACCEPT; 1848 1862 1849 - return ip_vs_in_icmp_v6(skb, &r, hooknum); 1863 + return ip_vs_in_icmp_v6(skb, &r, hooknum, &iphdr); 1850 1864 } 1851 1865 #endif 1852 1866
+4 -5
net/netfilter/ipvs/ip_vs_proto_ah_esp.c
··· 57 57 58 58 static struct ip_vs_conn * 59 59 ah_esp_conn_in_get(int af, const struct sk_buff *skb, 60 - const struct ip_vs_iphdr *iph, unsigned int proto_off, 60 + const struct ip_vs_iphdr *iph, 61 61 int inverse) 62 62 { 63 63 struct ip_vs_conn *cp; ··· 85 85 86 86 static struct ip_vs_conn * 87 87 ah_esp_conn_out_get(int af, const struct sk_buff *skb, 88 - const struct ip_vs_iphdr *iph, 89 - unsigned int proto_off, 90 - int inverse) 88 + const struct ip_vs_iphdr *iph, int inverse) 91 89 { 92 90 struct ip_vs_conn *cp; 93 91 struct ip_vs_conn_param p; ··· 108 110 109 111 static int 110 112 ah_esp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd, 111 - int *verdict, struct ip_vs_conn **cpp) 113 + int *verdict, struct ip_vs_conn **cpp, 114 + struct ip_vs_iphdr *iph) 112 115 { 113 116 /* 114 117 * AH/ESP is only related traffic. Pass the packet to IP stack.
+16 -26
net/netfilter/ipvs/ip_vs_proto_sctp.c
··· 10 10 11 11 static int 12 12 sctp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd, 13 - int *verdict, struct ip_vs_conn **cpp) 13 + int *verdict, struct ip_vs_conn **cpp, 14 + struct ip_vs_iphdr *iph) 14 15 { 15 16 struct net *net; 16 17 struct ip_vs_service *svc; 17 18 sctp_chunkhdr_t _schunkh, *sch; 18 19 sctp_sctphdr_t *sh, _sctph; 19 - struct ip_vs_iphdr iph; 20 20 21 - ip_vs_fill_iph_skb(af, skb, &iph); 22 - 23 - sh = skb_header_pointer(skb, iph.len, sizeof(_sctph), &_sctph); 21 + sh = skb_header_pointer(skb, iph->len, sizeof(_sctph), &_sctph); 24 22 if (sh == NULL) 25 23 return 0; 26 24 27 - sch = skb_header_pointer(skb, iph.len + sizeof(sctp_sctphdr_t), 25 + sch = skb_header_pointer(skb, iph->len + sizeof(sctp_sctphdr_t), 28 26 sizeof(_schunkh), &_schunkh); 29 27 if (sch == NULL) 30 28 return 0; 31 29 net = skb_net(skb); 32 30 if ((sch->type == SCTP_CID_INIT) && 33 - (svc = ip_vs_service_get(net, af, skb->mark, iph.protocol, 34 - &iph.daddr, sh->dest))) { 31 + (svc = ip_vs_service_get(net, af, skb->mark, iph->protocol, 32 + &iph->daddr, sh->dest))) { 35 33 int ignored; 36 34 37 35 if (ip_vs_todrop(net_ipvs(net))) { ··· 45 47 * Let the virtual server select a real server for the 46 48 * incoming connection, and create a connection entry. 
47 49 */ 48 - *cpp = ip_vs_schedule(svc, skb, pd, &ignored); 50 + *cpp = ip_vs_schedule(svc, skb, pd, &ignored, iph); 49 51 if (!*cpp && ignored <= 0) { 50 52 if (!ignored) 51 - *verdict = ip_vs_leave(svc, skb, pd); 53 + *verdict = ip_vs_leave(svc, skb, pd, iph); 52 54 else { 53 55 ip_vs_service_put(svc); 54 56 *verdict = NF_DROP; ··· 62 64 } 63 65 64 66 static int 65 - sctp_snat_handler(struct sk_buff *skb, 66 - struct ip_vs_protocol *pp, struct ip_vs_conn *cp) 67 + sctp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp, 68 + struct ip_vs_conn *cp, struct ip_vs_iphdr *iph) 67 69 { 68 70 sctp_sctphdr_t *sctph; 69 - unsigned int sctphoff; 71 + unsigned int sctphoff = iph->len; 70 72 struct sk_buff *iter; 71 73 __be32 crc32; 72 74 73 - struct ip_vs_iphdr iph; 74 - ip_vs_fill_iph_skb(cp->af, skb, &iph); 75 - sctphoff = iph.len; 76 - 77 75 #ifdef CONFIG_IP_VS_IPV6 78 - if (cp->af == AF_INET6 && iph.fragoffs) 76 + if (cp->af == AF_INET6 && iph->fragoffs) 79 77 return 1; 80 78 #endif 81 79 ··· 104 110 } 105 111 106 112 static int 107 - sctp_dnat_handler(struct sk_buff *skb, 108 - struct ip_vs_protocol *pp, struct ip_vs_conn *cp) 113 + sctp_dnat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp, 114 + struct ip_vs_conn *cp, struct ip_vs_iphdr *iph) 109 115 { 110 116 sctp_sctphdr_t *sctph; 111 - unsigned int sctphoff; 117 + unsigned int sctphoff = iph->len; 112 118 struct sk_buff *iter; 113 119 __be32 crc32; 114 120 115 - struct ip_vs_iphdr iph; 116 - ip_vs_fill_iph_skb(cp->af, skb, &iph); 117 - sctphoff = iph.len; 118 - 119 121 #ifdef CONFIG_IP_VS_IPV6 120 - if (cp->af == AF_INET6 && iph.fragoffs) 122 + if (cp->af == AF_INET6 && iph->fragoffs) 121 123 return 1; 122 124 #endif 123 125
+15 -25
net/netfilter/ipvs/ip_vs_proto_tcp.c
··· 33 33 34 34 static int 35 35 tcp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd, 36 - int *verdict, struct ip_vs_conn **cpp) 36 + int *verdict, struct ip_vs_conn **cpp, 37 + struct ip_vs_iphdr *iph) 37 38 { 38 39 struct net *net; 39 40 struct ip_vs_service *svc; 40 41 struct tcphdr _tcph, *th; 41 - struct ip_vs_iphdr iph; 42 42 43 - ip_vs_fill_iph_skb(af, skb, &iph); 44 - 45 - th = skb_header_pointer(skb, iph.len, sizeof(_tcph), &_tcph); 43 + th = skb_header_pointer(skb, iph->len, sizeof(_tcph), &_tcph); 46 44 if (th == NULL) { 47 45 *verdict = NF_DROP; 48 46 return 0; ··· 48 50 net = skb_net(skb); 49 51 /* No !th->ack check to allow scheduling on SYN+ACK for Active FTP */ 50 52 if (th->syn && 51 - (svc = ip_vs_service_get(net, af, skb->mark, iph.protocol, 52 - &iph.daddr, th->dest))) { 53 + (svc = ip_vs_service_get(net, af, skb->mark, iph->protocol, 54 + &iph->daddr, th->dest))) { 53 55 int ignored; 54 56 55 57 if (ip_vs_todrop(net_ipvs(net))) { ··· 66 68 * Let the virtual server select a real server for the 67 69 * incoming connection, and create a connection entry. 
68 70 */ 69 - *cpp = ip_vs_schedule(svc, skb, pd, &ignored); 71 + *cpp = ip_vs_schedule(svc, skb, pd, &ignored, iph); 70 72 if (!*cpp && ignored <= 0) { 71 73 if (!ignored) 72 - *verdict = ip_vs_leave(svc, skb, pd); 74 + *verdict = ip_vs_leave(svc, skb, pd, iph); 73 75 else { 74 76 ip_vs_service_put(svc); 75 77 *verdict = NF_DROP; ··· 126 128 127 129 128 130 static int 129 - tcp_snat_handler(struct sk_buff *skb, 130 - struct ip_vs_protocol *pp, struct ip_vs_conn *cp) 131 + tcp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp, 132 + struct ip_vs_conn *cp, struct ip_vs_iphdr *iph) 131 133 { 132 134 struct tcphdr *tcph; 133 - unsigned int tcphoff; 135 + unsigned int tcphoff = iph->len; 134 136 int oldlen; 135 137 int payload_csum = 0; 136 138 137 - struct ip_vs_iphdr iph; 138 - ip_vs_fill_iph_skb(cp->af, skb, &iph); 139 - tcphoff = iph.len; 140 - 141 139 #ifdef CONFIG_IP_VS_IPV6 142 - if (cp->af == AF_INET6 && iph.fragoffs) 140 + if (cp->af == AF_INET6 && iph->fragoffs) 143 141 return 1; 144 142 #endif 145 143 oldlen = skb->len - tcphoff; ··· 204 210 205 211 206 212 static int 207 - tcp_dnat_handler(struct sk_buff *skb, 208 - struct ip_vs_protocol *pp, struct ip_vs_conn *cp) 213 + tcp_dnat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp, 214 + struct ip_vs_conn *cp, struct ip_vs_iphdr *iph) 209 215 { 210 216 struct tcphdr *tcph; 211 - unsigned int tcphoff; 217 + unsigned int tcphoff = iph->len; 212 218 int oldlen; 213 219 int payload_csum = 0; 214 220 215 - struct ip_vs_iphdr iph; 216 - ip_vs_fill_iph_skb(cp->af, skb, &iph); 217 - tcphoff = iph.len; 218 - 219 221 #ifdef CONFIG_IP_VS_IPV6 220 - if (cp->af == AF_INET6 && iph.fragoffs) 222 + if (cp->af == AF_INET6 && iph->fragoffs) 221 223 return 1; 222 224 #endif 223 225 oldlen = skb->len - tcphoff;
+16 -25
net/netfilter/ipvs/ip_vs_proto_udp.c
··· 30 30 31 31 static int 32 32 udp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd, 33 - int *verdict, struct ip_vs_conn **cpp) 33 + int *verdict, struct ip_vs_conn **cpp, 34 + struct ip_vs_iphdr *iph) 34 35 { 35 36 struct net *net; 36 37 struct ip_vs_service *svc; 37 38 struct udphdr _udph, *uh; 38 - struct ip_vs_iphdr iph; 39 39 40 - ip_vs_fill_iph_skb(af, skb, &iph); 41 - 42 - uh = skb_header_pointer(skb, iph.len, sizeof(_udph), &_udph); 40 + /* IPv6 fragments, only first fragment will hit this */ 41 + uh = skb_header_pointer(skb, iph->len, sizeof(_udph), &_udph); 43 42 if (uh == NULL) { 44 43 *verdict = NF_DROP; 45 44 return 0; 46 45 } 47 46 net = skb_net(skb); 48 - svc = ip_vs_service_get(net, af, skb->mark, iph.protocol, 49 - &iph.daddr, uh->dest); 47 + svc = ip_vs_service_get(net, af, skb->mark, iph->protocol, 48 + &iph->daddr, uh->dest); 50 49 if (svc) { 51 50 int ignored; 52 51 ··· 63 64 * Let the virtual server select a real server for the 64 65 * incoming connection, and create a connection entry. 
65 66 */ 66 - *cpp = ip_vs_schedule(svc, skb, pd, &ignored); 67 + *cpp = ip_vs_schedule(svc, skb, pd, &ignored, iph); 67 68 if (!*cpp && ignored <= 0) { 68 69 if (!ignored) 69 - *verdict = ip_vs_leave(svc, skb, pd); 70 + *verdict = ip_vs_leave(svc, skb, pd, iph); 70 71 else { 71 72 ip_vs_service_put(svc); 72 73 *verdict = NF_DROP; ··· 124 125 125 126 126 127 static int 127 - udp_snat_handler(struct sk_buff *skb, 128 - struct ip_vs_protocol *pp, struct ip_vs_conn *cp) 128 + udp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp, 129 + struct ip_vs_conn *cp, struct ip_vs_iphdr *iph) 129 130 { 130 131 struct udphdr *udph; 131 - unsigned int udphoff; 132 + unsigned int udphoff = iph->len; 132 133 int oldlen; 133 134 int payload_csum = 0; 134 135 135 - struct ip_vs_iphdr iph; 136 - ip_vs_fill_iph_skb(cp->af, skb, &iph); 137 - udphoff = iph.len; 138 - 139 136 #ifdef CONFIG_IP_VS_IPV6 140 - if (cp->af == AF_INET6 && iph.fragoffs) 137 + if (cp->af == AF_INET6 && iph->fragoffs) 141 138 return 1; 142 139 #endif 143 140 oldlen = skb->len - udphoff; ··· 207 212 208 213 209 214 static int 210 - udp_dnat_handler(struct sk_buff *skb, 211 - struct ip_vs_protocol *pp, struct ip_vs_conn *cp) 215 + udp_dnat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp, 216 + struct ip_vs_conn *cp, struct ip_vs_iphdr *iph) 212 217 { 213 218 struct udphdr *udph; 214 - unsigned int udphoff; 219 + unsigned int udphoff = iph->len; 215 220 int oldlen; 216 221 int payload_csum = 0; 217 222 218 - struct ip_vs_iphdr iph; 219 - ip_vs_fill_iph_skb(cp->af, skb, &iph); 220 - udphoff = iph.len; 221 - 222 223 #ifdef CONFIG_IP_VS_IPV6 223 - if (cp->af == AF_INET6 && iph.fragoffs) 224 + if (cp->af == AF_INET6 && iph->fragoffs) 224 225 return 1; 225 226 #endif 226 227 oldlen = skb->len - udphoff;
+25 -33
net/netfilter/ipvs/ip_vs_xmit.c
··· 424 424 */ 425 425 int 426 426 ip_vs_null_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, 427 - struct ip_vs_protocol *pp) 427 + struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh) 428 428 { 429 429 /* we do not touch skb and do not need pskb ptr */ 430 430 IP_VS_XMIT(NFPROTO_IPV4, skb, cp, 1); ··· 438 438 */ 439 439 int 440 440 ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, 441 - struct ip_vs_protocol *pp) 441 + struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh) 442 442 { 443 443 struct rtable *rt; /* Route to the other host */ 444 444 struct iphdr *iph = ip_hdr(skb); ··· 493 493 #ifdef CONFIG_IP_VS_IPV6 494 494 int 495 495 ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, 496 - struct ip_vs_protocol *pp) 496 + struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph) 497 497 { 498 498 struct rt6_info *rt; /* Route to the other host */ 499 - struct ip_vs_iphdr iph; 500 499 int mtu; 501 500 502 501 EnterFunction(10); 503 - ip_vs_fill_iph_skb(cp->af, skb, &iph); 504 502 505 - rt = __ip_vs_get_out_rt_v6(skb, NULL, &iph.daddr.in6, NULL, 0, 503 + rt = __ip_vs_get_out_rt_v6(skb, NULL, &iph->daddr.in6, NULL, 0, 506 504 IP_VS_RT_MODE_NON_LOCAL); 507 505 if (!rt) 508 506 goto tx_error_icmp; ··· 514 516 skb->dev = net->loopback_dev; 515 517 } 516 518 /* only send ICMP too big on first fragment */ 517 - if (!iph.fragoffs) 519 + if (!iph->fragoffs) 518 520 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); 519 521 dst_release(&rt->dst); 520 522 IP_VS_DBG_RL("%s(): frag needed\n", __func__); ··· 558 560 */ 559 561 int 560 562 ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, 561 - struct ip_vs_protocol *pp) 563 + struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh) 562 564 { 563 565 struct rtable *rt; /* Route to the other host */ 564 566 int mtu; ··· 628 630 goto tx_error_put; 629 631 630 632 /* mangle the packet */ 631 - if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp)) 633 + if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp, ipvsh)) 632 634 goto tx_error_put; 633 635 ip_hdr(skb)->daddr = cp->daddr.ip; 634 636 ip_send_check(ip_hdr(skb)); ··· 676 678 #ifdef CONFIG_IP_VS_IPV6 677 679 int 678 680 ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, 679 - struct ip_vs_protocol *pp) 681 + struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph) 680 682 { 681 683 struct rt6_info *rt; /* Route to the other host */ 682 684 int mtu; 683 685 int local; 684 - struct ip_vs_iphdr iph; 685 686 686 687 EnterFunction(10); 687 - ip_vs_fill_iph_skb(cp->af, skb, &iph); 688 688 689 689 /* check if it is a connection of no-client-port */ 690 - if (unlikely(cp->flags & IP_VS_CONN_F_NO_CPORT && !iph.fragoffs)) { 690 + if (unlikely(cp->flags & IP_VS_CONN_F_NO_CPORT && !iph->fragoffs)) { 691 691 __be16 _pt, *p; 692 - p = skb_header_pointer(skb, iph.len, sizeof(_pt), &_pt); 692 + p = skb_header_pointer(skb, iph->len, sizeof(_pt), &_pt); 693 693 if (p == NULL) 694 694 goto tx_error; 695 695 ip_vs_conn_fill_cport(cp, *p); ··· 736 740 skb->dev = net->loopback_dev; 737 741 } 738 742 /* only send ICMP too big on first fragment */ 739 - if (!iph.fragoffs) 743 + if (!iph->fragoffs) 740 744 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); 741 745 IP_VS_DBG_RL_PKT(0, AF_INET6, pp, skb, 0, 742 746 "ip_vs_nat_xmit_v6(): frag needed for"); ··· 751 755 goto tx_error_put; 752 756 753 757 /* mangle the packet */ 754 - if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp)) 758 + if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp, iph)) 755 759 goto tx_error; 756 760 ipv6_hdr(skb)->daddr = cp->daddr.in6; 757 761 ··· 812 816 */ 813 817 int 814 818 ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, 815 - struct ip_vs_protocol *pp) 819 + struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh) 816 820 { 817 821 struct netns_ipvs *ipvs = net_ipvs(skb_net(skb)); 818 822 struct rtable *rt; /* Route to the other host */ ··· 932 936 #ifdef CONFIG_IP_VS_IPV6 933 937 int 934 938 ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, 935 - struct ip_vs_protocol *pp) 939 + struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh) 936 940 { 937 941 struct rt6_info *rt; /* Route to the other host */ 938 942 struct in6_addr saddr; /* Source for tunnel */ ··· 942 946 unsigned int max_headroom; /* The extra header space needed */ 943 947 int mtu; 944 948 int ret; 945 - struct ip_vs_iphdr ipvsh; 946 949 947 950 EnterFunction(10); 948 - ip_vs_fill_iph_skb(cp->af, skb, &ipvsh); 949 951 950 952 if (!(rt = __ip_vs_get_out_rt_v6(skb, cp->dest, &cp->daddr.in6, 951 953 &saddr, 1, (IP_VS_RT_MODE_LOCAL | ··· 973 979 skb->dev = net->loopback_dev; 974 980 } 975 981 /* only send ICMP too big on first fragment */ 976 - if (!ipvsh.fragoffs) 982 + if (!ipvsh->fragoffs) 977 983 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); 978 984 IP_VS_DBG_RL("%s(): frag needed\n", __func__); 979 985 goto tx_error_put; ··· 1055 1061 */ 1056 1062 int 1057 1063 ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, 1058 - struct ip_vs_protocol *pp) 1064 + struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh) 1059 1065 { 1060 1066 struct rtable *rt; /* Route to the other host */ 1061 1067 struct iphdr *iph = ip_hdr(skb); ··· 1116 1122 #ifdef CONFIG_IP_VS_IPV6 1117 1123 int 1118 1124 ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, 1119 - struct ip_vs_protocol *pp) 1125 + struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph) 1120 1126 { 1121 1127 struct rt6_info *rt; /* Route to the other host */ 1122 1128 int mtu; 1123 - struct ip_vs_iphdr iph; 1124 1129 1125 1130 EnterFunction(10); 1126 - ip_vs_fill_iph_skb(cp->af, skb, &iph); 1127 1131 1128 1132 if (!(rt = __ip_vs_get_out_rt_v6(skb, cp->dest, &cp->daddr.in6, NULL, 1129 1133 0, (IP_VS_RT_MODE_LOCAL | ··· 1141 1149 skb->dev = net->loopback_dev; 1142 1150 } 1143 1151 /* only send ICMP too big on first fragment */ 1144 - if (!iph.fragoffs) 1152 + if (!iph->fragoffs) 1145 1153 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); 1146 1154 dst_release(&rt->dst); 1147 1155 IP_VS_DBG_RL("%s(): frag needed\n", __func__); ··· 1186 1194 */ 1187 1195 int 1188 1196 ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, 1189 - struct ip_vs_protocol *pp, int offset, unsigned int hooknum) 1197 + struct ip_vs_protocol *pp, int offset, unsigned int hooknum, 1198 + struct ip_vs_iphdr *iph) 1190 1199 { 1191 1200 struct rtable *rt; /* Route to the other host */ 1192 1201 int mtu; ··· 1202 1209 translate address/port back */ 1203 1210 if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ) { 1204 1211 if (cp->packet_xmit) 1205 - rc = cp->packet_xmit(skb, cp, pp); 1212 + rc = cp->packet_xmit(skb, cp, pp, iph); 1206 1213 else 1207 1214 rc = NF_ACCEPT; 1208 1215 /* do not touch skb anymore */ ··· 1308 1315 #ifdef CONFIG_IP_VS_IPV6 1309 1316 int 1310 1317 ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, 1311 - struct ip_vs_protocol *pp, int offset, unsigned int hooknum) 1318 + struct ip_vs_protocol *pp, int offset, unsigned int hooknum, 1319 + struct ip_vs_iphdr *iph) 1312 1320 { 1313 1321 struct rt6_info *rt; /* Route to the other host */ 1314 1322 int mtu; 1315 1323 int rc; 1316 1324 int local; 1317 1325 int rt_mode; 1318 - struct ip_vs_iphdr iph; 1319 1326 1320 1327 EnterFunction(10); 1321 - ip_vs_fill_iph_skb(cp->af, skb, &iph); 1322 1328 1323 1329 /* The ICMP packet for VS/TUN, VS/DR and LOCALNODE will be 1324 1330 forwarded directly here, because there is no need to 1325 1331 translate address/port back */ 1326 1332 if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ) { 1327 1333 if (cp->packet_xmit) 1328 - rc = cp->packet_xmit(skb, cp, pp); 1334 + rc = cp->packet_xmit(skb, cp, pp, iph); 1329 1335 else 1330 1336 rc = NF_ACCEPT; 1331 1337 /* do not touch skb anymore */ ··· 1381 1389 skb->dev = net->loopback_dev; 1382 1390 } 1383 1391 /* only send ICMP too big on first fragment */ 1384 - if (!iph.fragoffs) 1392 + if (!iph->fragoffs) 1385 1393 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); 1386 1394 IP_VS_DBG_RL("%s(): frag needed\n", __func__); 1387 1395 goto tx_error_put;
+1 -1
net/netfilter/xt_ipvs.c
··· 85 85 /* 86 86 * Check if the packet belongs to an existing entry 87 87 */ 88 - cp = pp->conn_out_get(family, skb, &iph, iph.len, 1 /* inverse */); 88 + cp = pp->conn_out_get(family, skb, &iph, 1 /* inverse */); 89 89 if (unlikely(cp == NULL)) { 90 90 match = false; 91 91 goto out;