Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

inet: includes a sock_common in request_sock

TCP listener refactoring, part 5:

We want to be able to insert request sockets (SYN_RECV) into main
ehash table instead of the per listener hash table to allow RCU
lookups and remove listener lock contention.

This patch includes the needed struct sock_common in front
of struct request_sock

This means there is no longer an IPv6-specific inet6_request_sock
structure.

The following inet_request_sock fields were renamed, as they became
macros referencing fields from struct sock_common.
The prefix ir_ was chosen to avoid name collisions.

loc_port -> ir_loc_port
loc_addr -> ir_loc_addr
rmt_addr -> ir_rmt_addr
rmt_port -> ir_rmt_port
iif -> ir_iif

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

Eric Dumazet and committed by
David S. Miller
634fb979 8a29111c

+169 -188
+2 -24
include/linux/ipv6.h
··· 115 115 return IP6CB(skb)->iif; 116 116 } 117 117 118 - struct inet6_request_sock { 119 - struct in6_addr loc_addr; 120 - struct in6_addr rmt_addr; 121 - struct sk_buff *pktopts; 122 - int iif; 123 - }; 124 - 125 118 struct tcp6_request_sock { 126 119 struct tcp_request_sock tcp6rsk_tcp; 127 - struct inet6_request_sock tcp6rsk_inet6; 128 120 }; 129 121 130 122 struct ipv6_mc_socklist; ··· 256 264 return inet_sk(__sk)->pinet6; 257 265 } 258 266 259 - static inline struct inet6_request_sock * 260 - inet6_rsk(const struct request_sock *rsk) 261 - { 262 - return (struct inet6_request_sock *)(((u8 *)rsk) + 263 - inet_rsk(rsk)->inet6_rsk_offset); 264 - } 265 - 266 - static inline u32 inet6_rsk_offset(struct request_sock *rsk) 267 - { 268 - return rsk->rsk_ops->obj_size - sizeof(struct inet6_request_sock); 269 - } 270 - 271 267 static inline struct request_sock *inet6_reqsk_alloc(struct request_sock_ops *ops) 272 268 { 273 269 struct request_sock *req = reqsk_alloc(ops); 274 270 275 - if (req != NULL) { 276 - inet_rsk(req)->inet6_rsk_offset = inet6_rsk_offset(req); 277 - inet6_rsk(req)->pktopts = NULL; 278 - } 271 + if (req) 272 + inet_rsk(req)->pktopts = NULL; 279 273 280 274 return req; 281 275 }
+9 -7
include/net/inet_sock.h
··· 70 70 71 71 struct inet_request_sock { 72 72 struct request_sock req; 73 - #if IS_ENABLED(CONFIG_IPV6) 74 - u16 inet6_rsk_offset; 75 - #endif 76 - __be16 loc_port; 77 - __be32 loc_addr; 78 - __be32 rmt_addr; 79 - __be16 rmt_port; 73 + #define ir_loc_addr req.__req_common.skc_rcv_saddr 74 + #define ir_rmt_addr req.__req_common.skc_daddr 75 + #define ir_loc_port req.__req_common.skc_num 76 + #define ir_rmt_port req.__req_common.skc_dport 77 + #define ir_v6_rmt_addr req.__req_common.skc_v6_daddr 78 + #define ir_v6_loc_addr req.__req_common.skc_v6_rcv_saddr 79 + #define ir_iif req.__req_common.skc_bound_dev_if 80 + 80 81 kmemcheck_bitfield_begin(flags); 81 82 u16 snd_wscale : 4, 82 83 rcv_wscale : 4, ··· 89 88 no_srccheck: 1; 90 89 kmemcheck_bitfield_end(flags); 91 90 struct ip_options_rcu *opt; 91 + struct sk_buff *pktopts; 92 92 }; 93 93 94 94 static inline struct inet_request_sock *inet_rsk(const struct request_sock *sk)
+1
include/net/request_sock.h
··· 48 48 /* struct request_sock - mini sock to represent a connection request 49 49 */ 50 50 struct request_sock { 51 + struct sock_common __req_common; 51 52 struct request_sock *dl_next; 52 53 u16 mss; 53 54 u8 num_retrans; /* number of retransmits */
+2 -2
include/net/tcp.h
··· 1109 1109 ireq->wscale_ok = rx_opt->wscale_ok; 1110 1110 ireq->acked = 0; 1111 1111 ireq->ecn_ok = 0; 1112 - ireq->rmt_port = tcp_hdr(skb)->source; 1113 - ireq->loc_port = tcp_hdr(skb)->dest; 1112 + ireq->ir_rmt_port = tcp_hdr(skb)->source; 1113 + ireq->ir_loc_port = tcp_hdr(skb)->dest; 1114 1114 } 1115 1115 1116 1116 void tcp_enter_memory_pressure(struct sock *sk);
+9 -9
net/dccp/ipv4.c
··· 409 409 410 410 newinet = inet_sk(newsk); 411 411 ireq = inet_rsk(req); 412 - newinet->inet_daddr = ireq->rmt_addr; 413 - newinet->inet_rcv_saddr = ireq->loc_addr; 414 - newinet->inet_saddr = ireq->loc_addr; 412 + newinet->inet_daddr = ireq->ir_rmt_addr; 413 + newinet->inet_rcv_saddr = ireq->ir_loc_addr; 414 + newinet->inet_saddr = ireq->ir_loc_addr; 415 415 newinet->inet_opt = ireq->opt; 416 416 ireq->opt = NULL; 417 417 newinet->mc_index = inet_iif(skb); ··· 516 516 const struct inet_request_sock *ireq = inet_rsk(req); 517 517 struct dccp_hdr *dh = dccp_hdr(skb); 518 518 519 - dh->dccph_checksum = dccp_v4_csum_finish(skb, ireq->loc_addr, 520 - ireq->rmt_addr); 521 - err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr, 522 - ireq->rmt_addr, 519 + dh->dccph_checksum = dccp_v4_csum_finish(skb, ireq->ir_loc_addr, 520 + ireq->ir_rmt_addr); 521 + err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr, 522 + ireq->ir_rmt_addr, 523 523 ireq->opt); 524 524 err = net_xmit_eval(err); 525 525 } ··· 641 641 goto drop_and_free; 642 642 643 643 ireq = inet_rsk(req); 644 - ireq->loc_addr = ip_hdr(skb)->daddr; 645 - ireq->rmt_addr = ip_hdr(skb)->saddr; 644 + ireq->ir_loc_addr = ip_hdr(skb)->daddr; 645 + ireq->ir_rmt_addr = ip_hdr(skb)->saddr; 646 646 647 647 /* 648 648 * Step 3: Process LISTEN state
+31 -32
net/dccp/ipv6.c
··· 216 216 217 217 static int dccp_v6_send_response(struct sock *sk, struct request_sock *req) 218 218 { 219 - struct inet6_request_sock *ireq6 = inet6_rsk(req); 219 + struct inet_request_sock *ireq = inet_rsk(req); 220 220 struct ipv6_pinfo *np = inet6_sk(sk); 221 221 struct sk_buff *skb; 222 222 struct in6_addr *final_p, final; ··· 226 226 227 227 memset(&fl6, 0, sizeof(fl6)); 228 228 fl6.flowi6_proto = IPPROTO_DCCP; 229 - fl6.daddr = ireq6->rmt_addr; 230 - fl6.saddr = ireq6->loc_addr; 229 + fl6.daddr = ireq->ir_v6_rmt_addr; 230 + fl6.saddr = ireq->ir_v6_loc_addr; 231 231 fl6.flowlabel = 0; 232 - fl6.flowi6_oif = ireq6->iif; 233 - fl6.fl6_dport = inet_rsk(req)->rmt_port; 234 - fl6.fl6_sport = inet_rsk(req)->loc_port; 232 + fl6.flowi6_oif = ireq->ir_iif; 233 + fl6.fl6_dport = ireq->ir_rmt_port; 234 + fl6.fl6_sport = ireq->ir_loc_port; 235 235 security_req_classify_flow(req, flowi6_to_flowi(&fl6)); 236 236 237 237 ··· 249 249 struct dccp_hdr *dh = dccp_hdr(skb); 250 250 251 251 dh->dccph_checksum = dccp_v6_csum_finish(skb, 252 - &ireq6->loc_addr, 253 - &ireq6->rmt_addr); 254 - fl6.daddr = ireq6->rmt_addr; 252 + &ireq->ir_v6_loc_addr, 253 + &ireq->ir_v6_rmt_addr); 254 + fl6.daddr = ireq->ir_v6_rmt_addr; 255 255 err = ip6_xmit(sk, skb, &fl6, np->opt, np->tclass); 256 256 err = net_xmit_eval(err); 257 257 } ··· 264 264 static void dccp_v6_reqsk_destructor(struct request_sock *req) 265 265 { 266 266 dccp_feat_list_purge(&dccp_rsk(req)->dreq_featneg); 267 - if (inet6_rsk(req)->pktopts != NULL) 268 - kfree_skb(inet6_rsk(req)->pktopts); 267 + kfree_skb(inet_rsk(req)->pktopts); 269 268 } 270 269 271 270 static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb) ··· 358 359 { 359 360 struct request_sock *req; 360 361 struct dccp_request_sock *dreq; 361 - struct inet6_request_sock *ireq6; 362 + struct inet_request_sock *ireq; 362 363 struct ipv6_pinfo *np = inet6_sk(sk); 363 364 const __be32 service = dccp_hdr_request(skb)->dccph_req_service; 364 365 struct 
dccp_skb_cb *dcb = DCCP_SKB_CB(skb); ··· 397 398 if (security_inet_conn_request(sk, skb, req)) 398 399 goto drop_and_free; 399 400 400 - ireq6 = inet6_rsk(req); 401 - ireq6->rmt_addr = ipv6_hdr(skb)->saddr; 402 - ireq6->loc_addr = ipv6_hdr(skb)->daddr; 401 + ireq = inet_rsk(req); 402 + ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr; 403 + ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr; 403 404 404 405 if (ipv6_opt_accepted(sk, skb) || 405 406 np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo || 406 407 np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) { 407 408 atomic_inc(&skb->users); 408 - ireq6->pktopts = skb; 409 + ireq->pktopts = skb; 409 410 } 410 - ireq6->iif = sk->sk_bound_dev_if; 411 + ireq->ir_iif = sk->sk_bound_dev_if; 411 412 412 413 /* So that link locals have meaning */ 413 414 if (!sk->sk_bound_dev_if && 414 - ipv6_addr_type(&ireq6->rmt_addr) & IPV6_ADDR_LINKLOCAL) 415 - ireq6->iif = inet6_iif(skb); 415 + ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL) 416 + ireq->ir_iif = inet6_iif(skb); 416 417 417 418 /* 418 419 * Step 3: Process LISTEN state ··· 445 446 struct request_sock *req, 446 447 struct dst_entry *dst) 447 448 { 448 - struct inet6_request_sock *ireq6 = inet6_rsk(req); 449 + struct inet_request_sock *ireq = inet_rsk(req); 449 450 struct ipv6_pinfo *newnp, *np = inet6_sk(sk); 450 451 struct inet_sock *newinet; 451 452 struct dccp6_sock *newdp6; ··· 504 505 505 506 memset(&fl6, 0, sizeof(fl6)); 506 507 fl6.flowi6_proto = IPPROTO_DCCP; 507 - fl6.daddr = ireq6->rmt_addr; 508 + fl6.daddr = ireq->ir_v6_rmt_addr; 508 509 final_p = fl6_update_dst(&fl6, np->opt, &final); 509 - fl6.saddr = ireq6->loc_addr; 510 + fl6.saddr = ireq->ir_v6_loc_addr; 510 511 fl6.flowi6_oif = sk->sk_bound_dev_if; 511 - fl6.fl6_dport = inet_rsk(req)->rmt_port; 512 - fl6.fl6_sport = inet_rsk(req)->loc_port; 512 + fl6.fl6_dport = ireq->ir_rmt_port; 513 + fl6.fl6_sport = ireq->ir_loc_port; 513 514 security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); 514 515 515 516 dst = 
ip6_dst_lookup_flow(sk, &fl6, final_p, false); ··· 537 538 538 539 memcpy(newnp, np, sizeof(struct ipv6_pinfo)); 539 540 540 - newsk->sk_v6_daddr = ireq6->rmt_addr; 541 - newnp->saddr = ireq6->loc_addr; 542 - newsk->sk_v6_rcv_saddr = ireq6->loc_addr; 543 - newsk->sk_bound_dev_if = ireq6->iif; 541 + newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr; 542 + newnp->saddr = ireq->ir_v6_loc_addr; 543 + newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr; 544 + newsk->sk_bound_dev_if = ireq->ir_iif; 544 545 545 546 /* Now IPv6 options... 546 547 ··· 553 554 554 555 /* Clone pktoptions received with SYN */ 555 556 newnp->pktoptions = NULL; 556 - if (ireq6->pktopts != NULL) { 557 - newnp->pktoptions = skb_clone(ireq6->pktopts, GFP_ATOMIC); 558 - consume_skb(ireq6->pktopts); 559 - ireq6->pktopts = NULL; 557 + if (ireq->pktopts != NULL) { 558 + newnp->pktoptions = skb_clone(ireq->pktopts, GFP_ATOMIC); 559 + consume_skb(ireq->pktopts); 560 + ireq->pktopts = NULL; 560 561 if (newnp->pktoptions) 561 562 skb_set_owner_r(newnp->pktoptions, newsk); 562 563 }
-1
net/dccp/ipv6.h
··· 25 25 26 26 struct dccp6_request_sock { 27 27 struct dccp_request_sock dccp; 28 - struct inet6_request_sock inet6; 29 28 }; 30 29 31 30 struct dccp6_timewait_sock {
+2 -2
net/dccp/minisocks.c
··· 266 266 { 267 267 struct dccp_request_sock *dreq = dccp_rsk(req); 268 268 269 - inet_rsk(req)->rmt_port = dccp_hdr(skb)->dccph_sport; 270 - inet_rsk(req)->loc_port = dccp_hdr(skb)->dccph_dport; 269 + inet_rsk(req)->ir_rmt_port = dccp_hdr(skb)->dccph_sport; 270 + inet_rsk(req)->ir_loc_port = dccp_hdr(skb)->dccph_dport; 271 271 inet_rsk(req)->acked = 0; 272 272 dreq->dreq_timestamp_echo = 0; 273 273
+2 -2
net/dccp/output.c
··· 424 424 /* Build and checksum header */ 425 425 dh = dccp_zeroed_hdr(skb, dccp_header_size); 426 426 427 - dh->dccph_sport = inet_rsk(req)->loc_port; 428 - dh->dccph_dport = inet_rsk(req)->rmt_port; 427 + dh->dccph_sport = inet_rsk(req)->ir_loc_port; 428 + dh->dccph_dport = inet_rsk(req)->ir_rmt_port; 429 429 dh->dccph_doff = (dccp_header_size + 430 430 DCCP_SKB_CB(skb)->dccpd_opt_len) / 4; 431 431 dh->dccph_type = DCCP_PKT_RESPONSE;
+12 -11
net/ipv4/inet_connection_sock.c
··· 412 412 RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, 413 413 sk->sk_protocol, 414 414 flags, 415 - (opt && opt->opt.srr) ? opt->opt.faddr : ireq->rmt_addr, 416 - ireq->loc_addr, ireq->rmt_port, inet_sk(sk)->inet_sport); 415 + (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr, 416 + ireq->ir_loc_addr, ireq->ir_rmt_port, inet_sk(sk)->inet_sport); 417 417 security_req_classify_flow(req, flowi4_to_flowi(fl4)); 418 418 rt = ip_route_output_flow(net, fl4, sk); 419 419 if (IS_ERR(rt)) ··· 448 448 flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark, 449 449 RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, 450 450 sk->sk_protocol, inet_sk_flowi_flags(sk), 451 - (opt && opt->opt.srr) ? opt->opt.faddr : ireq->rmt_addr, 452 - ireq->loc_addr, ireq->rmt_port, inet_sk(sk)->inet_sport); 451 + (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr, 452 + ireq->ir_loc_addr, ireq->ir_rmt_port, inet_sk(sk)->inet_sport); 453 453 security_req_classify_flow(req, flowi4_to_flowi(fl4)); 454 454 rt = ip_route_output_flow(net, fl4, sk); 455 455 if (IS_ERR(rt)) ··· 495 495 prev = &req->dl_next) { 496 496 const struct inet_request_sock *ireq = inet_rsk(req); 497 497 498 - if (ireq->rmt_port == rport && 499 - ireq->rmt_addr == raddr && 500 - ireq->loc_addr == laddr && 498 + if (ireq->ir_rmt_port == rport && 499 + ireq->ir_rmt_addr == raddr && 500 + ireq->ir_loc_addr == laddr && 501 501 AF_INET_FAMILY(req->rsk_ops->family)) { 502 502 WARN_ON(req->sk); 503 503 *prevp = prev; ··· 514 514 { 515 515 struct inet_connection_sock *icsk = inet_csk(sk); 516 516 struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt; 517 - const u32 h = inet_synq_hash(inet_rsk(req)->rmt_addr, inet_rsk(req)->rmt_port, 517 + const u32 h = inet_synq_hash(inet_rsk(req)->ir_rmt_addr, 518 + inet_rsk(req)->ir_rmt_port, 518 519 lopt->hash_rnd, lopt->nr_table_entries); 519 520 520 521 reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, timeout); ··· 675 674 newsk->sk_state = TCP_SYN_RECV; 676 675 
newicsk->icsk_bind_hash = NULL; 677 676 678 - inet_sk(newsk)->inet_dport = inet_rsk(req)->rmt_port; 679 - inet_sk(newsk)->inet_num = ntohs(inet_rsk(req)->loc_port); 680 - inet_sk(newsk)->inet_sport = inet_rsk(req)->loc_port; 677 + inet_sk(newsk)->inet_dport = inet_rsk(req)->ir_rmt_port; 678 + inet_sk(newsk)->inet_num = ntohs(inet_rsk(req)->ir_loc_port); 679 + inet_sk(newsk)->inet_sport = inet_rsk(req)->ir_loc_port; 681 680 newsk->sk_write_space = sk_stream_write_space; 682 681 683 682 newicsk->icsk_retransmits = 0;
+11 -11
net/ipv4/inet_diag.c
··· 679 679 #if IS_ENABLED(CONFIG_IPV6) 680 680 if (sk->sk_family == AF_INET6) { 681 681 if (req->rsk_ops->family == AF_INET6) { 682 - entry->saddr = inet6_rsk(req)->loc_addr.s6_addr32; 683 - entry->daddr = inet6_rsk(req)->rmt_addr.s6_addr32; 682 + entry->saddr = ireq->ir_v6_loc_addr.s6_addr32; 683 + entry->daddr = ireq->ir_v6_rmt_addr.s6_addr32; 684 684 } else if (req->rsk_ops->family == AF_INET) { 685 - ipv6_addr_set_v4mapped(ireq->loc_addr, 685 + ipv6_addr_set_v4mapped(ireq->ir_loc_addr, 686 686 &entry->saddr_storage); 687 - ipv6_addr_set_v4mapped(ireq->rmt_addr, 687 + ipv6_addr_set_v4mapped(ireq->ir_rmt_addr, 688 688 &entry->daddr_storage); 689 689 entry->saddr = entry->saddr_storage.s6_addr32; 690 690 entry->daddr = entry->daddr_storage.s6_addr32; ··· 692 692 } else 693 693 #endif 694 694 { 695 - entry->saddr = &ireq->loc_addr; 696 - entry->daddr = &ireq->rmt_addr; 695 + entry->saddr = &ireq->ir_loc_addr; 696 + entry->daddr = &ireq->ir_rmt_addr; 697 697 } 698 698 } 699 699 ··· 728 728 tmo = 0; 729 729 730 730 r->id.idiag_sport = inet->inet_sport; 731 - r->id.idiag_dport = ireq->rmt_port; 732 - r->id.idiag_src[0] = ireq->loc_addr; 733 - r->id.idiag_dst[0] = ireq->rmt_addr; 731 + r->id.idiag_dport = ireq->ir_rmt_port; 732 + r->id.idiag_src[0] = ireq->ir_loc_addr; 733 + r->id.idiag_dst[0] = ireq->ir_rmt_addr; 734 734 r->idiag_expires = jiffies_to_msecs(tmo); 735 735 r->idiag_rqueue = 0; 736 736 r->idiag_wqueue = 0; ··· 789 789 790 790 if (reqnum < s_reqnum) 791 791 continue; 792 - if (r->id.idiag_dport != ireq->rmt_port && 792 + if (r->id.idiag_dport != ireq->ir_rmt_port && 793 793 r->id.idiag_dport) 794 794 continue; 795 795 796 796 if (bc) { 797 797 inet_diag_req_addrs(sk, req, &entry); 798 - entry.dport = ntohs(ireq->rmt_port); 798 + entry.dport = ntohs(ireq->ir_rmt_port); 799 799 800 800 if (!inet_diag_bc_run(bc, &entry)) 801 801 continue;
+6 -6
net/ipv4/syncookies.c
··· 304 304 treq->rcv_isn = ntohl(th->seq) - 1; 305 305 treq->snt_isn = cookie; 306 306 req->mss = mss; 307 - ireq->loc_port = th->dest; 308 - ireq->rmt_port = th->source; 309 - ireq->loc_addr = ip_hdr(skb)->daddr; 310 - ireq->rmt_addr = ip_hdr(skb)->saddr; 307 + ireq->ir_loc_port = th->dest; 308 + ireq->ir_rmt_port = th->source; 309 + ireq->ir_loc_addr = ip_hdr(skb)->daddr; 310 + ireq->ir_rmt_addr = ip_hdr(skb)->saddr; 311 311 ireq->ecn_ok = ecn_ok; 312 312 ireq->snd_wscale = tcp_opt.snd_wscale; 313 313 ireq->sack_ok = tcp_opt.sack_ok; ··· 347 347 flowi4_init_output(&fl4, sk->sk_bound_dev_if, sk->sk_mark, 348 348 RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, IPPROTO_TCP, 349 349 inet_sk_flowi_flags(sk), 350 - (opt && opt->srr) ? opt->faddr : ireq->rmt_addr, 351 - ireq->loc_addr, th->source, th->dest); 350 + (opt && opt->srr) ? opt->faddr : ireq->ir_rmt_addr, 351 + ireq->ir_loc_addr, th->source, th->dest); 352 352 security_req_classify_flow(req, flowi4_to_flowi(&fl4)); 353 353 rt = ip_route_output_key(sock_net(sk), &fl4); 354 354 if (IS_ERR(rt)) {
+19 -19
net/ipv4/tcp_ipv4.c
··· 835 835 skb = tcp_make_synack(sk, dst, req, NULL); 836 836 837 837 if (skb) { 838 - __tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr); 838 + __tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr); 839 839 840 840 skb_set_queue_mapping(skb, queue_mapping); 841 - err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr, 842 - ireq->rmt_addr, 841 + err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr, 842 + ireq->ir_rmt_addr, 843 843 ireq->opt); 844 844 err = net_xmit_eval(err); 845 845 if (!tcp_rsk(req)->snt_synack && !err) ··· 972 972 { 973 973 union tcp_md5_addr *addr; 974 974 975 - addr = (union tcp_md5_addr *)&inet_rsk(req)->rmt_addr; 975 + addr = (union tcp_md5_addr *)&inet_rsk(req)->ir_rmt_addr; 976 976 return tcp_md5_do_lookup(sk, addr, AF_INET); 977 977 } 978 978 ··· 1149 1149 saddr = inet_sk(sk)->inet_saddr; 1150 1150 daddr = inet_sk(sk)->inet_daddr; 1151 1151 } else if (req) { 1152 - saddr = inet_rsk(req)->loc_addr; 1153 - daddr = inet_rsk(req)->rmt_addr; 1152 + saddr = inet_rsk(req)->ir_loc_addr; 1153 + daddr = inet_rsk(req)->ir_rmt_addr; 1154 1154 } else { 1155 1155 const struct iphdr *iph = ip_hdr(skb); 1156 1156 saddr = iph->saddr; ··· 1366 1366 kfree_skb(skb_synack); 1367 1367 return -1; 1368 1368 } 1369 - err = ip_build_and_send_pkt(skb_synack, sk, ireq->loc_addr, 1370 - ireq->rmt_addr, ireq->opt); 1369 + err = ip_build_and_send_pkt(skb_synack, sk, ireq->ir_loc_addr, 1370 + ireq->ir_rmt_addr, ireq->opt); 1371 1371 err = net_xmit_eval(err); 1372 1372 if (!err) 1373 1373 tcp_rsk(req)->snt_synack = tcp_time_stamp; ··· 1502 1502 tcp_openreq_init(req, &tmp_opt, skb); 1503 1503 1504 1504 ireq = inet_rsk(req); 1505 - ireq->loc_addr = daddr; 1506 - ireq->rmt_addr = saddr; 1505 + ireq->ir_loc_addr = daddr; 1506 + ireq->ir_rmt_addr = saddr; 1507 1507 ireq->no_srccheck = inet_sk(sk)->transparent; 1508 1508 ireq->opt = tcp_v4_save_options(skb); 1509 1509 ··· 1578 1578 fastopen_cookie_present(&valid_foc) ? 
&valid_foc : NULL); 1579 1579 1580 1580 if (skb_synack) { 1581 - __tcp_v4_send_check(skb_synack, ireq->loc_addr, ireq->rmt_addr); 1581 + __tcp_v4_send_check(skb_synack, ireq->ir_loc_addr, ireq->ir_rmt_addr); 1582 1582 skb_set_queue_mapping(skb_synack, skb_get_queue_mapping(skb)); 1583 1583 } else 1584 1584 goto drop_and_free; 1585 1585 1586 1586 if (likely(!do_fastopen)) { 1587 1587 int err; 1588 - err = ip_build_and_send_pkt(skb_synack, sk, ireq->loc_addr, 1589 - ireq->rmt_addr, ireq->opt); 1588 + err = ip_build_and_send_pkt(skb_synack, sk, ireq->ir_loc_addr, 1589 + ireq->ir_rmt_addr, ireq->opt); 1590 1590 err = net_xmit_eval(err); 1591 1591 if (err || want_cookie) 1592 1592 goto drop_and_free; ··· 1644 1644 newtp = tcp_sk(newsk); 1645 1645 newinet = inet_sk(newsk); 1646 1646 ireq = inet_rsk(req); 1647 - newinet->inet_daddr = ireq->rmt_addr; 1648 - newinet->inet_rcv_saddr = ireq->loc_addr; 1649 - newinet->inet_saddr = ireq->loc_addr; 1647 + newinet->inet_daddr = ireq->ir_rmt_addr; 1648 + newinet->inet_rcv_saddr = ireq->ir_loc_addr; 1649 + newinet->inet_saddr = ireq->ir_loc_addr; 1650 1650 inet_opt = ireq->opt; 1651 1651 rcu_assign_pointer(newinet->inet_opt, inet_opt); 1652 1652 ireq->opt = NULL; ··· 2548 2548 seq_printf(f, "%4d: %08X:%04X %08X:%04X" 2549 2549 " %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK%n", 2550 2550 i, 2551 - ireq->loc_addr, 2551 + ireq->ir_loc_addr, 2552 2552 ntohs(inet_sk(sk)->inet_sport), 2553 - ireq->rmt_addr, 2554 - ntohs(ireq->rmt_port), 2553 + ireq->ir_rmt_addr, 2554 + ntohs(ireq->ir_rmt_port), 2555 2555 TCP_SYN_RECV, 2556 2556 0, 0, /* could print option size, but that is af dependent. */ 2557 2557 1, /* timers active (only the expire timer) */
+5 -3
net/ipv4/tcp_metrics.c
··· 215 215 addr.family = req->rsk_ops->family; 216 216 switch (addr.family) { 217 217 case AF_INET: 218 - addr.addr.a4 = inet_rsk(req)->rmt_addr; 218 + addr.addr.a4 = inet_rsk(req)->ir_rmt_addr; 219 219 hash = (__force unsigned int) addr.addr.a4; 220 220 break; 221 + #if IS_ENABLED(CONFIG_IPV6) 221 222 case AF_INET6: 222 - *(struct in6_addr *)addr.addr.a6 = inet6_rsk(req)->rmt_addr; 223 - hash = ipv6_addr_hash(&inet6_rsk(req)->rmt_addr); 223 + *(struct in6_addr *)addr.addr.a6 = inet_rsk(req)->ir_v6_rmt_addr; 224 + hash = ipv6_addr_hash(&inet_rsk(req)->ir_v6_rmt_addr); 224 225 break; 226 + #endif 225 227 default: 226 228 return NULL; 227 229 }
+2 -2
net/ipv4/tcp_output.c
··· 2734 2734 th->syn = 1; 2735 2735 th->ack = 1; 2736 2736 TCP_ECN_make_synack(req, th); 2737 - th->source = ireq->loc_port; 2738 - th->dest = ireq->rmt_port; 2737 + th->source = ireq->ir_loc_port; 2738 + th->dest = ireq->ir_rmt_port; 2739 2739 /* Setting of flags are superfluous here for callers (and ECE is 2740 2740 * not even correctly set) 2741 2741 */
+13 -13
net/ipv6/inet6_connection_sock.c
··· 70 70 struct flowi6 *fl6, 71 71 const struct request_sock *req) 72 72 { 73 - struct inet6_request_sock *treq = inet6_rsk(req); 73 + struct inet_request_sock *ireq = inet_rsk(req); 74 74 struct ipv6_pinfo *np = inet6_sk(sk); 75 75 struct in6_addr *final_p, final; 76 76 struct dst_entry *dst; 77 77 78 78 memset(fl6, 0, sizeof(*fl6)); 79 79 fl6->flowi6_proto = IPPROTO_TCP; 80 - fl6->daddr = treq->rmt_addr; 80 + fl6->daddr = ireq->ir_v6_rmt_addr; 81 81 final_p = fl6_update_dst(fl6, np->opt, &final); 82 - fl6->saddr = treq->loc_addr; 83 - fl6->flowi6_oif = treq->iif; 82 + fl6->saddr = ireq->ir_v6_loc_addr; 83 + fl6->flowi6_oif = ireq->ir_iif; 84 84 fl6->flowi6_mark = sk->sk_mark; 85 - fl6->fl6_dport = inet_rsk(req)->rmt_port; 86 - fl6->fl6_sport = inet_rsk(req)->loc_port; 85 + fl6->fl6_dport = ireq->ir_rmt_port; 86 + fl6->fl6_sport = ireq->ir_loc_port; 87 87 security_req_classify_flow(req, flowi6_to_flowi(fl6)); 88 88 89 89 dst = ip6_dst_lookup_flow(sk, fl6, final_p, false); ··· 129 129 lopt->nr_table_entries)]; 130 130 (req = *prev) != NULL; 131 131 prev = &req->dl_next) { 132 - const struct inet6_request_sock *treq = inet6_rsk(req); 132 + const struct inet_request_sock *ireq = inet_rsk(req); 133 133 134 - if (inet_rsk(req)->rmt_port == rport && 134 + if (ireq->ir_rmt_port == rport && 135 135 req->rsk_ops->family == AF_INET6 && 136 - ipv6_addr_equal(&treq->rmt_addr, raddr) && 137 - ipv6_addr_equal(&treq->loc_addr, laddr) && 138 - (!treq->iif || treq->iif == iif)) { 136 + ipv6_addr_equal(&ireq->ir_v6_rmt_addr, raddr) && 137 + ipv6_addr_equal(&ireq->ir_v6_loc_addr, laddr) && 138 + (!ireq->ir_iif || ireq->ir_iif == iif)) { 139 139 WARN_ON(req->sk != NULL); 140 140 *prevp = prev; 141 141 return req; ··· 153 153 { 154 154 struct inet_connection_sock *icsk = inet_csk(sk); 155 155 struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt; 156 - const u32 h = inet6_synq_hash(&inet6_rsk(req)->rmt_addr, 157 - inet_rsk(req)->rmt_port, 156 + const u32 h = 
inet6_synq_hash(&inet_rsk(req)->ir_v6_rmt_addr, 157 + inet_rsk(req)->ir_rmt_port, 158 158 lopt->hash_rnd, lopt->nr_table_entries); 159 159 160 160 reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, timeout);
+11 -13
net/ipv6/syncookies.c
··· 150 150 { 151 151 struct tcp_options_received tcp_opt; 152 152 struct inet_request_sock *ireq; 153 - struct inet6_request_sock *ireq6; 154 153 struct tcp_request_sock *treq; 155 154 struct ipv6_pinfo *np = inet6_sk(sk); 156 155 struct tcp_sock *tp = tcp_sk(sk); ··· 186 187 goto out; 187 188 188 189 ireq = inet_rsk(req); 189 - ireq6 = inet6_rsk(req); 190 190 treq = tcp_rsk(req); 191 191 treq->listener = NULL; 192 192 ··· 193 195 goto out_free; 194 196 195 197 req->mss = mss; 196 - ireq->rmt_port = th->source; 197 - ireq->loc_port = th->dest; 198 - ireq6->rmt_addr = ipv6_hdr(skb)->saddr; 199 - ireq6->loc_addr = ipv6_hdr(skb)->daddr; 198 + ireq->ir_rmt_port = th->source; 199 + ireq->ir_loc_port = th->dest; 200 + ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr; 201 + ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr; 200 202 if (ipv6_opt_accepted(sk, skb) || 201 203 np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo || 202 204 np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) { 203 205 atomic_inc(&skb->users); 204 - ireq6->pktopts = skb; 206 + ireq->pktopts = skb; 205 207 } 206 208 207 - ireq6->iif = sk->sk_bound_dev_if; 209 + ireq->ir_iif = sk->sk_bound_dev_if; 208 210 /* So that link locals have meaning */ 209 211 if (!sk->sk_bound_dev_if && 210 - ipv6_addr_type(&ireq6->rmt_addr) & IPV6_ADDR_LINKLOCAL) 211 - ireq6->iif = inet6_iif(skb); 212 + ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL) 213 + ireq->ir_iif = inet6_iif(skb); 212 214 213 215 req->expires = 0UL; 214 216 req->num_retrans = 0; ··· 232 234 struct flowi6 fl6; 233 235 memset(&fl6, 0, sizeof(fl6)); 234 236 fl6.flowi6_proto = IPPROTO_TCP; 235 - fl6.daddr = ireq6->rmt_addr; 237 + fl6.daddr = ireq->ir_v6_rmt_addr; 236 238 final_p = fl6_update_dst(&fl6, np->opt, &final); 237 - fl6.saddr = ireq6->loc_addr; 239 + fl6.saddr = ireq->ir_v6_loc_addr; 238 240 fl6.flowi6_oif = sk->sk_bound_dev_if; 239 241 fl6.flowi6_mark = sk->sk_mark; 240 - fl6.fl6_dport = inet_rsk(req)->rmt_port; 242 + fl6.fl6_dport = 
ireq->ir_rmt_port; 241 243 fl6.fl6_sport = inet_sk(sk)->inet_sport; 242 244 security_req_classify_flow(req, flowi6_to_flowi(&fl6)); 243 245
+31 -30
net/ipv6/tcp_ipv6.c
··· 465 465 struct request_sock *req, 466 466 u16 queue_mapping) 467 467 { 468 - struct inet6_request_sock *treq = inet6_rsk(req); 468 + struct inet_request_sock *ireq = inet_rsk(req); 469 469 struct ipv6_pinfo *np = inet6_sk(sk); 470 470 struct sk_buff * skb; 471 471 int err = -ENOMEM; ··· 477 477 skb = tcp_make_synack(sk, dst, req, NULL); 478 478 479 479 if (skb) { 480 - __tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr); 480 + __tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr, 481 + &ireq->ir_v6_rmt_addr); 481 482 482 - fl6->daddr = treq->rmt_addr; 483 + fl6->daddr = ireq->ir_v6_rmt_addr; 483 484 skb_set_queue_mapping(skb, queue_mapping); 484 485 err = ip6_xmit(sk, skb, fl6, np->opt, np->tclass); 485 486 err = net_xmit_eval(err); ··· 503 502 504 503 static void tcp_v6_reqsk_destructor(struct request_sock *req) 505 504 { 506 - kfree_skb(inet6_rsk(req)->pktopts); 505 + kfree_skb(inet_rsk(req)->pktopts); 507 506 } 508 507 509 508 #ifdef CONFIG_TCP_MD5SIG ··· 522 521 static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk, 523 522 struct request_sock *req) 524 523 { 525 - return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr); 524 + return tcp_v6_md5_do_lookup(sk, &inet_rsk(req)->ir_v6_rmt_addr); 526 525 } 527 526 528 527 static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval, ··· 624 623 saddr = &inet6_sk(sk)->saddr; 625 624 daddr = &sk->sk_v6_daddr; 626 625 } else if (req) { 627 - saddr = &inet6_rsk(req)->loc_addr; 628 - daddr = &inet6_rsk(req)->rmt_addr; 626 + saddr = &inet_rsk(req)->ir_v6_loc_addr; 627 + daddr = &inet_rsk(req)->ir_v6_rmt_addr; 629 628 } else { 630 629 const struct ipv6hdr *ip6h = ipv6_hdr(skb); 631 630 saddr = &ip6h->saddr; ··· 950 949 { 951 950 struct tcp_options_received tmp_opt; 952 951 struct request_sock *req; 953 - struct inet6_request_sock *treq; 952 + struct inet_request_sock *ireq; 954 953 struct ipv6_pinfo *np = inet6_sk(sk); 955 954 struct tcp_sock *tp = tcp_sk(sk); 956 955 __u32 isn = 
TCP_SKB_CB(skb)->when; ··· 995 994 tmp_opt.tstamp_ok = tmp_opt.saw_tstamp; 996 995 tcp_openreq_init(req, &tmp_opt, skb); 997 996 998 - treq = inet6_rsk(req); 999 - treq->rmt_addr = ipv6_hdr(skb)->saddr; 1000 - treq->loc_addr = ipv6_hdr(skb)->daddr; 997 + ireq = inet_rsk(req); 998 + ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr; 999 + ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr; 1001 1000 if (!want_cookie || tmp_opt.tstamp_ok) 1002 1001 TCP_ECN_create_request(req, skb, sock_net(sk)); 1003 1002 1004 - treq->iif = sk->sk_bound_dev_if; 1003 + ireq->ir_iif = sk->sk_bound_dev_if; 1005 1004 1006 1005 /* So that link locals have meaning */ 1007 1006 if (!sk->sk_bound_dev_if && 1008 - ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL) 1009 - treq->iif = inet6_iif(skb); 1007 + ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL) 1008 + ireq->ir_iif = inet6_iif(skb); 1010 1009 1011 1010 if (!isn) { 1012 1011 if (ipv6_opt_accepted(sk, skb) || 1013 1012 np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo || 1014 1013 np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) { 1015 1014 atomic_inc(&skb->users); 1016 - treq->pktopts = skb; 1015 + ireq->pktopts = skb; 1017 1016 } 1018 1017 1019 1018 if (want_cookie) { ··· 1052 1051 * to the moment of synflood. 
1053 1052 */ 1054 1053 LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI6/%u\n", 1055 - &treq->rmt_addr, ntohs(tcp_hdr(skb)->source)); 1054 + &ireq->ir_v6_rmt_addr, ntohs(tcp_hdr(skb)->source)); 1056 1055 goto drop_and_release; 1057 1056 } 1058 1057 ··· 1087 1086 struct request_sock *req, 1088 1087 struct dst_entry *dst) 1089 1088 { 1090 - struct inet6_request_sock *treq; 1089 + struct inet_request_sock *ireq; 1091 1090 struct ipv6_pinfo *newnp, *np = inet6_sk(sk); 1092 1091 struct tcp6_sock *newtcp6sk; 1093 1092 struct inet_sock *newinet; ··· 1152 1151 return newsk; 1153 1152 } 1154 1153 1155 - treq = inet6_rsk(req); 1154 + ireq = inet_rsk(req); 1156 1155 1157 1156 if (sk_acceptq_is_full(sk)) 1158 1157 goto out_overflow; ··· 1186 1185 1187 1186 memcpy(newnp, np, sizeof(struct ipv6_pinfo)); 1188 1187 1189 - newsk->sk_v6_daddr = treq->rmt_addr; 1190 - newnp->saddr = treq->loc_addr; 1191 - newsk->sk_v6_rcv_saddr = treq->loc_addr; 1192 - newsk->sk_bound_dev_if = treq->iif; 1188 + newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr; 1189 + newnp->saddr = ireq->ir_v6_loc_addr; 1190 + newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr; 1191 + newsk->sk_bound_dev_if = ireq->ir_iif; 1193 1192 1194 1193 /* Now IPv6 options... 
1195 1194 ··· 1204 1203 1205 1204 /* Clone pktoptions received with SYN */ 1206 1205 newnp->pktoptions = NULL; 1207 - if (treq->pktopts != NULL) { 1208 - newnp->pktoptions = skb_clone(treq->pktopts, 1206 + if (ireq->pktopts != NULL) { 1207 + newnp->pktoptions = skb_clone(ireq->pktopts, 1209 1208 sk_gfp_atomic(sk, GFP_ATOMIC)); 1210 - consume_skb(treq->pktopts); 1211 - treq->pktopts = NULL; 1209 + consume_skb(ireq->pktopts); 1210 + ireq->pktopts = NULL; 1212 1211 if (newnp->pktoptions) 1213 1212 skb_set_owner_r(newnp->pktoptions, newsk); 1214 1213 } ··· 1723 1722 const struct sock *sk, struct request_sock *req, int i, kuid_t uid) 1724 1723 { 1725 1724 int ttd = req->expires - jiffies; 1726 - const struct in6_addr *src = &inet6_rsk(req)->loc_addr; 1727 - const struct in6_addr *dest = &inet6_rsk(req)->rmt_addr; 1725 + const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr; 1726 + const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr; 1728 1727 1729 1728 if (ttd < 0) 1730 1729 ttd = 0; ··· 1735 1734 i, 1736 1735 src->s6_addr32[0], src->s6_addr32[1], 1737 1736 src->s6_addr32[2], src->s6_addr32[3], 1738 - ntohs(inet_rsk(req)->loc_port), 1737 + ntohs(inet_rsk(req)->ir_loc_port), 1739 1738 dest->s6_addr32[0], dest->s6_addr32[1], 1740 1739 dest->s6_addr32[2], dest->s6_addr32[3], 1741 - ntohs(inet_rsk(req)->rmt_port), 1740 + ntohs(inet_rsk(req)->ir_rmt_port), 1742 1741 TCP_SYN_RECV, 1743 1742 0,0, /* could print option size, but that is af dependent. */ 1744 1743 1, /* timers active (only the expire timer) */
+1 -1
net/netlabel/netlabel_kapi.c
··· 817 817 switch (req->rsk_ops->family) { 818 818 case AF_INET: 819 819 entry = netlbl_domhsh_getentry_af4(secattr->domain, 820 - inet_rsk(req)->rmt_addr); 820 + inet_rsk(req)->ir_rmt_addr); 821 821 if (entry == NULL) { 822 822 ret_val = -ENOENT; 823 823 goto req_setattr_return;