Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[NET]: Introduce inet_connection_sock

This creates struct inet_connection_sock, moving members out of struct
tcp_sock that are shareable with other INET connection-oriented
protocols, such as DCCP, which in my private tree already uses most of
these members.

The functions that operate on these members were renamed, using an
inet_csk_ prefix while not yet being moved to a new file, so as to
ease the review of these changes.

Signed-off-by: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

Arnaldo Carvalho de Melo and committed by
David S. Miller
463c84b9 87d11ceb

+692 -588
-2
include/linux/ip.h
··· 128 128 return (struct inet_request_sock *)sk; 129 129 } 130 130 131 - struct inet_bind_bucket; 132 131 struct ipv6_pinfo; 133 132 134 133 struct inet_sock { ··· 157 158 int mc_index; /* Multicast device index */ 158 159 __u32 mc_addr; 159 160 struct ip_mc_socklist *mc_list; /* Group array */ 160 - struct inet_bind_bucket *bind_hash; 161 161 /* 162 162 * Following members are used to retain the infomation to build 163 163 * an ip header on each ip fragmentation while the socket is corked.
+4 -4
include/linux/ipv6.h
··· 333 333 return sk->sk_family == AF_INET6 ? __tcp_v6_rcv_saddr(sk) : NULL; 334 334 } 335 335 336 - static inline int tcp_twsk_ipv6only(const struct sock *sk) 336 + static inline int inet_twsk_ipv6only(const struct sock *sk) 337 337 { 338 338 return inet_twsk(sk)->tw_ipv6only; 339 339 } 340 340 341 - static inline int tcp_v6_ipv6only(const struct sock *sk) 341 + static inline int inet_v6_ipv6only(const struct sock *sk) 342 342 { 343 343 return likely(sk->sk_state != TCP_TIME_WAIT) ? 344 - ipv6_only_sock(sk) : tcp_twsk_ipv6only(sk); 344 + ipv6_only_sock(sk) : inet_twsk_ipv6only(sk); 345 345 } 346 346 #else 347 347 #define __ipv6_only_sock(sk) 0 ··· 360 360 #define __tcp_v6_rcv_saddr(__sk) NULL 361 361 #define tcp_v6_rcv_saddr(__sk) NULL 362 362 #define tcp_twsk_ipv6only(__sk) 0 363 - #define tcp_v6_ipv6only(__sk) 0 363 + #define inet_v6_ipv6only(__sk) 0 364 364 #endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */ 365 365 366 366 #define INET6_MATCH(__sk, __saddr, __daddr, __ports, __dif) \
+7 -32
include/linux/tcp.h
··· 177 177 178 178 #include <linux/config.h> 179 179 #include <linux/skbuff.h> 180 - #include <linux/ip.h> 181 180 #include <net/sock.h> 181 + #include <net/inet_connection_sock.h> 182 182 #include <net/inet_timewait_sock.h> 183 183 184 184 /* This defines a selective acknowledgement block. */ ··· 219 219 } 220 220 221 221 struct tcp_sock { 222 - /* inet_sock has to be the first member of tcp_sock */ 223 - struct inet_sock inet; 222 + /* inet_connection_sock has to be the first member of tcp_sock */ 223 + struct inet_connection_sock inet_conn; 224 224 int tcp_header_len; /* Bytes of tcp header to send */ 225 225 226 226 /* ··· 241 241 __u32 snd_sml; /* Last byte of the most recently transmitted small packet */ 242 242 __u32 rcv_tstamp; /* timestamp of last received ACK (for keepalives) */ 243 243 __u32 lsndtime; /* timestamp of last sent data packet (for restart window) */ 244 - /* Delayed ACK control data */ 245 - struct { 246 - __u8 pending; /* ACK is pending */ 247 - __u8 quick; /* Scheduled number of quick acks */ 248 - __u8 pingpong; /* The session is interactive */ 249 - __u8 blocked; /* Delayed ACK was blocked by socket lock*/ 250 - __u32 ato; /* Predicted tick of soft clock */ 251 - unsigned long timeout; /* Currently scheduled timeout */ 252 - __u32 lrcvtime; /* timestamp of last received data packet*/ 253 - __u16 last_seg_size; /* Size of last incoming segment */ 254 - __u16 rcv_mss; /* MSS used for delayed ACK decisions */ 255 - } ack; 256 244 257 245 /* Data for direct copy to user */ 258 246 struct { ··· 259 271 __u16 xmit_size_goal; /* Goal for segmenting output packets */ 260 272 __u16 ext_header_len; /* Network protocol overhead (IP/IPv6 options) */ 261 273 __u8 ca_state; /* State of fast-retransmit machine */ 262 - __u8 retransmits; /* Number of unrecovered RTO timeouts. 
*/ 263 274 275 + __u8 keepalive_probes; /* num of allowed keep alive probes */ 264 276 __u16 advmss; /* Advertised MSS */ 265 277 __u32 window_clamp; /* Maximal window to advertise */ 266 278 __u32 rcv_ssthresh; /* Current window clamp */ ··· 269 281 __u8 reordering; /* Packet reordering metric. */ 270 282 __u8 frto_counter; /* Number of new acks after RTO */ 271 283 272 - __u8 unused; 284 + __u8 nonagle; /* Disable Nagle algorithm? */ 273 285 __u8 defer_accept; /* User waits for some data after accept() */ 274 286 275 287 /* RTT measurement */ ··· 278 290 __u32 mdev_max; /* maximal mdev for the last rtt period */ 279 291 __u32 rttvar; /* smoothed mdev_max */ 280 292 __u32 rtt_seq; /* sequence number to update rttvar */ 281 - __u32 rto; /* retransmit timeout */ 282 293 283 294 __u32 packets_out; /* Packets which are "in flight" */ 284 295 __u32 left_out; /* Packets which leaved network */ 285 296 __u32 retrans_out; /* Retransmitted packets out */ 286 - __u8 backoff; /* backoff */ 287 297 /* 288 298 * Options received (usually on last packet, some only on SYN packets). 289 299 */ 290 - __u8 nonagle; /* Disable Nagle algorithm? */ 291 - __u8 keepalive_probes; /* num of allowed keep alive probes */ 292 - 293 - __u8 probes_out; /* unanswered 0 window probes */ 294 300 struct tcp_options_received rx_opt; 295 301 296 302 /* ··· 296 314 __u16 snd_cwnd_clamp; /* Do not allow snd_cwnd to grow above this */ 297 315 __u32 snd_cwnd_used; 298 316 __u32 snd_cwnd_stamp; 299 - 300 - /* Two commonly used timers in both sender and receiver paths. 
*/ 301 - unsigned long timeout; 302 - struct timer_list retransmit_timer; /* Resend (no ack) */ 303 - struct timer_list delack_timer; /* Ack delay */ 304 317 305 318 struct sk_buff_head out_of_order_queue; /* Out of order segments go here */ 306 319 ··· 311 334 struct tcp_sack_block duplicate_sack[1]; /* D-SACK block */ 312 335 struct tcp_sack_block selective_acks[4]; /* The SACKS themselves*/ 313 336 314 - __u8 syn_retries; /* num of allowed syn retries */ 337 + __u8 probes_out; /* unanswered 0 window probes */ 315 338 __u8 ecn_flags; /* ECN status bits. */ 316 339 __u16 prior_ssthresh; /* ssthresh saved at recovery start */ 317 340 __u32 lost_out; /* Lost packets */ ··· 326 349 int undo_retrans; /* number of undoable retransmissions. */ 327 350 __u32 urg_seq; /* Seq of received urgent pointer */ 328 351 __u16 urg_data; /* Saved octet of OOB data and control flags */ 329 - __u8 pending; /* Scheduled timer event */ 330 352 __u8 urg_mode; /* In urgent mode */ 353 + /* ONE BYTE HOLE, TRY TO PACK! */ 331 354 __u32 snd_up; /* Urgent pointer */ 332 355 333 356 __u32 total_retrans; /* Total retransmits for entire connection */ 334 - 335 - struct request_sock_queue accept_queue; /* FIFO of established children */ 336 357 337 358 unsigned int keepalive_time; /* time before keep alive takes place */ 338 359 unsigned int keepalive_intvl; /* time interval between keep alive probes */
+86
include/net/inet_connection_sock.h
··· 1 + /* 2 + * NET Generic infrastructure for INET connection oriented protocols. 3 + * 4 + * Definitions for inet_connection_sock 5 + * 6 + * Authors: Many people, see the TCP sources 7 + * 8 + * From code originally in TCP 9 + * 10 + * This program is free software; you can redistribute it and/or 11 + * modify it under the terms of the GNU General Public License 12 + * as published by the Free Software Foundation; either version 13 + * 2 of the License, or (at your option) any later version. 14 + */ 15 + #ifndef _INET_CONNECTION_SOCK_H 16 + #define _INET_CONNECTION_SOCK_H 17 + 18 + #include <linux/ip.h> 19 + #include <linux/timer.h> 20 + #include <net/request_sock.h> 21 + 22 + struct inet_bind_bucket; 23 + struct inet_hashinfo; 24 + 25 + /** inet_connection_sock - INET connection oriented sock 26 + * 27 + * @icsk_accept_queue: FIFO of established children 28 + * @icsk_bind_hash: Bind node 29 + * @icsk_timeout: Timeout 30 + * @icsk_retransmit_timer: Resend (no ack) 31 + * @icsk_rto: Retransmit timeout 32 + * @icsk_retransmits: Number of unrecovered [RTO] timeouts 33 + * @icsk_pending: Scheduled timer event 34 + * @icsk_backoff: Backoff 35 + * @icsk_syn_retries: Number of allowed SYN (or equivalent) retries 36 + * @icsk_ack: Delayed ACK control data 37 + */ 38 + struct inet_connection_sock { 39 + /* inet_sock has to be the first member! 
*/ 40 + struct inet_sock icsk_inet; 41 + struct request_sock_queue icsk_accept_queue; 42 + struct inet_bind_bucket *icsk_bind_hash; 43 + unsigned long icsk_timeout; 44 + struct timer_list icsk_retransmit_timer; 45 + struct timer_list icsk_delack_timer; 46 + __u32 icsk_rto; 47 + __u8 icsk_retransmits; 48 + __u8 icsk_pending; 49 + __u8 icsk_backoff; 50 + __u8 icsk_syn_retries; 51 + struct { 52 + __u8 pending; /* ACK is pending */ 53 + __u8 quick; /* Scheduled number of quick acks */ 54 + __u8 pingpong; /* The session is interactive */ 55 + __u8 blocked; /* Delayed ACK was blocked by socket lock */ 56 + __u32 ato; /* Predicted tick of soft clock */ 57 + unsigned long timeout; /* Currently scheduled timeout */ 58 + __u32 lrcvtime; /* timestamp of last received data packet */ 59 + __u16 last_seg_size; /* Size of last incoming segment */ 60 + __u16 rcv_mss; /* MSS used for delayed ACK decisions */ 61 + } icsk_ack; 62 + }; 63 + 64 + static inline struct inet_connection_sock *inet_csk(const struct sock *sk) 65 + { 66 + return (struct inet_connection_sock *)sk; 67 + } 68 + 69 + extern void inet_csk_init_xmit_timers(struct sock *sk, 70 + void (*retransmit_handler)(unsigned long), 71 + void (*delack_handler)(unsigned long), 72 + void (*keepalive_handler)(unsigned long)); 73 + extern void inet_csk_clear_xmit_timers(struct sock *sk); 74 + 75 + extern struct request_sock *inet_csk_search_req(const struct sock *sk, 76 + struct request_sock ***prevp, 77 + const __u16 rport, 78 + const __u32 raddr, 79 + const __u32 laddr); 80 + extern int inet_csk_get_port(struct inet_hashinfo *hashinfo, 81 + struct sock *sk, unsigned short snum); 82 + 83 + extern struct dst_entry* inet_csk_route_req(struct sock *sk, 84 + const struct request_sock *req); 85 + 86 + #endif /* _INET_CONNECTION_SOCK_H */
+3 -3
include/net/inet_hashtables.h
··· 17 17 #include <linux/config.h> 18 18 19 19 #include <linux/interrupt.h> 20 - #include <linux/ip.h> 21 20 #include <linux/ipv6.h> 22 21 #include <linux/list.h> 23 22 #include <linux/slab.h> ··· 25 26 #include <linux/types.h> 26 27 #include <linux/wait.h> 27 28 29 + #include <net/inet_connection_sock.h> 28 30 #include <net/sock.h> 29 31 #include <net/tcp_states.h> 30 32 ··· 185 185 struct inet_bind_bucket *tb; 186 186 187 187 spin_lock(&head->lock); 188 - tb = inet_sk(sk)->bind_hash; 188 + tb = inet_csk(sk)->icsk_bind_hash; 189 189 sk_add_bind_node(child, &tb->owners); 190 - inet_sk(child)->bind_hash = tb; 190 + inet_csk(child)->icsk_bind_hash = tb; 191 191 spin_unlock(&head->lock); 192 192 } 193 193
+3 -3
include/net/request_sock.h
··· 224 224 return prev_qlen; 225 225 } 226 226 227 - static inline int reqsk_queue_len(struct request_sock_queue *queue) 227 + static inline int reqsk_queue_len(const struct request_sock_queue *queue) 228 228 { 229 229 return queue->listen_opt != NULL ? queue->listen_opt->qlen : 0; 230 230 } 231 231 232 - static inline int reqsk_queue_len_young(struct request_sock_queue *queue) 232 + static inline int reqsk_queue_len_young(const struct request_sock_queue *queue) 233 233 { 234 234 return queue->listen_opt->qlen_young; 235 235 } 236 236 237 - static inline int reqsk_queue_is_full(struct request_sock_queue *queue) 237 + static inline int reqsk_queue_is_full(const struct request_sock_queue *queue) 238 238 { 239 239 return queue->listen_opt->qlen >> queue->listen_opt->max_qlen_log; 240 240 }
-3
include/net/sock.h
··· 493 493 494 494 struct request_sock_ops; 495 495 496 - /* Here is the right place to enable sock refcounting debugging */ 497 - //#define SOCK_REFCNT_DEBUG 498 - 499 496 /* Networking protocol blocks we attach to sockets. 500 497 * socket layer -> transport layer interface 501 498 * transport -> network interface is defined by struct inet_proto
+111 -111
include/net/tcp.h
··· 19 19 #define _TCP_H 20 20 21 21 #define TCP_DEBUG 1 22 + #define INET_CSK_DEBUG 1 22 23 #define FASTRETRANS_DEBUG 1 23 24 24 25 /* Cancel timers, when they are not required. */ 25 - #undef TCP_CLEAR_TIMERS 26 + #undef INET_CSK_CLEAR_TIMERS 26 27 27 28 #include <linux/config.h> 28 29 #include <linux/list.h> ··· 206 205 #define TCPOLEN_SACK_BASE_ALIGNED 4 207 206 #define TCPOLEN_SACK_PERBLOCK 8 208 207 209 - #define TCP_TIME_RETRANS 1 /* Retransmit timer */ 210 - #define TCP_TIME_DACK 2 /* Delayed ack timer */ 211 - #define TCP_TIME_PROBE0 3 /* Zero window probe timer */ 212 - #define TCP_TIME_KEEPOPEN 4 /* Keepalive timer */ 208 + #define ICSK_TIME_RETRANS 1 /* Retransmit timer */ 209 + #define ICSK_TIME_DACK 2 /* Delayed ack timer */ 210 + #define ICSK_TIME_PROBE0 3 /* Zero window probe timer */ 211 + #define ICSK_TIME_KEEPOPEN 4 /* Keepalive timer */ 213 212 214 213 /* Flags in tp->nonagle */ 215 214 #define TCP_NAGLE_OFF 1 /* Nagle's algo is disabled */ ··· 258 257 extern int tcp_memory_pressure; 259 258 260 259 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 261 - #define TCP_INET_FAMILY(fam) ((fam) == AF_INET) 260 + #define AF_INET_FAMILY(fam) ((fam) == AF_INET) 262 261 #else 263 - #define TCP_INET_FAMILY(fam) 1 262 + #define AF_INET_FAMILY(fam) 1 264 263 #endif 265 264 266 265 /* ··· 373 372 374 373 extern void tcp_rcv_space_adjust(struct sock *sk); 375 374 376 - enum tcp_ack_state_t 377 - { 378 - TCP_ACK_SCHED = 1, 379 - TCP_ACK_TIMER = 2, 380 - TCP_ACK_PUSHED= 4 375 + enum inet_csk_ack_state_t { 376 + ICSK_ACK_SCHED = 1, 377 + ICSK_ACK_TIMER = 2, 378 + ICSK_ACK_PUSHED = 4 381 379 }; 382 380 383 - static inline void tcp_schedule_ack(struct tcp_sock *tp) 381 + static inline void inet_csk_schedule_ack(struct sock *sk) 384 382 { 385 - tp->ack.pending |= TCP_ACK_SCHED; 383 + inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_SCHED; 386 384 } 387 385 388 - static inline int tcp_ack_scheduled(struct tcp_sock *tp) 386 + static inline int 
inet_csk_ack_scheduled(const struct sock *sk) 389 387 { 390 - return tp->ack.pending&TCP_ACK_SCHED; 388 + return inet_csk(sk)->icsk_ack.pending & ICSK_ACK_SCHED; 391 389 } 392 390 393 - static __inline__ void tcp_dec_quickack_mode(struct tcp_sock *tp, unsigned int pkts) 391 + static inline void tcp_dec_quickack_mode(struct sock *sk, 392 + const unsigned int pkts) 394 393 { 395 - if (tp->ack.quick) { 396 - if (pkts >= tp->ack.quick) { 397 - tp->ack.quick = 0; 394 + struct inet_connection_sock *icsk = inet_csk(sk); 398 395 396 + if (icsk->icsk_ack.quick) { 397 + if (pkts >= icsk->icsk_ack.quick) { 398 + icsk->icsk_ack.quick = 0; 399 399 /* Leaving quickack mode we deflate ATO. */ 400 - tp->ack.ato = TCP_ATO_MIN; 400 + icsk->icsk_ack.ato = TCP_ATO_MIN; 401 401 } else 402 - tp->ack.quick -= pkts; 402 + icsk->icsk_ack.quick -= pkts; 403 403 } 404 404 } 405 405 406 - extern void tcp_enter_quickack_mode(struct tcp_sock *tp); 406 + extern void tcp_enter_quickack_mode(struct sock *sk); 407 407 408 - static __inline__ void tcp_delack_init(struct tcp_sock *tp) 408 + static inline void inet_csk_delack_init(struct sock *sk) 409 409 { 410 - memset(&tp->ack, 0, sizeof(tp->ack)); 410 + memset(&inet_csk(sk)->icsk_ack, 0, sizeof(inet_csk(sk)->icsk_ack)); 411 411 } 412 412 413 413 static inline void tcp_clear_options(struct tcp_options_received *rx_opt) ··· 442 440 443 441 extern void tcp_close(struct sock *sk, 444 442 long timeout); 445 - extern struct sock * tcp_accept(struct sock *sk, int flags, int *err); 443 + extern struct sock * inet_csk_accept(struct sock *sk, int flags, int *err); 446 444 extern unsigned int tcp_poll(struct file * file, struct socket *sock, struct poll_table_struct *wait); 447 445 448 446 extern int tcp_getsockopt(struct sock *sk, int level, ··· 536 534 537 535 /* tcp_timer.c */ 538 536 extern void tcp_init_xmit_timers(struct sock *); 539 - extern void tcp_clear_xmit_timers(struct sock *); 537 + static inline void tcp_clear_xmit_timers(struct sock *sk) 538 + 
{ 539 + inet_csk_clear_xmit_timers(sk); 540 + } 540 541 541 - extern void tcp_delete_keepalive_timer(struct sock *); 542 - extern void tcp_reset_keepalive_timer(struct sock *, unsigned long); 542 + extern void inet_csk_delete_keepalive_timer(struct sock *sk); 543 + extern void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long timeout); 543 544 extern unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu); 544 545 extern unsigned int tcp_current_mss(struct sock *sk, int large); 545 546 546 - #ifdef TCP_DEBUG 547 - extern const char tcp_timer_bug_msg[]; 547 + #ifdef INET_CSK_DEBUG 548 + extern const char inet_csk_timer_bug_msg[]; 548 549 #endif 549 550 550 551 /* tcp_diag.c */ ··· 559 554 extern int tcp_read_sock(struct sock *sk, read_descriptor_t *desc, 560 555 sk_read_actor_t recv_actor); 561 556 562 - static inline void tcp_clear_xmit_timer(struct sock *sk, int what) 557 + static inline void inet_csk_clear_xmit_timer(struct sock *sk, const int what) 563 558 { 564 - struct tcp_sock *tp = tcp_sk(sk); 559 + struct inet_connection_sock *icsk = inet_csk(sk); 565 560 566 - switch (what) { 567 - case TCP_TIME_RETRANS: 568 - case TCP_TIME_PROBE0: 569 - tp->pending = 0; 570 - 571 - #ifdef TCP_CLEAR_TIMERS 572 - sk_stop_timer(sk, &tp->retransmit_timer); 561 + if (what == ICSK_TIME_RETRANS || what == ICSK_TIME_PROBE0) { 562 + icsk->icsk_pending = 0; 563 + #ifdef INET_CSK_CLEAR_TIMERS 564 + sk_stop_timer(sk, &icsk->icsk_retransmit_timer); 573 565 #endif 574 - break; 575 - case TCP_TIME_DACK: 576 - tp->ack.blocked = 0; 577 - tp->ack.pending = 0; 578 - 579 - #ifdef TCP_CLEAR_TIMERS 580 - sk_stop_timer(sk, &tp->delack_timer); 566 + } else if (what == ICSK_TIME_DACK) { 567 + icsk->icsk_ack.blocked = icsk->icsk_ack.pending = 0; 568 + #ifdef INET_CSK_CLEAR_TIMERS 569 + sk_stop_timer(sk, &icsk->icsk_delack_timer); 581 570 #endif 582 - break; 583 - default: 584 - #ifdef TCP_DEBUG 585 - printk(tcp_timer_bug_msg); 571 + } 572 + #ifdef INET_CSK_DEBUG 573 + else { 574 + 
pr_debug(inet_csk_timer_bug_msg); 575 + } 586 576 #endif 587 - return; 588 - }; 589 - 590 577 } 591 578 592 579 /* 593 580 * Reset the retransmission timer 594 581 */ 595 - static inline void tcp_reset_xmit_timer(struct sock *sk, int what, unsigned long when) 582 + static inline void inet_csk_reset_xmit_timer(struct sock *sk, const int what, 583 + unsigned long when) 596 584 { 597 - struct tcp_sock *tp = tcp_sk(sk); 585 + struct inet_connection_sock *icsk = inet_csk(sk); 598 586 599 587 if (when > TCP_RTO_MAX) { 600 - #ifdef TCP_DEBUG 601 - printk(KERN_DEBUG "reset_xmit_timer sk=%p %d when=0x%lx, caller=%p\n", sk, what, when, current_text_addr()); 588 + #ifdef INET_CSK_DEBUG 589 + pr_debug("reset_xmit_timer: sk=%p %d when=0x%lx, caller=%p\n", 590 + sk, what, when, current_text_addr()); 602 591 #endif 603 592 when = TCP_RTO_MAX; 604 593 } 605 594 606 - switch (what) { 607 - case TCP_TIME_RETRANS: 608 - case TCP_TIME_PROBE0: 609 - tp->pending = what; 610 - tp->timeout = jiffies+when; 611 - sk_reset_timer(sk, &tp->retransmit_timer, tp->timeout); 612 - break; 613 - 614 - case TCP_TIME_DACK: 615 - tp->ack.pending |= TCP_ACK_TIMER; 616 - tp->ack.timeout = jiffies+when; 617 - sk_reset_timer(sk, &tp->delack_timer, tp->ack.timeout); 618 - break; 619 - 620 - default: 621 - #ifdef TCP_DEBUG 622 - printk(tcp_timer_bug_msg); 595 + if (what == ICSK_TIME_RETRANS || what == ICSK_TIME_PROBE0) { 596 + icsk->icsk_pending = what; 597 + icsk->icsk_timeout = jiffies + when; 598 + sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout); 599 + } else if (what == ICSK_TIME_DACK) { 600 + icsk->icsk_ack.pending |= ICSK_ACK_TIMER; 601 + icsk->icsk_ack.timeout = jiffies + when; 602 + sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout); 603 + } 604 + #ifdef INET_CSK_DEBUG 605 + else { 606 + pr_debug(inet_csk_timer_bug_msg); 607 + } 623 608 #endif 624 - return; 625 - }; 626 609 } 627 610 628 611 /* Initialize RCV_MSS value. 
··· 630 637 hint = min(hint, TCP_MIN_RCVMSS); 631 638 hint = max(hint, TCP_MIN_MSS); 632 639 633 - tp->ack.rcv_mss = hint; 640 + inet_csk(sk)->icsk_ack.rcv_mss = hint; 634 641 } 635 642 636 643 static __inline__ void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd) ··· 765 772 766 773 tp->packets_out += tcp_skb_pcount(skb); 767 774 if (!orig) 768 - tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto); 775 + inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, inet_csk(sk)->icsk_rto); 769 776 } 770 777 771 778 static inline void tcp_packets_out_dec(struct tcp_sock *tp, ··· 932 939 933 940 static __inline__ void tcp_check_probe_timer(struct sock *sk, struct tcp_sock *tp) 934 941 { 935 - if (!tp->packets_out && !tp->pending) 936 - tcp_reset_xmit_timer(sk, TCP_TIME_PROBE0, tp->rto); 942 + const struct inet_connection_sock *icsk = inet_csk(sk); 943 + if (!tp->packets_out && !icsk->icsk_pending) 944 + inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, icsk->icsk_rto); 937 945 } 938 946 939 947 static __inline__ void tcp_push_pending_frames(struct sock *sk, ··· 1015 1021 tp->ucopy.memory = 0; 1016 1022 } else if (skb_queue_len(&tp->ucopy.prequeue) == 1) { 1017 1023 wake_up_interruptible(sk->sk_sleep); 1018 - if (!tcp_ack_scheduled(tp)) 1019 - tcp_reset_xmit_timer(sk, TCP_TIME_DACK, (3*TCP_RTO_MIN)/4); 1024 + if (!inet_csk_ack_scheduled(sk)) 1025 + inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, 1026 + (3 * TCP_RTO_MIN) / 4); 1020 1027 } 1021 1028 return 1; 1022 1029 } ··· 1050 1055 TCP_INC_STATS(TCP_MIB_ESTABRESETS); 1051 1056 1052 1057 sk->sk_prot->unhash(sk); 1053 - if (inet_sk(sk)->bind_hash && 1058 + if (inet_csk(sk)->icsk_bind_hash && 1054 1059 !(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) 1055 1060 inet_put_port(&tcp_hashinfo, sk); 1056 1061 /* fall through */ ··· 1181 1186 return tcp_win_from_space(sk->sk_rcvbuf); 1182 1187 } 1183 1188 1184 - static inline void tcp_acceptq_queue(struct sock *sk, struct request_sock *req, 1185 - struct sock *child) 1189 + static inline void 
inet_csk_reqsk_queue_add(struct sock *sk, 1190 + struct request_sock *req, 1191 + struct sock *child) 1186 1192 { 1187 - reqsk_queue_add(&tcp_sk(sk)->accept_queue, req, sk, child); 1193 + reqsk_queue_add(&inet_csk(sk)->icsk_accept_queue, req, sk, child); 1188 1194 } 1189 1195 1190 - static inline void 1191 - tcp_synq_removed(struct sock *sk, struct request_sock *req) 1196 + static inline void inet_csk_reqsk_queue_removed(struct sock *sk, 1197 + struct request_sock *req) 1192 1198 { 1193 - if (reqsk_queue_removed(&tcp_sk(sk)->accept_queue, req) == 0) 1194 - tcp_delete_keepalive_timer(sk); 1199 + if (reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req) == 0) 1200 + inet_csk_delete_keepalive_timer(sk); 1195 1201 } 1196 1202 1197 - static inline void tcp_synq_added(struct sock *sk) 1203 + static inline void inet_csk_reqsk_queue_added(struct sock *sk, 1204 + const unsigned long timeout) 1198 1205 { 1199 - if (reqsk_queue_added(&tcp_sk(sk)->accept_queue) == 0) 1200 - tcp_reset_keepalive_timer(sk, TCP_TIMEOUT_INIT); 1206 + if (reqsk_queue_added(&inet_csk(sk)->icsk_accept_queue) == 0) 1207 + inet_csk_reset_keepalive_timer(sk, timeout); 1201 1208 } 1202 1209 1203 - static inline int tcp_synq_len(struct sock *sk) 1210 + static inline int inet_csk_reqsk_queue_len(const struct sock *sk) 1204 1211 { 1205 - return reqsk_queue_len(&tcp_sk(sk)->accept_queue); 1212 + return reqsk_queue_len(&inet_csk(sk)->icsk_accept_queue); 1206 1213 } 1207 1214 1208 - static inline int tcp_synq_young(struct sock *sk) 1215 + static inline int inet_csk_reqsk_queue_young(const struct sock *sk) 1209 1216 { 1210 - return reqsk_queue_len_young(&tcp_sk(sk)->accept_queue); 1217 + return reqsk_queue_len_young(&inet_csk(sk)->icsk_accept_queue); 1211 1218 } 1212 1219 1213 - static inline int tcp_synq_is_full(struct sock *sk) 1220 + static inline int inet_csk_reqsk_queue_is_full(const struct sock *sk) 1214 1221 { 1215 - return reqsk_queue_is_full(&tcp_sk(sk)->accept_queue); 1222 + return 
reqsk_queue_is_full(&inet_csk(sk)->icsk_accept_queue); 1216 1223 } 1217 1224 1218 - static inline void tcp_synq_unlink(struct tcp_sock *tp, struct request_sock *req, 1219 - struct request_sock **prev) 1225 + static inline void inet_csk_reqsk_queue_unlink(struct sock *sk, 1226 + struct request_sock *req, 1227 + struct request_sock **prev) 1220 1228 { 1221 - reqsk_queue_unlink(&tp->accept_queue, req, prev); 1229 + reqsk_queue_unlink(&inet_csk(sk)->icsk_accept_queue, req, prev); 1222 1230 } 1223 1231 1224 - static inline void tcp_synq_drop(struct sock *sk, struct request_sock *req, 1225 - struct request_sock **prev) 1232 + static inline void inet_csk_reqsk_queue_drop(struct sock *sk, 1233 + struct request_sock *req, 1234 + struct request_sock **prev) 1226 1235 { 1227 - tcp_synq_unlink(tcp_sk(sk), req, prev); 1228 - tcp_synq_removed(sk, req); 1236 + inet_csk_reqsk_queue_unlink(sk, req, prev); 1237 + inet_csk_reqsk_queue_removed(sk, req); 1229 1238 reqsk_free(req); 1230 1239 } 1231 1240 ··· 1264 1265 return tp->keepalive_time ? : sysctl_tcp_keepalive_time; 1265 1266 } 1266 1267 1267 - static inline int tcp_fin_time(const struct tcp_sock *tp) 1268 + static inline int tcp_fin_time(const struct sock *sk) 1268 1269 { 1269 - int fin_timeout = tp->linger2 ? : sysctl_tcp_fin_timeout; 1270 + int fin_timeout = tcp_sk(sk)->linger2 ? : sysctl_tcp_fin_timeout; 1271 + const int rto = inet_csk(sk)->icsk_rto; 1270 1272 1271 - if (fin_timeout < (tp->rto<<2) - (tp->rto>>1)) 1272 - fin_timeout = (tp->rto<<2) - (tp->rto>>1); 1273 + if (fin_timeout < (rto << 2) - (rto >> 1)) 1274 + fin_timeout = (rto << 2) - (rto >> 1); 1273 1275 1274 1276 return fin_timeout; 1275 1277 }
+1 -1
include/net/tcp_ecn.h
··· 88 88 * it is surely retransmit. It is not in ECN RFC, 89 89 * but Linux follows this rule. */ 90 90 else if (INET_ECN_is_not_ect((TCP_SKB_CB(skb)->flags))) 91 - tcp_enter_quickack_mode(tp); 91 + tcp_enter_quickack_mode((struct sock *)tp); 92 92 } 93 93 } 94 94
+7 -8
net/ipv4/inet_hashtables.c
··· 19 19 #include <linux/slab.h> 20 20 #include <linux/wait.h> 21 21 22 + #include <net/inet_connection_sock.h> 22 23 #include <net/inet_hashtables.h> 23 24 24 25 /* ··· 57 56 void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb, 58 57 const unsigned short snum) 59 58 { 60 - struct inet_sock *inet = inet_sk(sk); 61 - inet->num = snum; 59 + inet_sk(sk)->num = snum; 62 60 sk_add_bind_node(sk, &tb->owners); 63 - inet->bind_hash = tb; 61 + inet_csk(sk)->icsk_bind_hash = tb; 64 62 } 65 63 66 64 EXPORT_SYMBOL(inet_bind_hash); ··· 69 69 */ 70 70 static void __inet_put_port(struct inet_hashinfo *hashinfo, struct sock *sk) 71 71 { 72 - struct inet_sock *inet = inet_sk(sk); 73 - const int bhash = inet_bhashfn(inet->num, hashinfo->bhash_size); 72 + const int bhash = inet_bhashfn(inet_sk(sk)->num, hashinfo->bhash_size); 74 73 struct inet_bind_hashbucket *head = &hashinfo->bhash[bhash]; 75 74 struct inet_bind_bucket *tb; 76 75 77 76 spin_lock(&head->lock); 78 - tb = inet->bind_hash; 77 + tb = inet_csk(sk)->icsk_bind_hash; 79 78 __sk_del_bind_node(sk); 80 - inet->bind_hash = NULL; 81 - inet->num = 0; 79 + inet_csk(sk)->icsk_bind_hash = NULL; 80 + inet_sk(sk)->num = 0; 82 81 inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb); 83 82 spin_unlock(&head->lock); 84 83 }
+3 -2
net/ipv4/inet_timewait_sock.c
··· 56 56 struct inet_hashinfo *hashinfo) 57 57 { 58 58 const struct inet_sock *inet = inet_sk(sk); 59 + const struct inet_connection_sock *icsk = inet_csk(sk); 59 60 struct inet_ehash_bucket *ehead = &hashinfo->ehash[sk->sk_hashent]; 60 61 struct inet_bind_hashbucket *bhead; 61 62 /* Step 1: Put TW into bind hash. Original socket stays there too. ··· 65 64 */ 66 65 bhead = &hashinfo->bhash[inet_bhashfn(inet->num, hashinfo->bhash_size)]; 67 66 spin_lock(&bhead->lock); 68 - tw->tw_tb = inet->bind_hash; 69 - BUG_TRAP(inet->bind_hash); 67 + tw->tw_tb = icsk->icsk_bind_hash; 68 + BUG_TRAP(icsk->icsk_bind_hash); 70 69 inet_twsk_add_bind_node(tw, &tw->tw_tb->owners); 71 70 spin_unlock(&bhead->lock); 72 71
+1 -1
net/ipv4/syncookies.c
··· 180 180 181 181 child = tp->af_specific->syn_recv_sock(sk, skb, req, dst); 182 182 if (child) 183 - tcp_acceptq_queue(sk, req, child); 183 + inet_csk_reqsk_queue_add(sk, req, child); 184 184 else 185 185 reqsk_free(req); 186 186
+47 -43
net/ipv4/tcp.c
··· 313 313 static __inline__ unsigned int tcp_listen_poll(struct sock *sk, 314 314 poll_table *wait) 315 315 { 316 - return !reqsk_queue_empty(&tcp_sk(sk)->accept_queue) ? (POLLIN | POLLRDNORM) : 0; 316 + return !reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue) ? (POLLIN | POLLRDNORM) : 0; 317 317 } 318 318 319 319 /* ··· 458 458 int tcp_listen_start(struct sock *sk) 459 459 { 460 460 struct inet_sock *inet = inet_sk(sk); 461 - struct tcp_sock *tp = tcp_sk(sk); 462 - int rc = reqsk_queue_alloc(&tp->accept_queue, TCP_SYNQ_HSIZE); 461 + struct inet_connection_sock *icsk = inet_csk(sk); 462 + int rc = reqsk_queue_alloc(&icsk->icsk_accept_queue, TCP_SYNQ_HSIZE); 463 463 464 464 if (rc != 0) 465 465 return rc; 466 466 467 467 sk->sk_max_ack_backlog = 0; 468 468 sk->sk_ack_backlog = 0; 469 - tcp_delack_init(tp); 469 + inet_csk_delack_init(sk); 470 470 471 471 /* There is race window here: we announce ourselves listening, 472 472 * but this transition is still not validated by get_port(). ··· 484 484 } 485 485 486 486 sk->sk_state = TCP_CLOSE; 487 - __reqsk_queue_destroy(&tp->accept_queue); 487 + __reqsk_queue_destroy(&icsk->icsk_accept_queue); 488 488 return -EADDRINUSE; 489 489 } 490 490 ··· 495 495 496 496 static void tcp_listen_stop (struct sock *sk) 497 497 { 498 - struct tcp_sock *tp = tcp_sk(sk); 498 + struct inet_connection_sock *icsk = inet_csk(sk); 499 499 struct request_sock *acc_req; 500 500 struct request_sock *req; 501 501 502 - tcp_delete_keepalive_timer(sk); 502 + inet_csk_delete_keepalive_timer(sk); 503 503 504 504 /* make all the listen_opt local to us */ 505 - acc_req = reqsk_queue_yank_acceptq(&tp->accept_queue); 505 + acc_req = reqsk_queue_yank_acceptq(&icsk->icsk_accept_queue); 506 506 507 507 /* Following specs, it would be better either to send FIN 508 508 * (and enter FIN-WAIT-1, it is normal close) ··· 512 512 * To be honest, we are not able to make either 513 513 * of the variants now. 
--ANK 514 514 */ 515 - reqsk_queue_destroy(&tp->accept_queue); 515 + reqsk_queue_destroy(&icsk->icsk_accept_queue); 516 516 517 517 while ((req = acc_req) != NULL) { 518 518 struct sock *child = req->sk; ··· 1039 1039 BUG_TRAP(!skb || before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq)); 1040 1040 #endif 1041 1041 1042 - if (tcp_ack_scheduled(tp)) { 1042 + if (inet_csk_ack_scheduled(sk)) { 1043 + const struct inet_connection_sock *icsk = inet_csk(sk); 1043 1044 /* Delayed ACKs frequently hit locked sockets during bulk 1044 1045 * receive. */ 1045 - if (tp->ack.blocked || 1046 + if (icsk->icsk_ack.blocked || 1046 1047 /* Once-per-two-segments ACK was not sent by tcp_input.c */ 1047 - tp->rcv_nxt - tp->rcv_wup > tp->ack.rcv_mss || 1048 + tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss || 1048 1049 /* 1049 1050 * If this read emptied read buffer, we send ACK, if 1050 1051 * connection is not bidirectional, user drained 1051 1052 * receive buffer and there was a small segment 1052 1053 * in queue. 
1053 1054 */ 1054 - (copied > 0 && (tp->ack.pending & TCP_ACK_PUSHED) && 1055 - !tp->ack.pingpong && !atomic_read(&sk->sk_rmem_alloc))) 1055 + (copied > 0 && (icsk->icsk_ack.pending & ICSK_ACK_PUSHED) && 1056 + !icsk->icsk_ack.pingpong && !atomic_read(&sk->sk_rmem_alloc))) 1056 1057 time_to_ack = 1; 1057 1058 } 1058 1059 ··· 1570 1569 BUG_TRAP(sk_unhashed(sk)); 1571 1570 1572 1571 /* If it has not 0 inet_sk(sk)->num, it must be bound */ 1573 - BUG_TRAP(!inet_sk(sk)->num || inet_sk(sk)->bind_hash); 1572 + BUG_TRAP(!inet_sk(sk)->num || inet_csk(sk)->icsk_bind_hash); 1574 1573 1575 1574 sk->sk_prot->destroy(sk); 1576 1575 ··· 1699 1698 tcp_send_active_reset(sk, GFP_ATOMIC); 1700 1699 NET_INC_STATS_BH(LINUX_MIB_TCPABORTONLINGER); 1701 1700 } else { 1702 - int tmo = tcp_fin_time(tp); 1701 + const int tmo = tcp_fin_time(sk); 1703 1702 1704 1703 if (tmo > TCP_TIMEWAIT_LEN) { 1705 - tcp_reset_keepalive_timer(sk, tcp_fin_time(tp)); 1704 + inet_csk_reset_keepalive_timer(sk, tcp_fin_time(sk)); 1706 1705 } else { 1707 1706 atomic_inc(&tcp_orphan_count); 1708 1707 tcp_time_wait(sk, TCP_FIN_WAIT2, tmo); ··· 1747 1746 int tcp_disconnect(struct sock *sk, int flags) 1748 1747 { 1749 1748 struct inet_sock *inet = inet_sk(sk); 1749 + struct inet_connection_sock *icsk = inet_csk(sk); 1750 1750 struct tcp_sock *tp = tcp_sk(sk); 1751 1751 int err = 0; 1752 1752 int old_state = sk->sk_state; ··· 1784 1782 tp->srtt = 0; 1785 1783 if ((tp->write_seq += tp->max_window + 2) == 0) 1786 1784 tp->write_seq = 1; 1787 - tp->backoff = 0; 1785 + icsk->icsk_backoff = 0; 1788 1786 tp->snd_cwnd = 2; 1789 1787 tp->probes_out = 0; 1790 1788 tp->packets_out = 0; ··· 1792 1790 tp->snd_cwnd_cnt = 0; 1793 1791 tcp_set_ca_state(tp, TCP_CA_Open); 1794 1792 tcp_clear_retrans(tp); 1795 - tcp_delack_init(tp); 1793 + inet_csk_delack_init(sk); 1796 1794 sk->sk_send_head = NULL; 1797 1795 tp->rx_opt.saw_tstamp = 0; 1798 1796 tcp_sack_reset(&tp->rx_opt); 1799 1797 __sk_dst_reset(sk); 1800 1798 1801 - 
BUG_TRAP(!inet->num || inet->bind_hash); 1799 + BUG_TRAP(!inet->num || icsk->icsk_bind_hash); 1802 1800 1803 1801 sk->sk_error_report(sk); 1804 1802 return err; ··· 1810 1808 */ 1811 1809 static int wait_for_connect(struct sock *sk, long timeo) 1812 1810 { 1813 - struct tcp_sock *tp = tcp_sk(sk); 1811 + struct inet_connection_sock *icsk = inet_csk(sk); 1814 1812 DEFINE_WAIT(wait); 1815 1813 int err; 1816 1814 ··· 1832 1830 prepare_to_wait_exclusive(sk->sk_sleep, &wait, 1833 1831 TASK_INTERRUPTIBLE); 1834 1832 release_sock(sk); 1835 - if (reqsk_queue_empty(&tp->accept_queue)) 1833 + if (reqsk_queue_empty(&icsk->icsk_accept_queue)) 1836 1834 timeo = schedule_timeout(timeo); 1837 1835 lock_sock(sk); 1838 1836 err = 0; 1839 - if (!reqsk_queue_empty(&tp->accept_queue)) 1837 + if (!reqsk_queue_empty(&icsk->icsk_accept_queue)) 1840 1838 break; 1841 1839 err = -EINVAL; 1842 1840 if (sk->sk_state != TCP_LISTEN) ··· 1856 1854 * This will accept the next outstanding connection. 1857 1855 */ 1858 1856 1859 - struct sock *tcp_accept(struct sock *sk, int flags, int *err) 1857 + struct sock *inet_csk_accept(struct sock *sk, int flags, int *err) 1860 1858 { 1861 - struct tcp_sock *tp = tcp_sk(sk); 1859 + struct inet_connection_sock *icsk = inet_csk(sk); 1862 1860 struct sock *newsk; 1863 1861 int error; 1864 1862 ··· 1872 1870 goto out_err; 1873 1871 1874 1872 /* Find already established connection */ 1875 - if (reqsk_queue_empty(&tp->accept_queue)) { 1873 + if (reqsk_queue_empty(&icsk->icsk_accept_queue)) { 1876 1874 long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); 1877 1875 1878 1876 /* If this is a non blocking socket don't sleep */ ··· 1885 1883 goto out_err; 1886 1884 } 1887 1885 1888 - newsk = reqsk_queue_get_child(&tp->accept_queue, sk); 1886 + newsk = reqsk_queue_get_child(&icsk->icsk_accept_queue, sk); 1889 1887 BUG_TRAP(newsk->sk_state != TCP_SYN_RECV); 1890 1888 out: 1891 1889 release_sock(sk); ··· 1903 1901 int optlen) 1904 1902 { 1905 1903 struct tcp_sock *tp = 
tcp_sk(sk); 1904 + struct inet_connection_sock *icsk = inet_csk(sk); 1906 1905 int val; 1907 1906 int err = 0; 1908 1907 ··· 2002 1999 elapsed = tp->keepalive_time - elapsed; 2003 2000 else 2004 2001 elapsed = 0; 2005 - tcp_reset_keepalive_timer(sk, elapsed); 2002 + inet_csk_reset_keepalive_timer(sk, elapsed); 2006 2003 } 2007 2004 } 2008 2005 break; ··· 2022 2019 if (val < 1 || val > MAX_TCP_SYNCNT) 2023 2020 err = -EINVAL; 2024 2021 else 2025 - tp->syn_retries = val; 2022 + icsk->icsk_syn_retries = val; 2026 2023 break; 2027 2024 2028 2025 case TCP_LINGER2: ··· 2061 2058 2062 2059 case TCP_QUICKACK: 2063 2060 if (!val) { 2064 - tp->ack.pingpong = 1; 2061 + icsk->icsk_ack.pingpong = 1; 2065 2062 } else { 2066 - tp->ack.pingpong = 0; 2063 + icsk->icsk_ack.pingpong = 0; 2067 2064 if ((1 << sk->sk_state) & 2068 2065 (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) && 2069 - tcp_ack_scheduled(tp)) { 2070 - tp->ack.pending |= TCP_ACK_PUSHED; 2066 + inet_csk_ack_scheduled(sk)) { 2067 + icsk->icsk_ack.pending |= ICSK_ACK_PUSHED; 2071 2068 cleanup_rbuf(sk, 1); 2072 2069 if (!(val & 1)) 2073 - tp->ack.pingpong = 1; 2070 + icsk->icsk_ack.pingpong = 1; 2074 2071 } 2075 2072 } 2076 2073 break; ··· 2087 2084 void tcp_get_info(struct sock *sk, struct tcp_info *info) 2088 2085 { 2089 2086 struct tcp_sock *tp = tcp_sk(sk); 2087 + const struct inet_connection_sock *icsk = inet_csk(sk); 2090 2088 u32 now = tcp_time_stamp; 2091 2089 2092 2090 memset(info, 0, sizeof(*info)); 2093 2091 2094 2092 info->tcpi_state = sk->sk_state; 2095 2093 info->tcpi_ca_state = tp->ca_state; 2096 - info->tcpi_retransmits = tp->retransmits; 2094 + info->tcpi_retransmits = icsk->icsk_retransmits; 2097 2095 info->tcpi_probes = tp->probes_out; 2098 - info->tcpi_backoff = tp->backoff; 2096 + info->tcpi_backoff = icsk->icsk_backoff; 2099 2097 2100 2098 if (tp->rx_opt.tstamp_ok) 2101 2099 info->tcpi_options |= TCPI_OPT_TIMESTAMPS; ··· 2111 2107 if (tp->ecn_flags&TCP_ECN_OK) 2112 2108 info->tcpi_options |= TCPI_OPT_ECN; 
2113 2109 2114 - info->tcpi_rto = jiffies_to_usecs(tp->rto); 2115 - info->tcpi_ato = jiffies_to_usecs(tp->ack.ato); 2110 + info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto); 2111 + info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato); 2116 2112 info->tcpi_snd_mss = tp->mss_cache; 2117 - info->tcpi_rcv_mss = tp->ack.rcv_mss; 2113 + info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss; 2118 2114 2119 2115 info->tcpi_unacked = tp->packets_out; 2120 2116 info->tcpi_sacked = tp->sacked_out; ··· 2123 2119 info->tcpi_fackets = tp->fackets_out; 2124 2120 2125 2121 info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime); 2126 - info->tcpi_last_data_recv = jiffies_to_msecs(now - tp->ack.lrcvtime); 2122 + info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime); 2127 2123 info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp); 2128 2124 2129 2125 info->tcpi_pmtu = tp->pmtu_cookie; ··· 2183 2179 val = tp->keepalive_probes ? : sysctl_tcp_keepalive_probes; 2184 2180 break; 2185 2181 case TCP_SYNCNT: 2186 - val = tp->syn_retries ? : sysctl_tcp_syn_retries; 2182 + val = inet_csk(sk)->icsk_syn_retries ? : sysctl_tcp_syn_retries; 2187 2183 break; 2188 2184 case TCP_LINGER2: 2189 2185 val = tp->linger2; ··· 2213 2209 return 0; 2214 2210 } 2215 2211 case TCP_QUICKACK: 2216 - val = !tp->ack.pingpong; 2212 + val = !inet_csk(sk)->icsk_ack.pingpong; 2217 2213 break; 2218 2214 2219 2215 case TCP_CONGESTION: ··· 2344 2340 tcp_register_congestion_control(&tcp_reno); 2345 2341 } 2346 2342 2347 - EXPORT_SYMBOL(tcp_accept); 2343 + EXPORT_SYMBOL(inet_csk_accept); 2348 2344 EXPORT_SYMBOL(tcp_close); 2349 2345 EXPORT_SYMBOL(tcp_destroy_sock); 2350 2346 EXPORT_SYMBOL(tcp_disconnect);
+11 -10
net/ipv4/tcp_diag.c
··· 48 48 static int tcpdiag_fill(struct sk_buff *skb, struct sock *sk, 49 49 int ext, u32 pid, u32 seq, u16 nlmsg_flags) 50 50 { 51 - struct inet_sock *inet = inet_sk(sk); 51 + const struct inet_sock *inet = inet_sk(sk); 52 52 struct tcp_sock *tp = tcp_sk(sk); 53 + const struct inet_connection_sock *icsk = inet_csk(sk); 53 54 struct tcpdiagmsg *r; 54 55 struct nlmsghdr *nlh; 55 56 struct tcp_info *info = NULL; ··· 130 129 131 130 #define EXPIRES_IN_MS(tmo) ((tmo-jiffies)*1000+HZ-1)/HZ 132 131 133 - if (tp->pending == TCP_TIME_RETRANS) { 132 + if (icsk->icsk_pending == ICSK_TIME_RETRANS) { 134 133 r->tcpdiag_timer = 1; 135 - r->tcpdiag_retrans = tp->retransmits; 136 - r->tcpdiag_expires = EXPIRES_IN_MS(tp->timeout); 137 - } else if (tp->pending == TCP_TIME_PROBE0) { 134 + r->tcpdiag_retrans = icsk->icsk_retransmits; 135 + r->tcpdiag_expires = EXPIRES_IN_MS(icsk->icsk_timeout); 136 + } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) { 138 137 r->tcpdiag_timer = 4; 139 138 r->tcpdiag_retrans = tp->probes_out; 140 - r->tcpdiag_expires = EXPIRES_IN_MS(tp->timeout); 139 + r->tcpdiag_expires = EXPIRES_IN_MS(icsk->icsk_timeout); 141 140 } else if (timer_pending(&sk->sk_timer)) { 142 141 r->tcpdiag_timer = 2; 143 142 r->tcpdiag_retrans = tp->probes_out; ··· 498 497 { 499 498 struct tcpdiag_entry entry; 500 499 struct tcpdiagreq *r = NLMSG_DATA(cb->nlh); 501 - struct tcp_sock *tp = tcp_sk(sk); 500 + struct inet_connection_sock *icsk = inet_csk(sk); 502 501 struct listen_sock *lopt; 503 502 struct rtattr *bc = NULL; 504 503 struct inet_sock *inet = inet_sk(sk); ··· 514 513 515 514 entry.family = sk->sk_family; 516 515 517 - read_lock_bh(&tp->accept_queue.syn_wait_lock); 516 + read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock); 518 517 519 - lopt = tp->accept_queue.listen_opt; 518 + lopt = icsk->icsk_accept_queue.listen_opt; 520 519 if (!lopt || !lopt->qlen) 521 520 goto out; 522 521 ··· 573 572 } 574 573 575 574 out: 576 - read_unlock_bh(&tp->accept_queue.syn_wait_lock); 
575 + read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock); 577 576 578 577 return err; 579 578 }
+138 -128
net/ipv4/tcp_input.c
··· 114 114 /* Adapt the MSS value used to make delayed ack decision to the 115 115 * real world. 116 116 */ 117 - static inline void tcp_measure_rcv_mss(struct tcp_sock *tp, 118 - struct sk_buff *skb) 117 + static inline void tcp_measure_rcv_mss(struct sock *sk, 118 + const struct sk_buff *skb) 119 119 { 120 - unsigned int len, lss; 120 + struct inet_connection_sock *icsk = inet_csk(sk); 121 + const unsigned int lss = icsk->icsk_ack.last_seg_size; 122 + unsigned int len; 121 123 122 - lss = tp->ack.last_seg_size; 123 - tp->ack.last_seg_size = 0; 124 + icsk->icsk_ack.last_seg_size = 0; 124 125 125 126 /* skb->len may jitter because of SACKs, even if peer 126 127 * sends good full-sized frames. 127 128 */ 128 129 len = skb->len; 129 - if (len >= tp->ack.rcv_mss) { 130 - tp->ack.rcv_mss = len; 130 + if (len >= icsk->icsk_ack.rcv_mss) { 131 + icsk->icsk_ack.rcv_mss = len; 131 132 } else { 132 133 /* Otherwise, we make more careful check taking into account, 133 134 * that SACKs block is variable. ··· 148 147 * tcp header plus fixed timestamp option length. 149 148 * Resulting "len" is MSS free of SACK jitter. 
150 149 */ 151 - len -= tp->tcp_header_len; 152 - tp->ack.last_seg_size = len; 150 + len -= tcp_sk(sk)->tcp_header_len; 151 + icsk->icsk_ack.last_seg_size = len; 153 152 if (len == lss) { 154 - tp->ack.rcv_mss = len; 153 + icsk->icsk_ack.rcv_mss = len; 155 154 return; 156 155 } 157 156 } 158 - tp->ack.pending |= TCP_ACK_PUSHED; 157 + icsk->icsk_ack.pending |= ICSK_ACK_PUSHED; 159 158 } 160 159 } 161 160 162 - static void tcp_incr_quickack(struct tcp_sock *tp) 161 + static void tcp_incr_quickack(struct sock *sk) 163 162 { 164 - unsigned quickacks = tp->rcv_wnd/(2*tp->ack.rcv_mss); 163 + struct inet_connection_sock *icsk = inet_csk(sk); 164 + unsigned quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss); 165 165 166 166 if (quickacks==0) 167 167 quickacks=2; 168 - if (quickacks > tp->ack.quick) 169 - tp->ack.quick = min(quickacks, TCP_MAX_QUICKACKS); 168 + if (quickacks > icsk->icsk_ack.quick) 169 + icsk->icsk_ack.quick = min(quickacks, TCP_MAX_QUICKACKS); 170 170 } 171 171 172 - void tcp_enter_quickack_mode(struct tcp_sock *tp) 172 + void tcp_enter_quickack_mode(struct sock *sk) 173 173 { 174 - tcp_incr_quickack(tp); 175 - tp->ack.pingpong = 0; 176 - tp->ack.ato = TCP_ATO_MIN; 174 + struct inet_connection_sock *icsk = inet_csk(sk); 175 + tcp_incr_quickack(sk); 176 + icsk->icsk_ack.pingpong = 0; 177 + icsk->icsk_ack.ato = TCP_ATO_MIN; 177 178 } 178 179 179 180 /* Send ACKs quickly, if "quick" count is not exhausted 180 181 * and the session is not interactive. 181 182 */ 182 183 183 - static __inline__ int tcp_in_quickack_mode(struct tcp_sock *tp) 184 + static inline int tcp_in_quickack_mode(const struct sock *sk) 184 185 { 185 - return (tp->ack.quick && !tp->ack.pingpong); 186 + const struct inet_connection_sock *icsk = inet_csk(sk); 187 + return icsk->icsk_ack.quick && !icsk->icsk_ack.pingpong; 186 188 } 187 189 188 190 /* Buffer size and advertised window tuning. ··· 228 224 */ 229 225 230 226 /* Slow part of check#2. 
*/ 231 - static int __tcp_grow_window(struct sock *sk, struct tcp_sock *tp, 232 - struct sk_buff *skb) 227 + static int __tcp_grow_window(const struct sock *sk, struct tcp_sock *tp, 228 + const struct sk_buff *skb) 233 229 { 234 230 /* Optimize this! */ 235 231 int truesize = tcp_win_from_space(skb->truesize)/2; ··· 237 233 238 234 while (tp->rcv_ssthresh <= window) { 239 235 if (truesize <= skb->len) 240 - return 2*tp->ack.rcv_mss; 236 + return 2 * inet_csk(sk)->icsk_ack.rcv_mss; 241 237 242 238 truesize >>= 1; 243 239 window >>= 1; ··· 264 260 265 261 if (incr) { 266 262 tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr, tp->window_clamp); 267 - tp->ack.quick |= 1; 263 + inet_csk(sk)->icsk_ack.quick |= 1; 268 264 } 269 265 } 270 266 } ··· 329 325 unsigned int app_win = tp->rcv_nxt - tp->copied_seq; 330 326 int ofo_win = 0; 331 327 332 - tp->ack.quick = 0; 328 + inet_csk(sk)->icsk_ack.quick = 0; 333 329 334 330 skb_queue_walk(&tp->out_of_order_queue, skb) { 335 331 ofo_win += skb->len; ··· 350 346 app_win += ofo_win; 351 347 if (atomic_read(&sk->sk_rmem_alloc) >= 2 * sk->sk_rcvbuf) 352 348 app_win >>= 1; 353 - if (app_win > tp->ack.rcv_mss) 354 - app_win -= tp->ack.rcv_mss; 349 + if (app_win > inet_csk(sk)->icsk_ack.rcv_mss) 350 + app_win -= inet_csk(sk)->icsk_ack.rcv_mss; 355 351 app_win = max(app_win, 2U*tp->advmss); 356 352 357 353 if (!ofo_win) ··· 419 415 tp->rcv_rtt_est.time = tcp_time_stamp; 420 416 } 421 417 422 - static inline void tcp_rcv_rtt_measure_ts(struct tcp_sock *tp, struct sk_buff *skb) 418 + static inline void tcp_rcv_rtt_measure_ts(struct sock *sk, const struct sk_buff *skb) 423 419 { 420 + struct tcp_sock *tp = tcp_sk(sk); 424 421 if (tp->rx_opt.rcv_tsecr && 425 422 (TCP_SKB_CB(skb)->end_seq - 426 - TCP_SKB_CB(skb)->seq >= tp->ack.rcv_mss)) 423 + TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss)) 427 424 tcp_rcv_rtt_update(tp, tcp_time_stamp - tp->rx_opt.rcv_tsecr, 0); 428 425 } 429 426 ··· 497 492 */ 498 493 static void 
tcp_event_data_recv(struct sock *sk, struct tcp_sock *tp, struct sk_buff *skb) 499 494 { 495 + struct inet_connection_sock *icsk = inet_csk(sk); 500 496 u32 now; 501 497 502 - tcp_schedule_ack(tp); 498 + inet_csk_schedule_ack(sk); 503 499 504 - tcp_measure_rcv_mss(tp, skb); 500 + tcp_measure_rcv_mss(sk, skb); 505 501 506 502 tcp_rcv_rtt_measure(tp); 507 503 508 504 now = tcp_time_stamp; 509 505 510 - if (!tp->ack.ato) { 506 + if (!icsk->icsk_ack.ato) { 511 507 /* The _first_ data packet received, initialize 512 508 * delayed ACK engine. 513 509 */ 514 - tcp_incr_quickack(tp); 515 - tp->ack.ato = TCP_ATO_MIN; 510 + tcp_incr_quickack(sk); 511 + icsk->icsk_ack.ato = TCP_ATO_MIN; 516 512 } else { 517 - int m = now - tp->ack.lrcvtime; 513 + int m = now - icsk->icsk_ack.lrcvtime; 518 514 519 515 if (m <= TCP_ATO_MIN/2) { 520 516 /* The fastest case is the first. */ 521 - tp->ack.ato = (tp->ack.ato>>1) + TCP_ATO_MIN/2; 522 - } else if (m < tp->ack.ato) { 523 - tp->ack.ato = (tp->ack.ato>>1) + m; 524 - if (tp->ack.ato > tp->rto) 525 - tp->ack.ato = tp->rto; 526 - } else if (m > tp->rto) { 517 + icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + TCP_ATO_MIN / 2; 518 + } else if (m < icsk->icsk_ack.ato) { 519 + icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + m; 520 + if (icsk->icsk_ack.ato > icsk->icsk_rto) 521 + icsk->icsk_ack.ato = icsk->icsk_rto; 522 + } else if (m > icsk->icsk_rto) { 527 523 /* Too long gap. Apparently sender falled to 528 524 * restart window, so that we send ACKs quickly. 529 525 */ 530 - tcp_incr_quickack(tp); 526 + tcp_incr_quickack(sk); 531 527 sk_stream_mem_reclaim(sk); 532 528 } 533 529 } 534 - tp->ack.lrcvtime = now; 530 + icsk->icsk_ack.lrcvtime = now; 535 531 536 532 TCP_ECN_check_ce(tp, skb); 537 533 ··· 617 611 /* Calculate rto without backoff. This is the second half of Van Jacobson's 618 612 * routine referred to above. 
619 613 */ 620 - static inline void tcp_set_rto(struct tcp_sock *tp) 614 + static inline void tcp_set_rto(struct sock *sk) 621 615 { 616 + const struct tcp_sock *tp = tcp_sk(sk); 622 617 /* Old crap is replaced with new one. 8) 623 618 * 624 619 * More seriously: ··· 630 623 * is invisible. Actually, Linux-2.4 also generates erratic 631 624 * ACKs in some curcumstances. 632 625 */ 633 - tp->rto = (tp->srtt >> 3) + tp->rttvar; 626 + inet_csk(sk)->icsk_rto = (tp->srtt >> 3) + tp->rttvar; 634 627 635 628 /* 2. Fixups made earlier cannot be right. 636 629 * If we do not estimate RTO correctly without them, ··· 642 635 /* NOTE: clamping at TCP_RTO_MIN is not required, current algo 643 636 * guarantees that rto is higher. 644 637 */ 645 - static inline void tcp_bound_rto(struct tcp_sock *tp) 638 + static inline void tcp_bound_rto(struct sock *sk) 646 639 { 647 - if (tp->rto > TCP_RTO_MAX) 648 - tp->rto = TCP_RTO_MAX; 640 + if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX) 641 + inet_csk(sk)->icsk_rto = TCP_RTO_MAX; 649 642 } 650 643 651 644 /* Save metrics learned by this TCP session. ··· 665 658 if (dst && (dst->flags&DST_HOST)) { 666 659 int m; 667 660 668 - if (tp->backoff || !tp->srtt) { 661 + if (inet_csk(sk)->icsk_backoff || !tp->srtt) { 669 662 /* This session failed to estimate rtt. Why? 670 663 * Probably, no packets returned in time. 671 664 * Reset our results. 
··· 808 801 tp->mdev = dst_metric(dst, RTAX_RTTVAR); 809 802 tp->mdev_max = tp->rttvar = max(tp->mdev, TCP_RTO_MIN); 810 803 } 811 - tcp_set_rto(tp); 812 - tcp_bound_rto(tp); 813 - if (tp->rto < TCP_TIMEOUT_INIT && !tp->rx_opt.saw_tstamp) 804 + tcp_set_rto(sk); 805 + tcp_bound_rto(sk); 806 + if (inet_csk(sk)->icsk_rto < TCP_TIMEOUT_INIT && !tp->rx_opt.saw_tstamp) 814 807 goto reset; 815 808 tp->snd_cwnd = tcp_init_cwnd(tp, dst); 816 809 tp->snd_cwnd_stamp = tcp_time_stamp; ··· 824 817 if (!tp->rx_opt.saw_tstamp && tp->srtt) { 825 818 tp->srtt = 0; 826 819 tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_INIT; 827 - tp->rto = TCP_TIMEOUT_INIT; 820 + inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT; 828 821 } 829 822 } 830 823 ··· 1125 1118 1126 1119 if (tp->ca_state <= TCP_CA_Disorder || 1127 1120 tp->snd_una == tp->high_seq || 1128 - (tp->ca_state == TCP_CA_Loss && !tp->retransmits)) { 1121 + (tp->ca_state == TCP_CA_Loss && !inet_csk(sk)->icsk_retransmits)) { 1129 1122 tp->prior_ssthresh = tcp_current_ssthresh(tp); 1130 1123 tp->snd_ssthresh = tp->ca_ops->ssthresh(tp); 1131 1124 tcp_ca_event(tp, CA_EVENT_FRTO); ··· 1221 1214 1222 1215 /* Reduce ssthresh if it has not yet been made inside this window. 
*/ 1223 1216 if (tp->ca_state <= TCP_CA_Disorder || tp->snd_una == tp->high_seq || 1224 - (tp->ca_state == TCP_CA_Loss && !tp->retransmits)) { 1217 + (tp->ca_state == TCP_CA_Loss && !inet_csk(sk)->icsk_retransmits)) { 1225 1218 tp->prior_ssthresh = tcp_current_ssthresh(tp); 1226 1219 tp->snd_ssthresh = tp->ca_ops->ssthresh(tp); 1227 1220 tcp_ca_event(tp, CA_EVENT_LOSS); ··· 1260 1253 TCP_ECN_queue_cwr(tp); 1261 1254 } 1262 1255 1263 - static int tcp_check_sack_reneging(struct sock *sk, struct tcp_sock *tp) 1256 + static int tcp_check_sack_reneging(struct sock *sk) 1264 1257 { 1265 1258 struct sk_buff *skb; 1266 1259 ··· 1275 1268 NET_INC_STATS_BH(LINUX_MIB_TCPSACKRENEGING); 1276 1269 1277 1270 tcp_enter_loss(sk, 1); 1278 - tp->retransmits++; 1271 + inet_csk(sk)->icsk_retransmits++; 1279 1272 tcp_retransmit_skb(sk, skb_peek(&sk->sk_write_queue)); 1280 - tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto); 1273 + inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, 1274 + inet_csk(sk)->icsk_rto); 1281 1275 return 1; 1282 1276 } 1283 1277 return 0; ··· 1289 1281 return IsReno(tp) ? tp->sacked_out+1 : tp->fackets_out; 1290 1282 } 1291 1283 1292 - static inline int tcp_skb_timedout(struct tcp_sock *tp, struct sk_buff *skb) 1284 + static inline int tcp_skb_timedout(struct sock *sk, struct sk_buff *skb) 1293 1285 { 1294 - return (tcp_time_stamp - TCP_SKB_CB(skb)->when > tp->rto); 1286 + return (tcp_time_stamp - TCP_SKB_CB(skb)->when > inet_csk(sk)->icsk_rto); 1295 1287 } 1296 1288 1297 1289 static inline int tcp_head_timedout(struct sock *sk, struct tcp_sock *tp) 1298 1290 { 1299 1291 return tp->packets_out && 1300 - tcp_skb_timedout(tp, skb_peek(&sk->sk_write_queue)); 1292 + tcp_skb_timedout(sk, skb_peek(&sk->sk_write_queue)); 1301 1293 } 1302 1294 1303 1295 /* Linux NewReno/SACK/FACK/ECN state machine. 
··· 1517 1509 struct sk_buff *skb; 1518 1510 1519 1511 sk_stream_for_retrans_queue(skb, sk) { 1520 - if (tcp_skb_timedout(tp, skb) && 1512 + if (tcp_skb_timedout(sk, skb) && 1521 1513 !(TCP_SKB_CB(skb)->sacked&TCPCB_TAGBITS)) { 1522 1514 TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; 1523 1515 tp->lost_out += tcp_skb_pcount(skb); ··· 1684 1676 tp->left_out = tp->sacked_out; 1685 1677 tcp_undo_cwr(tp, 1); 1686 1678 NET_INC_STATS_BH(LINUX_MIB_TCPLOSSUNDO); 1687 - tp->retransmits = 0; 1679 + inet_csk(sk)->icsk_retransmits = 0; 1688 1680 tp->undo_marker = 0; 1689 1681 if (!IsReno(tp)) 1690 1682 tcp_set_ca_state(tp, TCP_CA_Open); ··· 1758 1750 tp->prior_ssthresh = 0; 1759 1751 1760 1752 /* B. In all the states check for reneging SACKs. */ 1761 - if (tp->sacked_out && tcp_check_sack_reneging(sk, tp)) 1753 + if (tp->sacked_out && tcp_check_sack_reneging(sk)) 1762 1754 return; 1763 1755 1764 1756 /* C. Process data loss notification, provided it is valid. */ ··· 1782 1774 } else if (!before(tp->snd_una, tp->high_seq)) { 1783 1775 switch (tp->ca_state) { 1784 1776 case TCP_CA_Loss: 1785 - tp->retransmits = 0; 1777 + inet_csk(sk)->icsk_retransmits = 0; 1786 1778 if (tcp_try_undo_recovery(sk, tp)) 1787 1779 return; 1788 1780 break; ··· 1832 1824 break; 1833 1825 case TCP_CA_Loss: 1834 1826 if (flag&FLAG_DATA_ACKED) 1835 - tp->retransmits = 0; 1827 + inet_csk(sk)->icsk_retransmits = 0; 1836 1828 if (!tcp_try_undo_loss(sk, tp)) { 1837 1829 tcp_moderate_cwnd(tp); 1838 1830 tcp_xmit_retransmit_queue(sk); ··· 1889 1881 /* Read draft-ietf-tcplw-high-performance before mucking 1890 1882 * with this code. 
(Superceeds RFC1323) 1891 1883 */ 1892 - static void tcp_ack_saw_tstamp(struct tcp_sock *tp, u32 *usrtt, int flag) 1884 + static void tcp_ack_saw_tstamp(struct sock *sk, u32 *usrtt, int flag) 1893 1885 { 1894 - __u32 seq_rtt; 1895 - 1896 1886 /* RTTM Rule: A TSecr value received in a segment is used to 1897 1887 * update the averaged RTT measurement only if the segment 1898 1888 * acknowledges some new data, i.e., only if it advances the ··· 1906 1900 * answer arrives rto becomes 120 seconds! If at least one of segments 1907 1901 * in window is lost... Voila. --ANK (010210) 1908 1902 */ 1909 - seq_rtt = tcp_time_stamp - tp->rx_opt.rcv_tsecr; 1903 + struct tcp_sock *tp = tcp_sk(sk); 1904 + const __u32 seq_rtt = tcp_time_stamp - tp->rx_opt.rcv_tsecr; 1910 1905 tcp_rtt_estimator(tp, seq_rtt, usrtt); 1911 - tcp_set_rto(tp); 1912 - tp->backoff = 0; 1913 - tcp_bound_rto(tp); 1906 + tcp_set_rto(sk); 1907 + inet_csk(sk)->icsk_backoff = 0; 1908 + tcp_bound_rto(sk); 1914 1909 } 1915 1910 1916 - static void tcp_ack_no_tstamp(struct tcp_sock *tp, u32 seq_rtt, u32 *usrtt, int flag) 1911 + static void tcp_ack_no_tstamp(struct sock *sk, u32 seq_rtt, u32 *usrtt, int flag) 1917 1912 { 1918 1913 /* We don't have a timestamp. Can only use 1919 1914 * packets that are not retransmitted to determine ··· 1928 1921 if (flag & FLAG_RETRANS_DATA_ACKED) 1929 1922 return; 1930 1923 1931 - tcp_rtt_estimator(tp, seq_rtt, usrtt); 1932 - tcp_set_rto(tp); 1933 - tp->backoff = 0; 1934 - tcp_bound_rto(tp); 1924 + tcp_rtt_estimator(tcp_sk(sk), seq_rtt, usrtt); 1925 + tcp_set_rto(sk); 1926 + inet_csk(sk)->icsk_backoff = 0; 1927 + tcp_bound_rto(sk); 1935 1928 } 1936 1929 1937 - static inline void tcp_ack_update_rtt(struct tcp_sock *tp, 1938 - int flag, s32 seq_rtt, u32 *usrtt) 1930 + static inline void tcp_ack_update_rtt(struct sock *sk, const int flag, 1931 + const s32 seq_rtt, u32 *usrtt) 1939 1932 { 1933 + const struct tcp_sock *tp = tcp_sk(sk); 1940 1934 /* Note that peer MAY send zero echo. 
In this case it is ignored. (rfc1323) */ 1941 1935 if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr) 1942 - tcp_ack_saw_tstamp(tp, usrtt, flag); 1936 + tcp_ack_saw_tstamp(sk, usrtt, flag); 1943 1937 else if (seq_rtt >= 0) 1944 - tcp_ack_no_tstamp(tp, seq_rtt, usrtt, flag); 1938 + tcp_ack_no_tstamp(sk, seq_rtt, usrtt, flag); 1945 1939 } 1946 1940 1947 1941 static inline void tcp_cong_avoid(struct tcp_sock *tp, u32 ack, u32 rtt, ··· 1959 1951 static inline void tcp_ack_packets_out(struct sock *sk, struct tcp_sock *tp) 1960 1952 { 1961 1953 if (!tp->packets_out) { 1962 - tcp_clear_xmit_timer(sk, TCP_TIME_RETRANS); 1954 + inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS); 1963 1955 } else { 1964 - tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto); 1956 + inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, inet_csk(sk)->icsk_rto); 1965 1957 } 1966 1958 } 1967 1959 ··· 2098 2090 } 2099 2091 2100 2092 if (acked&FLAG_ACKED) { 2101 - tcp_ack_update_rtt(tp, acked, seq_rtt, seq_usrtt); 2093 + tcp_ack_update_rtt(sk, acked, seq_rtt, seq_usrtt); 2102 2094 tcp_ack_packets_out(sk, tp); 2103 2095 2104 2096 if (tp->ca_ops->pkts_acked) ··· 2133 2125 2134 2126 static void tcp_ack_probe(struct sock *sk) 2135 2127 { 2136 - struct tcp_sock *tp = tcp_sk(sk); 2128 + const struct tcp_sock *tp = tcp_sk(sk); 2129 + struct inet_connection_sock *icsk = inet_csk(sk); 2137 2130 2138 2131 /* Was it a usable window open? */ 2139 2132 2140 2133 if (!after(TCP_SKB_CB(sk->sk_send_head)->end_seq, 2141 2134 tp->snd_una + tp->snd_wnd)) { 2142 - tp->backoff = 0; 2143 - tcp_clear_xmit_timer(sk, TCP_TIME_PROBE0); 2135 + icsk->icsk_backoff = 0; 2136 + inet_csk_clear_xmit_timer(sk, ICSK_TIME_PROBE0); 2144 2137 /* Socket must be waked up by subsequent tcp_data_snd_check(). 2145 2138 * This function is not for random using! 
2146 2139 */ 2147 2140 } else { 2148 - tcp_reset_xmit_timer(sk, TCP_TIME_PROBE0, 2149 - min(tp->rto << tp->backoff, TCP_RTO_MAX)); 2141 + inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, 2142 + min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX)); 2150 2143 } 2151 2144 } 2152 2145 ··· 2166 2157 /* Check that window update is acceptable. 2167 2158 * The function assumes that snd_una<=ack<=snd_next. 2168 2159 */ 2169 - static inline int tcp_may_update_window(struct tcp_sock *tp, u32 ack, 2170 - u32 ack_seq, u32 nwin) 2160 + static inline int tcp_may_update_window(const struct tcp_sock *tp, const u32 ack, 2161 + const u32 ack_seq, const u32 nwin) 2171 2162 { 2172 2163 return (after(ack, tp->snd_una) || 2173 2164 after(ack_seq, tp->snd_wl1) || ··· 2509 2500 * up to bandwidth of 18Gigabit/sec. 8) ] 2510 2501 */ 2511 2502 2512 - static int tcp_disordered_ack(struct tcp_sock *tp, struct sk_buff *skb) 2503 + static int tcp_disordered_ack(const struct sock *sk, const struct sk_buff *skb) 2513 2504 { 2505 + struct tcp_sock *tp = tcp_sk(sk); 2514 2506 struct tcphdr *th = skb->h.th; 2515 2507 u32 seq = TCP_SKB_CB(skb)->seq; 2516 2508 u32 ack = TCP_SKB_CB(skb)->ack_seq; ··· 2526 2516 !tcp_may_update_window(tp, ack, seq, ntohs(th->window) << tp->rx_opt.snd_wscale) && 2527 2517 2528 2518 /* 4. ... and sits in replay window. 
*/ 2529 - (s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) <= (tp->rto*1024)/HZ); 2519 + (s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) <= (inet_csk(sk)->icsk_rto * 1024) / HZ); 2530 2520 } 2531 2521 2532 - static inline int tcp_paws_discard(struct tcp_sock *tp, struct sk_buff *skb) 2522 + static inline int tcp_paws_discard(const struct sock *sk, const struct sk_buff *skb) 2533 2523 { 2524 + const struct tcp_sock *tp = tcp_sk(sk); 2534 2525 return ((s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) > TCP_PAWS_WINDOW && 2535 2526 xtime.tv_sec < tp->rx_opt.ts_recent_stamp + TCP_PAWS_24DAYS && 2536 - !tcp_disordered_ack(tp, skb)); 2527 + !tcp_disordered_ack(sk, skb)); 2537 2528 } 2538 2529 2539 2530 /* Check segment sequence number for validity. ··· 2597 2586 { 2598 2587 struct tcp_sock *tp = tcp_sk(sk); 2599 2588 2600 - tcp_schedule_ack(tp); 2589 + inet_csk_schedule_ack(sk); 2601 2590 2602 2591 sk->sk_shutdown |= RCV_SHUTDOWN; 2603 2592 sock_set_flag(sk, SOCK_DONE); ··· 2607 2596 case TCP_ESTABLISHED: 2608 2597 /* Move to CLOSE_WAIT */ 2609 2598 tcp_set_state(sk, TCP_CLOSE_WAIT); 2610 - tp->ack.pingpong = 1; 2599 + inet_csk(sk)->icsk_ack.pingpong = 1; 2611 2600 break; 2612 2601 2613 2602 case TCP_CLOSE_WAIT: ··· 2705 2694 if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq && 2706 2695 before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { 2707 2696 NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOST); 2708 - tcp_enter_quickack_mode(tp); 2697 + tcp_enter_quickack_mode(sk); 2709 2698 2710 2699 if (tp->rx_opt.sack_ok && sysctl_tcp_dsack) { 2711 2700 u32 end_seq = TCP_SKB_CB(skb)->end_seq; ··· 2953 2942 * gap in queue is filled. 
2954 2943 */ 2955 2944 if (skb_queue_empty(&tp->out_of_order_queue)) 2956 - tp->ack.pingpong = 0; 2945 + inet_csk(sk)->icsk_ack.pingpong = 0; 2957 2946 } 2958 2947 2959 2948 if (tp->rx_opt.num_sacks) ··· 2974 2963 tcp_dsack_set(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq); 2975 2964 2976 2965 out_of_window: 2977 - tcp_enter_quickack_mode(tp); 2978 - tcp_schedule_ack(tp); 2966 + tcp_enter_quickack_mode(sk); 2967 + inet_csk_schedule_ack(sk); 2979 2968 drop: 2980 2969 __kfree_skb(skb); 2981 2970 return; ··· 2985 2974 if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt + tcp_receive_window(tp))) 2986 2975 goto out_of_window; 2987 2976 2988 - tcp_enter_quickack_mode(tp); 2977 + tcp_enter_quickack_mode(sk); 2989 2978 2990 2979 if (before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { 2991 2980 /* Partial packet, seq < rcv_next < end_seq */ ··· 3014 3003 3015 3004 /* Disable header prediction. */ 3016 3005 tp->pred_flags = 0; 3017 - tcp_schedule_ack(tp); 3006 + inet_csk_schedule_ack(sk); 3018 3007 3019 3008 SOCK_DEBUG(sk, "out of order segment: rcv_next %X seq %X - %X\n", 3020 3009 tp->rcv_nxt, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq); ··· 3384 3373 struct tcp_sock *tp = tcp_sk(sk); 3385 3374 3386 3375 /* More than one full frame received... */ 3387 - if (((tp->rcv_nxt - tp->rcv_wup) > tp->ack.rcv_mss 3376 + if (((tp->rcv_nxt - tp->rcv_wup) > inet_csk(sk)->icsk_ack.rcv_mss 3388 3377 /* ... and right edge of window advances far enough. 3389 3378 * (tcp_recvmsg() will send ACK otherwise). Or... 3390 3379 */ 3391 3380 && __tcp_select_window(sk) >= tp->rcv_wnd) || 3392 3381 /* We ACK each frame or... */ 3393 - tcp_in_quickack_mode(tp) || 3382 + tcp_in_quickack_mode(sk) || 3394 3383 /* We have out of order data. 
*/ 3395 3384 (ofo_possible && 3396 3385 skb_peek(&tp->out_of_order_queue))) { ··· 3404 3393 3405 3394 static __inline__ void tcp_ack_snd_check(struct sock *sk) 3406 3395 { 3407 - struct tcp_sock *tp = tcp_sk(sk); 3408 - if (!tcp_ack_scheduled(tp)) { 3396 + if (!inet_csk_ack_scheduled(sk)) { 3409 3397 /* We sent a data segment already. */ 3410 3398 return; 3411 3399 } ··· 3658 3648 tp->rcv_nxt == tp->rcv_wup) 3659 3649 tcp_store_ts_recent(tp); 3660 3650 3661 - tcp_rcv_rtt_measure_ts(tp, skb); 3651 + tcp_rcv_rtt_measure_ts(sk, skb); 3662 3652 3663 3653 /* We know that such packets are checksummed 3664 3654 * on entry. ··· 3691 3681 tp->rcv_nxt == tp->rcv_wup) 3692 3682 tcp_store_ts_recent(tp); 3693 3683 3694 - tcp_rcv_rtt_measure_ts(tp, skb); 3684 + tcp_rcv_rtt_measure_ts(sk, skb); 3695 3685 3696 3686 __skb_pull(skb, tcp_header_len); 3697 3687 tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; ··· 3712 3702 tp->rcv_nxt == tp->rcv_wup) 3713 3703 tcp_store_ts_recent(tp); 3714 3704 3715 - tcp_rcv_rtt_measure_ts(tp, skb); 3705 + tcp_rcv_rtt_measure_ts(sk, skb); 3716 3706 3717 3707 if ((int)skb->truesize > sk->sk_forward_alloc) 3718 3708 goto step5; ··· 3732 3722 /* Well, only one small jumplet in fast path... */ 3733 3723 tcp_ack(sk, skb, FLAG_DATA); 3734 3724 tcp_data_snd_check(sk, tp); 3735 - if (!tcp_ack_scheduled(tp)) 3725 + if (!inet_csk_ack_scheduled(sk)) 3736 3726 goto no_ack; 3737 3727 } 3738 3728 ··· 3754 3744 * RFC1323: H1. Apply PAWS check first. 3755 3745 */ 3756 3746 if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp && 3757 - tcp_paws_discard(tp, skb)) { 3747 + tcp_paws_discard(sk, skb)) { 3758 3748 if (!th->rst) { 3759 3749 NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED); 3760 3750 tcp_send_dupack(sk, skb); ··· 3801 3791 if(th->ack) 3802 3792 tcp_ack(sk, skb, FLAG_SLOWPATH); 3803 3793 3804 - tcp_rcv_rtt_measure_ts(tp, skb); 3794 + tcp_rcv_rtt_measure_ts(sk, skb); 3805 3795 3806 3796 /* Process urgent data. 
*/ 3807 3797 tcp_urg(sk, skb, th); ··· 3943 3933 tcp_init_buffer_space(sk); 3944 3934 3945 3935 if (sock_flag(sk, SOCK_KEEPOPEN)) 3946 - tcp_reset_keepalive_timer(sk, keepalive_time_when(tp)); 3936 + inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tp)); 3947 3937 3948 3938 if (!tp->rx_opt.snd_wscale) 3949 3939 __tcp_fast_path_on(tp, tp->snd_wnd); ··· 3955 3945 sk_wake_async(sk, 0, POLL_OUT); 3956 3946 } 3957 3947 3958 - if (sk->sk_write_pending || tp->defer_accept || tp->ack.pingpong) { 3948 + if (sk->sk_write_pending || tp->defer_accept || inet_csk(sk)->icsk_ack.pingpong) { 3959 3949 /* Save one ACK. Data will be ready after 3960 3950 * several ticks, if write_pending is set. 3961 3951 * ··· 3963 3953 * look so _wonderfully_ clever, that I was not able 3964 3954 * to stand against the temptation 8) --ANK 3965 3955 */ 3966 - tcp_schedule_ack(tp); 3967 - tp->ack.lrcvtime = tcp_time_stamp; 3968 - tp->ack.ato = TCP_ATO_MIN; 3969 - tcp_incr_quickack(tp); 3970 - tcp_enter_quickack_mode(tp); 3971 - tcp_reset_xmit_timer(sk, TCP_TIME_DACK, TCP_DELACK_MAX); 3956 + inet_csk_schedule_ack(sk); 3957 + inet_csk(sk)->icsk_ack.lrcvtime = tcp_time_stamp; 3958 + inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN; 3959 + tcp_incr_quickack(sk); 3960 + tcp_enter_quickack_mode(sk); 3961 + inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, TCP_DELACK_MAX); 3972 3962 3973 3963 discard: 3974 3964 __kfree_skb(skb); ··· 4124 4114 } 4125 4115 4126 4116 if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp && 4127 - tcp_paws_discard(tp, skb)) { 4117 + tcp_paws_discard(sk, skb)) { 4128 4118 if (!th->rst) { 4129 4119 NET_INC_STATS_BH(LINUX_MIB_PAWSESTABREJECTED); 4130 4120 tcp_send_dupack(sk, skb); ··· 4193 4183 */ 4194 4184 if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr && 4195 4185 !tp->srtt) 4196 - tcp_ack_saw_tstamp(tp, 0, 0); 4186 + tcp_ack_saw_tstamp(sk, 0, 0); 4197 4187 4198 4188 if (tp->rx_opt.tstamp_ok) 4199 4189 tp->advmss -= TCPOLEN_TSTAMP_ALIGNED; ··· 4240 4230 return 1; 4241 
4231 } 4242 4232 4243 - tmo = tcp_fin_time(tp); 4233 + tmo = tcp_fin_time(sk); 4244 4234 if (tmo > TCP_TIMEWAIT_LEN) { 4245 - tcp_reset_keepalive_timer(sk, tmo - TCP_TIMEWAIT_LEN); 4235 + inet_csk_reset_keepalive_timer(sk, tmo - TCP_TIMEWAIT_LEN); 4246 4236 } else if (th->fin || sock_owned_by_user(sk)) { 4247 4237 /* Bad case. We could lose such FIN otherwise. 4248 4238 * It is not a big problem, but it looks confusing ··· 4250 4240 * if it spins in bh_lock_sock(), but it is really 4251 4241 * marginal case. 4252 4242 */ 4253 - tcp_reset_keepalive_timer(sk, tmo); 4243 + inet_csk_reset_keepalive_timer(sk, tmo); 4254 4244 } else { 4255 4245 tcp_time_wait(sk, TCP_FIN_WAIT2, tmo); 4256 4246 goto discard;
+85 -73
net/ipv4/tcp_ipv4.c
··· 104 104 */ 105 105 int sysctl_local_port_range[2] = { 1024, 4999 }; 106 106 107 - static inline int tcp_bind_conflict(struct sock *sk, struct inet_bind_bucket *tb) 107 + static inline int inet_csk_bind_conflict(struct sock *sk, struct inet_bind_bucket *tb) 108 108 { 109 109 const u32 sk_rcv_saddr = inet_rcv_saddr(sk); 110 110 struct sock *sk2; ··· 113 113 114 114 sk_for_each_bound(sk2, node, &tb->owners) { 115 115 if (sk != sk2 && 116 - !tcp_v6_ipv6only(sk2) && 116 + !inet_v6_ipv6only(sk2) && 117 117 (!sk->sk_bound_dev_if || 118 118 !sk2->sk_bound_dev_if || 119 119 sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) { ··· 132 132 /* Obtain a reference to a local port for the given sock, 133 133 * if snum is zero it means select any available local port. 134 134 */ 135 - static int tcp_v4_get_port(struct sock *sk, unsigned short snum) 135 + int inet_csk_get_port(struct inet_hashinfo *hashinfo, 136 + struct sock *sk, unsigned short snum) 136 137 { 137 138 struct inet_bind_hashbucket *head; 138 139 struct hlist_node *node; ··· 147 146 int remaining = (high - low) + 1; 148 147 int rover; 149 148 150 - spin_lock(&tcp_hashinfo.portalloc_lock); 151 - if (tcp_hashinfo.port_rover < low) 149 + spin_lock(&hashinfo->portalloc_lock); 150 + if (hashinfo->port_rover < low) 152 151 rover = low; 153 152 else 154 - rover = tcp_hashinfo.port_rover; 153 + rover = hashinfo->port_rover; 155 154 do { 156 155 rover++; 157 156 if (rover > high) 158 157 rover = low; 159 - head = &tcp_hashinfo.bhash[inet_bhashfn(rover, tcp_hashinfo.bhash_size)]; 158 + head = &hashinfo->bhash[inet_bhashfn(rover, hashinfo->bhash_size)]; 160 159 spin_lock(&head->lock); 161 160 inet_bind_bucket_for_each(tb, node, &head->chain) 162 161 if (tb->port == rover) ··· 165 164 next: 166 165 spin_unlock(&head->lock); 167 166 } while (--remaining > 0); 168 - tcp_hashinfo.port_rover = rover; 169 - spin_unlock(&tcp_hashinfo.portalloc_lock); 167 + hashinfo->port_rover = rover; 168 + spin_unlock(&hashinfo->portalloc_lock); 170 
169 171 170 /* Exhausted local port range during search? It is not 172 171 * possible for us to be holding one of the bind hash ··· 183 182 */ 184 183 snum = rover; 185 184 } else { 186 - head = &tcp_hashinfo.bhash[inet_bhashfn(snum, tcp_hashinfo.bhash_size)]; 185 + head = &hashinfo->bhash[inet_bhashfn(snum, hashinfo->bhash_size)]; 187 186 spin_lock(&head->lock); 188 187 inet_bind_bucket_for_each(tb, node, &head->chain) 189 188 if (tb->port == snum) ··· 200 199 goto success; 201 200 } else { 202 201 ret = 1; 203 - if (tcp_bind_conflict(sk, tb)) 202 + if (inet_csk_bind_conflict(sk, tb)) 204 203 goto fail_unlock; 205 204 } 206 205 } 207 206 tb_not_found: 208 207 ret = 1; 209 - if (!tb && (tb = inet_bind_bucket_create(tcp_hashinfo.bind_bucket_cachep, head, snum)) == NULL) 208 + if (!tb && (tb = inet_bind_bucket_create(hashinfo->bind_bucket_cachep, head, snum)) == NULL) 210 209 goto fail_unlock; 211 210 if (hlist_empty(&tb->owners)) { 212 211 if (sk->sk_reuse && sk->sk_state != TCP_LISTEN) ··· 217 216 (!sk->sk_reuse || sk->sk_state == TCP_LISTEN)) 218 217 tb->fastreuse = 0; 219 218 success: 220 - if (!inet_sk(sk)->bind_hash) 219 + if (!inet_csk(sk)->icsk_bind_hash) 221 220 inet_bind_hash(sk, tb, snum); 222 - BUG_TRAP(inet_sk(sk)->bind_hash == tb); 221 + BUG_TRAP(inet_csk(sk)->icsk_bind_hash == tb); 223 222 ret = 0; 224 223 225 224 fail_unlock: ··· 227 226 fail: 228 227 local_bh_enable(); 229 228 return ret; 229 + } 230 + 231 + static int tcp_v4_get_port(struct sock *sk, unsigned short snum) 232 + { 233 + return inet_csk_get_port(&tcp_hashinfo, sk, snum); 230 234 } 231 235 232 236 static void tcp_v4_hash(struct sock *sk) ··· 432 426 } 433 427 434 428 head = &tcp_hashinfo.bhash[inet_bhashfn(snum, tcp_hashinfo.bhash_size)]; 435 - tb = inet_sk(sk)->bind_hash; 429 + tb = inet_csk(sk)->icsk_bind_hash; 436 430 spin_lock_bh(&head->lock); 437 431 if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) { 438 432 __inet_hash(&tcp_hashinfo, sk, 0); ··· 563 557 return err; 564 
558 } 565 559 566 - static __inline__ int tcp_v4_iif(struct sk_buff *skb) 560 + static inline int inet_iif(const struct sk_buff *skb) 567 561 { 568 562 return ((struct rtable *)skb->dst)->rt_iif; 569 563 } 570 564 571 - static __inline__ u32 tcp_v4_synq_hash(u32 raddr, u16 rport, u32 rnd) 565 + static inline u32 inet_synq_hash(const u32 raddr, const u16 rport, 566 + const u32 rnd, const u16 synq_hsize) 572 567 { 573 - return (jhash_2words(raddr, (u32) rport, rnd) & (TCP_SYNQ_HSIZE - 1)); 568 + return jhash_2words(raddr, (u32)rport, rnd) & (synq_hsize - 1); 574 569 } 575 570 576 - static struct request_sock *tcp_v4_search_req(struct tcp_sock *tp, 577 - struct request_sock ***prevp, 578 - __u16 rport, 579 - __u32 raddr, __u32 laddr) 571 + struct request_sock *inet_csk_search_req(const struct sock *sk, 572 + struct request_sock ***prevp, 573 + const __u16 rport, const __u32 raddr, 574 + const __u32 laddr) 580 575 { 581 - struct listen_sock *lopt = tp->accept_queue.listen_opt; 576 + const struct inet_connection_sock *icsk = inet_csk(sk); 577 + struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt; 582 578 struct request_sock *req, **prev; 583 579 584 - for (prev = &lopt->syn_table[tcp_v4_synq_hash(raddr, rport, lopt->hash_rnd)]; 580 + for (prev = &lopt->syn_table[inet_synq_hash(raddr, rport, lopt->hash_rnd, 581 + lopt->nr_table_entries)]; 585 582 (req = *prev) != NULL; 586 583 prev = &req->dl_next) { 587 584 const struct inet_request_sock *ireq = inet_rsk(req); ··· 592 583 if (ireq->rmt_port == rport && 593 584 ireq->rmt_addr == raddr && 594 585 ireq->loc_addr == laddr && 595 - TCP_INET_FAMILY(req->rsk_ops->family)) { 586 + AF_INET_FAMILY(req->rsk_ops->family)) { 596 587 BUG_TRAP(!req->sk); 597 588 *prevp = prev; 598 589 break; ··· 604 595 605 596 static void tcp_v4_synq_add(struct sock *sk, struct request_sock *req) 606 597 { 607 - struct tcp_sock *tp = tcp_sk(sk); 608 - struct listen_sock *lopt = tp->accept_queue.listen_opt; 609 - u32 h = 
tcp_v4_synq_hash(inet_rsk(req)->rmt_addr, inet_rsk(req)->rmt_port, lopt->hash_rnd); 598 + struct inet_connection_sock *icsk = inet_csk(sk); 599 + struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt; 600 + const u32 h = inet_synq_hash(inet_rsk(req)->rmt_addr, inet_rsk(req)->rmt_port, 601 + lopt->hash_rnd, lopt->nr_table_entries); 610 602 611 - reqsk_queue_hash_req(&tp->accept_queue, h, req, TCP_TIMEOUT_INIT); 612 - tcp_synq_added(sk); 603 + reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, TCP_TIMEOUT_INIT); 604 + inet_csk_reqsk_queue_added(sk, TCP_TIMEOUT_INIT); 613 605 } 614 606 615 607 ··· 697 687 } 698 688 699 689 sk = inet_lookup(&tcp_hashinfo, iph->daddr, th->dest, iph->saddr, 700 - th->source, tcp_v4_iif(skb)); 690 + th->source, inet_iif(skb)); 701 691 if (!sk) { 702 692 ICMP_INC_STATS_BH(ICMP_MIB_INERRORS); 703 693 return; ··· 757 747 if (sock_owned_by_user(sk)) 758 748 goto out; 759 749 760 - req = tcp_v4_search_req(tp, &prev, th->dest, 761 - iph->daddr, iph->saddr); 750 + req = inet_csk_search_req(sk, &prev, th->dest, 751 + iph->daddr, iph->saddr); 762 752 if (!req) 763 753 goto out; 764 754 ··· 778 768 * created socket, and POSIX does not want network 779 769 * errors returned from accept(). 
780 770 */ 781 - tcp_synq_drop(sk, req, prev); 771 + inet_csk_reqsk_queue_drop(sk, req, prev); 782 772 goto out; 783 773 784 774 case TCP_SYN_SENT: ··· 963 953 req->ts_recent); 964 954 } 965 955 966 - static struct dst_entry* tcp_v4_route_req(struct sock *sk, 967 - struct request_sock *req) 956 + struct dst_entry* inet_csk_route_req(struct sock *sk, 957 + const struct request_sock *req) 968 958 { 969 959 struct rtable *rt; 970 960 const struct inet_request_sock *ireq = inet_rsk(req); ··· 976 966 ireq->rmt_addr), 977 967 .saddr = ireq->loc_addr, 978 968 .tos = RT_CONN_FLAGS(sk) } }, 979 - .proto = IPPROTO_TCP, 969 + .proto = sk->sk_protocol, 980 970 .uli_u = { .ports = 981 971 { .sport = inet_sk(sk)->sport, 982 972 .dport = ireq->rmt_port } } }; ··· 1006 996 struct sk_buff * skb; 1007 997 1008 998 /* First, grab a route. */ 1009 - if (!dst && (dst = tcp_v4_route_req(sk, req)) == NULL) 999 + if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL) 1010 1000 goto out; 1011 1001 1012 1002 skb = tcp_make_synack(sk, dst, req); ··· 1108 1098 * limitations, they conserve resources and peer is 1109 1099 * evidently real one. 1110 1100 */ 1111 - if (tcp_synq_is_full(sk) && !isn) { 1101 + if (inet_csk_reqsk_queue_is_full(sk) && !isn) { 1112 1102 #ifdef CONFIG_SYN_COOKIES 1113 1103 if (sysctl_tcp_syncookies) { 1114 1104 want_cookie = 1; ··· 1122 1112 * clogging syn queue with openreqs with exponentially increasing 1123 1113 * timeout. 
1124 1114 */ 1125 - if (sk_acceptq_is_full(sk) && tcp_synq_young(sk) > 1) 1115 + if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) 1126 1116 goto drop; 1127 1117 1128 1118 req = reqsk_alloc(&tcp_request_sock_ops); ··· 1179 1169 */ 1180 1170 if (tmp_opt.saw_tstamp && 1181 1171 sysctl_tcp_tw_recycle && 1182 - (dst = tcp_v4_route_req(sk, req)) != NULL && 1172 + (dst = inet_csk_route_req(sk, req)) != NULL && 1183 1173 (peer = rt_get_peer((struct rtable *)dst)) != NULL && 1184 1174 peer->v4daddr == saddr) { 1185 1175 if (xtime.tv_sec < peer->tcp_ts_stamp + TCP_PAWS_MSL && ··· 1192 1182 } 1193 1183 /* Kill the following clause, if you dislike this way. */ 1194 1184 else if (!sysctl_tcp_syncookies && 1195 - (sysctl_max_syn_backlog - tcp_synq_len(sk) < 1185 + (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) < 1196 1186 (sysctl_max_syn_backlog >> 2)) && 1197 1187 (!peer || !peer->tcp_ts_stamp) && 1198 1188 (!dst || !dst_metric(dst, RTAX_RTT))) { ··· 1250 1240 if (sk_acceptq_is_full(sk)) 1251 1241 goto exit_overflow; 1252 1242 1253 - if (!dst && (dst = tcp_v4_route_req(sk, req)) == NULL) 1243 + if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL) 1254 1244 goto exit; 1255 1245 1256 1246 newsk = tcp_create_openreq_child(sk, req, skb); ··· 1267 1257 newinet->saddr = ireq->loc_addr; 1268 1258 newinet->opt = ireq->opt; 1269 1259 ireq->opt = NULL; 1270 - newinet->mc_index = tcp_v4_iif(skb); 1260 + newinet->mc_index = inet_iif(skb); 1271 1261 newinet->mc_ttl = skb->nh.iph->ttl; 1272 1262 newtp->ext_header_len = 0; 1273 1263 if (newinet->opt) ··· 1295 1285 { 1296 1286 struct tcphdr *th = skb->h.th; 1297 1287 struct iphdr *iph = skb->nh.iph; 1298 - struct tcp_sock *tp = tcp_sk(sk); 1299 1288 struct sock *nsk; 1300 1289 struct request_sock **prev; 1301 1290 /* Find possible connection requests. 
*/ 1302 - struct request_sock *req = tcp_v4_search_req(tp, &prev, th->source, 1303 - iph->saddr, iph->daddr); 1291 + struct request_sock *req = inet_csk_search_req(sk, &prev, th->source, 1292 + iph->saddr, iph->daddr); 1304 1293 if (req) 1305 1294 return tcp_check_req(sk, skb, req, prev); 1306 1295 1307 1296 nsk = __inet_lookup_established(&tcp_hashinfo, skb->nh.iph->saddr, 1308 1297 th->source, skb->nh.iph->daddr, 1309 - ntohs(th->dest), tcp_v4_iif(skb)); 1298 + ntohs(th->dest), inet_iif(skb)); 1310 1299 1311 1300 if (nsk) { 1312 1301 if (nsk->sk_state != TCP_TIME_WAIT) { ··· 1449 1440 1450 1441 sk = __inet_lookup(&tcp_hashinfo, skb->nh.iph->saddr, th->source, 1451 1442 skb->nh.iph->daddr, ntohs(th->dest), 1452 - tcp_v4_iif(skb)); 1443 + inet_iif(skb)); 1453 1444 1454 1445 if (!sk) 1455 1446 goto no_tcp_socket; ··· 1516 1507 struct sock *sk2 = inet_lookup_listener(&tcp_hashinfo, 1517 1508 skb->nh.iph->daddr, 1518 1509 ntohs(th->dest), 1519 - tcp_v4_iif(skb)); 1510 + inet_iif(skb)); 1520 1511 if (sk2) { 1521 1512 tcp_tw_deschedule((struct inet_timewait_sock *)sk); 1522 1513 inet_twsk_put((struct inet_timewait_sock *)sk); ··· 1628 1619 tcp_init_xmit_timers(sk); 1629 1620 tcp_prequeue_init(tp); 1630 1621 1631 - tp->rto = TCP_TIMEOUT_INIT; 1622 + inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT; 1632 1623 tp->mdev = TCP_TIMEOUT_INIT; 1633 1624 1634 1625 /* So many TCP implementations out there (incorrectly) count the ··· 1681 1672 __skb_queue_purge(&tp->ucopy.prequeue); 1682 1673 1683 1674 /* Clean up a referenced TCP bind bucket. 
*/ 1684 - if (inet_sk(sk)->bind_hash) 1675 + if (inet_csk(sk)->icsk_bind_hash) 1685 1676 inet_put_port(&tcp_hashinfo, sk); 1686 1677 1687 1678 /* ··· 1716 1707 1717 1708 static void *listening_get_next(struct seq_file *seq, void *cur) 1718 1709 { 1719 - struct tcp_sock *tp; 1710 + struct inet_connection_sock *icsk; 1720 1711 struct hlist_node *node; 1721 1712 struct sock *sk = cur; 1722 1713 struct tcp_iter_state* st = seq->private; ··· 1732 1723 if (st->state == TCP_SEQ_STATE_OPENREQ) { 1733 1724 struct request_sock *req = cur; 1734 1725 1735 - tp = tcp_sk(st->syn_wait_sk); 1726 + icsk = inet_csk(st->syn_wait_sk); 1736 1727 req = req->dl_next; 1737 1728 while (1) { 1738 1729 while (req) { ··· 1745 1736 if (++st->sbucket >= TCP_SYNQ_HSIZE) 1746 1737 break; 1747 1738 get_req: 1748 - req = tp->accept_queue.listen_opt->syn_table[st->sbucket]; 1739 + req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket]; 1749 1740 } 1750 1741 sk = sk_next(st->syn_wait_sk); 1751 1742 st->state = TCP_SEQ_STATE_LISTENING; 1752 - read_unlock_bh(&tp->accept_queue.syn_wait_lock); 1743 + read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock); 1753 1744 } else { 1754 - tp = tcp_sk(sk); 1755 - read_lock_bh(&tp->accept_queue.syn_wait_lock); 1756 - if (reqsk_queue_len(&tp->accept_queue)) 1745 + icsk = inet_csk(sk); 1746 + read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock); 1747 + if (reqsk_queue_len(&icsk->icsk_accept_queue)) 1757 1748 goto start_req; 1758 - read_unlock_bh(&tp->accept_queue.syn_wait_lock); 1749 + read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock); 1759 1750 sk = sk_next(sk); 1760 1751 } 1761 1752 get_sk: ··· 1764 1755 cur = sk; 1765 1756 goto out; 1766 1757 } 1767 - tp = tcp_sk(sk); 1768 - read_lock_bh(&tp->accept_queue.syn_wait_lock); 1769 - if (reqsk_queue_len(&tp->accept_queue)) { 1758 + icsk = inet_csk(sk); 1759 + read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock); 1760 + if (reqsk_queue_len(&icsk->icsk_accept_queue)) { 1770 1761 start_req: 1771 1762 
st->uid = sock_i_uid(sk); 1772 1763 st->syn_wait_sk = sk; ··· 1774 1765 st->sbucket = 0; 1775 1766 goto get_req; 1776 1767 } 1777 - read_unlock_bh(&tp->accept_queue.syn_wait_lock); 1768 + read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock); 1778 1769 } 1779 1770 if (++st->bucket < INET_LHTABLE_SIZE) { 1780 1771 sk = sk_head(&tcp_hashinfo.listening_hash[st->bucket]); ··· 1960 1951 switch (st->state) { 1961 1952 case TCP_SEQ_STATE_OPENREQ: 1962 1953 if (v) { 1963 - struct tcp_sock *tp = tcp_sk(st->syn_wait_sk); 1964 - read_unlock_bh(&tp->accept_queue.syn_wait_lock); 1954 + struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk); 1955 + read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock); 1965 1956 } 1966 1957 case TCP_SEQ_STATE_LISTENING: 1967 1958 if (v != SEQ_START_TOKEN) ··· 2067 2058 int timer_active; 2068 2059 unsigned long timer_expires; 2069 2060 struct tcp_sock *tp = tcp_sk(sp); 2061 + const struct inet_connection_sock *icsk = inet_csk(sp); 2070 2062 struct inet_sock *inet = inet_sk(sp); 2071 2063 unsigned int dest = inet->daddr; 2072 2064 unsigned int src = inet->rcv_saddr; 2073 2065 __u16 destp = ntohs(inet->dport); 2074 2066 __u16 srcp = ntohs(inet->sport); 2075 2067 2076 - if (tp->pending == TCP_TIME_RETRANS) { 2068 + if (icsk->icsk_pending == ICSK_TIME_RETRANS) { 2077 2069 timer_active = 1; 2078 - timer_expires = tp->timeout; 2079 - } else if (tp->pending == TCP_TIME_PROBE0) { 2070 + timer_expires = icsk->icsk_timeout; 2071 + } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) { 2080 2072 timer_active = 4; 2081 - timer_expires = tp->timeout; 2073 + timer_expires = icsk->icsk_timeout; 2082 2074 } else if (timer_pending(&sp->sk_timer)) { 2083 2075 timer_active = 2; 2084 2076 timer_expires = sp->sk_timer.expires; ··· 2094 2084 tp->write_seq - tp->snd_una, tp->rcv_nxt - tp->copied_seq, 2095 2085 timer_active, 2096 2086 jiffies_to_clock_t(timer_expires - jiffies), 2097 - tp->retransmits, 2087 + icsk->icsk_retransmits, 2098 2088 sock_i_uid(sp), 2099 
2089 tp->probes_out, 2100 2090 sock_i_ino(sp), 2101 2091 atomic_read(&sp->sk_refcnt), sp, 2102 - tp->rto, tp->ack.ato, (tp->ack.quick << 1) | tp->ack.pingpong, 2092 + icsk->icsk_rto, 2093 + icsk->icsk_ack.ato, 2094 + (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong, 2103 2095 tp->snd_cwnd, 2104 2096 tp->snd_ssthresh >= 0xFFFF ? -1 : tp->snd_ssthresh); 2105 2097 } ··· 2186 2174 .close = tcp_close, 2187 2175 .connect = tcp_v4_connect, 2188 2176 .disconnect = tcp_disconnect, 2189 - .accept = tcp_accept, 2177 + .accept = inet_csk_accept, 2190 2178 .ioctl = tcp_ioctl, 2191 2179 .init = tcp_v4_init_sock, 2192 2180 .destroy = tcp_v4_destroy_sock,
+15 -13
net/ipv4/tcp_minisocks.c
··· 271 271 272 272 if (tw != NULL) { 273 273 struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw); 274 - const int rto = (tp->rto << 2) - (tp->rto >> 1); 274 + const struct inet_connection_sock *icsk = inet_csk(sk); 275 + const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1); 275 276 276 277 tw->tw_rcv_wscale = tp->rx_opt.rcv_wscale; 277 278 tcptw->tw_rcv_nxt = tp->rcv_nxt; ··· 606 605 struct inet_request_sock *ireq = inet_rsk(req); 607 606 struct tcp_request_sock *treq = tcp_rsk(req); 608 607 struct inet_sock *newinet = inet_sk(newsk); 608 + struct inet_connection_sock *newicsk = inet_csk(newsk); 609 609 struct tcp_sock *newtp; 610 610 611 611 newsk->sk_state = TCP_SYN_RECV; 612 - newinet->bind_hash = NULL; 612 + newicsk->icsk_bind_hash = NULL; 613 613 614 614 /* Clone the TCP header template */ 615 615 newinet->dport = ireq->rmt_port; ··· 626 624 627 625 tcp_init_wl(newtp, treq->snt_isn, treq->rcv_isn); 628 626 629 - newtp->retransmits = 0; 630 - newtp->backoff = 0; 627 + newicsk->icsk_retransmits = 0; 628 + newicsk->icsk_backoff = 0; 631 629 newtp->srtt = 0; 632 630 newtp->mdev = TCP_TIMEOUT_INIT; 633 - newtp->rto = TCP_TIMEOUT_INIT; 631 + newicsk->icsk_rto = TCP_TIMEOUT_INIT; 634 632 635 633 newtp->packets_out = 0; 636 634 newtp->left_out = 0; ··· 669 667 newtp->rx_opt.num_sacks = 0; 670 668 newtp->urg_data = 0; 671 669 /* Deinitialize accept_queue to trap illegal accesses. 
*/ 672 - memset(&newtp->accept_queue, 0, sizeof(newtp->accept_queue)); 670 + memset(&newicsk->icsk_accept_queue, 0, sizeof(newicsk->icsk_accept_queue)); 673 671 674 672 if (sock_flag(newsk, SOCK_KEEPOPEN)) 675 - tcp_reset_keepalive_timer(newsk, 676 - keepalive_time_when(newtp)); 673 + inet_csk_reset_keepalive_timer(newsk, 674 + keepalive_time_when(newtp)); 677 675 678 676 newtp->rx_opt.tstamp_ok = ireq->tstamp_ok; 679 677 if((newtp->rx_opt.sack_ok = ireq->sack_ok) != 0) { ··· 703 701 newtp->tcp_header_len = sizeof(struct tcphdr); 704 702 } 705 703 if (skb->len >= TCP_MIN_RCVMSS+newtp->tcp_header_len) 706 - newtp->ack.last_seg_size = skb->len-newtp->tcp_header_len; 704 + newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len; 707 705 newtp->rx_opt.mss_clamp = req->mss; 708 706 TCP_ECN_openreq_child(newtp, req); 709 707 if (newtp->ecn_flags&TCP_ECN_OK) ··· 883 881 if (child == NULL) 884 882 goto listen_overflow; 885 883 886 - tcp_synq_unlink(tp, req, prev); 887 - tcp_synq_removed(sk, req); 884 + inet_csk_reqsk_queue_unlink(sk, req, prev); 885 + inet_csk_reqsk_queue_removed(sk, req); 888 886 889 - tcp_acceptq_queue(sk, req, child); 887 + inet_csk_reqsk_queue_add(sk, req, child); 890 888 return child; 891 889 892 890 listen_overflow: ··· 900 898 if (!(flg & TCP_FLAG_RST)) 901 899 req->rsk_ops->send_reset(skb); 902 900 903 - tcp_synq_drop(sk, req, prev); 901 + inet_csk_reqsk_queue_drop(sk, req, prev); 904 902 return NULL; 905 903 } 906 904
+46 -40
net/ipv4/tcp_output.c
··· 105 105 106 106 /* RFC2861. Reset CWND after idle period longer RTO to "restart window". 107 107 * This is the first part of cwnd validation mechanism. */ 108 - static void tcp_cwnd_restart(struct tcp_sock *tp, struct dst_entry *dst) 108 + static void tcp_cwnd_restart(struct sock *sk, struct dst_entry *dst) 109 109 { 110 + struct tcp_sock *tp = tcp_sk(sk); 110 111 s32 delta = tcp_time_stamp - tp->lsndtime; 111 112 u32 restart_cwnd = tcp_init_cwnd(tp, dst); 112 113 u32 cwnd = tp->snd_cwnd; ··· 117 116 tp->snd_ssthresh = tcp_current_ssthresh(tp); 118 117 restart_cwnd = min(restart_cwnd, cwnd); 119 118 120 - while ((delta -= tp->rto) > 0 && cwnd > restart_cwnd) 119 + while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd) 121 120 cwnd >>= 1; 122 121 tp->snd_cwnd = max(cwnd, restart_cwnd); 123 122 tp->snd_cwnd_stamp = tcp_time_stamp; ··· 127 126 static inline void tcp_event_data_sent(struct tcp_sock *tp, 128 127 struct sk_buff *skb, struct sock *sk) 129 128 { 130 - u32 now = tcp_time_stamp; 129 + struct inet_connection_sock *icsk = inet_csk(sk); 130 + const u32 now = tcp_time_stamp; 131 131 132 - if (!tp->packets_out && (s32)(now - tp->lsndtime) > tp->rto) 133 - tcp_cwnd_restart(tp, __sk_dst_get(sk)); 132 + if (!tp->packets_out && (s32)(now - tp->lsndtime) > icsk->icsk_rto) 133 + tcp_cwnd_restart(sk, __sk_dst_get(sk)); 134 134 135 135 tp->lsndtime = now; 136 136 137 137 /* If it is a reply for ato after last received 138 138 * packet, enter pingpong mode. 
139 139 */ 140 - if ((u32)(now - tp->ack.lrcvtime) < tp->ack.ato) 141 - tp->ack.pingpong = 1; 140 + if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato) 141 + icsk->icsk_ack.pingpong = 1; 142 142 } 143 143 144 144 static __inline__ void tcp_event_ack_sent(struct sock *sk, unsigned int pkts) 145 145 { 146 - struct tcp_sock *tp = tcp_sk(sk); 147 - 148 - tcp_dec_quickack_mode(tp, pkts); 149 - tcp_clear_xmit_timer(sk, TCP_TIME_DACK); 146 + tcp_dec_quickack_mode(sk, pkts); 147 + inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK); 150 148 } 151 149 152 150 /* Determine a window scaling and initial window to offer. ··· 696 696 if (tp->packets_out > tp->snd_cwnd_used) 697 697 tp->snd_cwnd_used = tp->packets_out; 698 698 699 - if ((s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= tp->rto) 699 + if ((s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto) 700 700 tcp_cwnd_application_limited(sk); 701 701 } 702 702 } ··· 1147 1147 */ 1148 1148 u32 __tcp_select_window(struct sock *sk) 1149 1149 { 1150 + struct inet_connection_sock *icsk = inet_csk(sk); 1150 1151 struct tcp_sock *tp = tcp_sk(sk); 1151 1152 /* MSS for the peer's data. Previous verions used mss_clamp 1152 1153 * here. I don't know if the value based on our guesses ··· 1155 1154 * but may be worse for the performance because of rcv_mss 1156 1155 * fluctuations. 
--SAW 1998/11/1 1157 1156 */ 1158 - int mss = tp->ack.rcv_mss; 1157 + int mss = icsk->icsk_ack.rcv_mss; 1159 1158 int free_space = tcp_space(sk); 1160 1159 int full_space = min_t(int, tp->window_clamp, tcp_full_space(sk)); 1161 1160 int window; ··· 1164 1163 mss = full_space; 1165 1164 1166 1165 if (free_space < full_space/2) { 1167 - tp->ack.quick = 0; 1166 + icsk->icsk_ack.quick = 0; 1168 1167 1169 1168 if (tcp_memory_pressure) 1170 1169 tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U*tp->advmss); ··· 1492 1491 1493 1492 if (skb == 1494 1493 skb_peek(&sk->sk_write_queue)) 1495 - tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto); 1494 + inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, 1495 + inet_csk(sk)->icsk_rto); 1496 1496 } 1497 1497 1498 1498 packet_cnt -= tcp_skb_pcount(skb); ··· 1546 1544 break; 1547 1545 1548 1546 if (skb == skb_peek(&sk->sk_write_queue)) 1549 - tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto); 1547 + inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, inet_csk(sk)->icsk_rto); 1550 1548 1551 1549 NET_INC_STATS_BH(LINUX_MIB_TCPFORWARDRETRANS); 1552 1550 } ··· 1782 1780 tp->rcv_wup = 0; 1783 1781 tp->copied_seq = 0; 1784 1782 1785 - tp->rto = TCP_TIMEOUT_INIT; 1786 - tp->retransmits = 0; 1783 + inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT; 1784 + inet_csk(sk)->icsk_retransmits = 0; 1787 1785 tcp_clear_retrans(tp); 1788 1786 } 1789 1787 ··· 1826 1824 TCP_INC_STATS(TCP_MIB_ACTIVEOPENS); 1827 1825 1828 1826 /* Timer for repeating the SYN until an answer. 
*/ 1829 - tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto); 1827 + inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, inet_csk(sk)->icsk_rto); 1830 1828 return 0; 1831 1829 } 1832 1830 ··· 1836 1834 */ 1837 1835 void tcp_send_delayed_ack(struct sock *sk) 1838 1836 { 1839 - struct tcp_sock *tp = tcp_sk(sk); 1840 - int ato = tp->ack.ato; 1837 + struct inet_connection_sock *icsk = inet_csk(sk); 1838 + int ato = icsk->icsk_ack.ato; 1841 1839 unsigned long timeout; 1842 1840 1843 1841 if (ato > TCP_DELACK_MIN) { 1842 + const struct tcp_sock *tp = tcp_sk(sk); 1844 1843 int max_ato = HZ/2; 1845 1844 1846 - if (tp->ack.pingpong || (tp->ack.pending&TCP_ACK_PUSHED)) 1845 + if (icsk->icsk_ack.pingpong || (icsk->icsk_ack.pending & ICSK_ACK_PUSHED)) 1847 1846 max_ato = TCP_DELACK_MAX; 1848 1847 1849 1848 /* Slow path, intersegment interval is "high". */ 1850 1849 1851 1850 /* If some rtt estimate is known, use it to bound delayed ack. 1852 - * Do not use tp->rto here, use results of rtt measurements 1851 + * Do not use inet_csk(sk)->icsk_rto here, use results of rtt measurements 1853 1852 * directly. 1854 1853 */ 1855 1854 if (tp->srtt) { ··· 1867 1864 timeout = jiffies + ato; 1868 1865 1869 1866 /* Use new timeout only if there wasn't a older one earlier. */ 1870 - if (tp->ack.pending&TCP_ACK_TIMER) { 1867 + if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) { 1871 1868 /* If delack timer was blocked or is about to expire, 1872 1869 * send ACK now. 
1873 1870 */ 1874 - if (tp->ack.blocked || time_before_eq(tp->ack.timeout, jiffies+(ato>>2))) { 1871 + if (icsk->icsk_ack.blocked || 1872 + time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) { 1875 1873 tcp_send_ack(sk); 1876 1874 return; 1877 1875 } 1878 1876 1879 - if (!time_before(timeout, tp->ack.timeout)) 1880 - timeout = tp->ack.timeout; 1877 + if (!time_before(timeout, icsk->icsk_ack.timeout)) 1878 + timeout = icsk->icsk_ack.timeout; 1881 1879 } 1882 - tp->ack.pending |= TCP_ACK_SCHED|TCP_ACK_TIMER; 1883 - tp->ack.timeout = timeout; 1884 - sk_reset_timer(sk, &tp->delack_timer, timeout); 1880 + icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER; 1881 + icsk->icsk_ack.timeout = timeout; 1882 + sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout); 1885 1883 } 1886 1884 1887 1885 /* This routine sends an ack and also updates the window. */ ··· 1899 1895 */ 1900 1896 buff = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC); 1901 1897 if (buff == NULL) { 1902 - tcp_schedule_ack(tp); 1903 - tp->ack.ato = TCP_ATO_MIN; 1904 - tcp_reset_xmit_timer(sk, TCP_TIME_DACK, TCP_DELACK_MAX); 1898 + inet_csk_schedule_ack(sk); 1899 + inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN; 1900 + inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, TCP_DELACK_MAX); 1905 1901 return; 1906 1902 } 1907 1903 ··· 2015 2011 */ 2016 2012 void tcp_send_probe0(struct sock *sk) 2017 2013 { 2014 + struct inet_connection_sock *icsk = inet_csk(sk); 2018 2015 struct tcp_sock *tp = tcp_sk(sk); 2019 2016 int err; 2020 2017 ··· 2024 2019 if (tp->packets_out || !sk->sk_send_head) { 2025 2020 /* Cancel probe timer, if it is not required. 
*/ 2026 2021 tp->probes_out = 0; 2027 - tp->backoff = 0; 2022 + icsk->icsk_backoff = 0; 2028 2023 return; 2029 2024 } 2030 2025 2031 2026 if (err <= 0) { 2032 - if (tp->backoff < sysctl_tcp_retries2) 2033 - tp->backoff++; 2027 + if (icsk->icsk_backoff < sysctl_tcp_retries2) 2028 + icsk->icsk_backoff++; 2034 2029 tp->probes_out++; 2035 - tcp_reset_xmit_timer (sk, TCP_TIME_PROBE0, 2036 - min(tp->rto << tp->backoff, TCP_RTO_MAX)); 2030 + inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, 2031 + min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX)); 2037 2032 } else { 2038 2033 /* If packet was not sent due to local congestion, 2039 2034 * do not backoff and do not remember probes_out. ··· 2043 2038 */ 2044 2039 if (!tp->probes_out) 2045 2040 tp->probes_out=1; 2046 - tcp_reset_xmit_timer (sk, TCP_TIME_PROBE0, 2047 - min(tp->rto << tp->backoff, TCP_RESOURCE_PROBE_INTERVAL)); 2041 + inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, 2042 + min(icsk->icsk_rto << icsk->icsk_backoff, 2043 + TCP_RESOURCE_PROBE_INTERVAL)); 2048 2044 } 2049 2045 } 2050 2046
+94 -85
net/ipv4/tcp_timer.c
··· 36 36 static void tcp_delack_timer(unsigned long); 37 37 static void tcp_keepalive_timer (unsigned long data); 38 38 39 - #ifdef TCP_DEBUG 40 - const char tcp_timer_bug_msg[] = KERN_DEBUG "tcpbug: unknown timer value\n"; 41 - EXPORT_SYMBOL(tcp_timer_bug_msg); 39 + #ifdef INET_CSK_DEBUG 40 + const char inet_csk_timer_bug_msg[] = "inet_csk BUG: unknown timer value\n"; 41 + EXPORT_SYMBOL(inet_csk_timer_bug_msg); 42 42 #endif 43 43 44 44 /* ··· 46 46 * We may wish use just one timer maintaining a list of expire jiffies 47 47 * to optimize. 48 48 */ 49 + void inet_csk_init_xmit_timers(struct sock *sk, 50 + void (*retransmit_handler)(unsigned long), 51 + void (*delack_handler)(unsigned long), 52 + void (*keepalive_handler)(unsigned long)) 53 + { 54 + struct inet_connection_sock *icsk = inet_csk(sk); 55 + 56 + init_timer(&icsk->icsk_retransmit_timer); 57 + init_timer(&icsk->icsk_delack_timer); 58 + init_timer(&sk->sk_timer); 59 + 60 + icsk->icsk_retransmit_timer.function = retransmit_handler; 61 + icsk->icsk_delack_timer.function = delack_handler; 62 + sk->sk_timer.function = keepalive_handler; 63 + 64 + icsk->icsk_retransmit_timer.data = 65 + icsk->icsk_delack_timer.data = 66 + sk->sk_timer.data = (unsigned long)sk; 67 + 68 + icsk->icsk_pending = icsk->icsk_ack.pending = 0; 69 + } 70 + 71 + void inet_csk_clear_xmit_timers(struct sock *sk) 72 + { 73 + struct inet_connection_sock *icsk = inet_csk(sk); 74 + 75 + icsk->icsk_pending = icsk->icsk_ack.pending = icsk->icsk_ack.blocked = 0; 76 + 77 + sk_stop_timer(sk, &icsk->icsk_retransmit_timer); 78 + sk_stop_timer(sk, &icsk->icsk_delack_timer); 79 + sk_stop_timer(sk, &sk->sk_timer); 80 + } 49 81 50 82 void tcp_init_xmit_timers(struct sock *sk) 51 83 { 52 - struct tcp_sock *tp = tcp_sk(sk); 53 - 54 - init_timer(&tp->retransmit_timer); 55 - tp->retransmit_timer.function=&tcp_write_timer; 56 - tp->retransmit_timer.data = (unsigned long) sk; 57 - tp->pending = 0; 58 - 59 - init_timer(&tp->delack_timer); 60 - 
tp->delack_timer.function=&tcp_delack_timer; 61 - tp->delack_timer.data = (unsigned long) sk; 62 - tp->ack.pending = 0; 63 - 64 - init_timer(&sk->sk_timer); 65 - sk->sk_timer.function = &tcp_keepalive_timer; 66 - sk->sk_timer.data = (unsigned long)sk; 67 - } 68 - 69 - void tcp_clear_xmit_timers(struct sock *sk) 70 - { 71 - struct tcp_sock *tp = tcp_sk(sk); 72 - 73 - tp->pending = 0; 74 - sk_stop_timer(sk, &tp->retransmit_timer); 75 - 76 - tp->ack.pending = 0; 77 - tp->ack.blocked = 0; 78 - sk_stop_timer(sk, &tp->delack_timer); 79 - 80 - sk_stop_timer(sk, &sk->sk_timer); 84 + inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer, 85 + &tcp_keepalive_timer); 81 86 } 82 87 83 88 static void tcp_write_err(struct sock *sk) ··· 160 155 /* A write timeout has occurred. Process the after effects. */ 161 156 static int tcp_write_timeout(struct sock *sk) 162 157 { 163 - struct tcp_sock *tp = tcp_sk(sk); 158 + const struct inet_connection_sock *icsk = inet_csk(sk); 164 159 int retry_until; 165 160 166 161 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) { 167 - if (tp->retransmits) 162 + if (icsk->icsk_retransmits) 168 163 dst_negative_advice(&sk->sk_dst_cache); 169 - retry_until = tp->syn_retries ? : sysctl_tcp_syn_retries; 164 + retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries; 170 165 } else { 171 - if (tp->retransmits >= sysctl_tcp_retries1) { 166 + if (icsk->icsk_retransmits >= sysctl_tcp_retries1) { 172 167 /* NOTE. draft-ietf-tcpimpl-pmtud-01.txt requires pmtu black 173 168 hole detection. 
:-( 174 169 ··· 194 189 195 190 retry_until = sysctl_tcp_retries2; 196 191 if (sock_flag(sk, SOCK_DEAD)) { 197 - int alive = (tp->rto < TCP_RTO_MAX); 192 + const int alive = (icsk->icsk_rto < TCP_RTO_MAX); 198 193 199 194 retry_until = tcp_orphan_retries(sk, alive); 200 195 201 - if (tcp_out_of_resources(sk, alive || tp->retransmits < retry_until)) 196 + if (tcp_out_of_resources(sk, alive || icsk->icsk_retransmits < retry_until)) 202 197 return 1; 203 198 } 204 199 } 205 200 206 - if (tp->retransmits >= retry_until) { 201 + if (icsk->icsk_retransmits >= retry_until) { 207 202 /* Has it gone just too far? */ 208 203 tcp_write_err(sk); 209 204 return 1; ··· 215 210 { 216 211 struct sock *sk = (struct sock*)data; 217 212 struct tcp_sock *tp = tcp_sk(sk); 213 + struct inet_connection_sock *icsk = inet_csk(sk); 218 214 219 215 bh_lock_sock(sk); 220 216 if (sock_owned_by_user(sk)) { 221 217 /* Try again later. */ 222 - tp->ack.blocked = 1; 218 + icsk->icsk_ack.blocked = 1; 223 219 NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOCKED); 224 - sk_reset_timer(sk, &tp->delack_timer, jiffies + TCP_DELACK_MIN); 220 + sk_reset_timer(sk, &icsk->icsk_delack_timer, jiffies + TCP_DELACK_MIN); 225 221 goto out_unlock; 226 222 } 227 223 228 224 sk_stream_mem_reclaim(sk); 229 225 230 - if (sk->sk_state == TCP_CLOSE || !(tp->ack.pending & TCP_ACK_TIMER)) 226 + if (sk->sk_state == TCP_CLOSE || !(icsk->icsk_ack.pending & ICSK_ACK_TIMER)) 231 227 goto out; 232 228 233 - if (time_after(tp->ack.timeout, jiffies)) { 234 - sk_reset_timer(sk, &tp->delack_timer, tp->ack.timeout); 229 + if (time_after(icsk->icsk_ack.timeout, jiffies)) { 230 + sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout); 235 231 goto out; 236 232 } 237 - tp->ack.pending &= ~TCP_ACK_TIMER; 233 + icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER; 238 234 239 235 if (!skb_queue_empty(&tp->ucopy.prequeue)) { 240 236 struct sk_buff *skb; ··· 248 242 tp->ucopy.memory = 0; 249 243 } 250 244 251 - if (tcp_ack_scheduled(tp)) { 252 
- if (!tp->ack.pingpong) { 245 + if (inet_csk_ack_scheduled(sk)) { 246 + if (!icsk->icsk_ack.pingpong) { 253 247 /* Delayed ACK missed: inflate ATO. */ 254 - tp->ack.ato = min(tp->ack.ato << 1, tp->rto); 248 + icsk->icsk_ack.ato = min(icsk->icsk_ack.ato << 1, icsk->icsk_rto); 255 249 } else { 256 250 /* Delayed ACK missed: leave pingpong mode and 257 251 * deflate ATO. 258 252 */ 259 - tp->ack.pingpong = 0; 260 - tp->ack.ato = TCP_ATO_MIN; 253 + icsk->icsk_ack.pingpong = 0; 254 + icsk->icsk_ack.ato = TCP_ATO_MIN; 261 255 } 262 256 tcp_send_ack(sk); 263 257 NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKS); ··· 300 294 max_probes = sysctl_tcp_retries2; 301 295 302 296 if (sock_flag(sk, SOCK_DEAD)) { 303 - int alive = ((tp->rto<<tp->backoff) < TCP_RTO_MAX); 297 + const struct inet_connection_sock *icsk = inet_csk(sk); 298 + const int alive = ((icsk->icsk_rto << icsk->icsk_backoff) < TCP_RTO_MAX); 304 299 305 300 max_probes = tcp_orphan_retries(sk, alive); 306 301 ··· 324 317 static void tcp_retransmit_timer(struct sock *sk) 325 318 { 326 319 struct tcp_sock *tp = tcp_sk(sk); 320 + struct inet_connection_sock *icsk = inet_csk(sk); 327 321 328 322 if (!tp->packets_out) 329 323 goto out; ··· 359 351 if (tcp_write_timeout(sk)) 360 352 goto out; 361 353 362 - if (tp->retransmits == 0) { 354 + if (icsk->icsk_retransmits == 0) { 363 355 if (tp->ca_state == TCP_CA_Disorder || tp->ca_state == TCP_CA_Recovery) { 364 356 if (tp->rx_opt.sack_ok) { 365 357 if (tp->ca_state == TCP_CA_Recovery) ··· 389 381 /* Retransmission failed because of local congestion, 390 382 * do not backoff. 
391 383 */ 392 - if (!tp->retransmits) 393 - tp->retransmits=1; 394 - tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, 395 - min(tp->rto, TCP_RESOURCE_PROBE_INTERVAL)); 384 + if (!icsk->icsk_retransmits) 385 + icsk->icsk_retransmits = 1; 386 + inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, 387 + min(icsk->icsk_rto, TCP_RESOURCE_PROBE_INTERVAL)); 396 388 goto out; 397 389 } 398 390 ··· 411 403 * implemented ftp to mars will work nicely. We will have to fix 412 404 * the 120 second clamps though! 413 405 */ 414 - tp->backoff++; 415 - tp->retransmits++; 406 + icsk->icsk_backoff++; 407 + icsk->icsk_retransmits++; 416 408 417 409 out_reset_timer: 418 - tp->rto = min(tp->rto << 1, TCP_RTO_MAX); 419 - tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto); 420 - if (tp->retransmits > sysctl_tcp_retries1) 410 + icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX); 411 + inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto); 412 + if (icsk->icsk_retransmits > sysctl_tcp_retries1) 421 413 __sk_dst_reset(sk); 422 414 423 415 out:; ··· 426 418 static void tcp_write_timer(unsigned long data) 427 419 { 428 420 struct sock *sk = (struct sock*)data; 429 - struct tcp_sock *tp = tcp_sk(sk); 421 + struct inet_connection_sock *icsk = inet_csk(sk); 430 422 int event; 431 423 432 424 bh_lock_sock(sk); 433 425 if (sock_owned_by_user(sk)) { 434 426 /* Try again later */ 435 - sk_reset_timer(sk, &tp->retransmit_timer, jiffies + (HZ / 20)); 427 + sk_reset_timer(sk, &icsk->icsk_retransmit_timer, jiffies + (HZ / 20)); 436 428 goto out_unlock; 437 429 } 438 430 439 - if (sk->sk_state == TCP_CLOSE || !tp->pending) 431 + if (sk->sk_state == TCP_CLOSE || !icsk->icsk_pending) 440 432 goto out; 441 433 442 - if (time_after(tp->timeout, jiffies)) { 443 - sk_reset_timer(sk, &tp->retransmit_timer, tp->timeout); 434 + if (time_after(icsk->icsk_timeout, jiffies)) { 435 + sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout); 444 436 goto out; 445 437 } 446 438 447 - event = 
tp->pending; 448 - tp->pending = 0; 439 + event = icsk->icsk_pending; 440 + icsk->icsk_pending = 0; 449 441 450 442 switch (event) { 451 - case TCP_TIME_RETRANS: 443 + case ICSK_TIME_RETRANS: 452 444 tcp_retransmit_timer(sk); 453 445 break; 454 - case TCP_TIME_PROBE0: 446 + case ICSK_TIME_PROBE0: 455 447 tcp_probe_timer(sk); 456 448 break; 457 449 } ··· 471 463 static void tcp_synack_timer(struct sock *sk) 472 464 { 473 465 struct tcp_sock *tp = tcp_sk(sk); 474 - struct listen_sock *lopt = tp->accept_queue.listen_opt; 475 - int max_retries = tp->syn_retries ? : sysctl_tcp_synack_retries; 466 + struct inet_connection_sock *icsk = inet_csk(sk); 467 + struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt; 468 + int max_retries = icsk->icsk_syn_retries ? : sysctl_tcp_synack_retries; 476 469 int thresh = max_retries; 477 470 unsigned long now = jiffies; 478 471 struct request_sock **reqp, *req; ··· 535 526 } 536 527 537 528 /* Drop this request */ 538 - tcp_synq_unlink(tp, req, reqp); 539 - reqsk_queue_removed(&tp->accept_queue, req); 529 + inet_csk_reqsk_queue_unlink(sk, req, reqp); 530 + reqsk_queue_removed(&icsk->icsk_accept_queue, req); 540 531 reqsk_free(req); 541 532 continue; 542 533 } ··· 550 541 lopt->clock_hand = i; 551 542 552 543 if (lopt->qlen) 553 - tcp_reset_keepalive_timer(sk, TCP_SYNQ_INTERVAL); 544 + inet_csk_reset_keepalive_timer(sk, TCP_SYNQ_INTERVAL); 554 545 } 555 546 556 - void tcp_delete_keepalive_timer (struct sock *sk) 547 + void inet_csk_delete_keepalive_timer(struct sock *sk) 557 548 { 558 549 sk_stop_timer(sk, &sk->sk_timer); 559 550 } 560 551 561 - void tcp_reset_keepalive_timer (struct sock *sk, unsigned long len) 552 + void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long len) 562 553 { 563 554 sk_reset_timer(sk, &sk->sk_timer, jiffies + len); 564 555 } ··· 569 560 return; 570 561 571 562 if (val && !sock_flag(sk, SOCK_KEEPOPEN)) 572 - tcp_reset_keepalive_timer(sk, keepalive_time_when(tcp_sk(sk))); 563 + 
inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tcp_sk(sk))); 573 564 else if (!val) 574 - tcp_delete_keepalive_timer(sk); 565 + inet_csk_delete_keepalive_timer(sk); 575 566 } 576 567 577 568 ··· 585 576 bh_lock_sock(sk); 586 577 if (sock_owned_by_user(sk)) { 587 578 /* Try again later. */ 588 - tcp_reset_keepalive_timer (sk, HZ/20); 579 + inet_csk_reset_keepalive_timer (sk, HZ/20); 589 580 goto out; 590 581 } 591 582 ··· 596 587 597 588 if (sk->sk_state == TCP_FIN_WAIT2 && sock_flag(sk, SOCK_DEAD)) { 598 589 if (tp->linger2 >= 0) { 599 - int tmo = tcp_fin_time(tp) - TCP_TIMEWAIT_LEN; 590 + const int tmo = tcp_fin_time(sk) - TCP_TIMEWAIT_LEN; 600 591 601 592 if (tmo > 0) { 602 593 tcp_time_wait(sk, TCP_FIN_WAIT2, tmo); ··· 643 634 sk_stream_mem_reclaim(sk); 644 635 645 636 resched: 646 - tcp_reset_keepalive_timer (sk, elapsed); 637 + inet_csk_reset_keepalive_timer (sk, elapsed); 647 638 goto out; 648 639 649 640 death: ··· 654 645 sock_put(sk); 655 646 } 656 647 657 - EXPORT_SYMBOL(tcp_clear_xmit_timers); 658 - EXPORT_SYMBOL(tcp_delete_keepalive_timer); 648 + EXPORT_SYMBOL(inet_csk_clear_xmit_timers); 649 + EXPORT_SYMBOL(inet_csk_delete_keepalive_timer); 659 650 EXPORT_SYMBOL(tcp_init_xmit_timers); 660 - EXPORT_SYMBOL(tcp_reset_keepalive_timer); 651 + EXPORT_SYMBOL(inet_csk_reset_keepalive_timer);
+1 -1
net/ipv6/addrconf.c
··· 1043 1043 u32 sk_rcv_saddr = inet_sk(sk)->rcv_saddr; 1044 1044 u32 sk2_rcv_saddr = inet_rcv_saddr(sk2); 1045 1045 int sk_ipv6only = ipv6_only_sock(sk); 1046 - int sk2_ipv6only = tcp_v6_ipv6only(sk2); 1046 + int sk2_ipv6only = inet_v6_ipv6only(sk2); 1047 1047 int addr_type = ipv6_addr_type(sk_rcv_saddr6); 1048 1048 int addr_type2 = sk2_rcv_saddr6 ? ipv6_addr_type(sk2_rcv_saddr6) : IPV6_ADDR_MAPPED; 1049 1049
+29 -25
net/ipv6/tcp_ipv6.c
··· 207 207 tb->fastreuse = 0; 208 208 209 209 success: 210 - if (!inet_sk(sk)->bind_hash) 210 + if (!inet_csk(sk)->icsk_bind_hash) 211 211 inet_bind_hash(sk, tb, snum); 212 - BUG_TRAP(inet_sk(sk)->bind_hash == tb); 212 + BUG_TRAP(inet_csk(sk)->icsk_bind_hash == tb); 213 213 ret = 0; 214 214 215 215 fail_unlock: ··· 381 381 * Open request hash tables. 382 382 */ 383 383 384 - static u32 tcp_v6_synq_hash(struct in6_addr *raddr, u16 rport, u32 rnd) 384 + static u32 tcp_v6_synq_hash(const struct in6_addr *raddr, const u16 rport, const u32 rnd) 385 385 { 386 386 u32 a, b, c; 387 387 ··· 401 401 return c & (TCP_SYNQ_HSIZE - 1); 402 402 } 403 403 404 - static struct request_sock *tcp_v6_search_req(struct tcp_sock *tp, 404 + static struct request_sock *tcp_v6_search_req(const struct sock *sk, 405 405 struct request_sock ***prevp, 406 406 __u16 rport, 407 407 struct in6_addr *raddr, 408 408 struct in6_addr *laddr, 409 409 int iif) 410 410 { 411 - struct listen_sock *lopt = tp->accept_queue.listen_opt; 411 + const struct inet_connection_sock *icsk = inet_csk(sk); 412 + struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt; 412 413 struct request_sock *req, **prev; 413 414 414 415 for (prev = &lopt->syn_table[tcp_v6_synq_hash(raddr, rport, lopt->hash_rnd)]; ··· 620 619 } 621 620 622 621 head = &tcp_hashinfo.bhash[inet_bhashfn(snum, tcp_hashinfo.bhash_size)]; 623 - tb = inet_sk(sk)->bind_hash; 622 + tb = inet_csk(sk)->icsk_bind_hash; 624 623 spin_lock_bh(&head->lock); 625 624 626 625 if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) { ··· 926 925 if (sock_owned_by_user(sk)) 927 926 goto out; 928 927 929 - req = tcp_v6_search_req(tp, &prev, th->dest, &hdr->daddr, 928 + req = tcp_v6_search_req(sk, &prev, th->dest, &hdr->daddr, 930 929 &hdr->saddr, tcp_v6_iif(skb)); 931 930 if (!req) 932 931 goto out; ··· 941 940 goto out; 942 941 } 943 942 944 - tcp_synq_drop(sk, req, prev); 943 + inet_csk_reqsk_queue_drop(sk, req, prev); 945 944 goto out; 946 945 947 946 case 
TCP_SYN_SENT: ··· 1246 1245 { 1247 1246 struct request_sock *req, **prev; 1248 1247 struct tcphdr *th = skb->h.th; 1249 - struct tcp_sock *tp = tcp_sk(sk); 1250 1248 struct sock *nsk; 1251 1249 1252 1250 /* Find possible connection requests. */ 1253 - req = tcp_v6_search_req(tp, &prev, th->source, &skb->nh.ipv6h->saddr, 1251 + req = tcp_v6_search_req(sk, &prev, th->source, &skb->nh.ipv6h->saddr, 1254 1252 &skb->nh.ipv6h->daddr, tcp_v6_iif(skb)); 1255 1253 if (req) 1256 1254 return tcp_check_req(sk, skb, req, prev); ··· 1278 1278 1279 1279 static void tcp_v6_synq_add(struct sock *sk, struct request_sock *req) 1280 1280 { 1281 - struct tcp_sock *tp = tcp_sk(sk); 1282 - struct listen_sock *lopt = tp->accept_queue.listen_opt; 1283 - u32 h = tcp_v6_synq_hash(&tcp6_rsk(req)->rmt_addr, inet_rsk(req)->rmt_port, lopt->hash_rnd); 1281 + struct inet_connection_sock *icsk = inet_csk(sk); 1282 + struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt; 1283 + const u32 h = tcp_v6_synq_hash(&tcp6_rsk(req)->rmt_addr, inet_rsk(req)->rmt_port, lopt->hash_rnd); 1284 1284 1285 - reqsk_queue_hash_req(&tp->accept_queue, h, req, TCP_TIMEOUT_INIT); 1286 - tcp_synq_added(sk); 1285 + reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, TCP_TIMEOUT_INIT); 1286 + inet_csk_reqsk_queue_added(sk, TCP_TIMEOUT_INIT); 1287 1287 } 1288 1288 1289 1289 ··· 1308 1308 /* 1309 1309 * There are no SYN attacks on IPv6, yet... 
1310 1310 */ 1311 - if (tcp_synq_is_full(sk) && !isn) { 1311 + if (inet_csk_reqsk_queue_is_full(sk) && !isn) { 1312 1312 if (net_ratelimit()) 1313 1313 printk(KERN_INFO "TCPv6: dropping request, synflood is possible\n"); 1314 1314 goto drop; 1315 1315 } 1316 1316 1317 - if (sk_acceptq_is_full(sk) && tcp_synq_young(sk) > 1) 1317 + if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) 1318 1318 goto drop; 1319 1319 1320 1320 req = reqsk_alloc(&tcp6_request_sock_ops); ··· 2015 2015 tcp_init_xmit_timers(sk); 2016 2016 tcp_prequeue_init(tp); 2017 2017 2018 - tp->rto = TCP_TIMEOUT_INIT; 2018 + inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT; 2019 2019 tp->mdev = TCP_TIMEOUT_INIT; 2020 2020 2021 2021 /* So many TCP implementations out there (incorrectly) count the ··· 2098 2098 unsigned long timer_expires; 2099 2099 struct inet_sock *inet = inet_sk(sp); 2100 2100 struct tcp_sock *tp = tcp_sk(sp); 2101 + const struct inet_connection_sock *icsk = inet_csk(sp); 2101 2102 struct ipv6_pinfo *np = inet6_sk(sp); 2102 2103 2103 2104 dest = &np->daddr; 2104 2105 src = &np->rcv_saddr; 2105 2106 destp = ntohs(inet->dport); 2106 2107 srcp = ntohs(inet->sport); 2107 - if (tp->pending == TCP_TIME_RETRANS) { 2108 + 2109 + if (icsk->icsk_pending == ICSK_TIME_RETRANS) { 2108 2110 timer_active = 1; 2109 - timer_expires = tp->timeout; 2110 - } else if (tp->pending == TCP_TIME_PROBE0) { 2111 + timer_expires = icsk->icsk_timeout; 2112 + } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) { 2111 2113 timer_active = 4; 2112 - timer_expires = tp->timeout; 2114 + timer_expires = icsk->icsk_timeout; 2113 2115 } else if (timer_pending(&sp->sk_timer)) { 2114 2116 timer_active = 2; 2115 2117 timer_expires = sp->sk_timer.expires; ··· 2132 2130 tp->write_seq-tp->snd_una, tp->rcv_nxt-tp->copied_seq, 2133 2131 timer_active, 2134 2132 jiffies_to_clock_t(timer_expires - jiffies), 2135 - tp->retransmits, 2133 + icsk->icsk_retransmits, 2136 2134 sock_i_uid(sp), 2137 2135 tp->probes_out, 2138 2136 
sock_i_ino(sp), 2139 2137 atomic_read(&sp->sk_refcnt), sp, 2140 - tp->rto, tp->ack.ato, (tp->ack.quick<<1)|tp->ack.pingpong, 2138 + icsk->icsk_rto, 2139 + icsk->icsk_ack.ato, 2140 + (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong, 2141 2141 tp->snd_cwnd, tp->snd_ssthresh>=0xFFFF?-1:tp->snd_ssthresh 2142 2142 ); 2143 2143 } ··· 2231 2227 .close = tcp_close, 2232 2228 .connect = tcp_v6_connect, 2233 2229 .disconnect = tcp_disconnect, 2234 - .accept = tcp_accept, 2230 + .accept = inet_csk_accept, 2235 2231 .ioctl = tcp_ioctl, 2236 2232 .init = tcp_v6_init_sock, 2237 2233 .destroy = tcp_v6_destroy_sock,