Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

tcp: more struct tcp_sock adjustments

tp->recvmsg_inq is read from tcp_recvmsg() and thus should
be in the tcp_sock_read_rx group.

tp->tcp_clock_cache and tp->tcp_mstamp are written
in both the rx and tx paths, so they are better placed
in the tcp_sock_write_txrx group.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by Eric Dumazet and committed by David S. Miller
d2c3a7eb 7c7be683

+7 -7
+3 -3
include/linux/tcp.h
··· 244 244 /* OOO segments go in this rbtree. Socket lock must be held. */ 245 245 struct rb_root out_of_order_queue; 246 246 u32 snd_ssthresh; /* Slow start size threshold */ 247 + u8 recvmsg_inq : 1;/* Indicate # of bytes in queue upon recvmsg */ 247 248 __cacheline_group_end(tcp_sock_read_rx); 248 249 249 250 /* TX read-write hotpath cache lines */ ··· 267 266 u32 mdev_us; /* medium deviation */ 268 267 u32 rtt_seq; /* sequence number to update rttvar */ 269 268 u64 tcp_wstamp_ns; /* departure time for next sent data packet */ 270 - u64 tcp_clock_cache; /* cache last tcp_clock_ns() (see tcp_mstamp_refresh()) */ 271 - u64 tcp_mstamp; /* most recent packet received/sent */ 272 269 struct list_head tsorted_sent_queue; /* time-sorted sent but un-SACKed skbs */ 273 270 struct sk_buff *highest_sack; /* skb just after the highest 274 271 * skb with SACKed bit set ··· 283 284 * 0x5?10 << 16 + snd_wnd in net byte order 284 285 */ 285 286 __be32 pred_flags; 287 + u64 tcp_clock_cache; /* cache last tcp_clock_ns() (see tcp_mstamp_refresh()) */ 288 + u64 tcp_mstamp; /* most recent packet received/sent */ 286 289 u32 rcv_nxt; /* What we want to receive next */ 287 290 u32 snd_nxt; /* Next sequence we send */ 288 291 u32 snd_una; /* First byte we want an ack for */ ··· 371 370 tlp_retrans:1, /* TLP is a retransmission */ 372 371 unused:5; 373 372 u8 thin_lto : 1,/* Use linear timeouts for thin streams */ 374 - recvmsg_inq : 1,/* Indicate # of bytes in queue upon recvmsg */ 375 373 fastopen_connect:1, /* FASTOPEN_CONNECT sockopt */ 376 374 fastopen_no_cookie:1, /* Allow send/recv SYN+data without a cookie */ 377 375 fastopen_client_fail:2, /* reason why fastopen failed */
+4 -4
net/ipv4/tcp.c
··· 4648 4648 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, lsndtime); 4649 4649 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, mdev_us); 4650 4650 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, tcp_wstamp_ns); 4651 - CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, tcp_clock_cache); 4652 - CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, tcp_mstamp); 4653 4651 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, rtt_seq); 4654 4652 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, tsorted_sent_queue); 4655 4653 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, highest_sack); 4656 4654 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, ecn_flags); 4657 - CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_write_tx, 105); 4655 + CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_write_tx, 89); 4658 4656 4659 4657 /* TXRX read-write hotpath cache lines */ 4660 4658 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, pred_flags); 4659 + CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, tcp_clock_cache); 4660 + CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, tcp_mstamp); 4661 4661 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, rcv_nxt); 4662 4662 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, snd_nxt); 4663 4663 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, snd_una); ··· 4670 4670 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, app_limited); 4671 4671 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, rcv_wnd); 4672 4672 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, rx_opt); 4673 - CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_write_txrx, 76); 4673 + CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_write_txrx, 92); 4674 4674 
4675 4675 /* RX read-write hotpath cache lines */ 4676 4676 CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, bytes_received);