Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

tcp: add const qualifiers where possible

Adding const qualifiers to pointers can ease code review, and spot some
bugs. It might allow compiler to optimize code further.

For example, is it legal to temporarily write a null cksum into tcphdr
in tcp_md5_hash_header()? I am afraid a sniffer could catch the
temporary null value...

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by Eric Dumazet and committed by David S. Miller
cf533ea5 f04565dd

+160 -152
+1 -1
include/net/secure_seq.h
··· 10 10 __be16 dport); 11 11 extern __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr, 12 12 __be16 sport, __be16 dport); 13 - extern __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr, 13 + extern __u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr, 14 14 __be16 sport, __be16 dport); 15 15 extern u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr, 16 16 __be16 sport, __be16 dport);
+23 -20
include/net/tcp.h
··· 327 327 size_t size, int flags); 328 328 extern int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg); 329 329 extern int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, 330 - struct tcphdr *th, unsigned len); 330 + const struct tcphdr *th, unsigned int len); 331 331 extern int tcp_rcv_established(struct sock *sk, struct sk_buff *skb, 332 - struct tcphdr *th, unsigned len); 332 + const struct tcphdr *th, unsigned int len); 333 333 extern void tcp_rcv_space_adjust(struct sock *sk); 334 334 extern void tcp_cleanup_rbuf(struct sock *sk, int copied); 335 335 extern int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp); ··· 401 401 extern void tcp_syn_ack_timeout(struct sock *sk, struct request_sock *req); 402 402 extern int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, 403 403 size_t len, int nonblock, int flags, int *addr_len); 404 - extern void tcp_parse_options(struct sk_buff *skb, 405 - struct tcp_options_received *opt_rx, u8 **hvpp, 404 + extern void tcp_parse_options(const struct sk_buff *skb, 405 + struct tcp_options_received *opt_rx, const u8 **hvpp, 406 406 int estab); 407 - extern u8 *tcp_parse_md5sig_option(struct tcphdr *th); 407 + extern const u8 *tcp_parse_md5sig_option(const struct tcphdr *th); 408 408 409 409 /* 410 410 * TCP v4 functions exported for the inet6 API ··· 450 450 /* From net/ipv6/syncookies.c */ 451 451 extern struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb); 452 452 #ifdef CONFIG_SYN_COOKIES 453 - extern __u32 cookie_v6_init_sequence(struct sock *sk, struct sk_buff *skb, 453 + extern __u32 cookie_v6_init_sequence(struct sock *sk, const struct sk_buff *skb, 454 454 __u16 *mss); 455 455 #else 456 456 static inline __u32 cookie_v6_init_sequence(struct sock *sk, ··· 522 522 } 523 523 524 524 /* tcp.c */ 525 - extern void tcp_get_info(struct sock *, struct tcp_info *); 525 + extern void tcp_get_info(const struct sock *, struct tcp_info *); 526 526 527 527 /* Read 
'sendfile()'-style from a TCP socket */ 528 528 typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *, ··· 532 532 533 533 extern void tcp_initialize_rcv_mss(struct sock *sk); 534 534 535 - extern int tcp_mtu_to_mss(struct sock *sk, int pmtu); 536 - extern int tcp_mss_to_mtu(struct sock *sk, int mss); 535 + extern int tcp_mtu_to_mss(const struct sock *sk, int pmtu); 536 + extern int tcp_mss_to_mtu(const struct sock *sk, int mss); 537 537 extern void tcp_mtup_init(struct sock *sk); 538 538 extern void tcp_valid_rtt_meas(struct sock *sk, u32 seq_rtt); 539 539 ··· 574 574 /* Compute the actual rto_min value */ 575 575 static inline u32 tcp_rto_min(struct sock *sk) 576 576 { 577 - struct dst_entry *dst = __sk_dst_get(sk); 577 + const struct dst_entry *dst = __sk_dst_get(sk); 578 578 u32 rto_min = TCP_RTO_MIN; 579 579 580 580 if (dst && dst_metric_locked(dst, RTAX_RTO_MIN)) ··· 820 820 static inline __u32 tcp_current_ssthresh(const struct sock *sk) 821 821 { 822 822 const struct tcp_sock *tp = tcp_sk(sk); 823 + 823 824 if ((1 << inet_csk(sk)->icsk_ca_state) & (TCPF_CA_CWR | TCPF_CA_Recovery)) 824 825 return tp->snd_ssthresh; 825 826 else ··· 833 832 #define tcp_verify_left_out(tp) WARN_ON(tcp_left_out(tp) > tp->packets_out) 834 833 835 834 extern void tcp_enter_cwr(struct sock *sk, const int set_ssthresh); 836 - extern __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst); 835 + extern __u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst); 837 836 838 837 /* Slow start with delack produces 3 packets of burst, so that 839 838 * it is safe "de facto". 
This will be the default - same as ··· 862 861 863 862 static inline void tcp_check_probe_timer(struct sock *sk) 864 863 { 865 - struct tcp_sock *tp = tcp_sk(sk); 864 + const struct tcp_sock *tp = tcp_sk(sk); 866 865 const struct inet_connection_sock *icsk = inet_csk(sk); 867 866 868 867 if (!tp->packets_out && !icsk->icsk_pending) ··· 1210 1209 extern void tcp_put_md5sig_pool(void); 1211 1210 1212 1211 extern int tcp_md5_hash_header(struct tcp_md5sig_pool *, struct tcphdr *); 1213 - extern int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, struct sk_buff *, 1212 + extern int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, const struct sk_buff *, 1214 1213 unsigned header_len); 1215 1214 extern int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, 1216 - struct tcp_md5sig_key *key); 1215 + const struct tcp_md5sig_key *key); 1217 1216 1218 1217 /* write queue abstraction */ 1219 1218 static inline void tcp_write_queue_purge(struct sock *sk) ··· 1226 1225 tcp_clear_all_retrans_hints(tcp_sk(sk)); 1227 1226 } 1228 1227 1229 - static inline struct sk_buff *tcp_write_queue_head(struct sock *sk) 1228 + static inline struct sk_buff *tcp_write_queue_head(const struct sock *sk) 1230 1229 { 1231 1230 return skb_peek(&sk->sk_write_queue); 1232 1231 } 1233 1232 1234 - static inline struct sk_buff *tcp_write_queue_tail(struct sock *sk) 1233 + static inline struct sk_buff *tcp_write_queue_tail(const struct sock *sk) 1235 1234 { 1236 1235 return skb_peek_tail(&sk->sk_write_queue); 1237 1236 } 1238 1237 1239 - static inline struct sk_buff *tcp_write_queue_next(struct sock *sk, struct sk_buff *skb) 1238 + static inline struct sk_buff *tcp_write_queue_next(const struct sock *sk, 1239 + const struct sk_buff *skb) 1240 1240 { 1241 1241 return skb_queue_next(&sk->sk_write_queue, skb); 1242 1242 } 1243 1243 1244 - static inline struct sk_buff *tcp_write_queue_prev(struct sock *sk, struct sk_buff *skb) 1244 + static inline struct sk_buff *tcp_write_queue_prev(const struct sock *sk, 1245 + 
const struct sk_buff *skb) 1245 1246 { 1246 1247 return skb_queue_prev(&sk->sk_write_queue, skb); 1247 1248 } ··· 1257 1254 #define tcp_for_write_queue_from_safe(skb, tmp, sk) \ 1258 1255 skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp) 1259 1256 1260 - static inline struct sk_buff *tcp_send_head(struct sock *sk) 1257 + static inline struct sk_buff *tcp_send_head(const struct sock *sk) 1261 1258 { 1262 1259 return sk->sk_send_head; 1263 1260 } ··· 1268 1265 return skb_queue_is_last(&sk->sk_write_queue, skb); 1269 1266 } 1270 1267 1271 - static inline void tcp_advance_send_head(struct sock *sk, struct sk_buff *skb) 1268 + static inline void tcp_advance_send_head(struct sock *sk, const struct sk_buff *skb) 1272 1269 { 1273 1270 if (tcp_skb_is_last(sk, skb)) 1274 1271 sk->sk_send_head = NULL;
+1 -1
net/core/secure_seq.c
··· 35 35 } 36 36 37 37 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 38 - __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr, 38 + __u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr, 39 39 __be16 sport, __be16 dport) 40 40 { 41 41 u32 secret[MD5_MESSAGE_BYTES / 4];
+1 -1
net/ipv4/syncookies.c
··· 265 265 struct ip_options *opt) 266 266 { 267 267 struct tcp_options_received tcp_opt; 268 - u8 *hash_location; 268 + const u8 *hash_location; 269 269 struct inet_request_sock *ireq; 270 270 struct tcp_request_sock *treq; 271 271 struct tcp_sock *tp = tcp_sk(sk);
+9 -9
net/ipv4/tcp.c
··· 374 374 { 375 375 unsigned int mask; 376 376 struct sock *sk = sock->sk; 377 - struct tcp_sock *tp = tcp_sk(sk); 377 + const struct tcp_sock *tp = tcp_sk(sk); 378 378 379 379 sock_poll_wait(file, sk_sleep(sk), wait); 380 380 if (sk->sk_state == TCP_LISTEN) ··· 528 528 tp->pushed_seq = tp->write_seq; 529 529 } 530 530 531 - static inline int forced_push(struct tcp_sock *tp) 531 + static inline int forced_push(const struct tcp_sock *tp) 532 532 { 533 533 return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1)); 534 534 } ··· 891 891 #define TCP_PAGE(sk) (sk->sk_sndmsg_page) 892 892 #define TCP_OFF(sk) (sk->sk_sndmsg_off) 893 893 894 - static inline int select_size(struct sock *sk, int sg) 894 + static inline int select_size(const struct sock *sk, int sg) 895 895 { 896 - struct tcp_sock *tp = tcp_sk(sk); 896 + const struct tcp_sock *tp = tcp_sk(sk); 897 897 int tmp = tp->mss_cache; 898 898 899 899 if (sg) { ··· 2408 2408 int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, 2409 2409 unsigned int optlen) 2410 2410 { 2411 - struct inet_connection_sock *icsk = inet_csk(sk); 2411 + const struct inet_connection_sock *icsk = inet_csk(sk); 2412 2412 2413 2413 if (level != SOL_TCP) 2414 2414 return icsk->icsk_af_ops->setsockopt(sk, level, optname, ··· 2430 2430 #endif 2431 2431 2432 2432 /* Return information about state of tcp endpoint in API format. 
*/ 2433 - void tcp_get_info(struct sock *sk, struct tcp_info *info) 2433 + void tcp_get_info(const struct sock *sk, struct tcp_info *info) 2434 2434 { 2435 - struct tcp_sock *tp = tcp_sk(sk); 2435 + const struct tcp_sock *tp = tcp_sk(sk); 2436 2436 const struct inet_connection_sock *icsk = inet_csk(sk); 2437 2437 u32 now = tcp_time_stamp; 2438 2438 ··· 3010 3010 EXPORT_SYMBOL(tcp_md5_hash_header); 3011 3011 3012 3012 int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp, 3013 - struct sk_buff *skb, unsigned header_len) 3013 + const struct sk_buff *skb, unsigned int header_len) 3014 3014 { 3015 3015 struct scatterlist sg; 3016 3016 const struct tcphdr *tp = tcp_hdr(skb); ··· 3043 3043 } 3044 3044 EXPORT_SYMBOL(tcp_md5_hash_skb_data); 3045 3045 3046 - int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, struct tcp_md5sig_key *key) 3046 + int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, const struct tcp_md5sig_key *key) 3047 3047 { 3048 3048 struct scatterlist sg; 3049 3049
+56 -54
net/ipv4/tcp_input.c
··· 206 206 tp->ecn_flags |= TCP_ECN_QUEUE_CWR; 207 207 } 208 208 209 - static inline void TCP_ECN_accept_cwr(struct tcp_sock *tp, struct sk_buff *skb) 209 + static inline void TCP_ECN_accept_cwr(struct tcp_sock *tp, const struct sk_buff *skb) 210 210 { 211 211 if (tcp_hdr(skb)->cwr) 212 212 tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR; ··· 239 239 } 240 240 } 241 241 242 - static inline void TCP_ECN_rcv_synack(struct tcp_sock *tp, struct tcphdr *th) 242 + static inline void TCP_ECN_rcv_synack(struct tcp_sock *tp, const struct tcphdr *th) 243 243 { 244 244 if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || th->cwr)) 245 245 tp->ecn_flags &= ~TCP_ECN_OK; 246 246 } 247 247 248 - static inline void TCP_ECN_rcv_syn(struct tcp_sock *tp, struct tcphdr *th) 248 + static inline void TCP_ECN_rcv_syn(struct tcp_sock *tp, const struct tcphdr *th) 249 249 { 250 250 if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || !th->cwr)) 251 251 tp->ecn_flags &= ~TCP_ECN_OK; 252 252 } 253 253 254 - static inline int TCP_ECN_rcv_ecn_echo(struct tcp_sock *tp, struct tcphdr *th) 254 + static inline int TCP_ECN_rcv_ecn_echo(const struct tcp_sock *tp, const struct tcphdr *th) 255 255 { 256 256 if (th->ece && !th->syn && (tp->ecn_flags & TCP_ECN_OK)) 257 257 return 1; ··· 315 315 return 0; 316 316 } 317 317 318 - static void tcp_grow_window(struct sock *sk, struct sk_buff *skb) 318 + static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb) 319 319 { 320 320 struct tcp_sock *tp = tcp_sk(sk); 321 321 ··· 429 429 */ 430 430 void tcp_initialize_rcv_mss(struct sock *sk) 431 431 { 432 - struct tcp_sock *tp = tcp_sk(sk); 432 + const struct tcp_sock *tp = tcp_sk(sk); 433 433 unsigned int hint = min_t(unsigned int, tp->advmss, tp->mss_cache); 434 434 435 435 hint = min(hint, tp->rcv_wnd / 2); ··· 824 824 } 825 825 } 826 826 827 - __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst) 827 + __u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst) 828 828 { 829 829 __u32 
cwnd = (dst ? dst_metric(dst, RTAX_INITCWND) : 0); 830 830 ··· 1216 1216 tp->lost_retrans_low = new_low_seq; 1217 1217 } 1218 1218 1219 - static int tcp_check_dsack(struct sock *sk, struct sk_buff *ack_skb, 1219 + static int tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb, 1220 1220 struct tcp_sack_block_wire *sp, int num_sacks, 1221 1221 u32 prior_snd_una) 1222 1222 { ··· 1310 1310 return in_sack; 1311 1311 } 1312 1312 1313 - static u8 tcp_sacktag_one(struct sk_buff *skb, struct sock *sk, 1313 + static u8 tcp_sacktag_one(const struct sk_buff *skb, struct sock *sk, 1314 1314 struct tcp_sacktag_state *state, 1315 1315 int dup_sack, int pcount) 1316 1316 { ··· 1465 1465 /* I wish gso_size would have a bit more sane initialization than 1466 1466 * something-or-zero which complicates things 1467 1467 */ 1468 - static int tcp_skb_seglen(struct sk_buff *skb) 1468 + static int tcp_skb_seglen(const struct sk_buff *skb) 1469 1469 { 1470 1470 return tcp_skb_pcount(skb) == 1 ? skb->len : tcp_skb_mss(skb); 1471 1471 } 1472 1472 1473 1473 /* Shifting pages past head area doesn't work */ 1474 - static int skb_can_shift(struct sk_buff *skb) 1474 + static int skb_can_shift(const struct sk_buff *skb) 1475 1475 { 1476 1476 return !skb_headlen(skb) && skb_is_nonlinear(skb); 1477 1477 } ··· 1720 1720 return skb; 1721 1721 } 1722 1722 1723 - static int tcp_sack_cache_ok(struct tcp_sock *tp, struct tcp_sack_block *cache) 1723 + static int tcp_sack_cache_ok(const struct tcp_sock *tp, const struct tcp_sack_block *cache) 1724 1724 { 1725 1725 return cache < tp->recv_sack_cache + ARRAY_SIZE(tp->recv_sack_cache); 1726 1726 } 1727 1727 1728 1728 static int 1729 - tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, 1729 + tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb, 1730 1730 u32 prior_snd_una) 1731 1731 { 1732 1732 const struct inet_connection_sock *icsk = inet_csk(sk); 1733 1733 struct tcp_sock *tp = tcp_sk(sk); 1734 - unsigned char 
*ptr = (skb_transport_header(ack_skb) + 1735 - TCP_SKB_CB(ack_skb)->sacked); 1734 + const unsigned char *ptr = (skb_transport_header(ack_skb) + 1735 + TCP_SKB_CB(ack_skb)->sacked); 1736 1736 struct tcp_sack_block_wire *sp_wire = (struct tcp_sack_block_wire *)(ptr+2); 1737 1737 struct tcp_sack_block sp[TCP_NUM_SACKS]; 1738 1738 struct tcp_sack_block *cache; ··· 2296 2296 return 0; 2297 2297 } 2298 2298 2299 - static inline int tcp_fackets_out(struct tcp_sock *tp) 2299 + static inline int tcp_fackets_out(const struct tcp_sock *tp) 2300 2300 { 2301 2301 return tcp_is_reno(tp) ? tp->sacked_out + 1 : tp->fackets_out; 2302 2302 } ··· 2316 2316 * they differ. Since neither occurs due to loss, TCP should really 2317 2317 * ignore them. 2318 2318 */ 2319 - static inline int tcp_dupack_heuristics(struct tcp_sock *tp) 2319 + static inline int tcp_dupack_heuristics(const struct tcp_sock *tp) 2320 2320 { 2321 2321 return tcp_is_fack(tp) ? tp->fackets_out : tp->sacked_out + 1; 2322 2322 } 2323 2323 2324 - static inline int tcp_skb_timedout(struct sock *sk, struct sk_buff *skb) 2324 + static inline int tcp_skb_timedout(const struct sock *sk, 2325 + const struct sk_buff *skb) 2325 2326 { 2326 2327 return tcp_time_stamp - TCP_SKB_CB(skb)->when > inet_csk(sk)->icsk_rto; 2327 2328 } 2328 2329 2329 - static inline int tcp_head_timedout(struct sock *sk) 2330 + static inline int tcp_head_timedout(const struct sock *sk) 2330 2331 { 2331 - struct tcp_sock *tp = tcp_sk(sk); 2332 + const struct tcp_sock *tp = tcp_sk(sk); 2332 2333 2333 2334 return tp->packets_out && 2334 2335 tcp_skb_timedout(sk, tcp_write_queue_head(sk)); ··· 2640 2639 /* Nothing was retransmitted or returned timestamp is less 2641 2640 * than timestamp of the first retransmission. 
2642 2641 */ 2643 - static inline int tcp_packet_delayed(struct tcp_sock *tp) 2642 + static inline int tcp_packet_delayed(const struct tcp_sock *tp) 2644 2643 { 2645 2644 return !tp->retrans_stamp || 2646 2645 (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr && ··· 2701 2700 tp->snd_cwnd_stamp = tcp_time_stamp; 2702 2701 } 2703 2702 2704 - static inline int tcp_may_undo(struct tcp_sock *tp) 2703 + static inline int tcp_may_undo(const struct tcp_sock *tp) 2705 2704 { 2706 2705 return tp->undo_marker && (!tp->undo_retrans || tcp_packet_delayed(tp)); 2707 2706 } ··· 2765 2764 * that successive retransmissions of a segment must not advance 2766 2765 * retrans_stamp under any conditions. 2767 2766 */ 2768 - static int tcp_any_retrans_done(struct sock *sk) 2767 + static int tcp_any_retrans_done(const struct sock *sk) 2769 2768 { 2770 - struct tcp_sock *tp = tcp_sk(sk); 2769 + const struct tcp_sock *tp = tcp_sk(sk); 2771 2770 struct sk_buff *skb; 2772 2771 2773 2772 if (tp->retrans_out) ··· 3246 3245 */ 3247 3246 static void tcp_rearm_rto(struct sock *sk) 3248 3247 { 3249 - struct tcp_sock *tp = tcp_sk(sk); 3248 + const struct tcp_sock *tp = tcp_sk(sk); 3250 3249 3251 3250 if (!tp->packets_out) { 3252 3251 inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS); ··· 3498 3497 * Window update algorithm, described in RFC793/RFC1122 (used in linux-2.2 3499 3498 * and in FreeBSD. NetBSD's one is even worse.) is wrong. 3500 3499 */ 3501 - static int tcp_ack_update_window(struct sock *sk, struct sk_buff *skb, u32 ack, 3500 + static int tcp_ack_update_window(struct sock *sk, const struct sk_buff *skb, u32 ack, 3502 3501 u32 ack_seq) 3503 3502 { 3504 3503 struct tcp_sock *tp = tcp_sk(sk); ··· 3674 3673 } 3675 3674 3676 3675 /* This routine deals with incoming acks, but not outgoing ones. 
*/ 3677 - static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag) 3676 + static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) 3678 3677 { 3679 3678 struct inet_connection_sock *icsk = inet_csk(sk); 3680 3679 struct tcp_sock *tp = tcp_sk(sk); ··· 3811 3810 * But, this can also be called on packets in the established flow when 3812 3811 * the fast version below fails. 3813 3812 */ 3814 - void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx, 3815 - u8 **hvpp, int estab) 3813 + void tcp_parse_options(const struct sk_buff *skb, struct tcp_options_received *opt_rx, 3814 + const u8 **hvpp, int estab) 3816 3815 { 3817 - unsigned char *ptr; 3818 - struct tcphdr *th = tcp_hdr(skb); 3816 + const unsigned char *ptr; 3817 + const struct tcphdr *th = tcp_hdr(skb); 3819 3818 int length = (th->doff * 4) - sizeof(struct tcphdr); 3820 3819 3821 - ptr = (unsigned char *)(th + 1); 3820 + ptr = (const unsigned char *)(th + 1); 3822 3821 opt_rx->saw_tstamp = 0; 3823 3822 3824 3823 while (length > 0) { ··· 3929 3928 } 3930 3929 EXPORT_SYMBOL(tcp_parse_options); 3931 3930 3932 - static int tcp_parse_aligned_timestamp(struct tcp_sock *tp, struct tcphdr *th) 3931 + static int tcp_parse_aligned_timestamp(struct tcp_sock *tp, const struct tcphdr *th) 3933 3932 { 3934 - __be32 *ptr = (__be32 *)(th + 1); 3933 + const __be32 *ptr = (const __be32 *)(th + 1); 3935 3934 3936 3935 if (*ptr == htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) 3937 3936 | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)) { ··· 3948 3947 /* Fast parse options. This hopes to only see timestamps. 3949 3948 * If it is wrong it falls back on tcp_parse_options(). 
3950 3949 */ 3951 - static int tcp_fast_parse_options(struct sk_buff *skb, struct tcphdr *th, 3952 - struct tcp_sock *tp, u8 **hvpp) 3950 + static int tcp_fast_parse_options(const struct sk_buff *skb, 3951 + const struct tcphdr *th, 3952 + struct tcp_sock *tp, const u8 **hvpp) 3953 3953 { 3954 3954 /* In the spirit of fast parsing, compare doff directly to constant 3955 3955 * values. Because equality is used, short doff can be ignored here. ··· 3971 3969 /* 3972 3970 * Parse MD5 Signature option 3973 3971 */ 3974 - u8 *tcp_parse_md5sig_option(struct tcphdr *th) 3972 + const u8 *tcp_parse_md5sig_option(const struct tcphdr *th) 3975 3973 { 3976 - int length = (th->doff << 2) - sizeof (*th); 3977 - u8 *ptr = (u8*)(th + 1); 3974 + int length = (th->doff << 2) - sizeof(*th); 3975 + const u8 *ptr = (const u8 *)(th + 1); 3978 3976 3979 3977 /* If the TCP option is too short, we can short cut */ 3980 3978 if (length < TCPOLEN_MD5SIG) ··· 4051 4049 4052 4050 static int tcp_disordered_ack(const struct sock *sk, const struct sk_buff *skb) 4053 4051 { 4054 - struct tcp_sock *tp = tcp_sk(sk); 4055 - struct tcphdr *th = tcp_hdr(skb); 4052 + const struct tcp_sock *tp = tcp_sk(sk); 4053 + const struct tcphdr *th = tcp_hdr(skb); 4056 4054 u32 seq = TCP_SKB_CB(skb)->seq; 4057 4055 u32 ack = TCP_SKB_CB(skb)->ack_seq; 4058 4056 ··· 4091 4089 * (borrowed from freebsd) 4092 4090 */ 4093 4091 4094 - static inline int tcp_sequence(struct tcp_sock *tp, u32 seq, u32 end_seq) 4092 + static inline int tcp_sequence(const struct tcp_sock *tp, u32 seq, u32 end_seq) 4095 4093 { 4096 4094 return !before(end_seq, tp->rcv_wup) && 4097 4095 !after(seq, tp->rcv_nxt + tcp_receive_window(tp)); ··· 4248 4246 tcp_sack_extend(tp->duplicate_sack, seq, end_seq); 4249 4247 } 4250 4248 4251 - static void tcp_send_dupack(struct sock *sk, struct sk_buff *skb) 4249 + static void tcp_send_dupack(struct sock *sk, const struct sk_buff *skb) 4252 4250 { 4253 4251 struct tcp_sock *tp = tcp_sk(sk); 4254 4252 ··· 4435 
4433 4436 4434 static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) 4437 4435 { 4438 - struct tcphdr *th = tcp_hdr(skb); 4436 + const struct tcphdr *th = tcp_hdr(skb); 4439 4437 struct tcp_sock *tp = tcp_sk(sk); 4440 4438 int eaten = -1; 4441 4439 ··· 4919 4917 tp->snd_cwnd_stamp = tcp_time_stamp; 4920 4918 } 4921 4919 4922 - static int tcp_should_expand_sndbuf(struct sock *sk) 4920 + static int tcp_should_expand_sndbuf(const struct sock *sk) 4923 4921 { 4924 - struct tcp_sock *tp = tcp_sk(sk); 4922 + const struct tcp_sock *tp = tcp_sk(sk); 4925 4923 4926 4924 /* If the user specified a specific send buffer setting, do 4927 4925 * not modify it. ··· 5030 5028 * either form (or just set the sysctl tcp_stdurg). 5031 5029 */ 5032 5030 5033 - static void tcp_check_urg(struct sock *sk, struct tcphdr *th) 5031 + static void tcp_check_urg(struct sock *sk, const struct tcphdr *th) 5034 5032 { 5035 5033 struct tcp_sock *tp = tcp_sk(sk); 5036 5034 u32 ptr = ntohs(th->urg_ptr); ··· 5096 5094 } 5097 5095 5098 5096 /* This is the 'fast' part of urgent handling. */ 5099 - static void tcp_urg(struct sock *sk, struct sk_buff *skb, struct tcphdr *th) 5097 + static void tcp_urg(struct sock *sk, struct sk_buff *skb, const struct tcphdr *th) 5100 5098 { 5101 5099 struct tcp_sock *tp = tcp_sk(sk); 5102 5100 ··· 5217 5215 * play significant role here. 5218 5216 */ 5219 5217 static int tcp_validate_incoming(struct sock *sk, struct sk_buff *skb, 5220 - struct tcphdr *th, int syn_inerr) 5218 + const struct tcphdr *th, int syn_inerr) 5221 5219 { 5222 - u8 *hash_location; 5220 + const u8 *hash_location; 5223 5221 struct tcp_sock *tp = tcp_sk(sk); 5224 5222 5225 5223 /* RFC1323: H1. Apply PAWS check first. */ ··· 5300 5298 * tcp_data_queue when everything is OK. 
5301 5299 */ 5302 5300 int tcp_rcv_established(struct sock *sk, struct sk_buff *skb, 5303 - struct tcphdr *th, unsigned len) 5301 + const struct tcphdr *th, unsigned int len) 5304 5302 { 5305 5303 struct tcp_sock *tp = tcp_sk(sk); 5306 5304 int res; ··· 5511 5509 EXPORT_SYMBOL(tcp_rcv_established); 5512 5510 5513 5511 static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, 5514 - struct tcphdr *th, unsigned len) 5512 + const struct tcphdr *th, unsigned int len) 5515 5513 { 5516 - u8 *hash_location; 5514 + const u8 *hash_location; 5517 5515 struct inet_connection_sock *icsk = inet_csk(sk); 5518 5516 struct tcp_sock *tp = tcp_sk(sk); 5519 5517 struct tcp_cookie_values *cvp = tp->cookie_values; ··· 5788 5786 */ 5789 5787 5790 5788 int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, 5791 - struct tcphdr *th, unsigned len) 5789 + const struct tcphdr *th, unsigned int len) 5792 5790 { 5793 5791 struct tcp_sock *tp = tcp_sk(sk); 5794 5792 struct inet_connection_sock *icsk = inet_csk(sk);
+13 -13
net/ipv4/tcp_ipv4.c
··· 104 104 struct inet_hashinfo tcp_hashinfo; 105 105 EXPORT_SYMBOL(tcp_hashinfo); 106 106 107 - static inline __u32 tcp_v4_init_sequence(struct sk_buff *skb) 107 + static inline __u32 tcp_v4_init_sequence(const struct sk_buff *skb) 108 108 { 109 109 return secure_tcp_sequence_number(ip_hdr(skb)->daddr, 110 110 ip_hdr(skb)->saddr, ··· 552 552 /* This routine computes an IPv4 TCP checksum. */ 553 553 void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb) 554 554 { 555 - struct inet_sock *inet = inet_sk(sk); 555 + const struct inet_sock *inet = inet_sk(sk); 556 556 557 557 __tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr); 558 558 } ··· 590 590 591 591 static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb) 592 592 { 593 - struct tcphdr *th = tcp_hdr(skb); 593 + const struct tcphdr *th = tcp_hdr(skb); 594 594 struct { 595 595 struct tcphdr th; 596 596 #ifdef CONFIG_TCP_MD5SIG ··· 668 668 struct tcp_md5sig_key *key, 669 669 int reply_flags) 670 670 { 671 - struct tcphdr *th = tcp_hdr(skb); 671 + const struct tcphdr *th = tcp_hdr(skb); 672 672 struct { 673 673 struct tcphdr th; 674 674 __be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2) ··· 1182 1182 * o MD5 hash and we're not expecting one. 1183 1183 * o MD5 hash and its wrong. 
1184 1184 */ 1185 - __u8 *hash_location = NULL; 1185 + const __u8 *hash_location = NULL; 1186 1186 struct tcp_md5sig_key *hash_expected; 1187 1187 const struct iphdr *iph = ip_hdr(skb); 1188 - struct tcphdr *th = tcp_hdr(skb); 1188 + const struct tcphdr *th = tcp_hdr(skb); 1189 1189 int genhash; 1190 1190 unsigned char newhash[16]; 1191 1191 ··· 1248 1248 { 1249 1249 struct tcp_extend_values tmp_ext; 1250 1250 struct tcp_options_received tmp_opt; 1251 - u8 *hash_location; 1251 + const u8 *hash_location; 1252 1252 struct request_sock *req; 1253 1253 struct inet_request_sock *ireq; 1254 1254 struct tcp_sock *tp = tcp_sk(sk); ··· 1645 1645 int tcp_v4_rcv(struct sk_buff *skb) 1646 1646 { 1647 1647 const struct iphdr *iph; 1648 - struct tcphdr *th; 1648 + const struct tcphdr *th; 1649 1649 struct sock *sk; 1650 1650 int ret; 1651 1651 struct net *net = dev_net(skb->dev); ··· 1809 1809 1810 1810 void *tcp_v4_tw_get_peer(struct sock *sk) 1811 1811 { 1812 - struct inet_timewait_sock *tw = inet_twsk(sk); 1812 + const struct inet_timewait_sock *tw = inet_twsk(sk); 1813 1813 1814 1814 return inet_getpeer_v4(tw->tw_daddr, 1); 1815 1815 } ··· 2381 2381 } 2382 2382 EXPORT_SYMBOL(tcp_proc_unregister); 2383 2383 2384 - static void get_openreq4(struct sock *sk, struct request_sock *req, 2384 + static void get_openreq4(const struct sock *sk, const struct request_sock *req, 2385 2385 struct seq_file *f, int i, int uid, int *len) 2386 2386 { 2387 2387 const struct inet_request_sock *ireq = inet_rsk(req); ··· 2411 2411 { 2412 2412 int timer_active; 2413 2413 unsigned long timer_expires; 2414 - struct tcp_sock *tp = tcp_sk(sk); 2414 + const struct tcp_sock *tp = tcp_sk(sk); 2415 2415 const struct inet_connection_sock *icsk = inet_csk(sk); 2416 - struct inet_sock *inet = inet_sk(sk); 2416 + const struct inet_sock *inet = inet_sk(sk); 2417 2417 __be32 dest = inet->inet_daddr; 2418 2418 __be32 src = inet->inet_rcv_saddr; 2419 2419 __u16 destp = ntohs(inet->inet_dport); ··· 2462 2462 len); 
2463 2463 } 2464 2464 2465 - static void get_timewait4_sock(struct inet_timewait_sock *tw, 2465 + static void get_timewait4_sock(const struct inet_timewait_sock *tw, 2466 2466 struct seq_file *f, int i, int *len) 2467 2467 { 2468 2468 __be32 dest, src;
+2 -2
net/ipv4/tcp_minisocks.c
··· 141 141 const struct tcphdr *th) 142 142 { 143 143 struct tcp_options_received tmp_opt; 144 - u8 *hash_location; 144 + const u8 *hash_location; 145 145 struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw); 146 146 int paws_reject = 0; 147 147 ··· 566 566 struct request_sock **prev) 567 567 { 568 568 struct tcp_options_received tmp_opt; 569 - u8 *hash_location; 569 + const u8 *hash_location; 570 570 struct sock *child; 571 571 const struct tcphdr *th = tcp_hdr(skb); 572 572 __be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
+37 -35
net/ipv4/tcp_output.c
··· 65 65 66 66 67 67 /* Account for new data that has been sent to the network. */ 68 - static void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb) 68 + static void tcp_event_new_data_sent(struct sock *sk, const struct sk_buff *skb) 69 69 { 70 70 struct tcp_sock *tp = tcp_sk(sk); 71 71 unsigned int prior_packets = tp->packets_out; ··· 89 89 * Anything in between SND.UNA...SND.UNA+SND.WND also can be already 90 90 * invalid. OK, let's make this for now: 91 91 */ 92 - static inline __u32 tcp_acceptable_seq(struct sock *sk) 92 + static inline __u32 tcp_acceptable_seq(const struct sock *sk) 93 93 { 94 - struct tcp_sock *tp = tcp_sk(sk); 94 + const struct tcp_sock *tp = tcp_sk(sk); 95 95 96 96 if (!before(tcp_wnd_end(tp), tp->snd_nxt)) 97 97 return tp->snd_nxt; ··· 116 116 static __u16 tcp_advertise_mss(struct sock *sk) 117 117 { 118 118 struct tcp_sock *tp = tcp_sk(sk); 119 - struct dst_entry *dst = __sk_dst_get(sk); 119 + const struct dst_entry *dst = __sk_dst_get(sk); 120 120 int mss = tp->advmss; 121 121 122 122 if (dst) { ··· 133 133 134 134 /* RFC2861. Reset CWND after idle period longer RTO to "restart window". 135 135 * This is the first part of cwnd validation mechanism. */ 136 - static void tcp_cwnd_restart(struct sock *sk, struct dst_entry *dst) 136 + static void tcp_cwnd_restart(struct sock *sk, const struct dst_entry *dst) 137 137 { 138 138 struct tcp_sock *tp = tcp_sk(sk); 139 139 s32 delta = tcp_time_stamp - tp->lsndtime; ··· 154 154 155 155 /* Congestion state accounting after a packet has been sent. 
*/ 156 156 static void tcp_event_data_sent(struct tcp_sock *tp, 157 - struct sk_buff *skb, struct sock *sk) 157 + struct sock *sk) 158 158 { 159 159 struct inet_connection_sock *icsk = inet_csk(sk); 160 160 const u32 now = tcp_time_stamp; ··· 295 295 } 296 296 297 297 /* Packet ECN state for a SYN-ACK */ 298 - static inline void TCP_ECN_send_synack(struct tcp_sock *tp, struct sk_buff *skb) 298 + static inline void TCP_ECN_send_synack(const struct tcp_sock *tp, struct sk_buff *skb) 299 299 { 300 300 TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_CWR; 301 301 if (!(tp->ecn_flags & TCP_ECN_OK)) ··· 315 315 } 316 316 317 317 static __inline__ void 318 - TCP_ECN_make_synack(struct request_sock *req, struct tcphdr *th) 318 + TCP_ECN_make_synack(const struct request_sock *req, struct tcphdr *th) 319 319 { 320 320 if (inet_rsk(req)->ecn_ok) 321 321 th->ece = 1; ··· 565 565 */ 566 566 static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb, 567 567 struct tcp_out_options *opts, 568 - struct tcp_md5sig_key **md5) { 568 + struct tcp_md5sig_key **md5) 569 + { 569 570 struct tcp_sock *tp = tcp_sk(sk); 570 571 struct tcp_cookie_values *cvp = tp->cookie_values; 571 572 unsigned remaining = MAX_TCP_OPTION_SPACE; ··· 744 743 */ 745 744 static unsigned tcp_established_options(struct sock *sk, struct sk_buff *skb, 746 745 struct tcp_out_options *opts, 747 - struct tcp_md5sig_key **md5) { 746 + struct tcp_md5sig_key **md5) 747 + { 748 748 struct tcp_skb_cb *tcb = skb ? TCP_SKB_CB(skb) : NULL; 749 749 struct tcp_sock *tp = tcp_sk(sk); 750 750 unsigned size = 0; ··· 895 893 tcp_event_ack_sent(sk, tcp_skb_pcount(skb)); 896 894 897 895 if (skb->len != tcp_header_size) 898 - tcp_event_data_sent(tp, skb, sk); 896 + tcp_event_data_sent(tp, sk); 899 897 900 898 if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq) 901 899 TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS, ··· 928 926 } 929 927 930 928 /* Initialize TSO segments for a packet. 
*/ 931 - static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb, 929 + static void tcp_set_skb_tso_segs(const struct sock *sk, struct sk_buff *skb, 932 930 unsigned int mss_now) 933 931 { 934 932 if (skb->len <= mss_now || !sk_can_gso(sk) || ··· 949 947 /* When a modification to fackets out becomes necessary, we need to check 950 948 * skb is counted to fackets_out or not. 951 949 */ 952 - static void tcp_adjust_fackets_out(struct sock *sk, struct sk_buff *skb, 950 + static void tcp_adjust_fackets_out(struct sock *sk, const struct sk_buff *skb, 953 951 int decr) 954 952 { 955 953 struct tcp_sock *tp = tcp_sk(sk); ··· 964 962 /* Pcount in the middle of the write queue got changed, we need to do various 965 963 * tweaks to fix counters 966 964 */ 967 - static void tcp_adjust_pcount(struct sock *sk, struct sk_buff *skb, int decr) 965 + static void tcp_adjust_pcount(struct sock *sk, const struct sk_buff *skb, int decr) 968 966 { 969 967 struct tcp_sock *tp = tcp_sk(sk); 970 968 ··· 1148 1146 } 1149 1147 1150 1148 /* Calculate MSS. Not accounting for SACKs here. 
*/ 1151 - int tcp_mtu_to_mss(struct sock *sk, int pmtu) 1149 + int tcp_mtu_to_mss(const struct sock *sk, int pmtu) 1152 1150 { 1153 - struct tcp_sock *tp = tcp_sk(sk); 1154 - struct inet_connection_sock *icsk = inet_csk(sk); 1151 + const struct tcp_sock *tp = tcp_sk(sk); 1152 + const struct inet_connection_sock *icsk = inet_csk(sk); 1155 1153 int mss_now; 1156 1154 1157 1155 /* Calculate base mss without TCP options: ··· 1177 1175 } 1178 1176 1179 1177 /* Inverse of above */ 1180 - int tcp_mss_to_mtu(struct sock *sk, int mss) 1178 + int tcp_mss_to_mtu(const struct sock *sk, int mss) 1181 1179 { 1182 - struct tcp_sock *tp = tcp_sk(sk); 1183 - struct inet_connection_sock *icsk = inet_csk(sk); 1180 + const struct tcp_sock *tp = tcp_sk(sk); 1181 + const struct inet_connection_sock *icsk = inet_csk(sk); 1184 1182 int mtu; 1185 1183 1186 1184 mtu = mss + ··· 1254 1252 */ 1255 1253 unsigned int tcp_current_mss(struct sock *sk) 1256 1254 { 1257 - struct tcp_sock *tp = tcp_sk(sk); 1258 - struct dst_entry *dst = __sk_dst_get(sk); 1255 + const struct tcp_sock *tp = tcp_sk(sk); 1256 + const struct dst_entry *dst = __sk_dst_get(sk); 1259 1257 u32 mss_now; 1260 1258 unsigned header_len; 1261 1259 struct tcp_out_options opts; ··· 1315 1313 * modulo only when the receiver window alone is the limiting factor or 1316 1314 * when we would be allowed to send the split-due-to-Nagle skb fully. 1317 1315 */ 1318 - static unsigned int tcp_mss_split_point(struct sock *sk, struct sk_buff *skb, 1316 + static unsigned int tcp_mss_split_point(const struct sock *sk, const struct sk_buff *skb, 1319 1317 unsigned int mss_now, unsigned int cwnd) 1320 1318 { 1321 - struct tcp_sock *tp = tcp_sk(sk); 1319 + const struct tcp_sock *tp = tcp_sk(sk); 1322 1320 u32 needed, window, cwnd_len; 1323 1321 1324 1322 window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; ··· 1338 1336 /* Can at least one segment of SKB be sent right now, according to the 1339 1337 * congestion window rules? 
If so, return how many segments are allowed. 1340 1338 */ 1341 - static inline unsigned int tcp_cwnd_test(struct tcp_sock *tp, 1342 - struct sk_buff *skb) 1339 + static inline unsigned int tcp_cwnd_test(const struct tcp_sock *tp, 1340 + const struct sk_buff *skb) 1343 1341 { 1344 1342 u32 in_flight, cwnd; 1345 1343 ··· 1360 1358 * This must be invoked the first time we consider transmitting 1361 1359 * SKB onto the wire. 1362 1360 */ 1363 - static int tcp_init_tso_segs(struct sock *sk, struct sk_buff *skb, 1361 + static int tcp_init_tso_segs(const struct sock *sk, struct sk_buff *skb, 1364 1362 unsigned int mss_now) 1365 1363 { 1366 1364 int tso_segs = tcp_skb_pcount(skb); ··· 1398 1396 /* Return non-zero if the Nagle test allows this packet to be 1399 1397 * sent now. 1400 1398 */ 1401 - static inline int tcp_nagle_test(struct tcp_sock *tp, struct sk_buff *skb, 1399 + static inline int tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb, 1402 1400 unsigned int cur_mss, int nonagle) 1403 1401 { 1404 1402 /* Nagle rule does not apply to frames, which sit in the middle of the ··· 1424 1422 } 1425 1423 1426 1424 /* Does at least the first segment of SKB fit into the send window? */ 1427 - static inline int tcp_snd_wnd_test(struct tcp_sock *tp, struct sk_buff *skb, 1425 + static inline int tcp_snd_wnd_test(const struct tcp_sock *tp, const struct sk_buff *skb, 1428 1426 unsigned int cur_mss) 1429 1427 { 1430 1428 u32 end_seq = TCP_SKB_CB(skb)->end_seq; ··· 1439 1437 * should be put on the wire right now. If so, it returns the number of 1440 1438 * packets allowed by the congestion window. 
1441 1439 */ 1442 - static unsigned int tcp_snd_test(struct sock *sk, struct sk_buff *skb, 1440 + static unsigned int tcp_snd_test(const struct sock *sk, struct sk_buff *skb, 1443 1441 unsigned int cur_mss, int nonagle) 1444 1442 { 1445 - struct tcp_sock *tp = tcp_sk(sk); 1443 + const struct tcp_sock *tp = tcp_sk(sk); 1446 1444 unsigned int cwnd_quota; 1447 1445 1448 1446 tcp_init_tso_segs(sk, skb, cur_mss); ··· 1460 1458 /* Test if sending is allowed right now. */ 1461 1459 int tcp_may_send_now(struct sock *sk) 1462 1460 { 1463 - struct tcp_sock *tp = tcp_sk(sk); 1461 + const struct tcp_sock *tp = tcp_sk(sk); 1464 1462 struct sk_buff *skb = tcp_send_head(sk); 1465 1463 1466 1464 return skb && ··· 2010 2008 } 2011 2009 2012 2010 /* Check if coalescing SKBs is legal. */ 2013 - static int tcp_can_collapse(struct sock *sk, struct sk_buff *skb) 2011 + static int tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb) 2014 2012 { 2015 2013 if (tcp_skb_pcount(skb) > 1) 2016 2014 return 0; ··· 2186 2184 static int tcp_can_forward_retransmit(struct sock *sk) 2187 2185 { 2188 2186 const struct inet_connection_sock *icsk = inet_csk(sk); 2189 - struct tcp_sock *tp = tcp_sk(sk); 2187 + const struct tcp_sock *tp = tcp_sk(sk); 2190 2188 2191 2189 /* Forward retransmissions are possible only during Recovery. */ 2192 2190 if (icsk->icsk_ca_state != TCP_CA_Recovery) ··· 2552 2550 /* Do all connect socket setups that can be done AF independent. */ 2553 2551 static void tcp_connect_init(struct sock *sk) 2554 2552 { 2555 - struct dst_entry *dst = __sk_dst_get(sk); 2553 + const struct dst_entry *dst = __sk_dst_get(sk); 2556 2554 struct tcp_sock *tp = tcp_sk(sk); 2557 2555 __u8 rcv_wscale; 2558 2556
+3 -3
net/ipv6/syncookies.c
··· 115 115 & COOKIEMASK; 116 116 } 117 117 118 - __u32 cookie_v6_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp) 118 + __u32 cookie_v6_init_sequence(struct sock *sk, const struct sk_buff *skb, __u16 *mssp) 119 119 { 120 120 const struct ipv6hdr *iph = ipv6_hdr(skb); 121 121 const struct tcphdr *th = tcp_hdr(skb); ··· 137 137 jiffies / (HZ * 60), mssind); 138 138 } 139 139 140 - static inline int cookie_check(struct sk_buff *skb, __u32 cookie) 140 + static inline int cookie_check(const struct sk_buff *skb, __u32 cookie) 141 141 { 142 142 const struct ipv6hdr *iph = ipv6_hdr(skb); 143 143 const struct tcphdr *th = tcp_hdr(skb); ··· 152 152 struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) 153 153 { 154 154 struct tcp_options_received tcp_opt; 155 - u8 *hash_location; 155 + const u8 *hash_location; 156 156 struct inet_request_sock *ireq; 157 157 struct inet6_request_sock *ireq6; 158 158 struct tcp_request_sock *treq;
+14 -13
net/ipv6/tcp_ipv6.c
··· 114 114 return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base); 115 115 } 116 116 117 - static __u32 tcp_v6_init_sequence(struct sk_buff *skb) 117 + static __u32 tcp_v6_init_sequence(const struct sk_buff *skb) 118 118 { 119 119 return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32, 120 120 ipv6_hdr(skb)->saddr.s6_addr32, ··· 844 844 845 845 static int tcp_v6_inbound_md5_hash (struct sock *sk, struct sk_buff *skb) 846 846 { 847 - __u8 *hash_location = NULL; 847 + const __u8 *hash_location = NULL; 848 848 struct tcp_md5sig_key *hash_expected; 849 849 const struct ipv6hdr *ip6h = ipv6_hdr(skb); 850 850 struct tcphdr *th = tcp_hdr(skb); ··· 980 980 static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win, 981 981 u32 ts, struct tcp_md5sig_key *key, int rst) 982 982 { 983 - struct tcphdr *th = tcp_hdr(skb), *t1; 983 + const struct tcphdr *th = tcp_hdr(skb); 984 + struct tcphdr *t1; 984 985 struct sk_buff *buff; 985 986 struct flowi6 fl6; 986 987 struct net *net = dev_net(skb_dst(skb)->dev); ··· 1071 1070 1072 1071 static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb) 1073 1072 { 1074 - struct tcphdr *th = tcp_hdr(skb); 1073 + const struct tcphdr *th = tcp_hdr(skb); 1075 1074 u32 seq = 0, ack_seq = 0; 1076 1075 struct tcp_md5sig_key *key = NULL; 1077 1076 ··· 1161 1160 { 1162 1161 struct tcp_extend_values tmp_ext; 1163 1162 struct tcp_options_received tmp_opt; 1164 - u8 *hash_location; 1163 + const u8 *hash_location; 1165 1164 struct request_sock *req; 1166 1165 struct inet6_request_sock *treq; 1167 1166 struct ipv6_pinfo *np = inet6_sk(sk); ··· 1689 1688 1690 1689 static int tcp_v6_rcv(struct sk_buff *skb) 1691 1690 { 1692 - struct tcphdr *th; 1691 + const struct tcphdr *th; 1693 1692 const struct ipv6hdr *hdr; 1694 1693 struct sock *sk; 1695 1694 int ret; ··· 1857 1856 1858 1857 static void *tcp_v6_tw_get_peer(struct sock *sk) 1859 1858 { 1860 - struct inet6_timewait_sock *tw6 = inet6_twsk(sk); 1861 - struct 
inet_timewait_sock *tw = inet_twsk(sk); 1859 + const struct inet6_timewait_sock *tw6 = inet6_twsk(sk); 1860 + const struct inet_timewait_sock *tw = inet_twsk(sk); 1862 1861 1863 1862 if (tw->tw_family == AF_INET) 1864 1863 return tcp_v4_tw_get_peer(sk); ··· 2013 2012 #ifdef CONFIG_PROC_FS 2014 2013 /* Proc filesystem TCPv6 sock list dumping. */ 2015 2014 static void get_openreq6(struct seq_file *seq, 2016 - struct sock *sk, struct request_sock *req, int i, int uid) 2015 + const struct sock *sk, struct request_sock *req, int i, int uid) 2017 2016 { 2018 2017 int ttd = req->expires - jiffies; 2019 2018 const struct in6_addr *src = &inet6_rsk(req)->loc_addr; ··· 2049 2048 __u16 destp, srcp; 2050 2049 int timer_active; 2051 2050 unsigned long timer_expires; 2052 - struct inet_sock *inet = inet_sk(sp); 2053 - struct tcp_sock *tp = tcp_sk(sp); 2051 + const struct inet_sock *inet = inet_sk(sp); 2052 + const struct tcp_sock *tp = tcp_sk(sp); 2054 2053 const struct inet_connection_sock *icsk = inet_csk(sp); 2055 - struct ipv6_pinfo *np = inet6_sk(sp); 2054 + const struct ipv6_pinfo *np = inet6_sk(sp); 2056 2055 2057 2056 dest = &np->daddr; 2058 2057 src = &np->rcv_saddr; ··· 2104 2103 { 2105 2104 const struct in6_addr *dest, *src; 2106 2105 __u16 destp, srcp; 2107 - struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw); 2106 + const struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw); 2108 2107 int ttd = tw->tw_ttd - jiffies; 2109 2108 2110 2109 if (ttd < 0)