Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ipv6: tcp: fix TCLASS value in ACK messages sent from TIME_WAIT

commit 66b13d99d96a (ipv4: tcp: fix TOS value in ACK messages sent from
TIME_WAIT) fixed IPv4 only.

This part is for the IPv6 side, adding a tclass param to ip6_xmit()

We alias tw_tclass and tw_tos, if socket family is INET6.

[ if the socket is IPv4-mapped, only the IP_TOS socket option is used to fill
the TOS field; TCLASS is not taken into account ]

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by Eric Dumazet and committed by David S. Miller
b903d324 138c4ae9

+19 -18
+1
include/net/inet_timewait_sock.h
··· 134 134 struct inet_bind_bucket *tw_tb; 135 135 struct hlist_node tw_death_node; 136 136 }; 137 + #define tw_tclass tw_tos 137 138 138 139 static inline void inet_twsk_add_node_rcu(struct inet_timewait_sock *tw, 139 140 struct hlist_nulls_head *list)
+2 -1
include/net/ipv6.h
··· 486 486 extern int ip6_xmit(struct sock *sk, 487 487 struct sk_buff *skb, 488 488 struct flowi6 *fl6, 489 - struct ipv6_txoptions *opt); 489 + struct ipv6_txoptions *opt, 490 + int tclass); 490 491 491 492 extern int ip6_nd_hdr(struct sock *sk, 492 493 struct sk_buff *skb,
+2 -2
net/dccp/ipv6.c
··· 271 271 &ireq6->loc_addr, 272 272 &ireq6->rmt_addr); 273 273 ipv6_addr_copy(&fl6.daddr, &ireq6->rmt_addr); 274 - err = ip6_xmit(sk, skb, &fl6, opt); 274 + err = ip6_xmit(sk, skb, &fl6, opt, np->tclass); 275 275 err = net_xmit_eval(err); 276 276 } 277 277 ··· 326 326 dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL, false); 327 327 if (!IS_ERR(dst)) { 328 328 skb_dst_set(skb, dst); 329 - ip6_xmit(ctl_sk, skb, &fl6, NULL); 329 + ip6_xmit(ctl_sk, skb, &fl6, NULL, 0); 330 330 DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS); 331 331 DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS); 332 332 return;
+1
net/ipv4/tcp_minisocks.c
··· 345 345 tw6 = inet6_twsk((struct sock *)tw); 346 346 ipv6_addr_copy(&tw6->tw_v6_daddr, &np->daddr); 347 347 ipv6_addr_copy(&tw6->tw_v6_rcv_saddr, &np->rcv_saddr); 348 + tw->tw_tclass = np->tclass; 348 349 tw->tw_ipv6only = np->ipv6only; 349 350 } 350 351 #endif
+1 -1
net/ipv6/inet6_connection_sock.c
··· 248 248 /* Restore final destination back after routing done */ 249 249 ipv6_addr_copy(&fl6.daddr, &np->daddr); 250 250 251 - res = ip6_xmit(sk, skb, &fl6, np->opt); 251 + res = ip6_xmit(sk, skb, &fl6, np->opt, np->tclass); 252 252 rcu_read_unlock(); 253 253 return res; 254 254 }
+2 -5
net/ipv6/ip6_output.c
··· 180 180 */ 181 181 182 182 int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6, 183 - struct ipv6_txoptions *opt) 183 + struct ipv6_txoptions *opt, int tclass) 184 184 { 185 185 struct net *net = sock_net(sk); 186 186 struct ipv6_pinfo *np = inet6_sk(sk); ··· 190 190 u8 proto = fl6->flowi6_proto; 191 191 int seg_len = skb->len; 192 192 int hlimit = -1; 193 - int tclass = 0; 194 193 u32 mtu; 195 194 196 195 if (opt) { ··· 227 228 /* 228 229 * Fill in the IPv6 header 229 230 */ 230 - if (np) { 231 - tclass = np->tclass; 231 + if (np) 232 232 hlimit = np->hop_limit; 233 - } 234 233 if (hlimit < 0) 235 234 hlimit = ip6_dst_hoplimit(dst); 236 235
+9 -8
net/ipv6/tcp_ipv6.c
··· 513 513 __tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr); 514 514 515 515 ipv6_addr_copy(&fl6.daddr, &treq->rmt_addr); 516 - err = ip6_xmit(sk, skb, &fl6, opt); 516 + err = ip6_xmit(sk, skb, &fl6, opt, np->tclass); 517 517 err = net_xmit_eval(err); 518 518 } 519 519 ··· 979 979 } 980 980 981 981 static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win, 982 - u32 ts, struct tcp_md5sig_key *key, int rst) 982 + u32 ts, struct tcp_md5sig_key *key, int rst, u8 tclass) 983 983 { 984 984 const struct tcphdr *th = tcp_hdr(skb); 985 985 struct tcphdr *t1; ··· 1060 1060 dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL, false); 1061 1061 if (!IS_ERR(dst)) { 1062 1062 skb_dst_set(buff, dst); 1063 - ip6_xmit(ctl_sk, buff, &fl6, NULL); 1063 + ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass); 1064 1064 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS); 1065 1065 if (rst) 1066 1066 TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS); ··· 1093 1093 ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len - 1094 1094 (th->doff << 2); 1095 1095 1096 - tcp_v6_send_response(skb, seq, ack_seq, 0, 0, key, 1); 1096 + tcp_v6_send_response(skb, seq, ack_seq, 0, 0, key, 1, 0); 1097 1097 } 1098 1098 1099 1099 static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts, 1100 - struct tcp_md5sig_key *key) 1100 + struct tcp_md5sig_key *key, u8 tclass) 1101 1101 { 1102 - tcp_v6_send_response(skb, seq, ack, win, ts, key, 0); 1102 + tcp_v6_send_response(skb, seq, ack, win, ts, key, 0, tclass); 1103 1103 } 1104 1104 1105 1105 static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb) ··· 1109 1109 1110 1110 tcp_v6_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt, 1111 1111 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale, 1112 - tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw)); 1112 + tcptw->tw_ts_recent, tcp_twsk_md5_key(tcptw), 1113 + tw->tw_tclass); 1113 1114 1114 1115 inet_twsk_put(tw); 1115 1116 } ··· 1119 1118 struct request_sock *req) 1120 1119 { 1121 1120 
tcp_v6_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent, 1122 - tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr)); 1121 + tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr), 0); 1123 1122 } 1124 1123 1125 1124
+1 -1
net/sctp/ipv6.c
··· 243 243 if (!(transport->param_flags & SPP_PMTUD_ENABLE)) 244 244 skb->local_df = 1; 245 245 246 - return ip6_xmit(sk, skb, &fl6, np->opt); 246 + return ip6_xmit(sk, skb, &fl6, np->opt, np->tclass); 247 247 } 248 248 249 249 /* Returns the dst cache entry for the given source and destination ip