tcp: don't annotate mark on control socket from tcp_v6_send_response()

Unlike ipv4, this control socket is shared by all cpus, so we cannot use
it as a scratchpad area to annotate the mark that we pass to ip6_xmit().

Add a new parameter to ip6_xmit() to pass the mark explicitly. The SCTP
socket family caches the flowi6 structure in the sctp_transport
structure, so we cannot use it to carry the mark unless we reset it back
afterwards, an approach I discarded since it looks ugly to me.
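
To make the difference concrete, here is a minimal userspace sketch
(toy structures only, not kernel code; xmit_old() and xmit_new() are
hypothetical stand-ins for the old and new ip6_xmit() calling
conventions) of why a shared socket cannot carry per-packet state:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Toy stand-ins, named after their kernel counterparts. */
struct sock { uint32_t sk_mark; };
struct sk_buff { uint32_t mark; };

/* Like the per-netns IPv6 control socket: one instance, all CPUs. */
static struct sock ctl_sk;

/* Old pattern: stage the mark in the socket, then read it back at
 * transmit time.  With a socket shared by all CPUs, two senders can
 * interleave between the store and the read, so a reply may go out
 * carrying the other CPU's mark. */
static void xmit_old(struct sock *sk, struct sk_buff *skb, uint32_t mark)
{
        sk->sk_mark = mark;
        skb->mark = sk->sk_mark;
}

/* New pattern: the mark travels as a function argument, so nothing
 * shared is written and concurrent senders cannot interfere. */
static void xmit_new(struct sock *sk, struct sk_buff *skb, uint32_t mark)
{
        (void)sk;
        skb->mark = mark;
}

int main(void)
{
        struct sk_buff skb = { 0 };

        xmit_new(&ctl_sk, &skb, 0x2a);
        printf("new: skb mark %#" PRIx32 ", shared ctl_sk untouched (%#" PRIx32 ")\n",
               skb.mark, ctl_sk.sk_mark);

        xmit_old(&ctl_sk, &skb, 0x2a);
        printf("old: skb mark %#" PRIx32 ", shared ctl_sk dirtied (%#" PRIx32 ")\n",
               skb.mark, ctl_sk.sk_mark);
        return 0;
}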

Fixes: bf99b4ded5f8 ("tcp: fix mark propagation with fwmark_reflect enabled")
Suggested-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
Signed-off-by: David S. Miller <davem@davemloft.net>


---
 include/net/ipv6.h               | 2 +-
 net/dccp/ipv6.c                  | 4 ++--
 net/ipv6/inet6_connection_sock.c | 2 +-
 net/ipv6/ip6_output.c            | 4 ++--
 net/ipv6/tcp_ipv6.c              | 5 ++---
 net/sctp/ipv6.c                  | 3 ++-
 6 files changed, 10 insertions(+), 10 deletions(-)
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -871,6 +871,6 @@
  * upper-layer output functions
  */
 int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
-             struct ipv6_txoptions *opt, int tclass);
+             __u32 mark, struct ipv6_txoptions *opt, int tclass);
 
 int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr);
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -227,7 +227,7 @@
                 opt = ireq->ipv6_opt;
                 if (!opt)
                         opt = rcu_dereference(np->opt);
-                err = ip6_xmit(sk, skb, &fl6, opt, np->tclass);
+                err = ip6_xmit(sk, skb, &fl6, sk->sk_mark, opt, np->tclass);
                 rcu_read_unlock();
                 err = net_xmit_eval(err);
         }
@@ -281,7 +281,7 @@
         dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
         if (!IS_ERR(dst)) {
                 skb_dst_set(skb, dst);
-                ip6_xmit(ctl_sk, skb, &fl6, NULL, 0);
+                ip6_xmit(ctl_sk, skb, &fl6, 0, NULL, 0);
                 DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
                 DCCP_INC_STATS(DCCP_MIB_OUTRSTS);
                 return;
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -176,7 +176,7 @@
         /* Restore final destination back after routing done */
         fl6.daddr = sk->sk_v6_daddr;
 
-        res = ip6_xmit(sk, skb, &fl6, rcu_dereference(np->opt),
+        res = ip6_xmit(sk, skb, &fl6, sk->sk_mark, rcu_dereference(np->opt),
                        np->tclass);
         rcu_read_unlock();
         return res;
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -172,7 +172,7 @@
  * which are using proper atomic operations or spinlocks.
  */
 int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
-             struct ipv6_txoptions *opt, int tclass)
+             __u32 mark, struct ipv6_txoptions *opt, int tclass)
 {
         struct net *net = sock_net(sk);
         const struct ipv6_pinfo *np = inet6_sk(sk);
@@ -240,7 +240,7 @@
 
         skb->protocol = htons(ETH_P_IPV6);
         skb->priority = sk->sk_priority;
-        skb->mark = sk->sk_mark;
+        skb->mark = mark;
 
         mtu = dst_mtu(dst);
         if ((skb->len <= mtu) || skb->ignore_df || skb_is_gso(skb)) {
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -469,7 +469,7 @@
                 opt = ireq->ipv6_opt;
                 if (!opt)
                         opt = rcu_dereference(np->opt);
-                err = ip6_xmit(sk, skb, fl6, opt, np->tclass);
+                err = ip6_xmit(sk, skb, fl6, sk->sk_mark, opt, np->tclass);
                 rcu_read_unlock();
                 err = net_xmit_eval(err);
         }
@@ -840,8 +840,7 @@
         dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
         if (!IS_ERR(dst)) {
                 skb_dst_set(buff, dst);
-                ctl_sk->sk_mark = fl6.flowi6_mark;
-                ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
+                ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL, tclass);
                 TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
                 if (rst)
                         TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -222,7 +222,8 @@
         SCTP_INC_STATS(sock_net(sk), SCTP_MIB_OUTSCTPPACKS);
 
         rcu_read_lock();
-        res = ip6_xmit(sk, skb, fl6, rcu_dereference(np->opt), np->tclass);
+        res = ip6_xmit(sk, skb, fl6, sk->sk_mark, rcu_dereference(np->opt),
+                       np->tclass);
         rcu_read_unlock();
         return res;
 }