Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

netfilter: x_tables: Use par->net instead of computing from the passed net devices

Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>

Authored by Eric W. Biederman and committed by Pablo Neira Ayuso.
686c9b50 156c196f

+41 -43
+1 -1
net/bridge/netfilter/ebt_log.c
··· 180 180 { 181 181 const struct ebt_log_info *info = par->targinfo; 182 182 struct nf_loginfo li; 183 - struct net *net = dev_net(par->in ? par->in : par->out); 183 + struct net *net = par->net; 184 184 185 185 li.type = NF_LOG_TYPE_LOG; 186 186 li.u.log.level = info->loglevel;
+1 -1
net/bridge/netfilter/ebt_nflog.c
··· 24 24 { 25 25 const struct ebt_nflog_info *info = par->targinfo; 26 26 struct nf_loginfo li; 27 - struct net *net = dev_net(par->in ? par->in : par->out); 27 + struct net *net = par->net; 28 28 29 29 li.type = NF_LOG_TYPE_ULOG; 30 30 li.u.ulog.copy_len = info->len;
+1 -1
net/ipv4/netfilter/ipt_SYNPROXY.c
··· 258 258 synproxy_tg4(struct sk_buff *skb, const struct xt_action_param *par) 259 259 { 260 260 const struct xt_synproxy_info *info = par->targinfo; 261 - struct synproxy_net *snet = synproxy_pernet(dev_net(par->in)); 261 + struct synproxy_net *snet = synproxy_pernet(par->net); 262 262 struct synproxy_options opts = {}; 263 263 struct tcphdr *th, _th; 264 264
+2 -3
net/ipv4/netfilter/ipt_rpfilter.c
··· 32 32 return addr; 33 33 } 34 34 35 - static bool rpfilter_lookup_reverse(struct flowi4 *fl4, 35 + static bool rpfilter_lookup_reverse(struct net *net, struct flowi4 *fl4, 36 36 const struct net_device *dev, u8 flags) 37 37 { 38 38 struct fib_result res; 39 39 bool dev_match; 40 - struct net *net = dev_net(dev); 41 40 int ret __maybe_unused; 42 41 43 42 if (fib_lookup(net, fl4, &res, FIB_LOOKUP_IGNORE_LINKSTATE)) ··· 97 98 flow.flowi4_tos = RT_TOS(iph->tos); 98 99 flow.flowi4_scope = RT_SCOPE_UNIVERSE; 99 100 100 - return rpfilter_lookup_reverse(&flow, par->in, info->flags) ^ invert; 101 + return rpfilter_lookup_reverse(par->net, &flow, par->in, info->flags) ^ invert; 101 102 } 102 103 103 104 static int rpfilter_check(const struct xt_mtchk_param *par)
+1 -1
net/ipv6/netfilter/ip6t_REJECT.c
··· 39 39 reject_tg6(struct sk_buff *skb, const struct xt_action_param *par) 40 40 { 41 41 const struct ip6t_reject_info *reject = par->targinfo; 42 - struct net *net = dev_net((par->in != NULL) ? par->in : par->out); 42 + struct net *net = par->net; 43 43 44 44 switch (reject->with) { 45 45 case IP6T_ICMP6_NO_ROUTE:
+1 -1
net/ipv6/netfilter/ip6t_SYNPROXY.c
··· 275 275 synproxy_tg6(struct sk_buff *skb, const struct xt_action_param *par) 276 276 { 277 277 const struct xt_synproxy_info *info = par->targinfo; 278 - struct synproxy_net *snet = synproxy_pernet(dev_net(par->in)); 278 + struct synproxy_net *snet = synproxy_pernet(par->net); 279 279 struct synproxy_options opts = {}; 280 280 struct tcphdr *th, _th; 281 281
+3 -3
net/ipv6/netfilter/ip6t_rpfilter.c
··· 26 26 return addr_type & IPV6_ADDR_UNICAST; 27 27 } 28 28 29 - static bool rpfilter_lookup_reverse6(const struct sk_buff *skb, 29 + static bool rpfilter_lookup_reverse6(struct net *net, const struct sk_buff *skb, 30 30 const struct net_device *dev, u8 flags) 31 31 { 32 32 struct rt6_info *rt; ··· 53 53 lookup_flags |= RT6_LOOKUP_F_IFACE; 54 54 } 55 55 56 - rt = (void *) ip6_route_lookup(dev_net(dev), &fl6, lookup_flags); 56 + rt = (void *) ip6_route_lookup(net, &fl6, lookup_flags); 57 57 if (rt->dst.error) 58 58 goto out; 59 59 ··· 93 93 if (unlikely(saddrtype == IPV6_ADDR_ANY)) 94 94 return true ^ invert; /* not routable: forward path will drop it */ 95 95 96 - return rpfilter_lookup_reverse6(skb, par->in, info->flags) ^ invert; 96 + return rpfilter_lookup_reverse6(par->net, skb, par->in, info->flags) ^ invert; 97 97 } 98 98 99 99 static int rpfilter_check(const struct xt_mtchk_param *par)
+3 -6
net/netfilter/ipset/ip_set_core.c
··· 519 519 ip_set_test(ip_set_id_t index, const struct sk_buff *skb, 520 520 const struct xt_action_param *par, struct ip_set_adt_opt *opt) 521 521 { 522 - struct ip_set *set = ip_set_rcu_get( 523 - dev_net(par->in ? par->in : par->out), index); 522 + struct ip_set *set = ip_set_rcu_get(par->net, index); 524 523 int ret = 0; 525 524 526 525 BUG_ON(!set); ··· 557 558 ip_set_add(ip_set_id_t index, const struct sk_buff *skb, 558 559 const struct xt_action_param *par, struct ip_set_adt_opt *opt) 559 560 { 560 - struct ip_set *set = ip_set_rcu_get( 561 - dev_net(par->in ? par->in : par->out), index); 561 + struct ip_set *set = ip_set_rcu_get(par->net, index); 562 562 int ret; 563 563 564 564 BUG_ON(!set); ··· 579 581 ip_set_del(ip_set_id_t index, const struct sk_buff *skb, 580 582 const struct xt_action_param *par, struct ip_set_adt_opt *opt) 581 583 { 582 - struct ip_set *set = ip_set_rcu_get( 583 - dev_net(par->in ? par->in : par->out), index); 584 + struct ip_set *set = ip_set_rcu_get(par->net, index); 584 585 int ret = 0; 585 586 586 587 BUG_ON(!set);
+1 -1
net/netfilter/xt_LOG.c
··· 33 33 { 34 34 const struct xt_log_info *loginfo = par->targinfo; 35 35 struct nf_loginfo li; 36 - struct net *net = dev_net(par->in ? par->in : par->out); 36 + struct net *net = par->net; 37 37 38 38 li.type = NF_LOG_TYPE_LOG; 39 39 li.u.log.level = loginfo->level;
+1 -1
net/netfilter/xt_NFLOG.c
··· 26 26 { 27 27 const struct xt_nflog_info *info = par->targinfo; 28 28 struct nf_loginfo li; 29 - struct net *net = dev_net(par->in ? par->in : par->out); 29 + struct net *net = par->net; 30 30 31 31 li.type = NF_LOG_TYPE_ULOG; 32 32 li.u.ulog.copy_len = info->len;
+1 -1
net/netfilter/xt_TCPMSS.c
··· 108 108 return -1; 109 109 110 110 if (info->mss == XT_TCPMSS_CLAMP_PMTU) { 111 - struct net *net = dev_net(par->in ? par->in : par->out); 111 + struct net *net = par->net; 112 112 unsigned int in_mtu = tcpmss_reverse_mtu(net, skb, family); 113 113 114 114 if (dst_mtu(skb_dst(skb)) <= minlen) {
+12 -12
net/netfilter/xt_TPROXY.c
··· 250 250 * no such listener is found, or NULL if the TCP header is incomplete. 251 251 */ 252 252 static struct sock * 253 - tproxy_handle_time_wait4(struct sk_buff *skb, __be32 laddr, __be16 lport, 254 - struct sock *sk) 253 + tproxy_handle_time_wait4(struct net *net, struct sk_buff *skb, 254 + __be32 laddr, __be16 lport, struct sock *sk) 255 255 { 256 256 const struct iphdr *iph = ip_hdr(skb); 257 257 struct tcphdr _hdr, *hp; ··· 267 267 * to a listener socket if there's one */ 268 268 struct sock *sk2; 269 269 270 - sk2 = nf_tproxy_get_sock_v4(dev_net(skb->dev), iph->protocol, 270 + sk2 = nf_tproxy_get_sock_v4(net, iph->protocol, 271 271 iph->saddr, laddr ? laddr : iph->daddr, 272 272 hp->source, lport ? lport : hp->dest, 273 273 skb->dev, NFT_LOOKUP_LISTENER); ··· 290 290 } 291 291 292 292 static unsigned int 293 - tproxy_tg4(struct sk_buff *skb, __be32 laddr, __be16 lport, 293 + tproxy_tg4(struct net *net, struct sk_buff *skb, __be32 laddr, __be16 lport, 294 294 u_int32_t mark_mask, u_int32_t mark_value) 295 295 { 296 296 const struct iphdr *iph = ip_hdr(skb); ··· 305 305 * addresses, this happens if the redirect already happened 306 306 * and the current packet belongs to an already established 307 307 * connection */ 308 - sk = nf_tproxy_get_sock_v4(dev_net(skb->dev), iph->protocol, 308 + sk = nf_tproxy_get_sock_v4(net, iph->protocol, 309 309 iph->saddr, iph->daddr, 310 310 hp->source, hp->dest, 311 311 skb->dev, NFT_LOOKUP_ESTABLISHED); ··· 317 317 /* UDP has no TCP_TIME_WAIT state, so we never enter here */ 318 318 if (sk && sk->sk_state == TCP_TIME_WAIT) 319 319 /* reopening a TIME_WAIT connection needs special handling */ 320 - sk = tproxy_handle_time_wait4(skb, laddr, lport, sk); 320 + sk = tproxy_handle_time_wait4(net, skb, laddr, lport, sk); 321 321 else if (!sk) 322 322 /* no, there's no established connection, check if 323 323 * there's a listener on the redirected addr/port */ 324 - sk = nf_tproxy_get_sock_v4(dev_net(skb->dev), iph->protocol, 324 + sk = nf_tproxy_get_sock_v4(net, iph->protocol, 325 325 iph->saddr, laddr, 326 326 hp->source, lport, 327 327 skb->dev, NFT_LOOKUP_LISTENER); ··· 351 351 { 352 352 const struct xt_tproxy_target_info *tgi = par->targinfo; 353 353 354 - return tproxy_tg4(skb, tgi->laddr, tgi->lport, tgi->mark_mask, tgi->mark_value); 354 + return tproxy_tg4(par->net, skb, tgi->laddr, tgi->lport, tgi->mark_mask, tgi->mark_value); 355 355 } 356 356 357 357 static unsigned int ··· 359 359 { 360 360 const struct xt_tproxy_target_info_v1 *tgi = par->targinfo; 361 361 362 - return tproxy_tg4(skb, tgi->laddr.ip, tgi->lport, tgi->mark_mask, tgi->mark_value); 362 + return tproxy_tg4(par->net, skb, tgi->laddr.ip, tgi->lport, tgi->mark_mask, tgi->mark_value); 363 363 } 364 364 365 365 #ifdef XT_TPROXY_HAVE_IPV6 ··· 429 429 * to a listener socket if there's one */ 430 430 struct sock *sk2; 431 431 432 - sk2 = nf_tproxy_get_sock_v6(dev_net(skb->dev), tproto, 432 + sk2 = nf_tproxy_get_sock_v6(par->net, tproto, 433 433 &iph->saddr, 434 434 tproxy_laddr6(skb, &tgi->laddr.in6, &iph->daddr), 435 435 hp->source, ··· 472 472 * addresses, this happens if the redirect already happened 473 473 * and the current packet belongs to an already established 474 474 * connection */ 475 - sk = nf_tproxy_get_sock_v6(dev_net(skb->dev), tproto, 475 + sk = nf_tproxy_get_sock_v6(par->net, tproto, 476 476 &iph->saddr, &iph->daddr, 477 477 hp->source, hp->dest, 478 478 par->in, NFT_LOOKUP_ESTABLISHED); ··· 487 487 else if (!sk) 488 488 /* no there's no established connection, check if 489 489 * there's a listener on the redirected addr/port */ 490 - sk = nf_tproxy_get_sock_v6(dev_net(skb->dev), tproto, 490 + sk = nf_tproxy_get_sock_v6(par->net, tproto, 491 491 &iph->saddr, laddr, 492 492 hp->source, lport, 493 493 par->in, NFT_LOOKUP_LISTENER);
+2 -2
net/netfilter/xt_addrtype.c
··· 125 125 static bool 126 126 addrtype_mt_v0(const struct sk_buff *skb, struct xt_action_param *par) 127 127 { 128 - struct net *net = dev_net(par->in ? par->in : par->out); 128 + struct net *net = par->net; 129 129 const struct xt_addrtype_info *info = par->matchinfo; 130 130 const struct iphdr *iph = ip_hdr(skb); 131 131 bool ret = true; ··· 143 143 static bool 144 144 addrtype_mt_v1(const struct sk_buff *skb, struct xt_action_param *par) 145 145 { 146 - struct net *net = dev_net(par->in ? par->in : par->out); 146 + struct net *net = par->net; 147 147 const struct xt_addrtype_info_v1 *info = par->matchinfo; 148 148 const struct iphdr *iph; 149 149 const struct net_device *dev = NULL;
+1 -1
net/netfilter/xt_connlimit.c
··· 317 317 static bool 318 318 connlimit_mt(const struct sk_buff *skb, struct xt_action_param *par) 319 319 { 320 - struct net *net = dev_net(par->in ? par->in : par->out); 320 + struct net *net = par->net; 321 321 const struct xt_connlimit_info *info = par->matchinfo; 322 322 union nf_inet_addr addr; 323 323 struct nf_conntrack_tuple tuple;
+1 -1
net/netfilter/xt_osf.c
··· 200 200 unsigned char opts[MAX_IPOPTLEN]; 201 201 const struct xt_osf_finger *kf; 202 202 const struct xt_osf_user_finger *f; 203 - struct net *net = dev_net(p->in ? p->in : p->out); 203 + struct net *net = p->net; 204 204 205 205 if (!info) 206 206 return false;
+1 -1
net/netfilter/xt_recent.c
··· 237 237 static bool 238 238 recent_mt(const struct sk_buff *skb, struct xt_action_param *par) 239 239 { 240 - struct net *net = dev_net(par->in ? par->in : par->out); 240 + struct net *net = par->net; 241 241 struct recent_net *recent_net = recent_pernet(net); 242 242 const struct xt_recent_mtinfo_v1 *info = par->matchinfo; 243 243 struct recent_table *t;
+8 -6
net/netfilter/xt_socket.c
··· 143 143 } 144 144 } 145 145 146 - static struct sock *xt_socket_lookup_slow_v4(const struct sk_buff *skb, 146 + static struct sock *xt_socket_lookup_slow_v4(struct net *net, 147 + const struct sk_buff *skb, 147 148 const struct net_device *indev) 148 149 { 149 150 const struct iphdr *iph = ip_hdr(skb); ··· 198 197 } 199 198 #endif 200 199 201 - return xt_socket_get_sock_v4(dev_net(skb->dev), protocol, saddr, daddr, 200 + return xt_socket_get_sock_v4(net, protocol, saddr, daddr, 202 201 sport, dport, indev); 203 202 } 204 203 ··· 210 209 struct sock *sk = skb->sk; 211 210 212 211 if (!sk) 213 - sk = xt_socket_lookup_slow_v4(skb, par->in); 212 + sk = xt_socket_lookup_slow_v4(par->net, skb, par->in); 214 213 if (sk) { 215 214 bool wildcard; 216 215 bool transparent = true; ··· 336 335 return NULL; 337 336 } 338 337 339 - static struct sock *xt_socket_lookup_slow_v6(const struct sk_buff *skb, 338 + static struct sock *xt_socket_lookup_slow_v6(struct net *net, 339 + const struct sk_buff *skb, 340 340 const struct net_device *indev) 341 341 { 342 342 __be16 uninitialized_var(dport), uninitialized_var(sport); ··· 373 371 return NULL; 374 372 } 375 373 376 - return xt_socket_get_sock_v6(dev_net(skb->dev), tproto, saddr, daddr, 374 + return xt_socket_get_sock_v6(net, tproto, saddr, daddr, 377 375 sport, dport, indev); 378 376 } 379 377 ··· 385 383 struct sock *sk = skb->sk; 386 384 387 385 if (!sk) 388 - sk = xt_socket_lookup_slow_v6(skb, par->in); 386 + sk = xt_socket_lookup_slow_v6(par->net, skb, par->in); 389 387 if (sk) { 390 388 bool wildcard; 391 389 bool transparent = true;