Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf

Pablo Neira Ayuso says:

====================
Netfilter/IPVS fixes for net

The following patchset contains Netfilter fixes for your net tree,
they are:

1) Put back reference on CLUSTERIP configuration structure from the
error path, patch from Florian Westphal.

2) Put reference on CLUSTERIP configuration instead of freeing it,
another cpu may still be walking over it, also from Florian.

3) Refetch pointer to IPv6 header from nf_nat_ipv6_manip_pkt() given
packet manipulation may reallocate the skbuff header, from Florian.

4) Missing match size sanity checks in ebt_among, from Florian.

5) Convert BUG_ON to WARN_ON in ebtables, from Florian.

6) Sanity check userspace offsets in the ebtables kernel code, from Florian.

7) Missing checksum replace call in flowtable IPv4 DNAT, from Felix
Fietkau.

8) Bump the right stats on checksum error from bridge netfilter,
from Taehee Yoo.

9) Unset interface flag in IPv6 fib lookups otherwise we get
misleading routing lookup results, from Florian.

10) Missing sk_to_full_sk() in ip6_route_me_harder() from Eric Dumazet.

11) Don't allow devices to be part of multiple flowtables at the same
time, as this may break setups.

12) Missing netlink attribute validation in flowtable deletion.

13) Wrong array index in nf_unregister_net_hook() call in the
flowtable addition error path.

14) Fix FTP IPVS helper when NAT mangling is in place, patch from
Julian Anastasov.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+98 -39
+3 -1
net/bridge/br_netfilter_hooks.c
··· 214 214 215 215 iph = ip_hdr(skb); 216 216 if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl))) 217 - goto inhdr_error; 217 + goto csum_error; 218 218 219 219 len = ntohs(iph->tot_len); 220 220 if (skb->len < len) { ··· 236 236 */ 237 237 return 0; 238 238 239 + csum_error: 240 + __IP_INC_STATS(net, IPSTATS_MIB_CSUMERRORS); 239 241 inhdr_error: 240 242 __IP_INC_STATS(net, IPSTATS_MIB_INHDRERRORS); 241 243 drop:
+19 -2
net/bridge/netfilter/ebt_among.c
··· 172 172 return true; 173 173 } 174 174 175 + static bool poolsize_invalid(const struct ebt_mac_wormhash *w) 176 + { 177 + return w && w->poolsize >= (INT_MAX / sizeof(struct ebt_mac_wormhash_tuple)); 178 + } 179 + 175 180 static int ebt_among_mt_check(const struct xt_mtchk_param *par) 176 181 { 177 182 const struct ebt_among_info *info = par->matchinfo; 178 183 const struct ebt_entry_match *em = 179 184 container_of(par->matchinfo, const struct ebt_entry_match, data); 180 - int expected_length = sizeof(struct ebt_among_info); 185 + unsigned int expected_length = sizeof(struct ebt_among_info); 181 186 const struct ebt_mac_wormhash *wh_dst, *wh_src; 182 187 int err; 183 188 189 + if (expected_length > em->match_size) 190 + return -EINVAL; 191 + 184 192 wh_dst = ebt_among_wh_dst(info); 185 - wh_src = ebt_among_wh_src(info); 193 + if (poolsize_invalid(wh_dst)) 194 + return -EINVAL; 195 + 186 196 expected_length += ebt_mac_wormhash_size(wh_dst); 197 + if (expected_length > em->match_size) 198 + return -EINVAL; 199 + 200 + wh_src = ebt_among_wh_src(info); 201 + if (poolsize_invalid(wh_src)) 202 + return -EINVAL; 203 + 187 204 expected_length += ebt_mac_wormhash_size(wh_src); 188 205 189 206 if (em->match_size != EBT_ALIGN(expected_length)) {
+30 -10
net/bridge/netfilter/ebtables.c
··· 1641 1641 int off = ebt_compat_match_offset(match, m->match_size); 1642 1642 compat_uint_t msize = m->match_size - off; 1643 1643 1644 - BUG_ON(off >= m->match_size); 1644 + if (WARN_ON(off >= m->match_size)) 1645 + return -EINVAL; 1645 1646 1646 1647 if (copy_to_user(cm->u.name, match->name, 1647 1648 strlen(match->name) + 1) || put_user(msize, &cm->match_size)) ··· 1672 1671 int off = xt_compat_target_offset(target); 1673 1672 compat_uint_t tsize = t->target_size - off; 1674 1673 1675 - BUG_ON(off >= t->target_size); 1674 + if (WARN_ON(off >= t->target_size)) 1675 + return -EINVAL; 1676 1676 1677 1677 if (copy_to_user(cm->u.name, target->name, 1678 1678 strlen(target->name) + 1) || put_user(tsize, &cm->match_size)) ··· 1904 1902 if (state->buf_kern_start == NULL) 1905 1903 goto count_only; 1906 1904 1907 - BUG_ON(state->buf_kern_offset + sz > state->buf_kern_len); 1905 + if (WARN_ON(state->buf_kern_offset + sz > state->buf_kern_len)) 1906 + return -EINVAL; 1908 1907 1909 1908 memcpy(state->buf_kern_start + state->buf_kern_offset, data, sz); 1910 1909 ··· 1918 1915 { 1919 1916 char *b = state->buf_kern_start; 1920 1917 1921 - BUG_ON(b && state->buf_kern_offset > state->buf_kern_len); 1918 + if (WARN_ON(b && state->buf_kern_offset > state->buf_kern_len)) 1919 + return -EINVAL; 1922 1920 1923 1921 if (b != NULL && sz > 0) 1924 1922 memset(b + state->buf_kern_offset, 0, sz); ··· 1996 1992 pad = XT_ALIGN(size_kern) - size_kern; 1997 1993 1998 1994 if (pad > 0 && dst) { 1999 - BUG_ON(state->buf_kern_len <= pad); 2000 - BUG_ON(state->buf_kern_offset - (match_size + off) + size_kern > state->buf_kern_len - pad); 1995 + if (WARN_ON(state->buf_kern_len <= pad)) 1996 + return -EINVAL; 1997 + if (WARN_ON(state->buf_kern_offset - (match_size + off) + size_kern > state->buf_kern_len - pad)) 1998 + return -EINVAL; 2001 1999 memset(dst + size_kern, 0, pad); 2002 2000 } 2003 2001 return off + match_size; ··· 2049 2043 if (ret < 0) 2050 2044 return ret; 2051 2045 2052 - 
BUG_ON(ret < match32->match_size); 2046 + if (WARN_ON(ret < match32->match_size)) 2047 + return -EINVAL; 2053 2048 growth += ret - match32->match_size; 2054 2049 growth += ebt_compat_entry_padsize(); 2055 2050 ··· 2060 2053 if (match_kern) 2061 2054 match_kern->match_size = ret; 2062 2055 2063 - WARN_ON(type == EBT_COMPAT_TARGET && size_left); 2056 + if (WARN_ON(type == EBT_COMPAT_TARGET && size_left)) 2057 + return -EINVAL; 2058 + 2064 2059 match32 = (struct compat_ebt_entry_mwt *) buf; 2065 2060 } 2066 2061 ··· 2118 2109 * 2119 2110 * offsets are relative to beginning of struct ebt_entry (i.e., 0). 2120 2111 */ 2112 + for (i = 0; i < 4 ; ++i) { 2113 + if (offsets[i] >= *total) 2114 + return -EINVAL; 2115 + if (i == 0) 2116 + continue; 2117 + if (offsets[i-1] > offsets[i]) 2118 + return -EINVAL; 2119 + } 2120 + 2121 2121 for (i = 0, j = 1 ; j < 4 ; j++, i++) { 2122 2122 struct compat_ebt_entry_mwt *match32; 2123 2123 unsigned int size; ··· 2158 2140 2159 2141 startoff = state->buf_user_offset - startoff; 2160 2142 2161 - BUG_ON(*total < startoff); 2143 + if (WARN_ON(*total < startoff)) 2144 + return -EINVAL; 2162 2145 *total -= startoff; 2163 2146 return 0; 2164 2147 } ··· 2286 2267 state.buf_kern_len = size64; 2287 2268 2288 2269 ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state); 2289 - BUG_ON(ret < 0); /* parses same data again */ 2270 + if (WARN_ON(ret < 0)) 2271 + goto out_unlock; 2290 2272 2291 2273 vfree(entries_tmp); 2292 2274 tmp.entries_size = size64;
+10 -5
net/ipv4/netfilter/ipt_CLUSTERIP.c
··· 232 232 c->hash_mode = i->hash_mode; 233 233 c->hash_initval = i->hash_initval; 234 234 refcount_set(&c->refcount, 1); 235 - refcount_set(&c->entries, 1); 236 235 237 236 spin_lock_bh(&cn->lock); 238 237 if (__clusterip_config_find(net, ip)) { ··· 262 263 263 264 c->notifier.notifier_call = clusterip_netdev_event; 264 265 err = register_netdevice_notifier(&c->notifier); 265 - if (!err) 266 + if (!err) { 267 + refcount_set(&c->entries, 1); 266 268 return c; 269 + } 267 270 268 271 #ifdef CONFIG_PROC_FS 269 272 proc_remove(c->pde); ··· 274 273 spin_lock_bh(&cn->lock); 275 274 list_del_rcu(&c->list); 276 275 spin_unlock_bh(&cn->lock); 277 - kfree(c); 276 + clusterip_config_put(c); 278 277 279 278 return ERR_PTR(err); 280 279 } ··· 497 496 return PTR_ERR(config); 498 497 } 499 498 } 500 - cipinfo->config = config; 501 499 502 500 ret = nf_ct_netns_get(par->net, par->family); 503 - if (ret < 0) 501 + if (ret < 0) { 504 502 pr_info("cannot load conntrack support for proto=%u\n", 505 503 par->family); 504 + clusterip_config_entry_put(par->net, config); 505 + clusterip_config_put(config); 506 + return ret; 507 + } 506 508 507 509 if (!par->net->xt.clusterip_deprecated_warning) { 508 510 pr_info("ipt_CLUSTERIP is deprecated and it will removed soon, " ··· 513 509 par->net->xt.clusterip_deprecated_warning = true; 514 510 } 515 511 512 + cipinfo->config = config; 516 513 return ret; 517 514 } 518 515
+1
net/ipv4/netfilter/nf_flow_table_ipv4.c
··· 111 111 default: 112 112 return -1; 113 113 } 114 + csum_replace4(&iph->check, addr, new_addr); 114 115 115 116 return nf_flow_nat_ip_l4proto(skb, iph, thoff, addr, new_addr); 116 117 }
+5 -4
net/ipv6/netfilter.c
··· 21 21 int ip6_route_me_harder(struct net *net, struct sk_buff *skb) 22 22 { 23 23 const struct ipv6hdr *iph = ipv6_hdr(skb); 24 + struct sock *sk = sk_to_full_sk(skb->sk); 24 25 unsigned int hh_len; 25 26 struct dst_entry *dst; 26 27 struct flowi6 fl6 = { 27 - .flowi6_oif = skb->sk ? skb->sk->sk_bound_dev_if : 0, 28 + .flowi6_oif = sk ? sk->sk_bound_dev_if : 0, 28 29 .flowi6_mark = skb->mark, 29 - .flowi6_uid = sock_net_uid(net, skb->sk), 30 + .flowi6_uid = sock_net_uid(net, sk), 30 31 .daddr = iph->daddr, 31 32 .saddr = iph->saddr, 32 33 }; 33 34 int err; 34 35 35 - dst = ip6_route_output(net, skb->sk, &fl6); 36 + dst = ip6_route_output(net, sk, &fl6); 36 37 err = dst->error; 37 38 if (err) { 38 39 IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES); ··· 51 50 if (!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) && 52 51 xfrm_decode_session(skb, flowi6_to_flowi(&fl6), AF_INET6) == 0) { 53 52 skb_dst_set(skb, NULL); 54 - dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), skb->sk, 0); 53 + dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), sk, 0); 55 54 if (IS_ERR(dst)) 56 55 return PTR_ERR(dst); 57 56 skb_dst_set(skb, dst);
-4
net/ipv6/netfilter/ip6t_rpfilter.c
··· 48 48 } 49 49 50 50 fl6.flowi6_mark = flags & XT_RPFILTER_VALID_MARK ? skb->mark : 0; 51 - if ((flags & XT_RPFILTER_LOOSE) == 0) { 52 - fl6.flowi6_oif = dev->ifindex; 53 - lookup_flags |= RT6_LOOKUP_F_IFACE; 54 - } 55 51 56 52 rt = (void *) ip6_route_lookup(net, &fl6, lookup_flags); 57 53 if (rt->dst.error)
+4
net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
··· 99 99 !l4proto->manip_pkt(skb, &nf_nat_l3proto_ipv6, iphdroff, hdroff, 100 100 target, maniptype)) 101 101 return false; 102 + 103 + /* must reload, offset might have changed */ 104 + ipv6h = (void *)skb->data + iphdroff; 105 + 102 106 manip_addr: 103 107 if (maniptype == NF_NAT_MANIP_SRC) 104 108 ipv6h->saddr = target->src.u3.in6;
+2 -10
net/ipv6/netfilter/nft_fib_ipv6.c
··· 180 180 } 181 181 182 182 *dest = 0; 183 - again: 184 183 rt = (void *)ip6_route_lookup(nft_net(pkt), &fl6, lookup_flags); 185 184 if (rt->dst.error) 186 185 goto put_rt_err; ··· 188 189 if (rt->rt6i_flags & (RTF_REJECT | RTF_ANYCAST | RTF_LOCAL)) 189 190 goto put_rt_err; 190 191 191 - if (oif && oif != rt->rt6i_idev->dev) { 192 - /* multipath route? Try again with F_IFACE */ 193 - if ((lookup_flags & RT6_LOOKUP_F_IFACE) == 0) { 194 - lookup_flags |= RT6_LOOKUP_F_IFACE; 195 - fl6.flowi6_oif = oif->ifindex; 196 - ip6_rt_put(rt); 197 - goto again; 198 - } 199 - } 192 + if (oif && oif != rt->rt6i_idev->dev) 193 + goto put_rt_err; 200 194 201 195 switch (priv->result) { 202 196 case NFT_FIB_RESULT_OIF:
+1 -1
net/netfilter/ipvs/ip_vs_ftp.c
··· 260 260 buf_len = strlen(buf); 261 261 262 262 ct = nf_ct_get(skb, &ctinfo); 263 - if (ct && (ct->status & IPS_NAT_MASK)) { 263 + if (ct) { 264 264 bool mangled; 265 265 266 266 /* If mangling fails this function will return 0
+23 -2
net/netfilter/nf_tables_api.c
··· 5037 5037 { 5038 5038 const struct nfgenmsg *nfmsg = nlmsg_data(nlh); 5039 5039 const struct nf_flowtable_type *type; 5040 + struct nft_flowtable *flowtable, *ft; 5040 5041 u8 genmask = nft_genmask_next(net); 5041 5042 int family = nfmsg->nfgen_family; 5042 - struct nft_flowtable *flowtable; 5043 5043 struct nft_table *table; 5044 5044 struct nft_ctx ctx; 5045 5045 int err, i, k; ··· 5099 5099 goto err3; 5100 5100 5101 5101 for (i = 0; i < flowtable->ops_len; i++) { 5102 + if (!flowtable->ops[i].dev) 5103 + continue; 5104 + 5105 + list_for_each_entry(ft, &table->flowtables, list) { 5106 + for (k = 0; k < ft->ops_len; k++) { 5107 + if (!ft->ops[k].dev) 5108 + continue; 5109 + 5110 + if (flowtable->ops[i].dev == ft->ops[k].dev && 5111 + flowtable->ops[i].pf == ft->ops[k].pf) { 5112 + err = -EBUSY; 5113 + goto err4; 5114 + } 5115 + } 5116 + } 5117 + 5102 5118 err = nf_register_net_hook(net, &flowtable->ops[i]); 5103 5119 if (err < 0) 5104 5120 goto err4; ··· 5136 5120 i = flowtable->ops_len; 5137 5121 err4: 5138 5122 for (k = i - 1; k >= 0; k--) 5139 - nf_unregister_net_hook(net, &flowtable->ops[i]); 5123 + nf_unregister_net_hook(net, &flowtable->ops[k]); 5140 5124 5141 5125 kfree(flowtable->ops); 5142 5126 err3: ··· 5160 5144 struct nft_flowtable *flowtable; 5161 5145 struct nft_table *table; 5162 5146 struct nft_ctx ctx; 5147 + 5148 + if (!nla[NFTA_FLOWTABLE_TABLE] || 5149 + (!nla[NFTA_FLOWTABLE_NAME] && 5150 + !nla[NFTA_FLOWTABLE_HANDLE])) 5151 + return -EINVAL; 5163 5152 5164 5153 table = nf_tables_table_lookup(net, nla[NFTA_FLOWTABLE_TABLE], 5165 5154 family, genmask);