Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf

Pablo Neira Ayuso says:

====================
Netfilter fixes for net

The following patchset contains Netfilter fixes for net:

1) Missing netns context in arp_tables, from Florian Westphal.

2) Underflow in flowtable reference counter, from wenxu.

3) Fix incorrect ethernet destination address in flowtable offload,
from wenxu.

4) Check for status of neighbour entry, from wenxu.

5) Fix NAT port mangling, from wenxu.

6) Unbind callbacks from destroy path to cleanup hardware properly
on flowtable removal.

7) Fix missing casting statistics timestamp, add nf_flowtable_time_stamp
and use it.

8) NULL pointer dereference when the timeout argument is NULL in conntrack
   dccp and sctp protocol helpers, from Florian Westphal.

9) Possible NULL dereference in ipset with IPSET_ATTR_LINENO, also from
   Florian.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+75 -39
+6
include/net/netfilter/nf_flow_table.h
··· 106 106 }; 107 107 108 108 #define NF_FLOW_TIMEOUT (30 * HZ) 109 + #define nf_flowtable_time_stamp (u32)jiffies 110 + 111 + static inline __s32 nf_flow_timeout_delta(unsigned int timeout) 112 + { 113 + return (__s32)(timeout - nf_flowtable_time_stamp); 114 + } 109 115 110 116 struct nf_flow_route { 111 117 struct {
+16 -11
net/ipv4/netfilter/arp_tables.c
··· 384 384 return 1; 385 385 } 386 386 387 - static inline int check_target(struct arpt_entry *e, const char *name) 387 + static int check_target(struct arpt_entry *e, struct net *net, const char *name) 388 388 { 389 389 struct xt_entry_target *t = arpt_get_target(e); 390 390 struct xt_tgchk_param par = { 391 + .net = net, 391 392 .table = name, 392 393 .entryinfo = e, 393 394 .target = t->u.kernel.target, ··· 400 399 return xt_check_target(&par, t->u.target_size - sizeof(*t), 0, false); 401 400 } 402 401 403 - static inline int 404 - find_check_entry(struct arpt_entry *e, const char *name, unsigned int size, 402 + static int 403 + find_check_entry(struct arpt_entry *e, struct net *net, const char *name, 404 + unsigned int size, 405 405 struct xt_percpu_counter_alloc_state *alloc_state) 406 406 { 407 407 struct xt_entry_target *t; ··· 421 419 } 422 420 t->u.kernel.target = target; 423 421 424 - ret = check_target(e, name); 422 + ret = check_target(e, net, name); 425 423 if (ret) 426 424 goto err; 427 425 return 0; ··· 514 512 /* Checks and translates the user-supplied table segment (held in 515 513 * newinfo). 
516 514 */ 517 - static int translate_table(struct xt_table_info *newinfo, void *entry0, 515 + static int translate_table(struct net *net, 516 + struct xt_table_info *newinfo, 517 + void *entry0, 518 518 const struct arpt_replace *repl) 519 519 { 520 520 struct xt_percpu_counter_alloc_state alloc_state = { 0 }; ··· 573 569 /* Finally, each sanity check must pass */ 574 570 i = 0; 575 571 xt_entry_foreach(iter, entry0, newinfo->size) { 576 - ret = find_check_entry(iter, repl->name, repl->size, 572 + ret = find_check_entry(iter, net, repl->name, repl->size, 577 573 &alloc_state); 578 574 if (ret != 0) 579 575 break; ··· 978 974 goto free_newinfo; 979 975 } 980 976 981 - ret = translate_table(newinfo, loc_cpu_entry, &tmp); 977 + ret = translate_table(net, newinfo, loc_cpu_entry, &tmp); 982 978 if (ret != 0) 983 979 goto free_newinfo; 984 980 ··· 1153 1149 } 1154 1150 } 1155 1151 1156 - static int translate_compat_table(struct xt_table_info **pinfo, 1152 + static int translate_compat_table(struct net *net, 1153 + struct xt_table_info **pinfo, 1157 1154 void **pentry0, 1158 1155 const struct compat_arpt_replace *compatr) 1159 1156 { ··· 1222 1217 repl.num_counters = 0; 1223 1218 repl.counters = NULL; 1224 1219 repl.size = newinfo->size; 1225 - ret = translate_table(newinfo, entry1, &repl); 1220 + ret = translate_table(net, newinfo, entry1, &repl); 1226 1221 if (ret) 1227 1222 goto free_newinfo; 1228 1223 ··· 1275 1270 goto free_newinfo; 1276 1271 } 1277 1272 1278 - ret = translate_compat_table(&newinfo, &loc_cpu_entry, &tmp); 1273 + ret = translate_compat_table(net, &newinfo, &loc_cpu_entry, &tmp); 1279 1274 if (ret != 0) 1280 1275 goto free_newinfo; 1281 1276 ··· 1551 1546 loc_cpu_entry = newinfo->entries; 1552 1547 memcpy(loc_cpu_entry, repl->entries, repl->size); 1553 1548 1554 - ret = translate_table(newinfo, loc_cpu_entry, repl); 1549 + ret = translate_table(net, newinfo, loc_cpu_entry, repl); 1555 1550 if (ret != 0) 1556 1551 goto out_free; 1557 1552
+2 -1
net/netfilter/ipset/ip_set_core.c
··· 1848 1848 struct ip_set *set; 1849 1849 struct nlattr *tb[IPSET_ATTR_ADT_MAX + 1] = {}; 1850 1850 int ret = 0; 1851 + u32 lineno; 1851 1852 1852 1853 if (unlikely(protocol_min_failed(attr) || 1853 1854 !attr[IPSET_ATTR_SETNAME] || ··· 1865 1864 return -IPSET_ERR_PROTOCOL; 1866 1865 1867 1866 rcu_read_lock_bh(); 1868 - ret = set->variant->uadt(set, tb, IPSET_TEST, NULL, 0, 0); 1867 + ret = set->variant->uadt(set, tb, IPSET_TEST, &lineno, 0, 0); 1869 1868 rcu_read_unlock_bh(); 1870 1869 /* Userspace can't trigger element to be re-added */ 1871 1870 if (ret == -EAGAIN)
+3
net/netfilter/nf_conntrack_proto_dccp.c
··· 677 677 unsigned int *timeouts = data; 678 678 int i; 679 679 680 + if (!timeouts) 681 + timeouts = dn->dccp_timeout; 682 + 680 683 /* set default DCCP timeouts. */ 681 684 for (i=0; i<CT_DCCP_MAX; i++) 682 685 timeouts[i] = dn->dccp_timeout[i];
+3
net/netfilter/nf_conntrack_proto_sctp.c
··· 594 594 struct nf_sctp_net *sn = nf_sctp_pernet(net); 595 595 int i; 596 596 597 + if (!timeouts) 598 + timeouts = sn->timeouts; 599 + 597 600 /* set default SCTP timeouts. */ 598 601 for (i=0; i<SCTP_CONNTRACK_MAX; i++) 599 602 timeouts[i] = sn->timeouts[i];
+1 -6
net/netfilter/nf_flow_table_core.c
··· 134 134 #define NF_FLOWTABLE_TCP_PICKUP_TIMEOUT (120 * HZ) 135 135 #define NF_FLOWTABLE_UDP_PICKUP_TIMEOUT (30 * HZ) 136 136 137 - static inline __s32 nf_flow_timeout_delta(unsigned int timeout) 138 - { 139 - return (__s32)(timeout - (u32)jiffies); 140 - } 141 - 142 137 static void flow_offload_fixup_ct_timeout(struct nf_conn *ct) 143 138 { 144 139 const struct nf_conntrack_l4proto *l4proto; ··· 227 232 { 228 233 int err; 229 234 230 - flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT; 235 + flow->timeout = nf_flowtable_time_stamp + NF_FLOW_TIMEOUT; 231 236 232 237 err = rhashtable_insert_fast(&flow_table->rhashtable, 233 238 &flow->tuplehash[0].node,
+2 -2
net/netfilter/nf_flow_table_ip.c
··· 280 280 if (nf_flow_nat_ip(flow, skb, thoff, dir) < 0) 281 281 return NF_DROP; 282 282 283 - flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT; 283 + flow->timeout = nf_flowtable_time_stamp + NF_FLOW_TIMEOUT; 284 284 iph = ip_hdr(skb); 285 285 ip_decrease_ttl(iph); 286 286 skb->tstamp = 0; ··· 509 509 if (nf_flow_nat_ipv6(flow, skb, dir) < 0) 510 510 return NF_DROP; 511 511 512 - flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT; 512 + flow->timeout = nf_flowtable_time_stamp + NF_FLOW_TIMEOUT; 513 513 ip6h = ipv6_hdr(skb); 514 514 ip6h->hop_limit--; 515 515 skb->tstamp = 0;
+36 -14
net/netfilter/nf_flow_table_offload.c
··· 166 166 enum flow_offload_tuple_dir dir, 167 167 struct nf_flow_rule *flow_rule) 168 168 { 169 - const struct flow_offload_tuple *tuple = &flow->tuplehash[dir].tuple; 170 169 struct flow_action_entry *entry0 = flow_action_entry_next(flow_rule); 171 170 struct flow_action_entry *entry1 = flow_action_entry_next(flow_rule); 171 + const void *daddr = &flow->tuplehash[!dir].tuple.src_v4; 172 + const struct dst_entry *dst_cache; 173 + unsigned char ha[ETH_ALEN]; 172 174 struct neighbour *n; 173 175 u32 mask, val; 176 + u8 nud_state; 174 177 u16 val16; 175 178 176 - n = dst_neigh_lookup(tuple->dst_cache, &tuple->dst_v4); 179 + dst_cache = flow->tuplehash[dir].tuple.dst_cache; 180 + n = dst_neigh_lookup(dst_cache, daddr); 177 181 if (!n) 178 182 return -ENOENT; 179 183 184 + read_lock_bh(&n->lock); 185 + nud_state = n->nud_state; 186 + ether_addr_copy(ha, n->ha); 187 + read_unlock_bh(&n->lock); 188 + 189 + if (!(nud_state & NUD_VALID)) { 190 + neigh_release(n); 191 + return -ENOENT; 192 + } 193 + 180 194 mask = ~0xffffffff; 181 - memcpy(&val, n->ha, 4); 195 + memcpy(&val, ha, 4); 182 196 flow_offload_mangle(entry0, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 0, 183 197 &val, &mask); 184 198 185 199 mask = ~0x0000ffff; 186 - memcpy(&val16, n->ha + 4, 2); 200 + memcpy(&val16, ha + 4, 2); 187 201 val = val16; 188 202 flow_offload_mangle(entry1, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 4, 189 203 &val, &mask); ··· 349 335 struct nf_flow_rule *flow_rule) 350 336 { 351 337 struct flow_action_entry *entry = flow_action_entry_next(flow_rule); 352 - u32 mask = ~htonl(0xffff0000), port; 338 + u32 mask, port; 353 339 u32 offset; 354 340 355 341 switch (dir) { 356 342 case FLOW_OFFLOAD_DIR_ORIGINAL: 357 343 port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_port); 358 344 offset = 0; /* offsetof(struct tcphdr, source); */ 345 + port = htonl(port << 16); 346 + mask = ~htonl(0xffff0000); 359 347 break; 360 348 case FLOW_OFFLOAD_DIR_REPLY: 361 349 port = 
ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_port); 362 350 offset = 0; /* offsetof(struct tcphdr, dest); */ 351 + port = htonl(port); 352 + mask = ~htonl(0xffff); 363 353 break; 364 354 default: 365 355 return; 366 356 } 367 - port = htonl(port << 16); 357 + 368 358 flow_offload_mangle(entry, flow_offload_l4proto(flow), offset, 369 359 &port, &mask); 370 360 } ··· 379 361 struct nf_flow_rule *flow_rule) 380 362 { 381 363 struct flow_action_entry *entry = flow_action_entry_next(flow_rule); 382 - u32 mask = ~htonl(0xffff), port; 364 + u32 mask, port; 383 365 u32 offset; 384 366 385 367 switch (dir) { 386 368 case FLOW_OFFLOAD_DIR_ORIGINAL: 387 - port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_port); 388 - offset = 0; /* offsetof(struct tcphdr, source); */ 369 + port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_port); 370 + offset = 0; /* offsetof(struct tcphdr, dest); */ 371 + port = htonl(port); 372 + mask = ~htonl(0xffff); 389 373 break; 390 374 case FLOW_OFFLOAD_DIR_REPLY: 391 - port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_port); 392 - offset = 0; /* offsetof(struct tcphdr, dest); */ 375 + port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_port); 376 + offset = 0; /* offsetof(struct tcphdr, source); */ 377 + port = htonl(port << 16); 378 + mask = ~htonl(0xffff0000); 393 379 break; 394 380 default: 395 381 return; 396 382 } 397 - port = htonl(port); 383 + 398 384 flow_offload_mangle(entry, flow_offload_l4proto(flow), offset, 399 385 &port, &mask); 400 386 } ··· 781 759 struct flow_offload *flow) 782 760 { 783 761 struct flow_offload_work *offload; 784 - s64 delta; 762 + __s32 delta; 785 763 786 - delta = flow->timeout - jiffies; 764 + delta = nf_flow_timeout_delta(flow->timeout); 787 765 if ((delta >= (9 * NF_FLOW_TIMEOUT) / 10) || 788 766 flow->flags & FLOW_OFFLOAD_HW_DYING) 789 767 return;
+6 -2
net/netfilter/nf_tables_api.c
··· 5984 5984 return ERR_PTR(-ENOENT); 5985 5985 } 5986 5986 5987 + /* Only called from error and netdev event paths. */ 5987 5988 static void nft_unregister_flowtable_hook(struct net *net, 5988 5989 struct nft_flowtable *flowtable, 5989 5990 struct nft_hook *hook) ··· 6000 5999 struct nft_hook *hook; 6001 6000 6002 6001 list_for_each_entry(hook, &flowtable->hook_list, list) 6003 - nft_unregister_flowtable_hook(net, flowtable, hook); 6002 + nf_unregister_net_hook(net, &hook->ops); 6004 6003 } 6005 6004 6006 6005 static int nft_register_flowtable_net_hooks(struct net *net, ··· 6449 6448 { 6450 6449 struct nft_hook *hook, *next; 6451 6450 6451 + flowtable->data.type->free(&flowtable->data); 6452 6452 list_for_each_entry_safe(hook, next, &flowtable->hook_list, list) { 6453 + flowtable->data.type->setup(&flowtable->data, hook->ops.dev, 6454 + FLOW_BLOCK_UNBIND); 6453 6455 list_del_rcu(&hook->list); 6454 6456 kfree(hook); 6455 6457 } 6456 6458 kfree(flowtable->name); 6457 - flowtable->data.type->free(&flowtable->data); 6458 6459 module_put(flowtable->data.type->owner); 6459 6460 kfree(flowtable); 6460 6461 } ··· 6500 6497 if (hook->ops.dev != dev) 6501 6498 continue; 6502 6499 6500 + /* flow_offload_netdev_event() cleans up entries for us. */ 6503 6501 nft_unregister_flowtable_hook(dev_net(dev), flowtable, hook); 6504 6502 list_del_rcu(&hook->list); 6505 6503 kfree_rcu(hook, rcu);
-3
net/netfilter/nft_flow_offload.c
··· 200 200 static void nft_flow_offload_destroy(const struct nft_ctx *ctx, 201 201 const struct nft_expr *expr) 202 202 { 203 - struct nft_flow_offload *priv = nft_expr_priv(expr); 204 - 205 - priv->flowtable->use--; 206 203 nf_ct_netns_put(ctx->net, ctx->family); 207 204 } 208 205