Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

netfilter: nf_conntrack: use is_vmalloc_addr()

Use is_vmalloc_addr() in nf_ct_free_hashtable() and get rid of
the vmalloc flags used to indicate that a hash table has been
allocated using vmalloc().

Signed-off-by: Patrick McHardy <kaber@trash.net>

+19 -39
+2 -2
include/net/netfilter/nf_conntrack.h
··· 202 202 * Allocate a hashtable of hlist_head (if nulls == 0), 203 203 * or hlist_nulls_head (if nulls == 1) 204 204 */ 205 - extern void *nf_ct_alloc_hashtable(unsigned int *sizep, int *vmalloced, int nulls); 205 + extern void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls); 206 206 207 - extern void nf_ct_free_hashtable(void *hash, int vmalloced, unsigned int size); 207 + extern void nf_ct_free_hashtable(void *hash, unsigned int size); 208 208 209 209 extern struct nf_conntrack_tuple_hash * 210 210 __nf_conntrack_find(struct net *net, u16 zone,
-2
include/net/netns/conntrack.h
··· 28 28 struct ctl_table_header *acct_sysctl_header; 29 29 struct ctl_table_header *event_sysctl_header; 30 30 #endif 31 - int hash_vmalloc; 32 - int expect_vmalloc; 33 31 char *slabname; 34 32 }; 35 33 #endif
-1
include/net/netns/ipv4.h
··· 43 43 struct xt_table *nat_table; 44 44 struct hlist_head *nat_bysource; 45 45 unsigned int nat_htable_size; 46 - int nat_vmalloced; 47 46 #endif 48 47 49 48 int sysctl_icmp_echo_ignore_all;
+2 -4
net/ipv4/netfilter/nf_nat_core.c
··· 682 682 { 683 683 /* Leave them the same for the moment. */ 684 684 net->ipv4.nat_htable_size = net->ct.htable_size; 685 - net->ipv4.nat_bysource = nf_ct_alloc_hashtable(&net->ipv4.nat_htable_size, 686 - &net->ipv4.nat_vmalloced, 0); 685 + net->ipv4.nat_bysource = nf_ct_alloc_hashtable(&net->ipv4.nat_htable_size, 0); 687 686 if (!net->ipv4.nat_bysource) 688 687 return -ENOMEM; 689 688 return 0; ··· 704 705 { 705 706 nf_ct_iterate_cleanup(net, &clean_nat, NULL); 706 707 synchronize_rcu(); 707 - nf_ct_free_hashtable(net->ipv4.nat_bysource, net->ipv4.nat_vmalloced, 708 - net->ipv4.nat_htable_size); 708 + nf_ct_free_hashtable(net->ipv4.nat_bysource, net->ipv4.nat_htable_size); 709 709 } 710 710 711 711 static struct pernet_operations nf_nat_net_ops = {
+9 -17
net/netfilter/nf_conntrack_core.c
··· 1202 1202 return 1; 1203 1203 } 1204 1204 1205 - void nf_ct_free_hashtable(void *hash, int vmalloced, unsigned int size) 1205 + void nf_ct_free_hashtable(void *hash, unsigned int size) 1206 1206 { 1207 - if (vmalloced) 1207 + if (is_vmalloc_addr(hash)) 1208 1208 vfree(hash); 1209 1209 else 1210 1210 free_pages((unsigned long)hash, ··· 1271 1271 goto i_see_dead_people; 1272 1272 } 1273 1273 1274 - nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc, 1275 - net->ct.htable_size); 1274 + nf_ct_free_hashtable(net->ct.hash, net->ct.htable_size); 1276 1275 nf_conntrack_ecache_fini(net); 1277 1276 nf_conntrack_acct_fini(net); 1278 1277 nf_conntrack_expect_fini(net); ··· 1300 1301 } 1301 1302 } 1302 1303 1303 - void *nf_ct_alloc_hashtable(unsigned int *sizep, int *vmalloced, int nulls) 1304 + void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls) 1304 1305 { 1305 1306 struct hlist_nulls_head *hash; 1306 1307 unsigned int nr_slots, i; 1307 1308 size_t sz; 1308 - 1309 - *vmalloced = 0; 1310 1309 1311 1310 BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head)); 1312 1311 nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head)); ··· 1312 1315 hash = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO, 1313 1316 get_order(sz)); 1314 1317 if (!hash) { 1315 - *vmalloced = 1; 1316 1318 printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n"); 1317 1319 hash = __vmalloc(sz, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, 1318 1320 PAGE_KERNEL); ··· 1327 1331 1328 1332 int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp) 1329 1333 { 1330 - int i, bucket, vmalloced, old_vmalloced; 1334 + int i, bucket; 1331 1335 unsigned int hashsize, old_size; 1332 1336 struct hlist_nulls_head *hash, *old_hash; 1333 1337 struct nf_conntrack_tuple_hash *h; ··· 1344 1348 if (!hashsize) 1345 1349 return -EINVAL; 1346 1350 1347 - hash = nf_ct_alloc_hashtable(&hashsize, &vmalloced, 1); 1351 + hash = nf_ct_alloc_hashtable(&hashsize, 1); 1348 1352 if (!hash) 1349 1353 return -ENOMEM; 1350 1354 ··· 1366 1370 } 1367 1371 } 1368 1372 old_size = init_net.ct.htable_size; 1369 - old_vmalloced = init_net.ct.hash_vmalloc; 1370 1373 old_hash = init_net.ct.hash; 1371 1374 1372 1375 init_net.ct.htable_size = nf_conntrack_htable_size = hashsize; 1373 - init_net.ct.hash_vmalloc = vmalloced; 1374 1376 init_net.ct.hash = hash; 1375 1377 spin_unlock_bh(&nf_conntrack_lock); 1376 1378 1377 - nf_ct_free_hashtable(old_hash, old_vmalloced, old_size); 1379 + nf_ct_free_hashtable(old_hash, old_size); 1378 1380 return 0; 1379 1381 } 1380 1382 EXPORT_SYMBOL_GPL(nf_conntrack_set_hashsize); ··· 1485 1491 } 1486 1492 1487 1493 net->ct.htable_size = nf_conntrack_htable_size; 1488 - net->ct.hash = nf_ct_alloc_hashtable(&net->ct.htable_size, 1489 - &net->ct.hash_vmalloc, 1); 1494 + net->ct.hash = nf_ct_alloc_hashtable(&net->ct.htable_size, 1); 1490 1495 if (!net->ct.hash) { 1491 1496 ret = -ENOMEM; 1492 1497 printk(KERN_ERR "Unable to create nf_conntrack_hash\n"); ··· 1508 1515 err_acct: 1509 1516 nf_conntrack_expect_fini(net); 1510 1517 err_expect: 1511 - nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc, 1512 - net->ct.htable_size); 1518 + nf_ct_free_hashtable(net->ct.hash, net->ct.htable_size); 1513 1519 err_hash: 1514 1520 kmem_cache_destroy(net->ct.nf_conntrack_cachep); 1515 1521 err_cache:
+3 -6
net/netfilter/nf_conntrack_expect.c
··· 639 639 } 640 640 641 641 net->ct.expect_count = 0; 642 - net->ct.expect_hash = nf_ct_alloc_hashtable(&nf_ct_expect_hsize, 643 - &net->ct.expect_vmalloc, 0); 642 + net->ct.expect_hash = nf_ct_alloc_hashtable(&nf_ct_expect_hsize, 0); 644 643 if (net->ct.expect_hash == NULL) 645 644 goto err1; 646 645 ··· 661 662 if (net_eq(net, &init_net)) 662 663 kmem_cache_destroy(nf_ct_expect_cachep); 663 664 err2: 664 - nf_ct_free_hashtable(net->ct.expect_hash, net->ct.expect_vmalloc, 665 - nf_ct_expect_hsize); 665 + nf_ct_free_hashtable(net->ct.expect_hash, nf_ct_expect_hsize); 666 666 err1: 667 667 return err; 668 668 } ··· 673 675 rcu_barrier(); /* Wait for call_rcu() before destroy */ 674 676 kmem_cache_destroy(nf_ct_expect_cachep); 675 677 } 676 - nf_ct_free_hashtable(net->ct.expect_hash, net->ct.expect_vmalloc, 677 - nf_ct_expect_hsize); 678 + nf_ct_free_hashtable(net->ct.expect_hash, nf_ct_expect_hsize); 678 679 }
+3 -7
net/netfilter/nf_conntrack_helper.c
··· 33 33 static struct hlist_head *nf_ct_helper_hash __read_mostly; 34 34 static unsigned int nf_ct_helper_hsize __read_mostly; 35 35 static unsigned int nf_ct_helper_count __read_mostly; 36 - static int nf_ct_helper_vmalloc; 37 36 38 37 39 38 /* Stupid hash, but collision free for the default registrations of the ··· 266 267 int err; 267 268 268 269 nf_ct_helper_hsize = 1; /* gets rounded up to use one page */ 269 - nf_ct_helper_hash = nf_ct_alloc_hashtable(&nf_ct_helper_hsize, 270 - &nf_ct_helper_vmalloc, 0); 270 + nf_ct_helper_hash = nf_ct_alloc_hashtable(&nf_ct_helper_hsize, 0); 271 271 if (!nf_ct_helper_hash) 272 272 return -ENOMEM; 273 273 ··· 277 279 return 0; 278 280 279 281 err1: 280 - nf_ct_free_hashtable(nf_ct_helper_hash, nf_ct_helper_vmalloc, 281 - nf_ct_helper_hsize); 282 + nf_ct_free_hashtable(nf_ct_helper_hash, nf_ct_helper_hsize); 282 283 return err; 283 284 } 284 285 285 286 void nf_conntrack_helper_fini(void) 286 287 { 287 288 nf_ct_extend_unregister(&helper_extend); 288 - nf_ct_free_hashtable(nf_ct_helper_hash, nf_ct_helper_vmalloc, 289 - nf_ct_helper_hsize); 289 + nf_ct_free_hashtable(nf_ct_helper_hash, nf_ct_helper_hsize); 290 290 }