Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

netfilter: conntrack: simplify the code by using nf_conntrack_get_ht

Since commit 64b87639c9cb ("netfilter: conntrack: fix race between
nf_conntrack proc read and hash resize") introduced
nf_conntrack_get_ht, there's no need to check nf_conntrack_generation
again and again to get the hash table and hash size. Also convert
nf_conntrack_get_ht to an inline function here.

Suggested-by: Pablo Neira Ayuso <pablo@netfilter.org>
Signed-off-by: Liping Zhang <liping.zhang@spreadtrum.com>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>

authored by

Liping Zhang and committed by
Pablo Neira Ayuso
92e47ba8 adf05168

+30 -39
+20
include/net/netfilter/nf_conntrack.h
··· 303 303 304 304 int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp); 305 305 int nf_conntrack_hash_resize(unsigned int hashsize); 306 + 307 + extern struct hlist_nulls_head *nf_conntrack_hash; 306 308 extern unsigned int nf_conntrack_htable_size; 309 + extern seqcount_t nf_conntrack_generation; 307 310 extern unsigned int nf_conntrack_max; 311 + 312 + /* must be called with rcu read lock held */ 313 + static inline void 314 + nf_conntrack_get_ht(struct hlist_nulls_head **hash, unsigned int *hsize) 315 + { 316 + struct hlist_nulls_head *hptr; 317 + unsigned int sequence, hsz; 318 + 319 + do { 320 + sequence = read_seqcount_begin(&nf_conntrack_generation); 321 + hsz = nf_conntrack_htable_size; 322 + hptr = nf_conntrack_hash; 323 + } while (read_seqcount_retry(&nf_conntrack_generation, sequence)); 324 + 325 + *hash = hptr; 326 + *hsize = hsz; 327 + } 308 328 309 329 struct nf_conn *nf_ct_tmpl_alloc(struct net *net, 310 330 const struct nf_conntrack_zone *zone,
-3
include/net/netfilter/nf_conntrack_core.h
··· 51 51 const struct nf_conntrack_l3proto *l3proto, 52 52 const struct nf_conntrack_l4proto *l4proto); 53 53 54 - void nf_conntrack_get_ht(struct hlist_nulls_head **hash, unsigned int *hsize); 55 - 56 54 /* Find a connection corresponding to a tuple. */ 57 55 struct nf_conntrack_tuple_hash * 58 56 nf_conntrack_find_get(struct net *net, ··· 81 83 82 84 #define CONNTRACK_LOCKS 1024 83 85 84 - extern struct hlist_nulls_head *nf_conntrack_hash; 85 86 extern spinlock_t nf_conntrack_locks[CONNTRACK_LOCKS]; 86 87 void nf_conntrack_lock(spinlock_t *lock); 87 88
+10 -36
net/netfilter/nf_conntrack_core.c
··· 74 74 75 75 static __read_mostly struct kmem_cache *nf_conntrack_cachep; 76 76 static __read_mostly spinlock_t nf_conntrack_locks_all_lock; 77 - static __read_mostly seqcount_t nf_conntrack_generation; 78 77 static __read_mostly DEFINE_SPINLOCK(nf_conntrack_locks_all_lock); 79 78 static __read_mostly bool nf_conntrack_locks_all; 80 79 ··· 161 162 162 163 unsigned int nf_conntrack_htable_size __read_mostly; 163 164 unsigned int nf_conntrack_max __read_mostly; 165 + seqcount_t nf_conntrack_generation __read_mostly; 164 166 165 167 DEFINE_PER_CPU(struct nf_conn, nf_conntrack_untracked); 166 168 EXPORT_PER_CPU_SYMBOL(nf_conntrack_untracked); ··· 478 478 net_eq(net, nf_ct_net(ct)); 479 479 } 480 480 481 - /* must be called with rcu read lock held */ 482 - void nf_conntrack_get_ht(struct hlist_nulls_head **hash, unsigned int *hsize) 483 - { 484 - struct hlist_nulls_head *hptr; 485 - unsigned int sequence, hsz; 486 - 487 - do { 488 - sequence = read_seqcount_begin(&nf_conntrack_generation); 489 - hsz = nf_conntrack_htable_size; 490 - hptr = nf_conntrack_hash; 491 - } while (read_seqcount_retry(&nf_conntrack_generation, sequence)); 492 - 493 - *hash = hptr; 494 - *hsize = hsz; 495 - } 496 - EXPORT_SYMBOL_GPL(nf_conntrack_get_ht); 497 - 498 481 /* 499 482 * Warning : 500 483 * - Caller must take a reference on returned object ··· 490 507 struct nf_conntrack_tuple_hash *h; 491 508 struct hlist_nulls_head *ct_hash; 492 509 struct hlist_nulls_node *n; 493 - unsigned int bucket, sequence; 510 + unsigned int bucket, hsize; 494 511 495 512 begin: 496 - do { 497 - sequence = read_seqcount_begin(&nf_conntrack_generation); 498 - bucket = scale_hash(hash); 499 - ct_hash = nf_conntrack_hash; 500 - } while (read_seqcount_retry(&nf_conntrack_generation, sequence)); 513 + nf_conntrack_get_ht(&ct_hash, &hsize); 514 + bucket = reciprocal_scale(hash, hsize); 501 515 502 516 hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[bucket], hnnode) { 503 517 if (nf_ct_key_equal(h, tuple, zone, net)) 
{ ··· 800 820 const struct nf_conntrack_zone *zone; 801 821 struct nf_conntrack_tuple_hash *h; 802 822 struct hlist_nulls_head *ct_hash; 803 - unsigned int hash, sequence; 823 + unsigned int hash, hsize; 804 824 struct hlist_nulls_node *n; 805 825 struct nf_conn *ct; 806 826 807 827 zone = nf_ct_zone(ignored_conntrack); 808 828 809 829 rcu_read_lock(); 810 - do { 811 - sequence = read_seqcount_begin(&nf_conntrack_generation); 812 - hash = hash_conntrack(net, tuple); 813 - ct_hash = nf_conntrack_hash; 814 - } while (read_seqcount_retry(&nf_conntrack_generation, sequence)); 830 + nf_conntrack_get_ht(&ct_hash, &hsize); 831 + hash = __hash_conntrack(net, tuple, hsize); 815 832 816 833 hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[hash], hnnode) { 817 834 ct = nf_ct_tuplehash_to_ctrack(h); ··· 874 897 875 898 for (i = 0; i < NF_CT_EVICTION_RANGE; i++) { 876 899 struct hlist_nulls_head *ct_hash; 877 - unsigned hash, sequence, drops; 900 + unsigned int hash, hsize, drops; 878 901 879 902 rcu_read_lock(); 880 - do { 881 - sequence = read_seqcount_begin(&nf_conntrack_generation); 882 - hash = scale_hash(_hash++); 883 - ct_hash = nf_conntrack_hash; 884 - } while (read_seqcount_retry(&nf_conntrack_generation, sequence)); 903 + nf_conntrack_get_ht(&ct_hash, &hsize); 904 + hash = reciprocal_scale(_hash++, hsize); 885 905 886 906 drops = early_drop_list(net, &ct_hash[hash]); 887 907 rcu_read_unlock();