Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

netfilter: conntrack: make netns address part of hash

Once we place all conntracks into a global hash table we want them to be
spread across entire hash table, even if namespaces have overlapping ip
addresses.

Signed-off-by: Florian Westphal <fw@strlen.de>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>

Authored by Florian Westphal and committed by Pablo Neira Ayuso
1b8c8a9f e0c7d472

+18 -17
net/netfilter/nf_conntrack_core.c
··· 54 54 #include <net/netfilter/nf_nat.h> 55 55 #include <net/netfilter/nf_nat_core.h> 56 56 #include <net/netfilter/nf_nat_helper.h> 57 + #include <net/netns/hash.h> 57 58 58 59 #define NF_CONNTRACK_VERSION "0.5.0" 59 60 ··· 145 144 146 145 static unsigned int nf_conntrack_hash_rnd __read_mostly; 147 146 148 - static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple) 147 + static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple, 148 + const struct net *net) 149 149 { 150 150 unsigned int n; 151 + u32 seed; 151 152 152 153 get_random_once(&nf_conntrack_hash_rnd, sizeof(nf_conntrack_hash_rnd)); 153 154 ··· 157 154 * destination ports (which is a multiple of 4) and treat the last 158 155 * three bytes manually. 159 156 */ 157 + seed = nf_conntrack_hash_rnd ^ net_hash_mix(net); 160 158 n = (sizeof(tuple->src) + sizeof(tuple->dst.u3)) / sizeof(u32); 161 - return jhash2((u32 *)tuple, n, nf_conntrack_hash_rnd ^ 159 + return jhash2((u32 *)tuple, n, seed ^ 162 160 (((__force __u16)tuple->dst.u.all << 16) | 163 161 tuple->dst.protonum)); 164 162 } 165 163 166 - static u32 __hash_bucket(u32 hash, unsigned int size) 167 - { 168 - return reciprocal_scale(hash, size); 169 - } 170 - 171 164 static u32 hash_bucket(u32 hash, const struct net *net) 172 165 { 173 - return __hash_bucket(hash, net->ct.htable_size); 166 + return reciprocal_scale(hash, net->ct.htable_size); 174 167 } 175 168 176 - static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple, 177 - unsigned int size) 169 + static u32 __hash_conntrack(const struct net *net, 170 + const struct nf_conntrack_tuple *tuple, 171 + unsigned int size) 178 172 { 179 - return __hash_bucket(hash_conntrack_raw(tuple), size); 173 + return reciprocal_scale(hash_conntrack_raw(tuple, net), size); 180 174 } 181 175 182 - static inline u_int32_t hash_conntrack(const struct net *net, 183 - const struct nf_conntrack_tuple *tuple) 176 + static u32 hash_conntrack(const struct net *net, 177 + const struct nf_conntrack_tuple *tuple) 184 178 { 185 - return __hash_conntrack(tuple, net->ct.htable_size); 179 + return __hash_conntrack(net, tuple, net->ct.htable_size); 186 180 } 187 181 188 182 bool ··· 535 535 const struct nf_conntrack_tuple *tuple) 536 536 { 537 537 return __nf_conntrack_find_get(net, zone, tuple, 538 - hash_conntrack_raw(tuple)); 538 + hash_conntrack_raw(tuple, net)); 539 539 } 540 540 EXPORT_SYMBOL_GPL(nf_conntrack_find_get); 541 541 ··· 1041 1041 1042 1042 /* look for tuple match */ 1043 1043 zone = nf_ct_zone_tmpl(tmpl, skb, &tmp); 1044 - hash = hash_conntrack_raw(&tuple); 1044 + hash = hash_conntrack_raw(&tuple, net); 1045 1045 h = __nf_conntrack_find_get(net, zone, &tuple, hash); 1046 1046 if (!h) { 1047 1047 h = init_conntrack(net, tmpl, &tuple, l3proto, l4proto, ··· 1605 1605 struct nf_conntrack_tuple_hash, hnnode); 1606 1606 ct = nf_ct_tuplehash_to_ctrack(h); 1607 1607 hlist_nulls_del_rcu(&h->hnnode); 1608 - bucket = __hash_conntrack(&h->tuple, hashsize); 1608 + bucket = __hash_conntrack(nf_ct_net(ct), 1609 + &h->tuple, hashsize); 1609 1610 hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]); 1610 1611 } 1611 1612 }