Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[INET6]: Generalise tcp_v6_hash_connect

Renaming it to inet6_hash_connect, making it possible to ditch
dccp_v6_hash_connect and share the same code with TCP instead.

Signed-off-by: Arnaldo Carvalho de Melo <acme@mandriva.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by Arnaldo Carvalho de Melo; committed by David S. Miller.
d8313f5c a7f5e7f1

+190 -348
+2 -2
drivers/char/random.c
··· 1573 1573 } 1574 1574 1575 1575 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 1576 - u32 secure_tcpv6_port_ephemeral(const __u32 *saddr, const __u32 *daddr, __u16 dport) 1576 + u32 secure_ipv6_port_ephemeral(const __u32 *saddr, const __u32 *daddr, __u16 dport) 1577 1577 { 1578 1578 struct keydata *keyptr = get_keyptr(); 1579 1579 u32 hash[12]; ··· 1584 1584 1585 1585 return twothirdsMD4Transform(daddr, hash); 1586 1586 } 1587 - EXPORT_SYMBOL(secure_tcpv6_port_ephemeral); 1587 + EXPORT_SYMBOL(secure_ipv6_port_ephemeral); 1588 1588 #endif 1589 1589 1590 1590 #if defined(CONFIG_IP_DCCP) || defined(CONFIG_IP_DCCP_MODULE)
+2 -2
include/linux/random.h
··· 53 53 54 54 extern __u32 secure_ip_id(__u32 daddr); 55 55 extern u32 secure_ipv4_port_ephemeral(__u32 saddr, __u32 daddr, __u16 dport); 56 - extern u32 secure_tcpv6_port_ephemeral(const __u32 *saddr, const __u32 *daddr, 57 - __u16 dport); 56 + extern u32 secure_ipv6_port_ephemeral(const __u32 *saddr, const __u32 *daddr, 57 + __u16 dport); 58 58 extern __u32 secure_tcp_sequence_number(__u32 saddr, __u32 daddr, 59 59 __u16 sport, __u16 dport); 60 60 extern __u32 secure_tcpv6_sequence_number(__u32 *saddr, __u32 *daddr,
+3
include/net/ipv6.h
··· 527 527 extern int inet6_ioctl(struct socket *sock, unsigned int cmd, 528 528 unsigned long arg); 529 529 530 + extern int inet6_hash_connect(struct inet_timewait_death_row *death_row, 531 + struct sock *sk); 532 + 530 533 /* 531 534 * reassembly.c 532 535 */
+1 -170
net/dccp/ipv6.c
··· 84 84 dh->dccph_sport); 85 85 } 86 86 87 - static int __dccp_v6_check_established(struct sock *sk, const __u16 lport, 88 - struct inet_timewait_sock **twp) 89 - { 90 - struct inet_sock *inet = inet_sk(sk); 91 - const struct ipv6_pinfo *np = inet6_sk(sk); 92 - const struct in6_addr *daddr = &np->rcv_saddr; 93 - const struct in6_addr *saddr = &np->daddr; 94 - const int dif = sk->sk_bound_dev_if; 95 - const u32 ports = INET_COMBINED_PORTS(inet->dport, lport); 96 - const unsigned int hash = inet6_ehashfn(daddr, inet->num, 97 - saddr, inet->dport); 98 - struct inet_ehash_bucket *head = inet_ehash_bucket(&dccp_hashinfo, hash); 99 - struct sock *sk2; 100 - const struct hlist_node *node; 101 - struct inet_timewait_sock *tw; 102 - 103 - prefetch(head->chain.first); 104 - write_lock(&head->lock); 105 - 106 - /* Check TIME-WAIT sockets first. */ 107 - sk_for_each(sk2, node, &(head + dccp_hashinfo.ehash_size)->chain) { 108 - const struct inet6_timewait_sock *tw6 = inet6_twsk(sk2); 109 - 110 - tw = inet_twsk(sk2); 111 - 112 - if(*((__u32 *)&(tw->tw_dport)) == ports && 113 - sk2->sk_family == PF_INET6 && 114 - ipv6_addr_equal(&tw6->tw_v6_daddr, saddr) && 115 - ipv6_addr_equal(&tw6->tw_v6_rcv_saddr, daddr) && 116 - sk2->sk_bound_dev_if == sk->sk_bound_dev_if) 117 - goto not_unique; 118 - } 119 - tw = NULL; 120 - 121 - /* And established part... */ 122 - sk_for_each(sk2, node, &head->chain) { 123 - if (INET6_MATCH(sk2, hash, saddr, daddr, ports, dif)) 124 - goto not_unique; 125 - } 126 - 127 - BUG_TRAP(sk_unhashed(sk)); 128 - __sk_add_node(sk, &head->chain); 129 - sk->sk_hash = hash; 130 - sock_prot_inc_use(sk->sk_prot); 131 - write_unlock(&head->lock); 132 - 133 - if (twp) { 134 - *twp = tw; 135 - NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED); 136 - } else if (tw) { 137 - /* Silly. Should hash-dance instead... 
*/ 138 - inet_twsk_deschedule(tw, &dccp_death_row); 139 - NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED); 140 - 141 - inet_twsk_put(tw); 142 - } 143 - return 0; 144 - 145 - not_unique: 146 - write_unlock(&head->lock); 147 - return -EADDRNOTAVAIL; 148 - } 149 - 150 - static inline u32 dccp_v6_port_offset(const struct sock *sk) 151 - { 152 - const struct inet_sock *inet = inet_sk(sk); 153 - const struct ipv6_pinfo *np = inet6_sk(sk); 154 - 155 - return secure_tcpv6_port_ephemeral(np->rcv_saddr.s6_addr32, 156 - np->daddr.s6_addr32, 157 - inet->dport); 158 - } 159 - 160 - static int dccp_v6_hash_connect(struct sock *sk) 161 - { 162 - const unsigned short snum = inet_sk(sk)->num; 163 - struct inet_bind_hashbucket *head; 164 - struct inet_bind_bucket *tb; 165 - int ret; 166 - 167 - if (snum == 0) { 168 - int low = sysctl_local_port_range[0]; 169 - int high = sysctl_local_port_range[1]; 170 - int range = high - low; 171 - int i; 172 - int port; 173 - static u32 hint; 174 - u32 offset = hint + dccp_v6_port_offset(sk); 175 - struct hlist_node *node; 176 - struct inet_timewait_sock *tw = NULL; 177 - 178 - local_bh_disable(); 179 - for (i = 1; i <= range; i++) { 180 - port = low + (i + offset) % range; 181 - head = &dccp_hashinfo.bhash[inet_bhashfn(port, 182 - dccp_hashinfo.bhash_size)]; 183 - spin_lock(&head->lock); 184 - 185 - /* Does not bother with rcv_saddr checks, 186 - * because the established check is already 187 - * unique enough. 
188 - */ 189 - inet_bind_bucket_for_each(tb, node, &head->chain) { 190 - if (tb->port == port) { 191 - BUG_TRAP(!hlist_empty(&tb->owners)); 192 - if (tb->fastreuse >= 0) 193 - goto next_port; 194 - if (!__dccp_v6_check_established(sk, 195 - port, 196 - &tw)) 197 - goto ok; 198 - goto next_port; 199 - } 200 - } 201 - 202 - tb = inet_bind_bucket_create(dccp_hashinfo.bind_bucket_cachep, 203 - head, port); 204 - if (!tb) { 205 - spin_unlock(&head->lock); 206 - break; 207 - } 208 - tb->fastreuse = -1; 209 - goto ok; 210 - 211 - next_port: 212 - spin_unlock(&head->lock); 213 - } 214 - local_bh_enable(); 215 - 216 - return -EADDRNOTAVAIL; 217 - ok: 218 - hint += i; 219 - 220 - /* Head lock still held and bh's disabled */ 221 - inet_bind_hash(sk, tb, port); 222 - if (sk_unhashed(sk)) { 223 - inet_sk(sk)->sport = htons(port); 224 - __inet6_hash(&dccp_hashinfo, sk); 225 - } 226 - spin_unlock(&head->lock); 227 - 228 - if (tw) { 229 - inet_twsk_deschedule(tw, &dccp_death_row); 230 - inet_twsk_put(tw); 231 - } 232 - 233 - ret = 0; 234 - goto out; 235 - } 236 - 237 - head = &dccp_hashinfo.bhash[inet_bhashfn(snum, 238 - dccp_hashinfo.bhash_size)]; 239 - tb = inet_csk(sk)->icsk_bind_hash; 240 - spin_lock_bh(&head->lock); 241 - 242 - if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) { 243 - __inet6_hash(&dccp_hashinfo, sk); 244 - spin_unlock_bh(&head->lock); 245 - return 0; 246 - } else { 247 - spin_unlock(&head->lock); 248 - /* No definite answer... Walk to established hash table */ 249 - ret = __dccp_v6_check_established(sk, snum, NULL); 250 - out: 251 - local_bh_enable(); 252 - return ret; 253 - } 254 - } 255 - 256 87 static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr, 257 88 int addr_len) 258 89 { ··· 234 403 inet->dport = usin->sin6_port; 235 404 236 405 dccp_set_state(sk, DCCP_REQUESTING); 237 - err = dccp_v6_hash_connect(sk); 406 + err = inet6_hash_connect(&dccp_death_row, sk); 238 407 if (err) 239 408 goto late_failure; 240 409 /* FIXME */
+181 -2
net/ipv6/inet6_hashtables.c
··· 5 5 * 6 6 * Generic INET6 transport hashtables 7 7 * 8 - * Authors: Lotsa people, from code originally in tcp 8 + * Authors: Lotsa people, from code originally in tcp, generalised here 9 + * by Arnaldo Carvalho de Melo <acme@mandriva.com> 9 10 * 10 11 * This program is free software; you can redistribute it and/or 11 12 * modify it under the terms of the GNU General Public License ··· 15 14 */ 16 15 17 16 #include <linux/config.h> 18 - 19 17 #include <linux/module.h> 18 + #include <linux/random.h> 20 19 21 20 #include <net/inet_connection_sock.h> 22 21 #include <net/inet_hashtables.h> 23 22 #include <net/inet6_hashtables.h> 23 + #include <net/ip.h> 24 24 25 25 struct sock *inet6_lookup_listener(struct inet_hashinfo *hashinfo, 26 26 const struct in6_addr *daddr, ··· 81 79 } 82 80 83 81 EXPORT_SYMBOL_GPL(inet6_lookup); 82 + 83 + static int __inet6_check_established(struct inet_timewait_death_row *death_row, 84 + struct sock *sk, const __u16 lport, 85 + struct inet_timewait_sock **twp) 86 + { 87 + struct inet_hashinfo *hinfo = death_row->hashinfo; 88 + const struct inet_sock *inet = inet_sk(sk); 89 + const struct ipv6_pinfo *np = inet6_sk(sk); 90 + const struct in6_addr *daddr = &np->rcv_saddr; 91 + const struct in6_addr *saddr = &np->daddr; 92 + const int dif = sk->sk_bound_dev_if; 93 + const u32 ports = INET_COMBINED_PORTS(inet->dport, lport); 94 + const unsigned int hash = inet6_ehashfn(daddr, inet->num, saddr, 95 + inet->dport); 96 + struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash); 97 + struct sock *sk2; 98 + const struct hlist_node *node; 99 + struct inet_timewait_sock *tw; 100 + 101 + prefetch(head->chain.first); 102 + write_lock(&head->lock); 103 + 104 + /* Check TIME-WAIT sockets first. 
*/ 105 + sk_for_each(sk2, node, &(head + hinfo->ehash_size)->chain) { 106 + const struct inet6_timewait_sock *tw6 = inet6_twsk(sk2); 107 + 108 + tw = inet_twsk(sk2); 109 + 110 + if(*((__u32 *)&(tw->tw_dport)) == ports && 111 + sk2->sk_family == PF_INET6 && 112 + ipv6_addr_equal(&tw6->tw_v6_daddr, saddr) && 113 + ipv6_addr_equal(&tw6->tw_v6_rcv_saddr, daddr) && 114 + sk2->sk_bound_dev_if == sk->sk_bound_dev_if) { 115 + if (twsk_unique(sk, sk2, twp)) 116 + goto unique; 117 + else 118 + goto not_unique; 119 + } 120 + } 121 + tw = NULL; 122 + 123 + /* And established part... */ 124 + sk_for_each(sk2, node, &head->chain) { 125 + if (INET6_MATCH(sk2, hash, saddr, daddr, ports, dif)) 126 + goto not_unique; 127 + } 128 + 129 + unique: 130 + BUG_TRAP(sk_unhashed(sk)); 131 + __sk_add_node(sk, &head->chain); 132 + sk->sk_hash = hash; 133 + sock_prot_inc_use(sk->sk_prot); 134 + write_unlock(&head->lock); 135 + 136 + if (twp != NULL) { 137 + *twp = tw; 138 + NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED); 139 + } else if (tw != NULL) { 140 + /* Silly. Should hash-dance instead... 
*/ 141 + inet_twsk_deschedule(tw, death_row); 142 + NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED); 143 + 144 + inet_twsk_put(tw); 145 + } 146 + return 0; 147 + 148 + not_unique: 149 + write_unlock(&head->lock); 150 + return -EADDRNOTAVAIL; 151 + } 152 + 153 + static inline u32 inet6_sk_port_offset(const struct sock *sk) 154 + { 155 + const struct inet_sock *inet = inet_sk(sk); 156 + const struct ipv6_pinfo *np = inet6_sk(sk); 157 + return secure_ipv6_port_ephemeral(np->rcv_saddr.s6_addr32, 158 + np->daddr.s6_addr32, 159 + inet->dport); 160 + } 161 + 162 + int inet6_hash_connect(struct inet_timewait_death_row *death_row, 163 + struct sock *sk) 164 + { 165 + struct inet_hashinfo *hinfo = death_row->hashinfo; 166 + const unsigned short snum = inet_sk(sk)->num; 167 + struct inet_bind_hashbucket *head; 168 + struct inet_bind_bucket *tb; 169 + int ret; 170 + 171 + if (snum == 0) { 172 + const int low = sysctl_local_port_range[0]; 173 + const int high = sysctl_local_port_range[1]; 174 + const int range = high - low; 175 + int i, port; 176 + static u32 hint; 177 + const u32 offset = hint + inet6_sk_port_offset(sk); 178 + struct hlist_node *node; 179 + struct inet_timewait_sock *tw = NULL; 180 + 181 + local_bh_disable(); 182 + for (i = 1; i <= range; i++) { 183 + port = low + (i + offset) % range; 184 + head = &hinfo->bhash[inet_bhashfn(port, hinfo->bhash_size)]; 185 + spin_lock(&head->lock); 186 + 187 + /* Does not bother with rcv_saddr checks, 188 + * because the established check is already 189 + * unique enough. 
190 + */ 191 + inet_bind_bucket_for_each(tb, node, &head->chain) { 192 + if (tb->port == port) { 193 + BUG_TRAP(!hlist_empty(&tb->owners)); 194 + if (tb->fastreuse >= 0) 195 + goto next_port; 196 + if (!__inet6_check_established(death_row, 197 + sk, port, 198 + &tw)) 199 + goto ok; 200 + goto next_port; 201 + } 202 + } 203 + 204 + tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep, 205 + head, port); 206 + if (!tb) { 207 + spin_unlock(&head->lock); 208 + break; 209 + } 210 + tb->fastreuse = -1; 211 + goto ok; 212 + 213 + next_port: 214 + spin_unlock(&head->lock); 215 + } 216 + local_bh_enable(); 217 + 218 + return -EADDRNOTAVAIL; 219 + 220 + ok: 221 + hint += i; 222 + 223 + /* Head lock still held and bh's disabled */ 224 + inet_bind_hash(sk, tb, port); 225 + if (sk_unhashed(sk)) { 226 + inet_sk(sk)->sport = htons(port); 227 + __inet6_hash(hinfo, sk); 228 + } 229 + spin_unlock(&head->lock); 230 + 231 + if (tw) { 232 + inet_twsk_deschedule(tw, death_row); 233 + inet_twsk_put(tw); 234 + } 235 + 236 + ret = 0; 237 + goto out; 238 + } 239 + 240 + head = &hinfo->bhash[inet_bhashfn(snum, hinfo->bhash_size)]; 241 + tb = inet_csk(sk)->icsk_bind_hash; 242 + spin_lock_bh(&head->lock); 243 + 244 + if (sk_head(&tb->owners) == sk && sk->sk_bind_node.next == NULL) { 245 + __inet6_hash(hinfo, sk); 246 + spin_unlock_bh(&head->lock); 247 + return 0; 248 + } else { 249 + spin_unlock(&head->lock); 250 + /* No definite answer... Walk to established hash table */ 251 + ret = __inet6_check_established(death_row, sk, snum, NULL); 252 + out: 253 + local_bh_enable(); 254 + return ret; 255 + } 256 + } 257 + 258 + EXPORT_SYMBOL_GPL(inet6_hash_connect);
+1 -172
net/ipv6/tcp_ipv6.c
··· 119 119 } 120 120 } 121 121 122 - static int __tcp_v6_check_established(struct sock *sk, const __u16 lport, 123 - struct inet_timewait_sock **twp) 124 - { 125 - struct inet_sock *inet = inet_sk(sk); 126 - const struct ipv6_pinfo *np = inet6_sk(sk); 127 - const struct in6_addr *daddr = &np->rcv_saddr; 128 - const struct in6_addr *saddr = &np->daddr; 129 - const int dif = sk->sk_bound_dev_if; 130 - const u32 ports = INET_COMBINED_PORTS(inet->dport, lport); 131 - unsigned int hash = inet6_ehashfn(daddr, inet->num, saddr, inet->dport); 132 - struct inet_ehash_bucket *head = inet_ehash_bucket(&tcp_hashinfo, hash); 133 - struct sock *sk2; 134 - const struct hlist_node *node; 135 - struct inet_timewait_sock *tw; 136 - 137 - prefetch(head->chain.first); 138 - write_lock(&head->lock); 139 - 140 - /* Check TIME-WAIT sockets first. */ 141 - sk_for_each(sk2, node, &(head + tcp_hashinfo.ehash_size)->chain) { 142 - const struct inet6_timewait_sock *tw6 = inet6_twsk(sk2); 143 - 144 - tw = inet_twsk(sk2); 145 - 146 - if(*((__u32 *)&(tw->tw_dport)) == ports && 147 - sk2->sk_family == PF_INET6 && 148 - ipv6_addr_equal(&tw6->tw_v6_daddr, saddr) && 149 - ipv6_addr_equal(&tw6->tw_v6_rcv_saddr, daddr) && 150 - sk2->sk_bound_dev_if == sk->sk_bound_dev_if) { 151 - if (twsk_unique(sk, sk2, twp)) 152 - goto unique; 153 - else 154 - goto not_unique; 155 - } 156 - } 157 - tw = NULL; 158 - 159 - /* And established part... */ 160 - sk_for_each(sk2, node, &head->chain) { 161 - if (INET6_MATCH(sk2, hash, saddr, daddr, ports, dif)) 162 - goto not_unique; 163 - } 164 - 165 - unique: 166 - BUG_TRAP(sk_unhashed(sk)); 167 - __sk_add_node(sk, &head->chain); 168 - sk->sk_hash = hash; 169 - sock_prot_inc_use(sk->sk_prot); 170 - write_unlock(&head->lock); 171 - 172 - if (twp) { 173 - *twp = tw; 174 - NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED); 175 - } else if (tw) { 176 - /* Silly. Should hash-dance instead... 
*/ 177 - inet_twsk_deschedule(tw, &tcp_death_row); 178 - NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED); 179 - 180 - inet_twsk_put(tw); 181 - } 182 - return 0; 183 - 184 - not_unique: 185 - write_unlock(&head->lock); 186 - return -EADDRNOTAVAIL; 187 - } 188 - 189 - static inline u32 tcpv6_port_offset(const struct sock *sk) 190 - { 191 - const struct inet_sock *inet = inet_sk(sk); 192 - const struct ipv6_pinfo *np = inet6_sk(sk); 193 - 194 - return secure_tcpv6_port_ephemeral(np->rcv_saddr.s6_addr32, 195 - np->daddr.s6_addr32, 196 - inet->dport); 197 - } 198 - 199 - static int tcp_v6_hash_connect(struct sock *sk) 200 - { 201 - unsigned short snum = inet_sk(sk)->num; 202 - struct inet_bind_hashbucket *head; 203 - struct inet_bind_bucket *tb; 204 - int ret; 205 - 206 - if (!snum) { 207 - int low = sysctl_local_port_range[0]; 208 - int high = sysctl_local_port_range[1]; 209 - int range = high - low; 210 - int i; 211 - int port; 212 - static u32 hint; 213 - u32 offset = hint + tcpv6_port_offset(sk); 214 - struct hlist_node *node; 215 - struct inet_timewait_sock *tw = NULL; 216 - 217 - local_bh_disable(); 218 - for (i = 1; i <= range; i++) { 219 - port = low + (i + offset) % range; 220 - head = &tcp_hashinfo.bhash[inet_bhashfn(port, tcp_hashinfo.bhash_size)]; 221 - spin_lock(&head->lock); 222 - 223 - /* Does not bother with rcv_saddr checks, 224 - * because the established check is already 225 - * unique enough. 
226 - */ 227 - inet_bind_bucket_for_each(tb, node, &head->chain) { 228 - if (tb->port == port) { 229 - BUG_TRAP(!hlist_empty(&tb->owners)); 230 - if (tb->fastreuse >= 0) 231 - goto next_port; 232 - if (!__tcp_v6_check_established(sk, 233 - port, 234 - &tw)) 235 - goto ok; 236 - goto next_port; 237 - } 238 - } 239 - 240 - tb = inet_bind_bucket_create(tcp_hashinfo.bind_bucket_cachep, head, port); 241 - if (!tb) { 242 - spin_unlock(&head->lock); 243 - break; 244 - } 245 - tb->fastreuse = -1; 246 - goto ok; 247 - 248 - next_port: 249 - spin_unlock(&head->lock); 250 - } 251 - local_bh_enable(); 252 - 253 - return -EADDRNOTAVAIL; 254 - 255 - ok: 256 - hint += i; 257 - 258 - /* Head lock still held and bh's disabled */ 259 - inet_bind_hash(sk, tb, port); 260 - if (sk_unhashed(sk)) { 261 - inet_sk(sk)->sport = htons(port); 262 - __inet6_hash(&tcp_hashinfo, sk); 263 - } 264 - spin_unlock(&head->lock); 265 - 266 - if (tw) { 267 - inet_twsk_deschedule(tw, &tcp_death_row); 268 - inet_twsk_put(tw); 269 - } 270 - 271 - ret = 0; 272 - goto out; 273 - } 274 - 275 - head = &tcp_hashinfo.bhash[inet_bhashfn(snum, tcp_hashinfo.bhash_size)]; 276 - tb = inet_csk(sk)->icsk_bind_hash; 277 - spin_lock_bh(&head->lock); 278 - 279 - if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) { 280 - __inet6_hash(&tcp_hashinfo, sk); 281 - spin_unlock_bh(&head->lock); 282 - return 0; 283 - } else { 284 - spin_unlock(&head->lock); 285 - /* No definite answer... Walk to established hash table */ 286 - ret = __tcp_v6_check_established(sk, snum, NULL); 287 - out: 288 - local_bh_enable(); 289 - return ret; 290 - } 291 - } 292 - 293 122 static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, 294 123 int addr_len) 295 124 { ··· 279 450 inet->dport = usin->sin6_port; 280 451 281 452 tcp_set_state(sk, TCP_SYN_SENT); 282 - err = tcp_v6_hash_connect(sk); 453 + err = inet6_hash_connect(&tcp_death_row, sk); 283 454 if (err) 284 455 goto late_failure; 285 456