Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'sctp-race-fix'

Xin Long says:

====================
sctp: fix the race condition in sctp_destroy_sock in a proper way

The original fix introduced a deadlock, so it has to be removed in
Patch 1/2; Patch 2/2 then fixes the race condition in a proper way.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+22 -16
+22 -16
net/sctp/socket.c
···
 357  357 	return af;
 358  358 }
 359  359
      360 +static void sctp_auto_asconf_init(struct sctp_sock *sp)
      361 +{
      362 +	struct net *net = sock_net(&sp->inet.sk);
      363 +
      364 +	if (net->sctp.default_auto_asconf) {
      365 +		spin_lock(&net->sctp.addr_wq_lock);
      366 +		list_add_tail(&sp->auto_asconf_list, &net->sctp.auto_asconf_splist);
      367 +		spin_unlock(&net->sctp.addr_wq_lock);
      368 +		sp->do_auto_asconf = 1;
      369 +	}
      370 +}
      371 +
 360  372 /* Bind a local address either to an endpoint or to an association. */
 361  373 static int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
 362  374 {
···
 430  418 		return -EADDRINUSE;
 431  419
 432  420 	/* Refresh ephemeral port. */
 433      -	if (!bp->port)
      421 +	if (!bp->port) {
 434  422 		bp->port = inet_sk(sk)->inet_num;
      423 +		sctp_auto_asconf_init(sp);
      424 +	}
 435  425
 436  426 	/* Add the address to the bind address list.
 437  427 	 * Use GFP_ATOMIC since BHs will be disabled.
···
1534 1520
1535 1521 	/* Supposedly, no process has access to the socket, but
1536 1522 	 * the net layers still may.
     1523 +	 * Also, sctp_destroy_sock() needs to be called with addr_wq_lock
     1524 +	 * held and that should be grabbed before socket lock.
1537 1525 	 */
1538      -	local_bh_disable();
1539      -	bh_lock_sock(sk);
     1526 +	spin_lock_bh(&net->sctp.addr_wq_lock);
     1527 +	bh_lock_sock_nested(sk);
1540 1528
1541 1529 	/* Hold the sock, since sk_common_release() will put sock_put()
1542 1530 	 * and we have just a little more cleanup.
···
1547 1531 	sk_common_release(sk);
1548 1532
1549 1533 	bh_unlock_sock(sk);
1550      -	local_bh_enable();
     1534 +	spin_unlock_bh(&net->sctp.addr_wq_lock);
1551 1535
1552 1536 	sock_put(sk);
1553 1537
···
5007 4991 	sk_sockets_allocated_inc(sk);
5008 4992 	sock_prot_inuse_add(net, sk->sk_prot, 1);
5009 4993
5010      -	if (net->sctp.default_auto_asconf) {
5011      -		spin_lock(&sock_net(sk)->sctp.addr_wq_lock);
5012      -		list_add_tail(&sp->auto_asconf_list,
5013      -			      &net->sctp.auto_asconf_splist);
5014      -		sp->do_auto_asconf = 1;
5015      -		spin_unlock(&sock_net(sk)->sctp.addr_wq_lock);
5016      -	} else {
5017      -		sp->do_auto_asconf = 0;
5018      -	}
5019      -
5020 4994 	local_bh_enable();
5021 4995
5022 4996 	return 0;
···
5031 5025
5032 5026 	if (sp->do_auto_asconf) {
5033 5027 		sp->do_auto_asconf = 0;
5034      -		spin_lock_bh(&sock_net(sk)->sctp.addr_wq_lock);
5035 5028 		list_del(&sp->auto_asconf_list);
5036      -		spin_unlock_bh(&sock_net(sk)->sctp.addr_wq_lock);
5037 5029 	}
5038 5030 	sctp_endpoint_free(sp->ep);
5039 5031 	local_bh_disable();
···
9401 9397 		if (err)
9402 9398 			return err;
9403 9399 	}
     9400 +
     9401 +	sctp_auto_asconf_init(newsp);
9404 9402
9405 9403 	/* Move any messages in the old socket's receive queue that are for the
9406 9404 	 * peeled off association to the new socket's receive queue.