Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

* 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6:
  [IRDA]: fix printk format
  [NETPOLL] netconsole: fix soft lockup when removing module
  [NETPOLL]: tx lock deadlock fix
  SCTP: lock_sock_nested in sctp_sock_migrate
  SCTP: Fix sctp_getsockopt_get_peer_addrs
  SCTP: update sctp_getsockopt helpers to allow oversized buffers

 drivers/net/irda/irport.c |   2 +-
 net/core/netpoll.c        |  30 ++++++-----
 net/sctp/socket.c         | 120 +++++++++++++++++----------
 3 files changed, 101 insertions(+), 51 deletions(-)

diff --git a/drivers/net/irda/irport.c b/drivers/net/irda/irport.c
--- a/drivers/net/irda/irport.c
+++ b/drivers/net/irda/irport.c
@@ -509,7 +509,7 @@
 	IRDA_DEBUG(0, "%s(), iir=%02x, lsr=%02x, iobase=%#x\n",
 		   __FUNCTION__, iir, lsr, iobase);
 
-	IRDA_DEBUG(0, "%s(), transmitting=%d, remain=%d, done=%d\n",
+	IRDA_DEBUG(0, "%s(), transmitting=%d, remain=%d, done=%td\n",
 		   __FUNCTION__, self->transmitting, self->tx_buff.len,
 		   self->tx_buff.data - self->tx_buff.head);
 
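
For reference: `self->tx_buff.data - self->tx_buff.head` is a pointer difference of type ptrdiff_t, which is 64 bits wide on LP64 targets, so `%d` is wrong (and gcc warns about it); C99's `%td` is the matching conversion. A minimal userspace illustration of the same point (the buffer names are just stand-ins):

	#include <stdio.h>
	#include <stddef.h>

	int main(void)
	{
		char tx_buff[64];
		char *head = tx_buff;		/* start of the buffer */
		char *data = tx_buff + 10;	/* current fill position */
		ptrdiff_t done = data - head;

		/* A pointer difference has type ptrdiff_t, so it needs
		 * the %td conversion rather than %d. */
		printf("done=%td\n", done);
		return 0;
	}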
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -72,7 +72,8 @@
 			netif_tx_unlock(dev);
 			local_irq_restore(flags);
 
-			schedule_delayed_work(&npinfo->tx_work, HZ/10);
+			if (atomic_read(&npinfo->refcnt))
+				schedule_delayed_work(&npinfo->tx_work, HZ/10);
 			return;
 		}
 		netif_tx_unlock(dev);
@@ -251,22 +250,23 @@
 		unsigned long flags;
 
 		local_irq_save(flags);
-		if (netif_tx_trylock(dev)) {
-			/* try until next clock tick */
-			for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
-					tries > 0; --tries) {
+		/* try until next clock tick */
+		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
+		     tries > 0; --tries) {
+			if (netif_tx_trylock(dev)) {
 				if (!netif_queue_stopped(dev))
 					status = dev->hard_start_xmit(skb, dev);
+				netif_tx_unlock(dev);
 
 				if (status == NETDEV_TX_OK)
 					break;
 
-				/* tickle device maybe there is some cleanup */
-				netpoll_poll(np);
-
-				udelay(USEC_PER_POLL);
 			}
-			netif_tx_unlock(dev);
+
+			/* tickle device maybe there is some cleanup */
+			netpoll_poll(np);
+
+			udelay(USEC_PER_POLL);
 		}
 		local_irq_restore(flags);
 	}
@@ -786,9 +784,15 @@
 	if (atomic_dec_and_test(&npinfo->refcnt)) {
 		skb_queue_purge(&npinfo->arp_tx);
 		skb_queue_purge(&npinfo->txq);
-		cancel_rearming_delayed_work(&npinfo->tx_work);
+		cancel_delayed_work(&npinfo->tx_work);
 		flush_scheduled_work();
 
+		/* clean after last, unfinished work */
+		if (!skb_queue_empty(&npinfo->txq)) {
+			struct sk_buff *skb;
+			skb = __skb_dequeue(&npinfo->txq);
+			kfree_skb(skb);
+		}
 		kfree(npinfo);
 	}
 }
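
The deadlock fix turns the locking inside out: instead of holding the tx lock across the whole retry window, each iteration takes it with netif_tx_trylock(), makes one transmit attempt, and drops it before polling the device, so whatever path currently owns the lock can make progress between attempts. A hedged userspace sketch of that trylock/poll/back-off pattern, with pthreads standing in for the kernel primitives (fake_dev, try_xmit, poll_dev, and MAX_TRIES are illustrative stand-ins, not kernel APIs):

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>
	#include <unistd.h>

	#define MAX_TRIES 100	/* stand-in for jiffies_to_usecs(1)/USEC_PER_POLL */

	/* Illustrative stand-in for struct net_device. */
	struct fake_dev {
		pthread_mutex_t tx_lock;
		bool queue_stopped;
	};

	/* Pretend hard_start_xmit(): always succeeds in this sketch. */
	static bool try_xmit(struct fake_dev *dev)
	{
		(void)dev;
		return true;
	}

	/* Pretend netpoll_poll(): let the device do some cleanup. */
	static void poll_dev(struct fake_dev *dev)
	{
		(void)dev;
	}

	static int send_with_retries(struct fake_dev *dev)
	{
		for (int tries = MAX_TRIES; tries > 0; --tries) {
			/* Hold the lock only around a single attempt... */
			if (pthread_mutex_trylock(&dev->tx_lock) == 0) {
				bool sent = !dev->queue_stopped && try_xmit(dev);
				/* ...and release it before polling, so the
				 * lock's owner (e.g. the poll path) can run. */
				pthread_mutex_unlock(&dev->tx_lock);
				if (sent)
					return 0;	/* NETDEV_TX_OK analogue */
			}
			poll_dev(dev);
			usleep(1);	/* back off, like udelay(USEC_PER_POLL) */
		}
		return -1;	/* NETDEV_TX_BUSY analogue: caller requeues */
	}

	int main(void)
	{
		struct fake_dev dev = {
			.tx_lock = PTHREAD_MUTEX_INITIALIZER,
			.queue_stopped = false,
		};
		printf("send: %d\n", send_with_retries(&dev));
		return 0;
	}

The key property is that no lock is held while polling or delaying, which is exactly what the old code violated by keeping the tx lock for the entire retry loop.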
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -3375,12 +3375,13 @@
 	sctp_assoc_t associd;
 	int retval = 0;
 
-	if (len != sizeof(status)) {
+	if (len < sizeof(status)) {
 		retval = -EINVAL;
 		goto out;
 	}
 
-	if (copy_from_user(&status, optval, sizeof(status))) {
+	len = sizeof(status);
+	if (copy_from_user(&status, optval, len)) {
 		retval = -EFAULT;
 		goto out;
 	}
@@ -3453,12 +3452,13 @@
 	struct sctp_transport *transport;
 	int retval = 0;
 
-	if (len != sizeof(pinfo)) {
+	if (len < sizeof(pinfo)) {
 		retval = -EINVAL;
 		goto out;
 	}
 
-	if (copy_from_user(&pinfo, optval, sizeof(pinfo))) {
+	len = sizeof(pinfo);
+	if (copy_from_user(&pinfo, optval, len)) {
 		retval = -EFAULT;
 		goto out;
 	}
@@ -3525,8 +3523,11 @@
 static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval,
 				  int __user *optlen)
 {
-	if (len != sizeof(struct sctp_event_subscribe))
+	if (len < sizeof(struct sctp_event_subscribe))
 		return -EINVAL;
+	len = sizeof(struct sctp_event_subscribe);
+	if (put_user(len, optlen))
+		return -EFAULT;
 	if (copy_to_user(optval, &sctp_sk(sk)->subscribe, len))
 		return -EFAULT;
 	return 0;
@@ -3551,9 +3546,12 @@
 	/* Applicable to UDP-style socket only */
 	if (sctp_style(sk, TCP))
 		return -EOPNOTSUPP;
-	if (len != sizeof(int))
+	if (len < sizeof(int))
 		return -EINVAL;
-	if (copy_to_user(optval, &sctp_sk(sk)->autoclose, len))
+	len = sizeof(int);
+	if (put_user(len, optlen))
+		return -EFAULT;
+	if (copy_to_user(optval, &sctp_sk(sk)->autoclose, sizeof(int)))
 		return -EFAULT;
 	return 0;
 }
@@ -3607,8 +3599,9 @@
 	int retval = 0;
 	struct sctp_association *asoc;
 
-	if (len != sizeof(sctp_peeloff_arg_t))
+	if (len < sizeof(sctp_peeloff_arg_t))
 		return -EINVAL;
+	len = sizeof(sctp_peeloff_arg_t);
 	if (copy_from_user(&peeloff, optval, len))
 		return -EFAULT;
 
@@ -3637,6 +3628,8 @@
 
 	/* Return the fd mapped to the new socket.  */
 	peeloff.sd = retval;
+	if (put_user(len, optlen))
+		return -EFAULT;
 	if (copy_to_user(optval, &peeloff, len))
 		retval = -EFAULT;
 
@@ -3747,9 +3736,9 @@
 	struct sctp_association *asoc = NULL;
 	struct sctp_sock        *sp = sctp_sk(sk);
 
-	if (len != sizeof(struct sctp_paddrparams))
+	if (len < sizeof(struct sctp_paddrparams))
 		return -EINVAL;
-
+	len = sizeof(struct sctp_paddrparams);
 	if (copy_from_user(&params, optval, len))
 		return -EFAULT;
 
@@ -3848,8 +3837,10 @@
 	struct sctp_association *asoc = NULL;
 	struct sctp_sock        *sp = sctp_sk(sk);
 
-	if (len != sizeof(struct sctp_assoc_value))
+	if (len < sizeof(struct sctp_assoc_value))
 		return - EINVAL;
 
+	len = sizeof(struct sctp_assoc_value);
+
 	if (copy_from_user(&params, optval, len))
 		return -EFAULT;
@@ -3901,8 +3888,11 @@
  */
 static int sctp_getsockopt_initmsg(struct sock *sk, int len, char __user *optval, int __user *optlen)
 {
-	if (len != sizeof(struct sctp_initmsg))
+	if (len < sizeof(struct sctp_initmsg))
 		return -EINVAL;
+	len = sizeof(struct sctp_initmsg);
+	if (put_user(len, optlen))
+		return -EFAULT;
 	if (copy_to_user(optval, &sctp_sk(sk)->initmsg, len))
 		return -EFAULT;
 	return 0;
@@ -3920,7 +3904,7 @@
 	struct list_head *pos;
 	int cnt = 0;
 
-	if (len != sizeof(sctp_assoc_t))
+	if (len < sizeof(sctp_assoc_t))
 		return -EINVAL;
 
 	if (copy_from_user(&id, optval, sizeof(sctp_assoc_t)))
@@ -3956,10 +3940,12 @@
 	struct sctp_sock *sp = sctp_sk(sk);
 	int addrlen;
 
-	if (len != sizeof(struct sctp_getaddrs_old))
+	if (len < sizeof(struct sctp_getaddrs_old))
 		return -EINVAL;
 
-	if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs_old)))
+	len = sizeof(struct sctp_getaddrs_old);
+
+	if (copy_from_user(&getaddrs, optval, len))
 		return -EFAULT;
 
 	if (getaddrs.addr_num <= 0) return -EINVAL;
@@ -3984,7 +3966,9 @@
 		if (cnt >= getaddrs.addr_num) break;
 	}
 	getaddrs.addr_num = cnt;
-	if (copy_to_user(optval, &getaddrs, sizeof(struct sctp_getaddrs_old)))
+	if (put_user(len, optlen))
+		return -EFAULT;
+	if (copy_to_user(optval, &getaddrs, len))
 		return -EFAULT;
 
 	return 0;
@@ -4019,8 +3999,7 @@
 		return -EINVAL;
 
 	to = optval + offsetof(struct sctp_getaddrs,addrs);
-	space_left = len - sizeof(struct sctp_getaddrs) -
-			offsetof(struct sctp_getaddrs,addrs);
+	space_left = len - offsetof(struct sctp_getaddrs,addrs);
 
 	list_for_each(pos, &asoc->peer.transport_addr_list) {
 		from = list_entry(pos, struct sctp_transport, transports);
@@ -4056,7 +4037,7 @@
 	rwlock_t *addr_lock;
 	int cnt = 0;
 
-	if (len != sizeof(sctp_assoc_t))
+	if (len < sizeof(sctp_assoc_t))
 		return -EINVAL;
 
 	if (copy_from_user(&id, optval, sizeof(sctp_assoc_t)))
@@ -4198,10 +4179,11 @@
 	void *buf;
 	int bytes_copied = 0;
 
-	if (len != sizeof(struct sctp_getaddrs_old))
+	if (len < sizeof(struct sctp_getaddrs_old))
 		return -EINVAL;
 
-	if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs_old)))
+	len = sizeof(struct sctp_getaddrs_old);
+	if (copy_from_user(&getaddrs, optval, len))
 		return -EFAULT;
 
 	if (getaddrs.addr_num <= 0) return -EINVAL;
@@ -4274,7 +4254,7 @@
 
 	/* copy the leading structure back to user */
 	getaddrs.addr_num = cnt;
-	if (copy_to_user(optval, &getaddrs, sizeof(struct sctp_getaddrs_old)))
+	if (copy_to_user(optval, &getaddrs, len))
 		err = -EFAULT;
 
 error:
@@ -4302,7 +4282,7 @@
 	void *addrs;
 	void *buf;
 
-	if (len <= sizeof(struct sctp_getaddrs))
+	if (len < sizeof(struct sctp_getaddrs))
 		return -EINVAL;
 
 	if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs)))
@@ -4326,8 +4306,8 @@
 	}
 
 	to = optval + offsetof(struct sctp_getaddrs,addrs);
-	space_left = len - sizeof(struct sctp_getaddrs) -
-			offsetof(struct sctp_getaddrs,addrs);
+	space_left = len - offsetof(struct sctp_getaddrs,addrs);
+
 	addrs = kmalloc(space_left, GFP_KERNEL);
 	if (!addrs)
 		return -ENOMEM;
@@ -4399,10 +4379,12 @@
 	struct sctp_association *asoc;
 	struct sctp_sock *sp = sctp_sk(sk);
 
-	if (len != sizeof(struct sctp_prim))
+	if (len < sizeof(struct sctp_prim))
 		return -EINVAL;
 
-	if (copy_from_user(&prim, optval, sizeof(struct sctp_prim)))
+	len = sizeof(struct sctp_prim);
+
+	if (copy_from_user(&prim, optval, len))
 		return -EFAULT;
 
 	asoc = sctp_id2assoc(sk, prim.ssp_assoc_id);
@@ -4420,7 +4398,9 @@
 	sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp,
 			(union sctp_addr *)&prim.ssp_addr);
 
-	if (copy_to_user(optval, &prim, sizeof(struct sctp_prim)))
+	if (put_user(len, optlen))
+		return -EFAULT;
+	if (copy_to_user(optval, &prim, len))
 		return -EFAULT;
 
 	return 0;
@@ -4439,10 +4415,15 @@
 {
 	struct sctp_setadaptation adaptation;
 
-	if (len != sizeof(struct sctp_setadaptation))
+	if (len < sizeof(struct sctp_setadaptation))
 		return -EINVAL;
 
+	len = sizeof(struct sctp_setadaptation);
+
 	adaptation.ssb_adaptation_ind = sctp_sk(sk)->adaptation_ind;
+
+	if (put_user(len, optlen))
+		return -EFAULT;
 	if (copy_to_user(optval, &adaptation, len))
 		return -EFAULT;
 
@@ -4481,9 +4452,12 @@
 	struct sctp_association *asoc;
 	struct sctp_sock *sp = sctp_sk(sk);
 
-	if (len != sizeof(struct sctp_sndrcvinfo))
+	if (len < sizeof(struct sctp_sndrcvinfo))
 		return -EINVAL;
-	if (copy_from_user(&info, optval, sizeof(struct sctp_sndrcvinfo)))
+
+	len = sizeof(struct sctp_sndrcvinfo);
+
+	if (copy_from_user(&info, optval, len))
 		return -EFAULT;
 
 	asoc = sctp_id2assoc(sk, info.sinfo_assoc_id);
@@ -4507,7 +4475,9 @@
 		info.sinfo_timetolive = sp->default_timetolive;
 	}
 
-	if (copy_to_user(optval, &info, sizeof(struct sctp_sndrcvinfo)))
+	if (put_user(len, optlen))
+		return -EFAULT;
+	if (copy_to_user(optval, &info, len))
 		return -EFAULT;
 
 	return 0;
@@ -4560,10 +4526,12 @@
 	struct sctp_rtoinfo rtoinfo;
 	struct sctp_association *asoc;
 
-	if (len != sizeof (struct sctp_rtoinfo))
+	if (len < sizeof (struct sctp_rtoinfo))
 		return -EINVAL;
 
-	if (copy_from_user(&rtoinfo, optval, sizeof (struct sctp_rtoinfo)))
+	len = sizeof(struct sctp_rtoinfo);
+
+	if (copy_from_user(&rtoinfo, optval, len))
 		return -EFAULT;
 
 	asoc = sctp_id2assoc(sk, rtoinfo.srto_assoc_id);
@@ -4617,11 +4581,12 @@
 	struct list_head *pos;
 	int cnt = 0;
 
-	if (len != sizeof (struct sctp_assocparams))
+	if (len < sizeof (struct sctp_assocparams))
 		return -EINVAL;
 
-	if (copy_from_user(&assocparams, optval,
-			sizeof (struct sctp_assocparams)))
+	len = sizeof(struct sctp_assocparams);
+
+	if (copy_from_user(&assocparams, optval, len))
 		return -EFAULT;
 
 	asoc = sctp_id2assoc(sk, assocparams.sasoc_assoc_id);
@@ -4708,8 +4671,10 @@
 	struct sctp_sock *sp;
 	struct sctp_association *asoc;
 
-	if (len != sizeof(struct sctp_assoc_value))
+	if (len < sizeof(struct sctp_assoc_value))
 		return -EINVAL;
 
+	len = sizeof(struct sctp_assoc_value);
+
 	if (copy_from_user(&params, optval, len))
 		return -EFAULT;
@@ -6123,8 +6084,11 @@
 	 * queued to the backlog.  This prevents a potential race between
 	 * backlog processing on the old socket and new-packet processing
 	 * on the new socket.
+	 *
+	 * The caller has just allocated newsk so we can guarantee that
+	 * other paths won't try to lock it and then oldsk.
 	 */
-	sctp_lock_sock(newsk);
+	lock_sock_nested(newsk, SINGLE_DEPTH_NESTING);
 	sctp_assoc_migrate(assoc, newsk);
 
 	/* If the association on the newsk is already closed before accept()
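
The sctp_getsockopt_* changes all apply one pattern: reject only buffers that are too small (`len < sizeof(...)` rather than `len != sizeof(...)`), clamp len to the structure size before copying, and report the clamped size back through put_user(len, optlen). That lets old binaries keep working when an option structure grows. A hedged userspace sketch of a caller passing an oversized buffer (assumes the lksctp-tools netinet/sctp.h header; on an unconnected socket the call simply fails, which the sketch tolerates):

	#include <stdio.h>
	#include <string.h>
	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <netinet/sctp.h>	/* struct sctp_status, SCTP_STATUS */

	int main(void)
	{
		int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);
		/* Deliberately oversized buffer: with the relaxed
		 * `len < sizeof(...)` check the kernel accepts it, clamps
		 * the copy to sizeof(struct sctp_status), and writes the
		 * real size back through optlen. */
		char buf[sizeof(struct sctp_status) + 64];
		socklen_t optlen = sizeof(buf);

		memset(buf, 0, sizeof(buf));	/* sstat_assoc_id = 0 */
		if (getsockopt(fd, IPPROTO_SCTP, SCTP_STATUS, buf, &optlen) == 0)
			printf("optlen clamped to %u (struct is %zu bytes)\n",
			       (unsigned)optlen, sizeof(struct sctp_status));
		else
			perror("getsockopt(SCTP_STATUS)");	/* e.g. no association yet */
		return 0;
	}

The final hunk is separate from the buffer-size work: sctp_sock_migrate() takes the freshly allocated newsk's lock while oldsk is already held, so it switches to lock_sock_nested(newsk, SINGLE_DEPTH_NESTING) to tell lockdep that this nesting is intentional rather than a potential AB-BA deadlock.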