Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (21 commits)
niu: VLAN_ETH_HLEN should be used to make sure that the whole MAC header was copied to the head buffer in the Vlan packets case
KS8851: Fix ks8851_set_rx_mode() for IFF_MULTICAST
KS8851: Fix MAC address write order
KS8851: Add soft reset at probe time
net: fix section mismatch in fec.c
net: Fix struct inet_timewait_sock bitfield annotation
tcp: Try to catch MSG_PEEK bug
net: Fix IP_MULTICAST_IF
bluetooth: static lock key fix
bluetooth: scheduling while atomic bug fix
tcp: fix TCP_DEFER_ACCEPT retrans calculation
tcp: reduce SYN-ACK retrans for TCP_DEFER_ACCEPT
tcp: accept socket after TCP_DEFER_ACCEPT period
Revert "tcp: fix tcp_defer_accept to consider the timeout"
AF_UNIX: Fix deadlock on connecting to shutdown socket
ethoc: clear only pending irqs
ethoc: inline regs access
vmxnet3: use dev_dbg, fix build for CONFIG_BLOCK=n
virtio_net: use dev_kfree_skb_any() in free_old_xmit_skbs()
be2net: fix support for PCI hot plug
...

+212 -86
+22 -11
drivers/net/benet/be_cmds.c
···
 int be_cmd_POST(struct be_adapter *adapter)
 {
-        u16 stage, error;
+        u16 stage;
+        int status, timeout = 0;

-        error = be_POST_stage_get(adapter, &stage);
-        if (error || stage != POST_STAGE_ARMFW_RDY) {
-                dev_err(&adapter->pdev->dev, "POST failed.\n");
-                return -1;
-        }
+        do {
+                status = be_POST_stage_get(adapter, &stage);
+                if (status) {
+                        dev_err(&adapter->pdev->dev, "POST error; stage=0x%x\n",
+                                stage);
+                        return -1;
+                } else if (stage != POST_STAGE_ARMFW_RDY) {
+                        set_current_state(TASK_INTERRUPTIBLE);
+                        schedule_timeout(2 * HZ);
+                        timeout += 2;
+                } else {
+                        return 0;
+                }
+        } while (timeout < 20);

-        return 0;
+        dev_err(&adapter->pdev->dev, "POST timeout; stage=0x%x\n", stage);
+        return -1;
 }

 static inline void *embedded_payload(struct be_mcc_wrb *wrb)
···
 /* Create an rx filtering policy configuration on an i/f
  * Uses mbox
  */
-int be_cmd_if_create(struct be_adapter *adapter, u32 flags, u8 *mac,
-                bool pmac_invalid, u32 *if_handle, u32 *pmac_id)
+int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
+                u8 *mac, bool pmac_invalid, u32 *if_handle, u32 *pmac_id)
 {
         struct be_mcc_wrb *wrb;
         struct be_cmd_req_if_create *req;
···
         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                 OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req));

-        req->capability_flags = cpu_to_le32(flags);
-        req->enable_flags = cpu_to_le32(flags);
+        req->capability_flags = cpu_to_le32(cap_flags);
+        req->enable_flags = cpu_to_le32(en_flags);
         req->pmac_invalid = pmac_invalid;
         if (!pmac_invalid)
                 memcpy(req->mac_addr, mac, ETH_ALEN);
+3 -2
drivers/net/benet/be_cmds.h
···
 extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
                        u32 if_id, u32 *pmac_id);
 extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id);
-extern int be_cmd_if_create(struct be_adapter *adapter, u32 if_flags, u8 *mac,
-                        bool pmac_invalid, u32 *if_handle, u32 *pmac_id);
+extern int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags,
+                        u32 en_flags, u8 *mac, bool pmac_invalid,
+                        u32 *if_handle, u32 *pmac_id);
 extern int be_cmd_if_destroy(struct be_adapter *adapter, u32 if_handle);
 extern int be_cmd_eq_create(struct be_adapter *adapter,
                        struct be_queue_info *eq, int eq_delay);
+15 -12
drivers/net/benet/be_main.c
···
 static int be_setup(struct be_adapter *adapter)
 {
         struct net_device *netdev = adapter->netdev;
-        u32 if_flags;
+        u32 cap_flags, en_flags;
         int status;

-        if_flags = BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_PROMISCUOUS |
-                BE_IF_FLAGS_MCAST_PROMISCUOUS | BE_IF_FLAGS_UNTAGGED |
-                BE_IF_FLAGS_PASS_L3L4_ERRORS;
-        status = be_cmd_if_create(adapter, if_flags, netdev->dev_addr,
-                        false/* pmac_invalid */, &adapter->if_handle,
-                        &adapter->pmac_id);
+        cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
+                        BE_IF_FLAGS_MCAST_PROMISCUOUS |
+                        BE_IF_FLAGS_PROMISCUOUS |
+                        BE_IF_FLAGS_PASS_L3L4_ERRORS;
+        en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
+                        BE_IF_FLAGS_PASS_L3L4_ERRORS;
+
+        status = be_cmd_if_create(adapter, cap_flags, en_flags,
+                        netdev->dev_addr, false/* pmac_invalid */,
+                        &adapter->if_handle, &adapter->pmac_id);
         if (status != 0)
                 goto do_none;
-

         status = be_tx_queues_create(adapter);
         if (status != 0)
···
         if (status)
                 return status;

+        status = be_cmd_reset_function(adapter);
+        if (status)
+                return status;
+
         status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
         if (status)
                 return status;
···
         status = be_ctrl_init(adapter);
         if (status)
                 goto free_netdev;
-
-        status = be_cmd_reset_function(adapter);
-        if (status)
-                goto ctrl_clean;

         status = be_stats_init(adapter);
         if (status)
+11 -10
drivers/net/ethoc.c
···
         u32 addr;
 };

-static u32 ethoc_read(struct ethoc *dev, loff_t offset)
+static inline u32 ethoc_read(struct ethoc *dev, loff_t offset)
 {
         return ioread32(dev->iobase + offset);
 }

-static void ethoc_write(struct ethoc *dev, loff_t offset, u32 data)
+static inline void ethoc_write(struct ethoc *dev, loff_t offset, u32 data)
 {
         iowrite32(data, dev->iobase + offset);
 }

-static void ethoc_read_bd(struct ethoc *dev, int index, struct ethoc_bd *bd)
+static inline void ethoc_read_bd(struct ethoc *dev, int index,
+                struct ethoc_bd *bd)
 {
         loff_t offset = ETHOC_BD_BASE + (index * sizeof(struct ethoc_bd));
         bd->stat = ethoc_read(dev, offset + 0);
         bd->addr = ethoc_read(dev, offset + 4);
 }

-static void ethoc_write_bd(struct ethoc *dev, int index,
+static inline void ethoc_write_bd(struct ethoc *dev, int index,
                 const struct ethoc_bd *bd)
 {
         loff_t offset = ETHOC_BD_BASE + (index * sizeof(struct ethoc_bd));
···
         ethoc_write(dev, offset + 4, bd->addr);
 }

-static void ethoc_enable_irq(struct ethoc *dev, u32 mask)
+static inline void ethoc_enable_irq(struct ethoc *dev, u32 mask)
 {
         u32 imask = ethoc_read(dev, INT_MASK);
         imask |= mask;
         ethoc_write(dev, INT_MASK, imask);
 }

-static void ethoc_disable_irq(struct ethoc *dev, u32 mask)
+static inline void ethoc_disable_irq(struct ethoc *dev, u32 mask)
 {
         u32 imask = ethoc_read(dev, INT_MASK);
         imask &= ~mask;
         ethoc_write(dev, INT_MASK, imask);
 }

-static void ethoc_ack_irq(struct ethoc *dev, u32 mask)
+static inline void ethoc_ack_irq(struct ethoc *dev, u32 mask)
 {
         ethoc_write(dev, INT_SOURCE, mask);
 }

-static void ethoc_enable_rx_and_tx(struct ethoc *dev)
+static inline void ethoc_enable_rx_and_tx(struct ethoc *dev)
 {
         u32 mode = ethoc_read(dev, MODER);
         mode |= MODER_RXEN | MODER_TXEN;
         ethoc_write(dev, MODER, mode);
 }

-static void ethoc_disable_rx_and_tx(struct ethoc *dev)
+static inline void ethoc_disable_rx_and_tx(struct ethoc *dev)
 {
         u32 mode = ethoc_read(dev, MODER);
         mode &= ~(MODER_RXEN | MODER_TXEN);
···
                 return IRQ_NONE;
         }

-        ethoc_ack_irq(priv, INT_MASK_ALL);
+        ethoc_ack_irq(priv, pending);

         if (pending & INT_MASK_BUSY) {
                 dev_err(&dev->dev, "packet dropped\n");
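The interrupt-ack change above matters when a new event is raised between reading INT_SOURCE and acknowledging it. The toy user-space model below is only an illustration of that race (no real hardware involved): it shows that acknowledging the sampled "pending" bits leaves a late event latched, whereas acknowledging everything would silently discard it.

#include <stdio.h>

#define EVENT_A (1u << 0)
#define EVENT_B (1u << 1)

/* Toy model of a write-1-to-clear interrupt source register. */
static unsigned int int_source;

static void ack_irq(unsigned int mask)
{
        int_source &= ~mask;                    /* write-1-to-clear */
}

int main(void)
{
        int_source = EVENT_A;                   /* device raises event A */
        unsigned int pending = int_source;      /* handler samples pending */
        int_source |= EVENT_B;                  /* event B arrives afterwards */

        ack_irq(pending);       /* acking ~0u here would also clear B unseen */
        printf("still pending: 0x%x\n", int_source);    /* prints 0x2 */
        return 0;
}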
+1 -1
drivers/net/fec.c
···
  *
  * index is only used in legacy code
  */
-int __init fec_enet_init(struct net_device *dev, int index)
+static int fec_enet_init(struct net_device *dev, int index)
 {
         struct fec_enet_private *fep = netdev_priv(dev);
         struct bufdesc *cbd_base;
+37 -5
drivers/net/ks8851.c
···
 }

 /**
+ * ks8851_wrreg8 - write 8bit register value to chip
+ * @ks: The chip state
+ * @reg: The register address
+ * @val: The value to write
+ *
+ * Issue a write to put the value @val into the register specified in @reg.
+ */
+static void ks8851_wrreg8(struct ks8851_net *ks, unsigned reg, unsigned val)
+{
+        struct spi_transfer *xfer = &ks->spi_xfer1;
+        struct spi_message *msg = &ks->spi_msg1;
+        __le16 txb[2];
+        int ret;
+        int bit;
+
+        bit = 1 << (reg & 3);
+
+        txb[0] = cpu_to_le16(MK_OP(bit, reg) | KS_SPIOP_WR);
+        txb[1] = val;
+
+        xfer->tx_buf = txb;
+        xfer->rx_buf = NULL;
+        xfer->len = 3;
+
+        ret = spi_sync(ks->spidev, msg);
+        if (ret < 0)
+                ks_err(ks, "spi_sync() failed\n");
+}
+
+/**
  * ks8851_rx_1msg - select whether to use one or two messages for spi read
  * @ks: The device structure
  *
···
 static int ks8851_write_mac_addr(struct net_device *dev)
 {
         struct ks8851_net *ks = netdev_priv(dev);
-        u16 *mcp = (u16 *)dev->dev_addr;
+        int i;

         mutex_lock(&ks->lock);

-        ks8851_wrreg16(ks, KS_MARL, mcp[0]);
-        ks8851_wrreg16(ks, KS_MARM, mcp[1]);
-        ks8851_wrreg16(ks, KS_MARH, mcp[2]);
+        for (i = 0; i < ETH_ALEN; i++)
+                ks8851_wrreg8(ks, KS_MAR(i), dev->dev_addr[i]);

         mutex_unlock(&ks->lock);

···
                         mcptr = mcptr->next;
                 }

-                rxctrl.rxcr1 = RXCR1_RXME | RXCR1_RXAE | RXCR1_RXPAFMA;
+                rxctrl.rxcr1 = RXCR1_RXME | RXCR1_RXPAFMA;
         } else {
                 /* just accept broadcast / unicast */
                 rxctrl.rxcr1 = RXCR1_RXPAFMA;
···
         ndev->if_port = IF_PORT_100BASET;
         ndev->netdev_ops = &ks8851_netdev_ops;
         ndev->irq = spi->irq;
+
+        /* issue a global soft reset to reset the device. */
+        ks8851_soft_reset(ks, GRR_GSR);

         /* simple check for a valid chip being connected to the bus */

+1
drivers/net/ks8851.h
···
 #define CCR_32PIN                       (1 << 0)

 /* MAC address registers */
+#define KS_MAR(_m)                      0x15 - (_m)
 #define KS_MARL                         0x10
 #define KS_MARM                         0x12
 #define KS_MARH                         0x14
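For reference, the per-byte write order used by the new ks8851_write_mac_addr() can be checked in isolation. The following stand-alone user-space sketch (not driver code, MAC address made up) applies the same mapping as the KS_MAR() macro above and prints which chip register each byte of dev_addr is written to, i.e. dev_addr[0] at 0x15 down to dev_addr[5] at KS_MARL (0x10):

#include <stdio.h>

/* Same byte-to-register mapping as the KS_MAR() macro added above. */
#define KS_MAR(_m)      (0x15 - (_m))

int main(void)
{
        /* example address only */
        const unsigned char dev_addr[6] = { 0x00, 0x10, 0xa1, 0x86, 0x95, 0x11 };
        int i;

        for (i = 0; i < 6; i++)
                printf("dev_addr[%d] = 0x%02x -> register 0x%02x\n",
                       i, dev_addr[i], KS_MAR(i));
        return 0;
}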
+1 -1
drivers/net/niu.c
···
         rp->rcr_index = index;

         skb_reserve(skb, NET_IP_ALIGN);
-        __pskb_pull_tail(skb, min(len, NIU_RXPULL_MAX));
+        __pskb_pull_tail(skb, min(len, VLAN_ETH_HLEN));

         rp->rx_packets++;
         rp->rx_bytes += skb->len;
+1 -1
drivers/net/virtio_net.c
···
                 vi->dev->stats.tx_bytes += skb->len;
                 vi->dev->stats.tx_packets++;
                 tot_sgs += skb_vnet_hdr(skb)->num_sg;
-                kfree_skb(skb);
+                dev_kfree_skb_any(skb);
         }
         return tot_sgs;
 }
+18 -9
drivers/net/vmxnet3/vmxnet3_drv.c
···
         }
         rq->uncommitted[ring_idx] += num_allocated;

-        dprintk(KERN_ERR "alloc_rx_buf: %d allocated, next2fill %u, next2comp "
+        dev_dbg(&adapter->netdev->dev,
+                "alloc_rx_buf: %d allocated, next2fill %u, next2comp "
                 "%u, uncommited %u\n", num_allocated, ring->next2fill,
                 ring->next2comp, rq->uncommitted[ring_idx]);

···
                 tbi = tq->buf_info + tq->tx_ring.next2fill;
                 tbi->map_type = VMXNET3_MAP_NONE;

-                dprintk(KERN_ERR "txd[%u]: 0x%Lx 0x%x 0x%x\n",
+                dev_dbg(&adapter->netdev->dev,
+                        "txd[%u]: 0x%Lx 0x%x 0x%x\n",
                         tq->tx_ring.next2fill, ctx->sop_txd->txd.addr,
                         ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]);
                 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
···
                 gdesc->dword[2] = dw2 | buf_size;
                 gdesc->dword[3] = 0;

-                dprintk(KERN_ERR "txd[%u]: 0x%Lx 0x%x 0x%x\n",
+                dev_dbg(&adapter->netdev->dev,
+                        "txd[%u]: 0x%Lx 0x%x 0x%x\n",
                         tq->tx_ring.next2fill, gdesc->txd.addr,
                         gdesc->dword[2], gdesc->dword[3]);
                 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
···
                         gdesc->dword[2] = dw2 | frag->size;
                         gdesc->dword[3] = 0;

-                        dprintk(KERN_ERR "txd[%u]: 0x%llu %u %u\n",
+                        dev_dbg(&adapter->netdev->dev,
+                                "txd[%u]: 0x%llu %u %u\n",
                                 tq->tx_ring.next2fill, gdesc->txd.addr,
                                 gdesc->dword[2], gdesc->dword[3]);
                         vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
···
                 tdd = tq->data_ring.base + tq->tx_ring.next2fill;

                 memcpy(tdd->data, skb->data, ctx->copy_size);
-                dprintk(KERN_ERR "copy %u bytes to dataRing[%u]\n",
+                dev_dbg(&adapter->netdev->dev,
+                        "copy %u bytes to dataRing[%u]\n",
                         ctx->copy_size, tq->tx_ring.next2fill);
                 return 1;

···

         if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
                 tq->stats.tx_ring_full++;
-                dprintk(KERN_ERR "tx queue stopped on %s, next2comp %u"
+                dev_dbg(&adapter->netdev->dev,
+                        "tx queue stopped on %s, next2comp %u"
                         " next2fill %u\n", adapter->netdev->name,
                         tq->tx_ring.next2comp, tq->tx_ring.next2fill);

···

         /* finally flips the GEN bit of the SOP desc */
         gdesc->dword[2] ^= VMXNET3_TXD_GEN;
-        dprintk(KERN_ERR "txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
+        dev_dbg(&adapter->netdev->dev,
+                "txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
                 (u32)((union Vmxnet3_GenericDesc *)ctx.sop_txd -
                 tq->tx_ring.base), gdesc->txd.addr, gdesc->dword[2],
                 gdesc->dword[3]);
···
                 if (unlikely(rcd->len == 0)) {
                         /* Pretend the rx buffer is skipped. */
                         BUG_ON(!(rcd->sop && rcd->eop));
-                        dprintk(KERN_ERR "rxRing[%u][%u] 0 length\n",
+                        dev_dbg(&adapter->netdev->dev,
+                                "rxRing[%u][%u] 0 length\n",
                                 ring_idx, idx);
                         goto rcd_done;
                 }
···
         int err;
         u32 ret;

-        dprintk(KERN_ERR "%s: skb_buf_size %d, rx_buf_per_pkt %d, ring sizes"
+        dev_dbg(&adapter->netdev->dev,
+                "%s: skb_buf_size %d, rx_buf_per_pkt %d, ring sizes"
                 " %u %u %u\n", adapter->netdev->name, adapter->skb_buf_size,
                 adapter->rx_buf_per_pkt, adapter->tx_queue.tx_ring.size,
                 adapter->rx_queue.rx_ring[0].size,
+1 -1
drivers/net/vmxnet3/vmxnet3_int.h
···
 #include <linux/types.h>
 #include <linux/ethtool.h>
 #include <linux/delay.h>
+#include <linux/device.h>
 #include <linux/netdevice.h>
 #include <linux/pci.h>
 #include <linux/ethtool.h>
···
 #include <linux/if_vlan.h>
 #include <linux/if_arp.h>
 #include <linux/inetdevice.h>
-#include <linux/dst.h>

 #include "vmxnet3_defs.h"

+4 -4
include/net/inet_timewait_sock.h
···
         __u16                   tw_num;
         kmemcheck_bitfield_begin(flags);
         /* And these are ours. */
-        __u8                    tw_ipv6only:1,
-                                tw_transparent:1;
-        /* 14 bits hole, try to pack */
+        unsigned int            tw_ipv6only : 1,
+                                tw_transparent : 1,
+                                tw_pad : 14,    /* 14 bits hole */
+                                tw_ipv6_offset : 16;
         kmemcheck_bitfield_end(flags);
-        __u16                   tw_ipv6_offset;
         unsigned long           tw_ttd;
         struct inet_bind_bucket *tw_tb;
         struct hlist_node       tw_death_node;
+2 -2
net/bluetooth/hci_sysfs.c
···

         dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle);

+        dev_set_drvdata(&conn->dev, conn);
+
         if (device_add(&conn->dev) < 0) {
                 BT_ERR("Failed to register connection device");
                 return;
···
         conn->dev.type = &bt_link;
         conn->dev.class = bt_class;
         conn->dev.parent = &hdev->dev;
-
-        dev_set_drvdata(&conn->dev, conn);

         device_initialize(&conn->dev);

+6 -3
net/bluetooth/l2cap.c
···

         conn->feat_mask = 0;

-        setup_timer(&conn->info_timer, l2cap_info_timeout,
-                        (unsigned long) conn);
-
         spin_lock_init(&conn->lock);
         rwlock_init(&conn->chan_list.lock);
+
+        setup_timer(&conn->info_timer, l2cap_info_timeout,
+                        (unsigned long) conn);

         conn->disc_reason = 0x13;

···
         /* Default config options */
         pi->conf_len = 0;
         pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
+        skb_queue_head_init(TX_QUEUE(sk));
+        skb_queue_head_init(SREJ_QUEUE(sk));
+        INIT_LIST_HEAD(SREJ_LIST(sk));
 }

 static struct proto l2cap_proto = {
+31 -3
net/ipv4/inet_connection_sock.c
···

 EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_hash_add);

+/* Decide when to expire the request and when to resend SYN-ACK */
+static inline void syn_ack_recalc(struct request_sock *req, const int thresh,
+                                  const int max_retries,
+                                  const u8 rskq_defer_accept,
+                                  int *expire, int *resend)
+{
+        if (!rskq_defer_accept) {
+                *expire = req->retrans >= thresh;
+                *resend = 1;
+                return;
+        }
+        *expire = req->retrans >= thresh &&
+                  (!inet_rsk(req)->acked || req->retrans >= max_retries);
+        /*
+         * Do not resend while waiting for data after ACK,
+         * start to resend on end of deferring period to give
+         * last chance for data or ACK to create established socket.
+         */
+        *resend = !inet_rsk(req)->acked ||
+                  req->retrans >= rskq_defer_accept - 1;
+}
+
 void inet_csk_reqsk_queue_prune(struct sock *parent,
                                 const unsigned long interval,
                                 const unsigned long timeout,
···
                 reqp=&lopt->syn_table[i];
                 while ((req = *reqp) != NULL) {
                         if (time_after_eq(now, req->expires)) {
-                                if ((req->retrans < thresh ||
-                                     (inet_rsk(req)->acked && req->retrans < max_retries))
-                                    && !req->rsk_ops->rtx_syn_ack(parent, req)) {
+                                int expire = 0, resend = 0;
+
+                                syn_ack_recalc(req, thresh, max_retries,
+                                               queue->rskq_defer_accept,
+                                               &expire, &resend);
+                                if (!expire &&
+                                    (!resend ||
+                                     !req->rsk_ops->rtx_syn_ack(parent, req) ||
+                                     inet_rsk(req)->acked)) {
                                         unsigned long timeo;

                                         if (req->retrans++ == 0)
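The interplay of expire and resend is easiest to see in a small table. The sketch below is a user-space copy of syn_ack_recalc() (kernel types dropped) run for a request whose bare ACK has already been seen; thresh = 5, max_retries = 5 and rskq_defer_accept = 4 are example inputs chosen for the table, not kernel defaults. It shows the SYN-ACK being held back until the last deferring retransmit and the request only expiring once max_retries is reached.

#include <stdio.h>

static void syn_ack_recalc(int retrans, int acked, int thresh,
                           int max_retries, int rskq_defer_accept,
                           int *expire, int *resend)
{
        if (!rskq_defer_accept) {
                *expire = retrans >= thresh;
                *resend = 1;
                return;
        }
        *expire = retrans >= thresh && (!acked || retrans >= max_retries);
        *resend = !acked || retrans >= rskq_defer_accept - 1;
}

int main(void)
{
        int thresh = 5, max_retries = 5, defer = 4;
        int retrans, expire, resend;

        for (retrans = 0; retrans <= 6; retrans++) {
                syn_ack_recalc(retrans, 1 /* bare ACK already received */,
                               thresh, max_retries, defer, &expire, &resend);
                printf("retrans=%d expire=%d resend=%d\n",
                       retrans, expire, resend);
        }
        return 0;
}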
+3 -4
net/ipv4/ip_sockglue.c
···
                                 break;
                         }
                         dev = ip_dev_find(sock_net(sk), mreq.imr_address.s_addr);
-                        if (dev) {
+                        if (dev)
                                 mreq.imr_ifindex = dev->ifindex;
-                                dev_put(dev);
-                        }
                 } else
-                        dev = __dev_get_by_index(sock_net(sk), mreq.imr_ifindex);
+                        dev = dev_get_by_index(sock_net(sk), mreq.imr_ifindex);


                 err = -EADDRNOTAVAIL;
                 if (!dev)
                         break;
+                dev_put(dev);

                 err = -EINVAL;
                 if (sk->sk_bound_dev_if &&
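The setsockopt() path touched here is the one user space exercises when it selects the outgoing multicast interface by index. A minimal sketch, assuming an interface named "eth0" exists on the host:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <net/if.h>

int main(void)
{
        struct ip_mreqn mreq;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0) {
                perror("socket");
                return 1;
        }

        memset(&mreq, 0, sizeof(mreq));
        /* "eth0" is only an example; use an interface present on the system */
        mreq.imr_ifindex = if_nametoindex("eth0");

        /* Selecting the egress interface by index goes through the
         * dev_get_by_index() lookup fixed in the hunk above. */
        if (setsockopt(fd, IPPROTO_IP, IP_MULTICAST_IF, &mreq, sizeof(mreq)) < 0)
                perror("setsockopt(IP_MULTICAST_IF)");

        close(fd);
        return 0;
}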
+46 -13
net/ipv4/tcp.c
···

 EXPORT_SYMBOL(tcp_enter_memory_pressure);

+/* Convert seconds to retransmits based on initial and max timeout */
+static u8 secs_to_retrans(int seconds, int timeout, int rto_max)
+{
+        u8 res = 0;
+
+        if (seconds > 0) {
+                int period = timeout;
+
+                res = 1;
+                while (seconds > period && res < 255) {
+                        res++;
+                        timeout <<= 1;
+                        if (timeout > rto_max)
+                                timeout = rto_max;
+                        period += timeout;
+                }
+        }
+        return res;
+}
+
+/* Convert retransmits to seconds based on initial and max timeout */
+static int retrans_to_secs(u8 retrans, int timeout, int rto_max)
+{
+        int period = 0;
+
+        if (retrans > 0) {
+                period = timeout;
+                while (--retrans) {
+                        timeout <<= 1;
+                        if (timeout > rto_max)
+                                timeout = rto_max;
+                        period += timeout;
+                }
+        }
+        return period;
+}
+
 /*
  *      Wait for a TCP event.
  *
···
                         goto found_ok_skb;
                 if (tcp_hdr(skb)->fin)
                         goto found_fin_ok;
-                WARN_ON(!(flags & MSG_PEEK));
+                if (WARN_ON(!(flags & MSG_PEEK)))
+                        printk(KERN_INFO "recvmsg bug 2: copied %X "
+                               "seq %X\n", *seq, TCP_SKB_CB(skb)->seq);
         }

         /* Well, if we have backlog, try to process it now yet. */
···
                 break;

         case TCP_DEFER_ACCEPT:
-                icsk->icsk_accept_queue.rskq_defer_accept = 0;
-                if (val > 0) {
-                        /* Translate value in seconds to number of
-                         * retransmits */
-                        while (icsk->icsk_accept_queue.rskq_defer_accept < 32 &&
-                               val > ((TCP_TIMEOUT_INIT / HZ) <<
-                                      icsk->icsk_accept_queue.rskq_defer_accept))
-                                icsk->icsk_accept_queue.rskq_defer_accept++;
-                        icsk->icsk_accept_queue.rskq_defer_accept++;
-                }
+                /* Translate value in seconds to number of retransmits */
+                icsk->icsk_accept_queue.rskq_defer_accept =
+                        secs_to_retrans(val, TCP_TIMEOUT_INIT / HZ,
+                                        TCP_RTO_MAX / HZ);
                 break;

         case TCP_WINDOW_CLAMP:
···
                 val = (val ? : sysctl_tcp_fin_timeout) / HZ;
                 break;
         case TCP_DEFER_ACCEPT:
-                val = !icsk->icsk_accept_queue.rskq_defer_accept ? 0 :
-                      ((TCP_TIMEOUT_INIT / HZ) << (icsk->icsk_accept_queue.rskq_defer_accept - 1));
+                val = retrans_to_secs(icsk->icsk_accept_queue.rskq_defer_accept,
+                                      TCP_TIMEOUT_INIT / HZ, TCP_RTO_MAX / HZ);
                 break;
         case TCP_WINDOW_CLAMP:
                 val = tp->window_clamp;
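As a sanity check on the new conversion helpers, they can be copied into user space and run round-trip. In the sketch below the 3 s initial timeout and 120 s cap stand in for TCP_TIMEOUT_INIT/HZ and TCP_RTO_MAX/HZ (hard-coded here as an assumption): a 30-second TCP_DEFER_ACCEPT request becomes 4 retransmits, and reading the option back reports the full 3+6+12+24 = 45 s back-off window.

#include <stdio.h>

/* User-space copies of the helpers added in the hunk above. */
static unsigned char secs_to_retrans(int seconds, int timeout, int rto_max)
{
        unsigned char res = 0;

        if (seconds > 0) {
                int period = timeout;

                res = 1;
                while (seconds > period && res < 255) {
                        res++;
                        timeout <<= 1;
                        if (timeout > rto_max)
                                timeout = rto_max;
                        period += timeout;
                }
        }
        return res;
}

static int retrans_to_secs(unsigned char retrans, int timeout, int rto_max)
{
        int period = 0;

        if (retrans > 0) {
                period = timeout;
                while (--retrans) {
                        timeout <<= 1;
                        if (timeout > rto_max)
                                timeout = rto_max;
                        period += timeout;
                }
        }
        return period;
}

int main(void)
{
        /* 3 s initial RTO and 120 s cap are assumed values, see above */
        int init = 3, rto_max = 120;
        unsigned char r = secs_to_retrans(30, init, rto_max);

        printf("30 s -> %u retransmits -> %d s\n",
               r, retrans_to_secs(r, init, rto_max));
        return 0;
}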
+2 -3
net/ipv4/tcp_minisocks.c
···
         if (!(flg & TCP_FLAG_ACK))
                 return NULL;

-        /* If TCP_DEFER_ACCEPT is set, drop bare ACK. */
-        if (inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
+        /* While TCP_DEFER_ACCEPT is active, drop bare ACK. */
+        if (req->retrans < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
             TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
-                inet_csk(sk)->icsk_accept_queue.rskq_defer_accept--;
                 inet_rsk(req)->acked = 1;
                 return NULL;
         }
+5 -1
net/ipv6/ipv6_sockglue.c
···
                         goto e_inval;

                 if (val) {
+                        struct net_device *dev;
+
                         if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != val)
                                 goto e_inval;

-                        if (__dev_get_by_index(net, val) == NULL) {
+                        dev = dev_get_by_index(net, val);
+                        if (!dev) {
                                 retv = -ENODEV;
                                 break;
                         }
+                        dev_put(dev);
                 }
                 np->mcast_oif = val;
                 retv = 0;
+2
net/unix/af_unix.c
···
         err = -ECONNREFUSED;
         if (other->sk_state != TCP_LISTEN)
                 goto out_unlock;
+        if (other->sk_shutdown & RCV_SHUTDOWN)
+                goto out_unlock;

         if (unix_recvq_full(other)) {
                 err = -EAGAIN;