Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (28 commits)
drivers/isdn/hardware/mISDN: Use GFP_ATOMIC when a lock is held
ksz884x: Add missing validate_addr hook
ksz884x: convert to netdev_tx_t
virtio-net: pass gfp to add_buf
be2net: convert hdr.timeout in be_cmd_loopback_test() to le32
can: mpc5xxx_can.c: Fix build failure
net/ipv4/tcp_input.c: fix compilation breakage when FASTRETRANS_DEBUG > 1
net: sock_queue_err_skb() dont mess with sk_forward_alloc
netfilter: xtables: stackptr should be percpu
netfilter: don't xt_jumpstack_alloc twice in xt_register_table
greth: Fix build after OF device conversions.
net: fix sk_forward_alloc corruptions
Phonet: listening socket lock protects the connected socket list
caif: unlock on error path in cfserl_receive()
be2net: remove superfluous externs
be2net: add unlock on error path
net/rds: Add missing mutex_unlock
drivers/isdn/hardware/mISDN: Add missing spin_unlock
fs_enet: Adjust BDs after tx error
skb: make skb_recycle_check() return a bool value
...

+157 -90
+3 -1
drivers/isdn/hardware/mISDN/hfcsusb.c
··· 97 97 hw->name, __func__, reg, val); 98 98 99 99 spin_lock(&hw->ctrl_lock); 100 - if (hw->ctrl_cnt >= HFC_CTRL_BUFSIZE) 100 + if (hw->ctrl_cnt >= HFC_CTRL_BUFSIZE) { 101 + spin_unlock(&hw->ctrl_lock); 101 102 return 1; 103 + } 102 104 buf = &hw->ctrl_buff[hw->ctrl_in_idx]; 103 105 buf->hfcs_reg = reg; 104 106 buf->reg_val = val;
+2 -2
drivers/isdn/hardware/mISDN/netjet.c
··· 320 320 return -ENOMEM; 321 321 } 322 322 for (i = 0; i < 2; i++) { 323 - card->bc[i].hsbuf = kmalloc(NJ_DMA_TXSIZE, GFP_KERNEL); 323 + card->bc[i].hsbuf = kmalloc(NJ_DMA_TXSIZE, GFP_ATOMIC); 324 324 if (!card->bc[i].hsbuf) { 325 325 pr_info("%s: no B%d send buffer\n", card->name, i + 1); 326 326 return -ENOMEM; 327 327 } 328 - card->bc[i].hrbuf = kmalloc(NJ_DMA_RXSIZE, GFP_KERNEL); 328 + card->bc[i].hrbuf = kmalloc(NJ_DMA_RXSIZE, GFP_ATOMIC); 329 329 if (!card->bc[i].hrbuf) { 330 330 pr_info("%s: no B%d recv buffer\n", card->name, i + 1); 331 331 return -ENOMEM;
+8 -5
drivers/net/benet/be_cmds.c
··· 1429 1429 wrb = wrb_from_mccq(adapter); 1430 1430 if (!wrb) { 1431 1431 status = -EBUSY; 1432 - goto err; 1432 + goto err_unlock; 1433 1433 } 1434 1434 req = cmd->va; 1435 1435 sge = nonembedded_sgl(wrb); ··· 1457 1457 else 1458 1458 status = adapter->flash_status; 1459 1459 1460 - err: 1460 + return status; 1461 + 1462 + err_unlock: 1463 + spin_unlock_bh(&adapter->mcc_lock); 1461 1464 return status; 1462 1465 } 1463 1466 ··· 1500 1497 return status; 1501 1498 } 1502 1499 1503 - extern int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac, 1500 + int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac, 1504 1501 struct be_dma_mem *nonemb_cmd) 1505 1502 { 1506 1503 struct be_mcc_wrb *wrb; ··· 1593 1590 1594 1591 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL, 1595 1592 OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req)); 1596 - req->hdr.timeout = 4; 1593 + req->hdr.timeout = cpu_to_le32(4); 1597 1594 1598 1595 req->pattern = cpu_to_le64(pattern); 1599 1596 req->src_port = cpu_to_le32(port_num); ··· 1665 1662 return status; 1666 1663 } 1667 1664 1668 - extern int be_cmd_get_seeprom_data(struct be_adapter *adapter, 1665 + int be_cmd_get_seeprom_data(struct be_adapter *adapter, 1669 1666 struct be_dma_mem *nonemb_cmd) 1670 1667 { 1671 1668 struct be_mcc_wrb *wrb;
+5 -5
drivers/net/can/mscan/mpc5xxx_can.c
··· 73 73 else 74 74 *mscan_clksrc = MSCAN_CLKSRC_XTAL; 75 75 76 - freq = mpc5xxx_get_bus_frequency(ofdev->node); 76 + freq = mpc5xxx_get_bus_frequency(ofdev->dev.of_node); 77 77 if (!freq) 78 78 return 0; 79 79 ··· 152 152 } 153 153 154 154 /* Determine the MSCAN device index from the physical address */ 155 - pval = of_get_property(ofdev->node, "reg", &plen); 155 + pval = of_get_property(ofdev->dev.of_node, "reg", &plen); 156 156 BUG_ON(!pval || plen < sizeof(*pval)); 157 157 clockidx = (*pval & 0x80) ? 1 : 0; 158 158 if (*pval & 0x2000) ··· 168 168 */ 169 169 if (clock_name && !strcmp(clock_name, "ip")) { 170 170 *mscan_clksrc = MSCAN_CLKSRC_IPS; 171 - freq = mpc5xxx_get_bus_frequency(ofdev->node); 171 + freq = mpc5xxx_get_bus_frequency(ofdev->dev.of_node); 172 172 } else { 173 173 *mscan_clksrc = MSCAN_CLKSRC_BUS; 174 174 175 - pval = of_get_property(ofdev->node, 175 + pval = of_get_property(ofdev->dev.of_node, 176 176 "fsl,mscan-clock-divider", &plen); 177 177 if (pval && plen == sizeof(*pval)) 178 178 clockdiv = *pval; ··· 251 251 const struct of_device_id *id) 252 252 { 253 253 struct mpc5xxx_can_data *data = (struct mpc5xxx_can_data *)id->data; 254 - struct device_node *np = ofdev->node; 254 + struct device_node *np = ofdev->dev.of_node; 255 255 struct net_device *dev; 256 256 struct mscan_priv *priv; 257 257 void __iomem *base;
+43 -6
drivers/net/fs_enet/mac-fcc.c
··· 504 504 } 505 505 506 506 /* Some transmit errors cause the transmitter to shut 507 - * down. We now issue a restart transmit. Since the 508 - * errors close the BD and update the pointers, the restart 509 - * _should_ pick up without having to reset any of our 510 - * pointers either. Also, To workaround 8260 device erratum 511 - * CPM37, we must disable and then re-enable the transmitter 512 - * following a Late Collision, Underrun, or Retry Limit error. 507 + * down. We now issue a restart transmit. 508 + * Also, to workaround 8260 device erratum CPM37, we must 509 + * disable and then re-enable the transmitterfollowing a 510 + * Late Collision, Underrun, or Retry Limit error. 511 + * In addition, tbptr may point beyond BDs beyond still marked 512 + * as ready due to internal pipelining, so we need to look back 513 + * through the BDs and adjust tbptr to point to the last BD 514 + * marked as ready. This may result in some buffers being 515 + * retransmitted. 513 516 */ 514 517 static void tx_restart(struct net_device *dev) 515 518 { 516 519 struct fs_enet_private *fep = netdev_priv(dev); 517 520 fcc_t __iomem *fccp = fep->fcc.fccp; 521 + const struct fs_platform_info *fpi = fep->fpi; 522 + fcc_enet_t __iomem *ep = fep->fcc.ep; 523 + cbd_t __iomem *curr_tbptr; 524 + cbd_t __iomem *recheck_bd; 525 + cbd_t __iomem *prev_bd; 526 + cbd_t __iomem *last_tx_bd; 527 + 528 + last_tx_bd = fep->tx_bd_base + (fpi->tx_ring * sizeof(cbd_t)); 529 + 530 + /* get the current bd held in TBPTR and scan back from this point */ 531 + recheck_bd = curr_tbptr = (cbd_t __iomem *) 532 + ((R32(ep, fen_genfcc.fcc_tbptr) - fep->ring_mem_addr) + 533 + fep->ring_base); 534 + 535 + prev_bd = (recheck_bd == fep->tx_bd_base) ? last_tx_bd : recheck_bd - 1; 536 + 537 + /* Move through the bds in reverse, look for the earliest buffer 538 + * that is not ready. Adjust TBPTR to the following buffer */ 539 + while ((CBDR_SC(prev_bd) & BD_ENET_TX_READY) != 0) { 540 + /* Go back one buffer */ 541 + recheck_bd = prev_bd; 542 + 543 + /* update the previous buffer */ 544 + prev_bd = (prev_bd == fep->tx_bd_base) ? last_tx_bd : prev_bd - 1; 545 + 546 + /* We should never see all bds marked as ready, check anyway */ 547 + if (recheck_bd == curr_tbptr) 548 + break; 549 + } 550 + /* Now update the TBPTR and dirty flag to the current buffer */ 551 + W32(ep, fen_genfcc.fcc_tbptr, 552 + (uint) (((void *)recheck_bd - fep->ring_base) + 553 + fep->ring_mem_addr)); 554 + fep->dirty_tx = recheck_bd; 518 555 519 556 C32(fccp, fcc_gfmr, FCC_GFMR_ENT); 520 557 udelay(10);
+5 -6
drivers/net/greth.c
··· 1607 1607 MODULE_DEVICE_TABLE(of, greth_of_match); 1608 1608 1609 1609 static struct of_platform_driver greth_of_driver = { 1610 - .name = "grlib-greth", 1611 - .match_table = greth_of_match, 1610 + .driver = { 1611 + .name = "grlib-greth", 1612 + .owner = THIS_MODULE, 1613 + .of_match_table = greth_of_match, 1614 + }, 1612 1615 .probe = greth_of_probe, 1613 1616 .remove = __devexit_p(greth_of_remove), 1614 - .driver = { 1615 - .owner = THIS_MODULE, 1616 - .name = "grlib-greth", 1617 - }, 1618 1617 }; 1619 1618 1620 1619 static int __init greth_init(void)
+2 -1
drivers/net/ksz884x.c
··· 4854 4854 * 4855 4855 * Return 0 if successful; otherwise an error code indicating failure. 4856 4856 */ 4857 - static int netdev_tx(struct sk_buff *skb, struct net_device *dev) 4857 + static netdev_tx_t netdev_tx(struct sk_buff *skb, struct net_device *dev) 4858 4858 { 4859 4859 struct dev_priv *priv = netdev_priv(dev); 4860 4860 struct dev_info *hw_priv = priv->adapter; ··· 6863 6863 .ndo_tx_timeout = netdev_tx_timeout, 6864 6864 .ndo_change_mtu = netdev_change_mtu, 6865 6865 .ndo_set_mac_address = netdev_set_mac_address, 6866 + .ndo_validate_addr = eth_validate_addr, 6866 6867 .ndo_do_ioctl = netdev_ioctl, 6867 6868 .ndo_set_rx_mode = netdev_set_rx_mode, 6868 6869 #ifdef CONFIG_NET_POLL_CONTROLLER
+4 -4
drivers/net/virtio_net.c
··· 340 340 341 341 skb_to_sgvec(skb, vi->rx_sg + 1, 0, skb->len); 342 342 343 - err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, 2, skb); 343 + err = virtqueue_add_buf_gfp(vi->rvq, vi->rx_sg, 0, 2, skb, gfp); 344 344 if (err < 0) 345 345 dev_kfree_skb(skb); 346 346 ··· 385 385 386 386 /* chain first in list head */ 387 387 first->private = (unsigned long)list; 388 - err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, MAX_SKB_FRAGS + 2, 389 - first); 388 + err = virtqueue_add_buf_gfp(vi->rvq, vi->rx_sg, 0, MAX_SKB_FRAGS + 2, 389 + first, gfp); 390 390 if (err < 0) 391 391 give_pages(vi, first); 392 392 ··· 404 404 405 405 sg_init_one(vi->rx_sg, page_address(page), PAGE_SIZE); 406 406 407 - err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, 1, page); 407 + err = virtqueue_add_buf_gfp(vi->rvq, vi->rx_sg, 0, 1, page, gfp); 408 408 if (err < 0) 409 409 give_pages(vi, page); 410 410
+12 -2
drivers/net/wireless/ath/ar9170/usb.c
··· 739 739 static void ar9170_usb_firmware_failed(struct ar9170_usb *aru) 740 740 { 741 741 struct device *parent = aru->udev->dev.parent; 742 + struct usb_device *udev; 743 + 744 + /* 745 + * Store a copy of the usb_device pointer locally. 746 + * This is because device_release_driver initiates 747 + * ar9170_usb_disconnect, which in turn frees our 748 + * driver context (aru). 749 + */ 750 + udev = aru->udev; 742 751 743 752 complete(&aru->firmware_loading_complete); 744 753 745 754 /* unbind anything failed */ 746 755 if (parent) 747 756 device_lock(parent); 748 - device_release_driver(&aru->udev->dev); 757 + 758 + device_release_driver(&udev->dev); 749 759 if (parent) 750 760 device_unlock(parent); 751 761 752 - usb_put_dev(aru->udev); 762 + usb_put_dev(udev); 753 763 } 754 764 755 765 static void ar9170_usb_firmware_finish(const struct firmware *fw, void *context)
+4 -2
drivers/net/wireless/ath/ath9k/xmit.c
··· 1198 1198 int r; 1199 1199 1200 1200 ath_print(common, ATH_DBG_FATAL, 1201 - "Unable to stop TxDMA. Reset HAL!\n"); 1201 + "Failed to stop TX DMA. Resetting hardware!\n"); 1202 1202 1203 1203 spin_lock_bh(&sc->sc_resetlock); 1204 1204 r = ath9k_hw_reset(ah, sc->sc_ah->curchan, false); ··· 1728 1728 } else 1729 1729 bf->bf_isnullfunc = false; 1730 1730 1731 + bf->bf_tx_aborted = false; 1732 + 1731 1733 return 0; 1732 1734 } 1733 1735 ··· 1991 1989 int nbad = 0; 1992 1990 int isaggr = 0; 1993 1991 1994 - if (bf->bf_tx_aborted) 1992 + if (bf->bf_lastbf->bf_tx_aborted) 1995 1993 return 0; 1996 1994 1997 1995 isaggr = bf_isaggr(bf);
+2 -3
drivers/net/wireless/libertas/rx.c
··· 329 329 /* create the exported radio header */ 330 330 331 331 /* radiotap header */ 332 - radiotap_hdr.hdr.it_version = 0; 333 - /* XXX must check this value for pad */ 334 - radiotap_hdr.hdr.it_pad = 0; 332 + memset(&radiotap_hdr, 0, sizeof(radiotap_hdr)); 333 + /* XXX must check radiotap_hdr.hdr.it_pad for pad */ 335 334 radiotap_hdr.hdr.it_len = cpu_to_le16 (sizeof(struct rx_radiotap_hdr)); 336 335 radiotap_hdr.hdr.it_present = cpu_to_le32 (RX_RADIOTAP_PRESENT); 337 336 radiotap_hdr.rate = convert_mv_rate_to_radiotap(prxpd->rx_rate);
+1 -1
drivers/net/wireless/rt2x00/rt2800usb.c
··· 413 413 */ 414 414 rt2x00_desc_read(txi, 0, &word); 415 415 rt2x00_set_field32(&word, TXINFO_W0_USB_DMA_TX_PKT_LEN, 416 - skb->len - TXINFO_DESC_SIZE); 416 + skb->len + TXWI_DESC_SIZE); 417 417 rt2x00_set_field32(&word, TXINFO_W0_WIV, 418 418 !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags)); 419 419 rt2x00_set_field32(&word, TXINFO_W0_QSEL, 2);
+6 -3
drivers/ssb/pci.c
··· 625 625 ssb_printk(KERN_ERR PFX "No SPROM available!\n"); 626 626 return -ENODEV; 627 627 } 628 - 629 - bus->sprom_offset = (bus->chipco.dev->id.revision < 31) ? 630 - SSB_SPROM_BASE1 : SSB_SPROM_BASE31; 628 + if (bus->chipco.dev) { /* can be unavailible! */ 629 + bus->sprom_offset = (bus->chipco.dev->id.revision < 31) ? 630 + SSB_SPROM_BASE1 : SSB_SPROM_BASE31; 631 + } else { 632 + bus->sprom_offset = SSB_SPROM_BASE1; 633 + } 631 634 632 635 buf = kcalloc(SSB_SPROMSIZE_WORDS_R123, sizeof(u16), GFP_KERNEL); 633 636 if (!buf)
+1
drivers/ssb/sprom.c
··· 185 185 /* this routine differs from specs as we do not access SPROM directly 186 186 on PCMCIA */ 187 187 if (bus->bustype == SSB_BUSTYPE_PCI && 188 + bus->chipco.dev && /* can be unavailible! */ 188 189 bus->chipco.dev->id.revision >= 31) 189 190 return bus->chipco.capabilities & SSB_CHIPCO_CAP_SPROM; 190 191
+1 -1
include/linux/netfilter/x_tables.h
··· 397 397 * @stacksize jumps (number of user chains) can possibly be made. 398 398 */ 399 399 unsigned int stacksize; 400 - unsigned int *stackptr; 400 + unsigned int __percpu *stackptr; 401 401 void ***jumpstack; 402 402 /* ipt_entry tables: one per CPU */ 403 403 /* Note : this field MUST be the last one, see XT_TABLE_INFO_SZ */
+1 -1
include/linux/skbuff.h
··· 501 501 return __alloc_skb(size, priority, 1, -1); 502 502 } 503 503 504 - extern int skb_recycle_check(struct sk_buff *skb, int skb_size); 504 + extern bool skb_recycle_check(struct sk_buff *skb, int skb_size); 505 505 506 506 extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src); 507 507 extern struct sk_buff *skb_clone(struct sk_buff *skb,
+1 -14
include/net/sock.h
··· 1524 1524 1525 1525 extern int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); 1526 1526 1527 - static inline int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) 1528 - { 1529 - /* Cast skb->rcvbuf to unsigned... It's pointless, but reduces 1530 - number of warnings when compiling with -W --ANK 1531 - */ 1532 - if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= 1533 - (unsigned)sk->sk_rcvbuf) 1534 - return -ENOMEM; 1535 - skb_set_owner_r(skb, sk); 1536 - skb_queue_tail(&sk->sk_error_queue, skb); 1537 - if (!sock_flag(sk, SOCK_DEAD)) 1538 - sk->sk_data_ready(sk, skb->len); 1539 - return 0; 1540 - } 1527 + extern int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb); 1541 1528 1542 1529 /* 1543 1530 * Recover an error report and clear atomically
+4 -2
net/caif/cfserl.c
··· 59 59 u8 stx = CFSERL_STX; 60 60 int ret; 61 61 u16 expectlen = 0; 62 + 62 63 caif_assert(newpkt != NULL); 63 64 spin_lock(&layr->sync); 64 65 65 66 if (layr->incomplete_frm != NULL) { 66 - 67 67 layr->incomplete_frm = 68 68 cfpkt_append(layr->incomplete_frm, newpkt, expectlen); 69 69 pkt = layr->incomplete_frm; 70 - if (pkt == NULL) 70 + if (pkt == NULL) { 71 + spin_unlock(&layr->sync); 71 72 return -ENOMEM; 73 + } 72 74 } else { 73 75 pkt = newpkt; 74 76 }
+36 -6
net/core/skbuff.c
··· 482 482 * reference count dropping and cleans up the skbuff as if it 483 483 * just came from __alloc_skb(). 484 484 */ 485 - int skb_recycle_check(struct sk_buff *skb, int skb_size) 485 + bool skb_recycle_check(struct sk_buff *skb, int skb_size) 486 486 { 487 487 struct skb_shared_info *shinfo; 488 488 489 489 if (irqs_disabled()) 490 - return 0; 490 + return false; 491 491 492 492 if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE) 493 - return 0; 493 + return false; 494 494 495 495 skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD); 496 496 if (skb_end_pointer(skb) - skb->head < skb_size) 497 - return 0; 497 + return false; 498 498 499 499 if (skb_shared(skb) || skb_cloned(skb)) 500 - return 0; 500 + return false; 501 501 502 502 skb_release_head_state(skb); 503 503 ··· 509 509 skb->data = skb->head + NET_SKB_PAD; 510 510 skb_reset_tail_pointer(skb); 511 511 512 - return 1; 512 + return true; 513 513 } 514 514 EXPORT_SYMBOL(skb_recycle_check); 515 515 ··· 2965 2965 } 2966 2966 EXPORT_SYMBOL_GPL(skb_cow_data); 2967 2967 2968 + static void sock_rmem_free(struct sk_buff *skb) 2969 + { 2970 + struct sock *sk = skb->sk; 2971 + 2972 + atomic_sub(skb->truesize, &sk->sk_rmem_alloc); 2973 + } 2974 + 2975 + /* 2976 + * Note: We dont mem charge error packets (no sk_forward_alloc changes) 2977 + */ 2978 + int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) 2979 + { 2980 + if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= 2981 + (unsigned)sk->sk_rcvbuf) 2982 + return -ENOMEM; 2983 + 2984 + skb_orphan(skb); 2985 + skb->sk = sk; 2986 + skb->destructor = sock_rmem_free; 2987 + atomic_add(skb->truesize, &sk->sk_rmem_alloc); 2988 + 2989 + skb_queue_tail(&sk->sk_error_queue, skb); 2990 + if (!sock_flag(sk, SOCK_DEAD)) 2991 + sk->sk_data_ready(sk, skb->len); 2992 + return 0; 2993 + } 2994 + EXPORT_SYMBOL(sock_queue_err_skb); 2995 + 2968 2996 void skb_tstamp_tx(struct sk_buff *orig_skb, 2969 2997 struct skb_shared_hwtstamps *hwtstamps) 2970 2998 { ··· 3024 2996 memset(serr, 0, sizeof(*serr)); 3025 2997 serr->ee.ee_errno = ENOMSG; 3026 2998 serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; 2999 + 3027 3000 err = sock_queue_err_skb(sk, skb); 3001 + 3028 3002 if (err) 3029 3003 kfree_skb(skb); 3030 3004 }
+1 -1
net/ipv4/netfilter/ip_tables.c
··· 336 336 cpu = smp_processor_id(); 337 337 table_base = private->entries[cpu]; 338 338 jumpstack = (struct ipt_entry **)private->jumpstack[cpu]; 339 - stackptr = &private->stackptr[cpu]; 339 + stackptr = per_cpu_ptr(private->stackptr, cpu); 340 340 origptr = *stackptr; 341 341 342 342 e = get_entry(table_base, private->hook_entry[hook]);
+2 -2
net/ipv4/tcp_input.c
··· 2639 2639 if (sk->sk_family == AF_INET) { 2640 2640 printk(KERN_DEBUG "Undo %s %pI4/%u c%u l%u ss%u/%u p%u\n", 2641 2641 msg, 2642 - &inet->daddr, ntohs(inet->dport), 2642 + &inet->inet_daddr, ntohs(inet->inet_dport), 2643 2643 tp->snd_cwnd, tcp_left_out(tp), 2644 2644 tp->snd_ssthresh, tp->prior_ssthresh, 2645 2645 tp->packets_out); ··· 2649 2649 struct ipv6_pinfo *np = inet6_sk(sk); 2650 2650 printk(KERN_DEBUG "Undo %s %pI6/%u c%u l%u ss%u/%u p%u\n", 2651 2651 msg, 2652 - &np->daddr, ntohs(inet->dport), 2652 + &np->daddr, ntohs(inet->inet_dport), 2653 2653 tp->snd_cwnd, tcp_left_out(tp), 2654 2654 tp->snd_ssthresh, tp->prior_ssthresh, 2655 2655 tp->packets_out);
+2 -2
net/ipv4/udp.c
··· 633 633 if (!inet->recverr) { 634 634 if (!harderr || sk->sk_state != TCP_ESTABLISHED) 635 635 goto out; 636 - } else { 636 + } else 637 637 ip_icmp_error(sk, skb, err, uh->dest, info, (u8 *)(uh+1)); 638 - } 638 + 639 639 sk->sk_err = err; 640 640 sk->sk_error_report(sk); 641 641 out:
+1 -1
net/ipv6/netfilter/ip6_tables.c
··· 363 363 cpu = smp_processor_id(); 364 364 table_base = private->entries[cpu]; 365 365 jumpstack = (struct ip6t_entry **)private->jumpstack[cpu]; 366 - stackptr = &private->stackptr[cpu]; 366 + stackptr = per_cpu_ptr(private->stackptr, cpu); 367 367 origptr = *stackptr; 368 368 369 369 e = get_entry(table_base, private->hook_entry[hook]);
+1 -1
net/ipv6/route.c
··· 814 814 { 815 815 int flags = 0; 816 816 817 - if (fl->oif || rt6_need_strict(&fl->fl6_dst)) 817 + if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl->fl6_dst)) 818 818 flags |= RT6_LOOKUP_F_IFACE; 819 819 820 820 if (!ipv6_addr_any(&fl->fl6_src))
+1 -1
net/mac80211/chan.c
··· 5 5 #include <linux/nl80211.h> 6 6 #include "ieee80211_i.h" 7 7 8 - enum ieee80211_chan_mode 8 + static enum ieee80211_chan_mode 9 9 __ieee80211_get_channel_mode(struct ieee80211_local *local, 10 10 struct ieee80211_sub_if_data *ignore) 11 11 {
+3 -14
net/netfilter/x_tables.c
··· 699 699 vfree(info->jumpstack); 700 700 else 701 701 kfree(info->jumpstack); 702 - if (sizeof(unsigned int) * nr_cpu_ids > PAGE_SIZE) 703 - vfree(info->stackptr); 704 - else 705 - kfree(info->stackptr); 702 + 703 + free_percpu(info->stackptr); 706 704 707 705 kfree(info); 708 706 } ··· 751 753 unsigned int size; 752 754 int cpu; 753 755 754 - size = sizeof(unsigned int) * nr_cpu_ids; 755 - if (size > PAGE_SIZE) 756 - i->stackptr = vmalloc(size); 757 - else 758 - i->stackptr = kmalloc(size, GFP_KERNEL); 756 + i->stackptr = alloc_percpu(unsigned int); 759 757 if (i->stackptr == NULL) 760 758 return -ENOMEM; 761 - memset(i->stackptr, 0, size); 762 759 763 760 size = sizeof(void **) * nr_cpu_ids; 764 761 if (size > PAGE_SIZE) ··· 836 843 int ret; 837 844 struct xt_table_info *private; 838 845 struct xt_table *t, *table; 839 - 840 - ret = xt_jumpstack_alloc(newinfo); 841 - if (ret < 0) 842 - return ERR_PTR(ret); 843 846 844 847 /* Don't add one object to multiple lists. */ 845 848 table = kmemdup(input_table, sizeof(struct xt_table), GFP_KERNEL);
+3 -3
net/phonet/pep.c
··· 1045 1045 lock_sock(sk); 1046 1046 if ((1 << sk->sk_state) & ~(TCPF_CLOSE|TCPF_LISTEN)) { 1047 1047 skparent = pn->listener; 1048 - sk_del_node_init(sk); 1049 1048 release_sock(sk); 1050 1049 1051 - sk = skparent; 1052 1050 pn = pep_sk(skparent); 1053 - lock_sock(sk); 1051 + lock_sock(skparent); 1052 + sk_del_node_init(sk); 1053 + sk = skparent; 1054 1054 } 1055 1055 /* Unhash a listening sock only when it is closed 1056 1056 * and all of its active connected pipes are closed. */
+1
net/rds/ib_cm.c
··· 475 475 err = rds_ib_setup_qp(conn); 476 476 if (err) { 477 477 rds_ib_conn_error(conn, "rds_ib_setup_qp failed (%d)\n", err); 478 + mutex_unlock(&conn->c_cm_lock); 478 479 goto out; 479 480 } 480 481
+1
net/rds/iw_cm.c
··· 452 452 err = rds_iw_setup_qp(conn); 453 453 if (err) { 454 454 rds_iw_conn_error(conn, "rds_iw_setup_qp failed (%d)\n", err); 455 + mutex_unlock(&conn->c_cm_lock); 455 456 goto out; 456 457 } 457 458