Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

Conflicts:

drivers/net/wireless/iwlwifi/iwl4965-base.c

+362 -216
+3 -3
Documentation/networking/s2io.txt
··· 83 83 Default: 30 84 84 85 85 e. intr_type 86 - Specifies interrupt type. Possible values 1(INTA), 2(MSI), 3(MSI-X) 87 - Valid range: 1-3 88 - Default: 1 86 + Specifies interrupt type. Possible values 0(INTA), 2(MSI-X) 87 + Valid values: 0, 2 88 + Default: 2 89 89 90 90 5. Performance suggestions 91 91 General:
+40
drivers/connector/connector.c
··· 27 27 #include <linux/moduleparam.h> 28 28 #include <linux/connector.h> 29 29 #include <linux/mutex.h> 30 + #include <linux/proc_fs.h> 31 + #include <linux/spinlock.h> 30 32 31 33 #include <net/sock.h> 32 34 ··· 405 403 mutex_unlock(&notify_lock); 406 404 } 407 405 406 + static int cn_proc_show(struct seq_file *m, void *v) 407 + { 408 + struct cn_queue_dev *dev = cdev.cbdev; 409 + struct cn_callback_entry *cbq; 410 + 411 + seq_printf(m, "Name ID\n"); 412 + 413 + spin_lock_bh(&dev->queue_lock); 414 + 415 + list_for_each_entry(cbq, &dev->queue_list, callback_entry) { 416 + seq_printf(m, "%-15s %u:%u\n", 417 + cbq->id.name, 418 + cbq->id.id.idx, 419 + cbq->id.id.val); 420 + } 421 + 422 + spin_unlock_bh(&dev->queue_lock); 423 + 424 + return 0; 425 + } 426 + 427 + static int cn_proc_open(struct inode *inode, struct file *file) 428 + { 429 + return single_open(file, cn_proc_show, NULL); 430 + } 431 + 432 + static const struct file_operations cn_file_ops = { 433 + .owner = THIS_MODULE, 434 + .open = cn_proc_open, 435 + .read = seq_read, 436 + .llseek = seq_lseek, 437 + .release = single_release 438 + }; 439 + 408 440 static int __devinit cn_init(void) 409 441 { 410 442 struct cn_dev *dev = &cdev; ··· 470 434 return -EINVAL; 471 435 } 472 436 437 + proc_net_fops_create(&init_net, "connector", S_IRUGO, &cn_file_ops); 438 + 473 439 return 0; 474 440 } 475 441 ··· 480 442 struct cn_dev *dev = &cdev; 481 443 482 444 cn_already_initialized = 0; 445 + 446 + proc_net_remove(&init_net, "connector"); 483 447 484 448 cn_del_callback(&dev->id); 485 449 cn_queue_free_dev(dev->cbdev);
+3 -2
drivers/net/3c59x.c
··· 1768 1768 case XCVR_MII: case XCVR_NWAY: 1769 1769 { 1770 1770 ok = 1; 1771 - spin_lock_bh(&vp->lock); 1771 + /* Interrupts are already disabled */ 1772 + spin_lock(&vp->lock); 1772 1773 vortex_check_media(dev, 0); 1773 - spin_unlock_bh(&vp->lock); 1774 + spin_unlock(&vp->lock); 1774 1775 } 1775 1776 break; 1776 1777 default: /* Other media types handled by Tx timeouts. */
+2
drivers/net/e100.c
··· 1803 1803 if (rx->prev->skb) { 1804 1804 struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data; 1805 1805 put_unaligned_le32(rx->dma_addr, &prev_rfd->link); 1806 + pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr, 1807 + sizeof(struct rfd), PCI_DMA_TODEVICE); 1806 1808 } 1807 1809 1808 1810 return 0;
+1 -1
drivers/net/e1000/e1000_ethtool.c
··· 347 347 else 348 348 netdev->features &= ~NETIF_F_TSO; 349 349 350 - if (data) 350 + if (data && (adapter->hw.mac_type > e1000_82547_rev_2)) 351 351 netdev->features |= NETIF_F_TSO6; 352 352 else 353 353 netdev->features &= ~NETIF_F_TSO6;
+2 -1
drivers/net/e1000e/netdev.c
··· 2535 2535 adapter->link_speed = 0; 2536 2536 adapter->link_duplex = 0; 2537 2537 2538 - e1000e_reset(adapter); 2538 + if (!pci_channel_offline(adapter->pdev)) 2539 + e1000e_reset(adapter); 2539 2540 e1000_clean_tx_ring(adapter); 2540 2541 e1000_clean_rx_ring(adapter); 2541 2542
-2
drivers/net/hamradio/dmascc.c
··· 1077 1077 1078 1078 static void start_timer(struct scc_priv *priv, int t, int r15) 1079 1079 { 1080 - unsigned long flags; 1081 - 1082 1080 outb(priv->tmr_mode, priv->tmr_ctrl); 1083 1081 if (t == 0) { 1084 1082 tm_isr(priv);
+2 -1
drivers/net/igb/igb_main.c
··· 718 718 adapter->link_speed = 0; 719 719 adapter->link_duplex = 0; 720 720 721 - igb_reset(adapter); 721 + if (!pci_channel_offline(adapter->pdev)) 722 + igb_reset(adapter); 722 723 igb_clean_all_tx_rings(adapter); 723 724 igb_clean_all_rx_rings(adapter); 724 725 }
+8 -8
drivers/net/ipg.c
··· 1271 1271 1272 1272 framelen = le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFRAMELEN; 1273 1273 1274 - endframeLen = framelen - jumbo->current_size; 1274 + endframelen = framelen - jumbo->current_size; 1275 1275 /* 1276 1276 if (framelen > IPG_RXFRAG_SIZE) 1277 1277 framelen=IPG_RXFRAG_SIZE; ··· 1279 1279 if (framelen > IPG_RXSUPPORT_SIZE) 1280 1280 dev_kfree_skb_irq(jumbo->skb); 1281 1281 else { 1282 - memcpy(skb_put(jumbo->skb, endframeLen), 1283 - skb->data, endframeLen); 1282 + memcpy(skb_put(jumbo->skb, endframelen), 1283 + skb->data, endframelen); 1284 1284 1285 1285 jumbo->skb->protocol = 1286 1286 eth_type_trans(jumbo->skb, dev); ··· 1352 1352 1353 1353 switch (ipg_nic_rx_check_frame_type(dev)) { 1354 1354 case FRAME_WITH_START_WITH_END: 1355 - ipg_nic_rx_with_start_and_end(dev, tp, rxfd, entry); 1355 + ipg_nic_rx_with_start_and_end(dev, sp, rxfd, entry); 1356 1356 break; 1357 1357 case FRAME_WITH_START: 1358 - ipg_nic_rx_with_start(dev, tp, rxfd, entry); 1358 + ipg_nic_rx_with_start(dev, sp, rxfd, entry); 1359 1359 break; 1360 1360 case FRAME_WITH_END: 1361 - ipg_nic_rx_with_end(dev, tp, rxfd, entry); 1361 + ipg_nic_rx_with_end(dev, sp, rxfd, entry); 1362 1362 break; 1363 1363 case FRAME_NO_START_NO_END: 1364 - ipg_nic_rx_no_start_no_end(dev, tp, rxfd, entry); 1364 + ipg_nic_rx_no_start_no_end(dev, sp, rxfd, entry); 1365 1365 break; 1366 1366 } 1367 1367 } ··· 1808 1808 /* initialize JUMBO Frame control variable */ 1809 1809 sp->jumbo.found_start = 0; 1810 1810 sp->jumbo.current_size = 0; 1811 - sp->jumbo.skb = 0; 1811 + sp->jumbo.skb = NULL; 1812 1812 dev->mtu = IPG_TXFRAG_SIZE; 1813 1813 #endif 1814 1814
+2 -1
drivers/net/ixgbe/ixgbe_main.c
··· 1969 1969 netif_carrier_off(netdev); 1970 1970 netif_stop_queue(netdev); 1971 1971 1972 - ixgbe_reset(adapter); 1972 + if (!pci_channel_offline(adapter->pdev)) 1973 + ixgbe_reset(adapter); 1973 1974 ixgbe_clean_all_tx_rings(adapter); 1974 1975 ixgbe_clean_all_rx_rings(adapter); 1975 1976
+11 -7
drivers/net/netxen/netxen_nic_main.c
··· 71 71 static irqreturn_t netxen_msi_intr(int irq, void *data); 72 72 73 73 /* PCI Device ID Table */ 74 + #define ENTRY(device) \ 75 + {PCI_DEVICE(0x4040, (device)), \ 76 + .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0} 77 + 74 78 static struct pci_device_id netxen_pci_tbl[] __devinitdata = { 75 - {PCI_DEVICE(0x4040, 0x0001), PCI_DEVICE_CLASS(0x020000, ~0)}, 76 - {PCI_DEVICE(0x4040, 0x0002), PCI_DEVICE_CLASS(0x020000, ~0)}, 77 - {PCI_DEVICE(0x4040, 0x0003), PCI_DEVICE_CLASS(0x020000, ~0)}, 78 - {PCI_DEVICE(0x4040, 0x0004), PCI_DEVICE_CLASS(0x020000, ~0)}, 79 - {PCI_DEVICE(0x4040, 0x0005), PCI_DEVICE_CLASS(0x020000, ~0)}, 80 - {PCI_DEVICE(0x4040, 0x0024), PCI_DEVICE_CLASS(0x020000, ~0)}, 81 - {PCI_DEVICE(0x4040, 0x0025), PCI_DEVICE_CLASS(0x020000, ~0)}, 79 + ENTRY(0x0001), 80 + ENTRY(0x0002), 81 + ENTRY(0x0003), 82 + ENTRY(0x0004), 83 + ENTRY(0x0005), 84 + ENTRY(0x0024), 85 + ENTRY(0x0025), 82 86 {0,} 83 87 }; 84 88
+2
drivers/net/pcmcia/axnet_cs.c
··· 525 525 int ret; 526 526 axnet_dev_t *info = PRIV(dev); 527 527 struct pcmcia_device *link = info->p_dev; 528 + unsigned int nic_base = dev->base_addr; 528 529 529 530 DEBUG(2, "axnet_open('%s')\n", dev->name); 530 531 531 532 if (!pcmcia_dev_present(link)) 532 533 return -ENODEV; 533 534 535 + outb_p(0xFF, nic_base + EN0_ISR); /* Clear bogus intr. */ 534 536 ret = request_irq(dev->irq, ei_irq_wrapper, IRQF_SHARED, "axnet_cs", dev); 535 537 if (ret) 536 538 return ret;
+3
drivers/net/pcmcia/pcnet_cs.c
··· 969 969 int ret; 970 970 pcnet_dev_t *info = PRIV(dev); 971 971 struct pcmcia_device *link = info->p_dev; 972 + unsigned int nic_base = dev->base_addr; 972 973 973 974 DEBUG(2, "pcnet_open('%s')\n", dev->name); 974 975 ··· 977 976 return -ENODEV; 978 977 979 978 set_misc_reg(dev); 979 + 980 + outb_p(0xFF, nic_base + EN0_ISR); /* Clear bogus intr. */ 980 981 ret = request_irq(dev->irq, ei_irq_wrapper, IRQF_SHARED, dev_info, dev); 981 982 if (ret) 982 983 return ret;
+1 -1
drivers/net/pppoe.c
··· 942 942 m->msg_namelen = 0; 943 943 944 944 if (skb) { 945 - total_len = min(total_len, skb->len); 945 + total_len = min_t(size_t, total_len, skb->len); 946 946 error = skb_copy_datagram_iovec(skb, 0, m->msg_iov, total_len); 947 947 if (error == 0) 948 948 error = total_len;
+2
drivers/net/qla3xxx.c
··· 3701 3701 printk(KERN_ERR PFX 3702 3702 "%s: Driver up/down cycle failed, " 3703 3703 "closing device\n",qdev->ndev->name); 3704 + rtnl_lock(); 3704 3705 dev_close(qdev->ndev); 3706 + rtnl_unlock(); 3705 3707 return -1; 3706 3708 } 3707 3709 return 0;
+1 -1
drivers/net/r6040.c
··· 273 273 dma_addr_t mapping = desc_dma; 274 274 275 275 while (size-- > 0) { 276 - mapping += sizeof(sizeof(*desc)); 276 + mapping += sizeof(*desc); 277 277 desc->ndesc = cpu_to_le32(mapping); 278 278 desc->vndescp = desc + 1; 279 279 desc++;
+12 -23
drivers/net/s2io.c
··· 2625 2625 rxdp1->Buffer0_ptr = pci_map_single 2626 2626 (ring->pdev, skb->data, size - NET_IP_ALIGN, 2627 2627 PCI_DMA_FROMDEVICE); 2628 - if( (rxdp1->Buffer0_ptr == 0) || 2629 - (rxdp1->Buffer0_ptr == 2630 - DMA_ERROR_CODE)) 2628 + if(pci_dma_mapping_error(rxdp1->Buffer0_ptr)) 2631 2629 goto pci_map_failed; 2632 2630 2633 2631 rxdp->Control_2 = ··· 2655 2657 skb->data = (void *) (unsigned long)tmp; 2656 2658 skb_reset_tail_pointer(skb); 2657 2659 2660 + /* AK: check is wrong. 0 can be valid dma address */ 2658 2661 if (!(rxdp3->Buffer0_ptr)) 2659 2662 rxdp3->Buffer0_ptr = 2660 2663 pci_map_single(ring->pdev, ba->ba_0, ··· 2664 2665 pci_dma_sync_single_for_device(ring->pdev, 2665 2666 (dma_addr_t) rxdp3->Buffer0_ptr, 2666 2667 BUF0_LEN, PCI_DMA_FROMDEVICE); 2667 - if( (rxdp3->Buffer0_ptr == 0) || 2668 - (rxdp3->Buffer0_ptr == DMA_ERROR_CODE)) 2668 + if (pci_dma_mapping_error(rxdp3->Buffer0_ptr)) 2669 2669 goto pci_map_failed; 2670 2670 2671 2671 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN); ··· 2679 2681 (ring->pdev, skb->data, ring->mtu + 4, 2680 2682 PCI_DMA_FROMDEVICE); 2681 2683 2682 - if( (rxdp3->Buffer2_ptr == 0) || 2683 - (rxdp3->Buffer2_ptr == DMA_ERROR_CODE)) 2684 + if (pci_dma_mapping_error(rxdp3->Buffer2_ptr)) 2684 2685 goto pci_map_failed; 2685 2686 2687 + /* AK: check is wrong */ 2686 2688 if (!rxdp3->Buffer1_ptr) 2687 2689 rxdp3->Buffer1_ptr = 2688 2690 pci_map_single(ring->pdev, 2689 2691 ba->ba_1, BUF1_LEN, 2690 2692 PCI_DMA_FROMDEVICE); 2691 2693 2692 - if( (rxdp3->Buffer1_ptr == 0) || 2693 - (rxdp3->Buffer1_ptr == DMA_ERROR_CODE)) { 2694 + if (pci_dma_mapping_error(rxdp3->Buffer1_ptr)) { 2694 2695 pci_unmap_single 2695 2696 (ring->pdev, 2696 2697 (dma_addr_t)(unsigned long) ··· 4261 4264 txdp->Buffer_Pointer = pci_map_single(sp->pdev, 4262 4265 fifo->ufo_in_band_v, 4263 4266 sizeof(u64), PCI_DMA_TODEVICE); 4264 - if((txdp->Buffer_Pointer == 0) || 4265 - (txdp->Buffer_Pointer == DMA_ERROR_CODE)) 4267 + if 
(pci_dma_mapping_error(txdp->Buffer_Pointer)) 4266 4268 goto pci_map_failed; 4267 4269 txdp++; 4268 4270 } 4269 4271 4270 4272 txdp->Buffer_Pointer = pci_map_single 4271 4273 (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE); 4272 - if((txdp->Buffer_Pointer == 0) || 4273 - (txdp->Buffer_Pointer == DMA_ERROR_CODE)) 4274 + if (pci_dma_mapping_error(txdp->Buffer_Pointer)) 4274 4275 goto pci_map_failed; 4275 4276 4276 4277 txdp->Host_Control = (unsigned long) skb; ··· 6879 6884 pci_map_single( sp->pdev, (*skb)->data, 6880 6885 size - NET_IP_ALIGN, 6881 6886 PCI_DMA_FROMDEVICE); 6882 - if( (rxdp1->Buffer0_ptr == 0) || 6883 - (rxdp1->Buffer0_ptr == DMA_ERROR_CODE)) { 6887 + if (pci_dma_mapping_error(rxdp1->Buffer0_ptr)) 6884 6888 goto memalloc_failed; 6885 - } 6886 6889 rxdp->Host_Control = (unsigned long) (*skb); 6887 6890 } 6888 6891 } else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) { ··· 6906 6913 pci_map_single(sp->pdev, (*skb)->data, 6907 6914 dev->mtu + 4, 6908 6915 PCI_DMA_FROMDEVICE); 6909 - if( (rxdp3->Buffer2_ptr == 0) || 6910 - (rxdp3->Buffer2_ptr == DMA_ERROR_CODE)) { 6916 + if (pci_dma_mapping_error(rxdp3->Buffer2_ptr)) 6911 6917 goto memalloc_failed; 6912 - } 6913 6918 rxdp3->Buffer0_ptr = *temp0 = 6914 6919 pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN, 6915 6920 PCI_DMA_FROMDEVICE); 6916 - if( (rxdp3->Buffer0_ptr == 0) || 6917 - (rxdp3->Buffer0_ptr == DMA_ERROR_CODE)) { 6921 + if (pci_dma_mapping_error(rxdp3->Buffer0_ptr)) { 6918 6922 pci_unmap_single (sp->pdev, 6919 6923 (dma_addr_t)rxdp3->Buffer2_ptr, 6920 6924 dev->mtu + 4, PCI_DMA_FROMDEVICE); ··· 6923 6933 rxdp3->Buffer1_ptr = *temp1 = 6924 6934 pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN, 6925 6935 PCI_DMA_FROMDEVICE); 6926 - if( (rxdp3->Buffer1_ptr == 0) || 6927 - (rxdp3->Buffer1_ptr == DMA_ERROR_CODE)) { 6936 + if (pci_dma_mapping_error(rxdp3->Buffer1_ptr)) { 6928 6937 pci_unmap_single (sp->pdev, 6929 6938 (dma_addr_t)rxdp3->Buffer0_ptr, 6930 6939 BUF0_LEN, PCI_DMA_FROMDEVICE);
-4
drivers/net/s2io.h
··· 75 75 /* DEBUG message print. */ 76 76 #define DBG_PRINT(dbg_level, args...) if(!(debug_level<dbg_level)) printk(args) 77 77 78 - #ifndef DMA_ERROR_CODE 79 - #define DMA_ERROR_CODE (~(dma_addr_t)0x0) 80 - #endif 81 - 82 78 /* Protocol assist features of the NIC */ 83 79 #define L3_CKSUM_OK 0xFFFF 84 80 #define L4_CKSUM_OK 0xFFFF
+3 -1
drivers/net/tc35815.c
··· 1394 1394 tc35815_chip_init(dev); 1395 1395 spin_unlock_irq(&lp->lock); 1396 1396 1397 + netif_carrier_off(dev); 1397 1398 /* schedule a link state check */ 1398 1399 phy_start(lp->phy_dev); 1399 1400 ··· 1736 1735 skb = lp->rx_skbs[cur_bd].skb; 1737 1736 prefetch(skb->data); 1738 1737 lp->rx_skbs[cur_bd].skb = NULL; 1739 - lp->fbl_count--; 1740 1738 pci_unmap_single(lp->pci_dev, 1741 1739 lp->rx_skbs[cur_bd].skb_dma, 1742 1740 RX_BUF_SIZE, PCI_DMA_FROMDEVICE); ··· 1791 1791 #ifdef TC35815_USE_PACKEDBUFFER 1792 1792 while (lp->fbl_curid != id) 1793 1793 #else 1794 + lp->fbl_count--; 1794 1795 while (lp->fbl_count < RX_BUF_NUM) 1795 1796 #endif 1796 1797 { ··· 2454 2453 return 0; 2455 2454 pci_set_power_state(pdev, PCI_D0); 2456 2455 tc35815_restart(dev); 2456 + netif_carrier_off(dev); 2457 2457 if (lp->phy_dev) 2458 2458 phy_start(lp->phy_dev); 2459 2459 netif_device_attach(dev);
+3
drivers/net/wan/x25_asy.c
··· 32 32 #include <linux/x25.h> 33 33 #include <linux/lapb.h> 34 34 #include <linux/init.h> 35 + #include <linux/rtnetlink.h> 35 36 #include "x25_asy.h" 36 37 37 38 #include <net/x25device.h> ··· 602 601 if (!sl || sl->magic != X25_ASY_MAGIC) 603 602 return; 604 603 604 + rtnl_lock(); 605 605 if (sl->dev->flags & IFF_UP) 606 606 dev_close(sl->dev); 607 + rtnl_unlock(); 607 608 608 609 tty->disc_data = NULL; 609 610 sl->tty = NULL;
+3
drivers/net/wireless/b43/leds.c
··· 72 72 struct b43_wldev *dev = led->dev; 73 73 bool radio_enabled; 74 74 75 + if (unlikely(b43_status(dev) < B43_STAT_INITIALIZED)) 76 + return; 77 + 75 78 /* Checking the radio-enabled status here is slightly racy, 76 79 * but we want to avoid the locking overhead and we don't care 77 80 * whether the LED has the wrong state for a second. */
+8 -4
drivers/net/wireless/b43/main.c
··· 2976 2976 2977 2977 if (unlikely(skb->len < 2 + 2 + 6)) { 2978 2978 /* Too short, this can't be a valid frame. */ 2979 - dev_kfree_skb_any(skb); 2980 - return NETDEV_TX_OK; 2979 + goto drop_packet; 2981 2980 } 2982 2981 B43_WARN_ON(skb_shinfo(skb)->nr_frags); 2983 2982 if (unlikely(!dev)) 2984 - return NETDEV_TX_BUSY; 2983 + goto drop_packet; 2985 2984 2986 2985 /* Transmissions on seperate queues can run concurrently. */ 2987 2986 read_lock_irqsave(&wl->tx_lock, flags); ··· 2996 2997 read_unlock_irqrestore(&wl->tx_lock, flags); 2997 2998 2998 2999 if (unlikely(err)) 2999 - return NETDEV_TX_BUSY; 3000 + goto drop_packet; 3001 + return NETDEV_TX_OK; 3002 + 3003 + drop_packet: 3004 + /* We can not transmit this packet. Drop it. */ 3005 + dev_kfree_skb_any(skb); 3000 3006 return NETDEV_TX_OK; 3001 3007 } 3002 3008
+1 -1
drivers/net/wireless/b43legacy/dma.c
··· 876 876 if (!ring) 877 877 goto out; 878 878 ring->type = type; 879 + ring->dev = dev; 879 880 880 881 nr_slots = B43legacy_RXRING_SLOTS; 881 882 if (for_tx) ··· 923 922 DMA_TO_DEVICE); 924 923 } 925 924 926 - ring->dev = dev; 927 925 ring->nr_slots = nr_slots; 928 926 ring->mmio_base = b43legacy_dmacontroller_base(type, controller_index); 929 927 ring->index = controller_index;
+4 -2
drivers/net/wireless/b43legacy/main.c
··· 2377 2377 } else 2378 2378 err = b43legacy_dma_tx(dev, skb); 2379 2379 out: 2380 - if (unlikely(err)) 2381 - return NETDEV_TX_BUSY; 2380 + if (unlikely(err)) { 2381 + /* Drop the packet. */ 2382 + dev_kfree_skb_any(skb); 2383 + } 2382 2384 return NETDEV_TX_OK; 2383 2385 } 2384 2386
+18 -15
drivers/net/wireless/iwlwifi/iwl-scan.c
··· 276 276 cancel_delayed_work(&priv->scan_check); 277 277 278 278 IWL_DEBUG_INFO("Scan pass on %sGHz took %dms\n", 279 - (priv->scan_bands == 2) ? "2.4" : "5.2", 279 + (priv->scan_bands & BIT(IEEE80211_BAND_2GHZ)) ? 280 + "2.4" : "5.2", 280 281 jiffies_to_msecs(elapsed_jiffies 281 282 (priv->scan_pass_start, jiffies))); 282 283 283 - /* Remove this scanned band from the list 284 - * of pending bands to scan */ 285 - priv->scan_bands--; 284 + /* Remove this scanned band from the list of pending 285 + * bands to scan, band G precedes A in order of scanning 286 + * as seen in iwl_bg_request_scan */ 287 + if (priv->scan_bands & BIT(IEEE80211_BAND_2GHZ)) 288 + priv->scan_bands &= ~BIT(IEEE80211_BAND_2GHZ); 289 + else if (priv->scan_bands & BIT(IEEE80211_BAND_5GHZ)) 290 + priv->scan_bands &= ~BIT(IEEE80211_BAND_5GHZ); 286 291 287 292 /* If a request to abort was given, or the scan did not succeed 288 293 * then we reset the scan state machine and terminate, ··· 297 292 clear_bit(STATUS_SCAN_ABORTING, &priv->status); 298 293 } else { 299 294 /* If there are more bands on this scan pass reschedule */ 300 - if (priv->scan_bands > 0) 295 + if (priv->scan_bands) 301 296 goto reschedule; 302 297 } 303 298 ··· 394 389 395 390 ch_info = iwl_get_channel_info(priv, band, scan_ch->channel); 396 391 if (!is_channel_valid(ch_info)) { 397 - IWL_DEBUG_SCAN("Channel %d is INVALID for this SKU.\n", 392 + IWL_DEBUG_SCAN("Channel %d is INVALID for this band.\n", 398 393 scan_ch->channel); 399 394 continue; 400 395 } ··· 470 465 } 471 466 472 467 IWL_DEBUG_INFO("Starting scan...\n"); 473 - priv->scan_bands = 2; 468 + if (priv->cfg->sku & IWL_SKU_G) 469 + priv->scan_bands |= BIT(IEEE80211_BAND_2GHZ); 470 + if (priv->cfg->sku & IWL_SKU_A) 471 + priv->scan_bands |= BIT(IEEE80211_BAND_5GHZ); 474 472 set_bit(STATUS_SCANNING, &priv->status); 475 473 priv->scan_start = jiffies; 476 474 priv->scan_pass_start = priv->scan_start; ··· 811 803 scan->tx_cmd.stop_time.life_time = 
TX_CMD_LIFE_TIME_INFINITE; 812 804 813 805 814 - switch (priv->scan_bands) { 815 - case 2: 806 + if (priv->scan_bands & BIT(IEEE80211_BAND_2GHZ)) { 816 807 band = IEEE80211_BAND_2GHZ; 817 808 scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK; 818 809 tx_ant = iwl_scan_tx_ant(priv, band); ··· 825 818 tx_ant | 826 819 RATE_MCS_CCK_MSK); 827 820 scan->good_CRC_th = 0; 828 - break; 829 - 830 - case 1: 821 + } else if (priv->scan_bands & BIT(IEEE80211_BAND_5GHZ)) { 831 822 band = IEEE80211_BAND_5GHZ; 832 823 tx_ant = iwl_scan_tx_ant(priv, band); 833 824 scan->tx_cmd.rate_n_flags = ··· 838 833 * MIMO is not used here, but value is required */ 839 834 if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_4965) 840 835 rx_chain = 0x6; 841 - 842 - break; 843 - default: 836 + } else { 844 837 IWL_WARNING("Invalid scan band count\n"); 845 838 goto done; 846 839 }
+18 -15
drivers/net/wireless/iwlwifi/iwl3945-base.c
··· 2217 2217 } 2218 2218 2219 2219 IWL_DEBUG_INFO("Starting scan...\n"); 2220 - priv->scan_bands = 2; 2220 + if (priv->cfg->sku & IWL_SKU_G) 2221 + priv->scan_bands |= BIT(IEEE80211_BAND_2GHZ); 2222 + if (priv->cfg->sku & IWL_SKU_A) 2223 + priv->scan_bands |= BIT(IEEE80211_BAND_5GHZ); 2221 2224 set_bit(STATUS_SCANNING, &priv->status); 2222 2225 priv->scan_start = jiffies; 2223 2226 priv->scan_pass_start = priv->scan_start; ··· 3345 3342 cancel_delayed_work(&priv->scan_check); 3346 3343 3347 3344 IWL_DEBUG_INFO("Scan pass on %sGHz took %dms\n", 3348 - (priv->scan_bands == 2) ? "2.4" : "5.2", 3345 + (priv->scan_bands & BIT(IEEE80211_BAND_2GHZ)) ? 3346 + "2.4" : "5.2", 3349 3347 jiffies_to_msecs(elapsed_jiffies 3350 3348 (priv->scan_pass_start, jiffies))); 3351 3349 3352 - /* Remove this scanned band from the list 3353 - * of pending bands to scan */ 3354 - priv->scan_bands--; 3350 + /* Remove this scanned band from the list of pending 3351 + * bands to scan, band G precedes A in order of scanning 3352 + * as seen in iwl3945_bg_request_scan */ 3353 + if (priv->scan_bands & BIT(IEEE80211_BAND_2GHZ)) 3354 + priv->scan_bands &= ~BIT(IEEE80211_BAND_2GHZ); 3355 + else if (priv->scan_bands & BIT(IEEE80211_BAND_5GHZ)) 3356 + priv->scan_bands &= ~BIT(IEEE80211_BAND_5GHZ); 3355 3357 3356 3358 /* If a request to abort was given, or the scan did not succeed 3357 3359 * then we reset the scan state machine and terminate, ··· 4969 4961 4970 4962 ch_info = iwl3945_get_channel_info(priv, band, scan_ch->channel); 4971 4963 if (!is_channel_valid(ch_info)) { 4972 - IWL_DEBUG_SCAN("Channel %d is INVALID for this SKU.\n", 4964 + IWL_DEBUG_SCAN("Channel %d is INVALID for this band.\n", 4973 4965 scan_ch->channel); 4974 4966 continue; 4975 4967 } ··· 6324 6316 6325 6317 /* flags + rate selection */ 6326 6318 6327 - switch (priv->scan_bands) { 6328 - case 2: 6319 + if (priv->scan_bands & BIT(IEEE80211_BAND_2GHZ)) { 6329 6320 scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK; 
6330 6321 scan->tx_cmd.rate = IWL_RATE_1M_PLCP; 6331 6322 scan->good_CRC_th = 0; 6332 6323 band = IEEE80211_BAND_2GHZ; 6333 - break; 6334 - 6335 - case 1: 6324 + } else if (priv->scan_bands & BIT(IEEE80211_BAND_5GHZ)) { 6336 6325 scan->tx_cmd.rate = IWL_RATE_6M_PLCP; 6337 6326 scan->good_CRC_th = IWL_GOOD_CRC_TH; 6338 6327 band = IEEE80211_BAND_5GHZ; 6339 - break; 6340 - 6341 - default: 6328 + } else { 6342 6329 IWL_WARNING("Invalid scan band count\n"); 6343 6330 goto done; 6344 6331 } ··· 6773 6770 ch_info = iwl3945_get_channel_info(priv, conf->channel->band, 6774 6771 conf->channel->hw_value); 6775 6772 if (!is_channel_valid(ch_info)) { 6776 - IWL_DEBUG_SCAN("Channel %d [%d] is INVALID for this SKU.\n", 6773 + IWL_DEBUG_SCAN("Channel %d [%d] is INVALID for this band.\n", 6777 6774 conf->channel->hw_value, conf->channel->band); 6778 6775 IWL_DEBUG_MAC80211("leave - invalid channel\n"); 6779 6776 spin_unlock_irqrestore(&priv->lock, flags);
+1 -1
drivers/net/wireless/prism54/islpci_eth.c
··· 290 290 291 291 avs->version = cpu_to_be32(P80211CAPTURE_VERSION); 292 292 avs->length = cpu_to_be32(sizeof (struct avs_80211_1_header)); 293 - avs->mactime = cpu_to_be64(le64_to_cpu(clock)); 293 + avs->mactime = cpu_to_be64(clock); 294 294 avs->hosttime = cpu_to_be64(jiffies); 295 295 avs->phytype = cpu_to_be32(6); /*OFDM: 6 for (g), 8 for (a) */ 296 296 avs->channel = cpu_to_be32(channel_of_freq(freq));
+21 -15
drivers/net/wireless/rt2x00/rt2500usb.c
··· 138 138 * Wait until the BBP becomes ready. 139 139 */ 140 140 reg = rt2500usb_bbp_check(rt2x00dev); 141 - if (rt2x00_get_field16(reg, PHY_CSR8_BUSY)) { 142 - ERROR(rt2x00dev, "PHY_CSR8 register busy. Write failed.\n"); 143 - mutex_unlock(&rt2x00dev->usb_cache_mutex); 144 - return; 145 - } 141 + if (rt2x00_get_field16(reg, PHY_CSR8_BUSY)) 142 + goto exit_fail; 146 143 147 144 /* 148 145 * Write the data into the BBP. ··· 152 155 rt2500usb_register_write_lock(rt2x00dev, PHY_CSR7, reg); 153 156 154 157 mutex_unlock(&rt2x00dev->usb_cache_mutex); 158 + 159 + return; 160 + 161 + exit_fail: 162 + mutex_unlock(&rt2x00dev->usb_cache_mutex); 163 + 164 + ERROR(rt2x00dev, "PHY_CSR8 register busy. Write failed.\n"); 155 165 } 156 166 157 167 static void rt2500usb_bbp_read(struct rt2x00_dev *rt2x00dev, ··· 172 168 * Wait until the BBP becomes ready. 173 169 */ 174 170 reg = rt2500usb_bbp_check(rt2x00dev); 175 - if (rt2x00_get_field16(reg, PHY_CSR8_BUSY)) { 176 - ERROR(rt2x00dev, "PHY_CSR8 register busy. Read failed.\n"); 177 - return; 178 - } 171 + if (rt2x00_get_field16(reg, PHY_CSR8_BUSY)) 172 + goto exit_fail; 179 173 180 174 /* 181 175 * Write the request into the BBP. ··· 188 186 * Wait until the BBP becomes ready. 189 187 */ 190 188 reg = rt2500usb_bbp_check(rt2x00dev); 191 - if (rt2x00_get_field16(reg, PHY_CSR8_BUSY)) { 192 - ERROR(rt2x00dev, "PHY_CSR8 register busy. Read failed.\n"); 193 - *value = 0xff; 194 - mutex_unlock(&rt2x00dev->usb_cache_mutex); 195 - return; 196 - } 189 + if (rt2x00_get_field16(reg, PHY_CSR8_BUSY)) 190 + goto exit_fail; 197 191 198 192 rt2500usb_register_read_lock(rt2x00dev, PHY_CSR7, &reg); 199 193 *value = rt2x00_get_field16(reg, PHY_CSR7_DATA); 200 194 201 195 mutex_unlock(&rt2x00dev->usb_cache_mutex); 196 + 197 + return; 198 + 199 + exit_fail: 200 + mutex_unlock(&rt2x00dev->usb_cache_mutex); 201 + 202 + ERROR(rt2x00dev, "PHY_CSR8 register busy. 
Read failed.\n"); 203 + *value = 0xff; 202 204 } 203 205 204 206 static void rt2500usb_rf_write(struct rt2x00_dev *rt2x00dev,
+1
drivers/net/wireless/rt2x00/rt2x00.h
··· 818 818 /* 819 819 * Scheduled work. 820 820 */ 821 + struct workqueue_struct *workqueue; 821 822 struct work_struct intf_work; 822 823 struct work_struct filter_work; 823 824
+25 -13
drivers/net/wireless/rt2x00/rt2x00dev.c
··· 74 74 75 75 rt2x00lib_reset_link_tuner(rt2x00dev); 76 76 77 - queue_delayed_work(rt2x00dev->hw->workqueue, 77 + queue_delayed_work(rt2x00dev->workqueue, 78 78 &rt2x00dev->link.work, LINK_TUNE_INTERVAL); 79 79 } 80 80 ··· 136 136 { 137 137 if (!__test_and_clear_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags)) 138 138 return; 139 - 140 - /* 141 - * Stop all scheduled work. 142 - */ 143 - if (work_pending(&rt2x00dev->intf_work)) 144 - cancel_work_sync(&rt2x00dev->intf_work); 145 - if (work_pending(&rt2x00dev->filter_work)) 146 - cancel_work_sync(&rt2x00dev->filter_work); 147 139 148 140 /* 149 141 * Stop the TX queues. ··· 392 400 * Increase tuner counter, and reschedule the next link tuner run. 393 401 */ 394 402 rt2x00dev->link.count++; 395 - queue_delayed_work(rt2x00dev->hw->workqueue, &rt2x00dev->link.work, 396 - LINK_TUNE_INTERVAL); 403 + queue_delayed_work(rt2x00dev->workqueue, 404 + &rt2x00dev->link.work, LINK_TUNE_INTERVAL); 397 405 } 398 406 399 407 static void rt2x00lib_packetfilter_scheduled(struct work_struct *work) ··· 426 434 427 435 spin_unlock(&intf->lock); 428 436 437 + /* 438 + * It is possible the radio was disabled while the work had been 439 + * scheduled. If that happens we should return here immediately, 440 + * note that in the spinlock protected area above the delayed_flags 441 + * have been cleared correctly. 
442 + */ 443 + if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags)) 444 + return; 445 + 429 446 if (delayed_flags & DELAYED_UPDATE_BEACON) { 430 447 skb = ieee80211_beacon_get(rt2x00dev->hw, vif); 431 448 if (skb && ··· 443 442 } 444 443 445 444 if (delayed_flags & DELAYED_CONFIG_ERP) 446 - rt2x00lib_config_erp(rt2x00dev, intf, &intf->conf); 445 + rt2x00lib_config_erp(rt2x00dev, intf, &conf); 447 446 448 447 if (delayed_flags & DELAYED_LED_ASSOC) 449 448 rt2x00leds_led_assoc(rt2x00dev, !!rt2x00dev->intf_associated); ··· 489 488 rt2x00lib_beacondone_iter, 490 489 rt2x00dev); 491 490 492 - queue_work(rt2x00dev->hw->workqueue, &rt2x00dev->intf_work); 491 + queue_work(rt2x00dev->workqueue, &rt2x00dev->intf_work); 493 492 } 494 493 EXPORT_SYMBOL_GPL(rt2x00lib_beacondone); 495 494 ··· 1004 1003 /* 1005 1004 * Initialize configuration work. 1006 1005 */ 1006 + rt2x00dev->workqueue = create_singlethread_workqueue("rt2x00lib"); 1007 + if (!rt2x00dev->workqueue) 1008 + goto exit; 1009 + 1007 1010 INIT_WORK(&rt2x00dev->intf_work, rt2x00lib_intf_scheduled); 1008 1011 INIT_WORK(&rt2x00dev->filter_work, rt2x00lib_packetfilter_scheduled); 1009 1012 INIT_DELAYED_WORK(&rt2x00dev->link.work, rt2x00lib_link_tuner); ··· 1066 1061 rt2x00debug_deregister(rt2x00dev); 1067 1062 rt2x00rfkill_free(rt2x00dev); 1068 1063 rt2x00leds_unregister(rt2x00dev); 1064 + 1065 + /* 1066 + * Stop all queued work. Note that most tasks will already be halted 1067 + * during rt2x00lib_disable_radio() and rt2x00lib_uninitialize(). 1068 + */ 1069 + flush_workqueue(rt2x00dev->workqueue); 1070 + destroy_workqueue(rt2x00dev->workqueue); 1069 1071 1070 1072 /* 1071 1073 * Free ieee80211_hw memory.
+2 -2
drivers/net/wireless/rt2x00/rt2x00mac.c
··· 431 431 if (!test_bit(DRIVER_REQUIRE_SCHEDULED, &rt2x00dev->flags)) 432 432 rt2x00dev->ops->lib->config_filter(rt2x00dev, *total_flags); 433 433 else 434 - queue_work(rt2x00dev->hw->workqueue, &rt2x00dev->filter_work); 434 + queue_work(rt2x00dev->workqueue, &rt2x00dev->filter_work); 435 435 } 436 436 EXPORT_SYMBOL_GPL(rt2x00mac_configure_filter); 437 437 ··· 512 512 memcpy(&intf->conf, bss_conf, sizeof(*bss_conf)); 513 513 if (delayed) { 514 514 intf->delayed_flags |= delayed; 515 - queue_work(rt2x00dev->hw->workqueue, &rt2x00dev->intf_work); 515 + queue_work(rt2x00dev->workqueue, &rt2x00dev->intf_work); 516 516 } 517 517 spin_unlock(&intf->lock); 518 518 }
+21 -15
drivers/net/wireless/rt2x00/rt73usb.c
··· 134 134 * Wait until the BBP becomes ready. 135 135 */ 136 136 reg = rt73usb_bbp_check(rt2x00dev); 137 - if (rt2x00_get_field32(reg, PHY_CSR3_BUSY)) { 138 - ERROR(rt2x00dev, "PHY_CSR3 register busy. Write failed.\n"); 139 - mutex_unlock(&rt2x00dev->usb_cache_mutex); 140 - return; 141 - } 137 + if (rt2x00_get_field32(reg, PHY_CSR3_BUSY)) 138 + goto exit_fail; 142 139 143 140 /* 144 141 * Write the data into the BBP. ··· 148 151 149 152 rt73usb_register_write_lock(rt2x00dev, PHY_CSR3, reg); 150 153 mutex_unlock(&rt2x00dev->usb_cache_mutex); 154 + 155 + return; 156 + 157 + exit_fail: 158 + mutex_unlock(&rt2x00dev->usb_cache_mutex); 159 + 160 + ERROR(rt2x00dev, "PHY_CSR3 register busy. Write failed.\n"); 151 161 } 152 162 153 163 static void rt73usb_bbp_read(struct rt2x00_dev *rt2x00dev, ··· 168 164 * Wait until the BBP becomes ready. 169 165 */ 170 166 reg = rt73usb_bbp_check(rt2x00dev); 171 - if (rt2x00_get_field32(reg, PHY_CSR3_BUSY)) { 172 - ERROR(rt2x00dev, "PHY_CSR3 register busy. Read failed.\n"); 173 - mutex_unlock(&rt2x00dev->usb_cache_mutex); 174 - return; 175 - } 167 + if (rt2x00_get_field32(reg, PHY_CSR3_BUSY)) 168 + goto exit_fail; 176 169 177 170 /* 178 171 * Write the request into the BBP. ··· 185 184 * Wait until the BBP becomes ready. 186 185 */ 187 186 reg = rt73usb_bbp_check(rt2x00dev); 188 - if (rt2x00_get_field32(reg, PHY_CSR3_BUSY)) { 189 - ERROR(rt2x00dev, "PHY_CSR3 register busy. Read failed.\n"); 190 - *value = 0xff; 191 - return; 192 - } 187 + if (rt2x00_get_field32(reg, PHY_CSR3_BUSY)) 188 + goto exit_fail; 193 189 194 190 *value = rt2x00_get_field32(reg, PHY_CSR3_VALUE); 195 191 mutex_unlock(&rt2x00dev->usb_cache_mutex); 192 + 193 + return; 194 + 195 + exit_fail: 196 + mutex_unlock(&rt2x00dev->usb_cache_mutex); 197 + 198 + ERROR(rt2x00dev, "PHY_CSR3 register busy. Read failed.\n"); 199 + *value = 0xff; 196 200 } 197 201 198 202 static void rt73usb_rf_write(struct rt2x00_dev *rt2x00dev,
+5 -1
include/linux/inet_lro.h
··· 84 84 from received packets and eth protocol 85 85 is still ETH_P_8021Q */ 86 86 87 - u32 ip_summed; /* Set in non generated SKBs in page mode */ 87 + /* 88 + * Set for generated SKBs that are not added to 89 + * the frag list in fragmented mode 90 + */ 91 + u32 ip_summed; 88 92 u32 ip_summed_aggr; /* Set in aggregated SKBs: CHECKSUM_UNNECESSARY 89 93 * or CHECKSUM_NONE */ 90 94
+4
include/linux/netdevice.h
··· 88 88 #define NETDEV_TX_BUSY 1 /* driver tx path was busy*/ 89 89 #define NETDEV_TX_LOCKED -1 /* driver tx lock was already taken */ 90 90 91 + #ifdef __KERNEL__ 92 + 91 93 /* 92 94 * Compute the worst case header length according to the protocols 93 95 * used. ··· 115 113 #else 116 114 #define MAX_HEADER (LL_MAX_HEADER + 48) 117 115 #endif 116 + 117 + #endif /* __KERNEL__ */ 118 118 119 119 struct net_device_subqueue 120 120 {
+6
include/net/ipv6.h
··· 365 365 a->s6_addr32[2] | a->s6_addr32[3] ) == 0); 366 366 } 367 367 368 + static inline int ipv6_addr_loopback(const struct in6_addr *a) 369 + { 370 + return ((a->s6_addr32[0] | a->s6_addr32[1] | 371 + a->s6_addr32[2] | (a->s6_addr32[3] ^ htonl(1))) == 0); 372 + } 373 + 368 374 static inline int ipv6_addr_v4mapped(const struct in6_addr *a) 369 375 { 370 376 return ((a->s6_addr32[0] | a->s6_addr32[1] |
+11
include/net/net_namespace.h
··· 95 95 #ifdef CONFIG_NET_NS 96 96 extern void __put_net(struct net *net); 97 97 98 + static inline int net_alive(struct net *net) 99 + { 100 + return net && atomic_read(&net->count); 101 + } 102 + 98 103 static inline struct net *get_net(struct net *net) 99 104 { 100 105 atomic_inc(&net->count); ··· 130 125 return net1 == net2; 131 126 } 132 127 #else 128 + 129 + static inline int net_alive(struct net *net) 130 + { 131 + return 1; 132 + } 133 + 133 134 static inline struct net *get_net(struct net *net) 134 135 { 135 136 return net;
+5 -1
net/core/dev.c
··· 2107 2107 2108 2108 rcu_read_lock(); 2109 2109 2110 + /* Don't receive packets in an exiting network namespace */ 2111 + if (!net_alive(dev_net(skb->dev))) 2112 + goto out; 2113 + 2110 2114 #ifdef CONFIG_NET_CLS_ACT 2111 2115 if (skb->tc_verd & TC_NCLS) { 2112 2116 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd); ··· 3038 3034 /** 3039 3035 * dev_unicast_add - add a secondary unicast address 3040 3036 * @dev: device 3041 - * @addr: address to delete 3037 + * @addr: address to add 3042 3038 * @alen: length of @addr 3043 3039 * 3044 3040 * Add a secondary unicast address to the device or increase
+3
net/core/net_namespace.c
··· 140 140 struct pernet_operations *ops; 141 141 struct net *net; 142 142 143 + /* Be very certain incoming network packets will not find us */ 144 + rcu_barrier(); 145 + 143 146 net = container_of(work, struct net, work); 144 147 145 148 mutex_lock(&net_mutex);
+12 -5
net/core/skbuff.c
··· 1290 1290 { 1291 1291 unsigned int nr_pages = spd->nr_pages; 1292 1292 unsigned int poff, plen, len, toff, tlen; 1293 - int headlen, seg; 1293 + int headlen, seg, error = 0; 1294 1294 1295 1295 toff = *offset; 1296 1296 tlen = *total_len; 1297 - if (!tlen) 1297 + if (!tlen) { 1298 + error = 1; 1298 1299 goto err; 1300 + } 1299 1301 1300 1302 /* 1301 1303 * if the offset is greater than the linear part, go directly to ··· 1339 1337 * just jump directly to update and return, no point 1340 1338 * in going over fragments when the output is full. 1341 1339 */ 1342 - if (spd_fill_page(spd, virt_to_page(p), plen, poff, skb)) 1340 + error = spd_fill_page(spd, virt_to_page(p), plen, poff, skb); 1341 + if (error) 1343 1342 goto done; 1344 1343 1345 1344 tlen -= plen; ··· 1370 1367 if (!plen) 1371 1368 break; 1372 1369 1373 - if (spd_fill_page(spd, f->page, plen, poff, skb)) 1370 + error = spd_fill_page(spd, f->page, plen, poff, skb); 1371 + if (error) 1374 1372 break; 1375 1373 1376 1374 tlen -= plen; ··· 1384 1380 return 0; 1385 1381 } 1386 1382 err: 1387 - return 1; 1383 + /* update the offset to reflect the linear part skip, if any */ 1384 + if (!error) 1385 + *offset = toff; 1386 + return error; 1388 1387 } 1389 1388 1390 1389 /*
+11 -5
net/ipv4/inet_fragment.c
··· 192 192 
 193 193 static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf, 
 194 194 struct inet_frag_queue *qp_in, struct inet_frags *f, 
 195 - unsigned int hash, void *arg) 
 195 + void *arg) 
 196 196 { 
 197 197 struct inet_frag_queue *qp; 
 198 198 #ifdef CONFIG_SMP 
 199 199 struct hlist_node *n; 
 200 200 #endif 
 201 + unsigned int hash; 
 201 202 
 202 203 write_lock(&f->lock); 
 204 + /* 
 205 + * While we stayed w/o the lock other CPU could update 
 206 + * the rnd seed, so we need to re-calculate the hash 
 207 + * chain. Fortunately the qp_in can be used to get one. 
 208 + */ 
 209 + hash = f->hashfn(qp_in); 
 203 210 #ifdef CONFIG_SMP 
 204 211 /* With SMP race we have to recheck hash table, because 
 205 212 * such entry could be created on other cpu, while we 
··· 254 247 } 
 255 248 
 256 249 static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf, 
 257 - struct inet_frags *f, void *arg, unsigned int hash) 
 250 + struct inet_frags *f, void *arg) 
 258 251 { 
 259 252 struct inet_frag_queue *q; 
 260 253 
··· 262 255 if (q == NULL) 
 263 256 return NULL; 
 264 257 
 265 - return inet_frag_intern(nf, q, f, hash, arg); 
 258 + return inet_frag_intern(nf, q, f, arg); 
 266 259 } 
 267 260 
 268 261 struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, 
··· 271 264 struct inet_frag_queue *q; 
 272 265 struct hlist_node *n; 
 273 266 
 274 - read_lock(&f->lock); 
 275 267 hlist_for_each_entry(q, n, &f->hash[hash], list) { 
 276 268 if (q->net == nf && f->match(q, key)) { 
 277 269 atomic_inc(&q->refcnt); 
··· 280 274 } 
 281 275 read_unlock(&f->lock); 
 282 276 
 283 277 return inet_frag_create(nf, f, key); 
 284 278 } 
 285 279 EXPORT_SYMBOL(inet_frag_find);
+1 -2
net/ipv4/inet_lro.c
··· 383 383 out2: /* send aggregated SKBs to stack */ 384 384 lro_flush(lro_mgr, lro_desc); 385 385 386 - out: /* Original SKB has to be posted to stack */ 387 - skb->ip_summed = lro_mgr->ip_summed; 386 + out: 388 387 return 1; 389 388 } 390 389
+2
net/ipv4/ip_fragment.c
··· 227 227 228 228 arg.iph = iph; 229 229 arg.user = user; 230 + 231 + read_lock(&ip4_frags.lock); 230 232 hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol); 231 233 232 234 q = inet_frag_find(&net->ipv4.frags, &ip4_frags, &arg, hash);
+6 -3
net/ipv4/tcp.c
··· 258 258 #include <linux/socket.h> 259 259 #include <linux/random.h> 260 260 #include <linux/bootmem.h> 261 + #include <linux/highmem.h> 262 + #include <linux/swap.h> 261 263 #include <linux/cache.h> 262 264 #include <linux/err.h> 263 265 #include <linux/crypto.h> ··· 2690 2688 void __init tcp_init(void) 2691 2689 { 2692 2690 struct sk_buff *skb = NULL; 2693 - unsigned long limit; 2691 + unsigned long nr_pages, limit; 2694 2692 int order, i, max_share; 2695 2693 2696 2694 BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > sizeof(skb->cb)); ··· 2759 2757 * is up to 1/2 at 256 MB, decreasing toward zero with the amount of 2760 2758 * memory, with a floor of 128 pages. 2761 2759 */ 2762 - limit = min(nr_all_pages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT); 2763 - limit = (limit * (nr_all_pages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11); 2760 + nr_pages = totalram_pages - totalhigh_pages; 2761 + limit = min(nr_pages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT); 2762 + limit = (limit * (nr_pages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11); 2764 2763 limit = max(limit, 128UL); 2765 2764 sysctl_tcp_mem[0] = limit / 4 * 3; 2766 2765 sysctl_tcp_mem[1] = limit;
+3 -3
net/ipv4/tcp_ipv4.c
··· 2189 2189 } 2190 2190 2191 2191 seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX " 2192 - "%08X %5d %8d %lu %d %p %u %u %u %u %d%n", 2192 + "%08X %5d %8d %lu %d %p %lu %lu %u %u %d%n", 2193 2193 i, src, srcp, dest, destp, sk->sk_state, 2194 2194 tp->write_seq - tp->snd_una, 2195 2195 sk->sk_state == TCP_LISTEN ? sk->sk_ack_backlog : ··· 2201 2201 icsk->icsk_probes_out, 2202 2202 sock_i_ino(sk), 2203 2203 atomic_read(&sk->sk_refcnt), sk, 2204 - icsk->icsk_rto, 2205 - icsk->icsk_ack.ato, 2204 + jiffies_to_clock_t(icsk->icsk_rto), 2205 + jiffies_to_clock_t(icsk->icsk_ack.ato), 2206 2206 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong, 2207 2207 tp->snd_cwnd, 2208 2208 tp->snd_ssthresh >= 0xFFFF ? -1 : tp->snd_ssthresh,
+9
net/ipv6/ip6_input.c
··· 100 100 if (hdr->version != 6) 101 101 goto err; 102 102 103 + /* 104 + * RFC4291 2.5.3 105 + * A packet received on an interface with a destination address 106 + * of loopback must be dropped. 107 + */ 108 + if (!(dev->flags & IFF_LOOPBACK) && 109 + ipv6_addr_loopback(&hdr->daddr)) 110 + goto err; 111 + 103 112 skb->transport_header = skb->network_header + sizeof(*hdr); 104 113 IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr); 105 114
+7 -4
net/ipv6/ipv6_sockglue.c
··· 343 343 case IPV6_DSTOPTS: 344 344 { 345 345 struct ipv6_txoptions *opt; 346 + 347 + /* remove any sticky options header with a zero option 348 + * length, per RFC3542. 349 + */ 346 350 if (optlen == 0) 347 351 optval = NULL; 352 + else if (optlen < sizeof(struct ipv6_opt_hdr) || 353 + optlen & 0x7 || optlen > 8 * 255) 354 + goto e_inval; 348 355 349 356 /* hop-by-hop / destination options are privileged option */ 350 357 retv = -EPERM; 351 358 if (optname != IPV6_RTHDR && !capable(CAP_NET_RAW)) 352 359 break; 353 - 354 - if (optlen < sizeof(struct ipv6_opt_hdr) || 355 - optlen & 0x7 || optlen > 8 * 255) 356 - goto e_inval; 357 360 358 361 opt = ipv6_renew_options(sk, np->opt, optname, 359 362 (struct ipv6_opt_hdr __user *)optval,
+1 -1
net/ipv6/netfilter/ip6table_mangle.c
··· 129 129 .priority = NF_IP6_PRI_MANGLE, 130 130 }, 131 131 { 132 - .hook = ip6t_local_hook, 132 + .hook = ip6t_route_hook, 133 133 .owner = THIS_MODULE, 134 134 .pf = PF_INET6, 135 135 .hooknum = NF_INET_LOCAL_IN,
+2 -1
net/ipv6/netfilter/nf_conntrack_reasm.c
··· 207 207 arg.id = id; 208 208 arg.src = src; 209 209 arg.dst = dst; 210 + 211 + read_lock_bh(&nf_frags.lock); 210 212 hash = ip6qhashfn(id, src, dst); 211 213 212 - local_bh_disable(); 213 214 q = inet_frag_find(&nf_init_frags, &nf_frags, &arg, hash); 214 215 local_bh_enable(); 215 216 if (q == NULL)
+2
net/ipv6/reassembly.c
··· 245 245 arg.id = id; 246 246 arg.src = src; 247 247 arg.dst = dst; 248 + 249 + read_lock(&ip6_frags.lock); 248 250 hash = ip6qhashfn(id, src, dst); 249 251 250 252 q = inet_frag_find(&net->ipv6.frags, &ip6_frags, &arg, hash);
+3 -3
net/ipv6/route.c
··· 238 238 static inline struct rt6_info *rt6_device_match(struct net *net, 239 239 struct rt6_info *rt, 240 240 int oif, 241 - int strict) 241 + int flags) 242 242 { 243 243 struct rt6_info *local = NULL; 244 244 struct rt6_info *sprt; ··· 251 251 if (dev->flags & IFF_LOOPBACK) { 252 252 if (sprt->rt6i_idev == NULL || 253 253 sprt->rt6i_idev->dev->ifindex != oif) { 254 - if (strict && oif) 254 + if (flags & RT6_LOOKUP_F_IFACE && oif) 255 255 continue; 256 256 if (local && (!oif || 257 257 local->rt6i_idev->dev->ifindex == oif)) ··· 264 264 if (local) 265 265 return local; 266 266 267 - if (strict) 267 + if (flags & RT6_LOOKUP_F_IFACE) 268 268 return net->ipv6.ip6_null_entry; 269 269 } 270 270 return rt;
+3 -3
net/ipv6/tcp_ipv6.c
··· 1946 1946 1947 1947 seq_printf(seq, 1948 1948 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X " 1949 - "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %u %u %u %u %d\n", 1949 + "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %lu %lu %u %u %d\n", 1950 1950 i, 1951 1951 src->s6_addr32[0], src->s6_addr32[1], 1952 1952 src->s6_addr32[2], src->s6_addr32[3], srcp, ··· 1962 1962 icsk->icsk_probes_out, 1963 1963 sock_i_ino(sp), 1964 1964 atomic_read(&sp->sk_refcnt), sp, 1965 - icsk->icsk_rto, 1966 - icsk->icsk_ack.ato, 1965 + jiffies_to_clock_t(icsk->icsk_rto), 1966 + jiffies_to_clock_t(icsk->icsk_ack.ato), 1967 1967 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong, 1968 1968 tp->snd_cwnd, tp->snd_ssthresh>=0xFFFF?-1:tp->snd_ssthresh 1969 1969 );
+9
net/mac80211/key.c
··· 387 387 if (!key) 388 388 return; 389 389 390 + if (!key->sdata) { 391 + /* The key has not been linked yet, simply free it 392 + * and don't Oops */ 393 + if (key->conf.alg == ALG_CCMP) 394 + ieee80211_aes_key_free(key->u.ccmp.tfm); 395 + kfree(key); 396 + return; 397 + } 398 + 390 399 spin_lock_irqsave(&key->sdata->local->key_lock, flags); 391 400 __ieee80211_key_free(key); 392 401 spin_unlock_irqrestore(&key->sdata->local->key_lock, flags);
+1 -1
net/netlabel/netlabel_unlabeled.c
··· 1534 1534 } 1535 1535 } 1536 1536 list_for_each_entry_rcu(addr6, &iface->addr6_list, list) { 1537 - if (addr6->valid || iter_addr6++ < skip_addr6) 1537 + if (!addr6->valid || iter_addr6++ < skip_addr6) 1538 1538 continue; 1539 1539 if (netlbl_unlabel_staticlist_gen(NLBL_UNLABEL_C_STATICLISTDEF, 1540 1540 iface,
+4 -3
net/netlink/attr.c
··· 132 132 * @maxtype: maximum attribute type to be expected 
 133 133 * @head: head of attribute stream 
 134 134 * @len: length of attribute stream 
 135 + * @policy: validation policy 
 135 136 * 
 136 137 * Parses a stream of attributes and stores a pointer to each attribute in 
 137 138 * the tb array accessible via the attribute type. Attributes with a type 
··· 195 194 /** 
 196 195 * nla_strlcpy - Copy string attribute payload into a sized buffer 
 197 196 * @dst: where to copy the string to 
 198 - * @src: attribute to copy the string from 
 197 + * @nla: attribute to copy the string from 
 199 198 * @dstsize: size of destination buffer 
 200 199 * 
 201 200 * Copies at most dstsize - 1 bytes into the destination buffer. 
··· 341 340 } 
 342 341 
 343 342 /** 
 344 - * nla_reserve - reserve room for attribute without header 
 343 + * nla_reserve_nohdr - reserve room for attribute without header 
 345 344 * @skb: socket buffer to reserve room on 
 346 - * @len: length of attribute payload 
 345 + * @attrlen: length of attribute payload 
 347 346 * 
 348 347 * Reserves room for attribute payload without a header. 
 349 348 *
-11
net/sched/Kconfig
··· 106 106 To compile this code as a module, choose M here: the 107 107 module will be called sch_prio. 108 108 109 - config NET_SCH_RR 110 - tristate "Multi Band Round Robin Queuing (RR)" 111 - select NET_SCH_PRIO 112 - ---help--- 113 - Say Y here if you want to use an n-band round robin packet 114 - scheduler. 115 - 116 - The module uses sch_prio for its framework and is aliased as 117 - sch_rr, so it will load sch_prio, although it is referred 118 - to using sch_rr. 119 - 120 109 config NET_SCH_RED 121 110 tristate "Random Early Detection (RED)" 122 111 ---help---
+1 -1
net/sched/sch_generic.c
··· 468 468 469 469 return sch; 470 470 errout: 471 - return ERR_PTR(-err); 471 + return ERR_PTR(err); 472 472 } 473 473 474 474 struct Qdisc * qdisc_create_dflt(struct net_device *dev, struct Qdisc_ops *ops,
+3 -1
net/sctp/socket.c
··· 4512 4512 if (copy_from_user(&getaddrs, optval, len)) 4513 4513 return -EFAULT; 4514 4514 4515 - if (getaddrs.addr_num <= 0) return -EINVAL; 4515 + if (getaddrs.addr_num <= 0 || 4516 + getaddrs.addr_num >= (INT_MAX / sizeof(union sctp_addr))) 4517 + return -EINVAL; 4516 4518 /* 4517 4519 * For UDP-style sockets, id specifies the association to query. 4518 4520 * If the id field is set to the value '0' then the locally bound
+24 -28
net/unix/af_unix.c
··· 485 485 static int unix_accept(struct socket *, struct socket *, int); 486 486 static int unix_getname(struct socket *, struct sockaddr *, int *, int); 487 487 static unsigned int unix_poll(struct file *, struct socket *, poll_table *); 488 - static unsigned int unix_datagram_poll(struct file *, struct socket *, 489 - poll_table *); 488 + static unsigned int unix_dgram_poll(struct file *, struct socket *, 489 + poll_table *); 490 490 static int unix_ioctl(struct socket *, unsigned int, unsigned long); 491 491 static int unix_shutdown(struct socket *, int); 492 492 static int unix_stream_sendmsg(struct kiocb *, struct socket *, ··· 532 532 .socketpair = unix_socketpair, 533 533 .accept = sock_no_accept, 534 534 .getname = unix_getname, 535 - .poll = unix_datagram_poll, 535 + .poll = unix_dgram_poll, 536 536 .ioctl = unix_ioctl, 537 537 .listen = sock_no_listen, 538 538 .shutdown = unix_shutdown, ··· 553 553 .socketpair = unix_socketpair, 554 554 .accept = unix_accept, 555 555 .getname = unix_getname, 556 - .poll = unix_datagram_poll, 556 + .poll = unix_dgram_poll, 557 557 .ioctl = unix_ioctl, 558 558 .listen = unix_listen, 559 559 .shutdown = unix_shutdown, ··· 1992 1992 return mask; 1993 1993 } 1994 1994 1995 - static unsigned int unix_datagram_poll(struct file *file, struct socket *sock, 1996 - poll_table *wait) 1995 + static unsigned int unix_dgram_poll(struct file *file, struct socket *sock, 1996 + poll_table *wait) 1997 1997 { 1998 - struct sock *sk = sock->sk, *peer; 1999 - unsigned int mask; 1998 + struct sock *sk = sock->sk, *other; 1999 + unsigned int mask, writable; 2000 2000 2001 2001 poll_wait(file, sk->sk_sleep, wait); 2002 - 2003 - peer = unix_peer_get(sk); 2004 - if (peer) { 2005 - if (peer != sk) { 2006 - /* 2007 - * Writability of a connected socket additionally 2008 - * depends on the state of the receive queue of the 2009 - * peer. 
2010 - */ 2011 - poll_wait(file, &unix_sk(peer)->peer_wait, wait); 2012 - } else { 2013 - sock_put(peer); 2014 - peer = NULL; 2015 - } 2016 - } 2017 - 2018 2002 mask = 0; 2019 2003 2020 2004 /* exceptional events? */ ··· 2024 2040 } 2025 2041 2026 2042 /* writable? */ 2027 - if (unix_writable(sk) && !(peer && unix_recvq_full(peer))) 2043 + writable = unix_writable(sk); 2044 + if (writable) { 2045 + other = unix_peer_get(sk); 2046 + if (other) { 2047 + if (unix_peer(other) != sk) { 2048 + poll_wait(file, &unix_sk(other)->peer_wait, 2049 + wait); 2050 + if (unix_recvq_full(other)) 2051 + writable = 0; 2052 + } 2053 + 2054 + sock_put(other); 2055 + } 2056 + } 2057 + 2058 + if (writable) 2028 2059 mask |= POLLOUT | POLLWRNORM | POLLWRBAND; 2029 2060 else 2030 2061 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); 2031 - 2032 - if (peer) 2033 - sock_put(peer); 2034 2062 2035 2063 return mask; 2036 2064 }