Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (42 commits)
virtio_net: Make delayed refill more reliable
sfc: Use fixed-size buffers for MCDI NVRAM requests
sfc: Add workspace for GMAC bug workaround to MCDI MAC_STATS buffer
tcp_probe: avoid modulus operation and wrap fix
qlge: Only free resources if they were allocated
netns xfrm: deal with dst entries in netns
sky2: revert config space change
vlan: fix vlan_skb_recv()
netns xfrm: fix "ip xfrm state|policy count" misreport
sky2: Enable/disable WOL per hardware device
net: Fix IPv6 GSO type checks in Intel ethernet drivers
igb/igbvf: cleanup exception handling in tx_map_adv
MAINTAINERS: Add Intel igbvf maintainer
e1000/e1000e: don't use small hardware rx buffers
fmvj18x_cs: add new id (Panasonic lan & modem card)
be2net: swap only first 2 fields of mcc_wrb
Please add support for Microsoft MN-120 PCMCIA network card
be2net: fix bug in rx page posting
wimax/i2400m: Add support for more i6x50 SKUs
e1000e: enhance frame fragment detection
...

+389 -193
+4 -4
MAINTAINERS
···
 
 ASYNCHRONOUS TRANSFERS/TRANSFORMS (IOAT) API
 M:	Dan Williams <dan.j.williams@intel.com>
-M:	Maciej Sosnowski <maciej.sosnowski@intel.com>
 W:	http://sourceforge.net/projects/xscaleiop
 S:	Supported
 F:	Documentation/crypto/async-tx-api.txt
···
 F:	fs/dlm/
 
 DMA GENERIC OFFLOAD ENGINE SUBSYSTEM
-M:	Maciej Sosnowski <maciej.sosnowski@intel.com>
 M:	Dan Williams <dan.j.williams@intel.com>
 S:	Supported
 F:	drivers/dma/
···
 F:	arch/x86/kernel/microcode_intel.c
 
 INTEL I/OAT DMA DRIVER
-M:	Maciej Sosnowski <maciej.sosnowski@intel.com>
+M:	Dan Williams <dan.j.williams@intel.com>
 S:	Supported
 F:	drivers/dma/ioat*
···
 S:	Maintained
 F:	drivers/net/ixp2000/
 
-INTEL ETHERNET DRIVERS (e100/e1000/e1000e/igb/ixgb/ixgbe)
+INTEL ETHERNET DRIVERS (e100/e1000/e1000e/igb/igbvf/ixgb/ixgbe)
 M:	Jeff Kirsher <jeffrey.t.kirsher@intel.com>
 M:	Jesse Brandeburg <jesse.brandeburg@intel.com>
 M:	Bruce Allan <bruce.w.allan@intel.com>
+M:	Alex Duyck <alexander.h.duyck@intel.com>
 M:	PJ Waskiewicz <peter.p.waskiewicz.jr@intel.com>
 M:	John Ronciak <john.ronciak@intel.com>
 L:	e1000-devel@lists.sourceforge.net
···
 F:	drivers/net/e1000/
 F:	drivers/net/e1000e/
 F:	drivers/net/igb/
+F:	drivers/net/igbvf/
 F:	drivers/net/ixgb/
 F:	drivers/net/ixgbe/
+1 -1
drivers/net/benet/be_cmds.c
···
 		MCC_WRB_SGE_CNT_SHIFT;
 	wrb->payload_length = payload_len;
 	wrb->tag0 = opcode;
-	be_dws_cpu_to_le(wrb, 20);
+	be_dws_cpu_to_le(wrb, 8);
 }
 
 /* Don't touch the hdr after it's prepared */
+5 -3
drivers/net/benet/be_main.c
···
 static void be_post_rx_frags(struct be_adapter *adapter)
 {
 	struct be_rx_page_info *page_info_tbl = adapter->rx_obj.page_info_tbl;
-	struct be_rx_page_info *page_info = NULL;
+	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
 	struct be_queue_info *rxq = &adapter->rx_obj.q;
 	struct page *pagep = NULL;
 	struct be_eth_rx_d *rxd;
···
 		rxd = queue_head_node(rxq);
 		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
 		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
-		queue_head_inc(rxq);
 
 		/* Any space left in the current big page for another frag? */
 		if ((page_offset + rx_frag_size + rx_frag_size) >
···
 			pagep = NULL;
 			page_info->last_page_user = true;
 		}
+
+		prev_page_info = page_info;
+		queue_head_inc(rxq);
 		page_info = &page_info_tbl[rxq->head];
 	}
 	if (pagep)
-		page_info->last_page_user = true;
+		prev_page_info->last_page_user = true;
 
 	if (posted) {
 		atomic_add(posted, &rxq->used);
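The fix above comes down to remembering the previous ring slot before advancing the head, so the "last page user" mark lands on the final frag actually posted rather than on the next, unposted slot. A minimal userspace sketch of that pattern (the ring size, slot type, and loop body are illustrative stand-ins, not the driver's types):

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

#define RING_SIZE 8

struct slot { bool last_page_user; };

int main(void)
{
	struct slot ring[RING_SIZE] = { { false } };
	unsigned int head = 0;
	struct slot *cur = &ring[head], *prev = NULL;
	int posted;

	for (posted = 0; posted < 3; posted++) {
		/* ... fill cur with a frag ... */
		prev = cur;			/* remember before advancing */
		head = (head + 1) % RING_SIZE;	/* queue_head_inc() */
		cur = &ring[head];
	}
	/* Mark the last slot actually filled; marking cur instead would
	 * touch an unposted slot, which was the bug. */
	if (posted)
		prev->last_page_user = true;
	assert(ring[2].last_page_user && !ring[3].last_page_user);
	return 0;
}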
+3 -2
drivers/net/bfin_mac.c
···
 #include <asm/dma.h>
 #include <linux/dma-mapping.h>
 
+#include <asm/dpmc.h>
 #include <asm/blackfin.h>
 #include <asm/cacheflush.h>
 #include <asm/portmux.h>
···
 	u32 sclk, mdc_div;
 
 	/* Enable PHY output early */
-	if (!(bfin_read_VR_CTL() & PHYCLKOE))
-		bfin_write_VR_CTL(bfin_read_VR_CTL() | PHYCLKOE);
+	if (!(bfin_read_VR_CTL() & CLKBUFOE))
+		bfin_write_VR_CTL(bfin_read_VR_CTL() | CLKBUFOE);
 
 	sclk = get_sclk();
 	mdc_div = ((sclk / MDC_CLK) / 2) - 1;
+2
drivers/net/e1000/e1000.h
···
 	/* for ioport free */
 	int bars;
 	int need_ioport;
+
+	bool discarding;
 };
 
 enum e1000_state_t {
+17 -26
drivers/net/e1000/e1000_main.c
···
 	rctl &= ~E1000_RCTL_SZ_4096;
 	rctl |= E1000_RCTL_BSEX;
 	switch (adapter->rx_buffer_len) {
-	case E1000_RXBUFFER_256:
-		rctl |= E1000_RCTL_SZ_256;
-		rctl &= ~E1000_RCTL_BSEX;
-		break;
-	case E1000_RXBUFFER_512:
-		rctl |= E1000_RCTL_SZ_512;
-		rctl &= ~E1000_RCTL_BSEX;
-		break;
-	case E1000_RXBUFFER_1024:
-		rctl |= E1000_RCTL_SZ_1024;
-		rctl &= ~E1000_RCTL_BSEX;
-		break;
 	case E1000_RXBUFFER_2048:
 	default:
 		rctl |= E1000_RCTL_SZ_2048;
···
 dma_error:
 	dev_err(&pdev->dev, "TX DMA map failed\n");
 	buffer_info->dma = 0;
-	count--;
-
-	while (count >= 0) {
+	if (count)
 		count--;
-		i--;
-		if (i < 0)
+
+	while (count--) {
+		if (i==0)
 			i += tx_ring->count;
+		i--;
 		buffer_info = &tx_ring->buffer_info[i];
 		e1000_unmap_and_free_tx_resource(adapter, buffer_info);
 	}
···
 	 * however with the new *_jumbo_rx* routines, jumbo receives will use
 	 * fragmented skbs */
 
-	if (max_frame <= E1000_RXBUFFER_256)
-		adapter->rx_buffer_len = E1000_RXBUFFER_256;
-	else if (max_frame <= E1000_RXBUFFER_512)
-		adapter->rx_buffer_len = E1000_RXBUFFER_512;
-	else if (max_frame <= E1000_RXBUFFER_1024)
-		adapter->rx_buffer_len = E1000_RXBUFFER_1024;
-	else if (max_frame <= E1000_RXBUFFER_2048)
+	if (max_frame <= E1000_RXBUFFER_2048)
 		adapter->rx_buffer_len = E1000_RXBUFFER_2048;
 	else
 #if (PAGE_SIZE >= E1000_RXBUFFER_16384)
···
 
 		length = le16_to_cpu(rx_desc->length);
 		/* !EOP means multiple descriptors were used to store a single
-		 * packet, also make sure the frame isn't just CRC only */
-		if (unlikely(!(status & E1000_RXD_STAT_EOP) || (length <= 4))) {
+		 * packet, if thats the case we need to toss it.  In fact, we
+		 * need to toss every packet with the EOP bit clear and the
+		 * next frame that _does_ have the EOP bit set, as it is by
+		 * definition only a frame fragment
+		 */
+		if (unlikely(!(status & E1000_RXD_STAT_EOP)))
+			adapter->discarding = true;
+
+		if (adapter->discarding) {
 			/* All receives must fit into a single buffer */
 			E1000_DBG("%s: Receive packet consumed multiple"
 				  " buffers\n", netdev->name);
 			/* recycle */
 			buffer_info->skb = skb;
+			if (status & E1000_RXD_STAT_EOP)
+				adapter->discarding = false;
 			goto next_desc;
 		}
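The unwind loop adopted here (and repeated in the e1000e, igbvf, ixgb and ixgbe hunks below) walks exactly `count` previously-mapped ring entries backwards from the failing index, wrapping cleanly at zero. A small userspace sketch of the arithmetic, with free_entry() standing in for the driver's cleanup:

#include <assert.h>

#define RING 16

static int freed[RING];

static void free_entry(unsigned int idx)
{
	freed[idx] = 1;
}

int main(void)
{
	unsigned int i = 2;	/* index of the descriptor that failed to map */
	int count = 4;		/* entries touched, including the failed one */

	if (count)
		count--;	/* the failed entry itself has nothing mapped */

	while (count--) {
		if (i == 0)
			i += RING;
		i--;
		free_entry(i);	/* unmap/free this descriptor */
	}
	/* the three predecessors (1, 0, 15) are released, nothing else */
	assert(freed[1] && freed[0] && freed[15] && !freed[2]);
	return 0;
}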
+1
drivers/net/e1000e/e1000.h
···
 /* CRC Stripping defines */
 #define FLAG2_CRC_STRIPPING               (1 << 0)
 #define FLAG2_HAS_PHY_WAKEUP              (1 << 1)
+#define FLAG2_IS_DISCARDING               (1 << 2)
 
 #define E1000_RX_DESC_PS(R, i)	    \
 	(&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
+28 -29
drivers/net/e1000e/netdev.c
···
 
 		length = le16_to_cpu(rx_desc->length);
 
-		/* !EOP means multiple descriptors were used to store a single
-		 * packet, also make sure the frame isn't just CRC only */
-		if (!(status & E1000_RXD_STAT_EOP) || (length <= 4)) {
+		/*
+		 * !EOP means multiple descriptors were used to store a single
+		 * packet, if that's the case we need to toss it.  In fact, we
+		 * need to toss every packet with the EOP bit clear and the
+		 * next frame that _does_ have the EOP bit set, as it is by
+		 * definition only a frame fragment
+		 */
+		if (unlikely(!(status & E1000_RXD_STAT_EOP)))
+			adapter->flags2 |= FLAG2_IS_DISCARDING;
+
+		if (adapter->flags2 & FLAG2_IS_DISCARDING) {
 			/* All receives must fit into a single buffer */
 			e_dbg("Receive packet consumed multiple buffers\n");
 			/* recycle */
 			buffer_info->skb = skb;
+			if (status & E1000_RXD_STAT_EOP)
+				adapter->flags2 &= ~FLAG2_IS_DISCARDING;
 			goto next_desc;
 		}
···
 			       PCI_DMA_FROMDEVICE);
 		buffer_info->dma = 0;
 
-		if (!(staterr & E1000_RXD_STAT_EOP)) {
+		/* see !EOP comment in other rx routine */
+		if (!(staterr & E1000_RXD_STAT_EOP))
+			adapter->flags2 |= FLAG2_IS_DISCARDING;
+
+		if (adapter->flags2 & FLAG2_IS_DISCARDING) {
 			e_dbg("Packet Split buffers didn't pick up the full "
 			      "packet\n");
 			dev_kfree_skb_irq(skb);
+			if (staterr & E1000_RXD_STAT_EOP)
+				adapter->flags2 &= ~FLAG2_IS_DISCARDING;
 			goto next_desc;
 		}
···
 
 	rx_ring->next_to_clean = 0;
 	rx_ring->next_to_use = 0;
+	adapter->flags2 &= ~FLAG2_IS_DISCARDING;
 
 	writel(0, adapter->hw.hw_addr + rx_ring->head);
 	writel(0, adapter->hw.hw_addr + rx_ring->tail);
···
 	rctl &= ~E1000_RCTL_SZ_4096;
 	rctl |= E1000_RCTL_BSEX;
 	switch (adapter->rx_buffer_len) {
-	case 256:
-		rctl |= E1000_RCTL_SZ_256;
-		rctl &= ~E1000_RCTL_BSEX;
-		break;
-	case 512:
-		rctl |= E1000_RCTL_SZ_512;
-		rctl &= ~E1000_RCTL_BSEX;
-		break;
-	case 1024:
-		rctl |= E1000_RCTL_SZ_1024;
-		rctl &= ~E1000_RCTL_BSEX;
-		break;
 	case 2048:
 	default:
 		rctl |= E1000_RCTL_SZ_2048;
···
 						 0, IPPROTO_TCP, 0);
 		cmd_length = E1000_TXD_CMD_IP;
 		ipcse = skb_transport_offset(skb) - 1;
-	} else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
+	} else if (skb_is_gso_v6(skb)) {
 		ipv6_hdr(skb)->payload_len = 0;
 		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
 						       &ipv6_hdr(skb)->daddr,
···
 dma_error:
 	dev_err(&pdev->dev, "TX DMA map failed\n");
 	buffer_info->dma = 0;
-	count--;
-
-	while (count >= 0) {
+	if (count)
 		count--;
-		i--;
-		if (i < 0)
+
+	while (count--) {
+		if (i==0)
 			i += tx_ring->count;
+		i--;
 		buffer_info = &tx_ring->buffer_info[i];
 		e1000_put_txbuf(adapter, buffer_info);;
 	}
···
 	 * fragmented skbs
 	 */
 
-	if (max_frame <= 256)
-		adapter->rx_buffer_len = 256;
-	else if (max_frame <= 512)
-		adapter->rx_buffer_len = 512;
-	else if (max_frame <= 1024)
-		adapter->rx_buffer_len = 1024;
-	else if (max_frame <= 2048)
+	if (max_frame <= 2048)
 		adapter->rx_buffer_len = 2048;
 	else
 		adapter->rx_buffer_len = 4096;
+2 -2
drivers/net/igb/igb_main.c
···
 						 iph->daddr, 0,
 						 IPPROTO_TCP,
 						 0);
-	} else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
+	} else if (skb_is_gso_v6(skb)) {
 		ipv6_hdr(skb)->payload_len = 0;
 		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
 						       &ipv6_hdr(skb)->daddr,
···
 	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
 		struct skb_frag_struct *frag;
 
+		count++;
 		i++;
 		if (i == tx_ring->count)
 			i = 0;
···
 		if (pci_dma_mapping_error(pdev, buffer_info->dma))
 			goto dma_error;
 
-		count++;
 	}
 
 	tx_ring->buffer_info[i].skb = skb;
+7 -7
drivers/net/igbvf/netdev.c
···
 						 iph->daddr, 0,
 						 IPPROTO_TCP,
 						 0);
-	} else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
+	} else if (skb_is_gso_v6(skb)) {
 		ipv6_hdr(skb)->payload_len = 0;
 		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
 						       &ipv6_hdr(skb)->daddr,
···
 	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
 		struct skb_frag_struct *frag;
 
+		count++;
 		i++;
 		if (i == tx_ring->count)
 			i = 0;
···
 					PCI_DMA_TODEVICE);
 		if (pci_dma_mapping_error(pdev, buffer_info->dma))
 			goto dma_error;
-		count++;
 	}
 
 	tx_ring->buffer_info[i].skb = skb;
···
 	buffer_info->length = 0;
 	buffer_info->next_to_watch = 0;
 	buffer_info->mapped_as_page = false;
-	count--;
+	if (count)
+		count--;
 
 	/* clear timestamp and dma mappings for remaining portion of packet */
-	while (count >= 0) {
-		count--;
-		i--;
-		if (i < 0)
+	while (count--) {
+		if (i==0)
 			i += tx_ring->count;
+		i--;
 		buffer_info = &tx_ring->buffer_info[i];
 		igbvf_put_txbuf(adapter, buffer_info);
 	}
+5 -5
drivers/net/ixgb/ixgb_main.c
···
 dma_error:
 	dev_err(&pdev->dev, "TX DMA map failed\n");
 	buffer_info->dma = 0;
-	count--;
-
-	while (count >= 0) {
+	if (count)
 		count--;
-		i--;
-		if (i < 0)
+
+	while (count--) {
+		if (i==0)
 			i += tx_ring->count;
+		i--;
 		buffer_info = &tx_ring->buffer_info[i];
 		ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
 	}
+6 -6
drivers/net/ixgbe/ixgbe_main.c
···
 						 iph->daddr, 0,
 						 IPPROTO_TCP,
 						 0);
-	} else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
+	} else if (skb_is_gso_v6(skb)) {
 		ipv6_hdr(skb)->payload_len = 0;
 		tcp_hdr(skb)->check =
 		    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
···
 	tx_buffer_info->dma = 0;
 	tx_buffer_info->time_stamp = 0;
 	tx_buffer_info->next_to_watch = 0;
-	count--;
+	if (count)
+		count--;
 
 	/* clear timestamp and dma mappings for remaining portion of packet */
-	while (count >= 0) {
-		count--;
-		i--;
-		if (i < 0)
+	while (count--) {
+		if (i==0)
 			i += tx_ring->count;
+		i--;
 		tx_buffer_info = &tx_ring->tx_buffer_info[i];
 		ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
 	}
+1
drivers/net/pcmcia/fmvj18x_cs.c
···
 	PCMCIA_PFC_DEVICE_PROD_ID12(0, "NEC", "PK-UG-J001" ,0x18df0ba0 ,0x831b1064),
 	PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0x0d0a),
 	PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0x0e0a),
+	PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0e01),
 	PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0a05),
 	PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x1101),
 	PCMCIA_DEVICE_NULL,
+1 -3
drivers/net/phy/phy.c
···
 
 
 static void phy_change(struct work_struct *work);
-static void phy_state_machine(struct work_struct *work);
 
 /**
  * phy_start_machine - start PHY state machine tracking
···
 {
 	phydev->adjust_state = handler;
 
-	INIT_DELAYED_WORK(&phydev->state_queue, phy_state_machine);
 	schedule_delayed_work(&phydev->state_queue, HZ);
 }
 
···
  * phy_state_machine - Handle the state machine
  * @work: work_struct that describes the work to be done
  */
-static void phy_state_machine(struct work_struct *work)
+void phy_state_machine(struct work_struct *work)
 {
 	struct delayed_work *dwork = to_delayed_work(work);
 	struct phy_device *phydev =
+1
drivers/net/phy/phy_device.c
···
 	dev->state = PHY_DOWN;
 
 	mutex_init(&dev->lock);
+	INIT_DELAYED_WORK(&dev->state_queue, phy_state_machine);
 
 	return dev;
 }
+8 -7
drivers/net/qlge/qlge_main.c
···
 	err = pcie_set_readrq(pdev, 4096);
 	if (err) {
 		dev_err(&pdev->dev, "Set readrq failed.\n");
-		goto err_out;
+		goto err_out1;
 	}
 
 	err = pci_request_regions(pdev, DRV_NAME);
···
 
 	if (err) {
 		dev_err(&pdev->dev, "No usable DMA configuration.\n");
-		goto err_out;
+		goto err_out2;
 	}
 
 	/* Set PCIe reset type for EEH to fundamental. */
···
 	if (!qdev->reg_base) {
 		dev_err(&pdev->dev, "Register mapping failed.\n");
 		err = -ENOMEM;
-		goto err_out;
+		goto err_out2;
 	}
 
 	qdev->doorbell_area_size = pci_resource_len(pdev, 3);
···
 	if (!qdev->doorbell_area) {
 		dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
 		err = -ENOMEM;
-		goto err_out;
+		goto err_out2;
 	}
 
 	err = ql_get_board_info(qdev);
 	if (err) {
 		dev_err(&pdev->dev, "Register access failed.\n");
 		err = -EIO;
-		goto err_out;
+		goto err_out2;
 	}
 	qdev->msg_enable = netif_msg_init(debug, default_msg);
 	spin_lock_init(&qdev->hw_lock);
···
 	err = qdev->nic_ops->get_flash(qdev);
 	if (err) {
 		dev_err(&pdev->dev, "Invalid FLASH.\n");
-		goto err_out;
+		goto err_out2;
 	}
 
 	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
···
 			DRV_NAME, DRV_VERSION);
 	}
 	return 0;
-err_out:
+err_out2:
 	ql_release_all(pdev);
+err_out1:
 	pci_disable_device(pdev);
 	return err;
 }
+1 -1
drivers/net/s2io.c
···
 				break;
 			}
 		} else {
-			if (!(val64 & busy_bit)) {
+			if (val64 & busy_bit) {
 				ret = SUCCESS;
 				break;
 			}
+4 -3
drivers/net/sfc/mcdi.c
···
 			loff_t offset, u8 *buffer, size_t length)
 {
 	u8 inbuf[MC_CMD_NVRAM_READ_IN_LEN];
-	u8 outbuf[MC_CMD_NVRAM_READ_OUT_LEN(length)];
+	u8 outbuf[MC_CMD_NVRAM_READ_OUT_LEN(EFX_MCDI_NVRAM_LEN_MAX)];
 	size_t outlen;
 	int rc;
 
···
 int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
 			 loff_t offset, const u8 *buffer, size_t length)
 {
-	u8 inbuf[MC_CMD_NVRAM_WRITE_IN_LEN(length)];
+	u8 inbuf[MC_CMD_NVRAM_WRITE_IN_LEN(EFX_MCDI_NVRAM_LEN_MAX)];
 	int rc;
 
 	MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type);
···
 
 	BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0);
 
-	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf, sizeof(inbuf),
+	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf,
+			  ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4),
 			  NULL, 0, NULL);
 	if (rc)
 		goto fail;
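The point of the change above is to replace C99 variable-length arrays, whose stack footprint depends on the caller's `length`, with buffers sized for the worst case (EFX_MCDI_NVRAM_LEN_MAX) while still transmitting only the bytes actually used. A hedged userspace sketch of the same pattern; the header size and send_request() helper are made up for illustration:

#include <string.h>

#define NVRAM_LEN_MAX 128	/* worst case one request may carry */
#define HDR_LEN 12		/* assumed header size, illustrative only */

/* stand-in for the real transport */
static int send_request(const void *buf, size_t len)
{
	(void)buf;
	(void)len;
	return 0;
}

static int nvram_write(const void *data, size_t length)
{
	unsigned char inbuf[HDR_LEN + NVRAM_LEN_MAX];	/* fixed size, no VLA */

	if (length > NVRAM_LEN_MAX)
		return -1;	/* callers chunk instead (see mtd.c below) */

	memset(inbuf, 0, HDR_LEN);			/* header fields */
	memcpy(inbuf + HDR_LEN, data, length);		/* payload */
	/* send only the bytes in use, not sizeof(inbuf) */
	return send_request(inbuf, HDR_LEN + length);
}

int main(void)
{
	return nvram_write("abc", 3);
}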
+1
drivers/net/sfc/mcdi.h
···
 extern int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
 				loff_t offset, const u8 *buffer,
 				size_t length);
+#define EFX_MCDI_NVRAM_LEN_MAX 128
 extern int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
 				loff_t offset, size_t length);
 extern int efx_mcdi_nvram_update_finish(struct efx_nic *efx,
+3 -1
drivers/net/sfc/mcdi_pcol.h
···
 #define MC_CMD_MAC_RX_LANES01_DISP_ERR 57
 #define MC_CMD_MAC_RX_LANES23_DISP_ERR 58
 #define MC_CMD_MAC_RX_MATCH_FAULT 59
+#define MC_CMD_GMAC_DMABUF_START 64
+#define MC_CMD_GMAC_DMABUF_END   95
 /* Insert new members here. */
-#define MC_CMD_MAC_GENERATION_END 60
+#define MC_CMD_MAC_GENERATION_END 96
 #define MC_CMD_MAC_NSTATS (MC_CMD_MAC_GENERATION_END+1)
 
 /* MC_CMD_MAC_STATS:
+2 -3
drivers/net/sfc/mtd.c
··· 23 #include "mcdi_pcol.h" 24 25 #define EFX_SPI_VERIFY_BUF_LEN 16 26 - #define EFX_MCDI_CHUNK_LEN 128 27 28 struct efx_mtd_partition { 29 struct mtd_info mtd; ··· 427 int rc = 0; 428 429 while (offset < end) { 430 - chunk = min_t(size_t, end - offset, EFX_MCDI_CHUNK_LEN); 431 rc = efx_mcdi_nvram_read(efx, part->mcdi.nvram_type, offset, 432 buffer, chunk); 433 if (rc) ··· 490 } 491 492 while (offset < end) { 493 - chunk = min_t(size_t, end - offset, EFX_MCDI_CHUNK_LEN); 494 rc = efx_mcdi_nvram_write(efx, part->mcdi.nvram_type, offset, 495 buffer, chunk); 496 if (rc)
··· 23 #include "mcdi_pcol.h" 24 25 #define EFX_SPI_VERIFY_BUF_LEN 16 26 27 struct efx_mtd_partition { 28 struct mtd_info mtd; ··· 428 int rc = 0; 429 430 while (offset < end) { 431 + chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX); 432 rc = efx_mcdi_nvram_read(efx, part->mcdi.nvram_type, offset, 433 buffer, chunk); 434 if (rc) ··· 491 } 492 493 while (offset < end) { 494 + chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX); 495 rc = efx_mcdi_nvram_write(efx, part->mcdi.nvram_type, offset, 496 buffer, chunk); 497 if (rc)
-6
drivers/net/sfc/qt202x_phy.c
···
 	/* Wait 250ms for the PHY to complete bootup */
 	msleep(250);
 
-	/* Check that all the MMDs we expect are present and responding. We
-	 * expect faults on some if the link is down, but not on the PHY XS */
-	rc = efx_mdio_check_mmds(efx, QT202X_REQUIRED_DEVS, MDIO_DEVS_PHYXS);
-	if (rc < 0)
-		goto fail;
-
 	falcon_board(efx)->type->init_phy(efx);
 
 	return rc;
+35 -7
drivers/net/sky2.c
···
 {
 	u32 reg1;
 
+	sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
 	reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
 	reg1 &= ~phy_power[port];
···
 		reg1 |= coma_mode[port];
 
 	sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
+	sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
 	sky2_pci_read32(hw, PCI_DEV_REG1);
 
 	if (hw->chip_id == CHIP_ID_YUKON_FE)
···
 		gm_phy_write(hw, port, PHY_MARV_CTRL, PHY_CT_PDOWN);
 	}
 
+	sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
 	reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
 	reg1 |= phy_power[port];	/* set PHY to PowerDown/COMA Mode */
 	sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
+	sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
 }
 
 /* Force a renegotiation */
···
 
 		/* reset PHY Link Detect */
 		phy = sky2_pci_read16(hw, PSM_CONFIG_REG4);
+		sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
 		sky2_pci_write16(hw, PSM_CONFIG_REG4, phy | 1);
+		sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
 
 		sky2_link_up(sky2);
 	}
···
 	if (status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) {
 		u16 pci_err;
 
+		sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
 		pci_err = sky2_pci_read16(hw, PCI_STATUS);
 		if (net_ratelimit())
 			dev_err(&pdev->dev, "PCI hardware error (0x%x)\n",
···
 
 		sky2_pci_write16(hw, PCI_STATUS,
 				 pci_err | PCI_STATUS_ERROR_BITS);
+		sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
 	}
 
 	if (status & Y2_IS_PCI_EXP) {
 		/* PCI-Express uncorrectable Error occurred */
 		u32 err;
 
+		sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
 		err = sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS);
 		sky2_write32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS,
 			     0xfffffffful);
···
 			dev_err(&pdev->dev, "PCI Express error (0x%x)\n", err);
 
 		sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS);
+		sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
 	}
 
 	if (status & Y2_HWE_L1_MASK)
···
 	}
 
 	sky2_power_on(hw);
+	sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
 
 	for (i = 0; i < hw->ports; i++) {
 		sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET);
···
 		reg <<= PSM_CONFIG_REG4_TIMER_PHY_LINK_DETECT_BASE;
 
 		/* reset PHY Link Detect */
+		sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
 		sky2_pci_write16(hw, PSM_CONFIG_REG4,
 				 reg | PSM_CONFIG_REG4_RST_PHY_LINK_DETECT);
 		sky2_pci_write16(hw, PSM_CONFIG_REG4, reg);
···
 		/* restore the PCIe Link Control register */
 		sky2_pci_write16(hw, cap + PCI_EXP_LNKCTL, reg);
 	}
+	sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
 
 	/* re-enable PEX PM in PEX PHY debug reg. 8 (clear bit 12) */
 	sky2_write32(hw, Y2_PEX_PHY_DATA, PEX_DB_ACCESS | (0x08UL << 16));
···
 	return sky2_is_copper(hw) ? (WAKE_PHY | WAKE_MAGIC) : 0;
 }
 
+static void sky2_hw_set_wol(struct sky2_hw *hw)
+{
+	int wol = 0;
+	int i;
+
+	for (i = 0; i < hw->ports; i++) {
+		struct net_device *dev = hw->dev[i];
+		struct sky2_port *sky2 = netdev_priv(dev);
+
+		if (sky2->wol)
+			wol = 1;
+	}
+
+	if (hw->chip_id == CHIP_ID_YUKON_EC_U ||
+	    hw->chip_id == CHIP_ID_YUKON_EX ||
+	    hw->chip_id == CHIP_ID_YUKON_FE_P)
+		sky2_write32(hw, B0_CTST, wol ? Y2_HW_WOL_ON : Y2_HW_WOL_OFF);
+
+	device_set_wakeup_enable(&hw->pdev->dev, wol);
+}
+
 static void sky2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 {
 	const struct sky2_port *sky2 = netdev_priv(dev);
···
 
 	sky2->wol = wol->wolopts;
 
-	if (hw->chip_id == CHIP_ID_YUKON_EC_U ||
-	    hw->chip_id == CHIP_ID_YUKON_EX ||
-	    hw->chip_id == CHIP_ID_YUKON_FE_P)
-		sky2_write32(hw, B0_CTST, sky2->wol
-			     ? Y2_HW_WOL_ON : Y2_HW_WOL_OFF);
-
-	device_set_wakeup_enable(&hw->pdev->dev, sky2->wol);
+	sky2_hw_set_wol(hw);
 
 	if (!netif_running(dev))
 		sky2_wol_init(sky2);
+1
drivers/net/tulip/tulip_core.c
···
 	{ 0x17B3, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
 	{ 0x10b7, 0x9300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* 3Com 3CSOHO100B-TX */
 	{ 0x14ea, 0xab08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Planex FNW-3602-TX */
+	{ 0x1414, 0x0001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Microsoft MN-120 */
 	{ 0x1414, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
 	{ } /* terminate list */
 };
+2 -3
drivers/net/ucc_geth.c
···
 		/* Handle the transmitted buffer and release */
 		/* the BD to be used with the current frame */
 
-		if (bd == ugeth->txBd[txQ]) /* queue empty? */
+		skb = ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]];
+		if (!skb)
 			break;
 
 		dev->stats.tx_packets++;
-
-		skb = ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]];
 
 		if (skb_queue_len(&ugeth->rx_recycle) < RX_BD_RING_LEN &&
 			     skb_recycle_check(skb,
+1 -2
drivers/net/virtio_net.c
···
 
 	vi = container_of(work, struct virtnet_info, refill.work);
 	napi_disable(&vi->napi);
-	try_fill_recv(vi, GFP_KERNEL);
-	still_empty = (vi->num == 0);
+	still_empty = !try_fill_recv(vi, GFP_KERNEL);
 	napi_enable(&vi->napi);
 
 	/* In theory, this can happen: if we don't get any buffers in
+2
drivers/net/wimax/i2400m/i2400m-usb.h
···
 
 	/* Device IDs */
 	USB_DEVICE_ID_I6050 = 0x0186,
+	USB_DEVICE_ID_I6050_2 = 0x0188,
 };
 
 
···
 	u8 rx_size_auto_shrink;
 
 	struct dentry *debugfs_dentry;
+	unsigned i6050:1;	/* 1 if this is a 6050 based SKU */
 };
 
 
+11 -1
drivers/net/wimax/i2400m/usb.c
···
 	i2400m->bus_bm_wait_for_ack = i2400mu_bus_bm_wait_for_ack;
 	i2400m->bus_bm_mac_addr_impaired = 0;
 
-	if (id->idProduct == USB_DEVICE_ID_I6050) {
+	switch (id->idProduct) {
+	case USB_DEVICE_ID_I6050:
+	case USB_DEVICE_ID_I6050_2:
+		i2400mu->i6050 = 1;
+		break;
+	default:
+		break;
+	}
+
+	if (i2400mu->i6050) {
 		i2400m->bus_fw_names = i2400mu_bus_fw_names_6050;
 		i2400mu->endpoint_cfg.bulk_out = 0;
 		i2400mu->endpoint_cfg.notification = 3;
···
 static
 struct usb_device_id i2400mu_id_table[] = {
 	{ USB_DEVICE(0x8086, USB_DEVICE_ID_I6050) },
+	{ USB_DEVICE(0x8086, USB_DEVICE_ID_I6050_2) },
 	{ USB_DEVICE(0x8086, 0x0181) },
 	{ USB_DEVICE(0x8086, 0x1403) },
 	{ USB_DEVICE(0x8086, 0x1405) },
+5
drivers/net/wireless/iwlwifi/iwl-5000.c
···
 	.use_bsm = false,
 	.ht_greenfield_support = true,
 	.led_compensation = 51,
+	.use_rts_for_ht = true, /* use rts/cts protection */
 	.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
 	.sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
 };
···
 	.use_bsm = false,
 	.ht_greenfield_support = true,
 	.led_compensation = 51,
+	.use_rts_for_ht = true, /* use rts/cts protection */
 	.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
 };
 
···
 	.use_bsm = false,
 	.ht_greenfield_support = true,
 	.led_compensation = 51,
+	.use_rts_for_ht = true, /* use rts/cts protection */
 	.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
 	.sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
 };
···
 	.use_bsm = false,
 	.ht_greenfield_support = true,
 	.led_compensation = 51,
+	.use_rts_for_ht = true, /* use rts/cts protection */
 	.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
 	.sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
 };
···
 	.use_bsm = false,
 	.ht_greenfield_support = true,
 	.led_compensation = 51,
+	.use_rts_for_ht = true, /* use rts/cts protection */
 	.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
 	.sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
 };
+26
drivers/net/wireless/iwlwifi/iwl-devtrace.c
···
+/******************************************************************************
+ *
+ * Copyright(c) 2009 - 2010 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
 #include <linux/module.h>
 
 /* sparse doesn't like tracepoint macros */
+26
drivers/net/wireless/iwlwifi/iwl-devtrace.h
···
+/******************************************************************************
+ *
+ * Copyright(c) 2009 - 2010 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
 #if !defined(__IWLWIFI_DEVICE_TRACE) || defined(TRACE_HEADER_MULTI_READ)
 #define __IWLWIFI_DEVICE_TRACE
 
+4
drivers/net/wireless/iwmc3200wifi/commands.c
···
 
 	memset(&update, 0, sizeof(struct iwm_umac_pmkid_update));
 
+	update.hdr.oid = UMAC_WIFI_IF_CMD_PMKID_UPDATE;
+	update.hdr.buf_size = cpu_to_le16(sizeof(struct iwm_umac_pmkid_update) -
+					  sizeof(struct iwm_umac_wifi_if));
+
 	update.command = cpu_to_le32(command);
 	if (pmksa->bssid)
 		memcpy(&update.bssid, pmksa->bssid, ETH_ALEN);
+1
drivers/net/wireless/iwmc3200wifi/commands.h
···
 #define IWM_CMD_PMKID_FLUSH	3
 
 struct iwm_umac_pmkid_update {
+	struct iwm_umac_wifi_if hdr;
 	__le32 command;
 	u8 bssid[ETH_ALEN];
 	__le16 reserved;
+8
drivers/net/wireless/p54/p54pci.c
···
 			i %= ring_limit;
 			continue;
 		}
+
+		if (unlikely(len > priv->common.rx_mtu)) {
+			if (net_ratelimit())
+				dev_err(&priv->pdev->dev, "rx'd frame size "
+					"exceeds length threshold.\n");
+
+			len = priv->common.rx_mtu;
+		}
 		skb_put(skb, len);
 
 		if (p54_rx(dev, skb)) {
+1
drivers/net/wireless/zd1211rw/zd_usb.c
···
 	{ USB_DEVICE(0x6891, 0xa727), .driver_info = DEVICE_ZD1211 },
 	/* ZD1211B */
 	{ USB_DEVICE(0x0053, 0x5301), .driver_info = DEVICE_ZD1211B },
+	{ USB_DEVICE(0x0409, 0x0248), .driver_info = DEVICE_ZD1211B },
 	{ USB_DEVICE(0x0411, 0x00da), .driver_info = DEVICE_ZD1211B },
 	{ USB_DEVICE(0x0471, 0x1236), .driver_info = DEVICE_ZD1211B },
 	{ USB_DEVICE(0x0471, 0x1237), .driver_info = DEVICE_ZD1211B },
+1
drivers/serial/serial_cs.c
···
 	PCMCIA_PFC_DEVICE_PROD_ID12(1, "PCMCIAs", "LanModem", 0xdcfe12d3, 0xc67c648f),
 	PCMCIA_PFC_DEVICE_PROD_ID12(1, "TDK", "GlobalNetworker 3410/3412", 0x1eae9475, 0xd9a93bed),
 	PCMCIA_PFC_DEVICE_PROD_ID12(1, "Xircom", "CreditCard Ethernet+Modem II", 0x2e3ee845, 0xeca401bf),
+	PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0e01),
 	PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0a05),
 	PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x1101),
 	PCMCIA_MFC_DEVICE_MANF_CARD(0, 0x0104, 0x0070),
+1
include/linux/phy.h
···
 int phy_driver_register(struct phy_driver *new_driver);
 void phy_prepare_link(struct phy_device *phydev,
 		void (*adjust_link)(struct net_device *));
+void phy_state_machine(struct work_struct *work);
 void phy_start_machine(struct phy_device *phydev,
 		void (*handler)(struct net_device *));
 void phy_stop_machine(struct phy_device *phydev);
+6
include/net/netns/xfrm.h
···
 #include <linux/wait.h>
 #include <linux/workqueue.h>
 #include <linux/xfrm.h>
+#include <net/dst_ops.h>
 
 struct ctl_table_header;
 
···
 	struct xfrm_policy_hash	policy_bydst[XFRM_POLICY_MAX * 2];
 	unsigned int		policy_count[XFRM_POLICY_MAX * 2];
 	struct work_struct	policy_hash_work;
+
+	struct dst_ops		xfrm4_dst_ops;
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+	struct dst_ops		xfrm6_dst_ops;
+#endif
 
 	struct sock		*nlsk;
 	struct sock		*nlsk_stash;
+2
include/net/netrom.h
···
 static __inline__ void nr_neigh_put(struct nr_neigh *nr_neigh)
 {
 	if (atomic_dec_and_test(&nr_neigh->refcount)) {
+		if (nr_neigh->ax25)
+			ax25_cb_put(nr_neigh->ax25);
 		kfree(nr_neigh->digipeat);
 		kfree(nr_neigh);
 	}
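The rule being enforced above: when the final reference to a container drops, any references the container itself holds on other refcounted objects must be put before the container is freed. A userspace sketch with stand-in types (not the ax25/netrom structures):

#include <stdlib.h>

struct obj { int refcount; };

static void obj_put(struct obj *o)
{
	if (--o->refcount == 0)
		free(o);
}

struct neigh {
	int refcount;
	struct obj *ax25;	/* reference the neighbour holds, may be NULL */
};

static void neigh_put(struct neigh *n)
{
	if (--n->refcount == 0) {
		if (n->ax25)
			obj_put(n->ax25);	/* drop held ref before freeing */
		free(n);
	}
}

int main(void)
{
	struct obj *ax25 = calloc(1, sizeof(*ax25));
	struct neigh *n = calloc(1, sizeof(*n));

	ax25->refcount = 1;
	n->refcount = 1;
	n->ax25 = ax25;		/* the neighbour owns one ax25 reference */
	neigh_put(n);		/* releases both; without the fix, ax25 leaks */
	return 0;
}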
+2 -2
include/net/xfrm.h
···
 extern struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 seq);
 extern int xfrm_state_delete(struct xfrm_state *x);
 extern int xfrm_state_flush(struct net *net, u8 proto, struct xfrm_audit *audit_info);
-extern void xfrm_sad_getinfo(struct xfrmk_sadinfo *si);
-extern void xfrm_spd_getinfo(struct xfrmk_spdinfo *si);
+extern void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si);
+extern void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si);
 extern int xfrm_replay_check(struct xfrm_state *x,
 			     struct sk_buff *skb, __be32 seq);
 extern void xfrm_replay_advance(struct xfrm_state *x, __be32 seq);
+1 -1
net/8021q/vlan_dev.c
···
 		goto err_unlock;
 	}
 
-	rx_stats = per_cpu_ptr(vlan_dev_info(dev)->vlan_rx_stats,
+	rx_stats = per_cpu_ptr(vlan_dev_info(skb->dev)->vlan_rx_stats,
 			       smp_processor_id());
 	rx_stats->rx_packets++;
 	rx_stats->rx_bytes += skb->len;
+1 -1
net/appletalk/aarp.c
···
 			ma = &ifa->address;
 		else { /* We need to make a copy of the entry. */
 			da.s_node = sa.s_node;
-			da.s_net = da.s_net;
+			da.s_net = sa.s_net;
 			ma = &da;
 		}
 
+6
net/ax25/ax25_out.c
···
 #endif
 	}
 
+	/*
+	 * There is one ref for the state machine; a caller needs
+	 * one more to put it back, just like with the existing one.
+	 */
+	ax25_cb_hold(ax25);
+
 	ax25_cb_add(ax25);
 
 	ax25->state = AX25_STATE_1;
+5 -13
net/dccp/ccid.c
···
 	return err;
 }
 
-static struct kmem_cache *ccid_kmem_cache_create(int obj_size, const char *fmt,...)
+static struct kmem_cache *ccid_kmem_cache_create(int obj_size, char *slab_name_fmt, const char *fmt,...)
 {
 	struct kmem_cache *slab;
-	char slab_name_fmt[32], *slab_name;
 	va_list args;
 
 	va_start(args, fmt);
 	vsnprintf(slab_name_fmt, sizeof(slab_name_fmt), fmt, args);
 	va_end(args);
 
-	slab_name = kstrdup(slab_name_fmt, GFP_KERNEL);
-	if (slab_name == NULL)
-		return NULL;
-	slab = kmem_cache_create(slab_name, sizeof(struct ccid) + obj_size, 0,
+	slab = kmem_cache_create(slab_name_fmt, sizeof(struct ccid) + obj_size, 0,
 				 SLAB_HWCACHE_ALIGN, NULL);
-	if (slab == NULL)
-		kfree(slab_name);
 	return slab;
 }
 
 static void ccid_kmem_cache_destroy(struct kmem_cache *slab)
 {
-	if (slab != NULL) {
-		const char *name = kmem_cache_name(slab);
-
+	if (slab != NULL)
 		kmem_cache_destroy(slab);
-		kfree(name);
-	}
 }
 
 static int ccid_activate(struct ccid_operations *ccid_ops)
···
 
 	ccid_ops->ccid_hc_rx_slab =
 			ccid_kmem_cache_create(ccid_ops->ccid_hc_rx_obj_size,
+					       ccid_ops->ccid_hc_rx_slab_name,
 					       "ccid%u_hc_rx_sock",
 					       ccid_ops->ccid_id);
 	if (ccid_ops->ccid_hc_rx_slab == NULL)
···
 
 	ccid_ops->ccid_hc_tx_slab =
 			ccid_kmem_cache_create(ccid_ops->ccid_hc_tx_obj_size,
+					       ccid_ops->ccid_hc_tx_slab_name,
 					       "ccid%u_hc_tx_sock",
 					       ccid_ops->ccid_id);
 	if (ccid_ops->ccid_hc_tx_slab == NULL)
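What makes this fix work: kmem_cache_create() at this point keeps the name pointer it is given rather than copying the string, so the name must outlive the cache; moving it from a kstrdup()'d (and, on the error path, freed) buffer into an array embedded in the long-lived ccid_operations removes the lifetime hazard. A userspace sketch of the same rule, with a toy registry standing in for the slab allocator:

#include <assert.h>
#include <stdio.h>
#include <string.h>

/* toy registry: like kmem_cache_create() here, it stores the pointer
 * and never copies the string */
struct cache { const char *name; };

static void cache_create(struct cache *c, const char *name)
{
	c->name = name;
}

struct ccid_ops {
	char rx_slab_name[32];	/* lives exactly as long as the ops */
	struct cache rx_slab;
};

int main(void)
{
	static struct ccid_ops ops;	/* long-lived, like the kernel's */

	snprintf(ops.rx_slab_name, sizeof(ops.rx_slab_name),
		 "ccid%u_hc_rx_sock", 2u);
	cache_create(&ops.rx_slab, ops.rx_slab_name);
	/* the registered name stays valid for the cache's whole lifetime */
	assert(strcmp(ops.rx_slab.name, "ccid2_hc_rx_sock") == 0);
	return 0;
}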
+2
net/dccp/ccid.h
···
 	const char		*ccid_name;
 	struct kmem_cache	*ccid_hc_rx_slab,
 				*ccid_hc_tx_slab;
+	char			ccid_hc_rx_slab_name[32];
+	char			ccid_hc_tx_slab_name[32];
 	__u32			ccid_hc_rx_obj_size,
 				ccid_hc_tx_obj_size;
 	/* Interface Routines */
+2 -1
net/dccp/probe.c
···
 	if (!proc_net_fops_create(&init_net, procname, S_IRUSR, &dccpprobe_fops))
 		goto err0;
 
-	ret = register_jprobe(&dccp_send_probe);
+	ret = try_then_request_module((register_jprobe(&dccp_send_probe) == 0),
+				      "dccp");
 	if (ret)
 		goto err1;
 
+1 -1
net/ipv4/inet_diag.c
···
 			yes = entry->sport >= op[1].no;
 			break;
 		case INET_DIAG_BC_S_LE:
-			yes = entry->dport <= op[1].no;
+			yes = entry->sport <= op[1].no;
 			break;
 		case INET_DIAG_BC_D_GE:
 			yes = entry->dport >= op[1].no;
+2
net/ipv4/route.c
··· 586 { 587 remove_proc_entry("rt_cache", net->proc_net_stat); 588 remove_proc_entry("rt_cache", net->proc_net); 589 remove_proc_entry("rt_acct", net->proc_net); 590 } 591 592 static struct pernet_operations ip_rt_proc_ops __net_initdata = {
··· 586 { 587 remove_proc_entry("rt_cache", net->proc_net_stat); 588 remove_proc_entry("rt_cache", net->proc_net); 589 + #ifdef CONFIG_NET_CLS_ROUTE 590 remove_proc_entry("rt_acct", net->proc_net); 591 + #endif 592 } 593 594 static struct pernet_operations ip_rt_proc_ops __net_initdata = {
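The rt_acct entry is only created when CONFIG_NET_CLS_ROUTE is enabled, so its removal must sit under the same symbol or remove_proc_entry() will complain about a nonexistent entry on kernels built without route classifier accounting. The rule of thumb is to keep conditional proc entries symmetric, along these lines (the creation-side names here are from memory of the 2.6.32-era code and may differ):

#ifdef CONFIG_NET_CLS_ROUTE
	/* setup path: created only under the config symbol */
	if (!proc_create("rt_acct", 0, net->proc_net, &rt_acct_proc_fops))
		goto err;
#endif
	/* ... teardown path, mirrored under the very same #ifdef ... */
#ifdef CONFIG_NET_CLS_ROUTE
	remove_proc_entry("rt_acct", net->proc_net);
#endif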
+10 -9
net/ipv4/tcp_probe.c
··· 39 MODULE_PARM_DESC(port, "Port to match (0=all)"); 40 module_param(port, int, 0); 41 42 - static int bufsize __read_mostly = 4096; 43 MODULE_PARM_DESC(bufsize, "Log buffer size in packets (4096)"); 44 - module_param(bufsize, int, 0); 45 46 static int full __read_mostly; 47 MODULE_PARM_DESC(full, "Full log (1=every ack packet received, 0=only cwnd changes)"); ··· 75 76 static inline int tcp_probe_used(void) 77 { 78 - return (tcp_probe.head - tcp_probe.tail) % bufsize; 79 } 80 81 static inline int tcp_probe_avail(void) 82 { 83 - return bufsize - tcp_probe_used(); 84 } 85 86 /* ··· 116 p->ssthresh = tcp_current_ssthresh(sk); 117 p->srtt = tp->srtt >> 3; 118 119 - tcp_probe.head = (tcp_probe.head + 1) % bufsize; 120 } 121 tcp_probe.lastcwnd = tp->snd_cwnd; 122 spin_unlock(&tcp_probe.lock); ··· 149 static int tcpprobe_sprint(char *tbuf, int n) 150 { 151 const struct tcp_log *p 152 - = tcp_probe.log + tcp_probe.tail % bufsize; 153 struct timespec tv 154 = ktime_to_timespec(ktime_sub(p->tstamp, tcp_probe.start)); 155 ··· 192 width = tcpprobe_sprint(tbuf, sizeof(tbuf)); 193 194 if (cnt + width < len) 195 - tcp_probe.tail = (tcp_probe.tail + 1) % bufsize; 196 197 spin_unlock_bh(&tcp_probe.lock); 198 ··· 222 init_waitqueue_head(&tcp_probe.wait); 223 spin_lock_init(&tcp_probe.lock); 224 225 - if (bufsize < 0) 226 return -EINVAL; 227 228 tcp_probe.log = kcalloc(bufsize, sizeof(struct tcp_log), GFP_KERNEL); 229 if (!tcp_probe.log) 230 goto err0; ··· 237 if (ret) 238 goto err1; 239 240 - pr_info("TCP probe registered (port=%d)\n", port); 241 return 0; 242 err1: 243 proc_net_remove(&init_net, procname);
··· 39 MODULE_PARM_DESC(port, "Port to match (0=all)"); 40 module_param(port, int, 0); 41 42 + static unsigned int bufsize __read_mostly = 4096; 43 MODULE_PARM_DESC(bufsize, "Log buffer size in packets (4096)"); 44 + module_param(bufsize, uint, 0); 45 46 static int full __read_mostly; 47 MODULE_PARM_DESC(full, "Full log (1=every ack packet received, 0=only cwnd changes)"); ··· 75 76 static inline int tcp_probe_used(void) 77 { 78 + return (tcp_probe.head - tcp_probe.tail) & (bufsize - 1); 79 } 80 81 static inline int tcp_probe_avail(void) 82 { 83 + return bufsize - tcp_probe_used() - 1; 84 } 85 86 /* ··· 116 p->ssthresh = tcp_current_ssthresh(sk); 117 p->srtt = tp->srtt >> 3; 118 119 + tcp_probe.head = (tcp_probe.head + 1) & (bufsize - 1); 120 } 121 tcp_probe.lastcwnd = tp->snd_cwnd; 122 spin_unlock(&tcp_probe.lock); ··· 149 static int tcpprobe_sprint(char *tbuf, int n) 150 { 151 const struct tcp_log *p 152 + = tcp_probe.log + tcp_probe.tail; 153 struct timespec tv 154 = ktime_to_timespec(ktime_sub(p->tstamp, tcp_probe.start)); 155 ··· 192 width = tcpprobe_sprint(tbuf, sizeof(tbuf)); 193 194 if (cnt + width < len) 195 + tcp_probe.tail = (tcp_probe.tail + 1) & (bufsize - 1); 196 197 spin_unlock_bh(&tcp_probe.lock); 198 ··· 222 init_waitqueue_head(&tcp_probe.wait); 223 spin_lock_init(&tcp_probe.lock); 224 225 + if (bufsize == 0) 226 return -EINVAL; 227 228 + bufsize = roundup_pow_of_two(bufsize); 229 tcp_probe.log = kcalloc(bufsize, sizeof(struct tcp_log), GFP_KERNEL); 230 if (!tcp_probe.log) 231 goto err0; ··· 236 if (ret) 237 goto err1; 238 239 + pr_info("TCP probe registered (port=%d) bufsize=%u\n", port, bufsize); 240 return 0; 241 err1: 242 proc_net_remove(&init_net, procname);
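Rounding bufsize up to a power of two lets every index operation use an AND with (bufsize - 1) instead of a modulus, and the reworked tcp_probe_avail() keeps one slot unused so a full ring stays distinguishable from an empty one. A minimal userspace sketch of the same arithmetic (plain C, with a stand-in for the kernel's roundup_pow_of_two()):

#include <assert.h>

/* Power-of-two ring buffer index arithmetic, mirroring the
 * bufsize/head/tail handling above. */
static unsigned int roundup_pow_of_two(unsigned int n)
{
	unsigned int p = 1;
	while (p < n)
		p <<= 1;
	return p;
}

struct ring {
	unsigned int head, tail, size;	/* size is a power of two */
};

static unsigned int ring_used(const struct ring *r)
{
	/* unsigned subtraction wraps correctly even when head < tail */
	return (r->head - r->tail) & (r->size - 1);
}

static unsigned int ring_avail(const struct ring *r)
{
	return r->size - ring_used(r) - 1;	/* one slot kept free: full != empty */
}

int main(void)
{
	struct ring r = { .head = 0, .tail = 0,
			  .size = roundup_pow_of_two(4096) };

	r.head = (r.head + 1) & (r.size - 1);	/* push one entry */
	assert(ring_used(&r) == 1 && ring_avail(&r) == r.size - 2);
	return 0;
}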
+8 -6
net/ipv4/xfrm4_policy.c
··· 15 #include <net/xfrm.h> 16 #include <net/ip.h> 17 18 - static struct dst_ops xfrm4_dst_ops; 19 static struct xfrm_policy_afinfo xfrm4_policy_afinfo; 20 21 static struct dst_entry *xfrm4_dst_lookup(struct net *net, int tos, ··· 189 190 static inline int xfrm4_garbage_collect(struct dst_ops *ops) 191 { 192 - xfrm4_policy_afinfo.garbage_collect(&init_net); 193 - return (atomic_read(&xfrm4_dst_ops.entries) > xfrm4_dst_ops.gc_thresh*2); 194 } 195 196 static void xfrm4_update_pmtu(struct dst_entry *dst, u32 mtu) ··· 269 static struct ctl_table xfrm4_policy_table[] = { 270 { 271 .procname = "xfrm4_gc_thresh", 272 - .data = &xfrm4_dst_ops.gc_thresh, 273 .maxlen = sizeof(int), 274 .mode = 0644, 275 .proc_handler = proc_dointvec, ··· 296 297 void __init xfrm4_init(int rt_max_size) 298 { 299 - xfrm4_state_init(); 300 - xfrm4_policy_init(); 301 /* 302 * Select a default value for the gc_thresh based on the main route 303 * table hash size. It seems to me the worst case scenario is when ··· 307 * and start cleaning when were 1/2 full 308 */ 309 xfrm4_dst_ops.gc_thresh = rt_max_size/2; 310 #ifdef CONFIG_SYSCTL 311 sysctl_hdr = register_net_sysctl_table(&init_net, net_ipv4_ctl_path, 312 xfrm4_policy_table);
··· 15 #include <net/xfrm.h> 16 #include <net/ip.h> 17 18 static struct xfrm_policy_afinfo xfrm4_policy_afinfo; 19 20 static struct dst_entry *xfrm4_dst_lookup(struct net *net, int tos, ··· 190 191 static inline int xfrm4_garbage_collect(struct dst_ops *ops) 192 { 193 + struct net *net = container_of(ops, struct net, xfrm.xfrm4_dst_ops); 194 + 195 + xfrm4_policy_afinfo.garbage_collect(net); 196 + return (atomic_read(&ops->entries) > ops->gc_thresh * 2); 197 } 198 199 static void xfrm4_update_pmtu(struct dst_entry *dst, u32 mtu) ··· 268 static struct ctl_table xfrm4_policy_table[] = { 269 { 270 .procname = "xfrm4_gc_thresh", 271 + .data = &init_net.xfrm.xfrm4_dst_ops.gc_thresh, 272 .maxlen = sizeof(int), 273 .mode = 0644, 274 .proc_handler = proc_dointvec, ··· 295 296 void __init xfrm4_init(int rt_max_size) 297 { 298 /* 299 * Select a default value for the gc_thresh based on the main route 300 * table hash size. It seems to me the worst case scenario is when ··· 308 * and start cleaning when were 1/2 full 309 */ 310 xfrm4_dst_ops.gc_thresh = rt_max_size/2; 311 + 312 + xfrm4_state_init(); 313 + xfrm4_policy_init(); 314 #ifdef CONFIG_SYSCTL 315 sysctl_hdr = register_net_sysctl_table(&init_net, net_ipv4_ctl_path, 316 xfrm4_policy_table);
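With the file-scope xfrm4_dst_ops gone, the garbage collector recovers the owning namespace from the dst_ops pointer it is handed via container_of() on the embedded xfrm.xfrm4_dst_ops member, so entry counts and gc thresholds are tracked per netns; the IPv6 hunk below gets identical treatment. A self-contained illustration of the idiom (simplified: the kernel macro also type-checks the member):

#include <stdio.h>
#include <stddef.h>

/* container_of: recover the enclosing structure from a pointer to
 * one of its members, as the collector above does to find the
 * per-namespace "struct net". */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct dst_ops { int gc_thresh; };
struct net { int id; struct dst_ops xfrm4_dst_ops; };

static void collect(struct dst_ops *ops)
{
	struct net *net = container_of(ops, struct net, xfrm4_dst_ops);

	printf("collecting in netns %d (gc_thresh=%d)\n",
	       net->id, ops->gc_thresh);
}

int main(void)
{
	struct net n = { .id = 7, .xfrm4_dst_ops = { .gc_thresh = 1024 } };

	collect(&n.xfrm4_dst_ops);
	return 0;
}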
+14 -11
net/ipv6/xfrm6_policy.c
··· 24 #include <net/mip6.h> 25 #endif 26 27 - static struct dst_ops xfrm6_dst_ops; 28 static struct xfrm_policy_afinfo xfrm6_policy_afinfo; 29 30 static struct dst_entry *xfrm6_dst_lookup(struct net *net, int tos, ··· 223 224 static inline int xfrm6_garbage_collect(struct dst_ops *ops) 225 { 226 - xfrm6_policy_afinfo.garbage_collect(&init_net); 227 - return (atomic_read(&xfrm6_dst_ops.entries) > xfrm6_dst_ops.gc_thresh*2); 228 } 229 230 static void xfrm6_update_pmtu(struct dst_entry *dst, u32 mtu) ··· 311 static struct ctl_table xfrm6_policy_table[] = { 312 { 313 .procname = "xfrm6_gc_thresh", 314 - .data = &xfrm6_dst_ops.gc_thresh, 315 .maxlen = sizeof(int), 316 .mode = 0644, 317 .proc_handler = proc_dointvec, ··· 327 int ret; 328 unsigned int gc_thresh; 329 330 - ret = xfrm6_policy_init(); 331 - if (ret) 332 - goto out; 333 - 334 - ret = xfrm6_state_init(); 335 - if (ret) 336 - goto out_policy; 337 /* 338 * We need a good default value for the xfrm6 gc threshold. 339 * In ipv4 we set it to the route hash table size * 8, which ··· 340 */ 341 gc_thresh = FIB6_TABLE_HASHSZ * 8; 342 xfrm6_dst_ops.gc_thresh = (gc_thresh < 1024) ? 1024 : gc_thresh; 343 #ifdef CONFIG_SYSCTL 344 sysctl_hdr = register_net_sysctl_table(&init_net, net_ipv6_ctl_path, 345 xfrm6_policy_table);
··· 24 #include <net/mip6.h> 25 #endif 26 27 static struct xfrm_policy_afinfo xfrm6_policy_afinfo; 28 29 static struct dst_entry *xfrm6_dst_lookup(struct net *net, int tos, ··· 224 225 static inline int xfrm6_garbage_collect(struct dst_ops *ops) 226 { 227 + struct net *net = container_of(ops, struct net, xfrm.xfrm6_dst_ops); 228 + 229 + xfrm6_policy_afinfo.garbage_collect(net); 230 + return (atomic_read(&ops->entries) > ops->gc_thresh * 2); 231 } 232 233 static void xfrm6_update_pmtu(struct dst_entry *dst, u32 mtu) ··· 310 static struct ctl_table xfrm6_policy_table[] = { 311 { 312 .procname = "xfrm6_gc_thresh", 313 + .data = &init_net.xfrm.xfrm6_dst_ops.gc_thresh, 314 .maxlen = sizeof(int), 315 .mode = 0644, 316 .proc_handler = proc_dointvec, ··· 326 int ret; 327 unsigned int gc_thresh; 328 329 /* 330 * We need a good default value for the xfrm6 gc threshold. 331 * In ipv4 we set it to the route hash table size * 8, which ··· 346 */ 347 gc_thresh = FIB6_TABLE_HASHSZ * 8; 348 xfrm6_dst_ops.gc_thresh = (gc_thresh < 1024) ? 1024 : gc_thresh; 349 + 350 + ret = xfrm6_policy_init(); 351 + if (ret) 352 + goto out; 353 + 354 + ret = xfrm6_state_init(); 355 + if (ret) 356 + goto out_policy; 357 + 358 #ifdef CONFIG_SYSCTL 359 sysctl_hdr = register_net_sysctl_table(&init_net, net_ipv6_ctl_path, 360 xfrm6_policy_table);
+3
net/mac80211/cfg.c
··· 1331 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 1332 struct ieee80211_conf *conf = &local->hw.conf; 1333 1334 if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_PS)) 1335 return -EOPNOTSUPP; 1336
··· 1331 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 1332 struct ieee80211_conf *conf = &local->hw.conf; 1333 1334 + if (sdata->vif.type != NL80211_IFTYPE_STATION) 1335 + return -EOPNOTSUPP; 1336 + 1337 if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_PS)) 1338 return -EOPNOTSUPP; 1339
+1 -1
net/mac80211/rc80211_pid_algo.c
··· 190 rate_control_pid_normalize(pinfo, sband->n_bitrates); 191 192 /* Compute the proportional, integral and derivative errors. */ 193 - err_prop = (pinfo->target << RC_PID_ARITH_SHIFT) - pf; 194 195 err_avg = spinfo->err_avg_sc >> pinfo->smoothing_shift; 196 spinfo->err_avg_sc = spinfo->err_avg_sc - err_avg + err_prop;
··· 190 rate_control_pid_normalize(pinfo, sband->n_bitrates); 191 192 /* Compute the proportional, integral and derivative errors. */ 193 + err_prop = (pinfo->target - pf) << RC_PID_ARITH_SHIFT; 194 195 err_avg = spinfo->err_avg_sc >> pinfo->smoothing_shift; 196 spinfo->err_avg_sc = spinfo->err_avg_sc - err_avg + err_prop;
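target and pf have to be in the same scale before the error is shifted into the RC_PID fixed-point domain; the old expression scaled only target, mixing a shifted value with an unshifted one. Assuming both are plain percentages (which the fix implies), target = 14, pf = 20 and a shift of 8 give (14 << 8) - 20 = 3564 under the old code, a large positive error for a link that is actually missing its target, versus the correct (14 - 20) << 8 = -1536.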
+6 -5
net/netrom/nr_route.c
··· 843 dptr = skb_push(skb, 1); 844 *dptr = AX25_P_NETROM; 845 846 - ax25s = ax25_send_frame(skb, 256, (ax25_address *)dev->dev_addr, &nr_neigh->callsign, nr_neigh->digipeat, nr_neigh->dev); 847 - if (nr_neigh->ax25 && ax25s) { 848 - /* We were already holding this ax25_cb */ 849 ax25_cb_put(ax25s); 850 - } 851 - nr_neigh->ax25 = ax25s; 852 853 dev_put(dev); 854 ret = (nr_neigh->ax25 != NULL);
··· 843 dptr = skb_push(skb, 1); 844 *dptr = AX25_P_NETROM; 845 846 + ax25s = nr_neigh->ax25; 847 + nr_neigh->ax25 = ax25_send_frame(skb, 256, 848 + (ax25_address *)dev->dev_addr, 849 + &nr_neigh->callsign, 850 + nr_neigh->digipeat, nr_neigh->dev); 851 + if (ax25s) 852 ax25_cb_put(ax25s); 853 854 dev_put(dev); 855 ret = (nr_neigh->ax25 != NULL);
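ax25_send_frame() hands back a held ax25_cb (or NULL), and the old test released a reference only when both the existing pointer and the new result were non-NULL, dropping the old reference on the floor whenever the new call failed. The fix is the standard swap-then-release sequence: stash the old pointer, install the new one unconditionally, then put the stale reference exactly once. The rose_link.c and rose_route.c hunks below apply the same rule, including putting the reference when a neighbour is torn down. In generic, runnable form (obj/acquire_new are illustrative stand-ins, not ax25 API):

#include <stdlib.h>

struct obj { int refcnt; };

static void obj_put(struct obj *o)
{
	if (o && --o->refcnt == 0)
		free(o);
}

static struct obj *acquire_new(void)
{
	struct obj *o = malloc(sizeof(*o));

	if (o)
		o->refcnt = 1;	/* caller owns one reference; may return NULL */
	return o;
}

static void replace_ref(struct obj **slot)
{
	struct obj *old = *slot;	/* 1. stash the previous reference */

	*slot = acquire_new();		/* 2. install the new one, even if NULL */
	obj_put(old);			/* 3. drop the stale reference exactly once */
}

int main(void)
{
	struct obj *slot = NULL;

	replace_ref(&slot);	/* no old ref: obj_put(NULL) is a no-op */
	replace_ref(&slot);	/* old ref released, new one installed */
	obj_put(slot);
	return 0;
}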
+8
net/rose/rose_link.c
··· 101 static int rose_send_frame(struct sk_buff *skb, struct rose_neigh *neigh) 102 { 103 ax25_address *rose_call; 104 105 if (ax25cmp(&rose_callsign, &null_ax25_address) == 0) 106 rose_call = (ax25_address *)neigh->dev->dev_addr; 107 else 108 rose_call = &rose_callsign; 109 110 neigh->ax25 = ax25_send_frame(skb, 260, rose_call, &neigh->callsign, neigh->digipeat, neigh->dev); 111 112 return (neigh->ax25 != NULL); 113 } ··· 124 static int rose_link_up(struct rose_neigh *neigh) 125 { 126 ax25_address *rose_call; 127 128 if (ax25cmp(&rose_callsign, &null_ax25_address) == 0) 129 rose_call = (ax25_address *)neigh->dev->dev_addr; 130 else 131 rose_call = &rose_callsign; 132 133 neigh->ax25 = ax25_find_cb(rose_call, &neigh->callsign, neigh->digipeat, neigh->dev); 134 135 return (neigh->ax25 != NULL); 136 }
··· 101 static int rose_send_frame(struct sk_buff *skb, struct rose_neigh *neigh) 102 { 103 ax25_address *rose_call; 104 + ax25_cb *ax25s; 105 106 if (ax25cmp(&rose_callsign, &null_ax25_address) == 0) 107 rose_call = (ax25_address *)neigh->dev->dev_addr; 108 else 109 rose_call = &rose_callsign; 110 111 + ax25s = neigh->ax25; 112 neigh->ax25 = ax25_send_frame(skb, 260, rose_call, &neigh->callsign, neigh->digipeat, neigh->dev); 113 + if (ax25s) 114 + ax25_cb_put(ax25s); 115 116 return (neigh->ax25 != NULL); 117 } ··· 120 static int rose_link_up(struct rose_neigh *neigh) 121 { 122 ax25_address *rose_call; 123 + ax25_cb *ax25s; 124 125 if (ax25cmp(&rose_callsign, &null_ax25_address) == 0) 126 rose_call = (ax25_address *)neigh->dev->dev_addr; 127 else 128 rose_call = &rose_callsign; 129 130 + ax25s = neigh->ax25; 131 neigh->ax25 = ax25_find_cb(rose_call, &neigh->callsign, neigh->digipeat, neigh->dev); 132 + if (ax25s) 133 + ax25_cb_put(ax25s); 134 135 return (neigh->ax25 != NULL); 136 }
+5
net/rose/rose_route.c
··· 235 236 if ((s = rose_neigh_list) == rose_neigh) { 237 rose_neigh_list = rose_neigh->next; 238 kfree(rose_neigh->digipeat); 239 kfree(rose_neigh); 240 return; ··· 245 while (s != NULL && s->next != NULL) { 246 if (s->next == rose_neigh) { 247 s->next = rose_neigh->next; 248 kfree(rose_neigh->digipeat); 249 kfree(rose_neigh); 250 return; ··· 816 817 if (rose_neigh != NULL) { 818 rose_neigh->ax25 = NULL; 819 820 rose_del_route_by_neigh(rose_neigh); 821 rose_kill_by_neigh(rose_neigh);
··· 235 236 if ((s = rose_neigh_list) == rose_neigh) { 237 rose_neigh_list = rose_neigh->next; 238 + if (rose_neigh->ax25) 239 + ax25_cb_put(rose_neigh->ax25); 240 kfree(rose_neigh->digipeat); 241 kfree(rose_neigh); 242 return; ··· 243 while (s != NULL && s->next != NULL) { 244 if (s->next == rose_neigh) { 245 s->next = rose_neigh->next; 246 + if (rose_neigh->ax25) 247 + ax25_cb_put(rose_neigh->ax25); 248 kfree(rose_neigh->digipeat); 249 kfree(rose_neigh); 250 return; ··· 812 813 if (rose_neigh != NULL) { 814 rose_neigh->ax25 = NULL; 815 + ax25_cb_put(ax25); 816 817 rose_del_route_by_neigh(rose_neigh); 818 rose_kill_by_neigh(rose_neigh);
+1
net/wireless/sme.c
··· 655 memset(&wrqu, 0, sizeof(wrqu)); 656 wrqu.ap_addr.sa_family = ARPHRD_ETHER; 657 wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL); 658 #endif 659 } 660
··· 655 memset(&wrqu, 0, sizeof(wrqu)); 656 wrqu.ap_addr.sa_family = ARPHRD_ETHER; 657 wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL); 658 + wdev->wext.connect.ssid_len = 0; 659 #endif 660 } 661
+64 -11
net/xfrm/xfrm_policy.c
··· 469 return 0; 470 } 471 472 - void xfrm_spd_getinfo(struct xfrmk_spdinfo *si) 473 { 474 read_lock_bh(&xfrm_policy_lock); 475 - si->incnt = init_net.xfrm.policy_count[XFRM_POLICY_IN]; 476 - si->outcnt = init_net.xfrm.policy_count[XFRM_POLICY_OUT]; 477 - si->fwdcnt = init_net.xfrm.policy_count[XFRM_POLICY_FWD]; 478 - si->inscnt = init_net.xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX]; 479 - si->outscnt = init_net.xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX]; 480 - si->fwdscnt = init_net.xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX]; 481 - si->spdhcnt = init_net.xfrm.policy_idx_hmask; 482 si->spdhmcnt = xfrm_policy_hashmax; 483 read_unlock_bh(&xfrm_policy_lock); 484 } ··· 1309 return tos; 1310 } 1311 1312 - static inline struct xfrm_dst *xfrm_alloc_dst(int family) 1313 { 1314 struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); 1315 struct xfrm_dst *xdst; 1316 1317 if (!afinfo) 1318 return ERR_PTR(-EINVAL); 1319 1320 - xdst = dst_alloc(afinfo->dst_ops) ?: ERR_PTR(-ENOBUFS); 1321 1322 xfrm_policy_put_afinfo(afinfo); 1323 ··· 1379 struct flowi *fl, 1380 struct dst_entry *dst) 1381 { 1382 unsigned long now = jiffies; 1383 struct net_device *dev; 1384 struct dst_entry *dst_prev = NULL; ··· 1403 dst_hold(dst); 1404 1405 for (; i < nx; i++) { 1406 - struct xfrm_dst *xdst = xfrm_alloc_dst(family); 1407 struct dst_entry *dst1 = &xdst->u.dst; 1408 1409 err = PTR_ERR(xdst); ··· 2293 2294 int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo) 2295 { 2296 int err = 0; 2297 if (unlikely(afinfo == NULL)) 2298 return -EINVAL; ··· 2317 xfrm_policy_afinfo[afinfo->family] = afinfo; 2318 } 2319 write_unlock_bh(&xfrm_policy_afinfo_lock); 2320 return err; 2321 } 2322 EXPORT_SYMBOL(xfrm_policy_register_afinfo); ··· 2367 return err; 2368 } 2369 EXPORT_SYMBOL(xfrm_policy_unregister_afinfo); 2370 2371 static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family) 2372 { ··· 2546 rv = xfrm_policy_init(net); 2547 if (rv < 0) 2548 goto out_policy; 2549 rv = xfrm_sysctl_init(net); 2550 if (rv < 0) 2551 goto out_sysctl;
··· 469 return 0; 470 } 471 472 + void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si) 473 { 474 read_lock_bh(&xfrm_policy_lock); 475 + si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN]; 476 + si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT]; 477 + si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD]; 478 + si->inscnt = net->xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX]; 479 + si->outscnt = net->xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX]; 480 + si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX]; 481 + si->spdhcnt = net->xfrm.policy_idx_hmask; 482 si->spdhmcnt = xfrm_policy_hashmax; 483 read_unlock_bh(&xfrm_policy_lock); 484 } ··· 1309 return tos; 1310 } 1311 1312 + static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family) 1313 { 1314 struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); 1315 + struct dst_ops *dst_ops; 1316 struct xfrm_dst *xdst; 1317 1318 if (!afinfo) 1319 return ERR_PTR(-EINVAL); 1320 1321 + switch (family) { 1322 + case AF_INET: 1323 + dst_ops = &net->xfrm.xfrm4_dst_ops; 1324 + break; 1325 + #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 1326 + case AF_INET6: 1327 + dst_ops = &net->xfrm.xfrm6_dst_ops; 1328 + break; 1329 + #endif 1330 + default: 1331 + BUG(); 1332 + } 1333 + xdst = dst_alloc(dst_ops) ?: ERR_PTR(-ENOBUFS); 1334 1335 xfrm_policy_put_afinfo(afinfo); 1336 ··· 1366 struct flowi *fl, 1367 struct dst_entry *dst) 1368 { 1369 + struct net *net = xp_net(policy); 1370 unsigned long now = jiffies; 1371 struct net_device *dev; 1372 struct dst_entry *dst_prev = NULL; ··· 1389 dst_hold(dst); 1390 1391 for (; i < nx; i++) { 1392 + struct xfrm_dst *xdst = xfrm_alloc_dst(net, family); 1393 struct dst_entry *dst1 = &xdst->u.dst; 1394 1395 err = PTR_ERR(xdst); ··· 2279 2280 int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo) 2281 { 2282 + struct net *net; 2283 int err = 0; 2284 if (unlikely(afinfo == NULL)) 2285 return -EINVAL; ··· 2302 xfrm_policy_afinfo[afinfo->family] = afinfo; 2303 } 2304 write_unlock_bh(&xfrm_policy_afinfo_lock); 2305 + 2306 + rtnl_lock(); 2307 + for_each_net(net) { 2308 + struct dst_ops *xfrm_dst_ops; 2309 + 2310 + switch (afinfo->family) { 2311 + case AF_INET: 2312 + xfrm_dst_ops = &net->xfrm.xfrm4_dst_ops; 2313 + break; 2314 + #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 2315 + case AF_INET6: 2316 + xfrm_dst_ops = &net->xfrm.xfrm6_dst_ops; 2317 + break; 2318 + #endif 2319 + default: 2320 + BUG(); 2321 + } 2322 + *xfrm_dst_ops = *afinfo->dst_ops; 2323 + } 2324 + rtnl_unlock(); 2325 + 2326 return err; 2327 } 2328 EXPORT_SYMBOL(xfrm_policy_register_afinfo); ··· 2331 return err; 2332 } 2333 EXPORT_SYMBOL(xfrm_policy_unregister_afinfo); 2334 + 2335 + static void __net_init xfrm_dst_ops_init(struct net *net) 2336 + { 2337 + struct xfrm_policy_afinfo *afinfo; 2338 + 2339 + read_lock_bh(&xfrm_policy_afinfo_lock); 2340 + afinfo = xfrm_policy_afinfo[AF_INET]; 2341 + if (afinfo) 2342 + net->xfrm.xfrm4_dst_ops = *afinfo->dst_ops; 2343 + #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 2344 + afinfo = xfrm_policy_afinfo[AF_INET6]; 2345 + if (afinfo) 2346 + net->xfrm.xfrm6_dst_ops = *afinfo->dst_ops; 2347 + #endif 2348 + read_unlock_bh(&xfrm_policy_afinfo_lock); 2349 + } 2350 2351 static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family) 2352 { ··· 2494 rv = xfrm_policy_init(net); 2495 if (rv < 0) 2496 goto out_policy; 2497 + xfrm_dst_ops_init(net); 2498 rv = xfrm_sysctl_init(net); 2499 if (rv < 0) 2500 goto out_sysctl;
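Moving the dst_ops instances into struct net means the per-family templates supplied by afinfo registrants have to be stamped into every namespace: xfrm_policy_register_afinfo() copies the template into each live netns while holding rtnl_lock() to keep the namespace list stable, and xfrm_dst_ops_init() seeds each newly created netns from whatever families are already registered. Reduced to its essentials (types and the namespace iteration are simplified stand-ins for the kernel's, IPv4 only):

struct dst_ops { int gc_thresh; /* ... */ };
struct netns_xfrm { struct dst_ops xfrm4_dst_ops; };
struct net { struct netns_xfrm xfrm; struct net *next; };

static struct net *net_list;			/* stand-in for for_each_net() */
static const struct dst_ops *ipv4_template;	/* set at afinfo registration */

/* At afinfo registration: stamp the template into every live netns
 * (the real code walks the list under rtnl_lock()). */
static void propagate_template(const struct dst_ops *tmpl)
{
	struct net *net;

	for (net = net_list; net; net = net->next)
		net->xfrm.xfrm4_dst_ops = *tmpl;
}

/* At netns creation: seed the new namespace from the families already
 * registered, as xfrm_dst_ops_init() does (IPv6 handled likewise). */
static void netns_seed(struct net *net)
{
	if (ipv4_template)
		net->xfrm.xfrm4_dst_ops = *ipv4_template;
}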
+3 -3
net/xfrm/xfrm_state.c
··· 641 } 642 EXPORT_SYMBOL(xfrm_state_flush); 643 644 - void xfrm_sad_getinfo(struct xfrmk_sadinfo *si) 645 { 646 spin_lock_bh(&xfrm_state_lock); 647 - si->sadcnt = init_net.xfrm.state_num; 648 - si->sadhcnt = init_net.xfrm.state_hmask; 649 si->sadhmcnt = xfrm_state_hashmax; 650 spin_unlock_bh(&xfrm_state_lock); 651 }
··· 641 } 642 EXPORT_SYMBOL(xfrm_state_flush); 643 644 + void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si) 645 { 646 spin_lock_bh(&xfrm_state_lock); 647 + si->sadcnt = net->xfrm.state_num; 648 + si->sadhcnt = net->xfrm.state_hmask; 649 si->sadhmcnt = xfrm_state_hashmax; 650 spin_unlock_bh(&xfrm_state_lock); 651 }
+8 -6
net/xfrm/xfrm_user.c
··· 781 + nla_total_size(sizeof(struct xfrmu_spdhinfo)); 782 } 783 784 - static int build_spdinfo(struct sk_buff *skb, u32 pid, u32 seq, u32 flags) 785 { 786 struct xfrmk_spdinfo si; 787 struct xfrmu_spdinfo spc; ··· 796 797 f = nlmsg_data(nlh); 798 *f = flags; 799 - xfrm_spd_getinfo(&si); 800 spc.incnt = si.incnt; 801 spc.outcnt = si.outcnt; 802 spc.fwdcnt = si.fwdcnt; ··· 829 if (r_skb == NULL) 830 return -ENOMEM; 831 832 - if (build_spdinfo(r_skb, spid, seq, *flags) < 0) 833 BUG(); 834 835 return nlmsg_unicast(net->xfrm.nlsk, r_skb, spid); ··· 842 + nla_total_size(4); /* XFRMA_SAD_CNT */ 843 } 844 845 - static int build_sadinfo(struct sk_buff *skb, u32 pid, u32 seq, u32 flags) 846 { 847 struct xfrmk_sadinfo si; 848 struct xfrmu_sadhinfo sh; ··· 856 857 f = nlmsg_data(nlh); 858 *f = flags; 859 - xfrm_sad_getinfo(&si); 860 861 sh.sadhmcnt = si.sadhmcnt; 862 sh.sadhcnt = si.sadhcnt; ··· 884 if (r_skb == NULL) 885 return -ENOMEM; 886 887 - if (build_sadinfo(r_skb, spid, seq, *flags) < 0) 888 BUG(); 889 890 return nlmsg_unicast(net->xfrm.nlsk, r_skb, spid);
··· 781 + nla_total_size(sizeof(struct xfrmu_spdhinfo)); 782 } 783 784 + static int build_spdinfo(struct sk_buff *skb, struct net *net, 785 + u32 pid, u32 seq, u32 flags) 786 { 787 struct xfrmk_spdinfo si; 788 struct xfrmu_spdinfo spc; ··· 795 796 f = nlmsg_data(nlh); 797 *f = flags; 798 + xfrm_spd_getinfo(net, &si); 799 spc.incnt = si.incnt; 800 spc.outcnt = si.outcnt; 801 spc.fwdcnt = si.fwdcnt; ··· 828 if (r_skb == NULL) 829 return -ENOMEM; 830 831 + if (build_spdinfo(r_skb, net, spid, seq, *flags) < 0) 832 BUG(); 833 834 return nlmsg_unicast(net->xfrm.nlsk, r_skb, spid); ··· 841 + nla_total_size(4); /* XFRMA_SAD_CNT */ 842 } 843 844 + static int build_sadinfo(struct sk_buff *skb, struct net *net, 845 + u32 pid, u32 seq, u32 flags) 846 { 847 struct xfrmk_sadinfo si; 848 struct xfrmu_sadhinfo sh; ··· 854 855 f = nlmsg_data(nlh); 856 *f = flags; 857 + xfrm_sad_getinfo(net, &si); 858 859 sh.sadhmcnt = si.sadhmcnt; 860 sh.sadhcnt = si.sadhcnt; ··· 882 if (r_skb == NULL) 883 return -ENOMEM; 884 885 + if (build_sadinfo(r_skb, net, spid, seq, *flags) < 0) 886 BUG(); 887 888 return nlmsg_unicast(net->xfrm.nlsk, r_skb, spid);