Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (42 commits)
virtio_net: Make delayed refill more reliable
sfc: Use fixed-size buffers for MCDI NVRAM requests
sfc: Add workspace for GMAC bug workaround to MCDI MAC_STATS buffer
tcp_probe: avoid modulus operation and wrap fix
qlge: Only free resources if they were allocated
netns xfrm: deal with dst entries in netns
sky2: revert config space change
vlan: fix vlan_skb_recv()
netns xfrm: fix "ip xfrm state|policy count" misreport
sky2: Enable/disable WOL per hardware device
net: Fix IPv6 GSO type checks in Intel ethernet drivers
igb/igbvf: cleanup exception handling in tx_map_adv
MAINTAINERS: Add Intel igbvf maintainer
e1000/e1000e: don't use small hardware rx buffers
fmvj18x_cs: add new id (Panasonic lan & modem card)
be2net: swap only first 2 fields of mcc_wrb
Please add support for Microsoft MN-120 PCMCIA network card
be2net: fix bug in rx page posting
wimax/i2400m: Add support for more i6x50 SKUs
e1000e: enhance frame fragment detection
...

+389 -193
+4 -4
MAINTAINERS
···

 ASYNCHRONOUS TRANSFERS/TRANSFORMS (IOAT) API
 M: Dan Williams <dan.j.williams@intel.com>
-M: Maciej Sosnowski <maciej.sosnowski@intel.com>
 W: http://sourceforge.net/projects/xscaleiop
 S: Supported
 F: Documentation/crypto/async-tx-api.txt
···
 F: fs/dlm/

 DMA GENERIC OFFLOAD ENGINE SUBSYSTEM
-M: Maciej Sosnowski <maciej.sosnowski@intel.com>
 M: Dan Williams <dan.j.williams@intel.com>
 S: Supported
 F: drivers/dma/
···
 F: arch/x86/kernel/microcode_intel.c

 INTEL I/OAT DMA DRIVER
-M: Maciej Sosnowski <maciej.sosnowski@intel.com>
+M: Dan Williams <dan.j.williams@intel.com>
 S: Supported
 F: drivers/dma/ioat*
···
 S: Maintained
 F: drivers/net/ixp2000/

-INTEL ETHERNET DRIVERS (e100/e1000/e1000e/igb/ixgb/ixgbe)
+INTEL ETHERNET DRIVERS (e100/e1000/e1000e/igb/igbvf/ixgb/ixgbe)
 M: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
 M: Jesse Brandeburg <jesse.brandeburg@intel.com>
 M: Bruce Allan <bruce.w.allan@intel.com>
+M: Alex Duyck <alexander.h.duyck@intel.com>
 M: PJ Waskiewicz <peter.p.waskiewicz.jr@intel.com>
 M: John Ronciak <john.ronciak@intel.com>
 L: e1000-devel@lists.sourceforge.net
···
 F: drivers/net/e1000/
 F: drivers/net/e1000e/
 F: drivers/net/igb/
+F: drivers/net/igbvf/
 F: drivers/net/ixgb/
 F: drivers/net/ixgbe/
+1 -1
drivers/net/benet/be_cmds.c
···
                        MCC_WRB_SGE_CNT_SHIFT;
        wrb->payload_length = payload_len;
        wrb->tag0 = opcode;
-       be_dws_cpu_to_le(wrb, 20);
+       be_dws_cpu_to_le(wrb, 8);
 }

 /* Don't touch the hdr after it's prepared */
+5 -3
drivers/net/benet/be_main.c
···
 static void be_post_rx_frags(struct be_adapter *adapter)
 {
        struct be_rx_page_info *page_info_tbl = adapter->rx_obj.page_info_tbl;
-       struct be_rx_page_info *page_info = NULL;
+       struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
        struct be_queue_info *rxq = &adapter->rx_obj.q;
        struct page *pagep = NULL;
        struct be_eth_rx_d *rxd;
···
                rxd = queue_head_node(rxq);
                rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
                rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
-               queue_head_inc(rxq);

                /* Any space left in the current big page for another frag? */
                if ((page_offset + rx_frag_size + rx_frag_size) >
···
                        pagep = NULL;
                        page_info->last_page_user = true;
                }
+
+               prev_page_info = page_info;
+               queue_head_inc(rxq);
                page_info = &page_info_tbl[rxq->head];
        }
        if (pagep)
-               page_info->last_page_user = true;
+               prev_page_info->last_page_user = true;

        if (posted) {
                atomic_add(posted, &rxq->used);
+3 -2
drivers/net/bfin_mac.c
···
 #include <asm/dma.h>
 #include <linux/dma-mapping.h>

+#include <asm/dpmc.h>
 #include <asm/blackfin.h>
 #include <asm/cacheflush.h>
 #include <asm/portmux.h>
···
        u32 sclk, mdc_div;

        /* Enable PHY output early */
-       if (!(bfin_read_VR_CTL() & PHYCLKOE))
-               bfin_write_VR_CTL(bfin_read_VR_CTL() | PHYCLKOE);
+       if (!(bfin_read_VR_CTL() & CLKBUFOE))
+               bfin_write_VR_CTL(bfin_read_VR_CTL() | CLKBUFOE);

        sclk = get_sclk();
        mdc_div = ((sclk / MDC_CLK) / 2) - 1;
+2
drivers/net/e1000/e1000.h
···
        /* for ioport free */
        int bars;
        int need_ioport;
+
+       bool discarding;
 };

 enum e1000_state_t {
+17 -26
drivers/net/e1000/e1000_main.c
···
                rctl &= ~E1000_RCTL_SZ_4096;
                rctl |= E1000_RCTL_BSEX;
                switch (adapter->rx_buffer_len) {
-               case E1000_RXBUFFER_256:
-                       rctl |= E1000_RCTL_SZ_256;
-                       rctl &= ~E1000_RCTL_BSEX;
-                       break;
-               case E1000_RXBUFFER_512:
-                       rctl |= E1000_RCTL_SZ_512;
-                       rctl &= ~E1000_RCTL_BSEX;
-                       break;
-               case E1000_RXBUFFER_1024:
-                       rctl |= E1000_RCTL_SZ_1024;
-                       rctl &= ~E1000_RCTL_BSEX;
-                       break;
                case E1000_RXBUFFER_2048:
                default:
                        rctl |= E1000_RCTL_SZ_2048;
···
 dma_error:
        dev_err(&pdev->dev, "TX DMA map failed\n");
        buffer_info->dma = 0;
-       count--;
-
-       while (count >= 0) {
+       if (count)
                count--;
-               i--;
-               if (i < 0)
+
+       while (count--) {
+               if (i==0)
                        i += tx_ring->count;
+               i--;
                buffer_info = &tx_ring->buffer_info[i];
                e1000_unmap_and_free_tx_resource(adapter, buffer_info);
        }
···
         * however with the new *_jumbo_rx* routines, jumbo receives will use
         * fragmented skbs */

-       if (max_frame <= E1000_RXBUFFER_256)
-               adapter->rx_buffer_len = E1000_RXBUFFER_256;
-       else if (max_frame <= E1000_RXBUFFER_512)
-               adapter->rx_buffer_len = E1000_RXBUFFER_512;
-       else if (max_frame <= E1000_RXBUFFER_1024)
-               adapter->rx_buffer_len = E1000_RXBUFFER_1024;
-       else if (max_frame <= E1000_RXBUFFER_2048)
+       if (max_frame <= E1000_RXBUFFER_2048)
                adapter->rx_buffer_len = E1000_RXBUFFER_2048;
        else
 #if (PAGE_SIZE >= E1000_RXBUFFER_16384)
···

                length = le16_to_cpu(rx_desc->length);
                /* !EOP means multiple descriptors were used to store a single
-                * packet, also make sure the frame isn't just CRC only */
-               if (unlikely(!(status & E1000_RXD_STAT_EOP) || (length <= 4))) {
+                * packet, if thats the case we need to toss it.  In fact, we
+                * need to toss every packet with the EOP bit clear and the
+                * next frame that _does_ have the EOP bit set, as it is by
+                * definition only a frame fragment
+                */
+               if (unlikely(!(status & E1000_RXD_STAT_EOP)))
+                       adapter->discarding = true;
+
+               if (adapter->discarding) {
                        /* All receives must fit into a single buffer */
                        E1000_DBG("%s: Receive packet consumed multiple"
                                  " buffers\n", netdev->name);
                        /* recycle */
                        buffer_info->skb = skb;
+                       if (status & E1000_RXD_STAT_EOP)
+                               adapter->discarding = false;
                        goto next_desc;
                }
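The discard logic above is a small two-state machine. Below is a standalone C model of it (the descriptor array and its EOP flags are invented for illustration; the driver reads these bits from real rx descriptors). Note how a trailing EOP descriptor is still tossed, since it is only the tail of a fragmented frame:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
        /* invented rx descriptor stream: true = End Of Packet bit set */
        bool eop[] = { true, false, false, true, true };
        bool discarding = false;

        for (unsigned int i = 0; i < sizeof(eop) / sizeof(eop[0]); i++) {
                if (!eop[i])
                        discarding = true;      /* fragment seen: start tossing */

                if (discarding) {
                        printf("desc %u: recycled\n", i);
                        if (eop[i])
                                discarding = false;     /* run ends on this EOP */
                        continue;
                }
                printf("desc %u: delivered\n", i);
        }
        return 0;
}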
+1
drivers/net/e1000e/e1000.h
···
 /* CRC Stripping defines */
 #define FLAG2_CRC_STRIPPING               (1 << 0)
 #define FLAG2_HAS_PHY_WAKEUP              (1 << 1)
+#define FLAG2_IS_DISCARDING               (1 << 2)

 #define E1000_RX_DESC_PS(R, i) \
        (&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
+28 -29
drivers/net/e1000e/netdev.c
···

                length = le16_to_cpu(rx_desc->length);

-               /* !EOP means multiple descriptors were used to store a single
-                * packet, also make sure the frame isn't just CRC only */
-               if (!(status & E1000_RXD_STAT_EOP) || (length <= 4)) {
+               /*
+                * !EOP means multiple descriptors were used to store a single
+                * packet, if that's the case we need to toss it.  In fact, we
+                * need to toss every packet with the EOP bit clear and the
+                * next frame that _does_ have the EOP bit set, as it is by
+                * definition only a frame fragment
+                */
+               if (unlikely(!(status & E1000_RXD_STAT_EOP)))
+                       adapter->flags2 |= FLAG2_IS_DISCARDING;
+
+               if (adapter->flags2 & FLAG2_IS_DISCARDING) {
                        /* All receives must fit into a single buffer */
                        e_dbg("Receive packet consumed multiple buffers\n");
                        /* recycle */
                        buffer_info->skb = skb;
+                       if (status & E1000_RXD_STAT_EOP)
+                               adapter->flags2 &= ~FLAG2_IS_DISCARDING;
                        goto next_desc;
                }
···
                                 PCI_DMA_FROMDEVICE);
                buffer_info->dma = 0;

-               if (!(staterr & E1000_RXD_STAT_EOP)) {
+               /* see !EOP comment in other rx routine */
+               if (!(staterr & E1000_RXD_STAT_EOP))
+                       adapter->flags2 |= FLAG2_IS_DISCARDING;
+
+               if (adapter->flags2 & FLAG2_IS_DISCARDING) {
                        e_dbg("Packet Split buffers didn't pick up the full "
                              "packet\n");
                        dev_kfree_skb_irq(skb);
+                       if (staterr & E1000_RXD_STAT_EOP)
+                               adapter->flags2 &= ~FLAG2_IS_DISCARDING;
                        goto next_desc;
                }
···

        rx_ring->next_to_clean = 0;
        rx_ring->next_to_use = 0;
+       adapter->flags2 &= ~FLAG2_IS_DISCARDING;

        writel(0, adapter->hw.hw_addr + rx_ring->head);
        writel(0, adapter->hw.hw_addr + rx_ring->tail);
···
                rctl &= ~E1000_RCTL_SZ_4096;
                rctl |= E1000_RCTL_BSEX;
                switch (adapter->rx_buffer_len) {
-               case 256:
-                       rctl |= E1000_RCTL_SZ_256;
-                       rctl &= ~E1000_RCTL_BSEX;
-                       break;
-               case 512:
-                       rctl |= E1000_RCTL_SZ_512;
-                       rctl &= ~E1000_RCTL_BSEX;
-                       break;
-               case 1024:
-                       rctl |= E1000_RCTL_SZ_1024;
-                       rctl &= ~E1000_RCTL_BSEX;
-                       break;
                case 2048:
                default:
                        rctl |= E1000_RCTL_SZ_2048;
···
                                                 0, IPPROTO_TCP, 0);
                cmd_length = E1000_TXD_CMD_IP;
                ipcse = skb_transport_offset(skb) - 1;
-       } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
+       } else if (skb_is_gso_v6(skb)) {
                ipv6_hdr(skb)->payload_len = 0;
                tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
                                                       &ipv6_hdr(skb)->daddr,
···
 dma_error:
        dev_err(&pdev->dev, "TX DMA map failed\n");
        buffer_info->dma = 0;
-       count--;
-
-       while (count >= 0) {
+       if (count)
                count--;
-               i--;
-               if (i < 0)
+
+       while (count--) {
+               if (i==0)
                        i += tx_ring->count;
+               i--;
                buffer_info = &tx_ring->buffer_info[i];
                e1000_put_txbuf(adapter, buffer_info);;
        }
···
         * fragmented skbs
         */

-       if (max_frame <= 256)
-               adapter->rx_buffer_len = 256;
-       else if (max_frame <= 512)
-               adapter->rx_buffer_len = 512;
-       else if (max_frame <= 1024)
-               adapter->rx_buffer_len = 1024;
-       else if (max_frame <= 2048)
+       if (max_frame <= 2048)
                adapter->rx_buffer_len = 2048;
        else
                adapter->rx_buffer_len = 4096;
+2 -2
drivers/net/igb/igb_main.c
···
                                               iph->daddr, 0,
                                               IPPROTO_TCP,
                                               0);
-       } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
+       } else if (skb_is_gso_v6(skb)) {
                ipv6_hdr(skb)->payload_len = 0;
                tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
                                                       &ipv6_hdr(skb)->daddr,
···
        for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
                struct skb_frag_struct *frag;

+               count++;
                i++;
                if (i == tx_ring->count)
                        i = 0;
···
                if (pci_dma_mapping_error(pdev, buffer_info->dma))
                        goto dma_error;

-               count++;
        }

        tx_ring->buffer_info[i].skb = skb;
+7 -7
drivers/net/igbvf/netdev.c
···
                                               iph->daddr, 0,
                                               IPPROTO_TCP,
                                               0);
-       } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
+       } else if (skb_is_gso_v6(skb)) {
                ipv6_hdr(skb)->payload_len = 0;
                tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
                                                       &ipv6_hdr(skb)->daddr,
···
        for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
                struct skb_frag_struct *frag;

+               count++;
                i++;
                if (i == tx_ring->count)
                        i = 0;
···
                                 PCI_DMA_TODEVICE);
                if (pci_dma_mapping_error(pdev, buffer_info->dma))
                        goto dma_error;
-               count++;
        }

        tx_ring->buffer_info[i].skb = skb;
···
        buffer_info->length = 0;
        buffer_info->next_to_watch = 0;
        buffer_info->mapped_as_page = false;
-       count--;
+       if (count)
+               count--;

        /* clear timestamp and dma mappings for remaining portion of packet */
-       while (count >= 0) {
-               count--;
-               i--;
-               if (i < 0)
+       while (count--) {
+               if (i==0)
                        i += tx_ring->count;
+               i--;
                buffer_info = &tx_ring->buffer_info[i];
                igbvf_put_txbuf(adapter, buffer_info);
        }
+5 -5
drivers/net/ixgb/ixgb_main.c
···
 dma_error:
        dev_err(&pdev->dev, "TX DMA map failed\n");
        buffer_info->dma = 0;
-       count--;
-
-       while (count >= 0) {
+       if (count)
                count--;
-               i--;
-               if (i < 0)
+
+       while (count--) {
+               if (i==0)
                        i += tx_ring->count;
+               i--;
                buffer_info = &tx_ring->buffer_info[i];
                ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
        }
+6 -6
drivers/net/ixgbe/ixgbe_main.c
···
                                                         iph->daddr, 0,
                                                         IPPROTO_TCP,
                                                         0);
-               } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
+               } else if (skb_is_gso_v6(skb)) {
                        ipv6_hdr(skb)->payload_len = 0;
                        tcp_hdr(skb)->check =
                            ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
···
        tx_buffer_info->dma = 0;
        tx_buffer_info->time_stamp = 0;
        tx_buffer_info->next_to_watch = 0;
-       count--;
+       if (count)
+               count--;

        /* clear timestamp and dma mappings for remaining portion of packet */
-       while (count >= 0) {
-               count--;
-               i--;
-               if (i < 0)
+       while (count--) {
+               if (i==0)
                        i += tx_ring->count;
+               i--;
                tx_buffer_info = &tx_ring->tx_buffer_info[i];
                ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
        }
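The same dma_error unwind appears in the e1000, e1000e, igbvf, ixgb and ixgbe diffs above. A standalone sketch of the fixed loop follows (the ring size, starting indices, and the printf standing in for the real unmap call are invented). The indices in these drivers are unsigned, so the old "i--; if (i < 0)" wrap test could never fire; the fixed form tests for zero before decrementing, and "if (count) count--" skips the descriptor whose mapping never succeeded:

#include <stdio.h>

#define RING_COUNT 8

int main(void)
{
        unsigned int i = 1;     /* descriptor the mapping failed on */
        unsigned int count = 3; /* descriptors already mapped */

        if (count)
                count--;

        /* walk backwards over the previously mapped descriptors,
         * wrapping correctly around the start of the ring */
        while (count--) {
                if (i == 0)
                        i += RING_COUNT;
                i--;
                printf("unmap buffer_info[%u]\n", i);
        }
        return 0;
}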
+1
drivers/net/pcmcia/fmvj18x_cs.c
···
        PCMCIA_PFC_DEVICE_PROD_ID12(0, "NEC", "PK-UG-J001" ,0x18df0ba0 ,0x831b1064),
        PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0x0d0a),
        PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0x0e0a),
+       PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0e01),
        PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0a05),
        PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x1101),
        PCMCIA_DEVICE_NULL,
+1 -3
drivers/net/phy/phy.c
···


 static void phy_change(struct work_struct *work);
-static void phy_state_machine(struct work_struct *work);

 /**
  * phy_start_machine - start PHY state machine tracking
···
 {
        phydev->adjust_state = handler;

-       INIT_DELAYED_WORK(&phydev->state_queue, phy_state_machine);
        schedule_delayed_work(&phydev->state_queue, HZ);
 }

···
  * phy_state_machine - Handle the state machine
  * @work: work_struct that describes the work to be done
  */
-static void phy_state_machine(struct work_struct *work)
+void phy_state_machine(struct work_struct *work)
 {
        struct delayed_work *dwork = to_delayed_work(work);
        struct phy_device *phydev =
+1
drivers/net/phy/phy_device.c
···
        dev->state = PHY_DOWN;

        mutex_init(&dev->lock);
+       INIT_DELAYED_WORK(&dev->state_queue, phy_state_machine);

        return dev;
 }
+8 -7
drivers/net/qlge/qlge_main.c
···
        err = pcie_set_readrq(pdev, 4096);
        if (err) {
                dev_err(&pdev->dev, "Set readrq failed.\n");
-               goto err_out;
+               goto err_out1;
        }

        err = pci_request_regions(pdev, DRV_NAME);
···

        if (err) {
                dev_err(&pdev->dev, "No usable DMA configuration.\n");
-               goto err_out;
+               goto err_out2;
        }

        /* Set PCIe reset type for EEH to fundamental. */
···
        if (!qdev->reg_base) {
                dev_err(&pdev->dev, "Register mapping failed.\n");
                err = -ENOMEM;
-               goto err_out;
+               goto err_out2;
        }

        qdev->doorbell_area_size = pci_resource_len(pdev, 3);
···
        if (!qdev->doorbell_area) {
                dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
                err = -ENOMEM;
-               goto err_out;
+               goto err_out2;
        }

        err = ql_get_board_info(qdev);
        if (err) {
                dev_err(&pdev->dev, "Register access failed.\n");
                err = -EIO;
-               goto err_out;
+               goto err_out2;
        }
        qdev->msg_enable = netif_msg_init(debug, default_msg);
        spin_lock_init(&qdev->hw_lock);
···
        err = qdev->nic_ops->get_flash(qdev);
        if (err) {
                dev_err(&pdev->dev, "Invalid FLASH.\n");
-               goto err_out;
+               goto err_out2;
        }

        memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
···
                        DRV_NAME, DRV_VERSION);
        }
        return 0;
-err_out:
+err_out2:
        ql_release_all(pdev);
+err_out1:
        pci_disable_device(pdev);
        return err;
 }
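The qlge change converts a single err_out label into the layered-label idiom, so resources acquired before pci_request_regions() are not torn down by ql_release_all(). A minimal sketch of that idiom with invented step/release names (in the driver the layers are ql_release_all() and pci_disable_device()):

#include <stdio.h>

static int step_a(void) { return 0; }            /* e.g. enable the device */
static int step_b(void) { return 0; }            /* e.g. request regions */
static int step_c(void) { return -1; }           /* a later step that fails */
static void release_b(void) { puts("release b"); }
static void release_a(void) { puts("release a"); }

static int probe(void)
{
        int err = step_a();
        if (err)
                return err;

        err = step_b();
        if (err)
                goto err_out1;          /* only step_a's resources held */

        err = step_c();
        if (err)
                goto err_out2;          /* both layers held: unwind both */

        return 0;

err_out2:
        release_b();
err_out1:
        release_a();
        return err;
}

int main(void)
{
        return probe() ? 1 : 0;
}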
+1 -1
drivers/net/s2io.c
···
                        break;
                }
        } else {
-               if (!(val64 & busy_bit)) {
+               if (val64 & busy_bit) {
                        ret = SUCCESS;
                        break;
                }
+4 -3
drivers/net/sfc/mcdi.c
···
                        loff_t offset, u8 *buffer, size_t length)
 {
        u8 inbuf[MC_CMD_NVRAM_READ_IN_LEN];
-       u8 outbuf[MC_CMD_NVRAM_READ_OUT_LEN(length)];
+       u8 outbuf[MC_CMD_NVRAM_READ_OUT_LEN(EFX_MCDI_NVRAM_LEN_MAX)];
        size_t outlen;
        int rc;

···
 int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
                         loff_t offset, const u8 *buffer, size_t length)
 {
-       u8 inbuf[MC_CMD_NVRAM_WRITE_IN_LEN(length)];
+       u8 inbuf[MC_CMD_NVRAM_WRITE_IN_LEN(EFX_MCDI_NVRAM_LEN_MAX)];
        int rc;

        MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type);
···

        BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0);

-       rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf, sizeof(inbuf),
+       rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf,
+                         ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4),
                          NULL, 0, NULL);
        if (rc)
                goto fail;
+1
drivers/net/sfc/mcdi.h
···
 extern int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
                                loff_t offset, const u8 *buffer,
                                size_t length);
+#define EFX_MCDI_NVRAM_LEN_MAX 128
 extern int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
                                loff_t offset, size_t length);
 extern int efx_mcdi_nvram_update_finish(struct efx_nic *efx,
+3 -1
drivers/net/sfc/mcdi_pcol.h
···
 #define MC_CMD_MAC_RX_LANES01_DISP_ERR 57
 #define MC_CMD_MAC_RX_LANES23_DISP_ERR 58
 #define MC_CMD_MAC_RX_MATCH_FAULT 59
+#define MC_CMD_GMAC_DMABUF_START 64
+#define MC_CMD_GMAC_DMABUF_END 95
 /* Insert new members here. */
-#define MC_CMD_MAC_GENERATION_END 60
+#define MC_CMD_MAC_GENERATION_END 96
 #define MC_CMD_MAC_NSTATS (MC_CMD_MAC_GENERATION_END+1)

 /* MC_CMD_MAC_STATS:
+2 -3
drivers/net/sfc/mtd.c
···
 #include "mcdi_pcol.h"

 #define EFX_SPI_VERIFY_BUF_LEN 16
-#define EFX_MCDI_CHUNK_LEN 128

 struct efx_mtd_partition {
        struct mtd_info mtd;
···
        int rc = 0;

        while (offset < end) {
-               chunk = min_t(size_t, end - offset, EFX_MCDI_CHUNK_LEN);
+               chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
                rc = efx_mcdi_nvram_read(efx, part->mcdi.nvram_type, offset,
                                         buffer, chunk);
                if (rc)
···
        }

        while (offset < end) {
-               chunk = min_t(size_t, end - offset, EFX_MCDI_CHUNK_LEN);
+               chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
                rc = efx_mcdi_nvram_write(efx, part->mcdi.nvram_type, offset,
                                          buffer, chunk);
                if (rc)
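Together, the three sfc changes bound every MCDI NVRAM transfer by one compile-time maximum, so the request and response buffers can be fixed-size arrays instead of variable-length ones. A standalone sketch of the resulting chunk loop (nvram_read() is an invented stand-in for efx_mcdi_nvram_read()):

#include <stddef.h>
#include <stdio.h>

#define EFX_MCDI_NVRAM_LEN_MAX 128

static int nvram_read(size_t offset, size_t len)
{
        printf("read %zu bytes at offset %zu\n", len, offset);
        return 0;
}

int main(void)
{
        size_t offset = 0, end = 300, chunk;

        while (offset < end) {
                /* min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX) */
                chunk = end - offset;
                if (chunk > EFX_MCDI_NVRAM_LEN_MAX)
                        chunk = EFX_MCDI_NVRAM_LEN_MAX;
                if (nvram_read(offset, chunk))
                        return 1;
                offset += chunk;
        }
        return 0;
}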
-6
drivers/net/sfc/qt202x_phy.c
···
        /* Wait 250ms for the PHY to complete bootup */
        msleep(250);

-       /* Check that all the MMDs we expect are present and responding. We
-        * expect faults on some if the link is down, but not on the PHY XS */
-       rc = efx_mdio_check_mmds(efx, QT202X_REQUIRED_DEVS, MDIO_DEVS_PHYXS);
-       if (rc < 0)
-               goto fail;
-
        falcon_board(efx)->type->init_phy(efx);

        return rc;
+35 -7
drivers/net/sky2.c
···
 {
        u32 reg1;

+       sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
        reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
        reg1 &= ~phy_power[port];

···
                reg1 |= coma_mode[port];

        sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
+       sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
        sky2_pci_read32(hw, PCI_DEV_REG1);

        if (hw->chip_id == CHIP_ID_YUKON_FE)
···
                gm_phy_write(hw, port, PHY_MARV_CTRL, PHY_CT_PDOWN);
        }

+       sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
        reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
        reg1 |= phy_power[port];        /* set PHY to PowerDown/COMA Mode */
        sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
+       sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
 }

 /* Force a renegotiation */
···

                /* reset PHY Link Detect */
                phy = sky2_pci_read16(hw, PSM_CONFIG_REG4);
+               sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
                sky2_pci_write16(hw, PSM_CONFIG_REG4, phy | 1);
+               sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);

                sky2_link_up(sky2);
        }
···
        if (status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) {
                u16 pci_err;

+               sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
                pci_err = sky2_pci_read16(hw, PCI_STATUS);
                if (net_ratelimit())
                        dev_err(&pdev->dev, "PCI hardware error (0x%x)\n",
···

                sky2_pci_write16(hw, PCI_STATUS,
                                 pci_err | PCI_STATUS_ERROR_BITS);
+               sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
        }

        if (status & Y2_IS_PCI_EXP) {
                /* PCI-Express uncorrectable Error occurred */
                u32 err;

+               sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
                err = sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS);
                sky2_write32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS,
                             0xfffffffful);
···
                        dev_err(&pdev->dev, "PCI Express error (0x%x)\n", err);

                sky2_read32(hw, Y2_CFG_AER + PCI_ERR_UNCOR_STATUS);
+               sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
        }

        if (status & Y2_HWE_L1_MASK)
···
        }

        sky2_power_on(hw);
+       sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);

        for (i = 0; i < hw->ports; i++) {
                sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET);
···
        reg <<= PSM_CONFIG_REG4_TIMER_PHY_LINK_DETECT_BASE;

        /* reset PHY Link Detect */
+       sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
        sky2_pci_write16(hw, PSM_CONFIG_REG4,
                         reg | PSM_CONFIG_REG4_RST_PHY_LINK_DETECT);
        sky2_pci_write16(hw, PSM_CONFIG_REG4, reg);
···
                /* restore the PCIe Link Control register */
                sky2_pci_write16(hw, cap + PCI_EXP_LNKCTL, reg);
        }
+       sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);

        /* re-enable PEX PM in PEX PHY debug reg. 8 (clear bit 12) */
        sky2_write32(hw, Y2_PEX_PHY_DATA, PEX_DB_ACCESS | (0x08UL << 16));
···
        return sky2_is_copper(hw) ? (WAKE_PHY | WAKE_MAGIC) : 0;
 }

+static void sky2_hw_set_wol(struct sky2_hw *hw)
+{
+       int wol = 0;
+       int i;
+
+       for (i = 0; i < hw->ports; i++) {
+               struct net_device *dev = hw->dev[i];
+               struct sky2_port *sky2 = netdev_priv(dev);
+
+               if (sky2->wol)
+                       wol = 1;
+       }
+
+       if (hw->chip_id == CHIP_ID_YUKON_EC_U ||
+           hw->chip_id == CHIP_ID_YUKON_EX ||
+           hw->chip_id == CHIP_ID_YUKON_FE_P)
+               sky2_write32(hw, B0_CTST, wol ? Y2_HW_WOL_ON : Y2_HW_WOL_OFF);
+
+       device_set_wakeup_enable(&hw->pdev->dev, wol);
+}
+
 static void sky2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 {
        const struct sky2_port *sky2 = netdev_priv(dev);
···

        sky2->wol = wol->wolopts;

-       if (hw->chip_id == CHIP_ID_YUKON_EC_U ||
-           hw->chip_id == CHIP_ID_YUKON_EX ||
-           hw->chip_id == CHIP_ID_YUKON_FE_P)
-               sky2_write32(hw, B0_CTST, sky2->wol
-                            ? Y2_HW_WOL_ON : Y2_HW_WOL_OFF);
-
-       device_set_wakeup_enable(&hw->pdev->dev, sky2->wol);
+       sky2_hw_set_wol(hw);

        if (!netif_running(dev))
                sky2_wol_init(sky2);
+1
drivers/net/tulip/tulip_core.c
···
        { 0x17B3, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
        { 0x10b7, 0x9300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* 3Com 3CSOHO100B-TX */
        { 0x14ea, 0xab08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Planex FNW-3602-TX */
+       { 0x1414, 0x0001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Microsoft MN-120 */
        { 0x1414, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
        { } /* terminate list */
 };
+2 -3
drivers/net/ucc_geth.c
···
                /* Handle the transmitted buffer and release */
                /* the BD to be used with the current frame */

-               if (bd == ugeth->txBd[txQ]) /* queue empty? */
+               skb = ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]];
+               if (!skb)
                        break;

                dev->stats.tx_packets++;
-
-               skb = ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]];

                if (skb_queue_len(&ugeth->rx_recycle) < RX_BD_RING_LEN &&
                             skb_recycle_check(skb,
+1 -2
drivers/net/virtio_net.c
···

        vi = container_of(work, struct virtnet_info, refill.work);
        napi_disable(&vi->napi);
-       try_fill_recv(vi, GFP_KERNEL);
-       still_empty = (vi->num == 0);
+       still_empty = !try_fill_recv(vi, GFP_KERNEL);
        napi_enable(&vi->napi);

        /* In theory, this can happen: if we don't get any buffers in
+2
drivers/net/wimax/i2400m/i2400m-usb.h
···

        /* Device IDs */
        USB_DEVICE_ID_I6050 = 0x0186,
+       USB_DEVICE_ID_I6050_2 = 0x0188,
 };

···
        u8 rx_size_auto_shrink;

        struct dentry *debugfs_dentry;
+       unsigned i6050:1;       /* 1 if this is a 6050 based SKU */
 };

+11 -1
drivers/net/wimax/i2400m/usb.c
···
        i2400m->bus_bm_wait_for_ack = i2400mu_bus_bm_wait_for_ack;
        i2400m->bus_bm_mac_addr_impaired = 0;

-       if (id->idProduct == USB_DEVICE_ID_I6050) {
+       switch (id->idProduct) {
+       case USB_DEVICE_ID_I6050:
+       case USB_DEVICE_ID_I6050_2:
+               i2400mu->i6050 = 1;
+               break;
+       default:
+               break;
+       }
+
+       if (i2400mu->i6050) {
                i2400m->bus_fw_names = i2400mu_bus_fw_names_6050;
                i2400mu->endpoint_cfg.bulk_out = 0;
                i2400mu->endpoint_cfg.notification = 3;
···
 static
 struct usb_device_id i2400mu_id_table[] = {
        { USB_DEVICE(0x8086, USB_DEVICE_ID_I6050) },
+       { USB_DEVICE(0x8086, USB_DEVICE_ID_I6050_2) },
        { USB_DEVICE(0x8086, 0x0181) },
        { USB_DEVICE(0x8086, 0x1403) },
        { USB_DEVICE(0x8086, 0x1405) },
+5
drivers/net/wireless/iwlwifi/iwl-5000.c
···
        .use_bsm = false,
        .ht_greenfield_support = true,
        .led_compensation = 51,
+       .use_rts_for_ht = true, /* use rts/cts protection */
        .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
        .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
 };
···
        .use_bsm = false,
        .ht_greenfield_support = true,
        .led_compensation = 51,
+       .use_rts_for_ht = true, /* use rts/cts protection */
        .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
 };

···
        .use_bsm = false,
        .ht_greenfield_support = true,
        .led_compensation = 51,
+       .use_rts_for_ht = true, /* use rts/cts protection */
        .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
        .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
 };
···
        .use_bsm = false,
        .ht_greenfield_support = true,
        .led_compensation = 51,
+       .use_rts_for_ht = true, /* use rts/cts protection */
        .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
        .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
 };
···
        .use_bsm = false,
        .ht_greenfield_support = true,
        .led_compensation = 51,
+       .use_rts_for_ht = true, /* use rts/cts protection */
        .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
        .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
 };
+26
drivers/net/wireless/iwlwifi/iwl-devtrace.c
···
+/******************************************************************************
+ *
+ * Copyright(c) 2009 - 2010 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
 #include <linux/module.h>

 /* sparse doesn't like tracepoint macros */
+26
drivers/net/wireless/iwlwifi/iwl-devtrace.h
···
+/******************************************************************************
+ *
+ * Copyright(c) 2009 - 2010 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
 #if !defined(__IWLWIFI_DEVICE_TRACE) || defined(TRACE_HEADER_MULTI_READ)
 #define __IWLWIFI_DEVICE_TRACE

+4
drivers/net/wireless/iwmc3200wifi/commands.c
···

        memset(&update, 0, sizeof(struct iwm_umac_pmkid_update));

+       update.hdr.oid = UMAC_WIFI_IF_CMD_PMKID_UPDATE;
+       update.hdr.buf_size = cpu_to_le16(sizeof(struct iwm_umac_pmkid_update) -
+                                         sizeof(struct iwm_umac_wifi_if));
+
        update.command = cpu_to_le32(command);
        if (pmksa->bssid)
                memcpy(&update.bssid, pmksa->bssid, ETH_ALEN);
+1
drivers/net/wireless/iwmc3200wifi/commands.h
···
 #define IWM_CMD_PMKID_FLUSH 3

 struct iwm_umac_pmkid_update {
+       struct iwm_umac_wifi_if hdr;
        __le32 command;
        u8 bssid[ETH_ALEN];
        __le16 reserved;
+8
drivers/net/wireless/p54/p54pci.c
···
                        i %= ring_limit;
                        continue;
                }
+
+               if (unlikely(len > priv->common.rx_mtu)) {
+                       if (net_ratelimit())
+                               dev_err(&priv->pdev->dev, "rx'd frame size "
+                                       "exceeds length threshold.\n");
+
+                       len = priv->common.rx_mtu;
+               }
                skb_put(skb, len);

                if (p54_rx(dev, skb)) {
+1
drivers/net/wireless/zd1211rw/zd_usb.c
···
        { USB_DEVICE(0x6891, 0xa727), .driver_info = DEVICE_ZD1211 },
        /* ZD1211B */
        { USB_DEVICE(0x0053, 0x5301), .driver_info = DEVICE_ZD1211B },
+       { USB_DEVICE(0x0409, 0x0248), .driver_info = DEVICE_ZD1211B },
        { USB_DEVICE(0x0411, 0x00da), .driver_info = DEVICE_ZD1211B },
        { USB_DEVICE(0x0471, 0x1236), .driver_info = DEVICE_ZD1211B },
        { USB_DEVICE(0x0471, 0x1237), .driver_info = DEVICE_ZD1211B },
+1
drivers/serial/serial_cs.c
···
        PCMCIA_PFC_DEVICE_PROD_ID12(1, "PCMCIAs", "LanModem", 0xdcfe12d3, 0xc67c648f),
        PCMCIA_PFC_DEVICE_PROD_ID12(1, "TDK", "GlobalNetworker 3410/3412", 0x1eae9475, 0xd9a93bed),
        PCMCIA_PFC_DEVICE_PROD_ID12(1, "Xircom", "CreditCard Ethernet+Modem II", 0x2e3ee845, 0xeca401bf),
+       PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0e01),
        PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0a05),
        PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x1101),
        PCMCIA_MFC_DEVICE_MANF_CARD(0, 0x0104, 0x0070),
+1
include/linux/phy.h
···
 int phy_driver_register(struct phy_driver *new_driver);
 void phy_prepare_link(struct phy_device *phydev,
                void (*adjust_link)(struct net_device *));
+void phy_state_machine(struct work_struct *work);
 void phy_start_machine(struct phy_device *phydev,
                void (*handler)(struct net_device *));
 void phy_stop_machine(struct phy_device *phydev);
+6
include/net/netns/xfrm.h
···
 #include <linux/wait.h>
 #include <linux/workqueue.h>
 #include <linux/xfrm.h>
+#include <net/dst_ops.h>

 struct ctl_table_header;

···
        struct xfrm_policy_hash policy_bydst[XFRM_POLICY_MAX * 2];
        unsigned int            policy_count[XFRM_POLICY_MAX * 2];
        struct work_struct      policy_hash_work;
+
+       struct dst_ops          xfrm4_dst_ops;
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+       struct dst_ops          xfrm6_dst_ops;
+#endif

        struct sock             *nlsk;
        struct sock             *nlsk_stash;
+2
include/net/netrom.h
···
 static __inline__ void nr_neigh_put(struct nr_neigh *nr_neigh)
 {
        if (atomic_dec_and_test(&nr_neigh->refcount)) {
+               if (nr_neigh->ax25)
+                       ax25_cb_put(nr_neigh->ax25);
                kfree(nr_neigh->digipeat);
                kfree(nr_neigh);
        }
+2 -2
include/net/xfrm.h
···
 extern struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 seq);
 extern int xfrm_state_delete(struct xfrm_state *x);
 extern int xfrm_state_flush(struct net *net, u8 proto, struct xfrm_audit *audit_info);
-extern void xfrm_sad_getinfo(struct xfrmk_sadinfo *si);
-extern void xfrm_spd_getinfo(struct xfrmk_spdinfo *si);
+extern void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si);
+extern void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si);
 extern int xfrm_replay_check(struct xfrm_state *x,
                             struct sk_buff *skb, __be32 seq);
 extern void xfrm_replay_advance(struct xfrm_state *x, __be32 seq);
+1 -1
net/8021q/vlan_dev.c
···
                goto err_unlock;
        }

-       rx_stats = per_cpu_ptr(vlan_dev_info(dev)->vlan_rx_stats,
+       rx_stats = per_cpu_ptr(vlan_dev_info(skb->dev)->vlan_rx_stats,
                               smp_processor_id());
        rx_stats->rx_packets++;
        rx_stats->rx_bytes += skb->len;
+1 -1
net/appletalk/aarp.c
···
                ma = &ifa->address;
        else {  /* We need to make a copy of the entry. */
                da.s_node = sa.s_node;
-               da.s_net = da.s_net;
+               da.s_net = sa.s_net;
                ma = &da;
        }

+6
net/ax25/ax25_out.c
···
 #endif
        }

+       /*
+        * There is one ref for the state machine; a caller needs
+        * one more to put it back, just like with the existing one.
+        */
+       ax25_cb_hold(ax25);
+
        ax25_cb_add(ax25);

        ax25->state = AX25_STATE_1;
+5 -13
net/dccp/ccid.c
···
        return err;
 }

-static struct kmem_cache *ccid_kmem_cache_create(int obj_size, const char *fmt,...)
+static struct kmem_cache *ccid_kmem_cache_create(int obj_size, char *slab_name_fmt, const char *fmt,...)
 {
        struct kmem_cache *slab;
-       char slab_name_fmt[32], *slab_name;
        va_list args;

        va_start(args, fmt);
        vsnprintf(slab_name_fmt, sizeof(slab_name_fmt), fmt, args);
        va_end(args);

-       slab_name = kstrdup(slab_name_fmt, GFP_KERNEL);
-       if (slab_name == NULL)
-               return NULL;
-       slab = kmem_cache_create(slab_name, sizeof(struct ccid) + obj_size, 0,
+       slab = kmem_cache_create(slab_name_fmt, sizeof(struct ccid) + obj_size, 0,
                                 SLAB_HWCACHE_ALIGN, NULL);
-       if (slab == NULL)
-               kfree(slab_name);
        return slab;
 }

 static void ccid_kmem_cache_destroy(struct kmem_cache *slab)
 {
-       if (slab != NULL) {
-               const char *name = kmem_cache_name(slab);
-
+       if (slab != NULL)
                kmem_cache_destroy(slab);
-               kfree(name);
-       }
 }

 static int ccid_activate(struct ccid_operations *ccid_ops)
···

        ccid_ops->ccid_hc_rx_slab =
                        ccid_kmem_cache_create(ccid_ops->ccid_hc_rx_obj_size,
+                                              ccid_ops->ccid_hc_rx_slab_name,
                                               "ccid%u_hc_rx_sock",
                                               ccid_ops->ccid_id);
        if (ccid_ops->ccid_hc_rx_slab == NULL)
···

        ccid_ops->ccid_hc_tx_slab =
                        ccid_kmem_cache_create(ccid_ops->ccid_hc_tx_obj_size,
+                                              ccid_ops->ccid_hc_tx_slab_name,
                                               "ccid%u_hc_tx_sock",
                                               ccid_ops->ccid_id);
        if (ccid_ops->ccid_hc_tx_slab == NULL)
+2
net/dccp/ccid.h
···
        const char              *ccid_name;
        struct kmem_cache       *ccid_hc_rx_slab,
                                *ccid_hc_tx_slab;
+       char                    ccid_hc_rx_slab_name[32];
+       char                    ccid_hc_tx_slab_name[32];
        __u32                   ccid_hc_rx_obj_size,
                                ccid_hc_tx_obj_size;
        /* Interface Routines */
+2 -1
net/dccp/probe.c
···
        if (!proc_net_fops_create(&init_net, procname, S_IRUSR, &dccpprobe_fops))
                goto err0;

-       ret = register_jprobe(&dccp_send_probe);
+       ret = try_then_request_module((register_jprobe(&dccp_send_probe) == 0),
+                                     "dccp");
        if (ret)
                goto err1;

+1 -1
net/ipv4/inet_diag.c
···
                        yes = entry->sport >= op[1].no;
                        break;
                case INET_DIAG_BC_S_LE:
-                       yes = entry->dport <= op[1].no;
+                       yes = entry->sport <= op[1].no;
                        break;
                case INET_DIAG_BC_D_GE:
                        yes = entry->dport >= op[1].no;
+2
net/ipv4/route.c
···
 {
        remove_proc_entry("rt_cache", net->proc_net_stat);
        remove_proc_entry("rt_cache", net->proc_net);
+#ifdef CONFIG_NET_CLS_ROUTE
        remove_proc_entry("rt_acct", net->proc_net);
+#endif
 }

 static struct pernet_operations ip_rt_proc_ops __net_initdata = {
+10 -9
net/ipv4/tcp_probe.c
···
 MODULE_PARM_DESC(port, "Port to match (0=all)");
 module_param(port, int, 0);

-static int bufsize __read_mostly = 4096;
+static unsigned int bufsize __read_mostly = 4096;
 MODULE_PARM_DESC(bufsize, "Log buffer size in packets (4096)");
-module_param(bufsize, int, 0);
+module_param(bufsize, uint, 0);

 static int full __read_mostly;
 MODULE_PARM_DESC(full, "Full log (1=every ack packet received, 0=only cwnd changes)");
···

 static inline int tcp_probe_used(void)
 {
-       return (tcp_probe.head - tcp_probe.tail) % bufsize;
+       return (tcp_probe.head - tcp_probe.tail) & (bufsize - 1);
 }

 static inline int tcp_probe_avail(void)
 {
-       return bufsize - tcp_probe_used();
+       return bufsize - tcp_probe_used() - 1;
 }

 /*
···
                p->ssthresh = tcp_current_ssthresh(sk);
                p->srtt = tp->srtt >> 3;

-               tcp_probe.head = (tcp_probe.head + 1) % bufsize;
+               tcp_probe.head = (tcp_probe.head + 1) & (bufsize - 1);
        }
        tcp_probe.lastcwnd = tp->snd_cwnd;
        spin_unlock(&tcp_probe.lock);
···
 static int tcpprobe_sprint(char *tbuf, int n)
 {
        const struct tcp_log *p
-               = tcp_probe.log + tcp_probe.tail % bufsize;
+               = tcp_probe.log + tcp_probe.tail;
        struct timespec tv
                = ktime_to_timespec(ktime_sub(p->tstamp, tcp_probe.start));

···
                width = tcpprobe_sprint(tbuf, sizeof(tbuf));

                if (cnt + width < len)
-                       tcp_probe.tail = (tcp_probe.tail + 1) % bufsize;
+                       tcp_probe.tail = (tcp_probe.tail + 1) & (bufsize - 1);

        spin_unlock_bh(&tcp_probe.lock);

···
        init_waitqueue_head(&tcp_probe.wait);
        spin_lock_init(&tcp_probe.lock);

-       if (bufsize < 0)
+       if (bufsize == 0)
                return -EINVAL;

+       bufsize = roundup_pow_of_two(bufsize);
        tcp_probe.log = kcalloc(bufsize, sizeof(struct tcp_log), GFP_KERNEL);
        if (!tcp_probe.log)
                goto err0;
···
        if (ret)
                goto err1;

-       pr_info("TCP probe registered (port=%d)\n", port);
+       pr_info("TCP probe registered (port=%d) bufsize=%u\n", port, bufsize);
        return 0;
 err1:
        proc_net_remove(&init_net, procname);
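The tcp_probe change works because bufsize is now forced to a power of two: "x & (bufsize - 1)" is then equivalent to "x % bufsize" but also behaves correctly when head - tail wraps around in unsigned arithmetic, and keeping one slot empty lets occupancy be derived from head and tail alone. A standalone model with made-up index values:

#include <stdio.h>

int main(void)
{
        unsigned int bufsize = 8;   /* power of two, as after roundup_pow_of_two() */
        unsigned int head = 5, tail = 1;

        /* head - tail may wrap below zero in unsigned arithmetic;
         * masking with bufsize - 1 still yields the occupancy */
        unsigned int used  = (head - tail) & (bufsize - 1);
        unsigned int avail = bufsize - used - 1;    /* one slot stays empty */

        printf("used=%u avail=%u\n", used, avail);

        head = (head + 1) & (bufsize - 1);  /* cheap replacement for %% bufsize */
        printf("next head=%u\n", head);
        return 0;
}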
+8 -6
net/ipv4/xfrm4_policy.c
···
 #include <net/xfrm.h>
 #include <net/ip.h>

-static struct dst_ops xfrm4_dst_ops;
 static struct xfrm_policy_afinfo xfrm4_policy_afinfo;

 static struct dst_entry *xfrm4_dst_lookup(struct net *net, int tos,
···

 static inline int xfrm4_garbage_collect(struct dst_ops *ops)
 {
-       xfrm4_policy_afinfo.garbage_collect(&init_net);
-       return (atomic_read(&xfrm4_dst_ops.entries) > xfrm4_dst_ops.gc_thresh*2);
+       struct net *net = container_of(ops, struct net, xfrm.xfrm4_dst_ops);
+
+       xfrm4_policy_afinfo.garbage_collect(net);
+       return (atomic_read(&ops->entries) > ops->gc_thresh * 2);
 }

 static void xfrm4_update_pmtu(struct dst_entry *dst, u32 mtu)
···
 static struct ctl_table xfrm4_policy_table[] = {
        {
                .procname       = "xfrm4_gc_thresh",
-               .data           = &xfrm4_dst_ops.gc_thresh,
+               .data           = &init_net.xfrm.xfrm4_dst_ops.gc_thresh,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
···

 void __init xfrm4_init(int rt_max_size)
 {
-       xfrm4_state_init();
-       xfrm4_policy_init();
        /*
         * Select a default value for the gc_thresh based on the main route
         * table hash size.  It seems to me the worst case scenario is when
···
         * and start cleaning when were 1/2 full
         */
        xfrm4_dst_ops.gc_thresh = rt_max_size/2;
+
+       xfrm4_state_init();
+       xfrm4_policy_init();
 #ifdef CONFIG_SYSCTL
        sysctl_hdr = register_net_sysctl_table(&init_net, net_ipv4_ctl_path,
                                               xfrm4_policy_table);
+14 -11
net/ipv6/xfrm6_policy.c
···
 #include <net/mip6.h>
 #endif

-static struct dst_ops xfrm6_dst_ops;
 static struct xfrm_policy_afinfo xfrm6_policy_afinfo;

 static struct dst_entry *xfrm6_dst_lookup(struct net *net, int tos,
···

 static inline int xfrm6_garbage_collect(struct dst_ops *ops)
 {
-       xfrm6_policy_afinfo.garbage_collect(&init_net);
-       return (atomic_read(&xfrm6_dst_ops.entries) > xfrm6_dst_ops.gc_thresh*2);
+       struct net *net = container_of(ops, struct net, xfrm.xfrm6_dst_ops);
+
+       xfrm6_policy_afinfo.garbage_collect(net);
+       return (atomic_read(&ops->entries) > ops->gc_thresh * 2);
 }

 static void xfrm6_update_pmtu(struct dst_entry *dst, u32 mtu)
···
 static struct ctl_table xfrm6_policy_table[] = {
        {
                .procname       = "xfrm6_gc_thresh",
-               .data           = &xfrm6_dst_ops.gc_thresh,
+               .data           = &init_net.xfrm.xfrm6_dst_ops.gc_thresh,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
···
        int ret;
        unsigned int gc_thresh;

-       ret = xfrm6_policy_init();
-       if (ret)
-               goto out;
-
-       ret = xfrm6_state_init();
-       if (ret)
-               goto out_policy;
        /*
         * We need a good default value for the xfrm6 gc threshold.
         * In ipv4 we set it to the route hash table size * 8, which
···
         */
        gc_thresh = FIB6_TABLE_HASHSZ * 8;
        xfrm6_dst_ops.gc_thresh = (gc_thresh < 1024) ? 1024 : gc_thresh;
+
+       ret = xfrm6_policy_init();
+       if (ret)
+               goto out;
+
+       ret = xfrm6_state_init();
+       if (ret)
+               goto out_policy;
+
 #ifdef CONFIG_SYSCTL
        sysctl_hdr = register_net_sysctl_table(&init_net, net_ipv6_ctl_path,
                                               xfrm6_policy_table);
+3
net/mac80211/cfg.c
···
        struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
        struct ieee80211_conf *conf = &local->hw.conf;

+       if (sdata->vif.type != NL80211_IFTYPE_STATION)
+               return -EOPNOTSUPP;
+
        if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_PS))
                return -EOPNOTSUPP;

+1 -1
net/mac80211/rc80211_pid_algo.c
···
        rate_control_pid_normalize(pinfo, sband->n_bitrates);

        /* Compute the proportional, integral and derivative errors. */
-       err_prop = (pinfo->target << RC_PID_ARITH_SHIFT) - pf;
+       err_prop = (pinfo->target - pf) << RC_PID_ARITH_SHIFT;

        err_avg = spinfo->err_avg_sc >> pinfo->smoothing_shift;
        spinfo->err_avg_sc = spinfo->err_avg_sc - err_avg + err_prop;
+6 -5
net/netrom/nr_route.c
···
        dptr = skb_push(skb, 1);
        *dptr = AX25_P_NETROM;

-       ax25s = ax25_send_frame(skb, 256, (ax25_address *)dev->dev_addr, &nr_neigh->callsign, nr_neigh->digipeat, nr_neigh->dev);
-       if (nr_neigh->ax25 && ax25s) {
-               /* We were already holding this ax25_cb */
+       ax25s = nr_neigh->ax25;
+       nr_neigh->ax25 = ax25_send_frame(skb, 256,
+                                        (ax25_address *)dev->dev_addr,
+                                        &nr_neigh->callsign,
+                                        nr_neigh->digipeat, nr_neigh->dev);
+       if (ax25s)
                ax25_cb_put(ax25s);
-       }
-       nr_neigh->ax25 = ax25s;

        dev_put(dev);
        ret = (nr_neigh->ax25 != NULL);
+8
net/rose/rose_link.c
···
 static int rose_send_frame(struct sk_buff *skb, struct rose_neigh *neigh)
 {
        ax25_address *rose_call;
+       ax25_cb *ax25s;

        if (ax25cmp(&rose_callsign, &null_ax25_address) == 0)
                rose_call = (ax25_address *)neigh->dev->dev_addr;
        else
                rose_call = &rose_callsign;

+       ax25s = neigh->ax25;
        neigh->ax25 = ax25_send_frame(skb, 260, rose_call, &neigh->callsign, neigh->digipeat, neigh->dev);
+       if (ax25s)
+               ax25_cb_put(ax25s);

        return (neigh->ax25 != NULL);
 }
···
 static int rose_link_up(struct rose_neigh *neigh)
 {
        ax25_address *rose_call;
+       ax25_cb *ax25s;

        if (ax25cmp(&rose_callsign, &null_ax25_address) == 0)
                rose_call = (ax25_address *)neigh->dev->dev_addr;
        else
                rose_call = &rose_callsign;

+       ax25s = neigh->ax25;
        neigh->ax25 = ax25_find_cb(rose_call, &neigh->callsign, neigh->digipeat, neigh->dev);
+       if (ax25s)
+               ax25_cb_put(ax25s);

        return (neigh->ax25 != NULL);
 }
+5
net/rose/rose_route.c
···

        if ((s = rose_neigh_list) == rose_neigh) {
                rose_neigh_list = rose_neigh->next;
+               if (rose_neigh->ax25)
+                       ax25_cb_put(rose_neigh->ax25);
                kfree(rose_neigh->digipeat);
                kfree(rose_neigh);
                return;
···
        while (s != NULL && s->next != NULL) {
                if (s->next == rose_neigh) {
                        s->next = rose_neigh->next;
+                       if (rose_neigh->ax25)
+                               ax25_cb_put(rose_neigh->ax25);
                        kfree(rose_neigh->digipeat);
                        kfree(rose_neigh);
                        return;
···

        if (rose_neigh != NULL) {
                rose_neigh->ax25 = NULL;
+               ax25_cb_put(ax25);

                rose_del_route_by_neigh(rose_neigh);
                rose_kill_by_neigh(rose_neigh);
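The netrom and rose changes above share one ownership rule for the ax25_cb: save the reference the neighbour currently holds, install the replacement, then drop exactly one reference on the old control block (and drop the held reference again when the neighbour itself is destroyed). A standalone model with invented types; ax25_cb_put() in the kernel does the same conditional free:

#include <stdio.h>
#include <stdlib.h>

struct cb { int refcount; };

static struct cb *cb_new(void)
{
        struct cb *cb = malloc(sizeof(*cb));
        cb->refcount = 1;
        return cb;
}

static void cb_put(struct cb *cb)
{
        if (--cb->refcount == 0) {
                puts("control block freed");
                free(cb);
        }
}

int main(void)
{
        struct cb *neigh_ax25 = cb_new();       /* ref held by the neighbour */

        struct cb *old = neigh_ax25;            /* take the old ref out first */
        neigh_ax25 = cb_new();                  /* install the replacement */
        if (old)
                cb_put(old);                    /* drop exactly one old ref */

        cb_put(neigh_ax25);
        return 0;
}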
+1
net/wireless/sme.c
···
        memset(&wrqu, 0, sizeof(wrqu));
        wrqu.ap_addr.sa_family = ARPHRD_ETHER;
        wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL);
+       wdev->wext.connect.ssid_len = 0;
 #endif
 }

+64 -11
net/xfrm/xfrm_policy.c
···
        return 0;
 }

-void xfrm_spd_getinfo(struct xfrmk_spdinfo *si)
+void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si)
 {
        read_lock_bh(&xfrm_policy_lock);
-       si->incnt = init_net.xfrm.policy_count[XFRM_POLICY_IN];
-       si->outcnt = init_net.xfrm.policy_count[XFRM_POLICY_OUT];
-       si->fwdcnt = init_net.xfrm.policy_count[XFRM_POLICY_FWD];
-       si->inscnt = init_net.xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
-       si->outscnt = init_net.xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
-       si->fwdscnt = init_net.xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
-       si->spdhcnt = init_net.xfrm.policy_idx_hmask;
+       si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN];
+       si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT];
+       si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD];
+       si->inscnt = net->xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
+       si->outscnt = net->xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
+       si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
+       si->spdhcnt = net->xfrm.policy_idx_hmask;
        si->spdhmcnt = xfrm_policy_hashmax;
        read_unlock_bh(&xfrm_policy_lock);
 }
···
        return tos;
 }

-static inline struct xfrm_dst *xfrm_alloc_dst(int family)
+static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
 {
        struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
+       struct dst_ops *dst_ops;
        struct xfrm_dst *xdst;

        if (!afinfo)
                return ERR_PTR(-EINVAL);

-       xdst = dst_alloc(afinfo->dst_ops) ?: ERR_PTR(-ENOBUFS);
+       switch (family) {
+       case AF_INET:
+               dst_ops = &net->xfrm.xfrm4_dst_ops;
+               break;
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+       case AF_INET6:
+               dst_ops = &net->xfrm.xfrm6_dst_ops;
+               break;
+#endif
+       default:
+               BUG();
+       }
+       xdst = dst_alloc(dst_ops) ?: ERR_PTR(-ENOBUFS);

        xfrm_policy_put_afinfo(afinfo);

···
                            struct flowi *fl,
                            struct dst_entry *dst)
 {
+       struct net *net = xp_net(policy);
        unsigned long now = jiffies;
        struct net_device *dev;
        struct dst_entry *dst_prev = NULL;
···
        dst_hold(dst);

        for (; i < nx; i++) {
-               struct xfrm_dst *xdst = xfrm_alloc_dst(family);
+               struct xfrm_dst *xdst = xfrm_alloc_dst(net, family);
                struct dst_entry *dst1 = &xdst->u.dst;

                err = PTR_ERR(xdst);
···

 int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
 {
+       struct net *net;
        int err = 0;
        if (unlikely(afinfo == NULL))
                return -EINVAL;
···
                xfrm_policy_afinfo[afinfo->family] = afinfo;
        }
        write_unlock_bh(&xfrm_policy_afinfo_lock);
+
+       rtnl_lock();
+       for_each_net(net) {
+               struct dst_ops *xfrm_dst_ops;
+
+               switch (afinfo->family) {
+               case AF_INET:
+                       xfrm_dst_ops = &net->xfrm.xfrm4_dst_ops;
+                       break;
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+               case AF_INET6:
+                       xfrm_dst_ops = &net->xfrm.xfrm6_dst_ops;
+                       break;
+#endif
+               default:
+                       BUG();
+               }
+               *xfrm_dst_ops = *afinfo->dst_ops;
+       }
+       rtnl_unlock();
+
        return err;
 }
 EXPORT_SYMBOL(xfrm_policy_register_afinfo);
···
        return err;
 }
 EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);
+
+static void __net_init xfrm_dst_ops_init(struct net *net)
+{
+       struct xfrm_policy_afinfo *afinfo;
+
+       read_lock_bh(&xfrm_policy_afinfo_lock);
+       afinfo = xfrm_policy_afinfo[AF_INET];
+       if (afinfo)
+               net->xfrm.xfrm4_dst_ops = *afinfo->dst_ops;
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+       afinfo = xfrm_policy_afinfo[AF_INET6];
+       if (afinfo)
+               net->xfrm.xfrm6_dst_ops = *afinfo->dst_ops;
+#endif
+       read_unlock_bh(&xfrm_policy_afinfo_lock);
+}

 static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
 {
···
        rv = xfrm_policy_init(net);
        if (rv < 0)
                goto out_policy;
+       xfrm_dst_ops_init(net);
        rv = xfrm_sysctl_init(net);
        if (rv < 0)
                goto out_sysctl;
+3 -3
net/xfrm/xfrm_state.c
···
 }
 EXPORT_SYMBOL(xfrm_state_flush);

-void xfrm_sad_getinfo(struct xfrmk_sadinfo *si)
+void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si)
 {
        spin_lock_bh(&xfrm_state_lock);
-       si->sadcnt = init_net.xfrm.state_num;
-       si->sadhcnt = init_net.xfrm.state_hmask;
+       si->sadcnt = net->xfrm.state_num;
+       si->sadhcnt = net->xfrm.state_hmask;
        si->sadhmcnt = xfrm_state_hashmax;
        spin_unlock_bh(&xfrm_state_lock);
 }
+8 -6
net/xfrm/xfrm_user.c
···
                + nla_total_size(sizeof(struct xfrmu_spdhinfo));
 }

-static int build_spdinfo(struct sk_buff *skb, u32 pid, u32 seq, u32 flags)
+static int build_spdinfo(struct sk_buff *skb, struct net *net,
+                        u32 pid, u32 seq, u32 flags)
 {
        struct xfrmk_spdinfo si;
        struct xfrmu_spdinfo spc;
···
        f = nlmsg_data(nlh);
        *f = flags;
-       xfrm_spd_getinfo(&si);
+       xfrm_spd_getinfo(net, &si);
        spc.incnt = si.incnt;
        spc.outcnt = si.outcnt;
        spc.fwdcnt = si.fwdcnt;
···
        if (r_skb == NULL)
                return -ENOMEM;

-       if (build_spdinfo(r_skb, spid, seq, *flags) < 0)
+       if (build_spdinfo(r_skb, net, spid, seq, *flags) < 0)
                BUG();

        return nlmsg_unicast(net->xfrm.nlsk, r_skb, spid);
···
                + nla_total_size(4); /* XFRMA_SAD_CNT */
 }

-static int build_sadinfo(struct sk_buff *skb, u32 pid, u32 seq, u32 flags)
+static int build_sadinfo(struct sk_buff *skb, struct net *net,
+                        u32 pid, u32 seq, u32 flags)
 {
        struct xfrmk_sadinfo si;
        struct xfrmu_sadhinfo sh;
···
        f = nlmsg_data(nlh);
        *f = flags;
-       xfrm_sad_getinfo(&si);
+       xfrm_sad_getinfo(net, &si);

        sh.sadhmcnt = si.sadhmcnt;
        sh.sadhcnt = si.sadhcnt;
···
        if (r_skb == NULL)
                return -ENOMEM;

-       if (build_sadinfo(r_skb, spid, seq, *flags) < 0)
+       if (build_sadinfo(r_skb, net, spid, seq, *flags) < 0)
                BUG();

        return nlmsg_unicast(net->xfrm.nlsk, r_skb, spid);