Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (47 commits)
[SCTP]: Fix local_addr deletions during list traversals.
net: fix build with CONFIG_NET=n
[TCP]: Prevent sending past receiver window with TSO (at last skb)
rt2x00: Add new D-Link USB ID
rt2x00: never disable multicast because it disables broadcast too
libertas: fix the 'compare command with itself' properly
drivers/net/Kconfig: fix whitespace for GELIC_WIRELESS entry
[NETFILTER]: nf_queue: don't return error when unregistering a non-existent handler
[NETFILTER]: nfnetlink_queue: fix EPERM when binding/unbinding and instance 0 exists
[NETFILTER]: nfnetlink_log: fix EPERM when binding/unbinding and instance 0 exists
[NETFILTER]: nf_conntrack: replace horrible hack with ksize()
[NETFILTER]: nf_conntrack: add \n to "expectation table full" message
[NETFILTER]: xt_time: fix failure to match on Sundays
[NETFILTER]: nfnetlink_log: fix computation of netlink skb size
[NETFILTER]: nfnetlink_queue: fix computation of allocated size for netlink skb.
[NETFILTER]: nfnetlink: fix ifdef in nfnetlink_compat.h
[NET]: include <linux/types.h> into linux/ethtool.h for __u* typedef
[NET]: Make /proc/net a symlink on /proc/self/net (v3)
RxRPC: fix rxrpc_recvmsg()'s returning of msg_name
net/enc28j60: oops fix
...

+366 -259
+4 -28
MAINTAINERS
··· 2052 L: netdev@vger.kernel.org 2053 S: Maintained 2054 2055 - INTEL PRO/100 ETHERNET SUPPORT 2056 P: Auke Kok 2057 M: auke-jan.h.kok@intel.com 2058 P: Jesse Brandeburg 2059 M: jesse.brandeburg@intel.com 2060 P: Jeff Kirsher 2061 M: jeffrey.t.kirsher@intel.com 2062 P: John Ronciak 2063 M: john.ronciak@intel.com 2064 L: e1000-devel@lists.sourceforge.net 2065 - W: http://sourceforge.net/projects/e1000/ 2066 - S: Supported 2067 - 2068 - INTEL PRO/1000 GIGABIT ETHERNET SUPPORT 2069 - P: Auke Kok 2070 - M: auke-jan.h.kok@intel.com 2071 - P: Jesse Brandeburg 2072 - M: jesse.brandeburg@intel.com 2073 - P: Jeff Kirsher 2074 - M: jeffrey.t.kirsher@intel.com 2075 - P: John Ronciak 2076 - M: john.ronciak@intel.com 2077 - L: e1000-devel@lists.sourceforge.net 2078 - W: http://sourceforge.net/projects/e1000/ 2079 - S: Supported 2080 - 2081 - INTEL PRO/10GbE SUPPORT 2082 - P: Ayyappan Veeraiyan 2083 - M: ayyappan.veeraiyan@intel.com 2084 - P: Auke Kok 2085 - M: auke-jan.h.kok@intel.com 2086 - P: Jesse Brandeburg 2087 - M: jesse.brandeburg@intel.com 2088 - P: John Ronciak 2089 - M: john.ronciak@intel.com 2090 - L: e1000-devel@lists.sourceforge.net 2091 - W: http://sourceforge.net/projects/e1000/ 2092 S: Supported 2093 2094 INTEL PRO/WIRELESS 2100 NETWORK CONNECTION SUPPORT
··· 2052 L: netdev@vger.kernel.org 2053 S: Maintained 2054 2055 + INTEL ETHERNET DRIVERS (e100/e1000/e1000e/igb/ixgb/ixgbe) 2056 P: Auke Kok 2057 M: auke-jan.h.kok@intel.com 2058 P: Jesse Brandeburg 2059 M: jesse.brandeburg@intel.com 2060 P: Jeff Kirsher 2061 M: jeffrey.t.kirsher@intel.com 2062 + P: Bruce Allan 2063 + M: bruce.w.allan@intel.com 2064 P: John Ronciak 2065 M: john.ronciak@intel.com 2066 L: e1000-devel@lists.sourceforge.net 2067 + W: http://e1000.sourceforge.net/ 2068 S: Supported 2069 2070 INTEL PRO/WIRELESS 2100 NETWORK CONNECTION SUPPORT
+2 -2
drivers/atm/firestream.c
··· 331 #define FS_DEBUG_QSIZE 0x00001000 332 333 334 - #define func_enter() fs_dprintk (FS_DEBUG_FLOW, "fs: enter %s\n", __FUNCTION__) 335 - #define func_exit() fs_dprintk (FS_DEBUG_FLOW, "fs: exit %s\n", __FUNCTION__) 336 337 338 static struct fs_dev *fs_boards = NULL;
··· 331 #define FS_DEBUG_QSIZE 0x00001000 332 333 334 + #define func_enter() fs_dprintk(FS_DEBUG_FLOW, "fs: enter %s\n", __func__) 335 + #define func_exit() fs_dprintk(FS_DEBUG_FLOW, "fs: exit %s\n", __func__) 336 337 338 static struct fs_dev *fs_boards = NULL;
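Note: __FUNCTION__ is a GCC-specific spelling, while __func__ is the predefined identifier standardized by C99, which is why the tree is being converted (the fore200e and idt77252 hunks below are the same substitution). A minimal userspace sketch, not kernel code and with a made-up function name, showing the standard identifier in use:

#include <stdio.h>

static void probe_card(void)
{
	/* prints "fs: enter probe_card" */
	printf("fs: enter %s\n", __func__);
}

int main(void)
{
	probe_card();
	return 0;
}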
+2 -2
drivers/atm/fore200e.c
··· 95 #if 1 96 #define ASSERT(expr) if (!(expr)) { \ 97 printk(FORE200E "assertion failed! %s[%d]: %s\n", \ 98 - __FUNCTION__, __LINE__, #expr); \ 99 - panic(FORE200E "%s", __FUNCTION__); \ 100 } 101 #else 102 #define ASSERT(expr) do {} while (0)
··· 95 #if 1 96 #define ASSERT(expr) if (!(expr)) { \ 97 printk(FORE200E "assertion failed! %s[%d]: %s\n", \ 98 + __func__, __LINE__, #expr); \ 99 + panic(FORE200E "%s", __func__); \ 100 } 101 #else 102 #define ASSERT(expr) do {} while (0)
+6 -6
drivers/atm/idt77252.c
··· 555 struct vc_map *vc; 556 int i; 557 558 - printk("%s\n", __FUNCTION__); 559 for (i = 0; i < card->tct_size; i++) { 560 vc = card->vcs[i]; 561 if (!vc) ··· 1035 skb = sb_pool_skb(card, le32_to_cpu(rsqe->word_2)); 1036 if (skb == NULL) { 1037 printk("%s: NULL skb in %s, rsqe: %08x %08x %08x %08x\n", 1038 - card->name, __FUNCTION__, 1039 le32_to_cpu(rsqe->word_1), le32_to_cpu(rsqe->word_2), 1040 le32_to_cpu(rsqe->word_3), le32_to_cpu(rsqe->word_4)); 1041 return; ··· 1873 return; 1874 1875 if (sb_pool_add(card, skb, queue)) { 1876 - printk("%s: SB POOL full\n", __FUNCTION__); 1877 goto outfree; 1878 } 1879 ··· 1883 IDT77252_PRV_PADDR(skb) = paddr; 1884 1885 if (push_rx_skb(card, skb, queue)) { 1886 - printk("%s: FB QUEUE full\n", __FUNCTION__); 1887 goto outunmap; 1888 } 1889 } ··· 3821 { 3822 struct sk_buff *skb; 3823 3824 - printk("%s: at %p\n", __FUNCTION__, idt77252_init); 3825 3826 if (sizeof(skb->cb) < sizeof(struct atm_skb_data) + 3827 sizeof(struct idt77252_skb_prv)) { 3828 printk(KERN_ERR "%s: skb->cb is too small (%lu < %lu)\n", 3829 - __FUNCTION__, (unsigned long) sizeof(skb->cb), 3830 (unsigned long) sizeof(struct atm_skb_data) + 3831 sizeof(struct idt77252_skb_prv)); 3832 return -EIO;
··· 555 struct vc_map *vc; 556 int i; 557 558 + printk("%s\n", __func__); 559 for (i = 0; i < card->tct_size; i++) { 560 vc = card->vcs[i]; 561 if (!vc) ··· 1035 skb = sb_pool_skb(card, le32_to_cpu(rsqe->word_2)); 1036 if (skb == NULL) { 1037 printk("%s: NULL skb in %s, rsqe: %08x %08x %08x %08x\n", 1038 + card->name, __func__, 1039 le32_to_cpu(rsqe->word_1), le32_to_cpu(rsqe->word_2), 1040 le32_to_cpu(rsqe->word_3), le32_to_cpu(rsqe->word_4)); 1041 return; ··· 1873 return; 1874 1875 if (sb_pool_add(card, skb, queue)) { 1876 + printk("%s: SB POOL full\n", __func__); 1877 goto outfree; 1878 } 1879 ··· 1883 IDT77252_PRV_PADDR(skb) = paddr; 1884 1885 if (push_rx_skb(card, skb, queue)) { 1886 + printk("%s: FB QUEUE full\n", __func__); 1887 goto outunmap; 1888 } 1889 } ··· 3821 { 3822 struct sk_buff *skb; 3823 3824 + printk("%s: at %p\n", __func__, idt77252_init); 3825 3826 if (sizeof(skb->cb) < sizeof(struct atm_skb_data) + 3827 sizeof(struct idt77252_skb_prv)) { 3828 printk(KERN_ERR "%s: skb->cb is too small (%lu < %lu)\n", 3829 + __func__, (unsigned long) sizeof(skb->cb), 3830 (unsigned long) sizeof(struct atm_skb_data) + 3831 sizeof(struct idt77252_skb_prv)); 3832 return -EIO;
+3
drivers/bluetooth/hci_usb.c
··· 149 { USB_DEVICE(0x0400, 0x0807), .driver_info = HCI_BROKEN_ISOC }, 150 { USB_DEVICE(0x0400, 0x080a), .driver_info = HCI_BROKEN_ISOC }, 151 152 /* Belkin F8T012 and F8T013 devices */ 153 { USB_DEVICE(0x050d, 0x0012), .driver_info = HCI_RESET | HCI_WRONG_SCO_MTU }, 154 { USB_DEVICE(0x050d, 0x0013), .driver_info = HCI_RESET | HCI_WRONG_SCO_MTU },
··· 149 { USB_DEVICE(0x0400, 0x0807), .driver_info = HCI_BROKEN_ISOC }, 150 { USB_DEVICE(0x0400, 0x080a), .driver_info = HCI_BROKEN_ISOC }, 151 152 + /* CONWISE Technology based adapters with buggy SCO support */ 153 + { USB_DEVICE(0x0e5e, 0x6622), .driver_info = HCI_BROKEN_ISOC }, 154 + 155 /* Belkin F8T012 and F8T013 devices */ 156 { USB_DEVICE(0x050d, 0x0012), .driver_info = HCI_RESET | HCI_WRONG_SCO_MTU }, 157 { USB_DEVICE(0x050d, 0x0013), .driver_info = HCI_RESET | HCI_WRONG_SCO_MTU },
+2 -2
drivers/isdn/i4l/isdn_common.c
··· 981 } 982 983 984 - static __inline int 985 isdn_minor2drv(int minor) 986 { 987 return (dev->drvmap[minor]); 988 } 989 990 - static __inline int 991 isdn_minor2chan(int minor) 992 { 993 return (dev->chanmap[minor]);
··· 981 } 982 983 984 + static inline int 985 isdn_minor2drv(int minor) 986 { 987 return (dev->drvmap[minor]); 988 } 989 990 + static inline int 991 isdn_minor2chan(int minor) 992 { 993 return (dev->chanmap[minor]);
+1 -1
drivers/isdn/i4l/isdn_v110.c
··· 62 * and to 67452301 when keylen = 2. This is necessary because ordering on 63 * the isdn line is the other way. 64 */ 65 - static __inline unsigned char 66 FlipBits(unsigned char c, int keylen) 67 { 68 unsigned char b = c;
··· 62 * and to 67452301 when keylen = 2. This is necessary because ordering on 63 * the isdn line is the other way. 64 */ 65 + static inline unsigned char 66 FlipBits(unsigned char c, int keylen) 67 { 68 unsigned char b = c;
+10 -10
drivers/net/Kconfig
··· 2366 module will be called ps3_gelic. 2367 2368 config GELIC_WIRELESS 2369 - bool "PS3 Wireless support" 2370 - depends on GELIC_NET 2371 - select WIRELESS_EXT 2372 - help 2373 - This option adds the support for the wireless feature of PS3. 2374 - If you have the wireless-less model of PS3 or have no plan to 2375 - use wireless feature, disabling this option saves memory. As 2376 - the driver automatically distinguishes the models, you can 2377 - safely enable this option even if you have a wireless-less model. 2378 2379 config GIANFAR 2380 tristate "Gianfar Ethernet" ··· 2519 2520 config EHEA 2521 tristate "eHEA Ethernet support" 2522 - depends on IBMEBUS && INET 2523 select INET_LRO 2524 ---help--- 2525 This driver supports the IBM pSeries eHEA ethernet adapter.
··· 2366 module will be called ps3_gelic. 2367 2368 config GELIC_WIRELESS 2369 + bool "PS3 Wireless support" 2370 + depends on GELIC_NET 2371 + select WIRELESS_EXT 2372 + help 2373 + This option adds the support for the wireless feature of PS3. 2374 + If you have the wireless-less model of PS3 or have no plan to 2375 + use wireless feature, disabling this option saves memory. As 2376 + the driver automatically distinguishes the models, you can 2377 + safely enable this option even if you have a wireless-less model. 2378 2379 config GIANFAR 2380 tristate "Gianfar Ethernet" ··· 2519 2520 config EHEA 2521 tristate "eHEA Ethernet support" 2522 + depends on IBMEBUS && INET && SPARSEMEM 2523 select INET_LRO 2524 ---help--- 2525 This driver supports the IBM pSeries eHEA ethernet adapter.
+4 -3
drivers/net/ac3200.c
··· 369 MODULE_DESCRIPTION("Ansel AC3200 EISA ethernet driver"); 370 MODULE_LICENSE("GPL"); 371 372 - int __init init_module(void) 373 { 374 struct net_device *dev; 375 int this_dev, found = 0; ··· 404 iounmap(ei_status.mem); 405 } 406 407 - void __exit 408 - cleanup_module(void) 409 { 410 int this_dev; 411 ··· 417 } 418 } 419 } 420 #endif /* MODULE */
··· 369 MODULE_DESCRIPTION("Ansel AC3200 EISA ethernet driver"); 370 MODULE_LICENSE("GPL"); 371 372 + static int __init ac3200_module_init(void) 373 { 374 struct net_device *dev; 375 int this_dev, found = 0; ··· 404 iounmap(ei_status.mem); 405 } 406 407 + static void __exit ac3200_module_exit(void) 408 { 409 int this_dev; 410 ··· 418 } 419 } 420 } 421 + module_init(ac3200_module_init); 422 + module_exit(ac3200_module_exit); 423 #endif /* MODULE */
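The ac3200 change (and the apne, ltpc and capmode changes below) replaces the legacy implicit init_module()/cleanup_module() entry points with static functions registered via module_init()/module_exit(). A minimal sketch of the modern pattern for a hypothetical driver; it builds against kernel headers, not as a userspace program:

#include <linux/init.h>
#include <linux/module.h>

static int __init example_module_init(void)
{
	pr_info("example: loaded\n");
	return 0;
}

static void __exit example_module_exit(void)
{
	pr_info("example: unloaded\n");
}

module_init(example_module_init);
module_exit(example_module_exit);
MODULE_LICENSE("GPL");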
+4 -3
drivers/net/apne.c
··· 569 #ifdef MODULE 570 static struct net_device *apne_dev; 571 572 - int __init init_module(void) 573 { 574 apne_dev = apne_probe(-1); 575 if (IS_ERR(apne_dev)) ··· 577 return 0; 578 } 579 580 - void __exit cleanup_module(void) 581 { 582 unregister_netdev(apne_dev); 583 ··· 591 592 free_netdev(apne_dev); 593 } 594 - 595 #endif 596 597 static int init_pcmcia(void)
··· 569 #ifdef MODULE 570 static struct net_device *apne_dev; 571 572 + static int __init apne_module_init(void) 573 { 574 apne_dev = apne_probe(-1); 575 if (IS_ERR(apne_dev)) ··· 577 return 0; 578 } 579 580 + static void __exit apne_module_exit(void) 581 { 582 unregister_netdev(apne_dev); 583 ··· 591 592 free_netdev(apne_dev); 593 } 594 + module_init(apne_module_init); 595 + module_exit(apne_module_exit); 596 #endif 597 598 static int init_pcmcia(void)
+2 -1
drivers/net/appletalk/ltpc.c
··· 1252 module_param(dma, int, 0); 1253 1254 1255 - int __init init_module(void) 1256 { 1257 if(io == 0) 1258 printk(KERN_NOTICE ··· 1263 return PTR_ERR(dev_ltpc); 1264 return 0; 1265 } 1266 #endif 1267 1268 static void __exit ltpc_cleanup(void)
··· 1252 module_param(dma, int, 0); 1253 1254 1255 + static int __init ltpc_module_init(void) 1256 { 1257 if(io == 0) 1258 printk(KERN_NOTICE ··· 1263 return PTR_ERR(dev_ltpc); 1264 return 0; 1265 } 1266 + module_init(ltpc_module_init); 1267 #endif 1268 1269 static void __exit ltpc_cleanup(void)
+4 -2
drivers/net/arcnet/capmode.c
··· 80 81 #ifdef MODULE 82 83 - int __init init_module(void) 84 { 85 printk(VERSION); 86 arcnet_cap_init(); 87 return 0; 88 } 89 90 - void cleanup_module(void) 91 { 92 arcnet_unregister_proto(&capmode_proto); 93 } 94 95 MODULE_LICENSE("GPL"); 96 #endif /* MODULE */
··· 80 81 #ifdef MODULE 82 83 + static int __init capmode_module_init(void) 84 { 85 printk(VERSION); 86 arcnet_cap_init(); 87 return 0; 88 } 89 90 + static void __exit capmode_module_exit(void) 91 { 92 arcnet_unregister_proto(&capmode_proto); 93 } 94 + module_init(capmode_module_init); 95 + module_exit(capmode_module_exit); 96 97 MODULE_LICENSE("GPL"); 98 #endif /* MODULE */
+2 -3
drivers/net/atarilance.c
··· 336 337 /***************************** Prototypes *****************************/ 338 339 - static int addr_accessible( volatile void *regp, int wordflag, int 340 - writeflag ); 341 static unsigned long lance_probe1( struct net_device *dev, struct lance_addr 342 *init_rec ); 343 static int lance_open( struct net_device *dev ); ··· 404 405 /* Derived from hwreg_present() in atari/config.c: */ 406 407 - static int __init addr_accessible( volatile void *regp, int wordflag, int writeflag ) 408 { 409 int ret; 410 long flags;
··· 336 337 /***************************** Prototypes *****************************/ 338 339 static unsigned long lance_probe1( struct net_device *dev, struct lance_addr 340 *init_rec ); 341 static int lance_open( struct net_device *dev ); ··· 406 407 /* Derived from hwreg_present() in atari/config.c: */ 408 409 + static noinline int __init addr_accessible(volatile void *regp, int wordflag, 410 + int writeflag) 411 { 412 int ret; 413 long flags;
+3 -26
drivers/net/e100.c
··· 2782 } 2783 } 2784 2785 - #ifdef CONFIG_PM 2786 static int e100_suspend(struct pci_dev *pdev, pm_message_t state) 2787 { 2788 struct net_device *netdev = pci_get_drvdata(pdev); 2789 struct nic *nic = netdev_priv(netdev); 2790 2791 if (netif_running(netdev)) 2792 - napi_disable(&nic->napi); 2793 - del_timer_sync(&nic->watchdog); 2794 - netif_carrier_off(nic->netdev); 2795 netif_device_detach(netdev); 2796 2797 pci_save_state(pdev); ··· 2801 pci_enable_wake(pdev, PCI_D3cold, 0); 2802 } 2803 2804 - free_irq(pdev->irq, netdev); 2805 - 2806 pci_disable_device(pdev); 2807 pci_set_power_state(pdev, PCI_D3hot); 2808 2809 return 0; 2810 } 2811 2812 static int e100_resume(struct pci_dev *pdev) 2813 { 2814 struct net_device *netdev = pci_get_drvdata(pdev); ··· 2828 2829 static void e100_shutdown(struct pci_dev *pdev) 2830 { 2831 - struct net_device *netdev = pci_get_drvdata(pdev); 2832 - struct nic *nic = netdev_priv(netdev); 2833 - 2834 - if (netif_running(netdev)) 2835 - napi_disable(&nic->napi); 2836 - del_timer_sync(&nic->watchdog); 2837 - netif_carrier_off(nic->netdev); 2838 - 2839 - if ((nic->flags & wol_magic) | e100_asf(nic)) { 2840 - pci_enable_wake(pdev, PCI_D3hot, 1); 2841 - pci_enable_wake(pdev, PCI_D3cold, 1); 2842 - } else { 2843 - pci_enable_wake(pdev, PCI_D3hot, 0); 2844 - pci_enable_wake(pdev, PCI_D3cold, 0); 2845 - } 2846 - 2847 - free_irq(pdev->irq, netdev); 2848 - 2849 - pci_disable_device(pdev); 2850 - pci_set_power_state(pdev, PCI_D3hot); 2851 } 2852 2853 /* ------------------ PCI Error Recovery infrastructure -------------- */
··· 2782 } 2783 } 2784 2785 static int e100_suspend(struct pci_dev *pdev, pm_message_t state) 2786 { 2787 struct net_device *netdev = pci_get_drvdata(pdev); 2788 struct nic *nic = netdev_priv(netdev); 2789 2790 if (netif_running(netdev)) 2791 + e100_down(nic); 2792 netif_device_detach(netdev); 2793 2794 pci_save_state(pdev); ··· 2804 pci_enable_wake(pdev, PCI_D3cold, 0); 2805 } 2806 2807 pci_disable_device(pdev); 2808 pci_set_power_state(pdev, PCI_D3hot); 2809 2810 return 0; 2811 } 2812 2813 + #ifdef CONFIG_PM 2814 static int e100_resume(struct pci_dev *pdev) 2815 { 2816 struct net_device *netdev = pci_get_drvdata(pdev); ··· 2832 2833 static void e100_shutdown(struct pci_dev *pdev) 2834 { 2835 + e100_suspend(pdev, PMSG_SUSPEND); 2836 } 2837 2838 /* ------------------ PCI Error Recovery infrastructure -------------- */
+2 -1
drivers/net/enc28j60.c
··· 900 if (RSV_GETBIT(rxstat, RSV_LENCHECKERR)) 901 ndev->stats.rx_frame_errors++; 902 } else { 903 - skb = dev_alloc_skb(len); 904 if (!skb) { 905 if (netif_msg_rx_err(priv)) 906 dev_err(&ndev->dev, ··· 908 ndev->stats.rx_dropped++; 909 } else { 910 skb->dev = ndev; 911 /* copy the packet from the receive buffer */ 912 enc28j60_mem_read(priv, priv->next_pk_ptr + sizeof(rsv), 913 len, skb_put(skb, len));
··· 900 if (RSV_GETBIT(rxstat, RSV_LENCHECKERR)) 901 ndev->stats.rx_frame_errors++; 902 } else { 903 + skb = dev_alloc_skb(len + NET_IP_ALIGN); 904 if (!skb) { 905 if (netif_msg_rx_err(priv)) 906 dev_err(&ndev->dev, ··· 908 ndev->stats.rx_dropped++; 909 } else { 910 skb->dev = ndev; 911 + skb_reserve(skb, NET_IP_ALIGN); 912 /* copy the packet from the receive buffer */ 913 enc28j60_mem_read(priv, priv->next_pk_ptr + sizeof(rsv), 914 len, skb_put(skb, len));
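The enc28j60 fix allocates NET_IP_ALIGN extra bytes and reserves them so the 14-byte Ethernet header leaves the IP header on a 4-byte boundary. A small userspace sketch of the arithmetic (NET_IP_ALIGN is 2 on most architectures):

#include <stdio.h>

#define ETH_HLEN	14
#define NET_IP_ALIGN	2

int main(void)
{
	unsigned int without = ETH_HLEN;			/* 14 */
	unsigned int with = NET_IP_ALIGN + ETH_HLEN;		/* 16 */

	printf("IP header offset without reserve: %u (%saligned)\n",
	       without, without % 4 ? "mis" : "");
	printf("IP header offset with reserve:    %u (%saligned)\n",
	       with, with % 4 ? "mis" : "");
	return 0;
}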
+1 -1
drivers/net/ixgbe/ixgbe_main.c
··· 2133 (link_speed == IXGBE_LINK_SPEED_10GB_FULL ? 2134 "10 Gbps" : 2135 (link_speed == IXGBE_LINK_SPEED_1GB_FULL ? 2136 - "1 Gpbs" : "unknown speed")), 2137 ((FLOW_RX && FLOW_TX) ? "RX/TX" : 2138 (FLOW_RX ? "RX" : 2139 (FLOW_TX ? "TX" : "None"))));
··· 2133 (link_speed == IXGBE_LINK_SPEED_10GB_FULL ? 2134 "10 Gbps" : 2135 (link_speed == IXGBE_LINK_SPEED_1GB_FULL ? 2136 + "1 Gbps" : "unknown speed")), 2137 ((FLOW_RX && FLOW_TX) ? "RX/TX" : 2138 (FLOW_RX ? "RX" : 2139 (FLOW_TX ? "TX" : "None"))));
+1
drivers/net/mv643xx_eth.c
··· 2104 MODULE_AUTHOR( "Rabeeh Khoury, Assaf Hoffman, Matthew Dharm, Manish Lachwani" 2105 " and Dale Farnsworth"); 2106 MODULE_DESCRIPTION("Ethernet driver for Marvell MV643XX"); 2107 2108 /* 2109 * The second part is the low level driver of the gigE ethernet ports.
··· 2104 MODULE_AUTHOR( "Rabeeh Khoury, Assaf Hoffman, Matthew Dharm, Manish Lachwani" 2105 " and Dale Farnsworth"); 2106 MODULE_DESCRIPTION("Ethernet driver for Marvell MV643XX"); 2107 + MODULE_ALIAS("platform:mv643xx_eth"); 2108 2109 /* 2110 * The second part is the low level driver of the gigE ethernet ports.
+1 -1
drivers/net/pcmcia/axnet_cs.c
··· 1268 } 1269 } 1270 1271 - if (interrupts && ei_debug) 1272 { 1273 handled = 1; 1274 if (nr_serviced >= MAX_SERVICE)
··· 1268 } 1269 } 1270 1271 + if (interrupts && ei_debug > 3) 1272 { 1273 handled = 1; 1274 if (nr_serviced >= MAX_SERVICE)
+1
drivers/net/phy/Kconfig
··· 67 68 config FIXED_PHY 69 bool "Driver for MDIO Bus/PHY emulation with fixed speed/link PHYs" 70 ---help--- 71 Adds the platform "fixed" MDIO Bus to cover the boards that use 72 PHYs that are not connected to the real MDIO bus.
··· 67 68 config FIXED_PHY 69 bool "Driver for MDIO Bus/PHY emulation with fixed speed/link PHYs" 70 + depends on PHYLIB=y 71 ---help--- 72 Adds the platform "fixed" MDIO Bus to cover the boards that use 73 PHYs that are not connected to the real MDIO bus.
+14 -3
drivers/net/phy/davicom.c
··· 37 38 #define MII_DM9161_SCR 0x10 39 #define MII_DM9161_SCR_INIT 0x0610 40 41 /* DM9161 Interrupt Register */ 42 #define MII_DM9161_INTR 0x15 ··· 104 105 static int dm9161_config_init(struct phy_device *phydev) 106 { 107 - int err; 108 109 /* Isolate the PHY */ 110 err = phy_write(phydev, MII_BMCR, BMCR_ISOLATE); ··· 112 if (err < 0) 113 return err; 114 115 - /* Do not bypass the scrambler/descrambler */ 116 - err = phy_write(phydev, MII_DM9161_SCR, MII_DM9161_SCR_INIT); 117 118 if (err < 0) 119 return err; 120
··· 37 38 #define MII_DM9161_SCR 0x10 39 #define MII_DM9161_SCR_INIT 0x0610 40 + #define MII_DM9161_SCR_RMII 0x0100 41 42 /* DM9161 Interrupt Register */ 43 #define MII_DM9161_INTR 0x15 ··· 103 104 static int dm9161_config_init(struct phy_device *phydev) 105 { 106 + int err, temp; 107 108 /* Isolate the PHY */ 109 err = phy_write(phydev, MII_BMCR, BMCR_ISOLATE); ··· 111 if (err < 0) 112 return err; 113 114 + switch (phydev->interface) { 115 + case PHY_INTERFACE_MODE_MII: 116 + temp = MII_DM9161_SCR_INIT; 117 + break; 118 + case PHY_INTERFACE_MODE_RMII: 119 + temp = MII_DM9161_SCR_INIT | MII_DM9161_SCR_RMII; 120 + break; 121 + default: 122 + return -EINVAL; 123 + } 124 125 + /* Do not bypass the scrambler/descrambler */ 126 + err = phy_write(phydev, MII_DM9161_SCR, temp); 127 if (err < 0) 128 return err; 129
+37 -32
drivers/net/pppol2tp.c
··· 302 struct pppol2tp_session *session; 303 struct hlist_node *walk; 304 305 - read_lock(&tunnel->hlist_lock); 306 hlist_for_each_entry(session, walk, session_list, hlist) { 307 if (session->tunnel_addr.s_session == session_id) { 308 - read_unlock(&tunnel->hlist_lock); 309 return session; 310 } 311 } 312 - read_unlock(&tunnel->hlist_lock); 313 314 return NULL; 315 } ··· 320 { 321 struct pppol2tp_tunnel *tunnel = NULL; 322 323 - read_lock(&pppol2tp_tunnel_list_lock); 324 list_for_each_entry(tunnel, &pppol2tp_tunnel_list, list) { 325 if (tunnel->stats.tunnel_id == tunnel_id) { 326 - read_unlock(&pppol2tp_tunnel_list_lock); 327 return tunnel; 328 } 329 } 330 - read_unlock(&pppol2tp_tunnel_list_lock); 331 332 return NULL; 333 } ··· 342 static void pppol2tp_recv_queue_skb(struct pppol2tp_session *session, struct sk_buff *skb) 343 { 344 struct sk_buff *skbp; 345 u16 ns = PPPOL2TP_SKB_CB(skb)->ns; 346 347 - spin_lock(&session->reorder_q.lock); 348 - skb_queue_walk(&session->reorder_q, skbp) { 349 if (PPPOL2TP_SKB_CB(skbp)->ns > ns) { 350 __skb_insert(skb, skbp->prev, skbp, &session->reorder_q); 351 PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG, ··· 361 __skb_queue_tail(&session->reorder_q, skb); 362 363 out: 364 - spin_unlock(&session->reorder_q.lock); 365 } 366 367 /* Dequeue a single skb. ··· 372 int length = PPPOL2TP_SKB_CB(skb)->length; 373 struct sock *session_sock = NULL; 374 375 - /* We're about to requeue the skb, so unlink it and return resources 376 * to its current owner (a socket receive buffer). 377 */ 378 - skb_unlink(skb, &session->reorder_q); 379 skb_orphan(skb); 380 381 tunnel->stats.rx_packets++; ··· 442 * expect to send up next, dequeue it and any other 443 * in-sequence packets behind it. 444 */ 445 - spin_lock(&session->reorder_q.lock); 446 skb_queue_walk_safe(&session->reorder_q, skb, tmp) { 447 if (time_after(jiffies, PPPOL2TP_SKB_CB(skb)->expires)) { 448 session->stats.rx_seq_discards++; ··· 470 goto out; 471 } 472 } 473 - spin_unlock(&session->reorder_q.lock); 474 pppol2tp_recv_dequeue_skb(session, skb); 475 - spin_lock(&session->reorder_q.lock); 476 } 477 478 out: 479 - spin_unlock(&session->reorder_q.lock); 480 } 481 482 /* Internal receive frame. Do the real work of receiving an L2TP data frame ··· 1064 1065 /* Get routing info from the tunnel socket */ 1066 dst_release(skb->dst); 1067 - skb->dst = sk_dst_get(sk_tun); 1068 skb_orphan(skb); 1069 skb->sk = sk_tun; 1070 ··· 1112 PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, 1113 "%s: closing all sessions...\n", tunnel->name); 1114 1115 - write_lock(&tunnel->hlist_lock); 1116 for (hash = 0; hash < PPPOL2TP_HASH_SIZE; hash++) { 1117 again: 1118 hlist_for_each_safe(walk, tmp, &tunnel->session_hlist[hash]) { ··· 1134 * disappear as we're jumping between locks. 1135 */ 1136 sock_hold(sk); 1137 - write_unlock(&tunnel->hlist_lock); 1138 lock_sock(sk); 1139 1140 if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) { ··· 1159 * list so we are guaranteed to make forward 1160 * progress. 1161 */ 1162 - write_lock(&tunnel->hlist_lock); 1163 goto again; 1164 } 1165 } 1166 - write_unlock(&tunnel->hlist_lock); 1167 } 1168 1169 /* Really kill the tunnel. 
··· 1172 static void pppol2tp_tunnel_free(struct pppol2tp_tunnel *tunnel) 1173 { 1174 /* Remove from socket list */ 1175 - write_lock(&pppol2tp_tunnel_list_lock); 1176 list_del_init(&tunnel->list); 1177 - write_unlock(&pppol2tp_tunnel_list_lock); 1178 1179 atomic_dec(&pppol2tp_tunnel_count); 1180 kfree(tunnel); ··· 1250 /* Delete the session socket from the 1251 * hash 1252 */ 1253 - write_lock(&tunnel->hlist_lock); 1254 hlist_del_init(&session->hlist); 1255 - write_unlock(&tunnel->hlist_lock); 1256 1257 atomic_dec(&pppol2tp_session_count); 1258 } ··· 1397 1398 /* Add tunnel to our list */ 1399 INIT_LIST_HEAD(&tunnel->list); 1400 - write_lock(&pppol2tp_tunnel_list_lock); 1401 list_add(&tunnel->list, &pppol2tp_tunnel_list); 1402 - write_unlock(&pppol2tp_tunnel_list_lock); 1403 atomic_inc(&pppol2tp_tunnel_count); 1404 1405 /* Bump the reference count. The tunnel context is deleted ··· 1604 sk->sk_user_data = session; 1605 1606 /* Add session to the tunnel's hash list */ 1607 - write_lock(&tunnel->hlist_lock); 1608 hlist_add_head(&session->hlist, 1609 pppol2tp_session_id_hash(tunnel, 1610 session->tunnel_addr.s_session)); 1611 - write_unlock(&tunnel->hlist_lock); 1612 1613 atomic_inc(&pppol2tp_session_count); 1614 ··· 2210 int next = 0; 2211 int i; 2212 2213 - read_lock(&tunnel->hlist_lock); 2214 for (i = 0; i < PPPOL2TP_HASH_SIZE; i++) { 2215 hlist_for_each_entry(session, walk, &tunnel->session_hlist[i], hlist) { 2216 if (curr == NULL) { ··· 2228 } 2229 } 2230 out: 2231 - read_unlock(&tunnel->hlist_lock); 2232 if (!found) 2233 session = NULL; 2234 ··· 2239 { 2240 struct pppol2tp_tunnel *tunnel = NULL; 2241 2242 - read_lock(&pppol2tp_tunnel_list_lock); 2243 if (list_is_last(&curr->list, &pppol2tp_tunnel_list)) { 2244 goto out; 2245 } 2246 tunnel = list_entry(curr->list.next, struct pppol2tp_tunnel, list); 2247 out: 2248 - read_unlock(&pppol2tp_tunnel_list_lock); 2249 2250 return tunnel; 2251 }
··· 302 struct pppol2tp_session *session; 303 struct hlist_node *walk; 304 305 + read_lock_bh(&tunnel->hlist_lock); 306 hlist_for_each_entry(session, walk, session_list, hlist) { 307 if (session->tunnel_addr.s_session == session_id) { 308 + read_unlock_bh(&tunnel->hlist_lock); 309 return session; 310 } 311 } 312 + read_unlock_bh(&tunnel->hlist_lock); 313 314 return NULL; 315 } ··· 320 { 321 struct pppol2tp_tunnel *tunnel = NULL; 322 323 + read_lock_bh(&pppol2tp_tunnel_list_lock); 324 list_for_each_entry(tunnel, &pppol2tp_tunnel_list, list) { 325 if (tunnel->stats.tunnel_id == tunnel_id) { 326 + read_unlock_bh(&pppol2tp_tunnel_list_lock); 327 return tunnel; 328 } 329 } 330 + read_unlock_bh(&pppol2tp_tunnel_list_lock); 331 332 return NULL; 333 } ··· 342 static void pppol2tp_recv_queue_skb(struct pppol2tp_session *session, struct sk_buff *skb) 343 { 344 struct sk_buff *skbp; 345 + struct sk_buff *tmp; 346 u16 ns = PPPOL2TP_SKB_CB(skb)->ns; 347 348 + spin_lock_bh(&session->reorder_q.lock); 349 + skb_queue_walk_safe(&session->reorder_q, skbp, tmp) { 350 if (PPPOL2TP_SKB_CB(skbp)->ns > ns) { 351 __skb_insert(skb, skbp->prev, skbp, &session->reorder_q); 352 PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG, ··· 360 __skb_queue_tail(&session->reorder_q, skb); 361 362 out: 363 + spin_unlock_bh(&session->reorder_q.lock); 364 } 365 366 /* Dequeue a single skb. ··· 371 int length = PPPOL2TP_SKB_CB(skb)->length; 372 struct sock *session_sock = NULL; 373 374 + /* We're about to requeue the skb, so return resources 375 * to its current owner (a socket receive buffer). 376 */ 377 skb_orphan(skb); 378 379 tunnel->stats.rx_packets++; ··· 442 * expect to send up next, dequeue it and any other 443 * in-sequence packets behind it. 444 */ 445 + spin_lock_bh(&session->reorder_q.lock); 446 skb_queue_walk_safe(&session->reorder_q, skb, tmp) { 447 if (time_after(jiffies, PPPOL2TP_SKB_CB(skb)->expires)) { 448 session->stats.rx_seq_discards++; ··· 470 goto out; 471 } 472 } 473 + __skb_unlink(skb, &session->reorder_q); 474 + 475 + /* Process the skb. We release the queue lock while we 476 + * do so to let other contexts process the queue. 477 + */ 478 + spin_unlock_bh(&session->reorder_q.lock); 479 pppol2tp_recv_dequeue_skb(session, skb); 480 + spin_lock_bh(&session->reorder_q.lock); 481 } 482 483 out: 484 + spin_unlock_bh(&session->reorder_q.lock); 485 } 486 487 /* Internal receive frame. Do the real work of receiving an L2TP data frame ··· 1059 1060 /* Get routing info from the tunnel socket */ 1061 dst_release(skb->dst); 1062 + skb->dst = dst_clone(__sk_dst_get(sk_tun)); 1063 skb_orphan(skb); 1064 skb->sk = sk_tun; 1065 ··· 1107 PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, 1108 "%s: closing all sessions...\n", tunnel->name); 1109 1110 + write_lock_bh(&tunnel->hlist_lock); 1111 for (hash = 0; hash < PPPOL2TP_HASH_SIZE; hash++) { 1112 again: 1113 hlist_for_each_safe(walk, tmp, &tunnel->session_hlist[hash]) { ··· 1129 * disappear as we're jumping between locks. 1130 */ 1131 sock_hold(sk); 1132 + write_unlock_bh(&tunnel->hlist_lock); 1133 lock_sock(sk); 1134 1135 if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) { ··· 1154 * list so we are guaranteed to make forward 1155 * progress. 1156 */ 1157 + write_lock_bh(&tunnel->hlist_lock); 1158 goto again; 1159 } 1160 } 1161 + write_unlock_bh(&tunnel->hlist_lock); 1162 } 1163 1164 /* Really kill the tunnel. 
··· 1167 static void pppol2tp_tunnel_free(struct pppol2tp_tunnel *tunnel) 1168 { 1169 /* Remove from socket list */ 1170 + write_lock_bh(&pppol2tp_tunnel_list_lock); 1171 list_del_init(&tunnel->list); 1172 + write_unlock_bh(&pppol2tp_tunnel_list_lock); 1173 1174 atomic_dec(&pppol2tp_tunnel_count); 1175 kfree(tunnel); ··· 1245 /* Delete the session socket from the 1246 * hash 1247 */ 1248 + write_lock_bh(&tunnel->hlist_lock); 1249 hlist_del_init(&session->hlist); 1250 + write_unlock_bh(&tunnel->hlist_lock); 1251 1252 atomic_dec(&pppol2tp_session_count); 1253 } ··· 1392 1393 /* Add tunnel to our list */ 1394 INIT_LIST_HEAD(&tunnel->list); 1395 + write_lock_bh(&pppol2tp_tunnel_list_lock); 1396 list_add(&tunnel->list, &pppol2tp_tunnel_list); 1397 + write_unlock_bh(&pppol2tp_tunnel_list_lock); 1398 atomic_inc(&pppol2tp_tunnel_count); 1399 1400 /* Bump the reference count. The tunnel context is deleted ··· 1599 sk->sk_user_data = session; 1600 1601 /* Add session to the tunnel's hash list */ 1602 + write_lock_bh(&tunnel->hlist_lock); 1603 hlist_add_head(&session->hlist, 1604 pppol2tp_session_id_hash(tunnel, 1605 session->tunnel_addr.s_session)); 1606 + write_unlock_bh(&tunnel->hlist_lock); 1607 1608 atomic_inc(&pppol2tp_session_count); 1609 ··· 2205 int next = 0; 2206 int i; 2207 2208 + read_lock_bh(&tunnel->hlist_lock); 2209 for (i = 0; i < PPPOL2TP_HASH_SIZE; i++) { 2210 hlist_for_each_entry(session, walk, &tunnel->session_hlist[i], hlist) { 2211 if (curr == NULL) { ··· 2223 } 2224 } 2225 out: 2226 + read_unlock_bh(&tunnel->hlist_lock); 2227 if (!found) 2228 session = NULL; 2229 ··· 2234 { 2235 struct pppol2tp_tunnel *tunnel = NULL; 2236 2237 + read_lock_bh(&pppol2tp_tunnel_list_lock); 2238 if (list_is_last(&curr->list, &pppol2tp_tunnel_list)) { 2239 goto out; 2240 } 2241 tunnel = list_entry(curr->list.next, struct pppol2tp_tunnel, list); 2242 out: 2243 + read_unlock_bh(&pppol2tp_tunnel_list_lock); 2244 2245 return tunnel; 2246 }
+1 -1
drivers/net/s2io.c
··· 1088 * '-1' on failure 1089 */ 1090 1091 - int init_tti(struct s2io_nic *nic, int link) 1092 { 1093 struct XENA_dev_config __iomem *bar0 = nic->bar0; 1094 register u64 val64 = 0;
··· 1088 * '-1' on failure 1089 */ 1090 1091 + static int init_tti(struct s2io_nic *nic, int link) 1092 { 1093 struct XENA_dev_config __iomem *bar0 = nic->bar0; 1094 register u64 val64 = 0;
+2 -1
drivers/net/tulip/de2104x.c
··· 910 unsigned media = de->media_type; 911 u32 macmode = dr32(MacMode); 912 913 - BUG_ON(de_is_running(de)); 914 915 if (de->de21040) 916 dw32(CSR11, FULL_DUPLEX_MAGIC);
··· 910 unsigned media = de->media_type; 911 u32 macmode = dr32(MacMode); 912 913 + if (de_is_running(de)) 914 + printk(KERN_WARNING "%s: chip is running while changing media!\n", de->dev->name); 915 916 if (de->de21040) 917 dw32(CSR11, FULL_DUPLEX_MAGIC);
+1 -1
drivers/net/wan/sbni.c
··· 751 } 752 753 754 - static __inline void 755 send_complete( struct net_local *nl ) 756 { 757 #ifdef CONFIG_SBNI_MULTILINE
··· 751 } 752 753 754 + static inline void 755 send_complete( struct net_local *nl ) 756 { 757 #ifdef CONFIG_SBNI_MULTILINE
+1 -1
drivers/net/wireless/libertas/cmdresp.c
··· 578 goto done; 579 } 580 if (respcmd != CMD_RET(curcmd) && 581 - respcmd != CMD_802_11_ASSOCIATE && curcmd != CMD_RET_802_11_ASSOCIATE) { 582 lbs_pr_info("Invalid CMD_RESP %x to command %x!\n", respcmd, curcmd); 583 spin_unlock_irqrestore(&priv->driver_lock, flags); 584 ret = -1;
··· 578 goto done; 579 } 580 if (respcmd != CMD_RET(curcmd) && 581 + respcmd != CMD_RET_802_11_ASSOCIATE && curcmd != CMD_802_11_ASSOCIATE) { 582 lbs_pr_info("Invalid CMD_RESP %x to command %x!\n", respcmd, curcmd); 583 spin_unlock_irqrestore(&priv->driver_lock, flags); 584 ret = -1;
+2 -2
drivers/net/wireless/rt2x00/rt61pci.c
··· 2302 * Apply some rules to the filters: 2303 * - Some filters imply different filters to be set. 2304 * - Some things we can't filter out at all. 2305 */ 2306 - if (mc_count) 2307 - *total_flags |= FIF_ALLMULTI; 2308 if (*total_flags & FIF_OTHER_BSS || 2309 *total_flags & FIF_PROMISC_IN_BSS) 2310 *total_flags |= FIF_PROMISC_IN_BSS | FIF_OTHER_BSS;
··· 2302 * Apply some rules to the filters: 2303 * - Some filters imply different filters to be set. 2304 * - Some things we can't filter out at all. 2305 + * - Multicast filter seems to kill broadcast traffic so never use it. 2306 */ 2307 + *total_flags |= FIF_ALLMULTI; 2308 if (*total_flags & FIF_OTHER_BSS || 2309 *total_flags & FIF_PROMISC_IN_BSS) 2310 *total_flags |= FIF_PROMISC_IN_BSS | FIF_OTHER_BSS;
+3 -2
drivers/net/wireless/rt2x00/rt73usb.c
··· 1869 * Apply some rules to the filters: 1870 * - Some filters imply different filters to be set. 1871 * - Some things we can't filter out at all. 1872 */ 1873 - if (mc_count) 1874 - *total_flags |= FIF_ALLMULTI; 1875 if (*total_flags & FIF_OTHER_BSS || 1876 *total_flags & FIF_PROMISC_IN_BSS) 1877 *total_flags |= FIF_PROMISC_IN_BSS | FIF_OTHER_BSS; ··· 2098 /* D-Link */ 2099 { USB_DEVICE(0x07d1, 0x3c03), USB_DEVICE_DATA(&rt73usb_ops) }, 2100 { USB_DEVICE(0x07d1, 0x3c04), USB_DEVICE_DATA(&rt73usb_ops) }, 2101 /* Gemtek */ 2102 { USB_DEVICE(0x15a9, 0x0004), USB_DEVICE_DATA(&rt73usb_ops) }, 2103 /* Gigabyte */
··· 1869 * Apply some rules to the filters: 1870 * - Some filters imply different filters to be set. 1871 * - Some things we can't filter out at all. 1872 + * - Multicast filter seems to kill broadcast traffic so never use it. 1873 */ 1874 + *total_flags |= FIF_ALLMULTI; 1875 if (*total_flags & FIF_OTHER_BSS || 1876 *total_flags & FIF_PROMISC_IN_BSS) 1877 *total_flags |= FIF_PROMISC_IN_BSS | FIF_OTHER_BSS; ··· 2098 /* D-Link */ 2099 { USB_DEVICE(0x07d1, 0x3c03), USB_DEVICE_DATA(&rt73usb_ops) }, 2100 { USB_DEVICE(0x07d1, 0x3c04), USB_DEVICE_DATA(&rt73usb_ops) }, 2101 + { USB_DEVICE(0x07d1, 0x3c07), USB_DEVICE_DATA(&rt73usb_ops) }, 2102 /* Gemtek */ 2103 { USB_DEVICE(0x15a9, 0x0004), USB_DEVICE_DATA(&rt73usb_ops) }, 2104 /* Gigabyte */
+3
fs/proc/base.c
··· 2269 DIR("task", S_IRUGO|S_IXUGO, task), 2270 DIR("fd", S_IRUSR|S_IXUSR, fd), 2271 DIR("fdinfo", S_IRUSR|S_IXUSR, fdinfo), 2272 REG("environ", S_IRUSR, environ), 2273 INF("auxv", S_IRUSR, pid_auxv), 2274 ONE("status", S_IRUGO, pid_status),
··· 2269 DIR("task", S_IRUGO|S_IXUGO, task), 2270 DIR("fd", S_IRUSR|S_IXUSR, fd), 2271 DIR("fdinfo", S_IRUSR|S_IXUSR, fdinfo), 2272 + #ifdef CONFIG_NET 2273 + DIR("net", S_IRUGO|S_IXUSR, net), 2274 + #endif 2275 REG("environ", S_IRUSR, environ), 2276 INF("auxv", S_IRUSR, pid_auxv), 2277 ONE("status", S_IRUGO, pid_status),
+17 -9
fs/proc/generic.c
··· 377 * Don't create negative dentries here, return -ENOENT by hand 378 * instead. 379 */ 380 - struct dentry *proc_lookup(struct inode * dir, struct dentry *dentry, struct nameidata *nd) 381 { 382 struct inode *inode = NULL; 383 - struct proc_dir_entry * de; 384 int error = -ENOENT; 385 386 lock_kernel(); 387 spin_lock(&proc_subdir_lock); 388 - de = PDE(dir); 389 if (de) { 390 for (de = de->subdir; de ; de = de->next) { 391 if (de->namelen != dentry->d_name.len) ··· 392 if (!memcmp(dentry->d_name.name, de->name, de->namelen)) { 393 unsigned int ino; 394 395 - if (de->shadow_proc) 396 - de = de->shadow_proc(current, de); 397 ino = de->low_ino; 398 de_get(de); 399 spin_unlock(&proc_subdir_lock); ··· 414 return ERR_PTR(error); 415 } 416 417 /* 418 * This returns non-zero if at EOF, so that the /proc 419 * root directory can use this and check if it should ··· 429 * value of the readdir() call, as long as it's non-negative 430 * for success.. 431 */ 432 - int proc_readdir(struct file * filp, 433 - void * dirent, filldir_t filldir) 434 { 435 - struct proc_dir_entry * de; 436 unsigned int ino; 437 int i; 438 struct inode *inode = filp->f_path.dentry->d_inode; ··· 440 lock_kernel(); 441 442 ino = inode->i_ino; 443 - de = PDE(inode); 444 if (!de) { 445 ret = -EINVAL; 446 goto out; ··· 498 ret = 1; 499 out: unlock_kernel(); 500 return ret; 501 } 502 503 /*
··· 377 * Don't create negative dentries here, return -ENOENT by hand 378 * instead. 379 */ 380 + struct dentry *proc_lookup_de(struct proc_dir_entry *de, struct inode *dir, 381 + struct dentry *dentry) 382 { 383 struct inode *inode = NULL; 384 int error = -ENOENT; 385 386 lock_kernel(); 387 spin_lock(&proc_subdir_lock); 388 if (de) { 389 for (de = de->subdir; de ; de = de->next) { 390 if (de->namelen != dentry->d_name.len) ··· 393 if (!memcmp(dentry->d_name.name, de->name, de->namelen)) { 394 unsigned int ino; 395 396 ino = de->low_ino; 397 de_get(de); 398 spin_unlock(&proc_subdir_lock); ··· 417 return ERR_PTR(error); 418 } 419 420 + struct dentry *proc_lookup(struct inode *dir, struct dentry *dentry, 421 + struct nameidata *nd) 422 + { 423 + return proc_lookup_de(PDE(dir), dir, dentry); 424 + } 425 + 426 /* 427 * This returns non-zero if at EOF, so that the /proc 428 * root directory can use this and check if it should ··· 426 * value of the readdir() call, as long as it's non-negative 427 * for success.. 428 */ 429 + int proc_readdir_de(struct proc_dir_entry *de, struct file *filp, void *dirent, 430 + filldir_t filldir) 431 { 432 unsigned int ino; 433 int i; 434 struct inode *inode = filp->f_path.dentry->d_inode; ··· 438 lock_kernel(); 439 440 ino = inode->i_ino; 441 if (!de) { 442 ret = -EINVAL; 443 goto out; ··· 497 ret = 1; 498 out: unlock_kernel(); 499 return ret; 500 + } 501 + 502 + int proc_readdir(struct file *filp, void *dirent, filldir_t filldir) 503 + { 504 + struct inode *inode = filp->f_path.dentry->d_inode; 505 + 506 + return proc_readdir_de(PDE(inode), filp, dirent, filldir); 507 } 508 509 /*
+7
fs/proc/internal.h
··· 64 extern const struct file_operations proc_smaps_operations; 65 extern const struct file_operations proc_clear_refs_operations; 66 extern const struct file_operations proc_pagemap_operations; 67 68 void free_proc_entry(struct proc_dir_entry *de); 69 ··· 85 { 86 return PROC_I(inode)->fd; 87 }
··· 64 extern const struct file_operations proc_smaps_operations; 65 extern const struct file_operations proc_clear_refs_operations; 66 extern const struct file_operations proc_pagemap_operations; 67 + extern const struct file_operations proc_net_operations; 68 + extern const struct inode_operations proc_net_inode_operations; 69 70 void free_proc_entry(struct proc_dir_entry *de); 71 ··· 83 { 84 return PROC_I(inode)->fd; 85 } 86 + 87 + struct dentry *proc_lookup_de(struct proc_dir_entry *de, struct inode *ino, 88 + struct dentry *dentry); 89 + int proc_readdir_de(struct proc_dir_entry *de, struct file *filp, void *dirent, 90 + filldir_t filldir);
+89 -28
fs/proc/proc_net.c
··· 63 } 64 EXPORT_SYMBOL_GPL(seq_release_net); 65 66 67 struct proc_dir_entry *proc_net_fops_create(struct net *net, 68 const char *name, mode_t mode, const struct file_operations *fops) ··· 159 } 160 EXPORT_SYMBOL_GPL(get_proc_net); 161 162 - static struct proc_dir_entry *shadow_pde; 163 - 164 - static struct proc_dir_entry *proc_net_shadow(struct task_struct *task, 165 - struct proc_dir_entry *de) 166 - { 167 - return task->nsproxy->net_ns->proc_net; 168 - } 169 - 170 struct proc_dir_entry *proc_net_mkdir(struct net *net, const char *name, 171 struct proc_dir_entry *parent) 172 { ··· 172 173 static __net_init int proc_net_ns_init(struct net *net) 174 { 175 - struct proc_dir_entry *root, *netd, *net_statd; 176 int err; 177 178 err = -ENOMEM; 179 - root = kzalloc(sizeof(*root), GFP_KERNEL); 180 - if (!root) 181 goto out; 182 183 - err = -EEXIST; 184 - netd = proc_net_mkdir(net, "net", root); 185 - if (!netd) 186 - goto free_root; 187 188 err = -EEXIST; 189 net_statd = proc_net_mkdir(net, "stat", netd); 190 if (!net_statd) 191 goto free_net; 192 193 - root->data = net; 194 - 195 - net->proc_net_root = root; 196 net->proc_net = netd; 197 net->proc_net_stat = net_statd; 198 - err = 0; 199 200 out: 201 return err; 202 - free_net: 203 - remove_proc_entry("net", root); 204 - free_root: 205 - kfree(root); 206 - goto out; 207 } 208 209 static __net_exit void proc_net_ns_exit(struct net *net) 210 { 211 remove_proc_entry("stat", net->proc_net); 212 - remove_proc_entry("net", net->proc_net_root); 213 - kfree(net->proc_net_root); 214 } 215 216 static struct pernet_operations __net_initdata proc_net_ns_ops = { ··· 214 215 int __init proc_net_init(void) 216 { 217 - shadow_pde = proc_mkdir("net", NULL); 218 - shadow_pde->shadow_proc = proc_net_shadow; 219 220 return register_pernet_subsys(&proc_net_ns_ops); 221 }
··· 63 } 64 EXPORT_SYMBOL_GPL(seq_release_net); 65 66 + static struct net *get_proc_task_net(struct inode *dir) 67 + { 68 + struct task_struct *task; 69 + struct nsproxy *ns; 70 + struct net *net = NULL; 71 + 72 + rcu_read_lock(); 73 + task = pid_task(proc_pid(dir), PIDTYPE_PID); 74 + if (task != NULL) { 75 + ns = task_nsproxy(task); 76 + if (ns != NULL) 77 + net = get_net(ns->net_ns); 78 + } 79 + rcu_read_unlock(); 80 + 81 + return net; 82 + } 83 + 84 + static struct dentry *proc_tgid_net_lookup(struct inode *dir, 85 + struct dentry *dentry, struct nameidata *nd) 86 + { 87 + struct dentry *de; 88 + struct net *net; 89 + 90 + de = ERR_PTR(-ENOENT); 91 + net = get_proc_task_net(dir); 92 + if (net != NULL) { 93 + de = proc_lookup_de(net->proc_net, dir, dentry); 94 + put_net(net); 95 + } 96 + return de; 97 + } 98 + 99 + static int proc_tgid_net_getattr(struct vfsmount *mnt, struct dentry *dentry, 100 + struct kstat *stat) 101 + { 102 + struct inode *inode = dentry->d_inode; 103 + struct net *net; 104 + 105 + net = get_proc_task_net(inode); 106 + 107 + generic_fillattr(inode, stat); 108 + 109 + if (net != NULL) { 110 + stat->nlink = net->proc_net->nlink; 111 + put_net(net); 112 + } 113 + 114 + return 0; 115 + } 116 + 117 + const struct inode_operations proc_net_inode_operations = { 118 + .lookup = proc_tgid_net_lookup, 119 + .getattr = proc_tgid_net_getattr, 120 + }; 121 + 122 + static int proc_tgid_net_readdir(struct file *filp, void *dirent, 123 + filldir_t filldir) 124 + { 125 + int ret; 126 + struct net *net; 127 + 128 + ret = -EINVAL; 129 + net = get_proc_task_net(filp->f_path.dentry->d_inode); 130 + if (net != NULL) { 131 + ret = proc_readdir_de(net->proc_net, filp, dirent, filldir); 132 + put_net(net); 133 + } 134 + return ret; 135 + } 136 + 137 + const struct file_operations proc_net_operations = { 138 + .read = generic_read_dir, 139 + .readdir = proc_tgid_net_readdir, 140 + }; 141 + 142 143 struct proc_dir_entry *proc_net_fops_create(struct net *net, 144 const char *name, mode_t mode, const struct file_operations *fops) ··· 83 } 84 EXPORT_SYMBOL_GPL(get_proc_net); 85 86 struct proc_dir_entry *proc_net_mkdir(struct net *net, const char *name, 87 struct proc_dir_entry *parent) 88 { ··· 104 105 static __net_init int proc_net_ns_init(struct net *net) 106 { 107 + struct proc_dir_entry *netd, *net_statd; 108 int err; 109 110 err = -ENOMEM; 111 + netd = kzalloc(sizeof(*netd), GFP_KERNEL); 112 + if (!netd) 113 goto out; 114 115 + netd->data = net; 116 + netd->nlink = 2; 117 + netd->name = "net"; 118 + netd->namelen = 3; 119 + netd->parent = &proc_root; 120 121 err = -EEXIST; 122 net_statd = proc_net_mkdir(net, "stat", netd); 123 if (!net_statd) 124 goto free_net; 125 126 net->proc_net = netd; 127 net->proc_net_stat = net_statd; 128 + return 0; 129 130 + free_net: 131 + kfree(netd); 132 out: 133 return err; 134 } 135 136 static __net_exit void proc_net_ns_exit(struct net *net) 137 { 138 remove_proc_entry("stat", net->proc_net); 139 + kfree(net->proc_net); 140 } 141 142 static struct pernet_operations __net_initdata proc_net_ns_ops = { ··· 152 153 int __init proc_net_init(void) 154 { 155 + proc_symlink("net", NULL, "self/net"); 156 157 return register_pernet_subsys(&proc_net_ns_ops); 158 }
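With /proc/net now a symlink to /proc/self/net, each task sees the net directory of its own network namespace. A quick userspace check, assuming a kernel with this change applied:

#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char target[64];
	ssize_t n = readlink("/proc/net", target, sizeof(target) - 1);

	if (n < 0) {
		perror("readlink");
		return 1;
	}
	target[n] = '\0';
	printf("/proc/net -> %s\n", target);	/* expected: self/net */
	return 0;
}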
+1
include/linux/ethtool.h
··· 12 #ifndef _LINUX_ETHTOOL_H 13 #define _LINUX_ETHTOOL_H 14 15 16 /* This should work for both 32 and 64 bit userland. */ 17 struct ethtool_cmd {
··· 12 #ifndef _LINUX_ETHTOOL_H 13 #define _LINUX_ETHTOOL_H 14 15 + #include <linux/types.h> 16 17 /* This should work for both 32 and 64 bit userland. */ 18 struct ethtool_cmd {
-3
include/linux/proc_fs.h
··· 50 typedef int (write_proc_t)(struct file *file, const char __user *buffer, 51 unsigned long count, void *data); 52 typedef int (get_info_t)(char *, char **, off_t, int); 53 - typedef struct proc_dir_entry *(shadow_proc_t)(struct task_struct *task, 54 - struct proc_dir_entry *pde); 55 56 struct proc_dir_entry { 57 unsigned int low_ino; ··· 80 int pde_users; /* number of callers into module in progress */ 81 spinlock_t pde_unload_lock; /* proc_fops checks and pde_users bumps */ 82 struct completion *pde_unload_completion; 83 - shadow_proc_t *shadow_proc; 84 }; 85 86 struct kcore_list {
··· 50 typedef int (write_proc_t)(struct file *file, const char __user *buffer, 51 unsigned long count, void *data); 52 typedef int (get_info_t)(char *, char **, off_t, int); 53 54 struct proc_dir_entry { 55 unsigned int low_ino; ··· 82 int pde_users; /* number of callers into module in progress */ 83 spinlock_t pde_unload_lock; /* proc_fops checks and pde_users bumps */ 84 struct completion *pde_unload_completion; 85 }; 86 87 struct kcore_list {
+1 -1
include/net/bluetooth/bluetooth.h
··· 170 int bt_err(__u16 code); 171 172 extern int hci_sock_init(void); 173 - extern int hci_sock_cleanup(void); 174 175 extern int bt_sysfs_init(void); 176 extern void bt_sysfs_cleanup(void);
··· 170 int bt_err(__u16 code); 171 172 extern int hci_sock_init(void); 173 + extern void hci_sock_cleanup(void); 174 175 extern int bt_sysfs_init(void); 176 extern void bt_sysfs_cleanup(void);
+3 -3
include/net/irda/irttp.h
··· 169 void irttp_flow_request(struct tsap_cb *self, LOCAL_FLOW flow); 170 struct tsap_cb *irttp_dup(struct tsap_cb *self, void *instance); 171 172 - static __inline __u32 irttp_get_saddr(struct tsap_cb *self) 173 { 174 return irlmp_get_saddr(self->lsap); 175 } 176 177 - static __inline __u32 irttp_get_daddr(struct tsap_cb *self) 178 { 179 return irlmp_get_daddr(self->lsap); 180 } 181 182 - static __inline __u32 irttp_get_max_seg_size(struct tsap_cb *self) 183 { 184 return self->max_seg_size; 185 }
··· 169 void irttp_flow_request(struct tsap_cb *self, LOCAL_FLOW flow); 170 struct tsap_cb *irttp_dup(struct tsap_cb *self, void *instance); 171 172 + static inline __u32 irttp_get_saddr(struct tsap_cb *self) 173 { 174 return irlmp_get_saddr(self->lsap); 175 } 176 177 + static inline __u32 irttp_get_daddr(struct tsap_cb *self) 178 { 179 return irlmp_get_daddr(self->lsap); 180 } 181 182 + static inline __u32 irttp_get_max_seg_size(struct tsap_cb *self) 183 { 184 return self->max_seg_size; 185 }
-1
include/net/net_namespace.h
··· 31 32 struct proc_dir_entry *proc_net; 33 struct proc_dir_entry *proc_net_stat; 34 - struct proc_dir_entry *proc_net_root; 35 36 struct list_head sysctl_table_headers; 37
··· 31 32 struct proc_dir_entry *proc_net; 33 struct proc_dir_entry *proc_net_stat; 34 35 struct list_head sysctl_table_headers; 36
-1
include/net/netfilter/nf_conntrack_extend.h
··· 17 struct nf_ct_ext { 18 u8 offset[NF_CT_EXT_NUM]; 19 u8 len; 20 - u8 real_len; 21 char data[0]; 22 }; 23
··· 17 struct nf_ct_ext { 18 u8 offset[NF_CT_EXT_NUM]; 19 u8 len; 20 char data[0]; 21 }; 22
+1 -1
net/bluetooth/bnep/bnep.h
··· 174 175 void bnep_net_setup(struct net_device *dev); 176 int bnep_sock_init(void); 177 - int bnep_sock_cleanup(void); 178 179 static inline int bnep_mc_hash(__u8 *addr) 180 {
··· 174 175 void bnep_net_setup(struct net_device *dev); 176 int bnep_sock_init(void); 177 + void bnep_sock_cleanup(void); 178 179 static inline int bnep_mc_hash(__u8 *addr) 180 {
+1 -3
net/bluetooth/bnep/sock.c
··· 257 return err; 258 } 259 260 - int __exit bnep_sock_cleanup(void) 261 { 262 if (bt_sock_unregister(BTPROTO_BNEP) < 0) 263 BT_ERR("Can't unregister BNEP socket"); 264 265 proto_unregister(&bnep_proto); 266 - 267 - return 0; 268 }
··· 257 return err; 258 } 259 260 + void __exit bnep_sock_cleanup(void) 261 { 262 if (bt_sock_unregister(BTPROTO_BNEP) < 0) 263 BT_ERR("Can't unregister BNEP socket"); 264 265 proto_unregister(&bnep_proto); 266 }
+2 -2
net/bluetooth/hci_core.c
··· 902 903 BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type); 904 905 - hci_unregister_sysfs(hdev); 906 - 907 write_lock_bh(&hci_dev_list_lock); 908 list_del(&hdev->list); 909 write_unlock_bh(&hci_dev_list_lock); ··· 912 kfree_skb(hdev->reassembly[i]); 913 914 hci_notify(hdev, HCI_DEV_UNREG); 915 916 __hci_dev_put(hdev); 917
··· 902 903 BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type); 904 905 write_lock_bh(&hci_dev_list_lock); 906 list_del(&hdev->list); 907 write_unlock_bh(&hci_dev_list_lock); ··· 914 kfree_skb(hdev->reassembly[i]); 915 916 hci_notify(hdev, HCI_DEV_UNREG); 917 + 918 + hci_unregister_sysfs(hdev); 919 920 __hci_dev_put(hdev); 921
+1 -3
net/bluetooth/hci_sock.c
··· 734 return err; 735 } 736 737 - int __exit hci_sock_cleanup(void) 738 { 739 if (bt_sock_unregister(BTPROTO_HCI) < 0) 740 BT_ERR("HCI socket unregistration failed"); ··· 742 hci_unregister_notifier(&hci_sock_nblock); 743 744 proto_unregister(&hci_sk_proto); 745 - 746 - return 0; 747 }
··· 734 return err; 735 } 736 737 + void __exit hci_sock_cleanup(void) 738 { 739 if (bt_sock_unregister(BTPROTO_HCI) < 0) 740 BT_ERR("HCI socket unregistration failed"); ··· 742 hci_unregister_notifier(&hci_sock_nblock); 743 744 proto_unregister(&hci_sk_proto); 745 }
+10 -2
net/ipv4/tcp_output.c
··· 1035 * introducing MSS oddities to segment boundaries. In rare cases where 1036 * mss_now != mss_cache, we will request caller to create a small skb 1037 * per input skb which could be mostly avoided here (if desired). 1038 */ 1039 static unsigned int tcp_mss_split_point(struct sock *sk, struct sk_buff *skb, 1040 unsigned int mss_now, unsigned int cwnd) ··· 1055 if (likely(cwnd_len <= window && skb != tcp_write_queue_tail(sk))) 1056 return cwnd_len; 1057 1058 - if (skb == tcp_write_queue_tail(sk) && cwnd_len <= skb->len) 1059 return cwnd_len; 1060 1061 - needed = min(skb->len, window); 1062 return needed - needed % mss_now; 1063 } 1064
··· 1035 * introducing MSS oddities to segment boundaries. In rare cases where 1036 * mss_now != mss_cache, we will request caller to create a small skb 1037 * per input skb which could be mostly avoided here (if desired). 1038 + * 1039 + * We explicitly want to create a request for splitting write queue tail 1040 + * to a small skb for Nagle purposes while avoiding unnecessary modulos, 1041 + * thus all the complexity (cwnd_len is always MSS multiple which we 1042 + * return whenever allowed by the other factors). Basically we need the 1043 + * modulo only when the receiver window alone is the limiting factor or 1044 + * when we would be allowed to send the split-due-to-Nagle skb fully. 1045 */ 1046 static unsigned int tcp_mss_split_point(struct sock *sk, struct sk_buff *skb, 1047 unsigned int mss_now, unsigned int cwnd) ··· 1048 if (likely(cwnd_len <= window && skb != tcp_write_queue_tail(sk))) 1049 return cwnd_len; 1050 1051 + needed = min(skb->len, window); 1052 + 1053 + if (skb == tcp_write_queue_tail(sk) && cwnd_len <= needed) 1054 return cwnd_len; 1055 1056 return needed - needed % mss_now; 1057 } 1058
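The TSO fix makes the receiver window a hard cap even for the last skb in the write queue: the full congestion-window length is only returned when it also fits within min(skb->len, window). A userspace sketch of the corrected arithmetic with made-up numbers; it mirrors the logic, it is not the kernel function itself:

#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

static unsigned int split_point(unsigned int skb_len, unsigned int window,
				unsigned int mss_now, unsigned int cwnd_len,
				int is_tail)
{
	unsigned int needed;

	if (cwnd_len <= window && !is_tail)
		return cwnd_len;

	needed = min_u(skb_len, window);

	/* only send cwnd_len on the last skb if it also fits in the window */
	if (is_tail && cwnd_len <= needed)
		return cwnd_len;

	return needed - needed % mss_now;
}

int main(void)
{
	/* the cwnd would allow 4000 bytes but the receiver window is 2500 */
	printf("%u\n", split_point(5000, 2500, 1000, 4000, 1)); /* 2000, not 4000 */
	return 0;
}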
+1 -1
net/netfilter/nf_conntrack_expect.c
··· 381 if (nf_ct_expect_count >= nf_ct_expect_max) { 382 if (net_ratelimit()) 383 printk(KERN_WARNING 384 - "nf_conntrack: expectation table full"); 385 ret = -EMFILE; 386 goto out; 387 }
··· 381 if (nf_ct_expect_count >= nf_ct_expect_max) { 382 if (net_ratelimit()) 383 printk(KERN_WARNING 384 + "nf_conntrack: expectation table full\n"); 385 ret = -EMFILE; 386 goto out; 387 }
+3 -16
net/netfilter/nf_conntrack_extend.c
··· 19 static struct nf_ct_ext_type *nf_ct_ext_types[NF_CT_EXT_NUM]; 20 static DEFINE_MUTEX(nf_ct_ext_type_mutex); 21 22 - /* Horrible trick to figure out smallest amount worth kmallocing. */ 23 - #define CACHE(x) (x) + 0 * 24 - enum { 25 - NF_CT_EXT_MIN_SIZE = 26 - #include <linux/kmalloc_sizes.h> 27 - 1 }; 28 - #undef CACHE 29 - 30 void __nf_ct_ext_destroy(struct nf_conn *ct) 31 { 32 unsigned int i; ··· 45 static void * 46 nf_ct_ext_create(struct nf_ct_ext **ext, enum nf_ct_ext_id id, gfp_t gfp) 47 { 48 - unsigned int off, len, real_len; 49 struct nf_ct_ext_type *t; 50 51 rcu_read_lock(); ··· 53 BUG_ON(t == NULL); 54 off = ALIGN(sizeof(struct nf_ct_ext), t->align); 55 len = off + t->len; 56 - real_len = t->alloc_size; 57 rcu_read_unlock(); 58 59 - *ext = kzalloc(real_len, gfp); 60 if (!*ext) 61 return NULL; 62 63 (*ext)->offset[id] = off; 64 (*ext)->len = len; 65 - (*ext)->real_len = real_len; 66 67 return (void *)(*ext) + off; 68 } ··· 85 newlen = newoff + t->len; 86 rcu_read_unlock(); 87 88 - if (newlen >= ct->ext->real_len) { 89 new = kmalloc(newlen, gfp); 90 if (!new) 91 return NULL; ··· 104 rcu_read_unlock(); 105 } 106 kfree(ct->ext); 107 - new->real_len = newlen; 108 ct->ext = new; 109 } 110 ··· 145 t1->alloc_size = ALIGN(t1->alloc_size, t2->align) 146 + t2->len; 147 } 148 - if (t1->alloc_size < NF_CT_EXT_MIN_SIZE) 149 - t1->alloc_size = NF_CT_EXT_MIN_SIZE; 150 } 151 } 152
··· 19 static struct nf_ct_ext_type *nf_ct_ext_types[NF_CT_EXT_NUM]; 20 static DEFINE_MUTEX(nf_ct_ext_type_mutex); 21 22 void __nf_ct_ext_destroy(struct nf_conn *ct) 23 { 24 unsigned int i; ··· 53 static void * 54 nf_ct_ext_create(struct nf_ct_ext **ext, enum nf_ct_ext_id id, gfp_t gfp) 55 { 56 + unsigned int off, len; 57 struct nf_ct_ext_type *t; 58 59 rcu_read_lock(); ··· 61 BUG_ON(t == NULL); 62 off = ALIGN(sizeof(struct nf_ct_ext), t->align); 63 len = off + t->len; 64 rcu_read_unlock(); 65 66 + *ext = kzalloc(t->alloc_size, gfp); 67 if (!*ext) 68 return NULL; 69 70 (*ext)->offset[id] = off; 71 (*ext)->len = len; 72 73 return (void *)(*ext) + off; 74 } ··· 95 newlen = newoff + t->len; 96 rcu_read_unlock(); 97 98 + if (newlen >= ksize(ct->ext)) { 99 new = kmalloc(newlen, gfp); 100 if (!new) 101 return NULL; ··· 114 rcu_read_unlock(); 115 } 116 kfree(ct->ext); 117 ct->ext = new; 118 } 119 ··· 156 t1->alloc_size = ALIGN(t1->alloc_size, t2->align) 157 + t2->len; 158 } 159 } 160 } 161
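Instead of tracking real_len itself, the extension code now asks the allocator how large the existing allocation really is via ksize(). A hedged userspace analogue of the same idea, using glibc's malloc_usable_size() in place of ksize():

#include <malloc.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	size_t want = 20;
	char *buf = malloc(want);

	if (!buf)
		return 1;
	/* the allocator may round up, so a later grow can fit for free */
	printf("requested %zu, usable %zu\n", want, malloc_usable_size(buf));
	free(buf);
	return 0;
}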
+1 -1
net/netfilter/nf_queue.c
··· 51 return -EINVAL; 52 53 mutex_lock(&queue_handler_mutex); 54 - if (queue_handler[pf] != qh) { 55 mutex_unlock(&queue_handler_mutex); 56 return -EINVAL; 57 }
··· 51 return -EINVAL; 52 53 mutex_lock(&queue_handler_mutex); 54 + if (queue_handler[pf] && queue_handler[pf] != qh) { 55 mutex_unlock(&queue_handler_mutex); 56 return -EINVAL; 57 }
+5 -2
net/netfilter/xt_time.c
··· 95 */ 96 r->dse = time / 86400; 97 98 - /* 1970-01-01 (w=0) was a Thursday (4). */ 99 - r->weekday = (4 + r->dse) % 7; 100 } 101 102 static void localtime_3(struct xtm *r, time_t time)
··· 95 */ 96 r->dse = time / 86400; 97 98 + /* 99 + * 1970-01-01 (w=0) was a Thursday (4). 100 + * -1 and +1 map Sunday properly onto 7. 101 + */ 102 + r->weekday = (4 + r->dse - 1) % 7 + 1; 103 } 104 105 static void localtime_3(struct xtm *r, time_t time)
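Worked check of the new weekday mapping: day 0 of the epoch (1970-01-01) was a Thursday, and the match code wants ISO-style weekdays 1..7 with Sunday mapped to 7, never 0. A small userspace verification:

#include <stdio.h>

static unsigned int weekday(unsigned long dse)
{
	return (4 + dse - 1) % 7 + 1;
}

int main(void)
{
	printf("1970-01-01 (dse=0): %u\n", weekday(0));	/* 4 = Thursday */
	printf("1970-01-04 (dse=3): %u\n", weekday(3));	/* 7 = Sunday; was 0 before the fix */
	return 0;
}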
+2 -1
net/rxrpc/ar-recvmsg.c
··· 143 /* copy the peer address and timestamp */ 144 if (!continue_call) { 145 if (msg->msg_name && msg->msg_namelen > 0) 146 - memcpy(&msg->msg_name, &call->conn->trans->peer->srx, 147 sizeof(call->conn->trans->peer->srx)); 148 sock_recv_timestamp(msg, &rx->sk, skb); 149 }
··· 143 /* copy the peer address and timestamp */ 144 if (!continue_call) { 145 if (msg->msg_name && msg->msg_namelen > 0) 146 + memcpy(msg->msg_name, 147 + &call->conn->trans->peer->srx, 148 sizeof(call->conn->trans->peer->srx)); 149 sock_recv_timestamp(msg, &rx->sk, skb); 150 }
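The rxrpc bug is a classic pointer-vs-buffer mix-up: msg_name is a pointer to a caller-supplied buffer, so copying to &msg->msg_name overwrote the pointer itself instead of filling the buffer. A userspace sketch of the difference, using a made-up struct rather than the kernel msghdr:

#include <stdio.h>
#include <string.h>

struct fake_msghdr {
	void *msg_name;		/* points at the caller's address buffer */
};

int main(void)
{
	char addrbuf[16] = "";
	struct fake_msghdr msg = { .msg_name = addrbuf };
	const char peer[] = "peer-address";

	/* correct: fill the buffer the pointer refers to */
	memcpy(msg.msg_name, peer, sizeof(peer));

	printf("%s\n", addrbuf);	/* prints "peer-address" */
	return 0;
}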
+3 -1
net/sctp/bind_addr.c
··· 209 int sctp_del_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *del_addr) 210 { 211 struct sctp_sockaddr_entry *addr, *temp; 212 213 /* We hold the socket lock when calling this function, 214 * and that acts as a writer synchronizing lock. ··· 217 list_for_each_entry_safe(addr, temp, &bp->address_list, list) { 218 if (sctp_cmp_addr_exact(&addr->a, del_addr)) { 219 /* Found the exact match. */ 220 addr->valid = 0; 221 list_del_rcu(&addr->list); 222 break; 223 } 224 } 225 226 - if (addr && !addr->valid) { 227 call_rcu(&addr->rcu, sctp_local_addr_free); 228 SCTP_DBG_OBJCNT_DEC(addr); 229 return 0;
··· 209 int sctp_del_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *del_addr) 210 { 211 struct sctp_sockaddr_entry *addr, *temp; 212 + int found = 0; 213 214 /* We hold the socket lock when calling this function, 215 * and that acts as a writer synchronizing lock. ··· 216 list_for_each_entry_safe(addr, temp, &bp->address_list, list) { 217 if (sctp_cmp_addr_exact(&addr->a, del_addr)) { 218 /* Found the exact match. */ 219 + found = 1; 220 addr->valid = 0; 221 list_del_rcu(&addr->list); 222 break; 223 } 224 } 225 226 + if (found) { 227 call_rcu(&addr->rcu, sctp_local_addr_free); 228 SCTP_DBG_OBJCNT_DEC(addr); 229 return 0;
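This fix (and the matching ones in net/sctp/ipv6.c and net/sctp/protocol.c below) replaces the "addr && !addr->valid" test with an explicit flag: after a kernel-style list_for_each_entry_safe() runs to completion, the cursor is never NULL, it points at a bogus entry computed from the list head. A self-contained userspace sketch of the pitfall using simplified list macros (relies on the GCC typeof extension, as the kernel does):

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

#define list_for_each_entry_safe(pos, n, head, member)			\
	for (pos = container_of((head)->next, typeof(*pos), member),	\
	     n = container_of(pos->member.next, typeof(*pos), member);	\
	     &pos->member != (head);					\
	     pos = n, n = container_of(n->member.next, typeof(*n), member))

struct entry {
	int value;
	struct list_head list;
};

int main(void)
{
	struct list_head head = { &head, &head };	/* empty list */
	struct entry *pos, *n;
	int found = 0;

	list_for_each_entry_safe(pos, n, &head, list) {
		if (pos->value == 42) {
			found = 1;
			break;
		}
	}

	/* even on an empty list the cursor is a non-NULL bogus pointer
	 * derived from &head, so only the explicit flag is trustworthy */
	printf("pos is %s, found = %d\n", pos ? "non-NULL" : "NULL", found);
	return 0;
}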
+3 -1
net/sctp/ipv6.c
··· 89 struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr; 90 struct sctp_sockaddr_entry *addr = NULL; 91 struct sctp_sockaddr_entry *temp; 92 93 switch (ev) { 94 case NETDEV_UP: ··· 112 &sctp_local_addr_list, list) { 113 if (ipv6_addr_equal(&addr->a.v6.sin6_addr, 114 &ifa->addr)) { 115 addr->valid = 0; 116 list_del_rcu(&addr->list); 117 break; 118 } 119 } 120 spin_unlock_bh(&sctp_local_addr_lock); 121 - if (addr && !addr->valid) 122 call_rcu(&addr->rcu, sctp_local_addr_free); 123 break; 124 }
··· 89 struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr; 90 struct sctp_sockaddr_entry *addr = NULL; 91 struct sctp_sockaddr_entry *temp; 92 + int found = 0; 93 94 switch (ev) { 95 case NETDEV_UP: ··· 111 &sctp_local_addr_list, list) { 112 if (ipv6_addr_equal(&addr->a.v6.sin6_addr, 113 &ifa->addr)) { 114 + found = 1; 115 addr->valid = 0; 116 list_del_rcu(&addr->list); 117 break; 118 } 119 } 120 spin_unlock_bh(&sctp_local_addr_lock); 121 + if (found) 122 call_rcu(&addr->rcu, sctp_local_addr_free); 123 break; 124 }
+3 -1
net/sctp/protocol.c
··· 628 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr; 629 struct sctp_sockaddr_entry *addr = NULL; 630 struct sctp_sockaddr_entry *temp; 631 632 switch (ev) { 633 case NETDEV_UP: ··· 648 list_for_each_entry_safe(addr, temp, 649 &sctp_local_addr_list, list) { 650 if (addr->a.v4.sin_addr.s_addr == ifa->ifa_local) { 651 addr->valid = 0; 652 list_del_rcu(&addr->list); 653 break; 654 } 655 } 656 spin_unlock_bh(&sctp_local_addr_lock); 657 - if (addr && !addr->valid) 658 call_rcu(&addr->rcu, sctp_local_addr_free); 659 break; 660 }
··· 628 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr; 629 struct sctp_sockaddr_entry *addr = NULL; 630 struct sctp_sockaddr_entry *temp; 631 + int found = 0; 632 633 switch (ev) { 634 case NETDEV_UP: ··· 647 list_for_each_entry_safe(addr, temp, 648 &sctp_local_addr_list, list) { 649 if (addr->a.v4.sin_addr.s_addr == ifa->ifa_local) { 650 + found = 1; 651 addr->valid = 0; 652 list_del_rcu(&addr->list); 653 break; 654 } 655 } 656 spin_unlock_bh(&sctp_local_addr_lock); 657 + if (found) 658 call_rcu(&addr->rcu, sctp_local_addr_free); 659 break; 660 }
+8
net/sctp/sm_make_chunk.c
··· 2375 asoc->peer.ipv4_address = 0; 2376 asoc->peer.ipv6_address = 0; 2377 2378 /* Cycle through address types; avoid divide by 0. */ 2379 sat = ntohs(param.p->length) - sizeof(sctp_paramhdr_t); 2380 if (sat)
··· 2375 asoc->peer.ipv4_address = 0; 2376 asoc->peer.ipv6_address = 0; 2377 2378 + /* Assume that peer supports the address family 2379 + * by which it sends a packet. 2380 + */ 2381 + if (peer_addr->sa.sa_family == AF_INET6) 2382 + asoc->peer.ipv6_address = 1; 2383 + else if (peer_addr->sa.sa_family == AF_INET) 2384 + asoc->peer.ipv4_address = 1; 2385 + 2386 /* Cycle through address types; avoid divide by 0. */ 2387 sat = ntohs(param.p->length) - sizeof(sctp_paramhdr_t); 2388 if (sat)
+62 -15
net/sctp/socket.c
··· 2933 char __user *optval, 2934 int optlen) 2935 { 2936 int val; 2937 2938 - if (optlen != sizeof(int)) 2939 - return -EINVAL; 2940 - if (get_user(val, (int __user *)optval)) 2941 - return -EFAULT; 2942 - 2943 - if (val < 0) 2944 return -EINVAL; 2945 2946 - sctp_sk(sk)->max_burst = val; 2947 2948 return 0; 2949 } ··· 5027 char __user *optval, 5028 int __user *optlen) 5029 { 5030 - int val; 5031 5032 if (len < sizeof(int)) 5033 return -EINVAL; 5034 5035 - len = sizeof(int); 5036 5037 - val = sctp_sk(sk)->max_burst; 5038 - if (put_user(len, optlen)) 5039 - return -EFAULT; 5040 - if (copy_to_user(optval, &val, len)) 5041 - return -EFAULT; 5042 5043 - return -ENOTSUPP; 5044 } 5045 5046 static int sctp_getsockopt_hmac_ident(struct sock *sk, int len,
··· 2933 char __user *optval, 2934 int optlen) 2935 { 2936 + struct sctp_assoc_value params; 2937 + struct sctp_sock *sp; 2938 + struct sctp_association *asoc; 2939 int val; 2940 + int assoc_id = 0; 2941 2942 + if (optlen < sizeof(int)) 2943 return -EINVAL; 2944 2945 + if (optlen == sizeof(int)) { 2946 + printk(KERN_WARNING 2947 + "SCTP: Use of int in max_burst socket option deprecated\n"); 2948 + printk(KERN_WARNING 2949 + "SCTP: Use struct sctp_assoc_value instead\n"); 2950 + if (copy_from_user(&val, optval, optlen)) 2951 + return -EFAULT; 2952 + } else if (optlen == sizeof(struct sctp_assoc_value)) { 2953 + if (copy_from_user(&params, optval, optlen)) 2954 + return -EFAULT; 2955 + val = params.assoc_value; 2956 + assoc_id = params.assoc_id; 2957 + } else 2958 + return -EINVAL; 2959 + 2960 + sp = sctp_sk(sk); 2961 + 2962 + if (assoc_id != 0) { 2963 + asoc = sctp_id2assoc(sk, assoc_id); 2964 + if (!asoc) 2965 + return -EINVAL; 2966 + asoc->max_burst = val; 2967 + } else 2968 + sp->max_burst = val; 2969 2970 return 0; 2971 } ··· 5005 char __user *optval, 5006 int __user *optlen) 5007 { 5008 + struct sctp_assoc_value params; 5009 + struct sctp_sock *sp; 5010 + struct sctp_association *asoc; 5011 5012 if (len < sizeof(int)) 5013 return -EINVAL; 5014 5015 + if (len == sizeof(int)) { 5016 + printk(KERN_WARNING 5017 + "SCTP: Use of int in max_burst socket option deprecated\n"); 5018 + printk(KERN_WARNING 5019 + "SCTP: Use struct sctp_assoc_value instead\n"); 5020 + params.assoc_id = 0; 5021 + } else if (len == sizeof (struct sctp_assoc_value)) { 5022 + if (copy_from_user(&params, optval, len)) 5023 + return -EFAULT; 5024 + } else 5025 + return -EINVAL; 5026 5027 + sp = sctp_sk(sk); 5028 5029 + if (params.assoc_id != 0) { 5030 + asoc = sctp_id2assoc(sk, params.assoc_id); 5031 + if (!asoc) 5032 + return -EINVAL; 5033 + params.assoc_value = asoc->max_burst; 5034 + } else 5035 + params.assoc_value = sp->max_burst; 5036 + 5037 + if (len == sizeof(int)) { 5038 + if (copy_to_user(optval, &params.assoc_value, len)) 5039 + return -EFAULT; 5040 + } else { 5041 + if (copy_to_user(optval, &params, len)) 5042 + return -EFAULT; 5043 + } 5044 + 5045 + return 0; 5046 + 5047 } 5048 5049 static int sctp_getsockopt_hmac_ident(struct sock *sk, int len,
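With this change SCTP_MAX_BURST accepts (and reports) either a bare int, now deprecated, or a struct sctp_assoc_value that can target a single association. A hedged userspace usage sketch, assuming the lksctp-tools headers are installed:

#include <netinet/in.h>
#include <netinet/sctp.h>
#include <stdio.h>
#include <sys/socket.h>

int main(void)
{
	struct sctp_assoc_value val = {
		.assoc_id    = 0,	/* 0 = set the socket-wide default */
		.assoc_value = 4,	/* new max burst */
	};
	int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	if (setsockopt(fd, IPPROTO_SCTP, SCTP_MAX_BURST, &val, sizeof(val)) < 0)
		perror("setsockopt(SCTP_MAX_BURST)");
	return 0;
}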