Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

* master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6: (30 commits)
[TIPC]: Initial activation message now includes TIPC version number
[TIPC]: Improve response to requests for node/link information
[TIPC]: Fixed skb_under_panic caused by tipc_link_bundle_buf
[IrDA]: Fix the AU1000 FIR dependencies
[IrDA]: Fix RCU lock pairing on error path
[XFRM]: unexport xfrm_state_mtu
[NET]: make skb_release_data() static
[NETFILTER] ipv4: Fix typo (Bugzilla #6753)
[IrDA]: MCS7780 usb_driver struct should be static
[BNX2]: Turn off link during shutdown
[BNX2]: Use dev_kfree_skb() instead of the _irq version
[ATM]: basic sysfs support for ATM devices
[ATM]: [suni] change suni_init to __devinit
[ATM]: [iphase] should be __devinit not __init
[ATM]: [idt77105] should be __devinit not __init
[BNX2]: Add NETIF_F_TSO_ECN
[NET]: Add ECN support for TSO
[AF_UNIX]: Datagram getpeersec
[NET]: Fix logical error in skb_gso_ok
[PKT_SCHED]: PSCHED_TADD() and PSCHED_TADD2() can result in tv_usec >= 1000000
...

+520 -148
-1
arch/x86_64/kernel/functionlist
··· 384 *(.text.__end_that_request_first) 385 *(.text.wake_up_bit) 386 *(.text.unuse_mm) 387 - *(.text.skb_release_data) 388 *(.text.shrink_icache_memory) 389 *(.text.sched_balance_self) 390 *(.text.__pmd_alloc)
··· 384 *(.text.__end_that_request_first) 385 *(.text.wake_up_bit) 386 *(.text.unuse_mm) 387 *(.text.shrink_icache_memory) 388 *(.text.sched_balance_self) 389 *(.text.__pmd_alloc)
+1 -1
drivers/atm/he.c
··· 1018 return 0; 1019 } 1020 1021 - static int __init 1022 he_start(struct atm_dev *dev) 1023 { 1024 struct he_dev *he_dev;
··· 1018 return 0; 1019 } 1020 1021 + static int __devinit 1022 he_start(struct atm_dev *dev) 1023 { 1024 struct he_dev *he_dev;
+1 -1
drivers/atm/idt77105.c
··· 358 }; 359 360 361 - int idt77105_init(struct atm_dev *dev) 362 { 363 dev->phy = &idt77105_ops; 364 return 0;
··· 358 }; 359 360 361 + int __devinit idt77105_init(struct atm_dev *dev) 362 { 363 dev->phy = &idt77105_ops; 364 return 0;
+1 -1
drivers/atm/idt77105.h
··· 76 #define IDT77105_CTRSEL_RHEC 0x01 /* W, Rx HEC Error Counter */ 77 78 #ifdef __KERNEL__ 79 - int idt77105_init(struct atm_dev *dev) __init; 80 #endif 81 82 /*
··· 76 #define IDT77105_CTRSEL_RHEC 0x01 /* W, Rx HEC Error Counter */ 77 78 #ifdef __KERNEL__ 79 + int idt77105_init(struct atm_dev *dev); 80 #endif 81 82 /*
+2 -2
drivers/atm/iphase.c
··· 2284 } 2285 2286 2287 - static int __init ia_init(struct atm_dev *dev) 2288 { 2289 IADEV *iadev; 2290 unsigned long real_base; ··· 2480 iadev->rx_dle_dma); 2481 } 2482 2483 - static int __init ia_start(struct atm_dev *dev) 2484 { 2485 IADEV *iadev; 2486 int error;
··· 2284 } 2285 2286 2287 + static int __devinit ia_init(struct atm_dev *dev) 2288 { 2289 IADEV *iadev; 2290 unsigned long real_base; ··· 2480 iadev->rx_dle_dma); 2481 } 2482 2483 + static int __devinit ia_start(struct atm_dev *dev) 2484 { 2485 IADEV *iadev; 2486 int error;
+1 -1
drivers/atm/suni.c
··· 289 }; 290 291 292 - int suni_init(struct atm_dev *dev) 293 { 294 unsigned char mri; 295
··· 289 }; 290 291 292 + int __devinit suni_init(struct atm_dev *dev) 293 { 294 unsigned char mri; 295
+21 -11
drivers/net/bnx2.c
··· 57 58 #define DRV_MODULE_NAME "bnx2" 59 #define PFX DRV_MODULE_NAME ": " 60 - #define DRV_MODULE_VERSION "1.4.42" 61 - #define DRV_MODULE_RELDATE "June 12, 2006" 62 63 #define RUN_AT(x) (jiffies + (x)) 64 ··· 1676 1677 tx_free_bd += last + 1; 1678 1679 - dev_kfree_skb_irq(skb); 1680 1681 hw_cons = bp->hw_tx_cons = 1682 sblk->status_tx_quick_consumer_index0; ··· 1824 if ((len > (bp->dev->mtu + ETH_HLEN)) && 1825 (ntohs(skb->protocol) != 0x8100)) { 1826 1827 - dev_kfree_skb_irq(skb); 1828 goto next_rx; 1829 1830 } ··· 3643 skb_shinfo(skb)->frags[j].size, 3644 PCI_DMA_TODEVICE); 3645 } 3646 - dev_kfree_skb_any(skb); 3647 i += j + 1; 3648 } 3649 ··· 3669 3670 rx_buf->skb = NULL; 3671 3672 - dev_kfree_skb_any(skb); 3673 } 3674 } 3675 ··· 3999 udelay(5); 4000 4001 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE); 4002 - dev_kfree_skb_irq(skb); 4003 4004 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) { 4005 goto loopback_test_done; ··· 4541 bnx2_netif_stop(bp); 4542 del_timer_sync(&bp->timer); 4543 if (bp->flags & NO_WOL_FLAG) 4544 - reset_code = BNX2_DRV_MSG_CODE_UNLOAD; 4545 else if (bp->wol) 4546 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL; 4547 else ··· 5128 return 0; 5129 } 5130 5131 #define BNX2_NUM_STATS 46 5132 5133 static struct { ··· 5455 .set_sg = ethtool_op_set_sg, 5456 #ifdef BCM_TSO 5457 .get_tso = ethtool_op_get_tso, 5458 - .set_tso = ethtool_op_set_tso, 5459 #endif 5460 .self_test_count = bnx2_self_test_count, 5461 .self_test = bnx2_self_test, ··· 5936 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; 5937 #endif 5938 #ifdef BCM_TSO 5939 - dev->features |= NETIF_F_TSO; 5940 #endif 5941 5942 netif_carrier_off(bp->dev); ··· 5978 netif_device_detach(dev); 5979 del_timer_sync(&bp->timer); 5980 if (bp->flags & NO_WOL_FLAG) 5981 - reset_code = BNX2_DRV_MSG_CODE_UNLOAD; 5982 else if (bp->wol) 5983 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL; 5984 else
··· 57 58 #define DRV_MODULE_NAME "bnx2" 59 #define PFX DRV_MODULE_NAME ": " 60 + #define DRV_MODULE_VERSION "1.4.43" 61 + #define DRV_MODULE_RELDATE "June 28, 2006" 62 63 #define RUN_AT(x) (jiffies + (x)) 64 ··· 1676 1677 tx_free_bd += last + 1; 1678 1679 + dev_kfree_skb(skb); 1680 1681 hw_cons = bp->hw_tx_cons = 1682 sblk->status_tx_quick_consumer_index0; ··· 1824 if ((len > (bp->dev->mtu + ETH_HLEN)) && 1825 (ntohs(skb->protocol) != 0x8100)) { 1826 1827 + dev_kfree_skb(skb); 1828 goto next_rx; 1829 1830 } ··· 3643 skb_shinfo(skb)->frags[j].size, 3644 PCI_DMA_TODEVICE); 3645 } 3646 + dev_kfree_skb(skb); 3647 i += j + 1; 3648 } 3649 ··· 3669 3670 rx_buf->skb = NULL; 3671 3672 + dev_kfree_skb(skb); 3673 } 3674 } 3675 ··· 3999 udelay(5); 4000 4001 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE); 4002 + dev_kfree_skb(skb); 4003 4004 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) { 4005 goto loopback_test_done; ··· 4541 bnx2_netif_stop(bp); 4542 del_timer_sync(&bp->timer); 4543 if (bp->flags & NO_WOL_FLAG) 4544 + reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN; 4545 else if (bp->wol) 4546 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL; 4547 else ··· 5128 return 0; 5129 } 5130 5131 + static int 5132 + bnx2_set_tso(struct net_device *dev, u32 data) 5133 + { 5134 + if (data) 5135 + dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN; 5136 + else 5137 + dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN); 5138 + return 0; 5139 + } 5140 + 5141 #define BNX2_NUM_STATS 46 5142 5143 static struct { ··· 5445 .set_sg = ethtool_op_set_sg, 5446 #ifdef BCM_TSO 5447 .get_tso = ethtool_op_get_tso, 5448 + .set_tso = bnx2_set_tso, 5449 #endif 5450 .self_test_count = bnx2_self_test_count, 5451 .self_test = bnx2_self_test, ··· 5926 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; 5927 #endif 5928 #ifdef BCM_TSO 5929 + dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN; 5930 #endif 5931 5932 netif_carrier_off(bp->dev); ··· 5968 netif_device_detach(dev); 5969 del_timer_sync(&bp->timer); 5970 if (bp->flags & NO_WOL_FLAG) 5971 + reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN; 5972 else if (bp->wol) 5973 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL; 5974 else
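Editor's note (not part of the diff): the new bnx2_set_tso() hook is reached through the generic ethtool TSO control, so toggling TSO from user space now sets or clears NETIF_F_TSO and NETIF_F_TSO_ECN together. A minimal sketch of that user-space side, assuming an interface named "eth0":

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	/* ETHTOOL_STSO ends up in the driver's .set_tso (bnx2_set_tso above). */
	struct ethtool_value eval = { .cmd = ETHTOOL_STSO, .data = 1 };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* assumed interface name */
	ifr.ifr_data = (char *)&eval;

	if (fd < 0 || ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		perror("SIOCETHTOOL(ETHTOOL_STSO)");
	return 0;
}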
+1
drivers/net/bnx2.h
··· 4174 #define BNX2_DRV_MSG_CODE_PULSE 0x06000000 4175 #define BNX2_DRV_MSG_CODE_DIAG 0x07000000 4176 #define BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL 0x09000000 4177 4178 #define BNX2_DRV_MSG_DATA 0x00ff0000 4179 #define BNX2_DRV_MSG_DATA_WAIT0 0x00010000
··· 4174 #define BNX2_DRV_MSG_CODE_PULSE 0x06000000 4175 #define BNX2_DRV_MSG_CODE_DIAG 0x07000000 4176 #define BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL 0x09000000 4177 + #define BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN 0x0b000000 4178 4179 #define BNX2_DRV_MSG_DATA 0x00ff0000 4180 #define BNX2_DRV_MSG_DATA_WAIT0 0x00010000
+1 -1
drivers/net/irda/Kconfig
··· 350 351 config AU1000_FIR 352 tristate "Alchemy Au1000 SIR/FIR" 353 - depends on MIPS_AU1000 && IRDA 354 355 config SMC_IRCC_FIR 356 tristate "SMSC IrCC (EXPERIMENTAL)"
··· 350 351 config AU1000_FIR 352 tristate "Alchemy Au1000 SIR/FIR" 353 + depends on SOC_AU1000 && IRDA 354 355 config SMC_IRCC_FIR 356 tristate "SMSC IrCC (EXPERIMENTAL)"
+1 -1
drivers/net/irda/mcs7780.c
··· 101 module_param(transceiver_type, int, 0444); 102 MODULE_PARM_DESC(transceiver_type, "IR transceiver type, see mcs7780.h."); 103 104 - struct usb_driver mcs_driver = { 105 .name = "mcs7780", 106 .probe = mcs_probe, 107 .disconnect = mcs_disconnect,
··· 101 module_param(transceiver_type, int, 0444); 102 MODULE_PARM_DESC(transceiver_type, "IR transceiver type, see mcs7780.h."); 103 104 + static struct usb_driver mcs_driver = { 105 .name = "mcs7780", 106 .probe = mcs_probe, 107 .disconnect = mcs_disconnect,
+1
include/asm-alpha/socket.h
··· 51 #define SCM_TIMESTAMP SO_TIMESTAMP 52 53 #define SO_PEERSEC 30 54 55 /* Security levels - as per NRL IPv6 - don't actually do anything */ 56 #define SO_SECURITY_AUTHENTICATION 19
··· 51 #define SCM_TIMESTAMP SO_TIMESTAMP 52 53 #define SO_PEERSEC 30 54 + #define SO_PASSSEC 34 55 56 /* Security levels - as per NRL IPv6 - don't actually do anything */ 57 #define SO_SECURITY_AUTHENTICATION 19
+1
include/asm-arm/socket.h
··· 48 #define SO_ACCEPTCONN 30 49 50 #define SO_PEERSEC 31 51 52 #endif /* _ASM_SOCKET_H */
··· 48 #define SO_ACCEPTCONN 30 49 50 #define SO_PEERSEC 31 51 + #define SO_PASSSEC 34 52 53 #endif /* _ASM_SOCKET_H */
+1
include/asm-arm26/socket.h
··· 48 #define SO_ACCEPTCONN 30 49 50 #define SO_PEERSEC 31 51 52 #endif /* _ASM_SOCKET_H */
··· 48 #define SO_ACCEPTCONN 30 49 50 #define SO_PEERSEC 31 51 + #define SO_PASSSEC 34 52 53 #endif /* _ASM_SOCKET_H */
+1
include/asm-cris/socket.h
··· 50 #define SO_ACCEPTCONN 30 51 52 #define SO_PEERSEC 31 53 54 #endif /* _ASM_SOCKET_H */ 55
··· 50 #define SO_ACCEPTCONN 30 51 52 #define SO_PEERSEC 31 53 + #define SO_PASSSEC 34 54 55 #endif /* _ASM_SOCKET_H */ 56
+1
include/asm-frv/socket.h
··· 48 #define SO_ACCEPTCONN 30 49 50 #define SO_PEERSEC 31 51 52 #endif /* _ASM_SOCKET_H */ 53
··· 48 #define SO_ACCEPTCONN 30 49 50 #define SO_PEERSEC 31 51 + #define SO_PASSSEC 34 52 53 #endif /* _ASM_SOCKET_H */ 54
+1
include/asm-h8300/socket.h
··· 48 #define SO_ACCEPTCONN 30 49 50 #define SO_PEERSEC 31 51 52 #endif /* _ASM_SOCKET_H */
··· 48 #define SO_ACCEPTCONN 30 49 50 #define SO_PEERSEC 31 51 + #define SO_PASSSEC 34 52 53 #endif /* _ASM_SOCKET_H */
+1
include/asm-i386/socket.h
··· 48 #define SO_ACCEPTCONN 30 49 50 #define SO_PEERSEC 31 51 52 #endif /* _ASM_SOCKET_H */
··· 48 #define SO_ACCEPTCONN 30 49 50 #define SO_PEERSEC 31 51 + #define SO_PASSSEC 34 52 53 #endif /* _ASM_SOCKET_H */
+1
include/asm-ia64/socket.h
··· 57 #define SO_ACCEPTCONN 30 58 59 #define SO_PEERSEC 31 60 61 #endif /* _ASM_IA64_SOCKET_H */
··· 57 #define SO_ACCEPTCONN 30 58 59 #define SO_PEERSEC 31 60 + #define SO_PASSSEC 34 61 62 #endif /* _ASM_IA64_SOCKET_H */
+1
include/asm-m32r/socket.h
··· 48 #define SO_ACCEPTCONN 30 49 50 #define SO_PEERSEC 31 51 52 #endif /* _ASM_M32R_SOCKET_H */
··· 48 #define SO_ACCEPTCONN 30 49 50 #define SO_PEERSEC 31 51 + #define SO_PASSSEC 34 52 53 #endif /* _ASM_M32R_SOCKET_H */
+1
include/asm-m68k/socket.h
··· 48 #define SO_ACCEPTCONN 30 49 50 #define SO_PEERSEC 31 51 52 #endif /* _ASM_SOCKET_H */
··· 48 #define SO_ACCEPTCONN 30 49 50 #define SO_PEERSEC 31 51 + #define SO_PASSSEC 34 52 53 #endif /* _ASM_SOCKET_H */
+1
include/asm-mips/socket.h
··· 69 #define SO_PEERSEC 30 70 #define SO_SNDBUFFORCE 31 71 #define SO_RCVBUFFORCE 33 72 73 #ifdef __KERNEL__ 74
··· 69 #define SO_PEERSEC 30 70 #define SO_SNDBUFFORCE 31 71 #define SO_RCVBUFFORCE 33 72 + #define SO_PASSSEC 34 73 74 #ifdef __KERNEL__ 75
+1
include/asm-parisc/socket.h
··· 48 #define SO_ACCEPTCONN 0x401c 49 50 #define SO_PEERSEC 0x401d 51 52 #endif /* _ASM_SOCKET_H */
··· 48 #define SO_ACCEPTCONN 0x401c 49 50 #define SO_PEERSEC 0x401d 51 + #define SO_PASSSEC 0x401e 52 53 #endif /* _ASM_SOCKET_H */
+1
include/asm-powerpc/socket.h
··· 55 #define SO_ACCEPTCONN 30 56 57 #define SO_PEERSEC 31 58 59 #endif /* _ASM_POWERPC_SOCKET_H */
··· 55 #define SO_ACCEPTCONN 30 56 57 #define SO_PEERSEC 31 58 + #define SO_PASSSEC 34 59 60 #endif /* _ASM_POWERPC_SOCKET_H */
+1
include/asm-s390/socket.h
··· 56 #define SO_ACCEPTCONN 30 57 58 #define SO_PEERSEC 31 59 60 #endif /* _ASM_SOCKET_H */
··· 56 #define SO_ACCEPTCONN 30 57 58 #define SO_PEERSEC 31 59 + #define SO_PASSSEC 34 60 61 #endif /* _ASM_SOCKET_H */
+1
include/asm-sh/socket.h
··· 48 #define SO_ACCEPTCONN 30 49 50 #define SO_PEERSEC 31 51 52 #endif /* __ASM_SH_SOCKET_H */
··· 48 #define SO_ACCEPTCONN 30 49 50 #define SO_PEERSEC 31 51 + #define SO_PASSSEC 34 52 53 #endif /* __ASM_SH_SOCKET_H */
+1
include/asm-sparc/socket.h
··· 48 #define SCM_TIMESTAMP SO_TIMESTAMP 49 50 #define SO_PEERSEC 0x001e 51 52 /* Security levels - as per NRL IPv6 - don't actually do anything */ 53 #define SO_SECURITY_AUTHENTICATION 0x5001
··· 48 #define SCM_TIMESTAMP SO_TIMESTAMP 49 50 #define SO_PEERSEC 0x001e 51 + #define SO_PASSSEC 0x001f 52 53 /* Security levels - as per NRL IPv6 - don't actually do anything */ 54 #define SO_SECURITY_AUTHENTICATION 0x5001
+1
include/asm-sparc64/socket.h
··· 48 #define SCM_TIMESTAMP SO_TIMESTAMP 49 50 #define SO_PEERSEC 0x001e 51 52 /* Security levels - as per NRL IPv6 - don't actually do anything */ 53 #define SO_SECURITY_AUTHENTICATION 0x5001
··· 48 #define SCM_TIMESTAMP SO_TIMESTAMP 49 50 #define SO_PEERSEC 0x001e 51 + #define SO_PASSSEC 0x001f 52 53 /* Security levels - as per NRL IPv6 - don't actually do anything */ 54 #define SO_SECURITY_AUTHENTICATION 0x5001
+1
include/asm-v850/socket.h
··· 48 #define SO_ACCEPTCONN 30 49 50 #define SO_PEERSEC 31 51 52 #endif /* __V850_SOCKET_H__ */
··· 48 #define SO_ACCEPTCONN 30 49 50 #define SO_PEERSEC 31 51 + #define SO_PASSSEC 34 52 53 #endif /* __V850_SOCKET_H__ */
+1
include/asm-x86_64/socket.h
··· 48 #define SO_ACCEPTCONN 30 49 50 #define SO_PEERSEC 31 51 52 #endif /* _ASM_SOCKET_H */
··· 48 #define SO_ACCEPTCONN 30 49 50 #define SO_PEERSEC 31 51 + #define SO_PASSSEC 34 52 53 #endif /* _ASM_SOCKET_H */
+1
include/asm-xtensa/socket.h
··· 59 60 #define SO_ACCEPTCONN 30 61 #define SO_PEERSEC 31 62 63 #endif /* _XTENSA_SOCKET_H */
··· 59 60 #define SO_ACCEPTCONN 30 61 #define SO_PEERSEC 31 62 + #define SO_PASSSEC 34 63 64 #endif /* _XTENSA_SOCKET_H */
+3 -1
include/linux/atmdev.h
··· 7 #define LINUX_ATMDEV_H 8 9 10 #include <linux/atmapi.h> 11 #include <linux/atm.h> 12 #include <linux/atmioc.h> ··· 359 struct proc_dir_entry *proc_entry; /* proc entry */ 360 char *proc_name; /* proc entry name */ 361 #endif 362 struct list_head dev_list; /* linkage */ 363 }; 364 ··· 461 BUG_ON(!test_bit(ATM_DF_REMOVED, &dev->flags)); 462 if (dev->ops->dev_close) 463 dev->ops->dev_close(dev); 464 - kfree(dev); 465 } 466 } 467
··· 7 #define LINUX_ATMDEV_H 8 9 10 + #include <linux/device.h> 11 #include <linux/atmapi.h> 12 #include <linux/atm.h> 13 #include <linux/atmioc.h> ··· 358 struct proc_dir_entry *proc_entry; /* proc entry */ 359 char *proc_name; /* proc entry name */ 360 #endif 361 + struct class_device class_dev; /* sysfs class device */ 362 struct list_head dev_list; /* linkage */ 363 }; 364 ··· 459 BUG_ON(!test_bit(ATM_DF_REMOVED, &dev->flags)); 460 if (dev->ops->dev_close) 461 dev->ops->dev_close(dev); 462 + class_device_put(&dev->class_dev); 463 } 464 } 465
+1
include/linux/net.h
··· 61 #define SOCK_ASYNC_WAITDATA 1 62 #define SOCK_NOSPACE 2 63 #define SOCK_PASSCRED 3 64 65 #ifndef ARCH_HAS_SOCKET_TYPES 66 /**
··· 61 #define SOCK_ASYNC_WAITDATA 1 62 #define SOCK_NOSPACE 2 63 #define SOCK_PASSCRED 3 64 + #define SOCK_PASSSEC 4 65 66 #ifndef ARCH_HAS_SOCKET_TYPES 67 /**
+13 -5
include/linux/netdevice.h
··· 315 #define NETIF_F_GSO_SHIFT 16 316 #define NETIF_F_TSO (SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT) 317 #define NETIF_F_UFO (SKB_GSO_UDPV4 << NETIF_F_GSO_SHIFT) 318 319 #define NETIF_F_GEN_CSUM (NETIF_F_NO_CSUM | NETIF_F_HW_CSUM) 320 #define NETIF_F_ALL_CSUM (NETIF_F_IP_CSUM | NETIF_F_GEN_CSUM) ··· 545 struct net_device *, 546 struct packet_type *, 547 struct net_device *); 548 - struct sk_buff *(*gso_segment)(struct sk_buff *skb, int sg); 549 void *af_packet_priv; 550 struct list_head list; 551 }; ··· 971 extern int weight_p; 972 extern int netdev_set_master(struct net_device *dev, struct net_device *master); 973 extern int skb_checksum_help(struct sk_buff *skb, int inward); 974 - extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, int sg); 975 #ifdef CONFIG_BUG 976 extern void netdev_rx_csum_fault(struct net_device *dev); 977 #else ··· 991 992 extern void linkwatch_run_queue(void); 993 994 static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb) 995 { 996 - int feature = skb_shinfo(skb)->gso_type << NETIF_F_GSO_SHIFT; 997 - return skb_shinfo(skb)->gso_size && 998 - (dev->features & feature) != feature; 999 } 1000 1001 #endif /* __KERNEL__ */
··· 315 #define NETIF_F_GSO_SHIFT 16 316 #define NETIF_F_TSO (SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT) 317 #define NETIF_F_UFO (SKB_GSO_UDPV4 << NETIF_F_GSO_SHIFT) 318 + #define NETIF_F_GSO_ROBUST (SKB_GSO_DODGY << NETIF_F_GSO_SHIFT) 319 + #define NETIF_F_TSO_ECN (SKB_GSO_TCPV4_ECN << NETIF_F_GSO_SHIFT) 320 321 #define NETIF_F_GEN_CSUM (NETIF_F_NO_CSUM | NETIF_F_HW_CSUM) 322 #define NETIF_F_ALL_CSUM (NETIF_F_IP_CSUM | NETIF_F_GEN_CSUM) ··· 543 struct net_device *, 544 struct packet_type *, 545 struct net_device *); 546 + struct sk_buff *(*gso_segment)(struct sk_buff *skb, 547 + int features); 548 void *af_packet_priv; 549 struct list_head list; 550 }; ··· 968 extern int weight_p; 969 extern int netdev_set_master(struct net_device *dev, struct net_device *master); 970 extern int skb_checksum_help(struct sk_buff *skb, int inward); 971 + extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features); 972 #ifdef CONFIG_BUG 973 extern void netdev_rx_csum_fault(struct net_device *dev); 974 #else ··· 988 989 extern void linkwatch_run_queue(void); 990 991 + static inline int skb_gso_ok(struct sk_buff *skb, int features) 992 + { 993 + int feature = skb_shinfo(skb)->gso_size ? 994 + skb_shinfo(skb)->gso_type << NETIF_F_GSO_SHIFT : 0; 995 + return (features & feature) == feature; 996 + } 997 + 998 static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb) 999 { 1000 + return !skb_gso_ok(skb, dev->features); 1001 } 1002 1003 #endif /* __KERNEL__ */
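Editor's note (not part of the diff): the corrected skb_gso_ok() simply checks that every GSO type bit carried by the skb has a matching device feature bit one NETIF_F_GSO_SHIFT to the left. A small stand-alone sketch of that test, with the bit values mirrored from the definitions above (local names, not the kernel symbols):

#include <stdio.h>

#define GSO_SHIFT     16                           /* NETIF_F_GSO_SHIFT */
#define GSO_TCPV4     (1 << 0)                     /* SKB_GSO_TCPV4     */
#define GSO_TCPV4_ECN (1 << 3)                     /* SKB_GSO_TCPV4_ECN */
#define F_TSO         (GSO_TCPV4 << GSO_SHIFT)     /* NETIF_F_TSO       */
#define F_TSO_ECN     (GSO_TCPV4_ECN << GSO_SHIFT) /* NETIF_F_TSO_ECN   */

static int gso_ok(int gso_size, int gso_type, int features)
{
	int feature = gso_size ? gso_type << GSO_SHIFT : 0;
	return (features & feature) == feature;
}

int main(void)
{
	int type = GSO_TCPV4 | GSO_TCPV4_ECN;	/* a TSO segment carrying CWR */

	printf("TSO-only device:    %d\n", gso_ok(1000, type, F_TSO));             /* 0: software GSO */
	printf("TSO+TSO_ECN device: %d\n", gso_ok(1000, type, F_TSO | F_TSO_ECN)); /* 1: hand to hardware */
	printf("non-GSO skb:        %d\n", gso_ok(0, 0, 0));                       /* 1: nothing to do */
	return 0;
}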
+7 -6
include/linux/security.h
··· 67 struct xfrm_user_sec_ctx; 68 69 extern int cap_netlink_send(struct sock *sk, struct sk_buff *skb); 70 - extern int cap_netlink_recv(struct sk_buff *skb); 71 72 /* 73 * Values used in the task_security_ops calls ··· 656 * Check permission before processing the received netlink message in 657 * @skb. 658 * @skb contains the sk_buff structure for the netlink message. 659 * Return 0 if permission is granted. 660 * 661 * Security hooks for Unix domain networking. ··· 1267 struct sembuf * sops, unsigned nsops, int alter); 1268 1269 int (*netlink_send) (struct sock * sk, struct sk_buff * skb); 1270 - int (*netlink_recv) (struct sk_buff * skb); 1271 1272 /* allow module stacking */ 1273 int (*register_security) (const char *name, ··· 2033 return security_ops->netlink_send(sk, skb); 2034 } 2035 2036 - static inline int security_netlink_recv(struct sk_buff * skb) 2037 { 2038 - return security_ops->netlink_recv(skb); 2039 } 2040 2041 /* prototypes */ ··· 2671 return cap_netlink_send (sk, skb); 2672 } 2673 2674 - static inline int security_netlink_recv (struct sk_buff *skb) 2675 { 2676 - return cap_netlink_recv (skb); 2677 } 2678 2679 static inline struct dentry *securityfs_create_dir(const char *name,
··· 67 struct xfrm_user_sec_ctx; 68 69 extern int cap_netlink_send(struct sock *sk, struct sk_buff *skb); 70 + extern int cap_netlink_recv(struct sk_buff *skb, int cap); 71 72 /* 73 * Values used in the task_security_ops calls ··· 656 * Check permission before processing the received netlink message in 657 * @skb. 658 * @skb contains the sk_buff structure for the netlink message. 659 + * @cap indicates the capability required 660 * Return 0 if permission is granted. 661 * 662 * Security hooks for Unix domain networking. ··· 1266 struct sembuf * sops, unsigned nsops, int alter); 1267 1268 int (*netlink_send) (struct sock * sk, struct sk_buff * skb); 1269 + int (*netlink_recv) (struct sk_buff * skb, int cap); 1270 1271 /* allow module stacking */ 1272 int (*register_security) (const char *name, ··· 2032 return security_ops->netlink_send(sk, skb); 2033 } 2034 2035 + static inline int security_netlink_recv(struct sk_buff * skb, int cap) 2036 { 2037 + return security_ops->netlink_recv(skb, cap); 2038 } 2039 2040 /* prototypes */ ··· 2670 return cap_netlink_send (sk, skb); 2671 } 2672 2673 + static inline int security_netlink_recv (struct sk_buff *skb, int cap) 2674 { 2675 + return cap_netlink_recv (skb, cap); 2676 } 2677 2678 static inline struct dentry *securityfs_create_dir(const char *name,
+7 -2
include/linux/skbuff.h
··· 172 enum { 173 SKB_GSO_TCPV4 = 1 << 0, 174 SKB_GSO_UDPV4 = 1 << 1, 175 }; 176 177 /** ··· 1304 extern void skb_split(struct sk_buff *skb, 1305 struct sk_buff *skb1, const u32 len); 1306 1307 - extern void skb_release_data(struct sk_buff *skb); 1308 - extern struct sk_buff *skb_segment(struct sk_buff *skb, int sg); 1309 1310 static inline void *skb_header_pointer(const struct sk_buff *skb, int offset, 1311 int len, void *buffer)
··· 172 enum { 173 SKB_GSO_TCPV4 = 1 << 0, 174 SKB_GSO_UDPV4 = 1 << 1, 175 + 176 + /* This indicates the skb is from an untrusted source. */ 177 + SKB_GSO_DODGY = 1 << 2, 178 + 179 + /* This indicates the tcp segment has CWR set. */ 180 + SKB_GSO_TCPV4_ECN = 1 << 3, 181 }; 182 183 /** ··· 1298 extern void skb_split(struct sk_buff *skb, 1299 struct sk_buff *skb1, const u32 len); 1300 1301 + extern struct sk_buff *skb_segment(struct sk_buff *skb, int features); 1302 1303 static inline void *skb_header_pointer(const struct sk_buff *skb, int offset, 1304 int len, void *buffer)
+6
include/net/af_unix.h
··· 53 struct unix_skb_parms { 54 struct ucred creds; /* Skb credentials */ 55 struct scm_fp_list *fp; /* Passed files */ 56 }; 57 58 #define UNIXCB(skb) (*(struct unix_skb_parms*)&((skb)->cb)) 59 #define UNIXCREDS(skb) (&UNIXCB((skb)).creds) 60 61 #define unix_state_rlock(s) spin_lock(&unix_sk(s)->lock) 62 #define unix_state_runlock(s) spin_unlock(&unix_sk(s)->lock)
··· 53 struct unix_skb_parms { 54 struct ucred creds; /* Skb credentials */ 55 struct scm_fp_list *fp; /* Passed files */ 56 + #ifdef CONFIG_SECURITY_NETWORK 57 + char *secdata; /* Security context */ 58 + u32 seclen; /* Security length */ 59 + #endif 60 }; 61 62 #define UNIXCB(skb) (*(struct unix_skb_parms*)&((skb)->cb)) 63 #define UNIXCREDS(skb) (&UNIXCB((skb)).creds) 64 + #define UNIXSECDATA(skb) (&UNIXCB((skb)).secdata) 65 + #define UNIXSECLEN(skb) (&UNIXCB((skb)).seclen) 66 67 #define unix_state_rlock(s) spin_lock(&unix_sk(s)->lock) 68 #define unix_state_runlock(s) spin_unlock(&unix_sk(s)->lock)
+12 -6
include/net/pkt_sched.h
··· 169 170 #define PSCHED_TADD2(tv, delta, tv_res) \ 171 ({ \ 172 - int __delta = (tv).tv_usec + (delta); \ 173 - (tv_res).tv_sec = (tv).tv_sec; \ 174 - if (__delta > USEC_PER_SEC) { (tv_res).tv_sec++; __delta -= USEC_PER_SEC; } \ 175 (tv_res).tv_usec = __delta; \ 176 }) 177 178 #define PSCHED_TADD(tv, delta) \ 179 ({ \ 180 - (tv).tv_usec += (delta); \ 181 - if ((tv).tv_usec > USEC_PER_SEC) { (tv).tv_sec++; \ 182 - (tv).tv_usec -= USEC_PER_SEC; } \ 183 }) 184 185 /* Set/check that time is in the "past perfect";
··· 169 170 #define PSCHED_TADD2(tv, delta, tv_res) \ 171 ({ \ 172 + int __delta = (delta); \ 173 + (tv_res) = (tv); \ 174 + while(__delta >= USEC_PER_SEC){ \ 175 + (tv_res).tv_sec++; \ 176 + __delta -= USEC_PER_SEC; \ 177 + } \ 178 (tv_res).tv_usec = __delta; \ 179 }) 180 181 #define PSCHED_TADD(tv, delta) \ 182 ({ \ 183 + int __delta = (delta); \ 184 + while(__delta >= USEC_PER_SEC){ \ 185 + (tv).tv_sec++; \ 186 + __delta -= USEC_PER_SEC; \ 187 + } \ 188 + (tv).tv_usec = __delta; \ 189 }) 190 191 /* Set/check that time is in the "past perfect";
+2 -1
include/net/protocol.h
··· 36 struct net_protocol { 37 int (*handler)(struct sk_buff *skb); 38 void (*err_handler)(struct sk_buff *skb, u32 info); 39 - struct sk_buff *(*gso_segment)(struct sk_buff *skb, int sg); 40 int no_policy; 41 }; 42
··· 36 struct net_protocol { 37 int (*handler)(struct sk_buff *skb); 38 void (*err_handler)(struct sk_buff *skb, u32 info); 39 + struct sk_buff *(*gso_segment)(struct sk_buff *skb, 40 + int features); 41 int no_policy; 42 }; 43
+17
include/net/scm.h
··· 19 { 20 struct ucred creds; /* Skb credentials */ 21 struct scm_fp_list *fp; /* Passed files */ 22 unsigned long seq; /* Connection seqno */ 23 }; 24 ··· 52 return __scm_send(sock, msg, scm); 53 } 54 55 static __inline__ void scm_recv(struct socket *sock, struct msghdr *msg, 56 struct scm_cookie *scm, int flags) 57 { ··· 76 77 if (test_bit(SOCK_PASSCRED, &sock->flags)) 78 put_cmsg(msg, SOL_SOCKET, SCM_CREDENTIALS, sizeof(scm->creds), &scm->creds); 79 80 if (!scm->fp) 81 return;
··· 19 { 20 struct ucred creds; /* Skb credentials */ 21 struct scm_fp_list *fp; /* Passed files */ 22 + #ifdef CONFIG_SECURITY_NETWORK 23 + char *secdata; /* Security context */ 24 + u32 seclen; /* Security length */ 25 + #endif 26 unsigned long seq; /* Connection seqno */ 27 }; 28 ··· 48 return __scm_send(sock, msg, scm); 49 } 50 51 + #ifdef CONFIG_SECURITY_NETWORK 52 + static inline void scm_passec(struct socket *sock, struct msghdr *msg, struct scm_cookie *scm) 53 + { 54 + if (test_bit(SOCK_PASSSEC, &sock->flags) && scm->secdata != NULL) 55 + put_cmsg(msg, SOL_SOCKET, SCM_SECURITY, scm->seclen, scm->secdata); 56 + } 57 + #else 58 + static inline void scm_passec(struct socket *sock, struct msghdr *msg, struct scm_cookie *scm) 59 + { } 60 + #endif /* CONFIG_SECURITY_NETWORK */ 61 + 62 static __inline__ void scm_recv(struct socket *sock, struct msghdr *msg, 63 struct scm_cookie *scm, int flags) 64 { ··· 61 62 if (test_bit(SOCK_PASSCRED, &sock->flags)) 63 put_cmsg(msg, SOL_SOCKET, SCM_CREDENTIALS, sizeof(scm->creds), &scm->creds); 64 + 65 + scm_passec(sock, msg, scm); 66 67 if (!scm->fp) 68 return;
+1 -2
include/net/sock.h
··· 383 SOCK_USE_WRITE_QUEUE, /* whether to call sk->sk_write_space in sock_wfree */ 384 SOCK_DBG, /* %SO_DEBUG setting */ 385 SOCK_RCVTSTAMP, /* %SO_TIMESTAMP setting */ 386 - SOCK_NO_LARGESEND, /* whether to sent large segments or not */ 387 SOCK_LOCALROUTE, /* route locally only, %SO_DONTROUTE setting */ 388 SOCK_QUEUE_SHRUNK, /* write queue has been shrunk recently */ 389 }; ··· 1032 if (sk->sk_route_caps & NETIF_F_GSO) 1033 sk->sk_route_caps |= NETIF_F_TSO; 1034 if (sk->sk_route_caps & NETIF_F_TSO) { 1035 - if (sock_flag(sk, SOCK_NO_LARGESEND) || dst->header_len) 1036 sk->sk_route_caps &= ~NETIF_F_TSO; 1037 else 1038 sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
··· 383 SOCK_USE_WRITE_QUEUE, /* whether to call sk->sk_write_space in sock_wfree */ 384 SOCK_DBG, /* %SO_DEBUG setting */ 385 SOCK_RCVTSTAMP, /* %SO_TIMESTAMP setting */ 386 SOCK_LOCALROUTE, /* route locally only, %SO_DONTROUTE setting */ 387 SOCK_QUEUE_SHRUNK, /* write queue has been shrunk recently */ 388 }; ··· 1033 if (sk->sk_route_caps & NETIF_F_GSO) 1034 sk->sk_route_caps |= NETIF_F_TSO; 1035 if (sk->sk_route_caps & NETIF_F_TSO) { 1036 + if (dst->header_len) 1037 sk->sk_route_caps &= ~NETIF_F_TSO; 1038 else 1039 sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
+1 -1
include/net/tcp.h
··· 1086 1087 extern int tcp_v4_destroy_sock(struct sock *sk); 1088 1089 - extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int sg); 1090 1091 #ifdef CONFIG_PROC_FS 1092 extern int tcp4_proc_init(void);
··· 1086 1087 extern int tcp_v4_destroy_sock(struct sock *sk); 1088 1089 + extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features); 1090 1091 #ifdef CONFIG_PROC_FS 1092 extern int tcp4_proc_init(void);
+4 -2
include/net/tcp_ecn.h
··· 31 struct sk_buff *skb) 32 { 33 tp->ecn_flags = 0; 34 - if (sysctl_tcp_ecn && !(sk->sk_route_caps & NETIF_F_TSO)) { 35 TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_ECE|TCPCB_FLAG_CWR; 36 tp->ecn_flags = TCP_ECN_OK; 37 - sock_set_flag(sk, SOCK_NO_LARGESEND); 38 } 39 } 40 ··· 55 if (tp->ecn_flags&TCP_ECN_QUEUE_CWR) { 56 tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR; 57 skb->h.th->cwr = 1; 58 } 59 } else { 60 /* ACK or retransmitted segment: clear ECT|CE */
··· 31 struct sk_buff *skb) 32 { 33 tp->ecn_flags = 0; 34 + if (sysctl_tcp_ecn) { 35 TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_ECE|TCPCB_FLAG_CWR; 36 tp->ecn_flags = TCP_ECN_OK; 37 } 38 } 39 ··· 56 if (tp->ecn_flags&TCP_ECN_QUEUE_CWR) { 57 tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR; 58 skb->h.th->cwr = 1; 59 + if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) 60 + skb_shinfo(skb)->gso_type |= 61 + SKB_GSO_TCPV4_ECN; 62 } 63 } else { 64 /* ACK or retransmitted segment: clear ECT|CE */
+4 -4
kernel/audit.c
··· 445 * Check for appropriate CAP_AUDIT_ capabilities on incoming audit 446 * control messages. 447 */ 448 - static int audit_netlink_ok(kernel_cap_t eff_cap, u16 msg_type) 449 { 450 int err = 0; 451 ··· 459 case AUDIT_DEL: 460 case AUDIT_DEL_RULE: 461 case AUDIT_SIGNAL_INFO: 462 - if (!cap_raised(eff_cap, CAP_AUDIT_CONTROL)) 463 err = -EPERM; 464 break; 465 case AUDIT_USER: 466 case AUDIT_FIRST_USER_MSG...AUDIT_LAST_USER_MSG: 467 case AUDIT_FIRST_USER_MSG2...AUDIT_LAST_USER_MSG2: 468 - if (!cap_raised(eff_cap, CAP_AUDIT_WRITE)) 469 err = -EPERM; 470 break; 471 default: /* bad msg */ ··· 488 char *ctx; 489 u32 len; 490 491 - err = audit_netlink_ok(NETLINK_CB(skb).eff_cap, msg_type); 492 if (err) 493 return err; 494
··· 445 * Check for appropriate CAP_AUDIT_ capabilities on incoming audit 446 * control messages. 447 */ 448 + static int audit_netlink_ok(struct sk_buff *skb, u16 msg_type) 449 { 450 int err = 0; 451 ··· 459 case AUDIT_DEL: 460 case AUDIT_DEL_RULE: 461 case AUDIT_SIGNAL_INFO: 462 + if (security_netlink_recv(skb, CAP_AUDIT_CONTROL)) 463 err = -EPERM; 464 break; 465 case AUDIT_USER: 466 case AUDIT_FIRST_USER_MSG...AUDIT_LAST_USER_MSG: 467 case AUDIT_FIRST_USER_MSG2...AUDIT_LAST_USER_MSG2: 468 + if (security_netlink_recv(skb, CAP_AUDIT_WRITE)) 469 err = -EPERM; 470 break; 471 default: /* bad msg */ ··· 488 char *ctx; 489 u32 len; 490 491 + err = audit_netlink_ok(skb, msg_type); 492 if (err) 493 return err; 494
+1 -1
net/atm/Makefile
··· 2 # Makefile for the ATM Protocol Families. 3 # 4 5 - atm-y := addr.o pvc.o signaling.o svc.o ioctl.o common.o atm_misc.o raw.o resources.o 6 mpoa-objs := mpc.o mpoa_caches.o mpoa_proc.o 7 8 obj-$(CONFIG_ATM) += atm.o
··· 2 # Makefile for the ATM Protocol Families. 3 # 4 5 + atm-y := addr.o pvc.o signaling.o svc.o ioctl.o common.o atm_misc.o raw.o resources.o atm_sysfs.o 6 mpoa-objs := mpc.o mpoa_caches.o mpoa_proc.o 7 8 obj-$(CONFIG_ATM) += atm.o
+176
net/atm/atm_sysfs.c
···
··· 1 + /* ATM driver model support. */ 2 + 3 + #include <linux/config.h> 4 + #include <linux/kernel.h> 5 + #include <linux/init.h> 6 + #include <linux/kobject.h> 7 + #include <linux/atmdev.h> 8 + #include "common.h" 9 + #include "resources.h" 10 + 11 + #define to_atm_dev(cldev) container_of(cldev, struct atm_dev, class_dev) 12 + 13 + static ssize_t show_type(struct class_device *cdev, char *buf) 14 + { 15 + struct atm_dev *adev = to_atm_dev(cdev); 16 + return sprintf(buf, "%s\n", adev->type); 17 + } 18 + 19 + static ssize_t show_address(struct class_device *cdev, char *buf) 20 + { 21 + char *pos = buf; 22 + struct atm_dev *adev = to_atm_dev(cdev); 23 + int i; 24 + 25 + for (i = 0; i < (ESI_LEN - 1); i++) 26 + pos += sprintf(pos, "%02x:", adev->esi[i]); 27 + pos += sprintf(pos, "%02x\n", adev->esi[i]); 28 + 29 + return pos - buf; 30 + } 31 + 32 + static ssize_t show_atmaddress(struct class_device *cdev, char *buf) 33 + { 34 + unsigned long flags; 35 + char *pos = buf; 36 + struct atm_dev *adev = to_atm_dev(cdev); 37 + struct atm_dev_addr *aaddr; 38 + int bin[] = { 1, 2, 10, 6, 1 }, *fmt = bin; 39 + int i, j; 40 + 41 + spin_lock_irqsave(&adev->lock, flags); 42 + list_for_each_entry(aaddr, &adev->local, entry) { 43 + for(i = 0, j = 0; i < ATM_ESA_LEN; ++i, ++j) { 44 + if (j == *fmt) { 45 + pos += sprintf(pos, "."); 46 + ++fmt; 47 + j = 0; 48 + } 49 + pos += sprintf(pos, "%02x", aaddr->addr.sas_addr.prv[i]); 50 + } 51 + pos += sprintf(pos, "\n"); 52 + } 53 + spin_unlock_irqrestore(&adev->lock, flags); 54 + 55 + return pos - buf; 56 + } 57 + 58 + static ssize_t show_carrier(struct class_device *cdev, char *buf) 59 + { 60 + char *pos = buf; 61 + struct atm_dev *adev = to_atm_dev(cdev); 62 + 63 + pos += sprintf(pos, "%d\n", 64 + adev->signal == ATM_PHY_SIG_LOST ? 0 : 1); 65 + 66 + return pos - buf; 67 + } 68 + 69 + static ssize_t show_link_rate(struct class_device *cdev, char *buf) 70 + { 71 + char *pos = buf; 72 + struct atm_dev *adev = to_atm_dev(cdev); 73 + int link_rate; 74 + 75 + /* show the link rate, not the data rate */ 76 + switch (adev->link_rate) { 77 + case ATM_OC3_PCR: 78 + link_rate = 155520000; 79 + break; 80 + case ATM_OC12_PCR: 81 + link_rate = 622080000; 82 + break; 83 + case ATM_25_PCR: 84 + link_rate = 25600000; 85 + break; 86 + default: 87 + link_rate = adev->link_rate * 8 * 53; 88 + } 89 + pos += sprintf(pos, "%d\n", link_rate); 90 + 91 + return pos - buf; 92 + } 93 + 94 + static CLASS_DEVICE_ATTR(address, S_IRUGO, show_address, NULL); 95 + static CLASS_DEVICE_ATTR(atmaddress, S_IRUGO, show_atmaddress, NULL); 96 + static CLASS_DEVICE_ATTR(carrier, S_IRUGO, show_carrier, NULL); 97 + static CLASS_DEVICE_ATTR(type, S_IRUGO, show_type, NULL); 98 + static CLASS_DEVICE_ATTR(link_rate, S_IRUGO, show_link_rate, NULL); 99 + 100 + static struct class_device_attribute *atm_attrs[] = { 101 + &class_device_attr_atmaddress, 102 + &class_device_attr_address, 103 + &class_device_attr_carrier, 104 + &class_device_attr_type, 105 + &class_device_attr_link_rate, 106 + NULL 107 + }; 108 + 109 + static int atm_uevent(struct class_device *cdev, char **envp, int num_envp, char *buf, int size) 110 + { 111 + struct atm_dev *adev; 112 + int i = 0, len = 0; 113 + 114 + if (!cdev) 115 + return -ENODEV; 116 + 117 + adev = to_atm_dev(cdev); 118 + if (!adev) 119 + return -ENODEV; 120 + 121 + if (add_uevent_var(envp, num_envp, &i, buf, size, &len, 122 + "NAME=%s%d", adev->type, adev->number)) 123 + return -ENOMEM; 124 + 125 + envp[i] = NULL; 126 + return 0; 127 + } 128 + 129 + static void atm_release(struct class_device *cdev) 130 + { 131 + struct atm_dev *adev = to_atm_dev(cdev); 132 + 133 + kfree(adev); 134 + } 135 + 136 + static struct class atm_class = { 137 + .name = "atm", 138 + .release = atm_release, 139 + .uevent = atm_uevent, 140 + }; 141 + 142 + int atm_register_sysfs(struct atm_dev *adev) 143 + { 144 + struct class_device *cdev = &adev->class_dev; 145 + int i, err; 146 + 147 + cdev->class = &atm_class; 148 + class_set_devdata(cdev, adev); 149 + 150 + snprintf(cdev->class_id, BUS_ID_SIZE, "%s%d", adev->type, adev->number); 151 + err = class_device_register(cdev); 152 + if (err < 0) 153 + return err; 154 + 155 + for (i = 0; atm_attrs[i]; i++) 156 + class_device_create_file(cdev, atm_attrs[i]); 157 + 158 + return 0; 159 + } 160 + 161 + void atm_unregister_sysfs(struct atm_dev *adev) 162 + { 163 + struct class_device *cdev = &adev->class_dev; 164 + 165 + class_device_del(cdev); 166 + } 167 + 168 + int __init atm_sysfs_init(void) 169 + { 170 + return class_register(&atm_class); 171 + } 172 + 173 + void __exit atm_sysfs_exit(void) 174 + { 175 + class_unregister(&atm_class); 176 + }
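Editor's note (not part of the diff): each registered ATM device now appears under /sys/class/atm/<type><number> with the address, atmaddress, carrier, type and link_rate attributes created above. A minimal sketch of reading one of them from user space; the device name "he0" is hypothetical:

#include <stdio.h>

int main(void)
{
	char buf[64];
	/* Attribute file created by atm_register_sysfs(); "he0" is an assumed device. */
	FILE *f = fopen("/sys/class/atm/he0/link_rate", "r");

	if (!f) {
		perror("open link_rate");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("link rate: %s", buf);	/* e.g. 155520000 for an OC-3 PHY */
	fclose(f);
	return 0;
}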
+7
net/atm/common.c
··· 791 printk(KERN_ERR "atm_proc_init() failed with %d\n",error); 792 goto out_atmsvc_exit; 793 } 794 out: 795 return error; 796 out_atmsvc_exit: 797 atmsvc_exit(); 798 out_atmpvc_exit: ··· 811 static void __exit atm_exit(void) 812 { 813 atm_proc_exit(); 814 atmsvc_exit(); 815 atmpvc_exit(); 816 proto_unregister(&vcc_proto);
··· 791 printk(KERN_ERR "atm_proc_init() failed with %d\n",error); 792 goto out_atmsvc_exit; 793 } 794 + if ((error = atm_sysfs_init()) < 0) { 795 + printk(KERN_ERR "atm_sysfs_init() failed with %d\n",error); 796 + goto out_atmproc_exit; 797 + } 798 out: 799 return error; 800 + out_atmproc_exit: 801 + atm_proc_exit(); 802 out_atmsvc_exit: 803 atmsvc_exit(); 804 out_atmpvc_exit: ··· 805 static void __exit atm_exit(void) 806 { 807 atm_proc_exit(); 808 + atm_sysfs_exit(); 809 atmsvc_exit(); 810 atmpvc_exit(); 811 proto_unregister(&vcc_proto);
+2
net/atm/common.h
··· 28 void atmpvc_exit(void); 29 int atmsvc_init(void); 30 void atmsvc_exit(void); 31 32 #ifdef CONFIG_PROC_FS 33 int atm_proc_init(void);
··· 28 void atmpvc_exit(void); 29 int atmsvc_init(void); 30 void atmsvc_exit(void); 31 + int atm_sysfs_init(void); 32 + void atm_sysfs_exit(void); 33 34 #ifdef CONFIG_PROC_FS 35 int atm_proc_init(void);
+19 -5
net/atm/resources.c
··· 114 printk(KERN_ERR "atm_dev_register: " 115 "atm_proc_dev_register failed for dev %s\n", 116 type); 117 - mutex_unlock(&atm_dev_mutex); 118 - kfree(dev); 119 - return NULL; 120 } 121 - list_add_tail(&dev->dev_list, &atm_devs); 122 - mutex_unlock(&atm_dev_mutex); 123 124 return dev; 125 } 126 127 ··· 153 mutex_unlock(&atm_dev_mutex); 154 155 atm_dev_release_vccs(dev); 156 atm_proc_dev_deregister(dev); 157 158 atm_dev_put(dev);
··· 114 printk(KERN_ERR "atm_dev_register: " 115 "atm_proc_dev_register failed for dev %s\n", 116 type); 117 + goto out_fail; 118 } 119 120 + if (atm_register_sysfs(dev) < 0) { 121 + printk(KERN_ERR "atm_dev_register: " 122 + "atm_register_sysfs failed for dev %s\n", 123 + type); 124 + atm_proc_dev_deregister(dev); 125 + goto out_fail; 126 + } 127 + 128 + list_add_tail(&dev->dev_list, &atm_devs); 129 + 130 + out: 131 + mutex_unlock(&atm_dev_mutex); 132 return dev; 133 + 134 + out_fail: 135 + kfree(dev); 136 + dev = NULL; 137 + goto out; 138 } 139 140 ··· 140 mutex_unlock(&atm_dev_mutex); 141 142 atm_dev_release_vccs(dev); 143 + atm_unregister_sysfs(dev); 144 atm_proc_dev_deregister(dev); 145 146 atm_dev_put(dev);
+2
net/atm/resources.h
··· 43 44 #endif /* CONFIG_PROC_FS */ 45 46 #endif
··· 43 44 #endif /* CONFIG_PROC_FS */ 45 46 + int atm_register_sysfs(struct atm_dev *adev); 47 + void atm_unregister_sysfs(struct atm_dev *adev); 48 #endif
+2 -2
net/bridge/br_device.c
··· 184 dev->set_mac_address = br_set_mac_address; 185 dev->priv_flags = IFF_EBRIDGE; 186 187 - dev->features = NETIF_F_SG | NETIF_F_FRAGLIST 188 - | NETIF_F_HIGHDMA | NETIF_F_TSO | NETIF_F_NO_CSUM; 189 }
··· 184 dev->set_mac_address = br_set_mac_address; 185 dev->priv_flags = IFF_EBRIDGE; 186 187 + dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA | 188 + NETIF_F_TSO | NETIF_F_NO_CSUM | NETIF_F_GSO_ROBUST; 189 }
+2 -1
net/bridge/br_if.c
··· 392 features &= feature; 393 } 394 395 - br->dev->features = features | checksum | NETIF_F_LLTX; 396 } 397 398 /* called with RTNL */
··· 392 features &= feature; 393 } 394 395 + br->dev->features = features | checksum | NETIF_F_LLTX | 396 + NETIF_F_GSO_ROBUST; 397 } 398 399 /* called with RTNL */
+26 -13
net/core/dev.c
··· 1190 /** 1191 * skb_gso_segment - Perform segmentation on skb. 1192 * @skb: buffer to segment 1193 - * @sg: whether scatter-gather is supported on the target. 1194 * 1195 * This function segments the given skb and returns a list of segments. 1196 */ 1197 - struct sk_buff *skb_gso_segment(struct sk_buff *skb, int sg) 1198 { 1199 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT); 1200 struct packet_type *ptype; ··· 1213 rcu_read_lock(); 1214 list_for_each_entry_rcu(ptype, &ptype_base[ntohs(type) & 15], list) { 1215 if (ptype->type == type && !ptype->dev && ptype->gso_segment) { 1216 - segs = ptype->gso_segment(skb, sg); 1217 break; 1218 } 1219 } 1220 rcu_read_unlock(); 1221 1222 return segs; 1223 } ··· 1239 EXPORT_SYMBOL(netdev_rx_csum_fault); 1240 #endif 1241 1242 - #ifdef CONFIG_HIGHMEM 1243 /* Actually, we should eliminate this check as soon as we know, that: 1244 * 1. IOMMU is present and allows to map all the memory. 1245 * 2. No high memory really exists on this machine. ··· 1246 1247 static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb) 1248 { 1249 int i; 1250 1251 if (dev->features & NETIF_F_HIGHDMA) ··· 1256 if (PageHighMem(skb_shinfo(skb)->frags[i].page)) 1257 return 1; 1258 1259 return 0; 1260 } 1261 - #else 1262 - #define illegal_highdma(dev, skb) (0) 1263 - #endif 1264 1265 struct dev_gso_cb { 1266 void (*destructor)(struct sk_buff *skb); ··· 1294 { 1295 struct net_device *dev = skb->dev; 1296 struct sk_buff *segs; 1297 1298 - segs = skb_gso_segment(skb, dev->features & NETIF_F_SG && 1299 - !illegal_highdma(dev, skb)); 1300 if (unlikely(IS_ERR(segs))) 1301 return PTR_ERR(segs); 1302 ··· 1319 if (netdev_nit) 1320 dev_queue_xmit_nit(skb, dev); 1321 1322 - if (!netif_needs_gso(dev, skb)) 1323 - return dev->hard_start_xmit(skb, dev); 1324 1325 - if (unlikely(dev_gso_segment(skb))) 1326 - goto out_kfree_skb; 1327 } 1328 1329 do { 1330 struct sk_buff *nskb = skb->next; 1331 int rc;
··· 1190 /** 1191 * skb_gso_segment - Perform segmentation on skb. 1192 * @skb: buffer to segment 1193 + * @features: features for the output path (see dev->features) 1194 * 1195 * This function segments the given skb and returns a list of segments. 1196 + * 1197 + * It may return NULL if the skb requires no segmentation. This is 1198 + * only possible when GSO is used for verifying header integrity. 1199 */ 1200 + struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features) 1201 { 1202 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT); 1203 struct packet_type *ptype; ··· 1210 rcu_read_lock(); 1211 list_for_each_entry_rcu(ptype, &ptype_base[ntohs(type) & 15], list) { 1212 if (ptype->type == type && !ptype->dev && ptype->gso_segment) { 1213 + segs = ptype->gso_segment(skb, features); 1214 break; 1215 } 1216 } 1217 rcu_read_unlock(); 1218 + 1219 + __skb_push(skb, skb->data - skb->mac.raw); 1220 1221 return segs; 1222 } ··· 1234 EXPORT_SYMBOL(netdev_rx_csum_fault); 1235 #endif 1236 1237 /* Actually, we should eliminate this check as soon as we know, that: 1238 * 1. IOMMU is present and allows to map all the memory. 1239 * 2. No high memory really exists on this machine. ··· 1242 1243 static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb) 1244 { 1245 + #ifdef CONFIG_HIGHMEM 1246 int i; 1247 1248 if (dev->features & NETIF_F_HIGHDMA) ··· 1251 if (PageHighMem(skb_shinfo(skb)->frags[i].page)) 1252 return 1; 1253 1254 + #endif 1255 return 0; 1256 } 1257 1258 struct dev_gso_cb { 1259 void (*destructor)(struct sk_buff *skb); ··· 1291 { 1292 struct net_device *dev = skb->dev; 1293 struct sk_buff *segs; 1294 + int features = dev->features & ~(illegal_highdma(dev, skb) ? 1295 + NETIF_F_SG : 0); 1296 1297 + segs = skb_gso_segment(skb, features); 1298 + 1299 + /* Verifying header integrity only. */ 1300 + if (!segs) 1301 + return 0; 1302 + 1303 if (unlikely(IS_ERR(segs))) 1304 return PTR_ERR(segs); 1305 ··· 1310 if (netdev_nit) 1311 dev_queue_xmit_nit(skb, dev); 1312 1313 + if (netif_needs_gso(dev, skb)) { 1314 + if (unlikely(dev_gso_segment(skb))) 1315 + goto out_kfree_skb; 1316 + if (skb->next) 1317 + goto gso; 1318 + } 1319 1320 + return dev->hard_start_xmit(skb, dev); 1321 } 1322 1323 + gso: 1324 do { 1325 struct sk_buff *nskb = skb->next; 1326 int rc;
+1 -1
net/core/rtnetlink.c
··· 663 sz_idx = type>>2; 664 kind = type&3; 665 666 - if (kind != 2 && security_netlink_recv(skb)) { 667 *errp = -EPERM; 668 return -1; 669 }
··· 663 sz_idx = type>>2; 664 kind = type&3; 665 666 + if (kind != 2 && security_netlink_recv(skb, CAP_NET_ADMIN)) { 667 *errp = -EPERM; 668 return -1; 669 }
+4 -3
net/core/skbuff.c
··· 272 skb_get(list); 273 } 274 275 - void skb_release_data(struct sk_buff *skb) 276 { 277 if (!skb->cloned || 278 !atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1, ··· 1848 /** 1849 * skb_segment - Perform protocol segmentation on skb. 1850 * @skb: buffer to segment 1851 - * @sg: whether scatter-gather can be used for generated segments 1852 * 1853 * This function performs segmentation on the given skb. It returns 1854 * the segment at the given position. It returns NULL if there are 1855 * no more segments to generate, or when an error is encountered. 1856 */ 1857 - struct sk_buff *skb_segment(struct sk_buff *skb, int sg) 1858 { 1859 struct sk_buff *segs = NULL; 1860 struct sk_buff *tail = NULL; ··· 1863 unsigned int offset = doffset; 1864 unsigned int headroom; 1865 unsigned int len; 1866 int nfrags = skb_shinfo(skb)->nr_frags; 1867 int err = -ENOMEM; 1868 int i = 0;
··· 272 skb_get(list); 273 } 274 275 + static void skb_release_data(struct sk_buff *skb) 276 { 277 if (!skb->cloned || 278 !atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1, ··· 1848 /** 1849 * skb_segment - Perform protocol segmentation on skb. 1850 * @skb: buffer to segment 1851 + * @features: features for the output path (see dev->features) 1852 * 1853 * This function performs segmentation on the given skb. It returns 1854 * the segment at the given position. It returns NULL if there are 1855 * no more segments to generate, or when an error is encountered. 1856 */ 1857 + struct sk_buff *skb_segment(struct sk_buff *skb, int features) 1858 { 1859 struct sk_buff *segs = NULL; 1860 struct sk_buff *tail = NULL; ··· 1863 unsigned int offset = doffset; 1864 unsigned int headroom; 1865 unsigned int len; 1866 + int sg = features & NETIF_F_SG; 1867 int nfrags = skb_shinfo(skb)->nr_frags; 1868 int err = -ENOMEM; 1869 int i = 0;
+11
net/core/sock.c
··· 565 ret = -ENONET; 566 break; 567 568 /* We implement the SO_SNDLOWAT etc to 569 not be settable (1003.1g 5.3) */ 570 default: ··· 728 */ 729 case SO_ACCEPTCONN: 730 v.val = sk->sk_state == TCP_LISTEN; 731 break; 732 733 case SO_PEERSEC:
··· 565 ret = -ENONET; 566 break; 567 568 + case SO_PASSSEC: 569 + if (valbool) 570 + set_bit(SOCK_PASSSEC, &sock->flags); 571 + else 572 + clear_bit(SOCK_PASSSEC, &sock->flags); 573 + break; 574 + 575 /* We implement the SO_SNDLOWAT etc to 576 not be settable (1003.1g 5.3) */ 577 default: ··· 721 */ 722 case SO_ACCEPTCONN: 723 v.val = sk->sk_state == TCP_LISTEN; 724 + break; 725 + 726 + case SO_PASSSEC: 727 + v.val = test_bit(SOCK_PASSSEC, &sock->flags) ? 1 : 0; 728 break; 729 730 case SO_PEERSEC:
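Editor's note (not part of the diff): with the AF_UNIX datagram getpeersec pieces above, a receiver opts in with SO_PASSSEC and then reads the sender's security context from an SCM_SECURITY control message. A hedged user-space sketch; the socket path, buffer sizes and fallback constant values are assumptions:

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <sys/uio.h>
#include <unistd.h>

#ifndef SO_PASSSEC
#define SO_PASSSEC   34		/* value used by most architectures above */
#endif
#ifndef SCM_SECURITY
#define SCM_SECURITY 0x03	/* from <linux/socket.h> */
#endif

int main(void)
{
	int one = 1;
	char data[256], cbuf[256];
	struct sockaddr_un addr = { .sun_family = AF_UNIX };
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = { 0 };
	struct cmsghdr *cmsg;
	int fd = socket(AF_UNIX, SOCK_DGRAM, 0);

	strncpy(addr.sun_path, "/tmp/secsock", sizeof(addr.sun_path) - 1);	/* assumed path */
	unlink(addr.sun_path);
	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
	setsockopt(fd, SOL_SOCKET, SO_PASSSEC, &one, sizeof(one));

	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = cbuf;
	msg.msg_controllen = sizeof(cbuf);

	if (recvmsg(fd, &msg, 0) < 0)
		return 1;
	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg))
		if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_SECURITY)
			printf("peer context: %.*s\n",
			       (int)(cmsg->cmsg_len - CMSG_LEN(0)),
			       (char *)CMSG_DATA(cmsg));
	close(fd);
	return 0;
}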
+1 -1
net/decnet/netfilter/dn_rtmsg.c
··· 107 if (nlh->nlmsg_len < sizeof(*nlh) || skb->len < nlh->nlmsg_len) 108 return; 109 110 - if (!cap_raised(NETLINK_CB(skb).eff_cap, CAP_NET_ADMIN)) 111 RCV_SKB_FAIL(-EPERM); 112 113 /* Eventually we might send routing messages too */
··· 107 if (nlh->nlmsg_len < sizeof(*nlh) || skb->len < nlh->nlmsg_len) 108 return; 109 110 + if (security_netlink_recv(skb, CAP_NET_ADMIN)) 111 RCV_SKB_FAIL(-EPERM); 112 113 /* Eventually we might send routing messages too */
+3 -3
net/ipv4/af_inet.c
··· 1097 1098 EXPORT_SYMBOL(inet_sk_rebuild_header); 1099 1100 - static struct sk_buff *inet_gso_segment(struct sk_buff *skb, int sg) 1101 { 1102 struct sk_buff *segs = ERR_PTR(-EINVAL); 1103 struct iphdr *iph; ··· 1126 rcu_read_lock(); 1127 ops = rcu_dereference(inet_protos[proto]); 1128 if (ops && ops->gso_segment) 1129 - segs = ops->gso_segment(skb, sg); 1130 rcu_read_unlock(); 1131 1132 - if (IS_ERR(segs)) 1133 goto out; 1134 1135 skb = segs;
··· 1097 1098 EXPORT_SYMBOL(inet_sk_rebuild_header); 1099 1100 + static struct sk_buff *inet_gso_segment(struct sk_buff *skb, int features) 1101 { 1102 struct sk_buff *segs = ERR_PTR(-EINVAL); 1103 struct iphdr *iph; ··· 1126 rcu_read_lock(); 1127 ops = rcu_dereference(inet_protos[proto]); 1128 if (ops && ops->gso_segment) 1129 + segs = ops->gso_segment(skb, features); 1130 rcu_read_unlock(); 1131 1132 + if (!segs || unlikely(IS_ERR(segs))) 1133 goto out; 1134 1135 skb = segs;
+1 -1
net/ipv4/netfilter/Kconfig
··· 332 help 333 This option adds a new iptables `hashlimit' match. 334 335 - As opposed to `limit', this match dynamically crates a hash table 336 of limit buckets, based on your selection of source/destination 337 ip addresses and/or ports. 338
··· 332 help 333 This option adds a new iptables `hashlimit' match. 334 335 + As opposed to `limit', this match dynamically creates a hash table 336 of limit buckets, based on your selection of source/destination 337 ip addresses and/or ports. 338
+2 -1
net/ipv4/netfilter/arp_tables.c
··· 1120 return ret; 1121 } 1122 1123 - if (xt_register_table(table, &bootstrap, newinfo) != 0) { 1124 xt_free_table_info(newinfo); 1125 return ret; 1126 }
··· 1120 return ret; 1121 } 1122 1123 + ret = xt_register_table(table, &bootstrap, newinfo); 1124 + if (ret != 0) { 1125 xt_free_table_info(newinfo); 1126 return ret; 1127 }
+11 -3
net/ipv4/netfilter/ip_queue.c
··· 457 if (entry->info->indev) 458 if (entry->info->indev->ifindex == ifindex) 459 return 1; 460 - 461 if (entry->info->outdev) 462 if (entry->info->outdev->ifindex == ifindex) 463 return 1; 464 - 465 return 0; 466 } 467 ··· 515 if (type <= IPQM_BASE) 516 return; 517 518 - if (security_netlink_recv(skb)) 519 RCV_SKB_FAIL(-EPERM); 520 521 write_lock_bh(&queue_lock);
··· 457 if (entry->info->indev) 458 if (entry->info->indev->ifindex == ifindex) 459 return 1; 460 if (entry->info->outdev) 461 if (entry->info->outdev->ifindex == ifindex) 462 return 1; 463 + #ifdef CONFIG_BRIDGE_NETFILTER 464 + if (entry->skb->nf_bridge) { 465 + if (entry->skb->nf_bridge->physindev && 466 + entry->skb->nf_bridge->physindev->ifindex == ifindex) 467 + return 1; 468 + if (entry->skb->nf_bridge->physoutdev && 469 + entry->skb->nf_bridge->physoutdev->ifindex == ifindex) 470 + return 1; 471 + } 472 + #endif 473 return 0; 474 } 475 ··· 507 if (type <= IPQM_BASE) 508 return; 509 510 + if (security_netlink_recv(skb, CAP_NET_ADMIN)) 511 RCV_SKB_FAIL(-EPERM); 512 513 write_lock_bh(&queue_lock);
+2 -1
net/ipv4/netfilter/ip_tables.c
··· 2113 return ret; 2114 } 2115 2116 - if (xt_register_table(table, &bootstrap, newinfo) != 0) { 2117 xt_free_table_info(newinfo); 2118 return ret; 2119 }
··· 2113 return ret; 2114 } 2115 2116 + ret = xt_register_table(table, &bootstrap, newinfo); 2117 + if (ret != 0) { 2118 xt_free_table_info(newinfo); 2119 return ret; 2120 }
+6 -2
net/ipv4/tcp.c
··· 2145 EXPORT_SYMBOL(compat_tcp_getsockopt); 2146 #endif 2147 2148 - struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int sg) 2149 { 2150 struct sk_buff *segs = ERR_PTR(-EINVAL); 2151 struct tcphdr *th; ··· 2166 if (!pskb_may_pull(skb, thlen)) 2167 goto out; 2168 2169 oldlen = (u16)~skb->len; 2170 __skb_pull(skb, thlen); 2171 2172 - segs = skb_segment(skb, sg); 2173 if (IS_ERR(segs)) 2174 goto out; 2175
··· 2145 EXPORT_SYMBOL(compat_tcp_getsockopt); 2146 #endif 2147 2148 + struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features) 2149 { 2150 struct sk_buff *segs = ERR_PTR(-EINVAL); 2151 struct tcphdr *th; ··· 2166 if (!pskb_may_pull(skb, thlen)) 2167 goto out; 2168 2169 + segs = NULL; 2170 + if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) 2171 + goto out; 2172 + 2173 oldlen = (u16)~skb->len; 2174 __skb_pull(skb, thlen); 2175 2176 + segs = skb_segment(skb, features); 2177 if (IS_ERR(segs)) 2178 goto out; 2179
+4 -1
net/ipv4/tcp_diag.c
··· 26 const struct tcp_sock *tp = tcp_sk(sk); 27 struct tcp_info *info = _info; 28 29 - r->idiag_rqueue = tp->rcv_nxt - tp->copied_seq; 30 r->idiag_wqueue = tp->write_seq - tp->snd_una; 31 if (info != NULL) 32 tcp_get_info(sk, info);
··· 26 const struct tcp_sock *tp = tcp_sk(sk); 27 struct tcp_info *info = _info; 28 29 + if (sk->sk_state == TCP_LISTEN) 30 + r->idiag_rqueue = sk->sk_ack_backlog; 31 + else 32 + r->idiag_rqueue = tp->rcv_nxt - tp->copied_seq; 33 r->idiag_wqueue = tp->write_seq - tp->snd_una; 34 if (info != NULL) 35 tcp_get_info(sk, info);
-4
net/ipv4/tcp_input.c
··· 4178 */ 4179 4180 TCP_ECN_rcv_synack(tp, th); 4181 - if (tp->ecn_flags&TCP_ECN_OK) 4182 - sock_set_flag(sk, SOCK_NO_LARGESEND); 4183 4184 tp->snd_wl1 = TCP_SKB_CB(skb)->seq; 4185 tcp_ack(sk, skb, FLAG_SLOWPATH); ··· 4320 tp->max_window = tp->snd_wnd; 4321 4322 TCP_ECN_rcv_syn(tp, th); 4323 - if (tp->ecn_flags&TCP_ECN_OK) 4324 - sock_set_flag(sk, SOCK_NO_LARGESEND); 4325 4326 tcp_mtup_init(sk); 4327 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
··· 4178 */ 4179 4180 TCP_ECN_rcv_synack(tp, th); 4181 4182 tp->snd_wl1 = TCP_SKB_CB(skb)->seq; 4183 tcp_ack(sk, skb, FLAG_SLOWPATH); ··· 4322 tp->max_window = tp->snd_wnd; 4323 4324 TCP_ECN_rcv_syn(tp, th); 4325 4326 tcp_mtup_init(sk); 4327 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
+2 -1
net/ipv4/tcp_ipv4.c
··· 1726 sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX " 1727 "%08X %5d %8d %lu %d %p %u %u %u %u %d", 1728 i, src, srcp, dest, destp, sp->sk_state, 1729 - tp->write_seq - tp->snd_una, tp->rcv_nxt - tp->copied_seq, 1730 timer_active, 1731 jiffies_to_clock_t(timer_expires - jiffies), 1732 icsk->icsk_retransmits,
··· 1726 sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX " 1727 "%08X %5d %8d %lu %d %p %u %u %u %u %d", 1728 i, src, srcp, dest, destp, sp->sk_state, 1729 + tp->write_seq - tp->snd_una, 1730 + (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq), 1731 timer_active, 1732 jiffies_to_clock_t(timer_expires - jiffies), 1733 icsk->icsk_retransmits,
-2
net/ipv4/tcp_minisocks.c
··· 440 newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len; 441 newtp->rx_opt.mss_clamp = req->mss; 442 TCP_ECN_openreq_child(newtp, req); 443 - if (newtp->ecn_flags&TCP_ECN_OK) 444 - sock_set_flag(newsk, SOCK_NO_LARGESEND); 445 446 TCP_INC_STATS_BH(TCP_MIB_PASSIVEOPENS); 447 }
··· 440 newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len; 441 newtp->rx_opt.mss_clamp = req->mss; 442 TCP_ECN_openreq_child(newtp, req); 443 444 TCP_INC_STATS_BH(TCP_MIB_PASSIVEOPENS); 445 }
-2
net/ipv4/tcp_output.c
··· 2044 memset(th, 0, sizeof(struct tcphdr)); 2045 th->syn = 1; 2046 th->ack = 1; 2047 - if (dst->dev->features&NETIF_F_TSO) 2048 - ireq->ecn_ok = 0; 2049 TCP_ECN_make_synack(req, th); 2050 th->source = inet_sk(sk)->sport; 2051 th->dest = ireq->rmt_port;
··· 2044 memset(th, 0, sizeof(struct tcphdr)); 2045 th->syn = 1; 2046 th->ack = 1; 2047 TCP_ECN_make_synack(req, th); 2048 th->source = inet_sk(sk)->sport; 2049 th->dest = ireq->rmt_port;
+1 -1
net/ipv6/netfilter/ip6_queue.c
··· 505 if (type <= IPQM_BASE) 506 return; 507 508 - if (security_netlink_recv(skb)) 509 RCV_SKB_FAIL(-EPERM); 510 511 write_lock_bh(&queue_lock);
··· 505 if (type <= IPQM_BASE) 506 return; 507 508 + if (security_netlink_recv(skb, CAP_NET_ADMIN)) 509 RCV_SKB_FAIL(-EPERM); 510 511 write_lock_bh(&queue_lock);
+2 -1
net/ipv6/netfilter/ip6_tables.c
··· 1281 return ret; 1282 } 1283 1284 - if (xt_register_table(table, &bootstrap, newinfo) != 0) { 1285 xt_free_table_info(newinfo); 1286 return ret; 1287 }
··· 1281 return ret; 1282 } 1283 1284 + ret = xt_register_table(table, &bootstrap, newinfo); 1285 + if (ret != 0) { 1286 xt_free_table_info(newinfo); 1287 return ret; 1288 }
+2 -1
net/ipv6/tcp_ipv6.c
··· 1469 dest->s6_addr32[0], dest->s6_addr32[1], 1470 dest->s6_addr32[2], dest->s6_addr32[3], destp, 1471 sp->sk_state, 1472 - tp->write_seq-tp->snd_una, tp->rcv_nxt-tp->copied_seq, 1473 timer_active, 1474 jiffies_to_clock_t(timer_expires - jiffies), 1475 icsk->icsk_retransmits,
··· 1469 dest->s6_addr32[0], dest->s6_addr32[1], 1470 dest->s6_addr32[2], dest->s6_addr32[3], destp, 1471 sp->sk_state, 1472 + tp->write_seq-tp->snd_una, 1473 + (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq), 1474 timer_active, 1475 jiffies_to_clock_t(timer_expires - jiffies), 1476 icsk->icsk_retransmits,
+2 -1
net/irda/irlan/irlan_client.c
··· 173 rcu_read_lock(); 174 self = irlan_get_any(); 175 if (self) { 176 - IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); 177 178 IRDA_DEBUG(1, "%s(), Found instance (%08x)!\n", __FUNCTION__ , 179 daddr); 180 181 irlan_client_wakeup(self, saddr, daddr); 182 } 183 rcu_read_unlock(); 184 } 185
··· 173 rcu_read_lock(); 174 self = irlan_get_any(); 175 if (self) { 176 + IRDA_ASSERT(self->magic == IRLAN_MAGIC, goto out;); 177 178 IRDA_DEBUG(1, "%s(), Found instance (%08x)!\n", __FUNCTION__ , 179 daddr); 180 181 irlan_client_wakeup(self, saddr, daddr); 182 } 183 + IRDA_ASSERT_LABEL(out:) 184 rcu_read_unlock(); 185 } 186
+4 -1
net/netfilter/Kconfig
··· 411 tristate '"statistic" match support' 412 depends on NETFILTER_XTABLES 413 help 414 - statistic module 415 416 config NETFILTER_XT_MATCH_STRING 417 tristate '"string" match support'
··· 411 tristate '"statistic" match support' 412 depends on NETFILTER_XTABLES 413 help 414 + This option adds a `statistic' match, which allows you to match 415 + on packets periodically or randomly with a given percentage. 416 + 417 + To compile it as a module, choose M here. If unsure, say N. 418 419 config NETFILTER_XT_MATCH_STRING 420 tristate '"string" match support'
+1
net/netfilter/nf_conntrack_netlink.c
··· 29 #include <linux/errno.h> 30 #include <linux/netlink.h> 31 #include <linux/spinlock.h> 32 #include <linux/notifier.h> 33 34 #include <linux/netfilter.h>
··· 29 #include <linux/errno.h> 30 #include <linux/netlink.h> 31 #include <linux/spinlock.h> 32 + #include <linux/interrupt.h> 33 #include <linux/notifier.h> 34 35 #include <linux/netfilter.h>
+2
net/netfilter/nf_conntrack_proto_sctp.c
··· 28 #include <linux/sctp.h> 29 #include <linux/string.h> 30 #include <linux/seq_file.h> 31 32 #include <net/netfilter/nf_conntrack.h> 33 #include <net/netfilter/nf_conntrack_protocol.h>
··· 28 #include <linux/sctp.h> 29 #include <linux/string.h> 30 #include <linux/seq_file.h> 31 + #include <linux/spinlock.h> 32 + #include <linux/interrupt.h> 33 34 #include <net/netfilter/nf_conntrack.h> 35 #include <net/netfilter/nf_conntrack_protocol.h>
+1 -1
net/netfilter/nfnetlink.c
··· 229 NFNL_SUBSYS_ID(nlh->nlmsg_type), 230 NFNL_MSG_TYPE(nlh->nlmsg_type)); 231 232 - if (!cap_raised(NETLINK_CB(skb).eff_cap, CAP_NET_ADMIN)) { 233 DEBUGP("missing CAP_NET_ADMIN\n"); 234 *errp = -EPERM; 235 return -1;
··· 229 NFNL_SUBSYS_ID(nlh->nlmsg_type), 230 NFNL_MSG_TYPE(nlh->nlmsg_type)); 231 232 + if (security_netlink_recv(skb, CAP_NET_ADMIN)) { 233 DEBUGP("missing CAP_NET_ADMIN\n"); 234 *errp = -EPERM; 235 return -1;
+1 -1
net/netfilter/xt_sctp.c
··· 151 && SCCHECK(((ntohs(sh->dest) >= info->dpts[0]) 152 && (ntohs(sh->dest) <= info->dpts[1])), 153 XT_SCTP_DEST_PORTS, info->flags, info->invflags) 154 - && SCCHECK(match_packet(skb, protoff, 155 info->chunkmap, info->chunk_match_type, 156 info->flag_info, info->flag_count, 157 hotdrop),
··· 151 && SCCHECK(((ntohs(sh->dest) >= info->dpts[0]) 152 && (ntohs(sh->dest) <= info->dpts[1])), 153 XT_SCTP_DEST_PORTS, info->flags, info->invflags) 154 + && SCCHECK(match_packet(skb, protoff + sizeof (sctp_sctphdr_t), 155 info->chunkmap, info->chunk_match_type, 156 info->flag_info, info->flag_count, 157 hotdrop),
+1 -1
net/netfilter/xt_tcpudp.c
··· 260 return ret; 261 262 out_unreg_udp: 263 - xt_unregister_match(&tcp_matchstruct); 264 out_unreg_tcp6: 265 xt_unregister_match(&tcp6_matchstruct); 266 out_unreg_tcp:
··· 260 return ret; 261 262 out_unreg_udp: 263 + xt_unregister_match(&udp_matchstruct); 264 out_unreg_tcp6: 265 xt_unregister_match(&tcp6_matchstruct); 266 out_unreg_tcp:
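The fix above makes the unwind label reached after a failed udp6 registration drop the udp match instead of unregistering the tcp match a second time. A compressed sketch of the register/unwind ladder in that file (the match structs are static there; the externs exist only so the sketch stands alone):

#include <linux/init.h>
#include <linux/netfilter/x_tables.h>

extern struct xt_match tcp_matchstruct, tcp6_matchstruct,
                       udp_matchstruct, udp6_matchstruct;

static int __init xt_tcpudp_init_sketch(void)
{
        int ret;

        ret = xt_register_match(&tcp_matchstruct);
        if (ret)
                goto out;
        ret = xt_register_match(&tcp6_matchstruct);
        if (ret)
                goto out_unreg_tcp;
        ret = xt_register_match(&udp_matchstruct);
        if (ret)
                goto out_unreg_tcp6;
        ret = xt_register_match(&udp6_matchstruct);
        if (ret)
                goto out_unreg_udp;
        return 0;

out_unreg_udp:
        xt_unregister_match(&udp_matchstruct);   /* was tcp_matchstruct before the fix */
out_unreg_tcp6:
        xt_unregister_match(&tcp6_matchstruct);
out_unreg_tcp:
        xt_unregister_match(&tcp_matchstruct);
out:
        return ret;
}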
+1 -1
net/netlink/genetlink.c
··· 320 goto errout; 321 } 322 323 - if ((ops->flags & GENL_ADMIN_PERM) && security_netlink_recv(skb)) { 324 err = -EPERM; 325 goto errout; 326 }
··· 320 goto errout; 321 } 322 323 + if ((ops->flags & GENL_ADMIN_PERM) && security_netlink_recv(skb, CAP_NET_ADMIN)) { 324 err = -EPERM; 325 goto errout; 326 }
+2 -1
net/tipc/core.c
··· 191 int res; 192 193 tipc_log_reinit(CONFIG_TIPC_LOG); 194 - info("Activated (compiled " __DATE__ " " __TIME__ ")\n"); 195 196 tipc_own_addr = 0; 197 tipc_remote_management = 1;
··· 191 int res; 192 193 tipc_log_reinit(CONFIG_TIPC_LOG); 194 + info("Activated (version " TIPC_MOD_VER 195 + " compiled " __DATE__ " " __TIME__ ")\n"); 196 197 tipc_own_addr = 0; 198 tipc_remote_management = 1;
+6 -5
net/tipc/link.c
··· 2 * net/tipc/link.c: TIPC link code 3 * 4 * Copyright (c) 1996-2006, Ericsson AB 5 - * Copyright (c) 2004-2005, Wind River Systems 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without ··· 988 struct tipc_msg *bundler_msg = buf_msg(bundler); 989 struct tipc_msg *msg = buf_msg(buf); 990 u32 size = msg_size(msg); 991 - u32 to_pos = align(msg_size(bundler_msg)); 992 - u32 rest = link_max_pkt(l_ptr) - to_pos; 993 994 if (msg_user(bundler_msg) != MSG_BUNDLER) 995 return 0; 996 if (msg_type(bundler_msg) != OPEN_MSG) 997 return 0; 998 - if (rest < align(size)) 999 return 0; 1000 1001 - skb_put(bundler, (to_pos - msg_size(bundler_msg)) + size); 1002 memcpy(bundler->data + to_pos, buf->data, size); 1003 msg_set_size(bundler_msg, to_pos + size); 1004 msg_set_msgcnt(bundler_msg, msg_msgcnt(bundler_msg) + 1);
··· 2 * net/tipc/link.c: TIPC link code 3 * 4 * Copyright (c) 1996-2006, Ericsson AB 5 + * Copyright (c) 2004-2006, Wind River Systems 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without ··· 988 struct tipc_msg *bundler_msg = buf_msg(bundler); 989 struct tipc_msg *msg = buf_msg(buf); 990 u32 size = msg_size(msg); 991 + u32 bundle_size = msg_size(bundler_msg); 992 + u32 to_pos = align(bundle_size); 993 + u32 pad = to_pos - bundle_size; 994 995 if (msg_user(bundler_msg) != MSG_BUNDLER) 996 return 0; 997 if (msg_type(bundler_msg) != OPEN_MSG) 998 return 0; 999 + if (skb_tailroom(bundler) < (pad + size)) 1000 return 0; 1001 1002 + skb_put(bundler, pad + size); 1003 memcpy(bundler->data + to_pos, buf->data, size); 1004 msg_set_size(bundler_msg, to_pos + size); 1005 msg_set_msgcnt(bundler_msg, msg_msgcnt(bundler_msg) + 1);
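Rather than deriving the remaining space from link_max_pkt(), the bundling fix asks the buffer itself how much room is left, which is the invariant skb_put() enforces (it panics if the write would run past the end of the allocation). A minimal sketch of that check, with a hypothetical helper name and without the alignment padding handled above:

#include <linux/skbuff.h>
#include <linux/string.h>

/* hypothetical append helper, for illustration only */
static int example_bundle_append(struct sk_buff *bundler,
                                 const void *data, unsigned int len)
{
        if (skb_tailroom(bundler) < len)
                return 0;                         /* not enough room: don't bundle */

        memcpy(skb_put(bundler, len), data, len); /* safe: tailroom verified above */
        return 1;
}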
+16 -8
net/tipc/node.c
··· 2 * net/tipc/node.c: TIPC node management routines 3 * 4 * Copyright (c) 2000-2006, Ericsson AB 5 - * Copyright (c) 2005, Wind River Systems 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without ··· 592 struct sk_buff *buf; 593 struct node *n_ptr; 594 struct tipc_node_info node_info; 595 596 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR)) 597 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); ··· 609 /* For now, get space for all other nodes 610 (will need to modify this when slave nodes are supported */ 611 612 - buf = tipc_cfg_reply_alloc(TLV_SPACE(sizeof(node_info)) * 613 - (tipc_max_nodes - 1)); 614 if (!buf) 615 return NULL; 616 ··· 637 struct sk_buff *buf; 638 struct node *n_ptr; 639 struct tipc_link_info link_info; 640 641 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR)) 642 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); ··· 650 651 if (!tipc_nodes) 652 return tipc_cfg_reply_none(); 653 654 - /* For now, get space for 2 links to all other nodes + bcast link 655 - (will need to modify this when slave nodes are supported */ 656 - 657 - buf = tipc_cfg_reply_alloc(TLV_SPACE(sizeof(link_info)) * 658 - (2 * (tipc_max_nodes - 1) + 1)); 659 if (!buf) 660 return NULL; 661
··· 2 * net/tipc/node.c: TIPC node management routines 3 * 4 * Copyright (c) 2000-2006, Ericsson AB 5 + * Copyright (c) 2005-2006, Wind River Systems 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without ··· 592 struct sk_buff *buf; 593 struct node *n_ptr; 594 struct tipc_node_info node_info; 595 + u32 payload_size; 596 597 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR)) 598 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); ··· 608 /* For now, get space for all other nodes 609 (will need to modify this when slave nodes are supported */ 610 611 + payload_size = TLV_SPACE(sizeof(node_info)) * (tipc_max_nodes - 1); 612 + if (payload_size > 32768u) 613 + return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED 614 + " (too many nodes)"); 615 + buf = tipc_cfg_reply_alloc(payload_size); 616 if (!buf) 617 return NULL; 618 ··· 633 struct sk_buff *buf; 634 struct node *n_ptr; 635 struct tipc_link_info link_info; 636 + u32 payload_size; 637 638 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR)) 639 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); ··· 645 646 if (!tipc_nodes) 647 return tipc_cfg_reply_none(); 648 + 649 + /* Get space for all unicast links + multicast link */ 650 651 + payload_size = TLV_SPACE(sizeof(link_info)) * 652 + (tipc_net.zones[tipc_zone(tipc_own_addr)]->links + 1); 653 + if (payload_size > 32768u) 654 + return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED 655 + " (too many links)"); 656 + buf = tipc_cfg_reply_alloc(payload_size); 657 if (!buf) 658 return NULL; 659
+2 -2
net/tipc/zone.h
··· 2 * net/tipc/zone.h: Include file for TIPC zone management routines 3 * 4 * Copyright (c) 2000-2006, Ericsson AB 5 - * Copyright (c) 2005, Wind River Systems 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without ··· 45 * struct _zone - TIPC zone structure 46 * @addr: network address of zone 47 * @clusters: array of pointers to all clusters within zone 48 - * @links: (used for inter-zone communication) 49 */ 50 51 struct _zone {
··· 2 * net/tipc/zone.h: Include file for TIPC zone management routines 3 * 4 * Copyright (c) 2000-2006, Ericsson AB 5 + * Copyright (c) 2005-2006, Wind River Systems 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without ··· 45 * struct _zone - TIPC zone structure 46 * @addr: network address of zone 47 * @clusters: array of pointers to all clusters within zone 48 + * @links: number of (unicast) links to zone 49 */ 50 51 struct _zone {
+27
net/unix/af_unix.c
··· 128 129 #define UNIX_ABSTRACT(sk) (unix_sk(sk)->addr->hash != UNIX_HASH_SIZE) 130 131 /* 132 * SMP locking strategy: 133 * hash table is protected with spinlock unix_table_lock ··· 1315 if (siocb->scm->fp) 1316 unix_attach_fds(siocb->scm, skb); 1317 1318 skb->h.raw = skb->data; 1319 err = memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len); 1320 if (err) ··· 1596 memset(&tmp_scm, 0, sizeof(tmp_scm)); 1597 } 1598 siocb->scm->creds = *UNIXCREDS(skb); 1599 1600 if (!(flags & MSG_PEEK)) 1601 {
··· 128 129 #define UNIX_ABSTRACT(sk) (unix_sk(sk)->addr->hash != UNIX_HASH_SIZE) 130 131 + #ifdef CONFIG_SECURITY_NETWORK 132 + static void unix_get_peersec_dgram(struct sk_buff *skb) 133 + { 134 + int err; 135 + 136 + err = security_socket_getpeersec_dgram(skb, UNIXSECDATA(skb), 137 + UNIXSECLEN(skb)); 138 + if (err) 139 + *(UNIXSECDATA(skb)) = NULL; 140 + } 141 + 142 + static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb) 143 + { 144 + scm->secdata = *UNIXSECDATA(skb); 145 + scm->seclen = *UNIXSECLEN(skb); 146 + } 147 + #else 148 + static void unix_get_peersec_dgram(struct sk_buff *skb) 149 + { } 150 + 151 + static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb) 152 + { } 153 + #endif /* CONFIG_SECURITY_NETWORK */ 154 + 155 /* 156 * SMP locking strategy: 157 * hash table is protected with spinlock unix_table_lock ··· 1291 if (siocb->scm->fp) 1292 unix_attach_fds(siocb->scm, skb); 1293 1294 + unix_get_peersec_dgram(skb); 1295 + 1296 skb->h.raw = skb->data; 1297 err = memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len); 1298 if (err) ··· 1570 memset(&tmp_scm, 0, sizeof(tmp_scm)); 1571 } 1572 siocb->scm->creds = *UNIXCREDS(skb); 1573 + unix_set_secdata(siocb->scm, skb); 1574 1575 if (!(flags & MSG_PEEK)) 1576 {
-2
net/xfrm/xfrm_state.c
··· 1164 return res; 1165 } 1166 1167 - EXPORT_SYMBOL(xfrm_state_mtu); 1168 - 1169 int xfrm_init_state(struct xfrm_state *x) 1170 { 1171 struct xfrm_state_afinfo *afinfo;
··· 1164 return res; 1165 } 1166 1167 int xfrm_init_state(struct xfrm_state *x) 1168 { 1169 struct xfrm_state_afinfo *afinfo;
+1 -1
net/xfrm/xfrm_user.c
··· 1435 link = &xfrm_dispatch[type]; 1436 1437 /* All operations require privileges, even GET */ 1438 - if (security_netlink_recv(skb)) { 1439 *errp = -EPERM; 1440 return -1; 1441 }
··· 1435 link = &xfrm_dispatch[type]; 1436 1437 /* All operations require privileges, even GET */ 1438 + if (security_netlink_recv(skb, CAP_NET_ADMIN)) { 1439 *errp = -EPERM; 1440 return -1; 1441 }
+2 -2
security/commoncap.c
··· 33 34 EXPORT_SYMBOL(cap_netlink_send); 35 36 - int cap_netlink_recv(struct sk_buff *skb) 37 { 38 - if (!cap_raised(NETLINK_CB(skb).eff_cap, CAP_NET_ADMIN)) 39 return -EPERM; 40 return 0; 41 }
··· 33 34 EXPORT_SYMBOL(cap_netlink_send); 35 36 + int cap_netlink_recv(struct sk_buff *skb, int cap) 37 { 38 + if (!cap_raised(NETLINK_CB(skb).eff_cap, cap)) 39 return -EPERM; 40 return 0; 41 }
+2 -2
security/dummy.c
··· 675 return 0; 676 } 677 678 - static int dummy_netlink_recv (struct sk_buff *skb) 679 { 680 - if (!cap_raised (NETLINK_CB (skb).eff_cap, CAP_NET_ADMIN)) 681 return -EPERM; 682 return 0; 683 }
··· 675 return 0; 676 } 677 678 + static int dummy_netlink_recv (struct sk_buff *skb, int cap) 679 { 680 + if (!cap_raised (NETLINK_CB (skb).eff_cap, cap)) 681 return -EPERM; 682 return 0; 683 }
+21 -16
security/selinux/hooks.c
··· 69 #include <linux/sysctl.h> 70 #include <linux/audit.h> 71 #include <linux/string.h> 72 73 #include "avc.h" 74 #include "objsec.h" ··· 3421 static int selinux_socket_getpeersec_dgram(struct sk_buff *skb, char **secdata, u32 *seclen) 3422 { 3423 int err = 0; 3424 - u32 peer_sid = selinux_socket_getpeer_dgram(skb); 3425 3426 if (peer_sid == SECSID_NULL) 3427 return -EINVAL; ··· 3438 3439 return 0; 3440 } 3441 - 3442 - 3443 3444 static int selinux_sk_alloc_security(struct sock *sk, int family, gfp_t priority) 3445 { ··· 3646 3647 static int selinux_netlink_send(struct sock *sk, struct sk_buff *skb) 3648 { 3649 - struct task_security_struct *tsec; 3650 - struct av_decision avd; 3651 int err; 3652 3653 err = secondary_ops->netlink_send(sk, skb); 3654 if (err) 3655 return err; 3656 - 3657 - tsec = current->security; 3658 - 3659 - avd.allowed = 0; 3660 - avc_has_perm_noaudit(tsec->sid, tsec->sid, 3661 - SECCLASS_CAPABILITY, ~0, &avd); 3662 - cap_mask(NETLINK_CB(skb).eff_cap, avd.allowed); 3663 3664 if (policydb_loaded_version >= POLICYDB_VERSION_NLCLASS) 3665 err = selinux_nlmsg_perm(sk, skb); ··· 3658 return err; 3659 } 3660 3661 - static int selinux_netlink_recv(struct sk_buff *skb) 3662 { 3663 - if (!cap_raised(NETLINK_CB(skb).eff_cap, CAP_NET_ADMIN)) 3664 - return -EPERM; 3665 - return 0; 3666 } 3667 3668 static int ipc_alloc_security(struct task_struct *task,
··· 69 #include <linux/sysctl.h> 70 #include <linux/audit.h> 71 #include <linux/string.h> 72 + #include <linux/selinux.h> 73 74 #include "avc.h" 75 #include "objsec.h" ··· 3420 static int selinux_socket_getpeersec_dgram(struct sk_buff *skb, char **secdata, u32 *seclen) 3421 { 3422 int err = 0; 3423 + u32 peer_sid; 3424 + 3425 + if (skb->sk->sk_family == PF_UNIX) 3426 + selinux_get_inode_sid(SOCK_INODE(skb->sk->sk_socket), 3427 + &peer_sid); 3428 + else 3429 + peer_sid = selinux_socket_getpeer_dgram(skb); 3430 3431 if (peer_sid == SECSID_NULL) 3432 return -EINVAL; ··· 3431 3432 return 0; 3433 } 3434 3435 static int selinux_sk_alloc_security(struct sock *sk, int family, gfp_t priority) 3436 { ··· 3641 3642 static int selinux_netlink_send(struct sock *sk, struct sk_buff *skb) 3643 { 3644 int err; 3645 3646 err = secondary_ops->netlink_send(sk, skb); 3647 if (err) 3648 return err; 3649 3650 if (policydb_loaded_version >= POLICYDB_VERSION_NLCLASS) 3651 err = selinux_nlmsg_perm(sk, skb); ··· 3662 return err; 3663 } 3664 3665 + static int selinux_netlink_recv(struct sk_buff *skb, int capability) 3666 { 3667 + int err; 3668 + struct avc_audit_data ad; 3669 + 3670 + err = secondary_ops->netlink_recv(skb, capability); 3671 + if (err) 3672 + return err; 3673 + 3674 + AVC_AUDIT_DATA_INIT(&ad, CAP); 3675 + ad.u.cap = capability; 3676 + 3677 + return avc_has_perm(NETLINK_CB(skb).sid, NETLINK_CB(skb).sid, 3678 + SECCLASS_CAPABILITY, CAP_TO_MASK(capability), &ad); 3679 } 3680 3681 static int ipc_alloc_security(struct task_struct *task,