Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (41 commits)
inet_diag: Make sure we actually run the same bytecode we audited.
netlink: Make nlmsg_find_attr take a const nlmsghdr*.
fib: fib_result_assign() should not change fib refcounts
netfilter: ip6_tables: fix information leak to userspace
cls_cgroup: Fix crash on module unload
memory corruption in X.25 facilities parsing
net dst: fix percpu_counter list corruption and poison overwritten
rds: Remove kfreed tcp conn from list
rds: Lost locking in loop connection freeing
de2104x: fix panic on load
atl1 : fix panic on load
netxen: remove unused firmware exports
caif: Remove noisy printout when disconnecting caif socket
caif: SPI-driver bugfix - incorrect padding.
caif: Bugfix for socket priority, bindtodev and dbg channel.
smsc911x: Set Ethernet EEPROM size to supported device's size
ipv4: netfilter: ip_tables: fix information leak to userland
ipv4: netfilter: arp_tables: fix information leak to userland
cxgb4vf: remove call to stop TX queues at load time.
cxgb4: remove call to stop TX queues at load time.
...

 46 files changed, 247 insertions(+), 154 deletions(-)

drivers/isdn/hisax/isar.c | +2 -2
@@ -1427,8 +1427,8 @@
 		  &bcs->hw.isar.reg->Flags))
 			bcs->hw.isar.dpath = 1;
 		else {
-			printk(KERN_WARNING"isar modeisar analog funktions only with DP1\n");
-			debugl1(cs, "isar modeisar analog funktions only with DP1");
+			printk(KERN_WARNING"isar modeisar analog functions only with DP1\n");
+			debugl1(cs, "isar modeisar analog functions only with DP1");
 			return(1);
 		}
 		break;

drivers/net/atlx/atl1.c | -1
@@ -3043,7 +3043,6 @@
 	atl1_pcie_patch(adapter);
 	/* assume we have no link for now */
 	netif_carrier_off(netdev);
-	netif_stop_queue(netdev);
 
 	setup_timer(&adapter->phy_config_timer, atl1_phy_config,
 		    (unsigned long)adapter);

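Note: this hunk and the matching one-line removals below (cxgb3, cxgb4, cxgb4vf, ibm_newemac, jme, qlcnic, de2104x, u_ether) appear to fix the same panic-on-load pattern. Since the earlier commit "net: allocate tx queues in register_netdevice", the TX queue structures only exist after register_netdev(), so stopping queues from probe dereferences unallocated memory; netif_carrier_off() alone suffices because the stack will not transmit before the device is opened. A minimal kernel-context sketch of the corrected probe ordering (hypothetical example_probe(), not any one driver's code):

    static int example_probe(struct net_device *netdev)
    {
            /* Report "no link" only; do NOT touch the TX queues here --
             * they are only allocated inside register_netdev() now. */
            netif_carrier_off(netdev);

            return register_netdev(netdev);
    }
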
drivers/net/bnx2x/bnx2x.h | +2 -2
@@ -20,8 +20,8 @@
  * (you will need to reboot afterwards) */
 /* #define BNX2X_STOP_ON_ERROR */
 
-#define DRV_MODULE_VERSION	"1.60.00-3"
-#define DRV_MODULE_RELDATE	"2010/10/19"
+#define DRV_MODULE_VERSION	"1.60.00-4"
+#define DRV_MODULE_RELDATE	"2010/11/01"
 #define BNX2X_BC_VER		0x040200
 
 #define BNX2X_MULTI_QUEUE

drivers/net/bnx2x/bnx2x_hsi.h | +8 -1
@@ -244,7 +244,14 @@
 
 	u16 xgxs_config_tx[4];				/* 0x1A0 */
 
-	u32 Reserved1[57];				/* 0x1A8 */
+	u32 Reserved1[56];				/* 0x1A8 */
+	u32 default_cfg;				/* 0x288 */
+	/* Enable BAM on KR */
+#define PORT_HW_CFG_ENABLE_BAM_ON_KR_MASK		0x00100000
+#define PORT_HW_CFG_ENABLE_BAM_ON_KR_SHIFT		20
+#define PORT_HW_CFG_ENABLE_BAM_ON_KR_DISABLED		0x00000000
+#define PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED		0x00100000
+
 	u32 speed_capability_mask2;			/* 0x28C */
 #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_MASK		0x0000FFFF
 #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_SHIFT		0

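Note on the layout above: shrinking Reserved1 from 57 to 56 u32s frees exactly one 32-bit slot for default_cfg while keeping every other offset stable: 0x1A8 + 56 * 4 = 0x1A8 + 0xE0 = 0x288 (default_cfg), and 0x288 + 4 = 0x28C, which still matches the speed_capability_mask2 comment.
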
drivers/net/bnx2x/bnx2x_link.c | +42 -15
@@ -610,7 +610,7 @@
 	/* reset and unreset the BigMac */
 	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
 	       (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
-	udelay(10);
+	msleep(1);
 
 	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
 	       (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
@@ -3525,13 +3525,19 @@
 	DP(NETIF_MSG_LINK, "Before rom RX_ALARM(port1): 0x%x\n", tmp1);
 
 	/* Enable CL37 BAM */
-	bnx2x_cl45_read(bp, phy,
-			MDIO_AN_DEVAD,
-			MDIO_AN_REG_8073_BAM, &val);
-	bnx2x_cl45_write(bp, phy,
-			 MDIO_AN_DEVAD,
-			 MDIO_AN_REG_8073_BAM, val | 1);
+	if (REG_RD(bp, params->shmem_base +
+		   offsetof(struct shmem_region, dev_info.
+			    port_hw_config[params->port].default_cfg)) &
+	    PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED) {
 
+		bnx2x_cl45_read(bp, phy,
+				MDIO_AN_DEVAD,
+				MDIO_AN_REG_8073_BAM, &val);
+		bnx2x_cl45_write(bp, phy,
+				MDIO_AN_DEVAD,
+				MDIO_AN_REG_8073_BAM, val | 1);
+		DP(NETIF_MSG_LINK, "Enable CL37 BAM on KR\n");
+	}
 	if (params->loopback_mode == LOOPBACK_EXT) {
 		bnx2x_807x_force_10G(bp, phy);
 		DP(NETIF_MSG_LINK, "Forced speed 10G on 807X\n");
@@ -5308,7 +5302,7 @@
 {
 	struct bnx2x *bp = params->bp;
 	u16 autoneg_val, an_1000_val, an_10_100_val;
-	bnx2x_wait_reset_complete(bp, phy);
+
 	bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4,
 		      1 << NIG_LATCH_BC_ENABLE_MI_INT);
 
@@ -5437,6 +5431,7 @@
 
 	/* HW reset */
 	bnx2x_ext_phy_hw_reset(bp, params->port);
+	bnx2x_wait_reset_complete(bp, phy);
 
 	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
 	return bnx2x_848xx_cmn_config_init(phy, params, vars);
@@ -5448,7 +5441,7 @@
 			       struct link_vars *vars)
 {
 	struct bnx2x *bp = params->bp;
-	u8 port = params->port, initialize = 1;
+	u8 port, initialize = 1;
 	u16 val;
 	u16 temp;
 	u32 actual_phy_selection;
@@ -5457,11 +5450,16 @@
 	/* This is just for MDIO_CTL_REG_84823_MEDIA register. */
 
 	msleep(1);
+	if (CHIP_IS_E2(bp))
+		port = BP_PATH(bp);
+	else
+		port = params->port;
 	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
 		       MISC_REGISTERS_GPIO_OUTPUT_HIGH,
 		       port);
-	msleep(200); /* 100 is not enough */
-
+	bnx2x_wait_reset_complete(bp, phy);
+	/* Wait for GPHY to come out of reset */
+	msleep(50);
 	/* BCM84823 requires that XGXS links up first @ 10G for normal
 	   behavior */
 	temp = vars->line_speed;
@@ -5637,7 +5625,11 @@
 				   struct link_params *params)
 {
 	struct bnx2x *bp = params->bp;
-	u8 port = params->port;
+	u8 port;
+	if (CHIP_IS_E2(bp))
+		port = BP_PATH(bp);
+	else
+		port = params->port;
 	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
 		       MISC_REGISTERS_GPIO_OUTPUT_LOW,
 		       port);
@@ -6944,7 +6928,7 @@
 		     u8 reset_ext_phy)
 {
 	struct bnx2x *bp = params->bp;
-	u8 phy_index, port = params->port;
+	u8 phy_index, port = params->port, clear_latch_ind = 0;
 	DP(NETIF_MSG_LINK, "Resetting the link of port %d\n", port);
 	/* disable attentions */
 	vars->link_status = 0;
@@ -6982,9 +6966,18 @@
 				params->phy[phy_index].link_reset(
 					&params->phy[phy_index],
 					params);
+			if (params->phy[phy_index].flags &
+			    FLAGS_REARM_LATCH_SIGNAL)
+				clear_latch_ind = 1;
 		}
 	}
 
+	if (clear_latch_ind) {
+		/* Clear latching indication */
+		bnx2x_rearm_latch_signal(bp, port, 0);
+		bnx2x_bits_dis(bp, NIG_REG_LATCH_BC_0 + port*4,
+			       1 << NIG_LATCH_BC_ENABLE_MI_INT);
+	}
 	if (params->phy[INT_PHY].link_reset)
 		params->phy[INT_PHY].link_reset(
 			&params->phy[INT_PHY], params);
@@ -7024,6 +6999,7 @@
 	s8 port;
 	s8 port_of_path = 0;
 
+	bnx2x_ext_phy_hw_reset(bp, 0);
 	/* PART1 - Reset both phys */
 	for (port = PORT_MAX - 1; port >= PORT_0; port--) {
 		u32 shmem_base, shmem2_base;
@@ -7047,7 +7021,8 @@
 			return -EINVAL;
 		}
 		/* disable attentions */
-		bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
+		bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 +
+			       port_of_path*4,
 			       (NIG_MASK_XGXS0_LINK_STATUS |
 				NIG_MASK_XGXS0_LINK10G |
 				NIG_MASK_SERDES0_LINK_STATUS |
@@ -7159,7 +7132,7 @@
 	       (1<<(MISC_REGISTERS_GPIO_3 + MISC_REGISTERS_GPIO_PORT_SHIFT)));
 	REG_WR(bp, MISC_REG_GPIO_EVENT_EN, val);
 
-	bnx2x_ext_phy_hw_reset(bp, 1);
+	bnx2x_ext_phy_hw_reset(bp, 0);
 	msleep(5);
 	for (port = 0; port < PORT_MAX; port++) {
 		u32 shmem_base, shmem2_base;

drivers/net/caif/caif_spi.c | +42 -15
@@ -33,6 +33,9 @@
 MODULE_AUTHOR("Daniel Martensson<daniel.martensson@stericsson.com>");
 MODULE_DESCRIPTION("CAIF SPI driver");
 
+/* Returns the number of padding bytes for alignment. */
+#define PAD_POW2(x, pow) ((((x)&((pow)-1))==0) ? 0 : (((pow)-((x)&((pow)-1)))))
+
 static int spi_loop;
 module_param(spi_loop, bool, S_IRUGO);
 MODULE_PARM_DESC(spi_loop, "SPI running in loopback mode.");
@@ -44,7 +44,10 @@
 module_param(spi_frm_align, int, S_IRUGO);
 MODULE_PARM_DESC(spi_frm_align, "SPI frame alignment.");
 
-/* SPI padding options. */
+/*
+ * SPI padding options.
+ * Warning: must be a base of 2 (& operation used) and can not be zero !
+ */
 module_param(spi_up_head_align, int, S_IRUGO);
 MODULE_PARM_DESC(spi_up_head_align, "SPI uplink head alignment.");
 
@@ -246,15 +240,13 @@
 static const struct file_operations dbgfs_state_fops = {
 	.open = dbgfs_open,
 	.read = dbgfs_state,
-	.owner = THIS_MODULE,
-	.llseek = default_llseek,
+	.owner = THIS_MODULE
 };
 
 static const struct file_operations dbgfs_frame_fops = {
 	.open = dbgfs_open,
 	.read = dbgfs_frame,
-	.owner = THIS_MODULE,
-	.llseek = default_llseek,
+	.owner = THIS_MODULE
 };
 
 static inline void dev_debugfs_add(struct cfspi *cfspi)
@@ -341,6 +337,9 @@
 	u8 *dst = buf;
 	caif_assert(buf);
 
+	if (cfspi->slave && !cfspi->slave_talked)
+		cfspi->slave_talked = true;
+
 	do {
 		struct sk_buff *skb;
 		struct caif_payload_info *info;
@@ -364,8 +360,8 @@
 		 * Compute head offset i.e. number of bytes to add to
 		 * get the start of the payload aligned.
 		 */
-		if (spi_up_head_align) {
-			spad = 1 + ((info->hdr_len + 1) & spi_up_head_align);
+		if (spi_up_head_align > 1) {
+			spad = 1 + PAD_POW2((info->hdr_len + 1), spi_up_head_align);
 			*dst = (u8)(spad - 1);
 			dst += spad;
 		}
@@ -380,7 +373,7 @@
 		 * Compute tail offset i.e. number of bytes to add to
 		 * get the complete CAIF frame aligned.
 		 */
-		epad = (skb->len + spad) & spi_up_tail_align;
+		epad = PAD_POW2((skb->len + spad), spi_up_tail_align);
 		dst += epad;
 
 		dev_kfree_skb(skb);
@@ -424,14 +417,14 @@
 		 * Compute head offset i.e. number of bytes to add to
 		 * get the start of the payload aligned.
 		 */
-		if (spi_up_head_align)
-			spad = 1 + ((info->hdr_len + 1) & spi_up_head_align);
+		if (spi_up_head_align > 1)
+			spad = 1 + PAD_POW2((info->hdr_len + 1), spi_up_head_align);
 
 		/*
 		 * Compute tail offset i.e. number of bytes to add to
 		 * get the complete CAIF frame aligned.
 		 */
-		epad = (skb->len + spad) & spi_up_tail_align;
+		epad = PAD_POW2((skb->len + spad), spi_up_tail_align);
 
 		if ((skb->len + spad + epad + frm_len) <= CAIF_MAX_SPI_FRAME) {
 			skb_queue_tail(&cfspi->chead, skb);
@@ -440,6 +433,7 @@
 		} else {
 			/* Put back packet. */
 			skb_queue_head(&cfspi->qhead, skb);
+			break;
 		}
 	} while (pkts <= CAIF_MAX_SPI_PKTS);
 
@@ -461,6 +453,15 @@
 {
 	struct cfspi *cfspi = (struct cfspi *)ifc->priv;
 
+	/*
+	 * The slave device is the master on the link. Interrupts before the
+	 * slave has transmitted are considered spurious.
+	 */
+	if (cfspi->slave && !cfspi->slave_talked) {
+		printk(KERN_WARNING "CFSPI: Spurious SS interrupt.\n");
+		return;
+	}
+
 	if (!in_interrupt())
 		spin_lock(&cfspi->lock);
 	if (assert) {
@@ -482,7 +465,8 @@
 		spin_unlock(&cfspi->lock);
 
 	/* Wake up the xfer thread. */
-	wake_up_interruptible(&cfspi->wait);
+	if (assert)
+		wake_up_interruptible(&cfspi->wait);
 }
 
 static void cfspi_xfer_done_cb(struct cfspi_ifc *ifc)
@@ -541,7 +523,7 @@
 		 * Compute head offset i.e. number of bytes added to
 		 * get the start of the payload aligned.
 		 */
-		if (spi_down_head_align) {
+		if (spi_down_head_align > 1) {
 			spad = 1 + *src;
 			src += spad;
 		}
@@ -582,7 +564,7 @@
 		 * Compute tail offset i.e. number of bytes added to
 		 * get the complete CAIF frame aligned.
 		 */
-		epad = (pkt_len + spad) & spi_down_tail_align;
+		epad = PAD_POW2((pkt_len + spad), spi_down_tail_align);
 		src += epad;
 	} while ((src - buf) < len);
 
@@ -643,10 +625,19 @@
 	cfspi->ndev = ndev;
 	cfspi->pdev = pdev;
 
-	/* Set flow info */
+	/* Set flow info. */
 	cfspi->flow_off_sent = 0;
 	cfspi->qd_low_mark = LOW_WATER_MARK;
 	cfspi->qd_high_mark = HIGH_WATER_MARK;
+
+	/* Set slave info. */
+	if (!strncmp(cfspi_spi_driver.driver.name, "cfspi_sspi", 10)) {
+		cfspi->slave = true;
+		cfspi->slave_talked = false;
+	} else {
+		cfspi->slave = false;
+		cfspi->slave_talked = false;
+	}
 
 	/* Assign the SPI device. */
 	cfspi->dev = dev;

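Note: the core of the padding bugfix is that the old code computed `len & align` (a remainder, with the module parameters doubling as bitmasks) where it needed the number of bytes to the next alignment boundary. A stand-alone, compilable check of the new macro's arithmetic (userspace sketch; the macro body is copied from the hunk above):

    #include <stdio.h>

    #define PAD_POW2(x, pow) ((((x)&((pow)-1))==0) ? 0 : (((pow)-((x)&((pow)-1)))))

    int main(void)
    {
            /* 5 bytes at 4-byte alignment need 3 padding bytes ... */
            printf("PAD_POW2(5, 4) = %d\n", PAD_POW2(5, 4));  /* 3 */
            /* ... an already aligned length needs none ... */
            printf("PAD_POW2(8, 4) = %d\n", PAD_POW2(8, 4));  /* 0 */
            /* ... while the old mask-style expression gave the remainder. */
            printf("old (5 & 3)    = %d\n", 5 & 3);           /* 1, not 3 */
            return 0;
    }
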
drivers/net/caif/caif_spi_slave.c | +9 -4
@@ -36,10 +36,15 @@
 #endif
 
 int spi_frm_align = 2;
-int spi_up_head_align = 1;
-int spi_up_tail_align;
-int spi_down_head_align = 3;
-int spi_down_tail_align = 1;
+
+/*
+ * SPI padding options.
+ * Warning: must be a base of 2 (& operation used) and can not be zero !
+ */
+int spi_up_head_align = 1 << 1;
+int spi_up_tail_align = 1 << 0;
+int spi_down_head_align = 1 << 2;
+int spi_down_tail_align = 1 << 1;
 
 #ifdef CONFIG_DEBUG_FS
 static inline void debugfs_store_prev(struct cfspi *cfspi)

drivers/net/cxgb3/cxgb3_main.c | -1
@@ -3341,7 +3341,6 @@
 			adapter->name = adapter->port[i]->name;
 
 			__set_bit(i, &adapter->registered_device_map);
-			netif_tx_stop_all_queues(adapter->port[i]);
 		}
 	}
 	if (!adapter->registered_device_map) {

drivers/net/cxgb4/cxgb4_main.c | -1
@@ -3736,7 +3736,6 @@
 
 			__set_bit(i, &adapter->registered_device_map);
 			adapter->chan_map[adap2pinfo(adapter, i)->tx_chan] = i;
-			netif_tx_stop_all_queues(adapter->port[i]);
 		}
 	}
 	if (!adapter->registered_device_map) {

drivers/net/cxgb4vf/cxgb4vf_main.c | -1
@@ -2600,7 +2600,6 @@
 		pi->xact_addr_filt = -1;
 		pi->rx_offload = RX_CSO;
 		netif_carrier_off(netdev);
-		netif_tx_stop_all_queues(netdev);
 		netdev->irq = pdev->irq;
 
 		netdev->features = (NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |

drivers/net/ibm_newemac/core.c | -1
@@ -2871,7 +2871,6 @@
 	SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);
 
 	netif_carrier_off(ndev);
-	netif_stop_queue(ndev);
 
 	err = register_netdev(ndev);
 	if (err) {

drivers/net/jme.c | -4
@@ -2955,11 +2955,7 @@
 	 * Tell stack that we are not ready to work until open()
 	 */
 	netif_carrier_off(netdev);
-	netif_stop_queue(netdev);
 
-	/*
-	 * Register netdev
-	 */
 	rc = register_netdev(netdev);
 	if (rc) {
 		pr_err("Cannot register net device\n");

drivers/net/netxen/netxen_nic_main.c | -3
@@ -41,9 +41,6 @@
 MODULE_DESCRIPTION("QLogic/NetXen (1/10) GbE Converged Ethernet Driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(NETXEN_NIC_LINUX_VERSIONID);
-MODULE_FIRMWARE(NX_P2_MN_ROMIMAGE_NAME);
-MODULE_FIRMWARE(NX_P3_CT_ROMIMAGE_NAME);
-MODULE_FIRMWARE(NX_P3_MN_ROMIMAGE_NAME);
 MODULE_FIRMWARE(NX_UNIFIED_ROMIMAGE_NAME);
 
 char netxen_nic_driver_name[] = "netxen_nic";

drivers/net/qlcnic/qlcnic_main.c | -1
@@ -1450,7 +1450,6 @@
 	netdev->irq = adapter->msix_entries[0].vector;
 
 	netif_carrier_off(netdev);
-	netif_stop_queue(netdev);
 
 	err = register_netdev(netdev);
 	if (err) {

drivers/net/smsc911x.h | +1 -1
@@ -22,7 +22,7 @@
 #define __SMSC911X_H__
 
 #define TX_FIFO_LOW_THRESHOLD	((u32)1600)
-#define SMSC911X_EEPROM_SIZE	((u32)7)
+#define SMSC911X_EEPROM_SIZE	((u32)128)
 #define USE_DEBUG		0
 
 /* This is the maximum number of packets to be received every

drivers/net/tulip/de2104x.c | -1
@@ -2021,7 +2021,6 @@
 	de->media_timer.data = (unsigned long) de;
 
 	netif_carrier_off(dev);
-	netif_stop_queue(dev);
 
 	/* wake up device, assign resources */
 	rc = pci_enable_device(pdev);

drivers/net/usb/usbnet.c | +11
@@ -45,6 +45,7 @@
 #include <linux/usb/usbnet.h>
 #include <linux/slab.h>
 #include <linux/kernel.h>
+#include <linux/pm_runtime.h>
 
 #define DRIVER_VERSION		"22-Aug-2005"
 
@@ -1274,6 +1273,16 @@
 	struct usb_device		*xdev;
 	int				status;
 	const char			*name;
+	struct usb_driver	*driver = to_usb_driver(udev->dev.driver);
+
+	/* usbnet already took usb runtime pm, so have to enable the feature
+	 * for usb interface, otherwise usb_autopm_get_interface may return
+	 * failure if USB_SUSPEND(RUNTIME_PM) is enabled.
+	 */
+	if (!driver->supports_autosuspend) {
+		driver->supports_autosuspend = 1;
+		pm_runtime_enable(&udev->dev);
+	}
 
 	name = udev->dev.driver->name;
 	info = (struct driver_info *) prod->driver_info;

drivers/usb/gadget/u_ether.c | -1
@@ -811,7 +811,6 @@
 		INFO(dev, "MAC %pM\n", net->dev_addr);
 		INFO(dev, "HOST MAC %pM\n", dev->host_mac);
 
-		netif_stop_queue(net);
 		the_dev = dev;
 	}
 

include/net/caif/caif_dev.h | +2 -2
@@ -28,7 +28,7 @@
  * @sockaddr:		Socket address to connect.
  * @priority:		Priority of the connection.
  * @link_selector:	Link selector (high bandwidth or low latency)
- * @link_name:		Name of the CAIF Link Layer to use.
+ * @ifindex:		kernel index of the interface.
 * @param:		Connect Request parameters (CAIF_SO_REQ_PARAM).
 *
 * This struct is used when connecting a CAIF channel.
@@ -39,7 +39,7 @@
 	struct sockaddr_caif sockaddr;
 	enum caif_channel_priority priority;
 	enum caif_link_selector link_selector;
-	char link_name[16];
+	int ifindex;
 	struct caif_param param;
 };
 

include/net/caif/caif_spi.h | +2
@@ -121,6 +121,8 @@
 	wait_queue_head_t wait;
 	spinlock_t lock;
 	bool flow_stop;
+	bool slave;
+	bool slave_talked;
 #ifdef CONFIG_DEBUG_FS
 	enum cfspi_state dbg_state;
 	u16 pcmd;

include/net/caif/cfcnfg.h | +4 -4
@@ -139,10 +139,10 @@
 			    enum cfcnfg_phy_preference phy_pref);
 
 /**
- * cfcnfg_get_named() - Get the Physical Identifier of CAIF Link Layer
+ * cfcnfg_get_id_from_ifi() - Get the Physical Identifier of ifindex,
+ *				it matches caif physical id with the kernel interface id.
 * @cnfg:	Configuration object
- * @name:	Name of the Physical Layer (Caif Link Layer)
+ * @ifi:	ifindex obtained from socket.c bindtodevice.
 */
-int cfcnfg_get_named(struct cfcnfg *cnfg, char *name);
-
+int cfcnfg_get_id_from_ifi(struct cfcnfg *cnfg, int ifi);
 #endif /* CFCNFG_H_ */

include/net/netlink.h | +1 -1
@@ -384,7 +384,7 @@
 *
 * Returns the first attribute which matches the specified type.
 */
-static inline struct nlattr *nlmsg_find_attr(struct nlmsghdr *nlh,
+static inline struct nlattr *nlmsg_find_attr(const struct nlmsghdr *nlh,
 					     int hdrlen, int attrtype)
 {
 	return nla_find(nlmsg_attrdata(nlh, hdrlen),

net/caif/caif_config_util.c | +11 -4
@@ -16,11 +16,18 @@
 {
 	struct dev_info *dev_info;
 	enum cfcnfg_phy_preference pref;
-	memset(l, 0, sizeof(*l));
-	l->priority = s->priority;
+	int res;
 
-	if (s->link_name[0] != '\0')
-		l->phyid = cfcnfg_get_named(cnfg, s->link_name);
+	memset(l, 0, sizeof(*l));
+	/* In caif protocol low value is high priority */
+	l->priority = CAIF_PRIO_MAX - s->priority + 1;
+
+	if (s->ifindex != 0){
+		res = cfcnfg_get_id_from_ifi(cnfg, s->ifindex);
+		if (res < 0)
+			return res;
+		l->phyid = res;
+	}
 	else {
 		switch (s->link_selector) {
 		case CAIF_LINK_HIGH_BANDW:

net/caif/caif_dev.c | +2
@@ -307,6 +307,8 @@
 
 	case NETDEV_UNREGISTER:
 		caifd = caif_get(dev);
+		if (caifd == NULL)
+			break;
 		netdev_info(dev, "unregister\n");
 		atomic_set(&caifd->state, what);
 		caif_device_destroy(dev);

net/caif/caif_socket.c | +15 -30
@@ -716,8 +716,7 @@
 {
 	struct sock *sk = sock->sk;
 	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
-	int prio, linksel;
-	struct ifreq ifreq;
+	int linksel;
 
 	if (cf_sk->sk.sk_socket->state != SS_UNCONNECTED)
 		return -ENOPROTOOPT;
@@ -731,33 +732,6 @@
 			return -EINVAL;
 		lock_sock(&(cf_sk->sk));
 		cf_sk->conn_req.link_selector = linksel;
-		release_sock(&cf_sk->sk);
-		return 0;
-
-	case SO_PRIORITY:
-		if (lvl != SOL_SOCKET)
-			goto bad_sol;
-		if (ol < sizeof(int))
-			return -EINVAL;
-		if (copy_from_user(&prio, ov, sizeof(int)))
-			return -EINVAL;
-		lock_sock(&(cf_sk->sk));
-		cf_sk->conn_req.priority = prio;
-		release_sock(&cf_sk->sk);
-		return 0;
-
-	case SO_BINDTODEVICE:
-		if (lvl != SOL_SOCKET)
-			goto bad_sol;
-		if (ol < sizeof(struct ifreq))
-			return -EINVAL;
-		if (copy_from_user(&ifreq, ov, sizeof(ifreq)))
-			return -EFAULT;
-		lock_sock(&(cf_sk->sk));
-		strncpy(cf_sk->conn_req.link_name, ifreq.ifr_name,
-			sizeof(cf_sk->conn_req.link_name));
-		cf_sk->conn_req.link_name
-			[sizeof(cf_sk->conn_req.link_name)-1] = 0;
 		release_sock(&cf_sk->sk);
 		return 0;
 
@@ -852,6 +880,18 @@
 	sock->state = SS_CONNECTING;
 	sk->sk_state = CAIF_CONNECTING;
 
+	/* Check priority value comming from socket */
+	/* if priority value is out of range it will be ajusted */
+	if (cf_sk->sk.sk_priority > CAIF_PRIO_MAX)
+		cf_sk->conn_req.priority = CAIF_PRIO_MAX;
+	else if (cf_sk->sk.sk_priority < CAIF_PRIO_MIN)
+		cf_sk->conn_req.priority = CAIF_PRIO_MIN;
+	else
+		cf_sk->conn_req.priority = cf_sk->sk.sk_priority;
+
+	/*ifindex = id of the interface.*/
+	cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;
+
 	dbfs_atomic_inc(&cnt.num_connect_req);
 	cf_sk->layer.receive = caif_sktrecv_cb;
 	err = caif_connect_client(&cf_sk->conn_req,
@@ -889,6 +905,7 @@
 	cf_sk->maxframe = mtu - (headroom + tailroom);
 	if (cf_sk->maxframe < 1) {
 		pr_warn("CAIF Interface MTU too small (%d)\n", dev->mtu);
+		err = -ENODEV;
 		goto out;
 	}
 
@@ -1127,7 +1142,7 @@
 	set_rx_flow_on(cf_sk);
 
 	/* Set default options on configuration */
-	cf_sk->conn_req.priority = CAIF_PRIO_NORMAL;
+	cf_sk->sk.sk_priority= CAIF_PRIO_NORMAL;
 	cf_sk->conn_req.link_selector = CAIF_LINK_LOW_LATENCY;
 	cf_sk->conn_req.protocol = protocol;
 	/* Increase the number of sockets created. */

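Note: after this change the removed CAIF-private setsockopt cases are served by the generic SOL_SOCKET options, which are picked up at connect() time from sk_priority and sk_bound_dev_if as shown above. A hypothetical userspace sketch (bind_caif_link() is illustrative, not part of any API):

    #include <string.h>
    #include <sys/socket.h>
    #include <net/if.h>

    static int bind_caif_link(int fd, const char *ifname, int prio)
    {
            struct ifreq ifr;

            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);

            /* Lands in sk->sk_bound_dev_if; copied into
             * conn_req.ifindex at connect() time in the hunk above. */
            if (setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE,
                           &ifr, sizeof(ifr)) < 0)
                    return -1;

            /* Lands in sk->sk_priority; clamped to the
             * [CAIF_PRIO_MIN, CAIF_PRIO_MAX] range at connect(). */
            return setsockopt(fd, SOL_SOCKET, SO_PRIORITY,
                              &prio, sizeof(prio));
    }
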
net/caif/cfcnfg.c | +7 -10
@@ -173,18 +173,15 @@
 	return NULL;
 }
 
-int cfcnfg_get_named(struct cfcnfg *cnfg, char *name)
+
+int cfcnfg_get_id_from_ifi(struct cfcnfg *cnfg, int ifi)
 {
 	int i;
-
-	/* Try to match with specified name */
-	for (i = 0; i < MAX_PHY_LAYERS; i++) {
-		if (cnfg->phy_layers[i].frm_layer != NULL
-		    && strcmp(cnfg->phy_layers[i].phy_layer->name,
-			      name) == 0)
-			return cnfg->phy_layers[i].frm_layer->id;
-	}
-	return 0;
+	for (i = 0; i < MAX_PHY_LAYERS; i++)
+		if (cnfg->phy_layers[i].frm_layer != NULL &&
+		    cnfg->phy_layers[i].ifindex == ifi)
+			return i;
+	return -ENODEV;
 }
 
 int cfcnfg_disconn_adapt_layer(struct cfcnfg *cnfg, struct cflayer *adap_layer)

net/caif/cfctrl.c | +1 -2
@@ -361,11 +361,10 @@
 	struct cfctrl_request_info *p, *tmp;
 	struct cfctrl *ctrl = container_obj(layr);
 	spin_lock(&ctrl->info_list_lock);
-	pr_warn("enter\n");
 
 	list_for_each_entry_safe(p, tmp, &ctrl->list, list) {
 		if (p->client_layer == adap_layer) {
-			pr_warn("cancel req :%d\n", p->sequence_no);
+			pr_debug("cancel req :%d\n", p->sequence_no);
 			list_del(&p->list);
 			kfree(p);
 		}

net/caif/cfdbgl.c | +14
@@ -12,6 +12,8 @@
 #include <net/caif/cfsrvl.h>
 #include <net/caif/cfpkt.h>
 
+#define container_obj(layr) ((struct cfsrvl *) layr)
+
 static int cfdbgl_receive(struct cflayer *layr, struct cfpkt *pkt);
 static int cfdbgl_transmit(struct cflayer *layr, struct cfpkt *pkt);
 
@@ -40,5 +38,17 @@
 
 static int cfdbgl_transmit(struct cflayer *layr, struct cfpkt *pkt)
 {
+	struct cfsrvl *service = container_obj(layr);
+	struct caif_payload_info *info;
+	int ret;
+
+	if (!cfsrvl_ready(service, &ret))
+		return ret;
+
+	/* Add info for MUX-layer to route the packet out */
+	info = cfpkt_info(pkt);
+	info->channel_id = service->layer.id;
+	info->dev_info = &service->dev_info;
+
 	return layr->dn->transmit(layr->dn, pkt);
 }

net/caif/cfrfml.c | +1 -1
@@ -193,7 +193,7 @@
 
 static int cfrfml_transmit_segment(struct cfrfml *rfml, struct cfpkt *pkt)
 {
-	caif_assert(cfpkt_getlen(pkt) >= rfml->fragment_size);
+	caif_assert(cfpkt_getlen(pkt) < rfml->fragment_size);
 
 	/* Add info for MUX-layer to route the packet out. */
 	cfpkt_info(pkt)->channel_id = rfml->serv.layer.id;

net/core/dev.c | +1 -1
@@ -2131,7 +2131,7 @@
 	} else {
 		struct sock *sk = skb->sk;
 		queue_index = sk_tx_queue_get(sk);
-		if (queue_index < 0) {
+		if (queue_index < 0 || queue_index >= dev->real_num_tx_queues) {
 
 			queue_index = 0;
 			if (dev->real_num_tx_queues > 1)

net/ipv4/fib_lookup.h | +1 -4
@@ -47,11 +47,8 @@
 static inline void fib_result_assign(struct fib_result *res,
 				     struct fib_info *fi)
 {
-	if (res->fi != NULL)
-		fib_info_put(res->fi);
+	/* we used to play games with refcounts, but we now use RCU */
 	res->fi = fi;
-	if (fi != NULL)
-		atomic_inc(&fi->fib_clntref);
 }
 
 #endif /* _FIB_LOOKUP_H */

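Note: the dropped get/put pair leans on the RCU rule that an object found under rcu_read_lock() stays valid until the matching rcu_read_unlock(), so a result that never escapes the read-side section needs no refcount. A generic kernel-context sketch of that reader pattern (placeholder names, not fib code):

    rcu_read_lock();
    obj = rcu_dereference(table[idx]);   /* lookup under RCU */
    if (obj)
            use(obj);    /* valid here without a get/put pair */
    rcu_read_unlock();   /* obj may be freed once a grace period elapses */
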
net/ipv4/inet_diag.c | +16 -11
@@ -490,9 +490,11 @@
 {
 	struct inet_diag_req *r = NLMSG_DATA(cb->nlh);
 
-	if (cb->nlh->nlmsg_len > 4 + NLMSG_SPACE(sizeof(*r))) {
+	if (nlmsg_attrlen(cb->nlh, sizeof(*r))) {
 		struct inet_diag_entry entry;
-		struct rtattr *bc = (struct rtattr *)(r + 1);
+		const struct nlattr *bc = nlmsg_find_attr(cb->nlh,
+							  sizeof(*r),
+							  INET_DIAG_REQ_BYTECODE);
 		struct inet_sock *inet = inet_sk(sk);
 
 		entry.family = sk->sk_family;
@@ -514,7 +512,7 @@
 		entry.dport = ntohs(inet->inet_dport);
 		entry.userlocks = sk->sk_userlocks;
 
-		if (!inet_diag_bc_run(RTA_DATA(bc), RTA_PAYLOAD(bc), &entry))
+		if (!inet_diag_bc_run(nla_data(bc), nla_len(bc), &entry))
 			return 0;
 	}
 
@@ -529,9 +527,11 @@
 {
 	struct inet_diag_req *r = NLMSG_DATA(cb->nlh);
 
-	if (cb->nlh->nlmsg_len > 4 + NLMSG_SPACE(sizeof(*r))) {
+	if (nlmsg_attrlen(cb->nlh, sizeof(*r))) {
 		struct inet_diag_entry entry;
-		struct rtattr *bc = (struct rtattr *)(r + 1);
+		const struct nlattr *bc = nlmsg_find_attr(cb->nlh,
+							  sizeof(*r),
+							  INET_DIAG_REQ_BYTECODE);
 
 		entry.family = tw->tw_family;
 #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
@@ -552,7 +548,7 @@
 		entry.dport = ntohs(tw->tw_dport);
 		entry.userlocks = 0;
 
-		if (!inet_diag_bc_run(RTA_DATA(bc), RTA_PAYLOAD(bc), &entry))
+		if (!inet_diag_bc_run(nla_data(bc), nla_len(bc), &entry))
 			return 0;
 	}
 
@@ -622,7 +618,7 @@
 	struct inet_diag_req *r = NLMSG_DATA(cb->nlh);
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct listen_sock *lopt;
-	struct rtattr *bc = NULL;
+	const struct nlattr *bc = NULL;
 	struct inet_sock *inet = inet_sk(sk);
 	int j, s_j;
 	int reqnum, s_reqnum;
@@ -642,8 +638,9 @@
 	if (!lopt || !lopt->qlen)
 		goto out;
 
-	if (cb->nlh->nlmsg_len > 4 + NLMSG_SPACE(sizeof(*r))) {
-		bc = (struct rtattr *)(r + 1);
+	if (nlmsg_attrlen(cb->nlh, sizeof(*r))) {
+		bc = nlmsg_find_attr(cb->nlh, sizeof(*r),
+				     INET_DIAG_REQ_BYTECODE);
 		entry.sport = inet->inet_num;
 		entry.userlocks = sk->sk_userlocks;
 	}
@@ -677,8 +672,8 @@
 				&ireq->rmt_addr;
 			entry.dport = ntohs(ireq->rmt_port);
 
-			if (!inet_diag_bc_run(RTA_DATA(bc),
-					      RTA_PAYLOAD(bc), &entry))
+			if (!inet_diag_bc_run(nla_data(bc),
+					      nla_len(bc), &entry))
 				continue;
 		}
 

net/ipv4/netfilter/arp_tables.c | +1
@@ -927,6 +927,7 @@
 		private = &tmp;
 	}
 #endif
+	memset(&info, 0, sizeof(info));
 	info.valid_hooks = t->valid_hooks;
 	memcpy(info.hook_entry, private->hook_entry,
 	       sizeof(info.hook_entry));

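Note: this memset() and the identical ones in the ip_tables and ip6_tables hunks below close a kernel-to-user infoleak: the getinfo structure lives on the kernel stack and is later copied to userspace wholesale, so padding bytes and any member not explicitly assigned would carry stale stack contents. A generic kernel-context sketch of the pattern (struct getinfo_like and the variable names are stand-ins, not the xtables types):

    struct getinfo_like info;           /* uninitialized kernel stack */

    memset(&info, 0, sizeof(info));     /* zero padding + unset fields */
    info.valid_hooks = t->valid_hooks;  /* then fill the real members */
    /* ... */
    if (copy_to_user(user, &info, sizeof(info)))
            return -EFAULT;             /* now leaks no stack bytes */
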
net/ipv4/netfilter/ip_tables.c | +1
@@ -1124,6 +1124,7 @@
 		private = &tmp;
 	}
 #endif
+	memset(&info, 0, sizeof(info));
 	info.valid_hooks = t->valid_hooks;
 	memcpy(info.hook_entry, private->hook_entry,
 	       sizeof(info.hook_entry));

net/ipv4/netfilter/nf_nat_core.c | +20 -20
@@ -47,26 +47,6 @@
 	return rcu_dereference(nf_nat_protos[protonum]);
 }
 
-static const struct nf_nat_protocol *
-nf_nat_proto_find_get(u_int8_t protonum)
-{
-	const struct nf_nat_protocol *p;
-
-	rcu_read_lock();
-	p = __nf_nat_proto_find(protonum);
-	if (!try_module_get(p->me))
-		p = &nf_nat_unknown_protocol;
-	rcu_read_unlock();
-
-	return p;
-}
-
-static void
-nf_nat_proto_put(const struct nf_nat_protocol *p)
-{
-	module_put(p->me);
-}
-
 /* We keep an extra hash for each conntrack, for fast searching. */
 static inline unsigned int
 hash_by_src(const struct net *net, u16 zone,
@@ -567,6 +587,26 @@
 
 #include <linux/netfilter/nfnetlink.h>
 #include <linux/netfilter/nfnetlink_conntrack.h>
+
+static const struct nf_nat_protocol *
+nf_nat_proto_find_get(u_int8_t protonum)
+{
+	const struct nf_nat_protocol *p;
+
+	rcu_read_lock();
+	p = __nf_nat_proto_find(protonum);
+	if (!try_module_get(p->me))
+		p = &nf_nat_unknown_protocol;
+	rcu_read_unlock();
+
+	return p;
+}
+
+static void
+nf_nat_proto_put(const struct nf_nat_protocol *p)
+{
+	module_put(p->me);
+}
 
 static const struct nla_policy protonat_nla_policy[CTA_PROTONAT_MAX+1] = {
 	[CTA_PROTONAT_PORT_MIN]	= { .type = NLA_U16 },

net/ipv6/netfilter/ip6_tables.c | +1
@@ -1137,6 +1137,7 @@
 		private = &tmp;
 	}
 #endif
+	memset(&info, 0, sizeof(info));
 	info.valid_hooks = t->valid_hooks;
 	memcpy(info.hook_entry, private->hook_entry,
 	       sizeof(info.hook_entry));

net/ipv6/route.c | +2
@@ -2741,6 +2741,7 @@
 	kfree(net->ipv6.ip6_prohibit_entry);
 	kfree(net->ipv6.ip6_blk_hole_entry);
 #endif
+	dst_entries_destroy(&net->ipv6.ip6_dst_ops);
 }
 
 static struct pernet_operations ip6_route_net_ops = {
@@ -2833,5 +2832,6 @@
 	xfrm6_fini();
 	fib6_gc_cleanup();
 	unregister_pernet_subsys(&ip6_route_net_ops);
+	dst_entries_destroy(&ip6_dst_blackhole_ops);
 	kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
 }

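Note: these two calls pair up with the percpu_counter conversion of dst accounting ("net dst: fix percpu_counter list corruption and poison overwritten" in the shortlog). A percpu_counter links itself into a global list when initialized (with CPU hotplug enabled), so every init needs a matching destroy before the memory goes away, or the list ends up pointing into freed, poisoned memory. Generic sketch of the pairing, assuming the two-argument init of this kernel era:

    struct percpu_counter c;

    if (percpu_counter_init(&c, 0))     /* links c into the global list */
            return -ENOMEM;
    /* ... percpu_counter_add()/percpu_counter_read() ... */
    percpu_counter_destroy(&c);         /* unlink before c is freed */
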
net/l2tp/l2tp_debugfs.c | +1 -1
@@ -249,7 +249,7 @@
 	struct seq_file *seq;
 	int rc = -ENOMEM;
 
-	pd = kzalloc(GFP_KERNEL, sizeof(*pd));
+	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
 	if (pd == NULL)
 		goto out;
 

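Note: a classic swapped-argument bug; kzalloc() takes the size first and the GFP flags second:

    void *kzalloc(size_t size, gfp_t flags);

The old call therefore requested GFP_KERNEL's numeric value as the allocation size and passed the struct size as allocation flags.
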
net/netfilter/nf_conntrack_core.c | +2 -1
@@ -1312,7 +1312,8 @@
 	if (!hash) {
 		*vmalloced = 1;
 		printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n");
-		hash = __vmalloc(sz, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
+		hash = __vmalloc(sz, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
+				 PAGE_KERNEL);
 	}
 
 	if (hash && nulls)

net/netfilter/nf_conntrack_proto.c | +6
@@ -292,6 +292,12 @@
 
 		for (i = 0; i < MAX_NF_CT_PROTO; i++)
 			proto_array[i] = &nf_conntrack_l4proto_generic;
+
+		/* Before making proto_array visible to lockless readers,
+		 * we must make sure its content is committed to memory.
+		 */
+		smp_wmb();
+
 		nf_ct_protos[l4proto->l3proto] = proto_array;
 	} else if (nf_ct_protos[l4proto->l3proto][l4proto->l4proto] !=
 		   &nf_conntrack_l4proto_generic) {

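Note: the barrier implements the write side of the classic lockless init-then-publish pattern. A generic kernel-context sketch (placeholder names; later kernels typically express this with rcu_assign_pointer(), which folds in the barrier):

    p->a = 1;                 /* initialize the object ... */
    p->b = 2;
    smp_wmb();                /* ... order those stores ... */
    global_ptr = p;           /* ... before publishing the pointer */
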
net/rds/loop.c | +4
@@ -134,8 +134,12 @@
 static void rds_loop_conn_free(void *arg)
 {
 	struct rds_loop_connection *lc = arg;
+	unsigned long flags;
+
 	rdsdebug("lc %p\n", lc);
+	spin_lock_irqsave(&loop_conns_lock, flags);
 	list_del(&lc->loop_node);
+	spin_unlock_irqrestore(&loop_conns_lock, flags);
 	kfree(lc);
 }
 

net/rds/tcp.c | +6
@@ -221,7 +221,13 @@
 static void rds_tcp_conn_free(void *arg)
 {
 	struct rds_tcp_connection *tc = arg;
+	unsigned long flags;
 	rdsdebug("freeing tc %p\n", tc);
+
+	spin_lock_irqsave(&rds_tcp_conn_lock, flags);
+	list_del(&tc->t_tcp_node);
+	spin_unlock_irqrestore(&rds_tcp_conn_lock, flags);
+
 	kmem_cache_free(rds_tcp_conn_slab, tc);
 }
 

net/sched/cls_cgroup.c | -2
@@ -34,8 +34,6 @@
 	.populate	= cgrp_populate,
 #ifdef CONFIG_NET_CLS_CGROUP
 	.subsys_id	= net_cls_subsys_id,
-#else
-#define net_cls_subsys_id net_cls_subsys.subsys_id
 #endif
 	.module		= THIS_MODULE,
 };

net/sched/em_text.c | +2 -1
@@ -103,7 +103,8 @@
 
 static void em_text_destroy(struct tcf_proto *tp, struct tcf_ematch *m)
 {
-	textsearch_destroy(EM_TEXT_PRIV(m)->config);
+	if (EM_TEXT_PRIV(m) && EM_TEXT_PRIV(m)->config)
+		textsearch_destroy(EM_TEXT_PRIV(m)->config);
 }
 
 static int em_text_dump(struct sk_buff *skb, struct tcf_ematch *m)

net/x25/x25_facilities.c | +4 -4
@@ -134,15 +134,15 @@
 		case X25_FAC_CLASS_D:
 			switch (*p) {
 			case X25_FAC_CALLING_AE:
-				if (p[1] > X25_MAX_DTE_FACIL_LEN)
-					break;
+				if (p[1] > X25_MAX_DTE_FACIL_LEN || p[1] <= 1)
+					return 0;
 				dte_facs->calling_len = p[2];
 				memcpy(dte_facs->calling_ae, &p[3], p[1] - 1);
 				*vc_fac_mask |= X25_MASK_CALLING_AE;
 				break;
 			case X25_FAC_CALLED_AE:
-				if (p[1] > X25_MAX_DTE_FACIL_LEN)
-					break;
+				if (p[1] > X25_MAX_DTE_FACIL_LEN || p[1] <= 1)
+					return 0;
 				dte_facs->called_len = p[2];
 				memcpy(dte_facs->called_ae, &p[3], p[1] - 1);
 				*vc_fac_mask |= X25_MASK_CALLED_AE;

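Note: p[1] is an attacker-controlled facility length and p[1] - 1 feeds memcpy()'s size_t count, so a value of 0 wraps around to SIZE_MAX; the new `<= 1` check (which aborts parsing with `return 0`, failing the call in the x25_in.c hunk below) closes the resulting memory corruption named in the shortlog. A stand-alone illustration of the wrap:

    #include <stdio.h>
    #include <stddef.h>

    int main(void)
    {
            unsigned char len = 0;          /* facility length byte, p[1] */
            size_t n = (size_t)(len - 1);   /* 0 - 1 wraps to SIZE_MAX */
            printf("memcpy count would be %zu\n", n);
            return 0;
    }
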
net/x25/x25_in.c | +2
@@ -119,6 +119,8 @@
 					&x25->vc_facil_mask);
 		if (len > 0)
 			skb_pull(skb, len);
+		else
+			return -1;
 		/*
 		 * Copy any Call User Data.
 		 */