Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (55 commits)
sctp: fix random memory dereference with SCTP_HMAC_IDENT option.
sctp: correct bounds check in sctp_setsockopt_auth_key
wan: Missing capability checks in sbni_ioctl()
e100, fix iomap read
qeth: preallocated header account offset
qeth: l2 write unicast list to hardware
qeth: use -EOPNOTSUPP instead of -ENOTSUPP.
ibm_newemac: Don't call dev_mc_add() before device is registered
net: don't grab a mutex within a timer context in gianfar
forcedeth: fix checksum flag
net/usb/mcs7830: add set_mac_address
net/usb/mcs7830: new device IDs
[netdrvr] smc91x: fix resource removal (null ptr deref)
ibmveth: fix bad UDP checksums
[netdrvr] hso: dev_kfree_skb crash fix
[netdrvr] hso: icon 322 detection fix
atl1: disable TSO by default
atl1e: multistatement if missing braces
igb: remove 82576 quad adapter
drivers/net/skfp/ess.c: fix compile warnings
...

+413 -296
+5
arch/powerpc/include/asm/cpm2.h
··· 337 uint scc_tcrc; /* Internal */ 338 } sccp_t; 339 340 /* CPM Ethernet through SCC1. 341 */ 342 typedef struct scc_enet {
··· 337 uint scc_tcrc; /* Internal */ 338 } sccp_t; 339 340 + /* Function code bits. 341 + */ 342 + #define SCC_EB ((u_char) 0x10) /* Set big endian byte order */ 343 + #define SCC_GBL ((u_char) 0x20) /* Snooping enabled */ 344 + 345 /* CPM Ethernet through SCC1. 346 */ 347 typedef struct scc_enet {
+3 -3
drivers/net/Kconfig
··· 822 will be called smc-ultra32. 823 824 config BFIN_MAC 825 - tristate "Blackfin 527/536/537 on-chip mac support" 826 - depends on NET_ETHERNET && (BF527 || BF537 || BF536) 827 select CRC32 828 select MII 829 select PHYLIB 830 select BFIN_MAC_USE_L1 if DMA_UNCACHED_NONE 831 help 832 - This is the driver for blackfin on-chip mac device. Say Y if you want it 833 compiled into the kernel. This driver is also available as a module 834 ( = code which can be inserted in and removed from the running kernel 835 whenever you want). The module will be called bfin_mac.
··· 822 will be called smc-ultra32. 823 824 config BFIN_MAC 825 + tristate "Blackfin on-chip MAC support" 826 + depends on NET_ETHERNET && (BF526 || BF527 || BF536 || BF537) 827 select CRC32 828 select MII 829 select PHYLIB 830 select BFIN_MAC_USE_L1 if DMA_UNCACHED_NONE 831 help 832 + This is the driver for Blackfin on-chip mac device. Say Y if you want it 833 compiled into the kernel. This driver is also available as a module 834 ( = code which can be inserted in and removed from the running kernel 835 whenever you want). The module will be called bfin_mac.
+2 -1
drivers/net/atl1e/atl1e_main.c
··· 2232 2233 AT_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0); 2234 2235 - if (netif_running(netdev)) 2236 err = atl1e_request_irq(adapter); 2237 if (err) 2238 return err; 2239 2240 atl1e_reset_hw(&adapter->hw); 2241
··· 2232 2233 AT_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0); 2234 2235 + if (netif_running(netdev)) { 2236 err = atl1e_request_irq(adapter); 2237 if (err) 2238 return err; 2239 + } 2240 2241 atl1e_reset_hw(&adapter->hw); 2242
-1
drivers/net/atlx/atl1.c
··· 3022 netdev->features = NETIF_F_HW_CSUM; 3023 netdev->features |= NETIF_F_SG; 3024 netdev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX); 3025 - netdev->features |= NETIF_F_TSO; 3026 netdev->features |= NETIF_F_LLTX; 3027 3028 /*
··· 3022 netdev->features = NETIF_F_HW_CSUM; 3023 netdev->features |= NETIF_F_SG; 3024 netdev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX); 3025 netdev->features |= NETIF_F_LLTX; 3026 3027 /*
+2 -2
drivers/net/e100.c
··· 1838 if ((le16_to_cpu(rfd->command) & cb_el) && 1839 (RU_RUNNING == nic->ru_running)) 1840 1841 - if (readb(&nic->csr->scb.status) & rus_no_res) 1842 nic->ru_running = RU_SUSPENDED; 1843 return -ENODATA; 1844 } ··· 1861 if ((le16_to_cpu(rfd->command) & cb_el) && 1862 (RU_RUNNING == nic->ru_running)) { 1863 1864 - if (readb(&nic->csr->scb.status) & rus_no_res) 1865 nic->ru_running = RU_SUSPENDED; 1866 } 1867
··· 1838 if ((le16_to_cpu(rfd->command) & cb_el) && 1839 (RU_RUNNING == nic->ru_running)) 1840 1841 + if (ioread8(&nic->csr->scb.status) & rus_no_res) 1842 nic->ru_running = RU_SUSPENDED; 1843 return -ENODATA; 1844 } ··· 1861 if ((le16_to_cpu(rfd->command) & cb_el) && 1862 (RU_RUNNING == nic->ru_running)) { 1863 1864 + if (ioread8(&nic->csr->scb.status) & rus_no_res) 1865 nic->ru_running = RU_SUSPENDED; 1866 } 1867
+2 -2
drivers/net/forcedeth.c
··· 5522 if (id->driver_data & DEV_HAS_CHECKSUM) { 5523 np->rx_csum = 1; 5524 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK; 5525 - dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG; 5526 dev->features |= NETIF_F_TSO; 5527 } 5528 ··· 5835 5836 dev_printk(KERN_INFO, &pci_dev->dev, "%s%s%s%s%s%s%s%s%s%sdesc-v%u\n", 5837 dev->features & NETIF_F_HIGHDMA ? "highdma " : "", 5838 - dev->features & (NETIF_F_HW_CSUM | NETIF_F_SG) ? 5839 "csum " : "", 5840 dev->features & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX) ? 5841 "vlan " : "",
··· 5522 if (id->driver_data & DEV_HAS_CHECKSUM) { 5523 np->rx_csum = 1; 5524 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK; 5525 + dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; 5526 dev->features |= NETIF_F_TSO; 5527 } 5528 ··· 5835 5836 dev_printk(KERN_INFO, &pci_dev->dev, "%s%s%s%s%s%s%s%s%s%sdesc-v%u\n", 5837 dev->features & NETIF_F_HIGHDMA ? "highdma " : "", 5838 + dev->features & (NETIF_F_IP_CSUM | NETIF_F_SG) ? 5839 "csum " : "", 5840 dev->features & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX) ? 5841 "vlan " : "",
+8
drivers/net/fs_enet/fs_enet-main.c
··· 792 int r; 793 int err; 794 795 if (fep->fpi->use_napi) 796 napi_enable(&fep->napi); 797 ··· 1169 #ifdef CONFIG_FS_ENET_HAS_SCC 1170 { 1171 .compatible = "fsl,cpm1-scc-enet", 1172 .data = (void *)&fs_scc_ops, 1173 }, 1174 #endif
··· 792 int r; 793 int err; 794 795 + /* to initialize the fep->cur_rx,... */ 796 + /* not doing this, will cause a crash in fs_enet_rx_napi */ 797 + fs_init_bds(fep->ndev); 798 + 799 if (fep->fpi->use_napi) 800 napi_enable(&fep->napi); 801 ··· 1165 #ifdef CONFIG_FS_ENET_HAS_SCC 1166 { 1167 .compatible = "fsl,cpm1-scc-enet", 1168 + .data = (void *)&fs_scc_ops, 1169 + }, 1170 + { 1171 + .compatible = "fsl,cpm2-scc-enet", 1172 .data = (void *)&fs_scc_ops, 1173 }, 1174 #endif
+7 -1
drivers/net/fs_enet/mac-scc.c
··· 47 #include "fs_enet.h" 48 49 /*************************************************/ 50 - 51 #if defined(CONFIG_CPM1) 52 /* for a 8xx __raw_xxx's are sufficient */ 53 #define __fs_out32(addr, x) __raw_writel(x, addr) ··· 61 #define __fs_out16(addr, x) out_be16(addr, x) 62 #define __fs_in32(addr) in_be32(addr) 63 #define __fs_in16(addr) in_be16(addr) 64 #endif 65 66 /* write, read, set bits, clear bits */ ··· 263 264 /* Initialize function code registers for big-endian. 265 */ 266 W8(ep, sen_genscc.scc_rfcr, SCC_EB); 267 W8(ep, sen_genscc.scc_tfcr, SCC_EB); 268 269 /* Set maximum bytes per receive buffer. 270 * This appears to be an Ethernet frame size, not the buffer
··· 47 #include "fs_enet.h" 48 49 /*************************************************/ 50 #if defined(CONFIG_CPM1) 51 /* for a 8xx __raw_xxx's are sufficient */ 52 #define __fs_out32(addr, x) __raw_writel(x, addr) ··· 62 #define __fs_out16(addr, x) out_be16(addr, x) 63 #define __fs_in32(addr) in_be32(addr) 64 #define __fs_in16(addr) in_be16(addr) 65 + #define __fs_out8(addr, x) out_8(addr, x) 66 + #define __fs_in8(addr) in_8(addr) 67 #endif 68 69 /* write, read, set bits, clear bits */ ··· 262 263 /* Initialize function code registers for big-endian. 264 */ 265 + #ifndef CONFIG_NOT_COHERENT_CACHE 266 + W8(ep, sen_genscc.scc_rfcr, SCC_EB | SCC_GBL); 267 + W8(ep, sen_genscc.scc_tfcr, SCC_EB | SCC_GBL); 268 + #else 269 W8(ep, sen_genscc.scc_rfcr, SCC_EB); 270 W8(ep, sen_genscc.scc_tfcr, SCC_EB); 271 + #endif 272 273 /* Set maximum bytes per receive buffer. 274 * This appears to be an Ethernet frame size, not the buffer
+18 -4
drivers/net/gianfar.c
··· 105 106 static int gfar_enet_open(struct net_device *dev); 107 static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev); 108 static void gfar_timeout(struct net_device *dev); 109 static int gfar_close(struct net_device *dev); 110 struct sk_buff *gfar_new_skb(struct net_device *dev); ··· 210 spin_lock_init(&priv->txlock); 211 spin_lock_init(&priv->rxlock); 212 spin_lock_init(&priv->bflock); 213 214 platform_set_drvdata(pdev, dev); 215 ··· 1214 1215 napi_disable(&priv->napi); 1216 1217 stop_gfar(dev); 1218 1219 /* Disconnect from the PHY */ ··· 1329 return 0; 1330 } 1331 1332 - /* gfar_timeout gets called when a packet has not been 1333 * transmitted after a set amount of time. 1334 * For now, assume that clearing out all the structures, and 1335 - * starting over will fix the problem. */ 1336 - static void gfar_timeout(struct net_device *dev) 1337 { 1338 - dev->stats.tx_errors++; 1339 1340 if (dev->flags & IFF_UP) { 1341 stop_gfar(dev); ··· 1346 } 1347 1348 netif_tx_schedule_all(dev); 1349 } 1350 1351 /* Interrupt Handler for Transmit complete */
··· 105 106 static int gfar_enet_open(struct net_device *dev); 107 static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev); 108 + static void gfar_reset_task(struct work_struct *work); 109 static void gfar_timeout(struct net_device *dev); 110 static int gfar_close(struct net_device *dev); 111 struct sk_buff *gfar_new_skb(struct net_device *dev); ··· 209 spin_lock_init(&priv->txlock); 210 spin_lock_init(&priv->rxlock); 211 spin_lock_init(&priv->bflock); 212 + INIT_WORK(&priv->reset_task, gfar_reset_task); 213 214 platform_set_drvdata(pdev, dev); 215 ··· 1212 1213 napi_disable(&priv->napi); 1214 1215 + cancel_work_sync(&priv->reset_task); 1216 stop_gfar(dev); 1217 1218 /* Disconnect from the PHY */ ··· 1326 return 0; 1327 } 1328 1329 + /* gfar_reset_task gets scheduled when a packet has not been 1330 * transmitted after a set amount of time. 1331 * For now, assume that clearing out all the structures, and 1332 + * starting over will fix the problem. 1333 + */ 1334 + static void gfar_reset_task(struct work_struct *work) 1335 { 1336 + struct gfar_private *priv = container_of(work, struct gfar_private, 1337 + reset_task); 1338 + struct net_device *dev = priv->dev; 1339 1340 if (dev->flags & IFF_UP) { 1341 stop_gfar(dev); ··· 1340 } 1341 1342 netif_tx_schedule_all(dev); 1343 + } 1344 + 1345 + static void gfar_timeout(struct net_device *dev) 1346 + { 1347 + struct gfar_private *priv = netdev_priv(dev); 1348 + 1349 + dev->stats.tx_errors++; 1350 + schedule_work(&priv->reset_task); 1351 } 1352 1353 /* Interrupt Handler for Transmit complete */
+1
drivers/net/gianfar.h
··· 756 757 uint32_t msg_enable; 758 759 /* Network Statistics */ 760 struct gfar_extra_stats extra_stats; 761 };
··· 756 757 uint32_t msg_enable; 758 759 + struct work_struct reset_task; 760 /* Network Statistics */ 761 struct gfar_extra_stats extra_stats; 762 };
+3 -3
drivers/net/ibm_newemac/core.c
··· 663 if (emac_phy_gpcs(dev->phy.mode)) 664 emac_mii_reset_phy(&dev->phy); 665 666 - /* Required for Pause packet support in EMAC */ 667 - dev_mc_add(ndev, default_mcast_addr, sizeof(default_mcast_addr), 1); 668 - 669 return 0; 670 } 671 ··· 1146 emac_print_link_status(dev); 1147 } else 1148 netif_carrier_on(dev->ndev); 1149 1150 emac_configure(dev); 1151 mal_poll_add(dev->mal, &dev->commac);
··· 663 if (emac_phy_gpcs(dev->phy.mode)) 664 emac_mii_reset_phy(&dev->phy); 665 666 return 0; 667 } 668 ··· 1149 emac_print_link_status(dev); 1150 } else 1151 netif_carrier_on(dev->ndev); 1152 + 1153 + /* Required for Pause packet support in EMAC */ 1154 + dev_mc_add(ndev, default_mcast_addr, sizeof(default_mcast_addr), 1); 1155 1156 emac_configure(dev); 1157 mal_poll_add(dev->mal, &dev->commac);
+3 -2
drivers/net/ibmveth.c
··· 904 unsigned long data_dma_addr; 905 906 desc.fields.flags_len = IBMVETH_BUF_VALID | skb->len; 907 - data_dma_addr = dma_map_single(&adapter->vdev->dev, skb->data, 908 - skb->len, DMA_TO_DEVICE); 909 910 if (skb->ip_summed == CHECKSUM_PARTIAL && 911 ip_hdr(skb)->protocol != IPPROTO_TCP && skb_checksum_help(skb)) { ··· 922 buf[1] = 0; 923 } 924 925 if (dma_mapping_error(&adapter->vdev->dev, data_dma_addr)) { 926 if (!firmware_has_feature(FW_FEATURE_CMO)) 927 ibmveth_error_printk("tx: unable to map xmit buffer\n"); ··· 932 desc.fields.address = adapter->bounce_buffer_dma; 933 tx_map_failed++; 934 used_bounce = 1; 935 } else 936 desc.fields.address = data_dma_addr; 937
··· 904 unsigned long data_dma_addr; 905 906 desc.fields.flags_len = IBMVETH_BUF_VALID | skb->len; 907 908 if (skb->ip_summed == CHECKSUM_PARTIAL && 909 ip_hdr(skb)->protocol != IPPROTO_TCP && skb_checksum_help(skb)) { ··· 924 buf[1] = 0; 925 } 926 927 + data_dma_addr = dma_map_single(&adapter->vdev->dev, skb->data, 928 + skb->len, DMA_TO_DEVICE); 929 if (dma_mapping_error(&adapter->vdev->dev, data_dma_addr)) { 930 if (!firmware_has_feature(FW_FEATURE_CMO)) 931 ibmveth_error_printk("tx: unable to map xmit buffer\n"); ··· 932 desc.fields.address = adapter->bounce_buffer_dma; 933 tx_map_failed++; 934 used_bounce = 1; 935 + wmb(); 936 } else 937 desc.fields.address = data_dma_addr; 938
-1
drivers/net/igb/e1000_82575.c
··· 87 case E1000_DEV_ID_82576: 88 case E1000_DEV_ID_82576_FIBER: 89 case E1000_DEV_ID_82576_SERDES: 90 - case E1000_DEV_ID_82576_QUAD_COPPER: 91 mac->type = e1000_82576; 92 break; 93 default:
··· 87 case E1000_DEV_ID_82576: 88 case E1000_DEV_ID_82576_FIBER: 89 case E1000_DEV_ID_82576_SERDES: 90 mac->type = e1000_82576; 91 break; 92 default:
-1
drivers/net/igb/e1000_hw.h
··· 41 #define E1000_DEV_ID_82576 0x10C9 42 #define E1000_DEV_ID_82576_FIBER 0x10E6 43 #define E1000_DEV_ID_82576_SERDES 0x10E7 44 - #define E1000_DEV_ID_82576_QUAD_COPPER 0x10E8 45 #define E1000_DEV_ID_82575EB_COPPER 0x10A7 46 #define E1000_DEV_ID_82575EB_FIBER_SERDES 0x10A9 47 #define E1000_DEV_ID_82575GB_QUAD_COPPER 0x10D6
··· 41 #define E1000_DEV_ID_82576 0x10C9 42 #define E1000_DEV_ID_82576_FIBER 0x10E6 43 #define E1000_DEV_ID_82576_SERDES 0x10E7 44 #define E1000_DEV_ID_82575EB_COPPER 0x10A7 45 #define E1000_DEV_ID_82575EB_FIBER_SERDES 0x10A9 46 #define E1000_DEV_ID_82575GB_QUAD_COPPER 0x10D6
+6 -11
drivers/net/igb/igb_ethtool.c
··· 373 regs_buff[12] = rd32(E1000_EECD); 374 375 /* Interrupt */ 376 - regs_buff[13] = rd32(E1000_EICR); 377 regs_buff[14] = rd32(E1000_EICS); 378 regs_buff[15] = rd32(E1000_EIMS); 379 regs_buff[16] = rd32(E1000_EIMC); 380 regs_buff[17] = rd32(E1000_EIAC); 381 regs_buff[18] = rd32(E1000_EIAM); 382 - regs_buff[19] = rd32(E1000_ICR); 383 regs_buff[20] = rd32(E1000_ICS); 384 regs_buff[21] = rd32(E1000_IMS); 385 regs_buff[22] = rd32(E1000_IMC); ··· 1744 case E1000_DEV_ID_82576_SERDES: 1745 /* Wake events not supported on port B */ 1746 if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1) { 1747 - wol->supported = 0; 1748 - break; 1749 - } 1750 - /* return success for non excluded adapter ports */ 1751 - retval = 0; 1752 - break; 1753 - case E1000_DEV_ID_82576_QUAD_COPPER: 1754 - /* quad port adapters only support WoL on port A */ 1755 - if (!(adapter->flags & IGB_FLAG_QUAD_PORT_A)) { 1756 wol->supported = 0; 1757 break; 1758 }
··· 373 regs_buff[12] = rd32(E1000_EECD); 374 375 /* Interrupt */ 376 + /* Reading EICS for EICR because they read the 377 + * same but EICS does not clear on read */ 378 + regs_buff[13] = rd32(E1000_EICS); 379 regs_buff[14] = rd32(E1000_EICS); 380 regs_buff[15] = rd32(E1000_EIMS); 381 regs_buff[16] = rd32(E1000_EIMC); 382 regs_buff[17] = rd32(E1000_EIAC); 383 regs_buff[18] = rd32(E1000_EIAM); 384 + /* Reading ICS for ICR because they read the 385 + * same but ICS does not clear on read */ 386 + regs_buff[19] = rd32(E1000_ICS); 387 regs_buff[20] = rd32(E1000_ICS); 388 regs_buff[21] = rd32(E1000_IMS); 389 regs_buff[22] = rd32(E1000_IMC); ··· 1740 case E1000_DEV_ID_82576_SERDES: 1741 /* Wake events not supported on port B */ 1742 if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1) { 1743 wol->supported = 0; 1744 break; 1745 }
+11 -14
drivers/net/igb/igb_main.c
··· 61 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 }, 62 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 }, 63 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 }, 64 - { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 }, 65 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 }, 66 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 }, 67 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 }, ··· 520 adapter->msix_entries, 521 numvecs); 522 if (err == 0) 523 - return; 524 525 igb_reset_interrupt_capability(adapter); 526 ··· 530 adapter->num_tx_queues = 1; 531 if (!pci_enable_msi(adapter->pdev)) 532 adapter->flags |= IGB_FLAG_HAS_MSI; 533 - 534 /* Notify the stack of the (possibly) reduced Tx Queue count. */ 535 adapter->netdev->real_num_tx_queues = adapter->num_tx_queues; 536 return; ··· 1215 * regardless of eeprom setting */ 1216 if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1) 1217 adapter->eeprom_wol = 0; 1218 - break; 1219 - case E1000_DEV_ID_82576_QUAD_COPPER: 1220 - /* if quad port adapter, disable WoL on all but port A */ 1221 - if (global_quad_port_a != 0) 1222 - adapter->eeprom_wol = 0; 1223 - else 1224 - adapter->flags |= IGB_FLAG_QUAD_PORT_A; 1225 - /* Reset for multiple quad port adapters */ 1226 - if (++global_quad_port_a == 4) 1227 - global_quad_port_a = 0; 1228 break; 1229 } 1230 ··· 2279 struct igb_ring *tx_ring = adapter->tx_ring; 2280 struct e1000_mac_info *mac = &adapter->hw.mac; 2281 u32 link; 2282 s32 ret_val; 2283 2284 if ((netif_carrier_ok(netdev)) && 2285 (rd32(E1000_STATUS) & E1000_STATUS_LU)) ··· 2383 } 2384 2385 /* Cause software interrupt to ensure rx ring is cleaned */ 2386 - wr32(E1000_ICS, E1000_ICS_RXDMT0); 2387 2388 /* Force detection of hung controller every watchdog period */ 2389 tx_ring->detect_tx_hung = true;
··· 61 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 }, 62 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 }, 63 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 }, 64 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 }, 65 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 }, 66 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 }, ··· 521 adapter->msix_entries, 522 numvecs); 523 if (err == 0) 524 + goto out; 525 526 igb_reset_interrupt_capability(adapter); 527 ··· 531 adapter->num_tx_queues = 1; 532 if (!pci_enable_msi(adapter->pdev)) 533 adapter->flags |= IGB_FLAG_HAS_MSI; 534 + out: 535 /* Notify the stack of the (possibly) reduced Tx Queue count. */ 536 adapter->netdev->real_num_tx_queues = adapter->num_tx_queues; 537 return; ··· 1216 * regardless of eeprom setting */ 1217 if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1) 1218 adapter->eeprom_wol = 0; 1219 break; 1220 } 1221 ··· 2290 struct igb_ring *tx_ring = adapter->tx_ring; 2291 struct e1000_mac_info *mac = &adapter->hw.mac; 2292 u32 link; 2293 + u32 eics = 0; 2294 s32 ret_val; 2295 + int i; 2296 2297 if ((netif_carrier_ok(netdev)) && 2298 (rd32(E1000_STATUS) & E1000_STATUS_LU)) ··· 2392 } 2393 2394 /* Cause software interrupt to ensure rx ring is cleaned */ 2395 + if (adapter->msix_entries) { 2396 + for (i = 0; i < adapter->num_rx_queues; i++) 2397 + eics |= adapter->rx_ring[i].eims_value; 2398 + wr32(E1000_EICS, eics); 2399 + } else { 2400 + wr32(E1000_ICS, E1000_ICS_RXDMT0); 2401 + } 2402 2403 /* Force detection of hung controller every watchdog period */ 2404 tx_ring->detect_tx_hung = true;
+5 -3
drivers/net/ixgbe/ixgbe_main.c
··· 1636 struct ixgbe_hw *hw = &adapter->hw; 1637 struct dev_mc_list *mc_ptr; 1638 u8 *mta_list; 1639 - u32 fctrl; 1640 int i; 1641 1642 /* Check for Promiscuous and All Multicast modes */ 1643 1644 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 1645 1646 if (netdev->flags & IFF_PROMISC) { 1647 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 1648 - fctrl &= ~IXGBE_VLNCTRL_VFE; 1649 } else { 1650 if (netdev->flags & IFF_ALLMULTI) { 1651 fctrl |= IXGBE_FCTRL_MPE; ··· 1654 } else { 1655 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 1656 } 1657 - fctrl |= IXGBE_VLNCTRL_VFE; 1658 } 1659 1660 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 1661 1662 if (netdev->mc_count) { 1663 mta_list = kcalloc(netdev->mc_count, ETH_ALEN, GFP_ATOMIC);
··· 1636 struct ixgbe_hw *hw = &adapter->hw; 1637 struct dev_mc_list *mc_ptr; 1638 u8 *mta_list; 1639 + u32 fctrl, vlnctrl; 1640 int i; 1641 1642 /* Check for Promiscuous and All Multicast modes */ 1643 1644 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 1645 + vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 1646 1647 if (netdev->flags & IFF_PROMISC) { 1648 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 1649 + vlnctrl &= ~IXGBE_VLNCTRL_VFE; 1650 } else { 1651 if (netdev->flags & IFF_ALLMULTI) { 1652 fctrl |= IXGBE_FCTRL_MPE; ··· 1653 } else { 1654 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 1655 } 1656 + vlnctrl |= IXGBE_VLNCTRL_VFE; 1657 } 1658 1659 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 1660 + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); 1661 1662 if (netdev->mc_count) { 1663 mta_list = kcalloc(netdev->mc_count, ETH_ALEN, GFP_ATOMIC);
+20 -15
drivers/net/mv643xx_eth.c
··· 55 #include <asm/system.h> 56 57 static char mv643xx_eth_driver_name[] = "mv643xx_eth"; 58 - static char mv643xx_eth_driver_version[] = "1.2"; 59 60 #define MV643XX_ETH_CHECKSUM_OFFLOAD_TX 61 #define MV643XX_ETH_NAPI ··· 474 /* 475 * Reserve 2+14 bytes for an ethernet header (the 476 * hardware automatically prepends 2 bytes of dummy 477 - * data to each received packet), 4 bytes for a VLAN 478 - * header, and 4 bytes for the trailing FCS -- 24 479 - * bytes total. 480 */ 481 - skb_size = mp->dev->mtu + 24; 482 483 skb = dev_alloc_skb(skb_size + dma_get_cache_alignment() - 1); 484 if (skb == NULL) ··· 517 skb_reserve(skb, 2); 518 } 519 520 - if (rxq->rx_desc_count != rxq->rx_ring_size) { 521 - rxq->rx_oom.expires = jiffies + (HZ / 10); 522 - add_timer(&rxq->rx_oom); 523 - } 524 525 spin_unlock_irqrestore(&mp->lock, flags); 526 } ··· 535 int rx; 536 537 rx = 0; 538 - while (rx < budget) { 539 struct rx_desc *rx_desc; 540 unsigned int cmd_sts; 541 struct sk_buff *skb; ··· 560 spin_unlock_irqrestore(&mp->lock, flags); 561 562 dma_unmap_single(NULL, rx_desc->buf_ptr + 2, 563 - mp->dev->mtu + 24, DMA_FROM_DEVICE); 564 rxq->rx_desc_count--; 565 rx++; 566 ··· 642 txq_reclaim(mp->txq + i, 0); 643 644 if (netif_carrier_ok(mp->dev)) { 645 - spin_lock(&mp->lock); 646 __txq_maybe_wake(mp->txq + mp->txq_primary); 647 - spin_unlock(&mp->lock); 648 } 649 } 650 #endif ··· 656 657 if (rx < budget) { 658 netif_rx_complete(mp->dev, napi); 659 - wrl(mp, INT_CAUSE(mp->port_num), 0); 660 - wrl(mp, INT_CAUSE_EXT(mp->port_num), 0); 661 wrl(mp, INT_MASK(mp->port_num), INT_TX_END | INT_RX | INT_EXT); 662 } 663 ··· 1800 */ 1801 #ifdef MV643XX_ETH_NAPI 1802 if (int_cause & INT_RX) { 1803 wrl(mp, INT_MASK(mp->port_num), 0x00000000); 1804 rdl(mp, INT_MASK(mp->port_num)); 1805
··· 55 #include <asm/system.h> 56 57 static char mv643xx_eth_driver_name[] = "mv643xx_eth"; 58 + static char mv643xx_eth_driver_version[] = "1.3"; 59 60 #define MV643XX_ETH_CHECKSUM_OFFLOAD_TX 61 #define MV643XX_ETH_NAPI ··· 474 /* 475 * Reserve 2+14 bytes for an ethernet header (the 476 * hardware automatically prepends 2 bytes of dummy 477 + * data to each received packet), 16 bytes for up to 478 + * four VLAN tags, and 4 bytes for the trailing FCS 479 + * -- 36 bytes total. 480 */ 481 + skb_size = mp->dev->mtu + 36; 482 + 483 + /* 484 + * Make sure that the skb size is a multiple of 8 485 + * bytes, as the lower three bits of the receive 486 + * descriptor's buffer size field are ignored by 487 + * the hardware. 488 + */ 489 + skb_size = (skb_size + 7) & ~7; 490 491 skb = dev_alloc_skb(skb_size + dma_get_cache_alignment() - 1); 492 if (skb == NULL) ··· 509 skb_reserve(skb, 2); 510 } 511 512 + if (rxq->rx_desc_count != rxq->rx_ring_size) 513 + mod_timer(&rxq->rx_oom, jiffies + (HZ / 10)); 514 515 spin_unlock_irqrestore(&mp->lock, flags); 516 } ··· 529 int rx; 530 531 rx = 0; 532 + while (rx < budget && rxq->rx_desc_count) { 533 struct rx_desc *rx_desc; 534 unsigned int cmd_sts; 535 struct sk_buff *skb; ··· 554 spin_unlock_irqrestore(&mp->lock, flags); 555 556 dma_unmap_single(NULL, rx_desc->buf_ptr + 2, 557 + rx_desc->buf_size, DMA_FROM_DEVICE); 558 rxq->rx_desc_count--; 559 rx++; 560 ··· 636 txq_reclaim(mp->txq + i, 0); 637 638 if (netif_carrier_ok(mp->dev)) { 639 + spin_lock_irq(&mp->lock); 640 __txq_maybe_wake(mp->txq + mp->txq_primary); 641 + spin_unlock_irq(&mp->lock); 642 } 643 } 644 #endif ··· 650 651 if (rx < budget) { 652 netif_rx_complete(mp->dev, napi); 653 wrl(mp, INT_MASK(mp->port_num), INT_TX_END | INT_RX | INT_EXT); 654 } 655 ··· 1796 */ 1797 #ifdef MV643XX_ETH_NAPI 1798 if (int_cause & INT_RX) { 1799 + wrl(mp, INT_CAUSE(mp->port_num), ~(int_cause & INT_RX)); 1800 wrl(mp, INT_MASK(mp->port_num), 0x00000000); 1801 rdl(mp, INT_MASK(mp->port_num)); 1802
+1 -1
drivers/net/myri10ge/myri10ge.c
··· 75 #include "myri10ge_mcp.h" 76 #include "myri10ge_mcp_gen_header.h" 77 78 - #define MYRI10GE_VERSION_STR "1.3.99-1.347" 79 80 MODULE_DESCRIPTION("Myricom 10G driver (10GbE)"); 81 MODULE_AUTHOR("Maintainer: help@myri.com");
··· 75 #include "myri10ge_mcp.h" 76 #include "myri10ge_mcp_gen_header.h" 77 78 + #define MYRI10GE_VERSION_STR "1.4.3-1.358" 79 80 MODULE_DESCRIPTION("Myricom 10G driver (10GbE)"); 81 MODULE_AUTHOR("Maintainer: help@myri.com");
+1 -1
drivers/net/r8169.c
··· 2792 pkt_size, PCI_DMA_FROMDEVICE); 2793 rtl8169_mark_to_asic(desc, tp->rx_buf_sz); 2794 } else { 2795 - pci_unmap_single(pdev, addr, pkt_size, 2796 PCI_DMA_FROMDEVICE); 2797 tp->Rx_skbuff[entry] = NULL; 2798 }
··· 2792 pkt_size, PCI_DMA_FROMDEVICE); 2793 rtl8169_mark_to_asic(desc, tp->rx_buf_sz); 2794 } else { 2795 + pci_unmap_single(pdev, addr, tp->rx_buf_sz, 2796 PCI_DMA_FROMDEVICE); 2797 tp->Rx_skbuff[entry] = NULL; 2798 }
+3 -3
drivers/net/skfp/ess.c
··· 510 chg->path.para.p_type = SMT_P320B ; 511 chg->path.para.p_len = sizeof(struct smt_p_320b) - PARA_LEN ; 512 chg->path.mib_index = SBAPATHINDEX ; 513 - chg->path.path_pad = (u_short)NULL ; 514 chg->path.path_index = PRIMARY_RING ; 515 516 /* set P320F */ ··· 606 req->path.para.p_type = SMT_P320B ; 607 req->path.para.p_len = sizeof(struct smt_p_320b) - PARA_LEN ; 608 req->path.mib_index = SBAPATHINDEX ; 609 - req->path.path_pad = (u_short)NULL ; 610 req->path.path_index = PRIMARY_RING ; 611 612 /* set P0017 */ ··· 636 /* set P19 */ 637 req->a_addr.para.p_type = SMT_P0019 ; 638 req->a_addr.para.p_len = sizeof(struct smt_p_0019) - PARA_LEN ; 639 - req->a_addr.sba_pad = (u_short)NULL ; 640 req->a_addr.alloc_addr = null_addr ; 641 642 /* set P1A */
··· 510 chg->path.para.p_type = SMT_P320B ; 511 chg->path.para.p_len = sizeof(struct smt_p_320b) - PARA_LEN ; 512 chg->path.mib_index = SBAPATHINDEX ; 513 + chg->path.path_pad = 0; 514 chg->path.path_index = PRIMARY_RING ; 515 516 /* set P320F */ ··· 606 req->path.para.p_type = SMT_P320B ; 607 req->path.para.p_len = sizeof(struct smt_p_320b) - PARA_LEN ; 608 req->path.mib_index = SBAPATHINDEX ; 609 + req->path.path_pad = 0; 610 req->path.path_index = PRIMARY_RING ; 611 612 /* set P0017 */ ··· 636 /* set P19 */ 637 req->a_addr.para.p_type = SMT_P0019 ; 638 req->a_addr.para.p_len = sizeof(struct smt_p_0019) - PARA_LEN ; 639 + req->a_addr.sba_pad = 0; 640 req->a_addr.alloc_addr = null_addr ; 641 642 /* set P1A */
+1 -1
drivers/net/smc91x.c
··· 2255 2256 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-regs"); 2257 if (!res) 2258 - platform_get_resource(pdev, IORESOURCE_MEM, 0); 2259 release_mem_region(res->start, SMC_IO_EXTENT); 2260 2261 free_netdev(ndev);
··· 2255 2256 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-regs"); 2257 if (!res) 2258 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2259 release_mem_region(res->start, SMC_IO_EXTENT); 2260 2261 free_netdev(ndev);
+2 -1
drivers/net/usb/hso.c
··· 397 {default_port_device(0x0af0, 0xc031)}, /* Icon-Edge */ 398 {icon321_port_device(0x0af0, 0xd013)}, /* Module HSxPA */ 399 {icon321_port_device(0x0af0, 0xd031)}, /* Icon-321 */ 400 - {default_port_device(0x0af0, 0xd033)}, /* Icon-322 */ 401 {USB_DEVICE(0x0af0, 0x7301)}, /* GE40x */ 402 {USB_DEVICE(0x0af0, 0x7361)}, /* GE40x */ 403 {USB_DEVICE(0x0af0, 0x7401)}, /* GI 0401 */ ··· 2613 "Transmitting lingering data\n"); 2614 hso_net_start_xmit(hso_net->skb_tx_buf, 2615 hso_net->net); 2616 } 2617 result = hso_start_net_device(network_table[i]); 2618 if (result)
··· 397 {default_port_device(0x0af0, 0xc031)}, /* Icon-Edge */ 398 {icon321_port_device(0x0af0, 0xd013)}, /* Module HSxPA */ 399 {icon321_port_device(0x0af0, 0xd031)}, /* Icon-321 */ 400 + {icon321_port_device(0x0af0, 0xd033)}, /* Icon-322 */ 401 {USB_DEVICE(0x0af0, 0x7301)}, /* GE40x */ 402 {USB_DEVICE(0x0af0, 0x7361)}, /* GE40x */ 403 {USB_DEVICE(0x0af0, 0x7401)}, /* GI 0401 */ ··· 2613 "Transmitting lingering data\n"); 2614 hso_net_start_xmit(hso_net->skb_tx_buf, 2615 hso_net->net); 2616 + hso_net->skb_tx_buf = NULL; 2617 } 2618 result = hso_start_net_device(network_table[i]); 2619 if (result)
+46 -1
drivers/net/usb/mcs7830.c
··· 46 47 #define MCS7830_VENDOR_ID 0x9710 48 #define MCS7830_PRODUCT_ID 0x7830 49 50 #define MCS7830_MII_ADVERTISE (ADVERTISE_PAUSE_CAP | ADVERTISE_100FULL | \ 51 ADVERTISE_100HALF | ADVERTISE_10FULL | \ ··· 446 .nway_reset = usbnet_nway_reset, 447 }; 448 449 static int mcs7830_bind(struct usbnet *dev, struct usb_interface *udev) 450 { 451 struct net_device *net = dev->net; ··· 482 net->ethtool_ops = &mcs7830_ethtool_ops; 483 net->set_multicast_list = mcs7830_set_multicast; 484 mcs7830_set_multicast(net); 485 486 /* reserve space for the status byte on rx */ 487 dev->rx_urb_size = ETH_FRAME_LEN + 1; ··· 519 } 520 521 static const struct driver_info moschip_info = { 522 - .description = "MOSCHIP 7830 usb-NET adapter", 523 .bind = mcs7830_bind, 524 .rx_fixup = mcs7830_rx_fixup, 525 .flags = FLAG_ETHER, ··· 540 { 541 USB_DEVICE(MCS7830_VENDOR_ID, MCS7830_PRODUCT_ID), 542 .driver_info = (unsigned long) &moschip_info, 543 }, 544 {}, 545 };
··· 46 47 #define MCS7830_VENDOR_ID 0x9710 48 #define MCS7830_PRODUCT_ID 0x7830 49 + #define MCS7730_PRODUCT_ID 0x7730 50 + 51 + #define SITECOM_VENDOR_ID 0x0DF6 52 + #define LN_030_PRODUCT_ID 0x0021 53 54 #define MCS7830_MII_ADVERTISE (ADVERTISE_PAUSE_CAP | ADVERTISE_100FULL | \ 55 ADVERTISE_100HALF | ADVERTISE_10FULL | \ ··· 442 .nway_reset = usbnet_nway_reset, 443 }; 444 445 + static int mcs7830_set_mac_address(struct net_device *netdev, void *p) 446 + { 447 + int ret; 448 + struct usbnet *dev = netdev_priv(netdev); 449 + struct sockaddr *addr = p; 450 + 451 + if (netif_running(netdev)) 452 + return -EBUSY; 453 + 454 + if (!is_valid_ether_addr(addr->sa_data)) 455 + return -EINVAL; 456 + 457 + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 458 + 459 + ret = mcs7830_set_reg(dev, HIF_REG_ETHERNET_ADDR, ETH_ALEN, 460 + netdev->dev_addr); 461 + 462 + if (ret < 0) 463 + return ret; 464 + 465 + return 0; 466 + } 467 + 468 static int mcs7830_bind(struct usbnet *dev, struct usb_interface *udev) 469 { 470 struct net_device *net = dev->net; ··· 455 net->ethtool_ops = &mcs7830_ethtool_ops; 456 net->set_multicast_list = mcs7830_set_multicast; 457 mcs7830_set_multicast(net); 458 + net->set_mac_address = mcs7830_set_mac_address; 459 460 /* reserve space for the status byte on rx */ 461 dev->rx_urb_size = ETH_FRAME_LEN + 1; ··· 491 } 492 493 static const struct driver_info moschip_info = { 494 + .description = "MOSCHIP 7830/7730 usb-NET adapter", 495 + .bind = mcs7830_bind, 496 + .rx_fixup = mcs7830_rx_fixup, 497 + .flags = FLAG_ETHER, 498 + .in = 1, 499 + .out = 2, 500 + }; 501 + 502 + static const struct driver_info sitecom_info = { 503 + .description = "Sitecom LN-30 usb-NET adapter", 504 .bind = mcs7830_bind, 505 .rx_fixup = mcs7830_rx_fixup, 506 .flags = FLAG_ETHER, ··· 503 { 504 USB_DEVICE(MCS7830_VENDOR_ID, MCS7830_PRODUCT_ID), 505 .driver_info = (unsigned long) &moschip_info, 506 + }, 507 + { 508 + USB_DEVICE(MCS7830_VENDOR_ID, MCS7730_PRODUCT_ID), 509 + .driver_info = (unsigned long) &moschip_info, 510 + }, 511 + { 512 + USB_DEVICE(SITECOM_VENDOR_ID, LN_030_PRODUCT_ID), 513 + .driver_info = (unsigned long) &sitecom_info, 514 }, 515 {}, 516 };
+4 -4
drivers/net/wan/sbni.c
··· 1317 break; 1318 1319 case SIOCDEVRESINSTATS : 1320 - if( current->euid != 0 ) /* root only */ 1321 return -EPERM; 1322 memset( &nl->in_stats, 0, sizeof(struct sbni_in_stats) ); 1323 break; ··· 1334 break; 1335 1336 case SIOCDEVSHWSTATE : 1337 - if( current->euid != 0 ) /* root only */ 1338 return -EPERM; 1339 1340 spin_lock( &nl->lock ); ··· 1355 #ifdef CONFIG_SBNI_MULTILINE 1356 1357 case SIOCDEVENSLAVE : 1358 - if( current->euid != 0 ) /* root only */ 1359 return -EPERM; 1360 1361 if (copy_from_user( slave_name, ifr->ifr_data, sizeof slave_name )) ··· 1370 return enslave( dev, slave_dev ); 1371 1372 case SIOCDEVEMANSIPATE : 1373 - if( current->euid != 0 ) /* root only */ 1374 return -EPERM; 1375 1376 return emancipate( dev );
··· 1317 break; 1318 1319 case SIOCDEVRESINSTATS : 1320 + if (!capable(CAP_NET_ADMIN)) 1321 return -EPERM; 1322 memset( &nl->in_stats, 0, sizeof(struct sbni_in_stats) ); 1323 break; ··· 1334 break; 1335 1336 case SIOCDEVSHWSTATE : 1337 + if (!capable(CAP_NET_ADMIN)) 1338 return -EPERM; 1339 1340 spin_lock( &nl->lock ); ··· 1355 #ifdef CONFIG_SBNI_MULTILINE 1356 1357 case SIOCDEVENSLAVE : 1358 + if (!capable(CAP_NET_ADMIN)) 1359 return -EPERM; 1360 1361 if (copy_from_user( slave_name, ifr->ifr_data, sizeof slave_name )) ··· 1370 return enslave( dev, slave_dev ); 1371 1372 case SIOCDEVEMANSIPATE : 1373 + if (!capable(CAP_NET_ADMIN)) 1374 return -EPERM; 1375 1376 return emancipate( dev );
+1 -1
drivers/net/wd.c
··· 337 #ifdef CONFIG_NET_POLL_CONTROLLER 338 dev->poll_controller = ei_poll; 339 #endif 340 - NS8390p_init(dev, 0); 341 342 #if 1 343 /* Enable interrupt generation on softconfig cards -- M.U */
··· 337 #ifdef CONFIG_NET_POLL_CONTROLLER 338 dev->poll_controller = ei_poll; 339 #endif 340 + NS8390_init(dev, 0); 341 342 #if 1 343 /* Enable interrupt generation on softconfig cards -- M.U */
+17 -6
drivers/net/wireless/ath5k/base.c
··· 251 return; 252 pci_unmap_single(sc->pdev, bf->skbaddr, bf->skb->len, 253 PCI_DMA_TODEVICE); 254 - dev_kfree_skb(bf->skb); 255 bf->skb = NULL; 256 } 257 ··· 466 mutex_init(&sc->lock); 467 spin_lock_init(&sc->rxbuflock); 468 spin_lock_init(&sc->txbuflock); 469 470 /* Set private data */ 471 pci_set_drvdata(pdev, hw); ··· 2180 2181 sc->imask |= AR5K_INT_SWBA; 2182 2183 - if (ath5k_hw_hasveol(ah)) 2184 ath5k_beacon_send(sc); 2185 } 2186 /* TODO else AP */ 2187 ··· 2407 TSF_TO_TU(tsf), 2408 (unsigned long long) tsf); 2409 } else { 2410 ath5k_beacon_send(sc); 2411 } 2412 } 2413 if (status & AR5K_INT_RXEOL) { ··· 2751 ret = -EOPNOTSUPP; 2752 goto end; 2753 } 2754 ret = 0; 2755 end: 2756 mutex_unlock(&sc->lock); ··· 2800 struct ath5k_hw *ah = sc->ah; 2801 int ret; 2802 2803 - /* Set to a reasonable value. Note that this will 2804 - * be set to mac80211's value at ath5k_config(). */ 2805 - sc->bintval = 1000; 2806 mutex_lock(&sc->lock); 2807 if (sc->vif != vif) { 2808 ret = -EIO; ··· 3058 ath5k_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb) 3059 { 3060 struct ath5k_softc *sc = hw->priv; 3061 int ret; 3062 3063 ath5k_debug_dump_skb(sc, skb, "BC ", 1); ··· 3068 goto end; 3069 } 3070 3071 ath5k_txbuf_free(sc, sc->bbuf); 3072 sc->bbuf->skb = skb; 3073 ret = ath5k_beacon_setup(sc, sc->bbuf); 3074 if (ret) 3075 sc->bbuf->skb = NULL; 3076 - else { 3077 ath5k_beacon_config(sc); 3078 mmiowb(); 3079 }
··· 251 return; 252 pci_unmap_single(sc->pdev, bf->skbaddr, bf->skb->len, 253 PCI_DMA_TODEVICE); 254 + dev_kfree_skb_any(bf->skb); 255 bf->skb = NULL; 256 } 257 ··· 466 mutex_init(&sc->lock); 467 spin_lock_init(&sc->rxbuflock); 468 spin_lock_init(&sc->txbuflock); 469 + spin_lock_init(&sc->block); 470 471 /* Set private data */ 472 pci_set_drvdata(pdev, hw); ··· 2179 2180 sc->imask |= AR5K_INT_SWBA; 2181 2182 + if (ath5k_hw_hasveol(ah)) { 2183 + spin_lock(&sc->block); 2184 ath5k_beacon_send(sc); 2185 + spin_unlock(&sc->block); 2186 + } 2187 } 2188 /* TODO else AP */ 2189 ··· 2403 TSF_TO_TU(tsf), 2404 (unsigned long long) tsf); 2405 } else { 2406 + spin_lock(&sc->block); 2407 ath5k_beacon_send(sc); 2408 + spin_unlock(&sc->block); 2409 } 2410 } 2411 if (status & AR5K_INT_RXEOL) { ··· 2745 ret = -EOPNOTSUPP; 2746 goto end; 2747 } 2748 + 2749 + /* Set to a reasonable value. Note that this will 2750 + * be set to mac80211's value at ath5k_config(). */ 2751 + sc->bintval = 1000; 2752 + 2753 ret = 0; 2754 end: 2755 mutex_unlock(&sc->lock); ··· 2789 struct ath5k_hw *ah = sc->ah; 2790 int ret; 2791 2792 mutex_lock(&sc->lock); 2793 if (sc->vif != vif) { 2794 ret = -EIO; ··· 3050 ath5k_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb) 3051 { 3052 struct ath5k_softc *sc = hw->priv; 3053 + unsigned long flags; 3054 int ret; 3055 3056 ath5k_debug_dump_skb(sc, skb, "BC ", 1); ··· 3059 goto end; 3060 } 3061 3062 + spin_lock_irqsave(&sc->block, flags); 3063 ath5k_txbuf_free(sc, sc->bbuf); 3064 sc->bbuf->skb = skb; 3065 ret = ath5k_beacon_setup(sc, sc->bbuf); 3066 if (ret) 3067 sc->bbuf->skb = NULL; 3068 + spin_unlock_irqrestore(&sc->block, flags); 3069 + if (!ret) { 3070 ath5k_beacon_config(sc); 3071 mmiowb(); 3072 }
+1
drivers/net/wireless/ath5k/base.h
··· 172 struct tasklet_struct txtq; /* tx intr tasklet */ 173 struct ath5k_led tx_led; /* tx led */ 174 175 struct ath5k_buf *bbuf; /* beacon buffer */ 176 unsigned int bhalq, /* SW q for outgoing beacons */ 177 bmisscount, /* missed beacon transmits */
··· 172 struct tasklet_struct txtq; /* tx intr tasklet */ 173 struct ath5k_led tx_led; /* tx led */ 174 175 + spinlock_t block; /* protects beacon */ 176 struct ath5k_buf *bbuf; /* beacon buffer */ 177 unsigned int bhalq, /* SW q for outgoing beacons */ 178 bmisscount, /* missed beacon transmits */
+31 -20
drivers/net/wireless/atmel.c
··· 1304 int atmel_open(struct net_device *dev) 1305 { 1306 struct atmel_private *priv = netdev_priv(dev); 1307 - int i, channel; 1308 1309 /* any scheduled timer is no longer needed and might screw things up.. */ 1310 del_timer_sync(&priv->management_timer); ··· 1328 priv->site_survey_state = SITE_SURVEY_IDLE; 1329 priv->station_is_associated = 0; 1330 1331 - if (!reset_atmel_card(dev)) 1332 - return -EAGAIN; 1333 1334 if (priv->config_reg_domain) { 1335 priv->reg_domain = priv->config_reg_domain; ··· 3062 } 3063 3064 if (status == WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG) { 3065 - /* Do opensystem first, then try sharedkey */ 3066 if (system == WLAN_AUTH_OPEN) { 3067 priv->CurrentAuthentTransactionSeqNum = 0x001; 3068 priv->exclude_unencrypted = 1; 3069 send_authentication_request(priv, WLAN_AUTH_SHARED_KEY, NULL, 0); 3070 return; 3071 } else if (priv->connect_to_any_BSS) { 3072 int bss_index; ··· 3589 3590 if (i == 0) { 3591 printk(KERN_ALERT "%s: MAC failed to boot.\n", priv->dev->name); 3592 - return 0; 3593 } 3594 3595 if ((priv->host_info_base = atmel_read16(priv->dev, MR2)) == 0xffff) { 3596 printk(KERN_ALERT "%s: card missing.\n", priv->dev->name); 3597 - return 0; 3598 } 3599 3600 /* now check for completion of MAC initialization through ··· 3618 if (i == 0) { 3619 printk(KERN_ALERT "%s: MAC failed to initialise.\n", 3620 priv->dev->name); 3621 - return 0; 3622 } 3623 3624 /* Check for MAC_INIT_OK only on the register that the MAC_INIT_OK was set */ 3625 if ((mr3 & MAC_INIT_COMPLETE) && 3626 !(atmel_read16(priv->dev, MR3) & MAC_INIT_OK)) { 3627 printk(KERN_ALERT "%s: MAC failed MR3 self-test.\n", priv->dev->name); 3628 - return 0; 3629 } 3630 if ((mr1 & MAC_INIT_COMPLETE) && 3631 !(atmel_read16(priv->dev, MR1) & MAC_INIT_OK)) { 3632 printk(KERN_ALERT "%s: MAC failed MR1 self-test.\n", priv->dev->name); 3633 - return 0; 3634 } 3635 3636 atmel_copy_to_host(priv->dev, (unsigned char *)iface, ··· 3651 iface->func_ctrl = le16_to_cpu(iface->func_ctrl); 3652 
iface->mac_status = le16_to_cpu(iface->mac_status); 3653 3654 - return 1; 3655 } 3656 3657 /* determine type of memory and MAC address */ ··· 3702 /* Standard firmware in flash, boot it up and ask 3703 for the Mac Address */ 3704 priv->card_type = CARD_TYPE_SPI_FLASH; 3705 - if (atmel_wakeup_firmware(priv)) { 3706 atmel_get_mib(priv, Mac_Address_Mib_Type, 0, dev->dev_addr, 6); 3707 3708 /* got address, now squash it again until the network ··· 3844 struct atmel_private *priv = netdev_priv(dev); 3845 u8 configuration; 3846 int old_state = priv->station_state; 3847 3848 /* data to add to the firmware names, in priority order 3849 this implemenents firmware versioning */ ··· 3878 dev->name); 3879 strcpy(priv->firmware_id, "atmel_at76c502.bin"); 3880 } 3881 - if (request_firmware(&fw_entry, priv->firmware_id, priv->sys_dev) != 0) { 3882 printk(KERN_ALERT 3883 "%s: firmware %s is missing, cannot continue.\n", 3884 dev->name, priv->firmware_id); 3885 - return 0; 3886 } 3887 } else { 3888 int fw_index = 0; ··· 3912 "%s: firmware %s is missing, cannot start.\n", 3913 dev->name, priv->firmware_id); 3914 priv->firmware_id[0] = '\0'; 3915 - return 0; 3916 } 3917 } 3918 ··· 3937 release_firmware(fw_entry); 3938 } 3939 3940 - if (!atmel_wakeup_firmware(priv)) 3941 - return 0; 3942 3943 /* Check the version and set the correct flag for wpa stuff, 3944 old and new firmware is incompatible. ··· 3980 if (!priv->radio_on_broken) { 3981 if (atmel_send_command_wait(priv, CMD_EnableRadio, NULL, 0) == 3982 CMD_STATUS_REJECTED_RADIO_OFF) { 3983 - printk(KERN_INFO 3984 - "%s: cannot turn the radio on. (Hey radio, you're beautiful!)\n", 3985 dev->name); 3986 - return 0; 3987 } 3988 } 3989 ··· 4017 wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL); 4018 } 4019 4020 - return 1; 4021 } 4022 4023 static void atmel_send_command(struct atmel_private *priv, int command,
··· 1304 int atmel_open(struct net_device *dev) 1305 { 1306 struct atmel_private *priv = netdev_priv(dev); 1307 + int i, channel, err; 1308 1309 /* any scheduled timer is no longer needed and might screw things up.. */ 1310 del_timer_sync(&priv->management_timer); ··· 1328 priv->site_survey_state = SITE_SURVEY_IDLE; 1329 priv->station_is_associated = 0; 1330 1331 + err = reset_atmel_card(dev); 1332 + if (err) 1333 + return err; 1334 1335 if (priv->config_reg_domain) { 1336 priv->reg_domain = priv->config_reg_domain; ··· 3061 } 3062 3063 if (status == WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG) { 3064 + /* Flip back and forth between WEP auth modes until the max 3065 + * authentication tries has been exceeded. 3066 + */ 3067 if (system == WLAN_AUTH_OPEN) { 3068 priv->CurrentAuthentTransactionSeqNum = 0x001; 3069 priv->exclude_unencrypted = 1; 3070 send_authentication_request(priv, WLAN_AUTH_SHARED_KEY, NULL, 0); 3071 + return; 3072 + } else if ( system == WLAN_AUTH_SHARED_KEY 3073 + && priv->wep_is_on) { 3074 + priv->CurrentAuthentTransactionSeqNum = 0x001; 3075 + priv->exclude_unencrypted = 0; 3076 + send_authentication_request(priv, WLAN_AUTH_OPEN, NULL, 0); 3077 return; 3078 } else if (priv->connect_to_any_BSS) { 3079 int bss_index; ··· 3580 3581 if (i == 0) { 3582 printk(KERN_ALERT "%s: MAC failed to boot.\n", priv->dev->name); 3583 + return -EIO; 3584 } 3585 3586 if ((priv->host_info_base = atmel_read16(priv->dev, MR2)) == 0xffff) { 3587 printk(KERN_ALERT "%s: card missing.\n", priv->dev->name); 3588 + return -ENODEV; 3589 } 3590 3591 /* now check for completion of MAC initialization through ··· 3609 if (i == 0) { 3610 printk(KERN_ALERT "%s: MAC failed to initialise.\n", 3611 priv->dev->name); 3612 + return -EIO; 3613 } 3614 3615 /* Check for MAC_INIT_OK only on the register that the MAC_INIT_OK was set */ 3616 if ((mr3 & MAC_INIT_COMPLETE) && 3617 !(atmel_read16(priv->dev, MR3) & MAC_INIT_OK)) { 3618 printk(KERN_ALERT "%s: MAC failed MR3 self-test.\n", 
priv->dev->name); 3619 + return -EIO; 3620 } 3621 if ((mr1 & MAC_INIT_COMPLETE) && 3622 !(atmel_read16(priv->dev, MR1) & MAC_INIT_OK)) { 3623 printk(KERN_ALERT "%s: MAC failed MR1 self-test.\n", priv->dev->name); 3624 + return -EIO; 3625 } 3626 3627 atmel_copy_to_host(priv->dev, (unsigned char *)iface, ··· 3642 iface->func_ctrl = le16_to_cpu(iface->func_ctrl); 3643 iface->mac_status = le16_to_cpu(iface->mac_status); 3644 3645 + return 0; 3646 } 3647 3648 /* determine type of memory and MAC address */ ··· 3693 /* Standard firmware in flash, boot it up and ask 3694 for the Mac Address */ 3695 priv->card_type = CARD_TYPE_SPI_FLASH; 3696 + if (atmel_wakeup_firmware(priv) == 0) { 3697 atmel_get_mib(priv, Mac_Address_Mib_Type, 0, dev->dev_addr, 6); 3698 3699 /* got address, now squash it again until the network ··· 3835 struct atmel_private *priv = netdev_priv(dev); 3836 u8 configuration; 3837 int old_state = priv->station_state; 3838 + int err = 0; 3839 3840 /* data to add to the firmware names, in priority order 3841 this implemenents firmware versioning */ ··· 3868 dev->name); 3869 strcpy(priv->firmware_id, "atmel_at76c502.bin"); 3870 } 3871 + err = request_firmware(&fw_entry, priv->firmware_id, priv->sys_dev); 3872 + if (err != 0) { 3873 printk(KERN_ALERT 3874 "%s: firmware %s is missing, cannot continue.\n", 3875 dev->name, priv->firmware_id); 3876 + return err; 3877 } 3878 } else { 3879 int fw_index = 0; ··· 3901 "%s: firmware %s is missing, cannot start.\n", 3902 dev->name, priv->firmware_id); 3903 priv->firmware_id[0] = '\0'; 3904 + return -ENOENT; 3905 } 3906 } 3907 ··· 3926 release_firmware(fw_entry); 3927 } 3928 3929 + err = atmel_wakeup_firmware(priv); 3930 + if (err != 0) 3931 + return err; 3932 3933 /* Check the version and set the correct flag for wpa stuff, 3934 old and new firmware is incompatible. 
··· 3968 if (!priv->radio_on_broken) { 3969 if (atmel_send_command_wait(priv, CMD_EnableRadio, NULL, 0) == 3970 CMD_STATUS_REJECTED_RADIO_OFF) { 3971 + printk(KERN_INFO "%s: cannot turn the radio on.\n", 3972 dev->name); 3973 + return -EIO; 3974 } 3975 } 3976 ··· 4006 wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL); 4007 } 4008 4009 + return 0; 4010 } 4011 4012 static void atmel_send_command(struct atmel_private *priv, int command,
+38 -41
drivers/s390/net/claw.c
··· 133 static inline void 134 claw_set_busy(struct net_device *dev) 135 { 136 - ((struct claw_privbk *) dev->priv)->tbusy=1; 137 eieio(); 138 } 139 140 static inline void 141 claw_clear_busy(struct net_device *dev) 142 { 143 - clear_bit(0, &(((struct claw_privbk *) dev->priv)->tbusy)); 144 netif_wake_queue(dev); 145 eieio(); 146 } ··· 149 claw_check_busy(struct net_device *dev) 150 { 151 eieio(); 152 - return ((struct claw_privbk *) dev->priv)->tbusy; 153 } 154 155 static inline void 156 claw_setbit_busy(int nr,struct net_device *dev) 157 { 158 netif_stop_queue(dev); 159 - set_bit(nr, (void *)&(((struct claw_privbk *)dev->priv)->tbusy)); 160 } 161 162 static inline void 163 claw_clearbit_busy(int nr,struct net_device *dev) 164 { 165 - clear_bit(nr,(void *)&(((struct claw_privbk *)dev->priv)->tbusy)); 166 netif_wake_queue(dev); 167 } 168 ··· 171 { 172 netif_stop_queue(dev); 173 return test_and_set_bit(nr, 174 - (void *)&(((struct claw_privbk *) dev->priv)->tbusy)); 175 } 176 177 ··· 271 if (!get_device(&cgdev->dev)) 272 return -ENODEV; 273 privptr = kzalloc(sizeof(struct claw_privbk), GFP_KERNEL); 274 if (privptr == NULL) { 275 probe_error(cgdev); 276 put_device(&cgdev->dev); ··· 306 privptr->p_env->p_priv = privptr; 307 cgdev->cdev[0]->handler = claw_irq_handler; 308 cgdev->cdev[1]->handler = claw_irq_handler; 309 - cgdev->dev.driver_data = privptr; 310 CLAW_DBF_TEXT(2, setup, "prbext 0"); 311 312 return 0; ··· 319 claw_tx(struct sk_buff *skb, struct net_device *dev) 320 { 321 int rc; 322 - struct claw_privbk *privptr=dev->priv; 323 unsigned long saveflags; 324 struct chbk *p_ch; 325 ··· 404 static int 405 claw_change_mtu(struct net_device *dev, int new_mtu) 406 { 407 - struct claw_privbk *privptr=dev->priv; 408 int buff_size; 409 CLAW_DBF_TEXT(4, trace, "setmtu"); 410 buff_size = privptr->p_env->write_size; ··· 434 struct ccwbk *p_buf; 435 436 CLAW_DBF_TEXT(4, trace, "open"); 437 - privptr = (struct claw_privbk *)dev->priv; 438 /* allocate and initialize CCW 
blocks */ 439 if (privptr->buffs_alloc == 0) { 440 rc=init_ccw_bk(dev); ··· 780 p_ch = (struct chbk *) data; 781 dev = (struct net_device *)p_ch->ndev; 782 CLAW_DBF_TEXT(4, trace, "IRQtask"); 783 - privptr = (struct claw_privbk *) dev->priv; 784 unpack_read(dev); 785 clear_bit(CLAW_BH_ACTIVE, (void *)&p_ch->flag_a); 786 CLAW_DBF_TEXT(4, trace, "TskletXt"); ··· 805 806 if (!dev) 807 return 0; 808 - privptr = (struct claw_privbk *) dev->priv; 809 if (!privptr) 810 return 0; 811 CLAW_DBF_TEXT(4, trace, "release"); ··· 960 if (p_ch->claw_state == CLAW_STOP) 961 return; 962 dev = (struct net_device *) p_ch->ndev; 963 - privptr = (struct claw_privbk *) dev->priv; 964 claw_free_wrt_buf( dev ); 965 if ((privptr->write_free_count > 0) && 966 !skb_queue_empty(&p_ch->collect_queue)) { ··· 1042 struct ccw1 temp_ccw; 1043 struct endccw * p_end; 1044 CLAW_DBF_TEXT(4, trace, "addreads"); 1045 - privptr = dev->priv; 1046 p_end = privptr->p_end_ccw; 1047 1048 /* first CCW and last CCW contains a new set of read channel programs ··· 1212 int rc=0; 1213 1214 CLAW_DBF_TEXT(2, setup, "findlink"); 1215 - privptr=dev->priv; 1216 p_env=privptr->p_env; 1217 switch (p_env->packing) 1218 { ··· 1264 struct chbk *ch; 1265 1266 CLAW_DBF_TEXT(4, trace, "hw_tx"); 1267 - privptr = (struct claw_privbk *) (dev->priv); 1268 p_ch=(struct chbk *)&privptr->channel[WRITE]; 1269 p_env =privptr->p_env; 1270 claw_free_wrt_buf(dev); /* Clean up free chain if posible */ ··· 1483 struct ccwbk*p_last_CCWB; 1484 struct ccwbk*p_first_CCWB; 1485 struct endccw *p_endccw=NULL; 1486 - addr_t real_address; 1487 - struct claw_privbk *privptr=dev->priv; 1488 struct clawh *pClawH=NULL; 1489 addr_t real_TIC_address; 1490 int i,j; ··· 1960 static void 1961 probe_error( struct ccwgroup_device *cgdev) 1962 { 1963 - struct claw_privbk *privptr; 1964 1965 CLAW_DBF_TEXT(4, trace, "proberr"); 1966 - privptr=(struct claw_privbk *)cgdev->dev.driver_data; 1967 - if (privptr!=NULL) { 1968 kfree(privptr->p_env); 1969 - 
privptr->p_env=NULL; 1970 - kfree(privptr->p_mtc_envelope); 1971 - privptr->p_mtc_envelope=NULL; 1972 - kfree(privptr); 1973 - privptr=NULL; 1974 - } 1975 - return; 1976 } /* probe_error */ 1977 1978 /*-------------------------------------------------------------------* ··· 1997 CLAW_DBF_TEXT(2, setup, "clw_cntl"); 1998 udelay(1000); /* Wait a ms for the control packets to 1999 *catch up to each other */ 2000 - privptr=dev->priv; 2001 p_env=privptr->p_env; 2002 tdev = &privptr->channel[READ].cdev->dev; 2003 memcpy( &temp_host_name, p_env->host_name, 8); ··· 2275 struct sk_buff *skb; 2276 2277 CLAW_DBF_TEXT(2, setup, "sndcntl"); 2278 - privptr=dev->priv; 2279 p_ctl=(struct clawctl *)&privptr->ctl_bk; 2280 2281 p_ctl->command=type; ··· 2345 claw_snd_conn_req(struct net_device *dev, __u8 link) 2346 { 2347 int rc; 2348 - struct claw_privbk *privptr=dev->priv; 2349 struct clawctl *p_ctl; 2350 2351 CLAW_DBF_TEXT(2, setup, "snd_conn"); ··· 2405 int rc; 2406 2407 CLAW_DBF_TEXT(2, setup, "chkresp"); 2408 - privptr = dev->priv; 2409 p_env=privptr->p_env; 2410 rc=claw_send_control(dev, SYSTEM_VALIDATE_RESPONSE, 2411 p_ctl->linkid, ··· 2443 struct claw_privbk *privptr; 2444 2445 CLAW_DBF_TEXT(4, trace, "stats"); 2446 - privptr = dev->priv; 2447 return &privptr->stats; 2448 } /* end of claw_stats */ 2449 ··· 2479 p_last_ccw=NULL; 2480 p_packh=NULL; 2481 p_packd=NULL; 2482 - privptr=dev->priv; 2483 2484 p_dev = &privptr->channel[READ].cdev->dev; 2485 p_env = privptr->p_env; ··· 2648 int rc = 0; 2649 __u32 parm; 2650 unsigned long saveflags = 0; 2651 - struct claw_privbk *privptr=dev->priv; 2652 struct ccwbk*p_ccwbk; 2653 struct chbk *p_ch; 2654 struct clawh *p_clawh; ··· 2705 if (!dev) { 2706 return; 2707 } 2708 - privptr=(struct claw_privbk *)dev->priv; 2709 p_ch=&privptr->channel[WRITE]; 2710 2711 CLAW_DBF_TEXT(4, trace, "strt_io"); ··· 2738 claw_free_wrt_buf( struct net_device *dev ) 2739 { 2740 2741 - struct claw_privbk *privptr=(struct claw_privbk *)dev->priv; 2742 struct 
ccwbk*p_first_ccw; 2743 struct ccwbk*p_last_ccw; 2744 struct ccwbk*p_this_ccw; ··· 2795 if (!dev) 2796 return; 2797 CLAW_DBF_TEXT_(2, setup, "%s", dev->name); 2798 - privptr = dev->priv; 2799 if (dev->flags & IFF_RUNNING) 2800 claw_release(dev); 2801 if (privptr) { 2802 privptr->channel[READ].ndev = NULL; /* say it's free */ 2803 } 2804 - dev->priv=NULL; 2805 #ifdef MODULE 2806 if (free_dev) { 2807 free_netdev(dev); ··· 2918 printk(KERN_WARNING "%s:alloc_netdev failed\n",__func__); 2919 goto out; 2920 } 2921 - dev->priv = privptr; 2922 cgdev->dev.driver_data = privptr; 2923 cgdev->cdev[READ]->dev.driver_data = privptr; 2924 cgdev->cdev[WRITE]->dev.driver_data = privptr; ··· 2999 ret = claw_release(ndev); 3000 ndev->flags &=~IFF_RUNNING; 3001 unregister_netdev(ndev); 3002 - ndev->priv = NULL; /* cgdev data, not ndev's to free */ 3003 claw_free_netdevice(ndev, 1); 3004 priv->channel[READ].ndev = NULL; 3005 priv->channel[WRITE].ndev = NULL;
··· 133 static inline void 134 claw_set_busy(struct net_device *dev) 135 { 136 + ((struct claw_privbk *)dev->ml_priv)->tbusy = 1; 137 eieio(); 138 } 139 140 static inline void 141 claw_clear_busy(struct net_device *dev) 142 { 143 + clear_bit(0, &(((struct claw_privbk *) dev->ml_priv)->tbusy)); 144 netif_wake_queue(dev); 145 eieio(); 146 } ··· 149 claw_check_busy(struct net_device *dev) 150 { 151 eieio(); 152 + return ((struct claw_privbk *) dev->ml_priv)->tbusy; 153 } 154 155 static inline void 156 claw_setbit_busy(int nr,struct net_device *dev) 157 { 158 netif_stop_queue(dev); 159 + set_bit(nr, (void *)&(((struct claw_privbk *)dev->ml_priv)->tbusy)); 160 } 161 162 static inline void 163 claw_clearbit_busy(int nr,struct net_device *dev) 164 { 165 + clear_bit(nr, (void *)&(((struct claw_privbk *)dev->ml_priv)->tbusy)); 166 netif_wake_queue(dev); 167 } 168 ··· 171 { 172 netif_stop_queue(dev); 173 return test_and_set_bit(nr, 174 + (void *)&(((struct claw_privbk *) dev->ml_priv)->tbusy)); 175 } 176 177 ··· 271 if (!get_device(&cgdev->dev)) 272 return -ENODEV; 273 privptr = kzalloc(sizeof(struct claw_privbk), GFP_KERNEL); 274 + cgdev->dev.driver_data = privptr; 275 if (privptr == NULL) { 276 probe_error(cgdev); 277 put_device(&cgdev->dev); ··· 305 privptr->p_env->p_priv = privptr; 306 cgdev->cdev[0]->handler = claw_irq_handler; 307 cgdev->cdev[1]->handler = claw_irq_handler; 308 CLAW_DBF_TEXT(2, setup, "prbext 0"); 309 310 return 0; ··· 319 claw_tx(struct sk_buff *skb, struct net_device *dev) 320 { 321 int rc; 322 + struct claw_privbk *privptr = dev->ml_priv; 323 unsigned long saveflags; 324 struct chbk *p_ch; 325 ··· 404 static int 405 claw_change_mtu(struct net_device *dev, int new_mtu) 406 { 407 + struct claw_privbk *privptr = dev->ml_priv; 408 int buff_size; 409 CLAW_DBF_TEXT(4, trace, "setmtu"); 410 buff_size = privptr->p_env->write_size; ··· 434 struct ccwbk *p_buf; 435 436 CLAW_DBF_TEXT(4, trace, "open"); 437 + privptr = (struct claw_privbk *)dev->ml_priv; 438 /* 
allocate and initialize CCW blocks */ 439 if (privptr->buffs_alloc == 0) { 440 rc=init_ccw_bk(dev); ··· 780 p_ch = (struct chbk *) data; 781 dev = (struct net_device *)p_ch->ndev; 782 CLAW_DBF_TEXT(4, trace, "IRQtask"); 783 + privptr = (struct claw_privbk *)dev->ml_priv; 784 unpack_read(dev); 785 clear_bit(CLAW_BH_ACTIVE, (void *)&p_ch->flag_a); 786 CLAW_DBF_TEXT(4, trace, "TskletXt"); ··· 805 806 if (!dev) 807 return 0; 808 + privptr = (struct claw_privbk *)dev->ml_priv; 809 if (!privptr) 810 return 0; 811 CLAW_DBF_TEXT(4, trace, "release"); ··· 960 if (p_ch->claw_state == CLAW_STOP) 961 return; 962 dev = (struct net_device *) p_ch->ndev; 963 + privptr = (struct claw_privbk *) dev->ml_priv; 964 claw_free_wrt_buf( dev ); 965 if ((privptr->write_free_count > 0) && 966 !skb_queue_empty(&p_ch->collect_queue)) { ··· 1042 struct ccw1 temp_ccw; 1043 struct endccw * p_end; 1044 CLAW_DBF_TEXT(4, trace, "addreads"); 1045 + privptr = dev->ml_priv; 1046 p_end = privptr->p_end_ccw; 1047 1048 /* first CCW and last CCW contains a new set of read channel programs ··· 1212 int rc=0; 1213 1214 CLAW_DBF_TEXT(2, setup, "findlink"); 1215 + privptr = dev->ml_priv; 1216 p_env=privptr->p_env; 1217 switch (p_env->packing) 1218 { ··· 1264 struct chbk *ch; 1265 1266 CLAW_DBF_TEXT(4, trace, "hw_tx"); 1267 + privptr = (struct claw_privbk *)(dev->ml_priv); 1268 p_ch=(struct chbk *)&privptr->channel[WRITE]; 1269 p_env =privptr->p_env; 1270 claw_free_wrt_buf(dev); /* Clean up free chain if posible */ ··· 1483 struct ccwbk*p_last_CCWB; 1484 struct ccwbk*p_first_CCWB; 1485 struct endccw *p_endccw=NULL; 1486 + addr_t real_address; 1487 + struct claw_privbk *privptr = dev->ml_priv; 1488 struct clawh *pClawH=NULL; 1489 addr_t real_TIC_address; 1490 int i,j; ··· 1960 static void 1961 probe_error( struct ccwgroup_device *cgdev) 1962 { 1963 + struct claw_privbk *privptr; 1964 1965 CLAW_DBF_TEXT(4, trace, "proberr"); 1966 + privptr = (struct claw_privbk *) cgdev->dev.driver_data; 1967 + if (privptr != 
NULL) { 1968 + cgdev->dev.driver_data = NULL; 1969 kfree(privptr->p_env); 1970 + kfree(privptr->p_mtc_envelope); 1971 + kfree(privptr); 1972 + } 1973 } /* probe_error */ 1974 1975 /*-------------------------------------------------------------------* ··· 2000 CLAW_DBF_TEXT(2, setup, "clw_cntl"); 2001 udelay(1000); /* Wait a ms for the control packets to 2002 *catch up to each other */ 2003 + privptr = dev->ml_priv; 2004 p_env=privptr->p_env; 2005 tdev = &privptr->channel[READ].cdev->dev; 2006 memcpy( &temp_host_name, p_env->host_name, 8); ··· 2278 struct sk_buff *skb; 2279 2280 CLAW_DBF_TEXT(2, setup, "sndcntl"); 2281 + privptr = dev->ml_priv; 2282 p_ctl=(struct clawctl *)&privptr->ctl_bk; 2283 2284 p_ctl->command=type; ··· 2348 claw_snd_conn_req(struct net_device *dev, __u8 link) 2349 { 2350 int rc; 2351 + struct claw_privbk *privptr = dev->ml_priv; 2352 struct clawctl *p_ctl; 2353 2354 CLAW_DBF_TEXT(2, setup, "snd_conn"); ··· 2408 int rc; 2409 2410 CLAW_DBF_TEXT(2, setup, "chkresp"); 2411 + privptr = dev->ml_priv; 2412 p_env=privptr->p_env; 2413 rc=claw_send_control(dev, SYSTEM_VALIDATE_RESPONSE, 2414 p_ctl->linkid, ··· 2446 struct claw_privbk *privptr; 2447 2448 CLAW_DBF_TEXT(4, trace, "stats"); 2449 + privptr = dev->ml_priv; 2450 return &privptr->stats; 2451 } /* end of claw_stats */ 2452 ··· 2482 p_last_ccw=NULL; 2483 p_packh=NULL; 2484 p_packd=NULL; 2485 + privptr = dev->ml_priv; 2486 2487 p_dev = &privptr->channel[READ].cdev->dev; 2488 p_env = privptr->p_env; ··· 2651 int rc = 0; 2652 __u32 parm; 2653 unsigned long saveflags = 0; 2654 + struct claw_privbk *privptr = dev->ml_priv; 2655 struct ccwbk*p_ccwbk; 2656 struct chbk *p_ch; 2657 struct clawh *p_clawh; ··· 2708 if (!dev) { 2709 return; 2710 } 2711 + privptr = (struct claw_privbk *)dev->ml_priv; 2712 p_ch=&privptr->channel[WRITE]; 2713 2714 CLAW_DBF_TEXT(4, trace, "strt_io"); ··· 2741 claw_free_wrt_buf( struct net_device *dev ) 2742 { 2743 2744 + struct claw_privbk *privptr = (struct claw_privbk 
*)dev->ml_priv; 2745 struct ccwbk*p_first_ccw; 2746 struct ccwbk*p_last_ccw; 2747 struct ccwbk*p_this_ccw; ··· 2798 if (!dev) 2799 return; 2800 CLAW_DBF_TEXT_(2, setup, "%s", dev->name); 2801 + privptr = dev->ml_priv; 2802 if (dev->flags & IFF_RUNNING) 2803 claw_release(dev); 2804 if (privptr) { 2805 privptr->channel[READ].ndev = NULL; /* say it's free */ 2806 } 2807 + dev->ml_priv = NULL; 2808 #ifdef MODULE 2809 if (free_dev) { 2810 free_netdev(dev); ··· 2921 printk(KERN_WARNING "%s:alloc_netdev failed\n",__func__); 2922 goto out; 2923 } 2924 + dev->ml_priv = privptr; 2925 cgdev->dev.driver_data = privptr; 2926 cgdev->cdev[READ]->dev.driver_data = privptr; 2927 cgdev->cdev[WRITE]->dev.driver_data = privptr; ··· 3002 ret = claw_release(ndev); 3003 ndev->flags &=~IFF_RUNNING; 3004 unregister_netdev(ndev); 3005 + ndev->ml_priv = NULL; /* cgdev data, not ndev's to free */ 3006 claw_free_netdevice(ndev, 1); 3007 priv->channel[READ].ndev = NULL; 3008 priv->channel[WRITE].ndev = NULL;
+28 -28
drivers/s390/net/ctcm_fsms.c
··· 245 { 246 struct channel *ch = arg; 247 struct net_device *dev = ch->netdev; 248 - struct ctcm_priv *priv = dev->priv; 249 struct sk_buff *skb; 250 int first = 1; 251 int i; ··· 336 { 337 struct channel *ch = arg; 338 struct net_device *dev = ch->netdev; 339 - struct ctcm_priv *priv = dev->priv; 340 341 CTCM_PR_DEBUG("%s(%s): %s\n", __func__, ch->id, dev->name); 342 ··· 357 { 358 struct channel *ch = arg; 359 struct net_device *dev = ch->netdev; 360 - struct ctcm_priv *priv = dev->priv; 361 int len = ch->max_bufsize - ch->irb->scsw.cmd.count; 362 struct sk_buff *skb = ch->trans_skb; 363 __u16 block_len = *((__u16 *)skb->data); ··· 459 chx_rxidle(fi, event, arg); 460 } else { 461 struct net_device *dev = ch->netdev; 462 - struct ctcm_priv *priv = dev->priv; 463 fsm_newstate(fi, CTC_STATE_TXIDLE); 464 fsm_event(priv->fsm, DEV_EVENT_TXUP, dev); 465 } ··· 496 if ((CHANNEL_DIRECTION(ch->flags) == READ) && 497 (ch->protocol == CTCM_PROTO_S390)) { 498 struct net_device *dev = ch->netdev; 499 - struct ctcm_priv *priv = dev->priv; 500 fsm_event(priv->fsm, DEV_EVENT_RXUP, dev); 501 } 502 } ··· 514 { 515 struct channel *ch = arg; 516 struct net_device *dev = ch->netdev; 517 - struct ctcm_priv *priv = dev->priv; 518 __u16 buflen; 519 int rc; 520 ··· 699 struct channel *ch) 700 { 701 struct net_device *dev = ch->netdev; 702 - struct ctcm_priv *priv = dev->priv; 703 704 CTCM_DBF_TEXT_(SETUP, CTC_DBF_NOTICE, 705 "%s(%s): %s[%d]\n", ··· 784 { 785 struct channel *ch = arg; 786 struct net_device *dev = ch->netdev; 787 - struct ctcm_priv *priv = dev->priv; 788 789 /* 790 * Special case: Got UC_RCRESET on setmode. 
··· 874 { 875 struct channel *ch = arg; 876 struct net_device *dev = ch->netdev; 877 - struct ctcm_priv *priv = dev->priv; 878 879 if (event == CTC_EVENT_TIMER) { 880 if (!IS_MPCDEV(dev)) ··· 902 { 903 struct channel *ch = arg; 904 struct net_device *dev = ch->netdev; 905 - struct ctcm_priv *priv = dev->priv; 906 907 CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, 908 "%s(%s): RX %s busy, init. fail", ··· 923 struct channel *ch = arg; 924 struct channel *ch2; 925 struct net_device *dev = ch->netdev; 926 - struct ctcm_priv *priv = dev->priv; 927 928 CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE, 929 "%s: %s: remote disconnect - re-init ...", ··· 954 { 955 struct channel *ch = arg; 956 struct net_device *dev = ch->netdev; 957 - struct ctcm_priv *priv = dev->priv; 958 959 if (event == CTC_EVENT_TIMER) { 960 fsm_deltimer(&ch->timer); ··· 984 { 985 struct channel *ch = arg; 986 struct net_device *dev = ch->netdev; 987 - struct ctcm_priv *priv = dev->priv; 988 struct sk_buff *skb; 989 990 CTCM_PR_DEBUG("Enter: %s: cp=%i ch=0x%p id=%s\n", ··· 1057 { 1058 struct channel *ch = arg; 1059 struct net_device *dev = ch->netdev; 1060 - struct ctcm_priv *priv = dev->priv; 1061 int rd = CHANNEL_DIRECTION(ch->flags); 1062 1063 fsm_deltimer(&ch->timer); ··· 1207 { 1208 struct channel *ch = arg; 1209 struct net_device *dev = ch->netdev; 1210 - struct ctcm_priv *priv = dev->priv; 1211 struct mpc_group *grp = priv->mpcg; 1212 struct sk_buff *skb; 1213 int first = 1; ··· 1368 { 1369 struct channel *ch = arg; 1370 struct net_device *dev = ch->netdev; 1371 - struct ctcm_priv *priv = dev->priv; 1372 struct mpc_group *grp = priv->mpcg; 1373 struct sk_buff *skb = ch->trans_skb; 1374 struct sk_buff *new_skb; ··· 1471 { 1472 struct channel *ch = arg; 1473 struct net_device *dev = ch->netdev; 1474 - struct ctcm_priv *priv = dev->priv; 1475 struct mpc_group *gptr = priv->mpcg; 1476 1477 CTCM_PR_DEBUG("Enter %s: id=%s, ch=0x%p\n", ··· 1525 { 1526 struct channel *ch = arg; 1527 struct net_device *dev = ch->netdev; 
1528 - struct ctcm_priv *priv = dev->priv; 1529 struct mpc_group *grp = priv->mpcg; 1530 int rc; 1531 unsigned long saveflags = 0; /* avoids compiler warning */ ··· 1580 { 1581 struct channel *ch = arg; 1582 struct net_device *dev = ch->netdev; 1583 - struct ctcm_priv *priv = dev->priv; 1584 struct mpc_group *grp = priv->mpcg; 1585 1586 CTCM_PR_DEBUG("%s(%s): %s(ch=0x%p), cp=%i, ChStat:%s, GrpStat:%s\n", ··· 1639 { 1640 struct channel *ch = arg; 1641 struct net_device *dev = ch->netdev; 1642 - struct ctcm_priv *priv = dev->priv; 1643 struct mpc_group *grp = priv->mpcg; 1644 1645 CTCM_PR_DEBUG("%s(%s): %s\n ChState:%s GrpState:%s\n", ··· 1724 { 1725 struct channel *ch = arg; 1726 struct net_device *dev = ch->netdev; 1727 - struct ctcm_priv *priv = dev->priv; 1728 struct mpc_group *grp = priv->mpcg; 1729 1730 fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch); ··· 1740 { 1741 struct channel *ach = arg; 1742 struct net_device *dev = ach->netdev; 1743 - struct ctcm_priv *priv = dev->priv; 1744 struct mpc_group *grp = priv->mpcg; 1745 struct channel *wch = priv->channel[WRITE]; 1746 struct channel *rch = priv->channel[READ]; ··· 2050 static void dev_action_start(fsm_instance *fi, int event, void *arg) 2051 { 2052 struct net_device *dev = arg; 2053 - struct ctcm_priv *priv = dev->priv; 2054 int direction; 2055 2056 CTCMY_DBF_DEV_NAME(SETUP, dev, ""); ··· 2076 { 2077 int direction; 2078 struct net_device *dev = arg; 2079 - struct ctcm_priv *priv = dev->priv; 2080 2081 CTCMY_DBF_DEV_NAME(SETUP, dev, ""); 2082 ··· 2096 { 2097 int restart_timer; 2098 struct net_device *dev = arg; 2099 - struct ctcm_priv *priv = dev->priv; 2100 2101 CTCMY_DBF_DEV_NAME(TRACE, dev, ""); 2102 ··· 2133 static void dev_action_chup(fsm_instance *fi, int event, void *arg) 2134 { 2135 struct net_device *dev = arg; 2136 - struct ctcm_priv *priv = dev->priv; 2137 int dev_stat = fsm_getstate(fi); 2138 2139 CTCM_DBF_TEXT_(SETUP, CTC_DBF_NOTICE, 2140 "%s(%s): priv = %p [%d,%d]\n ", CTCM_FUNTAIL, 2141 - dev->name, 
dev->priv, dev_stat, event); 2142 2143 switch (fsm_getstate(fi)) { 2144 case DEV_STATE_STARTWAIT_RXTX: ··· 2195 { 2196 2197 struct net_device *dev = arg; 2198 - struct ctcm_priv *priv = dev->priv; 2199 2200 CTCMY_DBF_DEV_NAME(SETUP, dev, ""); 2201
··· 245 { 246 struct channel *ch = arg; 247 struct net_device *dev = ch->netdev; 248 + struct ctcm_priv *priv = dev->ml_priv; 249 struct sk_buff *skb; 250 int first = 1; 251 int i; ··· 336 { 337 struct channel *ch = arg; 338 struct net_device *dev = ch->netdev; 339 + struct ctcm_priv *priv = dev->ml_priv; 340 341 CTCM_PR_DEBUG("%s(%s): %s\n", __func__, ch->id, dev->name); 342 ··· 357 { 358 struct channel *ch = arg; 359 struct net_device *dev = ch->netdev; 360 + struct ctcm_priv *priv = dev->ml_priv; 361 int len = ch->max_bufsize - ch->irb->scsw.cmd.count; 362 struct sk_buff *skb = ch->trans_skb; 363 __u16 block_len = *((__u16 *)skb->data); ··· 459 chx_rxidle(fi, event, arg); 460 } else { 461 struct net_device *dev = ch->netdev; 462 + struct ctcm_priv *priv = dev->ml_priv; 463 fsm_newstate(fi, CTC_STATE_TXIDLE); 464 fsm_event(priv->fsm, DEV_EVENT_TXUP, dev); 465 } ··· 496 if ((CHANNEL_DIRECTION(ch->flags) == READ) && 497 (ch->protocol == CTCM_PROTO_S390)) { 498 struct net_device *dev = ch->netdev; 499 + struct ctcm_priv *priv = dev->ml_priv; 500 fsm_event(priv->fsm, DEV_EVENT_RXUP, dev); 501 } 502 } ··· 514 { 515 struct channel *ch = arg; 516 struct net_device *dev = ch->netdev; 517 + struct ctcm_priv *priv = dev->ml_priv; 518 __u16 buflen; 519 int rc; 520 ··· 699 struct channel *ch) 700 { 701 struct net_device *dev = ch->netdev; 702 + struct ctcm_priv *priv = dev->ml_priv; 703 704 CTCM_DBF_TEXT_(SETUP, CTC_DBF_NOTICE, 705 "%s(%s): %s[%d]\n", ··· 784 { 785 struct channel *ch = arg; 786 struct net_device *dev = ch->netdev; 787 + struct ctcm_priv *priv = dev->ml_priv; 788 789 /* 790 * Special case: Got UC_RCRESET on setmode. 
··· 874 { 875 struct channel *ch = arg; 876 struct net_device *dev = ch->netdev; 877 + struct ctcm_priv *priv = dev->ml_priv; 878 879 if (event == CTC_EVENT_TIMER) { 880 if (!IS_MPCDEV(dev)) ··· 902 { 903 struct channel *ch = arg; 904 struct net_device *dev = ch->netdev; 905 + struct ctcm_priv *priv = dev->ml_priv; 906 907 CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, 908 "%s(%s): RX %s busy, init. fail", ··· 923 struct channel *ch = arg; 924 struct channel *ch2; 925 struct net_device *dev = ch->netdev; 926 + struct ctcm_priv *priv = dev->ml_priv; 927 928 CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE, 929 "%s: %s: remote disconnect - re-init ...", ··· 954 { 955 struct channel *ch = arg; 956 struct net_device *dev = ch->netdev; 957 + struct ctcm_priv *priv = dev->ml_priv; 958 959 if (event == CTC_EVENT_TIMER) { 960 fsm_deltimer(&ch->timer); ··· 984 { 985 struct channel *ch = arg; 986 struct net_device *dev = ch->netdev; 987 + struct ctcm_priv *priv = dev->ml_priv; 988 struct sk_buff *skb; 989 990 CTCM_PR_DEBUG("Enter: %s: cp=%i ch=0x%p id=%s\n", ··· 1057 { 1058 struct channel *ch = arg; 1059 struct net_device *dev = ch->netdev; 1060 + struct ctcm_priv *priv = dev->ml_priv; 1061 int rd = CHANNEL_DIRECTION(ch->flags); 1062 1063 fsm_deltimer(&ch->timer); ··· 1207 { 1208 struct channel *ch = arg; 1209 struct net_device *dev = ch->netdev; 1210 + struct ctcm_priv *priv = dev->ml_priv; 1211 struct mpc_group *grp = priv->mpcg; 1212 struct sk_buff *skb; 1213 int first = 1; ··· 1368 { 1369 struct channel *ch = arg; 1370 struct net_device *dev = ch->netdev; 1371 + struct ctcm_priv *priv = dev->ml_priv; 1372 struct mpc_group *grp = priv->mpcg; 1373 struct sk_buff *skb = ch->trans_skb; 1374 struct sk_buff *new_skb; ··· 1471 { 1472 struct channel *ch = arg; 1473 struct net_device *dev = ch->netdev; 1474 + struct ctcm_priv *priv = dev->ml_priv; 1475 struct mpc_group *gptr = priv->mpcg; 1476 1477 CTCM_PR_DEBUG("Enter %s: id=%s, ch=0x%p\n", ··· 1525 { 1526 struct channel *ch = arg; 1527 struct 
net_device *dev = ch->netdev; 1528 + struct ctcm_priv *priv = dev->ml_priv; 1529 struct mpc_group *grp = priv->mpcg; 1530 int rc; 1531 unsigned long saveflags = 0; /* avoids compiler warning */ ··· 1580 { 1581 struct channel *ch = arg; 1582 struct net_device *dev = ch->netdev; 1583 + struct ctcm_priv *priv = dev->ml_priv; 1584 struct mpc_group *grp = priv->mpcg; 1585 1586 CTCM_PR_DEBUG("%s(%s): %s(ch=0x%p), cp=%i, ChStat:%s, GrpStat:%s\n", ··· 1639 { 1640 struct channel *ch = arg; 1641 struct net_device *dev = ch->netdev; 1642 + struct ctcm_priv *priv = dev->ml_priv; 1643 struct mpc_group *grp = priv->mpcg; 1644 1645 CTCM_PR_DEBUG("%s(%s): %s\n ChState:%s GrpState:%s\n", ··· 1724 { 1725 struct channel *ch = arg; 1726 struct net_device *dev = ch->netdev; 1727 + struct ctcm_priv *priv = dev->ml_priv; 1728 struct mpc_group *grp = priv->mpcg; 1729 1730 fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch); ··· 1740 { 1741 struct channel *ach = arg; 1742 struct net_device *dev = ach->netdev; 1743 + struct ctcm_priv *priv = dev->ml_priv; 1744 struct mpc_group *grp = priv->mpcg; 1745 struct channel *wch = priv->channel[WRITE]; 1746 struct channel *rch = priv->channel[READ]; ··· 2050 static void dev_action_start(fsm_instance *fi, int event, void *arg) 2051 { 2052 struct net_device *dev = arg; 2053 + struct ctcm_priv *priv = dev->ml_priv; 2054 int direction; 2055 2056 CTCMY_DBF_DEV_NAME(SETUP, dev, ""); ··· 2076 { 2077 int direction; 2078 struct net_device *dev = arg; 2079 + struct ctcm_priv *priv = dev->ml_priv; 2080 2081 CTCMY_DBF_DEV_NAME(SETUP, dev, ""); 2082 ··· 2096 { 2097 int restart_timer; 2098 struct net_device *dev = arg; 2099 + struct ctcm_priv *priv = dev->ml_priv; 2100 2101 CTCMY_DBF_DEV_NAME(TRACE, dev, ""); 2102 ··· 2133 static void dev_action_chup(fsm_instance *fi, int event, void *arg) 2134 { 2135 struct net_device *dev = arg; 2136 + struct ctcm_priv *priv = dev->ml_priv; 2137 int dev_stat = fsm_getstate(fi); 2138 2139 CTCM_DBF_TEXT_(SETUP, CTC_DBF_NOTICE, 2140 
"%s(%s): priv = %p [%d,%d]\n ", CTCM_FUNTAIL, 2141 + dev->name, dev->ml_priv, dev_stat, event); 2142 2143 switch (fsm_getstate(fi)) { 2144 case DEV_STATE_STARTWAIT_RXTX: ··· 2195 { 2196 2197 struct net_device *dev = arg; 2198 + struct ctcm_priv *priv = dev->ml_priv; 2199 2200 CTCMY_DBF_DEV_NAME(SETUP, dev, ""); 2201
+12 -12
drivers/s390/net/ctcm_main.c
··· 69 void ctcm_unpack_skb(struct channel *ch, struct sk_buff *pskb) 70 { 71 struct net_device *dev = ch->netdev; 72 - struct ctcm_priv *priv = dev->priv; 73 __u16 len = *((__u16 *) pskb->data); 74 75 skb_put(pskb, 2 + LL_HEADER_LENGTH); ··· 414 */ 415 int ctcm_open(struct net_device *dev) 416 { 417 - struct ctcm_priv *priv = dev->priv; 418 419 CTCMY_DBF_DEV_NAME(SETUP, dev, ""); 420 if (!IS_MPC(priv)) ··· 432 */ 433 int ctcm_close(struct net_device *dev) 434 { 435 - struct ctcm_priv *priv = dev->priv; 436 437 CTCMY_DBF_DEV_NAME(SETUP, dev, ""); 438 if (!IS_MPC(priv)) ··· 573 skb_pull(skb, LL_HEADER_LENGTH + 2); 574 } else if (ccw_idx == 0) { 575 struct net_device *dev = ch->netdev; 576 - struct ctcm_priv *priv = dev->priv; 577 priv->stats.tx_packets++; 578 priv->stats.tx_bytes += skb->len - LL_HEADER_LENGTH; 579 } ··· 592 struct channel *ch; 593 /* int rc = 0; */ 594 595 - priv = dev->priv; 596 grp = priv->mpcg; 597 ch = priv->channel[WRITE]; 598 ··· 652 { 653 struct pdu *p_header; 654 struct net_device *dev = ch->netdev; 655 - struct ctcm_priv *priv = dev->priv; 656 struct mpc_group *grp = priv->mpcg; 657 struct th_header *header; 658 struct sk_buff *nskb; ··· 867 /* first merge version - leaving both functions separated */ 868 static int ctcm_tx(struct sk_buff *skb, struct net_device *dev) 869 { 870 - struct ctcm_priv *priv = dev->priv; 871 872 if (skb == NULL) { 873 CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, ··· 911 static int ctcmpc_tx(struct sk_buff *skb, struct net_device *dev) 912 { 913 int len = 0; 914 - struct ctcm_priv *priv = dev->priv; 915 struct mpc_group *grp = priv->mpcg; 916 struct sk_buff *newskb = NULL; 917 ··· 1025 if (new_mtu < 576 || new_mtu > 65527) 1026 return -EINVAL; 1027 1028 - priv = dev->priv; 1029 max_bufsize = priv->channel[READ]->max_bufsize; 1030 1031 if (IS_MPC(priv)) { ··· 1050 */ 1051 static struct net_device_stats *ctcm_stats(struct net_device *dev) 1052 { 1053 - return &((struct ctcm_priv *)dev->priv)->stats; 1054 } 1055 1056 static 
void ctcm_free_netdevice(struct net_device *dev) ··· 1060 1061 CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, 1062 "%s(%s)", CTCM_FUNTAIL, dev->name); 1063 - priv = dev->priv; 1064 if (priv) { 1065 grp = priv->mpcg; 1066 if (grp) { ··· 1125 CTCM_FUNTAIL); 1126 return NULL; 1127 } 1128 - dev->priv = priv; 1129 priv->fsm = init_fsm("ctcmdev", dev_state_names, dev_event_names, 1130 CTCM_NR_DEV_STATES, CTCM_NR_DEV_EVENTS, 1131 dev_fsm, dev_fsm_len, GFP_KERNEL);
··· 69 void ctcm_unpack_skb(struct channel *ch, struct sk_buff *pskb) 70 { 71 struct net_device *dev = ch->netdev; 72 + struct ctcm_priv *priv = dev->ml_priv; 73 __u16 len = *((__u16 *) pskb->data); 74 75 skb_put(pskb, 2 + LL_HEADER_LENGTH); ··· 414 */ 415 int ctcm_open(struct net_device *dev) 416 { 417 + struct ctcm_priv *priv = dev->ml_priv; 418 419 CTCMY_DBF_DEV_NAME(SETUP, dev, ""); 420 if (!IS_MPC(priv)) ··· 432 */ 433 int ctcm_close(struct net_device *dev) 434 { 435 + struct ctcm_priv *priv = dev->ml_priv; 436 437 CTCMY_DBF_DEV_NAME(SETUP, dev, ""); 438 if (!IS_MPC(priv)) ··· 573 skb_pull(skb, LL_HEADER_LENGTH + 2); 574 } else if (ccw_idx == 0) { 575 struct net_device *dev = ch->netdev; 576 + struct ctcm_priv *priv = dev->ml_priv; 577 priv->stats.tx_packets++; 578 priv->stats.tx_bytes += skb->len - LL_HEADER_LENGTH; 579 } ··· 592 struct channel *ch; 593 /* int rc = 0; */ 594 595 + priv = dev->ml_priv; 596 grp = priv->mpcg; 597 ch = priv->channel[WRITE]; 598 ··· 652 { 653 struct pdu *p_header; 654 struct net_device *dev = ch->netdev; 655 + struct ctcm_priv *priv = dev->ml_priv; 656 struct mpc_group *grp = priv->mpcg; 657 struct th_header *header; 658 struct sk_buff *nskb; ··· 867 /* first merge version - leaving both functions separated */ 868 static int ctcm_tx(struct sk_buff *skb, struct net_device *dev) 869 { 870 + struct ctcm_priv *priv = dev->ml_priv; 871 872 if (skb == NULL) { 873 CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, ··· 911 static int ctcmpc_tx(struct sk_buff *skb, struct net_device *dev) 912 { 913 int len = 0; 914 + struct ctcm_priv *priv = dev->ml_priv; 915 struct mpc_group *grp = priv->mpcg; 916 struct sk_buff *newskb = NULL; 917 ··· 1025 if (new_mtu < 576 || new_mtu > 65527) 1026 return -EINVAL; 1027 1028 + priv = dev->ml_priv; 1029 max_bufsize = priv->channel[READ]->max_bufsize; 1030 1031 if (IS_MPC(priv)) { ··· 1050 */ 1051 static struct net_device_stats *ctcm_stats(struct net_device *dev) 1052 { 1053 + return &((struct ctcm_priv 
*)dev->ml_priv)->stats; 1054 } 1055 1056 static void ctcm_free_netdevice(struct net_device *dev) ··· 1060 1061 CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, 1062 "%s(%s)", CTCM_FUNTAIL, dev->name); 1063 + priv = dev->ml_priv; 1064 if (priv) { 1065 grp = priv->mpcg; 1066 if (grp) { ··· 1125 CTCM_FUNTAIL); 1126 return NULL; 1127 } 1128 + dev->ml_priv = priv; 1129 priv->fsm = init_fsm("ctcmdev", dev_state_names, dev_event_names, 1130 CTCM_NR_DEV_STATES, CTCM_NR_DEV_EVENTS, 1131 dev_fsm, dev_fsm_len, GFP_KERNEL);
+5 -4
drivers/s390/net/ctcm_main.h
··· 229 */ 230 static inline void ctcm_clear_busy_do(struct net_device *dev) 231 { 232 - clear_bit(0, &(((struct ctcm_priv *)dev->priv)->tbusy)); 233 netif_wake_queue(dev); 234 } 235 236 static inline void ctcm_clear_busy(struct net_device *dev) 237 { 238 struct mpc_group *grp; 239 - grp = ((struct ctcm_priv *)dev->priv)->mpcg; 240 241 if (!(grp && grp->in_sweep)) 242 ctcm_clear_busy_do(dev); ··· 246 static inline int ctcm_test_and_set_busy(struct net_device *dev) 247 { 248 netif_stop_queue(dev); 249 - return test_and_set_bit(0, &(((struct ctcm_priv *)dev->priv)->tbusy)); 250 } 251 252 extern int loglevel; ··· 293 #define IS_MPC(p) ((p)->protocol == CTCM_PROTO_MPC) 294 295 /* test if struct ctcm_priv of struct net_device has MPC protocol setting */ 296 - #define IS_MPCDEV(d) IS_MPC((struct ctcm_priv *)d->priv) 297 298 static inline gfp_t gfp_type(void) 299 {
··· 229 */ 230 static inline void ctcm_clear_busy_do(struct net_device *dev) 231 { 232 + clear_bit(0, &(((struct ctcm_priv *)dev->ml_priv)->tbusy)); 233 netif_wake_queue(dev); 234 } 235 236 static inline void ctcm_clear_busy(struct net_device *dev) 237 { 238 struct mpc_group *grp; 239 + grp = ((struct ctcm_priv *)dev->ml_priv)->mpcg; 240 241 if (!(grp && grp->in_sweep)) 242 ctcm_clear_busy_do(dev); ··· 246 static inline int ctcm_test_and_set_busy(struct net_device *dev) 247 { 248 netif_stop_queue(dev); 249 + return test_and_set_bit(0, 250 + &(((struct ctcm_priv *)dev->ml_priv)->tbusy)); 251 } 252 253 extern int loglevel; ··· 292 #define IS_MPC(p) ((p)->protocol == CTCM_PROTO_MPC) 293 294 /* test if struct ctcm_priv of struct net_device has MPC protocol setting */ 295 + #define IS_MPCDEV(dev) IS_MPC((struct ctcm_priv *)dev->ml_priv) 296 297 static inline gfp_t gfp_type(void) 298 {
+23 -23
drivers/s390/net/ctcm_mpc.c
··· 312 CTCM_FUNTAIL, device); 313 return NULL; 314 } 315 - priv = dev->priv; 316 if (priv == NULL) { 317 CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, 318 - "%s(%s): dev->priv is NULL", 319 CTCM_FUNTAIL, device); 320 return NULL; 321 } ··· 344 dev = ctcmpc_get_dev(port_num); 345 if (dev == NULL) 346 return 1; 347 - priv = dev->priv; 348 grp = priv->mpcg; 349 350 grp->allochanfunc = callback; ··· 416 dev = ctcmpc_get_dev(port_num); 417 if (dev == NULL) 418 return; 419 - priv = dev->priv; 420 grp = priv->mpcg; 421 rch = priv->channel[READ]; 422 wch = priv->channel[WRITE]; ··· 534 dev = ctcmpc_get_dev(port_num); 535 if (dev == NULL) 536 return; 537 - priv = dev->priv; 538 grp = priv->mpcg; 539 540 CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_DEBUG, ··· 570 dev = ctcmpc_get_dev(port_num); 571 if (dev == NULL) 572 return; 573 - priv = dev->priv; 574 grp = priv->mpcg; 575 576 CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_DEBUG, ··· 619 { 620 struct channel *rch = mpcginfo->ch; 621 struct net_device *dev = rch->netdev; 622 - struct ctcm_priv *priv = dev->priv; 623 struct mpc_group *grp = priv->mpcg; 624 struct channel *ch = priv->channel[WRITE]; 625 ··· 650 static void ctcmpc_send_sweep_resp(struct channel *rch) 651 { 652 struct net_device *dev = rch->netdev; 653 - struct ctcm_priv *priv = dev->priv; 654 struct mpc_group *grp = priv->mpcg; 655 int rc = 0; 656 struct th_sweep *header; ··· 712 { 713 struct channel *rch = mpcginfo->ch; 714 struct net_device *dev = rch->netdev; 715 - struct ctcm_priv *priv = dev->priv; 716 struct mpc_group *grp = priv->mpcg; 717 struct channel *ch = priv->channel[WRITE]; 718 ··· 846 static void mpc_action_go_ready(fsm_instance *fsm, int event, void *arg) 847 { 848 struct net_device *dev = arg; 849 - struct ctcm_priv *priv = dev->priv; 850 struct mpc_group *grp = priv->mpcg; 851 852 if (grp == NULL) { ··· 890 void mpc_group_ready(unsigned long adev) 891 { 892 struct net_device *dev = (struct net_device *)adev; 893 - struct ctcm_priv *priv = dev->priv; 894 struct 
mpc_group *grp = priv->mpcg; 895 struct channel *ch = NULL; 896 ··· 946 void mpc_channel_action(struct channel *ch, int direction, int action) 947 { 948 struct net_device *dev = ch->netdev; 949 - struct ctcm_priv *priv = dev->priv; 950 struct mpc_group *grp = priv->mpcg; 951 952 if (grp == NULL) { ··· 1056 static void ctcmpc_unpack_skb(struct channel *ch, struct sk_buff *pskb) 1057 { 1058 struct net_device *dev = ch->netdev; 1059 - struct ctcm_priv *priv = dev->priv; 1060 struct mpc_group *grp = priv->mpcg; 1061 struct pdu *curr_pdu; 1062 struct mpcg_info *mpcginfo; ··· 1254 struct channel *ch = (struct channel *)thischan; 1255 struct sk_buff *skb; 1256 struct net_device *dev = ch->netdev; 1257 - struct ctcm_priv *priv = dev->priv; 1258 struct mpc_group *grp = priv->mpcg; 1259 1260 CTCM_PR_DEBUG("%s cp:%i enter: %s() %s\n", ··· 1376 BUG_ON(dev == NULL); 1377 CTCM_PR_DEBUG("Enter %s: %s\n", __func__, dev->name); 1378 1379 - priv = dev->priv; 1380 grp = priv->mpcg; 1381 grp->flow_off_called = 0; 1382 fsm_deltimer(&grp->timer); ··· 1482 1483 BUG_ON(dev == NULL); 1484 1485 - priv = dev->priv; 1486 grp = priv->mpcg; 1487 wch = priv->channel[WRITE]; 1488 rch = priv->channel[READ]; ··· 1520 if (ch) { 1521 dev = ch->netdev; 1522 if (dev) { 1523 - priv = dev->priv; 1524 if (priv) { 1525 CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_NOTICE, 1526 "%s: %s: %s\n", ··· 1568 { 1569 struct channel *ch = mpcginfo->ch; 1570 struct net_device *dev = ch->netdev; 1571 - struct ctcm_priv *priv = dev->priv; 1572 struct mpc_group *grp = priv->mpcg; 1573 struct xid2 *xid = mpcginfo->xid; 1574 int rc = 0; ··· 1865 { 1866 struct channel *ch = arg; 1867 struct net_device *dev = ch->netdev; 1868 - struct ctcm_priv *priv = dev->priv; 1869 struct mpc_group *grp = priv->mpcg; 1870 1871 CTCM_PR_DEBUG("Enter %s: cp=%i ch=0x%p id=%s\n", ··· 1905 static void mpc_action_doxid7(fsm_instance *fsm, int event, void *arg) 1906 { 1907 struct net_device *dev = arg; 1908 - struct ctcm_priv *priv = dev->priv; 1909 struct 
mpc_group *grp = NULL; 1910 int direction; 1911 int send = 0; ··· 1982 struct mpcg_info *mpcginfo = arg; 1983 struct channel *ch = mpcginfo->ch; 1984 struct net_device *dev = ch->netdev; 1985 - struct ctcm_priv *priv = dev->priv; 1986 struct mpc_group *grp = priv->mpcg; 1987 1988 CTCM_PR_DEBUG("%s: ch-id:%s xid2:%i xid7:%i xidt_p2:%i \n", ··· 2044 struct mpcg_info *mpcginfo = arg; 2045 struct channel *ch = mpcginfo->ch; 2046 struct net_device *dev = ch->netdev; 2047 - struct ctcm_priv *priv = dev->priv; 2048 struct mpc_group *grp = priv->mpcg; 2049 2050 CTCM_PR_DEBUG("Enter %s: cp=%i ch=0x%p id=%s\n", ··· 2096 __u32 new_len = 0; 2097 struct sk_buff *skb; 2098 struct qllc *qllcptr; 2099 - struct ctcm_priv *priv = dev->priv; 2100 struct mpc_group *grp = priv->mpcg; 2101 2102 CTCM_PR_DEBUG("%s: GROUP STATE: %s\n",
··· 312 CTCM_FUNTAIL, device); 313 return NULL; 314 } 315 + priv = dev->ml_priv; 316 if (priv == NULL) { 317 CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, 318 + "%s(%s): dev->ml_priv is NULL", 319 CTCM_FUNTAIL, device); 320 return NULL; 321 } ··· 344 dev = ctcmpc_get_dev(port_num); 345 if (dev == NULL) 346 return 1; 347 + priv = dev->ml_priv; 348 grp = priv->mpcg; 349 350 grp->allochanfunc = callback; ··· 416 dev = ctcmpc_get_dev(port_num); 417 if (dev == NULL) 418 return; 419 + priv = dev->ml_priv; 420 grp = priv->mpcg; 421 rch = priv->channel[READ]; 422 wch = priv->channel[WRITE]; ··· 534 dev = ctcmpc_get_dev(port_num); 535 if (dev == NULL) 536 return; 537 + priv = dev->ml_priv; 538 grp = priv->mpcg; 539 540 CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_DEBUG, ··· 570 dev = ctcmpc_get_dev(port_num); 571 if (dev == NULL) 572 return; 573 + priv = dev->ml_priv; 574 grp = priv->mpcg; 575 576 CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_DEBUG, ··· 619 { 620 struct channel *rch = mpcginfo->ch; 621 struct net_device *dev = rch->netdev; 622 + struct ctcm_priv *priv = dev->ml_priv; 623 struct mpc_group *grp = priv->mpcg; 624 struct channel *ch = priv->channel[WRITE]; 625 ··· 650 static void ctcmpc_send_sweep_resp(struct channel *rch) 651 { 652 struct net_device *dev = rch->netdev; 653 + struct ctcm_priv *priv = dev->ml_priv; 654 struct mpc_group *grp = priv->mpcg; 655 int rc = 0; 656 struct th_sweep *header; ··· 712 { 713 struct channel *rch = mpcginfo->ch; 714 struct net_device *dev = rch->netdev; 715 + struct ctcm_priv *priv = dev->ml_priv; 716 struct mpc_group *grp = priv->mpcg; 717 struct channel *ch = priv->channel[WRITE]; 718 ··· 846 static void mpc_action_go_ready(fsm_instance *fsm, int event, void *arg) 847 { 848 struct net_device *dev = arg; 849 + struct ctcm_priv *priv = dev->ml_priv; 850 struct mpc_group *grp = priv->mpcg; 851 852 if (grp == NULL) { ··· 890 void mpc_group_ready(unsigned long adev) 891 { 892 struct net_device *dev = (struct net_device *)adev; 893 + struct ctcm_priv *priv = 
dev->ml_priv; 894 struct mpc_group *grp = priv->mpcg; 895 struct channel *ch = NULL; 896 ··· 946 void mpc_channel_action(struct channel *ch, int direction, int action) 947 { 948 struct net_device *dev = ch->netdev; 949 + struct ctcm_priv *priv = dev->ml_priv; 950 struct mpc_group *grp = priv->mpcg; 951 952 if (grp == NULL) { ··· 1056 static void ctcmpc_unpack_skb(struct channel *ch, struct sk_buff *pskb) 1057 { 1058 struct net_device *dev = ch->netdev; 1059 + struct ctcm_priv *priv = dev->ml_priv; 1060 struct mpc_group *grp = priv->mpcg; 1061 struct pdu *curr_pdu; 1062 struct mpcg_info *mpcginfo; ··· 1254 struct channel *ch = (struct channel *)thischan; 1255 struct sk_buff *skb; 1256 struct net_device *dev = ch->netdev; 1257 + struct ctcm_priv *priv = dev->ml_priv; 1258 struct mpc_group *grp = priv->mpcg; 1259 1260 CTCM_PR_DEBUG("%s cp:%i enter: %s() %s\n", ··· 1376 BUG_ON(dev == NULL); 1377 CTCM_PR_DEBUG("Enter %s: %s\n", __func__, dev->name); 1378 1379 + priv = dev->ml_priv; 1380 grp = priv->mpcg; 1381 grp->flow_off_called = 0; 1382 fsm_deltimer(&grp->timer); ··· 1482 1483 BUG_ON(dev == NULL); 1484 1485 + priv = dev->ml_priv; 1486 grp = priv->mpcg; 1487 wch = priv->channel[WRITE]; 1488 rch = priv->channel[READ]; ··· 1520 if (ch) { 1521 dev = ch->netdev; 1522 if (dev) { 1523 + priv = dev->ml_priv; 1524 if (priv) { 1525 CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_NOTICE, 1526 "%s: %s: %s\n", ··· 1568 { 1569 struct channel *ch = mpcginfo->ch; 1570 struct net_device *dev = ch->netdev; 1571 + struct ctcm_priv *priv = dev->ml_priv; 1572 struct mpc_group *grp = priv->mpcg; 1573 struct xid2 *xid = mpcginfo->xid; 1574 int rc = 0; ··· 1865 { 1866 struct channel *ch = arg; 1867 struct net_device *dev = ch->netdev; 1868 + struct ctcm_priv *priv = dev->ml_priv; 1869 struct mpc_group *grp = priv->mpcg; 1870 1871 CTCM_PR_DEBUG("Enter %s: cp=%i ch=0x%p id=%s\n", ··· 1905 static void mpc_action_doxid7(fsm_instance *fsm, int event, void *arg) 1906 { 1907 struct net_device *dev = arg; 1908 + 
struct ctcm_priv *priv = dev->ml_priv; 1909 struct mpc_group *grp = NULL; 1910 int direction; 1911 int send = 0; ··· 1982 struct mpcg_info *mpcginfo = arg; 1983 struct channel *ch = mpcginfo->ch; 1984 struct net_device *dev = ch->netdev; 1985 + struct ctcm_priv *priv = dev->ml_priv; 1986 struct mpc_group *grp = priv->mpcg; 1987 1988 CTCM_PR_DEBUG("%s: ch-id:%s xid2:%i xid7:%i xidt_p2:%i \n", ··· 2044 struct mpcg_info *mpcginfo = arg; 2045 struct channel *ch = mpcginfo->ch; 2046 struct net_device *dev = ch->netdev; 2047 + struct ctcm_priv *priv = dev->ml_priv; 2048 struct mpc_group *grp = priv->mpcg; 2049 2050 CTCM_PR_DEBUG("Enter %s: cp=%i ch=0x%p id=%s\n", ··· 2096 __u32 new_len = 0; 2097 struct sk_buff *skb; 2098 struct qllc *qllcptr; 2099 + struct ctcm_priv *priv = dev->ml_priv; 2100 struct mpc_group *grp = priv->mpcg; 2101 2102 CTCM_PR_DEBUG("%s: GROUP STATE: %s\n",
+2 -1
drivers/s390/net/lcs.c
··· 1412 } 1413 /* How far in the ccw chain have we processed? */ 1414 if ((channel->state != LCS_CH_STATE_INIT) && 1415 - (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC)) { 1416 index = (struct ccw1 *) __va((addr_t) irb->scsw.cmd.cpa) 1417 - channel->ccws; 1418 if ((irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED) ||
··· 1412 } 1413 /* How far in the ccw chain have we processed? */ 1414 if ((channel->state != LCS_CH_STATE_INIT) && 1415 + (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) && 1416 + (irb->scsw.cmd.cpa != 0)) { 1417 index = (struct ccw1 *) __va((addr_t) irb->scsw.cmd.cpa) 1418 - channel->ccws; 1419 if ((irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED) ||
+1
drivers/s390/net/qeth_core.h
··· 689 struct list_head list; 690 __u8 mc_addr[MAX_ADDR_LEN]; 691 unsigned char mc_addrlen; 692 }; 693 694 struct qeth_card {
··· 689 struct list_head list; 690 __u8 mc_addr[MAX_ADDR_LEN]; 691 unsigned char mc_addrlen; 692 + int is_vmac; 693 }; 694 695 struct qeth_card {
+1 -1
drivers/s390/net/qeth_core_main.c
··· 3024 struct qdio_buffer *buffer, int is_tso, int *next_element_to_fill, 3025 int offset) 3026 { 3027 - int length = skb->len; 3028 int length_here; 3029 int element; 3030 char *data;
··· 3024 struct qdio_buffer *buffer, int is_tso, int *next_element_to_fill, 3025 int offset) 3026 { 3027 + int length = skb->len - offset; 3028 int length_here; 3029 int element; 3030 char *data;
+22 -5
drivers/s390/net/qeth_l2_main.c
··· 177 qeth_l2_send_delgroupmac_cb); 178 } 179 180 - static void qeth_l2_add_mc(struct qeth_card *card, __u8 *mac) 181 { 182 struct qeth_mc_mac *mc; 183 184 mc = kmalloc(sizeof(struct qeth_mc_mac), GFP_ATOMIC); 185 ··· 189 190 memcpy(mc->mc_addr, mac, OSA_ADDR_LEN); 191 mc->mc_addrlen = OSA_ADDR_LEN; 192 193 - if (!qeth_l2_send_setgroupmac(card, mac)) 194 list_add_tail(&mc->list, &card->mc_list); 195 else 196 kfree(mc); ··· 210 211 spin_lock_bh(&card->mclock); 212 list_for_each_entry_safe(mc, tmp, &card->mc_list, list) { 213 - qeth_l2_send_delgroupmac(card, mc->mc_addr); 214 list_del(&mc->list); 215 kfree(mc); 216 } ··· 603 static void qeth_l2_set_multicast_list(struct net_device *dev) 604 { 605 struct qeth_card *card = dev->ml_priv; 606 - struct dev_mc_list *dm; 607 608 if (card->info.type == QETH_CARD_TYPE_OSN) 609 return ; ··· 612 qeth_l2_del_all_mc(card); 613 spin_lock_bh(&card->mclock); 614 for (dm = dev->mc_list; dm; dm = dm->next) 615 - qeth_l2_add_mc(card, dm->dmi_addr); 616 spin_unlock_bh(&card->mclock); 617 if (!qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE)) 618 return;
··· 177 qeth_l2_send_delgroupmac_cb); 178 } 179 180 + static void qeth_l2_add_mc(struct qeth_card *card, __u8 *mac, int vmac) 181 { 182 struct qeth_mc_mac *mc; 183 + int rc; 184 185 mc = kmalloc(sizeof(struct qeth_mc_mac), GFP_ATOMIC); 186 ··· 188 189 memcpy(mc->mc_addr, mac, OSA_ADDR_LEN); 190 mc->mc_addrlen = OSA_ADDR_LEN; 191 + mc->is_vmac = vmac; 192 193 + if (vmac) { 194 + rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC, 195 + NULL); 196 + } else { 197 + rc = qeth_l2_send_setgroupmac(card, mac); 198 + } 199 + 200 + if (!rc) 201 list_add_tail(&mc->list, &card->mc_list); 202 else 203 kfree(mc); ··· 201 202 spin_lock_bh(&card->mclock); 203 list_for_each_entry_safe(mc, tmp, &card->mc_list, list) { 204 + if (mc->is_vmac) 205 + qeth_l2_send_setdelmac(card, mc->mc_addr, 206 + IPA_CMD_DELVMAC, NULL); 207 + else 208 + qeth_l2_send_delgroupmac(card, mc->mc_addr); 209 list_del(&mc->list); 210 kfree(mc); 211 } ··· 590 static void qeth_l2_set_multicast_list(struct net_device *dev) 591 { 592 struct qeth_card *card = dev->ml_priv; 593 + struct dev_addr_list *dm; 594 595 if (card->info.type == QETH_CARD_TYPE_OSN) 596 return ; ··· 599 qeth_l2_del_all_mc(card); 600 spin_lock_bh(&card->mclock); 601 for (dm = dev->mc_list; dm; dm = dm->next) 602 + qeth_l2_add_mc(card, dm->da_addr, 0); 603 + 604 + for (dm = dev->uc_list; dm; dm = dm->next) 605 + qeth_l2_add_mc(card, dm->da_addr, 1); 606 + 607 spin_unlock_bh(&card->mclock); 608 if (!qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE)) 609 return;
+1 -1
drivers/s390/net/qeth_l3_sys.c
··· 136 return -EINVAL; 137 138 if (!qeth_is_supported(card, IPA_IPV6)) { 139 - return -ENOTSUPP; 140 } 141 142 return qeth_l3_dev_route_store(card, &card->options.route6,
··· 136 return -EINVAL; 137 138 if (!qeth_is_supported(card, IPA_IPV6)) { 139 + return -EOPNOTSUPP; 140 } 141 142 return qeth_l3_dev_route_store(card, &card->options.route6,
+10 -2
include/net/sch_generic.h
··· 217 return qdisc_lock(root); 218 } 219 220 static inline struct net_device *qdisc_dev(struct Qdisc *qdisc) 221 { 222 return qdisc->dev_queue->dev; ··· 232 233 static inline void sch_tree_lock(struct Qdisc *q) 234 { 235 - spin_lock_bh(qdisc_root_lock(q)); 236 } 237 238 static inline void sch_tree_unlock(struct Qdisc *q) 239 { 240 - spin_unlock_bh(qdisc_root_lock(q)); 241 } 242 243 #define tcf_tree_lock(tp) sch_tree_lock((tp)->q)
··· 217 return qdisc_lock(root); 218 } 219 220 + static inline spinlock_t *qdisc_root_sleeping_lock(struct Qdisc *qdisc) 221 + { 222 + struct Qdisc *root = qdisc_root_sleeping(qdisc); 223 + 224 + ASSERT_RTNL(); 225 + return qdisc_lock(root); 226 + } 227 + 228 static inline struct net_device *qdisc_dev(struct Qdisc *qdisc) 229 { 230 return qdisc->dev_queue->dev; ··· 224 225 static inline void sch_tree_lock(struct Qdisc *q) 226 { 227 + spin_lock_bh(qdisc_root_sleeping_lock(q)); 228 } 229 230 static inline void sch_tree_unlock(struct Qdisc *q) 231 { 232 + spin_unlock_bh(qdisc_root_sleeping_lock(q)); 233 } 234 235 #define tcf_tree_lock(tp) sch_tree_lock((tp)->q)
+2 -2
net/ipv4/route.c
··· 3121 static struct ctl_table ipv4_skeleton[] = 3122 { 3123 { .procname = "route", .ctl_name = NET_IPV4_ROUTE, 3124 - .child = ipv4_route_table}, 3125 { .procname = "neigh", .ctl_name = NET_IPV4_NEIGH, 3126 - .child = empty}, 3127 { } 3128 }; 3129
··· 3121 static struct ctl_table ipv4_skeleton[] = 3122 { 3123 { .procname = "route", .ctl_name = NET_IPV4_ROUTE, 3124 + .mode = 0555, .child = ipv4_route_table}, 3125 { .procname = "neigh", .ctl_name = NET_IPV4_NEIGH, 3126 + .mode = 0555, .child = empty}, 3127 { } 3128 }; 3129
+4 -2
net/ipv4/tcp_output.c
··· 468 } 469 if (likely(sysctl_tcp_window_scaling)) { 470 opts->ws = tp->rx_opt.rcv_wscale; 471 - size += TCPOLEN_WSCALE_ALIGNED; 472 } 473 if (likely(sysctl_tcp_sack)) { 474 opts->options |= OPTION_SACK_ADVERTISE; ··· 510 511 if (likely(ireq->wscale_ok)) { 512 opts->ws = ireq->rcv_wscale; 513 - size += TCPOLEN_WSCALE_ALIGNED; 514 } 515 if (likely(doing_ts)) { 516 opts->options |= OPTION_TS;
··· 468 } 469 if (likely(sysctl_tcp_window_scaling)) { 470 opts->ws = tp->rx_opt.rcv_wscale; 471 + if(likely(opts->ws)) 472 + size += TCPOLEN_WSCALE_ALIGNED; 473 } 474 if (likely(sysctl_tcp_sack)) { 475 opts->options |= OPTION_SACK_ADVERTISE; ··· 509 510 if (likely(ireq->wscale_ok)) { 511 opts->ws = ireq->rcv_wscale; 512 + if(likely(opts->ws)) 513 + size += TCPOLEN_WSCALE_ALIGNED; 514 } 515 if (likely(doing_ts)) { 516 opts->options |= OPTION_TS;
+12 -12
net/mac80211/debugfs_netdev.c
··· 248 static void add_sta_files(struct ieee80211_sub_if_data *sdata) 249 { 250 DEBUGFS_ADD(drop_unencrypted, sta); 251 - DEBUGFS_ADD(force_unicast_rateidx, ap); 252 - DEBUGFS_ADD(max_ratectrl_rateidx, ap); 253 254 DEBUGFS_ADD(state, sta); 255 DEBUGFS_ADD(bssid, sta); ··· 283 static void add_wds_files(struct ieee80211_sub_if_data *sdata) 284 { 285 DEBUGFS_ADD(drop_unencrypted, wds); 286 - DEBUGFS_ADD(force_unicast_rateidx, ap); 287 - DEBUGFS_ADD(max_ratectrl_rateidx, ap); 288 289 DEBUGFS_ADD(peer, wds); 290 } ··· 292 static void add_vlan_files(struct ieee80211_sub_if_data *sdata) 293 { 294 DEBUGFS_ADD(drop_unencrypted, vlan); 295 - DEBUGFS_ADD(force_unicast_rateidx, ap); 296 - DEBUGFS_ADD(max_ratectrl_rateidx, ap); 297 } 298 299 static void add_monitor_files(struct ieee80211_sub_if_data *sdata) ··· 381 static void del_sta_files(struct ieee80211_sub_if_data *sdata) 382 { 383 DEBUGFS_DEL(drop_unencrypted, sta); 384 - DEBUGFS_DEL(force_unicast_rateidx, ap); 385 - DEBUGFS_DEL(max_ratectrl_rateidx, ap); 386 387 DEBUGFS_DEL(state, sta); 388 DEBUGFS_DEL(bssid, sta); ··· 416 static void del_wds_files(struct ieee80211_sub_if_data *sdata) 417 { 418 DEBUGFS_DEL(drop_unencrypted, wds); 419 - DEBUGFS_DEL(force_unicast_rateidx, ap); 420 - DEBUGFS_DEL(max_ratectrl_rateidx, ap); 421 422 DEBUGFS_DEL(peer, wds); 423 } ··· 425 static void del_vlan_files(struct ieee80211_sub_if_data *sdata) 426 { 427 DEBUGFS_DEL(drop_unencrypted, vlan); 428 - DEBUGFS_DEL(force_unicast_rateidx, ap); 429 - DEBUGFS_DEL(max_ratectrl_rateidx, ap); 430 } 431 432 static void del_monitor_files(struct ieee80211_sub_if_data *sdata)
··· 248 static void add_sta_files(struct ieee80211_sub_if_data *sdata) 249 { 250 DEBUGFS_ADD(drop_unencrypted, sta); 251 + DEBUGFS_ADD(force_unicast_rateidx, sta); 252 + DEBUGFS_ADD(max_ratectrl_rateidx, sta); 253 254 DEBUGFS_ADD(state, sta); 255 DEBUGFS_ADD(bssid, sta); ··· 283 static void add_wds_files(struct ieee80211_sub_if_data *sdata) 284 { 285 DEBUGFS_ADD(drop_unencrypted, wds); 286 + DEBUGFS_ADD(force_unicast_rateidx, wds); 287 + DEBUGFS_ADD(max_ratectrl_rateidx, wds); 288 289 DEBUGFS_ADD(peer, wds); 290 } ··· 292 static void add_vlan_files(struct ieee80211_sub_if_data *sdata) 293 { 294 DEBUGFS_ADD(drop_unencrypted, vlan); 295 + DEBUGFS_ADD(force_unicast_rateidx, vlan); 296 + DEBUGFS_ADD(max_ratectrl_rateidx, vlan); 297 } 298 299 static void add_monitor_files(struct ieee80211_sub_if_data *sdata) ··· 381 static void del_sta_files(struct ieee80211_sub_if_data *sdata) 382 { 383 DEBUGFS_DEL(drop_unencrypted, sta); 384 + DEBUGFS_DEL(force_unicast_rateidx, sta); 385 + DEBUGFS_DEL(max_ratectrl_rateidx, sta); 386 387 DEBUGFS_DEL(state, sta); 388 DEBUGFS_DEL(bssid, sta); ··· 416 static void del_wds_files(struct ieee80211_sub_if_data *sdata) 417 { 418 DEBUGFS_DEL(drop_unencrypted, wds); 419 + DEBUGFS_DEL(force_unicast_rateidx, wds); 420 + DEBUGFS_DEL(max_ratectrl_rateidx, wds); 421 422 DEBUGFS_DEL(peer, wds); 423 } ··· 425 static void del_vlan_files(struct ieee80211_sub_if_data *sdata) 426 { 427 DEBUGFS_DEL(drop_unencrypted, vlan); 428 + DEBUGFS_DEL(force_unicast_rateidx, vlan); 429 + DEBUGFS_DEL(max_ratectrl_rateidx, vlan); 430 } 431 432 static void del_monitor_files(struct ieee80211_sub_if_data *sdata)
+6
net/mac80211/ieee80211_i.h
··· 470 struct dentry *auth_transaction; 471 struct dentry *flags; 472 struct dentry *num_beacons_sta; 473 } sta; 474 struct { 475 struct dentry *drop_unencrypted; ··· 485 struct { 486 struct dentry *drop_unencrypted; 487 struct dentry *peer; 488 } wds; 489 struct { 490 struct dentry *drop_unencrypted; 491 } vlan; 492 struct { 493 struct dentry *mode;
··· 470 struct dentry *auth_transaction; 471 struct dentry *flags; 472 struct dentry *num_beacons_sta; 473 + struct dentry *force_unicast_rateidx; 474 + struct dentry *max_ratectrl_rateidx; 475 } sta; 476 struct { 477 struct dentry *drop_unencrypted; ··· 483 struct { 484 struct dentry *drop_unencrypted; 485 struct dentry *peer; 486 + struct dentry *force_unicast_rateidx; 487 + struct dentry *max_ratectrl_rateidx; 488 } wds; 489 struct { 490 struct dentry *drop_unencrypted; 491 + struct dentry *force_unicast_rateidx; 492 + struct dentry *max_ratectrl_rateidx; 493 } vlan; 494 struct { 495 struct dentry *mode;
+1 -1
net/mac80211/mesh.c
··· 383 hlist_for_each_safe(p, q, &newtbl->hash_buckets[i]) 384 tbl->free_node(p, 0); 385 } 386 - __mesh_table_free(tbl); 387 endgrow: 388 return NULL; 389 }
··· 383 hlist_for_each_safe(p, q, &newtbl->hash_buckets[i]) 384 tbl->free_node(p, 0); 385 } 386 + __mesh_table_free(newtbl); 387 endgrow: 388 return NULL; 389 }
+11 -41
net/mac80211/mlme.c
··· 478 static void ieee80211_sta_send_associnfo(struct net_device *dev, 479 struct ieee80211_if_sta *ifsta) 480 { 481 - char *buf; 482 - size_t len; 483 - int i; 484 union iwreq_data wrqu; 485 486 - if (!ifsta->assocreq_ies && !ifsta->assocresp_ies) 487 - return; 488 - 489 - buf = kmalloc(50 + 2 * (ifsta->assocreq_ies_len + 490 - ifsta->assocresp_ies_len), GFP_KERNEL); 491 - if (!buf) 492 - return; 493 - 494 - len = sprintf(buf, "ASSOCINFO("); 495 if (ifsta->assocreq_ies) { 496 - len += sprintf(buf + len, "ReqIEs="); 497 - for (i = 0; i < ifsta->assocreq_ies_len; i++) { 498 - len += sprintf(buf + len, "%02x", 499 - ifsta->assocreq_ies[i]); 500 - } 501 } 502 if (ifsta->assocresp_ies) { 503 - if (ifsta->assocreq_ies) 504 - len += sprintf(buf + len, " "); 505 - len += sprintf(buf + len, "RespIEs="); 506 - for (i = 0; i < ifsta->assocresp_ies_len; i++) { 507 - len += sprintf(buf + len, "%02x", 508 - ifsta->assocresp_ies[i]); 509 - } 510 } 511 - len += sprintf(buf + len, ")"); 512 - 513 - if (len > IW_CUSTOM_MAX) { 514 - len = sprintf(buf, "ASSOCRESPIE="); 515 - for (i = 0; i < ifsta->assocresp_ies_len; i++) { 516 - len += sprintf(buf + len, "%02x", 517 - ifsta->assocresp_ies[i]); 518 - } 519 - } 520 - 521 - memset(&wrqu, 0, sizeof(wrqu)); 522 - wrqu.data.length = len; 523 - wireless_send_event(dev, IWEVCUSTOM, &wrqu, buf); 524 - 525 - kfree(buf); 526 } 527 528 ··· 783 } 784 } 785 786 - if (count == 8) { 787 pos = skb_put(skb, rates_len - count + 2); 788 *pos++ = WLAN_EID_EXT_SUPP_RATES; 789 *pos++ = rates_len - count; ··· 2838 jiffies); 2839 #endif /* CONFIG_MAC80211_IBSS_DEBUG */ 2840 if (beacon_timestamp > rx_timestamp) { 2841 - #ifndef CONFIG_MAC80211_IBSS_DEBUG 2842 printk(KERN_DEBUG "%s: beacon TSF higher than " 2843 "local TSF - IBSS merge with BSSID %s\n", 2844 dev->name, print_mac(mac, mgmt->bssid));
··· 478 static void ieee80211_sta_send_associnfo(struct net_device *dev, 479 struct ieee80211_if_sta *ifsta) 480 { 481 union iwreq_data wrqu; 482 483 if (ifsta->assocreq_ies) { 484 + memset(&wrqu, 0, sizeof(wrqu)); 485 + wrqu.data.length = ifsta->assocreq_ies_len; 486 + wireless_send_event(dev, IWEVASSOCREQIE, &wrqu, 487 + ifsta->assocreq_ies); 488 } 489 + 490 if (ifsta->assocresp_ies) { 491 + memset(&wrqu, 0, sizeof(wrqu)); 492 + wrqu.data.length = ifsta->assocresp_ies_len; 493 + wireless_send_event(dev, IWEVASSOCRESPIE, &wrqu, 494 + ifsta->assocresp_ies); 495 } 496 } 497 498 ··· 813 } 814 } 815 816 + if (rates_len > count) { 817 pos = skb_put(skb, rates_len - count + 2); 818 *pos++ = WLAN_EID_EXT_SUPP_RATES; 819 *pos++ = rates_len - count; ··· 2868 jiffies); 2869 #endif /* CONFIG_MAC80211_IBSS_DEBUG */ 2870 if (beacon_timestamp > rx_timestamp) { 2871 + #ifdef CONFIG_MAC80211_IBSS_DEBUG 2872 printk(KERN_DEBUG "%s: beacon TSF higher than " 2873 "local TSF - IBSS merge with BSSID %s\n", 2874 dev->name, print_mac(mac, mgmt->bssid));
+1 -1
net/rfkill/rfkill.c
··· 377 { 378 struct rfkill *rfkill = to_rfkill(dev); 379 380 - return sprintf(buf, "%d", rfkill->user_claim); 381 } 382 383 static ssize_t rfkill_claim_store(struct device *dev,
··· 377 { 378 struct rfkill *rfkill = to_rfkill(dev); 379 380 + return sprintf(buf, "%d\n", rfkill->user_claim); 381 } 382 383 static ssize_t rfkill_claim_store(struct device *dev,
+13 -5
net/sched/sch_api.c
··· 624 struct Qdisc *oqdisc = dev_queue->qdisc_sleeping; 625 spinlock_t *root_lock; 626 627 - root_lock = qdisc_root_lock(oqdisc); 628 spin_lock_bh(root_lock); 629 630 /* Prune old scheduler */ ··· 635 if (qdisc == NULL) 636 qdisc = &noop_qdisc; 637 dev_queue->qdisc_sleeping = qdisc; 638 - dev_queue->qdisc = &noop_qdisc; 639 640 spin_unlock_bh(root_lock); 641 ··· 830 sch->stab = stab; 831 } 832 if (tca[TCA_RATE]) { 833 err = gen_new_estimator(&sch->bstats, &sch->rate_est, 834 - qdisc_root_lock(sch), 835 - tca[TCA_RATE]); 836 if (err) { 837 /* 838 * Any broken qdiscs that would require ··· 891 892 if (tca[TCA_RATE]) 893 gen_replace_estimator(&sch->bstats, &sch->rate_est, 894 - qdisc_root_lock(sch), tca[TCA_RATE]); 895 return 0; 896 } 897
··· 624 struct Qdisc *oqdisc = dev_queue->qdisc_sleeping; 625 spinlock_t *root_lock; 626 627 + root_lock = qdisc_lock(oqdisc); 628 spin_lock_bh(root_lock); 629 630 /* Prune old scheduler */ ··· 635 if (qdisc == NULL) 636 qdisc = &noop_qdisc; 637 dev_queue->qdisc_sleeping = qdisc; 638 + rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc); 639 640 spin_unlock_bh(root_lock); 641 ··· 830 sch->stab = stab; 831 } 832 if (tca[TCA_RATE]) { 833 + spinlock_t *root_lock; 834 + 835 + if ((sch->parent != TC_H_ROOT) && 836 + !(sch->flags & TCQ_F_INGRESS)) 837 + root_lock = qdisc_root_sleeping_lock(sch); 838 + else 839 + root_lock = qdisc_lock(sch); 840 + 841 err = gen_new_estimator(&sch->bstats, &sch->rate_est, 842 + root_lock, tca[TCA_RATE]); 843 if (err) { 844 /* 845 * Any broken qdiscs that would require ··· 884 885 if (tca[TCA_RATE]) 886 gen_replace_estimator(&sch->bstats, &sch->rate_est, 887 + qdisc_root_sleeping_lock(sch), 888 + tca[TCA_RATE]); 889 return 0; 890 } 891
+2 -2
net/sched/sch_cbq.c
··· 1839 1840 if (tca[TCA_RATE]) 1841 gen_replace_estimator(&cl->bstats, &cl->rate_est, 1842 - qdisc_root_lock(sch), 1843 tca[TCA_RATE]); 1844 return 0; 1845 } ··· 1930 1931 if (tca[TCA_RATE]) 1932 gen_new_estimator(&cl->bstats, &cl->rate_est, 1933 - qdisc_root_lock(sch), tca[TCA_RATE]); 1934 1935 *arg = (unsigned long)cl; 1936 return 0;
··· 1839 1840 if (tca[TCA_RATE]) 1841 gen_replace_estimator(&cl->bstats, &cl->rate_est, 1842 + qdisc_root_sleeping_lock(sch), 1843 tca[TCA_RATE]); 1844 return 0; 1845 } ··· 1930 1931 if (tca[TCA_RATE]) 1932 gen_new_estimator(&cl->bstats, &cl->rate_est, 1933 + qdisc_root_sleeping_lock(sch), tca[TCA_RATE]); 1934 1935 *arg = (unsigned long)cl; 1936 return 0;
+2 -2
net/sched/sch_generic.c
··· 634 if (!(qdisc->flags & TCQ_F_BUILTIN)) 635 set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state); 636 637 - dev_queue->qdisc = qdisc_default; 638 qdisc_reset(qdisc); 639 640 spin_unlock_bh(qdisc_lock(qdisc)); ··· 709 struct Qdisc *qdisc_default = _qdisc_default; 710 711 if (qdisc) { 712 - dev_queue->qdisc = qdisc_default; 713 dev_queue->qdisc_sleeping = qdisc_default; 714 715 qdisc_destroy(qdisc);
··· 634 if (!(qdisc->flags & TCQ_F_BUILTIN)) 635 set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state); 636 637 + rcu_assign_pointer(dev_queue->qdisc, qdisc_default); 638 qdisc_reset(qdisc); 639 640 spin_unlock_bh(qdisc_lock(qdisc)); ··· 709 struct Qdisc *qdisc_default = _qdisc_default; 710 711 if (qdisc) { 712 + rcu_assign_pointer(dev_queue->qdisc, qdisc_default); 713 dev_queue->qdisc_sleeping = qdisc_default; 714 715 qdisc_destroy(qdisc);
+2 -2
net/sched/sch_hfsc.c
··· 1045 1046 if (tca[TCA_RATE]) 1047 gen_replace_estimator(&cl->bstats, &cl->rate_est, 1048 - qdisc_root_lock(sch), 1049 tca[TCA_RATE]); 1050 return 0; 1051 } ··· 1104 1105 if (tca[TCA_RATE]) 1106 gen_new_estimator(&cl->bstats, &cl->rate_est, 1107 - qdisc_root_lock(sch), tca[TCA_RATE]); 1108 *arg = (unsigned long)cl; 1109 return 0; 1110 }
··· 1045 1046 if (tca[TCA_RATE]) 1047 gen_replace_estimator(&cl->bstats, &cl->rate_est, 1048 + qdisc_root_sleeping_lock(sch), 1049 tca[TCA_RATE]); 1050 return 0; 1051 } ··· 1104 1105 if (tca[TCA_RATE]) 1106 gen_new_estimator(&cl->bstats, &cl->rate_est, 1107 + qdisc_root_sleeping_lock(sch), tca[TCA_RATE]); 1108 *arg = (unsigned long)cl; 1109 return 0; 1110 }
+2 -2
net/sched/sch_htb.c
··· 1372 goto failure; 1373 1374 gen_new_estimator(&cl->bstats, &cl->rate_est, 1375 - qdisc_root_lock(sch), 1376 tca[TCA_RATE] ? : &est.nla); 1377 cl->refcnt = 1; 1378 cl->children = 0; ··· 1427 } else { 1428 if (tca[TCA_RATE]) 1429 gen_replace_estimator(&cl->bstats, &cl->rate_est, 1430 - qdisc_root_lock(sch), 1431 tca[TCA_RATE]); 1432 sch_tree_lock(sch); 1433 }
··· 1372 goto failure; 1373 1374 gen_new_estimator(&cl->bstats, &cl->rate_est, 1375 + qdisc_root_sleeping_lock(sch), 1376 tca[TCA_RATE] ? : &est.nla); 1377 cl->refcnt = 1; 1378 cl->children = 0; ··· 1427 } else { 1428 if (tca[TCA_RATE]) 1429 gen_replace_estimator(&cl->bstats, &cl->rate_est, 1430 + qdisc_root_sleeping_lock(sch), 1431 tca[TCA_RATE]); 1432 sch_tree_lock(sch); 1433 }
+3
net/sctp/auth.c
··· 786 for (i = 0; i < hmacs->shmac_num_idents; i++) { 787 id = hmacs->shmac_idents[i]; 788 789 if (SCTP_AUTH_HMAC_ID_SHA1 == id) 790 has_sha1 = 1; 791
··· 786 for (i = 0; i < hmacs->shmac_num_idents; i++) { 787 id = hmacs->shmac_idents[i]; 788 789 + if (id > SCTP_AUTH_HMAC_ID_MAX) 790 + return -EOPNOTSUPP; 791 + 792 if (SCTP_AUTH_HMAC_ID_SHA1 == id) 793 has_sha1 = 1; 794
+5 -3
net/sctp/socket.c
··· 3086 int optlen) 3087 { 3088 struct sctp_hmacalgo *hmacs; 3089 int err; 3090 3091 if (!sctp_auth_enable) ··· 3104 goto out; 3105 } 3106 3107 - if (hmacs->shmac_num_idents == 0 || 3108 - hmacs->shmac_num_idents > SCTP_AUTH_NUM_HMACS) { 3109 err = -EINVAL; 3110 goto out; 3111 } ··· 3146 goto out; 3147 } 3148 3149 - if (authkey->sca_keylength > optlen) { 3150 ret = -EINVAL; 3151 goto out; 3152 }
··· 3086 int optlen) 3087 { 3088 struct sctp_hmacalgo *hmacs; 3089 + u32 idents; 3090 int err; 3091 3092 if (!sctp_auth_enable) ··· 3103 goto out; 3104 } 3105 3106 + idents = hmacs->shmac_num_idents; 3107 + if (idents == 0 || idents > SCTP_AUTH_NUM_HMACS || 3108 + (idents * sizeof(u16)) > (optlen - sizeof(struct sctp_hmacalgo))) { 3109 err = -EINVAL; 3110 goto out; 3111 } ··· 3144 goto out; 3145 } 3146 3147 + if (authkey->sca_keylength > optlen - sizeof(struct sctp_authkey)) { 3148 ret = -EINVAL; 3149 goto out; 3150 }