Merge branch 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6

* 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6:
pcnet_cs : add new id
chelsio: error path fix
s390: iucv Kconfig help description changes
s390: qeth driver fixes: atomic context fixups
s390: qeth driver fixes: packet socket
s390: qeth driver fixes: VLAN hdr, perf stats
forcedeth: sideband management fix
Revert "[PATCH] e1000: disable TSO on the 82544 with slab debugging"
qeth: fix uaccess handling and get rid of unused variable
qla3xxx: Add delay to NVRAM register access.
qla3xxx: Remove NETIF_F_LLTX from driver features.
ixgb: Write RA register high word first, increment version
ixgb: Maybe stop TX if not enough free descriptors
ixgb: Fix early TSO completion
[PATCH] ipw2100: Fix dropping fragmented small packet problem
[PATCH] ieee80211: WLAN_GET_SEQ_SEQ fix (select correct region)

+236 -216
+3 -2
drivers/net/chelsio/my3126.c
···
 {
 	struct cphy *cphy = kzalloc(sizeof (*cphy), GFP_KERNEL);
 
-	if (cphy)
-		cphy_init(cphy, adapter, phy_addr, &my3126_ops, mdio_ops);
+	if (!cphy)
+		return NULL;
 
+	cphy_init(cphy, adapter, phy_addr, &my3126_ops, mdio_ops);
 	INIT_DELAYED_WORK(&cphy->phy_update, my3216_poll);
 	cphy->bmsr = 0;
···
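
The bug class here is worth spelling out: the old code skipped cphy_init() when kzalloc() failed, but still dereferenced the pointer in the following INIT_DELAYED_WORK(). A minimal userspace sketch of the corrected shape (widget_create/widget are illustrative names, not the driver's API):

#include <stdlib.h>

struct widget { int state; };

static struct widget *widget_create(void)
{
	struct widget *w = calloc(1, sizeof(*w));

	if (!w)
		return NULL;	/* bail before any field is touched; the old
				 * chelsio path fell through and dereferenced
				 * the NULL pointer */
	w->state = 1;		/* stands in for cphy_init() and friends */
	return w;
}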
-6
drivers/net/e1000/e1000_main.c
···
 	    (adapter->hw.mac_type != e1000_82547))
 		netdev->features |= NETIF_F_TSO;
 
-#ifdef CONFIG_DEBUG_SLAB
-	/* 82544's work arounds do not play nicely with DEBUG SLAB */
-	if (adapter->hw.mac_type == e1000_82544)
-		netdev->features &= ~NETIF_F_TSO;
-#endif
-
 #ifdef NETIF_F_TSO6
 	if (adapter->hw.mac_type > e1000_82547_rev_2)
 		netdev->features |= NETIF_F_TSO6;
···
+56 -55
drivers/net/forcedeth.c
···
 #define NVREG_XMITCTL_HOST_SEMA_MASK	0x0000f000
 #define NVREG_XMITCTL_HOST_SEMA_ACQ	0x0000f000
 #define NVREG_XMITCTL_HOST_LOADED	0x00004000
+#define NVREG_XMITCTL_TX_PATH_EN	0x01000000
 	NvRegTransmitterStatus = 0x088,
 #define NVREG_XMITSTAT_BUSY	0x01
···
 #define NVREG_OFFLOAD_NORMAL	RX_NIC_BUFSIZE
 	NvRegReceiverControl = 0x094,
 #define NVREG_RCVCTL_START	0x01
+#define NVREG_RCVCTL_RX_PATH_EN	0x01000000
 	NvRegReceiverStatus = 0x98,
 #define NVREG_RCVSTAT_BUSY	0x01
···
 {
 	struct fe_priv *np = netdev_priv(dev);
 	u8 __iomem *base = get_hwbase(dev);
+	u32 rx_ctrl = readl(base + NvRegReceiverControl);
 
 	dprintk(KERN_DEBUG "%s: nv_start_rx\n", dev->name);
 	/* Already running? Stop it. */
-	if (readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) {
-		writel(0, base + NvRegReceiverControl);
+	if ((readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) && !np->mac_in_use) {
+		rx_ctrl &= ~NVREG_RCVCTL_START;
+		writel(rx_ctrl, base + NvRegReceiverControl);
 		pci_push(base);
 	}
 	writel(np->linkspeed, base + NvRegLinkSpeed);
 	pci_push(base);
-	writel(NVREG_RCVCTL_START, base + NvRegReceiverControl);
+	rx_ctrl |= NVREG_RCVCTL_START;
+	if (np->mac_in_use)
+		rx_ctrl &= ~NVREG_RCVCTL_RX_PATH_EN;
+	writel(rx_ctrl, base + NvRegReceiverControl);
 	dprintk(KERN_DEBUG "%s: nv_start_rx to duplex %d, speed 0x%08x.\n",
 		dev->name, np->duplex, np->linkspeed);
 	pci_push(base);
···
 
 static void nv_stop_rx(struct net_device *dev)
 {
+	struct fe_priv *np = netdev_priv(dev);
 	u8 __iomem *base = get_hwbase(dev);
+	u32 rx_ctrl = readl(base + NvRegReceiverControl);
 
 	dprintk(KERN_DEBUG "%s: nv_stop_rx\n", dev->name);
-	writel(0, base + NvRegReceiverControl);
+	if (!np->mac_in_use)
+		rx_ctrl &= ~NVREG_RCVCTL_START;
+	else
+		rx_ctrl |= NVREG_RCVCTL_RX_PATH_EN;
+	writel(rx_ctrl, base + NvRegReceiverControl);
 	reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
 		  NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX,
 		  KERN_INFO "nv_stop_rx: ReceiverStatus remained busy");
 
 	udelay(NV_RXSTOP_DELAY2);
-	writel(0, base + NvRegLinkSpeed);
+	if (!np->mac_in_use)
+		writel(0, base + NvRegLinkSpeed);
 }
 
 static void nv_start_tx(struct net_device *dev)
 {
+	struct fe_priv *np = netdev_priv(dev);
 	u8 __iomem *base = get_hwbase(dev);
+	u32 tx_ctrl = readl(base + NvRegTransmitterControl);
 
 	dprintk(KERN_DEBUG "%s: nv_start_tx\n", dev->name);
-	writel(NVREG_XMITCTL_START, base + NvRegTransmitterControl);
+	tx_ctrl |= NVREG_XMITCTL_START;
+	if (np->mac_in_use)
+		tx_ctrl &= ~NVREG_XMITCTL_TX_PATH_EN;
+	writel(tx_ctrl, base + NvRegTransmitterControl);
 	pci_push(base);
 }
 
 static void nv_stop_tx(struct net_device *dev)
 {
+	struct fe_priv *np = netdev_priv(dev);
 	u8 __iomem *base = get_hwbase(dev);
+	u32 tx_ctrl = readl(base + NvRegTransmitterControl);
 
 	dprintk(KERN_DEBUG "%s: nv_stop_tx\n", dev->name);
-	writel(0, base + NvRegTransmitterControl);
+	if (!np->mac_in_use)
+		tx_ctrl &= ~NVREG_XMITCTL_START;
+	else
+		tx_ctrl |= NVREG_XMITCTL_TX_PATH_EN;
+	writel(tx_ctrl, base + NvRegTransmitterControl);
 	reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0,
 		  NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX,
 		  KERN_INFO "nv_stop_tx: TransmitterStatus remained busy");
 
 	udelay(NV_TXSTOP_DELAY2);
-	writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
+	if (!np->mac_in_use)
+		writel(readl(base + NvRegTransmitPoll) & NVREG_TRANSMITPOLL_MAC_ADDR_REV,
+		       base + NvRegTransmitPoll);
 }
 
 static void nv_txrx_reset(struct net_device *dev)
···
 	return 0;
 }
 
-/* Indicate to mgmt unit whether driver is loaded or not */
-static void nv_mgmt_driver_loaded(struct net_device *dev, int loaded)
-{
-	u8 __iomem *base = get_hwbase(dev);
-	u32 tx_ctrl;
-
-	tx_ctrl = readl(base + NvRegTransmitterControl);
-	if (loaded)
-		tx_ctrl |= NVREG_XMITCTL_HOST_LOADED;
-	else
-		tx_ctrl &= ~NVREG_XMITCTL_HOST_LOADED;
-	writel(tx_ctrl, base + NvRegTransmitterControl);
-}
-
 static int nv_open(struct net_device *dev)
 {
 	struct fe_priv *np = netdev_priv(dev);
···
 	writel(NVREG_MIISTAT_MASK, base + NvRegMIIStatus);
 
 	if (id->driver_data & DEV_HAS_MGMT_UNIT) {
-		writel(0x1, base + 0x204); pci_push(base);
-		msleep(500);
 		/* management unit running on the mac? */
-		np->mac_in_use = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_ST;
-		if (np->mac_in_use) {
-			u32 mgmt_sync;
-			/* management unit setup the phy already? */
-			mgmt_sync = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK;
-			if (mgmt_sync == NVREG_XMITCTL_SYNC_NOT_READY) {
-				if (!nv_mgmt_acquire_sema(dev)) {
-					for (i = 0; i < 5000; i++) {
-						msleep(1);
-						mgmt_sync = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK;
-						if (mgmt_sync == NVREG_XMITCTL_SYNC_NOT_READY)
-							continue;
-						if (mgmt_sync == NVREG_XMITCTL_SYNC_PHY_INIT)
-							phyinitialized = 1;
-						break;
-					}
-				} else {
-					/* we need to init the phy */
-				}
-			} else if (mgmt_sync == NVREG_XMITCTL_SYNC_PHY_INIT) {
-				/* phy is inited by SMU */
-				phyinitialized = 1;
-			} else {
-				/* we need to init the phy */
-			}
-		}
+		if (readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_PHY_INIT) {
+			np->mac_in_use = readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_MGMT_ST;
+			dprintk(KERN_INFO "%s: mgmt unit is running. mac in use %x.\n", pci_name(pci_dev), np->mac_in_use);
+			for (i = 0; i < 5000; i++) {
+				msleep(1);
+				if (nv_mgmt_acquire_sema(dev)) {
+					/* management unit setup the phy already? */
+					if ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK) ==
+					    NVREG_XMITCTL_SYNC_PHY_INIT) {
+						/* phy is inited by mgmt unit */
+						phyinitialized = 1;
+						dprintk(KERN_INFO "%s: Phy already initialized by mgmt unit.\n", pci_name(pci_dev));
+					} else {
+						/* we need to init the phy */
+					}
+					break;
+				}
+			}
+		}
 	}
···
 	if (!phyinitialized) {
 		/* reset it */
 		phy_init(dev);
-	}
-
-	if (id->driver_data & DEV_HAS_MGMT_UNIT) {
-		nv_mgmt_driver_loaded(dev, 1);
+	} else {
+		/* see if it is a gigabit phy */
+		u32 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
+		if (mii_status & PHY_GIGABIT) {
+			np->gigabit = PHY_GIGABIT;
+		}
 	}
 
 	/* set default link speed settings */
···
 out_error:
 	if (phystate_orig)
 		writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl);
-	if (np->mac_in_use)
-		nv_mgmt_driver_loaded(dev, 0);
 	pci_set_drvdata(pci_dev, NULL);
 out_freering:
 	free_rings(dev);
···
 	 */
 	writel(np->orig_mac[0], base + NvRegMacAddrA);
 	writel(np->orig_mac[1], base + NvRegMacAddrB);
-
-	if (np->mac_in_use)
-		nv_mgmt_driver_loaded(dev, 0);
 
 	/* free all structures */
 	free_rings(dev);
+1
drivers/net/ixgb/ixgb.h
···
 
 	/* TX */
 	struct ixgb_desc_ring tx_ring ____cacheline_aligned_in_smp;
+	unsigned int restart_queue;
 	unsigned long timeo_start;
 	uint32_t tx_cmd_type;
 	uint64_t hw_csum_tx_good;
···
+1
drivers/net/ixgb/ixgb_ethtool.c
··· 79 {"tx_window_errors", IXGB_STAT(net_stats.tx_window_errors)}, 80 {"tx_deferred_ok", IXGB_STAT(stats.dc)}, 81 {"tx_timeout_count", IXGB_STAT(tx_timeout_count) }, 82 {"rx_long_length_errors", IXGB_STAT(stats.roc)}, 83 {"rx_short_length_errors", IXGB_STAT(stats.ruc)}, 84 #ifdef NETIF_F_TSO
··· 79 {"tx_window_errors", IXGB_STAT(net_stats.tx_window_errors)}, 80 {"tx_deferred_ok", IXGB_STAT(stats.dc)}, 81 {"tx_timeout_count", IXGB_STAT(tx_timeout_count) }, 82 + {"tx_restart_queue", IXGB_STAT(restart_queue) }, 83 {"rx_long_length_errors", IXGB_STAT(stats.roc)}, 84 {"rx_short_length_errors", IXGB_STAT(stats.ruc)}, 85 #ifdef NETIF_F_TSO
+2 -1
drivers/net/ixgb/ixgb_hw.c
···
 	/* Zero out the other 15 receive addresses. */
 	DEBUGOUT("Clearing RAR[1-15]\n");
 	for(i = 1; i < IXGB_RAR_ENTRIES; i++) {
+		/* Write high reg first to disable the AV bit first */
-		IXGB_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
 		IXGB_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
+		IXGB_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
 	}
 
 	return;
···
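
Why the order matters: each receive-address entry is a 64-bit register pair, and the Address Valid (AV) bit lives in the high word (bit 31 by the usual Intel RAH layout; treat that constant as an assumption here). Clearing the low word first leaves a window in which the entry is still marked valid against a half-cleared address. A standalone model of the ordering:

#include <stdint.h>

#define RAH_AV 0x80000000u	/* assumed position of the address-valid bit */

struct rar { uint32_t low, high; };	/* models one RA register pair */

static void rar_clear(volatile struct rar *r)
{
	r->high = 0;	/* drops AV: the entry can no longer match */
	r->low = 0;	/* only now is the address itself cleared */
}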
+51 -6
drivers/net/ixgb/ixgb_main.c
···
 #else
 #define DRIVERNAPI "-NAPI"
 #endif
-#define DRV_VERSION		"1.0.117-k2"DRIVERNAPI
+#define DRV_VERSION		"1.0.126-k2"DRIVERNAPI
 char ixgb_driver_version[] = DRV_VERSION;
 static char ixgb_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
···
 	struct ixgb_buffer *buffer_info;
 	int len = skb->len;
 	unsigned int offset = 0, size, count = 0, i;
+	unsigned int mss = skb_shinfo(skb)->gso_size;
 
 	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
 	unsigned int f;
···
 	while(len) {
 		buffer_info = &tx_ring->buffer_info[i];
 		size = min(len, IXGB_MAX_DATA_PER_TXD);
+		/* Workaround for premature desc write-backs
+		 * in TSO mode.  Append 4-byte sentinel desc */
+		if (unlikely(mss && !nr_frags && size == len && size > 8))
+			size -= 4;
+
 		buffer_info->length = size;
 		WARN_ON(buffer_info->dma != 0);
 		buffer_info->dma =
···
 		while(len) {
 			buffer_info = &tx_ring->buffer_info[i];
 			size = min(len, IXGB_MAX_DATA_PER_TXD);
+
+			/* Workaround for premature desc write-backs
+			 * in TSO mode.  Append 4-byte sentinel desc */
+			if (unlikely(mss && !nr_frags && size == len
+				     && size > 8))
+				size -= 4;
+
 			buffer_info->length = size;
 			buffer_info->dma =
 				pci_map_page(adapter->pdev,
···
 	IXGB_WRITE_REG(&adapter->hw, TDT, i);
 }
 
+static int __ixgb_maybe_stop_tx(struct net_device *netdev, int size)
+{
+	struct ixgb_adapter *adapter = netdev_priv(netdev);
+	struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
+
+	netif_stop_queue(netdev);
+	/* Herbert's original patch had:
+	 *  smp_mb__after_netif_stop_queue();
+	 * but since that doesn't exist yet,
+	 * just open code it. */
+	smp_mb();
+
+	/* We need to check again in a case another CPU has just
+	 * made room available. */
+	if (likely(IXGB_DESC_UNUSED(tx_ring) < size))
+		return -EBUSY;
+
+	/* A reprieve! */
+	netif_start_queue(netdev);
+	++adapter->restart_queue;
+	return 0;
+}
+
+static int ixgb_maybe_stop_tx(struct net_device *netdev,
+			      struct ixgb_desc_ring *tx_ring, int size)
+{
+	if (likely(IXGB_DESC_UNUSED(tx_ring) >= size))
+		return 0;
+	return __ixgb_maybe_stop_tx(netdev, size);
+}
+
+
 /* Tx Descriptors needed, worst case */
 #define TXD_USE_COUNT(S) (((S) >> IXGB_MAX_TXD_PWR) + \
 			 (((S) & (IXGB_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
-#define DESC_NEEDED TXD_USE_COUNT(IXGB_MAX_DATA_PER_TXD) + \
-	MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1
+#define DESC_NEEDED TXD_USE_COUNT(IXGB_MAX_DATA_PER_TXD) /* skb->data */ + \
+	MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1 /* for context */ \
+	+ 1 /* one more needed for sentinel TSO workaround */
 
 static int
 ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
···
 	spin_lock_irqsave(&adapter->tx_lock, flags);
 #endif
 
-	if(unlikely(IXGB_DESC_UNUSED(&adapter->tx_ring) < DESC_NEEDED)) {
+	if (unlikely(ixgb_maybe_stop_tx(netdev, &adapter->tx_ring,
+		     DESC_NEEDED))) {
 		netif_stop_queue(netdev);
 		spin_unlock_irqrestore(&adapter->tx_lock, flags);
 		return NETDEV_TX_BUSY;
···
 
 #ifdef NETIF_F_LLTX
 	/* Make sure there is space in the ring for the next send. */
-	if(unlikely(IXGB_DESC_UNUSED(&adapter->tx_ring) < DESC_NEEDED))
-		netif_stop_queue(netdev);
+	ixgb_maybe_stop_tx(netdev, &adapter->tx_ring, DESC_NEEDED);
 
 	spin_unlock_irqrestore(&adapter->tx_lock, flags);
···
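
The ixgb_maybe_stop_tx() pair above is the standard lockless stop/wake handshake: the transmit path must re-check ring space after netif_stop_queue(), because the completion path may have freed descriptors and sampled the queue state just before the stop became visible. A self-contained userspace analogue of that handshake, with C11 atomics standing in for the kernel barriers (all names illustrative):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int free_slots;	/* consumer increments as it frees entries */
static atomic_bool stopped;	/* models the stopped queue flag */

static bool maybe_stop(int needed)
{
	if (atomic_load(&free_slots) >= needed)
		return false;			/* enough room, keep going */

	atomic_store(&stopped, true);
	/* full barrier, like the open-coded smp_mb() above: the stop must
	 * be visible before we re-read the free count */
	atomic_thread_fence(memory_order_seq_cst);

	if (atomic_load(&free_slots) < needed)
		return true;			/* really out of room: stop */

	atomic_store(&stopped, false);		/* a reprieve: room appeared */
	return false;
}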
+2
drivers/net/pcmcia/pcnet_cs.c
··· 1617 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega FastEther PCC-TX", 0x5261440f, 0x485e85d9), 1618 PCMCIA_DEVICE_PROD_ID12("Corega,K.K.", "Ethernet LAN Card", 0x110d26d9, 0x9fd2f0a2), 1619 PCMCIA_DEVICE_PROD_ID12("corega,K.K.", "Ethernet LAN Card", 0x9791a90e, 0x9fd2f0a2), 1620 PCMCIA_DEVICE_PROD_ID12("CouplerlessPCMCIA", "100BASE", 0xee5af0ad, 0x7c2add04), 1621 PCMCIA_DEVICE_PROD_ID12("CyQ've", "ELA-010", 0x77008979, 0x9d8d445d), 1622 PCMCIA_DEVICE_PROD_ID12("CyQ've", "ELA-110E 10/100M LAN Card", 0x77008979, 0xfd184814), ··· 1668 PCMCIA_DEVICE_PROD_ID12("Logitec", "LPM-LN100TX", 0x88fcdeda, 0x6d772737), 1669 PCMCIA_DEVICE_PROD_ID12("Logitec", "LPM-LN100TE", 0x88fcdeda, 0x0e714bee), 1670 PCMCIA_DEVICE_PROD_ID12("Logitec", "LPM-LN20T", 0x88fcdeda, 0x81090922), 1671 PCMCIA_DEVICE_PROD_ID12("LONGSHINE", "PCMCIA Ethernet Card", 0xf866b0b0, 0x6f6652e0), 1672 PCMCIA_DEVICE_PROD_ID12("MACNICA", "ME1-JEIDA", 0x20841b68, 0xaf8a3578), 1673 PCMCIA_DEVICE_PROD_ID12("Macsense", "MPC-10", 0xd830297f, 0xd265c307),
··· 1617 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega FastEther PCC-TX", 0x5261440f, 0x485e85d9), 1618 PCMCIA_DEVICE_PROD_ID12("Corega,K.K.", "Ethernet LAN Card", 0x110d26d9, 0x9fd2f0a2), 1619 PCMCIA_DEVICE_PROD_ID12("corega,K.K.", "Ethernet LAN Card", 0x9791a90e, 0x9fd2f0a2), 1620 + PCMCIA_DEVICE_PROD_ID12("corega K.K.", "(CG-LAPCCTXD)", 0x5261440f, 0x73ec0d88), 1621 PCMCIA_DEVICE_PROD_ID12("CouplerlessPCMCIA", "100BASE", 0xee5af0ad, 0x7c2add04), 1622 PCMCIA_DEVICE_PROD_ID12("CyQ've", "ELA-010", 0x77008979, 0x9d8d445d), 1623 PCMCIA_DEVICE_PROD_ID12("CyQ've", "ELA-110E 10/100M LAN Card", 0x77008979, 0xfd184814), ··· 1667 PCMCIA_DEVICE_PROD_ID12("Logitec", "LPM-LN100TX", 0x88fcdeda, 0x6d772737), 1668 PCMCIA_DEVICE_PROD_ID12("Logitec", "LPM-LN100TE", 0x88fcdeda, 0x0e714bee), 1669 PCMCIA_DEVICE_PROD_ID12("Logitec", "LPM-LN20T", 0x88fcdeda, 0x81090922), 1670 + PCMCIA_DEVICE_PROD_ID12("Logitec", "LPM-LN10TE", 0x88fcdeda, 0xc1e2521c), 1671 PCMCIA_DEVICE_PROD_ID12("LONGSHINE", "PCMCIA Ethernet Card", 0xf866b0b0, 0x6f6652e0), 1672 PCMCIA_DEVICE_PROD_ID12("MACNICA", "ME1-JEIDA", 0x20841b68, 0xaf8a3578), 1673 PCMCIA_DEVICE_PROD_ID12("Macsense", "MPC-10", 0xd830297f, 0xd265c307),
+23 -15
drivers/net/qla3xxx.c
···
 	return;
 }
 
+static void ql_write_nvram_reg(struct ql3_adapter *qdev,
+			       u32 __iomem *reg, u32 value)
+{
+	writel(value, reg);
+	readl(reg);
+	udelay(1);
+	return;
+}
+
 static void ql_write_page0_reg(struct ql3_adapter *qdev,
 			       u32 __iomem *reg, u32 value)
 {
···
 	    qdev->mem_map_registers;
 
 	qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1;
-	ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
+	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
 			    ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
-	ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
+	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
 			    ((ISP_NVRAM_MASK << 16) | qdev->eeprom_cmd_data));
 }
···
 	    qdev->mem_map_registers;
 
 	/* Clock in a zero, then do the start bit */
-	ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
+	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
 			    ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
 			    AUBURN_EEPROM_DO_1);
-	ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
+	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
 			    ISP_NVRAM_MASK | qdev->
 			    eeprom_cmd_data | AUBURN_EEPROM_DO_1 |
 			    AUBURN_EEPROM_CLK_RISE);
-	ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
+	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
 			    ISP_NVRAM_MASK | qdev->
 			    eeprom_cmd_data | AUBURN_EEPROM_DO_1 |
 			    AUBURN_EEPROM_CLK_FALL);
···
 			 * If the bit changed, then change the DO state to
 			 * match
 			 */
-			ql_write_common_reg(qdev,
+			ql_write_nvram_reg(qdev,
 					    &port_regs->CommonRegs.
 					    serialPortInterfaceReg,
 					    ISP_NVRAM_MASK | qdev->
 					    eeprom_cmd_data | dataBit);
 			previousBit = dataBit;
 		}
-		ql_write_common_reg(qdev,
+		ql_write_nvram_reg(qdev,
 				    &port_regs->CommonRegs.
 				    serialPortInterfaceReg,
 				    ISP_NVRAM_MASK | qdev->
 				    eeprom_cmd_data | dataBit |
 				    AUBURN_EEPROM_CLK_RISE);
-		ql_write_common_reg(qdev,
+		ql_write_nvram_reg(qdev,
 				    &port_regs->CommonRegs.
 				    serialPortInterfaceReg,
 				    ISP_NVRAM_MASK | qdev->
···
 			 * If the bit changed, then change the DO state to
 			 * match
 			 */
-			ql_write_common_reg(qdev,
+			ql_write_nvram_reg(qdev,
 					    &port_regs->CommonRegs.
 					    serialPortInterfaceReg,
 					    ISP_NVRAM_MASK | qdev->
 					    eeprom_cmd_data | dataBit);
 			previousBit = dataBit;
 		}
-		ql_write_common_reg(qdev,
+		ql_write_nvram_reg(qdev,
 				    &port_regs->CommonRegs.
 				    serialPortInterfaceReg,
 				    ISP_NVRAM_MASK | qdev->
 				    eeprom_cmd_data | dataBit |
 				    AUBURN_EEPROM_CLK_RISE);
-		ql_write_common_reg(qdev,
+		ql_write_nvram_reg(qdev,
 				    &port_regs->CommonRegs.
 				    serialPortInterfaceReg,
 				    ISP_NVRAM_MASK | qdev->
···
 	struct ql3xxx_port_registers __iomem *port_regs =
 	    qdev->mem_map_registers;
 	qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0;
-	ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
+	ql_write_nvram_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
 			    ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
 }
···
 	/* Read the data bits */
 	/* The first bit is a dummy.  Clock right over it. */
 	for (i = 0; i < dataBits; i++) {
-		ql_write_common_reg(qdev,
+		ql_write_nvram_reg(qdev,
 				    &port_regs->CommonRegs.
 				    serialPortInterfaceReg,
 				    ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
 				    AUBURN_EEPROM_CLK_RISE);
-		ql_write_common_reg(qdev,
+		ql_write_nvram_reg(qdev,
 				    &port_regs->CommonRegs.
 				    serialPortInterfaceReg,
 				    ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
···
 	SET_MODULE_OWNER(ndev);
 	SET_NETDEV_DEV(ndev, &pdev->dev);
 
-	ndev->features = NETIF_F_LLTX;
 	if (pci_using_dac)
 		ndev->features |= NETIF_F_HIGHDMA;
 
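
The new ql_write_nvram_reg() helper exists because serial-EEPROM bit-banging needs each write to actually reach the chip before the next clock edge; a PCI write may be posted, so it is flushed with a read-back and followed by a settle delay. A userspace model of the idiom (the volatile register pointer is illustrative; writel()/readl() semantics are summarized in the comments):

#include <stdint.h>

static void reg_write_flushed(volatile uint32_t *reg, uint32_t val)
{
	*reg = val;	/* writel(): the write may be posted on the bus */
	(void)*reg;	/* readl(): forces the posted write to complete */
	/* the driver then udelay(1)s so the slow NVRAM part can latch */
}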
+1 -1
drivers/net/wireless/ipw2100.c
···
 		break;
 	}
 #endif
-	if (stats.len < sizeof(u->rx_data.header))
+	if (stats.len < sizeof(struct ieee80211_hdr_3addr))
 		break;
 	switch (WLAN_FC_GET_TYPE(u->rx_data.header.frame_ctl)) {
 	case IEEE80211_FTYPE_MGMT:
···
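
The old test compared the fragment length against sizeof(u->rx_data.header), a union member larger than the smallest legal frame, so short fragmented packets were dropped. The true minimum is the 24-byte 3-address header; a standalone check (struct re-declared here so the example compiles on its own):

#include <stdint.h>
#include <stdio.h>

struct ieee80211_hdr_3addr {		/* 24 bytes on the wire */
	uint16_t frame_ctl;
	uint16_t duration_id;
	uint8_t  addr1[6], addr2[6], addr3[6];
	uint16_t seq_ctl;
} __attribute__((packed));

int main(void)
{
	/* a 25-byte fragment is valid, yet it was rejected when
	 * compared against the bigger union header */
	printf("minimum header: %zu bytes\n",
	       sizeof(struct ieee80211_hdr_3addr));
	return 0;
}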
+1 -4
drivers/s390/net/Kconfig
···
 	help
 	  Select this option if you want to use inter-user communication
 	  under VM or VIF. If unsure, say "Y" to enable a fast communication
-	  link between VM guests. At boot time the user ID of the guest needs
-	  to be passed to the kernel. Note that both kernels need to be
-	  compiled with this option and both need to be booted with the user ID
-	  of the other VM guest.
+	  link between VM guests.
 
 config NETIUCV
 	tristate "IUCV network device support (VM only)"
···
+1 -1
drivers/s390/net/qeth.h
···
 	int (*callback)(struct qeth_card *,struct qeth_reply *,unsigned long);
 	u32 seqno;
 	unsigned long offset;
-	int received;
+	atomic_t received;
 	int rc;
 	void *param;
 	struct qeth_card *card;
···
+93 -124
drivers/s390/net/qeth_main.c
···
 	    channel->state == CH_STATE_UP)
 		qeth_issue_next_read(card);
 
-	tasklet_schedule(&channel->irq_tasklet);
+	qeth_irq_tasklet((unsigned long)channel);
 	return;
 out:
 	wake_up(&card->wait_q);
···
 }
 
 static int
-qeth_register_ip_addresses(void *ptr)
-{
-	struct qeth_card *card;
-
-	card = (struct qeth_card *) ptr;
-	daemonize("qeth_reg_ip");
-	QETH_DBF_TEXT(trace,4,"regipth1");
-	if (!qeth_do_run_thread(card, QETH_SET_IP_THREAD))
-		return 0;
-	QETH_DBF_TEXT(trace,4,"regipth2");
-	qeth_set_ip_addr_list(card);
-	qeth_clear_thread_running_bit(card, QETH_SET_IP_THREAD);
-	return 0;
-}
-
-/*
- * Drive the SET_PROMISC_MODE thread
- */
-static int
-qeth_set_promisc_mode(void *ptr)
-{
-	struct qeth_card *card = (struct qeth_card *) ptr;
-
-	daemonize("qeth_setprm");
-	QETH_DBF_TEXT(trace,4,"setprm1");
-	if (!qeth_do_run_thread(card, QETH_SET_PROMISC_MODE_THREAD))
-		return 0;
-	QETH_DBF_TEXT(trace,4,"setprm2");
-	qeth_setadp_promisc_mode(card);
-	qeth_clear_thread_running_bit(card, QETH_SET_PROMISC_MODE_THREAD);
-	return 0;
-}
-
-static int
 qeth_recover(void *ptr)
 {
 	struct qeth_card *card;
···
 	if (card->read.state != CH_STATE_UP &&
 	    card->write.state != CH_STATE_UP)
 		return;
-
-	if (qeth_do_start_thread(card, QETH_SET_IP_THREAD))
-		kernel_thread(qeth_register_ip_addresses, (void *)card,SIGCHLD);
-	if (qeth_do_start_thread(card, QETH_SET_PROMISC_MODE_THREAD))
-		kernel_thread(qeth_set_promisc_mode, (void *)card, SIGCHLD);
 	if (qeth_do_start_thread(card, QETH_RECOVER_THREAD))
 		kernel_thread(qeth_recover, (void *) card, SIGCHLD);
 }
···
 		card->options.layer2 = 1;
 	else
 		card->options.layer2 = 0;
-	card->options.performance_stats = 1;
+	card->options.performance_stats = 0;
 }
···
 		return -ENOMEM;
 	}
 	qeth_setup_ccw(&card->read, iob->data, QETH_BUFSIZE);
-	wait_event(card->wait_q,
-		   atomic_cmpxchg(&card->read.irq_pending, 0, 1) == 0);
 	QETH_DBF_TEXT(trace, 6, "noirqpnd");
 	rc = ccw_device_start(card->read.ccwdev, &card->read.ccw,
 			      (addr_t) iob, 0, 0);
···
 	reply = kzalloc(sizeof(struct qeth_reply), GFP_ATOMIC);
 	if (reply){
 		atomic_set(&reply->refcnt, 1);
+		atomic_set(&reply->received, 0);
 		reply->card = card;
 	};
 	return reply;
···
 	if (atomic_dec_and_test(&reply->refcnt))
 		kfree(reply);
 }
-
-static void
-qeth_cmd_timeout(unsigned long data)
-{
-	struct qeth_reply *reply, *list_reply, *r;
-	unsigned long flags;
-
-	reply = (struct qeth_reply *) data;
-	spin_lock_irqsave(&reply->card->lock, flags);
-	list_for_each_entry_safe(list_reply, r,
-				 &reply->card->cmd_waiter_list, list) {
-		if (reply == list_reply){
-			qeth_get_reply(reply);
-			list_del_init(&reply->list);
-			spin_unlock_irqrestore(&reply->card->lock, flags);
-			reply->rc = -ETIME;
-			reply->received = 1;
-			wake_up(&reply->wait_q);
-			qeth_put_reply(reply);
-			return;
-		}
-	}
-	spin_unlock_irqrestore(&reply->card->lock, flags);
-}
-
 
 static struct qeth_ipa_cmd *
 qeth_check_ipa_data(struct qeth_card *card, struct qeth_cmd_buffer *iob)
···
 	list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) {
 		qeth_get_reply(reply);
 		reply->rc = -EIO;
-		reply->received = 1;
+		atomic_inc(&reply->received);
 		list_del_init(&reply->list);
 		wake_up(&reply->wait_q);
 		qeth_put_reply(reply);
···
 				      &card->cmd_waiter_list);
 			spin_unlock_irqrestore(&card->lock, flags);
 		} else {
-			reply->received = 1;
+			atomic_inc(&reply->received);
 			wake_up(&reply->wait_q);
 		}
 		qeth_put_reply(reply);
···
 	int rc;
 	unsigned long flags;
 	struct qeth_reply *reply = NULL;
-	struct timer_list timer;
+	unsigned long timeout;
 
 	QETH_DBF_TEXT(trace, 2, "sendctl");
 
···
 		reply->seqno = QETH_IDX_COMMAND_SEQNO;
 	else
 		reply->seqno = card->seqno.ipa++;
-	init_timer(&timer);
-	timer.function = qeth_cmd_timeout;
-	timer.data = (unsigned long) reply;
 	init_waitqueue_head(&reply->wait_q);
 	spin_lock_irqsave(&card->lock, flags);
 	list_add_tail(&reply->list, &card->cmd_waiter_list);
 	spin_unlock_irqrestore(&card->lock, flags);
 	QETH_DBF_HEX(control, 2, iob->data, QETH_DBF_CONTROL_LEN);
-	wait_event(card->wait_q,
-		   atomic_cmpxchg(&card->write.irq_pending, 0, 1) == 0);
+
+	while (atomic_cmpxchg(&card->write.irq_pending, 0, 1)) ;
 	qeth_prepare_control_data(card, len, iob);
+
 	if (IS_IPA(iob->data))
-		timer.expires = jiffies + QETH_IPA_TIMEOUT;
+		timeout = jiffies + QETH_IPA_TIMEOUT;
 	else
-		timer.expires = jiffies + QETH_TIMEOUT;
+		timeout = jiffies + QETH_TIMEOUT;
+
 	QETH_DBF_TEXT(trace, 6, "noirqpnd");
 	spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags);
 	rc = ccw_device_start(card->write.ccwdev, &card->write.ccw,
···
 		wake_up(&card->wait_q);
 		return rc;
 	}
-	add_timer(&timer);
-	wait_event(reply->wait_q, reply->received);
-	del_timer_sync(&timer);
+	while (!atomic_read(&reply->received)) {
+		if (time_after(jiffies, timeout)) {
+			spin_lock_irqsave(&reply->card->lock, flags);
+			list_del_init(&reply->list);
+			spin_unlock_irqrestore(&reply->card->lock, flags);
+			reply->rc = -ETIME;
+			atomic_inc(&reply->received);
+			wake_up(&reply->wait_q);
+		}
+	};
 	rc = reply->rc;
 	qeth_put_reply(reply);
 	return rc;
···
 		qeth_rebuild_skb_fake_ll_eth(card, skb, hdr);
 }
 
-static inline __u16
+static inline void
 qeth_layer2_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
 			struct qeth_hdr *hdr)
 {
-	unsigned short vlan_id = 0;
-#ifdef CONFIG_QETH_VLAN
-	struct vlan_hdr *vhdr;
-#endif
-
 	skb->pkt_type = PACKET_HOST;
 	skb->protocol = qeth_type_trans(skb, skb->dev);
 	if (card->options.checksum_type == NO_CHECKSUMMING)
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
 	else
 		skb->ip_summed = CHECKSUM_NONE;
-#ifdef CONFIG_QETH_VLAN
-	if (hdr->hdr.l2.flags[2] & (QETH_LAYER2_FLAG_VLAN)) {
-		vhdr = (struct vlan_hdr *) skb->data;
-		skb->protocol =
-			__constant_htons(vhdr->h_vlan_encapsulated_proto);
-		vlan_id = hdr->hdr.l2.vlan_id;
-		skb_pull(skb, VLAN_HLEN);
-	}
-#endif
 	*((__u32 *)skb->cb) = ++card->seqno.pkt_seqno;
-	return vlan_id;
 }
 
 static inline __u16
···
 	int offset;
 	int rxrc;
 	__u16 vlan_tag = 0;
-	__u16 *vlan_addr;
 
 	/* get first element of current buffer */
 	element = (struct qdio_buffer_element *)&buf->buffer->element[0];
···
 					 &offset, &hdr))) {
 		skb->dev = card->dev;
 		if (hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2)
-			vlan_tag = qeth_layer2_rebuild_skb(card, skb, hdr);
+			qeth_layer2_rebuild_skb(card, skb, hdr);
 		else if (hdr->hdr.l3.id == QETH_HEADER_TYPE_LAYER3)
 			vlan_tag = qeth_rebuild_skb(card, skb, hdr);
 		else { /*in case of OSN*/
···
 qeth_prepare_skb(struct qeth_card *card, struct sk_buff *skb,
 		 struct qeth_hdr **hdr, int ipv)
 {
-	struct sk_buff *new_skb;
+	struct sk_buff *new_skb, *new_skb2;
 
 	QETH_DBF_TEXT(trace, 6, "prepskb");
-
-	new_skb = qeth_realloc_headroom(card, skb, sizeof(struct qeth_hdr));
-	if (new_skb == NULL)
+	new_skb = skb;
+	new_skb = qeth_pskb_unshare(skb, GFP_ATOMIC);
+	if (!new_skb)
 		return NULL;
+	new_skb2 = qeth_realloc_headroom(card, new_skb,
+					 sizeof(struct qeth_hdr));
+	if (!new_skb2) {
+		__qeth_free_new_skb(skb, new_skb);
+		return NULL;
+	}
+	if (new_skb != skb)
+		__qeth_free_new_skb(new_skb2, new_skb);
+	new_skb = new_skb2;
 	*hdr = __qeth_prepare_skb(card, new_skb, ipv);
 	if (*hdr == NULL) {
 		__qeth_free_new_skb(skb, new_skb);
···
 			   "(0x%x/%d)\n",
 			   QETH_CARD_IFNAME(card), qeth_arp_get_error_cause(&rc),
 			   tmp, tmp);
-		copy_to_user(udata, qinfo.udata, 4);
+		if (copy_to_user(udata, qinfo.udata, 4))
+			rc = -EFAULT;
 	} else {
-		copy_to_user(udata, qinfo.udata, qinfo.udata_len);
+		if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
+			rc = -EFAULT;
 	}
 	kfree(qinfo.udata);
 	return rc;
···
 	if (rc)
 		PRINT_WARN("SNMP command failed on %s: (0x%x)\n",
 			   QETH_CARD_IFNAME(card), rc);
-	else
-		copy_to_user(udata, qinfo.udata, qinfo.udata_len);
+	else {
+		if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
+			rc = -EFAULT;
+	}
 
 	kfree(ureq);
 	kfree(qinfo.udata);
···
 	qeth_add_multicast_ipv6(card);
 #endif
 out:
-	if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0)
-		schedule_work(&card->kernel_thread_starter);
+	qeth_set_ip_addr_list(card);
 	if (!qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
 		return;
-	if (qeth_set_thread_start_bit(card, QETH_SET_PROMISC_MODE_THREAD)==0)
-		schedule_work(&card->kernel_thread_starter);
+	qeth_setadp_promisc_mode(card);
 }
···
 };
 
 static int
+qeth_hard_header_parse(struct sk_buff *skb, unsigned char *haddr)
+{
+	struct qeth_card *card;
+	struct ethhdr *eth;
+
+	card = qeth_get_card_from_dev(skb->dev);
+	if (card->options.layer2)
+		goto haveheader;
+#ifdef CONFIG_QETH_IPV6
+	/* cause of the manipulated arp constructor and the ARP
+	   flag for OSAE devices we have some nasty exceptions */
+	if (card->info.type == QETH_CARD_TYPE_OSAE) {
+		if (!card->options.fake_ll) {
+			if ((skb->pkt_type==PACKET_OUTGOING) &&
+			    (skb->protocol==ETH_P_IPV6))
+				goto haveheader;
+			else
+				return 0;
+		} else {
+			if ((skb->pkt_type==PACKET_OUTGOING) &&
+			    (skb->protocol==ETH_P_IP))
+				return 0;
+			else
+				goto haveheader;
+		}
+	}
+#endif
+	if (!card->options.fake_ll)
+		return 0;
+haveheader:
+	eth = eth_hdr(skb);
+	memcpy(haddr, eth->h_source, ETH_ALEN);
+	return ETH_ALEN;
+}
+
+static int
 qeth_netdev_init(struct net_device *dev)
 {
 	struct qeth_card *card;
···
 	if (card->options.fake_ll &&
 	    (qeth_get_netdev_flags(card) & IFF_NOARP))
 		dev->hard_header = qeth_fake_header;
-	dev->hard_header_parse = NULL;
+	if (dev->type == ARPHRD_IEEE802_TR)
+		dev->hard_header_parse = NULL;
+	else
+		dev->hard_header_parse = qeth_hard_header_parse;
 	dev->set_mac_address = qeth_layer2_set_mac_address;
 	dev->flags |= qeth_get_netdev_flags(card);
 	if ((card->options.fake_broadcast) ||
···
 	}
 	if (!qeth_add_ip(card, ipaddr))
 		kfree(ipaddr);
-	if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0)
-		schedule_work(&card->kernel_thread_starter);
+	qeth_set_ip_addr_list(card);
 	return rc;
 }
···
 		return;
 	if (!qeth_delete_ip(card, ipaddr))
 		kfree(ipaddr);
-	if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0)
-		schedule_work(&card->kernel_thread_starter);
+	qeth_set_ip_addr_list(card);
 }
···
 	}
 	if (!qeth_add_ip(card, ipaddr))
 		kfree(ipaddr);
-	if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0)
-		schedule_work(&card->kernel_thread_starter);
+	qeth_set_ip_addr_list(card);
 	return 0;
 }
···
 		return;
 	if (!qeth_delete_ip(card, ipaddr))
 		kfree(ipaddr);
-	if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0)
-		schedule_work(&card->kernel_thread_starter);
+	qeth_set_ip_addr_list(card);
 }
···
 	default:
 		break;
 	}
-	if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0)
-		schedule_work(&card->kernel_thread_starter);
+	qeth_set_ip_addr_list(card);
 out:
 	return NOTIFY_DONE;
 }
···
 	default:
 		break;
 	}
-	if (qeth_set_thread_start_bit(card, QETH_SET_IP_THREAD) == 0)
-		schedule_work(&card->kernel_thread_starter);
+	qeth_set_ip_addr_list(card);
 out:
 	return NOTIFY_DONE;
 }
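
Several of the qeth hunks exist because copy_to_user() returns the number of bytes it could not copy, and that result was previously ignored. A minimal userspace model of the contract and the corrected call pattern (fake_copy_to_user() is a stand-in, not a kernel API):

#include <stddef.h>
#include <string.h>

/* models the kernel contract: returns bytes left uncopied, 0 on success */
static unsigned long fake_copy_to_user(void *dst, const void *src, size_t n)
{
	if (!dst)
		return n;	/* nothing copied: bad user pointer */
	memcpy(dst, src, n);
	return 0;
}

static int copy_out(void *udata, const void *buf, size_t len)
{
	/* any non-zero result must become -EFAULT, as in the fix above */
	if (fake_copy_to_user(udata, buf, len))
		return -14;	/* -EFAULT */
	return 0;
}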
+1 -1
include/net/ieee80211.h
···
 #define WLAN_FC_GET_STYPE(fc) ((fc) & IEEE80211_FCTL_STYPE)
 
 #define WLAN_GET_SEQ_FRAG(seq) ((seq) & IEEE80211_SCTL_FRAG)
-#define WLAN_GET_SEQ_SEQ(seq)  ((seq) & IEEE80211_SCTL_SEQ)
+#define WLAN_GET_SEQ_SEQ(seq)  (((seq) & IEEE80211_SCTL_SEQ) >> 4)
 
 /* Authentication algorithms */
 #define WLAN_AUTH_OPEN 0
···
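
The fix works because the 802.11 sequence-control field packs a 4-bit fragment number in bits 0-3 and a 12-bit sequence number in bits 4-15; masking with IEEE80211_SCTL_SEQ (0xFFF0) alone yields the sequence number multiplied by 16. A standalone demonstration using the same constants:

#include <stdio.h>

#define IEEE80211_SCTL_FRAG 0x000F
#define IEEE80211_SCTL_SEQ  0xFFF0

#define WLAN_GET_SEQ_FRAG(seq) ((seq) & IEEE80211_SCTL_FRAG)
#define WLAN_GET_SEQ_SEQ(seq)  (((seq) & IEEE80211_SCTL_SEQ) >> 4)

int main(void)
{
	unsigned seq_ctl = (2047u << 4) | 3;	/* seq 2047, fragment 3 */

	printf("frag=%u seq=%u\n",
	       WLAN_GET_SEQ_FRAG(seq_ctl), WLAN_GET_SEQ_SEQ(seq_ctl));
	return 0;	/* prints: frag=3 seq=2047 */
}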