Merge branch 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6

* 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6:
[netdrvr] ewrk3: correct card detection bug
cxgb3 - fix white spaces in drivers/net/Kconfig
myri10ge: update driver version to 1.3.0-1.226
myri10ge: fix management of >4kB allocated pages
myri10ge: update wcfifo and intr_coal_delay default values
myri10ge: Serverworks HT2100 provides aligned PCIe completion
mv643xx_eth: add mv643xx_eth_shutdown function
SAA9730: Fix large pile of warnings
Revert "ucc_geth: returns NETDEV_TX_BUSY when BD ring is full"
cxgb3 - T3B2 pcie config space
cxgb3 - Fix potential MAC hang
cxgb3 - Auto-load FW if mismatch detected
cxgb3 - fix ethtool cmd on multiple queues port
Fix return code in pci-skeleton.c
skge: use per-port phy locking
skge: mask irqs when device down
skge: deadlock on tx timeout
[PATCH] airo: Fix an error path memory leak
[PATCH] bcm43xx: MANUALWLAN fixes

 16 files changed, 460 insertions(+), 197 deletions(-)
+13 -12
drivers/net/Kconfig
···
 	  when the driver is receiving lots of packets from the card.
 
 config CHELSIO_T3
-        tristate "Chelsio Communications T3 10Gb Ethernet support"
-        depends on PCI
-        help
-          This driver supports Chelsio T3-based gigabit and 10Gb Ethernet
-          adapters.
+	tristate "Chelsio Communications T3 10Gb Ethernet support"
+	depends on PCI
+	select FW_LOADER
+	help
+	  This driver supports Chelsio T3-based gigabit and 10Gb Ethernet
+	  adapters.
 
-          For general information about Chelsio and our products, visit
-          our website at <http://www.chelsio.com>.
+	  For general information about Chelsio and our products, visit
+	  our website at <http://www.chelsio.com>.
 
-          For customer support, please visit our customer support page at
-          <http://www.chelsio.com/support.htm>.
+	  For customer support, please visit our customer support page at
+	  <http://www.chelsio.com/support.htm>.
 
-          Please send feedback to <linux-bugs@chelsio.com>.
+	  Please send feedback to <linux-bugs@chelsio.com>.
 
-          To compile this driver as a module, choose M here: the module
-          will be called cxgb3.
+	  To compile this driver as a module, choose M here: the module
+	  will be called cxgb3.
 
 config EHEA
 	tristate "eHEA Ethernet support"
+15
drivers/net/cxgb3/common.h
···
 	unsigned long serdes_signal_loss;
 	unsigned long xaui_pcs_ctc_err;
 	unsigned long xaui_pcs_align_change;
+
+	unsigned long num_toggled; /* # times toggled TxEn due to stuck TX */
+	unsigned long num_resets;  /* # times reset due to stuck TX */
+
 };
 
 struct tp_mib_stats {
···
 	unsigned int rev;	/* chip revision */
 };
 
+enum {				/* chip revisions */
+	T3_REV_A  = 0,
+	T3_REV_B  = 2,
+	T3_REV_B2 = 3,
+};
+
 struct trace_params {
 	u32 sip;
 	u32 sip_mask;
···
 	struct adapter *adapter;
 	unsigned int offset;
 	unsigned int nucast;	/* # of address filters for unicast MACs */
+	unsigned int tcnt;
+	unsigned int xcnt;
+	unsigned int toggle_cnt;
+	unsigned int txen;
 	struct mac_stats stats;
 };
···
 int t3_mac_set_num_ucast(struct cmac *mac, int n);
 const struct mac_stats *t3_mac_update_stats(struct cmac *mac);
 int t3_mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex, int fc);
+int t3b2_mac_watchdog_task(struct cmac *mac);
 
 void t3_mc5_prep(struct adapter *adapter, struct mc5 *mc5, int mode);
 int t3_mc5_init(struct mc5 *mc5, unsigned int nservers, unsigned int nfilters,
+81 -9
drivers/net/cxgb3/cxgb3_main.c
···
 #include <linux/workqueue.h>
 #include <linux/proc_fs.h>
 #include <linux/rtnetlink.h>
+#include <linux/firmware.h>
 #include <asm/uaccess.h>
 
 #include "common.h"
···
 	}
 }
 
+#define FW_FNAME "t3fw-%d.%d.bin"
+
+static int upgrade_fw(struct adapter *adap)
+{
+	int ret;
+	char buf[64];
+	const struct firmware *fw;
+	struct device *dev = &adap->pdev->dev;
+
+	snprintf(buf, sizeof(buf), FW_FNAME, FW_VERSION_MAJOR,
+		 FW_VERSION_MINOR);
+	ret = request_firmware(&fw, buf, dev);
+	if (ret < 0) {
+		dev_err(dev, "could not upgrade firmware: unable to load %s\n",
+			buf);
+		return ret;
+	}
+	ret = t3_load_fw(adap, fw->data, fw->size);
+	release_firmware(fw);
+	return ret;
+}
+
 /**
  *	cxgb_up - enable the adapter
  *	@adapter: adapter being enabled
···
 
 	if (!(adap->flags & FULL_INIT_DONE)) {
 		err = t3_check_fw_version(adap);
+		if (err == -EINVAL)
+			err = upgrade_fw(adap);
 		if (err)
 			goto out;
···
 	"VLANinsertions     ",
 	"TxCsumOffload      ",
 	"RxCsumGood         ",
-	"RxDrops            "
+	"RxDrops            ",
+
+	"CheckTXEnToggled   ",
+	"CheckResets        ",
+
 };
 
 static int get_stats_count(struct net_device *dev)
···
 	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
 	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
 	*data++ = s->rx_cong_drops;
+
+	*data++ = s->num_toggled;
+	*data++ = s->num_resets;
 }
 
 static inline void reg_block_dump(struct adapter *ap, void *buf,
···
 
 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
 {
-	struct adapter *adapter = dev->priv;
+	const struct adapter *adapter = dev->priv;
+	const struct port_info *pi = netdev_priv(dev);
+	const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];
 
 	e->rx_max_pending = MAX_RX_BUFFERS;
 	e->rx_mini_max_pending = 0;
 	e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
 	e->tx_max_pending = MAX_TXQ_ENTRIES;
 
-	e->rx_pending = adapter->params.sge.qset[0].fl_size;
-	e->rx_mini_pending = adapter->params.sge.qset[0].rspq_size;
-	e->rx_jumbo_pending = adapter->params.sge.qset[0].jumbo_size;
-	e->tx_pending = adapter->params.sge.qset[0].txq_size[0];
+	e->rx_pending = q->fl_size;
+	e->rx_mini_pending = q->rspq_size;
+	e->rx_jumbo_pending = q->jumbo_size;
+	e->tx_pending = q->txq_size[0];
 }
 
 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
 {
 	int i;
+	struct qset_params *q;
 	struct adapter *adapter = dev->priv;
+	const struct port_info *pi = netdev_priv(dev);
 
 	if (e->rx_pending > MAX_RX_BUFFERS ||
 	    e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
···
 	if (adapter->flags & FULL_INIT_DONE)
 		return -EBUSY;
 
-	for (i = 0; i < SGE_QSETS; ++i) {
-		struct qset_params *q = &adapter->params.sge.qset[i];
-
+	q = &adapter->params.sge.qset[pi->first_qset];
+	for (i = 0; i < pi->nqsets; ++i, ++q) {
 		q->rspq_size = e->rx_mini_pending;
 		q->fl_size = e->rx_pending;
 		q->jumbo_size = e->rx_jumbo_pending;
···
 	}
 }
 
+static void check_t3b2_mac(struct adapter *adapter)
+{
+	int i;
+
+	rtnl_lock();			/* synchronize with ifdown */
+	for_each_port(adapter, i) {
+		struct net_device *dev = adapter->port[i];
+		struct port_info *p = netdev_priv(dev);
+		int status;
+
+		if (!netif_running(dev))
+			continue;
+
+		status = 0;
+		if (netif_running(dev))
+			status = t3b2_mac_watchdog_task(&p->mac);
+		if (status == 1)
+			p->mac.stats.num_toggled++;
+		else if (status == 2) {
+			struct cmac *mac = &p->mac;
+
+			t3_mac_set_mtu(mac, dev->mtu);
+			t3_mac_set_address(mac, 0, dev->dev_addr);
+			cxgb_set_rxmode(dev);
+			t3_link_start(&p->phy, mac, &p->link_config);
+			t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
+			t3_port_intr_enable(adapter, p->port_id);
+			p->mac.stats.num_resets++;
+		}
+	}
+	rtnl_unlock();
+}
+
+
 static void t3_adap_check_task(struct work_struct *work)
 {
 	struct adapter *adapter = container_of(work, struct adapter,
···
 		mac_stats_update(adapter);
 		adapter->check_task_cnt = 0;
 	}
+
+	if (p->rev == T3_REV_B2)
+		check_t3b2_mac(adapter);
 
 	/* Schedule the next check update if any port is active. */
 	spin_lock(&adapter->work_lock);
+22
drivers/net/cxgb3/regs.h
···
 
 #define A_TP_RX_TRC_KEY0 0x120
 
+#define A_TP_TX_DROP_CNT_CH0 0x12d
+
+#define S_TXDROPCNTCH0RCVD    0
+#define M_TXDROPCNTCH0RCVD    0xffff
+#define V_TXDROPCNTCH0RCVD(x) ((x) << S_TXDROPCNTCH0RCVD)
+#define G_TXDROPCNTCH0RCVD(x) (((x) >> S_TXDROPCNTCH0RCVD) & \
+			       M_TXDROPCNTCH0RCVD)
+
 #define A_ULPRX_CTL 0x500
 
 #define S_ROUND_ROBIN    4
···
 #define V_TXPAUSEEN(x) ((x) << S_TXPAUSEEN)
 #define F_TXPAUSEEN    V_TXPAUSEEN(1U)
 
+#define A_XGM_TX_PAUSE_QUANTA 0x808
+
 #define A_XGM_RX_CTRL 0x80c
 
 #define S_RXEN    0
···
 #define F_DISERRFRAMES    V_DISERRFRAMES(1U)
 
 #define A_XGM_TXFIFO_CFG 0x888
+
+#define S_TXIPG    13
+#define M_TXIPG    0xff
+#define V_TXIPG(x) ((x) << S_TXIPG)
+#define G_TXIPG(x) (((x) >> S_TXIPG) & M_TXIPG)
 
 #define S_TXFIFOTHRESH    4
 #define M_TXFIFOTHRESH    0x1ff
···
 #define F_CMULOCK    V_CMULOCK(1U)
 
 #define A_XGM_RX_MAX_PKT_SIZE_ERR_CNT 0x9a4
+
+#define A_XGM_TX_SPI4_SOP_EOP_CNT 0x9a8
+
+#define S_TXSPI4SOPCNT    16
+#define M_TXSPI4SOPCNT    0xffff
+#define V_TXSPI4SOPCNT(x) ((x) << S_TXSPI4SOPCNT)
+#define G_TXSPI4SOPCNT(x) (((x) >> S_TXSPI4SOPCNT) & M_TXSPI4SOPCNT)
 
 #define A_XGM_RX_SPI4_SOP_EOP_CNT 0x9ac
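Editor's note: the S_/M_/V_/G_ quartets added above follow the usual cxgb3 register-field convention: S_* is the field's bit offset, M_* its width mask, V_*(x) positions a value inside the register word, and G_*(x) extracts it again. A minimal stand-alone illustration of that round trip, reusing the TXIPG definitions from this hunk (plain userspace C for clarity, not driver code):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Same pattern as the TXIPG field added above: an 8-bit field at bit 13. */
#define S_TXIPG    13
#define M_TXIPG    0xff
#define V_TXIPG(x) ((x) << S_TXIPG)
#define G_TXIPG(x) (((x) >> S_TXIPG) & M_TXIPG)

int main(void)
{
	uint32_t reg = 0xffffffff;

	/* Clear the field, then insert an inter-packet-gap value of 1,
	 * mirroring the t3_set_reg_field() call in xgmac.c below. */
	reg &= ~((uint32_t)V_TXIPG(M_TXIPG));
	reg |= V_TXIPG(1);

	assert(G_TXIPG(reg) == 1);
	printf("reg = 0x%08x, TXIPG = %u\n", reg, (unsigned)G_TXIPG(reg));
	return 0;
}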
+9 -6
drivers/net/cxgb3/t3_hw.c
···
 	SF_ERASE_SECTOR = 0xd8,		/* erase sector */
 
 	FW_FLASH_BOOT_ADDR = 0x70000,	/* start address of FW in flash */
-	FW_VERS_ADDR = 0x77ffc		/* flash address holding FW version */
+	FW_VERS_ADDR = 0x77ffc,		/* flash address holding FW version */
+	FW_MIN_SIZE = 8			/* at least version and csum */
 };
 
 /**
···
 	const u32 *p = (const u32 *)fw_data;
 	int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;
 
-	if (size & 3)
+	if ((size & 3) || size < FW_MIN_SIZE)
 		return -EINVAL;
 	if (size > FW_VERS_ADDR + 8 - FW_FLASH_BOOT_ADDR)
 		return -EFBIG;
···
 }
 
 /*
- * Reset the adapter.  PCIe cards lose their config space during reset, PCI-X
+ * Reset the adapter.
+ * Older PCIe cards lose their config space during reset, PCI-X
  * ones don't.
  */
 int t3_reset_adapter(struct adapter *adapter)
 {
-	int i;
+	int i, save_and_restore_pcie =
+	    adapter->params.rev < T3_REV_B2 && is_pcie(adapter);
 	uint16_t devid = 0;
 
-	if (is_pcie(adapter))
+	if (save_and_restore_pcie)
 		pci_save_state(adapter->pdev);
 	t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);
 
···
 	if (devid != 0x1425)
 		return -1;
 
-	if (is_pcie(adapter))
+	if (save_and_restore_pcie)
 		pci_restore_state(adapter->pdev);
 	return 0;
 }
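Editor's note: per the change above, T3B2 parts keep their PCIe configuration space across the warm reset, so config space is now saved and restored only for pre-B2 PCIe adapters (PCI-X parts never needed it). pci_save_state()/pci_restore_state() are the stock helpers for this; a minimal, hedged sketch of the conditional pattern (the function and the reset placeholder are illustrative, not cxgb3 code):

#include <linux/pci.h>

static int reset_preserving_config(struct pci_dev *pdev, int save_and_restore)
{
	if (save_and_restore)
		pci_save_state(pdev);	/* snapshot standard config registers */

	/* ... device-specific warm reset would go here (placeholder) ... */

	if (save_and_restore)
		pci_restore_state(pdev);	/* reprogram BARs, command reg, ... */
	return 0;
}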
+118 -15
drivers/net/cxgb3/xgmac.c
···
 		xaui_serdes_reset(mac);
 	}
 
-	if (adap->params.rev > 0)
-		t3_write_reg(adap, A_XGM_PAUSE_TIMER + oft, 0xf000);
-
 	val = F_MAC_RESET_;
 	if (is_10G(adap))
 		val |= F_PCS_RESET_;
···
 	}
 
 	memset(&mac->stats, 0, sizeof(mac->stats));
+	return 0;
+}
+
+int t3b2_mac_reset(struct cmac *mac)
+{
+	struct adapter *adap = mac->adapter;
+	unsigned int oft = mac->offset;
+	u32 val;
+
+	if (!macidx(mac))
+		t3_set_reg_field(adap, A_MPS_CFG, F_PORT0ACTIVE, 0);
+	else
+		t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE, 0);
+
+	t3_write_reg(adap, A_XGM_RESET_CTRL + oft, F_MAC_RESET_);
+	t3_read_reg(adap, A_XGM_RESET_CTRL + oft);    /* flush */
+
+	msleep(10);
+
+	/* Check for xgm Rx fifo empty */
+	if (t3_wait_op_done(adap, A_XGM_RX_MAX_PKT_SIZE_ERR_CNT + oft,
+			    0x80000000, 1, 5, 2)) {
+		CH_ERR(adap, "MAC %d Rx fifo drain failed\n",
+		       macidx(mac));
+		return -1;
+	}
+
+	t3_write_reg(adap, A_XGM_RESET_CTRL + oft, 0);
+	t3_read_reg(adap, A_XGM_RESET_CTRL + oft);    /* flush */
+
+	val = F_MAC_RESET_;
+	if (is_10G(adap))
+		val |= F_PCS_RESET_;
+	else if (uses_xaui(adap))
+		val |= F_PCS_RESET_ | F_XG2G_RESET_;
+	else
+		val |= F_RGMII_RESET_ | F_XG2G_RESET_;
+	t3_write_reg(adap, A_XGM_RESET_CTRL + oft, val);
+	t3_read_reg(adap, A_XGM_RESET_CTRL + oft);    /* flush */
+	if ((val & F_PCS_RESET_) && adap->params.rev) {
+		msleep(1);
+		t3b_pcs_reset(mac);
+	}
+	t3_write_reg(adap, A_XGM_RX_CFG + oft,
+		     F_DISPAUSEFRAMES | F_EN1536BFRAMES |
+		     F_RMFCS | F_ENJUMBO | F_ENHASHMCAST);
+
+	if (!macidx(mac))
+		t3_set_reg_field(adap, A_MPS_CFG, 0, F_PORT0ACTIVE);
+	else
+		t3_set_reg_field(adap, A_MPS_CFG, 0, F_PORT1ACTIVE);
+
 	return 0;
 }
 
···
 	 * Adjust the PAUSE frame watermarks.  We always set the LWM, and the
 	 * HWM only if flow-control is enabled.
 	 */
-	hwm = max(MAC_RXFIFO_SIZE - 3 * mtu, MAC_RXFIFO_SIZE / 2U);
-	hwm = min(hwm, 3 * MAC_RXFIFO_SIZE / 4 + 1024);
-	lwm = hwm - 1024;
+	hwm = max_t(unsigned int, MAC_RXFIFO_SIZE - 3 * mtu,
+		    MAC_RXFIFO_SIZE * 38 / 100);
+	hwm = min(hwm, MAC_RXFIFO_SIZE - 8192);
+	lwm = min(3 * (int)mtu, MAC_RXFIFO_SIZE / 4);
+
 	v = t3_read_reg(adap, A_XGM_RXFIFO_CFG + mac->offset);
 	v &= ~V_RXFIFOPAUSELWM(M_RXFIFOPAUSELWM);
 	v |= V_RXFIFOPAUSELWM(lwm / 8);
···
 	thres = mtu > thres ? (mtu - thres + 7) / 8 : 0;
 	thres = max(thres, 8U);	/* need at least 8 */
 	t3_set_reg_field(adap, A_XGM_TXFIFO_CFG + mac->offset,
-			 V_TXFIFOTHRESH(M_TXFIFOTHRESH), V_TXFIFOTHRESH(thres));
+			 V_TXFIFOTHRESH(M_TXFIFOTHRESH) | V_TXIPG(M_TXIPG),
+			 V_TXFIFOTHRESH(thres) | V_TXIPG(1));
+
+	if (adap->params.rev > 0)
+		t3_write_reg(adap, A_XGM_PAUSE_TIMER + mac->offset,
+			     (hwm - lwm) * 4 / 8);
+	t3_write_reg(adap, A_XGM_TX_PAUSE_QUANTA + mac->offset,
+		     MAC_RXFIFO_SIZE * 4 * 8 / 512);
+
 	return 0;
 }
 
···
 				 V_PORTSPEED(M_PORTSPEED), val);
 
 	}
-	val = t3_read_reg(adap, A_XGM_RXFIFO_CFG + oft);
-	val &= ~V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM);
-	if (fc & PAUSE_TX)
-		val |= V_RXFIFOPAUSEHWM(G_RXFIFOPAUSELWM(val) + 128); /* +1KB */
-	t3_write_reg(adap, A_XGM_RXFIFO_CFG + oft, val);
-
 	t3_set_reg_field(adap, A_XGM_TX_CFG + oft, F_TXPAUSEEN,
 			 (fc & PAUSE_RX) ? F_TXPAUSEEN : 0);
 	return 0;
···
 	if (which & MAC_DIRECTION_TX) {
 		t3_write_reg(adap, A_XGM_TX_CTRL + oft, F_TXEN);
 		t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
-		t3_write_reg(adap, A_TP_PIO_DATA, 0xbf000001);
+		t3_write_reg(adap, A_TP_PIO_DATA, 0xc0ede401);
 		t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_MODE);
 		t3_set_reg_field(adap, A_TP_PIO_DATA, 1 << idx, 1 << idx);
+
+		t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CNT_CH0 + idx);
+		mac->tcnt = (G_TXDROPCNTCH0RCVD(t3_read_reg(adap,
+							    A_TP_PIO_DATA)));
+		mac->xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap,
+						A_XGM_TX_SPI4_SOP_EOP_CNT)));
+		mac->txen = F_TXEN;
+		mac->toggle_cnt = 0;
 	}
 	if (which & MAC_DIRECTION_RX)
 		t3_write_reg(adap, A_XGM_RX_CTRL + oft, F_RXEN);
···
 		t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
 		t3_write_reg(adap, A_TP_PIO_DATA, 0xc000001f);
 		t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_MODE);
-		t3_set_reg_field(adap, A_TP_PIO_DATA, 1 << idx, 0);
+		t3_set_reg_field(adap, A_TP_PIO_DATA, 1 << idx, 1 << idx);
+		mac->txen = 0;
 	}
 	if (which & MAC_DIRECTION_RX)
 		t3_write_reg(adap, A_XGM_RX_CTRL + mac->offset, 0);
 	return 0;
+}
+
+int t3b2_mac_watchdog_task(struct cmac *mac)
+{
+	struct adapter *adap = mac->adapter;
+	unsigned int tcnt, xcnt;
+	int status;
+
+	t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CNT_CH0 + macidx(mac));
+	tcnt = (G_TXDROPCNTCH0RCVD(t3_read_reg(adap, A_TP_PIO_DATA)));
+	xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap,
+					   A_XGM_TX_SPI4_SOP_EOP_CNT +
+					   mac->offset)));
+
+	if (tcnt != mac->tcnt && xcnt == 0 && mac->xcnt == 0) {
+		if (mac->toggle_cnt > 4) {
+			t3b2_mac_reset(mac);
+			mac->toggle_cnt = 0;
+			status = 2;
+		} else {
+			t3_write_reg(adap, A_XGM_TX_CTRL + mac->offset, 0);
+			t3_read_reg(adap, A_XGM_TX_CTRL + mac->offset);
+			t3_write_reg(adap, A_XGM_TX_CTRL + mac->offset,
+				     mac->txen);
+			t3_read_reg(adap, A_XGM_TX_CTRL + mac->offset);
+			mac->toggle_cnt++;
+			status = 1;
+		}
+	} else {
+		mac->toggle_cnt = 0;
+		status = 0;
+	}
+	mac->tcnt = tcnt;
+	mac->xcnt = xcnt;
+
+	return status;
 }
 
 /*
···
 
 	RMON_UPDATE(mac, rx_too_long, RX_OVERSIZE_FRAMES);
-	mac->stats.rx_too_long += RMON_READ(mac, A_XGM_RX_MAX_PKT_SIZE_ERR_CNT);
+
+	v = RMON_READ(mac, A_XGM_RX_MAX_PKT_SIZE_ERR_CNT);
+	if (mac->adapter->params.rev == T3_REV_B2)
+		v &= 0x7fffffff;
+	mac->stats.rx_too_long += v;
+
 	RMON_UPDATE(mac, rx_frames_64, RX_64B_FRAMES);
 	RMON_UPDATE(mac, rx_frames_65_127, RX_65_127B_FRAMES);
+1 -2
drivers/net/ewrk3.c
···
 	icr &= 0x70;
 	outb(icr, EWRK3_ICR);	/* Disable all the IRQs */
 
-	if (nicsr == (CSR_TXD | CSR_RXD))
+	if (nicsr != (CSR_TXD | CSR_RXD))
 		return -ENXIO;
-
 
 	/* Check that the EEPROM is alive and well and not living on Pluto... */
 	for (chksum = 0, i = 0; i < EEPROM_MAX; i += 2) {
+14
drivers/net/mv643xx_eth.c
···
 	return 0;
 }
 
+static void mv643xx_eth_shutdown(struct platform_device *pdev)
+{
+	struct net_device *dev = platform_get_drvdata(pdev);
+	struct mv643xx_private *mp = netdev_priv(dev);
+	unsigned int port_num = mp->port_num;
+
+	/* Mask all interrupts on ethernet port */
+	mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), 0);
+	mv_read (MV643XX_ETH_INTERRUPT_MASK_REG(port_num));
+
+	eth_port_reset(port_num);
+}
+
 static struct platform_driver mv643xx_eth_driver = {
 	.probe = mv643xx_eth_probe,
 	.remove = mv643xx_eth_remove,
+	.shutdown = mv643xx_eth_shutdown,
 	.driver = {
 		   .name = MV643XX_ETH_NAME,
 		   },
+19 -3
drivers/net/myri10ge/myri10ge.c
···
 #include "myri10ge_mcp.h"
 #include "myri10ge_mcp_gen_header.h"
 
-#define MYRI10GE_VERSION_STR "1.2.0"
+#define MYRI10GE_VERSION_STR "1.3.0-1.226"
 
 MODULE_DESCRIPTION("Myricom 10G driver (10GbE)");
 MODULE_AUTHOR("Maintainer: help@myri.com");
···
 module_param(myri10ge_msi, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(myri10ge_msi, "Enable Message Signalled Interrupts\n");
 
-static int myri10ge_intr_coal_delay = 25;
+static int myri10ge_intr_coal_delay = 75;
 module_param(myri10ge_intr_coal_delay, int, S_IRUGO);
 MODULE_PARM_DESC(myri10ge_intr_coal_delay, "Interrupt coalescing delay\n");
···
 module_param(myri10ge_fill_thresh, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(myri10ge_fill_thresh, "Number of empty rx slots allowed\n");
 
-static int myri10ge_wcfifo = 1;
+static int myri10ge_wcfifo = 0;
 module_param(myri10ge_wcfifo, int, S_IRUGO);
 MODULE_PARM_DESC(myri10ge_wcfifo, "Enable WC Fifo when WC is enabled\n");
···
 	    (rx->page_offset + bytes <= MYRI10GE_ALLOC_SIZE)) {
 		/* we can use part of previous page */
 		get_page(rx->page);
+#if MYRI10GE_ALLOC_SIZE > 4096
+		/* Firmware cannot cross 4K boundary.. */
+		if ((rx->page_offset >> 12) !=
+		    ((rx->page_offset + bytes - 1) >> 12)) {
+			rx->page_offset =
+			    (rx->page_offset + bytes) & ~4095;
+		}
+#endif
 	} else {
 		/* we need a new page */
 		page =
···
 
 #define PCI_DEVICE_ID_INTEL_E5000_PCIE23 0x25f7
 #define PCI_DEVICE_ID_INTEL_E5000_PCIE47 0x25fa
+#define PCI_DEVICE_ID_SERVERWORKS_HT2100_PCIE_FIRST 0x140
+#define PCI_DEVICE_ID_SERVERWORKS_HT2100_PCIE_LAST 0x142
 
 static void myri10ge_select_firmware(struct myri10ge_priv *mgp)
 {
···
 	    ((bridge->vendor == PCI_VENDOR_ID_SERVERWORKS
 	      && bridge->device ==
 	      PCI_DEVICE_ID_SERVERWORKS_HT2000_PCIE)
+	     /* ServerWorks HT2100 */
+	     || (bridge->vendor == PCI_VENDOR_ID_SERVERWORKS
+		 && bridge->device >=
+		 PCI_DEVICE_ID_SERVERWORKS_HT2100_PCIE_FIRST
+		 && bridge->device <=
+		 PCI_DEVICE_ID_SERVERWORKS_HT2100_PCIE_LAST)
 	     /* All Intel E5000 PCIE ports */
 	     || (bridge->vendor == PCI_VENDOR_ID_INTEL
 		 && bridge->device >=
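Editor's note: the #if MYRI10GE_ALLOC_SIZE > 4096 block above keeps a receive slice from straddling a 4 KiB boundary: it compares the 4 KiB page index (offset >> 12) of the first and last byte of the proposed slice and, if they differ, bumps the offset up to the next 4 KiB boundary. A small stand-alone check of that arithmetic (plain C; the numbers are arbitrary examples, not driver values):

#include <assert.h>
#include <stdio.h>

/* 1 if [offset, offset + bytes) crosses a 4 KiB boundary, as in the driver check. */
static int crosses_4k(unsigned int offset, unsigned int bytes)
{
	return (offset >> 12) != ((offset + bytes - 1) >> 12);
}

int main(void)
{
	unsigned int offset = 3000, bytes = 2048;

	assert(crosses_4k(offset, bytes));	/* bytes 3000..5047 span two 4 KiB pages */
	assert(!crosses_4k(4096, 2048));	/* bytes 4096..6143 stay within one page */

	if (crosses_4k(offset, bytes))
		offset = (offset + bytes) & ~4095u;	/* same rounding as the patch */

	printf("realigned offset = %u\n", offset);	/* prints 4096 */
	return 0;
}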
+2 -2
drivers/net/pci-skeleton.c
···
 		tp->chipset,
 		rtl_chip_info[tp->chipset].name);
 
-	i = register_netdev (dev);
-	if (i)
+	rc = register_netdev (dev);
+	if (rc)
 		goto err_out_unmap;
 
 	DPRINTK ("EXIT, returning 0\n");
+88 -89
drivers/net/saa9730.c
···
 
 static void evm_saa9730_enable_lan_int(struct lan_saa9730_private *lp)
 {
-	outl(readl(&lp->evm_saa9730_regs->InterruptBlock1) | EVM_LAN_INT,
-	     &lp->evm_saa9730_regs->InterruptBlock1);
-	outl(readl(&lp->evm_saa9730_regs->InterruptStatus1) | EVM_LAN_INT,
-	     &lp->evm_saa9730_regs->InterruptStatus1);
-	outl(readl(&lp->evm_saa9730_regs->InterruptEnable1) | EVM_LAN_INT |
-	     EVM_MASTER_EN, &lp->evm_saa9730_regs->InterruptEnable1);
+	writel(readl(&lp->evm_saa9730_regs->InterruptBlock1) | EVM_LAN_INT,
+	       &lp->evm_saa9730_regs->InterruptBlock1);
+	writel(readl(&lp->evm_saa9730_regs->InterruptStatus1) | EVM_LAN_INT,
+	       &lp->evm_saa9730_regs->InterruptStatus1);
+	writel(readl(&lp->evm_saa9730_regs->InterruptEnable1) | EVM_LAN_INT |
+	       EVM_MASTER_EN, &lp->evm_saa9730_regs->InterruptEnable1);
 }
 
 static void evm_saa9730_disable_lan_int(struct lan_saa9730_private *lp)
 {
-	outl(readl(&lp->evm_saa9730_regs->InterruptBlock1) & ~EVM_LAN_INT,
-	     &lp->evm_saa9730_regs->InterruptBlock1);
-	outl(readl(&lp->evm_saa9730_regs->InterruptEnable1) & ~EVM_LAN_INT,
-	     &lp->evm_saa9730_regs->InterruptEnable1);
+	writel(readl(&lp->evm_saa9730_regs->InterruptBlock1) & ~EVM_LAN_INT,
+	       &lp->evm_saa9730_regs->InterruptBlock1);
+	writel(readl(&lp->evm_saa9730_regs->InterruptEnable1) & ~EVM_LAN_INT,
+	       &lp->evm_saa9730_regs->InterruptEnable1);
 }
 
 static void evm_saa9730_clear_lan_int(struct lan_saa9730_private *lp)
 {
-	outl(EVM_LAN_INT, &lp->evm_saa9730_regs->InterruptStatus1);
+	writel(EVM_LAN_INT, &lp->evm_saa9730_regs->InterruptStatus1);
 }
 
 static void evm_saa9730_block_lan_int(struct lan_saa9730_private *lp)
 {
-	outl(readl(&lp->evm_saa9730_regs->InterruptBlock1) & ~EVM_LAN_INT,
-	     &lp->evm_saa9730_regs->InterruptBlock1);
+	writel(readl(&lp->evm_saa9730_regs->InterruptBlock1) & ~EVM_LAN_INT,
+	       &lp->evm_saa9730_regs->InterruptBlock1);
 }
 
 static void evm_saa9730_unblock_lan_int(struct lan_saa9730_private *lp)
 {
-	outl(readl(&lp->evm_saa9730_regs->InterruptBlock1) | EVM_LAN_INT,
-	     &lp->evm_saa9730_regs->InterruptBlock1);
+	writel(readl(&lp->evm_saa9730_regs->InterruptBlock1) | EVM_LAN_INT,
+	       &lp->evm_saa9730_regs->InterruptBlock1);
 }
 
 static void __attribute_used__ show_saa9730_regs(struct lan_saa9730_private *lp)
···
 	printk("lp->lan_saa9730_regs->RxStatus = %x\n",
 	       readl(&lp->lan_saa9730_regs->RxStatus));
 	for (i = 0; i < LAN_SAA9730_CAM_DWORDS; i++) {
-		outl(i, &lp->lan_saa9730_regs->CamAddress);
+		writel(i, &lp->lan_saa9730_regs->CamAddress);
 		printk("lp->lan_saa9730_regs->CamData = %x\n",
 		       readl(&lp->lan_saa9730_regs->CamData));
 	}
···
 	 * Set rx buffer A and rx buffer B to point to the first two buffer
 	 * spaces.
 	 */
-	outl(lp->dma_addr + rxoffset,
-	     &lp->lan_saa9730_regs->RxBuffA);
-	outl(lp->dma_addr + rxoffset +
-	     LAN_SAA9730_PACKET_SIZE * LAN_SAA9730_RCV_Q_SIZE,
-	     &lp->lan_saa9730_regs->RxBuffB);
+	writel(lp->dma_addr + rxoffset, &lp->lan_saa9730_regs->RxBuffA);
+	writel(lp->dma_addr + rxoffset +
+	       LAN_SAA9730_PACKET_SIZE * LAN_SAA9730_RCV_Q_SIZE,
+	       &lp->lan_saa9730_regs->RxBuffB);
 
 	/*
 	 * Set txm_buf_a and txm_buf_b to point to the first two buffer
 	 * space
 	 */
-	outl(lp->dma_addr + txoffset,
-	     &lp->lan_saa9730_regs->TxBuffA);
-	outl(lp->dma_addr + txoffset +
-	     LAN_SAA9730_PACKET_SIZE * LAN_SAA9730_TXM_Q_SIZE,
-	     &lp->lan_saa9730_regs->TxBuffB);
+	writel(lp->dma_addr + txoffset,
+	       &lp->lan_saa9730_regs->TxBuffA);
+	writel(lp->dma_addr + txoffset +
+	       LAN_SAA9730_PACKET_SIZE * LAN_SAA9730_TXM_Q_SIZE,
+	       &lp->lan_saa9730_regs->TxBuffB);
 
 	/* Set packet number */
-	outl((lp->DmaRcvPackets << PK_COUNT_RX_A_SHF) |
-	     (lp->DmaRcvPackets << PK_COUNT_RX_B_SHF) |
-	     (lp->DmaTxmPackets << PK_COUNT_TX_A_SHF) |
-	     (lp->DmaTxmPackets << PK_COUNT_TX_B_SHF),
-	     &lp->lan_saa9730_regs->PacketCount);
+	writel((lp->DmaRcvPackets << PK_COUNT_RX_A_SHF) |
+	       (lp->DmaRcvPackets << PK_COUNT_RX_B_SHF) |
+	       (lp->DmaTxmPackets << PK_COUNT_TX_A_SHF) |
+	       (lp->DmaTxmPackets << PK_COUNT_TX_B_SHF),
+	       &lp->lan_saa9730_regs->PacketCount);
 
 	return 0;
···
 
 	for (i = 0; i < LAN_SAA9730_CAM_DWORDS; i++) {
 		/* First set address to where data is written */
-		outl(i, &lp->lan_saa9730_regs->CamAddress);
-		outl((NetworkAddress[0] << 24) | (NetworkAddress[1] << 16)
-		     | (NetworkAddress[2] << 8) | NetworkAddress[3],
-		     &lp->lan_saa9730_regs->CamData);
+		writel(i, &lp->lan_saa9730_regs->CamAddress);
+		writel((NetworkAddress[0] << 24) | (NetworkAddress[1] << 16) |
+		       (NetworkAddress[2] << 8) | NetworkAddress[3],
+		       &lp->lan_saa9730_regs->CamData);
 		NetworkAddress += 4;
 	}
 	return 0;
···
 	}
 
 	/* Now set the control and address register. */
-	outl(MD_CA_BUSY | PHY_STATUS | PHY_ADDRESS << MD_CA_PHY_SHF,
-	     &lp->lan_saa9730_regs->StationMgmtCtl);
+	writel(MD_CA_BUSY | PHY_STATUS | PHY_ADDRESS << MD_CA_PHY_SHF,
+	       &lp->lan_saa9730_regs->StationMgmtCtl);
 
 	/* check link status, spin here till station is not busy */
 	i = 0;
···
 		/* Link is down, reset the PHY first. */
 
 		/* set PHY address = 'CONTROL' */
-		outl(PHY_ADDRESS << MD_CA_PHY_SHF | MD_CA_WR | PHY_CONTROL,
-		     &lp->lan_saa9730_regs->StationMgmtCtl);
+		writel(PHY_ADDRESS << MD_CA_PHY_SHF | MD_CA_WR | PHY_CONTROL,
+		       &lp->lan_saa9730_regs->StationMgmtCtl);
 
 		/* Wait for 1 ms. */
 		mdelay(1);
 
 		/* set 'CONTROL' = force reset and renegotiate */
-		outl(PHY_CONTROL_RESET | PHY_CONTROL_AUTO_NEG |
-		     PHY_CONTROL_RESTART_AUTO_NEG,
-		     &lp->lan_saa9730_regs->StationMgmtData);
+		writel(PHY_CONTROL_RESET | PHY_CONTROL_AUTO_NEG |
+		       PHY_CONTROL_RESTART_AUTO_NEG,
+		       &lp->lan_saa9730_regs->StationMgmtData);
 
 		/* Wait for 50 ms. */
 		mdelay(50);
 
 		/* set 'BUSY' to start operation */
-		outl(MD_CA_BUSY | PHY_ADDRESS << MD_CA_PHY_SHF | MD_CA_WR |
-		     PHY_CONTROL, &lp->lan_saa9730_regs->StationMgmtCtl);
+		writel(MD_CA_BUSY | PHY_ADDRESS << MD_CA_PHY_SHF | MD_CA_WR |
+		       PHY_CONTROL, &lp->lan_saa9730_regs->StationMgmtCtl);
 
 		/* await completion */
 		i = 0;
···
 
 		for (l = 0; l < 2; l++) {
 			/* set PHY address = 'STATUS' */
-			outl(MD_CA_BUSY | PHY_ADDRESS << MD_CA_PHY_SHF |
-			     PHY_STATUS,
-			     &lp->lan_saa9730_regs->StationMgmtCtl);
+			writel(MD_CA_BUSY | PHY_ADDRESS << MD_CA_PHY_SHF |
+			       PHY_STATUS,
+			       &lp->lan_saa9730_regs->StationMgmtCtl);
 
 			/* await completion */
 			i = 0;
···
 static int lan_saa9730_control_init(struct lan_saa9730_private *lp)
 {
 	/* Initialize DMA control register. */
-	outl((LANMB_ANY << DMA_CTL_MAX_XFER_SHF) |
-	     (LANEND_LITTLE << DMA_CTL_ENDIAN_SHF) |
-	     (LAN_SAA9730_RCV_Q_INT_THRESHOLD << DMA_CTL_RX_INT_COUNT_SHF)
-	     | DMA_CTL_RX_INT_TO_EN | DMA_CTL_RX_INT_EN |
-	     DMA_CTL_MAC_RX_INT_EN | DMA_CTL_MAC_TX_INT_EN,
-	     &lp->lan_saa9730_regs->LanDmaCtl);
+	writel((LANMB_ANY << DMA_CTL_MAX_XFER_SHF) |
+	       (LANEND_LITTLE << DMA_CTL_ENDIAN_SHF) |
+	       (LAN_SAA9730_RCV_Q_INT_THRESHOLD << DMA_CTL_RX_INT_COUNT_SHF)
+	       | DMA_CTL_RX_INT_TO_EN | DMA_CTL_RX_INT_EN |
+	       DMA_CTL_MAC_RX_INT_EN | DMA_CTL_MAC_TX_INT_EN,
+	       &lp->lan_saa9730_regs->LanDmaCtl);
 
 	/* Initial MAC control register. */
-	outl((MACCM_MII << MAC_CONTROL_CONN_SHF) | MAC_CONTROL_FULL_DUP,
-	     &lp->lan_saa9730_regs->MacCtl);
+	writel((MACCM_MII << MAC_CONTROL_CONN_SHF) | MAC_CONTROL_FULL_DUP,
+	       &lp->lan_saa9730_regs->MacCtl);
 
 	/* Initialize CAM control register. */
-	outl(CAM_CONTROL_COMP_EN | CAM_CONTROL_BROAD_ACC,
-	     &lp->lan_saa9730_regs->CamCtl);
+	writel(CAM_CONTROL_COMP_EN | CAM_CONTROL_BROAD_ACC,
+	       &lp->lan_saa9730_regs->CamCtl);
 
 	/*
 	 * Initialize CAM enable register, only turn on first entry, should
 	 * contain own addr.
 	 */
-	outl(0x0001, &lp->lan_saa9730_regs->CamEnable);
+	writel(0x0001, &lp->lan_saa9730_regs->CamEnable);
 
 	/* Initialize Tx control register */
-	outl(TX_CTL_EN_COMP, &lp->lan_saa9730_regs->TxCtl);
+	writel(TX_CTL_EN_COMP, &lp->lan_saa9730_regs->TxCtl);
 
 	/* Initialize Rcv control register */
-	outl(RX_CTL_STRIP_CRC, &lp->lan_saa9730_regs->RxCtl);
+	writel(RX_CTL_STRIP_CRC, &lp->lan_saa9730_regs->RxCtl);
 
 	/* Reset DMA engine */
-	outl(DMA_TEST_SW_RESET, &lp->lan_saa9730_regs->DmaTest);
+	writel(DMA_TEST_SW_RESET, &lp->lan_saa9730_regs->DmaTest);
 
 	return 0;
 }
···
 	int i;
 
 	/* Stop DMA first */
-	outl(readl(&lp->lan_saa9730_regs->LanDmaCtl) &
-	     ~(DMA_CTL_EN_TX_DMA | DMA_CTL_EN_RX_DMA),
-	     &lp->lan_saa9730_regs->LanDmaCtl);
+	writel(readl(&lp->lan_saa9730_regs->LanDmaCtl) &
+	       ~(DMA_CTL_EN_TX_DMA | DMA_CTL_EN_RX_DMA),
+	       &lp->lan_saa9730_regs->LanDmaCtl);
 
 	/* Set the SW Reset bits in DMA and MAC control registers */
-	outl(DMA_TEST_SW_RESET, &lp->lan_saa9730_regs->DmaTest);
-	outl(readl(&lp->lan_saa9730_regs->MacCtl) | MAC_CONTROL_RESET,
-	     &lp->lan_saa9730_regs->MacCtl);
+	writel(DMA_TEST_SW_RESET, &lp->lan_saa9730_regs->DmaTest);
+	writel(readl(&lp->lan_saa9730_regs->MacCtl) | MAC_CONTROL_RESET,
+	       &lp->lan_saa9730_regs->MacCtl);
 
 	/*
 	 * Wait for MAC reset to have finished. The reset bit is auto cleared
···
 	/* Stop lan controller. */
 	lan_saa9730_stop(lp);
 
-	outl(LAN_SAA9730_DEFAULT_TIME_OUT_CNT,
-	     &lp->lan_saa9730_regs->Timeout);
+	writel(LAN_SAA9730_DEFAULT_TIME_OUT_CNT,
+	       &lp->lan_saa9730_regs->Timeout);
 
 	return 0;
 }
···
 	lp->PendingTxmPacketIndex = 0;
 	lp->PendingTxmBufferIndex = 0;
 
-	outl(readl(&lp->lan_saa9730_regs->LanDmaCtl) | DMA_CTL_EN_TX_DMA |
-	     DMA_CTL_EN_RX_DMA, &lp->lan_saa9730_regs->LanDmaCtl);
+	writel(readl(&lp->lan_saa9730_regs->LanDmaCtl) | DMA_CTL_EN_TX_DMA |
+	       DMA_CTL_EN_RX_DMA, &lp->lan_saa9730_regs->LanDmaCtl);
 
 	/* For Tx, turn on MAC then DMA */
-	outl(readl(&lp->lan_saa9730_regs->TxCtl) | TX_CTL_TX_EN,
-	     &lp->lan_saa9730_regs->TxCtl);
+	writel(readl(&lp->lan_saa9730_regs->TxCtl) | TX_CTL_TX_EN,
+	       &lp->lan_saa9730_regs->TxCtl);
 
 	/* For Rx, turn on DMA then MAC */
-	outl(readl(&lp->lan_saa9730_regs->RxCtl) | RX_CTL_RX_EN,
-	     &lp->lan_saa9730_regs->RxCtl);
+	writel(readl(&lp->lan_saa9730_regs->RxCtl) | RX_CTL_RX_EN,
+	       &lp->lan_saa9730_regs->RxCtl);
 
 	/* Set Ok2Use to let hardware own the buffers. */
-	outl(OK2USE_RX_A | OK2USE_RX_B, &lp->lan_saa9730_regs->Ok2Use);
+	writel(OK2USE_RX_A | OK2USE_RX_B, &lp->lan_saa9730_regs->Ok2Use);
 
 	return 0;
 }
···
 		printk("lan_saa9730_tx interrupt\n");
 
 	/* Clear interrupt. */
-	outl(DMA_STATUS_MAC_TX_INT, &lp->lan_saa9730_regs->DmaStatus);
+	writel(DMA_STATUS_MAC_TX_INT, &lp->lan_saa9730_regs->DmaStatus);
 
 	while (1) {
 		pPacket = lp->TxmBuffer[lp->PendingTxmBufferIndex]
···
 		printk("lan_saa9730_rx interrupt\n");
 
 	/* Clear receive interrupts. */
-	outl(DMA_STATUS_MAC_RX_INT | DMA_STATUS_RX_INT |
-	     DMA_STATUS_RX_TO_INT, &lp->lan_saa9730_regs->DmaStatus);
+	writel(DMA_STATUS_MAC_RX_INT | DMA_STATUS_RX_INT |
+	       DMA_STATUS_RX_TO_INT, &lp->lan_saa9730_regs->DmaStatus);
 
 	/* Address next packet */
 	BufferIndex = lp->NextRcvBufferIndex;
···
 	*pPacket = cpu_to_le32(RXSF_READY << RX_STAT_CTL_OWNER_SHF);
 
 	/* Make sure A or B is available to hardware as appropriate. */
-	outl(BufferIndex ? OK2USE_RX_B : OK2USE_RX_A,
-	     &lp->lan_saa9730_regs->Ok2Use);
+	writel(BufferIndex ? OK2USE_RX_B : OK2USE_RX_A,
+	       &lp->lan_saa9730_regs->Ok2Use);
 
 	/* Go to next packet in sequence. */
 	lp->NextRcvPacketIndex++;
···
 			 (len << TX_STAT_CTL_LENGTH_SHF));
 
 	/* Make sure A or B is available to hardware as appropriate. */
-	outl(BufferIndex ? OK2USE_TX_B : OK2USE_TX_A,
-	     &lp->lan_saa9730_regs->Ok2Use);
+	writel(BufferIndex ? OK2USE_TX_B : OK2USE_TX_A,
+	       &lp->lan_saa9730_regs->Ok2Use);
 
 	return 0;
 }
···
 
 	if (dev->flags & IFF_PROMISC) {
 		/* accept all packets */
-		outl(CAM_CONTROL_COMP_EN | CAM_CONTROL_STATION_ACC |
-		     CAM_CONTROL_GROUP_ACC | CAM_CONTROL_BROAD_ACC,
-		     &lp->lan_saa9730_regs->CamCtl);
+		writel(CAM_CONTROL_COMP_EN | CAM_CONTROL_STATION_ACC |
+		       CAM_CONTROL_GROUP_ACC | CAM_CONTROL_BROAD_ACC,
+		       &lp->lan_saa9730_regs->CamCtl);
 	} else {
 		if (dev->flags & IFF_ALLMULTI) {
 			/* accept all multicast packets */
-			outl(CAM_CONTROL_COMP_EN | CAM_CONTROL_GROUP_ACC |
-			     CAM_CONTROL_BROAD_ACC,
-			     &lp->lan_saa9730_regs->CamCtl);
+			writel(CAM_CONTROL_COMP_EN | CAM_CONTROL_GROUP_ACC |
+			       CAM_CONTROL_BROAD_ACC,
+			       &lp->lan_saa9730_regs->CamCtl);
 		} else {
 			/*
 			 * Will handle the multicast stuff later. -carstenl
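Editor's note: the SAA9730 change above is mechanical. The LAN core's registers are memory-mapped and were already read with readl(), so every port-I/O style outl() becomes an MMIO writel(); outl() takes an I/O-port number rather than an __iomem pointer as its second argument, which is what produced the pile of warnings the shortlog refers to. A minimal sketch of the read-modify-write idiom used throughout the file (the helper name is made up for illustration, not driver code):

#include <linux/io.h>

/* Set "bits" in a 32-bit memory-mapped register using readl()/writel(). */
static void mmio_set_bits(void __iomem *reg, u32 bits)
{
	writel(readl(reg) | bits, reg);
}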
+64 -46
drivers/net/skge.c
···
 static const int rxqaddr[] = { Q_R1, Q_R2 };
 static const u32 rxirqmask[] = { IS_R1_F, IS_R2_F };
 static const u32 txirqmask[] = { IS_XA1_F, IS_XA2_F };
-static const u32 irqmask[] = { IS_R1_F|IS_XA1_F, IS_R2_F|IS_XA2_F };
+static const u32 napimask[] = { IS_R1_F|IS_XA1_F, IS_R2_F|IS_XA2_F };
+static const u32 portmask[] = { IS_PORT_1, IS_PORT_2 };
 
 static int skge_get_regs_len(struct net_device *dev)
 {
···
 	struct skge_hw *hw = skge->hw;
 	int port = skge->port;
 
-	mutex_lock(&hw->phy_mutex);
+	spin_lock_bh(&hw->phy_lock);
 	if (hw->chip_id == CHIP_ID_GENESIS) {
 		switch (mode) {
 		case LED_MODE_OFF:
···
 				  PHY_M_LED_MO_RX(MO_LED_ON));
 		}
 	}
-	mutex_unlock(&hw->phy_mutex);
+	spin_unlock_bh(&hw->phy_lock);
 }
 
 /* blink LED's for finding board */
···
 	xm_phy_write(hw, port, PHY_XMAC_CTRL, ctrl);
 
 	/* Poll PHY for status changes */
-	schedule_delayed_work(&skge->link_thread, LINK_HZ);
+	mod_timer(&skge->link_timer, jiffies + LINK_HZ);
 }
 
 static void xm_check_link(struct net_device *dev)
···
  * Since internal PHY is wired to a level triggered pin, can't
  * get an interrupt when carrier is detected.
  */
-static void xm_link_timer(struct work_struct *work)
+static void xm_link_timer(unsigned long arg)
 {
-	struct skge_port *skge =
-		container_of(work, struct skge_port, link_thread.work);
+	struct skge_port *skge = (struct skge_port *) arg;
 	struct net_device *dev = skge->netdev;
 	struct skge_hw *hw = skge->hw;
 	int port = skge->port;
···
 		goto nochange;
 	}
 
-	mutex_lock(&hw->phy_mutex);
+	spin_lock(&hw->phy_lock);
 	xm_check_link(dev);
-	mutex_unlock(&hw->phy_mutex);
+	spin_unlock(&hw->phy_lock);
 
 nochange:
 	if (netif_running(dev))
-		schedule_delayed_work(&skge->link_thread, LINK_HZ);
+		mod_timer(&skge->link_timer, jiffies + LINK_HZ);
 }
 
 static void genesis_mac_init(struct skge_hw *hw, int port)
···
 	netif_stop_queue(skge->netdev);
 	netif_carrier_off(skge->netdev);
 
-	mutex_lock(&hw->phy_mutex);
+	spin_lock_bh(&hw->phy_lock);
 	if (hw->chip_id == CHIP_ID_GENESIS) {
 		genesis_reset(hw, port);
 		genesis_mac_init(hw, port);
···
 		yukon_reset(hw, port);
 		yukon_init(hw, port);
 	}
-	mutex_unlock(&hw->phy_mutex);
+	spin_unlock_bh(&hw->phy_lock);
 
 	dev->set_multicast_list(dev);
 }
···
 		/* fallthru */
 	case SIOCGMIIREG: {
 		u16 val = 0;
-		mutex_lock(&hw->phy_mutex);
+		spin_lock_bh(&hw->phy_lock);
 		if (hw->chip_id == CHIP_ID_GENESIS)
 			err = __xm_phy_read(hw, skge->port, data->reg_num & 0x1f, &val);
 		else
 			err = __gm_phy_read(hw, skge->port, data->reg_num & 0x1f, &val);
-		mutex_unlock(&hw->phy_mutex);
+		spin_unlock_bh(&hw->phy_lock);
 		data->val_out = val;
 		break;
 	}
···
 		if (!capable(CAP_NET_ADMIN))
 			return -EPERM;
 
-		mutex_lock(&hw->phy_mutex);
+		spin_lock_bh(&hw->phy_lock);
 		if (hw->chip_id == CHIP_ID_GENESIS)
 			err = xm_phy_write(hw, skge->port, data->reg_num & 0x1f,
 				   data->val_in);
 		else
 			err = gm_phy_write(hw, skge->port, data->reg_num & 0x1f,
 				   data->val_in);
-		mutex_unlock(&hw->phy_mutex);
+		spin_unlock_bh(&hw->phy_lock);
 		break;
 	}
 	return err;
···
 		goto free_rx_ring;
 
 	/* Initialize MAC */
-	mutex_lock(&hw->phy_mutex);
+	spin_lock_bh(&hw->phy_lock);
 	if (hw->chip_id == CHIP_ID_GENESIS)
 		genesis_mac_init(hw, port);
 	else
 		yukon_mac_init(hw, port);
-	mutex_unlock(&hw->phy_mutex);
+	spin_unlock_bh(&hw->phy_lock);
 
 	/* Configure RAMbuffers */
 	chunk = hw->ram_size / ((hw->ports + 1)*2);
···
 	wmb();
 	skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_START | CSR_IRQ_CL_F);
 	skge_led(skge, LED_MODE_ON);
+
+	spin_lock_irq(&hw->hw_lock);
+	hw->intr_mask |= portmask[port];
+	skge_write32(hw, B0_IMSK, hw->intr_mask);
+	spin_unlock_irq(&hw->hw_lock);
 
 	netif_poll_enable(dev);
 	return 0;
···
 
 	netif_stop_queue(dev);
 	if (hw->chip_id == CHIP_ID_GENESIS && hw->phy_type == SK_PHY_XMAC)
-		cancel_delayed_work(&skge->link_thread);
+		del_timer_sync(&skge->link_timer);
+
+	netif_poll_disable(dev);
+
+	spin_lock_irq(&hw->hw_lock);
+	hw->intr_mask &= ~portmask[port];
+	skge_write32(hw, B0_IMSK, hw->intr_mask);
+	spin_unlock_irq(&hw->hw_lock);
 
 	skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), LED_OFF);
 	if (hw->chip_id == CHIP_ID_GENESIS)
···
 
 	skge_led(skge, LED_MODE_OFF);
 
-	netif_poll_disable(dev);
+	netif_tx_lock_bh(dev);
 	skge_tx_clean(dev);
+	netif_tx_unlock_bh(dev);
+
 	skge_rx_clean(skge);
 
 	kfree(skge->rx_ring.start);
···
 	struct skge_port *skge = netdev_priv(dev);
 	struct skge_element *e;
 
-	netif_tx_lock_bh(dev);
 	for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) {
 		struct skge_tx_desc *td = e->desc;
 		skge_tx_free(skge, e, td->control);
···
 
 	skge->tx_ring.to_clean = e;
 	netif_wake_queue(dev);
-	netif_tx_unlock_bh(dev);
 }
 
 static void skge_tx_timeout(struct net_device *dev)
···
 
 	spin_lock_irqsave(&hw->hw_lock, flags);
 	__netif_rx_complete(dev);
-	hw->intr_mask |= irqmask[skge->port];
+	hw->intr_mask |= napimask[skge->port];
 	skge_write32(hw, B0_IMSK, hw->intr_mask);
 	skge_read32(hw, B0_IMSK);
 	spin_unlock_irqrestore(&hw->hw_lock, flags);
···
 }
 
 /*
- * Interrupt from PHY are handled in work queue
+ * Interrupt from PHY are handled in tasklet (softirq)
  * because accessing phy registers requires spin wait which might
  * cause excess interrupt latency.
  */
-static void skge_extirq(struct work_struct *work)
+static void skge_extirq(unsigned long arg)
 {
-	struct skge_hw *hw = container_of(work, struct skge_hw, phy_work);
+	struct skge_hw *hw = (struct skge_hw *) arg;
 	int port;
 
-	mutex_lock(&hw->phy_mutex);
 	for (port = 0; port < hw->ports; port++) {
 		struct net_device *dev = hw->dev[port];
-		struct skge_port *skge = netdev_priv(dev);
 
 		if (netif_running(dev)) {
+			struct skge_port *skge = netdev_priv(dev);
+
+			spin_lock(&hw->phy_lock);
 			if (hw->chip_id != CHIP_ID_GENESIS)
 				yukon_phy_intr(skge);
 			else if (hw->phy_type == SK_PHY_BCOM)
 				bcom_phy_intr(skge);
+			spin_unlock(&hw->phy_lock);
 		}
 	}
-	mutex_unlock(&hw->phy_mutex);
 
 	spin_lock_irq(&hw->hw_lock);
 	hw->intr_mask |= IS_EXT_REG;
···
 	status &= hw->intr_mask;
 	if (status & IS_EXT_REG) {
 		hw->intr_mask &= ~IS_EXT_REG;
-		schedule_work(&hw->phy_work);
+		tasklet_schedule(&hw->phy_task);
 	}
 
 	if (status & (IS_XA1_F|IS_R1_F)) {
···
 
 	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
 
-	/* disable Rx */
-	ctrl = gma_read16(hw, port, GM_GP_CTRL);
-	gma_write16(hw, port, GM_GP_CTRL, ctrl & ~GM_GPCR_RX_ENA);
+	if (!netif_running(dev)) {
+		memcpy_toio(hw->regs + B2_MAC_1 + port*8, dev->dev_addr, ETH_ALEN);
+		memcpy_toio(hw->regs + B2_MAC_2 + port*8, dev->dev_addr, ETH_ALEN);
+	} else {
+		/* disable Rx */
+		spin_lock_bh(&hw->phy_lock);
+		ctrl = gma_read16(hw, port, GM_GP_CTRL);
+		gma_write16(hw, port, GM_GP_CTRL, ctrl & ~GM_GPCR_RX_ENA);
 
-	memcpy_toio(hw->regs + B2_MAC_1 + port*8, dev->dev_addr, ETH_ALEN);
-	memcpy_toio(hw->regs + B2_MAC_2 + port*8, dev->dev_addr, ETH_ALEN);
+		memcpy_toio(hw->regs + B2_MAC_1 + port*8, dev->dev_addr, ETH_ALEN);
+		memcpy_toio(hw->regs + B2_MAC_2 + port*8, dev->dev_addr, ETH_ALEN);
 
-	if (netif_running(dev)) {
 		if (hw->chip_id == CHIP_ID_GENESIS)
 			xm_outaddr(hw, port, XM_SA, dev->dev_addr);
 		else {
 			gma_set_addr(hw, port, GM_SRC_ADDR_1L, dev->dev_addr);
 			gma_set_addr(hw, port, GM_SRC_ADDR_2L, dev->dev_addr);
 		}
-	}
 
-	gma_write16(hw, port, GM_GP_CTRL, ctrl);
+		gma_write16(hw, port, GM_GP_CTRL, ctrl);
+		spin_unlock_bh(&hw->phy_lock);
+	}
 
 	return 0;
 }
···
 	else
 		hw->ram_size = t8 * 4096;
 
-	hw->intr_mask = IS_HW_ERR | IS_PORT_1;
-	if (hw->ports > 1)
-		hw->intr_mask |= IS_PORT_2;
+	hw->intr_mask = IS_HW_ERR;
 
+	/* Use PHY IRQ for all but fiber based Genesis board */
 	if (!(hw->chip_id == CHIP_ID_GENESIS && hw->phy_type == SK_PHY_XMAC))
 		hw->intr_mask |= IS_EXT_REG;
···
 
 	skge_write32(hw, B0_IMSK, hw->intr_mask);
 
-	mutex_lock(&hw->phy_mutex);
 	for (i = 0; i < hw->ports; i++) {
 		if (hw->chip_id == CHIP_ID_GENESIS)
 			genesis_reset(hw, i);
 		else
 			yukon_reset(hw, i);
 	}
-	mutex_unlock(&hw->phy_mutex);
 
 	return 0;
 }
···
 	skge->netdev = dev;
 	skge->hw = hw;
 	skge->msg_enable = netif_msg_init(debug, default_msg);
+
 	skge->tx_ring.count = DEFAULT_TX_RING_SIZE;
 	skge->rx_ring.count = DEFAULT_RX_RING_SIZE;
···
 	skge->port = port;
 
 	/* Only used for Genesis XMAC */
-	INIT_DELAYED_WORK(&skge->link_thread, xm_link_timer);
+	setup_timer(&skge->link_timer, xm_link_timer, (unsigned long) skge);
 
 	if (hw->chip_id != CHIP_ID_GENESIS) {
 		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
···
 	}
 
 	hw->pdev = pdev;
-	mutex_init(&hw->phy_mutex);
-	INIT_WORK(&hw->phy_work, skge_extirq);
 	spin_lock_init(&hw->hw_lock);
+	spin_lock_init(&hw->phy_lock);
+	tasklet_init(&hw->phy_task, &skge_extirq, (unsigned long) hw);
 
 	hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
 	if (!hw->regs) {
···
 	unregister_netdev(dev1);
 	dev0 = hw->dev[0];
 	unregister_netdev(dev0);
+
+	tasklet_disable(&hw->phy_task);
 
 	spin_lock_irq(&hw->hw_lock);
 	hw->intr_mask = 0;
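Editor's note: the skge rework above swaps process-context primitives (mutex, work queue, delayed work) for ones that are legal in atomic context (spinlock, tasklet, timer), since the PHY code is now driven from paths that must not sleep; it also masks each port's interrupt bits while the port is down. A minimal sketch of the tasklet + timer + spinlock combination using the same 2.6-era APIs (the my_hw structure and handlers are illustrative, not skge code):

#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/timer.h>

struct my_hw {
	spinlock_t lock;
	struct tasklet_struct task;
	struct timer_list timer;
};

static void my_task(unsigned long arg)		/* runs in softirq context */
{
	struct my_hw *hw = (struct my_hw *) arg;

	spin_lock(&hw->lock);	/* spinlock, not a mutex: no sleeping here */
	/* ... poll or poke PHY registers ... */
	spin_unlock(&hw->lock);
}

static void my_timer_fn(unsigned long arg)	/* like xm_link_timer() above */
{
	struct my_hw *hw = (struct my_hw *) arg;

	tasklet_schedule(&hw->task);
	mod_timer(&hw->timer, jiffies + HZ);	/* re-arm the poll */
}

static void my_init(struct my_hw *hw)
{
	spin_lock_init(&hw->lock);
	tasklet_init(&hw->task, my_task, (unsigned long) hw);
	setup_timer(&hw->timer, my_timer_fn, (unsigned long) hw);
	mod_timer(&hw->timer, jiffies + HZ);
}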
+3 -3
drivers/net/skge.h
···
 	u32		     ram_size;
 	u32		     ram_offset;
 	u16		     phy_addr;
-	struct work_struct   phy_work;
-	struct mutex	     phy_mutex;
+	spinlock_t	     phy_lock;
+	struct tasklet_struct phy_task;
 };
 
 enum pause_control {
···
 
 	struct net_device_stats net_stats;
 
-	struct delayed_work  link_thread;
+	struct timer_list    link_timer;
 	enum pause_control   flow_control;
 	enum pause_status    flow_status;
 	u8		     rx_csum;
+1 -2
drivers/net/ucc_geth.c
···
 	if (bd == ugeth->confBd[txQ]) {
 		if (!netif_queue_stopped(dev))
 			netif_stop_queue(dev);
-		return NETDEV_TX_BUSY;
 	}
 
 	ugeth->txBd[txQ] = bd;
···
 
 	spin_unlock_irq(&ugeth->lock);
 
-	return NETDEV_TX_OK;
+	return 0;
 }
 
 static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit)
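Editor's note: the revert above restores the usual hard_start_xmit contract for this driver: after the skb has been handed to the descriptor ring, the handler stops the queue if the ring just became full and still reports success; NETDEV_TX_BUSY is meant for the case where the packet could not be taken at all. A hedged, generic sketch of that shape (the my_* names and the trivial ring bookkeeping are placeholders, not ucc_geth code):

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>

#define MY_RING_SIZE 64

struct my_priv {
	spinlock_t lock;
	unsigned int head, tail;	/* producer/consumer indices */
};

static int my_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct my_priv *priv = netdev_priv(dev);

	spin_lock_irq(&priv->lock);

	/* ... fill the descriptor at priv->head with this skb ... */
	priv->head = (priv->head + 1) % MY_RING_SIZE;

	/* Ring now full?  Stop the queue; the TX-completion path wakes it. */
	if ((priv->head + 1) % MY_RING_SIZE == priv->tail)
		netif_stop_queue(dev);

	spin_unlock_irq(&priv->lock);

	/* The skb was consumed (it is freed on TX completion), so report
	 * success rather than NETDEV_TX_BUSY. */
	return 0;
}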
+3 -1
drivers/net/wireless/airo.c
···
 	if (rc) {
 		airo_print_err(dev->name, "register interrupt %d failed, rc %d",
 				irq, rc);
-		goto err_out_unlink;
+		goto err_out_nets;
 	}
 	if (!is_pcmcia) {
 		if (!request_region( dev->base_addr, 64, dev->name )) {
···
 	release_region( dev->base_addr, 64 );
 err_out_irq:
 	free_irq(dev->irq, dev);
+err_out_nets:
+	airo_networks_free(ai);
 err_out_unlink:
 	del_airo_dev(dev);
 err_out_thr:
+7 -7
drivers/net/wireless/bcm43xx/bcm43xx_radio.c
···
 {
 	u32 *stackptr = &(_stackptr[*stackidx]);
 
-	assert((offset & 0xF000) == 0x0000);
-	assert((id & 0xF0) == 0x00);
+	assert((offset & 0xE000) == 0x0000);
+	assert((id & 0xF8) == 0x00);
 	*stackptr = offset;
-	*stackptr |= ((u32)id) << 12;
+	*stackptr |= ((u32)id) << 13;
 	*stackptr |= ((u32)value) << 16;
 	(*stackidx)++;
 	assert(*stackidx < BCM43xx_INTERFSTACK_SIZE);
···
 {
 	size_t i;
 
-	assert((offset & 0xF000) == 0x0000);
-	assert((id & 0xF0) == 0x00);
+	assert((offset & 0xE000) == 0x0000);
+	assert((id & 0xF8) == 0x00);
 	for (i = 0; i < BCM43xx_INTERFSTACK_SIZE; i++, stackptr++) {
-		if ((*stackptr & 0x00000FFF) != offset)
+		if ((*stackptr & 0x00001FFF) != offset)
 			continue;
-		if (((*stackptr & 0x0000F000) >> 12) != id)
+		if (((*stackptr & 0x00007000) >> 13) != id)
 			continue;
 		return ((*stackptr & 0xFFFF0000) >> 16);
 	}
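Editor's note: the MANUALWLAN fix above changes the packing of the interference-mitigation stack entries: the register offset now occupies 13 bits instead of 12, the id becomes a 3-bit field shifted by 13, and the saved value stays in the top 16 bits; the assert masks change from 0xF000/0xF0 to 0xE000/0xF8 to match. A small stand-alone check that the save-side packing and the restore-side masks agree (plain C, not driver code; the values are arbitrary):

#include <assert.h>
#include <stdint.h>

/* Pack as in the save path: offset | id << 13 | value << 16. */
static uint32_t pack_entry(uint16_t offset, uint8_t id, uint16_t value)
{
	assert((offset & 0xE000) == 0x0000);	/* offset fits in 13 bits */
	assert((id & 0xF8) == 0x00);		/* id fits in 3 bits */
	return offset | ((uint32_t)id << 13) | ((uint32_t)value << 16);
}

int main(void)
{
	uint32_t e = pack_entry(0x1abc, 0x3, 0xbeef);

	/* Unpack with the restore-side masks from the hunk above. */
	assert((e & 0x00001FFF) == 0x1abc);		/* offset */
	assert(((e & 0x00007000) >> 13) == 0x3);	/* id */
	assert(((e & 0xFFFF0000) >> 16) == 0xbeef);	/* value */
	return 0;
}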