Merge branch 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6

* 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6:
myri10ge: update driver version to 1.3.0-1.233
myri10ge: more Intel chipsets providing aligned PCIe completions
myri10ge: fix management of the firmware 4KB boundary crossing restriction
cxgb3 - missing CPL handler and register setting.
cxgb3 - MAC watchdog update
cxgb3 - avoid deadlock with mac watchdog
skge: fix wake on lan
sky2: phy workarounds for Yukon EC-U A1
sky2: turn on clocks when doing resume
sky2: turn carrier off when down
skge: turn carrier off when down
[PATCH] bcm43xx: Fix PPC machine checks and match loopback gain specs
[PATCH] bcm43xx: Fix 802.11b/g scan limits to match regulatory reqs
[PATCH] zd1211rw: Fix E2P_PHY_REG patching
[PATCH] zd1211rw: Reject AL2230S devices

13 files changed, 260 insertions(+), 137 deletions(-)
+5 -2
drivers/net/cxgb3/common.h
···
        struct adapter *adapter;
        unsigned int offset;
        unsigned int nucast;    /* # of address filters for unicast MACs */
-       unsigned int tcnt;
-       unsigned int xcnt;
+       unsigned int tx_tcnt;
+       unsigned int tx_xcnt;
+       u64 tx_mcnt;
+       unsigned int rx_xcnt;
+       u64 rx_mcnt;
        unsigned int toggle_cnt;
        unsigned int txen;
        struct mac_stats stats;
+9 -7
drivers/net/cxgb3/cxgb3_main.c
···
        if (link_stat != netif_carrier_ok(dev)) {
                if (link_stat) {
-                       t3_set_reg_field(adapter,
-                                        A_XGM_TXFIFO_CFG + mac->offset,
-                                        F_ENDROPPKT, 0);
+                       t3_mac_enable(mac, MAC_DIRECTION_RX);
                        netif_carrier_on(dev);
                } else {
                        netif_carrier_off(dev);
+                       pi->phy.ops->power_down(&pi->phy, 1);
+                       t3_mac_disable(mac, MAC_DIRECTION_RX);
+                       t3_link_start(&pi->phy, mac, &pi->link_config);
                }

                link_report(dev);
···
        if (err)
                goto out;

+       t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));
+
        err = setup_sge_qsets(adap);
        if (err)
                goto out;
···
{
        int i;

-       rtnl_lock();    /* synchronize with ifdown */
+       if (!rtnl_trylock())    /* synchronize with ifdown */
+               return;
+
        for_each_port(adapter, i) {
                struct net_device *dev = adapter->port[i];
                struct port_info *p = netdev_priv(dev);
+2 -12
drivers/net/cxgb3/cxgb3_offload.c
···
        }
}

-static int do_set_tcb_rpl(struct t3cdev *dev, struct sk_buff *skb)
-{
-       struct cpl_set_tcb_rpl *rpl = cplhdr(skb);
-
-       if (rpl->status != CPL_ERR_NONE)
-               printk(KERN_ERR
-                      "Unexpected SET_TCB_RPL status %u for tid %u\n",
-                      rpl->status, GET_TID(rpl));
-       return CPL_RET_BUF_DONE;
-}
-
static int do_trace(struct t3cdev *dev, struct sk_buff *skb)
{
        struct cpl_trace_pkt *p = cplhdr(skb);
···
        t3_register_cpl_handler(CPL_CLOSE_CON_RPL, do_hwtid_rpl);
        t3_register_cpl_handler(CPL_ABORT_REQ_RSS, do_abort_req_rss);
        t3_register_cpl_handler(CPL_ACT_ESTABLISH, do_act_establish);
-       t3_register_cpl_handler(CPL_SET_TCB_RPL, do_set_tcb_rpl);
+       t3_register_cpl_handler(CPL_SET_TCB_RPL, do_hwtid_rpl);
+       t3_register_cpl_handler(CPL_GET_TCB_RPL, do_hwtid_rpl);
        t3_register_cpl_handler(CPL_RDMA_TERMINATE, do_term);
        t3_register_cpl_handler(CPL_RDMA_EC_STATUS, do_hwtid_rpl);
        t3_register_cpl_handler(CPL_TRACE_PKT, do_trace);
+6
drivers/net/cxgb3/regs.h
···

#define A_ULPRX_ISCSI_TAGMASK 0x514

+#define S_HPZ0 0
+#define M_HPZ0 0xf
+#define V_HPZ0(x) ((x) << S_HPZ0)
+#define G_HPZ0(x) (((x) >> S_HPZ0) & M_HPZ0)
+
#define A_ULPRX_TDDP_LLIMIT 0x51c

#define A_ULPRX_TDDP_ULIMIT 0x520
+#define A_ULPRX_TDDP_PSZ 0x528

#define A_ULPRX_STAG_LLIMIT 0x52c
+82 -29
drivers/net/cxgb3/xgmac.c
···
        int idx = macidx(mac);
        struct adapter *adap = mac->adapter;
        unsigned int oft = mac->offset;
-
+       struct mac_stats *s = &mac->stats;
+
        if (which & MAC_DIRECTION_TX) {
                t3_write_reg(adap, A_XGM_TX_CTRL + oft, F_TXEN);
                t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
···
                t3_set_reg_field(adap, A_TP_PIO_DATA, 1 << idx, 1 << idx);

                t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CNT_CH0 + idx);
-               mac->tcnt = (G_TXDROPCNTCH0RCVD(t3_read_reg(adap,
-                                               A_TP_PIO_DATA)));
-               mac->xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap,
-                                           A_XGM_TX_SPI4_SOP_EOP_CNT)));
+               mac->tx_mcnt = s->tx_frames;
+               mac->tx_tcnt = (G_TXDROPCNTCH0RCVD(t3_read_reg(adap,
+                                               A_TP_PIO_DATA)));
+               mac->tx_xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap,
+                                               A_XGM_TX_SPI4_SOP_EOP_CNT +
+                                               oft)));
+               mac->rx_mcnt = s->rx_frames;
+               mac->rx_xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap,
+                                               A_XGM_RX_SPI4_SOP_EOP_CNT +
+                                               oft)));
                mac->txen = F_TXEN;
                mac->toggle_cnt = 0;
        }
···
{
        int idx = macidx(mac);
        struct adapter *adap = mac->adapter;
+       int val;

        if (which & MAC_DIRECTION_TX) {
                t3_write_reg(adap, A_XGM_TX_CTRL + mac->offset, 0);
···
                t3_set_reg_field(adap, A_TP_PIO_DATA, 1 << idx, 1 << idx);
                mac->txen = 0;
        }
-       if (which & MAC_DIRECTION_RX)
+       if (which & MAC_DIRECTION_RX) {
+               t3_set_reg_field(mac->adapter, A_XGM_RESET_CTRL + mac->offset,
+                                F_PCS_RESET_, 0);
+               msleep(100);
                t3_write_reg(adap, A_XGM_RX_CTRL + mac->offset, 0);
+               val = F_MAC_RESET_;
+               if (is_10G(adap))
+                       val |= F_PCS_RESET_;
+               else if (uses_xaui(adap))
+                       val |= F_PCS_RESET_ | F_XG2G_RESET_;
+               else
+                       val |= F_RGMII_RESET_ | F_XG2G_RESET_;
+               t3_write_reg(mac->adapter, A_XGM_RESET_CTRL + mac->offset, val);
+       }
        return 0;
}

int t3b2_mac_watchdog_task(struct cmac *mac)
{
        struct adapter *adap = mac->adapter;
-       unsigned int tcnt, xcnt;
+       struct mac_stats *s = &mac->stats;
+       unsigned int tx_tcnt, tx_xcnt;
+       unsigned int tx_mcnt = s->tx_frames;
+       unsigned int rx_mcnt = s->rx_frames;
+       unsigned int rx_xcnt;
        int status;

-       t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CNT_CH0 + macidx(mac));
-       tcnt = (G_TXDROPCNTCH0RCVD(t3_read_reg(adap, A_TP_PIO_DATA)));
-       xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap,
-                              A_XGM_TX_SPI4_SOP_EOP_CNT +
-                              mac->offset)));
-
-       if (tcnt != mac->tcnt && xcnt == 0 && mac->xcnt == 0) {
-               if (mac->toggle_cnt > 4) {
-                       t3b2_mac_reset(mac);
-                       mac->toggle_cnt = 0;
-                       status = 2;
+       if (tx_mcnt == mac->tx_mcnt) {
+               tx_xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap,
+                                               A_XGM_TX_SPI4_SOP_EOP_CNT +
+                                               mac->offset)));
+               if (tx_xcnt == 0) {
+                       t3_write_reg(adap, A_TP_PIO_ADDR,
+                                    A_TP_TX_DROP_CNT_CH0 + macidx(mac));
+                       tx_tcnt = (G_TXDROPCNTCH0RCVD(t3_read_reg(adap,
+                                                     A_TP_PIO_DATA)));
                } else {
-                       t3_write_reg(adap, A_XGM_TX_CTRL + mac->offset, 0);
-                       t3_read_reg(adap, A_XGM_TX_CTRL + mac->offset);
-                       t3_write_reg(adap, A_XGM_TX_CTRL + mac->offset,
-                                    mac->txen);
-                       t3_read_reg(adap, A_XGM_TX_CTRL + mac->offset);
-                       mac->toggle_cnt++;
-                       status = 1;
-               }
+                       mac->toggle_cnt = 0;
+                       return 0;
+               }
        } else {
                mac->toggle_cnt = 0;
-               status = 0;
+               return 0;
        }
-       mac->tcnt = tcnt;
-       mac->xcnt = xcnt;

+       if (((tx_tcnt != mac->tx_tcnt) &&
+            (tx_xcnt == 0) && (mac->tx_xcnt == 0)) ||
+           ((mac->tx_mcnt == tx_mcnt) &&
+            (tx_xcnt != 0) && (mac->tx_xcnt != 0))) {
+               if (mac->toggle_cnt > 4)
+                       status = 2;
+               else
+                       status = 1;
+       } else {
+               mac->toggle_cnt = 0;
+               return 0;
+       }
+
+       if (rx_mcnt != mac->rx_mcnt)
+               rx_xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap,
+                                               A_XGM_RX_SPI4_SOP_EOP_CNT +
+                                               mac->offset)));
+       else
+               return 0;
+
+       if (mac->rx_mcnt != s->rx_frames && rx_xcnt == 0 && mac->rx_xcnt == 0)
+               status = 2;
+
+       mac->tx_tcnt = tx_tcnt;
+       mac->tx_xcnt = tx_xcnt;
+       mac->tx_mcnt = s->tx_frames;
+       mac->rx_xcnt = rx_xcnt;
+       mac->rx_mcnt = s->rx_frames;
+       if (status == 1) {
+               t3_write_reg(adap, A_XGM_TX_CTRL + mac->offset, 0);
+               t3_read_reg(adap, A_XGM_TX_CTRL + mac->offset);  /* flush */
+               t3_write_reg(adap, A_XGM_TX_CTRL + mac->offset, mac->txen);
+               t3_read_reg(adap, A_XGM_TX_CTRL + mac->offset);  /* flush */
+               mac->toggle_cnt++;
+       } else if (status == 2) {
+               t3b2_mac_reset(mac);
+               mac->toggle_cnt = 0;
+       }
        return status;
}
+25 -12
drivers/net/myri10ge/myri10ge.c
···
#include "myri10ge_mcp.h"
#include "myri10ge_mcp_gen_header.h"

-#define MYRI10GE_VERSION_STR "1.3.0-1.227"
+#define MYRI10GE_VERSION_STR "1.3.0-1.233"

MODULE_DESCRIPTION("Myricom 10G driver (10GbE)");
MODULE_AUTHOR("Maintainer: help@myri.com");
···
        /* try to refill entire ring */
        while (rx->fill_cnt != (rx->cnt + rx->mask + 1)) {
                idx = rx->fill_cnt & rx->mask;
-
-               if ((bytes < MYRI10GE_ALLOC_SIZE / 2) &&
-                   (rx->page_offset + bytes <= MYRI10GE_ALLOC_SIZE)) {
+               if (rx->page_offset + bytes <= MYRI10GE_ALLOC_SIZE) {
                        /* we can use part of previous page */
                        get_page(rx->page);
-#if MYRI10GE_ALLOC_SIZE > 4096
-                       /* Firmware cannot cross 4K boundary.. */
-                       if ((rx->page_offset >> 12) !=
-                           ((rx->page_offset + bytes - 1) >> 12)) {
-                               rx->page_offset =
-                                       (rx->page_offset + bytes) & ~4095;
-                       }
-#endif
                } else {
                        /* we need a new page */
                        page =
···
                /* start next packet on a cacheline boundary */
                rx->page_offset += SKB_DATA_ALIGN(bytes);
+
+#if MYRI10GE_ALLOC_SIZE > 4096
+               /* don't cross a 4KB boundary */
+               if ((rx->page_offset >> 12) !=
+                   ((rx->page_offset + bytes - 1) >> 12))
+                       rx->page_offset = (rx->page_offset + 4096) & ~4095;
+#endif
                rx->fill_cnt++;

                /* copy 8 descriptors to the firmware at a time */
···

#define PCI_DEVICE_ID_INTEL_E5000_PCIE23 0x25f7
#define PCI_DEVICE_ID_INTEL_E5000_PCIE47 0x25fa
+#define PCI_DEVICE_ID_INTEL_6300ESB_PCIEE1 0x3510
+#define PCI_DEVICE_ID_INTEL_6300ESB_PCIEE4 0x351b
+#define PCI_DEVICE_ID_INTEL_E3000_PCIE 0x2779
+#define PCI_DEVICE_ID_INTEL_E3010_PCIE 0x277a
#define PCI_DEVICE_ID_SERVERWORKS_HT2100_PCIE_FIRST 0x140
#define PCI_DEVICE_ID_SERVERWORKS_HT2100_PCIE_LAST 0x142
···
                    PCI_DEVICE_ID_SERVERWORKS_HT2100_PCIE_FIRST
                    && bridge->device <=
                    PCI_DEVICE_ID_SERVERWORKS_HT2100_PCIE_LAST)
+               /* All Intel E3000/E3010 PCIE ports */
+               || (bridge->vendor == PCI_VENDOR_ID_INTEL
+                   && (bridge->device ==
+                       PCI_DEVICE_ID_INTEL_E3000_PCIE
+                       || bridge->device ==
+                       PCI_DEVICE_ID_INTEL_E3010_PCIE))
+               /* All Intel 6310/6311/6321ESB PCIE ports */
+               || (bridge->vendor == PCI_VENDOR_ID_INTEL
+                   && bridge->device >=
+                   PCI_DEVICE_ID_INTEL_6300ESB_PCIEE1
+                   && bridge->device <=
+                   PCI_DEVICE_ID_INTEL_6300ESB_PCIEE4)
                /* All Intel E5000 PCIE ports */
                || (bridge->vendor == PCI_VENDOR_ID_INTEL
                    && bridge->device >=
+59 -36
drivers/net/skge.c
···
{
        struct skge_hw *hw = skge->hw;
        int port = skge->port;
-       enum pause_control save_mode;
-       u32 ctrl;
+       u16 ctrl;

-       /* Bring hardware out of reset */
        skge_write16(hw, B0_CTST, CS_RST_CLR);
        skge_write16(hw, SK_REG(port, GMAC_LINK_CTRL), GMLC_RST_CLR);

-       skge_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR);
-       skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR);
+       /* Turn on Vaux */
+       skge_write8(hw, B0_POWER_CTRL,
+                   PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF);
+
+       /* WA code for COMA mode -- clear PHY reset */
+       if (hw->chip_id == CHIP_ID_YUKON_LITE &&
+           hw->chip_rev >= CHIP_REV_YU_LITE_A3) {
+               u32 reg = skge_read32(hw, B2_GP_IO);
+               reg |= GP_DIR_9;
+               reg &= ~GP_IO_9;
+               skge_write32(hw, B2_GP_IO, reg);
+       }
+
+       skge_write32(hw, SK_REG(port, GPHY_CTRL),
+                    GPC_DIS_SLEEP |
+                    GPC_HWCFG_M_3 | GPC_HWCFG_M_2 | GPC_HWCFG_M_1 | GPC_HWCFG_M_0 |
+                    GPC_ANEG_1 | GPC_RST_SET);
+
+       skge_write32(hw, SK_REG(port, GPHY_CTRL),
+                    GPC_DIS_SLEEP |
+                    GPC_HWCFG_M_3 | GPC_HWCFG_M_2 | GPC_HWCFG_M_1 | GPC_HWCFG_M_0 |
+                    GPC_ANEG_1 | GPC_RST_CLR);
+
+       skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR);

        /* Force to 10/100 skge_reset will re-enable on resume */
-       save_mode = skge->flow_control;
-       skge->flow_control = FLOW_MODE_SYMMETRIC;
-
-       ctrl = skge->advertising;
-       skge->advertising &= ~(ADVERTISED_1000baseT_Half|ADVERTISED_1000baseT_Full);
-
-       skge_phy_reset(skge);
-
-       skge->flow_control = save_mode;
-       skge->advertising = ctrl;
+       gm_phy_write(hw, port, PHY_MARV_AUNE_ADV,
+                    PHY_AN_100FULL | PHY_AN_100HALF |
+                    PHY_AN_10FULL | PHY_AN_10HALF | PHY_AN_CSMA);
+       /* no 1000 HD/FD */
+       gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, 0);
+       gm_phy_write(hw, port, PHY_MARV_CTRL,
+                    PHY_CT_RESET | PHY_CT_SPS_LSB | PHY_CT_ANE |
+                    PHY_CT_RE_CFG | PHY_CT_DUP_MD);

        /* Set GMAC to no flow control and auto update for speed/duplex */
        gma_write16(hw, port, GM_GP_CTRL,
···
        struct skge_port *skge = netdev_priv(dev);
        struct skge_hw *hw = skge->hw;

-       if (wol->wolopts & wol_supported(hw))
+       if (wol->wolopts & ~wol_supported(hw))
                return -EOPNOTSUPP;

        skge->wol = wol->wolopts;
-       if (!netif_running(dev))
-               skge_wol_init(skge);
        return 0;
}
···
        printk(KERN_INFO PFX "%s: disabling interface\n", dev->name);

        netif_stop_queue(dev);
+
        if (hw->chip_id == CHIP_ID_GENESIS && hw->phy_type == SK_PHY_XMAC)
                del_timer_sync(&skge->link_timer);

        netif_poll_disable(dev);
+       netif_carrier_off(dev);

        spin_lock_irq(&hw->hw_lock);
        hw->intr_mask &= ~portmask[port];
···
}

#ifdef CONFIG_PM
-static int vaux_avail(struct pci_dev *pdev)
-{
-       int pm_cap;
-
-       pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
-       if (pm_cap) {
-               u16 ctl;
-               pci_read_config_word(pdev, pm_cap + PCI_PM_PMC, &ctl);
-               if (ctl & PCI_PM_CAP_AUX_POWER)
-                       return 1;
-       }
-       return 0;
-}
-
-
static int skge_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct skge_hw *hw = pci_get_drvdata(pdev);
···
                wol |= skge->wol;
        }
-
-       if (wol && vaux_avail(pdev))
-               skge_write8(hw, B0_POWER_CTRL,
-                           PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF);

        skge_write32(hw, B0_IMSK, 0);
        pci_enable_wake(pdev, pci_choose_state(pdev, state), wol);
···
}
#endif

+static void skge_shutdown(struct pci_dev *pdev)
+{
+       struct skge_hw *hw = pci_get_drvdata(pdev);
+       int i, wol = 0;
+
+       for (i = 0; i < hw->ports; i++) {
+               struct net_device *dev = hw->dev[i];
+               struct skge_port *skge = netdev_priv(dev);
+
+               if (skge->wol)
+                       skge_wol_init(skge);
+               wol |= skge->wol;
+       }
+
+       pci_enable_wake(pdev, PCI_D3hot, wol);
+       pci_enable_wake(pdev, PCI_D3cold, wol);
+
+       pci_disable_device(pdev);
+       pci_set_power_state(pdev, PCI_D3hot);
+}
+
static struct pci_driver skge_driver = {
        .name = DRV_NAME,
        .id_table = skge_id_table,
···
        .suspend = skge_suspend,
        .resume = skge_resume,
#endif
+       .shutdown = skge_shutdown,
};

static int __init skge_init_module(void)
+9 -3
drivers/net/sky2.c
···
                ledover &= ~PHY_M_LED_MO_RX;
        }

-       if (hw->chip_id == CHIP_ID_YUKON_EC_U && hw->chip_rev == CHIP_REV_YU_EC_A1) {
+       if (hw->chip_id == CHIP_ID_YUKON_EC_U &&
+           hw->chip_rev == CHIP_REV_YU_EC_U_A1) {
                /* apply fixes in PHY AFE */
-               pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
                gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 255);

                /* increase differential signal amplitude in 10BASE-T */
···
                gm_phy_write(hw, port, 0x17, 0x2002);

                /* set page register to 0 */
-               gm_phy_write(hw, port, PHY_MARV_EXT_ADR, pg);
+               gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0);
        } else if (hw->chip_id != CHIP_ID_YUKON_EX) {
                gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl);
···

        /* Stop more packets from being queued */
        netif_stop_queue(dev);
+       netif_carrier_off(dev);

        /* Disable port IRQ */
        imask = sky2_read32(hw, B0_IMSK);
···
                goto out;

        pci_enable_wake(pdev, PCI_D0, 0);
+
+       /* Re-enable all clocks */
+       if (hw->chip_id == CHIP_ID_YUKON_EX || hw->chip_id == CHIP_ID_YUKON_EC_U)
+               sky2_pci_write32(hw, PCI_DEV_REG3, 0);
+
        sky2_reset(hw);

        sky2_write32(hw, B0_IMSK, Y2_IS_BASE);
+19 -1
drivers/net/wireless/bcm43xx/bcm43xx_main.c
···
        u8 channel;
        struct bcm43xx_phyinfo *phy;
        const char *iso_country;
+       u8 max_bg_channel;

        geo = kzalloc(sizeof(*geo), GFP_KERNEL);
        if (!geo)
···
        }
        iso_country = bcm43xx_locale_iso(bcm->sprom.locale);

+       /* set the maximum channel based on locale set in sprom or with the locale option */
+       switch (bcm->sprom.locale) {
+       case BCM43xx_LOCALE_THAILAND:
+       case BCM43xx_LOCALE_ISRAEL:
+       case BCM43xx_LOCALE_JORDAN:
+       case BCM43xx_LOCALE_USA_CANADA_ANZ:
+       case BCM43xx_LOCALE_USA_LOW:
+               max_bg_channel = 11;
+               break;
+       case BCM43xx_LOCALE_JAPAN:
+       case BCM43xx_LOCALE_JAPAN_HIGH:
+               max_bg_channel = 14;
+               break;
+       default:
+               max_bg_channel = 13;
+       }
+
        if (have_a) {
                for (i = 0, channel = IEEE80211_52GHZ_MIN_CHANNEL;
                    channel <= IEEE80211_52GHZ_MAX_CHANNEL; channel++) {
···
        }
        if (have_bg) {
                for (i = 0, channel = IEEE80211_24GHZ_MIN_CHANNEL;
-                   channel <= IEEE80211_24GHZ_MAX_CHANNEL; channel++) {
+                   channel <= max_bg_channel; channel++) {
                        chan = &geo->bg[i++];
                        chan->freq = bcm43xx_channel_to_freq_bg(channel);
                        chan->channel = channel;
+32 -25
drivers/net/wireless/bcm43xx/bcm43xx_phy.c
···
{
        struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
        struct bcm43xx_radioinfo *radio = bcm43xx_current_radio(bcm);
-       u16 backup_phy[15];
+       u16 backup_phy[15] = {0};
        u16 backup_radio[3];
        u16 backup_bband;
        u16 i;
···
        backup_phy[1] = bcm43xx_phy_read(bcm, 0x0001);
        backup_phy[2] = bcm43xx_phy_read(bcm, 0x0811);
        backup_phy[3] = bcm43xx_phy_read(bcm, 0x0812);
-       backup_phy[4] = bcm43xx_phy_read(bcm, 0x0814);
-       backup_phy[5] = bcm43xx_phy_read(bcm, 0x0815);
+       if (phy->rev != 1) {
+               backup_phy[4] = bcm43xx_phy_read(bcm, 0x0814);
+               backup_phy[5] = bcm43xx_phy_read(bcm, 0x0815);
+       }
        backup_phy[6] = bcm43xx_phy_read(bcm, 0x005A);
        backup_phy[7] = bcm43xx_phy_read(bcm, 0x0059);
        backup_phy[8] = bcm43xx_phy_read(bcm, 0x0058);
···
                          bcm43xx_phy_read(bcm, 0x0811) | 0x0001);
        bcm43xx_phy_write(bcm, 0x0812,
                          bcm43xx_phy_read(bcm, 0x0812) & 0xFFFE);
-       bcm43xx_phy_write(bcm, 0x0814,
-                         bcm43xx_phy_read(bcm, 0x0814) | 0x0001);
-       bcm43xx_phy_write(bcm, 0x0815,
-                         bcm43xx_phy_read(bcm, 0x0815) & 0xFFFE);
-       bcm43xx_phy_write(bcm, 0x0814,
-                         bcm43xx_phy_read(bcm, 0x0814) | 0x0002);
-       bcm43xx_phy_write(bcm, 0x0815,
-                         bcm43xx_phy_read(bcm, 0x0815) & 0xFFFD);
+       if (phy->rev != 1) {
+               bcm43xx_phy_write(bcm, 0x0814,
+                                 bcm43xx_phy_read(bcm, 0x0814) | 0x0001);
+               bcm43xx_phy_write(bcm, 0x0815,
+                                 bcm43xx_phy_read(bcm, 0x0815) & 0xFFFE);
+               bcm43xx_phy_write(bcm, 0x0814,
+                                 bcm43xx_phy_read(bcm, 0x0814) | 0x0002);
+               bcm43xx_phy_write(bcm, 0x0815,
+                                 bcm43xx_phy_read(bcm, 0x0815) & 0xFFFD);
+       }
        bcm43xx_phy_write(bcm, 0x0811,
                          bcm43xx_phy_read(bcm, 0x0811) | 0x000C);
        bcm43xx_phy_write(bcm, 0x0812,
···
                                        bcm43xx_phy_read(bcm, 0x000A)
                                        | 0x2000);
        }
-       bcm43xx_phy_write(bcm, 0x0814,
-                         bcm43xx_phy_read(bcm, 0x0814) | 0x0004);
-       bcm43xx_phy_write(bcm, 0x0815,
-                         bcm43xx_phy_read(bcm, 0x0815) & 0xFFFB);
+       if (phy->rev != 1) {
+               bcm43xx_phy_write(bcm, 0x0814,
+                                 bcm43xx_phy_read(bcm, 0x0814) | 0x0004);
+               bcm43xx_phy_write(bcm, 0x0815,
+                                 bcm43xx_phy_read(bcm, 0x0815) & 0xFFFB);
+       }
        bcm43xx_phy_write(bcm, 0x0003,
                          (bcm43xx_phy_read(bcm, 0x0003)
                           & 0xFF9F) | 0x0040);
···
                }
        }

-       bcm43xx_phy_write(bcm, 0x0814, backup_phy[4]);
-       bcm43xx_phy_write(bcm, 0x0815, backup_phy[5]);
+       if (phy->rev != 1) {
+               bcm43xx_phy_write(bcm, 0x0814, backup_phy[4]);
+               bcm43xx_phy_write(bcm, 0x0815, backup_phy[5]);
+       }
        bcm43xx_phy_write(bcm, 0x005A, backup_phy[6]);
        bcm43xx_phy_write(bcm, 0x0059, backup_phy[7]);
        bcm43xx_phy_write(bcm, 0x0058, backup_phy[8]);
···
                bcm43xx_phy_write(bcm, 0x0811, 0x0000);
                bcm43xx_phy_write(bcm, 0x0015, 0x00C0);
        }
-       if (phy->rev >= 3) {
+       if (phy->rev > 5) {
                bcm43xx_phy_write(bcm, 0x0811, 0x0400);
                bcm43xx_phy_write(bcm, 0x0015, 0x00C0);
        }
        if (phy->rev >= 2 && phy->connected) {
                tmp = bcm43xx_phy_read(bcm, 0x0400) & 0xFF;
-               if (tmp < 6) {
+               if (tmp == 3 || tmp == 5) {
                        bcm43xx_phy_write(bcm, 0x04C2, 0x1816);
                        bcm43xx_phy_write(bcm, 0x04C3, 0x8006);
-                       if (tmp != 3) {
+                       if (tmp == 5) {
                                bcm43xx_phy_write(bcm, 0x04CC,
                                                  (bcm43xx_phy_read(bcm, 0x04CC)
                                                   & 0x00FF) | 0x1F00);
                        }
                }
-       }
-       if (phy->rev < 3 && phy->connected)
                bcm43xx_phy_write(bcm, 0x047E, 0x0078);
+       }
        if (radio->revision == 8) {
                bcm43xx_phy_write(bcm, 0x0801, bcm43xx_phy_read(bcm, 0x0801) | 0x0080);
                bcm43xx_phy_write(bcm, 0x043E, bcm43xx_phy_read(bcm, 0x043E) | 0x0004);
···
        if (phy->rev >= 6) {
                bcm43xx_phy_write(bcm, 0x0036,
                                  (bcm43xx_phy_read(bcm, 0x0036)
-                                  & 0xF000) | (radio->txctl2 << 12));
+                                  & 0x0FFF) | (radio->txctl2 << 12));
        }
        if (bcm->sprom.boardflags & BCM43xx_BFL_PACTRL)
                bcm43xx_phy_write(bcm, 0x002E, 0x8075);
···
        else
                bcm43xx_phy_write(bcm, 0x002F, 0x0202);
        }
-       if (phy->connected) {
+       if (phy->connected || phy->rev >= 2) {
                bcm43xx_phy_lo_adjust(bcm, 0);
                bcm43xx_phy_write(bcm, 0x080F, 0x8078);
        }
···
         */
        bcm43xx_nrssi_hw_update(bcm, 0xFFFF);
        bcm43xx_calc_nrssi_threshold(bcm);
-       } else if (phy->connected) {
+       } else if (phy->connected || phy->rev >= 2) {
        if (radio->nrssi[0] == -1000) {
                assert(radio->nrssi[1] == -1000);
                bcm43xx_calc_nrssi_slope(bcm);
+4 -8
drivers/net/wireless/zd1211rw/zd_chip.c
···
        chip->patch_cr157 = (value >> 13) & 0x1;
        chip->patch_6m_band_edge = (value >> 21) & 0x1;
        chip->new_phy_layout = (value >> 31) & 0x1;
+       chip->al2230s_bit = (value >> 7) & 0x1;
        chip->link_led = ((value >> 4) & 1) ? LED1 : LED2;
        chip->supports_tx_led = 1;
        if (value & (1 << 24)) { /* LED scenario */
···
                return r;
        }

-/* CR157 can be optionally patched by the EEPROM */
+/* CR157 can be optionally patched by the EEPROM for original ZD1211 */
static int patch_cr157(struct zd_chip *chip)
{
        int r;
-       u32 value;
+       u16 value;

        if (!chip->patch_cr157)
                return 0;

-       r = zd_ioread32_locked(chip, &value, E2P_PHY_REG);
+       r = zd_ioread16_locked(chip, &value, E2P_PHY_REG);
        if (r)
                return r;
···
                goto out;

        r = zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
-       if (r)
-               goto unlock;
-
-       r = patch_cr157(chip);
-unlock:
        t = zd_chip_unlock_phy_regs(chip);
        if (t && !r)
                r = t;
+2 -2
drivers/net/wireless/zd1211rw/zd_chip.h
···
 * also only 11 channels. */
#define E2P_ALLOWED_CHANNEL    E2P_DATA(0x18)

-#define E2P_PHY_REG            E2P_DATA(0x1a)
#define E2P_DEVICE_VER         E2P_DATA(0x20)
+#define E2P_PHY_REG            E2P_DATA(0x25)
#define E2P_36M_CAL_VALUE1     E2P_DATA(0x28)
#define E2P_36M_CAL_VALUE2     E2P_DATA(0x2a)
#define E2P_36M_CAL_VALUE3     E2P_DATA(0x2c)
···
        u16 link_led;
        unsigned int pa_type:4,
                patch_cck_gain:1, patch_cr157:1, patch_6m_band_edge:1,
-               new_phy_layout:1,
+               new_phy_layout:1, al2230s_bit:1,
                is_zd1211b:1, supports_tx_led:1;
};
+6
drivers/net/wireless/zd1211rw/zd_rf_al2230.c
···
{
        struct zd_chip *chip = zd_rf_to_chip(rf);

+       if (chip->al2230s_bit) {
+               dev_err(zd_chip_dev(chip), "AL2230S devices are not yet "
+                       "supported by this driver.\n");
+               return -ENODEV;
+       }
+
        rf->switch_radio_off = al2230_switch_radio_off;
        if (chip->is_zd1211b) {
                rf->init_hw = zd1211b_al2230_init_hw;