Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge branch 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6

* 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6: (37 commits)
forcedeth bug fix: realtek phy
forcedeth bug fix: vitesse phy
forcedeth bug fix: cicada phy
atl1: reorder atl1_main functions
atl1: fix excessively indented code
atl1: cleanup atl1_main
atl1: header file cleanup
atl1: remove irq_sem
cdc-subset to support new vendor/product ID
8139cp: implement the missing dev->tx_timeout
myri10ge: Remove nonsensical limit in the tx done routine
gianfar: kill unused header
EP93XX_ETH must select MII
macb: Add multicast capability
macb: Use generic PHY layer
s390: add barriers to qeth driver
s390: scatter-gather for inbound traffic in qeth driver
eHEA: Introducing support for DLPAR memory add
Fix a potential NULL pointer dereference in free_shared_mem() in drivers/net/s2io.c
[PATCH] softmac: Fix ESSID problem
...

+2470 -1870
+24 -3
drivers/net/8139cp.c
··· 26 26 27 27 TODO: 28 28 * Test Tx checksumming thoroughly 29 - * Implement dev->tx_timeout 30 29 31 30 Low priority TODO: 32 31 * Complete reset on PciErr ··· 1217 1218 return 0; 1218 1219 } 1219 1220 1221 + static void cp_tx_timeout(struct net_device *dev) 1222 + { 1223 + struct cp_private *cp = netdev_priv(dev); 1224 + unsigned long flags; 1225 + int rc; 1226 + 1227 + printk(KERN_WARNING "%s: Transmit timeout, status %2x %4x %4x %4x\n", 1228 + dev->name, cpr8(Cmd), cpr16(CpCmd), 1229 + cpr16(IntrStatus), cpr16(IntrMask)); 1230 + 1231 + spin_lock_irqsave(&cp->lock, flags); 1232 + 1233 + cp_stop_hw(cp); 1234 + cp_clean_rings(cp); 1235 + rc = cp_init_rings(cp); 1236 + cp_start_hw(cp); 1237 + 1238 + netif_wake_queue(dev); 1239 + 1240 + spin_unlock_irqrestore(&cp->lock, flags); 1241 + 1242 + return; 1243 + } 1244 + 1220 1245 #ifdef BROKEN 1221 1246 static int cp_change_mtu(struct net_device *dev, int new_mtu) 1222 1247 { ··· 1943 1920 dev->change_mtu = cp_change_mtu; 1944 1921 #endif 1945 1922 dev->ethtool_ops = &cp_ethtool_ops; 1946 - #if 0 1947 1923 dev->tx_timeout = cp_tx_timeout; 1948 1924 dev->watchdog_timeo = TX_TIMEOUT; 1949 - #endif 1950 1925 1951 1926 #if CP_VLAN_TAG_USED 1952 1927 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+1 -1
drivers/net/Kconfig
··· 205 205 config MACB 206 206 tristate "Atmel MACB support" 207 207 depends on AVR32 || ARCH_AT91SAM9260 || ARCH_AT91SAM9263 208 - select MII 208 + select PHYLIB 209 209 help 210 210 The Atmel MACB ethernet interface is found on many AT32 and AT91 211 211 parts. Say Y to include support for the MACB chip.
+1
drivers/net/arm/Kconfig
··· 43 43 config EP93XX_ETH 44 44 tristate "EP93xx Ethernet support" 45 45 depends on ARM && ARCH_EP93XX 46 + select MII 46 47 help 47 48 This is a driver for the ethernet hardware included in EP93xx CPUs. 48 49 Say Y if you are building a kernel for EP93xx based devices.
+80 -76
drivers/net/atl1/atl1.h
··· 43 43 struct atl1_adapter; 44 44 45 45 #define ATL1_MAX_INTR 3 46 + #define ATL1_MAX_TX_BUF_LEN 0x3000 /* 12288 bytes */ 46 47 47 48 #define ATL1_DEFAULT_TPD 256 48 49 #define ATL1_MAX_TPD 1024 ··· 58 57 #define ATL1_RRD_DESC(R, i) ATL1_GET_DESC(R, i, struct rx_return_desc) 59 58 60 59 /* 60 + * This detached comment is preserved for documentation purposes only. 61 + * It was originally attached to some code that got deleted, but seems 62 + * important enough to keep around... 63 + * 64 + * <begin detached comment> 61 65 * Some workarounds require millisecond delays and are run during interrupt 62 66 * context. Most notably, when establishing link, the phy may need tweaking 63 67 * but cannot process phy register reads/writes faster than millisecond 64 68 * intervals...and we establish link due to a "link status change" interrupt. 69 + * <end detached comment> 65 70 */ 66 71 67 72 /* 68 - * wrapper around a pointer to a socket buffer, 69 - * so a DMA handle can be stored along with the buffer 73 + * atl1_ring_header represents a single, contiguous block of DMA space 74 + * mapped for the three descriptor rings (tpd, rfd, rrd) and the two 75 + * message blocks (cmb, smb) described below 76 + */ 77 + struct atl1_ring_header { 78 + void *desc; /* virtual address */ 79 + dma_addr_t dma; /* physical address*/ 80 + unsigned int size; /* length in bytes */ 81 + }; 82 + 83 + /* 84 + * atl1_buffer is wrapper around a pointer to a socket buffer 85 + * so a DMA handle can be stored along with the skb 70 86 */ 71 87 struct atl1_buffer { 72 - struct sk_buff *skb; 73 - u16 length; 74 - u16 alloced; 88 + struct sk_buff *skb; /* socket buffer */ 89 + u16 length; /* rx buffer length */ 90 + u16 alloced; /* 1 if skb allocated */ 75 91 dma_addr_t dma; 76 92 }; 77 93 78 - #define MAX_TX_BUF_LEN 0x3000 /* 12KB */ 79 - 94 + /* transmit packet descriptor (tpd) ring */ 80 95 struct atl1_tpd_ring { 81 - void *desc; /* pointer to the descriptor ring memory */ 82 - dma_addr_t dma; /* physical adress of the descriptor ring */ 83 - u16 size; /* length of descriptor ring in bytes */ 96 + void *desc; /* descriptor ring virtual address */ 97 + dma_addr_t dma; /* descriptor ring physical address */ 98 + u16 size; /* descriptor ring length in bytes */ 84 99 u16 count; /* number of descriptors in the ring */ 85 100 u16 hw_idx; /* hardware index */ 86 101 atomic_t next_to_clean; ··· 104 87 struct atl1_buffer *buffer_info; 105 88 }; 106 89 90 + /* receive free descriptor (rfd) ring */ 107 91 struct atl1_rfd_ring { 108 - void *desc; 109 - dma_addr_t dma; 110 - u16 size; 111 - u16 count; 92 + void *desc; /* descriptor ring virtual address */ 93 + dma_addr_t dma; /* descriptor ring physical address */ 94 + u16 size; /* descriptor ring length in bytes */ 95 + u16 count; /* number of descriptors in the ring */ 112 96 atomic_t next_to_use; 113 97 u16 next_to_clean; 114 98 struct atl1_buffer *buffer_info; 115 99 }; 116 100 101 + /* receive return descriptor (rrd) ring */ 117 102 struct atl1_rrd_ring { 118 - void *desc; 119 - dma_addr_t dma; 120 - unsigned int size; 121 - u16 count; 103 + void *desc; /* descriptor ring virtual address */ 104 + dma_addr_t dma; /* descriptor ring physical address */ 105 + unsigned int size; /* descriptor ring length in bytes */ 106 + u16 count; /* number of descriptors in the ring */ 122 107 u16 next_to_use; 123 108 atomic_t next_to_clean; 124 109 }; 125 110 126 - struct atl1_ring_header { 127 - void *desc; /* pointer to the descriptor ring memory */ 128 - dma_addr_t dma; /* physical adress of the 
descriptor ring */ 129 - unsigned int size; /* length of descriptor ring in bytes */ 130 - }; 131 - 111 + /* coalescing message block (cmb) */ 132 112 struct atl1_cmb { 133 113 struct coals_msg_block *cmb; 134 114 dma_addr_t dma; 135 115 }; 136 116 117 + /* statistics message block (smb) */ 137 118 struct atl1_smb { 138 119 struct stats_msg_block *smb; 139 120 dma_addr_t dma; ··· 156 141 u64 tx_aborted_errors; 157 142 u64 tx_window_errors; 158 143 u64 tx_carrier_errors; 159 - 160 - u64 tx_pause; /* num Pause packet transmitted. */ 161 - u64 excecol; /* num tx packets aborted due to excessive collisions. */ 162 - u64 deffer; /* num deferred tx packets */ 163 - u64 scc; /* num packets subsequently transmitted successfully w/ single prior collision. */ 164 - u64 mcc; /* num packets subsequently transmitted successfully w/ multiple prior collisions. */ 144 + u64 tx_pause; /* num pause packets transmitted. */ 145 + u64 excecol; /* num tx packets w/ excessive collisions. */ 146 + u64 deffer; /* num tx packets deferred */ 147 + u64 scc; /* num packets subsequently transmitted 148 + * successfully w/ single prior collision. */ 149 + u64 mcc; /* num packets subsequently transmitted 150 + * successfully w/ multiple prior collisions. */ 165 151 u64 latecol; /* num tx packets w/ late collisions. */ 166 - u64 tx_underun; /* num tx packets aborted due to transmit FIFO underrun, or TRD FIFO underrun */ 167 - u64 tx_trunc; /* num tx packets truncated due to size exceeding MTU, regardless whether truncated by Selene or not. (The name doesn't really reflect the meaning in this case.) */ 152 + u64 tx_underun; /* num tx packets aborted due to transmit 153 + * FIFO underrun, or TRD FIFO underrun */ 154 + u64 tx_trunc; /* num tx packets truncated due to size 155 + * exceeding MTU, regardless whether truncated 156 + * by the chip or not. (The name doesn't really 157 + * reflect the meaning in this case.) */ 168 158 u64 rx_pause; /* num Pause packets received. */ 169 159 u64 rx_rrd_ov; 170 160 u64 rx_trunc; 171 161 }; 172 162 173 - /* board specific private data structure */ 174 - #define ATL1_REGS_LEN 8 175 - 176 - /* Structure containing variables used by the shared code */ 163 + /* hardware structure */ 177 164 struct atl1_hw { 178 165 u8 __iomem *hw_addr; 179 166 struct atl1_adapter *back; ··· 184 167 enum atl1_dma_req_block dmar_block; 185 168 enum atl1_dma_req_block dmaw_block; 186 169 u8 preamble_len; 187 - u8 max_retry; /* Retransmission maximum, after which the packet will be discarded */ 188 - u8 jam_ipg; /* IPG to start JAM for collision based flow control in half-duplex mode. In units of 8-bit time */ 189 - u8 ipgt; /* Desired back to back inter-packet gap. The default is 96-bit time */ 190 - u8 min_ifg; /* Minimum number of IFG to enforce in between RX frames. Frame gap below such IFP is dropped */ 170 + u8 max_retry; /* Retransmission maximum, after which the 171 + * packet will be discarded */ 172 + u8 jam_ipg; /* IPG to start JAM for collision based flow 173 + * control in half-duplex mode. In units of 174 + * 8-bit time */ 175 + u8 ipgt; /* Desired back to back inter-packet gap. 176 + * The default is 96-bit time */ 177 + u8 min_ifg; /* Minimum number of IFG to enforce in between 178 + * receive frames. Frame gap below such IFP 179 + * is dropped */ 191 180 u8 ipgr1; /* 64bit Carrier-Sense window */ 192 181 u8 ipgr2; /* 96-bit IPG window */ 193 - u8 tpd_burst; /* Number of TPD to prefetch in cache-aligned burst. 
Each TPD is 16 bytes long */ 194 - u8 rfd_burst; /* Number of RFD to prefetch in cache-aligned burst. Each RFD is 12 bytes long */ 182 + u8 tpd_burst; /* Number of TPD to prefetch in cache-aligned 183 + * burst. Each TPD is 16 bytes long */ 184 + u8 rfd_burst; /* Number of RFD to prefetch in cache-aligned 185 + * burst. Each RFD is 12 bytes long */ 195 186 u8 rfd_fetch_gap; 196 - u8 rrd_burst; /* Threshold number of RRDs that can be retired in a burst. Each RRD is 16 bytes long */ 187 + u8 rrd_burst; /* Threshold number of RRDs that can be retired 188 + * in a burst. Each RRD is 16 bytes long */ 197 189 u8 tpd_fetch_th; 198 190 u8 tpd_fetch_gap; 199 191 u16 tx_jumbo_task_th; 200 - u16 txf_burst; /* Number of data bytes to read in a cache-aligned burst. Each SRAM entry is 201 - 8 bytes long */ 202 - u16 rx_jumbo_th; /* Jumbo packet size for non-VLAN packet. VLAN packets should add 4 bytes */ 192 + u16 txf_burst; /* Number of data bytes to read in a cache- 193 + * aligned burst. Each SRAM entry is 8 bytes */ 194 + u16 rx_jumbo_th; /* Jumbo packet size for non-VLAN packet. VLAN 195 + * packets should add 4 bytes */ 203 196 u16 rx_jumbo_lkah; 204 - u16 rrd_ret_timer; /* RRD retirement timer. Decrement by 1 after every 512ns passes. */ 197 + u16 rrd_ret_timer; /* RRD retirement timer. Decrement by 1 after 198 + * every 512ns passes. */ 205 199 u16 lcol; /* Collision Window */ 206 200 207 201 u16 cmb_tpd; ··· 222 194 u32 smb_timer; 223 195 u16 media_type; 224 196 u16 autoneg_advertised; 225 - u16 pci_cmd_word; 226 197 227 198 u16 mii_autoneg_adv_reg; 228 199 u16 mii_1000t_ctrl_reg; 229 200 230 - u32 mem_rang; 231 - u32 txcw; 232 201 u32 max_frame_size; 233 202 u32 min_frame_size; 234 - u32 mc_filter_type; 235 - u32 num_mc_addrs; 236 - u32 collision_delta; 237 - u32 tx_packet_delta; 238 - u16 phy_spd_default; 239 203 240 204 u16 dev_rev; 241 205 242 206 /* spi flash */ 243 207 u8 flash_vendor; 244 208 245 - u8 dma_fairness; 246 209 u8 mac_addr[ETH_ALEN]; 247 210 u8 perm_mac_addr[ETH_ALEN]; 248 211 249 - /* bool phy_preamble_sup; */ 250 212 bool phy_configured; 251 213 }; 252 214 253 215 struct atl1_adapter { 254 - /* OS defined structs */ 255 216 struct net_device *netdev; 256 217 struct pci_dev *pdev; 257 218 struct net_device_stats net_stats; 258 219 struct atl1_sft_stats soft_stats; 259 - 260 220 struct vlan_group *vlgrp; 261 221 u32 rx_buffer_len; 262 222 u32 wol; 263 223 u16 link_speed; 264 224 u16 link_duplex; 265 225 spinlock_t lock; 266 - atomic_t irq_sem; 267 226 struct work_struct tx_timeout_task; 268 227 struct work_struct link_chg_task; 269 228 struct work_struct pcie_dma_to_rst_task; ··· 258 243 struct timer_list phy_config_timer; 259 244 bool phy_timer_pending; 260 245 261 - bool mac_disabled; 262 - 263 - /* All descriptor rings' memory */ 246 + /* all descriptor rings' memory */ 264 247 struct atl1_ring_header ring_header; 265 248 266 249 /* TX */ ··· 271 258 u64 hw_csum_err; 272 259 u64 hw_csum_good; 273 260 274 - u32 gorcl; 275 - u64 gorcl_old; 276 - 277 - /* Interrupt Moderator timer ( 2us resolution) */ 278 - u16 imt; 279 - /* Interrupt Clear timer (2us resolution) */ 280 - u16 ict; 281 - 282 - /* MII interface info */ 283 - struct mii_if_info mii; 261 + u16 imt; /* interrupt moderator timer (2us resolution */ 262 + u16 ict; /* interrupt clear timer (2us resolution */ 263 + struct mii_if_info mii; /* MII interface info */ 284 264 285 265 /* structs defined in atl1_hw.h */ 286 - u32 bd_number; /* board number */ 266 + u32 bd_number; /* board number */ 287 267 bool pci_using_64; 
288 268 struct atl1_hw hw; 289 269 struct atl1_smb smb; 290 270 struct atl1_cmb cmb; 291 - 292 - u32 pci_state[16]; 293 271 }; 294 272 295 273 #endif /* _ATL1_H_ */
+1156 -1148
drivers/net/atl1/atl1_main.c
··· 38 38 * TODO: 39 39 * Fix TSO; tx performance is horrible with TSO enabled. 40 40 * Wake on LAN. 41 - * Add more ethtool functions, including set ring parameters. 41 + * Add more ethtool functions. 42 42 * Fix abstruse irq enable/disable condition described here: 43 43 * http://marc.theaimsgroup.com/?l=linux-netdev&m=116398508500553&w=2 44 44 * ··· 158 158 hw->cmb_tx_timer = 1; /* about 2us */ 159 159 hw->smb_timer = 100000; /* about 200ms */ 160 160 161 - atomic_set(&adapter->irq_sem, 0); 162 161 spin_lock_init(&adapter->lock); 163 162 spin_lock_init(&adapter->mb_lock); 164 163 165 164 return 0; 165 + } 166 + 167 + static int mdio_read(struct net_device *netdev, int phy_id, int reg_num) 168 + { 169 + struct atl1_adapter *adapter = netdev_priv(netdev); 170 + u16 result; 171 + 172 + atl1_read_phy_reg(&adapter->hw, reg_num & 0x1f, &result); 173 + 174 + return result; 175 + } 176 + 177 + static void mdio_write(struct net_device *netdev, int phy_id, int reg_num, 178 + int val) 179 + { 180 + struct atl1_adapter *adapter = netdev_priv(netdev); 181 + 182 + atl1_write_phy_reg(&adapter->hw, reg_num, val); 183 + } 184 + 185 + /* 186 + * atl1_mii_ioctl - 187 + * @netdev: 188 + * @ifreq: 189 + * @cmd: 190 + */ 191 + static int atl1_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 192 + { 193 + struct atl1_adapter *adapter = netdev_priv(netdev); 194 + unsigned long flags; 195 + int retval; 196 + 197 + if (!netif_running(netdev)) 198 + return -EINVAL; 199 + 200 + spin_lock_irqsave(&adapter->lock, flags); 201 + retval = generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL); 202 + spin_unlock_irqrestore(&adapter->lock, flags); 203 + 204 + return retval; 205 + } 206 + 207 + /* 208 + * atl1_ioctl - 209 + * @netdev: 210 + * @ifreq: 211 + * @cmd: 212 + */ 213 + static int atl1_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 214 + { 215 + switch (cmd) { 216 + case SIOCGMIIPHY: 217 + case SIOCGMIIREG: 218 + case SIOCSMIIREG: 219 + return atl1_mii_ioctl(netdev, ifr, cmd); 220 + default: 221 + return -EOPNOTSUPP; 222 + } 166 223 } 167 224 168 225 /* ··· 245 188 goto err_nomem; 246 189 } 247 190 rfd_ring->buffer_info = 248 - (struct atl1_buffer *)(tpd_ring->buffer_info + tpd_ring->count); 191 + (struct atl1_buffer *)(tpd_ring->buffer_info + tpd_ring->count); 249 192 250 - /* real ring DMA buffer */ 251 - ring_header->size = size = sizeof(struct tx_packet_desc) * 252 - tpd_ring->count 253 - + sizeof(struct rx_free_desc) * rfd_ring->count 254 - + sizeof(struct rx_return_desc) * rrd_ring->count 255 - + sizeof(struct coals_msg_block) 256 - + sizeof(struct stats_msg_block) 257 - + 40; /* "40: for 8 bytes align" huh? -- CHS */ 193 + /* real ring DMA buffer 194 + * each ring/block may need up to 8 bytes for alignment, hence the 195 + * additional 40 bytes tacked onto the end. 
196 + */ 197 + ring_header->size = size = 198 + sizeof(struct tx_packet_desc) * tpd_ring->count 199 + + sizeof(struct rx_free_desc) * rfd_ring->count 200 + + sizeof(struct rx_return_desc) * rrd_ring->count 201 + + sizeof(struct coals_msg_block) 202 + + sizeof(struct stats_msg_block) 203 + + 40; 258 204 259 205 ring_header->desc = pci_alloc_consistent(pdev, ring_header->size, 260 - &ring_header->dma); 206 + &ring_header->dma); 261 207 if (unlikely(!ring_header->desc)) { 262 208 dev_err(&pdev->dev, "pci_alloc_consistent failed\n"); 263 209 goto err_nomem; ··· 274 214 tpd_ring->dma += offset; 275 215 tpd_ring->desc = (u8 *) ring_header->desc + offset; 276 216 tpd_ring->size = sizeof(struct tx_packet_desc) * tpd_ring->count; 277 - atomic_set(&tpd_ring->next_to_use, 0); 278 - atomic_set(&tpd_ring->next_to_clean, 0); 279 217 280 218 /* init RFD ring */ 281 219 rfd_ring->dma = tpd_ring->dma + tpd_ring->size; ··· 281 223 rfd_ring->dma += offset; 282 224 rfd_ring->desc = (u8 *) tpd_ring->desc + (tpd_ring->size + offset); 283 225 rfd_ring->size = sizeof(struct rx_free_desc) * rfd_ring->count; 284 - rfd_ring->next_to_clean = 0; 285 - /* rfd_ring->next_to_use = rfd_ring->count - 1; */ 286 - atomic_set(&rfd_ring->next_to_use, 0); 226 + 287 227 288 228 /* init RRD ring */ 289 229 rrd_ring->dma = rfd_ring->dma + rfd_ring->size; ··· 289 233 rrd_ring->dma += offset; 290 234 rrd_ring->desc = (u8 *) rfd_ring->desc + (rfd_ring->size + offset); 291 235 rrd_ring->size = sizeof(struct rx_return_desc) * rrd_ring->count; 292 - rrd_ring->next_to_use = 0; 293 - atomic_set(&rrd_ring->next_to_clean, 0); 236 + 294 237 295 238 /* init CMB */ 296 239 adapter->cmb.dma = rrd_ring->dma + rrd_ring->size; 297 240 offset = (adapter->cmb.dma & 0x7) ? (8 - (adapter->cmb.dma & 0x7)) : 0; 298 241 adapter->cmb.dma += offset; 299 - adapter->cmb.cmb = 300 - (struct coals_msg_block *) ((u8 *) rrd_ring->desc + 301 - (rrd_ring->size + offset)); 242 + adapter->cmb.cmb = (struct coals_msg_block *) 243 + ((u8 *) rrd_ring->desc + (rrd_ring->size + offset)); 302 244 303 245 /* init SMB */ 304 246 adapter->smb.dma = adapter->cmb.dma + sizeof(struct coals_msg_block); 305 247 offset = (adapter->smb.dma & 0x7) ? 
(8 - (adapter->smb.dma & 0x7)) : 0; 306 248 adapter->smb.dma += offset; 307 249 adapter->smb.smb = (struct stats_msg_block *) 308 - ((u8 *) adapter->cmb.cmb + (sizeof(struct coals_msg_block) + offset)); 250 + ((u8 *) adapter->cmb.cmb + 251 + (sizeof(struct coals_msg_block) + offset)); 309 252 310 253 return ATL1_SUCCESS; 311 254 ··· 313 258 return -ENOMEM; 314 259 } 315 260 261 + void atl1_init_ring_ptrs(struct atl1_adapter *adapter) 262 + { 263 + struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; 264 + struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; 265 + struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring; 266 + 267 + atomic_set(&tpd_ring->next_to_use, 0); 268 + atomic_set(&tpd_ring->next_to_clean, 0); 269 + 270 + rfd_ring->next_to_clean = 0; 271 + atomic_set(&rfd_ring->next_to_use, 0); 272 + 273 + rrd_ring->next_to_use = 0; 274 + atomic_set(&rrd_ring->next_to_clean, 0); 275 + } 276 + 277 + /* 278 + * atl1_clean_rx_ring - Free RFD Buffers 279 + * @adapter: board private structure 280 + */ 281 + static void atl1_clean_rx_ring(struct atl1_adapter *adapter) 282 + { 283 + struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; 284 + struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring; 285 + struct atl1_buffer *buffer_info; 286 + struct pci_dev *pdev = adapter->pdev; 287 + unsigned long size; 288 + unsigned int i; 289 + 290 + /* Free all the Rx ring sk_buffs */ 291 + for (i = 0; i < rfd_ring->count; i++) { 292 + buffer_info = &rfd_ring->buffer_info[i]; 293 + if (buffer_info->dma) { 294 + pci_unmap_page(pdev, buffer_info->dma, 295 + buffer_info->length, PCI_DMA_FROMDEVICE); 296 + buffer_info->dma = 0; 297 + } 298 + if (buffer_info->skb) { 299 + dev_kfree_skb(buffer_info->skb); 300 + buffer_info->skb = NULL; 301 + } 302 + } 303 + 304 + size = sizeof(struct atl1_buffer) * rfd_ring->count; 305 + memset(rfd_ring->buffer_info, 0, size); 306 + 307 + /* Zero out the descriptor ring */ 308 + memset(rfd_ring->desc, 0, rfd_ring->size); 309 + 310 + rfd_ring->next_to_clean = 0; 311 + atomic_set(&rfd_ring->next_to_use, 0); 312 + 313 + rrd_ring->next_to_use = 0; 314 + atomic_set(&rrd_ring->next_to_clean, 0); 315 + } 316 + 317 + /* 318 + * atl1_clean_tx_ring - Free Tx Buffers 319 + * @adapter: board private structure 320 + */ 321 + static void atl1_clean_tx_ring(struct atl1_adapter *adapter) 322 + { 323 + struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; 324 + struct atl1_buffer *buffer_info; 325 + struct pci_dev *pdev = adapter->pdev; 326 + unsigned long size; 327 + unsigned int i; 328 + 329 + /* Free all the Tx ring sk_buffs */ 330 + for (i = 0; i < tpd_ring->count; i++) { 331 + buffer_info = &tpd_ring->buffer_info[i]; 332 + if (buffer_info->dma) { 333 + pci_unmap_page(pdev, buffer_info->dma, 334 + buffer_info->length, PCI_DMA_TODEVICE); 335 + buffer_info->dma = 0; 336 + } 337 + } 338 + 339 + for (i = 0; i < tpd_ring->count; i++) { 340 + buffer_info = &tpd_ring->buffer_info[i]; 341 + if (buffer_info->skb) { 342 + dev_kfree_skb_any(buffer_info->skb); 343 + buffer_info->skb = NULL; 344 + } 345 + } 346 + 347 + size = sizeof(struct atl1_buffer) * tpd_ring->count; 348 + memset(tpd_ring->buffer_info, 0, size); 349 + 350 + /* Zero out the descriptor ring */ 351 + memset(tpd_ring->desc, 0, tpd_ring->size); 352 + 353 + atomic_set(&tpd_ring->next_to_use, 0); 354 + atomic_set(&tpd_ring->next_to_clean, 0); 355 + } 356 + 357 + /* 358 + * atl1_free_ring_resources - Free Tx / RX descriptor Resources 359 + * @adapter: board private structure 360 + * 361 + * Free all transmit software resources 362 + */ 363 + void 
atl1_free_ring_resources(struct atl1_adapter *adapter) 364 + { 365 + struct pci_dev *pdev = adapter->pdev; 366 + struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; 367 + struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; 368 + struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring; 369 + struct atl1_ring_header *ring_header = &adapter->ring_header; 370 + 371 + atl1_clean_tx_ring(adapter); 372 + atl1_clean_rx_ring(adapter); 373 + 374 + kfree(tpd_ring->buffer_info); 375 + pci_free_consistent(pdev, ring_header->size, ring_header->desc, 376 + ring_header->dma); 377 + 378 + tpd_ring->buffer_info = NULL; 379 + tpd_ring->desc = NULL; 380 + tpd_ring->dma = 0; 381 + 382 + rfd_ring->buffer_info = NULL; 383 + rfd_ring->desc = NULL; 384 + rfd_ring->dma = 0; 385 + 386 + rrd_ring->desc = NULL; 387 + rrd_ring->dma = 0; 388 + } 389 + 390 + static void atl1_setup_mac_ctrl(struct atl1_adapter *adapter) 391 + { 392 + u32 value; 393 + struct atl1_hw *hw = &adapter->hw; 394 + struct net_device *netdev = adapter->netdev; 395 + /* Config MAC CTRL Register */ 396 + value = MAC_CTRL_TX_EN | MAC_CTRL_RX_EN; 397 + /* duplex */ 398 + if (FULL_DUPLEX == adapter->link_duplex) 399 + value |= MAC_CTRL_DUPLX; 400 + /* speed */ 401 + value |= ((u32) ((SPEED_1000 == adapter->link_speed) ? 402 + MAC_CTRL_SPEED_1000 : MAC_CTRL_SPEED_10_100) << 403 + MAC_CTRL_SPEED_SHIFT); 404 + /* flow control */ 405 + value |= (MAC_CTRL_TX_FLOW | MAC_CTRL_RX_FLOW); 406 + /* PAD & CRC */ 407 + value |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD); 408 + /* preamble length */ 409 + value |= (((u32) adapter->hw.preamble_len 410 + & MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT); 411 + /* vlan */ 412 + if (adapter->vlgrp) 413 + value |= MAC_CTRL_RMV_VLAN; 414 + /* rx checksum 415 + if (adapter->rx_csum) 416 + value |= MAC_CTRL_RX_CHKSUM_EN; 417 + */ 418 + /* filter mode */ 419 + value |= MAC_CTRL_BC_EN; 420 + if (netdev->flags & IFF_PROMISC) 421 + value |= MAC_CTRL_PROMIS_EN; 422 + else if (netdev->flags & IFF_ALLMULTI) 423 + value |= MAC_CTRL_MC_ALL_EN; 424 + /* value |= MAC_CTRL_LOOPBACK; */ 425 + iowrite32(value, hw->hw_addr + REG_MAC_CTRL); 426 + } 427 + 428 + /* 429 + * atl1_set_mac - Change the Ethernet Address of the NIC 430 + * @netdev: network interface device structure 431 + * @p: pointer to an address structure 432 + * 433 + * Returns 0 on success, negative on failure 434 + */ 435 + static int atl1_set_mac(struct net_device *netdev, void *p) 436 + { 437 + struct atl1_adapter *adapter = netdev_priv(netdev); 438 + struct sockaddr *addr = p; 439 + 440 + if (netif_running(netdev)) 441 + return -EBUSY; 442 + 443 + if (!is_valid_ether_addr(addr->sa_data)) 444 + return -EADDRNOTAVAIL; 445 + 446 + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 447 + memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len); 448 + 449 + atl1_set_mac_addr(&adapter->hw); 450 + return 0; 451 + } 452 + 453 + static u32 atl1_check_link(struct atl1_adapter *adapter) 454 + { 455 + struct atl1_hw *hw = &adapter->hw; 456 + struct net_device *netdev = adapter->netdev; 457 + u32 ret_val; 458 + u16 speed, duplex, phy_data; 459 + int reconfig = 0; 460 + 461 + /* MII_BMSR must read twice */ 462 + atl1_read_phy_reg(hw, MII_BMSR, &phy_data); 463 + atl1_read_phy_reg(hw, MII_BMSR, &phy_data); 464 + if (!(phy_data & BMSR_LSTATUS)) { /* link down */ 465 + if (netif_carrier_ok(netdev)) { /* old link state: Up */ 466 + dev_info(&adapter->pdev->dev, "link is down\n"); 467 + adapter->link_speed = SPEED_0; 468 + netif_carrier_off(netdev); 469 + netif_stop_queue(netdev); 470 + } 
471 + return ATL1_SUCCESS; 472 + } 473 + 474 + /* Link Up */ 475 + ret_val = atl1_get_speed_and_duplex(hw, &speed, &duplex); 476 + if (ret_val) 477 + return ret_val; 478 + 479 + switch (hw->media_type) { 480 + case MEDIA_TYPE_1000M_FULL: 481 + if (speed != SPEED_1000 || duplex != FULL_DUPLEX) 482 + reconfig = 1; 483 + break; 484 + case MEDIA_TYPE_100M_FULL: 485 + if (speed != SPEED_100 || duplex != FULL_DUPLEX) 486 + reconfig = 1; 487 + break; 488 + case MEDIA_TYPE_100M_HALF: 489 + if (speed != SPEED_100 || duplex != HALF_DUPLEX) 490 + reconfig = 1; 491 + break; 492 + case MEDIA_TYPE_10M_FULL: 493 + if (speed != SPEED_10 || duplex != FULL_DUPLEX) 494 + reconfig = 1; 495 + break; 496 + case MEDIA_TYPE_10M_HALF: 497 + if (speed != SPEED_10 || duplex != HALF_DUPLEX) 498 + reconfig = 1; 499 + break; 500 + } 501 + 502 + /* link result is our setting */ 503 + if (!reconfig) { 504 + if (adapter->link_speed != speed 505 + || adapter->link_duplex != duplex) { 506 + adapter->link_speed = speed; 507 + adapter->link_duplex = duplex; 508 + atl1_setup_mac_ctrl(adapter); 509 + dev_info(&adapter->pdev->dev, 510 + "%s link is up %d Mbps %s\n", 511 + netdev->name, adapter->link_speed, 512 + adapter->link_duplex == FULL_DUPLEX ? 513 + "full duplex" : "half duplex"); 514 + } 515 + if (!netif_carrier_ok(netdev)) { /* Link down -> Up */ 516 + netif_carrier_on(netdev); 517 + netif_wake_queue(netdev); 518 + } 519 + return ATL1_SUCCESS; 520 + } 521 + 522 + /* change orignal link status */ 523 + if (netif_carrier_ok(netdev)) { 524 + adapter->link_speed = SPEED_0; 525 + netif_carrier_off(netdev); 526 + netif_stop_queue(netdev); 527 + } 528 + 529 + if (hw->media_type != MEDIA_TYPE_AUTO_SENSOR && 530 + hw->media_type != MEDIA_TYPE_1000M_FULL) { 531 + switch (hw->media_type) { 532 + case MEDIA_TYPE_100M_FULL: 533 + phy_data = MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 | 534 + MII_CR_RESET; 535 + break; 536 + case MEDIA_TYPE_100M_HALF: 537 + phy_data = MII_CR_SPEED_100 | MII_CR_RESET; 538 + break; 539 + case MEDIA_TYPE_10M_FULL: 540 + phy_data = 541 + MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET; 542 + break; 543 + default: /* MEDIA_TYPE_10M_HALF: */ 544 + phy_data = MII_CR_SPEED_10 | MII_CR_RESET; 545 + break; 546 + } 547 + atl1_write_phy_reg(hw, MII_BMCR, phy_data); 548 + return ATL1_SUCCESS; 549 + } 550 + 551 + /* auto-neg, insert timer to re-config phy */ 552 + if (!adapter->phy_timer_pending) { 553 + adapter->phy_timer_pending = true; 554 + mod_timer(&adapter->phy_config_timer, jiffies + 3 * HZ); 555 + } 556 + 557 + return ATL1_SUCCESS; 558 + } 559 + 560 + static void atl1_check_for_link(struct atl1_adapter *adapter) 561 + { 562 + struct net_device *netdev = adapter->netdev; 563 + u16 phy_data = 0; 564 + 565 + spin_lock(&adapter->lock); 566 + adapter->phy_timer_pending = false; 567 + atl1_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data); 568 + atl1_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data); 569 + spin_unlock(&adapter->lock); 570 + 571 + /* notify upper layer link down ASAP */ 572 + if (!(phy_data & BMSR_LSTATUS)) { /* Link Down */ 573 + if (netif_carrier_ok(netdev)) { /* old link state: Up */ 574 + dev_info(&adapter->pdev->dev, "%s link is down\n", 575 + netdev->name); 576 + adapter->link_speed = SPEED_0; 577 + netif_carrier_off(netdev); 578 + netif_stop_queue(netdev); 579 + } 580 + } 581 + schedule_work(&adapter->link_chg_task); 582 + } 583 + 584 + /* 585 + * atl1_set_multi - Multicast and Promiscuous mode set 586 + * @netdev: network interface device structure 587 + * 588 + * The set_multi entry point is 
called whenever the multicast address 589 + * list or the network interface flags are updated. This routine is 590 + * responsible for configuring the hardware for proper multicast, 591 + * promiscuous mode, and all-multi behavior. 592 + */ 593 + static void atl1_set_multi(struct net_device *netdev) 594 + { 595 + struct atl1_adapter *adapter = netdev_priv(netdev); 596 + struct atl1_hw *hw = &adapter->hw; 597 + struct dev_mc_list *mc_ptr; 598 + u32 rctl; 599 + u32 hash_value; 600 + 601 + /* Check for Promiscuous and All Multicast modes */ 602 + rctl = ioread32(hw->hw_addr + REG_MAC_CTRL); 603 + if (netdev->flags & IFF_PROMISC) 604 + rctl |= MAC_CTRL_PROMIS_EN; 605 + else if (netdev->flags & IFF_ALLMULTI) { 606 + rctl |= MAC_CTRL_MC_ALL_EN; 607 + rctl &= ~MAC_CTRL_PROMIS_EN; 608 + } else 609 + rctl &= ~(MAC_CTRL_PROMIS_EN | MAC_CTRL_MC_ALL_EN); 610 + 611 + iowrite32(rctl, hw->hw_addr + REG_MAC_CTRL); 612 + 613 + /* clear the old settings from the multicast hash table */ 614 + iowrite32(0, hw->hw_addr + REG_RX_HASH_TABLE); 615 + iowrite32(0, (hw->hw_addr + REG_RX_HASH_TABLE) + (1 << 2)); 616 + 617 + /* compute mc addresses' hash value ,and put it into hash table */ 618 + for (mc_ptr = netdev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) { 619 + hash_value = atl1_hash_mc_addr(hw, mc_ptr->dmi_addr); 620 + atl1_hash_set(hw, hash_value); 621 + } 622 + } 623 + 624 + /* 625 + * atl1_change_mtu - Change the Maximum Transfer Unit 626 + * @netdev: network interface device structure 627 + * @new_mtu: new value for maximum frame size 628 + * 629 + * Returns 0 on success, negative on failure 630 + */ 631 + static int atl1_change_mtu(struct net_device *netdev, int new_mtu) 632 + { 633 + struct atl1_adapter *adapter = netdev_priv(netdev); 634 + int old_mtu = netdev->mtu; 635 + int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; 636 + 637 + if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) || 638 + (max_frame > MAX_JUMBO_FRAME_SIZE)) { 639 + dev_warn(&adapter->pdev->dev, "invalid MTU setting\n"); 640 + return -EINVAL; 641 + } 642 + 643 + adapter->hw.max_frame_size = max_frame; 644 + adapter->hw.tx_jumbo_task_th = (max_frame + 7) >> 3; 645 + adapter->rx_buffer_len = (max_frame + 7) & ~7; 646 + adapter->hw.rx_jumbo_th = adapter->rx_buffer_len / 8; 647 + 648 + netdev->mtu = new_mtu; 649 + if ((old_mtu != new_mtu) && netif_running(netdev)) { 650 + atl1_down(adapter); 651 + atl1_up(adapter); 652 + } 653 + 654 + return 0; 655 + } 656 + 657 + static void set_flow_ctrl_old(struct atl1_adapter *adapter) 658 + { 659 + u32 hi, lo, value; 660 + 661 + /* RFD Flow Control */ 662 + value = adapter->rfd_ring.count; 663 + hi = value / 16; 664 + if (hi < 2) 665 + hi = 2; 666 + lo = value * 7 / 8; 667 + 668 + value = ((hi & RXQ_RXF_PAUSE_TH_HI_MASK) << RXQ_RXF_PAUSE_TH_HI_SHIFT) | 669 + ((lo & RXQ_RXF_PAUSE_TH_LO_MASK) << RXQ_RXF_PAUSE_TH_LO_SHIFT); 670 + iowrite32(value, adapter->hw.hw_addr + REG_RXQ_RXF_PAUSE_THRESH); 671 + 672 + /* RRD Flow Control */ 673 + value = adapter->rrd_ring.count; 674 + lo = value / 16; 675 + hi = value * 7 / 8; 676 + if (lo < 2) 677 + lo = 2; 678 + value = ((hi & RXQ_RRD_PAUSE_TH_HI_MASK) << RXQ_RRD_PAUSE_TH_HI_SHIFT) | 679 + ((lo & RXQ_RRD_PAUSE_TH_LO_MASK) << RXQ_RRD_PAUSE_TH_LO_SHIFT); 680 + iowrite32(value, adapter->hw.hw_addr + REG_RXQ_RRD_PAUSE_THRESH); 681 + } 682 + 683 + static void set_flow_ctrl_new(struct atl1_hw *hw) 684 + { 685 + u32 hi, lo, value; 686 + 687 + /* RXF Flow Control */ 688 + value = ioread32(hw->hw_addr + REG_SRAM_RXF_LEN); 689 + lo = value / 16; 690 + if (lo < 192) 
691 + lo = 192; 692 + hi = value * 7 / 8; 693 + if (hi < lo) 694 + hi = lo + 16; 695 + value = ((hi & RXQ_RXF_PAUSE_TH_HI_MASK) << RXQ_RXF_PAUSE_TH_HI_SHIFT) | 696 + ((lo & RXQ_RXF_PAUSE_TH_LO_MASK) << RXQ_RXF_PAUSE_TH_LO_SHIFT); 697 + iowrite32(value, hw->hw_addr + REG_RXQ_RXF_PAUSE_THRESH); 698 + 699 + /* RRD Flow Control */ 700 + value = ioread32(hw->hw_addr + REG_SRAM_RRD_LEN); 701 + lo = value / 8; 702 + hi = value * 7 / 8; 703 + if (lo < 2) 704 + lo = 2; 705 + if (hi < lo) 706 + hi = lo + 3; 707 + value = ((hi & RXQ_RRD_PAUSE_TH_HI_MASK) << RXQ_RRD_PAUSE_TH_HI_SHIFT) | 708 + ((lo & RXQ_RRD_PAUSE_TH_LO_MASK) << RXQ_RRD_PAUSE_TH_LO_SHIFT); 709 + iowrite32(value, hw->hw_addr + REG_RXQ_RRD_PAUSE_THRESH); 710 + } 711 + 712 + /* 713 + * atl1_configure - Configure Transmit&Receive Unit after Reset 714 + * @adapter: board private structure 715 + * 716 + * Configure the Tx /Rx unit of the MAC after a reset. 717 + */ 718 + static u32 atl1_configure(struct atl1_adapter *adapter) 719 + { 720 + struct atl1_hw *hw = &adapter->hw; 721 + u32 value; 722 + 723 + /* clear interrupt status */ 724 + iowrite32(0xffffffff, adapter->hw.hw_addr + REG_ISR); 725 + 726 + /* set MAC Address */ 727 + value = (((u32) hw->mac_addr[2]) << 24) | 728 + (((u32) hw->mac_addr[3]) << 16) | 729 + (((u32) hw->mac_addr[4]) << 8) | 730 + (((u32) hw->mac_addr[5])); 731 + iowrite32(value, hw->hw_addr + REG_MAC_STA_ADDR); 732 + value = (((u32) hw->mac_addr[0]) << 8) | (((u32) hw->mac_addr[1])); 733 + iowrite32(value, hw->hw_addr + (REG_MAC_STA_ADDR + 4)); 734 + 735 + /* tx / rx ring */ 736 + 737 + /* HI base address */ 738 + iowrite32((u32) ((adapter->tpd_ring.dma & 0xffffffff00000000ULL) >> 32), 739 + hw->hw_addr + REG_DESC_BASE_ADDR_HI); 740 + /* LO base address */ 741 + iowrite32((u32) (adapter->rfd_ring.dma & 0x00000000ffffffffULL), 742 + hw->hw_addr + REG_DESC_RFD_ADDR_LO); 743 + iowrite32((u32) (adapter->rrd_ring.dma & 0x00000000ffffffffULL), 744 + hw->hw_addr + REG_DESC_RRD_ADDR_LO); 745 + iowrite32((u32) (adapter->tpd_ring.dma & 0x00000000ffffffffULL), 746 + hw->hw_addr + REG_DESC_TPD_ADDR_LO); 747 + iowrite32((u32) (adapter->cmb.dma & 0x00000000ffffffffULL), 748 + hw->hw_addr + REG_DESC_CMB_ADDR_LO); 749 + iowrite32((u32) (adapter->smb.dma & 0x00000000ffffffffULL), 750 + hw->hw_addr + REG_DESC_SMB_ADDR_LO); 751 + 752 + /* element count */ 753 + value = adapter->rrd_ring.count; 754 + value <<= 16; 755 + value += adapter->rfd_ring.count; 756 + iowrite32(value, hw->hw_addr + REG_DESC_RFD_RRD_RING_SIZE); 757 + iowrite32(adapter->tpd_ring.count, hw->hw_addr + 758 + REG_DESC_TPD_RING_SIZE); 759 + 760 + /* Load Ptr */ 761 + iowrite32(1, hw->hw_addr + REG_LOAD_PTR); 762 + 763 + /* config Mailbox */ 764 + value = ((atomic_read(&adapter->tpd_ring.next_to_use) 765 + & MB_TPD_PROD_INDX_MASK) << MB_TPD_PROD_INDX_SHIFT) | 766 + ((atomic_read(&adapter->rrd_ring.next_to_clean) 767 + & MB_RRD_CONS_INDX_MASK) << MB_RRD_CONS_INDX_SHIFT) | 768 + ((atomic_read(&adapter->rfd_ring.next_to_use) 769 + & MB_RFD_PROD_INDX_MASK) << MB_RFD_PROD_INDX_SHIFT); 770 + iowrite32(value, hw->hw_addr + REG_MAILBOX); 771 + 772 + /* config IPG/IFG */ 773 + value = (((u32) hw->ipgt & MAC_IPG_IFG_IPGT_MASK) 774 + << MAC_IPG_IFG_IPGT_SHIFT) | 775 + (((u32) hw->min_ifg & MAC_IPG_IFG_MIFG_MASK) 776 + << MAC_IPG_IFG_MIFG_SHIFT) | 777 + (((u32) hw->ipgr1 & MAC_IPG_IFG_IPGR1_MASK) 778 + << MAC_IPG_IFG_IPGR1_SHIFT) | 779 + (((u32) hw->ipgr2 & MAC_IPG_IFG_IPGR2_MASK) 780 + << MAC_IPG_IFG_IPGR2_SHIFT); 781 + iowrite32(value, hw->hw_addr + REG_MAC_IPG_IFG); 782 + 783 + 
/* config Half-Duplex Control */ 784 + value = ((u32) hw->lcol & MAC_HALF_DUPLX_CTRL_LCOL_MASK) | 785 + (((u32) hw->max_retry & MAC_HALF_DUPLX_CTRL_RETRY_MASK) 786 + << MAC_HALF_DUPLX_CTRL_RETRY_SHIFT) | 787 + MAC_HALF_DUPLX_CTRL_EXC_DEF_EN | 788 + (0xa << MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT) | 789 + (((u32) hw->jam_ipg & MAC_HALF_DUPLX_CTRL_JAMIPG_MASK) 790 + << MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT); 791 + iowrite32(value, hw->hw_addr + REG_MAC_HALF_DUPLX_CTRL); 792 + 793 + /* set Interrupt Moderator Timer */ 794 + iowrite16(adapter->imt, hw->hw_addr + REG_IRQ_MODU_TIMER_INIT); 795 + iowrite32(MASTER_CTRL_ITIMER_EN, hw->hw_addr + REG_MASTER_CTRL); 796 + 797 + /* set Interrupt Clear Timer */ 798 + iowrite16(adapter->ict, hw->hw_addr + REG_CMBDISDMA_TIMER); 799 + 800 + /* set MTU, 4 : VLAN */ 801 + iowrite32(hw->max_frame_size + 4, hw->hw_addr + REG_MTU); 802 + 803 + /* jumbo size & rrd retirement timer */ 804 + value = (((u32) hw->rx_jumbo_th & RXQ_JMBOSZ_TH_MASK) 805 + << RXQ_JMBOSZ_TH_SHIFT) | 806 + (((u32) hw->rx_jumbo_lkah & RXQ_JMBO_LKAH_MASK) 807 + << RXQ_JMBO_LKAH_SHIFT) | 808 + (((u32) hw->rrd_ret_timer & RXQ_RRD_TIMER_MASK) 809 + << RXQ_RRD_TIMER_SHIFT); 810 + iowrite32(value, hw->hw_addr + REG_RXQ_JMBOSZ_RRDTIM); 811 + 812 + /* Flow Control */ 813 + switch (hw->dev_rev) { 814 + case 0x8001: 815 + case 0x9001: 816 + case 0x9002: 817 + case 0x9003: 818 + set_flow_ctrl_old(adapter); 819 + break; 820 + default: 821 + set_flow_ctrl_new(hw); 822 + break; 823 + } 824 + 825 + /* config TXQ */ 826 + value = (((u32) hw->tpd_burst & TXQ_CTRL_TPD_BURST_NUM_MASK) 827 + << TXQ_CTRL_TPD_BURST_NUM_SHIFT) | 828 + (((u32) hw->txf_burst & TXQ_CTRL_TXF_BURST_NUM_MASK) 829 + << TXQ_CTRL_TXF_BURST_NUM_SHIFT) | 830 + (((u32) hw->tpd_fetch_th & TXQ_CTRL_TPD_FETCH_TH_MASK) 831 + << TXQ_CTRL_TPD_FETCH_TH_SHIFT) | TXQ_CTRL_ENH_MODE | 832 + TXQ_CTRL_EN; 833 + iowrite32(value, hw->hw_addr + REG_TXQ_CTRL); 834 + 835 + /* min tpd fetch gap & tx jumbo packet size threshold for taskoffload */ 836 + value = (((u32) hw->tx_jumbo_task_th & TX_JUMBO_TASK_TH_MASK) 837 + << TX_JUMBO_TASK_TH_SHIFT) | 838 + (((u32) hw->tpd_fetch_gap & TX_TPD_MIN_IPG_MASK) 839 + << TX_TPD_MIN_IPG_SHIFT); 840 + iowrite32(value, hw->hw_addr + REG_TX_JUMBO_TASK_TH_TPD_IPG); 841 + 842 + /* config RXQ */ 843 + value = (((u32) hw->rfd_burst & RXQ_CTRL_RFD_BURST_NUM_MASK) 844 + << RXQ_CTRL_RFD_BURST_NUM_SHIFT) | 845 + (((u32) hw->rrd_burst & RXQ_CTRL_RRD_BURST_THRESH_MASK) 846 + << RXQ_CTRL_RRD_BURST_THRESH_SHIFT) | 847 + (((u32) hw->rfd_fetch_gap & RXQ_CTRL_RFD_PREF_MIN_IPG_MASK) 848 + << RXQ_CTRL_RFD_PREF_MIN_IPG_SHIFT) | RXQ_CTRL_CUT_THRU_EN | 849 + RXQ_CTRL_EN; 850 + iowrite32(value, hw->hw_addr + REG_RXQ_CTRL); 851 + 852 + /* config DMA Engine */ 853 + value = ((((u32) hw->dmar_block) & DMA_CTRL_DMAR_BURST_LEN_MASK) 854 + << DMA_CTRL_DMAR_BURST_LEN_SHIFT) | 855 + ((((u32) hw->dmaw_block) & DMA_CTRL_DMAR_BURST_LEN_MASK) 856 + << DMA_CTRL_DMAR_BURST_LEN_SHIFT) | DMA_CTRL_DMAR_EN | 857 + DMA_CTRL_DMAW_EN; 858 + value |= (u32) hw->dma_ord; 859 + if (atl1_rcb_128 == hw->rcb_value) 860 + value |= DMA_CTRL_RCB_VALUE; 861 + iowrite32(value, hw->hw_addr + REG_DMA_CTRL); 862 + 863 + /* config CMB / SMB */ 864 + value = hw->cmb_rrd | ((u32) hw->cmb_tpd << 16); 865 + iowrite32(value, hw->hw_addr + REG_CMB_WRITE_TH); 866 + value = hw->cmb_rx_timer | ((u32) hw->cmb_tx_timer << 16); 867 + iowrite32(value, hw->hw_addr + REG_CMB_WRITE_TIMER); 868 + iowrite32(hw->smb_timer, hw->hw_addr + REG_SMB_TIMER); 869 + 870 + /* --- enable CMB / SMB */ 871 + value = 
CSMB_CTRL_CMB_EN | CSMB_CTRL_SMB_EN; 872 + iowrite32(value, hw->hw_addr + REG_CSMB_CTRL); 873 + 874 + value = ioread32(adapter->hw.hw_addr + REG_ISR); 875 + if (unlikely((value & ISR_PHY_LINKDOWN) != 0)) 876 + value = 1; /* config failed */ 877 + else 878 + value = 0; 879 + 880 + /* clear all interrupt status */ 881 + iowrite32(0x3fffffff, adapter->hw.hw_addr + REG_ISR); 882 + iowrite32(0, adapter->hw.hw_addr + REG_ISR); 883 + return value; 884 + } 885 + 886 + /* 887 + * atl1_pcie_patch - Patch for PCIE module 888 + */ 889 + static void atl1_pcie_patch(struct atl1_adapter *adapter) 890 + { 891 + u32 value; 892 + 893 + /* much vendor magic here */ 894 + value = 0x6500; 895 + iowrite32(value, adapter->hw.hw_addr + 0x12FC); 896 + /* pcie flow control mode change */ 897 + value = ioread32(adapter->hw.hw_addr + 0x1008); 898 + value |= 0x8000; 899 + iowrite32(value, adapter->hw.hw_addr + 0x1008); 900 + } 901 + 902 + /* 903 + * When ACPI resume on some VIA MotherBoard, the Interrupt Disable bit/0x400 904 + * on PCI Command register is disable. 905 + * The function enable this bit. 906 + * Brackett, 2006/03/15 907 + */ 908 + static void atl1_via_workaround(struct atl1_adapter *adapter) 909 + { 910 + unsigned long value; 911 + 912 + value = ioread16(adapter->hw.hw_addr + PCI_COMMAND); 913 + if (value & PCI_COMMAND_INTX_DISABLE) 914 + value &= ~PCI_COMMAND_INTX_DISABLE; 915 + iowrite32(value, adapter->hw.hw_addr + PCI_COMMAND); 916 + } 917 + 316 918 /* 317 919 * atl1_irq_enable - Enable default interrupt generation settings 318 920 * @adapter: board private structure 319 921 */ 320 922 static void atl1_irq_enable(struct atl1_adapter *adapter) 321 923 { 322 - if (likely(!atomic_dec_and_test(&adapter->irq_sem))) 323 - iowrite32(IMR_NORMAL_MASK, adapter->hw.hw_addr + REG_IMR); 924 + iowrite32(IMR_NORMAL_MASK, adapter->hw.hw_addr + REG_IMR); 925 + ioread32(adapter->hw.hw_addr + REG_IMR); 926 + } 927 + 928 + /* 929 + * atl1_irq_disable - Mask off interrupt generation on the NIC 930 + * @adapter: board private structure 931 + */ 932 + static void atl1_irq_disable(struct atl1_adapter *adapter) 933 + { 934 + iowrite32(0, adapter->hw.hw_addr + REG_IMR); 935 + ioread32(adapter->hw.hw_addr + REG_IMR); 936 + synchronize_irq(adapter->pdev->irq); 324 937 } 325 938 326 939 static void atl1_clear_phy_int(struct atl1_adapter *adapter) ··· 1011 288 adapter->soft_stats.rx_bytes += smb->rx_byte_cnt; 1012 289 adapter->soft_stats.tx_bytes += smb->tx_byte_cnt; 1013 290 adapter->soft_stats.multicast += smb->rx_mcast; 1014 - adapter->soft_stats.collisions += (smb->tx_1_col + 1015 - smb->tx_2_col * 2 + 1016 - smb->tx_late_col + 1017 - smb->tx_abort_col * 1018 - adapter->hw.max_retry); 291 + adapter->soft_stats.collisions += (smb->tx_1_col + smb->tx_2_col * 2 + 292 + smb->tx_late_col + smb->tx_abort_col * adapter->hw.max_retry); 1019 293 1020 294 /* Rx Errors */ 1021 - adapter->soft_stats.rx_errors += (smb->rx_frag + 1022 - smb->rx_fcs_err + 1023 - smb->rx_len_err + 1024 - smb->rx_sz_ov + 1025 - smb->rx_rxf_ov + 1026 - smb->rx_rrd_ov + smb->rx_align_err); 295 + adapter->soft_stats.rx_errors += (smb->rx_frag + smb->rx_fcs_err + 296 + smb->rx_len_err + smb->rx_sz_ov + smb->rx_rxf_ov + 297 + smb->rx_rrd_ov + smb->rx_align_err); 1027 298 adapter->soft_stats.rx_fifo_errors += smb->rx_rxf_ov; 1028 299 adapter->soft_stats.rx_length_errors += smb->rx_len_err; 1029 300 adapter->soft_stats.rx_crc_errors += smb->rx_fcs_err; 1030 301 adapter->soft_stats.rx_frame_errors += smb->rx_align_err; 1031 302 adapter->soft_stats.rx_missed_errors 
+= (smb->rx_rrd_ov + 1032 - smb->rx_rxf_ov); 303 + smb->rx_rxf_ov); 1033 304 1034 305 adapter->soft_stats.rx_pause += smb->rx_pause; 1035 306 adapter->soft_stats.rx_rrd_ov += smb->rx_rrd_ov; ··· 1031 314 1032 315 /* Tx Errors */ 1033 316 adapter->soft_stats.tx_errors += (smb->tx_late_col + 1034 - smb->tx_abort_col + 1035 - smb->tx_underrun + smb->tx_trunc); 317 + smb->tx_abort_col + smb->tx_underrun + smb->tx_trunc); 1036 318 adapter->soft_stats.tx_fifo_errors += smb->tx_underrun; 1037 319 adapter->soft_stats.tx_aborted_errors += smb->tx_abort_col; 1038 320 adapter->soft_stats.tx_window_errors += smb->tx_late_col; ··· 1053 337 adapter->net_stats.collisions = adapter->soft_stats.collisions; 1054 338 adapter->net_stats.rx_errors = adapter->soft_stats.rx_errors; 1055 339 adapter->net_stats.rx_over_errors = 1056 - adapter->soft_stats.rx_missed_errors; 340 + adapter->soft_stats.rx_missed_errors; 1057 341 adapter->net_stats.rx_length_errors = 1058 - adapter->soft_stats.rx_length_errors; 342 + adapter->soft_stats.rx_length_errors; 1059 343 adapter->net_stats.rx_crc_errors = adapter->soft_stats.rx_crc_errors; 1060 344 adapter->net_stats.rx_frame_errors = 1061 - adapter->soft_stats.rx_frame_errors; 345 + adapter->soft_stats.rx_frame_errors; 1062 346 adapter->net_stats.rx_fifo_errors = adapter->soft_stats.rx_fifo_errors; 1063 347 adapter->net_stats.rx_missed_errors = 1064 - adapter->soft_stats.rx_missed_errors; 348 + adapter->soft_stats.rx_missed_errors; 1065 349 adapter->net_stats.tx_errors = adapter->soft_stats.tx_errors; 1066 350 adapter->net_stats.tx_fifo_errors = adapter->soft_stats.tx_fifo_errors; 1067 351 adapter->net_stats.tx_aborted_errors = 1068 - adapter->soft_stats.tx_aborted_errors; 352 + adapter->soft_stats.tx_aborted_errors; 1069 353 adapter->net_stats.tx_window_errors = 1070 - adapter->soft_stats.tx_window_errors; 354 + adapter->soft_stats.tx_window_errors; 1071 355 adapter->net_stats.tx_carrier_errors = 1072 - adapter->soft_stats.tx_carrier_errors; 356 + adapter->soft_stats.tx_carrier_errors; 357 + } 358 + 359 + /* 360 + * atl1_get_stats - Get System Network Statistics 361 + * @netdev: network interface device structure 362 + * 363 + * Returns the address of the device statistics structure. 364 + * The statistics are actually updated from the timer callback. 
365 + */ 366 + static struct net_device_stats *atl1_get_stats(struct net_device *netdev) 367 + { 368 + struct atl1_adapter *adapter = netdev_priv(netdev); 369 + return &adapter->net_stats; 370 + } 371 + 372 + static void atl1_update_mailbox(struct atl1_adapter *adapter) 373 + { 374 + unsigned long flags; 375 + u32 tpd_next_to_use; 376 + u32 rfd_next_to_use; 377 + u32 rrd_next_to_clean; 378 + u32 value; 379 + 380 + spin_lock_irqsave(&adapter->mb_lock, flags); 381 + 382 + tpd_next_to_use = atomic_read(&adapter->tpd_ring.next_to_use); 383 + rfd_next_to_use = atomic_read(&adapter->rfd_ring.next_to_use); 384 + rrd_next_to_clean = atomic_read(&adapter->rrd_ring.next_to_clean); 385 + 386 + value = ((rfd_next_to_use & MB_RFD_PROD_INDX_MASK) << 387 + MB_RFD_PROD_INDX_SHIFT) | 388 + ((rrd_next_to_clean & MB_RRD_CONS_INDX_MASK) << 389 + MB_RRD_CONS_INDX_SHIFT) | 390 + ((tpd_next_to_use & MB_TPD_PROD_INDX_MASK) << 391 + MB_TPD_PROD_INDX_SHIFT); 392 + iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX); 393 + 394 + spin_unlock_irqrestore(&adapter->mb_lock, flags); 395 + } 396 + 397 + static void atl1_clean_alloc_flag(struct atl1_adapter *adapter, 398 + struct rx_return_desc *rrd, u16 offset) 399 + { 400 + struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; 401 + 402 + while (rfd_ring->next_to_clean != (rrd->buf_indx + offset)) { 403 + rfd_ring->buffer_info[rfd_ring->next_to_clean].alloced = 0; 404 + if (++rfd_ring->next_to_clean == rfd_ring->count) { 405 + rfd_ring->next_to_clean = 0; 406 + } 407 + } 408 + } 409 + 410 + static void atl1_update_rfd_index(struct atl1_adapter *adapter, 411 + struct rx_return_desc *rrd) 412 + { 413 + u16 num_buf; 414 + 415 + num_buf = (rrd->xsz.xsum_sz.pkt_size + adapter->rx_buffer_len - 1) / 416 + adapter->rx_buffer_len; 417 + if (rrd->num_buf == num_buf) 418 + /* clean alloc flag for bad rrd */ 419 + atl1_clean_alloc_flag(adapter, rrd, num_buf); 1073 420 } 1074 421 1075 422 static void atl1_rx_checksum(struct atl1_adapter *adapter, 1076 - struct rx_return_desc *rrd, 1077 - struct sk_buff *skb) 423 + struct rx_return_desc *rrd, struct sk_buff *skb) 1078 424 { 425 + struct pci_dev *pdev = adapter->pdev; 426 + 1079 427 skb->ip_summed = CHECKSUM_NONE; 1080 428 1081 429 if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) { 1082 430 if (rrd->err_flg & (ERR_FLAG_CRC | ERR_FLAG_TRUNC | 1083 431 ERR_FLAG_CODE | ERR_FLAG_OV)) { 1084 432 adapter->hw_csum_err++; 1085 - dev_dbg(&adapter->pdev->dev, "rx checksum error\n"); 433 + dev_printk(KERN_DEBUG, &pdev->dev, 434 + "rx checksum error\n"); 1086 435 return; 1087 436 } 1088 437 } ··· 1166 385 } 1167 386 1168 387 /* IPv4, but hardware thinks its checksum is wrong */ 1169 - dev_dbg(&adapter->pdev->dev, 388 + dev_printk(KERN_DEBUG, &pdev->dev, 1170 389 "hw csum wrong, pkt_flag:%x, err_flag:%x\n", 1171 390 rrd->pkt_flg, rrd->err_flg); 1172 391 skb->ip_summed = CHECKSUM_COMPLETE; ··· 1281 500 /* rrd seems to be bad */ 1282 501 if (unlikely(i-- > 0)) { 1283 502 /* rrd may not be DMAed completely */ 1284 - dev_dbg(&adapter->pdev->dev, 503 + dev_printk(KERN_DEBUG, &adapter->pdev->dev, 1285 504 "incomplete RRD DMA transfer\n"); 1286 505 udelay(1); 1287 506 goto chk_rrd; 1288 507 } 1289 508 /* bad rrd */ 1290 - dev_dbg(&adapter->pdev->dev, "bad RRD\n"); 509 + dev_printk(KERN_DEBUG, &adapter->pdev->dev, 510 + "bad RRD\n"); 1291 511 /* see if update RFD index */ 1292 - if (rrd->num_buf > 1) { 1293 - u16 num_buf; 1294 - num_buf = 1295 - (rrd->xsz.xsum_sz.pkt_size + 1296 - adapter->rx_buffer_len - 1297 - 1) / adapter->rx_buffer_len; 1298 - if 
(rrd->num_buf == num_buf) { 1299 - /* clean alloc flag for bad rrd */ 1300 - while (rfd_ring->next_to_clean != 1301 - (rrd->buf_indx + num_buf)) { 1302 - rfd_ring->buffer_info[rfd_ring-> 1303 - next_to_clean].alloced = 0; 1304 - if (++rfd_ring->next_to_clean == 1305 - rfd_ring->count) { 1306 - rfd_ring-> 1307 - next_to_clean = 0; 1308 - } 1309 - } 1310 - } 1311 - } 512 + if (rrd->num_buf > 1) 513 + atl1_update_rfd_index(adapter, rrd); 1312 514 1313 515 /* update rrd */ 1314 516 rrd->xsz.valid = 0; ··· 1305 541 } 1306 542 rrd_ok: 1307 543 /* clean alloc flag for bad rrd */ 1308 - while (rfd_ring->next_to_clean != rrd->buf_indx) { 1309 - rfd_ring->buffer_info[rfd_ring->next_to_clean].alloced = 1310 - 0; 1311 - if (++rfd_ring->next_to_clean == rfd_ring->count) 1312 - rfd_ring->next_to_clean = 0; 1313 - } 544 + atl1_clean_alloc_flag(adapter, rrd, 0); 1314 545 1315 546 buffer_info = &rfd_ring->buffer_info[rrd->buf_indx]; 1316 547 if (++rfd_ring->next_to_clean == rfd_ring->count) ··· 1421 662 netif_wake_queue(adapter->netdev); 1422 663 } 1423 664 1424 - static void atl1_check_for_link(struct atl1_adapter *adapter) 1425 - { 1426 - struct net_device *netdev = adapter->netdev; 1427 - u16 phy_data = 0; 1428 - 1429 - spin_lock(&adapter->lock); 1430 - adapter->phy_timer_pending = false; 1431 - atl1_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data); 1432 - atl1_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data); 1433 - spin_unlock(&adapter->lock); 1434 - 1435 - /* notify upper layer link down ASAP */ 1436 - if (!(phy_data & BMSR_LSTATUS)) { /* Link Down */ 1437 - if (netif_carrier_ok(netdev)) { /* old link state: Up */ 1438 - dev_info(&adapter->pdev->dev, "%s link is down\n", 1439 - netdev->name); 1440 - adapter->link_speed = SPEED_0; 1441 - netif_carrier_off(netdev); 1442 - netif_stop_queue(netdev); 1443 - } 1444 - } 1445 - schedule_work(&adapter->link_chg_task); 1446 - } 1447 - 1448 - /* 1449 - * atl1_intr - Interrupt Handler 1450 - * @irq: interrupt number 1451 - * @data: pointer to a network interface device structure 1452 - * @pt_regs: CPU registers structure 1453 - */ 1454 - static irqreturn_t atl1_intr(int irq, void *data) 1455 - { 1456 - /*struct atl1_adapter *adapter = ((struct net_device *)data)->priv;*/ 1457 - struct atl1_adapter *adapter = netdev_priv(data); 1458 - u32 status; 1459 - u8 update_rx; 1460 - int max_ints = 10; 1461 - 1462 - status = adapter->cmb.cmb->int_stats; 1463 - if (!status) 1464 - return IRQ_NONE; 1465 - 1466 - update_rx = 0; 1467 - 1468 - do { 1469 - /* clear CMB interrupt status at once */ 1470 - adapter->cmb.cmb->int_stats = 0; 1471 - 1472 - if (status & ISR_GPHY) /* clear phy status */ 1473 - atl1_clear_phy_int(adapter); 1474 - 1475 - /* clear ISR status, and Enable CMB DMA/Disable Interrupt */ 1476 - iowrite32(status | ISR_DIS_INT, adapter->hw.hw_addr + REG_ISR); 1477 - 1478 - /* check if SMB intr */ 1479 - if (status & ISR_SMB) 1480 - atl1_inc_smb(adapter); 1481 - 1482 - /* check if PCIE PHY Link down */ 1483 - if (status & ISR_PHY_LINKDOWN) { 1484 - dev_dbg(&adapter->pdev->dev, "pcie phy link down %x\n", 1485 - status); 1486 - if (netif_running(adapter->netdev)) { /* reset MAC */ 1487 - iowrite32(0, adapter->hw.hw_addr + REG_IMR); 1488 - schedule_work(&adapter->pcie_dma_to_rst_task); 1489 - return IRQ_HANDLED; 1490 - } 1491 - } 1492 - 1493 - /* check if DMA read/write error ? 
*/ 1494 - if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) { 1495 - dev_dbg(&adapter->pdev->dev, 1496 - "pcie DMA r/w error (status = 0x%x)\n", 1497 - status); 1498 - iowrite32(0, adapter->hw.hw_addr + REG_IMR); 1499 - schedule_work(&adapter->pcie_dma_to_rst_task); 1500 - return IRQ_HANDLED; 1501 - } 1502 - 1503 - /* link event */ 1504 - if (status & ISR_GPHY) { 1505 - adapter->soft_stats.tx_carrier_errors++; 1506 - atl1_check_for_link(adapter); 1507 - } 1508 - 1509 - /* transmit event */ 1510 - if (status & ISR_CMB_TX) 1511 - atl1_intr_tx(adapter); 1512 - 1513 - /* rx exception */ 1514 - if (unlikely(status & (ISR_RXF_OV | ISR_RFD_UNRUN | 1515 - ISR_RRD_OV | ISR_HOST_RFD_UNRUN | 1516 - ISR_HOST_RRD_OV | ISR_CMB_RX))) { 1517 - if (status & (ISR_RXF_OV | ISR_RFD_UNRUN | 1518 - ISR_RRD_OV | ISR_HOST_RFD_UNRUN | 1519 - ISR_HOST_RRD_OV)) 1520 - dev_dbg(&adapter->pdev->dev, 1521 - "rx exception, ISR = 0x%x\n", status); 1522 - atl1_intr_rx(adapter); 1523 - } 1524 - 1525 - if (--max_ints < 0) 1526 - break; 1527 - 1528 - } while ((status = adapter->cmb.cmb->int_stats)); 1529 - 1530 - /* re-enable Interrupt */ 1531 - iowrite32(ISR_DIS_SMB | ISR_DIS_DMA, adapter->hw.hw_addr + REG_ISR); 1532 - return IRQ_HANDLED; 1533 - } 1534 - 1535 - /* 1536 - * atl1_set_multi - Multicast and Promiscuous mode set 1537 - * @netdev: network interface device structure 1538 - * 1539 - * The set_multi entry point is called whenever the multicast address 1540 - * list or the network interface flags are updated. This routine is 1541 - * responsible for configuring the hardware for proper multicast, 1542 - * promiscuous mode, and all-multi behavior. 1543 - */ 1544 - static void atl1_set_multi(struct net_device *netdev) 1545 - { 1546 - struct atl1_adapter *adapter = netdev_priv(netdev); 1547 - struct atl1_hw *hw = &adapter->hw; 1548 - struct dev_mc_list *mc_ptr; 1549 - u32 rctl; 1550 - u32 hash_value; 1551 - 1552 - /* Check for Promiscuous and All Multicast modes */ 1553 - rctl = ioread32(hw->hw_addr + REG_MAC_CTRL); 1554 - if (netdev->flags & IFF_PROMISC) 1555 - rctl |= MAC_CTRL_PROMIS_EN; 1556 - else if (netdev->flags & IFF_ALLMULTI) { 1557 - rctl |= MAC_CTRL_MC_ALL_EN; 1558 - rctl &= ~MAC_CTRL_PROMIS_EN; 1559 - } else 1560 - rctl &= ~(MAC_CTRL_PROMIS_EN | MAC_CTRL_MC_ALL_EN); 1561 - 1562 - iowrite32(rctl, hw->hw_addr + REG_MAC_CTRL); 1563 - 1564 - /* clear the old settings from the multicast hash table */ 1565 - iowrite32(0, hw->hw_addr + REG_RX_HASH_TABLE); 1566 - iowrite32(0, (hw->hw_addr + REG_RX_HASH_TABLE) + (1 << 2)); 1567 - 1568 - /* compute mc addresses' hash value ,and put it into hash table */ 1569 - for (mc_ptr = netdev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) { 1570 - hash_value = atl1_hash_mc_addr(hw, mc_ptr->dmi_addr); 1571 - atl1_hash_set(hw, hash_value); 1572 - } 1573 - } 1574 - 1575 - static void atl1_setup_mac_ctrl(struct atl1_adapter *adapter) 1576 - { 1577 - u32 value; 1578 - struct atl1_hw *hw = &adapter->hw; 1579 - struct net_device *netdev = adapter->netdev; 1580 - /* Config MAC CTRL Register */ 1581 - value = MAC_CTRL_TX_EN | MAC_CTRL_RX_EN; 1582 - /* duplex */ 1583 - if (FULL_DUPLEX == adapter->link_duplex) 1584 - value |= MAC_CTRL_DUPLX; 1585 - /* speed */ 1586 - value |= ((u32) ((SPEED_1000 == adapter->link_speed) ? 
1587 - MAC_CTRL_SPEED_1000 : MAC_CTRL_SPEED_10_100) << 1588 - MAC_CTRL_SPEED_SHIFT); 1589 - /* flow control */ 1590 - value |= (MAC_CTRL_TX_FLOW | MAC_CTRL_RX_FLOW); 1591 - /* PAD & CRC */ 1592 - value |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD); 1593 - /* preamble length */ 1594 - value |= (((u32) adapter->hw.preamble_len 1595 - & MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT); 1596 - /* vlan */ 1597 - if (adapter->vlgrp) 1598 - value |= MAC_CTRL_RMV_VLAN; 1599 - /* rx checksum 1600 - if (adapter->rx_csum) 1601 - value |= MAC_CTRL_RX_CHKSUM_EN; 1602 - */ 1603 - /* filter mode */ 1604 - value |= MAC_CTRL_BC_EN; 1605 - if (netdev->flags & IFF_PROMISC) 1606 - value |= MAC_CTRL_PROMIS_EN; 1607 - else if (netdev->flags & IFF_ALLMULTI) 1608 - value |= MAC_CTRL_MC_ALL_EN; 1609 - /* value |= MAC_CTRL_LOOPBACK; */ 1610 - iowrite32(value, hw->hw_addr + REG_MAC_CTRL); 1611 - } 1612 - 1613 - static u32 atl1_check_link(struct atl1_adapter *adapter) 1614 - { 1615 - struct atl1_hw *hw = &adapter->hw; 1616 - struct net_device *netdev = adapter->netdev; 1617 - u32 ret_val; 1618 - u16 speed, duplex, phy_data; 1619 - int reconfig = 0; 1620 - 1621 - /* MII_BMSR must read twice */ 1622 - atl1_read_phy_reg(hw, MII_BMSR, &phy_data); 1623 - atl1_read_phy_reg(hw, MII_BMSR, &phy_data); 1624 - if (!(phy_data & BMSR_LSTATUS)) { /* link down */ 1625 - if (netif_carrier_ok(netdev)) { /* old link state: Up */ 1626 - dev_info(&adapter->pdev->dev, "link is down\n"); 1627 - adapter->link_speed = SPEED_0; 1628 - netif_carrier_off(netdev); 1629 - netif_stop_queue(netdev); 1630 - } 1631 - return ATL1_SUCCESS; 1632 - } 1633 - 1634 - /* Link Up */ 1635 - ret_val = atl1_get_speed_and_duplex(hw, &speed, &duplex); 1636 - if (ret_val) 1637 - return ret_val; 1638 - 1639 - switch (hw->media_type) { 1640 - case MEDIA_TYPE_1000M_FULL: 1641 - if (speed != SPEED_1000 || duplex != FULL_DUPLEX) 1642 - reconfig = 1; 1643 - break; 1644 - case MEDIA_TYPE_100M_FULL: 1645 - if (speed != SPEED_100 || duplex != FULL_DUPLEX) 1646 - reconfig = 1; 1647 - break; 1648 - case MEDIA_TYPE_100M_HALF: 1649 - if (speed != SPEED_100 || duplex != HALF_DUPLEX) 1650 - reconfig = 1; 1651 - break; 1652 - case MEDIA_TYPE_10M_FULL: 1653 - if (speed != SPEED_10 || duplex != FULL_DUPLEX) 1654 - reconfig = 1; 1655 - break; 1656 - case MEDIA_TYPE_10M_HALF: 1657 - if (speed != SPEED_10 || duplex != HALF_DUPLEX) 1658 - reconfig = 1; 1659 - break; 1660 - } 1661 - 1662 - /* link result is our setting */ 1663 - if (!reconfig) { 1664 - if (adapter->link_speed != speed 1665 - || adapter->link_duplex != duplex) { 1666 - adapter->link_speed = speed; 1667 - adapter->link_duplex = duplex; 1668 - atl1_setup_mac_ctrl(adapter); 1669 - dev_info(&adapter->pdev->dev, 1670 - "%s link is up %d Mbps %s\n", 1671 - netdev->name, adapter->link_speed, 1672 - adapter->link_duplex == FULL_DUPLEX ? 
1673 - "full duplex" : "half duplex"); 1674 - } 1675 - if (!netif_carrier_ok(netdev)) { /* Link down -> Up */ 1676 - netif_carrier_on(netdev); 1677 - netif_wake_queue(netdev); 1678 - } 1679 - return ATL1_SUCCESS; 1680 - } 1681 - 1682 - /* change orignal link status */ 1683 - if (netif_carrier_ok(netdev)) { 1684 - adapter->link_speed = SPEED_0; 1685 - netif_carrier_off(netdev); 1686 - netif_stop_queue(netdev); 1687 - } 1688 - 1689 - if (hw->media_type != MEDIA_TYPE_AUTO_SENSOR && 1690 - hw->media_type != MEDIA_TYPE_1000M_FULL) { 1691 - switch (hw->media_type) { 1692 - case MEDIA_TYPE_100M_FULL: 1693 - phy_data = MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 | 1694 - MII_CR_RESET; 1695 - break; 1696 - case MEDIA_TYPE_100M_HALF: 1697 - phy_data = MII_CR_SPEED_100 | MII_CR_RESET; 1698 - break; 1699 - case MEDIA_TYPE_10M_FULL: 1700 - phy_data = 1701 - MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET; 1702 - break; 1703 - default: /* MEDIA_TYPE_10M_HALF: */ 1704 - phy_data = MII_CR_SPEED_10 | MII_CR_RESET; 1705 - break; 1706 - } 1707 - atl1_write_phy_reg(hw, MII_BMCR, phy_data); 1708 - return ATL1_SUCCESS; 1709 - } 1710 - 1711 - /* auto-neg, insert timer to re-config phy */ 1712 - if (!adapter->phy_timer_pending) { 1713 - adapter->phy_timer_pending = true; 1714 - mod_timer(&adapter->phy_config_timer, jiffies + 3 * HZ); 1715 - } 1716 - 1717 - return ATL1_SUCCESS; 1718 - } 1719 - 1720 - static void set_flow_ctrl_old(struct atl1_adapter *adapter) 1721 - { 1722 - u32 hi, lo, value; 1723 - 1724 - /* RFD Flow Control */ 1725 - value = adapter->rfd_ring.count; 1726 - hi = value / 16; 1727 - if (hi < 2) 1728 - hi = 2; 1729 - lo = value * 7 / 8; 1730 - 1731 - value = ((hi & RXQ_RXF_PAUSE_TH_HI_MASK) << RXQ_RXF_PAUSE_TH_HI_SHIFT) | 1732 - ((lo & RXQ_RXF_PAUSE_TH_LO_MASK) << RXQ_RXF_PAUSE_TH_LO_SHIFT); 1733 - iowrite32(value, adapter->hw.hw_addr + REG_RXQ_RXF_PAUSE_THRESH); 1734 - 1735 - /* RRD Flow Control */ 1736 - value = adapter->rrd_ring.count; 1737 - lo = value / 16; 1738 - hi = value * 7 / 8; 1739 - if (lo < 2) 1740 - lo = 2; 1741 - value = ((hi & RXQ_RRD_PAUSE_TH_HI_MASK) << RXQ_RRD_PAUSE_TH_HI_SHIFT) | 1742 - ((lo & RXQ_RRD_PAUSE_TH_LO_MASK) << RXQ_RRD_PAUSE_TH_LO_SHIFT); 1743 - iowrite32(value, adapter->hw.hw_addr + REG_RXQ_RRD_PAUSE_THRESH); 1744 - } 1745 - 1746 - static void set_flow_ctrl_new(struct atl1_hw *hw) 1747 - { 1748 - u32 hi, lo, value; 1749 - 1750 - /* RXF Flow Control */ 1751 - value = ioread32(hw->hw_addr + REG_SRAM_RXF_LEN); 1752 - lo = value / 16; 1753 - if (lo < 192) 1754 - lo = 192; 1755 - hi = value * 7 / 8; 1756 - if (hi < lo) 1757 - hi = lo + 16; 1758 - value = ((hi & RXQ_RXF_PAUSE_TH_HI_MASK) << RXQ_RXF_PAUSE_TH_HI_SHIFT) | 1759 - ((lo & RXQ_RXF_PAUSE_TH_LO_MASK) << RXQ_RXF_PAUSE_TH_LO_SHIFT); 1760 - iowrite32(value, hw->hw_addr + REG_RXQ_RXF_PAUSE_THRESH); 1761 - 1762 - /* RRD Flow Control */ 1763 - value = ioread32(hw->hw_addr + REG_SRAM_RRD_LEN); 1764 - lo = value / 8; 1765 - hi = value * 7 / 8; 1766 - if (lo < 2) 1767 - lo = 2; 1768 - if (hi < lo) 1769 - hi = lo + 3; 1770 - value = ((hi & RXQ_RRD_PAUSE_TH_HI_MASK) << RXQ_RRD_PAUSE_TH_HI_SHIFT) | 1771 - ((lo & RXQ_RRD_PAUSE_TH_LO_MASK) << RXQ_RRD_PAUSE_TH_LO_SHIFT); 1772 - iowrite32(value, hw->hw_addr + REG_RXQ_RRD_PAUSE_THRESH); 1773 - } 1774 - 1775 - /* 1776 - * atl1_configure - Configure Transmit&Receive Unit after Reset 1777 - * @adapter: board private structure 1778 - * 1779 - * Configure the Tx /Rx unit of the MAC after a reset. 
1780 - */ 1781 - static u32 atl1_configure(struct atl1_adapter *adapter) 1782 - { 1783 - struct atl1_hw *hw = &adapter->hw; 1784 - u32 value; 1785 - 1786 - /* clear interrupt status */ 1787 - iowrite32(0xffffffff, adapter->hw.hw_addr + REG_ISR); 1788 - 1789 - /* set MAC Address */ 1790 - value = (((u32) hw->mac_addr[2]) << 24) | 1791 - (((u32) hw->mac_addr[3]) << 16) | 1792 - (((u32) hw->mac_addr[4]) << 8) | 1793 - (((u32) hw->mac_addr[5])); 1794 - iowrite32(value, hw->hw_addr + REG_MAC_STA_ADDR); 1795 - value = (((u32) hw->mac_addr[0]) << 8) | (((u32) hw->mac_addr[1])); 1796 - iowrite32(value, hw->hw_addr + (REG_MAC_STA_ADDR + 4)); 1797 - 1798 - /* tx / rx ring */ 1799 - 1800 - /* HI base address */ 1801 - iowrite32((u32) ((adapter->tpd_ring.dma & 0xffffffff00000000ULL) >> 32), 1802 - hw->hw_addr + REG_DESC_BASE_ADDR_HI); 1803 - /* LO base address */ 1804 - iowrite32((u32) (adapter->rfd_ring.dma & 0x00000000ffffffffULL), 1805 - hw->hw_addr + REG_DESC_RFD_ADDR_LO); 1806 - iowrite32((u32) (adapter->rrd_ring.dma & 0x00000000ffffffffULL), 1807 - hw->hw_addr + REG_DESC_RRD_ADDR_LO); 1808 - iowrite32((u32) (adapter->tpd_ring.dma & 0x00000000ffffffffULL), 1809 - hw->hw_addr + REG_DESC_TPD_ADDR_LO); 1810 - iowrite32((u32) (adapter->cmb.dma & 0x00000000ffffffffULL), 1811 - hw->hw_addr + REG_DESC_CMB_ADDR_LO); 1812 - iowrite32((u32) (adapter->smb.dma & 0x00000000ffffffffULL), 1813 - hw->hw_addr + REG_DESC_SMB_ADDR_LO); 1814 - 1815 - /* element count */ 1816 - value = adapter->rrd_ring.count; 1817 - value <<= 16; 1818 - value += adapter->rfd_ring.count; 1819 - iowrite32(value, hw->hw_addr + REG_DESC_RFD_RRD_RING_SIZE); 1820 - iowrite32(adapter->tpd_ring.count, hw->hw_addr + REG_DESC_TPD_RING_SIZE); 1821 - 1822 - /* Load Ptr */ 1823 - iowrite32(1, hw->hw_addr + REG_LOAD_PTR); 1824 - 1825 - /* config Mailbox */ 1826 - value = ((atomic_read(&adapter->tpd_ring.next_to_use) 1827 - & MB_TPD_PROD_INDX_MASK) << MB_TPD_PROD_INDX_SHIFT) | 1828 - ((atomic_read(&adapter->rrd_ring.next_to_clean) 1829 - & MB_RRD_CONS_INDX_MASK) << MB_RRD_CONS_INDX_SHIFT) | 1830 - ((atomic_read(&adapter->rfd_ring.next_to_use) 1831 - & MB_RFD_PROD_INDX_MASK) << MB_RFD_PROD_INDX_SHIFT); 1832 - iowrite32(value, hw->hw_addr + REG_MAILBOX); 1833 - 1834 - /* config IPG/IFG */ 1835 - value = (((u32) hw->ipgt & MAC_IPG_IFG_IPGT_MASK) 1836 - << MAC_IPG_IFG_IPGT_SHIFT) | 1837 - (((u32) hw->min_ifg & MAC_IPG_IFG_MIFG_MASK) 1838 - << MAC_IPG_IFG_MIFG_SHIFT) | 1839 - (((u32) hw->ipgr1 & MAC_IPG_IFG_IPGR1_MASK) 1840 - << MAC_IPG_IFG_IPGR1_SHIFT) | 1841 - (((u32) hw->ipgr2 & MAC_IPG_IFG_IPGR2_MASK) 1842 - << MAC_IPG_IFG_IPGR2_SHIFT); 1843 - iowrite32(value, hw->hw_addr + REG_MAC_IPG_IFG); 1844 - 1845 - /* config Half-Duplex Control */ 1846 - value = ((u32) hw->lcol & MAC_HALF_DUPLX_CTRL_LCOL_MASK) | 1847 - (((u32) hw->max_retry & MAC_HALF_DUPLX_CTRL_RETRY_MASK) 1848 - << MAC_HALF_DUPLX_CTRL_RETRY_SHIFT) | 1849 - MAC_HALF_DUPLX_CTRL_EXC_DEF_EN | 1850 - (0xa << MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT) | 1851 - (((u32) hw->jam_ipg & MAC_HALF_DUPLX_CTRL_JAMIPG_MASK) 1852 - << MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT); 1853 - iowrite32(value, hw->hw_addr + REG_MAC_HALF_DUPLX_CTRL); 1854 - 1855 - /* set Interrupt Moderator Timer */ 1856 - iowrite16(adapter->imt, hw->hw_addr + REG_IRQ_MODU_TIMER_INIT); 1857 - iowrite32(MASTER_CTRL_ITIMER_EN, hw->hw_addr + REG_MASTER_CTRL); 1858 - 1859 - /* set Interrupt Clear Timer */ 1860 - iowrite16(adapter->ict, hw->hw_addr + REG_CMBDISDMA_TIMER); 1861 - 1862 - /* set MTU, 4 : VLAN */ 1863 - iowrite32(hw->max_frame_size + 4, 
hw->hw_addr + REG_MTU); 1864 - 1865 - /* jumbo size & rrd retirement timer */ 1866 - value = (((u32) hw->rx_jumbo_th & RXQ_JMBOSZ_TH_MASK) 1867 - << RXQ_JMBOSZ_TH_SHIFT) | 1868 - (((u32) hw->rx_jumbo_lkah & RXQ_JMBO_LKAH_MASK) 1869 - << RXQ_JMBO_LKAH_SHIFT) | 1870 - (((u32) hw->rrd_ret_timer & RXQ_RRD_TIMER_MASK) 1871 - << RXQ_RRD_TIMER_SHIFT); 1872 - iowrite32(value, hw->hw_addr + REG_RXQ_JMBOSZ_RRDTIM); 1873 - 1874 - /* Flow Control */ 1875 - switch (hw->dev_rev) { 1876 - case 0x8001: 1877 - case 0x9001: 1878 - case 0x9002: 1879 - case 0x9003: 1880 - set_flow_ctrl_old(adapter); 1881 - break; 1882 - default: 1883 - set_flow_ctrl_new(hw); 1884 - break; 1885 - } 1886 - 1887 - /* config TXQ */ 1888 - value = (((u32) hw->tpd_burst & TXQ_CTRL_TPD_BURST_NUM_MASK) 1889 - << TXQ_CTRL_TPD_BURST_NUM_SHIFT) | 1890 - (((u32) hw->txf_burst & TXQ_CTRL_TXF_BURST_NUM_MASK) 1891 - << TXQ_CTRL_TXF_BURST_NUM_SHIFT) | 1892 - (((u32) hw->tpd_fetch_th & TXQ_CTRL_TPD_FETCH_TH_MASK) 1893 - << TXQ_CTRL_TPD_FETCH_TH_SHIFT) | TXQ_CTRL_ENH_MODE | TXQ_CTRL_EN; 1894 - iowrite32(value, hw->hw_addr + REG_TXQ_CTRL); 1895 - 1896 - /* min tpd fetch gap & tx jumbo packet size threshold for taskoffload */ 1897 - value = (((u32) hw->tx_jumbo_task_th & TX_JUMBO_TASK_TH_MASK) 1898 - << TX_JUMBO_TASK_TH_SHIFT) | 1899 - (((u32) hw->tpd_fetch_gap & TX_TPD_MIN_IPG_MASK) 1900 - << TX_TPD_MIN_IPG_SHIFT); 1901 - iowrite32(value, hw->hw_addr + REG_TX_JUMBO_TASK_TH_TPD_IPG); 1902 - 1903 - /* config RXQ */ 1904 - value = (((u32) hw->rfd_burst & RXQ_CTRL_RFD_BURST_NUM_MASK) 1905 - << RXQ_CTRL_RFD_BURST_NUM_SHIFT) | 1906 - (((u32) hw->rrd_burst & RXQ_CTRL_RRD_BURST_THRESH_MASK) 1907 - << RXQ_CTRL_RRD_BURST_THRESH_SHIFT) | 1908 - (((u32) hw->rfd_fetch_gap & RXQ_CTRL_RFD_PREF_MIN_IPG_MASK) 1909 - << RXQ_CTRL_RFD_PREF_MIN_IPG_SHIFT) | 1910 - RXQ_CTRL_CUT_THRU_EN | RXQ_CTRL_EN; 1911 - iowrite32(value, hw->hw_addr + REG_RXQ_CTRL); 1912 - 1913 - /* config DMA Engine */ 1914 - value = ((((u32) hw->dmar_block) & DMA_CTRL_DMAR_BURST_LEN_MASK) 1915 - << DMA_CTRL_DMAR_BURST_LEN_SHIFT) | 1916 - ((((u32) hw->dmaw_block) & DMA_CTRL_DMAR_BURST_LEN_MASK) 1917 - << DMA_CTRL_DMAR_BURST_LEN_SHIFT) | 1918 - DMA_CTRL_DMAR_EN | DMA_CTRL_DMAW_EN; 1919 - value |= (u32) hw->dma_ord; 1920 - if (atl1_rcb_128 == hw->rcb_value) 1921 - value |= DMA_CTRL_RCB_VALUE; 1922 - iowrite32(value, hw->hw_addr + REG_DMA_CTRL); 1923 - 1924 - /* config CMB / SMB */ 1925 - value = hw->cmb_rrd | ((u32) hw->cmb_tpd << 16); 1926 - iowrite32(value, hw->hw_addr + REG_CMB_WRITE_TH); 1927 - value = hw->cmb_rx_timer | ((u32) hw->cmb_tx_timer << 16); 1928 - iowrite32(value, hw->hw_addr + REG_CMB_WRITE_TIMER); 1929 - iowrite32(hw->smb_timer, hw->hw_addr + REG_SMB_TIMER); 1930 - 1931 - /* --- enable CMB / SMB */ 1932 - value = CSMB_CTRL_CMB_EN | CSMB_CTRL_SMB_EN; 1933 - iowrite32(value, hw->hw_addr + REG_CSMB_CTRL); 1934 - 1935 - value = ioread32(adapter->hw.hw_addr + REG_ISR); 1936 - if (unlikely((value & ISR_PHY_LINKDOWN) != 0)) 1937 - value = 1; /* config failed */ 1938 - else 1939 - value = 0; 1940 - 1941 - /* clear all interrupt status */ 1942 - iowrite32(0x3fffffff, adapter->hw.hw_addr + REG_ISR); 1943 - iowrite32(0, adapter->hw.hw_addr + REG_ISR); 1944 - return value; 1945 - } 1946 - 1947 - /* 1948 - * atl1_irq_disable - Mask off interrupt generation on the NIC 1949 - * @adapter: board private structure 1950 - */ 1951 - static void atl1_irq_disable(struct atl1_adapter *adapter) 1952 - { 1953 - atomic_inc(&adapter->irq_sem); 1954 - iowrite32(0, adapter->hw.hw_addr + REG_IMR); 1955 - 
ioread32(adapter->hw.hw_addr + REG_IMR); 1956 - synchronize_irq(adapter->pdev->irq); 1957 - } 1958 - 1959 - static void atl1_vlan_rx_register(struct net_device *netdev, 1960 - struct vlan_group *grp) 1961 - { 1962 - struct atl1_adapter *adapter = netdev_priv(netdev); 1963 - unsigned long flags; 1964 - u32 ctrl; 1965 - 1966 - spin_lock_irqsave(&adapter->lock, flags); 1967 - /* atl1_irq_disable(adapter); */ 1968 - adapter->vlgrp = grp; 1969 - 1970 - if (grp) { 1971 - /* enable VLAN tag insert/strip */ 1972 - ctrl = ioread32(adapter->hw.hw_addr + REG_MAC_CTRL); 1973 - ctrl |= MAC_CTRL_RMV_VLAN; 1974 - iowrite32(ctrl, adapter->hw.hw_addr + REG_MAC_CTRL); 1975 - } else { 1976 - /* disable VLAN tag insert/strip */ 1977 - ctrl = ioread32(adapter->hw.hw_addr + REG_MAC_CTRL); 1978 - ctrl &= ~MAC_CTRL_RMV_VLAN; 1979 - iowrite32(ctrl, adapter->hw.hw_addr + REG_MAC_CTRL); 1980 - } 1981 - 1982 - /* atl1_irq_enable(adapter); */ 1983 - spin_unlock_irqrestore(&adapter->lock, flags); 1984 - } 1985 - 1986 - static void atl1_restore_vlan(struct atl1_adapter *adapter) 1987 - { 1988 - atl1_vlan_rx_register(adapter->netdev, adapter->vlgrp); 1989 - } 1990 - 1991 665 static u16 tpd_avail(struct atl1_tpd_ring *tpd_ring) 1992 666 { 1993 667 u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean); 1994 668 u16 next_to_use = atomic_read(&tpd_ring->next_to_use); 1995 - return ((next_to_clean > 1996 - next_to_use) ? next_to_clean - next_to_use - 1997 - 1 : tpd_ring->count + next_to_clean - next_to_use - 1); 669 + return ((next_to_clean > next_to_use) ? 670 + next_to_clean - next_to_use - 1 : 671 + tpd_ring->count + next_to_clean - next_to_use - 1); 1998 672 } 1999 673 2000 674 static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb, ··· 1450 1258 iph->tot_len = 0; 1451 1259 iph->check = 0; 1452 1260 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, 1453 - iph->daddr, 0, 1454 - IPPROTO_TCP, 1455 - 0); 1261 + iph->daddr, 0, IPPROTO_TCP, 0); 1456 1262 ipofst = skb_network_offset(skb); 1457 1263 if (ipofst != ENET_HEADER_SIZE) /* 802.3 frame */ 1458 1264 tso->tsopl |= 1 << TSO_PARAM_ETHTYPE_SHIFT; ··· 1458 1268 tso->tsopl |= (iph->ihl & 1459 1269 CSUM_PARAM_IPHL_MASK) << CSUM_PARAM_IPHL_SHIFT; 1460 1270 tso->tsopl |= (tcp_hdrlen(skb) & 1461 - TSO_PARAM_TCPHDRLEN_MASK) << TSO_PARAM_TCPHDRLEN_SHIFT; 1271 + TSO_PARAM_TCPHDRLEN_MASK) << 1272 + TSO_PARAM_TCPHDRLEN_SHIFT; 1462 1273 tso->tsopl |= (skb_shinfo(skb)->gso_size & 1463 1274 TSO_PARAM_MSS_MASK) << TSO_PARAM_MSS_SHIFT; 1464 1275 tso->tsopl |= 1 << TSO_PARAM_IPCKSUM_SHIFT; ··· 1472 1281 } 1473 1282 1474 1283 static int atl1_tx_csum(struct atl1_adapter *adapter, struct sk_buff *skb, 1475 - struct csum_param *csum) 1284 + struct csum_param *csum) 1476 1285 { 1477 1286 u8 css, cso; 1478 1287 ··· 1480 1289 cso = skb_transport_offset(skb); 1481 1290 css = cso + skb->csum_offset; 1482 1291 if (unlikely(cso & 0x1)) { 1483 - dev_dbg(&adapter->pdev->dev, 1292 + dev_printk(KERN_DEBUG, &adapter->pdev->dev, 1484 1293 "payload offset not an even number\n"); 1485 1294 return -1; 1486 1295 } ··· 1495 1304 return true; 1496 1305 } 1497 1306 1498 - static void atl1_tx_map(struct atl1_adapter *adapter, 1499 - struct sk_buff *skb, bool tcp_seg) 1307 + static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb, 1308 + bool tcp_seg) 1500 1309 { 1501 1310 /* We enter this function holding a spinlock. 
*/ 1502 1311 struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; ··· 1533 1342 1534 1343 if (first_buf_len > proto_hdr_len) { 1535 1344 len12 = first_buf_len - proto_hdr_len; 1536 - m = (len12 + MAX_TX_BUF_LEN - 1) / MAX_TX_BUF_LEN; 1345 + m = (len12 + ATL1_MAX_TX_BUF_LEN - 1) / 1346 + ATL1_MAX_TX_BUF_LEN; 1537 1347 for (i = 0; i < m; i++) { 1538 1348 buffer_info = 1539 1349 &tpd_ring->buffer_info[tpd_next_to_use]; 1540 1350 buffer_info->skb = NULL; 1541 1351 buffer_info->length = 1542 - (MAX_TX_BUF_LEN >= 1543 - len12) ? MAX_TX_BUF_LEN : len12; 1352 + (ATL1_MAX_TX_BUF_LEN >= 1353 + len12) ? ATL1_MAX_TX_BUF_LEN : len12; 1544 1354 len12 -= buffer_info->length; 1545 1355 page = virt_to_page(skb->data + 1546 - (proto_hdr_len + 1547 - i * MAX_TX_BUF_LEN)); 1356 + (proto_hdr_len + 1357 + i * ATL1_MAX_TX_BUF_LEN)); 1548 1358 offset = (unsigned long)(skb->data + 1549 - (proto_hdr_len + 1550 - i * MAX_TX_BUF_LEN)) & 1551 - ~PAGE_MASK; 1552 - buffer_info->dma = 1553 - pci_map_page(adapter->pdev, page, offset, 1554 - buffer_info->length, 1555 - PCI_DMA_TODEVICE); 1359 + (proto_hdr_len + 1360 + i * ATL1_MAX_TX_BUF_LEN)) & ~PAGE_MASK; 1361 + buffer_info->dma = pci_map_page(adapter->pdev, 1362 + page, offset, buffer_info->length, 1363 + PCI_DMA_TODEVICE); 1556 1364 if (++tpd_next_to_use == tpd_ring->count) 1557 1365 tpd_next_to_use = 0; 1558 1366 } ··· 1562 1372 page = virt_to_page(skb->data); 1563 1373 offset = (unsigned long)skb->data & ~PAGE_MASK; 1564 1374 buffer_info->dma = pci_map_page(adapter->pdev, page, 1565 - offset, first_buf_len, 1566 - PCI_DMA_TODEVICE); 1375 + offset, first_buf_len, PCI_DMA_TODEVICE); 1567 1376 if (++tpd_next_to_use == tpd_ring->count) 1568 1377 tpd_next_to_use = 0; 1569 1378 } ··· 1574 1385 frag = &skb_shinfo(skb)->frags[f]; 1575 1386 lenf = frag->size; 1576 1387 1577 - m = (lenf + MAX_TX_BUF_LEN - 1) / MAX_TX_BUF_LEN; 1388 + m = (lenf + ATL1_MAX_TX_BUF_LEN - 1) / ATL1_MAX_TX_BUF_LEN; 1578 1389 for (i = 0; i < m; i++) { 1579 1390 buffer_info = &tpd_ring->buffer_info[tpd_next_to_use]; 1580 1391 if (unlikely(buffer_info->skb)) 1581 1392 BUG(); 1582 1393 buffer_info->skb = NULL; 1583 - buffer_info->length = 1584 - (lenf > MAX_TX_BUF_LEN) ? MAX_TX_BUF_LEN : lenf; 1394 + buffer_info->length = (lenf > ATL1_MAX_TX_BUF_LEN) ? 1395 + ATL1_MAX_TX_BUF_LEN : lenf; 1585 1396 lenf -= buffer_info->length; 1586 - buffer_info->dma = 1587 - pci_map_page(adapter->pdev, frag->page, 1588 - frag->page_offset + i * MAX_TX_BUF_LEN, 1589 - buffer_info->length, PCI_DMA_TODEVICE); 1397 + buffer_info->dma = pci_map_page(adapter->pdev, 1398 + frag->page, 1399 + frag->page_offset + (i * ATL1_MAX_TX_BUF_LEN), 1400 + buffer_info->length, PCI_DMA_TODEVICE); 1590 1401 1591 1402 if (++tpd_next_to_use == tpd_ring->count) 1592 1403 tpd_next_to_use = 0; ··· 1598 1409 } 1599 1410 1600 1411 static void atl1_tx_queue(struct atl1_adapter *adapter, int count, 1601 - union tpd_descr *descr) 1412 + union tpd_descr *descr) 1602 1413 { 1603 1414 /* We enter this function holding a spinlock. 
*/ 1604 1415 struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; ··· 1642 1453 atomic_set(&tpd_ring->next_to_use, (int)tpd_next_to_use); 1643 1454 } 1644 1455 1645 - static void atl1_update_mailbox(struct atl1_adapter *adapter) 1646 - { 1647 - unsigned long flags; 1648 - u32 tpd_next_to_use; 1649 - u32 rfd_next_to_use; 1650 - u32 rrd_next_to_clean; 1651 - u32 value; 1652 - 1653 - spin_lock_irqsave(&adapter->mb_lock, flags); 1654 - 1655 - tpd_next_to_use = atomic_read(&adapter->tpd_ring.next_to_use); 1656 - rfd_next_to_use = atomic_read(&adapter->rfd_ring.next_to_use); 1657 - rrd_next_to_clean = atomic_read(&adapter->rrd_ring.next_to_clean); 1658 - 1659 - value = ((rfd_next_to_use & MB_RFD_PROD_INDX_MASK) << 1660 - MB_RFD_PROD_INDX_SHIFT) | 1661 - ((rrd_next_to_clean & MB_RRD_CONS_INDX_MASK) << 1662 - MB_RRD_CONS_INDX_SHIFT) | 1663 - ((tpd_next_to_use & MB_TPD_PROD_INDX_MASK) << 1664 - MB_TPD_PROD_INDX_SHIFT); 1665 - iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX); 1666 - 1667 - spin_unlock_irqrestore(&adapter->mb_lock, flags); 1668 - } 1669 - 1670 1456 static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev) 1671 1457 { 1672 1458 struct atl1_adapter *adapter = netdev_priv(netdev); ··· 1677 1513 for (f = 0; f < nr_frags; f++) { 1678 1514 frag_size = skb_shinfo(skb)->frags[f].size; 1679 1515 if (frag_size) 1680 - count += 1681 - (frag_size + MAX_TX_BUF_LEN - 1) / MAX_TX_BUF_LEN; 1516 + count += (frag_size + ATL1_MAX_TX_BUF_LEN - 1) / 1517 + ATL1_MAX_TX_BUF_LEN; 1682 1518 } 1683 1519 1684 1520 /* mss will be nonzero if we're doing segment offload (TSO/GSO) */ ··· 1694 1530 /* need additional TPD ? */ 1695 1531 if (proto_hdr_len != len) 1696 1532 count += (len - proto_hdr_len + 1697 - MAX_TX_BUF_LEN - 1) / MAX_TX_BUF_LEN; 1533 + ATL1_MAX_TX_BUF_LEN - 1) / 1534 + ATL1_MAX_TX_BUF_LEN; 1698 1535 } 1699 1536 } 1700 1537 ··· 1703 1538 if (!spin_trylock(&adapter->lock)) { 1704 1539 /* Can't get lock - tell upper layer to requeue */ 1705 1540 local_irq_restore(flags); 1706 - dev_dbg(&adapter->pdev->dev, "tx locked\n"); 1541 + dev_printk(KERN_DEBUG, &adapter->pdev->dev, "tx locked\n"); 1707 1542 return NETDEV_TX_LOCKED; 1708 1543 } 1709 1544 ··· 1711 1546 /* not enough descriptors */ 1712 1547 netif_stop_queue(netdev); 1713 1548 spin_unlock_irqrestore(&adapter->lock, flags); 1714 - dev_dbg(&adapter->pdev->dev, "tx busy\n"); 1549 + dev_printk(KERN_DEBUG, &adapter->pdev->dev, "tx busy\n"); 1715 1550 return NETDEV_TX_BUSY; 1716 1551 } 1717 1552 ··· 1753 1588 } 1754 1589 1755 1590 /* 1756 - * atl1_get_stats - Get System Network Statistics 1757 - * @netdev: network interface device structure 1758 - * 1759 - * Returns the address of the device statistics structure. 1760 - * The statistics are actually updated from the timer callback. 
1591 + * atl1_intr - Interrupt Handler 1592 + * @irq: interrupt number 1593 + * @data: pointer to a network interface device structure 1594 + * @pt_regs: CPU registers structure 1761 1595 */ 1762 - static struct net_device_stats *atl1_get_stats(struct net_device *netdev) 1596 + static irqreturn_t atl1_intr(int irq, void *data) 1597 + { 1598 + struct atl1_adapter *adapter = netdev_priv(data); 1599 + u32 status; 1600 + u8 update_rx; 1601 + int max_ints = 10; 1602 + 1603 + status = adapter->cmb.cmb->int_stats; 1604 + if (!status) 1605 + return IRQ_NONE; 1606 + 1607 + update_rx = 0; 1608 + 1609 + do { 1610 + /* clear CMB interrupt status at once */ 1611 + adapter->cmb.cmb->int_stats = 0; 1612 + 1613 + if (status & ISR_GPHY) /* clear phy status */ 1614 + atl1_clear_phy_int(adapter); 1615 + 1616 + /* clear ISR status, and Enable CMB DMA/Disable Interrupt */ 1617 + iowrite32(status | ISR_DIS_INT, adapter->hw.hw_addr + REG_ISR); 1618 + 1619 + /* check if SMB intr */ 1620 + if (status & ISR_SMB) 1621 + atl1_inc_smb(adapter); 1622 + 1623 + /* check if PCIE PHY Link down */ 1624 + if (status & ISR_PHY_LINKDOWN) { 1625 + dev_printk(KERN_DEBUG, &adapter->pdev->dev, 1626 + "pcie phy link down %x\n", status); 1627 + if (netif_running(adapter->netdev)) { /* reset MAC */ 1628 + iowrite32(0, adapter->hw.hw_addr + REG_IMR); 1629 + schedule_work(&adapter->pcie_dma_to_rst_task); 1630 + return IRQ_HANDLED; 1631 + } 1632 + } 1633 + 1634 + /* check if DMA read/write error ? */ 1635 + if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) { 1636 + dev_printk(KERN_DEBUG, &adapter->pdev->dev, 1637 + "pcie DMA r/w error (status = 0x%x)\n", 1638 + status); 1639 + iowrite32(0, adapter->hw.hw_addr + REG_IMR); 1640 + schedule_work(&adapter->pcie_dma_to_rst_task); 1641 + return IRQ_HANDLED; 1642 + } 1643 + 1644 + /* link event */ 1645 + if (status & ISR_GPHY) { 1646 + adapter->soft_stats.tx_carrier_errors++; 1647 + atl1_check_for_link(adapter); 1648 + } 1649 + 1650 + /* transmit event */ 1651 + if (status & ISR_CMB_TX) 1652 + atl1_intr_tx(adapter); 1653 + 1654 + /* rx exception */ 1655 + if (unlikely(status & (ISR_RXF_OV | ISR_RFD_UNRUN | 1656 + ISR_RRD_OV | ISR_HOST_RFD_UNRUN | 1657 + ISR_HOST_RRD_OV | ISR_CMB_RX))) { 1658 + if (status & (ISR_RXF_OV | ISR_RFD_UNRUN | 1659 + ISR_RRD_OV | ISR_HOST_RFD_UNRUN | 1660 + ISR_HOST_RRD_OV)) 1661 + dev_printk(KERN_DEBUG, &adapter->pdev->dev, 1662 + "rx exception, ISR = 0x%x\n", status); 1663 + atl1_intr_rx(adapter); 1664 + } 1665 + 1666 + if (--max_ints < 0) 1667 + break; 1668 + 1669 + } while ((status = adapter->cmb.cmb->int_stats)); 1670 + 1671 + /* re-enable Interrupt */ 1672 + iowrite32(ISR_DIS_SMB | ISR_DIS_DMA, adapter->hw.hw_addr + REG_ISR); 1673 + return IRQ_HANDLED; 1674 + } 1675 + 1676 + /* 1677 + * atl1_watchdog - Timer Call-back 1678 + * @data: pointer to netdev cast into an unsigned long 1679 + */ 1680 + static void atl1_watchdog(unsigned long data) 1681 + { 1682 + struct atl1_adapter *adapter = (struct atl1_adapter *)data; 1683 + 1684 + /* Reset the timer */ 1685 + mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ); 1686 + } 1687 + 1688 + /* 1689 + * atl1_phy_config - Timer Call-back 1690 + * @data: pointer to netdev cast into an unsigned long 1691 + */ 1692 + static void atl1_phy_config(unsigned long data) 1693 + { 1694 + struct atl1_adapter *adapter = (struct atl1_adapter *)data; 1695 + struct atl1_hw *hw = &adapter->hw; 1696 + unsigned long flags; 1697 + 1698 + spin_lock_irqsave(&adapter->lock, flags); 1699 + adapter->phy_timer_pending = false; 1700 + 
atl1_write_phy_reg(hw, MII_ADVERTISE, hw->mii_autoneg_adv_reg); 1701 + atl1_write_phy_reg(hw, MII_AT001_CR, hw->mii_1000t_ctrl_reg); 1702 + atl1_write_phy_reg(hw, MII_BMCR, MII_CR_RESET | MII_CR_AUTO_NEG_EN); 1703 + spin_unlock_irqrestore(&adapter->lock, flags); 1704 + } 1705 + 1706 + /* 1707 + * atl1_tx_timeout - Respond to a Tx Hang 1708 + * @netdev: network interface device structure 1709 + */ 1710 + static void atl1_tx_timeout(struct net_device *netdev) 1763 1711 { 1764 1712 struct atl1_adapter *adapter = netdev_priv(netdev); 1765 - return &adapter->net_stats; 1713 + /* Do the reset outside of interrupt context */ 1714 + schedule_work(&adapter->tx_timeout_task); 1766 1715 } 1767 1716 1768 1717 /* 1769 - * atl1_clean_rx_ring - Free RFD Buffers 1770 - * @adapter: board private structure 1718 + * Orphaned vendor comment left intact here: 1719 + * <vendor comment> 1720 + * If TPD Buffer size equal to 0, PCIE DMAR_TO_INT 1721 + * will assert. We do soft reset <0x1400=1> according 1722 + * with the SPEC. BUT, it seemes that PCIE or DMA 1723 + * state-machine will not be reset. DMAR_TO_INT will 1724 + * assert again and again. 1725 + * </vendor comment> 1771 1726 */ 1772 - static void atl1_clean_rx_ring(struct atl1_adapter *adapter) 1727 + static void atl1_tx_timeout_task(struct work_struct *work) 1773 1728 { 1774 - struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; 1775 - struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring; 1776 - struct atl1_buffer *buffer_info; 1777 - struct pci_dev *pdev = adapter->pdev; 1778 - unsigned long size; 1779 - unsigned int i; 1729 + struct atl1_adapter *adapter = 1730 + container_of(work, struct atl1_adapter, tx_timeout_task); 1731 + struct net_device *netdev = adapter->netdev; 1780 1732 1781 - /* Free all the Rx ring sk_buffs */ 1782 - for (i = 0; i < rfd_ring->count; i++) { 1783 - buffer_info = &rfd_ring->buffer_info[i]; 1784 - if (buffer_info->dma) { 1785 - pci_unmap_page(pdev, 1786 - buffer_info->dma, 1787 - buffer_info->length, 1788 - PCI_DMA_FROMDEVICE); 1789 - buffer_info->dma = 0; 1790 - } 1791 - if (buffer_info->skb) { 1792 - dev_kfree_skb(buffer_info->skb); 1793 - buffer_info->skb = NULL; 1794 - } 1795 - } 1796 - 1797 - size = sizeof(struct atl1_buffer) * rfd_ring->count; 1798 - memset(rfd_ring->buffer_info, 0, size); 1799 - 1800 - /* Zero out the descriptor ring */ 1801 - memset(rfd_ring->desc, 0, rfd_ring->size); 1802 - 1803 - rfd_ring->next_to_clean = 0; 1804 - atomic_set(&rfd_ring->next_to_use, 0); 1805 - 1806 - rrd_ring->next_to_use = 0; 1807 - atomic_set(&rrd_ring->next_to_clean, 0); 1733 + netif_device_detach(netdev); 1734 + atl1_down(adapter); 1735 + atl1_up(adapter); 1736 + netif_device_attach(netdev); 1808 1737 } 1809 1738 1810 1739 /* 1811 - * atl1_clean_tx_ring - Free Tx Buffers 1812 - * @adapter: board private structure 1740 + * atl1_link_chg_task - deal with link change event Out of interrupt context 1813 1741 */ 1814 - static void atl1_clean_tx_ring(struct atl1_adapter *adapter) 1742 + static void atl1_link_chg_task(struct work_struct *work) 1815 1743 { 1816 - struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; 1817 - struct atl1_buffer *buffer_info; 1818 - struct pci_dev *pdev = adapter->pdev; 1819 - unsigned long size; 1820 - unsigned int i; 1744 + struct atl1_adapter *adapter = 1745 + container_of(work, struct atl1_adapter, link_chg_task); 1746 + unsigned long flags; 1821 1747 1822 - /* Free all the Tx ring sk_buffs */ 1823 - for (i = 0; i < tpd_ring->count; i++) { 1824 - buffer_info = &tpd_ring->buffer_info[i]; 1825 - if 
(buffer_info->dma) { 1826 - pci_unmap_page(pdev, buffer_info->dma, 1827 - buffer_info->length, PCI_DMA_TODEVICE); 1828 - buffer_info->dma = 0; 1829 - } 1830 - } 1831 - 1832 - for (i = 0; i < tpd_ring->count; i++) { 1833 - buffer_info = &tpd_ring->buffer_info[i]; 1834 - if (buffer_info->skb) { 1835 - dev_kfree_skb_any(buffer_info->skb); 1836 - buffer_info->skb = NULL; 1837 - } 1838 - } 1839 - 1840 - size = sizeof(struct atl1_buffer) * tpd_ring->count; 1841 - memset(tpd_ring->buffer_info, 0, size); 1842 - 1843 - /* Zero out the descriptor ring */ 1844 - memset(tpd_ring->desc, 0, tpd_ring->size); 1845 - 1846 - atomic_set(&tpd_ring->next_to_use, 0); 1847 - atomic_set(&tpd_ring->next_to_clean, 0); 1748 + spin_lock_irqsave(&adapter->lock, flags); 1749 + atl1_check_link(adapter); 1750 + spin_unlock_irqrestore(&adapter->lock, flags); 1848 1751 } 1849 1752 1850 - /* 1851 - * atl1_free_ring_resources - Free Tx / RX descriptor Resources 1852 - * @adapter: board private structure 1853 - * 1854 - * Free all transmit software resources 1855 - */ 1856 - void atl1_free_ring_resources(struct atl1_adapter *adapter) 1753 + static void atl1_vlan_rx_register(struct net_device *netdev, 1754 + struct vlan_group *grp) 1857 1755 { 1858 - struct pci_dev *pdev = adapter->pdev; 1859 - struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; 1860 - struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; 1861 - struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring; 1862 - struct atl1_ring_header *ring_header = &adapter->ring_header; 1756 + struct atl1_adapter *adapter = netdev_priv(netdev); 1757 + unsigned long flags; 1758 + u32 ctrl; 1863 1759 1864 - atl1_clean_tx_ring(adapter); 1865 - atl1_clean_rx_ring(adapter); 1760 + spin_lock_irqsave(&adapter->lock, flags); 1761 + /* atl1_irq_disable(adapter); */ 1762 + adapter->vlgrp = grp; 1866 1763 1867 - kfree(tpd_ring->buffer_info); 1868 - pci_free_consistent(pdev, ring_header->size, ring_header->desc, 1869 - ring_header->dma); 1764 + if (grp) { 1765 + /* enable VLAN tag insert/strip */ 1766 + ctrl = ioread32(adapter->hw.hw_addr + REG_MAC_CTRL); 1767 + ctrl |= MAC_CTRL_RMV_VLAN; 1768 + iowrite32(ctrl, adapter->hw.hw_addr + REG_MAC_CTRL); 1769 + } else { 1770 + /* disable VLAN tag insert/strip */ 1771 + ctrl = ioread32(adapter->hw.hw_addr + REG_MAC_CTRL); 1772 + ctrl &= ~MAC_CTRL_RMV_VLAN; 1773 + iowrite32(ctrl, adapter->hw.hw_addr + REG_MAC_CTRL); 1774 + } 1870 1775 1871 - tpd_ring->buffer_info = NULL; 1872 - tpd_ring->desc = NULL; 1873 - tpd_ring->dma = 0; 1776 + /* atl1_irq_enable(adapter); */ 1777 + spin_unlock_irqrestore(&adapter->lock, flags); 1778 + } 1874 1779 1875 - rfd_ring->buffer_info = NULL; 1876 - rfd_ring->desc = NULL; 1877 - rfd_ring->dma = 0; 1780 + static void atl1_restore_vlan(struct atl1_adapter *adapter) 1781 + { 1782 + atl1_vlan_rx_register(adapter->netdev, adapter->vlgrp); 1783 + } 1878 1784 1879 - rrd_ring->desc = NULL; 1880 - rrd_ring->dma = 0; 1785 + int atl1_reset(struct atl1_adapter *adapter) 1786 + { 1787 + int ret; 1788 + 1789 + ret = atl1_reset_hw(&adapter->hw); 1790 + if (ret != ATL1_SUCCESS) 1791 + return ret; 1792 + return atl1_init_hw(&adapter->hw); 1881 1793 } 1882 1794 1883 1795 s32 atl1_up(struct atl1_adapter *adapter) ··· 1965 1723 1966 1724 /* hardware has been reset, we need to reload some things */ 1967 1725 atl1_set_multi(netdev); 1726 + atl1_init_ring_ptrs(adapter); 1968 1727 atl1_restore_vlan(adapter); 1969 1728 err = atl1_alloc_rx_buffers(adapter); 1970 1729 if (unlikely(!err)) /* no RX BUFFER allocated */ ··· 1992 1749 
atl1_irq_enable(adapter); 1993 1750 atl1_check_link(adapter); 1994 1751 return 0; 1995 - 1996 - /* FIXME: unreachable code! -- CHS */ 1997 - /* free irq disable any interrupt */ 1998 - iowrite32(0, adapter->hw.hw_addr + REG_IMR); 1999 - free_irq(adapter->pdev->irq, netdev); 2000 1752 2001 1753 err_up: 2002 1754 pci_disable_msi(adapter->pdev); ··· 2021 1783 2022 1784 atl1_clean_tx_ring(adapter); 2023 1785 atl1_clean_rx_ring(adapter); 2024 - } 2025 - 2026 - /* 2027 - * atl1_change_mtu - Change the Maximum Transfer Unit 2028 - * @netdev: network interface device structure 2029 - * @new_mtu: new value for maximum frame size 2030 - * 2031 - * Returns 0 on success, negative on failure 2032 - */ 2033 - static int atl1_change_mtu(struct net_device *netdev, int new_mtu) 2034 - { 2035 - struct atl1_adapter *adapter = netdev_priv(netdev); 2036 - int old_mtu = netdev->mtu; 2037 - int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; 2038 - 2039 - if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) || 2040 - (max_frame > MAX_JUMBO_FRAME_SIZE)) { 2041 - dev_warn(&adapter->pdev->dev, "invalid MTU setting\n"); 2042 - return -EINVAL; 2043 - } 2044 - 2045 - adapter->hw.max_frame_size = max_frame; 2046 - adapter->hw.tx_jumbo_task_th = (max_frame + 7) >> 3; 2047 - adapter->rx_buffer_len = (max_frame + 7) & ~7; 2048 - adapter->hw.rx_jumbo_th = adapter->rx_buffer_len / 8; 2049 - 2050 - netdev->mtu = new_mtu; 2051 - if ((old_mtu != new_mtu) && netif_running(netdev)) { 2052 - atl1_down(adapter); 2053 - atl1_up(adapter); 2054 - } 2055 - 2056 - return 0; 2057 - } 2058 - 2059 - /* 2060 - * atl1_set_mac - Change the Ethernet Address of the NIC 2061 - * @netdev: network interface device structure 2062 - * @p: pointer to an address structure 2063 - * 2064 - * Returns 0 on success, negative on failure 2065 - */ 2066 - static int atl1_set_mac(struct net_device *netdev, void *p) 2067 - { 2068 - struct atl1_adapter *adapter = netdev_priv(netdev); 2069 - struct sockaddr *addr = p; 2070 - 2071 - if (netif_running(netdev)) 2072 - return -EBUSY; 2073 - 2074 - if (!is_valid_ether_addr(addr->sa_data)) 2075 - return -EADDRNOTAVAIL; 2076 - 2077 - memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 2078 - memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len); 2079 - 2080 - atl1_set_mac_addr(&adapter->hw); 2081 - return 0; 2082 - } 2083 - 2084 - /* 2085 - * atl1_watchdog - Timer Call-back 2086 - * @data: pointer to netdev cast into an unsigned long 2087 - */ 2088 - static void atl1_watchdog(unsigned long data) 2089 - { 2090 - struct atl1_adapter *adapter = (struct atl1_adapter *)data; 2091 - 2092 - /* Reset the timer */ 2093 - mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ); 2094 - } 2095 - 2096 - static int mdio_read(struct net_device *netdev, int phy_id, int reg_num) 2097 - { 2098 - struct atl1_adapter *adapter = netdev_priv(netdev); 2099 - u16 result; 2100 - 2101 - atl1_read_phy_reg(&adapter->hw, reg_num & 0x1f, &result); 2102 - 2103 - return result; 2104 - } 2105 - 2106 - static void mdio_write(struct net_device *netdev, int phy_id, int reg_num, int val) 2107 - { 2108 - struct atl1_adapter *adapter = netdev_priv(netdev); 2109 - 2110 - atl1_write_phy_reg(&adapter->hw, reg_num, val); 2111 - } 2112 - 2113 - /* 2114 - * atl1_mii_ioctl - 2115 - * @netdev: 2116 - * @ifreq: 2117 - * @cmd: 2118 - */ 2119 - static int atl1_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 2120 - { 2121 - struct atl1_adapter *adapter = netdev_priv(netdev); 2122 - unsigned long flags; 2123 - int retval; 2124 - 2125 - 
if (!netif_running(netdev)) 2126 - return -EINVAL; 2127 - 2128 - spin_lock_irqsave(&adapter->lock, flags); 2129 - retval = generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL); 2130 - spin_unlock_irqrestore(&adapter->lock, flags); 2131 - 2132 - return retval; 2133 - } 2134 - 2135 - /* 2136 - * atl1_ioctl - 2137 - * @netdev: 2138 - * @ifreq: 2139 - * @cmd: 2140 - */ 2141 - static int atl1_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 2142 - { 2143 - switch (cmd) { 2144 - case SIOCGMIIPHY: 2145 - case SIOCGMIIREG: 2146 - case SIOCSMIIREG: 2147 - return atl1_mii_ioctl(netdev, ifr, cmd); 2148 - default: 2149 - return -EOPNOTSUPP; 2150 - } 2151 - } 2152 - 2153 - /* 2154 - * atl1_tx_timeout - Respond to a Tx Hang 2155 - * @netdev: network interface device structure 2156 - */ 2157 - static void atl1_tx_timeout(struct net_device *netdev) 2158 - { 2159 - struct atl1_adapter *adapter = netdev_priv(netdev); 2160 - /* Do the reset outside of interrupt context */ 2161 - schedule_work(&adapter->tx_timeout_task); 2162 - } 2163 - 2164 - /* 2165 - * atl1_phy_config - Timer Call-back 2166 - * @data: pointer to netdev cast into an unsigned long 2167 - */ 2168 - static void atl1_phy_config(unsigned long data) 2169 - { 2170 - struct atl1_adapter *adapter = (struct atl1_adapter *)data; 2171 - struct atl1_hw *hw = &adapter->hw; 2172 - unsigned long flags; 2173 - 2174 - spin_lock_irqsave(&adapter->lock, flags); 2175 - adapter->phy_timer_pending = false; 2176 - atl1_write_phy_reg(hw, MII_ADVERTISE, hw->mii_autoneg_adv_reg); 2177 - atl1_write_phy_reg(hw, MII_AT001_CR, hw->mii_1000t_ctrl_reg); 2178 - atl1_write_phy_reg(hw, MII_BMCR, MII_CR_RESET | MII_CR_AUTO_NEG_EN); 2179 - spin_unlock_irqrestore(&adapter->lock, flags); 2180 - } 2181 - 2182 - int atl1_reset(struct atl1_adapter *adapter) 2183 - { 2184 - int ret; 2185 - 2186 - ret = atl1_reset_hw(&adapter->hw); 2187 - if (ret != ATL1_SUCCESS) 2188 - return ret; 2189 - return atl1_init_hw(&adapter->hw); 2190 1786 } 2191 1787 2192 1788 /* ··· 2075 2003 return 0; 2076 2004 } 2077 2005 2006 + #ifdef CONFIG_PM 2007 + static int atl1_suspend(struct pci_dev *pdev, pm_message_t state) 2008 + { 2009 + struct net_device *netdev = pci_get_drvdata(pdev); 2010 + struct atl1_adapter *adapter = netdev_priv(netdev); 2011 + struct atl1_hw *hw = &adapter->hw; 2012 + u32 ctrl = 0; 2013 + u32 wufc = adapter->wol; 2014 + 2015 + netif_device_detach(netdev); 2016 + if (netif_running(netdev)) 2017 + atl1_down(adapter); 2018 + 2019 + atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl); 2020 + atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl); 2021 + if (ctrl & BMSR_LSTATUS) 2022 + wufc &= ~ATL1_WUFC_LNKC; 2023 + 2024 + /* reduce speed to 10/100M */ 2025 + if (wufc) { 2026 + atl1_phy_enter_power_saving(hw); 2027 + /* if resume, let driver to re- setup link */ 2028 + hw->phy_configured = false; 2029 + atl1_set_mac_addr(hw); 2030 + atl1_set_multi(netdev); 2031 + 2032 + ctrl = 0; 2033 + /* turn on magic packet wol */ 2034 + if (wufc & ATL1_WUFC_MAG) 2035 + ctrl = WOL_MAGIC_EN | WOL_MAGIC_PME_EN; 2036 + 2037 + /* turn on Link change WOL */ 2038 + if (wufc & ATL1_WUFC_LNKC) 2039 + ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN); 2040 + iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL); 2041 + 2042 + /* turn on all-multi mode if wake on multicast is enabled */ 2043 + ctrl = ioread32(hw->hw_addr + REG_MAC_CTRL); 2044 + ctrl &= ~MAC_CTRL_DBG; 2045 + ctrl &= ~MAC_CTRL_PROMIS_EN; 2046 + if (wufc & ATL1_WUFC_MC) 2047 + ctrl |= MAC_CTRL_MC_ALL_EN; 2048 + else 2049 + ctrl &= ~MAC_CTRL_MC_ALL_EN; 
2050 + 2051 + /* turn on broadcast mode if wake on-BC is enabled */ 2052 + if (wufc & ATL1_WUFC_BC) 2053 + ctrl |= MAC_CTRL_BC_EN; 2054 + else 2055 + ctrl &= ~MAC_CTRL_BC_EN; 2056 + 2057 + /* enable RX */ 2058 + ctrl |= MAC_CTRL_RX_EN; 2059 + iowrite32(ctrl, hw->hw_addr + REG_MAC_CTRL); 2060 + pci_enable_wake(pdev, PCI_D3hot, 1); 2061 + pci_enable_wake(pdev, PCI_D3cold, 1); 2062 + } else { 2063 + iowrite32(0, hw->hw_addr + REG_WOL_CTRL); 2064 + pci_enable_wake(pdev, PCI_D3hot, 0); 2065 + pci_enable_wake(pdev, PCI_D3cold, 0); 2066 + } 2067 + 2068 + pci_save_state(pdev); 2069 + pci_disable_device(pdev); 2070 + 2071 + pci_set_power_state(pdev, PCI_D3hot); 2072 + 2073 + return 0; 2074 + } 2075 + 2076 + static int atl1_resume(struct pci_dev *pdev) 2077 + { 2078 + struct net_device *netdev = pci_get_drvdata(pdev); 2079 + struct atl1_adapter *adapter = netdev_priv(netdev); 2080 + u32 ret_val; 2081 + 2082 + pci_set_power_state(pdev, 0); 2083 + pci_restore_state(pdev); 2084 + 2085 + ret_val = pci_enable_device(pdev); 2086 + pci_enable_wake(pdev, PCI_D3hot, 0); 2087 + pci_enable_wake(pdev, PCI_D3cold, 0); 2088 + 2089 + iowrite32(0, adapter->hw.hw_addr + REG_WOL_CTRL); 2090 + atl1_reset(adapter); 2091 + 2092 + if (netif_running(netdev)) 2093 + atl1_up(adapter); 2094 + netif_device_attach(netdev); 2095 + 2096 + atl1_via_workaround(adapter); 2097 + 2098 + return 0; 2099 + } 2100 + #else 2101 + #define atl1_suspend NULL 2102 + #define atl1_resume NULL 2103 + #endif 2104 + 2078 2105 #ifdef CONFIG_NET_POLL_CONTROLLER 2079 2106 static void atl1_poll_controller(struct net_device *netdev) 2080 2107 { ··· 2182 2011 enable_irq(netdev->irq); 2183 2012 } 2184 2013 #endif 2185 - 2186 - /* 2187 - * If TPD Buffer size equal to 0, PCIE DMAR_TO_INT 2188 - * will assert. We do soft reset <0x1400=1> according 2189 - * with the SPEC. BUT, it seemes that PCIE or DMA 2190 - * state-machine will not be reset. DMAR_TO_INT will 2191 - * assert again and again. 2192 - */ 2193 - static void atl1_tx_timeout_task(struct work_struct *work) 2194 - { 2195 - struct atl1_adapter *adapter = 2196 - container_of(work, struct atl1_adapter, tx_timeout_task); 2197 - struct net_device *netdev = adapter->netdev; 2198 - 2199 - netif_device_detach(netdev); 2200 - atl1_down(adapter); 2201 - atl1_up(adapter); 2202 - netif_device_attach(netdev); 2203 - } 2204 - 2205 - /* 2206 - * atl1_link_chg_task - deal with link change event Out of interrupt context 2207 - */ 2208 - static void atl1_link_chg_task(struct work_struct *work) 2209 - { 2210 - struct atl1_adapter *adapter = 2211 - container_of(work, struct atl1_adapter, link_chg_task); 2212 - unsigned long flags; 2213 - 2214 - spin_lock_irqsave(&adapter->lock, flags); 2215 - atl1_check_link(adapter); 2216 - spin_unlock_irqrestore(&adapter->lock, flags); 2217 - } 2218 - 2219 - /* 2220 - * atl1_pcie_patch - Patch for PCIE module 2221 - */ 2222 - static void atl1_pcie_patch(struct atl1_adapter *adapter) 2223 - { 2224 - u32 value; 2225 - value = 0x6500; 2226 - iowrite32(value, adapter->hw.hw_addr + 0x12FC); 2227 - /* pcie flow control mode change */ 2228 - value = ioread32(adapter->hw.hw_addr + 0x1008); 2229 - value |= 0x8000; 2230 - iowrite32(value, adapter->hw.hw_addr + 0x1008); 2231 - } 2232 - 2233 - /* 2234 - * When ACPI resume on some VIA MotherBoard, the Interrupt Disable bit/0x400 2235 - * on PCI Command register is disable. 2236 - * The function enable this bit. 
2237 - * Brackett, 2006/03/15 2238 - */ 2239 - static void atl1_via_workaround(struct atl1_adapter *adapter) 2240 - { 2241 - unsigned long value; 2242 - 2243 - value = ioread16(adapter->hw.hw_addr + PCI_COMMAND); 2244 - if (value & PCI_COMMAND_INTX_DISABLE) 2245 - value &= ~PCI_COMMAND_INTX_DISABLE; 2246 - iowrite32(value, adapter->hw.hw_addr + PCI_COMMAND); 2247 - } 2248 2014 2249 2015 /* 2250 2016 * atl1_probe - Device Initialization Routine ··· 2195 2087 * and a hardware reset occur. 2196 2088 */ 2197 2089 static int __devinit atl1_probe(struct pci_dev *pdev, 2198 - const struct pci_device_id *ent) 2090 + const struct pci_device_id *ent) 2199 2091 { 2200 2092 struct net_device *netdev; 2201 2093 struct atl1_adapter *adapter; ··· 2249 2141 } 2250 2142 /* get device revision number */ 2251 2143 adapter->hw.dev_rev = ioread16(adapter->hw.hw_addr + 2252 - (REG_MASTER_CTRL + 2)); 2144 + (REG_MASTER_CTRL + 2)); 2253 2145 dev_info(&pdev->dev, "version %s\n", DRIVER_VERSION); 2254 2146 2255 2147 /* set default ring resource counts */ ··· 2402 2294 * address, we need to save the permanent one. 2403 2295 */ 2404 2296 if (memcmp(adapter->hw.mac_addr, adapter->hw.perm_mac_addr, ETH_ALEN)) { 2405 - memcpy(adapter->hw.mac_addr, adapter->hw.perm_mac_addr, ETH_ALEN); 2297 + memcpy(adapter->hw.mac_addr, adapter->hw.perm_mac_addr, 2298 + ETH_ALEN); 2406 2299 atl1_set_mac_addr(&adapter->hw); 2407 2300 } 2408 2301 ··· 2415 2306 pci_disable_device(pdev); 2416 2307 } 2417 2308 2418 - #ifdef CONFIG_PM 2419 - static int atl1_suspend(struct pci_dev *pdev, pm_message_t state) 2420 - { 2421 - struct net_device *netdev = pci_get_drvdata(pdev); 2422 - struct atl1_adapter *adapter = netdev_priv(netdev); 2423 - struct atl1_hw *hw = &adapter->hw; 2424 - u32 ctrl = 0; 2425 - u32 wufc = adapter->wol; 2426 - 2427 - netif_device_detach(netdev); 2428 - if (netif_running(netdev)) 2429 - atl1_down(adapter); 2430 - 2431 - atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl); 2432 - atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl); 2433 - if (ctrl & BMSR_LSTATUS) 2434 - wufc &= ~ATL1_WUFC_LNKC; 2435 - 2436 - /* reduce speed to 10/100M */ 2437 - if (wufc) { 2438 - atl1_phy_enter_power_saving(hw); 2439 - /* if resume, let driver to re- setup link */ 2440 - hw->phy_configured = false; 2441 - atl1_set_mac_addr(hw); 2442 - atl1_set_multi(netdev); 2443 - 2444 - ctrl = 0; 2445 - /* turn on magic packet wol */ 2446 - if (wufc & ATL1_WUFC_MAG) 2447 - ctrl = WOL_MAGIC_EN | WOL_MAGIC_PME_EN; 2448 - 2449 - /* turn on Link change WOL */ 2450 - if (wufc & ATL1_WUFC_LNKC) 2451 - ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN); 2452 - iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL); 2453 - 2454 - /* turn on all-multi mode if wake on multicast is enabled */ 2455 - ctrl = ioread32(hw->hw_addr + REG_MAC_CTRL); 2456 - ctrl &= ~MAC_CTRL_DBG; 2457 - ctrl &= ~MAC_CTRL_PROMIS_EN; 2458 - if (wufc & ATL1_WUFC_MC) 2459 - ctrl |= MAC_CTRL_MC_ALL_EN; 2460 - else 2461 - ctrl &= ~MAC_CTRL_MC_ALL_EN; 2462 - 2463 - /* turn on broadcast mode if wake on-BC is enabled */ 2464 - if (wufc & ATL1_WUFC_BC) 2465 - ctrl |= MAC_CTRL_BC_EN; 2466 - else 2467 - ctrl &= ~MAC_CTRL_BC_EN; 2468 - 2469 - /* enable RX */ 2470 - ctrl |= MAC_CTRL_RX_EN; 2471 - iowrite32(ctrl, hw->hw_addr + REG_MAC_CTRL); 2472 - pci_enable_wake(pdev, PCI_D3hot, 1); 2473 - pci_enable_wake(pdev, PCI_D3cold, 1); /* 4 == D3 cold */ 2474 - } else { 2475 - iowrite32(0, hw->hw_addr + REG_WOL_CTRL); 2476 - pci_enable_wake(pdev, PCI_D3hot, 0); 2477 - pci_enable_wake(pdev, PCI_D3cold, 0); /* 4 == D3 cold */ 2478 - } 
2479 - 2480 - pci_save_state(pdev); 2481 - pci_disable_device(pdev); 2482 - 2483 - pci_set_power_state(pdev, PCI_D3hot); 2484 - 2485 - return 0; 2486 - } 2487 - 2488 - static int atl1_resume(struct pci_dev *pdev) 2489 - { 2490 - struct net_device *netdev = pci_get_drvdata(pdev); 2491 - struct atl1_adapter *adapter = netdev_priv(netdev); 2492 - u32 ret_val; 2493 - 2494 - pci_set_power_state(pdev, 0); 2495 - pci_restore_state(pdev); 2496 - 2497 - ret_val = pci_enable_device(pdev); 2498 - pci_enable_wake(pdev, PCI_D3hot, 0); 2499 - pci_enable_wake(pdev, PCI_D3cold, 0); 2500 - 2501 - iowrite32(0, adapter->hw.hw_addr + REG_WOL_CTRL); 2502 - atl1_reset(adapter); 2503 - 2504 - if (netif_running(netdev)) 2505 - atl1_up(adapter); 2506 - netif_device_attach(netdev); 2507 - 2508 - atl1_via_workaround(adapter); 2509 - 2510 - return 0; 2511 - } 2512 - #else 2513 - #define atl1_suspend NULL 2514 - #define atl1_resume NULL 2515 - #endif 2516 - 2517 2309 static struct pci_driver atl1_driver = { 2518 2310 .name = atl1_driver_name, 2519 2311 .id_table = atl1_pci_tbl, 2520 2312 .probe = atl1_probe, 2521 2313 .remove = __devexit_p(atl1_remove), 2522 - /* Power Managment Hooks */ 2523 - /* probably broken right now -- CHS */ 2524 2314 .suspend = atl1_suspend, 2525 2315 .resume = atl1_resume 2526 2316 };
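A note on the descriptor accounting seen in the atl1 hunks above: tpd_avail() reports how many transmit descriptors are still free in the circular TPD ring, and the trailing -1 keeps one slot permanently unused so a full ring can be told apart from an empty one. A minimal standalone model of that arithmetic, with made-up index values rather than real driver state:

#include <stdio.h>

/*
 * Standalone model of the atl1 TPD ring accounting (not driver code).
 * One slot stays unused so that next_to_clean == next_to_use can only
 * mean "ring empty", never "ring full".
 */
static unsigned int ring_avail(unsigned int count,
                               unsigned int next_to_clean,
                               unsigned int next_to_use)
{
        return (next_to_clean > next_to_use) ?
                next_to_clean - next_to_use - 1 :
                count + next_to_clean - next_to_use - 1;
}

int main(void)
{
        /* made-up indices: 256-entry ring, clean at 10, use at 250 -> 15 free */
        printf("%u\n", ring_avail(256, 10, 250));
        return 0;
}
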
+18 -5
drivers/net/ehea/ehea.h
··· 39 39 #include <asm/io.h> 40 40 41 41 #define DRV_NAME "ehea" 42 - #define DRV_VERSION "EHEA_0067" 42 + #define DRV_VERSION "EHEA_0070" 43 43 44 - /* EHEA capability flags */ 44 + /* eHEA capability flags */ 45 45 #define DLPAR_PORT_ADD_REM 1 46 - #define DLPAR_MEM_ADD 2 47 - #define DLPAR_MEM_REM 4 48 - #define EHEA_CAPABILITIES (DLPAR_PORT_ADD_REM) 46 + #define DLPAR_MEM_ADD 2 47 + #define DLPAR_MEM_REM 4 48 + #define EHEA_CAPABILITIES (DLPAR_PORT_ADD_REM) 49 49 50 50 #define EHEA_MSG_DEFAULT (NETIF_MSG_LINK | NETIF_MSG_TIMER \ 51 51 | NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR) ··· 112 112 113 113 /* Memory Regions */ 114 114 #define EHEA_MR_ACC_CTRL 0x00800000 115 + 116 + #define EHEA_BUSMAP_START 0x8000000000000000ULL 115 117 116 118 #define EHEA_WATCH_DOG_TIMEOUT 10*HZ 117 119 ··· 186 184 set to 0 if unused */ 187 185 struct h_epa_user user; /* user space accessible resource 188 186 set to 0 if unused */ 187 + }; 188 + 189 + struct ehea_busmap { 190 + unsigned int entries; /* total number of entries */ 191 + unsigned int valid_sections; /* number of valid sections */ 192 + u64 *vaddr; 189 193 }; 190 194 191 195 struct ehea_qp; ··· 390 382 struct ehea_mr mr; 391 383 u32 pd; /* protection domain */ 392 384 u64 max_mc_mac; /* max number of multicast mac addresses */ 385 + int active_ports; 386 + struct list_head list; 393 387 }; 394 388 395 389 ··· 441 431 int max_entries_rq3; 442 432 }; 443 433 434 + enum ehea_flag_bits { 435 + __EHEA_STOP_XFER 436 + }; 444 437 445 438 void ehea_set_ethtool_ops(struct net_device *netdev); 446 439 int ehea_sense_port_attr(struct ehea_port *port);
+130 -14
drivers/net/ehea/ehea_main.c
··· 79 79 MODULE_PARM_DESC(use_mcs, " 0:NAPI, 1:Multiple receive queues, Default = 1 "); 80 80 81 81 static int port_name_cnt = 0; 82 + static LIST_HEAD(adapter_list); 83 + u64 ehea_driver_flags = 0; 84 + struct workqueue_struct *ehea_driver_wq; 85 + struct work_struct ehea_rereg_mr_task; 86 + 82 87 83 88 static int __devinit ehea_probe_adapter(struct ibmebus_dev *dev, 84 89 const struct of_device_id *id); ··· 243 238 rwqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, wqe_type) 244 239 | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index); 245 240 rwqe->sg_list[0].l_key = pr->recv_mr.lkey; 246 - rwqe->sg_list[0].vaddr = (u64)skb->data; 241 + rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data); 247 242 rwqe->sg_list[0].len = packet_size; 248 243 rwqe->data_segments = 1; 249 244 250 245 index++; 251 246 index &= max_index_mask; 247 + 248 + if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) 249 + goto out; 252 250 } 251 + 253 252 q_skba->index = index; 254 253 255 254 /* Ring doorbell */ ··· 262 253 ehea_update_rq2a(pr->qp, i); 263 254 else 264 255 ehea_update_rq3a(pr->qp, i); 265 - 256 + out: 266 257 return ret; 267 258 } 268 259 ··· 1330 1321 sg1entry->len = skb_data_size - headersize; 1331 1322 1332 1323 tmp_addr = (u64)(skb->data + headersize); 1333 - sg1entry->vaddr = tmp_addr; 1324 + sg1entry->vaddr = ehea_map_vaddr(tmp_addr); 1334 1325 swqe->descriptors++; 1335 1326 } 1336 1327 } else ··· 1361 1352 sg1entry->l_key = lkey; 1362 1353 sg1entry->len = skb_data_size - SWQE2_MAX_IMM; 1363 1354 tmp_addr = (u64)(skb->data + SWQE2_MAX_IMM); 1364 - sg1entry->vaddr = tmp_addr; 1355 + sg1entry->vaddr = ehea_map_vaddr(tmp_addr); 1365 1356 swqe->descriptors++; 1366 1357 } 1367 1358 } else { ··· 1400 1391 sg1entry->len = frag->size; 1401 1392 tmp_addr = (u64)(page_address(frag->page) 1402 1393 + frag->page_offset); 1403 - sg1entry->vaddr = tmp_addr; 1394 + sg1entry->vaddr = ehea_map_vaddr(tmp_addr); 1404 1395 swqe->descriptors++; 1405 1396 sg1entry_contains_frag_data = 1; 1406 1397 } ··· 1415 1406 1416 1407 tmp_addr = (u64)(page_address(frag->page) 1417 1408 + frag->page_offset); 1418 - sgentry->vaddr = tmp_addr; 1409 + sgentry->vaddr = ehea_map_vaddr(tmp_addr); 1419 1410 swqe->descriptors++; 1420 1411 } 1421 1412 } ··· 1887 1878 ehea_dump(swqe, 512, "swqe"); 1888 1879 } 1889 1880 1881 + if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) 1882 + goto out; 1883 + 1890 1884 ehea_post_swqe(pr->qp, swqe); 1891 1885 pr->tx_packets++; 1892 1886 ··· 1904 1892 } 1905 1893 dev->trans_start = jiffies; 1906 1894 spin_unlock(&pr->xmit_lock); 1907 - 1895 + out: 1908 1896 return NETDEV_TX_OK; 1909 1897 } 1910 1898 ··· 2232 2220 out_clean_pr: 2233 2221 ehea_clean_all_portres(port); 2234 2222 out: 2223 + if (ret) 2224 + ehea_info("Failed starting %s. ret=%i", dev->name, ret); 2225 + 2235 2226 return ret; 2236 2227 } 2237 2228 ··· 2274 2259 msleep(1); 2275 2260 2276 2261 ehea_broadcast_reg_helper(port, H_DEREG_BCMC); 2277 - ret = ehea_clean_all_portres(port); 2278 2262 port->state = EHEA_PORT_DOWN; 2263 + 2264 + ret = ehea_clean_all_portres(port); 2265 + if (ret) 2266 + ehea_info("Failed freeing resources for %s. ret=%i", 2267 + dev->name, ret); 2268 + 2279 2269 return ret; 2280 2270 } 2281 2271 ··· 2312 2292 netif_stop_queue(dev); 2313 2293 netif_poll_disable(dev); 2314 2294 2315 - ret = ehea_down(dev); 2316 - if (ret) 2317 - ehea_error("ehea_down failed. 
not all resources are freed"); 2295 + ehea_down(dev); 2318 2296 2319 2297 ret = ehea_up(dev); 2320 - if (ret) { 2321 - ehea_error("Reset device %s failed: ret=%d", dev->name, ret); 2298 + if (ret) 2322 2299 goto out; 2323 - } 2324 2300 2325 2301 if (netif_msg_timer(port)) 2326 2302 ehea_info("Device %s resetted successfully", dev->name); ··· 2325 2309 netif_wake_queue(dev); 2326 2310 out: 2327 2311 up(&port->port_lock); 2312 + return; 2313 + } 2314 + 2315 + static void ehea_rereg_mrs(struct work_struct *work) 2316 + { 2317 + int ret, i; 2318 + struct ehea_adapter *adapter; 2319 + 2320 + ehea_info("LPAR memory enlarged - re-initializing driver"); 2321 + 2322 + list_for_each_entry(adapter, &adapter_list, list) 2323 + if (adapter->active_ports) { 2324 + /* Shutdown all ports */ 2325 + for (i = 0; i < EHEA_MAX_PORTS; i++) { 2326 + struct ehea_port *port = adapter->port[i]; 2327 + 2328 + if (port) { 2329 + struct net_device *dev = port->netdev; 2330 + 2331 + if (dev->flags & IFF_UP) { 2332 + ehea_info("stopping %s", 2333 + dev->name); 2334 + down(&port->port_lock); 2335 + netif_stop_queue(dev); 2336 + netif_poll_disable(dev); 2337 + ehea_down(dev); 2338 + up(&port->port_lock); 2339 + } 2340 + } 2341 + } 2342 + 2343 + /* Unregister old memory region */ 2344 + ret = ehea_rem_mr(&adapter->mr); 2345 + if (ret) { 2346 + ehea_error("unregister MR failed - driver" 2347 + " inoperable!"); 2348 + goto out; 2349 + } 2350 + } 2351 + 2352 + ehea_destroy_busmap(); 2353 + 2354 + ret = ehea_create_busmap(); 2355 + if (ret) 2356 + goto out; 2357 + 2358 + clear_bit(__EHEA_STOP_XFER, &ehea_driver_flags); 2359 + 2360 + list_for_each_entry(adapter, &adapter_list, list) 2361 + if (adapter->active_ports) { 2362 + /* Register new memory region */ 2363 + ret = ehea_reg_kernel_mr(adapter, &adapter->mr); 2364 + if (ret) { 2365 + ehea_error("register MR failed - driver" 2366 + " inoperable!"); 2367 + goto out; 2368 + } 2369 + 2370 + /* Restart all ports */ 2371 + for (i = 0; i < EHEA_MAX_PORTS; i++) { 2372 + struct ehea_port *port = adapter->port[i]; 2373 + 2374 + if (port) { 2375 + struct net_device *dev = port->netdev; 2376 + 2377 + if (dev->flags & IFF_UP) { 2378 + ehea_info("restarting %s", 2379 + dev->name); 2380 + down(&port->port_lock); 2381 + 2382 + ret = ehea_up(dev); 2383 + if (!ret) { 2384 + netif_poll_enable(dev); 2385 + netif_wake_queue(dev); 2386 + } 2387 + 2388 + up(&port->port_lock); 2389 + } 2390 + } 2391 + } 2392 + } 2393 + out: 2328 2394 return; 2329 2395 } 2330 2396 ··· 2671 2573 ehea_info("%s: Jumbo frames are %sabled", dev->name, 2672 2574 jumbo == 1 ? 
"en" : "dis"); 2673 2575 2576 + adapter->active_ports++; 2577 + 2674 2578 return port; 2675 2579 2676 2580 out_unreg_port: ··· 2696 2596 ehea_unregister_port(port); 2697 2597 kfree(port->mc_list); 2698 2598 free_netdev(port->netdev); 2599 + port->adapter->active_ports--; 2699 2600 } 2700 2601 2701 2602 static int ehea_setup_ports(struct ehea_adapter *adapter) ··· 2889 2788 goto out; 2890 2789 } 2891 2790 2791 + list_add(&adapter->list, &adapter_list); 2792 + 2892 2793 adapter->ebus_dev = dev; 2893 2794 2894 2795 adapter_handle = of_get_property(dev->ofdev.node, "ibm,hea-handle", ··· 2994 2891 2995 2892 ehea_destroy_eq(adapter->neq); 2996 2893 ehea_remove_adapter_mr(adapter); 2894 + list_del(&adapter->list); 2895 + 2997 2896 kfree(adapter); 2897 + 2998 2898 return 0; 2999 2899 } 3000 2900 ··· 3045 2939 printk(KERN_INFO "IBM eHEA ethernet device driver (Release %s)\n", 3046 2940 DRV_VERSION); 3047 2941 2942 + ehea_driver_wq = create_workqueue("ehea_driver_wq"); 2943 + 2944 + INIT_WORK(&ehea_rereg_mr_task, ehea_rereg_mrs); 2945 + 3048 2946 ret = check_module_parm(); 3049 2947 if (ret) 3050 2948 goto out; 2949 + 2950 + ret = ehea_create_busmap(); 2951 + if (ret) 2952 + goto out; 2953 + 3051 2954 ret = ibmebus_register_driver(&ehea_driver); 3052 2955 if (ret) { 3053 2956 ehea_error("failed registering eHEA device driver on ebus"); ··· 3080 2965 { 3081 2966 driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities); 3082 2967 ibmebus_unregister_driver(&ehea_driver); 2968 + ehea_destroy_busmap(); 3083 2969 } 3084 2970 3085 2971 module_init(ehea_module_init);
+3
drivers/net/ehea/ehea_phyp.h
··· 60 60 } 61 61 } 62 62 63 + /* Number of pages which can be registered at once by H_REGISTER_HEA_RPAGES */ 64 + #define EHEA_MAX_RPAGE 512 65 + 63 66 /* Notification Event Queue (NEQ) Entry bit masks */ 64 67 #define NEQE_EVENT_CODE EHEA_BMASK_IBM(2, 7) 65 68 #define NEQE_PORTNUM EHEA_BMASK_IBM(32, 47)
+109 -41
drivers/net/ehea/ehea_qmr.c
··· 31 31 #include "ehea_phyp.h" 32 32 #include "ehea_qmr.h" 33 33 34 + 35 + struct ehea_busmap ehea_bmap = { 0, 0, NULL }; 36 + extern u64 ehea_driver_flags; 37 + extern struct workqueue_struct *ehea_driver_wq; 38 + extern struct work_struct ehea_rereg_mr_task; 39 + 40 + 34 41 static void *hw_qpageit_get_inc(struct hw_queue *queue) 35 42 { 36 43 void *retvalue = hw_qeit_get(queue); ··· 554 547 return 0; 555 548 } 556 549 550 + int ehea_create_busmap( void ) 551 + { 552 + u64 vaddr = EHEA_BUSMAP_START; 553 + unsigned long abs_max_pfn = 0; 554 + unsigned long sec_max_pfn; 555 + int i; 556 + 557 + /* 558 + * Sections are not in ascending order -> Loop over all sections and 559 + * find the highest PFN to compute the required map size. 560 + */ 561 + ehea_bmap.valid_sections = 0; 562 + 563 + for (i = 0; i < NR_MEM_SECTIONS; i++) 564 + if (valid_section_nr(i)) { 565 + sec_max_pfn = section_nr_to_pfn(i); 566 + if (sec_max_pfn > abs_max_pfn) 567 + abs_max_pfn = sec_max_pfn; 568 + ehea_bmap.valid_sections++; 569 + } 570 + 571 + ehea_bmap.entries = abs_max_pfn / EHEA_PAGES_PER_SECTION + 1; 572 + ehea_bmap.vaddr = vmalloc(ehea_bmap.entries * sizeof(*ehea_bmap.vaddr)); 573 + 574 + if (!ehea_bmap.vaddr) 575 + return -ENOMEM; 576 + 577 + for (i = 0 ; i < ehea_bmap.entries; i++) { 578 + unsigned long pfn = section_nr_to_pfn(i); 579 + 580 + if (pfn_valid(pfn)) { 581 + ehea_bmap.vaddr[i] = vaddr; 582 + vaddr += EHEA_SECTSIZE; 583 + } else 584 + ehea_bmap.vaddr[i] = 0; 585 + } 586 + 587 + return 0; 588 + } 589 + 590 + void ehea_destroy_busmap( void ) 591 + { 592 + vfree(ehea_bmap.vaddr); 593 + } 594 + 595 + u64 ehea_map_vaddr(void *caddr) 596 + { 597 + u64 mapped_addr; 598 + unsigned long index = __pa(caddr) >> SECTION_SIZE_BITS; 599 + 600 + if (likely(index < ehea_bmap.entries)) { 601 + mapped_addr = ehea_bmap.vaddr[index]; 602 + if (likely(mapped_addr)) 603 + mapped_addr |= (((unsigned long)caddr) 604 + & (EHEA_SECTSIZE - 1)); 605 + else 606 + mapped_addr = -1; 607 + } else 608 + mapped_addr = -1; 609 + 610 + if (unlikely(mapped_addr == -1)) 611 + if (!test_and_set_bit(__EHEA_STOP_XFER, &ehea_driver_flags)) 612 + queue_work(ehea_driver_wq, &ehea_rereg_mr_task); 613 + 614 + return mapped_addr; 615 + } 616 + 557 617 int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr) 558 618 { 559 - int i, k, ret; 560 - u64 hret, pt_abs, start, end, nr_pages; 561 - u32 acc_ctrl = EHEA_MR_ACC_CTRL; 619 + int ret; 562 620 u64 *pt; 621 + void *pg; 622 + u64 hret, pt_abs, i, j, m, mr_len; 623 + u32 acc_ctrl = EHEA_MR_ACC_CTRL; 563 624 564 - start = KERNELBASE; 565 - end = (u64)high_memory; 566 - nr_pages = (end - start) / EHEA_PAGESIZE; 625 + mr_len = ehea_bmap.valid_sections * EHEA_SECTSIZE; 567 626 568 - pt = kzalloc(PAGE_SIZE, GFP_KERNEL); 627 + pt = kzalloc(EHEA_MAX_RPAGE * sizeof(u64), GFP_KERNEL); 569 628 if (!pt) { 570 629 ehea_error("no mem"); 571 630 ret = -ENOMEM; ··· 639 566 } 640 567 pt_abs = virt_to_abs(pt); 641 568 642 - hret = ehea_h_alloc_resource_mr(adapter->handle, start, end - start, 569 + hret = ehea_h_alloc_resource_mr(adapter->handle, 570 + EHEA_BUSMAP_START, mr_len, 643 571 acc_ctrl, adapter->pd, 644 572 &mr->handle, &mr->lkey); 645 573 if (hret != H_SUCCESS) { ··· 649 575 goto out; 650 576 } 651 577 652 - mr->vaddr = KERNELBASE; 653 - k = 0; 578 + for (i = 0 ; i < ehea_bmap.entries; i++) 579 + if (ehea_bmap.vaddr[i]) { 580 + void *sectbase = __va(i << SECTION_SIZE_BITS); 581 + unsigned long k = 0; 654 582 655 - while (nr_pages > 0) { 656 - if (nr_pages > 1) { 657 - u64 num_pages = 
min(nr_pages, (u64)512); 658 - for (i = 0; i < num_pages; i++) 659 - pt[i] = virt_to_abs((void*)(((u64)start) + 660 - ((k++) * 661 - EHEA_PAGESIZE))); 583 + for (j = 0; j < (PAGES_PER_SECTION / EHEA_MAX_RPAGE); 584 + j++) { 662 585 663 - hret = ehea_h_register_rpage_mr(adapter->handle, 664 - mr->handle, 0, 665 - 0, (u64)pt_abs, 666 - num_pages); 667 - nr_pages -= num_pages; 668 - } else { 669 - u64 abs_adr = virt_to_abs((void*)(((u64)start) + 670 - (k * EHEA_PAGESIZE))); 586 + for (m = 0; m < EHEA_MAX_RPAGE; m++) { 587 + pg = sectbase + ((k++) * EHEA_PAGESIZE); 588 + pt[m] = virt_to_abs(pg); 589 + } 671 590 672 - hret = ehea_h_register_rpage_mr(adapter->handle, 673 - mr->handle, 0, 674 - 0, abs_adr,1); 675 - nr_pages--; 591 + hret = ehea_h_register_rpage_mr(adapter->handle, 592 + mr->handle, 593 + 0, 0, pt_abs, 594 + EHEA_MAX_RPAGE); 595 + if ((hret != H_SUCCESS) 596 + && (hret != H_PAGE_REGISTERED)) { 597 + ehea_h_free_resource(adapter->handle, 598 + mr->handle, 599 + FORCE_FREE); 600 + ehea_error("register_rpage_mr failed"); 601 + ret = -EIO; 602 + goto out; 603 + } 604 + } 676 605 } 677 - 678 - if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED)) { 679 - ehea_h_free_resource(adapter->handle, 680 - mr->handle, FORCE_FREE); 681 - ehea_error("register_rpage_mr failed"); 682 - ret = -EIO; 683 - goto out; 684 - } 685 - } 686 606 687 607 if (hret != H_SUCCESS) { 688 - ehea_h_free_resource(adapter->handle, mr->handle, 689 - FORCE_FREE); 690 - ehea_error("register_rpage failed for last page"); 608 + ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE); 609 + ehea_error("registering mr failed"); 691 610 ret = -EIO; 692 611 goto out; 693 612 } 694 613 614 + mr->vaddr = EHEA_BUSMAP_START; 695 615 mr->adapter = adapter; 696 616 ret = 0; 697 617 out:
+12 -2
drivers/net/ehea/ehea_qmr.h
··· 36 36 * page size of ehea hardware queues 37 37 */ 38 38 39 - #define EHEA_PAGESHIFT 12 40 - #define EHEA_PAGESIZE 4096UL 39 + #define EHEA_PAGESHIFT 12 40 + #define EHEA_PAGESIZE (1UL << EHEA_PAGESHIFT) 41 + #define EHEA_SECTSIZE (1UL << 24) 42 + #define EHEA_PAGES_PER_SECTION (EHEA_SECTSIZE >> PAGE_SHIFT) 43 + 44 + #if (1UL << SECTION_SIZE_BITS) < EHEA_SECTSIZE 45 + #error eHEA module can't work if kernel sectionsize < ehea sectionsize 46 + #endif 41 47 42 48 /* Some abbreviations used here: 43 49 * ··· 377 371 int ehea_rem_mr(struct ehea_mr *mr); 378 372 379 373 void ehea_error_data(struct ehea_adapter *adapter, u64 res_handle); 374 + 375 + int ehea_create_busmap( void ); 376 + void ehea_destroy_busmap( void ); 377 + u64 ehea_map_vaddr(void *caddr); 380 378 381 379 #endif /* __EHEA_QMR_H__ */
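The busmap introduced above packs only the valid 16 MB memory sections (EHEA_SECTSIZE) into one contiguous bus address range, and ehea_map_vaddr keeps the offset within a section while swapping in that section's bus base. A minimal userspace sketch of that translation, using a made-up four-entry map and an arbitrary base address rather than the driver's real EHEA_BUSMAP_START, could look like this:

#include <stdio.h>
#include <stdint.h>

#define EHEA_SECTSIZE     (1ULL << 24)   /* 16 MB per memory section */
#define SECTION_SIZE_BITS 24             /* assumed equal to log2(EHEA_SECTSIZE) */

/* toy busmap: one bus base per section, 0 = section not present (made-up values) */
static const uint64_t bmap_vaddr[4] = {
	0x8000000000ULL, 0, 0x8001000000ULL, 0x8002000000ULL
};

/* translate an address into the packed eHEA bus space, -1 if unmapped */
static uint64_t map_addr(uint64_t phys)
{
	uint64_t idx = phys >> SECTION_SIZE_BITS;

	if (idx >= 4 || !bmap_vaddr[idx])
		return (uint64_t)-1;

	/* keep the offset inside the section, swap in the section's bus base */
	return bmap_vaddr[idx] | (phys & (EHEA_SECTSIZE - 1));
}

int main(void)
{
	printf("0x%llx\n", (unsigned long long)map_addr(0x2001234ULL)); /* section 2 is mapped */
	printf("0x%llx\n", (unsigned long long)map_addr(0x1000000ULL)); /* section 1 is not */
	return 0;
}

In the driver, an unmapped address likewise yields -1, which is what lets ehea_map_vaddr kick off the ehea_rereg_mr_task work when memory has been added or removed underneath the registered MR.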
+152 -10
drivers/net/forcedeth.c
··· 550 550 /* PHY defines */ 551 551 #define PHY_OUI_MARVELL 0x5043 552 552 #define PHY_OUI_CICADA 0x03f1 553 + #define PHY_OUI_VITESSE 0x01c1 554 + #define PHY_OUI_REALTEK 0x0732 553 555 #define PHYID1_OUI_MASK 0x03ff 554 556 #define PHYID1_OUI_SHFT 6 555 557 #define PHYID2_OUI_MASK 0xfc00 ··· 559 557 #define PHYID2_MODEL_MASK 0x03f0 560 558 #define PHY_MODEL_MARVELL_E3016 0x220 561 559 #define PHY_MARVELL_E3016_INITMASK 0x0300 562 - #define PHY_INIT1 0x0f000 563 - #define PHY_INIT2 0x0e00 564 - #define PHY_INIT3 0x01000 565 - #define PHY_INIT4 0x0200 566 - #define PHY_INIT5 0x0004 567 - #define PHY_INIT6 0x02000 560 + #define PHY_CICADA_INIT1 0x0f000 561 + #define PHY_CICADA_INIT2 0x0e00 562 + #define PHY_CICADA_INIT3 0x01000 563 + #define PHY_CICADA_INIT4 0x0200 564 + #define PHY_CICADA_INIT5 0x0004 565 + #define PHY_CICADA_INIT6 0x02000 566 + #define PHY_VITESSE_INIT_REG1 0x1f 567 + #define PHY_VITESSE_INIT_REG2 0x10 568 + #define PHY_VITESSE_INIT_REG3 0x11 569 + #define PHY_VITESSE_INIT_REG4 0x12 570 + #define PHY_VITESSE_INIT_MSK1 0xc 571 + #define PHY_VITESSE_INIT_MSK2 0x0180 572 + #define PHY_VITESSE_INIT1 0x52b5 573 + #define PHY_VITESSE_INIT2 0xaf8a 574 + #define PHY_VITESSE_INIT3 0x8 575 + #define PHY_VITESSE_INIT4 0x8f8a 576 + #define PHY_VITESSE_INIT5 0xaf86 577 + #define PHY_VITESSE_INIT6 0x8f86 578 + #define PHY_VITESSE_INIT7 0xaf82 579 + #define PHY_VITESSE_INIT8 0x0100 580 + #define PHY_VITESSE_INIT9 0x8f82 581 + #define PHY_VITESSE_INIT10 0x0 582 + #define PHY_REALTEK_INIT_REG1 0x1f 583 + #define PHY_REALTEK_INIT_REG2 0x19 584 + #define PHY_REALTEK_INIT_REG3 0x13 585 + #define PHY_REALTEK_INIT1 0x0000 586 + #define PHY_REALTEK_INIT2 0x8e00 587 + #define PHY_REALTEK_INIT3 0x0001 588 + #define PHY_REALTEK_INIT4 0xad17 589 + 568 590 #define PHY_GIGABIT 0x0100 569 591 570 592 #define PHY_TIMEOUT 0x1 ··· 1122 1096 return PHY_ERROR; 1123 1097 } 1124 1098 } 1099 + if (np->phy_oui == PHY_OUI_REALTEK) { 1100 + if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) { 1101 + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1102 + return PHY_ERROR; 1103 + } 1104 + if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) { 1105 + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1106 + return PHY_ERROR; 1107 + } 1108 + if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) { 1109 + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1110 + return PHY_ERROR; 1111 + } 1112 + if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) { 1113 + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1114 + return PHY_ERROR; 1115 + } 1116 + if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) { 1117 + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1118 + return PHY_ERROR; 1119 + } 1120 + } 1125 1121 1126 1122 /* set advertise register */ 1127 1123 reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); ··· 1189 1141 /* phy vendor specific configuration */ 1190 1142 if ((np->phy_oui == PHY_OUI_CICADA) && (phyinterface & PHY_RGMII) ) { 1191 1143 phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ); 1192 - phy_reserved &= ~(PHY_INIT1 | PHY_INIT2); 1193 - phy_reserved |= (PHY_INIT3 | PHY_INIT4); 1144 + phy_reserved &= ~(PHY_CICADA_INIT1 | PHY_CICADA_INIT2); 1145 + phy_reserved |= (PHY_CICADA_INIT3 | PHY_CICADA_INIT4); 1194 1146 if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved)) { 1195 1147 printk(KERN_INFO "%s: phy init 
failed.\n", pci_name(np->pci_dev)); 1196 1148 return PHY_ERROR; 1197 1149 } 1198 1150 phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ); 1199 - phy_reserved |= PHY_INIT5; 1151 + phy_reserved |= PHY_CICADA_INIT5; 1200 1152 if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved)) { 1201 1153 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1202 1154 return PHY_ERROR; ··· 1204 1156 } 1205 1157 if (np->phy_oui == PHY_OUI_CICADA) { 1206 1158 phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ); 1207 - phy_reserved |= PHY_INIT6; 1159 + phy_reserved |= PHY_CICADA_INIT6; 1208 1160 if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved)) { 1209 1161 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1210 1162 return PHY_ERROR; 1211 1163 } 1212 1164 } 1165 + if (np->phy_oui == PHY_OUI_VITESSE) { 1166 + if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT1)) { 1167 + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1168 + return PHY_ERROR; 1169 + } 1170 + if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT2)) { 1171 + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1172 + return PHY_ERROR; 1173 + } 1174 + phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ); 1175 + if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) { 1176 + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1177 + return PHY_ERROR; 1178 + } 1179 + phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ); 1180 + phy_reserved &= ~PHY_VITESSE_INIT_MSK1; 1181 + phy_reserved |= PHY_VITESSE_INIT3; 1182 + if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) { 1183 + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1184 + return PHY_ERROR; 1185 + } 1186 + if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT4)) { 1187 + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1188 + return PHY_ERROR; 1189 + } 1190 + if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT5)) { 1191 + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1192 + return PHY_ERROR; 1193 + } 1194 + phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ); 1195 + phy_reserved &= ~PHY_VITESSE_INIT_MSK1; 1196 + phy_reserved |= PHY_VITESSE_INIT3; 1197 + if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) { 1198 + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1199 + return PHY_ERROR; 1200 + } 1201 + phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ); 1202 + if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) { 1203 + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1204 + return PHY_ERROR; 1205 + } 1206 + if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT6)) { 1207 + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1208 + return PHY_ERROR; 1209 + } 1210 + if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT7)) { 1211 + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1212 + return PHY_ERROR; 1213 + } 1214 + phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ); 1215 + if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) { 1216 + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1217 + return PHY_ERROR; 1218 + } 1219 + phy_reserved = mii_rw(dev, np->phyaddr, 
PHY_VITESSE_INIT_REG3, MII_READ); 1220 + phy_reserved &= ~PHY_VITESSE_INIT_MSK2; 1221 + phy_reserved |= PHY_VITESSE_INIT8; 1222 + if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) { 1223 + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1224 + return PHY_ERROR; 1225 + } 1226 + if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT9)) { 1227 + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1228 + return PHY_ERROR; 1229 + } 1230 + if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT10)) { 1231 + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1232 + return PHY_ERROR; 1233 + } 1234 + } 1235 + if (np->phy_oui == PHY_OUI_REALTEK) { 1236 + /* reset could have cleared these out, set them back */ 1237 + if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) { 1238 + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1239 + return PHY_ERROR; 1240 + } 1241 + if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) { 1242 + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1243 + return PHY_ERROR; 1244 + } 1245 + if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) { 1246 + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1247 + return PHY_ERROR; 1248 + } 1249 + if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) { 1250 + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1251 + return PHY_ERROR; 1252 + } 1253 + if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) { 1254 + printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1255 + return PHY_ERROR; 1256 + } 1257 + } 1258 + 1213 1259 /* some phys clear out pause advertisment on reset, set it back */ 1214 1260 mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg); 1215 1261
-1
drivers/net/gianfar_mii.c
··· 31 31 #include <linux/mm.h> 32 32 #include <linux/module.h> 33 33 #include <linux/platform_device.h> 34 - #include <asm/ocp.h> 35 34 #include <linux/crc32.h> 36 35 #include <linux/mii.h> 37 36 #include <linux/phy.h>
+324 -255
drivers/net/macb.c
··· 17 17 #include <linux/init.h> 18 18 #include <linux/netdevice.h> 19 19 #include <linux/etherdevice.h> 20 - #include <linux/mii.h> 21 - #include <linux/mutex.h> 22 20 #include <linux/dma-mapping.h> 23 - #include <linux/ethtool.h> 24 21 #include <linux/platform_device.h> 22 + #include <linux/phy.h> 25 23 26 24 #include <asm/arch/board.h> 25 + #include <asm/arch/cpu.h> 27 26 28 27 #include "macb.h" 29 28 ··· 84 85 memcpy(bp->dev->dev_addr, addr, sizeof(addr)); 85 86 } 86 87 87 - static void macb_enable_mdio(struct macb *bp) 88 + static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum) 88 89 { 89 - unsigned long flags; 90 - u32 reg; 91 - 92 - spin_lock_irqsave(&bp->lock, flags); 93 - reg = macb_readl(bp, NCR); 94 - reg |= MACB_BIT(MPE); 95 - macb_writel(bp, NCR, reg); 96 - macb_writel(bp, IER, MACB_BIT(MFD)); 97 - spin_unlock_irqrestore(&bp->lock, flags); 98 - } 99 - 100 - static void macb_disable_mdio(struct macb *bp) 101 - { 102 - unsigned long flags; 103 - u32 reg; 104 - 105 - spin_lock_irqsave(&bp->lock, flags); 106 - reg = macb_readl(bp, NCR); 107 - reg &= ~MACB_BIT(MPE); 108 - macb_writel(bp, NCR, reg); 109 - macb_writel(bp, IDR, MACB_BIT(MFD)); 110 - spin_unlock_irqrestore(&bp->lock, flags); 111 - } 112 - 113 - static int macb_mdio_read(struct net_device *dev, int phy_id, int location) 114 - { 115 - struct macb *bp = netdev_priv(dev); 90 + struct macb *bp = bus->priv; 116 91 int value; 117 92 118 - mutex_lock(&bp->mdio_mutex); 119 - 120 - macb_enable_mdio(bp); 121 93 macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF) 122 94 | MACB_BF(RW, MACB_MAN_READ) 123 - | MACB_BF(PHYA, phy_id) 124 - | MACB_BF(REGA, location) 95 + | MACB_BF(PHYA, mii_id) 96 + | MACB_BF(REGA, regnum) 125 97 | MACB_BF(CODE, MACB_MAN_CODE))); 126 98 127 - wait_for_completion(&bp->mdio_complete); 99 + /* wait for end of transfer */ 100 + while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR))) 101 + cpu_relax(); 128 102 129 103 value = MACB_BFEXT(DATA, macb_readl(bp, MAN)); 130 - macb_disable_mdio(bp); 131 - mutex_unlock(&bp->mdio_mutex); 132 104 133 105 return value; 134 106 } 135 107 136 - static void macb_mdio_write(struct net_device *dev, int phy_id, 137 - int location, int val) 108 + static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum, 109 + u16 value) 138 110 { 139 - struct macb *bp = netdev_priv(dev); 140 - 141 - dev_dbg(&bp->pdev->dev, "mdio_write %02x:%02x <- %04x\n", 142 - phy_id, location, val); 143 - 144 - mutex_lock(&bp->mdio_mutex); 145 - macb_enable_mdio(bp); 111 + struct macb *bp = bus->priv; 146 112 147 113 macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF) 148 114 | MACB_BF(RW, MACB_MAN_WRITE) 149 - | MACB_BF(PHYA, phy_id) 150 - | MACB_BF(REGA, location) 115 + | MACB_BF(PHYA, mii_id) 116 + | MACB_BF(REGA, regnum) 151 117 | MACB_BF(CODE, MACB_MAN_CODE) 152 - | MACB_BF(DATA, val))); 118 + | MACB_BF(DATA, value))); 153 119 154 - wait_for_completion(&bp->mdio_complete); 120 + /* wait for end of transfer */ 121 + while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR))) 122 + cpu_relax(); 155 123 156 - macb_disable_mdio(bp); 157 - mutex_unlock(&bp->mdio_mutex); 158 - } 159 - 160 - static int macb_phy_probe(struct macb *bp) 161 - { 162 - int phy_address; 163 - u16 phyid1, phyid2; 164 - 165 - for (phy_address = 0; phy_address < 32; phy_address++) { 166 - phyid1 = macb_mdio_read(bp->dev, phy_address, MII_PHYSID1); 167 - phyid2 = macb_mdio_read(bp->dev, phy_address, MII_PHYSID2); 168 - 169 - if (phyid1 != 0xffff && phyid1 != 0x0000 170 - && phyid2 != 0xffff && phyid2 != 0x0000) 171 - break; 172 - } 
173 - 174 - if (phy_address == 32) 175 - return -ENODEV; 176 - 177 - dev_info(&bp->pdev->dev, 178 - "detected PHY at address %d (ID %04x:%04x)\n", 179 - phy_address, phyid1, phyid2); 180 - 181 - bp->mii.phy_id = phy_address; 182 124 return 0; 183 125 } 184 126 185 - static void macb_set_media(struct macb *bp, int media) 127 + static int macb_mdio_reset(struct mii_bus *bus) 186 128 { 187 - u32 reg; 188 - 189 - spin_lock_irq(&bp->lock); 190 - reg = macb_readl(bp, NCFGR); 191 - reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD)); 192 - if (media & (ADVERTISE_100HALF | ADVERTISE_100FULL)) 193 - reg |= MACB_BIT(SPD); 194 - if (media & ADVERTISE_FULL) 195 - reg |= MACB_BIT(FD); 196 - macb_writel(bp, NCFGR, reg); 197 - spin_unlock_irq(&bp->lock); 129 + return 0; 198 130 } 199 131 200 - static void macb_check_media(struct macb *bp, int ok_to_print, int init_media) 132 + static void macb_handle_link_change(struct net_device *dev) 201 133 { 202 - struct mii_if_info *mii = &bp->mii; 203 - unsigned int old_carrier, new_carrier; 204 - int advertise, lpa, media, duplex; 134 + struct macb *bp = netdev_priv(dev); 135 + struct phy_device *phydev = bp->phy_dev; 136 + unsigned long flags; 205 137 206 - /* if forced media, go no further */ 207 - if (mii->force_media) 208 - return; 138 + int status_change = 0; 209 139 210 - /* check current and old link status */ 211 - old_carrier = netif_carrier_ok(mii->dev) ? 1 : 0; 212 - new_carrier = (unsigned int) mii_link_ok(mii); 140 + spin_lock_irqsave(&bp->lock, flags); 213 141 214 - /* if carrier state did not change, assume nothing else did */ 215 - if (!init_media && old_carrier == new_carrier) 216 - return; 142 + if (phydev->link) { 143 + if ((bp->speed != phydev->speed) || 144 + (bp->duplex != phydev->duplex)) { 145 + u32 reg; 217 146 218 - /* no carrier, nothing much to do */ 219 - if (!new_carrier) { 220 - netif_carrier_off(mii->dev); 221 - printk(KERN_INFO "%s: link down\n", mii->dev->name); 222 - return; 147 + reg = macb_readl(bp, NCFGR); 148 + reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD)); 149 + 150 + if (phydev->duplex) 151 + reg |= MACB_BIT(FD); 152 + if (phydev->speed) 153 + reg |= MACB_BIT(SPD); 154 + 155 + macb_writel(bp, NCFGR, reg); 156 + 157 + bp->speed = phydev->speed; 158 + bp->duplex = phydev->duplex; 159 + status_change = 1; 160 + } 223 161 } 224 162 225 - /* 226 - * we have carrier, see who's on the other end 227 - */ 228 - netif_carrier_on(mii->dev); 163 + if (phydev->link != bp->link) { 164 + if (phydev->link) 165 + netif_schedule(dev); 166 + else { 167 + bp->speed = 0; 168 + bp->duplex = -1; 169 + } 170 + bp->link = phydev->link; 229 171 230 - /* get MII advertise and LPA values */ 231 - if (!init_media && mii->advertising) { 232 - advertise = mii->advertising; 172 + status_change = 1; 173 + } 174 + 175 + spin_unlock_irqrestore(&bp->lock, flags); 176 + 177 + if (status_change) { 178 + if (phydev->link) 179 + printk(KERN_INFO "%s: link up (%d/%s)\n", 180 + dev->name, phydev->speed, 181 + DUPLEX_FULL == phydev->duplex ? "Full":"Half"); 182 + else 183 + printk(KERN_INFO "%s: link down\n", dev->name); 184 + } 185 + } 186 + 187 + /* based on au1000_eth. 
c*/ 188 + static int macb_mii_probe(struct net_device *dev) 189 + { 190 + struct macb *bp = netdev_priv(dev); 191 + struct phy_device *phydev = NULL; 192 + struct eth_platform_data *pdata; 193 + int phy_addr; 194 + 195 + /* find the first phy */ 196 + for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) { 197 + if (bp->mii_bus.phy_map[phy_addr]) { 198 + phydev = bp->mii_bus.phy_map[phy_addr]; 199 + break; 200 + } 201 + } 202 + 203 + if (!phydev) { 204 + printk (KERN_ERR "%s: no PHY found\n", dev->name); 205 + return -1; 206 + } 207 + 208 + pdata = bp->pdev->dev.platform_data; 209 + /* TODO : add pin_irq */ 210 + 211 + /* attach the mac to the phy */ 212 + if (pdata && pdata->is_rmii) { 213 + phydev = phy_connect(dev, phydev->dev.bus_id, 214 + &macb_handle_link_change, 0, PHY_INTERFACE_MODE_RMII); 233 215 } else { 234 - advertise = mii->mdio_read(mii->dev, mii->phy_id, MII_ADVERTISE); 235 - mii->advertising = advertise; 216 + phydev = phy_connect(dev, phydev->dev.bus_id, 217 + &macb_handle_link_change, 0, PHY_INTERFACE_MODE_MII); 236 218 } 237 - lpa = mii->mdio_read(mii->dev, mii->phy_id, MII_LPA); 238 219 239 - /* figure out media and duplex from advertise and LPA values */ 240 - media = mii_nway_result(lpa & advertise); 241 - duplex = (media & ADVERTISE_FULL) ? 1 : 0; 220 + if (IS_ERR(phydev)) { 221 + printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name); 222 + return PTR_ERR(phydev); 223 + } 242 224 243 - if (ok_to_print) 244 - printk(KERN_INFO "%s: link up, %sMbps, %s-duplex, lpa 0x%04X\n", 245 - mii->dev->name, 246 - media & (ADVERTISE_100FULL | ADVERTISE_100HALF) ? "100" : "10", 247 - duplex ? "full" : "half", lpa); 225 + /* mask with MAC supported features */ 226 + phydev->supported &= PHY_BASIC_FEATURES; 248 227 249 - mii->full_duplex = duplex; 228 + phydev->advertising = phydev->supported; 250 229 251 - /* Let the MAC know about the new link state */ 252 - macb_set_media(bp, media); 230 + bp->link = 0; 231 + bp->speed = 0; 232 + bp->duplex = -1; 233 + bp->phy_dev = phydev; 234 + 235 + return 0; 236 + } 237 + 238 + static int macb_mii_init(struct macb *bp) 239 + { 240 + struct eth_platform_data *pdata; 241 + int err = -ENXIO, i; 242 + 243 + /* Enable managment port */ 244 + macb_writel(bp, NCR, MACB_BIT(MPE)); 245 + 246 + bp->mii_bus.name = "MACB_mii_bus", 247 + bp->mii_bus.read = &macb_mdio_read, 248 + bp->mii_bus.write = &macb_mdio_write, 249 + bp->mii_bus.reset = &macb_mdio_reset, 250 + bp->mii_bus.id = bp->pdev->id, 251 + bp->mii_bus.priv = bp, 252 + bp->mii_bus.dev = &bp->dev->dev; 253 + pdata = bp->pdev->dev.platform_data; 254 + 255 + if (pdata) 256 + bp->mii_bus.phy_mask = pdata->phy_mask; 257 + 258 + bp->mii_bus.irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL); 259 + if (!bp->mii_bus.irq) { 260 + err = -ENOMEM; 261 + goto err_out; 262 + } 263 + 264 + for (i = 0; i < PHY_MAX_ADDR; i++) 265 + bp->mii_bus.irq[i] = PHY_POLL; 266 + 267 + platform_set_drvdata(bp->dev, &bp->mii_bus); 268 + 269 + if (mdiobus_register(&bp->mii_bus)) 270 + goto err_out_free_mdio_irq; 271 + 272 + if (macb_mii_probe(bp->dev) != 0) { 273 + goto err_out_unregister_bus; 274 + } 275 + 276 + return 0; 277 + 278 + err_out_unregister_bus: 279 + mdiobus_unregister(&bp->mii_bus); 280 + err_out_free_mdio_irq: 281 + kfree(bp->mii_bus.irq); 282 + err_out: 283 + return err; 253 284 } 254 285 255 286 static void macb_update_stats(struct macb *bp) ··· 292 263 293 264 for(; p < end; p++, reg++) 294 265 *p += __raw_readl(reg); 295 - } 296 - 297 - static void macb_periodic_task(struct work_struct *work) 298 - 
{ 299 - struct macb *bp = container_of(work, struct macb, periodic_task.work); 300 - 301 - macb_update_stats(bp); 302 - macb_check_media(bp, 1, 0); 303 - 304 - schedule_delayed_work(&bp->periodic_task, HZ); 305 266 } 306 267 307 268 static void macb_tx(struct macb *bp) ··· 538 519 spin_lock(&bp->lock); 539 520 540 521 while (status) { 541 - if (status & MACB_BIT(MFD)) 542 - complete(&bp->mdio_complete); 543 - 544 522 /* close possible race with dev_close */ 545 523 if (unlikely(!netif_running(dev))) { 546 524 macb_writel(bp, IDR, ~0UL); ··· 551 535 * until we have processed the buffers 552 536 */ 553 537 macb_writel(bp, IDR, MACB_RX_INT_FLAGS); 554 - dev_dbg(&bp->pdev->dev, "scheduling RX softirq\n"); 538 + dev_dbg(&bp->pdev->dev, 539 + "scheduling RX softirq\n"); 555 540 __netif_rx_schedule(dev); 556 541 } 557 542 } ··· 782 765 macb_writel(bp, TBQP, bp->tx_ring_dma); 783 766 784 767 /* Enable TX and RX */ 785 - macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE)); 768 + macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE)); 786 769 787 770 /* Enable interrupts */ 788 771 macb_writel(bp, IER, (MACB_BIT(RCOMP) ··· 793 776 | MACB_BIT(TCOMP) 794 777 | MACB_BIT(ISR_ROVR) 795 778 | MACB_BIT(HRESP))); 779 + 796 780 } 797 781 798 - static void macb_init_phy(struct net_device *dev) 782 + /* 783 + * The hash address register is 64 bits long and takes up two 784 + * locations in the memory map. The least significant bits are stored 785 + * in EMAC_HSL and the most significant bits in EMAC_HSH. 786 + * 787 + * The unicast hash enable and the multicast hash enable bits in the 788 + * network configuration register enable the reception of hash matched 789 + * frames. The destination address is reduced to a 6 bit index into 790 + * the 64 bit hash register using the following hash function. The 791 + * hash function is an exclusive or of every sixth bit of the 792 + * destination address. 793 + * 794 + * hi[5] = da[5] ^ da[11] ^ da[17] ^ da[23] ^ da[29] ^ da[35] ^ da[41] ^ da[47] 795 + * hi[4] = da[4] ^ da[10] ^ da[16] ^ da[22] ^ da[28] ^ da[34] ^ da[40] ^ da[46] 796 + * hi[3] = da[3] ^ da[09] ^ da[15] ^ da[21] ^ da[27] ^ da[33] ^ da[39] ^ da[45] 797 + * hi[2] = da[2] ^ da[08] ^ da[14] ^ da[20] ^ da[26] ^ da[32] ^ da[38] ^ da[44] 798 + * hi[1] = da[1] ^ da[07] ^ da[13] ^ da[19] ^ da[25] ^ da[31] ^ da[37] ^ da[43] 799 + * hi[0] = da[0] ^ da[06] ^ da[12] ^ da[18] ^ da[24] ^ da[30] ^ da[36] ^ da[42] 800 + * 801 + * da[0] represents the least significant bit of the first byte 802 + * received, that is, the multicast/unicast indicator, and da[47] 803 + * represents the most significant bit of the last byte received. If 804 + * the hash index, hi[n], points to a bit that is set in the hash 805 + * register then the frame will be matched according to whether the 806 + * frame is multicast or unicast. A multicast match will be signalled 807 + * if the multicast hash enable bit is set, da[0] is 1 and the hash 808 + * index points to a bit set in the hash register. A unicast match 809 + * will be signalled if the unicast hash enable bit is set, da[0] is 0 810 + * and the hash index points to a bit set in the hash register. To 811 + * receive all multicast frames, the hash register should be set with 812 + * all ones and the multicast hash enable bit should be set in the 813 + * network configuration register. 
814 + */ 815 + 816 + static inline int hash_bit_value(int bitnr, __u8 *addr) 799 817 { 818 + if (addr[bitnr / 8] & (1 << (bitnr % 8))) 819 + return 1; 820 + return 0; 821 + } 822 + 823 + /* 824 + * Return the hash index value for the specified address. 825 + */ 826 + static int hash_get_index(__u8 *addr) 827 + { 828 + int i, j, bitval; 829 + int hash_index = 0; 830 + 831 + for (j = 0; j < 6; j++) { 832 + for (i = 0, bitval = 0; i < 8; i++) 833 + bitval ^= hash_bit_value(i*6 + j, addr); 834 + 835 + hash_index |= (bitval << j); 836 + } 837 + 838 + return hash_index; 839 + } 840 + 841 + /* 842 + * Add multicast addresses to the internal multicast-hash table. 843 + */ 844 + static void macb_sethashtable(struct net_device *dev) 845 + { 846 + struct dev_mc_list *curr; 847 + unsigned long mc_filter[2]; 848 + unsigned int i, bitnr; 800 849 struct macb *bp = netdev_priv(dev); 801 850 802 - /* Set some reasonable default settings */ 803 - macb_mdio_write(dev, bp->mii.phy_id, MII_ADVERTISE, 804 - ADVERTISE_CSMA | ADVERTISE_ALL); 805 - macb_mdio_write(dev, bp->mii.phy_id, MII_BMCR, 806 - (BMCR_SPEED100 | BMCR_ANENABLE 807 - | BMCR_ANRESTART | BMCR_FULLDPLX)); 851 + mc_filter[0] = mc_filter[1] = 0; 852 + 853 + curr = dev->mc_list; 854 + for (i = 0; i < dev->mc_count; i++, curr = curr->next) { 855 + if (!curr) break; /* unexpected end of list */ 856 + 857 + bitnr = hash_get_index(curr->dmi_addr); 858 + mc_filter[bitnr >> 5] |= 1 << (bitnr & 31); 859 + } 860 + 861 + macb_writel(bp, HRB, mc_filter[0]); 862 + macb_writel(bp, HRT, mc_filter[1]); 863 + } 864 + 865 + /* 866 + * Enable/Disable promiscuous and multicast modes. 867 + */ 868 + static void macb_set_rx_mode(struct net_device *dev) 869 + { 870 + unsigned long cfg; 871 + struct macb *bp = netdev_priv(dev); 872 + 873 + cfg = macb_readl(bp, NCFGR); 874 + 875 + if (dev->flags & IFF_PROMISC) 876 + /* Enable promiscuous mode */ 877 + cfg |= MACB_BIT(CAF); 878 + else if (dev->flags & (~IFF_PROMISC)) 879 + /* Disable promiscuous mode */ 880 + cfg &= ~MACB_BIT(CAF); 881 + 882 + if (dev->flags & IFF_ALLMULTI) { 883 + /* Enable all multicast mode */ 884 + macb_writel(bp, HRB, -1); 885 + macb_writel(bp, HRT, -1); 886 + cfg |= MACB_BIT(NCFGR_MTI); 887 + } else if (dev->mc_count > 0) { 888 + /* Enable specific multicasts */ 889 + macb_sethashtable(dev); 890 + cfg |= MACB_BIT(NCFGR_MTI); 891 + } else if (dev->flags & (~IFF_ALLMULTI)) { 892 + /* Disable all multicast mode */ 893 + macb_writel(bp, HRB, 0); 894 + macb_writel(bp, HRT, 0); 895 + cfg &= ~MACB_BIT(NCFGR_MTI); 896 + } 897 + 898 + macb_writel(bp, NCFGR, cfg); 808 899 } 809 900 810 901 static int macb_open(struct net_device *dev) ··· 921 796 int err; 922 797 923 798 dev_dbg(&bp->pdev->dev, "open\n"); 799 + 800 + /* if the phy is not yet register, retry later*/ 801 + if (!bp->phy_dev) 802 + return -EAGAIN; 924 803 925 804 if (!is_valid_ether_addr(dev->dev_addr)) 926 805 return -EADDRNOTAVAIL; ··· 939 810 940 811 macb_init_rings(bp); 941 812 macb_init_hw(bp); 942 - macb_init_phy(dev); 943 813 944 - macb_check_media(bp, 1, 1); 814 + /* schedule a link state check */ 815 + phy_start(bp->phy_dev); 816 + 945 817 netif_start_queue(dev); 946 - 947 - schedule_delayed_work(&bp->periodic_task, HZ); 948 818 949 819 return 0; 950 820 } ··· 953 825 struct macb *bp = netdev_priv(dev); 954 826 unsigned long flags; 955 827 956 - cancel_rearming_delayed_work(&bp->periodic_task); 957 - 958 828 netif_stop_queue(dev); 829 + 830 + if (bp->phy_dev) 831 + phy_stop(bp->phy_dev); 959 832 960 833 spin_lock_irqsave(&bp->lock, flags); 
961 834 macb_reset_hw(bp); ··· 973 844 struct macb *bp = netdev_priv(dev); 974 845 struct net_device_stats *nstat = &bp->stats; 975 846 struct macb_stats *hwstat = &bp->hw_stats; 847 + 848 + /* read stats from hardware */ 849 + macb_update_stats(bp); 976 850 977 851 /* Convert HW stats into netdevice stats */ 978 852 nstat->rx_errors = (hwstat->rx_fcs_errors + ··· 1014 882 static int macb_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 1015 883 { 1016 884 struct macb *bp = netdev_priv(dev); 885 + struct phy_device *phydev = bp->phy_dev; 1017 886 1018 - return mii_ethtool_gset(&bp->mii, cmd); 887 + if (!phydev) 888 + return -ENODEV; 889 + 890 + return phy_ethtool_gset(phydev, cmd); 1019 891 } 1020 892 1021 893 static int macb_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 1022 894 { 1023 895 struct macb *bp = netdev_priv(dev); 896 + struct phy_device *phydev = bp->phy_dev; 1024 897 1025 - return mii_ethtool_sset(&bp->mii, cmd); 898 + if (!phydev) 899 + return -ENODEV; 900 + 901 + return phy_ethtool_sset(phydev, cmd); 1026 902 } 1027 903 1028 - static void macb_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 904 + static void macb_get_drvinfo(struct net_device *dev, 905 + struct ethtool_drvinfo *info) 1029 906 { 1030 907 struct macb *bp = netdev_priv(dev); 1031 908 ··· 1043 902 strcpy(info->bus_info, bp->pdev->dev.bus_id); 1044 903 } 1045 904 1046 - static int macb_nway_reset(struct net_device *dev) 1047 - { 1048 - struct macb *bp = netdev_priv(dev); 1049 - return mii_nway_restart(&bp->mii); 1050 - } 1051 - 1052 905 static struct ethtool_ops macb_ethtool_ops = { 1053 906 .get_settings = macb_get_settings, 1054 907 .set_settings = macb_set_settings, 1055 908 .get_drvinfo = macb_get_drvinfo, 1056 - .nway_reset = macb_nway_reset, 1057 909 .get_link = ethtool_op_get_link, 1058 910 }; 1059 911 1060 912 static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 1061 913 { 1062 914 struct macb *bp = netdev_priv(dev); 915 + struct phy_device *phydev = bp->phy_dev; 1063 916 1064 917 if (!netif_running(dev)) 1065 918 return -EINVAL; 1066 919 1067 - return generic_mii_ioctl(&bp->mii, if_mii(rq), cmd, NULL); 920 + if (!phydev) 921 + return -ENODEV; 922 + 923 + return phy_mii_ioctl(phydev, if_mii(rq), cmd); 1068 924 } 1069 925 1070 - static ssize_t macb_mii_show(const struct device *_dev, char *buf, 1071 - unsigned long addr) 1072 - { 1073 - struct net_device *dev = to_net_dev(_dev); 1074 - struct macb *bp = netdev_priv(dev); 1075 - ssize_t ret = -EINVAL; 1076 - 1077 - if (netif_running(dev)) { 1078 - int value; 1079 - value = macb_mdio_read(dev, bp->mii.phy_id, addr); 1080 - ret = sprintf(buf, "0x%04x\n", (uint16_t)value); 1081 - } 1082 - 1083 - return ret; 1084 - } 1085 - 1086 - #define MII_ENTRY(name, addr) \ 1087 - static ssize_t show_##name(struct device *_dev, \ 1088 - struct device_attribute *attr, \ 1089 - char *buf) \ 1090 - { \ 1091 - return macb_mii_show(_dev, buf, addr); \ 1092 - } \ 1093 - static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL) 1094 - 1095 - MII_ENTRY(bmcr, MII_BMCR); 1096 - MII_ENTRY(bmsr, MII_BMSR); 1097 - MII_ENTRY(physid1, MII_PHYSID1); 1098 - MII_ENTRY(physid2, MII_PHYSID2); 1099 - MII_ENTRY(advertise, MII_ADVERTISE); 1100 - MII_ENTRY(lpa, MII_LPA); 1101 - MII_ENTRY(expansion, MII_EXPANSION); 1102 - 1103 - static struct attribute *macb_mii_attrs[] = { 1104 - &dev_attr_bmcr.attr, 1105 - &dev_attr_bmsr.attr, 1106 - &dev_attr_physid1.attr, 1107 - &dev_attr_physid2.attr, 1108 - &dev_attr_advertise.attr, 1109 - 
&dev_attr_lpa.attr, 1110 - &dev_attr_expansion.attr, 1111 - NULL, 1112 - }; 1113 - 1114 - static struct attribute_group macb_mii_group = { 1115 - .name = "mii", 1116 - .attrs = macb_mii_attrs, 1117 - }; 1118 - 1119 - static void macb_unregister_sysfs(struct net_device *net) 1120 - { 1121 - struct device *_dev = &net->dev; 1122 - 1123 - sysfs_remove_group(&_dev->kobj, &macb_mii_group); 1124 - } 1125 - 1126 - static int macb_register_sysfs(struct net_device *net) 1127 - { 1128 - struct device *_dev = &net->dev; 1129 - int ret; 1130 - 1131 - ret = sysfs_create_group(&_dev->kobj, &macb_mii_group); 1132 - if (ret) 1133 - printk(KERN_WARNING 1134 - "%s: sysfs mii attribute registration failed: %d\n", 1135 - net->name, ret); 1136 - return ret; 1137 - } 1138 926 static int __devinit macb_probe(struct platform_device *pdev) 1139 927 { 1140 928 struct eth_platform_data *pdata; 1141 929 struct resource *regs; 1142 930 struct net_device *dev; 1143 931 struct macb *bp; 932 + struct phy_device *phydev; 1144 933 unsigned long pclk_hz; 1145 934 u32 config; 1146 935 int err = -ENXIO; ··· 1144 1073 dev->stop = macb_close; 1145 1074 dev->hard_start_xmit = macb_start_xmit; 1146 1075 dev->get_stats = macb_get_stats; 1076 + dev->set_multicast_list = macb_set_rx_mode; 1147 1077 dev->do_ioctl = macb_ioctl; 1148 1078 dev->poll = macb_poll; 1149 1079 dev->weight = 64; 1150 1080 dev->ethtool_ops = &macb_ethtool_ops; 1151 1081 1152 1082 dev->base_addr = regs->start; 1153 - 1154 - INIT_DELAYED_WORK(&bp->periodic_task, macb_periodic_task); 1155 - mutex_init(&bp->mdio_mutex); 1156 - init_completion(&bp->mdio_complete); 1157 1083 1158 1084 /* Set MII management clock divider */ 1159 1085 pclk_hz = clk_get_rate(bp->pclk); ··· 1164 1096 config = MACB_BF(CLK, MACB_CLK_DIV64); 1165 1097 macb_writel(bp, NCFGR, config); 1166 1098 1167 - bp->mii.dev = dev; 1168 - bp->mii.mdio_read = macb_mdio_read; 1169 - bp->mii.mdio_write = macb_mdio_write; 1170 - bp->mii.phy_id_mask = 0x1f; 1171 - bp->mii.reg_num_mask = 0x1f; 1172 - 1173 1099 macb_get_hwaddr(bp); 1174 - err = macb_phy_probe(bp); 1175 - if (err) { 1176 - dev_err(&pdev->dev, "Failed to detect PHY, aborting.\n"); 1177 - goto err_out_free_irq; 1178 - } 1179 - 1180 1100 pdata = pdev->dev.platform_data; 1101 + 1181 1102 if (pdata && pdata->is_rmii) 1182 1103 #if defined(CONFIG_ARCH_AT91) 1183 1104 macb_writel(bp, USRIO, (MACB_BIT(RMII) | MACB_BIT(CLKEN)) ); ··· 1188 1131 goto err_out_free_irq; 1189 1132 } 1190 1133 1191 - platform_set_drvdata(pdev, dev); 1134 + if (macb_mii_init(bp) != 0) { 1135 + goto err_out_unregister_netdev; 1136 + } 1192 1137 1193 - macb_register_sysfs(dev); 1138 + platform_set_drvdata(pdev, dev); 1194 1139 1195 1140 printk(KERN_INFO "%s: Atmel MACB at 0x%08lx irq %d " 1196 1141 "(%02x:%02x:%02x:%02x:%02x:%02x)\n", ··· 1200 1141 dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2], 1201 1142 dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]); 1202 1143 1144 + phydev = bp->phy_dev; 1145 + printk(KERN_INFO "%s: attached PHY driver [%s] " 1146 + "(mii_bus:phy_addr=%s, irq=%d)\n", 1147 + dev->name, phydev->drv->name, phydev->dev.bus_id, phydev->irq); 1148 + 1203 1149 return 0; 1204 1150 1151 + err_out_unregister_netdev: 1152 + unregister_netdev(dev); 1205 1153 err_out_free_irq: 1206 1154 free_irq(dev->irq, dev); 1207 1155 err_out_iounmap: ··· 1219 1153 clk_put(bp->hclk); 1220 1154 #endif 1221 1155 clk_disable(bp->pclk); 1156 + #ifndef CONFIG_ARCH_AT91 1222 1157 err_out_put_pclk: 1158 + #endif 1223 1159 clk_put(bp->pclk); 1224 1160 err_out_free_dev: 1225 1161 
free_netdev(dev); ··· 1239 1171 1240 1172 if (dev) { 1241 1173 bp = netdev_priv(dev); 1242 - macb_unregister_sysfs(dev); 1174 + mdiobus_unregister(&bp->mii_bus); 1175 + kfree(bp->mii_bus.irq); 1243 1176 unregister_netdev(dev); 1244 1177 free_irq(dev->irq, dev); 1245 1178 iounmap(bp->regs);
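The multicast filter comment in the macb.c hunk above describes how a 48-bit destination address is folded into a 6-bit index by XOR-ing every sixth bit, and that index then selects one bit of the 64-bit HRB/HRT hash register. A small standalone sketch of that computation, with an arbitrarily chosen sample address, could look like this:

#include <stdio.h>

/* 1 if bit 'bitnr' of the 48-bit address is set (LSB of the first byte is bit 0) */
static int addr_bit(const unsigned char *addr, int bitnr)
{
	return (addr[bitnr / 8] >> (bitnr % 8)) & 1;
}

int main(void)
{
	/* example multicast address, illustrative only */
	unsigned char da[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	int i, j, hi = 0;

	/* hi[j] = da[j] ^ da[j+6] ^ da[j+12] ^ ... ^ da[j+42], for j = 0..5 */
	for (j = 0; j < 6; j++) {
		int b = 0;
		for (i = 0; i < 8; i++)
			b ^= addr_bit(da, i * 6 + j);
		hi |= b << j;
	}

	/* the index picks one bit in the 64-bit HRB/HRT hash register */
	printf("hash index = %d\n", hi);
	return 0;
}

Assuming this mirrors the kernel helpers bit-for-bit, the printed value is the bit position that macb_sethashtable would set in mc_filter for the same address.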
+5 -5
drivers/net/macb.h
··· 383 383 384 384 unsigned int rx_pending, tx_pending; 385 385 386 - struct delayed_work periodic_task; 387 - 388 - struct mutex mdio_mutex; 389 - struct completion mdio_complete; 390 - struct mii_if_info mii; 386 + struct mii_bus mii_bus; 387 + struct phy_device *phy_dev; 388 + unsigned int link; 389 + unsigned int speed; 390 + unsigned int duplex; 391 391 }; 392 392 393 393 #endif /* _MACB_H */
-6
drivers/net/myri10ge/myri10ge.c
··· 1060 1060 struct myri10ge_tx_buf *tx = &mgp->tx; 1061 1061 struct sk_buff *skb; 1062 1062 int idx, len; 1063 - int limit = 0; 1064 1063 1065 1064 while (tx->pkt_done != mcp_index) { 1066 1065 idx = tx->done & tx->mask; ··· 1090 1091 bus), len, 1091 1092 PCI_DMA_TODEVICE); 1092 1093 } 1093 - 1094 - /* limit potential for livelock by only handling 1095 - * 2 full tx rings per call */ 1096 - if (unlikely(++limit > 2 * tx->mask)) 1097 - break; 1098 1094 } 1099 1095 /* start the queue if we've stopped it */ 1100 1096 if (netif_queue_stopped(mgp->dev)
+3 -1
drivers/net/s2io.c
··· 796 796 struct mac_info *mac_control; 797 797 struct config_param *config; 798 798 int lst_size, lst_per_page; 799 - struct net_device *dev = nic->dev; 799 + struct net_device *dev; 800 800 int page_num = 0; 801 801 802 802 if (!nic) 803 803 return; 804 + 805 + dev = nic->dev; 804 806 805 807 mac_control = &nic->mac_control; 806 808 config = &nic->config;
+3
drivers/net/usb/cdc_subset.c
··· 305 305 USB_DEVICE (0x8086, 0x07d3), // "blob" bootloader 306 306 .driver_info = (unsigned long) &blob_info, 307 307 }, { 308 + USB_DEVICE (0x1286, 0x8001), // "blob" bootloader 309 + .driver_info = (unsigned long) &blob_info, 310 + }, { 308 311 // Linux Ethernet/RNDIS gadget on pxa210/25x/26x, second config 309 312 // e.g. Gumstix, current OpenZaurus, ... 310 313 USB_DEVICE_VER (0x0525, 0xa4a2, 0x0203, 0x0203),
+106 -104
drivers/net/wireless/airo.c
··· 52 52 53 53 #include "airo.h" 54 54 55 + #define DRV_NAME "airo" 56 + 55 57 #ifdef CONFIG_PCI 56 58 static struct pci_device_id card_ids[] = { 57 59 { 0x14b9, 1, PCI_ANY_ID, PCI_ANY_ID, }, ··· 73 71 static int airo_pci_resume(struct pci_dev *pdev); 74 72 75 73 static struct pci_driver airo_driver = { 76 - .name = "airo", 74 + .name = DRV_NAME, 77 75 .id_table = card_ids, 78 76 .probe = airo_pci_probe, 79 77 .remove = __devexit_p(airo_pci_remove), ··· 1094 1092 static void OUT4500( struct airo_info *, u16 register, u16 value ); 1095 1093 static unsigned short IN4500( struct airo_info *, u16 register ); 1096 1094 static u16 setup_card(struct airo_info*, u8 *mac, int lock); 1097 - static int enable_MAC( struct airo_info *ai, Resp *rsp, int lock ); 1095 + static int enable_MAC(struct airo_info *ai, int lock); 1098 1096 static void disable_MAC(struct airo_info *ai, int lock); 1099 1097 static void enable_interrupts(struct airo_info*); 1100 1098 static void disable_interrupts(struct airo_info*); ··· 1252 1250 static int flashrestart(struct airo_info *ai,struct net_device *dev); 1253 1251 1254 1252 #define airo_print(type, name, fmt, args...) \ 1255 - { printk(type "airo(%s): " fmt "\n", name, ##args); } 1253 + printk(type DRV_NAME "(%s): " fmt "\n", name, ##args) 1256 1254 1257 1255 #define airo_print_info(name, fmt, args...) \ 1258 1256 airo_print(KERN_INFO, name, fmt, ##args) ··· 1928 1926 return rc; 1929 1927 } 1930 1928 1931 - static int airo_open(struct net_device *dev) { 1932 - struct airo_info *info = dev->priv; 1933 - Resp rsp; 1929 + static void try_auto_wep(struct airo_info *ai) 1930 + { 1931 + if (auto_wep && !(ai->flags & FLAG_RADIO_DOWN)) { 1932 + ai->expires = RUN_AT(3*HZ); 1933 + wake_up_interruptible(&ai->thr_wait); 1934 + } 1935 + } 1934 1936 1935 - if (test_bit(FLAG_FLASHING, &info->flags)) 1937 + static int airo_open(struct net_device *dev) { 1938 + struct airo_info *ai = dev->priv; 1939 + int rc = 0; 1940 + 1941 + if (test_bit(FLAG_FLASHING, &ai->flags)) 1936 1942 return -EIO; 1937 1943 1938 1944 /* Make sure the card is configured. 1939 1945 * Wireless Extensions may postpone config changes until the card 1940 1946 * is open (to pipeline changes and speed-up card setup). 
If 1941 1947 * those changes are not yet commited, do it now - Jean II */ 1942 - if (test_bit (FLAG_COMMIT, &info->flags)) { 1943 - disable_MAC(info, 1); 1944 - writeConfigRid(info, 1); 1948 + if (test_bit(FLAG_COMMIT, &ai->flags)) { 1949 + disable_MAC(ai, 1); 1950 + writeConfigRid(ai, 1); 1945 1951 } 1946 1952 1947 - if (info->wifidev != dev) { 1953 + if (ai->wifidev != dev) { 1954 + clear_bit(JOB_DIE, &ai->jobs); 1955 + ai->airo_thread_task = kthread_run(airo_thread, dev, dev->name); 1956 + if (IS_ERR(ai->airo_thread_task)) 1957 + return (int)PTR_ERR(ai->airo_thread_task); 1958 + 1959 + rc = request_irq(dev->irq, airo_interrupt, IRQF_SHARED, 1960 + dev->name, dev); 1961 + if (rc) { 1962 + airo_print_err(dev->name, 1963 + "register interrupt %d failed, rc %d", 1964 + dev->irq, rc); 1965 + set_bit(JOB_DIE, &ai->jobs); 1966 + kthread_stop(ai->airo_thread_task); 1967 + return rc; 1968 + } 1969 + 1948 1970 /* Power on the MAC controller (which may have been disabled) */ 1949 - clear_bit(FLAG_RADIO_DOWN, &info->flags); 1950 - enable_interrupts(info); 1971 + clear_bit(FLAG_RADIO_DOWN, &ai->flags); 1972 + enable_interrupts(ai); 1973 + 1974 + try_auto_wep(ai); 1951 1975 } 1952 - enable_MAC(info, &rsp, 1); 1976 + enable_MAC(ai, 1); 1953 1977 1954 1978 netif_start_queue(dev); 1955 1979 return 0; ··· 2366 2338 { 2367 2339 struct airo_info *ai = dev->priv; 2368 2340 struct sockaddr *addr = p; 2369 - Resp rsp; 2370 2341 2371 2342 readConfigRid(ai, 1); 2372 2343 memcpy (ai->config.macAddr, addr->sa_data, dev->addr_len); 2373 2344 set_bit (FLAG_COMMIT, &ai->flags); 2374 2345 disable_MAC(ai, 1); 2375 2346 writeConfigRid (ai, 1); 2376 - enable_MAC(ai, &rsp, 1); 2347 + enable_MAC(ai, 1); 2377 2348 memcpy (ai->dev->dev_addr, addr->sa_data, dev->addr_len); 2378 2349 if (ai->wifidev) 2379 2350 memcpy (ai->wifidev->dev_addr, addr->sa_data, dev->addr_len); ··· 2419 2392 disable_MAC(ai, 1); 2420 2393 #endif 2421 2394 disable_interrupts( ai ); 2395 + 2396 + free_irq(dev->irq, dev); 2397 + 2398 + set_bit(JOB_DIE, &ai->jobs); 2399 + kthread_stop(ai->airo_thread_task); 2422 2400 } 2423 2401 return 0; 2424 2402 } ··· 2435 2403 set_bit(FLAG_RADIO_DOWN, &ai->flags); 2436 2404 disable_MAC(ai, 1); 2437 2405 disable_interrupts(ai); 2438 - free_irq( dev->irq, dev ); 2439 2406 takedown_proc_entry( dev, ai ); 2440 2407 if (test_bit(FLAG_REGISTERED, &ai->flags)) { 2441 2408 unregister_netdev( dev ); ··· 2445 2414 } 2446 2415 clear_bit(FLAG_REGISTERED, &ai->flags); 2447 2416 } 2448 - set_bit(JOB_DIE, &ai->jobs); 2449 - kthread_stop(ai->airo_thread_task); 2450 - 2451 2417 /* 2452 2418 * Clean out tx queue 2453 2419 */ ··· 2582 2554 * 2) Map PCI memory for issueing commands. 2583 2555 * 3) Allocate memory (shared) to send and receive ethernet frames. 
2584 2556 */ 2585 - static int mpi_map_card(struct airo_info *ai, struct pci_dev *pci, 2586 - const char *name) 2557 + static int mpi_map_card(struct airo_info *ai, struct pci_dev *pci) 2587 2558 { 2588 2559 unsigned long mem_start, mem_len, aux_start, aux_len; 2589 2560 int rc = -1; ··· 2596 2569 aux_start = pci_resource_start(pci, 2); 2597 2570 aux_len = AUXMEMSIZE; 2598 2571 2599 - if (!request_mem_region(mem_start, mem_len, name)) { 2600 - airo_print_err(ai->dev->name, "Couldn't get region %x[%x] for %s", 2601 - (int)mem_start, (int)mem_len, name); 2572 + if (!request_mem_region(mem_start, mem_len, DRV_NAME)) { 2573 + airo_print_err("", "Couldn't get region %x[%x]", 2574 + (int)mem_start, (int)mem_len); 2602 2575 goto out; 2603 2576 } 2604 - if (!request_mem_region(aux_start, aux_len, name)) { 2605 - airo_print_err(ai->dev->name, "Couldn't get region %x[%x] for %s", 2606 - (int)aux_start, (int)aux_len, name); 2577 + if (!request_mem_region(aux_start, aux_len, DRV_NAME)) { 2578 + airo_print_err("", "Couldn't get region %x[%x]", 2579 + (int)aux_start, (int)aux_len); 2607 2580 goto free_region1; 2608 2581 } 2609 2582 2610 2583 ai->pcimem = ioremap(mem_start, mem_len); 2611 2584 if (!ai->pcimem) { 2612 - airo_print_err(ai->dev->name, "Couldn't map region %x[%x] for %s", 2613 - (int)mem_start, (int)mem_len, name); 2585 + airo_print_err("", "Couldn't map region %x[%x]", 2586 + (int)mem_start, (int)mem_len); 2614 2587 goto free_region2; 2615 2588 } 2616 2589 ai->pciaux = ioremap(aux_start, aux_len); 2617 2590 if (!ai->pciaux) { 2618 - airo_print_err(ai->dev->name, "Couldn't map region %x[%x] for %s", 2619 - (int)aux_start, (int)aux_len, name); 2591 + airo_print_err("", "Couldn't map region %x[%x]", 2592 + (int)aux_start, (int)aux_len); 2620 2593 goto free_memmap; 2621 2594 } 2622 2595 2623 2596 /* Reserve PKTSIZE for each fid and 2K for the Rids */ 2624 2597 ai->shared = pci_alloc_consistent(pci, PCI_SHARED_LEN, &ai->shared_dma); 2625 2598 if (!ai->shared) { 2626 - airo_print_err(ai->dev->name, "Couldn't alloc_consistent %d", 2627 - PCI_SHARED_LEN); 2599 + airo_print_err("", "Couldn't alloc_consistent %d", 2600 + PCI_SHARED_LEN); 2628 2601 goto free_auxmap; 2629 2602 } 2630 2603 ··· 2769 2742 kzalloc(AIRO_MAX_NETWORK_COUNT * sizeof(BSSListElement), 2770 2743 GFP_KERNEL); 2771 2744 if (!ai->networks) { 2772 - airo_print_warn(ai->dev->name, "Out of memory allocating beacons"); 2745 + airo_print_warn("", "Out of memory allocating beacons"); 2773 2746 return -ENOMEM; 2774 2747 } 2775 2748 ··· 2797 2770 { 2798 2771 int status; 2799 2772 CapabilityRid cap_rid; 2800 - const char *name = ai->dev->name; 2801 2773 2802 2774 status = readCapabilityRid(ai, &cap_rid, 1); 2803 2775 if (status != SUCCESS) return 0; ··· 2804 2778 /* Only firmware versions 5.30.17 or better can do WPA */ 2805 2779 if ((cap_rid.softVer > 0x530) 2806 2780 || ((cap_rid.softVer == 0x530) && (cap_rid.softSubVer >= 17))) { 2807 - airo_print_info(name, "WPA is supported."); 2781 + airo_print_info("", "WPA is supported."); 2808 2782 return 1; 2809 2783 } 2810 2784 2811 2785 /* No WPA support */ 2812 - airo_print_info(name, "WPA unsupported (only firmware versions 5.30.17" 2786 + airo_print_info("", "WPA unsupported (only firmware versions 5.30.17" 2813 2787 " and greater support WPA. Detected %s)", cap_rid.prodVer); 2814 2788 return 0; 2815 2789 } ··· 2823 2797 int i, rc; 2824 2798 2825 2799 /* Create the network device object. 
*/ 2826 - dev = alloc_etherdev(sizeof(*ai)); 2827 - if (!dev) { 2800 + dev = alloc_netdev(sizeof(*ai), "", ether_setup); 2801 + if (!dev) { 2828 2802 airo_print_err("", "Couldn't alloc_etherdev"); 2829 2803 return NULL; 2830 - } 2831 - if (dev_alloc_name(dev, dev->name) < 0) { 2832 - airo_print_err("", "Couldn't get name!"); 2833 - goto err_out_free; 2834 2804 } 2835 2805 2836 2806 ai = dev->priv; 2837 2807 ai->wifidev = NULL; 2838 - ai->flags = 0; 2808 + ai->flags = 1 << FLAG_RADIO_DOWN; 2839 2809 ai->jobs = 0; 2840 2810 ai->dev = dev; 2841 2811 if (pci && (pci->device == 0x5000 || pci->device == 0xa504)) { 2842 - airo_print_dbg(dev->name, "Found an MPI350 card"); 2812 + airo_print_dbg("", "Found an MPI350 card"); 2843 2813 set_bit(FLAG_MPI, &ai->flags); 2844 2814 } 2845 2815 spin_lock_init(&ai->aux_lock); ··· 2843 2821 ai->config.len = 0; 2844 2822 ai->pci = pci; 2845 2823 init_waitqueue_head (&ai->thr_wait); 2846 - ai->airo_thread_task = kthread_run(airo_thread, dev, dev->name); 2847 - if (IS_ERR(ai->airo_thread_task)) 2848 - goto err_out_free; 2849 2824 ai->tfm = NULL; 2850 2825 add_airo_dev(ai); 2851 2826 2852 2827 if (airo_networks_allocate (ai)) 2853 - goto err_out_thr; 2828 + goto err_out_free; 2854 2829 airo_networks_initialize (ai); 2855 2830 2856 2831 /* The Airo-specific entries in the device structure. */ ··· 2870 2851 dev->base_addr = port; 2871 2852 2872 2853 SET_NETDEV_DEV(dev, dmdev); 2854 + SET_MODULE_OWNER(dev); 2873 2855 2874 2856 reset_card (dev, 1); 2875 2857 msleep(400); 2876 2858 2877 - rc = request_irq( dev->irq, airo_interrupt, IRQF_SHARED, dev->name, dev ); 2878 - if (rc) { 2879 - airo_print_err(dev->name, "register interrupt %d failed, rc %d", 2880 - irq, rc); 2881 - goto err_out_nets; 2882 - } 2883 2859 if (!is_pcmcia) { 2884 - if (!request_region( dev->base_addr, 64, dev->name )) { 2860 + if (!request_region(dev->base_addr, 64, DRV_NAME)) { 2885 2861 rc = -EBUSY; 2886 2862 airo_print_err(dev->name, "Couldn't request region"); 2887 - goto err_out_irq; 2863 + goto err_out_nets; 2888 2864 } 2889 2865 } 2890 2866 2891 2867 if (test_bit(FLAG_MPI,&ai->flags)) { 2892 - if (mpi_map_card(ai, pci, dev->name)) { 2893 - airo_print_err(dev->name, "Could not map memory"); 2868 + if (mpi_map_card(ai, pci)) { 2869 + airo_print_err("", "Could not map memory"); 2894 2870 goto err_out_res; 2895 2871 } 2896 2872 } ··· 2913 2899 ai->bssListRidLen = sizeof(BSSListRid) - sizeof(BSSListRidExtra); 2914 2900 } 2915 2901 2902 + strcpy(dev->name, "eth%d"); 2916 2903 rc = register_netdev(dev); 2917 2904 if (rc) { 2918 2905 airo_print_err(dev->name, "Couldn't register_netdev"); ··· 2936 2921 if (setup_proc_entry(dev, dev->priv) < 0) 2937 2922 goto err_out_wifi; 2938 2923 2939 - netif_start_queue(dev); 2940 - SET_MODULE_OWNER(dev); 2941 2924 return dev; 2942 2925 2943 2926 err_out_wifi: ··· 2953 2940 err_out_res: 2954 2941 if (!is_pcmcia) 2955 2942 release_region( dev->base_addr, 64 ); 2956 - err_out_irq: 2957 - free_irq(dev->irq, dev); 2958 2943 err_out_nets: 2959 2944 airo_networks_free(ai); 2960 - err_out_thr: 2961 2945 del_airo_dev(ai); 2962 - set_bit(JOB_DIE, &ai->jobs); 2963 - kthread_stop(ai->airo_thread_task); 2964 2946 err_out_free: 2965 2947 free_netdev(dev); 2966 2948 return NULL; ··· 3537 3529 return rc; 3538 3530 } 3539 3531 3540 - static int enable_MAC( struct airo_info *ai, Resp *rsp, int lock ) { 3532 + static int enable_MAC(struct airo_info *ai, int lock) 3533 + { 3541 3534 int rc; 3542 - Cmd cmd; 3535 + Cmd cmd; 3536 + Resp rsp; 3543 3537 3544 3538 /* FLAG_RADIO_OFF : 
Radio disabled via /proc or Wireless Extensions 3545 3539 * FLAG_RADIO_DOWN : Radio disabled via "ifconfig ethX down" ··· 3557 3547 if (!test_bit(FLAG_ENABLED, &ai->flags)) { 3558 3548 memset(&cmd, 0, sizeof(cmd)); 3559 3549 cmd.cmd = MAC_ENABLE; 3560 - rc = issuecommand(ai, &cmd, rsp); 3550 + rc = issuecommand(ai, &cmd, &rsp); 3561 3551 if (rc == SUCCESS) 3562 3552 set_bit(FLAG_ENABLED, &ai->flags); 3563 3553 } else ··· 3567 3557 up(&ai->sem); 3568 3558 3569 3559 if (rc) 3570 - airo_print_err(ai->dev->name, "%s: Cannot enable MAC, err=%d", 3571 - __FUNCTION__, rc); 3560 + airo_print_err(ai->dev->name, "Cannot enable MAC"); 3561 + else if ((rsp.status & 0xFF00) != 0) { 3562 + airo_print_err(ai->dev->name, "Bad MAC enable reason=%x, " 3563 + "rid=%x, offset=%d", rsp.rsp0, rsp.rsp1, rsp.rsp2); 3564 + rc = ERROR; 3565 + } 3572 3566 return rc; 3573 3567 } 3574 3568 ··· 3916 3902 if ( status != SUCCESS ) return ERROR; 3917 3903 } 3918 3904 3919 - status = enable_MAC(ai, &rsp, lock); 3920 - if ( status != SUCCESS || (rsp.status & 0xFF00) != 0) { 3921 - airo_print_err(ai->dev->name, "Bad MAC enable reason = %x, rid = %x," 3922 - " offset = %d", rsp.rsp0, rsp.rsp1, rsp.rsp2 ); 3905 + status = enable_MAC(ai, lock); 3906 + if (status != SUCCESS) 3923 3907 return ERROR; 3924 - } 3925 3908 3926 3909 /* Grab the initial wep key, we gotta save it for auto_wep */ 3927 3910 rc = readWepKeyRid(ai, &wkr, 1, lock); ··· 3930 3919 rc = readWepKeyRid(ai, &wkr, 0, lock); 3931 3920 } while(lastindex != wkr.kindex); 3932 3921 3933 - if (auto_wep) { 3934 - ai->expires = RUN_AT(3*HZ); 3935 - wake_up_interruptible(&ai->thr_wait); 3936 - } 3922 + try_auto_wep(ai); 3937 3923 3938 3924 return SUCCESS; 3939 3925 } ··· 4012 4004 } 4013 4005 if ( !(max_tries--) ) { 4014 4006 airo_print_err(ai->dev->name, 4015 - "airo: BAP setup error too many retries\n"); 4007 + "BAP setup error too many retries\n"); 4016 4008 return ERROR; 4017 4009 } 4018 4010 // -- PC4500 missed it, try again ··· 5160 5152 struct net_device *dev = dp->data; 5161 5153 struct airo_info *ai = dev->priv; 5162 5154 SsidRid SSID_rid; 5163 - Resp rsp; 5164 5155 int i; 5165 5156 int offset = 0; 5166 5157 ··· 5184 5177 SSID_rid.len = sizeof(SSID_rid); 5185 5178 disable_MAC(ai, 1); 5186 5179 writeSsidRid(ai, &SSID_rid, 1); 5187 - enable_MAC(ai, &rsp, 1); 5180 + enable_MAC(ai, 1); 5188 5181 } 5189 5182 5190 5183 static inline u8 hexVal(char c) { ··· 5200 5193 struct net_device *dev = dp->data; 5201 5194 struct airo_info *ai = dev->priv; 5202 5195 APListRid APList_rid; 5203 - Resp rsp; 5204 5196 int i; 5205 5197 5206 5198 if ( !data->writelen ) return; ··· 5224 5218 } 5225 5219 disable_MAC(ai, 1); 5226 5220 writeAPListRid(ai, &APList_rid, 1); 5227 - enable_MAC(ai, &rsp, 1); 5221 + enable_MAC(ai, 1); 5228 5222 } 5229 5223 5230 5224 /* This function wraps PC4500_writerid with a MAC disable */ 5231 5225 static int do_writerid( struct airo_info *ai, u16 rid, const void *rid_data, 5232 5226 int len, int dummy ) { 5233 5227 int rc; 5234 - Resp rsp; 5235 5228 5236 5229 disable_MAC(ai, 1); 5237 5230 rc = PC4500_writerid(ai, rid, rid_data, len, 1); 5238 - enable_MAC(ai, &rsp, 1); 5231 + enable_MAC(ai, 1); 5239 5232 return rc; 5240 5233 } 5241 5234 ··· 5265 5260 const char *key, u16 keylen, int perm, int lock ) { 5266 5261 static const unsigned char macaddr[ETH_ALEN] = { 0x01, 0, 0, 0, 0, 0 }; 5267 5262 WepKeyRid wkr; 5268 - Resp rsp; 5269 5263 5270 5264 memset(&wkr, 0, sizeof(wkr)); 5271 5265 if (keylen == 0) { ··· 5284 5280 5285 5281 if (perm) disable_MAC(ai, lock); 5286 
5282 writeWepKeyRid(ai, &wkr, perm, lock); 5287 - if (perm) enable_MAC(ai, &rsp, lock); 5283 + if (perm) enable_MAC(ai, lock); 5288 5284 return 0; 5289 5285 } 5290 5286 ··· 5552 5548 changed. */ 5553 5549 static void timer_func( struct net_device *dev ) { 5554 5550 struct airo_info *apriv = dev->priv; 5555 - Resp rsp; 5556 5551 5557 5552 /* We don't have a link so try changing the authtype */ 5558 5553 readConfigRid(apriv, 0); ··· 5578 5575 } 5579 5576 set_bit (FLAG_COMMIT, &apriv->flags); 5580 5577 writeConfigRid(apriv, 0); 5581 - enable_MAC(apriv, &rsp, 0); 5578 + enable_MAC(apriv, 0); 5582 5579 up(&apriv->sem); 5583 5580 5584 5581 /* Schedule check to see if the change worked */ ··· 5600 5597 dev = _init_airo_card(pdev->irq, pdev->resource[0].start, 0, pdev, &pdev->dev); 5601 5598 else 5602 5599 dev = _init_airo_card(pdev->irq, pdev->resource[2].start, 0, pdev, &pdev->dev); 5603 - if (!dev) 5600 + if (!dev) { 5601 + pci_disable_device(pdev); 5604 5602 return -ENODEV; 5603 + } 5605 5604 5606 5605 pci_set_drvdata(pdev, dev); 5607 5606 return 0; ··· 5615 5610 5616 5611 airo_print_info(dev->name, "Unregistering..."); 5617 5612 stop_airo_card(dev, 1); 5613 + pci_disable_device(pdev); 5614 + pci_set_drvdata(pdev, NULL); 5618 5615 } 5619 5616 5620 5617 static int airo_pci_suspend(struct pci_dev *pdev, pm_message_t state) ··· 5653 5646 { 5654 5647 struct net_device *dev = pci_get_drvdata(pdev); 5655 5648 struct airo_info *ai = dev->priv; 5656 - Resp rsp; 5657 5649 pci_power_t prev_state = pdev->current_state; 5658 5650 5659 5651 pci_set_power_state(pdev, PCI_D0); ··· 5685 5679 ai->APList = NULL; 5686 5680 } 5687 5681 writeConfigRid(ai, 0); 5688 - enable_MAC(ai, &rsp, 0); 5682 + enable_MAC(ai, 0); 5689 5683 ai->power = PMSG_ON; 5690 5684 netif_device_attach(dev); 5691 5685 netif_wake_queue(dev); ··· 5909 5903 char *extra) 5910 5904 { 5911 5905 struct airo_info *local = dev->priv; 5912 - Resp rsp; 5913 5906 SsidRid SSID_rid; /* SSIDs */ 5914 5907 5915 5908 /* Reload the list of current SSID */ ··· 5940 5935 /* Write it to the card */ 5941 5936 disable_MAC(local, 1); 5942 5937 writeSsidRid(local, &SSID_rid, 1); 5943 - enable_MAC(local, &rsp, 1); 5938 + enable_MAC(local, 1); 5944 5939 5945 5940 return 0; 5946 5941 } ··· 6005 6000 memcpy(APList_rid.ap[0], awrq->sa_data, ETH_ALEN); 6006 6001 disable_MAC(local, 1); 6007 6002 writeAPListRid(local, &APList_rid, 1); 6008 - enable_MAC(local, &rsp, 1); 6003 + enable_MAC(local, 1); 6009 6004 } 6010 6005 return 0; 6011 6006 } ··· 7459 7454 char *extra) /* NULL */ 7460 7455 { 7461 7456 struct airo_info *local = dev->priv; 7462 - Resp rsp; 7463 7457 7464 7458 if (!test_bit (FLAG_COMMIT, &local->flags)) 7465 7459 return 0; ··· 7483 7479 if (down_interruptible(&local->sem)) 7484 7480 return -ERESTARTSYS; 7485 7481 writeConfigRid(local, 0); 7486 - enable_MAC(local, &rsp, 0); 7482 + enable_MAC(local, 0); 7487 7483 if (test_bit (FLAG_RESET, &local->flags)) 7488 7484 airo_set_promisc(local); 7489 7485 else ··· 7750 7746 unsigned char *iobuf; 7751 7747 int len; 7752 7748 struct airo_info *ai = dev->priv; 7753 - Resp rsp; 7754 7749 7755 7750 if (test_bit(FLAG_FLASHING, &ai->flags)) 7756 7751 return -EIO; ··· 7761 7758 if (test_bit(FLAG_COMMIT, &ai->flags)) { 7762 7759 disable_MAC (ai, 1); 7763 7760 writeConfigRid (ai, 1); 7764 - enable_MAC (ai, &rsp, 1); 7761 + enable_MAC(ai, 1); 7765 7762 } 7766 7763 break; 7767 7764 case AIROGSLIST: ridcode = RID_SSID; break; ··· 7818 7815 struct airo_info *ai = dev->priv; 7819 7816 int ridcode; 7820 7817 int enabled; 7821 - Resp 
rsp; 7822 7818 static int (* writer)(struct airo_info *, u16 rid, const void *, int, int); 7823 7819 unsigned char *iobuf; 7824 7820 ··· 7851 7849 * same with MAC off 7852 7850 */ 7853 7851 case AIROPMACON: 7854 - if (enable_MAC(ai, &rsp, 1) != 0) 7852 + if (enable_MAC(ai, 1) != 0) 7855 7853 return -EIO; 7856 7854 return 0; 7857 7855
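The airo PCI glue above now balances pci_enable_device(): probe disables the device again if card setup fails, and remove both disables it and clears drvdata. A minimal probe/remove skeleton showing that pairing, with generic placeholder names (example_setup() stands in for the driver's real card-init routine):

#include <linux/pci.h>
#include <linux/errno.h>

/* Hypothetical one-shot device setup; returns NULL on failure. */
static void *example_setup(struct pci_dev *pdev)
{
        return NULL;
}

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        void *priv;
        int err;

        err = pci_enable_device(pdev);
        if (err)
                return err;

        priv = example_setup(pdev);
        if (!priv) {
                pci_disable_device(pdev);       /* undo the enable on failure */
                return -ENODEV;
        }

        pci_set_drvdata(pdev, priv);
        return 0;
}

static void example_remove(struct pci_dev *pdev)
{
        /* free driver state here, then release the PCI device */
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
}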
+7 -4
drivers/net/wireless/ipw2100.c
··· 1768 1768 1769 1769 if (priv->stop_rf_kill) { 1770 1770 priv->stop_rf_kill = 0; 1771 - queue_delayed_work(priv->workqueue, &priv->rf_kill, HZ); 1771 + queue_delayed_work(priv->workqueue, &priv->rf_kill, 1772 + round_jiffies(HZ)); 1772 1773 } 1773 1774 1774 1775 deferred = 1; ··· 2099 2098 /* Make sure the RF Kill check timer is running */ 2100 2099 priv->stop_rf_kill = 0; 2101 2100 cancel_delayed_work(&priv->rf_kill); 2102 - queue_delayed_work(priv->workqueue, &priv->rf_kill, HZ); 2101 + queue_delayed_work(priv->workqueue, &priv->rf_kill, round_jiffies(HZ)); 2103 2102 } 2104 2103 2105 2104 static void isr_scan_complete(struct ipw2100_priv *priv, u32 status) ··· 4234 4233 /* Make sure the RF_KILL check timer is running */ 4235 4234 priv->stop_rf_kill = 0; 4236 4235 cancel_delayed_work(&priv->rf_kill); 4237 - queue_delayed_work(priv->workqueue, &priv->rf_kill, HZ); 4236 + queue_delayed_work(priv->workqueue, &priv->rf_kill, 4237 + round_jiffies(HZ)); 4238 4238 } else 4239 4239 schedule_reset(priv); 4240 4240 } ··· 5971 5969 if (rf_kill_active(priv)) { 5972 5970 IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n"); 5973 5971 if (!priv->stop_rf_kill) 5974 - queue_delayed_work(priv->workqueue, &priv->rf_kill, HZ); 5972 + queue_delayed_work(priv->workqueue, &priv->rf_kill, 5973 + round_jiffies(HZ)); 5975 5974 goto exit_unlock; 5976 5975 } 5977 5976
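The ipw2100 hunks above (and the ipw2200 ones below) wrap fixed delays in round_jiffies() so that recurring work such as the RF-kill poll expires on whole-second boundaries and the timer subsystem can batch wakeups. A minimal sketch of the idiom, assuming a driver-owned workqueue and delayed_work (the names here are illustrative, not taken from the patch):

#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

/* Reschedule a poll roughly one second out.  round_jiffies() rounds the
 * absolute expiry time to the next full second, so many such timers fire
 * together instead of waking the CPU individually. */
static void example_schedule_poll(struct workqueue_struct *wq,
                                  struct delayed_work *work)
{
        unsigned long expire = round_jiffies(jiffies + HZ);

        queue_delayed_work(wq, work, expire - jiffies);
}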
+3 -2
drivers/net/wireless/ipw2200.c
··· 1751 1751 /* Make sure the RF_KILL check timer is running */ 1752 1752 cancel_delayed_work(&priv->rf_kill); 1753 1753 queue_delayed_work(priv->workqueue, &priv->rf_kill, 1754 - 2 * HZ); 1754 + round_jiffies(2 * HZ)); 1755 1755 } else 1756 1756 queue_work(priv->workqueue, &priv->up); 1757 1757 } ··· 4690 4690 else if (priv->config & CFG_BACKGROUND_SCAN 4691 4691 && priv->status & STATUS_ASSOCIATED) 4692 4692 queue_delayed_work(priv->workqueue, 4693 - &priv->request_scan, HZ); 4693 + &priv->request_scan, 4694 + round_jiffies(HZ)); 4694 4695 4695 4696 /* Send an empty event to user space. 4696 4697 * We don't send the received data on the event because
+1 -1
drivers/net/wireless/libertas/cmd.c
··· 240 240 if (*enable) 241 241 penableRSN->enable = cpu_to_le16(cmd_enable_rsn); 242 242 else 243 - penableRSN->enable = cpu_to_le16(cmd_enable_rsn); 243 + penableRSN->enable = cpu_to_le16(cmd_disable_rsn); 244 244 } 245 245 246 246 lbs_deb_leave(LBS_DEB_CMD);
-1
drivers/net/wireless/libertas/rx.c
··· 439 439 ret = 0; 440 440 441 441 done: 442 - skb->protocol = __constant_htons(0x0019); /* ETH_P_80211_RAW */ 443 442 lbs_deb_leave_args(LBS_DEB_RX, "ret %d", ret); 444 443 return ret; 445 444 }
-1
drivers/net/wireless/libertas/version.h
··· 1 -
-3
drivers/net/wireless/libertas/wext.c
··· 1719 1719 pkey->type = KEY_TYPE_ID_TKIP; 1720 1720 } else if (alg == IW_ENCODE_ALG_CCMP) { 1721 1721 pkey->type = KEY_TYPE_ID_AES; 1722 - } else { 1723 - ret = -EINVAL; 1724 - goto out; 1725 1722 } 1726 1723 1727 1724 /* If WPA isn't enabled yet, do that now */
+6 -16
drivers/net/wireless/prism54/isl_ioctl.c
··· 1853 1853 islpci_private *priv = netdev_priv(ndev); 1854 1854 struct islpci_acl *acl = &priv->acl; 1855 1855 struct mac_entry *entry; 1856 - struct list_head *ptr; 1857 1856 struct sockaddr *addr = (struct sockaddr *) extra; 1858 1857 1859 1858 if (addr->sa_family != ARPHRD_ETHER) ··· 1860 1861 1861 1862 if (down_interruptible(&acl->sem)) 1862 1863 return -ERESTARTSYS; 1863 - for (ptr = acl->mac_list.next; ptr != &acl->mac_list; ptr = ptr->next) { 1864 - entry = list_entry(ptr, struct mac_entry, _list); 1865 - 1864 + list_for_each_entry(entry, &acl->mac_list, _list) { 1866 1865 if (memcmp(entry->addr, addr->sa_data, ETH_ALEN) == 0) { 1867 - list_del(ptr); 1866 + list_del(&entry->_list); 1868 1867 acl->size--; 1869 1868 kfree(entry); 1870 1869 up(&acl->sem); ··· 1880 1883 islpci_private *priv = netdev_priv(ndev); 1881 1884 struct islpci_acl *acl = &priv->acl; 1882 1885 struct mac_entry *entry; 1883 - struct list_head *ptr; 1884 1886 struct sockaddr *dst = (struct sockaddr *) extra; 1885 1887 1886 1888 dwrq->length = 0; ··· 1887 1891 if (down_interruptible(&acl->sem)) 1888 1892 return -ERESTARTSYS; 1889 1893 1890 - for (ptr = acl->mac_list.next; ptr != &acl->mac_list; ptr = ptr->next) { 1891 - entry = list_entry(ptr, struct mac_entry, _list); 1892 - 1894 + list_for_each_entry(entry, &acl->mac_list, _list) { 1893 1895 memcpy(dst->sa_data, entry->addr, ETH_ALEN); 1894 1896 dst->sa_family = ARPHRD_ETHER; 1895 1897 dwrq->length++; ··· 1954 1960 static int 1955 1961 prism54_mac_accept(struct islpci_acl *acl, char *mac) 1956 1962 { 1957 - struct list_head *ptr; 1958 1963 struct mac_entry *entry; 1959 1964 int res = 0; 1960 1965 ··· 1965 1972 return 1; 1966 1973 } 1967 1974 1968 - for (ptr = acl->mac_list.next; ptr != &acl->mac_list; ptr = ptr->next) { 1969 - entry = list_entry(ptr, struct mac_entry, _list); 1975 + list_for_each_entry(entry, &acl->mac_list, _list) { 1970 1976 if (memcmp(entry->addr, mac, ETH_ALEN) == 0) { 1971 1977 res = 1; 1972 1978 break; ··· 2208 2216 void 2209 2217 prism54_wpa_bss_ie_clean(islpci_private *priv) 2210 2218 { 2211 - struct list_head *ptr, *n; 2219 + struct islpci_bss_wpa_ie *bss, *n; 2212 2220 2213 - list_for_each_safe(ptr, n, &priv->bss_wpa_list) { 2214 - struct islpci_bss_wpa_ie *bss; 2215 - bss = list_entry(ptr, struct islpci_bss_wpa_ie, list); 2221 + list_for_each_entry_safe(bss, n, &priv->bss_wpa_list, list) { 2216 2222 kfree(bss); 2217 2223 } 2218 2224 }
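The prism54 ACL hunks above replace open-coded walks over struct list_head cursors with list_for_each_entry() and list_for_each_entry_safe(), which iterate directly over the containing structures; the _safe variant keeps a lookahead pointer so the current entry may be deleted during the walk. A small self-contained sketch of the same idiom (the mac_node structure is invented for illustration):

#include <linux/types.h>
#include <linux/list.h>
#include <linux/slab.h>

struct mac_node {
        struct list_head list;
        u8 addr[6];
};

/* Remove and free every entry on the list.  The lookahead cursor n lets
 * list_del()/kfree() run on the current entry without breaking iteration. */
static void mac_list_flush(struct list_head *head)
{
        struct mac_node *entry, *n;

        list_for_each_entry_safe(entry, n, head, list) {
                list_del(&entry->list);
                kfree(entry);
        }
}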
+2 -2
drivers/net/wireless/rtl8187_rtl8225.c
··· 67 67 msleep(2); 68 68 } 69 69 70 - static void rtl8225_write_8051(struct ieee80211_hw *dev, u8 addr, u16 data) 70 + static void rtl8225_write_8051(struct ieee80211_hw *dev, u8 addr, __le16 data) 71 71 { 72 72 struct rtl8187_priv *priv = dev->priv; 73 73 u16 reg80, reg82, reg84; ··· 106 106 struct rtl8187_priv *priv = dev->priv; 107 107 108 108 if (priv->asic_rev) 109 - rtl8225_write_8051(dev, addr, data); 109 + rtl8225_write_8051(dev, addr, cpu_to_le16(data)); 110 110 else 111 111 rtl8225_write_bitbang(dev, addr, data); 112 112 }
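The rtl8187 hunk above makes the byte order part of the function signature: rtl8225_write_8051() takes an explicit __le16 and the caller converts with cpu_to_le16(), so sparse (make C=1) can flag any cpu-order value that leaks into the little-endian path. A tiny sketch of that annotation style (the command structure below is a stand-in, not the driver's real layout):

#include <linux/types.h>
#include <asm/byteorder.h>

/* Hypothetical on-the-wire command with endian-annotated fields. */
struct example_cmd {
        __le16 addr;
        __le16 data;
};

static void example_fill_cmd(struct example_cmd *cmd, u16 addr, u16 data)
{
        cmd->addr = cpu_to_le16(addr);  /* explicit conversion, sparse-clean */
        cmd->data = cpu_to_le16(data);  /* "cmd->data = data;" would warn */
}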
+15 -73
drivers/net/wireless/zd1211rw/zd_chip.c
··· 49 49 ZD_MEMCLEAR(chip, sizeof(*chip)); 50 50 } 51 51 52 - static int scnprint_mac_oui(const u8 *addr, char *buffer, size_t size) 52 + static int scnprint_mac_oui(struct zd_chip *chip, char *buffer, size_t size) 53 53 { 54 + u8 *addr = zd_usb_to_netdev(&chip->usb)->dev_addr; 54 55 return scnprintf(buffer, size, "%02x-%02x-%02x", 55 56 addr[0], addr[1], addr[2]); 56 57 } ··· 62 61 int i = 0; 63 62 64 63 i = scnprintf(buffer, size, "zd1211%s chip ", 65 - chip->is_zd1211b ? "b" : ""); 64 + zd_chip_is_zd1211b(chip) ? "b" : ""); 66 65 i += zd_usb_scnprint_id(&chip->usb, buffer+i, size-i); 67 66 i += scnprintf(buffer+i, size-i, " "); 68 - i += scnprint_mac_oui(chip->e2p_mac, buffer+i, size-i); 67 + i += scnprint_mac_oui(chip, buffer+i, size-i); 69 68 i += scnprintf(buffer+i, size-i, " "); 70 69 i += zd_rf_scnprint_id(&chip->rf, buffer+i, size-i); 71 70 i += scnprintf(buffer+i, size-i, " pa%1x %c%c%c%c%c", chip->pa_type, ··· 367 366 return r; 368 367 } 369 368 370 - static int _read_mac_addr(struct zd_chip *chip, u8 *mac_addr, 371 - const zd_addr_t *addr) 372 - { 373 - int r; 374 - u32 parts[2]; 375 - 376 - r = zd_ioread32v_locked(chip, parts, (const zd_addr_t *)addr, 2); 377 - if (r) { 378 - dev_dbg_f(zd_chip_dev(chip), 379 - "error: couldn't read e2p macs. Error number %d\n", r); 380 - return r; 381 - } 382 - 383 - mac_addr[0] = parts[0]; 384 - mac_addr[1] = parts[0] >> 8; 385 - mac_addr[2] = parts[0] >> 16; 386 - mac_addr[3] = parts[0] >> 24; 387 - mac_addr[4] = parts[1]; 388 - mac_addr[5] = parts[1] >> 8; 389 - 390 - return 0; 391 - } 392 - 393 - static int read_e2p_mac_addr(struct zd_chip *chip) 394 - { 395 - static const zd_addr_t addr[2] = { E2P_MAC_ADDR_P1, E2P_MAC_ADDR_P2 }; 396 - 397 - ZD_ASSERT(mutex_is_locked(&chip->mutex)); 398 - return _read_mac_addr(chip, chip->e2p_mac, (const zd_addr_t *)addr); 399 - } 400 - 401 369 /* MAC address: if custom mac addresses are to to be used CR_MAC_ADDR_P1 and 402 370 * CR_MAC_ADDR_P2 must be overwritten 403 371 */ 404 - void zd_get_e2p_mac_addr(struct zd_chip *chip, u8 *mac_addr) 405 - { 406 - mutex_lock(&chip->mutex); 407 - memcpy(mac_addr, chip->e2p_mac, ETH_ALEN); 408 - mutex_unlock(&chip->mutex); 409 - } 410 - 411 - static int read_mac_addr(struct zd_chip *chip, u8 *mac_addr) 412 - { 413 - static const zd_addr_t addr[2] = { CR_MAC_ADDR_P1, CR_MAC_ADDR_P2 }; 414 - return _read_mac_addr(chip, mac_addr, (const zd_addr_t *)addr); 415 - } 416 - 417 - int zd_read_mac_addr(struct zd_chip *chip, u8 *mac_addr) 418 - { 419 - int r; 420 - 421 - dev_dbg_f(zd_chip_dev(chip), "\n"); 422 - mutex_lock(&chip->mutex); 423 - r = read_mac_addr(chip, mac_addr); 424 - mutex_unlock(&chip->mutex); 425 - return r; 426 - } 427 - 428 372 int zd_write_mac_addr(struct zd_chip *chip, const u8 *mac_addr) 429 373 { 430 374 int r; ··· 390 444 391 445 mutex_lock(&chip->mutex); 392 446 r = zd_iowrite32a_locked(chip, reqs, ARRAY_SIZE(reqs)); 393 - #ifdef DEBUG 394 - { 395 - u8 tmp[ETH_ALEN]; 396 - read_mac_addr(chip, tmp); 397 - } 398 - #endif /* DEBUG */ 399 447 mutex_unlock(&chip->mutex); 400 448 return r; 401 449 } ··· 749 809 750 810 static int hw_reset_phy(struct zd_chip *chip) 751 811 { 752 - return chip->is_zd1211b ? zd1211b_hw_reset_phy(chip) : 812 + return zd_chip_is_zd1211b(chip) ? zd1211b_hw_reset_phy(chip) : 753 813 zd1211_hw_reset_phy(chip); 754 814 } 755 815 ··· 814 874 if (r) 815 875 return r; 816 876 817 - return chip->is_zd1211b ? 877 + return zd_chip_is_zd1211b(chip) ? 
818 878 zd1211b_hw_init_hmac(chip) : zd1211_hw_init_hmac(chip); 819 879 } 820 880 ··· 1076 1136 return 0; 1077 1137 } 1078 1138 1139 + /* Read mac address using pre-firmware interface */ 1140 + int zd_chip_read_mac_addr_fw(struct zd_chip *chip, u8 *addr) 1141 + { 1142 + dev_dbg_f(zd_chip_dev(chip), "\n"); 1143 + return zd_usb_read_fw(&chip->usb, E2P_MAC_ADDR_P1, addr, 1144 + ETH_ALEN); 1145 + } 1079 1146 1080 - int zd_chip_init_hw(struct zd_chip *chip, u8 device_type) 1147 + int zd_chip_init_hw(struct zd_chip *chip) 1081 1148 { 1082 1149 int r; 1083 1150 u8 rf_type; ··· 1092 1145 dev_dbg_f(zd_chip_dev(chip), "\n"); 1093 1146 1094 1147 mutex_lock(&chip->mutex); 1095 - chip->is_zd1211b = (device_type == DEVICE_ZD1211B) != 0; 1096 1148 1097 1149 #ifdef DEBUG 1098 1150 r = test_init(chip); ··· 1147 1201 goto out; 1148 1202 #endif /* DEBUG */ 1149 1203 1150 - r = read_e2p_mac_addr(chip); 1151 - if (r) 1152 - goto out; 1153 - 1154 1204 r = read_cal_int_tables(chip); 1155 1205 if (r) 1156 1206 goto out; ··· 1201 1259 r = update_pwr_int(chip, channel); 1202 1260 if (r) 1203 1261 return r; 1204 - if (chip->is_zd1211b) { 1262 + if (zd_chip_is_zd1211b(chip)) { 1205 1263 static const struct zd_ioreq16 ioreqs[] = { 1206 1264 { CR69, 0x28 }, 1207 1265 {},
+8 -5
drivers/net/wireless/zd1211rw/zd_chip.h
··· 704 704 struct mutex mutex; 705 705 /* Base address of FW_REG_ registers */ 706 706 zd_addr_t fw_regs_base; 707 - u8 e2p_mac[ETH_ALEN]; 708 707 /* EepSetPoint in the vendor driver */ 709 708 u8 pwr_cal_values[E2P_CHANNEL_COUNT]; 710 709 /* integration values in the vendor driver */ ··· 714 715 unsigned int pa_type:4, 715 716 patch_cck_gain:1, patch_cr157:1, patch_6m_band_edge:1, 716 717 new_phy_layout:1, al2230s_bit:1, 717 - is_zd1211b:1, supports_tx_led:1; 718 + supports_tx_led:1; 718 719 }; 719 720 720 721 static inline struct zd_chip *zd_usb_to_chip(struct zd_usb *usb) ··· 733 734 struct net_device *netdev, 734 735 struct usb_interface *intf); 735 736 void zd_chip_clear(struct zd_chip *chip); 736 - int zd_chip_init_hw(struct zd_chip *chip, u8 device_type); 737 + int zd_chip_read_mac_addr_fw(struct zd_chip *chip, u8 *addr); 738 + int zd_chip_init_hw(struct zd_chip *chip); 737 739 int zd_chip_reset(struct zd_chip *chip); 740 + 741 + static inline int zd_chip_is_zd1211b(struct zd_chip *chip) 742 + { 743 + return chip->usb.is_zd1211b; 744 + } 738 745 739 746 static inline int zd_ioread16v_locked(struct zd_chip *chip, u16 *values, 740 747 const zd_addr_t *addresses, ··· 830 825 } 831 826 u8 zd_chip_get_channel(struct zd_chip *chip); 832 827 int zd_read_regdomain(struct zd_chip *chip, u8 *regdomain); 833 - void zd_get_e2p_mac_addr(struct zd_chip *chip, u8 *mac_addr); 834 - int zd_read_mac_addr(struct zd_chip *chip, u8 *mac_addr); 835 828 int zd_write_mac_addr(struct zd_chip *chip, const u8 *mac_addr); 836 829 int zd_chip_switch_radio_on(struct zd_chip *chip); 837 830 int zd_chip_switch_radio_off(struct zd_chip *chip);
+40 -19
drivers/net/wireless/zd1211rw/zd_mac.c
··· 86 86 return r; 87 87 } 88 88 89 - int zd_mac_init_hw(struct zd_mac *mac, u8 device_type) 89 + int zd_mac_preinit_hw(struct zd_mac *mac) 90 + { 91 + int r; 92 + u8 addr[ETH_ALEN]; 93 + 94 + r = zd_chip_read_mac_addr_fw(&mac->chip, addr); 95 + if (r) 96 + return r; 97 + 98 + memcpy(mac->netdev->dev_addr, addr, ETH_ALEN); 99 + return 0; 100 + } 101 + 102 + int zd_mac_init_hw(struct zd_mac *mac) 90 103 { 91 104 int r; 92 105 struct zd_chip *chip = &mac->chip; 93 - u8 addr[ETH_ALEN]; 94 106 u8 default_regdomain; 95 107 96 108 r = zd_chip_enable_int(chip); 97 109 if (r) 98 110 goto out; 99 - r = zd_chip_init_hw(chip, device_type); 111 + r = zd_chip_init_hw(chip); 100 112 if (r) 101 113 goto disable_int; 102 114 103 - zd_get_e2p_mac_addr(chip, addr); 104 - r = zd_write_mac_addr(chip, addr); 105 - if (r) 106 - goto disable_int; 107 115 ZD_ASSERT(!irqs_disabled()); 108 - spin_lock_irq(&mac->lock); 109 - memcpy(mac->netdev->dev_addr, addr, ETH_ALEN); 110 - spin_unlock_irq(&mac->lock); 111 116 112 117 r = zd_read_regdomain(chip, &default_regdomain); 113 118 if (r) 114 119 goto disable_int; 115 120 if (!zd_regdomain_supported(default_regdomain)) { 116 - dev_dbg_f(zd_mac_dev(mac), 117 - "Regulatory Domain %#04x is not supported.\n", 118 - default_regdomain); 119 - r = -EINVAL; 120 - goto disable_int; 121 + /* The vendor driver overrides the regulatory domain and 122 + * allowed channel registers and unconditionally restricts 123 + * available channels to 1-11 everywhere. Match their 124 + * questionable behaviour only for regdomains which we don't 125 + * recognise. */ 126 + dev_warn(zd_mac_dev(mac), "Unrecognised regulatory domain: " 127 + "%#04x. Defaulting to FCC.\n", default_regdomain); 128 + default_regdomain = ZD_REGDOMAIN_FCC; 121 129 } 122 130 spin_lock_irq(&mac->lock); 123 131 mac->regdomain = mac->default_regdomain = default_regdomain; ··· 172 164 { 173 165 struct zd_mac *mac = zd_netdev_mac(netdev); 174 166 struct zd_chip *chip = &mac->chip; 167 + struct zd_usb *usb = &chip->usb; 175 168 int r; 169 + 170 + if (!usb->initialized) { 171 + r = zd_usb_init_hw(usb); 172 + if (r) 173 + goto out; 174 + } 176 175 177 176 tasklet_enable(&mac->rx_tasklet); 178 177 179 178 r = zd_chip_enable_int(chip); 180 179 if (r < 0) 181 180 goto out; 181 + 182 + r = zd_write_mac_addr(chip, netdev->dev_addr); 183 + if (r) 184 + goto disable_int; 182 185 183 186 r = zd_chip_set_basic_rates(chip, CR_RATES_80211B | CR_RATES_80211G); 184 187 if (r < 0) ··· 270 251 dev_dbg_f(zd_mac_dev(mac), 271 252 "Setting MAC to " MAC_FMT "\n", MAC_ARG(addr->sa_data)); 272 253 273 - r = zd_write_mac_addr(chip, addr->sa_data); 274 - if (r) 275 - return r; 254 + if (netdev->flags & IFF_UP) { 255 + r = zd_write_mac_addr(chip, addr->sa_data); 256 + if (r) 257 + return r; 258 + } 276 259 277 260 spin_lock_irqsave(&mac->lock, flags); 278 261 memcpy(netdev->dev_addr, addr->sa_data, ETH_ALEN); ··· 876 855 /* ZD1211B: Computing the length difference this way, gives us 877 856 * flexibility to compute the packet length. 878 857 */ 879 - cs->packet_length = cpu_to_le16(mac->chip.is_zd1211b ? 858 + cs->packet_length = cpu_to_le16(zd_chip_is_zd1211b(&mac->chip) ? 880 859 packet_length - frag_len : packet_length); 881 860 882 861 /*
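The zd_mac.c rework above moves firmware upload out of probe: probe only pre-reads the MAC address, and the first zd_mac_open() performs the heavy initialisation once, guarded by an initialized flag. A generic sketch of that lazy-init-on-open pattern (the private structure and helpers are hypothetical, not the driver's own):

#include <linux/netdevice.h>

struct example_priv {
        bool hw_ready;          /* set once the expensive setup has run */
};

/* Hypothetical one-time setup (firmware upload, calibration, ...). */
static int example_init_hw(struct example_priv *priv)
{
        priv->hw_ready = true;
        return 0;
}

/* open() handler: pay the initialisation cost only when the interface is
 * actually brought up, and only the first time. */
static int example_open(struct net_device *dev)
{
        struct example_priv *priv = netdev_priv(dev);
        int ret;

        if (!priv->hw_ready) {
                ret = example_init_hw(priv);
                if (ret)
                        return ret;
        }
        return 0;
}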
+2 -1
drivers/net/wireless/zd1211rw/zd_mac.h
··· 189 189 struct usb_interface *intf); 190 190 void zd_mac_clear(struct zd_mac *mac); 191 191 192 - int zd_mac_init_hw(struct zd_mac *mac, u8 device_type); 192 + int zd_mac_preinit_hw(struct zd_mac *mac); 193 + int zd_mac_init_hw(struct zd_mac *mac); 193 194 194 195 int zd_mac_open(struct net_device *netdev); 195 196 int zd_mac_stop(struct net_device *netdev);
+2 -1
drivers/net/wireless/zd1211rw/zd_rf.c
··· 34 34 [AL2210_RF] = "AL2210_RF", 35 35 [MAXIM_NEW_RF] = "MAXIM_NEW_RF", 36 36 [UW2453_RF] = "UW2453_RF", 37 - [UNKNOWN_A_RF] = "UNKNOWN_A_RF", 37 + [AL2230S_RF] = "AL2230S_RF", 38 38 [RALINK_RF] = "RALINK_RF", 39 39 [INTERSIL_RF] = "INTERSIL_RF", 40 40 [RF2959_RF] = "RF2959_RF", ··· 77 77 r = zd_rf_init_rf2959(rf); 78 78 break; 79 79 case AL2230_RF: 80 + case AL2230S_RF: 80 81 r = zd_rf_init_al2230(rf); 81 82 break; 82 83 case AL7230B_RF:
+1 -1
drivers/net/wireless/zd1211rw/zd_rf.h
··· 26 26 #define AL2210_RF 0x7 27 27 #define MAXIM_NEW_RF 0x8 28 28 #define UW2453_RF 0x9 29 - #define UNKNOWN_A_RF 0xa 29 + #define AL2230S_RF 0xa 30 30 #define RALINK_RF 0xb 31 31 #define INTERSIL_RF 0xc 32 32 #define RF2959_RF 0xd
+7 -5
drivers/net/wireless/zd1211rw/zd_rf_al2230.c
··· 21 21 #include "zd_usb.h" 22 22 #include "zd_chip.h" 23 23 24 + #define IS_AL2230S(chip) ((chip)->al2230s_bit || (chip)->rf.type == AL2230S_RF) 25 + 24 26 static const u32 zd1211_al2230_table[][3] = { 25 27 RF_CHANNEL( 1) = { 0x03f790, 0x033331, 0x00000d, }, 26 28 RF_CHANNEL( 2) = { 0x03f790, 0x0b3331, 0x00000d, }, ··· 178 176 if (r) 179 177 return r; 180 178 181 - if (chip->al2230s_bit) { 179 + if (IS_AL2230S(chip)) { 182 180 r = zd_iowrite16a_locked(chip, ioreqs_init_al2230s, 183 181 ARRAY_SIZE(ioreqs_init_al2230s)); 184 182 if (r) ··· 190 188 return r; 191 189 192 190 /* improve band edge for AL2230S */ 193 - if (chip->al2230s_bit) 191 + if (IS_AL2230S(chip)) 194 192 r = zd_rfwrite_locked(chip, 0x000824, RF_RV_BITS); 195 193 else 196 194 r = zd_rfwrite_locked(chip, 0x0005a4, RF_RV_BITS); ··· 316 314 if (r) 317 315 return r; 318 316 319 - if (chip->al2230s_bit) { 317 + if (IS_AL2230S(chip)) { 320 318 r = zd_iowrite16a_locked(chip, ioreqs_init_al2230s, 321 319 ARRAY_SIZE(ioreqs_init_al2230s)); 322 320 if (r) ··· 330 328 if (r) 331 329 return r; 332 330 333 - if (chip->al2230s_bit) 331 + if (IS_AL2230S(chip)) 334 332 r = zd_rfwrite_locked(chip, 0x241000, RF_RV_BITS); 335 333 else 336 334 r = zd_rfwrite_locked(chip, 0x25a000, RF_RV_BITS); ··· 424 422 struct zd_chip *chip = zd_rf_to_chip(rf); 425 423 426 424 rf->switch_radio_off = al2230_switch_radio_off; 427 - if (chip->is_zd1211b) { 425 + if (zd_chip_is_zd1211b(chip)) { 428 426 rf->init_hw = zd1211b_al2230_init_hw; 429 427 rf->set_channel = zd1211b_al2230_set_channel; 430 428 rf->switch_radio_on = zd1211b_al2230_switch_radio_on;
+1 -1
drivers/net/wireless/zd1211rw/zd_rf_al7230b.c
··· 473 473 { 474 474 struct zd_chip *chip = zd_rf_to_chip(rf); 475 475 476 - if (chip->is_zd1211b) { 476 + if (zd_chip_is_zd1211b(chip)) { 477 477 rf->init_hw = zd1211b_al7230b_init_hw; 478 478 rf->switch_radio_on = zd1211b_al7230b_switch_radio_on; 479 479 rf->set_channel = zd1211b_al7230b_set_channel;
+1 -1
drivers/net/wireless/zd1211rw/zd_rf_rf2959.c
··· 265 265 { 266 266 struct zd_chip *chip = zd_rf_to_chip(rf); 267 267 268 - if (chip->is_zd1211b) { 268 + if (zd_chip_is_zd1211b(chip)) { 269 269 dev_err(zd_chip_dev(chip), 270 270 "RF2959 is currently not supported for ZD1211B" 271 271 " devices\n");
+1 -1
drivers/net/wireless/zd1211rw/zd_rf_uw2453.c
··· 486 486 if (r) 487 487 return r; 488 488 489 - if (chip->is_zd1211b) 489 + if (zd_chip_is_zd1211b(chip)) 490 490 ioreqs[1].value = 0x7f; 491 491 492 492 return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
+71 -27
drivers/net/wireless/zd1211rw/zd_usb.c
··· 71 71 { USB_DEVICE(0x0586, 0x3412), .driver_info = DEVICE_ZD1211B }, 72 72 { USB_DEVICE(0x0586, 0x3413), .driver_info = DEVICE_ZD1211B }, 73 73 { USB_DEVICE(0x0053, 0x5301), .driver_info = DEVICE_ZD1211B }, 74 + { USB_DEVICE(0x0411, 0x00da), .driver_info = DEVICE_ZD1211B }, 74 75 /* "Driverless" devices that need ejecting */ 75 76 { USB_DEVICE(0x0ace, 0x2011), .driver_info = DEVICE_INSTALLER }, 76 77 { USB_DEVICE(0x0ace, 0x20ff), .driver_info = DEVICE_INSTALLER }, ··· 196 195 return le16_to_cpu(p[offset]); 197 196 } 198 197 199 - static char *get_fw_name(char *buffer, size_t size, u8 device_type, 198 + static char *get_fw_name(struct zd_usb *usb, char *buffer, size_t size, 200 199 const char* postfix) 201 200 { 202 201 scnprintf(buffer, size, "%s%s", 203 - device_type == DEVICE_ZD1211B ? 202 + usb->is_zd1211b ? 204 203 FW_ZD1211B_PREFIX : FW_ZD1211_PREFIX, 205 204 postfix); 206 205 return buffer; 207 206 } 208 207 209 - static int handle_version_mismatch(struct usb_device *udev, u8 device_type, 208 + static int handle_version_mismatch(struct zd_usb *usb, 210 209 const struct firmware *ub_fw) 211 210 { 211 + struct usb_device *udev = zd_usb_to_usbdev(usb); 212 212 const struct firmware *ur_fw = NULL; 213 213 int offset; 214 214 int r = 0; 215 215 char fw_name[128]; 216 216 217 217 r = request_fw_file(&ur_fw, 218 - get_fw_name(fw_name, sizeof(fw_name), device_type, "ur"), 218 + get_fw_name(usb, fw_name, sizeof(fw_name), "ur"), 219 219 &udev->dev); 220 220 if (r) 221 221 goto error; ··· 239 237 return r; 240 238 } 241 239 242 - static int upload_firmware(struct usb_device *udev, u8 device_type) 240 + static int upload_firmware(struct zd_usb *usb) 243 241 { 244 242 int r; 245 243 u16 fw_bcdDevice; 246 244 u16 bcdDevice; 245 + struct usb_device *udev = zd_usb_to_usbdev(usb); 247 246 const struct firmware *ub_fw = NULL; 248 247 const struct firmware *uph_fw = NULL; 249 248 char fw_name[128]; ··· 252 249 bcdDevice = get_bcdDevice(udev); 253 250 254 251 r = request_fw_file(&ub_fw, 255 - get_fw_name(fw_name, sizeof(fw_name), device_type, "ub"), 252 + get_fw_name(usb, fw_name, sizeof(fw_name), "ub"), 256 253 &udev->dev); 257 254 if (r) 258 255 goto error; ··· 267 264 dev_warn(&udev->dev, "device has old bootcode, please " 268 265 "report success or failure\n"); 269 266 270 - r = handle_version_mismatch(udev, device_type, ub_fw); 267 + r = handle_version_mismatch(usb, ub_fw); 271 268 if (r) 272 269 goto error; 273 270 } else { ··· 278 275 279 276 280 277 r = request_fw_file(&uph_fw, 281 - get_fw_name(fw_name, sizeof(fw_name), device_type, "uphr"), 278 + get_fw_name(usb, fw_name, sizeof(fw_name), "uphr"), 282 279 &udev->dev); 283 280 if (r) 284 281 goto error; ··· 295 292 release_firmware(ub_fw); 296 293 release_firmware(uph_fw); 297 294 return r; 295 + } 296 + 297 + /* Read data from device address space using "firmware interface" which does 298 + * not require firmware to be loaded. 
*/ 299 + int zd_usb_read_fw(struct zd_usb *usb, zd_addr_t addr, u8 *data, u16 len) 300 + { 301 + int r; 302 + struct usb_device *udev = zd_usb_to_usbdev(usb); 303 + 304 + r = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 305 + USB_REQ_FIRMWARE_READ_DATA, USB_DIR_IN | 0x40, addr, 0, 306 + data, len, 5000); 307 + if (r < 0) { 308 + dev_err(&udev->dev, 309 + "read over firmware interface failed: %d\n", r); 310 + return r; 311 + } else if (r != len) { 312 + dev_err(&udev->dev, 313 + "incomplete read over firmware interface: %d/%d\n", 314 + r, len); 315 + return -EIO; 316 + } 317 + 318 + return 0; 298 319 } 299 320 300 321 #define urb_dev(urb) (&(urb)->dev->dev) ··· 947 920 return 0; 948 921 } 949 922 923 + int zd_usb_init_hw(struct zd_usb *usb) 924 + { 925 + int r; 926 + struct zd_mac *mac = zd_usb_to_mac(usb); 927 + 928 + dev_dbg_f(zd_usb_dev(usb), "\n"); 929 + 930 + r = upload_firmware(usb); 931 + if (r) { 932 + dev_err(zd_usb_dev(usb), 933 + "couldn't load firmware. Error number %d\n", r); 934 + return r; 935 + } 936 + 937 + r = usb_reset_configuration(zd_usb_to_usbdev(usb)); 938 + if (r) { 939 + dev_dbg_f(zd_usb_dev(usb), 940 + "couldn't reset configuration. Error number %d\n", r); 941 + return r; 942 + } 943 + 944 + r = zd_mac_init_hw(mac); 945 + if (r) { 946 + dev_dbg_f(zd_usb_dev(usb), 947 + "couldn't initialize mac. Error number %d\n", r); 948 + return r; 949 + } 950 + 951 + usb->initialized = 1; 952 + return 0; 953 + } 954 + 950 955 static int probe(struct usb_interface *intf, const struct usb_device_id *id) 951 956 { 952 957 int r; 958 + struct zd_usb *usb; 953 959 struct usb_device *udev = interface_to_usbdev(intf); 954 960 struct net_device *netdev = NULL; 955 961 ··· 1010 950 goto error; 1011 951 } 1012 952 1013 - r = upload_firmware(udev, id->driver_info); 1014 - if (r) { 1015 - dev_err(&intf->dev, 1016 - "couldn't load firmware. Error number %d\n", r); 1017 - goto error; 1018 - } 953 + usb = &zd_netdev_mac(netdev)->chip.usb; 954 + usb->is_zd1211b = (id->driver_info == DEVICE_ZD1211B) != 0; 1019 955 1020 - r = usb_reset_configuration(udev); 1021 - if (r) { 1022 - dev_dbg_f(&intf->dev, 1023 - "couldn't reset configuration. Error number %d\n", r); 1024 - goto error; 1025 - } 1026 - 1027 - /* At this point the interrupt endpoint is not generally enabled. We 1028 - * save the USB bandwidth until the network device is opened. But 1029 - * notify that the initialization of the MAC will require the 1030 - * interrupts to be temporary enabled. 1031 - */ 1032 - r = zd_mac_init_hw(zd_netdev_mac(netdev), id->driver_info); 956 + r = zd_mac_preinit_hw(zd_netdev_mac(netdev)); 1033 957 if (r) { 1034 958 dev_dbg_f(&intf->dev, 1035 959 "couldn't initialize mac. Error number %d\n", r);
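zd_usb_read_fw() above has the usual shape of a vendor-specific USB control-IN transfer: usb_control_msg() on the default control pipe, a device-defined bRequest, and a check that the full length arrived. A stripped-down sketch of the same call pattern (the request number below is a placeholder, not the ZyDAS one):

#include <linux/usb.h>
#include <linux/errno.h>

#define EXAMPLE_VENDOR_REQ      0x01    /* placeholder bRequest */

/* Read len bytes from a vendor-defined address space.  buf must be
 * DMA-capable memory (kmalloc'd), not a stack buffer.  Returns 0 on
 * success or a negative errno. */
static int example_vendor_read(struct usb_device *udev, u16 addr,
                               void *buf, u16 len)
{
        int r;

        r = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
                            EXAMPLE_VENDOR_REQ,
                            USB_DIR_IN | USB_TYPE_VENDOR,
                            addr, 0, buf, len, 5000 /* ms */);
        if (r < 0)
                return r;
        if (r != len)
                return -EIO;    /* short read */
        return 0;
}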
+3
drivers/net/wireless/zd1211rw/zd_usb.h
··· 188 188 struct zd_usb_rx rx; 189 189 struct zd_usb_tx tx; 190 190 struct usb_interface *intf; 191 + u8 is_zd1211b:1, initialized:1; 191 192 }; 192 193 193 194 #define zd_usb_dev(usb) (&usb->intf->dev) ··· 236 235 unsigned int count); 237 236 238 237 int zd_usb_rfwrite(struct zd_usb *usb, u32 value, u8 bits); 238 + 239 + int zd_usb_read_fw(struct zd_usb *usb, zd_addr_t addr, u8 *data, u16 len); 239 240 240 241 extern struct workqueue_struct *zd_workqueue; 241 242
+9
drivers/s390/net/qeth.h
··· 211 211 /* initial values when measuring starts */ 212 212 unsigned long initial_rx_packets; 213 213 unsigned long initial_tx_packets; 214 + /* inbound scatter gather data */ 215 + unsigned int sg_skbs_rx; 216 + unsigned int sg_frags_rx; 217 + unsigned int sg_alloc_page_rx; 214 218 }; 215 219 216 220 /* Routing stuff */ ··· 344 340 #define QETH_WATERMARK_PACK_FUZZ 1 345 341 346 342 #define QETH_IP_HEADER_SIZE 40 343 + 344 + /* large receive scatter gather copy break */ 345 + #define QETH_RX_SG_CB (PAGE_SIZE >> 1) 347 346 348 347 struct qeth_hdr_layer3 { 349 348 __u8 id; ··· 778 771 int layer2; 779 772 enum qeth_large_send_types large_send; 780 773 int performance_stats; 774 + int rx_sg_cb; 781 775 }; 782 776 783 777 /* ··· 836 828 int (*orig_hard_header)(struct sk_buff *,struct net_device *, 837 829 unsigned short,void *,void *,unsigned); 838 830 struct qeth_osn_info osn_info; 831 + atomic_t force_alloc_skb; 839 832 }; 840 833 841 834 struct qeth_card_list_struct {
+151 -32
drivers/s390/net/qeth_main.c
··· 1054 1054 else 1055 1055 card->options.layer2 = 0; 1056 1056 card->options.performance_stats = 0; 1057 + card->options.rx_sg_cb = QETH_RX_SG_CB; 1057 1058 } 1058 1059 1059 1060 /** ··· 1935 1934 atomic_inc(&reply->received); 1936 1935 wake_up(&reply->wait_q); 1937 1936 } 1937 + cpu_relax(); 1938 1938 }; 1939 1939 rc = reply->rc; 1940 1940 qeth_put_reply(reply); ··· 2260 2258 return skb; 2261 2259 } 2262 2260 2261 + static inline int 2262 + qeth_create_skb_frag(struct qdio_buffer_element *element, 2263 + struct sk_buff **pskb, 2264 + int offset, int *pfrag, int data_len) 2265 + { 2266 + struct page *page = virt_to_page(element->addr); 2267 + if (*pfrag == 0) { 2268 + /* the upper protocol layers assume that there is data in the 2269 + * skb itself. Copy a small amount (64 bytes) to make them 2270 + * happy. */ 2271 + *pskb = dev_alloc_skb(64 + QETH_FAKE_LL_LEN_ETH); 2272 + if (!(*pskb)) 2273 + return -ENOMEM; 2274 + skb_reserve(*pskb, QETH_FAKE_LL_LEN_ETH); 2275 + if (data_len <= 64) { 2276 + memcpy(skb_put(*pskb, data_len), element->addr + offset, 2277 + data_len); 2278 + } else { 2279 + get_page(page); 2280 + memcpy(skb_put(*pskb, 64), element->addr + offset, 64); 2281 + skb_fill_page_desc(*pskb, *pfrag, page, offset + 64, 2282 + data_len - 64); 2283 + (*pskb)->data_len += data_len - 64; 2284 + (*pskb)->len += data_len - 64; 2285 + (*pskb)->truesize += data_len - 64; 2286 + } 2287 + } else { 2288 + get_page(page); 2289 + skb_fill_page_desc(*pskb, *pfrag, page, offset, data_len); 2290 + (*pskb)->data_len += data_len; 2291 + (*pskb)->len += data_len; 2292 + (*pskb)->truesize += data_len; 2293 + } 2294 + (*pfrag)++; 2295 + return 0; 2296 + } 2297 + 2298 + static inline struct qeth_buffer_pool_entry * 2299 + qeth_find_free_buffer_pool_entry(struct qeth_card *card) 2300 + { 2301 + struct list_head *plh; 2302 + struct qeth_buffer_pool_entry *entry; 2303 + int i, free; 2304 + struct page *page; 2305 + 2306 + if (list_empty(&card->qdio.in_buf_pool.entry_list)) 2307 + return NULL; 2308 + 2309 + list_for_each(plh, &card->qdio.in_buf_pool.entry_list) { 2310 + entry = list_entry(plh, struct qeth_buffer_pool_entry, list); 2311 + free = 1; 2312 + for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) { 2313 + if (page_count(virt_to_page(entry->elements[i])) > 1) { 2314 + free = 0; 2315 + break; 2316 + } 2317 + } 2318 + if (free) { 2319 + list_del_init(&entry->list); 2320 + return entry; 2321 + } 2322 + } 2323 + 2324 + /* no free buffer in pool so take first one and swap pages */ 2325 + entry = list_entry(card->qdio.in_buf_pool.entry_list.next, 2326 + struct qeth_buffer_pool_entry, list); 2327 + for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) { 2328 + if (page_count(virt_to_page(entry->elements[i])) > 1) { 2329 + page = alloc_page(GFP_ATOMIC|GFP_DMA); 2330 + if (!page) { 2331 + return NULL; 2332 + } else { 2333 + free_page((unsigned long)entry->elements[i]); 2334 + entry->elements[i] = page_address(page); 2335 + if (card->options.performance_stats) 2336 + card->perf_stats.sg_alloc_page_rx++; 2337 + } 2338 + } 2339 + } 2340 + list_del_init(&entry->list); 2341 + return entry; 2342 + } 2343 + 2263 2344 static struct sk_buff * 2264 2345 qeth_get_next_skb(struct qeth_card *card, struct qdio_buffer *buffer, 2265 2346 struct qdio_buffer_element **__element, int *__offset, ··· 2354 2269 int skb_len; 2355 2270 void *data_ptr; 2356 2271 int data_len; 2272 + int use_rx_sg = 0; 2273 + int frag = 0; 2357 2274 2358 2275 QETH_DBF_TEXT(trace,6,"nextskb"); 2359 2276 /* qeth_hdr must not cross element boundaries 
*/ ··· 2380 2293 2381 2294 if (!skb_len) 2382 2295 return NULL; 2383 - if (card->options.fake_ll){ 2384 - if(card->dev->type == ARPHRD_IEEE802_TR){ 2385 - if (!(skb = qeth_get_skb(skb_len+QETH_FAKE_LL_LEN_TR, *hdr))) 2386 - goto no_mem; 2387 - skb_reserve(skb,QETH_FAKE_LL_LEN_TR); 2296 + if ((skb_len >= card->options.rx_sg_cb) && 2297 + (!(card->info.type == QETH_CARD_TYPE_OSN)) && 2298 + (!atomic_read(&card->force_alloc_skb))) { 2299 + use_rx_sg = 1; 2300 + } else { 2301 + if (card->options.fake_ll) { 2302 + if (card->dev->type == ARPHRD_IEEE802_TR) { 2303 + if (!(skb = qeth_get_skb(skb_len + 2304 + QETH_FAKE_LL_LEN_TR, *hdr))) 2305 + goto no_mem; 2306 + skb_reserve(skb, QETH_FAKE_LL_LEN_TR); 2307 + } else { 2308 + if (!(skb = qeth_get_skb(skb_len + 2309 + QETH_FAKE_LL_LEN_ETH, *hdr))) 2310 + goto no_mem; 2311 + skb_reserve(skb, QETH_FAKE_LL_LEN_ETH); 2312 + } 2388 2313 } else { 2389 - if (!(skb = qeth_get_skb(skb_len+QETH_FAKE_LL_LEN_ETH, *hdr))) 2314 + skb = qeth_get_skb(skb_len, *hdr); 2315 + if (!skb) 2390 2316 goto no_mem; 2391 - skb_reserve(skb,QETH_FAKE_LL_LEN_ETH); 2392 2317 } 2393 - } else if (!(skb = qeth_get_skb(skb_len, *hdr))) 2394 - goto no_mem; 2318 + } 2319 + 2395 2320 data_ptr = element->addr + offset; 2396 2321 while (skb_len) { 2397 2322 data_len = min(skb_len, (int)(element->length - offset)); 2398 - if (data_len) 2399 - memcpy(skb_put(skb, data_len), data_ptr, data_len); 2323 + if (data_len) { 2324 + if (use_rx_sg) { 2325 + if (qeth_create_skb_frag(element, &skb, offset, 2326 + &frag, data_len)) 2327 + goto no_mem; 2328 + } else { 2329 + memcpy(skb_put(skb, data_len), data_ptr, 2330 + data_len); 2331 + } 2332 + } 2400 2333 skb_len -= data_len; 2401 2334 if (skb_len){ 2402 2335 if (qeth_is_last_sbale(element)){ ··· 2438 2331 } 2439 2332 *__element = element; 2440 2333 *__offset = offset; 2334 + if (use_rx_sg && card->options.performance_stats) { 2335 + card->perf_stats.sg_skbs_rx++; 2336 + card->perf_stats.sg_frags_rx += skb_shinfo(skb)->nr_frags; 2337 + } 2441 2338 return skb; 2442 2339 no_mem: 2443 2340 if (net_ratelimit()){ ··· 2719 2608 } 2720 2609 } 2721 2610 2722 - static struct qeth_buffer_pool_entry * 2723 - qeth_get_buffer_pool_entry(struct qeth_card *card) 2724 - { 2725 - struct qeth_buffer_pool_entry *entry; 2726 - 2727 - QETH_DBF_TEXT(trace, 6, "gtbfplen"); 2728 - if (!list_empty(&card->qdio.in_buf_pool.entry_list)) { 2729 - entry = list_entry(card->qdio.in_buf_pool.entry_list.next, 2730 - struct qeth_buffer_pool_entry, list); 2731 - list_del_init(&entry->list); 2732 - return entry; 2733 - } 2734 - return NULL; 2735 - } 2736 - 2737 - static void 2611 + static int 2738 2612 qeth_init_input_buffer(struct qeth_card *card, struct qeth_qdio_buffer *buf) 2739 2613 { 2740 2614 struct qeth_buffer_pool_entry *pool_entry; 2741 2615 int i; 2742 - 2743 - pool_entry = qeth_get_buffer_pool_entry(card); 2616 + 2617 + pool_entry = qeth_find_free_buffer_pool_entry(card); 2618 + if (!pool_entry) 2619 + return 1; 2744 2620 /* 2745 2621 * since the buffer is accessed only from the input_tasklet 2746 2622 * there shouldn't be a need to synchronize; also, since we use ··· 2746 2648 buf->buffer->element[i].flags = 0; 2747 2649 } 2748 2650 buf->state = QETH_QDIO_BUF_EMPTY; 2651 + return 0; 2749 2652 } 2750 2653 2751 2654 static void ··· 2781 2682 int count; 2782 2683 int i; 2783 2684 int rc; 2685 + int newcount = 0; 2784 2686 2785 2687 QETH_DBF_TEXT(trace,6,"queinbuf"); 2786 2688 count = (index < queue->next_buf_to_init)? 
··· 2792 2692 /* only requeue at a certain threshold to avoid SIGAs */ 2793 2693 if (count >= QETH_IN_BUF_REQUEUE_THRESHOLD(card)){ 2794 2694 for (i = queue->next_buf_to_init; 2795 - i < queue->next_buf_to_init + count; ++i) 2796 - qeth_init_input_buffer(card, 2797 - &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q]); 2695 + i < queue->next_buf_to_init + count; ++i) { 2696 + if (qeth_init_input_buffer(card, 2697 + &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q])) { 2698 + break; 2699 + } else { 2700 + newcount++; 2701 + } 2702 + } 2703 + 2704 + if (newcount < count) { 2705 + /* we are in memory shortage so we switch back to 2706 + traditional skb allocation and drop packages */ 2707 + if (atomic_cmpxchg(&card->force_alloc_skb, 0, 1)) 2708 + printk(KERN_WARNING 2709 + "qeth: switch to alloc skb\n"); 2710 + count = newcount; 2711 + } else { 2712 + if (atomic_cmpxchg(&card->force_alloc_skb, 1, 0)) 2713 + printk(KERN_WARNING "qeth: switch to sg\n"); 2714 + } 2715 + 2798 2716 /* 2799 2717 * according to old code it should be avoided to requeue all 2800 2718 * 128 buffers in order to benefit from PCI avoidance. ··· 6612 6494 6613 6495 QETH_DBF_TEXT(setup, 2, "hrdsetup"); 6614 6496 6497 + atomic_set(&card->force_alloc_skb, 0); 6615 6498 retry: 6616 6499 if (retries < 3){ 6617 6500 PRINT_WARN("Retrying to do IDX activates.\n");
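The qeth receive path above now builds scatter-gather skbs for large frames: roughly 64 bytes are copied into the linear area so upper layers find data there, and the rest of the buffer is attached as a page fragment via get_page() plus skb_fill_page_desc(). A compact sketch of that copy-break idiom (the threshold and function name are illustrative, not the driver's):

#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/mm.h>

#define EXAMPLE_COPY_BREAK      64      /* bytes copied into the linear area */

/* Attach one page-backed receive buffer to an skb: copy a small head,
 * reference the page and hang the remainder off it as a fragment. */
static int example_add_rx_frag(struct sk_buff *skb, struct page *page,
                               void *data, int offset, int len)
{
        int copy = min(len, EXAMPLE_COPY_BREAK);

        memcpy(skb_put(skb, copy), data + offset, copy);
        if (len > copy) {
                get_page(page);         /* the fragment holds its own reference */
                skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
                                   page, offset + copy, len - copy);
                skb->data_len += len - copy;
                skb->len      += len - copy;
                skb->truesize += len - copy;
        }
        return 0;
}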
+6
drivers/s390/net/qeth_proc.c
··· 212 212 " Skb fragments sent in SG mode : %u\n\n", 213 213 card->perf_stats.sg_skbs_sent, 214 214 card->perf_stats.sg_frags_sent); 215 + seq_printf(s, " Skbs received in SG mode : %u\n" 216 + " Skb fragments received in SG mode : %u\n" 217 + " Page allocations for rx SG mode : %u\n\n", 218 + card->perf_stats.sg_skbs_rx, 219 + card->perf_stats.sg_frags_rx, 220 + card->perf_stats.sg_alloc_page_rx); 215 221 seq_printf(s, " large_send tx (in Kbytes) : %u\n" 216 222 " large_send count : %u\n\n", 217 223 card->perf_stats.large_send_bytes >> 10,
+1
include/asm-arm/arch-at91/board.h
··· 64 64 65 65 /* Ethernet (EMAC & MACB) */ 66 66 struct at91_eth_data { 67 + u32 phy_mask; 67 68 u8 phy_irq_pin; /* PHY IRQ */ 68 69 u8 is_rmii; /* using RMII interface? */ 69 70 };
+1
include/asm-avr32/arch-at32ap/board.h
··· 21 21 struct platform_device *at32_add_device_usart(unsigned int id); 22 22 23 23 struct eth_platform_data { 24 + u32 phy_mask; 24 25 u8 is_rmii; 25 26 }; 26 27 struct platform_device *
+4 -1
net/ieee80211/softmac/ieee80211softmac_assoc.c
··· 271 271 */ 272 272 dprintk(KERN_INFO PFX "Associate: Scanning for networks first.\n"); 273 273 ieee80211softmac_notify(mac->dev, IEEE80211SOFTMAC_EVENT_SCAN_FINISHED, ieee80211softmac_assoc_notify_scan, NULL); 274 - if (ieee80211softmac_start_scan(mac)) 274 + if (ieee80211softmac_start_scan(mac)) { 275 275 dprintk(KERN_INFO PFX "Associate: failed to initiate scan. Is device up?\n"); 276 + mac->associnfo.associating = 0; 277 + mac->associnfo.associated = 0; 278 + } 276 279 goto out; 277 280 } else { 278 281 mac->associnfo.associating = 0;