Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

staging: qlge: use qlge_* prefix to avoid namespace clashes with other qlogic drivers

To avoid namespace clashes with other qlogic drivers and also for the
sake of naming consistency, use the "qlge_" prefix as suggested in
drivers/staging/qlge/TODO,
- For existing ql_ prefix,
sed -i "s/ql_/qlge_/g" *.{c,h}
- For structs not having a prefix,
1. get a list of structs
grep "struct.*{" qlge.h
2. add qlge_ for each struct, e.g.,
sed -i "s/ib_ae_iocb_rsp/qlge_ib_ae_iocb_rsp/g" *.{c,h}

Link: https://lore.kernel.org/patchwork/patch/1318503/#1516131
Suggested-by: Benjamin Poirier <benjamin.poirier@gmail.com>
Signed-off-by: Coiby Xu <coiby.xu@gmail.com>
Link: https://lore.kernel.org/r/20210123104613.38359-2-coiby.xu@gmail.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

authored by

Coiby Xu and committed by
Greg Kroah-Hartman
f8c047be 684ceb81

+1554 -1559
-4
drivers/staging/qlge/TODO
··· 28 28 * the driver has a habit of using runtime checks where compile time checks are 29 29 possible (ex. ql_free_rx_buffers(), ql_alloc_rx_buffers()) 30 30 * reorder struct members to avoid holes if it doesn't impact performance 31 - * in terms of namespace, the driver uses either qlge_, ql_ (used by 32 - other qlogic drivers, with clashes, ex: ql_sem_spinlock) or nothing (with 33 - clashes, ex: struct ob_mac_iocb_req). Rename everything to use the "qlge_" 34 - prefix. 35 31 * avoid legacy/deprecated apis (ex. replace pci_dma_*, replace pci_enable_msi, 36 32 use pci_iomap) 37 33 * some "while" loops could be rewritten with simple "for", ex.
+105 -105
drivers/staging/qlge/qlge.h
··· 1081 1081 #define OPCODE_IB_MPI_IOCB 0x21 1082 1082 #define OPCODE_IB_AE_IOCB 0x3f 1083 1083 1084 - struct ob_mac_iocb_req { 1084 + struct qlge_ob_mac_iocb_req { 1085 1085 u8 opcode; 1086 1086 u8 flags1; 1087 1087 #define OB_MAC_IOCB_REQ_OI 0x01 ··· 1104 1104 struct tx_buf_desc tbd[TX_DESC_PER_IOCB]; 1105 1105 } __packed; 1106 1106 1107 - struct ob_mac_iocb_rsp { 1107 + struct qlge_ob_mac_iocb_rsp { 1108 1108 u8 opcode; /* */ 1109 1109 u8 flags1; /* */ 1110 1110 #define OB_MAC_IOCB_RSP_OI 0x01 /* */ ··· 1121 1121 __le32 reserved[13]; 1122 1122 } __packed; 1123 1123 1124 - struct ob_mac_tso_iocb_req { 1124 + struct qlge_ob_mac_tso_iocb_req { 1125 1125 u8 opcode; 1126 1126 u8 flags1; 1127 1127 #define OB_MAC_TSO_IOCB_OI 0x01 ··· 1149 1149 struct tx_buf_desc tbd[TX_DESC_PER_IOCB]; 1150 1150 } __packed; 1151 1151 1152 - struct ob_mac_tso_iocb_rsp { 1152 + struct qlge_ob_mac_tso_iocb_rsp { 1153 1153 u8 opcode; 1154 1154 u8 flags1; 1155 1155 #define OB_MAC_TSO_IOCB_RSP_OI 0x01 ··· 1166 1166 __le32 reserved2[13]; 1167 1167 } __packed; 1168 1168 1169 - struct ib_mac_iocb_rsp { 1169 + struct qlge_ib_mac_iocb_rsp { 1170 1170 u8 opcode; /* 0x20 */ 1171 1171 u8 flags1; 1172 1172 #define IB_MAC_IOCB_RSP_OI 0x01 /* Override intr delay */ ··· 1225 1225 __le64 hdr_addr; /* */ 1226 1226 } __packed; 1227 1227 1228 - struct ib_ae_iocb_rsp { 1228 + struct qlge_ib_ae_iocb_rsp { 1229 1229 u8 opcode; 1230 1230 u8 flags1; 1231 1231 #define IB_AE_IOCB_RSP_OI 0x01 ··· 1250 1250 * These three structures are for generic 1251 1251 * handling of ib and ob iocbs. 1252 1252 */ 1253 - struct ql_net_rsp_iocb { 1253 + struct qlge_net_rsp_iocb { 1254 1254 u8 opcode; 1255 1255 u8 flags0; 1256 1256 __le16 length; ··· 1258 1258 __le32 reserved[14]; 1259 1259 } __packed; 1260 1260 1261 - struct net_req_iocb { 1261 + struct qlge_net_req_iocb { 1262 1262 u8 opcode; 1263 1263 u8 flags0; 1264 1264 __le16 flags1; ··· 1346 1346 1347 1347 /* SOFTWARE/DRIVER DATA STRUCTURES. 
*/ 1348 1348 1349 - struct oal { 1349 + struct qlge_oal { 1350 1350 struct tx_buf_desc oal[TX_DESC_PER_OAL]; 1351 1351 }; 1352 1352 ··· 1357 1357 1358 1358 struct tx_ring_desc { 1359 1359 struct sk_buff *skb; 1360 - struct ob_mac_iocb_req *queue_entry; 1360 + struct qlge_ob_mac_iocb_req *queue_entry; 1361 1361 u32 index; 1362 - struct oal oal; 1362 + struct qlge_oal oal; 1363 1363 struct map_list map[MAX_SKB_FRAGS + 2]; 1364 1364 int map_cnt; 1365 1365 struct tx_ring_desc *next; ··· 1388 1388 spinlock_t lock; 1389 1389 atomic_t tx_count; /* counts down for every outstanding IO */ 1390 1390 struct delayed_work tx_work; 1391 - struct ql_adapter *qdev; 1391 + struct qlge_adapter *qdev; 1392 1392 u64 tx_packets; 1393 1393 u64 tx_bytes; 1394 1394 u64 tx_errors; ··· 1469 1469 dma_addr_t prod_idx_sh_reg_dma; 1470 1470 void __iomem *cnsmr_idx_db_reg; /* PCI doorbell mem area + 0 */ 1471 1471 u32 cnsmr_idx; /* current sw idx */ 1472 - struct ql_net_rsp_iocb *curr_entry; /* next entry on queue */ 1472 + struct qlge_net_rsp_iocb *curr_entry; /* next entry on queue */ 1473 1473 void __iomem *valid_db_reg; /* PCI doorbell mem area + 0x04 */ 1474 1474 1475 1475 /* Large buffer queue elements. 
*/ ··· 1487 1487 char name[IFNAMSIZ + 5]; 1488 1488 struct napi_struct napi; 1489 1489 u8 reserved; 1490 - struct ql_adapter *qdev; 1490 + struct qlge_adapter *qdev; 1491 1491 u64 rx_packets; 1492 1492 u64 rx_multicast; 1493 1493 u64 rx_bytes; ··· 1752 1752 #define SHADOW_OFFSET 0xb0000000 1753 1753 #define SHADOW_REG_SHIFT 20 1754 1754 1755 - struct ql_nic_misc { 1755 + struct qlge_nic_misc { 1756 1756 u32 rx_ring_count; 1757 1757 u32 tx_ring_count; 1758 1758 u32 intr_count; 1759 1759 u32 function; 1760 1760 }; 1761 1761 1762 - struct ql_reg_dump { 1762 + struct qlge_reg_dump { 1763 1763 /* segment 0 */ 1764 1764 struct mpi_coredump_global_header mpi_global_header; 1765 1765 ··· 1769 1769 1770 1770 /* segment 30 */ 1771 1771 struct mpi_coredump_segment_header misc_nic_seg_hdr; 1772 - struct ql_nic_misc misc_nic_info; 1772 + struct qlge_nic_misc misc_nic_info; 1773 1773 1774 1774 /* segment 31 */ 1775 1775 /* one interrupt state for each CQ */ ··· 1792 1792 u32 ets[8 + 2]; 1793 1793 }; 1794 1794 1795 - struct ql_mpi_coredump { 1795 + struct qlge_mpi_coredump { 1796 1796 /* segment 0 */ 1797 1797 struct mpi_coredump_global_header mpi_global_header; 1798 1798 ··· 1914 1914 1915 1915 /* segment 30 */ 1916 1916 struct mpi_coredump_segment_header misc_nic_seg_hdr; 1917 - struct ql_nic_misc misc_nic_info; 1917 + struct qlge_nic_misc misc_nic_info; 1918 1918 1919 1919 /* segment 31 */ 1920 1920 /* one interrupt state for each CQ */ ··· 1991 1991 * irq environment as a context to the ISR. 1992 1992 */ 1993 1993 struct intr_context { 1994 - struct ql_adapter *qdev; 1994 + struct qlge_adapter *qdev; 1995 1995 u32 intr; 1996 1996 u32 irq_mask; /* Mask of which rings the vector services. 
*/ 1997 1997 u32 hooked; ··· 2056 2056 }; 2057 2057 2058 2058 struct nic_operations { 2059 - int (*get_flash)(struct ql_adapter *qdev); 2060 - int (*port_initialize)(struct ql_adapter *qdev); 2059 + int (*get_flash)(struct qlge_adapter *qdev); 2060 + int (*port_initialize)(struct qlge_adapter *qdev); 2061 2061 }; 2062 2062 2063 2063 /* 2064 2064 * The main Adapter structure definition. 2065 2065 * This structure has all fields relevant to the hardware. 2066 2066 */ 2067 - struct ql_adapter { 2067 + struct qlge_adapter { 2068 2068 struct ricb ricb; 2069 2069 unsigned long flags; 2070 2070 u32 wol; ··· 2139 2139 u32 port_link_up; 2140 2140 u32 port_init; 2141 2141 u32 link_status; 2142 - struct ql_mpi_coredump *mpi_coredump; 2142 + struct qlge_mpi_coredump *mpi_coredump; 2143 2143 u32 core_is_dumped; 2144 2144 u32 link_config; 2145 2145 u32 led_config; ··· 2166 2166 /* 2167 2167 * Typical Register accessor for memory mapped device. 2168 2168 */ 2169 - static inline u32 ql_read32(const struct ql_adapter *qdev, int reg) 2169 + static inline u32 qlge_read32(const struct qlge_adapter *qdev, int reg) 2170 2170 { 2171 2171 return readl(qdev->reg_base + reg); 2172 2172 } ··· 2174 2174 /* 2175 2175 * Typical Register accessor for memory mapped device. 2176 2176 */ 2177 - static inline void ql_write32(const struct ql_adapter *qdev, int reg, u32 val) 2177 + static inline void qlge_write32(const struct qlge_adapter *qdev, int reg, u32 val) 2178 2178 { 2179 2179 writel(val, qdev->reg_base + reg); 2180 2180 } ··· 2189 2189 * 1 4k chunk of memory. The lower half of the space is for outbound 2190 2190 * queues. The upper half is for inbound queues. 2191 2191 */ 2192 - static inline void ql_write_db_reg(u32 val, void __iomem *addr) 2192 + static inline void qlge_write_db_reg(u32 val, void __iomem *addr) 2193 2193 { 2194 2194 writel(val, addr); 2195 2195 } ··· 2205 2205 * queues. The upper half is for inbound queues. 2206 2206 * Caller has to guarantee ordering. 
2207 2207 */ 2208 - static inline void ql_write_db_reg_relaxed(u32 val, void __iomem *addr) 2208 + static inline void qlge_write_db_reg_relaxed(u32 val, void __iomem *addr) 2209 2209 { 2210 2210 writel_relaxed(val, addr); 2211 2211 } ··· 2220 2220 * update the relevant index register and then copy the value to the 2221 2221 * shadow register in host memory. 2222 2222 */ 2223 - static inline u32 ql_read_sh_reg(__le32 *addr) 2223 + static inline u32 qlge_read_sh_reg(__le32 *addr) 2224 2224 { 2225 2225 u32 reg; 2226 2226 ··· 2233 2233 extern const char qlge_driver_version[]; 2234 2234 extern const struct ethtool_ops qlge_ethtool_ops; 2235 2235 2236 - int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask); 2237 - void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask); 2238 - int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data); 2239 - int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index, 2240 - u32 *value); 2241 - int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value); 2242 - int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit, 2243 - u16 q_id); 2244 - void ql_queue_fw_error(struct ql_adapter *qdev); 2245 - void ql_mpi_work(struct work_struct *work); 2246 - void ql_mpi_reset_work(struct work_struct *work); 2247 - void ql_mpi_core_to_log(struct work_struct *work); 2248 - int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 ebit); 2249 - void ql_queue_asic_error(struct ql_adapter *qdev); 2250 - void ql_set_ethtool_ops(struct net_device *ndev); 2251 - int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data); 2252 - void ql_mpi_idc_work(struct work_struct *work); 2253 - void ql_mpi_port_cfg_work(struct work_struct *work); 2254 - int ql_mb_get_fw_state(struct ql_adapter *qdev); 2255 - int ql_cam_route_initialize(struct ql_adapter *qdev); 2256 - int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data); 2257 - int ql_write_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 
data); 2258 - int ql_unpause_mpi_risc(struct ql_adapter *qdev); 2259 - int ql_pause_mpi_risc(struct ql_adapter *qdev); 2260 - int ql_hard_reset_mpi_risc(struct ql_adapter *qdev); 2261 - int ql_soft_reset_mpi_risc(struct ql_adapter *qdev); 2262 - int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf, u32 ram_addr, 2263 - int word_count); 2264 - int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump); 2265 - int ql_mb_about_fw(struct ql_adapter *qdev); 2266 - int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol); 2267 - int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol); 2268 - int ql_mb_set_led_cfg(struct ql_adapter *qdev, u32 led_config); 2269 - int ql_mb_get_led_cfg(struct ql_adapter *qdev); 2270 - void ql_link_on(struct ql_adapter *qdev); 2271 - void ql_link_off(struct ql_adapter *qdev); 2272 - int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control); 2273 - int ql_mb_get_port_cfg(struct ql_adapter *qdev); 2274 - int ql_mb_set_port_cfg(struct ql_adapter *qdev); 2275 - int ql_wait_fifo_empty(struct ql_adapter *qdev); 2276 - void ql_get_dump(struct ql_adapter *qdev, void *buff); 2277 - netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev); 2278 - void ql_check_lb_frame(struct ql_adapter *qdev, struct sk_buff *skb); 2279 - int ql_own_firmware(struct ql_adapter *qdev); 2280 - int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget); 2236 + int qlge_sem_spinlock(struct qlge_adapter *qdev, u32 sem_mask); 2237 + void qlge_sem_unlock(struct qlge_adapter *qdev, u32 sem_mask); 2238 + int qlge_read_xgmac_reg(struct qlge_adapter *qdev, u32 reg, u32 *data); 2239 + int qlge_get_mac_addr_reg(struct qlge_adapter *qdev, u32 type, u16 index, 2240 + u32 *value); 2241 + int qlge_get_routing_reg(struct qlge_adapter *qdev, u32 index, u32 *value); 2242 + int qlge_write_cfg(struct qlge_adapter *qdev, void *ptr, int size, u32 bit, 2243 + u16 q_id); 2244 + void qlge_queue_fw_error(struct qlge_adapter *qdev); 2245 
+ void qlge_mpi_work(struct work_struct *work); 2246 + void qlge_mpi_reset_work(struct work_struct *work); 2247 + void qlge_mpi_core_to_log(struct work_struct *work); 2248 + int qlge_wait_reg_rdy(struct qlge_adapter *qdev, u32 reg, u32 bit, u32 ebit); 2249 + void qlge_queue_asic_error(struct qlge_adapter *qdev); 2250 + void qlge_set_ethtool_ops(struct net_device *ndev); 2251 + int qlge_read_xgmac_reg64(struct qlge_adapter *qdev, u32 reg, u64 *data); 2252 + void qlge_mpi_idc_work(struct work_struct *work); 2253 + void qlge_mpi_port_cfg_work(struct work_struct *work); 2254 + int qlge_mb_get_fw_state(struct qlge_adapter *qdev); 2255 + int qlge_cam_route_initialize(struct qlge_adapter *qdev); 2256 + int qlge_read_mpi_reg(struct qlge_adapter *qdev, u32 reg, u32 *data); 2257 + int qlge_write_mpi_reg(struct qlge_adapter *qdev, u32 reg, u32 data); 2258 + int qlge_unpause_mpi_risc(struct qlge_adapter *qdev); 2259 + int qlge_pause_mpi_risc(struct qlge_adapter *qdev); 2260 + int qlge_hard_reset_mpi_risc(struct qlge_adapter *qdev); 2261 + int qlge_soft_reset_mpi_risc(struct qlge_adapter *qdev); 2262 + int qlge_dump_risc_ram_area(struct qlge_adapter *qdev, void *buf, u32 ram_addr, 2263 + int word_count); 2264 + int qlge_core_dump(struct qlge_adapter *qdev, struct qlge_mpi_coredump *mpi_coredump); 2265 + int qlge_mb_about_fw(struct qlge_adapter *qdev); 2266 + int qlge_mb_wol_set_magic(struct qlge_adapter *qdev, u32 enable_wol); 2267 + int qlge_mb_wol_mode(struct qlge_adapter *qdev, u32 wol); 2268 + int qlge_mb_set_led_cfg(struct qlge_adapter *qdev, u32 led_config); 2269 + int qlge_mb_get_led_cfg(struct qlge_adapter *qdev); 2270 + void qlge_link_on(struct qlge_adapter *qdev); 2271 + void qlge_link_off(struct qlge_adapter *qdev); 2272 + int qlge_mb_set_mgmnt_traffic_ctl(struct qlge_adapter *qdev, u32 control); 2273 + int qlge_mb_get_port_cfg(struct qlge_adapter *qdev); 2274 + int qlge_mb_set_port_cfg(struct qlge_adapter *qdev); 2275 + int qlge_wait_fifo_empty(struct qlge_adapter 
*qdev); 2276 + void qlge_get_dump(struct qlge_adapter *qdev, void *buff); 2277 + netdev_tx_t qlge_lb_send(struct sk_buff *skb, struct net_device *ndev); 2278 + void qlge_check_lb_frame(struct qlge_adapter *qdev, struct sk_buff *skb); 2279 + int qlge_own_firmware(struct qlge_adapter *qdev); 2280 + int qlge_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget); 2281 2281 2282 2282 /* #define QL_ALL_DUMP */ 2283 2283 /* #define QL_REG_DUMP */ ··· 2287 2287 /* #define QL_OB_DUMP */ 2288 2288 2289 2289 #ifdef QL_REG_DUMP 2290 - void ql_dump_xgmac_control_regs(struct ql_adapter *qdev); 2291 - void ql_dump_routing_entries(struct ql_adapter *qdev); 2292 - void ql_dump_regs(struct ql_adapter *qdev); 2293 - #define QL_DUMP_REGS(qdev) ql_dump_regs(qdev) 2294 - #define QL_DUMP_ROUTE(qdev) ql_dump_routing_entries(qdev) 2295 - #define QL_DUMP_XGMAC_CONTROL_REGS(qdev) ql_dump_xgmac_control_regs(qdev) 2290 + void qlge_dump_xgmac_control_regs(struct qlge_adapter *qdev); 2291 + void qlge_dump_routing_entries(struct qlge_adapter *qdev); 2292 + void qlge_dump_regs(struct qlge_adapter *qdev); 2293 + #define QL_DUMP_REGS(qdev) qlge_dump_regs(qdev) 2294 + #define QL_DUMP_ROUTE(qdev) qlge_dump_routing_entries(qdev) 2295 + #define QL_DUMP_XGMAC_CONTROL_REGS(qdev) qlge_dump_xgmac_control_regs(qdev) 2296 2296 #else 2297 2297 #define QL_DUMP_REGS(qdev) 2298 2298 #define QL_DUMP_ROUTE(qdev) ··· 2300 2300 #endif 2301 2301 2302 2302 #ifdef QL_STAT_DUMP 2303 - void ql_dump_stat(struct ql_adapter *qdev); 2304 - #define QL_DUMP_STAT(qdev) ql_dump_stat(qdev) 2303 + void qlge_dump_stat(struct qlge_adapter *qdev); 2304 + #define QL_DUMP_STAT(qdev) qlge_dump_stat(qdev) 2305 2305 #else 2306 2306 #define QL_DUMP_STAT(qdev) 2307 2307 #endif 2308 2308 2309 2309 #ifdef QL_DEV_DUMP 2310 - void ql_dump_qdev(struct ql_adapter *qdev); 2311 - #define QL_DUMP_QDEV(qdev) ql_dump_qdev(qdev) 2310 + void qlge_dump_qdev(struct qlge_adapter *qdev); 2311 + #define QL_DUMP_QDEV(qdev) qlge_dump_qdev(qdev) 2312 2312 #else 
2313 2313 #define QL_DUMP_QDEV(qdev) 2314 2314 #endif 2315 2315 2316 2316 #ifdef QL_CB_DUMP 2317 - void ql_dump_wqicb(struct wqicb *wqicb); 2318 - void ql_dump_tx_ring(struct tx_ring *tx_ring); 2319 - void ql_dump_ricb(struct ricb *ricb); 2320 - void ql_dump_cqicb(struct cqicb *cqicb); 2321 - void ql_dump_rx_ring(struct rx_ring *rx_ring); 2322 - void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id); 2323 - #define QL_DUMP_RICB(ricb) ql_dump_ricb(ricb) 2324 - #define QL_DUMP_WQICB(wqicb) ql_dump_wqicb(wqicb) 2325 - #define QL_DUMP_TX_RING(tx_ring) ql_dump_tx_ring(tx_ring) 2326 - #define QL_DUMP_CQICB(cqicb) ql_dump_cqicb(cqicb) 2327 - #define QL_DUMP_RX_RING(rx_ring) ql_dump_rx_ring(rx_ring) 2317 + void qlge_dump_wqicb(struct wqicb *wqicb); 2318 + void qlge_dump_tx_ring(struct tx_ring *tx_ring); 2319 + void qlge_dump_ricb(struct ricb *ricb); 2320 + void qlge_dump_cqicb(struct cqicb *cqicb); 2321 + void qlge_dump_rx_ring(struct rx_ring *rx_ring); 2322 + void qlge_dump_hw_cb(struct qlge_adapter *qdev, int size, u32 bit, u16 q_id); 2323 + #define QL_DUMP_RICB(ricb) qlge_dump_ricb(ricb) 2324 + #define QL_DUMP_WQICB(wqicb) qlge_dump_wqicb(wqicb) 2325 + #define QL_DUMP_TX_RING(tx_ring) qlge_dump_tx_ring(tx_ring) 2326 + #define QL_DUMP_CQICB(cqicb) qlge_dump_cqicb(cqicb) 2327 + #define QL_DUMP_RX_RING(rx_ring) qlge_dump_rx_ring(rx_ring) 2328 2328 #define QL_DUMP_HW_CB(qdev, size, bit, q_id) \ 2329 - ql_dump_hw_cb(qdev, size, bit, q_id) 2329 + qlge_dump_hw_cb(qdev, size, bit, q_id) 2330 2330 #else 2331 2331 #define QL_DUMP_RICB(ricb) 2332 2332 #define QL_DUMP_WQICB(wqicb) ··· 2337 2337 #endif 2338 2338 2339 2339 #ifdef QL_OB_DUMP 2340 - void ql_dump_tx_desc(struct ql_adapter *qdev, struct tx_buf_desc *tbd); 2341 - void ql_dump_ob_mac_iocb(struct ql_adapter *qdev, struct ob_mac_iocb_req *ob_mac_iocb); 2342 - void ql_dump_ob_mac_rsp(struct ql_adapter *qdev, struct ob_mac_iocb_rsp *ob_mac_rsp); 2343 - #define QL_DUMP_OB_MAC_IOCB(qdev, ob_mac_iocb) 
ql_dump_ob_mac_iocb(qdev, ob_mac_iocb) 2344 - #define QL_DUMP_OB_MAC_RSP(qdev, ob_mac_rsp) ql_dump_ob_mac_rsp(qdev, ob_mac_rsp) 2340 + void qlge_dump_tx_desc(struct qlge_adapter *qdev, struct tx_buf_desc *tbd); 2341 + void qlge_dump_ob_mac_iocb(struct qlge_adapter *qdev, struct qlge_ob_mac_iocb_req *ob_mac_iocb); 2342 + void qlge_dump_ob_mac_rsp(struct qlge_adapter *qdev, struct qlge_ob_mac_iocb_rsp *ob_mac_rsp); 2343 + #define QL_DUMP_OB_MAC_IOCB(qdev, ob_mac_iocb) qlge_dump_ob_mac_iocb(qdev, ob_mac_iocb) 2344 + #define QL_DUMP_OB_MAC_RSP(qdev, ob_mac_rsp) qlge_dump_ob_mac_rsp(qdev, ob_mac_rsp) 2345 2345 #else 2346 2346 #define QL_DUMP_OB_MAC_IOCB(qdev, ob_mac_iocb) 2347 2347 #define QL_DUMP_OB_MAC_RSP(qdev, ob_mac_rsp) 2348 2348 #endif 2349 2349 2350 2350 #ifdef QL_IB_DUMP 2351 - void ql_dump_ib_mac_rsp(struct ql_adapter *qdev, struct ib_mac_iocb_rsp *ib_mac_rsp); 2352 - #define QL_DUMP_IB_MAC_RSP(qdev, ib_mac_rsp) ql_dump_ib_mac_rsp(qdev, ib_mac_rsp) 2351 + void qlge_dump_ib_mac_rsp(struct qlge_adapter *qdev, struct qlge_ib_mac_iocb_rsp *ib_mac_rsp); 2352 + #define QL_DUMP_IB_MAC_RSP(qdev, ib_mac_rsp) qlge_dump_ib_mac_rsp(qdev, ib_mac_rsp) 2353 2353 #else 2354 2354 #define QL_DUMP_IB_MAC_RSP(qdev, ib_mac_rsp) 2355 2355 #endif 2356 2356 2357 2357 #ifdef QL_ALL_DUMP 2358 - void ql_dump_all(struct ql_adapter *qdev); 2359 - #define QL_DUMP_ALL(qdev) ql_dump_all(qdev) 2358 + void qlge_dump_all(struct qlge_adapter *qdev); 2359 + #define QL_DUMP_ALL(qdev) qlge_dump_all(qdev) 2360 2360 #else 2361 2361 #define QL_DUMP_ALL(qdev) 2362 2362 #endif
+519 -520
drivers/staging/qlge/qlge_dbg.c
··· 6 6 #include "qlge.h" 7 7 8 8 /* Read a NIC register from the alternate function. */ 9 - static u32 ql_read_other_func_reg(struct ql_adapter *qdev, 10 - u32 reg) 9 + static u32 qlge_read_other_func_reg(struct qlge_adapter *qdev, 10 + u32 reg) 11 11 { 12 12 u32 register_to_read; 13 13 u32 reg_val; ··· 17 17 | MPI_NIC_READ 18 18 | (qdev->alt_func << MPI_NIC_FUNCTION_SHIFT) 19 19 | reg; 20 - status = ql_read_mpi_reg(qdev, register_to_read, &reg_val); 20 + status = qlge_read_mpi_reg(qdev, register_to_read, &reg_val); 21 21 if (status != 0) 22 22 return 0xffffffff; 23 23 ··· 25 25 } 26 26 27 27 /* Write a NIC register from the alternate function. */ 28 - static int ql_write_other_func_reg(struct ql_adapter *qdev, 29 - u32 reg, u32 reg_val) 28 + static int qlge_write_other_func_reg(struct qlge_adapter *qdev, 29 + u32 reg, u32 reg_val) 30 30 { 31 31 u32 register_to_read; 32 32 ··· 35 35 | (qdev->alt_func << MPI_NIC_FUNCTION_SHIFT) 36 36 | reg; 37 37 38 - return ql_write_mpi_reg(qdev, register_to_read, reg_val); 38 + return qlge_write_mpi_reg(qdev, register_to_read, reg_val); 39 39 } 40 40 41 - static int ql_wait_other_func_reg_rdy(struct ql_adapter *qdev, u32 reg, 42 - u32 bit, u32 err_bit) 41 + static int qlge_wait_other_func_reg_rdy(struct qlge_adapter *qdev, u32 reg, 42 + u32 bit, u32 err_bit) 43 43 { 44 44 u32 temp; 45 45 int count; 46 46 47 47 for (count = 10; count; count--) { 48 - temp = ql_read_other_func_reg(qdev, reg); 48 + temp = qlge_read_other_func_reg(qdev, reg); 49 49 50 50 /* check for errors */ 51 51 if (temp & err_bit) ··· 57 57 return -1; 58 58 } 59 59 60 - static int ql_read_other_func_serdes_reg(struct ql_adapter *qdev, u32 reg, 61 - u32 *data) 60 + static int qlge_read_other_func_serdes_reg(struct qlge_adapter *qdev, u32 reg, 61 + u32 *data) 62 62 { 63 63 int status; 64 64 65 65 /* wait for reg to come ready */ 66 - status = ql_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4, 67 - XG_SERDES_ADDR_RDY, 0); 66 + status = 
qlge_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4, 67 + XG_SERDES_ADDR_RDY, 0); 68 68 if (status) 69 69 goto exit; 70 70 71 71 /* set up for reg read */ 72 - ql_write_other_func_reg(qdev, XG_SERDES_ADDR / 4, reg | PROC_ADDR_R); 72 + qlge_write_other_func_reg(qdev, XG_SERDES_ADDR / 4, reg | PROC_ADDR_R); 73 73 74 74 /* wait for reg to come ready */ 75 - status = ql_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4, 76 - XG_SERDES_ADDR_RDY, 0); 75 + status = qlge_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4, 76 + XG_SERDES_ADDR_RDY, 0); 77 77 if (status) 78 78 goto exit; 79 79 80 80 /* get the data */ 81 - *data = ql_read_other_func_reg(qdev, (XG_SERDES_DATA / 4)); 81 + *data = qlge_read_other_func_reg(qdev, (XG_SERDES_DATA / 4)); 82 82 exit: 83 83 return status; 84 84 } 85 85 86 86 /* Read out the SERDES registers */ 87 - static int ql_read_serdes_reg(struct ql_adapter *qdev, u32 reg, u32 *data) 87 + static int qlge_read_serdes_reg(struct qlge_adapter *qdev, u32 reg, u32 *data) 88 88 { 89 89 int status; 90 90 91 91 /* wait for reg to come ready */ 92 - status = ql_wait_reg_rdy(qdev, XG_SERDES_ADDR, XG_SERDES_ADDR_RDY, 0); 92 + status = qlge_wait_reg_rdy(qdev, XG_SERDES_ADDR, XG_SERDES_ADDR_RDY, 0); 93 93 if (status) 94 94 goto exit; 95 95 96 96 /* set up for reg read */ 97 - ql_write32(qdev, XG_SERDES_ADDR, reg | PROC_ADDR_R); 97 + qlge_write32(qdev, XG_SERDES_ADDR, reg | PROC_ADDR_R); 98 98 99 99 /* wait for reg to come ready */ 100 - status = ql_wait_reg_rdy(qdev, XG_SERDES_ADDR, XG_SERDES_ADDR_RDY, 0); 100 + status = qlge_wait_reg_rdy(qdev, XG_SERDES_ADDR, XG_SERDES_ADDR_RDY, 0); 101 101 if (status) 102 102 goto exit; 103 103 104 104 /* get the data */ 105 - *data = ql_read32(qdev, XG_SERDES_DATA); 105 + *data = qlge_read32(qdev, XG_SERDES_DATA); 106 106 exit: 107 107 return status; 108 108 } 109 109 110 - static void ql_get_both_serdes(struct ql_adapter *qdev, u32 addr, 111 - u32 *direct_ptr, u32 *indirect_ptr, 112 - bool direct_valid, bool 
indirect_valid) 110 + static void qlge_get_both_serdes(struct qlge_adapter *qdev, u32 addr, 111 + u32 *direct_ptr, u32 *indirect_ptr, 112 + bool direct_valid, bool indirect_valid) 113 113 { 114 114 unsigned int status; 115 115 116 116 status = 1; 117 117 if (direct_valid) 118 - status = ql_read_serdes_reg(qdev, addr, direct_ptr); 118 + status = qlge_read_serdes_reg(qdev, addr, direct_ptr); 119 119 /* Dead fill any failures or invalids. */ 120 120 if (status) 121 121 *direct_ptr = 0xDEADBEEF; 122 122 123 123 status = 1; 124 124 if (indirect_valid) 125 - status = ql_read_other_func_serdes_reg( 126 - qdev, addr, indirect_ptr); 125 + status = qlge_read_other_func_serdes_reg(qdev, addr, 126 + indirect_ptr); 127 127 /* Dead fill any failures or invalids. */ 128 128 if (status) 129 129 *indirect_ptr = 0xDEADBEEF; 130 130 } 131 131 132 - static int ql_get_serdes_regs(struct ql_adapter *qdev, 133 - struct ql_mpi_coredump *mpi_coredump) 132 + static int qlge_get_serdes_regs(struct qlge_adapter *qdev, 133 + struct qlge_mpi_coredump *mpi_coredump) 134 134 { 135 135 int status; 136 136 bool xfi_direct_valid = false, xfi_indirect_valid = false; ··· 140 140 u32 *indirect_ptr; 141 141 142 142 /* The XAUI needs to be read out per port */ 143 - status = ql_read_other_func_serdes_reg(qdev, 144 - XG_SERDES_XAUI_HSS_PCS_START, 145 - &temp); 143 + status = qlge_read_other_func_serdes_reg(qdev, 144 + XG_SERDES_XAUI_HSS_PCS_START, 145 + &temp); 146 146 if (status) 147 147 temp = XG_SERDES_ADDR_XAUI_PWR_DOWN; 148 148 ··· 150 150 XG_SERDES_ADDR_XAUI_PWR_DOWN) 151 151 xaui_indirect_valid = false; 152 152 153 - status = ql_read_serdes_reg(qdev, XG_SERDES_XAUI_HSS_PCS_START, &temp); 153 + status = qlge_read_serdes_reg(qdev, XG_SERDES_XAUI_HSS_PCS_START, &temp); 154 154 155 155 if (status) 156 156 temp = XG_SERDES_ADDR_XAUI_PWR_DOWN; ··· 163 163 * XFI register is shared so only need to read one 164 164 * functions and then check the bits. 
165 165 */ 166 - status = ql_read_serdes_reg(qdev, XG_SERDES_ADDR_STS, &temp); 166 + status = qlge_read_serdes_reg(qdev, XG_SERDES_ADDR_STS, &temp); 167 167 if (status) 168 168 temp = 0; 169 169 ··· 198 198 } 199 199 200 200 for (i = 0; i <= 0x000000034; i += 4, direct_ptr++, indirect_ptr++) 201 - ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, 202 - xaui_direct_valid, xaui_indirect_valid); 201 + qlge_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, 202 + xaui_direct_valid, xaui_indirect_valid); 203 203 204 204 /* Get XAUI_HSS_PCS register block. */ 205 205 if (qdev->func & 1) { ··· 215 215 } 216 216 217 217 for (i = 0x800; i <= 0x880; i += 4, direct_ptr++, indirect_ptr++) 218 - ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, 219 - xaui_direct_valid, xaui_indirect_valid); 218 + qlge_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, 219 + xaui_direct_valid, xaui_indirect_valid); 220 220 221 221 /* Get XAUI_XFI_AN register block. */ 222 222 if (qdev->func & 1) { ··· 228 228 } 229 229 230 230 for (i = 0x1000; i <= 0x1034; i += 4, direct_ptr++, indirect_ptr++) 231 - ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, 232 - xfi_direct_valid, xfi_indirect_valid); 231 + qlge_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, 232 + xfi_direct_valid, xfi_indirect_valid); 233 233 234 234 /* Get XAUI_XFI_TRAIN register block. */ 235 235 if (qdev->func & 1) { ··· 243 243 } 244 244 245 245 for (i = 0x1050; i <= 0x107c; i += 4, direct_ptr++, indirect_ptr++) 246 - ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, 247 - xfi_direct_valid, xfi_indirect_valid); 246 + qlge_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, 247 + xfi_direct_valid, xfi_indirect_valid); 248 248 249 249 /* Get XAUI_XFI_HSS_PCS register block. 
*/ 250 250 if (qdev->func & 1) { ··· 260 260 } 261 261 262 262 for (i = 0x1800; i <= 0x1838; i += 4, direct_ptr++, indirect_ptr++) 263 - ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, 264 - xfi_direct_valid, xfi_indirect_valid); 263 + qlge_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, 264 + xfi_direct_valid, xfi_indirect_valid); 265 265 266 266 /* Get XAUI_XFI_HSS_TX register block. */ 267 267 if (qdev->func & 1) { ··· 275 275 mpi_coredump->serdes2_xfi_hss_tx; 276 276 } 277 277 for (i = 0x1c00; i <= 0x1c1f; i++, direct_ptr++, indirect_ptr++) 278 - ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, 279 - xfi_direct_valid, xfi_indirect_valid); 278 + qlge_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, 279 + xfi_direct_valid, xfi_indirect_valid); 280 280 281 281 /* Get XAUI_XFI_HSS_RX register block. */ 282 282 if (qdev->func & 1) { ··· 291 291 } 292 292 293 293 for (i = 0x1c40; i <= 0x1c5f; i++, direct_ptr++, indirect_ptr++) 294 - ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, 295 - xfi_direct_valid, xfi_indirect_valid); 294 + qlge_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, 295 + xfi_direct_valid, xfi_indirect_valid); 296 296 297 297 /* Get XAUI_XFI_HSS_PLL register block. 
*/ 298 298 if (qdev->func & 1) { ··· 307 307 mpi_coredump->serdes2_xfi_hss_pll; 308 308 } 309 309 for (i = 0x1e00; i <= 0x1e1f; i++, direct_ptr++, indirect_ptr++) 310 - ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, 311 - xfi_direct_valid, xfi_indirect_valid); 310 + qlge_get_both_serdes(qdev, i, direct_ptr, indirect_ptr, 311 + xfi_direct_valid, xfi_indirect_valid); 312 312 return 0; 313 313 } 314 314 315 - static int ql_read_other_func_xgmac_reg(struct ql_adapter *qdev, u32 reg, 316 - u32 *data) 315 + static int qlge_read_other_func_xgmac_reg(struct qlge_adapter *qdev, u32 reg, 316 + u32 *data) 317 317 { 318 318 int status = 0; 319 319 320 320 /* wait for reg to come ready */ 321 - status = ql_wait_other_func_reg_rdy(qdev, XGMAC_ADDR / 4, 322 - XGMAC_ADDR_RDY, XGMAC_ADDR_XME); 321 + status = qlge_wait_other_func_reg_rdy(qdev, XGMAC_ADDR / 4, 322 + XGMAC_ADDR_RDY, XGMAC_ADDR_XME); 323 323 if (status) 324 324 goto exit; 325 325 326 326 /* set up for reg read */ 327 - ql_write_other_func_reg(qdev, XGMAC_ADDR / 4, reg | XGMAC_ADDR_R); 327 + qlge_write_other_func_reg(qdev, XGMAC_ADDR / 4, reg | XGMAC_ADDR_R); 328 328 329 329 /* wait for reg to come ready */ 330 - status = ql_wait_other_func_reg_rdy(qdev, XGMAC_ADDR / 4, 331 - XGMAC_ADDR_RDY, XGMAC_ADDR_XME); 330 + status = qlge_wait_other_func_reg_rdy(qdev, XGMAC_ADDR / 4, 331 + XGMAC_ADDR_RDY, XGMAC_ADDR_XME); 332 332 if (status) 333 333 goto exit; 334 334 335 335 /* get the data */ 336 - *data = ql_read_other_func_reg(qdev, XGMAC_DATA / 4); 336 + *data = qlge_read_other_func_reg(qdev, XGMAC_DATA / 4); 337 337 exit: 338 338 return status; 339 339 } ··· 341 341 /* Read the 400 xgmac control/statistics registers 342 342 * skipping unused locations. 
343 343 */ 344 - static int ql_get_xgmac_regs(struct ql_adapter *qdev, u32 *buf, 345 - unsigned int other_function) 344 + static int qlge_get_xgmac_regs(struct qlge_adapter *qdev, u32 *buf, 345 + unsigned int other_function) 346 346 { 347 347 int status = 0; 348 348 int i; ··· 370 370 (i > 0x000005c8 && i < 0x00000600)) { 371 371 if (other_function) 372 372 status = 373 - ql_read_other_func_xgmac_reg(qdev, i, buf); 373 + qlge_read_other_func_xgmac_reg(qdev, i, buf); 374 374 else 375 - status = ql_read_xgmac_reg(qdev, i, buf); 375 + status = qlge_read_xgmac_reg(qdev, i, buf); 376 376 377 377 if (status) 378 378 *buf = 0xdeadbeef; ··· 382 382 return status; 383 383 } 384 384 385 - static int ql_get_ets_regs(struct ql_adapter *qdev, u32 *buf) 385 + static int qlge_get_ets_regs(struct qlge_adapter *qdev, u32 *buf) 386 386 { 387 387 int i; 388 388 389 389 for (i = 0; i < 8; i++, buf++) { 390 - ql_write32(qdev, NIC_ETS, i << 29 | 0x08000000); 391 - *buf = ql_read32(qdev, NIC_ETS); 390 + qlge_write32(qdev, NIC_ETS, i << 29 | 0x08000000); 391 + *buf = qlge_read32(qdev, NIC_ETS); 392 392 } 393 393 394 394 for (i = 0; i < 2; i++, buf++) { 395 - ql_write32(qdev, CNA_ETS, i << 29 | 0x08000000); 396 - *buf = ql_read32(qdev, CNA_ETS); 395 + qlge_write32(qdev, CNA_ETS, i << 29 | 0x08000000); 396 + *buf = qlge_read32(qdev, CNA_ETS); 397 397 } 398 398 399 399 return 0; 400 400 } 401 401 402 - static void ql_get_intr_states(struct ql_adapter *qdev, u32 *buf) 402 + static void qlge_get_intr_states(struct qlge_adapter *qdev, u32 *buf) 403 403 { 404 404 int i; 405 405 406 406 for (i = 0; i < qdev->rx_ring_count; i++, buf++) { 407 - ql_write32(qdev, INTR_EN, 408 - qdev->intr_context[i].intr_read_mask); 409 - *buf = ql_read32(qdev, INTR_EN); 407 + qlge_write32(qdev, INTR_EN, 408 + qdev->intr_context[i].intr_read_mask); 409 + *buf = qlge_read32(qdev, INTR_EN); 410 410 } 411 411 } 412 412 413 - static int ql_get_cam_entries(struct ql_adapter *qdev, u32 *buf) 413 + static int 
qlge_get_cam_entries(struct qlge_adapter *qdev, u32 *buf) 414 414 { 415 415 int i, status; 416 416 u32 value[3]; 417 417 418 - status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); 418 + status = qlge_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); 419 419 if (status) 420 420 return status; 421 421 422 422 for (i = 0; i < 16; i++) { 423 - status = ql_get_mac_addr_reg(qdev, 424 - MAC_ADDR_TYPE_CAM_MAC, i, value); 423 + status = qlge_get_mac_addr_reg(qdev, 424 + MAC_ADDR_TYPE_CAM_MAC, i, value); 425 425 if (status) { 426 426 netif_err(qdev, drv, qdev->ndev, 427 427 "Failed read of mac index register\n"); ··· 432 432 *buf++ = value[2]; /* output */ 433 433 } 434 434 for (i = 0; i < 32; i++) { 435 - status = ql_get_mac_addr_reg(qdev, 436 - MAC_ADDR_TYPE_MULTI_MAC, i, value); 435 + status = qlge_get_mac_addr_reg(qdev, MAC_ADDR_TYPE_MULTI_MAC, 436 + i, value); 437 437 if (status) { 438 438 netif_err(qdev, drv, qdev->ndev, 439 439 "Failed read of mac index register\n"); ··· 443 443 *buf++ = value[1]; /* upper Mcast address */ 444 444 } 445 445 err: 446 - ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); 446 + qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK); 447 447 return status; 448 448 } 449 449 450 - static int ql_get_routing_entries(struct ql_adapter *qdev, u32 *buf) 450 + static int qlge_get_routing_entries(struct qlge_adapter *qdev, u32 *buf) 451 451 { 452 452 int status; 453 453 u32 value, i; 454 454 455 - status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); 455 + status = qlge_sem_spinlock(qdev, SEM_RT_IDX_MASK); 456 456 if (status) 457 457 return status; 458 458 459 459 for (i = 0; i < 16; i++) { 460 - status = ql_get_routing_reg(qdev, i, &value); 460 + status = qlge_get_routing_reg(qdev, i, &value); 461 461 if (status) { 462 462 netif_err(qdev, drv, qdev->ndev, 463 463 "Failed read of routing index register\n"); ··· 467 467 } 468 468 } 469 469 err: 470 - ql_sem_unlock(qdev, SEM_RT_IDX_MASK); 470 + qlge_sem_unlock(qdev, SEM_RT_IDX_MASK); 471 471 return status; 472 472 } 473 473 474 474 /* Read 
the MPI Processor shadow registers */ 475 - static int ql_get_mpi_shadow_regs(struct ql_adapter *qdev, u32 *buf) 475 + static int qlge_get_mpi_shadow_regs(struct qlge_adapter *qdev, u32 *buf) 476 476 { 477 477 u32 i; 478 478 int status; 479 479 480 480 for (i = 0; i < MPI_CORE_SH_REGS_CNT; i++, buf++) { 481 - status = ql_write_mpi_reg(qdev, 482 - RISC_124, 483 - (SHADOW_OFFSET | i << SHADOW_REG_SHIFT)); 481 + status = qlge_write_mpi_reg(qdev, 482 + RISC_124, 483 + (SHADOW_OFFSET | i << SHADOW_REG_SHIFT)); 484 484 if (status) 485 485 goto end; 486 - status = ql_read_mpi_reg(qdev, RISC_127, buf); 486 + status = qlge_read_mpi_reg(qdev, RISC_127, buf); 487 487 if (status) 488 488 goto end; 489 489 } ··· 492 492 } 493 493 494 494 /* Read the MPI Processor core registers */ 495 - static int ql_get_mpi_regs(struct ql_adapter *qdev, u32 *buf, 496 - u32 offset, u32 count) 495 + static int qlge_get_mpi_regs(struct qlge_adapter *qdev, u32 *buf, 496 + u32 offset, u32 count) 497 497 { 498 498 int i, status = 0; 499 499 500 500 for (i = 0; i < count; i++, buf++) { 501 - status = ql_read_mpi_reg(qdev, offset + i, buf); 501 + status = qlge_read_mpi_reg(qdev, offset + i, buf); 502 502 if (status) 503 503 return status; 504 504 } ··· 506 506 } 507 507 508 508 /* Read the ASIC probe dump */ 509 - static unsigned int *ql_get_probe(struct ql_adapter *qdev, u32 clock, 510 - u32 valid, u32 *buf) 509 + static unsigned int *qlge_get_probe(struct qlge_adapter *qdev, u32 clock, 510 + u32 valid, u32 *buf) 511 511 { 512 512 u32 module, mux_sel, probe, lo_val, hi_val; 513 513 ··· 519 519 | PRB_MX_ADDR_ARE 520 520 | mux_sel 521 521 | (module << PRB_MX_ADDR_MOD_SEL_SHIFT); 522 - ql_write32(qdev, PRB_MX_ADDR, probe); 523 - lo_val = ql_read32(qdev, PRB_MX_DATA); 522 + qlge_write32(qdev, PRB_MX_ADDR, probe); 523 + lo_val = qlge_read32(qdev, PRB_MX_DATA); 524 524 if (mux_sel == 0) { 525 525 *buf = probe; 526 526 buf++; 527 527 } 528 528 probe |= PRB_MX_ADDR_UP; 529 - ql_write32(qdev, PRB_MX_ADDR, 
probe); 530 - hi_val = ql_read32(qdev, PRB_MX_DATA); 529 + qlge_write32(qdev, PRB_MX_ADDR, probe); 530 + hi_val = qlge_read32(qdev, PRB_MX_DATA); 531 531 *buf = lo_val; 532 532 buf++; 533 533 *buf = hi_val; ··· 537 537 return buf; 538 538 } 539 539 540 - static int ql_get_probe_dump(struct ql_adapter *qdev, unsigned int *buf) 540 + static int qlge_get_probe_dump(struct qlge_adapter *qdev, unsigned int *buf) 541 541 { 542 542 /* First we have to enable the probe mux */ 543 - ql_write_mpi_reg(qdev, MPI_TEST_FUNC_PRB_CTL, MPI_TEST_FUNC_PRB_EN); 544 - buf = ql_get_probe(qdev, PRB_MX_ADDR_SYS_CLOCK, 545 - PRB_MX_ADDR_VALID_SYS_MOD, buf); 546 - buf = ql_get_probe(qdev, PRB_MX_ADDR_PCI_CLOCK, 547 - PRB_MX_ADDR_VALID_PCI_MOD, buf); 548 - buf = ql_get_probe(qdev, PRB_MX_ADDR_XGM_CLOCK, 549 - PRB_MX_ADDR_VALID_XGM_MOD, buf); 550 - buf = ql_get_probe(qdev, PRB_MX_ADDR_FC_CLOCK, 551 - PRB_MX_ADDR_VALID_FC_MOD, buf); 543 + qlge_write_mpi_reg(qdev, MPI_TEST_FUNC_PRB_CTL, MPI_TEST_FUNC_PRB_EN); 544 + buf = qlge_get_probe(qdev, PRB_MX_ADDR_SYS_CLOCK, 545 + PRB_MX_ADDR_VALID_SYS_MOD, buf); 546 + buf = qlge_get_probe(qdev, PRB_MX_ADDR_PCI_CLOCK, 547 + PRB_MX_ADDR_VALID_PCI_MOD, buf); 548 + buf = qlge_get_probe(qdev, PRB_MX_ADDR_XGM_CLOCK, 549 + PRB_MX_ADDR_VALID_XGM_MOD, buf); 550 + buf = qlge_get_probe(qdev, PRB_MX_ADDR_FC_CLOCK, 551 + PRB_MX_ADDR_VALID_FC_MOD, buf); 552 552 return 0; 553 553 } 554 554 555 555 /* Read out the routing index registers */ 556 - static int ql_get_routing_index_registers(struct ql_adapter *qdev, u32 *buf) 556 + static int qlge_get_routing_index_registers(struct qlge_adapter *qdev, u32 *buf) 557 557 { 558 558 int status; 559 559 u32 type, index, index_max; ··· 561 561 u32 result_data; 562 562 u32 val; 563 563 564 - status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); 564 + status = qlge_sem_spinlock(qdev, SEM_RT_IDX_MASK); 565 565 if (status) 566 566 return status; 567 567 ··· 574 574 val = RT_IDX_RS 575 575 | (type << RT_IDX_TYPE_SHIFT) 576 576 | (index << 
RT_IDX_IDX_SHIFT); 577 - ql_write32(qdev, RT_IDX, val); 577 + qlge_write32(qdev, RT_IDX, val); 578 578 result_index = 0; 579 579 while ((result_index & RT_IDX_MR) == 0) 580 - result_index = ql_read32(qdev, RT_IDX); 581 - result_data = ql_read32(qdev, RT_DATA); 580 + result_index = qlge_read32(qdev, RT_IDX); 581 + result_data = qlge_read32(qdev, RT_DATA); 582 582 *buf = type; 583 583 buf++; 584 584 *buf = index; ··· 589 589 buf++; 590 590 } 591 591 } 592 - ql_sem_unlock(qdev, SEM_RT_IDX_MASK); 592 + qlge_sem_unlock(qdev, SEM_RT_IDX_MASK); 593 593 return status; 594 594 } 595 595 596 596 /* Read out the MAC protocol registers */ 597 - static void ql_get_mac_protocol_registers(struct ql_adapter *qdev, u32 *buf) 597 + static void qlge_get_mac_protocol_registers(struct qlge_adapter *qdev, u32 *buf) 598 598 { 599 599 u32 result_index, result_data; 600 600 u32 type; ··· 657 657 | (type << MAC_ADDR_TYPE_SHIFT) 658 658 | (index << MAC_ADDR_IDX_SHIFT) 659 659 | (offset); 660 - ql_write32(qdev, MAC_ADDR_IDX, val); 660 + qlge_write32(qdev, MAC_ADDR_IDX, val); 661 661 result_index = 0; 662 662 while ((result_index & MAC_ADDR_MR) == 0) { 663 - result_index = ql_read32(qdev, 664 - MAC_ADDR_IDX); 663 + result_index = qlge_read32(qdev, 664 + MAC_ADDR_IDX); 665 665 } 666 - result_data = ql_read32(qdev, MAC_ADDR_DATA); 666 + result_data = qlge_read32(qdev, MAC_ADDR_DATA); 667 667 *buf = result_index; 668 668 buf++; 669 669 *buf = result_data; ··· 673 673 } 674 674 } 675 675 676 - static void ql_get_sem_registers(struct ql_adapter *qdev, u32 *buf) 676 + static void qlge_get_sem_registers(struct qlge_adapter *qdev, u32 *buf) 677 677 { 678 678 u32 func_num, reg, reg_val; 679 679 int status; ··· 682 682 reg = MPI_NIC_REG_BLOCK 683 683 | (func_num << MPI_NIC_FUNCTION_SHIFT) 684 684 | (SEM / 4); 685 - status = ql_read_mpi_reg(qdev, reg, &reg_val); 685 + status = qlge_read_mpi_reg(qdev, reg, &reg_val); 686 686 *buf = reg_val; 687 687 /* if the read failed then dead fill the element. 
*/ 688 688 if (!status) ··· 692 692 } 693 693 694 694 /* Create a coredump segment header */ 695 - static void ql_build_coredump_seg_header( 696 - struct mpi_coredump_segment_header *seg_hdr, 697 - u32 seg_number, u32 seg_size, u8 *desc) 695 + static void qlge_build_coredump_seg_header(struct mpi_coredump_segment_header *seg_hdr, 696 + u32 seg_number, u32 seg_size, u8 *desc) 698 697 { 699 698 memset(seg_hdr, 0, sizeof(struct mpi_coredump_segment_header)); 700 699 seg_hdr->cookie = MPI_COREDUMP_COOKIE; ··· 709 710 * space for this function as well as a coredump structure that 710 711 * will contain the dump. 711 712 */ 712 - int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump) 713 + int qlge_core_dump(struct qlge_adapter *qdev, struct qlge_mpi_coredump *mpi_coredump) 713 714 { 714 715 int status; 715 716 int i; ··· 723 724 * it isn't available. If the firmware died it 724 725 * might be holding the sem. 725 726 */ 726 - ql_sem_spinlock(qdev, SEM_PROC_REG_MASK); 727 + qlge_sem_spinlock(qdev, SEM_PROC_REG_MASK); 727 728 728 - status = ql_pause_mpi_risc(qdev); 729 + status = qlge_pause_mpi_risc(qdev); 729 730 if (status) { 730 731 netif_err(qdev, drv, qdev->ndev, 731 732 "Failed RISC pause. 
Status = 0x%.08x\n", status); ··· 739 740 mpi_coredump->mpi_global_header.header_size = 740 741 sizeof(struct mpi_coredump_global_header); 741 742 mpi_coredump->mpi_global_header.image_size = 742 - sizeof(struct ql_mpi_coredump); 743 + sizeof(struct qlge_mpi_coredump); 743 744 strncpy(mpi_coredump->mpi_global_header.id_string, "MPI Coredump", 744 745 sizeof(mpi_coredump->mpi_global_header.id_string)); 745 746 746 747 /* Get generic NIC reg dump */ 747 - ql_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr, 748 - NIC1_CONTROL_SEG_NUM, 749 - sizeof(struct mpi_coredump_segment_header) + 750 - sizeof(mpi_coredump->nic_regs), "NIC1 Registers"); 748 + qlge_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr, 749 + NIC1_CONTROL_SEG_NUM, 750 + sizeof(struct mpi_coredump_segment_header) + 751 + sizeof(mpi_coredump->nic_regs), "NIC1 Registers"); 751 752 752 - ql_build_coredump_seg_header(&mpi_coredump->nic2_regs_seg_hdr, 753 - NIC2_CONTROL_SEG_NUM, 754 - sizeof(struct mpi_coredump_segment_header) + 755 - sizeof(mpi_coredump->nic2_regs), "NIC2 Registers"); 753 + qlge_build_coredump_seg_header(&mpi_coredump->nic2_regs_seg_hdr, 754 + NIC2_CONTROL_SEG_NUM, 755 + sizeof(struct mpi_coredump_segment_header) + 756 + sizeof(mpi_coredump->nic2_regs), "NIC2 Registers"); 756 757 757 758 /* Get XGMac registers. (Segment 18, Rev C. 
step 21) */ 758 - ql_build_coredump_seg_header(&mpi_coredump->xgmac1_seg_hdr, 759 - NIC1_XGMAC_SEG_NUM, 760 - sizeof(struct mpi_coredump_segment_header) + 761 - sizeof(mpi_coredump->xgmac1), "NIC1 XGMac Registers"); 759 + qlge_build_coredump_seg_header(&mpi_coredump->xgmac1_seg_hdr, 760 + NIC1_XGMAC_SEG_NUM, 761 + sizeof(struct mpi_coredump_segment_header) + 762 + sizeof(mpi_coredump->xgmac1), "NIC1 XGMac Registers"); 762 763 763 - ql_build_coredump_seg_header(&mpi_coredump->xgmac2_seg_hdr, 764 - NIC2_XGMAC_SEG_NUM, 765 - sizeof(struct mpi_coredump_segment_header) + 766 - sizeof(mpi_coredump->xgmac2), "NIC2 XGMac Registers"); 764 + qlge_build_coredump_seg_header(&mpi_coredump->xgmac2_seg_hdr, 765 + NIC2_XGMAC_SEG_NUM, 766 + sizeof(struct mpi_coredump_segment_header) + 767 + sizeof(mpi_coredump->xgmac2), "NIC2 XGMac Registers"); 767 768 768 769 if (qdev->func & 1) { 769 770 /* Odd means our function is NIC 2 */ 770 771 for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++) 771 772 mpi_coredump->nic2_regs[i] = 772 - ql_read32(qdev, i * sizeof(u32)); 773 + qlge_read32(qdev, i * sizeof(u32)); 773 774 774 775 for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++) 775 776 mpi_coredump->nic_regs[i] = 776 - ql_read_other_func_reg(qdev, (i * sizeof(u32)) / 4); 777 + qlge_read_other_func_reg(qdev, (i * sizeof(u32)) / 4); 777 778 778 - ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac2[0], 0); 779 - ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac1[0], 1); 779 + qlge_get_xgmac_regs(qdev, &mpi_coredump->xgmac2[0], 0); 780 + qlge_get_xgmac_regs(qdev, &mpi_coredump->xgmac1[0], 1); 780 781 } else { 781 782 /* Even means our function is NIC 1 */ 782 783 for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++) 783 784 mpi_coredump->nic_regs[i] = 784 - ql_read32(qdev, i * sizeof(u32)); 785 + qlge_read32(qdev, i * sizeof(u32)); 785 786 for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++) 786 787 mpi_coredump->nic2_regs[i] = 787 - ql_read_other_func_reg(qdev, (i * sizeof(u32)) / 4); 788 + qlge_read_other_func_reg(qdev, (i * 
sizeof(u32)) / 4); 788 789 789 - ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac1[0], 0); 790 - ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac2[0], 1); 790 + qlge_get_xgmac_regs(qdev, &mpi_coredump->xgmac1[0], 0); 791 + qlge_get_xgmac_regs(qdev, &mpi_coredump->xgmac2[0], 1); 791 792 } 792 793 793 794 /* Rev C. Step 20a */ 794 - ql_build_coredump_seg_header(&mpi_coredump->xaui_an_hdr, 795 - XAUI_AN_SEG_NUM, 796 - sizeof(struct mpi_coredump_segment_header) + 797 - sizeof(mpi_coredump->serdes_xaui_an), 798 - "XAUI AN Registers"); 795 + qlge_build_coredump_seg_header(&mpi_coredump->xaui_an_hdr, 796 + XAUI_AN_SEG_NUM, 797 + sizeof(struct mpi_coredump_segment_header) + 798 + sizeof(mpi_coredump->serdes_xaui_an), 799 + "XAUI AN Registers"); 799 800 800 801 /* Rev C. Step 20b */ 801 - ql_build_coredump_seg_header(&mpi_coredump->xaui_hss_pcs_hdr, 802 - XAUI_HSS_PCS_SEG_NUM, 803 - sizeof(struct mpi_coredump_segment_header) + 804 - sizeof(mpi_coredump->serdes_xaui_hss_pcs), 805 - "XAUI HSS PCS Registers"); 802 + qlge_build_coredump_seg_header(&mpi_coredump->xaui_hss_pcs_hdr, 803 + XAUI_HSS_PCS_SEG_NUM, 804 + sizeof(struct mpi_coredump_segment_header) + 805 + sizeof(mpi_coredump->serdes_xaui_hss_pcs), 806 + "XAUI HSS PCS Registers"); 806 807 807 - ql_build_coredump_seg_header(&mpi_coredump->xfi_an_hdr, XFI_AN_SEG_NUM, 808 - sizeof(struct mpi_coredump_segment_header) + 809 - sizeof(mpi_coredump->serdes_xfi_an), 810 - "XFI AN Registers"); 808 + qlge_build_coredump_seg_header(&mpi_coredump->xfi_an_hdr, XFI_AN_SEG_NUM, 809 + sizeof(struct mpi_coredump_segment_header) + 810 + sizeof(mpi_coredump->serdes_xfi_an), 811 + "XFI AN Registers"); 811 812 812 - ql_build_coredump_seg_header(&mpi_coredump->xfi_train_hdr, 813 - XFI_TRAIN_SEG_NUM, 814 - sizeof(struct mpi_coredump_segment_header) + 815 - sizeof(mpi_coredump->serdes_xfi_train), 816 - "XFI TRAIN Registers"); 813 + qlge_build_coredump_seg_header(&mpi_coredump->xfi_train_hdr, 814 + XFI_TRAIN_SEG_NUM, 815 + sizeof(struct 
mpi_coredump_segment_header) + 816 + sizeof(mpi_coredump->serdes_xfi_train), 817 + "XFI TRAIN Registers"); 817 818 818 - ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_pcs_hdr, 819 - XFI_HSS_PCS_SEG_NUM, 820 - sizeof(struct mpi_coredump_segment_header) + 821 - sizeof(mpi_coredump->serdes_xfi_hss_pcs), 822 - "XFI HSS PCS Registers"); 819 + qlge_build_coredump_seg_header(&mpi_coredump->xfi_hss_pcs_hdr, 820 + XFI_HSS_PCS_SEG_NUM, 821 + sizeof(struct mpi_coredump_segment_header) + 822 + sizeof(mpi_coredump->serdes_xfi_hss_pcs), 823 + "XFI HSS PCS Registers"); 823 824 824 - ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_tx_hdr, 825 - XFI_HSS_TX_SEG_NUM, 826 - sizeof(struct mpi_coredump_segment_header) + 827 - sizeof(mpi_coredump->serdes_xfi_hss_tx), 828 - "XFI HSS TX Registers"); 825 + qlge_build_coredump_seg_header(&mpi_coredump->xfi_hss_tx_hdr, 826 + XFI_HSS_TX_SEG_NUM, 827 + sizeof(struct mpi_coredump_segment_header) + 828 + sizeof(mpi_coredump->serdes_xfi_hss_tx), 829 + "XFI HSS TX Registers"); 829 830 830 - ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_rx_hdr, 831 - XFI_HSS_RX_SEG_NUM, 832 - sizeof(struct mpi_coredump_segment_header) + 833 - sizeof(mpi_coredump->serdes_xfi_hss_rx), 834 - "XFI HSS RX Registers"); 831 + qlge_build_coredump_seg_header(&mpi_coredump->xfi_hss_rx_hdr, 832 + XFI_HSS_RX_SEG_NUM, 833 + sizeof(struct mpi_coredump_segment_header) + 834 + sizeof(mpi_coredump->serdes_xfi_hss_rx), 835 + "XFI HSS RX Registers"); 835 836 836 - ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_pll_hdr, 837 - XFI_HSS_PLL_SEG_NUM, 838 - sizeof(struct mpi_coredump_segment_header) + 839 - sizeof(mpi_coredump->serdes_xfi_hss_pll), 840 - "XFI HSS PLL Registers"); 837 + qlge_build_coredump_seg_header(&mpi_coredump->xfi_hss_pll_hdr, 838 + XFI_HSS_PLL_SEG_NUM, 839 + sizeof(struct mpi_coredump_segment_header) + 840 + sizeof(mpi_coredump->serdes_xfi_hss_pll), 841 + "XFI HSS PLL Registers"); 841 842 842 - 
ql_build_coredump_seg_header(&mpi_coredump->xaui2_an_hdr, 843 - XAUI2_AN_SEG_NUM, 844 - sizeof(struct mpi_coredump_segment_header) + 845 - sizeof(mpi_coredump->serdes2_xaui_an), 846 - "XAUI2 AN Registers"); 843 + qlge_build_coredump_seg_header(&mpi_coredump->xaui2_an_hdr, 844 + XAUI2_AN_SEG_NUM, 845 + sizeof(struct mpi_coredump_segment_header) + 846 + sizeof(mpi_coredump->serdes2_xaui_an), 847 + "XAUI2 AN Registers"); 847 848 848 - ql_build_coredump_seg_header(&mpi_coredump->xaui2_hss_pcs_hdr, 849 - XAUI2_HSS_PCS_SEG_NUM, 850 - sizeof(struct mpi_coredump_segment_header) + 851 - sizeof(mpi_coredump->serdes2_xaui_hss_pcs), 852 - "XAUI2 HSS PCS Registers"); 849 + qlge_build_coredump_seg_header(&mpi_coredump->xaui2_hss_pcs_hdr, 850 + XAUI2_HSS_PCS_SEG_NUM, 851 + sizeof(struct mpi_coredump_segment_header) + 852 + sizeof(mpi_coredump->serdes2_xaui_hss_pcs), 853 + "XAUI2 HSS PCS Registers"); 853 854 854 - ql_build_coredump_seg_header(&mpi_coredump->xfi2_an_hdr, 855 - XFI2_AN_SEG_NUM, 856 - sizeof(struct mpi_coredump_segment_header) + 857 - sizeof(mpi_coredump->serdes2_xfi_an), 858 - "XFI2 AN Registers"); 855 + qlge_build_coredump_seg_header(&mpi_coredump->xfi2_an_hdr, 856 + XFI2_AN_SEG_NUM, 857 + sizeof(struct mpi_coredump_segment_header) + 858 + sizeof(mpi_coredump->serdes2_xfi_an), 859 + "XFI2 AN Registers"); 859 860 860 - ql_build_coredump_seg_header(&mpi_coredump->xfi2_train_hdr, 861 - XFI2_TRAIN_SEG_NUM, 862 - sizeof(struct mpi_coredump_segment_header) + 863 - sizeof(mpi_coredump->serdes2_xfi_train), 864 - "XFI2 TRAIN Registers"); 861 + qlge_build_coredump_seg_header(&mpi_coredump->xfi2_train_hdr, 862 + XFI2_TRAIN_SEG_NUM, 863 + sizeof(struct mpi_coredump_segment_header) + 864 + sizeof(mpi_coredump->serdes2_xfi_train), 865 + "XFI2 TRAIN Registers"); 865 866 866 - ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_pcs_hdr, 867 - XFI2_HSS_PCS_SEG_NUM, 868 - sizeof(struct mpi_coredump_segment_header) + 869 - sizeof(mpi_coredump->serdes2_xfi_hss_pcs), 870 - "XFI2 HSS 
PCS Registers"); 867 + qlge_build_coredump_seg_header(&mpi_coredump->xfi2_hss_pcs_hdr, 868 + XFI2_HSS_PCS_SEG_NUM, 869 + sizeof(struct mpi_coredump_segment_header) + 870 + sizeof(mpi_coredump->serdes2_xfi_hss_pcs), 871 + "XFI2 HSS PCS Registers"); 871 872 872 - ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_tx_hdr, 873 - XFI2_HSS_TX_SEG_NUM, 874 - sizeof(struct mpi_coredump_segment_header) + 875 - sizeof(mpi_coredump->serdes2_xfi_hss_tx), 876 - "XFI2 HSS TX Registers"); 873 + qlge_build_coredump_seg_header(&mpi_coredump->xfi2_hss_tx_hdr, 874 + XFI2_HSS_TX_SEG_NUM, 875 + sizeof(struct mpi_coredump_segment_header) + 876 + sizeof(mpi_coredump->serdes2_xfi_hss_tx), 877 + "XFI2 HSS TX Registers"); 877 878 878 - ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_rx_hdr, 879 - XFI2_HSS_RX_SEG_NUM, 880 - sizeof(struct mpi_coredump_segment_header) + 881 - sizeof(mpi_coredump->serdes2_xfi_hss_rx), 882 - "XFI2 HSS RX Registers"); 879 + qlge_build_coredump_seg_header(&mpi_coredump->xfi2_hss_rx_hdr, 880 + XFI2_HSS_RX_SEG_NUM, 881 + sizeof(struct mpi_coredump_segment_header) + 882 + sizeof(mpi_coredump->serdes2_xfi_hss_rx), 883 + "XFI2 HSS RX Registers"); 883 884 884 - ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_pll_hdr, 885 - XFI2_HSS_PLL_SEG_NUM, 886 - sizeof(struct mpi_coredump_segment_header) + 887 - sizeof(mpi_coredump->serdes2_xfi_hss_pll), 888 - "XFI2 HSS PLL Registers"); 885 + qlge_build_coredump_seg_header(&mpi_coredump->xfi2_hss_pll_hdr, 886 + XFI2_HSS_PLL_SEG_NUM, 887 + sizeof(struct mpi_coredump_segment_header) + 888 + sizeof(mpi_coredump->serdes2_xfi_hss_pll), 889 + "XFI2 HSS PLL Registers"); 889 890 890 - status = ql_get_serdes_regs(qdev, mpi_coredump); 891 + status = qlge_get_serdes_regs(qdev, mpi_coredump); 891 892 if (status) { 892 893 netif_err(qdev, drv, qdev->ndev, 893 894 "Failed Dump of Serdes Registers. 
Status = 0x%.08x\n", ··· 895 896 goto err; 896 897 } 897 898 898 - ql_build_coredump_seg_header(&mpi_coredump->core_regs_seg_hdr, 899 - CORE_SEG_NUM, 900 - sizeof(mpi_coredump->core_regs_seg_hdr) + 901 - sizeof(mpi_coredump->mpi_core_regs) + 902 - sizeof(mpi_coredump->mpi_core_sh_regs), 903 - "Core Registers"); 899 + qlge_build_coredump_seg_header(&mpi_coredump->core_regs_seg_hdr, 900 + CORE_SEG_NUM, 901 + sizeof(mpi_coredump->core_regs_seg_hdr) + 902 + sizeof(mpi_coredump->mpi_core_regs) + 903 + sizeof(mpi_coredump->mpi_core_sh_regs), 904 + "Core Registers"); 904 905 905 906 /* Get the MPI Core Registers */ 906 - status = ql_get_mpi_regs(qdev, &mpi_coredump->mpi_core_regs[0], 907 - MPI_CORE_REGS_ADDR, MPI_CORE_REGS_CNT); 907 + status = qlge_get_mpi_regs(qdev, &mpi_coredump->mpi_core_regs[0], 908 + MPI_CORE_REGS_ADDR, MPI_CORE_REGS_CNT); 908 909 if (status) 909 910 goto err; 910 911 /* Get the 16 MPI shadow registers */ 911 - status = ql_get_mpi_shadow_regs(qdev, 912 - &mpi_coredump->mpi_core_sh_regs[0]); 912 + status = qlge_get_mpi_shadow_regs(qdev, 913 + &mpi_coredump->mpi_core_sh_regs[0]); 913 914 if (status) 914 915 goto err; 915 916 916 917 /* Get the Test Logic Registers */ 917 - ql_build_coredump_seg_header(&mpi_coredump->test_logic_regs_seg_hdr, 918 - TEST_LOGIC_SEG_NUM, 919 - sizeof(struct mpi_coredump_segment_header) 920 - + sizeof(mpi_coredump->test_logic_regs), 921 - "Test Logic Regs"); 922 - status = ql_get_mpi_regs(qdev, &mpi_coredump->test_logic_regs[0], 923 - TEST_REGS_ADDR, TEST_REGS_CNT); 918 + qlge_build_coredump_seg_header(&mpi_coredump->test_logic_regs_seg_hdr, 919 + TEST_LOGIC_SEG_NUM, 920 + sizeof(struct mpi_coredump_segment_header) 921 + + sizeof(mpi_coredump->test_logic_regs), 922 + "Test Logic Regs"); 923 + status = qlge_get_mpi_regs(qdev, &mpi_coredump->test_logic_regs[0], 924 + TEST_REGS_ADDR, TEST_REGS_CNT); 924 925 if (status) 925 926 goto err; 926 927 927 928 /* Get the RMII Registers */ 928 - 
ql_build_coredump_seg_header(&mpi_coredump->rmii_regs_seg_hdr, 929 - RMII_SEG_NUM, 930 - sizeof(struct mpi_coredump_segment_header) 931 - + sizeof(mpi_coredump->rmii_regs), 932 - "RMII Registers"); 933 - status = ql_get_mpi_regs(qdev, &mpi_coredump->rmii_regs[0], 934 - RMII_REGS_ADDR, RMII_REGS_CNT); 929 + qlge_build_coredump_seg_header(&mpi_coredump->rmii_regs_seg_hdr, 930 + RMII_SEG_NUM, 931 + sizeof(struct mpi_coredump_segment_header) 932 + + sizeof(mpi_coredump->rmii_regs), 933 + "RMII Registers"); 934 + status = qlge_get_mpi_regs(qdev, &mpi_coredump->rmii_regs[0], 935 + RMII_REGS_ADDR, RMII_REGS_CNT); 935 936 if (status) 936 937 goto err; 937 938 938 939 /* Get the FCMAC1 Registers */ 939 - ql_build_coredump_seg_header(&mpi_coredump->fcmac1_regs_seg_hdr, 940 - FCMAC1_SEG_NUM, 941 - sizeof(struct mpi_coredump_segment_header) 942 - + sizeof(mpi_coredump->fcmac1_regs), 943 - "FCMAC1 Registers"); 944 - status = ql_get_mpi_regs(qdev, &mpi_coredump->fcmac1_regs[0], 945 - FCMAC1_REGS_ADDR, FCMAC_REGS_CNT); 940 + qlge_build_coredump_seg_header(&mpi_coredump->fcmac1_regs_seg_hdr, 941 + FCMAC1_SEG_NUM, 942 + sizeof(struct mpi_coredump_segment_header) 943 + + sizeof(mpi_coredump->fcmac1_regs), 944 + "FCMAC1 Registers"); 945 + status = qlge_get_mpi_regs(qdev, &mpi_coredump->fcmac1_regs[0], 946 + FCMAC1_REGS_ADDR, FCMAC_REGS_CNT); 946 947 if (status) 947 948 goto err; 948 949 949 950 /* Get the FCMAC2 Registers */ 950 951 951 - ql_build_coredump_seg_header(&mpi_coredump->fcmac2_regs_seg_hdr, 952 - FCMAC2_SEG_NUM, 953 - sizeof(struct mpi_coredump_segment_header) 954 - + sizeof(mpi_coredump->fcmac2_regs), 955 - "FCMAC2 Registers"); 952 + qlge_build_coredump_seg_header(&mpi_coredump->fcmac2_regs_seg_hdr, 953 + FCMAC2_SEG_NUM, 954 + sizeof(struct mpi_coredump_segment_header) 955 + + sizeof(mpi_coredump->fcmac2_regs), 956 + "FCMAC2 Registers"); 956 957 957 - status = ql_get_mpi_regs(qdev, &mpi_coredump->fcmac2_regs[0], 958 - FCMAC2_REGS_ADDR, FCMAC_REGS_CNT); 958 + status = 
qlge_get_mpi_regs(qdev, &mpi_coredump->fcmac2_regs[0], 959 + FCMAC2_REGS_ADDR, FCMAC_REGS_CNT); 959 960 if (status) 960 961 goto err; 961 962 962 963 /* Get the FC1 MBX Registers */ 963 - ql_build_coredump_seg_header(&mpi_coredump->fc1_mbx_regs_seg_hdr, 964 - FC1_MBOX_SEG_NUM, 965 - sizeof(struct mpi_coredump_segment_header) 966 - + sizeof(mpi_coredump->fc1_mbx_regs), 967 - "FC1 MBox Regs"); 968 - status = ql_get_mpi_regs(qdev, &mpi_coredump->fc1_mbx_regs[0], 969 - FC1_MBX_REGS_ADDR, FC_MBX_REGS_CNT); 964 + qlge_build_coredump_seg_header(&mpi_coredump->fc1_mbx_regs_seg_hdr, 965 + FC1_MBOX_SEG_NUM, 966 + sizeof(struct mpi_coredump_segment_header) 967 + + sizeof(mpi_coredump->fc1_mbx_regs), 968 + "FC1 MBox Regs"); 969 + status = qlge_get_mpi_regs(qdev, &mpi_coredump->fc1_mbx_regs[0], 970 + FC1_MBX_REGS_ADDR, FC_MBX_REGS_CNT); 970 971 if (status) 971 972 goto err; 972 973 973 974 /* Get the IDE Registers */ 974 - ql_build_coredump_seg_header(&mpi_coredump->ide_regs_seg_hdr, 975 - IDE_SEG_NUM, 976 - sizeof(struct mpi_coredump_segment_header) 977 - + sizeof(mpi_coredump->ide_regs), 978 - "IDE Registers"); 979 - status = ql_get_mpi_regs(qdev, &mpi_coredump->ide_regs[0], 980 - IDE_REGS_ADDR, IDE_REGS_CNT); 975 + qlge_build_coredump_seg_header(&mpi_coredump->ide_regs_seg_hdr, 976 + IDE_SEG_NUM, 977 + sizeof(struct mpi_coredump_segment_header) 978 + + sizeof(mpi_coredump->ide_regs), 979 + "IDE Registers"); 980 + status = qlge_get_mpi_regs(qdev, &mpi_coredump->ide_regs[0], 981 + IDE_REGS_ADDR, IDE_REGS_CNT); 981 982 if (status) 982 983 goto err; 983 984 984 985 /* Get the NIC1 MBX Registers */ 985 - ql_build_coredump_seg_header(&mpi_coredump->nic1_mbx_regs_seg_hdr, 986 - NIC1_MBOX_SEG_NUM, 987 - sizeof(struct mpi_coredump_segment_header) 988 - + sizeof(mpi_coredump->nic1_mbx_regs), 989 - "NIC1 MBox Regs"); 990 - status = ql_get_mpi_regs(qdev, &mpi_coredump->nic1_mbx_regs[0], 991 - NIC1_MBX_REGS_ADDR, NIC_MBX_REGS_CNT); 986 + 
qlge_build_coredump_seg_header(&mpi_coredump->nic1_mbx_regs_seg_hdr, 987 + NIC1_MBOX_SEG_NUM, 988 + sizeof(struct mpi_coredump_segment_header) 989 + + sizeof(mpi_coredump->nic1_mbx_regs), 990 + "NIC1 MBox Regs"); 991 + status = qlge_get_mpi_regs(qdev, &mpi_coredump->nic1_mbx_regs[0], 992 + NIC1_MBX_REGS_ADDR, NIC_MBX_REGS_CNT); 992 993 if (status) 993 994 goto err; 994 995 995 996 /* Get the SMBus Registers */ 996 - ql_build_coredump_seg_header(&mpi_coredump->smbus_regs_seg_hdr, 997 - SMBUS_SEG_NUM, 998 - sizeof(struct mpi_coredump_segment_header) 999 - + sizeof(mpi_coredump->smbus_regs), 1000 - "SMBus Registers"); 1001 - status = ql_get_mpi_regs(qdev, &mpi_coredump->smbus_regs[0], 1002 - SMBUS_REGS_ADDR, SMBUS_REGS_CNT); 997 + qlge_build_coredump_seg_header(&mpi_coredump->smbus_regs_seg_hdr, 998 + SMBUS_SEG_NUM, 999 + sizeof(struct mpi_coredump_segment_header) 1000 + + sizeof(mpi_coredump->smbus_regs), 1001 + "SMBus Registers"); 1002 + status = qlge_get_mpi_regs(qdev, &mpi_coredump->smbus_regs[0], 1003 + SMBUS_REGS_ADDR, SMBUS_REGS_CNT); 1003 1004 if (status) 1004 1005 goto err; 1005 1006 1006 1007 /* Get the FC2 MBX Registers */ 1007 - ql_build_coredump_seg_header(&mpi_coredump->fc2_mbx_regs_seg_hdr, 1008 - FC2_MBOX_SEG_NUM, 1009 - sizeof(struct mpi_coredump_segment_header) 1010 - + sizeof(mpi_coredump->fc2_mbx_regs), 1011 - "FC2 MBox Regs"); 1012 - status = ql_get_mpi_regs(qdev, &mpi_coredump->fc2_mbx_regs[0], 1013 - FC2_MBX_REGS_ADDR, FC_MBX_REGS_CNT); 1008 + qlge_build_coredump_seg_header(&mpi_coredump->fc2_mbx_regs_seg_hdr, 1009 + FC2_MBOX_SEG_NUM, 1010 + sizeof(struct mpi_coredump_segment_header) 1011 + + sizeof(mpi_coredump->fc2_mbx_regs), 1012 + "FC2 MBox Regs"); 1013 + status = qlge_get_mpi_regs(qdev, &mpi_coredump->fc2_mbx_regs[0], 1014 + FC2_MBX_REGS_ADDR, FC_MBX_REGS_CNT); 1014 1015 if (status) 1015 1016 goto err; 1016 1017 1017 1018 /* Get the NIC2 MBX Registers */ 1018 - ql_build_coredump_seg_header(&mpi_coredump->nic2_mbx_regs_seg_hdr, 1019 - 
NIC2_MBOX_SEG_NUM, 1020 - sizeof(struct mpi_coredump_segment_header) 1021 - + sizeof(mpi_coredump->nic2_mbx_regs), 1022 - "NIC2 MBox Regs"); 1023 - status = ql_get_mpi_regs(qdev, &mpi_coredump->nic2_mbx_regs[0], 1024 - NIC2_MBX_REGS_ADDR, NIC_MBX_REGS_CNT); 1019 + qlge_build_coredump_seg_header(&mpi_coredump->nic2_mbx_regs_seg_hdr, 1020 + NIC2_MBOX_SEG_NUM, 1021 + sizeof(struct mpi_coredump_segment_header) 1022 + + sizeof(mpi_coredump->nic2_mbx_regs), 1023 + "NIC2 MBox Regs"); 1024 + status = qlge_get_mpi_regs(qdev, &mpi_coredump->nic2_mbx_regs[0], 1025 + NIC2_MBX_REGS_ADDR, NIC_MBX_REGS_CNT); 1025 1026 if (status) 1026 1027 goto err; 1027 1028 1028 1029 /* Get the I2C Registers */ 1029 - ql_build_coredump_seg_header(&mpi_coredump->i2c_regs_seg_hdr, 1030 - I2C_SEG_NUM, 1031 - sizeof(struct mpi_coredump_segment_header) 1032 - + sizeof(mpi_coredump->i2c_regs), 1033 - "I2C Registers"); 1034 - status = ql_get_mpi_regs(qdev, &mpi_coredump->i2c_regs[0], 1035 - I2C_REGS_ADDR, I2C_REGS_CNT); 1030 + qlge_build_coredump_seg_header(&mpi_coredump->i2c_regs_seg_hdr, 1031 + I2C_SEG_NUM, 1032 + sizeof(struct mpi_coredump_segment_header) 1033 + + sizeof(mpi_coredump->i2c_regs), 1034 + "I2C Registers"); 1035 + status = qlge_get_mpi_regs(qdev, &mpi_coredump->i2c_regs[0], 1036 + I2C_REGS_ADDR, I2C_REGS_CNT); 1036 1037 if (status) 1037 1038 goto err; 1038 1039 1039 1040 /* Get the MEMC Registers */ 1040 - ql_build_coredump_seg_header(&mpi_coredump->memc_regs_seg_hdr, 1041 - MEMC_SEG_NUM, 1042 - sizeof(struct mpi_coredump_segment_header) 1043 - + sizeof(mpi_coredump->memc_regs), 1044 - "MEMC Registers"); 1045 - status = ql_get_mpi_regs(qdev, &mpi_coredump->memc_regs[0], 1046 - MEMC_REGS_ADDR, MEMC_REGS_CNT); 1041 + qlge_build_coredump_seg_header(&mpi_coredump->memc_regs_seg_hdr, 1042 + MEMC_SEG_NUM, 1043 + sizeof(struct mpi_coredump_segment_header) 1044 + + sizeof(mpi_coredump->memc_regs), 1045 + "MEMC Registers"); 1046 + status = qlge_get_mpi_regs(qdev, &mpi_coredump->memc_regs[0], 
1047 + MEMC_REGS_ADDR, MEMC_REGS_CNT); 1047 1048 if (status) 1048 1049 goto err; 1049 1050 1050 1051 /* Get the PBus Registers */ 1051 - ql_build_coredump_seg_header(&mpi_coredump->pbus_regs_seg_hdr, 1052 - PBUS_SEG_NUM, 1053 - sizeof(struct mpi_coredump_segment_header) 1054 - + sizeof(mpi_coredump->pbus_regs), 1055 - "PBUS Registers"); 1056 - status = ql_get_mpi_regs(qdev, &mpi_coredump->pbus_regs[0], 1057 - PBUS_REGS_ADDR, PBUS_REGS_CNT); 1052 + qlge_build_coredump_seg_header(&mpi_coredump->pbus_regs_seg_hdr, 1053 + PBUS_SEG_NUM, 1054 + sizeof(struct mpi_coredump_segment_header) 1055 + + sizeof(mpi_coredump->pbus_regs), 1056 + "PBUS Registers"); 1057 + status = qlge_get_mpi_regs(qdev, &mpi_coredump->pbus_regs[0], 1058 + PBUS_REGS_ADDR, PBUS_REGS_CNT); 1058 1059 if (status) 1059 1060 goto err; 1060 1061 1061 1062 /* Get the MDE Registers */ 1062 - ql_build_coredump_seg_header(&mpi_coredump->mde_regs_seg_hdr, 1063 - MDE_SEG_NUM, 1064 - sizeof(struct mpi_coredump_segment_header) 1065 - + sizeof(mpi_coredump->mde_regs), 1066 - "MDE Registers"); 1067 - status = ql_get_mpi_regs(qdev, &mpi_coredump->mde_regs[0], 1068 - MDE_REGS_ADDR, MDE_REGS_CNT); 1063 + qlge_build_coredump_seg_header(&mpi_coredump->mde_regs_seg_hdr, 1064 + MDE_SEG_NUM, 1065 + sizeof(struct mpi_coredump_segment_header) 1066 + + sizeof(mpi_coredump->mde_regs), 1067 + "MDE Registers"); 1068 + status = qlge_get_mpi_regs(qdev, &mpi_coredump->mde_regs[0], 1069 + MDE_REGS_ADDR, MDE_REGS_CNT); 1069 1070 if (status) 1070 1071 goto err; 1071 1072 1072 - ql_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr, 1073 - MISC_NIC_INFO_SEG_NUM, 1074 - sizeof(struct mpi_coredump_segment_header) 1075 - + sizeof(mpi_coredump->misc_nic_info), 1076 - "MISC NIC INFO"); 1073 + qlge_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr, 1074 + MISC_NIC_INFO_SEG_NUM, 1075 + sizeof(struct mpi_coredump_segment_header) 1076 + + sizeof(mpi_coredump->misc_nic_info), 1077 + "MISC NIC INFO"); 1077 1078 
mpi_coredump->misc_nic_info.rx_ring_count = qdev->rx_ring_count; 1078 1079 mpi_coredump->misc_nic_info.tx_ring_count = qdev->tx_ring_count; 1079 1080 mpi_coredump->misc_nic_info.intr_count = qdev->intr_count; ··· 1081 1082 1082 1083 /* Segment 31 */ 1083 1084 /* Get indexed register values. */ 1084 - ql_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr, 1085 - INTR_STATES_SEG_NUM, 1086 - sizeof(struct mpi_coredump_segment_header) 1087 - + sizeof(mpi_coredump->intr_states), 1088 - "INTR States"); 1089 - ql_get_intr_states(qdev, &mpi_coredump->intr_states[0]); 1085 + qlge_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr, 1086 + INTR_STATES_SEG_NUM, 1087 + sizeof(struct mpi_coredump_segment_header) 1088 + + sizeof(mpi_coredump->intr_states), 1089 + "INTR States"); 1090 + qlge_get_intr_states(qdev, &mpi_coredump->intr_states[0]); 1090 1091 1091 - ql_build_coredump_seg_header(&mpi_coredump->cam_entries_seg_hdr, 1092 - CAM_ENTRIES_SEG_NUM, 1093 - sizeof(struct mpi_coredump_segment_header) 1094 - + sizeof(mpi_coredump->cam_entries), 1095 - "CAM Entries"); 1096 - status = ql_get_cam_entries(qdev, &mpi_coredump->cam_entries[0]); 1092 + qlge_build_coredump_seg_header(&mpi_coredump->cam_entries_seg_hdr, 1093 + CAM_ENTRIES_SEG_NUM, 1094 + sizeof(struct mpi_coredump_segment_header) 1095 + + sizeof(mpi_coredump->cam_entries), 1096 + "CAM Entries"); 1097 + status = qlge_get_cam_entries(qdev, &mpi_coredump->cam_entries[0]); 1097 1098 if (status) 1098 1099 goto err; 1099 1100 1100 - ql_build_coredump_seg_header(&mpi_coredump->nic_routing_words_seg_hdr, 1101 - ROUTING_WORDS_SEG_NUM, 1102 - sizeof(struct mpi_coredump_segment_header) 1103 - + sizeof(mpi_coredump->nic_routing_words), 1104 - "Routing Words"); 1105 - status = ql_get_routing_entries(qdev, 1106 - &mpi_coredump->nic_routing_words[0]); 1101 + qlge_build_coredump_seg_header(&mpi_coredump->nic_routing_words_seg_hdr, 1102 + ROUTING_WORDS_SEG_NUM, 1103 + sizeof(struct mpi_coredump_segment_header) 1104 + + 
sizeof(mpi_coredump->nic_routing_words), 1105 + "Routing Words"); 1106 + status = qlge_get_routing_entries(qdev, 1107 + &mpi_coredump->nic_routing_words[0]); 1107 1108 if (status) 1108 1109 goto err; 1109 1110 1110 1111 /* Segment 34 (Rev C. step 23) */ 1111 - ql_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr, 1112 - ETS_SEG_NUM, 1113 - sizeof(struct mpi_coredump_segment_header) 1114 - + sizeof(mpi_coredump->ets), 1115 - "ETS Registers"); 1116 - status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]); 1112 + qlge_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr, 1113 + ETS_SEG_NUM, 1114 + sizeof(struct mpi_coredump_segment_header) 1115 + + sizeof(mpi_coredump->ets), 1116 + "ETS Registers"); 1117 + status = qlge_get_ets_regs(qdev, &mpi_coredump->ets[0]); 1117 1118 if (status) 1118 1119 goto err; 1119 1120 1120 - ql_build_coredump_seg_header(&mpi_coredump->probe_dump_seg_hdr, 1121 - PROBE_DUMP_SEG_NUM, 1122 - sizeof(struct mpi_coredump_segment_header) 1123 - + sizeof(mpi_coredump->probe_dump), 1124 - "Probe Dump"); 1125 - ql_get_probe_dump(qdev, &mpi_coredump->probe_dump[0]); 1121 + qlge_build_coredump_seg_header(&mpi_coredump->probe_dump_seg_hdr, 1122 + PROBE_DUMP_SEG_NUM, 1123 + sizeof(struct mpi_coredump_segment_header) 1124 + + sizeof(mpi_coredump->probe_dump), 1125 + "Probe Dump"); 1126 + qlge_get_probe_dump(qdev, &mpi_coredump->probe_dump[0]); 1126 1127 1127 - ql_build_coredump_seg_header(&mpi_coredump->routing_reg_seg_hdr, 1128 - ROUTING_INDEX_SEG_NUM, 1129 - sizeof(struct mpi_coredump_segment_header) 1130 - + sizeof(mpi_coredump->routing_regs), 1131 - "Routing Regs"); 1132 - status = ql_get_routing_index_registers(qdev, 1133 - &mpi_coredump->routing_regs[0]); 1128 + qlge_build_coredump_seg_header(&mpi_coredump->routing_reg_seg_hdr, 1129 + ROUTING_INDEX_SEG_NUM, 1130 + sizeof(struct mpi_coredump_segment_header) 1131 + + sizeof(mpi_coredump->routing_regs), 1132 + "Routing Regs"); 1133 + status = qlge_get_routing_index_registers(qdev, 1134 + 
&mpi_coredump->routing_regs[0]); 1134 1135 if (status) 1135 1136 goto err; 1136 1137 1137 - ql_build_coredump_seg_header(&mpi_coredump->mac_prot_reg_seg_hdr, 1138 - MAC_PROTOCOL_SEG_NUM, 1139 - sizeof(struct mpi_coredump_segment_header) 1140 - + sizeof(mpi_coredump->mac_prot_regs), 1141 - "MAC Prot Regs"); 1142 - ql_get_mac_protocol_registers(qdev, &mpi_coredump->mac_prot_regs[0]); 1138 + qlge_build_coredump_seg_header(&mpi_coredump->mac_prot_reg_seg_hdr, 1139 + MAC_PROTOCOL_SEG_NUM, 1140 + sizeof(struct mpi_coredump_segment_header) 1141 + + sizeof(mpi_coredump->mac_prot_regs), 1142 + "MAC Prot Regs"); 1143 + qlge_get_mac_protocol_registers(qdev, &mpi_coredump->mac_prot_regs[0]); 1143 1144 1144 1145 /* Get the semaphore registers for all 5 functions */ 1145 - ql_build_coredump_seg_header(&mpi_coredump->sem_regs_seg_hdr, 1146 - SEM_REGS_SEG_NUM, 1147 - sizeof(struct mpi_coredump_segment_header) + 1148 - sizeof(mpi_coredump->sem_regs), "Sem Registers"); 1146 + qlge_build_coredump_seg_header(&mpi_coredump->sem_regs_seg_hdr, 1147 + SEM_REGS_SEG_NUM, 1148 + sizeof(struct mpi_coredump_segment_header) + 1149 + sizeof(mpi_coredump->sem_regs), "Sem Registers"); 1149 1150 1150 - ql_get_sem_registers(qdev, &mpi_coredump->sem_regs[0]); 1151 + qlge_get_sem_registers(qdev, &mpi_coredump->sem_regs[0]); 1151 1152 1152 1153 /* Prevent the mpi restarting while we dump the memory.*/ 1153 - ql_write_mpi_reg(qdev, MPI_TEST_FUNC_RST_STS, MPI_TEST_FUNC_RST_FRC); 1154 + qlge_write_mpi_reg(qdev, MPI_TEST_FUNC_RST_STS, MPI_TEST_FUNC_RST_FRC); 1154 1155 1155 1156 /* clear the pause */ 1156 - status = ql_unpause_mpi_risc(qdev); 1157 + status = qlge_unpause_mpi_risc(qdev); 1157 1158 if (status) { 1158 1159 netif_err(qdev, drv, qdev->ndev, 1159 1160 "Failed RISC unpause. 
Status = 0x%.08x\n", status); ··· 1161 1162 } 1162 1163 1163 1164 /* Reset the RISC so we can dump RAM */ 1164 - status = ql_hard_reset_mpi_risc(qdev); 1165 + status = qlge_hard_reset_mpi_risc(qdev); 1165 1166 if (status) { 1166 1167 netif_err(qdev, drv, qdev->ndev, 1167 1168 "Failed RISC reset. Status = 0x%.08x\n", status); 1168 1169 goto err; 1169 1170 } 1170 1171 1171 - ql_build_coredump_seg_header(&mpi_coredump->code_ram_seg_hdr, 1172 - WCS_RAM_SEG_NUM, 1173 - sizeof(struct mpi_coredump_segment_header) 1174 - + sizeof(mpi_coredump->code_ram), 1175 - "WCS RAM"); 1176 - status = ql_dump_risc_ram_area(qdev, &mpi_coredump->code_ram[0], 1177 - CODE_RAM_ADDR, CODE_RAM_CNT); 1172 + qlge_build_coredump_seg_header(&mpi_coredump->code_ram_seg_hdr, 1173 + WCS_RAM_SEG_NUM, 1174 + sizeof(struct mpi_coredump_segment_header) 1175 + + sizeof(mpi_coredump->code_ram), 1176 + "WCS RAM"); 1177 + status = qlge_dump_risc_ram_area(qdev, &mpi_coredump->code_ram[0], 1178 + CODE_RAM_ADDR, CODE_RAM_CNT); 1178 1179 if (status) { 1179 1180 netif_err(qdev, drv, qdev->ndev, 1180 1181 "Failed Dump of CODE RAM. Status = 0x%.08x\n", ··· 1183 1184 } 1184 1185 1185 1186 /* Insert the segment header */ 1186 - ql_build_coredump_seg_header(&mpi_coredump->memc_ram_seg_hdr, 1187 - MEMC_RAM_SEG_NUM, 1188 - sizeof(struct mpi_coredump_segment_header) 1189 - + sizeof(mpi_coredump->memc_ram), 1190 - "MEMC RAM"); 1191 - status = ql_dump_risc_ram_area(qdev, &mpi_coredump->memc_ram[0], 1192 - MEMC_RAM_ADDR, MEMC_RAM_CNT); 1187 + qlge_build_coredump_seg_header(&mpi_coredump->memc_ram_seg_hdr, 1188 + MEMC_RAM_SEG_NUM, 1189 + sizeof(struct mpi_coredump_segment_header) 1190 + + sizeof(mpi_coredump->memc_ram), 1191 + "MEMC RAM"); 1192 + status = qlge_dump_risc_ram_area(qdev, &mpi_coredump->memc_ram[0], 1193 + MEMC_RAM_ADDR, MEMC_RAM_CNT); 1193 1194 if (status) { 1194 1195 netif_err(qdev, drv, qdev->ndev, 1195 1196 "Failed Dump of MEMC RAM. 
Status = 0x%.08x\n", ··· 1197 1198 goto err; 1198 1199 } 1199 1200 err: 1200 - ql_sem_unlock(qdev, SEM_PROC_REG_MASK); /* does flush too */ 1201 + qlge_sem_unlock(qdev, SEM_PROC_REG_MASK); /* does flush too */ 1201 1202 return status; 1202 1203 } 1203 1204 1204 - static void ql_get_core_dump(struct ql_adapter *qdev) 1205 + static void qlge_get_core_dump(struct qlge_adapter *qdev) 1205 1206 { 1206 - if (!ql_own_firmware(qdev)) { 1207 + if (!qlge_own_firmware(qdev)) { 1207 1208 netif_err(qdev, drv, qdev->ndev, "Don't own firmware!\n"); 1208 1209 return; 1209 1210 } ··· 1213 1214 "Force Coredump can only be done from interface that is up\n"); 1214 1215 return; 1215 1216 } 1216 - ql_queue_fw_error(qdev); 1217 + qlge_queue_fw_error(qdev); 1217 1218 } 1218 1219 1219 - static void ql_gen_reg_dump(struct ql_adapter *qdev, 1220 - struct ql_reg_dump *mpi_coredump) 1220 + static void qlge_gen_reg_dump(struct qlge_adapter *qdev, 1221 + struct qlge_reg_dump *mpi_coredump) 1221 1222 { 1222 1223 int i, status; 1223 1224 ··· 1227 1228 mpi_coredump->mpi_global_header.header_size = 1228 1229 sizeof(struct mpi_coredump_global_header); 1229 1230 mpi_coredump->mpi_global_header.image_size = 1230 - sizeof(struct ql_reg_dump); 1231 + sizeof(struct qlge_reg_dump); 1231 1232 strncpy(mpi_coredump->mpi_global_header.id_string, "MPI Coredump", 1232 1233 sizeof(mpi_coredump->mpi_global_header.id_string)); 1233 1234 1234 1235 /* segment 16 */ 1235 - ql_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr, 1236 - MISC_NIC_INFO_SEG_NUM, 1237 - sizeof(struct mpi_coredump_segment_header) 1238 - + sizeof(mpi_coredump->misc_nic_info), 1239 - "MISC NIC INFO"); 1236 + qlge_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr, 1237 + MISC_NIC_INFO_SEG_NUM, 1238 + sizeof(struct mpi_coredump_segment_header) 1239 + + sizeof(mpi_coredump->misc_nic_info), 1240 + "MISC NIC INFO"); 1240 1241 mpi_coredump->misc_nic_info.rx_ring_count = qdev->rx_ring_count; 1241 1242 
mpi_coredump->misc_nic_info.tx_ring_count = qdev->tx_ring_count; 1242 1243 mpi_coredump->misc_nic_info.intr_count = qdev->intr_count; 1243 1244 mpi_coredump->misc_nic_info.function = qdev->func; 1244 1245 1245 1246 /* Segment 16, Rev C. Step 18 */ 1246 - ql_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr, 1247 - NIC1_CONTROL_SEG_NUM, 1248 - sizeof(struct mpi_coredump_segment_header) 1249 - + sizeof(mpi_coredump->nic_regs), 1250 - "NIC Registers"); 1247 + qlge_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr, 1248 + NIC1_CONTROL_SEG_NUM, 1249 + sizeof(struct mpi_coredump_segment_header) 1250 + + sizeof(mpi_coredump->nic_regs), 1251 + "NIC Registers"); 1251 1252 /* Get generic reg dump */ 1252 1253 for (i = 0; i < 64; i++) 1253 - mpi_coredump->nic_regs[i] = ql_read32(qdev, i * sizeof(u32)); 1254 + mpi_coredump->nic_regs[i] = qlge_read32(qdev, i * sizeof(u32)); 1254 1255 1255 1256 /* Segment 31 */ 1256 1257 /* Get indexed register values. */ 1257 - ql_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr, 1258 - INTR_STATES_SEG_NUM, 1259 - sizeof(struct mpi_coredump_segment_header) 1260 - + sizeof(mpi_coredump->intr_states), 1261 - "INTR States"); 1262 - ql_get_intr_states(qdev, &mpi_coredump->intr_states[0]); 1258 + qlge_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr, 1259 + INTR_STATES_SEG_NUM, 1260 + sizeof(struct mpi_coredump_segment_header) 1261 + + sizeof(mpi_coredump->intr_states), 1262 + "INTR States"); 1263 + qlge_get_intr_states(qdev, &mpi_coredump->intr_states[0]); 1263 1264 1264 - ql_build_coredump_seg_header(&mpi_coredump->cam_entries_seg_hdr, 1265 - CAM_ENTRIES_SEG_NUM, 1266 - sizeof(struct mpi_coredump_segment_header) 1267 - + sizeof(mpi_coredump->cam_entries), 1268 - "CAM Entries"); 1269 - status = ql_get_cam_entries(qdev, &mpi_coredump->cam_entries[0]); 1265 + qlge_build_coredump_seg_header(&mpi_coredump->cam_entries_seg_hdr, 1266 + CAM_ENTRIES_SEG_NUM, 1267 + sizeof(struct mpi_coredump_segment_header) 1268 + + 
sizeof(mpi_coredump->cam_entries), 1269 + "CAM Entries"); 1270 + status = qlge_get_cam_entries(qdev, &mpi_coredump->cam_entries[0]); 1270 1271 if (status) 1271 1272 return; 1272 1273 1273 - ql_build_coredump_seg_header(&mpi_coredump->nic_routing_words_seg_hdr, 1274 - ROUTING_WORDS_SEG_NUM, 1275 - sizeof(struct mpi_coredump_segment_header) 1276 - + sizeof(mpi_coredump->nic_routing_words), 1277 - "Routing Words"); 1278 - status = ql_get_routing_entries(qdev, 1279 - &mpi_coredump->nic_routing_words[0]); 1274 + qlge_build_coredump_seg_header(&mpi_coredump->nic_routing_words_seg_hdr, 1275 + ROUTING_WORDS_SEG_NUM, 1276 + sizeof(struct mpi_coredump_segment_header) 1277 + + sizeof(mpi_coredump->nic_routing_words), 1278 + "Routing Words"); 1279 + status = qlge_get_routing_entries(qdev, 1280 + &mpi_coredump->nic_routing_words[0]); 1280 1281 if (status) 1281 1282 return; 1282 1283 1283 1284 /* Segment 34 (Rev C. step 23) */ 1284 - ql_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr, 1285 - ETS_SEG_NUM, 1286 - sizeof(struct mpi_coredump_segment_header) 1287 - + sizeof(mpi_coredump->ets), 1288 - "ETS Registers"); 1289 - status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]); 1285 + qlge_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr, 1286 + ETS_SEG_NUM, 1287 + sizeof(struct mpi_coredump_segment_header) 1288 + + sizeof(mpi_coredump->ets), 1289 + "ETS Registers"); 1290 + status = qlge_get_ets_regs(qdev, &mpi_coredump->ets[0]); 1290 1291 if (status) 1291 1292 return; 1292 1293 } 1293 1294 1294 - void ql_get_dump(struct ql_adapter *qdev, void *buff) 1295 + void qlge_get_dump(struct qlge_adapter *qdev, void *buff) 1295 1296 { 1296 1297 /* 1297 1298 * If the dump has already been taken and is stored ··· 1303 1304 */ 1304 1305 1305 1306 if (!test_bit(QL_FRC_COREDUMP, &qdev->flags)) { 1306 - if (!ql_core_dump(qdev, buff)) 1307 - ql_soft_reset_mpi_risc(qdev); 1307 + if (!qlge_core_dump(qdev, buff)) 1308 + qlge_soft_reset_mpi_risc(qdev); 1308 1309 else 1309 1310 netif_err(qdev, 
drv, qdev->ndev, "coredump failed!\n"); 1310 1311 } else { 1311 - ql_gen_reg_dump(qdev, buff); 1312 - ql_get_core_dump(qdev); 1312 + qlge_gen_reg_dump(qdev, buff); 1313 + qlge_get_core_dump(qdev); 1313 1314 } 1314 1315 } 1315 1316 1316 1317 /* Coredump to messages log file using separate worker thread */ 1317 - void ql_mpi_core_to_log(struct work_struct *work) 1318 + void qlge_mpi_core_to_log(struct work_struct *work) 1318 1319 { 1319 - struct ql_adapter *qdev = 1320 - container_of(work, struct ql_adapter, mpi_core_to_log.work); 1320 + struct qlge_adapter *qdev = 1321 + container_of(work, struct qlge_adapter, mpi_core_to_log.work); 1321 1322 1322 1323 print_hex_dump(KERN_DEBUG, "Core is dumping to log file!\n", 1323 1324 DUMP_PREFIX_OFFSET, 32, 4, qdev->mpi_coredump, ··· 1325 1326 } 1326 1327 1327 1328 #ifdef QL_REG_DUMP 1328 - static void ql_dump_intr_states(struct ql_adapter *qdev) 1329 + static void qlge_dump_intr_states(struct qlge_adapter *qdev) 1329 1330 { 1330 1331 int i; 1331 1332 u32 value; 1332 1333 1333 1334 for (i = 0; i < qdev->intr_count; i++) { 1334 - ql_write32(qdev, INTR_EN, qdev->intr_context[i].intr_read_mask); 1335 - value = ql_read32(qdev, INTR_EN); 1335 + qlge_write32(qdev, INTR_EN, qdev->intr_context[i].intr_read_mask); 1336 + value = qlge_read32(qdev, INTR_EN); 1336 1337 netdev_err(qdev->ndev, "Interrupt %d is %s\n", i, 1337 1338 (value & INTR_EN_EN ? 
"enabled" : "disabled")); 1338 1339 } 1339 1340 } 1340 1341 1341 1342 #define DUMP_XGMAC(qdev, reg) \ 1342 - do { \ 1343 - u32 data; \ 1344 - ql_read_xgmac_reg(qdev, reg, &data); \ 1345 - netdev_err(qdev->ndev, "%s = 0x%.08x\n", #reg, data); \ 1346 - } while (0) 1343 + do { \ 1344 + u32 data; \ 1345 + qlge_read_xgmac_reg(qdev, reg, &data); \ 1346 + netdev_err(qdev->ndev, "%s = 0x%.08x\n", #reg, data); \ 1347 + } while (0) 1347 1348 1348 - void ql_dump_xgmac_control_regs(struct ql_adapter *qdev) 1349 + void qlge_dump_xgmac_control_regs(struct qlge_adapter *qdev) 1349 1350 { 1350 - if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) { 1351 + if (qlge_sem_spinlock(qdev, qdev->xg_sem_mask)) { 1351 1352 netdev_err(qdev->ndev, "%s: Couldn't get xgmac sem\n", 1352 1353 __func__); 1353 1354 return; ··· 1369 1370 DUMP_XGMAC(qdev, MAC_MGMT_INT); 1370 1371 DUMP_XGMAC(qdev, MAC_MGMT_IN_MASK); 1371 1372 DUMP_XGMAC(qdev, EXT_ARB_MODE); 1372 - ql_sem_unlock(qdev, qdev->xg_sem_mask); 1373 + qlge_sem_unlock(qdev, qdev->xg_sem_mask); 1373 1374 } 1374 1375 1375 - static void ql_dump_ets_regs(struct ql_adapter *qdev) 1376 + static void qlge_dump_ets_regs(struct qlge_adapter *qdev) 1376 1377 { 1377 1378 } 1378 1379 1379 - static void ql_dump_cam_entries(struct ql_adapter *qdev) 1380 + static void qlge_dump_cam_entries(struct qlge_adapter *qdev) 1380 1381 { 1381 1382 int i; 1382 1383 u32 value[3]; 1383 1384 1384 - i = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); 1385 + i = qlge_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); 1385 1386 if (i) 1386 1387 return; 1387 1388 for (i = 0; i < 4; i++) { 1388 - if (ql_get_mac_addr_reg(qdev, MAC_ADDR_TYPE_CAM_MAC, i, value)) { 1389 + if (qlge_get_mac_addr_reg(qdev, MAC_ADDR_TYPE_CAM_MAC, i, value)) { 1389 1390 netdev_err(qdev->ndev, 1390 1391 "%s: Failed read of mac index register\n", 1391 1392 __func__); ··· 1397 1398 i, value[1], value[0], value[2]); 1398 1399 } 1399 1400 for (i = 0; i < 32; i++) { 1400 - if (ql_get_mac_addr_reg 1401 + if (qlge_get_mac_addr_reg 
1401 1402 (qdev, MAC_ADDR_TYPE_MULTI_MAC, i, value)) { 1402 1403 netdev_err(qdev->ndev, 1403 1404 "%s: Failed read of mac index register\n", ··· 1409 1410 "MCAST index %d CAM Lookup Lower = 0x%.08x:%.08x\n", 1410 1411 i, value[1], value[0]); 1411 1412 } 1412 - ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); 1413 + qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK); 1413 1414 } 1414 1415 1415 - void ql_dump_routing_entries(struct ql_adapter *qdev) 1416 + void qlge_dump_routing_entries(struct qlge_adapter *qdev) 1416 1417 { 1417 1418 int i; 1418 1419 u32 value; 1419 1420 1420 - i = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); 1421 + i = qlge_sem_spinlock(qdev, SEM_RT_IDX_MASK); 1421 1422 if (i) 1422 1423 return; 1423 1424 for (i = 0; i < 16; i++) { 1424 1425 value = 0; 1425 - if (ql_get_routing_reg(qdev, i, &value)) { 1426 + if (qlge_get_routing_reg(qdev, i, &value)) { 1426 1427 netdev_err(qdev->ndev, 1427 1428 "%s: Failed read of routing index register\n", 1428 1429 __func__); ··· 1433 1434 "Routing Mask %d = 0x%.08x\n", 1434 1435 i, value); 1435 1436 } 1436 - ql_sem_unlock(qdev, SEM_RT_IDX_MASK); 1437 + qlge_sem_unlock(qdev, SEM_RT_IDX_MASK); 1437 1438 } 1438 1439 1439 1440 #define DUMP_REG(qdev, reg) \ 1440 - netdev_err(qdev->ndev, "%-32s= 0x%x\n", #reg, ql_read32(qdev, reg)) 1441 + netdev_err(qdev->ndev, "%-32s= 0x%x\n", #reg, qlge_read32(qdev, reg)) 1441 1442 1442 - void ql_dump_regs(struct ql_adapter *qdev) 1443 + void qlge_dump_regs(struct qlge_adapter *qdev) 1443 1444 { 1444 1445 netdev_err(qdev->ndev, "reg dump for function #%d\n", qdev->func); 1445 1446 DUMP_REG(qdev, SYS); ··· 1495 1496 DUMP_REG(qdev, XG_SERDES_DATA); 1496 1497 DUMP_REG(qdev, PRB_MX_ADDR); 1497 1498 DUMP_REG(qdev, PRB_MX_DATA); 1498 - ql_dump_intr_states(qdev); 1499 - ql_dump_xgmac_control_regs(qdev); 1500 - ql_dump_ets_regs(qdev); 1501 - ql_dump_cam_entries(qdev); 1502 - ql_dump_routing_entries(qdev); 1499 + qlge_dump_intr_states(qdev); 1500 + qlge_dump_xgmac_control_regs(qdev); 1501 + qlge_dump_ets_regs(qdev); 
1502 + qlge_dump_cam_entries(qdev); 1503 + qlge_dump_routing_entries(qdev); 1503 1504 } 1504 1505 #endif 1505 1506 ··· 1509 1510 netdev_err(qdev->ndev, "%s = %ld\n", #stat, \ 1510 1511 (unsigned long)(qdev)->nic_stats.stat) 1511 1512 1512 - void ql_dump_stat(struct ql_adapter *qdev) 1513 + void qlge_dump_stat(struct qlge_adapter *qdev) 1513 1514 { 1514 1515 netdev_err(qdev->ndev, "%s: Enter\n", __func__); 1515 1516 DUMP_STAT(qdev, tx_pkts); ··· 1566 1567 (unsigned long long)qdev->field) 1567 1568 #define DUMP_QDEV_ARRAY(qdev, type, array, index, field) \ 1568 1569 netdev_err(qdev->ndev, "%s[%d].%s = " type "\n", \ 1569 - #array, index, #field, (qdev)->array[index].field) 1570 - void ql_dump_qdev(struct ql_adapter *qdev) 1570 + #array, index, #field, (qdev)->array[index].field) 1571 + void qlge_dump_qdev(struct qlge_adapter *qdev) 1571 1572 { 1572 1573 int i; 1573 1574 ··· 1614 1615 #endif 1615 1616 1616 1617 #ifdef QL_CB_DUMP 1617 - void ql_dump_wqicb(struct wqicb *wqicb) 1618 + void qlge_dump_wqicb(struct wqicb *wqicb) 1618 1619 { 1619 1620 struct tx_ring *tx_ring = container_of(wqicb, struct tx_ring, wqicb); 1620 - struct ql_adapter *qdev = tx_ring->qdev; 1621 + struct qlge_adapter *qdev = tx_ring->qdev; 1621 1622 1622 1623 netdev_err(qdev->ndev, "Dumping wqicb stuff...\n"); 1623 1624 netdev_err(qdev->ndev, "wqicb->len = 0x%x\n", le16_to_cpu(wqicb->len)); ··· 1632 1633 (unsigned long long)le64_to_cpu(wqicb->cnsmr_idx_addr)); 1633 1634 } 1634 1635 1635 - void ql_dump_tx_ring(struct tx_ring *tx_ring) 1636 + void qlge_dump_tx_ring(struct tx_ring *tx_ring) 1636 1637 { 1637 - struct ql_adapter *qdev = tx_ring->qdev; 1638 + struct qlge_adapter *qdev = tx_ring->qdev; 1638 1639 1639 1640 netdev_err(qdev->ndev, "===================== Dumping tx_ring %d ===============\n", 1640 1641 tx_ring->wq_id); ··· 1644 1645 netdev_err(qdev->ndev, "tx_ring->cnsmr_idx_sh_reg, addr = 0x%p, value = %d\n", 1645 1646 tx_ring->cnsmr_idx_sh_reg, 1646 1647 tx_ring->cnsmr_idx_sh_reg 1647 - ? 
ql_read_sh_reg(tx_ring->cnsmr_idx_sh_reg) : 0); 1648 + ? qlge_read_sh_reg(tx_ring->cnsmr_idx_sh_reg) : 0); 1648 1649 netdev_err(qdev->ndev, "tx_ring->size = %d\n", tx_ring->wq_size); 1649 1650 netdev_err(qdev->ndev, "tx_ring->len = %d\n", tx_ring->wq_len); 1650 1651 netdev_err(qdev->ndev, "tx_ring->prod_idx_db_reg = %p\n", tx_ring->prod_idx_db_reg); ··· 1656 1657 netdev_err(qdev->ndev, "tx_ring->tx_count = %d\n", atomic_read(&tx_ring->tx_count)); 1657 1658 } 1658 1659 1659 - void ql_dump_ricb(struct ricb *ricb) 1660 + void qlge_dump_ricb(struct ricb *ricb) 1660 1661 { 1661 1662 int i; 1662 - struct ql_adapter *qdev = 1663 - container_of(ricb, struct ql_adapter, ricb); 1663 + struct qlge_adapter *qdev = 1664 + container_of(ricb, struct qlge_adapter, ricb); 1664 1665 1665 1666 netdev_err(qdev->ndev, "===================== Dumping ricb ===============\n"); 1666 1667 netdev_err(qdev->ndev, "Dumping ricb stuff...\n"); ··· 1688 1689 le32_to_cpu(ricb->ipv4_hash_key[i])); 1689 1690 } 1690 1691 1691 - void ql_dump_cqicb(struct cqicb *cqicb) 1692 + void qlge_dump_cqicb(struct cqicb *cqicb) 1692 1693 { 1693 1694 struct rx_ring *rx_ring = container_of(cqicb, struct rx_ring, cqicb); 1694 - struct ql_adapter *qdev = rx_ring->qdev; 1695 + struct qlge_adapter *qdev = rx_ring->qdev; 1695 1696 1696 1697 netdev_err(qdev->ndev, "Dumping cqicb stuff...\n"); 1697 1698 ··· 1722 1723 1723 1724 static const char *qlge_rx_ring_type_name(struct rx_ring *rx_ring) 1724 1725 { 1725 - struct ql_adapter *qdev = rx_ring->qdev; 1726 + struct qlge_adapter *qdev = rx_ring->qdev; 1726 1727 1727 1728 if (rx_ring->cq_id < qdev->rss_ring_count) 1728 1729 return "RX COMPLETION"; ··· 1730 1731 return "TX COMPLETION"; 1731 1732 }; 1732 1733 1733 - void ql_dump_rx_ring(struct rx_ring *rx_ring) 1734 + void qlge_dump_rx_ring(struct rx_ring *rx_ring) 1734 1735 { 1735 - struct ql_adapter *qdev = rx_ring->qdev; 1736 + struct qlge_adapter *qdev = rx_ring->qdev; 1736 1737 1737 1738 netdev_err(qdev->ndev, 1738 1739 
"===================== Dumping rx_ring %d ===============\n", ··· 1749 1750 netdev_err(qdev->ndev, 1750 1751 "rx_ring->prod_idx_sh_reg, addr = 0x%p, value = %d\n", 1751 1752 rx_ring->prod_idx_sh_reg, 1752 - rx_ring->prod_idx_sh_reg ? ql_read_sh_reg(rx_ring->prod_idx_sh_reg) : 0); 1753 + rx_ring->prod_idx_sh_reg ? qlge_read_sh_reg(rx_ring->prod_idx_sh_reg) : 0); 1753 1754 netdev_err(qdev->ndev, "rx_ring->prod_idx_sh_reg_dma = %llx\n", 1754 1755 (unsigned long long)rx_ring->prod_idx_sh_reg_dma); 1755 1756 netdev_err(qdev->ndev, "rx_ring->cnsmr_idx_db_reg = %p\n", ··· 1789 1790 netdev_err(qdev->ndev, "rx_ring->qdev = %p\n", rx_ring->qdev); 1790 1791 } 1791 1792 1792 - void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id) 1793 + void qlge_dump_hw_cb(struct qlge_adapter *qdev, int size, u32 bit, u16 q_id) 1793 1794 { 1794 1795 void *ptr; 1795 1796 ··· 1799 1800 if (!ptr) 1800 1801 return; 1801 1802 1802 - if (ql_write_cfg(qdev, ptr, size, bit, q_id)) { 1803 + if (qlge_write_cfg(qdev, ptr, size, bit, q_id)) { 1803 1804 netdev_err(qdev->ndev, "%s: Failed to upload control block!\n", __func__); 1804 1805 goto fail_it; 1805 1806 } 1806 1807 switch (bit) { 1807 1808 case CFG_DRQ: 1808 - ql_dump_wqicb((struct wqicb *)ptr); 1809 + qlge_dump_wqicb((struct wqicb *)ptr); 1809 1810 break; 1810 1811 case CFG_DCQ: 1811 - ql_dump_cqicb((struct cqicb *)ptr); 1812 + qlge_dump_cqicb((struct cqicb *)ptr); 1812 1813 break; 1813 1814 case CFG_DR: 1814 - ql_dump_ricb((struct ricb *)ptr); 1815 + qlge_dump_ricb((struct ricb *)ptr); 1815 1816 break; 1816 1817 default: 1817 1818 netdev_err(qdev->ndev, "%s: Invalid bit value = %x\n", __func__, bit); ··· 1823 1824 #endif 1824 1825 1825 1826 #ifdef QL_OB_DUMP 1826 - void ql_dump_tx_desc(struct ql_adapter *qdev, struct tx_buf_desc *tbd) 1827 + void qlge_dump_tx_desc(struct qlge_adapter *qdev, struct tx_buf_desc *tbd) 1827 1828 { 1828 1829 netdev_err(qdev->ndev, "tbd->addr = 0x%llx\n", 1829 1830 le64_to_cpu((u64)tbd->addr)); ··· 
1850 1851 tbd->len & TX_DESC_E ? "E" : "."); 1851 1852 } 1852 1853 1853 - void ql_dump_ob_mac_iocb(struct ql_adapter *qdev, struct ob_mac_iocb_req *ob_mac_iocb) 1854 + void qlge_dump_ob_mac_iocb(struct qlge_adapter *qdev, struct qlge_ob_mac_iocb_req *ob_mac_iocb) 1854 1855 { 1855 - struct ob_mac_tso_iocb_req *ob_mac_tso_iocb = 1856 - (struct ob_mac_tso_iocb_req *)ob_mac_iocb; 1856 + struct qlge_ob_mac_tso_iocb_req *ob_mac_tso_iocb = 1857 + (struct qlge_ob_mac_tso_iocb_req *)ob_mac_iocb; 1857 1858 struct tx_buf_desc *tbd; 1858 1859 u16 frame_len; 1859 1860 ··· 1893 1894 frame_len = le16_to_cpu(ob_mac_iocb->frame_len); 1894 1895 } 1895 1896 tbd = &ob_mac_iocb->tbd[0]; 1896 - ql_dump_tx_desc(qdev, tbd); 1897 + qlge_dump_tx_desc(qdev, tbd); 1897 1898 } 1898 1899 1899 - void ql_dump_ob_mac_rsp(struct ql_adapter *qdev, struct ob_mac_iocb_rsp *ob_mac_rsp) 1900 + void qlge_dump_ob_mac_rsp(struct qlge_adapter *qdev, struct qlge_ob_mac_iocb_rsp *ob_mac_rsp) 1900 1901 { 1901 1902 netdev_err(qdev->ndev, "%s\n", __func__); 1902 1903 netdev_err(qdev->ndev, "opcode = %d\n", ob_mac_rsp->opcode); 1903 1904 netdev_err(qdev->ndev, "flags = %s %s %s %s %s %s %s\n", 1904 1905 ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_OI ? 1905 - "OI" : ".", ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_I ? "I" : ".", 1906 + "OI" : ".", ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_I ? "I" : ".", 1906 1907 ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_E ? "E" : ".", 1907 1908 ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_S ? "S" : ".", 1908 1909 ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_L ? 
"L" : ".", ··· 1913 1914 #endif 1914 1915 1915 1916 #ifdef QL_IB_DUMP 1916 - void ql_dump_ib_mac_rsp(struct ql_adapter *qdev, struct ib_mac_iocb_rsp *ib_mac_rsp) 1917 + void qlge_dump_ib_mac_rsp(struct qlge_adapter *qdev, struct qlge_ib_mac_iocb_rsp *ib_mac_rsp) 1917 1918 { 1918 1919 netdev_err(qdev->ndev, "%s\n", __func__); 1919 1920 netdev_err(qdev->ndev, "opcode = 0x%x\n", ib_mac_rsp->opcode); ··· 1995 1996 #endif 1996 1997 1997 1998 #ifdef QL_ALL_DUMP 1998 - void ql_dump_all(struct ql_adapter *qdev) 1999 + void qlge_dump_all(struct qlge_adapter *qdev) 1999 2000 { 2000 2001 int i; 2001 2002
+115 -115
drivers/staging/qlge/qlge_ethtool.c
··· 34 34 35 35 #include "qlge.h" 36 36 37 - struct ql_stats { 37 + struct qlge_stats { 38 38 char stat_string[ETH_GSTRING_LEN]; 39 39 int sizeof_stat; 40 40 int stat_offset; 41 41 }; 42 42 43 - #define QL_SIZEOF(m) sizeof_field(struct ql_adapter, m) 44 - #define QL_OFF(m) offsetof(struct ql_adapter, m) 43 + #define QL_SIZEOF(m) sizeof_field(struct qlge_adapter, m) 44 + #define QL_OFF(m) offsetof(struct qlge_adapter, m) 45 45 46 - static const struct ql_stats ql_gstrings_stats[] = { 46 + static const struct qlge_stats qlge_gstrings_stats[] = { 47 47 {"tx_pkts", QL_SIZEOF(nic_stats.tx_pkts), QL_OFF(nic_stats.tx_pkts)}, 48 48 {"tx_bytes", QL_SIZEOF(nic_stats.tx_bytes), QL_OFF(nic_stats.tx_bytes)}, 49 49 {"tx_mcast_pkts", QL_SIZEOF(nic_stats.tx_mcast_pkts), ··· 175 175 QL_OFF(nic_stats.rx_nic_fifo_drop)}, 176 176 }; 177 177 178 - static const char ql_gstrings_test[][ETH_GSTRING_LEN] = { 178 + static const char qlge_gstrings_test[][ETH_GSTRING_LEN] = { 179 179 "Loopback test (offline)" 180 180 }; 181 181 182 - #define QLGE_TEST_LEN (sizeof(ql_gstrings_test) / ETH_GSTRING_LEN) 183 - #define QLGE_STATS_LEN ARRAY_SIZE(ql_gstrings_stats) 182 + #define QLGE_TEST_LEN (sizeof(qlge_gstrings_test) / ETH_GSTRING_LEN) 183 + #define QLGE_STATS_LEN ARRAY_SIZE(qlge_gstrings_stats) 184 184 #define QLGE_RCV_MAC_ERR_STATS 7 185 185 186 - static int ql_update_ring_coalescing(struct ql_adapter *qdev) 186 + static int qlge_update_ring_coalescing(struct qlge_adapter *qdev) 187 187 { 188 188 int i, status = 0; 189 189 struct rx_ring *rx_ring; ··· 203 203 cqicb = (struct cqicb *)rx_ring; 204 204 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs); 205 205 cqicb->pkt_delay = 206 - cpu_to_le16(qdev->tx_max_coalesced_frames); 206 + cpu_to_le16(qdev->tx_max_coalesced_frames); 207 207 cqicb->flags = FLAGS_LI; 208 - status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb), 209 - CFG_LCQ, rx_ring->cq_id); 208 + status = qlge_write_cfg(qdev, cqicb, sizeof(*cqicb), 209 + CFG_LCQ, rx_ring->cq_id); 210 
210 if (status) { 211 211 netif_err(qdev, ifup, qdev->ndev, 212 212 "Failed to load CQICB.\n"); ··· 224 224 cqicb = (struct cqicb *)rx_ring; 225 225 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs); 226 226 cqicb->pkt_delay = 227 - cpu_to_le16(qdev->rx_max_coalesced_frames); 227 + cpu_to_le16(qdev->rx_max_coalesced_frames); 228 228 cqicb->flags = FLAGS_LI; 229 - status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb), 230 - CFG_LCQ, rx_ring->cq_id); 229 + status = qlge_write_cfg(qdev, cqicb, sizeof(*cqicb), 230 + CFG_LCQ, rx_ring->cq_id); 231 231 if (status) { 232 232 netif_err(qdev, ifup, qdev->ndev, 233 233 "Failed to load CQICB.\n"); ··· 239 239 return status; 240 240 } 241 241 242 - static void ql_update_stats(struct ql_adapter *qdev) 242 + static void qlge_update_stats(struct qlge_adapter *qdev) 243 243 { 244 244 u32 i; 245 245 u64 data; 246 246 u64 *iter = &qdev->nic_stats.tx_pkts; 247 247 248 248 spin_lock(&qdev->stats_lock); 249 - if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) { 249 + if (qlge_sem_spinlock(qdev, qdev->xg_sem_mask)) { 250 250 netif_err(qdev, drv, qdev->ndev, 251 251 "Couldn't get xgmac sem.\n"); 252 252 goto quit; ··· 255 255 * Get TX statistics. 256 256 */ 257 257 for (i = 0x200; i < 0x280; i += 8) { 258 - if (ql_read_xgmac_reg64(qdev, i, &data)) { 258 + if (qlge_read_xgmac_reg64(qdev, i, &data)) { 259 259 netif_err(qdev, drv, qdev->ndev, 260 260 "Error reading status register 0x%.04x.\n", 261 261 i); ··· 270 270 * Get RX statistics. 271 271 */ 272 272 for (i = 0x300; i < 0x3d0; i += 8) { 273 - if (ql_read_xgmac_reg64(qdev, i, &data)) { 273 + if (qlge_read_xgmac_reg64(qdev, i, &data)) { 274 274 netif_err(qdev, drv, qdev->ndev, 275 275 "Error reading status register 0x%.04x.\n", 276 276 i); ··· 288 288 * Get Per-priority TX pause frame counter statistics. 
289 289 */ 290 290 for (i = 0x500; i < 0x540; i += 8) { 291 - if (ql_read_xgmac_reg64(qdev, i, &data)) { 291 + if (qlge_read_xgmac_reg64(qdev, i, &data)) { 292 292 netif_err(qdev, drv, qdev->ndev, 293 293 "Error reading status register 0x%.04x.\n", 294 294 i); ··· 303 303 * Get Per-priority RX pause frame counter statistics. 304 304 */ 305 305 for (i = 0x568; i < 0x5a8; i += 8) { 306 - if (ql_read_xgmac_reg64(qdev, i, &data)) { 306 + if (qlge_read_xgmac_reg64(qdev, i, &data)) { 307 307 netif_err(qdev, drv, qdev->ndev, 308 308 "Error reading status register 0x%.04x.\n", 309 309 i); ··· 317 317 /* 318 318 * Get RX NIC FIFO DROP statistics. 319 319 */ 320 - if (ql_read_xgmac_reg64(qdev, 0x5b8, &data)) { 320 + if (qlge_read_xgmac_reg64(qdev, 0x5b8, &data)) { 321 321 netif_err(qdev, drv, qdev->ndev, 322 322 "Error reading status register 0x%.04x.\n", i); 323 323 goto end; ··· 325 325 *iter = data; 326 326 } 327 327 end: 328 - ql_sem_unlock(qdev, qdev->xg_sem_mask); 328 + qlge_sem_unlock(qdev, qdev->xg_sem_mask); 329 329 quit: 330 330 spin_unlock(&qdev->stats_lock); 331 331 332 332 QL_DUMP_STAT(qdev); 333 333 } 334 334 335 - static void ql_get_strings(struct net_device *dev, u32 stringset, u8 *buf) 335 + static void qlge_get_strings(struct net_device *dev, u32 stringset, u8 *buf) 336 336 { 337 337 int index; 338 338 339 339 switch (stringset) { 340 340 case ETH_SS_TEST: 341 - memcpy(buf, *ql_gstrings_test, QLGE_TEST_LEN * ETH_GSTRING_LEN); 341 + memcpy(buf, *qlge_gstrings_test, QLGE_TEST_LEN * ETH_GSTRING_LEN); 342 342 break; 343 343 case ETH_SS_STATS: 344 344 for (index = 0; index < QLGE_STATS_LEN; index++) { 345 345 memcpy(buf + index * ETH_GSTRING_LEN, 346 - ql_gstrings_stats[index].stat_string, 346 + qlge_gstrings_stats[index].stat_string, 347 347 ETH_GSTRING_LEN); 348 348 } 349 349 break; 350 350 } 351 351 } 352 352 353 - static int ql_get_sset_count(struct net_device *dev, int sset) 353 + static int qlge_get_sset_count(struct net_device *dev, int sset) 354 354 { 
355 355 switch (sset) { 356 356 case ETH_SS_TEST: ··· 363 363 } 364 364 365 365 static void 366 - ql_get_ethtool_stats(struct net_device *ndev, 367 - struct ethtool_stats *stats, u64 *data) 366 + qlge_get_ethtool_stats(struct net_device *ndev, 367 + struct ethtool_stats *stats, u64 *data) 368 368 { 369 - struct ql_adapter *qdev = netdev_priv(ndev); 369 + struct qlge_adapter *qdev = netdev_priv(ndev); 370 370 int index, length; 371 371 372 372 length = QLGE_STATS_LEN; 373 - ql_update_stats(qdev); 373 + qlge_update_stats(qdev); 374 374 375 375 for (index = 0; index < length; index++) { 376 376 char *p = (char *)qdev + 377 - ql_gstrings_stats[index].stat_offset; 378 - *data++ = (ql_gstrings_stats[index].sizeof_stat == 379 - sizeof(u64)) ? *(u64 *)p : (*(u32 *)p); 377 + qlge_gstrings_stats[index].stat_offset; 378 + *data++ = (qlge_gstrings_stats[index].sizeof_stat == 379 + sizeof(u64)) ? *(u64 *)p : (*(u32 *)p); 380 380 } 381 381 } 382 382 383 - static int ql_get_link_ksettings(struct net_device *ndev, 384 - struct ethtool_link_ksettings *ecmd) 383 + static int qlge_get_link_ksettings(struct net_device *ndev, 384 + struct ethtool_link_ksettings *ecmd) 385 385 { 386 - struct ql_adapter *qdev = netdev_priv(ndev); 386 + struct qlge_adapter *qdev = netdev_priv(ndev); 387 387 u32 supported, advertising; 388 388 389 389 supported = SUPPORTED_10000baseT_Full; 390 390 advertising = ADVERTISED_10000baseT_Full; 391 391 392 392 if ((qdev->link_status & STS_LINK_TYPE_MASK) == 393 - STS_LINK_TYPE_10GBASET) { 393 + STS_LINK_TYPE_10GBASET) { 394 394 supported |= (SUPPORTED_TP | SUPPORTED_Autoneg); 395 395 advertising |= (ADVERTISED_TP | ADVERTISED_Autoneg); 396 396 ecmd->base.port = PORT_TP; ··· 412 412 return 0; 413 413 } 414 414 415 - static void ql_get_drvinfo(struct net_device *ndev, 416 - struct ethtool_drvinfo *drvinfo) 415 + static void qlge_get_drvinfo(struct net_device *ndev, 416 + struct ethtool_drvinfo *drvinfo) 417 417 { 418 - struct ql_adapter *qdev = netdev_priv(ndev); 
418 + struct qlge_adapter *qdev = netdev_priv(ndev); 419 419 420 420 strlcpy(drvinfo->driver, qlge_driver_name, sizeof(drvinfo->driver)); 421 421 strlcpy(drvinfo->version, qlge_driver_version, ··· 429 429 sizeof(drvinfo->bus_info)); 430 430 } 431 431 432 - static void ql_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) 432 + static void qlge_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) 433 433 { 434 - struct ql_adapter *qdev = netdev_priv(ndev); 434 + struct qlge_adapter *qdev = netdev_priv(ndev); 435 435 unsigned short ssys_dev = qdev->pdev->subsystem_device; 436 436 437 437 /* WOL is only supported for mezz card. */ ··· 442 442 } 443 443 } 444 444 445 - static int ql_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) 445 + static int qlge_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) 446 446 { 447 - struct ql_adapter *qdev = netdev_priv(ndev); 447 + struct qlge_adapter *qdev = netdev_priv(ndev); 448 448 unsigned short ssys_dev = qdev->pdev->subsystem_device; 449 449 450 450 /* WOL is only supported for mezz card. 
*/ ··· 462 462 return 0; 463 463 } 464 464 465 - static int ql_set_phys_id(struct net_device *ndev, 466 - enum ethtool_phys_id_state state) 465 + static int qlge_set_phys_id(struct net_device *ndev, 466 + enum ethtool_phys_id_state state) 467 467 468 468 { 469 - struct ql_adapter *qdev = netdev_priv(ndev); 469 + struct qlge_adapter *qdev = netdev_priv(ndev); 470 470 471 471 switch (state) { 472 472 case ETHTOOL_ID_ACTIVE: 473 473 /* Save the current LED settings */ 474 - if (ql_mb_get_led_cfg(qdev)) 474 + if (qlge_mb_get_led_cfg(qdev)) 475 475 return -EIO; 476 476 477 477 /* Start blinking */ 478 - ql_mb_set_led_cfg(qdev, QL_LED_BLINK); 478 + qlge_mb_set_led_cfg(qdev, QL_LED_BLINK); 479 479 return 0; 480 480 481 481 case ETHTOOL_ID_INACTIVE: 482 482 /* Restore LED settings */ 483 - if (ql_mb_set_led_cfg(qdev, qdev->led_config)) 483 + if (qlge_mb_set_led_cfg(qdev, qdev->led_config)) 484 484 return -EIO; 485 485 return 0; 486 486 ··· 489 489 } 490 490 } 491 491 492 - static int ql_start_loopback(struct ql_adapter *qdev) 492 + static int qlge_start_loopback(struct qlge_adapter *qdev) 493 493 { 494 494 if (netif_carrier_ok(qdev->ndev)) { 495 495 set_bit(QL_LB_LINK_UP, &qdev->flags); ··· 498 498 clear_bit(QL_LB_LINK_UP, &qdev->flags); 499 499 } 500 500 qdev->link_config |= CFG_LOOPBACK_PCS; 501 - return ql_mb_set_port_cfg(qdev); 501 + return qlge_mb_set_port_cfg(qdev); 502 502 } 503 503 504 - static void ql_stop_loopback(struct ql_adapter *qdev) 504 + static void qlge_stop_loopback(struct qlge_adapter *qdev) 505 505 { 506 506 qdev->link_config &= ~CFG_LOOPBACK_PCS; 507 - ql_mb_set_port_cfg(qdev); 507 + qlge_mb_set_port_cfg(qdev); 508 508 if (test_bit(QL_LB_LINK_UP, &qdev->flags)) { 509 509 netif_carrier_on(qdev->ndev); 510 510 clear_bit(QL_LB_LINK_UP, &qdev->flags); 511 511 } 512 512 } 513 513 514 - static void ql_create_lb_frame(struct sk_buff *skb, 515 - unsigned int frame_size) 514 + static void qlge_create_lb_frame(struct sk_buff *skb, 515 + unsigned int frame_size) 
516 516 { 517 517 memset(skb->data, 0xFF, frame_size); 518 518 frame_size &= ~1; ··· 521 521 skb->data[frame_size / 2 + 12] = (unsigned char)0xAF; 522 522 } 523 523 524 - void ql_check_lb_frame(struct ql_adapter *qdev, 525 - struct sk_buff *skb) 524 + void qlge_check_lb_frame(struct qlge_adapter *qdev, 525 + struct sk_buff *skb) 526 526 { 527 527 unsigned int frame_size = skb->len; 528 528 ··· 534 534 } 535 535 } 536 536 537 - static int ql_run_loopback_test(struct ql_adapter *qdev) 537 + static int qlge_run_loopback_test(struct qlge_adapter *qdev) 538 538 { 539 539 int i; 540 540 netdev_tx_t rc; ··· 548 548 549 549 skb->queue_mapping = 0; 550 550 skb_put(skb, size); 551 - ql_create_lb_frame(skb, size); 552 - rc = ql_lb_send(skb, qdev->ndev); 551 + qlge_create_lb_frame(skb, size); 552 + rc = qlge_lb_send(skb, qdev->ndev); 553 553 if (rc != NETDEV_TX_OK) 554 554 return -EPIPE; 555 555 atomic_inc(&qdev->lb_count); 556 556 } 557 557 /* Give queue time to settle before testing results. */ 558 558 msleep(2); 559 - ql_clean_lb_rx_ring(&qdev->rx_ring[0], 128); 559 + qlge_clean_lb_rx_ring(&qdev->rx_ring[0], 128); 560 560 return atomic_read(&qdev->lb_count) ? 
-EIO : 0; 561 561 } 562 562 563 - static int ql_loopback_test(struct ql_adapter *qdev, u64 *data) 563 + static int qlge_loopback_test(struct qlge_adapter *qdev, u64 *data) 564 564 { 565 - *data = ql_start_loopback(qdev); 565 + *data = qlge_start_loopback(qdev); 566 566 if (*data) 567 567 goto out; 568 - *data = ql_run_loopback_test(qdev); 568 + *data = qlge_run_loopback_test(qdev); 569 569 out: 570 - ql_stop_loopback(qdev); 570 + qlge_stop_loopback(qdev); 571 571 return *data; 572 572 } 573 573 574 - static void ql_self_test(struct net_device *ndev, 575 - struct ethtool_test *eth_test, u64 *data) 574 + static void qlge_self_test(struct net_device *ndev, 575 + struct ethtool_test *eth_test, u64 *data) 576 576 { 577 - struct ql_adapter *qdev = netdev_priv(ndev); 577 + struct qlge_adapter *qdev = netdev_priv(ndev); 578 578 579 579 memset(data, 0, sizeof(u64) * QLGE_TEST_LEN); 580 580 ··· 582 582 set_bit(QL_SELFTEST, &qdev->flags); 583 583 if (eth_test->flags == ETH_TEST_FL_OFFLINE) { 584 584 /* Offline tests */ 585 - if (ql_loopback_test(qdev, &data[0])) 585 + if (qlge_loopback_test(qdev, &data[0])) 586 586 eth_test->flags |= ETH_TEST_FL_FAILED; 587 587 588 588 } else { ··· 601 601 } 602 602 } 603 603 604 - static int ql_get_regs_len(struct net_device *ndev) 604 + static int qlge_get_regs_len(struct net_device *ndev) 605 605 { 606 - struct ql_adapter *qdev = netdev_priv(ndev); 606 + struct qlge_adapter *qdev = netdev_priv(ndev); 607 607 608 608 if (!test_bit(QL_FRC_COREDUMP, &qdev->flags)) 609 - return sizeof(struct ql_mpi_coredump); 609 + return sizeof(struct qlge_mpi_coredump); 610 610 else 611 - return sizeof(struct ql_reg_dump); 611 + return sizeof(struct qlge_reg_dump); 612 612 } 613 613 614 - static void ql_get_regs(struct net_device *ndev, 615 - struct ethtool_regs *regs, void *p) 614 + static void qlge_get_regs(struct net_device *ndev, 615 + struct ethtool_regs *regs, void *p) 616 616 { 617 - struct ql_adapter *qdev = netdev_priv(ndev); 617 + struct 
qlge_adapter *qdev = netdev_priv(ndev); 618 618 619 - ql_get_dump(qdev, p); 619 + qlge_get_dump(qdev, p); 620 620 qdev->core_is_dumped = 0; 621 621 if (!test_bit(QL_FRC_COREDUMP, &qdev->flags)) 622 - regs->len = sizeof(struct ql_mpi_coredump); 622 + regs->len = sizeof(struct qlge_mpi_coredump); 623 623 else 624 - regs->len = sizeof(struct ql_reg_dump); 624 + regs->len = sizeof(struct qlge_reg_dump); 625 625 } 626 626 627 - static int ql_get_coalesce(struct net_device *dev, struct ethtool_coalesce *c) 627 + static int qlge_get_coalesce(struct net_device *dev, struct ethtool_coalesce *c) 628 628 { 629 - struct ql_adapter *qdev = netdev_priv(dev); 629 + struct qlge_adapter *qdev = netdev_priv(dev); 630 630 631 631 c->rx_coalesce_usecs = qdev->rx_coalesce_usecs; 632 632 c->tx_coalesce_usecs = qdev->tx_coalesce_usecs; ··· 647 647 return 0; 648 648 } 649 649 650 - static int ql_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *c) 650 + static int qlge_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *c) 651 651 { 652 - struct ql_adapter *qdev = netdev_priv(ndev); 652 + struct qlge_adapter *qdev = netdev_priv(ndev); 653 653 654 654 /* Validate user parameters. */ 655 655 if (c->rx_coalesce_usecs > qdev->rx_ring_size / 2) 656 656 return -EINVAL; 657 - /* Don't wait more than 10 usec. */ 657 + /* Don't wait more than 10 usec. 
*/ 658 658 if (c->rx_max_coalesced_frames > MAX_INTER_FRAME_WAIT) 659 659 return -EINVAL; 660 660 if (c->tx_coalesce_usecs > qdev->tx_ring_size / 2) ··· 674 674 qdev->rx_max_coalesced_frames = c->rx_max_coalesced_frames; 675 675 qdev->tx_max_coalesced_frames = c->tx_max_coalesced_frames; 676 676 677 - return ql_update_ring_coalescing(qdev); 677 + return qlge_update_ring_coalescing(qdev); 678 678 } 679 679 680 - static void ql_get_pauseparam(struct net_device *netdev, 681 - struct ethtool_pauseparam *pause) 680 + static void qlge_get_pauseparam(struct net_device *netdev, 681 + struct ethtool_pauseparam *pause) 682 682 { 683 - struct ql_adapter *qdev = netdev_priv(netdev); 683 + struct qlge_adapter *qdev = netdev_priv(netdev); 684 684 685 - ql_mb_get_port_cfg(qdev); 685 + qlge_mb_get_port_cfg(qdev); 686 686 if (qdev->link_config & CFG_PAUSE_STD) { 687 687 pause->rx_pause = 1; 688 688 pause->tx_pause = 1; 689 689 } 690 690 } 691 691 692 - static int ql_set_pauseparam(struct net_device *netdev, 693 - struct ethtool_pauseparam *pause) 692 + static int qlge_set_pauseparam(struct net_device *netdev, 693 + struct ethtool_pauseparam *pause) 694 694 { 695 - struct ql_adapter *qdev = netdev_priv(netdev); 695 + struct qlge_adapter *qdev = netdev_priv(netdev); 696 696 697 697 if ((pause->rx_pause) && (pause->tx_pause)) 698 698 qdev->link_config |= CFG_PAUSE_STD; ··· 701 701 else 702 702 return -EINVAL; 703 703 704 - return ql_mb_set_port_cfg(qdev); 704 + return qlge_mb_set_port_cfg(qdev); 705 705 } 706 706 707 - static u32 ql_get_msglevel(struct net_device *ndev) 707 + static u32 qlge_get_msglevel(struct net_device *ndev) 708 708 { 709 - struct ql_adapter *qdev = netdev_priv(ndev); 709 + struct qlge_adapter *qdev = netdev_priv(ndev); 710 710 711 711 return qdev->msg_enable; 712 712 } 713 713 714 - static void ql_set_msglevel(struct net_device *ndev, u32 value) 714 + static void qlge_set_msglevel(struct net_device *ndev, u32 value) 715 715 { 716 - struct ql_adapter *qdev = 
netdev_priv(ndev); 716 + struct qlge_adapter *qdev = netdev_priv(ndev); 717 717 718 718 qdev->msg_enable = value; 719 719 } ··· 721 721 const struct ethtool_ops qlge_ethtool_ops = { 722 722 .supported_coalesce_params = ETHTOOL_COALESCE_USECS | 723 723 ETHTOOL_COALESCE_MAX_FRAMES, 724 - .get_drvinfo = ql_get_drvinfo, 725 - .get_wol = ql_get_wol, 726 - .set_wol = ql_set_wol, 727 - .get_regs_len = ql_get_regs_len, 728 - .get_regs = ql_get_regs, 729 - .get_msglevel = ql_get_msglevel, 730 - .set_msglevel = ql_set_msglevel, 724 + .get_drvinfo = qlge_get_drvinfo, 725 + .get_wol = qlge_get_wol, 726 + .set_wol = qlge_set_wol, 727 + .get_regs_len = qlge_get_regs_len, 728 + .get_regs = qlge_get_regs, 729 + .get_msglevel = qlge_get_msglevel, 730 + .set_msglevel = qlge_set_msglevel, 731 731 .get_link = ethtool_op_get_link, 732 - .set_phys_id = ql_set_phys_id, 733 - .self_test = ql_self_test, 734 - .get_pauseparam = ql_get_pauseparam, 735 - .set_pauseparam = ql_set_pauseparam, 736 - .get_coalesce = ql_get_coalesce, 737 - .set_coalesce = ql_set_coalesce, 738 - .get_sset_count = ql_get_sset_count, 739 - .get_strings = ql_get_strings, 740 - .get_ethtool_stats = ql_get_ethtool_stats, 741 - .get_link_ksettings = ql_get_link_ksettings, 732 + .set_phys_id = qlge_set_phys_id, 733 + .self_test = qlge_self_test, 734 + .get_pauseparam = qlge_get_pauseparam, 735 + .set_pauseparam = qlge_set_pauseparam, 736 + .get_coalesce = qlge_get_coalesce, 737 + .set_coalesce = qlge_set_coalesce, 738 + .get_sset_count = qlge_get_sset_count, 739 + .get_strings = qlge_get_strings, 740 + .get_ethtool_stats = qlge_get_ethtool_stats, 741 + .get_link_ksettings = qlge_get_link_ksettings, 742 742 }; 743 743
+639 -639
drivers/staging/qlge/qlge_main.c
··· 89 89 90 90 MODULE_DEVICE_TABLE(pci, qlge_pci_tbl); 91 91 92 - static int ql_wol(struct ql_adapter *); 92 + static int qlge_wol(struct qlge_adapter *); 93 93 static void qlge_set_multicast_list(struct net_device *); 94 - static int ql_adapter_down(struct ql_adapter *); 95 - static int ql_adapter_up(struct ql_adapter *); 94 + static int qlge_adapter_down(struct qlge_adapter *); 95 + static int qlge_adapter_up(struct qlge_adapter *); 96 96 97 97 /* This hardware semaphore causes exclusive access to 98 98 * resources shared between the NIC driver, MPI firmware, 99 99 * FCOE firmware and the FC driver. 100 100 */ 101 - static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask) 101 + static int qlge_sem_trylock(struct qlge_adapter *qdev, u32 sem_mask) 102 102 { 103 103 u32 sem_bits = 0; 104 104 ··· 132 132 return -EINVAL; 133 133 } 134 134 135 - ql_write32(qdev, SEM, sem_bits | sem_mask); 136 - return !(ql_read32(qdev, SEM) & sem_bits); 135 + qlge_write32(qdev, SEM, sem_bits | sem_mask); 136 + return !(qlge_read32(qdev, SEM) & sem_bits); 137 137 } 138 138 139 - int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask) 139 + int qlge_sem_spinlock(struct qlge_adapter *qdev, u32 sem_mask) 140 140 { 141 141 unsigned int wait_count = 30; 142 142 143 143 do { 144 - if (!ql_sem_trylock(qdev, sem_mask)) 144 + if (!qlge_sem_trylock(qdev, sem_mask)) 145 145 return 0; 146 146 udelay(100); 147 147 } while (--wait_count); 148 148 return -ETIMEDOUT; 149 149 } 150 150 151 - void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask) 151 + void qlge_sem_unlock(struct qlge_adapter *qdev, u32 sem_mask) 152 152 { 153 - ql_write32(qdev, SEM, sem_mask); 154 - ql_read32(qdev, SEM); /* flush */ 153 + qlge_write32(qdev, SEM, sem_mask); 154 + qlge_read32(qdev, SEM); /* flush */ 155 155 } 156 156 157 157 /* This function waits for a specific bit to come ready ··· 159 159 * process, but is also used in kernel thread API such as 160 160 * netdev->set_multi, netdev->set_mac_address, 
netdev->vlan_rx_add_vid. 161 161 */ 162 - int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit) 162 + int qlge_wait_reg_rdy(struct qlge_adapter *qdev, u32 reg, u32 bit, u32 err_bit) 163 163 { 164 164 u32 temp; 165 165 int count; 166 166 167 167 for (count = 0; count < UDELAY_COUNT; count++) { 168 - temp = ql_read32(qdev, reg); 168 + temp = qlge_read32(qdev, reg); 169 169 170 170 /* check for errors */ 171 171 if (temp & err_bit) { ··· 186 186 /* The CFG register is used to download TX and RX control blocks 187 187 * to the chip. This function waits for an operation to complete. 188 188 */ 189 - static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit) 189 + static int qlge_wait_cfg(struct qlge_adapter *qdev, u32 bit) 190 190 { 191 191 int count; 192 192 u32 temp; 193 193 194 194 for (count = 0; count < UDELAY_COUNT; count++) { 195 - temp = ql_read32(qdev, CFG); 195 + temp = qlge_read32(qdev, CFG); 196 196 if (temp & CFG_LE) 197 197 return -EIO; 198 198 if (!(temp & bit)) ··· 205 205 /* Used to issue init control blocks to hw. Maps control block, 206 206 * sets address, triggers download, waits for completion. 
207 207 */ 208 - int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit, 209 - u16 q_id) 208 + int qlge_write_cfg(struct qlge_adapter *qdev, void *ptr, int size, u32 bit, 209 + u16 q_id) 210 210 { 211 211 u64 map; 212 212 int status = 0; ··· 225 225 return -ENOMEM; 226 226 } 227 227 228 - status = ql_sem_spinlock(qdev, SEM_ICB_MASK); 228 + status = qlge_sem_spinlock(qdev, SEM_ICB_MASK); 229 229 if (status) 230 230 goto lock_failed; 231 231 232 - status = ql_wait_cfg(qdev, bit); 232 + status = qlge_wait_cfg(qdev, bit); 233 233 if (status) { 234 234 netif_err(qdev, ifup, qdev->ndev, 235 235 "Timed out waiting for CFG to come ready.\n"); 236 236 goto exit; 237 237 } 238 238 239 - ql_write32(qdev, ICB_L, (u32)map); 240 - ql_write32(qdev, ICB_H, (u32)(map >> 32)); 239 + qlge_write32(qdev, ICB_L, (u32)map); 240 + qlge_write32(qdev, ICB_H, (u32)(map >> 32)); 241 241 242 242 mask = CFG_Q_MASK | (bit << 16); 243 243 value = bit | (q_id << CFG_Q_SHIFT); 244 - ql_write32(qdev, CFG, (mask | value)); 244 + qlge_write32(qdev, CFG, (mask | value)); 245 245 246 246 /* 247 247 * Wait for the bit to clear after signaling hw. 248 248 */ 249 - status = ql_wait_cfg(qdev, bit); 249 + status = qlge_wait_cfg(qdev, bit); 250 250 exit: 251 - ql_sem_unlock(qdev, SEM_ICB_MASK); /* does flush too */ 251 + qlge_sem_unlock(qdev, SEM_ICB_MASK); /* does flush too */ 252 252 lock_failed: 253 253 dma_unmap_single(&qdev->pdev->dev, map, size, direction); 254 254 return status; 255 255 } 256 256 257 257 /* Get a specific MAC address from the CAM. Used for debug and reg dump. 
*/ 258 - int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index, 259 - u32 *value) 258 + int qlge_get_mac_addr_reg(struct qlge_adapter *qdev, u32 type, u16 index, 259 + u32 *value) 260 260 { 261 261 u32 offset = 0; 262 262 int status; ··· 264 264 switch (type) { 265 265 case MAC_ADDR_TYPE_MULTI_MAC: 266 266 case MAC_ADDR_TYPE_CAM_MAC: { 267 - status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0); 267 + status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0); 268 268 if (status) 269 269 break; 270 - ql_write32(qdev, MAC_ADDR_IDX, 271 - (offset++) | /* offset */ 270 + qlge_write32(qdev, MAC_ADDR_IDX, 271 + (offset++) | /* offset */ 272 272 (index << MAC_ADDR_IDX_SHIFT) | /* index */ 273 273 MAC_ADDR_ADR | MAC_ADDR_RS | 274 274 type); /* type */ 275 - status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MR, 0); 275 + status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MR, 0); 276 276 if (status) 277 277 break; 278 - *value++ = ql_read32(qdev, MAC_ADDR_DATA); 279 - status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0); 278 + *value++ = qlge_read32(qdev, MAC_ADDR_DATA); 279 + status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0); 280 280 if (status) 281 281 break; 282 - ql_write32(qdev, MAC_ADDR_IDX, 283 - (offset++) | /* offset */ 282 + qlge_write32(qdev, MAC_ADDR_IDX, 283 + (offset++) | /* offset */ 284 284 (index << MAC_ADDR_IDX_SHIFT) | /* index */ 285 285 MAC_ADDR_ADR | MAC_ADDR_RS | 286 286 type); /* type */ 287 - status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MR, 0); 287 + status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MR, 0); 288 288 if (status) 289 289 break; 290 - *value++ = ql_read32(qdev, MAC_ADDR_DATA); 290 + *value++ = qlge_read32(qdev, MAC_ADDR_DATA); 291 291 if (type == MAC_ADDR_TYPE_CAM_MAC) { 292 - status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, 293 - MAC_ADDR_MW, 0); 292 + status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, 293 + MAC_ADDR_MW, 0); 294 294 if (status) 295 295 break; 296 - 
ql_write32(qdev, MAC_ADDR_IDX, 297 - (offset++) | /* offset */ 296 + qlge_write32(qdev, MAC_ADDR_IDX, 297 + (offset++) | /* offset */ 298 298 (index 299 299 << MAC_ADDR_IDX_SHIFT) | /* index */ 300 300 MAC_ADDR_ADR | 301 301 MAC_ADDR_RS | type); /* type */ 302 - status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, 303 - MAC_ADDR_MR, 0); 302 + status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, 303 + MAC_ADDR_MR, 0); 304 304 if (status) 305 305 break; 306 - *value++ = ql_read32(qdev, MAC_ADDR_DATA); 306 + *value++ = qlge_read32(qdev, MAC_ADDR_DATA); 307 307 } 308 308 break; 309 309 } ··· 320 320 /* Set up a MAC, multicast or VLAN address for the 321 321 * inbound frame matching. 322 322 */ 323 - static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type, 324 - u16 index) 323 + static int qlge_set_mac_addr_reg(struct qlge_adapter *qdev, u8 *addr, u32 type, 324 + u16 index) 325 325 { 326 326 u32 offset = 0; 327 327 int status = 0; ··· 332 332 u32 lower = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) | 333 333 (addr[5]); 334 334 335 - status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0); 335 + status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0); 336 336 if (status) 337 337 break; 338 - ql_write32(qdev, MAC_ADDR_IDX, 339 - (offset++) | (index << MAC_ADDR_IDX_SHIFT) | type | 338 + qlge_write32(qdev, MAC_ADDR_IDX, 339 + (offset++) | (index << MAC_ADDR_IDX_SHIFT) | type | 340 340 MAC_ADDR_E); 341 - ql_write32(qdev, MAC_ADDR_DATA, lower); 342 - status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0); 341 + qlge_write32(qdev, MAC_ADDR_DATA, lower); 342 + status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0); 343 343 if (status) 344 344 break; 345 - ql_write32(qdev, MAC_ADDR_IDX, 346 - (offset++) | (index << MAC_ADDR_IDX_SHIFT) | type | 345 + qlge_write32(qdev, MAC_ADDR_IDX, 346 + (offset++) | (index << MAC_ADDR_IDX_SHIFT) | type | 347 347 MAC_ADDR_E); 348 348 349 - ql_write32(qdev, MAC_ADDR_DATA, upper); 350 - status = 
ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0); 349 + qlge_write32(qdev, MAC_ADDR_DATA, upper); 350 + status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0); 351 351 break; 352 352 } 353 353 case MAC_ADDR_TYPE_CAM_MAC: { ··· 355 355 u32 upper = (addr[0] << 8) | addr[1]; 356 356 u32 lower = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) | 357 357 (addr[5]); 358 - status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0); 358 + status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0); 359 359 if (status) 360 360 break; 361 - ql_write32(qdev, MAC_ADDR_IDX, 362 - (offset++) | /* offset */ 361 + qlge_write32(qdev, MAC_ADDR_IDX, 362 + (offset++) | /* offset */ 363 363 (index << MAC_ADDR_IDX_SHIFT) | /* index */ 364 364 type); /* type */ 365 - ql_write32(qdev, MAC_ADDR_DATA, lower); 366 - status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0); 365 + qlge_write32(qdev, MAC_ADDR_DATA, lower); 366 + status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0); 367 367 if (status) 368 368 break; 369 - ql_write32(qdev, MAC_ADDR_IDX, 370 - (offset++) | /* offset */ 369 + qlge_write32(qdev, MAC_ADDR_IDX, 370 + (offset++) | /* offset */ 371 371 (index << MAC_ADDR_IDX_SHIFT) | /* index */ 372 372 type); /* type */ 373 - ql_write32(qdev, MAC_ADDR_DATA, upper); 374 - status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0); 373 + qlge_write32(qdev, MAC_ADDR_DATA, upper); 374 + status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0); 375 375 if (status) 376 376 break; 377 - ql_write32(qdev, MAC_ADDR_IDX, 378 - (offset) | /* offset */ 377 + qlge_write32(qdev, MAC_ADDR_IDX, 378 + (offset) | /* offset */ 379 379 (index << MAC_ADDR_IDX_SHIFT) | /* index */ 380 380 type); /* type */ 381 381 /* This field should also include the queue id ··· 388 388 if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) 389 389 cam_output |= CAM_OUT_RV; 390 390 /* route to NIC core */ 391 - ql_write32(qdev, MAC_ADDR_DATA, cam_output); 391 + qlge_write32(qdev, 
MAC_ADDR_DATA, cam_output); 392 392 break; 393 393 } 394 394 case MAC_ADDR_TYPE_VLAN: { ··· 398 398 * addressing. It's either MAC_ADDR_E on or off. 399 399 * That's bit-27 we're talking about. 400 400 */ 401 - status = ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0); 401 + status = qlge_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0); 402 402 if (status) 403 403 break; 404 - ql_write32(qdev, MAC_ADDR_IDX, 405 - offset | /* offset */ 404 + qlge_write32(qdev, MAC_ADDR_IDX, 405 + offset | /* offset */ 406 406 (index << MAC_ADDR_IDX_SHIFT) | /* index */ 407 407 type | /* type */ 408 408 enable_bit); /* enable/disable */ ··· 421 421 * have to clear it to prevent wrong frame routing 422 422 * especially in a bonding environment. 423 423 */ 424 - static int ql_set_mac_addr(struct ql_adapter *qdev, int set) 424 + static int qlge_set_mac_addr(struct qlge_adapter *qdev, int set) 425 425 { 426 426 int status; 427 427 char zero_mac_addr[ETH_ALEN]; ··· 437 437 netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, 438 438 "Clearing MAC address\n"); 439 439 } 440 - status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); 440 + status = qlge_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); 441 441 if (status) 442 442 return status; 443 - status = ql_set_mac_addr_reg(qdev, (u8 *)addr, 444 - MAC_ADDR_TYPE_CAM_MAC, 445 - qdev->func * MAX_CQ); 446 - ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); 443 + status = qlge_set_mac_addr_reg(qdev, (u8 *)addr, 444 + MAC_ADDR_TYPE_CAM_MAC, 445 + qdev->func * MAX_CQ); 446 + qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK); 447 447 if (status) 448 448 netif_err(qdev, ifup, qdev->ndev, 449 449 "Failed to init mac address.\n"); 450 450 return status; 451 451 } 452 452 453 - void ql_link_on(struct ql_adapter *qdev) 453 + void qlge_link_on(struct qlge_adapter *qdev) 454 454 { 455 455 netif_err(qdev, link, qdev->ndev, "Link is up.\n"); 456 456 netif_carrier_on(qdev->ndev); 457 - ql_set_mac_addr(qdev, 1); 457 + qlge_set_mac_addr(qdev, 1); 458 458 } 459 459 460 - void 
ql_link_off(struct ql_adapter *qdev) 460 + void qlge_link_off(struct qlge_adapter *qdev) 461 461 { 462 462 netif_err(qdev, link, qdev->ndev, "Link is down.\n"); 463 463 netif_carrier_off(qdev->ndev); 464 - ql_set_mac_addr(qdev, 0); 464 + qlge_set_mac_addr(qdev, 0); 465 465 } 466 466 467 467 /* Get a specific frame routing value from the CAM. 468 468 * Used for debug and reg dump. 469 469 */ 470 - int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value) 470 + int qlge_get_routing_reg(struct qlge_adapter *qdev, u32 index, u32 *value) 471 471 { 472 472 int status = 0; 473 473 474 - status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0); 474 + status = qlge_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0); 475 475 if (status) 476 476 goto exit; 477 477 478 - ql_write32(qdev, RT_IDX, 479 - RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT)); 480 - status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0); 478 + qlge_write32(qdev, RT_IDX, 479 + RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT)); 480 + status = qlge_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0); 481 481 if (status) 482 482 goto exit; 483 - *value = ql_read32(qdev, RT_DATA); 483 + *value = qlge_read32(qdev, RT_DATA); 484 484 exit: 485 485 return status; 486 486 } ··· 490 490 * multicast/error frames to the default queue for slow handling, 491 491 * and CAM hit/RSS frames to the fast handling queues. 492 492 */ 493 - static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask, 494 - int enable) 493 + static int qlge_set_routing_reg(struct qlge_adapter *qdev, u32 index, u32 mask, 494 + int enable) 495 495 { 496 496 int status = -EINVAL; /* Return error if no mask match. */ 497 497 u32 value = 0; ··· 577 577 } 578 578 579 579 if (value) { 580 - status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0); 580 + status = qlge_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0); 581 581 if (status) 582 582 goto exit; 583 583 value |= (enable ? 
RT_IDX_E : 0); 584 - ql_write32(qdev, RT_IDX, value); 585 - ql_write32(qdev, RT_DATA, enable ? mask : 0); 584 + qlge_write32(qdev, RT_IDX, value); 585 + qlge_write32(qdev, RT_DATA, enable ? mask : 0); 586 586 } 587 587 exit: 588 588 return status; 589 589 } 590 590 591 - static void ql_enable_interrupts(struct ql_adapter *qdev) 591 + static void qlge_enable_interrupts(struct qlge_adapter *qdev) 592 592 { 593 - ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI); 593 + qlge_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI); 594 594 } 595 595 596 - static void ql_disable_interrupts(struct ql_adapter *qdev) 596 + static void qlge_disable_interrupts(struct qlge_adapter *qdev) 597 597 { 598 - ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16)); 598 + qlge_write32(qdev, INTR_EN, (INTR_EN_EI << 16)); 599 599 } 600 600 601 - static void ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr) 602 - { 603 - struct intr_context *ctx = &qdev->intr_context[intr]; 604 - 605 - ql_write32(qdev, INTR_EN, ctx->intr_en_mask); 606 - } 607 - 608 - static void ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr) 601 + static void qlge_enable_completion_interrupt(struct qlge_adapter *qdev, u32 intr) 609 602 { 610 603 struct intr_context *ctx = &qdev->intr_context[intr]; 611 604 612 - ql_write32(qdev, INTR_EN, ctx->intr_dis_mask); 605 + qlge_write32(qdev, INTR_EN, ctx->intr_en_mask); 613 606 } 614 607 615 - static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev) 608 + static void qlge_disable_completion_interrupt(struct qlge_adapter *qdev, u32 intr) 609 + { 610 + struct intr_context *ctx = &qdev->intr_context[intr]; 611 + 612 + qlge_write32(qdev, INTR_EN, ctx->intr_dis_mask); 613 + } 614 + 615 + static void qlge_enable_all_completion_interrupts(struct qlge_adapter *qdev) 616 616 { 617 617 int i; 618 618 619 619 for (i = 0; i < qdev->intr_count; i++) 620 - ql_enable_completion_interrupt(qdev, i); 620 + qlge_enable_completion_interrupt(qdev, 
i); 621 621 } 622 622 623 - static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str) 623 + static int qlge_validate_flash(struct qlge_adapter *qdev, u32 size, const char *str) 624 624 { 625 625 int status, i; 626 626 u16 csum = 0; ··· 642 642 return csum; 643 643 } 644 644 645 - static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data) 645 + static int qlge_read_flash_word(struct qlge_adapter *qdev, int offset, __le32 *data) 646 646 { 647 647 int status = 0; 648 648 /* wait for reg to come ready */ 649 - status = ql_wait_reg_rdy(qdev, 650 - FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR); 649 + status = qlge_wait_reg_rdy(qdev, 650 + FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR); 651 651 if (status) 652 652 goto exit; 653 653 /* set up for reg read */ 654 - ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset); 654 + qlge_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset); 655 655 /* wait for reg to come ready */ 656 - status = ql_wait_reg_rdy(qdev, 657 - FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR); 656 + status = qlge_wait_reg_rdy(qdev, 657 + FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR); 658 658 if (status) 659 659 goto exit; 660 660 /* This data is stored on flash as an array of 661 - * __le32. Since ql_read32() returns cpu endian 661 + * __le32. Since qlge_read32() returns cpu endian 662 662 * we need to swap it back. 
663 663 */ 664 - *data = cpu_to_le32(ql_read32(qdev, FLASH_DATA)); 664 + *data = cpu_to_le32(qlge_read32(qdev, FLASH_DATA)); 665 665 exit: 666 666 return status; 667 667 } 668 668 669 - static int ql_get_8000_flash_params(struct ql_adapter *qdev) 669 + static int qlge_get_8000_flash_params(struct qlge_adapter *qdev) 670 670 { 671 671 u32 i, size; 672 672 int status; ··· 682 682 else 683 683 offset = FUNC1_FLASH_OFFSET / sizeof(u32); 684 684 685 - if (ql_sem_spinlock(qdev, SEM_FLASH_MASK)) 685 + if (qlge_sem_spinlock(qdev, SEM_FLASH_MASK)) 686 686 return -ETIMEDOUT; 687 687 688 688 size = sizeof(struct flash_params_8000) / sizeof(u32); 689 689 for (i = 0; i < size; i++, p++) { 690 - status = ql_read_flash_word(qdev, i + offset, p); 690 + status = qlge_read_flash_word(qdev, i + offset, p); 691 691 if (status) { 692 692 netif_err(qdev, ifup, qdev->ndev, 693 693 "Error reading flash.\n"); ··· 695 695 } 696 696 } 697 697 698 - status = ql_validate_flash(qdev, 699 - sizeof(struct flash_params_8000) / 698 + status = qlge_validate_flash(qdev, 699 + sizeof(struct flash_params_8000) / 700 700 sizeof(u16), 701 701 "8000"); 702 702 if (status) { ··· 728 728 qdev->ndev->addr_len); 729 729 730 730 exit: 731 - ql_sem_unlock(qdev, SEM_FLASH_MASK); 731 + qlge_sem_unlock(qdev, SEM_FLASH_MASK); 732 732 return status; 733 733 } 734 734 735 - static int ql_get_8012_flash_params(struct ql_adapter *qdev) 735 + static int qlge_get_8012_flash_params(struct qlge_adapter *qdev) 736 736 { 737 737 int i; 738 738 int status; ··· 746 746 if (qdev->port) 747 747 offset = size; 748 748 749 - if (ql_sem_spinlock(qdev, SEM_FLASH_MASK)) 749 + if (qlge_sem_spinlock(qdev, SEM_FLASH_MASK)) 750 750 return -ETIMEDOUT; 751 751 752 752 for (i = 0; i < size; i++, p++) { 753 - status = ql_read_flash_word(qdev, i + offset, p); 753 + status = qlge_read_flash_word(qdev, i + offset, p); 754 754 if (status) { 755 755 netif_err(qdev, ifup, qdev->ndev, 756 756 "Error reading flash.\n"); ··· 758 758 } 759 759 } 760 
760 761 - status = ql_validate_flash(qdev, 762 - sizeof(struct flash_params_8012) / 763 - sizeof(u16), 764 - "8012"); 761 + status = qlge_validate_flash(qdev, 762 + sizeof(struct flash_params_8012) / 763 + sizeof(u16), 764 + "8012"); 765 765 if (status) { 766 766 netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n"); 767 767 status = -EINVAL; ··· 778 778 qdev->ndev->addr_len); 779 779 780 780 exit: 781 - ql_sem_unlock(qdev, SEM_FLASH_MASK); 781 + qlge_sem_unlock(qdev, SEM_FLASH_MASK); 782 782 return status; 783 783 } 784 784 ··· 786 786 * register pair. Each read/write requires us to wait for the ready 787 787 * bit before reading/writing the data. 788 788 */ 789 - static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data) 789 + static int qlge_write_xgmac_reg(struct qlge_adapter *qdev, u32 reg, u32 data) 790 790 { 791 791 int status; 792 792 /* wait for reg to come ready */ 793 - status = ql_wait_reg_rdy(qdev, 794 - XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME); 793 + status = qlge_wait_reg_rdy(qdev, 794 + XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME); 795 795 if (status) 796 796 return status; 797 797 /* write the data to the data reg */ 798 - ql_write32(qdev, XGMAC_DATA, data); 798 + qlge_write32(qdev, XGMAC_DATA, data); 799 799 /* trigger the write */ 800 - ql_write32(qdev, XGMAC_ADDR, reg); 800 + qlge_write32(qdev, XGMAC_ADDR, reg); 801 801 return status; 802 802 } 803 803 ··· 805 805 * register pair. Each read/write requires us to wait for the ready 806 806 * bit before reading/writing the data. 
807 807 */ 808 - int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data) 808 + int qlge_read_xgmac_reg(struct qlge_adapter *qdev, u32 reg, u32 *data) 809 809 { 810 810 int status = 0; 811 811 /* wait for reg to come ready */ 812 - status = ql_wait_reg_rdy(qdev, 813 - XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME); 812 + status = qlge_wait_reg_rdy(qdev, 813 + XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME); 814 814 if (status) 815 815 goto exit; 816 816 /* set up for reg read */ 817 - ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R); 817 + qlge_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R); 818 818 /* wait for reg to come ready */ 819 - status = ql_wait_reg_rdy(qdev, 820 - XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME); 819 + status = qlge_wait_reg_rdy(qdev, 820 + XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME); 821 821 if (status) 822 822 goto exit; 823 823 /* get the data */ 824 - *data = ql_read32(qdev, XGMAC_DATA); 824 + *data = qlge_read32(qdev, XGMAC_DATA); 825 825 exit: 826 826 return status; 827 827 } 828 828 829 829 /* This is used for reading the 64-bit statistics regs. */ 830 - int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data) 830 + int qlge_read_xgmac_reg64(struct qlge_adapter *qdev, u32 reg, u64 *data) 831 831 { 832 832 int status = 0; 833 833 u32 hi = 0; 834 834 u32 lo = 0; 835 835 836 - status = ql_read_xgmac_reg(qdev, reg, &lo); 836 + status = qlge_read_xgmac_reg(qdev, reg, &lo); 837 837 if (status) 838 838 goto exit; 839 839 840 - status = ql_read_xgmac_reg(qdev, reg + 4, &hi); 840 + status = qlge_read_xgmac_reg(qdev, reg + 4, &hi); 841 841 if (status) 842 842 goto exit; 843 843 ··· 847 847 return status; 848 848 } 849 849 850 - static int ql_8000_port_initialize(struct ql_adapter *qdev) 850 + static int qlge_8000_port_initialize(struct qlge_adapter *qdev) 851 851 { 852 852 int status; 853 853 /* 854 854 * Get MPI firmware version for driver banner 855 855 * and ethool info. 
856 856 */ 857 - status = ql_mb_about_fw(qdev); 857 + status = qlge_mb_about_fw(qdev); 858 858 if (status) 859 859 goto exit; 860 - status = ql_mb_get_fw_state(qdev); 860 + status = qlge_mb_get_fw_state(qdev); 861 861 if (status) 862 862 goto exit; 863 863 /* Wake up a worker to get/set the TX/RX frame sizes. */ ··· 872 872 * This functionality may be done in the MPI firmware at a 873 873 * later date. 874 874 */ 875 - static int ql_8012_port_initialize(struct ql_adapter *qdev) 875 + static int qlge_8012_port_initialize(struct qlge_adapter *qdev) 876 876 { 877 877 int status = 0; 878 878 u32 data; 879 879 880 - if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) { 880 + if (qlge_sem_trylock(qdev, qdev->xg_sem_mask)) { 881 881 /* Another function has the semaphore, so 882 882 * wait for the port init bit to come ready. 883 883 */ 884 884 netif_info(qdev, link, qdev->ndev, 885 885 "Another function has the semaphore, so wait for the port init bit to come ready.\n"); 886 - status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0); 886 + status = qlge_wait_reg_rdy(qdev, STS, qdev->port_init, 0); 887 887 if (status) { 888 888 netif_crit(qdev, link, qdev->ndev, 889 889 "Port initialize timed out.\n"); ··· 893 893 894 894 netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!.\n"); 895 895 /* Set the core reset. */ 896 - status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data); 896 + status = qlge_read_xgmac_reg(qdev, GLOBAL_CFG, &data); 897 897 if (status) 898 898 goto end; 899 899 data |= GLOBAL_CFG_RESET; 900 - status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data); 900 + status = qlge_write_xgmac_reg(qdev, GLOBAL_CFG, data); 901 901 if (status) 902 902 goto end; 903 903 ··· 906 906 data |= GLOBAL_CFG_JUMBO; /* Turn on jumbo. 
*/ 907 907 data |= GLOBAL_CFG_TX_STAT_EN; 908 908 data |= GLOBAL_CFG_RX_STAT_EN; 909 - status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data); 909 + status = qlge_write_xgmac_reg(qdev, GLOBAL_CFG, data); 910 910 if (status) 911 911 goto end; 912 912 913 913 /* Enable transmitter, and clear it's reset. */ 914 - status = ql_read_xgmac_reg(qdev, TX_CFG, &data); 914 + status = qlge_read_xgmac_reg(qdev, TX_CFG, &data); 915 915 if (status) 916 916 goto end; 917 917 data &= ~TX_CFG_RESET; /* Clear the TX MAC reset. */ 918 918 data |= TX_CFG_EN; /* Enable the transmitter. */ 919 - status = ql_write_xgmac_reg(qdev, TX_CFG, data); 919 + status = qlge_write_xgmac_reg(qdev, TX_CFG, data); 920 920 if (status) 921 921 goto end; 922 922 923 923 /* Enable receiver and clear it's reset. */ 924 - status = ql_read_xgmac_reg(qdev, RX_CFG, &data); 924 + status = qlge_read_xgmac_reg(qdev, RX_CFG, &data); 925 925 if (status) 926 926 goto end; 927 927 data &= ~RX_CFG_RESET; /* Clear the RX MAC reset. */ 928 928 data |= RX_CFG_EN; /* Enable the receiver. */ 929 - status = ql_write_xgmac_reg(qdev, RX_CFG, data); 929 + status = qlge_write_xgmac_reg(qdev, RX_CFG, data); 930 930 if (status) 931 931 goto end; 932 932 933 933 /* Turn on jumbo. */ 934 934 status = 935 - ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16)); 935 + qlge_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16)); 936 936 if (status) 937 937 goto end; 938 938 status = 939 - ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580); 939 + qlge_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580); 940 940 if (status) 941 941 goto end; 942 942 943 943 /* Signal to the world that the port is enabled. 
*/ 944 - ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init)); 944 + qlge_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init)); 945 945 end: 946 - ql_sem_unlock(qdev, qdev->xg_sem_mask); 946 + qlge_sem_unlock(qdev, qdev->xg_sem_mask); 947 947 return status; 948 948 } 949 949 950 - static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev) 950 + static inline unsigned int qlge_lbq_block_size(struct qlge_adapter *qdev) 951 951 { 952 952 return PAGE_SIZE << qdev->lbq_buf_order; 953 953 } ··· 962 962 return bq_desc; 963 963 } 964 964 965 - static struct qlge_bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev, 966 - struct rx_ring *rx_ring) 965 + static struct qlge_bq_desc *qlge_get_curr_lchunk(struct qlge_adapter *qdev, 966 + struct rx_ring *rx_ring) 967 967 { 968 968 struct qlge_bq_desc *lbq_desc = qlge_get_curr_buf(&rx_ring->lbq); 969 969 ··· 971 971 qdev->lbq_buf_size, DMA_FROM_DEVICE); 972 972 973 973 if ((lbq_desc->p.pg_chunk.offset + qdev->lbq_buf_size) == 974 - ql_lbq_block_size(qdev)) { 974 + qlge_lbq_block_size(qdev)) { 975 975 /* last chunk of the master page */ 976 976 dma_unmap_page(&qdev->pdev->dev, lbq_desc->dma_addr, 977 - ql_lbq_block_size(qdev), DMA_FROM_DEVICE); 977 + qlge_lbq_block_size(qdev), DMA_FROM_DEVICE); 978 978 } 979 979 980 980 return lbq_desc; 981 981 } 982 982 983 983 /* Update an rx ring index. 
*/ 984 - static void ql_update_cq(struct rx_ring *rx_ring) 984 + static void qlge_update_cq(struct rx_ring *rx_ring) 985 985 { 986 986 rx_ring->cnsmr_idx++; 987 987 rx_ring->curr_entry++; ··· 991 991 } 992 992 } 993 993 994 - static void ql_write_cq_idx(struct rx_ring *rx_ring) 994 + static void qlge_write_cq_idx(struct rx_ring *rx_ring) 995 995 { 996 - ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg); 996 + qlge_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg); 997 997 } 998 998 999 999 static const char * const bq_type_name[] = { ··· 1005 1005 static int qlge_refill_sb(struct rx_ring *rx_ring, 1006 1006 struct qlge_bq_desc *sbq_desc, gfp_t gfp) 1007 1007 { 1008 - struct ql_adapter *qdev = rx_ring->qdev; 1008 + struct qlge_adapter *qdev = rx_ring->qdev; 1009 1009 struct sk_buff *skb; 1010 1010 1011 1011 if (sbq_desc->p.skb) ··· 1038 1038 static int qlge_refill_lb(struct rx_ring *rx_ring, 1039 1039 struct qlge_bq_desc *lbq_desc, gfp_t gfp) 1040 1040 { 1041 - struct ql_adapter *qdev = rx_ring->qdev; 1041 + struct qlge_adapter *qdev = rx_ring->qdev; 1042 1042 struct qlge_page_chunk *master_chunk = &rx_ring->master_chunk; 1043 1043 1044 1044 if (!master_chunk->page) { ··· 1049 1049 if (unlikely(!page)) 1050 1050 return -ENOMEM; 1051 1051 dma_addr = dma_map_page(&qdev->pdev->dev, page, 0, 1052 - ql_lbq_block_size(qdev), 1052 + qlge_lbq_block_size(qdev), 1053 1053 DMA_FROM_DEVICE); 1054 1054 if (dma_mapping_error(&qdev->pdev->dev, dma_addr)) { 1055 1055 __free_pages(page, qdev->lbq_buf_order); ··· 1072 1072 * buffer get. 
1073 1073 */ 1074 1074 master_chunk->offset += qdev->lbq_buf_size; 1075 - if (master_chunk->offset == ql_lbq_block_size(qdev)) { 1075 + if (master_chunk->offset == qlge_lbq_block_size(qdev)) { 1076 1076 master_chunk->page = NULL; 1077 1077 } else { 1078 1078 master_chunk->va += qdev->lbq_buf_size; ··· 1086 1086 static int qlge_refill_bq(struct qlge_bq *bq, gfp_t gfp) 1087 1087 { 1088 1088 struct rx_ring *rx_ring = QLGE_BQ_CONTAINER(bq); 1089 - struct ql_adapter *qdev = rx_ring->qdev; 1089 + struct qlge_adapter *qdev = rx_ring->qdev; 1090 1090 struct qlge_bq_desc *bq_desc; 1091 1091 int refill_count; 1092 1092 int retval; ··· 1132 1132 "ring %u %s: updating prod idx = %d.\n", 1133 1133 rx_ring->cq_id, bq_type_name[bq->type], 1134 1134 i); 1135 - ql_write_db_reg(i, bq->prod_idx_db_reg); 1135 + qlge_write_db_reg(i, bq->prod_idx_db_reg); 1136 1136 } 1137 1137 bq->next_to_use = i; 1138 1138 } ··· 1140 1140 return retval; 1141 1141 } 1142 1142 1143 - static void ql_update_buffer_queues(struct rx_ring *rx_ring, gfp_t gfp, 1144 - unsigned long delay) 1143 + static void qlge_update_buffer_queues(struct rx_ring *rx_ring, gfp_t gfp, 1144 + unsigned long delay) 1145 1145 { 1146 1146 bool sbq_fail, lbq_fail; 1147 1147 ··· 1172 1172 struct napi_struct *napi = &rx_ring->napi; 1173 1173 1174 1174 napi_disable(napi); 1175 - ql_update_buffer_queues(rx_ring, GFP_KERNEL, HZ / 2); 1175 + qlge_update_buffer_queues(rx_ring, GFP_KERNEL, HZ / 2); 1176 1176 napi_enable(napi); 1177 1177 1178 1178 local_bh_disable(); ··· 1187 1187 /* Unmaps tx buffers. Can be called from send() if a pci mapping 1188 1188 * fails at some stage, or from the interrupt when a tx completes. 
1189 1189 */ 1190 - static void ql_unmap_send(struct ql_adapter *qdev, 1191 - struct tx_ring_desc *tx_ring_desc, int mapped) 1190 + static void qlge_unmap_send(struct qlge_adapter *qdev, 1191 + struct tx_ring_desc *tx_ring_desc, int mapped) 1192 1192 { 1193 1193 int i; 1194 1194 ··· 1229 1229 /* Map the buffers for this transmit. This will return 1230 1230 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success. 1231 1231 */ 1232 - static int ql_map_send(struct ql_adapter *qdev, 1233 - struct ob_mac_iocb_req *mac_iocb_ptr, 1234 - struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc) 1232 + static int qlge_map_send(struct qlge_adapter *qdev, 1233 + struct qlge_ob_mac_iocb_req *mac_iocb_ptr, 1234 + struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc) 1235 1235 { 1236 1236 int len = skb_headlen(skb); 1237 1237 dma_addr_t map; ··· 1294 1294 */ 1295 1295 /* Tack on the OAL in the eighth segment of IOCB. */ 1296 1296 map = dma_map_single(&qdev->pdev->dev, &tx_ring_desc->oal, 1297 - sizeof(struct oal), 1297 + sizeof(struct qlge_oal), 1298 1298 DMA_TO_DEVICE); 1299 1299 err = dma_mapping_error(&qdev->pdev->dev, map); 1300 1300 if (err) { ··· 1316 1316 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, 1317 1317 map); 1318 1318 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, 1319 - sizeof(struct oal)); 1319 + sizeof(struct qlge_oal)); 1320 1320 tbd = (struct tx_buf_desc *)&tx_ring_desc->oal; 1321 1321 map_idx++; 1322 1322 } ··· 1351 1351 * we pass in the number of frags that mapped successfully 1352 1352 * so they can be umapped. 
1353 1353 */ 1354 - ql_unmap_send(qdev, tx_ring_desc, map_idx); 1354 + qlge_unmap_send(qdev, tx_ring_desc, map_idx); 1355 1355 return NETDEV_TX_BUSY; 1356 1356 } 1357 1357 1358 1358 /* Categorizing receive firmware frame errors */ 1359 - static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err, 1360 - struct rx_ring *rx_ring) 1359 + static void qlge_categorize_rx_err(struct qlge_adapter *qdev, u8 rx_err, 1360 + struct rx_ring *rx_ring) 1361 1361 { 1362 1362 struct nic_stats *stats = &qdev->nic_stats; 1363 1363 ··· 1389 1389 } 1390 1390 1391 1391 /** 1392 - * ql_update_mac_hdr_len - helper routine to update the mac header length 1392 + * qlge_update_mac_hdr_len - helper routine to update the mac header length 1393 1393 * based on vlan tags if present 1394 1394 */ 1395 - static void ql_update_mac_hdr_len(struct ql_adapter *qdev, 1396 - struct ib_mac_iocb_rsp *ib_mac_rsp, 1397 - void *page, size_t *len) 1395 + static void qlge_update_mac_hdr_len(struct qlge_adapter *qdev, 1396 + struct qlge_ib_mac_iocb_rsp *ib_mac_rsp, 1397 + void *page, size_t *len) 1398 1398 { 1399 1399 u16 *tags; 1400 1400 ··· 1412 1412 } 1413 1413 1414 1414 /* Process an inbound completion from an rx ring. */ 1415 - static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev, 1416 - struct rx_ring *rx_ring, 1417 - struct ib_mac_iocb_rsp *ib_mac_rsp, 1418 - u32 length, u16 vlan_id) 1415 + static void qlge_process_mac_rx_gro_page(struct qlge_adapter *qdev, 1416 + struct rx_ring *rx_ring, 1417 + struct qlge_ib_mac_iocb_rsp *ib_mac_rsp, 1418 + u32 length, u16 vlan_id) 1419 1419 { 1420 1420 struct sk_buff *skb; 1421 - struct qlge_bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); 1421 + struct qlge_bq_desc *lbq_desc = qlge_get_curr_lchunk(qdev, rx_ring); 1422 1422 struct napi_struct *napi = &rx_ring->napi; 1423 1423 1424 1424 /* Frame error, so drop the packet. 
*/ 1425 1425 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) { 1426 - ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring); 1426 + qlge_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring); 1427 1427 put_page(lbq_desc->p.pg_chunk.page); 1428 1428 return; 1429 1429 } ··· 1458 1458 } 1459 1459 1460 1460 /* Process an inbound completion from an rx ring. */ 1461 - static void ql_process_mac_rx_page(struct ql_adapter *qdev, 1462 - struct rx_ring *rx_ring, 1463 - struct ib_mac_iocb_rsp *ib_mac_rsp, 1464 - u32 length, u16 vlan_id) 1461 + static void qlge_process_mac_rx_page(struct qlge_adapter *qdev, 1462 + struct rx_ring *rx_ring, 1463 + struct qlge_ib_mac_iocb_rsp *ib_mac_rsp, 1464 + u32 length, u16 vlan_id) 1465 1465 { 1466 1466 struct net_device *ndev = qdev->ndev; 1467 1467 struct sk_buff *skb = NULL; 1468 1468 void *addr; 1469 - struct qlge_bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); 1469 + struct qlge_bq_desc *lbq_desc = qlge_get_curr_lchunk(qdev, rx_ring); 1470 1470 struct napi_struct *napi = &rx_ring->napi; 1471 1471 size_t hlen = ETH_HLEN; 1472 1472 ··· 1482 1482 1483 1483 /* Frame error, so drop the packet. */ 1484 1484 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) { 1485 - ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring); 1485 + qlge_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring); 1486 1486 goto err_out; 1487 1487 } 1488 1488 1489 1489 /* Update the MAC header length*/ 1490 - ql_update_mac_hdr_len(qdev, ib_mac_rsp, addr, &hlen); 1490 + qlge_update_mac_hdr_len(qdev, ib_mac_rsp, addr, &hlen); 1491 1491 1492 1492 /* The max framesize filter on this chip is set higher than 1493 1493 * MTU since FCoE uses 2k frames. ··· 1521 1521 "TCP checksum done!\n"); 1522 1522 skb->ip_summed = CHECKSUM_UNNECESSARY; 1523 1523 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) && 1524 - (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) { 1524 + (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) { 1525 1525 /* Unfragmented ipv4 UDP frame. 
*/ 1526 1526 struct iphdr *iph = 1527 1527 (struct iphdr *)((u8 *)addr + hlen); 1528 1528 if (!(iph->frag_off & 1529 - htons(IP_MF | IP_OFFSET))) { 1529 + htons(IP_MF | IP_OFFSET))) { 1530 1530 skb->ip_summed = CHECKSUM_UNNECESSARY; 1531 1531 netif_printk(qdev, rx_status, KERN_DEBUG, 1532 1532 qdev->ndev, ··· 1549 1549 } 1550 1550 1551 1551 /* Process an inbound completion from an rx ring. */ 1552 - static void ql_process_mac_rx_skb(struct ql_adapter *qdev, 1553 - struct rx_ring *rx_ring, 1554 - struct ib_mac_iocb_rsp *ib_mac_rsp, 1555 - u32 length, u16 vlan_id) 1552 + static void qlge_process_mac_rx_skb(struct qlge_adapter *qdev, 1553 + struct rx_ring *rx_ring, 1554 + struct qlge_ib_mac_iocb_rsp *ib_mac_rsp, 1555 + u32 length, u16 vlan_id) 1556 1556 { 1557 1557 struct qlge_bq_desc *sbq_desc = qlge_get_curr_buf(&rx_ring->sbq); 1558 1558 struct net_device *ndev = qdev->ndev; ··· 1576 1576 1577 1577 /* Frame error, so drop the packet. */ 1578 1578 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) { 1579 - ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring); 1579 + qlge_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring); 1580 1580 dev_kfree_skb_any(skb); 1581 1581 return; 1582 1582 } 1583 1583 1584 1584 /* loopback self test for ethtool */ 1585 1585 if (test_bit(QL_SELFTEST, &qdev->flags)) { 1586 - ql_check_lb_frame(qdev, skb); 1586 + qlge_check_lb_frame(qdev, skb); 1587 1587 dev_kfree_skb_any(skb); 1588 1588 return; 1589 1589 } ··· 1628 1628 "TCP checksum done!\n"); 1629 1629 skb->ip_summed = CHECKSUM_UNNECESSARY; 1630 1630 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) && 1631 - (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) { 1631 + (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) { 1632 1632 /* Unfragmented ipv4 UDP frame. 
*/ 1633 1633 struct iphdr *iph = (struct iphdr *)skb->data; 1634 1634 1635 1635 if (!(iph->frag_off & 1636 - htons(IP_MF | IP_OFFSET))) { 1636 + htons(IP_MF | IP_OFFSET))) { 1637 1637 skb->ip_summed = CHECKSUM_UNNECESSARY; 1638 1638 netif_printk(qdev, rx_status, KERN_DEBUG, 1639 1639 qdev->ndev, ··· 1651 1651 netif_receive_skb(skb); 1652 1652 } 1653 1653 1654 - static void ql_realign_skb(struct sk_buff *skb, int len) 1654 + static void qlge_realign_skb(struct sk_buff *skb, int len) 1655 1655 { 1656 1656 void *temp_addr = skb->data; 1657 1657 ··· 1669 1669 * completion. It will be rewritten for readability in the near 1670 1670 * future, but for not it works well. 1671 1671 */ 1672 - static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev, 1673 - struct rx_ring *rx_ring, 1674 - struct ib_mac_iocb_rsp *ib_mac_rsp) 1672 + static struct sk_buff *qlge_build_rx_skb(struct qlge_adapter *qdev, 1673 + struct rx_ring *rx_ring, 1674 + struct qlge_ib_mac_iocb_rsp *ib_mac_rsp) 1675 1675 { 1676 1676 u32 length = le32_to_cpu(ib_mac_rsp->data_len); 1677 1677 u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len); ··· 1693 1693 dma_unmap_single(&qdev->pdev->dev, sbq_desc->dma_addr, 1694 1694 SMALL_BUF_MAP_SIZE, DMA_FROM_DEVICE); 1695 1695 skb = sbq_desc->p.skb; 1696 - ql_realign_skb(skb, hdr_len); 1696 + qlge_realign_skb(skb, hdr_len); 1697 1697 skb_put(skb, hdr_len); 1698 1698 sbq_desc->p.skb = NULL; 1699 1699 } ··· 1731 1731 length); 1732 1732 sbq_desc = qlge_get_curr_buf(&rx_ring->sbq); 1733 1733 skb = sbq_desc->p.skb; 1734 - ql_realign_skb(skb, length); 1734 + qlge_realign_skb(skb, length); 1735 1735 skb_put(skb, length); 1736 1736 dma_unmap_single(&qdev->pdev->dev, sbq_desc->dma_addr, 1737 1737 SMALL_BUF_MAP_SIZE, ··· 1748 1748 * chain it to the header buffer's skb and let 1749 1749 * it rip. 
1750 1750 */ 1751 - lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); 1751 + lbq_desc = qlge_get_curr_lchunk(qdev, rx_ring); 1752 1752 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, 1753 1753 "Chaining page at offset = %d, for %d bytes to skb.\n", 1754 1754 lbq_desc->p.pg_chunk.offset, length); ··· 1763 1763 * copy it to a new skb and let it go. This can happen with 1764 1764 * jumbo mtu on a non-TCP/UDP frame. 1765 1765 */ 1766 - lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); 1766 + lbq_desc = qlge_get_curr_lchunk(qdev, rx_ring); 1767 1767 skb = netdev_alloc_skb(qdev->ndev, length); 1768 1768 if (!skb) { 1769 1769 netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev, ··· 1783 1783 skb->len += length; 1784 1784 skb->data_len += length; 1785 1785 skb->truesize += length; 1786 - ql_update_mac_hdr_len(qdev, ib_mac_rsp, 1787 - lbq_desc->p.pg_chunk.va, 1788 - &hlen); 1786 + qlge_update_mac_hdr_len(qdev, ib_mac_rsp, 1787 + lbq_desc->p.pg_chunk.va, 1788 + &hlen); 1789 1789 __pskb_pull_tail(skb, hlen); 1790 1790 } 1791 1791 } else { ··· 1823 1823 skb_reserve(skb, NET_IP_ALIGN); 1824 1824 } 1825 1825 do { 1826 - lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); 1826 + lbq_desc = qlge_get_curr_lchunk(qdev, rx_ring); 1827 1827 size = min(length, qdev->lbq_buf_size); 1828 1828 1829 1829 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, ··· 1838 1838 length -= size; 1839 1839 i++; 1840 1840 } while (length > 0); 1841 - ql_update_mac_hdr_len(qdev, ib_mac_rsp, lbq_desc->p.pg_chunk.va, 1842 - &hlen); 1841 + qlge_update_mac_hdr_len(qdev, ib_mac_rsp, lbq_desc->p.pg_chunk.va, 1842 + &hlen); 1843 1843 __pskb_pull_tail(skb, hlen); 1844 1844 } 1845 1845 return skb; 1846 1846 } 1847 1847 1848 1848 /* Process an inbound completion from an rx ring. 
*/ 1849 - static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev, 1850 - struct rx_ring *rx_ring, 1851 - struct ib_mac_iocb_rsp *ib_mac_rsp, 1852 - u16 vlan_id) 1849 + static void qlge_process_mac_split_rx_intr(struct qlge_adapter *qdev, 1850 + struct rx_ring *rx_ring, 1851 + struct qlge_ib_mac_iocb_rsp *ib_mac_rsp, 1852 + u16 vlan_id) 1853 1853 { 1854 1854 struct net_device *ndev = qdev->ndev; 1855 1855 struct sk_buff *skb = NULL; 1856 1856 1857 1857 QL_DUMP_IB_MAC_RSP(qdev, ib_mac_rsp); 1858 1858 1859 - skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp); 1859 + skb = qlge_build_rx_skb(qdev, rx_ring, ib_mac_rsp); 1860 1860 if (unlikely(!skb)) { 1861 1861 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, 1862 1862 "No skb available, drop packet.\n"); ··· 1866 1866 1867 1867 /* Frame error, so drop the packet. */ 1868 1868 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) { 1869 - ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring); 1869 + qlge_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring); 1870 1870 dev_kfree_skb_any(skb); 1871 1871 return; 1872 1872 } ··· 1882 1882 1883 1883 /* loopback self test for ethtool */ 1884 1884 if (test_bit(QL_SELFTEST, &qdev->flags)) { 1885 - ql_check_lb_frame(qdev, skb); 1885 + qlge_check_lb_frame(qdev, skb); 1886 1886 dev_kfree_skb_any(skb); 1887 1887 return; 1888 1888 } ··· 1917 1917 "TCP checksum done!\n"); 1918 1918 skb->ip_summed = CHECKSUM_UNNECESSARY; 1919 1919 } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) && 1920 - (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) { 1921 - /* Unfragmented ipv4 UDP frame. */ 1920 + (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) { 1921 + /* Unfragmented ipv4 UDP frame. 
*/ 1922 1922 struct iphdr *iph = (struct iphdr *)skb->data; 1923 1923 1924 1924 if (!(iph->frag_off & 1925 - htons(IP_MF | IP_OFFSET))) { 1925 + htons(IP_MF | IP_OFFSET))) { 1926 1926 skb->ip_summed = CHECKSUM_UNNECESSARY; 1927 1927 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, 1928 1928 "TCP checksum done!\n"); ··· 1942 1942 } 1943 1943 1944 1944 /* Process an inbound completion from an rx ring. */ 1945 - static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev, 1946 - struct rx_ring *rx_ring, 1947 - struct ib_mac_iocb_rsp *ib_mac_rsp) 1945 + static unsigned long qlge_process_mac_rx_intr(struct qlge_adapter *qdev, 1946 + struct rx_ring *rx_ring, 1947 + struct qlge_ib_mac_iocb_rsp *ib_mac_rsp) 1948 1948 { 1949 1949 u32 length = le32_to_cpu(ib_mac_rsp->data_len); 1950 1950 u16 vlan_id = ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) && 1951 - (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)) ? 1952 - ((le16_to_cpu(ib_mac_rsp->vlan_id) & 1953 - IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff; 1951 + (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)) ? 1952 + ((le16_to_cpu(ib_mac_rsp->vlan_id) & 1953 + IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff; 1954 1954 1955 1955 QL_DUMP_IB_MAC_RSP(qdev, ib_mac_rsp); 1956 1956 ··· 1958 1958 /* The data and headers are split into 1959 1959 * separate buffers. 1960 1960 */ 1961 - ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp, 1962 - vlan_id); 1961 + qlge_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp, 1962 + vlan_id); 1963 1963 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) { 1964 1964 /* The data fit in a single small buffer. 1965 1965 * Allocate a new skb, copy the data and 1966 1966 * return the buffer to the free pool. 
1967 1967 */ 1968 - ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp, length, 1969 - vlan_id); 1968 + qlge_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp, length, 1969 + vlan_id); 1970 1970 } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) && 1971 - !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) && 1972 - (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) { 1971 + !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) && 1972 + (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) { 1973 1973 /* TCP packet in a page chunk that's been checksummed. 1974 1974 * Tack it on to our GRO skb and let it go. 1975 1975 */ 1976 - ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp, length, 1977 - vlan_id); 1976 + qlge_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp, length, 1977 + vlan_id); 1978 1978 } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) { 1979 1979 /* Non-TCP packet in a page chunk. Allocate an 1980 1980 * skb, tack it on frags, and send it up. 1981 1981 */ 1982 - ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp, length, 1983 - vlan_id); 1982 + qlge_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp, length, 1983 + vlan_id); 1984 1984 } else { 1985 1985 /* Non-TCP/UDP large frames that span multiple buffers 1986 1986 * can be processed corrrectly by the split frame logic. 1987 1987 */ 1988 - ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp, 1989 - vlan_id); 1988 + qlge_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp, 1989 + vlan_id); 1990 1990 } 1991 1991 1992 1992 return (unsigned long)length; 1993 1993 } 1994 1994 1995 1995 /* Process an outbound completion from an rx ring. 
*/ 1996 - static void ql_process_mac_tx_intr(struct ql_adapter *qdev, 1997 - struct ob_mac_iocb_rsp *mac_rsp) 1996 + static void qlge_process_mac_tx_intr(struct qlge_adapter *qdev, 1997 + struct qlge_ob_mac_iocb_rsp *mac_rsp) 1998 1998 { 1999 1999 struct tx_ring *tx_ring; 2000 2000 struct tx_ring_desc *tx_ring_desc; ··· 2002 2002 QL_DUMP_OB_MAC_RSP(qdev, mac_rsp); 2003 2003 tx_ring = &qdev->tx_ring[mac_rsp->txq_idx]; 2004 2004 tx_ring_desc = &tx_ring->q[mac_rsp->tid]; 2005 - ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt); 2005 + qlge_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt); 2006 2006 tx_ring->tx_bytes += (tx_ring_desc->skb)->len; 2007 2007 tx_ring->tx_packets++; 2008 2008 dev_kfree_skb(tx_ring_desc->skb); ··· 2033 2033 } 2034 2034 2035 2035 /* Fire up a handler to reset the MPI processor. */ 2036 - void ql_queue_fw_error(struct ql_adapter *qdev) 2036 + void qlge_queue_fw_error(struct qlge_adapter *qdev) 2037 2037 { 2038 - ql_link_off(qdev); 2038 + qlge_link_off(qdev); 2039 2039 queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0); 2040 2040 } 2041 2041 2042 - void ql_queue_asic_error(struct ql_adapter *qdev) 2042 + void qlge_queue_asic_error(struct qlge_adapter *qdev) 2043 2043 { 2044 - ql_link_off(qdev); 2045 - ql_disable_interrupts(qdev); 2044 + qlge_link_off(qdev); 2045 + qlge_disable_interrupts(qdev); 2046 2046 /* Clear adapter up bit to signal the recovery 2047 2047 * process that it shouldn't kill the reset worker 2048 2048 * thread ··· 2055 2055 queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0); 2056 2056 } 2057 2057 2058 - static void ql_process_chip_ae_intr(struct ql_adapter *qdev, 2059 - struct ib_ae_iocb_rsp *ib_ae_rsp) 2058 + static void qlge_process_chip_ae_intr(struct qlge_adapter *qdev, 2059 + struct qlge_ib_ae_iocb_rsp *ib_ae_rsp) 2060 2060 { 2061 2061 switch (ib_ae_rsp->event) { 2062 2062 case MGMT_ERR_EVENT: 2063 2063 netif_err(qdev, rx_err, qdev->ndev, 2064 2064 "Management Processor Fatal Error.\n"); 
2065 - ql_queue_fw_error(qdev); 2065 + qlge_queue_fw_error(qdev); 2066 2066 return; 2067 2067 2068 2068 case CAM_LOOKUP_ERR_EVENT: 2069 2069 netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n"); 2070 2070 netdev_err(qdev->ndev, "This event shouldn't occur.\n"); 2071 - ql_queue_asic_error(qdev); 2071 + qlge_queue_asic_error(qdev); 2072 2072 return; 2073 2073 2074 2074 case SOFT_ECC_ERROR_EVENT: 2075 2075 netdev_err(qdev->ndev, "Soft ECC error detected.\n"); 2076 - ql_queue_asic_error(qdev); 2076 + qlge_queue_asic_error(qdev); 2077 2077 break; 2078 2078 2079 2079 case PCI_ERR_ANON_BUF_RD: 2080 2080 netdev_err(qdev->ndev, 2081 2081 "PCI error occurred when reading anonymous buffers from rx_ring %d.\n", 2082 2082 ib_ae_rsp->q_id); 2083 - ql_queue_asic_error(qdev); 2083 + qlge_queue_asic_error(qdev); 2084 2084 break; 2085 2085 2086 2086 default: 2087 2087 netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n", 2088 2088 ib_ae_rsp->event); 2089 - ql_queue_asic_error(qdev); 2089 + qlge_queue_asic_error(qdev); 2090 2090 break; 2091 2091 } 2092 2092 } 2093 2093 2094 - static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring) 2094 + static int qlge_clean_outbound_rx_ring(struct rx_ring *rx_ring) 2095 2095 { 2096 - struct ql_adapter *qdev = rx_ring->qdev; 2097 - u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg); 2098 - struct ob_mac_iocb_rsp *net_rsp = NULL; 2096 + struct qlge_adapter *qdev = rx_ring->qdev; 2097 + u32 prod = qlge_read_sh_reg(rx_ring->prod_idx_sh_reg); 2098 + struct qlge_ob_mac_iocb_rsp *net_rsp = NULL; 2099 2099 int count = 0; 2100 2100 2101 2101 struct tx_ring *tx_ring; ··· 2105 2105 "cq_id = %d, prod = %d, cnsmr = %d\n", 2106 2106 rx_ring->cq_id, prod, rx_ring->cnsmr_idx); 2107 2107 2108 - net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry; 2108 + net_rsp = (struct qlge_ob_mac_iocb_rsp *)rx_ring->curr_entry; 2109 2109 rmb(); 2110 2110 switch (net_rsp->opcode) { 2111 2111 case OPCODE_OB_MAC_TSO_IOCB: 2112 2112 case OPCODE_OB_MAC_IOCB: 
2113 - ql_process_mac_tx_intr(qdev, net_rsp); 2113 + qlge_process_mac_tx_intr(qdev, net_rsp); 2114 2114 break; 2115 2115 default: 2116 2116 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, ··· 2118 2118 net_rsp->opcode); 2119 2119 } 2120 2120 count++; 2121 - ql_update_cq(rx_ring); 2122 - prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg); 2121 + qlge_update_cq(rx_ring); 2122 + prod = qlge_read_sh_reg(rx_ring->prod_idx_sh_reg); 2123 2123 } 2124 2124 if (!net_rsp) 2125 2125 return 0; 2126 - ql_write_cq_idx(rx_ring); 2126 + qlge_write_cq_idx(rx_ring); 2127 2127 tx_ring = &qdev->tx_ring[net_rsp->txq_idx]; 2128 2128 if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) { 2129 2129 if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4))) ··· 2137 2137 return count; 2138 2138 } 2139 2139 2140 - static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget) 2140 + static int qlge_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget) 2141 2141 { 2142 - struct ql_adapter *qdev = rx_ring->qdev; 2143 - u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg); 2144 - struct ql_net_rsp_iocb *net_rsp; 2142 + struct qlge_adapter *qdev = rx_ring->qdev; 2143 + u32 prod = qlge_read_sh_reg(rx_ring->prod_idx_sh_reg); 2144 + struct qlge_net_rsp_iocb *net_rsp; 2145 2145 int count = 0; 2146 2146 2147 2147 /* While there are entries in the completion queue. 
*/ ··· 2154 2154 rmb(); 2155 2155 switch (net_rsp->opcode) { 2156 2156 case OPCODE_IB_MAC_IOCB: 2157 - ql_process_mac_rx_intr(qdev, rx_ring, 2158 - (struct ib_mac_iocb_rsp *) 2159 - net_rsp); 2157 + qlge_process_mac_rx_intr(qdev, rx_ring, 2158 + (struct qlge_ib_mac_iocb_rsp *) 2159 + net_rsp); 2160 2160 break; 2161 2161 2162 2162 case OPCODE_IB_AE_IOCB: 2163 - ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *) 2164 - net_rsp); 2163 + qlge_process_chip_ae_intr(qdev, (struct qlge_ib_ae_iocb_rsp *) 2164 + net_rsp); 2165 2165 break; 2166 2166 default: 2167 2167 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, ··· 2170 2170 break; 2171 2171 } 2172 2172 count++; 2173 - ql_update_cq(rx_ring); 2174 - prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg); 2173 + qlge_update_cq(rx_ring); 2174 + prod = qlge_read_sh_reg(rx_ring->prod_idx_sh_reg); 2175 2175 if (count == budget) 2176 2176 break; 2177 2177 } 2178 - ql_update_buffer_queues(rx_ring, GFP_ATOMIC, 0); 2179 - ql_write_cq_idx(rx_ring); 2178 + qlge_update_buffer_queues(rx_ring, GFP_ATOMIC, 0); 2179 + qlge_write_cq_idx(rx_ring); 2180 2180 return count; 2181 2181 } 2182 2182 2183 - static int ql_napi_poll_msix(struct napi_struct *napi, int budget) 2183 + static int qlge_napi_poll_msix(struct napi_struct *napi, int budget) 2184 2184 { 2185 2185 struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi); 2186 - struct ql_adapter *qdev = rx_ring->qdev; 2186 + struct qlge_adapter *qdev = rx_ring->qdev; 2187 2187 struct rx_ring *trx_ring; 2188 2188 int i, work_done = 0; 2189 2189 struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id]; ··· 2200 2200 * it's not empty then service it. 
2201 2201 */ 2202 2202 if ((ctx->irq_mask & (1 << trx_ring->cq_id)) && 2203 - (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) != 2203 + (qlge_read_sh_reg(trx_ring->prod_idx_sh_reg) != 2204 2204 trx_ring->cnsmr_idx)) { 2205 2205 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev, 2206 2206 "%s: Servicing TX completion ring %d.\n", 2207 2207 __func__, trx_ring->cq_id); 2208 - ql_clean_outbound_rx_ring(trx_ring); 2208 + qlge_clean_outbound_rx_ring(trx_ring); 2209 2209 } 2210 2210 } 2211 2211 2212 2212 /* 2213 2213 * Now service the RSS ring if it's active. 2214 2214 */ 2215 - if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) != 2216 - rx_ring->cnsmr_idx) { 2215 + if (qlge_read_sh_reg(rx_ring->prod_idx_sh_reg) != 2216 + rx_ring->cnsmr_idx) { 2217 2217 netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev, 2218 2218 "%s: Servicing RX completion ring %d.\n", 2219 2219 __func__, rx_ring->cq_id); 2220 - work_done = ql_clean_inbound_rx_ring(rx_ring, budget); 2220 + work_done = qlge_clean_inbound_rx_ring(rx_ring, budget); 2221 2221 } 2222 2222 2223 2223 if (work_done < budget) { 2224 2224 napi_complete_done(napi, work_done); 2225 - ql_enable_completion_interrupt(qdev, rx_ring->irq); 2225 + qlge_enable_completion_interrupt(qdev, rx_ring->irq); 2226 2226 } 2227 2227 return work_done; 2228 2228 } 2229 2229 2230 2230 static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features) 2231 2231 { 2232 - struct ql_adapter *qdev = netdev_priv(ndev); 2232 + struct qlge_adapter *qdev = netdev_priv(ndev); 2233 2233 2234 2234 if (features & NETIF_F_HW_VLAN_CTAG_RX) { 2235 - ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK | 2236 - NIC_RCV_CFG_VLAN_MATCH_AND_NON); 2235 + qlge_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK | 2236 + NIC_RCV_CFG_VLAN_MATCH_AND_NON); 2237 2237 } else { 2238 - ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK); 2238 + qlge_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK); 2239 2239 } 2240 2240 } 2241 2241 ··· 2246 2246 static int 
qlge_update_hw_vlan_features(struct net_device *ndev, 2247 2247 netdev_features_t features) 2248 2248 { 2249 - struct ql_adapter *qdev = netdev_priv(ndev); 2249 + struct qlge_adapter *qdev = netdev_priv(ndev); 2250 2250 int status = 0; 2251 2251 bool need_restart = netif_running(ndev); 2252 2252 2253 2253 if (need_restart) { 2254 - status = ql_adapter_down(qdev); 2254 + status = qlge_adapter_down(qdev); 2255 2255 if (status) { 2256 2256 netif_err(qdev, link, qdev->ndev, 2257 2257 "Failed to bring down the adapter\n"); ··· 2263 2263 ndev->features = features; 2264 2264 2265 2265 if (need_restart) { 2266 - status = ql_adapter_up(qdev); 2266 + status = qlge_adapter_up(qdev); 2267 2267 if (status) { 2268 2268 netif_err(qdev, link, qdev->ndev, 2269 2269 "Failed to bring up the adapter\n"); ··· 2292 2292 return 0; 2293 2293 } 2294 2294 2295 - static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid) 2295 + static int __qlge_vlan_rx_add_vid(struct qlge_adapter *qdev, u16 vid) 2296 2296 { 2297 2297 u32 enable_bit = MAC_ADDR_E; 2298 2298 int err; 2299 2299 2300 - err = ql_set_mac_addr_reg(qdev, (u8 *)&enable_bit, 2301 - MAC_ADDR_TYPE_VLAN, vid); 2300 + err = qlge_set_mac_addr_reg(qdev, (u8 *)&enable_bit, 2301 + MAC_ADDR_TYPE_VLAN, vid); 2302 2302 if (err) 2303 2303 netif_err(qdev, ifup, qdev->ndev, 2304 2304 "Failed to init vlan address.\n"); ··· 2307 2307 2308 2308 static int qlge_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid) 2309 2309 { 2310 - struct ql_adapter *qdev = netdev_priv(ndev); 2310 + struct qlge_adapter *qdev = netdev_priv(ndev); 2311 2311 int status; 2312 2312 int err; 2313 2313 2314 - status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); 2314 + status = qlge_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); 2315 2315 if (status) 2316 2316 return status; 2317 2317 2318 2318 err = __qlge_vlan_rx_add_vid(qdev, vid); 2319 2319 set_bit(vid, qdev->active_vlans); 2320 2320 2321 - ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); 2321 + qlge_sem_unlock(qdev, 
SEM_MAC_ADDR_MASK); 2322 2322 2323 2323 return err; 2324 2324 } 2325 2325 2326 - static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid) 2326 + static int __qlge_vlan_rx_kill_vid(struct qlge_adapter *qdev, u16 vid) 2327 2327 { 2328 2328 u32 enable_bit = 0; 2329 2329 int err; 2330 2330 2331 - err = ql_set_mac_addr_reg(qdev, (u8 *)&enable_bit, 2332 - MAC_ADDR_TYPE_VLAN, vid); 2331 + err = qlge_set_mac_addr_reg(qdev, (u8 *)&enable_bit, 2332 + MAC_ADDR_TYPE_VLAN, vid); 2333 2333 if (err) 2334 2334 netif_err(qdev, ifup, qdev->ndev, 2335 2335 "Failed to clear vlan address.\n"); ··· 2338 2338 2339 2339 static int qlge_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid) 2340 2340 { 2341 - struct ql_adapter *qdev = netdev_priv(ndev); 2341 + struct qlge_adapter *qdev = netdev_priv(ndev); 2342 2342 int status; 2343 2343 int err; 2344 2344 2345 - status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); 2345 + status = qlge_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); 2346 2346 if (status) 2347 2347 return status; 2348 2348 2349 2349 err = __qlge_vlan_rx_kill_vid(qdev, vid); 2350 2350 clear_bit(vid, qdev->active_vlans); 2351 2351 2352 - ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); 2352 + qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK); 2353 2353 2354 2354 return err; 2355 2355 } 2356 2356 2357 - static void qlge_restore_vlan(struct ql_adapter *qdev) 2357 + static void qlge_restore_vlan(struct qlge_adapter *qdev) 2358 2358 { 2359 2359 int status; 2360 2360 u16 vid; 2361 2361 2362 - status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); 2362 + status = qlge_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); 2363 2363 if (status) 2364 2364 return; 2365 2365 2366 2366 for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID) 2367 2367 __qlge_vlan_rx_add_vid(qdev, vid); 2368 2368 2369 - ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); 2369 + qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK); 2370 2370 } 2371 2371 2372 2372 /* MSI-X Multiple Vector Interrupt Handler for inbound completions. 
*/ ··· 2386 2386 static irqreturn_t qlge_isr(int irq, void *dev_id) 2387 2387 { 2388 2388 struct rx_ring *rx_ring = dev_id; 2389 - struct ql_adapter *qdev = rx_ring->qdev; 2389 + struct qlge_adapter *qdev = rx_ring->qdev; 2390 2390 struct intr_context *intr_context = &qdev->intr_context[0]; 2391 2391 u32 var; 2392 2392 int work_done = 0; ··· 2398 2398 * enable it is not effective. 2399 2399 */ 2400 2400 if (!test_bit(QL_MSIX_ENABLED, &qdev->flags)) 2401 - ql_disable_completion_interrupt(qdev, 0); 2401 + qlge_disable_completion_interrupt(qdev, 0); 2402 2402 2403 - var = ql_read32(qdev, STS); 2403 + var = qlge_read32(qdev, STS); 2404 2404 2405 2405 /* 2406 2406 * Check for fatal error. 2407 2407 */ 2408 2408 if (var & STS_FE) { 2409 - ql_disable_completion_interrupt(qdev, 0); 2410 - ql_queue_asic_error(qdev); 2409 + qlge_disable_completion_interrupt(qdev, 0); 2410 + qlge_queue_asic_error(qdev); 2411 2411 netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var); 2412 - var = ql_read32(qdev, ERR_STS); 2412 + var = qlge_read32(qdev, ERR_STS); 2413 2413 netdev_err(qdev->ndev, "Resetting chip. Error Status Register = 0x%x\n", var); 2414 2414 return IRQ_HANDLED; 2415 2415 } ··· 2418 2418 * Check MPI processor activity. 2419 2419 */ 2420 2420 if ((var & STS_PI) && 2421 - (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) { 2421 + (qlge_read32(qdev, INTR_MASK) & INTR_MASK_PI)) { 2422 2422 /* 2423 2423 * We've got an async event or mailbox completion. 2424 2424 * Handle it and clear the source of the interrupt. 2425 2425 */ 2426 2426 netif_err(qdev, intr, qdev->ndev, 2427 2427 "Got MPI processor interrupt.\n"); 2428 - ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16)); 2428 + qlge_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16)); 2429 2429 queue_delayed_work_on(smp_processor_id(), 2430 2430 qdev->workqueue, &qdev->mpi_work, 0); 2431 2431 work_done++; ··· 2436 2436 * pass. Compare it to the queues that this irq services 2437 2437 * and call napi if there's a match. 
2438 2438 */ 2439 - var = ql_read32(qdev, ISR1); 2439 + var = qlge_read32(qdev, ISR1); 2440 2440 if (var & intr_context->irq_mask) { 2441 2441 netif_info(qdev, intr, qdev->ndev, 2442 2442 "Waking handler for rx_ring[0].\n"); ··· 2449 2449 * systematically re-enable the interrupt if we didn't 2450 2450 * schedule napi. 2451 2451 */ 2452 - ql_enable_completion_interrupt(qdev, 0); 2452 + qlge_enable_completion_interrupt(qdev, 0); 2453 2453 } 2454 2454 2455 2455 return work_done ? IRQ_HANDLED : IRQ_NONE; 2456 2456 } 2457 2457 2458 - static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr) 2458 + static int qlge_tso(struct sk_buff *skb, struct qlge_ob_mac_tso_iocb_req *mac_iocb_ptr) 2459 2459 { 2460 2460 if (skb_is_gso(skb)) { 2461 2461 int err; ··· 2469 2469 mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC; 2470 2470 mac_iocb_ptr->frame_len = cpu_to_le32((u32)skb->len); 2471 2471 mac_iocb_ptr->total_hdrs_len = 2472 - cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb)); 2472 + cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb)); 2473 2473 mac_iocb_ptr->net_trans_offset = 2474 - cpu_to_le16(skb_network_offset(skb) | 2475 - skb_transport_offset(skb) 2476 - << OB_MAC_TRANSPORT_HDR_SHIFT); 2474 + cpu_to_le16(skb_network_offset(skb) | 2475 + skb_transport_offset(skb) 2476 + << OB_MAC_TRANSPORT_HDR_SHIFT); 2477 2477 mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size); 2478 2478 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO; 2479 2479 if (likely(l3_proto == htons(ETH_P_IP))) { ··· 2488 2488 } else if (l3_proto == htons(ETH_P_IPV6)) { 2489 2489 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6; 2490 2490 tcp_hdr(skb)->check = 2491 - ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 2492 - &ipv6_hdr(skb)->daddr, 2493 - 0, IPPROTO_TCP, 0); 2491 + ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 2492 + &ipv6_hdr(skb)->daddr, 2493 + 0, IPPROTO_TCP, 0); 2494 2494 } 2495 2495 return 1; 2496 2496 } 2497 2497 return 0; 2498 2498 } 2499 2499 2500 - static void 
ql_hw_csum_setup(struct sk_buff *skb, 2501 - struct ob_mac_tso_iocb_req *mac_iocb_ptr) 2500 + static void qlge_hw_csum_setup(struct sk_buff *skb, 2501 + struct qlge_ob_mac_tso_iocb_req *mac_iocb_ptr) 2502 2502 { 2503 2503 int len; 2504 2504 struct iphdr *iph = ip_hdr(skb); ··· 2508 2508 mac_iocb_ptr->frame_len = cpu_to_le32((u32)skb->len); 2509 2509 mac_iocb_ptr->net_trans_offset = 2510 2510 cpu_to_le16(skb_network_offset(skb) | 2511 - skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT); 2511 + skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT); 2512 2512 2513 2513 mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4; 2514 2514 len = (ntohs(iph->tot_len) - (iph->ihl << 2)); ··· 2516 2516 check = &(tcp_hdr(skb)->check); 2517 2517 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC; 2518 2518 mac_iocb_ptr->total_hdrs_len = 2519 - cpu_to_le16(skb_transport_offset(skb) + 2520 - (tcp_hdr(skb)->doff << 2)); 2519 + cpu_to_le16(skb_transport_offset(skb) + 2520 + (tcp_hdr(skb)->doff << 2)); 2521 2521 } else { 2522 2522 check = &(udp_hdr(skb)->check); 2523 2523 mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC; 2524 2524 mac_iocb_ptr->total_hdrs_len = 2525 - cpu_to_le16(skb_transport_offset(skb) + 2526 - sizeof(struct udphdr)); 2525 + cpu_to_le16(skb_transport_offset(skb) + 2526 + sizeof(struct udphdr)); 2527 2527 } 2528 2528 *check = ~csum_tcpudp_magic(iph->saddr, 2529 2529 iph->daddr, len, iph->protocol, 0); ··· 2532 2532 static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev) 2533 2533 { 2534 2534 struct tx_ring_desc *tx_ring_desc; 2535 - struct ob_mac_iocb_req *mac_iocb_ptr; 2536 - struct ql_adapter *qdev = netdev_priv(ndev); 2535 + struct qlge_ob_mac_iocb_req *mac_iocb_ptr; 2536 + struct qlge_adapter *qdev = netdev_priv(ndev); 2537 2537 int tso; 2538 2538 struct tx_ring *tx_ring; 2539 2539 u32 tx_ring_idx = (u32)skb->queue_mapping; ··· 2571 2571 mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V; 2572 2572 mac_iocb_ptr->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb)); 2573 2573 } 
2574 - tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr); 2574 + tso = qlge_tso(skb, (struct qlge_ob_mac_tso_iocb_req *)mac_iocb_ptr); 2575 2575 if (tso < 0) { 2576 2576 dev_kfree_skb_any(skb); 2577 2577 return NETDEV_TX_OK; 2578 2578 } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) { 2579 - ql_hw_csum_setup(skb, 2580 - (struct ob_mac_tso_iocb_req *)mac_iocb_ptr); 2579 + qlge_hw_csum_setup(skb, 2580 + (struct qlge_ob_mac_tso_iocb_req *)mac_iocb_ptr); 2581 2581 } 2582 - if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) != 2583 - NETDEV_TX_OK) { 2582 + if (qlge_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) != 2583 + NETDEV_TX_OK) { 2584 2584 netif_err(qdev, tx_queued, qdev->ndev, 2585 2585 "Could not map the segments.\n"); 2586 2586 tx_ring->tx_errors++; ··· 2592 2592 tx_ring->prod_idx = 0; 2593 2593 wmb(); 2594 2594 2595 - ql_write_db_reg_relaxed(tx_ring->prod_idx, tx_ring->prod_idx_db_reg); 2595 + qlge_write_db_reg_relaxed(tx_ring->prod_idx, tx_ring->prod_idx_db_reg); 2596 2596 netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev, 2597 2597 "tx queued, slot %d, len %d\n", 2598 2598 tx_ring->prod_idx, skb->len); ··· 2611 2611 return NETDEV_TX_OK; 2612 2612 } 2613 2613 2614 - static void ql_free_shadow_space(struct ql_adapter *qdev) 2614 + static void qlge_free_shadow_space(struct qlge_adapter *qdev) 2615 2615 { 2616 2616 if (qdev->rx_ring_shadow_reg_area) { 2617 2617 dma_free_coherent(&qdev->pdev->dev, ··· 2629 2629 } 2630 2630 } 2631 2631 2632 - static int ql_alloc_shadow_space(struct ql_adapter *qdev) 2632 + static int qlge_alloc_shadow_space(struct qlge_adapter *qdev) 2633 2633 { 2634 2634 qdev->rx_ring_shadow_reg_area = 2635 2635 dma_alloc_coherent(&qdev->pdev->dev, PAGE_SIZE, ··· 2658 2658 return -ENOMEM; 2659 2659 } 2660 2660 2661 - static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring) 2661 + static void qlge_init_tx_ring(struct qlge_adapter *qdev, struct tx_ring *tx_ring) 2662 2662 { 2663 2663 
struct tx_ring_desc *tx_ring_desc; 2664 2664 int i; 2665 - struct ob_mac_iocb_req *mac_iocb_ptr; 2665 + struct qlge_ob_mac_iocb_req *mac_iocb_ptr; 2666 2666 2667 2667 mac_iocb_ptr = tx_ring->wq_base; 2668 2668 tx_ring_desc = tx_ring->q; ··· 2676 2676 atomic_set(&tx_ring->tx_count, tx_ring->wq_len); 2677 2677 } 2678 2678 2679 - static void ql_free_tx_resources(struct ql_adapter *qdev, 2680 - struct tx_ring *tx_ring) 2679 + static void qlge_free_tx_resources(struct qlge_adapter *qdev, 2680 + struct tx_ring *tx_ring) 2681 2681 { 2682 2682 if (tx_ring->wq_base) { 2683 2683 dma_free_coherent(&qdev->pdev->dev, tx_ring->wq_size, ··· 2688 2688 tx_ring->q = NULL; 2689 2689 } 2690 2690 2691 - static int ql_alloc_tx_resources(struct ql_adapter *qdev, 2692 - struct tx_ring *tx_ring) 2691 + static int qlge_alloc_tx_resources(struct qlge_adapter *qdev, 2692 + struct tx_ring *tx_ring) 2693 2693 { 2694 2694 tx_ring->wq_base = 2695 - dma_alloc_coherent(&qdev->pdev->dev, tx_ring->wq_size, 2696 - &tx_ring->wq_base_dma, GFP_ATOMIC); 2695 + dma_alloc_coherent(&qdev->pdev->dev, tx_ring->wq_size, 2696 + &tx_ring->wq_base_dma, GFP_ATOMIC); 2697 2697 2698 2698 if (!tx_ring->wq_base || 2699 2699 tx_ring->wq_base_dma & WQ_ADDR_ALIGN) 2700 2700 goto pci_alloc_err; 2701 2701 2702 2702 tx_ring->q = 2703 - kmalloc_array(tx_ring->wq_len, sizeof(struct tx_ring_desc), 2704 - GFP_KERNEL); 2703 + kmalloc_array(tx_ring->wq_len, sizeof(struct tx_ring_desc), 2704 + GFP_KERNEL); 2705 2705 if (!tx_ring->q) 2706 2706 goto err; 2707 2707 ··· 2715 2715 return -ENOMEM; 2716 2716 } 2717 2717 2718 - static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring) 2718 + static void qlge_free_lbq_buffers(struct qlge_adapter *qdev, struct rx_ring *rx_ring) 2719 2719 { 2720 2720 struct qlge_bq *lbq = &rx_ring->lbq; 2721 2721 unsigned int last_offset; 2722 2722 2723 - last_offset = ql_lbq_block_size(qdev) - qdev->lbq_buf_size; 2723 + last_offset = qlge_lbq_block_size(qdev) - qdev->lbq_buf_size; 
2724 2724 while (lbq->next_to_clean != lbq->next_to_use) { 2725 2725 struct qlge_bq_desc *lbq_desc = 2726 2726 &lbq->queue[lbq->next_to_clean]; 2727 2727 2728 2728 if (lbq_desc->p.pg_chunk.offset == last_offset) 2729 2729 dma_unmap_page(&qdev->pdev->dev, lbq_desc->dma_addr, 2730 - ql_lbq_block_size(qdev), 2730 + qlge_lbq_block_size(qdev), 2731 2731 DMA_FROM_DEVICE); 2732 2732 put_page(lbq_desc->p.pg_chunk.page); 2733 2733 ··· 2736 2736 2737 2737 if (rx_ring->master_chunk.page) { 2738 2738 dma_unmap_page(&qdev->pdev->dev, rx_ring->chunk_dma_addr, 2739 - ql_lbq_block_size(qdev), DMA_FROM_DEVICE); 2739 + qlge_lbq_block_size(qdev), DMA_FROM_DEVICE); 2740 2740 put_page(rx_ring->master_chunk.page); 2741 2741 rx_ring->master_chunk.page = NULL; 2742 2742 } 2743 2743 } 2744 2744 2745 - static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring) 2745 + static void qlge_free_sbq_buffers(struct qlge_adapter *qdev, struct rx_ring *rx_ring) 2746 2746 { 2747 2747 int i; 2748 2748 ··· 2767 2767 /* Free all large and small rx buffers associated 2768 2768 * with the completion queues for this device. 
2769 2769 */ 2770 - static void ql_free_rx_buffers(struct ql_adapter *qdev) 2770 + static void qlge_free_rx_buffers(struct qlge_adapter *qdev) 2771 2771 { 2772 2772 int i; 2773 2773 ··· 2775 2775 struct rx_ring *rx_ring = &qdev->rx_ring[i]; 2776 2776 2777 2777 if (rx_ring->lbq.queue) 2778 - ql_free_lbq_buffers(qdev, rx_ring); 2778 + qlge_free_lbq_buffers(qdev, rx_ring); 2779 2779 if (rx_ring->sbq.queue) 2780 - ql_free_sbq_buffers(qdev, rx_ring); 2780 + qlge_free_sbq_buffers(qdev, rx_ring); 2781 2781 } 2782 2782 } 2783 2783 2784 - static void ql_alloc_rx_buffers(struct ql_adapter *qdev) 2784 + static void qlge_alloc_rx_buffers(struct qlge_adapter *qdev) 2785 2785 { 2786 2786 int i; 2787 2787 2788 2788 for (i = 0; i < qdev->rss_ring_count; i++) 2789 - ql_update_buffer_queues(&qdev->rx_ring[i], GFP_KERNEL, 2790 - HZ / 2); 2789 + qlge_update_buffer_queues(&qdev->rx_ring[i], GFP_KERNEL, 2790 + HZ / 2); 2791 2791 } 2792 2792 2793 2793 static int qlge_init_bq(struct qlge_bq *bq) 2794 2794 { 2795 2795 struct rx_ring *rx_ring = QLGE_BQ_CONTAINER(bq); 2796 - struct ql_adapter *qdev = rx_ring->qdev; 2796 + struct qlge_adapter *qdev = rx_ring->qdev; 2797 2797 struct qlge_bq_desc *bq_desc; 2798 2798 __le64 *buf_ptr; 2799 2799 int i; ··· 2823 2823 return 0; 2824 2824 } 2825 2825 2826 - static void ql_free_rx_resources(struct ql_adapter *qdev, 2827 - struct rx_ring *rx_ring) 2826 + static void qlge_free_rx_resources(struct qlge_adapter *qdev, 2827 + struct rx_ring *rx_ring) 2828 2828 { 2829 2829 /* Free the small buffer queue. */ 2830 2830 if (rx_ring->sbq.base) { ··· 2860 2860 /* Allocate queues and buffers for this completions queue based 2861 2861 * on the values in the parameter structure. 
2862 2862 */ 2863 - static int ql_alloc_rx_resources(struct ql_adapter *qdev, 2864 - struct rx_ring *rx_ring) 2863 + static int qlge_alloc_rx_resources(struct qlge_adapter *qdev, 2864 + struct rx_ring *rx_ring) 2865 2865 { 2866 2866 /* 2867 2867 * Allocate the completion queue for this rx_ring. 2868 2868 */ 2869 2869 rx_ring->cq_base = 2870 - dma_alloc_coherent(&qdev->pdev->dev, rx_ring->cq_size, 2871 - &rx_ring->cq_base_dma, GFP_ATOMIC); 2870 + dma_alloc_coherent(&qdev->pdev->dev, rx_ring->cq_size, 2871 + &rx_ring->cq_base_dma, GFP_ATOMIC); 2872 2872 2873 2873 if (!rx_ring->cq_base) { 2874 2874 netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n"); ··· 2877 2877 2878 2878 if (rx_ring->cq_id < qdev->rss_ring_count && 2879 2879 (qlge_init_bq(&rx_ring->sbq) || qlge_init_bq(&rx_ring->lbq))) { 2880 - ql_free_rx_resources(qdev, rx_ring); 2880 + qlge_free_rx_resources(qdev, rx_ring); 2881 2881 return -ENOMEM; 2882 2882 } 2883 2883 2884 2884 return 0; 2885 2885 } 2886 2886 2887 - static void ql_tx_ring_clean(struct ql_adapter *qdev) 2887 + static void qlge_tx_ring_clean(struct qlge_adapter *qdev) 2888 2888 { 2889 2889 struct tx_ring *tx_ring; 2890 2890 struct tx_ring_desc *tx_ring_desc; ··· 2903 2903 "Freeing lost SKB %p, from queue %d, index %d.\n", 2904 2904 tx_ring_desc->skb, j, 2905 2905 tx_ring_desc->index); 2906 - ql_unmap_send(qdev, tx_ring_desc, 2907 - tx_ring_desc->map_cnt); 2906 + qlge_unmap_send(qdev, tx_ring_desc, 2907 + tx_ring_desc->map_cnt); 2908 2908 dev_kfree_skb(tx_ring_desc->skb); 2909 2909 tx_ring_desc->skb = NULL; 2910 2910 } ··· 2912 2912 } 2913 2913 } 2914 2914 2915 - static void ql_free_mem_resources(struct ql_adapter *qdev) 2915 + static void qlge_free_mem_resources(struct qlge_adapter *qdev) 2916 2916 { 2917 2917 int i; 2918 2918 2919 2919 for (i = 0; i < qdev->tx_ring_count; i++) 2920 - ql_free_tx_resources(qdev, &qdev->tx_ring[i]); 2920 + qlge_free_tx_resources(qdev, &qdev->tx_ring[i]); 2921 2921 for (i = 0; i < qdev->rx_ring_count; i++) 
2922 - ql_free_rx_resources(qdev, &qdev->rx_ring[i]); 2923 - ql_free_shadow_space(qdev); 2922 + qlge_free_rx_resources(qdev, &qdev->rx_ring[i]); 2923 + qlge_free_shadow_space(qdev); 2924 2924 } 2925 2925 2926 - static int ql_alloc_mem_resources(struct ql_adapter *qdev) 2926 + static int qlge_alloc_mem_resources(struct qlge_adapter *qdev) 2927 2927 { 2928 2928 int i; 2929 2929 2930 2930 /* Allocate space for our shadow registers and such. */ 2931 - if (ql_alloc_shadow_space(qdev)) 2931 + if (qlge_alloc_shadow_space(qdev)) 2932 2932 return -ENOMEM; 2933 2933 2934 2934 for (i = 0; i < qdev->rx_ring_count; i++) { 2935 - if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) { 2935 + if (qlge_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) { 2936 2936 netif_err(qdev, ifup, qdev->ndev, 2937 2937 "RX resource allocation failed.\n"); 2938 2938 goto err_mem; ··· 2940 2940 } 2941 2941 /* Allocate tx queue resources */ 2942 2942 for (i = 0; i < qdev->tx_ring_count; i++) { 2943 - if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) { 2943 + if (qlge_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) { 2944 2944 netif_err(qdev, ifup, qdev->ndev, 2945 2945 "TX resource allocation failed.\n"); 2946 2946 goto err_mem; ··· 2949 2949 return 0; 2950 2950 2951 2951 err_mem: 2952 - ql_free_mem_resources(qdev); 2952 + qlge_free_mem_resources(qdev); 2953 2953 return -ENOMEM; 2954 2954 } 2955 2955 ··· 2957 2957 * The control block is defined as 2958 2958 * "Completion Queue Initialization Control Block", or cqicb. 
2959 2959 */ 2960 - static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring) 2960 + static int qlge_start_rx_ring(struct qlge_adapter *qdev, struct rx_ring *rx_ring) 2961 2961 { 2962 2962 struct cqicb *cqicb = &rx_ring->cqicb; 2963 2963 void *shadow_reg = qdev->rx_ring_shadow_reg_area + ··· 2965 2965 u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma + 2966 2966 (rx_ring->cq_id * RX_RING_SHADOW_SPACE); 2967 2967 void __iomem *doorbell_area = 2968 - qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id)); 2968 + qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id)); 2969 2969 int err = 0; 2970 2970 u64 tmp; 2971 2971 __le64 *base_indirect_ptr; ··· 3012 3012 * Set up the control block load flags. 3013 3013 */ 3014 3014 cqicb->flags = FLAGS_LC | /* Load queue base address */ 3015 - FLAGS_LV | /* Load MSI-X vector */ 3016 - FLAGS_LI; /* Load irq delay values */ 3015 + FLAGS_LV | /* Load MSI-X vector */ 3016 + FLAGS_LI; /* Load irq delay values */ 3017 3017 if (rx_ring->cq_id < qdev->rss_ring_count) { 3018 3018 cqicb->flags |= FLAGS_LL; /* Load lbq values */ 3019 3019 tmp = (u64)rx_ring->lbq.base_dma; ··· 3043 3043 page_entries++; 3044 3044 } while (page_entries < MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN)); 3045 3045 cqicb->sbq_addr = 3046 - cpu_to_le64(rx_ring->sbq.base_indirect_dma); 3046 + cpu_to_le64(rx_ring->sbq.base_indirect_dma); 3047 3047 cqicb->sbq_buf_size = cpu_to_le16(SMALL_BUFFER_SIZE); 3048 3048 cqicb->sbq_len = cpu_to_le16(QLGE_FIT16(QLGE_BQ_LEN)); 3049 3049 rx_ring->sbq.next_to_use = 0; ··· 3053 3053 /* Inbound completion handling rx_rings run in 3054 3054 * separate NAPI contexts. 
3055 3055 */ 3056 - netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix, 3056 + netif_napi_add(qdev->ndev, &rx_ring->napi, qlge_napi_poll_msix, 3057 3057 64); 3058 3058 cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs); 3059 3059 cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames); ··· 3061 3061 cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs); 3062 3062 cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames); 3063 3063 } 3064 - err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb), 3065 - CFG_LCQ, rx_ring->cq_id); 3064 + err = qlge_write_cfg(qdev, cqicb, sizeof(struct cqicb), 3065 + CFG_LCQ, rx_ring->cq_id); 3066 3066 if (err) { 3067 3067 netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n"); 3068 3068 return err; ··· 3070 3070 return err; 3071 3071 } 3072 3072 3073 - static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring) 3073 + static int qlge_start_tx_ring(struct qlge_adapter *qdev, struct tx_ring *tx_ring) 3074 3074 { 3075 3075 struct wqicb *wqicb = (struct wqicb *)tx_ring; 3076 3076 void __iomem *doorbell_area = 3077 - qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id); 3077 + qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id); 3078 3078 void *shadow_reg = qdev->tx_ring_shadow_reg_area + 3079 - (tx_ring->wq_id * sizeof(u64)); 3079 + (tx_ring->wq_id * sizeof(u64)); 3080 3080 u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma + 3081 - (tx_ring->wq_id * sizeof(u64)); 3081 + (tx_ring->wq_id * sizeof(u64)); 3082 3082 int err = 0; 3083 3083 3084 3084 /* ··· 3105 3105 3106 3106 wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma); 3107 3107 3108 - ql_init_tx_ring(qdev, tx_ring); 3108 + qlge_init_tx_ring(qdev, tx_ring); 3109 3109 3110 - err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ, 3111 - (u16)tx_ring->wq_id); 3110 + err = qlge_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ, 3111 + (u16)tx_ring->wq_id); 3112 3112 if (err) { 3113 3113 netif_err(qdev, ifup, qdev->ndev, 
"Failed to load tx_ring.\n"); 3114 3114 return err; ··· 3116 3116 return err; 3117 3117 } 3118 3118 3119 - static void ql_disable_msix(struct ql_adapter *qdev) 3119 + static void qlge_disable_msix(struct qlge_adapter *qdev) 3120 3120 { 3121 3121 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) { 3122 3122 pci_disable_msix(qdev->pdev); ··· 3133 3133 * stored in qdev->intr_count. If we don't get that 3134 3134 * many then we reduce the count and try again. 3135 3135 */ 3136 - static void ql_enable_msix(struct ql_adapter *qdev) 3136 + static void qlge_enable_msix(struct qlge_adapter *qdev) 3137 3137 { 3138 3138 int i, err; 3139 3139 ··· 3195 3195 * and TX completion rings 0,1,2 and 3. Vector 1 would 3196 3196 * service RSS ring 1 and TX completion rings 4,5,6 and 7. 3197 3197 */ 3198 - static void ql_set_tx_vect(struct ql_adapter *qdev) 3198 + static void qlge_set_tx_vect(struct qlge_adapter *qdev) 3199 3199 { 3200 3200 int i, j, vect; 3201 3201 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count; ··· 3203 3203 if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) { 3204 3204 /* Assign irq vectors to TX rx_rings.*/ 3205 3205 for (vect = 0, j = 0, i = qdev->rss_ring_count; 3206 - i < qdev->rx_ring_count; i++) { 3206 + i < qdev->rx_ring_count; i++) { 3207 3207 if (j == tx_rings_per_vector) { 3208 3208 vect++; 3209 3209 j = 0; ··· 3225 3225 * rings. This function sets up a bit mask per vector 3226 3226 * that indicates which rings it services. 
3227 3227 */ 3228 - static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx) 3228 + static void qlge_set_irq_mask(struct qlge_adapter *qdev, struct intr_context *ctx) 3229 3229 { 3230 3230 int j, vect = ctx->intr; 3231 3231 u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count; ··· 3240 3240 */ 3241 3241 for (j = 0; j < tx_rings_per_vector; j++) { 3242 3242 ctx->irq_mask |= 3243 - (1 << qdev->rx_ring[qdev->rss_ring_count + 3244 - (vect * tx_rings_per_vector) + j].cq_id); 3243 + (1 << qdev->rx_ring[qdev->rss_ring_count + 3244 + (vect * tx_rings_per_vector) + j].cq_id); 3245 3245 } 3246 3246 } else { 3247 3247 /* For single vector we just shift each queue's ··· 3258 3258 * The intr_context structure is used to hook each vector 3259 3259 * to possibly different handlers. 3260 3260 */ 3261 - static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev) 3261 + static void qlge_resolve_queues_to_irqs(struct qlge_adapter *qdev) 3262 3262 { 3263 3263 int i = 0; 3264 3264 struct intr_context *intr_context = &qdev->intr_context[0]; ··· 3275 3275 /* Set up this vector's bit-mask that indicates 3276 3276 * which queues it services. 3277 3277 */ 3278 - ql_set_irq_mask(qdev, intr_context); 3278 + qlge_set_irq_mask(qdev, intr_context); 3279 3279 /* 3280 3280 * We set up each vectors enable/disable/read bits so 3281 3281 * there's no bit/mask calculations in the critical path. 
3282 3282 */ 3283 3283 intr_context->intr_en_mask = 3284 - INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | 3285 - INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD 3286 - | i; 3284 + INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | 3285 + INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD 3286 + | i; 3287 3287 intr_context->intr_dis_mask = 3288 - INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | 3289 - INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK | 3290 - INTR_EN_IHD | i; 3288 + INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | 3289 + INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK | 3290 + INTR_EN_IHD | i; 3291 3291 intr_context->intr_read_mask = 3292 - INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | 3293 - INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD | 3294 - i; 3292 + INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | 3293 + INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD | 3294 + i; 3295 3295 if (i == 0) { 3296 3296 /* The first vector/queue handles 3297 3297 * broadcast/multicast, fatal errors, ··· 3322 3322 * there's no bit/mask calculations in the critical path. 3323 3323 */ 3324 3324 intr_context->intr_en_mask = 3325 - INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE; 3325 + INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE; 3326 3326 intr_context->intr_dis_mask = 3327 - INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | 3328 - INTR_EN_TYPE_DISABLE; 3327 + INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | 3328 + INTR_EN_TYPE_DISABLE; 3329 3329 if (test_bit(QL_LEGACY_ENABLED, &qdev->flags)) { 3330 3330 /* Experience shows that when using INTx interrupts, 3331 3331 * the device does not always auto-mask INTR_EN_EN. ··· 3337 3337 intr_context->intr_dis_mask |= INTR_EN_EI << 16; 3338 3338 } 3339 3339 intr_context->intr_read_mask = 3340 - INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ; 3340 + INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ; 3341 3341 /* 3342 3342 * Single interrupt means one handler for all rings. 
3343 3343 */ ··· 3348 3348 * a single vector so it will service all RSS and 3349 3349 * TX completion rings. 3350 3350 */ 3351 - ql_set_irq_mask(qdev, intr_context); 3351 + qlge_set_irq_mask(qdev, intr_context); 3352 3352 } 3353 3353 /* Tell the TX completion rings which MSIx vector 3354 3354 * they will be using. 3355 3355 */ 3356 - ql_set_tx_vect(qdev); 3356 + qlge_set_tx_vect(qdev); 3357 3357 } 3358 3358 3359 - static void ql_free_irq(struct ql_adapter *qdev) 3359 + static void qlge_free_irq(struct qlge_adapter *qdev) 3360 3360 { 3361 3361 int i; 3362 3362 struct intr_context *intr_context = &qdev->intr_context[0]; ··· 3371 3371 } 3372 3372 } 3373 3373 } 3374 - ql_disable_msix(qdev); 3374 + qlge_disable_msix(qdev); 3375 3375 } 3376 3376 3377 - static int ql_request_irq(struct ql_adapter *qdev) 3377 + static int qlge_request_irq(struct qlge_adapter *qdev) 3378 3378 { 3379 3379 int i; 3380 3380 int status = 0; 3381 3381 struct pci_dev *pdev = qdev->pdev; 3382 3382 struct intr_context *intr_context = &qdev->intr_context[0]; 3383 3383 3384 - ql_resolve_queues_to_irqs(qdev); 3384 + qlge_resolve_queues_to_irqs(qdev); 3385 3385 3386 3386 for (i = 0; i < qdev->intr_count; i++, intr_context++) { 3387 3387 if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) { ··· 3408 3408 "%s: dev_id = 0x%p.\n", __func__, 3409 3409 &qdev->rx_ring[0]); 3410 3410 status = 3411 - request_irq(pdev->irq, qlge_isr, 3412 - test_bit(QL_MSI_ENABLED, &qdev->flags) 3413 - ? 0 3414 - : IRQF_SHARED, 3415 - intr_context->name, &qdev->rx_ring[0]); 3411 + request_irq(pdev->irq, qlge_isr, 3412 + test_bit(QL_MSI_ENABLED, &qdev->flags) 3413 + ? 
0 3414 + : IRQF_SHARED, 3415 + intr_context->name, &qdev->rx_ring[0]); 3416 3416 if (status) 3417 3417 goto err_irq; 3418 3418 ··· 3425 3425 return status; 3426 3426 err_irq: 3427 3427 netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n"); 3428 - ql_free_irq(qdev); 3428 + qlge_free_irq(qdev); 3429 3429 return status; 3430 3430 } 3431 3431 3432 - static int ql_start_rss(struct ql_adapter *qdev) 3432 + static int qlge_start_rss(struct qlge_adapter *qdev) 3433 3433 { 3434 3434 static const u8 init_hash_seed[] = { 3435 3435 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, ··· 3459 3459 memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40); 3460 3460 memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16); 3461 3461 3462 - status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0); 3462 + status = qlge_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0); 3463 3463 if (status) { 3464 3464 netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n"); 3465 3465 return status; ··· 3467 3467 return status; 3468 3468 } 3469 3469 3470 - static int ql_clear_routing_entries(struct ql_adapter *qdev) 3470 + static int qlge_clear_routing_entries(struct qlge_adapter *qdev) 3471 3471 { 3472 3472 int i, status = 0; 3473 3473 3474 - status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); 3474 + status = qlge_sem_spinlock(qdev, SEM_RT_IDX_MASK); 3475 3475 if (status) 3476 3476 return status; 3477 3477 /* Clear all the entries in the routing table. */ 3478 3478 for (i = 0; i < 16; i++) { 3479 - status = ql_set_routing_reg(qdev, i, 0, 0); 3479 + status = qlge_set_routing_reg(qdev, i, 0, 0); 3480 3480 if (status) { 3481 3481 netif_err(qdev, ifup, qdev->ndev, 3482 3482 "Failed to init routing register for CAM packets.\n"); 3483 3483 break; 3484 3484 } 3485 3485 } 3486 - ql_sem_unlock(qdev, SEM_RT_IDX_MASK); 3486 + qlge_sem_unlock(qdev, SEM_RT_IDX_MASK); 3487 3487 return status; 3488 3488 } 3489 3489 3490 3490 /* Initialize the frame-to-queue routing. 
*/ 3491 - static int ql_route_initialize(struct ql_adapter *qdev) 3491 + static int qlge_route_initialize(struct qlge_adapter *qdev) 3492 3492 { 3493 3493 int status = 0; 3494 3494 3495 3495 /* Clear all the entries in the routing table. */ 3496 - status = ql_clear_routing_entries(qdev); 3496 + status = qlge_clear_routing_entries(qdev); 3497 3497 if (status) 3498 3498 return status; 3499 3499 3500 - status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); 3500 + status = qlge_sem_spinlock(qdev, SEM_RT_IDX_MASK); 3501 3501 if (status) 3502 3502 return status; 3503 3503 3504 - status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT, 3505 - RT_IDX_IP_CSUM_ERR, 1); 3504 + status = qlge_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT, 3505 + RT_IDX_IP_CSUM_ERR, 1); 3506 3506 if (status) { 3507 3507 netif_err(qdev, ifup, qdev->ndev, 3508 3508 "Failed to init routing register for IP CSUM error packets.\n"); 3509 3509 goto exit; 3510 3510 } 3511 - status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT, 3512 - RT_IDX_TU_CSUM_ERR, 1); 3511 + status = qlge_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT, 3512 + RT_IDX_TU_CSUM_ERR, 1); 3513 3513 if (status) { 3514 3514 netif_err(qdev, ifup, qdev->ndev, 3515 3515 "Failed to init routing register for TCP/UDP CSUM error packets.\n"); 3516 3516 goto exit; 3517 3517 } 3518 - status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1); 3518 + status = qlge_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1); 3519 3519 if (status) { 3520 3520 netif_err(qdev, ifup, qdev->ndev, 3521 3521 "Failed to init routing register for broadcast packets.\n"); ··· 3525 3525 * routing block. 
3526 3526 */ 3527 3527 if (qdev->rss_ring_count > 1) { 3528 - status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT, 3529 - RT_IDX_RSS_MATCH, 1); 3528 + status = qlge_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT, 3529 + RT_IDX_RSS_MATCH, 1); 3530 3530 if (status) { 3531 3531 netif_err(qdev, ifup, qdev->ndev, 3532 3532 "Failed to init routing register for MATCH RSS packets.\n"); ··· 3534 3534 } 3535 3535 } 3536 3536 3537 - status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT, 3538 - RT_IDX_CAM_HIT, 1); 3537 + status = qlge_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT, 3538 + RT_IDX_CAM_HIT, 1); 3539 3539 if (status) 3540 3540 netif_err(qdev, ifup, qdev->ndev, 3541 3541 "Failed to init routing register for CAM packets.\n"); 3542 3542 exit: 3543 - ql_sem_unlock(qdev, SEM_RT_IDX_MASK); 3543 + qlge_sem_unlock(qdev, SEM_RT_IDX_MASK); 3544 3544 return status; 3545 3545 } 3546 3546 3547 - int ql_cam_route_initialize(struct ql_adapter *qdev) 3547 + int qlge_cam_route_initialize(struct qlge_adapter *qdev) 3548 3548 { 3549 3549 int status, set; 3550 3550 ··· 3552 3552 * determine if we are setting or clearing 3553 3553 * the MAC address in the CAM. 
3554 3554 */ 3555 - set = ql_read32(qdev, STS); 3555 + set = qlge_read32(qdev, STS); 3556 3556 set &= qdev->port_link_up; 3557 - status = ql_set_mac_addr(qdev, set); 3557 + status = qlge_set_mac_addr(qdev, set); 3558 3558 if (status) { 3559 3559 netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n"); 3560 3560 return status; 3561 3561 } 3562 3562 3563 - status = ql_route_initialize(qdev); 3563 + status = qlge_route_initialize(qdev); 3564 3564 if (status) 3565 3565 netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n"); 3566 3566 3567 3567 return status; 3568 3568 } 3569 3569 3570 - static int ql_adapter_initialize(struct ql_adapter *qdev) 3570 + static int qlge_adapter_initialize(struct qlge_adapter *qdev) 3571 3571 { 3572 3572 u32 value, mask; 3573 3573 int i; ··· 3578 3578 */ 3579 3579 value = SYS_EFE | SYS_FAE; 3580 3580 mask = value << 16; 3581 - ql_write32(qdev, SYS, mask | value); 3581 + qlge_write32(qdev, SYS, mask | value); 3582 3582 3583 3583 /* Set the default queue, and VLAN behavior. */ 3584 3584 value = NIC_RCV_CFG_DFQ; ··· 3587 3587 value |= NIC_RCV_CFG_RV; 3588 3588 mask |= (NIC_RCV_CFG_RV << 16); 3589 3589 } 3590 - ql_write32(qdev, NIC_RCV_CFG, (mask | value)); 3590 + qlge_write32(qdev, NIC_RCV_CFG, (mask | value)); 3591 3591 3592 3592 /* Set the MPI interrupt to enabled. */ 3593 - ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI); 3593 + qlge_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI); 3594 3594 3595 3595 /* Enable the function, set pagesize, enable error checking. */ 3596 3596 value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND | 3597 - FSC_EC | FSC_VM_PAGE_4K; 3597 + FSC_EC | FSC_VM_PAGE_4K; 3598 3598 value |= SPLT_SETTING; 3599 3599 3600 3600 /* Set/clear header splitting. 
*/ 3601 3601 mask = FSC_VM_PAGESIZE_MASK | 3602 - FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16); 3603 - ql_write32(qdev, FSC, mask | value); 3602 + FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16); 3603 + qlge_write32(qdev, FSC, mask | value); 3604 3604 3605 - ql_write32(qdev, SPLT_HDR, SPLT_LEN); 3605 + qlge_write32(qdev, SPLT_HDR, SPLT_LEN); 3606 3606 3607 3607 /* Set RX packet routing to use port/pci function on which the 3608 3608 * packet arrived on in addition to usual frame routing. 3609 3609 * This is helpful on bonding where both interfaces can have 3610 3610 * the same MAC address. 3611 3611 */ 3612 - ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ); 3612 + qlge_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ); 3613 3613 /* Reroute all packets to our Interface. 3614 3614 * They may have been routed to MPI firmware 3615 3615 * due to WOL. 3616 3616 */ 3617 - value = ql_read32(qdev, MGMT_RCV_CFG); 3617 + value = qlge_read32(qdev, MGMT_RCV_CFG); 3618 3618 value &= ~MGMT_RCV_CFG_RM; 3619 3619 mask = 0xffff0000; 3620 3620 3621 3621 /* Sticky reg needs clearing due to WOL. */ 3622 - ql_write32(qdev, MGMT_RCV_CFG, mask); 3623 - ql_write32(qdev, MGMT_RCV_CFG, mask | value); 3622 + qlge_write32(qdev, MGMT_RCV_CFG, mask); 3623 + qlge_write32(qdev, MGMT_RCV_CFG, mask | value); 3624 3624 3625 3625 /* Default WOL is enable on Mezz cards */ 3626 3626 if (qdev->pdev->subsystem_device == 0x0068 || ··· 3629 3629 3630 3630 /* Start up the rx queues. */ 3631 3631 for (i = 0; i < qdev->rx_ring_count; i++) { 3632 - status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]); 3632 + status = qlge_start_rx_ring(qdev, &qdev->rx_ring[i]); 3633 3633 if (status) { 3634 3634 netif_err(qdev, ifup, qdev->ndev, 3635 3635 "Failed to start rx ring[%d].\n", i); ··· 3641 3641 * then download a RICB to configure RSS. 
3642 3642 */ 3643 3643 if (qdev->rss_ring_count > 1) { 3644 - status = ql_start_rss(qdev); 3644 + status = qlge_start_rss(qdev); 3645 3645 if (status) { 3646 3646 netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n"); 3647 3647 return status; ··· 3650 3650 3651 3651 /* Start up the tx queues. */ 3652 3652 for (i = 0; i < qdev->tx_ring_count; i++) { 3653 - status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]); 3653 + status = qlge_start_tx_ring(qdev, &qdev->tx_ring[i]); 3654 3654 if (status) { 3655 3655 netif_err(qdev, ifup, qdev->ndev, 3656 3656 "Failed to start tx ring[%d].\n", i); ··· 3664 3664 netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n"); 3665 3665 3666 3666 /* Set up the MAC address and frame routing filter. */ 3667 - status = ql_cam_route_initialize(qdev); 3667 + status = qlge_cam_route_initialize(qdev); 3668 3668 if (status) { 3669 3669 netif_err(qdev, ifup, qdev->ndev, 3670 3670 "Failed to init CAM/Routing tables.\n"); ··· 3679 3679 } 3680 3680 3681 3681 /* Issue soft reset to chip. */ 3682 - static int ql_adapter_reset(struct ql_adapter *qdev) 3682 + static int qlge_adapter_reset(struct qlge_adapter *qdev) 3683 3683 { 3684 3684 u32 value; 3685 3685 int status = 0; 3686 3686 unsigned long end_jiffies; 3687 3687 3688 3688 /* Clear all the entries in the routing table. */ 3689 - status = ql_clear_routing_entries(qdev); 3689 + status = qlge_clear_routing_entries(qdev); 3690 3690 if (status) { 3691 3691 netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n"); 3692 3692 return status; ··· 3697 3697 */ 3698 3698 if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) { 3699 3699 /* Stop management traffic. */ 3700 - ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP); 3700 + qlge_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP); 3701 3701 3702 3702 /* Wait for the NIC and MGMNT FIFOs to empty. 
*/ 3703 - ql_wait_fifo_empty(qdev); 3703 + qlge_wait_fifo_empty(qdev); 3704 3704 } else { 3705 3705 clear_bit(QL_ASIC_RECOVERY, &qdev->flags); 3706 3706 } 3707 3707 3708 - ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR); 3708 + qlge_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR); 3709 3709 3710 3710 end_jiffies = jiffies + usecs_to_jiffies(30); 3711 3711 do { 3712 - value = ql_read32(qdev, RST_FO); 3712 + value = qlge_read32(qdev, RST_FO); 3713 3713 if ((value & RST_FO_FR) == 0) 3714 3714 break; 3715 3715 cpu_relax(); ··· 3722 3722 } 3723 3723 3724 3724 /* Resume management traffic. */ 3725 - ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME); 3725 + qlge_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME); 3726 3726 return status; 3727 3727 } 3728 3728 3729 - static void ql_display_dev_info(struct net_device *ndev) 3729 + static void qlge_display_dev_info(struct net_device *ndev) 3730 3730 { 3731 - struct ql_adapter *qdev = netdev_priv(ndev); 3731 + struct qlge_adapter *qdev = netdev_priv(ndev); 3732 3732 3733 3733 netif_info(qdev, probe, qdev->ndev, 3734 3734 "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, XG Roll = %d, XG Rev = %d.\n", ··· 3742 3742 "MAC address %pM\n", ndev->dev_addr); 3743 3743 } 3744 3744 3745 - static int ql_wol(struct ql_adapter *qdev) 3745 + static int qlge_wol(struct qlge_adapter *qdev) 3746 3746 { 3747 3747 int status = 0; 3748 3748 u32 wol = MB_WOL_DISABLE; ··· 3755 3755 */ 3756 3756 3757 3757 if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST | 3758 - WAKE_MCAST | WAKE_BCAST)) { 3758 + WAKE_MCAST | WAKE_BCAST)) { 3759 3759 netif_err(qdev, ifdown, qdev->ndev, 3760 3760 "Unsupported WOL parameter. 
qdev->wol = 0x%x.\n", 3761 3761 qdev->wol); ··· 3763 3763 } 3764 3764 3765 3765 if (qdev->wol & WAKE_MAGIC) { 3766 - status = ql_mb_wol_set_magic(qdev, 1); 3766 + status = qlge_mb_wol_set_magic(qdev, 1); 3767 3767 if (status) { 3768 3768 netif_err(qdev, ifdown, qdev->ndev, 3769 3769 "Failed to set magic packet on %s.\n", ··· 3779 3779 3780 3780 if (qdev->wol) { 3781 3781 wol |= MB_WOL_MODE_ON; 3782 - status = ql_mb_wol_mode(qdev, wol); 3782 + status = qlge_mb_wol_mode(qdev, wol); 3783 3783 netif_err(qdev, drv, qdev->ndev, 3784 3784 "WOL %s (wol code 0x%x) on %s\n", 3785 3785 (status == 0) ? "Successfully set" : "Failed", ··· 3789 3789 return status; 3790 3790 } 3791 3791 3792 - static void ql_cancel_all_work_sync(struct ql_adapter *qdev) 3792 + static void qlge_cancel_all_work_sync(struct qlge_adapter *qdev) 3793 3793 { 3794 3794 /* Don't kill the reset worker thread if we 3795 3795 * are in the process of recovery. ··· 3803 3803 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work); 3804 3804 } 3805 3805 3806 - static int ql_adapter_down(struct ql_adapter *qdev) 3806 + static int qlge_adapter_down(struct qlge_adapter *qdev) 3807 3807 { 3808 3808 int i, status = 0; 3809 3809 3810 - ql_link_off(qdev); 3810 + qlge_link_off(qdev); 3811 3811 3812 - ql_cancel_all_work_sync(qdev); 3812 + qlge_cancel_all_work_sync(qdev); 3813 3813 3814 3814 for (i = 0; i < qdev->rss_ring_count; i++) 3815 3815 napi_disable(&qdev->rx_ring[i].napi); 3816 3816 3817 3817 clear_bit(QL_ADAPTER_UP, &qdev->flags); 3818 3818 3819 - ql_disable_interrupts(qdev); 3819 + qlge_disable_interrupts(qdev); 3820 3820 3821 - ql_tx_ring_clean(qdev); 3821 + qlge_tx_ring_clean(qdev); 3822 3822 3823 3823 /* Call netif_napi_del() from common point. 
3824 - */ 3824 + */ 3825 3825 for (i = 0; i < qdev->rss_ring_count; i++) 3826 3826 netif_napi_del(&qdev->rx_ring[i].napi); 3827 3827 3828 - status = ql_adapter_reset(qdev); 3828 + status = qlge_adapter_reset(qdev); 3829 3829 if (status) 3830 3830 netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n", 3831 3831 qdev->func); 3832 - ql_free_rx_buffers(qdev); 3832 + qlge_free_rx_buffers(qdev); 3833 3833 3834 3834 return status; 3835 3835 } 3836 3836 3837 - static int ql_adapter_up(struct ql_adapter *qdev) 3837 + static int qlge_adapter_up(struct qlge_adapter *qdev) 3838 3838 { 3839 3839 int err = 0; 3840 3840 3841 - err = ql_adapter_initialize(qdev); 3841 + err = qlge_adapter_initialize(qdev); 3842 3842 if (err) { 3843 3843 netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n"); 3844 3844 goto err_init; 3845 3845 } 3846 3846 set_bit(QL_ADAPTER_UP, &qdev->flags); 3847 - ql_alloc_rx_buffers(qdev); 3847 + qlge_alloc_rx_buffers(qdev); 3848 3848 /* If the port is initialized and the 3849 3849 * link is up the turn on the carrier. 3850 3850 */ 3851 - if ((ql_read32(qdev, STS) & qdev->port_init) && 3852 - (ql_read32(qdev, STS) & qdev->port_link_up)) 3853 - ql_link_on(qdev); 3851 + if ((qlge_read32(qdev, STS) & qdev->port_init) && 3852 + (qlge_read32(qdev, STS) & qdev->port_link_up)) 3853 + qlge_link_on(qdev); 3854 3854 /* Restore rx mode. */ 3855 3855 clear_bit(QL_ALLMULTI, &qdev->flags); 3856 3856 clear_bit(QL_PROMISCUOUS, &qdev->flags); ··· 3859 3859 /* Restore vlan setting. 
*/ 3860 3860 qlge_restore_vlan(qdev); 3861 3861 3862 - ql_enable_interrupts(qdev); 3863 - ql_enable_all_completion_interrupts(qdev); 3862 + qlge_enable_interrupts(qdev); 3863 + qlge_enable_all_completion_interrupts(qdev); 3864 3864 netif_tx_start_all_queues(qdev->ndev); 3865 3865 3866 3866 return 0; 3867 3867 err_init: 3868 - ql_adapter_reset(qdev); 3868 + qlge_adapter_reset(qdev); 3869 3869 return err; 3870 3870 } 3871 3871 3872 - static void ql_release_adapter_resources(struct ql_adapter *qdev) 3872 + static void qlge_release_adapter_resources(struct qlge_adapter *qdev) 3873 3873 { 3874 - ql_free_mem_resources(qdev); 3875 - ql_free_irq(qdev); 3874 + qlge_free_mem_resources(qdev); 3875 + qlge_free_irq(qdev); 3876 3876 } 3877 3877 3878 - static int ql_get_adapter_resources(struct ql_adapter *qdev) 3878 + static int qlge_get_adapter_resources(struct qlge_adapter *qdev) 3879 3879 { 3880 - if (ql_alloc_mem_resources(qdev)) { 3880 + if (qlge_alloc_mem_resources(qdev)) { 3881 3881 netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n"); 3882 3882 return -ENOMEM; 3883 3883 } 3884 - return ql_request_irq(qdev); 3884 + return qlge_request_irq(qdev); 3885 3885 } 3886 3886 3887 3887 static int qlge_close(struct net_device *ndev) 3888 3888 { 3889 - struct ql_adapter *qdev = netdev_priv(ndev); 3889 + struct qlge_adapter *qdev = netdev_priv(ndev); 3890 3890 int i; 3891 3891 3892 3892 /* If we hit pci_channel_io_perm_failure ··· 3910 3910 for (i = 0; i < qdev->rss_ring_count; i++) 3911 3911 cancel_delayed_work_sync(&qdev->rx_ring[i].refill_work); 3912 3912 3913 - ql_adapter_down(qdev); 3914 - ql_release_adapter_resources(qdev); 3913 + qlge_adapter_down(qdev); 3914 + qlge_release_adapter_resources(qdev); 3915 3915 return 0; 3916 3916 } 3917 3917 3918 - static void qlge_set_lb_size(struct ql_adapter *qdev) 3918 + static void qlge_set_lb_size(struct qlge_adapter *qdev) 3919 3919 { 3920 3920 if (qdev->ndev->mtu <= 1500) 3921 3921 qdev->lbq_buf_size = LARGE_BUFFER_MIN_SIZE; 
··· 3924 3924 qdev->lbq_buf_order = get_order(qdev->lbq_buf_size); 3925 3925 } 3926 3926 3927 - static int ql_configure_rings(struct ql_adapter *qdev) 3927 + static int qlge_configure_rings(struct qlge_adapter *qdev) 3928 3928 { 3929 3929 int i; 3930 3930 struct rx_ring *rx_ring; ··· 3933 3933 3934 3934 /* In a perfect world we have one RSS ring for each CPU 3935 3935 * and each has it's own vector. To do that we ask for 3936 - * cpu_cnt vectors. ql_enable_msix() will adjust the 3936 + * cpu_cnt vectors. qlge_enable_msix() will adjust the 3937 3937 * vector count to what we actually get. We then 3938 3938 * allocate an RSS ring for each. 3939 3939 * Essentially, we are doing min(cpu_count, msix_vector_count). 3940 3940 */ 3941 3941 qdev->intr_count = cpu_cnt; 3942 - ql_enable_msix(qdev); 3942 + qlge_enable_msix(qdev); 3943 3943 /* Adjust the RSS ring count to the actual vector count. */ 3944 3944 qdev->rss_ring_count = qdev->intr_count; 3945 3945 qdev->tx_ring_count = cpu_cnt; ··· 3952 3952 tx_ring->wq_id = i; 3953 3953 tx_ring->wq_len = qdev->tx_ring_size; 3954 3954 tx_ring->wq_size = 3955 - tx_ring->wq_len * sizeof(struct ob_mac_iocb_req); 3955 + tx_ring->wq_len * sizeof(struct qlge_ob_mac_iocb_req); 3956 3956 3957 3957 /* 3958 3958 * The completion queue ID for the tx rings start ··· 3973 3973 */ 3974 3974 rx_ring->cq_len = qdev->rx_ring_size; 3975 3975 rx_ring->cq_size = 3976 - rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb); 3976 + rx_ring->cq_len * sizeof(struct qlge_net_rsp_iocb); 3977 3977 rx_ring->lbq.type = QLGE_LB; 3978 3978 rx_ring->sbq.type = QLGE_SB; 3979 3979 INIT_DELAYED_WORK(&rx_ring->refill_work, ··· 3985 3985 /* outbound cq is same size as tx_ring it services. 
*/ 3986 3986 rx_ring->cq_len = qdev->tx_ring_size; 3987 3987 rx_ring->cq_size = 3988 - rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb); 3988 + rx_ring->cq_len * sizeof(struct qlge_net_rsp_iocb); 3989 3989 } 3990 3990 } 3991 3991 return 0; ··· 3994 3994 static int qlge_open(struct net_device *ndev) 3995 3995 { 3996 3996 int err = 0; 3997 - struct ql_adapter *qdev = netdev_priv(ndev); 3997 + struct qlge_adapter *qdev = netdev_priv(ndev); 3998 3998 3999 - err = ql_adapter_reset(qdev); 3999 + err = qlge_adapter_reset(qdev); 4000 4000 if (err) 4001 4001 return err; 4002 4002 4003 4003 qlge_set_lb_size(qdev); 4004 - err = ql_configure_rings(qdev); 4004 + err = qlge_configure_rings(qdev); 4005 4005 if (err) 4006 4006 return err; 4007 4007 4008 - err = ql_get_adapter_resources(qdev); 4008 + err = qlge_get_adapter_resources(qdev); 4009 4009 if (err) 4010 4010 goto error_up; 4011 4011 4012 - err = ql_adapter_up(qdev); 4012 + err = qlge_adapter_up(qdev); 4013 4013 if (err) 4014 4014 goto error_up; 4015 4015 4016 4016 return err; 4017 4017 4018 4018 error_up: 4019 - ql_release_adapter_resources(qdev); 4019 + qlge_release_adapter_resources(qdev); 4020 4020 return err; 4021 4021 } 4022 4022 4023 - static int ql_change_rx_buffers(struct ql_adapter *qdev) 4023 + static int qlge_change_rx_buffers(struct qlge_adapter *qdev) 4024 4024 { 4025 4025 int status; 4026 4026 ··· 4041 4041 } 4042 4042 } 4043 4043 4044 - status = ql_adapter_down(qdev); 4044 + status = qlge_adapter_down(qdev); 4045 4045 if (status) 4046 4046 goto error; 4047 4047 4048 4048 qlge_set_lb_size(qdev); 4049 4049 4050 - status = ql_adapter_up(qdev); 4050 + status = qlge_adapter_up(qdev); 4051 4051 if (status) 4052 4052 goto error; 4053 4053 ··· 4062 4062 4063 4063 static int qlge_change_mtu(struct net_device *ndev, int new_mtu) 4064 4064 { 4065 - struct ql_adapter *qdev = netdev_priv(ndev); 4065 + struct qlge_adapter *qdev = netdev_priv(ndev); 4066 4066 int status; 4067 4067 4068 4068 if (ndev->mtu == 1500 && 
new_mtu == 9000) ··· 4080 4080 if (!netif_running(qdev->ndev)) 4081 4081 return 0; 4082 4082 4083 - status = ql_change_rx_buffers(qdev); 4083 + status = qlge_change_rx_buffers(qdev); 4084 4084 if (status) { 4085 4085 netif_err(qdev, ifup, qdev->ndev, 4086 4086 "Changing MTU failed.\n"); ··· 4092 4092 static struct net_device_stats *qlge_get_stats(struct net_device 4093 4093 *ndev) 4094 4094 { 4095 - struct ql_adapter *qdev = netdev_priv(ndev); 4095 + struct qlge_adapter *qdev = netdev_priv(ndev); 4096 4096 struct rx_ring *rx_ring = &qdev->rx_ring[0]; 4097 4097 struct tx_ring *tx_ring = &qdev->tx_ring[0]; 4098 4098 unsigned long pkts, mcast, dropped, errors, bytes; ··· 4128 4128 4129 4129 static void qlge_set_multicast_list(struct net_device *ndev) 4130 4130 { 4131 - struct ql_adapter *qdev = netdev_priv(ndev); 4131 + struct qlge_adapter *qdev = netdev_priv(ndev); 4132 4132 struct netdev_hw_addr *ha; 4133 4133 int i, status; 4134 4134 4135 - status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); 4135 + status = qlge_sem_spinlock(qdev, SEM_RT_IDX_MASK); 4136 4136 if (status) 4137 4137 return; 4138 4138 /* ··· 4141 4141 */ 4142 4142 if (ndev->flags & IFF_PROMISC) { 4143 4143 if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) { 4144 - if (ql_set_routing_reg 4144 + if (qlge_set_routing_reg 4145 4145 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) { 4146 4146 netif_err(qdev, hw, qdev->ndev, 4147 4147 "Failed to set promiscuous mode.\n"); ··· 4151 4151 } 4152 4152 } else { 4153 4153 if (test_bit(QL_PROMISCUOUS, &qdev->flags)) { 4154 - if (ql_set_routing_reg 4154 + if (qlge_set_routing_reg 4155 4155 (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) { 4156 4156 netif_err(qdev, hw, qdev->ndev, 4157 4157 "Failed to clear promiscuous mode.\n"); ··· 4168 4168 if ((ndev->flags & IFF_ALLMULTI) || 4169 4169 (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) { 4170 4170 if (!test_bit(QL_ALLMULTI, &qdev->flags)) { 4171 - if (ql_set_routing_reg 4171 + if (qlge_set_routing_reg 4172 4172 (qdev, 
RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) { 4173 4173 netif_err(qdev, hw, qdev->ndev, 4174 4174 "Failed to set all-multi mode.\n"); ··· 4178 4178 } 4179 4179 } else { 4180 4180 if (test_bit(QL_ALLMULTI, &qdev->flags)) { 4181 - if (ql_set_routing_reg 4181 + if (qlge_set_routing_reg 4182 4182 (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) { 4183 4183 netif_err(qdev, hw, qdev->ndev, 4184 4184 "Failed to clear all-multi mode.\n"); ··· 4189 4189 } 4190 4190 4191 4191 if (!netdev_mc_empty(ndev)) { 4192 - status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); 4192 + status = qlge_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); 4193 4193 if (status) 4194 4194 goto exit; 4195 4195 i = 0; 4196 4196 netdev_for_each_mc_addr(ha, ndev) { 4197 - if (ql_set_mac_addr_reg(qdev, (u8 *)ha->addr, 4198 - MAC_ADDR_TYPE_MULTI_MAC, i)) { 4197 + if (qlge_set_mac_addr_reg(qdev, (u8 *)ha->addr, 4198 + MAC_ADDR_TYPE_MULTI_MAC, i)) { 4199 4199 netif_err(qdev, hw, qdev->ndev, 4200 4200 "Failed to loadmulticast address.\n"); 4201 - ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); 4201 + qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK); 4202 4202 goto exit; 4203 4203 } 4204 4204 i++; 4205 4205 } 4206 - ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); 4207 - if (ql_set_routing_reg 4206 + qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK); 4207 + if (qlge_set_routing_reg 4208 4208 (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) { 4209 4209 netif_err(qdev, hw, qdev->ndev, 4210 4210 "Failed to set multicast match mode.\n"); ··· 4213 4213 } 4214 4214 } 4215 4215 exit: 4216 - ql_sem_unlock(qdev, SEM_RT_IDX_MASK); 4216 + qlge_sem_unlock(qdev, SEM_RT_IDX_MASK); 4217 4217 } 4218 4218 4219 4219 static int qlge_set_mac_address(struct net_device *ndev, void *p) 4220 4220 { 4221 - struct ql_adapter *qdev = netdev_priv(ndev); 4221 + struct qlge_adapter *qdev = netdev_priv(ndev); 4222 4222 struct sockaddr *addr = p; 4223 4223 int status; 4224 4224 ··· 4228 4228 /* Update local copy of current mac address. 
*/ 4229 4229 memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len); 4230 4230 4231 - status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); 4231 + status = qlge_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); 4232 4232 if (status) 4233 4233 return status; 4234 - status = ql_set_mac_addr_reg(qdev, (u8 *)ndev->dev_addr, 4235 - MAC_ADDR_TYPE_CAM_MAC, 4236 - qdev->func * MAX_CQ); 4234 + status = qlge_set_mac_addr_reg(qdev, (u8 *)ndev->dev_addr, 4235 + MAC_ADDR_TYPE_CAM_MAC, 4236 + qdev->func * MAX_CQ); 4237 4237 if (status) 4238 4238 netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n"); 4239 - ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); 4239 + qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK); 4240 4240 return status; 4241 4241 } 4242 4242 4243 4243 static void qlge_tx_timeout(struct net_device *ndev, unsigned int txqueue) 4244 4244 { 4245 - struct ql_adapter *qdev = netdev_priv(ndev); 4245 + struct qlge_adapter *qdev = netdev_priv(ndev); 4246 4246 4247 - ql_queue_asic_error(qdev); 4247 + qlge_queue_asic_error(qdev); 4248 4248 } 4249 4249 4250 - static void ql_asic_reset_work(struct work_struct *work) 4250 + static void qlge_asic_reset_work(struct work_struct *work) 4251 4251 { 4252 - struct ql_adapter *qdev = 4253 - container_of(work, struct ql_adapter, asic_reset_work.work); 4252 + struct qlge_adapter *qdev = 4253 + container_of(work, struct qlge_adapter, asic_reset_work.work); 4254 4254 int status; 4255 4255 4256 4256 rtnl_lock(); 4257 - status = ql_adapter_down(qdev); 4257 + status = qlge_adapter_down(qdev); 4258 4258 if (status) 4259 4259 goto error; 4260 4260 4261 - status = ql_adapter_up(qdev); 4261 + status = qlge_adapter_up(qdev); 4262 4262 if (status) 4263 4263 goto error; 4264 4264 ··· 4279 4279 } 4280 4280 4281 4281 static const struct nic_operations qla8012_nic_ops = { 4282 - .get_flash = ql_get_8012_flash_params, 4283 - .port_initialize = ql_8012_port_initialize, 4282 + .get_flash = qlge_get_8012_flash_params, 4283 + .port_initialize = 
qlge_8012_port_initialize, 4284 4284 }; 4285 4285 4286 4286 static const struct nic_operations qla8000_nic_ops = { 4287 - .get_flash = ql_get_8000_flash_params, 4288 - .port_initialize = ql_8000_port_initialize, 4287 + .get_flash = qlge_get_8000_flash_params, 4288 + .port_initialize = qlge_8000_port_initialize, 4289 4289 }; 4290 4290 4291 4291 /* Find the pcie function number for the other NIC ··· 4295 4295 * after a fatal firmware error, or doing a firmware 4296 4296 * coredump. 4297 4297 */ 4298 - static int ql_get_alt_pcie_func(struct ql_adapter *qdev) 4298 + static int qlge_get_alt_pcie_func(struct qlge_adapter *qdev) 4299 4299 { 4300 4300 int status = 0; 4301 4301 u32 temp; 4302 4302 u32 nic_func1, nic_func2; 4303 4303 4304 - status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG, 4305 - &temp); 4304 + status = qlge_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG, 4305 + &temp); 4306 4306 if (status) 4307 4307 return status; 4308 4308 4309 4309 nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) & 4310 - MPI_TEST_NIC_FUNC_MASK); 4310 + MPI_TEST_NIC_FUNC_MASK); 4311 4311 nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) & 4312 - MPI_TEST_NIC_FUNC_MASK); 4312 + MPI_TEST_NIC_FUNC_MASK); 4313 4313 4314 4314 if (qdev->func == nic_func1) 4315 4315 qdev->alt_func = nic_func2; ··· 4321 4321 return status; 4322 4322 } 4323 4323 4324 - static int ql_get_board_info(struct ql_adapter *qdev) 4324 + static int qlge_get_board_info(struct qlge_adapter *qdev) 4325 4325 { 4326 4326 int status; 4327 4327 4328 4328 qdev->func = 4329 - (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT; 4329 + (qlge_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT; 4330 4330 if (qdev->func > 3) 4331 4331 return -EIO; 4332 4332 4333 - status = ql_get_alt_pcie_func(qdev); 4333 + status = qlge_get_alt_pcie_func(qdev); 4334 4334 if (status) 4335 4335 return status; 4336 4336 ··· 4348 4348 qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI; 4349 4349 qdev->mailbox_out = 
PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO; 4350 4350 } 4351 - qdev->chip_rev_id = ql_read32(qdev, REV_ID); 4351 + qdev->chip_rev_id = qlge_read32(qdev, REV_ID); 4352 4352 qdev->device_id = qdev->pdev->device; 4353 4353 if (qdev->device_id == QLGE_DEVICE_ID_8012) 4354 4354 qdev->nic_ops = &qla8012_nic_ops; ··· 4357 4357 return status; 4358 4358 } 4359 4359 4360 - static void ql_release_all(struct pci_dev *pdev) 4360 + static void qlge_release_all(struct pci_dev *pdev) 4361 4361 { 4362 4362 struct net_device *ndev = pci_get_drvdata(pdev); 4363 - struct ql_adapter *qdev = netdev_priv(ndev); 4363 + struct qlge_adapter *qdev = netdev_priv(ndev); 4364 4364 4365 4365 if (qdev->workqueue) { 4366 4366 destroy_workqueue(qdev->workqueue); ··· 4375 4375 pci_release_regions(pdev); 4376 4376 } 4377 4377 4378 - static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev, 4379 - int cards_found) 4378 + static int qlge_init_device(struct pci_dev *pdev, struct net_device *ndev, 4379 + int cards_found) 4380 4380 { 4381 - struct ql_adapter *qdev = netdev_priv(ndev); 4381 + struct qlge_adapter *qdev = netdev_priv(ndev); 4382 4382 int err = 0; 4383 4383 4384 4384 memset((void *)qdev, 0, sizeof(*qdev)); ··· 4441 4441 goto err_out2; 4442 4442 } 4443 4443 4444 - err = ql_get_board_info(qdev); 4444 + err = qlge_get_board_info(qdev); 4445 4445 if (err) { 4446 4446 dev_err(&pdev->dev, "Register access failed.\n"); 4447 4447 err = -EIO; ··· 4452 4452 4453 4453 if (qlge_mpi_coredump) { 4454 4454 qdev->mpi_coredump = 4455 - vmalloc(sizeof(struct ql_mpi_coredump)); 4455 + vmalloc(sizeof(struct qlge_mpi_coredump)); 4456 4456 if (!qdev->mpi_coredump) { 4457 4457 err = -ENOMEM; 4458 4458 goto err_out2; ··· 4490 4490 goto err_out2; 4491 4491 } 4492 4492 4493 - INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work); 4494 - INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work); 4495 - INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work); 4496 - 
INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work); 4497 - INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work); 4498 - INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log); 4493 + INIT_DELAYED_WORK(&qdev->asic_reset_work, qlge_asic_reset_work); 4494 + INIT_DELAYED_WORK(&qdev->mpi_reset_work, qlge_mpi_reset_work); 4495 + INIT_DELAYED_WORK(&qdev->mpi_work, qlge_mpi_work); 4496 + INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, qlge_mpi_port_cfg_work); 4497 + INIT_DELAYED_WORK(&qdev->mpi_idc_work, qlge_mpi_idc_work); 4498 + INIT_DELAYED_WORK(&qdev->mpi_core_to_log, qlge_mpi_core_to_log); 4499 4499 init_completion(&qdev->ide_completion); 4500 4500 mutex_init(&qdev->mpi_mutex); 4501 4501 ··· 4506 4506 } 4507 4507 return 0; 4508 4508 err_out2: 4509 - ql_release_all(pdev); 4509 + qlge_release_all(pdev); 4510 4510 err_out1: 4511 4511 pci_disable_device(pdev); 4512 4512 return err; ··· 4527 4527 .ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid, 4528 4528 }; 4529 4529 4530 - static void ql_timer(struct timer_list *t) 4530 + static void qlge_timer(struct timer_list *t) 4531 4531 { 4532 - struct ql_adapter *qdev = from_timer(qdev, t, timer); 4532 + struct qlge_adapter *qdev = from_timer(qdev, t, timer); 4533 4533 u32 var = 0; 4534 4534 4535 - var = ql_read32(qdev, STS); 4535 + var = qlge_read32(qdev, STS); 4536 4536 if (pci_channel_offline(qdev->pdev)) { 4537 4537 netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var); 4538 4538 return; ··· 4545 4545 const struct pci_device_id *pci_entry) 4546 4546 { 4547 4547 struct net_device *ndev = NULL; 4548 - struct ql_adapter *qdev = NULL; 4548 + struct qlge_adapter *qdev = NULL; 4549 4549 static int cards_found; 4550 4550 int err = 0; 4551 4551 4552 - ndev = alloc_etherdev_mq(sizeof(struct ql_adapter), 4552 + ndev = alloc_etherdev_mq(sizeof(struct qlge_adapter), 4553 4553 min(MAX_CPUS, 4554 4554 netif_get_num_default_rss_queues())); 4555 4555 if (!ndev) 4556 4556 return -ENOMEM; 4557 4557 4558 - err = 
ql_init_device(pdev, ndev, cards_found); 4558 + err = qlge_init_device(pdev, ndev, cards_found); 4559 4559 if (err < 0) { 4560 4560 free_netdev(ndev); 4561 4561 return err; ··· 4564 4564 qdev = netdev_priv(ndev); 4565 4565 SET_NETDEV_DEV(ndev, &pdev->dev); 4566 4566 ndev->hw_features = NETIF_F_SG | 4567 - NETIF_F_IP_CSUM | 4568 - NETIF_F_TSO | 4569 - NETIF_F_TSO_ECN | 4570 - NETIF_F_HW_VLAN_CTAG_TX | 4571 - NETIF_F_HW_VLAN_CTAG_RX | 4572 - NETIF_F_HW_VLAN_CTAG_FILTER | 4573 - NETIF_F_RXCSUM; 4567 + NETIF_F_IP_CSUM | 4568 + NETIF_F_TSO | 4569 + NETIF_F_TSO_ECN | 4570 + NETIF_F_HW_VLAN_CTAG_TX | 4571 + NETIF_F_HW_VLAN_CTAG_RX | 4572 + NETIF_F_HW_VLAN_CTAG_FILTER | 4573 + NETIF_F_RXCSUM; 4574 4574 ndev->features = ndev->hw_features; 4575 4575 ndev->vlan_features = ndev->hw_features; 4576 4576 /* vlan gets same features (except vlan filter) */ ··· 4601 4601 err = register_netdev(ndev); 4602 4602 if (err) { 4603 4603 dev_err(&pdev->dev, "net device registration failed.\n"); 4604 - ql_release_all(pdev); 4604 + qlge_release_all(pdev); 4605 4605 pci_disable_device(pdev); 4606 4606 free_netdev(ndev); 4607 4607 return err; ··· 4609 4609 /* Start up the timer to trigger EEH if 4610 4610 * the bus goes dead 4611 4611 */ 4612 - timer_setup(&qdev->timer, ql_timer, TIMER_DEFERRABLE); 4612 + timer_setup(&qdev->timer, qlge_timer, TIMER_DEFERRABLE); 4613 4613 mod_timer(&qdev->timer, jiffies + (5 * HZ)); 4614 - ql_link_off(qdev); 4615 - ql_display_dev_info(ndev); 4614 + qlge_link_off(qdev); 4615 + qlge_display_dev_info(ndev); 4616 4616 atomic_set(&qdev->lb_count, 0); 4617 4617 cards_found++; 4618 4618 return 0; 4619 4619 } 4620 4620 4621 - netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev) 4621 + netdev_tx_t qlge_lb_send(struct sk_buff *skb, struct net_device *ndev) 4622 4622 { 4623 4623 return qlge_send(skb, ndev); 4624 4624 } 4625 4625 4626 - int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget) 4626 + int qlge_clean_lb_rx_ring(struct rx_ring *rx_ring, int 
budget) 4627 4627 { 4628 - return ql_clean_inbound_rx_ring(rx_ring, budget); 4628 + return qlge_clean_inbound_rx_ring(rx_ring, budget); 4629 4629 } 4630 4630 4631 4631 static void qlge_remove(struct pci_dev *pdev) 4632 4632 { 4633 4633 struct net_device *ndev = pci_get_drvdata(pdev); 4634 - struct ql_adapter *qdev = netdev_priv(ndev); 4634 + struct qlge_adapter *qdev = netdev_priv(ndev); 4635 4635 4636 4636 del_timer_sync(&qdev->timer); 4637 - ql_cancel_all_work_sync(qdev); 4637 + qlge_cancel_all_work_sync(qdev); 4638 4638 unregister_netdev(ndev); 4639 - ql_release_all(pdev); 4639 + qlge_release_all(pdev); 4640 4640 pci_disable_device(pdev); 4641 4641 free_netdev(ndev); 4642 4642 } 4643 4643 4644 4644 /* Clean up resources without touching hardware. */ 4645 - static void ql_eeh_close(struct net_device *ndev) 4645 + static void qlge_eeh_close(struct net_device *ndev) 4646 4646 { 4647 4647 int i; 4648 - struct ql_adapter *qdev = netdev_priv(ndev); 4648 + struct qlge_adapter *qdev = netdev_priv(ndev); 4649 4649 4650 4650 if (netif_carrier_ok(ndev)) { 4651 4651 netif_carrier_off(ndev); ··· 4653 4653 } 4654 4654 4655 4655 /* Disabling the timer */ 4656 - ql_cancel_all_work_sync(qdev); 4656 + qlge_cancel_all_work_sync(qdev); 4657 4657 4658 4658 for (i = 0; i < qdev->rss_ring_count; i++) 4659 4659 netif_napi_del(&qdev->rx_ring[i].napi); 4660 4660 4661 4661 clear_bit(QL_ADAPTER_UP, &qdev->flags); 4662 - ql_tx_ring_clean(qdev); 4663 - ql_free_rx_buffers(qdev); 4664 - ql_release_adapter_resources(qdev); 4662 + qlge_tx_ring_clean(qdev); 4663 + qlge_free_rx_buffers(qdev); 4664 + qlge_release_adapter_resources(qdev); 4665 4665 } 4666 4666 4667 4667 /* ··· 4672 4672 pci_channel_state_t state) 4673 4673 { 4674 4674 struct net_device *ndev = pci_get_drvdata(pdev); 4675 - struct ql_adapter *qdev = netdev_priv(ndev); 4675 + struct qlge_adapter *qdev = netdev_priv(ndev); 4676 4676 4677 4677 switch (state) { 4678 4678 case pci_channel_io_normal: ··· 4681 4681 
netif_device_detach(ndev); 4682 4682 del_timer_sync(&qdev->timer); 4683 4683 if (netif_running(ndev)) 4684 - ql_eeh_close(ndev); 4684 + qlge_eeh_close(ndev); 4685 4685 pci_disable_device(pdev); 4686 4686 return PCI_ERS_RESULT_NEED_RESET; 4687 4687 case pci_channel_io_perm_failure: 4688 4688 dev_err(&pdev->dev, 4689 4689 "%s: pci_channel_io_perm_failure.\n", __func__); 4690 4690 del_timer_sync(&qdev->timer); 4691 - ql_eeh_close(ndev); 4691 + qlge_eeh_close(ndev); 4692 4692 set_bit(QL_EEH_FATAL, &qdev->flags); 4693 4693 return PCI_ERS_RESULT_DISCONNECT; 4694 4694 } ··· 4706 4706 static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev) 4707 4707 { 4708 4708 struct net_device *ndev = pci_get_drvdata(pdev); 4709 - struct ql_adapter *qdev = netdev_priv(ndev); 4709 + struct qlge_adapter *qdev = netdev_priv(ndev); 4710 4710 4711 4711 pdev->error_state = pci_channel_io_normal; 4712 4712 ··· 4718 4718 } 4719 4719 pci_set_master(pdev); 4720 4720 4721 - if (ql_adapter_reset(qdev)) { 4721 + if (qlge_adapter_reset(qdev)) { 4722 4722 netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n"); 4723 4723 set_bit(QL_EEH_FATAL, &qdev->flags); 4724 4724 return PCI_ERS_RESULT_DISCONNECT; ··· 4730 4730 static void qlge_io_resume(struct pci_dev *pdev) 4731 4731 { 4732 4732 struct net_device *ndev = pci_get_drvdata(pdev); 4733 - struct ql_adapter *qdev = netdev_priv(ndev); 4733 + struct qlge_adapter *qdev = netdev_priv(ndev); 4734 4734 int err = 0; 4735 4735 4736 4736 if (netif_running(ndev)) { ··· 4757 4757 static int __maybe_unused qlge_suspend(struct device *dev_d) 4758 4758 { 4759 4759 struct net_device *ndev = dev_get_drvdata(dev_d); 4760 - struct ql_adapter *qdev = netdev_priv(ndev); 4760 + struct qlge_adapter *qdev = netdev_priv(ndev); 4761 4761 int err; 4762 4762 4763 4763 netif_device_detach(ndev); 4764 4764 del_timer_sync(&qdev->timer); 4765 4765 4766 4766 if (netif_running(ndev)) { 4767 - err = ql_adapter_down(qdev); 4767 + err = qlge_adapter_down(qdev); 4768 4768 if (!err) 
4769 4769 return err; 4770 4770 } 4771 4771 4772 - ql_wol(qdev); 4772 + qlge_wol(qdev); 4773 4773 4774 4774 return 0; 4775 4775 } ··· 4777 4777 static int __maybe_unused qlge_resume(struct device *dev_d) 4778 4778 { 4779 4779 struct net_device *ndev = dev_get_drvdata(dev_d); 4780 - struct ql_adapter *qdev = netdev_priv(ndev); 4780 + struct qlge_adapter *qdev = netdev_priv(ndev); 4781 4781 int err; 4782 4782 4783 4783 pci_set_master(to_pci_dev(dev_d)); ··· 4785 4785 device_wakeup_disable(dev_d); 4786 4786 4787 4787 if (netif_running(ndev)) { 4788 - err = ql_adapter_up(qdev); 4788 + err = qlge_adapter_up(qdev); 4789 4789 if (err) 4790 4790 return err; 4791 4791 }
+176 -176
drivers/staging/qlge/qlge_mpi.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 #include "qlge.h" 3 3 4 - int ql_unpause_mpi_risc(struct ql_adapter *qdev) 4 + int qlge_unpause_mpi_risc(struct qlge_adapter *qdev) 5 5 { 6 6 u32 tmp; 7 7 8 8 /* Un-pause the RISC */ 9 - tmp = ql_read32(qdev, CSR); 9 + tmp = qlge_read32(qdev, CSR); 10 10 if (!(tmp & CSR_RP)) 11 11 return -EIO; 12 12 13 - ql_write32(qdev, CSR, CSR_CMD_CLR_PAUSE); 13 + qlge_write32(qdev, CSR, CSR_CMD_CLR_PAUSE); 14 14 return 0; 15 15 } 16 16 17 - int ql_pause_mpi_risc(struct ql_adapter *qdev) 17 + int qlge_pause_mpi_risc(struct qlge_adapter *qdev) 18 18 { 19 19 u32 tmp; 20 20 int count; 21 21 22 22 /* Pause the RISC */ 23 - ql_write32(qdev, CSR, CSR_CMD_SET_PAUSE); 23 + qlge_write32(qdev, CSR, CSR_CMD_SET_PAUSE); 24 24 for (count = UDELAY_COUNT; count; count--) { 25 - tmp = ql_read32(qdev, CSR); 25 + tmp = qlge_read32(qdev, CSR); 26 26 if (tmp & CSR_RP) 27 27 break; 28 28 mdelay(UDELAY_DELAY); ··· 30 30 return (count == 0) ? -ETIMEDOUT : 0; 31 31 } 32 32 33 - int ql_hard_reset_mpi_risc(struct ql_adapter *qdev) 33 + int qlge_hard_reset_mpi_risc(struct qlge_adapter *qdev) 34 34 { 35 35 u32 tmp; 36 36 int count; 37 37 38 38 /* Reset the RISC */ 39 - ql_write32(qdev, CSR, CSR_CMD_SET_RST); 39 + qlge_write32(qdev, CSR, CSR_CMD_SET_RST); 40 40 for (count = UDELAY_COUNT; count; count--) { 41 - tmp = ql_read32(qdev, CSR); 41 + tmp = qlge_read32(qdev, CSR); 42 42 if (tmp & CSR_RR) { 43 - ql_write32(qdev, CSR, CSR_CMD_CLR_RST); 43 + qlge_write32(qdev, CSR, CSR_CMD_CLR_RST); 44 44 break; 45 45 } 46 46 mdelay(UDELAY_DELAY); ··· 48 48 return (count == 0) ? 
-ETIMEDOUT : 0; 49 49 } 50 50 51 - int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data) 51 + int qlge_read_mpi_reg(struct qlge_adapter *qdev, u32 reg, u32 *data) 52 52 { 53 53 int status; 54 54 /* wait for reg to come ready */ 55 - status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR); 55 + status = qlge_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR); 56 56 if (status) 57 57 goto exit; 58 58 /* set up for reg read */ 59 - ql_write32(qdev, PROC_ADDR, reg | PROC_ADDR_R); 59 + qlge_write32(qdev, PROC_ADDR, reg | PROC_ADDR_R); 60 60 /* wait for reg to come ready */ 61 - status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR); 61 + status = qlge_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR); 62 62 if (status) 63 63 goto exit; 64 64 /* get the data */ 65 - *data = ql_read32(qdev, PROC_DATA); 65 + *data = qlge_read32(qdev, PROC_DATA); 66 66 exit: 67 67 return status; 68 68 } 69 69 70 - int ql_write_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 data) 70 + int qlge_write_mpi_reg(struct qlge_adapter *qdev, u32 reg, u32 data) 71 71 { 72 72 int status = 0; 73 73 /* wait for reg to come ready */ 74 - status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR); 74 + status = qlge_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR); 75 75 if (status) 76 76 goto exit; 77 77 /* write the data to the data reg */ 78 - ql_write32(qdev, PROC_DATA, data); 78 + qlge_write32(qdev, PROC_DATA, data); 79 79 /* trigger the write */ 80 - ql_write32(qdev, PROC_ADDR, reg); 80 + qlge_write32(qdev, PROC_ADDR, reg); 81 81 /* wait for reg to come ready */ 82 - status = ql_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR); 82 + status = qlge_wait_reg_rdy(qdev, PROC_ADDR, PROC_ADDR_RDY, PROC_ADDR_ERR); 83 83 if (status) 84 84 goto exit; 85 85 exit: 86 86 return status; 87 87 } 88 88 89 - int ql_soft_reset_mpi_risc(struct ql_adapter *qdev) 89 + int qlge_soft_reset_mpi_risc(struct qlge_adapter *qdev) 
90 90 { 91 - return ql_write_mpi_reg(qdev, 0x00001010, 1); 91 + return qlge_write_mpi_reg(qdev, 0x00001010, 1); 92 92 } 93 93 94 94 /* Determine if we are in charge of the firmware. If ··· 96 96 * we are the higher function and the lower function 97 97 * is not enabled. 98 98 */ 99 - int ql_own_firmware(struct ql_adapter *qdev) 99 + int qlge_own_firmware(struct qlge_adapter *qdev) 100 100 { 101 101 u32 temp; 102 102 ··· 112 112 * enabled, then we are responsible for 113 113 * core dump and firmware reset after an error. 114 114 */ 115 - temp = ql_read32(qdev, STS); 115 + temp = qlge_read32(qdev, STS); 116 116 if (!(temp & (1 << (8 + qdev->alt_func)))) 117 117 return 1; 118 118 119 119 return 0; 120 120 } 121 121 122 - static int ql_get_mb_sts(struct ql_adapter *qdev, struct mbox_params *mbcp) 122 + static int qlge_get_mb_sts(struct qlge_adapter *qdev, struct mbox_params *mbcp) 123 123 { 124 124 int i, status; 125 125 126 - status = ql_sem_spinlock(qdev, SEM_PROC_REG_MASK); 126 + status = qlge_sem_spinlock(qdev, SEM_PROC_REG_MASK); 127 127 if (status) 128 128 return -EBUSY; 129 129 for (i = 0; i < mbcp->out_count; i++) { 130 130 status = 131 - ql_read_mpi_reg(qdev, qdev->mailbox_out + i, 132 - &mbcp->mbox_out[i]); 131 + qlge_read_mpi_reg(qdev, qdev->mailbox_out + i, 132 + &mbcp->mbox_out[i]); 133 133 if (status) { 134 134 netif_err(qdev, drv, qdev->ndev, "Failed mailbox read.\n"); 135 135 break; 136 136 } 137 137 } 138 - ql_sem_unlock(qdev, SEM_PROC_REG_MASK); /* does flush too */ 138 + qlge_sem_unlock(qdev, SEM_PROC_REG_MASK); /* does flush too */ 139 139 return status; 140 140 } 141 141 142 142 /* Wait for a single mailbox command to complete. 143 143 * Returns zero on success. 
144 144 */ 145 - static int ql_wait_mbx_cmd_cmplt(struct ql_adapter *qdev) 145 + static int qlge_wait_mbx_cmd_cmplt(struct qlge_adapter *qdev) 146 146 { 147 147 int count; 148 148 u32 value; 149 149 150 150 for (count = 100; count; count--) { 151 - value = ql_read32(qdev, STS); 151 + value = qlge_read32(qdev, STS); 152 152 if (value & STS_PI) 153 153 return 0; 154 154 mdelay(UDELAY_DELAY); /* 100ms */ ··· 159 159 /* Execute a single mailbox command. 160 160 * Caller must hold PROC_ADDR semaphore. 161 161 */ 162 - static int ql_exec_mb_cmd(struct ql_adapter *qdev, struct mbox_params *mbcp) 162 + static int qlge_exec_mb_cmd(struct qlge_adapter *qdev, struct mbox_params *mbcp) 163 163 { 164 164 int i, status; 165 165 ··· 167 167 * Make sure there's nothing pending. 168 168 * This shouldn't happen. 169 169 */ 170 - if (ql_read32(qdev, CSR) & CSR_HRI) 170 + if (qlge_read32(qdev, CSR) & CSR_HRI) 171 171 return -EIO; 172 172 173 - status = ql_sem_spinlock(qdev, SEM_PROC_REG_MASK); 173 + status = qlge_sem_spinlock(qdev, SEM_PROC_REG_MASK); 174 174 if (status) 175 175 return status; 176 176 ··· 178 178 * Fill the outbound mailboxes. 179 179 */ 180 180 for (i = 0; i < mbcp->in_count; i++) { 181 - status = ql_write_mpi_reg(qdev, qdev->mailbox_in + i, 182 - mbcp->mbox_in[i]); 181 + status = qlge_write_mpi_reg(qdev, qdev->mailbox_in + i, 182 + mbcp->mbox_in[i]); 183 183 if (status) 184 184 goto end; 185 185 } 186 186 /* 187 187 * Wake up the MPI firmware. 188 188 */ 189 - ql_write32(qdev, CSR, CSR_CMD_SET_H2R_INT); 189 + qlge_write32(qdev, CSR, CSR_CMD_SET_H2R_INT); 190 190 end: 191 - ql_sem_unlock(qdev, SEM_PROC_REG_MASK); 191 + qlge_sem_unlock(qdev, SEM_PROC_REG_MASK); 192 192 return status; 193 193 } 194 194 ··· 199 199 * to handler processing this since a mailbox command 200 200 * will need to be sent to ACK the request. 
201 201 */ 202 - static int ql_idc_req_aen(struct ql_adapter *qdev) 202 + static int qlge_idc_req_aen(struct qlge_adapter *qdev) 203 203 { 204 204 int status; 205 205 struct mbox_params *mbcp = &qdev->idc_mbc; ··· 209 209 * handle the request. 210 210 */ 211 211 mbcp->out_count = 4; 212 - status = ql_get_mb_sts(qdev, mbcp); 212 + status = qlge_get_mb_sts(qdev, mbcp); 213 213 if (status) { 214 214 netif_err(qdev, drv, qdev->ndev, 215 215 "Could not read MPI, resetting ASIC!\n"); 216 - ql_queue_asic_error(qdev); 216 + qlge_queue_asic_error(qdev); 217 217 } else { 218 218 /* Begin polled mode early so 219 219 * we don't get another interrupt 220 220 * when we leave mpi_worker. 221 221 */ 222 - ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16)); 222 + qlge_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16)); 223 223 queue_delayed_work(qdev->workqueue, &qdev->mpi_idc_work, 0); 224 224 } 225 225 return status; ··· 228 228 /* Process an inter-device event completion. 229 229 * If good, signal the caller's completion. 230 230 */ 231 - static int ql_idc_cmplt_aen(struct ql_adapter *qdev) 231 + static int qlge_idc_cmplt_aen(struct qlge_adapter *qdev) 232 232 { 233 233 int status; 234 234 struct mbox_params *mbcp = &qdev->idc_mbc; 235 235 236 236 mbcp->out_count = 4; 237 - status = ql_get_mb_sts(qdev, mbcp); 237 + status = qlge_get_mb_sts(qdev, mbcp); 238 238 if (status) { 239 239 netif_err(qdev, drv, qdev->ndev, 240 240 "Could not read MPI, resetting RISC!\n"); 241 - ql_queue_fw_error(qdev); 241 + qlge_queue_fw_error(qdev); 242 242 } else { 243 243 /* Wake up the sleeping mpi_idc_work thread that is 244 244 * waiting for this event. 
··· 248 248 return status; 249 249 } 250 250 251 - static void ql_link_up(struct ql_adapter *qdev, struct mbox_params *mbcp) 251 + static void qlge_link_up(struct qlge_adapter *qdev, struct mbox_params *mbcp) 252 252 { 253 253 int status; 254 254 255 255 mbcp->out_count = 2; 256 256 257 - status = ql_get_mb_sts(qdev, mbcp); 257 + status = qlge_get_mb_sts(qdev, mbcp); 258 258 if (status) { 259 259 netif_err(qdev, drv, qdev->ndev, 260 260 "%s: Could not get mailbox status.\n", __func__); ··· 268 268 * then set up the CAM and frame routing. 269 269 */ 270 270 if (test_bit(QL_CAM_RT_SET, &qdev->flags)) { 271 - status = ql_cam_route_initialize(qdev); 271 + status = qlge_cam_route_initialize(qdev); 272 272 if (status) { 273 273 netif_err(qdev, ifup, qdev->ndev, 274 274 "Failed to init CAM/Routing tables.\n"); ··· 288 288 * we don't get another interrupt 289 289 * when we leave mpi_worker dpc. 290 290 */ 291 - ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16)); 291 + qlge_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16)); 292 292 queue_delayed_work(qdev->workqueue, 293 293 &qdev->mpi_port_cfg_work, 0); 294 294 } 295 295 296 - ql_link_on(qdev); 296 + qlge_link_on(qdev); 297 297 } 298 298 299 - static void ql_link_down(struct ql_adapter *qdev, struct mbox_params *mbcp) 299 + static void qlge_link_down(struct qlge_adapter *qdev, struct mbox_params *mbcp) 300 300 { 301 301 int status; 302 302 303 303 mbcp->out_count = 3; 304 304 305 - status = ql_get_mb_sts(qdev, mbcp); 305 + status = qlge_get_mb_sts(qdev, mbcp); 306 306 if (status) 307 307 netif_err(qdev, drv, qdev->ndev, "Link down AEN broken!\n"); 308 308 309 - ql_link_off(qdev); 309 + qlge_link_off(qdev); 310 310 } 311 311 312 - static int ql_sfp_in(struct ql_adapter *qdev, struct mbox_params *mbcp) 312 + static int qlge_sfp_in(struct qlge_adapter *qdev, struct mbox_params *mbcp) 313 313 { 314 314 int status; 315 315 316 316 mbcp->out_count = 5; 317 317 318 - status = ql_get_mb_sts(qdev, mbcp); 318 + status = 
qlge_get_mb_sts(qdev, mbcp); 319 319 if (status) 320 320 netif_err(qdev, drv, qdev->ndev, "SFP in AEN broken!\n"); 321 321 else ··· 324 324 return status; 325 325 } 326 326 327 - static int ql_sfp_out(struct ql_adapter *qdev, struct mbox_params *mbcp) 327 + static int qlge_sfp_out(struct qlge_adapter *qdev, struct mbox_params *mbcp) 328 328 { 329 329 int status; 330 330 331 331 mbcp->out_count = 1; 332 332 333 - status = ql_get_mb_sts(qdev, mbcp); 333 + status = qlge_get_mb_sts(qdev, mbcp); 334 334 if (status) 335 335 netif_err(qdev, drv, qdev->ndev, "SFP out AEN broken!\n"); 336 336 else ··· 339 339 return status; 340 340 } 341 341 342 - static int ql_aen_lost(struct ql_adapter *qdev, struct mbox_params *mbcp) 342 + static int qlge_aen_lost(struct qlge_adapter *qdev, struct mbox_params *mbcp) 343 343 { 344 344 int status; 345 345 346 346 mbcp->out_count = 6; 347 347 348 - status = ql_get_mb_sts(qdev, mbcp); 348 + status = qlge_get_mb_sts(qdev, mbcp); 349 349 if (status) { 350 350 netif_err(qdev, drv, qdev->ndev, "Lost AEN broken!\n"); 351 351 } else { ··· 360 360 return status; 361 361 } 362 362 363 - static void ql_init_fw_done(struct ql_adapter *qdev, struct mbox_params *mbcp) 363 + static void qlge_init_fw_done(struct qlge_adapter *qdev, struct mbox_params *mbcp) 364 364 { 365 365 int status; 366 366 367 367 mbcp->out_count = 2; 368 368 369 - status = ql_get_mb_sts(qdev, mbcp); 369 + status = qlge_get_mb_sts(qdev, mbcp); 370 370 if (status) { 371 371 netif_err(qdev, drv, qdev->ndev, "Firmware did not initialize!\n"); 372 372 } else { 373 373 netif_err(qdev, drv, qdev->ndev, "Firmware Revision = 0x%.08x.\n", 374 374 mbcp->mbox_out[1]); 375 375 qdev->fw_rev_id = mbcp->mbox_out[1]; 376 - status = ql_cam_route_initialize(qdev); 376 + status = qlge_cam_route_initialize(qdev); 377 377 if (status) 378 378 netif_err(qdev, ifup, qdev->ndev, 379 379 "Failed to init CAM/Routing tables.\n"); ··· 387 387 * It also gets called when a mailbox command is polling for 388 388 * 
it's completion. 389 389 */ 390 - static int ql_mpi_handler(struct ql_adapter *qdev, struct mbox_params *mbcp) 390 + static int qlge_mpi_handler(struct qlge_adapter *qdev, struct mbox_params *mbcp) 391 391 { 392 392 int status; 393 393 int orig_count = mbcp->out_count; 394 394 395 395 /* Just get mailbox zero for now. */ 396 396 mbcp->out_count = 1; 397 - status = ql_get_mb_sts(qdev, mbcp); 397 + status = qlge_get_mb_sts(qdev, mbcp); 398 398 if (status) { 399 399 netif_err(qdev, drv, qdev->ndev, 400 400 "Could not read MPI, resetting ASIC!\n"); 401 - ql_queue_asic_error(qdev); 401 + qlge_queue_asic_error(qdev); 402 402 goto end; 403 403 } 404 404 405 405 switch (mbcp->mbox_out[0]) { 406 - /* This case is only active when we arrive here 407 - * as a result of issuing a mailbox command to 408 - * the firmware. 409 - */ 406 + /* This case is only active when we arrive here 407 + * as a result of issuing a mailbox command to 408 + * the firmware. 409 + */ 410 410 case MB_CMD_STS_INTRMDT: 411 411 case MB_CMD_STS_GOOD: 412 412 case MB_CMD_STS_INVLD_CMD: ··· 421 421 * command completion. 422 422 */ 423 423 mbcp->out_count = orig_count; 424 - status = ql_get_mb_sts(qdev, mbcp); 424 + status = qlge_get_mb_sts(qdev, mbcp); 425 425 return status; 426 426 427 - /* We are being asked by firmware to accept 428 - * a change to the port. This is only 429 - * a change to max frame sizes (Tx/Rx), pause 430 - * parameters, or loopback mode. 431 - */ 427 + /* We are being asked by firmware to accept 428 + * a change to the port. This is only 429 + * a change to max frame sizes (Tx/Rx), pause 430 + * parameters, or loopback mode. 431 + */ 432 432 case AEN_IDC_REQ: 433 - status = ql_idc_req_aen(qdev); 433 + status = qlge_idc_req_aen(qdev); 434 434 break; 435 435 436 - /* Process and inbound IDC event. 437 - * This will happen when we're trying to 438 - * change tx/rx max frame size, change pause 439 - * parameters or loopback mode. 440 - */ 436 + /* Process and inbound IDC event. 
437 + * This will happen when we're trying to 438 + * change tx/rx max frame size, change pause 439 + * parameters or loopback mode. 440 + */ 441 441 case AEN_IDC_CMPLT: 442 442 case AEN_IDC_EXT: 443 - status = ql_idc_cmplt_aen(qdev); 443 + status = qlge_idc_cmplt_aen(qdev); 444 444 break; 445 445 446 446 case AEN_LINK_UP: 447 - ql_link_up(qdev, mbcp); 447 + qlge_link_up(qdev, mbcp); 448 448 break; 449 449 450 450 case AEN_LINK_DOWN: 451 - ql_link_down(qdev, mbcp); 451 + qlge_link_down(qdev, mbcp); 452 452 break; 453 453 454 454 case AEN_FW_INIT_DONE: ··· 457 457 */ 458 458 if (mbcp->mbox_in[0] == MB_CMD_EX_FW) { 459 459 mbcp->out_count = orig_count; 460 - status = ql_get_mb_sts(qdev, mbcp); 460 + status = qlge_get_mb_sts(qdev, mbcp); 461 461 mbcp->mbox_out[0] = MB_CMD_STS_GOOD; 462 462 return status; 463 463 } 464 - ql_init_fw_done(qdev, mbcp); 464 + qlge_init_fw_done(qdev, mbcp); 465 465 break; 466 466 467 467 case AEN_AEN_SFP_IN: 468 - ql_sfp_in(qdev, mbcp); 468 + qlge_sfp_in(qdev, mbcp); 469 469 break; 470 470 471 471 case AEN_AEN_SFP_OUT: 472 - ql_sfp_out(qdev, mbcp); 472 + qlge_sfp_out(qdev, mbcp); 473 473 break; 474 474 475 - /* This event can arrive at boot time or after an 476 - * MPI reset if the firmware failed to initialize. 477 - */ 475 + /* This event can arrive at boot time or after an 476 + * MPI reset if the firmware failed to initialize. 477 + */ 478 478 case AEN_FW_INIT_FAIL: 479 479 /* If we're in process on executing the firmware, 480 480 * then convert the status to normal mailbox status. 
481 481 */ 482 482 if (mbcp->mbox_in[0] == MB_CMD_EX_FW) { 483 483 mbcp->out_count = orig_count; 484 - status = ql_get_mb_sts(qdev, mbcp); 484 + status = qlge_get_mb_sts(qdev, mbcp); 485 485 mbcp->mbox_out[0] = MB_CMD_STS_ERR; 486 486 return status; 487 487 } 488 488 netif_err(qdev, drv, qdev->ndev, 489 489 "Firmware initialization failed.\n"); 490 490 status = -EIO; 491 - ql_queue_fw_error(qdev); 491 + qlge_queue_fw_error(qdev); 492 492 break; 493 493 494 494 case AEN_SYS_ERR: 495 495 netif_err(qdev, drv, qdev->ndev, "System Error.\n"); 496 - ql_queue_fw_error(qdev); 496 + qlge_queue_fw_error(qdev); 497 497 status = -EIO; 498 498 break; 499 499 500 500 case AEN_AEN_LOST: 501 - ql_aen_lost(qdev, mbcp); 501 + qlge_aen_lost(qdev, mbcp); 502 502 break; 503 503 504 504 case AEN_DCBX_CHG: ··· 510 510 /* Clear the MPI firmware status. */ 511 511 } 512 512 end: 513 - ql_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT); 513 + qlge_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT); 514 514 /* Restore the original mailbox count to 515 515 * what the caller asked for. This can get 516 516 * changed when a mailbox command is waiting ··· 526 526 * element in the array contains the value for it's 527 527 * respective mailbox register. 528 528 */ 529 - static int ql_mailbox_command(struct ql_adapter *qdev, struct mbox_params *mbcp) 529 + static int qlge_mailbox_command(struct qlge_adapter *qdev, struct mbox_params *mbcp) 530 530 { 531 531 int status; 532 532 unsigned long count; ··· 534 534 mutex_lock(&qdev->mpi_mutex); 535 535 536 536 /* Begin polled mode for MPI */ 537 - ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16)); 537 + qlge_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16)); 538 538 539 539 /* Load the mailbox registers and wake up MPI RISC. */ 540 - status = ql_exec_mb_cmd(qdev, mbcp); 540 + status = qlge_exec_mb_cmd(qdev, mbcp); 541 541 if (status) 542 542 goto end; 543 543 ··· 556 556 count = jiffies + HZ * MAILBOX_TIMEOUT; 557 557 do { 558 558 /* Wait for the interrupt to come in. 
*/ 559 - status = ql_wait_mbx_cmd_cmplt(qdev); 559 + status = qlge_wait_mbx_cmd_cmplt(qdev); 560 560 if (status) 561 561 continue; 562 562 ··· 565 565 * will be spawned. If it's our completion 566 566 * we will catch it below. 567 567 */ 568 - status = ql_mpi_handler(qdev, mbcp); 568 + status = qlge_mpi_handler(qdev, mbcp); 569 569 if (status) 570 570 goto end; 571 571 ··· 574 574 * completion then get out. 575 575 */ 576 576 if (((mbcp->mbox_out[0] & 0x0000f000) == 577 - MB_CMD_STS_GOOD) || 578 - ((mbcp->mbox_out[0] & 0x0000f000) == 579 - MB_CMD_STS_INTRMDT)) 577 + MB_CMD_STS_GOOD) || 578 + ((mbcp->mbox_out[0] & 0x0000f000) == 579 + MB_CMD_STS_INTRMDT)) 580 580 goto done; 581 581 } while (time_before(jiffies, count)); 582 582 ··· 590 590 /* Now we can clear the interrupt condition 591 591 * and look at our status. 592 592 */ 593 - ql_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT); 593 + qlge_write32(qdev, CSR, CSR_CMD_CLR_R2PCI_INT); 594 594 595 595 if (((mbcp->mbox_out[0] & 0x0000f000) != 596 - MB_CMD_STS_GOOD) && 597 - ((mbcp->mbox_out[0] & 0x0000f000) != 598 - MB_CMD_STS_INTRMDT)) { 596 + MB_CMD_STS_GOOD) && 597 + ((mbcp->mbox_out[0] & 0x0000f000) != 598 + MB_CMD_STS_INTRMDT)) { 599 599 status = -EIO; 600 600 } 601 601 end: 602 602 /* End polled mode for MPI */ 603 - ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI); 603 + qlge_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI); 604 604 mutex_unlock(&qdev->mpi_mutex); 605 605 return status; 606 606 } ··· 609 609 * driver banner and for ethtool info. 610 610 * Returns zero on success. 
611 611 */ 612 - int ql_mb_about_fw(struct ql_adapter *qdev) 612 + int qlge_mb_about_fw(struct qlge_adapter *qdev) 613 613 { 614 614 struct mbox_params mbc; 615 615 struct mbox_params *mbcp = &mbc; ··· 622 622 623 623 mbcp->mbox_in[0] = MB_CMD_ABOUT_FW; 624 624 625 - status = ql_mailbox_command(qdev, mbcp); 625 + status = qlge_mailbox_command(qdev, mbcp); 626 626 if (status) 627 627 return status; 628 628 ··· 641 641 /* Get functional state for MPI firmware. 642 642 * Returns zero on success. 643 643 */ 644 - int ql_mb_get_fw_state(struct ql_adapter *qdev) 644 + int qlge_mb_get_fw_state(struct qlge_adapter *qdev) 645 645 { 646 646 struct mbox_params mbc; 647 647 struct mbox_params *mbcp = &mbc; ··· 654 654 655 655 mbcp->mbox_in[0] = MB_CMD_GET_FW_STATE; 656 656 657 - status = ql_mailbox_command(qdev, mbcp); 657 + status = qlge_mailbox_command(qdev, mbcp); 658 658 if (status) 659 659 return status; 660 660 ··· 680 680 /* Send and ACK mailbox command to the firmware to 681 681 * let it continue with the change. 682 682 */ 683 - static int ql_mb_idc_ack(struct ql_adapter *qdev) 683 + static int qlge_mb_idc_ack(struct qlge_adapter *qdev) 684 684 { 685 685 struct mbox_params mbc; 686 686 struct mbox_params *mbcp = &mbc; ··· 697 697 mbcp->mbox_in[3] = qdev->idc_mbc.mbox_out[3]; 698 698 mbcp->mbox_in[4] = qdev->idc_mbc.mbox_out[4]; 699 699 700 - status = ql_mailbox_command(qdev, mbcp); 700 + status = qlge_mailbox_command(qdev, mbcp); 701 701 if (status) 702 702 return status; 703 703 ··· 712 712 * for the current port. 713 713 * Most likely will block. 
714 714 */ 715 - int ql_mb_set_port_cfg(struct ql_adapter *qdev) 715 + int qlge_mb_set_port_cfg(struct qlge_adapter *qdev) 716 716 { 717 717 struct mbox_params mbc; 718 718 struct mbox_params *mbcp = &mbc; ··· 727 727 mbcp->mbox_in[1] = qdev->link_config; 728 728 mbcp->mbox_in[2] = qdev->max_frame_size; 729 729 730 - status = ql_mailbox_command(qdev, mbcp); 730 + status = qlge_mailbox_command(qdev, mbcp); 731 731 if (status) 732 732 return status; 733 733 ··· 742 742 return status; 743 743 } 744 744 745 - static int ql_mb_dump_ram(struct ql_adapter *qdev, u64 req_dma, u32 addr, 746 - u32 size) 745 + static int qlge_mb_dump_ram(struct qlge_adapter *qdev, u64 req_dma, u32 addr, 746 + u32 size) 747 747 { 748 748 int status = 0; 749 749 struct mbox_params mbc; ··· 764 764 mbcp->mbox_in[7] = LSW(MSD(req_dma)); 765 765 mbcp->mbox_in[8] = MSW(addr); 766 766 767 - status = ql_mailbox_command(qdev, mbcp); 767 + status = qlge_mailbox_command(qdev, mbcp); 768 768 if (status) 769 769 return status; 770 770 ··· 776 776 } 777 777 778 778 /* Issue a mailbox command to dump RISC RAM. */ 779 - int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf, 780 - u32 ram_addr, int word_count) 779 + int qlge_dump_risc_ram_area(struct qlge_adapter *qdev, void *buf, 780 + u32 ram_addr, int word_count) 781 781 { 782 782 int status; 783 783 char *my_buf; ··· 789 789 if (!my_buf) 790 790 return -EIO; 791 791 792 - status = ql_mb_dump_ram(qdev, buf_dma, ram_addr, word_count); 792 + status = qlge_mb_dump_ram(qdev, buf_dma, ram_addr, word_count); 793 793 if (!status) 794 794 memcpy(buf, my_buf, word_count * sizeof(u32)); 795 795 ··· 802 802 * for the current port. 803 803 * Most likely will block. 
804 804 */ 805 - int ql_mb_get_port_cfg(struct ql_adapter *qdev) 805 + int qlge_mb_get_port_cfg(struct qlge_adapter *qdev) 806 806 { 807 807 struct mbox_params mbc; 808 808 struct mbox_params *mbcp = &mbc; ··· 815 815 816 816 mbcp->mbox_in[0] = MB_CMD_GET_PORT_CFG; 817 817 818 - status = ql_mailbox_command(qdev, mbcp); 818 + status = qlge_mailbox_command(qdev, mbcp); 819 819 if (status) 820 820 return status; 821 821 ··· 832 832 return status; 833 833 } 834 834 835 - int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol) 835 + int qlge_mb_wol_mode(struct qlge_adapter *qdev, u32 wol) 836 836 { 837 837 struct mbox_params mbc; 838 838 struct mbox_params *mbcp = &mbc; ··· 846 846 mbcp->mbox_in[0] = MB_CMD_SET_WOL_MODE; 847 847 mbcp->mbox_in[1] = wol; 848 848 849 - status = ql_mailbox_command(qdev, mbcp); 849 + status = qlge_mailbox_command(qdev, mbcp); 850 850 if (status) 851 851 return status; 852 852 ··· 857 857 return status; 858 858 } 859 859 860 - int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol) 860 + int qlge_mb_wol_set_magic(struct qlge_adapter *qdev, u32 enable_wol) 861 861 { 862 862 struct mbox_params mbc; 863 863 struct mbox_params *mbcp = &mbc; ··· 888 888 mbcp->mbox_in[7] = 0; 889 889 } 890 890 891 - status = ql_mailbox_command(qdev, mbcp); 891 + status = qlge_mailbox_command(qdev, mbcp); 892 892 if (status) 893 893 return status; 894 894 ··· 906 906 * The firmware will complete the request if the other 907 907 * function doesn't respond. 
908 908 */ 909 - static int ql_idc_wait(struct ql_adapter *qdev) 909 + static int qlge_idc_wait(struct qlge_adapter *qdev) 910 910 { 911 911 int status = -ETIMEDOUT; 912 912 struct mbox_params *mbcp = &qdev->idc_mbc; ··· 947 947 return status; 948 948 } 949 949 950 - int ql_mb_set_led_cfg(struct ql_adapter *qdev, u32 led_config) 950 + int qlge_mb_set_led_cfg(struct qlge_adapter *qdev, u32 led_config) 951 951 { 952 952 struct mbox_params mbc; 953 953 struct mbox_params *mbcp = &mbc; ··· 961 961 mbcp->mbox_in[0] = MB_CMD_SET_LED_CFG; 962 962 mbcp->mbox_in[1] = led_config; 963 963 964 - status = ql_mailbox_command(qdev, mbcp); 964 + status = qlge_mailbox_command(qdev, mbcp); 965 965 if (status) 966 966 return status; 967 967 ··· 974 974 return status; 975 975 } 976 976 977 - int ql_mb_get_led_cfg(struct ql_adapter *qdev) 977 + int qlge_mb_get_led_cfg(struct qlge_adapter *qdev) 978 978 { 979 979 struct mbox_params mbc; 980 980 struct mbox_params *mbcp = &mbc; ··· 987 987 988 988 mbcp->mbox_in[0] = MB_CMD_GET_LED_CFG; 989 989 990 - status = ql_mailbox_command(qdev, mbcp); 990 + status = qlge_mailbox_command(qdev, mbcp); 991 991 if (status) 992 992 return status; 993 993 ··· 1001 1001 return status; 1002 1002 } 1003 1003 1004 - int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control) 1004 + int qlge_mb_set_mgmnt_traffic_ctl(struct qlge_adapter *qdev, u32 control) 1005 1005 { 1006 1006 struct mbox_params mbc; 1007 1007 struct mbox_params *mbcp = &mbc; ··· 1015 1015 mbcp->mbox_in[0] = MB_CMD_SET_MGMNT_TFK_CTL; 1016 1016 mbcp->mbox_in[1] = control; 1017 1017 1018 - status = ql_mailbox_command(qdev, mbcp); 1018 + status = qlge_mailbox_command(qdev, mbcp); 1019 1019 if (status) 1020 1020 return status; 1021 1021 ··· 1038 1038 } 1039 1039 1040 1040 /* Returns a negative error code or the mailbox command status. 
*/ 1041 - static int ql_mb_get_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 *control) 1041 + static int qlge_mb_get_mgmnt_traffic_ctl(struct qlge_adapter *qdev, u32 *control) 1042 1042 { 1043 1043 struct mbox_params mbc; 1044 1044 struct mbox_params *mbcp = &mbc; ··· 1052 1052 1053 1053 mbcp->mbox_in[0] = MB_CMD_GET_MGMNT_TFK_CTL; 1054 1054 1055 - status = ql_mailbox_command(qdev, mbcp); 1055 + status = qlge_mailbox_command(qdev, mbcp); 1056 1056 if (status) 1057 1057 return status; 1058 1058 ··· 1073 1073 return status; 1074 1074 } 1075 1075 1076 - int ql_wait_fifo_empty(struct ql_adapter *qdev) 1076 + int qlge_wait_fifo_empty(struct qlge_adapter *qdev) 1077 1077 { 1078 1078 int count; 1079 1079 u32 mgmnt_fifo_empty; 1080 1080 u32 nic_fifo_empty; 1081 1081 1082 1082 for (count = 6; count; count--) { 1083 - nic_fifo_empty = ql_read32(qdev, STS) & STS_NFE; 1084 - ql_mb_get_mgmnt_traffic_ctl(qdev, &mgmnt_fifo_empty); 1083 + nic_fifo_empty = qlge_read32(qdev, STS) & STS_NFE; 1084 + qlge_mb_get_mgmnt_traffic_ctl(qdev, &mgmnt_fifo_empty); 1085 1085 mgmnt_fifo_empty &= MB_GET_MPI_TFK_FIFO_EMPTY; 1086 1086 if (nic_fifo_empty && mgmnt_fifo_empty) 1087 1087 return 0; ··· 1093 1093 /* API called in work thread context to set new TX/RX 1094 1094 * maximum frame size values to match MTU. 1095 1095 */ 1096 - static int ql_set_port_cfg(struct ql_adapter *qdev) 1096 + static int qlge_set_port_cfg(struct qlge_adapter *qdev) 1097 1097 { 1098 1098 int status; 1099 1099 1100 - status = ql_mb_set_port_cfg(qdev); 1100 + status = qlge_mb_set_port_cfg(qdev); 1101 1101 if (status) 1102 1102 return status; 1103 - status = ql_idc_wait(qdev); 1103 + status = qlge_idc_wait(qdev); 1104 1104 return status; 1105 1105 } 1106 1106 ··· 1112 1112 * from the firmware and, if necessary, changes them to match 1113 1113 * the MTU setting. 
1114 1114 */ 1115 - void ql_mpi_port_cfg_work(struct work_struct *work) 1115 + void qlge_mpi_port_cfg_work(struct work_struct *work) 1116 1116 { 1117 - struct ql_adapter *qdev = 1118 - container_of(work, struct ql_adapter, mpi_port_cfg_work.work); 1117 + struct qlge_adapter *qdev = 1118 + container_of(work, struct qlge_adapter, mpi_port_cfg_work.work); 1119 1119 int status; 1120 1120 1121 - status = ql_mb_get_port_cfg(qdev); 1121 + status = qlge_mb_get_port_cfg(qdev); 1122 1122 if (status) { 1123 1123 netif_err(qdev, drv, qdev->ndev, 1124 1124 "Bug: Failed to get port config data.\n"); ··· 1131 1131 1132 1132 qdev->link_config |= CFG_JUMBO_FRAME_SIZE; 1133 1133 qdev->max_frame_size = CFG_DEFAULT_MAX_FRAME_SIZE; 1134 - status = ql_set_port_cfg(qdev); 1134 + status = qlge_set_port_cfg(qdev); 1135 1135 if (status) { 1136 1136 netif_err(qdev, drv, qdev->ndev, 1137 1137 "Bug: Failed to set port config data.\n"); ··· 1141 1141 clear_bit(QL_PORT_CFG, &qdev->flags); 1142 1142 return; 1143 1143 err: 1144 - ql_queue_fw_error(qdev); 1144 + qlge_queue_fw_error(qdev); 1145 1145 goto end; 1146 1146 } 1147 1147 ··· 1151 1151 * has been made and then send a mailbox command ACKing 1152 1152 * the change request. 
1153 1153 */ 1154 - void ql_mpi_idc_work(struct work_struct *work) 1154 + void qlge_mpi_idc_work(struct work_struct *work) 1155 1155 { 1156 - struct ql_adapter *qdev = 1157 - container_of(work, struct ql_adapter, mpi_idc_work.work); 1156 + struct qlge_adapter *qdev = 1157 + container_of(work, struct qlge_adapter, mpi_idc_work.work); 1158 1158 int status; 1159 1159 struct mbox_params *mbcp = &qdev->idc_mbc; 1160 1160 u32 aen; ··· 1170 1170 break; 1171 1171 case MB_CMD_PORT_RESET: 1172 1172 case MB_CMD_STOP_FW: 1173 - ql_link_off(qdev); 1173 + qlge_link_off(qdev); 1174 1174 fallthrough; 1175 1175 case MB_CMD_SET_PORT_CFG: 1176 1176 /* Signal the resulting link up AEN ··· 1180 1180 set_bit(QL_CAM_RT_SET, &qdev->flags); 1181 1181 /* Do ACK if required */ 1182 1182 if (timeout) { 1183 - status = ql_mb_idc_ack(qdev); 1183 + status = qlge_mb_idc_ack(qdev); 1184 1184 if (status) 1185 1185 netif_err(qdev, drv, qdev->ndev, 1186 1186 "Bug: No pending IDC!\n"); ··· 1191 1191 } 1192 1192 break; 1193 1193 1194 - /* These sub-commands issued by another (FCoE) 1195 - * function are requesting to do an operation 1196 - * on the shared resource (MPI environment). 1197 - * We currently don't issue these so we just 1198 - * ACK the request. 1199 - */ 1194 + /* These sub-commands issued by another (FCoE) 1195 + * function are requesting to do an operation 1196 + * on the shared resource (MPI environment). 1197 + * We currently don't issue these so we just 1198 + * ACK the request. 1199 + */ 1200 1200 case MB_CMD_IOP_RESTART_MPI: 1201 1201 case MB_CMD_IOP_PREP_LINK_DOWN: 1202 1202 /* Drop the link, reload the routing 1203 1203 * table when link comes up. 
1204 1204 */ 1205 - ql_link_off(qdev); 1205 + qlge_link_off(qdev); 1206 1206 set_bit(QL_CAM_RT_SET, &qdev->flags); 1207 1207 fallthrough; 1208 1208 case MB_CMD_IOP_DVR_START: ··· 1213 1213 case MB_CMD_IOP_NONE: /* an IDC without params */ 1214 1214 /* Do ACK if required */ 1215 1215 if (timeout) { 1216 - status = ql_mb_idc_ack(qdev); 1216 + status = qlge_mb_idc_ack(qdev); 1217 1217 if (status) 1218 1218 netif_err(qdev, drv, qdev->ndev, 1219 1219 "Bug: No pending IDC!\n"); ··· 1226 1226 } 1227 1227 } 1228 1228 1229 - void ql_mpi_work(struct work_struct *work) 1229 + void qlge_mpi_work(struct work_struct *work) 1230 1230 { 1231 - struct ql_adapter *qdev = 1232 - container_of(work, struct ql_adapter, mpi_work.work); 1231 + struct qlge_adapter *qdev = 1232 + container_of(work, struct qlge_adapter, mpi_work.work); 1233 1233 struct mbox_params mbc; 1234 1234 struct mbox_params *mbcp = &mbc; 1235 1235 int err = 0; 1236 1236 1237 1237 mutex_lock(&qdev->mpi_mutex); 1238 1238 /* Begin polled mode for MPI */ 1239 - ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16)); 1239 + qlge_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16)); 1240 1240 1241 - while (ql_read32(qdev, STS) & STS_PI) { 1241 + while (qlge_read32(qdev, STS) & STS_PI) { 1242 1242 memset(mbcp, 0, sizeof(struct mbox_params)); 1243 1243 mbcp->out_count = 1; 1244 1244 /* Don't continue if an async event 1245 1245 * did not complete properly. 
1246 1246 */ 1247 - err = ql_mpi_handler(qdev, mbcp); 1247 + err = qlge_mpi_handler(qdev, mbcp); 1248 1248 if (err) 1249 1249 break; 1250 1250 } 1251 1251 1252 1252 /* End polled mode for MPI */ 1253 - ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI); 1253 + qlge_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI); 1254 1254 mutex_unlock(&qdev->mpi_mutex); 1255 1255 } 1256 1256 1257 - void ql_mpi_reset_work(struct work_struct *work) 1257 + void qlge_mpi_reset_work(struct work_struct *work) 1258 1258 { 1259 - struct ql_adapter *qdev = 1260 - container_of(work, struct ql_adapter, mpi_reset_work.work); 1259 + struct qlge_adapter *qdev = 1260 + container_of(work, struct qlge_adapter, mpi_reset_work.work); 1261 1261 cancel_delayed_work_sync(&qdev->mpi_work); 1262 1262 cancel_delayed_work_sync(&qdev->mpi_port_cfg_work); 1263 1263 cancel_delayed_work_sync(&qdev->mpi_idc_work); 1264 1264 /* If we're not the dominant NIC function, 1265 1265 * then there is nothing to do. 1266 1266 */ 1267 - if (!ql_own_firmware(qdev)) { 1267 + if (!qlge_own_firmware(qdev)) { 1268 1268 netif_err(qdev, drv, qdev->ndev, "Don't own firmware!\n"); 1269 1269 return; 1270 1270 } 1271 1271 1272 - if (qdev->mpi_coredump && !ql_core_dump(qdev, qdev->mpi_coredump)) { 1272 + if (qdev->mpi_coredump && !qlge_core_dump(qdev, qdev->mpi_coredump)) { 1273 1273 netif_err(qdev, drv, qdev->ndev, "Core is dumped!\n"); 1274 1274 qdev->core_is_dumped = 1; 1275 1275 queue_delayed_work(qdev->workqueue, 1276 1276 &qdev->mpi_core_to_log, 5 * HZ); 1277 1277 } 1278 - ql_soft_reset_mpi_risc(qdev); 1278 + qlge_soft_reset_mpi_risc(qdev); 1279 1279 }