Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

bna: Fixed build break for allyesconfig

This is the patch to fix the build break caused by multiple
definitions of symbols between Brocade's FC/FCOE driver (BFA)
and 10G Networking Driver (BNA).

Changes are:

1. Locally used functions are made static.

2. Unused functions are removed.

3. A unique name prefix (bfa_nw_) is used for the functions that must
remain globally visible.

Signed-off-by: Debashis Dutt <ddutt@brocade.com>
Signed-off-by: Rasesh Mody <rmody@brocade.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

Rasesh Mody and committed by
David S. Miller
8a891429 ced1de4c

+159 -428
+10 -126
drivers/net/bna/bfa_cee.c
··· 152 152 cee->cbfn.reset_stats_cbfn(cee->cbfn.reset_stats_cbarg, status); 153 153 } 154 154 /** 155 - * bfa_cee_meminfo() 155 + * bfa_nw_cee_meminfo() 156 156 * 157 157 * @brief Returns the size of the DMA memory needed by CEE module 158 158 * ··· 161 161 * @return Size of DMA region 162 162 */ 163 163 u32 164 - bfa_cee_meminfo(void) 164 + bfa_nw_cee_meminfo(void) 165 165 { 166 166 return bfa_cee_attr_meminfo() + bfa_cee_stats_meminfo(); 167 167 } 168 168 169 169 /** 170 - * bfa_cee_mem_claim() 170 + * bfa_nw_cee_mem_claim() 171 171 * 172 172 * @brief Initialized CEE DMA Memory 173 173 * ··· 178 178 * @return void 179 179 */ 180 180 void 181 - bfa_cee_mem_claim(struct bfa_cee *cee, u8 *dma_kva, u64 dma_pa) 181 + bfa_nw_cee_mem_claim(struct bfa_cee *cee, u8 *dma_kva, u64 dma_pa) 182 182 { 183 183 cee->attr_dma.kva = dma_kva; 184 184 cee->attr_dma.pa = dma_pa; ··· 187 187 cee->attr = (struct bfa_cee_attr *) dma_kva; 188 188 cee->stats = (struct bfa_cee_stats *) 189 189 (dma_kva + bfa_cee_attr_meminfo()); 190 - } 191 - 192 - /** 193 - * bfa_cee_get_attr() 194 - * 195 - * @brief 196 - * Send the request to the f/w to fetch CEE attributes. 197 - * 198 - * @param[in] Pointer to the CEE module data structure. 
199 - * 200 - * @return Status 201 - */ 202 - 203 - enum bfa_status 204 - bfa_cee_get_attr(struct bfa_cee *cee, struct bfa_cee_attr *attr, 205 - bfa_cee_get_attr_cbfn_t cbfn, void *cbarg) 206 - { 207 - struct bfi_cee_get_req *cmd; 208 - 209 - BUG_ON(!((cee != NULL) && (cee->ioc != NULL))); 210 - if (!bfa_ioc_is_operational(cee->ioc)) 211 - return BFA_STATUS_IOC_FAILURE; 212 - if (cee->get_attr_pending == true) 213 - return BFA_STATUS_DEVBUSY; 214 - cee->get_attr_pending = true; 215 - cmd = (struct bfi_cee_get_req *) cee->get_cfg_mb.msg; 216 - cee->attr = attr; 217 - cee->cbfn.get_attr_cbfn = cbfn; 218 - cee->cbfn.get_attr_cbarg = cbarg; 219 - bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_GET_CFG_REQ, 220 - bfa_ioc_portid(cee->ioc)); 221 - bfa_dma_be_addr_set(cmd->dma_addr, cee->attr_dma.pa); 222 - bfa_ioc_mbox_queue(cee->ioc, &cee->get_cfg_mb); 223 - 224 - return BFA_STATUS_OK; 225 - } 226 - 227 - /** 228 - * bfa_cee_get_stats() 229 - * 230 - * @brief 231 - * Send the request to the f/w to fetch CEE statistics. 232 - * 233 - * @param[in] Pointer to the CEE module data structure. 
234 - * 235 - * @return Status 236 - */ 237 - 238 - enum bfa_status 239 - bfa_cee_get_stats(struct bfa_cee *cee, struct bfa_cee_stats *stats, 240 - bfa_cee_get_stats_cbfn_t cbfn, void *cbarg) 241 - { 242 - struct bfi_cee_get_req *cmd; 243 - 244 - BUG_ON(!((cee != NULL) && (cee->ioc != NULL))); 245 - 246 - if (!bfa_ioc_is_operational(cee->ioc)) 247 - return BFA_STATUS_IOC_FAILURE; 248 - if (cee->get_stats_pending == true) 249 - return BFA_STATUS_DEVBUSY; 250 - cee->get_stats_pending = true; 251 - cmd = (struct bfi_cee_get_req *) cee->get_stats_mb.msg; 252 - cee->stats = stats; 253 - cee->cbfn.get_stats_cbfn = cbfn; 254 - cee->cbfn.get_stats_cbarg = cbarg; 255 - bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_GET_STATS_REQ, 256 - bfa_ioc_portid(cee->ioc)); 257 - bfa_dma_be_addr_set(cmd->dma_addr, cee->stats_dma.pa); 258 - bfa_ioc_mbox_queue(cee->ioc, &cee->get_stats_mb); 259 - 260 - return BFA_STATUS_OK; 261 - } 262 - 263 - /** 264 - * bfa_cee_reset_stats() 265 - * 266 - * @brief Clears CEE Stats in the f/w. 267 - * 268 - * @param[in] Pointer to the CEE module data structure. 
269 - * 270 - * @return Status 271 - */ 272 - 273 - enum bfa_status 274 - bfa_cee_reset_stats(struct bfa_cee *cee, bfa_cee_reset_stats_cbfn_t cbfn, 275 - void *cbarg) 276 - { 277 - struct bfi_cee_reset_stats *cmd; 278 - 279 - BUG_ON(!((cee != NULL) && (cee->ioc != NULL))); 280 - if (!bfa_ioc_is_operational(cee->ioc)) 281 - return BFA_STATUS_IOC_FAILURE; 282 - if (cee->reset_stats_pending == true) 283 - return BFA_STATUS_DEVBUSY; 284 - cee->reset_stats_pending = true; 285 - cmd = (struct bfi_cee_reset_stats *) cee->reset_stats_mb.msg; 286 - cee->cbfn.reset_stats_cbfn = cbfn; 287 - cee->cbfn.reset_stats_cbarg = cbarg; 288 - bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_RESET_STATS, 289 - bfa_ioc_portid(cee->ioc)); 290 - bfa_ioc_mbox_queue(cee->ioc, &cee->reset_stats_mb); 291 - return BFA_STATUS_OK; 292 190 } 293 191 294 192 /** ··· 199 301 * @return void 200 302 */ 201 303 202 - void 304 + static void 203 305 bfa_cee_isr(void *cbarg, struct bfi_mbmsg *m) 204 306 { 205 307 union bfi_cee_i2h_msg_u *msg; ··· 232 334 * @return void 233 335 */ 234 336 235 - void 337 + static void 236 338 bfa_cee_hbfail(void *arg) 237 339 { 238 340 struct bfa_cee *cee; ··· 265 367 } 266 368 267 369 /** 268 - * bfa_cee_attach() 370 + * bfa_nw_cee_attach() 269 371 * 270 372 * @brief CEE module-attach API 271 373 * ··· 278 380 * @return void 279 381 */ 280 382 void 281 - bfa_cee_attach(struct bfa_cee *cee, struct bfa_ioc *ioc, 383 + bfa_nw_cee_attach(struct bfa_cee *cee, struct bfa_ioc *ioc, 282 384 void *dev) 283 385 { 284 386 BUG_ON(!(cee != NULL)); 285 387 cee->dev = dev; 286 388 cee->ioc = ioc; 287 389 288 - bfa_ioc_mbox_regisr(cee->ioc, BFI_MC_CEE, bfa_cee_isr, cee); 390 + bfa_nw_ioc_mbox_regisr(cee->ioc, BFI_MC_CEE, bfa_cee_isr, cee); 289 391 bfa_ioc_hbfail_init(&cee->hbfail, bfa_cee_hbfail, cee); 290 - bfa_ioc_hbfail_register(cee->ioc, &cee->hbfail); 291 - } 292 - 293 - /** 294 - * bfa_cee_detach() 295 - * 296 - * @brief CEE module-detach API 297 - * 298 - * @param[in] cee - Pointer to 
the CEE module data structure 299 - * 300 - * @return void 301 - */ 302 - void 303 - bfa_cee_detach(struct bfa_cee *cee) 304 - { 392 + bfa_nw_ioc_hbfail_register(cee->ioc, &cee->hbfail); 305 393 }
+3 -11
drivers/net/bna/bfa_cee.h
··· 56 56 struct bfa_mbox_cmd reset_stats_mb; 57 57 }; 58 58 59 - u32 bfa_cee_meminfo(void); 60 - void bfa_cee_mem_claim(struct bfa_cee *cee, u8 *dma_kva, 59 + u32 bfa_nw_cee_meminfo(void); 60 + void bfa_nw_cee_mem_claim(struct bfa_cee *cee, u8 *dma_kva, 61 61 u64 dma_pa); 62 - void bfa_cee_attach(struct bfa_cee *cee, struct bfa_ioc *ioc, void *dev); 63 - void bfa_cee_detach(struct bfa_cee *cee); 64 - enum bfa_status bfa_cee_get_attr(struct bfa_cee *cee, 65 - struct bfa_cee_attr *attr, bfa_cee_get_attr_cbfn_t cbfn, void *cbarg); 66 - enum bfa_status bfa_cee_get_stats(struct bfa_cee *cee, 67 - struct bfa_cee_stats *stats, bfa_cee_get_stats_cbfn_t cbfn, 68 - void *cbarg); 69 - enum bfa_status bfa_cee_reset_stats(struct bfa_cee *cee, 70 - bfa_cee_reset_stats_cbfn_t cbfn, void *cbarg); 62 + void bfa_nw_cee_attach(struct bfa_cee *cee, struct bfa_ioc *ioc, void *dev); 71 63 72 64 #endif /* __BFA_CEE_H__ */
+74 -175
drivers/net/bna/bfa_ioc.c
··· 65 65 (!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \ 66 66 readl((__ioc)->ioc_regs.hfn_mbox_cmd)) 67 67 68 - bool bfa_auto_recover = true; 68 + bool bfa_nw_auto_recover = true; 69 69 70 70 /* 71 71 * forward declarations ··· 85 85 static void bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc); 86 86 static void bfa_ioc_disable_comp(struct bfa_ioc *ioc); 87 87 static void bfa_ioc_lpu_stop(struct bfa_ioc *ioc); 88 + static void bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, 89 + u32 boot_param); 90 + static u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr); 91 + static u32 bfa_ioc_smem_pgoff(struct bfa_ioc *ioc, u32 fmaddr); 92 + static void bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc, 93 + char *serial_num); 94 + static void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc, 95 + char *fw_ver); 96 + static void bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc, 97 + char *chip_rev); 98 + static void bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, 99 + char *optrom_ver); 100 + static void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc, 101 + char *manufacturer); 102 + static void bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model); 103 + static u64 bfa_ioc_get_pwwn(struct bfa_ioc *ioc); 104 + static mac_t bfa_ioc_get_mfg_mac(struct bfa_ioc *ioc); 88 105 89 106 /** 90 107 * IOC state machine events ··· 155 138 bfa_ioc_sm_reset_entry(struct bfa_ioc *ioc) 156 139 { 157 140 ioc->retry_count = 0; 158 - ioc->auto_recover = bfa_auto_recover; 141 + ioc->auto_recover = bfa_nw_auto_recover; 159 142 } 160 143 161 144 /** ··· 202 185 ioc->retry_count = 0; 203 186 bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit); 204 187 } else { 205 - bfa_ioc_hw_sem_release(ioc); 188 + bfa_nw_ioc_hw_sem_release(ioc); 206 189 bfa_fsm_set_state(ioc, bfa_ioc_sm_mismatch); 207 190 } 208 191 break; ··· 331 314 break; 332 315 } 333 316 334 - bfa_ioc_hw_sem_release(ioc); 317 + bfa_nw_ioc_hw_sem_release(ioc); 335 318 bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail); 336 319 break; 337 320 338 
321 case IOC_E_DISABLE: 339 - bfa_ioc_hw_sem_release(ioc); 322 + bfa_nw_ioc_hw_sem_release(ioc); 340 323 bfa_ioc_timer_stop(ioc); 341 324 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); 342 325 break; ··· 363 346 switch (event) { 364 347 case IOC_E_FWRSP_ENABLE: 365 348 bfa_ioc_timer_stop(ioc); 366 - bfa_ioc_hw_sem_release(ioc); 349 + bfa_nw_ioc_hw_sem_release(ioc); 367 350 bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr); 368 351 break; 369 352 ··· 380 363 break; 381 364 } 382 365 383 - bfa_ioc_hw_sem_release(ioc); 366 + bfa_nw_ioc_hw_sem_release(ioc); 384 367 bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail); 385 368 break; 386 369 387 370 case IOC_E_DISABLE: 388 371 bfa_ioc_timer_stop(ioc); 389 - bfa_ioc_hw_sem_release(ioc); 372 + bfa_nw_ioc_hw_sem_release(ioc); 390 373 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); 391 374 break; 392 375 ··· 679 662 } 680 663 681 664 void 682 - bfa_ioc_sem_timeout(void *ioc_arg) 665 + bfa_nw_ioc_sem_timeout(void *ioc_arg) 683 666 { 684 667 struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg; 685 668 ··· 687 670 } 688 671 689 672 bool 690 - bfa_ioc_sem_get(void __iomem *sem_reg) 673 + bfa_nw_ioc_sem_get(void __iomem *sem_reg) 691 674 { 692 675 u32 r32; 693 676 int cnt = 0; ··· 709 692 } 710 693 711 694 void 712 - bfa_ioc_sem_release(void __iomem *sem_reg) 695 + bfa_nw_ioc_sem_release(void __iomem *sem_reg) 713 696 { 714 697 writel(1, sem_reg); 715 698 } ··· 734 717 } 735 718 736 719 void 737 - bfa_ioc_hw_sem_release(struct bfa_ioc *ioc) 720 + bfa_nw_ioc_hw_sem_release(struct bfa_ioc *ioc) 738 721 { 739 722 writel(1, ioc->ioc_regs.ioc_sem_reg); 740 723 } ··· 817 800 * Get driver and firmware versions. 818 801 */ 819 802 void 820 - bfa_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr) 803 + bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr) 821 804 { 822 805 u32 pgnum, pgoff; 823 806 u32 loff = 0; ··· 840 823 * Returns TRUE if same. 
841 824 */ 842 825 bool 843 - bfa_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr) 826 + bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr) 844 827 { 845 828 struct bfi_ioc_image_hdr *drv_fwhdr; 846 829 int i; ··· 871 854 if (bfa_ioc_is_optrom(ioc)) 872 855 return true; 873 856 874 - bfa_ioc_fwver_get(ioc, &fwhdr); 857 + bfa_nw_ioc_fwver_get(ioc, &fwhdr); 875 858 drv_fwhdr = (struct bfi_ioc_image_hdr *) 876 859 bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0); 877 860 ··· 881 864 if (fwhdr.exec != drv_fwhdr->exec) 882 865 return false; 883 866 884 - return bfa_ioc_fwver_cmp(ioc, &fwhdr); 867 + return bfa_nw_ioc_fwver_cmp(ioc, &fwhdr); 885 868 } 886 869 887 870 /** ··· 958 941 } 959 942 960 943 void 961 - bfa_ioc_timeout(void *ioc_arg) 944 + bfa_nw_ioc_timeout(void *ioc_arg) 962 945 { 963 946 struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg; 964 947 965 948 bfa_fsm_send_event(ioc, IOC_E_TIMEOUT); 966 949 } 967 950 968 - void 951 + static void 969 952 bfa_ioc_mbox_send(struct bfa_ioc *ioc, void *ioc_msg, int len) 970 953 { 971 954 u32 *msgp = (u32 *) ioc_msg; ··· 1026 1009 } 1027 1010 1028 1011 void 1029 - bfa_ioc_hb_check(void *cbarg) 1012 + bfa_nw_ioc_hb_check(void *cbarg) 1030 1013 { 1031 1014 struct bfa_ioc *ioc = cbarg; 1032 1015 u32 hb_count; ··· 1212 1195 /** 1213 1196 * IOC public 1214 1197 */ 1215 - enum bfa_status 1198 + static enum bfa_status 1216 1199 bfa_ioc_pll_init(struct bfa_ioc *ioc) 1217 1200 { 1218 1201 /* 1219 1202 * Hold semaphore so that nobody can access the chip during init. 1220 1203 */ 1221 - bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg); 1204 + bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg); 1222 1205 1223 1206 bfa_ioc_pll_init_asic(ioc); 1224 1207 ··· 1226 1209 /* 1227 1210 * release semaphore. 
1228 1211 */ 1229 - bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg); 1212 + bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg); 1230 1213 1231 1214 return BFA_STATUS_OK; 1232 1215 } ··· 1235 1218 * Interface used by diag module to do firmware boot with memory test 1236 1219 * as the entry vector. 1237 1220 */ 1238 - void 1221 + static void 1239 1222 bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_param) 1240 1223 { 1241 1224 void __iomem *rb; ··· 1271 1254 * Enable/disable IOC failure auto recovery. 1272 1255 */ 1273 1256 void 1274 - bfa_ioc_auto_recover(bool auto_recover) 1257 + bfa_nw_ioc_auto_recover(bool auto_recover) 1275 1258 { 1276 - bfa_auto_recover = auto_recover; 1259 + bfa_nw_auto_recover = auto_recover; 1277 1260 } 1278 1261 1279 1262 bool 1280 - bfa_ioc_is_operational(struct bfa_ioc *ioc) 1263 + bfa_nw_ioc_is_operational(struct bfa_ioc *ioc) 1281 1264 { 1282 1265 return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op); 1283 1266 } 1284 1267 1285 - bool 1286 - bfa_ioc_is_initialized(struct bfa_ioc *ioc) 1287 - { 1288 - u32 r32 = readl(ioc->ioc_regs.ioc_fwstate); 1289 - 1290 - return ((r32 != BFI_IOC_UNINIT) && 1291 - (r32 != BFI_IOC_INITING) && 1292 - (r32 != BFI_IOC_MEMTEST)); 1293 - } 1294 - 1295 - void 1268 + static void 1296 1269 bfa_ioc_msgget(struct bfa_ioc *ioc, void *mbmsg) 1297 1270 { 1298 1271 u32 *msgp = mbmsg; ··· 1306 1299 readl(ioc->ioc_regs.lpu_mbox_cmd); 1307 1300 } 1308 1301 1309 - void 1302 + static void 1310 1303 bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *m) 1311 1304 { 1312 1305 union bfi_ioc_i2h_msg_u *msg; ··· 1347 1340 * @param[in] bfa driver instance structure 1348 1341 */ 1349 1342 void 1350 - bfa_ioc_attach(struct bfa_ioc *ioc, void *bfa, struct bfa_ioc_cbfn *cbfn) 1343 + bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa, struct bfa_ioc_cbfn *cbfn) 1351 1344 { 1352 1345 ioc->bfa = bfa; 1353 1346 ioc->cbfn = cbfn; ··· 1365 1358 * Driver detach time IOC cleanup. 
1366 1359 */ 1367 1360 void 1368 - bfa_ioc_detach(struct bfa_ioc *ioc) 1361 + bfa_nw_ioc_detach(struct bfa_ioc *ioc) 1369 1362 { 1370 1363 bfa_fsm_send_event(ioc, IOC_E_DETACH); 1371 1364 } ··· 1376 1369 * @param[in] pcidev PCI device information for this IOC 1377 1370 */ 1378 1371 void 1379 - bfa_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev, 1372 + bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev, 1380 1373 enum bfi_mclass mc) 1381 1374 { 1382 1375 ioc->ioc_mc = mc; ··· 1384 1377 ioc->ctdev = bfa_asic_id_ct(ioc->pcidev.device_id); 1385 1378 ioc->cna = ioc->ctdev && !ioc->fcmode; 1386 1379 1387 - bfa_ioc_set_ct_hwif(ioc); 1380 + bfa_nw_ioc_set_ct_hwif(ioc); 1388 1381 1389 1382 bfa_ioc_map_port(ioc); 1390 1383 bfa_ioc_reg_init(ioc); ··· 1397 1390 * @param[in] dm_pa physical address of IOC dma memory 1398 1391 */ 1399 1392 void 1400 - bfa_ioc_mem_claim(struct bfa_ioc *ioc, u8 *dm_kva, u64 dm_pa) 1393 + bfa_nw_ioc_mem_claim(struct bfa_ioc *ioc, u8 *dm_kva, u64 dm_pa) 1401 1394 { 1402 1395 /** 1403 1396 * dma memory for firmware attribute ··· 1411 1404 * Return size of dma memory required. 
1412 1405 */ 1413 1406 u32 1414 - bfa_ioc_meminfo(void) 1407 + bfa_nw_ioc_meminfo(void) 1415 1408 { 1416 1409 return roundup(sizeof(struct bfi_ioc_attr), BFA_DMA_ALIGN_SZ); 1417 1410 } 1418 1411 1419 1412 void 1420 - bfa_ioc_enable(struct bfa_ioc *ioc) 1413 + bfa_nw_ioc_enable(struct bfa_ioc *ioc) 1421 1414 { 1422 1415 bfa_ioc_stats(ioc, ioc_enables); 1423 1416 ioc->dbg_fwsave_once = true; ··· 1426 1419 } 1427 1420 1428 1421 void 1429 - bfa_ioc_disable(struct bfa_ioc *ioc) 1422 + bfa_nw_ioc_disable(struct bfa_ioc *ioc) 1430 1423 { 1431 1424 bfa_ioc_stats(ioc, ioc_disables); 1432 1425 bfa_fsm_send_event(ioc, IOC_E_DISABLE); 1433 1426 } 1434 1427 1435 - u32 1428 + static u32 1436 1429 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr) 1437 1430 { 1438 1431 return PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, fmaddr); 1439 1432 } 1440 1433 1441 - u32 1434 + static u32 1442 1435 bfa_ioc_smem_pgoff(struct bfa_ioc *ioc, u32 fmaddr) 1443 1436 { 1444 1437 return PSS_SMEM_PGOFF(fmaddr); 1445 1438 } 1446 1439 1447 1440 /** 1448 - * Register mailbox message handler functions 1449 - * 1450 - * @param[in] ioc IOC instance 1451 - * @param[in] mcfuncs message class handler functions 1452 - */ 1453 - void 1454 - bfa_ioc_mbox_register(struct bfa_ioc *ioc, bfa_ioc_mbox_mcfunc_t *mcfuncs) 1455 - { 1456 - struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod; 1457 - int mc; 1458 - 1459 - for (mc = 0; mc < BFI_MC_MAX; mc++) 1460 - mod->mbhdlr[mc].cbfn = mcfuncs[mc]; 1461 - } 1462 - 1463 - /** 1464 1441 * Register mailbox message handler function, to be called by common modules 1465 1442 */ 1466 1443 void 1467 - bfa_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc, 1444 + bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc, 1468 1445 bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg) 1469 1446 { 1470 1447 struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod; ··· 1465 1474 * @param[i] cmd Mailbox command 1466 1475 */ 1467 1476 void 1468 - bfa_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd 
*cmd) 1477 + bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd) 1469 1478 { 1470 1479 struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod; 1471 1480 u32 stat; ··· 1497 1506 * Handle mailbox interrupts 1498 1507 */ 1499 1508 void 1500 - bfa_ioc_mbox_isr(struct bfa_ioc *ioc) 1509 + bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc) 1501 1510 { 1502 1511 struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod; 1503 1512 struct bfi_mbmsg m; ··· 1521 1530 } 1522 1531 1523 1532 void 1524 - bfa_ioc_error_isr(struct bfa_ioc *ioc) 1533 + bfa_nw_ioc_error_isr(struct bfa_ioc *ioc) 1525 1534 { 1526 1535 bfa_fsm_send_event(ioc, IOC_E_HWERROR); 1527 - } 1528 - 1529 - void 1530 - bfa_ioc_set_fcmode(struct bfa_ioc *ioc) 1531 - { 1532 - ioc->fcmode = true; 1533 - ioc->port_id = bfa_ioc_pcifn(ioc); 1534 - } 1535 - 1536 - /** 1537 - * return true if IOC is disabled 1538 - */ 1539 - bool 1540 - bfa_ioc_is_disabled(struct bfa_ioc *ioc) 1541 - { 1542 - return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) || 1543 - bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled); 1544 - } 1545 - 1546 - /** 1547 - * return true if IOC firmware is different. 1548 - */ 1549 - bool 1550 - bfa_ioc_fw_mismatch(struct bfa_ioc *ioc) 1551 - { 1552 - return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_reset) || 1553 - bfa_fsm_cmp_state(ioc, bfa_ioc_sm_fwcheck) || 1554 - bfa_fsm_cmp_state(ioc, bfa_ioc_sm_mismatch); 1555 - } 1556 - 1557 - #define bfa_ioc_state_disabled(__sm) \ 1558 - (((__sm) == BFI_IOC_UNINIT) || \ 1559 - ((__sm) == BFI_IOC_INITING) || \ 1560 - ((__sm) == BFI_IOC_HWINIT) || \ 1561 - ((__sm) == BFI_IOC_DISABLED) || \ 1562 - ((__sm) == BFI_IOC_FAIL) || \ 1563 - ((__sm) == BFI_IOC_CFG_DISABLED)) 1564 - 1565 - /** 1566 - * Check if adapter is disabled -- both IOCs should be in a disabled 1567 - * state. 
1568 - */ 1569 - bool 1570 - bfa_ioc_adapter_is_disabled(struct bfa_ioc *ioc) 1571 - { 1572 - u32 ioc_state; 1573 - void __iomem *rb = ioc->pcidev.pci_bar_kva; 1574 - 1575 - if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled)) 1576 - return false; 1577 - 1578 - ioc_state = readl(rb + BFA_IOC0_STATE_REG); 1579 - if (!bfa_ioc_state_disabled(ioc_state)) 1580 - return false; 1581 - 1582 - if (ioc->pcidev.device_id != PCI_DEVICE_ID_BROCADE_FC_8G1P) { 1583 - ioc_state = readl(rb + BFA_IOC1_STATE_REG); 1584 - if (!bfa_ioc_state_disabled(ioc_state)) 1585 - return false; 1586 - } 1587 - 1588 - return true; 1589 1536 } 1590 1537 1591 1538 /** ··· 1531 1602 * modules such as cee, port, diag. 1532 1603 */ 1533 1604 void 1534 - bfa_ioc_hbfail_register(struct bfa_ioc *ioc, 1605 + bfa_nw_ioc_hbfail_register(struct bfa_ioc *ioc, 1535 1606 struct bfa_ioc_hbfail_notify *notify) 1536 1607 { 1537 1608 list_add_tail(&notify->qe, &ioc->hb_notify_q); 1538 1609 } 1539 1610 1540 1611 #define BFA_MFG_NAME "Brocade" 1541 - void 1612 + static void 1542 1613 bfa_ioc_get_adapter_attr(struct bfa_ioc *ioc, 1543 1614 struct bfa_adapter_attr *ad_attr) 1544 1615 { ··· 1569 1640 ad_attr->prototype = 0; 1570 1641 1571 1642 ad_attr->pwwn = bfa_ioc_get_pwwn(ioc); 1572 - ad_attr->mac = bfa_ioc_get_mac(ioc); 1643 + ad_attr->mac = bfa_nw_ioc_get_mac(ioc); 1573 1644 1574 1645 ad_attr->pcie_gen = ioc_attr->pcie_gen; 1575 1646 ad_attr->pcie_lanes = ioc_attr->pcie_lanes; ··· 1582 1653 ad_attr->trunk_capable = (ad_attr->nports > 1) && !ioc->cna; 1583 1654 } 1584 1655 1585 - enum bfa_ioc_type 1656 + static enum bfa_ioc_type 1586 1657 bfa_ioc_get_type(struct bfa_ioc *ioc) 1587 1658 { 1588 1659 if (!ioc->ctdev || ioc->fcmode) ··· 1597 1668 } 1598 1669 } 1599 1670 1600 - void 1671 + static void 1601 1672 bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc, char *serial_num) 1602 1673 { 1603 1674 memset(serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN); ··· 1606 1677 BFA_ADAPTER_SERIAL_NUM_LEN); 1607 1678 } 1608 1679 1609 - 
void 1680 + static void 1610 1681 bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc, char *fw_ver) 1611 1682 { 1612 1683 memset(fw_ver, 0, BFA_VERSION_LEN); 1613 1684 memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN); 1614 1685 } 1615 1686 1616 - void 1687 + static void 1617 1688 bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc, char *chip_rev) 1618 1689 { 1619 1690 BUG_ON(!(chip_rev)); ··· 1628 1699 chip_rev[5] = '\0'; 1629 1700 } 1630 1701 1631 - void 1702 + static void 1632 1703 bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, char *optrom_ver) 1633 1704 { 1634 1705 memset(optrom_ver, 0, BFA_VERSION_LEN); ··· 1636 1707 BFA_VERSION_LEN); 1637 1708 } 1638 1709 1639 - void 1710 + static void 1640 1711 bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc, char *manufacturer) 1641 1712 { 1642 1713 memset(manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN); 1643 1714 memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN); 1644 1715 } 1645 1716 1646 - void 1717 + static void 1647 1718 bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model) 1648 1719 { 1649 1720 struct bfi_ioc_attr *ioc_attr; ··· 1660 1731 BFA_MFG_NAME, ioc_attr->card_type); 1661 1732 } 1662 1733 1663 - enum bfa_ioc_state 1734 + static enum bfa_ioc_state 1664 1735 bfa_ioc_get_state(struct bfa_ioc *ioc) 1665 1736 { 1666 1737 return bfa_sm_to_state(ioc_sm_table, ioc->fsm); 1667 1738 } 1668 1739 1669 1740 void 1670 - bfa_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr) 1741 + bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr) 1671 1742 { 1672 1743 memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr)); 1673 1744 ··· 1686 1757 /** 1687 1758 * WWN public 1688 1759 */ 1689 - u64 1760 + static u64 1690 1761 bfa_ioc_get_pwwn(struct bfa_ioc *ioc) 1691 1762 { 1692 1763 return ioc->attr->pwwn; 1693 1764 } 1694 1765 1695 - u64 1696 - bfa_ioc_get_nwwn(struct bfa_ioc *ioc) 1697 - { 1698 - return ioc->attr->nwwn; 1699 - } 1700 - 1701 - u64 1702 - bfa_ioc_get_adid(struct bfa_ioc 
*ioc) 1703 - { 1704 - return ioc->attr->mfg_pwwn; 1705 - } 1706 - 1707 1766 mac_t 1708 - bfa_ioc_get_mac(struct bfa_ioc *ioc) 1767 + bfa_nw_ioc_get_mac(struct bfa_ioc *ioc) 1709 1768 { 1710 1769 /* 1711 1770 * Currently mfg mac is used as FCoE enode mac (not configured by PBC) ··· 1704 1787 return ioc->attr->mac; 1705 1788 } 1706 1789 1707 - u64 1708 - bfa_ioc_get_mfg_pwwn(struct bfa_ioc *ioc) 1709 - { 1710 - return ioc->attr->mfg_pwwn; 1711 - } 1712 - 1713 - u64 1714 - bfa_ioc_get_mfg_nwwn(struct bfa_ioc *ioc) 1715 - { 1716 - return ioc->attr->mfg_nwwn; 1717 - } 1718 - 1719 - mac_t 1790 + static mac_t 1720 1791 bfa_ioc_get_mfg_mac(struct bfa_ioc *ioc) 1721 1792 { 1722 1793 mac_t m; ··· 1717 1812 bfa_ioc_pcifn(ioc)); 1718 1813 1719 1814 return m; 1720 - } 1721 - 1722 - bool 1723 - bfa_ioc_get_fcmode(struct bfa_ioc *ioc) 1724 - { 1725 - return ioc->fcmode || !bfa_asic_id_ct(ioc->pcidev.device_id); 1726 1815 } 1727 1816 1728 1817 /**
+25 -67
drivers/net/bna/bfa_ioc.h
··· 239 239 /** 240 240 * IOC mailbox interface 241 241 */ 242 - void bfa_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd); 243 - void bfa_ioc_mbox_register(struct bfa_ioc *ioc, 244 - bfa_ioc_mbox_mcfunc_t *mcfuncs); 245 - void bfa_ioc_mbox_isr(struct bfa_ioc *ioc); 246 - void bfa_ioc_mbox_send(struct bfa_ioc *ioc, void *ioc_msg, int len); 247 - void bfa_ioc_msgget(struct bfa_ioc *ioc, void *mbmsg); 248 - void bfa_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc, 242 + void bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd); 243 + void bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc); 244 + void bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc, 249 245 bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg); 250 246 251 247 /** ··· 252 256 ((__ioc)->ioc_hwif->ioc_pll_init((__ioc)->pcidev.pci_bar_kva, \ 253 257 (__ioc)->fcmode)) 254 258 255 - enum bfa_status bfa_ioc_pll_init(struct bfa_ioc *ioc); 256 - enum bfa_status bfa_ioc_cb_pll_init(void __iomem *rb, bool fcmode); 257 - enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode); 258 - 259 259 #define bfa_ioc_isr_mode_set(__ioc, __msix) \ 260 260 ((__ioc)->ioc_hwif->ioc_isr_mode_set(__ioc, __msix)) 261 261 #define bfa_ioc_ownership_reset(__ioc) \ 262 262 ((__ioc)->ioc_hwif->ioc_ownership_reset(__ioc)) 263 263 264 - void bfa_ioc_set_ct_hwif(struct bfa_ioc *ioc); 264 + void bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc); 265 265 266 - void bfa_ioc_attach(struct bfa_ioc *ioc, void *bfa, 266 + void bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa, 267 267 struct bfa_ioc_cbfn *cbfn); 268 - void bfa_ioc_auto_recover(bool auto_recover); 269 - void bfa_ioc_detach(struct bfa_ioc *ioc); 270 - void bfa_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev, 268 + void bfa_nw_ioc_auto_recover(bool auto_recover); 269 + void bfa_nw_ioc_detach(struct bfa_ioc *ioc); 270 + void bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev, 271 271 enum bfi_mclass mc); 272 - u32 
bfa_ioc_meminfo(void); 273 - void bfa_ioc_mem_claim(struct bfa_ioc *ioc, u8 *dm_kva, u64 dm_pa); 274 - void bfa_ioc_enable(struct bfa_ioc *ioc); 275 - void bfa_ioc_disable(struct bfa_ioc *ioc); 276 - bool bfa_ioc_intx_claim(struct bfa_ioc *ioc); 272 + u32 bfa_nw_ioc_meminfo(void); 273 + void bfa_nw_ioc_mem_claim(struct bfa_ioc *ioc, u8 *dm_kva, u64 dm_pa); 274 + void bfa_nw_ioc_enable(struct bfa_ioc *ioc); 275 + void bfa_nw_ioc_disable(struct bfa_ioc *ioc); 277 276 278 - void bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, 279 - u32 boot_param); 280 - void bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *msg); 281 - void bfa_ioc_error_isr(struct bfa_ioc *ioc); 282 - bool bfa_ioc_is_operational(struct bfa_ioc *ioc); 283 - bool bfa_ioc_is_initialized(struct bfa_ioc *ioc); 284 - bool bfa_ioc_is_disabled(struct bfa_ioc *ioc); 285 - bool bfa_ioc_fw_mismatch(struct bfa_ioc *ioc); 286 - bool bfa_ioc_adapter_is_disabled(struct bfa_ioc *ioc); 287 - void bfa_ioc_cfg_complete(struct bfa_ioc *ioc); 288 - enum bfa_ioc_type bfa_ioc_get_type(struct bfa_ioc *ioc); 289 - void bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc, char *serial_num); 290 - void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc, char *fw_ver); 291 - void bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, char *optrom_ver); 292 - void bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model); 293 - void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc, 294 - char *manufacturer); 295 - void bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc, char *chip_rev); 296 - enum bfa_ioc_state bfa_ioc_get_state(struct bfa_ioc *ioc); 277 + void bfa_nw_ioc_error_isr(struct bfa_ioc *ioc); 278 + bool bfa_nw_ioc_is_operational(struct bfa_ioc *ioc); 297 279 298 - void bfa_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr); 299 - void bfa_ioc_get_adapter_attr(struct bfa_ioc *ioc, 300 - struct bfa_adapter_attr *ad_attr); 301 - u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr); 302 - u32 
bfa_ioc_smem_pgoff(struct bfa_ioc *ioc, u32 fmaddr); 303 - void bfa_ioc_set_fcmode(struct bfa_ioc *ioc); 304 - bool bfa_ioc_get_fcmode(struct bfa_ioc *ioc); 305 - void bfa_ioc_hbfail_register(struct bfa_ioc *ioc, 280 + void bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr); 281 + void bfa_nw_ioc_hbfail_register(struct bfa_ioc *ioc, 306 282 struct bfa_ioc_hbfail_notify *notify); 307 - bool bfa_ioc_sem_get(void __iomem *sem_reg); 308 - void bfa_ioc_sem_release(void __iomem *sem_reg); 309 - void bfa_ioc_hw_sem_release(struct bfa_ioc *ioc); 310 - void bfa_ioc_fwver_get(struct bfa_ioc *ioc, 283 + bool bfa_nw_ioc_sem_get(void __iomem *sem_reg); 284 + void bfa_nw_ioc_sem_release(void __iomem *sem_reg); 285 + void bfa_nw_ioc_hw_sem_release(struct bfa_ioc *ioc); 286 + void bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc, 311 287 struct bfi_ioc_image_hdr *fwhdr); 312 - bool bfa_ioc_fwver_cmp(struct bfa_ioc *ioc, 288 + bool bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, 313 289 struct bfi_ioc_image_hdr *fwhdr); 290 + mac_t bfa_nw_ioc_get_mac(struct bfa_ioc *ioc); 314 291 315 292 /* 316 293 * Timeout APIs 317 294 */ 318 - void bfa_ioc_timeout(void *ioc); 319 - void bfa_ioc_hb_check(void *ioc); 320 - void bfa_ioc_sem_timeout(void *ioc); 321 - 322 - /* 323 - * bfa mfg wwn API functions 324 - */ 325 - u64 bfa_ioc_get_pwwn(struct bfa_ioc *ioc); 326 - u64 bfa_ioc_get_nwwn(struct bfa_ioc *ioc); 327 - mac_t bfa_ioc_get_mac(struct bfa_ioc *ioc); 328 - u64 bfa_ioc_get_mfg_pwwn(struct bfa_ioc *ioc); 329 - u64 bfa_ioc_get_mfg_nwwn(struct bfa_ioc *ioc); 330 - mac_t bfa_ioc_get_mfg_mac(struct bfa_ioc *ioc); 331 - u64 bfa_ioc_get_adid(struct bfa_ioc *ioc); 295 + void bfa_nw_ioc_timeout(void *ioc); 296 + void bfa_nw_ioc_hb_check(void *ioc); 297 + void bfa_nw_ioc_sem_timeout(void *ioc); 332 298 333 299 /* 334 300 * F/W Image Size & Chunk
+24 -23
drivers/net/bna/bfa_ioc_ct.c
··· 32 32 static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix); 33 33 static void bfa_ioc_ct_notify_hbfail(struct bfa_ioc *ioc); 34 34 static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc); 35 + static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode); 35 36 36 - struct bfa_ioc_hwif hwif_ct; 37 + struct bfa_ioc_hwif nw_hwif_ct; 37 38 38 39 /** 39 40 * Called from bfa_ioc_attach() to map asic specific calls. 40 41 */ 41 42 void 42 - bfa_ioc_set_ct_hwif(struct bfa_ioc *ioc) 43 + bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc) 43 44 { 44 - hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init; 45 - hwif_ct.ioc_firmware_lock = bfa_ioc_ct_firmware_lock; 46 - hwif_ct.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock; 47 - hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init; 48 - hwif_ct.ioc_map_port = bfa_ioc_ct_map_port; 49 - hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set; 50 - hwif_ct.ioc_notify_hbfail = bfa_ioc_ct_notify_hbfail; 51 - hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset; 45 + nw_hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init; 46 + nw_hwif_ct.ioc_firmware_lock = bfa_ioc_ct_firmware_lock; 47 + nw_hwif_ct.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock; 48 + nw_hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init; 49 + nw_hwif_ct.ioc_map_port = bfa_ioc_ct_map_port; 50 + nw_hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set; 51 + nw_hwif_ct.ioc_notify_hbfail = bfa_ioc_ct_notify_hbfail; 52 + nw_hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset; 52 53 53 - ioc->ioc_hwif = &hwif_ct; 54 + ioc->ioc_hwif = &nw_hwif_ct; 54 55 } 55 56 56 57 /** ··· 77 76 BFA_IOC_FWIMG_MINSZ) 78 77 return true; 79 78 80 - bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); 79 + bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); 81 80 usecnt = readl(ioc->ioc_regs.ioc_usage_reg); 82 81 83 82 /** ··· 85 84 */ 86 85 if (usecnt == 0) { 87 86 writel(1, ioc->ioc_regs.ioc_usage_reg); 88 - bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); 87 + 
bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); 89 88 return true; 90 89 } 91 90 ··· 99 98 /** 100 99 * Check if another driver with a different firmware is active 101 100 */ 102 - bfa_ioc_fwver_get(ioc, &fwhdr); 103 - if (!bfa_ioc_fwver_cmp(ioc, &fwhdr)) { 104 - bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); 101 + bfa_nw_ioc_fwver_get(ioc, &fwhdr); 102 + if (!bfa_nw_ioc_fwver_cmp(ioc, &fwhdr)) { 103 + bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); 105 104 return false; 106 105 } 107 106 ··· 110 109 */ 111 110 usecnt++; 112 111 writel(usecnt, ioc->ioc_regs.ioc_usage_reg); 113 - bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); 112 + bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); 114 113 return true; 115 114 } 116 115 ··· 135 134 /** 136 135 * decrement usage count 137 136 */ 138 - bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); 137 + bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); 139 138 usecnt = readl(ioc->ioc_regs.ioc_usage_reg); 140 139 BUG_ON(!(usecnt > 0)); 141 140 142 141 usecnt--; 143 142 writel(usecnt, ioc->ioc_regs.ioc_usage_reg); 144 143 145 - bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); 144 + bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); 146 145 } 147 146 148 147 /** ··· 303 302 bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc) 304 303 { 305 304 if (ioc->cna) { 306 - bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); 305 + bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); 307 306 writel(0, ioc->ioc_regs.ioc_usage_reg); 308 - bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); 307 + bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); 309 308 } 310 309 311 310 /* ··· 314 313 * will lock it instead of clearing it. 
315 314 */ 316 315 readl(ioc->ioc_regs.ioc_sem_reg); 317 - bfa_ioc_hw_sem_release(ioc); 316 + bfa_nw_ioc_hw_sem_release(ioc); 318 317 } 319 318 320 - enum bfa_status 319 + static enum bfa_status 321 320 bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode) 322 321 { 323 322 u32 pll_sclk, pll_fclk, r32;
+18 -20
drivers/net/bna/bna_ctrl.c
··· 81 81 /* Post the next entry, if needed */ 82 82 if (to_post) { 83 83 mb_qe = bfa_q_first(&bna->mbox_mod.posted_q); 84 - bfa_ioc_mbox_queue(&bna->device.ioc, 84 + bfa_nw_ioc_mbox_queue(&bna->device.ioc, 85 85 &mb_qe->cmd); 86 86 } 87 87 } else { ··· 107 107 writel(init_halt, bna->device.ioc.ioc_regs.ll_halt); 108 108 } 109 109 110 - bfa_ioc_error_isr(&bna->device.ioc); 110 + bfa_nw_ioc_error_isr(&bna->device.ioc); 111 111 } 112 112 113 113 void ··· 118 118 return; 119 119 } 120 120 if (BNA_IS_MBOX_INTR(intr_status)) 121 - bfa_ioc_mbox_isr(&bna->device.ioc); 121 + bfa_nw_ioc_mbox_isr(&bna->device.ioc); 122 122 } 123 123 124 124 void ··· 133 133 bna->mbox_mod.msg_pending++; 134 134 if (bna->mbox_mod.state == BNA_MBOX_FREE) { 135 135 list_add_tail(&mbox_qe->qe, &bna->mbox_mod.posted_q); 136 - bfa_ioc_mbox_queue(&bna->device.ioc, &mbox_qe->cmd); 136 + bfa_nw_ioc_mbox_queue(&bna->device.ioc, &mbox_qe->cmd); 137 137 bna->mbox_mod.state = BNA_MBOX_POSTED; 138 138 } else { 139 139 list_add_tail(&mbox_qe->qe, &bna->mbox_mod.posted_q); ··· 180 180 void 181 181 bna_mbox_mod_init(struct bna_mbox_mod *mbox_mod, struct bna *bna) 182 182 { 183 - bfa_ioc_mbox_regisr(&bna->device.ioc, BFI_MC_LL, bna_ll_isr, bna); 183 + bfa_nw_ioc_mbox_regisr(&bna->device.ioc, BFI_MC_LL, bna_ll_isr, bna); 184 184 mbox_mod->state = BNA_MBOX_FREE; 185 185 mbox_mod->msg_ctr = mbox_mod->msg_pending = 0; 186 186 INIT_LIST_HEAD(&mbox_mod->posted_q); ··· 1289 1289 void 1290 1290 bna_port_mac_get(struct bna_port *port, mac_t *mac) 1291 1291 { 1292 - *mac = bfa_ioc_get_mac(&port->bna->device.ioc); 1292 + *mac = bfa_nw_ioc_get_mac(&port->bna->device.ioc); 1293 1293 } 1294 1294 1295 1295 /** ··· 1427 1427 case DEVICE_E_ENABLE: 1428 1428 if (device->intr_type == BNA_INTR_T_MSIX) 1429 1429 bna_mbox_msix_idx_set(device); 1430 - bfa_ioc_enable(&device->ioc); 1430 + bfa_nw_ioc_enable(&device->ioc); 1431 1431 bfa_fsm_set_state(device, bna_device_sm_ioc_ready_wait); 1432 1432 break; 1433 1433 ··· 1547 1547 static 
void 1548 1548 bna_device_sm_ioc_disable_wait_entry(struct bna_device *device) 1549 1549 { 1550 - bfa_ioc_disable(&device->ioc); 1550 + bfa_nw_ioc_disable(&device->ioc); 1551 1551 } 1552 1552 1553 1553 static void ··· 1655 1655 * 1. DMA memory for IOC attributes 1656 1656 * 2. Kernel memory for FW trace 1657 1657 */ 1658 - bfa_ioc_attach(&device->ioc, device, &bfa_iocll_cbfn); 1659 - bfa_ioc_pci_init(&device->ioc, &bna->pcidev, BFI_MC_LL); 1658 + bfa_nw_ioc_attach(&device->ioc, device, &bfa_iocll_cbfn); 1659 + bfa_nw_ioc_pci_init(&device->ioc, &bna->pcidev, BFI_MC_LL); 1660 1660 1661 1661 BNA_GET_DMA_ADDR( 1662 1662 &res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].dma, dma); 1663 - bfa_ioc_mem_claim(&device->ioc, 1663 + bfa_nw_ioc_mem_claim(&device->ioc, 1664 1664 res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].kva, 1665 1665 dma); 1666 1666 ··· 1686 1686 { 1687 1687 bna_mbox_mod_uninit(&device->bna->mbox_mod); 1688 1688 1689 - bfa_cee_detach(&device->bna->cee); 1690 - 1691 - bfa_ioc_detach(&device->ioc); 1689 + bfa_nw_ioc_detach(&device->ioc); 1692 1690 1693 1691 device->bna = NULL; 1694 1692 } ··· 1781 1783 &res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].dma, dma); 1782 1784 kva = res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].kva; 1783 1785 1784 - bfa_cee_attach(&bna->cee, &device->ioc, bna); 1785 - bfa_cee_mem_claim(&bna->cee, kva, dma); 1786 - kva += bfa_cee_meminfo(); 1787 - dma += bfa_cee_meminfo(); 1786 + bfa_nw_cee_attach(&bna->cee, &device->ioc, bna); 1787 + bfa_nw_cee_mem_claim(&bna->cee, kva, dma); 1788 + kva += bfa_nw_cee_meminfo(); 1789 + dma += bfa_nw_cee_meminfo(); 1788 1790 1789 1791 } 1790 1792 ··· 1798 1800 res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mem_type = BNA_MEM_T_DMA; 1799 1801 res_info[BNA_RES_MEM_T_COM].res_u.mem_info.num = 1; 1800 1802 res_info[BNA_RES_MEM_T_COM].res_u.mem_info.len = ALIGN( 1801 - bfa_cee_meminfo(), PAGE_SIZE); 1803 + bfa_nw_cee_meminfo(), PAGE_SIZE); 1802 1804 1803 1805 /* Virtual memory for retreiving 
fw_trc */ 1804 1806 res_info[BNA_RES_MEM_T_FWTRC].res_type = BNA_RES_T_MEM; ··· 3331 3333 res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mem_type = BNA_MEM_T_DMA; 3332 3334 res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.num = 1; 3333 3335 res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.len = 3334 - ALIGN(bfa_ioc_meminfo(), PAGE_SIZE); 3336 + ALIGN(bfa_nw_ioc_meminfo(), PAGE_SIZE); 3335 3337 3336 3338 /* DMA memory for index segment of an IB */ 3337 3339 res_info[BNA_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM;
+4 -5
drivers/net/bna/bnad.c
··· 1365 1365 unsigned long flags; 1366 1366 1367 1367 spin_lock_irqsave(&bnad->bna_lock, flags); 1368 - bfa_ioc_timeout((void *) &bnad->bna.device.ioc); 1368 + bfa_nw_ioc_timeout((void *) &bnad->bna.device.ioc); 1369 1369 spin_unlock_irqrestore(&bnad->bna_lock, flags); 1370 1370 } 1371 1371 ··· 1376 1376 unsigned long flags; 1377 1377 1378 1378 spin_lock_irqsave(&bnad->bna_lock, flags); 1379 - bfa_ioc_hb_check((void *) &bnad->bna.device.ioc); 1379 + bfa_nw_ioc_hb_check((void *) &bnad->bna.device.ioc); 1380 1380 spin_unlock_irqrestore(&bnad->bna_lock, flags); 1381 1381 } 1382 1382 ··· 1387 1387 unsigned long flags; 1388 1388 1389 1389 spin_lock_irqsave(&bnad->bna_lock, flags); 1390 - bfa_ioc_sem_timeout((void *) &bnad->bna.device.ioc); 1390 + bfa_nw_ioc_sem_timeout((void *) &bnad->bna.device.ioc); 1391 1391 spin_unlock_irqrestore(&bnad->bna_lock, flags); 1392 1392 } 1393 1393 ··· 3067 3067 } 3068 3068 bnad = netdev_priv(netdev); 3069 3069 3070 - 3071 3070 /* 3072 3071 * PCI initialization 3073 3072 * Output : using_dac = 1 for 64 bit DMA ··· 3238 3239 3239 3240 pr_info("Brocade 10G Ethernet driver\n"); 3240 3241 3241 - bfa_ioc_auto_recover(bnad_ioc_auto_recover); 3242 + bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover); 3242 3243 3243 3244 err = pci_register_driver(&bnad_pci_driver); 3244 3245 if (err < 0) {
+1 -1
drivers/net/bna/bnad_ethtool.c
··· 276 276 if (ioc_attr) { 277 277 memset(ioc_attr, 0, sizeof(*ioc_attr)); 278 278 spin_lock_irqsave(&bnad->bna_lock, flags); 279 - bfa_ioc_get_attr(&bnad->bna.device.ioc, ioc_attr); 279 + bfa_nw_ioc_get_attr(&bnad->bna.device.ioc, ioc_attr); 280 280 spin_unlock_irqrestore(&bnad->bna_lock, flags); 281 281 282 282 strncpy(drvinfo->fw_version, ioc_attr->adapter_attr.fw_ver,