Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

bna: scope and dead code cleanup

As suggested by Stephen Hemminger:
1) Made functions and data structures static wherever possible.
2) Removed unused code.

Signed-off-by: Debashis Dutt <ddutt@brocade.com>
Signed-off-by: Rasesh Mody <rmody@brocade.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by Rasesh Mody, committed by David S. Miller.
Commit: b7ee31c5 (parent: e2fa6f2e)

+170 -692
+1 -7
drivers/net/bna/bfa_ioc.c
··· 65 65 (!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \ 66 66 readl((__ioc)->ioc_regs.hfn_mbox_cmd)) 67 67 68 - bool bfa_nw_auto_recover = true; 68 + static bool bfa_nw_auto_recover = true; 69 69 70 70 /* 71 71 * forward declarations ··· 1274 1274 bfa_nw_ioc_auto_recover(bool auto_recover) 1275 1275 { 1276 1276 bfa_nw_auto_recover = auto_recover; 1277 - } 1278 - 1279 - bool 1280 - bfa_nw_ioc_is_operational(struct bfa_ioc *ioc) 1281 - { 1282 - return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op); 1283 1277 } 1284 1278 1285 1279 static void
-1
drivers/net/bna/bfa_ioc.h
··· 271 271 void bfa_nw_ioc_disable(struct bfa_ioc *ioc); 272 272 273 273 void bfa_nw_ioc_error_isr(struct bfa_ioc *ioc); 274 - bool bfa_nw_ioc_is_operational(struct bfa_ioc *ioc); 275 274 276 275 void bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr); 277 276 void bfa_nw_ioc_hbfail_register(struct bfa_ioc *ioc,
+1 -1
drivers/net/bna/bfa_ioc_ct.c
··· 34 34 static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc); 35 35 static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode); 36 36 37 - struct bfa_ioc_hwif nw_hwif_ct; 37 + static struct bfa_ioc_hwif nw_hwif_ct; 38 38 39 39 /** 40 40 * Called from bfa_ioc_attach() to map asic specific calls.
+1 -1
drivers/net/bna/bfa_sm.h
··· 77 77 ((_fsm)->fsm == (bfa_fsm_t)(_state)) 78 78 79 79 static inline int 80 - bfa_sm_to_state(struct bfa_sm_table *smt, bfa_sm_t sm) 80 + bfa_sm_to_state(const struct bfa_sm_table *smt, bfa_sm_t sm) 81 81 { 82 82 int i = 0; 83 83
+2 -106
drivers/net/bna/bna.h
··· 19 19 #include "bfi_ll.h" 20 20 #include "bna_types.h" 21 21 22 - extern u32 bna_dim_vector[][BNA_BIAS_T_MAX]; 23 - extern u32 bna_napi_dim_vector[][BNA_BIAS_T_MAX]; 22 + extern const u32 bna_napi_dim_vector[][BNA_BIAS_T_MAX]; 24 23 25 24 /** 26 25 * ··· 343 344 * BNA 344 345 */ 345 346 346 - /* Internal APIs */ 347 - void bna_adv_res_req(struct bna_res_info *res_info); 348 - 349 347 /* APIs for BNAD */ 350 348 void bna_res_req(struct bna_res_info *res_info); 351 349 void bna_init(struct bna *bna, struct bnad *bnad, ··· 350 354 struct bna_res_info *res_info); 351 355 void bna_uninit(struct bna *bna); 352 356 void bna_stats_get(struct bna *bna); 353 - void bna_stats_clr(struct bna *bna); 354 357 void bna_get_perm_mac(struct bna *bna, u8 *mac); 355 358 356 359 /* APIs for Rx */ ··· 371 376 * DEVICE 372 377 */ 373 378 374 - /* Interanl APIs */ 375 - void bna_adv_device_init(struct bna_device *device, struct bna *bna, 376 - struct bna_res_info *res_info); 377 - 378 - /* APIs for BNA */ 379 - void bna_device_init(struct bna_device *device, struct bna *bna, 380 - struct bna_res_info *res_info); 381 - void bna_device_uninit(struct bna_device *device); 382 - void bna_device_cb_port_stopped(void *arg, enum bna_cb_status status); 383 - int bna_device_status_get(struct bna_device *device); 384 - int bna_device_state_get(struct bna_device *device); 385 - 386 379 /* APIs for BNAD */ 387 380 void bna_device_enable(struct bna_device *device); 388 381 void bna_device_disable(struct bna_device *device, ··· 380 397 * MBOX 381 398 */ 382 399 383 - /* APIs for DEVICE */ 384 - void bna_mbox_mod_init(struct bna_mbox_mod *mbox_mod, struct bna *bna); 385 - void bna_mbox_mod_uninit(struct bna_mbox_mod *mbox_mod); 386 - void bna_mbox_mod_start(struct bna_mbox_mod *mbox_mod); 387 - void bna_mbox_mod_stop(struct bna_mbox_mod *mbox_mod); 388 - 389 400 /* APIs for PORT, TX, RX */ 390 401 void bna_mbox_handler(struct bna *bna, u32 intr_status); 391 402 void bna_mbox_send(struct bna *bna, 
struct bna_mbox_qe *mbox_qe); ··· 387 410 /** 388 411 * PORT 389 412 */ 390 - 391 - /* APIs for BNA */ 392 - void bna_port_init(struct bna_port *port, struct bna *bna); 393 - void bna_port_uninit(struct bna_port *port); 394 - int bna_port_state_get(struct bna_port *port); 395 - int bna_llport_state_get(struct bna_llport *llport); 396 - 397 - /* APIs for DEVICE */ 398 - void bna_port_start(struct bna_port *port); 399 - void bna_port_stop(struct bna_port *port); 400 - void bna_port_fail(struct bna_port *port); 401 413 402 414 /* API for RX */ 403 415 int bna_port_mtu_get(struct bna_port *port); ··· 403 437 void bna_port_mtu_set(struct bna_port *port, int mtu, 404 438 void (*cbfn)(struct bnad *, enum bna_cb_status)); 405 439 void bna_port_mac_get(struct bna_port *port, mac_t *mac); 406 - void bna_port_type_set(struct bna_port *port, enum bna_port_type type); 407 - void bna_port_linkcbfn_set(struct bna_port *port, 408 - void (*linkcbfn)(struct bnad *, 409 - enum bna_link_status)); 410 - void bna_port_admin_up(struct bna_port *port); 411 - void bna_port_admin_down(struct bna_port *port); 412 440 413 441 /* Callbacks for TX, RX */ 414 442 void bna_port_cb_tx_stopped(struct bna_port *port, 415 443 enum bna_cb_status status); 416 444 void bna_port_cb_rx_stopped(struct bna_port *port, 417 445 enum bna_cb_status status); 418 - 419 - /* Callbacks for MBOX */ 420 - void bna_port_cb_link_up(struct bna_port *port, struct bfi_ll_aen *aen, 421 - int status); 422 - void bna_port_cb_link_down(struct bna_port *port, int status); 423 446 424 447 /** 425 448 * IB ··· 419 464 struct bna_res_info *res_info); 420 465 void bna_ib_mod_uninit(struct bna_ib_mod *ib_mod); 421 466 422 - /* APIs for TX, RX */ 423 - struct bna_ib *bna_ib_get(struct bna_ib_mod *ib_mod, 424 - enum bna_intr_type intr_type, int vector); 425 - void bna_ib_put(struct bna_ib_mod *ib_mod, struct bna_ib *ib); 426 - int bna_ib_reserve_idx(struct bna_ib *ib); 427 - void bna_ib_release_idx(struct bna_ib *ib, int idx); 428 - 
int bna_ib_config(struct bna_ib *ib, struct bna_ib_config *ib_config); 429 - void bna_ib_start(struct bna_ib *ib); 430 - void bna_ib_stop(struct bna_ib *ib); 431 - void bna_ib_fail(struct bna_ib *ib); 432 - void bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo); 433 - 434 467 /** 435 468 * TX MODULE AND TX 436 469 */ 437 - 438 - /* Internal APIs */ 439 - void bna_tx_prio_changed(struct bna_tx *tx, int prio); 440 470 441 471 /* APIs for BNA */ 442 472 void bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna, ··· 448 508 void bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type, 449 509 void (*cbfn)(void *, struct bna_tx *, 450 510 enum bna_cb_status)); 451 - enum bna_cb_status 452 - bna_tx_prio_set(struct bna_tx *tx, int prio, 453 - void (*cbfn)(struct bnad *, struct bna_tx *, 454 - enum bna_cb_status)); 455 511 void bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo); 456 512 457 513 /** ··· 500 564 void (*cbfn)(void *, struct bna_rx *, 501 565 enum bna_cb_status)); 502 566 void bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo); 503 - void bna_rx_dim_reconfig(struct bna *bna, u32 vector[][BNA_BIAS_T_MAX]); 567 + void bna_rx_dim_reconfig(struct bna *bna, const u32 vector[][BNA_BIAS_T_MAX]); 504 568 void bna_rx_dim_update(struct bna_ccb *ccb); 505 569 enum bna_cb_status 506 570 bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac, 507 - void (*cbfn)(struct bnad *, struct bna_rx *, 508 - enum bna_cb_status)); 509 - enum bna_cb_status 510 - bna_rx_ucast_add(struct bna_rx *rx, u8* ucmac, 511 - void (*cbfn)(struct bnad *, struct bna_rx *, 512 - enum bna_cb_status)); 513 - enum bna_cb_status 514 - bna_rx_ucast_del(struct bna_rx *rx, u8 *ucmac, 515 571 void (*cbfn)(struct bnad *, struct bna_rx *, 516 572 enum bna_cb_status)); 517 573 enum bna_cb_status ··· 511 583 void (*cbfn)(struct bnad *, struct bna_rx *, 512 584 enum bna_cb_status)); 513 585 enum bna_cb_status 514 - bna_rx_mcast_del(struct bna_rx *rx, u8 
*mcmac, 515 - void (*cbfn)(struct bnad *, struct bna_rx *, 516 - enum bna_cb_status)); 517 - enum bna_cb_status 518 586 bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mcmac, 519 587 void (*cbfn)(struct bnad *, struct bna_rx *, 520 588 enum bna_cb_status)); 521 - void bna_rx_mcast_delall(struct bna_rx *rx, 522 - void (*cbfn)(struct bnad *, struct bna_rx *, 523 - enum bna_cb_status)); 524 589 enum bna_cb_status 525 590 bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode rxmode, 526 591 enum bna_rxmode bitmask, ··· 522 601 void bna_rx_vlan_add(struct bna_rx *rx, int vlan_id); 523 602 void bna_rx_vlan_del(struct bna_rx *rx, int vlan_id); 524 603 void bna_rx_vlanfilter_enable(struct bna_rx *rx); 525 - void bna_rx_vlanfilter_disable(struct bna_rx *rx); 526 - void bna_rx_rss_enable(struct bna_rx *rx); 527 - void bna_rx_rss_disable(struct bna_rx *rx); 528 - void bna_rx_rss_reconfig(struct bna_rx *rx, struct bna_rxf_rss *rss_config); 529 - void bna_rx_rss_rit_set(struct bna_rx *rx, unsigned int *vectors, 530 - int nvectors); 531 604 void bna_rx_hds_enable(struct bna_rx *rx, struct bna_rxf_hds *hds_config, 532 605 void (*cbfn)(struct bnad *, struct bna_rx *, 533 606 enum bna_cb_status)); 534 607 void bna_rx_hds_disable(struct bna_rx *rx, 535 608 void (*cbfn)(struct bnad *, struct bna_rx *, 536 609 enum bna_cb_status)); 537 - void bna_rx_receive_pause(struct bna_rx *rx, 538 - void (*cbfn)(struct bnad *, struct bna_rx *, 539 - enum bna_cb_status)); 540 - void bna_rx_receive_resume(struct bna_rx *rx, 541 - void (*cbfn)(struct bnad *, struct bna_rx *, 542 - enum bna_cb_status)); 543 - 544 - /* RxF APIs for RX */ 545 - void bna_rxf_start(struct bna_rxf *rxf); 546 - void bna_rxf_stop(struct bna_rxf *rxf); 547 - void bna_rxf_fail(struct bna_rxf *rxf); 548 - void bna_rxf_init(struct bna_rxf *rxf, struct bna_rx *rx, 549 - struct bna_rx_config *q_config); 550 - void bna_rxf_uninit(struct bna_rxf *rxf); 551 - 552 - /* Callback from RXF to RX */ 553 - void 
bna_rx_cb_rxf_stopped(struct bna_rx *rx, enum bna_cb_status); 554 - void bna_rx_cb_rxf_started(struct bna_rx *rx, enum bna_cb_status); 555 610 556 611 /** 557 612 * BNAD ··· 536 639 /* Callbacks for BNA */ 537 640 void bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status, 538 641 struct bna_stats *stats); 539 - void bnad_cb_stats_clr(struct bnad *bnad); 540 642 541 643 /* Callbacks for DEVICE */ 542 644 void bnad_cb_device_enabled(struct bnad *bnad, enum bna_cb_status status);
+98 -461
drivers/net/bna/bna_ctrl.c
··· 19 19 #include "bfa_sm.h" 20 20 #include "bfa_wc.h" 21 21 22 + static void bna_device_cb_port_stopped(void *arg, enum bna_cb_status status); 23 + 24 + static void 25 + bna_port_cb_link_up(struct bna_port *port, struct bfi_ll_aen *aen, 26 + int status) 27 + { 28 + int i; 29 + u8 prio_map; 30 + 31 + port->llport.link_status = BNA_LINK_UP; 32 + if (aen->cee_linkup) 33 + port->llport.link_status = BNA_CEE_UP; 34 + 35 + /* Compute the priority */ 36 + prio_map = aen->prio_map; 37 + if (prio_map) { 38 + for (i = 0; i < 8; i++) { 39 + if ((prio_map >> i) & 0x1) 40 + break; 41 + } 42 + port->priority = i; 43 + } else 44 + port->priority = 0; 45 + 46 + /* Dispatch events */ 47 + bna_tx_mod_cee_link_status(&port->bna->tx_mod, aen->cee_linkup); 48 + bna_tx_mod_prio_changed(&port->bna->tx_mod, port->priority); 49 + port->link_cbfn(port->bna->bnad, port->llport.link_status); 50 + } 51 + 52 + static void 53 + bna_port_cb_link_down(struct bna_port *port, int status) 54 + { 55 + port->llport.link_status = BNA_LINK_DOWN; 56 + 57 + /* Dispatch events */ 58 + bna_tx_mod_cee_link_status(&port->bna->tx_mod, BNA_LINK_DOWN); 59 + port->link_cbfn(port->bna->bnad, BNA_LINK_DOWN); 60 + } 61 + 22 62 /** 23 63 * MBOX 24 64 */ ··· 136 96 bna_mbox_aen_callback(bna, msg); 137 97 } 138 98 139 - void 99 + static void 140 100 bna_err_handler(struct bna *bna, u32 intr_status) 141 101 { 142 102 u32 init_halt; ··· 180 140 } 181 141 } 182 142 183 - void 143 + static void 184 144 bna_mbox_flush_q(struct bna *bna, struct list_head *q) 185 145 { 186 146 struct bna_mbox_qe *mb_qe = NULL; ··· 206 166 bna->mbox_mod.state = BNA_MBOX_FREE; 207 167 } 208 168 209 - void 169 + static void 210 170 bna_mbox_mod_start(struct bna_mbox_mod *mbox_mod) 211 171 { 212 172 } 213 173 214 - void 174 + static void 215 175 bna_mbox_mod_stop(struct bna_mbox_mod *mbox_mod) 216 176 { 217 177 bna_mbox_flush_q(mbox_mod->bna, &mbox_mod->posted_q); 218 178 } 219 179 220 - void 180 + static void 221 181 bna_mbox_mod_init(struct 
bna_mbox_mod *mbox_mod, struct bna *bna) 222 182 { 223 183 bfa_nw_ioc_mbox_regisr(&bna->device.ioc, BFI_MC_LL, bna_ll_isr, bna); ··· 227 187 mbox_mod->bna = bna; 228 188 } 229 189 230 - void 190 + static void 231 191 bna_mbox_mod_uninit(struct bna_mbox_mod *mbox_mod) 232 192 { 233 193 mbox_mod->bna = NULL; ··· 578 538 bfa_fsm_send_event(llport, LLPORT_E_FWRESP_DOWN); 579 539 } 580 540 581 - void 541 + static void 582 542 bna_port_cb_llport_stopped(struct bna_port *port, 583 543 enum bna_cb_status status) 584 544 { ··· 631 591 bfa_fsm_send_event(llport, LLPORT_E_FAIL); 632 592 } 633 593 634 - int 594 + static int 635 595 bna_llport_state_get(struct bna_llport *llport) 636 596 { 637 597 return bfa_sm_to_state(llport_sm_table, llport->fsm); ··· 1149 1109 bfa_fsm_send_event(port, PORT_E_CHLD_STOPPED); 1150 1110 } 1151 1111 1152 - void 1112 + static void 1153 1113 bna_port_init(struct bna_port *port, struct bna *bna) 1154 1114 { 1155 1115 port->bna = bna; ··· 1177 1137 bna_llport_init(&port->llport, bna); 1178 1138 } 1179 1139 1180 - void 1140 + static void 1181 1141 bna_port_uninit(struct bna_port *port) 1182 1142 { 1183 1143 bna_llport_uninit(&port->llport); ··· 1187 1147 port->bna = NULL; 1188 1148 } 1189 1149 1190 - int 1150 + static int 1191 1151 bna_port_state_get(struct bna_port *port) 1192 1152 { 1193 1153 return bfa_sm_to_state(port_sm_table, port->fsm); 1194 1154 } 1195 1155 1196 - void 1156 + static void 1197 1157 bna_port_start(struct bna_port *port) 1198 1158 { 1199 1159 port->flags |= BNA_PORT_F_DEVICE_READY; ··· 1201 1161 bfa_fsm_send_event(port, PORT_E_START); 1202 1162 } 1203 1163 1204 - void 1164 + static void 1205 1165 bna_port_stop(struct bna_port *port) 1206 1166 { 1207 1167 port->stop_cbfn = bna_device_cb_port_stopped; ··· 1211 1171 bfa_fsm_send_event(port, PORT_E_STOP); 1212 1172 } 1213 1173 1214 - void 1174 + static void 1215 1175 bna_port_fail(struct bna_port *port) 1216 1176 { 1217 1177 port->flags &= ~BNA_PORT_F_DEVICE_READY; ··· 1228 1188 
bna_port_cb_rx_stopped(struct bna_port *port, enum bna_cb_status status) 1229 1189 { 1230 1190 bfa_wc_down(&port->chld_stop_wc); 1231 - } 1232 - 1233 - void 1234 - bna_port_cb_link_up(struct bna_port *port, struct bfi_ll_aen *aen, 1235 - int status) 1236 - { 1237 - int i; 1238 - u8 prio_map; 1239 - 1240 - port->llport.link_status = BNA_LINK_UP; 1241 - if (aen->cee_linkup) 1242 - port->llport.link_status = BNA_CEE_UP; 1243 - 1244 - /* Compute the priority */ 1245 - prio_map = aen->prio_map; 1246 - if (prio_map) { 1247 - for (i = 0; i < 8; i++) { 1248 - if ((prio_map >> i) & 0x1) 1249 - break; 1250 - } 1251 - port->priority = i; 1252 - } else 1253 - port->priority = 0; 1254 - 1255 - /* Dispatch events */ 1256 - bna_tx_mod_cee_link_status(&port->bna->tx_mod, aen->cee_linkup); 1257 - bna_tx_mod_prio_changed(&port->bna->tx_mod, port->priority); 1258 - port->link_cbfn(port->bna->bnad, port->llport.link_status); 1259 - } 1260 - 1261 - void 1262 - bna_port_cb_link_down(struct bna_port *port, int status) 1263 - { 1264 - port->llport.link_status = BNA_LINK_DOWN; 1265 - 1266 - /* Dispatch events */ 1267 - bna_tx_mod_cee_link_status(&port->bna->tx_mod, BNA_LINK_DOWN); 1268 - port->link_cbfn(port->bna->bnad, BNA_LINK_DOWN); 1269 1191 } 1270 1192 1271 1193 int ··· 1295 1293 } 1296 1294 1297 1295 /** 1298 - * Should be called only when port is disabled 1299 - */ 1300 - void 1301 - bna_port_type_set(struct bna_port *port, enum bna_port_type type) 1302 - { 1303 - port->type = type; 1304 - port->llport.type = type; 1305 - } 1306 - 1307 - /** 1308 - * Should be called only when port is disabled 1309 - */ 1310 - void 1311 - bna_port_linkcbfn_set(struct bna_port *port, 1312 - void (*linkcbfn)(struct bnad *, enum bna_link_status)) 1313 - { 1314 - port->link_cbfn = linkcbfn; 1315 - } 1316 - 1317 - void 1318 - bna_port_admin_up(struct bna_port *port) 1319 - { 1320 - struct bna_llport *llport = &port->llport; 1321 - 1322 - if (llport->flags & BNA_LLPORT_F_ENABLED) 1323 - return; 1324 - 
1325 - llport->flags |= BNA_LLPORT_F_ENABLED; 1326 - 1327 - if (llport->flags & BNA_LLPORT_F_RX_ENABLED) 1328 - bfa_fsm_send_event(llport, LLPORT_E_UP); 1329 - } 1330 - 1331 - void 1332 - bna_port_admin_down(struct bna_port *port) 1333 - { 1334 - struct bna_llport *llport = &port->llport; 1335 - 1336 - if (!(llport->flags & BNA_LLPORT_F_ENABLED)) 1337 - return; 1338 - 1339 - llport->flags &= ~BNA_LLPORT_F_ENABLED; 1340 - 1341 - if (llport->flags & BNA_LLPORT_F_RX_ENABLED) 1342 - bfa_fsm_send_event(llport, LLPORT_E_DOWN); 1343 - } 1344 - 1345 - /** 1346 1296 * DEVICE 1347 1297 */ 1348 1298 #define enable_mbox_intr(_device)\ ··· 1311 1357 bnad_cb_device_disable_mbox_intr((_device)->bna->bnad);\ 1312 1358 } while (0) 1313 1359 1314 - const struct bna_chip_regs_offset reg_offset[] = 1360 + static const struct bna_chip_regs_offset reg_offset[] = 1315 1361 {{HOST_PAGE_NUM_FN0, HOSTFN0_INT_STATUS, 1316 1362 HOSTFN0_INT_MASK, HOST_MSIX_ERR_INDEX_FN0}, 1317 1363 {HOST_PAGE_NUM_FN1, HOSTFN1_INT_STATUS, ··· 1596 1642 bna_device_cb_iocll_reset 1597 1643 }; 1598 1644 1599 - void 1645 + /* device */ 1646 + static void 1647 + bna_adv_device_init(struct bna_device *device, struct bna *bna, 1648 + struct bna_res_info *res_info) 1649 + { 1650 + u8 *kva; 1651 + u64 dma; 1652 + 1653 + device->bna = bna; 1654 + 1655 + kva = res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mdl[0].kva; 1656 + 1657 + /** 1658 + * Attach common modules (Diag, SFP, CEE, Port) and claim respective 1659 + * DMA memory. 
1660 + */ 1661 + BNA_GET_DMA_ADDR( 1662 + &res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].dma, dma); 1663 + kva = res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].kva; 1664 + 1665 + bfa_nw_cee_attach(&bna->cee, &device->ioc, bna); 1666 + bfa_nw_cee_mem_claim(&bna->cee, kva, dma); 1667 + kva += bfa_nw_cee_meminfo(); 1668 + dma += bfa_nw_cee_meminfo(); 1669 + 1670 + } 1671 + 1672 + static void 1600 1673 bna_device_init(struct bna_device *device, struct bna *bna, 1601 1674 struct bna_res_info *res_info) 1602 1675 { ··· 1662 1681 bfa_fsm_set_state(device, bna_device_sm_stopped); 1663 1682 } 1664 1683 1665 - void 1684 + static void 1666 1685 bna_device_uninit(struct bna_device *device) 1667 1686 { 1668 1687 bna_mbox_mod_uninit(&device->bna->mbox_mod); ··· 1672 1691 device->bna = NULL; 1673 1692 } 1674 1693 1675 - void 1694 + static void 1676 1695 bna_device_cb_port_stopped(void *arg, enum bna_cb_status status) 1677 1696 { 1678 1697 struct bna_device *device = (struct bna_device *)arg; ··· 1680 1699 bfa_fsm_send_event(device, DEVICE_E_PORT_STOPPED); 1681 1700 } 1682 1701 1683 - int 1702 + static int 1684 1703 bna_device_status_get(struct bna_device *device) 1685 1704 { 1686 1705 return device->fsm == (bfa_fsm_t)bna_device_sm_ready; ··· 1714 1733 bfa_fsm_send_event(device, DEVICE_E_DISABLE); 1715 1734 } 1716 1735 1717 - int 1736 + static int 1718 1737 bna_device_state_get(struct bna_device *device) 1719 1738 { 1720 1739 return bfa_sm_to_state(device_sm_table, device->fsm); 1721 1740 } 1722 1741 1723 - u32 bna_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = { 1724 - {12, 20}, 1725 - {10, 18}, 1726 - {8, 16}, 1727 - {6, 12}, 1728 - {4, 8}, 1729 - {3, 6}, 1730 - {2, 4}, 1731 - {1, 2}, 1732 - }; 1733 - 1734 - u32 bna_napi_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = { 1742 + const u32 bna_napi_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = { 1735 1743 {12, 12}, 1736 1744 {6, 10}, 1737 1745 {5, 10}, ··· 1731 1761 {1, 2}, 1732 1762 }; 1733 1763 1734 - /* device */ 1735 - void 
1736 - bna_adv_device_init(struct bna_device *device, struct bna *bna, 1737 - struct bna_res_info *res_info) 1738 - { 1739 - u8 *kva; 1740 - u64 dma; 1741 - 1742 - device->bna = bna; 1743 - 1744 - kva = res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mdl[0].kva; 1745 - 1746 - /** 1747 - * Attach common modules (Diag, SFP, CEE, Port) and claim respective 1748 - * DMA memory. 1749 - */ 1750 - BNA_GET_DMA_ADDR( 1751 - &res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].dma, dma); 1752 - kva = res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].kva; 1753 - 1754 - bfa_nw_cee_attach(&bna->cee, &device->ioc, bna); 1755 - bfa_nw_cee_mem_claim(&bna->cee, kva, dma); 1756 - kva += bfa_nw_cee_meminfo(); 1757 - dma += bfa_nw_cee_meminfo(); 1758 - 1759 - } 1760 - 1761 1764 /* utils */ 1762 1765 1763 - void 1766 + static void 1764 1767 bna_adv_res_req(struct bna_res_info *res_info) 1765 1768 { 1766 1769 /* DMA memory for COMMON_MODULE */ ··· 1987 2044 bna->stats.txf_bmap[1] = bna->tx_mod.txf_bmap[1]; 1988 2045 } 1989 2046 1990 - static void 1991 - bna_fw_cb_stats_clr(void *arg, int status) 1992 - { 1993 - struct bna *bna = (struct bna *)arg; 1994 - 1995 - bfa_q_qe_init(&bna->mbox_qe.qe); 1996 - 1997 - memset(bna->stats.sw_stats, 0, sizeof(struct bna_sw_stats)); 1998 - memset(bna->stats.hw_stats, 0, sizeof(struct bfi_ll_stats)); 1999 - 2000 - bnad_cb_stats_clr(bna->bnad); 2001 - } 2002 - 2003 - static void 2004 - bna_fw_stats_clr(struct bna *bna) 2005 - { 2006 - struct bfi_ll_stats_req ll_req; 2007 - 2008 - bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_STATS_CLEAR_REQ, 0); 2009 - ll_req.stats_mask = htons(BFI_LL_STATS_ALL); 2010 - ll_req.rxf_id_mask[0] = htonl(0xffffffff); 2011 - ll_req.rxf_id_mask[1] = htonl(0xffffffff); 2012 - ll_req.txf_id_mask[0] = htonl(0xffffffff); 2013 - ll_req.txf_id_mask[1] = htonl(0xffffffff); 2014 - 2015 - bna_mbox_qe_fill(&bna->mbox_qe, &ll_req, sizeof(ll_req), 2016 - bna_fw_cb_stats_clr, bna); 2017 - bna_mbox_send(bna, &bna->mbox_qe); 2018 - } 2019 - 2020 
2047 void 2021 2048 bna_stats_get(struct bna *bna) 2022 2049 { ··· 1996 2083 bnad_cb_stats_get(bna->bnad, BNA_CB_FAIL, &bna->stats); 1997 2084 } 1998 2085 1999 - void 2000 - bna_stats_clr(struct bna *bna) 2001 - { 2002 - if (bna_device_status_get(&bna->device)) 2003 - bna_fw_stats_clr(bna); 2004 - else { 2005 - memset(&bna->stats.sw_stats, 0, 2006 - sizeof(struct bna_sw_stats)); 2007 - memset(bna->stats.hw_stats, 0, 2008 - sizeof(struct bfi_ll_stats)); 2009 - bnad_cb_stats_clr(bna->bnad); 2010 - } 2011 - } 2012 - 2013 2086 /* IB */ 2014 - void 2087 + static void 2015 2088 bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo) 2016 2089 { 2017 2090 ib->ib_config.coalescing_timeo = coalescing_timeo; ··· 2056 2157 bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe); 2057 2158 } 2058 2159 2059 - void 2160 + static void 2060 2161 __rxf_default_function_config(struct bna_rxf *rxf, enum bna_status status) 2061 2162 { 2062 2163 struct bna_rx_fndb_ram *rx_fndb_ram; ··· 2452 2553 * 0 = no h/w change 2453 2554 * 1 = need h/w change 2454 2555 */ 2455 - int 2556 + static int 2456 2557 rxf_promisc_enable(struct bna_rxf *rxf) 2457 2558 { 2458 2559 struct bna *bna = rxf->rx->bna; ··· 2483 2584 * 0 = no h/w change 2484 2585 * 1 = need h/w change 2485 2586 */ 2486 - int 2587 + static int 2487 2588 rxf_promisc_disable(struct bna_rxf *rxf) 2488 2589 { 2489 2590 struct bna *bna = rxf->rx->bna; ··· 2522 2623 * 0 = no h/w change 2523 2624 * 1 = need h/w change 2524 2625 */ 2525 - int 2626 + static int 2526 2627 rxf_default_enable(struct bna_rxf *rxf) 2527 2628 { 2528 2629 struct bna *bna = rxf->rx->bna; ··· 2553 2654 * 0 = no h/w change 2554 2655 * 1 = need h/w change 2555 2656 */ 2556 - int 2657 + static int 2557 2658 rxf_default_disable(struct bna_rxf *rxf) 2558 2659 { 2559 2660 struct bna *bna = rxf->rx->bna; ··· 2592 2693 * 0 = no h/w change 2593 2694 * 1 = need h/w change 2594 2695 */ 2595 - int 2696 + static int 2596 2697 rxf_allmulti_enable(struct bna_rxf *rxf) 2597 2698 { 
2598 2699 int ret = 0; ··· 2620 2721 * 0 = no h/w change 2621 2722 * 1 = need h/w change 2622 2723 */ 2623 - int 2724 + static int 2624 2725 rxf_allmulti_disable(struct bna_rxf *rxf) 2625 2726 { 2626 2727 int ret = 0; ··· 2642 2743 } 2643 2744 2644 2745 return ret; 2645 - } 2646 - 2647 - /* RxF <- bnad */ 2648 - void 2649 - bna_rx_mcast_delall(struct bna_rx *rx, 2650 - void (*cbfn)(struct bnad *, struct bna_rx *, 2651 - enum bna_cb_status)) 2652 - { 2653 - struct bna_rxf *rxf = &rx->rxf; 2654 - struct list_head *qe; 2655 - struct bna_mac *mac; 2656 - int need_hw_config = 0; 2657 - 2658 - /* Purge all entries from pending_add_q */ 2659 - while (!list_empty(&rxf->mcast_pending_add_q)) { 2660 - bfa_q_deq(&rxf->mcast_pending_add_q, &qe); 2661 - mac = (struct bna_mac *)qe; 2662 - bfa_q_qe_init(&mac->qe); 2663 - bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac); 2664 - } 2665 - 2666 - /* Schedule all entries in active_q for deletion */ 2667 - while (!list_empty(&rxf->mcast_active_q)) { 2668 - bfa_q_deq(&rxf->mcast_active_q, &qe); 2669 - mac = (struct bna_mac *)qe; 2670 - bfa_q_qe_init(&mac->qe); 2671 - list_add_tail(&mac->qe, &rxf->mcast_pending_del_q); 2672 - need_hw_config = 1; 2673 - } 2674 - 2675 - if (need_hw_config) { 2676 - rxf->cam_fltr_cbfn = cbfn; 2677 - rxf->cam_fltr_cbarg = rx->bna->bnad; 2678 - bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD); 2679 - return; 2680 - } 2681 - 2682 - if (cbfn) 2683 - (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS); 2684 - } 2685 - 2686 - /* RxF <- Rx */ 2687 - void 2688 - bna_rx_receive_resume(struct bna_rx *rx, 2689 - void (*cbfn)(struct bnad *, struct bna_rx *, 2690 - enum bna_cb_status)) 2691 - { 2692 - struct bna_rxf *rxf = &rx->rxf; 2693 - 2694 - if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_PAUSED) { 2695 - rxf->oper_state_cbfn = cbfn; 2696 - rxf->oper_state_cbarg = rx->bna->bnad; 2697 - bfa_fsm_send_event(rxf, RXF_E_RESUME); 2698 - } else if (cbfn) 2699 - (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS); 2700 - } 2701 - 2702 - void 
2703 - bna_rx_receive_pause(struct bna_rx *rx, 2704 - void (*cbfn)(struct bnad *, struct bna_rx *, 2705 - enum bna_cb_status)) 2706 - { 2707 - struct bna_rxf *rxf = &rx->rxf; 2708 - 2709 - if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_RUNNING) { 2710 - rxf->oper_state_cbfn = cbfn; 2711 - rxf->oper_state_cbarg = rx->bna->bnad; 2712 - bfa_fsm_send_event(rxf, RXF_E_PAUSE); 2713 - } else if (cbfn) 2714 - (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS); 2715 - } 2716 - 2717 - /* RxF <- bnad */ 2718 - enum bna_cb_status 2719 - bna_rx_ucast_add(struct bna_rx *rx, u8 *addr, 2720 - void (*cbfn)(struct bnad *, struct bna_rx *, 2721 - enum bna_cb_status)) 2722 - { 2723 - struct bna_rxf *rxf = &rx->rxf; 2724 - struct list_head *qe; 2725 - struct bna_mac *mac; 2726 - 2727 - /* Check if already added */ 2728 - list_for_each(qe, &rxf->ucast_active_q) { 2729 - mac = (struct bna_mac *)qe; 2730 - if (BNA_MAC_IS_EQUAL(mac->addr, addr)) { 2731 - if (cbfn) 2732 - (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS); 2733 - return BNA_CB_SUCCESS; 2734 - } 2735 - } 2736 - 2737 - /* Check if pending addition */ 2738 - list_for_each(qe, &rxf->ucast_pending_add_q) { 2739 - mac = (struct bna_mac *)qe; 2740 - if (BNA_MAC_IS_EQUAL(mac->addr, addr)) { 2741 - if (cbfn) 2742 - (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS); 2743 - return BNA_CB_SUCCESS; 2744 - } 2745 - } 2746 - 2747 - mac = bna_ucam_mod_mac_get(&rxf->rx->bna->ucam_mod); 2748 - if (mac == NULL) 2749 - return BNA_CB_UCAST_CAM_FULL; 2750 - bfa_q_qe_init(&mac->qe); 2751 - memcpy(mac->addr, addr, ETH_ALEN); 2752 - list_add_tail(&mac->qe, &rxf->ucast_pending_add_q); 2753 - 2754 - rxf->cam_fltr_cbfn = cbfn; 2755 - rxf->cam_fltr_cbarg = rx->bna->bnad; 2756 - 2757 - bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD); 2758 - 2759 - return BNA_CB_SUCCESS; 2760 - } 2761 - 2762 - /* RxF <- bnad */ 2763 - enum bna_cb_status 2764 - bna_rx_ucast_del(struct bna_rx *rx, u8 *addr, 2765 - void (*cbfn)(struct bnad *, struct bna_rx *, 2766 - enum bna_cb_status)) 2767 - { 2768 
- struct bna_rxf *rxf = &rx->rxf; 2769 - struct list_head *qe; 2770 - struct bna_mac *mac; 2771 - 2772 - list_for_each(qe, &rxf->ucast_pending_add_q) { 2773 - mac = (struct bna_mac *)qe; 2774 - if (BNA_MAC_IS_EQUAL(mac->addr, addr)) { 2775 - list_del(qe); 2776 - bfa_q_qe_init(qe); 2777 - bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac); 2778 - if (cbfn) 2779 - (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS); 2780 - return BNA_CB_SUCCESS; 2781 - } 2782 - } 2783 - 2784 - list_for_each(qe, &rxf->ucast_active_q) { 2785 - mac = (struct bna_mac *)qe; 2786 - if (BNA_MAC_IS_EQUAL(mac->addr, addr)) { 2787 - list_del(qe); 2788 - bfa_q_qe_init(qe); 2789 - list_add_tail(qe, &rxf->ucast_pending_del_q); 2790 - rxf->cam_fltr_cbfn = cbfn; 2791 - rxf->cam_fltr_cbarg = rx->bna->bnad; 2792 - bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD); 2793 - return BNA_CB_SUCCESS; 2794 - } 2795 - } 2796 - 2797 - return BNA_CB_INVALID_MAC; 2798 2746 } 2799 2747 2800 2748 /* RxF <- bnad */ ··· 2724 2978 return BNA_CB_FAIL; 2725 2979 } 2726 2980 2727 - /* RxF <- bnad */ 2728 - void 2729 - bna_rx_rss_enable(struct bna_rx *rx) 2730 - { 2731 - struct bna_rxf *rxf = &rx->rxf; 2732 - 2733 - rxf->rxf_flags |= BNA_RXF_FL_RSS_CONFIG_PENDING; 2734 - rxf->rss_status = BNA_STATUS_T_ENABLED; 2735 - bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD); 2736 - } 2737 - 2738 - /* RxF <- bnad */ 2739 - void 2740 - bna_rx_rss_disable(struct bna_rx *rx) 2741 - { 2742 - struct bna_rxf *rxf = &rx->rxf; 2743 - 2744 - rxf->rxf_flags |= BNA_RXF_FL_RSS_CONFIG_PENDING; 2745 - rxf->rss_status = BNA_STATUS_T_DISABLED; 2746 - bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD); 2747 - } 2748 - 2749 - /* RxF <- bnad */ 2750 - void 2751 - bna_rx_rss_reconfig(struct bna_rx *rx, struct bna_rxf_rss *rss_config) 2752 - { 2753 - struct bna_rxf *rxf = &rx->rxf; 2754 - rxf->rxf_flags |= BNA_RXF_FL_RSS_CONFIG_PENDING; 2755 - rxf->rss_status = BNA_STATUS_T_ENABLED; 2756 - rxf->rss_cfg = *rss_config; 2757 - bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD); 2758 - } 
2759 - 2760 2981 void 2761 2982 /* RxF <- bnad */ 2762 2983 bna_rx_vlanfilter_enable(struct bna_rx *rx) ··· 2737 3024 } 2738 3025 } 2739 3026 2740 - /* RxF <- bnad */ 2741 - void 2742 - bna_rx_vlanfilter_disable(struct bna_rx *rx) 2743 - { 2744 - struct bna_rxf *rxf = &rx->rxf; 2745 - 2746 - if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) { 2747 - rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING; 2748 - rxf->vlan_filter_status = BNA_STATUS_T_DISABLED; 2749 - bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD); 2750 - } 2751 - } 2752 - 2753 3027 /* Rx */ 2754 - 2755 - struct bna_rxp * 2756 - bna_rx_get_rxp(struct bna_rx *rx, int vector) 2757 - { 2758 - struct bna_rxp *rxp; 2759 - struct list_head *qe; 2760 - 2761 - list_for_each(qe, &rx->rxp_q) { 2762 - rxp = (struct bna_rxp *)qe; 2763 - if (rxp->vector == vector) 2764 - return rxp; 2765 - } 2766 - return NULL; 2767 - } 2768 - 2769 - /* 2770 - * bna_rx_rss_rit_set() 2771 - * Sets the Q ids for the specified msi-x vectors in the RIT. 2772 - * Maximum rit size supported is 64, which should be the max size of the 2773 - * vectors array. 2774 - */ 2775 - 2776 - void 2777 - bna_rx_rss_rit_set(struct bna_rx *rx, unsigned int *vectors, int nvectors) 2778 - { 2779 - int i; 2780 - struct bna_rxp *rxp; 2781 - struct bna_rxq *q0 = NULL, *q1 = NULL; 2782 - struct bna *bna; 2783 - struct bna_rxf *rxf; 2784 - 2785 - /* Build the RIT contents for this RX */ 2786 - bna = rx->bna; 2787 - 2788 - rxf = &rx->rxf; 2789 - for (i = 0; i < nvectors; i++) { 2790 - rxp = bna_rx_get_rxp(rx, vectors[i]); 2791 - 2792 - GET_RXQS(rxp, q0, q1); 2793 - rxf->rit_segment->rit[i].large_rxq_id = q0->rxq_id; 2794 - rxf->rit_segment->rit[i].small_rxq_id = (q1 ? 
q1->rxq_id : 0); 2795 - } 2796 - 2797 - rxf->rit_segment->rit_size = nvectors; 2798 - 2799 - /* Subsequent call to enable/reconfig RSS will update the RIT in h/w */ 2800 - } 2801 3028 2802 3029 /* Rx <- bnad */ 2803 3030 void ··· 2755 3102 2756 3103 /* Rx <- bnad */ 2757 3104 void 2758 - bna_rx_dim_reconfig(struct bna *bna, u32 vector[][BNA_BIAS_T_MAX]) 3105 + bna_rx_dim_reconfig(struct bna *bna, const u32 vector[][BNA_BIAS_T_MAX]) 2759 3106 { 2760 3107 int i, j; 2761 3108 ··· 2817 3164 } 2818 3165 2819 3166 /* Tx */ 2820 - /* TX <- bnad */ 2821 - enum bna_cb_status 2822 - bna_tx_prio_set(struct bna_tx *tx, int prio, 2823 - void (*cbfn)(struct bnad *, struct bna_tx *, 2824 - enum bna_cb_status)) 2825 - { 2826 - if (tx->flags & BNA_TX_F_PRIO_LOCK) 2827 - return BNA_CB_FAIL; 2828 - else { 2829 - tx->prio_change_cbfn = cbfn; 2830 - bna_tx_prio_changed(tx, prio); 2831 - } 2832 - 2833 - return BNA_CB_SUCCESS; 2834 - } 2835 - 2836 3167 /* TX <- bnad */ 2837 3168 void 2838 3169 bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo)
-1
drivers/net/bna/bna_hw.h
··· 1282 1282 u32 fn_int_mask; 1283 1283 u32 msix_idx; 1284 1284 }; 1285 - extern const struct bna_chip_regs_offset reg_offset[]; 1286 1285 1287 1286 struct bna_chip_regs { 1288 1287 void __iomem *page_addr;
+56 -93
drivers/net/bna/bna_txrx.c
··· 195 195 ib_mod->bna = NULL; 196 196 } 197 197 198 - struct bna_ib * 198 + static struct bna_ib * 199 199 bna_ib_get(struct bna_ib_mod *ib_mod, 200 200 enum bna_intr_type intr_type, 201 201 int vector) ··· 240 240 return ib; 241 241 } 242 242 243 - void 243 + static void 244 244 bna_ib_put(struct bna_ib_mod *ib_mod, struct bna_ib *ib) 245 245 { 246 246 bna_intr_put(ib_mod, ib->intr); ··· 255 255 } 256 256 257 257 /* Returns index offset - starting from 0 */ 258 - int 258 + static int 259 259 bna_ib_reserve_idx(struct bna_ib *ib) 260 260 { 261 261 struct bna_ib_mod *ib_mod = &ib->bna->ib_mod; ··· 309 309 return idx; 310 310 } 311 311 312 - void 312 + static void 313 313 bna_ib_release_idx(struct bna_ib *ib, int idx) 314 314 { 315 315 struct bna_ib_mod *ib_mod = &ib->bna->ib_mod; ··· 356 356 } 357 357 } 358 358 359 - int 359 + static int 360 360 bna_ib_config(struct bna_ib *ib, struct bna_ib_config *ib_config) 361 361 { 362 362 if (ib->start_count) ··· 374 374 return 0; 375 375 } 376 376 377 - void 377 + static void 378 378 bna_ib_start(struct bna_ib *ib) 379 379 { 380 380 struct bna_ib_blk_mem ib_cfg; ··· 450 450 } 451 451 } 452 452 453 - void 453 + static void 454 454 bna_ib_stop(struct bna_ib *ib) 455 455 { 456 456 u32 intx_mask; ··· 468 468 } 469 469 } 470 470 471 - void 471 + static void 472 472 bna_ib_fail(struct bna_ib *ib) 473 473 { 474 474 ib->start_count = 0; ··· 1394 1394 rxf_reset_packet_filter_allmulti(rxf); 1395 1395 } 1396 1396 1397 - void 1397 + static void 1398 1398 bna_rxf_init(struct bna_rxf *rxf, 1399 1399 struct bna_rx *rx, 1400 1400 struct bna_rx_config *q_config) ··· 1444 1444 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped); 1445 1445 } 1446 1446 1447 - void 1447 + static void 1448 1448 bna_rxf_uninit(struct bna_rxf *rxf) 1449 1449 { 1450 1450 struct bna_mac *mac; ··· 1476 1476 rxf->rx = NULL; 1477 1477 } 1478 1478 1479 - void 1479 + static void 1480 + bna_rx_cb_rxf_started(struct bna_rx *rx, enum bna_cb_status status) 1481 + { 1482 + 
bfa_fsm_send_event(rx, RX_E_RXF_STARTED); 1483 + if (rx->rxf.rxf_id < 32) 1484 + rx->bna->rx_mod.rxf_bmap[0] |= ((u32)1 << rx->rxf.rxf_id); 1485 + else 1486 + rx->bna->rx_mod.rxf_bmap[1] |= ((u32) 1487 + 1 << (rx->rxf.rxf_id - 32)); 1488 + } 1489 + 1490 + static void 1480 1491 bna_rxf_start(struct bna_rxf *rxf) 1481 1492 { 1482 1493 rxf->start_cbfn = bna_rx_cb_rxf_started; ··· 1496 1485 bfa_fsm_send_event(rxf, RXF_E_START); 1497 1486 } 1498 1487 1499 - void 1488 + static void 1489 + bna_rx_cb_rxf_stopped(struct bna_rx *rx, enum bna_cb_status status) 1490 + { 1491 + bfa_fsm_send_event(rx, RX_E_RXF_STOPPED); 1492 + if (rx->rxf.rxf_id < 32) 1493 + rx->bna->rx_mod.rxf_bmap[0] &= ~(u32)1 << rx->rxf.rxf_id; 1494 + else 1495 + rx->bna->rx_mod.rxf_bmap[1] &= ~(u32) 1496 + 1 << (rx->rxf.rxf_id - 32); 1497 + } 1498 + 1499 + static void 1500 1500 bna_rxf_stop(struct bna_rxf *rxf) 1501 1501 { 1502 1502 rxf->stop_cbfn = bna_rx_cb_rxf_stopped; ··· 1515 1493 bfa_fsm_send_event(rxf, RXF_E_STOP); 1516 1494 } 1517 1495 1518 - void 1496 + static void 1519 1497 bna_rxf_fail(struct bna_rxf *rxf) 1520 1498 { 1521 1499 rxf->rxf_flags |= BNA_RXF_FL_FAILED; ··· 1595 1573 bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD); 1596 1574 1597 1575 return BNA_CB_SUCCESS; 1598 - } 1599 - 1600 - enum bna_cb_status 1601 - bna_rx_mcast_del(struct bna_rx *rx, u8 *addr, 1602 - void (*cbfn)(struct bnad *, struct bna_rx *, 1603 - enum bna_cb_status)) 1604 - { 1605 - struct bna_rxf *rxf = &rx->rxf; 1606 - struct list_head *qe; 1607 - struct bna_mac *mac; 1608 - 1609 - list_for_each(qe, &rxf->mcast_pending_add_q) { 1610 - mac = (struct bna_mac *)qe; 1611 - if (BNA_MAC_IS_EQUAL(mac->addr, addr)) { 1612 - list_del(qe); 1613 - bfa_q_qe_init(qe); 1614 - bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac); 1615 - if (cbfn) 1616 - (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS); 1617 - return BNA_CB_SUCCESS; 1618 - } 1619 - } 1620 - 1621 - list_for_each(qe, &rxf->mcast_active_q) { 1622 - mac = (struct bna_mac *)qe; 1623 - if 
(BNA_MAC_IS_EQUAL(mac->addr, addr)) { 1624 - list_del(qe); 1625 - bfa_q_qe_init(qe); 1626 - list_add_tail(qe, &rxf->mcast_pending_del_q); 1627 - rxf->cam_fltr_cbfn = cbfn; 1628 - rxf->cam_fltr_cbarg = rx->bna->bnad; 1629 - bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD); 1630 - return BNA_CB_SUCCESS; 1631 - } 1632 - } 1633 - 1634 - return BNA_CB_INVALID_MAC; 1635 1576 } 1636 1577 1637 1578 enum bna_cb_status ··· 1847 1862 bfa_fsm_state_decl(bna_rx, rxq_stop_wait, 1848 1863 struct bna_rx, enum bna_rx_event); 1849 1864 1850 - static struct bfa_sm_table rx_sm_table[] = { 1865 + static const struct bfa_sm_table rx_sm_table[] = { 1851 1866 {BFA_SM(bna_rx_sm_stopped), BNA_RX_STOPPED}, 1852 1867 {BFA_SM(bna_rx_sm_rxf_start_wait), BNA_RX_RXF_START_WAIT}, 1853 1868 {BFA_SM(bna_rx_sm_started), BNA_RX_STARTED}, ··· 2232 2247 } 2233 2248 } 2234 2249 2235 - int 2250 + static int 2236 2251 _rx_can_satisfy(struct bna_rx_mod *rx_mod, 2237 2252 struct bna_rx_config *rx_cfg) 2238 2253 { ··· 2257 2272 return 1; 2258 2273 } 2259 2274 2260 - struct bna_rxq * 2275 + static struct bna_rxq * 2261 2276 _get_free_rxq(struct bna_rx_mod *rx_mod) 2262 2277 { 2263 2278 struct bna_rxq *rxq = NULL; ··· 2271 2286 return rxq; 2272 2287 } 2273 2288 2274 - void 2289 + static void 2275 2290 _put_free_rxq(struct bna_rx_mod *rx_mod, struct bna_rxq *rxq) 2276 2291 { 2277 2292 bfa_q_qe_init(&rxq->qe); ··· 2279 2294 rx_mod->rxq_free_count++; 2280 2295 } 2281 2296 2282 - struct bna_rxp * 2297 + static struct bna_rxp * 2283 2298 _get_free_rxp(struct bna_rx_mod *rx_mod) 2284 2299 { 2285 2300 struct list_head *qe = NULL; ··· 2295 2310 return rxp; 2296 2311 } 2297 2312 2298 - void 2313 + static void 2299 2314 _put_free_rxp(struct bna_rx_mod *rx_mod, struct bna_rxp *rxp) 2300 2315 { 2301 2316 bfa_q_qe_init(&rxp->qe); ··· 2303 2318 rx_mod->rxp_free_count++; 2304 2319 } 2305 2320 2306 - struct bna_rx * 2321 + static struct bna_rx * 2307 2322 _get_free_rx(struct bna_rx_mod *rx_mod) 2308 2323 { 2309 2324 struct 
list_head *qe = NULL; ··· 2321 2336 return rx; 2322 2337 } 2323 2338 2324 - void 2339 + static void 2325 2340 _put_free_rx(struct bna_rx_mod *rx_mod, struct bna_rx *rx) 2326 2341 { 2327 2342 bfa_q_qe_init(&rx->qe); ··· 2329 2344 rx_mod->rx_free_count++; 2330 2345 } 2331 2346 2332 - void 2347 + static void 2333 2348 _rx_init(struct bna_rx *rx, struct bna *bna) 2334 2349 { 2335 2350 rx->bna = bna; ··· 2345 2360 rx->stop_cbarg = NULL; 2346 2361 } 2347 2362 2348 - void 2363 + static void 2349 2364 _rxp_add_rxqs(struct bna_rxp *rxp, 2350 2365 struct bna_rxq *q0, 2351 2366 struct bna_rxq *q1) ··· 2368 2383 } 2369 2384 } 2370 2385 2371 - void 2386 + static void 2372 2387 _rxq_qpt_init(struct bna_rxq *rxq, 2373 2388 struct bna_rxp *rxp, 2374 2389 u32 page_count, ··· 2397 2412 } 2398 2413 } 2399 2414 2400 - void 2415 + static void 2401 2416 _rxp_cqpt_setup(struct bna_rxp *rxp, 2402 2417 u32 page_count, 2403 2418 u32 page_size, ··· 2426 2441 } 2427 2442 } 2428 2443 2429 - void 2444 + static void 2430 2445 _rx_add_rxp(struct bna_rx *rx, struct bna_rxp *rxp) 2431 2446 { 2432 2447 list_add_tail(&rxp->qe, &rx->rxp_q); 2433 2448 } 2434 2449 2435 - void 2450 + static void 2436 2451 _init_rxmod_queues(struct bna_rx_mod *rx_mod) 2437 2452 { 2438 2453 INIT_LIST_HEAD(&rx_mod->rx_free_q); ··· 2445 2460 rx_mod->rxp_free_count = 0; 2446 2461 } 2447 2462 2448 - void 2463 + static void 2449 2464 _rx_ctor(struct bna_rx *rx, int id) 2450 2465 { 2451 2466 bfa_q_qe_init(&rx->qe); ··· 2477 2492 bfa_fsm_send_event(rx, RX_E_RXQ_STOPPED); 2478 2493 } 2479 2494 2480 - void 2495 + static void 2481 2496 bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx, 2482 2497 enum bna_cb_status status) 2483 2498 { ··· 2486 2501 bfa_wc_down(&rx_mod->rx_stop_wc); 2487 2502 } 2488 2503 2489 - void 2504 + static void 2490 2505 bna_rx_mod_cb_rx_stopped_all(void *arg) 2491 2506 { 2492 2507 struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg; ··· 2496 2511 rx_mod->stop_cbfn = NULL; 2497 2512 } 2498 2513 2499 - void 
2514 + static void 2500 2515 bna_rx_start(struct bna_rx *rx) 2501 2516 { 2502 2517 rx->rx_flags |= BNA_RX_F_PORT_ENABLED; ··· 2504 2519 bfa_fsm_send_event(rx, RX_E_START); 2505 2520 } 2506 2521 2507 - void 2522 + static void 2508 2523 bna_rx_stop(struct bna_rx *rx) 2509 2524 { 2510 2525 rx->rx_flags &= ~BNA_RX_F_PORT_ENABLED; ··· 2517 2532 } 2518 2533 } 2519 2534 2520 - void 2535 + static void 2521 2536 bna_rx_fail(struct bna_rx *rx) 2522 2537 { 2523 2538 /* Indicate port is not enabled, and failed */ 2524 2539 rx->rx_flags &= ~BNA_RX_F_PORT_ENABLED; 2525 2540 rx->rx_flags |= BNA_RX_F_PORT_FAILED; 2526 2541 bfa_fsm_send_event(rx, RX_E_FAIL); 2527 - } 2528 - 2529 - void 2530 - bna_rx_cb_rxf_started(struct bna_rx *rx, enum bna_cb_status status) 2531 - { 2532 - bfa_fsm_send_event(rx, RX_E_RXF_STARTED); 2533 - if (rx->rxf.rxf_id < 32) 2534 - rx->bna->rx_mod.rxf_bmap[0] |= ((u32)1 << rx->rxf.rxf_id); 2535 - else 2536 - rx->bna->rx_mod.rxf_bmap[1] |= ((u32) 2537 - 1 << (rx->rxf.rxf_id - 32)); 2538 - } 2539 - 2540 - void 2541 - bna_rx_cb_rxf_stopped(struct bna_rx *rx, enum bna_cb_status status) 2542 - { 2543 - bfa_fsm_send_event(rx, RX_E_RXF_STOPPED); 2544 - if (rx->rxf.rxf_id < 32) 2545 - rx->bna->rx_mod.rxf_bmap[0] &= ~(u32)1 << rx->rxf.rxf_id; 2546 - else 2547 - rx->bna->rx_mod.rxf_bmap[1] &= ~(u32) 2548 - 1 << (rx->rxf.rxf_id - 32); 2549 2542 } 2550 2543 2551 2544 void ··· 3694 3731 bfa_fsm_send_event(tx, TX_E_FAIL); 3695 3732 } 3696 3733 3697 - void 3734 + static void 3698 3735 bna_tx_prio_changed(struct bna_tx *tx, int prio) 3699 3736 { 3700 3737 struct bna_txq *txq;
+10 -19
drivers/net/bna/bnad.c
··· 28 28 #include "bna.h" 29 29 #include "cna.h" 30 30 31 - DEFINE_MUTEX(bnad_fwimg_mutex); 31 + static DEFINE_MUTEX(bnad_fwimg_mutex); 32 32 33 33 /* 34 34 * Module params ··· 46 46 */ 47 47 u32 bnad_rxqs_per_cq = 2; 48 48 49 - const u8 bnad_bcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; 49 + static const u8 bnad_bcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; 50 50 51 51 /* 52 52 * Local MACROS ··· 687 687 * Called with bnad->bna_lock held b'cos of 688 688 * bnad->cfg_flags access. 689 689 */ 690 - void 690 + static void 691 691 bnad_disable_mbox_irq(struct bnad *bnad) 692 692 { 693 693 int irq = BNAD_GET_MBOX_IRQ(bnad); ··· 956 956 jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ)); 957 957 } 958 958 959 - void 960 - bnad_cb_stats_clr(struct bnad *bnad) 961 - { 962 - } 963 - 964 959 /* Resource allocation, free functions */ 965 960 966 961 static void ··· 1106 1111 } 1107 1112 1108 1113 spin_lock_irqsave(&bnad->bna_lock, flags); 1114 + 1109 1115 if (bnad->cfg_flags & BNAD_CF_MSIX) 1110 1116 disable_irq_nosync(irq); 1117 + 1111 1118 spin_unlock_irqrestore(&bnad->bna_lock, flags); 1112 1119 return 0; 1113 1120 } ··· 2240 2243 bnad_enable_msix(struct bnad *bnad) 2241 2244 { 2242 2245 int i, ret; 2243 - u32 tot_msix_num; 2244 2246 unsigned long flags; 2245 2247 2246 2248 spin_lock_irqsave(&bnad->bna_lock, flags); ··· 2252 2256 if (bnad->msix_table) 2253 2257 return; 2254 2258 2255 - tot_msix_num = bnad->msix_num + bnad->msix_diag_num; 2256 - 2257 2259 bnad->msix_table = 2258 - kcalloc(tot_msix_num, sizeof(struct msix_entry), GFP_KERNEL); 2260 + kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL); 2259 2261 2260 2262 if (!bnad->msix_table) 2261 2263 goto intx_mode; 2262 2264 2263 - for (i = 0; i < tot_msix_num; i++) 2265 + for (i = 0; i < bnad->msix_num; i++) 2264 2266 bnad->msix_table[i].entry = i; 2265 2267 2266 - ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, tot_msix_num); 2268 + ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, 
bnad->msix_num); 2267 2269 if (ret > 0) { 2268 2270 /* Not enough MSI-X vectors. */ 2269 2271 ··· 2274 2280 + (bnad->num_rx 2275 2281 * bnad->num_rxp_per_rx) + 2276 2282 BNAD_MAILBOX_MSIX_VECTORS; 2277 - tot_msix_num = bnad->msix_num + bnad->msix_diag_num; 2278 2283 2279 2284 /* Try once more with adjusted numbers */ 2280 2285 /* If this fails, fall back to INTx */ 2281 2286 ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, 2282 - tot_msix_num); 2287 + bnad->msix_num); 2283 2288 if (ret) 2284 2289 goto intx_mode; 2285 2290 ··· 2291 2298 kfree(bnad->msix_table); 2292 2299 bnad->msix_table = NULL; 2293 2300 bnad->msix_num = 0; 2294 - bnad->msix_diag_num = 0; 2295 2301 spin_lock_irqsave(&bnad->bna_lock, flags); 2296 2302 bnad->cfg_flags &= ~BNAD_CF_MSIX; 2297 2303 bnad_q_num_init(bnad); ··· 2938 2946 bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) + 2939 2947 (bnad->num_rx * bnad->num_rxp_per_rx) + 2940 2948 BNAD_MAILBOX_MSIX_VECTORS; 2941 - bnad->msix_diag_num = 2; /* 1 for Tx, 1 for Rx */ 2942 2949 2943 2950 bnad->txq_depth = BNAD_TXQ_DEPTH; 2944 2951 bnad->rxq_depth = BNAD_RXQ_DEPTH; ··· 3208 3217 free_netdev(netdev); 3209 3218 } 3210 3219 3211 - const struct pci_device_id bnad_pci_id_table[] = { 3220 + static const struct pci_device_id bnad_pci_id_table[] = { 3212 3221 { 3213 3222 PCI_DEVICE(PCI_VENDOR_ID_BROCADE, 3214 3223 PCI_DEVICE_ID_BROCADE_CT),
-1
drivers/net/bna/bnad.h
··· 248 248 u64 mmio_len; 249 249 250 250 u32 msix_num; 251 - u32 msix_diag_num; 252 251 struct msix_entry *msix_table; 253 252 254 253 struct mutex conf_mutex;
+1 -1
drivers/net/bna/cna_fwimg.c
··· 22 22 static u32 *bfi_image_ct_cna; 23 23 static u32 bfi_image_ct_cna_size; 24 24 25 - u32 * 25 + static u32 * 26 26 cna_read_firmware(struct pci_dev *pdev, u32 **bfi_image, 27 27 u32 *bfi_image_size, char *fw_name) 28 28 {