Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v3.1-rc9 3076 lines 76 kB view raw
/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 */
#include "bna.h"
#include "bfa_cs.h"

static void bna_device_cb_port_stopped(void *arg, enum bna_cb_status status);

/*
 * Link-up AEN handler: record the link state (CEE vs. plain link-up),
 * derive the priority from the lowest set bit of the AEN priority map,
 * then dispatch the new status to the Tx module and the bnad callback.
 */
static void
bna_port_cb_link_up(struct bna_port *port, struct bfi_ll_aen *aen,
			int status)
{
	int i;
	u8 prio_map;

	port->llport.link_status = BNA_LINK_UP;
	if (aen->cee_linkup)
		port->llport.link_status = BNA_CEE_UP;

	/* Compute the priority: index of the lowest set bit in prio_map */
	prio_map = aen->prio_map;
	if (prio_map) {
		for (i = 0; i < 8; i++) {
			if ((prio_map >> i) & 0x1)
				break;
		}
		port->priority = i;
	} else
		port->priority = 0;

	/* Dispatch events */
	bna_tx_mod_cee_link_status(&port->bna->tx_mod, aen->cee_linkup);
	bna_tx_mod_prio_changed(&port->bna->tx_mod, port->priority);
	port->link_cbfn(port->bna->bnad, port->llport.link_status);
}

/* Link-down AEN handler: mirror of bna_port_cb_link_up */
static void
bna_port_cb_link_down(struct bna_port *port, int status)
{
	port->llport.link_status = BNA_LINK_DOWN;

	/* Dispatch events */
	bna_tx_mod_cee_link_status(&port->bna->tx_mod, BNA_LINK_DOWN);
	port->link_cbfn(port->bna->bnad, BNA_LINK_DOWN);
}

/*
 * A regular llport may come up when it is administratively up, Rx is
 * started and the physical port is enabled; any other type (loopback)
 * instead requires the physical port to be *disabled*.
 */
static inline int
llport_can_be_up(struct bna_llport *llport)
{
	int ready = 0;
	if (llport->type == BNA_PORT_T_REGULAR)
		ready = ((llport->flags & BNA_LLPORT_F_ADMIN_UP) &&
			 (llport->flags & BNA_LLPORT_F_RX_STARTED) &&
			 (llport->flags & BNA_LLPORT_F_PORT_ENABLED));
	else
		ready = ((llport->flags & BNA_LLPORT_F_ADMIN_UP) &&
			 (llport->flags & BNA_LLPORT_F_RX_STARTED) &&
			 !(llport->flags & BNA_LLPORT_F_PORT_ENABLED));
	return ready;
}

#define llport_is_up llport_can_be_up

enum bna_llport_event {
	LLPORT_E_START = 1,
	LLPORT_E_STOP = 2,
	LLPORT_E_FAIL = 3,
	LLPORT_E_UP = 4,
	LLPORT_E_DOWN = 5,
	LLPORT_E_FWRESP_UP_OK = 6,
	LLPORT_E_FWRESP_UP_FAIL = 7,
	LLPORT_E_FWRESP_DOWN = 8
};

/* Physical-port-enable AEN: set the flag, then kick the FSM if ready */
static void
bna_llport_cb_port_enabled(struct bna_llport *llport)
{
	llport->flags |= BNA_LLPORT_F_PORT_ENABLED;

	if (llport_can_be_up(llport))
		bfa_fsm_send_event(llport, LLPORT_E_UP);
}

/*
 * Physical-port-disable AEN: the up-state must be sampled *before*
 * clearing the flag, since clearing it changes llport_is_up()'s answer.
 */
static void
bna_llport_cb_port_disabled(struct bna_llport *llport)
{
	int llport_up = llport_is_up(llport);

	llport->flags &= ~BNA_LLPORT_F_PORT_ENABLED;

	if (llport_up)
		bfa_fsm_send_event(llport, LLPORT_E_DOWN);
}

/**
 * MBOX
 */
/* Returns 1 if msg_id is an asynchronous event notification, else 0 */
static int
bna_is_aen(u8 msg_id)
{
	switch (msg_id) {
	case BFI_LL_I2H_LINK_DOWN_AEN:
	case BFI_LL_I2H_LINK_UP_AEN:
	case BFI_LL_I2H_PORT_ENABLE_AEN:
	case BFI_LL_I2H_PORT_DISABLE_AEN:
		return 1;

	default:
		return 0;
	}
}

/* Demultiplex an AEN mailbox message to the matching port/llport handler */
static void
bna_mbox_aen_callback(struct bna *bna, struct bfi_mbmsg *msg)
{
	struct bfi_ll_aen *aen = (struct bfi_ll_aen *)(msg);

	switch (aen->mh.msg_id) {
	case BFI_LL_I2H_LINK_UP_AEN:
		bna_port_cb_link_up(&bna->port, aen, aen->reason);
		break;
	case BFI_LL_I2H_LINK_DOWN_AEN:
		bna_port_cb_link_down(&bna->port, aen->reason);
		break;
	case BFI_LL_I2H_PORT_ENABLE_AEN:
		bna_llport_cb_port_enabled(&bna->port.llport);
		break;
	case BFI_LL_I2H_PORT_DISABLE_AEN:
		bna_llport_cb_port_disabled(&bna->port.llport);
		break;
	default:
		break;
	}
}

/*
 * Mailbox ISR for the LL message class: AENs are dispatched directly;
 * command responses are matched against the oldest posted request.
 */
static void
bna_ll_isr(void *llarg, struct bfi_mbmsg *msg)
{
	struct bna *bna = (struct bna *)(llarg);
	struct bfi_ll_rsp *mb_rsp
			= (struct bfi_ll_rsp *)(msg);
	struct bfi_mhdr *cmd_h, *rsp_h;
	struct bna_mbox_qe *mb_qe = NULL;
	int to_post = 0;
	u8 aen = 0;
	char message[BNA_MESSAGE_SIZE];

	aen = bna_is_aen(mb_rsp->mh.msg_id);

	if (!aen) {
		/* Match the response against the head of posted_q by
		 * (i2h message id, token) pair */
		mb_qe = bfa_q_first(&bna->mbox_mod.posted_q);
		cmd_h = (struct bfi_mhdr *)(&mb_qe->cmd.msg[0]);
		rsp_h = (struct bfi_mhdr *)(&mb_rsp->mh);

		if ((BFA_I2HM(cmd_h->msg_id) == rsp_h->msg_id) &&
		    (cmd_h->mtag.i2htok == rsp_h->mtag.i2htok)) {
			/* Remove the request from posted_q, update state */
			list_del(&mb_qe->qe);
			bna->mbox_mod.msg_pending--;
			if (list_empty(&bna->mbox_mod.posted_q))
				bna->mbox_mod.state = BNA_MBOX_FREE;
			else
				to_post = 1;

			/* Dispatch the cbfn */
			if (mb_qe->cbfn)
				mb_qe->cbfn(mb_qe->cbarg, mb_rsp->error);

			/* Post the next entry, if needed */
			if (to_post) {
				mb_qe = bfa_q_first(&bna->mbox_mod.posted_q);
				bfa_nw_ioc_mbox_queue(&bna->device.ioc,
							&mb_qe->cmd);
			}
		} else {
			snprintf(message, BNA_MESSAGE_SIZE,
				"No matching rsp for [%d:%d:%d]\n",
				mb_rsp->mh.msg_class, mb_rsp->mh.msg_id,
				mb_rsp->mh.mtag.i2htok);
			pr_info("%s", message);
		}

	} else
		bna_mbox_aen_callback(bna, msg);
}

/* Clear the firmware-halt bit and forward the error to the IOC layer */
static void
bna_err_handler(struct bna *bna, u32 intr_status)
{
	u32 init_halt;

	if (intr_status & __HALT_STATUS_BITS) {
		init_halt = readl(bna->device.ioc.ioc_regs.ll_halt);
		init_halt &= ~__FW_INIT_HALT_P;
		writel(init_halt, bna->device.ioc.ioc_regs.ll_halt);
	}

	bfa_nw_ioc_error_isr(&bna->device.ioc);
}

/* Top-level mailbox interrupt dispatch: error interrupts take priority */
void
bna_mbox_handler(struct bna *bna, u32 intr_status)
{
	if (BNA_IS_ERR_INTR(intr_status)) {
		bna_err_handler(bna, intr_status);
		return;
	}
	if (BNA_IS_MBOX_INTR(intr_status))
		bfa_nw_ioc_mbox_isr(&bna->device.ioc);
}

/*
 * Queue a mailbox command.  Post it to the IOC immediately when the
 * mailbox is free, otherwise leave it on posted_q; bna_ll_isr() posts
 * the next queued entry when the outstanding response arrives.
 */
void
bna_mbox_send(struct bna *bna, struct bna_mbox_qe *mbox_qe)
{
	struct bfi_mhdr *mh;

	mh = (struct bfi_mhdr *)(&mbox_qe->cmd.msg[0]);

	mh->mtag.i2htok = htons(bna->mbox_mod.msg_ctr);
	bna->mbox_mod.msg_ctr++;
	bna->mbox_mod.msg_pending++;
	if (bna->mbox_mod.state == BNA_MBOX_FREE) {
		list_add_tail(&mbox_qe->qe, &bna->mbox_mod.posted_q);
		bfa_nw_ioc_mbox_queue(&bna->device.ioc, &mbox_qe->cmd);
		bna->mbox_mod.state = BNA_MBOX_POSTED;
	} else {
		list_add_tail(&mbox_qe->qe, &bna->mbox_mod.posted_q);
	}
}

/*
 * Flush every queued command, completing each with BNA_CB_NOT_EXEC.
 * NOTE(review): the @q parameter is unused; the function always flushes
 * bna->mbox_mod.posted_q (the only caller passes exactly that queue).
 */
static void
bna_mbox_flush_q(struct bna *bna, struct list_head *q)
{
	struct bna_mbox_qe *mb_qe = NULL;
	struct list_head *mb_q;
	void (*cbfn)(void *arg, int status);
	void *cbarg;

	mb_q = &bna->mbox_mod.posted_q;

	while (!list_empty(mb_q)) {
		bfa_q_deq(mb_q, &mb_qe);
		cbfn = mb_qe->cbfn;
		cbarg = mb_qe->cbarg;
		bfa_q_qe_init(mb_qe);
		bna->mbox_mod.msg_pending--;

		if (cbfn)
			cbfn(cbarg, BNA_CB_NOT_EXEC);
	}

	bna->mbox_mod.state = BNA_MBOX_FREE;
}

/* Nothing to do on start; kept for start/stop symmetry */
static void
bna_mbox_mod_start(struct bna_mbox_mod *mbox_mod)
{
}

static void
bna_mbox_mod_stop(struct bna_mbox_mod *mbox_mod)
{
	bna_mbox_flush_q(mbox_mod->bna, &mbox_mod->posted_q);
}

static void
bna_mbox_mod_init(struct bna_mbox_mod *mbox_mod, struct bna *bna)
{
	bfa_nw_ioc_mbox_regisr(&bna->device.ioc, BFI_MC_LL, bna_ll_isr, bna);
	mbox_mod->state = BNA_MBOX_FREE;
	mbox_mod->msg_ctr = mbox_mod->msg_pending = 0;
	INIT_LIST_HEAD(&mbox_mod->posted_q);
	mbox_mod->bna = bna;
}

static void
bna_mbox_mod_uninit(struct bna_mbox_mod *mbox_mod)
{
	mbox_mod->bna = NULL;
}

/**
 * LLPORT
 */
/* Invoke and then clear the one-shot llport stop callback, if any */
#define call_llport_stop_cbfn(llport, status)\
do {\
	if ((llport)->stop_cbfn)\
		(llport)->stop_cbfn(&(llport)->bna->port, status);\
	(llport)->stop_cbfn = NULL;\
} while (0)

static void bna_fw_llport_up(struct bna_llport *llport);
static void bna_fw_cb_llport_up(void *arg, int status);
static void
bna_fw_llport_down(struct bna_llport *llport);
static void bna_fw_cb_llport_down(void *arg, int status);
static void bna_llport_start(struct bna_llport *llport);
static void bna_llport_stop(struct bna_llport *llport);
static void bna_llport_fail(struct bna_llport *llport);

enum bna_llport_state {
	BNA_LLPORT_STOPPED = 1,
	BNA_LLPORT_DOWN = 2,
	BNA_LLPORT_UP_RESP_WAIT = 3,
	BNA_LLPORT_DOWN_RESP_WAIT = 4,
	BNA_LLPORT_UP = 5,
	BNA_LLPORT_LAST_RESP_WAIT = 6
};

bfa_fsm_state_decl(bna_llport, stopped, struct bna_llport,
			enum bna_llport_event);
bfa_fsm_state_decl(bna_llport, down, struct bna_llport,
			enum bna_llport_event);
bfa_fsm_state_decl(bna_llport, up_resp_wait, struct bna_llport,
			enum bna_llport_event);
bfa_fsm_state_decl(bna_llport, down_resp_wait, struct bna_llport,
			enum bna_llport_event);
bfa_fsm_state_decl(bna_llport, up, struct bna_llport,
			enum bna_llport_event);
bfa_fsm_state_decl(bna_llport, last_resp_wait, struct bna_llport,
			enum bna_llport_event);

/* Maps FSM handler functions to the enum bna_llport_state values */
static struct bfa_sm_table llport_sm_table[] = {
	{BFA_SM(bna_llport_sm_stopped), BNA_LLPORT_STOPPED},
	{BFA_SM(bna_llport_sm_down), BNA_LLPORT_DOWN},
	{BFA_SM(bna_llport_sm_up_resp_wait), BNA_LLPORT_UP_RESP_WAIT},
	{BFA_SM(bna_llport_sm_down_resp_wait), BNA_LLPORT_DOWN_RESP_WAIT},
	{BFA_SM(bna_llport_sm_up), BNA_LLPORT_UP},
	{BFA_SM(bna_llport_sm_last_resp_wait), BNA_LLPORT_LAST_RESP_WAIT}
};

static void
bna_llport_sm_stopped_entry(struct bna_llport *llport)
{
	llport->bna->port.link_cbfn((llport)->bna->bnad, BNA_LINK_DOWN);
	call_llport_stop_cbfn(llport, BNA_CB_SUCCESS);
}

static void
bna_llport_sm_stopped(struct bna_llport *llport,
			enum bna_llport_event event)
{
	switch (event) {
	case LLPORT_E_START:
		bfa_fsm_set_state(llport, bna_llport_sm_down);
		break;

	case LLPORT_E_STOP:
		call_llport_stop_cbfn(llport, BNA_CB_SUCCESS);
		break;

	case LLPORT_E_FAIL:
		break;

	case LLPORT_E_DOWN:
		/* This event is received due to Rx objects failing */
		/* No-op */
		break;

	case LLPORT_E_FWRESP_UP_OK:
	case LLPORT_E_FWRESP_DOWN:
		/**
		 * These events are received due to flushing of mbox when
		 * device fails
		 */
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_llport_sm_down_entry(struct bna_llport *llport)
{
	bnad_cb_port_link_status((llport)->bna->bnad, BNA_LINK_DOWN);
}

static void
bna_llport_sm_down(struct bna_llport *llport,
			enum bna_llport_event event)
{
	switch (event) {
	case LLPORT_E_STOP:
		bfa_fsm_set_state(llport, bna_llport_sm_stopped);
		break;

	case LLPORT_E_FAIL:
		bfa_fsm_set_state(llport, bna_llport_sm_stopped);
		break;

	case LLPORT_E_UP:
		bfa_fsm_set_state(llport, bna_llport_sm_up_resp_wait);
		bna_fw_llport_up(llport);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_llport_sm_up_resp_wait_entry(struct bna_llport *llport)
{
	BUG_ON(!llport_can_be_up(llport));
	/**
	 * NOTE: Do not call bna_fw_llport_up() here.
That will over step
	 * mbox due to down_resp_wait -> up_resp_wait transition on event
	 * LLPORT_E_UP
	 */
}

static void
bna_llport_sm_up_resp_wait(struct bna_llport *llport,
			enum bna_llport_event event)
{
	switch (event) {
	case LLPORT_E_STOP:
		bfa_fsm_set_state(llport, bna_llport_sm_last_resp_wait);
		break;

	case LLPORT_E_FAIL:
		bfa_fsm_set_state(llport, bna_llport_sm_stopped);
		break;

	case LLPORT_E_DOWN:
		bfa_fsm_set_state(llport, bna_llport_sm_down_resp_wait);
		break;

	case LLPORT_E_FWRESP_UP_OK:
		bfa_fsm_set_state(llport, bna_llport_sm_up);
		break;

	case LLPORT_E_FWRESP_UP_FAIL:
		bfa_fsm_set_state(llport, bna_llport_sm_down);
		break;

	case LLPORT_E_FWRESP_DOWN:
		/* down_resp_wait -> up_resp_wait transition on LLPORT_E_UP */
		bna_fw_llport_up(llport);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_llport_sm_down_resp_wait_entry(struct bna_llport *llport)
{
	/**
	 * NOTE: Do not call bna_fw_llport_down() here. That will over step
	 * mbox due to up_resp_wait -> down_resp_wait transition on event
	 * LLPORT_E_DOWN
	 */
}

static void
bna_llport_sm_down_resp_wait(struct bna_llport *llport,
			enum bna_llport_event event)
{
	switch (event) {
	case LLPORT_E_STOP:
		bfa_fsm_set_state(llport, bna_llport_sm_last_resp_wait);
		break;

	case LLPORT_E_FAIL:
		bfa_fsm_set_state(llport, bna_llport_sm_stopped);
		break;

	case LLPORT_E_UP:
		bfa_fsm_set_state(llport, bna_llport_sm_up_resp_wait);
		break;

	case LLPORT_E_FWRESP_UP_OK:
		/* up_resp_wait->down_resp_wait transition on LLPORT_E_DOWN */
		bna_fw_llport_down(llport);
		break;

	case LLPORT_E_FWRESP_UP_FAIL:
	case LLPORT_E_FWRESP_DOWN:
		bfa_fsm_set_state(llport, bna_llport_sm_down);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_llport_sm_up_entry(struct bna_llport *llport)
{
}

static void
bna_llport_sm_up(struct bna_llport *llport,
			enum bna_llport_event event)
{
	switch (event) {
	case LLPORT_E_STOP:
		bfa_fsm_set_state(llport, bna_llport_sm_last_resp_wait);
		bna_fw_llport_down(llport);
		break;

	case LLPORT_E_FAIL:
		bfa_fsm_set_state(llport, bna_llport_sm_stopped);
		break;

	case LLPORT_E_DOWN:
		bfa_fsm_set_state(llport, bna_llport_sm_down_resp_wait);
		bna_fw_llport_down(llport);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_llport_sm_last_resp_wait_entry(struct bna_llport *llport)
{
}

static void
bna_llport_sm_last_resp_wait(struct bna_llport *llport,
			enum bna_llport_event event)
{
	switch (event) {
	case LLPORT_E_FAIL:
		bfa_fsm_set_state(llport, bna_llport_sm_stopped);
		break;

	case LLPORT_E_DOWN:
		/**
		 * This event is received due to Rx objects stopping in
		 * parallel to llport
		 */
		/* No-op */
		break;

	case LLPORT_E_FWRESP_UP_OK:
		/* up_resp_wait->last_resp_wait transition on LLPORT_T_STOP */
		bna_fw_llport_down(llport);
		break;

	case LLPORT_E_FWRESP_UP_FAIL:
	case LLPORT_E_FWRESP_DOWN:
		bfa_fsm_set_state(llport, bna_llport_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}

/* Build and post a PORT_ADMIN(enable) request to firmware */
static void
bna_fw_llport_admin_up(struct bna_llport *llport)
{
	struct bfi_ll_port_admin_req ll_req;

	memset(&ll_req, 0, sizeof(ll_req));
	ll_req.mh.msg_class = BFI_MC_LL;
	ll_req.mh.msg_id = BFI_LL_H2I_PORT_ADMIN_REQ;
	ll_req.mh.mtag.h2i.lpu_id = 0;

	ll_req.up = BNA_STATUS_T_ENABLED;

	bna_mbox_qe_fill(&llport->mbox_qe, &ll_req, sizeof(ll_req),
			bna_fw_cb_llport_up, llport);

	bna_mbox_send(llport->bna, &llport->mbox_qe);
}

/* Only regular (non-loopback) ports send a firmware admin-up request */
static void
bna_fw_llport_up(struct bna_llport *llport)
{
	if (llport->type == BNA_PORT_T_REGULAR)
		bna_fw_llport_admin_up(llport);
}

/* Firmware completion for admin-up; translate status into an FSM event */
static void
bna_fw_cb_llport_up(void *arg, int status)
{
	struct bna_llport *llport = (struct bna_llport *)arg;

	bfa_q_qe_init(&llport->mbox_qe.qe);
	if (status == BFI_LL_CMD_FAIL) {
		if (llport->type == BNA_PORT_T_REGULAR)
			llport->flags &= ~BNA_LLPORT_F_PORT_ENABLED;
		else
			llport->flags &= ~BNA_LLPORT_F_ADMIN_UP;
		bfa_fsm_send_event(llport, LLPORT_E_FWRESP_UP_FAIL);
	} else
		bfa_fsm_send_event(llport, LLPORT_E_FWRESP_UP_OK);
}

/* Build and post a PORT_ADMIN(disable) request to firmware */
static void
bna_fw_llport_admin_down(struct bna_llport *llport)
{
	struct bfi_ll_port_admin_req ll_req;

	memset(&ll_req, 0, sizeof(ll_req));
	ll_req.mh.msg_class = BFI_MC_LL;
	ll_req.mh.msg_id = BFI_LL_H2I_PORT_ADMIN_REQ;
	ll_req.mh.mtag.h2i.lpu_id = 0;

	ll_req.up = BNA_STATUS_T_DISABLED;

	bna_mbox_qe_fill(&llport->mbox_qe, &ll_req, sizeof(ll_req),
			bna_fw_cb_llport_down, llport);

	bna_mbox_send(llport->bna, &llport->mbox_qe);
}

static void
bna_fw_llport_down(struct bna_llport *llport)
{
	if (llport->type == BNA_PORT_T_REGULAR)
		bna_fw_llport_admin_down(llport);
}

static void
bna_fw_cb_llport_down(void *arg, int status)
{
	struct bna_llport *llport = (struct bna_llport *)arg;

	bfa_q_qe_init(&llport->mbox_qe.qe);
	bfa_fsm_send_event(llport, LLPORT_E_FWRESP_DOWN);
}

/* llport-stopped notification: one count of the port's child-stop wc */
static void
bna_port_cb_llport_stopped(struct bna_port *port,
				enum bna_cb_status status)
{
	bfa_wc_down(&port->chld_stop_wc);
}

static void
bna_llport_init(struct bna_llport *llport, struct bna *bna)
{
	llport->flags |= BNA_LLPORT_F_ADMIN_UP;
	llport->flags |= BNA_LLPORT_F_PORT_ENABLED;
	llport->type = BNA_PORT_T_REGULAR;
	llport->bna = bna;

	llport->link_status = BNA_LINK_DOWN;

	llport->rx_started_count = 0;

	llport->stop_cbfn = NULL;

	bfa_q_qe_init(&llport->mbox_qe.qe);

	bfa_fsm_set_state(llport, bna_llport_sm_stopped);
}

static void
bna_llport_uninit(struct bna_llport *llport)
{
	llport->flags &= ~BNA_LLPORT_F_ADMIN_UP;
	llport->flags &= ~BNA_LLPORT_F_PORT_ENABLED;

	llport->bna = NULL;
}

static void
bna_llport_start(struct bna_llport *llport)
{
	bfa_fsm_send_event(llport, LLPORT_E_START);
}

static void
bna_llport_stop(struct bna_llport *llport)
{
	llport->stop_cbfn = bna_port_cb_llport_stopped;

	bfa_fsm_send_event(llport, LLPORT_E_STOP);
}

static void
bna_llport_fail(struct bna_llport *llport)
{
	/* Reset the physical port status to enabled */
	llport->flags |= BNA_LLPORT_F_PORT_ENABLED;
	bfa_fsm_send_event(llport, LLPORT_E_FAIL);
}

static int
bna_llport_state_get(struct bna_llport *llport)
{
	return bfa_sm_to_state(llport_sm_table, llport->fsm);
}

/* First Rx started: set the flag and raise LLPORT_E_UP when ready */
void
bna_llport_rx_started(struct bna_llport *llport)
{
	llport->rx_started_count++;

	if (llport->rx_started_count == 1) {

		llport->flags |= BNA_LLPORT_F_RX_STARTED;

		if
		    (llport_can_be_up(llport))
			bfa_fsm_send_event(llport, LLPORT_E_UP);
	}
}

/* Last Rx stopped: sample up-state before clearing the RX_STARTED flag */
void
bna_llport_rx_stopped(struct bna_llport *llport)
{
	int llport_up = llport_is_up(llport);

	llport->rx_started_count--;

	if (llport->rx_started_count == 0) {

		llport->flags &= ~BNA_LLPORT_F_RX_STARTED;

		if (llport_up)
			bfa_fsm_send_event(llport, LLPORT_E_DOWN);
	}
}

/**
 * PORT
 */
#define bna_port_chld_start(port)\
do {\
	enum bna_tx_type tx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
					BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK;\
	enum bna_rx_type rx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
					BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;\
	bna_llport_start(&(port)->llport);\
	bna_tx_mod_start(&(port)->bna->tx_mod, tx_type);\
	bna_rx_mod_start(&(port)->bna->rx_mod, rx_type);\
} while (0)

/* Three wc_up() calls: llport, tx_mod and rx_mod each count down once */
#define bna_port_chld_stop(port)\
do {\
	enum bna_tx_type tx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
					BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK;\
	enum bna_rx_type rx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
					BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;\
	bfa_wc_up(&(port)->chld_stop_wc);\
	bfa_wc_up(&(port)->chld_stop_wc);\
	bfa_wc_up(&(port)->chld_stop_wc);\
	bna_llport_stop(&(port)->llport);\
	bna_tx_mod_stop(&(port)->bna->tx_mod, tx_type);\
	bna_rx_mod_stop(&(port)->bna->rx_mod, rx_type);\
} while (0)

#define bna_port_chld_fail(port)\
do {\
	bna_llport_fail(&(port)->llport);\
	bna_tx_mod_fail(&(port)->bna->tx_mod);\
	bna_rx_mod_fail(&(port)->bna->rx_mod);\
} while (0)

#define bna_port_rx_start(port)\
do {\
	enum bna_rx_type rx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
					BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;\
	bna_rx_mod_start(&(port)->bna->rx_mod, rx_type);\
} while (0)

#define bna_port_rx_stop(port)\
do {\
	enum bna_rx_type rx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
					BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;\
	bfa_wc_up(&(port)->chld_stop_wc);\
	bna_rx_mod_stop(&(port)->bna->rx_mod, rx_type);\
} while (0)

/* One-shot callback invokers: fire (if set) and clear */
#define call_port_stop_cbfn(port, status)\
do {\
	if ((port)->stop_cbfn)\
		(port)->stop_cbfn((port)->stop_cbarg, status);\
	(port)->stop_cbfn = NULL;\
	(port)->stop_cbarg = NULL;\
} while (0)

#define call_port_pause_cbfn(port, status)\
do {\
	if ((port)->pause_cbfn)\
		(port)->pause_cbfn((port)->bna->bnad, status);\
	(port)->pause_cbfn = NULL;\
} while (0)

#define call_port_mtu_cbfn(port, status)\
do {\
	if ((port)->mtu_cbfn)\
		(port)->mtu_cbfn((port)->bna->bnad, status);\
	(port)->mtu_cbfn = NULL;\
} while (0)

static void bna_fw_pause_set(struct bna_port *port);
static void bna_fw_cb_pause_set(void *arg, int status);
static void bna_fw_mtu_set(struct bna_port *port);
static void bna_fw_cb_mtu_set(void *arg, int status);

enum bna_port_event {
	PORT_E_START = 1,
	PORT_E_STOP = 2,
	PORT_E_FAIL = 3,
	PORT_E_PAUSE_CFG = 4,
	PORT_E_MTU_CFG = 5,
	PORT_E_CHLD_STOPPED = 6,
	PORT_E_FWRESP_PAUSE = 7,
	PORT_E_FWRESP_MTU = 8
};

enum bna_port_state {
	BNA_PORT_STOPPED = 1,
	BNA_PORT_MTU_INIT_WAIT = 2,
	BNA_PORT_PAUSE_INIT_WAIT = 3,
	BNA_PORT_LAST_RESP_WAIT = 4,
	BNA_PORT_STARTED = 5,
	BNA_PORT_PAUSE_CFG_WAIT = 6,
	BNA_PORT_RX_STOP_WAIT = 7,
	BNA_PORT_MTU_CFG_WAIT = 8,
	BNA_PORT_CHLD_STOP_WAIT = 9
};

bfa_fsm_state_decl(bna_port, stopped, struct bna_port,
			enum bna_port_event);
bfa_fsm_state_decl(bna_port, mtu_init_wait, struct bna_port,
			enum bna_port_event);
bfa_fsm_state_decl(bna_port, pause_init_wait, struct bna_port,
			enum bna_port_event);
bfa_fsm_state_decl(bna_port, last_resp_wait, struct bna_port,
			enum bna_port_event);
bfa_fsm_state_decl(bna_port, started, struct bna_port,
			enum bna_port_event);
bfa_fsm_state_decl(bna_port, pause_cfg_wait, struct bna_port,
		enum bna_port_event);
bfa_fsm_state_decl(bna_port, rx_stop_wait, struct bna_port,
			enum bna_port_event);
bfa_fsm_state_decl(bna_port, mtu_cfg_wait, struct bna_port,
			enum bna_port_event);
bfa_fsm_state_decl(bna_port, chld_stop_wait, struct bna_port,
			enum bna_port_event);

/* Maps FSM handler functions to the enum bna_port_state values */
static struct bfa_sm_table port_sm_table[] = {
	{BFA_SM(bna_port_sm_stopped), BNA_PORT_STOPPED},
	{BFA_SM(bna_port_sm_mtu_init_wait), BNA_PORT_MTU_INIT_WAIT},
	{BFA_SM(bna_port_sm_pause_init_wait), BNA_PORT_PAUSE_INIT_WAIT},
	{BFA_SM(bna_port_sm_last_resp_wait), BNA_PORT_LAST_RESP_WAIT},
	{BFA_SM(bna_port_sm_started), BNA_PORT_STARTED},
	{BFA_SM(bna_port_sm_pause_cfg_wait), BNA_PORT_PAUSE_CFG_WAIT},
	{BFA_SM(bna_port_sm_rx_stop_wait), BNA_PORT_RX_STOP_WAIT},
	{BFA_SM(bna_port_sm_mtu_cfg_wait), BNA_PORT_MTU_CFG_WAIT},
	{BFA_SM(bna_port_sm_chld_stop_wait), BNA_PORT_CHLD_STOP_WAIT}
};

static void
bna_port_sm_stopped_entry(struct bna_port *port)
{
	call_port_pause_cbfn(port, BNA_CB_SUCCESS);
	call_port_mtu_cbfn(port, BNA_CB_SUCCESS);
	call_port_stop_cbfn(port, BNA_CB_SUCCESS);
}

static void
bna_port_sm_stopped(struct bna_port *port, enum bna_port_event event)
{
	switch (event) {
	case PORT_E_START:
		bfa_fsm_set_state(port, bna_port_sm_mtu_init_wait);
		break;

	case PORT_E_STOP:
		call_port_stop_cbfn(port, BNA_CB_SUCCESS);
		break;

	case PORT_E_FAIL:
		/* No-op */
		break;

	case PORT_E_PAUSE_CFG:
		call_port_pause_cbfn(port, BNA_CB_SUCCESS);
		break;

	case PORT_E_MTU_CFG:
		call_port_mtu_cbfn(port, BNA_CB_SUCCESS);
		break;

	case PORT_E_CHLD_STOPPED:
		/**
		 * This event is received due to LLPort, Tx and Rx objects
		 * failing
		 */
		/* No-op */
		break;

	case PORT_E_FWRESP_PAUSE:
	case PORT_E_FWRESP_MTU:
		/**
		 * These events are received due to flushing of mbox when
		 * device fails
		 */
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_port_sm_mtu_init_wait_entry(struct bna_port *port)
{
	bna_fw_mtu_set(port);
}

static void
bna_port_sm_mtu_init_wait(struct bna_port *port, enum bna_port_event event)
{
	switch (event) {
	case PORT_E_STOP:
		bfa_fsm_set_state(port, bna_port_sm_last_resp_wait);
		break;

	case PORT_E_FAIL:
		bfa_fsm_set_state(port, bna_port_sm_stopped);
		break;

	case PORT_E_PAUSE_CFG:
		/* No-op */
		break;

	case PORT_E_MTU_CFG:
		/* Remember the change; re-issued on the pending FW response */
		port->flags |= BNA_PORT_F_MTU_CHANGED;
		break;

	case PORT_E_FWRESP_MTU:
		if (port->flags & BNA_PORT_F_MTU_CHANGED) {
			port->flags &= ~BNA_PORT_F_MTU_CHANGED;
			bna_fw_mtu_set(port);
		} else {
			bfa_fsm_set_state(port, bna_port_sm_pause_init_wait);
		}
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_port_sm_pause_init_wait_entry(struct bna_port *port)
{
	bna_fw_pause_set(port);
}

static void
bna_port_sm_pause_init_wait(struct bna_port *port,
				enum bna_port_event event)
{
	switch (event) {
	case PORT_E_STOP:
		bfa_fsm_set_state(port, bna_port_sm_last_resp_wait);
		break;

	case PORT_E_FAIL:
		bfa_fsm_set_state(port, bna_port_sm_stopped);
		break;

	case PORT_E_PAUSE_CFG:
		port->flags |= BNA_PORT_F_PAUSE_CHANGED;
		break;

	case PORT_E_MTU_CFG:
		port->flags |= BNA_PORT_F_MTU_CHANGED;
		break;

	case PORT_E_FWRESP_PAUSE:
		if (port->flags & BNA_PORT_F_PAUSE_CHANGED) {
			port->flags &= ~BNA_PORT_F_PAUSE_CHANGED;
			bna_fw_pause_set(port);
		} else if (port->flags & BNA_PORT_F_MTU_CHANGED) {
			port->flags &= ~BNA_PORT_F_MTU_CHANGED;
			bfa_fsm_set_state(port, bna_port_sm_mtu_init_wait);
		} else {
			bfa_fsm_set_state(port, bna_port_sm_started);
			bna_port_chld_start(port);
		}
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_port_sm_last_resp_wait_entry(struct bna_port *port)
{
}

static void
bna_port_sm_last_resp_wait(struct bna_port *port,
				enum bna_port_event event)
{
	switch (event) {
	case PORT_E_FAIL:
	case PORT_E_FWRESP_PAUSE:
	case PORT_E_FWRESP_MTU:
		bfa_fsm_set_state(port, bna_port_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_port_sm_started_entry(struct bna_port *port)
{
	/**
	 * NOTE: Do not call bna_port_chld_start() here, since it will be
	 * inadvertently called during pause_cfg_wait->started transition
	 * as well
	 */
	call_port_pause_cbfn(port, BNA_CB_SUCCESS);
	call_port_mtu_cbfn(port, BNA_CB_SUCCESS);
}

static void
bna_port_sm_started(struct bna_port *port,
			enum bna_port_event event)
{
	switch (event) {
	case PORT_E_STOP:
		bfa_fsm_set_state(port, bna_port_sm_chld_stop_wait);
		break;

	case PORT_E_FAIL:
		bfa_fsm_set_state(port, bna_port_sm_stopped);
		bna_port_chld_fail(port);
		break;

	case PORT_E_PAUSE_CFG:
		bfa_fsm_set_state(port, bna_port_sm_pause_cfg_wait);
		break;

	case PORT_E_MTU_CFG:
		bfa_fsm_set_state(port, bna_port_sm_rx_stop_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_port_sm_pause_cfg_wait_entry(struct bna_port *port)
{
	bna_fw_pause_set(port);
}

static void
bna_port_sm_pause_cfg_wait(struct bna_port *port,
				enum bna_port_event event)
{
	switch (event) {
	case PORT_E_FAIL:
		bfa_fsm_set_state(port, bna_port_sm_stopped);
		bna_port_chld_fail(port);
		break;

	case PORT_E_FWRESP_PAUSE:
		bfa_fsm_set_state(port, bna_port_sm_started);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_port_sm_rx_stop_wait_entry(struct bna_port *port)
{
	bna_port_rx_stop(port);
}

static void
bna_port_sm_rx_stop_wait(struct bna_port *port,
				enum bna_port_event event)
{
	switch (event) {
	case PORT_E_FAIL:
		bfa_fsm_set_state(port, bna_port_sm_stopped);
		bna_port_chld_fail(port);
		break;

	case PORT_E_CHLD_STOPPED:
		bfa_fsm_set_state(port, bna_port_sm_mtu_cfg_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_port_sm_mtu_cfg_wait_entry(struct bna_port *port)
{
	bna_fw_mtu_set(port);
}

static void
bna_port_sm_mtu_cfg_wait(struct bna_port *port, enum bna_port_event event)
{
	switch (event) {
	case PORT_E_FAIL:
		bfa_fsm_set_state(port, bna_port_sm_stopped);
		bna_port_chld_fail(port);
		break;

	case PORT_E_FWRESP_MTU:
		bfa_fsm_set_state(port, bna_port_sm_started);
		bna_port_rx_start(port);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_port_sm_chld_stop_wait_entry(struct bna_port *port)
{
	bna_port_chld_stop(port);
}

static void
bna_port_sm_chld_stop_wait(struct bna_port *port,
				enum bna_port_event event)
{
	switch (event) {
	case PORT_E_FAIL:
		bfa_fsm_set_state(port, bna_port_sm_stopped);
		bna_port_chld_fail(port);
		break;

	case PORT_E_CHLD_STOPPED:
		bfa_fsm_set_state(port, bna_port_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}

/* Build and post a SET_PAUSE request from the cached pause_config */
static void
bna_fw_pause_set(struct bna_port *port)
{
	struct bfi_ll_set_pause_req ll_req;

	memset(&ll_req, 0, sizeof(ll_req));
	ll_req.mh.msg_class = BFI_MC_LL;
	ll_req.mh.msg_id = BFI_LL_H2I_SET_PAUSE_REQ;
	ll_req.mh.mtag.h2i.lpu_id = 0;

	ll_req.tx_pause = port->pause_config.tx_pause;
	ll_req.rx_pause = port->pause_config.rx_pause;

	bna_mbox_qe_fill(&port->mbox_qe, &ll_req, sizeof(ll_req),
bna_fw_cb_pause_set, port); 1182 1183 bna_mbox_send(port->bna, &port->mbox_qe); 1184} 1185 1186static void 1187bna_fw_cb_pause_set(void *arg, int status) 1188{ 1189 struct bna_port *port = (struct bna_port *)arg; 1190 1191 bfa_q_qe_init(&port->mbox_qe.qe); 1192 bfa_fsm_send_event(port, PORT_E_FWRESP_PAUSE); 1193} 1194 1195void 1196bna_fw_mtu_set(struct bna_port *port) 1197{ 1198 struct bfi_ll_mtu_info_req ll_req; 1199 1200 bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_MTU_INFO_REQ, 0); 1201 ll_req.mtu = htons((u16)port->mtu); 1202 1203 bna_mbox_qe_fill(&port->mbox_qe, &ll_req, sizeof(ll_req), 1204 bna_fw_cb_mtu_set, port); 1205 bna_mbox_send(port->bna, &port->mbox_qe); 1206} 1207 1208void 1209bna_fw_cb_mtu_set(void *arg, int status) 1210{ 1211 struct bna_port *port = (struct bna_port *)arg; 1212 1213 bfa_q_qe_init(&port->mbox_qe.qe); 1214 bfa_fsm_send_event(port, PORT_E_FWRESP_MTU); 1215} 1216 1217static void 1218bna_port_cb_chld_stopped(void *arg) 1219{ 1220 struct bna_port *port = (struct bna_port *)arg; 1221 1222 bfa_fsm_send_event(port, PORT_E_CHLD_STOPPED); 1223} 1224 1225static void 1226bna_port_init(struct bna_port *port, struct bna *bna) 1227{ 1228 port->bna = bna; 1229 port->flags = 0; 1230 port->mtu = 0; 1231 port->type = BNA_PORT_T_REGULAR; 1232 1233 port->link_cbfn = bnad_cb_port_link_status; 1234 1235 port->chld_stop_wc.wc_resume = bna_port_cb_chld_stopped; 1236 port->chld_stop_wc.wc_cbarg = port; 1237 port->chld_stop_wc.wc_count = 0; 1238 1239 port->stop_cbfn = NULL; 1240 port->stop_cbarg = NULL; 1241 1242 port->pause_cbfn = NULL; 1243 1244 port->mtu_cbfn = NULL; 1245 1246 bfa_q_qe_init(&port->mbox_qe.qe); 1247 1248 bfa_fsm_set_state(port, bna_port_sm_stopped); 1249 1250 bna_llport_init(&port->llport, bna); 1251} 1252 1253static void 1254bna_port_uninit(struct bna_port *port) 1255{ 1256 bna_llport_uninit(&port->llport); 1257 1258 port->flags = 0; 1259 1260 port->bna = NULL; 1261} 1262 1263static int 1264bna_port_state_get(struct bna_port *port) 1265{ 
1266 return bfa_sm_to_state(port_sm_table, port->fsm); 1267} 1268 1269static void 1270bna_port_start(struct bna_port *port) 1271{ 1272 port->flags |= BNA_PORT_F_DEVICE_READY; 1273 if (port->flags & BNA_PORT_F_ENABLED) 1274 bfa_fsm_send_event(port, PORT_E_START); 1275} 1276 1277static void 1278bna_port_stop(struct bna_port *port) 1279{ 1280 port->stop_cbfn = bna_device_cb_port_stopped; 1281 port->stop_cbarg = &port->bna->device; 1282 1283 port->flags &= ~BNA_PORT_F_DEVICE_READY; 1284 bfa_fsm_send_event(port, PORT_E_STOP); 1285} 1286 1287static void 1288bna_port_fail(struct bna_port *port) 1289{ 1290 port->flags &= ~BNA_PORT_F_DEVICE_READY; 1291 bfa_fsm_send_event(port, PORT_E_FAIL); 1292} 1293 1294void 1295bna_port_cb_tx_stopped(struct bna_port *port, enum bna_cb_status status) 1296{ 1297 bfa_wc_down(&port->chld_stop_wc); 1298} 1299 1300void 1301bna_port_cb_rx_stopped(struct bna_port *port, enum bna_cb_status status) 1302{ 1303 bfa_wc_down(&port->chld_stop_wc); 1304} 1305 1306int 1307bna_port_mtu_get(struct bna_port *port) 1308{ 1309 return port->mtu; 1310} 1311 1312void 1313bna_port_enable(struct bna_port *port) 1314{ 1315 if (port->fsm != (bfa_sm_t)bna_port_sm_stopped) 1316 return; 1317 1318 port->flags |= BNA_PORT_F_ENABLED; 1319 1320 if (port->flags & BNA_PORT_F_DEVICE_READY) 1321 bfa_fsm_send_event(port, PORT_E_START); 1322} 1323 1324void 1325bna_port_disable(struct bna_port *port, enum bna_cleanup_type type, 1326 void (*cbfn)(void *, enum bna_cb_status)) 1327{ 1328 if (type == BNA_SOFT_CLEANUP) { 1329 (*cbfn)(port->bna->bnad, BNA_CB_SUCCESS); 1330 return; 1331 } 1332 1333 port->stop_cbfn = cbfn; 1334 port->stop_cbarg = port->bna->bnad; 1335 1336 port->flags &= ~BNA_PORT_F_ENABLED; 1337 1338 bfa_fsm_send_event(port, PORT_E_STOP); 1339} 1340 1341void 1342bna_port_pause_config(struct bna_port *port, 1343 struct bna_pause_config *pause_config, 1344 void (*cbfn)(struct bnad *, enum bna_cb_status)) 1345{ 1346 port->pause_config = *pause_config; 1347 1348 
port->pause_cbfn = cbfn; 1349 1350 bfa_fsm_send_event(port, PORT_E_PAUSE_CFG); 1351} 1352 1353void 1354bna_port_mtu_set(struct bna_port *port, int mtu, 1355 void (*cbfn)(struct bnad *, enum bna_cb_status)) 1356{ 1357 port->mtu = mtu; 1358 1359 port->mtu_cbfn = cbfn; 1360 1361 bfa_fsm_send_event(port, PORT_E_MTU_CFG); 1362} 1363 1364void 1365bna_port_mac_get(struct bna_port *port, mac_t *mac) 1366{ 1367 *mac = bfa_nw_ioc_get_mac(&port->bna->device.ioc); 1368} 1369 1370/** 1371 * DEVICE 1372 */ 1373#define enable_mbox_intr(_device)\ 1374do {\ 1375 u32 intr_status;\ 1376 bna_intr_status_get((_device)->bna, intr_status);\ 1377 bnad_cb_device_enable_mbox_intr((_device)->bna->bnad);\ 1378 bna_mbox_intr_enable((_device)->bna);\ 1379} while (0) 1380 1381#define disable_mbox_intr(_device)\ 1382do {\ 1383 bna_mbox_intr_disable((_device)->bna);\ 1384 bnad_cb_device_disable_mbox_intr((_device)->bna->bnad);\ 1385} while (0) 1386 1387static const struct bna_chip_regs_offset reg_offset[] = 1388{{HOST_PAGE_NUM_FN0, HOSTFN0_INT_STATUS, 1389 HOSTFN0_INT_MASK, HOST_MSIX_ERR_INDEX_FN0}, 1390{HOST_PAGE_NUM_FN1, HOSTFN1_INT_STATUS, 1391 HOSTFN1_INT_MASK, HOST_MSIX_ERR_INDEX_FN1}, 1392{HOST_PAGE_NUM_FN2, HOSTFN2_INT_STATUS, 1393 HOSTFN2_INT_MASK, HOST_MSIX_ERR_INDEX_FN2}, 1394{HOST_PAGE_NUM_FN3, HOSTFN3_INT_STATUS, 1395 HOSTFN3_INT_MASK, HOST_MSIX_ERR_INDEX_FN3}, 1396}; 1397 1398enum bna_device_event { 1399 DEVICE_E_ENABLE = 1, 1400 DEVICE_E_DISABLE = 2, 1401 DEVICE_E_IOC_READY = 3, 1402 DEVICE_E_IOC_FAILED = 4, 1403 DEVICE_E_IOC_DISABLED = 5, 1404 DEVICE_E_IOC_RESET = 6, 1405 DEVICE_E_PORT_STOPPED = 7, 1406}; 1407 1408enum bna_device_state { 1409 BNA_DEVICE_STOPPED = 1, 1410 BNA_DEVICE_IOC_READY_WAIT = 2, 1411 BNA_DEVICE_READY = 3, 1412 BNA_DEVICE_PORT_STOP_WAIT = 4, 1413 BNA_DEVICE_IOC_DISABLE_WAIT = 5, 1414 BNA_DEVICE_FAILED = 6 1415}; 1416 1417bfa_fsm_state_decl(bna_device, stopped, struct bna_device, 1418 enum bna_device_event); 1419bfa_fsm_state_decl(bna_device, ioc_ready_wait, 
struct bna_device, 1420 enum bna_device_event); 1421bfa_fsm_state_decl(bna_device, ready, struct bna_device, 1422 enum bna_device_event); 1423bfa_fsm_state_decl(bna_device, port_stop_wait, struct bna_device, 1424 enum bna_device_event); 1425bfa_fsm_state_decl(bna_device, ioc_disable_wait, struct bna_device, 1426 enum bna_device_event); 1427bfa_fsm_state_decl(bna_device, failed, struct bna_device, 1428 enum bna_device_event); 1429 1430static struct bfa_sm_table device_sm_table[] = { 1431 {BFA_SM(bna_device_sm_stopped), BNA_DEVICE_STOPPED}, 1432 {BFA_SM(bna_device_sm_ioc_ready_wait), BNA_DEVICE_IOC_READY_WAIT}, 1433 {BFA_SM(bna_device_sm_ready), BNA_DEVICE_READY}, 1434 {BFA_SM(bna_device_sm_port_stop_wait), BNA_DEVICE_PORT_STOP_WAIT}, 1435 {BFA_SM(bna_device_sm_ioc_disable_wait), BNA_DEVICE_IOC_DISABLE_WAIT}, 1436 {BFA_SM(bna_device_sm_failed), BNA_DEVICE_FAILED}, 1437}; 1438 1439static void 1440bna_device_sm_stopped_entry(struct bna_device *device) 1441{ 1442 if (device->stop_cbfn) 1443 device->stop_cbfn(device->stop_cbarg, BNA_CB_SUCCESS); 1444 1445 device->stop_cbfn = NULL; 1446 device->stop_cbarg = NULL; 1447} 1448 1449static void 1450bna_device_sm_stopped(struct bna_device *device, 1451 enum bna_device_event event) 1452{ 1453 switch (event) { 1454 case DEVICE_E_ENABLE: 1455 if (device->intr_type == BNA_INTR_T_MSIX) 1456 bna_mbox_msix_idx_set(device); 1457 bfa_nw_ioc_enable(&device->ioc); 1458 bfa_fsm_set_state(device, bna_device_sm_ioc_ready_wait); 1459 break; 1460 1461 case DEVICE_E_DISABLE: 1462 bfa_fsm_set_state(device, bna_device_sm_stopped); 1463 break; 1464 1465 case DEVICE_E_IOC_RESET: 1466 enable_mbox_intr(device); 1467 break; 1468 1469 case DEVICE_E_IOC_FAILED: 1470 bfa_fsm_set_state(device, bna_device_sm_failed); 1471 break; 1472 1473 default: 1474 bfa_sm_fault(event); 1475 } 1476} 1477 1478static void 1479bna_device_sm_ioc_ready_wait_entry(struct bna_device *device) 1480{ 1481 /** 1482 * Do not call bfa_ioc_enable() here. 
It must be called in the 1483 * previous state due to failed -> ioc_ready_wait transition. 1484 */ 1485} 1486 1487static void 1488bna_device_sm_ioc_ready_wait(struct bna_device *device, 1489 enum bna_device_event event) 1490{ 1491 switch (event) { 1492 case DEVICE_E_DISABLE: 1493 if (device->ready_cbfn) 1494 device->ready_cbfn(device->ready_cbarg, 1495 BNA_CB_INTERRUPT); 1496 device->ready_cbfn = NULL; 1497 device->ready_cbarg = NULL; 1498 bfa_fsm_set_state(device, bna_device_sm_ioc_disable_wait); 1499 break; 1500 1501 case DEVICE_E_IOC_READY: 1502 bfa_fsm_set_state(device, bna_device_sm_ready); 1503 break; 1504 1505 case DEVICE_E_IOC_FAILED: 1506 bfa_fsm_set_state(device, bna_device_sm_failed); 1507 break; 1508 1509 case DEVICE_E_IOC_RESET: 1510 enable_mbox_intr(device); 1511 break; 1512 1513 default: 1514 bfa_sm_fault(event); 1515 } 1516} 1517 1518static void 1519bna_device_sm_ready_entry(struct bna_device *device) 1520{ 1521 bna_mbox_mod_start(&device->bna->mbox_mod); 1522 bna_port_start(&device->bna->port); 1523 1524 if (device->ready_cbfn) 1525 device->ready_cbfn(device->ready_cbarg, 1526 BNA_CB_SUCCESS); 1527 device->ready_cbfn = NULL; 1528 device->ready_cbarg = NULL; 1529} 1530 1531static void 1532bna_device_sm_ready(struct bna_device *device, enum bna_device_event event) 1533{ 1534 switch (event) { 1535 case DEVICE_E_DISABLE: 1536 bfa_fsm_set_state(device, bna_device_sm_port_stop_wait); 1537 break; 1538 1539 case DEVICE_E_IOC_FAILED: 1540 bfa_fsm_set_state(device, bna_device_sm_failed); 1541 break; 1542 1543 default: 1544 bfa_sm_fault(event); 1545 } 1546} 1547 1548static void 1549bna_device_sm_port_stop_wait_entry(struct bna_device *device) 1550{ 1551 bna_port_stop(&device->bna->port); 1552} 1553 1554static void 1555bna_device_sm_port_stop_wait(struct bna_device *device, 1556 enum bna_device_event event) 1557{ 1558 switch (event) { 1559 case DEVICE_E_PORT_STOPPED: 1560 bna_mbox_mod_stop(&device->bna->mbox_mod); 1561 bfa_fsm_set_state(device, 
bna_device_sm_ioc_disable_wait); 1562 break; 1563 1564 case DEVICE_E_IOC_FAILED: 1565 disable_mbox_intr(device); 1566 bna_port_fail(&device->bna->port); 1567 break; 1568 1569 default: 1570 bfa_sm_fault(event); 1571 } 1572} 1573 1574static void 1575bna_device_sm_ioc_disable_wait_entry(struct bna_device *device) 1576{ 1577 bfa_nw_ioc_disable(&device->ioc); 1578} 1579 1580static void 1581bna_device_sm_ioc_disable_wait(struct bna_device *device, 1582 enum bna_device_event event) 1583{ 1584 switch (event) { 1585 case DEVICE_E_IOC_DISABLED: 1586 disable_mbox_intr(device); 1587 bfa_fsm_set_state(device, bna_device_sm_stopped); 1588 break; 1589 1590 default: 1591 bfa_sm_fault(event); 1592 } 1593} 1594 1595static void 1596bna_device_sm_failed_entry(struct bna_device *device) 1597{ 1598 disable_mbox_intr(device); 1599 bna_port_fail(&device->bna->port); 1600 bna_mbox_mod_stop(&device->bna->mbox_mod); 1601 1602 if (device->ready_cbfn) 1603 device->ready_cbfn(device->ready_cbarg, 1604 BNA_CB_FAIL); 1605 device->ready_cbfn = NULL; 1606 device->ready_cbarg = NULL; 1607} 1608 1609static void 1610bna_device_sm_failed(struct bna_device *device, 1611 enum bna_device_event event) 1612{ 1613 switch (event) { 1614 case DEVICE_E_DISABLE: 1615 bfa_fsm_set_state(device, bna_device_sm_ioc_disable_wait); 1616 break; 1617 1618 case DEVICE_E_IOC_RESET: 1619 enable_mbox_intr(device); 1620 bfa_fsm_set_state(device, bna_device_sm_ioc_ready_wait); 1621 break; 1622 1623 default: 1624 bfa_sm_fault(event); 1625 } 1626} 1627 1628/* IOC callback functions */ 1629 1630static void 1631bna_device_cb_iocll_ready(void *dev, enum bfa_status error) 1632{ 1633 struct bna_device *device = (struct bna_device *)dev; 1634 1635 if (error) 1636 bfa_fsm_send_event(device, DEVICE_E_IOC_FAILED); 1637 else 1638 bfa_fsm_send_event(device, DEVICE_E_IOC_READY); 1639} 1640 1641static void 1642bna_device_cb_iocll_disabled(void *dev) 1643{ 1644 struct bna_device *device = (struct bna_device *)dev; 1645 1646 
bfa_fsm_send_event(device, DEVICE_E_IOC_DISABLED); 1647} 1648 1649static void 1650bna_device_cb_iocll_failed(void *dev) 1651{ 1652 struct bna_device *device = (struct bna_device *)dev; 1653 1654 bfa_fsm_send_event(device, DEVICE_E_IOC_FAILED); 1655} 1656 1657static void 1658bna_device_cb_iocll_reset(void *dev) 1659{ 1660 struct bna_device *device = (struct bna_device *)dev; 1661 1662 bfa_fsm_send_event(device, DEVICE_E_IOC_RESET); 1663} 1664 1665static struct bfa_ioc_cbfn bfa_iocll_cbfn = { 1666 bna_device_cb_iocll_ready, 1667 bna_device_cb_iocll_disabled, 1668 bna_device_cb_iocll_failed, 1669 bna_device_cb_iocll_reset 1670}; 1671 1672/* device */ 1673static void 1674bna_adv_device_init(struct bna_device *device, struct bna *bna, 1675 struct bna_res_info *res_info) 1676{ 1677 u8 *kva; 1678 u64 dma; 1679 1680 device->bna = bna; 1681 1682 kva = res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mdl[0].kva; 1683 1684 /** 1685 * Attach common modules (Diag, SFP, CEE, Port) and claim respective 1686 * DMA memory. 1687 */ 1688 BNA_GET_DMA_ADDR( 1689 &res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].dma, dma); 1690 kva = res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].kva; 1691 1692 bfa_nw_cee_attach(&bna->cee, &device->ioc, bna); 1693 bfa_nw_cee_mem_claim(&bna->cee, kva, dma); 1694 kva += bfa_nw_cee_meminfo(); 1695 dma += bfa_nw_cee_meminfo(); 1696 1697} 1698 1699static void 1700bna_device_init(struct bna_device *device, struct bna *bna, 1701 struct bna_res_info *res_info) 1702{ 1703 u64 dma; 1704 1705 device->bna = bna; 1706 1707 /** 1708 * Attach IOC and claim: 1709 * 1. DMA memory for IOC attributes 1710 * 2. 
Kernel memory for FW trace 1711 */ 1712 bfa_nw_ioc_attach(&device->ioc, device, &bfa_iocll_cbfn); 1713 bfa_nw_ioc_pci_init(&device->ioc, &bna->pcidev, BFI_MC_LL); 1714 1715 BNA_GET_DMA_ADDR( 1716 &res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].dma, dma); 1717 bfa_nw_ioc_mem_claim(&device->ioc, 1718 res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].kva, 1719 dma); 1720 1721 bna_adv_device_init(device, bna, res_info); 1722 /* 1723 * Initialize mbox_mod only after IOC, so that mbox handler 1724 * registration goes through 1725 */ 1726 device->intr_type = 1727 res_info[BNA_RES_INTR_T_MBOX].res_u.intr_info.intr_type; 1728 device->vector = 1729 res_info[BNA_RES_INTR_T_MBOX].res_u.intr_info.idl[0].vector; 1730 bna_mbox_mod_init(&bna->mbox_mod, bna); 1731 1732 device->ready_cbfn = device->stop_cbfn = NULL; 1733 device->ready_cbarg = device->stop_cbarg = NULL; 1734 1735 bfa_fsm_set_state(device, bna_device_sm_stopped); 1736} 1737 1738static void 1739bna_device_uninit(struct bna_device *device) 1740{ 1741 bna_mbox_mod_uninit(&device->bna->mbox_mod); 1742 1743 bfa_nw_ioc_detach(&device->ioc); 1744 1745 device->bna = NULL; 1746} 1747 1748static void 1749bna_device_cb_port_stopped(void *arg, enum bna_cb_status status) 1750{ 1751 struct bna_device *device = (struct bna_device *)arg; 1752 1753 bfa_fsm_send_event(device, DEVICE_E_PORT_STOPPED); 1754} 1755 1756static int 1757bna_device_status_get(struct bna_device *device) 1758{ 1759 return device->fsm == (bfa_fsm_t)bna_device_sm_ready; 1760} 1761 1762void 1763bna_device_enable(struct bna_device *device) 1764{ 1765 if (device->fsm != (bfa_fsm_t)bna_device_sm_stopped) { 1766 bnad_cb_device_enabled(device->bna->bnad, BNA_CB_BUSY); 1767 return; 1768 } 1769 1770 device->ready_cbfn = bnad_cb_device_enabled; 1771 device->ready_cbarg = device->bna->bnad; 1772 1773 bfa_fsm_send_event(device, DEVICE_E_ENABLE); 1774} 1775 1776void 1777bna_device_disable(struct bna_device *device, enum bna_cleanup_type type) 1778{ 1779 if (type == 
BNA_SOFT_CLEANUP) { 1780 bnad_cb_device_disabled(device->bna->bnad, BNA_CB_SUCCESS); 1781 return; 1782 } 1783 1784 device->stop_cbfn = bnad_cb_device_disabled; 1785 device->stop_cbarg = device->bna->bnad; 1786 1787 bfa_fsm_send_event(device, DEVICE_E_DISABLE); 1788} 1789 1790static int 1791bna_device_state_get(struct bna_device *device) 1792{ 1793 return bfa_sm_to_state(device_sm_table, device->fsm); 1794} 1795 1796const u32 bna_napi_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = { 1797 {12, 12}, 1798 {6, 10}, 1799 {5, 10}, 1800 {4, 8}, 1801 {3, 6}, 1802 {3, 6}, 1803 {2, 4}, 1804 {1, 2}, 1805}; 1806 1807/* utils */ 1808 1809static void 1810bna_adv_res_req(struct bna_res_info *res_info) 1811{ 1812 /* DMA memory for COMMON_MODULE */ 1813 res_info[BNA_RES_MEM_T_COM].res_type = BNA_RES_T_MEM; 1814 res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mem_type = BNA_MEM_T_DMA; 1815 res_info[BNA_RES_MEM_T_COM].res_u.mem_info.num = 1; 1816 res_info[BNA_RES_MEM_T_COM].res_u.mem_info.len = ALIGN( 1817 bfa_nw_cee_meminfo(), PAGE_SIZE); 1818 1819 /* Virtual memory for retreiving fw_trc */ 1820 res_info[BNA_RES_MEM_T_FWTRC].res_type = BNA_RES_T_MEM; 1821 res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mem_type = BNA_MEM_T_KVA; 1822 res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.num = 0; 1823 res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.len = 0; 1824 1825 /* DMA memory for retreiving stats */ 1826 res_info[BNA_RES_MEM_T_STATS].res_type = BNA_RES_T_MEM; 1827 res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mem_type = BNA_MEM_T_DMA; 1828 res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.num = 1; 1829 res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.len = 1830 ALIGN(BFI_HW_STATS_SIZE, PAGE_SIZE); 1831 1832 /* Virtual memory for soft stats */ 1833 res_info[BNA_RES_MEM_T_SWSTATS].res_type = BNA_RES_T_MEM; 1834 res_info[BNA_RES_MEM_T_SWSTATS].res_u.mem_info.mem_type = BNA_MEM_T_KVA; 1835 res_info[BNA_RES_MEM_T_SWSTATS].res_u.mem_info.num = 1; 1836 res_info[BNA_RES_MEM_T_SWSTATS].res_u.mem_info.len = 1837 
sizeof(struct bna_sw_stats); 1838} 1839 1840static void 1841bna_sw_stats_get(struct bna *bna, struct bna_sw_stats *sw_stats) 1842{ 1843 struct bna_tx *tx; 1844 struct bna_txq *txq; 1845 struct bna_rx *rx; 1846 struct bna_rxp *rxp; 1847 struct list_head *qe; 1848 struct list_head *txq_qe; 1849 struct list_head *rxp_qe; 1850 struct list_head *mac_qe; 1851 int i; 1852 1853 sw_stats->device_state = bna_device_state_get(&bna->device); 1854 sw_stats->port_state = bna_port_state_get(&bna->port); 1855 sw_stats->port_flags = bna->port.flags; 1856 sw_stats->llport_state = bna_llport_state_get(&bna->port.llport); 1857 sw_stats->priority = bna->port.priority; 1858 1859 i = 0; 1860 list_for_each(qe, &bna->tx_mod.tx_active_q) { 1861 tx = (struct bna_tx *)qe; 1862 sw_stats->tx_stats[i].tx_state = bna_tx_state_get(tx); 1863 sw_stats->tx_stats[i].tx_flags = tx->flags; 1864 1865 sw_stats->tx_stats[i].num_txqs = 0; 1866 sw_stats->tx_stats[i].txq_bmap[0] = 0; 1867 sw_stats->tx_stats[i].txq_bmap[1] = 0; 1868 list_for_each(txq_qe, &tx->txq_q) { 1869 txq = (struct bna_txq *)txq_qe; 1870 if (txq->txq_id < 32) 1871 sw_stats->tx_stats[i].txq_bmap[0] |= 1872 ((u32)1 << txq->txq_id); 1873 else 1874 sw_stats->tx_stats[i].txq_bmap[1] |= 1875 ((u32) 1876 1 << (txq->txq_id - 32)); 1877 sw_stats->tx_stats[i].num_txqs++; 1878 } 1879 1880 sw_stats->tx_stats[i].txf_id = tx->txf.txf_id; 1881 1882 i++; 1883 } 1884 sw_stats->num_active_tx = i; 1885 1886 i = 0; 1887 list_for_each(qe, &bna->rx_mod.rx_active_q) { 1888 rx = (struct bna_rx *)qe; 1889 sw_stats->rx_stats[i].rx_state = bna_rx_state_get(rx); 1890 sw_stats->rx_stats[i].rx_flags = rx->rx_flags; 1891 1892 sw_stats->rx_stats[i].num_rxps = 0; 1893 sw_stats->rx_stats[i].num_rxqs = 0; 1894 sw_stats->rx_stats[i].rxq_bmap[0] = 0; 1895 sw_stats->rx_stats[i].rxq_bmap[1] = 0; 1896 sw_stats->rx_stats[i].cq_bmap[0] = 0; 1897 sw_stats->rx_stats[i].cq_bmap[1] = 0; 1898 list_for_each(rxp_qe, &rx->rxp_q) { 1899 rxp = (struct bna_rxp *)rxp_qe; 1900 1901 
sw_stats->rx_stats[i].num_rxqs += 1; 1902 1903 if (rxp->type == BNA_RXP_SINGLE) { 1904 if (rxp->rxq.single.only->rxq_id < 32) { 1905 sw_stats->rx_stats[i].rxq_bmap[0] |= 1906 ((u32)1 << 1907 rxp->rxq.single.only->rxq_id); 1908 } else { 1909 sw_stats->rx_stats[i].rxq_bmap[1] |= 1910 ((u32)1 << 1911 (rxp->rxq.single.only->rxq_id - 32)); 1912 } 1913 } else { 1914 if (rxp->rxq.slr.large->rxq_id < 32) { 1915 sw_stats->rx_stats[i].rxq_bmap[0] |= 1916 ((u32)1 << 1917 rxp->rxq.slr.large->rxq_id); 1918 } else { 1919 sw_stats->rx_stats[i].rxq_bmap[1] |= 1920 ((u32)1 << 1921 (rxp->rxq.slr.large->rxq_id - 32)); 1922 } 1923 1924 if (rxp->rxq.slr.small->rxq_id < 32) { 1925 sw_stats->rx_stats[i].rxq_bmap[0] |= 1926 ((u32)1 << 1927 rxp->rxq.slr.small->rxq_id); 1928 } else { 1929 sw_stats->rx_stats[i].rxq_bmap[1] |= 1930 ((u32)1 << 1931 (rxp->rxq.slr.small->rxq_id - 32)); 1932 } 1933 sw_stats->rx_stats[i].num_rxqs += 1; 1934 } 1935 1936 if (rxp->cq.cq_id < 32) 1937 sw_stats->rx_stats[i].cq_bmap[0] |= 1938 (1 << rxp->cq.cq_id); 1939 else 1940 sw_stats->rx_stats[i].cq_bmap[1] |= 1941 (1 << (rxp->cq.cq_id - 32)); 1942 1943 sw_stats->rx_stats[i].num_rxps++; 1944 } 1945 1946 sw_stats->rx_stats[i].rxf_id = rx->rxf.rxf_id; 1947 sw_stats->rx_stats[i].rxf_state = bna_rxf_state_get(&rx->rxf); 1948 sw_stats->rx_stats[i].rxf_oper_state = rx->rxf.rxf_oper_state; 1949 1950 sw_stats->rx_stats[i].num_active_ucast = 0; 1951 if (rx->rxf.ucast_active_mac) 1952 sw_stats->rx_stats[i].num_active_ucast++; 1953 list_for_each(mac_qe, &rx->rxf.ucast_active_q) 1954 sw_stats->rx_stats[i].num_active_ucast++; 1955 1956 sw_stats->rx_stats[i].num_active_mcast = 0; 1957 list_for_each(mac_qe, &rx->rxf.mcast_active_q) 1958 sw_stats->rx_stats[i].num_active_mcast++; 1959 1960 sw_stats->rx_stats[i].rxmode_active = rx->rxf.rxmode_active; 1961 sw_stats->rx_stats[i].vlan_filter_status = 1962 rx->rxf.vlan_filter_status; 1963 memcpy(sw_stats->rx_stats[i].vlan_filter_table, 1964 rx->rxf.vlan_filter_table, 1965 sizeof(u32) * 
((BFI_MAX_VLAN + 1) / 32)); 1966 1967 sw_stats->rx_stats[i].rss_status = rx->rxf.rss_status; 1968 sw_stats->rx_stats[i].hds_status = rx->rxf.hds_status; 1969 1970 i++; 1971 } 1972 sw_stats->num_active_rx = i; 1973} 1974 1975static void 1976bna_fw_cb_stats_get(void *arg, int status) 1977{ 1978 struct bna *bna = (struct bna *)arg; 1979 u64 *p_stats; 1980 int i, count; 1981 int rxf_count, txf_count; 1982 u64 rxf_bmap, txf_bmap; 1983 1984 bfa_q_qe_init(&bna->mbox_qe.qe); 1985 1986 if (status == 0) { 1987 p_stats = (u64 *)bna->stats.hw_stats; 1988 count = sizeof(struct bfi_ll_stats) / sizeof(u64); 1989 for (i = 0; i < count; i++) 1990 p_stats[i] = cpu_to_be64(p_stats[i]); 1991 1992 rxf_count = 0; 1993 rxf_bmap = (u64)bna->stats.rxf_bmap[0] | 1994 ((u64)bna->stats.rxf_bmap[1] << 32); 1995 for (i = 0; i < BFI_LL_RXF_ID_MAX; i++) 1996 if (rxf_bmap & ((u64)1 << i)) 1997 rxf_count++; 1998 1999 txf_count = 0; 2000 txf_bmap = (u64)bna->stats.txf_bmap[0] | 2001 ((u64)bna->stats.txf_bmap[1] << 32); 2002 for (i = 0; i < BFI_LL_TXF_ID_MAX; i++) 2003 if (txf_bmap & ((u64)1 << i)) 2004 txf_count++; 2005 2006 p_stats = (u64 *)&bna->stats.hw_stats->rxf_stats[0] + 2007 ((rxf_count * sizeof(struct bfi_ll_stats_rxf) + 2008 txf_count * sizeof(struct bfi_ll_stats_txf))/ 2009 sizeof(u64)); 2010 2011 /* Populate the TXF stats from the firmware DMAed copy */ 2012 for (i = (BFI_LL_TXF_ID_MAX - 1); i >= 0; i--) 2013 if (txf_bmap & ((u64)1 << i)) { 2014 p_stats -= sizeof(struct bfi_ll_stats_txf)/ 2015 sizeof(u64); 2016 memcpy(&bna->stats.hw_stats->txf_stats[i], 2017 p_stats, 2018 sizeof(struct bfi_ll_stats_txf)); 2019 } 2020 2021 /* Populate the RXF stats from the firmware DMAed copy */ 2022 for (i = (BFI_LL_RXF_ID_MAX - 1); i >= 0; i--) 2023 if (rxf_bmap & ((u64)1 << i)) { 2024 p_stats -= sizeof(struct bfi_ll_stats_rxf)/ 2025 sizeof(u64); 2026 memcpy(&bna->stats.hw_stats->rxf_stats[i], 2027 p_stats, 2028 sizeof(struct bfi_ll_stats_rxf)); 2029 } 2030 2031 bna_sw_stats_get(bna, 
bna->stats.sw_stats); 2032 bnad_cb_stats_get(bna->bnad, BNA_CB_SUCCESS, &bna->stats); 2033 } else 2034 bnad_cb_stats_get(bna->bnad, BNA_CB_FAIL, &bna->stats); 2035} 2036 2037static void 2038bna_fw_stats_get(struct bna *bna) 2039{ 2040 struct bfi_ll_stats_req ll_req; 2041 2042 bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_STATS_GET_REQ, 0); 2043 ll_req.stats_mask = htons(BFI_LL_STATS_ALL); 2044 2045 ll_req.rxf_id_mask[0] = htonl(bna->rx_mod.rxf_bmap[0]); 2046 ll_req.rxf_id_mask[1] = htonl(bna->rx_mod.rxf_bmap[1]); 2047 ll_req.txf_id_mask[0] = htonl(bna->tx_mod.txf_bmap[0]); 2048 ll_req.txf_id_mask[1] = htonl(bna->tx_mod.txf_bmap[1]); 2049 2050 ll_req.host_buffer.a32.addr_hi = bna->hw_stats_dma.msb; 2051 ll_req.host_buffer.a32.addr_lo = bna->hw_stats_dma.lsb; 2052 2053 bna_mbox_qe_fill(&bna->mbox_qe, &ll_req, sizeof(ll_req), 2054 bna_fw_cb_stats_get, bna); 2055 bna_mbox_send(bna, &bna->mbox_qe); 2056 2057 bna->stats.rxf_bmap[0] = bna->rx_mod.rxf_bmap[0]; 2058 bna->stats.rxf_bmap[1] = bna->rx_mod.rxf_bmap[1]; 2059 bna->stats.txf_bmap[0] = bna->tx_mod.txf_bmap[0]; 2060 bna->stats.txf_bmap[1] = bna->tx_mod.txf_bmap[1]; 2061} 2062 2063void 2064bna_stats_get(struct bna *bna) 2065{ 2066 if (bna_device_status_get(&bna->device)) 2067 bna_fw_stats_get(bna); 2068 else 2069 bnad_cb_stats_get(bna->bnad, BNA_CB_FAIL, &bna->stats); 2070} 2071 2072/* IB */ 2073static void 2074bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo) 2075{ 2076 ib->ib_config.coalescing_timeo = coalescing_timeo; 2077 2078 if (ib->start_count) 2079 ib->door_bell.doorbell_ack = BNA_DOORBELL_IB_INT_ACK( 2080 (u32)ib->ib_config.coalescing_timeo, 0); 2081} 2082 2083/* RxF */ 2084void 2085bna_rxf_adv_init(struct bna_rxf *rxf, 2086 struct bna_rx *rx, 2087 struct bna_rx_config *q_config) 2088{ 2089 switch (q_config->rxp_type) { 2090 case BNA_RXP_SINGLE: 2091 /* No-op */ 2092 break; 2093 case BNA_RXP_SLR: 2094 rxf->ctrl_flags |= BNA_RXF_CF_SM_LG_RXQ; 2095 break; 2096 case BNA_RXP_HDS: 2097 
rxf->hds_cfg.hdr_type = q_config->hds_config.hdr_type; 2098 rxf->hds_cfg.header_size = 2099 q_config->hds_config.header_size; 2100 rxf->forced_offset = 0; 2101 break; 2102 default: 2103 break; 2104 } 2105 2106 if (q_config->rss_status == BNA_STATUS_T_ENABLED) { 2107 rxf->ctrl_flags |= BNA_RXF_CF_RSS_ENABLE; 2108 rxf->rss_cfg.hash_type = q_config->rss_config.hash_type; 2109 rxf->rss_cfg.hash_mask = q_config->rss_config.hash_mask; 2110 memcpy(&rxf->rss_cfg.toeplitz_hash_key[0], 2111 &q_config->rss_config.toeplitz_hash_key[0], 2112 sizeof(rxf->rss_cfg.toeplitz_hash_key)); 2113 } 2114} 2115 2116static void 2117rxf_fltr_mbox_cmd(struct bna_rxf *rxf, u8 cmd, enum bna_status status) 2118{ 2119 struct bfi_ll_rxf_req req; 2120 2121 bfi_h2i_set(req.mh, BFI_MC_LL, cmd, 0); 2122 2123 req.rxf_id = rxf->rxf_id; 2124 req.enable = status; 2125 2126 bna_mbox_qe_fill(&rxf->mbox_qe, &req, sizeof(req), 2127 rxf_cb_cam_fltr_mbox_cmd, rxf); 2128 2129 bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe); 2130} 2131 2132int 2133rxf_process_packet_filter_ucast(struct bna_rxf *rxf) 2134{ 2135 struct bna_mac *mac = NULL; 2136 struct list_head *qe; 2137 2138 /* Add additional MAC entries */ 2139 if (!list_empty(&rxf->ucast_pending_add_q)) { 2140 bfa_q_deq(&rxf->ucast_pending_add_q, &qe); 2141 bfa_q_qe_init(qe); 2142 mac = (struct bna_mac *)qe; 2143 rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_UCAST_ADD_REQ, mac); 2144 list_add_tail(&mac->qe, &rxf->ucast_active_q); 2145 return 1; 2146 } 2147 2148 /* Delete MAC addresses previousely added */ 2149 if (!list_empty(&rxf->ucast_pending_del_q)) { 2150 bfa_q_deq(&rxf->ucast_pending_del_q, &qe); 2151 bfa_q_qe_init(qe); 2152 mac = (struct bna_mac *)qe; 2153 rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_UCAST_DEL_REQ, mac); 2154 bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac); 2155 return 1; 2156 } 2157 2158 return 0; 2159} 2160 2161int 2162rxf_process_packet_filter_promisc(struct bna_rxf *rxf) 2163{ 2164 struct bna *bna = rxf->rx->bna; 2165 2166 /* Enable/disable promiscuous 
mode */ 2167 if (is_promisc_enable(rxf->rxmode_pending, 2168 rxf->rxmode_pending_bitmask)) { 2169 /* move promisc configuration from pending -> active */ 2170 promisc_inactive(rxf->rxmode_pending, 2171 rxf->rxmode_pending_bitmask); 2172 rxf->rxmode_active |= BNA_RXMODE_PROMISC; 2173 2174 /* Disable VLAN filter to allow all VLANs */ 2175 __rxf_vlan_filter_set(rxf, BNA_STATUS_T_DISABLED); 2176 rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_PROMISCUOUS_SET_REQ, 2177 BNA_STATUS_T_ENABLED); 2178 return 1; 2179 } else if (is_promisc_disable(rxf->rxmode_pending, 2180 rxf->rxmode_pending_bitmask)) { 2181 /* move promisc configuration from pending -> active */ 2182 promisc_inactive(rxf->rxmode_pending, 2183 rxf->rxmode_pending_bitmask); 2184 rxf->rxmode_active &= ~BNA_RXMODE_PROMISC; 2185 bna->rxf_promisc_id = BFI_MAX_RXF; 2186 2187 /* Revert VLAN filter */ 2188 __rxf_vlan_filter_set(rxf, rxf->vlan_filter_status); 2189 rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_PROMISCUOUS_SET_REQ, 2190 BNA_STATUS_T_DISABLED); 2191 return 1; 2192 } 2193 2194 return 0; 2195} 2196 2197int 2198rxf_process_packet_filter_allmulti(struct bna_rxf *rxf) 2199{ 2200 /* Enable/disable allmulti mode */ 2201 if (is_allmulti_enable(rxf->rxmode_pending, 2202 rxf->rxmode_pending_bitmask)) { 2203 /* move allmulti configuration from pending -> active */ 2204 allmulti_inactive(rxf->rxmode_pending, 2205 rxf->rxmode_pending_bitmask); 2206 rxf->rxmode_active |= BNA_RXMODE_ALLMULTI; 2207 2208 rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_FILTER_REQ, 2209 BNA_STATUS_T_ENABLED); 2210 return 1; 2211 } else if (is_allmulti_disable(rxf->rxmode_pending, 2212 rxf->rxmode_pending_bitmask)) { 2213 /* move allmulti configuration from pending -> active */ 2214 allmulti_inactive(rxf->rxmode_pending, 2215 rxf->rxmode_pending_bitmask); 2216 rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI; 2217 2218 rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_FILTER_REQ, 2219 BNA_STATUS_T_DISABLED); 2220 return 1; 2221 } 2222 2223 return 0; 2224} 2225 2226int 
2227rxf_clear_packet_filter_ucast(struct bna_rxf *rxf) 2228{ 2229 struct bna_mac *mac = NULL; 2230 struct list_head *qe; 2231 2232 /* 1. delete pending ucast entries */ 2233 if (!list_empty(&rxf->ucast_pending_del_q)) { 2234 bfa_q_deq(&rxf->ucast_pending_del_q, &qe); 2235 bfa_q_qe_init(qe); 2236 mac = (struct bna_mac *)qe; 2237 rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_UCAST_DEL_REQ, mac); 2238 bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac); 2239 return 1; 2240 } 2241 2242 /* 2. clear active ucast entries; move them to pending_add_q */ 2243 if (!list_empty(&rxf->ucast_active_q)) { 2244 bfa_q_deq(&rxf->ucast_active_q, &qe); 2245 bfa_q_qe_init(qe); 2246 mac = (struct bna_mac *)qe; 2247 rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_UCAST_DEL_REQ, mac); 2248 list_add_tail(&mac->qe, &rxf->ucast_pending_add_q); 2249 return 1; 2250 } 2251 2252 return 0; 2253} 2254 2255int 2256rxf_clear_packet_filter_promisc(struct bna_rxf *rxf) 2257{ 2258 struct bna *bna = rxf->rx->bna; 2259 2260 /* 6. Execute pending promisc mode disable command */ 2261 if (is_promisc_disable(rxf->rxmode_pending, 2262 rxf->rxmode_pending_bitmask)) { 2263 /* move promisc configuration from pending -> active */ 2264 promisc_inactive(rxf->rxmode_pending, 2265 rxf->rxmode_pending_bitmask); 2266 rxf->rxmode_active &= ~BNA_RXMODE_PROMISC; 2267 bna->rxf_promisc_id = BFI_MAX_RXF; 2268 2269 /* Revert VLAN filter */ 2270 __rxf_vlan_filter_set(rxf, rxf->vlan_filter_status); 2271 rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_PROMISCUOUS_SET_REQ, 2272 BNA_STATUS_T_DISABLED); 2273 return 1; 2274 } 2275 2276 /* 7. 
Clear active promisc mode; move it to pending enable */ 2277 if (rxf->rxmode_active & BNA_RXMODE_PROMISC) { 2278 /* move promisc configuration from active -> pending */ 2279 promisc_enable(rxf->rxmode_pending, 2280 rxf->rxmode_pending_bitmask); 2281 rxf->rxmode_active &= ~BNA_RXMODE_PROMISC; 2282 2283 /* Revert VLAN filter */ 2284 __rxf_vlan_filter_set(rxf, rxf->vlan_filter_status); 2285 rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_PROMISCUOUS_SET_REQ, 2286 BNA_STATUS_T_DISABLED); 2287 return 1; 2288 } 2289 2290 return 0; 2291} 2292 2293int 2294rxf_clear_packet_filter_allmulti(struct bna_rxf *rxf) 2295{ 2296 /* 10. Execute pending allmulti mode disable command */ 2297 if (is_allmulti_disable(rxf->rxmode_pending, 2298 rxf->rxmode_pending_bitmask)) { 2299 /* move allmulti configuration from pending -> active */ 2300 allmulti_inactive(rxf->rxmode_pending, 2301 rxf->rxmode_pending_bitmask); 2302 rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI; 2303 rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_FILTER_REQ, 2304 BNA_STATUS_T_DISABLED); 2305 return 1; 2306 } 2307 2308 /* 11. Clear active allmulti mode; move it to pending enable */ 2309 if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) { 2310 /* move allmulti configuration from active -> pending */ 2311 allmulti_enable(rxf->rxmode_pending, 2312 rxf->rxmode_pending_bitmask); 2313 rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI; 2314 rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_FILTER_REQ, 2315 BNA_STATUS_T_DISABLED); 2316 return 1; 2317 } 2318 2319 return 0; 2320} 2321 2322void 2323rxf_reset_packet_filter_ucast(struct bna_rxf *rxf) 2324{ 2325 struct list_head *qe; 2326 struct bna_mac *mac; 2327 2328 /* 1. Move active ucast entries to pending_add_q */ 2329 while (!list_empty(&rxf->ucast_active_q)) { 2330 bfa_q_deq(&rxf->ucast_active_q, &qe); 2331 bfa_q_qe_init(qe); 2332 list_add_tail(qe, &rxf->ucast_pending_add_q); 2333 } 2334 2335 /* 2. 
Throw away delete pending ucast entries */ 2336 while (!list_empty(&rxf->ucast_pending_del_q)) { 2337 bfa_q_deq(&rxf->ucast_pending_del_q, &qe); 2338 bfa_q_qe_init(qe); 2339 mac = (struct bna_mac *)qe; 2340 bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac); 2341 } 2342} 2343 2344void 2345rxf_reset_packet_filter_promisc(struct bna_rxf *rxf) 2346{ 2347 struct bna *bna = rxf->rx->bna; 2348 2349 /* 6. Clear pending promisc mode disable */ 2350 if (is_promisc_disable(rxf->rxmode_pending, 2351 rxf->rxmode_pending_bitmask)) { 2352 promisc_inactive(rxf->rxmode_pending, 2353 rxf->rxmode_pending_bitmask); 2354 rxf->rxmode_active &= ~BNA_RXMODE_PROMISC; 2355 bna->rxf_promisc_id = BFI_MAX_RXF; 2356 } 2357 2358 /* 7. Move promisc mode config from active -> pending */ 2359 if (rxf->rxmode_active & BNA_RXMODE_PROMISC) { 2360 promisc_enable(rxf->rxmode_pending, 2361 rxf->rxmode_pending_bitmask); 2362 rxf->rxmode_active &= ~BNA_RXMODE_PROMISC; 2363 } 2364 2365} 2366 2367void 2368rxf_reset_packet_filter_allmulti(struct bna_rxf *rxf) 2369{ 2370 /* 10. Clear pending allmulti mode disable */ 2371 if (is_allmulti_disable(rxf->rxmode_pending, 2372 rxf->rxmode_pending_bitmask)) { 2373 allmulti_inactive(rxf->rxmode_pending, 2374 rxf->rxmode_pending_bitmask); 2375 rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI; 2376 } 2377 2378 /* 11. Move allmulti mode config from active -> pending */ 2379 if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) { 2380 allmulti_enable(rxf->rxmode_pending, 2381 rxf->rxmode_pending_bitmask); 2382 rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI; 2383 } 2384} 2385 2386/** 2387 * Should only be called by bna_rxf_mode_set. 2388 * Helps deciding if h/w configuration is needed or not. 
2389 * Returns: 2390 * 0 = no h/w change 2391 * 1 = need h/w change 2392 */ 2393static int 2394rxf_promisc_enable(struct bna_rxf *rxf) 2395{ 2396 struct bna *bna = rxf->rx->bna; 2397 int ret = 0; 2398 2399 /* There can not be any pending disable command */ 2400 2401 /* Do nothing if pending enable or already enabled */ 2402 if (is_promisc_enable(rxf->rxmode_pending, 2403 rxf->rxmode_pending_bitmask) || 2404 (rxf->rxmode_active & BNA_RXMODE_PROMISC)) { 2405 /* Schedule enable */ 2406 } else { 2407 /* Promisc mode should not be active in the system */ 2408 promisc_enable(rxf->rxmode_pending, 2409 rxf->rxmode_pending_bitmask); 2410 bna->rxf_promisc_id = rxf->rxf_id; 2411 ret = 1; 2412 } 2413 2414 return ret; 2415} 2416 2417/** 2418 * Should only be called by bna_rxf_mode_set. 2419 * Helps deciding if h/w configuration is needed or not. 2420 * Returns: 2421 * 0 = no h/w change 2422 * 1 = need h/w change 2423 */ 2424static int 2425rxf_promisc_disable(struct bna_rxf *rxf) 2426{ 2427 struct bna *bna = rxf->rx->bna; 2428 int ret = 0; 2429 2430 /* There can not be any pending disable */ 2431 2432 /* Turn off pending enable command , if any */ 2433 if (is_promisc_enable(rxf->rxmode_pending, 2434 rxf->rxmode_pending_bitmask)) { 2435 /* Promisc mode should not be active */ 2436 /* system promisc state should be pending */ 2437 promisc_inactive(rxf->rxmode_pending, 2438 rxf->rxmode_pending_bitmask); 2439 /* Remove the promisc state from the system */ 2440 bna->rxf_promisc_id = BFI_MAX_RXF; 2441 2442 /* Schedule disable */ 2443 } else if (rxf->rxmode_active & BNA_RXMODE_PROMISC) { 2444 /* Promisc mode should be active in the system */ 2445 promisc_disable(rxf->rxmode_pending, 2446 rxf->rxmode_pending_bitmask); 2447 ret = 1; 2448 2449 /* Do nothing if already disabled */ 2450 } else { 2451 } 2452 2453 return ret; 2454} 2455 2456/** 2457 * Should only be called by bna_rxf_mode_set. 2458 * Helps deciding if h/w configuration is needed or not. 
2459 * Returns: 2460 * 0 = no h/w change 2461 * 1 = need h/w change 2462 */ 2463static int 2464rxf_allmulti_enable(struct bna_rxf *rxf) 2465{ 2466 int ret = 0; 2467 2468 /* There can not be any pending disable command */ 2469 2470 /* Do nothing if pending enable or already enabled */ 2471 if (is_allmulti_enable(rxf->rxmode_pending, 2472 rxf->rxmode_pending_bitmask) || 2473 (rxf->rxmode_active & BNA_RXMODE_ALLMULTI)) { 2474 /* Schedule enable */ 2475 } else { 2476 allmulti_enable(rxf->rxmode_pending, 2477 rxf->rxmode_pending_bitmask); 2478 ret = 1; 2479 } 2480 2481 return ret; 2482} 2483 2484/** 2485 * Should only be called by bna_rxf_mode_set. 2486 * Helps deciding if h/w configuration is needed or not. 2487 * Returns: 2488 * 0 = no h/w change 2489 * 1 = need h/w change 2490 */ 2491static int 2492rxf_allmulti_disable(struct bna_rxf *rxf) 2493{ 2494 int ret = 0; 2495 2496 /* There can not be any pending disable */ 2497 2498 /* Turn off pending enable command , if any */ 2499 if (is_allmulti_enable(rxf->rxmode_pending, 2500 rxf->rxmode_pending_bitmask)) { 2501 /* Allmulti mode should not be active */ 2502 allmulti_inactive(rxf->rxmode_pending, 2503 rxf->rxmode_pending_bitmask); 2504 2505 /* Schedule disable */ 2506 } else if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) { 2507 allmulti_disable(rxf->rxmode_pending, 2508 rxf->rxmode_pending_bitmask); 2509 ret = 1; 2510 } 2511 2512 return ret; 2513} 2514 2515/* RxF <- bnad */ 2516enum bna_cb_status 2517bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode, 2518 enum bna_rxmode bitmask, 2519 void (*cbfn)(struct bnad *, struct bna_rx *, 2520 enum bna_cb_status)) 2521{ 2522 struct bna_rxf *rxf = &rx->rxf; 2523 int need_hw_config = 0; 2524 2525 /* Process the commands */ 2526 2527 if (is_promisc_enable(new_mode, bitmask)) { 2528 /* If promisc mode is already enabled elsewhere in the system */ 2529 if ((rx->bna->rxf_promisc_id != BFI_MAX_RXF) && 2530 (rx->bna->rxf_promisc_id != rxf->rxf_id)) 2531 goto err_return; 2532 if 
(rxf_promisc_enable(rxf)) 2533 need_hw_config = 1; 2534 } else if (is_promisc_disable(new_mode, bitmask)) { 2535 if (rxf_promisc_disable(rxf)) 2536 need_hw_config = 1; 2537 } 2538 2539 if (is_allmulti_enable(new_mode, bitmask)) { 2540 if (rxf_allmulti_enable(rxf)) 2541 need_hw_config = 1; 2542 } else if (is_allmulti_disable(new_mode, bitmask)) { 2543 if (rxf_allmulti_disable(rxf)) 2544 need_hw_config = 1; 2545 } 2546 2547 /* Trigger h/w if needed */ 2548 2549 if (need_hw_config) { 2550 rxf->cam_fltr_cbfn = cbfn; 2551 rxf->cam_fltr_cbarg = rx->bna->bnad; 2552 bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD); 2553 } else if (cbfn) 2554 (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS); 2555 2556 return BNA_CB_SUCCESS; 2557 2558err_return: 2559 return BNA_CB_FAIL; 2560} 2561 2562void 2563/* RxF <- bnad */ 2564bna_rx_vlanfilter_enable(struct bna_rx *rx) 2565{ 2566 struct bna_rxf *rxf = &rx->rxf; 2567 2568 if (rxf->vlan_filter_status == BNA_STATUS_T_DISABLED) { 2569 rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING; 2570 rxf->vlan_filter_status = BNA_STATUS_T_ENABLED; 2571 bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD); 2572 } 2573} 2574 2575/* Rx */ 2576 2577/* Rx <- bnad */ 2578void 2579bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo) 2580{ 2581 struct bna_rxp *rxp; 2582 struct list_head *qe; 2583 2584 list_for_each(qe, &rx->rxp_q) { 2585 rxp = (struct bna_rxp *)qe; 2586 rxp->cq.ccb->rx_coalescing_timeo = coalescing_timeo; 2587 bna_ib_coalescing_timeo_set(rxp->cq.ib, coalescing_timeo); 2588 } 2589} 2590 2591/* Rx <- bnad */ 2592void 2593bna_rx_dim_reconfig(struct bna *bna, const u32 vector[][BNA_BIAS_T_MAX]) 2594{ 2595 int i, j; 2596 2597 for (i = 0; i < BNA_LOAD_T_MAX; i++) 2598 for (j = 0; j < BNA_BIAS_T_MAX; j++) 2599 bna->rx_mod.dim_vector[i][j] = vector[i][j]; 2600} 2601 2602/* Rx <- bnad */ 2603void 2604bna_rx_dim_update(struct bna_ccb *ccb) 2605{ 2606 struct bna *bna = ccb->cq->rx->bna; 2607 u32 load, bias; 2608 u32 pkt_rt, small_rt, large_rt; 2609 u8 
coalescing_timeo; 2610 2611 if ((ccb->pkt_rate.small_pkt_cnt == 0) && 2612 (ccb->pkt_rate.large_pkt_cnt == 0)) 2613 return; 2614 2615 /* Arrive at preconfigured coalescing timeo value based on pkt rate */ 2616 2617 small_rt = ccb->pkt_rate.small_pkt_cnt; 2618 large_rt = ccb->pkt_rate.large_pkt_cnt; 2619 2620 pkt_rt = small_rt + large_rt; 2621 2622 if (pkt_rt < BNA_PKT_RATE_10K) 2623 load = BNA_LOAD_T_LOW_4; 2624 else if (pkt_rt < BNA_PKT_RATE_20K) 2625 load = BNA_LOAD_T_LOW_3; 2626 else if (pkt_rt < BNA_PKT_RATE_30K) 2627 load = BNA_LOAD_T_LOW_2; 2628 else if (pkt_rt < BNA_PKT_RATE_40K) 2629 load = BNA_LOAD_T_LOW_1; 2630 else if (pkt_rt < BNA_PKT_RATE_50K) 2631 load = BNA_LOAD_T_HIGH_1; 2632 else if (pkt_rt < BNA_PKT_RATE_60K) 2633 load = BNA_LOAD_T_HIGH_2; 2634 else if (pkt_rt < BNA_PKT_RATE_80K) 2635 load = BNA_LOAD_T_HIGH_3; 2636 else 2637 load = BNA_LOAD_T_HIGH_4; 2638 2639 if (small_rt > (large_rt << 1)) 2640 bias = 0; 2641 else 2642 bias = 1; 2643 2644 ccb->pkt_rate.small_pkt_cnt = 0; 2645 ccb->pkt_rate.large_pkt_cnt = 0; 2646 2647 coalescing_timeo = bna->rx_mod.dim_vector[load][bias]; 2648 ccb->rx_coalescing_timeo = coalescing_timeo; 2649 2650 /* Set it to IB */ 2651 bna_ib_coalescing_timeo_set(ccb->cq->ib, coalescing_timeo); 2652} 2653 2654/* Tx */ 2655/* TX <- bnad */ 2656void 2657bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo) 2658{ 2659 struct bna_txq *txq; 2660 struct list_head *qe; 2661 2662 list_for_each(qe, &tx->txq_q) { 2663 txq = (struct bna_txq *)qe; 2664 bna_ib_coalescing_timeo_set(txq->ib, coalescing_timeo); 2665 } 2666} 2667 2668/* 2669 * Private data 2670 */ 2671 2672struct bna_ritseg_pool_cfg { 2673 u32 pool_size; 2674 u32 pool_entry_size; 2675}; 2676init_ritseg_pool(ritseg_pool_cfg); 2677 2678/* 2679 * Private functions 2680 */ 2681static void 2682bna_ucam_mod_init(struct bna_ucam_mod *ucam_mod, struct bna *bna, 2683 struct bna_res_info *res_info) 2684{ 2685 int i; 2686 2687 ucam_mod->ucmac = (struct bna_mac *) 2688 
res_info[BNA_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mdl[0].kva; 2689 2690 INIT_LIST_HEAD(&ucam_mod->free_q); 2691 for (i = 0; i < BFI_MAX_UCMAC; i++) { 2692 bfa_q_qe_init(&ucam_mod->ucmac[i].qe); 2693 list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->free_q); 2694 } 2695 2696 ucam_mod->bna = bna; 2697} 2698 2699static void 2700bna_ucam_mod_uninit(struct bna_ucam_mod *ucam_mod) 2701{ 2702 struct list_head *qe; 2703 int i = 0; 2704 2705 list_for_each(qe, &ucam_mod->free_q) 2706 i++; 2707 2708 ucam_mod->bna = NULL; 2709} 2710 2711static void 2712bna_mcam_mod_init(struct bna_mcam_mod *mcam_mod, struct bna *bna, 2713 struct bna_res_info *res_info) 2714{ 2715 int i; 2716 2717 mcam_mod->mcmac = (struct bna_mac *) 2718 res_info[BNA_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mdl[0].kva; 2719 2720 INIT_LIST_HEAD(&mcam_mod->free_q); 2721 for (i = 0; i < BFI_MAX_MCMAC; i++) { 2722 bfa_q_qe_init(&mcam_mod->mcmac[i].qe); 2723 list_add_tail(&mcam_mod->mcmac[i].qe, &mcam_mod->free_q); 2724 } 2725 2726 mcam_mod->bna = bna; 2727} 2728 2729static void 2730bna_mcam_mod_uninit(struct bna_mcam_mod *mcam_mod) 2731{ 2732 struct list_head *qe; 2733 int i = 0; 2734 2735 list_for_each(qe, &mcam_mod->free_q) 2736 i++; 2737 2738 mcam_mod->bna = NULL; 2739} 2740 2741static void 2742bna_rit_mod_init(struct bna_rit_mod *rit_mod, 2743 struct bna_res_info *res_info) 2744{ 2745 int i; 2746 int j; 2747 int count; 2748 int offset; 2749 2750 rit_mod->rit = (struct bna_rit_entry *) 2751 res_info[BNA_RES_MEM_T_RIT_ENTRY].res_u.mem_info.mdl[0].kva; 2752 rit_mod->rit_segment = (struct bna_rit_segment *) 2753 res_info[BNA_RES_MEM_T_RIT_SEGMENT].res_u.mem_info.mdl[0].kva; 2754 2755 count = 0; 2756 offset = 0; 2757 for (i = 0; i < BFI_RIT_SEG_TOTAL_POOLS; i++) { 2758 INIT_LIST_HEAD(&rit_mod->rit_seg_pool[i]); 2759 for (j = 0; j < ritseg_pool_cfg[i].pool_size; j++) { 2760 bfa_q_qe_init(&rit_mod->rit_segment[count].qe); 2761 rit_mod->rit_segment[count].max_rit_size = 2762 ritseg_pool_cfg[i].pool_entry_size; 2763 
rit_mod->rit_segment[count].rit_offset = offset; 2764 rit_mod->rit_segment[count].rit = 2765 &rit_mod->rit[offset]; 2766 list_add_tail(&rit_mod->rit_segment[count].qe, 2767 &rit_mod->rit_seg_pool[i]); 2768 count++; 2769 offset += ritseg_pool_cfg[i].pool_entry_size; 2770 } 2771 } 2772} 2773 2774/* 2775 * Public functions 2776 */ 2777 2778/* Called during probe(), before calling bna_init() */ 2779void 2780bna_res_req(struct bna_res_info *res_info) 2781{ 2782 bna_adv_res_req(res_info); 2783 2784 /* DMA memory for retrieving IOC attributes */ 2785 res_info[BNA_RES_MEM_T_ATTR].res_type = BNA_RES_T_MEM; 2786 res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mem_type = BNA_MEM_T_DMA; 2787 res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.num = 1; 2788 res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.len = 2789 ALIGN(bfa_nw_ioc_meminfo(), PAGE_SIZE); 2790 2791 /* DMA memory for index segment of an IB */ 2792 res_info[BNA_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM; 2793 res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.mem_type = BNA_MEM_T_DMA; 2794 res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.len = 2795 BFI_IBIDX_SIZE * BFI_IBIDX_MAX_SEGSIZE; 2796 res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.num = BFI_MAX_IB; 2797 2798 /* Virtual memory for IB objects - stored by IB module */ 2799 res_info[BNA_RES_MEM_T_IB_ARRAY].res_type = BNA_RES_T_MEM; 2800 res_info[BNA_RES_MEM_T_IB_ARRAY].res_u.mem_info.mem_type = 2801 BNA_MEM_T_KVA; 2802 res_info[BNA_RES_MEM_T_IB_ARRAY].res_u.mem_info.num = 1; 2803 res_info[BNA_RES_MEM_T_IB_ARRAY].res_u.mem_info.len = 2804 BFI_MAX_IB * sizeof(struct bna_ib); 2805 2806 /* Virtual memory for intr objects - stored by IB module */ 2807 res_info[BNA_RES_MEM_T_INTR_ARRAY].res_type = BNA_RES_T_MEM; 2808 res_info[BNA_RES_MEM_T_INTR_ARRAY].res_u.mem_info.mem_type = 2809 BNA_MEM_T_KVA; 2810 res_info[BNA_RES_MEM_T_INTR_ARRAY].res_u.mem_info.num = 1; 2811 res_info[BNA_RES_MEM_T_INTR_ARRAY].res_u.mem_info.len = 2812 BFI_MAX_IB * sizeof(struct bna_intr); 2813 2814 /* Virtual memory for 
idx_seg objects - stored by IB module */ 2815 res_info[BNA_RES_MEM_T_IDXSEG_ARRAY].res_type = BNA_RES_T_MEM; 2816 res_info[BNA_RES_MEM_T_IDXSEG_ARRAY].res_u.mem_info.mem_type = 2817 BNA_MEM_T_KVA; 2818 res_info[BNA_RES_MEM_T_IDXSEG_ARRAY].res_u.mem_info.num = 1; 2819 res_info[BNA_RES_MEM_T_IDXSEG_ARRAY].res_u.mem_info.len = 2820 BFI_IBIDX_TOTAL_SEGS * sizeof(struct bna_ibidx_seg); 2821 2822 /* Virtual memory for Tx objects - stored by Tx module */ 2823 res_info[BNA_RES_MEM_T_TX_ARRAY].res_type = BNA_RES_T_MEM; 2824 res_info[BNA_RES_MEM_T_TX_ARRAY].res_u.mem_info.mem_type = 2825 BNA_MEM_T_KVA; 2826 res_info[BNA_RES_MEM_T_TX_ARRAY].res_u.mem_info.num = 1; 2827 res_info[BNA_RES_MEM_T_TX_ARRAY].res_u.mem_info.len = 2828 BFI_MAX_TXQ * sizeof(struct bna_tx); 2829 2830 /* Virtual memory for TxQ - stored by Tx module */ 2831 res_info[BNA_RES_MEM_T_TXQ_ARRAY].res_type = BNA_RES_T_MEM; 2832 res_info[BNA_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mem_type = 2833 BNA_MEM_T_KVA; 2834 res_info[BNA_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.num = 1; 2835 res_info[BNA_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.len = 2836 BFI_MAX_TXQ * sizeof(struct bna_txq); 2837 2838 /* Virtual memory for Rx objects - stored by Rx module */ 2839 res_info[BNA_RES_MEM_T_RX_ARRAY].res_type = BNA_RES_T_MEM; 2840 res_info[BNA_RES_MEM_T_RX_ARRAY].res_u.mem_info.mem_type = 2841 BNA_MEM_T_KVA; 2842 res_info[BNA_RES_MEM_T_RX_ARRAY].res_u.mem_info.num = 1; 2843 res_info[BNA_RES_MEM_T_RX_ARRAY].res_u.mem_info.len = 2844 BFI_MAX_RXQ * sizeof(struct bna_rx); 2845 2846 /* Virtual memory for RxPath - stored by Rx module */ 2847 res_info[BNA_RES_MEM_T_RXP_ARRAY].res_type = BNA_RES_T_MEM; 2848 res_info[BNA_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mem_type = 2849 BNA_MEM_T_KVA; 2850 res_info[BNA_RES_MEM_T_RXP_ARRAY].res_u.mem_info.num = 1; 2851 res_info[BNA_RES_MEM_T_RXP_ARRAY].res_u.mem_info.len = 2852 BFI_MAX_RXQ * sizeof(struct bna_rxp); 2853 2854 /* Virtual memory for RxQ - stored by Rx module */ 2855 
res_info[BNA_RES_MEM_T_RXQ_ARRAY].res_type = BNA_RES_T_MEM; 2856 res_info[BNA_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mem_type = 2857 BNA_MEM_T_KVA; 2858 res_info[BNA_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.num = 1; 2859 res_info[BNA_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.len = 2860 BFI_MAX_RXQ * sizeof(struct bna_rxq); 2861 2862 /* Virtual memory for Unicast MAC address - stored by ucam module */ 2863 res_info[BNA_RES_MEM_T_UCMAC_ARRAY].res_type = BNA_RES_T_MEM; 2864 res_info[BNA_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mem_type = 2865 BNA_MEM_T_KVA; 2866 res_info[BNA_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.num = 1; 2867 res_info[BNA_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.len = 2868 BFI_MAX_UCMAC * sizeof(struct bna_mac); 2869 2870 /* Virtual memory for Multicast MAC address - stored by mcam module */ 2871 res_info[BNA_RES_MEM_T_MCMAC_ARRAY].res_type = BNA_RES_T_MEM; 2872 res_info[BNA_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mem_type = 2873 BNA_MEM_T_KVA; 2874 res_info[BNA_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.num = 1; 2875 res_info[BNA_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.len = 2876 BFI_MAX_MCMAC * sizeof(struct bna_mac); 2877 2878 /* Virtual memory for RIT entries */ 2879 res_info[BNA_RES_MEM_T_RIT_ENTRY].res_type = BNA_RES_T_MEM; 2880 res_info[BNA_RES_MEM_T_RIT_ENTRY].res_u.mem_info.mem_type = 2881 BNA_MEM_T_KVA; 2882 res_info[BNA_RES_MEM_T_RIT_ENTRY].res_u.mem_info.num = 1; 2883 res_info[BNA_RES_MEM_T_RIT_ENTRY].res_u.mem_info.len = 2884 BFI_MAX_RIT_SIZE * sizeof(struct bna_rit_entry); 2885 2886 /* Virtual memory for RIT segment table */ 2887 res_info[BNA_RES_MEM_T_RIT_SEGMENT].res_type = BNA_RES_T_MEM; 2888 res_info[BNA_RES_MEM_T_RIT_SEGMENT].res_u.mem_info.mem_type = 2889 BNA_MEM_T_KVA; 2890 res_info[BNA_RES_MEM_T_RIT_SEGMENT].res_u.mem_info.num = 1; 2891 res_info[BNA_RES_MEM_T_RIT_SEGMENT].res_u.mem_info.len = 2892 BFI_RIT_TOTAL_SEGS * sizeof(struct bna_rit_segment); 2893 2894 /* Interrupt resource for mailbox interrupt */ 2895 res_info[BNA_RES_INTR_T_MBOX].res_type = 
BNA_RES_T_INTR; 2896 res_info[BNA_RES_INTR_T_MBOX].res_u.intr_info.intr_type = 2897 BNA_INTR_T_MSIX; 2898 res_info[BNA_RES_INTR_T_MBOX].res_u.intr_info.num = 1; 2899} 2900 2901/* Called during probe() */ 2902void 2903bna_init(struct bna *bna, struct bnad *bnad, struct bfa_pcidev *pcidev, 2904 struct bna_res_info *res_info) 2905{ 2906 bna->bnad = bnad; 2907 bna->pcidev = *pcidev; 2908 2909 bna->stats.hw_stats = (struct bfi_ll_stats *) 2910 res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].kva; 2911 bna->hw_stats_dma.msb = 2912 res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.msb; 2913 bna->hw_stats_dma.lsb = 2914 res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.lsb; 2915 bna->stats.sw_stats = (struct bna_sw_stats *) 2916 res_info[BNA_RES_MEM_T_SWSTATS].res_u.mem_info.mdl[0].kva; 2917 2918 bna->regs.page_addr = bna->pcidev.pci_bar_kva + 2919 reg_offset[bna->pcidev.pci_func].page_addr; 2920 bna->regs.fn_int_status = bna->pcidev.pci_bar_kva + 2921 reg_offset[bna->pcidev.pci_func].fn_int_status; 2922 bna->regs.fn_int_mask = bna->pcidev.pci_bar_kva + 2923 reg_offset[bna->pcidev.pci_func].fn_int_mask; 2924 2925 if (bna->pcidev.pci_func < 3) 2926 bna->port_num = 0; 2927 else 2928 bna->port_num = 1; 2929 2930 /* Also initializes diag, cee, sfp, phy_port and mbox_mod */ 2931 bna_device_init(&bna->device, bna, res_info); 2932 2933 bna_port_init(&bna->port, bna); 2934 2935 bna_tx_mod_init(&bna->tx_mod, bna, res_info); 2936 2937 bna_rx_mod_init(&bna->rx_mod, bna, res_info); 2938 2939 bna_ib_mod_init(&bna->ib_mod, bna, res_info); 2940 2941 bna_rit_mod_init(&bna->rit_mod, res_info); 2942 2943 bna_ucam_mod_init(&bna->ucam_mod, bna, res_info); 2944 2945 bna_mcam_mod_init(&bna->mcam_mod, bna, res_info); 2946 2947 bna->rxf_promisc_id = BFI_MAX_RXF; 2948 2949 /* Mbox q element for posting stat request to f/w */ 2950 bfa_q_qe_init(&bna->mbox_qe.qe); 2951} 2952 2953void 2954bna_uninit(struct bna *bna) 2955{ 2956 bna_mcam_mod_uninit(&bna->mcam_mod); 2957 2958 
bna_ucam_mod_uninit(&bna->ucam_mod); 2959 2960 bna_ib_mod_uninit(&bna->ib_mod); 2961 2962 bna_rx_mod_uninit(&bna->rx_mod); 2963 2964 bna_tx_mod_uninit(&bna->tx_mod); 2965 2966 bna_port_uninit(&bna->port); 2967 2968 bna_device_uninit(&bna->device); 2969 2970 bna->bnad = NULL; 2971} 2972 2973struct bna_mac * 2974bna_ucam_mod_mac_get(struct bna_ucam_mod *ucam_mod) 2975{ 2976 struct list_head *qe; 2977 2978 if (list_empty(&ucam_mod->free_q)) 2979 return NULL; 2980 2981 bfa_q_deq(&ucam_mod->free_q, &qe); 2982 2983 return (struct bna_mac *)qe; 2984} 2985 2986void 2987bna_ucam_mod_mac_put(struct bna_ucam_mod *ucam_mod, struct bna_mac *mac) 2988{ 2989 list_add_tail(&mac->qe, &ucam_mod->free_q); 2990} 2991 2992struct bna_mac * 2993bna_mcam_mod_mac_get(struct bna_mcam_mod *mcam_mod) 2994{ 2995 struct list_head *qe; 2996 2997 if (list_empty(&mcam_mod->free_q)) 2998 return NULL; 2999 3000 bfa_q_deq(&mcam_mod->free_q, &qe); 3001 3002 return (struct bna_mac *)qe; 3003} 3004 3005void 3006bna_mcam_mod_mac_put(struct bna_mcam_mod *mcam_mod, struct bna_mac *mac) 3007{ 3008 list_add_tail(&mac->qe, &mcam_mod->free_q); 3009} 3010 3011/** 3012 * Note: This should be called in the same locking context as the call to 3013 * bna_rit_mod_seg_get() 3014 */ 3015int 3016bna_rit_mod_can_satisfy(struct bna_rit_mod *rit_mod, int seg_size) 3017{ 3018 int i; 3019 3020 /* Select the pool for seg_size */ 3021 for (i = 0; i < BFI_RIT_SEG_TOTAL_POOLS; i++) { 3022 if (seg_size <= ritseg_pool_cfg[i].pool_entry_size) 3023 break; 3024 } 3025 3026 if (i == BFI_RIT_SEG_TOTAL_POOLS) 3027 return 0; 3028 3029 if (list_empty(&rit_mod->rit_seg_pool[i])) 3030 return 0; 3031 3032 return 1; 3033} 3034 3035struct bna_rit_segment * 3036bna_rit_mod_seg_get(struct bna_rit_mod *rit_mod, int seg_size) 3037{ 3038 struct bna_rit_segment *seg; 3039 struct list_head *qe; 3040 int i; 3041 3042 /* Select the pool for seg_size */ 3043 for (i = 0; i < BFI_RIT_SEG_TOTAL_POOLS; i++) { 3044 if (seg_size <= 
ritseg_pool_cfg[i].pool_entry_size) 3045 break; 3046 } 3047 3048 if (i == BFI_RIT_SEG_TOTAL_POOLS) 3049 return NULL; 3050 3051 if (list_empty(&rit_mod->rit_seg_pool[i])) 3052 return NULL; 3053 3054 bfa_q_deq(&rit_mod->rit_seg_pool[i], &qe); 3055 seg = (struct bna_rit_segment *)qe; 3056 bfa_q_qe_init(&seg->qe); 3057 seg->rit_size = seg_size; 3058 3059 return seg; 3060} 3061 3062void 3063bna_rit_mod_seg_put(struct bna_rit_mod *rit_mod, 3064 struct bna_rit_segment *seg) 3065{ 3066 int i; 3067 3068 /* Select the pool for seg->max_rit_size */ 3069 for (i = 0; i < BFI_RIT_SEG_TOTAL_POOLS; i++) { 3070 if (seg->max_rit_size == ritseg_pool_cfg[i].pool_entry_size) 3071 break; 3072 } 3073 3074 seg->rit_size = 0; 3075 list_add_tail(&seg->qe, &rit_mod->rit_seg_pool[i]); 3076}