Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v3.0-rc7, 3288 lines, 81 kB
/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/in.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/prefetch.h>

#include "bnad.h"
#include "bna.h"
#include "cna.h"

static DEFINE_MUTEX(bnad_fwimg_mutex);

/*
 * Module params
 */
static uint bnad_msix_disable;
module_param(bnad_msix_disable, uint, 0444);
MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode");

static uint bnad_ioc_auto_recover = 1;
module_param(bnad_ioc_auto_recover, uint, 0444);
MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");

/*
 * Global variables
 */
u32 bnad_rxqs_per_cq = 2;

static const u8 bnad_bcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

/*
 * Local MACROS
 */
#define BNAD_TX_UNMAPQ_DEPTH	(bnad->txq_depth * 2)

#define BNAD_RX_UNMAPQ_DEPTH	(bnad->rxq_depth)

#define BNAD_GET_MBOX_IRQ(_bnad)				\
	(((_bnad)->cfg_flags & BNAD_CF_MSIX) ?			\
	 ((_bnad)->msix_table[(_bnad)->msix_num - 1].vector) :	\
	 ((_bnad)->pcidev->irq))

#define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _depth)	\
do {								\
	(_res_info)->res_type = BNA_RES_T_MEM;			\
	(_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA;	\
	(_res_info)->res_u.mem_info.num = (_num);		\
	(_res_info)->res_u.mem_info.len =			\
	sizeof(struct bnad_unmap_q) +				\
	(sizeof(struct bnad_skb_unmap) * ((_depth) - 1));	\
} while (0)

#define BNAD_TXRX_SYNC_MDELAY	250	/* 250 msecs */

/*
 * Reinitialize completions in CQ, once Rx is taken down
 */
static void
bnad_cq_cmpl_init(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bna_cq_entry *cmpl, *next_cmpl;
	unsigned int wi_range, wis = 0, ccb_prod = 0;
	int i;

	BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt, cmpl, wi_range);

	for (i = 0; i < ccb->q_depth; i++) {
		wis++;
		if (likely(--wi_range))
			next_cmpl = cmpl + 1;
		else {
			BNA_QE_INDX_ADD(ccb_prod, wis, ccb->q_depth);
			wis = 0;
			BNA_CQ_QPGE_PTR_GET(ccb_prod, ccb->sw_qpt,
					    next_cmpl, wi_range);
		}
		cmpl->valid = 0;
		cmpl = next_cmpl;
	}
}

/*
 * Frees all pending Tx Bufs
 * At this point no activity is expected on the Q,
 * so DMA unmap & freeing is fine.
 */
static void
bnad_free_all_txbufs(struct bnad *bnad, struct bna_tcb *tcb)
{
	u32 unmap_cons;
	struct bnad_unmap_q *unmap_q = tcb->unmap_q;
	struct bnad_skb_unmap *unmap_array;
	struct sk_buff *skb = NULL;
	int i;

	unmap_array = unmap_q->unmap_array;

	unmap_cons = 0;
	while (unmap_cons < unmap_q->q_depth) {
		skb = unmap_array[unmap_cons].skb;
		if (!skb) {
			unmap_cons++;
			continue;
		}
		unmap_array[unmap_cons].skb = NULL;

		dma_unmap_single(&bnad->pcidev->dev,
				 dma_unmap_addr(&unmap_array[unmap_cons],
						dma_addr), skb_headlen(skb),
				 DMA_TO_DEVICE);

		dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
		if (++unmap_cons >= unmap_q->q_depth)
			break;

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			dma_unmap_page(&bnad->pcidev->dev,
				       dma_unmap_addr(&unmap_array[unmap_cons],
						      dma_addr),
				       skb_shinfo(skb)->frags[i].size,
				       DMA_TO_DEVICE);
			dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
					   0);
			if (++unmap_cons >= unmap_q->q_depth)
				break;
		}
		dev_kfree_skb_any(skb);
	}
}

/* Data Path Handlers */

/*
 * bnad_free_txbufs : Frees the Tx bufs on Tx completion
 * Can be called in a) Interrupt context
 *		    b) Sending context
 *		    c) Tasklet context
 */
static u32
bnad_free_txbufs(struct bnad *bnad, struct bna_tcb *tcb)
{
	u32 sent_packets = 0, sent_bytes = 0;
	u16 wis, unmap_cons, updated_hw_cons;
	struct bnad_unmap_q *unmap_q = tcb->unmap_q;
	struct bnad_skb_unmap *unmap_array;
	struct sk_buff *skb;
	int i;

	/*
	 * Just return if TX is stopped. This check is useful
	 * when bnad_free_txbufs() runs out of a tasklet scheduled
	 * before bnad_cb_tx_cleanup() cleared BNAD_TXQ_TX_STARTED bit
	 * but this routine actually runs after the cleanup has been
	 * executed.
	 */
	if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
		return 0;

	updated_hw_cons = *(tcb->hw_consumer_index);

	wis = BNA_Q_INDEX_CHANGE(tcb->consumer_index,
				 updated_hw_cons, tcb->q_depth);

	BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth)));

	unmap_array = unmap_q->unmap_array;
	unmap_cons = unmap_q->consumer_index;

	prefetch(&unmap_array[unmap_cons + 1]);
	while (wis) {
		skb = unmap_array[unmap_cons].skb;

		unmap_array[unmap_cons].skb = NULL;

		sent_packets++;
		sent_bytes += skb->len;
		wis -= BNA_TXQ_WI_NEEDED(1 + skb_shinfo(skb)->nr_frags);

		dma_unmap_single(&bnad->pcidev->dev,
				 dma_unmap_addr(&unmap_array[unmap_cons],
						dma_addr), skb_headlen(skb),
				 DMA_TO_DEVICE);
		dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
		BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);

		prefetch(&unmap_array[unmap_cons + 1]);
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			prefetch(&unmap_array[unmap_cons + 1]);

			dma_unmap_page(&bnad->pcidev->dev,
				       dma_unmap_addr(&unmap_array[unmap_cons],
						      dma_addr),
				       skb_shinfo(skb)->frags[i].size,
				       DMA_TO_DEVICE);
			dma_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
					   0);
			BNA_QE_INDX_ADD(unmap_cons, 1, unmap_q->q_depth);
		}
		dev_kfree_skb_any(skb);
	}

	/* Update consumer pointers. */
	tcb->consumer_index = updated_hw_cons;
	unmap_q->consumer_index = unmap_cons;

	tcb->txq->tx_packets += sent_packets;
	tcb->txq->tx_bytes += sent_bytes;

	return sent_packets;
}

/* Tx Free Tasklet function */
/* Frees for all the tcb's in all the Tx's */
/*
 * Scheduled from sending context, so that
 * the fat Tx lock is not held for too long
 * in the sending context.
 */
static void
bnad_tx_free_tasklet(unsigned long bnad_ptr)
{
	struct bnad *bnad = (struct bnad *)bnad_ptr;
	struct bna_tcb *tcb;
	u32 acked = 0;
	int i, j;

	for (i = 0; i < bnad->num_tx; i++) {
		for (j = 0; j < bnad->num_txq_per_tx; j++) {
			tcb = bnad->tx_info[i].tcb[j];
			if (!tcb)
				continue;
			if (((u16) (*tcb->hw_consumer_index) !=
				tcb->consumer_index) &&
				(!test_and_set_bit(BNAD_TXQ_FREE_SENT,
						   &tcb->flags))) {
				acked = bnad_free_txbufs(bnad, tcb);
				if (likely(test_bit(BNAD_TXQ_TX_STARTED,
					&tcb->flags)))
					bna_ib_ack(tcb->i_dbell, acked);
				smp_mb__before_clear_bit();
				clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
			}
			if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED,
						&tcb->flags)))
				continue;
			if (netif_queue_stopped(bnad->netdev)) {
				if (acked && netif_carrier_ok(bnad->netdev) &&
					BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
					BNAD_NETIF_WAKE_THRESHOLD) {
					netif_wake_queue(bnad->netdev);
					/* TODO */
					/* Counters for individual TxQs? */
					BNAD_UPDATE_CTR(bnad,
						netif_queue_wakeup);
				}
			}
		}
	}
}

static u32
bnad_tx(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct net_device *netdev = bnad->netdev;
	u32 sent = 0;

	if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
		return 0;

	sent = bnad_free_txbufs(bnad, tcb);
	if (sent) {
		if (netif_queue_stopped(netdev) &&
		    netif_carrier_ok(netdev) &&
		    BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
		    BNAD_NETIF_WAKE_THRESHOLD) {
			if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
				netif_wake_queue(netdev);
				BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
			}
		}
	}

	if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
		bna_ib_ack(tcb->i_dbell, sent);

	smp_mb__before_clear_bit();
	clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);

	return sent;
}

/* MSIX Tx Completion Handler */
static irqreturn_t
bnad_msix_tx(int irq, void *data)
{
	struct bna_tcb *tcb = (struct bna_tcb *)data;
	struct bnad *bnad = tcb->bnad;

	bnad_tx(bnad, tcb);

	return IRQ_HANDLED;
}

static void
bnad_reset_rcb(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_unmap_q *unmap_q = rcb->unmap_q;

	rcb->producer_index = 0;
	rcb->consumer_index = 0;

	unmap_q->producer_index = 0;
	unmap_q->consumer_index = 0;
}

static void
bnad_free_all_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_unmap_q *unmap_q;
	struct bnad_skb_unmap *unmap_array;
	struct sk_buff *skb;
	int unmap_cons;

	unmap_q = rcb->unmap_q;
	unmap_array = unmap_q->unmap_array;
	for (unmap_cons = 0; unmap_cons < unmap_q->q_depth; unmap_cons++) {
		skb = unmap_array[unmap_cons].skb;
		if (!skb)
			continue;
		unmap_array[unmap_cons].skb = NULL;
		dma_unmap_single(&bnad->pcidev->dev,
				 dma_unmap_addr(&unmap_array[unmap_cons],
						dma_addr),
				 rcb->rxq->buffer_size,
				 DMA_FROM_DEVICE);
		dev_kfree_skb(skb);
	}
	bnad_reset_rcb(bnad, rcb);
}

static void
bnad_alloc_n_post_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
{
	u16 to_alloc, alloced, unmap_prod, wi_range;
	struct bnad_unmap_q *unmap_q = rcb->unmap_q;
	struct bnad_skb_unmap *unmap_array;
	struct bna_rxq_entry *rxent;
	struct sk_buff *skb;
	dma_addr_t dma_addr;

	alloced = 0;
	to_alloc = BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth);

	unmap_array = unmap_q->unmap_array;
	unmap_prod = unmap_q->producer_index;

	BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent, wi_range);

	while (to_alloc--) {
		if (!wi_range) {
			BNA_RXQ_QPGE_PTR_GET(unmap_prod, rcb->sw_qpt, rxent,
					     wi_range);
		}
		skb = alloc_skb(rcb->rxq->buffer_size + NET_IP_ALIGN,
				GFP_ATOMIC);
		if (unlikely(!skb)) {
			BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
			goto finishing;
		}
		skb->dev = bnad->netdev;
		skb_reserve(skb, NET_IP_ALIGN);
		unmap_array[unmap_prod].skb = skb;
		dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
					  rcb->rxq->buffer_size,
					  DMA_FROM_DEVICE);
		dma_unmap_addr_set(&unmap_array[unmap_prod], dma_addr,
				   dma_addr);
		BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
		BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth);

		rxent++;
		wi_range--;
		alloced++;
	}

finishing:
	if (likely(alloced)) {
		unmap_q->producer_index = unmap_prod;
		rcb->producer_index = unmap_prod;
		smp_mb();
		if (likely(test_bit(BNAD_RXQ_STARTED, &rcb->flags)))
			bna_rxq_prod_indx_doorbell(rcb);
	}
}

static inline void
bnad_refill_rxq(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_unmap_q *unmap_q = rcb->unmap_q;

	if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
		if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
			 >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
			bnad_alloc_n_post_rxbufs(bnad, rcb);
		smp_mb__before_clear_bit();
		clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
	}
}

static u32
bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
{
	struct bna_cq_entry *cmpl, *next_cmpl;
	struct bna_rcb *rcb = NULL;
	unsigned int wi_range, packets = 0, wis = 0;
	struct bnad_unmap_q *unmap_q;
	struct bnad_skb_unmap *unmap_array;
	struct sk_buff *skb;
	u32 flags, unmap_cons;
	u32 qid0 = ccb->rcb[0]->rxq->rxq_id;
	struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;

	if (!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags))
		return 0;

	prefetch(bnad->netdev);
	BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt, cmpl,
			    wi_range);
	BUG_ON(!(wi_range <= ccb->q_depth));
	while (cmpl->valid && packets < budget) {
		packets++;
		BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));

		if (qid0 == cmpl->rxq_id)
			rcb = ccb->rcb[0];
		else
			rcb = ccb->rcb[1];

		unmap_q = rcb->unmap_q;
		unmap_array = unmap_q->unmap_array;
		unmap_cons = unmap_q->consumer_index;

		skb = unmap_array[unmap_cons].skb;
		BUG_ON(!(skb));
		unmap_array[unmap_cons].skb = NULL;
		dma_unmap_single(&bnad->pcidev->dev,
				 dma_unmap_addr(&unmap_array[unmap_cons],
						dma_addr),
				 rcb->rxq->buffer_size,
				 DMA_FROM_DEVICE);
		BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);

		/* Should be more efficient ? Performance ? */
		BNA_QE_INDX_ADD(rcb->consumer_index, 1, rcb->q_depth);

		wis++;
		if (likely(--wi_range))
			next_cmpl = cmpl + 1;
		else {
			BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
			wis = 0;
			BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt,
					    next_cmpl, wi_range);
			BUG_ON(!(wi_range <= ccb->q_depth));
		}
		prefetch(next_cmpl);

		flags = ntohl(cmpl->flags);
		if (unlikely
		    (flags &
		     (BNA_CQ_EF_MAC_ERROR | BNA_CQ_EF_FCS_ERROR |
		      BNA_CQ_EF_TOO_LONG))) {
			dev_kfree_skb_any(skb);
			rcb->rxq->rx_packets_with_error++;
			goto next;
		}

		skb_put(skb, ntohs(cmpl->length));
		if (likely
		    ((bnad->netdev->features & NETIF_F_RXCSUM) &&
		     (((flags & BNA_CQ_EF_IPV4) &&
		       (flags & BNA_CQ_EF_L3_CKSUM_OK)) ||
		      (flags & BNA_CQ_EF_IPV6)) &&
		     (flags & (BNA_CQ_EF_TCP | BNA_CQ_EF_UDP)) &&
		     (flags & BNA_CQ_EF_L4_CKSUM_OK)))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		rcb->rxq->rx_packets++;
		rcb->rxq->rx_bytes += skb->len;
		skb->protocol = eth_type_trans(skb, bnad->netdev);

		if (bnad->vlan_grp && (flags & BNA_CQ_EF_VLAN)) {
			struct bnad_rx_ctrl *rx_ctrl =
				(struct bnad_rx_ctrl *)ccb->ctrl;
			if (skb->ip_summed == CHECKSUM_UNNECESSARY)
				vlan_gro_receive(&rx_ctrl->napi, bnad->vlan_grp,
						 ntohs(cmpl->vlan_tag), skb);
			else
				vlan_hwaccel_receive_skb(skb,
							 bnad->vlan_grp,
							 ntohs(cmpl->vlan_tag));

		} else { /* Not VLAN tagged/stripped */
			struct bnad_rx_ctrl *rx_ctrl =
				(struct bnad_rx_ctrl *)ccb->ctrl;
			if (skb->ip_summed == CHECKSUM_UNNECESSARY)
				napi_gro_receive(&rx_ctrl->napi, skb);
			else
				netif_receive_skb(skb);
		}

next:
		cmpl->valid = 0;
		cmpl = next_cmpl;
	}

	BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);

	if (likely(ccb)) {
		if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
			bna_ib_ack(ccb->i_dbell, packets);
		bnad_refill_rxq(bnad, ccb->rcb[0]);
		if (ccb->rcb[1])
			bnad_refill_rxq(bnad, ccb->rcb[1]);
	} else {
		if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
			bna_ib_ack(ccb->i_dbell, 0);
	}

	return packets;
}

static void
bnad_disable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
{
	if (unlikely(!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
		return;

	bna_ib_coalescing_timer_set(ccb->i_dbell, 0);
	bna_ib_ack(ccb->i_dbell, 0);
}

static void
bnad_enable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
{
	unsigned long flags;

	/* Because of polling context */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bnad_enable_rx_irq_unsafe(ccb);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

static void
bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
	struct napi_struct *napi = &rx_ctrl->napi;

	if (likely(napi_schedule_prep(napi))) {
		bnad_disable_rx_irq(bnad, ccb);
		__napi_schedule(napi);
	}
	BNAD_UPDATE_CTR(bnad, netif_rx_schedule);
}

/* MSIX Rx Path Handler */
static irqreturn_t
bnad_msix_rx(int irq, void *data)
{
	struct bna_ccb *ccb = (struct bna_ccb *)data;
	struct bnad *bnad = ccb->bnad;

	bnad_netif_rx_schedule_poll(bnad, ccb);

	return IRQ_HANDLED;
}

/* Interrupt handlers */

/* Mbox Interrupt Handlers */
static irqreturn_t
bnad_msix_mbox_handler(int irq, void *data)
void *data) 611{ 612 u32 intr_status; 613 unsigned long flags; 614 struct bnad *bnad = (struct bnad *)data; 615 616 if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) 617 return IRQ_HANDLED; 618 619 spin_lock_irqsave(&bnad->bna_lock, flags); 620 621 bna_intr_status_get(&bnad->bna, intr_status); 622 623 if (BNA_IS_MBOX_ERR_INTR(intr_status)) 624 bna_mbox_handler(&bnad->bna, intr_status); 625 626 spin_unlock_irqrestore(&bnad->bna_lock, flags); 627 628 return IRQ_HANDLED; 629} 630 631static irqreturn_t 632bnad_isr(int irq, void *data) 633{ 634 int i, j; 635 u32 intr_status; 636 unsigned long flags; 637 struct bnad *bnad = (struct bnad *)data; 638 struct bnad_rx_info *rx_info; 639 struct bnad_rx_ctrl *rx_ctrl; 640 641 if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) 642 return IRQ_NONE; 643 644 bna_intr_status_get(&bnad->bna, intr_status); 645 646 if (unlikely(!intr_status)) 647 return IRQ_NONE; 648 649 spin_lock_irqsave(&bnad->bna_lock, flags); 650 651 if (BNA_IS_MBOX_ERR_INTR(intr_status)) 652 bna_mbox_handler(&bnad->bna, intr_status); 653 654 spin_unlock_irqrestore(&bnad->bna_lock, flags); 655 656 if (!BNA_IS_INTX_DATA_INTR(intr_status)) 657 return IRQ_HANDLED; 658 659 /* Process data interrupts */ 660 /* Tx processing */ 661 for (i = 0; i < bnad->num_tx; i++) { 662 for (j = 0; j < bnad->num_txq_per_tx; j++) 663 bnad_tx(bnad, bnad->tx_info[i].tcb[j]); 664 } 665 /* Rx processing */ 666 for (i = 0; i < bnad->num_rx; i++) { 667 rx_info = &bnad->rx_info[i]; 668 if (!rx_info->rx) 669 continue; 670 for (j = 0; j < bnad->num_rxp_per_rx; j++) { 671 rx_ctrl = &rx_info->rx_ctrl[j]; 672 if (rx_ctrl->ccb) 673 bnad_netif_rx_schedule_poll(bnad, 674 rx_ctrl->ccb); 675 } 676 } 677 return IRQ_HANDLED; 678} 679 680/* 681 * Called in interrupt / callback context 682 * with bna_lock held, so cfg_flags access is OK 683 */ 684static void 685bnad_enable_mbox_irq(struct bnad *bnad) 686{ 687 clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags); 688 689 BNAD_UPDATE_CTR(bnad, mbox_intr_enabled); 690} 691 692/* 693 * Called with bnad->bna_lock held b'cos of 694 * bnad->cfg_flags access. 
static void
bnad_disable_mbox_irq(struct bnad *bnad)
{
	set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);

	BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
}

static void
bnad_set_netdev_perm_addr(struct bnad *bnad)
{
	struct net_device *netdev = bnad->netdev;

	memcpy(netdev->perm_addr, &bnad->perm_addr, netdev->addr_len);
	if (is_zero_ether_addr(netdev->dev_addr))
		memcpy(netdev->dev_addr, &bnad->perm_addr, netdev->addr_len);
}

/* Control Path Handlers */

/* Callbacks */
void
bnad_cb_device_enable_mbox_intr(struct bnad *bnad)
{
	bnad_enable_mbox_irq(bnad);
}

void
bnad_cb_device_disable_mbox_intr(struct bnad *bnad)
{
	bnad_disable_mbox_irq(bnad);
}

void
bnad_cb_device_enabled(struct bnad *bnad, enum bna_cb_status status)
{
	complete(&bnad->bnad_completions.ioc_comp);
	bnad->bnad_completions.ioc_comp_status = status;
}

void
bnad_cb_device_disabled(struct bnad *bnad, enum bna_cb_status status)
{
	complete(&bnad->bnad_completions.ioc_comp);
	bnad->bnad_completions.ioc_comp_status = status;
}

static void
bnad_cb_port_disabled(void *arg, enum bna_cb_status status)
{
	struct bnad *bnad = (struct bnad *)arg;

	complete(&bnad->bnad_completions.port_comp);

	netif_carrier_off(bnad->netdev);
}

void
bnad_cb_port_link_status(struct bnad *bnad,
			 enum bna_link_status link_status)
{
	bool link_up = 0;

	link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);

	if (link_status == BNA_CEE_UP) {
		set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
		BNAD_UPDATE_CTR(bnad, cee_up);
	} else
		clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);

	if (link_up) {
		if (!netif_carrier_ok(bnad->netdev)) {
			struct bna_tcb *tcb = bnad->tx_info[0].tcb[0];
			if (!tcb)
				return;
			pr_warn("bna: %s link up\n",
				bnad->netdev->name);
			netif_carrier_on(bnad->netdev);
			BNAD_UPDATE_CTR(bnad, link_toggle);
			if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
				/* Force an immediate Transmit Schedule */
				pr_info("bna: %s TX_STARTED\n",
					bnad->netdev->name);
				netif_wake_queue(bnad->netdev);
				BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
			} else {
				netif_stop_queue(bnad->netdev);
				BNAD_UPDATE_CTR(bnad, netif_queue_stop);
			}
		}
	} else {
		if (netif_carrier_ok(bnad->netdev)) {
			pr_warn("bna: %s link down\n",
				bnad->netdev->name);
			netif_carrier_off(bnad->netdev);
			BNAD_UPDATE_CTR(bnad, link_toggle);
		}
	}
}

static void
bnad_cb_tx_disabled(void *arg, struct bna_tx *tx,
		    enum bna_cb_status status)
{
	struct bnad *bnad = (struct bnad *)arg;

	complete(&bnad->bnad_completions.tx_comp);
}

static void
bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct bnad_tx_info *tx_info =
			(struct bnad_tx_info *)tcb->txq->tx->priv;
	struct bnad_unmap_q *unmap_q = tcb->unmap_q;

	tx_info->tcb[tcb->id] = tcb;
	unmap_q->producer_index = 0;
	unmap_q->consumer_index = 0;
	unmap_q->q_depth = BNAD_TX_UNMAPQ_DEPTH;
}

static void
bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct bnad_tx_info *tx_info =
			(struct bnad_tx_info *)tcb->txq->tx->priv;
	struct bnad_unmap_q *unmap_q = tcb->unmap_q;

	while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
		cpu_relax();

	bnad_free_all_txbufs(bnad, tcb);

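	/* All pending Tx buffers were freed just above; resetting the
	 * unmap queue indices here leaves the queue in a clean state in
	 * case the TCB is set up again. */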
	unmap_q->producer_index = 0;
	unmap_q->consumer_index = 0;

	smp_mb__before_clear_bit();
	clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);

	tx_info->tcb[tcb->id] = NULL;
}

static void
bnad_cb_rcb_setup(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_unmap_q *unmap_q = rcb->unmap_q;

	unmap_q->producer_index = 0;
	unmap_q->consumer_index = 0;
	unmap_q->q_depth = BNAD_RX_UNMAPQ_DEPTH;
}

static void
bnad_cb_rcb_destroy(struct bnad *bnad, struct bna_rcb *rcb)
{
	bnad_free_all_rxbufs(bnad, rcb);
}

static void
bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bnad_rx_info *rx_info =
			(struct bnad_rx_info *)ccb->cq->rx->priv;

	rx_info->rx_ctrl[ccb->id].ccb = ccb;
	ccb->ctrl = &rx_info->rx_ctrl[ccb->id];
}

static void
bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bnad_rx_info *rx_info =
			(struct bnad_rx_info *)ccb->cq->rx->priv;

	rx_info->rx_ctrl[ccb->id].ccb = NULL;
}

static void
bnad_cb_tx_stall(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct bnad_tx_info *tx_info =
			(struct bnad_tx_info *)tcb->txq->tx->priv;

	if (tx_info != &bnad->tx_info[0])
		return;

	clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
	netif_stop_queue(bnad->netdev);
	pr_info("bna: %s TX_STOPPED\n", bnad->netdev->name);
}

static void
bnad_cb_tx_resume(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct bnad_unmap_q *unmap_q = tcb->unmap_q;

	if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
		return;

	clear_bit(BNAD_RF_TX_SHUTDOWN_DELAYED, &bnad->run_flags);

	while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
		cpu_relax();

	bnad_free_all_txbufs(bnad, tcb);

	unmap_q->producer_index = 0;
	unmap_q->consumer_index = 0;

	smp_mb__before_clear_bit();
	clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);

	/*
	 * Workaround: if the first device enable failed, we may have
	 * read a zero MAC address. Try to get the MAC address again here.
	 */
	if (is_zero_ether_addr(&bnad->perm_addr.mac[0])) {
		bna_port_mac_get(&bnad->bna.port, &bnad->perm_addr);
		bnad_set_netdev_perm_addr(bnad);
	}

	set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);

	if (netif_carrier_ok(bnad->netdev)) {
		pr_info("bna: %s TX_STARTED\n", bnad->netdev->name);
		netif_wake_queue(bnad->netdev);
		BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
	}
}

static void
bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
{
	/* Delay only once for the whole Tx Path Shutdown */
	if (!test_and_set_bit(BNAD_RF_TX_SHUTDOWN_DELAYED, &bnad->run_flags))
		mdelay(BNAD_TXRX_SYNC_MDELAY);
}

static void
bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_ccb *ccb)
{
	clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);

	if (ccb->rcb[1])
		clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);

	if (!test_and_set_bit(BNAD_RF_RX_SHUTDOWN_DELAYED, &bnad->run_flags))
		mdelay(BNAD_TXRX_SYNC_MDELAY);
}

static void
bnad_cb_rx_post(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_unmap_q *unmap_q = rcb->unmap_q;

	clear_bit(BNAD_RF_RX_SHUTDOWN_DELAYED, &bnad->run_flags);

	if (rcb == rcb->cq->ccb->rcb[0])
		bnad_cq_cmpl_init(bnad, rcb->cq->ccb);

	bnad_free_all_rxbufs(bnad, rcb);

	set_bit(BNAD_RXQ_STARTED, &rcb->flags);

	/* Now allocate & post buffers for this RCB */
	/* !!Allocation in callback context */
	if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
		if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
			>> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
			bnad_alloc_n_post_rxbufs(bnad, rcb);
		smp_mb__before_clear_bit();
		clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
	}
}

static void
bnad_cb_rx_disabled(void *arg, struct bna_rx *rx,
		    enum bna_cb_status status)
{
	struct bnad *bnad = (struct bnad *)arg;

	complete(&bnad->bnad_completions.rx_comp);
}

static void
bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx,
		     enum bna_cb_status status)
{
	bnad->bnad_completions.mcast_comp_status = status;
	complete(&bnad->bnad_completions.mcast_comp);
}

void
bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
		  struct bna_stats *stats)
{
	if (status == BNA_CB_SUCCESS)
		BNAD_UPDATE_CTR(bnad, hw_stats_updates);

	if (!netif_running(bnad->netdev) ||
		!test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
		return;

	mod_timer(&bnad->stats_timer,
		  jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
}

/* Resource allocation, free functions */

static void
bnad_mem_free(struct bnad *bnad,
	      struct bna_mem_info *mem_info)
{
	int i;
	dma_addr_t dma_pa;

	if (mem_info->mdl == NULL)
		return;

	for (i = 0; i < mem_info->num; i++) {
		if (mem_info->mdl[i].kva != NULL) {
			if (mem_info->mem_type == BNA_MEM_T_DMA) {
				BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
						 dma_pa);
				dma_free_coherent(&bnad->pcidev->dev,
						  mem_info->mdl[i].len,
						  mem_info->mdl[i].kva, dma_pa);
			} else
				kfree(mem_info->mdl[i].kva);
		}
	}
	kfree(mem_info->mdl);
	mem_info->mdl = NULL;
}

static int
bnad_mem_alloc(struct bnad *bnad,
	       struct bna_mem_info *mem_info)
{
	int i;
	dma_addr_t dma_pa;

	if ((mem_info->num == 0) || (mem_info->len == 0)) {
		mem_info->mdl = NULL;
		return 0;
	}

	mem_info->mdl =
		kcalloc(mem_info->num, sizeof(struct bna_mem_descr),
			GFP_KERNEL);
	if (mem_info->mdl == NULL)
		return -ENOMEM;

	if (mem_info->mem_type == BNA_MEM_T_DMA) {
		for (i = 0; i < mem_info->num; i++) {
			mem_info->mdl[i].len = mem_info->len;
			mem_info->mdl[i].kva =
				dma_alloc_coherent(&bnad->pcidev->dev,
						   mem_info->len, &dma_pa,
						   GFP_KERNEL);

			if (mem_info->mdl[i].kva == NULL)
				goto err_return;

			BNA_SET_DMA_ADDR(dma_pa,
					 &(mem_info->mdl[i].dma));
		}
	} else {
		for (i = 0; i < mem_info->num; i++) {
			mem_info->mdl[i].len = mem_info->len;
			mem_info->mdl[i].kva = kzalloc(mem_info->len,
						       GFP_KERNEL);
			if (mem_info->mdl[i].kva == NULL)
				goto err_return;
		}
	}

	return 0;

err_return:
	bnad_mem_free(bnad, mem_info);
	return -ENOMEM;
}

/* Free IRQ for Mailbox */
static void
bnad_mbox_irq_free(struct bnad *bnad,
		   struct bna_intr_info *intr_info)
{
	int irq;
	unsigned long flags;

	if (intr_info->idl == NULL)
		return;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bnad_disable_mbox_irq(bnad);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	irq = BNAD_GET_MBOX_IRQ(bnad);
	free_irq(irq, bnad);

	kfree(intr_info->idl);
}

/*
 * Allocates IRQ for Mailbox, but keep it disabled
 * This will be enabled once we get the mbox enable callback
 * from bna
 */
static int
bnad_mbox_irq_alloc(struct bnad *bnad,
		    struct bna_intr_info *intr_info)
{
	int err = 0;
	unsigned long irq_flags = 0, flags;
	u32 irq;
	irq_handler_t irq_handler;

	/* Mbox should use only 1 vector */

	intr_info->idl = kzalloc(sizeof(*(intr_info->idl)), GFP_KERNEL);
	if (!intr_info->idl)
		return -ENOMEM;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (bnad->cfg_flags & BNAD_CF_MSIX) {
		irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
		irq = bnad->msix_table[bnad->msix_num - 1].vector;
		intr_info->intr_type = BNA_INTR_T_MSIX;
		intr_info->idl[0].vector = bnad->msix_num - 1;
	} else {
		irq_handler = (irq_handler_t)bnad_isr;
		irq = bnad->pcidev->irq;
		irq_flags = IRQF_SHARED;
		intr_info->intr_type = BNA_INTR_T_INTX;
		/* intr_info->idl.vector = 0 ? */
	}
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	flags = irq_flags;
	sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);

	/*
	 * Set the Mbox IRQ disable flag, so that the IRQ handler
	 * called from request_irq() for SHARED IRQs does not execute
	 */
	set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);

	BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);

	err = request_irq(irq, irq_handler, flags,
			  bnad->mbox_irq_name, bnad);

	if (err) {
		kfree(intr_info->idl);
		intr_info->idl = NULL;
	}

	return err;
}

static void
bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info)
{
	kfree(intr_info->idl);
	intr_info->idl = NULL;
}

/* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
static int
bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
		    uint txrx_id, struct bna_intr_info *intr_info)
{
	int i, vector_start = 0;
	u32 cfg_flags;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	cfg_flags = bnad->cfg_flags;
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	if (cfg_flags & BNAD_CF_MSIX) {
		intr_info->intr_type = BNA_INTR_T_MSIX;
		intr_info->idl = kcalloc(intr_info->num,
					 sizeof(struct bna_intr_descr),
					 GFP_KERNEL);
		if (!intr_info->idl)
			return -ENOMEM;

		switch (src) {
		case BNAD_INTR_TX:
			vector_start = txrx_id;
			break;

		case BNAD_INTR_RX:
			vector_start = bnad->num_tx * bnad->num_txq_per_tx +
					txrx_id;
			break;

		default:
			BUG();
		}

		for (i = 0; i < intr_info->num; i++)
			intr_info->idl[i].vector = vector_start + i;
	} else {
		intr_info->intr_type = BNA_INTR_T_INTX;
		intr_info->num = 1;
		intr_info->idl = kcalloc(intr_info->num,
					 sizeof(struct bna_intr_descr),
					 GFP_KERNEL);
		if (!intr_info->idl)
			return -ENOMEM;

		switch (src) {
		case BNAD_INTR_TX:
			intr_info->idl[0].vector = 0x1; /* Bit mask : Tx IB */
			break;

		case BNAD_INTR_RX:
			intr_info->idl[0].vector = 0x2; /* Bit mask : Rx IB */
			break;
		}
	}
	return 0;
}

/**
 * NOTE: Should be called for MSIX only
 * Unregisters Tx MSIX vector(s) from the kernel
 */
static void
bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
			int num_txqs)
{
	int i;
	int vector_num;

	for (i = 0; i < num_txqs; i++) {
		if (tx_info->tcb[i] == NULL)
			continue;

		vector_num = tx_info->tcb[i]->intr_vector;
		free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]);
	}
}

/**
 * NOTE: Should be called for MSIX only
 * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
 */
static int
bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
		      uint tx_id, int num_txqs)
{
	int i;
	int err;
	int vector_num;

	for (i = 0; i < num_txqs; i++) {
		vector_num = tx_info->tcb[i]->intr_vector;
		sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name,
			tx_id + tx_info->tcb[i]->id);
		err = request_irq(bnad->msix_table[vector_num].vector,
				  (irq_handler_t)bnad_msix_tx, 0,
				  tx_info->tcb[i]->name,
				  tx_info->tcb[i]);
		if (err)
			goto err_return;
	}

	return 0;

err_return:
	if (i > 0)
		bnad_tx_msix_unregister(bnad, tx_info, (i - 1));
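	/* Roll back the vectors registered before the failing
	 * request_irq() call above; i is the index that failed. */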
	return -1;
}

/**
 * NOTE: Should be called for MSIX only
 * Unregisters Rx MSIX vector(s) from the kernel
 */
static void
bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
			int num_rxps)
{
	int i;
	int vector_num;

	for (i = 0; i < num_rxps; i++) {
		if (rx_info->rx_ctrl[i].ccb == NULL)
			continue;

		vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
		free_irq(bnad->msix_table[vector_num].vector,
			 rx_info->rx_ctrl[i].ccb);
	}
}

/**
 * NOTE: Should be called for MSIX only
 * Registers Rx MSIX vector(s) and ISR(s), cookie with the kernel
 */
static int
bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
		      uint rx_id, int num_rxps)
{
	int i;
	int err;
	int vector_num;

	for (i = 0; i < num_rxps; i++) {
		vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
		sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d",
			bnad->netdev->name,
			rx_id + rx_info->rx_ctrl[i].ccb->id);
		err = request_irq(bnad->msix_table[vector_num].vector,
				  (irq_handler_t)bnad_msix_rx, 0,
				  rx_info->rx_ctrl[i].ccb->name,
				  rx_info->rx_ctrl[i].ccb);
		if (err)
			goto err_return;
	}

	return 0;

err_return:
	if (i > 0)
		bnad_rx_msix_unregister(bnad, rx_info, (i - 1));
	return -1;
}

/* Free Tx object Resources */
static void
bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
{
	int i;

	for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
		else if (res_info[i].res_type == BNA_RES_T_INTR)
			bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
	}
}

/* Allocates memory and interrupt resources for Tx object */
static int
bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
		  uint tx_id)
{
	int i, err = 0;

	for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			err = bnad_mem_alloc(bnad,
					     &res_info[i].res_u.mem_info);
		else if (res_info[i].res_type == BNA_RES_T_INTR)
			err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id,
						  &res_info[i].res_u.intr_info);
		if (err)
			goto err_return;
	}
	return 0;

err_return:
	bnad_tx_res_free(bnad, res_info);
	return err;
}

/* Free Rx object Resources */
static void
bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
{
	int i;

	for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
		else if (res_info[i].res_type == BNA_RES_T_INTR)
			bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
	}
}

/* Allocates memory and interrupt resources for Rx object */
static int
bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
		  uint rx_id)
{
	int i, err = 0;

	/* All memory needs to be allocated before setup_ccbs */
	for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
		if (res_info[i].res_type == BNA_RES_T_MEM)
			err = bnad_mem_alloc(bnad,
					     &res_info[i].res_u.mem_info);
		else if (res_info[i].res_type == BNA_RES_T_INTR)
			err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id,
						  &res_info[i].res_u.intr_info);
		if (err)
			goto err_return;
	}
	return 0;

err_return:
	bnad_rx_res_free(bnad, res_info);
	return err;
}

/* Timer callbacks */
/* a) IOC timer */
static void
bnad_ioc_timeout(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_nw_ioc_timeout((void *) &bnad->bna.device.ioc);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

static void
bnad_ioc_hb_check(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_nw_ioc_hb_check((void *) &bnad->bna.device.ioc);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

static void
bnad_iocpf_timeout(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_nw_iocpf_timeout((void *) &bnad->bna.device.ioc);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

static void
bnad_iocpf_sem_timeout(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_nw_iocpf_sem_timeout((void *) &bnad->bna.device.ioc);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

/*
 * All timer routines use bnad->bna_lock to protect against
 * the following race, which may occur in case of no locking:
 *	Time	CPU m		CPU n
 *	0	1 = test_bit
 *	1			clear_bit
 *	2			del_timer_sync
 *	3	mod_timer
 */

/* b) Dynamic Interrupt Moderation Timer */
static void
bnad_dim_timeout(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	struct bnad_rx_info *rx_info;
	struct bnad_rx_ctrl *rx_ctrl;
	int i, j;
	unsigned long flags;

	if (!netif_carrier_ok(bnad->netdev))
		return;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	for (i = 0; i < bnad->num_rx; i++) {
		rx_info = &bnad->rx_info[i];
		if (!rx_info->rx)
			continue;
		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
			rx_ctrl = &rx_info->rx_ctrl[j];
			if (!rx_ctrl->ccb)
				continue;
			bna_rx_dim_update(rx_ctrl->ccb);
		}
	}

	/* Check for BNAD_CF_DIM_ENABLED, does not eliminate a race */
	if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags))
		mod_timer(&bnad->dim_timer,
			  jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

/* c) Statistics Timer */
static void
bnad_stats_timeout(unsigned long data)
{
	struct bnad *bnad = (struct bnad *)data;
	unsigned long flags;

	if (!netif_running(bnad->netdev) ||
		!test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
		return;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_stats_get(&bnad->bna);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}

/*
 * Set up timer for DIM
 * Called with bnad->bna_lock held
 */
void
bnad_dim_timer_start(struct bnad *bnad)
{
	if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
	    !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
		setup_timer(&bnad->dim_timer, bnad_dim_timeout,
			    (unsigned long)bnad);
		set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
		mod_timer(&bnad->dim_timer,
			  jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
	}
}
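
/*
 * Note: each timer here pairs a RUNNING bit in bnad->run_flags with
 * bnad->bna_lock, following the race diagram above: the bit is set and
 * the timer armed under the lock, while the stop path clears the bit
 * under the lock and calls del_timer_sync() only after dropping it
 * (see bnad_stats_timer_stop() below).
 */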
1530 1531/* 1532 * Set up timer for statistics 1533 * Called with mutex_lock(&bnad->conf_mutex) held 1534 */ 1535static void 1536bnad_stats_timer_start(struct bnad *bnad) 1537{ 1538 unsigned long flags; 1539 1540 spin_lock_irqsave(&bnad->bna_lock, flags); 1541 if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) { 1542 setup_timer(&bnad->stats_timer, bnad_stats_timeout, 1543 (unsigned long)bnad); 1544 mod_timer(&bnad->stats_timer, 1545 jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ)); 1546 } 1547 spin_unlock_irqrestore(&bnad->bna_lock, flags); 1548} 1549 1550/* 1551 * Stops the stats timer 1552 * Called with mutex_lock(&bnad->conf_mutex) held 1553 */ 1554static void 1555bnad_stats_timer_stop(struct bnad *bnad) 1556{ 1557 int to_del = 0; 1558 unsigned long flags; 1559 1560 spin_lock_irqsave(&bnad->bna_lock, flags); 1561 if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) 1562 to_del = 1; 1563 spin_unlock_irqrestore(&bnad->bna_lock, flags); 1564 if (to_del) 1565 del_timer_sync(&bnad->stats_timer); 1566} 1567 1568/* Utilities */ 1569 1570static void 1571bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list) 1572{ 1573 int i = 1; /* Index 0 has broadcast address */ 1574 struct netdev_hw_addr *mc_addr; 1575 1576 netdev_for_each_mc_addr(mc_addr, netdev) { 1577 memcpy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0], 1578 ETH_ALEN); 1579 i++; 1580 } 1581} 1582 1583static int 1584bnad_napi_poll_rx(struct napi_struct *napi, int budget) 1585{ 1586 struct bnad_rx_ctrl *rx_ctrl = 1587 container_of(napi, struct bnad_rx_ctrl, napi); 1588 struct bna_ccb *ccb; 1589 struct bnad *bnad; 1590 int rcvd = 0; 1591 1592 ccb = rx_ctrl->ccb; 1593 1594 bnad = ccb->bnad; 1595 1596 if (!netif_carrier_ok(bnad->netdev)) 1597 goto poll_exit; 1598 1599 rcvd = bnad_poll_cq(bnad, ccb, budget); 1600 if (rcvd == budget) 1601 return rcvd; 1602 1603poll_exit: 1604 napi_complete((napi)); 1605 1606 BNAD_UPDATE_CTR(bnad, netif_rx_complete); 1607 1608 bnad_enable_rx_irq(bnad, ccb); 1609 return rcvd; 1610} 1611 1612static void 1613bnad_napi_enable(struct bnad *bnad, u32 rx_id) 1614{ 1615 struct bnad_rx_ctrl *rx_ctrl; 1616 int i; 1617 1618 /* Initialize & enable NAPI */ 1619 for (i = 0; i < bnad->num_rxp_per_rx; i++) { 1620 rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i]; 1621 1622 netif_napi_add(bnad->netdev, &rx_ctrl->napi, 1623 bnad_napi_poll_rx, 64); 1624 1625 napi_enable(&rx_ctrl->napi); 1626 } 1627} 1628 1629static void 1630bnad_napi_disable(struct bnad *bnad, u32 rx_id) 1631{ 1632 int i; 1633 1634 /* First disable and then clean up */ 1635 for (i = 0; i < bnad->num_rxp_per_rx; i++) { 1636 napi_disable(&bnad->rx_info[rx_id].rx_ctrl[i].napi); 1637 netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi); 1638 } 1639} 1640 1641/* Should be held with conf_lock held */ 1642void 1643bnad_cleanup_tx(struct bnad *bnad, uint tx_id) 1644{ 1645 struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id]; 1646 struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0]; 1647 unsigned long flags; 1648 1649 if (!tx_info->tx) 1650 return; 1651 1652 init_completion(&bnad->bnad_completions.tx_comp); 1653 spin_lock_irqsave(&bnad->bna_lock, flags); 1654 bna_tx_disable(tx_info->tx, BNA_HARD_CLEANUP, bnad_cb_tx_disabled); 1655 spin_unlock_irqrestore(&bnad->bna_lock, flags); 1656 wait_for_completion(&bnad->bnad_completions.tx_comp); 1657 1658 if (tx_info->tcb[0]->intr_type == BNA_INTR_T_MSIX) 1659 bnad_tx_msix_unregister(bnad, tx_info, 1660 bnad->num_txq_per_tx); 1661 1662 spin_lock_irqsave(&bnad->bna_lock, 
	bna_tx_destroy(tx_info->tx);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	tx_info->tx = NULL;

	if (0 == tx_id)
		tasklet_kill(&bnad->tx_free_tasklet);

	bnad_tx_res_free(bnad, res_info);
}

/* Should be called with conf_lock held */
int
bnad_setup_tx(struct bnad *bnad, uint tx_id)
{
	int err;
	struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
	struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
	struct bna_intr_info *intr_info =
			&res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
	struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
	struct bna_tx_event_cbfn tx_cbfn;
	struct bna_tx *tx;
	unsigned long flags;

	/* Initialize the Tx object configuration */
	tx_config->num_txq = bnad->num_txq_per_tx;
	tx_config->txq_depth = bnad->txq_depth;
	tx_config->tx_type = BNA_TX_T_REGULAR;

	/* Initialize the tx event handlers */
	tx_cbfn.tcb_setup_cbfn = bnad_cb_tcb_setup;
	tx_cbfn.tcb_destroy_cbfn = bnad_cb_tcb_destroy;
	tx_cbfn.tx_stall_cbfn = bnad_cb_tx_stall;
	tx_cbfn.tx_resume_cbfn = bnad_cb_tx_resume;
	tx_cbfn.tx_cleanup_cbfn = bnad_cb_tx_cleanup;

	/* Get BNA's resource requirement for one tx object */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_tx_res_req(bnad->num_txq_per_tx,
		       bnad->txq_depth, res_info);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* Fill Unmap Q memory requirements */
	BNAD_FILL_UNMAPQ_MEM_REQ(
			&res_info[BNA_TX_RES_MEM_T_UNMAPQ],
			bnad->num_txq_per_tx,
			BNAD_TX_UNMAPQ_DEPTH);

	/* Allocate resources */
	err = bnad_tx_res_alloc(bnad, res_info, tx_id);
	if (err)
		return err;

	/* Ask BNA to create one Tx object, supplying required resources */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,
			   tx_info);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	if (!tx)
		goto err_return;
	tx_info->tx = tx;

	/* Register ISR for the Tx object */
	if (intr_info->intr_type == BNA_INTR_T_MSIX) {
		err = bnad_tx_msix_register(bnad, tx_info,
					    tx_id, bnad->num_txq_per_tx);
		if (err)
			goto err_return;
	}

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_tx_enable(tx);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	return 0;

err_return:
	bnad_tx_res_free(bnad, res_info);
	return err;
}

/* Setup the rx config for bna_rx_create */
/* bnad decides the configuration */
static void
bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
{
	rx_config->rx_type = BNA_RX_T_REGULAR;
	rx_config->num_paths = bnad->num_rxp_per_rx;

	if (bnad->num_rxp_per_rx > 1) {
		rx_config->rss_status = BNA_STATUS_T_ENABLED;
		rx_config->rss_config.hash_type =
				(BFI_RSS_T_V4_TCP |
				 BFI_RSS_T_V6_TCP |
				 BFI_RSS_T_V4_IP |
				 BFI_RSS_T_V6_IP);
		rx_config->rss_config.hash_mask =
				bnad->num_rxp_per_rx - 1;
		get_random_bytes(rx_config->rss_config.toeplitz_hash_key,
			sizeof(rx_config->rss_config.toeplitz_hash_key));
	} else {
		rx_config->rss_status = BNA_STATUS_T_DISABLED;
		memset(&rx_config->rss_config, 0,
		       sizeof(rx_config->rss_config));
	}
	rx_config->rxp_type = BNA_RXP_SLR;
	rx_config->q_depth = bnad->rxq_depth;

	rx_config->small_buff_size = BFI_SMALL_RXBUF_SIZE;

	rx_config->vlan_strip_status = BNA_STATUS_T_ENABLED;
}

/* Called with mutex_lock(&bnad->conf_mutex) held */
void
bnad_cleanup_rx(struct bnad *bnad, uint rx_id)
{
	struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
	struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
	struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
	unsigned long flags;
	int dim_timer_del = 0;

	if (!rx_info->rx)
		return;

	if (0 == rx_id) {
		spin_lock_irqsave(&bnad->bna_lock, flags);
		dim_timer_del = bnad_dim_timer_running(bnad);
		if (dim_timer_del)
			clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		if (dim_timer_del)
			del_timer_sync(&bnad->dim_timer);
	}

	bnad_napi_disable(bnad, rx_id);

	init_completion(&bnad->bnad_completions.rx_comp);
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	wait_for_completion(&bnad->bnad_completions.rx_comp);

	if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX)
		bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_rx_destroy(rx_info->rx);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	rx_info->rx = NULL;

	bnad_rx_res_free(bnad, res_info);
}

/* Called with mutex_lock(&bnad->conf_mutex) held */
int
bnad_setup_rx(struct bnad *bnad, uint rx_id)
{
	int err;
	struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
	struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
	struct bna_intr_info *intr_info =
			&res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
	struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
	struct bna_rx_event_cbfn rx_cbfn;
	struct bna_rx *rx;
	unsigned long flags;

	/* Initialize the Rx object configuration */
	bnad_init_rx_config(bnad, rx_config);

	/* Initialize the Rx event handlers */
	rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
	rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy;
	rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
	rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
	rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup;
	rx_cbfn.rx_post_cbfn = bnad_cb_rx_post;

	/* Get BNA's resource requirement for one Rx object */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_rx_res_req(rx_config, res_info);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* Fill Unmap Q memory requirements */
	BNAD_FILL_UNMAPQ_MEM_REQ(
			&res_info[BNA_RX_RES_MEM_T_UNMAPQ],
			rx_config->num_paths +
			((rx_config->rxp_type == BNA_RXP_SINGLE) ? 0 :
				rx_config->num_paths), BNAD_RX_UNMAPQ_DEPTH);

	/* Allocate resource */
	err = bnad_rx_res_alloc(bnad, res_info, rx_id);
	if (err)
		return err;

	/* Ask BNA to create one Rx object, supplying required resources */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info,
			   rx_info);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	if (!rx)
		goto err_return;
	rx_info->rx = rx;

	/* Register ISR for the Rx object */
	if (intr_info->intr_type == BNA_INTR_T_MSIX) {
		err = bnad_rx_msix_register(bnad, rx_info, rx_id,
					    rx_config->num_paths);
		if (err)
			goto err_return;
	}

	/* Enable NAPI */
	bnad_napi_enable(bnad, rx_id);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (0 == rx_id) {
		/* Set up Dynamic Interrupt Moderation Vector */
		if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED)
			bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector);

		/* Enable VLAN filtering only on the default Rx */
		bna_rx_vlanfilter_enable(rx);

		/* Start the DIM timer */
		bnad_dim_timer_start(bnad);
	}

	bna_rx_enable(rx);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	return 0;

err_return:
	bnad_cleanup_rx(bnad, rx_id);
	return err;
}

/* Called with conf_lock & bnad->bna_lock held */
void
bnad_tx_coalescing_timeo_set(struct bnad *bnad)
{
	struct bnad_tx_info *tx_info;

	tx_info = &bnad->tx_info[0];
	if (!tx_info->tx)
		return;

	bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo);
}

/* Called with conf_lock & bnad->bna_lock held */
void
bnad_rx_coalescing_timeo_set(struct bnad *bnad)
{
	struct bnad_rx_info *rx_info;
	int i;

	for (i = 0; i < bnad->num_rx; i++) {
		rx_info = &bnad->rx_info[i];
		if (!rx_info->rx)
			continue;
		bna_rx_coalescing_timeo_set(rx_info->rx,
					    bnad->rx_coalescing_timeo);
	}
}

/*
 * Called with bnad->bna_lock held
 */
static int
bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr)
{
	int ret;

	if (!is_valid_ether_addr(mac_addr))
		return -EADDRNOTAVAIL;

	/* If datapath is down, pretend everything went through */
	if (!bnad->rx_info[0].rx)
		return 0;

	ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr, NULL);
	if (ret != BNA_CB_SUCCESS)
		return -EADDRNOTAVAIL;

	return 0;
}

/* Should be called with conf_lock held */
static int
bnad_enable_default_bcast(struct bnad *bnad)
{
	struct bnad_rx_info *rx_info = &bnad->rx_info[0];
	int ret;
	unsigned long flags;

	init_completion(&bnad->bnad_completions.mcast_comp);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	ret = bna_rx_mcast_add(rx_info->rx, (u8 *)bnad_bcast_addr,
			       bnad_cb_rx_mcast_add);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	if (ret == BNA_CB_SUCCESS)
		wait_for_completion(&bnad->bnad_completions.mcast_comp);
	else
		return -ENODEV;

	if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS)
		return -ENODEV;

	return 0;
}

/* Called with bnad_conf_lock() held */
static void
bnad_restore_vlans(struct bnad *bnad, u32 rx_id)
{
	u16 vlan_id;
	unsigned long flags;

	if (!bnad->vlan_grp)
		return;

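	/* The restore loop below assumes the bna VLAN filter table
	 * covers the full 802.1Q VLAN ID space; the BUG_ON guards
	 * that assumption. */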
	BUG_ON(!(VLAN_N_VID == (BFI_MAX_VLAN + 1)));

	for (vlan_id = 0; vlan_id < VLAN_N_VID; vlan_id++) {
		if (!vlan_group_get_device(bnad->vlan_grp, vlan_id))
			continue;
		spin_lock_irqsave(&bnad->bna_lock, flags);
		bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vlan_id);
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
	}
}

/* Statistics utilities */
void
bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
{
	int i, j;

	for (i = 0; i < bnad->num_rx; i++) {
		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
			if (bnad->rx_info[i].rx_ctrl[j].ccb) {
				stats->rx_packets += bnad->rx_info[i].
				rx_ctrl[j].ccb->rcb[0]->rxq->rx_packets;
				stats->rx_bytes += bnad->rx_info[i].
					rx_ctrl[j].ccb->rcb[0]->rxq->rx_bytes;
				if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
					bnad->rx_info[i].rx_ctrl[j].ccb->
					rcb[1]->rxq) {
					stats->rx_packets +=
						bnad->rx_info[i].rx_ctrl[j].
						ccb->rcb[1]->rxq->rx_packets;
					stats->rx_bytes +=
						bnad->rx_info[i].rx_ctrl[j].
						ccb->rcb[1]->rxq->rx_bytes;
				}
			}
		}
	}
	for (i = 0; i < bnad->num_tx; i++) {
		for (j = 0; j < bnad->num_txq_per_tx; j++) {
			if (bnad->tx_info[i].tcb[j]) {
				stats->tx_packets +=
				bnad->tx_info[i].tcb[j]->txq->tx_packets;
				stats->tx_bytes +=
					bnad->tx_info[i].tcb[j]->txq->tx_bytes;
			}
		}
	}
}

/*
 * Must be called with the bna_lock held.
 */
void
bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
{
	struct bfi_ll_stats_mac *mac_stats;
	u64 bmap;
	int i;

	mac_stats = &bnad->stats.bna_stats->hw_stats->mac_stats;
	stats->rx_errors =
		mac_stats->rx_fcs_error + mac_stats->rx_alignment_error +
		mac_stats->rx_frame_length_error + mac_stats->rx_code_error +
		mac_stats->rx_undersize;
	stats->tx_errors = mac_stats->tx_fcs_error +
					mac_stats->tx_undersize;
	stats->rx_dropped = mac_stats->rx_drop;
	stats->tx_dropped = mac_stats->tx_drop;
	stats->multicast = mac_stats->rx_multicast;
	stats->collisions = mac_stats->tx_total_collision;

	stats->rx_length_errors = mac_stats->rx_frame_length_error;

	/* receive ring buffer overflow ?? */

	stats->rx_crc_errors = mac_stats->rx_fcs_error;
	stats->rx_frame_errors = mac_stats->rx_alignment_error;
	/* recv'r fifo overrun */
	bmap = (u64)bnad->stats.bna_stats->rxf_bmap[0] |
			((u64)bnad->stats.bna_stats->rxf_bmap[1] << 32);
	for (i = 0; bmap && (i < BFI_LL_RXF_ID_MAX); i++) {
		if (bmap & 1) {
			stats->rx_fifo_errors +=
				bnad->stats.bna_stats->
					hw_stats->rxf_stats[i].frame_drops;
			break;
		}
		bmap >>= 1;
	}
}

static void
bnad_mbox_irq_sync(struct bnad *bnad)
{
	u32 irq;
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (bnad->cfg_flags & BNAD_CF_MSIX)
		irq = bnad->msix_table[bnad->msix_num - 1].vector;
	else
		irq = bnad->pcidev->irq;
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	synchronize_irq(irq);
}

/* Utility used by bnad_start_xmit, for doing TSO */
static int
bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
{
	int err;

	/* SKB_GSO_TCPV4 and SKB_GSO_TCPV6 are defined since 2.6.18. */
*/ 2106 BUG_ON(!(skb_shinfo(skb)->gso_type == SKB_GSO_TCPV4 || 2107 skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6)); 2108 if (skb_header_cloned(skb)) { 2109 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 2110 if (err) { 2111 BNAD_UPDATE_CTR(bnad, tso_err); 2112 return err; 2113 } 2114 } 2115 2116 /* 2117 * For TSO, the TCP checksum field is seeded with pseudo-header sum 2118 * excluding the length field. 2119 */ 2120 if (skb->protocol == htons(ETH_P_IP)) { 2121 struct iphdr *iph = ip_hdr(skb); 2122 2123 /* Do we really need these? */ 2124 iph->tot_len = 0; 2125 iph->check = 0; 2126 2127 tcp_hdr(skb)->check = 2128 ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0, 2129 IPPROTO_TCP, 0); 2130 BNAD_UPDATE_CTR(bnad, tso4); 2131 } else { 2132 struct ipv6hdr *ipv6h = ipv6_hdr(skb); 2133 2134 BUG_ON(!(skb->protocol == htons(ETH_P_IPV6))); 2135 ipv6h->payload_len = 0; 2136 tcp_hdr(skb)->check = 2137 ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, 0, 2138 IPPROTO_TCP, 0); 2139 BNAD_UPDATE_CTR(bnad, tso6); 2140 } 2141 2142 return 0; 2143} 2144 2145/* 2146 * Initialize Q numbers depending on Rx Paths 2147 * Called with bnad->bna_lock held, because of cfg_flags 2148 * access. 2149 */ 2150static void 2151bnad_q_num_init(struct bnad *bnad) 2152{ 2153 int rxps; 2154 2155 rxps = min((uint)num_online_cpus(), 2156 (uint)(BNAD_MAX_RXS * BNAD_MAX_RXPS_PER_RX)); 2157 2158 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) 2159 rxps = 1; /* INTx */ 2160 2161 bnad->num_rx = 1; 2162 bnad->num_tx = 1; 2163 bnad->num_rxp_per_rx = rxps; 2164 bnad->num_txq_per_tx = BNAD_TXQ_NUM; 2165} 2166 2167/* 2168 * Adjusts the Q numbers, given a number of MSIX vectors. 2169 * Preference is given to RSS over Tx priority queues; 2170 * in that case, just one Tx Q is used. 2171 * Called with bnad->bna_lock held because of cfg_flags access. 2172 */ 2173static void 2174bnad_q_num_adjust(struct bnad *bnad, int msix_vectors) 2175{ 2176 bnad->num_txq_per_tx = 1; 2177 if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx) + 2178 bnad_rxqs_per_cq + BNAD_MAILBOX_MSIX_VECTORS) && 2179 (bnad->cfg_flags & BNAD_CF_MSIX)) { 2180 bnad->num_rxp_per_rx = msix_vectors - 2181 (bnad->num_tx * bnad->num_txq_per_tx) - 2182 BNAD_MAILBOX_MSIX_VECTORS; 2183 } else 2184 bnad->num_rxp_per_rx = 1; 2185} 2186 2187/* Enable / disable device */ 2188static void 2189bnad_device_disable(struct bnad *bnad) 2190{ 2191 unsigned long flags; 2192 2193 init_completion(&bnad->bnad_completions.ioc_comp); 2194 2195 spin_lock_irqsave(&bnad->bna_lock, flags); 2196 bna_device_disable(&bnad->bna.device, BNA_HARD_CLEANUP); 2197 spin_unlock_irqrestore(&bnad->bna_lock, flags); 2198 2199 wait_for_completion(&bnad->bnad_completions.ioc_comp); 2200} 2201 2202static int 2203bnad_device_enable(struct bnad *bnad) 2204{ 2205 int err = 0; 2206 unsigned long flags; 2207 2208 init_completion(&bnad->bnad_completions.ioc_comp); 2209 2210 spin_lock_irqsave(&bnad->bna_lock, flags); 2211 bna_device_enable(&bnad->bna.device); 2212 spin_unlock_irqrestore(&bnad->bna_lock, flags); 2213 2214 wait_for_completion(&bnad->bnad_completions.ioc_comp); 2215 2216 if (bnad->bnad_completions.ioc_comp_status) 2217 err = bnad->bnad_completions.ioc_comp_status; 2218 2219 return err; 2220} 2221 2222/* Free BNA resources */ 2223static void 2224bnad_res_free(struct bnad *bnad) 2225{ 2226 int i; 2227 struct bna_res_info *res_info = &bnad->res_info[0]; 2228 2229 for (i = 0; i < BNA_RES_T_MAX; i++) { 2230 if (res_info[i].res_type == BNA_RES_T_MEM) 2231 bnad_mem_free(bnad, &res_info[i].res_u.mem_info); 2232 else 2233
bnad_mbox_irq_free(bnad, &res_info[i].res_u.intr_info); 2234 } 2235} 2236 2237/* Allocates memory and interrupt resources for BNA */ 2238static int 2239bnad_res_alloc(struct bnad *bnad) 2240{ 2241 int i, err; 2242 struct bna_res_info *res_info = &bnad->res_info[0]; 2243 2244 for (i = 0; i < BNA_RES_T_MAX; i++) { 2245 if (res_info[i].res_type == BNA_RES_T_MEM) 2246 err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info); 2247 else 2248 err = bnad_mbox_irq_alloc(bnad, 2249 &res_info[i].res_u.intr_info); 2250 if (err) 2251 goto err_return; 2252 } 2253 return 0; 2254 2255err_return: 2256 bnad_res_free(bnad); 2257 return err; 2258} 2259 2260/* Interrupt enable / disable */ 2261static void 2262bnad_enable_msix(struct bnad *bnad) 2263{ 2264 int i, ret; 2265 unsigned long flags; 2266 2267 spin_lock_irqsave(&bnad->bna_lock, flags); 2268 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) { 2269 spin_unlock_irqrestore(&bnad->bna_lock, flags); 2270 return; 2271 } 2272 spin_unlock_irqrestore(&bnad->bna_lock, flags); 2273 2274 if (bnad->msix_table) 2275 return; 2276 2277 bnad->msix_table = 2278 kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL); 2279 2280 if (!bnad->msix_table) 2281 goto intx_mode; 2282 2283 for (i = 0; i < bnad->msix_num; i++) 2284 bnad->msix_table[i].entry = i; 2285 2286 ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, bnad->msix_num); 2287 if (ret > 0) { 2288 /* Not enough MSI-X vectors. */ 2289 2290 spin_lock_irqsave(&bnad->bna_lock, flags); 2291 /* ret = #of vectors that we got */ 2292 bnad_q_num_adjust(bnad, ret); 2293 spin_unlock_irqrestore(&bnad->bna_lock, flags); 2294 2295 bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) 2296 + (bnad->num_rx 2297 * bnad->num_rxp_per_rx) + 2298 BNAD_MAILBOX_MSIX_VECTORS; 2299 2300 /* Try once more with adjusted numbers */ 2301 /* If this fails, fall back to INTx */ 2302 ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, 2303 bnad->msix_num); 2304 if (ret) 2305 goto intx_mode; 2306 2307 } else if (ret < 0) 2308 goto intx_mode; 2309 return; 2310 2311intx_mode: 2312 2313 kfree(bnad->msix_table); 2314 bnad->msix_table = NULL; 2315 bnad->msix_num = 0; 2316 spin_lock_irqsave(&bnad->bna_lock, flags); 2317 bnad->cfg_flags &= ~BNAD_CF_MSIX; 2318 bnad_q_num_init(bnad); 2319 spin_unlock_irqrestore(&bnad->bna_lock, flags); 2320} 2321 2322static void 2323bnad_disable_msix(struct bnad *bnad) 2324{ 2325 u32 cfg_flags; 2326 unsigned long flags; 2327 2328 spin_lock_irqsave(&bnad->bna_lock, flags); 2329 cfg_flags = bnad->cfg_flags; 2330 if (bnad->cfg_flags & BNAD_CF_MSIX) 2331 bnad->cfg_flags &= ~BNAD_CF_MSIX; 2332 spin_unlock_irqrestore(&bnad->bna_lock, flags); 2333 2334 if (cfg_flags & BNAD_CF_MSIX) { 2335 pci_disable_msix(bnad->pcidev); 2336 kfree(bnad->msix_table); 2337 bnad->msix_table = NULL; 2338 } 2339} 2340 2341/* Netdev entry points */ 2342static int 2343bnad_open(struct net_device *netdev) 2344{ 2345 int err; 2346 struct bnad *bnad = netdev_priv(netdev); 2347 struct bna_pause_config pause_config; 2348 int mtu; 2349 unsigned long flags; 2350 2351 mutex_lock(&bnad->conf_mutex); 2352 2353 /* Tx */ 2354 err = bnad_setup_tx(bnad, 0); 2355 if (err) 2356 goto err_return; 2357 2358 /* Rx */ 2359 err = bnad_setup_rx(bnad, 0); 2360 if (err) 2361 goto cleanup_tx; 2362 2363 /* Port */ 2364 pause_config.tx_pause = 0; 2365 pause_config.rx_pause = 0; 2366 2367 mtu = ETH_HLEN + bnad->netdev->mtu + ETH_FCS_LEN; 2368 2369 spin_lock_irqsave(&bnad->bna_lock, flags); 2370 bna_port_mtu_set(&bnad->bna.port, mtu, NULL); 2371 
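	/*
	 * Note (added): the mtu programmed above is the on-wire frame size
	 * (Ethernet header + payload MTU + FCS); pause_config was zeroed
	 * earlier, so flow control starts out disabled in both directions.
	 */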
bna_port_pause_config(&bnad->bna.port, &pause_config, NULL); 2372 bna_port_enable(&bnad->bna.port); 2373 spin_unlock_irqrestore(&bnad->bna_lock, flags); 2374 2375 /* Enable broadcast */ 2376 bnad_enable_default_bcast(bnad); 2377 2378 /* Restore VLANs, if any */ 2379 bnad_restore_vlans(bnad, 0); 2380 2381 /* Set the UCAST address */ 2382 spin_lock_irqsave(&bnad->bna_lock, flags); 2383 bnad_mac_addr_set_locked(bnad, netdev->dev_addr); 2384 spin_unlock_irqrestore(&bnad->bna_lock, flags); 2385 2386 /* Start the stats timer */ 2387 bnad_stats_timer_start(bnad); 2388 2389 mutex_unlock(&bnad->conf_mutex); 2390 2391 return 0; 2392 2393cleanup_tx: 2394 bnad_cleanup_tx(bnad, 0); 2395 2396err_return: 2397 mutex_unlock(&bnad->conf_mutex); 2398 return err; 2399} 2400 2401static int 2402bnad_stop(struct net_device *netdev) 2403{ 2404 struct bnad *bnad = netdev_priv(netdev); 2405 unsigned long flags; 2406 2407 mutex_lock(&bnad->conf_mutex); 2408 2409 /* Stop the stats timer */ 2410 bnad_stats_timer_stop(bnad); 2411 2412 init_completion(&bnad->bnad_completions.port_comp); 2413 2414 spin_lock_irqsave(&bnad->bna_lock, flags); 2415 bna_port_disable(&bnad->bna.port, BNA_HARD_CLEANUP, 2416 bnad_cb_port_disabled); 2417 spin_unlock_irqrestore(&bnad->bna_lock, flags); 2418 2419 wait_for_completion(&bnad->bnad_completions.port_comp); 2420 2421 bnad_cleanup_tx(bnad, 0); 2422 bnad_cleanup_rx(bnad, 0); 2423 2424 /* Synchronize mailbox IRQ */ 2425 bnad_mbox_irq_sync(bnad); 2426 2427 mutex_unlock(&bnad->conf_mutex); 2428 2429 return 0; 2430} 2431 2432/* TX */ 2433/* 2434 * bnad_start_xmit : Netdev entry point for Transmit 2435 * Called under lock held by net_device 2436 */ 2437static netdev_tx_t 2438bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev) 2439{ 2440 struct bnad *bnad = netdev_priv(netdev); 2441 2442 u16 txq_prod, vlan_tag = 0; 2443 u32 unmap_prod, wis, wis_used, wi_range; 2444 u32 vectors, vect_id, i, acked; 2445 u32 tx_id; 2446 int err; 2447 2448 struct bnad_tx_info *tx_info; 2449 struct bna_tcb *tcb; 2450 struct bnad_unmap_q *unmap_q; 2451 dma_addr_t dma_addr; 2452 struct bna_txq_entry *txqent; 2453 bna_txq_wi_ctrl_flag_t flags; 2454 2455 if (unlikely 2456 (skb->len <= ETH_HLEN || skb->len > BFI_TX_MAX_DATA_PER_PKT)) { 2457 dev_kfree_skb(skb); 2458 return NETDEV_TX_OK; 2459 } 2460 2461 tx_id = 0; 2462 2463 tx_info = &bnad->tx_info[tx_id]; 2464 tcb = tx_info->tcb[tx_id]; 2465 unmap_q = tcb->unmap_q; 2466 2467 /* 2468 * Takes care of the Tx that is scheduled between clearing the flag 2469 * and the netif_stop_queue() call. 
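 * (Added note: if an xmit slips in while the queue is being stopped,
 * the BNAD_TXQ_TX_STARTED test below simply drops the skb rather than
 * posting work to a stopped queue.)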
2470 */ 2471 if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) { 2472 dev_kfree_skb(skb); 2473 return NETDEV_TX_OK; 2474 } 2475 2476 vectors = 1 + skb_shinfo(skb)->nr_frags; 2477 if (vectors > BFI_TX_MAX_VECTORS_PER_PKT) { 2478 dev_kfree_skb(skb); 2479 return NETDEV_TX_OK; 2480 } 2481 wis = BNA_TXQ_WI_NEEDED(vectors); /* 4 vectors per work item */ 2482 acked = 0; 2483 if (unlikely 2484 (wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) || 2485 vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) { 2486 if ((u16) (*tcb->hw_consumer_index) != 2487 tcb->consumer_index && 2488 !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) { 2489 acked = bnad_free_txbufs(bnad, tcb); 2490 if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) 2491 bna_ib_ack(tcb->i_dbell, acked); 2492 smp_mb__before_clear_bit(); 2493 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags); 2494 } else { 2495 netif_stop_queue(netdev); 2496 BNAD_UPDATE_CTR(bnad, netif_queue_stop); 2497 } 2498 2499 smp_mb(); 2500 /* 2501 * Check again to deal with race condition between 2502 * netif_stop_queue here, and netif_wake_queue in 2503 * interrupt handler which is not inside netif tx lock. 2504 */ 2505 if (likely 2506 (wis > BNA_QE_FREE_CNT(tcb, tcb->q_depth) || 2507 vectors > BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth))) { 2508 BNAD_UPDATE_CTR(bnad, netif_queue_stop); 2509 return NETDEV_TX_BUSY; 2510 } else { 2511 netif_wake_queue(netdev); 2512 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup); 2513 } 2514 } 2515 2516 unmap_prod = unmap_q->producer_index; 2517 wis_used = 1; 2518 vect_id = 0; 2519 flags = 0; 2520 2521 txq_prod = tcb->producer_index; 2522 BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt, txqent, wi_range); 2523 BUG_ON(!(wi_range <= tcb->q_depth)); 2524 txqent->hdr.wi.reserved = 0; 2525 txqent->hdr.wi.num_vectors = vectors; 2526 txqent->hdr.wi.opcode = 2527 htons((skb_is_gso(skb) ? BNA_TXQ_WI_SEND_LSO : 2528 BNA_TXQ_WI_SEND)); 2529 2530 if (vlan_tx_tag_present(skb)) { 2531 vlan_tag = (u16) vlan_tx_tag_get(skb); 2532 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN); 2533 } 2534 if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) { 2535 vlan_tag = 2536 (tcb->priority & 0x7) << 13 | (vlan_tag & 0x1fff); 2537 flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN); 2538 } 2539 2540 txqent->hdr.wi.vlan_tag = htons(vlan_tag); 2541 2542 if (skb_is_gso(skb)) { 2543 err = bnad_tso_prepare(bnad, skb); 2544 if (err) { 2545 dev_kfree_skb(skb); 2546 return NETDEV_TX_OK; 2547 } 2548 txqent->hdr.wi.lso_mss = htons(skb_is_gso(skb)); 2549 flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM); 2550 txqent->hdr.wi.l4_hdr_size_n_offset = 2551 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET 2552 (tcp_hdrlen(skb) >> 2, 2553 skb_transport_offset(skb))); 2554 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { 2555 u8 proto = 0; 2556 2557 txqent->hdr.wi.lso_mss = 0; 2558 2559 if (skb->protocol == htons(ETH_P_IP)) 2560 proto = ip_hdr(skb)->protocol; 2561 else if (skb->protocol == htons(ETH_P_IPV6)) { 2562 /* nexthdr may not be TCP immediately. 
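 * (Added note: IPv6 extension headers may sit between the IPv6 header
 * and the L4 header; such packets fail the TCP/UDP match below and
 * fall through to skb_checksum_help().)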
*/ 2563 proto = ipv6_hdr(skb)->nexthdr; 2564 } 2565 if (proto == IPPROTO_TCP) { 2566 flags |= BNA_TXQ_WI_CF_TCP_CKSUM; 2567 txqent->hdr.wi.l4_hdr_size_n_offset = 2568 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET 2569 (0, skb_transport_offset(skb))); 2570 2571 BNAD_UPDATE_CTR(bnad, tcpcsum_offload); 2572 2573 BUG_ON(!(skb_headlen(skb) >= 2574 skb_transport_offset(skb) + tcp_hdrlen(skb))); 2575 2576 } else if (proto == IPPROTO_UDP) { 2577 flags |= BNA_TXQ_WI_CF_UDP_CKSUM; 2578 txqent->hdr.wi.l4_hdr_size_n_offset = 2579 htons(BNA_TXQ_WI_L4_HDR_N_OFFSET 2580 (0, skb_transport_offset(skb))); 2581 2582 BNAD_UPDATE_CTR(bnad, udpcsum_offload); 2583 2584 BUG_ON(!(skb_headlen(skb) >= 2585 skb_transport_offset(skb) + 2586 sizeof(struct udphdr))); 2587 } else { 2588 err = skb_checksum_help(skb); 2589 BNAD_UPDATE_CTR(bnad, csum_help); 2590 if (err) { 2591 dev_kfree_skb(skb); 2592 BNAD_UPDATE_CTR(bnad, csum_help_err); 2593 return NETDEV_TX_OK; 2594 } 2595 } 2596 } else { 2597 txqent->hdr.wi.lso_mss = 0; 2598 txqent->hdr.wi.l4_hdr_size_n_offset = 0; 2599 } 2600 2601 txqent->hdr.wi.flags = htons(flags); 2602 2603 txqent->hdr.wi.frame_length = htonl(skb->len); 2604 2605 unmap_q->unmap_array[unmap_prod].skb = skb; 2606 BUG_ON(!(skb_headlen(skb) <= BFI_TX_MAX_DATA_PER_VECTOR)); 2607 txqent->vector[vect_id].length = htons(skb_headlen(skb)); 2608 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data, 2609 skb_headlen(skb), DMA_TO_DEVICE); 2610 dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr, 2611 dma_addr); 2612 2613 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr); 2614 BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth); 2615 2616 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2617 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; 2618 u32 size = frag->size; 2619 2620 if (++vect_id == BFI_TX_MAX_VECTORS_PER_WI) { 2621 vect_id = 0; 2622 if (--wi_range) 2623 txqent++; 2624 else { 2625 BNA_QE_INDX_ADD(txq_prod, wis_used, 2626 tcb->q_depth); 2627 wis_used = 0; 2628 BNA_TXQ_QPGE_PTR_GET(txq_prod, tcb->sw_qpt, 2629 txqent, wi_range); 2630 BUG_ON(!(wi_range <= tcb->q_depth)); 2631 } 2632 wis_used++; 2633 txqent->hdr.wi_ext.opcode = htons(BNA_TXQ_WI_EXTENSION); 2634 } 2635 2636 BUG_ON(!(size <= BFI_TX_MAX_DATA_PER_VECTOR)); 2637 txqent->vector[vect_id].length = htons(size); 2638 dma_addr = dma_map_page(&bnad->pcidev->dev, frag->page, 2639 frag->page_offset, size, DMA_TO_DEVICE); 2640 dma_unmap_addr_set(&unmap_q->unmap_array[unmap_prod], dma_addr, 2641 dma_addr); 2642 BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr); 2643 BNA_QE_INDX_ADD(unmap_prod, 1, unmap_q->q_depth); 2644 } 2645 2646 unmap_q->producer_index = unmap_prod; 2647 BNA_QE_INDX_ADD(txq_prod, wis_used, tcb->q_depth); 2648 tcb->producer_index = txq_prod; 2649 2650 smp_mb(); 2651 2652 if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) 2653 return NETDEV_TX_OK; 2654 2655 bna_txq_prod_indx_doorbell(tcb); 2656 2657 if ((u16) (*tcb->hw_consumer_index) != tcb->consumer_index) 2658 tasklet_schedule(&bnad->tx_free_tasklet); 2659 2660 return NETDEV_TX_OK; 2661} 2662 2663/* 2664 * A spin_lock is used to synchronize reading of the stats structures, which 2665 * are written by BNA under the same lock.
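 * (Added note: this path may be reached from atomic context, so the
 * sleeping conf_mutex cannot be taken here.)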
2666 */ 2667static struct rtnl_link_stats64 * 2668bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) 2669{ 2670 struct bnad *bnad = netdev_priv(netdev); 2671 unsigned long flags; 2672 2673 spin_lock_irqsave(&bnad->bna_lock, flags); 2674 2675 bnad_netdev_qstats_fill(bnad, stats); 2676 bnad_netdev_hwstats_fill(bnad, stats); 2677 2678 spin_unlock_irqrestore(&bnad->bna_lock, flags); 2679 2680 return stats; 2681} 2682 2683static void 2684bnad_set_rx_mode(struct net_device *netdev) 2685{ 2686 struct bnad *bnad = netdev_priv(netdev); 2687 u32 new_mask, valid_mask; 2688 unsigned long flags; 2689 2690 spin_lock_irqsave(&bnad->bna_lock, flags); 2691 2692 new_mask = valid_mask = 0; 2693 2694 if (netdev->flags & IFF_PROMISC) { 2695 if (!(bnad->cfg_flags & BNAD_CF_PROMISC)) { 2696 new_mask = BNAD_RXMODE_PROMISC_DEFAULT; 2697 valid_mask = BNAD_RXMODE_PROMISC_DEFAULT; 2698 bnad->cfg_flags |= BNAD_CF_PROMISC; 2699 } 2700 } else { 2701 if (bnad->cfg_flags & BNAD_CF_PROMISC) { 2702 new_mask = ~BNAD_RXMODE_PROMISC_DEFAULT; 2703 valid_mask = BNAD_RXMODE_PROMISC_DEFAULT; 2704 bnad->cfg_flags &= ~BNAD_CF_PROMISC; 2705 } 2706 } 2707 2708 if (netdev->flags & IFF_ALLMULTI) { 2709 if (!(bnad->cfg_flags & BNAD_CF_ALLMULTI)) { 2710 new_mask |= BNA_RXMODE_ALLMULTI; 2711 valid_mask |= BNA_RXMODE_ALLMULTI; 2712 bnad->cfg_flags |= BNAD_CF_ALLMULTI; 2713 } 2714 } else { 2715 if (bnad->cfg_flags & BNAD_CF_ALLMULTI) { 2716 new_mask &= ~BNA_RXMODE_ALLMULTI; 2717 valid_mask |= BNA_RXMODE_ALLMULTI; 2718 bnad->cfg_flags &= ~BNAD_CF_ALLMULTI; 2719 } 2720 } 2721 2722 bna_rx_mode_set(bnad->rx_info[0].rx, new_mask, valid_mask, NULL); 2723 2724 if (!netdev_mc_empty(netdev)) { 2725 u8 *mcaddr_list; 2726 int mc_count = netdev_mc_count(netdev); 2727 2728 /* Index 0 holds the broadcast address */ 2729 mcaddr_list = 2730 kzalloc((mc_count + 1) * ETH_ALEN, 2731 GFP_ATOMIC); 2732 if (!mcaddr_list) 2733 goto unlock; 2734 2735 memcpy(&mcaddr_list[0], &bnad_bcast_addr[0], ETH_ALEN); 2736 2737 /* Copy rest of the MC addresses */ 2738 bnad_netdev_mc_list_get(netdev, mcaddr_list); 2739 2740 bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1, 2741 mcaddr_list, NULL); 2742 2743 /* Should we enable BNAD_CF_ALLMULTI for err != 0 ? */ 2744 kfree(mcaddr_list); 2745 } 2746unlock: 2747 spin_unlock_irqrestore(&bnad->bna_lock, flags); 2748} 2749 2750/* 2751 * bna_lock is used to sync writes to netdev->addr 2752 * conf_lock cannot be used since this call may be made 2753 * in a non-blocking context. 
2754 */ 2755static int 2756bnad_set_mac_address(struct net_device *netdev, void *mac_addr) 2757{ 2758 int err; 2759 struct bnad *bnad = netdev_priv(netdev); 2760 struct sockaddr *sa = (struct sockaddr *)mac_addr; 2761 unsigned long flags; 2762 2763 spin_lock_irqsave(&bnad->bna_lock, flags); 2764 2765 err = bnad_mac_addr_set_locked(bnad, sa->sa_data); 2766 2767 if (!err) 2768 memcpy(netdev->dev_addr, sa->sa_data, netdev->addr_len); 2769 2770 spin_unlock_irqrestore(&bnad->bna_lock, flags); 2771 2772 return err; 2773} 2774 2775static int 2776bnad_change_mtu(struct net_device *netdev, int new_mtu) 2777{ 2778 int mtu, err = 0; 2779 unsigned long flags; 2780 2781 struct bnad *bnad = netdev_priv(netdev); 2782 2783 if (new_mtu + ETH_HLEN < ETH_ZLEN || new_mtu > BNAD_JUMBO_MTU) 2784 return -EINVAL; 2785 2786 mutex_lock(&bnad->conf_mutex); 2787 2788 netdev->mtu = new_mtu; 2789 2790 mtu = ETH_HLEN + new_mtu + ETH_FCS_LEN; 2791 2792 spin_lock_irqsave(&bnad->bna_lock, flags); 2793 bna_port_mtu_set(&bnad->bna.port, mtu, NULL); 2794 spin_unlock_irqrestore(&bnad->bna_lock, flags); 2795 2796 mutex_unlock(&bnad->conf_mutex); 2797 return err; 2798} 2799 2800static void 2801bnad_vlan_rx_register(struct net_device *netdev, 2802 struct vlan_group *vlan_grp) 2803{ 2804 struct bnad *bnad = netdev_priv(netdev); 2805 2806 mutex_lock(&bnad->conf_mutex); 2807 bnad->vlan_grp = vlan_grp; 2808 mutex_unlock(&bnad->conf_mutex); 2809} 2810 2811static void 2812bnad_vlan_rx_add_vid(struct net_device *netdev, 2813 unsigned short vid) 2814{ 2815 struct bnad *bnad = netdev_priv(netdev); 2816 unsigned long flags; 2817 2818 if (!bnad->rx_info[0].rx) 2819 return; 2820 2821 mutex_lock(&bnad->conf_mutex); 2822 2823 spin_lock_irqsave(&bnad->bna_lock, flags); 2824 bna_rx_vlan_add(bnad->rx_info[0].rx, vid); 2825 spin_unlock_irqrestore(&bnad->bna_lock, flags); 2826 2827 mutex_unlock(&bnad->conf_mutex); 2828} 2829 2830static void 2831bnad_vlan_rx_kill_vid(struct net_device *netdev, 2832 unsigned short vid) 2833{ 2834 struct bnad *bnad = netdev_priv(netdev); 2835 unsigned long flags; 2836 2837 if (!bnad->rx_info[0].rx) 2838 return; 2839 2840 mutex_lock(&bnad->conf_mutex); 2841 2842 spin_lock_irqsave(&bnad->bna_lock, flags); 2843 bna_rx_vlan_del(bnad->rx_info[0].rx, vid); 2844 spin_unlock_irqrestore(&bnad->bna_lock, flags); 2845 2846 mutex_unlock(&bnad->conf_mutex); 2847} 2848 2849#ifdef CONFIG_NET_POLL_CONTROLLER 2850static void 2851bnad_netpoll(struct net_device *netdev) 2852{ 2853 struct bnad *bnad = netdev_priv(netdev); 2854 struct bnad_rx_info *rx_info; 2855 struct bnad_rx_ctrl *rx_ctrl; 2856 u32 curr_mask; 2857 int i, j; 2858 2859 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) { 2860 bna_intx_disable(&bnad->bna, curr_mask); 2861 bnad_isr(bnad->pcidev->irq, netdev); 2862 bna_intx_enable(&bnad->bna, curr_mask); 2863 } else { 2864 for (i = 0; i < bnad->num_rx; i++) { 2865 rx_info = &bnad->rx_info[i]; 2866 if (!rx_info->rx) 2867 continue; 2868 for (j = 0; j < bnad->num_rxp_per_rx; j++) { 2869 rx_ctrl = &rx_info->rx_ctrl[j]; 2870 if (rx_ctrl->ccb) { 2871 bnad_disable_rx_irq(bnad, 2872 rx_ctrl->ccb); 2873 bnad_netif_rx_schedule_poll(bnad, 2874 rx_ctrl->ccb); 2875 } 2876 } 2877 } 2878 } 2879} 2880#endif 2881 2882static const struct net_device_ops bnad_netdev_ops = { 2883 .ndo_open = bnad_open, 2884 .ndo_stop = bnad_stop, 2885 .ndo_start_xmit = bnad_start_xmit, 2886 .ndo_get_stats64 = bnad_get_stats64, 2887 .ndo_set_rx_mode = bnad_set_rx_mode, 2888 .ndo_set_multicast_list = bnad_set_rx_mode, 2889 .ndo_validate_addr = eth_validate_addr, 2890 
.ndo_set_mac_address = bnad_set_mac_address, 2891 .ndo_change_mtu = bnad_change_mtu, 2892 .ndo_vlan_rx_register = bnad_vlan_rx_register, 2893 .ndo_vlan_rx_add_vid = bnad_vlan_rx_add_vid, 2894 .ndo_vlan_rx_kill_vid = bnad_vlan_rx_kill_vid, 2895#ifdef CONFIG_NET_POLL_CONTROLLER 2896 .ndo_poll_controller = bnad_netpoll 2897#endif 2898}; 2899 2900static void 2901bnad_netdev_init(struct bnad *bnad, bool using_dac) 2902{ 2903 struct net_device *netdev = bnad->netdev; 2904 2905 netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | 2906 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 2907 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_TX; 2908 2909 netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA | 2910 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 2911 NETIF_F_TSO | NETIF_F_TSO6; 2912 2913 netdev->features |= netdev->hw_features | 2914 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER; 2915 2916 if (using_dac) 2917 netdev->features |= NETIF_F_HIGHDMA; 2918 2919 netdev->mem_start = bnad->mmio_start; 2920 netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1; 2921 2922 netdev->netdev_ops = &bnad_netdev_ops; 2923 bnad_set_ethtool_ops(netdev); 2924} 2925 2926/* 2927 * 1. Initialize the bnad structure 2928 * 2. Setup netdev pointer in pci_dev 2929 * 3. Initialize Tx free tasklet 2930 * 4. Initialize no. of TxQs, CQs & MSIX vectors 2931 */ 2932static int 2933bnad_init(struct bnad *bnad, 2934 struct pci_dev *pdev, struct net_device *netdev) 2935{ 2936 unsigned long flags; 2937 2938 SET_NETDEV_DEV(netdev, &pdev->dev); 2939 pci_set_drvdata(pdev, netdev); 2940 2941 bnad->netdev = netdev; 2942 bnad->pcidev = pdev; 2943 bnad->mmio_start = pci_resource_start(pdev, 0); 2944 bnad->mmio_len = pci_resource_len(pdev, 0); 2945 bnad->bar0 = ioremap_nocache(bnad->mmio_start, bnad->mmio_len); 2946 if (!bnad->bar0) { 2947 dev_err(&pdev->dev, "ioremap for bar0 failed\n"); 2948 pci_set_drvdata(pdev, NULL); 2949 return -ENOMEM; 2950 } 2951 pr_info("bar0 mapped to %p, len %llu\n", bnad->bar0, 2952 (unsigned long long) bnad->mmio_len); 2953 2954 spin_lock_irqsave(&bnad->bna_lock, flags); 2955 if (!bnad_msix_disable) 2956 bnad->cfg_flags = BNAD_CF_MSIX; 2957 2958 bnad->cfg_flags |= BNAD_CF_DIM_ENABLED; 2959 2960 bnad_q_num_init(bnad); 2961 spin_unlock_irqrestore(&bnad->bna_lock, flags); 2962 2963 bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) + 2964 (bnad->num_rx * bnad->num_rxp_per_rx) + 2965 BNAD_MAILBOX_MSIX_VECTORS; 2966 2967 bnad->txq_depth = BNAD_TXQ_DEPTH; 2968 bnad->rxq_depth = BNAD_RXQ_DEPTH; 2969 2970 bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO; 2971 bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO; 2972 2973 tasklet_init(&bnad->tx_free_tasklet, bnad_tx_free_tasklet, 2974 (unsigned long)bnad); 2975 2976 return 0; 2977} 2978 2979/* 2980 * Must be called after bnad_pci_uninit() 2981 * so that iounmap() and pci_set_drvdata(NULL) 2982 * happen only after PCI uninitialization.
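 * (Added note: bnad_pci_remove() and the probe error path below both
 * honour this ordering: bnad_pci_uninit() runs before bnad_uninit().)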
2983 */ 2984static void 2985bnad_uninit(struct bnad *bnad) 2986{ 2987 if (bnad->bar0) 2988 iounmap(bnad->bar0); 2989 pci_set_drvdata(bnad->pcidev, NULL); 2990} 2991 2992/* 2993 * Initialize locks 2994 a) Per device mutes used for serializing configuration 2995 changes from OS interface 2996 b) spin lock used to protect bna state machine 2997 */ 2998static void 2999bnad_lock_init(struct bnad *bnad) 3000{ 3001 spin_lock_init(&bnad->bna_lock); 3002 mutex_init(&bnad->conf_mutex); 3003} 3004 3005static void 3006bnad_lock_uninit(struct bnad *bnad) 3007{ 3008 mutex_destroy(&bnad->conf_mutex); 3009} 3010 3011/* PCI Initialization */ 3012static int 3013bnad_pci_init(struct bnad *bnad, 3014 struct pci_dev *pdev, bool *using_dac) 3015{ 3016 int err; 3017 3018 err = pci_enable_device(pdev); 3019 if (err) 3020 return err; 3021 err = pci_request_regions(pdev, BNAD_NAME); 3022 if (err) 3023 goto disable_device; 3024 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) && 3025 !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) { 3026 *using_dac = 1; 3027 } else { 3028 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); 3029 if (err) { 3030 err = dma_set_coherent_mask(&pdev->dev, 3031 DMA_BIT_MASK(32)); 3032 if (err) 3033 goto release_regions; 3034 } 3035 *using_dac = 0; 3036 } 3037 pci_set_master(pdev); 3038 return 0; 3039 3040release_regions: 3041 pci_release_regions(pdev); 3042disable_device: 3043 pci_disable_device(pdev); 3044 3045 return err; 3046} 3047 3048static void 3049bnad_pci_uninit(struct pci_dev *pdev) 3050{ 3051 pci_release_regions(pdev); 3052 pci_disable_device(pdev); 3053} 3054 3055static int __devinit 3056bnad_pci_probe(struct pci_dev *pdev, 3057 const struct pci_device_id *pcidev_id) 3058{ 3059 bool using_dac = false; 3060 int err; 3061 struct bnad *bnad; 3062 struct bna *bna; 3063 struct net_device *netdev; 3064 struct bfa_pcidev pcidev_info; 3065 unsigned long flags; 3066 3067 pr_info("bnad_pci_probe : (0x%p, 0x%p) PCI Func : (%d)\n", 3068 pdev, pcidev_id, PCI_FUNC(pdev->devfn)); 3069 3070 mutex_lock(&bnad_fwimg_mutex); 3071 if (!cna_get_firmware_buf(pdev)) { 3072 mutex_unlock(&bnad_fwimg_mutex); 3073 pr_warn("Failed to load Firmware Image!\n"); 3074 return -ENODEV; 3075 } 3076 mutex_unlock(&bnad_fwimg_mutex); 3077 3078 /* 3079 * Allocates sizeof(struct net_device + struct bnad) 3080 * bnad = netdev->priv 3081 */ 3082 netdev = alloc_etherdev(sizeof(struct bnad)); 3083 if (!netdev) { 3084 dev_err(&pdev->dev, "alloc_etherdev failed\n"); 3085 err = -ENOMEM; 3086 return err; 3087 } 3088 bnad = netdev_priv(netdev); 3089 3090 /* 3091 * PCI initialization 3092 * Output : using_dac = 1 for 64 bit DMA 3093 * = 0 for 32 bit DMA 3094 */ 3095 err = bnad_pci_init(bnad, pdev, &using_dac); 3096 if (err) 3097 goto free_netdev; 3098 3099 bnad_lock_init(bnad); 3100 /* 3101 * Initialize bnad structure 3102 * Setup relation between pci_dev & netdev 3103 * Init Tx free tasklet 3104 */ 3105 err = bnad_init(bnad, pdev, netdev); 3106 if (err) 3107 goto pci_uninit; 3108 /* Initialize netdev structure, set up ethtool ops */ 3109 bnad_netdev_init(bnad, using_dac); 3110 3111 /* Set link to down state */ 3112 netif_carrier_off(netdev); 3113 3114 bnad_enable_msix(bnad); 3115 3116 /* Get resource requirement form bna */ 3117 bna_res_req(&bnad->res_info[0]); 3118 3119 /* Allocate resources from bna */ 3120 err = bnad_res_alloc(bnad); 3121 if (err) 3122 goto free_netdev; 3123 3124 bna = &bnad->bna; 3125 3126 /* Setup pcidev_info for bna_init() */ 3127 pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn); 3128 
pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn); 3129 pcidev_info.device_id = bnad->pcidev->device; 3130 pcidev_info.pci_bar_kva = bnad->bar0; 3131 3132 mutex_lock(&bnad->conf_mutex); 3133 3134 spin_lock_irqsave(&bnad->bna_lock, flags); 3135 bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]); 3136 spin_unlock_irqrestore(&bnad->bna_lock, flags); 3137 3138 bnad->stats.bna_stats = &bna->stats; 3139 3140 /* Set up timers */ 3141 setup_timer(&bnad->bna.device.ioc.ioc_timer, bnad_ioc_timeout, 3142 ((unsigned long)bnad)); 3143 setup_timer(&bnad->bna.device.ioc.hb_timer, bnad_ioc_hb_check, 3144 ((unsigned long)bnad)); 3145 setup_timer(&bnad->bna.device.ioc.iocpf_timer, bnad_iocpf_timeout, 3146 ((unsigned long)bnad)); 3147 setup_timer(&bnad->bna.device.ioc.sem_timer, bnad_iocpf_sem_timeout, 3148 ((unsigned long)bnad)); 3149 3150 /* Now start the timer before calling IOC */ 3151 mod_timer(&bnad->bna.device.ioc.iocpf_timer, 3152 jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ)); 3153 3154 /* 3155 * Start the chip 3156 * Don't care even if err != 0, bna state machine will 3157 * deal with it 3158 */ 3159 err = bnad_device_enable(bnad); 3160 3161 /* Get the burnt-in MAC */ 3162 spin_lock_irqsave(&bnad->bna_lock, flags); 3163 bna_port_mac_get(&bna->port, &bnad->perm_addr); 3164 bnad_set_netdev_perm_addr(bnad); 3165 spin_unlock_irqrestore(&bnad->bna_lock, flags); 3166 3167 mutex_unlock(&bnad->conf_mutex); 3168 3169 /* Finally, register with the net_device layer */ 3170 err = register_netdev(netdev); 3171 if (err) { 3172 pr_err("BNA : Registering with netdev failed\n"); 3173 goto disable_device; 3174 } 3175 3176 return 0; 3177 3178disable_device: 3179 mutex_lock(&bnad->conf_mutex); 3180 bnad_device_disable(bnad); 3181 del_timer_sync(&bnad->bna.device.ioc.ioc_timer); 3182 del_timer_sync(&bnad->bna.device.ioc.sem_timer); 3183 del_timer_sync(&bnad->bna.device.ioc.hb_timer); 3184 spin_lock_irqsave(&bnad->bna_lock, flags); 3185 bna_uninit(bna); 3186 spin_unlock_irqrestore(&bnad->bna_lock, flags); 3187 mutex_unlock(&bnad->conf_mutex); 3188 3189 bnad_res_free(bnad); 3190 bnad_disable_msix(bnad); 3191pci_uninit: 3192 bnad_pci_uninit(pdev); 3193 bnad_lock_uninit(bnad); 3194 bnad_uninit(bnad); 3195free_netdev: 3196 free_netdev(netdev); 3197 return err; 3198} 3199 3200static void __devexit 3201bnad_pci_remove(struct pci_dev *pdev) 3202{ 3203 struct net_device *netdev = pci_get_drvdata(pdev); 3204 struct bnad *bnad; 3205 struct bna *bna; 3206 unsigned long flags; 3207 3208 if (!netdev) 3209 return; 3210 3211 pr_info("%s bnad_pci_remove\n", netdev->name); 3212 bnad = netdev_priv(netdev); 3213 bna = &bnad->bna; 3214 3215 unregister_netdev(netdev); 3216 3217 mutex_lock(&bnad->conf_mutex); 3218 bnad_device_disable(bnad); 3219 del_timer_sync(&bnad->bna.device.ioc.ioc_timer); 3220 del_timer_sync(&bnad->bna.device.ioc.sem_timer); 3221 del_timer_sync(&bnad->bna.device.ioc.hb_timer); 3222 spin_lock_irqsave(&bnad->bna_lock, flags); 3223 bna_uninit(bna); 3224 spin_unlock_irqrestore(&bnad->bna_lock, flags); 3225 mutex_unlock(&bnad->conf_mutex); 3226 3227 bnad_res_free(bnad); 3228 bnad_disable_msix(bnad); 3229 bnad_pci_uninit(pdev); 3230 bnad_lock_uninit(bnad); 3231 bnad_uninit(bnad); 3232 free_netdev(netdev); 3233} 3234 3235static const struct pci_device_id bnad_pci_id_table[] = { 3236 { 3237 PCI_DEVICE(PCI_VENDOR_ID_BROCADE, 3238 PCI_DEVICE_ID_BROCADE_CT), 3239 .class = PCI_CLASS_NETWORK_ETHERNET << 8, 3240 .class_mask = 0xffff00 3241 }, {0, } 3242}; 3243 3244MODULE_DEVICE_TABLE(pci, bnad_pci_id_table); 3245 3246static
struct pci_driver bnad_pci_driver = { 3247 .name = BNAD_NAME, 3248 .id_table = bnad_pci_id_table, 3249 .probe = bnad_pci_probe, 3250 .remove = __devexit_p(bnad_pci_remove), 3251}; 3252 3253static int __init 3254bnad_module_init(void) 3255{ 3256 int err; 3257 3258 pr_info("Brocade 10G Ethernet driver\n"); 3259 3260 bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover); 3261 3262 err = pci_register_driver(&bnad_pci_driver); 3263 if (err < 0) { 3264 pr_err("bna : PCI registration failed in module init " 3265 "(%d)\n", err); 3266 return err; 3267 } 3268 3269 return 0; 3270} 3271 3272static void __exit 3273bnad_module_exit(void) 3274{ 3275 pci_unregister_driver(&bnad_pci_driver); 3276 3277 if (bfi_fw) 3278 release_firmware(bfi_fw); 3279} 3280 3281module_init(bnad_module_init); 3282module_exit(bnad_module_exit); 3283 3284MODULE_AUTHOR("Brocade"); 3285MODULE_LICENSE("GPL"); 3286MODULE_DESCRIPTION("Brocade 10G PCIe Ethernet driver"); 3287MODULE_VERSION(BNAD_VERSION); 3288MODULE_FIRMWARE(CNA_FW_FILE_CT);
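/*
 * Illustrative sketch (added; not part of the driver, all foo_* names
 * are placeholders): the entry points above follow the standard PCI
 * driver lifecycle of this kernel era, which in skeleton form is:
 *
 *	static struct pci_driver foo_pci_driver = {
 *		.name     = "foo",
 *		.id_table = foo_pci_id_table,
 *		.probe    = foo_pci_probe,
 *		.remove   = __devexit_p(foo_pci_remove),
 *	};
 *
 *	static int __init foo_module_init(void)
 *	{
 *		return pci_register_driver(&foo_pci_driver);
 *	}
 *	module_init(foo_module_init);
 *
 *	static void __exit foo_module_exit(void)
 *	{
 *		pci_unregister_driver(&foo_pci_driver);
 *	}
 *	module_exit(foo_module_exit);
 *
 * probe allocates the netdev (alloc_etherdev()), maps BAR0 and ends with
 * register_netdev(); remove reverses those steps and ends with
 * free_netdev().
 */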