bnx2x: NAPI and interrupts enable/disable

Fix the order of enabling and disabling NAPI and the interrupts: NAPI is now enabled before interrupt handling on load, and interrupts are disabled and synchronized before NAPI is disabled on unload, so an interrupt can never schedule a NAPI poll on a queue that has already been stopped (a minimal sketch of this ordering follows the diff below).

Signed-off-by: Yitchak Gertner <gertner@broadcom.com>
Signed-off-by: Eilon Greenstein <eilong@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by Yitchak Gertner and committed by David S. Miller · 65abd74d d1014634

+74 -77
drivers/net/bnx2x_main.c
···
 	return rc;
 }
 
+static void bnx2x_napi_enable(struct bnx2x *bp)
+{
+	int i;
+
+	for_each_queue(bp, i)
+		napi_enable(&bnx2x_fp(bp, i, napi));
+}
+
+static void bnx2x_napi_disable(struct bnx2x *bp)
+{
+	int i;
+
+	for_each_queue(bp, i)
+		napi_disable(&bnx2x_fp(bp, i, napi));
+}
+
+static void bnx2x_netif_start(struct bnx2x *bp)
+{
+	if (atomic_dec_and_test(&bp->intr_sem)) {
+		if (netif_running(bp->dev)) {
+			if (bp->state == BNX2X_STATE_OPEN)
+				netif_wake_queue(bp->dev);
+			bnx2x_napi_enable(bp);
+			bnx2x_int_enable(bp);
+		}
+	}
+}
+
+static void bnx2x_netif_stop(struct bnx2x *bp)
+{
+	bnx2x_int_disable_sync(bp);
+	if (netif_running(bp->dev)) {
+		bnx2x_napi_disable(bp);
+		netif_tx_disable(bp->dev);
+		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
+	}
+}
+
 /*
  * Init service functions
  */
···
 
 	/* Enable Rx interrupt handling before sending the ramrod
 	   as it's completed on Rx FP queue */
-	for_each_queue(bp, i)
-		napi_enable(&bnx2x_fp(bp, i, napi));
+	bnx2x_napi_enable(bp);
 
 	/* Enable interrupt handling */
 	atomic_set(&bp->intr_sem, 0);
···
 	return 0;
 
 load_netif_stop:
-	for_each_queue(bp, i)
-		napi_disable(&bnx2x_fp(bp, i, napi));
+	bnx2x_napi_disable(bp);
 load_rings_free:
 	/* Free SKBs, SGEs, TPA pool and driver internals */
 	bnx2x_free_skbs(bp);
···
 	bp->rx_mode = BNX2X_RX_MODE_NONE;
 	bnx2x_set_storm_rx_mode(bp);
 
-	if (netif_running(bp->dev)) {
-		netif_tx_disable(bp->dev);
-		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
-	}
-
+	bnx2x_netif_stop(bp);
+	if (!netif_running(bp->dev))
+		bnx2x_napi_disable(bp);
 	del_timer_sync(&bp->timer);
 	SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
 		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
···
 		smp_rmb();
 		while (BNX2X_HAS_TX_WORK(fp)) {
 
-			if (!netif_running(bp->dev))
-				bnx2x_tx_int(fp, 1000);
-
+			bnx2x_tx_int(fp, 1000);
 			if (!cnt) {
 				BNX2X_ERR("timeout waiting for queue[%d]\n",
 					  i);
···
 			smp_rmb();
 		}
 	}
-
 	/* Give HW time to discard old tx messages */
 	msleep(1);
 
-	for_each_queue(bp, i)
-		napi_disable(&bnx2x_fp(bp, i, napi));
-	/* Disable interrupts after Tx and Rx are disabled on stack level */
-	bnx2x_int_disable_sync(bp);
-
 	/* Release IRQs */
 	bnx2x_free_irq(bp);
 
+	if (CHIP_IS_E1(bp)) {
+		struct mac_configuration_cmd *config =
+			bnx2x_sp(bp, mcast_config);
+
+		bnx2x_set_mac_addr_e1(bp, 0);
+
+		for (i = 0; i < config->hdr.length_6b; i++)
+			CAM_INVALIDATE(config->config_table[i]);
+
+		config->hdr.length_6b = i;
+		if (CHIP_REV_IS_SLOW(bp))
+			config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
+		else
+			config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
+		config->hdr.client_id = BP_CL_ID(bp);
+		config->hdr.reserved1 = 0;
+
+		bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
+			      U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
+			      U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
+
+	} else { /* E1H */
+		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
+
+		bnx2x_set_mac_addr_e1h(bp, 0);
+
+		for (i = 0; i < MC_HASH_SIZE; i++)
+			REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
+	}
 
 	if (unload_mode == UNLOAD_NORMAL)
 		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
···
 
 	} else
 		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
-
-	if (CHIP_IS_E1(bp)) {
-		struct mac_configuration_cmd *config =
-			bnx2x_sp(bp, mcast_config);
-
-		bnx2x_set_mac_addr_e1(bp, 0);
-
-		for (i = 0; i < config->hdr.length_6b; i++)
-			CAM_INVALIDATE(config->config_table[i]);
-
-		config->hdr.length_6b = i;
-		if (CHIP_REV_IS_SLOW(bp))
-			config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
-		else
-			config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
-		config->hdr.client_id = BP_CL_ID(bp);
-		config->hdr.reserved1 = 0;
-
-		bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
-			      U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
-			      U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
-
-	} else { /* E1H */
-		bnx2x_set_mac_addr_e1h(bp, 0);
-
-		for (i = 0; i < MC_HASH_SIZE; i++)
-			REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
-	}
-
-	if (CHIP_IS_E1H(bp))
-		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
 
 	/* Close multi and leading connections
 	   Completions for ramrods are collected in a synchronous way */
···
 
 test_mem_exit:
 	return rc;
-}
-
-static void bnx2x_netif_start(struct bnx2x *bp)
-{
-	int i;
-
-	if (atomic_dec_and_test(&bp->intr_sem)) {
-		if (netif_running(bp->dev)) {
-			bnx2x_int_enable(bp);
-			for_each_queue(bp, i)
-				napi_enable(&bnx2x_fp(bp, i, napi));
-			if (bp->state == BNX2X_STATE_OPEN)
-				netif_wake_queue(bp->dev);
-		}
-	}
-}
-
-static void bnx2x_netif_stop(struct bnx2x *bp)
-{
-	int i;
-
-	if (netif_running(bp->dev)) {
-		netif_tx_disable(bp->dev);
-		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
-		for_each_queue(bp, i)
-			napi_disable(&bnx2x_fp(bp, i, napi));
-	}
-	bnx2x_int_disable_sync(bp);
 }
 
 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
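For illustration only (not part of the commit): a minimal userspace sketch of the bracket ordering the patch establishes. The type and helpers below are hypothetical stand-ins, not kernel APIs; they model only the order in which the real bnx2x_netif_start()/bnx2x_netif_stop() touch the tx queue, NAPI and interrupts.

/* Hypothetical userspace model of the patch's enable/disable ordering. */
#include <stdbool.h>
#include <stdio.h>

struct fake_bp {
	bool irq_enabled;	/* stands in for bnx2x_int_enable/_disable_sync */
	bool napi_enabled;	/* stands in for bnx2x_napi_enable/_disable */
	bool tx_enabled;	/* stands in for netif_wake_queue/netif_tx_disable */
};

/* Mirrors bnx2x_netif_start(): tx queue, then NAPI, then interrupts,
 * so by the time an interrupt can fire, a NAPI poll can be scheduled. */
static void netif_start(struct fake_bp *bp)
{
	bp->tx_enabled = true;
	bp->napi_enabled = true;
	bp->irq_enabled = true;		/* enabled last */
}

/* Mirrors bnx2x_netif_stop(): interrupts first (masked and synchronized),
 * so no interrupt can schedule a poll after NAPI has been disabled. */
static void netif_stop(struct fake_bp *bp)
{
	bp->irq_enabled = false;	/* disabled first */
	bp->napi_enabled = false;
	bp->tx_enabled = false;
}

int main(void)
{
	struct fake_bp bp = { false, false, false };

	netif_start(&bp);
	printf("up:   irq=%d napi=%d tx=%d\n",
	       bp.irq_enabled, bp.napi_enabled, bp.tx_enabled);
	netif_stop(&bp);
	printf("down: irq=%d napi=%d tx=%d\n",
	       bp.irq_enabled, bp.napi_enabled, bp.tx_enabled);
	return 0;
}

The key point is the asymmetry: enable ends with interrupts and disable begins with them, so the interrupt source is never live while NAPI is in a half-torn-down state.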