Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'qed-next'

Yuval Mintz says:

====================
qed: IOV enhancements and fixups

This is a follow-up on the recent patch series that adds SR-IOV support
to qed. All content here is iov-related fixups [nothing terminal] and
enhancements.

Please consider applying this series to `net-next'.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+99 -103
+32 -27
drivers/net/ethernet/qlogic/qed/qed_int.c
··· 2805 2805 } 2806 2806 2807 2807 #define IGU_CLEANUP_SLEEP_LENGTH (1000) 2808 - void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn, 2809 - struct qed_ptt *p_ptt, 2810 - u32 sb_id, 2811 - bool cleanup_set, 2812 - u16 opaque_fid 2813 - ) 2808 + static void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn, 2809 + struct qed_ptt *p_ptt, 2810 + u32 sb_id, bool cleanup_set, u16 opaque_fid) 2814 2811 { 2812 + u32 cmd_ctrl = 0, val = 0, sb_bit = 0, sb_bit_addr = 0, data = 0; 2815 2813 u32 pxp_addr = IGU_CMD_INT_ACK_BASE + sb_id; 2816 2814 u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH; 2817 - u32 data = 0; 2818 - u32 cmd_ctrl = 0; 2819 - u32 val = 0; 2820 - u32 sb_bit = 0; 2821 - u32 sb_bit_addr = 0; 2822 2815 2823 2816 /* Set the data field */ 2824 2817 SET_FIELD(data, IGU_CLEANUP_CLEANUP_SET, cleanup_set ? 1 : 0); ··· 2856 2863 2857 2864 void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn, 2858 2865 struct qed_ptt *p_ptt, 2859 - u32 sb_id, 2860 - u16 opaque, 2861 - bool b_set) 2866 + u32 sb_id, u16 opaque, bool b_set) 2862 2867 { 2863 - int pi; 2868 + int pi, i; 2864 2869 2865 2870 /* Set */ 2866 2871 if (b_set) ··· 2866 2875 2867 2876 /* Clear */ 2868 2877 qed_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 0, opaque); 2878 + 2879 + /* Wait for the IGU SB to cleanup */ 2880 + for (i = 0; i < IGU_CLEANUP_SLEEP_LENGTH; i++) { 2881 + u32 val; 2882 + 2883 + val = qed_rd(p_hwfn, p_ptt, 2884 + IGU_REG_WRITE_DONE_PENDING + ((sb_id / 32) * 4)); 2885 + if (val & (1 << (sb_id % 32))) 2886 + usleep_range(10, 20); 2887 + else 2888 + break; 2889 + } 2890 + if (i == IGU_CLEANUP_SLEEP_LENGTH) 2891 + DP_NOTICE(p_hwfn, 2892 + "Failed SB[0x%08x] still appearing in WRITE_DONE_PENDING\n", 2893 + sb_id); 2869 2894 2870 2895 /* Clear the CAU for the SB */ 2871 2896 for (pi = 0; pi < 12; pi++) ··· 2891 2884 2892 2885 void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn, 2893 2886 struct qed_ptt *p_ptt, 2894 - bool b_set, 2895 - bool b_slowpath) 2887 + bool b_set, bool b_slowpath) 2896 2888 { 
2897 2889 u32 igu_base_sb = p_hwfn->hw_info.p_igu_info->igu_base_sb; 2898 2890 u32 igu_sb_cnt = p_hwfn->hw_info.p_igu_info->igu_sb_cnt; 2899 - u32 sb_id = 0; 2900 - u32 val = 0; 2891 + u32 sb_id = 0, val = 0; 2901 2892 2902 2893 val = qed_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION); 2903 2894 val |= IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN; ··· 2911 2906 p_hwfn->hw_info.opaque_fid, 2912 2907 b_set); 2913 2908 2914 - if (b_slowpath) { 2915 - sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id; 2916 - DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, 2917 - "IGU cleaning slowpath SB [%d]\n", sb_id); 2918 - qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id, 2919 - p_hwfn->hw_info.opaque_fid, 2920 - b_set); 2921 - } 2909 + if (!b_slowpath) 2910 + return; 2911 + 2912 + sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id; 2913 + DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, 2914 + "IGU cleaning slowpath SB [%d]\n", sb_id); 2915 + qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id, 2916 + p_hwfn->hw_info.opaque_fid, b_set); 2922 2917 } 2923 2918 2924 2919 static u32 qed_int_igu_read_cam_block(struct qed_hwfn *p_hwfn,
+1 -19
drivers/net/ethernet/qlogic/qed/qed_int.h
··· 298 298 * @param p_hwfn 299 299 * @param p_ptt 300 300 * @param sb_id - igu status block id 301 - * @param cleanup_set - set(1) / clear(0) 302 - * @param opaque_fid - the function for which to perform 303 - * cleanup, for example a PF on behalf of 304 - * its VFs. 305 - */ 306 - void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn, 307 - struct qed_ptt *p_ptt, 308 - u32 sb_id, 309 - bool cleanup_set, 310 - u16 opaque_fid); 311 - 312 - /** 313 - * @brief Status block cleanup. Should be called for each status 314 - * block that will be used -> both PF / VF 315 - * 316 - * @param p_hwfn 317 - * @param p_ptt 318 - * @param sb_id - igu status block id 319 301 * @param opaque - opaque fid of the sb owner. 320 - * @param cleanup_set - set(1) / clear(0) 302 + * @param b_set - set(1) / clear(0) 321 303 */ 322 304 void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn, 323 305 struct qed_ptt *p_ptt,
+1 -1
drivers/net/ethernet/qlogic/qed/qed_main.c
··· 158 158 } 159 159 160 160 cdev->pci_params.pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM); 161 - if (cdev->pci_params.pm_cap == 0) 161 + if (IS_PF(cdev) && !cdev->pci_params.pm_cap) 162 162 DP_NOTICE(cdev, "Cannot find power management capability\n"); 163 163 164 164 rc = qed_set_coherency_mask(cdev);
+2
drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
··· 429 429 0x184000UL 430 430 #define IGU_REG_STATISTIC_NUM_VF_MSG_SENT \ 431 431 0x180408UL 432 + #define IGU_REG_WRITE_DONE_PENDING \ 433 + 0x180900UL 432 434 #define MISCS_REG_GENERIC_POR_0 \ 433 435 0x0096d4UL 434 436 #define MCP_REG_NVM_CFG4 \
+63 -56
drivers/net/ethernet/qlogic/qed/qed_sriov.c
··· 476 476 static bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid) 477 477 { 478 478 /* Check PF supports sriov */ 479 - if (!IS_QED_SRIOV(p_hwfn->cdev) || !IS_PF_SRIOV_ALLOC(p_hwfn)) 479 + if (IS_VF(p_hwfn->cdev) || !IS_QED_SRIOV(p_hwfn->cdev) || 480 + !IS_PF_SRIOV_ALLOC(p_hwfn)) 480 481 return false; 481 482 482 483 /* Check VF validity */ 483 - if (IS_VF(p_hwfn->cdev) || !IS_QED_SRIOV(p_hwfn->cdev) || 484 - !IS_PF_SRIOV_ALLOC(p_hwfn)) 484 + if (!qed_iov_is_valid_vfid(p_hwfn, vfid, true)) 485 485 return false; 486 486 487 487 return true; ··· 526 526 static void qed_iov_vf_igu_reset(struct qed_hwfn *p_hwfn, 527 527 struct qed_ptt *p_ptt, struct qed_vf_info *vf) 528 528 { 529 - u16 igu_sb_id; 530 529 int i; 531 530 532 531 /* Set VF masks and configuration - pretend */ ··· 533 534 534 535 qed_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0); 535 536 536 - DP_VERBOSE(p_hwfn, QED_MSG_IOV, 537 - "value in VF_CONFIGURATION of vf %d after write %x\n", 538 - vf->abs_vf_id, 539 - qed_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION)); 540 - 541 537 /* unpretend */ 542 538 qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid); 543 539 544 540 /* iterate over all queues, clear sb consumer */ 545 - for (i = 0; i < vf->num_sbs; i++) { 546 - igu_sb_id = vf->igu_sbs[i]; 547 - /* Set then clear... 
*/ 548 - qed_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 1, 549 - vf->opaque_fid); 550 - qed_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 0, 551 - vf->opaque_fid); 552 - } 541 + for (i = 0; i < vf->num_sbs; i++) 542 + qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, 543 + vf->igu_sbs[i], 544 + vf->opaque_fid, true); 553 545 } 554 546 555 547 static void qed_iov_vf_igu_set_int(struct qed_hwfn *p_hwfn, ··· 580 590 vf->abs_vf_id, QED_VF_ABS_ID(p_hwfn, vf)); 581 591 582 592 qed_iov_vf_pglue_clear_err(p_hwfn, p_ptt, QED_VF_ABS_ID(p_hwfn, vf)); 593 + 594 + qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf); 583 595 584 596 rc = qed_mcp_config_vf_msix(p_hwfn, p_ptt, vf->abs_vf_id, vf->num_sbs); 585 597 if (rc) ··· 806 814 return rc; 807 815 } 808 816 817 + static void qed_iov_set_link(struct qed_hwfn *p_hwfn, 818 + u16 vfid, 819 + struct qed_mcp_link_params *params, 820 + struct qed_mcp_link_state *link, 821 + struct qed_mcp_link_capabilities *p_caps) 822 + { 823 + struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn, 824 + vfid, 825 + false); 826 + struct qed_bulletin_content *p_bulletin; 827 + 828 + if (!p_vf) 829 + return; 830 + 831 + p_bulletin = p_vf->bulletin.p_virt; 832 + p_bulletin->req_autoneg = params->speed.autoneg; 833 + p_bulletin->req_adv_speed = params->speed.advertised_speeds; 834 + p_bulletin->req_forced_speed = params->speed.forced_speed; 835 + p_bulletin->req_autoneg_pause = params->pause.autoneg; 836 + p_bulletin->req_forced_rx = params->pause.forced_rx; 837 + p_bulletin->req_forced_tx = params->pause.forced_tx; 838 + p_bulletin->req_loopback = params->loopback_mode; 839 + 840 + p_bulletin->link_up = link->link_up; 841 + p_bulletin->speed = link->speed; 842 + p_bulletin->full_duplex = link->full_duplex; 843 + p_bulletin->autoneg = link->an; 844 + p_bulletin->autoneg_complete = link->an_complete; 845 + p_bulletin->parallel_detection = link->parallel_detection; 846 + p_bulletin->pfc_enabled = link->pfc_enabled; 847 + p_bulletin->partner_adv_speed = 
link->partner_adv_speed; 848 + p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en; 849 + p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en; 850 + p_bulletin->partner_adv_pause = link->partner_adv_pause; 851 + p_bulletin->sfp_tx_fault = link->sfp_tx_fault; 852 + 853 + p_bulletin->capability_speed = p_caps->speed_capabilities; 854 + } 855 + 809 856 static int qed_iov_release_hw_for_vf(struct qed_hwfn *p_hwfn, 810 857 struct qed_ptt *p_ptt, u16 rel_vf_id) 811 858 { 859 + struct qed_mcp_link_capabilities caps; 860 + struct qed_mcp_link_params params; 861 + struct qed_mcp_link_state link; 812 862 struct qed_vf_info *vf = NULL; 813 863 int rc = 0; 814 864 ··· 864 830 memset(vf->bulletin.p_virt, 0, sizeof(*vf->bulletin.p_virt)); 865 831 866 832 memset(&vf->p_vf_info, 0, sizeof(vf->p_vf_info)); 833 + 834 + /* Get the link configuration back in bulletin so 835 + * that when VFs are re-enabled they get the actual 836 + * link configuration. 837 + */ 838 + memcpy(&params, qed_mcp_get_link_params(p_hwfn), sizeof(params)); 839 + memcpy(&link, qed_mcp_get_link_state(p_hwfn), sizeof(link)); 840 + memcpy(&caps, qed_mcp_get_link_capabilities(p_hwfn), sizeof(caps)); 841 + qed_iov_set_link(p_hwfn, rel_vf_id, &params, &link, &caps); 867 842 868 843 if (vf->state != VF_STOPPED) { 869 844 /* Stopping the VF */ ··· 2593 2550 return found; 2594 2551 } 2595 2552 2596 - void qed_iov_set_link(struct qed_hwfn *p_hwfn, 2597 - u16 vfid, 2598 - struct qed_mcp_link_params *params, 2599 - struct qed_mcp_link_state *link, 2600 - struct qed_mcp_link_capabilities *p_caps) 2601 - { 2602 - struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn, 2603 - vfid, 2604 - false); 2605 - struct qed_bulletin_content *p_bulletin; 2606 - 2607 - if (!p_vf) 2608 - return; 2609 - 2610 - p_bulletin = p_vf->bulletin.p_virt; 2611 - p_bulletin->req_autoneg = params->speed.autoneg; 2612 - p_bulletin->req_adv_speed = params->speed.advertised_speeds; 2613 - p_bulletin->req_forced_speed = 
params->speed.forced_speed; 2614 - p_bulletin->req_autoneg_pause = params->pause.autoneg; 2615 - p_bulletin->req_forced_rx = params->pause.forced_rx; 2616 - p_bulletin->req_forced_tx = params->pause.forced_tx; 2617 - p_bulletin->req_loopback = params->loopback_mode; 2618 - 2619 - p_bulletin->link_up = link->link_up; 2620 - p_bulletin->speed = link->speed; 2621 - p_bulletin->full_duplex = link->full_duplex; 2622 - p_bulletin->autoneg = link->an; 2623 - p_bulletin->autoneg_complete = link->an_complete; 2624 - p_bulletin->parallel_detection = link->parallel_detection; 2625 - p_bulletin->pfc_enabled = link->pfc_enabled; 2626 - p_bulletin->partner_adv_speed = link->partner_adv_speed; 2627 - p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en; 2628 - p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en; 2629 - p_bulletin->partner_adv_pause = link->partner_adv_pause; 2630 - p_bulletin->sfp_tx_fault = link->sfp_tx_fault; 2631 - 2632 - p_bulletin->capability_speed = p_caps->speed_capabilities; 2633 - } 2634 - 2635 2553 static void qed_iov_get_link(struct qed_hwfn *p_hwfn, 2636 2554 u16 vfid, 2637 2555 struct qed_mcp_link_params *p_params, ··· 3098 3094 rc = -EBUSY; 3099 3095 goto err; 3100 3096 } 3097 + 3098 + if (IS_MF_DEFAULT(hwfn)) 3099 + limit = MAX_NUM_VFS_BB / hwfn->num_funcs_on_engine; 3101 3100 3102 3101 memset(&sb_cnt_info, 0, sizeof(sb_cnt_info)); 3103 3102 qed_int_get_num_sbs(hwfn, &sb_cnt_info);