Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'qed-Add-support-for-new-multi-partitioning-modes'

Sudarsana Reddy Kalluru says:

====================
qed*: Add support for new multi partitioning modes.

The patch series simplifies the multi function (MF) mode implementation of
qed/qede drivers, and adds support for new MF modes.

Please consider applying it to net-next branch.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+389 -82
+57 -4
drivers/net/ethernet/qlogic/qed/qed.h
··· 439 439 u32 init_ops_size; 440 440 }; 441 441 442 + enum qed_mf_mode_bit { 443 + /* Supports PF-classification based on tag */ 444 + QED_MF_OVLAN_CLSS, 445 + 446 + /* Supports PF-classification based on MAC */ 447 + QED_MF_LLH_MAC_CLSS, 448 + 449 + /* Supports PF-classification based on protocol type */ 450 + QED_MF_LLH_PROTO_CLSS, 451 + 452 + /* Requires a default PF to be set */ 453 + QED_MF_NEED_DEF_PF, 454 + 455 + /* Allow LL2 to multicast/broadcast */ 456 + QED_MF_LL2_NON_UNICAST, 457 + 458 + /* Allow Cross-PF [& child VFs] Tx-switching */ 459 + QED_MF_INTER_PF_SWITCH, 460 + 461 + /* Unified Fabric Port support enabled */ 462 + QED_MF_UFP_SPECIFIC, 463 + 464 + /* Disable Accelerated Receive Flow Steering (aRFS) */ 465 + QED_MF_DISABLE_ARFS, 466 + 467 + /* Use vlan for steering */ 468 + QED_MF_8021Q_TAGGING, 469 + 470 + /* Use stag for steering */ 471 + QED_MF_8021AD_TAGGING, 472 + 473 + /* Allow DSCP to TC mapping */ 474 + QED_MF_DSCP_TO_TC_MAP, 475 + }; 476 + 477 + enum qed_ufp_mode { 478 + QED_UFP_MODE_ETS, 479 + QED_UFP_MODE_VNIC_BW, 480 + QED_UFP_MODE_UNKNOWN 481 + }; 482 + 483 + enum qed_ufp_pri_type { 484 + QED_UFP_PRI_OS, 485 + QED_UFP_PRI_VNIC, 486 + QED_UFP_PRI_UNKNOWN 487 + }; 488 + 489 + struct qed_ufp_info { 490 + enum qed_ufp_pri_type pri_type; 491 + enum qed_ufp_mode mode; 492 + u8 tc; 493 + }; 494 + 442 495 enum BAR_ID { 443 496 BAR_ID_0, /* used for GRC */ 444 497 BAR_ID_1 /* Used for doorbells */ ··· 599 546 struct qed_mcp_info *mcp_info; 600 547 601 548 struct qed_dcbx_info *p_dcbx_info; 549 + 550 + struct qed_ufp_info ufp_info; 602 551 603 552 struct qed_dmae_info dmae_info; 604 553 ··· 724 669 u8 num_funcs_in_port; 725 670 726 671 u8 path_id; 727 - enum qed_mf_mode mf_mode; 728 - #define IS_MF_DEFAULT(_p_hwfn) (((_p_hwfn)->cdev)->mf_mode == QED_MF_DEFAULT) 729 - #define IS_MF_SI(_p_hwfn) (((_p_hwfn)->cdev)->mf_mode == QED_MF_NPAR) 730 - #define IS_MF_SD(_p_hwfn) (((_p_hwfn)->cdev)->mf_mode == QED_MF_OVLAN) 672 + 673 + unsigned long 
mf_bits; 731 674 732 675 int pcie_width; 733 676 int pcie_speed;
+12 -2
drivers/net/ethernet/qlogic/qed/qed_dcbx.c
··· 274 274 u32 pri_tc_tbl, int count, u8 dcbx_version) 275 275 { 276 276 enum dcbx_protocol_type type; 277 + bool enable, ieee, eth_tlv; 277 278 u8 tc, priority_map; 278 - bool enable, ieee; 279 279 u16 protocol_id; 280 280 int priority; 281 281 int i; ··· 283 283 DP_VERBOSE(p_hwfn, QED_MSG_DCB, "Num APP entries = %d\n", count); 284 284 285 285 ieee = (dcbx_version == DCBX_CONFIG_VERSION_IEEE); 286 + eth_tlv = false; 286 287 /* Parse APP TLV */ 287 288 for (i = 0; i < count; i++) { 288 289 protocol_id = QED_MFW_GET_FIELD(p_tbl[i].entry, ··· 305 304 * indication, but we only got here if there was an 306 305 * app tlv for the protocol, so dcbx must be enabled. 307 306 */ 308 - enable = !(type == DCBX_PROTOCOL_ETH); 307 + if (type == DCBX_PROTOCOL_ETH) { 308 + enable = false; 309 + eth_tlv = true; 310 + } else { 311 + enable = true; 312 + } 309 313 310 314 qed_dcbx_update_app_info(p_data, p_hwfn, enable, 311 315 priority, tc, type); 312 316 } 313 317 } 318 + 319 + /* If Eth TLV is not detected, use UFP TC as default TC */ 320 + if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits) && !eth_tlv) 321 + p_data->arr[DCBX_PROTOCOL_ETH].tc = p_hwfn->ufp_info.tc; 314 322 315 323 /* Update ramrod protocol data and hw_info fields 316 324 * with default info when corresponding APP TLV's are not detected.
+78 -35
drivers/net/ethernet/qlogic/qed/qed_dev.c
··· 1149 1149 return -EINVAL; 1150 1150 } 1151 1151 1152 - switch (p_hwfn->cdev->mf_mode) { 1153 - case QED_MF_DEFAULT: 1154 - case QED_MF_NPAR: 1155 - hw_mode |= 1 << MODE_MF_SI; 1156 - break; 1157 - case QED_MF_OVLAN: 1152 + if (test_bit(QED_MF_OVLAN_CLSS, &p_hwfn->cdev->mf_bits)) 1158 1153 hw_mode |= 1 << MODE_MF_SD; 1159 - break; 1160 - default: 1161 - DP_NOTICE(p_hwfn, "Unsupported MF mode, init as DEFAULT\n"); 1154 + else 1162 1155 hw_mode |= 1 << MODE_MF_SI; 1163 - } 1164 1156 1165 1157 hw_mode |= 1 << MODE_ASIC; 1166 1158 ··· 1499 1507 STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1); 1500 1508 STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET, 1501 1509 p_hwfn->hw_info.ovlan); 1510 + 1511 + DP_VERBOSE(p_hwfn, NETIF_MSG_HW, 1512 + "Configuring LLH_FUNC_FILTER_HDR_SEL\n"); 1513 + STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET, 1514 + 1); 1502 1515 } 1503 1516 1504 1517 /* Enable classification by MAC if needed */ ··· 1554 1557 1555 1558 /* send function start command */ 1556 1559 rc = qed_sp_pf_start(p_hwfn, p_ptt, p_tunn, 1557 - p_hwfn->cdev->mf_mode, 1558 1560 allow_npar_tx_switch); 1559 1561 if (rc) { 1560 1562 DP_NOTICE(p_hwfn, "Function start ramrod failed\n"); ··· 1640 1644 bool b_default_mtu = true; 1641 1645 struct qed_hwfn *p_hwfn; 1642 1646 int rc = 0, mfw_rc, i; 1647 + u16 ether_type; 1643 1648 1644 1649 if ((p_params->int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) { 1645 1650 DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n"); ··· 1673 1676 rc = qed_calc_hw_mode(p_hwfn); 1674 1677 if (rc) 1675 1678 return rc; 1679 + 1680 + if (IS_PF(cdev) && (test_bit(QED_MF_8021Q_TAGGING, 1681 + &cdev->mf_bits) || 1682 + test_bit(QED_MF_8021AD_TAGGING, 1683 + &cdev->mf_bits))) { 1684 + if (test_bit(QED_MF_8021Q_TAGGING, &cdev->mf_bits)) 1685 + ether_type = ETH_P_8021Q; 1686 + else 1687 + ether_type = ETH_P_8021AD; 1688 + STORE_RT_REG(p_hwfn, PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET, 1689 + ether_type); 1690 + 
STORE_RT_REG(p_hwfn, NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET, 1691 + ether_type); 1692 + STORE_RT_REG(p_hwfn, PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET, 1693 + ether_type); 1694 + STORE_RT_REG(p_hwfn, DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET, 1695 + ether_type); 1696 + } 1676 1697 1677 1698 qed_fill_load_req_params(&load_req_params, 1678 1699 p_params->p_drv_load_params); ··· 2654 2639 link->pause.autoneg, 2655 2640 p_caps->default_eee, p_caps->eee_lpi_timer); 2656 2641 2657 - /* Read Multi-function information from shmem */ 2658 - addr = MCP_REG_SCRATCH + nvm_cfg1_offset + 2659 - offsetof(struct nvm_cfg1, glob) + 2660 - offsetof(struct nvm_cfg1_glob, generic_cont0); 2642 + if (IS_LEAD_HWFN(p_hwfn)) { 2643 + struct qed_dev *cdev = p_hwfn->cdev; 2661 2644 2662 - generic_cont0 = qed_rd(p_hwfn, p_ptt, addr); 2645 + /* Read Multi-function information from shmem */ 2646 + addr = MCP_REG_SCRATCH + nvm_cfg1_offset + 2647 + offsetof(struct nvm_cfg1, glob) + 2648 + offsetof(struct nvm_cfg1_glob, generic_cont0); 2663 2649 2664 - mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >> 2665 - NVM_CFG1_GLOB_MF_MODE_OFFSET; 2650 + generic_cont0 = qed_rd(p_hwfn, p_ptt, addr); 2666 2651 2667 - switch (mf_mode) { 2668 - case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED: 2669 - p_hwfn->cdev->mf_mode = QED_MF_OVLAN; 2670 - break; 2671 - case NVM_CFG1_GLOB_MF_MODE_NPAR1_0: 2672 - p_hwfn->cdev->mf_mode = QED_MF_NPAR; 2673 - break; 2674 - case NVM_CFG1_GLOB_MF_MODE_DEFAULT: 2675 - p_hwfn->cdev->mf_mode = QED_MF_DEFAULT; 2676 - break; 2652 + mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >> 2653 + NVM_CFG1_GLOB_MF_MODE_OFFSET; 2654 + 2655 + switch (mf_mode) { 2656 + case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED: 2657 + cdev->mf_bits = BIT(QED_MF_OVLAN_CLSS); 2658 + break; 2659 + case NVM_CFG1_GLOB_MF_MODE_UFP: 2660 + cdev->mf_bits = BIT(QED_MF_OVLAN_CLSS) | 2661 + BIT(QED_MF_LLH_PROTO_CLSS) | 2662 + BIT(QED_MF_UFP_SPECIFIC) | 2663 + BIT(QED_MF_8021Q_TAGGING); 2664 + break; 2665 + case NVM_CFG1_GLOB_MF_MODE_BD: 2666 + 
cdev->mf_bits = BIT(QED_MF_OVLAN_CLSS) | 2667 + BIT(QED_MF_LLH_PROTO_CLSS) | 2668 + BIT(QED_MF_8021AD_TAGGING); 2669 + break; 2670 + case NVM_CFG1_GLOB_MF_MODE_NPAR1_0: 2671 + cdev->mf_bits = BIT(QED_MF_LLH_MAC_CLSS) | 2672 + BIT(QED_MF_LLH_PROTO_CLSS) | 2673 + BIT(QED_MF_LL2_NON_UNICAST) | 2674 + BIT(QED_MF_INTER_PF_SWITCH); 2675 + break; 2676 + case NVM_CFG1_GLOB_MF_MODE_DEFAULT: 2677 + cdev->mf_bits = BIT(QED_MF_LLH_MAC_CLSS) | 2678 + BIT(QED_MF_LLH_PROTO_CLSS) | 2679 + BIT(QED_MF_LL2_NON_UNICAST); 2680 + if (QED_IS_BB(p_hwfn->cdev)) 2681 + cdev->mf_bits |= BIT(QED_MF_NEED_DEF_PF); 2682 + break; 2683 + } 2684 + 2685 + DP_INFO(p_hwfn, "Multi function mode is 0x%lx\n", 2686 + cdev->mf_bits); 2677 2687 } 2678 - DP_INFO(p_hwfn, "Multi function mode is %08x\n", 2679 - p_hwfn->cdev->mf_mode); 2680 2688 2681 - /* Read Multi-function information from shmem */ 2689 + DP_INFO(p_hwfn, "Multi function mode is 0x%lx\n", 2690 + p_hwfn->cdev->mf_bits); 2691 + 2692 + /* Read device capabilities information from shmem */ 2682 2693 addr = MCP_REG_SCRATCH + nvm_cfg1_offset + 2683 2694 offsetof(struct nvm_cfg1, glob) + 2684 2695 offsetof(struct nvm_cfg1_glob, device_capabilities); ··· 2897 2856 qed_mcp_cmd_port_init(p_hwfn, p_ptt); 2898 2857 2899 2858 qed_get_eee_caps(p_hwfn, p_ptt); 2859 + 2860 + qed_mcp_read_ufp_config(p_hwfn, p_ptt); 2900 2861 } 2901 2862 2902 2863 if (qed_mcp_is_init(p_hwfn)) { ··· 3505 3462 u32 high = 0, low = 0, en; 3506 3463 int i; 3507 3464 3508 - if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn))) 3465 + if (!test_bit(QED_MF_LLH_MAC_CLSS, &p_hwfn->cdev->mf_bits)) 3509 3466 return 0; 3510 3467 3511 3468 qed_llh_mac_to_filter(&high, &low, p_filter); ··· 3550 3507 u32 high = 0, low = 0; 3551 3508 int i; 3552 3509 3553 - if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn))) 3510 + if (!test_bit(QED_MF_LLH_MAC_CLSS, &p_hwfn->cdev->mf_bits)) 3554 3511 return; 3555 3512 3556 3513 qed_llh_mac_to_filter(&high, &low, p_filter); ··· 3592 3549 u32 high = 0, low = 0, en; 
3593 3550 int i; 3594 3551 3595 - if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn))) 3552 + if (!test_bit(QED_MF_LLH_PROTO_CLSS, &p_hwfn->cdev->mf_bits)) 3596 3553 return 0; 3597 3554 3598 3555 switch (type) { ··· 3690 3647 u32 high = 0, low = 0; 3691 3648 int i; 3692 3649 3693 - if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn))) 3650 + if (!test_bit(QED_MF_LLH_PROTO_CLSS, &p_hwfn->cdev->mf_bits)) 3694 3651 return; 3695 3652 3696 3653 switch (type) {
+3
drivers/net/ethernet/qlogic/qed/qed_fcoe.c
··· 313 313 p_data->d_id.addr_mid = p_conn->d_id.addr_mid; 314 314 p_data->d_id.addr_lo = p_conn->d_id.addr_lo; 315 315 p_data->flags = p_conn->flags; 316 + if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits)) 317 + SET_FIELD(p_data->flags, 318 + FCOE_CONN_OFFLOAD_RAMROD_DATA_B_SINGLE_VLAN, 1); 316 319 p_data->def_q_idx = p_conn->def_q_idx; 317 320 318 321 return qed_spq_post(p_hwfn, p_ent, NULL);
+28
drivers/net/ethernet/qlogic/qed/qed_hsi.h
··· 11993 11993 #define EEE_REMOTE_TW_TX_OFFSET 0 11994 11994 #define EEE_REMOTE_TW_RX_MASK 0xffff0000 11995 11995 #define EEE_REMOTE_TW_RX_OFFSET 16 11996 + 11997 + u32 oem_cfg_port; 11998 + #define OEM_CFG_CHANNEL_TYPE_MASK 0x00000003 11999 + #define OEM_CFG_CHANNEL_TYPE_OFFSET 0 12000 + #define OEM_CFG_CHANNEL_TYPE_VLAN_PARTITION 0x1 12001 + #define OEM_CFG_CHANNEL_TYPE_STAGGED 0x2 12002 + #define OEM_CFG_SCHED_TYPE_MASK 0x0000000C 12003 + #define OEM_CFG_SCHED_TYPE_OFFSET 2 12004 + #define OEM_CFG_SCHED_TYPE_ETS 0x1 12005 + #define OEM_CFG_SCHED_TYPE_VNIC_BW 0x2 11996 12006 }; 11997 12007 11998 12008 struct public_func { ··· 12079 12069 #define DRV_ID_DRV_INIT_HW_MASK 0x80000000 12080 12070 #define DRV_ID_DRV_INIT_HW_SHIFT 31 12081 12071 #define DRV_ID_DRV_INIT_HW_FLAG (1 << DRV_ID_DRV_INIT_HW_SHIFT) 12072 + 12073 + u32 oem_cfg_func; 12074 + #define OEM_CFG_FUNC_TC_MASK 0x0000000F 12075 + #define OEM_CFG_FUNC_TC_OFFSET 0 12076 + #define OEM_CFG_FUNC_TC_0 0x0 12077 + #define OEM_CFG_FUNC_TC_1 0x1 12078 + #define OEM_CFG_FUNC_TC_2 0x2 12079 + #define OEM_CFG_FUNC_TC_3 0x3 12080 + #define OEM_CFG_FUNC_TC_4 0x4 12081 + #define OEM_CFG_FUNC_TC_5 0x5 12082 + #define OEM_CFG_FUNC_TC_6 0x6 12083 + #define OEM_CFG_FUNC_TC_7 0x7 12084 + 12085 + #define OEM_CFG_FUNC_HOST_PRI_CTRL_MASK 0x00000030 12086 + #define OEM_CFG_FUNC_HOST_PRI_CTRL_OFFSET 4 12087 + #define OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC 0x1 12088 + #define OEM_CFG_FUNC_HOST_PRI_CTRL_OS 0x2 12082 12089 }; 12083 12090 12084 12091 struct mcp_mac { ··· 12522 12495 MFW_DRV_MSG_BW_UPDATE10, 12523 12496 MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE, 12524 12497 MFW_DRV_MSG_BW_UPDATE11, 12498 + MFW_DRV_MSG_OEM_CFG_UPDATE, 12525 12499 MFW_DRV_MSG_MAX 12526 12500 }; 12527 12501
+32 -14
drivers/net/ethernet/qlogic/qed/qed_ll2.c
··· 919 919 p_ramrod->drop_ttl0_flg = p_ll2_conn->input.rx_drop_ttl0_flg; 920 920 p_ramrod->inner_vlan_stripping_en = 921 921 p_ll2_conn->input.rx_vlan_removal_en; 922 + 923 + if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits) && 924 + p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) 925 + p_ramrod->report_outer_vlan = 1; 922 926 p_ramrod->queue_id = p_ll2_conn->queue_id; 923 927 p_ramrod->main_func_queue = p_ll2_conn->main_func_queue ? 1 : 0; 924 928 925 - if ((IS_MF_DEFAULT(p_hwfn) || IS_MF_SI(p_hwfn)) && 926 - p_ramrod->main_func_queue && (conn_type != QED_LL2_TYPE_ROCE) && 927 - (conn_type != QED_LL2_TYPE_IWARP)) { 929 + if (test_bit(QED_MF_LL2_NON_UNICAST, &p_hwfn->cdev->mf_bits) && 930 + p_ramrod->main_func_queue && conn_type != QED_LL2_TYPE_ROCE && 931 + conn_type != QED_LL2_TYPE_IWARP) { 928 932 p_ramrod->mf_si_bcast_accept_all = 1; 929 933 p_ramrod->mf_si_mcast_accept_all = 1; 930 934 } else { ··· 1497 1493 qed_ll2_establish_connection_ooo(p_hwfn, p_ll2_conn); 1498 1494 1499 1495 if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) { 1496 + if (!test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits)) 1497 + qed_llh_add_protocol_filter(p_hwfn, p_ptt, 1498 + ETH_P_FCOE, 0, 1499 + QED_LLH_FILTER_ETHERTYPE); 1500 1500 qed_llh_add_protocol_filter(p_hwfn, p_ptt, 1501 - 0x8906, 0, 1502 - QED_LLH_FILTER_ETHERTYPE); 1503 - qed_llh_add_protocol_filter(p_hwfn, p_ptt, 1504 - 0x8914, 0, 1501 + ETH_P_FIP, 0, 1505 1502 QED_LLH_FILTER_ETHERTYPE); 1506 1503 } 1507 1504 ··· 1658 1653 1659 1654 start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain); 1660 1655 if (QED_IS_IWARP_PERSONALITY(p_hwfn) && 1661 - p_ll2->input.conn_type == QED_LL2_TYPE_OOO) 1656 + p_ll2->input.conn_type == QED_LL2_TYPE_OOO) { 1662 1657 start_bd->nw_vlan_or_lb_echo = 1663 1658 cpu_to_le16(IWARP_LL2_IN_ORDER_TX_QUEUE); 1664 - else 1659 + } else { 1665 1660 start_bd->nw_vlan_or_lb_echo = cpu_to_le16(pkt->vlan); 1661 + if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits) && 1662 + 
p_ll2->input.conn_type == QED_LL2_TYPE_FCOE) 1663 + pkt->remove_stag = true; 1664 + } 1665 + 1666 1666 SET_FIELD(start_bd->bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W, 1667 1667 cpu_to_le16(pkt->l4_hdr_offset_w)); 1668 1668 SET_FIELD(start_bd->bitfield1, CORE_TX_BD_TX_DST, tx_dest); ··· 1678 1668 SET_FIELD(bd_data, CORE_TX_BD_DATA_IP_CSUM, !!(pkt->enable_ip_cksum)); 1679 1669 SET_FIELD(bd_data, CORE_TX_BD_DATA_L4_CSUM, !!(pkt->enable_l4_cksum)); 1680 1670 SET_FIELD(bd_data, CORE_TX_BD_DATA_IP_LEN, !!(pkt->calc_ip_len)); 1671 + SET_FIELD(bd_data, CORE_TX_BD_DATA_DISABLE_STAG_INSERTION, 1672 + !!(pkt->remove_stag)); 1673 + 1681 1674 start_bd->bd_data.as_bitfield = cpu_to_le16(bd_data); 1682 1675 DMA_REGPAIR_LE(start_bd->addr, pkt->first_frag); 1683 1676 start_bd->nbytes = cpu_to_le16(pkt->first_frag_len); ··· 1897 1884 qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info); 1898 1885 1899 1886 if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) { 1887 + if (!test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits)) 1888 + qed_llh_remove_protocol_filter(p_hwfn, p_ptt, 1889 + ETH_P_FCOE, 0, 1890 + QED_LLH_FILTER_ETHERTYPE); 1900 1891 qed_llh_remove_protocol_filter(p_hwfn, p_ptt, 1901 - 0x8906, 0, 1902 - QED_LLH_FILTER_ETHERTYPE); 1903 - qed_llh_remove_protocol_filter(p_hwfn, p_ptt, 1904 - 0x8914, 0, 1892 + ETH_P_FIP, 0, 1905 1893 QED_LLH_FILTER_ETHERTYPE); 1906 1894 } 1907 1895 ··· 2374 2360 return -EINVAL; 2375 2361 } 2376 2362 2377 - static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb) 2363 + static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb, 2364 + unsigned long xmit_flags) 2378 2365 { 2379 2366 struct qed_ll2_tx_pkt_info pkt; 2380 2367 const skb_frag_t *frag; ··· 2420 2405 pkt.first_frag = mapping; 2421 2406 pkt.first_frag_len = skb->len; 2422 2407 pkt.cookie = skb; 2408 + if (test_bit(QED_MF_UFP_SPECIFIC, &cdev->mf_bits) && 2409 + test_bit(QED_LL2_XMIT_FLAGS_FIP_DISCOVERY, &xmit_flags)) 2410 + pkt.remove_stag = true; 2423 2411 
2424 2412 rc = qed_ll2_prepare_tx_packet(&cdev->hwfns[0], cdev->ll2->handle, 2425 2413 &pkt, 1);
+2 -2
drivers/net/ethernet/qlogic/qed/qed_main.c
··· 264 264 dev_info->pci_mem_end = cdev->pci_params.mem_end; 265 265 dev_info->pci_irq = cdev->pci_params.irq; 266 266 dev_info->rdma_supported = QED_IS_RDMA_PERSONALITY(p_hwfn); 267 - dev_info->is_mf_default = IS_MF_DEFAULT(&cdev->hwfns[0]); 268 267 dev_info->dev_type = cdev->type; 269 268 ether_addr_copy(dev_info->hw_mac, hw_info->hw_mac_addr); 270 269 ··· 272 273 dev_info->fw_minor = FW_MINOR_VERSION; 273 274 dev_info->fw_rev = FW_REVISION_VERSION; 274 275 dev_info->fw_eng = FW_ENGINEERING_VERSION; 275 - dev_info->mf_mode = cdev->mf_mode; 276 + dev_info->b_inter_pf_switch = test_bit(QED_MF_INTER_PF_SWITCH, 277 + &cdev->mf_bits); 276 278 dev_info->tx_switching = true; 277 279 278 280 if (hw_info->b_wol_support == QED_WOL_SUPPORT_PME)
+78
drivers/net/ethernet/qlogic/qed/qed_mcp.c
··· 40 40 #include <linux/string.h> 41 41 #include <linux/etherdevice.h> 42 42 #include "qed.h" 43 + #include "qed_cxt.h" 43 44 #include "qed_dcbx.h" 44 45 #include "qed_hsi.h" 45 46 #include "qed_hw.h" ··· 1487 1486 &resp, &param); 1488 1487 } 1489 1488 1489 + void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 1490 + { 1491 + struct public_func shmem_info; 1492 + u32 port_cfg, val; 1493 + 1494 + if (!test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits)) 1495 + return; 1496 + 1497 + memset(&p_hwfn->ufp_info, 0, sizeof(p_hwfn->ufp_info)); 1498 + port_cfg = qed_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr + 1499 + offsetof(struct public_port, oem_cfg_port)); 1500 + val = (port_cfg & OEM_CFG_CHANNEL_TYPE_MASK) >> 1501 + OEM_CFG_CHANNEL_TYPE_OFFSET; 1502 + if (val != OEM_CFG_CHANNEL_TYPE_STAGGED) 1503 + DP_NOTICE(p_hwfn, "Incorrect UFP Channel type %d\n", val); 1504 + 1505 + val = (port_cfg & OEM_CFG_SCHED_TYPE_MASK) >> OEM_CFG_SCHED_TYPE_OFFSET; 1506 + if (val == OEM_CFG_SCHED_TYPE_ETS) { 1507 + p_hwfn->ufp_info.mode = QED_UFP_MODE_ETS; 1508 + } else if (val == OEM_CFG_SCHED_TYPE_VNIC_BW) { 1509 + p_hwfn->ufp_info.mode = QED_UFP_MODE_VNIC_BW; 1510 + } else { 1511 + p_hwfn->ufp_info.mode = QED_UFP_MODE_UNKNOWN; 1512 + DP_NOTICE(p_hwfn, "Unknown UFP scheduling mode %d\n", val); 1513 + } 1514 + 1515 + qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn)); 1516 + val = (port_cfg & OEM_CFG_FUNC_TC_MASK) >> OEM_CFG_FUNC_TC_OFFSET; 1517 + p_hwfn->ufp_info.tc = (u8)val; 1518 + val = (port_cfg & OEM_CFG_FUNC_HOST_PRI_CTRL_MASK) >> 1519 + OEM_CFG_FUNC_HOST_PRI_CTRL_OFFSET; 1520 + if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC) { 1521 + p_hwfn->ufp_info.pri_type = QED_UFP_PRI_VNIC; 1522 + } else if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_OS) { 1523 + p_hwfn->ufp_info.pri_type = QED_UFP_PRI_OS; 1524 + } else { 1525 + p_hwfn->ufp_info.pri_type = QED_UFP_PRI_UNKNOWN; 1526 + DP_NOTICE(p_hwfn, "Unknown Host priority control %d\n", val); 1527 + } 
1528 + 1529 + DP_NOTICE(p_hwfn, 1530 + "UFP shmem config: mode = %d tc = %d pri_type = %d\n", 1531 + p_hwfn->ufp_info.mode, 1532 + p_hwfn->ufp_info.tc, p_hwfn->ufp_info.pri_type); 1533 + } 1534 + 1535 + static int 1536 + qed_mcp_handle_ufp_event(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) 1537 + { 1538 + qed_mcp_read_ufp_config(p_hwfn, p_ptt); 1539 + 1540 + if (p_hwfn->ufp_info.mode == QED_UFP_MODE_VNIC_BW) { 1541 + p_hwfn->qm_info.ooo_tc = p_hwfn->ufp_info.tc; 1542 + p_hwfn->hw_info.offload_tc = p_hwfn->ufp_info.tc; 1543 + 1544 + qed_qm_reconf(p_hwfn, p_ptt); 1545 + } else if (p_hwfn->ufp_info.mode == QED_UFP_MODE_ETS) { 1546 + /* Merge UFP TC with the dcbx TC data */ 1547 + qed_dcbx_mib_update_event(p_hwfn, p_ptt, 1548 + QED_DCBX_OPERATIONAL_MIB); 1549 + } else { 1550 + DP_ERR(p_hwfn, "Invalid sched type, discard the UFP config\n"); 1551 + return -EINVAL; 1552 + } 1553 + 1554 + /* update storm FW with negotiation results */ 1555 + qed_sp_pf_update_ufp(p_hwfn); 1556 + 1557 + /* update stag pcp value */ 1558 + qed_sp_pf_update_stag(p_hwfn); 1559 + 1560 + return 0; 1561 + } 1562 + 1490 1563 int qed_mcp_handle_events(struct qed_hwfn *p_hwfn, 1491 1564 struct qed_ptt *p_ptt) 1492 1565 { ··· 1603 1528 case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED: 1604 1529 qed_dcbx_mib_update_event(p_hwfn, p_ptt, 1605 1530 QED_DCBX_OPERATIONAL_MIB); 1531 + break; 1532 + case MFW_DRV_MSG_OEM_CFG_UPDATE: 1533 + qed_mcp_handle_ufp_event(p_hwfn, p_ptt); 1606 1534 break; 1607 1535 case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE: 1608 1536 qed_mcp_handle_transceiver_change(p_hwfn, p_ptt);
+8
drivers/net/ethernet/qlogic/qed/qed_mcp.h
··· 1005 1005 int qed_mcp_set_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); 1006 1006 1007 1007 /** 1008 + * @brief Read ufp config from the shared memory. 1009 + * 1010 + * @param p_hwfn 1011 + * @param p_ptt 1012 + */ 1013 + void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); 1014 + 1015 + /** 1008 1016 * @brief Populate the nvm info shadow in the given hardware function 1009 1017 * 1010 1018 * @param p_hwfn
+10 -2
drivers/net/ethernet/qlogic/qed/qed_sp.h
··· 416 416 * @param p_hwfn 417 417 * @param p_ptt 418 418 * @param p_tunn 419 - * @param mode 420 419 * @param allow_npar_tx_switch 421 420 * 422 421 * @return int ··· 424 425 int qed_sp_pf_start(struct qed_hwfn *p_hwfn, 425 426 struct qed_ptt *p_ptt, 426 427 struct qed_tunnel_info *p_tunn, 427 - enum qed_mf_mode mode, bool allow_npar_tx_switch); 428 + bool allow_npar_tx_switch); 428 429 429 430 /** 430 431 * @brief qed_sp_pf_update - PF Function Update Ramrod ··· 461 462 * 462 463 * @return int 463 464 */ 465 + 466 + /** 467 + * @brief qed_sp_pf_update_ufp - PF ufp update Ramrod 468 + * 469 + * @param p_hwfn 470 + * 471 + * @return int 472 + */ 473 + int qed_sp_pf_update_ufp(struct qed_hwfn *p_hwfn); 464 474 465 475 int qed_sp_pf_stop(struct qed_hwfn *p_hwfn); 466 476
+62 -14
drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
··· 306 306 int qed_sp_pf_start(struct qed_hwfn *p_hwfn, 307 307 struct qed_ptt *p_ptt, 308 308 struct qed_tunnel_info *p_tunn, 309 - enum qed_mf_mode mode, bool allow_npar_tx_switch) 309 + bool allow_npar_tx_switch) 310 310 { 311 311 struct pf_start_ramrod_data *p_ramrod = NULL; 312 312 u16 sb = qed_int_get_sp_sb_id(p_hwfn); ··· 314 314 struct qed_spq_entry *p_ent = NULL; 315 315 struct qed_sp_init_data init_data; 316 316 int rc = -EINVAL; 317 - u8 page_cnt; 317 + u8 page_cnt, i; 318 318 319 319 /* update initial eq producer */ 320 320 qed_eq_prod_update(p_hwfn, ··· 339 339 p_ramrod->dont_log_ramrods = 0; 340 340 p_ramrod->log_type_mask = cpu_to_le16(0xf); 341 341 342 - switch (mode) { 343 - case QED_MF_DEFAULT: 344 - case QED_MF_NPAR: 345 - p_ramrod->mf_mode = MF_NPAR; 346 - break; 347 - case QED_MF_OVLAN: 342 + if (test_bit(QED_MF_OVLAN_CLSS, &p_hwfn->cdev->mf_bits)) 348 343 p_ramrod->mf_mode = MF_OVLAN; 349 - break; 350 - default: 351 - DP_NOTICE(p_hwfn, "Unsupported MF mode, init as DEFAULT\n"); 344 + else 352 345 p_ramrod->mf_mode = MF_NPAR; 353 - } 354 346 355 347 p_ramrod->outer_tag_config.outer_tag.tci = 356 - cpu_to_le16(p_hwfn->hw_info.ovlan); 348 + cpu_to_le16(p_hwfn->hw_info.ovlan); 349 + if (test_bit(QED_MF_8021Q_TAGGING, &p_hwfn->cdev->mf_bits)) { 350 + p_ramrod->outer_tag_config.outer_tag.tpid = ETH_P_8021Q; 351 + } else if (test_bit(QED_MF_8021AD_TAGGING, &p_hwfn->cdev->mf_bits)) { 352 + p_ramrod->outer_tag_config.outer_tag.tpid = ETH_P_8021AD; 353 + p_ramrod->outer_tag_config.enable_stag_pri_change = 1; 354 + } 355 + 356 + p_ramrod->outer_tag_config.pri_map_valid = 1; 357 + for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) 358 + p_ramrod->outer_tag_config.inner_to_outer_pri_map[i] = i; 359 + 360 + /* enable_stag_pri_change should be set if port is in BD mode or, 361 + * UFP with Host Control mode. 
362 + */ 363 + if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits)) { 364 + if (p_hwfn->ufp_info.pri_type == QED_UFP_PRI_OS) 365 + p_ramrod->outer_tag_config.enable_stag_pri_change = 1; 366 + else 367 + p_ramrod->outer_tag_config.enable_stag_pri_change = 0; 368 + 369 + p_ramrod->outer_tag_config.outer_tag.tci |= 370 + cpu_to_le16(((u16)p_hwfn->ufp_info.tc << 13)); 371 + } 357 372 358 373 /* Place EQ address in RAMROD */ 359 374 DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr, ··· 380 365 381 366 qed_tunn_set_pf_start_params(p_hwfn, p_tunn, &p_ramrod->tunnel_config); 382 367 383 - if (IS_MF_SI(p_hwfn)) 368 + if (test_bit(QED_MF_INTER_PF_SWITCH, &p_hwfn->cdev->mf_bits)) 384 369 p_ramrod->allow_npar_tx_switching = allow_npar_tx_switch; 385 370 386 371 switch (p_hwfn->hw_info.personality) { ··· 445 430 446 431 qed_dcbx_set_pf_update_params(&p_hwfn->p_dcbx_info->results, 447 432 &p_ent->ramrod.pf_update); 433 + 434 + return qed_spq_post(p_hwfn, p_ent, NULL); 435 + } 436 + 437 + int qed_sp_pf_update_ufp(struct qed_hwfn *p_hwfn) 438 + { 439 + struct qed_spq_entry *p_ent = NULL; 440 + struct qed_sp_init_data init_data; 441 + int rc = -EOPNOTSUPP; 442 + 443 + if (p_hwfn->ufp_info.pri_type == QED_UFP_PRI_UNKNOWN) { 444 + DP_INFO(p_hwfn, "Invalid priority type %d\n", 445 + p_hwfn->ufp_info.pri_type); 446 + return -EINVAL; 447 + } 448 + 449 + /* Get SPQ entry */ 450 + memset(&init_data, 0, sizeof(init_data)); 451 + init_data.cid = qed_spq_get_cid(p_hwfn); 452 + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; 453 + init_data.comp_mode = QED_SPQ_MODE_CB; 454 + 455 + rc = qed_sp_init_request(p_hwfn, &p_ent, 456 + COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON, 457 + &init_data); 458 + if (rc) 459 + return rc; 460 + 461 + p_ent->ramrod.pf_update.update_enable_stag_pri_change = true; 462 + if (p_hwfn->ufp_info.pri_type == QED_UFP_PRI_OS) 463 + p_ent->ramrod.pf_update.enable_stag_pri_change = 1; 464 + else 465 + p_ent->ramrod.pf_update.enable_stag_pri_change = 0; 448 466 449 467 
return qed_spq_post(p_hwfn, p_ent, NULL); 450 468 }
+2 -2
drivers/net/ethernet/qlogic/qede/qede_main.c
··· 199 199 200 200 /* Enable/Disable Tx switching for PF */ 201 201 if ((rc == num_vfs_param) && netif_running(edev->ndev) && 202 - qed_info->mf_mode != QED_MF_NPAR && qed_info->tx_switching) { 202 + !qed_info->b_inter_pf_switch && qed_info->tx_switching) { 203 203 vport_params->vport_id = 0; 204 204 vport_params->update_tx_switching_flg = 1; 205 205 vport_params->tx_switching_flg = num_vfs_param ? 1 : 0; ··· 1928 1928 vport_update_params->update_vport_active_flg = 1; 1929 1929 vport_update_params->vport_active_flg = 1; 1930 1930 1931 - if ((qed_info->mf_mode == QED_MF_NPAR || pci_num_vf(edev->pdev)) && 1931 + if ((qed_info->b_inter_pf_switch || pci_num_vf(edev->pdev)) && 1932 1932 qed_info->tx_switching) { 1933 1933 vport_update_params->update_tx_switching_flg = 1; 1934 1934 vport_update_params->tx_switching_flg = 1;
+5 -2
drivers/scsi/qedf/qedf_fip.c
··· 23 23 struct fip_vlan *vlan; 24 24 #define MY_FIP_ALL_FCF_MACS ((__u8[6]) { 1, 0x10, 0x18, 1, 0, 2 }) 25 25 static u8 my_fcoe_all_fcfs[ETH_ALEN] = MY_FIP_ALL_FCF_MACS; 26 + unsigned long flags = 0; 26 27 27 28 skb = dev_alloc_skb(sizeof(struct fip_vlan)); 28 29 if (!skb) ··· 66 65 kfree_skb(skb); 67 66 return; 68 67 } 69 - qed_ops->ll2->start_xmit(qedf->cdev, skb); 68 + 69 + set_bit(QED_LL2_XMIT_FLAGS_FIP_DISCOVERY, &flags); 70 + qed_ops->ll2->start_xmit(qedf->cdev, skb, flags); 70 71 } 71 72 72 73 static void qedf_fcoe_process_vlan_resp(struct qedf_ctx *qedf, ··· 142 139 print_hex_dump(KERN_WARNING, "fip ", DUMP_PREFIX_OFFSET, 16, 1, 143 140 skb->data, skb->len, false); 144 141 145 - qed_ops->ll2->start_xmit(qedf->cdev, skb); 142 + qed_ops->ll2->start_xmit(qedf->cdev, skb, 0); 146 143 } 147 144 148 145 /* Process incoming FIP frames. */
+1 -1
drivers/scsi/qedf/qedf_main.c
··· 994 994 if (qedf_dump_frames) 995 995 print_hex_dump(KERN_WARNING, "fcoe: ", DUMP_PREFIX_OFFSET, 16, 996 996 1, skb->data, skb->len, false); 997 - qed_ops->ll2->start_xmit(qedf->cdev, skb); 997 + qed_ops->ll2->start_xmit(qedf->cdev, skb, 0); 998 998 999 999 return 0; 1000 1000 }
+1 -1
drivers/scsi/qedi/qedi_iscsi.c
··· 1150 1150 if (vlanid) 1151 1151 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid); 1152 1152 1153 - rc = qedi_ops->ll2->start_xmit(cdev, skb); 1153 + rc = qedi_ops->ll2->start_xmit(cdev, skb, 0); 1154 1154 if (rc) { 1155 1155 QEDI_ERR(&qedi->dbg_ctx, "ll2 start_xmit returned %d\n", 1156 1156 rc);
+1 -2
include/linux/qed/qed_if.h
··· 339 339 u8 num_hwfns; 340 340 341 341 u8 hw_mac[ETH_ALEN]; 342 - bool is_mf_default; 343 342 344 343 /* FW version */ 345 344 u16 fw_major; ··· 358 359 #define QED_MFW_VERSION_3_OFFSET 24 359 360 360 361 u32 flash_size; 361 - u8 mf_mode; 362 + bool b_inter_pf_switch; 362 363 bool tx_switching; 363 364 bool rdma_supported; 364 365 u16 mtu;
+9 -1
include/linux/qed/qed_ll2_if.h
··· 202 202 bool enable_ip_cksum; 203 203 bool enable_l4_cksum; 204 204 bool calc_ip_len; 205 + bool remove_stag; 205 206 }; 206 207 207 208 #define QED_LL2_UNUSED_HANDLE (0xff) ··· 219 218 u8 tx_tc; 220 219 bool frags_mapped; 221 220 u8 ll2_mac_address[ETH_ALEN]; 221 + }; 222 + 223 + enum qed_ll2_xmit_flags { 224 + /* FIP discovery packet */ 225 + QED_LL2_XMIT_FLAGS_FIP_DISCOVERY 222 226 }; 223 227 224 228 struct qed_ll2_ops { ··· 251 245 * 252 246 * @param cdev 253 247 * @param skb 248 + * @param xmit_flags - Transmit options defined by the enum qed_ll2_xmit_flags. 254 249 * 255 250 * @return 0 on success, otherwise error value. 256 251 */ 257 - int (*start_xmit)(struct qed_dev *cdev, struct sk_buff *skb); 252 + int (*start_xmit)(struct qed_dev *cdev, struct sk_buff *skb, 253 + unsigned long xmit_flags); 258 254 259 255 /** 260 256 * @brief register_cb_ops - protocol driver register the callback for Rx/Tx