Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'mlx4-next'

Amir Vadai says:

====================
Mellanox ethernet driver updates Jan-27-2015

This patchset introduces some bug fixes, code cleanups, and support for a new
firmware event called the recoverable error event.

Patches were applied and tested against commit b8665c6 ("net: dsa/mv88e6352:
make mv88e6352_wait generic")

Changes from V0:
- Patch 6/11 ("net/mlx4_core: Fix struct mlx4_vhcr_cmd to make implicit padding
explicit"):
- Removed __packed
- Rephrased commit message

- Added a new patch by Majd ("net/mlx4_core: Update the HCA core clock frequency
after INIT_PORT")
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+136 -72
+4 -2
drivers/net/ethernet/mellanox/mlx4/cmd.c
··· 901 901 index = be32_to_cpu(smp->attr_mod); 902 902 if (port < 1 || port > dev->caps.num_ports) 903 903 return -EINVAL; 904 - table = kcalloc(dev->caps.pkey_table_len[port], sizeof *table, GFP_KERNEL); 904 + table = kcalloc((dev->caps.pkey_table_len[port] / 32) + 1, 905 + sizeof(*table) * 32, GFP_KERNEL); 906 + 905 907 if (!table) 906 908 return -ENOMEM; 907 909 /* need to get the full pkey table because the paravirtualized ··· 1223 1221 { 1224 1222 .opcode = MLX4_CMD_HW2SW_EQ, 1225 1223 .has_inbox = false, 1226 - .has_outbox = true, 1224 + .has_outbox = false, 1227 1225 .out_is_imm = false, 1228 1226 .encode_slave_id = true, 1229 1227 .verify = NULL,
+8 -10
drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
··· 770 770 return 0; 771 771 } 772 772 773 - proto_admin = cpu_to_be32(ptys_adv); 774 - if (speed >= 0 && speed != priv->port_state.link_speed) 775 - /* If speed was set then speed decides :-) */ 776 - proto_admin = speed_set_ptys_admin(priv, speed, 777 - ptys_reg.eth_proto_cap); 773 + proto_admin = cmd->autoneg == AUTONEG_ENABLE ? 774 + cpu_to_be32(ptys_adv) : 775 + speed_set_ptys_admin(priv, speed, 776 + ptys_reg.eth_proto_cap); 778 777 779 778 proto_admin &= ptys_reg.eth_proto_cap; 780 - 781 - if (proto_admin == ptys_reg.eth_proto_admin) 782 - return 0; /* Nothing to change */ 783 - 784 779 if (!proto_admin) { 785 780 en_warn(priv, "Not supported link mode(s) requested, check supported link modes.\n"); 786 781 return -EINVAL; /* nothing to change due to bad input */ 787 782 } 783 + 784 + if (proto_admin == ptys_reg.eth_proto_admin) 785 + return 0; /* Nothing to change */ 788 786 789 787 en_dbg(DRV, priv, "mlx4_ACCESS_PTYS_REG SET: ptys_reg.eth_proto_admin = 0x%x\n", 790 788 be32_to_cpu(proto_admin)); ··· 796 798 return ret; 797 799 } 798 800 799 - en_warn(priv, "Port link mode changed, restarting port...\n"); 800 801 mutex_lock(&priv->mdev->state_lock); 801 802 if (priv->port_up) { 803 + en_warn(priv, "Port link mode changed, restarting port...\n"); 802 804 mlx4_en_stop_port(dev, 1); 803 805 if (mlx4_en_start_port(dev)) 804 806 en_err(priv, "Failed restarting port %d\n", priv->port);
+26 -22
drivers/net/ethernet/mellanox/mlx4/eq.c
··· 88 88 u64 async_ev_mask = MLX4_ASYNC_EVENT_MASK; 89 89 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV) 90 90 async_ev_mask |= (1ull << MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT); 91 + if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT) 92 + async_ev_mask |= (1ull << MLX4_EVENT_TYPE_RECOVERABLE_ERROR_EVENT); 91 93 92 94 return async_ev_mask; 93 95 } ··· 738 736 (unsigned long) eqe); 739 737 break; 740 738 739 + case MLX4_EVENT_TYPE_RECOVERABLE_ERROR_EVENT: 740 + switch (eqe->subtype) { 741 + case MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_BAD_CABLE: 742 + mlx4_warn(dev, "Bad cable detected on port %u\n", 743 + eqe->event.bad_cable.port); 744 + break; 745 + case MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_UNSUPPORTED_CABLE: 746 + mlx4_warn(dev, "Unsupported cable detected\n"); 747 + break; 748 + default: 749 + mlx4_dbg(dev, 750 + "Unhandled recoverable error event detected: %02x(%02x) on EQ %d at index %u. owner=%x, nent=0x%x, ownership=%s\n", 751 + eqe->type, eqe->subtype, eq->eqn, 752 + eq->cons_index, eqe->owner, eq->nent, 753 + !!(eqe->owner & 0x80) ^ 754 + !!(eq->cons_index & eq->nent) ? 
"HW" : "SW"); 755 + break; 756 + } 757 + break; 758 + 741 759 case MLX4_EVENT_TYPE_EEC_CATAS_ERROR: 742 760 case MLX4_EVENT_TYPE_ECC_DETECT: 743 761 default: ··· 868 846 MLX4_CMD_WRAPPED); 869 847 } 870 848 871 - static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, 872 - int eq_num) 849 + static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, int eq_num) 873 850 { 874 - return mlx4_cmd_box(dev, 0, mailbox->dma, eq_num, 875 - 0, MLX4_CMD_HW2SW_EQ, MLX4_CMD_TIME_CLASS_A, 876 - MLX4_CMD_WRAPPED); 851 + return mlx4_cmd(dev, 0, eq_num, 1, MLX4_CMD_HW2SW_EQ, 852 + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); 877 853 } 878 854 879 855 static int mlx4_num_eq_uar(struct mlx4_dev *dev) ··· 1044 1024 struct mlx4_eq *eq) 1045 1025 { 1046 1026 struct mlx4_priv *priv = mlx4_priv(dev); 1047 - struct mlx4_cmd_mailbox *mailbox; 1048 1027 int err; 1049 1028 int i; 1050 1029 /* CX3 is capable of extending the CQE/EQE from 32 to 64 bytes, with ··· 1051 1032 */ 1052 1033 int npages = PAGE_ALIGN(dev->caps.eqe_size * eq->nent) / PAGE_SIZE; 1053 1034 1054 - mailbox = mlx4_alloc_cmd_mailbox(dev); 1055 - if (IS_ERR(mailbox)) 1056 - return; 1057 - 1058 - err = mlx4_HW2SW_EQ(dev, mailbox, eq->eqn); 1035 + err = mlx4_HW2SW_EQ(dev, eq->eqn); 1059 1036 if (err) 1060 1037 mlx4_warn(dev, "HW2SW_EQ failed (%d)\n", err); 1061 1038 1062 - if (0) { 1063 - mlx4_dbg(dev, "Dumping EQ context %02x:\n", eq->eqn); 1064 - for (i = 0; i < sizeof (struct mlx4_eq_context) / 4; ++i) { 1065 - if (i % 4 == 0) 1066 - pr_cont("[%02x] ", i * 4); 1067 - pr_cont(" %08x", be32_to_cpup(mailbox->buf + i * 4)); 1068 - if ((i + 1) % 4 == 0) 1069 - pr_cont("\n"); 1070 - } 1071 - } 1072 1039 synchronize_irq(eq->irq); 1073 1040 tasklet_disable(&eq->tasklet_ctx.task); 1074 1041 ··· 1066 1061 1067 1062 kfree(eq->page_list); 1068 1063 mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn, MLX4_USE_RR); 1069 - mlx4_free_cmd_mailbox(dev, mailbox); 1070 1064 } 1071 1065 1072 1066 static void mlx4_free_irqs(struct 
mlx4_dev *dev)
+70 -18
drivers/net/ethernet/mellanox/mlx4/fw.c
··· 84 84 [ 1] = "UC transport", 85 85 [ 2] = "UD transport", 86 86 [ 3] = "XRC transport", 87 - [ 4] = "reliable multicast", 88 - [ 5] = "FCoIB support", 89 87 [ 6] = "SRQ support", 90 88 [ 7] = "IPoIB checksum offload", 91 89 [ 8] = "P_Key violation counter", 92 90 [ 9] = "Q_Key violation counter", 93 - [10] = "VMM", 94 91 [12] = "Dual Port Different Protocol (DPDP) support", 95 92 [15] = "Big LSO headers", 96 93 [16] = "MW support", ··· 96 99 [19] = "Raw multicast support", 97 100 [20] = "Address vector port checking support", 98 101 [21] = "UD multicast support", 99 - [24] = "Demand paging support", 100 - [25] = "Router support", 101 102 [30] = "IBoE support", 102 103 [32] = "Unicast loopback support", 103 104 [34] = "FCS header control", 104 - [38] = "Wake On LAN support", 105 + [37] = "Wake On LAN (port1) support", 106 + [38] = "Wake On LAN (port2) support", 105 107 [40] = "UDP RSS support", 106 108 [41] = "Unicast VEP steering support", 107 109 [42] = "Multicast VEP steering support", ··· 141 145 [16] = "CONFIG DEV support", 142 146 [17] = "Asymmetric EQs support", 143 147 [18] = "More than 80 VFs support", 144 - [19] = "Performance optimized for limited rule configuration flow steering support" 148 + [19] = "Performance optimized for limited rule configuration flow steering support", 149 + [20] = "Recoverable error events support" 145 150 }; 146 151 int i; 147 152 ··· 256 259 #define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP 0x28 257 260 #define QUERY_FUNC_CAP_MAX_EQ_OFFSET 0x2c 258 261 #define QUERY_FUNC_CAP_RESERVED_EQ_OFFSET 0x30 262 + #define QUERY_FUNC_CAP_QP_RESD_LKEY_OFFSET 0x48 259 263 260 264 #define QUERY_FUNC_CAP_QP_QUOTA_OFFSET 0x50 261 265 #define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET 0x54 ··· 271 273 #define QUERY_FUNC_CAP_FLAG_RDMA 0x40 272 274 #define QUERY_FUNC_CAP_FLAG_ETH 0x80 273 275 #define QUERY_FUNC_CAP_FLAG_QUOTAS 0x10 276 + #define QUERY_FUNC_CAP_FLAG_RESD_LKEY 0x08 274 277 #define QUERY_FUNC_CAP_FLAG_VALID_MAILBOX 0x04 275 278 276 279 #define 
QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG (1UL << 31) ··· 343 344 } else if (vhcr->op_modifier == 0) { 344 345 struct mlx4_active_ports actv_ports = 345 346 mlx4_get_active_ports(dev, slave); 346 - /* enable rdma and ethernet interfaces, and new quota locations */ 347 + /* enable rdma and ethernet interfaces, new quota locations, 348 + * and reserved lkey 349 + */ 347 350 field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA | 348 - QUERY_FUNC_CAP_FLAG_QUOTAS | QUERY_FUNC_CAP_FLAG_VALID_MAILBOX); 351 + QUERY_FUNC_CAP_FLAG_QUOTAS | QUERY_FUNC_CAP_FLAG_VALID_MAILBOX | 352 + QUERY_FUNC_CAP_FLAG_RESD_LKEY); 349 353 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET); 350 354 351 355 field = min( ··· 413 411 size = QUERY_FUNC_CAP_EXTRA_FLAGS_BF_QP_ALLOC_FLAG | 414 412 QUERY_FUNC_CAP_EXTRA_FLAGS_A0_QP_ALLOC_FLAG; 415 413 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_EXTRA_FLAGS_OFFSET); 414 + 415 + size = dev->caps.reserved_lkey + ((slave << 8) & 0xFF00); 416 + MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_RESD_LKEY_OFFSET); 416 417 } else 417 418 err = -EINVAL; 418 419 ··· 507 502 508 503 MLX4_GET(size, outbox, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET); 509 504 func_cap->reserved_eq = size & 0xFFFFFF; 505 + 506 + if (func_cap->flags & QUERY_FUNC_CAP_FLAG_RESD_LKEY) { 507 + MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_RESD_LKEY_OFFSET); 508 + func_cap->reserved_lkey = size; 509 + } else { 510 + func_cap->reserved_lkey = 0; 511 + } 510 512 511 513 func_cap->extra_flags = 0; 512 514 ··· 871 859 MLX4_GET(field32, outbox, QUERY_DEV_CAP_ETH_BACKPL_OFFSET); 872 860 if (field32 & (1 << 0)) 873 861 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP; 862 + if (field32 & (1 << 7)) 863 + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT; 874 864 MLX4_GET(field, outbox, QUERY_DEV_CAP_FW_REASSIGN_MAC); 875 865 if (field & 1<<6) 876 866 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN; ··· 1576 1562 #define INIT_HCA_VXLAN_OFFSET 0x0c 1577 1563 #define 
INIT_HCA_CACHELINE_SZ_OFFSET 0x0e 1578 1564 #define INIT_HCA_FLAGS_OFFSET 0x014 1565 + #define INIT_HCA_RECOVERABLE_ERROR_EVENT_OFFSET 0x018 1579 1566 #define INIT_HCA_QPC_OFFSET 0x020 1580 1567 #define INIT_HCA_QPC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x10) 1581 1568 #define INIT_HCA_LOG_QP_OFFSET (INIT_HCA_QPC_OFFSET + 0x17) ··· 1683 1668 dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE; 1684 1669 } 1685 1670 1671 + if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT) 1672 + *(inbox + INIT_HCA_RECOVERABLE_ERROR_EVENT_OFFSET / 4) |= cpu_to_be32(1 << 31); 1673 + 1686 1674 /* QPC/EEC/CQC/EQC/RDMARC attributes */ 1687 1675 1688 1676 MLX4_PUT(inbox, param->qpc_base, INIT_HCA_QPC_BASE_OFFSET); ··· 1770 1752 MLX4_PUT(inbox, parser_params, INIT_HCA_VXLAN_OFFSET); 1771 1753 } 1772 1754 1773 - err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA, 10000, 1774 - MLX4_CMD_NATIVE); 1755 + err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA, 1756 + MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE); 1775 1757 1776 1758 if (err) 1777 1759 mlx4_err(dev, "INIT_HCA returns %d\n", err); ··· 1897 1879 return err; 1898 1880 } 1899 1881 1882 + static int mlx4_hca_core_clock_update(struct mlx4_dev *dev) 1883 + { 1884 + struct mlx4_cmd_mailbox *mailbox; 1885 + __be32 *outbox; 1886 + int err; 1887 + 1888 + mailbox = mlx4_alloc_cmd_mailbox(dev); 1889 + if (IS_ERR(mailbox)) { 1890 + mlx4_warn(dev, "hca_core_clock mailbox allocation failed\n"); 1891 + return PTR_ERR(mailbox); 1892 + } 1893 + outbox = mailbox->buf; 1894 + 1895 + err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, 1896 + MLX4_CMD_QUERY_HCA, 1897 + MLX4_CMD_TIME_CLASS_B, 1898 + !mlx4_is_slave(dev)); 1899 + if (err) { 1900 + mlx4_warn(dev, "hca_core_clock update failed\n"); 1901 + goto out; 1902 + } 1903 + 1904 + MLX4_GET(dev->caps.hca_core_clock, outbox, QUERY_HCA_CORE_CLOCK_OFFSET); 1905 + 1906 + out: 1907 + mlx4_free_cmd_mailbox(dev, mailbox); 1908 + 1909 + return err; 1910 + } 1911 + 1900 1912 /* for IB-type 
ports only in SRIOV mode. Checks that both proxy QP0 1901 1913 * and real QP0 are active, so that the paravirtualized QP0 is ready 1902 1914 * to operate */ ··· 2031 1983 err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT, 2032 1984 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); 2033 1985 1986 + if (!err) 1987 + mlx4_hca_core_clock_update(dev); 1988 + 2034 1989 return err; 2035 1990 } 2036 1991 EXPORT_SYMBOL_GPL(mlx4_INIT_PORT); ··· 2058 2007 if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) { 2059 2008 if (priv->mfunc.master.init_port_ref[port] == 1) { 2060 2009 err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 2061 - 1000, MLX4_CMD_NATIVE); 2010 + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 2062 2011 if (err) 2063 2012 return err; 2064 2013 } ··· 2069 2018 if (!priv->mfunc.master.qp0_state[port].qp0_active && 2070 2019 priv->mfunc.master.qp0_state[port].port_active) { 2071 2020 err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 2072 - 1000, MLX4_CMD_NATIVE); 2021 + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 2073 2022 if (err) 2074 2023 return err; 2075 2024 priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port); ··· 2084 2033 2085 2034 int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port) 2086 2035 { 2087 - return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 1000, 2088 - MLX4_CMD_WRAPPED); 2036 + return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 2037 + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); 2089 2038 } 2090 2039 EXPORT_SYMBOL_GPL(mlx4_CLOSE_PORT); 2091 2040 2092 2041 int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic) 2093 2042 { 2094 - return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA, 1000, 2095 - MLX4_CMD_NATIVE); 2043 + return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA, 2044 + MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE); 2096 2045 } 2097 2046 2098 2047 struct mlx4_config_dev { ··· 2231 2180 int mlx4_NOP(struct mlx4_dev *dev) 2232 2181 { 2233 2182 /* Input modifier of 0x1f means "finish as soon as possible." 
*/ 2234 - return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, 100, MLX4_CMD_NATIVE); 2183 + return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, MLX4_CMD_TIME_CLASS_A, 2184 + MLX4_CMD_NATIVE); 2235 2185 } 2236 2186 2237 2187 int mlx4_get_phys_port_id(struct mlx4_dev *dev)
+1
drivers/net/ethernet/mellanox/mlx4/fw.h
··· 147 147 u32 qp0_proxy_qpn; 148 148 u32 qp1_tunnel_qpn; 149 149 u32 qp1_proxy_qpn; 150 + u32 reserved_lkey; 150 151 u8 physical_port; 151 152 u8 port_flags; 152 153 u8 flags1;
+4 -1
drivers/net/ethernet/mellanox/mlx4/main.c
··· 797 797 dev->caps.num_mpts = 1 << hca_param.log_mpt_sz; 798 798 dev->caps.num_eqs = func_cap.max_eq; 799 799 dev->caps.reserved_eqs = func_cap.reserved_eq; 800 + dev->caps.reserved_lkey = func_cap.reserved_lkey; 800 801 dev->caps.num_pds = MLX4_NUM_PDS; 801 802 dev->caps.num_mgms = 0; 802 803 dev->caps.num_amgms = 0; ··· 2979 2978 mlx4_free_eq_table(dev); 2980 2979 2981 2980 err_master_mfunc: 2982 - if (mlx4_is_master(dev)) 2981 + if (mlx4_is_master(dev)) { 2982 + mlx4_free_resource_tracker(dev, RES_TR_FREE_STRUCTS_ONLY); 2983 2983 mlx4_multi_func_cleanup(dev); 2984 + } 2984 2985 2985 2986 if (mlx4_is_slave(dev)) { 2986 2987 kfree(dev->caps.qp0_qkey);
+1
drivers/net/ethernet/mellanox/mlx4/mlx4.h
··· 196 196 struct mlx4_vhcr_cmd { 197 197 __be64 in_param; 198 198 __be32 in_modifier; 199 + u32 reserved1; 199 200 __be64 out_param; 200 201 __be16 token; 201 202 u16 reserved;
+2 -2
drivers/net/ethernet/mellanox/mlx4/mr.c
··· 1155 1155 1156 1156 int mlx4_SYNC_TPT(struct mlx4_dev *dev) 1157 1157 { 1158 - return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT, 1000, 1159 - MLX4_CMD_NATIVE); 1158 + return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT, 1159 + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE); 1160 1160 } 1161 1161 EXPORT_SYMBOL_GPL(mlx4_SYNC_TPT);
-1
drivers/net/ethernet/mellanox/mlx4/pd.c
··· 214 214 list_add(&uar->bf_list, &priv->bf_list); 215 215 } 216 216 217 - bf->uar = uar; 218 217 idx = ffz(uar->free_bf_bmap); 219 218 uar->free_bf_bmap |= 1 << idx; 220 219 bf->uar = uar;
+4 -12
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
··· 4677 4677 int state; 4678 4678 LIST_HEAD(tlist); 4679 4679 int eqn; 4680 - struct mlx4_cmd_mailbox *mailbox; 4681 4680 4682 4681 err = move_all_busy(dev, slave, RES_EQ); 4683 4682 if (err) ··· 4702 4703 break; 4703 4704 4704 4705 case RES_EQ_HW: 4705 - mailbox = mlx4_alloc_cmd_mailbox(dev); 4706 - if (IS_ERR(mailbox)) { 4707 - cond_resched(); 4708 - continue; 4709 - } 4710 - err = mlx4_cmd_box(dev, slave, 0, 4711 - eqn & 0xff, 0, 4712 - MLX4_CMD_HW2SW_EQ, 4713 - MLX4_CMD_TIME_CLASS_A, 4714 - MLX4_CMD_NATIVE); 4706 + err = mlx4_cmd(dev, slave, eqn & 0xff, 4707 + 1, MLX4_CMD_HW2SW_EQ, 4708 + MLX4_CMD_TIME_CLASS_A, 4709 + MLX4_CMD_NATIVE); 4715 4710 if (err) 4716 4711 mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n", 4717 4712 slave, eqn); 4718 - mlx4_free_cmd_mailbox(dev, mailbox); 4719 4713 atomic_dec(&eq->mtt->ref_count); 4720 4714 state = RES_EQ_RESERVED; 4721 4715 break;
+3 -3
include/linux/mlx4/cmd.h
··· 165 165 }; 166 166 167 167 enum { 168 - MLX4_CMD_TIME_CLASS_A = 10000, 169 - MLX4_CMD_TIME_CLASS_B = 10000, 170 - MLX4_CMD_TIME_CLASS_C = 10000, 168 + MLX4_CMD_TIME_CLASS_A = 60000, 169 + MLX4_CMD_TIME_CLASS_B = 60000, 170 + MLX4_CMD_TIME_CLASS_C = 60000, 171 171 }; 172 172 173 173 enum {
+13 -1
include/linux/mlx4/device.h
··· 200 200 MLX4_DEV_CAP_FLAG2_CONFIG_DEV = 1LL << 16, 201 201 MLX4_DEV_CAP_FLAG2_SYS_EQS = 1LL << 17, 202 202 MLX4_DEV_CAP_FLAG2_80_VFS = 1LL << 18, 203 - MLX4_DEV_CAP_FLAG2_FS_A0 = 1LL << 19 203 + MLX4_DEV_CAP_FLAG2_FS_A0 = 1LL << 19, 204 + MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT = 1LL << 20 204 205 }; 205 206 206 207 enum { ··· 281 280 MLX4_EVENT_TYPE_FATAL_WARNING = 0x1b, 282 281 MLX4_EVENT_TYPE_FLR_EVENT = 0x1c, 283 282 MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT = 0x1d, 283 + MLX4_EVENT_TYPE_RECOVERABLE_ERROR_EVENT = 0x3e, 284 284 MLX4_EVENT_TYPE_NONE = 0xff, 285 285 }; 286 286 287 287 enum { 288 288 MLX4_PORT_CHANGE_SUBTYPE_DOWN = 1, 289 289 MLX4_PORT_CHANGE_SUBTYPE_ACTIVE = 4 290 + }; 291 + 292 + enum { 293 + MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_BAD_CABLE = 1, 294 + MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_UNSUPPORTED_CABLE = 2, 290 295 }; 291 296 292 297 enum { ··· 867 860 } __packed tbl_change_info; 868 861 } params; 869 862 } __packed port_mgmt_change; 863 + struct { 864 + u8 reserved[3]; 865 + u8 port; 866 + u32 reserved1[5]; 867 + } __packed bad_cable; 870 868 } event; 871 869 u8 slave_id; 872 870 u8 reserved3[2];