Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'mlx4-next'

Or Gerlitz says:

====================
mlx4: Flexible (asymmetric) allocation of EQs and MSI-X vectors

This series from Matan Barak is built as follows:

The first two patches fix small bugs w.r.t. the firmware spec. Next
are two patches which do more re-factoring of the init/fini flow
and a patch that adds support for the QUERY_FUNC firmware command,
these are all pre-steps for the major patch of the series. In this
patch (#6) we change the order of talking/querying the firmware
and enabling SRIOV. This allows us to remove worst-case assumptions
w.r.t. the number of available MSI-X vectors and EQs per function.

The last patch builds on this ordering change to enable
support for > 64 VFs over firmware that allows for that.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+439 -118
+1 -2
drivers/infiniband/hw/mlx4/main.c
··· 1975 1975 dev->caps.num_ports > dev->caps.comp_pool) 1976 1976 return; 1977 1977 1978 - eq_per_port = rounddown_pow_of_two(dev->caps.comp_pool/ 1979 - dev->caps.num_ports); 1978 + eq_per_port = dev->caps.comp_pool / dev->caps.num_ports; 1980 1979 1981 1980 /* Init eq table */ 1982 1981 added_eqs = 0;
+43 -31
drivers/net/ethernet/mellanox/mlx4/cmd.c
··· 2117 2117 int mlx4_cmd_init(struct mlx4_dev *dev) 2118 2118 { 2119 2119 struct mlx4_priv *priv = mlx4_priv(dev); 2120 + int flags = 0; 2120 2121 2121 - mutex_init(&priv->cmd.hcr_mutex); 2122 - mutex_init(&priv->cmd.slave_cmd_mutex); 2123 - sema_init(&priv->cmd.poll_sem, 1); 2124 - priv->cmd.use_events = 0; 2125 - priv->cmd.toggle = 1; 2122 + if (!priv->cmd.initialized) { 2123 + mutex_init(&priv->cmd.hcr_mutex); 2124 + mutex_init(&priv->cmd.slave_cmd_mutex); 2125 + sema_init(&priv->cmd.poll_sem, 1); 2126 + priv->cmd.use_events = 0; 2127 + priv->cmd.toggle = 1; 2128 + priv->cmd.initialized = 1; 2129 + flags |= MLX4_CMD_CLEANUP_STRUCT; 2130 + } 2126 2131 2127 - priv->cmd.hcr = NULL; 2128 - priv->mfunc.vhcr = NULL; 2129 - 2130 - if (!mlx4_is_slave(dev)) { 2132 + if (!mlx4_is_slave(dev) && !priv->cmd.hcr) { 2131 2133 priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) + 2132 2134 MLX4_HCR_BASE, MLX4_HCR_SIZE); 2133 2135 if (!priv->cmd.hcr) { 2134 2136 mlx4_err(dev, "Couldn't map command register\n"); 2135 - return -ENOMEM; 2137 + goto err; 2136 2138 } 2139 + flags |= MLX4_CMD_CLEANUP_HCR; 2137 2140 } 2138 2141 2139 - if (mlx4_is_mfunc(dev)) { 2142 + if (mlx4_is_mfunc(dev) && !priv->mfunc.vhcr) { 2140 2143 priv->mfunc.vhcr = dma_alloc_coherent(&(dev->pdev->dev), PAGE_SIZE, 2141 2144 &priv->mfunc.vhcr_dma, 2142 2145 GFP_KERNEL); 2143 2146 if (!priv->mfunc.vhcr) 2144 - goto err_hcr; 2147 + goto err; 2148 + 2149 + flags |= MLX4_CMD_CLEANUP_VHCR; 2145 2150 } 2146 2151 2147 - priv->cmd.pool = pci_pool_create("mlx4_cmd", dev->pdev, 2148 - MLX4_MAILBOX_SIZE, 2149 - MLX4_MAILBOX_SIZE, 0); 2150 - if (!priv->cmd.pool) 2151 - goto err_vhcr; 2152 + if (!priv->cmd.pool) { 2153 + priv->cmd.pool = pci_pool_create("mlx4_cmd", dev->pdev, 2154 + MLX4_MAILBOX_SIZE, 2155 + MLX4_MAILBOX_SIZE, 0); 2156 + if (!priv->cmd.pool) 2157 + goto err; 2158 + 2159 + flags |= MLX4_CMD_CLEANUP_POOL; 2160 + } 2152 2161 2153 2162 return 0; 2154 2163 2155 - err_vhcr: 2156 - if (mlx4_is_mfunc(dev)) 
2157 - dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE, 2158 - priv->mfunc.vhcr, priv->mfunc.vhcr_dma); 2159 - priv->mfunc.vhcr = NULL; 2160 - 2161 - err_hcr: 2162 - if (!mlx4_is_slave(dev)) 2163 - iounmap(priv->cmd.hcr); 2164 + err: 2165 + mlx4_cmd_cleanup(dev, flags); 2164 2166 return -ENOMEM; 2165 2167 } 2166 2168 ··· 2186 2184 iounmap(priv->mfunc.comm); 2187 2185 } 2188 2186 2189 - void mlx4_cmd_cleanup(struct mlx4_dev *dev) 2187 + void mlx4_cmd_cleanup(struct mlx4_dev *dev, int cleanup_mask) 2190 2188 { 2191 2189 struct mlx4_priv *priv = mlx4_priv(dev); 2192 2190 2193 - pci_pool_destroy(priv->cmd.pool); 2191 + if (priv->cmd.pool && (cleanup_mask & MLX4_CMD_CLEANUP_POOL)) { 2192 + pci_pool_destroy(priv->cmd.pool); 2193 + priv->cmd.pool = NULL; 2194 + } 2194 2195 2195 - if (!mlx4_is_slave(dev)) 2196 + if (!mlx4_is_slave(dev) && priv->cmd.hcr && 2197 + (cleanup_mask & MLX4_CMD_CLEANUP_HCR)) { 2196 2198 iounmap(priv->cmd.hcr); 2197 - if (mlx4_is_mfunc(dev)) 2199 + priv->cmd.hcr = NULL; 2200 + } 2201 + if (mlx4_is_mfunc(dev) && priv->mfunc.vhcr && 2202 + (cleanup_mask & MLX4_CMD_CLEANUP_VHCR)) { 2198 2203 dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE, 2199 2204 priv->mfunc.vhcr, priv->mfunc.vhcr_dma); 2200 - priv->mfunc.vhcr = NULL; 2205 + priv->mfunc.vhcr = NULL; 2206 + } 2207 + if (priv->cmd.initialized && (cleanup_mask & MLX4_CMD_CLEANUP_STRUCT)) 2208 + priv->cmd.initialized = 0; 2201 2209 } 2202 2210 2203 2211 /*
+6 -2
drivers/net/ethernet/mellanox/mlx4/eq.c
··· 1123 1123 goto err_out_free; 1124 1124 } 1125 1125 1126 - err = mlx4_bitmap_init(&priv->eq_table.bitmap, dev->caps.num_eqs, 1127 - dev->caps.num_eqs - 1, dev->caps.reserved_eqs, 0); 1126 + err = mlx4_bitmap_init(&priv->eq_table.bitmap, 1127 + roundup_pow_of_two(dev->caps.num_eqs), 1128 + dev->caps.num_eqs - 1, 1129 + dev->caps.reserved_eqs, 1130 + roundup_pow_of_two(dev->caps.num_eqs) - 1131 + dev->caps.num_eqs); 1128 1132 if (err) 1129 1133 goto err_out_free; 1130 1134
+100 -13
drivers/net/ethernet/mellanox/mlx4/fw.c
··· 142 142 [13] = "Large cache line (>64B) EQE stride support", 143 143 [14] = "Ethernet protocol control support", 144 144 [15] = "Ethernet Backplane autoneg support", 145 - [16] = "CONFIG DEV support" 145 + [16] = "CONFIG DEV support", 146 + [17] = "Asymmetric EQs support", 147 + [18] = "More than 80 VFs support" 146 148 }; 147 149 int i; 148 150 ··· 179 177 return err; 180 178 } 181 179 180 + int mlx4_QUERY_FUNC(struct mlx4_dev *dev, struct mlx4_func *func, int slave) 181 + { 182 + struct mlx4_cmd_mailbox *mailbox; 183 + u32 *outbox; 184 + u8 in_modifier; 185 + u8 field; 186 + u16 field16; 187 + int err; 188 + 189 + #define QUERY_FUNC_BUS_OFFSET 0x00 190 + #define QUERY_FUNC_DEVICE_OFFSET 0x01 191 + #define QUERY_FUNC_FUNCTION_OFFSET 0x01 192 + #define QUERY_FUNC_PHYSICAL_FUNCTION_OFFSET 0x03 193 + #define QUERY_FUNC_RSVD_EQS_OFFSET 0x04 194 + #define QUERY_FUNC_MAX_EQ_OFFSET 0x06 195 + #define QUERY_FUNC_RSVD_UARS_OFFSET 0x0b 196 + 197 + mailbox = mlx4_alloc_cmd_mailbox(dev); 198 + if (IS_ERR(mailbox)) 199 + return PTR_ERR(mailbox); 200 + outbox = mailbox->buf; 201 + 202 + in_modifier = slave; 203 + 204 + err = mlx4_cmd_box(dev, 0, mailbox->dma, in_modifier, 0, 205 + MLX4_CMD_QUERY_FUNC, 206 + MLX4_CMD_TIME_CLASS_A, 207 + MLX4_CMD_NATIVE); 208 + if (err) 209 + goto out; 210 + 211 + MLX4_GET(field, outbox, QUERY_FUNC_BUS_OFFSET); 212 + func->bus = field & 0xf; 213 + MLX4_GET(field, outbox, QUERY_FUNC_DEVICE_OFFSET); 214 + func->device = field & 0xf1; 215 + MLX4_GET(field, outbox, QUERY_FUNC_FUNCTION_OFFSET); 216 + func->function = field & 0x7; 217 + MLX4_GET(field, outbox, QUERY_FUNC_PHYSICAL_FUNCTION_OFFSET); 218 + func->physical_function = field & 0xf; 219 + MLX4_GET(field16, outbox, QUERY_FUNC_RSVD_EQS_OFFSET); 220 + func->rsvd_eqs = field16 & 0xffff; 221 + MLX4_GET(field16, outbox, QUERY_FUNC_MAX_EQ_OFFSET); 222 + func->max_eq = field16 & 0xffff; 223 + MLX4_GET(field, outbox, QUERY_FUNC_RSVD_UARS_OFFSET); 224 + func->rsvd_uars = field & 0x0f; 225 + 226 + 
mlx4_dbg(dev, "Bus: %d, Device: %d, Function: %d, Physical function: %d, Max EQs: %d, Reserved EQs: %d, Reserved UARs: %d\n", 227 + func->bus, func->device, func->function, func->physical_function, 228 + func->max_eq, func->rsvd_eqs, func->rsvd_uars); 229 + 230 + out: 231 + mlx4_free_cmd_mailbox(dev, mailbox); 232 + return err; 233 + } 234 + 182 235 int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave, 183 236 struct mlx4_vhcr *vhcr, 184 237 struct mlx4_cmd_mailbox *inbox, ··· 244 187 u8 field, port; 245 188 u32 size, proxy_qp, qkey; 246 189 int err = 0; 190 + struct mlx4_func func; 247 191 248 192 #define QUERY_FUNC_CAP_FLAGS_OFFSET 0x0 249 193 #define QUERY_FUNC_CAP_NUM_PORTS_OFFSET 0x1 ··· 289 231 #define QUERY_FUNC_CAP_VF_ENABLE_QP0 0x08 290 232 291 233 #define QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID 0x80 234 + #define QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS (1 << 31) 292 235 293 236 if (vhcr->op_modifier == 1) { 294 237 struct mlx4_active_ports actv_ports = ··· 368 309 size = dev->caps.num_cqs; 369 310 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP); 370 311 371 - size = dev->caps.num_eqs; 372 - MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET); 373 - 374 - size = dev->caps.reserved_eqs; 375 - MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET); 312 + if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) || 313 + mlx4_QUERY_FUNC(dev, &func, slave)) { 314 + size = vhcr->in_modifier & 315 + QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS ? 316 + dev->caps.num_eqs : 317 + rounddown_pow_of_two(dev->caps.num_eqs); 318 + MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET); 319 + size = dev->caps.reserved_eqs; 320 + MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET); 321 + } else { 322 + size = vhcr->in_modifier & 323 + QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS ? 
324 + func.max_eq : 325 + rounddown_pow_of_two(func.max_eq); 326 + MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET); 327 + size = func.rsvd_eqs; 328 + MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET); 329 + } 376 330 377 331 size = priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[slave]; 378 332 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET); ··· 407 335 return err; 408 336 } 409 337 410 - int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port, 338 + int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u8 gen_or_port, 411 339 struct mlx4_func_cap *func_cap) 412 340 { 413 341 struct mlx4_cmd_mailbox *mailbox; ··· 415 343 u8 field, op_modifier; 416 344 u32 size, qkey; 417 345 int err = 0, quotas = 0; 346 + u32 in_modifier; 418 347 419 348 op_modifier = !!gen_or_port; /* 0 = general, 1 = logical port */ 349 + in_modifier = op_modifier ? gen_or_port : 350 + QUERY_FUNC_CAP_SUPPORTS_NON_POWER_OF_2_NUM_EQS; 420 351 421 352 mailbox = mlx4_alloc_cmd_mailbox(dev); 422 353 if (IS_ERR(mailbox)) 423 354 return PTR_ERR(mailbox); 424 355 425 - err = mlx4_cmd_box(dev, 0, mailbox->dma, gen_or_port, op_modifier, 356 + err = mlx4_cmd_box(dev, 0, mailbox->dma, in_modifier, op_modifier, 426 357 MLX4_CMD_QUERY_FUNC_CAP, 427 358 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); 428 359 if (err) ··· 597 522 #define QUERY_DEV_CAP_MAX_MRW_SZ_OFFSET 0x21 598 523 #define QUERY_DEV_CAP_RSVD_MRW_OFFSET 0x22 599 524 #define QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET 0x23 525 + #define QUERY_DEV_CAP_NUM_SYS_EQ_OFFSET 0x26 600 526 #define QUERY_DEV_CAP_MAX_AV_OFFSET 0x27 601 527 #define QUERY_DEV_CAP_MAX_REQ_QP_OFFSET 0x29 602 528 #define QUERY_DEV_CAP_MAX_RES_QP_OFFSET 0x2b ··· 687 611 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MPT_OFFSET); 688 612 dev_cap->max_mpts = 1 << (field & 0x3f); 689 613 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_EQ_OFFSET); 690 - dev_cap->reserved_eqs = field & 0xf; 614 + dev_cap->reserved_eqs = 1 << (field & 0xf); 691 615 MLX4_GET(field, 
outbox, QUERY_DEV_CAP_MAX_EQ_OFFSET); 692 616 dev_cap->max_eqs = 1 << (field & 0xf); 693 617 MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_MTT_OFFSET); ··· 698 622 dev_cap->reserved_mrws = 1 << (field & 0xf); 699 623 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MTT_SEG_OFFSET); 700 624 dev_cap->max_mtt_seg = 1 << (field & 0x3f); 625 + MLX4_GET(size, outbox, QUERY_DEV_CAP_NUM_SYS_EQ_OFFSET); 626 + dev_cap->num_sys_eqs = size & 0xfff; 701 627 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_REQ_QP_OFFSET); 702 628 dev_cap->max_requester_per_qp = 1 << (field & 0x3f); 703 629 MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RES_QP_OFFSET); ··· 861 783 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_VLAN_CONTROL; 862 784 if (field32 & (1 << 20)) 863 785 dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FSM; 786 + if (field32 & (1 << 21)) 787 + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_80_VFS; 864 788 865 789 if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) { 866 790 for (i = 1; i <= dev_cap->num_ports; ++i) { ··· 929 849 * we can't use any EQs whose doorbell falls on that page, 930 850 * even if the EQ itself isn't reserved. 
931 851 */ 932 - dev_cap->reserved_eqs = max(dev_cap->reserved_uars * 4, 933 - dev_cap->reserved_eqs); 852 + if (dev_cap->num_sys_eqs == 0) 853 + dev_cap->reserved_eqs = max(dev_cap->reserved_uars * 4, 854 + dev_cap->reserved_eqs); 855 + else 856 + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SYS_EQS; 934 857 935 858 mlx4_dbg(dev, "Max ICM size %lld MB\n", 936 859 (unsigned long long) dev_cap->max_icm_sz >> 20); ··· 943 860 dev_cap->max_srqs, dev_cap->reserved_srqs, dev_cap->srq_entry_sz); 944 861 mlx4_dbg(dev, "Max CQs: %d, reserved CQs: %d, entry size: %d\n", 945 862 dev_cap->max_cqs, dev_cap->reserved_cqs, dev_cap->cqc_entry_sz); 946 - mlx4_dbg(dev, "Max EQs: %d, reserved EQs: %d, entry size: %d\n", 947 - dev_cap->max_eqs, dev_cap->reserved_eqs, dev_cap->eqc_entry_sz); 863 + mlx4_dbg(dev, "Num sys EQs: %d, max EQs: %d, reserved EQs: %d, entry size: %d\n", 864 + dev_cap->num_sys_eqs, dev_cap->max_eqs, dev_cap->reserved_eqs, 865 + dev_cap->eqc_entry_sz); 948 866 mlx4_dbg(dev, "reserved MPTs: %d, reserved MTTs: %d\n", 949 867 dev_cap->reserved_mrws, dev_cap->reserved_mtts); 950 868 mlx4_dbg(dev, "Max PDs: %d, reserved PDs: %d, reserved UARs: %d\n", ··· 1491 1407 #define INIT_HCA_AUXC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x50) 1492 1408 #define INIT_HCA_EQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x60) 1493 1409 #define INIT_HCA_LOG_EQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x67) 1410 + #define INIT_HCA_NUM_SYS_EQS_OFFSET (INIT_HCA_QPC_OFFSET + 0x6a) 1494 1411 #define INIT_HCA_RDMARC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x70) 1495 1412 #define INIT_HCA_LOG_RD_OFFSET (INIT_HCA_QPC_OFFSET + 0x77) 1496 1413 #define INIT_HCA_MCAST_OFFSET 0x0c0 ··· 1595 1510 MLX4_PUT(inbox, param->auxc_base, INIT_HCA_AUXC_BASE_OFFSET); 1596 1511 MLX4_PUT(inbox, param->eqc_base, INIT_HCA_EQC_BASE_OFFSET); 1597 1512 MLX4_PUT(inbox, param->log_num_eqs, INIT_HCA_LOG_EQ_OFFSET); 1513 + MLX4_PUT(inbox, param->num_sys_eqs, INIT_HCA_NUM_SYS_EQS_OFFSET); 1598 1514 MLX4_PUT(inbox, param->rdmarc_base, 
INIT_HCA_RDMARC_BASE_OFFSET); 1599 1515 MLX4_PUT(inbox, param->log_rd_per_qp, INIT_HCA_LOG_RD_OFFSET); 1600 1516 ··· 1706 1620 MLX4_GET(param->auxc_base, outbox, INIT_HCA_AUXC_BASE_OFFSET); 1707 1621 MLX4_GET(param->eqc_base, outbox, INIT_HCA_EQC_BASE_OFFSET); 1708 1622 MLX4_GET(param->log_num_eqs, outbox, INIT_HCA_LOG_EQ_OFFSET); 1623 + MLX4_GET(param->num_sys_eqs, outbox, INIT_HCA_NUM_SYS_EQS_OFFSET); 1709 1624 MLX4_GET(param->rdmarc_base, outbox, INIT_HCA_RDMARC_BASE_OFFSET); 1710 1625 MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET); 1711 1626
+14 -1
drivers/net/ethernet/mellanox/mlx4/fw.h
··· 56 56 int max_mpts; 57 57 int reserved_eqs; 58 58 int max_eqs; 59 + int num_sys_eqs; 59 60 int reserved_mtts; 60 61 int max_mrw_sz; 61 62 int reserved_mrws; ··· 146 145 u64 phys_port_id; 147 146 }; 148 147 148 + struct mlx4_func { 149 + int bus; 150 + int device; 151 + int function; 152 + int physical_function; 153 + int rsvd_eqs; 154 + int max_eq; 155 + int rsvd_uars; 156 + }; 157 + 149 158 struct mlx4_adapter { 150 159 char board_id[MLX4_BOARD_ID_LEN]; 151 160 u8 inta_pin; ··· 181 170 u8 log_num_srqs; 182 171 u8 log_num_cqs; 183 172 u8 log_num_eqs; 173 + u16 num_sys_eqs; 184 174 u8 log_rd_per_qp; 185 175 u8 log_mc_table_sz; 186 176 u8 log_mpt_sz; ··· 216 204 }; 217 205 218 206 int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap); 219 - int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port, 207 + int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u8 gen_or_port, 220 208 struct mlx4_func_cap *func_cap); 221 209 int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave, 222 210 struct mlx4_vhcr *vhcr, 223 211 struct mlx4_cmd_mailbox *inbox, 224 212 struct mlx4_cmd_mailbox *outbox, 225 213 struct mlx4_cmd_info *cmd); 214 + int mlx4_QUERY_FUNC(struct mlx4_dev *dev, struct mlx4_func *func, int slave); 226 215 int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm); 227 216 int mlx4_UNMAP_FA(struct mlx4_dev *dev); 228 217 int mlx4_RUN_FW(struct mlx4_dev *dev);
+247 -60
drivers/net/ethernet/mellanox/mlx4/main.c
··· 197 197 dev->caps.port_mask[i] = dev->caps.port_type[i]; 198 198 } 199 199 200 + enum { 201 + MLX4_QUERY_FUNC_NUM_SYS_EQS = 1 << 0, 202 + }; 203 + 204 + static int mlx4_query_func(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) 205 + { 206 + int err = 0; 207 + struct mlx4_func func; 208 + 209 + if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) { 210 + err = mlx4_QUERY_FUNC(dev, &func, 0); 211 + if (err) { 212 + mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); 213 + return err; 214 + } 215 + dev_cap->max_eqs = func.max_eq; 216 + dev_cap->reserved_eqs = func.rsvd_eqs; 217 + dev_cap->reserved_uars = func.rsvd_uars; 218 + err |= MLX4_QUERY_FUNC_NUM_SYS_EQS; 219 + } 220 + return err; 221 + } 222 + 200 223 static void mlx4_enable_cqe_eqe_stride(struct mlx4_dev *dev) 201 224 { 202 225 struct mlx4_caps *dev_cap = &dev->caps; ··· 284 261 } 285 262 286 263 dev->caps.num_ports = dev_cap->num_ports; 287 - dev->phys_caps.num_phys_eqs = MLX4_MAX_EQ_NUM; 264 + dev->caps.num_sys_eqs = dev_cap->num_sys_eqs; 265 + dev->phys_caps.num_phys_eqs = dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS ? 266 + dev->caps.num_sys_eqs : 267 + MLX4_MAX_EQ_NUM; 288 268 for (i = 1; i <= dev->caps.num_ports; ++i) { 289 269 dev->caps.vl_cap[i] = dev_cap->max_vl[i]; 290 270 dev->caps.ib_mtu_cap[i] = dev_cap->ib_mtu[i]; ··· 657 631 struct mlx4_dev_cap dev_cap; 658 632 struct mlx4_func_cap func_cap; 659 633 struct mlx4_init_hca_param hca_param; 660 - int i; 634 + u8 i; 661 635 662 636 memset(&hca_param, 0, sizeof(hca_param)); 663 637 err = mlx4_QUERY_HCA(dev, &hca_param); ··· 758 732 } 759 733 760 734 for (i = 1; i <= dev->caps.num_ports; ++i) { 761 - err = mlx4_QUERY_FUNC_CAP(dev, (u32) i, &func_cap); 735 + err = mlx4_QUERY_FUNC_CAP(dev, i, &func_cap); 762 736 if (err) { 763 737 mlx4_err(dev, "QUERY_FUNC_CAP port command failed for port %d, aborting (%d)\n", 764 738 i, err); ··· 1156 1130 if (err) 1157 1131 goto err_srq; 1158 1132 1159 - num_eqs = (mlx4_is_master(dev)) ? 
dev->phys_caps.num_phys_eqs : 1160 - dev->caps.num_eqs; 1133 + num_eqs = dev->phys_caps.num_phys_eqs; 1161 1134 err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table, 1162 1135 cmpt_base + 1163 1136 ((u64) (MLX4_CMPT_TYPE_EQ * ··· 1218 1193 } 1219 1194 1220 1195 1221 - num_eqs = (mlx4_is_master(dev)) ? dev->phys_caps.num_phys_eqs : 1222 - dev->caps.num_eqs; 1196 + num_eqs = dev->phys_caps.num_phys_eqs; 1223 1197 err = mlx4_init_icm_table(dev, &priv->eq_table.table, 1224 1198 init_hca->eqc_base, dev_cap->eqc_entry_sz, 1225 1199 num_eqs, num_eqs, 0, 0); ··· 1497 1473 else { 1498 1474 mlx4_CLOSE_HCA(dev, 0); 1499 1475 mlx4_free_icms(dev); 1476 + } 1477 + } 1478 + 1479 + static void mlx4_close_fw(struct mlx4_dev *dev) 1480 + { 1481 + if (!mlx4_is_slave(dev)) { 1500 1482 mlx4_UNMAP_FA(dev); 1501 1483 mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0); 1502 1484 } ··· 1649 1619 == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) ? "vxlan" : "none"); 1650 1620 } 1651 1621 1652 - static int mlx4_init_hca(struct mlx4_dev *dev) 1622 + static int mlx4_init_fw(struct mlx4_dev *dev) 1653 1623 { 1654 - struct mlx4_priv *priv = mlx4_priv(dev); 1655 - struct mlx4_adapter adapter; 1656 - struct mlx4_dev_cap dev_cap; 1657 1624 struct mlx4_mod_stat_cfg mlx4_cfg; 1658 - struct mlx4_profile profile; 1659 - struct mlx4_init_hca_param init_hca; 1660 - u64 icm_size; 1661 - int err; 1662 - struct mlx4_config_dev_params params; 1625 + int err = 0; 1663 1626 1664 1627 if (!mlx4_is_slave(dev)) { 1665 1628 err = mlx4_QUERY_FW(dev); ··· 1675 1652 err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg); 1676 1653 if (err) 1677 1654 mlx4_warn(dev, "Failed to override log_pg_sz parameter\n"); 1655 + } 1678 1656 1657 + return err; 1658 + } 1659 + 1660 + static int mlx4_init_hca(struct mlx4_dev *dev) 1661 + { 1662 + struct mlx4_priv *priv = mlx4_priv(dev); 1663 + struct mlx4_adapter adapter; 1664 + struct mlx4_dev_cap dev_cap; 1665 + struct mlx4_profile profile; 1666 + struct mlx4_init_hca_param init_hca; 1667 + u64 icm_size; 
1668 + struct mlx4_config_dev_params params; 1669 + int err; 1670 + 1671 + if (!mlx4_is_slave(dev)) { 1679 1672 err = mlx4_dev_cap(dev, &dev_cap); 1680 1673 if (err) { 1681 1674 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n"); ··· 1743 1704 mlx4_err(dev, "INIT_HCA command failed, aborting\n"); 1744 1705 goto err_free_icm; 1745 1706 } 1707 + 1708 + if (dev_cap.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) { 1709 + err = mlx4_query_func(dev, &dev_cap); 1710 + if (err < 0) { 1711 + mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n"); 1712 + goto err_stop_fw; 1713 + } else if (err & MLX4_QUERY_FUNC_NUM_SYS_EQS) { 1714 + dev->caps.num_eqs = dev_cap.max_eqs; 1715 + dev->caps.reserved_eqs = dev_cap.reserved_eqs; 1716 + dev->caps.reserved_uars = dev_cap.reserved_uars; 1717 + } 1718 + } 1719 + 1746 1720 /* 1747 1721 * If TS is supported by FW 1748 1722 * read HCA frequency by QUERY_HCA command ··· 2122 2070 { 2123 2071 struct mlx4_priv *priv = mlx4_priv(dev); 2124 2072 struct msix_entry *entries; 2125 - int nreq = min_t(int, dev->caps.num_ports * 2126 - min_t(int, num_online_cpus() + 1, 2127 - MAX_MSIX_P_PORT) + MSIX_LEGACY_SZ, MAX_MSIX); 2128 2073 int i; 2129 2074 2130 2075 if (msi_x) { 2076 + int nreq = dev->caps.num_ports * num_online_cpus() + MSIX_LEGACY_SZ; 2077 + 2131 2078 nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs, 2132 2079 nreq); 2133 2080 ··· 2326 2275 iounmap(owner); 2327 2276 } 2328 2277 2278 + #define SRIOV_VALID_STATE(flags) (!!((flags) & MLX4_FLAG_SRIOV) ==\ 2279 + !!((flags) & MLX4_FLAG_MASTER)) 2280 + 2281 + static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev, 2282 + u8 total_vfs, int existing_vfs) 2283 + { 2284 + u64 dev_flags = dev->flags; 2285 + 2286 + dev->dev_vfs = kzalloc( 2287 + total_vfs * sizeof(*dev->dev_vfs), 2288 + GFP_KERNEL); 2289 + if (NULL == dev->dev_vfs) { 2290 + mlx4_err(dev, "Failed to allocate memory for VFs\n"); 2291 + goto disable_sriov; 2292 + } else if (!(dev->flags & MLX4_FLAG_SRIOV)) { 
2293 + int err = 0; 2294 + 2295 + atomic_inc(&pf_loading); 2296 + if (existing_vfs) { 2297 + if (existing_vfs != total_vfs) 2298 + mlx4_err(dev, "SR-IOV was already enabled, but with num_vfs (%d) different than requested (%d)\n", 2299 + existing_vfs, total_vfs); 2300 + } else { 2301 + mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", total_vfs); 2302 + err = pci_enable_sriov(pdev, total_vfs); 2303 + } 2304 + if (err) { 2305 + mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d)\n", 2306 + err); 2307 + atomic_dec(&pf_loading); 2308 + goto disable_sriov; 2309 + } else { 2310 + mlx4_warn(dev, "Running in master mode\n"); 2311 + dev_flags |= MLX4_FLAG_SRIOV | 2312 + MLX4_FLAG_MASTER; 2313 + dev_flags &= ~MLX4_FLAG_SLAVE; 2314 + dev->num_vfs = total_vfs; 2315 + } 2316 + } 2317 + return dev_flags; 2318 + 2319 + disable_sriov: 2320 + dev->num_vfs = 0; 2321 + kfree(dev->dev_vfs); 2322 + return dev_flags & ~MLX4_FLAG_MASTER; 2323 + } 2324 + 2325 + enum { 2326 + MLX4_DEV_CAP_CHECK_NUM_VFS_ABOVE_64 = -1, 2327 + }; 2328 + 2329 + static int mlx4_check_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap, 2330 + int *nvfs) 2331 + { 2332 + int requested_vfs = nvfs[0] + nvfs[1] + nvfs[2]; 2333 + /* Checking for 64 VFs as a limitation of CX2 */ 2334 + if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_80_VFS) && 2335 + requested_vfs >= 64) { 2336 + mlx4_err(dev, "Requested %d VFs, but FW does not support more than 64\n", 2337 + requested_vfs); 2338 + return MLX4_DEV_CAP_CHECK_NUM_VFS_ABOVE_64; 2339 + } 2340 + return 0; 2341 + } 2342 + 2329 2343 static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data, 2330 2344 int total_vfs, int *nvfs, struct mlx4_priv *priv) 2331 2345 { ··· 2399 2283 int err; 2400 2284 int port; 2401 2285 int i; 2286 + struct mlx4_dev_cap *dev_cap = NULL; 2402 2287 int existing_vfs = 0; 2403 2288 2404 2289 dev = &priv->dev; ··· 2436 2319 } 2437 2320 } 2438 2321 2439 - if (total_vfs) { 2440 - mlx4_warn(dev, "Enabling SR-IOV with %d 
VFs\n", 2441 - total_vfs); 2442 - dev->dev_vfs = kzalloc( 2443 - total_vfs * sizeof(*dev->dev_vfs), 2444 - GFP_KERNEL); 2445 - if (NULL == dev->dev_vfs) { 2446 - mlx4_err(dev, "Failed to allocate memory for VFs\n"); 2447 - err = -ENOMEM; 2448 - goto err_free_own; 2449 - } else { 2450 - atomic_inc(&pf_loading); 2451 - existing_vfs = pci_num_vf(pdev); 2452 - if (existing_vfs) { 2453 - err = 0; 2454 - if (existing_vfs != total_vfs) 2455 - mlx4_err(dev, "SR-IOV was already enabled, but with num_vfs (%d) different than requested (%d)\n", 2456 - existing_vfs, total_vfs); 2457 - } else { 2458 - err = pci_enable_sriov(pdev, total_vfs); 2459 - } 2460 - if (err) { 2461 - mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d)\n", 2462 - err); 2463 - atomic_dec(&pf_loading); 2464 - } else { 2465 - mlx4_warn(dev, "Running in master mode\n"); 2466 - dev->flags |= MLX4_FLAG_SRIOV | 2467 - MLX4_FLAG_MASTER; 2468 - dev->num_vfs = total_vfs; 2469 - } 2470 - } 2471 - } 2472 - 2473 2322 atomic_set(&priv->opreq_count, 0); 2474 2323 INIT_WORK(&priv->opreq_task, mlx4_opreq_action); 2475 2324 ··· 2448 2365 if (err) { 2449 2366 mlx4_err(dev, "Failed to reset HCA, aborting\n"); 2450 2367 goto err_sriov; 2368 + } 2369 + 2370 + if (total_vfs) { 2371 + existing_vfs = pci_num_vf(pdev); 2372 + dev->flags = MLX4_FLAG_MASTER; 2373 + dev->num_vfs = total_vfs; 2451 2374 } 2452 2375 } 2453 2376 ··· 2468 2379 * before posting commands. 
Also, init num_slaves before calling 2469 2380 * mlx4_init_hca */ 2470 2381 if (mlx4_is_mfunc(dev)) { 2471 - if (mlx4_is_master(dev)) 2382 + if (mlx4_is_master(dev)) { 2472 2383 dev->num_slaves = MLX4_MAX_NUM_SLAVES; 2473 - else { 2384 + 2385 + } else { 2474 2386 dev->num_slaves = 0; 2475 2387 err = mlx4_multi_func_init(dev); 2476 2388 if (err) { ··· 2481 2391 } 2482 2392 } 2483 2393 2394 + err = mlx4_init_fw(dev); 2395 + if (err) { 2396 + mlx4_err(dev, "Failed to init fw, aborting.\n"); 2397 + goto err_mfunc; 2398 + } 2399 + 2400 + if (mlx4_is_master(dev)) { 2401 + if (!dev_cap) { 2402 + dev_cap = kzalloc(sizeof(*dev_cap), GFP_KERNEL); 2403 + 2404 + if (!dev_cap) { 2405 + err = -ENOMEM; 2406 + goto err_fw; 2407 + } 2408 + 2409 + err = mlx4_QUERY_DEV_CAP(dev, dev_cap); 2410 + if (err) { 2411 + mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); 2412 + goto err_fw; 2413 + } 2414 + 2415 + if (mlx4_check_dev_cap(dev, dev_cap, nvfs)) 2416 + goto err_fw; 2417 + 2418 + if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) { 2419 + u64 dev_flags = mlx4_enable_sriov(dev, pdev, total_vfs, 2420 + existing_vfs); 2421 + 2422 + mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL); 2423 + dev->flags = dev_flags; 2424 + if (!SRIOV_VALID_STATE(dev->flags)) { 2425 + mlx4_err(dev, "Invalid SRIOV state\n"); 2426 + goto err_sriov; 2427 + } 2428 + err = mlx4_reset(dev); 2429 + if (err) { 2430 + mlx4_err(dev, "Failed to reset HCA, aborting.\n"); 2431 + goto err_sriov; 2432 + } 2433 + goto slave_start; 2434 + } 2435 + } else { 2436 + /* Legacy mode FW requires SRIOV to be enabled before 2437 + * doing QUERY_DEV_CAP, since max_eq's value is different if 2438 + * SRIOV is enabled. 
2439 + */ 2440 + memset(dev_cap, 0, sizeof(*dev_cap)); 2441 + err = mlx4_QUERY_DEV_CAP(dev, dev_cap); 2442 + if (err) { 2443 + mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); 2444 + goto err_fw; 2445 + } 2446 + 2447 + if (mlx4_check_dev_cap(dev, dev_cap, nvfs)) 2448 + goto err_fw; 2449 + } 2450 + } 2451 + 2484 2452 err = mlx4_init_hca(dev); 2485 2453 if (err) { 2486 2454 if (err == -EACCES) { 2487 2455 /* Not primary Physical function 2488 2456 * Running in slave mode */ 2489 - mlx4_cmd_cleanup(dev); 2457 + mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL); 2458 + /* We're not a PF */ 2459 + if (dev->flags & MLX4_FLAG_SRIOV) { 2460 + if (!existing_vfs) 2461 + pci_disable_sriov(pdev); 2462 + if (mlx4_is_master(dev)) 2463 + atomic_dec(&pf_loading); 2464 + dev->flags &= ~MLX4_FLAG_SRIOV; 2465 + } 2466 + if (!mlx4_is_slave(dev)) 2467 + mlx4_free_ownership(dev); 2490 2468 dev->flags |= MLX4_FLAG_SLAVE; 2491 2469 dev->flags &= ~MLX4_FLAG_MASTER; 2492 2470 goto slave_start; 2493 2471 } else 2494 - goto err_mfunc; 2472 + goto err_fw; 2473 + } 2474 + 2475 + if (mlx4_is_master(dev) && (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) { 2476 + u64 dev_flags = mlx4_enable_sriov(dev, pdev, total_vfs, existing_vfs); 2477 + 2478 + if ((dev->flags ^ dev_flags) & (MLX4_FLAG_MASTER | MLX4_FLAG_SLAVE)) { 2479 + mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_VHCR); 2480 + dev->flags = dev_flags; 2481 + err = mlx4_cmd_init(dev); 2482 + if (err) { 2483 + /* Only VHCR is cleaned up, so could still 2484 + * send FW commands 2485 + */ 2486 + mlx4_err(dev, "Failed to init VHCR command interface, aborting\n"); 2487 + goto err_close; 2488 + } 2489 + } else { 2490 + dev->flags = dev_flags; 2491 + } 2492 + 2493 + if (!SRIOV_VALID_STATE(dev->flags)) { 2494 + mlx4_err(dev, "Invalid SRIOV state\n"); 2495 + goto err_close; 2496 + } 2495 2497 } 2496 2498 2497 2499 /* check if the device is functioning at its maximum possible speed. 
··· 2738 2556 err_close: 2739 2557 mlx4_close_hca(dev); 2740 2558 2559 + err_fw: 2560 + mlx4_close_fw(dev); 2561 + 2741 2562 err_mfunc: 2742 2563 if (mlx4_is_slave(dev)) 2743 2564 mlx4_multi_func_cleanup(dev); 2744 2565 2745 2566 err_cmd: 2746 - mlx4_cmd_cleanup(dev); 2567 + mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL); 2747 2568 2748 2569 err_sriov: 2749 2570 if (dev->flags & MLX4_FLAG_SRIOV && !existing_vfs) ··· 2757 2572 2758 2573 kfree(priv->dev.dev_vfs); 2759 2574 2760 - err_free_own: 2761 2575 if (!mlx4_is_slave(dev)) 2762 2576 mlx4_free_ownership(dev); 2763 2577 2578 + kfree(dev_cap); 2764 2579 return err; 2765 2580 } 2766 2581 ··· 2988 2803 if (mlx4_is_master(dev)) 2989 2804 mlx4_multi_func_cleanup(dev); 2990 2805 mlx4_close_hca(dev); 2806 + mlx4_close_fw(dev); 2991 2807 if (mlx4_is_slave(dev)) 2992 2808 mlx4_multi_func_cleanup(dev); 2993 - mlx4_cmd_cleanup(dev); 2809 + mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL); 2994 2810 2995 2811 if (dev->flags & MLX4_FLAG_MSI_X) 2996 2812 pci_disable_msix(pdev); 2997 2813 if (dev->flags & MLX4_FLAG_SRIOV && !active_vfs) { 2998 2814 mlx4_warn(dev, "Disabling SR-IOV\n"); 2999 2815 pci_disable_sriov(pdev); 2816 + dev->flags &= ~MLX4_FLAG_SRIOV; 3000 2817 dev->num_vfs = 0; 3001 2818 } 3002 2819
+10 -1
drivers/net/ethernet/mellanox/mlx4/mlx4.h
··· 606 606 u8 use_events; 607 607 u8 toggle; 608 608 u8 comm_toggle; 609 + u8 initialized; 609 610 }; 610 611 611 612 enum { ··· 1127 1126 1128 1127 int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe); 1129 1128 1129 + enum { 1130 + MLX4_CMD_CLEANUP_STRUCT = 1UL << 0, 1131 + MLX4_CMD_CLEANUP_POOL = 1UL << 1, 1132 + MLX4_CMD_CLEANUP_HCR = 1UL << 2, 1133 + MLX4_CMD_CLEANUP_VHCR = 1UL << 3, 1134 + MLX4_CMD_CLEANUP_ALL = (MLX4_CMD_CLEANUP_VHCR << 1) - 1 1135 + }; 1136 + 1130 1137 int mlx4_cmd_init(struct mlx4_dev *dev); 1131 - void mlx4_cmd_cleanup(struct mlx4_dev *dev); 1138 + void mlx4_cmd_cleanup(struct mlx4_dev *dev, int cleanup_mask); 1132 1139 int mlx4_multi_func_init(struct mlx4_dev *dev); 1133 1140 void mlx4_multi_func_cleanup(struct mlx4_dev *dev); 1134 1141 void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param);
+13 -6
drivers/net/ethernet/mellanox/mlx4/profile.c
··· 126 126 profile[MLX4_RES_AUXC].num = request->num_qp; 127 127 profile[MLX4_RES_SRQ].num = request->num_srq; 128 128 profile[MLX4_RES_CQ].num = request->num_cq; 129 - profile[MLX4_RES_EQ].num = mlx4_is_mfunc(dev) ? 130 - dev->phys_caps.num_phys_eqs : 129 + profile[MLX4_RES_EQ].num = mlx4_is_mfunc(dev) ? dev->phys_caps.num_phys_eqs : 131 130 min_t(unsigned, dev_cap->max_eqs, MAX_MSIX); 132 131 profile[MLX4_RES_DMPT].num = request->num_mpt; 133 132 profile[MLX4_RES_CMPT].num = MLX4_NUM_CMPTS; ··· 215 216 init_hca->log_num_cqs = profile[i].log_num; 216 217 break; 217 218 case MLX4_RES_EQ: 218 - dev->caps.num_eqs = roundup_pow_of_two(min_t(unsigned, dev_cap->max_eqs, 219 - MAX_MSIX)); 220 - init_hca->eqc_base = profile[i].start; 221 - init_hca->log_num_eqs = ilog2(dev->caps.num_eqs); 219 + if (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) { 220 + init_hca->log_num_eqs = 0x1f; 221 + init_hca->eqc_base = profile[i].start; 222 + init_hca->num_sys_eqs = dev_cap->num_sys_eqs; 223 + } else { 224 + dev->caps.num_eqs = roundup_pow_of_two( 225 + min_t(unsigned, 226 + dev_cap->max_eqs, 227 + MAX_MSIX)); 228 + init_hca->eqc_base = profile[i].start; 229 + init_hca->log_num_eqs = ilog2(dev->caps.num_eqs); 230 + } 222 231 break; 223 232 case MLX4_RES_DMPT: 224 233 dev->caps.num_mpts = profile[i].num;
+5 -2
include/linux/mlx4/device.h
··· 95 95 96 96 enum { 97 97 MLX4_MAX_NUM_PF = 16, 98 - MLX4_MAX_NUM_VF = 64, 98 + MLX4_MAX_NUM_VF = 126, 99 99 MLX4_MAX_NUM_VF_P_PORT = 64, 100 100 MLX4_MFUNC_MAX = 80, 101 101 MLX4_MAX_EQ_NUM = 1024, ··· 189 189 MLX4_DEV_CAP_FLAG2_EQE_STRIDE = 1LL << 13, 190 190 MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL = 1LL << 14, 191 191 MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP = 1LL << 15, 192 - MLX4_DEV_CAP_FLAG2_CONFIG_DEV = 1LL << 16 192 + MLX4_DEV_CAP_FLAG2_CONFIG_DEV = 1LL << 16, 193 + MLX4_DEV_CAP_FLAG2_SYS_EQS = 1LL << 17, 194 + MLX4_DEV_CAP_FLAG2_80_VFS = 1LL << 18 193 195 }; 194 196 195 197 enum { ··· 445 443 int num_cqs; 446 444 int max_cqes; 447 445 int reserved_cqs; 446 + int num_sys_eqs; 448 447 int num_eqs; 449 448 int reserved_eqs; 450 449 int num_comp_vectors;