Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'mlx4'

Or Gerlitz says:

====================
Mellanox driver updates

This patch set from Jack Morgenstein does the following:

1. Fix MAC/VLAN SRIOV implementation, and add wrapper functions for VLAN allocation
and de-allocation (patches 1-6).

2. Implement resource quotas when running under SRIOV (patches 7-10).
Patch 7 is a small bug fix, and patches 8-10 implement the quotas.

Quotas are implemented per resource type for VFs and the PF, to prevent
any entity from simply grabbing all the resources for itself and leaving
the other entities unable to obtain such resources.

The series is against net-next commit ba48650 "ipv6: remove the unnecessary statement in find_match()"

Changes from V0:
- dropped the 1st patch which needs to go to -stable and hence through net,
not net-next
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+676 -121
+4 -4
drivers/infiniband/hw/mlx4/main.c
··· 177 177 178 178 props->max_mr_size = ~0ull; 179 179 props->page_size_cap = dev->dev->caps.page_size_cap; 180 - props->max_qp = dev->dev->caps.num_qps - dev->dev->caps.reserved_qps; 180 + props->max_qp = dev->dev->quotas.qp; 181 181 props->max_qp_wr = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE; 182 182 props->max_sge = min(dev->dev->caps.max_sq_sg, 183 183 dev->dev->caps.max_rq_sg); 184 - props->max_cq = dev->dev->caps.num_cqs - dev->dev->caps.reserved_cqs; 184 + props->max_cq = dev->dev->quotas.cq; 185 185 props->max_cqe = dev->dev->caps.max_cqes; 186 - props->max_mr = dev->dev->caps.num_mpts - dev->dev->caps.reserved_mrws; 186 + props->max_mr = dev->dev->quotas.mpt; 187 187 props->max_pd = dev->dev->caps.num_pds - dev->dev->caps.reserved_pds; 188 188 props->max_qp_rd_atom = dev->dev->caps.max_qp_dest_rdma; 189 189 props->max_qp_init_rd_atom = dev->dev->caps.max_qp_init_rdma; 190 190 props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp; 191 - props->max_srq = dev->dev->caps.num_srqs - dev->dev->caps.reserved_srqs; 191 + props->max_srq = dev->dev->quotas.srq; 192 192 props->max_srq_wr = dev->dev->caps.max_srq_wqes - 1; 193 193 props->max_srq_sge = dev->dev->caps.max_srq_sge; 194 194 props->max_fast_reg_page_list_len = MLX4_MAX_FAST_REG_PAGES;
+2 -1
drivers/net/ethernet/mellanox/mlx4/cmd.c
··· 1687 1687 vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port]; 1688 1688 if (NO_INDX != vp_oper->vlan_idx) { 1689 1689 __mlx4_unregister_vlan(&priv->dev, 1690 - port, vp_oper->vlan_idx); 1690 + port, vp_oper->state.default_vlan); 1691 1691 vp_oper->vlan_idx = NO_INDX; 1692 1692 } 1693 1693 if (NO_INDX != vp_oper->mac_idx) { ··· 1718 1718 if (cmd == MLX4_COMM_CMD_RESET) { 1719 1719 mlx4_warn(dev, "Received reset from slave:%d\n", slave); 1720 1720 slave_state[slave].active = false; 1721 + slave_state[slave].old_vlan_api = false; 1721 1722 mlx4_master_deactivate_admin_state(priv, slave); 1722 1723 for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i) { 1723 1724 slave_state[slave].event_eq[i].eqn = -1;
+1 -5
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
··· 417 417 struct mlx4_en_priv *priv = netdev_priv(dev); 418 418 struct mlx4_en_dev *mdev = priv->mdev; 419 419 int err; 420 - int idx; 421 420 422 421 en_dbg(HW, priv, "Killing VID:%d\n", vid); 423 422 ··· 424 425 425 426 /* Remove VID from port VLAN filter */ 426 427 mutex_lock(&mdev->state_lock); 427 - if (!mlx4_find_cached_vlan(mdev->dev, priv->port, vid, &idx)) 428 - mlx4_unregister_vlan(mdev->dev, priv->port, idx); 429 - else 430 - en_dbg(HW, priv, "could not find vid %d in cache\n", vid); 428 + mlx4_unregister_vlan(mdev->dev, priv->port, vid); 431 429 432 430 if (mdev->device_up && priv->port_up) { 433 431 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
+71 -28
drivers/net/ethernet/mellanox/mlx4/fw.c
··· 177 177 struct mlx4_cmd_mailbox *outbox, 178 178 struct mlx4_cmd_info *cmd) 179 179 { 180 + struct mlx4_priv *priv = mlx4_priv(dev); 180 181 u8 field; 181 182 u32 size; 182 183 int err = 0; ··· 186 185 #define QUERY_FUNC_CAP_NUM_PORTS_OFFSET 0x1 187 186 #define QUERY_FUNC_CAP_PF_BHVR_OFFSET 0x4 188 187 #define QUERY_FUNC_CAP_FMR_OFFSET 0x8 189 - #define QUERY_FUNC_CAP_QP_QUOTA_OFFSET 0x10 190 - #define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET 0x14 191 - #define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET 0x18 192 - #define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET 0x20 193 - #define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET 0x24 194 - #define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET 0x28 188 + #define QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP 0x10 189 + #define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP 0x14 190 + #define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP 0x18 191 + #define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP 0x20 192 + #define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP 0x24 193 + #define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP 0x28 195 194 #define QUERY_FUNC_CAP_MAX_EQ_OFFSET 0x2c 196 195 #define QUERY_FUNC_CAP_RESERVED_EQ_OFFSET 0x30 196 + 197 + #define QUERY_FUNC_CAP_QP_QUOTA_OFFSET 0x50 198 + #define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET 0x54 199 + #define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET 0x58 200 + #define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET 0x60 201 + #define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET 0x64 202 + #define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET 0x68 197 203 198 204 #define QUERY_FUNC_CAP_FMR_FLAG 0x80 199 205 #define QUERY_FUNC_CAP_FLAG_RDMA 0x40 200 206 #define QUERY_FUNC_CAP_FLAG_ETH 0x80 207 + #define QUERY_FUNC_CAP_FLAG_QUOTAS 0x10 201 208 202 209 /* when opcode modifier = 1 */ 203 210 #define QUERY_FUNC_CAP_PHYS_PORT_OFFSET 0x3 ··· 246 237 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP1_PROXY); 247 238 248 239 } else if (vhcr->op_modifier == 0) { 249 - /* enable rdma and ethernet interfaces */ 250 - field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA); 240 + /* enable rdma and ethernet interfaces, and new quota locations */ 241 + 
field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA | 242 + QUERY_FUNC_CAP_FLAG_QUOTAS); 251 243 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET); 252 244 253 245 field = dev->caps.num_ports; ··· 260 250 field = 0; /* protected FMR support not available as yet */ 261 251 MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FMR_OFFSET); 262 252 263 - size = dev->caps.num_qps; 253 + size = priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[slave]; 264 254 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET); 255 + size = dev->caps.num_qps; 256 + MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP); 265 257 266 - size = dev->caps.num_srqs; 258 + size = priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[slave]; 267 259 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET); 260 + size = dev->caps.num_srqs; 261 + MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP); 268 262 269 - size = dev->caps.num_cqs; 263 + size = priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[slave]; 270 264 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET); 265 + size = dev->caps.num_cqs; 266 + MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP); 271 267 272 268 size = dev->caps.num_eqs; 273 269 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET); ··· 281 265 size = dev->caps.reserved_eqs; 282 266 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET); 283 267 284 - size = dev->caps.num_mpts; 268 + size = priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[slave]; 285 269 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET); 270 + size = dev->caps.num_mpts; 271 + MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP); 286 272 287 - size = dev->caps.num_mtts; 273 + size = priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[slave]; 288 274 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET); 275 + size = dev->caps.num_mtts; 276 + MLX4_PUT(outbox->buf, size, 
QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP); 289 277 290 278 size = dev->caps.num_mgms + dev->caps.num_amgms; 291 279 MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET); 280 + MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP); 292 281 293 282 } else 294 283 err = -EINVAL; ··· 308 287 u32 *outbox; 309 288 u8 field, op_modifier; 310 289 u32 size; 311 - int err = 0; 290 + int err = 0, quotas = 0; 312 291 313 292 op_modifier = !!gen_or_port; /* 0 = general, 1 = logical port */ 314 293 ··· 332 311 goto out; 333 312 } 334 313 func_cap->flags = field; 314 + quotas = !!(func_cap->flags & QUERY_FUNC_CAP_FLAG_QUOTAS); 335 315 336 316 MLX4_GET(field, outbox, QUERY_FUNC_CAP_NUM_PORTS_OFFSET); 337 317 func_cap->num_ports = field; ··· 340 318 MLX4_GET(size, outbox, QUERY_FUNC_CAP_PF_BHVR_OFFSET); 341 319 func_cap->pf_context_behaviour = size; 342 320 343 - MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET); 344 - func_cap->qp_quota = size & 0xFFFFFF; 321 + if (quotas) { 322 + MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET); 323 + func_cap->qp_quota = size & 0xFFFFFF; 345 324 346 - MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET); 347 - func_cap->srq_quota = size & 0xFFFFFF; 325 + MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET); 326 + func_cap->srq_quota = size & 0xFFFFFF; 348 327 349 - MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET); 350 - func_cap->cq_quota = size & 0xFFFFFF; 328 + MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET); 329 + func_cap->cq_quota = size & 0xFFFFFF; 351 330 331 + MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET); 332 + func_cap->mpt_quota = size & 0xFFFFFF; 333 + 334 + MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET); 335 + func_cap->mtt_quota = size & 0xFFFFFF; 336 + 337 + MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET); 338 + func_cap->mcg_quota = size & 0xFFFFFF; 339 + 340 + } else { 341 + MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET_DEP); 342 + func_cap->qp_quota 
= size & 0xFFFFFF; 343 + 344 + MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET_DEP); 345 + func_cap->srq_quota = size & 0xFFFFFF; 346 + 347 + MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET_DEP); 348 + func_cap->cq_quota = size & 0xFFFFFF; 349 + 350 + MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET_DEP); 351 + func_cap->mpt_quota = size & 0xFFFFFF; 352 + 353 + MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET_DEP); 354 + func_cap->mtt_quota = size & 0xFFFFFF; 355 + 356 + MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET_DEP); 357 + func_cap->mcg_quota = size & 0xFFFFFF; 358 + } 352 359 MLX4_GET(size, outbox, QUERY_FUNC_CAP_MAX_EQ_OFFSET); 353 360 func_cap->max_eq = size & 0xFFFFFF; 354 361 355 362 MLX4_GET(size, outbox, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET); 356 363 func_cap->reserved_eq = size & 0xFFFFFF; 357 364 358 - MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET); 359 - func_cap->mpt_quota = size & 0xFFFFFF; 360 - 361 - MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET); 362 - func_cap->mtt_quota = size & 0xFFFFFF; 363 - 364 - MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET); 365 - func_cap->mcg_quota = size & 0xFFFFFF; 366 365 goto out; 367 366 } 368 367
+22 -10
drivers/net/ethernet/mellanox/mlx4/main.c
··· 562 562 } 563 563 564 564 dev->caps.num_ports = func_cap.num_ports; 565 - dev->caps.num_qps = func_cap.qp_quota; 566 - dev->caps.num_srqs = func_cap.srq_quota; 567 - dev->caps.num_cqs = func_cap.cq_quota; 568 - dev->caps.num_eqs = func_cap.max_eq; 569 - dev->caps.reserved_eqs = func_cap.reserved_eq; 570 - dev->caps.num_mpts = func_cap.mpt_quota; 571 - dev->caps.num_mtts = func_cap.mtt_quota; 565 + dev->quotas.qp = func_cap.qp_quota; 566 + dev->quotas.srq = func_cap.srq_quota; 567 + dev->quotas.cq = func_cap.cq_quota; 568 + dev->quotas.mpt = func_cap.mpt_quota; 569 + dev->quotas.mtt = func_cap.mtt_quota; 570 + dev->caps.num_qps = 1 << hca_param.log_num_qps; 571 + dev->caps.num_srqs = 1 << hca_param.log_num_srqs; 572 + dev->caps.num_cqs = 1 << hca_param.log_num_cqs; 573 + dev->caps.num_mpts = 1 << hca_param.log_mpt_sz; 574 + dev->caps.num_eqs = func_cap.max_eq; 575 + dev->caps.reserved_eqs = func_cap.reserved_eq; 572 576 dev->caps.num_pds = MLX4_NUM_PDS; 573 577 dev->caps.num_mgms = 0; 574 578 dev->caps.num_amgms = 0; ··· 2106 2102 "aborting.\n"); 2107 2103 return err; 2108 2104 } 2109 - if (num_vfs > MLX4_MAX_NUM_VF) { 2110 - printk(KERN_ERR "There are more VF's (%d) than allowed(%d)\n", 2111 - num_vfs, MLX4_MAX_NUM_VF); 2105 + 2106 + /* Due to requirement that all VFs and the PF are *guaranteed* 2 MACS 2107 + * per port, we must limit the number of VFs to 63 (since their are 2108 + * 128 MACs) 2109 + */ 2110 + if (num_vfs >= MLX4_MAX_NUM_VF) { 2111 + dev_err(&pdev->dev, 2112 + "Requested more VF's (%d) than allowed (%d)\n", 2113 + num_vfs, MLX4_MAX_NUM_VF - 1); 2112 2114 return -EINVAL; 2113 2115 } 2114 2116 ··· 2331 2321 2332 2322 if (err) 2333 2323 goto err_steer; 2324 + 2325 + mlx4_init_quotas(dev); 2334 2326 2335 2327 for (port = 1; port <= dev->caps.num_ports; port++) { 2336 2328 err = mlx4_init_port_info(dev, port);
+20 -1
drivers/net/ethernet/mellanox/mlx4/mlx4.h
··· 455 455 u8 last_cmd; 456 456 u8 init_port_mask; 457 457 bool active; 458 + bool old_vlan_api; 458 459 u8 function; 459 460 dma_addr_t vhcr_dma; 460 461 u16 mtu[MLX4_MAX_PORTS + 1]; ··· 504 503 struct list_head res_list[MLX4_NUM_OF_RESOURCE_TYPE]; 505 504 }; 506 505 506 + struct resource_allocator { 507 + spinlock_t alloc_lock; /* protect quotas */ 508 + union { 509 + int res_reserved; 510 + int res_port_rsvd[MLX4_MAX_PORTS]; 511 + }; 512 + union { 513 + int res_free; 514 + int res_port_free[MLX4_MAX_PORTS]; 515 + }; 516 + int *quota; 517 + int *allocated; 518 + int *guaranteed; 519 + }; 520 + 507 521 struct mlx4_resource_tracker { 508 522 spinlock_t lock; 509 523 /* tree for each resources */ 510 524 struct rb_root res_tree[MLX4_NUM_OF_RESOURCE_TYPE]; 511 525 /* num_of_slave's lists, one per slave */ 512 526 struct slave_list *slave_list; 527 + struct resource_allocator res_alloc[MLX4_NUM_OF_RESOURCE_TYPE]; 513 528 }; 514 529 515 530 #define SLAVE_EVENT_EQ_SIZE 128 ··· 1128 1111 1129 1112 void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table); 1130 1113 void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table); 1131 - void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index); 1114 + void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan); 1132 1115 int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index); 1133 1116 1134 1117 int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port, int pkey_tbl_sz); ··· 1268 1251 #define NOT_MASKED_PD_BITS 17 1269 1252 1270 1253 void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work); 1254 + 1255 + void mlx4_init_quotas(struct mlx4_dev *dev); 1271 1256 1272 1257 #endif /* MLX4_H */
+3 -3
drivers/net/ethernet/mellanox/mlx4/mr.c
··· 755 755 struct mlx4_mr_table *mr_table = &priv->mr_table; 756 756 int err; 757 757 758 - if (!is_power_of_2(dev->caps.num_mpts)) 759 - return -EINVAL; 760 - 761 758 /* Nothing to do for slaves - all MR handling is forwarded 762 759 * to the master */ 763 760 if (mlx4_is_slave(dev)) 764 761 return 0; 762 + 763 + if (!is_power_of_2(dev->caps.num_mpts)) 764 + return -EINVAL; 765 765 766 766 err = mlx4_bitmap_init(&mr_table->mpt_bitmap, dev->caps.num_mpts, 767 767 ~0, dev->caps.reserved_mrws, 0);
+54 -33
drivers/net/ethernet/mellanox/mlx4/port.c
··· 178 178 int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac) 179 179 { 180 180 u64 out_param = 0; 181 - int err; 181 + int err = -EINVAL; 182 182 183 183 if (mlx4_is_mfunc(dev)) { 184 - set_param_l(&out_param, port); 185 - err = mlx4_cmd_imm(dev, mac, &out_param, RES_MAC, 186 - RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES, 187 - MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); 184 + if (!(dev->flags & MLX4_FLAG_OLD_REG_MAC)) { 185 + err = mlx4_cmd_imm(dev, mac, &out_param, 186 + ((u32) port) << 8 | (u32) RES_MAC, 187 + RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES, 188 + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); 189 + } 190 + if (err && err == -EINVAL && mlx4_is_slave(dev)) { 191 + /* retry using old REG_MAC format */ 192 + set_param_l(&out_param, port); 193 + err = mlx4_cmd_imm(dev, mac, &out_param, RES_MAC, 194 + RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES, 195 + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); 196 + if (!err) 197 + dev->flags |= MLX4_FLAG_OLD_REG_MAC; 198 + } 188 199 if (err) 189 200 return err; 190 201 ··· 242 231 u64 out_param = 0; 243 232 244 233 if (mlx4_is_mfunc(dev)) { 245 - set_param_l(&out_param, port); 246 - (void) mlx4_cmd_imm(dev, mac, &out_param, RES_MAC, 247 - RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES, 248 - MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); 234 + if (!(dev->flags & MLX4_FLAG_OLD_REG_MAC)) { 235 + (void) mlx4_cmd_imm(dev, mac, &out_param, 236 + ((u32) port) << 8 | (u32) RES_MAC, 237 + RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES, 238 + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); 239 + } else { 240 + /* use old unregister mac format */ 241 + set_param_l(&out_param, port); 242 + (void) mlx4_cmd_imm(dev, mac, &out_param, RES_MAC, 243 + RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES, 244 + MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); 245 + } 249 246 return; 250 247 } 251 248 __mlx4_unregister_mac(dev, port, mac); ··· 303 284 memcpy(mailbox->buf, entries, MLX4_VLAN_TABLE_SIZE); 304 285 in_mod = MLX4_SET_PORT_VLAN_TABLE << 8 | port; 305 286 err = 
mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT, 306 - MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED); 287 + MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); 307 288 308 289 mlx4_free_cmd_mailbox(dev, mailbox); 309 290 ··· 389 370 u64 out_param = 0; 390 371 int err; 391 372 373 + if (vlan > 4095) 374 + return -EINVAL; 375 + 392 376 if (mlx4_is_mfunc(dev)) { 393 - set_param_l(&out_param, port); 394 - err = mlx4_cmd_imm(dev, vlan, &out_param, RES_VLAN, 377 + err = mlx4_cmd_imm(dev, vlan, &out_param, 378 + ((u32) port) << 8 | (u32) RES_VLAN, 395 379 RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES, 396 380 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); 397 381 if (!err) ··· 406 384 } 407 385 EXPORT_SYMBOL_GPL(mlx4_register_vlan); 408 386 409 - void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index) 387 + void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan) 410 388 { 411 389 struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table; 390 + int index; 391 + 392 + mutex_lock(&table->mutex); 393 + if (mlx4_find_cached_vlan(dev, port, vlan, &index)) { 394 + mlx4_warn(dev, "vlan 0x%x is not in the vlan table\n", vlan); 395 + goto out; 396 + } 412 397 413 398 if (index < MLX4_VLAN_REGULAR) { 414 399 mlx4_warn(dev, "Trying to free special vlan index %d\n", index); 415 - return; 416 - } 417 - 418 - mutex_lock(&table->mutex); 419 - if (!table->refs[index]) { 420 - mlx4_warn(dev, "No vlan entry for index %d\n", index); 421 400 goto out; 422 401 } 402 + 423 403 if (--table->refs[index]) { 424 - mlx4_dbg(dev, "Have more references for index %d," 425 - "no need to modify vlan table\n", index); 404 + mlx4_dbg(dev, "Have %d more references for index %d," 405 + "no need to modify vlan table\n", table->refs[index], 406 + index); 426 407 goto out; 427 408 } 428 409 table->entries[index] = 0; ··· 435 410 mutex_unlock(&table->mutex); 436 411 } 437 412 438 - void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index) 413 + void mlx4_unregister_vlan(struct 
mlx4_dev *dev, u8 port, u16 vlan) 439 414 { 440 - u64 in_param = 0; 441 - int err; 415 + u64 out_param = 0; 442 416 443 417 if (mlx4_is_mfunc(dev)) { 444 - set_param_l(&in_param, port); 445 - err = mlx4_cmd(dev, in_param, RES_VLAN, RES_OP_RESERVE_AND_MAP, 446 - MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A, 447 - MLX4_CMD_WRAPPED); 448 - if (!err) 449 - mlx4_warn(dev, "Failed freeing vlan at index:%d\n", 450 - index); 451 - 418 + (void) mlx4_cmd_imm(dev, vlan, &out_param, 419 + ((u32) port) << 8 | (u32) RES_VLAN, 420 + RES_OP_RESERVE_AND_MAP, 421 + MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A, 422 + MLX4_CMD_WRAPPED); 452 423 return; 453 424 } 454 - __mlx4_unregister_vlan(dev, port, index); 425 + __mlx4_unregister_vlan(dev, port, vlan); 455 426 } 456 427 EXPORT_SYMBOL_GPL(mlx4_unregister_vlan); 457 428
+1 -2
drivers/net/ethernet/mellanox/mlx4/qp.c
··· 480 480 */ 481 481 482 482 err = mlx4_bitmap_init(&qp_table->bitmap, dev->caps.num_qps, 483 - (1 << 23) - 1, dev->phys_caps.base_sqpn + 8 + 484 - 16 * MLX4_MFUNC_MAX * !!mlx4_is_master(dev), 483 + (1 << 23) - 1, mlx4_num_reserved_sqps(dev), 485 484 reserved_from_top); 486 485 if (err) 487 486 return err;
+479 -33
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
··· 55 55 u8 port; 56 56 }; 57 57 58 + struct vlan_res { 59 + struct list_head list; 60 + u16 vlan; 61 + int ref_count; 62 + int vlan_index; 63 + u8 port; 64 + }; 65 + 58 66 struct res_common { 59 67 struct list_head list; 60 68 struct rb_node node; ··· 274 266 case RES_MPT: return "RES_MPT"; 275 267 case RES_MTT: return "RES_MTT"; 276 268 case RES_MAC: return "RES_MAC"; 269 + case RES_VLAN: return "RES_VLAN"; 277 270 case RES_EQ: return "RES_EQ"; 278 271 case RES_COUNTER: return "RES_COUNTER"; 279 272 case RES_FS_RULE: return "RES_FS_RULE"; ··· 283 274 }; 284 275 } 285 276 277 + static void rem_slave_vlans(struct mlx4_dev *dev, int slave); 278 + static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave, 279 + enum mlx4_resource res_type, int count, 280 + int port) 281 + { 282 + struct mlx4_priv *priv = mlx4_priv(dev); 283 + struct resource_allocator *res_alloc = 284 + &priv->mfunc.master.res_tracker.res_alloc[res_type]; 285 + int err = -EINVAL; 286 + int allocated, free, reserved, guaranteed, from_free; 287 + 288 + if (slave > dev->num_vfs) 289 + return -EINVAL; 290 + 291 + spin_lock(&res_alloc->alloc_lock); 292 + allocated = (port > 0) ? 293 + res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] : 294 + res_alloc->allocated[slave]; 295 + free = (port > 0) ? res_alloc->res_port_free[port - 1] : 296 + res_alloc->res_free; 297 + reserved = (port > 0) ? 
res_alloc->res_port_rsvd[port - 1] : 298 + res_alloc->res_reserved; 299 + guaranteed = res_alloc->guaranteed[slave]; 300 + 301 + if (allocated + count > res_alloc->quota[slave]) 302 + goto out; 303 + 304 + if (allocated + count <= guaranteed) { 305 + err = 0; 306 + } else { 307 + /* portion may need to be obtained from free area */ 308 + if (guaranteed - allocated > 0) 309 + from_free = count - (guaranteed - allocated); 310 + else 311 + from_free = count; 312 + 313 + if (free - from_free > reserved) 314 + err = 0; 315 + } 316 + 317 + if (!err) { 318 + /* grant the request */ 319 + if (port > 0) { 320 + res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] += count; 321 + res_alloc->res_port_free[port - 1] -= count; 322 + } else { 323 + res_alloc->allocated[slave] += count; 324 + res_alloc->res_free -= count; 325 + } 326 + } 327 + 328 + out: 329 + spin_unlock(&res_alloc->alloc_lock); 330 + return err; 331 + } 332 + 333 + static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave, 334 + enum mlx4_resource res_type, int count, 335 + int port) 336 + { 337 + struct mlx4_priv *priv = mlx4_priv(dev); 338 + struct resource_allocator *res_alloc = 339 + &priv->mfunc.master.res_tracker.res_alloc[res_type]; 340 + 341 + if (slave > dev->num_vfs) 342 + return; 343 + 344 + spin_lock(&res_alloc->alloc_lock); 345 + if (port > 0) { 346 + res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] -= count; 347 + res_alloc->res_port_free[port - 1] += count; 348 + } else { 349 + res_alloc->allocated[slave] -= count; 350 + res_alloc->res_free += count; 351 + } 352 + 353 + spin_unlock(&res_alloc->alloc_lock); 354 + return; 355 + } 356 + 357 + static inline void initialize_res_quotas(struct mlx4_dev *dev, 358 + struct resource_allocator *res_alloc, 359 + enum mlx4_resource res_type, 360 + int vf, int num_instances) 361 + { 362 + res_alloc->guaranteed[vf] = num_instances / (2 * (dev->num_vfs + 1)); 363 + res_alloc->quota[vf] = (num_instances / 2) + 
res_alloc->guaranteed[vf]; 364 + if (vf == mlx4_master_func_num(dev)) { 365 + res_alloc->res_free = num_instances; 366 + if (res_type == RES_MTT) { 367 + /* reserved mtts will be taken out of the PF allocation */ 368 + res_alloc->res_free += dev->caps.reserved_mtts; 369 + res_alloc->guaranteed[vf] += dev->caps.reserved_mtts; 370 + res_alloc->quota[vf] += dev->caps.reserved_mtts; 371 + } 372 + } 373 + } 374 + 375 + void mlx4_init_quotas(struct mlx4_dev *dev) 376 + { 377 + struct mlx4_priv *priv = mlx4_priv(dev); 378 + int pf; 379 + 380 + /* quotas for VFs are initialized in mlx4_slave_cap */ 381 + if (mlx4_is_slave(dev)) 382 + return; 383 + 384 + if (!mlx4_is_mfunc(dev)) { 385 + dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps - 386 + mlx4_num_reserved_sqps(dev); 387 + dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs; 388 + dev->quotas.srq = dev->caps.num_srqs - dev->caps.reserved_srqs; 389 + dev->quotas.mtt = dev->caps.num_mtts - dev->caps.reserved_mtts; 390 + dev->quotas.mpt = dev->caps.num_mpts - dev->caps.reserved_mrws; 391 + return; 392 + } 393 + 394 + pf = mlx4_master_func_num(dev); 395 + dev->quotas.qp = 396 + priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[pf]; 397 + dev->quotas.cq = 398 + priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[pf]; 399 + dev->quotas.srq = 400 + priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[pf]; 401 + dev->quotas.mtt = 402 + priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[pf]; 403 + dev->quotas.mpt = 404 + priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf]; 405 + } 286 406 int mlx4_init_resource_tracker(struct mlx4_dev *dev) 287 407 { 288 408 struct mlx4_priv *priv = mlx4_priv(dev); 289 - int i; 409 + int i, j; 290 410 int t; 291 411 292 412 priv->mfunc.master.res_tracker.slave_list = ··· 436 298 for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) 437 299 priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT; 438 300 301 + for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) { 
302 + struct resource_allocator *res_alloc = 303 + &priv->mfunc.master.res_tracker.res_alloc[i]; 304 + res_alloc->quota = kmalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL); 305 + res_alloc->guaranteed = kmalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL); 306 + if (i == RES_MAC || i == RES_VLAN) 307 + res_alloc->allocated = kzalloc(MLX4_MAX_PORTS * 308 + (dev->num_vfs + 1) * sizeof(int), 309 + GFP_KERNEL); 310 + else 311 + res_alloc->allocated = kzalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL); 312 + 313 + if (!res_alloc->quota || !res_alloc->guaranteed || 314 + !res_alloc->allocated) 315 + goto no_mem_err; 316 + 317 + spin_lock_init(&res_alloc->alloc_lock); 318 + for (t = 0; t < dev->num_vfs + 1; t++) { 319 + switch (i) { 320 + case RES_QP: 321 + initialize_res_quotas(dev, res_alloc, RES_QP, 322 + t, dev->caps.num_qps - 323 + dev->caps.reserved_qps - 324 + mlx4_num_reserved_sqps(dev)); 325 + break; 326 + case RES_CQ: 327 + initialize_res_quotas(dev, res_alloc, RES_CQ, 328 + t, dev->caps.num_cqs - 329 + dev->caps.reserved_cqs); 330 + break; 331 + case RES_SRQ: 332 + initialize_res_quotas(dev, res_alloc, RES_SRQ, 333 + t, dev->caps.num_srqs - 334 + dev->caps.reserved_srqs); 335 + break; 336 + case RES_MPT: 337 + initialize_res_quotas(dev, res_alloc, RES_MPT, 338 + t, dev->caps.num_mpts - 339 + dev->caps.reserved_mrws); 340 + break; 341 + case RES_MTT: 342 + initialize_res_quotas(dev, res_alloc, RES_MTT, 343 + t, dev->caps.num_mtts - 344 + dev->caps.reserved_mtts); 345 + break; 346 + case RES_MAC: 347 + if (t == mlx4_master_func_num(dev)) { 348 + res_alloc->quota[t] = MLX4_MAX_MAC_NUM; 349 + res_alloc->guaranteed[t] = 2; 350 + for (j = 0; j < MLX4_MAX_PORTS; j++) 351 + res_alloc->res_port_free[j] = MLX4_MAX_MAC_NUM; 352 + } else { 353 + res_alloc->quota[t] = MLX4_MAX_MAC_NUM; 354 + res_alloc->guaranteed[t] = 2; 355 + } 356 + break; 357 + case RES_VLAN: 358 + if (t == mlx4_master_func_num(dev)) { 359 + res_alloc->quota[t] = MLX4_MAX_VLAN_NUM; 360 + 
res_alloc->guaranteed[t] = MLX4_MAX_VLAN_NUM / 2; 361 + for (j = 0; j < MLX4_MAX_PORTS; j++) 362 + res_alloc->res_port_free[j] = 363 + res_alloc->quota[t]; 364 + } else { 365 + res_alloc->quota[t] = MLX4_MAX_VLAN_NUM / 2; 366 + res_alloc->guaranteed[t] = 0; 367 + } 368 + break; 369 + case RES_COUNTER: 370 + res_alloc->quota[t] = dev->caps.max_counters; 371 + res_alloc->guaranteed[t] = 0; 372 + if (t == mlx4_master_func_num(dev)) 373 + res_alloc->res_free = res_alloc->quota[t]; 374 + break; 375 + default: 376 + break; 377 + } 378 + if (i == RES_MAC || i == RES_VLAN) { 379 + for (j = 0; j < MLX4_MAX_PORTS; j++) 380 + res_alloc->res_port_rsvd[j] += 381 + res_alloc->guaranteed[t]; 382 + } else { 383 + res_alloc->res_reserved += res_alloc->guaranteed[t]; 384 + } 385 + } 386 + } 439 387 spin_lock_init(&priv->mfunc.master.res_tracker.lock); 440 - return 0 ; 388 + return 0; 389 + 390 + no_mem_err: 391 + for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) { 392 + kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated); 393 + priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL; 394 + kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed); 395 + priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL; 396 + kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota); 397 + priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL; 398 + } 399 + return -ENOMEM; 441 400 } 442 401 443 402 void mlx4_free_resource_tracker(struct mlx4_dev *dev, ··· 544 309 int i; 545 310 546 311 if (priv->mfunc.master.res_tracker.slave_list) { 547 - if (type != RES_TR_FREE_STRUCTS_ONLY) 548 - for (i = 0 ; i < dev->num_slaves; i++) 312 + if (type != RES_TR_FREE_STRUCTS_ONLY) { 313 + for (i = 0; i < dev->num_slaves; i++) { 549 314 if (type == RES_TR_FREE_ALL || 550 315 dev->caps.function != i) 551 316 mlx4_delete_all_resources_for_slave(dev, i); 317 + } 318 + /* free master's vlans */ 319 + i = dev->caps.function; 320 + 
mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex); 321 + rem_slave_vlans(dev, i); 322 + mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex); 323 + } 552 324 553 325 if (type != RES_TR_FREE_SLAVES_ONLY) { 326 + for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) { 327 + kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated); 328 + priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL; 329 + kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed); 330 + priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL; 331 + kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota); 332 + priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL; 333 + } 554 334 kfree(priv->mfunc.master.res_tracker.slave_list); 555 335 priv->mfunc.master.res_tracker.slave_list = NULL; 556 336 } ··· 1479 1229 case RES_OP_RESERVE: 1480 1230 count = get_param_l(&in_param); 1481 1231 align = get_param_h(&in_param); 1482 - err = __mlx4_qp_reserve_range(dev, count, align, &base); 1232 + err = mlx4_grant_resource(dev, slave, RES_QP, count, 0); 1483 1233 if (err) 1484 1234 return err; 1485 1235 1236 + err = __mlx4_qp_reserve_range(dev, count, align, &base); 1237 + if (err) { 1238 + mlx4_release_resource(dev, slave, RES_QP, count, 0); 1239 + return err; 1240 + } 1241 + 1486 1242 err = add_res_range(dev, slave, base, count, RES_QP, 0); 1487 1243 if (err) { 1244 + mlx4_release_resource(dev, slave, RES_QP, count, 0); 1488 1245 __mlx4_qp_release_range(dev, base, count); 1489 1246 return err; 1490 1247 } ··· 1539 1282 return err; 1540 1283 1541 1284 order = get_param_l(&in_param); 1285 + 1286 + err = mlx4_grant_resource(dev, slave, RES_MTT, 1 << order, 0); 1287 + if (err) 1288 + return err; 1289 + 1542 1290 base = __mlx4_alloc_mtt_range(dev, order); 1543 - if (base == -1) 1291 + if (base == -1) { 1292 + mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0); 1544 1293 return -ENOMEM; 1294 + } 1545 1295 1546 1296 err = add_res_range(dev, slave, base, 1, 
RES_MTT, order); 1547 - if (err) 1297 + if (err) { 1298 + mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0); 1548 1299 __mlx4_free_mtt_range(dev, base, order); 1549 - else 1300 + } else { 1550 1301 set_param_l(out_param, base); 1302 + } 1551 1303 1552 1304 return err; 1553 1305 } ··· 1571 1305 1572 1306 switch (op) { 1573 1307 case RES_OP_RESERVE: 1574 - index = __mlx4_mpt_reserve(dev); 1575 - if (index == -1) 1308 + err = mlx4_grant_resource(dev, slave, RES_MPT, 1, 0); 1309 + if (err) 1576 1310 break; 1311 + 1312 + index = __mlx4_mpt_reserve(dev); 1313 + if (index == -1) { 1314 + mlx4_release_resource(dev, slave, RES_MPT, 1, 0); 1315 + break; 1316 + } 1577 1317 id = index & mpt_mask(dev); 1578 1318 1579 1319 err = add_res_range(dev, slave, id, 1, RES_MPT, index); 1580 1320 if (err) { 1321 + mlx4_release_resource(dev, slave, RES_MPT, 1, 0); 1581 1322 __mlx4_mpt_release(dev, index); 1582 1323 break; 1583 1324 } ··· 1618 1345 1619 1346 switch (op) { 1620 1347 case RES_OP_RESERVE_AND_MAP: 1621 - err = __mlx4_cq_alloc_icm(dev, &cqn); 1348 + err = mlx4_grant_resource(dev, slave, RES_CQ, 1, 0); 1622 1349 if (err) 1623 1350 break; 1624 1351 1352 + err = __mlx4_cq_alloc_icm(dev, &cqn); 1353 + if (err) { 1354 + mlx4_release_resource(dev, slave, RES_CQ, 1, 0); 1355 + break; 1356 + } 1357 + 1625 1358 err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0); 1626 1359 if (err) { 1360 + mlx4_release_resource(dev, slave, RES_CQ, 1, 0); 1627 1361 __mlx4_cq_free_icm(dev, cqn); 1628 1362 break; 1629 1363 } ··· 1653 1373 1654 1374 switch (op) { 1655 1375 case RES_OP_RESERVE_AND_MAP: 1656 - err = __mlx4_srq_alloc_icm(dev, &srqn); 1376 + err = mlx4_grant_resource(dev, slave, RES_SRQ, 1, 0); 1657 1377 if (err) 1658 1378 break; 1659 1379 1380 + err = __mlx4_srq_alloc_icm(dev, &srqn); 1381 + if (err) { 1382 + mlx4_release_resource(dev, slave, RES_SRQ, 1, 0); 1383 + break; 1384 + } 1385 + 1660 1386 err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0); 1661 1387 if (err) { 1388 + 
mlx4_release_resource(dev, slave, RES_SRQ, 1, 0); 1662 1389 __mlx4_srq_free_icm(dev, srqn); 1663 1390 break; 1664 1391 } ··· 1686 1399 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; 1687 1400 struct mac_res *res; 1688 1401 1402 + if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port)) 1403 + return -EINVAL; 1689 1404 res = kzalloc(sizeof *res, GFP_KERNEL); 1690 - if (!res) 1405 + if (!res) { 1406 + mlx4_release_resource(dev, slave, RES_MAC, 1, port); 1691 1407 return -ENOMEM; 1408 + } 1692 1409 res->mac = mac; 1693 1410 res->port = (u8) port; 1694 1411 list_add_tail(&res->list, ··· 1712 1421 list_for_each_entry_safe(res, tmp, mac_list, list) { 1713 1422 if (res->mac == mac && res->port == (u8) port) { 1714 1423 list_del(&res->list); 1424 + mlx4_release_resource(dev, slave, RES_MAC, 1, port); 1715 1425 kfree(res); 1716 1426 break; 1717 1427 } ··· 1730 1438 list_for_each_entry_safe(res, tmp, mac_list, list) { 1731 1439 list_del(&res->list); 1732 1440 __mlx4_unregister_mac(dev, res->port, res->mac); 1441 + mlx4_release_resource(dev, slave, RES_MAC, 1, res->port); 1733 1442 kfree(res); 1734 1443 } 1735 1444 } 1736 1445 1737 1446 static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, 1738 - u64 in_param, u64 *out_param) 1447 + u64 in_param, u64 *out_param, int in_port) 1739 1448 { 1740 1449 int err = -EINVAL; 1741 1450 int port; ··· 1745 1452 if (op != RES_OP_RESERVE_AND_MAP) 1746 1453 return err; 1747 1454 1748 - port = get_param_l(out_param); 1455 + port = !in_port ? 
get_param_l(out_param) : in_port; 1749 1456 mac = in_param; 1750 1457 1751 1458 err = __mlx4_register_mac(dev, port, mac); ··· 1762 1469 return err; 1763 1470 } 1764 1471 1765 - static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, 1766 - u64 in_param, u64 *out_param) 1472 + static int vlan_add_to_slave(struct mlx4_dev *dev, int slave, u16 vlan, 1473 + int port, int vlan_index) 1767 1474 { 1475 + struct mlx4_priv *priv = mlx4_priv(dev); 1476 + struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; 1477 + struct list_head *vlan_list = 1478 + &tracker->slave_list[slave].res_list[RES_VLAN]; 1479 + struct vlan_res *res, *tmp; 1480 + 1481 + list_for_each_entry_safe(res, tmp, vlan_list, list) { 1482 + if (res->vlan == vlan && res->port == (u8) port) { 1483 + /* vlan found. update ref count */ 1484 + ++res->ref_count; 1485 + return 0; 1486 + } 1487 + } 1488 + 1489 + if (mlx4_grant_resource(dev, slave, RES_VLAN, 1, port)) 1490 + return -EINVAL; 1491 + res = kzalloc(sizeof(*res), GFP_KERNEL); 1492 + if (!res) { 1493 + mlx4_release_resource(dev, slave, RES_VLAN, 1, port); 1494 + return -ENOMEM; 1495 + } 1496 + res->vlan = vlan; 1497 + res->port = (u8) port; 1498 + res->vlan_index = vlan_index; 1499 + res->ref_count = 1; 1500 + list_add_tail(&res->list, 1501 + &tracker->slave_list[slave].res_list[RES_VLAN]); 1768 1502 return 0; 1503 + } 1504 + 1505 + 1506 + static void vlan_del_from_slave(struct mlx4_dev *dev, int slave, u16 vlan, 1507 + int port) 1508 + { 1509 + struct mlx4_priv *priv = mlx4_priv(dev); 1510 + struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; 1511 + struct list_head *vlan_list = 1512 + &tracker->slave_list[slave].res_list[RES_VLAN]; 1513 + struct vlan_res *res, *tmp; 1514 + 1515 + list_for_each_entry_safe(res, tmp, vlan_list, list) { 1516 + if (res->vlan == vlan && res->port == (u8) port) { 1517 + if (!--res->ref_count) { 1518 + list_del(&res->list); 1519 + mlx4_release_resource(dev, slave, 
RES_VLAN, 1520 + 1, port); 1521 + kfree(res); 1522 + } 1523 + break; 1524 + } 1525 + } 1526 + } 1527 + 1528 + static void rem_slave_vlans(struct mlx4_dev *dev, int slave) 1529 + { 1530 + struct mlx4_priv *priv = mlx4_priv(dev); 1531 + struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; 1532 + struct list_head *vlan_list = 1533 + &tracker->slave_list[slave].res_list[RES_VLAN]; 1534 + struct vlan_res *res, *tmp; 1535 + int i; 1536 + 1537 + list_for_each_entry_safe(res, tmp, vlan_list, list) { 1538 + list_del(&res->list); 1539 + /* dereference the vlan the num times the slave referenced it */ 1540 + for (i = 0; i < res->ref_count; i++) 1541 + __mlx4_unregister_vlan(dev, res->port, res->vlan); 1542 + mlx4_release_resource(dev, slave, RES_VLAN, 1, res->port); 1543 + kfree(res); 1544 + } 1545 + } 1546 + 1547 + static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, 1548 + u64 in_param, u64 *out_param, int in_port) 1549 + { 1550 + struct mlx4_priv *priv = mlx4_priv(dev); 1551 + struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state; 1552 + int err; 1553 + u16 vlan; 1554 + int vlan_index; 1555 + int port; 1556 + 1557 + port = !in_port ? get_param_l(out_param) : in_port; 1558 + 1559 + if (!port || op != RES_OP_RESERVE_AND_MAP) 1560 + return -EINVAL; 1561 + 1562 + /* upstream kernels had NOP for reg/unreg vlan. Continue this. 
*/ 1563 + if (!in_port && port > 0 && port <= dev->caps.num_ports) { 1564 + slave_state[slave].old_vlan_api = true; 1565 + return 0; 1566 + } 1567 + 1568 + vlan = (u16) in_param; 1569 + 1570 + err = __mlx4_register_vlan(dev, port, vlan, &vlan_index); 1571 + if (!err) { 1572 + set_param_l(out_param, (u32) vlan_index); 1573 + err = vlan_add_to_slave(dev, slave, vlan, port, vlan_index); 1574 + if (err) 1575 + __mlx4_unregister_vlan(dev, port, vlan); 1576 + } 1577 + return err; 1769 1578 } 1770 1579 1771 1580 static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd, ··· 1879 1484 if (op != RES_OP_RESERVE) 1880 1485 return -EINVAL; 1881 1486 1882 - err = __mlx4_counter_alloc(dev, &index); 1487 + err = mlx4_grant_resource(dev, slave, RES_COUNTER, 1, 0); 1883 1488 if (err) 1884 1489 return err; 1885 1490 1491 + err = __mlx4_counter_alloc(dev, &index); 1492 + if (err) { 1493 + mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0); 1494 + return err; 1495 + } 1496 + 1886 1497 err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0); 1887 - if (err) 1498 + if (err) { 1888 1499 __mlx4_counter_free(dev, index); 1889 - else 1500 + mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0); 1501 + } else { 1890 1502 set_param_l(out_param, index); 1503 + } 1891 1504 1892 1505 return err; 1893 1506 } ··· 1931 1528 int err; 1932 1529 int alop = vhcr->op_modifier; 1933 1530 1934 - switch (vhcr->in_modifier) { 1531 + switch (vhcr->in_modifier & 0xFF) { 1935 1532 case RES_QP: 1936 1533 err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop, 1937 1534 vhcr->in_param, &vhcr->out_param); ··· 1959 1556 1960 1557 case RES_MAC: 1961 1558 err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop, 1962 - vhcr->in_param, &vhcr->out_param); 1559 + vhcr->in_param, &vhcr->out_param, 1560 + (vhcr->in_modifier >> 8) & 0xFF); 1963 1561 break; 1964 1562 1965 1563 case RES_VLAN: 1966 1564 err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop, 1967 - vhcr->in_param, &vhcr->out_param); 
1565 + vhcr->in_param, &vhcr->out_param, 1566 + (vhcr->in_modifier >> 8) & 0xFF); 1968 1567 break; 1969 1568 1970 1569 case RES_COUNTER: ··· 2002 1597 err = rem_res_range(dev, slave, base, count, RES_QP, 0); 2003 1598 if (err) 2004 1599 break; 1600 + mlx4_release_resource(dev, slave, RES_QP, count, 0); 2005 1601 __mlx4_qp_release_range(dev, base, count); 2006 1602 break; 2007 1603 case RES_OP_MAP_ICM: ··· 2040 1634 base = get_param_l(&in_param); 2041 1635 order = get_param_h(&in_param); 2042 1636 err = rem_res_range(dev, slave, base, 1, RES_MTT, order); 2043 - if (!err) 1637 + if (!err) { 1638 + mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0); 2044 1639 __mlx4_free_mtt_range(dev, base, order); 1640 + } 2045 1641 return err; 2046 1642 } 2047 1643 ··· 2068 1660 err = rem_res_range(dev, slave, id, 1, RES_MPT, 0); 2069 1661 if (err) 2070 1662 break; 1663 + mlx4_release_resource(dev, slave, RES_MPT, 1, 0); 2071 1664 __mlx4_mpt_release(dev, index); 2072 1665 break; 2073 1666 case RES_OP_MAP_ICM: ··· 2103 1694 if (err) 2104 1695 break; 2105 1696 1697 + mlx4_release_resource(dev, slave, RES_CQ, 1, 0); 2106 1698 __mlx4_cq_free_icm(dev, cqn); 2107 1699 break; 2108 1700 ··· 2128 1718 if (err) 2129 1719 break; 2130 1720 1721 + mlx4_release_resource(dev, slave, RES_SRQ, 1, 0); 2131 1722 __mlx4_srq_free_icm(dev, srqn); 2132 1723 break; 2133 1724 ··· 2141 1730 } 2142 1731 2143 1732 static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, 2144 - u64 in_param, u64 *out_param) 1733 + u64 in_param, u64 *out_param, int in_port) 2145 1734 { 2146 1735 int port; 2147 1736 int err = 0; 2148 1737 2149 1738 switch (op) { 2150 1739 case RES_OP_RESERVE_AND_MAP: 2151 - port = get_param_l(out_param); 1740 + port = !in_port ? 
get_param_l(out_param) : in_port; 2152 1741 mac_del_from_slave(dev, slave, in_param, port); 2153 1742 __mlx4_unregister_mac(dev, port, in_param); 2154 1743 break; ··· 2162 1751 } 2163 1752 2164 1753 static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, 2165 - u64 in_param, u64 *out_param) 1754 + u64 in_param, u64 *out_param, int port) 2166 1755 { 2167 - return 0; 1756 + struct mlx4_priv *priv = mlx4_priv(dev); 1757 + struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state; 1758 + int err = 0; 1759 + 1760 + switch (op) { 1761 + case RES_OP_RESERVE_AND_MAP: 1762 + if (slave_state[slave].old_vlan_api) 1763 + return 0; 1764 + if (!port) 1765 + return -EINVAL; 1766 + vlan_del_from_slave(dev, slave, in_param, port); 1767 + __mlx4_unregister_vlan(dev, port, in_param); 1768 + break; 1769 + default: 1770 + err = -EINVAL; 1771 + break; 1772 + } 1773 + 1774 + return err; 2168 1775 } 2169 1776 2170 1777 static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd, ··· 2200 1771 return err; 2201 1772 2202 1773 __mlx4_counter_free(dev, index); 1774 + mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0); 2203 1775 2204 1776 return err; 2205 1777 } ··· 2233 1803 int err = -EINVAL; 2234 1804 int alop = vhcr->op_modifier; 2235 1805 2236 - switch (vhcr->in_modifier) { 1806 + switch (vhcr->in_modifier & 0xFF) { 2237 1807 case RES_QP: 2238 1808 err = qp_free_res(dev, slave, vhcr->op_modifier, alop, 2239 1809 vhcr->in_param); ··· 2261 1831 2262 1832 case RES_MAC: 2263 1833 err = mac_free_res(dev, slave, vhcr->op_modifier, alop, 2264 - vhcr->in_param, &vhcr->out_param); 1834 + vhcr->in_param, &vhcr->out_param, 1835 + (vhcr->in_modifier >> 8) & 0xFF); 2265 1836 break; 2266 1837 2267 1838 case RES_VLAN: 2268 1839 err = vlan_free_res(dev, slave, vhcr->op_modifier, alop, 2269 - vhcr->in_param, &vhcr->out_param); 1840 + vhcr->in_param, &vhcr->out_param, 1841 + (vhcr->in_modifier >> 8) & 0xFF); 2270 1842 break; 2271 1843 2272 1844 case 
RES_COUNTER: ··· 3930 3498 &tracker->res_tree[RES_QP]); 3931 3499 list_del(&qp->com.list); 3932 3500 spin_unlock_irq(mlx4_tlock(dev)); 3501 + if (!valid_reserved(dev, slave, qpn)) { 3502 + __mlx4_qp_release_range(dev, qpn, 1); 3503 + mlx4_release_resource(dev, slave, 3504 + RES_QP, 1, 0); 3505 + } 3933 3506 kfree(qp); 3934 3507 state = 0; 3935 3508 break; ··· 4006 3569 &tracker->res_tree[RES_SRQ]); 4007 3570 list_del(&srq->com.list); 4008 3571 spin_unlock_irq(mlx4_tlock(dev)); 3572 + mlx4_release_resource(dev, slave, 3573 + RES_SRQ, 1, 0); 4009 3574 kfree(srq); 4010 3575 state = 0; 4011 3576 break; ··· 4074 3635 &tracker->res_tree[RES_CQ]); 4075 3636 list_del(&cq->com.list); 4076 3637 spin_unlock_irq(mlx4_tlock(dev)); 3638 + mlx4_release_resource(dev, slave, 3639 + RES_CQ, 1, 0); 4077 3640 kfree(cq); 4078 3641 state = 0; 4079 3642 break; ··· 4139 3698 &tracker->res_tree[RES_MPT]); 4140 3699 list_del(&mpt->com.list); 4141 3700 spin_unlock_irq(mlx4_tlock(dev)); 3701 + mlx4_release_resource(dev, slave, 3702 + RES_MPT, 1, 0); 4142 3703 kfree(mpt); 4143 3704 state = 0; 4144 3705 break; ··· 4210 3767 &tracker->res_tree[RES_MTT]); 4211 3768 list_del(&mtt->com.list); 4212 3769 spin_unlock_irq(mlx4_tlock(dev)); 3770 + mlx4_release_resource(dev, slave, RES_MTT, 3771 + 1 << mtt->order, 0); 4213 3772 kfree(mtt); 4214 3773 state = 0; 4215 3774 break; ··· 4370 3925 list_del(&counter->com.list); 4371 3926 kfree(counter); 4372 3927 __mlx4_counter_free(dev, index); 3928 + mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0); 4373 3929 } 4374 3930 } 4375 3931 spin_unlock_irq(mlx4_tlock(dev)); ··· 4410 3964 struct mlx4_priv *priv = mlx4_priv(dev); 4411 3965 4412 3966 mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex); 4413 - /*VLAN*/ 3967 + rem_slave_vlans(dev, slave); 4414 3968 rem_slave_macs(dev, slave); 4415 3969 rem_slave_fs_rule(dev, slave); 4416 3970 rem_slave_qps(dev, slave); ··· 4527 4081 if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors && 4528 4082 
NO_INDX != work->orig_vlan_ix) 4529 4083 __mlx4_unregister_vlan(&work->priv->dev, work->port, 4530 - work->orig_vlan_ix); 4084 + work->orig_vlan_id); 4531 4085 out: 4532 4086 kfree(work); 4533 4087 return;
+19 -1
include/linux/mlx4/device.h
··· 54 54 MLX4_FLAG_MASTER = 1 << 2, 55 55 MLX4_FLAG_SLAVE = 1 << 3, 56 56 MLX4_FLAG_SRIOV = 1 << 4, 57 + MLX4_FLAG_OLD_REG_MAC = 1 << 6, 57 58 }; 58 59 59 60 enum { ··· 641 640 __be64 tx_bytes; 642 641 }; 643 642 643 + struct mlx4_quotas { 644 + int qp; 645 + int cq; 646 + int srq; 647 + int mpt; 648 + int mtt; 649 + int counter; 650 + int xrcd; 651 + }; 652 + 644 653 struct mlx4_dev { 645 654 struct pci_dev *pdev; 646 655 unsigned long flags; 647 656 unsigned long num_slaves; 648 657 struct mlx4_caps caps; 649 658 struct mlx4_phys_caps phys_caps; 659 + struct mlx4_quotas quotas; 650 660 struct radix_tree_root qp_table_tree; 651 661 u8 rev_id; 652 662 char board_id[MLX4_BOARD_ID_LEN]; ··· 781 769 static inline int mlx4_is_master(struct mlx4_dev *dev) 782 770 { 783 771 return dev->flags & MLX4_FLAG_MASTER; 772 + } 773 + 774 + static inline int mlx4_num_reserved_sqps(struct mlx4_dev *dev) 775 + { 776 + return dev->phys_caps.base_sqpn + 8 + 777 + 16 * MLX4_MFUNC_MAX * !!mlx4_is_master(dev); 784 778 } 785 779 786 780 static inline int mlx4_is_qp_reserved(struct mlx4_dev *dev, u32 qpn) ··· 1096 1078 u8 *pg, u16 *ratelimit); 1097 1079 int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx); 1098 1080 int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index); 1099 - void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index); 1081 + void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan); 1100 1082 1101 1083 int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list, 1102 1084 int npages, u64 iova, u32 *lkey, u32 *rkey);