
Merge tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband

Pull InfiniBand/RDMA updates from Roland Dreier:
- Re-enable on-demand paging changes with stable ABI
- Fairly large set of ocrdma HW driver fixes
- Some qib HW driver fixes
- Other miscellaneous changes

* tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband: (43 commits)
IB/qib: Add blank line after declaration
IB/qib: Fix checkpatch warnings
IB/mlx5: Enable the ODP capability query verb
IB/core: Add on demand paging caps to ib_uverbs_ex_query_device
IB/core: Add support for extended query device caps
RDMA/cxgb4: Don't hang threads forever waiting on WR replies
RDMA/ocrdma: Fix off by one in ocrdma_query_gid()
RDMA/ocrdma: Use unsigned for bit index
RDMA/ocrdma: Help gcc generate better code for ocrdma_srq_toggle_bit
RDMA/ocrdma: Update the ocrdma module version string
RDMA/ocrdma: set vlan present bit for user AH
RDMA/ocrdma: remove reference of ocrdma_dev out of ocrdma_qp structure
RDMA/ocrdma: Add support for interrupt moderation
RDMA/ocrdma: Honor return value of ocrdma_resolve_dmac
RDMA/ocrdma: Allow expansion of the SQ CQEs via buddy CQ expansion of the QP
RDMA/ocrdma: Discontinue support of RDMA-READ-WITH-INVALIDATE
RDMA/ocrdma: Host crash on destroying device resources
RDMA/ocrdma: Report correct state in ibv_query_qp
RDMA/ocrdma: Debugfs enhancments for ocrdma driver
RDMA/ocrdma: Report correct count of interrupt vectors while registering ocrdma device
...

+1212 -561
+1 -1
MAINTAINERS
··· 8567 8567 F: drivers/scsi/sr* 8568 8568 8569 8569 SCSI RDMA PROTOCOL (SRP) INITIATOR 8570 - M: Bart Van Assche <bvanassche@acm.org> 8570 + M: Bart Van Assche <bart.vanassche@sandisk.com> 8571 8571 L: linux-rdma@vger.kernel.org 8572 8572 S: Supported 8573 8573 W: http://www.openfabrics.org
+3
drivers/infiniband/core/ucma.c
··· 1124 1124 if (!optlen) 1125 1125 return -EINVAL; 1126 1126 1127 + memset(&sa_path, 0, sizeof(sa_path)); 1128 + sa_path.vlan_id = 0xffff; 1129 + 1127 1130 ib_sa_unpack_path(path_data->path_rec, &sa_path); 1128 1131 ret = rdma_set_ib_paths(ctx->cm_id, &sa_path, 1); 1129 1132 if (ret)
+2 -1
drivers/infiniband/core/umem_odp.c
··· 294 294 if (likely(ib_umem_start(umem) != ib_umem_end(umem))) 295 295 rbt_ib_umem_insert(&umem->odp_data->interval_tree, 296 296 &context->umem_tree); 297 - if (likely(!atomic_read(&context->notifier_count))) 297 + if (likely(!atomic_read(&context->notifier_count)) || 298 + context->odp_mrs_count == 1) 298 299 umem->odp_data->mn_counters_active = true; 299 300 else 300 301 list_add(&umem->odp_data->no_private_counters,
+1
drivers/infiniband/core/uverbs.h
··· 258 258 259 259 IB_UVERBS_DECLARE_EX_CMD(create_flow); 260 260 IB_UVERBS_DECLARE_EX_CMD(destroy_flow); 261 + IB_UVERBS_DECLARE_EX_CMD(query_device); 261 262 262 263 #endif /* UVERBS_H */
+113 -45
drivers/infiniband/core/uverbs_cmd.c
··· 400 400 return ret; 401 401 } 402 402 403 + static void copy_query_dev_fields(struct ib_uverbs_file *file, 404 + struct ib_uverbs_query_device_resp *resp, 405 + struct ib_device_attr *attr) 406 + { 407 + resp->fw_ver = attr->fw_ver; 408 + resp->node_guid = file->device->ib_dev->node_guid; 409 + resp->sys_image_guid = attr->sys_image_guid; 410 + resp->max_mr_size = attr->max_mr_size; 411 + resp->page_size_cap = attr->page_size_cap; 412 + resp->vendor_id = attr->vendor_id; 413 + resp->vendor_part_id = attr->vendor_part_id; 414 + resp->hw_ver = attr->hw_ver; 415 + resp->max_qp = attr->max_qp; 416 + resp->max_qp_wr = attr->max_qp_wr; 417 + resp->device_cap_flags = attr->device_cap_flags; 418 + resp->max_sge = attr->max_sge; 419 + resp->max_sge_rd = attr->max_sge_rd; 420 + resp->max_cq = attr->max_cq; 421 + resp->max_cqe = attr->max_cqe; 422 + resp->max_mr = attr->max_mr; 423 + resp->max_pd = attr->max_pd; 424 + resp->max_qp_rd_atom = attr->max_qp_rd_atom; 425 + resp->max_ee_rd_atom = attr->max_ee_rd_atom; 426 + resp->max_res_rd_atom = attr->max_res_rd_atom; 427 + resp->max_qp_init_rd_atom = attr->max_qp_init_rd_atom; 428 + resp->max_ee_init_rd_atom = attr->max_ee_init_rd_atom; 429 + resp->atomic_cap = attr->atomic_cap; 430 + resp->max_ee = attr->max_ee; 431 + resp->max_rdd = attr->max_rdd; 432 + resp->max_mw = attr->max_mw; 433 + resp->max_raw_ipv6_qp = attr->max_raw_ipv6_qp; 434 + resp->max_raw_ethy_qp = attr->max_raw_ethy_qp; 435 + resp->max_mcast_grp = attr->max_mcast_grp; 436 + resp->max_mcast_qp_attach = attr->max_mcast_qp_attach; 437 + resp->max_total_mcast_qp_attach = attr->max_total_mcast_qp_attach; 438 + resp->max_ah = attr->max_ah; 439 + resp->max_fmr = attr->max_fmr; 440 + resp->max_map_per_fmr = attr->max_map_per_fmr; 441 + resp->max_srq = attr->max_srq; 442 + resp->max_srq_wr = attr->max_srq_wr; 443 + resp->max_srq_sge = attr->max_srq_sge; 444 + resp->max_pkeys = attr->max_pkeys; 445 + resp->local_ca_ack_delay = attr->local_ca_ack_delay; 446 + resp->phys_port_cnt = file->device->ib_dev->phys_port_cnt; 447 + } 448 + 403 449 ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file, 404 450 const char __user *buf, 405 451 int in_len, int out_len) ··· 466 420 return ret; 467 421 468 422 memset(&resp, 0, sizeof resp); 469 - 470 - resp.fw_ver = attr.fw_ver; 471 - resp.node_guid = file->device->ib_dev->node_guid; 472 - resp.sys_image_guid = attr.sys_image_guid; 473 - resp.max_mr_size = attr.max_mr_size; 474 - resp.page_size_cap = attr.page_size_cap; 475 - resp.vendor_id = attr.vendor_id; 476 - resp.vendor_part_id = attr.vendor_part_id; 477 - resp.hw_ver = attr.hw_ver; 478 - resp.max_qp = attr.max_qp; 479 - resp.max_qp_wr = attr.max_qp_wr; 480 - resp.device_cap_flags = attr.device_cap_flags; 481 - resp.max_sge = attr.max_sge; 482 - resp.max_sge_rd = attr.max_sge_rd; 483 - resp.max_cq = attr.max_cq; 484 - resp.max_cqe = attr.max_cqe; 485 - resp.max_mr = attr.max_mr; 486 - resp.max_pd = attr.max_pd; 487 - resp.max_qp_rd_atom = attr.max_qp_rd_atom; 488 - resp.max_ee_rd_atom = attr.max_ee_rd_atom; 489 - resp.max_res_rd_atom = attr.max_res_rd_atom; 490 - resp.max_qp_init_rd_atom = attr.max_qp_init_rd_atom; 491 - resp.max_ee_init_rd_atom = attr.max_ee_init_rd_atom; 492 - resp.atomic_cap = attr.atomic_cap; 493 - resp.max_ee = attr.max_ee; 494 - resp.max_rdd = attr.max_rdd; 495 - resp.max_mw = attr.max_mw; 496 - resp.max_raw_ipv6_qp = attr.max_raw_ipv6_qp; 497 - resp.max_raw_ethy_qp = attr.max_raw_ethy_qp; 498 - resp.max_mcast_grp = attr.max_mcast_grp; 499 - resp.max_mcast_qp_attach = 
attr.max_mcast_qp_attach; 500 - resp.max_total_mcast_qp_attach = attr.max_total_mcast_qp_attach; 501 - resp.max_ah = attr.max_ah; 502 - resp.max_fmr = attr.max_fmr; 503 - resp.max_map_per_fmr = attr.max_map_per_fmr; 504 - resp.max_srq = attr.max_srq; 505 - resp.max_srq_wr = attr.max_srq_wr; 506 - resp.max_srq_sge = attr.max_srq_sge; 507 - resp.max_pkeys = attr.max_pkeys; 508 - resp.local_ca_ack_delay = attr.local_ca_ack_delay; 509 - resp.phys_port_cnt = file->device->ib_dev->phys_port_cnt; 423 + copy_query_dev_fields(file, &resp, &attr); 510 424 511 425 if (copy_to_user((void __user *) (unsigned long) cmd.response, 512 426 &resp, sizeof resp)) ··· 2097 2091 if (qp->real_qp == qp) { 2098 2092 ret = ib_resolve_eth_l2_attrs(qp, attr, &cmd.attr_mask); 2099 2093 if (ret) 2100 - goto out; 2094 + goto release_qp; 2101 2095 ret = qp->device->modify_qp(qp, attr, 2102 2096 modify_qp_mask(qp->qp_type, cmd.attr_mask), &udata); 2103 2097 } else { 2104 2098 ret = ib_modify_qp(qp, attr, modify_qp_mask(qp->qp_type, cmd.attr_mask)); 2105 2099 } 2106 2100 2107 - put_qp_read(qp); 2108 - 2109 2101 if (ret) 2110 - goto out; 2102 + goto release_qp; 2111 2103 2112 2104 ret = in_len; 2105 + 2106 + release_qp: 2107 + put_qp_read(qp); 2113 2108 2114 2109 out: 2115 2110 kfree(attr); ··· 3293 3286 ret = -EFAULT; 3294 3287 3295 3288 return ret ? ret : in_len; 3289 + } 3290 + 3291 + int ib_uverbs_ex_query_device(struct ib_uverbs_file *file, 3292 + struct ib_udata *ucore, 3293 + struct ib_udata *uhw) 3294 + { 3295 + struct ib_uverbs_ex_query_device_resp resp; 3296 + struct ib_uverbs_ex_query_device cmd; 3297 + struct ib_device_attr attr; 3298 + struct ib_device *device; 3299 + int err; 3300 + 3301 + device = file->device->ib_dev; 3302 + if (ucore->inlen < sizeof(cmd)) 3303 + return -EINVAL; 3304 + 3305 + err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd)); 3306 + if (err) 3307 + return err; 3308 + 3309 + if (cmd.comp_mask) 3310 + return -EINVAL; 3311 + 3312 + if (cmd.reserved) 3313 + return -EINVAL; 3314 + 3315 + resp.response_length = offsetof(typeof(resp), odp_caps); 3316 + 3317 + if (ucore->outlen < resp.response_length) 3318 + return -ENOSPC; 3319 + 3320 + err = device->query_device(device, &attr); 3321 + if (err) 3322 + return err; 3323 + 3324 + copy_query_dev_fields(file, &resp.base, &attr); 3325 + resp.comp_mask = 0; 3326 + 3327 + if (ucore->outlen < resp.response_length + sizeof(resp.odp_caps)) 3328 + goto end; 3329 + 3330 + #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING 3331 + resp.odp_caps.general_caps = attr.odp_caps.general_caps; 3332 + resp.odp_caps.per_transport_caps.rc_odp_caps = 3333 + attr.odp_caps.per_transport_caps.rc_odp_caps; 3334 + resp.odp_caps.per_transport_caps.uc_odp_caps = 3335 + attr.odp_caps.per_transport_caps.uc_odp_caps; 3336 + resp.odp_caps.per_transport_caps.ud_odp_caps = 3337 + attr.odp_caps.per_transport_caps.ud_odp_caps; 3338 + resp.odp_caps.reserved = 0; 3339 + #else 3340 + memset(&resp.odp_caps, 0, sizeof(resp.odp_caps)); 3341 + #endif 3342 + resp.response_length += sizeof(resp.odp_caps); 3343 + 3344 + end: 3345 + err = ib_copy_to_udata(ucore, &resp, resp.response_length); 3346 + if (err) 3347 + return err; 3348 + 3349 + return 0; 3296 3350 }
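
The extended query-device path added above (ib_uverbs_ex_query_device) is what lets userspace discover the on-demand paging capabilities re-enabled in this pull. As a hedged illustration only — not part of this patch, and assuming a libibverbs build that carries the matching extension (ibv_query_device_ex() and struct ibv_odp_caps) — an application could probe those caps roughly like this:

/*
 * Editorial userspace sketch: query the ODP caps exposed through the new
 * extended query-device uverb. print_odp_caps() is a hypothetical helper
 * name used only for illustration.
 */
#include <stdio.h>
#include <infiniband/verbs.h>

static int print_odp_caps(struct ibv_context *ctx)
{
	struct ibv_device_attr_ex attr;

	/* NULL input: no optional request fields, just fill the attr struct */
	if (ibv_query_device_ex(ctx, NULL, &attr))
		return -1;

	if (!(attr.odp_caps.general_caps & IBV_ODP_SUPPORT)) {
		printf("on-demand paging not supported\n");
		return 0;
	}

	printf("RC ODP caps: send %d recv %d\n",
	       !!(attr.odp_caps.per_transport_caps.rc_odp_caps & IBV_ODP_SUPPORT_SEND),
	       !!(attr.odp_caps.per_transport_caps.rc_odp_caps & IBV_ODP_SUPPORT_RECV));
	return 0;
}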
+1
drivers/infiniband/core/uverbs_main.c
··· 123 123 struct ib_udata *uhw) = { 124 124 [IB_USER_VERBS_EX_CMD_CREATE_FLOW] = ib_uverbs_ex_create_flow, 125 125 [IB_USER_VERBS_EX_CMD_DESTROY_FLOW] = ib_uverbs_ex_destroy_flow, 126 + [IB_USER_VERBS_EX_CMD_QUERY_DEVICE] = ib_uverbs_ex_query_device, 126 127 }; 127 128 128 129 static void ib_uverbs_add_one(struct ib_device *device);
+8 -1
drivers/infiniband/hw/cxgb4/ev.c
··· 225 225 struct c4iw_cq *chp; 226 226 unsigned long flag; 227 227 228 + spin_lock_irqsave(&dev->lock, flag); 228 229 chp = get_chp(dev, qid); 229 230 if (chp) { 231 + atomic_inc(&chp->refcnt); 232 + spin_unlock_irqrestore(&dev->lock, flag); 230 233 t4_clear_cq_armed(&chp->cq); 231 234 spin_lock_irqsave(&chp->comp_handler_lock, flag); 232 235 (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context); 233 236 spin_unlock_irqrestore(&chp->comp_handler_lock, flag); 234 - } else 237 + if (atomic_dec_and_test(&chp->refcnt)) 238 + wake_up(&chp->wait); 239 + } else { 235 240 PDBG("%s unknown cqid 0x%x\n", __func__, qid); 241 + spin_unlock_irqrestore(&dev->lock, flag); 242 + } 236 243 return 0; 237 244 }
+14 -15
drivers/infiniband/hw/cxgb4/iw_cxgb4.h
··· 196 196 return (int)(rdev->lldi.vr->stag.size >> 5); 197 197 } 198 198 199 - #define C4IW_WR_TO (30*HZ) 199 + #define C4IW_WR_TO (60*HZ) 200 200 201 201 struct c4iw_wr_wait { 202 202 struct completion completion; ··· 220 220 u32 hwtid, u32 qpid, 221 221 const char *func) 222 222 { 223 - unsigned to = C4IW_WR_TO; 224 223 int ret; 225 224 226 - do { 227 - ret = wait_for_completion_timeout(&wr_waitp->completion, to); 228 - if (!ret) { 229 - printk(KERN_ERR MOD "%s - Device %s not responding - " 230 - "tid %u qpid %u\n", func, 231 - pci_name(rdev->lldi.pdev), hwtid, qpid); 232 - if (c4iw_fatal_error(rdev)) { 233 - wr_waitp->ret = -EIO; 234 - break; 235 - } 236 - to = to << 2; 237 - } 238 - } while (!ret); 225 + if (c4iw_fatal_error(rdev)) { 226 + wr_waitp->ret = -EIO; 227 + goto out; 228 + } 229 + 230 + ret = wait_for_completion_timeout(&wr_waitp->completion, C4IW_WR_TO); 231 + if (!ret) { 232 + PDBG("%s - Device %s not responding (disabling device) - tid %u qpid %u\n", 233 + func, pci_name(rdev->lldi.pdev), hwtid, qpid); 234 + rdev->flags |= T4_FATAL_ERROR; 235 + wr_waitp->ret = -EIO; 236 + } 237 + out: 239 238 if (wr_waitp->ret) 240 239 PDBG("%s: FW reply %d tid %u qpid %u\n", 241 240 pci_name(rdev->lldi.pdev), wr_waitp->ret, hwtid, qpid);
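
For context on the iw_cxgb4 change above, here is an editorial sketch (not from this patch) of the caller pattern around c4iw_wait_for_reply() under the new semantics: a single C4IW_WR_TO (60*HZ) wait, and on timeout the device is flagged T4_FATAL_ERROR so later waits fail fast with -EIO instead of looping with a growing timeout. The function name example_post_and_wait() is hypothetical.

static int example_post_and_wait(struct c4iw_rdev *rdev, u32 hwtid, u32 qpid)
{
	struct c4iw_wr_wait wr_wait;

	init_completion(&wr_wait.completion);	/* arm the wait before posting */
	wr_wait.ret = 0;

	/* ... post the firmware work request that will complete wr_wait ... */

	return c4iw_wait_for_reply(rdev, &wr_wait, hwtid, qpid, __func__);
}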
-3
drivers/infiniband/hw/ipath/ipath_kernel.h
··· 908 908 /* clean up any chip type-specific stuff */ 909 909 void ipath_chip_done(void); 910 910 911 - /* check to see if we have to force ordering for write combining */ 912 - int ipath_unordered_wc(void); 913 - 914 911 void ipath_disarm_piobufs(struct ipath_devdata *, unsigned first, 915 912 unsigned cnt); 916 913 void ipath_cancel_sends(struct ipath_devdata *, int);
-13
drivers/infiniband/hw/ipath/ipath_wc_ppc64.c
··· 47 47 { 48 48 return 0; 49 49 } 50 - 51 - /** 52 - * ipath_unordered_wc - indicate whether write combining is unordered 53 - * 54 - * Because our performance depends on our ability to do write 55 - * combining mmio writes in the most efficient way, we need to 56 - * know if we are on a processor that may reorder stores when 57 - * write combining. 58 - */ 59 - int ipath_unordered_wc(void) 60 - { 61 - return 1; 62 - }
-15
drivers/infiniband/hw/ipath/ipath_wc_x86_64.c
··· 167 167 dd->ipath_wc_cookie = 0; /* even on failure */ 168 168 } 169 169 } 170 - 171 - /** 172 - * ipath_unordered_wc - indicate whether write combining is ordered 173 - * 174 - * Because our performance depends on our ability to do write combining mmio 175 - * writes in the most efficient way, we need to know if we are on an Intel 176 - * or AMD x86_64 processor. AMD x86_64 processors flush WC buffers out in 177 - * the order completed, and so no special flushing is required to get 178 - * correct ordering. Intel processors, however, will flush write buffers 179 - * out in "random" orders, and so explicit ordering is needed at times. 180 - */ 181 - int ipath_unordered_wc(void) 182 - { 183 - return boot_cpu_data.x86_vendor != X86_VENDOR_AMD; 184 - }
+1 -1
drivers/infiniband/hw/mlx4/cm.c
··· 372 372 *slave = mlx4_ib_find_real_gid(ibdev, port, gid.global.interface_id); 373 373 if (*slave < 0) { 374 374 mlx4_ib_warn(ibdev, "failed matching slave_id by gid (0x%llx)\n", 375 - gid.global.interface_id); 375 + be64_to_cpu(gid.global.interface_id)); 376 376 return -ENOENT; 377 377 } 378 378 return 0;
+3 -4
drivers/infiniband/hw/mlx4/cq.c
··· 369 369 int err; 370 370 371 371 mutex_lock(&cq->resize_mutex); 372 - 373 - if (entries < 1) { 372 + if (entries < 1 || entries > dev->dev->caps.max_cqes) { 374 373 err = -EINVAL; 375 374 goto out; 376 375 } ··· 380 381 goto out; 381 382 } 382 383 383 - if (entries > dev->dev->caps.max_cqes) { 384 + if (entries > dev->dev->caps.max_cqes + 1) { 384 385 err = -EINVAL; 385 386 goto out; 386 387 } ··· 393 394 /* Can't be smaller than the number of outstanding CQEs */ 394 395 outst_cqe = mlx4_ib_get_outstanding_cqes(cq); 395 396 if (entries < outst_cqe + 1) { 396 - err = 0; 397 + err = -EINVAL; 397 398 goto out; 398 399 } 399 400
+5 -6
drivers/infiniband/hw/mlx4/main.c
··· 1269 1269 struct mlx4_dev *dev = mdev->dev; 1270 1270 struct mlx4_ib_qp *mqp = to_mqp(ibqp); 1271 1271 struct mlx4_ib_steering *ib_steering = NULL; 1272 - enum mlx4_protocol prot = (gid->raw[1] == 0x0e) ? 1273 - MLX4_PROT_IB_IPV4 : MLX4_PROT_IB_IPV6; 1272 + enum mlx4_protocol prot = MLX4_PROT_IB_IPV6; 1274 1273 struct mlx4_flow_reg_id reg_id; 1275 1274 1276 1275 if (mdev->dev->caps.steering_mode == ··· 1283 1284 !!(mqp->flags & 1284 1285 MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK), 1285 1286 prot, &reg_id.id); 1286 - if (err) 1287 + if (err) { 1288 + pr_err("multicast attach op failed, err %d\n", err); 1287 1289 goto err_malloc; 1290 + } 1288 1291 1289 1292 reg_id.mirror = 0; 1290 1293 if (mlx4_is_bonded(dev)) { ··· 1349 1348 struct net_device *ndev; 1350 1349 struct mlx4_ib_gid_entry *ge; 1351 1350 struct mlx4_flow_reg_id reg_id = {0, 0}; 1352 - 1353 - enum mlx4_protocol prot = (gid->raw[1] == 0x0e) ? 1354 - MLX4_PROT_IB_IPV4 : MLX4_PROT_IB_IPV6; 1351 + enum mlx4_protocol prot = MLX4_PROT_IB_IPV6; 1355 1352 1356 1353 if (mdev->dev->caps.steering_mode == 1357 1354 MLX4_STEERING_MODE_DEVICE_MANAGED) {
+4 -2
drivers/infiniband/hw/mlx4/qp.c
··· 1696 1696 qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI || 1697 1697 qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_GSI) { 1698 1698 err = handle_eth_ud_smac_index(dev, qp, (u8 *)attr->smac, context); 1699 - if (err) 1700 - return -EINVAL; 1699 + if (err) { 1700 + err = -EINVAL; 1701 + goto out; 1702 + } 1701 1703 if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI) 1702 1704 dev->qp1_proxy[qp->port - 1] = qp; 1703 1705 }
+3 -1
drivers/infiniband/hw/mlx5/main.c
··· 997 997 struct ib_device_attr *dprops = NULL; 998 998 struct ib_port_attr *pprops = NULL; 999 999 struct mlx5_general_caps *gen; 1000 - int err = 0; 1000 + int err = -ENOMEM; 1001 1001 int port; 1002 1002 1003 1003 gen = &dev->mdev->caps.gen; ··· 1331 1331 (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) | 1332 1332 (1ull << IB_USER_VERBS_CMD_CREATE_XSRQ) | 1333 1333 (1ull << IB_USER_VERBS_CMD_OPEN_QP); 1334 + dev->ib_dev.uverbs_ex_cmd_mask = 1335 + (1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE); 1334 1336 1335 1337 dev->ib_dev.query_device = mlx5_ib_query_device; 1336 1338 dev->ib_dev.query_port = mlx5_ib_query_port;
+1
drivers/infiniband/hw/mlx5/mr.c
··· 1012 1012 goto err_2; 1013 1013 } 1014 1014 mr->umem = umem; 1015 + mr->dev = dev; 1015 1016 mr->live = 1; 1016 1017 kvfree(in); 1017 1018
+36 -2
drivers/infiniband/hw/ocrdma/ocrdma.h
··· 40 40 #include <be_roce.h> 41 41 #include "ocrdma_sli.h" 42 42 43 - #define OCRDMA_ROCE_DRV_VERSION "10.2.287.0u" 43 + #define OCRDMA_ROCE_DRV_VERSION "10.4.205.0u" 44 44 45 45 #define OCRDMA_ROCE_DRV_DESC "Emulex OneConnect RoCE Driver" 46 46 #define OCRDMA_NODE_DESC "Emulex OneConnect RoCE HCA" ··· 55 55 #define OCRDMA_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME) 56 56 57 57 #define convert_to_64bit(lo, hi) ((u64)hi << 32 | (u64)lo) 58 + #define EQ_INTR_PER_SEC_THRSH_HI 150000 59 + #define EQ_INTR_PER_SEC_THRSH_LOW 100000 60 + #define EQ_AIC_MAX_EQD 20 61 + #define EQ_AIC_MIN_EQD 0 62 + 63 + void ocrdma_eqd_set_task(struct work_struct *work); 58 64 59 65 struct ocrdma_dev_attr { 60 66 u8 fw_ver[32]; 61 67 u32 vendor_id; 62 68 u32 device_id; 63 69 u16 max_pd; 70 + u16 max_dpp_pds; 64 71 u16 max_cq; 65 72 u16 max_cqe; 66 73 u16 max_qp; ··· 123 116 bool created; 124 117 }; 125 118 119 + struct ocrdma_aic_obj { /* Adaptive interrupt coalescing (AIC) info */ 120 + u32 prev_eqd; 121 + u64 eq_intr_cnt; 122 + u64 prev_eq_intr_cnt; 123 + }; 124 + 126 125 struct ocrdma_eq { 127 126 struct ocrdma_queue_info q; 128 127 u32 vector; 129 128 int cq_cnt; 130 129 struct ocrdma_dev *dev; 131 130 char irq_name[32]; 131 + struct ocrdma_aic_obj aic_obj; 132 132 }; 133 133 134 134 struct ocrdma_mq { ··· 185 171 struct ocrdma_dev *dev; 186 172 }; 187 173 174 + struct ocrdma_pd_resource_mgr { 175 + u32 pd_norm_start; 176 + u16 pd_norm_count; 177 + u16 pd_norm_thrsh; 178 + u16 max_normal_pd; 179 + u32 pd_dpp_start; 180 + u16 pd_dpp_count; 181 + u16 pd_dpp_thrsh; 182 + u16 max_dpp_pd; 183 + u16 dpp_page_index; 184 + unsigned long *pd_norm_bitmap; 185 + unsigned long *pd_dpp_bitmap; 186 + bool pd_prealloc_valid; 187 + }; 188 + 188 189 struct stats_mem { 189 190 struct ocrdma_mqe mqe; 190 191 void *va; ··· 227 198 228 199 struct ocrdma_eq *eq_tbl; 229 200 int eq_cnt; 201 + struct delayed_work eqd_work; 230 202 u16 base_eqid; 231 203 u16 max_eq; 232 204 ··· 285 255 struct ocrdma_stats rx_qp_err_stats; 286 256 struct ocrdma_stats tx_dbg_stats; 287 257 struct ocrdma_stats rx_dbg_stats; 258 + struct ocrdma_stats driver_stats; 259 + struct ocrdma_stats reset_stats; 288 260 struct dentry *dir; 261 + atomic_t async_err_stats[OCRDMA_MAX_ASYNC_ERRORS]; 262 + atomic_t cqe_err_stats[OCRDMA_MAX_CQE_ERR]; 263 + struct ocrdma_pd_resource_mgr *pd_mgr; 289 264 }; 290 265 291 266 struct ocrdma_cq { ··· 370 335 371 336 struct ocrdma_qp { 372 337 struct ib_qp ibqp; 373 - struct ocrdma_dev *dev; 374 338 375 339 u8 __iomem *sq_db; 376 340 struct ocrdma_qp_hwq_info sq;
+31 -7
drivers/infiniband/hw/ocrdma/ocrdma_ah.c
··· 29 29 #include <net/netevent.h> 30 30 31 31 #include <rdma/ib_addr.h> 32 + #include <rdma/ib_mad.h> 32 33 33 34 #include "ocrdma.h" 34 35 #include "ocrdma_verbs.h" 35 36 #include "ocrdma_ah.h" 36 37 #include "ocrdma_hw.h" 38 + #include "ocrdma_stats.h" 37 39 38 40 #define OCRDMA_VID_PCP_SHIFT 0xD 39 41 40 42 static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah, 41 - struct ib_ah_attr *attr, union ib_gid *sgid, int pdid) 43 + struct ib_ah_attr *attr, union ib_gid *sgid, 44 + int pdid, bool *isvlan) 42 45 { 43 46 int status = 0; 44 - u16 vlan_tag; bool vlan_enabled = false; 47 + u16 vlan_tag; 45 48 struct ocrdma_eth_vlan eth; 46 49 struct ocrdma_grh grh; 47 50 int eth_sz; ··· 62 59 vlan_tag |= (dev->sl & 0x07) << OCRDMA_VID_PCP_SHIFT; 63 60 eth.vlan_tag = cpu_to_be16(vlan_tag); 64 61 eth_sz = sizeof(struct ocrdma_eth_vlan); 65 - vlan_enabled = true; 62 + *isvlan = true; 66 63 } else { 67 64 eth.eth_type = cpu_to_be16(OCRDMA_ROCE_ETH_TYPE); 68 65 eth_sz = sizeof(struct ocrdma_eth_basic); ··· 85 82 /* Eth HDR */ 86 83 memcpy(&ah->av->eth_hdr, &eth, eth_sz); 87 84 memcpy((u8 *)ah->av + eth_sz, &grh, sizeof(struct ocrdma_grh)); 88 - if (vlan_enabled) 85 + if (*isvlan) 89 86 ah->av->valid |= OCRDMA_AV_VLAN_VALID; 90 87 ah->av->valid = cpu_to_le32(ah->av->valid); 91 88 return status; ··· 94 91 struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr) 95 92 { 96 93 u32 *ahid_addr; 94 + bool isvlan = false; 97 95 int status; 98 96 struct ocrdma_ah *ah; 99 97 struct ocrdma_pd *pd = get_ocrdma_pd(ibpd); ··· 131 127 } 132 128 } 133 129 134 - status = set_av_attr(dev, ah, attr, &sgid, pd->id); 130 + status = set_av_attr(dev, ah, attr, &sgid, pd->id, &isvlan); 135 131 if (status) 136 132 goto av_conf_err; 137 133 138 134 /* if pd is for the user process, pass the ah_id to user space */ 139 135 if ((pd->uctx) && (pd->uctx->ah_tbl.va)) { 140 136 ahid_addr = pd->uctx->ah_tbl.va + attr->dlid; 141 - *ahid_addr = ah->id; 137 + *ahid_addr = 0; 138 + *ahid_addr |= ah->id & OCRDMA_AH_ID_MASK; 139 + if (isvlan) 140 + *ahid_addr |= (OCRDMA_AH_VLAN_VALID_MASK << 141 + OCRDMA_AH_VLAN_VALID_SHIFT); 142 142 } 143 + 143 144 return &ah->ibah; 144 145 145 146 av_conf_err: ··· 200 191 struct ib_grh *in_grh, 201 192 struct ib_mad *in_mad, struct ib_mad *out_mad) 202 193 { 203 - return IB_MAD_RESULT_SUCCESS; 194 + int status; 195 + struct ocrdma_dev *dev; 196 + 197 + switch (in_mad->mad_hdr.mgmt_class) { 198 + case IB_MGMT_CLASS_PERF_MGMT: 199 + dev = get_ocrdma_dev(ibdev); 200 + if (!ocrdma_pma_counters(dev, out_mad)) 201 + status = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY; 202 + else 203 + status = IB_MAD_RESULT_SUCCESS; 204 + break; 205 + default: 206 + status = IB_MAD_RESULT_SUCCESS; 207 + break; 208 + } 209 + return status; 204 210 }
+6
drivers/infiniband/hw/ocrdma/ocrdma_ah.h
··· 28 28 #ifndef __OCRDMA_AH_H__ 29 29 #define __OCRDMA_AH_H__ 30 30 31 + enum { 32 + OCRDMA_AH_ID_MASK = 0x3FF, 33 + OCRDMA_AH_VLAN_VALID_MASK = 0x01, 34 + OCRDMA_AH_VLAN_VALID_SHIFT = 0x1F 35 + }; 36 + 31 37 struct ib_ah *ocrdma_create_ah(struct ib_pd *, struct ib_ah_attr *); 32 38 int ocrdma_destroy_ah(struct ib_ah *); 33 39 int ocrdma_query_ah(struct ib_ah *, struct ib_ah_attr *);
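
The masks above describe how ocrdma_create_ah() now packs the AH id and the VLAN-present bit into the shared ah_tbl entry handed to userspace. As a hedged sketch only (the real decode lives in the userspace provider, and the helper name here is hypothetical), a consumer could unpack the entry like this:

static inline void example_decode_ahid(u32 ahid_entry, u16 *ah_id, bool *vlan_present)
{
	*ah_id = ahid_entry & OCRDMA_AH_ID_MASK;
	*vlan_present = (ahid_entry >> OCRDMA_AH_VLAN_VALID_SHIFT) &
			OCRDMA_AH_VLAN_VALID_MASK;
}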
+271 -41
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
··· 734 734 break; 735 735 } 736 736 737 + if (type < OCRDMA_MAX_ASYNC_ERRORS) 738 + atomic_inc(&dev->async_err_stats[type]); 739 + 737 740 if (qp_event) { 738 741 if (qp->ibqp.event_handler) 739 742 qp->ibqp.event_handler(&ib_evt, qp->ibqp.qp_context); ··· 834 831 return 0; 835 832 } 836 833 837 - static void ocrdma_qp_buddy_cq_handler(struct ocrdma_dev *dev, 838 - struct ocrdma_cq *cq) 834 + static struct ocrdma_cq *_ocrdma_qp_buddy_cq_handler(struct ocrdma_dev *dev, 835 + struct ocrdma_cq *cq, bool sq) 839 836 { 840 - unsigned long flags; 841 837 struct ocrdma_qp *qp; 842 - bool buddy_cq_found = false; 843 - /* Go through list of QPs in error state which are using this CQ 844 - * and invoke its callback handler to trigger CQE processing for 845 - * error/flushed CQE. It is rare to find more than few entries in 846 - * this list as most consumers stops after getting error CQE. 847 - * List is traversed only once when a matching buddy cq found for a QP. 848 - */ 849 - spin_lock_irqsave(&dev->flush_q_lock, flags); 850 - list_for_each_entry(qp, &cq->sq_head, sq_entry) { 838 + struct list_head *cur; 839 + struct ocrdma_cq *bcq = NULL; 840 + struct list_head *head = sq?(&cq->sq_head):(&cq->rq_head); 841 + 842 + list_for_each(cur, head) { 843 + if (sq) 844 + qp = list_entry(cur, struct ocrdma_qp, sq_entry); 845 + else 846 + qp = list_entry(cur, struct ocrdma_qp, rq_entry); 847 + 851 848 if (qp->srq) 852 849 continue; 853 850 /* if wq and rq share the same cq, than comp_handler ··· 859 856 * if completion came on rq, sq's cq is buddy cq. 860 857 */ 861 858 if (qp->sq_cq == cq) 862 - cq = qp->rq_cq; 859 + bcq = qp->rq_cq; 863 860 else 864 - cq = qp->sq_cq; 865 - buddy_cq_found = true; 866 - break; 861 + bcq = qp->sq_cq; 862 + return bcq; 867 863 } 864 + return NULL; 865 + } 866 + 867 + static void ocrdma_qp_buddy_cq_handler(struct ocrdma_dev *dev, 868 + struct ocrdma_cq *cq) 869 + { 870 + unsigned long flags; 871 + struct ocrdma_cq *bcq = NULL; 872 + 873 + /* Go through list of QPs in error state which are using this CQ 874 + * and invoke its callback handler to trigger CQE processing for 875 + * error/flushed CQE. It is rare to find more than few entries in 876 + * this list as most consumers stops after getting error CQE. 877 + * List is traversed only once when a matching buddy cq found for a QP. 878 + */ 879 + spin_lock_irqsave(&dev->flush_q_lock, flags); 880 + /* Check if buddy CQ is present. 
881 + * true - Check for SQ CQ 882 + * false - Check for RQ CQ 883 + */ 884 + bcq = _ocrdma_qp_buddy_cq_handler(dev, cq, true); 885 + if (bcq == NULL) 886 + bcq = _ocrdma_qp_buddy_cq_handler(dev, cq, false); 868 887 spin_unlock_irqrestore(&dev->flush_q_lock, flags); 869 - if (buddy_cq_found == false) 870 - return; 871 - if (cq->ibcq.comp_handler) { 872 - spin_lock_irqsave(&cq->comp_handler_lock, flags); 873 - (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context); 874 - spin_unlock_irqrestore(&cq->comp_handler_lock, flags); 888 + 889 + /* if there is valid buddy cq, look for its completion handler */ 890 + if (bcq && bcq->ibcq.comp_handler) { 891 + spin_lock_irqsave(&bcq->comp_handler_lock, flags); 892 + (*bcq->ibcq.comp_handler) (&bcq->ibcq, bcq->ibcq.cq_context); 893 + spin_unlock_irqrestore(&bcq->comp_handler_lock, flags); 875 894 } 876 895 } 877 896 ··· 960 935 961 936 } while (budget); 962 937 938 + eq->aic_obj.eq_intr_cnt++; 963 939 ocrdma_ring_eq_db(dev, eq->q.id, true, true, 0); 964 940 return IRQ_HANDLED; 965 941 } ··· 1076 1050 attr->max_pd = 1077 1051 (rsp->max_pd_ca_ack_delay & OCRDMA_MBX_QUERY_CFG_MAX_PD_MASK) >> 1078 1052 OCRDMA_MBX_QUERY_CFG_MAX_PD_SHIFT; 1053 + attr->max_dpp_pds = 1054 + (rsp->max_dpp_pds_credits & OCRDMA_MBX_QUERY_CFG_MAX_DPP_PDS_MASK) >> 1055 + OCRDMA_MBX_QUERY_CFG_MAX_DPP_PDS_OFFSET; 1079 1056 attr->max_qp = 1080 1057 (rsp->qp_srq_cq_ird_ord & OCRDMA_MBX_QUERY_CFG_MAX_QP_MASK) >> 1081 1058 OCRDMA_MBX_QUERY_CFG_MAX_QP_SHIFT; ··· 1423 1394 status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); 1424 1395 kfree(cmd); 1425 1396 return status; 1397 + } 1398 + 1399 + 1400 + static int ocrdma_mbx_alloc_pd_range(struct ocrdma_dev *dev) 1401 + { 1402 + int status = -ENOMEM; 1403 + size_t pd_bitmap_size; 1404 + struct ocrdma_alloc_pd_range *cmd; 1405 + struct ocrdma_alloc_pd_range_rsp *rsp; 1406 + 1407 + /* Pre allocate the DPP PDs */ 1408 + cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD_RANGE, sizeof(*cmd)); 1409 + if (!cmd) 1410 + return -ENOMEM; 1411 + cmd->pd_count = dev->attr.max_dpp_pds; 1412 + cmd->enable_dpp_rsvd |= OCRDMA_ALLOC_PD_ENABLE_DPP; 1413 + status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); 1414 + if (status) 1415 + goto mbx_err; 1416 + rsp = (struct ocrdma_alloc_pd_range_rsp *)cmd; 1417 + 1418 + if ((rsp->dpp_page_pdid & OCRDMA_ALLOC_PD_RSP_DPP) && rsp->pd_count) { 1419 + dev->pd_mgr->dpp_page_index = rsp->dpp_page_pdid >> 1420 + OCRDMA_ALLOC_PD_RSP_DPP_PAGE_SHIFT; 1421 + dev->pd_mgr->pd_dpp_start = rsp->dpp_page_pdid & 1422 + OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK; 1423 + dev->pd_mgr->max_dpp_pd = rsp->pd_count; 1424 + pd_bitmap_size = BITS_TO_LONGS(rsp->pd_count) * sizeof(long); 1425 + dev->pd_mgr->pd_dpp_bitmap = kzalloc(pd_bitmap_size, 1426 + GFP_KERNEL); 1427 + } 1428 + kfree(cmd); 1429 + 1430 + cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD_RANGE, sizeof(*cmd)); 1431 + if (!cmd) 1432 + return -ENOMEM; 1433 + 1434 + cmd->pd_count = dev->attr.max_pd - dev->attr.max_dpp_pds; 1435 + status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); 1436 + if (status) 1437 + goto mbx_err; 1438 + rsp = (struct ocrdma_alloc_pd_range_rsp *)cmd; 1439 + if (rsp->pd_count) { 1440 + dev->pd_mgr->pd_norm_start = rsp->dpp_page_pdid & 1441 + OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK; 1442 + dev->pd_mgr->max_normal_pd = rsp->pd_count; 1443 + pd_bitmap_size = BITS_TO_LONGS(rsp->pd_count) * sizeof(long); 1444 + dev->pd_mgr->pd_norm_bitmap = kzalloc(pd_bitmap_size, 1445 + GFP_KERNEL); 1446 + } 1447 + 1448 + if (dev->pd_mgr->pd_norm_bitmap || 
dev->pd_mgr->pd_dpp_bitmap) { 1449 + /* Enable PD resource manager */ 1450 + dev->pd_mgr->pd_prealloc_valid = true; 1451 + } else { 1452 + return -ENOMEM; 1453 + } 1454 + mbx_err: 1455 + kfree(cmd); 1456 + return status; 1457 + } 1458 + 1459 + static void ocrdma_mbx_dealloc_pd_range(struct ocrdma_dev *dev) 1460 + { 1461 + struct ocrdma_dealloc_pd_range *cmd; 1462 + 1463 + /* return normal PDs to firmware */ 1464 + cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DEALLOC_PD_RANGE, sizeof(*cmd)); 1465 + if (!cmd) 1466 + goto mbx_err; 1467 + 1468 + if (dev->pd_mgr->max_normal_pd) { 1469 + cmd->start_pd_id = dev->pd_mgr->pd_norm_start; 1470 + cmd->pd_count = dev->pd_mgr->max_normal_pd; 1471 + ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); 1472 + } 1473 + 1474 + if (dev->pd_mgr->max_dpp_pd) { 1475 + kfree(cmd); 1476 + /* return DPP PDs to firmware */ 1477 + cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DEALLOC_PD_RANGE, 1478 + sizeof(*cmd)); 1479 + if (!cmd) 1480 + goto mbx_err; 1481 + 1482 + cmd->start_pd_id = dev->pd_mgr->pd_dpp_start; 1483 + cmd->pd_count = dev->pd_mgr->max_dpp_pd; 1484 + ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); 1485 + } 1486 + mbx_err: 1487 + kfree(cmd); 1488 + } 1489 + 1490 + void ocrdma_alloc_pd_pool(struct ocrdma_dev *dev) 1491 + { 1492 + int status; 1493 + 1494 + dev->pd_mgr = kzalloc(sizeof(struct ocrdma_pd_resource_mgr), 1495 + GFP_KERNEL); 1496 + if (!dev->pd_mgr) { 1497 + pr_err("%s(%d)Memory allocation failure.\n", __func__, dev->id); 1498 + return; 1499 + } 1500 + status = ocrdma_mbx_alloc_pd_range(dev); 1501 + if (status) { 1502 + pr_err("%s(%d) Unable to initialize PD pool, using default.\n", 1503 + __func__, dev->id); 1504 + } 1505 + } 1506 + 1507 + static void ocrdma_free_pd_pool(struct ocrdma_dev *dev) 1508 + { 1509 + ocrdma_mbx_dealloc_pd_range(dev); 1510 + kfree(dev->pd_mgr->pd_norm_bitmap); 1511 + kfree(dev->pd_mgr->pd_dpp_bitmap); 1512 + kfree(dev->pd_mgr); 1426 1513 } 1427 1514 1428 1515 static int ocrdma_build_q_conf(u32 *num_entries, int entry_size, ··· 2041 1896 { 2042 1897 bool found; 2043 1898 unsigned long flags; 1899 + struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device); 2044 1900 2045 - spin_lock_irqsave(&qp->dev->flush_q_lock, flags); 1901 + spin_lock_irqsave(&dev->flush_q_lock, flags); 2046 1902 found = ocrdma_is_qp_in_sq_flushlist(qp->sq_cq, qp); 2047 1903 if (!found) 2048 1904 list_add_tail(&qp->sq_entry, &qp->sq_cq->sq_head); ··· 2052 1906 if (!found) 2053 1907 list_add_tail(&qp->rq_entry, &qp->rq_cq->rq_head); 2054 1908 } 2055 - spin_unlock_irqrestore(&qp->dev->flush_q_lock, flags); 1909 + spin_unlock_irqrestore(&dev->flush_q_lock, flags); 2056 1910 } 2057 1911 2058 1912 static void ocrdma_init_hwq_ptr(struct ocrdma_qp *qp) ··· 2118 1972 int status; 2119 1973 u32 len, hw_pages, hw_page_size; 2120 1974 dma_addr_t pa; 2121 - struct ocrdma_dev *dev = qp->dev; 1975 + struct ocrdma_pd *pd = qp->pd; 1976 + struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device); 2122 1977 struct pci_dev *pdev = dev->nic_info.pdev; 2123 1978 u32 max_wqe_allocated; 2124 1979 u32 max_sges = attrs->cap.max_send_sge; ··· 2174 2027 int status; 2175 2028 u32 len, hw_pages, hw_page_size; 2176 2029 dma_addr_t pa = 0; 2177 - struct ocrdma_dev *dev = qp->dev; 2030 + struct ocrdma_pd *pd = qp->pd; 2031 + struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device); 2178 2032 struct pci_dev *pdev = dev->nic_info.pdev; 2179 2033 u32 max_rqe_allocated = attrs->cap.max_recv_wr + 1; 2180 2034 ··· 2234 2086 static int ocrdma_set_create_qp_ird_cmd(struct ocrdma_create_qp_req *cmd, 2235 2087 
struct ocrdma_qp *qp) 2236 2088 { 2237 - struct ocrdma_dev *dev = qp->dev; 2089 + struct ocrdma_pd *pd = qp->pd; 2090 + struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device); 2238 2091 struct pci_dev *pdev = dev->nic_info.pdev; 2239 2092 dma_addr_t pa = 0; 2240 2093 int ird_page_size = dev->attr.ird_page_size; ··· 2306 2157 { 2307 2158 int status = -ENOMEM; 2308 2159 u32 flags = 0; 2309 - struct ocrdma_dev *dev = qp->dev; 2310 2160 struct ocrdma_pd *pd = qp->pd; 2161 + struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device); 2311 2162 struct pci_dev *pdev = dev->nic_info.pdev; 2312 2163 struct ocrdma_cq *cq; 2313 2164 struct ocrdma_create_qp_req *cmd; ··· 2430 2281 union ib_gid sgid, zgid; 2431 2282 u32 vlan_id; 2432 2283 u8 mac_addr[6]; 2284 + struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device); 2433 2285 2434 2286 if ((ah_attr->ah_flags & IB_AH_GRH) == 0) 2435 2287 return -EINVAL; 2436 - if (atomic_cmpxchg(&qp->dev->update_sl, 1, 0)) 2437 - ocrdma_init_service_level(qp->dev); 2288 + if (atomic_cmpxchg(&dev->update_sl, 1, 0)) 2289 + ocrdma_init_service_level(dev); 2438 2290 cmd->params.tclass_sq_psn |= 2439 2291 (ah_attr->grh.traffic_class << OCRDMA_QP_PARAMS_TCLASS_SHIFT); 2440 2292 cmd->params.rnt_rc_sl_fl |= ··· 2446 2296 cmd->flags |= OCRDMA_QP_PARA_FLOW_LBL_VALID; 2447 2297 memcpy(&cmd->params.dgid[0], &ah_attr->grh.dgid.raw[0], 2448 2298 sizeof(cmd->params.dgid)); 2449 - status = ocrdma_query_gid(&qp->dev->ibdev, 1, 2299 + status = ocrdma_query_gid(&dev->ibdev, 1, 2450 2300 ah_attr->grh.sgid_index, &sgid); 2451 2301 if (status) 2452 2302 return status; ··· 2457 2307 2458 2308 qp->sgid_idx = ah_attr->grh.sgid_index; 2459 2309 memcpy(&cmd->params.sgid[0], &sgid.raw[0], sizeof(cmd->params.sgid)); 2460 - ocrdma_resolve_dmac(qp->dev, ah_attr, &mac_addr[0]); 2310 + status = ocrdma_resolve_dmac(dev, ah_attr, &mac_addr[0]); 2311 + if (status) 2312 + return status; 2461 2313 cmd->params.dmac_b0_to_b3 = mac_addr[0] | (mac_addr[1] << 8) | 2462 2314 (mac_addr[2] << 16) | (mac_addr[3] << 24); 2463 2315 /* convert them to LE format. 
*/ ··· 2472 2320 vlan_id << OCRDMA_QP_PARAMS_VLAN_SHIFT; 2473 2321 cmd->flags |= OCRDMA_QP_PARA_VLAN_EN_VALID; 2474 2322 cmd->params.rnt_rc_sl_fl |= 2475 - (qp->dev->sl & 0x07) << OCRDMA_QP_PARAMS_SL_SHIFT; 2323 + (dev->sl & 0x07) << OCRDMA_QP_PARAMS_SL_SHIFT; 2476 2324 } 2477 2325 return 0; 2478 2326 } ··· 2482 2330 struct ib_qp_attr *attrs, int attr_mask) 2483 2331 { 2484 2332 int status = 0; 2333 + struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device); 2485 2334 2486 2335 if (attr_mask & IB_QP_PKEY_INDEX) { 2487 2336 cmd->params.path_mtu_pkey_indx |= (attrs->pkey_index & ··· 2500 2347 return status; 2501 2348 } else if (qp->qp_type == IB_QPT_GSI || qp->qp_type == IB_QPT_UD) { 2502 2349 /* set the default mac address for UD, GSI QPs */ 2503 - cmd->params.dmac_b0_to_b3 = qp->dev->nic_info.mac_addr[0] | 2504 - (qp->dev->nic_info.mac_addr[1] << 8) | 2505 - (qp->dev->nic_info.mac_addr[2] << 16) | 2506 - (qp->dev->nic_info.mac_addr[3] << 24); 2507 - cmd->params.vlan_dmac_b4_to_b5 = qp->dev->nic_info.mac_addr[4] | 2508 - (qp->dev->nic_info.mac_addr[5] << 8); 2350 + cmd->params.dmac_b0_to_b3 = dev->nic_info.mac_addr[0] | 2351 + (dev->nic_info.mac_addr[1] << 8) | 2352 + (dev->nic_info.mac_addr[2] << 16) | 2353 + (dev->nic_info.mac_addr[3] << 24); 2354 + cmd->params.vlan_dmac_b4_to_b5 = dev->nic_info.mac_addr[4] | 2355 + (dev->nic_info.mac_addr[5] << 8); 2509 2356 } 2510 2357 if ((attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) && 2511 2358 attrs->en_sqd_async_notify) { ··· 2562 2409 cmd->flags |= OCRDMA_QP_PARA_RQPSN_VALID; 2563 2410 } 2564 2411 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) { 2565 - if (attrs->max_rd_atomic > qp->dev->attr.max_ord_per_qp) { 2412 + if (attrs->max_rd_atomic > dev->attr.max_ord_per_qp) { 2566 2413 status = -EINVAL; 2567 2414 goto pmtu_err; 2568 2415 } ··· 2570 2417 cmd->flags |= OCRDMA_QP_PARA_MAX_ORD_VALID; 2571 2418 } 2572 2419 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) { 2573 - if (attrs->max_dest_rd_atomic > qp->dev->attr.max_ird_per_qp) { 2420 + if (attrs->max_dest_rd_atomic > dev->attr.max_ird_per_qp) { 2574 2421 status = -EINVAL; 2575 2422 goto pmtu_err; 2576 2423 } ··· 3023 2870 return status; 3024 2871 } 3025 2872 2873 + static int ocrdma_mbx_modify_eqd(struct ocrdma_dev *dev, struct ocrdma_eq *eq, 2874 + int num) 2875 + { 2876 + int i, status = -ENOMEM; 2877 + struct ocrdma_modify_eqd_req *cmd; 2878 + 2879 + cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_MODIFY_EQ_DELAY, sizeof(*cmd)); 2880 + if (!cmd) 2881 + return status; 2882 + 2883 + ocrdma_init_mch(&cmd->cmd.req, OCRDMA_CMD_MODIFY_EQ_DELAY, 2884 + OCRDMA_SUBSYS_COMMON, sizeof(*cmd)); 2885 + 2886 + cmd->cmd.num_eq = num; 2887 + for (i = 0; i < num; i++) { 2888 + cmd->cmd.set_eqd[i].eq_id = eq[i].q.id; 2889 + cmd->cmd.set_eqd[i].phase = 0; 2890 + cmd->cmd.set_eqd[i].delay_multiplier = 2891 + (eq[i].aic_obj.prev_eqd * 65)/100; 2892 + } 2893 + status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); 2894 + if (status) 2895 + goto mbx_err; 2896 + mbx_err: 2897 + kfree(cmd); 2898 + return status; 2899 + } 2900 + 2901 + static int ocrdma_modify_eqd(struct ocrdma_dev *dev, struct ocrdma_eq *eq, 2902 + int num) 2903 + { 2904 + int num_eqs, i = 0; 2905 + if (num > 8) { 2906 + while (num) { 2907 + num_eqs = min(num, 8); 2908 + ocrdma_mbx_modify_eqd(dev, &eq[i], num_eqs); 2909 + i += num_eqs; 2910 + num -= num_eqs; 2911 + } 2912 + } else { 2913 + ocrdma_mbx_modify_eqd(dev, eq, num); 2914 + } 2915 + return 0; 2916 + } 2917 + 2918 + void ocrdma_eqd_set_task(struct work_struct *work) 2919 + { 2920 + struct ocrdma_dev *dev = 2921 + 
container_of(work, struct ocrdma_dev, eqd_work.work); 2922 + struct ocrdma_eq *eq = 0; 2923 + int i, num = 0, status = -EINVAL; 2924 + u64 eq_intr; 2925 + 2926 + for (i = 0; i < dev->eq_cnt; i++) { 2927 + eq = &dev->eq_tbl[i]; 2928 + if (eq->aic_obj.eq_intr_cnt > eq->aic_obj.prev_eq_intr_cnt) { 2929 + eq_intr = eq->aic_obj.eq_intr_cnt - 2930 + eq->aic_obj.prev_eq_intr_cnt; 2931 + if ((eq_intr > EQ_INTR_PER_SEC_THRSH_HI) && 2932 + (eq->aic_obj.prev_eqd == EQ_AIC_MIN_EQD)) { 2933 + eq->aic_obj.prev_eqd = EQ_AIC_MAX_EQD; 2934 + num++; 2935 + } else if ((eq_intr < EQ_INTR_PER_SEC_THRSH_LOW) && 2936 + (eq->aic_obj.prev_eqd == EQ_AIC_MAX_EQD)) { 2937 + eq->aic_obj.prev_eqd = EQ_AIC_MIN_EQD; 2938 + num++; 2939 + } 2940 + } 2941 + eq->aic_obj.prev_eq_intr_cnt = eq->aic_obj.eq_intr_cnt; 2942 + } 2943 + 2944 + if (num) 2945 + status = ocrdma_modify_eqd(dev, &dev->eq_tbl[0], num); 2946 + schedule_delayed_work(&dev->eqd_work, msecs_to_jiffies(1000)); 2947 + } 2948 + 3026 2949 int ocrdma_init_hw(struct ocrdma_dev *dev) 3027 2950 { 3028 2951 int status; ··· 3144 2915 3145 2916 void ocrdma_cleanup_hw(struct ocrdma_dev *dev) 3146 2917 { 2918 + ocrdma_free_pd_pool(dev); 3147 2919 ocrdma_mbx_delete_ah_tbl(dev); 3148 2920 3149 2921 /* cleanup the eqs */
+2
drivers/infiniband/hw/ocrdma/ocrdma_hw.h
··· 136 136 int ocrdma_mbx_rdma_stats(struct ocrdma_dev *, bool reset); 137 137 char *port_speed_string(struct ocrdma_dev *dev); 138 138 void ocrdma_init_service_level(struct ocrdma_dev *); 139 + void ocrdma_alloc_pd_pool(struct ocrdma_dev *dev); 140 + void ocrdma_free_pd_range(struct ocrdma_dev *dev); 139 141 140 142 #endif /* __OCRDMA_HW_H__ */
+9 -3
drivers/infiniband/hw/ocrdma/ocrdma_main.c
··· 239 239 240 240 dev->ibdev.node_type = RDMA_NODE_IB_CA; 241 241 dev->ibdev.phys_port_cnt = 1; 242 - dev->ibdev.num_comp_vectors = 1; 242 + dev->ibdev.num_comp_vectors = dev->eq_cnt; 243 243 244 244 /* mandatory verbs. */ 245 245 dev->ibdev.query_device = ocrdma_query_device; ··· 328 328 dev->stag_arr = kzalloc(sizeof(u64) * OCRDMA_MAX_STAG, GFP_KERNEL); 329 329 if (dev->stag_arr == NULL) 330 330 goto alloc_err; 331 + 332 + ocrdma_alloc_pd_pool(dev); 331 333 332 334 spin_lock_init(&dev->av_tbl.lock); 333 335 spin_lock_init(&dev->flush_q_lock); ··· 493 491 spin_unlock(&ocrdma_devlist_lock); 494 492 /* Init stats */ 495 493 ocrdma_add_port_stats(dev); 494 + /* Interrupt Moderation */ 495 + INIT_DELAYED_WORK(&dev->eqd_work, ocrdma_eqd_set_task); 496 + schedule_delayed_work(&dev->eqd_work, msecs_to_jiffies(1000)); 496 497 497 498 pr_info("%s %s: %s \"%s\" port %d\n", 498 499 dev_name(&dev->nic_info.pdev->dev), hca_name(dev), ··· 533 528 /* first unregister with stack to stop all the active traffic 534 529 * of the registered clients. 535 530 */ 536 - ocrdma_rem_port_stats(dev); 531 + cancel_delayed_work_sync(&dev->eqd_work); 537 532 ocrdma_remove_sysfiles(dev); 538 - 539 533 ib_unregister_device(&dev->ibdev); 534 + 535 + ocrdma_rem_port_stats(dev); 540 536 541 537 spin_lock(&ocrdma_devlist_lock); 542 538 list_del_rcu(&dev->entry);
+65 -3
drivers/infiniband/hw/ocrdma/ocrdma_sli.h
··· 75 75 OCRDMA_CMD_DESTROY_RBQ = 26, 76 76 77 77 OCRDMA_CMD_GET_RDMA_STATS = 27, 78 + OCRDMA_CMD_ALLOC_PD_RANGE = 28, 79 + OCRDMA_CMD_DEALLOC_PD_RANGE = 29, 78 80 79 81 OCRDMA_CMD_MAX 80 82 }; ··· 89 87 OCRDMA_CMD_CREATE_MQ = 21, 90 88 OCRDMA_CMD_GET_CTRL_ATTRIBUTES = 32, 91 89 OCRDMA_CMD_GET_FW_VER = 35, 90 + OCRDMA_CMD_MODIFY_EQ_DELAY = 41, 92 91 OCRDMA_CMD_DELETE_MQ = 53, 93 92 OCRDMA_CMD_DELETE_CQ = 54, 94 93 OCRDMA_CMD_DELETE_EQ = 55, ··· 104 101 QTYPE_MCCQ = 3 105 102 }; 106 103 107 - #define OCRDMA_MAX_SGID 8 104 + #define OCRDMA_MAX_SGID 16 108 105 109 106 #define OCRDMA_MAX_QP 2048 110 107 #define OCRDMA_MAX_CQ 2048 ··· 317 314 318 315 #define OCRDMA_EQ_MINOR_OTHER 0x1 319 316 317 + struct ocrmda_set_eqd { 318 + u32 eq_id; 319 + u32 phase; 320 + u32 delay_multiplier; 321 + }; 322 + 323 + struct ocrdma_modify_eqd_cmd { 324 + struct ocrdma_mbx_hdr req; 325 + u32 num_eq; 326 + struct ocrmda_set_eqd set_eqd[8]; 327 + } __packed; 328 + 329 + struct ocrdma_modify_eqd_req { 330 + struct ocrdma_mqe_hdr hdr; 331 + struct ocrdma_modify_eqd_cmd cmd; 332 + }; 333 + 334 + 335 + struct ocrdma_modify_eq_delay_rsp { 336 + struct ocrdma_mbx_rsp hdr; 337 + u32 rsvd0; 338 + } __packed; 339 + 320 340 enum { 321 341 OCRDMA_MCQE_STATUS_SHIFT = 0, 322 342 OCRDMA_MCQE_STATUS_MASK = 0xFFFF, ··· 467 441 OCRDMA_DEVICE_FATAL_EVENT = 0x08, 468 442 OCRDMA_SRQCAT_ERROR = 0x0E, 469 443 OCRDMA_SRQ_LIMIT_EVENT = 0x0F, 470 - OCRDMA_QP_LAST_WQE_EVENT = 0x10 444 + OCRDMA_QP_LAST_WQE_EVENT = 0x10, 445 + 446 + OCRDMA_MAX_ASYNC_ERRORS 471 447 }; 472 448 473 449 /* mailbox command request and responses */ ··· 1325 1297 struct ocrdma_mbx_rsp rsp; 1326 1298 }; 1327 1299 1300 + struct ocrdma_alloc_pd_range { 1301 + struct ocrdma_mqe_hdr hdr; 1302 + struct ocrdma_mbx_hdr req; 1303 + u32 enable_dpp_rsvd; 1304 + u32 pd_count; 1305 + }; 1306 + 1307 + struct ocrdma_alloc_pd_range_rsp { 1308 + struct ocrdma_mqe_hdr hdr; 1309 + struct ocrdma_mbx_rsp rsp; 1310 + u32 dpp_page_pdid; 1311 + u32 pd_count; 1312 + }; 1313 + 1314 + enum { 1315 + OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK = 0xFFFF, 1316 + }; 1317 + 1318 + struct ocrdma_dealloc_pd_range { 1319 + struct ocrdma_mqe_hdr hdr; 1320 + struct ocrdma_mbx_hdr req; 1321 + u32 start_pd_id; 1322 + u32 pd_count; 1323 + }; 1324 + 1325 + struct ocrdma_dealloc_pd_range_rsp { 1326 + struct ocrdma_mqe_hdr hdr; 1327 + struct ocrdma_mbx_hdr req; 1328 + u32 rsvd; 1329 + }; 1330 + 1328 1331 enum { 1329 1332 OCRDMA_ADDR_CHECK_ENABLE = 1, 1330 1333 OCRDMA_ADDR_CHECK_DISABLE = 0 ··· 1656 1597 OCRDMA_CQE_INV_EEC_STATE_ERR, 1657 1598 OCRDMA_CQE_FATAL_ERR, 1658 1599 OCRDMA_CQE_RESP_TIMEOUT_ERR, 1659 - OCRDMA_CQE_GENERAL_ERR 1600 + OCRDMA_CQE_GENERAL_ERR, 1601 + 1602 + OCRDMA_MAX_CQE_ERR 1660 1603 }; 1661 1604 1662 1605 enum { ··· 1734 1673 OCRDMA_FLAG_FENCE_R = 0x8, 1735 1674 OCRDMA_FLAG_SOLICIT = 0x10, 1736 1675 OCRDMA_FLAG_IMM = 0x20, 1676 + OCRDMA_FLAG_AH_VLAN_PR = 0x40, 1737 1677 1738 1678 /* Stag flags */ 1739 1679 OCRDMA_LKEY_FLAG_LOCAL_WR = 0x1,
+241
drivers/infiniband/hw/ocrdma/ocrdma_stats.c
··· 26 26 *******************************************************************/ 27 27 28 28 #include <rdma/ib_addr.h> 29 + #include <rdma/ib_pma.h> 29 30 #include "ocrdma_stats.h" 30 31 31 32 static struct dentry *ocrdma_dbgfs_dir; ··· 250 249 return stats; 251 250 } 252 251 252 + static u64 ocrdma_sysfs_rcv_pkts(struct ocrdma_dev *dev) 253 + { 254 + struct ocrdma_rdma_stats_resp *rdma_stats = 255 + (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va; 256 + struct ocrdma_rx_stats *rx_stats = &rdma_stats->rx_stats; 257 + 258 + return convert_to_64bit(rx_stats->roce_frames_lo, 259 + rx_stats->roce_frames_hi) + (u64)rx_stats->roce_frame_icrc_drops 260 + + (u64)rx_stats->roce_frame_payload_len_drops; 261 + } 262 + 263 + static u64 ocrdma_sysfs_rcv_data(struct ocrdma_dev *dev) 264 + { 265 + struct ocrdma_rdma_stats_resp *rdma_stats = 266 + (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va; 267 + struct ocrdma_rx_stats *rx_stats = &rdma_stats->rx_stats; 268 + 269 + return (convert_to_64bit(rx_stats->roce_frame_bytes_lo, 270 + rx_stats->roce_frame_bytes_hi))/4; 271 + } 272 + 253 273 static char *ocrdma_tx_stats(struct ocrdma_dev *dev) 254 274 { 255 275 char *stats = dev->stats_mem.debugfs_mem, *pcur; ··· 312 290 (u64)tx_stats->ack_timeouts); 313 291 314 292 return stats; 293 + } 294 + 295 + static u64 ocrdma_sysfs_xmit_pkts(struct ocrdma_dev *dev) 296 + { 297 + struct ocrdma_rdma_stats_resp *rdma_stats = 298 + (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va; 299 + struct ocrdma_tx_stats *tx_stats = &rdma_stats->tx_stats; 300 + 301 + return (convert_to_64bit(tx_stats->send_pkts_lo, 302 + tx_stats->send_pkts_hi) + 303 + convert_to_64bit(tx_stats->write_pkts_lo, tx_stats->write_pkts_hi) + 304 + convert_to_64bit(tx_stats->read_pkts_lo, tx_stats->read_pkts_hi) + 305 + convert_to_64bit(tx_stats->read_rsp_pkts_lo, 306 + tx_stats->read_rsp_pkts_hi) + 307 + convert_to_64bit(tx_stats->ack_pkts_lo, tx_stats->ack_pkts_hi)); 308 + } 309 + 310 + static u64 ocrdma_sysfs_xmit_data(struct ocrdma_dev *dev) 311 + { 312 + struct ocrdma_rdma_stats_resp *rdma_stats = 313 + (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va; 314 + struct ocrdma_tx_stats *tx_stats = &rdma_stats->tx_stats; 315 + 316 + return (convert_to_64bit(tx_stats->send_bytes_lo, 317 + tx_stats->send_bytes_hi) + 318 + convert_to_64bit(tx_stats->write_bytes_lo, 319 + tx_stats->write_bytes_hi) + 320 + convert_to_64bit(tx_stats->read_req_bytes_lo, 321 + tx_stats->read_req_bytes_hi) + 322 + convert_to_64bit(tx_stats->read_rsp_bytes_lo, 323 + tx_stats->read_rsp_bytes_hi))/4; 315 324 } 316 325 317 326 static char *ocrdma_wqe_stats(struct ocrdma_dev *dev) ··· 485 432 return dev->stats_mem.debugfs_mem; 486 433 } 487 434 435 + static char *ocrdma_driver_dbg_stats(struct ocrdma_dev *dev) 436 + { 437 + char *stats = dev->stats_mem.debugfs_mem, *pcur; 438 + 439 + 440 + memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM)); 441 + 442 + pcur = stats; 443 + pcur += ocrdma_add_stat(stats, pcur, "async_cq_err", 444 + (u64)(dev->async_err_stats 445 + [OCRDMA_CQ_ERROR].counter)); 446 + pcur += ocrdma_add_stat(stats, pcur, "async_cq_overrun_err", 447 + (u64)dev->async_err_stats 448 + [OCRDMA_CQ_OVERRUN_ERROR].counter); 449 + pcur += ocrdma_add_stat(stats, pcur, "async_cq_qpcat_err", 450 + (u64)dev->async_err_stats 451 + [OCRDMA_CQ_QPCAT_ERROR].counter); 452 + pcur += ocrdma_add_stat(stats, pcur, "async_qp_access_err", 453 + (u64)dev->async_err_stats 454 + [OCRDMA_QP_ACCESS_ERROR].counter); 455 + pcur += ocrdma_add_stat(stats, pcur, "async_qp_commm_est_evt", 456 + 
(u64)dev->async_err_stats 457 + [OCRDMA_QP_COMM_EST_EVENT].counter); 458 + pcur += ocrdma_add_stat(stats, pcur, "async_sq_drained_evt", 459 + (u64)dev->async_err_stats 460 + [OCRDMA_SQ_DRAINED_EVENT].counter); 461 + pcur += ocrdma_add_stat(stats, pcur, "async_dev_fatal_evt", 462 + (u64)dev->async_err_stats 463 + [OCRDMA_DEVICE_FATAL_EVENT].counter); 464 + pcur += ocrdma_add_stat(stats, pcur, "async_srqcat_err", 465 + (u64)dev->async_err_stats 466 + [OCRDMA_SRQCAT_ERROR].counter); 467 + pcur += ocrdma_add_stat(stats, pcur, "async_srq_limit_evt", 468 + (u64)dev->async_err_stats 469 + [OCRDMA_SRQ_LIMIT_EVENT].counter); 470 + pcur += ocrdma_add_stat(stats, pcur, "async_qp_last_wqe_evt", 471 + (u64)dev->async_err_stats 472 + [OCRDMA_QP_LAST_WQE_EVENT].counter); 473 + 474 + pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_len_err", 475 + (u64)dev->cqe_err_stats 476 + [OCRDMA_CQE_LOC_LEN_ERR].counter); 477 + pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_qp_op_err", 478 + (u64)dev->cqe_err_stats 479 + [OCRDMA_CQE_LOC_QP_OP_ERR].counter); 480 + pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_eec_op_err", 481 + (u64)dev->cqe_err_stats 482 + [OCRDMA_CQE_LOC_EEC_OP_ERR].counter); 483 + pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_prot_err", 484 + (u64)dev->cqe_err_stats 485 + [OCRDMA_CQE_LOC_PROT_ERR].counter); 486 + pcur += ocrdma_add_stat(stats, pcur, "cqe_wr_flush_err", 487 + (u64)dev->cqe_err_stats 488 + [OCRDMA_CQE_WR_FLUSH_ERR].counter); 489 + pcur += ocrdma_add_stat(stats, pcur, "cqe_mw_bind_err", 490 + (u64)dev->cqe_err_stats 491 + [OCRDMA_CQE_MW_BIND_ERR].counter); 492 + pcur += ocrdma_add_stat(stats, pcur, "cqe_bad_resp_err", 493 + (u64)dev->cqe_err_stats 494 + [OCRDMA_CQE_BAD_RESP_ERR].counter); 495 + pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_access_err", 496 + (u64)dev->cqe_err_stats 497 + [OCRDMA_CQE_LOC_ACCESS_ERR].counter); 498 + pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_inv_req_err", 499 + (u64)dev->cqe_err_stats 500 + [OCRDMA_CQE_REM_INV_REQ_ERR].counter); 501 + pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_access_err", 502 + (u64)dev->cqe_err_stats 503 + [OCRDMA_CQE_REM_ACCESS_ERR].counter); 504 + pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_op_err", 505 + (u64)dev->cqe_err_stats 506 + [OCRDMA_CQE_REM_OP_ERR].counter); 507 + pcur += ocrdma_add_stat(stats, pcur, "cqe_retry_exc_err", 508 + (u64)dev->cqe_err_stats 509 + [OCRDMA_CQE_RETRY_EXC_ERR].counter); 510 + pcur += ocrdma_add_stat(stats, pcur, "cqe_rnr_retry_exc_err", 511 + (u64)dev->cqe_err_stats 512 + [OCRDMA_CQE_RNR_RETRY_EXC_ERR].counter); 513 + pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_rdd_viol_err", 514 + (u64)dev->cqe_err_stats 515 + [OCRDMA_CQE_LOC_RDD_VIOL_ERR].counter); 516 + pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_inv_rd_req_err", 517 + (u64)dev->cqe_err_stats 518 + [OCRDMA_CQE_REM_INV_RD_REQ_ERR].counter); 519 + pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_abort_err", 520 + (u64)dev->cqe_err_stats 521 + [OCRDMA_CQE_REM_ABORT_ERR].counter); 522 + pcur += ocrdma_add_stat(stats, pcur, "cqe_inv_eecn_err", 523 + (u64)dev->cqe_err_stats 524 + [OCRDMA_CQE_INV_EECN_ERR].counter); 525 + pcur += ocrdma_add_stat(stats, pcur, "cqe_inv_eec_state_err", 526 + (u64)dev->cqe_err_stats 527 + [OCRDMA_CQE_INV_EEC_STATE_ERR].counter); 528 + pcur += ocrdma_add_stat(stats, pcur, "cqe_fatal_err", 529 + (u64)dev->cqe_err_stats 530 + [OCRDMA_CQE_FATAL_ERR].counter); 531 + pcur += ocrdma_add_stat(stats, pcur, "cqe_resp_timeout_err", 532 + (u64)dev->cqe_err_stats 533 + [OCRDMA_CQE_RESP_TIMEOUT_ERR].counter); 534 + pcur += 
ocrdma_add_stat(stats, pcur, "cqe_general_err", 535 + (u64)dev->cqe_err_stats 536 + [OCRDMA_CQE_GENERAL_ERR].counter); 537 + return stats; 538 + } 539 + 488 540 static void ocrdma_update_stats(struct ocrdma_dev *dev) 489 541 { 490 542 ulong now = jiffies, secs; 491 543 int status = 0; 544 + struct ocrdma_rdma_stats_resp *rdma_stats = 545 + (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va; 546 + struct ocrdma_rsrc_stats *rsrc_stats = &rdma_stats->act_rsrc_stats; 492 547 493 548 secs = jiffies_to_msecs(now - dev->last_stats_time) / 1000U; 494 549 if (secs) { ··· 605 444 if (status) 606 445 pr_err("%s: stats mbox failed with status = %d\n", 607 446 __func__, status); 447 + /* Update PD counters from PD resource manager */ 448 + if (dev->pd_mgr->pd_prealloc_valid) { 449 + rsrc_stats->dpp_pds = dev->pd_mgr->pd_dpp_count; 450 + rsrc_stats->non_dpp_pds = dev->pd_mgr->pd_norm_count; 451 + /* Threshold stata*/ 452 + rsrc_stats = &rdma_stats->th_rsrc_stats; 453 + rsrc_stats->dpp_pds = dev->pd_mgr->pd_dpp_thrsh; 454 + rsrc_stats->non_dpp_pds = dev->pd_mgr->pd_norm_thrsh; 455 + } 608 456 dev->last_stats_time = jiffies; 609 457 } 458 + } 459 + 460 + static ssize_t ocrdma_dbgfs_ops_write(struct file *filp, 461 + const char __user *buffer, 462 + size_t count, loff_t *ppos) 463 + { 464 + char tmp_str[32]; 465 + long reset; 466 + int status = 0; 467 + struct ocrdma_stats *pstats = filp->private_data; 468 + struct ocrdma_dev *dev = pstats->dev; 469 + 470 + if (count > 32) 471 + goto err; 472 + 473 + if (copy_from_user(tmp_str, buffer, count)) 474 + goto err; 475 + 476 + tmp_str[count-1] = '\0'; 477 + if (kstrtol(tmp_str, 10, &reset)) 478 + goto err; 479 + 480 + switch (pstats->type) { 481 + case OCRDMA_RESET_STATS: 482 + if (reset) { 483 + status = ocrdma_mbx_rdma_stats(dev, true); 484 + if (status) { 485 + pr_err("Failed to reset stats = %d", status); 486 + goto err; 487 + } 488 + } 489 + break; 490 + default: 491 + goto err; 492 + } 493 + 494 + return count; 495 + err: 496 + return -EFAULT; 497 + } 498 + 499 + int ocrdma_pma_counters(struct ocrdma_dev *dev, 500 + struct ib_mad *out_mad) 501 + { 502 + struct ib_pma_portcounters *pma_cnt; 503 + 504 + memset(out_mad->data, 0, sizeof out_mad->data); 505 + pma_cnt = (void *)(out_mad->data + 40); 506 + ocrdma_update_stats(dev); 507 + 508 + pma_cnt->port_xmit_data = cpu_to_be32(ocrdma_sysfs_xmit_data(dev)); 509 + pma_cnt->port_rcv_data = cpu_to_be32(ocrdma_sysfs_rcv_data(dev)); 510 + pma_cnt->port_xmit_packets = cpu_to_be32(ocrdma_sysfs_xmit_pkts(dev)); 511 + pma_cnt->port_rcv_packets = cpu_to_be32(ocrdma_sysfs_rcv_pkts(dev)); 512 + return 0; 610 513 } 611 514 612 515 static ssize_t ocrdma_dbgfs_ops_read(struct file *filp, char __user *buffer, ··· 717 492 case OCRDMA_RX_DBG_STATS: 718 493 data = ocrdma_rx_dbg_stats(dev); 719 494 break; 495 + case OCRDMA_DRV_STATS: 496 + data = ocrdma_driver_dbg_stats(dev); 497 + break; 720 498 721 499 default: 722 500 status = -EFAULT; ··· 742 514 .owner = THIS_MODULE, 743 515 .open = simple_open, 744 516 .read = ocrdma_dbgfs_ops_read, 517 + .write = ocrdma_dbgfs_ops_write, 745 518 }; 746 519 747 520 void ocrdma_add_port_stats(struct ocrdma_dev *dev) ··· 809 580 dev->rx_dbg_stats.dev = dev; 810 581 if (!debugfs_create_file("rx_dbg_stats", S_IRUSR, dev->dir, 811 582 &dev->rx_dbg_stats, &ocrdma_dbg_ops)) 583 + goto err; 584 + 585 + dev->driver_stats.type = OCRDMA_DRV_STATS; 586 + dev->driver_stats.dev = dev; 587 + if (!debugfs_create_file("driver_dbg_stats", S_IRUSR, dev->dir, 588 + &dev->driver_stats, &ocrdma_dbg_ops)) 589 + 
goto err; 590 + 591 + dev->reset_stats.type = OCRDMA_RESET_STATS; 592 + dev->reset_stats.dev = dev; 593 + if (!debugfs_create_file("reset_stats", S_IRUSR, dev->dir, 594 + &dev->reset_stats, &ocrdma_dbg_ops)) 812 595 goto err; 813 596 814 597 /* Now create dma_mem for stats mbx command */
+5 -1
drivers/infiniband/hw/ocrdma/ocrdma_stats.h
··· 43 43 OCRDMA_RXQP_ERRSTATS, 44 44 OCRDMA_TXQP_ERRSTATS, 45 45 OCRDMA_TX_DBG_STATS, 46 - OCRDMA_RX_DBG_STATS 46 + OCRDMA_RX_DBG_STATS, 47 + OCRDMA_DRV_STATS, 48 + OCRDMA_RESET_STATS 47 49 }; 48 50 49 51 void ocrdma_rem_debugfs(void); 50 52 void ocrdma_init_debugfs(void); 51 53 void ocrdma_rem_port_stats(struct ocrdma_dev *dev); 52 54 void ocrdma_add_port_stats(struct ocrdma_dev *dev); 55 + int ocrdma_pma_counters(struct ocrdma_dev *dev, 56 + struct ib_mad *out_mad); 53 57 54 58 #endif /* __OCRDMA_STATS_H__ */
+147 -36
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
··· 53 53 54 54 dev = get_ocrdma_dev(ibdev); 55 55 memset(sgid, 0, sizeof(*sgid)); 56 - if (index > OCRDMA_MAX_SGID) 56 + if (index >= OCRDMA_MAX_SGID) 57 57 return -EINVAL; 58 58 59 59 memcpy(sgid, &dev->sgid_tbl[index], sizeof(*sgid)); ··· 253 253 return found; 254 254 } 255 255 256 + 257 + static u16 _ocrdma_pd_mgr_get_bitmap(struct ocrdma_dev *dev, bool dpp_pool) 258 + { 259 + u16 pd_bitmap_idx = 0; 260 + const unsigned long *pd_bitmap; 261 + 262 + if (dpp_pool) { 263 + pd_bitmap = dev->pd_mgr->pd_dpp_bitmap; 264 + pd_bitmap_idx = find_first_zero_bit(pd_bitmap, 265 + dev->pd_mgr->max_dpp_pd); 266 + __set_bit(pd_bitmap_idx, dev->pd_mgr->pd_dpp_bitmap); 267 + dev->pd_mgr->pd_dpp_count++; 268 + if (dev->pd_mgr->pd_dpp_count > dev->pd_mgr->pd_dpp_thrsh) 269 + dev->pd_mgr->pd_dpp_thrsh = dev->pd_mgr->pd_dpp_count; 270 + } else { 271 + pd_bitmap = dev->pd_mgr->pd_norm_bitmap; 272 + pd_bitmap_idx = find_first_zero_bit(pd_bitmap, 273 + dev->pd_mgr->max_normal_pd); 274 + __set_bit(pd_bitmap_idx, dev->pd_mgr->pd_norm_bitmap); 275 + dev->pd_mgr->pd_norm_count++; 276 + if (dev->pd_mgr->pd_norm_count > dev->pd_mgr->pd_norm_thrsh) 277 + dev->pd_mgr->pd_norm_thrsh = dev->pd_mgr->pd_norm_count; 278 + } 279 + return pd_bitmap_idx; 280 + } 281 + 282 + static int _ocrdma_pd_mgr_put_bitmap(struct ocrdma_dev *dev, u16 pd_id, 283 + bool dpp_pool) 284 + { 285 + u16 pd_count; 286 + u16 pd_bit_index; 287 + 288 + pd_count = dpp_pool ? dev->pd_mgr->pd_dpp_count : 289 + dev->pd_mgr->pd_norm_count; 290 + if (pd_count == 0) 291 + return -EINVAL; 292 + 293 + if (dpp_pool) { 294 + pd_bit_index = pd_id - dev->pd_mgr->pd_dpp_start; 295 + if (pd_bit_index >= dev->pd_mgr->max_dpp_pd) { 296 + return -EINVAL; 297 + } else { 298 + __clear_bit(pd_bit_index, dev->pd_mgr->pd_dpp_bitmap); 299 + dev->pd_mgr->pd_dpp_count--; 300 + } 301 + } else { 302 + pd_bit_index = pd_id - dev->pd_mgr->pd_norm_start; 303 + if (pd_bit_index >= dev->pd_mgr->max_normal_pd) { 304 + return -EINVAL; 305 + } else { 306 + __clear_bit(pd_bit_index, dev->pd_mgr->pd_norm_bitmap); 307 + dev->pd_mgr->pd_norm_count--; 308 + } 309 + } 310 + 311 + return 0; 312 + } 313 + 314 + static u8 ocrdma_put_pd_num(struct ocrdma_dev *dev, u16 pd_id, 315 + bool dpp_pool) 316 + { 317 + int status; 318 + 319 + mutex_lock(&dev->dev_lock); 320 + status = _ocrdma_pd_mgr_put_bitmap(dev, pd_id, dpp_pool); 321 + mutex_unlock(&dev->dev_lock); 322 + return status; 323 + } 324 + 325 + static int ocrdma_get_pd_num(struct ocrdma_dev *dev, struct ocrdma_pd *pd) 326 + { 327 + u16 pd_idx = 0; 328 + int status = 0; 329 + 330 + mutex_lock(&dev->dev_lock); 331 + if (pd->dpp_enabled) { 332 + /* try allocating DPP PD, if not available then normal PD */ 333 + if (dev->pd_mgr->pd_dpp_count < dev->pd_mgr->max_dpp_pd) { 334 + pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, true); 335 + pd->id = dev->pd_mgr->pd_dpp_start + pd_idx; 336 + pd->dpp_page = dev->pd_mgr->dpp_page_index + pd_idx; 337 + } else if (dev->pd_mgr->pd_norm_count < 338 + dev->pd_mgr->max_normal_pd) { 339 + pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, false); 340 + pd->id = dev->pd_mgr->pd_norm_start + pd_idx; 341 + pd->dpp_enabled = false; 342 + } else { 343 + status = -EINVAL; 344 + } 345 + } else { 346 + if (dev->pd_mgr->pd_norm_count < dev->pd_mgr->max_normal_pd) { 347 + pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, false); 348 + pd->id = dev->pd_mgr->pd_norm_start + pd_idx; 349 + } else { 350 + status = -EINVAL; 351 + } 352 + } 353 + mutex_unlock(&dev->dev_lock); 354 + return status; 355 + } 356 + 256 357 static struct ocrdma_pd 
*_ocrdma_alloc_pd(struct ocrdma_dev *dev, 257 358 struct ocrdma_ucontext *uctx, 258 359 struct ib_udata *udata) ··· 371 270 pd->num_dpp_qp = 372 271 pd->dpp_enabled ? (dev->nic_info.db_page_size / 373 272 dev->attr.wqe_size) : 0; 273 + } 274 + 275 + if (dev->pd_mgr->pd_prealloc_valid) { 276 + status = ocrdma_get_pd_num(dev, pd); 277 + return (status == 0) ? pd : ERR_PTR(status); 374 278 } 375 279 376 280 retry: ··· 405 299 { 406 300 int status = 0; 407 301 408 - status = ocrdma_mbx_dealloc_pd(dev, pd); 302 + if (dev->pd_mgr->pd_prealloc_valid) 303 + status = ocrdma_put_pd_num(dev, pd->id, pd->dpp_enabled); 304 + else 305 + status = ocrdma_mbx_dealloc_pd(dev, pd); 306 + 409 307 kfree(pd); 410 308 return status; 411 309 } ··· 435 325 436 326 static int ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx) 437 327 { 438 - int status = 0; 439 328 struct ocrdma_pd *pd = uctx->cntxt_pd; 440 329 struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device); 441 330 ··· 443 334 __func__, dev->id, pd->id); 444 335 } 445 336 uctx->cntxt_pd = NULL; 446 - status = _ocrdma_dealloc_pd(dev, pd); 447 - return status; 337 + (void)_ocrdma_dealloc_pd(dev, pd); 338 + return 0; 448 339 } 449 340 450 341 static struct ocrdma_pd *ocrdma_get_ucontext_pd(struct ocrdma_ucontext *uctx) ··· 678 569 if (is_uctx_pd) { 679 570 ocrdma_release_ucontext_pd(uctx); 680 571 } else { 681 - status = ocrdma_mbx_dealloc_pd(dev, pd); 572 + status = _ocrdma_dealloc_pd(dev, pd); 682 573 kfree(pd); 683 574 } 684 575 exit: ··· 946 837 { 947 838 struct ocrdma_mr *mr = get_ocrdma_mr(ib_mr); 948 839 struct ocrdma_dev *dev = get_ocrdma_dev(ib_mr->device); 949 - int status; 950 840 951 - status = ocrdma_mbx_dealloc_lkey(dev, mr->hwmr.fr_mr, mr->hwmr.lkey); 841 + (void) ocrdma_mbx_dealloc_lkey(dev, mr->hwmr.fr_mr, mr->hwmr.lkey); 952 842 953 843 ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr); 954 844 ··· 958 850 959 851 /* Don't stop cleanup, in case FW is unresponsive */ 960 852 if (dev->mqe_ctx.fw_error_state) { 961 - status = 0; 962 853 pr_err("%s(%d) fw not responding.\n", 963 854 __func__, dev->id); 964 855 } 965 - return status; 856 + return 0; 966 857 } 967 858 968 859 static int ocrdma_copy_cq_uresp(struct ocrdma_dev *dev, struct ocrdma_cq *cq, ··· 1093 986 1094 987 int ocrdma_destroy_cq(struct ib_cq *ibcq) 1095 988 { 1096 - int status; 1097 989 struct ocrdma_cq *cq = get_ocrdma_cq(ibcq); 1098 990 struct ocrdma_eq *eq = NULL; 1099 991 struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device); ··· 1109 1003 synchronize_irq(irq); 1110 1004 ocrdma_flush_cq(cq); 1111 1005 1112 - status = ocrdma_mbx_destroy_cq(dev, cq); 1006 + (void)ocrdma_mbx_destroy_cq(dev, cq); 1113 1007 if (cq->ucontext) { 1114 1008 pdid = cq->ucontext->cntxt_pd->id; 1115 1009 ocrdma_del_mmap(cq->ucontext, (u64) cq->pa, ··· 1120 1014 } 1121 1015 1122 1016 kfree(cq); 1123 - return status; 1017 + return 0; 1124 1018 } 1125 1019 1126 1020 static int ocrdma_add_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp) ··· 1219 1113 int status = 0; 1220 1114 u64 usr_db; 1221 1115 struct ocrdma_create_qp_uresp uresp; 1222 - struct ocrdma_dev *dev = qp->dev; 1223 1116 struct ocrdma_pd *pd = qp->pd; 1117 + struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device); 1224 1118 1225 1119 memset(&uresp, 0, sizeof(uresp)); 1226 1120 usr_db = dev->nic_info.unmapped_db + ··· 1359 1253 status = -ENOMEM; 1360 1254 goto gen_err; 1361 1255 } 1362 - qp->dev = dev; 1363 1256 ocrdma_set_qp_init_params(qp, pd, attrs); 1364 1257 if (udata == NULL) 1365 1258 qp->cap_flags |= (OCRDMA_QP_MW_BIND | OCRDMA_QP_LKEY0 
| ··· 1417 1312 enum ib_qp_state old_qps; 1418 1313 1419 1314 qp = get_ocrdma_qp(ibqp); 1420 - dev = qp->dev; 1315 + dev = get_ocrdma_dev(ibqp->device); 1421 1316 if (attr_mask & IB_QP_STATE) 1422 1317 status = ocrdma_qp_state_change(qp, attr->qp_state, &old_qps); 1423 1318 /* if new and previous states are same hw doesn't need to ··· 1440 1335 enum ib_qp_state old_qps, new_qps; 1441 1336 1442 1337 qp = get_ocrdma_qp(ibqp); 1443 - dev = qp->dev; 1338 + dev = get_ocrdma_dev(ibqp->device); 1444 1339 1445 1340 /* syncronize with multiple context trying to change, retrive qps */ 1446 1341 mutex_lock(&dev->dev_lock); ··· 1507 1402 u32 qp_state; 1508 1403 struct ocrdma_qp_params params; 1509 1404 struct ocrdma_qp *qp = get_ocrdma_qp(ibqp); 1510 - struct ocrdma_dev *dev = qp->dev; 1405 + struct ocrdma_dev *dev = get_ocrdma_dev(ibqp->device); 1511 1406 1512 1407 memset(&params, 0, sizeof(params)); 1513 1408 mutex_lock(&dev->dev_lock); ··· 1517 1412 goto mbx_err; 1518 1413 if (qp->qp_type == IB_QPT_UD) 1519 1414 qp_attr->qkey = params.qkey; 1520 - qp_attr->qp_state = get_ibqp_state(IB_QPS_INIT); 1521 - qp_attr->cur_qp_state = get_ibqp_state(IB_QPS_INIT); 1522 1415 qp_attr->path_mtu = 1523 1416 ocrdma_mtu_int_to_enum(params.path_mtu_pkey_indx & 1524 1417 OCRDMA_QP_PARAMS_PATH_MTU_MASK) >> ··· 1571 1468 memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr)); 1572 1469 qp_state = (params.max_sge_recv_flags & OCRDMA_QP_PARAMS_STATE_MASK) >> 1573 1470 OCRDMA_QP_PARAMS_STATE_SHIFT; 1471 + qp_attr->qp_state = get_ibqp_state(qp_state); 1472 + qp_attr->cur_qp_state = qp_attr->qp_state; 1574 1473 qp_attr->sq_draining = (qp_state == OCRDMA_QPS_SQ_DRAINING) ? 1 : 0; 1575 1474 qp_attr->max_dest_rd_atomic = 1576 1475 params.max_ord_ird >> OCRDMA_QP_PARAMS_MAX_ORD_SHIFT; ··· 1580 1475 params.max_ord_ird & OCRDMA_QP_PARAMS_MAX_IRD_MASK; 1581 1476 qp_attr->en_sqd_async_notify = (params.max_sge_recv_flags & 1582 1477 OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC) ? 1 : 0; 1478 + /* Sync driver QP state with FW */ 1479 + ocrdma_qp_state_change(qp, qp_attr->qp_state, NULL); 1583 1480 mbx_err: 1584 1481 return status; 1585 1482 } 1586 1483 1587 - static void ocrdma_srq_toggle_bit(struct ocrdma_srq *srq, int idx) 1484 + static void ocrdma_srq_toggle_bit(struct ocrdma_srq *srq, unsigned int idx) 1588 1485 { 1589 - int i = idx / 32; 1590 - unsigned int mask = (1 << (idx % 32)); 1486 + unsigned int i = idx / 32; 1487 + u32 mask = (1U << (idx % 32)); 1591 1488 1592 - if (srq->idx_bit_fields[i] & mask) 1593 - srq->idx_bit_fields[i] &= ~mask; 1594 - else 1595 - srq->idx_bit_fields[i] |= mask; 1489 + srq->idx_bit_fields[i] ^= mask; 1596 1490 } 1597 1491 1598 1492 static int ocrdma_hwq_free_cnt(struct ocrdma_qp_hwq_info *q) ··· 1700 1596 { 1701 1597 int found = false; 1702 1598 unsigned long flags; 1703 - struct ocrdma_dev *dev = qp->dev; 1599 + struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device); 1704 1600 /* sync with any active CQ poll */ 1705 1601 1706 1602 spin_lock_irqsave(&dev->flush_q_lock, flags); ··· 1717 1613 1718 1614 int ocrdma_destroy_qp(struct ib_qp *ibqp) 1719 1615 { 1720 - int status; 1721 1616 struct ocrdma_pd *pd; 1722 1617 struct ocrdma_qp *qp; 1723 1618 struct ocrdma_dev *dev; ··· 1725 1622 unsigned long flags; 1726 1623 1727 1624 qp = get_ocrdma_qp(ibqp); 1728 - dev = qp->dev; 1625 + dev = get_ocrdma_dev(ibqp->device); 1729 1626 1730 1627 attrs.qp_state = IB_QPS_ERR; 1731 1628 pd = qp->pd; ··· 1738 1635 * discarded until the old CQEs are discarded. 
1739 1636 */ 1740 1637 mutex_lock(&dev->dev_lock); 1741 - status = ocrdma_mbx_destroy_qp(dev, qp); 1638 + (void) ocrdma_mbx_destroy_qp(dev, qp); 1742 1639 1743 1640 /* 1744 1641 * acquire CQ lock while destroy is in progress, in order to ··· 1773 1670 kfree(qp->wqe_wr_id_tbl); 1774 1671 kfree(qp->rqe_wr_id_tbl); 1775 1672 kfree(qp); 1776 - return status; 1673 + return 0; 1777 1674 } 1778 1675 1779 1676 static int ocrdma_copy_srq_uresp(struct ocrdma_dev *dev, struct ocrdma_srq *srq, ··· 1934 1831 else 1935 1832 ud_hdr->qkey = wr->wr.ud.remote_qkey; 1936 1833 ud_hdr->rsvd_ahid = ah->id; 1834 + if (ah->av->valid & OCRDMA_AV_VLAN_VALID) 1835 + hdr->cw |= (OCRDMA_FLAG_AH_VLAN_PR << OCRDMA_WQE_FLAGS_SHIFT); 1937 1836 } 1938 1837 1939 1838 static void ocrdma_build_sges(struct ocrdma_hdr_wqe *hdr, ··· 2112 2007 u64 fbo; 2113 2008 struct ocrdma_ewqe_fr *fast_reg = (struct ocrdma_ewqe_fr *)(hdr + 1); 2114 2009 struct ocrdma_mr *mr; 2010 + struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device); 2115 2011 u32 wqe_size = sizeof(*fast_reg) + sizeof(*hdr); 2116 2012 2117 2013 wqe_size = roundup(wqe_size, OCRDMA_WQE_ALIGN_BYTES); 2118 2014 2119 - if (wr->wr.fast_reg.page_list_len > qp->dev->attr.max_pages_per_frmr) 2015 + if (wr->wr.fast_reg.page_list_len > dev->attr.max_pages_per_frmr) 2120 2016 return -EINVAL; 2121 2017 2122 2018 hdr->cw |= (OCRDMA_FR_MR << OCRDMA_WQE_OPCODE_SHIFT); ··· 2145 2039 fast_reg->size_sge = 2146 2040 get_encoded_page_size(1 << wr->wr.fast_reg.page_shift); 2147 2041 mr = (struct ocrdma_mr *) (unsigned long) 2148 - qp->dev->stag_arr[(hdr->lkey >> 8) & (OCRDMA_MAX_STAG - 1)]; 2042 + dev->stag_arr[(hdr->lkey >> 8) & (OCRDMA_MAX_STAG - 1)]; 2149 2043 build_frmr_pbes(wr, mr->hwmr.pbl_table, &mr->hwmr); 2150 2044 return 0; 2151 2045 } ··· 2218 2112 hdr->cw |= (OCRDMA_WRITE << OCRDMA_WQE_OPCODE_SHIFT); 2219 2113 status = ocrdma_build_write(qp, hdr, wr); 2220 2114 break; 2221 - case IB_WR_RDMA_READ_WITH_INV: 2222 - hdr->cw |= (OCRDMA_FLAG_INV << OCRDMA_WQE_FLAGS_SHIFT); 2223 2115 case IB_WR_RDMA_READ: 2224 2116 ocrdma_build_read(qp, hdr, wr); 2225 2117 break; ··· 2588 2484 bool *polled, bool *stop) 2589 2485 { 2590 2486 bool expand; 2487 + struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device); 2591 2488 int status = (le32_to_cpu(cqe->flags_status_srcqpn) & 2592 2489 OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT; 2490 + if (status < OCRDMA_MAX_CQE_ERR) 2491 + atomic_inc(&dev->cqe_err_stats[status]); 2593 2492 2594 2493 /* when hw sq is empty, but rq is not empty, so we continue 2595 2494 * to keep the cqe in order to get the cq event again. ··· 2711 2604 int status) 2712 2605 { 2713 2606 bool expand; 2607 + struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device); 2608 + 2609 + if (status < OCRDMA_MAX_CQE_ERR) 2610 + atomic_inc(&dev->cqe_err_stats[status]); 2714 2611 2715 2612 /* when hw_rq is empty, but wq is not empty, so continue 2716 2613 * to keep the cqe to get the cq event again.
+6 -10
drivers/infiniband/hw/qib/qib.h
··· 1082 1082 /* control high-level access to EEPROM */ 1083 1083 struct mutex eep_lock; 1084 1084 uint64_t traffic_wds; 1085 - /* active time is kept in seconds, but logged in hours */ 1086 - atomic_t active_time; 1087 - /* Below are nominal shadow of EEPROM, new since last EEPROM update */ 1088 - uint8_t eep_st_errs[QIB_EEP_LOG_CNT]; 1089 - uint8_t eep_st_new_errs[QIB_EEP_LOG_CNT]; 1090 - uint16_t eep_hrs; 1091 1085 /* 1092 1086 * masks for which bits of errs, hwerrs that cause 1093 1087 * each of the counters to increment. ··· 1303 1309 int qib_twsi_blk_wr(struct qib_devdata *dd, int dev, int addr, 1304 1310 const void *buffer, int len); 1305 1311 void qib_get_eeprom_info(struct qib_devdata *); 1306 - int qib_update_eeprom_log(struct qib_devdata *dd); 1307 - void qib_inc_eeprom_err(struct qib_devdata *dd, u32 eidx, u32 incr); 1312 + #define qib_inc_eeprom_err(dd, eidx, incr) 1308 1313 void qib_dump_lookup_output_queue(struct qib_devdata *); 1309 1314 void qib_force_pio_avail_update(struct qib_devdata *); 1310 1315 void qib_clear_symerror_on_linkup(unsigned long opaque); ··· 1460 1467 * Flush write combining store buffers (if present) and perform a write 1461 1468 * barrier. 1462 1469 */ 1470 + static inline void qib_flush_wc(void) 1471 + { 1463 1472 #if defined(CONFIG_X86_64) 1464 - #define qib_flush_wc() asm volatile("sfence" : : : "memory") 1473 + asm volatile("sfence" : : : "memory"); 1465 1474 #else 1466 - #define qib_flush_wc() wmb() /* no reorder around wc flush */ 1475 + wmb(); /* no reorder around wc flush */ 1467 1476 #endif 1477 + } 1468 1478 1469 1479 /* global module parameter variables */ 1470 1480 extern unsigned qib_ibmtu;
+2 -2
drivers/infiniband/hw/qib/qib_common.h
··· 257 257 258 258 /* shared memory page for send buffer disarm status */ 259 259 __u64 spi_sendbuf_status; 260 - } __attribute__ ((aligned(8))); 260 + } __aligned(8); 261 261 262 262 /* 263 263 * This version number is given to the driver by the user code during ··· 361 361 */ 362 362 __u64 spu_base_info; 363 363 364 - } __attribute__ ((aligned(8))); 364 + } __aligned(8); 365 365 366 366 /* User commands. */ 367 367
-1
drivers/infiniband/hw/qib/qib_debugfs.c
··· 255 255 DEBUGFS_FILE_CREATE(opcode_stats); 256 256 DEBUGFS_FILE_CREATE(ctx_stats); 257 257 DEBUGFS_FILE_CREATE(qp_stats); 258 - return; 259 258 } 260 259 261 260 void qib_dbg_ibdev_exit(struct qib_ibdev *ibd)
+7 -2
drivers/infiniband/hw/qib/qib_diag.c
··· 85 85 client_pool = dc->next; 86 86 else 87 87 /* None in pool, alloc and init */ 88 - dc = kmalloc(sizeof *dc, GFP_KERNEL); 88 + dc = kmalloc(sizeof(*dc), GFP_KERNEL); 89 89 90 90 if (dc) { 91 91 dc->next = NULL; ··· 257 257 if (dd->userbase) { 258 258 /* If user regs mapped, they are after send, so set limit. */ 259 259 u32 ulim = (dd->cfgctxts * dd->ureg_align) + dd->uregbase; 260 + 260 261 if (!dd->piovl15base) 261 262 snd_lim = dd->uregbase; 262 263 krb32 = (u32 __iomem *)dd->userbase; ··· 281 280 snd_bottom = dd->pio2k_bufbase; 282 281 if (snd_lim == 0) { 283 282 u32 tot2k = dd->piobcnt2k * ALIGN(dd->piosize2k, dd->palign); 283 + 284 284 snd_lim = snd_bottom + tot2k; 285 285 } 286 286 /* If 4k buffers exist, account for them by bumping ··· 400 398 /* not very efficient, but it works for now */ 401 399 while (reg_addr < reg_end) { 402 400 u64 data; 401 + 403 402 if (copy_from_user(&data, uaddr, sizeof(data))) { 404 403 ret = -EFAULT; 405 404 goto bail; ··· 701 698 702 699 if (!dd || !op) 703 700 return -EINVAL; 704 - olp = vmalloc(sizeof *olp); 701 + olp = vmalloc(sizeof(*olp)); 705 702 if (!olp) { 706 703 pr_err("vmalloc for observer failed\n"); 707 704 return -ENOMEM; ··· 799 796 op = diag_get_observer(dd, *off); 800 797 if (op) { 801 798 u32 offset = *off; 799 + 802 800 ret = op->hook(dd, op, offset, &data64, 0, use_32); 803 801 } 804 802 /* ··· 877 873 if (count == 4 || count == 8) { 878 874 u64 data64; 879 875 u32 offset = *off; 876 + 880 877 ret = copy_from_user(&data64, data, count); 881 878 if (ret) { 882 879 ret = -EFAULT;
+4 -1
drivers/infiniband/hw/qib/qib_driver.c
··· 86 86 { 87 87 static char iname[16]; 88 88 89 - snprintf(iname, sizeof iname, "infinipath%u", unit); 89 + snprintf(iname, sizeof(iname), "infinipath%u", unit); 90 90 return iname; 91 91 } 92 92 ··· 349 349 qp_num = be32_to_cpu(ohdr->bth[1]) & QIB_QPN_MASK; 350 350 if (qp_num != QIB_MULTICAST_QPN) { 351 351 int ruc_res; 352 + 352 353 qp = qib_lookup_qpn(ibp, qp_num); 353 354 if (!qp) 354 355 goto drop; ··· 462 461 rhf_addr = (__le32 *) rcd->rcvhdrq + l + dd->rhf_offset; 463 462 if (dd->flags & QIB_NODMA_RTAIL) { 464 463 u32 seq = qib_hdrget_seq(rhf_addr); 464 + 465 465 if (seq != rcd->seq_cnt) 466 466 goto bail; 467 467 hdrqtail = 0; ··· 653 651 int qib_set_lid(struct qib_pportdata *ppd, u32 lid, u8 lmc) 654 652 { 655 653 struct qib_devdata *dd = ppd->dd; 654 + 656 655 ppd->lid = lid; 657 656 ppd->lmc = lmc; 658 657
+9 -189
drivers/infiniband/hw/qib/qib_eeprom.c
··· 153 153 154 154 if (t && dd0->nguid > 1 && t <= dd0->nguid) { 155 155 u8 oguid; 156 + 156 157 dd->base_guid = dd0->base_guid; 157 158 bguid = (u8 *) &dd->base_guid; 158 159 ··· 252 251 * This board has a Serial-prefix, which is stored 253 252 * elsewhere for backward-compatibility. 254 253 */ 255 - memcpy(snp, ifp->if_sprefix, sizeof ifp->if_sprefix); 256 - snp[sizeof ifp->if_sprefix] = '\0'; 254 + memcpy(snp, ifp->if_sprefix, sizeof(ifp->if_sprefix)); 255 + snp[sizeof(ifp->if_sprefix)] = '\0'; 257 256 len = strlen(snp); 258 257 snp += len; 259 - len = (sizeof dd->serial) - len; 260 - if (len > sizeof ifp->if_serial) 261 - len = sizeof ifp->if_serial; 258 + len = sizeof(dd->serial) - len; 259 + if (len > sizeof(ifp->if_serial)) 260 + len = sizeof(ifp->if_serial); 262 261 memcpy(snp, ifp->if_serial, len); 263 - } else 264 - memcpy(dd->serial, ifp->if_serial, 265 - sizeof ifp->if_serial); 262 + } else { 263 + memcpy(dd->serial, ifp->if_serial, sizeof(ifp->if_serial)); 264 + } 266 265 if (!strstr(ifp->if_comment, "Tested successfully")) 267 266 qib_dev_err(dd, 268 267 "Board SN %s did not pass functional test: %s\n", 269 268 dd->serial, ifp->if_comment); 270 - 271 - memcpy(&dd->eep_st_errs, &ifp->if_errcntp, QIB_EEP_LOG_CNT); 272 - /* 273 - * Power-on (actually "active") hours are kept as little-endian value 274 - * in EEPROM, but as seconds in a (possibly as small as 24-bit) 275 - * atomic_t while running. 276 - */ 277 - atomic_set(&dd->active_time, 0); 278 - dd->eep_hrs = ifp->if_powerhour[0] | (ifp->if_powerhour[1] << 8); 279 269 280 270 done: 281 271 vfree(buf); ··· 274 282 bail:; 275 283 } 276 284 277 - /** 278 - * qib_update_eeprom_log - copy active-time and error counters to eeprom 279 - * @dd: the qlogic_ib device 280 - * 281 - * Although the time is kept as seconds in the qib_devdata struct, it is 282 - * rounded to hours for re-write, as we have only 16 bits in EEPROM. 283 - * First-cut code reads whole (expected) struct qib_flash, modifies, 284 - * re-writes. Future direction: read/write only what we need, assuming 285 - * that the EEPROM had to have been "good enough" for driver init, and 286 - * if not, we aren't making it worse. 287 - * 288 - */ 289 - int qib_update_eeprom_log(struct qib_devdata *dd) 290 - { 291 - void *buf; 292 - struct qib_flash *ifp; 293 - int len, hi_water; 294 - uint32_t new_time, new_hrs; 295 - u8 csum; 296 - int ret, idx; 297 - unsigned long flags; 298 - 299 - /* first, check if we actually need to do anything. */ 300 - ret = 0; 301 - for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) { 302 - if (dd->eep_st_new_errs[idx]) { 303 - ret = 1; 304 - break; 305 - } 306 - } 307 - new_time = atomic_read(&dd->active_time); 308 - 309 - if (ret == 0 && new_time < 3600) 310 - goto bail; 311 - 312 - /* 313 - * The quick-check above determined that there is something worthy 314 - * of logging, so get current contents and do a more detailed idea. 315 - * read full flash, not just currently used part, since it may have 316 - * been written with a newer definition 317 - */ 318 - len = sizeof(struct qib_flash); 319 - buf = vmalloc(len); 320 - ret = 1; 321 - if (!buf) { 322 - qib_dev_err(dd, 323 - "Couldn't allocate memory to read %u bytes from eeprom for logging\n", 324 - len); 325 - goto bail; 326 - } 327 - 328 - /* Grab semaphore and read current EEPROM. If we get an 329 - * error, let go, but if not, keep it until we finish write. 
330 - */ 331 - ret = mutex_lock_interruptible(&dd->eep_lock); 332 - if (ret) { 333 - qib_dev_err(dd, "Unable to acquire EEPROM for logging\n"); 334 - goto free_bail; 335 - } 336 - ret = qib_twsi_blk_rd(dd, dd->twsi_eeprom_dev, 0, buf, len); 337 - if (ret) { 338 - mutex_unlock(&dd->eep_lock); 339 - qib_dev_err(dd, "Unable read EEPROM for logging\n"); 340 - goto free_bail; 341 - } 342 - ifp = (struct qib_flash *)buf; 343 - 344 - csum = flash_csum(ifp, 0); 345 - if (csum != ifp->if_csum) { 346 - mutex_unlock(&dd->eep_lock); 347 - qib_dev_err(dd, "EEPROM cks err (0x%02X, S/B 0x%02X)\n", 348 - csum, ifp->if_csum); 349 - ret = 1; 350 - goto free_bail; 351 - } 352 - hi_water = 0; 353 - spin_lock_irqsave(&dd->eep_st_lock, flags); 354 - for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) { 355 - int new_val = dd->eep_st_new_errs[idx]; 356 - if (new_val) { 357 - /* 358 - * If we have seen any errors, add to EEPROM values 359 - * We need to saturate at 0xFF (255) and we also 360 - * would need to adjust the checksum if we were 361 - * trying to minimize EEPROM traffic 362 - * Note that we add to actual current count in EEPROM, 363 - * in case it was altered while we were running. 364 - */ 365 - new_val += ifp->if_errcntp[idx]; 366 - if (new_val > 0xFF) 367 - new_val = 0xFF; 368 - if (ifp->if_errcntp[idx] != new_val) { 369 - ifp->if_errcntp[idx] = new_val; 370 - hi_water = offsetof(struct qib_flash, 371 - if_errcntp) + idx; 372 - } 373 - /* 374 - * update our shadow (used to minimize EEPROM 375 - * traffic), to match what we are about to write. 376 - */ 377 - dd->eep_st_errs[idx] = new_val; 378 - dd->eep_st_new_errs[idx] = 0; 379 - } 380 - } 381 - /* 382 - * Now update active-time. We would like to round to the nearest hour 383 - * but unless atomic_t are sure to be proper signed ints we cannot, 384 - * because we need to account for what we "transfer" to EEPROM and 385 - * if we log an hour at 31 minutes, then we would need to set 386 - * active_time to -29 to accurately count the _next_ hour. 387 - */ 388 - if (new_time >= 3600) { 389 - new_hrs = new_time / 3600; 390 - atomic_sub((new_hrs * 3600), &dd->active_time); 391 - new_hrs += dd->eep_hrs; 392 - if (new_hrs > 0xFFFF) 393 - new_hrs = 0xFFFF; 394 - dd->eep_hrs = new_hrs; 395 - if ((new_hrs & 0xFF) != ifp->if_powerhour[0]) { 396 - ifp->if_powerhour[0] = new_hrs & 0xFF; 397 - hi_water = offsetof(struct qib_flash, if_powerhour); 398 - } 399 - if ((new_hrs >> 8) != ifp->if_powerhour[1]) { 400 - ifp->if_powerhour[1] = new_hrs >> 8; 401 - hi_water = offsetof(struct qib_flash, if_powerhour) + 1; 402 - } 403 - } 404 - /* 405 - * There is a tiny possibility that we could somehow fail to write 406 - * the EEPROM after updating our shadows, but problems from holding 407 - * the spinlock too long are a much bigger issue. 408 - */ 409 - spin_unlock_irqrestore(&dd->eep_st_lock, flags); 410 - if (hi_water) { 411 - /* we made some change to the data, uopdate cksum and write */ 412 - csum = flash_csum(ifp, 1); 413 - ret = eeprom_write_with_enable(dd, 0, buf, hi_water + 1); 414 - } 415 - mutex_unlock(&dd->eep_lock); 416 - if (ret) 417 - qib_dev_err(dd, "Failed updating EEPROM\n"); 418 - 419 - free_bail: 420 - vfree(buf); 421 - bail: 422 - return ret; 423 - } 424 - 425 - /** 426 - * qib_inc_eeprom_err - increment one of the four error counters 427 - * that are logged to EEPROM. 428 - * @dd: the qlogic_ib device 429 - * @eidx: 0..3, the counter to increment 430 - * @incr: how much to add 431 - * 432 - * Each counter is 8-bits, and saturates at 255 (0xFF). 
They 433 - * are copied to the EEPROM (aka flash) whenever qib_update_eeprom_log() 434 - * is called, but it can only be called in a context that allows sleep. 435 - * This function can be called even at interrupt level. 436 - */ 437 - void qib_inc_eeprom_err(struct qib_devdata *dd, u32 eidx, u32 incr) 438 - { 439 - uint new_val; 440 - unsigned long flags; 441 - 442 - spin_lock_irqsave(&dd->eep_st_lock, flags); 443 - new_val = dd->eep_st_new_errs[eidx] + incr; 444 - if (new_val > 255) 445 - new_val = 255; 446 - dd->eep_st_new_errs[eidx] = new_val; 447 - spin_unlock_irqrestore(&dd->eep_st_lock, flags); 448 - }
+15 -11
drivers/infiniband/hw/qib/qib_file_ops.c
··· 351 351 * unless perhaps the user has mpin'ed the pages 352 352 * themselves. 353 353 */ 354 - qib_devinfo(dd->pcidev, 355 - "Failed to lock addr %p, %u pages: " 356 - "errno %d\n", (void *) vaddr, cnt, -ret); 354 + qib_devinfo( 355 + dd->pcidev, 356 + "Failed to lock addr %p, %u pages: errno %d\n", 357 + (void *) vaddr, cnt, -ret); 357 358 goto done; 358 359 } 359 360 for (i = 0; i < cnt; i++, vaddr += PAGE_SIZE) { ··· 438 437 goto cleanup; 439 438 } 440 439 if (copy_to_user((void __user *) (unsigned long) ti->tidmap, 441 - tidmap, sizeof tidmap)) { 440 + tidmap, sizeof(tidmap))) { 442 441 ret = -EFAULT; 443 442 goto cleanup; 444 443 } ··· 485 484 } 486 485 487 486 if (copy_from_user(tidmap, (void __user *)(unsigned long)ti->tidmap, 488 - sizeof tidmap)) { 487 + sizeof(tidmap))) { 489 488 ret = -EFAULT; 490 489 goto done; 491 490 } ··· 952 951 /* rcvegrbufs are read-only on the slave */ 953 952 if (vma->vm_flags & VM_WRITE) { 954 953 qib_devinfo(dd->pcidev, 955 - "Can't map eager buffers as " 956 - "writable (flags=%lx)\n", vma->vm_flags); 954 + "Can't map eager buffers as writable (flags=%lx)\n", 955 + vma->vm_flags); 957 956 ret = -EPERM; 958 957 goto bail; 959 958 } ··· 1186 1185 */ 1187 1186 if (weight >= qib_cpulist_count) { 1188 1187 int cpu; 1188 + 1189 1189 cpu = find_first_zero_bit(qib_cpulist, 1190 1190 qib_cpulist_count); 1191 1191 if (cpu == qib_cpulist_count) ··· 1249 1247 if (!qib_compatible_subctxts(uinfo->spu_userversion >> 16, 1250 1248 uinfo->spu_userversion & 0xffff)) { 1251 1249 qib_devinfo(dd->pcidev, 1252 - "Mismatched user version (%d.%d) and driver " 1253 - "version (%d.%d) while context sharing. Ensure " 1254 - "that driver and library are from the same " 1255 - "release.\n", 1250 + "Mismatched user version (%d.%d) and driver version (%d.%d) while context sharing. Ensure that driver and library are from the same release.\n", 1256 1251 (int) (uinfo->spu_userversion >> 16), 1257 1252 (int) (uinfo->spu_userversion & 0xffff), 1258 1253 QIB_USER_SWMAJOR, QIB_USER_SWMINOR); ··· 1390 1391 } 1391 1392 if (!ppd) { 1392 1393 u32 pidx = ctxt % dd->num_pports; 1394 + 1393 1395 if (usable(dd->pport + pidx)) 1394 1396 ppd = dd->pport + pidx; 1395 1397 else { ··· 1438 1438 1439 1439 if (alg == QIB_PORT_ALG_ACROSS) { 1440 1440 unsigned inuse = ~0U; 1441 + 1441 1442 /* find device (with ACTIVE ports) with fewest ctxts in use */ 1442 1443 for (ndev = 0; ndev < devmax; ndev++) { 1443 1444 struct qib_devdata *dd = qib_lookup(ndev); 1444 1445 unsigned cused = 0, cfree = 0, pusable = 0; 1446 + 1445 1447 if (!dd) 1446 1448 continue; 1447 1449 if (port && port <= dd->num_pports && ··· 1473 1471 } else { 1474 1472 for (ndev = 0; ndev < devmax; ndev++) { 1475 1473 struct qib_devdata *dd = qib_lookup(ndev); 1474 + 1476 1475 if (dd) { 1477 1476 ret = choose_port_ctxt(fp, dd, port, uinfo); 1478 1477 if (!ret) ··· 1559 1556 } 1560 1557 for (ndev = 0; ndev < devmax; ndev++) { 1561 1558 struct qib_devdata *dd = qib_lookup(ndev); 1559 + 1562 1560 if (dd) { 1563 1561 if (pcibus_to_node(dd->pcidev->bus) < 0) { 1564 1562 ret = -EINVAL;
+5 -4
drivers/infiniband/hw/qib/qib_fs.c
··· 106 106 { 107 107 qib_stats.sps_ints = qib_sps_ints(); 108 108 return simple_read_from_buffer(buf, count, ppos, &qib_stats, 109 - sizeof qib_stats); 109 + sizeof(qib_stats)); 110 110 } 111 111 112 112 /* ··· 133 133 size_t count, loff_t *ppos) 134 134 { 135 135 return simple_read_from_buffer(buf, count, ppos, qib_statnames, 136 - sizeof qib_statnames - 1); /* no null */ 136 + sizeof(qib_statnames) - 1); /* no null */ 137 137 } 138 138 139 139 static const struct file_operations driver_ops[] = { ··· 379 379 int ret, i; 380 380 381 381 /* create the per-unit directory */ 382 - snprintf(unit, sizeof unit, "%u", dd->unit); 382 + snprintf(unit, sizeof(unit), "%u", dd->unit); 383 383 ret = create_file(unit, S_IFDIR|S_IRUGO|S_IXUGO, sb->s_root, &dir, 384 384 &simple_dir_operations, dd); 385 385 if (ret) { ··· 482 482 483 483 root = dget(sb->s_root); 484 484 mutex_lock(&root->d_inode->i_mutex); 485 - snprintf(unit, sizeof unit, "%u", dd->unit); 485 + snprintf(unit, sizeof(unit), "%u", dd->unit); 486 486 dir = lookup_one_len(unit, root, strlen(unit)); 487 487 488 488 if (IS_ERR(dir)) { ··· 560 560 const char *dev_name, void *data) 561 561 { 562 562 struct dentry *ret; 563 + 563 564 ret = mount_single(fs_type, flags, data, qibfs_fill_super); 564 565 if (!IS_ERR(ret)) 565 566 qib_super = ret->d_sb;
+9 -6
drivers/infiniband/hw/qib/qib_iba6120.c
··· 333 333 enum qib_ureg regno, u64 value, int ctxt) 334 334 { 335 335 u64 __iomem *ubase; 336 + 336 337 if (dd->userbase) 337 338 ubase = (u64 __iomem *) 338 339 ((char __iomem *) dd->userbase + ··· 835 834 bits = (u32) ((hwerrs >> 836 835 QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) & 837 836 QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK); 838 - snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf, 837 + snprintf(bitsmsg, sizeof(dd->cspec->bitsmsgbuf), 839 838 "[PCIe Mem Parity Errs %x] ", bits); 840 839 strlcat(msg, bitsmsg, msgl); 841 840 } 842 841 843 842 if (hwerrs & _QIB_PLL_FAIL) { 844 843 isfatal = 1; 845 - snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf, 844 + snprintf(bitsmsg, sizeof(dd->cspec->bitsmsgbuf), 846 845 "[PLL failed (%llx), InfiniPath hardware unusable]", 847 846 (unsigned long long) hwerrs & _QIB_PLL_FAIL); 848 847 strlcat(msg, bitsmsg, msgl); ··· 1015 1014 1016 1015 /* do these first, they are most important */ 1017 1016 if (errs & ERR_MASK(HardwareErr)) 1018 - qib_handle_6120_hwerrors(dd, msg, sizeof dd->cspec->emsgbuf); 1017 + qib_handle_6120_hwerrors(dd, msg, sizeof(dd->cspec->emsgbuf)); 1019 1018 else 1020 1019 for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx) 1021 1020 if (errs & dd->eep_st_masks[log_idx].errs_to_log) ··· 1063 1062 */ 1064 1063 mask = ERR_MASK(IBStatusChanged) | ERR_MASK(RcvEgrFullErr) | 1065 1064 ERR_MASK(RcvHdrFullErr) | ERR_MASK(HardwareErr); 1066 - qib_decode_6120_err(dd, msg, sizeof dd->cspec->emsgbuf, errs & ~mask); 1065 + qib_decode_6120_err(dd, msg, sizeof(dd->cspec->emsgbuf), errs & ~mask); 1067 1066 1068 1067 if (errs & E_SUM_PKTERRS) 1069 1068 qib_stats.sps_rcverrs++; ··· 1671 1670 } 1672 1671 if (crcs) { 1673 1672 u32 cntr = dd->cspec->lli_counter; 1673 + 1674 1674 cntr += crcs; 1675 1675 if (cntr) { 1676 1676 if (cntr > dd->cspec->lli_thresh) { ··· 1724 1722 "irq is 0, BIOS error? Interrupts won't work\n"); 1725 1723 else { 1726 1724 int ret; 1725 + 1727 1726 ret = request_irq(dd->cspec->irq, qib_6120intr, 0, 1728 1727 QIB_DRV_NAME, dd); 1729 1728 if (ret) ··· 2684 2681 spin_lock_irqsave(&dd->eep_st_lock, flags); 2685 2682 traffic_wds -= dd->traffic_wds; 2686 2683 dd->traffic_wds += traffic_wds; 2687 - if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD) 2688 - atomic_add(5, &dd->active_time); /* S/B #define */ 2689 2684 spin_unlock_irqrestore(&dd->eep_st_lock, flags); 2690 2685 2691 2686 qib_chk_6120_errormask(dd); ··· 2930 2929 static int qib_6120_set_loopback(struct qib_pportdata *ppd, const char *what) 2931 2930 { 2932 2931 int ret = 0; 2932 + 2933 2933 if (!strncmp(what, "ibc", 3)) { 2934 2934 ppd->dd->cspec->ibcctrl |= SYM_MASK(IBCCtrl, Loopback); 2935 2935 qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n", ··· 3172 3170 static void set_6120_baseaddrs(struct qib_devdata *dd) 3173 3171 { 3174 3172 u32 cregbase; 3173 + 3175 3174 cregbase = qib_read_kreg32(dd, kr_counterregbase); 3176 3175 dd->cspec->cregbase = (u64 __iomem *) 3177 3176 ((char __iomem *) dd->kregbase + cregbase);
+7 -7
drivers/infiniband/hw/qib/qib_iba7220.c
··· 902 902 errs &= QLOGIC_IB_E_SDMAERRS; 903 903 904 904 msg = dd->cspec->sdmamsgbuf; 905 - qib_decode_7220_sdma_errs(ppd, errs, msg, sizeof dd->cspec->sdmamsgbuf); 905 + qib_decode_7220_sdma_errs(ppd, errs, msg, 906 + sizeof(dd->cspec->sdmamsgbuf)); 906 907 spin_lock_irqsave(&ppd->sdma_lock, flags); 907 908 908 909 if (errs & ERR_MASK(SendBufMisuseErr)) { ··· 1044 1043 static void reenable_7220_chase(unsigned long opaque) 1045 1044 { 1046 1045 struct qib_pportdata *ppd = (struct qib_pportdata *)opaque; 1046 + 1047 1047 ppd->cpspec->chase_timer.expires = 0; 1048 1048 qib_set_ib_7220_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN, 1049 1049 QLOGIC_IB_IBCC_LINKINITCMD_POLL); ··· 1103 1101 1104 1102 /* do these first, they are most important */ 1105 1103 if (errs & ERR_MASK(HardwareErr)) 1106 - qib_7220_handle_hwerrors(dd, msg, sizeof dd->cspec->emsgbuf); 1104 + qib_7220_handle_hwerrors(dd, msg, sizeof(dd->cspec->emsgbuf)); 1107 1105 else 1108 1106 for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx) 1109 1107 if (errs & dd->eep_st_masks[log_idx].errs_to_log) ··· 1157 1155 ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr) | 1158 1156 ERR_MASK(HardwareErr) | ERR_MASK(SDmaDisabledErr); 1159 1157 1160 - qib_decode_7220_err(dd, msg, sizeof dd->cspec->emsgbuf, errs & ~mask); 1158 + qib_decode_7220_err(dd, msg, sizeof(dd->cspec->emsgbuf), errs & ~mask); 1161 1159 1162 1160 if (errs & E_SUM_PKTERRS) 1163 1161 qib_stats.sps_rcverrs++; ··· 1382 1380 bits = (u32) ((hwerrs >> 1383 1381 QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) & 1384 1382 QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK); 1385 - snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf, 1383 + snprintf(bitsmsg, sizeof(dd->cspec->bitsmsgbuf), 1386 1384 "[PCIe Mem Parity Errs %x] ", bits); 1387 1385 strlcat(msg, bitsmsg, msgl); 1388 1386 } ··· 1392 1390 1393 1391 if (hwerrs & _QIB_PLL_FAIL) { 1394 1392 isfatal = 1; 1395 - snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf, 1393 + snprintf(bitsmsg, sizeof(dd->cspec->bitsmsgbuf), 1396 1394 "[PLL failed (%llx), InfiniPath hardware unusable]", 1397 1395 (unsigned long long) hwerrs & _QIB_PLL_FAIL); 1398 1396 strlcat(msg, bitsmsg, msgl); ··· 3299 3297 spin_lock_irqsave(&dd->eep_st_lock, flags); 3300 3298 traffic_wds -= dd->traffic_wds; 3301 3299 dd->traffic_wds += traffic_wds; 3302 - if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD) 3303 - atomic_add(5, &dd->active_time); /* S/B #define */ 3304 3300 spin_unlock_irqrestore(&dd->eep_st_lock, flags); 3305 3301 done: 3306 3302 mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
+36 -16
drivers/infiniband/hw/qib/qib_iba7322.c
··· 117 117 118 118 static ushort qib_long_atten = 10; /* 10 dB ~= 5m length */ 119 119 module_param_named(long_attenuation, qib_long_atten, ushort, S_IRUGO); 120 - MODULE_PARM_DESC(long_attenuation, \ 120 + MODULE_PARM_DESC(long_attenuation, 121 121 "attenuation cutoff (dB) for long copper cable setup"); 122 122 123 123 static ushort qib_singleport; ··· 153 153 static int setup_txselect(const char *, struct kernel_param *); 154 154 module_param_call(txselect, setup_txselect, param_get_string, 155 155 &kp_txselect, S_IWUSR | S_IRUGO); 156 - MODULE_PARM_DESC(txselect, \ 156 + MODULE_PARM_DESC(txselect, 157 157 "Tx serdes indices (for no QSFP or invalid QSFP data)"); 158 158 159 159 #define BOARD_QME7342 5 160 160 #define BOARD_QMH7342 6 161 + #define BOARD_QMH7360 9 161 162 #define IS_QMH(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \ 162 163 BOARD_QMH7342) 163 164 #define IS_QME(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \ ··· 818 817 enum qib_ureg regno, u64 value, int ctxt) 819 818 { 820 819 u64 __iomem *ubase; 820 + 821 821 if (dd->userbase) 822 822 ubase = (u64 __iomem *) 823 823 ((char __iomem *) dd->userbase + ··· 1679 1677 /* do these first, they are most important */ 1680 1678 if (errs & QIB_E_HARDWARE) { 1681 1679 *msg = '\0'; 1682 - qib_7322_handle_hwerrors(dd, msg, sizeof dd->cspec->emsgbuf); 1680 + qib_7322_handle_hwerrors(dd, msg, sizeof(dd->cspec->emsgbuf)); 1683 1681 } else 1684 1682 for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx) 1685 1683 if (errs & dd->eep_st_masks[log_idx].errs_to_log) ··· 1704 1702 mask = QIB_E_HARDWARE; 1705 1703 *msg = '\0'; 1706 1704 1707 - err_decode(msg, sizeof dd->cspec->emsgbuf, errs & ~mask, 1705 + err_decode(msg, sizeof(dd->cspec->emsgbuf), errs & ~mask, 1708 1706 qib_7322error_msgs); 1709 1707 1710 1708 /* ··· 1891 1889 *msg = '\0'; 1892 1890 1893 1891 if (errs & ~QIB_E_P_BITSEXTANT) { 1894 - err_decode(msg, sizeof ppd->cpspec->epmsgbuf, 1892 + err_decode(msg, sizeof(ppd->cpspec->epmsgbuf), 1895 1893 errs & ~QIB_E_P_BITSEXTANT, qib_7322p_error_msgs); 1896 1894 if (!*msg) 1897 - snprintf(msg, sizeof ppd->cpspec->epmsgbuf, 1895 + snprintf(msg, sizeof(ppd->cpspec->epmsgbuf), 1898 1896 "no others"); 1899 1897 qib_dev_porterr(dd, ppd->port, 1900 1898 "error interrupt with unknown errors 0x%016Lx set (and %s)\n", ··· 1908 1906 /* determine cause, then write to clear */ 1909 1907 symptom = qib_read_kreg_port(ppd, krp_sendhdrsymptom); 1910 1908 qib_write_kreg_port(ppd, krp_sendhdrsymptom, 0); 1911 - err_decode(msg, sizeof ppd->cpspec->epmsgbuf, symptom, 1909 + err_decode(msg, sizeof(ppd->cpspec->epmsgbuf), symptom, 1912 1910 hdrchk_msgs); 1913 1911 *msg = '\0'; 1914 1912 /* senderrbuf cleared in SPKTERRS below */ ··· 1924 1922 * isn't valid. We don't want to confuse people, so 1925 1923 * we just don't print them, except at debug 1926 1924 */ 1927 - err_decode(msg, sizeof ppd->cpspec->epmsgbuf, 1925 + err_decode(msg, sizeof(ppd->cpspec->epmsgbuf), 1928 1926 (errs & QIB_E_P_LINK_PKTERRS), 1929 1927 qib_7322p_error_msgs); 1930 1928 *msg = '\0'; ··· 1940 1938 * valid. 
We don't want to confuse people, so we just 1941 1939 * don't print them, except at debug 1942 1940 */ 1943 - err_decode(msg, sizeof ppd->cpspec->epmsgbuf, errs, 1941 + err_decode(msg, sizeof(ppd->cpspec->epmsgbuf), errs, 1944 1942 qib_7322p_error_msgs); 1945 1943 ignore_this_time = errs & QIB_E_P_LINK_PKTERRS; 1946 1944 *msg = '\0'; ··· 2033 2031 if (dd->cspec->num_msix_entries) { 2034 2032 /* and same for MSIx */ 2035 2033 u64 val = qib_read_kreg64(dd, kr_intgranted); 2034 + 2036 2035 if (val) 2037 2036 qib_write_kreg(dd, kr_intgranted, val); 2038 2037 } ··· 2179 2176 int err; 2180 2177 unsigned long flags; 2181 2178 struct qib_pportdata *ppd = dd->pport; 2179 + 2182 2180 for (; pidx < dd->num_pports; ++pidx, ppd++) { 2183 2181 err = 0; 2184 2182 if (pidx == 0 && (hwerrs & ··· 2805 2801 2806 2802 if (n->rcv) { 2807 2803 struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg; 2804 + 2808 2805 qib_update_rhdrq_dca(rcd, cpu); 2809 2806 } else { 2810 2807 struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg; 2808 + 2811 2809 qib_update_sdma_dca(ppd, cpu); 2812 2810 } 2813 2811 } ··· 2822 2816 2823 2817 if (n->rcv) { 2824 2818 struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg; 2819 + 2825 2820 dd = rcd->dd; 2826 2821 } else { 2827 2822 struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg; 2823 + 2828 2824 dd = ppd->dd; 2829 2825 } 2830 2826 qib_devinfo(dd->pcidev, ··· 3002 2994 struct qib_pportdata *ppd; 3003 2995 struct qib_qsfp_data *qd; 3004 2996 u32 mask; 2997 + 3005 2998 if (!dd->pport[pidx].link_speed_supported) 3006 2999 continue; 3007 3000 mask = QSFP_GPIO_MOD_PRS_N; ··· 3010 3001 mask <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx); 3011 3002 if (gpiostatus & dd->cspec->gpio_mask & mask) { 3012 3003 u64 pins; 3004 + 3013 3005 qd = &ppd->cpspec->qsfp_data; 3014 3006 gpiostatus &= ~mask; 3015 3007 pins = qib_read_kreg64(dd, kr_extstatus); ··· 3452 3442 } 3453 3443 3454 3444 /* Try to get MSIx interrupts */ 3455 - memset(redirect, 0, sizeof redirect); 3445 + memset(redirect, 0, sizeof(redirect)); 3456 3446 mask = ~0ULL; 3457 3447 msixnum = 0; 3458 3448 local_mask = cpumask_of_pcibus(dd->pcidev->bus); ··· 3627 3617 n = "InfiniPath_QME7362"; 3628 3618 dd->flags |= QIB_HAS_QSFP; 3629 3619 break; 3620 + case BOARD_QMH7360: 3621 + n = "Intel IB QDR 1P FLR-QSFP Adptr"; 3622 + dd->flags |= QIB_HAS_QSFP; 3623 + break; 3630 3624 case 15: 3631 3625 n = "InfiniPath_QLE7342_TEST"; 3632 3626 dd->flags |= QIB_HAS_QSFP; ··· 3708 3694 */ 3709 3695 for (i = 0; i < msix_entries; i++) { 3710 3696 u64 vecaddr, vecdata; 3697 + 3711 3698 vecaddr = qib_read_kreg64(dd, 2 * i + 3712 3699 (QIB_7322_MsixTable_OFFS / sizeof(u64))); 3713 3700 vecdata = qib_read_kreg64(dd, 1 + 2 * i + ··· 5193 5178 spin_lock_irqsave(&ppd->dd->eep_st_lock, flags); 5194 5179 traffic_wds -= ppd->dd->traffic_wds; 5195 5180 ppd->dd->traffic_wds += traffic_wds; 5196 - if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD) 5197 - atomic_add(ACTIVITY_TIMER, &ppd->dd->active_time); 5198 5181 spin_unlock_irqrestore(&ppd->dd->eep_st_lock, flags); 5199 5182 if (ppd->cpspec->qdr_dfe_on && (ppd->link_speed_active & 5200 5183 QIB_IB_QDR) && ··· 5370 5357 static void set_7322_ibspeed_fast(struct qib_pportdata *ppd, u32 speed) 5371 5358 { 5372 5359 u64 newctrlb; 5360 + 5373 5361 newctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_SPEED_MASK | 5374 5362 IBA7322_IBC_IBTA_1_2_MASK | 5375 5363 IBA7322_IBC_MAX_SPEED_MASK); ··· 5857 5843 static void qib_7322_set_baseaddrs(struct qib_devdata *dd) 5858 5844 { 5859 5845 u32 cregbase; 5846 + 5860 5847 
cregbase = qib_read_kreg32(dd, kr_counterregbase); 5861 5848 5862 5849 dd->cspec->cregbase = (u64 __iomem *)(cregbase + ··· 6198 6183 struct qib_devdata *dd; 6199 6184 unsigned long val; 6200 6185 char *n; 6186 + 6201 6187 if (strlen(str) >= MAX_ATTEN_LEN) { 6202 6188 pr_info("txselect_values string too long\n"); 6203 6189 return -ENOSPC; ··· 6409 6393 val = TIDFLOW_ERRBITS; /* these are W1C */ 6410 6394 for (i = 0; i < dd->cfgctxts; i++) { 6411 6395 int flow; 6396 + 6412 6397 for (flow = 0; flow < NUM_TIDFLOWS_CTXT; flow++) 6413 6398 qib_write_ureg(dd, ur_rcvflowtable+flow, val, i); 6414 6399 } ··· 6520 6503 6521 6504 for (pidx = 0; pidx < NUM_IB_PORTS; ++pidx) { 6522 6505 struct qib_chippport_specific *cp = ppd->cpspec; 6506 + 6523 6507 ppd->link_speed_supported = features & PORT_SPD_CAP; 6524 6508 features >>= PORT_SPD_CAP_SHIFT; 6525 6509 if (!ppd->link_speed_supported) { ··· 6599 6581 ppd->vls_supported = IB_VL_VL0_7; 6600 6582 else { 6601 6583 qib_devinfo(dd->pcidev, 6602 - "Invalid num_vls %u for MTU %d " 6603 - ", using 4 VLs\n", 6584 + "Invalid num_vls %u for MTU %d , using 4 VLs\n", 6604 6585 qib_num_cfg_vls, mtu); 6605 6586 ppd->vls_supported = IB_VL_VL0_3; 6606 6587 qib_num_cfg_vls = 4; ··· 7907 7890 static int serdes_7322_init(struct qib_pportdata *ppd) 7908 7891 { 7909 7892 int ret = 0; 7893 + 7910 7894 if (ppd->dd->cspec->r1) 7911 7895 ret = serdes_7322_init_old(ppd); 7912 7896 else ··· 8323 8305 8324 8306 static int qib_r_grab(struct qib_devdata *dd) 8325 8307 { 8326 - u64 val; 8327 - val = SJA_EN; 8308 + u64 val = SJA_EN; 8309 + 8328 8310 qib_write_kreg(dd, kr_r_access, val); 8329 8311 qib_read_kreg32(dd, kr_scratch); 8330 8312 return 0; ··· 8337 8319 { 8338 8320 u64 val; 8339 8321 int timeout; 8322 + 8340 8323 for (timeout = 0; timeout < 100 ; ++timeout) { 8341 8324 val = qib_read_kreg32(dd, kr_r_access); 8342 8325 if (val & R_RDY) ··· 8365 8346 } 8366 8347 if (inp) { 8367 8348 int tdi = inp[pos >> 3] >> (pos & 7); 8349 + 8368 8350 val |= ((tdi & 1) << R_TDI_LSB); 8369 8351 } 8370 8352 qib_write_kreg(dd, kr_r_access, val);
+7 -5
drivers/infiniband/hw/qib/qib_init.c
··· 140 140 * Allocate full ctxtcnt array, rather than just cfgctxts, because 141 141 * cleanup iterates across all possible ctxts. 142 142 */ 143 - dd->rcd = kzalloc(sizeof(*dd->rcd) * dd->ctxtcnt, GFP_KERNEL); 143 + dd->rcd = kcalloc(dd->ctxtcnt, sizeof(*dd->rcd), GFP_KERNEL); 144 144 if (!dd->rcd) { 145 145 qib_dev_err(dd, 146 146 "Unable to allocate ctxtdata array, failing\n"); ··· 234 234 u8 hw_pidx, u8 port) 235 235 { 236 236 int size; 237 + 237 238 ppd->dd = dd; 238 239 ppd->hw_pidx = hw_pidx; 239 240 ppd->port = port; /* IB port number, not index */ ··· 614 613 ppd = dd->pport + pidx; 615 614 if (!ppd->qib_wq) { 616 615 char wq_name[8]; /* 3 + 2 + 1 + 1 + 1 */ 616 + 617 617 snprintf(wq_name, sizeof(wq_name), "qib%d_%d", 618 618 dd->unit, pidx); 619 619 ppd->qib_wq = ··· 716 714 717 715 for (pidx = 0; pidx < dd->num_pports; ++pidx) { 718 716 int mtu; 717 + 719 718 if (lastfail) 720 719 ret = lastfail; 721 720 ppd = dd->pport + pidx; ··· 934 931 qib_free_pportdata(ppd); 935 932 } 936 933 937 - qib_update_eeprom_log(dd); 938 934 } 939 935 940 936 /** ··· 1028 1026 addr = vmalloc(cnt); 1029 1027 if (!addr) { 1030 1028 qib_devinfo(dd->pcidev, 1031 - "Couldn't get memory for checking PIO perf," 1032 - " skipping\n"); 1029 + "Couldn't get memory for checking PIO perf, skipping\n"); 1033 1030 goto done; 1034 1031 } 1035 1032 ··· 1164 1163 1165 1164 if (!qib_cpulist_count) { 1166 1165 u32 count = num_online_cpus(); 1166 + 1167 1167 qib_cpulist = kzalloc(BITS_TO_LONGS(count) * 1168 1168 sizeof(long), GFP_KERNEL); 1169 1169 if (qib_cpulist) ··· 1181 1179 if (!list_empty(&dd->list)) 1182 1180 list_del_init(&dd->list); 1183 1181 ib_dealloc_device(&dd->verbs_dev.ibdev); 1184 - return ERR_PTR(ret);; 1182 + return ERR_PTR(ret); 1185 1183 } 1186 1184 1187 1185 /*
-1
drivers/infiniband/hw/qib/qib_intr.c
··· 168 168 ppd->lastibcstat = ibcs; 169 169 if (ev) 170 170 signal_ib_event(ppd, ev); 171 - return; 172 171 } 173 172 174 173 void qib_clear_symerror_on_linkup(unsigned long opaque)
+2 -2
drivers/infiniband/hw/qib/qib_keys.c
··· 122 122 if (!mr->lkey_published) 123 123 goto out; 124 124 if (lkey == 0) 125 - rcu_assign_pointer(dev->dma_mr, NULL); 125 + RCU_INIT_POINTER(dev->dma_mr, NULL); 126 126 else { 127 127 r = lkey >> (32 - ib_qib_lkey_table_size); 128 - rcu_assign_pointer(rkt->table[r], NULL); 128 + RCU_INIT_POINTER(rkt->table[r], NULL); 129 129 } 130 130 qib_put_mr(mr); 131 131 mr->lkey_published = 0;
+10 -10
drivers/infiniband/hw/qib/qib_mad.c
··· 152 152 data.trap_num = trap_num; 153 153 data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid); 154 154 data.toggle_count = 0; 155 - memset(&data.details, 0, sizeof data.details); 155 + memset(&data.details, 0, sizeof(data.details)); 156 156 data.details.ntc_257_258.lid1 = lid1; 157 157 data.details.ntc_257_258.lid2 = lid2; 158 158 data.details.ntc_257_258.key = cpu_to_be32(key); 159 159 data.details.ntc_257_258.sl_qp1 = cpu_to_be32((sl << 28) | qp1); 160 160 data.details.ntc_257_258.qp2 = cpu_to_be32(qp2); 161 161 162 - qib_send_trap(ibp, &data, sizeof data); 162 + qib_send_trap(ibp, &data, sizeof(data)); 163 163 } 164 164 165 165 /* ··· 176 176 data.trap_num = IB_NOTICE_TRAP_BAD_MKEY; 177 177 data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid); 178 178 data.toggle_count = 0; 179 - memset(&data.details, 0, sizeof data.details); 179 + memset(&data.details, 0, sizeof(data.details)); 180 180 data.details.ntc_256.lid = data.issuer_lid; 181 181 data.details.ntc_256.method = smp->method; 182 182 data.details.ntc_256.attr_id = smp->attr_id; ··· 198 198 hop_cnt); 199 199 } 200 200 201 - qib_send_trap(ibp, &data, sizeof data); 201 + qib_send_trap(ibp, &data, sizeof(data)); 202 202 } 203 203 204 204 /* ··· 214 214 data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG; 215 215 data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid); 216 216 data.toggle_count = 0; 217 - memset(&data.details, 0, sizeof data.details); 217 + memset(&data.details, 0, sizeof(data.details)); 218 218 data.details.ntc_144.lid = data.issuer_lid; 219 219 data.details.ntc_144.new_cap_mask = cpu_to_be32(ibp->port_cap_flags); 220 220 221 - qib_send_trap(ibp, &data, sizeof data); 221 + qib_send_trap(ibp, &data, sizeof(data)); 222 222 } 223 223 224 224 /* ··· 234 234 data.trap_num = IB_NOTICE_TRAP_SYS_GUID_CHG; 235 235 data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid); 236 236 data.toggle_count = 0; 237 - memset(&data.details, 0, sizeof data.details); 237 + memset(&data.details, 0, sizeof(data.details)); 238 238 data.details.ntc_145.lid = data.issuer_lid; 239 239 data.details.ntc_145.new_sys_guid = ib_qib_sys_image_guid; 240 240 241 - qib_send_trap(ibp, &data, sizeof data); 241 + qib_send_trap(ibp, &data, sizeof(data)); 242 242 } 243 243 244 244 /* ··· 254 254 data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG; 255 255 data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid); 256 256 data.toggle_count = 0; 257 - memset(&data.details, 0, sizeof data.details); 257 + memset(&data.details, 0, sizeof(data.details)); 258 258 data.details.ntc_144.lid = data.issuer_lid; 259 259 data.details.ntc_144.local_changes = 1; 260 260 data.details.ntc_144.change_flags = IB_NOTICE_TRAP_NODE_DESC_CHG; 261 261 262 - qib_send_trap(ibp, &data, sizeof data); 262 + qib_send_trap(ibp, &data, sizeof(data)); 263 263 } 264 264 265 265 static int subn_get_nodedescription(struct ib_smp *smp,
+1 -1
drivers/infiniband/hw/qib/qib_mmap.c
··· 134 134 void *obj) { 135 135 struct qib_mmap_info *ip; 136 136 137 - ip = kmalloc(sizeof *ip, GFP_KERNEL); 137 + ip = kmalloc(sizeof(*ip), GFP_KERNEL); 138 138 if (!ip) 139 139 goto bail; 140 140
+5 -5
drivers/infiniband/hw/qib/qib_mr.c
··· 55 55 56 56 m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ; 57 57 for (; i < m; i++) { 58 - mr->map[i] = kzalloc(sizeof *mr->map[0], GFP_KERNEL); 58 + mr->map[i] = kzalloc(sizeof(*mr->map[0]), GFP_KERNEL); 59 59 if (!mr->map[i]) 60 60 goto bail; 61 61 } ··· 104 104 goto bail; 105 105 } 106 106 107 - mr = kzalloc(sizeof *mr, GFP_KERNEL); 107 + mr = kzalloc(sizeof(*mr), GFP_KERNEL); 108 108 if (!mr) { 109 109 ret = ERR_PTR(-ENOMEM); 110 110 goto bail; ··· 143 143 144 144 /* Allocate struct plus pointers to first level page tables. */ 145 145 m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ; 146 - mr = kzalloc(sizeof *mr + m * sizeof mr->mr.map[0], GFP_KERNEL); 146 + mr = kzalloc(sizeof(*mr) + m * sizeof(mr->mr.map[0]), GFP_KERNEL); 147 147 if (!mr) 148 148 goto bail; 149 149 ··· 347 347 if (size > PAGE_SIZE) 348 348 return ERR_PTR(-EINVAL); 349 349 350 - pl = kzalloc(sizeof *pl, GFP_KERNEL); 350 + pl = kzalloc(sizeof(*pl), GFP_KERNEL); 351 351 if (!pl) 352 352 return ERR_PTR(-ENOMEM); 353 353 ··· 386 386 387 387 /* Allocate struct plus pointers to first level page tables. */ 388 388 m = (fmr_attr->max_pages + QIB_SEGSZ - 1) / QIB_SEGSZ; 389 - fmr = kzalloc(sizeof *fmr + m * sizeof fmr->mr.map[0], GFP_KERNEL); 389 + fmr = kzalloc(sizeof(*fmr) + m * sizeof(fmr->mr.map[0]), GFP_KERNEL); 390 390 if (!fmr) 391 391 goto bail; 392 392
+7 -3
drivers/infiniband/hw/qib/qib_pcie.c
··· 210 210 /* We can't pass qib_msix_entry array to qib_msix_setup 211 211 * so use a dummy msix_entry array and copy the allocated 212 212 * irq back to the qib_msix_entry array. */ 213 - msix_entry = kmalloc(nvec * sizeof(*msix_entry), GFP_KERNEL); 213 + msix_entry = kcalloc(nvec, sizeof(*msix_entry), GFP_KERNEL); 214 214 if (!msix_entry) 215 215 goto do_intx; 216 216 ··· 234 234 kfree(msix_entry); 235 235 236 236 do_intx: 237 - qib_dev_err(dd, "pci_enable_msix_range %d vectors failed: %d, " 238 - "falling back to INTx\n", nvec, ret); 237 + qib_dev_err( 238 + dd, 239 + "pci_enable_msix_range %d vectors failed: %d, falling back to INTx\n", 240 + nvec, ret); 239 241 *msixcnt = 0; 240 242 qib_enable_intx(dd->pcidev); 241 243 } ··· 461 459 void qib_pcie_reenable(struct qib_devdata *dd, u16 cmd, u8 iline, u8 cline) 462 460 { 463 461 int r; 462 + 464 463 r = pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0, 465 464 dd->pcibar0); 466 465 if (r) ··· 699 696 qib_pci_resume(struct pci_dev *pdev) 700 697 { 701 698 struct qib_devdata *dd = pci_get_drvdata(pdev); 699 + 702 700 qib_devinfo(pdev, "QIB resume function called\n"); 703 701 pci_cleanup_aer_uncorrect_error_status(pdev); 704 702 /*
+4 -4
drivers/infiniband/hw/qib/qib_qp.c
··· 255 255 256 256 if (rcu_dereference_protected(ibp->qp0, 257 257 lockdep_is_held(&dev->qpt_lock)) == qp) { 258 - rcu_assign_pointer(ibp->qp0, NULL); 258 + RCU_INIT_POINTER(ibp->qp0, NULL); 259 259 } else if (rcu_dereference_protected(ibp->qp1, 260 260 lockdep_is_held(&dev->qpt_lock)) == qp) { 261 - rcu_assign_pointer(ibp->qp1, NULL); 261 + RCU_INIT_POINTER(ibp->qp1, NULL); 262 262 } else { 263 263 struct qib_qp *q; 264 264 struct qib_qp __rcu **qpp; ··· 269 269 lockdep_is_held(&dev->qpt_lock))) != NULL; 270 270 qpp = &q->next) 271 271 if (q == qp) { 272 - rcu_assign_pointer(*qpp, 272 + RCU_INIT_POINTER(*qpp, 273 273 rcu_dereference_protected(qp->next, 274 274 lockdep_is_held(&dev->qpt_lock))); 275 275 removed = 1; ··· 315 315 for (n = 0; n < dev->qp_table_size; n++) { 316 316 qp = rcu_dereference_protected(dev->qp_table[n], 317 317 lockdep_is_held(&dev->qpt_lock)); 318 - rcu_assign_pointer(dev->qp_table[n], NULL); 318 + RCU_INIT_POINTER(dev->qp_table[n], NULL); 319 319 320 320 for (; qp; qp = rcu_dereference_protected(qp->next, 321 321 lockdep_is_held(&dev->qpt_lock)))
+8 -5
drivers/infiniband/hw/qib/qib_qsfp.c
··· 81 81 * Module could take up to 2 Msec to respond to MOD_SEL, and there 82 82 * is no way to tell if it is ready, so we must wait. 83 83 */ 84 - msleep(2); 84 + msleep(20); 85 85 86 86 /* Make sure TWSI bus is in sane state. */ 87 87 ret = qib_twsi_reset(dd); ··· 99 99 while (cnt < len) { 100 100 unsigned in_page; 101 101 int wlen = len - cnt; 102 + 102 103 in_page = addr % QSFP_PAGESIZE; 103 104 if ((in_page + wlen) > QSFP_PAGESIZE) 104 105 wlen = QSFP_PAGESIZE - in_page; ··· 140 139 else if (pass) 141 140 qib_dev_porterr(dd, ppd->port, "QSFP retries: %d\n", pass); 142 141 143 - msleep(2); 142 + msleep(20); 144 143 145 144 bail: 146 145 mutex_unlock(&dd->eep_lock); ··· 190 189 * Module could take up to 2 Msec to respond to MOD_SEL, 191 190 * and there is no way to tell if it is ready, so we must wait. 192 191 */ 193 - msleep(2); 192 + msleep(20); 194 193 195 194 /* Make sure TWSI bus is in sane state. */ 196 195 ret = qib_twsi_reset(dd); ··· 207 206 while (cnt < len) { 208 207 unsigned in_page; 209 208 int wlen = len - cnt; 209 + 210 210 in_page = addr % QSFP_PAGESIZE; 211 211 if ((in_page + wlen) > QSFP_PAGESIZE) 212 212 wlen = QSFP_PAGESIZE - in_page; ··· 236 234 * going away, and there is no way to tell if it is ready. 237 235 * so we must wait. 238 236 */ 239 - msleep(2); 237 + msleep(20); 240 238 241 239 bail: 242 240 mutex_unlock(&dd->eep_lock); ··· 298 296 * set the page to zero, Even if it already appears to be zero. 299 297 */ 300 298 u8 poke = 0; 299 + 301 300 ret = qib_qsfp_write(ppd, 127, &poke, 1); 302 301 udelay(50); 303 302 if (ret != 1) { ··· 483 480 udelay(20); /* Generous RST dwell */ 484 481 485 482 dd->f_gpio_mod(dd, mask, mask, mask); 486 - return; 487 483 } 488 484 489 485 void qib_qsfp_deinit(struct qib_qsfp_data *qd) ··· 542 540 543 541 while (bidx < QSFP_DEFAULT_HDR_CNT) { 544 542 int iidx; 543 + 545 544 ret = qsfp_read(ppd, bidx, bin_buff, QSFP_DUMP_CHUNK); 546 545 if (ret < 0) 547 546 goto bail;
+2 -2
drivers/infiniband/hw/qib/qib_rc.c
··· 1017 1017 /* Post a send completion queue entry if requested. */ 1018 1018 if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) || 1019 1019 (wqe->wr.send_flags & IB_SEND_SIGNALED)) { 1020 - memset(&wc, 0, sizeof wc); 1020 + memset(&wc, 0, sizeof(wc)); 1021 1021 wc.wr_id = wqe->wr.wr_id; 1022 1022 wc.status = IB_WC_SUCCESS; 1023 1023 wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode]; ··· 1073 1073 /* Post a send completion queue entry if requested. */ 1074 1074 if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) || 1075 1075 (wqe->wr.send_flags & IB_SEND_SIGNALED)) { 1076 - memset(&wc, 0, sizeof wc); 1076 + memset(&wc, 0, sizeof(wc)); 1077 1077 wc.wr_id = wqe->wr.wr_id; 1078 1078 wc.status = IB_WC_SUCCESS; 1079 1079 wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
+4 -4
drivers/infiniband/hw/qib/qib_ruc.c
··· 247 247 struct qib_pportdata *ppd = ppd_from_ibp(ibp); 248 248 249 249 return ppd->guid; 250 - } else 251 - return ibp->guids[index - 1]; 250 + } 251 + return ibp->guids[index - 1]; 252 252 } 253 253 254 254 static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id) ··· 420 420 goto serr; 421 421 } 422 422 423 - memset(&wc, 0, sizeof wc); 423 + memset(&wc, 0, sizeof(wc)); 424 424 send_status = IB_WC_SUCCESS; 425 425 426 426 release = 1; ··· 792 792 status != IB_WC_SUCCESS) { 793 793 struct ib_wc wc; 794 794 795 - memset(&wc, 0, sizeof wc); 795 + memset(&wc, 0, sizeof(wc)); 796 796 wc.wr_id = wqe->wr.wr_id; 797 797 wc.status = status; 798 798 wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
+7 -2
drivers/infiniband/hw/qib/qib_sd7220.c
··· 259 259 * it again during startup. 260 260 */ 261 261 u64 val; 262 + 262 263 rst_val &= ~(1ULL); 263 264 qib_write_kreg(dd, kr_hwerrmask, 264 265 dd->cspec->hwerrmask & ··· 591 590 * Both should be clear 592 591 */ 593 592 u64 newval = 0; 593 + 594 594 qib_write_kreg(dd, acc, newval); 595 595 /* First read after write is not trustworthy */ 596 596 pollval = qib_read_kreg32(dd, acc); ··· 603 601 /* Need to claim */ 604 602 u64 pollval; 605 603 u64 newval = EPB_ACC_REQ | oct_sel; 604 + 606 605 qib_write_kreg(dd, acc, newval); 607 606 /* First read after write is not trustworthy */ 608 607 pollval = qib_read_kreg32(dd, acc); ··· 815 812 if (!sofar) { 816 813 /* Only set address at start of chunk */ 817 814 int addrbyte = (addr + sofar) >> 8; 815 + 818 816 transval = csbit | EPB_MADDRH | addrbyte; 819 817 tries = epb_trans(dd, trans, transval, 820 818 &transval); ··· 926 922 * IRQ not set up at this point in init, so we poll. 927 923 */ 928 924 #define IB_SERDES_TRIM_DONE (1ULL << 11) 929 - #define TRIM_TMO (30) 925 + #define TRIM_TMO (15) 930 926 931 927 static int qib_sd_trimdone_poll(struct qib_devdata *dd) 932 928 { ··· 944 940 ret = 1; 945 941 break; 946 942 } 947 - msleep(10); 943 + msleep(20); 948 944 } 949 945 if (trim_tmo >= TRIM_TMO) { 950 946 qib_dev_err(dd, "No TRIMDONE in %d tries\n", trim_tmo); ··· 1075 1071 dds_reg_map >>= 4; 1076 1072 for (midx = 0; midx < DDS_ROWS; ++midx) { 1077 1073 u64 __iomem *daddr = taddr + ((midx << 4) + idx); 1074 + 1078 1075 data = dds_init_vals[midx].reg_vals[idx]; 1079 1076 writeq(data, daddr); 1080 1077 mmiowb();
+2 -26
drivers/infiniband/hw/qib/qib_sysfs.c
··· 586 586 container_of(device, struct qib_ibdev, ibdev.dev); 587 587 struct qib_devdata *dd = dd_from_dev(dev); 588 588 589 - buf[sizeof dd->serial] = '\0'; 590 - memcpy(buf, dd->serial, sizeof dd->serial); 589 + buf[sizeof(dd->serial)] = '\0'; 590 + memcpy(buf, dd->serial, sizeof(dd->serial)); 591 591 strcat(buf, "\n"); 592 592 return strlen(buf); 593 593 } ··· 609 609 ret = qib_reset_device(dd->unit); 610 610 bail: 611 611 return ret < 0 ? ret : count; 612 - } 613 - 614 - static ssize_t show_logged_errs(struct device *device, 615 - struct device_attribute *attr, char *buf) 616 - { 617 - struct qib_ibdev *dev = 618 - container_of(device, struct qib_ibdev, ibdev.dev); 619 - struct qib_devdata *dd = dd_from_dev(dev); 620 - int idx, count; 621 - 622 - /* force consistency with actual EEPROM */ 623 - if (qib_update_eeprom_log(dd) != 0) 624 - return -ENXIO; 625 - 626 - count = 0; 627 - for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) { 628 - count += scnprintf(buf + count, PAGE_SIZE - count, "%d%c", 629 - dd->eep_st_errs[idx], 630 - idx == (QIB_EEP_LOG_CNT - 1) ? '\n' : ' '); 631 - } 632 - 633 - return count; 634 612 } 635 613 636 614 /* ··· 657 679 static DEVICE_ATTR(nfreectxts, S_IRUGO, show_nfreectxts, NULL); 658 680 static DEVICE_ATTR(serial, S_IRUGO, show_serial, NULL); 659 681 static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL); 660 - static DEVICE_ATTR(logged_errors, S_IRUGO, show_logged_errs, NULL); 661 682 static DEVICE_ATTR(tempsense, S_IRUGO, show_tempsense, NULL); 662 683 static DEVICE_ATTR(localbus_info, S_IRUGO, show_localbus_info, NULL); 663 684 static DEVICE_ATTR(chip_reset, S_IWUSR, NULL, store_chip_reset); ··· 670 693 &dev_attr_nfreectxts, 671 694 &dev_attr_serial, 672 695 &dev_attr_boardversion, 673 - &dev_attr_logged_errors, 674 696 &dev_attr_tempsense, 675 697 &dev_attr_localbus_info, 676 698 &dev_attr_chip_reset,
+3 -2
drivers/infiniband/hw/qib/qib_twsi.c
··· 105 105 udelay(2); 106 106 else { 107 107 int rise_usec; 108 + 108 109 for (rise_usec = SCL_WAIT_USEC; rise_usec > 0; rise_usec -= 2) { 109 110 if (mask & dd->f_gpio_mod(dd, 0, 0, 0)) 110 111 break; ··· 327 326 static int qib_twsi_wr(struct qib_devdata *dd, int data, int flags) 328 327 { 329 328 int ret = 1; 329 + 330 330 if (flags & QIB_TWSI_START) 331 331 start_seq(dd); 332 332 ··· 437 435 int sub_len; 438 436 const u8 *bp = buffer; 439 437 int max_wait_time, i; 440 - int ret; 441 - ret = 1; 438 + int ret = 1; 442 439 443 440 while (len > 0) { 444 441 if (dev == QIB_TWSI_NO_DEV) {
+1
drivers/infiniband/hw/qib/qib_tx.c
··· 180 180 181 181 for (i = 0; i < cnt; i++) { 182 182 int which; 183 + 183 184 if (!test_bit(i, mask)) 184 185 continue; 185 186 /*
+1 -1
drivers/infiniband/hw/qib/qib_ud.c
··· 127 127 * present on the wire. 128 128 */ 129 129 length = swqe->length; 130 - memset(&wc, 0, sizeof wc); 130 + memset(&wc, 0, sizeof(wc)); 131 131 wc.byte_len = length + sizeof(struct ib_grh); 132 132 133 133 if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
+4 -4
drivers/infiniband/hw/qib/qib_user_sdma.c
··· 50 50 /* expected size of headers (for dma_pool) */ 51 51 #define QIB_USER_SDMA_EXP_HEADER_LENGTH 64 52 52 /* attempt to drain the queue for 5secs */ 53 - #define QIB_USER_SDMA_DRAIN_TIMEOUT 500 53 + #define QIB_USER_SDMA_DRAIN_TIMEOUT 250 54 54 55 55 /* 56 56 * track how many times a process open this driver. ··· 226 226 sdma_rb_node->refcount++; 227 227 } else { 228 228 int ret; 229 + 229 230 sdma_rb_node = kmalloc(sizeof( 230 231 struct qib_user_sdma_rb_node), GFP_KERNEL); 231 232 if (!sdma_rb_node) ··· 937 936 938 937 if (tiddma) { 939 938 char *tidsm = (char *)pkt + pktsize; 939 + 940 940 cfur = copy_from_user(tidsm, 941 941 iov[idx].iov_base, tidsmsize); 942 942 if (cfur) { ··· 1144 1142 qib_user_sdma_hwqueue_clean(ppd); 1145 1143 qib_user_sdma_queue_clean(ppd, pq); 1146 1144 mutex_unlock(&pq->lock); 1147 - msleep(10); 1145 + msleep(20); 1148 1146 } 1149 1147 1150 1148 if (pq->num_pending || pq->num_sending) { ··· 1318 1316 1319 1317 if (nfree && !list_empty(pktlist)) 1320 1318 goto retry; 1321 - 1322 - return; 1323 1319 } 1324 1320 1325 1321 /* pq->lock must be held, get packets on the wire... */
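The same msleep rebalancing appears here: the per-iteration sleep goes from 10 ms to 20 ms and QIB_USER_SDMA_DRAIN_TIMEOUT drops from 500 to 250, keeping the total roughly at the 5 seconds the file's "attempt to drain the queue for 5secs" comment promises (250 * 20 ms = 5 s). A compile-time guard one could add to document that invariant, shown only as a sketch; QIB_USER_SDMA_DRAIN_SLEEP_MS is a made-up name, since the driver passes the literal 20 to msleep():

/*
 * Sketch only, not part of the patch: a compile-time check documenting that
 * the drain wait stays at 5 seconds after the rebalancing.
 */
#define QIB_USER_SDMA_DRAIN_TIMEOUT     250     /* iterations */
#define QIB_USER_SDMA_DRAIN_SLEEP_MS    20      /* msleep() per iteration */

_Static_assert(QIB_USER_SDMA_DRAIN_TIMEOUT * QIB_USER_SDMA_DRAIN_SLEEP_MS == 5000,
               "drain budget should remain 5 seconds");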
+9 -6
drivers/infiniband/hw/qib/qib_verbs.c
··· 1342 1342 done: 1343 1343 if (dd->flags & QIB_USE_SPCL_TRIG) { 1344 1344 u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023; 1345 + 1345 1346 qib_flush_wc(); 1346 1347 __raw_writel(0xaebecede, piobuf_orig + spcl_off); 1347 1348 } ··· 1745 1744 * we allow allocations of more than we report for this value. 1746 1745 */ 1747 1746 1748 - pd = kmalloc(sizeof *pd, GFP_KERNEL); 1747 + pd = kmalloc(sizeof(*pd), GFP_KERNEL); 1749 1748 if (!pd) { 1750 1749 ret = ERR_PTR(-ENOMEM); 1751 1750 goto bail; ··· 1830 1829 goto bail; 1831 1830 } 1832 1831 1833 - ah = kmalloc(sizeof *ah, GFP_ATOMIC); 1832 + ah = kmalloc(sizeof(*ah), GFP_ATOMIC); 1834 1833 if (!ah) { 1835 1834 ret = ERR_PTR(-ENOMEM); 1836 1835 goto bail; ··· 1863 1862 struct ib_ah *ah = ERR_PTR(-EINVAL); 1864 1863 struct qib_qp *qp0; 1865 1864 1866 - memset(&attr, 0, sizeof attr); 1865 + memset(&attr, 0, sizeof(attr)); 1867 1866 attr.dlid = dlid; 1868 1867 attr.port_num = ppd_from_ibp(ibp)->port; 1869 1868 rcu_read_lock(); ··· 1978 1977 struct qib_ucontext *context; 1979 1978 struct ib_ucontext *ret; 1980 1979 1981 - context = kmalloc(sizeof *context, GFP_KERNEL); 1980 + context = kmalloc(sizeof(*context), GFP_KERNEL); 1982 1981 if (!context) { 1983 1982 ret = ERR_PTR(-ENOMEM); 1984 1983 goto bail; ··· 2055 2054 2056 2055 dev->qp_table_size = ib_qib_qp_table_size; 2057 2056 get_random_bytes(&dev->qp_rnd, sizeof(dev->qp_rnd)); 2058 - dev->qp_table = kmalloc(dev->qp_table_size * sizeof *dev->qp_table, 2057 + dev->qp_table = kmalloc_array( 2058 + dev->qp_table_size, 2059 + sizeof(*dev->qp_table), 2059 2060 GFP_KERNEL); 2060 2061 if (!dev->qp_table) { 2061 2062 ret = -ENOMEM; ··· 2125 2122 for (i = 0; i < ppd->sdma_descq_cnt; i++) { 2126 2123 struct qib_verbs_txreq *tx; 2127 2124 2128 - tx = kzalloc(sizeof *tx, GFP_KERNEL); 2125 + tx = kzalloc(sizeof(*tx), GFP_KERNEL); 2129 2126 if (!tx) { 2130 2127 ret = -ENOMEM; 2131 2128 goto err_tx;
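Among the sizeof() cleanups, the QP-table allocation switches to kmalloc_array(), which fails the allocation when the count * element-size product would overflow instead of letting it wrap to a too-small buffer. A userspace analogue of that check; alloc_array() is our own helper name, not a kernel or libc API:

/*
 * Userspace analogue of the kmalloc_array() change: refuse a product that
 * would overflow rather than allocating a truncated buffer.
 */
#include <stdint.h>
#include <stdlib.h>

void *alloc_array(size_t n, size_t size)
{
        if (size != 0 && n > SIZE_MAX / size)
                return NULL;    /* overflow: refuse, as kmalloc_array() does */
        return malloc(n * size);
}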
+2 -2
drivers/infiniband/hw/qib/qib_verbs_mcast.c
··· 43 43 { 44 44 struct qib_mcast_qp *mqp; 45 45 46 - mqp = kmalloc(sizeof *mqp, GFP_KERNEL); 46 + mqp = kmalloc(sizeof(*mqp), GFP_KERNEL); 47 47 if (!mqp) 48 48 goto bail; 49 49 ··· 75 75 { 76 76 struct qib_mcast *mcast; 77 77 78 - mcast = kmalloc(sizeof *mcast, GFP_KERNEL); 78 + mcast = kmalloc(sizeof(*mcast), GFP_KERNEL); 79 79 if (!mcast) 80 80 goto bail; 81 81
+4 -3
drivers/infiniband/hw/qib/qib_wc_x86_64.c
··· 72 72 if (dd->piobcnt2k && dd->piobcnt4k) { 73 73 /* 2 sizes for chip */ 74 74 unsigned long pio2kbase, pio4kbase; 75 + 75 76 pio2kbase = dd->piobufbase & 0xffffffffUL; 76 77 pio4kbase = (dd->piobufbase >> 32) & 0xffffffffUL; 77 78 if (pio2kbase < pio4kbase) { ··· 92 91 } 93 92 94 93 for (bits = 0; !(piolen & (1ULL << bits)); bits++) 95 - /* do nothing */ ; 94 + ; /* do nothing */ 96 95 97 96 if (piolen != (1ULL << bits)) { 98 97 piolen >>= bits; ··· 101 100 piolen = 1ULL << (bits + 1); 102 101 } 103 102 if (pioaddr & (piolen - 1)) { 104 - u64 atmp; 105 - atmp = pioaddr & ~(piolen - 1); 103 + u64 atmp = pioaddr & ~(piolen - 1); 104 + 106 105 if (atmp < addr || (atmp + piolen) > (addr + len)) { 107 106 qib_dev_err(dd, 108 107 "No way to align address/size (%llx/%llx), no WC mtrr\n",
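The edits here are cosmetic, but the surrounding logic is worth a note: the write-combining setup rounds the PIO length up to a power of two and the base address down to that alignment, then verifies the aligned range still fits inside the chip's mapping before a WC MTRR can be programmed. A standalone trace of that arithmetic with made-up addresses:

/*
 * Standalone trace of the alignment arithmetic in the hunk above; the
 * address and length values are invented for illustration.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t addr = 0x2000, len = 0x4000;           /* whole device mapping */
        uint64_t pioaddr = 0x2800, piolen = 0x1800;     /* PIO buffer region */
        unsigned int bits;

        for (bits = 0; !(piolen & (1ULL << bits)); bits++)
                ;                                       /* lowest set bit */

        if (piolen != (1ULL << bits)) {                 /* not a power of two */
                piolen >>= bits;
                while (piolen >>= 1)
                        bits++;
                piolen = 1ULL << (bits + 1);            /* round up: 0x1800 -> 0x2000 */
        }
        if (pioaddr & (piolen - 1)) {
                uint64_t atmp = pioaddr & ~(piolen - 1); /* round base down */

                if (atmp < addr || (atmp + piolen) > (addr + len)) {
                        fprintf(stderr, "cannot align %" PRIx64 "/%" PRIx64 "\n",
                                pioaddr, piolen);
                        return 1;
                }
                pioaddr = atmp;
        }
        printf("WC range: %#" PRIx64 " + %#" PRIx64 "\n", pioaddr, piolen);
        return 0;
}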
+3 -1
drivers/infiniband/ulp/iser/iscsi_iser.h
··· 654 654 enum dma_data_direction dma_dir); 655 655 656 656 void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task, 657 - struct iser_data_buf *data); 657 + struct iser_data_buf *data, 658 + enum dma_data_direction dir); 659 + 658 660 int iser_initialize_task_headers(struct iscsi_task *task, 659 661 struct iser_tx_desc *tx_desc); 660 662 int iser_alloc_rx_descriptors(struct iser_conn *iser_conn,
+8 -8
drivers/infiniband/ulp/iser/iser_initiator.c
··· 320 320 struct ib_conn *ib_conn = &iser_conn->ib_conn; 321 321 struct iser_device *device = ib_conn->device; 322 322 323 - if (!iser_conn->rx_descs) 324 - goto free_login_buf; 325 - 326 323 if (device->iser_free_rdma_reg_res) 327 324 device->iser_free_rdma_reg_res(ib_conn); 328 325 ··· 331 334 /* make sure we never redo any unmapping */ 332 335 iser_conn->rx_descs = NULL; 333 336 334 - free_login_buf: 335 337 iser_free_login_buf(iser_conn); 336 338 } 337 339 ··· 710 714 device->iser_unreg_rdma_mem(iser_task, ISER_DIR_IN); 711 715 if (is_rdma_data_aligned) 712 716 iser_dma_unmap_task_data(iser_task, 713 - &iser_task->data[ISER_DIR_IN]); 717 + &iser_task->data[ISER_DIR_IN], 718 + DMA_FROM_DEVICE); 714 719 if (prot_count && is_rdma_prot_aligned) 715 720 iser_dma_unmap_task_data(iser_task, 716 - &iser_task->prot[ISER_DIR_IN]); 721 + &iser_task->prot[ISER_DIR_IN], 722 + DMA_FROM_DEVICE); 717 723 } 718 724 719 725 if (iser_task->dir[ISER_DIR_OUT]) { 720 726 device->iser_unreg_rdma_mem(iser_task, ISER_DIR_OUT); 721 727 if (is_rdma_data_aligned) 722 728 iser_dma_unmap_task_data(iser_task, 723 - &iser_task->data[ISER_DIR_OUT]); 729 + &iser_task->data[ISER_DIR_OUT], 730 + DMA_TO_DEVICE); 724 731 if (prot_count && is_rdma_prot_aligned) 725 732 iser_dma_unmap_task_data(iser_task, 726 - &iser_task->prot[ISER_DIR_OUT]); 733 + &iser_task->prot[ISER_DIR_OUT], 734 + DMA_TO_DEVICE); 727 735 } 728 736 }
+6 -3
drivers/infiniband/ulp/iser/iser_memory.c
··· 332 332 } 333 333 334 334 void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task, 335 - struct iser_data_buf *data) 335 + struct iser_data_buf *data, 336 + enum dma_data_direction dir) 336 337 { 337 338 struct ib_device *dev; 338 339 339 340 dev = iser_task->iser_conn->ib_conn.device->ib_device; 340 - ib_dma_unmap_sg(dev, data->buf, data->size, DMA_FROM_DEVICE); 341 + ib_dma_unmap_sg(dev, data->buf, data->size, dir); 341 342 } 342 343 343 344 static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task, ··· 358 357 iser_data_buf_dump(mem, ibdev); 359 358 360 359 /* unmap the command data before accessing it */ 361 - iser_dma_unmap_task_data(iser_task, mem); 360 + iser_dma_unmap_task_data(iser_task, mem, 361 + (cmd_dir == ISER_DIR_OUT) ? 362 + DMA_TO_DEVICE : DMA_FROM_DEVICE); 362 363 363 364 /* allocate copy buf, if we are writing, copy the */ 364 365 /* unaligned scatterlist, dma map the copy */
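iser_dma_unmap_task_data() now takes the DMA direction explicitly, so each unmap matches how the scatterlist was mapped; the bounce-buffer path above derives it from the command direction instead of always passing DMA_FROM_DEVICE. A minimal sketch of that selection, where the helper and the enum stubs are ours, not kernel definitions:

/*
 * Minimal sketch of the direction selection the callers above apply; the
 * helper name and the enum stubs are illustrative only.
 */
enum dma_data_direction { DMA_TO_DEVICE, DMA_FROM_DEVICE };
enum iser_data_dir { ISER_DIR_IN, ISER_DIR_OUT };

enum dma_data_direction iser_dir_to_dma(enum iser_data_dir cmd_dir)
{
        /* write commands were mapped towards the device, reads from it */
        return cmd_dir == ISER_DIR_OUT ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
}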
+16 -11
drivers/infiniband/ulp/iser/iser_verbs.c
··· 600 600 /** 601 601 * iser_free_ib_conn_res - release IB related resources 602 602 * @iser_conn: iser connection struct 603 - * @destroy_device: indicator if we need to try to release 604 - * the iser device (only iscsi shutdown and DEVICE_REMOVAL 605 - * will use this. 603 + * @destroy: indicator if we need to try to release the 604 + * iser device and memory regoins pool (only iscsi 605 + * shutdown and DEVICE_REMOVAL will use this). 606 606 * 607 607 * This routine is called with the iser state mutex held 608 608 * so the cm_id removal is out of here. It is Safe to 609 609 * be invoked multiple times. 610 610 */ 611 611 static void iser_free_ib_conn_res(struct iser_conn *iser_conn, 612 - bool destroy_device) 612 + bool destroy) 613 613 { 614 614 struct ib_conn *ib_conn = &iser_conn->ib_conn; 615 615 struct iser_device *device = ib_conn->device; ··· 617 617 iser_info("freeing conn %p cma_id %p qp %p\n", 618 618 iser_conn, ib_conn->cma_id, ib_conn->qp); 619 619 620 - iser_free_rx_descriptors(iser_conn); 621 - 622 620 if (ib_conn->qp != NULL) { 623 621 ib_conn->comp->active_qps--; 624 622 rdma_destroy_qp(ib_conn->cma_id); 625 623 ib_conn->qp = NULL; 626 624 } 627 625 628 - if (destroy_device && device != NULL) { 629 - iser_device_try_release(device); 630 - ib_conn->device = NULL; 626 + if (destroy) { 627 + if (iser_conn->rx_descs) 628 + iser_free_rx_descriptors(iser_conn); 629 + 630 + if (device != NULL) { 631 + iser_device_try_release(device); 632 + ib_conn->device = NULL; 633 + } 631 634 } 632 635 } 633 636 ··· 646 643 mutex_unlock(&ig.connlist_mutex); 647 644 648 645 mutex_lock(&iser_conn->state_mutex); 646 + /* In case we endup here without ep_disconnect being invoked. */ 649 647 if (iser_conn->state != ISER_CONN_DOWN) { 650 648 iser_warn("iser conn %p state %d, expected state down.\n", 651 649 iser_conn, iser_conn->state); 650 + iscsi_destroy_endpoint(iser_conn->ep); 652 651 iser_conn->state = ISER_CONN_DOWN; 653 652 } 654 653 /* ··· 845 840 } 846 841 847 842 static void iser_cleanup_handler(struct rdma_cm_id *cma_id, 848 - bool destroy_device) 843 + bool destroy) 849 844 { 850 845 struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context; 851 846 ··· 855 850 * and flush errors. 856 851 */ 857 852 iser_disconnected_handler(cma_id); 858 - iser_free_ib_conn_res(iser_conn, destroy_device); 853 + iser_free_ib_conn_res(iser_conn, destroy); 859 854 complete(&iser_conn->ib_completion); 860 855 }; 861 856
+23
include/uapi/rdma/ib_user_verbs.h
··· 90 90 }; 91 91 92 92 enum { 93 + IB_USER_VERBS_EX_CMD_QUERY_DEVICE = IB_USER_VERBS_CMD_QUERY_DEVICE, 93 94 IB_USER_VERBS_EX_CMD_CREATE_FLOW = IB_USER_VERBS_CMD_THRESHOLD, 94 95 IB_USER_VERBS_EX_CMD_DESTROY_FLOW, 95 96 }; ··· 200 199 __u8 local_ca_ack_delay; 201 200 __u8 phys_port_cnt; 202 201 __u8 reserved[4]; 202 + }; 203 + 204 + struct ib_uverbs_ex_query_device { 205 + __u32 comp_mask; 206 + __u32 reserved; 207 + }; 208 + 209 + struct ib_uverbs_odp_caps { 210 + __u64 general_caps; 211 + struct { 212 + __u32 rc_odp_caps; 213 + __u32 uc_odp_caps; 214 + __u32 ud_odp_caps; 215 + } per_transport_caps; 216 + __u32 reserved; 217 + }; 218 + 219 + struct ib_uverbs_ex_query_device_resp { 220 + struct ib_uverbs_query_device_resp base; 221 + __u32 comp_mask; 222 + __u32 response_length; 223 + struct ib_uverbs_odp_caps odp_caps; 203 224 }; 204 225 205 226 struct ib_uverbs_query_port {
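The extended query-device response is laid out to grow: comp_mask and response_length let newer kernels append fields such as odp_caps while older ones return a shorter reply. A userspace-side sketch of how a consumer might gate on that, assuming response_length counts bytes from the start of the response structure; the helper is illustrative, not a libibverbs API:

/*
 * Illustrative check: only read odp_caps if the kernel reported enough
 * response_length to cover it (assumes response_length is measured from the
 * start of the response).
 */
#include <stdbool.h>
#include <stddef.h>
#include <rdma/ib_user_verbs.h>

bool ex_query_device_has_odp(const struct ib_uverbs_ex_query_device_resp *resp)
{
        return resp->response_length >=
               offsetof(struct ib_uverbs_ex_query_device_resp, odp_caps) +
               sizeof(resp->odp_caps);
}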