Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge branches 'misc', 'qedr', 'reject-helpers', 'rxe' and 'srp' into merge-test

+457 -162
+1 -5
MAINTAINERS
··· 6376 6376 6377 6377 INTEL RDMA RNIC DRIVER 6378 6378 M: Faisal Latif <faisal.latif@intel.com> 6379 - R: Chien Tin Tung <chien.tin.tung@intel.com> 6380 - R: Mustafa Ismail <mustafa.ismail@intel.com> 6381 - R: Shiraz Saleem <shiraz.saleem@intel.com> 6382 - R: Tatyana Nikolova <tatyana.e.nikolova@intel.com> 6379 + M: Shiraz Saleem <shiraz.saleem@intel.com> 6383 6380 L: linux-rdma@vger.kernel.org 6384 6381 S: Supported 6385 6382 F: drivers/infiniband/hw/i40iw/ ··· 10898 10901 EMULEX ONECONNECT ROCE DRIVER 10899 10902 M: Selvin Xavier <selvin.xavier@avagotech.com> 10900 10903 M: Devesh Sharma <devesh.sharma@avagotech.com> 10901 - M: Mitesh Ahuja <mitesh.ahuja@avagotech.com> 10902 10904 L: linux-rdma@vger.kernel.org 10903 10905 W: http://www.emulex.com 10904 10906 S: Supported
+71 -1
drivers/infiniband/core/cm.c
··· 57 57 MODULE_DESCRIPTION("InfiniBand CM"); 58 58 MODULE_LICENSE("Dual BSD/GPL"); 59 59 60 + static const char * const ibcm_rej_reason_strs[] = { 61 + [IB_CM_REJ_NO_QP] = "no QP", 62 + [IB_CM_REJ_NO_EEC] = "no EEC", 63 + [IB_CM_REJ_NO_RESOURCES] = "no resources", 64 + [IB_CM_REJ_TIMEOUT] = "timeout", 65 + [IB_CM_REJ_UNSUPPORTED] = "unsupported", 66 + [IB_CM_REJ_INVALID_COMM_ID] = "invalid comm ID", 67 + [IB_CM_REJ_INVALID_COMM_INSTANCE] = "invalid comm instance", 68 + [IB_CM_REJ_INVALID_SERVICE_ID] = "invalid service ID", 69 + [IB_CM_REJ_INVALID_TRANSPORT_TYPE] = "invalid transport type", 70 + [IB_CM_REJ_STALE_CONN] = "stale conn", 71 + [IB_CM_REJ_RDC_NOT_EXIST] = "RDC not exist", 72 + [IB_CM_REJ_INVALID_GID] = "invalid GID", 73 + [IB_CM_REJ_INVALID_LID] = "invalid LID", 74 + [IB_CM_REJ_INVALID_SL] = "invalid SL", 75 + [IB_CM_REJ_INVALID_TRAFFIC_CLASS] = "invalid traffic class", 76 + [IB_CM_REJ_INVALID_HOP_LIMIT] = "invalid hop limit", 77 + [IB_CM_REJ_INVALID_PACKET_RATE] = "invalid packet rate", 78 + [IB_CM_REJ_INVALID_ALT_GID] = "invalid alt GID", 79 + [IB_CM_REJ_INVALID_ALT_LID] = "invalid alt LID", 80 + [IB_CM_REJ_INVALID_ALT_SL] = "invalid alt SL", 81 + [IB_CM_REJ_INVALID_ALT_TRAFFIC_CLASS] = "invalid alt traffic class", 82 + [IB_CM_REJ_INVALID_ALT_HOP_LIMIT] = "invalid alt hop limit", 83 + [IB_CM_REJ_INVALID_ALT_PACKET_RATE] = "invalid alt packet rate", 84 + [IB_CM_REJ_PORT_CM_REDIRECT] = "port CM redirect", 85 + [IB_CM_REJ_PORT_REDIRECT] = "port redirect", 86 + [IB_CM_REJ_INVALID_MTU] = "invalid MTU", 87 + [IB_CM_REJ_INSUFFICIENT_RESP_RESOURCES] = "insufficient resp resources", 88 + [IB_CM_REJ_CONSUMER_DEFINED] = "consumer defined", 89 + [IB_CM_REJ_INVALID_RNR_RETRY] = "invalid RNR retry", 90 + [IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID] = "duplicate local comm ID", 91 + [IB_CM_REJ_INVALID_CLASS_VERSION] = "invalid class version", 92 + [IB_CM_REJ_INVALID_FLOW_LABEL] = "invalid flow label", 93 + [IB_CM_REJ_INVALID_ALT_FLOW_LABEL] = "invalid alt flow label", 94 + }; 95 + 96 + const char *__attribute_const__ ibcm_reject_msg(int reason) 97 + { 98 + size_t index = reason; 99 + 100 + if (index < ARRAY_SIZE(ibcm_rej_reason_strs) && 101 + ibcm_rej_reason_strs[index]) 102 + return ibcm_rej_reason_strs[index]; 103 + else 104 + return "unrecognized reason"; 105 + } 106 + EXPORT_SYMBOL(ibcm_reject_msg); 107 + 60 108 static void cm_add_one(struct ib_device *device); 61 109 static void cm_remove_one(struct ib_device *device, void *client_data); 62 110 ··· 1567 1519 struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv; 1568 1520 struct cm_timewait_info *timewait_info; 1569 1521 struct cm_req_msg *req_msg; 1522 + struct ib_cm_id *cm_id; 1570 1523 1571 1524 req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad; 1572 1525 ··· 1589 1540 timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info); 1590 1541 if (timewait_info) { 1591 1542 cm_cleanup_timewait(cm_id_priv->timewait_info); 1543 + cur_cm_id_priv = cm_get_id(timewait_info->work.local_id, 1544 + timewait_info->work.remote_id); 1545 + 1592 1546 spin_unlock_irq(&cm.lock); 1593 1547 cm_issue_rej(work->port, work->mad_recv_wc, 1594 1548 IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ, 1595 1549 NULL, 0); 1550 + if (cur_cm_id_priv) { 1551 + cm_id = &cur_cm_id_priv->id; 1552 + ib_send_cm_dreq(cm_id, NULL, 0); 1553 + cm_deref_id(cur_cm_id_priv); 1554 + } 1596 1555 return NULL; 1597 1556 } 1598 1557 ··· 1976 1919 struct cm_id_private *cm_id_priv; 1977 1920 struct cm_rep_msg *rep_msg; 1978 1921 int ret; 1922 + struct cm_id_private *cur_cm_id_priv; 1923 + struct ib_cm_id *cm_id; 1924 + struct cm_timewait_info *timewait_info; 1979 1925 1980 1926 rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad; 1981 1927 cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id, 0); ··· 2013 1953 goto error; 2014 1954 } 2015 1955 /* Check for a stale connection. */ 2016 - if (cm_insert_remote_qpn(cm_id_priv->timewait_info)) { 1956 + timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info); 1957 + if (timewait_info) { 2017 1958 rb_erase(&cm_id_priv->timewait_info->remote_id_node, 2018 1959 &cm.remote_id_table); 2019 1960 cm_id_priv->timewait_info->inserted_remote_id = 0; 1961 + cur_cm_id_priv = cm_get_id(timewait_info->work.local_id, 1962 + timewait_info->work.remote_id); 1963 + 2020 1964 spin_unlock(&cm.lock); 2021 1965 spin_unlock_irq(&cm_id_priv->lock); 2022 1966 cm_issue_rej(work->port, work->mad_recv_wc, 2023 1967 IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP, 2024 1968 NULL, 0); 2025 1969 ret = -EINVAL; 1970 + if (cur_cm_id_priv) { 1971 + cm_id = &cur_cm_id_priv->id; 1972 + ib_send_cm_dreq(cm_id, NULL, 0); 1973 + cm_deref_id(cur_cm_id_priv); 1974 + } 1975 + 2026 1976 goto error; 2027 1977 } 2028 1978 spin_unlock(&cm.lock);
+43
drivers/infiniband/core/cma.c
··· 101 101 } 102 102 EXPORT_SYMBOL(rdma_event_msg); 103 103 104 + const char *__attribute_const__ rdma_reject_msg(struct rdma_cm_id *id, 105 + int reason) 106 + { 107 + if (rdma_ib_or_roce(id->device, id->port_num)) 108 + return ibcm_reject_msg(reason); 109 + 110 + if (rdma_protocol_iwarp(id->device, id->port_num)) 111 + return iwcm_reject_msg(reason); 112 + 113 + WARN_ON_ONCE(1); 114 + return "unrecognized transport"; 115 + } 116 + EXPORT_SYMBOL(rdma_reject_msg); 117 + 118 + bool rdma_is_consumer_reject(struct rdma_cm_id *id, int reason) 119 + { 120 + if (rdma_ib_or_roce(id->device, id->port_num)) 121 + return reason == IB_CM_REJ_CONSUMER_DEFINED; 122 + 123 + if (rdma_protocol_iwarp(id->device, id->port_num)) 124 + return reason == -ECONNREFUSED; 125 + 126 + WARN_ON_ONCE(1); 127 + return false; 128 + } 129 + EXPORT_SYMBOL(rdma_is_consumer_reject); 130 + 131 + const void *rdma_consumer_reject_data(struct rdma_cm_id *id, 132 + struct rdma_cm_event *ev, u8 *data_len) 133 + { 134 + const void *p; 135 + 136 + if (rdma_is_consumer_reject(id, ev->status)) { 137 + *data_len = ev->param.conn.private_data_len; 138 + p = ev->param.conn.private_data; 139 + } else { 140 + *data_len = 0; 141 + p = NULL; 142 + } 143 + return p; 144 + } 145 + EXPORT_SYMBOL(rdma_consumer_reject_data); 146 + 104 147 static void cma_add_one(struct ib_device *device); 105 148 static void cma_remove_one(struct ib_device *device, void *client_data); 106 149
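Taken together, the three helpers above give ULPs one transport-independent way to decode a REJECT event. A minimal sketch of an event-handler fragment consuming them (the handler name and log text are illustrative; the helper calls are the ones added in this hunk):

	#include <rdma/rdma_cm.h>

	/* Hypothetical rdma_cm event handler fragment. */
	static int example_cm_handler(struct rdma_cm_id *id,
				      struct rdma_cm_event *event)
	{
		if (event->event == RDMA_CM_EVENT_REJECTED) {
			const void *data;
			u8 len;

			/* Transport-appropriate (IB/RoCE vs iWARP) reason string. */
			pr_info("connect rejected: %s\n",
				rdma_reject_msg(id, event->status));

			/* Non-NULL only when the remote consumer rejected us. */
			data = rdma_consumer_reject_data(id, event, &len);
			if (data)
				pr_info("peer sent %u bytes of reject data\n", len);
		}
		return 0;
	}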
+21
drivers/infiniband/core/iwcm.c
··· 59 59 MODULE_DESCRIPTION("iWARP CM"); 60 60 MODULE_LICENSE("Dual BSD/GPL"); 61 61 62 + static const char * const iwcm_rej_reason_strs[] = { 63 + [ECONNRESET] = "reset by remote host", 64 + [ECONNREFUSED] = "refused by remote application", 65 + [ETIMEDOUT] = "setup timeout", 66 + }; 67 + 68 + const char *__attribute_const__ iwcm_reject_msg(int reason) 69 + { 70 + size_t index; 71 + 72 + /* iWARP uses negative errnos */ 73 + index = -reason; 74 + 75 + if (index < ARRAY_SIZE(iwcm_rej_reason_strs) && 76 + iwcm_rej_reason_strs[index]) 77 + return iwcm_rej_reason_strs[index]; 78 + else 79 + return "unrecognized reason"; 80 + } 81 + EXPORT_SYMBOL(iwcm_reject_msg); 82 + 62 83 static struct ibnl_client_cbs iwcm_nl_cb_table[] = { 63 84 [RDMA_NL_IWPM_REG_PID] = {.dump = iwpm_register_pid_cb}, 64 85 [RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb},
+3 -3
drivers/infiniband/core/mad.c
··· 769 769 * If we are at the start of the LID routed part, don't update the 770 770 * hop_ptr or hop_cnt. See section 14.2.2, Vol 1 IB spec. 771 771 */ 772 - if (opa && smp->class_version == OPA_SMP_CLASS_VERSION) { 772 + if (opa && smp->class_version == OPA_SM_CLASS_VERSION) { 773 773 u32 opa_drslid; 774 774 775 775 if ((opa_get_smp_direction(opa_smp) ··· 1728 1728 if (!class) 1729 1729 goto out; 1730 1730 if (convert_mgmt_class(mad_hdr->mgmt_class) >= 1731 - IB_MGMT_MAX_METHODS) 1731 + ARRAY_SIZE(class->method_table)) 1732 1732 goto out; 1733 1733 method = class->method_table[convert_mgmt_class( 1734 1734 mad_hdr->mgmt_class)]; ··· 2149 2149 struct ib_mad_hdr *mad_hdr = (struct ib_mad_hdr *)recv->mad; 2150 2150 2151 2151 if (opa && mad_hdr->base_version == OPA_MGMT_BASE_VERSION && 2152 - mad_hdr->class_version == OPA_SMI_CLASS_VERSION) 2152 + mad_hdr->class_version == OPA_SM_CLASS_VERSION) 2153 2153 return handle_opa_smi(port_priv, qp_info, wc, port_num, recv, 2154 2154 response); 2155 2155
+5 -2
drivers/infiniband/core/multicast.c
··· 518 518 process_join_error(group, status); 519 519 else { 520 520 int mgids_changed, is_mgid0; 521 - ib_find_pkey(group->port->dev->device, group->port->port_num, 522 - be16_to_cpu(rec->pkey), &pkey_index); 521 + 522 + if (ib_find_pkey(group->port->dev->device, 523 + group->port->port_num, be16_to_cpu(rec->pkey), 524 + &pkey_index)) 525 + pkey_index = MCAST_INVALID_PKEY_INDEX; 523 526 524 527 spin_lock_irq(&group->port->lock); 525 528 if (group->state == MCAST_BUSY &&
+1 -1
drivers/infiniband/core/umem.c
··· 51 51 52 52 if (umem->nmap > 0) 53 53 ib_dma_unmap_sg(dev, umem->sg_head.sgl, 54 - umem->nmap, 54 + umem->npages, 55 55 DMA_BIDIRECTIONAL); 56 56 57 57 for_each_sg(umem->sg_head.sgl, sg, umem->npages, i) {
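The one-liner above follows the DMA API contract: the unmap call must be given the nents originally passed to the map call (here umem->npages), not the possibly smaller mapped count the map call returned (umem->nmap). The contract in condensed form, with illustrative variables:

	int mapped;

	/* Mapping may coalesce entries and return fewer than npages... */
	mapped = ib_dma_map_sg(dev, sgl, npages, DMA_BIDIRECTIONAL);
	if (!mapped)
		return -ENOMEM;
	/* ...do I/O over the first 'mapped' entries... */

	/* ...but the unmap must see the original nents, not 'mapped'. */
	ib_dma_unmap_sg(dev, sgl, npages, DMA_BIDIRECTIONAL);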
+6 -6
drivers/infiniband/hw/hfi1/mad.c
··· 128 128 smp = send_buf->mad; 129 129 smp->base_version = OPA_MGMT_BASE_VERSION; 130 130 smp->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED; 131 - smp->class_version = OPA_SMI_CLASS_VERSION; 131 + smp->class_version = OPA_SM_CLASS_VERSION; 132 132 smp->method = IB_MGMT_METHOD_TRAP; 133 133 ibp->rvp.tid++; 134 134 smp->tid = cpu_to_be64(ibp->rvp.tid); ··· 344 344 345 345 ni->port_guid = get_sguid(to_iport(ibdev, port), HFI1_PORT_GUID_INDEX); 346 346 ni->base_version = OPA_MGMT_BASE_VERSION; 347 - ni->class_version = OPA_SMI_CLASS_VERSION; 347 + ni->class_version = OPA_SM_CLASS_VERSION; 348 348 ni->node_type = 1; /* channel adapter */ 349 349 ni->num_ports = ibdev->phys_port_cnt; 350 350 /* This is already in network order */ ··· 381 381 382 382 nip->port_guid = get_sguid(to_iport(ibdev, port), HFI1_PORT_GUID_INDEX); 383 383 nip->base_version = OPA_MGMT_BASE_VERSION; 384 - nip->class_version = OPA_SMI_CLASS_VERSION; 384 + nip->class_version = OPA_SM_CLASS_VERSION; 385 385 nip->node_type = 1; /* channel adapter */ 386 386 nip->num_ports = ibdev->phys_port_cnt; 387 387 /* This is already in network order */ ··· 2303 2303 pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD; 2304 2304 2305 2305 p->base_version = OPA_MGMT_BASE_VERSION; 2306 - p->class_version = OPA_SMI_CLASS_VERSION; 2306 + p->class_version = OPA_SM_CLASS_VERSION; 2307 2307 /* 2308 2308 * Expected response time is 4.096 usec. * 2^18 == 1.073741824 sec. 2309 2309 */ ··· 4023 4023 4024 4024 am = be32_to_cpu(smp->attr_mod); 4025 4025 attr_id = smp->attr_id; 4026 - if (smp->class_version != OPA_SMI_CLASS_VERSION) { 4026 + if (smp->class_version != OPA_SM_CLASS_VERSION) { 4027 4027 smp->status |= IB_SMP_UNSUP_VERSION; 4028 4028 ret = reply((struct ib_mad_hdr *)smp); 4029 4029 return ret; ··· 4233 4233 4234 4234 *out_mad = *in_mad; 4235 4235 4236 - if (pmp->mad_hdr.class_version != OPA_SMI_CLASS_VERSION) { 4236 + if (pmp->mad_hdr.class_version != OPA_SM_CLASS_VERSION) { 4237 4237 pmp->mad_hdr.status |= IB_SMP_UNSUP_VERSION; 4238 4238 return reply((struct ib_mad_hdr *)pmp); 4239 4239 }
+2 -1
drivers/infiniband/hw/mlx4/main.c
··· 430 430 struct mlx4_ib_dev *dev = to_mdev(ibdev); 431 431 struct ib_smp *in_mad = NULL; 432 432 struct ib_smp *out_mad = NULL; 433 - int err = -ENOMEM; 433 + int err; 434 434 int have_ib_ports; 435 435 struct mlx4_uverbs_ex_query_device cmd; 436 436 struct mlx4_uverbs_ex_query_device_resp resp = {.comp_mask = 0}; ··· 455 455 sizeof(resp.response_length); 456 456 in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL); 457 457 out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL); 458 + err = -ENOMEM; 458 459 if (!in_mad || !out_mad) 459 460 goto out; 460 461
+48 -36
drivers/infiniband/hw/nes/nes_nic.c
··· 662 662 nesnic->sq_head &= nesnic->sq_size-1; 663 663 } 664 664 } else { 665 - nesvnic->linearized_skbs++; 666 665 hoffset = skb_transport_header(skb) - skb->data; 667 666 nhoffset = skb_network_header(skb) - skb->data; 668 - skb_linearize(skb); 667 + if (skb_linearize(skb)) { 668 + nesvnic->tx_sw_dropped++; 669 + kfree_skb(skb); 670 + return NETDEV_TX_OK; 671 + } 672 + nesvnic->linearized_skbs++; 669 673 skb_set_transport_header(skb, hoffset); 670 674 skb_set_network_header(skb, nhoffset); 671 675 if (!nes_nic_send(skb, netdev)) ··· 1469 1465 /** 1470 1466 * nes_netdev_get_settings 1471 1467 */ 1472 - static int nes_netdev_get_settings(struct net_device *netdev, struct ethtool_cmd *et_cmd) 1468 + static int nes_netdev_get_link_ksettings(struct net_device *netdev, 1469 + struct ethtool_link_ksettings *cmd) 1473 1470 { 1474 1471 struct nes_vnic *nesvnic = netdev_priv(netdev); 1475 1472 struct nes_device *nesdev = nesvnic->nesdev; ··· 1479 1474 u8 phy_type = nesadapter->phy_type[mac_index]; 1480 1475 u8 phy_index = nesadapter->phy_index[mac_index]; 1481 1476 u16 phy_data; 1477 + u32 supported, advertising; 1482 1478 1483 - et_cmd->duplex = DUPLEX_FULL; 1484 - et_cmd->port = PORT_MII; 1485 - et_cmd->maxtxpkt = 511; 1486 - et_cmd->maxrxpkt = 511; 1479 + cmd->base.duplex = DUPLEX_FULL; 1480 + cmd->base.port = PORT_MII; 1487 1481 1488 1482 if (nesadapter->OneG_Mode) { 1489 - ethtool_cmd_speed_set(et_cmd, SPEED_1000); 1483 + cmd->base.speed = SPEED_1000; 1490 1484 if (phy_type == NES_PHY_TYPE_PUMA_1G) { 1491 - et_cmd->supported = SUPPORTED_1000baseT_Full; 1492 - et_cmd->advertising = ADVERTISED_1000baseT_Full; 1493 - et_cmd->autoneg = AUTONEG_DISABLE; 1494 - et_cmd->transceiver = XCVR_INTERNAL; 1495 - et_cmd->phy_address = mac_index; 1485 + supported = SUPPORTED_1000baseT_Full; 1486 + advertising = ADVERTISED_1000baseT_Full; 1487 + cmd->base.autoneg = AUTONEG_DISABLE; 1488 + cmd->base.phy_address = mac_index; 1496 1489 } else { 1497 1490 unsigned long flags; 1498 - et_cmd->supported = SUPPORTED_1000baseT_Full 1499 - | SUPPORTED_Autoneg; 1500 - et_cmd->advertising = ADVERTISED_1000baseT_Full 1501 - | ADVERTISED_Autoneg; 1491 + 1492 + supported = SUPPORTED_1000baseT_Full 1493 + | SUPPORTED_Autoneg; 1494 + advertising = ADVERTISED_1000baseT_Full 1495 + | ADVERTISED_Autoneg; 1502 1496 spin_lock_irqsave(&nesadapter->phy_lock, flags); 1503 1497 nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data); 1504 1498 spin_unlock_irqrestore(&nesadapter->phy_lock, flags); 1505 1499 if (phy_data & 0x1000) 1506 - et_cmd->autoneg = AUTONEG_ENABLE; 1500 + cmd->base.autoneg = AUTONEG_ENABLE; 1507 1501 else 1508 - et_cmd->autoneg = AUTONEG_DISABLE; 1509 - et_cmd->transceiver = XCVR_EXTERNAL; 1510 - et_cmd->phy_address = phy_index; 1502 + cmd->base.autoneg = AUTONEG_DISABLE; 1503 + cmd->base.phy_address = phy_index; 1511 1504 } 1505 + ethtool_convert_legacy_u32_to_link_mode( 1506 + cmd->link_modes.supported, supported); 1507 + ethtool_convert_legacy_u32_to_link_mode( 1508 + cmd->link_modes.advertising, advertising); 1512 1509 return 0; 1513 1510 } 1514 1511 if ((phy_type == NES_PHY_TYPE_ARGUS) || 1515 1512 (phy_type == NES_PHY_TYPE_SFP_D) || 1516 1513 (phy_type == NES_PHY_TYPE_KR)) { 1517 - et_cmd->transceiver = XCVR_EXTERNAL; 1518 - et_cmd->port = PORT_FIBRE; 1519 - et_cmd->supported = SUPPORTED_FIBRE; 1520 - et_cmd->advertising = ADVERTISED_FIBRE; 1521 - et_cmd->phy_address = phy_index; 1514 + cmd->base.port = PORT_FIBRE; 1515 + supported = SUPPORTED_FIBRE; 1516 + advertising = ADVERTISED_FIBRE; 1517 + cmd->base.phy_address = phy_index; 1522 1518 } else { 1523 - et_cmd->transceiver = XCVR_INTERNAL; 1524 - et_cmd->supported = SUPPORTED_10000baseT_Full; 1525 - et_cmd->advertising = ADVERTISED_10000baseT_Full; 1526 - et_cmd->phy_address = mac_index; 1519 + supported = SUPPORTED_10000baseT_Full; 1520 + advertising = ADVERTISED_10000baseT_Full; 1521 + cmd->base.phy_address = mac_index; 1527 1522 } 1528 - ethtool_cmd_speed_set(et_cmd, SPEED_10000); 1529 - et_cmd->autoneg = AUTONEG_DISABLE; 1523 + cmd->base.speed = SPEED_10000; 1524 + cmd->base.autoneg = AUTONEG_DISABLE; 1525 + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, 1526 + supported); 1527 + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, 1528 + advertising); 1529 + 1530 1530 return 0; 1531 1531 } 1532 1532 ··· 1539 1529 /** 1540 1530 * nes_netdev_set_settings 1541 1531 */ 1542 - static int nes_netdev_set_settings(struct net_device *netdev, struct ethtool_cmd *et_cmd) 1532 + static int 1533 + nes_netdev_set_link_ksettings(struct net_device *netdev, 1534 + const struct ethtool_link_ksettings *cmd) 1543 1535 { 1544 1536 struct nes_vnic *nesvnic = netdev_priv(netdev); 1545 1537 struct nes_device *nesdev = nesvnic->nesdev; ··· 1555 1543 1556 1544 spin_lock_irqsave(&nesadapter->phy_lock, flags); 1557 1545 nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data); 1558 - if (et_cmd->autoneg) { 1546 + if (cmd->base.autoneg) { 1559 1547 /* Turn on Full duplex, Autoneg, and restart autonegotiation */ 1560 1548 phy_data |= 0x1300; 1561 1549 } else { ··· 1572 1560 1573 1561 static const struct ethtool_ops nes_ethtool_ops = { 1574 1562 .get_link = ethtool_op_get_link, 1575 - .get_settings = nes_netdev_get_settings, 1576 - .set_settings = nes_netdev_set_settings, 1577 1563 .get_strings = nes_netdev_get_strings, 1578 1564 .get_sset_count = nes_netdev_get_sset_count, 1579 1565 .get_ethtool_stats = nes_netdev_get_ethtool_stats, ··· 1580 1570 .set_coalesce = nes_netdev_set_coalesce, 1581 1571 .get_pauseparam = nes_netdev_get_pauseparam, 1582 1572 .set_pauseparam = nes_netdev_set_pauseparam, 1573 + .get_link_ksettings = nes_netdev_get_link_ksettings, 1574 + .set_link_ksettings = nes_netdev_set_link_ksettings, 1583 1575 }; 1584 1576 1585 1577 static void nes_vlan_mode(struct net_device *netdev, struct nes_device *nesdev, netdev_features_t features)
+1 -1
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
··· 1641 1641 static int ocrdma_mbx_create_ah_tbl(struct ocrdma_dev *dev) 1642 1642 { 1643 1643 int i; 1644 - int status = 0; 1644 + int status = -ENOMEM; 1645 1645 int max_ah; 1646 1646 struct ocrdma_create_ah_tbl *cmd; 1647 1647 struct ocrdma_create_ah_tbl_rsp *rsp;
+12 -12
drivers/infiniband/hw/qedr/verbs.c
··· 511 511 struct qedr_dev *dev = get_qedr_dev(ibpd->device); 512 512 struct qedr_pd *pd = get_qedr_pd(ibpd); 513 513 514 - if (!pd) 514 + if (!pd) { 515 515 pr_err("Invalid PD received in dealloc_pd\n"); 516 + return -EINVAL; 517 + } 516 518 517 519 DP_DEBUG(dev, QEDR_MSG_INIT, "Deallocating PD %d\n", pd->pd_id); 518 520 dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd->pd_id); ··· 1479 1477 struct qedr_ucontext *ctx = NULL; 1480 1478 struct qedr_create_qp_ureq ureq; 1481 1479 struct qedr_qp *qp; 1480 + struct ib_qp *ibqp; 1482 1481 int rc = 0; 1483 1482 1484 1483 DP_DEBUG(dev, QEDR_MSG_QP, "create qp: called from %s, pd=%p\n", ··· 1489 1486 if (rc) 1490 1487 return ERR_PTR(rc); 1491 1488 1489 + if (attrs->srq) 1490 + return ERR_PTR(-EINVAL); 1491 + 1492 1492 qp = kzalloc(sizeof(*qp), GFP_KERNEL); 1493 1493 if (!qp) 1494 1494 return ERR_PTR(-ENOMEM); 1495 - 1496 - if (attrs->srq) 1497 - return ERR_PTR(-EINVAL); 1498 1495 1499 1496 DP_DEBUG(dev, QEDR_MSG_QP, 1500 1497 "create qp: sq_cq=%p, sq_icid=%d, rq_cq=%p, rq_icid=%d\n", ··· 1511 1508 "create qp: unexpected udata when creating GSI QP\n"); 1512 1509 goto err0; 1513 1510 } 1514 - return qedr_create_gsi_qp(dev, attrs, qp); 1511 + ibqp = qedr_create_gsi_qp(dev, attrs, qp); 1512 + if (IS_ERR(ibqp)) 1513 + kfree(qp); 1514 + return ibqp; 1515 1515 } 1516 1516 1517 1517 memset(&in_params, 0, sizeof(in_params)); ··· 2420 2414 */ 2421 2415 pbl = list_first_entry(&info->inuse_pbl_list, 2422 2416 struct qedr_pbl, list_entry); 2423 - list_del(&pbl->list_entry); 2424 - list_add_tail(&pbl->list_entry, &info->free_pbl_list); 2417 + list_move_tail(&pbl->list_entry, &info->free_pbl_list); 2425 2418 info->completed_handled++; 2426 2419 } 2427 2420 } ··· 2984 2979 DP_DEBUG(dev, QEDR_MSG_CQ, 2985 2980 "QP in wrong state! QP icid=0x%x state %d\n", 2986 2981 qp->icid, qp->state); 2987 - return -EINVAL; 2988 - } 2989 - 2990 - if (!wr) { 2991 - DP_ERR(dev, "Got an empty post send.\n"); 2992 2982 return -EINVAL; 2993 2983 } 2994 2984
+6 -6
drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c
··· 117 117 vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic); 118 118 119 119 res_chunk = get_qp_res_chunk(qp_grp); 120 - if (IS_ERR_OR_NULL(res_chunk)) { 120 + if (IS_ERR(res_chunk)) { 121 121 usnic_err("Unable to get qp res with err %ld\n", 122 122 PTR_ERR(res_chunk)); 123 - return res_chunk ? PTR_ERR(res_chunk) : -ENOMEM; 123 + return PTR_ERR(res_chunk); 124 124 } 125 125 126 126 for (i = 0; i < res_chunk->cnt; i++) { ··· 158 158 vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic); 159 159 160 160 res_chunk = get_qp_res_chunk(qp_grp); 161 - if (IS_ERR_OR_NULL(res_chunk)) { 161 + if (IS_ERR(res_chunk)) { 162 162 usnic_err("Unable to get qp res with err %ld\n", 163 163 PTR_ERR(res_chunk)); 164 - return res_chunk ? PTR_ERR(res_chunk) : -ENOMEM; 164 + return PTR_ERR(res_chunk); 165 165 } 166 166 167 167 for (i = 0; i < res_chunk->cnt; i++) { ··· 186 186 struct usnic_vnic_res_chunk *res_chunk; 187 187 188 188 res_chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ); 189 - if (IS_ERR_OR_NULL(res_chunk)) { 189 + if (IS_ERR(res_chunk)) { 190 190 usnic_err("Unable to get %s with err %ld\n", 191 191 usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_RQ), 192 192 PTR_ERR(res_chunk)); 193 - return res_chunk ? PTR_ERR(res_chunk) : -ENOMEM; 193 + return PTR_ERR(res_chunk); 194 194 } 195 195 196 196 uaction->vnic_idx = usnic_vnic_get_index(qp_grp->vf->vnic);
+6 -6
drivers/infiniband/hw/usnic/usnic_ib_verbs.c
··· 87 87 resp.bar_len = bar->len; 88 88 89 89 chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ); 90 - if (IS_ERR_OR_NULL(chunk)) { 90 + if (IS_ERR(chunk)) { 91 91 usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n", 92 92 usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_RQ), 93 93 qp_grp->grp_id, 94 94 PTR_ERR(chunk)); 95 - return chunk ? PTR_ERR(chunk) : -ENOMEM; 95 + return PTR_ERR(chunk); 96 96 } 97 97 98 98 WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_RQ); ··· 101 101 resp.rq_idx[i] = chunk->res[i]->vnic_idx; 102 102 103 103 chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_WQ); 104 - if (IS_ERR_OR_NULL(chunk)) { 104 + if (IS_ERR(chunk)) { 105 105 usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n", 106 106 usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_WQ), 107 107 qp_grp->grp_id, 108 108 PTR_ERR(chunk)); 109 - return chunk ? PTR_ERR(chunk) : -ENOMEM; 109 + return PTR_ERR(chunk); 110 110 } 111 111 112 112 WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_WQ); ··· 115 115 resp.wq_idx[i] = chunk->res[i]->vnic_idx; 116 116 117 117 chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_CQ); 118 - if (IS_ERR_OR_NULL(chunk)) { 118 + if (IS_ERR(chunk)) { 119 119 usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n", 120 120 usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_CQ), 121 121 qp_grp->grp_id, 122 122 PTR_ERR(chunk)); 123 - return chunk ? PTR_ERR(chunk) : -ENOMEM; 123 + return PTR_ERR(chunk); 124 124 } 125 125 126 126 WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_CQ);
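These usnic hunks work because usnic_ib_qp_grp_get_chunk() and get_qp_res_chunk() follow the kernel's ERR_PTR convention: on failure they encode the errno in the returned pointer and never return NULL, so the IS_ERR_OR_NULL() tests and the `chunk ? PTR_ERR(chunk) : -ENOMEM` fallbacks were dead code. The convention in a self-contained sketch (names illustrative):

	#include <linux/err.h>

	struct example_res { int id; };
	static struct example_res the_res = { .id = 1 };

	static struct example_res *example_get_res(bool ok)
	{
		/* Error is encoded in the pointer; NULL is never returned. */
		return ok ? &the_res : ERR_PTR(-ENOMEM);
	}

	static int example_caller(void)
	{
		struct example_res *res = example_get_res(true);

		if (IS_ERR(res))	/* a NULL check here would be dead code */
			return PTR_ERR(res);
		return res->id;
	}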
+7 -2
drivers/infiniband/sw/rxe/rxe_comp.c
··· 420 420 (wqe->wr.send_flags & IB_SEND_SIGNALED) || 421 421 (qp->req.state == QP_STATE_ERROR)) { 422 422 make_send_cqe(qp, wqe, &cqe); 423 + advance_consumer(qp->sq.queue); 423 424 rxe_cq_post(qp->scq, &cqe, 0); 425 + } else { 426 + advance_consumer(qp->sq.queue); 424 427 } 425 - 426 - advance_consumer(qp->sq.queue); 427 428 428 429 /* 429 430 * we completed something so let req run again ··· 510 509 struct sk_buff *skb = NULL; 511 510 struct rxe_pkt_info *pkt = NULL; 512 511 enum comp_state state; 512 + 513 + rxe_add_ref(qp); 513 514 514 515 if (!qp->valid) { 515 516 while ((skb = skb_dequeue(&qp->resp_pkts))) { ··· 742 739 /* we come here if we are done with processing and want the task to 743 740 * exit from the loop calling us 744 741 */ 742 + rxe_drop_ref(qp); 745 743 return -EAGAIN; 746 744 747 745 done: 748 746 /* we come here if we have processed a packet we want the task to call 749 747 * us again to see if there is anything else to do 750 748 */ 749 + rxe_drop_ref(qp); 751 750 return 0; 752 751 }
-2
drivers/infiniband/sw/rxe/rxe_loc.h
··· 266 266 return err; 267 267 } 268 268 269 - atomic_inc(&qp->skb_out); 270 - 271 269 if ((qp_type(qp) != IB_QPT_RC) && 272 270 (pkt->mask & RXE_END_MASK)) { 273 271 pkt->wqe->state = wqe_state_done;
+3
drivers/infiniband/sw/rxe/rxe_mr.c
··· 355 355 size_t offset; 356 356 u32 crc = crcp ? (*crcp) : 0; 357 357 358 + if (length == 0) 359 + return 0; 360 + 358 361 if (mem->type == RXE_MEM_TYPE_DMA) { 359 362 u8 *src, *dest; 360 363
+3 -5
drivers/infiniband/sw/rxe/rxe_net.c
··· 46 46 #include "rxe_loc.h" 47 47 48 48 static LIST_HEAD(rxe_dev_list); 49 - static spinlock_t dev_list_lock; /* spinlock for device list */ 49 + static DEFINE_SPINLOCK(dev_list_lock); /* spinlock for device list */ 50 50 51 51 struct rxe_dev *net_to_rxe(struct net_device *ndev) 52 52 { ··· 459 459 return -EAGAIN; 460 460 } 461 461 462 + if (pkt->qp) 463 + atomic_inc(&pkt->qp->skb_out); 462 464 kfree_skb(skb); 463 465 464 466 return 0; ··· 665 663 666 664 int rxe_net_ipv4_init(void) 667 665 { 668 - spin_lock_init(&dev_list_lock); 669 - 670 666 recv_sockets.sk4 = rxe_setup_udp_tunnel(&init_net, 671 667 htons(ROCE_V2_UDP_DPORT), false); 672 668 if (IS_ERR(recv_sockets.sk4)) { ··· 679 679 int rxe_net_ipv6_init(void) 680 680 { 681 681 #if IS_ENABLED(CONFIG_IPV6) 682 - 683 - spin_lock_init(&dev_list_lock); 684 682 685 683 recv_sockets.sk6 = rxe_setup_udp_tunnel(&init_net, 686 684 htons(ROCE_V2_UDP_DPORT), true);
+5 -6
drivers/infiniband/sw/rxe/rxe_recv.c
··· 391 391 payload_size(pkt)); 392 392 calc_icrc = cpu_to_be32(~calc_icrc); 393 393 if (unlikely(calc_icrc != pack_icrc)) { 394 - char saddr[sizeof(struct in6_addr)]; 395 - 396 394 if (skb->protocol == htons(ETH_P_IPV6)) 397 - sprintf(saddr, "%pI6", &ipv6_hdr(skb)->saddr); 395 + pr_warn_ratelimited("bad ICRC from %pI6c\n", 396 + &ipv6_hdr(skb)->saddr); 398 397 else if (skb->protocol == htons(ETH_P_IP)) 399 - sprintf(saddr, "%pI4", &ip_hdr(skb)->saddr); 398 + pr_warn_ratelimited("bad ICRC from %pI4\n", 399 + &ip_hdr(skb)->saddr); 400 400 else 401 - sprintf(saddr, "unknown"); 401 + pr_warn_ratelimited("bad ICRC from unknown\n"); 402 402 403 - pr_warn_ratelimited("bad ICRC from %s\n", saddr); 404 403 goto drop; 405 404 } 406 405
+11 -8
drivers/infiniband/sw/rxe/rxe_req.c
··· 548 548 static void save_state(struct rxe_send_wqe *wqe, 549 549 struct rxe_qp *qp, 550 550 struct rxe_send_wqe *rollback_wqe, 551 - struct rxe_qp *rollback_qp) 551 + u32 *rollback_psn) 552 552 { 553 553 rollback_wqe->state = wqe->state; 554 554 rollback_wqe->first_psn = wqe->first_psn; 555 555 rollback_wqe->last_psn = wqe->last_psn; 556 - rollback_qp->req.psn = qp->req.psn; 556 + *rollback_psn = qp->req.psn; 557 557 } 558 558 559 559 static void rollback_state(struct rxe_send_wqe *wqe, 560 560 struct rxe_qp *qp, 561 561 struct rxe_send_wqe *rollback_wqe, 562 - struct rxe_qp *rollback_qp) 562 + u32 rollback_psn) 563 563 { 564 564 wqe->state = rollback_wqe->state; 565 565 wqe->first_psn = rollback_wqe->first_psn; 566 566 wqe->last_psn = rollback_wqe->last_psn; 567 - qp->req.psn = rollback_qp->req.psn; 567 + qp->req.psn = rollback_psn; 568 568 } 569 569 570 570 static void update_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe, ··· 593 593 int mtu; 594 594 int opcode; 595 595 int ret; 596 - struct rxe_qp rollback_qp; 597 596 struct rxe_send_wqe rollback_wqe; 597 + u32 rollback_psn; 598 + 599 + rxe_add_ref(qp); 598 600 599 601 next_wqe: 600 602 if (unlikely(!qp->valid || qp->req.state == QP_STATE_ERROR)) ··· 720 718 * rxe_xmit_packet(). 721 719 * Otherwise, completer might initiate an unjustified retry flow. 722 720 */ 723 - save_state(wqe, qp, &rollback_wqe, &rollback_qp); 721 + save_state(wqe, qp, &rollback_wqe, &rollback_psn); 724 722 update_wqe_state(qp, wqe, &pkt); 725 723 update_wqe_psn(qp, wqe, &pkt, payload); 726 724 ret = rxe_xmit_packet(to_rdev(qp->ibqp.device), qp, &pkt, skb); ··· 728 726 qp->need_req_skb = 1; 729 727 kfree_skb(skb); 730 728 731 - rollback_state(wqe, qp, &rollback_wqe, &rollback_qp); 729 + rollback_state(wqe, qp, &rollback_wqe, rollback_psn); 732 730 733 731 if (ret == -EAGAIN) { 734 732 rxe_run_task(&qp->req.task, 1); ··· 752 750 while (rxe_completer(qp) == 0) 753 751 ; 754 752 } 755 - 753 + rxe_drop_ref(qp); 756 754 return 0; 757 755 758 756 exit: 757 + rxe_drop_ref(qp); 759 758 return -EAGAIN; 760 759 }
+21 -4
drivers/infiniband/sw/rxe/rxe_resp.c
··· 444 444 return RESPST_EXECUTE; 445 445 } 446 446 447 + /* A zero-byte op is not required to set an addr or rkey. */ 448 + if ((pkt->mask & (RXE_READ_MASK | RXE_WRITE_OR_SEND)) && 449 + (pkt->mask & RXE_RETH_MASK) && 450 + reth_len(pkt) == 0) { 451 + return RESPST_EXECUTE; 452 + } 453 + 447 454 va = qp->resp.va; 448 455 rkey = qp->resp.rkey; 449 456 resid = qp->resp.resid; ··· 687 680 res->read.va_org = qp->resp.va; 688 681 689 682 res->first_psn = req_pkt->psn; 690 - res->last_psn = req_pkt->psn + 691 - (reth_len(req_pkt) + mtu - 1) / 692 - mtu - 1; 683 + 684 + if (reth_len(req_pkt)) { 685 + res->last_psn = (req_pkt->psn + 686 + (reth_len(req_pkt) + mtu - 1) / 687 + mtu - 1) & BTH_PSN_MASK; 688 + } else { 689 + res->last_psn = res->first_psn; 690 + } 693 691 res->cur_psn = req_pkt->psn; 694 692 695 693 res->read.resid = qp->resp.resid; ··· 754 742 } else { 755 743 qp->resp.res = NULL; 756 744 qp->resp.opcode = -1; 757 - qp->resp.psn = res->cur_psn; 745 + if (psn_compare(res->cur_psn, qp->resp.psn) >= 0) 746 + qp->resp.psn = res->cur_psn; 758 747 state = RESPST_CLEANUP; 759 748 } 760 749 ··· 1145 1132 pkt, skb_copy); 1146 1133 if (rc) { 1147 1134 pr_err("Failed resending result. This flow is not handled - skb ignored\n"); 1135 + rxe_drop_ref(qp); 1148 1136 kfree_skb(skb_copy); 1149 1137 rc = RESPST_CLEANUP; 1150 1138 goto out; ··· 1211 1197 enum resp_states state; 1212 1198 struct rxe_pkt_info *pkt = NULL; 1213 1199 int ret = 0; 1200 + 1201 + rxe_add_ref(qp); 1214 1202 1215 1203 qp->resp.aeth_syndrome = AETH_ACK_UNLIMITED; 1216 1204 ··· 1402 1386 exit: 1403 1387 ret = -EAGAIN; 1404 1388 done: 1389 + rxe_drop_ref(qp); 1405 1390 return ret; 1406 1391 }
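A worked instance (values illustrative) makes the zero-length guard in the read-reply PSN math above concrete. For a READ of reth_len = 8192 bytes at mtu = 4096 starting at psn = 10, the reply spans (8192 + 4096 - 1) / 4096 = 2 packets, so last_psn = (10 + 2 - 1) & BTH_PSN_MASK = 11. With reth_len = 0, the old expression yielded 0 packets and last_psn = 10 - 1 = 9, one PSN before first_psn; the new branch pins last_psn to first_psn for zero-length operations, and the added masking keeps the sum inside the 24-bit PSN space when it wraps.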
+1 -1
drivers/infiniband/sw/rxe/rxe_srq.c
··· 169 169 } 170 170 } 171 171 172 - err = rxe_queue_resize(q, (unsigned int *)&attr->max_wr, 172 + err = rxe_queue_resize(q, &attr->max_wr, 173 173 rcv_wqe_size(srq->rq.max_sge), 174 174 srq->rq.queue->ip ? 175 175 srq->rq.queue->ip->context :
+19
drivers/infiniband/sw/rxe/rxe_task.c
··· 121 121 task->arg = arg; 122 122 task->func = func; 123 123 snprintf(task->name, sizeof(task->name), "%s", name); 124 + task->destroyed = false; 124 125 125 126 tasklet_init(&task->tasklet, rxe_do_task, (unsigned long)task); 126 127 ··· 133 132 134 133 void rxe_cleanup_task(struct rxe_task *task) 135 134 { 135 + unsigned long flags; 136 + bool idle; 137 + 138 + /* 139 + * Mark the task, then wait for it to finish. It might be 140 + * running in a non-tasklet (direct call) context. 141 + */ 142 + task->destroyed = true; 143 + 144 + do { 145 + spin_lock_irqsave(&task->state_lock, flags); 146 + idle = (task->state == TASK_STATE_START); 147 + spin_unlock_irqrestore(&task->state_lock, flags); 148 + } while (!idle); 149 + 136 150 tasklet_kill(&task->tasklet); 137 151 } 138 152 139 153 void rxe_run_task(struct rxe_task *task, int sched) 140 154 { 155 + if (task->destroyed) 156 + return; 157 + 141 158 if (sched) 142 159 tasklet_schedule(&task->tasklet); 143 160 else
+1
drivers/infiniband/sw/rxe/rxe_task.h
··· 54 54 int (*func)(void *arg); 55 55 int ret; 56 56 char name[16]; 57 + bool destroyed; 57 58 }; 58 59 59 60 /*
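The new `destroyed` flag and the spin-wait in rxe_cleanup_task() implement a common teardown ordering: publish the flag so no further runs are scheduled, wait under the state lock until any in-flight run has drained, then kill the tasklet. A generic condensed sketch of that pattern (not the rxe code itself; names illustrative):

	#include <linux/interrupt.h>
	#include <linux/spinlock.h>

	struct worker {
		spinlock_t lock;
		int busy;		/* set/cleared under lock by the run path */
		bool destroyed;
		struct tasklet_struct tl;
	};

	static void worker_cleanup(struct worker *w)
	{
		unsigned long flags;
		bool idle;

		w->destroyed = true;	/* 1: refuse any further scheduling */

		do {			/* 2: wait out a run already in flight */
			spin_lock_irqsave(&w->lock, flags);
			idle = !w->busy;
			spin_unlock_irqrestore(&w->lock, flags);
		} while (!idle);

		tasklet_kill(&w->tl);	/* 3: flush anything already queued */
	}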
+13 -4
drivers/infiniband/sw/rxe/rxe_verbs.c
··· 566 566 if (udata) { 567 567 if (udata->inlen) { 568 568 err = -EINVAL; 569 - goto err1; 569 + goto err2; 570 570 } 571 571 qp->is_user = 1; 572 572 } ··· 575 575 576 576 err = rxe_qp_from_init(rxe, qp, pd, init, udata, ibpd); 577 577 if (err) 578 - goto err2; 578 + goto err3; 579 579 580 580 return &qp->ibqp; 581 581 582 - err2: 582 + err3: 583 583 rxe_drop_index(qp); 584 + err2: 584 585 rxe_drop_ref(qp); 585 586 err1: 586 587 return ERR_PTR(err); ··· 1010 1009 static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags) 1011 1010 { 1012 1011 struct rxe_cq *cq = to_rcq(ibcq); 1012 + unsigned long irq_flags; 1013 + int ret = 0; 1013 1014 1015 + spin_lock_irqsave(&cq->cq_lock, irq_flags); 1014 1016 if (cq->notify != IB_CQ_NEXT_COMP) 1015 1017 cq->notify = flags & IB_CQ_SOLICITED_MASK; 1016 1018 1017 - return 0; 1019 + if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !queue_empty(cq->queue)) 1020 + ret = 1; 1021 + 1022 + spin_unlock_irqrestore(&cq->cq_lock, irq_flags); 1023 + 1024 + return ret; 1018 1025 } 1019 1026 1020 1027 static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
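Returning 1 for IB_CQ_REPORT_MISSED_EVENTS closes the classic arm/poll race for rxe consumers. A sketch of the loop that return value exists for, with a hypothetical process_wc():

	#include <rdma/ib_verbs.h>

	static void drain_cq(struct ib_cq *cq)
	{
		struct ib_wc wc;

	again:
		while (ib_poll_cq(cq, 1, &wc) > 0)
			process_wc(&wc);	/* hypothetical consumer */

		/* > 0 means a completion slipped in before the re-arm took
		 * effect, so poll again instead of sleeping on the event.
		 */
		if (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
					 IB_CQ_REPORT_MISSED_EVENTS) > 0)
			goto again;
	}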
+5 -2
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
··· 575 575 if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) 576 576 return; 577 577 578 - if (ib_query_port(priv->ca, priv->port, &port_attr) || 579 - port_attr.state != IB_PORT_ACTIVE) { 578 + if (ib_query_port(priv->ca, priv->port, &port_attr)) { 579 + ipoib_dbg(priv, "ib_query_port() failed\n"); 580 + return; 581 + } 582 + if (port_attr.state != IB_PORT_ACTIVE) { 580 583 ipoib_dbg(priv, "port state is not ACTIVE (state = %d) suspending join task\n", 581 584 port_attr.state); 582 585 return;
+4 -1
drivers/infiniband/ulp/iser/iser_verbs.c
··· 890 890 case RDMA_CM_EVENT_ESTABLISHED: 891 891 iser_connected_handler(cma_id, event->param.conn.private_data); 892 892 break; 893 + case RDMA_CM_EVENT_REJECTED: 894 + iser_info("Connection rejected: %s\n", 895 + rdma_reject_msg(cma_id, event->status)); 896 + /* FALLTHROUGH */ 893 897 case RDMA_CM_EVENT_ADDR_ERROR: 894 898 case RDMA_CM_EVENT_ROUTE_ERROR: 895 899 case RDMA_CM_EVENT_CONNECT_ERROR: 896 900 case RDMA_CM_EVENT_UNREACHABLE: 897 - case RDMA_CM_EVENT_REJECTED: 898 901 iser_connect_error(cma_id); 899 902 break; 900 903 case RDMA_CM_EVENT_DISCONNECTED:
+8
drivers/infiniband/ulp/isert/ib_isert.c
··· 789 789 */ 790 790 return 1; 791 791 case RDMA_CM_EVENT_REJECTED: /* FALLTHRU */ 792 + isert_info("Connection rejected: %s\n", 793 + rdma_reject_msg(cma_id, event->status)); 792 794 case RDMA_CM_EVENT_UNREACHABLE: /* FALLTHRU */ 793 795 case RDMA_CM_EVENT_CONNECT_ERROR: 794 796 ret = isert_connect_error(cma_id); ··· 1844 1842 isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev, 1845 1843 (void *)cmd->sense_buffer, pdu_len, 1846 1844 DMA_TO_DEVICE); 1845 + if (ib_dma_mapping_error(ib_dev, isert_cmd->pdu_buf_dma)) 1846 + return -ENOMEM; 1847 1847 1848 1848 isert_cmd->pdu_buf_len = pdu_len; 1849 1849 tx_dsg->addr = isert_cmd->pdu_buf_dma; ··· 1973 1969 isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev, 1974 1970 (void *)cmd->buf_ptr, ISCSI_HDR_LEN, 1975 1971 DMA_TO_DEVICE); 1972 + if (ib_dma_mapping_error(ib_dev, isert_cmd->pdu_buf_dma)) 1973 + return -ENOMEM; 1976 1974 isert_cmd->pdu_buf_len = ISCSI_HDR_LEN; 1977 1975 tx_dsg->addr = isert_cmd->pdu_buf_dma; 1978 1976 tx_dsg->length = ISCSI_HDR_LEN; ··· 2015 2009 2016 2010 isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev, 2017 2011 txt_rsp_buf, txt_rsp_len, DMA_TO_DEVICE); 2012 + if (ib_dma_mapping_error(ib_dev, isert_cmd->pdu_buf_dma)) 2013 + return -ENOMEM; 2018 2014 2019 2015 isert_cmd->pdu_buf_len = txt_rsp_len; 2020 2016 tx_dsg->addr = isert_cmd->pdu_buf_dma;
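All three isert hunks add the same missing step; the rule is that every ib_dma_map_single() result must pass ib_dma_mapping_error() before it is used. In condensed form (illustrative variables):

	u64 dma;

	dma = ib_dma_map_single(ib_dev, buf, len, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(ib_dev, dma))
		return -ENOMEM;		/* never post a failed mapping */

	/* ...build the sge with 'dma', post the send, and unmap with
	 * ib_dma_unmap_single() once the completion arrives...
	 */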
+32 -16
drivers/infiniband/ulp/srp/ib_srp.c
··· 64 64 MODULE_VERSION(DRV_VERSION); 65 65 MODULE_INFO(release_date, DRV_RELDATE); 66 66 67 + #if !defined(CONFIG_DYNAMIC_DEBUG) 68 + #define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt) 69 + #define DYNAMIC_DEBUG_BRANCH(descriptor) false 70 + #endif 71 + 67 72 static unsigned int srp_sg_tablesize; 68 73 static unsigned int cmd_sg_entries; 69 74 static unsigned int indirect_sg_entries; ··· 389 384 max_page_list_len); 390 385 if (IS_ERR(mr)) { 391 386 ret = PTR_ERR(mr); 387 + if (ret == -ENOMEM) 388 + pr_info("%s: ib_alloc_mr() failed. Try to reduce max_cmd_per_lun, max_sect or ch_count\n", 389 + dev_name(&device->dev)); 392 390 goto destroy_pool; 393 391 } 394 392 d->mr = mr; ··· 1274 1266 struct ib_pool_fmr *fmr; 1275 1267 u64 io_addr = 0; 1276 1268 1277 - if (state->fmr.next >= state->fmr.end) 1269 + if (state->fmr.next >= state->fmr.end) { 1270 + shost_printk(KERN_ERR, ch->target->scsi_host, 1271 + PFX "Out of MRs (mr_per_cmd = %d)\n", 1272 + ch->target->mr_per_cmd); 1278 1273 return -ENOMEM; 1274 + } 1279 1275 1280 1276 WARN_ON_ONCE(!dev->use_fmr); 1281 1277 ··· 1335 1323 u32 rkey; 1336 1324 int n, err; 1337 1325 1338 - if (state->fr.next >= state->fr.end) 1326 + if (state->fr.next >= state->fr.end) { 1327 + shost_printk(KERN_ERR, ch->target->scsi_host, 1328 + PFX "Out of MRs (mr_per_cmd = %d)\n", 1329 + ch->target->mr_per_cmd); 1339 1330 return -ENOMEM; 1331 + } 1340 1332 1341 1333 WARN_ON_ONCE(!dev->use_fast_reg); 1342 1334 ··· 1572 1556 return 0; 1573 1557 } 1574 1558 1575 - #if defined(DYNAMIC_DATA_DEBUG) 1576 1559 static void srp_check_mapping(struct srp_map_state *state, 1577 1560 struct srp_rdma_ch *ch, struct srp_request *req, 1578 1561 struct scatterlist *scat, int count) ··· 1595 1580 scsi_bufflen(req->scmnd), desc_len, mr_len, 1596 1581 state->ndesc, state->nmdesc); 1597 1582 } 1598 - #endif 1599 1583 1600 1584 /** 1601 1585 * srp_map_data() - map SCSI data buffer onto an SRP request ··· 1683 1669 if (ret < 0) 1684 1670 goto unmap; 1685 1671 1686 - #if defined(DYNAMIC_DEBUG) 1687 1672 { 1688 1673 DEFINE_DYNAMIC_DEBUG_METADATA(ddm, 1689 1674 "Memory mapping consistency check"); 1690 - if (unlikely(ddm.flags & _DPRINTK_FLAGS_PRINT)) 1675 + if (DYNAMIC_DEBUG_BRANCH(ddm)) 1691 1676 srp_check_mapping(&state, ch, req, scat, count); 1692 1677 } 1693 - #endif 1694 1678 1695 1679 /* We've mapped the request, now pull as much of the indirect 1696 1680 * descriptor table as we can into the command buffer. If this ··· 3299 3287 */ 3300 3288 scsi_host_get(target->scsi_host); 3301 3289 3302 - mutex_lock(&host->add_target_mutex); 3290 + ret = mutex_lock_interruptible(&host->add_target_mutex); 3291 + if (ret < 0) 3292 + goto put; 3303 3293 3304 3294 ret = srp_parse_options(buf, target); 3305 3295 if (ret) ··· 3457 3443 out: 3458 3444 mutex_unlock(&host->add_target_mutex); 3459 3445 3446 + put: 3460 3447 scsi_host_put(target->scsi_host); 3461 3448 if (ret < 0) 3462 3449 scsi_host_put(target->scsi_host); ··· 3541 3526 static void srp_add_one(struct ib_device *device) 3542 3527 { 3543 3528 struct srp_device *srp_dev; 3529 + struct ib_device_attr *attr = &device->attrs; 3544 3530 struct srp_host *host; 3545 3531 int mr_page_shift, p; 3546 3532 u64 max_pages_per_mr; ··· 3556 3540 * minimum of 4096 bytes. We're unlikely to build large sglists 3557 3541 * out of smaller entries. 3558 3542 */ 3559 - mr_page_shift = max(12, ffs(device->attrs.page_size_cap) - 1); 3543 + mr_page_shift = max(12, ffs(attr->page_size_cap) - 1); 3560 3544 srp_dev->mr_page_size = 1 << mr_page_shift; 3561 3545 srp_dev->mr_page_mask = ~((u64) srp_dev->mr_page_size - 1); 3562 - max_pages_per_mr = device->attrs.max_mr_size; 3546 + max_pages_per_mr = attr->max_mr_size; 3563 3547 do_div(max_pages_per_mr, srp_dev->mr_page_size); 3564 3548 pr_debug("%s: %llu / %u = %llu <> %u\n", __func__, 3565 - device->attrs.max_mr_size, srp_dev->mr_page_size, 3549 + attr->max_mr_size, srp_dev->mr_page_size, 3566 3550 max_pages_per_mr, SRP_MAX_PAGES_PER_MR); 3567 3551 srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR, 3568 3552 max_pages_per_mr); 3569 3553 3570 3554 srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr && 3571 3555 device->map_phys_fmr && device->unmap_fmr); 3572 - srp_dev->has_fr = (device->attrs.device_cap_flags & 3556 + srp_dev->has_fr = (attr->device_cap_flags & 3573 3557 IB_DEVICE_MEM_MGT_EXTENSIONS); 3574 3558 if (!never_register && !srp_dev->has_fmr && !srp_dev->has_fr) { 3575 3559 dev_warn(&device->dev, "neither FMR nor FR is supported\n"); 3576 3560 } else if (!never_register && 3577 - device->attrs.max_mr_size >= 2 * srp_dev->mr_page_size) { 3561 + attr->max_mr_size >= 2 * srp_dev->mr_page_size) { 3578 3562 srp_dev->use_fast_reg = (srp_dev->has_fr && 3579 3563 (!srp_dev->has_fmr || prefer_fr)); 3580 3564 srp_dev->use_fmr = !srp_dev->use_fast_reg && srp_dev->has_fmr; ··· 3587 3571 if (srp_dev->use_fast_reg) { 3588 3572 srp_dev->max_pages_per_mr = 3589 3573 min_t(u32, srp_dev->max_pages_per_mr, 3590 - device->attrs.max_fast_reg_page_list_len); 3574 + attr->max_fast_reg_page_list_len); 3591 3575 } 3592 3576 srp_dev->mr_max_size = srp_dev->mr_page_size * 3593 3577 srp_dev->max_pages_per_mr; 3594 3578 pr_debug("%s: mr_page_shift = %d, device->max_mr_size = %#llx, device->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n", 3595 - device->name, mr_page_shift, device->attrs.max_mr_size, 3596 - device->attrs.max_fast_reg_page_list_len, 3579 + device->name, mr_page_shift, attr->max_mr_size, 3580 + attr->max_fast_reg_page_list_len, 3597 3581 srp_dev->max_pages_per_mr, srp_dev->mr_max_size); 3598 3582 3599 3583 INIT_LIST_HEAD(&srp_dev->dev_list);
+9 -13
drivers/infiniband/ulp/srpt/ib_srpt.c
··· 1840 1840 struct srpt_rdma_ch *ch, *tmp_ch; 1841 1841 u32 it_iu_len; 1842 1842 int i, ret = 0; 1843 - unsigned char *p; 1844 1843 1845 1844 WARN_ON_ONCE(irqs_disabled()); 1846 1845 ··· 1993 1994 be64_to_cpu(*(__be64 *)(ch->i_port_id + 8))); 1994 1995 1995 1996 pr_debug("registering session %s\n", ch->sess_name); 1996 - p = &ch->sess_name[0]; 1997 1997 1998 - try_again: 1999 1998 ch->sess = target_alloc_session(&sport->port_tpg_1, 0, 0, 2000 - TARGET_PROT_NORMAL, p, ch, NULL); 1999 + TARGET_PROT_NORMAL, ch->sess_name, ch, 2000 + NULL); 2001 + /* Retry without leading "0x" */ 2002 + if (IS_ERR(ch->sess)) 2003 + ch->sess = target_alloc_session(&sport->port_tpg_1, 0, 0, 2004 + TARGET_PROT_NORMAL, 2005 + ch->sess_name + 2, ch, NULL); 2001 2006 if (IS_ERR(ch->sess)) { 2002 - pr_info("Rejected login because no ACL has been" 2003 - " configured yet for initiator %s.\n", p); 2004 - /* 2005 - * XXX: Hack to retry of ch->i_port_id without leading '0x' 2006 - */ 2007 - if (p == &ch->sess_name[0]) { 2008 - p += 2; 2009 - goto try_again; 2010 - } 2007 + pr_info("Rejected login because no ACL has been configured yet for initiator %s.\n", 2008 + ch->sess_name); 2011 2009 rej->reason = cpu_to_be32((PTR_ERR(ch->sess) == -ENOMEM) ? 2012 2010 SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES : 2013 2011 SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED);
+2 -2
drivers/net/ethernet/qlogic/qede/qede_roce.c
··· 191 191 } 192 192 mutex_unlock(&qedr_dev_list_lock); 193 193 194 - DP_INFO(edev, "qedr: discovered and registered %d RoCE funcs\n", 195 - qedr_counter); 194 + pr_notice("qedr: discovered and registered %d RoCE funcs\n", 195 + qedr_counter); 196 196 197 197 return 0; 198 198 }
+36 -6
drivers/nvme/host/rdma.c
··· 43 43 44 44 #define NVME_RDMA_MAX_INLINE_SEGMENTS 1 45 45 46 + static const char *const nvme_rdma_cm_status_strs[] = { 47 + [NVME_RDMA_CM_INVALID_LEN] = "invalid length", 48 + [NVME_RDMA_CM_INVALID_RECFMT] = "invalid record format", 49 + [NVME_RDMA_CM_INVALID_QID] = "invalid queue ID", 50 + [NVME_RDMA_CM_INVALID_HSQSIZE] = "invalid host SQ size", 51 + [NVME_RDMA_CM_INVALID_HRQSIZE] = "invalid host RQ size", 52 + [NVME_RDMA_CM_NO_RSC] = "resource not found", 53 + [NVME_RDMA_CM_INVALID_IRD] = "invalid IRD", 54 + [NVME_RDMA_CM_INVALID_ORD] = "Invalid ORD", 55 + }; 56 + 57 + static const char *nvme_rdma_cm_msg(enum nvme_rdma_cm_status status) 58 + { 59 + size_t index = status; 60 + 61 + if (index < ARRAY_SIZE(nvme_rdma_cm_status_strs) && 62 + nvme_rdma_cm_status_strs[index]) 63 + return nvme_rdma_cm_status_strs[index]; 64 + else 65 + return "unrecognized reason"; 66 + }; 67 + 46 68 /* 47 69 * We handle AEN commands ourselves and don't even let the 48 70 * block layer know about them. ··· 1229 1207 static int nvme_rdma_conn_rejected(struct nvme_rdma_queue *queue, 1230 1208 struct rdma_cm_event *ev) 1231 1209 { 1232 - if (ev->param.conn.private_data_len) { 1233 - struct nvme_rdma_cm_rej *rej = 1234 - (struct nvme_rdma_cm_rej *)ev->param.conn.private_data; 1210 + struct rdma_cm_id *cm_id = queue->cm_id; 1211 + int status = ev->status; 1212 + const char *rej_msg; 1213 + const struct nvme_rdma_cm_rej *rej_data; 1214 + u8 rej_data_len; 1215 + 1216 + rej_msg = rdma_reject_msg(cm_id, status); 1217 + rej_data = rdma_consumer_reject_data(cm_id, ev, &rej_data_len); 1218 + 1219 + if (rej_data && rej_data_len >= sizeof(u16)) { 1220 + u16 sts = le16_to_cpu(rej_data->sts); 1235 1221 1236 1222 dev_err(queue->ctrl->ctrl.device, 1237 - "Connect rejected, status %d.", le16_to_cpu(rej->sts)); 1238 - /* XXX: Think of something clever to do here... */ 1223 + "Connect rejected: status %d (%s) nvme status %d (%s).\n", 1224 + status, rej_msg, sts, nvme_rdma_cm_msg(sts)); 1239 1225 } else { 1240 1226 dev_err(queue->ctrl->ctrl.device, 1241 - "Connect rejected, no private data.\n"); 1227 + "Connect rejected: status %d (%s).\n", status, rej_msg); 1242 1228 } 1243 1229 1244 1230 return -ECONNRESET;
+3
drivers/nvme/target/rdma.c
··· 1358 1358 ret = nvmet_rdma_device_removal(cm_id, queue); 1359 1359 break; 1360 1360 case RDMA_CM_EVENT_REJECTED: 1361 + pr_debug("Connection rejected: %s\n", 1362 + rdma_reject_msg(cm_id, event->status)); 1363 + /* FALLTHROUGH */ 1361 1364 case RDMA_CM_EVENT_UNREACHABLE: 1362 1365 case RDMA_CM_EVENT_CONNECT_ERROR: 1363 1366 nvmet_rdma_queue_connect_fail(cm_id, queue);
+6
include/rdma/ib_cm.h
··· 603 603 int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id, 604 604 struct ib_cm_sidr_rep_param *param); 605 605 606 + /** 607 + * ibcm_reject_msg - return a pointer to a reject message string. 608 + * @reason: Value returned in the REJECT event status field. 609 + */ 610 + const char *__attribute_const__ ibcm_reject_msg(int reason); 611 + 606 612 #endif /* IB_CM_H */
+1 -1
include/rdma/ib_mad.h
··· 46 46 #define IB_MGMT_BASE_VERSION 1 47 47 #define OPA_MGMT_BASE_VERSION 0x80 48 48 49 - #define OPA_SMP_CLASS_VERSION 0x80 49 + #define OPA_SM_CLASS_VERSION 0x80 50 50 51 51 /* Management classes */ 52 52 #define IB_MGMT_CLASS_SUBN_LID_ROUTED 0x01
+6
include/rdma/iw_cm.h
··· 253 253 int iw_cm_init_qp_attr(struct iw_cm_id *cm_id, struct ib_qp_attr *qp_attr, 254 254 int *qp_attr_mask); 255 255 256 + /** 257 + * iwcm_reject_msg - return a pointer to a reject message string. 258 + * @reason: Value returned in the REJECT event status field. 259 + */ 260 + const char *__attribute_const__ iwcm_reject_msg(int reason); 261 + 256 262 #endif /* IW_CM_H */
-2
include/rdma/opa_smi.h
··· 44 44 #define OPA_MAX_SLS 32 45 45 #define OPA_MAX_SCS 32 46 46 47 - #define OPA_SMI_CLASS_VERSION 0x80 48 - 49 47 #define OPA_LID_PERMISSIVE cpu_to_be32(0xFFFFFFFF) 50 48 51 49 struct opa_smp {
+25
include/rdma/rdma_cm.h
··· 388 388 */ 389 389 __be64 rdma_get_service_id(struct rdma_cm_id *id, struct sockaddr *addr); 390 390 391 + /** 392 + * rdma_reject_msg - return a pointer to a reject message string. 393 + * @id: Communication identifier that received the REJECT event. 394 + * @reason: Value returned in the REJECT event status field. 395 + */ 396 + const char *__attribute_const__ rdma_reject_msg(struct rdma_cm_id *id, 397 + int reason); 398 + /** 399 + * rdma_is_consumer_reject - return true if the consumer rejected the connect 400 + * request. 401 + * @id: Communication identifier that received the REJECT event. 402 + * @reason: Value returned in the REJECT event status field. 403 + */ 404 + bool rdma_is_consumer_reject(struct rdma_cm_id *id, int reason); 405 + 406 + /** 407 + * rdma_consumer_reject_data - return the consumer reject private data and 408 + * length, if any. 409 + * @id: Communication identifier that received the REJECT event. 410 + * @ev: RDMA CM reject event. 411 + * @data_len: Pointer to the resulting length of the consumer data. 412 + */ 413 + const void *rdma_consumer_reject_data(struct rdma_cm_id *id, 414 + struct rdma_cm_event *ev, u8 *data_len); 415 + 391 416 #endif /* RDMA_CM_H */
+6 -6
include/uapi/rdma/rdma_user_cm.h
··· 110 110 __u32 id; 111 111 __u16 addr_size; 112 112 __u16 reserved; 113 - struct sockaddr_storage addr; 113 + struct __kernel_sockaddr_storage addr; 114 114 }; 115 115 116 116 struct rdma_ucm_resolve_ip { ··· 126 126 __u16 src_size; 127 127 __u16 dst_size; 128 128 __u32 reserved; 129 - struct sockaddr_storage src_addr; 130 - struct sockaddr_storage dst_addr; 129 + struct __kernel_sockaddr_storage src_addr; 130 + struct __kernel_sockaddr_storage dst_addr; 131 131 }; 132 132 133 133 struct rdma_ucm_resolve_route { ··· 164 164 __u16 pkey; 165 165 __u16 src_size; 166 166 __u16 dst_size; 167 - struct sockaddr_storage src_addr; 168 - struct sockaddr_storage dst_addr; 167 + struct __kernel_sockaddr_storage src_addr; 168 + struct __kernel_sockaddr_storage dst_addr; 169 169 }; 170 170 171 171 struct rdma_ucm_query_path_resp { ··· 257 257 __u32 id; 258 258 __u16 addr_size; 259 259 __u16 join_flags; 260 - struct sockaddr_storage addr; 260 + struct __kernel_sockaddr_storage addr; 261 261 }; 262 262 263 263 struct rdma_ucm_get_event {
+4 -1
net/rds/rdma_transport.c
··· 100 100 trans->cm_connect_complete(conn, event); 101 101 break; 102 102 103 + case RDMA_CM_EVENT_REJECTED: 104 + rdsdebug("Connection rejected: %s\n", 105 + rdma_reject_msg(cm_id, event->status)); 106 + /* FALLTHROUGH */ 103 107 case RDMA_CM_EVENT_ADDR_ERROR: 104 108 case RDMA_CM_EVENT_ROUTE_ERROR: 105 109 case RDMA_CM_EVENT_CONNECT_ERROR: 106 110 case RDMA_CM_EVENT_UNREACHABLE: 107 - case RDMA_CM_EVENT_REJECTED: 108 111 case RDMA_CM_EVENT_DEVICE_REMOVAL: 109 112 case RDMA_CM_EVENT_ADDR_CHANGE: 110 113 if (conn)