Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

RDMA: Split kernel-only global device caps from uverbs device caps

Split out flags from ib_device::device_cap_flags that are only used
internally to the kernel into kernel_cap_flags that is not part of the
uapi. This limits the device_cap_flags to being the same bitmap that will
be copied to userspace.

This cleanly splits out the uverbs flags from the kernel flags to avoid
confusion in the flags bitmap.

Add some short comments describing what each of the kernel flags is
connected to. Remove unused kernel flags.

Link: https://lore.kernel.org/r/0-v2-22c19e565eef+139a-kern_caps_jgg@nvidia.com
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Max Gurtovoy <mgurtovoy@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>

+100 -116
+1 -1
drivers/infiniband/core/nldev.c
··· 1739 1739 if (!device) 1740 1740 return -EINVAL; 1741 1741 1742 - if (!(device->attrs.device_cap_flags & IB_DEVICE_ALLOW_USER_UNREG)) { 1742 + if (!(device->attrs.kernel_cap_flags & IBK_ALLOW_USER_UNREG)) { 1743 1743 ib_device_put(device); 1744 1744 return -EINVAL; 1745 1745 }
+2 -4
drivers/infiniband/core/uverbs_cmd.c
··· 337 337 resp->hw_ver = attr->hw_ver; 338 338 resp->max_qp = attr->max_qp; 339 339 resp->max_qp_wr = attr->max_qp_wr; 340 - resp->device_cap_flags = lower_32_bits(attr->device_cap_flags & 341 - IB_UVERBS_DEVICE_CAP_FLAGS_MASK); 340 + resp->device_cap_flags = lower_32_bits(attr->device_cap_flags); 342 341 resp->max_sge = min(attr->max_send_sge, attr->max_recv_sge); 343 342 resp->max_sge_rd = attr->max_sge_rd; 344 343 resp->max_cq = attr->max_cq; ··· 3618 3619 3619 3620 resp.timestamp_mask = attr.timestamp_mask; 3620 3621 resp.hca_core_clock = attr.hca_core_clock; 3621 - resp.device_cap_flags_ex = attr.device_cap_flags & 3622 - IB_UVERBS_DEVICE_CAP_FLAGS_MASK; 3622 + resp.device_cap_flags_ex = attr.device_cap_flags; 3623 3623 resp.rss_caps.supported_qpts = attr.rss_caps.supported_qpts; 3624 3624 resp.rss_caps.max_rwq_indirection_tables = 3625 3625 attr.rss_caps.max_rwq_indirection_tables;
+4 -4
drivers/infiniband/core/verbs.c
··· 281 281 } 282 282 rdma_restrack_add(&pd->res); 283 283 284 - if (device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY) 284 + if (device->attrs.kernel_cap_flags & IBK_LOCAL_DMA_LKEY) 285 285 pd->local_dma_lkey = device->local_dma_lkey; 286 286 else 287 287 mr_access_flags |= IB_ACCESS_LOCAL_WRITE; ··· 308 308 309 309 pd->__internal_mr = mr; 310 310 311 - if (!(device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)) 311 + if (!(device->attrs.kernel_cap_flags & IBK_LOCAL_DMA_LKEY)) 312 312 pd->local_dma_lkey = pd->__internal_mr->lkey; 313 313 314 314 if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) ··· 2131 2131 struct ib_mr *mr; 2132 2132 2133 2133 if (access_flags & IB_ACCESS_ON_DEMAND) { 2134 - if (!(pd->device->attrs.device_cap_flags & 2135 - IB_DEVICE_ON_DEMAND_PAGING)) { 2134 + if (!(pd->device->attrs.kernel_cap_flags & 2135 + IBK_ON_DEMAND_PAGING)) { 2136 2136 pr_debug("ODP support not available\n"); 2137 2137 return ERR_PTR(-EINVAL); 2138 2138 }
+1 -1
drivers/infiniband/hw/bnxt_re/ib_verbs.c
··· 146 146 | IB_DEVICE_RC_RNR_NAK_GEN 147 147 | IB_DEVICE_SHUTDOWN_PORT 148 148 | IB_DEVICE_SYS_IMAGE_GUID 149 - | IB_DEVICE_LOCAL_DMA_LKEY 150 149 | IB_DEVICE_RESIZE_MAX_WR 151 150 | IB_DEVICE_PORT_ACTIVE_EVENT 152 151 | IB_DEVICE_N_NOTIFY_CQ 153 152 | IB_DEVICE_MEM_WINDOW 154 153 | IB_DEVICE_MEM_WINDOW_TYPE_2B 155 154 | IB_DEVICE_MEM_MGT_EXTENSIONS; 155 + ib_attr->kernel_cap_flags = IBK_LOCAL_DMA_LKEY; 156 156 ib_attr->max_send_sge = dev_attr->max_qp_sges; 157 157 ib_attr->max_recv_sge = dev_attr->max_qp_sges; 158 158 ib_attr->max_sge_rd = dev_attr->max_qp_sges;
-1
drivers/infiniband/hw/cxgb4/iw_cxgb4.h
··· 314 314 struct c4iw_dev { 315 315 struct ib_device ibdev; 316 316 struct c4iw_rdev rdev; 317 - u32 device_cap_flags; 318 317 struct xarray cqs; 319 318 struct xarray qps; 320 319 struct xarray mrs;
+4 -4
drivers/infiniband/hw/cxgb4/provider.c
··· 269 269 dev->rdev.lldi.ports[0]->dev_addr); 270 270 props->hw_ver = CHELSIO_CHIP_RELEASE(dev->rdev.lldi.adapter_type); 271 271 props->fw_ver = dev->rdev.lldi.fw_vers; 272 - props->device_cap_flags = dev->device_cap_flags; 272 + props->device_cap_flags = IB_DEVICE_MEM_WINDOW; 273 + props->kernel_cap_flags = IBK_LOCAL_DMA_LKEY; 274 + if (fastreg_support) 275 + props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS; 273 276 props->page_size_cap = T4_PAGESIZE_MASK; 274 277 props->vendor_id = (u32)dev->rdev.lldi.pdev->vendor; 275 278 props->vendor_part_id = (u32)dev->rdev.lldi.pdev->device; ··· 532 529 pr_debug("c4iw_dev %p\n", dev); 533 530 addrconf_addr_eui48((u8 *)&dev->ibdev.node_guid, 534 531 dev->rdev.lldi.ports[0]->dev_addr); 535 - dev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_WINDOW; 536 - if (fastreg_support) 537 - dev->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS; 538 532 dev->ibdev.local_dma_lkey = 0; 539 533 dev->ibdev.node_type = RDMA_NODE_RNIC; 540 534 BUILD_BUG_ON(sizeof(C4IW_NODE_DESC) > IB_DEVICE_NODE_DESC_MAX);
+2 -2
drivers/infiniband/hw/hfi1/verbs.c
··· 1300 1300 IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT | 1301 1301 IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN | 1302 1302 IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE | 1303 - IB_DEVICE_MEM_MGT_EXTENSIONS | 1304 - IB_DEVICE_RDMA_NETDEV_OPA; 1303 + IB_DEVICE_MEM_MGT_EXTENSIONS; 1304 + rdi->dparms.props.kernel_cap_flags = IBK_RDMA_NETDEV_OPA; 1305 1305 rdi->dparms.props.page_size_cap = PAGE_SIZE; 1306 1306 rdi->dparms.props.vendor_id = dd->oui1 << 16 | dd->oui2 << 8 | dd->oui3; 1307 1307 rdi->dparms.props.vendor_part_id = dd->pcidev->device;
-4
drivers/infiniband/hw/irdma/hw.c
··· 1827 1827 rf->rsrc_created = true; 1828 1828 } 1829 1829 1830 - iwdev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY | 1831 - IB_DEVICE_MEM_WINDOW | 1832 - IB_DEVICE_MEM_MGT_EXTENSIONS; 1833 - 1834 1830 if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) 1835 1831 irdma_alloc_set_mac(iwdev); 1836 1832 irdma_add_ip(iwdev);
-1
drivers/infiniband/hw/irdma/main.h
··· 338 338 u32 roce_ackcreds; 339 339 u32 vendor_id; 340 340 u32 vendor_part_id; 341 - u32 device_cap_flags; 342 341 u32 push_mode; 343 342 u32 rcv_wnd; 344 343 u16 mac_ip_table_idx;
+3 -1
drivers/infiniband/hw/irdma/verbs.c
··· 25 25 iwdev->netdev->dev_addr); 26 26 props->fw_ver = (u64)irdma_fw_major_ver(&rf->sc_dev) << 32 | 27 27 irdma_fw_minor_ver(&rf->sc_dev); 28 - props->device_cap_flags = iwdev->device_cap_flags; 28 + props->device_cap_flags = IB_DEVICE_MEM_WINDOW | 29 + IB_DEVICE_MEM_MGT_EXTENSIONS; 30 + props->kernel_cap_flags = IBK_LOCAL_DMA_LKEY; 29 31 props->vendor_id = pcidev->vendor; 30 32 props->vendor_part_id = pcidev->device; 31 33
+4 -4
drivers/infiniband/hw/mlx4/main.c
··· 479 479 props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT | 480 480 IB_DEVICE_PORT_ACTIVE_EVENT | 481 481 IB_DEVICE_SYS_IMAGE_GUID | 482 - IB_DEVICE_RC_RNR_NAK_GEN | 483 - IB_DEVICE_BLOCK_MULTICAST_LOOPBACK; 482 + IB_DEVICE_RC_RNR_NAK_GEN; 483 + props->kernel_cap_flags = IBK_BLOCK_MULTICAST_LOOPBACK; 484 484 if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR) 485 485 props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR; 486 486 if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR) ··· 494 494 if (dev->dev->caps.max_gso_sz && 495 495 (dev->dev->rev_id != MLX4_IB_CARD_REV_A0) && 496 496 (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH)) 497 - props->device_cap_flags |= IB_DEVICE_UD_TSO; 497 + props->kernel_cap_flags |= IBK_UD_TSO; 498 498 if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY) 499 - props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY; 499 + props->kernel_cap_flags |= IBK_LOCAL_DMA_LKEY; 500 500 if ((dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_LOCAL_INV) && 501 501 (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_REMOTE_INV) && 502 502 (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_FAST_REG_WR))
+6 -9
drivers/infiniband/hw/mlx5/main.c
··· 855 855 IB_DEVICE_MEM_WINDOW_TYPE_2B; 856 856 props->max_mw = 1 << MLX5_CAP_GEN(mdev, log_max_mkey); 857 857 /* We support 'Gappy' memory registration too */ 858 - props->device_cap_flags |= IB_DEVICE_SG_GAPS_REG; 858 + props->kernel_cap_flags |= IBK_SG_GAPS_REG; 859 859 } 860 860 /* IB_WR_REG_MR always requires changing the entity size with UMR */ 861 861 if (!MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled)) 862 862 props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS; 863 863 if (MLX5_CAP_GEN(mdev, sho)) { 864 - props->device_cap_flags |= IB_DEVICE_INTEGRITY_HANDOVER; 864 + props->kernel_cap_flags |= IBK_INTEGRITY_HANDOVER; 865 865 /* At this stage no support for signature handover */ 866 866 props->sig_prot_cap = IB_PROT_T10DIF_TYPE_1 | 867 867 IB_PROT_T10DIF_TYPE_2 | ··· 870 870 IB_GUARD_T10DIF_CSUM; 871 871 } 872 872 if (MLX5_CAP_GEN(mdev, block_lb_mc)) 873 - props->device_cap_flags |= IB_DEVICE_BLOCK_MULTICAST_LOOPBACK; 873 + props->kernel_cap_flags |= IBK_BLOCK_MULTICAST_LOOPBACK; 874 874 875 875 if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) && raw_support) { 876 876 if (MLX5_CAP_ETH(mdev, csum_cap)) { ··· 921 921 922 922 if (MLX5_CAP_GEN(mdev, ipoib_basic_offloads)) { 923 923 props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM; 924 - props->device_cap_flags |= IB_DEVICE_UD_TSO; 924 + props->kernel_cap_flags |= IBK_UD_TSO; 925 925 } 926 926 927 927 if (MLX5_CAP_GEN(dev->mdev, rq_delay_drop) && ··· 997 997 998 998 if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) { 999 999 if (dev->odp_caps.general_caps & IB_ODP_SUPPORT) 1000 - props->device_cap_flags |= IB_DEVICE_ON_DEMAND_PAGING; 1000 + props->kernel_cap_flags |= IBK_ON_DEMAND_PAGING; 1001 1001 props->odp_caps = dev->odp_caps; 1002 1002 if (!uhw) { 1003 1003 /* ODP for kernel QPs is not implemented for receive ··· 1018 1018 } 1019 1019 } 1020 1020 1021 - if (MLX5_CAP_GEN(mdev, cd)) 1022 - props->device_cap_flags |= IB_DEVICE_CROSS_CHANNEL; 1023 - 1024 1021 if (mlx5_core_is_vf(mdev)) 1025 - 
props->device_cap_flags |= IB_DEVICE_VIRTUAL_FUNCTION; 1022 + props->kernel_cap_flags |= IBK_VIRTUAL_FUNCTION; 1026 1023 1027 1024 if (mlx5_ib_port_link_layer(ibdev, 1) == 1028 1025 IB_LINK_LAYER_ETHERNET && raw_support) {
+1 -1
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
··· 90 90 IB_DEVICE_RC_RNR_NAK_GEN | 91 91 IB_DEVICE_SHUTDOWN_PORT | 92 92 IB_DEVICE_SYS_IMAGE_GUID | 93 - IB_DEVICE_LOCAL_DMA_LKEY | 94 93 IB_DEVICE_MEM_MGT_EXTENSIONS; 94 + attr->kernel_cap_flags = IBK_LOCAL_DMA_LKEY; 95 95 attr->max_send_sge = dev->attr.max_send_sge; 96 96 attr->max_recv_sge = dev->attr.max_recv_sge; 97 97 attr->max_sge_rd = dev->attr.max_rdma_sge;
+2 -1
drivers/infiniband/hw/qedr/verbs.c
··· 134 134 attr->max_qp_wr = max_t(u32, qattr->max_sqe, qattr->max_rqe); 135 135 attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD | 136 136 IB_DEVICE_RC_RNR_NAK_GEN | 137 - IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_MGT_EXTENSIONS; 137 + IB_DEVICE_MEM_MGT_EXTENSIONS; 138 + attr->kernel_cap_flags = IBK_LOCAL_DMA_LKEY; 138 139 139 140 if (!rdma_protocol_iwarp(&dev->ibdev, 1)) 140 141 attr->device_cap_flags |= IB_DEVICE_XRC;
+2 -1
drivers/infiniband/hw/usnic/usnic_ib_verbs.c
··· 305 305 props->max_qp = qp_per_vf * 306 306 kref_read(&us_ibdev->vf_cnt); 307 307 props->device_cap_flags = IB_DEVICE_PORT_ACTIVE_EVENT | 308 - IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_BLOCK_MULTICAST_LOOPBACK; 308 + IB_DEVICE_SYS_IMAGE_GUID; 309 + props->kernel_cap_flags = IBK_BLOCK_MULTICAST_LOOPBACK; 309 310 props->max_cq = us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_CQ] * 310 311 kref_read(&us_ibdev->vf_cnt); 311 312 props->max_pd = USNIC_UIOM_MAX_PD_CNT;
+1
drivers/infiniband/sw/rxe/rxe.c
··· 46 46 rxe->attr.max_qp = RXE_MAX_QP; 47 47 rxe->attr.max_qp_wr = RXE_MAX_QP_WR; 48 48 rxe->attr.device_cap_flags = RXE_DEVICE_CAP_FLAGS; 49 + rxe->attr.kernel_cap_flags = IBK_ALLOW_USER_UNREG; 49 50 rxe->attr.max_send_sge = RXE_MAX_SGE; 50 51 rxe->attr.max_recv_sge = RXE_MAX_SGE; 51 52 rxe->attr.max_sge_rd = RXE_MAX_SGE_RD;
-1
drivers/infiniband/sw/rxe/rxe_param.h
··· 50 50 | IB_DEVICE_RC_RNR_NAK_GEN 51 51 | IB_DEVICE_SRQ_RESIZE 52 52 | IB_DEVICE_MEM_MGT_EXTENSIONS 53 - | IB_DEVICE_ALLOW_USER_UNREG 54 53 | IB_DEVICE_MEM_WINDOW 55 54 | IB_DEVICE_MEM_WINDOW_TYPE_2A 56 55 | IB_DEVICE_MEM_WINDOW_TYPE_2B,
+2 -2
drivers/infiniband/sw/siw/siw_verbs.c
··· 132 132 133 133 /* Revisit atomic caps if RFC 7306 gets supported */ 134 134 attr->atomic_cap = 0; 135 - attr->device_cap_flags = 136 - IB_DEVICE_MEM_MGT_EXTENSIONS | IB_DEVICE_ALLOW_USER_UNREG; 135 + attr->device_cap_flags = IB_DEVICE_MEM_MGT_EXTENSIONS; 136 + attr->kernel_cap_flags = IBK_ALLOW_USER_UNREG; 137 137 attr->max_cq = sdev->attrs.max_cq; 138 138 attr->max_cqe = sdev->attrs.max_cqe; 139 139 attr->max_fast_reg_page_list_len = SIW_MAX_SGE_PBL;
+1
drivers/infiniband/ulp/ipoib/ipoib.h
··· 411 411 struct dentry *path_dentry; 412 412 #endif 413 413 u64 hca_caps; 414 + u64 kernel_caps; 414 415 struct ipoib_ethtool_st ethtool; 415 416 unsigned int max_send_sge; 416 417 const struct net_device_ops *rn_ops;
+3 -2
drivers/infiniband/ulp/ipoib/ipoib_main.c
··· 1850 1850 static void ipoib_set_dev_features(struct ipoib_dev_priv *priv) 1851 1851 { 1852 1852 priv->hca_caps = priv->ca->attrs.device_cap_flags; 1853 + priv->kernel_caps = priv->ca->attrs.kernel_cap_flags; 1853 1854 1854 1855 if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) { 1855 1856 priv->dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM; 1856 1857 1857 - if (priv->hca_caps & IB_DEVICE_UD_TSO) 1858 + if (priv->kernel_caps & IBK_UD_TSO) 1858 1859 priv->dev->hw_features |= NETIF_F_TSO; 1859 1860 1860 1861 priv->dev->features |= priv->dev->hw_features; ··· 2202 2201 2203 2202 priv->rn_ops = dev->netdev_ops; 2204 2203 2205 - if (hca->attrs.device_cap_flags & IB_DEVICE_VIRTUAL_FUNCTION) 2204 + if (hca->attrs.kernel_cap_flags & IBK_VIRTUAL_FUNCTION) 2206 2205 dev->netdev_ops = &ipoib_netdev_ops_vf; 2207 2206 else 2208 2207 dev->netdev_ops = &ipoib_netdev_ops_pf;
+3 -3
drivers/infiniband/ulp/ipoib/ipoib_verbs.c
··· 197 197 init_attr.send_cq = priv->send_cq; 198 198 init_attr.recv_cq = priv->recv_cq; 199 199 200 - if (priv->hca_caps & IB_DEVICE_UD_TSO) 200 + if (priv->kernel_caps & IBK_UD_TSO) 201 201 init_attr.create_flags |= IB_QP_CREATE_IPOIB_UD_LSO; 202 202 203 - if (priv->hca_caps & IB_DEVICE_BLOCK_MULTICAST_LOOPBACK) 203 + if (priv->kernel_caps & IBK_BLOCK_MULTICAST_LOOPBACK) 204 204 init_attr.create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK; 205 205 206 206 if (priv->hca_caps & IB_DEVICE_MANAGED_FLOW_STEERING) 207 207 init_attr.create_flags |= IB_QP_CREATE_NETIF_QP; 208 208 209 - if (priv->hca_caps & IB_DEVICE_RDMA_NETDEV_OPA) 209 + if (priv->kernel_caps & IBK_RDMA_NETDEV_OPA) 210 210 init_attr.create_flags |= IB_QP_CREATE_NETDEV_USE; 211 211 212 212 priv->qp = ib_create_qp(priv->pd, &init_attr);
+1 -1
drivers/infiniband/ulp/iser/iscsi_iser.c
··· 650 650 SHOST_DIX_GUARD_CRC); 651 651 } 652 652 653 - if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG)) 653 + if (!(ib_dev->attrs.kernel_cap_flags & IBK_SG_GAPS_REG)) 654 654 shost->virt_boundary_mask = SZ_4K - 1; 655 655 656 656 if (iscsi_host_add(shost, ib_dev->dev.parent)) {
+4 -4
drivers/infiniband/ulp/iser/iser_verbs.c
··· 115 115 if (!desc) 116 116 return ERR_PTR(-ENOMEM); 117 117 118 - if (ib_dev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG) 118 + if (ib_dev->attrs.kernel_cap_flags & IBK_SG_GAPS_REG) 119 119 mr_type = IB_MR_TYPE_SG_GAPS; 120 120 else 121 121 mr_type = IB_MR_TYPE_MEM_REG; ··· 517 517 * (head and tail) for a single page worth data, so one additional 518 518 * entry is required. 519 519 */ 520 - if (attr->device_cap_flags & IB_DEVICE_SG_GAPS_REG) 520 + if (attr->kernel_cap_flags & IBK_SG_GAPS_REG) 521 521 reserved_mr_pages = 0; 522 522 else 523 523 reserved_mr_pages = 1; ··· 562 562 563 563 /* connection T10-PI support */ 564 564 if (iser_pi_enable) { 565 - if (!(device->ib_device->attrs.device_cap_flags & 566 - IB_DEVICE_INTEGRITY_HANDOVER)) { 565 + if (!(device->ib_device->attrs.kernel_cap_flags & 566 + IBK_INTEGRITY_HANDOVER)) { 567 567 iser_warn("T10-PI requested but not supported on %s, " 568 568 "continue without T10-PI\n", 569 569 dev_name(&ib_conn->device->ib_device->dev));
+1 -1
drivers/infiniband/ulp/isert/ib_isert.c
··· 230 230 } 231 231 232 232 /* Check signature cap */ 233 - if (ib_dev->attrs.device_cap_flags & IB_DEVICE_INTEGRITY_HANDOVER) 233 + if (ib_dev->attrs.kernel_cap_flags & IBK_INTEGRITY_HANDOVER) 234 234 device->pi_capable = true; 235 235 else 236 236 device->pi_capable = false;
+4 -4
drivers/infiniband/ulp/srp/ib_srp.c
··· 430 430 spin_lock_init(&pool->lock); 431 431 INIT_LIST_HEAD(&pool->free_list); 432 432 433 - if (device->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG) 433 + if (device->attrs.kernel_cap_flags & IBK_SG_GAPS_REG) 434 434 mr_type = IB_MR_TYPE_SG_GAPS; 435 435 else 436 436 mr_type = IB_MR_TYPE_MEM_REG; ··· 3650 3650 target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb; 3651 3651 target_host->max_segment_size = ib_dma_max_seg_size(ibdev); 3652 3652 3653 - if (!(ibdev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG)) 3653 + if (!(ibdev->attrs.kernel_cap_flags & IBK_SG_GAPS_REG)) 3654 3654 target_host->virt_boundary_mask = ~srp_dev->mr_page_mask; 3655 3655 3656 3656 target = host_to_target(target_host); ··· 3706 3706 } 3707 3707 3708 3708 if (srp_dev->use_fast_reg) { 3709 - bool gaps_reg = (ibdev->attrs.device_cap_flags & 3710 - IB_DEVICE_SG_GAPS_REG); 3709 + bool gaps_reg = ibdev->attrs.kernel_cap_flags & 3710 + IBK_SG_GAPS_REG; 3711 3711 3712 3712 max_sectors_per_mr = srp_dev->max_pages_per_mr << 3713 3713 (ilog2(srp_dev->mr_page_size) - 9);
+2 -2
drivers/nvme/host/rdma.c
··· 867 867 ctrl->ctrl.numa_node = ibdev_to_node(ctrl->device->dev); 868 868 869 869 /* T10-PI support */ 870 - if (ctrl->device->dev->attrs.device_cap_flags & 871 - IB_DEVICE_INTEGRITY_HANDOVER) 870 + if (ctrl->device->dev->attrs.kernel_cap_flags & 871 + IBK_INTEGRITY_HANDOVER) 872 872 pi_capable = true; 873 873 874 874 ctrl->max_fr_pages = nvme_rdma_get_max_fr_pages(ctrl->device->dev,
+2 -2
drivers/nvme/target/rdma.c
··· 1221 1221 ndev->inline_data_size = nport->inline_data_size; 1222 1222 ndev->inline_page_count = inline_page_count; 1223 1223 1224 - if (nport->pi_enable && !(cm_id->device->attrs.device_cap_flags & 1225 - IB_DEVICE_INTEGRITY_HANDOVER)) { 1224 + if (nport->pi_enable && !(cm_id->device->attrs.kernel_cap_flags & 1225 + IBK_INTEGRITY_HANDOVER)) { 1226 1226 pr_warn("T10-PI is not supported by device %s. Disabling it\n", 1227 1227 cm_id->device->name); 1228 1228 nport->pi_enable = false;
+1 -1
fs/cifs/smbdirect.c
··· 649 649 smbd_max_frmr_depth, 650 650 info->id->device->attrs.max_fast_reg_page_list_len); 651 651 info->mr_type = IB_MR_TYPE_MEM_REG; 652 - if (info->id->device->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG) 652 + if (info->id->device->attrs.kernel_cap_flags & IBK_SG_GAPS_REG) 653 653 info->mr_type = IB_MR_TYPE_SG_GAPS; 654 654 655 655 info->pd = ib_alloc_pd(info->id->device, 0);
+35 -49
include/rdma/ib_verbs.h
··· 236 236 IB_DEVICE_SRQ_RESIZE = IB_UVERBS_DEVICE_SRQ_RESIZE, 237 237 IB_DEVICE_N_NOTIFY_CQ = IB_UVERBS_DEVICE_N_NOTIFY_CQ, 238 238 239 - /* 240 - * This device supports a per-device lkey or stag that can be 241 - * used without performing a memory registration for the local 242 - * memory. Note that ULPs should never check this flag, but 243 - * instead of use the local_dma_lkey flag in the ib_pd structure, 244 - * which will always contain a usable lkey. 245 - */ 246 - IB_DEVICE_LOCAL_DMA_LKEY = 1 << 15, 247 239 /* Reserved, old SEND_W_INV = 1 << 16,*/ 248 240 IB_DEVICE_MEM_WINDOW = IB_UVERBS_DEVICE_MEM_WINDOW, 249 241 /* ··· 246 254 * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode. 247 255 */ 248 256 IB_DEVICE_UD_IP_CSUM = IB_UVERBS_DEVICE_UD_IP_CSUM, 249 - IB_DEVICE_UD_TSO = 1 << 19, 250 257 IB_DEVICE_XRC = IB_UVERBS_DEVICE_XRC, 251 258 252 259 /* ··· 258 267 * stag. 259 268 */ 260 269 IB_DEVICE_MEM_MGT_EXTENSIONS = IB_UVERBS_DEVICE_MEM_MGT_EXTENSIONS, 261 - IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = 1 << 22, 262 270 IB_DEVICE_MEM_WINDOW_TYPE_2A = IB_UVERBS_DEVICE_MEM_WINDOW_TYPE_2A, 263 271 IB_DEVICE_MEM_WINDOW_TYPE_2B = IB_UVERBS_DEVICE_MEM_WINDOW_TYPE_2B, 264 272 IB_DEVICE_RC_IP_CSUM = IB_UVERBS_DEVICE_RC_IP_CSUM, 265 273 /* Deprecated. Please use IB_RAW_PACKET_CAP_IP_CSUM. */ 266 274 IB_DEVICE_RAW_IP_CSUM = IB_UVERBS_DEVICE_RAW_IP_CSUM, 267 - /* 268 - * Devices should set IB_DEVICE_CROSS_CHANNEL if they 269 - * support execution of WQEs that involve synchronization 270 - * of I/O operations with single completion queue managed 271 - * by hardware. 272 - */ 273 - IB_DEVICE_CROSS_CHANNEL = 1 << 27, 274 275 IB_DEVICE_MANAGED_FLOW_STEERING = 275 276 IB_UVERBS_DEVICE_MANAGED_FLOW_STEERING, 276 - IB_DEVICE_INTEGRITY_HANDOVER = 1 << 30, 277 - IB_DEVICE_ON_DEMAND_PAGING = 1ULL << 31, 278 - IB_DEVICE_SG_GAPS_REG = 1ULL << 32, 279 - IB_DEVICE_VIRTUAL_FUNCTION = 1ULL << 33, 280 277 /* Deprecated. Please use IB_RAW_PACKET_CAP_SCATTER_FCS. 
*/ 281 278 IB_DEVICE_RAW_SCATTER_FCS = IB_UVERBS_DEVICE_RAW_SCATTER_FCS, 282 - IB_DEVICE_RDMA_NETDEV_OPA = 1ULL << 35, 283 279 /* The device supports padding incoming writes to cacheline. */ 284 280 IB_DEVICE_PCI_WRITE_END_PADDING = 285 281 IB_UVERBS_DEVICE_PCI_WRITE_END_PADDING, 286 - IB_DEVICE_ALLOW_USER_UNREG = 1ULL << 37, 287 282 }; 288 283 289 - #define IB_UVERBS_DEVICE_CAP_FLAGS_MASK (IB_UVERBS_DEVICE_RESIZE_MAX_WR | \ 290 - IB_UVERBS_DEVICE_BAD_PKEY_CNTR | \ 291 - IB_UVERBS_DEVICE_BAD_QKEY_CNTR | \ 292 - IB_UVERBS_DEVICE_RAW_MULTI | \ 293 - IB_UVERBS_DEVICE_AUTO_PATH_MIG | \ 294 - IB_UVERBS_DEVICE_CHANGE_PHY_PORT | \ 295 - IB_UVERBS_DEVICE_UD_AV_PORT_ENFORCE | \ 296 - IB_UVERBS_DEVICE_CURR_QP_STATE_MOD | \ 297 - IB_UVERBS_DEVICE_SHUTDOWN_PORT | \ 298 - IB_UVERBS_DEVICE_PORT_ACTIVE_EVENT | \ 299 - IB_UVERBS_DEVICE_SYS_IMAGE_GUID | \ 300 - IB_UVERBS_DEVICE_RC_RNR_NAK_GEN | \ 301 - IB_UVERBS_DEVICE_SRQ_RESIZE | \ 302 - IB_UVERBS_DEVICE_N_NOTIFY_CQ | \ 303 - IB_UVERBS_DEVICE_MEM_WINDOW | \ 304 - IB_UVERBS_DEVICE_UD_IP_CSUM | \ 305 - IB_UVERBS_DEVICE_XRC | \ 306 - IB_UVERBS_DEVICE_MEM_MGT_EXTENSIONS | \ 307 - IB_UVERBS_DEVICE_MEM_WINDOW_TYPE_2A | \ 308 - IB_UVERBS_DEVICE_MEM_WINDOW_TYPE_2B | \ 309 - IB_UVERBS_DEVICE_RC_IP_CSUM | \ 310 - IB_UVERBS_DEVICE_RAW_IP_CSUM | \ 311 - IB_UVERBS_DEVICE_MANAGED_FLOW_STEERING | \ 312 - IB_UVERBS_DEVICE_RAW_SCATTER_FCS | \ 313 - IB_UVERBS_DEVICE_PCI_WRITE_END_PADDING) 284 + enum ib_kernel_cap_flags { 285 + /* 286 + * This device supports a per-device lkey or stag that can be 287 + * used without performing a memory registration for the local 288 + * memory. Note that ULPs should never check this flag, but 289 + * instead of use the local_dma_lkey flag in the ib_pd structure, 290 + * which will always contain a usable lkey. 
291 + */ 292 + IBK_LOCAL_DMA_LKEY = 1 << 0, 293 + /* IB_QP_CREATE_INTEGRITY_EN is supported to implement T10-PI */ 294 + IBK_INTEGRITY_HANDOVER = 1 << 1, 295 + /* IB_ACCESS_ON_DEMAND is supported during reg_user_mr() */ 296 + IBK_ON_DEMAND_PAGING = 1 << 2, 297 + /* IB_MR_TYPE_SG_GAPS is supported */ 298 + IBK_SG_GAPS_REG = 1 << 3, 299 + /* Driver supports RDMA_NLDEV_CMD_DELLINK */ 300 + IBK_ALLOW_USER_UNREG = 1 << 4, 301 + 302 + /* ipoib will use IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK */ 303 + IBK_BLOCK_MULTICAST_LOOPBACK = 1 << 5, 304 + /* ipoib will use IB_QP_CREATE_IPOIB_UD_LSO for its QPs */ 305 + IBK_UD_TSO = 1 << 6, 306 + /* ipoib will use the device ops: 307 + * get_vf_config 308 + * get_vf_guid 309 + * get_vf_stats 310 + * set_vf_guid 311 + * set_vf_link_state 312 + */ 313 + IBK_VIRTUAL_FUNCTION = 1 << 7, 314 + /* ipoib will use IB_QP_CREATE_NETDEV_USE for its QPs */ 315 + IBK_RDMA_NETDEV_OPA = 1 << 8, 316 + }; 314 317 315 318 enum ib_atomic_cap { 316 319 IB_ATOMIC_NONE, ··· 402 417 int max_qp; 403 418 int max_qp_wr; 404 419 u64 device_cap_flags; 420 + u64 kernel_cap_flags; 405 421 int max_send_sge; 406 422 int max_recv_sge; 407 423 int max_sge_rd; ··· 4330 4344 return -EINVAL; 4331 4345 4332 4346 if (flags & IB_ACCESS_ON_DEMAND && 4333 - !(ib_dev->attrs.device_cap_flags & IB_DEVICE_ON_DEMAND_PAGING)) 4347 + !(ib_dev->attrs.kernel_cap_flags & IBK_ON_DEMAND_PAGING)) 4334 4348 return -EINVAL; 4335 4349 return 0; 4336 4350 }
+1 -2
include/rdma/opa_vnic.h
··· 90 90 91 91 static inline bool rdma_cap_opa_vnic(struct ib_device *device) 92 92 { 93 - return !!(device->attrs.device_cap_flags & 94 - IB_DEVICE_RDMA_NETDEV_OPA); 93 + return !!(device->attrs.kernel_cap_flags & IBK_RDMA_NETDEV_OPA); 95 94 } 96 95 97 96 #endif /* _OPA_VNIC_H */
+4
include/uapi/rdma/ib_user_verbs.h
··· 1298 1298 1299 1299 #define IB_DEVICE_NAME_MAX 64 1300 1300 1301 + /* 1302 + * bits 9, 15, 16, 19, 22, 27, 30, 31, 32, 33, 35 and 37 may be set by old 1303 + * kernels and should not be used. 1304 + */ 1301 1305 enum ib_uverbs_device_cap_flags { 1302 1306 IB_UVERBS_DEVICE_RESIZE_MAX_WR = 1 << 0, 1303 1307 IB_UVERBS_DEVICE_BAD_PKEY_CNTR = 1 << 1,
+2 -2
net/rds/ib.c
··· 154 154 rds_ibdev->max_sge = min(device->attrs.max_send_sge, RDS_IB_MAX_SGE); 155 155 156 156 rds_ibdev->odp_capable = 157 - !!(device->attrs.device_cap_flags & 158 - IB_DEVICE_ON_DEMAND_PAGING) && 157 + !!(device->attrs.kernel_cap_flags & 158 + IBK_ON_DEMAND_PAGING) && 159 159 !!(device->attrs.odp_caps.per_transport_caps.rc_odp_caps & 160 160 IB_ODP_SUPPORT_WRITE) && 161 161 !!(device->attrs.odp_caps.per_transport_caps.rc_odp_caps &
+1 -1
net/sunrpc/xprtrdma/frwr_ops.c
··· 195 195 ep->re_attr.cap.max_recv_sge = 1; 196 196 197 197 ep->re_mrtype = IB_MR_TYPE_MEM_REG; 198 - if (attrs->device_cap_flags & IB_DEVICE_SG_GAPS_REG) 198 + if (attrs->kernel_cap_flags & IBK_SG_GAPS_REG) 199 199 ep->re_mrtype = IB_MR_TYPE_SG_GAPS; 200 200 201 201 /* Quirk: Some devices advertise a large max_fast_reg_page_list_len