Merge branch 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband

* 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband:
IPoIB: Make sure struct ipoib_neigh.queue is always initialized
IB/iser: Use the new verbs DMA mapping functions
IB/srp: Use new verbs IB DMA mapping functions
IPoIB: Use the new verbs DMA mapping functions
IB/core: Use the new verbs DMA mapping functions
IB/ipath: Implement new verbs DMA mapping functions
IB: Add DMA mapping functions to allow device drivers to interpose
RDMA/cma: Export rdma cm interface to userspace
RDMA/cma: Add support for RDMA_PS_UDP
RDMA/cma: Allow early transition to RTS to handle lost CM messages
RDMA/cma: Report connect info with connect events
RDMA/cma: Remove unneeded qp_type parameter from rdma_cm
IB/ipath: Fix IRQ for PCI Express HCAs
RDMA/amso1100: Fix memory leak in c2_qp_modify()
IB/iser: Remove unused "write-only" variables
IB/ipath: Remove unused "write-only" variables
IB/fmr: ib_flush_fmr_pool() may wait too long

+2145 -353
+5 -1
drivers/infiniband/core/Makefile
··· 1 1 infiniband-$(CONFIG_INFINIBAND_ADDR_TRANS) := ib_addr.o rdma_cm.o 2 + user_access-$(CONFIG_INFINIBAND_ADDR_TRANS) := rdma_ucm.o 2 3 3 4 obj-$(CONFIG_INFINIBAND) += ib_core.o ib_mad.o ib_sa.o \ 4 5 ib_cm.o iw_cm.o $(infiniband-y) 5 6 obj-$(CONFIG_INFINIBAND_USER_MAD) += ib_umad.o 6 - obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o ib_ucm.o 7 + obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o ib_ucm.o \ 8 + $(user_access-y) 7 9 8 10 ib_core-y := packer.o ud_header.o verbs.o sysfs.o \ 9 11 device.o fmr_pool.o cache.o ··· 19 17 iw_cm-y := iwcm.o 20 18 21 19 rdma_cm-y := cma.o 20 + 21 + rdma_ucm-y := ucma.o 22 22 23 23 ib_addr-y := addr.o 24 24
+4
drivers/infiniband/core/cm.c
··· 3289 3289 3290 3290 spin_lock_irqsave(&cm_id_priv->lock, flags); 3291 3291 switch (cm_id_priv->id.state) { 3292 + /* Allow transition to RTS before sending REP */ 3293 + case IB_CM_REQ_RCVD: 3294 + case IB_CM_MRA_REQ_SENT: 3295 + 3292 3296 case IB_CM_REP_RCVD: 3293 3297 case IB_CM_MRA_REP_SENT: 3294 3298 case IB_CM_REP_SENT:
+324 -92
drivers/infiniband/core/cma.c
··· 70 70 static struct workqueue_struct *cma_wq; 71 71 static DEFINE_IDR(sdp_ps); 72 72 static DEFINE_IDR(tcp_ps); 73 + static DEFINE_IDR(udp_ps); 73 74 74 75 struct cma_device { 75 76 struct list_head list; ··· 134 133 135 134 u32 seq_num; 136 135 u32 qp_num; 137 - enum ib_qp_type qp_type; 138 136 u8 srq; 139 137 }; 140 138 ··· 392 392 393 393 id->qp = qp; 394 394 id_priv->qp_num = qp->qp_num; 395 - id_priv->qp_type = qp->qp_type; 396 395 id_priv->srq = (qp->srq != NULL); 397 396 return 0; 398 397 err: ··· 509 510 return cma_zero_addr(addr) || cma_loopback_addr(addr); 510 511 } 511 512 513 + static inline __be16 cma_port(struct sockaddr *addr) 514 + { 515 + if (addr->sa_family == AF_INET) 516 + return ((struct sockaddr_in *) addr)->sin_port; 517 + else 518 + return ((struct sockaddr_in6 *) addr)->sin6_port; 519 + } 520 + 512 521 static inline int cma_any_port(struct sockaddr *addr) 513 522 { 514 - return !((struct sockaddr_in *) addr)->sin_port; 523 + return !cma_port(addr); 515 524 } 516 525 517 526 static int cma_get_net_info(void *hdr, enum rdma_port_space ps, ··· 599 592 default: 600 593 return sizeof(struct cma_hdr); 601 594 } 602 - } 603 - 604 - static int cma_notify_user(struct rdma_id_private *id_priv, 605 - enum rdma_cm_event_type type, int status, 606 - void *data, u8 data_len) 607 - { 608 - struct rdma_cm_event event; 609 - 610 - event.event = type; 611 - event.status = status; 612 - event.private_data = data; 613 - event.private_data_len = data_len; 614 - 615 - return id_priv->id.event_handler(&id_priv->id, &event); 616 595 } 617 596 618 597 static void cma_cancel_route(struct rdma_id_private *id_priv) ··· 769 776 return 0; 770 777 } 771 778 772 - static int cma_rtu_recv(struct rdma_id_private *id_priv) 779 + static void cma_set_rep_event_data(struct rdma_cm_event *event, 780 + struct ib_cm_rep_event_param *rep_data, 781 + void *private_data) 773 782 { 774 - int ret; 775 - 776 - ret = cma_modify_qp_rts(&id_priv->id); 777 - if (ret) 778 - goto reject; 779 - 780 - return 0; 781 - reject: 782 - cma_modify_qp_err(&id_priv->id); 783 - ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED, 784 - NULL, 0, NULL, 0); 785 - return ret; 783 + event->param.conn.private_data = private_data; 784 + event->param.conn.private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE; 785 + event->param.conn.responder_resources = rep_data->responder_resources; 786 + event->param.conn.initiator_depth = rep_data->initiator_depth; 787 + event->param.conn.flow_control = rep_data->flow_control; 788 + event->param.conn.rnr_retry_count = rep_data->rnr_retry_count; 789 + event->param.conn.srq = rep_data->srq; 790 + event->param.conn.qp_num = rep_data->remote_qpn; 786 791 } 787 792 788 793 static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event) 789 794 { 790 795 struct rdma_id_private *id_priv = cm_id->context; 791 - enum rdma_cm_event_type event; 792 - u8 private_data_len = 0; 793 - int ret = 0, status = 0; 796 + struct rdma_cm_event event; 797 + int ret = 0; 794 798 795 799 atomic_inc(&id_priv->dev_remove); 796 800 if (!cma_comp(id_priv, CMA_CONNECT)) 797 801 goto out; 798 802 803 + memset(&event, 0, sizeof event); 799 804 switch (ib_event->event) { 800 805 case IB_CM_REQ_ERROR: 801 806 case IB_CM_REP_ERROR: 802 - event = RDMA_CM_EVENT_UNREACHABLE; 803 - status = -ETIMEDOUT; 807 + event.event = RDMA_CM_EVENT_UNREACHABLE; 808 + event.status = -ETIMEDOUT; 804 809 break; 805 810 case IB_CM_REP_RECEIVED: 806 - status = cma_verify_rep(id_priv, ib_event->private_data); 807 - if (status) 808 - 
event = RDMA_CM_EVENT_CONNECT_ERROR; 811 + event.status = cma_verify_rep(id_priv, ib_event->private_data); 812 + if (event.status) 813 + event.event = RDMA_CM_EVENT_CONNECT_ERROR; 809 814 else if (id_priv->id.qp && id_priv->id.ps != RDMA_PS_SDP) { 810 - status = cma_rep_recv(id_priv); 811 - event = status ? RDMA_CM_EVENT_CONNECT_ERROR : 812 - RDMA_CM_EVENT_ESTABLISHED; 815 + event.status = cma_rep_recv(id_priv); 816 + event.event = event.status ? RDMA_CM_EVENT_CONNECT_ERROR : 817 + RDMA_CM_EVENT_ESTABLISHED; 813 818 } else 814 - event = RDMA_CM_EVENT_CONNECT_RESPONSE; 815 - private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE; 819 + event.event = RDMA_CM_EVENT_CONNECT_RESPONSE; 820 + cma_set_rep_event_data(&event, &ib_event->param.rep_rcvd, 821 + ib_event->private_data); 816 822 break; 817 823 case IB_CM_RTU_RECEIVED: 818 - status = cma_rtu_recv(id_priv); 819 - event = status ? RDMA_CM_EVENT_CONNECT_ERROR : 820 - RDMA_CM_EVENT_ESTABLISHED; 824 + case IB_CM_USER_ESTABLISHED: 825 + event.event = RDMA_CM_EVENT_ESTABLISHED; 821 826 break; 822 827 case IB_CM_DREQ_ERROR: 823 - status = -ETIMEDOUT; /* fall through */ 828 + event.status = -ETIMEDOUT; /* fall through */ 824 829 case IB_CM_DREQ_RECEIVED: 825 830 case IB_CM_DREP_RECEIVED: 826 831 if (!cma_comp_exch(id_priv, CMA_CONNECT, CMA_DISCONNECT)) 827 832 goto out; 828 - event = RDMA_CM_EVENT_DISCONNECTED; 833 + event.event = RDMA_CM_EVENT_DISCONNECTED; 829 834 break; 830 835 case IB_CM_TIMEWAIT_EXIT: 831 836 case IB_CM_MRA_RECEIVED: ··· 831 840 goto out; 832 841 case IB_CM_REJ_RECEIVED: 833 842 cma_modify_qp_err(&id_priv->id); 834 - status = ib_event->param.rej_rcvd.reason; 835 - event = RDMA_CM_EVENT_REJECTED; 836 - private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE; 843 + event.status = ib_event->param.rej_rcvd.reason; 844 + event.event = RDMA_CM_EVENT_REJECTED; 845 + event.param.conn.private_data = ib_event->private_data; 846 + event.param.conn.private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE; 837 847 break; 838 848 default: 839 849 printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d", ··· 842 850 goto out; 843 851 } 844 852 845 - ret = cma_notify_user(id_priv, event, status, ib_event->private_data, 846 - private_data_len); 853 + ret = id_priv->id.event_handler(&id_priv->id, &event); 847 854 if (ret) { 848 855 /* Destroy the CM ID by returning a non-zero value. 
*/ 849 856 id_priv->cm_id.ib = NULL; ··· 856 865 return ret; 857 866 } 858 867 859 - static struct rdma_id_private *cma_new_id(struct rdma_cm_id *listen_id, 860 - struct ib_cm_event *ib_event) 868 + static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id, 869 + struct ib_cm_event *ib_event) 861 870 { 862 871 struct rdma_id_private *id_priv; 863 872 struct rdma_cm_id *id; ··· 904 913 return NULL; 905 914 } 906 915 916 + static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id, 917 + struct ib_cm_event *ib_event) 918 + { 919 + struct rdma_id_private *id_priv; 920 + struct rdma_cm_id *id; 921 + union cma_ip_addr *src, *dst; 922 + __u16 port; 923 + u8 ip_ver; 924 + int ret; 925 + 926 + id = rdma_create_id(listen_id->event_handler, listen_id->context, 927 + listen_id->ps); 928 + if (IS_ERR(id)) 929 + return NULL; 930 + 931 + 932 + if (cma_get_net_info(ib_event->private_data, listen_id->ps, 933 + &ip_ver, &port, &src, &dst)) 934 + goto err; 935 + 936 + cma_save_net_info(&id->route.addr, &listen_id->route.addr, 937 + ip_ver, port, src, dst); 938 + 939 + ret = rdma_translate_ip(&id->route.addr.src_addr, 940 + &id->route.addr.dev_addr); 941 + if (ret) 942 + goto err; 943 + 944 + id_priv = container_of(id, struct rdma_id_private, id); 945 + id_priv->state = CMA_CONNECT; 946 + return id_priv; 947 + err: 948 + rdma_destroy_id(id); 949 + return NULL; 950 + } 951 + 952 + static void cma_set_req_event_data(struct rdma_cm_event *event, 953 + struct ib_cm_req_event_param *req_data, 954 + void *private_data, int offset) 955 + { 956 + event->param.conn.private_data = private_data + offset; 957 + event->param.conn.private_data_len = IB_CM_REQ_PRIVATE_DATA_SIZE - offset; 958 + event->param.conn.responder_resources = req_data->responder_resources; 959 + event->param.conn.initiator_depth = req_data->initiator_depth; 960 + event->param.conn.flow_control = req_data->flow_control; 961 + event->param.conn.retry_count = req_data->retry_count; 962 + event->param.conn.rnr_retry_count = req_data->rnr_retry_count; 963 + event->param.conn.srq = req_data->srq; 964 + event->param.conn.qp_num = req_data->remote_qpn; 965 + } 966 + 907 967 static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event) 908 968 { 909 969 struct rdma_id_private *listen_id, *conn_id; 970 + struct rdma_cm_event event; 910 971 int offset, ret; 911 972 912 973 listen_id = cm_id->context; ··· 968 925 goto out; 969 926 } 970 927 971 - conn_id = cma_new_id(&listen_id->id, ib_event); 928 + memset(&event, 0, sizeof event); 929 + offset = cma_user_data_offset(listen_id->id.ps); 930 + event.event = RDMA_CM_EVENT_CONNECT_REQUEST; 931 + if (listen_id->id.ps == RDMA_PS_UDP) { 932 + conn_id = cma_new_udp_id(&listen_id->id, ib_event); 933 + event.param.ud.private_data = ib_event->private_data + offset; 934 + event.param.ud.private_data_len = 935 + IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset; 936 + } else { 937 + conn_id = cma_new_conn_id(&listen_id->id, ib_event); 938 + cma_set_req_event_data(&event, &ib_event->param.req_rcvd, 939 + ib_event->private_data, offset); 940 + } 972 941 if (!conn_id) { 973 942 ret = -ENOMEM; 974 943 goto out; ··· 997 942 cm_id->context = conn_id; 998 943 cm_id->cm_handler = cma_ib_handler; 999 944 1000 - offset = cma_user_data_offset(listen_id->id.ps); 1001 - ret = cma_notify_user(conn_id, RDMA_CM_EVENT_CONNECT_REQUEST, 0, 1002 - ib_event->private_data + offset, 1003 - IB_CM_REQ_PRIVATE_DATA_SIZE - offset); 945 + ret = conn_id->id.event_handler(&conn_id->id, &event); 1004 946 if 
(!ret) 1005 947 goto out; 1006 948 ··· 1016 964 1017 965 static __be64 cma_get_service_id(enum rdma_port_space ps, struct sockaddr *addr) 1018 966 { 1019 - return cpu_to_be64(((u64)ps << 16) + 1020 - be16_to_cpu(((struct sockaddr_in *) addr)->sin_port)); 967 + return cpu_to_be64(((u64)ps << 16) + be16_to_cpu(cma_port(addr))); 1021 968 } 1022 969 1023 970 static void cma_set_compare_data(enum rdma_port_space ps, struct sockaddr *addr, ··· 1072 1021 static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event) 1073 1022 { 1074 1023 struct rdma_id_private *id_priv = iw_id->context; 1075 - enum rdma_cm_event_type event = 0; 1024 + struct rdma_cm_event event; 1076 1025 struct sockaddr_in *sin; 1077 1026 int ret = 0; 1078 1027 1028 + memset(&event, 0, sizeof event); 1079 1029 atomic_inc(&id_priv->dev_remove); 1080 1030 1081 1031 switch (iw_event->event) { 1082 1032 case IW_CM_EVENT_CLOSE: 1083 - event = RDMA_CM_EVENT_DISCONNECTED; 1033 + event.event = RDMA_CM_EVENT_DISCONNECTED; 1084 1034 break; 1085 1035 case IW_CM_EVENT_CONNECT_REPLY: 1086 1036 sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr; ··· 1089 1037 sin = (struct sockaddr_in *) &id_priv->id.route.addr.dst_addr; 1090 1038 *sin = iw_event->remote_addr; 1091 1039 if (iw_event->status) 1092 - event = RDMA_CM_EVENT_REJECTED; 1040 + event.event = RDMA_CM_EVENT_REJECTED; 1093 1041 else 1094 - event = RDMA_CM_EVENT_ESTABLISHED; 1042 + event.event = RDMA_CM_EVENT_ESTABLISHED; 1095 1043 break; 1096 1044 case IW_CM_EVENT_ESTABLISHED: 1097 - event = RDMA_CM_EVENT_ESTABLISHED; 1045 + event.event = RDMA_CM_EVENT_ESTABLISHED; 1098 1046 break; 1099 1047 default: 1100 1048 BUG_ON(1); 1101 1049 } 1102 1050 1103 - ret = cma_notify_user(id_priv, event, iw_event->status, 1104 - iw_event->private_data, 1105 - iw_event->private_data_len); 1051 + event.status = iw_event->status; 1052 + event.param.conn.private_data = iw_event->private_data; 1053 + event.param.conn.private_data_len = iw_event->private_data_len; 1054 + ret = id_priv->id.event_handler(&id_priv->id, &event); 1106 1055 if (ret) { 1107 1056 /* Destroy the CM ID by returning a non-zero value. 
*/ 1108 1057 id_priv->cm_id.iw = NULL; ··· 1124 1071 struct rdma_id_private *listen_id, *conn_id; 1125 1072 struct sockaddr_in *sin; 1126 1073 struct net_device *dev = NULL; 1074 + struct rdma_cm_event event; 1127 1075 int ret; 1128 1076 1129 1077 listen_id = cm_id->context; ··· 1178 1124 sin = (struct sockaddr_in *) &new_cm_id->route.addr.dst_addr; 1179 1125 *sin = iw_event->remote_addr; 1180 1126 1181 - ret = cma_notify_user(conn_id, RDMA_CM_EVENT_CONNECT_REQUEST, 0, 1182 - iw_event->private_data, 1183 - iw_event->private_data_len); 1127 + memset(&event, 0, sizeof event); 1128 + event.event = RDMA_CM_EVENT_CONNECT_REQUEST; 1129 + event.param.conn.private_data = iw_event->private_data; 1130 + event.param.conn.private_data_len = iw_event->private_data_len; 1131 + ret = conn_id->id.event_handler(&conn_id->id, &event); 1184 1132 if (ret) { 1185 1133 /* User wants to destroy the CM ID */ 1186 1134 conn_id->cm_id.iw = NULL; ··· 1571 1515 struct rdma_dev_addr *dev_addr, void *context) 1572 1516 { 1573 1517 struct rdma_id_private *id_priv = context; 1574 - enum rdma_cm_event_type event; 1518 + struct rdma_cm_event event; 1575 1519 1520 + memset(&event, 0, sizeof event); 1576 1521 atomic_inc(&id_priv->dev_remove); 1577 1522 1578 1523 /* ··· 1593 1536 if (status) { 1594 1537 if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ADDR_BOUND)) 1595 1538 goto out; 1596 - event = RDMA_CM_EVENT_ADDR_ERROR; 1539 + event.event = RDMA_CM_EVENT_ADDR_ERROR; 1540 + event.status = status; 1597 1541 } else { 1598 1542 memcpy(&id_priv->id.route.addr.src_addr, src_addr, 1599 1543 ip_addr_size(src_addr)); 1600 - event = RDMA_CM_EVENT_ADDR_RESOLVED; 1544 + event.event = RDMA_CM_EVENT_ADDR_RESOLVED; 1601 1545 } 1602 1546 1603 - if (cma_notify_user(id_priv, event, status, NULL, 0)) { 1547 + if (id_priv->id.event_handler(&id_priv->id, &event)) { 1604 1548 cma_exch(id_priv, CMA_DESTROYING); 1605 1549 cma_release_remove(id_priv); 1606 1550 cma_deref_id(id_priv); ··· 1791 1733 case RDMA_PS_TCP: 1792 1734 ps = &tcp_ps; 1793 1735 break; 1736 + case RDMA_PS_UDP: 1737 + ps = &udp_ps; 1738 + break; 1794 1739 default: 1795 1740 return -EPROTONOSUPPORT; 1796 1741 } ··· 1882 1821 return 0; 1883 1822 } 1884 1823 1824 + static int cma_sidr_rep_handler(struct ib_cm_id *cm_id, 1825 + struct ib_cm_event *ib_event) 1826 + { 1827 + struct rdma_id_private *id_priv = cm_id->context; 1828 + struct rdma_cm_event event; 1829 + struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd; 1830 + int ret = 0; 1831 + 1832 + memset(&event, 0, sizeof event); 1833 + atomic_inc(&id_priv->dev_remove); 1834 + if (!cma_comp(id_priv, CMA_CONNECT)) 1835 + goto out; 1836 + 1837 + switch (ib_event->event) { 1838 + case IB_CM_SIDR_REQ_ERROR: 1839 + event.event = RDMA_CM_EVENT_UNREACHABLE; 1840 + event.status = -ETIMEDOUT; 1841 + break; 1842 + case IB_CM_SIDR_REP_RECEIVED: 1843 + event.param.ud.private_data = ib_event->private_data; 1844 + event.param.ud.private_data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE; 1845 + if (rep->status != IB_SIDR_SUCCESS) { 1846 + event.event = RDMA_CM_EVENT_UNREACHABLE; 1847 + event.status = ib_event->param.sidr_rep_rcvd.status; 1848 + break; 1849 + } 1850 + if (rep->qkey != RDMA_UD_QKEY) { 1851 + event.event = RDMA_CM_EVENT_UNREACHABLE; 1852 + event.status = -EINVAL; 1853 + break; 1854 + } 1855 + ib_init_ah_from_path(id_priv->id.device, id_priv->id.port_num, 1856 + id_priv->id.route.path_rec, 1857 + &event.param.ud.ah_attr); 1858 + event.param.ud.qp_num = rep->qpn; 1859 + event.param.ud.qkey = rep->qkey; 1860 + 
event.event = RDMA_CM_EVENT_ESTABLISHED; 1861 + event.status = 0; 1862 + break; 1863 + default: 1864 + printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d", 1865 + ib_event->event); 1866 + goto out; 1867 + } 1868 + 1869 + ret = id_priv->id.event_handler(&id_priv->id, &event); 1870 + if (ret) { 1871 + /* Destroy the CM ID by returning a non-zero value. */ 1872 + id_priv->cm_id.ib = NULL; 1873 + cma_exch(id_priv, CMA_DESTROYING); 1874 + cma_release_remove(id_priv); 1875 + rdma_destroy_id(&id_priv->id); 1876 + return ret; 1877 + } 1878 + out: 1879 + cma_release_remove(id_priv); 1880 + return ret; 1881 + } 1882 + 1883 + static int cma_resolve_ib_udp(struct rdma_id_private *id_priv, 1884 + struct rdma_conn_param *conn_param) 1885 + { 1886 + struct ib_cm_sidr_req_param req; 1887 + struct rdma_route *route; 1888 + int ret; 1889 + 1890 + req.private_data_len = sizeof(struct cma_hdr) + 1891 + conn_param->private_data_len; 1892 + req.private_data = kzalloc(req.private_data_len, GFP_ATOMIC); 1893 + if (!req.private_data) 1894 + return -ENOMEM; 1895 + 1896 + if (conn_param->private_data && conn_param->private_data_len) 1897 + memcpy((void *) req.private_data + sizeof(struct cma_hdr), 1898 + conn_param->private_data, conn_param->private_data_len); 1899 + 1900 + route = &id_priv->id.route; 1901 + ret = cma_format_hdr((void *) req.private_data, id_priv->id.ps, route); 1902 + if (ret) 1903 + goto out; 1904 + 1905 + id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device, 1906 + cma_sidr_rep_handler, id_priv); 1907 + if (IS_ERR(id_priv->cm_id.ib)) { 1908 + ret = PTR_ERR(id_priv->cm_id.ib); 1909 + goto out; 1910 + } 1911 + 1912 + req.path = route->path_rec; 1913 + req.service_id = cma_get_service_id(id_priv->id.ps, 1914 + &route->addr.dst_addr); 1915 + req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8); 1916 + req.max_cm_retries = CMA_MAX_CM_RETRIES; 1917 + 1918 + ret = ib_send_cm_sidr_req(id_priv->cm_id.ib, &req); 1919 + if (ret) { 1920 + ib_destroy_cm_id(id_priv->cm_id.ib); 1921 + id_priv->cm_id.ib = NULL; 1922 + } 1923 + out: 1924 + kfree(req.private_data); 1925 + return ret; 1926 + } 1927 + 1885 1928 static int cma_connect_ib(struct rdma_id_private *id_priv, 1886 1929 struct rdma_conn_param *conn_param) 1887 1930 { ··· 2025 1860 req.service_id = cma_get_service_id(id_priv->id.ps, 2026 1861 &route->addr.dst_addr); 2027 1862 req.qp_num = id_priv->qp_num; 2028 - req.qp_type = id_priv->qp_type; 1863 + req.qp_type = IB_QPT_RC; 2029 1864 req.starting_psn = id_priv->seq_num; 2030 1865 req.responder_resources = conn_param->responder_resources; 2031 1866 req.initiator_depth = conn_param->initiator_depth; ··· 2102 1937 2103 1938 if (!id->qp) { 2104 1939 id_priv->qp_num = conn_param->qp_num; 2105 - id_priv->qp_type = conn_param->qp_type; 2106 1940 id_priv->srq = conn_param->srq; 2107 1941 } 2108 1942 2109 1943 switch (rdma_node_get_transport(id->device->node_type)) { 2110 1944 case RDMA_TRANSPORT_IB: 2111 - ret = cma_connect_ib(id_priv, conn_param); 1945 + if (id->ps == RDMA_PS_UDP) 1946 + ret = cma_resolve_ib_udp(id_priv, conn_param); 1947 + else 1948 + ret = cma_connect_ib(id_priv, conn_param); 2112 1949 break; 2113 1950 case RDMA_TRANSPORT_IWARP: 2114 1951 ret = cma_connect_iw(id_priv, conn_param); ··· 2133 1966 struct rdma_conn_param *conn_param) 2134 1967 { 2135 1968 struct ib_cm_rep_param rep; 2136 - int ret; 1969 + struct ib_qp_attr qp_attr; 1970 + int qp_attr_mask, ret; 2137 1971 2138 - ret = cma_modify_qp_rtr(&id_priv->id); 2139 - if (ret) 2140 - return ret; 1972 + if (id_priv->id.qp) { 1973 + ret = 
cma_modify_qp_rtr(&id_priv->id); 1974 + if (ret) 1975 + goto out; 1976 + 1977 + qp_attr.qp_state = IB_QPS_RTS; 1978 + ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, &qp_attr, 1979 + &qp_attr_mask); 1980 + if (ret) 1981 + goto out; 1982 + 1983 + qp_attr.max_rd_atomic = conn_param->initiator_depth; 1984 + ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask); 1985 + if (ret) 1986 + goto out; 1987 + } 2141 1988 2142 1989 memset(&rep, 0, sizeof rep); 2143 1990 rep.qp_num = id_priv->qp_num; ··· 2166 1985 rep.rnr_retry_count = conn_param->rnr_retry_count; 2167 1986 rep.srq = id_priv->srq ? 1 : 0; 2168 1987 2169 - return ib_send_cm_rep(id_priv->cm_id.ib, &rep); 1988 + ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep); 1989 + out: 1990 + return ret; 2170 1991 } 2171 1992 2172 1993 static int cma_accept_iw(struct rdma_id_private *id_priv, ··· 2193 2010 return iw_cm_accept(id_priv->cm_id.iw, &iw_param); 2194 2011 } 2195 2012 2013 + static int cma_send_sidr_rep(struct rdma_id_private *id_priv, 2014 + enum ib_cm_sidr_status status, 2015 + const void *private_data, int private_data_len) 2016 + { 2017 + struct ib_cm_sidr_rep_param rep; 2018 + 2019 + memset(&rep, 0, sizeof rep); 2020 + rep.status = status; 2021 + if (status == IB_SIDR_SUCCESS) { 2022 + rep.qp_num = id_priv->qp_num; 2023 + rep.qkey = RDMA_UD_QKEY; 2024 + } 2025 + rep.private_data = private_data; 2026 + rep.private_data_len = private_data_len; 2027 + 2028 + return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep); 2029 + } 2030 + 2196 2031 int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param) 2197 2032 { 2198 2033 struct rdma_id_private *id_priv; ··· 2222 2021 2223 2022 if (!id->qp && conn_param) { 2224 2023 id_priv->qp_num = conn_param->qp_num; 2225 - id_priv->qp_type = conn_param->qp_type; 2226 2024 id_priv->srq = conn_param->srq; 2227 2025 } 2228 2026 2229 2027 switch (rdma_node_get_transport(id->device->node_type)) { 2230 2028 case RDMA_TRANSPORT_IB: 2231 - if (conn_param) 2029 + if (id->ps == RDMA_PS_UDP) 2030 + ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS, 2031 + conn_param->private_data, 2032 + conn_param->private_data_len); 2033 + else if (conn_param) 2232 2034 ret = cma_accept_ib(id_priv, conn_param); 2233 2035 else 2234 2036 ret = cma_rep_recv(id_priv); ··· 2255 2051 } 2256 2052 EXPORT_SYMBOL(rdma_accept); 2257 2053 2054 + int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event) 2055 + { 2056 + struct rdma_id_private *id_priv; 2057 + int ret; 2058 + 2059 + id_priv = container_of(id, struct rdma_id_private, id); 2060 + if (!cma_comp(id_priv, CMA_CONNECT)) 2061 + return -EINVAL; 2062 + 2063 + switch (id->device->node_type) { 2064 + case RDMA_NODE_IB_CA: 2065 + ret = ib_cm_notify(id_priv->cm_id.ib, event); 2066 + break; 2067 + default: 2068 + ret = 0; 2069 + break; 2070 + } 2071 + return ret; 2072 + } 2073 + EXPORT_SYMBOL(rdma_notify); 2074 + 2258 2075 int rdma_reject(struct rdma_cm_id *id, const void *private_data, 2259 2076 u8 private_data_len) 2260 2077 { ··· 2288 2063 2289 2064 switch (rdma_node_get_transport(id->device->node_type)) { 2290 2065 case RDMA_TRANSPORT_IB: 2291 - ret = ib_send_cm_rej(id_priv->cm_id.ib, 2292 - IB_CM_REJ_CONSUMER_DEFINED, NULL, 0, 2293 - private_data, private_data_len); 2066 + if (id->ps == RDMA_PS_UDP) 2067 + ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT, 2068 + private_data, private_data_len); 2069 + else 2070 + ret = ib_send_cm_rej(id_priv->cm_id.ib, 2071 + IB_CM_REJ_CONSUMER_DEFINED, NULL, 2072 + 0, private_data, private_data_len); 2294 2073 break; 2295 2074 case 
RDMA_TRANSPORT_IWARP: 2296 2075 ret = iw_cm_reject(id_priv->cm_id.iw, ··· 2365 2136 2366 2137 static int cma_remove_id_dev(struct rdma_id_private *id_priv) 2367 2138 { 2139 + struct rdma_cm_event event; 2368 2140 enum cma_state state; 2369 2141 2370 2142 /* Record that we want to remove the device */ ··· 2380 2150 if (!cma_comp(id_priv, CMA_DEVICE_REMOVAL)) 2381 2151 return 0; 2382 2152 2383 - return cma_notify_user(id_priv, RDMA_CM_EVENT_DEVICE_REMOVAL, 2384 - 0, NULL, 0); 2153 + memset(&event, 0, sizeof event); 2154 + event.event = RDMA_CM_EVENT_DEVICE_REMOVAL; 2155 + return id_priv->id.event_handler(&id_priv->id, &event); 2385 2156 } 2386 2157 2387 2158 static void cma_process_remove(struct cma_device *cma_dev) ··· 2464 2233 destroy_workqueue(cma_wq); 2465 2234 idr_destroy(&sdp_ps); 2466 2235 idr_destroy(&tcp_ps); 2236 + idr_destroy(&udp_ps); 2467 2237 } 2468 2238 2469 2239 module_init(cma_init);
+2 -10
drivers/infiniband/core/fmr_pool.c
··· 394 394 */ 395 395 int ib_flush_fmr_pool(struct ib_fmr_pool *pool) 396 396 { 397 - int serial; 398 - 399 - atomic_inc(&pool->req_ser); 400 - /* 401 - * It's OK if someone else bumps req_ser again here -- we'll 402 - * just wait a little longer. 403 - */ 404 - serial = atomic_read(&pool->req_ser); 397 + int serial = atomic_inc_return(&pool->req_ser); 405 398 406 399 wake_up_process(pool->thread); 407 400 408 401 if (wait_event_interruptible(pool->force_wait, 409 - atomic_read(&pool->flush_ser) - 410 - atomic_read(&pool->req_ser) >= 0)) 402 + atomic_read(&pool->flush_ser) - serial >= 0)) 411 403 return -EINTR; 412 404 413 405 return 0;
+43 -45
drivers/infiniband/core/mad.c
··· 998 998 999 999 mad_agent = mad_send_wr->send_buf.mad_agent; 1000 1000 sge = mad_send_wr->sg_list; 1001 - sge[0].addr = dma_map_single(mad_agent->device->dma_device, 1002 - mad_send_wr->send_buf.mad, 1003 - sge[0].length, 1004 - DMA_TO_DEVICE); 1005 - pci_unmap_addr_set(mad_send_wr, header_mapping, sge[0].addr); 1001 + sge[0].addr = ib_dma_map_single(mad_agent->device, 1002 + mad_send_wr->send_buf.mad, 1003 + sge[0].length, 1004 + DMA_TO_DEVICE); 1005 + mad_send_wr->header_mapping = sge[0].addr; 1006 1006 1007 - sge[1].addr = dma_map_single(mad_agent->device->dma_device, 1008 - ib_get_payload(mad_send_wr), 1009 - sge[1].length, 1010 - DMA_TO_DEVICE); 1011 - pci_unmap_addr_set(mad_send_wr, payload_mapping, sge[1].addr); 1007 + sge[1].addr = ib_dma_map_single(mad_agent->device, 1008 + ib_get_payload(mad_send_wr), 1009 + sge[1].length, 1010 + DMA_TO_DEVICE); 1011 + mad_send_wr->payload_mapping = sge[1].addr; 1012 1012 1013 1013 spin_lock_irqsave(&qp_info->send_queue.lock, flags); 1014 1014 if (qp_info->send_queue.count < qp_info->send_queue.max_active) { ··· 1026 1026 } 1027 1027 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags); 1028 1028 if (ret) { 1029 - dma_unmap_single(mad_agent->device->dma_device, 1030 - pci_unmap_addr(mad_send_wr, header_mapping), 1031 - sge[0].length, DMA_TO_DEVICE); 1032 - dma_unmap_single(mad_agent->device->dma_device, 1033 - pci_unmap_addr(mad_send_wr, payload_mapping), 1034 - sge[1].length, DMA_TO_DEVICE); 1029 + ib_dma_unmap_single(mad_agent->device, 1030 + mad_send_wr->header_mapping, 1031 + sge[0].length, DMA_TO_DEVICE); 1032 + ib_dma_unmap_single(mad_agent->device, 1033 + mad_send_wr->payload_mapping, 1034 + sge[1].length, DMA_TO_DEVICE); 1035 1035 } 1036 1036 return ret; 1037 1037 } ··· 1850 1850 mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header, 1851 1851 mad_list); 1852 1852 recv = container_of(mad_priv_hdr, struct ib_mad_private, header); 1853 - dma_unmap_single(port_priv->device->dma_device, 1854 - pci_unmap_addr(&recv->header, mapping), 1855 - sizeof(struct ib_mad_private) - 1856 - sizeof(struct ib_mad_private_header), 1857 - DMA_FROM_DEVICE); 1853 + ib_dma_unmap_single(port_priv->device, 1854 + recv->header.mapping, 1855 + sizeof(struct ib_mad_private) - 1856 + sizeof(struct ib_mad_private_header), 1857 + DMA_FROM_DEVICE); 1858 1858 1859 1859 /* Setup MAD receive work completion from "normal" work completion */ 1860 1860 recv->header.wc = *wc; ··· 2080 2080 qp_info = send_queue->qp_info; 2081 2081 2082 2082 retry: 2083 - dma_unmap_single(mad_send_wr->send_buf.mad_agent->device->dma_device, 2084 - pci_unmap_addr(mad_send_wr, header_mapping), 2085 - mad_send_wr->sg_list[0].length, DMA_TO_DEVICE); 2086 - dma_unmap_single(mad_send_wr->send_buf.mad_agent->device->dma_device, 2087 - pci_unmap_addr(mad_send_wr, payload_mapping), 2088 - mad_send_wr->sg_list[1].length, DMA_TO_DEVICE); 2083 + ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device, 2084 + mad_send_wr->header_mapping, 2085 + mad_send_wr->sg_list[0].length, DMA_TO_DEVICE); 2086 + ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device, 2087 + mad_send_wr->payload_mapping, 2088 + mad_send_wr->sg_list[1].length, DMA_TO_DEVICE); 2089 2089 queued_send_wr = NULL; 2090 2090 spin_lock_irqsave(&send_queue->lock, flags); 2091 2091 list_del(&mad_list->list); ··· 2528 2528 break; 2529 2529 } 2530 2530 } 2531 - sg_list.addr = dma_map_single(qp_info->port_priv-> 2532 - device->dma_device, 2533 - &mad_priv->grh, 2534 - sizeof *mad_priv - 2535 - sizeof mad_priv->header, 2536 - 
DMA_FROM_DEVICE); 2537 - pci_unmap_addr_set(&mad_priv->header, mapping, sg_list.addr); 2531 + sg_list.addr = ib_dma_map_single(qp_info->port_priv->device, 2532 + &mad_priv->grh, 2533 + sizeof *mad_priv - 2534 + sizeof mad_priv->header, 2535 + DMA_FROM_DEVICE); 2536 + mad_priv->header.mapping = sg_list.addr; 2538 2537 recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list; 2539 2538 mad_priv->header.mad_list.mad_queue = recv_queue; 2540 2539 ··· 2548 2549 list_del(&mad_priv->header.mad_list.list); 2549 2550 recv_queue->count--; 2550 2551 spin_unlock_irqrestore(&recv_queue->lock, flags); 2551 - dma_unmap_single(qp_info->port_priv->device->dma_device, 2552 - pci_unmap_addr(&mad_priv->header, 2553 - mapping), 2554 - sizeof *mad_priv - 2555 - sizeof mad_priv->header, 2556 - DMA_FROM_DEVICE); 2552 + ib_dma_unmap_single(qp_info->port_priv->device, 2553 + mad_priv->header.mapping, 2554 + sizeof *mad_priv - 2555 + sizeof mad_priv->header, 2556 + DMA_FROM_DEVICE); 2557 2557 kmem_cache_free(ib_mad_cache, mad_priv); 2558 2558 printk(KERN_ERR PFX "ib_post_recv failed: %d\n", ret); 2559 2559 break; ··· 2584 2586 /* Remove from posted receive MAD list */ 2585 2587 list_del(&mad_list->list); 2586 2588 2587 - dma_unmap_single(qp_info->port_priv->device->dma_device, 2588 - pci_unmap_addr(&recv->header, mapping), 2589 - sizeof(struct ib_mad_private) - 2590 - sizeof(struct ib_mad_private_header), 2591 - DMA_FROM_DEVICE); 2589 + ib_dma_unmap_single(qp_info->port_priv->device, 2590 + recv->header.mapping, 2591 + sizeof(struct ib_mad_private) - 2592 + sizeof(struct ib_mad_private_header), 2593 + DMA_FROM_DEVICE); 2592 2594 kmem_cache_free(ib_mad_cache, recv); 2593 2595 } 2594 2596
+3 -3
drivers/infiniband/core/mad_priv.h
··· 73 73 struct ib_mad_list_head mad_list; 74 74 struct ib_mad_recv_wc recv_wc; 75 75 struct ib_wc wc; 76 - DECLARE_PCI_UNMAP_ADDR(mapping) 76 + u64 mapping; 77 77 } __attribute__ ((packed)); 78 78 79 79 struct ib_mad_private { ··· 126 126 struct list_head agent_list; 127 127 struct ib_mad_agent_private *mad_agent_priv; 128 128 struct ib_mad_send_buf send_buf; 129 - DECLARE_PCI_UNMAP_ADDR(header_mapping) 130 - DECLARE_PCI_UNMAP_ADDR(payload_mapping) 129 + u64 header_mapping; 130 + u64 payload_mapping; 131 131 struct ib_send_wr send_wr; 132 132 struct ib_sge sg_list[IB_MAD_SEND_REQ_MAX_SG]; 133 133 __be64 tid;
+874
drivers/infiniband/core/ucma.c
··· 1 + /* 2 + * Copyright (c) 2005-2006 Intel Corporation. All rights reserved. 3 + * 4 + * This software is available to you under a choice of one of two 5 + * licenses. You may choose to be licensed under the terms of the GNU 6 + * General Public License (GPL) Version 2, available from the file 7 + * COPYING in the main directory of this source tree, or the 8 + * OpenIB.org BSD license below: 9 + * 10 + * Redistribution and use in source and binary forms, with or 11 + * without modification, are permitted provided that the following 12 + * conditions are met: 13 + * 14 + * - Redistributions of source code must retain the above 15 + * copyright notice, this list of conditions and the following 16 + * disclaimer. 17 + * 18 + * - Redistributions in binary form must reproduce the above 19 + * copyright notice, this list of conditions and the following 20 + * disclaimer in the documentation and/or other materials 21 + * provided with the distribution. 22 + * 23 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 + * SOFTWARE. 31 + */ 32 + 33 + #include <linux/completion.h> 34 + #include <linux/mutex.h> 35 + #include <linux/poll.h> 36 + #include <linux/idr.h> 37 + #include <linux/in.h> 38 + #include <linux/in6.h> 39 + #include <linux/miscdevice.h> 40 + 41 + #include <rdma/rdma_user_cm.h> 42 + #include <rdma/ib_marshall.h> 43 + #include <rdma/rdma_cm.h> 44 + 45 + MODULE_AUTHOR("Sean Hefty"); 46 + MODULE_DESCRIPTION("RDMA Userspace Connection Manager Access"); 47 + MODULE_LICENSE("Dual BSD/GPL"); 48 + 49 + enum { 50 + UCMA_MAX_BACKLOG = 128 51 + }; 52 + 53 + struct ucma_file { 54 + struct mutex mut; 55 + struct file *filp; 56 + struct list_head ctx_list; 57 + struct list_head event_list; 58 + wait_queue_head_t poll_wait; 59 + }; 60 + 61 + struct ucma_context { 62 + int id; 63 + struct completion comp; 64 + atomic_t ref; 65 + int events_reported; 66 + int backlog; 67 + 68 + struct ucma_file *file; 69 + struct rdma_cm_id *cm_id; 70 + u64 uid; 71 + 72 + struct list_head list; 73 + }; 74 + 75 + struct ucma_event { 76 + struct ucma_context *ctx; 77 + struct list_head list; 78 + struct rdma_cm_id *cm_id; 79 + struct rdma_ucm_event_resp resp; 80 + }; 81 + 82 + static DEFINE_MUTEX(mut); 83 + static DEFINE_IDR(ctx_idr); 84 + 85 + static inline struct ucma_context *_ucma_find_context(int id, 86 + struct ucma_file *file) 87 + { 88 + struct ucma_context *ctx; 89 + 90 + ctx = idr_find(&ctx_idr, id); 91 + if (!ctx) 92 + ctx = ERR_PTR(-ENOENT); 93 + else if (ctx->file != file) 94 + ctx = ERR_PTR(-EINVAL); 95 + return ctx; 96 + } 97 + 98 + static struct ucma_context *ucma_get_ctx(struct ucma_file *file, int id) 99 + { 100 + struct ucma_context *ctx; 101 + 102 + mutex_lock(&mut); 103 + ctx = _ucma_find_context(id, file); 104 + if (!IS_ERR(ctx)) 105 + atomic_inc(&ctx->ref); 106 + mutex_unlock(&mut); 107 + return ctx; 108 + } 109 + 110 + static void ucma_put_ctx(struct ucma_context *ctx) 111 + { 112 + if (atomic_dec_and_test(&ctx->ref)) 113 + complete(&ctx->comp); 114 + } 115 + 116 + static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file) 117 + { 118 + struct 
ucma_context *ctx; 119 + int ret; 120 + 121 + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 122 + if (!ctx) 123 + return NULL; 124 + 125 + atomic_set(&ctx->ref, 1); 126 + init_completion(&ctx->comp); 127 + ctx->file = file; 128 + 129 + do { 130 + ret = idr_pre_get(&ctx_idr, GFP_KERNEL); 131 + if (!ret) 132 + goto error; 133 + 134 + mutex_lock(&mut); 135 + ret = idr_get_new(&ctx_idr, ctx, &ctx->id); 136 + mutex_unlock(&mut); 137 + } while (ret == -EAGAIN); 138 + 139 + if (ret) 140 + goto error; 141 + 142 + list_add_tail(&ctx->list, &file->ctx_list); 143 + return ctx; 144 + 145 + error: 146 + kfree(ctx); 147 + return NULL; 148 + } 149 + 150 + static void ucma_copy_conn_event(struct rdma_ucm_conn_param *dst, 151 + struct rdma_conn_param *src) 152 + { 153 + if (src->private_data_len) 154 + memcpy(dst->private_data, src->private_data, 155 + src->private_data_len); 156 + dst->private_data_len = src->private_data_len; 157 + dst->responder_resources =src->responder_resources; 158 + dst->initiator_depth = src->initiator_depth; 159 + dst->flow_control = src->flow_control; 160 + dst->retry_count = src->retry_count; 161 + dst->rnr_retry_count = src->rnr_retry_count; 162 + dst->srq = src->srq; 163 + dst->qp_num = src->qp_num; 164 + } 165 + 166 + static void ucma_copy_ud_event(struct rdma_ucm_ud_param *dst, 167 + struct rdma_ud_param *src) 168 + { 169 + if (src->private_data_len) 170 + memcpy(dst->private_data, src->private_data, 171 + src->private_data_len); 172 + dst->private_data_len = src->private_data_len; 173 + ib_copy_ah_attr_to_user(&dst->ah_attr, &src->ah_attr); 174 + dst->qp_num = src->qp_num; 175 + dst->qkey = src->qkey; 176 + } 177 + 178 + static void ucma_set_event_context(struct ucma_context *ctx, 179 + struct rdma_cm_event *event, 180 + struct ucma_event *uevent) 181 + { 182 + uevent->ctx = ctx; 183 + uevent->resp.uid = ctx->uid; 184 + uevent->resp.id = ctx->id; 185 + } 186 + 187 + static int ucma_event_handler(struct rdma_cm_id *cm_id, 188 + struct rdma_cm_event *event) 189 + { 190 + struct ucma_event *uevent; 191 + struct ucma_context *ctx = cm_id->context; 192 + int ret = 0; 193 + 194 + uevent = kzalloc(sizeof(*uevent), GFP_KERNEL); 195 + if (!uevent) 196 + return event->event == RDMA_CM_EVENT_CONNECT_REQUEST; 197 + 198 + uevent->cm_id = cm_id; 199 + ucma_set_event_context(ctx, event, uevent); 200 + uevent->resp.event = event->event; 201 + uevent->resp.status = event->status; 202 + if (cm_id->ps == RDMA_PS_UDP) 203 + ucma_copy_ud_event(&uevent->resp.param.ud, &event->param.ud); 204 + else 205 + ucma_copy_conn_event(&uevent->resp.param.conn, 206 + &event->param.conn); 207 + 208 + mutex_lock(&ctx->file->mut); 209 + if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) { 210 + if (!ctx->backlog) { 211 + ret = -EDQUOT; 212 + goto out; 213 + } 214 + ctx->backlog--; 215 + } 216 + list_add_tail(&uevent->list, &ctx->file->event_list); 217 + wake_up_interruptible(&ctx->file->poll_wait); 218 + out: 219 + mutex_unlock(&ctx->file->mut); 220 + return ret; 221 + } 222 + 223 + static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf, 224 + int in_len, int out_len) 225 + { 226 + struct ucma_context *ctx; 227 + struct rdma_ucm_get_event cmd; 228 + struct ucma_event *uevent; 229 + int ret = 0; 230 + DEFINE_WAIT(wait); 231 + 232 + if (out_len < sizeof uevent->resp) 233 + return -ENOSPC; 234 + 235 + if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 236 + return -EFAULT; 237 + 238 + mutex_lock(&file->mut); 239 + while (list_empty(&file->event_list)) { 240 + if (file->filp->f_flags & O_NONBLOCK) { 
241 + ret = -EAGAIN; 242 + break; 243 + } 244 + 245 + if (signal_pending(current)) { 246 + ret = -ERESTARTSYS; 247 + break; 248 + } 249 + 250 + prepare_to_wait(&file->poll_wait, &wait, TASK_INTERRUPTIBLE); 251 + mutex_unlock(&file->mut); 252 + schedule(); 253 + mutex_lock(&file->mut); 254 + finish_wait(&file->poll_wait, &wait); 255 + } 256 + 257 + if (ret) 258 + goto done; 259 + 260 + uevent = list_entry(file->event_list.next, struct ucma_event, list); 261 + 262 + if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) { 263 + ctx = ucma_alloc_ctx(file); 264 + if (!ctx) { 265 + ret = -ENOMEM; 266 + goto done; 267 + } 268 + uevent->ctx->backlog++; 269 + ctx->cm_id = uevent->cm_id; 270 + ctx->cm_id->context = ctx; 271 + uevent->resp.id = ctx->id; 272 + } 273 + 274 + if (copy_to_user((void __user *)(unsigned long)cmd.response, 275 + &uevent->resp, sizeof uevent->resp)) { 276 + ret = -EFAULT; 277 + goto done; 278 + } 279 + 280 + list_del(&uevent->list); 281 + uevent->ctx->events_reported++; 282 + kfree(uevent); 283 + done: 284 + mutex_unlock(&file->mut); 285 + return ret; 286 + } 287 + 288 + static ssize_t ucma_create_id(struct ucma_file *file, 289 + const char __user *inbuf, 290 + int in_len, int out_len) 291 + { 292 + struct rdma_ucm_create_id cmd; 293 + struct rdma_ucm_create_id_resp resp; 294 + struct ucma_context *ctx; 295 + int ret; 296 + 297 + if (out_len < sizeof(resp)) 298 + return -ENOSPC; 299 + 300 + if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 301 + return -EFAULT; 302 + 303 + mutex_lock(&file->mut); 304 + ctx = ucma_alloc_ctx(file); 305 + mutex_unlock(&file->mut); 306 + if (!ctx) 307 + return -ENOMEM; 308 + 309 + ctx->uid = cmd.uid; 310 + ctx->cm_id = rdma_create_id(ucma_event_handler, ctx, cmd.ps); 311 + if (IS_ERR(ctx->cm_id)) { 312 + ret = PTR_ERR(ctx->cm_id); 313 + goto err1; 314 + } 315 + 316 + resp.id = ctx->id; 317 + if (copy_to_user((void __user *)(unsigned long)cmd.response, 318 + &resp, sizeof(resp))) { 319 + ret = -EFAULT; 320 + goto err2; 321 + } 322 + return 0; 323 + 324 + err2: 325 + rdma_destroy_id(ctx->cm_id); 326 + err1: 327 + mutex_lock(&mut); 328 + idr_remove(&ctx_idr, ctx->id); 329 + mutex_unlock(&mut); 330 + kfree(ctx); 331 + return ret; 332 + } 333 + 334 + static void ucma_cleanup_events(struct ucma_context *ctx) 335 + { 336 + struct ucma_event *uevent, *tmp; 337 + 338 + list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) { 339 + if (uevent->ctx != ctx) 340 + continue; 341 + 342 + list_del(&uevent->list); 343 + 344 + /* clear incoming connections. */ 345 + if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) 346 + rdma_destroy_id(uevent->cm_id); 347 + 348 + kfree(uevent); 349 + } 350 + } 351 + 352 + static int ucma_free_ctx(struct ucma_context *ctx) 353 + { 354 + int events_reported; 355 + 356 + /* No new events will be generated after destroying the id. */ 357 + rdma_destroy_id(ctx->cm_id); 358 + 359 + /* Cleanup events not yet reported to the user. 
*/ 360 + mutex_lock(&ctx->file->mut); 361 + ucma_cleanup_events(ctx); 362 + list_del(&ctx->list); 363 + mutex_unlock(&ctx->file->mut); 364 + 365 + events_reported = ctx->events_reported; 366 + kfree(ctx); 367 + return events_reported; 368 + } 369 + 370 + static ssize_t ucma_destroy_id(struct ucma_file *file, const char __user *inbuf, 371 + int in_len, int out_len) 372 + { 373 + struct rdma_ucm_destroy_id cmd; 374 + struct rdma_ucm_destroy_id_resp resp; 375 + struct ucma_context *ctx; 376 + int ret = 0; 377 + 378 + if (out_len < sizeof(resp)) 379 + return -ENOSPC; 380 + 381 + if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 382 + return -EFAULT; 383 + 384 + mutex_lock(&mut); 385 + ctx = _ucma_find_context(cmd.id, file); 386 + if (!IS_ERR(ctx)) 387 + idr_remove(&ctx_idr, ctx->id); 388 + mutex_unlock(&mut); 389 + 390 + if (IS_ERR(ctx)) 391 + return PTR_ERR(ctx); 392 + 393 + ucma_put_ctx(ctx); 394 + wait_for_completion(&ctx->comp); 395 + resp.events_reported = ucma_free_ctx(ctx); 396 + 397 + if (copy_to_user((void __user *)(unsigned long)cmd.response, 398 + &resp, sizeof(resp))) 399 + ret = -EFAULT; 400 + 401 + return ret; 402 + } 403 + 404 + static ssize_t ucma_bind_addr(struct ucma_file *file, const char __user *inbuf, 405 + int in_len, int out_len) 406 + { 407 + struct rdma_ucm_bind_addr cmd; 408 + struct ucma_context *ctx; 409 + int ret; 410 + 411 + if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 412 + return -EFAULT; 413 + 414 + ctx = ucma_get_ctx(file, cmd.id); 415 + if (IS_ERR(ctx)) 416 + return PTR_ERR(ctx); 417 + 418 + ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr); 419 + ucma_put_ctx(ctx); 420 + return ret; 421 + } 422 + 423 + static ssize_t ucma_resolve_addr(struct ucma_file *file, 424 + const char __user *inbuf, 425 + int in_len, int out_len) 426 + { 427 + struct rdma_ucm_resolve_addr cmd; 428 + struct ucma_context *ctx; 429 + int ret; 430 + 431 + if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 432 + return -EFAULT; 433 + 434 + ctx = ucma_get_ctx(file, cmd.id); 435 + if (IS_ERR(ctx)) 436 + return PTR_ERR(ctx); 437 + 438 + ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr, 439 + (struct sockaddr *) &cmd.dst_addr, 440 + cmd.timeout_ms); 441 + ucma_put_ctx(ctx); 442 + return ret; 443 + } 444 + 445 + static ssize_t ucma_resolve_route(struct ucma_file *file, 446 + const char __user *inbuf, 447 + int in_len, int out_len) 448 + { 449 + struct rdma_ucm_resolve_route cmd; 450 + struct ucma_context *ctx; 451 + int ret; 452 + 453 + if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 454 + return -EFAULT; 455 + 456 + ctx = ucma_get_ctx(file, cmd.id); 457 + if (IS_ERR(ctx)) 458 + return PTR_ERR(ctx); 459 + 460 + ret = rdma_resolve_route(ctx->cm_id, cmd.timeout_ms); 461 + ucma_put_ctx(ctx); 462 + return ret; 463 + } 464 + 465 + static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp, 466 + struct rdma_route *route) 467 + { 468 + struct rdma_dev_addr *dev_addr; 469 + 470 + resp->num_paths = route->num_paths; 471 + switch (route->num_paths) { 472 + case 0: 473 + dev_addr = &route->addr.dev_addr; 474 + ib_addr_get_dgid(dev_addr, 475 + (union ib_gid *) &resp->ib_route[0].dgid); 476 + ib_addr_get_sgid(dev_addr, 477 + (union ib_gid *) &resp->ib_route[0].sgid); 478 + resp->ib_route[0].pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr)); 479 + break; 480 + case 2: 481 + ib_copy_path_rec_to_user(&resp->ib_route[1], 482 + &route->path_rec[1]); 483 + /* fall through */ 484 + case 1: 485 + ib_copy_path_rec_to_user(&resp->ib_route[0], 486 + &route->path_rec[0]); 487 + break; 
488 + default: 489 + break; 490 + } 491 + } 492 + 493 + static ssize_t ucma_query_route(struct ucma_file *file, 494 + const char __user *inbuf, 495 + int in_len, int out_len) 496 + { 497 + struct rdma_ucm_query_route cmd; 498 + struct rdma_ucm_query_route_resp resp; 499 + struct ucma_context *ctx; 500 + struct sockaddr *addr; 501 + int ret = 0; 502 + 503 + if (out_len < sizeof(resp)) 504 + return -ENOSPC; 505 + 506 + if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 507 + return -EFAULT; 508 + 509 + ctx = ucma_get_ctx(file, cmd.id); 510 + if (IS_ERR(ctx)) 511 + return PTR_ERR(ctx); 512 + 513 + memset(&resp, 0, sizeof resp); 514 + addr = &ctx->cm_id->route.addr.src_addr; 515 + memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ? 516 + sizeof(struct sockaddr_in) : 517 + sizeof(struct sockaddr_in6)); 518 + addr = &ctx->cm_id->route.addr.dst_addr; 519 + memcpy(&resp.dst_addr, addr, addr->sa_family == AF_INET ? 520 + sizeof(struct sockaddr_in) : 521 + sizeof(struct sockaddr_in6)); 522 + if (!ctx->cm_id->device) 523 + goto out; 524 + 525 + resp.node_guid = ctx->cm_id->device->node_guid; 526 + resp.port_num = ctx->cm_id->port_num; 527 + switch (rdma_node_get_transport(ctx->cm_id->device->node_type)) { 528 + case RDMA_TRANSPORT_IB: 529 + ucma_copy_ib_route(&resp, &ctx->cm_id->route); 530 + break; 531 + default: 532 + break; 533 + } 534 + 535 + out: 536 + if (copy_to_user((void __user *)(unsigned long)cmd.response, 537 + &resp, sizeof(resp))) 538 + ret = -EFAULT; 539 + 540 + ucma_put_ctx(ctx); 541 + return ret; 542 + } 543 + 544 + static void ucma_copy_conn_param(struct rdma_conn_param *dst, 545 + struct rdma_ucm_conn_param *src) 546 + { 547 + dst->private_data = src->private_data; 548 + dst->private_data_len = src->private_data_len; 549 + dst->responder_resources =src->responder_resources; 550 + dst->initiator_depth = src->initiator_depth; 551 + dst->flow_control = src->flow_control; 552 + dst->retry_count = src->retry_count; 553 + dst->rnr_retry_count = src->rnr_retry_count; 554 + dst->srq = src->srq; 555 + dst->qp_num = src->qp_num; 556 + } 557 + 558 + static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf, 559 + int in_len, int out_len) 560 + { 561 + struct rdma_ucm_connect cmd; 562 + struct rdma_conn_param conn_param; 563 + struct ucma_context *ctx; 564 + int ret; 565 + 566 + if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 567 + return -EFAULT; 568 + 569 + if (!cmd.conn_param.valid) 570 + return -EINVAL; 571 + 572 + ctx = ucma_get_ctx(file, cmd.id); 573 + if (IS_ERR(ctx)) 574 + return PTR_ERR(ctx); 575 + 576 + ucma_copy_conn_param(&conn_param, &cmd.conn_param); 577 + ret = rdma_connect(ctx->cm_id, &conn_param); 578 + ucma_put_ctx(ctx); 579 + return ret; 580 + } 581 + 582 + static ssize_t ucma_listen(struct ucma_file *file, const char __user *inbuf, 583 + int in_len, int out_len) 584 + { 585 + struct rdma_ucm_listen cmd; 586 + struct ucma_context *ctx; 587 + int ret; 588 + 589 + if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 590 + return -EFAULT; 591 + 592 + ctx = ucma_get_ctx(file, cmd.id); 593 + if (IS_ERR(ctx)) 594 + return PTR_ERR(ctx); 595 + 596 + ctx->backlog = cmd.backlog > 0 && cmd.backlog < UCMA_MAX_BACKLOG ? 
597 + cmd.backlog : UCMA_MAX_BACKLOG; 598 + ret = rdma_listen(ctx->cm_id, ctx->backlog); 599 + ucma_put_ctx(ctx); 600 + return ret; 601 + } 602 + 603 + static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf, 604 + int in_len, int out_len) 605 + { 606 + struct rdma_ucm_accept cmd; 607 + struct rdma_conn_param conn_param; 608 + struct ucma_context *ctx; 609 + int ret; 610 + 611 + if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 612 + return -EFAULT; 613 + 614 + ctx = ucma_get_ctx(file, cmd.id); 615 + if (IS_ERR(ctx)) 616 + return PTR_ERR(ctx); 617 + 618 + if (cmd.conn_param.valid) { 619 + ctx->uid = cmd.uid; 620 + ucma_copy_conn_param(&conn_param, &cmd.conn_param); 621 + ret = rdma_accept(ctx->cm_id, &conn_param); 622 + } else 623 + ret = rdma_accept(ctx->cm_id, NULL); 624 + 625 + ucma_put_ctx(ctx); 626 + return ret; 627 + } 628 + 629 + static ssize_t ucma_reject(struct ucma_file *file, const char __user *inbuf, 630 + int in_len, int out_len) 631 + { 632 + struct rdma_ucm_reject cmd; 633 + struct ucma_context *ctx; 634 + int ret; 635 + 636 + if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 637 + return -EFAULT; 638 + 639 + ctx = ucma_get_ctx(file, cmd.id); 640 + if (IS_ERR(ctx)) 641 + return PTR_ERR(ctx); 642 + 643 + ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len); 644 + ucma_put_ctx(ctx); 645 + return ret; 646 + } 647 + 648 + static ssize_t ucma_disconnect(struct ucma_file *file, const char __user *inbuf, 649 + int in_len, int out_len) 650 + { 651 + struct rdma_ucm_disconnect cmd; 652 + struct ucma_context *ctx; 653 + int ret; 654 + 655 + if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 656 + return -EFAULT; 657 + 658 + ctx = ucma_get_ctx(file, cmd.id); 659 + if (IS_ERR(ctx)) 660 + return PTR_ERR(ctx); 661 + 662 + ret = rdma_disconnect(ctx->cm_id); 663 + ucma_put_ctx(ctx); 664 + return ret; 665 + } 666 + 667 + static ssize_t ucma_init_qp_attr(struct ucma_file *file, 668 + const char __user *inbuf, 669 + int in_len, int out_len) 670 + { 671 + struct rdma_ucm_init_qp_attr cmd; 672 + struct ib_uverbs_qp_attr resp; 673 + struct ucma_context *ctx; 674 + struct ib_qp_attr qp_attr; 675 + int ret; 676 + 677 + if (out_len < sizeof(resp)) 678 + return -ENOSPC; 679 + 680 + if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 681 + return -EFAULT; 682 + 683 + ctx = ucma_get_ctx(file, cmd.id); 684 + if (IS_ERR(ctx)) 685 + return PTR_ERR(ctx); 686 + 687 + resp.qp_attr_mask = 0; 688 + memset(&qp_attr, 0, sizeof qp_attr); 689 + qp_attr.qp_state = cmd.qp_state; 690 + ret = rdma_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask); 691 + if (ret) 692 + goto out; 693 + 694 + ib_copy_qp_attr_to_user(&resp, &qp_attr); 695 + if (copy_to_user((void __user *)(unsigned long)cmd.response, 696 + &resp, sizeof(resp))) 697 + ret = -EFAULT; 698 + 699 + out: 700 + ucma_put_ctx(ctx); 701 + return ret; 702 + } 703 + 704 + static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf, 705 + int in_len, int out_len) 706 + { 707 + struct rdma_ucm_notify cmd; 708 + struct ucma_context *ctx; 709 + int ret; 710 + 711 + if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 712 + return -EFAULT; 713 + 714 + ctx = ucma_get_ctx(file, cmd.id); 715 + if (IS_ERR(ctx)) 716 + return PTR_ERR(ctx); 717 + 718 + ret = rdma_notify(ctx->cm_id, (enum ib_event_type) cmd.event); 719 + ucma_put_ctx(ctx); 720 + return ret; 721 + } 722 + 723 + static ssize_t (*ucma_cmd_table[])(struct ucma_file *file, 724 + const char __user *inbuf, 725 + int in_len, int out_len) = { 726 + [RDMA_USER_CM_CMD_CREATE_ID] = 
ucma_create_id, 727 + [RDMA_USER_CM_CMD_DESTROY_ID] = ucma_destroy_id, 728 + [RDMA_USER_CM_CMD_BIND_ADDR] = ucma_bind_addr, 729 + [RDMA_USER_CM_CMD_RESOLVE_ADDR] = ucma_resolve_addr, 730 + [RDMA_USER_CM_CMD_RESOLVE_ROUTE]= ucma_resolve_route, 731 + [RDMA_USER_CM_CMD_QUERY_ROUTE] = ucma_query_route, 732 + [RDMA_USER_CM_CMD_CONNECT] = ucma_connect, 733 + [RDMA_USER_CM_CMD_LISTEN] = ucma_listen, 734 + [RDMA_USER_CM_CMD_ACCEPT] = ucma_accept, 735 + [RDMA_USER_CM_CMD_REJECT] = ucma_reject, 736 + [RDMA_USER_CM_CMD_DISCONNECT] = ucma_disconnect, 737 + [RDMA_USER_CM_CMD_INIT_QP_ATTR] = ucma_init_qp_attr, 738 + [RDMA_USER_CM_CMD_GET_EVENT] = ucma_get_event, 739 + [RDMA_USER_CM_CMD_GET_OPTION] = NULL, 740 + [RDMA_USER_CM_CMD_SET_OPTION] = NULL, 741 + [RDMA_USER_CM_CMD_NOTIFY] = ucma_notify, 742 + }; 743 + 744 + static ssize_t ucma_write(struct file *filp, const char __user *buf, 745 + size_t len, loff_t *pos) 746 + { 747 + struct ucma_file *file = filp->private_data; 748 + struct rdma_ucm_cmd_hdr hdr; 749 + ssize_t ret; 750 + 751 + if (len < sizeof(hdr)) 752 + return -EINVAL; 753 + 754 + if (copy_from_user(&hdr, buf, sizeof(hdr))) 755 + return -EFAULT; 756 + 757 + if (hdr.cmd < 0 || hdr.cmd >= ARRAY_SIZE(ucma_cmd_table)) 758 + return -EINVAL; 759 + 760 + if (hdr.in + sizeof(hdr) > len) 761 + return -EINVAL; 762 + 763 + if (!ucma_cmd_table[hdr.cmd]) 764 + return -ENOSYS; 765 + 766 + ret = ucma_cmd_table[hdr.cmd](file, buf + sizeof(hdr), hdr.in, hdr.out); 767 + if (!ret) 768 + ret = len; 769 + 770 + return ret; 771 + } 772 + 773 + static unsigned int ucma_poll(struct file *filp, struct poll_table_struct *wait) 774 + { 775 + struct ucma_file *file = filp->private_data; 776 + unsigned int mask = 0; 777 + 778 + poll_wait(filp, &file->poll_wait, wait); 779 + 780 + if (!list_empty(&file->event_list)) 781 + mask = POLLIN | POLLRDNORM; 782 + 783 + return mask; 784 + } 785 + 786 + static int ucma_open(struct inode *inode, struct file *filp) 787 + { 788 + struct ucma_file *file; 789 + 790 + file = kmalloc(sizeof *file, GFP_KERNEL); 791 + if (!file) 792 + return -ENOMEM; 793 + 794 + INIT_LIST_HEAD(&file->event_list); 795 + INIT_LIST_HEAD(&file->ctx_list); 796 + init_waitqueue_head(&file->poll_wait); 797 + mutex_init(&file->mut); 798 + 799 + filp->private_data = file; 800 + file->filp = filp; 801 + return 0; 802 + } 803 + 804 + static int ucma_close(struct inode *inode, struct file *filp) 805 + { 806 + struct ucma_file *file = filp->private_data; 807 + struct ucma_context *ctx, *tmp; 808 + 809 + mutex_lock(&file->mut); 810 + list_for_each_entry_safe(ctx, tmp, &file->ctx_list, list) { 811 + mutex_unlock(&file->mut); 812 + 813 + mutex_lock(&mut); 814 + idr_remove(&ctx_idr, ctx->id); 815 + mutex_unlock(&mut); 816 + 817 + ucma_free_ctx(ctx); 818 + mutex_lock(&file->mut); 819 + } 820 + mutex_unlock(&file->mut); 821 + kfree(file); 822 + return 0; 823 + } 824 + 825 + static struct file_operations ucma_fops = { 826 + .owner = THIS_MODULE, 827 + .open = ucma_open, 828 + .release = ucma_close, 829 + .write = ucma_write, 830 + .poll = ucma_poll, 831 + }; 832 + 833 + static struct miscdevice ucma_misc = { 834 + .minor = MISC_DYNAMIC_MINOR, 835 + .name = "rdma_cm", 836 + .fops = &ucma_fops, 837 + }; 838 + 839 + static ssize_t show_abi_version(struct device *dev, 840 + struct device_attribute *attr, 841 + char *buf) 842 + { 843 + return sprintf(buf, "%d\n", RDMA_USER_CM_ABI_VERSION); 844 + } 845 + static DEVICE_ATTR(abi_version, S_IRUGO, show_abi_version, NULL); 846 + 847 + static int __init ucma_init(void) 848 + { 849 + int 
ret; 850 + 851 + ret = misc_register(&ucma_misc); 852 + if (ret) 853 + return ret; 854 + 855 + ret = device_create_file(ucma_misc.this_device, &dev_attr_abi_version); 856 + if (ret) { 857 + printk(KERN_ERR "rdma_ucm: couldn't create abi_version attr\n"); 858 + goto err; 859 + } 860 + return 0; 861 + err: 862 + misc_deregister(&ucma_misc); 863 + return ret; 864 + } 865 + 866 + static void __exit ucma_cleanup(void) 867 + { 868 + device_remove_file(ucma_misc.this_device, &dev_attr_abi_version); 869 + misc_deregister(&ucma_misc); 870 + idr_destroy(&ctx_idr); 871 + } 872 + 873 + module_init(ucma_init); 874 + module_exit(ucma_cleanup);
+3 -2
drivers/infiniband/core/uverbs_marshall.c
··· 32 32 33 33 #include <rdma/ib_marshall.h> 34 34 35 - static void ib_copy_ah_attr_to_user(struct ib_uverbs_ah_attr *dst, 36 - struct ib_ah_attr *src) 35 + void ib_copy_ah_attr_to_user(struct ib_uverbs_ah_attr *dst, 36 + struct ib_ah_attr *src) 37 37 { 38 38 memcpy(dst->grh.dgid, src->grh.dgid.raw, sizeof src->grh.dgid); 39 39 dst->grh.flow_label = src->grh.flow_label; ··· 47 47 dst->is_global = src->ah_flags & IB_AH_GRH ? 1 : 0; 48 48 dst->port_num = src->port_num; 49 49 } 50 + EXPORT_SYMBOL(ib_copy_ah_attr_to_user); 50 51 51 52 void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst, 52 53 struct ib_qp_attr *src)
+6 -6
drivers/infiniband/core/uverbs_mem.c
··· 52 52 int i; 53 53 54 54 list_for_each_entry_safe(chunk, tmp, &umem->chunk_list, list) { 55 - dma_unmap_sg(dev->dma_device, chunk->page_list, 56 - chunk->nents, DMA_BIDIRECTIONAL); 55 + ib_dma_unmap_sg(dev, chunk->page_list, 56 + chunk->nents, DMA_BIDIRECTIONAL); 57 57 for (i = 0; i < chunk->nents; ++i) { 58 58 if (umem->writable && dirty) 59 59 set_page_dirty_lock(chunk->page_list[i].page); ··· 136 136 chunk->page_list[i].length = PAGE_SIZE; 137 137 } 138 138 139 - chunk->nmap = dma_map_sg(dev->dma_device, 140 - &chunk->page_list[0], 141 - chunk->nents, 142 - DMA_BIDIRECTIONAL); 139 + chunk->nmap = ib_dma_map_sg(dev, 140 + &chunk->page_list[0], 141 + chunk->nents, 142 + DMA_BIDIRECTIONAL); 143 143 if (chunk->nmap <= 0) { 144 144 for (i = 0; i < chunk->nents; ++i) 145 145 put_page(chunk->page_list[i].page);
+8 -5
drivers/infiniband/hw/amso1100/c2_qp.c
··· 161 161 162 162 if (attr_mask & IB_QP_STATE) { 163 163 /* Ensure the state is valid */ 164 - if (attr->qp_state < 0 || attr->qp_state > IB_QPS_ERR) 165 - return -EINVAL; 164 + if (attr->qp_state < 0 || attr->qp_state > IB_QPS_ERR) { 165 + err = -EINVAL; 166 + goto bail0; 167 + } 166 168 167 169 wr.next_qp_state = cpu_to_be32(to_c2_state(attr->qp_state)); 168 170 ··· 186 184 if (attr->cur_qp_state != IB_QPS_RTR && 187 185 attr->cur_qp_state != IB_QPS_RTS && 188 186 attr->cur_qp_state != IB_QPS_SQD && 189 - attr->cur_qp_state != IB_QPS_SQE) 190 - return -EINVAL; 191 - else 187 + attr->cur_qp_state != IB_QPS_SQE) { 188 + err = -EINVAL; 189 + goto bail0; 190 + } else 192 191 wr.next_qp_state = 193 192 cpu_to_be32(to_c2_state(attr->cur_qp_state)); 194 193
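The two hunks above turn early `return -EINVAL` paths into `err = -EINVAL; goto bail0;`, routing them through the common cleanup at bail0 instead of leaking whatever was set up before the state checks. A standalone sketch of the goto-unwind idiom the fix restores, written as ordinary userspace C so it compiles in isolation (the names are illustrative, not the amso1100 code itself):

#include <errno.h>
#include <stdlib.h>

struct request { int dummy; };

/* Illustrative only -- not the amso1100 code itself. */
static int modify_with_cleanup(int attr_is_valid)
{
	struct request *req = malloc(sizeof(*req));
	int err = 0;

	if (!req)
		return -ENOMEM;		/* nothing allocated yet, a plain return is fine */

	if (!attr_is_valid) {
		err = -EINVAL;		/* a bare "return -EINVAL" here would leak req */
		goto bail0;
	}

	/* ... build and post the request ... */

bail0:
	free(req);			/* single release point, like the driver's bail0 label */
	return err;
}

int main(void)
{
	return modify_with_cleanup(0) == -EINVAL ? 0 : 1;
}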
+1
drivers/infiniband/hw/ipath/Makefile
··· 6 6 ib_ipath-y := \ 7 7 ipath_cq.o \ 8 8 ipath_diag.o \ 9 + ipath_dma.o \ 9 10 ipath_driver.o \ 10 11 ipath_eeprom.o \ 11 12 ipath_file_ops.o \
+189
drivers/infiniband/hw/ipath/ipath_dma.c
··· 1 + /* 2 + * Copyright (c) 2006 QLogic, Corporation. All rights reserved. 3 + * 4 + * This software is available to you under a choice of one of two 5 + * licenses. You may choose to be licensed under the terms of the GNU 6 + * General Public License (GPL) Version 2, available from the file 7 + * COPYING in the main directory of this source tree, or the 8 + * OpenIB.org BSD license below: 9 + * 10 + * Redistribution and use in source and binary forms, with or 11 + * without modification, are permitted provided that the following 12 + * conditions are met: 13 + * 14 + * - Redistributions of source code must retain the above 15 + * copyright notice, this list of conditions and the following 16 + * disclaimer. 17 + * 18 + * - Redistributions in binary form must reproduce the above 19 + * copyright notice, this list of conditions and the following 20 + * disclaimer in the documentation and/or other materials 21 + * provided with the distribution. 22 + * 23 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 + * SOFTWARE. 31 + */ 32 + 33 + #include <rdma/ib_verbs.h> 34 + 35 + #include "ipath_verbs.h" 36 + 37 + #define BAD_DMA_ADDRESS ((u64) 0) 38 + 39 + /* 40 + * The following functions implement driver specific replacements 41 + * for the ib_dma_*() functions. 42 + * 43 + * These functions return kernel virtual addresses instead of 44 + * device bus addresses since the driver uses the CPU to copy 45 + * data instead of using hardware DMA. 
46 + */ 47 + 48 + static int ipath_mapping_error(struct ib_device *dev, u64 dma_addr) 49 + { 50 + return dma_addr == BAD_DMA_ADDRESS; 51 + } 52 + 53 + static u64 ipath_dma_map_single(struct ib_device *dev, 54 + void *cpu_addr, size_t size, 55 + enum dma_data_direction direction) 56 + { 57 + BUG_ON(!valid_dma_direction(direction)); 58 + return (u64) cpu_addr; 59 + } 60 + 61 + static void ipath_dma_unmap_single(struct ib_device *dev, 62 + u64 addr, size_t size, 63 + enum dma_data_direction direction) 64 + { 65 + BUG_ON(!valid_dma_direction(direction)); 66 + } 67 + 68 + static u64 ipath_dma_map_page(struct ib_device *dev, 69 + struct page *page, 70 + unsigned long offset, 71 + size_t size, 72 + enum dma_data_direction direction) 73 + { 74 + u64 addr; 75 + 76 + BUG_ON(!valid_dma_direction(direction)); 77 + 78 + if (offset + size > PAGE_SIZE) { 79 + addr = BAD_DMA_ADDRESS; 80 + goto done; 81 + } 82 + 83 + addr = (u64) page_address(page); 84 + if (addr) 85 + addr += offset; 86 + /* TODO: handle highmem pages */ 87 + 88 + done: 89 + return addr; 90 + } 91 + 92 + static void ipath_dma_unmap_page(struct ib_device *dev, 93 + u64 addr, size_t size, 94 + enum dma_data_direction direction) 95 + { 96 + BUG_ON(!valid_dma_direction(direction)); 97 + } 98 + 99 + int ipath_map_sg(struct ib_device *dev, struct scatterlist *sg, int nents, 100 + enum dma_data_direction direction) 101 + { 102 + u64 addr; 103 + int i; 104 + int ret = nents; 105 + 106 + BUG_ON(!valid_dma_direction(direction)); 107 + 108 + for (i = 0; i < nents; i++) { 109 + addr = (u64) page_address(sg[i].page); 110 + /* TODO: handle highmem pages */ 111 + if (!addr) { 112 + ret = 0; 113 + break; 114 + } 115 + } 116 + return ret; 117 + } 118 + 119 + static void ipath_unmap_sg(struct ib_device *dev, 120 + struct scatterlist *sg, int nents, 121 + enum dma_data_direction direction) 122 + { 123 + BUG_ON(!valid_dma_direction(direction)); 124 + } 125 + 126 + static u64 ipath_sg_dma_address(struct ib_device *dev, struct scatterlist *sg) 127 + { 128 + u64 addr = (u64) page_address(sg->page); 129 + 130 + if (addr) 131 + addr += sg->offset; 132 + return addr; 133 + } 134 + 135 + static unsigned int ipath_sg_dma_len(struct ib_device *dev, 136 + struct scatterlist *sg) 137 + { 138 + return sg->length; 139 + } 140 + 141 + static void ipath_sync_single_for_cpu(struct ib_device *dev, 142 + u64 addr, 143 + size_t size, 144 + enum dma_data_direction dir) 145 + { 146 + } 147 + 148 + static void ipath_sync_single_for_device(struct ib_device *dev, 149 + u64 addr, 150 + size_t size, 151 + enum dma_data_direction dir) 152 + { 153 + } 154 + 155 + static void *ipath_dma_alloc_coherent(struct ib_device *dev, size_t size, 156 + u64 *dma_handle, gfp_t flag) 157 + { 158 + struct page *p; 159 + void *addr = NULL; 160 + 161 + p = alloc_pages(flag, get_order(size)); 162 + if (p) 163 + addr = page_address(p); 164 + if (dma_handle) 165 + *dma_handle = (u64) addr; 166 + return addr; 167 + } 168 + 169 + static void ipath_dma_free_coherent(struct ib_device *dev, size_t size, 170 + void *cpu_addr, dma_addr_t dma_handle) 171 + { 172 + free_pages((unsigned long) cpu_addr, get_order(size)); 173 + } 174 + 175 + struct ib_dma_mapping_ops ipath_dma_mapping_ops = { 176 + ipath_mapping_error, 177 + ipath_dma_map_single, 178 + ipath_dma_unmap_single, 179 + ipath_dma_map_page, 180 + ipath_dma_unmap_page, 181 + ipath_map_sg, 182 + ipath_unmap_sg, 183 + ipath_sg_dma_address, 184 + ipath_sg_dma_len, 185 + ipath_sync_single_for_cpu, 186 + ipath_sync_single_for_device, 187 + 
ipath_dma_alloc_coherent, 188 + ipath_dma_free_coherent 189 + };
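Because these ops hand back kernel virtual addresses, BAD_DMA_ADDRESS is simply 0 and the only mapping failures they can report are a highmem page (page_address() returns NULL) or an offset/size pair that runs past the page. A hedged fragment showing how a caller observes that through the generic wrappers defined in ib_verbs.h later in this diff; ibdev, page, offset and len are placeholders.

u64 addr;

/* With dev->dma_ops == &ipath_dma_mapping_ops this resolves to
 * ipath_dma_map_page() above. */
addr = ib_dma_map_page(ibdev, page, offset, len, DMA_TO_DEVICE);
if (ib_dma_mapping_error(ibdev, addr))	/* i.e. addr == BAD_DMA_ADDRESS (0) */
	return -ENOMEM;			/* highmem page or offset + len > PAGE_SIZE */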
+1 -3
drivers/infiniband/hw/ipath/ipath_driver.c
··· 1825 1825 */ 1826 1826 void ipath_shutdown_device(struct ipath_devdata *dd) 1827 1827 { 1828 - u64 val; 1829 - 1830 1828 ipath_dbg("Shutting down the device\n"); 1831 1829 1832 1830 dd->ipath_flags |= IPATH_LINKUNK; ··· 1847 1849 */ 1848 1850 ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, 0ULL); 1849 1851 /* flush it */ 1850 - val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); 1852 + ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); 1851 1853 /* 1852 1854 * enough for anything that's going to trickle out to have actually 1853 1855 * done so.
+2 -3
drivers/infiniband/hw/ipath/ipath_file_ops.c
··· 699 699 int start_stop) 700 700 { 701 701 struct ipath_devdata *dd = pd->port_dd; 702 - u64 tval; 703 702 704 703 ipath_cdbg(PROC, "%sabling rcv for unit %u port %u:%u\n", 705 704 start_stop ? "en" : "dis", dd->ipath_unit, ··· 728 729 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, 729 730 dd->ipath_rcvctrl); 730 731 /* now be sure chip saw it before we return */ 731 - tval = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); 732 + ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); 732 733 if (start_stop) { 733 734 /* 734 735 * And try to be sure that tail reg update has happened too. ··· 737 738 * in memory copy, since we could overwrite an update by the 738 739 * chip if we did. 739 740 */ 740 - tval = ipath_read_ureg32(dd, ur_rcvhdrtail, pd->port_port); 741 + ipath_read_ureg32(dd, ur_rcvhdrtail, pd->port_port); 741 742 } 742 743 /* always; new head should be equal to new tail; see above */ 743 744 bail:
+1 -2
drivers/infiniband/hw/ipath/ipath_iba6110.c
··· 1447 1447 static int ipath_ht_early_init(struct ipath_devdata *dd) 1448 1448 { 1449 1449 u32 __iomem *piobuf; 1450 - u32 pioincr, val32, egrsize; 1450 + u32 pioincr, val32; 1451 1451 int i; 1452 1452 1453 1453 /* ··· 1467 1467 * errors interrupts if we ever see one). 1468 1468 */ 1469 1469 dd->ipath_rcvegrbufsize = dd->ipath_piosize2k; 1470 - egrsize = dd->ipath_rcvegrbufsize; 1471 1470 1472 1471 /* 1473 1472 * the min() check here is currently a nop, but it may not
+4 -4
drivers/infiniband/hw/ipath/ipath_iba6120.c
··· 602 602 */ 603 603 static int ipath_pe_bringup_serdes(struct ipath_devdata *dd) 604 604 { 605 - u64 val, tmp, config1, prev_val; 605 + u64 val, config1, prev_val; 606 606 int ret = 0; 607 607 608 608 ipath_dbg("Trying to bringup serdes\n"); ··· 633 633 | INFINIPATH_SERDC0_L1PWR_DN; 634 634 ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val); 635 635 /* be sure chip saw it */ 636 - tmp = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); 636 + ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); 637 637 udelay(5); /* need pll reset set at least for a bit */ 638 638 /* 639 639 * after PLL is reset, set the per-lane Resets and TxIdle and ··· 647 647 "and txidle (%llx)\n", (unsigned long long) val); 648 648 ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val); 649 649 /* be sure chip saw it */ 650 - tmp = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); 650 + ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); 651 651 /* need PLL reset clear for at least 11 usec before lane 652 652 * resets cleared; give it a few more to be sure */ 653 653 udelay(15); ··· 851 851 int pos, ret; 852 852 853 853 dd->ipath_msi_lo = 0; /* used as a flag during reset processing */ 854 - dd->ipath_irq = pdev->irq; 855 854 ret = pci_enable_msi(dd->pcidev); 856 855 if (ret) 857 856 ipath_dev_err(dd, "pci_enable_msi failed: %d, " 858 857 "interrupts may not work\n", ret); 859 858 /* continue even if it fails, we may still be OK... */ 859 + dd->ipath_irq = pdev->irq; 860 860 861 861 if ((pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI))) { 862 862 u16 control;
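The last hunk above moves the dd->ipath_irq assignment to after pci_enable_msi(): pci_enable_msi() may rewrite pdev->irq with the vector it sets up, so caching the value beforehand leaves the driver holding the pre-MSI interrupt number. The general ordering rule, as a hedged fragment with illustrative names (my_handler, "my_hca", dd):

/* Enable MSI first; only afterwards does pdev->irq hold the vector that
 * request_irq() should use (it keeps the INTx number if MSI setup fails). */
if (pci_enable_msi(pdev))
	dev_warn(&pdev->dev, "MSI unavailable, continuing with INTx\n");
irq = pdev->irq;			/* read *after* pci_enable_msi() */
ret = request_irq(irq, my_handler, 0, "my_hca", dd);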
+1 -2
drivers/infiniband/hw/ipath/ipath_init_chip.c
··· 347 347 static int init_chip_reset(struct ipath_devdata *dd, 348 348 struct ipath_portdata **pdp) 349 349 { 350 - struct ipath_portdata *pd; 351 350 u32 rtmp; 352 351 353 - *pdp = pd = dd->ipath_pd[0]; 352 + *pdp = dd->ipath_pd[0]; 354 353 /* ensure chip does no sends or receives while we re-initialize */ 355 354 dd->ipath_control = dd->ipath_sendctrl = dd->ipath_rcvctrl = 0U; 356 355 ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, 0);
+1 -2
drivers/infiniband/hw/ipath/ipath_intr.c
··· 598 598 * on close 599 599 */ 600 600 if (errs & INFINIPATH_E_RRCVHDRFULL) { 601 - int any; 602 601 u32 hd, tl; 603 602 ipath_stats.sps_hdrqfull++; 604 - for (any = i = 0; i < dd->ipath_cfgports; i++) { 603 + for (i = 0; i < dd->ipath_cfgports; i++) { 605 604 struct ipath_portdata *pd = dd->ipath_pd[i]; 606 605 if (i == 0) { 607 606 hd = dd->ipath_port0head;
+4 -4
drivers/infiniband/hw/ipath/ipath_keys.c
··· 134 134 */ 135 135 if (sge->lkey == 0) { 136 136 isge->mr = NULL; 137 - isge->vaddr = bus_to_virt(sge->addr); 137 + isge->vaddr = (void *) sge->addr; 138 138 isge->length = sge->length; 139 139 isge->sge_length = sge->length; 140 140 ret = 1; ··· 202 202 int ret; 203 203 204 204 /* 205 - * We use RKEY == zero for physical addresses 206 - * (see ipath_get_dma_mr). 205 + * We use RKEY == zero for kernel virtual addresses 206 + * (see ipath_get_dma_mr and ipath_dma.c). 207 207 */ 208 208 if (rkey == 0) { 209 209 sge->mr = NULL; 210 - sge->vaddr = phys_to_virt(vaddr); 210 + sge->vaddr = (void *) vaddr; 211 211 sge->length = len; 212 212 sge->sge_length = len; 213 213 ss->sg_list = NULL;
+4 -3
drivers/infiniband/hw/ipath/ipath_mr.c
··· 54 54 * @acc: access flags 55 55 * 56 56 * Returns the memory region on success, otherwise returns an errno. 57 + * Note that all DMA addresses should be created via the 58 + * struct ib_dma_mapping_ops functions (see ipath_dma.c). 57 59 */ 58 60 struct ib_mr *ipath_get_dma_mr(struct ib_pd *pd, int acc) 59 61 { ··· 151 149 m = 0; 152 150 n = 0; 153 151 for (i = 0; i < num_phys_buf; i++) { 154 - mr->mr.map[m]->segs[n].vaddr = 155 - phys_to_virt(buffer_list[i].addr); 152 + mr->mr.map[m]->segs[n].vaddr = (void *) buffer_list[i].addr; 156 153 mr->mr.map[m]->segs[n].length = buffer_list[i].size; 157 154 mr->mr.length += buffer_list[i].size; 158 155 n++; ··· 348 347 n = 0; 349 348 ps = 1 << fmr->page_shift; 350 349 for (i = 0; i < list_len; i++) { 351 - fmr->mr.map[m]->segs[n].vaddr = phys_to_virt(page_list[i]); 350 + fmr->mr.map[m]->segs[n].vaddr = (void *) page_list[i]; 352 351 fmr->mr.map[m]->segs[n].length = ps; 353 352 if (++n == IPATH_SEGSZ) { 354 353 m++;
-3
drivers/infiniband/hw/ipath/ipath_sysfs.c
··· 215 215 size_t count) 216 216 { 217 217 struct ipath_devdata *dd = dev_get_drvdata(dev); 218 - int unit; 219 218 u16 mlid; 220 219 int ret; 221 220 222 221 ret = ipath_parse_ushort(buf, &mlid); 223 222 if (ret < 0 || mlid < IPATH_MULTICAST_LID_BASE) 224 223 goto invalid; 225 - 226 - unit = dd->ipath_unit; 227 224 228 225 dd->ipath_mlid = mlid; 229 226
+1
drivers/infiniband/hw/ipath/ipath_verbs.c
··· 1599 1599 dev->detach_mcast = ipath_multicast_detach; 1600 1600 dev->process_mad = ipath_process_mad; 1601 1601 dev->mmap = ipath_mmap; 1602 + dev->dma_ops = &ipath_dma_mapping_ops; 1602 1603 1603 1604 snprintf(dev->node_desc, sizeof(dev->node_desc), 1604 1605 IPATH_IDSTR " %s", init_utsname()->nodename);
+2
drivers/infiniband/hw/ipath/ipath_verbs.h
··· 812 812 813 813 extern const u32 ib_ipath_rnr_table[]; 814 814 815 + extern struct ib_dma_mapping_ops ipath_dma_mapping_ops; 816 + 815 817 #endif /* IPATH_VERBS_H */
+2 -2
drivers/infiniband/ulp/ipoib/ipoib.h
··· 105 105 106 106 struct ipoib_rx_buf { 107 107 struct sk_buff *skb; 108 - dma_addr_t mapping; 108 + u64 mapping; 109 109 }; 110 110 111 111 struct ipoib_tx_buf { 112 112 struct sk_buff *skb; 113 - DECLARE_PCI_UNMAP_ADDR(mapping) 113 + u64 mapping; 114 114 }; 115 115 116 116 /*
+36 -39
drivers/infiniband/ulp/ipoib/ipoib_ib.c
··· 109 109 ret = ib_post_recv(priv->qp, &param, &bad_wr); 110 110 if (unlikely(ret)) { 111 111 ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret); 112 - dma_unmap_single(priv->ca->dma_device, 113 - priv->rx_ring[id].mapping, 114 - IPOIB_BUF_SIZE, DMA_FROM_DEVICE); 112 + ib_dma_unmap_single(priv->ca, priv->rx_ring[id].mapping, 113 + IPOIB_BUF_SIZE, DMA_FROM_DEVICE); 115 114 dev_kfree_skb_any(priv->rx_ring[id].skb); 116 115 priv->rx_ring[id].skb = NULL; 117 116 } ··· 122 123 { 123 124 struct ipoib_dev_priv *priv = netdev_priv(dev); 124 125 struct sk_buff *skb; 125 - dma_addr_t addr; 126 + u64 addr; 126 127 127 128 skb = dev_alloc_skb(IPOIB_BUF_SIZE + 4); 128 129 if (!skb) ··· 135 136 */ 136 137 skb_reserve(skb, 4); 137 138 138 - addr = dma_map_single(priv->ca->dma_device, 139 - skb->data, IPOIB_BUF_SIZE, 140 - DMA_FROM_DEVICE); 141 - if (unlikely(dma_mapping_error(addr))) { 139 + addr = ib_dma_map_single(priv->ca, skb->data, IPOIB_BUF_SIZE, 140 + DMA_FROM_DEVICE); 141 + if (unlikely(ib_dma_mapping_error(priv->ca, addr))) { 142 142 dev_kfree_skb_any(skb); 143 143 return -EIO; 144 144 } ··· 172 174 struct ipoib_dev_priv *priv = netdev_priv(dev); 173 175 unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV; 174 176 struct sk_buff *skb; 175 - dma_addr_t addr; 177 + u64 addr; 176 178 177 179 ipoib_dbg_data(priv, "recv completion: id %d, op %d, status: %d\n", 178 180 wr_id, wc->opcode, wc->status); ··· 191 193 ipoib_warn(priv, "failed recv event " 192 194 "(status=%d, wrid=%d vend_err %x)\n", 193 195 wc->status, wr_id, wc->vendor_err); 194 - dma_unmap_single(priv->ca->dma_device, addr, 195 - IPOIB_BUF_SIZE, DMA_FROM_DEVICE); 196 + ib_dma_unmap_single(priv->ca, addr, 197 + IPOIB_BUF_SIZE, DMA_FROM_DEVICE); 196 198 dev_kfree_skb_any(skb); 197 199 priv->rx_ring[wr_id].skb = NULL; 198 200 return; ··· 210 212 ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n", 211 213 wc->byte_len, wc->slid); 212 214 213 - dma_unmap_single(priv->ca->dma_device, addr, 214 - IPOIB_BUF_SIZE, DMA_FROM_DEVICE); 215 + ib_dma_unmap_single(priv->ca, addr, IPOIB_BUF_SIZE, DMA_FROM_DEVICE); 215 216 216 217 skb_put(skb, wc->byte_len); 217 218 skb_pull(skb, IB_GRH_BYTES); ··· 258 261 259 262 tx_req = &priv->tx_ring[wr_id]; 260 263 261 - dma_unmap_single(priv->ca->dma_device, 262 - pci_unmap_addr(tx_req, mapping), 263 - tx_req->skb->len, 264 - DMA_TO_DEVICE); 264 + ib_dma_unmap_single(priv->ca, tx_req->mapping, 265 + tx_req->skb->len, DMA_TO_DEVICE); 265 266 266 267 ++priv->stats.tx_packets; 267 268 priv->stats.tx_bytes += tx_req->skb->len; ··· 306 311 static inline int post_send(struct ipoib_dev_priv *priv, 307 312 unsigned int wr_id, 308 313 struct ib_ah *address, u32 qpn, 309 - dma_addr_t addr, int len) 314 + u64 addr, int len) 310 315 { 311 316 struct ib_send_wr *bad_wr; 312 317 ··· 325 330 { 326 331 struct ipoib_dev_priv *priv = netdev_priv(dev); 327 332 struct ipoib_tx_buf *tx_req; 328 - dma_addr_t addr; 333 + u64 addr; 329 334 330 335 if (unlikely(skb->len > dev->mtu + INFINIBAND_ALEN)) { 331 336 ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n", ··· 348 353 */ 349 354 tx_req = &priv->tx_ring[priv->tx_head & (ipoib_sendq_size - 1)]; 350 355 tx_req->skb = skb; 351 - addr = dma_map_single(priv->ca->dma_device, skb->data, skb->len, 352 - DMA_TO_DEVICE); 353 - if (unlikely(dma_mapping_error(addr))) { 356 + addr = ib_dma_map_single(priv->ca, skb->data, skb->len, 357 + DMA_TO_DEVICE); 358 + if (unlikely(ib_dma_mapping_error(priv->ca, addr))) { 354 359 ++priv->stats.tx_errors; 355 360 
dev_kfree_skb_any(skb); 356 361 return; 357 362 } 358 - pci_unmap_addr_set(tx_req, mapping, addr); 363 + tx_req->mapping = addr; 359 364 360 365 if (unlikely(post_send(priv, priv->tx_head & (ipoib_sendq_size - 1), 361 366 address->ah, qpn, addr, skb->len))) { 362 367 ipoib_warn(priv, "post_send failed\n"); 363 368 ++priv->stats.tx_errors; 364 - dma_unmap_single(priv->ca->dma_device, addr, skb->len, 365 - DMA_TO_DEVICE); 369 + ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE); 366 370 dev_kfree_skb_any(skb); 367 371 } else { 368 372 dev->trans_start = jiffies; ··· 532 538 while ((int) priv->tx_tail - (int) priv->tx_head < 0) { 533 539 tx_req = &priv->tx_ring[priv->tx_tail & 534 540 (ipoib_sendq_size - 1)]; 535 - dma_unmap_single(priv->ca->dma_device, 536 - pci_unmap_addr(tx_req, mapping), 537 - tx_req->skb->len, 538 - DMA_TO_DEVICE); 541 + ib_dma_unmap_single(priv->ca, 542 + tx_req->mapping, 543 + tx_req->skb->len, 544 + DMA_TO_DEVICE); 539 545 dev_kfree_skb_any(tx_req->skb); 540 546 ++priv->tx_tail; 541 547 } 542 548 543 - for (i = 0; i < ipoib_recvq_size; ++i) 544 - if (priv->rx_ring[i].skb) { 545 - dma_unmap_single(priv->ca->dma_device, 546 - pci_unmap_addr(&priv->rx_ring[i], 547 - mapping), 548 - IPOIB_BUF_SIZE, 549 - DMA_FROM_DEVICE); 550 - dev_kfree_skb_any(priv->rx_ring[i].skb); 551 - priv->rx_ring[i].skb = NULL; 552 - } 549 + for (i = 0; i < ipoib_recvq_size; ++i) { 550 + struct ipoib_rx_buf *rx_req; 551 + 552 + rx_req = &priv->rx_ring[i]; 553 + if (!rx_req->skb) 554 + continue; 555 + ib_dma_unmap_single(priv->ca, 556 + rx_req->mapping, 557 + IPOIB_BUF_SIZE, 558 + DMA_FROM_DEVICE); 559 + dev_kfree_skb_any(rx_req->skb); 560 + rx_req->skb = NULL; 561 + } 553 562 554 563 goto timeout; 555 564 }
+1 -2
drivers/infiniband/ulp/ipoib/ipoib_main.c
··· 497 497 return; 498 498 } 499 499 500 - skb_queue_head_init(&neigh->queue); 501 - 502 500 /* 503 501 * We can only be called from ipoib_start_xmit, so we're 504 502 * inside tx_lock -- no need to save/restore flags. ··· 804 806 805 807 neigh->neighbour = neighbour; 806 808 *to_ipoib_neigh(neighbour) = neigh; 809 + skb_queue_head_init(&neigh->queue); 807 810 808 811 return neigh; 809 812 }
+1 -1
drivers/infiniband/ulp/iser/iscsi_iser.h
··· 182 182 struct iser_mem_reg reg; /* memory registration info */ 183 183 void *virt_addr; 184 184 struct iser_device *device; /* device->device for dma_unmap */ 185 - dma_addr_t dma_addr; /* if non zero, addr for dma_unmap */ 185 + u64 dma_addr; /* if non zero, addr for dma_unmap */ 186 186 enum dma_data_direction direction; /* direction for dma_unmap */ 187 187 unsigned int data_size; 188 188 atomic_t ref_count; /* refcount, freed when dec to 0 */
-4
drivers/infiniband/ulp/iser/iser_initiator.c
··· 487 487 struct iscsi_iser_conn *iser_conn = conn->dd_data; 488 488 struct iser_desc *mdesc = mtask->dd_data; 489 489 struct iser_dto *send_dto = NULL; 490 - unsigned int itt; 491 490 unsigned long data_seg_len; 492 491 int err = 0; 493 - unsigned char opcode; 494 492 struct iser_regd_buf *regd_buf; 495 493 struct iser_device *device; 496 494 ··· 510 512 511 513 iser_reg_single(device, send_dto->regd[0], DMA_TO_DEVICE); 512 514 513 - itt = ntohl(mtask->hdr->itt); 514 - opcode = mtask->hdr->opcode & ISCSI_OPCODE_MASK; 515 515 data_seg_len = ntoh24(mtask->hdr->dlength); 516 516 517 517 if (data_seg_len > 0) {
+61 -64
drivers/infiniband/ulp/iser/iser_memory.c
··· 52 52 */ 53 53 int iser_regd_buff_release(struct iser_regd_buf *regd_buf) 54 54 { 55 - struct device *dma_device; 55 + struct ib_device *dev; 56 56 57 57 if ((atomic_read(&regd_buf->ref_count) == 0) || 58 58 atomic_dec_and_test(&regd_buf->ref_count)) { ··· 61 61 iser_unreg_mem(&regd_buf->reg); 62 62 63 63 if (regd_buf->dma_addr) { 64 - dma_device = regd_buf->device->ib_device->dma_device; 65 - dma_unmap_single(dma_device, 64 + dev = regd_buf->device->ib_device; 65 + ib_dma_unmap_single(dev, 66 66 regd_buf->dma_addr, 67 67 regd_buf->data_size, 68 68 regd_buf->direction); ··· 84 84 struct iser_regd_buf *regd_buf, 85 85 enum dma_data_direction direction) 86 86 { 87 - dma_addr_t dma_addr; 87 + u64 dma_addr; 88 88 89 - dma_addr = dma_map_single(device->ib_device->dma_device, 90 - regd_buf->virt_addr, 91 - regd_buf->data_size, direction); 92 - BUG_ON(dma_mapping_error(dma_addr)); 89 + dma_addr = ib_dma_map_single(device->ib_device, 90 + regd_buf->virt_addr, 91 + regd_buf->data_size, direction); 92 + BUG_ON(ib_dma_mapping_error(device->ib_device, dma_addr)); 93 93 94 94 regd_buf->reg.lkey = device->mr->lkey; 95 95 regd_buf->reg.len = regd_buf->data_size; ··· 107 107 enum iser_data_dir cmd_dir) 108 108 { 109 109 int dma_nents; 110 - struct device *dma_device; 110 + struct ib_device *dev; 111 111 char *mem = NULL; 112 112 struct iser_data_buf *data = &iser_ctask->data[cmd_dir]; 113 113 unsigned long cmd_data_len = data->data_len; ··· 147 147 148 148 iser_ctask->data_copy[cmd_dir].copy_buf = mem; 149 149 150 - dma_device = iser_ctask->iser_conn->ib_conn->device->ib_device->dma_device; 151 - 152 - if (cmd_dir == ISER_DIR_OUT) 153 - dma_nents = dma_map_sg(dma_device, 154 - &iser_ctask->data_copy[cmd_dir].sg_single, 155 - 1, DMA_TO_DEVICE); 156 - else 157 - dma_nents = dma_map_sg(dma_device, 158 - &iser_ctask->data_copy[cmd_dir].sg_single, 159 - 1, DMA_FROM_DEVICE); 160 - 150 + dev = iser_ctask->iser_conn->ib_conn->device->ib_device; 151 + dma_nents = ib_dma_map_sg(dev, 152 + &iser_ctask->data_copy[cmd_dir].sg_single, 153 + 1, 154 + (cmd_dir == ISER_DIR_OUT) ? 155 + DMA_TO_DEVICE : DMA_FROM_DEVICE); 161 156 BUG_ON(dma_nents == 0); 162 157 163 158 iser_ctask->data_copy[cmd_dir].dma_nents = dma_nents; ··· 165 170 void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask, 166 171 enum iser_data_dir cmd_dir) 167 172 { 168 - struct device *dma_device; 173 + struct ib_device *dev; 169 174 struct iser_data_buf *mem_copy; 170 175 unsigned long cmd_data_len; 171 176 172 - dma_device = iser_ctask->iser_conn->ib_conn->device->ib_device->dma_device; 173 - mem_copy = &iser_ctask->data_copy[cmd_dir]; 177 + dev = iser_ctask->iser_conn->ib_conn->device->ib_device; 178 + mem_copy = &iser_ctask->data_copy[cmd_dir]; 174 179 175 - if (cmd_dir == ISER_DIR_OUT) 176 - dma_unmap_sg(dma_device, &mem_copy->sg_single, 1, 177 - DMA_TO_DEVICE); 178 - else 179 - dma_unmap_sg(dma_device, &mem_copy->sg_single, 1, 180 - DMA_FROM_DEVICE); 180 + ib_dma_unmap_sg(dev, &mem_copy->sg_single, 1, 181 + (cmd_dir == ISER_DIR_OUT) ? 182 + DMA_TO_DEVICE : DMA_FROM_DEVICE); 181 183 182 184 if (cmd_dir == ISER_DIR_IN) { 183 185 char *mem; ··· 223 231 * consecutive elements. Also, it handles one entry SG. 
224 232 */ 225 233 static int iser_sg_to_page_vec(struct iser_data_buf *data, 226 - struct iser_page_vec *page_vec) 234 + struct iser_page_vec *page_vec, 235 + struct ib_device *ibdev) 227 236 { 228 237 struct scatterlist *sg = (struct scatterlist *)data->buf; 229 - dma_addr_t first_addr, last_addr, page; 230 - int start_aligned, end_aligned; 238 + u64 first_addr, last_addr, page; 239 + int end_aligned; 231 240 unsigned int cur_page = 0; 232 241 unsigned long total_sz = 0; 233 242 int i; ··· 237 244 page_vec->offset = (u64) sg[0].offset & ~MASK_4K; 238 245 239 246 for (i = 0; i < data->dma_nents; i++) { 240 - total_sz += sg_dma_len(&sg[i]); 247 + unsigned int dma_len = ib_sg_dma_len(ibdev, &sg[i]); 241 248 242 - first_addr = sg_dma_address(&sg[i]); 243 - last_addr = first_addr + sg_dma_len(&sg[i]); 249 + total_sz += dma_len; 244 250 245 - start_aligned = !(first_addr & ~MASK_4K); 251 + first_addr = ib_sg_dma_address(ibdev, &sg[i]); 252 + last_addr = first_addr + dma_len; 253 + 246 254 end_aligned = !(last_addr & ~MASK_4K); 247 255 248 256 /* continue to collect page fragments till aligned or SG ends */ 249 257 while (!end_aligned && (i + 1 < data->dma_nents)) { 250 258 i++; 251 - total_sz += sg_dma_len(&sg[i]); 252 - last_addr = sg_dma_address(&sg[i]) + sg_dma_len(&sg[i]); 259 + dma_len = ib_sg_dma_len(ibdev, &sg[i]); 260 + total_sz += dma_len; 261 + last_addr = ib_sg_dma_address(ibdev, &sg[i]) + dma_len; 253 262 end_aligned = !(last_addr & ~MASK_4K); 254 263 } 255 264 ··· 283 288 * the number of entries which are aligned correctly. Supports the case where 284 289 * consecutive SG elements are actually fragments of the same physcial page. 285 290 */ 286 - static unsigned int iser_data_buf_aligned_len(struct iser_data_buf *data) 291 + static unsigned int iser_data_buf_aligned_len(struct iser_data_buf *data, 292 + struct ib_device *ibdev) 287 293 { 288 294 struct scatterlist *sg; 289 - dma_addr_t end_addr, next_addr; 295 + u64 end_addr, next_addr; 290 296 int i, cnt; 291 297 unsigned int ret_len = 0; 292 298 ··· 299 303 (unsigned long)page_to_phys(sg[i].page), 300 304 (unsigned long)sg[i].offset, 301 305 (unsigned long)sg[i].length); */ 302 - end_addr = sg_dma_address(&sg[i]) + 303 - sg_dma_len(&sg[i]); 306 + end_addr = ib_sg_dma_address(ibdev, &sg[i]) + 307 + ib_sg_dma_len(ibdev, &sg[i]); 304 308 /* iser_dbg("Checking sg iobuf end address " 305 309 "0x%08lX\n", end_addr); */ 306 310 if (i + 1 < data->dma_nents) { 307 - next_addr = sg_dma_address(&sg[i+1]); 311 + next_addr = ib_sg_dma_address(ibdev, &sg[i+1]); 308 312 /* are i, i+1 fragments of the same page? 
*/ 309 313 if (end_addr == next_addr) 310 314 continue; ··· 321 325 return ret_len; 322 326 } 323 327 324 - static void iser_data_buf_dump(struct iser_data_buf *data) 328 + static void iser_data_buf_dump(struct iser_data_buf *data, 329 + struct ib_device *ibdev) 325 330 { 326 331 struct scatterlist *sg = (struct scatterlist *)data->buf; 327 332 int i; ··· 330 333 for (i = 0; i < data->dma_nents; i++) 331 334 iser_err("sg[%d] dma_addr:0x%lX page:0x%p " 332 335 "off:0x%x sz:0x%x dma_len:0x%x\n", 333 - i, (unsigned long)sg_dma_address(&sg[i]), 336 + i, (unsigned long)ib_sg_dma_address(ibdev, &sg[i]), 334 337 sg[i].page, sg[i].offset, 335 - sg[i].length,sg_dma_len(&sg[i])); 338 + sg[i].length, ib_sg_dma_len(ibdev, &sg[i])); 336 339 } 337 340 338 341 static void iser_dump_page_vec(struct iser_page_vec *page_vec) ··· 346 349 } 347 350 348 351 static void iser_page_vec_build(struct iser_data_buf *data, 349 - struct iser_page_vec *page_vec) 352 + struct iser_page_vec *page_vec, 353 + struct ib_device *ibdev) 350 354 { 351 355 int page_vec_len = 0; 352 356 ··· 355 357 page_vec->offset = 0; 356 358 357 359 iser_dbg("Translating sg sz: %d\n", data->dma_nents); 358 - page_vec_len = iser_sg_to_page_vec(data,page_vec); 360 + page_vec_len = iser_sg_to_page_vec(data, page_vec, ibdev); 359 361 iser_dbg("sg len %d page_vec_len %d\n", data->dma_nents,page_vec_len); 360 362 361 363 page_vec->length = page_vec_len; 362 364 363 365 if (page_vec_len * SIZE_4K < page_vec->data_size) { 364 366 iser_err("page_vec too short to hold this SG\n"); 365 - iser_data_buf_dump(data); 367 + iser_data_buf_dump(data, ibdev); 366 368 iser_dump_page_vec(page_vec); 367 369 BUG(); 368 370 } ··· 373 375 enum iser_data_dir iser_dir, 374 376 enum dma_data_direction dma_dir) 375 377 { 376 - struct device *dma_device; 378 + struct ib_device *dev; 377 379 378 380 iser_ctask->dir[iser_dir] = 1; 379 - dma_device = 380 - iser_ctask->iser_conn->ib_conn->device->ib_device->dma_device; 381 + dev = iser_ctask->iser_conn->ib_conn->device->ib_device; 381 382 382 - data->dma_nents = dma_map_sg(dma_device, data->buf, data->size, dma_dir); 383 + data->dma_nents = ib_dma_map_sg(dev, data->buf, data->size, dma_dir); 383 384 if (data->dma_nents == 0) { 384 385 iser_err("dma_map_sg failed!!!\n"); 385 386 return -EINVAL; ··· 388 391 389 392 void iser_dma_unmap_task_data(struct iscsi_iser_cmd_task *iser_ctask) 390 393 { 391 - struct device *dma_device; 394 + struct ib_device *dev; 392 395 struct iser_data_buf *data; 393 396 394 - dma_device = 395 - iser_ctask->iser_conn->ib_conn->device->ib_device->dma_device; 397 + dev = iser_ctask->iser_conn->ib_conn->device->ib_device; 396 398 397 399 if (iser_ctask->dir[ISER_DIR_IN]) { 398 400 data = &iser_ctask->data[ISER_DIR_IN]; 399 - dma_unmap_sg(dma_device, data->buf, data->size, DMA_FROM_DEVICE); 401 + ib_dma_unmap_sg(dev, data->buf, data->size, DMA_FROM_DEVICE); 400 402 } 401 403 402 404 if (iser_ctask->dir[ISER_DIR_OUT]) { 403 405 data = &iser_ctask->data[ISER_DIR_OUT]; 404 - dma_unmap_sg(dma_device, data->buf, data->size, DMA_TO_DEVICE); 406 + ib_dma_unmap_sg(dev, data->buf, data->size, DMA_TO_DEVICE); 405 407 } 406 408 } 407 409 ··· 415 419 { 416 420 struct iser_conn *ib_conn = iser_ctask->iser_conn->ib_conn; 417 421 struct iser_device *device = ib_conn->device; 422 + struct ib_device *ibdev = device->ib_device; 418 423 struct iser_data_buf *mem = &iser_ctask->data[cmd_dir]; 419 424 struct iser_regd_buf *regd_buf; 420 425 int aligned_len; ··· 425 428 426 429 regd_buf = &iser_ctask->rdma_regd[cmd_dir]; 427 
430 428 - aligned_len = iser_data_buf_aligned_len(mem); 431 + aligned_len = iser_data_buf_aligned_len(mem, ibdev); 429 432 if (aligned_len != mem->dma_nents) { 430 433 iser_err("rdma alignment violation %d/%d aligned\n", 431 434 aligned_len, mem->size); 432 - iser_data_buf_dump(mem); 435 + iser_data_buf_dump(mem, ibdev); 433 436 434 437 /* unmap the command data before accessing it */ 435 438 iser_dma_unmap_task_data(iser_ctask); ··· 447 450 448 451 regd_buf->reg.lkey = device->mr->lkey; 449 452 regd_buf->reg.rkey = device->mr->rkey; 450 - regd_buf->reg.len = sg_dma_len(&sg[0]); 451 - regd_buf->reg.va = sg_dma_address(&sg[0]); 453 + regd_buf->reg.len = ib_sg_dma_len(ibdev, &sg[0]); 454 + regd_buf->reg.va = ib_sg_dma_address(ibdev, &sg[0]); 452 455 regd_buf->reg.is_fmr = 0; 453 456 454 457 iser_dbg("PHYSICAL Mem.register: lkey: 0x%08X rkey: 0x%08X " ··· 458 461 (unsigned long)regd_buf->reg.va, 459 462 (unsigned long)regd_buf->reg.len); 460 463 } else { /* use FMR for multiple dma entries */ 461 - iser_page_vec_build(mem, ib_conn->page_vec); 464 + iser_page_vec_build(mem, ib_conn->page_vec, ibdev); 462 465 err = iser_reg_page_vec(ib_conn, ib_conn->page_vec, &regd_buf->reg); 463 466 if (err) { 464 - iser_data_buf_dump(mem); 467 + iser_data_buf_dump(mem, ibdev); 465 468 iser_err("mem->dma_nents = %d (dlength = 0x%x)\n", mem->dma_nents, 466 469 ntoh24(iser_ctask->desc.iscsi_header.dlength)); 467 470 iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n",
+48 -33
drivers/infiniband/ulp/srp/ib_srp.c
··· 122 122 if (!iu->buf) 123 123 goto out_free_iu; 124 124 125 - iu->dma = dma_map_single(host->dev->dev->dma_device, 126 - iu->buf, size, direction); 127 - if (dma_mapping_error(iu->dma)) 125 + iu->dma = ib_dma_map_single(host->dev->dev, iu->buf, size, direction); 126 + if (ib_dma_mapping_error(host->dev->dev, iu->dma)) 128 127 goto out_free_buf; 129 128 130 129 iu->size = size; ··· 144 145 if (!iu) 145 146 return; 146 147 147 - dma_unmap_single(host->dev->dev->dma_device, 148 - iu->dma, iu->size, iu->direction); 148 + ib_dma_unmap_single(host->dev->dev, iu->dma, iu->size, iu->direction); 149 149 kfree(iu->buf); 150 150 kfree(iu); 151 151 } ··· 480 482 scat = &req->fake_sg; 481 483 } 482 484 483 - dma_unmap_sg(target->srp_host->dev->dev->dma_device, scat, nents, 484 - scmnd->sc_data_direction); 485 + ib_dma_unmap_sg(target->srp_host->dev->dev, scat, nents, 486 + scmnd->sc_data_direction); 485 487 } 486 488 487 489 static void srp_remove_req(struct srp_target_port *target, struct srp_request *req) ··· 593 595 int i, j; 594 596 int ret; 595 597 struct srp_device *dev = target->srp_host->dev; 598 + struct ib_device *ibdev = dev->dev; 596 599 597 600 if (!dev->fmr_pool) 598 601 return -ENODEV; 599 602 600 - if ((sg_dma_address(&scat[0]) & ~dev->fmr_page_mask) && 603 + if ((ib_sg_dma_address(ibdev, &scat[0]) & ~dev->fmr_page_mask) && 601 604 mellanox_workarounds && !memcmp(&target->ioc_guid, mellanox_oui, 3)) 602 605 return -EINVAL; 603 606 604 607 len = page_cnt = 0; 605 608 for (i = 0; i < sg_cnt; ++i) { 606 - if (sg_dma_address(&scat[i]) & ~dev->fmr_page_mask) { 609 + unsigned int dma_len = ib_sg_dma_len(ibdev, &scat[i]); 610 + 611 + if (ib_sg_dma_address(ibdev, &scat[i]) & ~dev->fmr_page_mask) { 607 612 if (i > 0) 608 613 return -EINVAL; 609 614 else 610 615 ++page_cnt; 611 616 } 612 - if ((sg_dma_address(&scat[i]) + sg_dma_len(&scat[i])) & 617 + if ((ib_sg_dma_address(ibdev, &scat[i]) + dma_len) & 613 618 ~dev->fmr_page_mask) { 614 619 if (i < sg_cnt - 1) 615 620 return -EINVAL; ··· 620 619 ++page_cnt; 621 620 } 622 621 623 - len += sg_dma_len(&scat[i]); 622 + len += dma_len; 624 623 } 625 624 626 625 page_cnt += len >> dev->fmr_page_shift; ··· 632 631 return -ENOMEM; 633 632 634 633 page_cnt = 0; 635 - for (i = 0; i < sg_cnt; ++i) 636 - for (j = 0; j < sg_dma_len(&scat[i]); j += dev->fmr_page_size) 634 + for (i = 0; i < sg_cnt; ++i) { 635 + unsigned int dma_len = ib_sg_dma_len(ibdev, &scat[i]); 636 + 637 + for (j = 0; j < dma_len; j += dev->fmr_page_size) 637 638 dma_pages[page_cnt++] = 638 - (sg_dma_address(&scat[i]) & dev->fmr_page_mask) + j; 639 + (ib_sg_dma_address(ibdev, &scat[i]) & 640 + dev->fmr_page_mask) + j; 641 + } 639 642 640 643 req->fmr = ib_fmr_pool_map_phys(dev->fmr_pool, 641 644 dma_pages, page_cnt, io_addr); ··· 649 644 goto out; 650 645 } 651 646 652 - buf->va = cpu_to_be64(sg_dma_address(&scat[0]) & ~dev->fmr_page_mask); 647 + buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, &scat[0]) & 648 + ~dev->fmr_page_mask); 653 649 buf->key = cpu_to_be32(req->fmr->fmr->rkey); 654 650 buf->len = cpu_to_be32(len); 655 651 ··· 669 663 struct srp_cmd *cmd = req->cmd->buf; 670 664 int len, nents, count; 671 665 u8 fmt = SRP_DATA_DESC_DIRECT; 666 + struct srp_device *dev; 667 + struct ib_device *ibdev; 672 668 673 669 if (!scmnd->request_buffer || scmnd->sc_data_direction == DMA_NONE) 674 670 return sizeof (struct srp_cmd); ··· 695 687 sg_init_one(scat, scmnd->request_buffer, scmnd->request_bufflen); 696 688 } 697 689 698 - count = dma_map_sg(target->srp_host->dev->dev->dma_device, 
699 - scat, nents, scmnd->sc_data_direction); 690 + dev = target->srp_host->dev; 691 + ibdev = dev->dev; 692 + 693 + count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction); 700 694 701 695 fmt = SRP_DATA_DESC_DIRECT; 702 696 len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf); ··· 712 702 */ 713 703 struct srp_direct_buf *buf = (void *) cmd->add_data; 714 704 715 - buf->va = cpu_to_be64(sg_dma_address(scat)); 716 - buf->key = cpu_to_be32(target->srp_host->dev->mr->rkey); 717 - buf->len = cpu_to_be32(sg_dma_len(scat)); 705 + buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat)); 706 + buf->key = cpu_to_be32(dev->mr->rkey); 707 + buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat)); 718 708 } else if (srp_map_fmr(target, scat, count, req, 719 709 (void *) cmd->add_data)) { 720 710 /* ··· 732 722 count * sizeof (struct srp_direct_buf); 733 723 734 724 for (i = 0; i < count; ++i) { 725 + unsigned int dma_len = ib_sg_dma_len(ibdev, &scat[i]); 726 + 735 727 buf->desc_list[i].va = 736 - cpu_to_be64(sg_dma_address(&scat[i])); 728 + cpu_to_be64(ib_sg_dma_address(ibdev, &scat[i])); 737 729 buf->desc_list[i].key = 738 - cpu_to_be32(target->srp_host->dev->mr->rkey); 739 - buf->desc_list[i].len = 740 - cpu_to_be32(sg_dma_len(&scat[i])); 741 - datalen += sg_dma_len(&scat[i]); 730 + cpu_to_be32(dev->mr->rkey); 731 + buf->desc_list[i].len = cpu_to_be32(dma_len); 732 + datalen += dma_len; 742 733 } 743 734 744 735 if (scmnd->sc_data_direction == DMA_TO_DEVICE) ··· 819 808 820 809 static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc) 821 810 { 811 + struct ib_device *dev; 822 812 struct srp_iu *iu; 823 813 u8 opcode; 824 814 825 815 iu = target->rx_ring[wc->wr_id & ~SRP_OP_RECV]; 826 816 827 - dma_sync_single_for_cpu(target->srp_host->dev->dev->dma_device, iu->dma, 828 - target->max_ti_iu_len, DMA_FROM_DEVICE); 817 + dev = target->srp_host->dev->dev; 818 + ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_ti_iu_len, 819 + DMA_FROM_DEVICE); 829 820 830 821 opcode = *(u8 *) iu->buf; 831 822 ··· 863 850 break; 864 851 } 865 852 866 - dma_sync_single_for_device(target->srp_host->dev->dev->dma_device, iu->dma, 867 - target->max_ti_iu_len, DMA_FROM_DEVICE); 853 + ib_dma_sync_single_for_device(dev, iu->dma, target->max_ti_iu_len, 854 + DMA_FROM_DEVICE); 868 855 } 869 856 870 857 static void srp_completion(struct ib_cq *cq, void *target_ptr) ··· 982 969 struct srp_request *req; 983 970 struct srp_iu *iu; 984 971 struct srp_cmd *cmd; 972 + struct ib_device *dev; 985 973 int len; 986 974 987 975 if (target->state == SRP_TARGET_CONNECTING) ··· 999 985 if (!iu) 1000 986 goto err; 1001 987 1002 - dma_sync_single_for_cpu(target->srp_host->dev->dev->dma_device, iu->dma, 1003 - srp_max_iu_len, DMA_TO_DEVICE); 988 + dev = target->srp_host->dev->dev; 989 + ib_dma_sync_single_for_cpu(dev, iu->dma, srp_max_iu_len, 990 + DMA_TO_DEVICE); 1004 991 1005 992 req = list_entry(target->free_reqs.next, struct srp_request, list); 1006 993 ··· 1033 1018 goto err_unmap; 1034 1019 } 1035 1020 1036 - dma_sync_single_for_device(target->srp_host->dev->dev->dma_device, iu->dma, 1037 - srp_max_iu_len, DMA_TO_DEVICE); 1021 + ib_dma_sync_single_for_device(dev, iu->dma, srp_max_iu_len, 1022 + DMA_TO_DEVICE); 1038 1023 1039 1024 if (__srp_post_send(target, iu, len)) { 1040 1025 printk(KERN_ERR PFX "Send failed\n");
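SRP (and iSER above it) now fetch scatterlist DMA addresses and lengths through ib_sg_dma_address()/ib_sg_dma_len() rather than sg_dma_address()/sg_dma_len(), so devices that install their own dma_ops see their values used consistently. A hedged kernel-style sketch of that accessor pattern; dump_mapped_sg() is an illustrative helper, not part of the patch.

/* Illustrative: dump a scatterlist that was mapped with ib_dma_map_sg(). */
static void dump_mapped_sg(struct ib_device *ibdev,
			   struct scatterlist *sg, int count)
{
	int i;

	for (i = 0; i < count; ++i)
		printk(KERN_DEBUG "sg[%d]: addr 0x%llx len %u\n", i,
		       (unsigned long long) ib_sg_dma_address(ibdev, &sg[i]),
		       ib_sg_dma_len(ibdev, &sg[i]));
}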
+1 -1
drivers/infiniband/ulp/srp/ib_srp.h
··· 161 161 }; 162 162 163 163 struct srp_iu { 164 - dma_addr_t dma; 164 + u64 dma; 165 165 void *buf; 166 166 size_t size; 167 167 enum dma_data_direction direction;
+4 -1
include/rdma/ib_marshall.h
··· 1 1 /* 2 - * Copyright (c) 2005 Intel Corporation. All rights reserved. 2 + * Copyright (c) 2005-2006 Intel Corporation. All rights reserved. 3 3 * 4 4 * This software is available to you under a choice of one of two 5 5 * licenses. You may choose to be licensed under the terms of the GNU ··· 40 40 41 41 void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst, 42 42 struct ib_qp_attr *src); 43 + 44 + void ib_copy_ah_attr_to_user(struct ib_uverbs_ah_attr *dst, 45 + struct ib_ah_attr *src); 43 46 44 47 void ib_copy_path_rec_to_user(struct ib_user_path_rec *dst, 45 48 struct ib_sa_path_rec *src);
+253
include/rdma/ib_verbs.h
··· 43 43 44 44 #include <linux/types.h> 45 45 #include <linux/device.h> 46 + #include <linux/mm.h> 47 + #include <linux/dma-mapping.h> 46 48 47 49 #include <asm/atomic.h> 48 50 #include <asm/scatterlist.h> ··· 850 848 u8 *lmc_cache; 851 849 }; 852 850 851 + struct ib_dma_mapping_ops { 852 + int (*mapping_error)(struct ib_device *dev, 853 + u64 dma_addr); 854 + u64 (*map_single)(struct ib_device *dev, 855 + void *ptr, size_t size, 856 + enum dma_data_direction direction); 857 + void (*unmap_single)(struct ib_device *dev, 858 + u64 addr, size_t size, 859 + enum dma_data_direction direction); 860 + u64 (*map_page)(struct ib_device *dev, 861 + struct page *page, unsigned long offset, 862 + size_t size, 863 + enum dma_data_direction direction); 864 + void (*unmap_page)(struct ib_device *dev, 865 + u64 addr, size_t size, 866 + enum dma_data_direction direction); 867 + int (*map_sg)(struct ib_device *dev, 868 + struct scatterlist *sg, int nents, 869 + enum dma_data_direction direction); 870 + void (*unmap_sg)(struct ib_device *dev, 871 + struct scatterlist *sg, int nents, 872 + enum dma_data_direction direction); 873 + u64 (*dma_address)(struct ib_device *dev, 874 + struct scatterlist *sg); 875 + unsigned int (*dma_len)(struct ib_device *dev, 876 + struct scatterlist *sg); 877 + void (*sync_single_for_cpu)(struct ib_device *dev, 878 + u64 dma_handle, 879 + size_t size, 880 + enum dma_data_direction dir); 881 + void (*sync_single_for_device)(struct ib_device *dev, 882 + u64 dma_handle, 883 + size_t size, 884 + enum dma_data_direction dir); 885 + void *(*alloc_coherent)(struct ib_device *dev, 886 + size_t size, 887 + u64 *dma_handle, 888 + gfp_t flag); 889 + void (*free_coherent)(struct ib_device *dev, 890 + size_t size, void *cpu_addr, 891 + u64 dma_handle); 892 + }; 893 + 853 894 struct iw_cm_verbs; 854 895 855 896 struct ib_device { ··· 1036 991 struct ib_grh *in_grh, 1037 992 struct ib_mad *in_mad, 1038 993 struct ib_mad *out_mad); 994 + 995 + struct ib_dma_mapping_ops *dma_ops; 1039 996 1040 997 struct module *owner; 1041 998 struct class_device class_dev; ··· 1442 1395 * usable for DMA. 1443 1396 * @pd: The protection domain associated with the memory region. 1444 1397 * @mr_access_flags: Specifies the memory access rights. 1398 + * 1399 + * Note that the ib_dma_*() functions defined below must be used 1400 + * to create/destroy addresses used with the Lkey or Rkey returned 1401 + * by ib_get_dma_mr(). 1445 1402 */ 1446 1403 struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags); 1404 + 1405 + /** 1406 + * ib_dma_mapping_error - check a DMA addr for error 1407 + * @dev: The device for which the dma_addr was created 1408 + * @dma_addr: The DMA address to check 1409 + */ 1410 + static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr) 1411 + { 1412 + return dev->dma_ops ? 1413 + dev->dma_ops->mapping_error(dev, dma_addr) : 1414 + dma_mapping_error(dma_addr); 1415 + } 1416 + 1417 + /** 1418 + * ib_dma_map_single - Map a kernel virtual address to DMA address 1419 + * @dev: The device for which the dma_addr is to be created 1420 + * @cpu_addr: The kernel virtual address 1421 + * @size: The size of the region in bytes 1422 + * @direction: The direction of the DMA 1423 + */ 1424 + static inline u64 ib_dma_map_single(struct ib_device *dev, 1425 + void *cpu_addr, size_t size, 1426 + enum dma_data_direction direction) 1427 + { 1428 + return dev->dma_ops ? 
1429 + dev->dma_ops->map_single(dev, cpu_addr, size, direction) : 1430 + dma_map_single(dev->dma_device, cpu_addr, size, direction); 1431 + } 1432 + 1433 + /** 1434 + * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single() 1435 + * @dev: The device for which the DMA address was created 1436 + * @addr: The DMA address 1437 + * @size: The size of the region in bytes 1438 + * @direction: The direction of the DMA 1439 + */ 1440 + static inline void ib_dma_unmap_single(struct ib_device *dev, 1441 + u64 addr, size_t size, 1442 + enum dma_data_direction direction) 1443 + { 1444 + dev->dma_ops ? 1445 + dev->dma_ops->unmap_single(dev, addr, size, direction) : 1446 + dma_unmap_single(dev->dma_device, addr, size, direction); 1447 + } 1448 + 1449 + /** 1450 + * ib_dma_map_page - Map a physical page to DMA address 1451 + * @dev: The device for which the dma_addr is to be created 1452 + * @page: The page to be mapped 1453 + * @offset: The offset within the page 1454 + * @size: The size of the region in bytes 1455 + * @direction: The direction of the DMA 1456 + */ 1457 + static inline u64 ib_dma_map_page(struct ib_device *dev, 1458 + struct page *page, 1459 + unsigned long offset, 1460 + size_t size, 1461 + enum dma_data_direction direction) 1462 + { 1463 + return dev->dma_ops ? 1464 + dev->dma_ops->map_page(dev, page, offset, size, direction) : 1465 + dma_map_page(dev->dma_device, page, offset, size, direction); 1466 + } 1467 + 1468 + /** 1469 + * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page() 1470 + * @dev: The device for which the DMA address was created 1471 + * @addr: The DMA address 1472 + * @size: The size of the region in bytes 1473 + * @direction: The direction of the DMA 1474 + */ 1475 + static inline void ib_dma_unmap_page(struct ib_device *dev, 1476 + u64 addr, size_t size, 1477 + enum dma_data_direction direction) 1478 + { 1479 + dev->dma_ops ? 1480 + dev->dma_ops->unmap_page(dev, addr, size, direction) : 1481 + dma_unmap_page(dev->dma_device, addr, size, direction); 1482 + } 1483 + 1484 + /** 1485 + * ib_dma_map_sg - Map a scatter/gather list to DMA addresses 1486 + * @dev: The device for which the DMA addresses are to be created 1487 + * @sg: The array of scatter/gather entries 1488 + * @nents: The number of scatter/gather entries 1489 + * @direction: The direction of the DMA 1490 + */ 1491 + static inline int ib_dma_map_sg(struct ib_device *dev, 1492 + struct scatterlist *sg, int nents, 1493 + enum dma_data_direction direction) 1494 + { 1495 + return dev->dma_ops ? 1496 + dev->dma_ops->map_sg(dev, sg, nents, direction) : 1497 + dma_map_sg(dev->dma_device, sg, nents, direction); 1498 + } 1499 + 1500 + /** 1501 + * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses 1502 + * @dev: The device for which the DMA addresses were created 1503 + * @sg: The array of scatter/gather entries 1504 + * @nents: The number of scatter/gather entries 1505 + * @direction: The direction of the DMA 1506 + */ 1507 + static inline void ib_dma_unmap_sg(struct ib_device *dev, 1508 + struct scatterlist *sg, int nents, 1509 + enum dma_data_direction direction) 1510 + { 1511 + dev->dma_ops ? 
1512 + dev->dma_ops->unmap_sg(dev, sg, nents, direction) : 1513 + dma_unmap_sg(dev->dma_device, sg, nents, direction); 1514 + } 1515 + 1516 + /** 1517 + * ib_sg_dma_address - Return the DMA address from a scatter/gather entry 1518 + * @dev: The device for which the DMA addresses were created 1519 + * @sg: The scatter/gather entry 1520 + */ 1521 + static inline u64 ib_sg_dma_address(struct ib_device *dev, 1522 + struct scatterlist *sg) 1523 + { 1524 + return dev->dma_ops ? 1525 + dev->dma_ops->dma_address(dev, sg) : sg_dma_address(sg); 1526 + } 1527 + 1528 + /** 1529 + * ib_sg_dma_len - Return the DMA length from a scatter/gather entry 1530 + * @dev: The device for which the DMA addresses were created 1531 + * @sg: The scatter/gather entry 1532 + */ 1533 + static inline unsigned int ib_sg_dma_len(struct ib_device *dev, 1534 + struct scatterlist *sg) 1535 + { 1536 + return dev->dma_ops ? 1537 + dev->dma_ops->dma_len(dev, sg) : sg_dma_len(sg); 1538 + } 1539 + 1540 + /** 1541 + * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU 1542 + * @dev: The device for which the DMA address was created 1543 + * @addr: The DMA address 1544 + * @size: The size of the region in bytes 1545 + * @dir: The direction of the DMA 1546 + */ 1547 + static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev, 1548 + u64 addr, 1549 + size_t size, 1550 + enum dma_data_direction dir) 1551 + { 1552 + dev->dma_ops ? 1553 + dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir) : 1554 + dma_sync_single_for_cpu(dev->dma_device, addr, size, dir); 1555 + } 1556 + 1557 + /** 1558 + * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device 1559 + * @dev: The device for which the DMA address was created 1560 + * @addr: The DMA address 1561 + * @size: The size of the region in bytes 1562 + * @dir: The direction of the DMA 1563 + */ 1564 + static inline void ib_dma_sync_single_for_device(struct ib_device *dev, 1565 + u64 addr, 1566 + size_t size, 1567 + enum dma_data_direction dir) 1568 + { 1569 + dev->dma_ops ? 1570 + dev->dma_ops->sync_single_for_device(dev, addr, size, dir) : 1571 + dma_sync_single_for_device(dev->dma_device, addr, size, dir); 1572 + } 1573 + 1574 + /** 1575 + * ib_dma_alloc_coherent - Allocate memory and map it for DMA 1576 + * @dev: The device for which the DMA address is requested 1577 + * @size: The size of the region to allocate in bytes 1578 + * @dma_handle: A pointer for returning the DMA address of the region 1579 + * @flag: memory allocator flags 1580 + */ 1581 + static inline void *ib_dma_alloc_coherent(struct ib_device *dev, 1582 + size_t size, 1583 + u64 *dma_handle, 1584 + gfp_t flag) 1585 + { 1586 + return dev->dma_ops ? 1587 + dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag) : 1588 + dma_alloc_coherent(dev->dma_device, size, dma_handle, flag); 1589 + } 1590 + 1591 + /** 1592 + * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent() 1593 + * @dev: The device for which the DMA addresses were allocated 1594 + * @size: The size of the region 1595 + * @cpu_addr: the address returned by ib_dma_alloc_coherent() 1596 + * @dma_handle: the DMA address returned by ib_dma_alloc_coherent() 1597 + */ 1598 + static inline void ib_dma_free_coherent(struct ib_device *dev, 1599 + size_t size, void *cpu_addr, 1600 + u64 dma_handle) 1601 + { 1602 + dev->dma_ops ? 
1603 + dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle) : 1604 + dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle); 1605 + } 1447 1606 1448 1607 /** 1449 1608 * ib_reg_phys_mr - Prepares a virtually addressed memory region for use
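The helpers above are the whole interposition mechanism: each ib_dma_*() call goes through dev->dma_ops when a driver supplies one (ipath, earlier in this diff) and otherwise falls straight through to the ordinary dma_*() API on dev->dma_device. A minimal, hedged sketch of the ULP-side calling convention that the IPoIB/SRP/iSER conversions in this merge follow; ibdev, buf and len are placeholders.

/* Lifecycle of a receive buffer under the new wrappers (fragment). */
u64 addr;

addr = ib_dma_map_single(ibdev, buf, len, DMA_FROM_DEVICE);
if (ib_dma_mapping_error(ibdev, addr))
	return -ENOMEM;

/* ... post a receive WR with sge.addr = addr, sge.length = len ... */

/* On completion, hand the buffer to the CPU before parsing it, and give
 * it back to the device if the same mapping is reposted. */
ib_dma_sync_single_for_cpu(ibdev, addr, len, DMA_FROM_DEVICE);
/* ... parse the received bytes ... */
ib_dma_sync_single_for_device(ibdev, addr, len, DMA_FROM_DEVICE);

/* Or tear the mapping down entirely. */
ib_dma_unmap_single(ibdev, addr, len, DMA_FROM_DEVICE);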
+46 -16
include/rdma/rdma_cm.h
··· 77 77 int num_paths; 78 78 }; 79 79 80 + struct rdma_conn_param { 81 + const void *private_data; 82 + u8 private_data_len; 83 + u8 responder_resources; 84 + u8 initiator_depth; 85 + u8 flow_control; 86 + u8 retry_count; /* ignored when accepting */ 87 + u8 rnr_retry_count; 88 + /* Fields below ignored if a QP is created on the rdma_cm_id. */ 89 + u8 srq; 90 + u32 qp_num; 91 + }; 92 + 93 + struct rdma_ud_param { 94 + const void *private_data; 95 + u8 private_data_len; 96 + struct ib_ah_attr ah_attr; 97 + u32 qp_num; 98 + u32 qkey; 99 + }; 100 + 80 101 struct rdma_cm_event { 81 102 enum rdma_cm_event_type event; 82 103 int status; 83 - void *private_data; 84 - u8 private_data_len; 104 + union { 105 + struct rdma_conn_param conn; 106 + struct rdma_ud_param ud; 107 + } param; 85 108 }; 86 109 87 110 struct rdma_cm_id; ··· 227 204 int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr, 228 205 int *qp_attr_mask); 229 206 230 - struct rdma_conn_param { 231 - const void *private_data; 232 - u8 private_data_len; 233 - u8 responder_resources; 234 - u8 initiator_depth; 235 - u8 flow_control; 236 - u8 retry_count; /* ignored when accepting */ 237 - u8 rnr_retry_count; 238 - /* Fields below ignored if a QP is created on the rdma_cm_id. */ 239 - u8 srq; 240 - u32 qp_num; 241 - enum ib_qp_type qp_type; 242 - }; 243 - 244 207 /** 245 208 * rdma_connect - Initiate an active connection request. 209 + * @id: Connection identifier to connect. 210 + * @conn_param: Connection information used for connected QPs. 246 211 * 247 212 * Users must have resolved a route for the rdma_cm_id to connect with 248 213 * by having called rdma_resolve_route before calling this routine. 214 + * 215 + * This call will either connect to a remote QP or obtain remote QP 216 + * information for unconnected rdma_cm_id's. The actual operation is 217 + * based on the rdma_cm_id's port space. 249 218 */ 250 219 int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param); 251 220 ··· 266 251 * previously posted receive buffers would be flushed. 267 252 */ 268 253 int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param); 254 + 255 + /** 256 + * rdma_notify - Notifies the RDMA CM of an asynchronous event that has 257 + * occurred on the connection. 258 + * @id: Connection identifier to transition to established. 259 + * @event: Asynchronous event. 260 + * 261 + * This routine should be invoked by users to notify the CM of relevant 262 + * communication events. Events that should be reported to the CM and 263 + * when to report them are: 264 + * 265 + * IB_EVENT_COMM_EST - Used when a message is received on a connected 266 + * QP before an RTU has been received. 267 + */ 268 + int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event); 269 269 270 270 /** 271 271 * rdma_reject - Called to reject a connection request or response.
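Connect data now arrives through the event's param union (param.conn for connected port spaces, param.ud for RDMA_PS_UDP), and rdma_notify() lets a consumer report QP async events such as IB_EVENT_COMM_EST back to the CM. A hedged sketch of an event handler using the new fields; my_cm_handler(), my_ctx_is_udp(), setup_dgram_dest() and note_conn_limits() are hypothetical consumer helpers, not taken from any in-tree ULP.

static int my_cm_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
	switch (event->event) {
	case RDMA_CM_EVENT_ESTABLISHED:
		if (my_ctx_is_udp(id->context))
			/* RDMA_PS_UDP: remote QPN, qkey and AH arrive in param.ud */
			setup_dgram_dest(id->context, &event->param.ud);
		else
			/* connected port spaces: negotiated depths in param.conn */
			note_conn_limits(id->context,
					 event->param.conn.initiator_depth,
					 event->param.conn.responder_resources);
		break;
	default:
		break;
	}
	return 0;
}

/* If data arrives on a connected QP before the RTU (IB_EVENT_COMM_EST QP
 * async event), the consumer kicks the CM into the established state: */
rdma_notify(id, IB_EVENT_COMM_EST);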
+3
include/rdma/rdma_cm_ib.h
··· 44 44 int rdma_set_ib_paths(struct rdma_cm_id *id, 45 45 struct ib_sa_path_rec *path_rec, int num_paths); 46 46 47 + /* Global qkey for UD QPs and multicast groups. */ 48 + #define RDMA_UD_QKEY 0x01234567 49 + 47 50 #endif /* RDMA_CM_IB_H */
+206
include/rdma/rdma_user_cm.h
··· 1 + /* 2 + * Copyright (c) 2005-2006 Intel Corporation. All rights reserved. 3 + * 4 + * This software is available to you under a choice of one of two 5 + * licenses. You may choose to be licensed under the terms of the GNU 6 + * General Public License (GPL) Version 2, available from the file 7 + * COPYING in the main directory of this source tree, or the 8 + * OpenIB.org BSD license below: 9 + * 10 + * Redistribution and use in source and binary forms, with or 11 + * without modification, are permitted provided that the following 12 + * conditions are met: 13 + * 14 + * - Redistributions of source code must retain the above 15 + * copyright notice, this list of conditions and the following 16 + * disclaimer. 17 + * 18 + * - Redistributions in binary form must reproduce the above 19 + * copyright notice, this list of conditions and the following 20 + * disclaimer in the documentation and/or other materials 21 + * provided with the distribution. 22 + * 23 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 + * SOFTWARE. 31 + */ 32 + 33 + #ifndef RDMA_USER_CM_H 34 + #define RDMA_USER_CM_H 35 + 36 + #include <linux/types.h> 37 + #include <linux/in6.h> 38 + #include <rdma/ib_user_verbs.h> 39 + #include <rdma/ib_user_sa.h> 40 + 41 + #define RDMA_USER_CM_ABI_VERSION 3 42 + 43 + #define RDMA_MAX_PRIVATE_DATA 256 44 + 45 + enum { 46 + RDMA_USER_CM_CMD_CREATE_ID, 47 + RDMA_USER_CM_CMD_DESTROY_ID, 48 + RDMA_USER_CM_CMD_BIND_ADDR, 49 + RDMA_USER_CM_CMD_RESOLVE_ADDR, 50 + RDMA_USER_CM_CMD_RESOLVE_ROUTE, 51 + RDMA_USER_CM_CMD_QUERY_ROUTE, 52 + RDMA_USER_CM_CMD_CONNECT, 53 + RDMA_USER_CM_CMD_LISTEN, 54 + RDMA_USER_CM_CMD_ACCEPT, 55 + RDMA_USER_CM_CMD_REJECT, 56 + RDMA_USER_CM_CMD_DISCONNECT, 57 + RDMA_USER_CM_CMD_INIT_QP_ATTR, 58 + RDMA_USER_CM_CMD_GET_EVENT, 59 + RDMA_USER_CM_CMD_GET_OPTION, 60 + RDMA_USER_CM_CMD_SET_OPTION, 61 + RDMA_USER_CM_CMD_NOTIFY 62 + }; 63 + 64 + /* 65 + * command ABI structures. 
66 + */ 67 + struct rdma_ucm_cmd_hdr { 68 + __u32 cmd; 69 + __u16 in; 70 + __u16 out; 71 + }; 72 + 73 + struct rdma_ucm_create_id { 74 + __u64 uid; 75 + __u64 response; 76 + __u16 ps; 77 + __u8 reserved[6]; 78 + }; 79 + 80 + struct rdma_ucm_create_id_resp { 81 + __u32 id; 82 + }; 83 + 84 + struct rdma_ucm_destroy_id { 85 + __u64 response; 86 + __u32 id; 87 + __u32 reserved; 88 + }; 89 + 90 + struct rdma_ucm_destroy_id_resp { 91 + __u32 events_reported; 92 + }; 93 + 94 + struct rdma_ucm_bind_addr { 95 + __u64 response; 96 + struct sockaddr_in6 addr; 97 + __u32 id; 98 + }; 99 + 100 + struct rdma_ucm_resolve_addr { 101 + struct sockaddr_in6 src_addr; 102 + struct sockaddr_in6 dst_addr; 103 + __u32 id; 104 + __u32 timeout_ms; 105 + }; 106 + 107 + struct rdma_ucm_resolve_route { 108 + __u32 id; 109 + __u32 timeout_ms; 110 + }; 111 + 112 + struct rdma_ucm_query_route { 113 + __u64 response; 114 + __u32 id; 115 + __u32 reserved; 116 + }; 117 + 118 + struct rdma_ucm_query_route_resp { 119 + __u64 node_guid; 120 + struct ib_user_path_rec ib_route[2]; 121 + struct sockaddr_in6 src_addr; 122 + struct sockaddr_in6 dst_addr; 123 + __u32 num_paths; 124 + __u8 port_num; 125 + __u8 reserved[3]; 126 + }; 127 + 128 + struct rdma_ucm_conn_param { 129 + __u32 qp_num; 130 + __u32 reserved; 131 + __u8 private_data[RDMA_MAX_PRIVATE_DATA]; 132 + __u8 private_data_len; 133 + __u8 srq; 134 + __u8 responder_resources; 135 + __u8 initiator_depth; 136 + __u8 flow_control; 137 + __u8 retry_count; 138 + __u8 rnr_retry_count; 139 + __u8 valid; 140 + }; 141 + 142 + struct rdma_ucm_ud_param { 143 + __u32 qp_num; 144 + __u32 qkey; 145 + struct ib_uverbs_ah_attr ah_attr; 146 + __u8 private_data[RDMA_MAX_PRIVATE_DATA]; 147 + __u8 private_data_len; 148 + __u8 reserved[7]; 149 + }; 150 + 151 + struct rdma_ucm_connect { 152 + struct rdma_ucm_conn_param conn_param; 153 + __u32 id; 154 + __u32 reserved; 155 + }; 156 + 157 + struct rdma_ucm_listen { 158 + __u32 id; 159 + __u32 backlog; 160 + }; 161 + 162 + struct rdma_ucm_accept { 163 + __u64 uid; 164 + struct rdma_ucm_conn_param conn_param; 165 + __u32 id; 166 + __u32 reserved; 167 + }; 168 + 169 + struct rdma_ucm_reject { 170 + __u32 id; 171 + __u8 private_data_len; 172 + __u8 reserved[3]; 173 + __u8 private_data[RDMA_MAX_PRIVATE_DATA]; 174 + }; 175 + 176 + struct rdma_ucm_disconnect { 177 + __u32 id; 178 + }; 179 + 180 + struct rdma_ucm_init_qp_attr { 181 + __u64 response; 182 + __u32 id; 183 + __u32 qp_state; 184 + }; 185 + 186 + struct rdma_ucm_notify { 187 + __u32 id; 188 + __u32 event; 189 + }; 190 + 191 + struct rdma_ucm_get_event { 192 + __u64 response; 193 + }; 194 + 195 + struct rdma_ucm_event_resp { 196 + __u64 uid; 197 + __u32 id; 198 + __u32 event; 199 + __u32 status; 200 + union { 201 + struct rdma_ucm_conn_param conn; 202 + struct rdma_ucm_ud_param ud; 203 + } param; 204 + }; 205 + 206 + #endif /* RDMA_USER_CM_H */
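Event delivery over this ABI mirrors the kernel side shown earlier: userspace writes an RDMA_USER_CM_CMD_GET_EVENT command whose response pointer names a struct rdma_ucm_event_resp, and the kernel fills in the id, event code, status and either the conn or ud half of the union. A hedged userspace fragment, reusing the illustrative ucma_cmd() helper and open()ed fd from the sketch after the ucma.c diff above, with the same /dev/rdma_cm assumption.

struct rdma_ucm_get_event cmd;
struct rdma_ucm_event_resp evt;

memset(&cmd, 0, sizeof(cmd));
cmd.response = (uintptr_t) &evt;	/* kernel copies the event here */

/* poll() on the fd (see ucma_poll() above) reports when one is pending.
 * The conn half of the union applies to connected port spaces; UD
 * (RDMA_PS_UDP) events fill evt.param.ud instead. */
if (!ucma_cmd(fd, RDMA_USER_CM_CMD_GET_EVENT, &cmd, sizeof(cmd), sizeof(evt)))
	printf("uid %llu id %u: event %u status %d\n",
	       (unsigned long long) evt.uid, evt.id, evt.event, (int) evt.status);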