Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

RDMA/cma: Export enum cma_state in <rdma/rdma_cm.h>

Move cma.c's internal definition of enum cma_state to enum rdma_cm_state
in an exported header so that it can be exported via RDMA netlink.

Signed-off-by: Nir Muchtar <nirm@voltaire.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>

Authored by Nir Muchtar; committed by Roland Dreier.
550e5ca7 b2cbae2c

+94 -90
+80 -90
drivers/infiniband/core/cma.c
··· 89 89 struct list_head id_list; 90 90 }; 91 91 92 - enum cma_state { 93 - CMA_IDLE, 94 - CMA_ADDR_QUERY, 95 - CMA_ADDR_RESOLVED, 96 - CMA_ROUTE_QUERY, 97 - CMA_ROUTE_RESOLVED, 98 - CMA_CONNECT, 99 - CMA_DISCONNECT, 100 - CMA_ADDR_BOUND, 101 - CMA_LISTEN, 102 - CMA_DEVICE_REMOVAL, 103 - CMA_DESTROYING 104 - }; 105 - 106 92 struct rdma_bind_list { 107 93 struct idr *ps; 108 94 struct hlist_head owners; ··· 112 126 struct list_head mc_list; 113 127 114 128 int internal_id; 115 - enum cma_state state; 129 + enum rdma_cm_state state; 116 130 spinlock_t lock; 117 131 struct mutex qp_mutex; 118 132 ··· 151 165 struct cma_work { 152 166 struct work_struct work; 153 167 struct rdma_id_private *id; 154 - enum cma_state old_state; 155 - enum cma_state new_state; 168 + enum rdma_cm_state old_state; 169 + enum rdma_cm_state new_state; 156 170 struct rdma_cm_event event; 157 171 }; 158 172 ··· 203 217 #define CMA_VERSION 0x00 204 218 #define SDP_MAJ_VERSION 0x2 205 219 206 - static int cma_comp(struct rdma_id_private *id_priv, enum cma_state comp) 220 + static int cma_comp(struct rdma_id_private *id_priv, enum rdma_cm_state comp) 207 221 { 208 222 unsigned long flags; 209 223 int ret; ··· 215 229 } 216 230 217 231 static int cma_comp_exch(struct rdma_id_private *id_priv, 218 - enum cma_state comp, enum cma_state exch) 232 + enum rdma_cm_state comp, enum rdma_cm_state exch) 219 233 { 220 234 unsigned long flags; 221 235 int ret; ··· 227 241 return ret; 228 242 } 229 243 230 - static enum cma_state cma_exch(struct rdma_id_private *id_priv, 231 - enum cma_state exch) 244 + static enum rdma_cm_state cma_exch(struct rdma_id_private *id_priv, 245 + enum rdma_cm_state exch) 232 246 { 233 247 unsigned long flags; 234 - enum cma_state old; 248 + enum rdma_cm_state old; 235 249 236 250 spin_lock_irqsave(&id_priv->lock, flags); 237 251 old = id_priv->state; ··· 399 413 } 400 414 401 415 static int cma_disable_callback(struct rdma_id_private *id_priv, 402 - enum cma_state state) 416 + 
enum rdma_cm_state state) 403 417 { 404 418 mutex_lock(&id_priv->handler_mutex); 405 419 if (id_priv->state != state) { ··· 423 437 if (!id_priv) 424 438 return ERR_PTR(-ENOMEM); 425 439 426 - id_priv->state = CMA_IDLE; 440 + id_priv->state = RDMA_CM_IDLE; 427 441 id_priv->id.context = context; 428 442 id_priv->id.event_handler = event_handler; 429 443 id_priv->id.ps = ps; ··· 844 858 } 845 859 846 860 static void cma_cancel_operation(struct rdma_id_private *id_priv, 847 - enum cma_state state) 861 + enum rdma_cm_state state) 848 862 { 849 863 switch (state) { 850 - case CMA_ADDR_QUERY: 864 + case RDMA_CM_ADDR_QUERY: 851 865 rdma_addr_cancel(&id_priv->id.route.addr.dev_addr); 852 866 break; 853 - case CMA_ROUTE_QUERY: 867 + case RDMA_CM_ROUTE_QUERY: 854 868 cma_cancel_route(id_priv); 855 869 break; 856 - case CMA_LISTEN: 870 + case RDMA_CM_LISTEN: 857 871 if (cma_any_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr) 858 872 && !id_priv->cma_dev) 859 873 cma_cancel_listens(id_priv); ··· 904 918 void rdma_destroy_id(struct rdma_cm_id *id) 905 919 { 906 920 struct rdma_id_private *id_priv; 907 - enum cma_state state; 921 + enum rdma_cm_state state; 908 922 909 923 id_priv = container_of(id, struct rdma_id_private, id); 910 - state = cma_exch(id_priv, CMA_DESTROYING); 924 + state = cma_exch(id_priv, RDMA_CM_DESTROYING); 911 925 cma_cancel_operation(id_priv, state); 912 926 913 927 /* ··· 1001 1015 int ret = 0; 1002 1016 1003 1017 if ((ib_event->event != IB_CM_TIMEWAIT_EXIT && 1004 - cma_disable_callback(id_priv, CMA_CONNECT)) || 1018 + cma_disable_callback(id_priv, RDMA_CM_CONNECT)) || 1005 1019 (ib_event->event == IB_CM_TIMEWAIT_EXIT && 1006 - cma_disable_callback(id_priv, CMA_DISCONNECT))) 1020 + cma_disable_callback(id_priv, RDMA_CM_DISCONNECT))) 1007 1021 return 0; 1008 1022 1009 1023 memset(&event, 0, sizeof event); ··· 1034 1048 event.status = -ETIMEDOUT; /* fall through */ 1035 1049 case IB_CM_DREQ_RECEIVED: 1036 1050 case IB_CM_DREP_RECEIVED: 1037 - if 
(!cma_comp_exch(id_priv, CMA_CONNECT, CMA_DISCONNECT)) 1051 + if (!cma_comp_exch(id_priv, RDMA_CM_CONNECT, 1052 + RDMA_CM_DISCONNECT)) 1038 1053 goto out; 1039 1054 event.event = RDMA_CM_EVENT_DISCONNECTED; 1040 1055 break; ··· 1062 1075 if (ret) { 1063 1076 /* Destroy the CM ID by returning a non-zero value. */ 1064 1077 id_priv->cm_id.ib = NULL; 1065 - cma_exch(id_priv, CMA_DESTROYING); 1078 + cma_exch(id_priv, RDMA_CM_DESTROYING); 1066 1079 mutex_unlock(&id_priv->handler_mutex); 1067 1080 rdma_destroy_id(&id_priv->id); 1068 1081 return ret; ··· 1119 1132 rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid); 1120 1133 1121 1134 id_priv = container_of(id, struct rdma_id_private, id); 1122 - id_priv->state = CMA_CONNECT; 1135 + id_priv->state = RDMA_CM_CONNECT; 1123 1136 return id_priv; 1124 1137 1125 1138 destroy_id: ··· 1159 1172 } 1160 1173 1161 1174 id_priv = container_of(id, struct rdma_id_private, id); 1162 - id_priv->state = CMA_CONNECT; 1175 + id_priv->state = RDMA_CM_CONNECT; 1163 1176 return id_priv; 1164 1177 err: 1165 1178 rdma_destroy_id(id); ··· 1188 1201 int offset, ret; 1189 1202 1190 1203 listen_id = cm_id->context; 1191 - if (cma_disable_callback(listen_id, CMA_LISTEN)) 1204 + if (cma_disable_callback(listen_id, RDMA_CM_LISTEN)) 1192 1205 return -ECONNABORTED; 1193 1206 1194 1207 memset(&event, 0, sizeof event); ··· 1230 1243 * while we're accessing the cm_id. 
1231 1244 */ 1232 1245 mutex_lock(&lock); 1233 - if (cma_comp(conn_id, CMA_CONNECT) && 1246 + if (cma_comp(conn_id, RDMA_CM_CONNECT) && 1234 1247 !cma_is_ud_ps(conn_id->id.ps)) 1235 1248 ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0); 1236 1249 mutex_unlock(&lock); ··· 1244 1257 conn_id->cm_id.ib = NULL; 1245 1258 1246 1259 release_conn_id: 1247 - cma_exch(conn_id, CMA_DESTROYING); 1260 + cma_exch(conn_id, RDMA_CM_DESTROYING); 1248 1261 mutex_unlock(&conn_id->handler_mutex); 1249 1262 rdma_destroy_id(&conn_id->id); 1250 1263 ··· 1315 1328 struct sockaddr_in *sin; 1316 1329 int ret = 0; 1317 1330 1318 - if (cma_disable_callback(id_priv, CMA_CONNECT)) 1331 + if (cma_disable_callback(id_priv, RDMA_CM_CONNECT)) 1319 1332 return 0; 1320 1333 1321 1334 memset(&event, 0, sizeof event); ··· 1358 1371 if (ret) { 1359 1372 /* Destroy the CM ID by returning a non-zero value. */ 1360 1373 id_priv->cm_id.iw = NULL; 1361 - cma_exch(id_priv, CMA_DESTROYING); 1374 + cma_exch(id_priv, RDMA_CM_DESTROYING); 1362 1375 mutex_unlock(&id_priv->handler_mutex); 1363 1376 rdma_destroy_id(&id_priv->id); 1364 1377 return ret; ··· 1380 1393 struct ib_device_attr attr; 1381 1394 1382 1395 listen_id = cm_id->context; 1383 - if (cma_disable_callback(listen_id, CMA_LISTEN)) 1396 + if (cma_disable_callback(listen_id, RDMA_CM_LISTEN)) 1384 1397 return -ECONNABORTED; 1385 1398 1386 1399 /* Create a new RDMA id for the new IW CM ID */ ··· 1393 1406 } 1394 1407 conn_id = container_of(new_cm_id, struct rdma_id_private, id); 1395 1408 mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING); 1396 - conn_id->state = CMA_CONNECT; 1409 + conn_id->state = RDMA_CM_CONNECT; 1397 1410 1398 1411 dev = ip_dev_find(&init_net, iw_event->local_addr.sin_addr.s_addr); 1399 1412 if (!dev) { ··· 1448 1461 if (ret) { 1449 1462 /* User wants to destroy the CM ID */ 1450 1463 conn_id->cm_id.iw = NULL; 1451 - cma_exch(conn_id, CMA_DESTROYING); 1464 + cma_exch(conn_id, RDMA_CM_DESTROYING); 1452 1465 
mutex_unlock(&conn_id->handler_mutex); 1453 1466 cma_deref_id(conn_id); 1454 1467 rdma_destroy_id(&conn_id->id); ··· 1541 1554 1542 1555 dev_id_priv = container_of(id, struct rdma_id_private, id); 1543 1556 1544 - dev_id_priv->state = CMA_ADDR_BOUND; 1557 + dev_id_priv->state = RDMA_CM_ADDR_BOUND; 1545 1558 memcpy(&id->route.addr.src_addr, &id_priv->id.route.addr.src_addr, 1546 1559 ip_addr_size((struct sockaddr *) &id_priv->id.route.addr.src_addr)); 1547 1560 ··· 1588 1601 route->num_paths = 1; 1589 1602 *route->path_rec = *path_rec; 1590 1603 } else { 1591 - work->old_state = CMA_ROUTE_QUERY; 1592 - work->new_state = CMA_ADDR_RESOLVED; 1604 + work->old_state = RDMA_CM_ROUTE_QUERY; 1605 + work->new_state = RDMA_CM_ADDR_RESOLVED; 1593 1606 work->event.event = RDMA_CM_EVENT_ROUTE_ERROR; 1594 1607 work->event.status = status; 1595 1608 } ··· 1647 1660 goto out; 1648 1661 1649 1662 if (id_priv->id.event_handler(&id_priv->id, &work->event)) { 1650 - cma_exch(id_priv, CMA_DESTROYING); 1663 + cma_exch(id_priv, RDMA_CM_DESTROYING); 1651 1664 destroy = 1; 1652 1665 } 1653 1666 out: ··· 1665 1678 int destroy = 0; 1666 1679 1667 1680 mutex_lock(&id_priv->handler_mutex); 1668 - if (id_priv->state == CMA_DESTROYING || 1669 - id_priv->state == CMA_DEVICE_REMOVAL) 1681 + if (id_priv->state == RDMA_CM_DESTROYING || 1682 + id_priv->state == RDMA_CM_DEVICE_REMOVAL) 1670 1683 goto out; 1671 1684 1672 1685 if (id_priv->id.event_handler(&id_priv->id, &work->event)) { 1673 - cma_exch(id_priv, CMA_DESTROYING); 1686 + cma_exch(id_priv, RDMA_CM_DESTROYING); 1674 1687 destroy = 1; 1675 1688 } 1676 1689 ··· 1694 1707 1695 1708 work->id = id_priv; 1696 1709 INIT_WORK(&work->work, cma_work_handler); 1697 - work->old_state = CMA_ROUTE_QUERY; 1698 - work->new_state = CMA_ROUTE_RESOLVED; 1710 + work->old_state = RDMA_CM_ROUTE_QUERY; 1711 + work->new_state = RDMA_CM_ROUTE_RESOLVED; 1699 1712 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; 1700 1713 1701 1714 route->path_rec = kmalloc(sizeof 
*route->path_rec, GFP_KERNEL); ··· 1724 1737 int ret; 1725 1738 1726 1739 id_priv = container_of(id, struct rdma_id_private, id); 1727 - if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_RESOLVED)) 1740 + if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, 1741 + RDMA_CM_ROUTE_RESOLVED)) 1728 1742 return -EINVAL; 1729 1743 1730 1744 id->route.path_rec = kmemdup(path_rec, sizeof *path_rec * num_paths, ··· 1738 1750 id->route.num_paths = num_paths; 1739 1751 return 0; 1740 1752 err: 1741 - cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_ADDR_RESOLVED); 1753 + cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_ADDR_RESOLVED); 1742 1754 return ret; 1743 1755 } 1744 1756 EXPORT_SYMBOL(rdma_set_ib_paths); ··· 1753 1765 1754 1766 work->id = id_priv; 1755 1767 INIT_WORK(&work->work, cma_work_handler); 1756 - work->old_state = CMA_ROUTE_QUERY; 1757 - work->new_state = CMA_ROUTE_RESOLVED; 1768 + work->old_state = RDMA_CM_ROUTE_QUERY; 1769 + work->new_state = RDMA_CM_ROUTE_RESOLVED; 1758 1770 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; 1759 1771 queue_work(cma_wq, &work->work); 1760 1772 return 0; ··· 1818 1830 goto err2; 1819 1831 } 1820 1832 1821 - work->old_state = CMA_ROUTE_QUERY; 1822 - work->new_state = CMA_ROUTE_RESOLVED; 1833 + work->old_state = RDMA_CM_ROUTE_QUERY; 1834 + work->new_state = RDMA_CM_ROUTE_RESOLVED; 1823 1835 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; 1824 1836 work->event.status = 0; 1825 1837 ··· 1841 1853 int ret; 1842 1854 1843 1855 id_priv = container_of(id, struct rdma_id_private, id); 1844 - if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_QUERY)) 1856 + if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, RDMA_CM_ROUTE_QUERY)) 1845 1857 return -EINVAL; 1846 1858 1847 1859 atomic_inc(&id_priv->refcount); ··· 1870 1882 1871 1883 return 0; 1872 1884 err: 1873 - cma_comp_exch(id_priv, CMA_ROUTE_QUERY, CMA_ADDR_RESOLVED); 1885 + cma_comp_exch(id_priv, RDMA_CM_ROUTE_QUERY, RDMA_CM_ADDR_RESOLVED); 1874 1886 
cma_deref_id(id_priv); 1875 1887 return ret; 1876 1888 } ··· 1929 1941 1930 1942 memset(&event, 0, sizeof event); 1931 1943 mutex_lock(&id_priv->handler_mutex); 1932 - if (!cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_RESOLVED)) 1944 + if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, 1945 + RDMA_CM_ADDR_RESOLVED)) 1933 1946 goto out; 1934 1947 1935 1948 if (!status && !id_priv->cma_dev) 1936 1949 status = cma_acquire_dev(id_priv); 1937 1950 1938 1951 if (status) { 1939 - if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ADDR_BOUND)) 1952 + if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, 1953 + RDMA_CM_ADDR_BOUND)) 1940 1954 goto out; 1941 1955 event.event = RDMA_CM_EVENT_ADDR_ERROR; 1942 1956 event.status = status; ··· 1949 1959 } 1950 1960 1951 1961 if (id_priv->id.event_handler(&id_priv->id, &event)) { 1952 - cma_exch(id_priv, CMA_DESTROYING); 1962 + cma_exch(id_priv, RDMA_CM_DESTROYING); 1953 1963 mutex_unlock(&id_priv->handler_mutex); 1954 1964 cma_deref_id(id_priv); 1955 1965 rdma_destroy_id(&id_priv->id); ··· 1994 2004 1995 2005 work->id = id_priv; 1996 2006 INIT_WORK(&work->work, cma_work_handler); 1997 - work->old_state = CMA_ADDR_QUERY; 1998 - work->new_state = CMA_ADDR_RESOLVED; 2007 + work->old_state = RDMA_CM_ADDR_QUERY; 2008 + work->new_state = RDMA_CM_ADDR_RESOLVED; 1999 2009 work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED; 2000 2010 queue_work(cma_wq, &work->work); 2001 2011 return 0; ··· 2024 2034 int ret; 2025 2035 2026 2036 id_priv = container_of(id, struct rdma_id_private, id); 2027 - if (id_priv->state == CMA_IDLE) { 2037 + if (id_priv->state == RDMA_CM_IDLE) { 2028 2038 ret = cma_bind_addr(id, src_addr, dst_addr); 2029 2039 if (ret) 2030 2040 return ret; 2031 2041 } 2032 2042 2033 - if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_ADDR_QUERY)) 2043 + if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY)) 2034 2044 return -EINVAL; 2035 2045 2036 2046 atomic_inc(&id_priv->refcount); ··· 2046 2056 2047 2057 return 0; 2048 2058 
err: 2049 - cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_BOUND); 2059 + cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND); 2050 2060 cma_deref_id(id_priv); 2051 2061 return ret; 2052 2062 } ··· 2060 2070 2061 2071 id_priv = container_of(id, struct rdma_id_private, id); 2062 2072 spin_lock_irqsave(&id_priv->lock, flags); 2063 - if (id_priv->state == CMA_IDLE) { 2073 + if (id_priv->state == RDMA_CM_IDLE) { 2064 2074 id_priv->reuseaddr = reuse; 2065 2075 ret = 0; 2066 2076 } else { ··· 2167 2177 if (id_priv == cur_id) 2168 2178 continue; 2169 2179 2170 - if ((cur_id->state == CMA_LISTEN) || 2180 + if ((cur_id->state == RDMA_CM_LISTEN) || 2171 2181 !reuseaddr || !cur_id->reuseaddr) { 2172 2182 cur_addr = (struct sockaddr *) &cur_id->id.route.addr.src_addr; 2173 2183 if (cma_any_addr(cur_addr)) ··· 2270 2280 int ret; 2271 2281 2272 2282 id_priv = container_of(id, struct rdma_id_private, id); 2273 - if (id_priv->state == CMA_IDLE) { 2283 + if (id_priv->state == RDMA_CM_IDLE) { 2274 2284 ((struct sockaddr *) &id->route.addr.src_addr)->sa_family = AF_INET; 2275 2285 ret = rdma_bind_addr(id, (struct sockaddr *) &id->route.addr.src_addr); 2276 2286 if (ret) 2277 2287 return ret; 2278 2288 } 2279 2289 2280 - if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_LISTEN)) 2290 + if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN)) 2281 2291 return -EINVAL; 2282 2292 2283 2293 if (id_priv->reuseaddr) { ··· 2309 2319 return 0; 2310 2320 err: 2311 2321 id_priv->backlog = 0; 2312 - cma_comp_exch(id_priv, CMA_LISTEN, CMA_ADDR_BOUND); 2322 + cma_comp_exch(id_priv, RDMA_CM_LISTEN, RDMA_CM_ADDR_BOUND); 2313 2323 return ret; 2314 2324 } 2315 2325 EXPORT_SYMBOL(rdma_listen); ··· 2323 2333 return -EAFNOSUPPORT; 2324 2334 2325 2335 id_priv = container_of(id, struct rdma_id_private, id); 2326 - if (!cma_comp_exch(id_priv, CMA_IDLE, CMA_ADDR_BOUND)) 2336 + if (!cma_comp_exch(id_priv, RDMA_CM_IDLE, RDMA_CM_ADDR_BOUND)) 2327 2337 return -EINVAL; 2328 2338 2329 2339 ret = 
cma_check_linklocal(&id->route.addr.dev_addr, addr); ··· 2350 2360 if (id_priv->cma_dev) 2351 2361 cma_release_dev(id_priv); 2352 2362 err1: 2353 - cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_IDLE); 2363 + cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_IDLE); 2354 2364 return ret; 2355 2365 } 2356 2366 EXPORT_SYMBOL(rdma_bind_addr); ··· 2423 2433 struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd; 2424 2434 int ret = 0; 2425 2435 2426 - if (cma_disable_callback(id_priv, CMA_CONNECT)) 2436 + if (cma_disable_callback(id_priv, RDMA_CM_CONNECT)) 2427 2437 return 0; 2428 2438 2429 2439 memset(&event, 0, sizeof event); ··· 2469 2479 if (ret) { 2470 2480 /* Destroy the CM ID by returning a non-zero value. */ 2471 2481 id_priv->cm_id.ib = NULL; 2472 - cma_exch(id_priv, CMA_DESTROYING); 2482 + cma_exch(id_priv, RDMA_CM_DESTROYING); 2473 2483 mutex_unlock(&id_priv->handler_mutex); 2474 2484 rdma_destroy_id(&id_priv->id); 2475 2485 return ret; ··· 2635 2645 int ret; 2636 2646 2637 2647 id_priv = container_of(id, struct rdma_id_private, id); 2638 - if (!cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_CONNECT)) 2648 + if (!cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_CONNECT)) 2639 2649 return -EINVAL; 2640 2650 2641 2651 if (!id->qp) { ··· 2662 2672 2663 2673 return 0; 2664 2674 err: 2665 - cma_comp_exch(id_priv, CMA_CONNECT, CMA_ROUTE_RESOLVED); 2675 + cma_comp_exch(id_priv, RDMA_CM_CONNECT, RDMA_CM_ROUTE_RESOLVED); 2666 2676 return ret; 2667 2677 } 2668 2678 EXPORT_SYMBOL(rdma_connect); ··· 2748 2758 int ret; 2749 2759 2750 2760 id_priv = container_of(id, struct rdma_id_private, id); 2751 - if (!cma_comp(id_priv, CMA_CONNECT)) 2761 + if (!cma_comp(id_priv, RDMA_CM_CONNECT)) 2752 2762 return -EINVAL; 2753 2763 2754 2764 if (!id->qp && conn_param) { ··· 2877 2887 int ret; 2878 2888 2879 2889 id_priv = mc->id_priv; 2880 - if (cma_disable_callback(id_priv, CMA_ADDR_BOUND) && 2881 - cma_disable_callback(id_priv, CMA_ADDR_RESOLVED)) 2890 + if 
(cma_disable_callback(id_priv, RDMA_CM_ADDR_BOUND) && 2891 + cma_disable_callback(id_priv, RDMA_CM_ADDR_RESOLVED)) 2882 2892 return 0; 2883 2893 2884 2894 mutex_lock(&id_priv->qp_mutex); ··· 2902 2912 2903 2913 ret = id_priv->id.event_handler(&id_priv->id, &event); 2904 2914 if (ret) { 2905 - cma_exch(id_priv, CMA_DESTROYING); 2915 + cma_exch(id_priv, RDMA_CM_DESTROYING); 2906 2916 mutex_unlock(&id_priv->handler_mutex); 2907 2917 rdma_destroy_id(&id_priv->id); 2908 2918 return 0; ··· 3085 3095 int ret; 3086 3096 3087 3097 id_priv = container_of(id, struct rdma_id_private, id); 3088 - if (!cma_comp(id_priv, CMA_ADDR_BOUND) && 3089 - !cma_comp(id_priv, CMA_ADDR_RESOLVED)) 3098 + if (!cma_comp(id_priv, RDMA_CM_ADDR_BOUND) && 3099 + !cma_comp(id_priv, RDMA_CM_ADDR_RESOLVED)) 3090 3100 return -EINVAL; 3091 3101 3092 3102 mc = kmalloc(sizeof *mc, GFP_KERNEL); ··· 3251 3261 static int cma_remove_id_dev(struct rdma_id_private *id_priv) 3252 3262 { 3253 3263 struct rdma_cm_event event; 3254 - enum cma_state state; 3264 + enum rdma_cm_state state; 3255 3265 int ret = 0; 3256 3266 3257 3267 /* Record that we want to remove the device */ 3258 - state = cma_exch(id_priv, CMA_DEVICE_REMOVAL); 3259 - if (state == CMA_DESTROYING) 3268 + state = cma_exch(id_priv, RDMA_CM_DEVICE_REMOVAL); 3269 + if (state == RDMA_CM_DESTROYING) 3260 3270 return 0; 3261 3271 3262 3272 cma_cancel_operation(id_priv, state); 3263 3273 mutex_lock(&id_priv->handler_mutex); 3264 3274 3265 3275 /* Check for destruction from another callback. */ 3266 - if (!cma_comp(id_priv, CMA_DEVICE_REMOVAL)) 3276 + if (!cma_comp(id_priv, RDMA_CM_DEVICE_REMOVAL)) 3267 3277 goto out; 3268 3278 3269 3279 memset(&event, 0, sizeof event);
+14
include/rdma/rdma_cm.h
··· 111 111 } param; 112 112 }; 113 113 114 + enum rdma_cm_state { 115 + RDMA_CM_IDLE, 116 + RDMA_CM_ADDR_QUERY, 117 + RDMA_CM_ADDR_RESOLVED, 118 + RDMA_CM_ROUTE_QUERY, 119 + RDMA_CM_ROUTE_RESOLVED, 120 + RDMA_CM_CONNECT, 121 + RDMA_CM_DISCONNECT, 122 + RDMA_CM_ADDR_BOUND, 123 + RDMA_CM_LISTEN, 124 + RDMA_CM_DEVICE_REMOVAL, 125 + RDMA_CM_DESTROYING 126 + }; 127 + 114 128 struct rdma_cm_id; 115 129 116 130 /**