Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

RDMA/cma: Add trace points in RDMA Connection Manager

Record state transitions as each connection is established. The IP addresses
of both peers and the Type of Service are reported. These trace points are
not in performance hot paths.

Also, record each cm_event_handler call to ULPs. This eliminates the need
for each ULP to add its own similar trace point in its CM event handler
function.

These new trace points appear in a new trace subsystem called "rdma_cma".

Sample events:

<...>-220 [004] 121.430733: cm_id_create: cm.id=0
<...>-472 [003] 121.430991: cm_event_handler: cm.id=0 src=192.168.2.51:35090 dst=192.168.2.55:20049 tos=0 ADDR_RESOLVED (0/0)
<...>-472 [003] 121.430995: cm_event_done: cm.id=0 src=192.168.2.51:35090 dst=192.168.2.55:20049 tos=0 result=0
<...>-472 [003] 121.431172: cm_event_handler: cm.id=0 src=192.168.2.51:35090 dst=192.168.2.55:20049 tos=0 ROUTE_RESOLVED (2/0)
<...>-472 [003] 121.431174: cm_event_done: cm.id=0 src=192.168.2.51:35090 dst=192.168.2.55:20049 tos=0 result=0
<...>-220 [004] 121.433480: cm_qp_create: cm.id=0 src=192.168.2.51:35090 dst=192.168.2.55:20049 tos=0 pd.id=2 qp_type=RC send_wr=4091 recv_wr=256 qp_num=521 rc=0
<...>-220 [004] 121.433577: cm_send_req: cm.id=0 src=192.168.2.51:35090 dst=192.168.2.55:20049 tos=0 qp_num=521
kworker/1:2-973 [001] 121.436190: cm_send_mra: cm.id=0 src=192.168.2.51:35090 dst=192.168.2.55:20049 tos=0
kworker/1:2-973 [001] 121.436340: cm_send_rtu: cm.id=0 src=192.168.2.51:35090 dst=192.168.2.55:20049 tos=0
kworker/1:2-973 [001] 121.436359: cm_event_handler: cm.id=0 src=192.168.2.51:35090 dst=192.168.2.55:20049 tos=0 ESTABLISHED (9/0)
kworker/1:2-973 [001] 121.436365: cm_event_done: cm.id=0 src=192.168.2.51:35090 dst=192.168.2.55:20049 tos=0 result=0
<...>-1975 [005] 123.161954: cm_disconnect: cm.id=0 src=192.168.2.51:35090 dst=192.168.2.55:20049 tos=0
<...>-1975 [005] 123.161974: cm_sent_dreq: cm.id=0 src=192.168.2.51:35090 dst=192.168.2.55:20049 tos=0
<...>-220 [004] 123.162102: cm_disconnect: cm.id=0 src=192.168.2.51:35090 dst=192.168.2.55:20049 tos=0
kworker/0:1-13 [000] 123.162391: cm_event_handler: cm.id=0 src=192.168.2.51:35090 dst=192.168.2.55:20049 tos=0 DISCONNECTED (10/0)
kworker/0:1-13 [000] 123.162393: cm_event_done: cm.id=0 src=192.168.2.51:35090 dst=192.168.2.55:20049 tos=0 result=0
<...>-220 [004] 123.164456: cm_qp_destroy: cm.id=0 src=192.168.2.51:35090 dst=192.168.2.55:20049 tos=0 qp_num=521
<...>-220 [004] 123.165290: cm_id_destroy: cm.id=0 src=192.168.2.51:35090 dst=192.168.2.55:20049 tos=0

Some features to note:
- restrack ID of the rdma_cm_id is tagged on each trace event
- The source and destination IP addresses and TOS are reported
- CM event upcalls are shown with decoded event and status
- CM state transitions are reported
- rdma_cm_id lifetime events are captured
- The latency of ULP CM event handlers is reported
- Lifetime events of associated QPs are reported
- Device removal and insertion are reported

This patch is based on previous work by:

Saeed Mahameed <saeedm@mellanox.com>
Mukesh Kacker <mukesh.kacker@oracle.com>
Ajaykumar Hotchandani <ajaykumar.hotchandani@oracle.com>
Aron Silverton <aron.silverton@oracle.com>
Avinash Repaka <avinash.repaka@oracle.com>
Somasundaram Krishnasamy <somasundaram.krishnasamy@oracle.com>

Link: https://lore.kernel.org/r/20191218201810.30584.3052.stgit@manet.1015granger.net
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>

authored by

Chuck Lever and committed by
Jason Gunthorpe
ed999f82 ad9efa05

+475 -23
+2 -1
drivers/infiniband/core/Makefile
··· 20 20 21 21 iw_cm-y := iwcm.o iwpm_util.o iwpm_msg.o 22 22 23 - rdma_cm-y := cma.o 23 + CFLAGS_cma_trace.o += -I$(src) 24 + rdma_cm-y := cma.o cma_trace.o 24 25 25 26 rdma_cm-$(CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS) += cma_configfs.o 26 27
+66 -22
drivers/infiniband/core/cma.c
··· 36 36 37 37 #include "core_priv.h" 38 38 #include "cma_priv.h" 39 + #include "cma_trace.h" 39 40 40 41 MODULE_AUTHOR("Sean Hefty"); 41 42 MODULE_DESCRIPTION("Generic RDMA CM Agent"); ··· 878 877 id_priv->id.route.addr.dev_addr.net = get_net(net); 879 878 id_priv->seq_num &= 0x00ffffff; 880 879 880 + trace_cm_id_create(id_priv); 881 881 return &id_priv->id; 882 882 } 883 883 EXPORT_SYMBOL(__rdma_create_id); ··· 930 928 int ret; 931 929 932 930 id_priv = container_of(id, struct rdma_id_private, id); 933 - if (id->device != pd->device) 934 - return -EINVAL; 931 + if (id->device != pd->device) { 932 + ret = -EINVAL; 933 + goto out_err; 934 + } 935 935 936 936 qp_init_attr->port_num = id->port_num; 937 937 qp = ib_create_qp(pd, qp_init_attr); 938 - if (IS_ERR(qp)) 939 - return PTR_ERR(qp); 938 + if (IS_ERR(qp)) { 939 + ret = PTR_ERR(qp); 940 + goto out_err; 941 + } 940 942 941 943 if (id->qp_type == IB_QPT_UD) 942 944 ret = cma_init_ud_qp(id_priv, qp); 943 945 else 944 946 ret = cma_init_conn_qp(id_priv, qp); 945 947 if (ret) 946 - goto err; 948 + goto out_destroy; 947 949 948 950 id->qp = qp; 949 951 id_priv->qp_num = qp->qp_num; 950 952 id_priv->srq = (qp->srq != NULL); 953 + trace_cm_qp_create(id_priv, pd, qp_init_attr, 0); 951 954 return 0; 952 - err: 955 + out_destroy: 953 956 ib_destroy_qp(qp); 957 + out_err: 958 + trace_cm_qp_create(id_priv, pd, qp_init_attr, ret); 954 959 return ret; 955 960 } 956 961 EXPORT_SYMBOL(rdma_create_qp); ··· 967 958 struct rdma_id_private *id_priv; 968 959 969 960 id_priv = container_of(id, struct rdma_id_private, id); 961 + trace_cm_qp_destroy(id_priv); 970 962 mutex_lock(&id_priv->qp_mutex); 971 963 ib_destroy_qp(id_priv->id.qp); 972 964 id_priv->id.qp = NULL; ··· 1821 1811 enum rdma_cm_state state; 1822 1812 1823 1813 id_priv = container_of(id, struct rdma_id_private, id); 1814 + trace_cm_id_destroy(id_priv); 1824 1815 state = cma_exch(id_priv, RDMA_CM_DESTROYING); 1825 1816 cma_cancel_operation(id_priv, state); 1826 1817 ··· 
1874 1863 if (ret) 1875 1864 goto reject; 1876 1865 1866 + trace_cm_send_rtu(id_priv); 1877 1867 ret = ib_send_cm_rtu(id_priv->cm_id.ib, NULL, 0); 1878 1868 if (ret) 1879 1869 goto reject; ··· 1883 1871 reject: 1884 1872 pr_debug_ratelimited("RDMA CM: CONNECT_ERROR: failed to handle reply. status %d\n", ret); 1885 1873 cma_modify_qp_err(id_priv); 1874 + trace_cm_send_rej(id_priv); 1886 1875 ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED, 1887 1876 NULL, 0, NULL, 0); 1888 1877 return ret; ··· 1901 1888 event->param.conn.rnr_retry_count = rep_data->rnr_retry_count; 1902 1889 event->param.conn.srq = rep_data->srq; 1903 1890 event->param.conn.qp_num = rep_data->remote_qpn; 1891 + } 1892 + 1893 + static int cma_cm_event_handler(struct rdma_id_private *id_priv, 1894 + struct rdma_cm_event *event) 1895 + { 1896 + int ret; 1897 + 1898 + trace_cm_event_handler(id_priv, event); 1899 + ret = id_priv->id.event_handler(&id_priv->id, event); 1900 + trace_cm_event_done(id_priv, event, ret); 1901 + return ret; 1904 1902 } 1905 1903 1906 1904 static int cma_ib_handler(struct ib_cm_id *cm_id, ··· 1936 1912 break; 1937 1913 case IB_CM_REP_RECEIVED: 1938 1914 if (cma_comp(id_priv, RDMA_CM_CONNECT) && 1939 - (id_priv->id.qp_type != IB_QPT_UD)) 1915 + (id_priv->id.qp_type != IB_QPT_UD)) { 1916 + trace_cm_send_mra(id_priv); 1940 1917 ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0); 1918 + } 1941 1919 if (id_priv->id.qp) { 1942 1920 event.status = cma_rep_recv(id_priv); 1943 1921 event.event = event.status ? RDMA_CM_EVENT_CONNECT_ERROR : ··· 1984 1958 goto out; 1985 1959 } 1986 1960 1987 - ret = id_priv->id.event_handler(&id_priv->id, &event); 1961 + ret = cma_cm_event_handler(id_priv, &event); 1988 1962 if (ret) { 1989 1963 /* Destroy the CM ID by returning a non-zero value. 
*/ 1990 1964 id_priv->cm_id.ib = NULL; ··· 2145 2119 if (IS_ERR(listen_id)) 2146 2120 return PTR_ERR(listen_id); 2147 2121 2122 + trace_cm_req_handler(listen_id, ib_event->event); 2148 2123 if (!cma_ib_check_req_qp_type(&listen_id->id, ib_event)) { 2149 2124 ret = -EINVAL; 2150 2125 goto net_dev_put; ··· 2188 2161 * until we're done accessing it. 2189 2162 */ 2190 2163 atomic_inc(&conn_id->refcount); 2191 - ret = conn_id->id.event_handler(&conn_id->id, &event); 2164 + ret = cma_cm_event_handler(conn_id, &event); 2192 2165 if (ret) 2193 2166 goto err3; 2194 2167 /* ··· 2197 2170 */ 2198 2171 mutex_lock(&lock); 2199 2172 if (cma_comp(conn_id, RDMA_CM_CONNECT) && 2200 - (conn_id->id.qp_type != IB_QPT_UD)) 2173 + (conn_id->id.qp_type != IB_QPT_UD)) { 2174 + trace_cm_send_mra(cm_id->context); 2201 2175 ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0); 2176 + } 2202 2177 mutex_unlock(&lock); 2203 2178 mutex_unlock(&conn_id->handler_mutex); 2204 2179 mutex_unlock(&listen_id->handler_mutex); ··· 2315 2286 event.status = iw_event->status; 2316 2287 event.param.conn.private_data = iw_event->private_data; 2317 2288 event.param.conn.private_data_len = iw_event->private_data_len; 2318 - ret = id_priv->id.event_handler(&id_priv->id, &event); 2289 + ret = cma_cm_event_handler(id_priv, &event); 2319 2290 if (ret) { 2320 2291 /* Destroy the CM ID by returning a non-zero value. */ 2321 2292 id_priv->cm_id.iw = NULL; ··· 2392 2363 * until we're done accessing it. 
2393 2364 */ 2394 2365 atomic_inc(&conn_id->refcount); 2395 - ret = conn_id->id.event_handler(&conn_id->id, &event); 2366 + ret = cma_cm_event_handler(conn_id, &event); 2396 2367 if (ret) { 2397 2368 /* User wants to destroy the CM ID */ 2398 2369 conn_id->cm_id.iw = NULL; ··· 2464 2435 2465 2436 id->context = id_priv->id.context; 2466 2437 id->event_handler = id_priv->id.event_handler; 2438 + trace_cm_event_handler(id_priv, event); 2467 2439 return id_priv->id.event_handler(id, event); 2468 2440 } 2469 2441 ··· 2641 2611 if (!cma_comp_exch(id_priv, work->old_state, work->new_state)) 2642 2612 goto out; 2643 2613 2644 - if (id_priv->id.event_handler(&id_priv->id, &work->event)) { 2614 + if (cma_cm_event_handler(id_priv, &work->event)) { 2645 2615 cma_exch(id_priv, RDMA_CM_DESTROYING); 2646 2616 destroy = 1; 2647 2617 } ··· 2664 2634 id_priv->state == RDMA_CM_DEVICE_REMOVAL) 2665 2635 goto out; 2666 2636 2667 - if (id_priv->id.event_handler(&id_priv->id, &work->event)) { 2637 + if (cma_cm_event_handler(id_priv, &work->event)) { 2668 2638 cma_exch(id_priv, RDMA_CM_DESTROYING); 2669 2639 destroy = 1; 2670 2640 } ··· 3119 3089 } else 3120 3090 event.event = RDMA_CM_EVENT_ADDR_RESOLVED; 3121 3091 3122 - if (id_priv->id.event_handler(&id_priv->id, &event)) { 3092 + if (cma_cm_event_handler(id_priv, &event)) { 3123 3093 cma_exch(id_priv, RDMA_CM_DESTROYING); 3124 3094 mutex_unlock(&id_priv->handler_mutex); 3125 3095 rdma_destroy_id(&id_priv->id); ··· 3766 3736 goto out; 3767 3737 } 3768 3738 3769 - ret = id_priv->id.event_handler(&id_priv->id, &event); 3739 + ret = cma_cm_event_handler(id_priv, &event); 3770 3740 3771 3741 rdma_destroy_ah_attr(&event.param.ud.ah_attr); 3772 3742 if (ret) { ··· 3830 3800 req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8); 3831 3801 req.max_cm_retries = CMA_MAX_CM_RETRIES; 3832 3802 3803 + trace_cm_send_sidr_req(id_priv); 3833 3804 ret = ib_send_cm_sidr_req(id_priv->cm_id.ib, &req); 3834 3805 if (ret) { 3835 3806 
ib_destroy_cm_id(id_priv->cm_id.ib); ··· 3904 3873 req.max_cm_retries = CMA_MAX_CM_RETRIES; 3905 3874 req.srq = id_priv->srq ? 1 : 0; 3906 3875 3876 + trace_cm_send_req(id_priv); 3907 3877 ret = ib_send_cm_req(id_priv->cm_id.ib, &req); 3908 3878 out: 3909 3879 if (ret && !IS_ERR(id)) { ··· 4018 3986 rep.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count); 4019 3987 rep.srq = id_priv->srq ? 1 : 0; 4020 3988 3989 + trace_cm_send_rep(id_priv); 4021 3990 ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep); 4022 3991 out: 4023 3992 return ret; ··· 4068 4035 rep.private_data = private_data; 4069 4036 rep.private_data_len = private_data_len; 4070 4037 4038 + trace_cm_send_sidr_rep(id_priv); 4071 4039 return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep); 4072 4040 } 4073 4041 ··· 4154 4120 return -EINVAL; 4155 4121 4156 4122 if (rdma_cap_ib_cm(id->device, id->port_num)) { 4157 - if (id->qp_type == IB_QPT_UD) 4123 + if (id->qp_type == IB_QPT_UD) { 4158 4124 ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT, 0, 4159 4125 private_data, private_data_len); 4160 - else 4126 + } else { 4127 + trace_cm_send_rej(id_priv); 4161 4128 ret = ib_send_cm_rej(id_priv->cm_id.ib, 4162 4129 IB_CM_REJ_CONSUMER_DEFINED, NULL, 4163 4130 0, private_data, private_data_len); 4131 + } 4164 4132 } else if (rdma_cap_iw_cm(id->device, id->port_num)) { 4165 4133 ret = iw_cm_reject(id_priv->cm_id.iw, 4166 4134 private_data, private_data_len); ··· 4187 4151 if (ret) 4188 4152 goto out; 4189 4153 /* Initiate or respond to a disconnect. 
*/ 4190 - if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0)) 4191 - ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0); 4154 + trace_cm_disconnect(id_priv); 4155 + if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0)) { 4156 + if (!ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0)) 4157 + trace_cm_sent_drep(id_priv); 4158 + } else { 4159 + trace_cm_sent_dreq(id_priv); 4160 + } 4192 4161 } else if (rdma_cap_iw_cm(id->device, id->port_num)) { 4193 4162 ret = iw_cm_disconnect(id_priv->cm_id.iw, 0); 4194 4163 } else ··· 4259 4218 } else 4260 4219 event.event = RDMA_CM_EVENT_MULTICAST_ERROR; 4261 4220 4262 - ret = id_priv->id.event_handler(&id_priv->id, &event); 4221 + ret = cma_cm_event_handler(id_priv, &event); 4263 4222 4264 4223 rdma_destroy_ah_attr(&event.param.ud.ah_attr); 4265 4224 if (ret) { ··· 4664 4623 cma_listen_on_dev(id_priv, cma_dev); 4665 4624 mutex_unlock(&lock); 4666 4625 4626 + trace_cm_add_one(device); 4667 4627 return; 4668 4628 4669 4629 free_gid_type: ··· 4695 4653 goto out; 4696 4654 4697 4655 event.event = RDMA_CM_EVENT_DEVICE_REMOVAL; 4698 - ret = id_priv->id.event_handler(&id_priv->id, &event); 4656 + ret = cma_cm_event_handler(id_priv, &event); 4699 4657 out: 4700 4658 mutex_unlock(&id_priv->handler_mutex); 4701 4659 return ret; ··· 4732 4690 static void cma_remove_one(struct ib_device *device, void *client_data) 4733 4691 { 4734 4692 struct cma_device *cma_dev = client_data; 4693 + 4694 + trace_cm_remove_one(device); 4735 4695 4736 4696 if (!cma_dev) 4737 4697 return;
+16
drivers/infiniband/core/cma_trace.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /* 3 + * Trace points for the RDMA Connection Manager. 4 + * 5 + * Author: Chuck Lever <chuck.lever@oracle.com> 6 + * 7 + * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. 8 + */ 9 + 10 + #define CREATE_TRACE_POINTS 11 + 12 + #include <rdma/rdma_cm.h> 13 + #include <rdma/ib_cm.h> 14 + #include "cma_priv.h" 15 + 16 + #include "cma_trace.h"
+391
drivers/infiniband/core/cma_trace.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-only */ 2 + /* 3 + * Trace point definitions for the RDMA Connect Manager. 4 + * 5 + * Author: Chuck Lever <chuck.lever@oracle.com> 6 + * 7 + * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. 8 + */ 9 + 10 + #undef TRACE_SYSTEM 11 + #define TRACE_SYSTEM rdma_cma 12 + 13 + #if !defined(_TRACE_RDMA_CMA_H) || defined(TRACE_HEADER_MULTI_READ) 14 + 15 + #define _TRACE_RDMA_CMA_H 16 + 17 + #include <linux/tracepoint.h> 18 + #include <trace/events/rdma.h> 19 + 20 + /* 21 + * enum ib_cm_event_type, from include/rdma/ib_cm.h 22 + */ 23 + #define IB_CM_EVENT_LIST \ 24 + ib_cm_event(REQ_ERROR) \ 25 + ib_cm_event(REQ_RECEIVED) \ 26 + ib_cm_event(REP_ERROR) \ 27 + ib_cm_event(REP_RECEIVED) \ 28 + ib_cm_event(RTU_RECEIVED) \ 29 + ib_cm_event(USER_ESTABLISHED) \ 30 + ib_cm_event(DREQ_ERROR) \ 31 + ib_cm_event(DREQ_RECEIVED) \ 32 + ib_cm_event(DREP_RECEIVED) \ 33 + ib_cm_event(TIMEWAIT_EXIT) \ 34 + ib_cm_event(MRA_RECEIVED) \ 35 + ib_cm_event(REJ_RECEIVED) \ 36 + ib_cm_event(LAP_ERROR) \ 37 + ib_cm_event(LAP_RECEIVED) \ 38 + ib_cm_event(APR_RECEIVED) \ 39 + ib_cm_event(SIDR_REQ_ERROR) \ 40 + ib_cm_event(SIDR_REQ_RECEIVED) \ 41 + ib_cm_event_end(SIDR_REP_RECEIVED) 42 + 43 + #undef ib_cm_event 44 + #undef ib_cm_event_end 45 + 46 + #define ib_cm_event(x) TRACE_DEFINE_ENUM(IB_CM_##x); 47 + #define ib_cm_event_end(x) TRACE_DEFINE_ENUM(IB_CM_##x); 48 + 49 + IB_CM_EVENT_LIST 50 + 51 + #undef ib_cm_event 52 + #undef ib_cm_event_end 53 + 54 + #define ib_cm_event(x) { IB_CM_##x, #x }, 55 + #define ib_cm_event_end(x) { IB_CM_##x, #x } 56 + 57 + #define rdma_show_ib_cm_event(x) \ 58 + __print_symbolic(x, IB_CM_EVENT_LIST) 59 + 60 + 61 + DECLARE_EVENT_CLASS(cma_fsm_class, 62 + TP_PROTO( 63 + const struct rdma_id_private *id_priv 64 + ), 65 + 66 + TP_ARGS(id_priv), 67 + 68 + TP_STRUCT__entry( 69 + __field(u32, cm_id) 70 + __field(u32, tos) 71 + __array(unsigned char, srcaddr, sizeof(struct sockaddr_in6)) 72 + __array(unsigned 
char, dstaddr, sizeof(struct sockaddr_in6)) 73 + ), 74 + 75 + TP_fast_assign( 76 + __entry->cm_id = id_priv->res.id; 77 + __entry->tos = id_priv->tos; 78 + memcpy(__entry->srcaddr, &id_priv->id.route.addr.src_addr, 79 + sizeof(struct sockaddr_in6)); 80 + memcpy(__entry->dstaddr, &id_priv->id.route.addr.dst_addr, 81 + sizeof(struct sockaddr_in6)); 82 + ), 83 + 84 + TP_printk("cm.id=%u src=%pISpc dst=%pISpc tos=%u", 85 + __entry->cm_id, __entry->srcaddr, __entry->dstaddr, __entry->tos 86 + ) 87 + ); 88 + 89 + #define DEFINE_CMA_FSM_EVENT(name) \ 90 + DEFINE_EVENT(cma_fsm_class, cm_##name, \ 91 + TP_PROTO( \ 92 + const struct rdma_id_private *id_priv \ 93 + ), \ 94 + TP_ARGS(id_priv)) 95 + 96 + DEFINE_CMA_FSM_EVENT(send_rtu); 97 + DEFINE_CMA_FSM_EVENT(send_rej); 98 + DEFINE_CMA_FSM_EVENT(send_mra); 99 + DEFINE_CMA_FSM_EVENT(send_sidr_req); 100 + DEFINE_CMA_FSM_EVENT(send_sidr_rep); 101 + DEFINE_CMA_FSM_EVENT(disconnect); 102 + DEFINE_CMA_FSM_EVENT(sent_drep); 103 + DEFINE_CMA_FSM_EVENT(sent_dreq); 104 + DEFINE_CMA_FSM_EVENT(id_destroy); 105 + 106 + TRACE_EVENT(cm_id_create, 107 + TP_PROTO( 108 + const struct rdma_id_private *id_priv 109 + ), 110 + 111 + TP_ARGS(id_priv), 112 + 113 + TP_STRUCT__entry( 114 + __field(u32, cm_id) 115 + ), 116 + 117 + TP_fast_assign( 118 + __entry->cm_id = id_priv->res.id; 119 + ), 120 + 121 + TP_printk("cm.id=%u", 122 + __entry->cm_id 123 + ) 124 + ); 125 + 126 + DECLARE_EVENT_CLASS(cma_qp_class, 127 + TP_PROTO( 128 + const struct rdma_id_private *id_priv 129 + ), 130 + 131 + TP_ARGS(id_priv), 132 + 133 + TP_STRUCT__entry( 134 + __field(u32, cm_id) 135 + __field(u32, tos) 136 + __field(u32, qp_num) 137 + __array(unsigned char, srcaddr, sizeof(struct sockaddr_in6)) 138 + __array(unsigned char, dstaddr, sizeof(struct sockaddr_in6)) 139 + ), 140 + 141 + TP_fast_assign( 142 + __entry->cm_id = id_priv->res.id; 143 + __entry->tos = id_priv->tos; 144 + __entry->qp_num = id_priv->qp_num; 145 + memcpy(__entry->srcaddr, 
&id_priv->id.route.addr.src_addr, 146 + sizeof(struct sockaddr_in6)); 147 + memcpy(__entry->dstaddr, &id_priv->id.route.addr.dst_addr, 148 + sizeof(struct sockaddr_in6)); 149 + ), 150 + 151 + TP_printk("cm.id=%u src=%pISpc dst=%pISpc tos=%u qp_num=%u", 152 + __entry->cm_id, __entry->srcaddr, __entry->dstaddr, __entry->tos, 153 + __entry->qp_num 154 + ) 155 + ); 156 + 157 + #define DEFINE_CMA_QP_EVENT(name) \ 158 + DEFINE_EVENT(cma_qp_class, cm_##name, \ 159 + TP_PROTO( \ 160 + const struct rdma_id_private *id_priv \ 161 + ), \ 162 + TP_ARGS(id_priv)) 163 + 164 + DEFINE_CMA_QP_EVENT(send_req); 165 + DEFINE_CMA_QP_EVENT(send_rep); 166 + DEFINE_CMA_QP_EVENT(qp_destroy); 167 + 168 + /* 169 + * enum ib_wp_type, from include/rdma/ib_verbs.h 170 + */ 171 + #define IB_QP_TYPE_LIST \ 172 + ib_qp_type(SMI) \ 173 + ib_qp_type(GSI) \ 174 + ib_qp_type(RC) \ 175 + ib_qp_type(UC) \ 176 + ib_qp_type(UD) \ 177 + ib_qp_type(RAW_IPV6) \ 178 + ib_qp_type(RAW_ETHERTYPE) \ 179 + ib_qp_type(RAW_PACKET) \ 180 + ib_qp_type(XRC_INI) \ 181 + ib_qp_type_end(XRC_TGT) 182 + 183 + #undef ib_qp_type 184 + #undef ib_qp_type_end 185 + 186 + #define ib_qp_type(x) TRACE_DEFINE_ENUM(IB_QPT_##x); 187 + #define ib_qp_type_end(x) TRACE_DEFINE_ENUM(IB_QPT_##x); 188 + 189 + IB_QP_TYPE_LIST 190 + 191 + #undef ib_qp_type 192 + #undef ib_qp_type_end 193 + 194 + #define ib_qp_type(x) { IB_QPT_##x, #x }, 195 + #define ib_qp_type_end(x) { IB_QPT_##x, #x } 196 + 197 + #define rdma_show_qp_type(x) \ 198 + __print_symbolic(x, IB_QP_TYPE_LIST) 199 + 200 + 201 + TRACE_EVENT(cm_qp_create, 202 + TP_PROTO( 203 + const struct rdma_id_private *id_priv, 204 + const struct ib_pd *pd, 205 + const struct ib_qp_init_attr *qp_init_attr, 206 + int rc 207 + ), 208 + 209 + TP_ARGS(id_priv, pd, qp_init_attr, rc), 210 + 211 + TP_STRUCT__entry( 212 + __field(u32, cm_id) 213 + __field(u32, pd_id) 214 + __field(u32, tos) 215 + __field(u32, qp_num) 216 + __field(u32, send_wr) 217 + __field(u32, recv_wr) 218 + __field(int, rc) 219 + 
__field(unsigned long, qp_type) 220 + __array(unsigned char, srcaddr, sizeof(struct sockaddr_in6)) 221 + __array(unsigned char, dstaddr, sizeof(struct sockaddr_in6)) 222 + ), 223 + 224 + TP_fast_assign( 225 + __entry->cm_id = id_priv->res.id; 226 + __entry->pd_id = pd->res.id; 227 + __entry->tos = id_priv->tos; 228 + __entry->send_wr = qp_init_attr->cap.max_send_wr; 229 + __entry->recv_wr = qp_init_attr->cap.max_recv_wr; 230 + __entry->rc = rc; 231 + if (!rc) { 232 + __entry->qp_num = id_priv->qp_num; 233 + __entry->qp_type = id_priv->id.qp_type; 234 + } else { 235 + __entry->qp_num = 0; 236 + __entry->qp_type = 0; 237 + } 238 + memcpy(__entry->srcaddr, &id_priv->id.route.addr.src_addr, 239 + sizeof(struct sockaddr_in6)); 240 + memcpy(__entry->dstaddr, &id_priv->id.route.addr.dst_addr, 241 + sizeof(struct sockaddr_in6)); 242 + ), 243 + 244 + TP_printk("cm.id=%u src=%pISpc dst=%pISpc tos=%u pd.id=%u qp_type=%s" 245 + " send_wr=%u recv_wr=%u qp_num=%u rc=%d", 246 + __entry->cm_id, __entry->srcaddr, __entry->dstaddr, 247 + __entry->tos, __entry->pd_id, 248 + rdma_show_qp_type(__entry->qp_type), __entry->send_wr, 249 + __entry->recv_wr, __entry->qp_num, __entry->rc 250 + ) 251 + ); 252 + 253 + TRACE_EVENT(cm_req_handler, 254 + TP_PROTO( 255 + const struct rdma_id_private *id_priv, 256 + int event 257 + ), 258 + 259 + TP_ARGS(id_priv, event), 260 + 261 + TP_STRUCT__entry( 262 + __field(u32, cm_id) 263 + __field(u32, tos) 264 + __field(unsigned long, event) 265 + __array(unsigned char, srcaddr, sizeof(struct sockaddr_in6)) 266 + __array(unsigned char, dstaddr, sizeof(struct sockaddr_in6)) 267 + ), 268 + 269 + TP_fast_assign( 270 + __entry->cm_id = id_priv->res.id; 271 + __entry->tos = id_priv->tos; 272 + __entry->event = event; 273 + memcpy(__entry->srcaddr, &id_priv->id.route.addr.src_addr, 274 + sizeof(struct sockaddr_in6)); 275 + memcpy(__entry->dstaddr, &id_priv->id.route.addr.dst_addr, 276 + sizeof(struct sockaddr_in6)); 277 + ), 278 + 279 + TP_printk("cm.id=%u 
src=%pISpc dst=%pISpc tos=%u %s (%lu)", 280 + __entry->cm_id, __entry->srcaddr, __entry->dstaddr, __entry->tos, 281 + rdma_show_ib_cm_event(__entry->event), __entry->event 282 + ) 283 + ); 284 + 285 + TRACE_EVENT(cm_event_handler, 286 + TP_PROTO( 287 + const struct rdma_id_private *id_priv, 288 + const struct rdma_cm_event *event 289 + ), 290 + 291 + TP_ARGS(id_priv, event), 292 + 293 + TP_STRUCT__entry( 294 + __field(u32, cm_id) 295 + __field(u32, tos) 296 + __field(unsigned long, event) 297 + __field(int, status) 298 + __array(unsigned char, srcaddr, sizeof(struct sockaddr_in6)) 299 + __array(unsigned char, dstaddr, sizeof(struct sockaddr_in6)) 300 + ), 301 + 302 + TP_fast_assign( 303 + __entry->cm_id = id_priv->res.id; 304 + __entry->tos = id_priv->tos; 305 + __entry->event = event->event; 306 + __entry->status = event->status; 307 + memcpy(__entry->srcaddr, &id_priv->id.route.addr.src_addr, 308 + sizeof(struct sockaddr_in6)); 309 + memcpy(__entry->dstaddr, &id_priv->id.route.addr.dst_addr, 310 + sizeof(struct sockaddr_in6)); 311 + ), 312 + 313 + TP_printk("cm.id=%u src=%pISpc dst=%pISpc tos=%u %s (%lu/%d)", 314 + __entry->cm_id, __entry->srcaddr, __entry->dstaddr, __entry->tos, 315 + rdma_show_cm_event(__entry->event), __entry->event, 316 + __entry->status 317 + ) 318 + ); 319 + 320 + TRACE_EVENT(cm_event_done, 321 + TP_PROTO( 322 + const struct rdma_id_private *id_priv, 323 + const struct rdma_cm_event *event, 324 + int result 325 + ), 326 + 327 + TP_ARGS(id_priv, event, result), 328 + 329 + TP_STRUCT__entry( 330 + __field(u32, cm_id) 331 + __field(u32, tos) 332 + __field(unsigned long, event) 333 + __field(int, result) 334 + __array(unsigned char, srcaddr, sizeof(struct sockaddr_in6)) 335 + __array(unsigned char, dstaddr, sizeof(struct sockaddr_in6)) 336 + ), 337 + 338 + TP_fast_assign( 339 + __entry->cm_id = id_priv->res.id; 340 + __entry->tos = id_priv->tos; 341 + __entry->event = event->event; 342 + __entry->result = result; 343 + memcpy(__entry->srcaddr, 
&id_priv->id.route.addr.src_addr, 344 + sizeof(struct sockaddr_in6)); 345 + memcpy(__entry->dstaddr, &id_priv->id.route.addr.dst_addr, 346 + sizeof(struct sockaddr_in6)); 347 + ), 348 + 349 + TP_printk("cm.id=%u src=%pISpc dst=%pISpc tos=%u %s consumer returns %d", 350 + __entry->cm_id, __entry->srcaddr, __entry->dstaddr, __entry->tos, 351 + rdma_show_cm_event(__entry->event), __entry->result 352 + ) 353 + ); 354 + 355 + DECLARE_EVENT_CLASS(cma_client_class, 356 + TP_PROTO( 357 + const struct ib_device *device 358 + ), 359 + 360 + TP_ARGS(device), 361 + 362 + TP_STRUCT__entry( 363 + __string(name, device->name) 364 + ), 365 + 366 + TP_fast_assign( 367 + __assign_str(name, device->name); 368 + ), 369 + 370 + TP_printk("device name=%s", 371 + __get_str(name) 372 + ) 373 + ); 374 + 375 + #define DEFINE_CMA_CLIENT_EVENT(name) \ 376 + DEFINE_EVENT(cma_client_class, cm_##name, \ 377 + TP_PROTO( \ 378 + const struct ib_device *device \ 379 + ), \ 380 + TP_ARGS(device)) 381 + 382 + DEFINE_CMA_CLIENT_EVENT(add_one); 383 + DEFINE_CMA_CLIENT_EVENT(remove_one); 384 + 385 + #endif /* _TRACE_RDMA_CMA_H */ 386 + 387 + #undef TRACE_INCLUDE_PATH 388 + #define TRACE_INCLUDE_PATH . 389 + #define TRACE_INCLUDE_FILE cma_trace 390 + 391 + #include <trace/define_trace.h>