Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

xprtrdma: Replace dprintk() in rpcrdma_update_connect_private()

Clean up: Use a single trace point to record each connection's
negotiated inline thresholds and the computed maximum byte size
of transport headers.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>

Authored by Chuck Lever and committed by Anna Schumaker.
f54c870d d4957f01

+46 -15
+36
include/trace/events/rpcrdma.h
··· 371 371 ) 372 372 ); 373 373 374 + TRACE_EVENT(xprtrdma_inline_thresh, 375 + TP_PROTO( 376 + const struct rpcrdma_xprt *r_xprt 377 + ), 378 + 379 + TP_ARGS(r_xprt), 380 + 381 + TP_STRUCT__entry( 382 + __field(const void *, r_xprt) 383 + __field(unsigned int, inline_send) 384 + __field(unsigned int, inline_recv) 385 + __field(unsigned int, max_send) 386 + __field(unsigned int, max_recv) 387 + __string(addr, rpcrdma_addrstr(r_xprt)) 388 + __string(port, rpcrdma_portstr(r_xprt)) 389 + ), 390 + 391 + TP_fast_assign( 392 + const struct rpcrdma_ep *ep = &r_xprt->rx_ep; 393 + 394 + __entry->r_xprt = r_xprt; 395 + __entry->inline_send = ep->rep_inline_send; 396 + __entry->inline_recv = ep->rep_inline_recv; 397 + __entry->max_send = ep->rep_max_inline_send; 398 + __entry->max_recv = ep->rep_max_inline_recv; 399 + __assign_str(addr, rpcrdma_addrstr(r_xprt)); 400 + __assign_str(port, rpcrdma_portstr(r_xprt)); 401 + ), 402 + 403 + TP_printk("peer=[%s]:%s r_xprt=%p neg send/recv=%u/%u, calc send/recv=%u/%u", 404 + __get_str(addr), __get_str(port), __entry->r_xprt, 405 + __entry->inline_send, __entry->inline_recv, 406 + __entry->max_send, __entry->max_recv 407 + ) 408 + ); 409 + 374 410 DEFINE_CONN_EVENT(connect); 375 411 DEFINE_CONN_EVENT(disconnect); 376 412
-4
net/sunrpc/xprtrdma/rpc_rdma.c
··· 78 78 size += rpcrdma_segment_maxsz * sizeof(__be32); 79 79 size += sizeof(__be32); /* list discriminator */ 80 80 81 - dprintk("RPC: %s: max call header size = %u\n", 82 - __func__, size); 83 81 return size; 84 82 } 85 83 ··· 98 100 size += maxsegs * rpcrdma_segment_maxsz * sizeof(__be32); 99 101 size += sizeof(__be32); /* list discriminator */ 100 102 101 - dprintk("RPC: %s: max reply header size = %u\n", 102 - __func__, size); 103 103 return size; 104 104 } 105 105
+10 -11
net/sunrpc/xprtrdma/verbs.c
··· 177 177 rpcrdma_recv_buffer_put(rep); 178 178 } 179 179 180 - static void 181 - rpcrdma_update_connect_private(struct rpcrdma_xprt *r_xprt, 182 - struct rdma_conn_param *param) 180 + static void rpcrdma_update_cm_private(struct rpcrdma_xprt *r_xprt, 181 + struct rdma_conn_param *param) 183 182 { 184 183 const struct rpcrdma_connect_private *pmsg = param->private_data; 184 + struct rpcrdma_ep *ep = &r_xprt->rx_ep; 185 185 unsigned int rsize, wsize; 186 186 187 187 /* Default settings for RPC-over-RDMA Version One */ ··· 197 197 wsize = rpcrdma_decode_buffer_size(pmsg->cp_recv_size); 198 198 } 199 199 200 - if (rsize < r_xprt->rx_ep.rep_inline_recv) 201 - r_xprt->rx_ep.rep_inline_recv = rsize; 202 - if (wsize < r_xprt->rx_ep.rep_inline_send) 203 - r_xprt->rx_ep.rep_inline_send = wsize; 204 - dprintk("RPC: %s: max send %u, max recv %u\n", __func__, 205 - r_xprt->rx_ep.rep_inline_send, 206 - r_xprt->rx_ep.rep_inline_recv); 200 + if (rsize < ep->rep_inline_recv) 201 + ep->rep_inline_recv = rsize; 202 + if (wsize < ep->rep_inline_send) 203 + ep->rep_inline_send = wsize; 204 + 207 205 rpcrdma_set_max_header_sizes(r_xprt); 208 206 } 209 207 ··· 255 257 case RDMA_CM_EVENT_ESTABLISHED: 256 258 ++xprt->connect_cookie; 257 259 ep->rep_connected = 1; 258 - rpcrdma_update_connect_private(r_xprt, &event->param.conn); 260 + rpcrdma_update_cm_private(r_xprt, &event->param.conn); 261 + trace_xprtrdma_inline_thresh(r_xprt); 259 262 wake_up_all(&ep->rep_connect_wait); 260 263 break; 261 264 case RDMA_CM_EVENT_CONNECT_ERROR: