
IB/cm: Simplify ib_cancel_mad() and ib_modify_mad() calls

The mad_agent parameter is redundant since struct ib_mad_send_buf
already holds a pointer to it.

Link: https://lore.kernel.org/r/0987c784b25f7bfa72f78691f50cff066de587e1.1622629024.git.leonro@nvidia.com
Signed-off-by: Mark Zhang <markzhang@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>

Authored by Mark Zhang, committed by Jason Gunthorpe
70076a41 3595c398
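
Before the per-file diffs, a minimal before/after sketch of the call-site change described in the message above. The cm_id_priv fields are taken from the cm.c hunks below; the snippet is illustrative rather than a complete function.

/* Before: callers passed the MAD agent alongside the send buffer. */
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
ib_modify_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg, timeout);

/* After: struct ib_mad_send_buf records its own mad_agent, so the
 * buffer alone identifies the outstanding send.
 */
ib_cancel_mad(cm_id_priv->msg);
ib_modify_mad(cm_id_priv->msg, timeout);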

+39 -51 (total)
+19 -23
drivers/infiniband/core/cm.c
···
                 break;
         case IB_CM_SIDR_REQ_SENT:
                 cm_id->state = IB_CM_IDLE;
-                ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
+                ib_cancel_mad(cm_id_priv->msg);
                 break;
         case IB_CM_SIDR_REQ_RCVD:
                 cm_send_sidr_rep_locked(cm_id_priv,
···
                 break;
         case IB_CM_REQ_SENT:
         case IB_CM_MRA_REQ_RCVD:
-                ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
+                ib_cancel_mad(cm_id_priv->msg);
                 cm_send_rej_locked(cm_id_priv, IB_CM_REJ_TIMEOUT,
                                    &cm_id_priv->id.device->node_guid,
                                    sizeof(cm_id_priv->id.device->node_guid),
···
                 break;
         case IB_CM_REP_SENT:
         case IB_CM_MRA_REP_RCVD:
-                ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
+                ib_cancel_mad(cm_id_priv->msg);
                 cm_send_rej_locked(cm_id_priv, IB_CM_REJ_CONSUMER_DEFINED, NULL,
                                    0, NULL, 0);
                 goto retest;
···
                 cm_send_dreq_locked(cm_id_priv, NULL, 0);
                 goto retest;
         case IB_CM_DREQ_SENT:
-                ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
+                ib_cancel_mad(cm_id_priv->msg);
                 cm_enter_timewait(cm_id_priv);
                 goto retest;
         case IB_CM_DREQ_RCVD:
···
                         cm_ack_timeout(cm_id_priv->target_ack_delay,
                                        cm_id_priv->alt_av.timeout - 1);

-        ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
+        ib_cancel_mad(cm_id_priv->msg);
         cm_queue_work_unlock(cm_id_priv, work);
         return 0;

···
                 goto out;
         }

-        ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
+        ib_cancel_mad(cm_id_priv->msg);
         cm_queue_work_unlock(cm_id_priv, work);
         return 0;
 out:
···
         }
         cm_id_priv->id.state = IB_CM_ESTABLISHED;

-        ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
+        ib_cancel_mad(cm_id_priv->msg);
         cm_queue_work_unlock(cm_id_priv, work);
         return 0;
 out:
···

         if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT ||
             cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
-                ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
+                ib_cancel_mad(cm_id_priv->msg);

         msg = cm_alloc_priv_msg(cm_id_priv);
         if (IS_ERR(msg)) {
···
         switch (cm_id_priv->id.state) {
         case IB_CM_REP_SENT:
         case IB_CM_DREQ_SENT:
-                ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
+                ib_cancel_mad(cm_id_priv->msg);
                 break;
         case IB_CM_ESTABLISHED:
                 if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT ||
                     cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
-                        ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
+                        ib_cancel_mad(cm_id_priv->msg);
                 break;
         case IB_CM_MRA_REP_RCVD:
                 break;
···
         }
         cm_enter_timewait(cm_id_priv);

-        ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
+        ib_cancel_mad(cm_id_priv->msg);
         cm_queue_work_unlock(cm_id_priv, work);
         return 0;
 out:
···
         case IB_CM_MRA_REQ_RCVD:
         case IB_CM_REP_SENT:
         case IB_CM_MRA_REP_RCVD:
-                ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
+                ib_cancel_mad(cm_id_priv->msg);
                 fallthrough;
         case IB_CM_REQ_RCVD:
         case IB_CM_MRA_REQ_SENT:
···
                 cm_reset_to_idle(cm_id_priv);
                 break;
         case IB_CM_DREQ_SENT:
-                ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
+                ib_cancel_mad(cm_id_priv->msg);
                 fallthrough;
         case IB_CM_REP_RCVD:
         case IB_CM_MRA_REP_SENT:
···
                 if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT ||
                     cm_id_priv->id.lap_state == IB_CM_LAP_SENT) {
                         if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT)
-                                ib_cancel_mad(cm_id_priv->av.port->mad_agent,
-                                              cm_id_priv->msg);
+                                ib_cancel_mad(cm_id_priv->msg);
                         cm_enter_timewait(cm_id_priv);
                         break;
                 }
···
         case IB_CM_REQ_SENT:
                 if (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg) !=
                     CM_MSG_RESPONSE_REQ ||
-                    ib_modify_mad(cm_id_priv->av.port->mad_agent,
-                                  cm_id_priv->msg, timeout))
+                    ib_modify_mad(cm_id_priv->msg, timeout))
                         goto out;
                 cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD;
                 break;
         case IB_CM_REP_SENT:
                 if (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg) !=
                     CM_MSG_RESPONSE_REP ||
-                    ib_modify_mad(cm_id_priv->av.port->mad_agent,
-                                  cm_id_priv->msg, timeout))
+                    ib_modify_mad(cm_id_priv->msg, timeout))
                         goto out;
                 cm_id_priv->id.state = IB_CM_MRA_REP_RCVD;
                 break;
···
                 if (IBA_GET(CM_MRA_MESSAGE_MRAED, mra_msg) !=
                     CM_MSG_RESPONSE_OTHER ||
                     cm_id_priv->id.lap_state != IB_CM_LAP_SENT ||
-                    ib_modify_mad(cm_id_priv->av.port->mad_agent,
-                                  cm_id_priv->msg, timeout)) {
+                    ib_modify_mad(cm_id_priv->msg, timeout)) {
                         if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
                                 atomic_long_inc(&work->port->
                                                 counter_group[CM_RECV_DUPLICATES].
···
                 goto out;
         }
         cm_id_priv->id.lap_state = IB_CM_LAP_IDLE;
-        ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
+        ib_cancel_mad(cm_id_priv->msg);
         cm_queue_work_unlock(cm_id_priv, work);
         return 0;
 out:
···
                 goto out;
         }
         cm_id_priv->id.state = IB_CM_IDLE;
-        ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
+        ib_cancel_mad(cm_id_priv->msg);
         spin_unlock_irq(&cm_id_priv->lock);

         cm_format_sidr_rep_event(work, cm_id_priv);
+6 -11
drivers/infiniband/core/mad.c
···
                 return NULL;
 }

-int ib_modify_mad(struct ib_mad_agent *mad_agent,
-                  struct ib_mad_send_buf *send_buf, u32 timeout_ms)
+int ib_modify_mad(struct ib_mad_send_buf *send_buf, u32 timeout_ms)
 {
         struct ib_mad_agent_private *mad_agent_priv;
         struct ib_mad_send_wr_private *mad_send_wr;
         unsigned long flags;
         int active;

-        mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
-                                      agent);
+        if (!send_buf)
+                return -EINVAL;
+
+        mad_agent_priv = container_of(send_buf->mad_agent,
+                                      struct ib_mad_agent_private, agent);
         spin_lock_irqsave(&mad_agent_priv->lock, flags);
         mad_send_wr = find_send_wr(mad_agent_priv, send_buf);
         if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) {
···
         return 0;
 }
 EXPORT_SYMBOL(ib_modify_mad);
-
-void ib_cancel_mad(struct ib_mad_agent *mad_agent,
-                   struct ib_mad_send_buf *send_buf)
-{
-        ib_modify_mad(mad_agent, send_buf, 0);
-}
-EXPORT_SYMBOL(ib_cancel_mad);

 static void local_completions(struct work_struct *work)
 {
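
The reworked ib_modify_mad() above can drop the mad_agent argument because the agent is recoverable from the buffer itself. A trimmed sketch of that relationship, with struct fields abbreviated (the full definitions live in include/rdma/ib_mad.h and drivers/infiniband/core/mad_priv.h):

struct ib_mad_send_buf {
        struct ib_mad_agent *mad_agent; /* agent that created this buffer */
        /* ... */
};

struct ib_mad_agent_private {
        /* ... */
        struct ib_mad_agent agent;      /* public agent embedded in the private one */
        /* ... */
};

/* container_of() walks from the embedded public agent back to the
 * enclosing private structure, so only send_buf needs to be passed:
 */
mad_agent_priv = container_of(send_buf->mad_agent,
                              struct ib_mad_agent_private, agent);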
+1 -3
drivers/infiniband/core/sa_query.c
···
 void ib_sa_cancel_query(int id, struct ib_sa_query *query)
 {
         unsigned long flags;
-        struct ib_mad_agent *agent;
         struct ib_mad_send_buf *mad_buf;

         xa_lock_irqsave(&queries, flags);
···
                 xa_unlock_irqrestore(&queries, flags);
                 return;
         }
-        agent = query->port->agent;
         mad_buf = query->mad_buf;
         xa_unlock_irqrestore(&queries, flags);
···
          * sent to the MAD layer and has to be cancelled from there.
          */
         if (!ib_nl_cancel_request(query))
-                ib_cancel_mad(agent, mad_buf);
+                ib_cancel_mad(mad_buf);
 }
 EXPORT_SYMBOL(ib_sa_cancel_query);
+13 -14
include/rdma/ib_mad.h
···
 void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc);

 /**
- * ib_cancel_mad - Cancels an outstanding send MAD operation.
- * @mad_agent: Specifies the registration associated with sent MAD.
- * @send_buf: Indicates the MAD to cancel.
- *
- * MADs will be returned to the user through the corresponding
- * ib_mad_send_handler.
- */
-void ib_cancel_mad(struct ib_mad_agent *mad_agent,
-                   struct ib_mad_send_buf *send_buf);
-
-/**
  * ib_modify_mad - Modifies an outstanding send MAD operation.
- * @mad_agent: Specifies the registration associated with sent MAD.
  * @send_buf: Indicates the MAD to modify.
  * @timeout_ms: New timeout value for sent MAD.
  *
  * This call will reset the timeout value for a sent MAD to the specified
  * value.
  */
-int ib_modify_mad(struct ib_mad_agent *mad_agent,
-                  struct ib_mad_send_buf *send_buf, u32 timeout_ms);
+int ib_modify_mad(struct ib_mad_send_buf *send_buf, u32 timeout_ms);
+
+/**
+ * ib_cancel_mad - Cancels an outstanding send MAD operation.
+ * @send_buf: Indicates the MAD to cancel.
+ *
+ * MADs will be returned to the user through the corresponding
+ * ib_mad_send_handler.
+ */
+static inline void ib_cancel_mad(struct ib_mad_send_buf *send_buf)
+{
+        ib_modify_mad(send_buf, 0);
+}

 /**
  * ib_create_send_mad - Allocate and initialize a data buffer and work request