Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

RDMA/sa_query: Add RMPP support for SA queries

Register GSI mad agent with RMPP support and add rmpp_callback for
SA queries. This is needed for querying more than one service record
in one query.

Signed-off-by: Or Har-Toov <ohartoov@nvidia.com>
Signed-off-by: Mark Zhang <markzhang@nvidia.com>
Reviewed-by: Vlad Dumitrescu <vdumitrescu@nvidia.com>
Link: https://patch.msgid.link/81dbcb48682e1838dc40f381cdcc0dc63f25f0f1.1751279793.git.leonro@nvidia.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>

Authored by Mark Zhang and committed by Leon Romanovsky
ef5fcdb7 8f5ae30d

+28 -11
drivers/infiniband/core/sa_query.c
@@ -107,6 +107,8 @@
 struct ib_sa_query {
 	void (*callback)(struct ib_sa_query *sa_query, int status,
 			 struct ib_sa_mad *mad);
+	void (*rmpp_callback)(struct ib_sa_query *sa_query, int status,
+			      struct ib_mad_recv_wc *mad);
 	void (*release)(struct ib_sa_query *);
 	struct ib_sa_client *client;
 	struct ib_sa_port *port;
@@ -1987,24 +1989,27 @@
 {
 	struct ib_sa_query *query = mad_send_wc->send_buf->context[0];
 	unsigned long flags;
+	int status = 0;
 
-	if (query->callback)
+	if (query->callback || query->rmpp_callback) {
 		switch (mad_send_wc->status) {
 		case IB_WC_SUCCESS:
 			/* No callback -- already got recv */
 			break;
 		case IB_WC_RESP_TIMEOUT_ERR:
-			query->callback(query, -ETIMEDOUT, NULL);
+			status = -ETIMEDOUT;
 			break;
 		case IB_WC_WR_FLUSH_ERR:
-			query->callback(query, -EINTR, NULL);
+			status = -EINTR;
 			break;
 		default:
-			query->callback(query, -EIO, NULL);
+			status = -EIO;
 			break;
 		}
+
+		if (status)
+			query->callback ? query->callback(query, status, NULL) :
+				query->rmpp_callback(query, status, NULL);
+	}
 
 	xa_lock_irqsave(&queries, flags);
 	__xa_erase(&queries, query->id);
@@ -2019,17 +2024,24 @@
 			 struct ib_mad_recv_wc *mad_recv_wc)
 {
 	struct ib_sa_query *query;
+	struct ib_mad *mad;
 
 	if (!send_buf)
 		return;
 
 	query = send_buf->context[0];
-	if (query->callback) {
+	mad = mad_recv_wc->recv_buf.mad;
+
+	if (query->rmpp_callback) {
 		if (mad_recv_wc->wc->status == IB_WC_SUCCESS)
-			query->callback(query,
-					mad_recv_wc->recv_buf.mad->mad_hdr.status ?
-					-EINVAL : 0,
-					(struct ib_sa_mad *) mad_recv_wc->recv_buf.mad);
+			query->rmpp_callback(query, mad->mad_hdr.status ?
+					     -EINVAL : 0, mad_recv_wc);
+		else
+			query->rmpp_callback(query, -EIO, NULL);
+	} else if (query->callback) {
+		if (mad_recv_wc->wc->status == IB_WC_SUCCESS)
+			query->callback(query, mad->mad_hdr.status ?
+					-EINVAL : 0, (struct ib_sa_mad *)mad);
 		else
 			query->callback(query, -EIO, NULL);
 	}
@@ -2181,8 +2193,9 @@
 
 		sa_dev->port[i].agent =
 			ib_register_mad_agent(device, i + s, IB_QPT_GSI,
-					      NULL, 0, send_handler,
-					      recv_handler, sa_dev, 0);
+					      NULL, IB_MGMT_RMPP_VERSION,
+					      send_handler, recv_handler,
+					      sa_dev, 0);
 		if (IS_ERR(sa_dev->port[i].agent)) {
 			ret = PTR_ERR(sa_dev->port[i].agent);
 			goto err;