Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[IB] Fix MAD layer DMA mappings to avoid touching data buffer once mapped

The MAD layer was violating the DMA API by touching data buffers used
for sends after the DMA mapping was done. This causes problems on
non-cache-coherent architectures, because the device doing DMA won't
see updates to the payload buffers that exist only in the CPU cache.

Fix this by having all MAD consumers use ib_create_send_mad() to
allocate their send buffers, and moving the DMA mapping into the MAD
layer so it can be done just before calling send (and after any
modifications of the send buffer by the MAD layer).

Tested on a non-cache-coherent PowerPC 440SPe system.

Signed-off-by: Sean Hefty <sean.hefty@intel.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>

Authored by Sean Hefty and committed by Roland Dreier.
34816ad9 ae7971a7

+475 -844
+79 -220
drivers/infiniband/core/agent.c
··· 37 37 * $Id: agent.c 1389 2004-12-27 22:56:47Z roland $ 38 38 */ 39 39 40 - #include <linux/dma-mapping.h> 41 - 42 - #include <asm/bug.h> 43 - 44 - #include <rdma/ib_smi.h> 45 - 46 - #include "smi.h" 47 - #include "agent_priv.h" 48 - #include "mad_priv.h" 49 40 #include "agent.h" 41 + #include "smi.h" 50 42 51 - spinlock_t ib_agent_port_list_lock; 43 + #define SPFX "ib_agent: " 44 + 45 + struct ib_agent_port_private { 46 + struct list_head port_list; 47 + struct ib_mad_agent *agent[2]; 48 + }; 49 + 50 + static DEFINE_SPINLOCK(ib_agent_port_list_lock); 52 51 static LIST_HEAD(ib_agent_port_list); 53 52 54 - /* 55 - * Caller must hold ib_agent_port_list_lock 56 - */ 57 - static inline struct ib_agent_port_private * 58 - __ib_get_agent_port(struct ib_device *device, int port_num, 59 - struct ib_mad_agent *mad_agent) 53 + static struct ib_agent_port_private * 54 + __ib_get_agent_port(struct ib_device *device, int port_num) 60 55 { 61 56 struct ib_agent_port_private *entry; 62 57 63 - BUG_ON(!(!!device ^ !!mad_agent)); /* Exactly one MUST be (!NULL) */ 64 - 65 - if (device) { 66 - list_for_each_entry(entry, &ib_agent_port_list, port_list) { 67 - if (entry->smp_agent->device == device && 68 - entry->port_num == port_num) 69 - return entry; 70 - } 71 - } else { 72 - list_for_each_entry(entry, &ib_agent_port_list, port_list) { 73 - if ((entry->smp_agent == mad_agent) || 74 - (entry->perf_mgmt_agent == mad_agent)) 75 - return entry; 76 - } 58 + list_for_each_entry(entry, &ib_agent_port_list, port_list) { 59 + if (entry->agent[0]->device == device && 60 + entry->agent[0]->port_num == port_num) 61 + return entry; 77 62 } 78 63 return NULL; 79 64 } 80 65 81 - static inline struct ib_agent_port_private * 82 - ib_get_agent_port(struct ib_device *device, int port_num, 83 - struct ib_mad_agent *mad_agent) 66 + static struct ib_agent_port_private * 67 + ib_get_agent_port(struct ib_device *device, int port_num) 84 68 { 85 69 struct ib_agent_port_private *entry; 86 70 unsigned 
long flags; 87 71 88 72 spin_lock_irqsave(&ib_agent_port_list_lock, flags); 89 - entry = __ib_get_agent_port(device, port_num, mad_agent); 73 + entry = __ib_get_agent_port(device, port_num); 90 74 spin_unlock_irqrestore(&ib_agent_port_list_lock, flags); 91 - 92 75 return entry; 93 76 } 94 77 ··· 83 100 84 101 if (smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) 85 102 return 1; 86 - port_priv = ib_get_agent_port(device, port_num, NULL); 103 + 104 + port_priv = ib_get_agent_port(device, port_num); 87 105 if (!port_priv) { 88 106 printk(KERN_DEBUG SPFX "smi_check_local_dr_smp %s port %d " 89 - "not open\n", 90 - device->name, port_num); 107 + "not open\n", device->name, port_num); 91 108 return 1; 92 109 } 93 110 94 - return smi_check_local_smp(port_priv->smp_agent, smp); 111 + return smi_check_local_smp(port_priv->agent[0], smp); 95 112 } 96 113 97 - static int agent_mad_send(struct ib_mad_agent *mad_agent, 98 - struct ib_agent_port_private *port_priv, 99 - struct ib_mad_private *mad_priv, 100 - struct ib_grh *grh, 101 - struct ib_wc *wc) 102 - { 103 - struct ib_agent_send_wr *agent_send_wr; 104 - struct ib_sge gather_list; 105 - struct ib_send_wr send_wr; 106 - struct ib_send_wr *bad_send_wr; 107 - struct ib_ah_attr ah_attr; 108 - unsigned long flags; 109 - int ret = 1; 110 - 111 - agent_send_wr = kmalloc(sizeof(*agent_send_wr), GFP_KERNEL); 112 - if (!agent_send_wr) 113 - goto out; 114 - agent_send_wr->mad = mad_priv; 115 - 116 - gather_list.addr = dma_map_single(mad_agent->device->dma_device, 117 - &mad_priv->mad, 118 - sizeof(mad_priv->mad), 119 - DMA_TO_DEVICE); 120 - gather_list.length = sizeof(mad_priv->mad); 121 - gather_list.lkey = mad_agent->mr->lkey; 122 - 123 - send_wr.next = NULL; 124 - send_wr.opcode = IB_WR_SEND; 125 - send_wr.sg_list = &gather_list; 126 - send_wr.num_sge = 1; 127 - send_wr.wr.ud.remote_qpn = wc->src_qp; /* DQPN */ 128 - send_wr.wr.ud.timeout_ms = 0; 129 - send_wr.send_flags = IB_SEND_SIGNALED | IB_SEND_SOLICITED; 130 - 131 - 
ah_attr.dlid = wc->slid; 132 - ah_attr.port_num = mad_agent->port_num; 133 - ah_attr.src_path_bits = wc->dlid_path_bits; 134 - ah_attr.sl = wc->sl; 135 - ah_attr.static_rate = 0; 136 - ah_attr.ah_flags = 0; /* No GRH */ 137 - if (mad_priv->mad.mad.mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT) { 138 - if (wc->wc_flags & IB_WC_GRH) { 139 - ah_attr.ah_flags = IB_AH_GRH; 140 - /* Should sgid be looked up ? */ 141 - ah_attr.grh.sgid_index = 0; 142 - ah_attr.grh.hop_limit = grh->hop_limit; 143 - ah_attr.grh.flow_label = be32_to_cpu( 144 - grh->version_tclass_flow) & 0xfffff; 145 - ah_attr.grh.traffic_class = (be32_to_cpu( 146 - grh->version_tclass_flow) >> 20) & 0xff; 147 - memcpy(ah_attr.grh.dgid.raw, 148 - grh->sgid.raw, 149 - sizeof(ah_attr.grh.dgid)); 150 - } 151 - } 152 - 153 - agent_send_wr->ah = ib_create_ah(mad_agent->qp->pd, &ah_attr); 154 - if (IS_ERR(agent_send_wr->ah)) { 155 - printk(KERN_ERR SPFX "No memory for address handle\n"); 156 - kfree(agent_send_wr); 157 - goto out; 158 - } 159 - 160 - send_wr.wr.ud.ah = agent_send_wr->ah; 161 - if (mad_priv->mad.mad.mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT) { 162 - send_wr.wr.ud.pkey_index = wc->pkey_index; 163 - send_wr.wr.ud.remote_qkey = IB_QP1_QKEY; 164 - } else { /* for SMPs */ 165 - send_wr.wr.ud.pkey_index = 0; 166 - send_wr.wr.ud.remote_qkey = 0; 167 - } 168 - send_wr.wr.ud.mad_hdr = &mad_priv->mad.mad.mad_hdr; 169 - send_wr.wr_id = (unsigned long)agent_send_wr; 170 - 171 - pci_unmap_addr_set(agent_send_wr, mapping, gather_list.addr); 172 - 173 - /* Send */ 174 - spin_lock_irqsave(&port_priv->send_list_lock, flags); 175 - if (ib_post_send_mad(mad_agent, &send_wr, &bad_send_wr)) { 176 - spin_unlock_irqrestore(&port_priv->send_list_lock, flags); 177 - dma_unmap_single(mad_agent->device->dma_device, 178 - pci_unmap_addr(agent_send_wr, mapping), 179 - sizeof(mad_priv->mad), 180 - DMA_TO_DEVICE); 181 - ib_destroy_ah(agent_send_wr->ah); 182 - kfree(agent_send_wr); 183 - } else { 184 - 
list_add_tail(&agent_send_wr->send_list, 185 - &port_priv->send_posted_list); 186 - spin_unlock_irqrestore(&port_priv->send_list_lock, flags); 187 - ret = 0; 188 - } 189 - 190 - out: 191 - return ret; 192 - } 193 - 194 - int agent_send(struct ib_mad_private *mad, 195 - struct ib_grh *grh, 196 - struct ib_wc *wc, 197 - struct ib_device *device, 198 - int port_num) 114 + int agent_send_response(struct ib_mad *mad, struct ib_grh *grh, 115 + struct ib_wc *wc, struct ib_device *device, 116 + int port_num, int qpn) 199 117 { 200 118 struct ib_agent_port_private *port_priv; 201 - struct ib_mad_agent *mad_agent; 119 + struct ib_mad_agent *agent; 120 + struct ib_mad_send_buf *send_buf; 121 + struct ib_ah *ah; 122 + int ret; 202 123 203 - port_priv = ib_get_agent_port(device, port_num, NULL); 124 + port_priv = ib_get_agent_port(device, port_num); 204 125 if (!port_priv) { 205 - printk(KERN_DEBUG SPFX "agent_send %s port %d not open\n", 206 - device->name, port_num); 207 - return 1; 126 + printk(KERN_ERR SPFX "Unable to find port agent\n"); 127 + return -ENODEV; 208 128 } 209 129 210 - /* Get mad agent based on mgmt_class in MAD */ 211 - switch (mad->mad.mad.mad_hdr.mgmt_class) { 212 - case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE: 213 - case IB_MGMT_CLASS_SUBN_LID_ROUTED: 214 - mad_agent = port_priv->smp_agent; 215 - break; 216 - case IB_MGMT_CLASS_PERF_MGMT: 217 - mad_agent = port_priv->perf_mgmt_agent; 218 - break; 219 - default: 220 - return 1; 130 + agent = port_priv->agent[qpn]; 131 + ah = ib_create_ah_from_wc(agent->qp->pd, wc, grh, port_num); 132 + if (IS_ERR(ah)) { 133 + ret = PTR_ERR(ah); 134 + printk(KERN_ERR SPFX "ib_create_ah_from_wc error:%d\n", ret); 135 + return ret; 221 136 } 222 137 223 - return agent_mad_send(mad_agent, port_priv, mad, grh, wc); 138 + send_buf = ib_create_send_mad(agent, wc->src_qp, wc->pkey_index, 0, 139 + IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA, 140 + GFP_KERNEL); 141 + if (IS_ERR(send_buf)) { 142 + ret = PTR_ERR(send_buf); 143 + printk(KERN_ERR SPFX 
"ib_create_send_mad error:%d\n", ret); 144 + goto err1; 145 + } 146 + 147 + memcpy(send_buf->mad, mad, sizeof *mad); 148 + send_buf->ah = ah; 149 + if ((ret = ib_post_send_mad(send_buf, NULL))) { 150 + printk(KERN_ERR SPFX "ib_post_send_mad error:%d\n", ret); 151 + goto err2; 152 + } 153 + return 0; 154 + err2: 155 + ib_free_send_mad(send_buf); 156 + err1: 157 + ib_destroy_ah(ah); 158 + return ret; 224 159 } 225 160 226 161 static void agent_send_handler(struct ib_mad_agent *mad_agent, 227 162 struct ib_mad_send_wc *mad_send_wc) 228 163 { 229 - struct ib_agent_port_private *port_priv; 230 - struct ib_agent_send_wr *agent_send_wr; 231 - unsigned long flags; 232 - 233 - /* Find matching MAD agent */ 234 - port_priv = ib_get_agent_port(NULL, 0, mad_agent); 235 - if (!port_priv) { 236 - printk(KERN_ERR SPFX "agent_send_handler: no matching MAD " 237 - "agent %p\n", mad_agent); 238 - return; 239 - } 240 - 241 - agent_send_wr = (struct ib_agent_send_wr *)(unsigned long)mad_send_wc->wr_id; 242 - spin_lock_irqsave(&port_priv->send_list_lock, flags); 243 - /* Remove completed send from posted send MAD list */ 244 - list_del(&agent_send_wr->send_list); 245 - spin_unlock_irqrestore(&port_priv->send_list_lock, flags); 246 - 247 - dma_unmap_single(mad_agent->device->dma_device, 248 - pci_unmap_addr(agent_send_wr, mapping), 249 - sizeof(agent_send_wr->mad->mad), 250 - DMA_TO_DEVICE); 251 - 252 - ib_destroy_ah(agent_send_wr->ah); 253 - 254 - /* Release allocated memory */ 255 - kmem_cache_free(ib_mad_cache, agent_send_wr->mad); 256 - kfree(agent_send_wr); 164 + ib_destroy_ah(mad_send_wc->send_buf->ah); 165 + ib_free_send_mad(mad_send_wc->send_buf); 257 166 } 258 167 259 168 int ib_agent_port_open(struct ib_device *device, int port_num) 260 169 { 261 - int ret; 262 170 struct ib_agent_port_private *port_priv; 263 171 unsigned long flags; 264 - 265 - /* First, check if port already open for SMI */ 266 - port_priv = ib_get_agent_port(device, port_num, NULL); 267 - if (port_priv) { 
268 - printk(KERN_DEBUG SPFX "%s port %d already open\n", 269 - device->name, port_num); 270 - return 0; 271 - } 172 + int ret; 272 173 273 174 /* Create new device info */ 274 175 port_priv = kmalloc(sizeof *port_priv, GFP_KERNEL); ··· 161 294 ret = -ENOMEM; 162 295 goto error1; 163 296 } 164 - 165 297 memset(port_priv, 0, sizeof *port_priv); 166 - port_priv->port_num = port_num; 167 - spin_lock_init(&port_priv->send_list_lock); 168 - INIT_LIST_HEAD(&port_priv->send_posted_list); 169 298 170 - /* Obtain send only MAD agent for SM class (SMI QP) */ 171 - port_priv->smp_agent = ib_register_mad_agent(device, port_num, 172 - IB_QPT_SMI, 173 - NULL, 0, 299 + /* Obtain send only MAD agent for SMI QP */ 300 + port_priv->agent[0] = ib_register_mad_agent(device, port_num, 301 + IB_QPT_SMI, NULL, 0, 174 302 &agent_send_handler, 175 - NULL, NULL); 176 - 177 - if (IS_ERR(port_priv->smp_agent)) { 178 - ret = PTR_ERR(port_priv->smp_agent); 303 + NULL, NULL); 304 + if (IS_ERR(port_priv->agent[0])) { 305 + ret = PTR_ERR(port_priv->agent[0]); 179 306 goto error2; 180 307 } 181 308 182 - /* Obtain send only MAD agent for PerfMgmt class (GSI QP) */ 183 - port_priv->perf_mgmt_agent = ib_register_mad_agent(device, port_num, 184 - IB_QPT_GSI, 185 - NULL, 0, 186 - &agent_send_handler, 187 - NULL, NULL); 188 - if (IS_ERR(port_priv->perf_mgmt_agent)) { 189 - ret = PTR_ERR(port_priv->perf_mgmt_agent); 309 + /* Obtain send only MAD agent for GSI QP */ 310 + port_priv->agent[1] = ib_register_mad_agent(device, port_num, 311 + IB_QPT_GSI, NULL, 0, 312 + &agent_send_handler, 313 + NULL, NULL); 314 + if (IS_ERR(port_priv->agent[1])) { 315 + ret = PTR_ERR(port_priv->agent[1]); 190 316 goto error3; 191 317 } 192 318 ··· 190 330 return 0; 191 331 192 332 error3: 193 - ib_unregister_mad_agent(port_priv->smp_agent); 333 + ib_unregister_mad_agent(port_priv->agent[0]); 194 334 error2: 195 335 kfree(port_priv); 196 336 error1: ··· 203 343 unsigned long flags; 204 344 205 345 
spin_lock_irqsave(&ib_agent_port_list_lock, flags); 206 - port_priv = __ib_get_agent_port(device, port_num, NULL); 346 + port_priv = __ib_get_agent_port(device, port_num); 207 347 if (port_priv == NULL) { 208 348 spin_unlock_irqrestore(&ib_agent_port_list_lock, flags); 209 349 printk(KERN_ERR SPFX "Port %d not found\n", port_num); ··· 212 352 list_del(&port_priv->port_list); 213 353 spin_unlock_irqrestore(&ib_agent_port_list_lock, flags); 214 354 215 - ib_unregister_mad_agent(port_priv->perf_mgmt_agent); 216 - ib_unregister_mad_agent(port_priv->smp_agent); 355 + ib_unregister_mad_agent(port_priv->agent[1]); 356 + ib_unregister_mad_agent(port_priv->agent[0]); 217 357 kfree(port_priv); 218 - 219 358 return 0; 220 359 }
+5 -8
drivers/infiniband/core/agent.h
··· 39 39 #ifndef __AGENT_H_ 40 40 #define __AGENT_H_ 41 41 42 - extern spinlock_t ib_agent_port_list_lock; 42 + #include <rdma/ib_mad.h> 43 43 44 - extern int ib_agent_port_open(struct ib_device *device, 45 - int port_num); 44 + extern int ib_agent_port_open(struct ib_device *device, int port_num); 46 45 47 46 extern int ib_agent_port_close(struct ib_device *device, int port_num); 48 47 49 - extern int agent_send(struct ib_mad_private *mad, 50 - struct ib_grh *grh, 51 - struct ib_wc *wc, 52 - struct ib_device *device, 53 - int port_num); 48 + extern int agent_send_response(struct ib_mad *mad, struct ib_grh *grh, 49 + struct ib_wc *wc, struct ib_device *device, 50 + int port_num, int qpn); 54 51 55 52 #endif /* __AGENT_H_ */
-62
drivers/infiniband/core/agent_priv.h
··· 1 - /* 2 - * Copyright (c) 2004, 2005 Mellanox Technologies Ltd. All rights reserved. 3 - * Copyright (c) 2004, 2005 Infinicon Corporation. All rights reserved. 4 - * Copyright (c) 2004, 2005 Intel Corporation. All rights reserved. 5 - * Copyright (c) 2004, 2005 Topspin Corporation. All rights reserved. 6 - * Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved. 7 - * 8 - * This software is available to you under a choice of one of two 9 - * licenses. You may choose to be licensed under the terms of the GNU 10 - * General Public License (GPL) Version 2, available from the file 11 - * COPYING in the main directory of this source tree, or the 12 - * OpenIB.org BSD license below: 13 - * 14 - * Redistribution and use in source and binary forms, with or 15 - * without modification, are permitted provided that the following 16 - * conditions are met: 17 - * 18 - * - Redistributions of source code must retain the above 19 - * copyright notice, this list of conditions and the following 20 - * disclaimer. 21 - * 22 - * - Redistributions in binary form must reproduce the above 23 - * copyright notice, this list of conditions and the following 24 - * disclaimer in the documentation and/or other materials 25 - * provided with the distribution. 26 - * 27 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 28 - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 29 - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 30 - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 31 - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 32 - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 33 - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 34 - * SOFTWARE. 
35 - * 36 - * $Id: agent_priv.h 1640 2005-01-24 22:39:02Z halr $ 37 - */ 38 - 39 - #ifndef __IB_AGENT_PRIV_H__ 40 - #define __IB_AGENT_PRIV_H__ 41 - 42 - #include <linux/pci.h> 43 - 44 - #define SPFX "ib_agent: " 45 - 46 - struct ib_agent_send_wr { 47 - struct list_head send_list; 48 - struct ib_ah *ah; 49 - struct ib_mad_private *mad; 50 - DECLARE_PCI_UNMAP_ADDR(mapping) 51 - }; 52 - 53 - struct ib_agent_port_private { 54 - struct list_head port_list; 55 - struct list_head send_posted_list; 56 - spinlock_t send_list_lock; 57 - int port_num; 58 - struct ib_mad_agent *smp_agent; /* SM class */ 59 - struct ib_mad_agent *perf_mgmt_agent; /* PerfMgmt class */ 60 - }; 61 - 62 - #endif /* __IB_AGENT_PRIV_H__ */
+45 -92
drivers/infiniband/core/cm.c
··· 176 176 177 177 m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn, 178 178 cm_id_priv->av.pkey_index, 179 - ah, 0, sizeof(struct ib_mad_hdr), 180 - sizeof(struct ib_mad)-sizeof(struct ib_mad_hdr), 179 + 0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA, 181 180 GFP_ATOMIC); 182 181 if (IS_ERR(m)) { 183 182 ib_destroy_ah(ah); ··· 184 185 } 185 186 186 187 /* Timeout set by caller if response is expected. */ 187 - m->send_wr.wr.ud.retries = cm_id_priv->max_cm_retries; 188 + m->ah = ah; 189 + m->retries = cm_id_priv->max_cm_retries; 188 190 189 191 atomic_inc(&cm_id_priv->refcount); 190 192 m->context[0] = cm_id_priv; ··· 206 206 return PTR_ERR(ah); 207 207 208 208 m = ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index, 209 - ah, 0, sizeof(struct ib_mad_hdr), 210 - sizeof(struct ib_mad)-sizeof(struct ib_mad_hdr), 209 + 0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA, 211 210 GFP_ATOMIC); 212 211 if (IS_ERR(m)) { 213 212 ib_destroy_ah(ah); 214 213 return PTR_ERR(m); 215 214 } 215 + m->ah = ah; 216 216 *msg = m; 217 217 return 0; 218 218 } 219 219 220 220 static void cm_free_msg(struct ib_mad_send_buf *msg) 221 221 { 222 - ib_destroy_ah(msg->send_wr.wr.ud.ah); 222 + ib_destroy_ah(msg->ah); 223 223 if (msg->context[0]) 224 224 cm_deref_id(msg->context[0]); 225 225 ib_free_send_mad(msg); ··· 678 678 break; 679 679 case IB_CM_SIDR_REQ_SENT: 680 680 cm_id->state = IB_CM_IDLE; 681 - ib_cancel_mad(cm_id_priv->av.port->mad_agent, 682 - (unsigned long) cm_id_priv->msg); 681 + ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 683 682 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 684 683 break; 685 684 case IB_CM_SIDR_REQ_RCVD: ··· 689 690 case IB_CM_MRA_REQ_RCVD: 690 691 case IB_CM_REP_SENT: 691 692 case IB_CM_MRA_REP_RCVD: 692 - ib_cancel_mad(cm_id_priv->av.port->mad_agent, 693 - (unsigned long) cm_id_priv->msg); 693 + ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 694 694 /* Fall through */ 695 695 case IB_CM_REQ_RCVD: 696 696 case 
IB_CM_MRA_REQ_SENT: ··· 706 708 ib_send_cm_dreq(cm_id, NULL, 0); 707 709 goto retest; 708 710 case IB_CM_DREQ_SENT: 709 - ib_cancel_mad(cm_id_priv->av.port->mad_agent, 710 - (unsigned long) cm_id_priv->msg); 711 + ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 711 712 cm_enter_timewait(cm_id_priv); 712 713 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 713 714 break; ··· 880 883 struct ib_cm_req_param *param) 881 884 { 882 885 struct cm_id_private *cm_id_priv; 883 - struct ib_send_wr *bad_send_wr; 884 886 struct cm_req_msg *req_msg; 885 887 unsigned long flags; 886 888 int ret; ··· 932 936 req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad; 933 937 cm_format_req(req_msg, cm_id_priv, param); 934 938 cm_id_priv->tid = req_msg->hdr.tid; 935 - cm_id_priv->msg->send_wr.wr.ud.timeout_ms = cm_id_priv->timeout_ms; 939 + cm_id_priv->msg->timeout_ms = cm_id_priv->timeout_ms; 936 940 cm_id_priv->msg->context[1] = (void *) (unsigned long) IB_CM_REQ_SENT; 937 941 938 942 cm_id_priv->local_qpn = cm_req_get_local_qpn(req_msg); ··· 941 945 cm_req_get_primary_local_ack_timeout(req_msg); 942 946 943 947 spin_lock_irqsave(&cm_id_priv->lock, flags); 944 - ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent, 945 - &cm_id_priv->msg->send_wr, &bad_send_wr); 948 + ret = ib_post_send_mad(cm_id_priv->msg, NULL); 946 949 if (ret) { 947 950 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 948 951 goto error2; ··· 964 969 void *ari, u8 ari_length) 965 970 { 966 971 struct ib_mad_send_buf *msg = NULL; 967 - struct ib_send_wr *bad_send_wr; 968 972 struct cm_rej_msg *rej_msg, *rcv_msg; 969 973 int ret; 970 974 ··· 986 992 memcpy(rej_msg->ari, ari, ari_length); 987 993 } 988 994 989 - ret = ib_post_send_mad(port->mad_agent, &msg->send_wr, &bad_send_wr); 995 + ret = ib_post_send_mad(msg, NULL); 990 996 if (ret) 991 997 cm_free_msg(msg); 992 998 ··· 1166 1172 struct cm_id_private *cm_id_priv) 1167 1173 { 1168 1174 struct ib_mad_send_buf *msg = NULL; 1169 - struct ib_send_wr 
*bad_send_wr; 1170 1175 unsigned long flags; 1171 1176 int ret; 1172 1177 ··· 1194 1201 } 1195 1202 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1196 1203 1197 - ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent, &msg->send_wr, 1198 - &bad_send_wr); 1204 + ret = ib_post_send_mad(msg, NULL); 1199 1205 if (ret) 1200 1206 goto free; 1201 1207 return; ··· 1359 1367 struct cm_id_private *cm_id_priv; 1360 1368 struct ib_mad_send_buf *msg; 1361 1369 struct cm_rep_msg *rep_msg; 1362 - struct ib_send_wr *bad_send_wr; 1363 1370 unsigned long flags; 1364 1371 int ret; 1365 1372 ··· 1380 1389 1381 1390 rep_msg = (struct cm_rep_msg *) msg->mad; 1382 1391 cm_format_rep(rep_msg, cm_id_priv, param); 1383 - msg->send_wr.wr.ud.timeout_ms = cm_id_priv->timeout_ms; 1392 + msg->timeout_ms = cm_id_priv->timeout_ms; 1384 1393 msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT; 1385 1394 1386 - ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent, 1387 - &msg->send_wr, &bad_send_wr); 1395 + ret = ib_post_send_mad(msg, NULL); 1388 1396 if (ret) { 1389 1397 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1390 1398 cm_free_msg(msg); ··· 1421 1431 { 1422 1432 struct cm_id_private *cm_id_priv; 1423 1433 struct ib_mad_send_buf *msg; 1424 - struct ib_send_wr *bad_send_wr; 1425 1434 unsigned long flags; 1426 1435 void *data; 1427 1436 int ret; ··· 1447 1458 cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv, 1448 1459 private_data, private_data_len); 1449 1460 1450 - ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent, 1451 - &msg->send_wr, &bad_send_wr); 1461 + ret = ib_post_send_mad(msg, NULL); 1452 1462 if (ret) { 1453 1463 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1454 1464 cm_free_msg(msg); ··· 1492 1504 struct cm_id_private *cm_id_priv; 1493 1505 struct cm_rep_msg *rep_msg; 1494 1506 struct ib_mad_send_buf *msg = NULL; 1495 - struct ib_send_wr *bad_send_wr; 1496 1507 unsigned long flags; 1497 1508 int ret; 1498 1509 ··· 1519 1532 goto unlock; 1520 1533 
spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1521 1534 1522 - ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent, &msg->send_wr, 1523 - &bad_send_wr); 1535 + ret = ib_post_send_mad(msg, NULL); 1524 1536 if (ret) 1525 1537 goto free; 1526 1538 goto deref; ··· 1587 1601 1588 1602 /* todo: handle peer_to_peer */ 1589 1603 1590 - ib_cancel_mad(cm_id_priv->av.port->mad_agent, 1591 - (unsigned long) cm_id_priv->msg); 1604 + ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 1592 1605 ret = atomic_inc_and_test(&cm_id_priv->work_count); 1593 1606 if (!ret) 1594 1607 list_add_tail(&work->list, &cm_id_priv->work_list); ··· 1621 1636 goto out; 1622 1637 } 1623 1638 1624 - ib_cancel_mad(cm_id_priv->av.port->mad_agent, 1625 - (unsigned long) cm_id_priv->msg); 1639 + ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 1626 1640 ret = atomic_inc_and_test(&cm_id_priv->work_count); 1627 1641 if (!ret) 1628 1642 list_add_tail(&work->list, &cm_id_priv->work_list); ··· 1660 1676 } 1661 1677 cm_id_priv->id.state = IB_CM_ESTABLISHED; 1662 1678 1663 - ib_cancel_mad(cm_id_priv->av.port->mad_agent, 1664 - (unsigned long) cm_id_priv->msg); 1679 + ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 1665 1680 ret = atomic_inc_and_test(&cm_id_priv->work_count); 1666 1681 if (!ret) 1667 1682 list_add_tail(&work->list, &cm_id_priv->work_list); ··· 1697 1714 { 1698 1715 struct cm_id_private *cm_id_priv; 1699 1716 struct ib_mad_send_buf *msg; 1700 - struct ib_send_wr *bad_send_wr; 1701 1717 unsigned long flags; 1702 1718 int ret; 1703 1719 ··· 1718 1736 1719 1737 cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv, 1720 1738 private_data, private_data_len); 1721 - msg->send_wr.wr.ud.timeout_ms = cm_id_priv->timeout_ms; 1739 + msg->timeout_ms = cm_id_priv->timeout_ms; 1722 1740 msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT; 1723 1741 1724 - ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent, 1725 - &msg->send_wr, &bad_send_wr); 
1742 + ret = ib_post_send_mad(msg, NULL); 1726 1743 if (ret) { 1727 1744 cm_enter_timewait(cm_id_priv); 1728 1745 spin_unlock_irqrestore(&cm_id_priv->lock, flags); ··· 1755 1774 { 1756 1775 struct cm_id_private *cm_id_priv; 1757 1776 struct ib_mad_send_buf *msg; 1758 - struct ib_send_wr *bad_send_wr; 1759 1777 unsigned long flags; 1760 1778 void *data; 1761 1779 int ret; ··· 1784 1804 cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv, 1785 1805 private_data, private_data_len); 1786 1806 1787 - ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent, &msg->send_wr, 1788 - &bad_send_wr); 1807 + ret = ib_post_send_mad(msg, NULL); 1789 1808 if (ret) { 1790 1809 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1791 1810 cm_free_msg(msg); ··· 1801 1822 struct cm_id_private *cm_id_priv; 1802 1823 struct cm_dreq_msg *dreq_msg; 1803 1824 struct ib_mad_send_buf *msg = NULL; 1804 - struct ib_send_wr *bad_send_wr; 1805 1825 unsigned long flags; 1806 1826 int ret; 1807 1827 ··· 1819 1841 switch (cm_id_priv->id.state) { 1820 1842 case IB_CM_REP_SENT: 1821 1843 case IB_CM_DREQ_SENT: 1822 - ib_cancel_mad(cm_id_priv->av.port->mad_agent, 1823 - (unsigned long) cm_id_priv->msg); 1844 + ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 1824 1845 break; 1825 1846 case IB_CM_ESTABLISHED: 1826 1847 case IB_CM_MRA_REP_RCVD: ··· 1833 1856 cm_id_priv->private_data_len); 1834 1857 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 1835 1858 1836 - if (ib_post_send_mad(cm_id_priv->av.port->mad_agent, 1837 - &msg->send_wr, &bad_send_wr)) 1859 + if (ib_post_send_mad(msg, NULL)) 1838 1860 cm_free_msg(msg); 1839 1861 goto deref; 1840 1862 default: ··· 1880 1904 } 1881 1905 cm_enter_timewait(cm_id_priv); 1882 1906 1883 - ib_cancel_mad(cm_id_priv->av.port->mad_agent, 1884 - (unsigned long) cm_id_priv->msg); 1907 + ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 1885 1908 ret = atomic_inc_and_test(&cm_id_priv->work_count); 1886 1909 if (!ret) 1887 1910 
list_add_tail(&work->list, &cm_id_priv->work_list); ··· 1905 1930 { 1906 1931 struct cm_id_private *cm_id_priv; 1907 1932 struct ib_mad_send_buf *msg; 1908 - struct ib_send_wr *bad_send_wr; 1909 1933 unsigned long flags; 1910 1934 int ret; 1911 1935 ··· 1948 1974 if (ret) 1949 1975 goto out; 1950 1976 1951 - ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent, 1952 - &msg->send_wr, &bad_send_wr); 1977 + ret = ib_post_send_mad(msg, NULL); 1953 1978 if (ret) 1954 1979 cm_free_msg(msg); 1955 1980 ··· 2024 2051 case IB_CM_MRA_REQ_RCVD: 2025 2052 case IB_CM_REP_SENT: 2026 2053 case IB_CM_MRA_REP_RCVD: 2027 - ib_cancel_mad(cm_id_priv->av.port->mad_agent, 2028 - (unsigned long) cm_id_priv->msg); 2054 + ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 2029 2055 /* fall through */ 2030 2056 case IB_CM_REQ_RCVD: 2031 2057 case IB_CM_MRA_REQ_SENT: ··· 2034 2062 cm_reset_to_idle(cm_id_priv); 2035 2063 break; 2036 2064 case IB_CM_DREQ_SENT: 2037 - ib_cancel_mad(cm_id_priv->av.port->mad_agent, 2038 - (unsigned long) cm_id_priv->msg); 2065 + ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 2039 2066 /* fall through */ 2040 2067 case IB_CM_REP_RCVD: 2041 2068 case IB_CM_MRA_REP_SENT: ··· 2069 2098 { 2070 2099 struct cm_id_private *cm_id_priv; 2071 2100 struct ib_mad_send_buf *msg; 2072 - struct ib_send_wr *bad_send_wr; 2073 2101 void *data; 2074 2102 unsigned long flags; 2075 2103 int ret; ··· 2092 2122 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv, 2093 2123 CM_MSG_RESPONSE_REQ, service_timeout, 2094 2124 private_data, private_data_len); 2095 - ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent, 2096 - &msg->send_wr, &bad_send_wr); 2125 + ret = ib_post_send_mad(msg, NULL); 2097 2126 if (ret) 2098 2127 goto error2; 2099 2128 cm_id->state = IB_CM_MRA_REQ_SENT; ··· 2105 2136 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv, 2106 2137 CM_MSG_RESPONSE_REP, service_timeout, 2107 2138 private_data, private_data_len); 2108 - ret = 
ib_post_send_mad(cm_id_priv->av.port->mad_agent, 2109 - &msg->send_wr, &bad_send_wr); 2139 + ret = ib_post_send_mad(msg, NULL); 2110 2140 if (ret) 2111 2141 goto error2; 2112 2142 cm_id->state = IB_CM_MRA_REP_SENT; ··· 2118 2150 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv, 2119 2151 CM_MSG_RESPONSE_OTHER, service_timeout, 2120 2152 private_data, private_data_len); 2121 - ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent, 2122 - &msg->send_wr, &bad_send_wr); 2153 + ret = ib_post_send_mad(msg, NULL); 2123 2154 if (ret) 2124 2155 goto error2; 2125 2156 cm_id->lap_state = IB_CM_MRA_LAP_SENT; ··· 2180 2213 case IB_CM_REQ_SENT: 2181 2214 if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REQ || 2182 2215 ib_modify_mad(cm_id_priv->av.port->mad_agent, 2183 - (unsigned long) cm_id_priv->msg, timeout)) 2216 + cm_id_priv->msg, timeout)) 2184 2217 goto out; 2185 2218 cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD; 2186 2219 break; 2187 2220 case IB_CM_REP_SENT: 2188 2221 if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REP || 2189 2222 ib_modify_mad(cm_id_priv->av.port->mad_agent, 2190 - (unsigned long) cm_id_priv->msg, timeout)) 2223 + cm_id_priv->msg, timeout)) 2191 2224 goto out; 2192 2225 cm_id_priv->id.state = IB_CM_MRA_REP_RCVD; 2193 2226 break; ··· 2195 2228 if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_OTHER || 2196 2229 cm_id_priv->id.lap_state != IB_CM_LAP_SENT || 2197 2230 ib_modify_mad(cm_id_priv->av.port->mad_agent, 2198 - (unsigned long) cm_id_priv->msg, timeout)) 2231 + cm_id_priv->msg, timeout)) 2199 2232 goto out; 2200 2233 cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD; 2201 2234 break; ··· 2258 2291 { 2259 2292 struct cm_id_private *cm_id_priv; 2260 2293 struct ib_mad_send_buf *msg; 2261 - struct ib_send_wr *bad_send_wr; 2262 2294 unsigned long flags; 2263 2295 int ret; 2264 2296 ··· 2278 2312 2279 2313 cm_format_lap((struct cm_lap_msg *) msg->mad, cm_id_priv, 2280 2314 alternate_path, private_data, private_data_len); 2281 - 
msg->send_wr.wr.ud.timeout_ms = cm_id_priv->timeout_ms; 2315 + msg->timeout_ms = cm_id_priv->timeout_ms; 2282 2316 msg->context[1] = (void *) (unsigned long) IB_CM_ESTABLISHED; 2283 2317 2284 - ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent, 2285 - &msg->send_wr, &bad_send_wr); 2318 + ret = ib_post_send_mad(msg, NULL); 2286 2319 if (ret) { 2287 2320 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2288 2321 cm_free_msg(msg); ··· 2325 2360 struct cm_lap_msg *lap_msg; 2326 2361 struct ib_cm_lap_event_param *param; 2327 2362 struct ib_mad_send_buf *msg = NULL; 2328 - struct ib_send_wr *bad_send_wr; 2329 2363 unsigned long flags; 2330 2364 int ret; 2331 2365 ··· 2358 2394 cm_id_priv->private_data_len); 2359 2395 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2360 2396 2361 - if (ib_post_send_mad(cm_id_priv->av.port->mad_agent, 2362 - &msg->send_wr, &bad_send_wr)) 2397 + if (ib_post_send_mad(msg, NULL)) 2363 2398 cm_free_msg(msg); 2364 2399 goto deref; 2365 2400 default: ··· 2414 2451 { 2415 2452 struct cm_id_private *cm_id_priv; 2416 2453 struct ib_mad_send_buf *msg; 2417 - struct ib_send_wr *bad_send_wr; 2418 2454 unsigned long flags; 2419 2455 int ret; 2420 2456 ··· 2436 2474 2437 2475 cm_format_apr((struct cm_apr_msg *) msg->mad, cm_id_priv, status, 2438 2476 info, info_length, private_data, private_data_len); 2439 - ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent, 2440 - &msg->send_wr, &bad_send_wr); 2477 + ret = ib_post_send_mad(msg, NULL); 2441 2478 if (ret) { 2442 2479 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2443 2480 cm_free_msg(msg); ··· 2475 2514 goto out; 2476 2515 } 2477 2516 cm_id_priv->id.lap_state = IB_CM_LAP_IDLE; 2478 - ib_cancel_mad(cm_id_priv->av.port->mad_agent, 2479 - (unsigned long) cm_id_priv->msg); 2517 + ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 2480 2518 cm_id_priv->msg = NULL; 2481 2519 2482 2520 ret = atomic_inc_and_test(&cm_id_priv->work_count); ··· 2550 2590 { 2551 2591 struct cm_id_private 
*cm_id_priv; 2552 2592 struct ib_mad_send_buf *msg; 2553 - struct ib_send_wr *bad_send_wr; 2554 2593 unsigned long flags; 2555 2594 int ret; 2556 2595 ··· 2572 2613 2573 2614 cm_format_sidr_req((struct cm_sidr_req_msg *) msg->mad, cm_id_priv, 2574 2615 param); 2575 - msg->send_wr.wr.ud.timeout_ms = cm_id_priv->timeout_ms; 2616 + msg->timeout_ms = cm_id_priv->timeout_ms; 2576 2617 msg->context[1] = (void *) (unsigned long) IB_CM_SIDR_REQ_SENT; 2577 2618 2578 2619 spin_lock_irqsave(&cm_id_priv->lock, flags); 2579 2620 if (cm_id->state == IB_CM_IDLE) 2580 - ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent, 2581 - &msg->send_wr, &bad_send_wr); 2621 + ret = ib_post_send_mad(msg, NULL); 2582 2622 else 2583 2623 ret = -EINVAL; 2584 2624 ··· 2691 2733 { 2692 2734 struct cm_id_private *cm_id_priv; 2693 2735 struct ib_mad_send_buf *msg; 2694 - struct ib_send_wr *bad_send_wr; 2695 2736 unsigned long flags; 2696 2737 int ret; 2697 2738 ··· 2712 2755 2713 2756 cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv, 2714 2757 param); 2715 - ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent, 2716 - &msg->send_wr, &bad_send_wr); 2758 + ret = ib_post_send_mad(msg, NULL); 2717 2759 if (ret) { 2718 2760 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2719 2761 cm_free_msg(msg); ··· 2765 2809 goto out; 2766 2810 } 2767 2811 cm_id_priv->id.state = IB_CM_IDLE; 2768 - ib_cancel_mad(cm_id_priv->av.port->mad_agent, 2769 - (unsigned long) cm_id_priv->msg); 2812 + ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg); 2770 2813 spin_unlock_irqrestore(&cm_id_priv->lock, flags); 2771 2814 2772 2815 cm_format_sidr_rep_event(work); ··· 2833 2878 static void cm_send_handler(struct ib_mad_agent *mad_agent, 2834 2879 struct ib_mad_send_wc *mad_send_wc) 2835 2880 { 2836 - struct ib_mad_send_buf *msg; 2837 - 2838 - msg = (struct ib_mad_send_buf *)(unsigned long)mad_send_wc->wr_id; 2881 + struct ib_mad_send_buf *msg = mad_send_wc->send_buf; 2839 2882 2840 2883 switch 
(mad_send_wc->status) { 2841 2884 case IB_WC_SUCCESS:
+122 -160
drivers/infiniband/core/mad.c
··· 579 579 } 580 580 581 581 static void snoop_send(struct ib_mad_qp_info *qp_info, 582 - struct ib_send_wr *send_wr, 582 + struct ib_mad_send_buf *send_buf, 583 583 struct ib_mad_send_wc *mad_send_wc, 584 584 int mad_snoop_flags) 585 585 { ··· 597 597 atomic_inc(&mad_snoop_priv->refcount); 598 598 spin_unlock_irqrestore(&qp_info->snoop_lock, flags); 599 599 mad_snoop_priv->agent.snoop_handler(&mad_snoop_priv->agent, 600 - send_wr, mad_send_wc); 600 + send_buf, mad_send_wc); 601 601 if (atomic_dec_and_test(&mad_snoop_priv->refcount)) 602 602 wake_up(&mad_snoop_priv->wait); 603 603 spin_lock_irqsave(&qp_info->snoop_lock, flags); ··· 654 654 * Return < 0 if error 655 655 */ 656 656 static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv, 657 - struct ib_smp *smp, 658 - struct ib_send_wr *send_wr) 657 + struct ib_mad_send_wr_private *mad_send_wr) 659 658 { 660 659 int ret; 660 + struct ib_smp *smp = mad_send_wr->send_buf.mad; 661 661 unsigned long flags; 662 662 struct ib_mad_local_private *local; 663 663 struct ib_mad_private *mad_priv; ··· 666 666 struct ib_device *device = mad_agent_priv->agent.device; 667 667 u8 port_num = mad_agent_priv->agent.port_num; 668 668 struct ib_wc mad_wc; 669 + struct ib_send_wr *send_wr = &mad_send_wr->send_wr; 669 670 670 671 if (!smi_handle_dr_smp_send(smp, device->node_type, port_num)) { 671 672 ret = -EINVAL; ··· 746 745 goto out; 747 746 } 748 747 749 - local->send_wr = *send_wr; 750 - local->send_wr.sg_list = local->sg_list; 751 - memcpy(local->sg_list, send_wr->sg_list, 752 - sizeof *send_wr->sg_list * send_wr->num_sge); 753 - local->send_wr.next = NULL; 754 - local->tid = send_wr->wr.ud.mad_hdr->tid; 755 - local->wr_id = send_wr->wr_id; 748 + local->mad_send_wr = mad_send_wr; 756 749 /* Reference MAD agent until send side of local completion handled */ 757 750 atomic_inc(&mad_agent_priv->refcount); 758 751 /* Queue local completion to local list */ ··· 776 781 777 782 struct ib_mad_send_buf * 
ib_create_send_mad(struct ib_mad_agent *mad_agent, 778 783 u32 remote_qpn, u16 pkey_index, 779 - struct ib_ah *ah, int rmpp_active, 784 + int rmpp_active, 780 785 int hdr_len, int data_len, 781 786 gfp_t gfp_mask) 782 787 { 783 788 struct ib_mad_agent_private *mad_agent_priv; 784 - struct ib_mad_send_buf *send_buf; 789 + struct ib_mad_send_wr_private *mad_send_wr; 785 790 int buf_size; 786 791 void *buf; 787 792 788 - mad_agent_priv = container_of(mad_agent, 789 - struct ib_mad_agent_private, agent); 793 + mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private, 794 + agent); 790 795 buf_size = get_buf_length(hdr_len, data_len); 791 796 792 797 if ((!mad_agent->rmpp_version && ··· 794 799 (!rmpp_active && buf_size > sizeof(struct ib_mad))) 795 800 return ERR_PTR(-EINVAL); 796 801 797 - buf = kmalloc(sizeof *send_buf + buf_size, gfp_mask); 802 + buf = kmalloc(sizeof *mad_send_wr + buf_size, gfp_mask); 798 803 if (!buf) 799 804 return ERR_PTR(-ENOMEM); 800 - memset(buf, 0, sizeof *send_buf + buf_size); 805 + memset(buf, 0, sizeof *mad_send_wr + buf_size); 801 806 802 - send_buf = buf + buf_size; 803 - send_buf->mad = buf; 807 + mad_send_wr = buf + buf_size; 808 + mad_send_wr->send_buf.mad = buf; 804 809 805 - send_buf->sge.addr = dma_map_single(mad_agent->device->dma_device, 806 - buf, buf_size, DMA_TO_DEVICE); 807 - pci_unmap_addr_set(send_buf, mapping, send_buf->sge.addr); 808 - send_buf->sge.length = buf_size; 809 - send_buf->sge.lkey = mad_agent->mr->lkey; 810 + mad_send_wr->mad_agent_priv = mad_agent_priv; 811 + mad_send_wr->sg_list[0].length = buf_size; 812 + mad_send_wr->sg_list[0].lkey = mad_agent->mr->lkey; 810 813 811 - send_buf->send_wr.wr_id = (unsigned long) send_buf; 812 - send_buf->send_wr.sg_list = &send_buf->sge; 813 - send_buf->send_wr.num_sge = 1; 814 - send_buf->send_wr.opcode = IB_WR_SEND; 815 - send_buf->send_wr.send_flags = IB_SEND_SIGNALED; 816 - send_buf->send_wr.wr.ud.ah = ah; 817 - send_buf->send_wr.wr.ud.mad_hdr = 
&send_buf->mad->mad_hdr; 818 - send_buf->send_wr.wr.ud.remote_qpn = remote_qpn; 819 - send_buf->send_wr.wr.ud.remote_qkey = IB_QP_SET_QKEY; 820 - send_buf->send_wr.wr.ud.pkey_index = pkey_index; 814 + mad_send_wr->send_wr.wr_id = (unsigned long) mad_send_wr; 815 + mad_send_wr->send_wr.sg_list = mad_send_wr->sg_list; 816 + mad_send_wr->send_wr.num_sge = 1; 817 + mad_send_wr->send_wr.opcode = IB_WR_SEND; 818 + mad_send_wr->send_wr.send_flags = IB_SEND_SIGNALED; 819 + mad_send_wr->send_wr.wr.ud.remote_qpn = remote_qpn; 820 + mad_send_wr->send_wr.wr.ud.remote_qkey = IB_QP_SET_QKEY; 821 + mad_send_wr->send_wr.wr.ud.pkey_index = pkey_index; 821 822 822 823 if (rmpp_active) { 823 - struct ib_rmpp_mad *rmpp_mad; 824 - rmpp_mad = (struct ib_rmpp_mad *)send_buf->mad; 824 + struct ib_rmpp_mad *rmpp_mad = mad_send_wr->send_buf.mad; 825 825 rmpp_mad->rmpp_hdr.paylen_newwin = cpu_to_be32(hdr_len - 826 - offsetof(struct ib_rmpp_mad, data) + data_len); 826 + IB_MGMT_RMPP_HDR + data_len); 827 827 rmpp_mad->rmpp_hdr.rmpp_version = mad_agent->rmpp_version; 828 828 rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA; 829 829 ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, 830 830 IB_MGMT_RMPP_FLAG_ACTIVE); 831 831 } 832 832 833 - send_buf->mad_agent = mad_agent; 833 + mad_send_wr->send_buf.mad_agent = mad_agent; 834 834 atomic_inc(&mad_agent_priv->refcount); 835 - return send_buf; 835 + return &mad_send_wr->send_buf; 836 836 } 837 837 EXPORT_SYMBOL(ib_create_send_mad); 838 838 ··· 837 847 838 848 mad_agent_priv = container_of(send_buf->mad_agent, 839 849 struct ib_mad_agent_private, agent); 840 - 841 - dma_unmap_single(send_buf->mad_agent->device->dma_device, 842 - pci_unmap_addr(send_buf, mapping), 843 - send_buf->sge.length, DMA_TO_DEVICE); 844 850 kfree(send_buf->mad); 845 851 846 852 if (atomic_dec_and_test(&mad_agent_priv->refcount)) ··· 847 861 int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr) 848 862 { 849 863 struct ib_mad_qp_info *qp_info; 850 - struct ib_send_wr 
*bad_send_wr; 851 864 struct list_head *list; 865 + struct ib_send_wr *bad_send_wr; 866 + struct ib_mad_agent *mad_agent; 867 + struct ib_sge *sge; 852 868 unsigned long flags; 853 869 int ret; 854 870 ··· 859 871 mad_send_wr->send_wr.wr_id = (unsigned long)&mad_send_wr->mad_list; 860 872 mad_send_wr->mad_list.mad_queue = &qp_info->send_queue; 861 873 874 + mad_agent = mad_send_wr->send_buf.mad_agent; 875 + sge = mad_send_wr->sg_list; 876 + sge->addr = dma_map_single(mad_agent->device->dma_device, 877 + mad_send_wr->send_buf.mad, sge->length, 878 + DMA_TO_DEVICE); 879 + pci_unmap_addr_set(mad_send_wr, mapping, sge->addr); 880 + 862 881 spin_lock_irqsave(&qp_info->send_queue.lock, flags); 863 882 if (qp_info->send_queue.count < qp_info->send_queue.max_active) { 864 - ret = ib_post_send(mad_send_wr->mad_agent_priv->agent.qp, 865 - &mad_send_wr->send_wr, &bad_send_wr); 883 + ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr, 884 + &bad_send_wr); 866 885 list = &qp_info->send_queue.list; 867 886 } else { 868 887 ret = 0; ··· 881 886 list_add_tail(&mad_send_wr->mad_list.list, list); 882 887 } 883 888 spin_unlock_irqrestore(&qp_info->send_queue.lock, flags); 889 + if (ret) 890 + dma_unmap_single(mad_agent->device->dma_device, 891 + pci_unmap_addr(mad_send_wr, mapping), 892 + sge->length, DMA_TO_DEVICE); 893 + 884 894 return ret; 885 895 } 886 896 ··· 893 893 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated 894 894 * with the registered client 895 895 */ 896 - int ib_post_send_mad(struct ib_mad_agent *mad_agent, 897 - struct ib_send_wr *send_wr, 898 - struct ib_send_wr **bad_send_wr) 896 + int ib_post_send_mad(struct ib_mad_send_buf *send_buf, 897 + struct ib_mad_send_buf **bad_send_buf) 899 898 { 900 - int ret = -EINVAL; 901 899 struct ib_mad_agent_private *mad_agent_priv; 902 - 903 - /* Validate supplied parameters */ 904 - if (!bad_send_wr) 905 - goto error1; 906 - 907 - if (!mad_agent || !send_wr) 908 - goto error2; 909 - 910 - if 
(!mad_agent->send_handler) 911 - goto error2; 912 - 913 - mad_agent_priv = container_of(mad_agent, 914 - struct ib_mad_agent_private, 915 - agent); 900 + struct ib_mad_send_buf *next_send_buf; 901 + struct ib_mad_send_wr_private *mad_send_wr; 902 + unsigned long flags; 903 + int ret = -EINVAL; 916 904 917 905 /* Walk list of send WRs and post each on send list */ 918 - while (send_wr) { 919 - unsigned long flags; 920 - struct ib_send_wr *next_send_wr; 921 - struct ib_mad_send_wr_private *mad_send_wr; 922 - struct ib_smp *smp; 906 + for (; send_buf; send_buf = next_send_buf) { 923 907 924 - /* Validate more parameters */ 925 - if (send_wr->num_sge > IB_MAD_SEND_REQ_MAX_SG) 926 - goto error2; 908 + mad_send_wr = container_of(send_buf, 909 + struct ib_mad_send_wr_private, 910 + send_buf); 911 + mad_agent_priv = mad_send_wr->mad_agent_priv; 927 912 928 - if (send_wr->wr.ud.timeout_ms && !mad_agent->recv_handler) 929 - goto error2; 930 - 931 - if (!send_wr->wr.ud.mad_hdr) { 932 - printk(KERN_ERR PFX "MAD header must be supplied " 933 - "in WR %p\n", send_wr); 934 - goto error2; 913 + if (!send_buf->mad_agent->send_handler || 914 + (send_buf->timeout_ms && 915 + !send_buf->mad_agent->recv_handler)) { 916 + ret = -EINVAL; 917 + goto error; 935 918 } 936 919 937 920 /* ··· 922 939 * current one completes, and the user modifies the work 923 940 * request associated with the completion 924 941 */ 925 - next_send_wr = (struct ib_send_wr *)send_wr->next; 942 + next_send_buf = send_buf->next; 943 + mad_send_wr->send_wr.wr.ud.ah = send_buf->ah; 926 944 927 - smp = (struct ib_smp *)send_wr->wr.ud.mad_hdr; 928 - if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) { 929 - ret = handle_outgoing_dr_smp(mad_agent_priv, smp, 930 - send_wr); 945 + if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class == 946 + IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) { 947 + ret = handle_outgoing_dr_smp(mad_agent_priv, 948 + mad_send_wr); 931 949 if (ret < 0) /* error */ 932 - goto error2; 950 + goto 
error; 933 951 else if (ret == 1) /* locally consumed */ 934 - goto next; 952 + continue; 935 953 } 936 954 937 - /* Allocate MAD send WR tracking structure */ 938 - mad_send_wr = kmalloc(sizeof *mad_send_wr, GFP_ATOMIC); 939 - if (!mad_send_wr) { 940 - printk(KERN_ERR PFX "No memory for " 941 - "ib_mad_send_wr_private\n"); 942 - ret = -ENOMEM; 943 - goto error2; 944 - } 945 - memset(mad_send_wr, 0, sizeof *mad_send_wr); 946 - 947 - mad_send_wr->send_wr = *send_wr; 948 - mad_send_wr->send_wr.sg_list = mad_send_wr->sg_list; 949 - memcpy(mad_send_wr->sg_list, send_wr->sg_list, 950 - sizeof *send_wr->sg_list * send_wr->num_sge); 951 - mad_send_wr->wr_id = send_wr->wr_id; 952 - mad_send_wr->tid = send_wr->wr.ud.mad_hdr->tid; 953 - mad_send_wr->mad_agent_priv = mad_agent_priv; 955 + mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid; 954 956 /* Timeout will be updated after send completes */ 955 - mad_send_wr->timeout = msecs_to_jiffies(send_wr->wr. 956 - ud.timeout_ms); 957 - mad_send_wr->retries = mad_send_wr->send_wr.wr.ud.retries; 958 - /* One reference for each work request to QP + response */ 957 + mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms); 958 + mad_send_wr->retries = send_buf->retries; 959 + /* Reference for work request to QP + response */ 959 960 mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0); 960 961 mad_send_wr->status = IB_WC_SUCCESS; 961 962 ··· 962 995 list_del(&mad_send_wr->agent_list); 963 996 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 964 997 atomic_dec(&mad_agent_priv->refcount); 965 - goto error2; 998 + goto error; 966 999 } 967 - next: 968 - send_wr = next_send_wr; 969 1000 } 970 1001 return 0; 971 - 972 - error2: 973 - *bad_send_wr = send_wr; 974 - error1: 1002 + error: 1003 + if (bad_send_buf) 1004 + *bad_send_buf = send_buf; 975 1005 return ret; 976 1006 } 977 1007 EXPORT_SYMBOL(ib_post_send_mad); ··· 1411 1447 * of MAD. 
1412 1448 */ 1413 1449 hi_tid = be64_to_cpu(mad->mad_hdr.tid) >> 32; 1414 - list_for_each_entry(entry, &port_priv->agent_list, 1415 - agent_list) { 1450 + list_for_each_entry(entry, &port_priv->agent_list, agent_list) { 1416 1451 if (entry->agent.hi_tid == hi_tid) { 1417 1452 mad_agent = entry; 1418 1453 break; ··· 1534 1571 */ 1535 1572 list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list, 1536 1573 agent_list) { 1537 - if (is_data_mad(mad_agent_priv, 1538 - mad_send_wr->send_wr.wr.ud.mad_hdr) && 1574 + if (is_data_mad(mad_agent_priv, mad_send_wr->send_buf.mad) && 1539 1575 mad_send_wr->tid == tid && mad_send_wr->timeout) { 1540 1576 /* Verify request has not been canceled */ 1541 1577 return (mad_send_wr->status == IB_WC_SUCCESS) ? ··· 1590 1628 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 1591 1629 1592 1630 /* Defined behavior is to complete response before request */ 1593 - mad_recv_wc->wc->wr_id = mad_send_wr->wr_id; 1631 + mad_recv_wc->wc->wr_id = (unsigned long) &mad_send_wr->send_buf; 1594 1632 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent, 1595 1633 mad_recv_wc); 1596 1634 atomic_dec(&mad_agent_priv->refcount); 1597 1635 1598 1636 mad_send_wc.status = IB_WC_SUCCESS; 1599 1637 mad_send_wc.vendor_err = 0; 1600 - mad_send_wc.wr_id = mad_send_wr->wr_id; 1638 + mad_send_wc.send_buf = &mad_send_wr->send_buf; 1601 1639 ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc); 1602 1640 } else { 1603 1641 mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent, ··· 1690 1728 if (ret & IB_MAD_RESULT_CONSUMED) 1691 1729 goto out; 1692 1730 if (ret & IB_MAD_RESULT_REPLY) { 1693 - /* Send response */ 1694 - if (!agent_send(response, &recv->grh, wc, 1695 - port_priv->device, 1696 - port_priv->port_num)) 1697 - response = NULL; 1731 + agent_send_response(&response->mad.mad, 1732 + &recv->grh, wc, 1733 + port_priv->device, 1734 + port_priv->port_num, 1735 + qp_info->qp->qp_num); 1698 1736 goto out; 1699 1737 } 1700 1738 } ··· 1828 1866 1829 1867 
if (mad_send_wr->status != IB_WC_SUCCESS ) 1830 1868 mad_send_wc->status = mad_send_wr->status; 1831 - if (ret != IB_RMPP_RESULT_INTERNAL) 1869 + if (ret == IB_RMPP_RESULT_INTERNAL) 1870 + ib_rmpp_send_handler(mad_send_wc); 1871 + else 1832 1872 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, 1833 1873 mad_send_wc); 1834 1874 1835 1875 /* Release reference on agent taken when sending */ 1836 1876 if (atomic_dec_and_test(&mad_agent_priv->refcount)) 1837 1877 wake_up(&mad_agent_priv->wait); 1838 - 1839 - kfree(mad_send_wr); 1840 1878 return; 1841 1879 done: 1842 1880 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); ··· 1850 1888 struct ib_mad_qp_info *qp_info; 1851 1889 struct ib_mad_queue *send_queue; 1852 1890 struct ib_send_wr *bad_send_wr; 1891 + struct ib_mad_send_wc mad_send_wc; 1853 1892 unsigned long flags; 1854 1893 int ret; 1855 1894 ··· 1861 1898 qp_info = send_queue->qp_info; 1862 1899 1863 1900 retry: 1901 + dma_unmap_single(mad_send_wr->send_buf.mad_agent->device->dma_device, 1902 + pci_unmap_addr(mad_send_wr, mapping), 1903 + mad_send_wr->sg_list[0].length, DMA_TO_DEVICE); 1864 1904 queued_send_wr = NULL; 1865 1905 spin_lock_irqsave(&send_queue->lock, flags); 1866 1906 list_del(&mad_list->list); ··· 1880 1914 } 1881 1915 spin_unlock_irqrestore(&send_queue->lock, flags); 1882 1916 1883 - /* Restore client wr_id in WC and complete send */ 1884 - wc->wr_id = mad_send_wr->wr_id; 1917 + mad_send_wc.send_buf = &mad_send_wr->send_buf; 1918 + mad_send_wc.status = wc->status; 1919 + mad_send_wc.vendor_err = wc->vendor_err; 1885 1920 if (atomic_read(&qp_info->snoop_count)) 1886 - snoop_send(qp_info, &mad_send_wr->send_wr, 1887 - (struct ib_mad_send_wc *)wc, 1921 + snoop_send(qp_info, &mad_send_wr->send_buf, &mad_send_wc, 1888 1922 IB_MAD_SNOOP_SEND_COMPLETIONS); 1889 - ib_mad_complete_send_wr(mad_send_wr, (struct ib_mad_send_wc *)wc); 1923 + ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc); 1890 1924 1891 1925 if (queued_send_wr) { 1892 1926 
ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr, 1893 - &bad_send_wr); 1927 + &bad_send_wr); 1894 1928 if (ret) { 1895 1929 printk(KERN_ERR PFX "ib_post_send failed: %d\n", ret); 1896 1930 mad_send_wr = queued_send_wr; ··· 2032 2066 2033 2067 list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr, 2034 2068 &cancel_list, agent_list) { 2035 - mad_send_wc.wr_id = mad_send_wr->wr_id; 2069 + mad_send_wc.send_buf = &mad_send_wr->send_buf; 2070 + list_del(&mad_send_wr->agent_list); 2036 2071 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, 2037 2072 &mad_send_wc); 2038 - 2039 - list_del(&mad_send_wr->agent_list); 2040 - kfree(mad_send_wr); 2041 2073 atomic_dec(&mad_agent_priv->refcount); 2042 2074 } 2043 2075 } 2044 2076 2045 2077 static struct ib_mad_send_wr_private* 2046 - find_send_by_wr_id(struct ib_mad_agent_private *mad_agent_priv, u64 wr_id) 2078 + find_send_wr(struct ib_mad_agent_private *mad_agent_priv, 2079 + struct ib_mad_send_buf *send_buf) 2047 2080 { 2048 2081 struct ib_mad_send_wr_private *mad_send_wr; 2049 2082 2050 2083 list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list, 2051 2084 agent_list) { 2052 - if (mad_send_wr->wr_id == wr_id) 2085 + if (&mad_send_wr->send_buf == send_buf) 2053 2086 return mad_send_wr; 2054 2087 } 2055 2088 2056 2089 list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list, 2057 2090 agent_list) { 2058 - if (is_data_mad(mad_agent_priv, 2059 - mad_send_wr->send_wr.wr.ud.mad_hdr) && 2060 - mad_send_wr->wr_id == wr_id) 2091 + if (is_data_mad(mad_agent_priv, mad_send_wr->send_buf.mad) && 2092 + &mad_send_wr->send_buf == send_buf) 2061 2093 return mad_send_wr; 2062 2094 } 2063 2095 return NULL; 2064 2096 } 2065 2097 2066 - int ib_modify_mad(struct ib_mad_agent *mad_agent, u64 wr_id, u32 timeout_ms) 2098 + int ib_modify_mad(struct ib_mad_agent *mad_agent, 2099 + struct ib_mad_send_buf *send_buf, u32 timeout_ms) 2067 2100 { 2068 2101 struct ib_mad_agent_private *mad_agent_priv; 2069 2102 struct 
ib_mad_send_wr_private *mad_send_wr; ··· 2072 2107 mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private, 2073 2108 agent); 2074 2109 spin_lock_irqsave(&mad_agent_priv->lock, flags); 2075 - mad_send_wr = find_send_by_wr_id(mad_agent_priv, wr_id); 2110 + mad_send_wr = find_send_wr(mad_agent_priv, send_buf); 2076 2111 if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) { 2077 2112 spin_unlock_irqrestore(&mad_agent_priv->lock, flags); 2078 2113 return -EINVAL; ··· 2084 2119 mad_send_wr->refcount -= (mad_send_wr->timeout > 0); 2085 2120 } 2086 2121 2087 - mad_send_wr->send_wr.wr.ud.timeout_ms = timeout_ms; 2122 + mad_send_wr->send_buf.timeout_ms = timeout_ms; 2088 2123 if (active) 2089 2124 mad_send_wr->timeout = msecs_to_jiffies(timeout_ms); 2090 2125 else ··· 2095 2130 } 2096 2131 EXPORT_SYMBOL(ib_modify_mad); 2097 2132 2098 - void ib_cancel_mad(struct ib_mad_agent *mad_agent, u64 wr_id) 2133 + void ib_cancel_mad(struct ib_mad_agent *mad_agent, 2134 + struct ib_mad_send_buf *send_buf) 2099 2135 { 2100 - ib_modify_mad(mad_agent, wr_id, 0); 2136 + ib_modify_mad(mad_agent, send_buf, 0); 2101 2137 } 2102 2138 EXPORT_SYMBOL(ib_cancel_mad); 2103 2139 ··· 2132 2166 * Defined behavior is to complete response 2133 2167 * before request 2134 2168 */ 2135 - build_smp_wc(local->wr_id, 2169 + build_smp_wc((unsigned long) local->mad_send_wr, 2136 2170 be16_to_cpu(IB_LID_PERMISSIVE), 2137 - 0 /* pkey index */, 2138 - recv_mad_agent->agent.port_num, &wc); 2171 + 0, recv_mad_agent->agent.port_num, &wc); 2139 2172 2140 2173 local->mad_priv->header.recv_wc.wc = &wc; 2141 2174 local->mad_priv->header.recv_wc.mad_len = ··· 2161 2196 /* Complete send */ 2162 2197 mad_send_wc.status = IB_WC_SUCCESS; 2163 2198 mad_send_wc.vendor_err = 0; 2164 - mad_send_wc.wr_id = local->wr_id; 2199 + mad_send_wc.send_buf = &local->mad_send_wr->send_buf; 2165 2200 if (atomic_read(&mad_agent_priv->qp_info->snoop_count)) 2166 - snoop_send(mad_agent_priv->qp_info, &local->send_wr, 2167 - 
&mad_send_wc, 2168 - IB_MAD_SNOOP_SEND_COMPLETIONS); 2201 + snoop_send(mad_agent_priv->qp_info, 2202 + &local->mad_send_wr->send_buf, 2203 + &mad_send_wc, IB_MAD_SNOOP_SEND_COMPLETIONS); 2169 2204 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, 2170 2205 &mad_send_wc); 2171 2206 ··· 2186 2221 if (!mad_send_wr->retries--) 2187 2222 return -ETIMEDOUT; 2188 2223 2189 - mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_wr. 2190 - wr.ud.timeout_ms); 2224 + mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms); 2191 2225 2192 2226 if (mad_send_wr->mad_agent_priv->agent.rmpp_version) { 2193 2227 ret = ib_retry_rmpp(mad_send_wr); ··· 2249 2285 mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR; 2250 2286 else 2251 2287 mad_send_wc.status = mad_send_wr->status; 2252 - mad_send_wc.wr_id = mad_send_wr->wr_id; 2288 + mad_send_wc.send_buf = &mad_send_wr->send_buf; 2253 2289 mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, 2254 2290 &mad_send_wc); 2255 2291 2256 - kfree(mad_send_wr); 2257 2292 atomic_dec(&mad_agent_priv->refcount); 2258 2293 spin_lock_irqsave(&mad_agent_priv->lock, flags); 2259 2294 } ··· 2724 2761 int ret; 2725 2762 2726 2763 spin_lock_init(&ib_mad_port_list_lock); 2727 - spin_lock_init(&ib_agent_port_list_lock); 2728 2764 2729 2765 ib_mad_cache = kmem_cache_create("ib_mad", 2730 2766 sizeof(struct ib_mad_private),
+3 -5
drivers/infiniband/core/mad_priv.h
··· 118 118 struct ib_mad_list_head mad_list; 119 119 struct list_head agent_list; 120 120 struct ib_mad_agent_private *mad_agent_priv; 121 + struct ib_mad_send_buf send_buf; 122 + DECLARE_PCI_UNMAP_ADDR(mapping) 121 123 struct ib_send_wr send_wr; 122 124 struct ib_sge sg_list[IB_MAD_SEND_REQ_MAX_SG]; 123 - u64 wr_id; /* client WR ID */ 124 125 __be64 tid; 125 126 unsigned long timeout; 126 127 int retries; ··· 142 141 struct list_head completion_list; 143 142 struct ib_mad_private *mad_priv; 144 143 struct ib_mad_agent_private *recv_mad_agent; 145 - struct ib_send_wr send_wr; 146 - struct ib_sge sg_list[IB_MAD_SEND_REQ_MAX_SG]; 147 - u64 wr_id; /* client WR ID */ 148 - __be64 tid; 144 + struct ib_mad_send_wr_private *mad_send_wr; 149 145 }; 150 146 151 147 struct ib_mad_mgmt_method_table {
+39 -48
drivers/infiniband/core/mad_rmpp.c
··· 103 103 static int data_offset(u8 mgmt_class) 104 104 { 105 105 if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM) 106 - return offsetof(struct ib_sa_mad, data); 106 + return IB_MGMT_SA_HDR; 107 107 else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) && 108 108 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)) 109 - return offsetof(struct ib_vendor_mad, data); 109 + return IB_MGMT_VENDOR_HDR; 110 110 else 111 - return offsetof(struct ib_rmpp_mad, data); 111 + return IB_MGMT_RMPP_HDR; 112 112 } 113 113 114 114 static void format_ack(struct ib_rmpp_mad *ack, ··· 135 135 struct ib_mad_recv_wc *recv_wc) 136 136 { 137 137 struct ib_mad_send_buf *msg; 138 - struct ib_send_wr *bad_send_wr; 139 - int hdr_len, ret; 138 + int ret; 140 139 141 - hdr_len = sizeof(struct ib_mad_hdr) + sizeof(struct ib_rmpp_hdr); 142 140 msg = ib_create_send_mad(&rmpp_recv->agent->agent, recv_wc->wc->src_qp, 143 - recv_wc->wc->pkey_index, rmpp_recv->ah, 1, 144 - hdr_len, sizeof(struct ib_rmpp_mad) - hdr_len, 145 - GFP_KERNEL); 141 + recv_wc->wc->pkey_index, 1, IB_MGMT_RMPP_HDR, 142 + IB_MGMT_RMPP_DATA, GFP_KERNEL); 146 143 if (!msg) 147 144 return; 148 145 149 - format_ack((struct ib_rmpp_mad *) msg->mad, 150 - (struct ib_rmpp_mad *) recv_wc->recv_buf.mad, rmpp_recv); 151 - ret = ib_post_send_mad(&rmpp_recv->agent->agent, &msg->send_wr, 152 - &bad_send_wr); 146 + format_ack(msg->mad, (struct ib_rmpp_mad *) recv_wc->recv_buf.mad, 147 + rmpp_recv); 148 + msg->ah = rmpp_recv->ah; 149 + ret = ib_post_send_mad(msg, NULL); 153 150 if (ret) 154 151 ib_free_send_mad(msg); 155 152 } ··· 157 160 { 158 161 struct ib_mad_send_buf *m; 159 162 struct ib_ah *ah; 160 - int hdr_len; 161 163 162 164 ah = ib_create_ah_from_wc(agent->qp->pd, recv_wc->wc, 163 165 recv_wc->recv_buf.grh, agent->port_num); 164 166 if (IS_ERR(ah)) 165 167 return PTR_ERR(ah); 166 168 167 - hdr_len = sizeof(struct ib_mad_hdr) + sizeof(struct ib_rmpp_hdr); 168 169 m = ib_create_send_mad(agent, recv_wc->wc->src_qp, 169 - 
recv_wc->wc->pkey_index, ah, 1, hdr_len, 170 - sizeof(struct ib_rmpp_mad) - hdr_len, 171 - GFP_KERNEL); 170 + recv_wc->wc->pkey_index, 1, 171 + IB_MGMT_RMPP_HDR, IB_MGMT_RMPP_DATA, GFP_KERNEL); 172 172 if (IS_ERR(m)) { 173 173 ib_destroy_ah(ah); 174 174 return PTR_ERR(m); 175 175 } 176 + m->ah = ah; 176 177 *msg = m; 177 178 return 0; 178 179 } 179 180 180 - static void free_msg(struct ib_mad_send_buf *msg) 181 + void ib_rmpp_send_handler(struct ib_mad_send_wc *mad_send_wc) 181 182 { 182 - ib_destroy_ah(msg->send_wr.wr.ud.ah); 183 - ib_free_send_mad(msg); 183 + struct ib_rmpp_mad *rmpp_mad = mad_send_wc->send_buf->mad; 184 + 185 + if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_ACK) 186 + ib_destroy_ah(mad_send_wc->send_buf->ah); 187 + ib_free_send_mad(mad_send_wc->send_buf); 184 188 } 185 189 186 190 static void nack_recv(struct ib_mad_agent_private *agent, ··· 189 191 { 190 192 struct ib_mad_send_buf *msg; 191 193 struct ib_rmpp_mad *rmpp_mad; 192 - struct ib_send_wr *bad_send_wr; 193 194 int ret; 194 195 195 196 ret = alloc_response_msg(&agent->agent, recv_wc, &msg); 196 197 if (ret) 197 198 return; 198 199 199 - rmpp_mad = (struct ib_rmpp_mad *) msg->mad; 200 + rmpp_mad = msg->mad; 200 201 memcpy(rmpp_mad, recv_wc->recv_buf.mad, 201 202 data_offset(recv_wc->recv_buf.mad->mad_hdr.mgmt_class)); 202 203 ··· 207 210 rmpp_mad->rmpp_hdr.seg_num = 0; 208 211 rmpp_mad->rmpp_hdr.paylen_newwin = 0; 209 212 210 - ret = ib_post_send_mad(&agent->agent, &msg->send_wr, &bad_send_wr); 211 - if (ret) 212 - free_msg(msg); 213 + ret = ib_post_send_mad(msg, NULL); 214 + if (ret) { 215 + ib_destroy_ah(msg->ah); 216 + ib_free_send_mad(msg); 217 + } 213 218 } 214 219 215 220 static void recv_timeout_handler(void *data) ··· 584 585 int timeout; 585 586 u32 paylen; 586 587 587 - rmpp_mad = (struct ib_rmpp_mad *)mad_send_wr->send_wr.wr.ud.mad_hdr; 588 + rmpp_mad = mad_send_wr->send_buf.mad; 588 589 ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE); 589 590 
rmpp_mad->rmpp_hdr.seg_num = cpu_to_be32(mad_send_wr->seg_num); 590 591 ··· 611 612 } 612 613 613 614 /* 2 seconds for an ACK until we can find the packet lifetime */ 614 - timeout = mad_send_wr->send_wr.wr.ud.timeout_ms; 615 + timeout = mad_send_wr->send_buf.timeout_ms; 615 616 if (!timeout || timeout > 2000) 616 617 mad_send_wr->timeout = msecs_to_jiffies(2000); 617 618 mad_send_wr->seg_num++; ··· 639 640 640 641 wc.status = IB_WC_REM_ABORT_ERR; 641 642 wc.vendor_err = rmpp_status; 642 - wc.wr_id = mad_send_wr->wr_id; 643 + wc.send_buf = &mad_send_wr->send_buf; 643 644 ib_mad_complete_send_wr(mad_send_wr, &wc); 644 645 return; 645 646 out: ··· 693 694 694 695 if (seg_num > mad_send_wr->last_ack) { 695 696 mad_send_wr->last_ack = seg_num; 696 - mad_send_wr->retries = mad_send_wr->send_wr.wr.ud.retries; 697 + mad_send_wr->retries = mad_send_wr->send_buf.retries; 697 698 } 698 699 mad_send_wr->newwin = newwin; 699 700 if (mad_send_wr->last_ack == mad_send_wr->total_seg) { 700 701 /* If no response is expected, the ACK completes the send */ 701 - if (!mad_send_wr->send_wr.wr.ud.timeout_ms) { 702 + if (!mad_send_wr->send_buf.timeout_ms) { 702 703 struct ib_mad_send_wc wc; 703 704 704 705 ib_mark_mad_done(mad_send_wr); ··· 706 707 707 708 wc.status = IB_WC_SUCCESS; 708 709 wc.vendor_err = 0; 709 - wc.wr_id = mad_send_wr->wr_id; 710 + wc.send_buf = &mad_send_wr->send_buf; 710 711 ib_mad_complete_send_wr(mad_send_wr, &wc); 711 712 return; 712 713 } 713 714 if (mad_send_wr->refcount == 1) 714 - ib_reset_mad_timeout(mad_send_wr, mad_send_wr-> 715 - send_wr.wr.ud.timeout_ms); 715 + ib_reset_mad_timeout(mad_send_wr, 716 + mad_send_wr->send_buf.timeout_ms); 716 717 } else if (mad_send_wr->refcount == 1 && 717 718 mad_send_wr->seg_num < mad_send_wr->newwin && 718 719 mad_send_wr->seg_num <= mad_send_wr->total_seg) { ··· 841 842 struct ib_rmpp_mad *rmpp_mad; 842 843 int i, total_len, ret; 843 844 844 - rmpp_mad = (struct ib_rmpp_mad *)mad_send_wr->send_wr.wr.ud.mad_hdr; 845 + 
rmpp_mad = mad_send_wr->send_buf.mad; 845 846 if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & 846 847 IB_MGMT_RMPP_FLAG_ACTIVE)) 847 848 return IB_RMPP_RESULT_UNHANDLED; ··· 862 863 863 864 mad_send_wr->total_seg = (total_len - mad_send_wr->data_offset) / 864 865 (sizeof(struct ib_rmpp_mad) - mad_send_wr->data_offset); 865 - mad_send_wr->pad = total_len - offsetof(struct ib_rmpp_mad, data) - 866 + mad_send_wr->pad = total_len - IB_MGMT_RMPP_HDR - 866 867 be32_to_cpu(rmpp_mad->rmpp_hdr.paylen_newwin); 867 868 868 869 /* We need to wait for the final ACK even if there isn't a response */ ··· 877 878 struct ib_mad_send_wc *mad_send_wc) 878 879 { 879 880 struct ib_rmpp_mad *rmpp_mad; 880 - struct ib_mad_send_buf *msg; 881 881 int ret; 882 882 883 - rmpp_mad = (struct ib_rmpp_mad *)mad_send_wr->send_wr.wr.ud.mad_hdr; 883 + rmpp_mad = mad_send_wr->send_buf.mad; 884 884 if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & 885 885 IB_MGMT_RMPP_FLAG_ACTIVE)) 886 886 return IB_RMPP_RESULT_UNHANDLED; /* RMPP not active */ 887 887 888 - if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_DATA) { 889 - msg = (struct ib_mad_send_buf *) (unsigned long) 890 - mad_send_wc->wr_id; 891 - if (rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_ACK) 892 - ib_free_send_mad(msg); 893 - else 894 - free_msg(msg); 888 + if (rmpp_mad->rmpp_hdr.rmpp_type != IB_MGMT_RMPP_TYPE_DATA) 895 889 return IB_RMPP_RESULT_INTERNAL; /* ACK, STOP, or ABORT */ 896 - } 897 890 898 891 if (mad_send_wc->status != IB_WC_SUCCESS || 899 892 mad_send_wr->status != IB_WC_SUCCESS) ··· 896 905 897 906 if (mad_send_wr->last_ack == mad_send_wr->total_seg) { 898 907 mad_send_wr->timeout = 899 - msecs_to_jiffies(mad_send_wr->send_wr.wr.ud.timeout_ms); 908 + msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms); 900 909 return IB_RMPP_RESULT_PROCESSED; /* Send done */ 901 910 } 902 911 ··· 917 926 struct ib_rmpp_mad *rmpp_mad; 918 927 int ret; 919 928 920 - rmpp_mad = (struct ib_rmpp_mad *)mad_send_wr->send_wr.wr.ud.mad_hdr; 929 + 
rmpp_mad = mad_send_wr->send_buf.mad; 921 930 if (!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & 922 931 IB_MGMT_RMPP_FLAG_ACTIVE)) 923 932 return IB_RMPP_RESULT_UNHANDLED; /* RMPP not active */
+2
drivers/infiniband/core/mad_rmpp.h
··· 51 51 int ib_process_rmpp_send_wc(struct ib_mad_send_wr_private *mad_send_wr, 52 52 struct ib_mad_send_wc *mad_send_wc); 53 53 54 + void ib_rmpp_send_handler(struct ib_mad_send_wc *mad_send_wc); 55 + 54 56 void ib_cancel_rmpp_recvs(struct ib_mad_agent_private *agent); 55 57 56 58 int ib_retry_rmpp(struct ib_mad_send_wr_private *mad_send_wr);
+114 -125
drivers/infiniband/core/sa_query.c
··· 73 73 struct ib_sa_query { 74 74 void (*callback)(struct ib_sa_query *, int, struct ib_sa_mad *); 75 75 void (*release)(struct ib_sa_query *); 76 - struct ib_sa_port *port; 77 - struct ib_sa_mad *mad; 78 - struct ib_sa_sm_ah *sm_ah; 79 - DECLARE_PCI_UNMAP_ADDR(mapping) 80 - int id; 76 + struct ib_sa_port *port; 77 + struct ib_mad_send_buf *mad_buf; 78 + struct ib_sa_sm_ah *sm_ah; 79 + int id; 81 80 }; 82 81 83 82 struct ib_sa_service_query { ··· 425 426 { 426 427 unsigned long flags; 427 428 struct ib_mad_agent *agent; 429 + struct ib_mad_send_buf *mad_buf; 428 430 429 431 spin_lock_irqsave(&idr_lock, flags); 430 432 if (idr_find(&query_idr, id) != query) { ··· 433 433 return; 434 434 } 435 435 agent = query->port->agent; 436 + mad_buf = query->mad_buf; 436 437 spin_unlock_irqrestore(&idr_lock, flags); 437 438 438 - ib_cancel_mad(agent, id); 439 + ib_cancel_mad(agent, mad_buf); 439 440 } 440 441 EXPORT_SYMBOL(ib_sa_cancel_query); 441 442 ··· 458 457 459 458 static int send_mad(struct ib_sa_query *query, int timeout_ms) 460 459 { 461 - struct ib_sa_port *port = query->port; 462 460 unsigned long flags; 463 - int ret; 464 - struct ib_sge gather_list; 465 - struct ib_send_wr *bad_wr, wr = { 466 - .opcode = IB_WR_SEND, 467 - .sg_list = &gather_list, 468 - .num_sge = 1, 469 - .send_flags = IB_SEND_SIGNALED, 470 - .wr = { 471 - .ud = { 472 - .mad_hdr = &query->mad->mad_hdr, 473 - .remote_qpn = 1, 474 - .remote_qkey = IB_QP1_QKEY, 475 - .timeout_ms = timeout_ms, 476 - } 477 - } 478 - }; 461 + int ret, id; 479 462 480 463 retry: 481 464 if (!idr_pre_get(&query_idr, GFP_ATOMIC)) 482 465 return -ENOMEM; 483 466 spin_lock_irqsave(&idr_lock, flags); 484 - ret = idr_get_new(&query_idr, query, &query->id); 467 + ret = idr_get_new(&query_idr, query, &id); 485 468 spin_unlock_irqrestore(&idr_lock, flags); 486 469 if (ret == -EAGAIN) 487 470 goto retry; 488 471 if (ret) 489 472 return ret; 490 473 491 - wr.wr_id = query->id; 474 + query->mad_buf->timeout_ms = timeout_ms; 475 + 
query->mad_buf->context[0] = query; 476 + query->id = id; 492 477 493 - spin_lock_irqsave(&port->ah_lock, flags); 494 - kref_get(&port->sm_ah->ref); 495 - query->sm_ah = port->sm_ah; 496 - wr.wr.ud.ah = port->sm_ah->ah; 497 - spin_unlock_irqrestore(&port->ah_lock, flags); 478 + spin_lock_irqsave(&query->port->ah_lock, flags); 479 + kref_get(&query->port->sm_ah->ref); 480 + query->sm_ah = query->port->sm_ah; 481 + spin_unlock_irqrestore(&query->port->ah_lock, flags); 498 482 499 - gather_list.addr = dma_map_single(port->agent->device->dma_device, 500 - query->mad, 501 - sizeof (struct ib_sa_mad), 502 - DMA_TO_DEVICE); 503 - gather_list.length = sizeof (struct ib_sa_mad); 504 - gather_list.lkey = port->agent->mr->lkey; 505 - pci_unmap_addr_set(query, mapping, gather_list.addr); 483 + query->mad_buf->ah = query->sm_ah->ah; 506 484 507 - ret = ib_post_send_mad(port->agent, &wr, &bad_wr); 485 + ret = ib_post_send_mad(query->mad_buf, NULL); 508 486 if (ret) { 509 - dma_unmap_single(port->agent->device->dma_device, 510 - pci_unmap_addr(query, mapping), 511 - sizeof (struct ib_sa_mad), 512 - DMA_TO_DEVICE); 513 - kref_put(&query->sm_ah->ref, free_sm_ah); 514 487 spin_lock_irqsave(&idr_lock, flags); 515 - idr_remove(&query_idr, query->id); 488 + idr_remove(&query_idr, id); 516 489 spin_unlock_irqrestore(&idr_lock, flags); 490 + 491 + kref_put(&query->sm_ah->ref, free_sm_ah); 517 492 } 518 493 519 494 /* 520 495 * It's not safe to dereference query any more, because the 521 496 * send may already have completed and freed the query in 522 - * another context. So use wr.wr_id, which has a copy of the 523 - * query's id. 497 + * another context. 524 498 */ 525 - return ret ? ret : wr.wr_id; 499 + return ret ? 
ret : id; 526 500 } 527 501 528 502 static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query, ··· 519 543 520 544 static void ib_sa_path_rec_release(struct ib_sa_query *sa_query) 521 545 { 522 - kfree(sa_query->mad); 523 546 kfree(container_of(sa_query, struct ib_sa_path_query, sa_query)); 524 547 } 525 548 ··· 560 585 struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client); 561 586 struct ib_sa_port *port; 562 587 struct ib_mad_agent *agent; 588 + struct ib_sa_mad *mad; 563 589 int ret; 564 590 565 591 if (!sa_dev) ··· 572 596 query = kmalloc(sizeof *query, gfp_mask); 573 597 if (!query) 574 598 return -ENOMEM; 575 - query->sa_query.mad = kmalloc(sizeof *query->sa_query.mad, gfp_mask); 576 - if (!query->sa_query.mad) { 577 - kfree(query); 578 - return -ENOMEM; 599 + 600 + query->sa_query.mad_buf = ib_create_send_mad(agent, 1, 0, 601 + 0, IB_MGMT_SA_HDR, 602 + IB_MGMT_SA_DATA, gfp_mask); 603 + if (!query->sa_query.mad_buf) { 604 + ret = -ENOMEM; 605 + goto err1; 579 606 } 580 607 581 608 query->callback = callback; 582 609 query->context = context; 583 610 584 - init_mad(query->sa_query.mad, agent); 611 + mad = query->sa_query.mad_buf->mad; 612 + init_mad(mad, agent); 585 613 586 - query->sa_query.callback = callback ? ib_sa_path_rec_callback : NULL; 587 - query->sa_query.release = ib_sa_path_rec_release; 588 - query->sa_query.port = port; 589 - query->sa_query.mad->mad_hdr.method = IB_MGMT_METHOD_GET; 590 - query->sa_query.mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_PATH_REC); 591 - query->sa_query.mad->sa_hdr.comp_mask = comp_mask; 614 + query->sa_query.callback = callback ? 
ib_sa_path_rec_callback : NULL; 615 + query->sa_query.release = ib_sa_path_rec_release; 616 + query->sa_query.port = port; 617 + mad->mad_hdr.method = IB_MGMT_METHOD_GET; 618 + mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_PATH_REC); 619 + mad->sa_hdr.comp_mask = comp_mask; 592 620 593 - ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), 594 - rec, query->sa_query.mad->data); 621 + ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), rec, mad->data); 595 622 596 623 *sa_query = &query->sa_query; 597 624 598 625 ret = send_mad(&query->sa_query, timeout_ms); 599 - if (ret < 0) { 600 - *sa_query = NULL; 601 - kfree(query->sa_query.mad); 602 - kfree(query); 603 - } 626 + if (ret < 0) 627 + goto err2; 604 628 629 + return ret; 630 + 631 + err2: 632 + *sa_query = NULL; 633 + ib_free_send_mad(query->sa_query.mad_buf); 634 + 635 + err1: 636 + kfree(query); 605 637 return ret; 606 638 } 607 639 EXPORT_SYMBOL(ib_sa_path_rec_get); ··· 633 649 634 650 static void ib_sa_service_rec_release(struct ib_sa_query *sa_query) 635 651 { 636 - kfree(sa_query->mad); 637 652 kfree(container_of(sa_query, struct ib_sa_service_query, sa_query)); 638 653 } 639 654 ··· 676 693 struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client); 677 694 struct ib_sa_port *port; 678 695 struct ib_mad_agent *agent; 696 + struct ib_sa_mad *mad; 679 697 int ret; 680 698 681 699 if (!sa_dev) ··· 693 709 query = kmalloc(sizeof *query, gfp_mask); 694 710 if (!query) 695 711 return -ENOMEM; 696 - query->sa_query.mad = kmalloc(sizeof *query->sa_query.mad, gfp_mask); 697 - if (!query->sa_query.mad) { 698 - kfree(query); 699 - return -ENOMEM; 712 + 713 + query->sa_query.mad_buf = ib_create_send_mad(agent, 1, 0, 714 + 0, IB_MGMT_SA_HDR, 715 + IB_MGMT_SA_DATA, gfp_mask); 716 + if (!query->sa_query.mad_buf) { 717 + ret = -ENOMEM; 718 + goto err1; 700 719 } 701 720 702 721 query->callback = callback; 703 722 query->context = context; 704 723 705 - init_mad(query->sa_query.mad, agent); 724 + mad = 
query->sa_query.mad_buf->mad; 725 + init_mad(mad, agent); 706 726 707 - query->sa_query.callback = callback ? ib_sa_service_rec_callback : NULL; 708 - query->sa_query.release = ib_sa_service_rec_release; 709 - query->sa_query.port = port; 710 - query->sa_query.mad->mad_hdr.method = method; 711 - query->sa_query.mad->mad_hdr.attr_id = 712 - cpu_to_be16(IB_SA_ATTR_SERVICE_REC); 713 - query->sa_query.mad->sa_hdr.comp_mask = comp_mask; 727 + query->sa_query.callback = callback ? ib_sa_service_rec_callback : NULL; 728 + query->sa_query.release = ib_sa_service_rec_release; 729 + query->sa_query.port = port; 730 + mad->mad_hdr.method = method; 731 + mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_SERVICE_REC); 732 + mad->sa_hdr.comp_mask = comp_mask; 714 733 715 734 ib_pack(service_rec_table, ARRAY_SIZE(service_rec_table), 716 - rec, query->sa_query.mad->data); 735 + rec, mad->data); 717 736 718 737 *sa_query = &query->sa_query; 719 738 720 739 ret = send_mad(&query->sa_query, timeout_ms); 721 - if (ret < 0) { 722 - *sa_query = NULL; 723 - kfree(query->sa_query.mad); 724 - kfree(query); 725 - } 740 + if (ret < 0) 741 + goto err2; 726 742 743 + return ret; 744 + 745 + err2: 746 + *sa_query = NULL; 747 + ib_free_send_mad(query->sa_query.mad_buf); 748 + 749 + err1: 750 + kfree(query); 727 751 return ret; 728 752 } 729 753 EXPORT_SYMBOL(ib_sa_service_rec_query); ··· 755 763 756 764 static void ib_sa_mcmember_rec_release(struct ib_sa_query *sa_query) 757 765 { 758 - kfree(sa_query->mad); 759 766 kfree(container_of(sa_query, struct ib_sa_mcmember_query, sa_query)); 760 767 } 761 768 ··· 773 782 struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client); 774 783 struct ib_sa_port *port; 775 784 struct ib_mad_agent *agent; 785 + struct ib_sa_mad *mad; 776 786 int ret; 777 787 778 788 if (!sa_dev) ··· 785 793 query = kmalloc(sizeof *query, gfp_mask); 786 794 if (!query) 787 795 return -ENOMEM; 788 - query->sa_query.mad = kmalloc(sizeof *query->sa_query.mad, gfp_mask); 789 
- if (!query->sa_query.mad) { 790 - kfree(query); 791 - return -ENOMEM; 796 + 797 + query->sa_query.mad_buf = ib_create_send_mad(agent, 1, 0, 798 + 0, IB_MGMT_SA_HDR, 799 + IB_MGMT_SA_DATA, gfp_mask); 800 + if (!query->sa_query.mad_buf) { 801 + ret = -ENOMEM; 802 + goto err1; 792 803 } 793 804 794 805 query->callback = callback; 795 806 query->context = context; 796 807 797 - init_mad(query->sa_query.mad, agent); 808 + mad = query->sa_query.mad_buf->mad; 809 + init_mad(mad, agent); 798 810 799 - query->sa_query.callback = callback ? ib_sa_mcmember_rec_callback : NULL; 800 - query->sa_query.release = ib_sa_mcmember_rec_release; 801 - query->sa_query.port = port; 802 - query->sa_query.mad->mad_hdr.method = method; 803 - query->sa_query.mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC); 804 - query->sa_query.mad->sa_hdr.comp_mask = comp_mask; 811 + query->sa_query.callback = callback ? ib_sa_mcmember_rec_callback : NULL; 812 + query->sa_query.release = ib_sa_mcmember_rec_release; 813 + query->sa_query.port = port; 814 + mad->mad_hdr.method = method; 815 + mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC); 816 + mad->sa_hdr.comp_mask = comp_mask; 805 817 806 818 ib_pack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table), 807 - rec, query->sa_query.mad->data); 819 + rec, mad->data); 808 820 809 821 *sa_query = &query->sa_query; 810 822 811 823 ret = send_mad(&query->sa_query, timeout_ms); 812 - if (ret < 0) { 813 - *sa_query = NULL; 814 - kfree(query->sa_query.mad); 815 - kfree(query); 816 - } 824 + if (ret < 0) 825 + goto err2; 817 826 827 + return ret; 828 + 829 + err2: 830 + *sa_query = NULL; 831 + ib_free_send_mad(query->sa_query.mad_buf); 832 + 833 + err1: 834 + kfree(query); 818 835 return ret; 819 836 } 820 837 EXPORT_SYMBOL(ib_sa_mcmember_rec_query); ··· 831 830 static void send_handler(struct ib_mad_agent *agent, 832 831 struct ib_mad_send_wc *mad_send_wc) 833 832 { 834 - struct ib_sa_query *query; 833 + struct ib_sa_query *query = 
mad_send_wc->send_buf->context[0]; 835 834 unsigned long flags; 836 - 837 - spin_lock_irqsave(&idr_lock, flags); 838 - query = idr_find(&query_idr, mad_send_wc->wr_id); 839 - spin_unlock_irqrestore(&idr_lock, flags); 840 - 841 - if (!query) 842 - return; 843 835 844 836 if (query->callback) 845 837 switch (mad_send_wc->status) { ··· 850 856 break; 851 857 } 852 858 853 - dma_unmap_single(agent->device->dma_device, 854 - pci_unmap_addr(query, mapping), 855 - sizeof (struct ib_sa_mad), 856 - DMA_TO_DEVICE); 857 - kref_put(&query->sm_ah->ref, free_sm_ah); 858 - 859 - query->release(query); 860 - 861 859 spin_lock_irqsave(&idr_lock, flags); 862 - idr_remove(&query_idr, mad_send_wc->wr_id); 860 + idr_remove(&query_idr, query->id); 863 861 spin_unlock_irqrestore(&idr_lock, flags); 862 + 863 + ib_free_send_mad(mad_send_wc->send_buf); 864 + kref_put(&query->sm_ah->ref, free_sm_ah); 865 + query->release(query); 864 866 } 865 867 866 868 static void recv_handler(struct ib_mad_agent *mad_agent, 867 869 struct ib_mad_recv_wc *mad_recv_wc) 868 870 { 869 871 struct ib_sa_query *query; 870 - unsigned long flags; 872 + struct ib_mad_send_buf *mad_buf; 871 873 872 - spin_lock_irqsave(&idr_lock, flags); 873 - query = idr_find(&query_idr, mad_recv_wc->wc->wr_id); 874 - spin_unlock_irqrestore(&idr_lock, flags); 874 + mad_buf = (void *) (unsigned long) mad_recv_wc->wc->wr_id; 875 + query = mad_buf->context[0]; 875 876 876 - if (query && query->callback) { 877 + if (query->callback) { 877 878 if (mad_recv_wc->wc->status == IB_WC_SUCCESS) 878 879 query->callback(query, 879 880 mad_recv_wc->recv_buf.mad->mad_hdr.status ?
+2
drivers/infiniband/core/smi.h
··· 39 39 #ifndef __SMI_H_ 40 40 #define __SMI_H_ 41 41 42 + #include <rdma/ib_smi.h> 43 + 42 44 int smi_handle_dr_smp_recv(struct ib_smp *smp, 43 45 u8 node_type, 44 46 int port_num,
+21 -26
drivers/infiniband/core/user_mad.c
··· 96 96 }; 97 97 98 98 struct ib_umad_packet { 99 - struct ib_ah *ah; 100 99 struct ib_mad_send_buf *msg; 101 100 struct list_head list; 102 101 int length; ··· 138 139 struct ib_mad_send_wc *send_wc) 139 140 { 140 141 struct ib_umad_file *file = agent->context; 141 - struct ib_umad_packet *timeout, *packet = 142 - (void *) (unsigned long) send_wc->wr_id; 142 + struct ib_umad_packet *timeout; 143 + struct ib_umad_packet *packet = send_wc->send_buf->context[0]; 143 144 144 - ib_destroy_ah(packet->msg->send_wr.wr.ud.ah); 145 + ib_destroy_ah(packet->msg->ah); 145 146 ib_free_send_mad(packet->msg); 146 147 147 148 if (send_wc->status == IB_WC_RESP_TIMEOUT_ERR) { ··· 267 268 struct ib_umad_packet *packet; 268 269 struct ib_mad_agent *agent; 269 270 struct ib_ah_attr ah_attr; 270 - struct ib_send_wr *bad_wr; 271 + struct ib_ah *ah; 271 272 struct ib_rmpp_mad *rmpp_mad; 272 273 u8 method; 273 274 __be64 *tid; 274 - int ret, length, hdr_len, data_len, rmpp_hdr_size; 275 + int ret, length, hdr_len, rmpp_hdr_size; 275 276 int rmpp_active = 0; 276 277 277 278 if (count < sizeof (struct ib_user_mad)) ··· 320 321 ah_attr.grh.traffic_class = packet->mad.hdr.traffic_class; 321 322 } 322 323 323 - packet->ah = ib_create_ah(agent->qp->pd, &ah_attr); 324 - if (IS_ERR(packet->ah)) { 325 - ret = PTR_ERR(packet->ah); 324 + ah = ib_create_ah(agent->qp->pd, &ah_attr); 325 + if (IS_ERR(ah)) { 326 + ret = PTR_ERR(ah); 326 327 goto err_up; 327 328 } 328 329 ··· 336 337 337 338 /* Validate that the management class can support RMPP */ 338 339 if (rmpp_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_ADM) { 339 - hdr_len = offsetof(struct ib_sa_mad, data); 340 - data_len = length - hdr_len; 340 + hdr_len = IB_MGMT_SA_HDR; 341 341 } else if ((rmpp_mad->mad_hdr.mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) && 342 342 (rmpp_mad->mad_hdr.mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)) { 343 - hdr_len = offsetof(struct ib_vendor_mad, data); 344 - data_len = length - hdr_len; 343 + hdr_len = 
IB_MGMT_VENDOR_HDR; 345 344 } else { 346 345 ret = -EINVAL; 347 346 goto err_ah; ··· 350 353 ret = -EINVAL; 351 354 goto err_ah; 352 355 } 353 - hdr_len = offsetof(struct ib_mad, data); 354 - data_len = length - hdr_len; 356 + hdr_len = IB_MGMT_MAD_HDR; 355 357 } 356 358 357 359 packet->msg = ib_create_send_mad(agent, 358 360 be32_to_cpu(packet->mad.hdr.qpn), 359 - 0, packet->ah, rmpp_active, 360 - hdr_len, data_len, 361 + 0, rmpp_active, 362 + hdr_len, length - hdr_len, 361 363 GFP_KERNEL); 362 364 if (IS_ERR(packet->msg)) { 363 365 ret = PTR_ERR(packet->msg); 364 366 goto err_ah; 365 367 } 366 368 367 - packet->msg->send_wr.wr.ud.timeout_ms = packet->mad.hdr.timeout_ms; 368 - packet->msg->send_wr.wr.ud.retries = packet->mad.hdr.retries; 369 - 370 - /* Override send WR WRID initialized in ib_create_send_mad */ 371 - packet->msg->send_wr.wr_id = (unsigned long) packet; 369 + packet->msg->ah = ah; 370 + packet->msg->timeout_ms = packet->mad.hdr.timeout_ms; 371 + packet->msg->retries = packet->mad.hdr.retries; 372 + packet->msg->context[0] = packet; 372 373 373 374 if (!rmpp_active) { 374 375 /* Copy message from user into send buffer */ ··· 398 403 * transaction ID matches the agent being used to send the 399 404 * MAD. 
400 405 */ 401 - method = packet->msg->mad->mad_hdr.method; 406 + method = ((struct ib_mad_hdr *) packet->msg)->method; 402 407 403 408 if (!(method & IB_MGMT_METHOD_RESP) && 404 409 method != IB_MGMT_METHOD_TRAP_REPRESS && 405 410 method != IB_MGMT_METHOD_SEND) { 406 - tid = &packet->msg->mad->mad_hdr.tid; 411 + tid = &((struct ib_mad_hdr *) packet->msg)->tid; 407 412 *tid = cpu_to_be64(((u64) agent->hi_tid) << 32 | 408 413 (be64_to_cpup(tid) & 0xffffffff)); 409 414 } 410 415 411 - ret = ib_post_send_mad(agent, &packet->msg->send_wr, &bad_wr); 416 + ret = ib_post_send_mad(packet->msg, NULL); 412 417 if (ret) 413 418 goto err_msg; 414 419 ··· 420 425 ib_free_send_mad(packet->msg); 421 426 422 427 err_ah: 423 - ib_destroy_ah(packet->ah); 428 + ib_destroy_ah(ah); 424 429 425 430 err_up: 426 431 up_read(&file->agent_mutex);
+9 -63
drivers/infiniband/hw/mthca/mthca_mad.c
··· 46 46 MTHCA_VENDOR_CLASS2 = 0xa 47 47 }; 48 48 49 - struct mthca_trap_mad { 50 - struct ib_mad *mad; 51 - DECLARE_PCI_UNMAP_ADDR(mapping) 52 - }; 53 - 54 49 static void update_sm_ah(struct mthca_dev *dev, 55 50 u8 port_num, u16 lid, u8 sl) 56 51 { ··· 111 116 struct ib_mad *mad) 112 117 { 113 118 int qpn = mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED; 114 - struct mthca_trap_mad *tmad; 115 - struct ib_sge gather_list; 116 - struct ib_send_wr *bad_wr, wr = { 117 - .opcode = IB_WR_SEND, 118 - .sg_list = &gather_list, 119 - .num_sge = 1, 120 - .send_flags = IB_SEND_SIGNALED, 121 - .wr = { 122 - .ud = { 123 - .remote_qpn = qpn, 124 - .remote_qkey = qpn ? IB_QP1_QKEY : 0, 125 - .timeout_ms = 0 126 - } 127 - } 128 - }; 119 + struct ib_mad_send_buf *send_buf; 129 120 struct ib_mad_agent *agent = dev->send_agent[port_num - 1][qpn]; 130 121 int ret; 131 122 unsigned long flags; 132 123 133 124 if (agent) { 134 - tmad = kmalloc(sizeof *tmad, GFP_KERNEL); 135 - if (!tmad) 136 - return; 137 - 138 - tmad->mad = kmalloc(sizeof *tmad->mad, GFP_KERNEL); 139 - if (!tmad->mad) { 140 - kfree(tmad); 141 - return; 142 - } 143 - 144 - memcpy(tmad->mad, mad, sizeof *mad); 145 - 146 - wr.wr.ud.mad_hdr = &tmad->mad->mad_hdr; 147 - wr.wr_id = (unsigned long) tmad; 148 - 149 - gather_list.addr = dma_map_single(agent->device->dma_device, 150 - tmad->mad, 151 - sizeof *tmad->mad, 152 - DMA_TO_DEVICE); 153 - gather_list.length = sizeof *tmad->mad; 154 - gather_list.lkey = to_mpd(agent->qp->pd)->ntmr.ibmr.lkey; 155 - pci_unmap_addr_set(tmad, mapping, gather_list.addr); 156 - 125 + send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR, 126 + IB_MGMT_MAD_DATA, GFP_ATOMIC); 157 127 /* 158 128 * We rely here on the fact that MLX QPs don't use the 159 129 * address handle after the send is posted (this is ··· 126 166 * it's OK for our devices). 
127 167 */ 128 168 spin_lock_irqsave(&dev->sm_lock, flags); 129 - wr.wr.ud.ah = dev->sm_ah[port_num - 1]; 130 - if (wr.wr.ud.ah) 131 - ret = ib_post_send_mad(agent, &wr, &bad_wr); 169 + memcpy(send_buf->mad, mad, sizeof *mad); 170 + if ((send_buf->ah = dev->sm_ah[port_num - 1])) 171 + ret = ib_post_send_mad(send_buf, NULL); 132 172 else 133 173 ret = -EINVAL; 134 174 spin_unlock_irqrestore(&dev->sm_lock, flags); 135 175 136 - if (ret) { 137 - dma_unmap_single(agent->device->dma_device, 138 - pci_unmap_addr(tmad, mapping), 139 - sizeof *tmad->mad, 140 - DMA_TO_DEVICE); 141 - kfree(tmad->mad); 142 - kfree(tmad); 143 - } 176 + if (ret) 177 + ib_free_send_mad(send_buf); 144 178 } 145 179 } 146 180 ··· 221 267 static void send_handler(struct ib_mad_agent *agent, 222 268 struct ib_mad_send_wc *mad_send_wc) 223 269 { 224 - struct mthca_trap_mad *tmad = 225 - (void *) (unsigned long) mad_send_wc->wr_id; 226 - 227 - dma_unmap_single(agent->device->dma_device, 228 - pci_unmap_addr(tmad, mapping), 229 - sizeof *tmad->mad, 230 - DMA_TO_DEVICE); 231 - kfree(tmad->mad); 232 - kfree(tmad); 270 + ib_free_send_mad(mad_send_wc->send_buf); 233 271 } 234 272 235 273 int mthca_create_agents(struct mthca_dev *dev)
+34 -32
include/rdma/ib_mad.h
··· 109 109 #define IB_QP_SET_QKEY 0x80000000 110 110 111 111 enum { 112 + IB_MGMT_MAD_HDR = 24, 112 113 IB_MGMT_MAD_DATA = 232, 114 + IB_MGMT_RMPP_HDR = 36, 113 115 IB_MGMT_RMPP_DATA = 220, 116 + IB_MGMT_VENDOR_HDR = 40, 114 117 IB_MGMT_VENDOR_DATA = 216, 115 - IB_MGMT_SA_DATA = 200 118 + IB_MGMT_SA_HDR = 56, 119 + IB_MGMT_SA_DATA = 200, 116 120 }; 117 121 118 122 struct ib_mad_hdr { ··· 207 203 208 204 /** 209 205 * ib_mad_send_buf - MAD data buffer and work request for sends. 210 - * @mad: References an allocated MAD data buffer. The size of the data 211 - * buffer is specified in the @send_wr.length field. 212 - * @mapping: DMA mapping information. 206 + * @next: A pointer used to chain together MADs for posting. 207 + * @mad: References an allocated MAD data buffer. 213 208 * @mad_agent: MAD agent that allocated the buffer. 209 + * @ah: The address handle to use when sending the MAD. 214 210 * @context: User-controlled context fields. 215 - * @send_wr: An initialized work request structure used when sending the MAD. 216 - * The wr_id field of the work request is initialized to reference this 217 - * data structure. 218 - * @sge: A scatter-gather list referenced by the work request. 211 + * @timeout_ms: Time to wait for a response. 212 + * @retries: Number of times to retry a request for a response. 219 213 * 220 214 * Users are responsible for initializing the MAD buffer itself, with the 221 215 * exception of specifying the payload length field in any RMPP MAD. 222 216 */ 223 217 struct ib_mad_send_buf { 224 - struct ib_mad *mad; 225 - DECLARE_PCI_UNMAP_ADDR(mapping) 218 + struct ib_mad_send_buf *next; 219 + void *mad; 226 220 struct ib_mad_agent *mad_agent; 221 + struct ib_ah *ah; 227 222 void *context[2]; 228 - struct ib_send_wr send_wr; 229 - struct ib_sge sge; 223 + int timeout_ms; 224 + int retries; 230 225 }; 231 226 232 227 /** ··· 290 287 * or @mad_send_wc. 
291 288 */ 292 289 typedef void (*ib_mad_snoop_handler)(struct ib_mad_agent *mad_agent, 293 - struct ib_send_wr *send_wr, 290 + struct ib_mad_send_buf *send_buf, 294 291 struct ib_mad_send_wc *mad_send_wc); 295 292 296 293 /** ··· 337 334 338 335 /** 339 336 * ib_mad_send_wc - MAD send completion information. 340 - * @wr_id: Work request identifier associated with the send MAD request. 337 + * @send_buf: Send MAD data buffer associated with the send MAD request. 341 338 * @status: Completion status. 342 339 * @vendor_err: Optional vendor error information returned with a failed 343 340 * request. 344 341 */ 345 342 struct ib_mad_send_wc { 346 - u64 wr_id; 343 + struct ib_mad_send_buf *send_buf; 347 344 enum ib_wc_status status; 348 345 u32 vendor_err; 349 346 }; ··· 369 366 * @rmpp_list: Specifies a list of RMPP reassembled received MAD buffers. 370 367 * @mad_len: The length of the received MAD, without duplicated headers. 371 368 * 372 - * For received response, the wr_id field of the wc is set to the wr_id 369 + * For received response, the wr_id contains a pointer to the ib_mad_send_buf 373 370 * for the corresponding send request. 374 371 */ 375 372 struct ib_mad_recv_wc { ··· 466 463 /** 467 464 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated 468 465 * with the registered client. 469 - * @mad_agent: Specifies the associated registration to post the send to. 470 - * @send_wr: Specifies the information needed to send the MAD(s). 471 - * @bad_send_wr: Specifies the MAD on which an error was encountered. 466 + * @send_buf: Specifies the information needed to send the MAD(s). 467 + * @bad_send_buf: Specifies the MAD on which an error was encountered. This 468 + * parameter is optional if only a single MAD is posted. 472 469 * 473 470 * Sent MADs are not guaranteed to complete in the order that they were posted. 474 471 * ··· 482 479 * defined data being transferred. 
The paylen_newwin field should be 483 480 * specified in network-byte order. 484 481 */ 485 - int ib_post_send_mad(struct ib_mad_agent *mad_agent, 486 - struct ib_send_wr *send_wr, 487 - struct ib_send_wr **bad_send_wr); 482 + int ib_post_send_mad(struct ib_mad_send_buf *send_buf, 483 + struct ib_mad_send_buf **bad_send_buf); 488 484 489 485 /** 490 486 * ib_coalesce_recv_mad - Coalesces received MAD data into a single buffer. ··· 509 507 /** 510 508 * ib_cancel_mad - Cancels an outstanding send MAD operation. 511 509 * @mad_agent: Specifies the registration associated with sent MAD. 512 - * @wr_id: Indicates the work request identifier of the MAD to cancel. 510 + * @send_buf: Indicates the MAD to cancel. 513 511 * 514 512 * MADs will be returned to the user through the corresponding 515 513 * ib_mad_send_handler. 516 514 */ 517 - void ib_cancel_mad(struct ib_mad_agent *mad_agent, u64 wr_id); 515 + void ib_cancel_mad(struct ib_mad_agent *mad_agent, 516 + struct ib_mad_send_buf *send_buf); 518 517 519 518 /** 520 519 * ib_modify_mad - Modifies an outstanding send MAD operation. 521 520 * @mad_agent: Specifies the registration associated with sent MAD. 522 - * @wr_id: Indicates the work request identifier of the MAD to modify. 521 + * @send_buf: Indicates the MAD to modify. 523 522 * @timeout_ms: New timeout value for sent MAD. 524 523 * 525 524 * This call will reset the timeout value for a sent MAD to the specified 526 525 * value. 527 526 */ 528 - int ib_modify_mad(struct ib_mad_agent *mad_agent, u64 wr_id, u32 timeout_ms); 527 + int ib_modify_mad(struct ib_mad_agent *mad_agent, 528 + struct ib_mad_send_buf *send_buf, u32 timeout_ms); 529 529 530 530 /** 531 531 * ib_redirect_mad_qp - Registers a QP for MAD services. ··· 576 572 * @remote_qpn: Specifies the QPN of the receiving node. 577 573 * @pkey_index: Specifies which PKey the MAD will be sent using. This field 578 574 * is valid only if the remote_qpn is QP 1. 
579 - * @ah: References the address handle used to transfer to the remote node. 580 575 * @rmpp_active: Indicates if the send will enable RMPP. 581 576 * @hdr_len: Indicates the size of the data header of the MAD. This length 582 577 * should include the common MAD header, RMPP header, plus any class ··· 585 582 * additional padding that may be necessary. 586 583 * @gfp_mask: GFP mask used for the memory allocation. 587 584 * 588 - * This is a helper routine that may be used to allocate a MAD. Users are 589 - * not required to allocate outbound MADs using this call. The returned 590 - * MAD send buffer will reference a data buffer usable for sending a MAD, along 585 + * This routine allocates a MAD for sending. The returned MAD send buffer 586 + * will reference a data buffer usable for sending a MAD, along 591 587 * with an initialized work request structure. Users may modify the returned 592 - * MAD data buffer or work request before posting the send. 588 + * MAD data buffer before posting the send. 593 589 * 594 590 * The returned data buffer will be cleared. Users are responsible for 595 591 * initializing the common MAD and any class specific headers. If @rmpp_active ··· 596 594 */ 597 595 struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent, 598 596 u32 remote_qpn, u16 pkey_index, 599 - struct ib_ah *ah, int rmpp_active, 597 + int rmpp_active, 600 598 int hdr_len, int data_len, 601 599 gfp_t gfp_mask); 602 600
-3
include/rdma/ib_verbs.h
··· 595 595 } atomic; 596 596 struct { 597 597 struct ib_ah *ah; 598 - struct ib_mad_hdr *mad_hdr; 599 598 u32 remote_qpn; 600 599 u32 remote_qkey; 601 - int timeout_ms; /* valid for MADs only */ 602 - int retries; /* valid for MADs only */ 603 600 u16 pkey_index; /* valid for GSI only */ 604 601 u8 port_num; /* valid for DR SMPs on switch only */ 605 602 } ud;