Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v2.6.18, 680 lines, 18 kB
/*
 * Copyright (c) 2006 QLogic, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "ipath_verbs.h"
#include "ipath_common.h"

/*
 * Convert the AETH RNR timeout code into the number of milliseconds.
 */
const u32 ib_ipath_rnr_table[32] = {
	656,	/* 0 */
	1,	/* 1 */
	1,	/* 2 */
	1,	/* 3 */
	1,	/* 4 */
	1,	/* 5 */
	1,	/* 6 */
	1,	/* 7 */
	1,	/* 8 */
	1,	/* 9 */
	1,	/* A */
	1,	/* B */
	1,	/* C */
	1,	/* D */
	2,	/* E */
	2,	/* F */
	3,	/* 10 */
	4,	/* 11 */
	6,	/* 12 */
	8,	/* 13 */
	11,	/* 14 */
	16,	/* 15 */
	21,	/* 16 */
	31,	/* 17 */
	41,	/* 18 */
	62,	/* 19 */
	82,	/* 1A */
	123,	/* 1B */
	164,	/* 1C */
	246,	/* 1D */
	328,	/* 1E */
	492	/* 1F */
};

/**
 * ipath_insert_rnr_queue - put QP on the RNR timeout list for the device
 * @qp: the QP
 *
 * XXX Use a simple list for now.  We might need a priority
 * queue if we have lots of QPs waiting for RNR timeouts
 * but that should be rare.
 */
void ipath_insert_rnr_queue(struct ipath_qp *qp)
{
	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
	unsigned long flags;

	spin_lock_irqsave(&dev->pending_lock, flags);
	if (list_empty(&dev->rnrwait))
		list_add(&qp->timerwait, &dev->rnrwait);
	else {
		struct list_head *l = &dev->rnrwait;
		struct ipath_qp *nqp = list_entry(l->next, struct ipath_qp,
						  timerwait);

		while (qp->s_rnr_timeout >= nqp->s_rnr_timeout) {
			qp->s_rnr_timeout -= nqp->s_rnr_timeout;
			l = l->next;
			if (l->next == &dev->rnrwait)
				break;
			nqp = list_entry(l->next, struct ipath_qp,
					 timerwait);
		}
		list_add(&qp->timerwait, l);
	}
	spin_unlock_irqrestore(&dev->pending_lock, flags);
}
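
/*
 * Illustrative sketch, not part of the original file: entries on
 * dev->rnrwait store s_rnr_timeout as a delta from the preceding entry,
 * which is why the insertion loop above subtracts each node's timeout as
 * it walks the list.  A hypothetical helper recovering a QP's absolute
 * timeout in milliseconds (caller holds dev->pending_lock):
 */
static inline u32 ipath_rnr_abs_timeout(struct ipath_ibdev *dev,
					struct ipath_qp *qp)
{
	struct list_head *l;
	u32 ms = 0;

	/* Sum the deltas up to and including @qp's own node. */
	for (l = dev->rnrwait.next; l != &dev->rnrwait; l = l->next) {
		struct ipath_qp *nqp = list_entry(l, struct ipath_qp,
						  timerwait);

		ms += nqp->s_rnr_timeout;
		if (nqp == qp)
			break;
	}
	return ms;
}
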
/**
 * ipath_get_rwqe - copy the next RWQE into the QP's RWQE
 * @qp: the QP
 * @wr_id_only: update wr_id only, not SGEs
 *
 * Return 0 if no RWQE is available, otherwise return 1.
 *
 * Can be called from interrupt level.
 */
int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only)
{
	unsigned long flags;
	struct ipath_rq *rq;
	struct ipath_srq *srq;
	struct ipath_rwqe *wqe;
	int ret = 1;

	if (!qp->ibqp.srq) {
		rq = &qp->r_rq;
		spin_lock_irqsave(&rq->lock, flags);

		if (unlikely(rq->tail == rq->head)) {
			ret = 0;
			goto done;
		}
		wqe = get_rwqe_ptr(rq, rq->tail);
		qp->r_wr_id = wqe->wr_id;
		if (!wr_id_only) {
			qp->r_sge.sge = wqe->sg_list[0];
			qp->r_sge.sg_list = wqe->sg_list + 1;
			qp->r_sge.num_sge = wqe->num_sge;
			qp->r_len = wqe->length;
		}
		if (++rq->tail >= rq->size)
			rq->tail = 0;
		goto done;
	}

	srq = to_isrq(qp->ibqp.srq);
	rq = &srq->rq;
	spin_lock_irqsave(&rq->lock, flags);

	if (unlikely(rq->tail == rq->head)) {
		ret = 0;
		goto done;
	}
	wqe = get_rwqe_ptr(rq, rq->tail);
	qp->r_wr_id = wqe->wr_id;
	if (!wr_id_only) {
		qp->r_sge.sge = wqe->sg_list[0];
		qp->r_sge.sg_list = wqe->sg_list + 1;
		qp->r_sge.num_sge = wqe->num_sge;
		qp->r_len = wqe->length;
	}
	if (++rq->tail >= rq->size)
		rq->tail = 0;
	if (srq->ibsrq.event_handler) {
		struct ib_event ev;
		u32 n;

		if (rq->head < rq->tail)
			n = rq->size + rq->head - rq->tail;
		else
			n = rq->head - rq->tail;
		if (n < srq->limit) {
			srq->limit = 0;
			spin_unlock_irqrestore(&rq->lock, flags);
			ev.device = qp->ibqp.device;
			ev.element.srq = qp->ibqp.srq;
			ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
			srq->ibsrq.event_handler(&ev,
						 srq->ibsrq.srq_context);
			goto bail;
		}
	}

done:
	spin_unlock_irqrestore(&rq->lock, flags);
bail:
	return ret;
}
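
/*
 * Illustrative sketch, not part of the original file: the SRQ limit test
 * in ipath_get_rwqe() computes the number of RWQEs still queued in the
 * ring as (head - tail) modulo the ring size.  A hypothetical helper
 * expressing the same count (caller holds rq->lock):
 */
static inline u32 ipath_rq_count(const struct ipath_rq *rq)
{
	/* head == tail means the ring is empty; head is the producer. */
	return (rq->head < rq->tail) ? rq->size + rq->head - rq->tail
				     : rq->head - rq->tail;
}
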
/**
 * ipath_ruc_loopback - handle UC and RC loopback requests
 * @sqp: the loopback QP
 *
 * This is called from ipath_do_uc_send() or ipath_do_rc_send() to
 * forward a WQE addressed to the same HCA.
 * Note that although we are single threaded due to the tasklet, we still
 * have to protect against post_send().  We don't have to worry about
 * receive interrupts since this is a connected protocol and all packets
 * will pass through here.
 */
static void ipath_ruc_loopback(struct ipath_qp *sqp)
{
	struct ipath_ibdev *dev = to_idev(sqp->ibqp.device);
	struct ipath_qp *qp;
	struct ipath_swqe *wqe;
	struct ipath_sge *sge;
	unsigned long flags;
	struct ib_wc wc;
	u64 sdata;

	qp = ipath_lookup_qpn(&dev->qp_table, sqp->remote_qpn);
	if (!qp) {
		dev->n_pkt_drops++;
		return;
	}

again:
	spin_lock_irqsave(&sqp->s_lock, flags);

	if (!(ib_ipath_state_ops[sqp->state] & IPATH_PROCESS_SEND_OK)) {
		spin_unlock_irqrestore(&sqp->s_lock, flags);
		goto done;
	}

	/* Get the next send request. */
	if (sqp->s_last == sqp->s_head) {
		/* Send work queue is empty. */
		spin_unlock_irqrestore(&sqp->s_lock, flags);
		goto done;
	}

	/*
	 * We can rely on the entry not changing without the s_lock
	 * being held until we update s_last.
	 */
	wqe = get_swqe_ptr(sqp, sqp->s_last);
	spin_unlock_irqrestore(&sqp->s_lock, flags);

	wc.wc_flags = 0;
	wc.imm_data = 0;

	sqp->s_sge.sge = wqe->sg_list[0];
	sqp->s_sge.sg_list = wqe->sg_list + 1;
	sqp->s_sge.num_sge = wqe->wr.num_sge;
	sqp->s_len = wqe->length;
	switch (wqe->wr.opcode) {
	case IB_WR_SEND_WITH_IMM:
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.imm_data = wqe->wr.imm_data;
		/* FALLTHROUGH */
	case IB_WR_SEND:
		if (!ipath_get_rwqe(qp, 0)) {
		rnr_nak:
			/* Handle RNR NAK */
			if (qp->ibqp.qp_type == IB_QPT_UC)
				goto send_comp;
			if (sqp->s_rnr_retry == 0) {
				wc.status = IB_WC_RNR_RETRY_EXC_ERR;
				goto err;
			}
			if (sqp->s_rnr_retry_cnt < 7)
				sqp->s_rnr_retry--;
			dev->n_rnr_naks++;
			sqp->s_rnr_timeout =
				ib_ipath_rnr_table[sqp->r_min_rnr_timer];
			ipath_insert_rnr_queue(sqp);
			goto done;
		}
		break;

	case IB_WR_RDMA_WRITE_WITH_IMM:
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.imm_data = wqe->wr.imm_data;
		if (!ipath_get_rwqe(qp, 1))
			goto rnr_nak;
		/* FALLTHROUGH */
	case IB_WR_RDMA_WRITE:
		if (wqe->length == 0)
			break;
		if (unlikely(!ipath_rkey_ok(dev, &qp->r_sge, wqe->length,
					    wqe->wr.wr.rdma.remote_addr,
					    wqe->wr.wr.rdma.rkey,
					    IB_ACCESS_REMOTE_WRITE))) {
		acc_err:
			wc.status = IB_WC_REM_ACCESS_ERR;
		err:
			wc.wr_id = wqe->wr.wr_id;
			wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
			wc.vendor_err = 0;
			wc.byte_len = 0;
			wc.qp_num = sqp->ibqp.qp_num;
			wc.src_qp = sqp->remote_qpn;
			wc.pkey_index = 0;
			wc.slid = sqp->remote_ah_attr.dlid;
			wc.sl = sqp->remote_ah_attr.sl;
			wc.dlid_path_bits = 0;
			wc.port_num = 0;
			ipath_sqerror_qp(sqp, &wc);
			goto done;
		}
		break;

	case IB_WR_RDMA_READ:
		if (unlikely(!ipath_rkey_ok(dev, &sqp->s_sge, wqe->length,
					    wqe->wr.wr.rdma.remote_addr,
					    wqe->wr.wr.rdma.rkey,
					    IB_ACCESS_REMOTE_READ)))
			goto acc_err;
		if (unlikely(!(qp->qp_access_flags &
			       IB_ACCESS_REMOTE_READ)))
			goto acc_err;
		qp->r_sge.sge = wqe->sg_list[0];
		qp->r_sge.sg_list = wqe->sg_list + 1;
		qp->r_sge.num_sge = wqe->wr.num_sge;
		break;

	case IB_WR_ATOMIC_CMP_AND_SWP:
	case IB_WR_ATOMIC_FETCH_AND_ADD:
		if (unlikely(!ipath_rkey_ok(dev, &qp->r_sge, sizeof(u64),
					    wqe->wr.wr.rdma.remote_addr,
					    wqe->wr.wr.rdma.rkey,
					    IB_ACCESS_REMOTE_ATOMIC)))
			goto acc_err;
		/* Perform atomic OP and save result. */
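		/*
		 * dev->pending_lock makes the 64-bit read-modify-write
		 * below atomic with respect to any other loopback atomic
		 * taking the same lock; the original value is saved in
		 * r_atomic_data and handed back to the requester below.
		 */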
		sdata = wqe->wr.wr.atomic.swap;
		spin_lock_irqsave(&dev->pending_lock, flags);
		qp->r_atomic_data = *(u64 *) qp->r_sge.sge.vaddr;
		if (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
			*(u64 *) qp->r_sge.sge.vaddr =
				qp->r_atomic_data + sdata;
		else if (qp->r_atomic_data == wqe->wr.wr.atomic.compare_add)
			*(u64 *) qp->r_sge.sge.vaddr = sdata;
		spin_unlock_irqrestore(&dev->pending_lock, flags);
		*(u64 *) sqp->s_sge.sge.vaddr = qp->r_atomic_data;
		goto send_comp;

	default:
		goto done;
	}

	sge = &sqp->s_sge.sge;
	while (sqp->s_len) {
		u32 len = sqp->s_len;

		if (len > sge->length)
			len = sge->length;
		BUG_ON(len == 0);
		ipath_copy_sge(&qp->r_sge, sge->vaddr, len);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (--sqp->s_sge.num_sge)
				*sge = *sqp->s_sge.sg_list++;
		} else if (sge->length == 0 && sge->mr != NULL) {
			if (++sge->n >= IPATH_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		sqp->s_len -= len;
	}

	if (wqe->wr.opcode == IB_WR_RDMA_WRITE ||
	    wqe->wr.opcode == IB_WR_RDMA_READ)
		goto send_comp;

	if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
		wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
	else
		wc.opcode = IB_WC_RECV;
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.vendor_err = 0;
	wc.byte_len = wqe->length;
	wc.qp_num = qp->ibqp.qp_num;
	wc.src_qp = qp->remote_qpn;
	/* XXX do we know which pkey matched? Only needed for GSI. */
	wc.pkey_index = 0;
	wc.slid = qp->remote_ah_attr.dlid;
	wc.sl = qp->remote_ah_attr.sl;
	wc.dlid_path_bits = 0;
	/* Signal completion event if the solicited bit is set. */
	ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
		       wqe->wr.send_flags & IB_SEND_SOLICITED);

send_comp:
	sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;

	if (!test_bit(IPATH_S_SIGNAL_REQ_WR, &sqp->s_flags) ||
	    (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
		wc.wr_id = wqe->wr.wr_id;
		wc.status = IB_WC_SUCCESS;
		wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
		wc.vendor_err = 0;
		wc.byte_len = wqe->length;
		wc.qp_num = sqp->ibqp.qp_num;
		wc.src_qp = 0;
		wc.pkey_index = 0;
		wc.slid = 0;
		wc.sl = 0;
		wc.dlid_path_bits = 0;
		wc.port_num = 0;
		ipath_cq_enter(to_icq(sqp->ibqp.send_cq), &wc, 0);
	}

	/* Update s_last now that we are finished with the SWQE */
	spin_lock_irqsave(&sqp->s_lock, flags);
	if (++sqp->s_last >= sqp->s_size)
		sqp->s_last = 0;
	spin_unlock_irqrestore(&sqp->s_lock, flags);
	goto again;

done:
	if (atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
}
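
/*
 * Illustrative sketch, not part of the original file: the copy loop in
 * ipath_ruc_loopback() advances sqp->s_sge by @len bytes after each
 * ipath_copy_sge(), stepping to the next SGE when the current
 * work-request segment is consumed, or to the next memory-region segment
 * when the current mapping runs out.  The same bookkeeping, factored
 * into a hypothetical helper:
 */
static inline void ipath_advance_s_sge(struct ipath_qp *sqp, u32 len)
{
	struct ipath_sge *sge = &sqp->s_sge.sge;

	sge->vaddr += len;
	sge->length -= len;
	sge->sge_length -= len;
	if (sge->sge_length == 0) {
		/* This SGE is done; move to the next one in the WR. */
		if (--sqp->s_sge.num_sge)
			*sge = *sqp->s_sge.sg_list++;
	} else if (sge->length == 0 && sge->mr != NULL) {
		/* Cross into the next segment of the memory region. */
		if (++sge->n >= IPATH_SEGSZ) {
			if (++sge->m >= sge->mr->mapsz)
				return;
			sge->n = 0;
		}
		sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
		sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
	}
}
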
/**
 * ipath_no_bufs_available - tell the layer driver we need buffers
 * @qp: the QP that caused the problem
 * @dev: the device we ran out of buffers on
 *
 * Called when we run out of PIO buffers.
 */
void ipath_no_bufs_available(struct ipath_qp *qp, struct ipath_ibdev *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->pending_lock, flags);
	if (list_empty(&qp->piowait))
		list_add_tail(&qp->piowait, &dev->piowait);
	spin_unlock_irqrestore(&dev->pending_lock, flags);
	/*
	 * Note that as soon as ipath_layer_want_buffer() is called and
	 * possibly before it returns, ipath_ib_piobufavail()
	 * could be called.  If we are still in the tasklet function,
	 * tasklet_hi_schedule() will not call us until the next time
	 * tasklet_hi_schedule() is called.
	 * We clear the tasklet flag now since we are committing to return
	 * from the tasklet function.
	 */
	clear_bit(IPATH_S_BUSY, &qp->s_flags);
	tasklet_unlock(&qp->s_task);
	ipath_layer_want_buffer(dev->dd);
	dev->n_piowait++;
}

/**
 * ipath_post_ruc_send - post RC and UC sends
 * @qp: the QP to post on
 * @wr: the work request to send
 */
int ipath_post_ruc_send(struct ipath_qp *qp, struct ib_send_wr *wr)
{
	struct ipath_swqe *wqe;
	unsigned long flags;
	u32 next;
	int i, j;
	int acc;
	int ret;

	/*
	 * Don't allow RDMA reads or atomic operations on UC or
	 * undefined operations.
	 * Make sure buffer is large enough to hold the result for atomics.
	 */
	if (qp->ibqp.qp_type == IB_QPT_UC) {
		if ((unsigned) wr->opcode >= IB_WR_RDMA_READ) {
			ret = -EINVAL;
			goto bail;
		}
	} else if ((unsigned) wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD) {
		ret = -EINVAL;
		goto bail;
	} else if (wr->opcode >= IB_WR_ATOMIC_CMP_AND_SWP &&
		   (wr->num_sge == 0 ||
		    wr->sg_list[0].length < sizeof(u64) ||
		    wr->sg_list[0].addr & (sizeof(u64) - 1))) {
		ret = -EINVAL;
		goto bail;
	}
	/* IB spec says that num_sge == 0 is OK. */
	if (wr->num_sge > qp->s_max_sge) {
		ret = -ENOMEM;
		goto bail;
	}
	spin_lock_irqsave(&qp->s_lock, flags);
	next = qp->s_head + 1;
	if (next >= qp->s_size)
		next = 0;
	if (next == qp->s_last) {
		spin_unlock_irqrestore(&qp->s_lock, flags);
		ret = -EINVAL;
		goto bail;
	}

	wqe = get_swqe_ptr(qp, qp->s_head);
	wqe->wr = *wr;
	wqe->ssn = qp->s_ssn++;
	wqe->sg_list[0].mr = NULL;
	wqe->sg_list[0].vaddr = NULL;
	wqe->sg_list[0].length = 0;
	wqe->sg_list[0].sge_length = 0;
	wqe->length = 0;
	acc = wr->opcode >= IB_WR_RDMA_READ ? IB_ACCESS_LOCAL_WRITE : 0;
	for (i = 0, j = 0; i < wr->num_sge; i++) {
		if (to_ipd(qp->ibqp.pd)->user && wr->sg_list[i].lkey == 0) {
			spin_unlock_irqrestore(&qp->s_lock, flags);
			ret = -EINVAL;
			goto bail;
		}
		if (wr->sg_list[i].length == 0)
			continue;
		if (!ipath_lkey_ok(&to_idev(qp->ibqp.device)->lk_table,
				   &wqe->sg_list[j], &wr->sg_list[i],
				   acc)) {
			spin_unlock_irqrestore(&qp->s_lock, flags);
			ret = -EINVAL;
			goto bail;
		}
		wqe->length += wr->sg_list[i].length;
		j++;
	}
	wqe->wr.num_sge = j;
	qp->s_head = next;
	spin_unlock_irqrestore(&qp->s_lock, flags);

	ipath_do_ruc_send((unsigned long) qp);

	ret = 0;

bail:
	return ret;
}
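
/*
 * Illustrative sketch, not part of the original file: the ring-full test
 * in ipath_post_ruc_send() above leaves one slot unused so that
 * s_head == s_last can unambiguously mean "empty".  The same check as a
 * hypothetical predicate (caller holds qp->s_lock):
 */
static inline int ipath_sq_full(const struct ipath_qp *qp)
{
	u32 next = qp->s_head + 1;

	if (next >= qp->s_size)
		next = 0;
	return next == qp->s_last;
}
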
/**
 * ipath_make_grh - construct a GRH header
 * @dev: a pointer to the ipath device
 * @hdr: a pointer to the GRH header being constructed
 * @grh: the global route address to send to
 * @hwords: the number of 32 bit words of header being sent
 * @nwords: the number of 32 bit words of data being sent
 *
 * Return the size of the header in 32 bit words.
 */
u32 ipath_make_grh(struct ipath_ibdev *dev, struct ib_grh *hdr,
		   struct ib_global_route *grh, u32 hwords, u32 nwords)
{
	hdr->version_tclass_flow =
		cpu_to_be32((6 << 28) |
			    (grh->traffic_class << 20) |
			    grh->flow_label);
	hdr->paylen = cpu_to_be16((hwords - 2 + nwords + SIZE_OF_CRC) << 2);
	/* next_hdr is defined by C8-7 in ch. 8.4.1 */
	hdr->next_hdr = 0x1B;
	hdr->hop_limit = grh->hop_limit;
	/* The SGID is 32-bit aligned. */
	hdr->sgid.global.subnet_prefix = dev->gid_prefix;
	hdr->sgid.global.interface_id = ipath_layer_get_guid(dev->dd);
	hdr->dgid = grh->dgid;

	/* GRH header size in 32-bit words. */
	return sizeof(struct ib_grh) / sizeof(u32);
}

/**
 * ipath_do_ruc_send - perform a send on an RC or UC QP
 * @data: contains a pointer to the QP
 *
 * Process entries in the send work queue until credit or queue is
 * exhausted.  Only allow one CPU to send a packet per QP (tasklet).
 * Otherwise, after we drop the QP s_lock, two threads could send
 * packets out of order.
 */
void ipath_do_ruc_send(unsigned long data)
{
	struct ipath_qp *qp = (struct ipath_qp *)data;
	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
	unsigned long flags;
	u16 lrh0;
	u32 nwords;
	u32 extra_bytes;
	u32 bth0;
	u32 bth2;
	u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
	struct ipath_other_headers *ohdr;

	if (test_and_set_bit(IPATH_S_BUSY, &qp->s_flags))
		goto bail;

	if (unlikely(qp->remote_ah_attr.dlid ==
		     ipath_layer_get_lid(dev->dd))) {
		ipath_ruc_loopback(qp);
		goto clear;
	}

	ohdr = &qp->s_hdr.u.oth;
	if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
		ohdr = &qp->s_hdr.u.l.oth;

again:
	/* Check for a constructed packet to be sent. */
	if (qp->s_hdrwords != 0) {
		/*
		 * If no PIO bufs are available, return.  An interrupt will
		 * call ipath_ib_piobufavail() when one is available.
		 */
		if (ipath_verbs_send(dev->dd, qp->s_hdrwords,
				     (u32 *) &qp->s_hdr, qp->s_cur_size,
				     qp->s_cur_sge)) {
			ipath_no_bufs_available(qp, dev);
			goto bail;
		}
		dev->n_unicast_xmit++;
		/* Record that we sent the packet and s_hdr is empty. */
		qp->s_hdrwords = 0;
	}

	/*
	 * The lock is needed to synchronize between setting
	 * qp->s_ack_state, resend timer, and post_send().
	 */
	spin_lock_irqsave(&qp->s_lock, flags);

	/* Sending responses has higher priority than sending requests. */
	if (qp->s_ack_state != IB_OPCODE_RC_ACKNOWLEDGE &&
	    (bth0 = ipath_make_rc_ack(qp, ohdr, pmtu)) != 0)
		bth2 = qp->s_ack_psn++ & IPATH_PSN_MASK;
	else if (!((qp->ibqp.qp_type == IB_QPT_RC) ?
		   ipath_make_rc_req(qp, ohdr, pmtu, &bth0, &bth2) :
		   ipath_make_uc_req(qp, ohdr, pmtu, &bth0, &bth2))) {
		/*
		 * Clear the busy bit before unlocking to avoid races with
		 * adding new work queue items and then failing to process
		 * them.
		 */
		clear_bit(IPATH_S_BUSY, &qp->s_flags);
		spin_unlock_irqrestore(&qp->s_lock, flags);
		goto bail;
	}

	spin_unlock_irqrestore(&qp->s_lock, flags);

	/* Construct the header. */
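	/*
	 * The payload is padded to a multiple of 4 bytes: extra_bytes is
	 * the pad count (0-3), advertised to the receiver in the BTH
	 * PadCnt field ("bth0 |= extra_bytes << 20" below), and nwords is
	 * the padded payload length in 32-bit words.
	 */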
	extra_bytes = (4 - qp->s_cur_size) & 3;
	nwords = (qp->s_cur_size + extra_bytes) >> 2;
	lrh0 = IPATH_LRH_BTH;
	if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
		qp->s_hdrwords += ipath_make_grh(dev, &qp->s_hdr.u.l.grh,
						 &qp->remote_ah_attr.grh,
						 qp->s_hdrwords, nwords);
		lrh0 = IPATH_LRH_GRH;
	}
	lrh0 |= qp->remote_ah_attr.sl << 4;
	qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
	qp->s_hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
	qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords +
				       SIZE_OF_CRC);
	qp->s_hdr.lrh[3] = cpu_to_be16(ipath_layer_get_lid(dev->dd));
	bth0 |= ipath_layer_get_pkey(dev->dd, qp->s_pkey_index);
	bth0 |= extra_bytes << 20;
	ohdr->bth[0] = cpu_to_be32(bth0);
	ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
	ohdr->bth[2] = cpu_to_be32(bth2);

	/* Check for more work to do. */
	goto again;

clear:
	clear_bit(IPATH_S_BUSY, &qp->s_flags);
bail:
	return;
}
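
/*
 * Illustrative worked example, not part of the original file: for a
 * hypothetical 7-byte payload (qp->s_cur_size == 7) the header math in
 * ipath_do_ruc_send() gives
 *
 *	extra_bytes = (4 - 7) & 3  = 1 pad byte
 *	nwords      = (7 + 1) >> 2 = 2 payload words
 *
 * so bth0 carries PadCnt == 1 in bits 21:20 and lrh[2] counts
 * qp->s_hdrwords + 2 + SIZE_OF_CRC 32-bit words on the wire.
 */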