Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v2.6.23 (753 lines, 20 kB)
/*
 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "ipath_verbs.h"
#include "ipath_kernel.h"

/*
 * Convert the AETH RNR timeout code into the number of milliseconds.
 */
const u32 ib_ipath_rnr_table[32] = {
	656,	/* 0 */
	1,	/* 1 */
	1,	/* 2 */
	1,	/* 3 */
	1,	/* 4 */
	1,	/* 5 */
	1,	/* 6 */
	1,	/* 7 */
	1,	/* 8 */
	1,	/* 9 */
	1,	/* A */
	1,	/* B */
	1,	/* C */
	1,	/* D */
	2,	/* E */
	2,	/* F */
	3,	/* 10 */
	4,	/* 11 */
	6,	/* 12 */
	8,	/* 13 */
	11,	/* 14 */
	16,	/* 15 */
	21,	/* 16 */
	31,	/* 17 */
	41,	/* 18 */
	62,	/* 19 */
	82,	/* 1A */
	123,	/* 1B */
	164,	/* 1C */
	246,	/* 1D */
	328,	/* 1E */
	492	/* 1F */
};

/**
 * ipath_insert_rnr_queue - put QP on the RNR timeout list for the device
 * @qp: the QP
 *
 * XXX Use a simple list for now. We might need a priority
 * queue if we have lots of QPs waiting for RNR timeouts
 * but that should be rare.
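 *
 * Entries on the list are delta-encoded: while walking to the insertion
 * point below, the new QP's s_rnr_timeout is reduced by each predecessor's
 * remaining time, so the periodic timer only ever has to decrement the
 * head entry rather than every waiting QP.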
 */
void ipath_insert_rnr_queue(struct ipath_qp *qp)
{
	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
	unsigned long flags;

	spin_lock_irqsave(&dev->pending_lock, flags);
	if (list_empty(&dev->rnrwait))
		list_add(&qp->timerwait, &dev->rnrwait);
	else {
		struct list_head *l = &dev->rnrwait;
		struct ipath_qp *nqp = list_entry(l->next, struct ipath_qp,
						  timerwait);

		while (qp->s_rnr_timeout >= nqp->s_rnr_timeout) {
			qp->s_rnr_timeout -= nqp->s_rnr_timeout;
			l = l->next;
			if (l->next == &dev->rnrwait)
				break;
			nqp = list_entry(l->next, struct ipath_qp,
					 timerwait);
		}
		list_add(&qp->timerwait, l);
	}
	spin_unlock_irqrestore(&dev->pending_lock, flags);
}

static int init_sge(struct ipath_qp *qp, struct ipath_rwqe *wqe)
{
	int user = to_ipd(qp->ibqp.pd)->user;
	int i, j, ret;
	struct ib_wc wc;

	qp->r_len = 0;
	for (i = j = 0; i < wqe->num_sge; i++) {
		if (wqe->sg_list[i].length == 0)
			continue;
		/* Check LKEY */
		if ((user && wqe->sg_list[i].lkey == 0) ||
		    !ipath_lkey_ok(qp, &qp->r_sg_list[j], &wqe->sg_list[i],
				   IB_ACCESS_LOCAL_WRITE))
			goto bad_lkey;
		qp->r_len += wqe->sg_list[i].length;
		j++;
	}
	qp->r_sge.sge = qp->r_sg_list[0];
	qp->r_sge.sg_list = qp->r_sg_list + 1;
	qp->r_sge.num_sge = j;
	ret = 1;
	goto bail;

bad_lkey:
	wc.wr_id = wqe->wr_id;
	wc.status = IB_WC_LOC_PROT_ERR;
	wc.opcode = IB_WC_RECV;
	wc.vendor_err = 0;
	wc.byte_len = 0;
	wc.imm_data = 0;
	wc.qp = &qp->ibqp;
	wc.src_qp = 0;
	wc.wc_flags = 0;
	wc.pkey_index = 0;
	wc.slid = 0;
	wc.sl = 0;
	wc.dlid_path_bits = 0;
	wc.port_num = 0;
	/* Signal solicited completion event. */
	ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
	ret = 0;
bail:
	return ret;
}

/**
 * ipath_get_rwqe - copy the next RWQE into the QP's RWQE
 * @qp: the QP
 * @wr_id_only: update wr_id only, not SGEs
 *
 * Return 0 if no RWQE is available, otherwise return 1.
 *
 * Can be called from interrupt level.
 */
int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only)
{
	unsigned long flags;
	struct ipath_rq *rq;
	struct ipath_rwq *wq;
	struct ipath_srq *srq;
	struct ipath_rwqe *wqe;
	void (*handler)(struct ib_event *, void *);
	u32 tail;
	int ret;

	if (qp->ibqp.srq) {
		srq = to_isrq(qp->ibqp.srq);
		handler = srq->ibsrq.event_handler;
		rq = &srq->rq;
	} else {
		srq = NULL;
		handler = NULL;
		rq = &qp->r_rq;
	}

	spin_lock_irqsave(&rq->lock, flags);
	wq = rq->wq;
	tail = wq->tail;
	/* Validate tail before using it since it is user writable. */
	if (tail >= rq->size)
		tail = 0;
	do {
		if (unlikely(tail == wq->head)) {
			spin_unlock_irqrestore(&rq->lock, flags);
			ret = 0;
			goto bail;
		}
		/* Make sure entry is read after head index is read. */
		smp_rmb();
		wqe = get_rwqe_ptr(rq, tail);
		if (++tail >= rq->size)
			tail = 0;
	} while (!wr_id_only && !init_sge(qp, wqe));
	qp->r_wr_id = wqe->wr_id;
	wq->tail = tail;

	ret = 1;
	qp->r_wrid_valid = 1;
	if (handler) {
		u32 n;

		/*
		 * validate head pointer value and compute
		 * the number of remaining WQEs.
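		 * For a circular queue of rq->size entries, the number in
		 * use is (head - tail) mod rq->size; the branches below
		 * compute that without a divide, after clamping the
		 * user-writable head index into range.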
		 */
		n = wq->head;
		if (n >= rq->size)
			n = 0;
		if (n < tail)
			n += rq->size - tail;
		else
			n -= tail;
		if (n < srq->limit) {
			struct ib_event ev;

			srq->limit = 0;
			spin_unlock_irqrestore(&rq->lock, flags);
			ev.device = qp->ibqp.device;
			ev.element.srq = qp->ibqp.srq;
			ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
			handler(&ev, srq->ibsrq.srq_context);
			goto bail;
		}
	}
	spin_unlock_irqrestore(&rq->lock, flags);

bail:
	return ret;
}

/**
 * ipath_ruc_loopback - handle UC and RC loopback requests
 * @sqp: the loopback QP
 *
 * This is called from ipath_do_uc_send() or ipath_do_rc_send() to
 * forward a WQE addressed to the same HCA.
 * Note that although we are single threaded due to the tasklet, we still
 * have to protect against post_send(). We don't have to worry about
 * receive interrupts since this is a connected protocol and all packets
 * will pass through here.
 */
static void ipath_ruc_loopback(struct ipath_qp *sqp)
{
	struct ipath_ibdev *dev = to_idev(sqp->ibqp.device);
	struct ipath_qp *qp;
	struct ipath_swqe *wqe;
	struct ipath_sge *sge;
	unsigned long flags;
	struct ib_wc wc;
	u64 sdata;
	atomic64_t *maddr;

	qp = ipath_lookup_qpn(&dev->qp_table, sqp->remote_qpn);
	if (!qp) {
		dev->n_pkt_drops++;
		return;
	}

again:
	spin_lock_irqsave(&sqp->s_lock, flags);

	if (!(ib_ipath_state_ops[sqp->state] & IPATH_PROCESS_SEND_OK) ||
	    sqp->s_rnr_timeout) {
		spin_unlock_irqrestore(&sqp->s_lock, flags);
		goto done;
	}

	/* Get the next send request. */
	if (sqp->s_last == sqp->s_head) {
		/* Send work queue is empty. */
		spin_unlock_irqrestore(&sqp->s_lock, flags);
		goto done;
	}

	/*
	 * We can rely on the entry not changing without the s_lock
	 * being held until we update s_last.
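	 * Only advancing s_last retires the SWQE, and that happens under
	 * s_lock at the bottom of the loop once we are completely done
	 * with the entry, so dropping the lock here is safe.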
	 */
	wqe = get_swqe_ptr(sqp, sqp->s_last);
	spin_unlock_irqrestore(&sqp->s_lock, flags);

	wc.wc_flags = 0;
	wc.imm_data = 0;

	sqp->s_sge.sge = wqe->sg_list[0];
	sqp->s_sge.sg_list = wqe->sg_list + 1;
	sqp->s_sge.num_sge = wqe->wr.num_sge;
	sqp->s_len = wqe->length;
	switch (wqe->wr.opcode) {
	case IB_WR_SEND_WITH_IMM:
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.imm_data = wqe->wr.imm_data;
		/* FALLTHROUGH */
	case IB_WR_SEND:
		if (!ipath_get_rwqe(qp, 0)) {
		rnr_nak:
			/* Handle RNR NAK */
			if (qp->ibqp.qp_type == IB_QPT_UC)
				goto send_comp;
			if (sqp->s_rnr_retry == 0) {
				wc.status = IB_WC_RNR_RETRY_EXC_ERR;
				goto err;
			}
			if (sqp->s_rnr_retry_cnt < 7)
				sqp->s_rnr_retry--;
			dev->n_rnr_naks++;
			sqp->s_rnr_timeout =
				ib_ipath_rnr_table[qp->r_min_rnr_timer];
			ipath_insert_rnr_queue(sqp);
			goto done;
		}
		break;

	case IB_WR_RDMA_WRITE_WITH_IMM:
		if (unlikely(!(qp->qp_access_flags &
			       IB_ACCESS_REMOTE_WRITE))) {
			wc.status = IB_WC_REM_INV_REQ_ERR;
			goto err;
		}
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.imm_data = wqe->wr.imm_data;
		if (!ipath_get_rwqe(qp, 1))
			goto rnr_nak;
		/* FALLTHROUGH */
	case IB_WR_RDMA_WRITE:
		if (unlikely(!(qp->qp_access_flags &
			       IB_ACCESS_REMOTE_WRITE))) {
			wc.status = IB_WC_REM_INV_REQ_ERR;
			goto err;
		}
		if (wqe->length == 0)
			break;
		if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge, wqe->length,
					    wqe->wr.wr.rdma.remote_addr,
					    wqe->wr.wr.rdma.rkey,
					    IB_ACCESS_REMOTE_WRITE))) {
		acc_err:
			wc.status = IB_WC_REM_ACCESS_ERR;
		err:
			wc.wr_id = wqe->wr.wr_id;
			wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
			wc.vendor_err = 0;
			wc.byte_len = 0;
			wc.qp = &sqp->ibqp;
			wc.src_qp = sqp->remote_qpn;
			wc.pkey_index = 0;
			wc.slid = sqp->remote_ah_attr.dlid;
			wc.sl = sqp->remote_ah_attr.sl;
			wc.dlid_path_bits = 0;
			wc.port_num = 0;
			spin_lock_irqsave(&sqp->s_lock, flags);
			ipath_sqerror_qp(sqp, &wc);
			spin_unlock_irqrestore(&sqp->s_lock, flags);
			goto done;
		}
		break;

	case IB_WR_RDMA_READ:
		if (unlikely(!(qp->qp_access_flags &
			       IB_ACCESS_REMOTE_READ))) {
			wc.status = IB_WC_REM_INV_REQ_ERR;
			goto err;
		}
		if (unlikely(!ipath_rkey_ok(qp, &sqp->s_sge, wqe->length,
					    wqe->wr.wr.rdma.remote_addr,
					    wqe->wr.wr.rdma.rkey,
					    IB_ACCESS_REMOTE_READ)))
			goto acc_err;
		qp->r_sge.sge = wqe->sg_list[0];
		qp->r_sge.sg_list = wqe->sg_list + 1;
		qp->r_sge.num_sge = wqe->wr.num_sge;
		break;

	case IB_WR_ATOMIC_CMP_AND_SWP:
	case IB_WR_ATOMIC_FETCH_AND_ADD:
		if (unlikely(!(qp->qp_access_flags &
			       IB_ACCESS_REMOTE_ATOMIC))) {
			wc.status = IB_WC_REM_INV_REQ_ERR;
			goto err;
		}
		if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge, sizeof(u64),
					    wqe->wr.wr.atomic.remote_addr,
					    wqe->wr.wr.atomic.rkey,
					    IB_ACCESS_REMOTE_ATOMIC)))
			goto acc_err;
		/* Perform atomic OP and save result. */
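		/*
		 * atomic64_add_return() hands back the new value, so
		 * subtracting the addend recovers the memory's prior
		 * contents; cmpxchg() returns the prior value directly.
		 * Either way the old value lands in the sender's first
		 * SGE (sqp->s_sge.sge).
		 */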
		maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
		sdata = wqe->wr.wr.atomic.compare_add;
		*(u64 *) sqp->s_sge.sge.vaddr =
			(wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
			(u64) atomic64_add_return(sdata, maddr) - sdata :
			(u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
				      sdata, wqe->wr.wr.atomic.swap);
		goto send_comp;

	default:
		goto done;
	}

	sge = &sqp->s_sge.sge;
	while (sqp->s_len) {
		u32 len = sqp->s_len;

		if (len > sge->length)
			len = sge->length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		BUG_ON(len == 0);
		ipath_copy_sge(&qp->r_sge, sge->vaddr, len);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (--sqp->s_sge.num_sge)
				*sge = *sqp->s_sge.sg_list++;
		} else if (sge->length == 0 && sge->mr != NULL) {
			if (++sge->n >= IPATH_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		sqp->s_len -= len;
	}

	if (wqe->wr.opcode == IB_WR_RDMA_WRITE ||
	    wqe->wr.opcode == IB_WR_RDMA_READ)
		goto send_comp;

	if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
		wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
	else
		wc.opcode = IB_WC_RECV;
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.vendor_err = 0;
	wc.byte_len = wqe->length;
	wc.qp = &qp->ibqp;
	wc.src_qp = qp->remote_qpn;
	/* XXX do we know which pkey matched? Only needed for GSI. */
	wc.pkey_index = 0;
	wc.slid = qp->remote_ah_attr.dlid;
	wc.sl = qp->remote_ah_attr.sl;
	wc.dlid_path_bits = 0;
	/* Signal completion event if the solicited bit is set. */
	ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
		       wqe->wr.send_flags & IB_SEND_SOLICITED);

send_comp:
	sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;

	if (!(sqp->s_flags & IPATH_S_SIGNAL_REQ_WR) ||
	    (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
		wc.wr_id = wqe->wr.wr_id;
		wc.status = IB_WC_SUCCESS;
		wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
		wc.vendor_err = 0;
		wc.byte_len = wqe->length;
		wc.qp = &sqp->ibqp;
		wc.src_qp = 0;
		wc.pkey_index = 0;
		wc.slid = 0;
		wc.sl = 0;
		wc.dlid_path_bits = 0;
		wc.port_num = 0;
		ipath_cq_enter(to_icq(sqp->ibqp.send_cq), &wc, 0);
	}

	/* Update s_last now that we are finished with the SWQE */
	spin_lock_irqsave(&sqp->s_lock, flags);
	if (++sqp->s_last >= sqp->s_size)
		sqp->s_last = 0;
	spin_unlock_irqrestore(&sqp->s_lock, flags);
	goto again;

done:
	if (atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
}

static int want_buffer(struct ipath_devdata *dd)
{
	set_bit(IPATH_S_PIOINTBUFAVAIL, &dd->ipath_sendctrl);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
			 dd->ipath_sendctrl);

	return 0;
}

/**
 * ipath_no_bufs_available - tell the layer driver we need buffers
 * @qp: the QP that caused the problem
 * @dev: the device we ran out of buffers on
 *
 * Called when we run out of PIO buffers.
 */
static void ipath_no_bufs_available(struct ipath_qp *qp, struct ipath_ibdev *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->pending_lock, flags);
	if (list_empty(&qp->piowait))
		list_add_tail(&qp->piowait, &dev->piowait);
	spin_unlock_irqrestore(&dev->pending_lock, flags);
	/*
	 * Note that as soon as want_buffer() is called and
	 * possibly before it returns, ipath_ib_piobufavail()
	 * could be called. If we are still in the tasklet function,
	 * tasklet_hi_schedule() will not call us until the next time
	 * tasklet_hi_schedule() is called.
	 * We leave the busy flag set so that another post send doesn't
	 * try to put the same QP on the piowait list again.
	 */
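	/* Arm the chip's "PIO buffer available" interrupt (want_buffer() above). */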
	want_buffer(dev->dd);
	dev->n_piowait++;
}

/**
 * ipath_post_ruc_send - post RC and UC sends
 * @qp: the QP to post on
 * @wr: the work request to send
 */
int ipath_post_ruc_send(struct ipath_qp *qp, struct ib_send_wr *wr)
{
	struct ipath_swqe *wqe;
	unsigned long flags;
	u32 next;
	int i, j;
	int acc;
	int ret;

	/*
	 * Don't allow RDMA reads or atomic operations on UC or
	 * undefined operations.
	 * Make sure buffer is large enough to hold the result for atomics.
	 */
	if (qp->ibqp.qp_type == IB_QPT_UC) {
		if ((unsigned) wr->opcode >= IB_WR_RDMA_READ) {
			ret = -EINVAL;
			goto bail;
		}
	} else if ((unsigned) wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD) {
		ret = -EINVAL;
		goto bail;
	} else if (wr->opcode >= IB_WR_ATOMIC_CMP_AND_SWP &&
		   (wr->num_sge == 0 ||
		    wr->sg_list[0].length < sizeof(u64) ||
		    wr->sg_list[0].addr & (sizeof(u64) - 1))) {
		ret = -EINVAL;
		goto bail;
	} else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic) {
		ret = -EINVAL;
		goto bail;
	}
	/* IB spec says that num_sge == 0 is OK. */
	if (wr->num_sge > qp->s_max_sge) {
		ret = -ENOMEM;
		goto bail;
	}
	spin_lock_irqsave(&qp->s_lock, flags);
	next = qp->s_head + 1;
	if (next >= qp->s_size)
		next = 0;
	if (next == qp->s_last) {
		spin_unlock_irqrestore(&qp->s_lock, flags);
		ret = -EINVAL;
		goto bail;
	}

	wqe = get_swqe_ptr(qp, qp->s_head);
	wqe->wr = *wr;
	wqe->ssn = qp->s_ssn++;
	wqe->sg_list[0].mr = NULL;
	wqe->sg_list[0].vaddr = NULL;
	wqe->sg_list[0].length = 0;
	wqe->sg_list[0].sge_length = 0;
	wqe->length = 0;
	acc = wr->opcode >= IB_WR_RDMA_READ ? IB_ACCESS_LOCAL_WRITE : 0;
	for (i = 0, j = 0; i < wr->num_sge; i++) {
		if (to_ipd(qp->ibqp.pd)->user && wr->sg_list[i].lkey == 0) {
			spin_unlock_irqrestore(&qp->s_lock, flags);
			ret = -EINVAL;
			goto bail;
		}
		if (wr->sg_list[i].length == 0)
			continue;
		if (!ipath_lkey_ok(qp, &wqe->sg_list[j], &wr->sg_list[i],
				   acc)) {
			spin_unlock_irqrestore(&qp->s_lock, flags);
			ret = -EINVAL;
			goto bail;
		}
		wqe->length += wr->sg_list[i].length;
		j++;
	}
	wqe->wr.num_sge = j;
	qp->s_head = next;
	spin_unlock_irqrestore(&qp->s_lock, flags);

	ipath_do_ruc_send((unsigned long) qp);

	ret = 0;

bail:
	return ret;
}

/**
 * ipath_make_grh - construct a GRH header
 * @dev: a pointer to the ipath device
 * @hdr: a pointer to the GRH header being constructed
 * @grh: the global route address to send to
 * @hwords: the number of 32 bit words of header being sent
 * @nwords: the number of 32 bit words of data being sent
 *
 * Return the size of the header in 32 bit words.
 */
u32 ipath_make_grh(struct ipath_ibdev *dev, struct ib_grh *hdr,
		   struct ib_global_route *grh, u32 hwords, u32 nwords)
{
	hdr->version_tclass_flow =
		cpu_to_be32((6 << 28) |
			    (grh->traffic_class << 20) |
			    grh->flow_label);
	hdr->paylen = cpu_to_be16((hwords - 2 + nwords + SIZE_OF_CRC) << 2);
	/* next_hdr is defined by C8-7 in ch. 8.4.1 */
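	/* 0x1B is the NxtHdr value meaning an IBA transport header (BTH) follows. */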
	hdr->next_hdr = 0x1B;
	hdr->hop_limit = grh->hop_limit;
	/* The SGID is 32-bit aligned. */
	hdr->sgid.global.subnet_prefix = dev->gid_prefix;
	hdr->sgid.global.interface_id = dev->dd->ipath_guid;
	hdr->dgid = grh->dgid;

	/* GRH header size in 32-bit words. */
	return sizeof(struct ib_grh) / sizeof(u32);
}

/**
 * ipath_do_ruc_send - perform a send on an RC or UC QP
 * @data: contains a pointer to the QP
 *
 * Process entries in the send work queue until credit or queue is
 * exhausted. Only allow one CPU to send a packet per QP (tasklet).
 * Otherwise, after we drop the QP s_lock, two threads could send
 * packets out of order.
 */
void ipath_do_ruc_send(unsigned long data)
{
	struct ipath_qp *qp = (struct ipath_qp *)data;
	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
	unsigned long flags;
	u16 lrh0;
	u32 nwords;
	u32 extra_bytes;
	u32 bth0;
	u32 bth2;
	u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
	struct ipath_other_headers *ohdr;

	if (test_and_set_bit(IPATH_S_BUSY, &qp->s_busy))
		goto bail;

	if (unlikely(qp->remote_ah_attr.dlid == dev->dd->ipath_lid)) {
		ipath_ruc_loopback(qp);
		goto clear;
	}

	ohdr = &qp->s_hdr.u.oth;
	if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
		ohdr = &qp->s_hdr.u.l.oth;

again:
	/* Check for a constructed packet to be sent. */
	if (qp->s_hdrwords != 0) {
		/*
		 * If no PIO bufs are available, return. An interrupt will
		 * call ipath_ib_piobufavail() when one is available.
		 */
		if (ipath_verbs_send(dev->dd, qp->s_hdrwords,
				     (u32 *) &qp->s_hdr, qp->s_cur_size,
				     qp->s_cur_sge)) {
			ipath_no_bufs_available(qp, dev);
			goto bail;
		}
		dev->n_unicast_xmit++;
		/* Record that we sent the packet and s_hdr is empty. */
		qp->s_hdrwords = 0;
	}

	/*
	 * The lock is needed to synchronize between setting
	 * qp->s_ack_state, resend timer, and post_send().
	 */
	spin_lock_irqsave(&qp->s_lock, flags);

	if (!((qp->ibqp.qp_type == IB_QPT_RC) ?
	      ipath_make_rc_req(qp, ohdr, pmtu, &bth0, &bth2) :
	      ipath_make_uc_req(qp, ohdr, pmtu, &bth0, &bth2))) {
		/*
		 * Clear the busy bit before unlocking to avoid races with
		 * adding new work queue items and then failing to process
		 * them.
		 */
		clear_bit(IPATH_S_BUSY, &qp->s_busy);
		spin_unlock_irqrestore(&qp->s_lock, flags);
		goto bail;
	}

	spin_unlock_irqrestore(&qp->s_lock, flags);

	/* Construct the header. */
	extra_bytes = (4 - qp->s_cur_size) & 3;
	nwords = (qp->s_cur_size + extra_bytes) >> 2;
	lrh0 = IPATH_LRH_BTH;
	if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
		qp->s_hdrwords += ipath_make_grh(dev, &qp->s_hdr.u.l.grh,
						 &qp->remote_ah_attr.grh,
						 qp->s_hdrwords, nwords);
		lrh0 = IPATH_LRH_GRH;
	}
	lrh0 |= qp->remote_ah_attr.sl << 4;
	qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
	qp->s_hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
	qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords +
				       SIZE_OF_CRC);
	qp->s_hdr.lrh[3] = cpu_to_be16(dev->dd->ipath_lid);
	bth0 |= ipath_get_pkey(dev->dd, qp->s_pkey_index);
	bth0 |= extra_bytes << 20;
	ohdr->bth[0] = cpu_to_be32(bth0);
	ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
	ohdr->bth[2] = cpu_to_be32(bth2);

	/* Check for more work to do. */
	goto again;

clear:
	clear_bit(IPATH_S_BUSY, &qp->s_busy);
bail:
	return;
}
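
A note on the technique used by ipath_insert_rnr_queue() above: timeouts on the rnrwait list are delta-encoded, so the driver's periodic timer only ever decrements the head entry instead of every waiting QP. The standalone sketch below illustrates the same idea in plain userspace C. It is an illustration, not the kernel code: struct dnode, dt_insert() and dt_tick() are invented names, it uses an ordinary singly linked list, and it explicitly re-deltas the successor on insert to keep the invariant in this simplified setting.

#include <stdio.h>
#include <stdlib.h>

/* One pending timeout; delta is relative to all earlier entries. */
struct dnode {
	unsigned int delta;	/* ticks left after every earlier node fires */
	int id;
	struct dnode *next;
};

/* Insert 'id' to fire 'timeout' ticks from now, keeping the list sorted. */
static void dt_insert(struct dnode **head, int id, unsigned int timeout)
{
	struct dnode **p = head;
	struct dnode *n;

	/* Same walk as ipath_insert_rnr_queue(): shed predecessors' time. */
	while (*p && timeout >= (*p)->delta) {
		timeout -= (*p)->delta;
		p = &(*p)->next;
	}
	n = malloc(sizeof(*n));
	if (!n)
		exit(1);
	n->delta = timeout;
	n->id = id;
	n->next = *p;
	if (n->next)
		n->next->delta -= timeout;	/* keep successor's delta correct */
	*p = n;
}

/* One clock tick: only the head delta is decremented; expired nodes pop off. */
static void dt_tick(struct dnode **head)
{
	if (*head && (*head)->delta)
		(*head)->delta--;
	while (*head && (*head)->delta == 0) {
		struct dnode *n = *head;

		printf("  timeout fired: qp %d\n", n->id);
		*head = n->next;
		free(n);
	}
}

int main(void)
{
	struct dnode *q = NULL;
	int t;

	dt_insert(&q, 1, 3);	/* fires at t=3 */
	dt_insert(&q, 2, 1);	/* fires at t=1 */
	dt_insert(&q, 3, 3);	/* also t=3, queued behind qp 1 */
	for (t = 1; t <= 4; t++) {
		printf("t=%d\n", t);
		dt_tick(&q);
	}
	return 0;
}

Run as-is, it reports qp 2 firing at t=1 and qps 1 and 3 firing at t=3, matching the insertion comments; each tick touches only the list head, which is the point of the delta encoding.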