Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
RDMA/cxgb3: Wrap the software send queue pointer as needed on flush
IB/ipath: Change ipath_devdata.ipath_sdma_status to be unsigned long
IB/ipath: Make ipath_portdata work with struct pid * not pid_t
IB/ipath: Fix RDMA read response sequence checking
IB/ipath: Fix many locking issues when switching to error state
IB/ipath: Fix RC and UC error handling
RDMA/nes: Fix up nes_lro_max_aggr module parameter

+725 -555
+2 -2
drivers/infiniband/hw/cxgb3/cxio_hal.c
··· 405 405 struct t3_swsq *sqp = wq->sq + Q_PTR2IDX(wq->sq_rptr, wq->sq_size_log2); 406 406 407 407 ptr = wq->sq_rptr + count; 408 - sqp += count; 408 + sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2); 409 409 while (ptr != wq->sq_wptr) { 410 410 insert_sq_cqe(wq, cq, sqp); 411 - sqp++; 412 411 ptr++; 412 + sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2); 413 413 flushed++; 414 414 } 415 415 return flushed;
+11 -9
drivers/infiniband/hw/ipath/ipath_driver.c
··· 1894 1894 */ 1895 1895 if (dd->ipath_flags & IPATH_HAS_SEND_DMA) { 1896 1896 int skip_cancel; 1897 - u64 *statp = &dd->ipath_sdma_status; 1897 + unsigned long *statp = &dd->ipath_sdma_status; 1898 1898 1899 1899 spin_lock_irqsave(&dd->ipath_sdma_lock, flags); 1900 1900 skip_cancel = ··· 2616 2616 ipath_dbg("unit %u port %d is in use " 2617 2617 "(PID %u cmd %s), can't reset\n", 2618 2618 unit, i, 2619 - dd->ipath_pd[i]->port_pid, 2619 + pid_nr(dd->ipath_pd[i]->port_pid), 2620 2620 dd->ipath_pd[i]->port_comm); 2621 2621 ret = -EBUSY; 2622 2622 goto bail; ··· 2654 2654 static int ipath_signal_procs(struct ipath_devdata *dd, int sig) 2655 2655 { 2656 2656 int i, sub, any = 0; 2657 - pid_t pid; 2657 + struct pid *pid; 2658 2658 2659 2659 if (!dd->ipath_pd) 2660 2660 return 0; 2661 2661 for (i = 1; i < dd->ipath_cfgports; i++) { 2662 - if (!dd->ipath_pd[i] || !dd->ipath_pd[i]->port_cnt || 2663 - !dd->ipath_pd[i]->port_pid) 2662 + if (!dd->ipath_pd[i] || !dd->ipath_pd[i]->port_cnt) 2664 2663 continue; 2665 2664 pid = dd->ipath_pd[i]->port_pid; 2665 + if (!pid) 2666 + continue; 2667 + 2666 2668 dev_info(&dd->pcidev->dev, "context %d in use " 2667 2669 "(PID %u), sending signal %d\n", 2668 - i, pid, sig); 2669 - kill_proc(pid, sig, 1); 2670 + i, pid_nr(pid), sig); 2671 + kill_pid(pid, sig, 1); 2670 2672 any++; 2671 2673 for (sub = 0; sub < INFINIPATH_MAX_SUBPORT; sub++) { 2672 2674 pid = dd->ipath_pd[i]->port_subpid[sub]; ··· 2676 2674 continue; 2677 2675 dev_info(&dd->pcidev->dev, "sub-context " 2678 2676 "%d:%d in use (PID %u), sending " 2679 - "signal %d\n", i, sub, pid, sig); 2680 - kill_proc(pid, sig, 1); 2677 + "signal %d\n", i, sub, pid_nr(pid), sig); 2678 + kill_pid(pid, sig, 1); 2681 2679 any++; 2682 2680 } 2683 2681 }
+11 -8
drivers/infiniband/hw/ipath/ipath_file_ops.c
··· 555 555 p = dd->ipath_pageshadow[porttid + tid]; 556 556 dd->ipath_pageshadow[porttid + tid] = NULL; 557 557 ipath_cdbg(VERBOSE, "PID %u freeing TID %u\n", 558 - pd->port_pid, tid); 558 + pid_nr(pd->port_pid), tid); 559 559 dd->ipath_f_put_tid(dd, &tidbase[tid], 560 560 RCVHQ_RCV_TYPE_EXPECTED, 561 561 dd->ipath_tidinvalid); ··· 1609 1609 port); 1610 1610 pd->port_cnt = 1; 1611 1611 port_fp(fp) = pd; 1612 - pd->port_pid = current->pid; 1612 + pd->port_pid = get_pid(task_pid(current)); 1613 1613 strncpy(pd->port_comm, current->comm, sizeof(pd->port_comm)); 1614 1614 ipath_stats.sps_ports++; 1615 1615 ret = 0; ··· 1793 1793 } 1794 1794 port_fp(fp) = pd; 1795 1795 subport_fp(fp) = pd->port_cnt++; 1796 - pd->port_subpid[subport_fp(fp)] = current->pid; 1796 + pd->port_subpid[subport_fp(fp)] = 1797 + get_pid(task_pid(current)); 1797 1798 tidcursor_fp(fp) = 0; 1798 1799 pd->active_slaves |= 1 << subport_fp(fp); 1799 1800 ipath_cdbg(PROC, 1800 1801 "%s[%u] %u sharing %s[%u] unit:port %u:%u\n", 1801 1802 current->comm, current->pid, 1802 1803 subport_fp(fp), 1803 - pd->port_comm, pd->port_pid, 1804 + pd->port_comm, pid_nr(pd->port_pid), 1804 1805 dd->ipath_unit, pd->port_port); 1805 1806 ret = 1; 1806 1807 goto done; ··· 2067 2066 * the slave(s) don't wait for receive data forever. 2068 2067 */ 2069 2068 pd->active_slaves &= ~(1 << fd->subport); 2070 - pd->port_subpid[fd->subport] = 0; 2069 + put_pid(pd->port_subpid[fd->subport]); 2070 + pd->port_subpid[fd->subport] = NULL; 2071 2071 mutex_unlock(&ipath_mutex); 2072 2072 goto bail; 2073 2073 } ··· 2076 2074 2077 2075 if (pd->port_hdrqfull) { 2078 2076 ipath_cdbg(PROC, "%s[%u] had %u rcvhdrqfull errors " 2079 - "during run\n", pd->port_comm, pd->port_pid, 2077 + "during run\n", pd->port_comm, pid_nr(pd->port_pid), 2080 2078 pd->port_hdrqfull); 2081 2079 pd->port_hdrqfull = 0; 2082 2080 } ··· 2136 2134 unlock_expected_tids(pd); 2137 2135 ipath_stats.sps_ports--; 2138 2136 ipath_cdbg(PROC, "%s[%u] closed port %u:%u\n", 2139 - pd->port_comm, pd->port_pid, 2137 + pd->port_comm, pid_nr(pd->port_pid), 2140 2138 dd->ipath_unit, port); 2141 2139 } 2142 2140 2143 - pd->port_pid = 0; 2141 + put_pid(pd->port_pid); 2142 + pd->port_pid = NULL; 2144 2143 dd->ipath_pd[pd->port_port] = NULL; /* before releasing mutex */ 2145 2144 mutex_unlock(&ipath_mutex); 2146 2145 ipath_free_pddata(dd, pd); /* after releasing the mutex */
+5 -5
drivers/infiniband/hw/ipath/ipath_kernel.h
··· 159 159 /* saved total number of polled urgent packets for poll edge trigger */ 160 160 u32 port_urgent_poll; 161 161 /* pid of process using this port */ 162 - pid_t port_pid; 163 - pid_t port_subpid[INFINIPATH_MAX_SUBPORT]; 162 + struct pid *port_pid; 163 + struct pid *port_subpid[INFINIPATH_MAX_SUBPORT]; 164 164 /* same size as task_struct .comm[] */ 165 165 char port_comm[16]; 166 166 /* pkeys set by this use of this port */ ··· 483 483 484 484 /* SendDMA related entries */ 485 485 spinlock_t ipath_sdma_lock; 486 - u64 ipath_sdma_status; 486 + unsigned long ipath_sdma_status; 487 487 unsigned long ipath_sdma_abort_jiffies; 488 488 unsigned long ipath_sdma_abort_intr_timeout; 489 489 unsigned long ipath_sdma_buf_jiffies; ··· 822 822 #define IPATH_SDMA_DISARMED 1 823 823 #define IPATH_SDMA_DISABLED 2 824 824 #define IPATH_SDMA_LAYERBUF 3 825 - #define IPATH_SDMA_RUNNING 62 826 - #define IPATH_SDMA_SHUTDOWN 63 825 + #define IPATH_SDMA_RUNNING 30 826 + #define IPATH_SDMA_SHUTDOWN 31 827 827 828 828 /* bit combinations that correspond to abort states */ 829 829 #define IPATH_SDMA_ABORT_NONE 0
+98 -137
drivers/infiniband/hw/ipath/ipath_qp.c
··· 242 242 { 243 243 struct ipath_qp *q, **qpp; 244 244 unsigned long flags; 245 - int fnd = 0; 246 245 247 246 spin_lock_irqsave(&qpt->lock, flags); 248 247 ··· 252 253 *qpp = qp->next; 253 254 qp->next = NULL; 254 255 atomic_dec(&qp->refcount); 255 - fnd = 1; 256 256 break; 257 257 } 258 258 } 259 259 260 260 spin_unlock_irqrestore(&qpt->lock, flags); 261 - 262 - if (!fnd) 263 - return; 264 - 265 - free_qpn(qpt, qp->ibqp.qp_num); 266 - 267 - wait_event(qp->wait, !atomic_read(&qp->refcount)); 268 261 } 269 262 270 263 /** 271 - * ipath_free_all_qps - remove all QPs from the table 264 + * ipath_free_all_qps - check for QPs still in use 272 265 * @qpt: the QP table to empty 266 + * 267 + * There should not be any QPs still in use. 268 + * Free memory for table. 273 269 */ 274 - void ipath_free_all_qps(struct ipath_qp_table *qpt) 270 + unsigned ipath_free_all_qps(struct ipath_qp_table *qpt) 275 271 { 276 272 unsigned long flags; 277 - struct ipath_qp *qp, *nqp; 278 - u32 n; 273 + struct ipath_qp *qp; 274 + u32 n, qp_inuse = 0; 279 275 276 + spin_lock_irqsave(&qpt->lock, flags); 280 277 for (n = 0; n < qpt->max; n++) { 281 - spin_lock_irqsave(&qpt->lock, flags); 282 278 qp = qpt->table[n]; 283 279 qpt->table[n] = NULL; 284 - spin_unlock_irqrestore(&qpt->lock, flags); 285 280 286 - while (qp) { 287 - nqp = qp->next; 288 - free_qpn(qpt, qp->ibqp.qp_num); 289 - if (!atomic_dec_and_test(&qp->refcount) || 290 - !ipath_destroy_qp(&qp->ibqp)) 291 - ipath_dbg("QP memory leak!\n"); 292 - qp = nqp; 293 - } 281 + for (; qp; qp = qp->next) 282 + qp_inuse++; 294 283 } 284 + spin_unlock_irqrestore(&qpt->lock, flags); 295 285 296 - for (n = 0; n < ARRAY_SIZE(qpt->map); n++) { 286 + for (n = 0; n < ARRAY_SIZE(qpt->map); n++) 297 287 if (qpt->map[n].page) 298 - free_page((unsigned long)qpt->map[n].page); 299 - } 288 + free_page((unsigned long) qpt->map[n].page); 289 + return qp_inuse; 300 290 } 301 291 302 292 /** ··· 324 336 qp->remote_qpn = 0; 325 337 qp->qkey = 0; 326 338 qp->qp_access_flags = 0; 327 - qp->s_busy = 0; 339 + atomic_set(&qp->s_dma_busy, 0); 328 340 qp->s_flags &= IPATH_S_SIGNAL_REQ_WR; 329 341 qp->s_hdrwords = 0; 330 342 qp->s_wqe = NULL; 331 343 qp->s_pkt_delay = 0; 344 + qp->s_draining = 0; 332 345 qp->s_psn = 0; 333 346 qp->r_psn = 0; 334 347 qp->r_msn = 0; ··· 342 353 } 343 354 qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE; 344 355 qp->r_nak_state = 0; 345 - qp->r_wrid_valid = 0; 356 + qp->r_aflags = 0; 357 + qp->r_flags = 0; 346 358 qp->s_rnr_timeout = 0; 347 359 qp->s_head = 0; 348 360 qp->s_tail = 0; ··· 351 361 qp->s_last = 0; 352 362 qp->s_ssn = 1; 353 363 qp->s_lsn = 0; 354 - qp->s_wait_credit = 0; 355 364 memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue)); 356 365 qp->r_head_ack_queue = 0; 357 366 qp->s_tail_ack_queue = 0; ··· 359 370 qp->r_rq.wq->head = 0; 360 371 qp->r_rq.wq->tail = 0; 361 372 } 362 - qp->r_reuse_sge = 0; 363 373 } 364 374 365 375 /** 366 - * ipath_error_qp - put a QP into an error state 367 - * @qp: the QP to put into an error state 376 + * ipath_error_qp - put a QP into the error state 377 + * @qp: the QP to put into the error state 368 378 * @err: the receive completion error to signal if a RWQE is active 369 379 * 370 380 * Flushes both send and receive work queues. 371 381 * Returns true if last WQE event should be generated. 372 382 * The QP s_lock should be held and interrupts disabled. 383 + * If we are already in error state, just return. 
373 384 */ 374 385 375 386 int ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err) ··· 378 389 struct ib_wc wc; 379 390 int ret = 0; 380 391 381 - ipath_dbg("QP%d/%d in error state (%d)\n", 382 - qp->ibqp.qp_num, qp->remote_qpn, err); 392 + if (qp->state == IB_QPS_ERR) 393 + goto bail; 394 + 395 + qp->state = IB_QPS_ERR; 383 396 384 397 spin_lock(&dev->pending_lock); 385 398 if (!list_empty(&qp->timerwait)) ··· 390 399 list_del_init(&qp->piowait); 391 400 spin_unlock(&dev->pending_lock); 392 401 393 - wc.vendor_err = 0; 394 - wc.byte_len = 0; 395 - wc.imm_data = 0; 402 + /* Schedule the sending tasklet to drain the send work queue. */ 403 + if (qp->s_last != qp->s_head) 404 + ipath_schedule_send(qp); 405 + 406 + memset(&wc, 0, sizeof(wc)); 396 407 wc.qp = &qp->ibqp; 397 - wc.src_qp = 0; 398 - wc.wc_flags = 0; 399 - wc.pkey_index = 0; 400 - wc.slid = 0; 401 - wc.sl = 0; 402 - wc.dlid_path_bits = 0; 403 - wc.port_num = 0; 404 - if (qp->r_wrid_valid) { 405 - qp->r_wrid_valid = 0; 408 + wc.opcode = IB_WC_RECV; 409 + 410 + if (test_and_clear_bit(IPATH_R_WRID_VALID, &qp->r_aflags)) { 406 411 wc.wr_id = qp->r_wr_id; 407 - wc.opcode = IB_WC_RECV; 408 412 wc.status = err; 409 413 ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1); 410 414 } 411 415 wc.status = IB_WC_WR_FLUSH_ERR; 412 - 413 - while (qp->s_last != qp->s_head) { 414 - struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last); 415 - 416 - wc.wr_id = wqe->wr.wr_id; 417 - wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode]; 418 - if (++qp->s_last >= qp->s_size) 419 - qp->s_last = 0; 420 - ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 1); 421 - } 422 - qp->s_cur = qp->s_tail = qp->s_head; 423 - qp->s_hdrwords = 0; 424 - qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE; 425 416 426 417 if (qp->r_rq.wq) { 427 418 struct ipath_rwq *wq; ··· 420 447 tail = wq->tail; 421 448 if (tail >= qp->r_rq.size) 422 449 tail = 0; 423 - wc.opcode = IB_WC_RECV; 424 450 while (tail != head) { 425 451 wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id; 426 452 if (++tail >= qp->r_rq.size) ··· 432 460 } else if (qp->ibqp.event_handler) 433 461 ret = 1; 434 462 463 + bail: 435 464 return ret; 436 465 } 437 466 ··· 451 478 struct ipath_ibdev *dev = to_idev(ibqp->device); 452 479 struct ipath_qp *qp = to_iqp(ibqp); 453 480 enum ib_qp_state cur_state, new_state; 454 - unsigned long flags; 455 481 int lastwqe = 0; 456 482 int ret; 457 483 458 - spin_lock_irqsave(&qp->s_lock, flags); 484 + spin_lock_irq(&qp->s_lock); 459 485 460 486 cur_state = attr_mask & IB_QP_CUR_STATE ? 
461 487 attr->cur_qp_state : qp->state; ··· 507 535 508 536 switch (new_state) { 509 537 case IB_QPS_RESET: 538 + if (qp->state != IB_QPS_RESET) { 539 + qp->state = IB_QPS_RESET; 540 + spin_lock(&dev->pending_lock); 541 + if (!list_empty(&qp->timerwait)) 542 + list_del_init(&qp->timerwait); 543 + if (!list_empty(&qp->piowait)) 544 + list_del_init(&qp->piowait); 545 + spin_unlock(&dev->pending_lock); 546 + qp->s_flags &= ~IPATH_S_ANY_WAIT; 547 + spin_unlock_irq(&qp->s_lock); 548 + /* Stop the sending tasklet */ 549 + tasklet_kill(&qp->s_task); 550 + wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy)); 551 + spin_lock_irq(&qp->s_lock); 552 + } 510 553 ipath_reset_qp(qp, ibqp->qp_type); 554 + break; 555 + 556 + case IB_QPS_SQD: 557 + qp->s_draining = qp->s_last != qp->s_cur; 558 + qp->state = new_state; 559 + break; 560 + 561 + case IB_QPS_SQE: 562 + if (qp->ibqp.qp_type == IB_QPT_RC) 563 + goto inval; 564 + qp->state = new_state; 511 565 break; 512 566 513 567 case IB_QPS_ERR: ··· 541 543 break; 542 544 543 545 default: 546 + qp->state = new_state; 544 547 break; 545 - 546 548 } 547 549 548 550 if (attr_mask & IB_QP_PKEY_INDEX) ··· 595 597 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) 596 598 qp->s_max_rd_atomic = attr->max_rd_atomic; 597 599 598 - qp->state = new_state; 599 - spin_unlock_irqrestore(&qp->s_lock, flags); 600 + spin_unlock_irq(&qp->s_lock); 600 601 601 602 if (lastwqe) { 602 603 struct ib_event ev; ··· 609 612 goto bail; 610 613 611 614 inval: 612 - spin_unlock_irqrestore(&qp->s_lock, flags); 615 + spin_unlock_irq(&qp->s_lock); 613 616 ret = -EINVAL; 614 617 615 618 bail: ··· 640 643 attr->pkey_index = qp->s_pkey_index; 641 644 attr->alt_pkey_index = 0; 642 645 attr->en_sqd_async_notify = 0; 643 - attr->sq_draining = 0; 646 + attr->sq_draining = qp->s_draining; 644 647 attr->max_rd_atomic = qp->s_max_rd_atomic; 645 648 attr->max_dest_rd_atomic = qp->r_max_rd_atomic; 646 649 attr->min_rnr_timer = qp->r_min_rnr_timer; ··· 830 833 spin_lock_init(&qp->r_rq.lock); 831 834 atomic_set(&qp->refcount, 0); 832 835 init_waitqueue_head(&qp->wait); 836 + init_waitqueue_head(&qp->wait_dma); 833 837 tasklet_init(&qp->s_task, ipath_do_send, (unsigned long)qp); 834 838 INIT_LIST_HEAD(&qp->piowait); 835 839 INIT_LIST_HEAD(&qp->timerwait); ··· 924 926 else 925 927 vfree(qp->r_rq.wq); 926 928 ipath_free_qp(&dev->qp_table, qp); 929 + free_qpn(&dev->qp_table, qp->ibqp.qp_num); 927 930 bail_qp: 928 931 kfree(qp); 929 932 bail_swq: ··· 946 947 { 947 948 struct ipath_qp *qp = to_iqp(ibqp); 948 949 struct ipath_ibdev *dev = to_idev(ibqp->device); 949 - unsigned long flags; 950 950 951 - spin_lock_irqsave(&qp->s_lock, flags); 952 - qp->state = IB_QPS_ERR; 953 - spin_unlock_irqrestore(&qp->s_lock, flags); 954 - spin_lock(&dev->n_qps_lock); 955 - dev->n_qps_allocated--; 956 - spin_unlock(&dev->n_qps_lock); 951 + /* Make sure HW and driver activity is stopped. */ 952 + spin_lock_irq(&qp->s_lock); 953 + if (qp->state != IB_QPS_RESET) { 954 + qp->state = IB_QPS_RESET; 955 + spin_lock(&dev->pending_lock); 956 + if (!list_empty(&qp->timerwait)) 957 + list_del_init(&qp->timerwait); 958 + if (!list_empty(&qp->piowait)) 959 + list_del_init(&qp->piowait); 960 + spin_unlock(&dev->pending_lock); 961 + qp->s_flags &= ~IPATH_S_ANY_WAIT; 962 + spin_unlock_irq(&qp->s_lock); 963 + /* Stop the sending tasklet */ 964 + tasklet_kill(&qp->s_task); 965 + wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy)); 966 + } else 967 + spin_unlock_irq(&qp->s_lock); 957 968 958 - /* Stop the sending tasklet. 
*/ 959 - tasklet_kill(&qp->s_task); 969 + ipath_free_qp(&dev->qp_table, qp); 960 970 961 971 if (qp->s_tx) { 962 972 atomic_dec(&qp->refcount); 963 973 if (qp->s_tx->txreq.flags & IPATH_SDMA_TXREQ_F_FREEBUF) 964 974 kfree(qp->s_tx->txreq.map_addr); 975 + spin_lock_irq(&dev->pending_lock); 976 + list_add(&qp->s_tx->txreq.list, &dev->txreq_free); 977 + spin_unlock_irq(&dev->pending_lock); 978 + qp->s_tx = NULL; 965 979 } 966 980 967 - /* Make sure the QP isn't on the timeout list. */ 968 - spin_lock_irqsave(&dev->pending_lock, flags); 969 - if (!list_empty(&qp->timerwait)) 970 - list_del_init(&qp->timerwait); 971 - if (!list_empty(&qp->piowait)) 972 - list_del_init(&qp->piowait); 973 - if (qp->s_tx) 974 - list_add(&qp->s_tx->txreq.list, &dev->txreq_free); 975 - spin_unlock_irqrestore(&dev->pending_lock, flags); 981 + wait_event(qp->wait, !atomic_read(&qp->refcount)); 976 982 977 - /* 978 - * Make sure that the QP is not in the QPN table so receive 979 - * interrupts will discard packets for this QP. XXX Also remove QP 980 - * from multicast table. 981 - */ 982 - if (atomic_read(&qp->refcount) != 0) 983 - ipath_free_qp(&dev->qp_table, qp); 983 + /* all user's cleaned up, mark it available */ 984 + free_qpn(&dev->qp_table, qp->ibqp.qp_num); 985 + spin_lock(&dev->n_qps_lock); 986 + dev->n_qps_allocated--; 987 + spin_unlock(&dev->n_qps_lock); 984 988 985 989 if (qp->ip) 986 990 kref_put(&qp->ip->ref, ipath_release_mmap_info); ··· 1028 1026 } 1029 1027 1030 1028 /** 1031 - * ipath_sqerror_qp - put a QP's send queue into an error state 1032 - * @qp: QP who's send queue will be put into an error state 1033 - * @wc: the WC responsible for putting the QP in this state 1034 - * 1035 - * Flushes the send work queue. 1036 - * The QP s_lock should be held and interrupts disabled. 1037 - */ 1038 - 1039 - void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc) 1040 - { 1041 - struct ipath_ibdev *dev = to_idev(qp->ibqp.device); 1042 - struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last); 1043 - 1044 - ipath_dbg("Send queue error on QP%d/%d: err: %d\n", 1045 - qp->ibqp.qp_num, qp->remote_qpn, wc->status); 1046 - 1047 - spin_lock(&dev->pending_lock); 1048 - if (!list_empty(&qp->timerwait)) 1049 - list_del_init(&qp->timerwait); 1050 - if (!list_empty(&qp->piowait)) 1051 - list_del_init(&qp->piowait); 1052 - spin_unlock(&dev->pending_lock); 1053 - 1054 - ipath_cq_enter(to_icq(qp->ibqp.send_cq), wc, 1); 1055 - if (++qp->s_last >= qp->s_size) 1056 - qp->s_last = 0; 1057 - 1058 - wc->status = IB_WC_WR_FLUSH_ERR; 1059 - 1060 - while (qp->s_last != qp->s_head) { 1061 - wqe = get_swqe_ptr(qp, qp->s_last); 1062 - wc->wr_id = wqe->wr.wr_id; 1063 - wc->opcode = ib_ipath_wc_opcode[wqe->wr.opcode]; 1064 - ipath_cq_enter(to_icq(qp->ibqp.send_cq), wc, 1); 1065 - if (++qp->s_last >= qp->s_size) 1066 - qp->s_last = 0; 1067 - } 1068 - qp->s_cur = qp->s_tail = qp->s_head; 1069 - qp->state = IB_QPS_SQE; 1070 - } 1071 - 1072 - /** 1073 1029 * ipath_get_credit - flush the send work queue of a QP 1074 1030 * @qp: the qp who's send work queue to flush 1075 1031 * @aeth: the Acknowledge Extended Transport Header ··· 1053 1093 } 1054 1094 1055 1095 /* Restart sending if it was blocked due to lack of credits. 
*/ 1056 - if (qp->s_cur != qp->s_head && 1096 + if ((qp->s_flags & IPATH_S_WAIT_SSN_CREDIT) && 1097 + qp->s_cur != qp->s_head && 1057 1098 (qp->s_lsn == (u32) -1 || 1058 1099 ipath_cmp24(get_swqe_ptr(qp, qp->s_cur)->ssn, 1059 1100 qp->s_lsn + 1) <= 0)) 1060 - tasklet_hi_schedule(&qp->s_task); 1101 + ipath_schedule_send(qp); 1061 1102 }
+145 -140
drivers/infiniband/hw/ipath/ipath_rc.c
··· 92 92 u32 bth0; 93 93 u32 bth2; 94 94 95 + /* Don't send an ACK if we aren't supposed to. */ 96 + if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) 97 + goto bail; 98 + 95 99 /* header size in 32-bit words LRH+BTH = (8+12)/4. */ 96 100 hwords = 5; 97 101 ··· 242 238 ipath_make_rc_ack(dev, qp, ohdr, pmtu)) 243 239 goto done; 244 240 245 - if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK) || 246 - qp->s_rnr_timeout || qp->s_wait_credit) 247 - goto bail; 241 + if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK)) { 242 + if (!(ib_ipath_state_ops[qp->state] & IPATH_FLUSH_SEND)) 243 + goto bail; 244 + /* We are in the error state, flush the work request. */ 245 + if (qp->s_last == qp->s_head) 246 + goto bail; 247 + /* If DMAs are in progress, we can't flush immediately. */ 248 + if (atomic_read(&qp->s_dma_busy)) { 249 + qp->s_flags |= IPATH_S_WAIT_DMA; 250 + goto bail; 251 + } 252 + wqe = get_swqe_ptr(qp, qp->s_last); 253 + ipath_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR); 254 + goto done; 255 + } 248 256 249 - /* Limit the number of packets sent without an ACK. */ 250 - if (ipath_cmp24(qp->s_psn, qp->s_last_psn + IPATH_PSN_CREDIT) > 0) { 251 - qp->s_wait_credit = 1; 252 - dev->n_rc_stalls++; 257 + /* Leave BUSY set until RNR timeout. */ 258 + if (qp->s_rnr_timeout) { 259 + qp->s_flags |= IPATH_S_WAITING; 253 260 goto bail; 254 261 } 255 262 ··· 272 257 wqe = get_swqe_ptr(qp, qp->s_cur); 273 258 switch (qp->s_state) { 274 259 default: 260 + if (!(ib_ipath_state_ops[qp->state] & 261 + IPATH_PROCESS_NEXT_SEND_OK)) 262 + goto bail; 275 263 /* 276 264 * Resend an old request or start a new one. 277 265 * ··· 312 294 case IB_WR_SEND_WITH_IMM: 313 295 /* If no credit, return. */ 314 296 if (qp->s_lsn != (u32) -1 && 315 - ipath_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) 297 + ipath_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) { 298 + qp->s_flags |= IPATH_S_WAIT_SSN_CREDIT; 316 299 goto bail; 300 + } 317 301 wqe->lpsn = wqe->psn; 318 302 if (len > pmtu) { 319 303 wqe->lpsn += (len - 1) / pmtu; ··· 345 325 case IB_WR_RDMA_WRITE_WITH_IMM: 346 326 /* If no credit, return. */ 347 327 if (qp->s_lsn != (u32) -1 && 348 - ipath_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) 328 + ipath_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) { 329 + qp->s_flags |= IPATH_S_WAIT_SSN_CREDIT; 349 330 goto bail; 331 + } 350 332 ohdr->u.rc.reth.vaddr = 351 333 cpu_to_be64(wqe->wr.wr.rdma.remote_addr); 352 334 ohdr->u.rc.reth.rkey = ··· 592 570 ipath_make_ruc_header(dev, qp, ohdr, bth0 | (qp->s_state << 24), bth2); 593 571 done: 594 572 ret = 1; 573 + goto unlock; 574 + 595 575 bail: 576 + qp->s_flags &= ~IPATH_S_BUSY; 577 + unlock: 596 578 spin_unlock_irqrestore(&qp->s_lock, flags); 597 579 return ret; 598 580 } ··· 632 606 633 607 spin_unlock_irqrestore(&qp->s_lock, flags); 634 608 609 + /* Don't try to send ACKs if the link isn't ACTIVE */ 635 610 dd = dev->dd; 611 + if (!(dd->ipath_flags & IPATH_LINKACTIVE)) 612 + goto done; 613 + 636 614 piobuf = ipath_getpiobuf(dd, 0, NULL); 637 615 if (!piobuf) { 638 616 /* ··· 698 668 goto done; 699 669 700 670 queue_ack: 701 - dev->n_rc_qacks++; 702 - qp->s_flags |= IPATH_S_ACK_PENDING; 703 - qp->s_nak_state = qp->r_nak_state; 704 - qp->s_ack_psn = qp->r_ack_psn; 671 + if (ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK) { 672 + dev->n_rc_qacks++; 673 + qp->s_flags |= IPATH_S_ACK_PENDING; 674 + qp->s_nak_state = qp->r_nak_state; 675 + qp->s_ack_psn = qp->r_ack_psn; 676 + 677 + /* Schedule the send tasklet. 
*/ 678 + ipath_schedule_send(qp); 679 + } 705 680 spin_unlock_irqrestore(&qp->s_lock, flags); 706 - 707 - /* Call ipath_do_rc_send() in another thread. */ 708 - tasklet_hi_schedule(&qp->s_task); 709 - 710 681 done: 711 682 return; 712 683 } ··· 766 735 /* 767 736 * Set the state to restart in the middle of a request. 768 737 * Don't change the s_sge, s_cur_sge, or s_cur_size. 769 - * See ipath_do_rc_send(). 738 + * See ipath_make_rc_req(). 770 739 */ 771 740 switch (opcode) { 772 741 case IB_WR_SEND: ··· 802 771 * 803 772 * The QP s_lock should be held and interrupts disabled. 804 773 */ 805 - void ipath_restart_rc(struct ipath_qp *qp, u32 psn, struct ib_wc *wc) 774 + void ipath_restart_rc(struct ipath_qp *qp, u32 psn) 806 775 { 807 776 struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last); 808 777 struct ipath_ibdev *dev; 809 778 810 779 if (qp->s_retry == 0) { 811 - wc->wr_id = wqe->wr.wr_id; 812 - wc->status = IB_WC_RETRY_EXC_ERR; 813 - wc->opcode = ib_ipath_wc_opcode[wqe->wr.opcode]; 814 - wc->vendor_err = 0; 815 - wc->byte_len = 0; 816 - wc->qp = &qp->ibqp; 817 - wc->imm_data = 0; 818 - wc->src_qp = qp->remote_qpn; 819 - wc->wc_flags = 0; 820 - wc->pkey_index = 0; 821 - wc->slid = qp->remote_ah_attr.dlid; 822 - wc->sl = qp->remote_ah_attr.sl; 823 - wc->dlid_path_bits = 0; 824 - wc->port_num = 0; 825 - ipath_sqerror_qp(qp, wc); 780 + ipath_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR); 781 + ipath_error_qp(qp, IB_WC_WR_FLUSH_ERR); 826 782 goto bail; 827 783 } 828 784 qp->s_retry--; ··· 822 804 spin_lock(&dev->pending_lock); 823 805 if (!list_empty(&qp->timerwait)) 824 806 list_del_init(&qp->timerwait); 807 + if (!list_empty(&qp->piowait)) 808 + list_del_init(&qp->piowait); 825 809 spin_unlock(&dev->pending_lock); 826 810 827 811 if (wqe->wr.opcode == IB_WR_RDMA_READ) ··· 832 812 dev->n_rc_resends += (qp->s_psn - psn) & IPATH_PSN_MASK; 833 813 834 814 reset_psn(qp, psn); 835 - tasklet_hi_schedule(&qp->s_task); 815 + ipath_schedule_send(qp); 836 816 837 817 bail: 838 818 return; ··· 840 820 841 821 static inline void update_last_psn(struct ipath_qp *qp, u32 psn) 842 822 { 843 - if (qp->s_last_psn != psn) { 844 - qp->s_last_psn = psn; 845 - if (qp->s_wait_credit) { 846 - qp->s_wait_credit = 0; 847 - tasklet_hi_schedule(&qp->s_task); 848 - } 849 - } 823 + qp->s_last_psn = psn; 850 824 } 851 825 852 826 /** ··· 859 845 { 860 846 struct ipath_ibdev *dev = to_idev(qp->ibqp.device); 861 847 struct ib_wc wc; 848 + enum ib_wc_status status; 862 849 struct ipath_swqe *wqe; 863 850 int ret = 0; 864 851 u32 ack_psn; ··· 924 909 */ 925 910 update_last_psn(qp, wqe->psn - 1); 926 911 /* Retry this request. */ 927 - ipath_restart_rc(qp, wqe->psn, &wc); 912 + ipath_restart_rc(qp, wqe->psn); 928 913 /* 929 914 * No need to process the ACK/NAK since we are 930 915 * restarting an earlier request. ··· 940 925 wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)) { 941 926 qp->s_num_rd_atomic--; 942 927 /* Restart sending task if fence is complete */ 943 - if ((qp->s_flags & IPATH_S_FENCE_PENDING) && 944 - !qp->s_num_rd_atomic) { 945 - qp->s_flags &= ~IPATH_S_FENCE_PENDING; 946 - tasklet_hi_schedule(&qp->s_task); 947 - } else if (qp->s_flags & IPATH_S_RDMAR_PENDING) { 948 - qp->s_flags &= ~IPATH_S_RDMAR_PENDING; 949 - tasklet_hi_schedule(&qp->s_task); 950 - } 928 + if (((qp->s_flags & IPATH_S_FENCE_PENDING) && 929 + !qp->s_num_rd_atomic) || 930 + qp->s_flags & IPATH_S_RDMAR_PENDING) 931 + ipath_schedule_send(qp); 951 932 } 952 933 /* Post a send completion queue entry if requested. 
*/ 953 934 if (!(qp->s_flags & IPATH_S_SIGNAL_REQ_WR) || 954 935 (wqe->wr.send_flags & IB_SEND_SIGNALED)) { 936 + memset(&wc, 0, sizeof wc); 955 937 wc.wr_id = wqe->wr.wr_id; 956 938 wc.status = IB_WC_SUCCESS; 957 939 wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode]; 958 - wc.vendor_err = 0; 959 940 wc.byte_len = wqe->length; 960 - wc.imm_data = 0; 961 941 wc.qp = &qp->ibqp; 962 942 wc.src_qp = qp->remote_qpn; 963 - wc.wc_flags = 0; 964 - wc.pkey_index = 0; 965 943 wc.slid = qp->remote_ah_attr.dlid; 966 944 wc.sl = qp->remote_ah_attr.sl; 967 - wc.dlid_path_bits = 0; 968 - wc.port_num = 0; 969 945 ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0); 970 946 } 971 947 qp->s_retry = qp->s_retry_cnt; ··· 977 971 } else { 978 972 if (++qp->s_last >= qp->s_size) 979 973 qp->s_last = 0; 974 + if (qp->state == IB_QPS_SQD && qp->s_last == qp->s_cur) 975 + qp->s_draining = 0; 980 976 if (qp->s_last == qp->s_tail) 981 977 break; 982 978 wqe = get_swqe_ptr(qp, qp->s_last); ··· 1002 994 */ 1003 995 if (ipath_cmp24(qp->s_psn, psn) <= 0) { 1004 996 reset_psn(qp, psn + 1); 1005 - tasklet_hi_schedule(&qp->s_task); 997 + ipath_schedule_send(qp); 1006 998 } 1007 999 } else if (ipath_cmp24(qp->s_psn, psn) <= 0) { 1008 1000 qp->s_state = OP(SEND_LAST); ··· 1020 1012 if (qp->s_last == qp->s_tail) 1021 1013 goto bail; 1022 1014 if (qp->s_rnr_retry == 0) { 1023 - wc.status = IB_WC_RNR_RETRY_EXC_ERR; 1015 + status = IB_WC_RNR_RETRY_EXC_ERR; 1024 1016 goto class_b; 1025 1017 } 1026 1018 if (qp->s_rnr_retry_cnt < 7) ··· 1041 1033 ib_ipath_rnr_table[(aeth >> IPATH_AETH_CREDIT_SHIFT) & 1042 1034 IPATH_AETH_CREDIT_MASK]; 1043 1035 ipath_insert_rnr_queue(qp); 1036 + ipath_schedule_send(qp); 1044 1037 goto bail; 1045 1038 1046 1039 case 3: /* NAK */ ··· 1059 1050 * RDMA READ response which terminates the RDMA 1060 1051 * READ. 1061 1052 */ 1062 - ipath_restart_rc(qp, psn, &wc); 1053 + ipath_restart_rc(qp, psn); 1063 1054 break; 1064 1055 1065 1056 case 1: /* Invalid Request */ 1066 - wc.status = IB_WC_REM_INV_REQ_ERR; 1057 + status = IB_WC_REM_INV_REQ_ERR; 1067 1058 dev->n_other_naks++; 1068 1059 goto class_b; 1069 1060 1070 1061 case 2: /* Remote Access Error */ 1071 - wc.status = IB_WC_REM_ACCESS_ERR; 1062 + status = IB_WC_REM_ACCESS_ERR; 1072 1063 dev->n_other_naks++; 1073 1064 goto class_b; 1074 1065 1075 1066 case 3: /* Remote Operation Error */ 1076 - wc.status = IB_WC_REM_OP_ERR; 1067 + status = IB_WC_REM_OP_ERR; 1077 1068 dev->n_other_naks++; 1078 1069 class_b: 1079 - wc.wr_id = wqe->wr.wr_id; 1080 - wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode]; 1081 - wc.vendor_err = 0; 1082 - wc.byte_len = 0; 1083 - wc.qp = &qp->ibqp; 1084 - wc.imm_data = 0; 1085 - wc.src_qp = qp->remote_qpn; 1086 - wc.wc_flags = 0; 1087 - wc.pkey_index = 0; 1088 - wc.slid = qp->remote_ah_attr.dlid; 1089 - wc.sl = qp->remote_ah_attr.sl; 1090 - wc.dlid_path_bits = 0; 1091 - wc.port_num = 0; 1092 - ipath_sqerror_qp(qp, &wc); 1070 + ipath_send_complete(qp, wqe, status); 1071 + ipath_error_qp(qp, IB_WC_WR_FLUSH_ERR); 1093 1072 break; 1094 1073 1095 1074 default: ··· 1123 1126 int header_in_data) 1124 1127 { 1125 1128 struct ipath_swqe *wqe; 1129 + enum ib_wc_status status; 1126 1130 unsigned long flags; 1127 - struct ib_wc wc; 1128 1131 int diff; 1129 1132 u32 pad; 1130 1133 u32 aeth; 1131 1134 u64 val; 1132 1135 1133 1136 spin_lock_irqsave(&qp->s_lock, flags); 1137 + 1138 + /* Double check we can process this now that we hold the s_lock. 
*/ 1139 + if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) 1140 + goto ack_done; 1134 1141 1135 1142 /* Ignore invalid responses. */ 1136 1143 if (ipath_cmp24(psn, qp->s_next_psn) >= 0) ··· 1160 1159 if (unlikely(qp->s_last == qp->s_tail)) 1161 1160 goto ack_done; 1162 1161 wqe = get_swqe_ptr(qp, qp->s_last); 1162 + status = IB_WC_SUCCESS; 1163 1163 1164 1164 switch (opcode) { 1165 1165 case OP(ACKNOWLEDGE): ··· 1189 1187 wqe = get_swqe_ptr(qp, qp->s_last); 1190 1188 if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ)) 1191 1189 goto ack_op_err; 1190 + qp->r_flags &= ~IPATH_R_RDMAR_SEQ; 1192 1191 /* 1193 1192 * If this is a response to a resent RDMA read, we 1194 1193 * have to be careful to copy the data to the right ··· 1203 1200 /* no AETH, no ACK */ 1204 1201 if (unlikely(ipath_cmp24(psn, qp->s_last_psn + 1))) { 1205 1202 dev->n_rdma_seq++; 1206 - ipath_restart_rc(qp, qp->s_last_psn + 1, &wc); 1203 + if (qp->r_flags & IPATH_R_RDMAR_SEQ) 1204 + goto ack_done; 1205 + qp->r_flags |= IPATH_R_RDMAR_SEQ; 1206 + ipath_restart_rc(qp, qp->s_last_psn + 1); 1207 1207 goto ack_done; 1208 1208 } 1209 1209 if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ)) ··· 1267 1261 /* ACKs READ req. */ 1268 1262 if (unlikely(ipath_cmp24(psn, qp->s_last_psn + 1))) { 1269 1263 dev->n_rdma_seq++; 1270 - ipath_restart_rc(qp, qp->s_last_psn + 1, &wc); 1264 + if (qp->r_flags & IPATH_R_RDMAR_SEQ) 1265 + goto ack_done; 1266 + qp->r_flags |= IPATH_R_RDMAR_SEQ; 1267 + ipath_restart_rc(qp, qp->s_last_psn + 1); 1271 1268 goto ack_done; 1272 1269 } 1273 1270 if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ)) ··· 1300 1291 goto ack_done; 1301 1292 } 1302 1293 1303 - ack_done: 1304 - spin_unlock_irqrestore(&qp->s_lock, flags); 1305 - goto bail; 1306 - 1307 1294 ack_op_err: 1308 - wc.status = IB_WC_LOC_QP_OP_ERR; 1295 + status = IB_WC_LOC_QP_OP_ERR; 1309 1296 goto ack_err; 1310 1297 1311 1298 ack_len_err: 1312 - wc.status = IB_WC_LOC_LEN_ERR; 1299 + status = IB_WC_LOC_LEN_ERR; 1313 1300 ack_err: 1314 - wc.wr_id = wqe->wr.wr_id; 1315 - wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode]; 1316 - wc.vendor_err = 0; 1317 - wc.byte_len = 0; 1318 - wc.imm_data = 0; 1319 - wc.qp = &qp->ibqp; 1320 - wc.src_qp = qp->remote_qpn; 1321 - wc.wc_flags = 0; 1322 - wc.pkey_index = 0; 1323 - wc.slid = qp->remote_ah_attr.dlid; 1324 - wc.sl = qp->remote_ah_attr.sl; 1325 - wc.dlid_path_bits = 0; 1326 - wc.port_num = 0; 1327 - ipath_sqerror_qp(qp, &wc); 1301 + ipath_send_complete(qp, wqe, status); 1302 + ipath_error_qp(qp, IB_WC_WR_FLUSH_ERR); 1303 + ack_done: 1328 1304 spin_unlock_irqrestore(&qp->s_lock, flags); 1329 1305 bail: 1330 1306 return; ··· 1378 1384 psn &= IPATH_PSN_MASK; 1379 1385 e = NULL; 1380 1386 old_req = 1; 1387 + 1381 1388 spin_lock_irqsave(&qp->s_lock, flags); 1389 + /* Double check we can process this now that we hold the s_lock. 
*/ 1390 + if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) 1391 + goto unlock_done; 1392 + 1382 1393 for (i = qp->r_head_ack_queue; ; i = prev) { 1383 1394 if (i == qp->s_tail_ack_queue) 1384 1395 old_req = 0; ··· 1511 1512 break; 1512 1513 } 1513 1514 qp->r_nak_state = 0; 1514 - tasklet_hi_schedule(&qp->s_task); 1515 + ipath_schedule_send(qp); 1515 1516 1516 1517 unlock_done: 1517 1518 spin_unlock_irqrestore(&qp->s_lock, flags); ··· 1522 1523 return 0; 1523 1524 } 1524 1525 1525 - static void ipath_rc_error(struct ipath_qp *qp, enum ib_wc_status err) 1526 + void ipath_rc_error(struct ipath_qp *qp, enum ib_wc_status err) 1526 1527 { 1527 1528 unsigned long flags; 1528 1529 int lastwqe; 1529 1530 1530 1531 spin_lock_irqsave(&qp->s_lock, flags); 1531 - qp->state = IB_QPS_ERR; 1532 1532 lastwqe = ipath_error_qp(qp, err); 1533 1533 spin_unlock_irqrestore(&qp->s_lock, flags); 1534 1534 ··· 1543 1545 1544 1546 static inline void ipath_update_ack_queue(struct ipath_qp *qp, unsigned n) 1545 1547 { 1546 - unsigned long flags; 1547 1548 unsigned next; 1548 1549 1549 1550 next = n + 1; 1550 1551 if (next > IPATH_MAX_RDMA_ATOMIC) 1551 1552 next = 0; 1552 - spin_lock_irqsave(&qp->s_lock, flags); 1553 1553 if (n == qp->s_tail_ack_queue) { 1554 1554 qp->s_tail_ack_queue = next; 1555 1555 qp->s_ack_state = OP(ACKNOWLEDGE); 1556 1556 } 1557 - spin_unlock_irqrestore(&qp->s_lock, flags); 1558 1557 } 1559 1558 1560 1559 /** ··· 1580 1585 int diff; 1581 1586 struct ib_reth *reth; 1582 1587 int header_in_data; 1588 + unsigned long flags; 1583 1589 1584 1590 /* Validate the SLID. See Ch. 9.6.1.5 */ 1585 1591 if (unlikely(be16_to_cpu(hdr->lrh[3]) != qp->remote_ah_attr.dlid)) ··· 1639 1643 opcode == OP(SEND_LAST) || 1640 1644 opcode == OP(SEND_LAST_WITH_IMMEDIATE)) 1641 1645 break; 1642 - nack_inv: 1643 - ipath_rc_error(qp, IB_WC_REM_INV_REQ_ERR); 1644 - qp->r_nak_state = IB_NAK_INVALID_REQUEST; 1645 - qp->r_ack_psn = qp->r_psn; 1646 - goto send_ack; 1646 + goto nack_inv; 1647 1647 1648 1648 case OP(RDMA_WRITE_FIRST): 1649 1649 case OP(RDMA_WRITE_MIDDLE): ··· 1665 1673 break; 1666 1674 } 1667 1675 1668 - wc.imm_data = 0; 1669 - wc.wc_flags = 0; 1676 + memset(&wc, 0, sizeof wc); 1670 1677 1671 1678 /* OK, process the packet. */ 1672 1679 switch (opcode) { 1673 1680 case OP(SEND_FIRST): 1674 - if (!ipath_get_rwqe(qp, 0)) { 1675 - rnr_nak: 1676 - qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer; 1677 - qp->r_ack_psn = qp->r_psn; 1678 - goto send_ack; 1679 - } 1681 + if (!ipath_get_rwqe(qp, 0)) 1682 + goto rnr_nak; 1680 1683 qp->r_rcv_len = 0; 1681 1684 /* FALLTHROUGH */ 1682 1685 case OP(SEND_MIDDLE): ··· 1728 1741 goto nack_inv; 1729 1742 ipath_copy_sge(&qp->r_sge, data, tlen); 1730 1743 qp->r_msn++; 1731 - if (!qp->r_wrid_valid) 1744 + if (!test_and_clear_bit(IPATH_R_WRID_VALID, &qp->r_aflags)) 1732 1745 break; 1733 - qp->r_wrid_valid = 0; 1734 1746 wc.wr_id = qp->r_wr_id; 1735 1747 wc.status = IB_WC_SUCCESS; 1736 1748 if (opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE) || ··· 1737 1751 wc.opcode = IB_WC_RECV_RDMA_WITH_IMM; 1738 1752 else 1739 1753 wc.opcode = IB_WC_RECV; 1740 - wc.vendor_err = 0; 1741 1754 wc.qp = &qp->ibqp; 1742 1755 wc.src_qp = qp->remote_qpn; 1743 - wc.pkey_index = 0; 1744 1756 wc.slid = qp->remote_ah_attr.dlid; 1745 1757 wc.sl = qp->remote_ah_attr.sl; 1746 - wc.dlid_path_bits = 0; 1747 - wc.port_num = 0; 1748 1758 /* Signal completion event if the solicited bit is set. 
*/ 1749 1759 ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1750 1760 (ohdr->bth[0] & ··· 1801 1819 next = qp->r_head_ack_queue + 1; 1802 1820 if (next > IPATH_MAX_RDMA_ATOMIC) 1803 1821 next = 0; 1822 + spin_lock_irqsave(&qp->s_lock, flags); 1823 + /* Double check we can process this while holding the s_lock. */ 1824 + if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) 1825 + goto unlock; 1804 1826 if (unlikely(next == qp->s_tail_ack_queue)) { 1805 1827 if (!qp->s_ack_queue[next].sent) 1806 - goto nack_inv; 1828 + goto nack_inv_unlck; 1807 1829 ipath_update_ack_queue(qp, next); 1808 1830 } 1809 1831 e = &qp->s_ack_queue[qp->r_head_ack_queue]; ··· 1828 1842 ok = ipath_rkey_ok(qp, &e->rdma_sge, len, vaddr, 1829 1843 rkey, IB_ACCESS_REMOTE_READ); 1830 1844 if (unlikely(!ok)) 1831 - goto nack_acc; 1845 + goto nack_acc_unlck; 1832 1846 /* 1833 1847 * Update the next expected PSN. We add 1 later 1834 1848 * below, so only add the remainder here. ··· 1855 1869 qp->r_psn++; 1856 1870 qp->r_state = opcode; 1857 1871 qp->r_nak_state = 0; 1858 - barrier(); 1859 1872 qp->r_head_ack_queue = next; 1860 1873 1861 - /* Call ipath_do_rc_send() in another thread. */ 1862 - tasklet_hi_schedule(&qp->s_task); 1874 + /* Schedule the send tasklet. */ 1875 + ipath_schedule_send(qp); 1863 1876 1864 - goto done; 1877 + goto unlock; 1865 1878 } 1866 1879 1867 1880 case OP(COMPARE_SWAP): ··· 1879 1894 next = qp->r_head_ack_queue + 1; 1880 1895 if (next > IPATH_MAX_RDMA_ATOMIC) 1881 1896 next = 0; 1897 + spin_lock_irqsave(&qp->s_lock, flags); 1898 + /* Double check we can process this while holding the s_lock. */ 1899 + if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) 1900 + goto unlock; 1882 1901 if (unlikely(next == qp->s_tail_ack_queue)) { 1883 1902 if (!qp->s_ack_queue[next].sent) 1884 - goto nack_inv; 1903 + goto nack_inv_unlck; 1885 1904 ipath_update_ack_queue(qp, next); 1886 1905 } 1887 1906 if (!header_in_data) ··· 1895 1906 vaddr = ((u64) be32_to_cpu(ateth->vaddr[0]) << 32) | 1896 1907 be32_to_cpu(ateth->vaddr[1]); 1897 1908 if (unlikely(vaddr & (sizeof(u64) - 1))) 1898 - goto nack_inv; 1909 + goto nack_inv_unlck; 1899 1910 rkey = be32_to_cpu(ateth->rkey); 1900 1911 /* Check rkey & NAK */ 1901 1912 if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge, 1902 1913 sizeof(u64), vaddr, rkey, 1903 1914 IB_ACCESS_REMOTE_ATOMIC))) 1904 - goto nack_acc; 1915 + goto nack_acc_unlck; 1905 1916 /* Perform atomic OP and save result. */ 1906 1917 maddr = (atomic64_t *) qp->r_sge.sge.vaddr; 1907 1918 sdata = be64_to_cpu(ateth->swap_data); ··· 1918 1929 qp->r_psn++; 1919 1930 qp->r_state = opcode; 1920 1931 qp->r_nak_state = 0; 1921 - barrier(); 1922 1932 qp->r_head_ack_queue = next; 1923 1933 1924 - /* Call ipath_do_rc_send() in another thread. */ 1925 - tasklet_hi_schedule(&qp->s_task); 1934 + /* Schedule the send tasklet. 
*/ 1935 + ipath_schedule_send(qp); 1926 1936 1927 - goto done; 1937 + goto unlock; 1928 1938 } 1929 1939 1930 1940 default: ··· 1939 1951 goto send_ack; 1940 1952 goto done; 1941 1953 1954 + rnr_nak: 1955 + qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer; 1956 + qp->r_ack_psn = qp->r_psn; 1957 + goto send_ack; 1958 + 1959 + nack_inv_unlck: 1960 + spin_unlock_irqrestore(&qp->s_lock, flags); 1961 + nack_inv: 1962 + ipath_rc_error(qp, IB_WC_LOC_QP_OP_ERR); 1963 + qp->r_nak_state = IB_NAK_INVALID_REQUEST; 1964 + qp->r_ack_psn = qp->r_psn; 1965 + goto send_ack; 1966 + 1967 + nack_acc_unlck: 1968 + spin_unlock_irqrestore(&qp->s_lock, flags); 1942 1969 nack_acc: 1943 - ipath_rc_error(qp, IB_WC_REM_ACCESS_ERR); 1970 + ipath_rc_error(qp, IB_WC_LOC_PROT_ERR); 1944 1971 qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR; 1945 1972 qp->r_ack_psn = qp->r_psn; 1946 - 1947 1973 send_ack: 1948 1974 send_rc_ack(qp); 1975 + goto done; 1949 1976 1977 + unlock: 1978 + spin_unlock_irqrestore(&qp->s_lock, flags); 1950 1979 done: 1951 1980 return; 1952 1981 }
+192 -137
drivers/infiniband/hw/ipath/ipath_ruc.c
··· 1 1 /* 2 - * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved. 2 + * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved. 3 3 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. 4 4 * 5 5 * This software is available to you under a choice of one of two ··· 78 78 * ipath_insert_rnr_queue - put QP on the RNR timeout list for the device 79 79 * @qp: the QP 80 80 * 81 + * Called with the QP s_lock held and interrupts disabled. 81 82 * XXX Use a simple list for now. We might need a priority 82 83 * queue if we have lots of QPs waiting for RNR timeouts 83 84 * but that should be rare. ··· 86 85 void ipath_insert_rnr_queue(struct ipath_qp *qp) 87 86 { 88 87 struct ipath_ibdev *dev = to_idev(qp->ibqp.device); 89 - unsigned long flags; 90 88 91 - spin_lock_irqsave(&dev->pending_lock, flags); 89 + /* We already did a spin_lock_irqsave(), so just use spin_lock */ 90 + spin_lock(&dev->pending_lock); 92 91 if (list_empty(&dev->rnrwait)) 93 92 list_add(&qp->timerwait, &dev->rnrwait); 94 93 else { ··· 110 109 nqp->s_rnr_timeout -= qp->s_rnr_timeout; 111 110 list_add(&qp->timerwait, l); 112 111 } 113 - spin_unlock_irqrestore(&dev->pending_lock, flags); 112 + spin_unlock(&dev->pending_lock); 114 113 } 115 114 116 115 /** ··· 141 140 goto bail; 142 141 143 142 bad_lkey: 143 + memset(&wc, 0, sizeof(wc)); 144 144 wc.wr_id = wqe->wr_id; 145 145 wc.status = IB_WC_LOC_PROT_ERR; 146 146 wc.opcode = IB_WC_RECV; 147 - wc.vendor_err = 0; 148 - wc.byte_len = 0; 149 - wc.imm_data = 0; 150 147 wc.qp = &qp->ibqp; 151 - wc.src_qp = 0; 152 - wc.wc_flags = 0; 153 - wc.pkey_index = 0; 154 - wc.slid = 0; 155 - wc.sl = 0; 156 - wc.dlid_path_bits = 0; 157 - wc.port_num = 0; 158 148 /* Signal solicited completion event. */ 159 149 ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1); 160 150 ret = 0; ··· 186 194 } 187 195 188 196 spin_lock_irqsave(&rq->lock, flags); 197 + if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) { 198 + ret = 0; 199 + goto unlock; 200 + } 201 + 189 202 wq = rq->wq; 190 203 tail = wq->tail; 191 204 /* Validate tail before using it since it is user writable. */ ··· 198 201 tail = 0; 199 202 do { 200 203 if (unlikely(tail == wq->head)) { 201 - spin_unlock_irqrestore(&rq->lock, flags); 202 204 ret = 0; 203 - goto bail; 205 + goto unlock; 204 206 } 205 207 /* Make sure entry is read after head index is read. */ 206 208 smp_rmb(); ··· 212 216 wq->tail = tail; 213 217 214 218 ret = 1; 215 - qp->r_wrid_valid = 1; 219 + set_bit(IPATH_R_WRID_VALID, &qp->r_aflags); 216 220 if (handler) { 217 221 u32 n; 218 222 ··· 239 243 goto bail; 240 244 } 241 245 } 246 + unlock: 242 247 spin_unlock_irqrestore(&rq->lock, flags); 243 - 244 248 bail: 245 249 return ret; 246 250 } ··· 266 270 struct ib_wc wc; 267 271 u64 sdata; 268 272 atomic64_t *maddr; 273 + enum ib_wc_status send_status; 269 274 275 + /* 276 + * Note that we check the responder QP state after 277 + * checking the requester's state. 278 + */ 270 279 qp = ipath_lookup_qpn(&dev->qp_table, sqp->remote_qpn); 271 - if (!qp) { 272 - dev->n_pkt_drops++; 273 - return; 274 - } 275 280 276 - again: 277 281 spin_lock_irqsave(&sqp->s_lock, flags); 278 282 279 - if (!(ib_ipath_state_ops[sqp->state] & IPATH_PROCESS_SEND_OK) || 280 - sqp->s_rnr_timeout) { 281 - spin_unlock_irqrestore(&sqp->s_lock, flags); 282 - goto done; 283 - } 283 + /* Return if we are already busy processing a work request. 
*/ 284 + if ((sqp->s_flags & (IPATH_S_BUSY | IPATH_S_ANY_WAIT)) || 285 + !(ib_ipath_state_ops[sqp->state] & IPATH_PROCESS_OR_FLUSH_SEND)) 286 + goto unlock; 284 287 285 - /* Get the next send request. */ 286 - if (sqp->s_last == sqp->s_head) { 287 - /* Send work queue is empty. */ 288 - spin_unlock_irqrestore(&sqp->s_lock, flags); 289 - goto done; 288 + sqp->s_flags |= IPATH_S_BUSY; 289 + 290 + again: 291 + if (sqp->s_last == sqp->s_head) 292 + goto clr_busy; 293 + wqe = get_swqe_ptr(sqp, sqp->s_last); 294 + 295 + /* Return if it is not OK to start a new work reqeust. */ 296 + if (!(ib_ipath_state_ops[sqp->state] & IPATH_PROCESS_NEXT_SEND_OK)) { 297 + if (!(ib_ipath_state_ops[sqp->state] & IPATH_FLUSH_SEND)) 298 + goto clr_busy; 299 + /* We are in the error state, flush the work request. */ 300 + send_status = IB_WC_WR_FLUSH_ERR; 301 + goto flush_send; 290 302 } 291 303 292 304 /* 293 305 * We can rely on the entry not changing without the s_lock 294 306 * being held until we update s_last. 307 + * We increment s_cur to indicate s_last is in progress. 295 308 */ 296 - wqe = get_swqe_ptr(sqp, sqp->s_last); 309 + if (sqp->s_last == sqp->s_cur) { 310 + if (++sqp->s_cur >= sqp->s_size) 311 + sqp->s_cur = 0; 312 + } 297 313 spin_unlock_irqrestore(&sqp->s_lock, flags); 298 314 299 - wc.wc_flags = 0; 300 - wc.imm_data = 0; 315 + if (!qp || !(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) { 316 + dev->n_pkt_drops++; 317 + /* 318 + * For RC, the requester would timeout and retry so 319 + * shortcut the timeouts and just signal too many retries. 320 + */ 321 + if (sqp->ibqp.qp_type == IB_QPT_RC) 322 + send_status = IB_WC_RETRY_EXC_ERR; 323 + else 324 + send_status = IB_WC_SUCCESS; 325 + goto serr; 326 + } 327 + 328 + memset(&wc, 0, sizeof wc); 329 + send_status = IB_WC_SUCCESS; 301 330 302 331 sqp->s_sge.sge = wqe->sg_list[0]; 303 332 sqp->s_sge.sg_list = wqe->sg_list + 1; ··· 334 313 wc.imm_data = wqe->wr.ex.imm_data; 335 314 /* FALLTHROUGH */ 336 315 case IB_WR_SEND: 337 - if (!ipath_get_rwqe(qp, 0)) { 338 - rnr_nak: 339 - /* Handle RNR NAK */ 340 - if (qp->ibqp.qp_type == IB_QPT_UC) 341 - goto send_comp; 342 - if (sqp->s_rnr_retry == 0) { 343 - wc.status = IB_WC_RNR_RETRY_EXC_ERR; 344 - goto err; 345 - } 346 - if (sqp->s_rnr_retry_cnt < 7) 347 - sqp->s_rnr_retry--; 348 - dev->n_rnr_naks++; 349 - sqp->s_rnr_timeout = 350 - ib_ipath_rnr_table[qp->r_min_rnr_timer]; 351 - ipath_insert_rnr_queue(sqp); 352 - goto done; 353 - } 316 + if (!ipath_get_rwqe(qp, 0)) 317 + goto rnr_nak; 354 318 break; 355 319 356 320 case IB_WR_RDMA_WRITE_WITH_IMM: 357 - if (unlikely(!(qp->qp_access_flags & 358 - IB_ACCESS_REMOTE_WRITE))) { 359 - wc.status = IB_WC_REM_INV_REQ_ERR; 360 - goto err; 361 - } 321 + if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE))) 322 + goto inv_err; 362 323 wc.wc_flags = IB_WC_WITH_IMM; 363 324 wc.imm_data = wqe->wr.ex.imm_data; 364 325 if (!ipath_get_rwqe(qp, 1)) 365 326 goto rnr_nak; 366 327 /* FALLTHROUGH */ 367 328 case IB_WR_RDMA_WRITE: 368 - if (unlikely(!(qp->qp_access_flags & 369 - IB_ACCESS_REMOTE_WRITE))) { 370 - wc.status = IB_WC_REM_INV_REQ_ERR; 371 - goto err; 372 - } 329 + if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE))) 330 + goto inv_err; 373 331 if (wqe->length == 0) 374 332 break; 375 333 if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge, wqe->length, 376 334 wqe->wr.wr.rdma.remote_addr, 377 335 wqe->wr.wr.rdma.rkey, 378 - IB_ACCESS_REMOTE_WRITE))) { 379 - acc_err: 380 - wc.status = IB_WC_REM_ACCESS_ERR; 381 - err: 382 - wc.wr_id = wqe->wr.wr_id; 383 
- wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode]; 384 - wc.vendor_err = 0; 385 - wc.byte_len = 0; 386 - wc.qp = &sqp->ibqp; 387 - wc.src_qp = sqp->remote_qpn; 388 - wc.pkey_index = 0; 389 - wc.slid = sqp->remote_ah_attr.dlid; 390 - wc.sl = sqp->remote_ah_attr.sl; 391 - wc.dlid_path_bits = 0; 392 - wc.port_num = 0; 393 - spin_lock_irqsave(&sqp->s_lock, flags); 394 - ipath_sqerror_qp(sqp, &wc); 395 - spin_unlock_irqrestore(&sqp->s_lock, flags); 396 - goto done; 397 - } 336 + IB_ACCESS_REMOTE_WRITE))) 337 + goto acc_err; 398 338 break; 399 339 400 340 case IB_WR_RDMA_READ: 401 - if (unlikely(!(qp->qp_access_flags & 402 - IB_ACCESS_REMOTE_READ))) { 403 - wc.status = IB_WC_REM_INV_REQ_ERR; 404 - goto err; 405 - } 341 + if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ))) 342 + goto inv_err; 406 343 if (unlikely(!ipath_rkey_ok(qp, &sqp->s_sge, wqe->length, 407 344 wqe->wr.wr.rdma.remote_addr, 408 345 wqe->wr.wr.rdma.rkey, ··· 373 394 374 395 case IB_WR_ATOMIC_CMP_AND_SWP: 375 396 case IB_WR_ATOMIC_FETCH_AND_ADD: 376 - if (unlikely(!(qp->qp_access_flags & 377 - IB_ACCESS_REMOTE_ATOMIC))) { 378 - wc.status = IB_WC_REM_INV_REQ_ERR; 379 - goto err; 380 - } 397 + if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC))) 398 + goto inv_err; 381 399 if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge, sizeof(u64), 382 400 wqe->wr.wr.atomic.remote_addr, 383 401 wqe->wr.wr.atomic.rkey, ··· 391 415 goto send_comp; 392 416 393 417 default: 394 - goto done; 418 + send_status = IB_WC_LOC_QP_OP_ERR; 419 + goto serr; 395 420 } 396 421 397 422 sge = &sqp->s_sge.sge; ··· 425 448 sqp->s_len -= len; 426 449 } 427 450 428 - if (wqe->wr.opcode == IB_WR_RDMA_WRITE || 429 - wqe->wr.opcode == IB_WR_RDMA_READ) 451 + if (!test_and_clear_bit(IPATH_R_WRID_VALID, &qp->r_aflags)) 430 452 goto send_comp; 431 453 432 454 if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM) ··· 434 458 wc.opcode = IB_WC_RECV; 435 459 wc.wr_id = qp->r_wr_id; 436 460 wc.status = IB_WC_SUCCESS; 437 - wc.vendor_err = 0; 438 461 wc.byte_len = wqe->length; 439 462 wc.qp = &qp->ibqp; 440 463 wc.src_qp = qp->remote_qpn; 441 - wc.pkey_index = 0; 442 464 wc.slid = qp->remote_ah_attr.dlid; 443 465 wc.sl = qp->remote_ah_attr.sl; 444 - wc.dlid_path_bits = 0; 445 466 wc.port_num = 1; 446 467 /* Signal completion event if the solicited bit is set. */ 447 468 ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 448 469 wqe->wr.send_flags & IB_SEND_SOLICITED); 449 470 450 471 send_comp: 472 + spin_lock_irqsave(&sqp->s_lock, flags); 473 + flush_send: 451 474 sqp->s_rnr_retry = sqp->s_rnr_retry_cnt; 452 - ipath_send_complete(sqp, wqe, IB_WC_SUCCESS); 475 + ipath_send_complete(sqp, wqe, send_status); 453 476 goto again; 454 477 478 + rnr_nak: 479 + /* Handle RNR NAK */ 480 + if (qp->ibqp.qp_type == IB_QPT_UC) 481 + goto send_comp; 482 + /* 483 + * Note: we don't need the s_lock held since the BUSY flag 484 + * makes this single threaded. 
485 + */ 486 + if (sqp->s_rnr_retry == 0) { 487 + send_status = IB_WC_RNR_RETRY_EXC_ERR; 488 + goto serr; 489 + } 490 + if (sqp->s_rnr_retry_cnt < 7) 491 + sqp->s_rnr_retry--; 492 + spin_lock_irqsave(&sqp->s_lock, flags); 493 + if (!(ib_ipath_state_ops[sqp->state] & IPATH_PROCESS_RECV_OK)) 494 + goto clr_busy; 495 + sqp->s_flags |= IPATH_S_WAITING; 496 + dev->n_rnr_naks++; 497 + sqp->s_rnr_timeout = ib_ipath_rnr_table[qp->r_min_rnr_timer]; 498 + ipath_insert_rnr_queue(sqp); 499 + goto clr_busy; 500 + 501 + inv_err: 502 + send_status = IB_WC_REM_INV_REQ_ERR; 503 + wc.status = IB_WC_LOC_QP_OP_ERR; 504 + goto err; 505 + 506 + acc_err: 507 + send_status = IB_WC_REM_ACCESS_ERR; 508 + wc.status = IB_WC_LOC_PROT_ERR; 509 + err: 510 + /* responder goes to error state */ 511 + ipath_rc_error(qp, wc.status); 512 + 513 + serr: 514 + spin_lock_irqsave(&sqp->s_lock, flags); 515 + ipath_send_complete(sqp, wqe, send_status); 516 + if (sqp->ibqp.qp_type == IB_QPT_RC) { 517 + int lastwqe = ipath_error_qp(sqp, IB_WC_WR_FLUSH_ERR); 518 + 519 + sqp->s_flags &= ~IPATH_S_BUSY; 520 + spin_unlock_irqrestore(&sqp->s_lock, flags); 521 + if (lastwqe) { 522 + struct ib_event ev; 523 + 524 + ev.device = sqp->ibqp.device; 525 + ev.element.qp = &sqp->ibqp; 526 + ev.event = IB_EVENT_QP_LAST_WQE_REACHED; 527 + sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context); 528 + } 529 + goto done; 530 + } 531 + clr_busy: 532 + sqp->s_flags &= ~IPATH_S_BUSY; 533 + unlock: 534 + spin_unlock_irqrestore(&sqp->s_lock, flags); 455 535 done: 456 - if (atomic_dec_and_test(&qp->refcount)) 536 + if (qp && atomic_dec_and_test(&qp->refcount)) 457 537 wake_up(&qp->wait); 458 538 } 459 539 460 540 static void want_buffer(struct ipath_devdata *dd, struct ipath_qp *qp) 461 541 { 462 542 if (!(dd->ipath_flags & IPATH_HAS_SEND_DMA) || 463 - qp->ibqp.qp_type == IB_QPT_SMI) { 543 + qp->ibqp.qp_type == IB_QPT_SMI) { 464 544 unsigned long flags; 465 545 466 546 spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags); ··· 534 502 * @dev: the device we ran out of buffers on 535 503 * 536 504 * Called when we run out of PIO buffers. 505 + * If we are now in the error state, return zero to flush the 506 + * send work request. 537 507 */ 538 - static void ipath_no_bufs_available(struct ipath_qp *qp, 508 + static int ipath_no_bufs_available(struct ipath_qp *qp, 539 509 struct ipath_ibdev *dev) 540 510 { 541 511 unsigned long flags; 512 + int ret = 1; 542 513 543 514 /* 544 515 * Note that as soon as want_buffer() is called and 545 516 * possibly before it returns, ipath_ib_piobufavail() 546 - * could be called. If we are still in the tasklet function, 547 - * tasklet_hi_schedule() will not call us until the next time 548 - * tasklet_hi_schedule() is called. 549 - * We leave the busy flag set so that another post send doesn't 550 - * try to put the same QP on the piowait list again. 517 + * could be called. Therefore, put QP on the piowait list before 518 + * enabling the PIO avail interrupt. 
551 519 */ 552 - spin_lock_irqsave(&dev->pending_lock, flags); 553 - list_add_tail(&qp->piowait, &dev->piowait); 554 - spin_unlock_irqrestore(&dev->pending_lock, flags); 555 - want_buffer(dev->dd, qp); 556 - dev->n_piowait++; 520 + spin_lock_irqsave(&qp->s_lock, flags); 521 + if (ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK) { 522 + dev->n_piowait++; 523 + qp->s_flags |= IPATH_S_WAITING; 524 + qp->s_flags &= ~IPATH_S_BUSY; 525 + spin_lock(&dev->pending_lock); 526 + if (list_empty(&qp->piowait)) 527 + list_add_tail(&qp->piowait, &dev->piowait); 528 + spin_unlock(&dev->pending_lock); 529 + } else 530 + ret = 0; 531 + spin_unlock_irqrestore(&qp->s_lock, flags); 532 + if (ret) 533 + want_buffer(dev->dd, qp); 534 + return ret; 557 535 } 558 536 559 537 /** ··· 639 597 struct ipath_qp *qp = (struct ipath_qp *)data; 640 598 struct ipath_ibdev *dev = to_idev(qp->ibqp.device); 641 599 int (*make_req)(struct ipath_qp *qp); 642 - 643 - if (test_and_set_bit(IPATH_S_BUSY, &qp->s_busy)) 644 - goto bail; 600 + unsigned long flags; 645 601 646 602 if ((qp->ibqp.qp_type == IB_QPT_RC || 647 603 qp->ibqp.qp_type == IB_QPT_UC) && 648 604 qp->remote_ah_attr.dlid == dev->dd->ipath_lid) { 649 605 ipath_ruc_loopback(qp); 650 - goto clear; 606 + goto bail; 651 607 } 652 608 653 609 if (qp->ibqp.qp_type == IB_QPT_RC) ··· 654 614 make_req = ipath_make_uc_req; 655 615 else 656 616 make_req = ipath_make_ud_req; 617 + 618 + spin_lock_irqsave(&qp->s_lock, flags); 619 + 620 + /* Return if we are already busy processing a work request. */ 621 + if ((qp->s_flags & (IPATH_S_BUSY | IPATH_S_ANY_WAIT)) || 622 + !(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_OR_FLUSH_SEND)) { 623 + spin_unlock_irqrestore(&qp->s_lock, flags); 624 + goto bail; 625 + } 626 + 627 + qp->s_flags |= IPATH_S_BUSY; 628 + 629 + spin_unlock_irqrestore(&qp->s_lock, flags); 657 630 658 631 again: 659 632 /* Check for a constructed packet to be sent. */ ··· 677 624 */ 678 625 if (ipath_verbs_send(qp, &qp->s_hdr, qp->s_hdrwords, 679 626 qp->s_cur_sge, qp->s_cur_size)) { 680 - ipath_no_bufs_available(qp, dev); 681 - goto bail; 627 + if (ipath_no_bufs_available(qp, dev)) 628 + goto bail; 682 629 } 683 630 dev->n_unicast_xmit++; 684 631 /* Record that we sent the packet and s_hdr is empty. */ ··· 687 634 688 635 if (make_req(qp)) 689 636 goto again; 690 - clear: 691 - clear_bit(IPATH_S_BUSY, &qp->s_busy); 637 + 692 638 bail:; 693 639 } 694 640 641 + /* 642 + * This should be called with s_lock held. 643 + */ 695 644 void ipath_send_complete(struct ipath_qp *qp, struct ipath_swqe *wqe, 696 645 enum ib_wc_status status) 697 646 { 698 - unsigned long flags; 699 - u32 last; 647 + u32 old_last, last; 648 + 649 + if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_OR_FLUSH_SEND)) 650 + return; 700 651 701 652 /* See ch. 
11.2.4.1 and 10.7.3.1 */ 702 653 if (!(qp->s_flags & IPATH_S_SIGNAL_REQ_WR) || ··· 708 651 status != IB_WC_SUCCESS) { 709 652 struct ib_wc wc; 710 653 654 + memset(&wc, 0, sizeof wc); 711 655 wc.wr_id = wqe->wr.wr_id; 712 656 wc.status = status; 713 657 wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode]; 714 - wc.vendor_err = 0; 715 - wc.byte_len = wqe->length; 716 - wc.imm_data = 0; 717 658 wc.qp = &qp->ibqp; 718 - wc.src_qp = 0; 719 - wc.wc_flags = 0; 720 - wc.pkey_index = 0; 721 - wc.slid = 0; 722 - wc.sl = 0; 723 - wc.dlid_path_bits = 0; 724 - wc.port_num = 0; 725 - ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0); 659 + if (status == IB_WC_SUCCESS) 660 + wc.byte_len = wqe->length; 661 + ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 662 + status != IB_WC_SUCCESS); 726 663 } 727 664 728 - spin_lock_irqsave(&qp->s_lock, flags); 729 - last = qp->s_last; 665 + old_last = last = qp->s_last; 730 666 if (++last >= qp->s_size) 731 667 last = 0; 732 668 qp->s_last = last; 733 - spin_unlock_irqrestore(&qp->s_lock, flags); 669 + if (qp->s_cur == old_last) 670 + qp->s_cur = last; 671 + if (qp->s_tail == old_last) 672 + qp->s_tail = last; 673 + if (qp->state == IB_QPS_SQD && last == qp->s_cur) 674 + qp->s_draining = 0; 734 675 }
+39 -20
drivers/infiniband/hw/ipath/ipath_uc.c
··· 1 1 /* 2 - * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved. 2 + * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved. 3 3 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. 4 4 * 5 5 * This software is available to you under a choice of one of two ··· 47 47 { 48 48 struct ipath_other_headers *ohdr; 49 49 struct ipath_swqe *wqe; 50 + unsigned long flags; 50 51 u32 hwords; 51 52 u32 bth0; 52 53 u32 len; 53 54 u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu); 54 55 int ret = 0; 55 56 56 - if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK)) 57 + spin_lock_irqsave(&qp->s_lock, flags); 58 + 59 + if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK)) { 60 + if (!(ib_ipath_state_ops[qp->state] & IPATH_FLUSH_SEND)) 61 + goto bail; 62 + /* We are in the error state, flush the work request. */ 63 + if (qp->s_last == qp->s_head) 64 + goto bail; 65 + /* If DMAs are in progress, we can't flush immediately. */ 66 + if (atomic_read(&qp->s_dma_busy)) { 67 + qp->s_flags |= IPATH_S_WAIT_DMA; 68 + goto bail; 69 + } 70 + wqe = get_swqe_ptr(qp, qp->s_last); 71 + ipath_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR); 57 72 goto done; 73 + } 58 74 59 75 ohdr = &qp->s_hdr.u.oth; 60 76 if (qp->remote_ah_attr.ah_flags & IB_AH_GRH) ··· 85 69 qp->s_wqe = NULL; 86 70 switch (qp->s_state) { 87 71 default: 72 + if (!(ib_ipath_state_ops[qp->state] & 73 + IPATH_PROCESS_NEXT_SEND_OK)) 74 + goto bail; 88 75 /* Check if send work queue is empty. */ 89 76 if (qp->s_cur == qp->s_head) 90 - goto done; 77 + goto bail; 91 78 /* 92 79 * Start a new request. 93 80 */ ··· 153 134 break; 154 135 155 136 default: 156 - goto done; 137 + goto bail; 157 138 } 158 139 break; 159 140 ··· 213 194 ipath_make_ruc_header(to_idev(qp->ibqp.device), 214 195 qp, ohdr, bth0 | (qp->s_state << 24), 215 196 qp->s_next_psn++ & IPATH_PSN_MASK); 216 - ret = 1; 217 - 218 197 done: 198 + ret = 1; 199 + goto unlock; 200 + 201 + bail: 202 + qp->s_flags &= ~IPATH_S_BUSY; 203 + unlock: 204 + spin_unlock_irqrestore(&qp->s_lock, flags); 219 205 return ret; 220 206 } 221 207 ··· 282 258 */ 283 259 opcode = be32_to_cpu(ohdr->bth[0]) >> 24; 284 260 285 - wc.imm_data = 0; 286 - wc.wc_flags = 0; 261 + memset(&wc, 0, sizeof wc); 287 262 288 263 /* Compare the PSN verses the expected PSN. */ 289 264 if (unlikely(ipath_cmp24(psn, qp->r_psn) != 0)) { ··· 345 322 case OP(SEND_ONLY): 346 323 case OP(SEND_ONLY_WITH_IMMEDIATE): 347 324 send_first: 348 - if (qp->r_reuse_sge) { 349 - qp->r_reuse_sge = 0; 325 + if (qp->r_flags & IPATH_R_REUSE_SGE) { 326 + qp->r_flags &= ~IPATH_R_REUSE_SGE; 350 327 qp->r_sge = qp->s_rdma_read_sge; 351 328 } else if (!ipath_get_rwqe(qp, 0)) { 352 329 dev->n_pkt_drops++; ··· 363 340 case OP(SEND_MIDDLE): 364 341 /* Check for invalid length PMTU or posted rwqe len. */ 365 342 if (unlikely(tlen != (hdrsize + pmtu + 4))) { 366 - qp->r_reuse_sge = 1; 343 + qp->r_flags |= IPATH_R_REUSE_SGE; 367 344 dev->n_pkt_drops++; 368 345 goto done; 369 346 } 370 347 qp->r_rcv_len += pmtu; 371 348 if (unlikely(qp->r_rcv_len > qp->r_len)) { 372 - qp->r_reuse_sge = 1; 349 + qp->r_flags |= IPATH_R_REUSE_SGE; 373 350 dev->n_pkt_drops++; 374 351 goto done; 375 352 } ··· 395 372 /* Check for invalid length. 
*/ 396 373 /* XXX LAST len should be >= 1 */ 397 374 if (unlikely(tlen < (hdrsize + pad + 4))) { 398 - qp->r_reuse_sge = 1; 375 + qp->r_flags |= IPATH_R_REUSE_SGE; 399 376 dev->n_pkt_drops++; 400 377 goto done; 401 378 } ··· 403 380 tlen -= (hdrsize + pad + 4); 404 381 wc.byte_len = tlen + qp->r_rcv_len; 405 382 if (unlikely(wc.byte_len > qp->r_len)) { 406 - qp->r_reuse_sge = 1; 383 + qp->r_flags |= IPATH_R_REUSE_SGE; 407 384 dev->n_pkt_drops++; 408 385 goto done; 409 386 } ··· 413 390 wc.wr_id = qp->r_wr_id; 414 391 wc.status = IB_WC_SUCCESS; 415 392 wc.opcode = IB_WC_RECV; 416 - wc.vendor_err = 0; 417 393 wc.qp = &qp->ibqp; 418 394 wc.src_qp = qp->remote_qpn; 419 - wc.pkey_index = 0; 420 395 wc.slid = qp->remote_ah_attr.dlid; 421 396 wc.sl = qp->remote_ah_attr.sl; 422 - wc.dlid_path_bits = 0; 423 - wc.port_num = 0; 424 397 /* Signal completion event if the solicited bit is set. */ 425 398 ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 426 399 (ohdr->bth[0] & ··· 507 488 dev->n_pkt_drops++; 508 489 goto done; 509 490 } 510 - if (qp->r_reuse_sge) 511 - qp->r_reuse_sge = 0; 491 + if (qp->r_flags & IPATH_R_REUSE_SGE) 492 + qp->r_flags &= ~IPATH_R_REUSE_SGE; 512 493 else if (!ipath_get_rwqe(qp, 1)) { 513 494 dev->n_pkt_drops++; 514 495 goto done;
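ipath_make_uc_req() above and ipath_make_ud_req() in the next hunk now share the same locking skeleton: take s_lock on entry, flush or bail while holding it, and clear IPATH_S_BUSY only on the paths that did not build a packet. A condensed sketch of that skeleton, with the packet construction elided and a hypothetical function name:

    static int ipath_make_req_skeleton(struct ipath_qp *qp)
    {
            unsigned long flags;
            int ret = 0;

            spin_lock_irqsave(&qp->s_lock, flags);

            /* Nothing queued to send? */
            if (qp->s_cur == qp->s_head)
                    goto bail;

            /* ... the real code builds the next packet header here ... */

            ret = 1;                        /* a packet is ready to send */
            goto unlock;
    bail:
            qp->s_flags &= ~IPATH_S_BUSY;   /* nothing built; drop the busy flag */
    unlock:
            spin_unlock_irqrestore(&qp->s_lock, flags);
            return ret;
    }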
+48 -18
drivers/infiniband/hw/ipath/ipath_ud.c
··· 65 65 u32 length; 66 66 67 67 qp = ipath_lookup_qpn(&dev->qp_table, swqe->wr.wr.ud.remote_qpn); 68 - if (!qp) { 68 + if (!qp || !(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) { 69 69 dev->n_pkt_drops++; 70 - goto send_comp; 70 + goto done; 71 71 } 72 72 73 73 rsge.sg_list = NULL; ··· 91 91 * present on the wire. 92 92 */ 93 93 length = swqe->length; 94 + memset(&wc, 0, sizeof wc); 94 95 wc.byte_len = length + sizeof(struct ib_grh); 95 96 96 97 if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) { 97 98 wc.wc_flags = IB_WC_WITH_IMM; 98 99 wc.imm_data = swqe->wr.ex.imm_data; 99 - } else { 100 - wc.wc_flags = 0; 101 - wc.imm_data = 0; 102 100 } 103 101 104 102 /* ··· 227 229 } 228 230 wc.status = IB_WC_SUCCESS; 229 231 wc.opcode = IB_WC_RECV; 230 - wc.vendor_err = 0; 231 232 wc.qp = &qp->ibqp; 232 233 wc.src_qp = sqp->ibqp.qp_num; 233 234 /* XXX do we know which pkey matched? Only needed for GSI. */ ··· 245 248 kfree(rsge.sg_list); 246 249 if (atomic_dec_and_test(&qp->refcount)) 247 250 wake_up(&qp->wait); 248 - send_comp: 249 - ipath_send_complete(sqp, swqe, IB_WC_SUCCESS); 251 + done:; 250 252 } 251 253 252 254 /** ··· 260 264 struct ipath_other_headers *ohdr; 261 265 struct ib_ah_attr *ah_attr; 262 266 struct ipath_swqe *wqe; 267 + unsigned long flags; 263 268 u32 nwords; 264 269 u32 extra_bytes; 265 270 u32 bth0; ··· 268 271 u16 lid; 269 272 int ret = 0; 270 273 271 - if (unlikely(!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK))) 272 - goto bail; 274 + spin_lock_irqsave(&qp->s_lock, flags); 275 + 276 + if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_NEXT_SEND_OK)) { 277 + if (!(ib_ipath_state_ops[qp->state] & IPATH_FLUSH_SEND)) 278 + goto bail; 279 + /* We are in the error state, flush the work request. */ 280 + if (qp->s_last == qp->s_head) 281 + goto bail; 282 + /* If DMAs are in progress, we can't flush immediately. */ 283 + if (atomic_read(&qp->s_dma_busy)) { 284 + qp->s_flags |= IPATH_S_WAIT_DMA; 285 + goto bail; 286 + } 287 + wqe = get_swqe_ptr(qp, qp->s_last); 288 + ipath_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR); 289 + goto done; 290 + } 273 291 274 292 if (qp->s_cur == qp->s_head) 275 293 goto bail; 276 294 277 295 wqe = get_swqe_ptr(qp, qp->s_cur); 296 + if (++qp->s_cur >= qp->s_size) 297 + qp->s_cur = 0; 278 298 279 299 /* Construct the header. */ 280 300 ah_attr = &to_iah(wqe->wr.wr.ud.ah)->attr; ··· 302 288 dev->n_unicast_xmit++; 303 289 } else { 304 290 dev->n_unicast_xmit++; 305 - lid = ah_attr->dlid & 306 - ~((1 << dev->dd->ipath_lmc) - 1); 291 + lid = ah_attr->dlid & ~((1 << dev->dd->ipath_lmc) - 1); 307 292 if (unlikely(lid == dev->dd->ipath_lid)) { 293 + /* 294 + * If DMAs are in progress, we can't generate 295 + * a completion for the loopback packet since 296 + * it would be out of order. 297 + * XXX Instead of waiting, we could queue a 298 + * zero length descriptor so we get a callback. 
299 + */ 300 + if (atomic_read(&qp->s_dma_busy)) { 301 + qp->s_flags |= IPATH_S_WAIT_DMA; 302 + goto bail; 303 + } 304 + spin_unlock_irqrestore(&qp->s_lock, flags); 308 305 ipath_ud_loopback(qp, wqe); 306 + spin_lock_irqsave(&qp->s_lock, flags); 307 + ipath_send_complete(qp, wqe, IB_WC_SUCCESS); 309 308 goto done; 310 309 } 311 310 } ··· 395 368 ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num); 396 369 397 370 done: 398 - if (++qp->s_cur >= qp->s_size) 399 - qp->s_cur = 0; 400 371 ret = 1; 372 + goto unlock; 401 373 402 374 bail: 375 + qp->s_flags &= ~IPATH_S_BUSY; 376 + unlock: 377 + spin_unlock_irqrestore(&qp->s_lock, flags); 403 378 return ret; 404 379 } 405 380 ··· 535 506 /* 536 507 * Get the next work request entry to find where to put the data. 537 508 */ 538 - if (qp->r_reuse_sge) 539 - qp->r_reuse_sge = 0; 509 + if (qp->r_flags & IPATH_R_REUSE_SGE) 510 + qp->r_flags &= ~IPATH_R_REUSE_SGE; 540 511 else if (!ipath_get_rwqe(qp, 0)) { 541 512 /* 542 513 * Count VL15 packets dropped due to no receive buffer. ··· 552 523 } 553 524 /* Silently drop packets which are too big. */ 554 525 if (wc.byte_len > qp->r_len) { 555 - qp->r_reuse_sge = 1; 526 + qp->r_flags |= IPATH_R_REUSE_SGE; 556 527 dev->n_pkt_drops++; 557 528 goto bail; 558 529 } ··· 564 535 ipath_skip_sge(&qp->r_sge, sizeof(struct ib_grh)); 565 536 ipath_copy_sge(&qp->r_sge, data, 566 537 wc.byte_len - sizeof(struct ib_grh)); 567 - qp->r_wrid_valid = 0; 538 + if (!test_and_clear_bit(IPATH_R_WRID_VALID, &qp->r_aflags)) 539 + goto bail; 568 540 wc.wr_id = qp->r_wr_id; 569 541 wc.status = IB_WC_SUCCESS; 570 542 wc.opcode = IB_WC_RECV;
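Both flush paths above defer completion while send DMA descriptors are still outstanding, since completing ahead of the in-flight descriptors would generate out-of-order completions. The deferral idiom in isolation (hypothetical helper name; the caller holds qp->s_lock):

    static int ipath_flush_or_wait_dma(struct ipath_qp *qp)
    {
            struct ipath_swqe *wqe;

            if (atomic_read(&qp->s_dma_busy)) {
                    /* The DMA completion callback reschedules the send tasklet. */
                    qp->s_flags |= IPATH_S_WAIT_DMA;
                    return 0;
            }
            wqe = get_swqe_ptr(qp, qp->s_last);
            ipath_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
            return 1;
    }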
-2
drivers/infiniband/hw/ipath/ipath_user_sdma.h
··· 45 45 int ipath_user_sdma_make_progress(struct ipath_devdata *dd, 46 46 struct ipath_user_sdma_queue *pq); 47 47 48 - int ipath_user_sdma_pkt_sent(const struct ipath_user_sdma_queue *pq, 49 - u32 counter); 50 48 void ipath_user_sdma_queue_drain(struct ipath_devdata *dd, 51 49 struct ipath_user_sdma_queue *pq); 52 50
+117 -59
drivers/infiniband/hw/ipath/ipath_verbs.c
··· 111 111 module_param_named(disable_sma, ib_ipath_disable_sma, uint, S_IWUSR | S_IRUGO); 112 112 MODULE_PARM_DESC(disable_sma, "Disable the SMA"); 113 113 114 + /* 115 + * Note that it is OK to post send work requests in the SQE and ERR 116 + * states; ipath_do_send() will process them and generate error 117 + * completions as per IB 1.2 C10-96. 118 + */ 114 119 const int ib_ipath_state_ops[IB_QPS_ERR + 1] = { 115 120 [IB_QPS_RESET] = 0, 116 121 [IB_QPS_INIT] = IPATH_POST_RECV_OK, 117 122 [IB_QPS_RTR] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK, 118 123 [IB_QPS_RTS] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK | 119 - IPATH_POST_SEND_OK | IPATH_PROCESS_SEND_OK, 124 + IPATH_POST_SEND_OK | IPATH_PROCESS_SEND_OK | 125 + IPATH_PROCESS_NEXT_SEND_OK, 120 126 [IB_QPS_SQD] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK | 121 - IPATH_POST_SEND_OK, 122 - [IB_QPS_SQE] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK, 123 - [IB_QPS_ERR] = 0, 127 + IPATH_POST_SEND_OK | IPATH_PROCESS_SEND_OK, 128 + [IB_QPS_SQE] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK | 129 + IPATH_POST_SEND_OK | IPATH_FLUSH_SEND, 130 + [IB_QPS_ERR] = IPATH_POST_RECV_OK | IPATH_FLUSH_RECV | 131 + IPATH_POST_SEND_OK | IPATH_FLUSH_SEND, 124 132 }; 125 133 126 134 struct ipath_ucontext { ··· 238 230 } 239 231 } 240 232 241 - static void ipath_flush_wqe(struct ipath_qp *qp, struct ib_send_wr *wr) 242 - { 243 - struct ib_wc wc; 244 - 245 - memset(&wc, 0, sizeof(wc)); 246 - wc.wr_id = wr->wr_id; 247 - wc.status = IB_WC_WR_FLUSH_ERR; 248 - wc.opcode = ib_ipath_wc_opcode[wr->opcode]; 249 - wc.qp = &qp->ibqp; 250 - ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 1); 251 - } 252 - 253 233 /* 254 234 * Count the number of DMA descriptors needed to send length bytes of data. 255 235 * Don't modify the ipath_sge_state to get the count. ··· 343 347 spin_lock_irqsave(&qp->s_lock, flags); 344 348 345 349 /* Check that state is OK to post send. */ 346 - if (unlikely(!(ib_ipath_state_ops[qp->state] & IPATH_POST_SEND_OK))) { 347 - if (qp->state != IB_QPS_SQE && qp->state != IB_QPS_ERR) 348 - goto bail_inval; 349 - /* C10-96 says generate a flushed completion entry. */ 350 - ipath_flush_wqe(qp, wr); 351 - ret = 0; 352 - goto bail; 353 - } 350 + if (unlikely(!(ib_ipath_state_ops[qp->state] & IPATH_POST_SEND_OK))) 351 + goto bail_inval; 354 352 355 353 /* IB spec says that num_sge == 0 is OK. */ 356 354 if (wr->num_sge > qp->s_max_sge) ··· 667 677 static void ipath_ib_timer(struct ipath_ibdev *dev) 668 678 { 669 679 struct ipath_qp *resend = NULL; 680 + struct ipath_qp *rnr = NULL; 670 681 struct list_head *last; 671 682 struct ipath_qp *qp; 672 683 unsigned long flags; ··· 694 703 if (--qp->s_rnr_timeout == 0) { 695 704 do { 696 705 list_del_init(&qp->timerwait); 697 - tasklet_hi_schedule(&qp->s_task); 706 + qp->timer_next = rnr; 707 + rnr = qp; 708 + atomic_inc(&qp->refcount); 698 709 if (list_empty(last)) 699 710 break; 700 711 qp = list_entry(last->next, struct ipath_qp, ··· 736 743 spin_unlock_irqrestore(&dev->pending_lock, flags); 737 744 738 745 /* XXX What if timer fires again while this is running? 
*/ 739 - for (qp = resend; qp != NULL; qp = qp->timer_next) { 740 - struct ib_wc wc; 746 + while (resend != NULL) { 747 + qp = resend; 748 + resend = qp->timer_next; 741 749 742 750 spin_lock_irqsave(&qp->s_lock, flags); 743 - if (qp->s_last != qp->s_tail && qp->state == IB_QPS_RTS) { 751 + if (qp->s_last != qp->s_tail && 752 + ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK) { 744 753 dev->n_timeouts++; 745 - ipath_restart_rc(qp, qp->s_last_psn + 1, &wc); 754 + ipath_restart_rc(qp, qp->s_last_psn + 1); 746 755 } 756 + spin_unlock_irqrestore(&qp->s_lock, flags); 757 + 758 + /* Notify ipath_destroy_qp() if it is waiting. */ 759 + if (atomic_dec_and_test(&qp->refcount)) 760 + wake_up(&qp->wait); 761 + } 762 + while (rnr != NULL) { 763 + qp = rnr; 764 + rnr = qp->timer_next; 765 + 766 + spin_lock_irqsave(&qp->s_lock, flags); 767 + if (ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK) 768 + ipath_schedule_send(qp); 747 769 spin_unlock_irqrestore(&qp->s_lock, flags); 748 770 749 771 /* Notify ipath_destroy_qp() if it is waiting. */ ··· 1020 1012 struct ipath_verbs_txreq *tx = cookie; 1021 1013 struct ipath_qp *qp = tx->qp; 1022 1014 struct ipath_ibdev *dev = to_idev(qp->ibqp.device); 1015 + unsigned int flags; 1016 + enum ib_wc_status ibs = status == IPATH_SDMA_TXREQ_S_OK ? 1017 + IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR; 1023 1018 1024 - /* Generate a completion queue entry if needed */ 1025 - if (qp->ibqp.qp_type != IB_QPT_RC && tx->wqe) { 1026 - enum ib_wc_status ibs = status == IPATH_SDMA_TXREQ_S_OK ? 1027 - IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR; 1028 - 1019 + if (atomic_dec_and_test(&qp->s_dma_busy)) { 1020 + spin_lock_irqsave(&qp->s_lock, flags); 1021 + if (tx->wqe) 1022 + ipath_send_complete(qp, tx->wqe, ibs); 1023 + if ((ib_ipath_state_ops[qp->state] & IPATH_FLUSH_SEND && 1024 + qp->s_last != qp->s_head) || 1025 + (qp->s_flags & IPATH_S_WAIT_DMA)) 1026 + ipath_schedule_send(qp); 1027 + spin_unlock_irqrestore(&qp->s_lock, flags); 1028 + wake_up(&qp->wait_dma); 1029 + } else if (tx->wqe) { 1030 + spin_lock_irqsave(&qp->s_lock, flags); 1029 1031 ipath_send_complete(qp, tx->wqe, ibs); 1032 + spin_unlock_irqrestore(&qp->s_lock, flags); 1030 1033 } 1031 1034 1032 1035 if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_FREEBUF) ··· 1046 1027 1047 1028 if (atomic_dec_and_test(&qp->refcount)) 1048 1029 wake_up(&qp->wait); 1030 + } 1031 + 1032 + static void decrement_dma_busy(struct ipath_qp *qp) 1033 + { 1034 + unsigned int flags; 1035 + 1036 + if (atomic_dec_and_test(&qp->s_dma_busy)) { 1037 + spin_lock_irqsave(&qp->s_lock, flags); 1038 + if ((ib_ipath_state_ops[qp->state] & IPATH_FLUSH_SEND && 1039 + qp->s_last != qp->s_head) || 1040 + (qp->s_flags & IPATH_S_WAIT_DMA)) 1041 + ipath_schedule_send(qp); 1042 + spin_unlock_irqrestore(&qp->s_lock, flags); 1043 + wake_up(&qp->wait_dma); 1044 + } 1049 1045 } 1050 1046 1051 1047 /* ··· 1101 1067 if (tx) { 1102 1068 qp->s_tx = NULL; 1103 1069 /* resend previously constructed packet */ 1070 + atomic_inc(&qp->s_dma_busy); 1104 1071 ret = ipath_sdma_verbs_send(dd, tx->ss, tx->len, tx); 1105 - if (ret) 1072 + if (ret) { 1106 1073 qp->s_tx = tx; 1074 + decrement_dma_busy(qp); 1075 + } 1107 1076 goto bail; 1108 1077 } 1109 1078 ··· 1157 1120 tx->txreq.sg_count = ndesc; 1158 1121 tx->map_len = (hdrwords + 2) << 2; 1159 1122 tx->txreq.map_addr = &tx->hdr; 1123 + atomic_inc(&qp->s_dma_busy); 1160 1124 ret = ipath_sdma_verbs_send(dd, ss, dwords, tx); 1161 1125 if (ret) { 1162 1126 /* save ss and length in dwords */ 1163 1127 tx->ss = ss; 1164 1128 tx->len = dwords; 1165 
1129 qp->s_tx = tx; 1130 + decrement_dma_busy(qp); 1166 1131 } 1167 1132 goto bail; 1168 1133 } ··· 1185 1146 memcpy(piobuf, hdr, hdrwords << 2); 1186 1147 ipath_copy_from_sge(piobuf + hdrwords, ss, len); 1187 1148 1149 + atomic_inc(&qp->s_dma_busy); 1188 1150 ret = ipath_sdma_verbs_send(dd, NULL, 0, tx); 1189 1151 /* 1190 1152 * If we couldn't queue the DMA request, save the info ··· 1196 1156 tx->ss = NULL; 1197 1157 tx->len = 0; 1198 1158 qp->s_tx = tx; 1159 + decrement_dma_busy(qp); 1199 1160 } 1200 1161 dev->n_unaligned++; 1201 1162 goto bail; ··· 1220 1179 unsigned flush_wc; 1221 1180 u32 control; 1222 1181 int ret; 1182 + unsigned int flags; 1223 1183 1224 1184 piobuf = ipath_getpiobuf(dd, plen, NULL); 1225 1185 if (unlikely(piobuf == NULL)) { ··· 1291 1249 } 1292 1250 copy_io(piobuf, ss, len, flush_wc); 1293 1251 done: 1294 - if (qp->s_wqe) 1252 + if (qp->s_wqe) { 1253 + spin_lock_irqsave(&qp->s_lock, flags); 1295 1254 ipath_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS); 1255 + spin_unlock_irqrestore(&qp->s_lock, flags); 1256 + } 1296 1257 ret = 0; 1297 1258 bail: 1298 1259 return ret; ··· 1328 1283 * can defer SDMA restart until link goes ACTIVE without 1329 1284 * worrying about just how we got there. 1330 1285 */ 1331 - if (qp->ibqp.qp_type == IB_QPT_SMI) 1286 + if (qp->ibqp.qp_type == IB_QPT_SMI || 1287 + !(dd->ipath_flags & IPATH_HAS_SEND_DMA)) 1332 1288 ret = ipath_verbs_send_pio(qp, hdr, hdrwords, ss, len, 1333 - plen, dwords); 1334 - /* All non-VL15 packets are dropped if link is not ACTIVE */ 1335 - else if (!(dd->ipath_flags & IPATH_LINKACTIVE)) { 1336 - if (qp->s_wqe) 1337 - ipath_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS); 1338 - ret = 0; 1339 - } else if (dd->ipath_flags & IPATH_HAS_SEND_DMA) 1340 - ret = ipath_verbs_send_dma(qp, hdr, hdrwords, ss, len, 1341 1289 plen, dwords); 1342 1290 else 1343 - ret = ipath_verbs_send_pio(qp, hdr, hdrwords, ss, len, 1291 + ret = ipath_verbs_send_dma(qp, hdr, hdrwords, ss, len, 1344 1292 plen, dwords); 1345 1293 1346 1294 return ret; ··· 1441 1403 * This is called from ipath_intr() at interrupt level when a PIO buffer is 1442 1404 * available after ipath_verbs_send() returned an error that no buffers were 1443 1405 * available. Return 1 if we consumed all the PIO buffers and we still have 1444 - * QPs waiting for buffers (for now, just do a tasklet_hi_schedule and 1406 + * QPs waiting for buffers (for now, just restart the send tasklet and 1445 1407 * return zero). 
1446 1408 */ 1447 1409 int ipath_ib_piobufavail(struct ipath_ibdev *dev) 1448 1410 { 1411 + struct list_head *list; 1412 + struct ipath_qp *qplist; 1449 1413 struct ipath_qp *qp; 1450 1414 unsigned long flags; 1451 1415 1452 1416 if (dev == NULL) 1453 1417 goto bail; 1454 1418 1419 + list = &dev->piowait; 1420 + qplist = NULL; 1421 + 1455 1422 spin_lock_irqsave(&dev->pending_lock, flags); 1456 - while (!list_empty(&dev->piowait)) { 1457 - qp = list_entry(dev->piowait.next, struct ipath_qp, 1458 - piowait); 1423 + while (!list_empty(list)) { 1424 + qp = list_entry(list->next, struct ipath_qp, piowait); 1459 1425 list_del_init(&qp->piowait); 1460 - clear_bit(IPATH_S_BUSY, &qp->s_busy); 1461 - tasklet_hi_schedule(&qp->s_task); 1426 + qp->pio_next = qplist; 1427 + qplist = qp; 1428 + atomic_inc(&qp->refcount); 1462 1429 } 1463 1430 spin_unlock_irqrestore(&dev->pending_lock, flags); 1431 + 1432 + while (qplist != NULL) { 1433 + qp = qplist; 1434 + qplist = qp->pio_next; 1435 + 1436 + spin_lock_irqsave(&qp->s_lock, flags); 1437 + if (ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK) 1438 + ipath_schedule_send(qp); 1439 + spin_unlock_irqrestore(&qp->s_lock, flags); 1440 + 1441 + /* Notify ipath_destroy_qp() if it is waiting. */ 1442 + if (atomic_dec_and_test(&qp->refcount)) 1443 + wake_up(&qp->wait); 1444 + } 1464 1445 1465 1446 bail: 1466 1447 return 0; ··· 2202 2145 void ipath_unregister_ib_device(struct ipath_ibdev *dev) 2203 2146 { 2204 2147 struct ib_device *ibdev = &dev->ibdev; 2205 - 2206 - disable_timer(dev->dd); 2148 + u32 qps_inuse; 2207 2149 2208 2150 ib_unregister_device(ibdev); 2151 + 2152 + disable_timer(dev->dd); 2209 2153 2210 2154 if (!list_empty(&dev->pending[0]) || 2211 2155 !list_empty(&dev->pending[1]) || ··· 2222 2164 * Note that ipath_unregister_ib_device() can be called before all 2223 2165 * the QPs are destroyed! 2224 2166 */ 2225 - ipath_free_all_qps(&dev->qp_table); 2167 + qps_inuse = ipath_free_all_qps(&dev->qp_table); 2168 + if (qps_inuse) 2169 + ipath_dev_err(dev->dd, "QP memory leak! %u still in use\n", 2170 + qps_inuse); 2226 2171 kfree(dev->qp_table.table); 2227 2172 kfree(dev->lk_table.table); 2228 2173 kfree(dev->txreq_bufs); ··· 2276 2215 "RC OTH NAKs %d\n" 2277 2216 "RC timeouts %d\n" 2278 2217 "RC RDMA dup %d\n" 2279 - "RC stalls %d\n" 2280 2218 "piobuf wait %d\n" 2281 - "no piobuf %d\n" 2282 2219 "unaligned %d\n" 2283 2220 "PKT drops %d\n" 2284 2221 "WQE errs %d\n", 2285 2222 dev->n_rc_resends, dev->n_rc_qacks, dev->n_rc_acks, 2286 2223 dev->n_seq_naks, dev->n_rdma_seq, dev->n_rnr_naks, 2287 2224 dev->n_other_naks, dev->n_timeouts, 2288 - dev->n_rdma_dup_busy, dev->n_rc_stalls, dev->n_piowait, 2289 - dev->n_no_piobuf, dev->n_unaligned, 2225 + dev->n_rdma_dup_busy, dev->n_piowait, dev->n_unaligned, 2290 2226 dev->n_pkt_drops, dev->n_wqe_errs); 2291 2227 for (i = 0; i < ARRAY_SIZE(dev->opstats); i++) { 2292 2228 const struct ipath_opcode_stats *si = &dev->opstats[i];
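ipath_ib_timer() and ipath_ib_piobufavail() now use the same two-phase pattern: detach the waiting QPs under dev->pending_lock while taking a reference, then process each one under its own s_lock. A stripped-down sketch of that pattern (hypothetical helper name, same fields as the patch):

    static void ipath_kick_piowait(struct ipath_ibdev *dev)
    {
            struct ipath_qp *qp, *qplist = NULL;
            unsigned long flags;

            /* Phase 1: pull waiters off the device list, holding a reference. */
            spin_lock_irqsave(&dev->pending_lock, flags);
            while (!list_empty(&dev->piowait)) {
                    qp = list_entry(dev->piowait.next, struct ipath_qp, piowait);
                    list_del_init(&qp->piowait);
                    qp->pio_next = qplist;
                    qplist = qp;
                    atomic_inc(&qp->refcount);
            }
            spin_unlock_irqrestore(&dev->pending_lock, flags);

            /* Phase 2: reschedule each QP under its own lock, then drop the ref. */
            while (qplist != NULL) {
                    qp = qplist;
                    qplist = qp->pio_next;

                    spin_lock_irqsave(&qp->s_lock, flags);
                    if (ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK)
                            ipath_schedule_send(qp);
                    spin_unlock_irqrestore(&qp->s_lock, flags);

                    if (atomic_dec_and_test(&qp->refcount))
                            wake_up(&qp->wait);
            }
    }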
+52 -12
drivers/infiniband/hw/ipath/ipath_verbs.h
··· 74 74 #define IPATH_POST_RECV_OK 0x02 75 75 #define IPATH_PROCESS_RECV_OK 0x04 76 76 #define IPATH_PROCESS_SEND_OK 0x08 77 + #define IPATH_PROCESS_NEXT_SEND_OK 0x10 78 + #define IPATH_FLUSH_SEND 0x20 79 + #define IPATH_FLUSH_RECV 0x40 80 + #define IPATH_PROCESS_OR_FLUSH_SEND \ 81 + (IPATH_PROCESS_SEND_OK | IPATH_FLUSH_SEND) 77 82 78 83 /* IB Performance Manager status values */ 79 84 #define IB_PMA_SAMPLE_STATUS_DONE 0x00 ··· 358 353 struct ib_qp ibqp; 359 354 struct ipath_qp *next; /* link list for QPN hash table */ 360 355 struct ipath_qp *timer_next; /* link list for ipath_ib_timer() */ 356 + struct ipath_qp *pio_next; /* link for ipath_ib_piobufavail() */ 361 357 struct list_head piowait; /* link for wait PIO buf */ 362 358 struct list_head timerwait; /* link for waiting for timeouts */ 363 359 struct ib_ah_attr remote_ah_attr; 364 360 struct ipath_ib_header s_hdr; /* next packet header to send */ 365 361 atomic_t refcount; 366 362 wait_queue_head_t wait; 363 + wait_queue_head_t wait_dma; 367 364 struct tasklet_struct s_task; 368 365 struct ipath_mmap_info *ip; 369 366 struct ipath_sge_state *s_cur_sge; ··· 376 369 struct ipath_sge_state s_rdma_read_sge; 377 370 struct ipath_sge_state r_sge; /* current receive data */ 378 371 spinlock_t s_lock; 379 - unsigned long s_busy; 372 + atomic_t s_dma_busy; 380 373 u16 s_pkt_delay; 381 374 u16 s_hdrwords; /* size of s_hdr in 32 bit words */ 382 375 u32 s_cur_size; /* size of send packet in bytes */ ··· 390 383 u32 s_rnr_timeout; /* number of milliseconds for RNR timeout */ 391 384 u32 r_ack_psn; /* PSN for next ACK or atomic ACK */ 392 385 u64 r_wr_id; /* ID for current receive WQE */ 386 + unsigned long r_aflags; 393 387 u32 r_len; /* total length of r_sge */ 394 388 u32 r_rcv_len; /* receive data len processed */ 395 389 u32 r_psn; /* expected rcv packet sequence number */ ··· 402 394 u8 r_state; /* opcode of last packet received */ 403 395 u8 r_nak_state; /* non-zero if NAK is pending */ 404 396 u8 r_min_rnr_timer; /* retry timeout value for RNR NAKs */ 405 - u8 r_reuse_sge; /* for UC receive errors */ 406 - u8 r_wrid_valid; /* r_wrid set but CQ entry not yet made */ 397 + u8 r_flags; 407 398 u8 r_max_rd_atomic; /* max number of RDMA read/atomic to receive */ 408 399 u8 r_head_ack_queue; /* index into s_ack_queue[] */ 409 400 u8 qp_access_flags; ··· 411 404 u8 s_rnr_retry_cnt; 412 405 u8 s_retry; /* requester retry counter */ 413 406 u8 s_rnr_retry; /* requester RNR retry counter */ 414 - u8 s_wait_credit; /* limit number of unacked packets sent */ 415 407 u8 s_pkey_index; /* PKEY index to use */ 416 408 u8 s_max_rd_atomic; /* max number of RDMA read/atomic to send */ 417 409 u8 s_num_rd_atomic; /* number of RDMA read/atomic pending */ 418 410 u8 s_tail_ack_queue; /* index into s_ack_queue[] */ 419 411 u8 s_flags; 420 412 u8 s_dmult; 413 + u8 s_draining; 421 414 u8 timeout; /* Timeout for this QP */ 422 415 enum ib_mtu path_mtu; 423 416 u32 remote_qpn; ··· 435 428 struct ipath_sge r_sg_list[0]; /* verified SGEs */ 436 429 }; 437 430 438 - /* Bit definition for s_busy. */ 439 - #define IPATH_S_BUSY 0 431 + /* 432 + * Atomic bit definitions for r_aflags. 433 + */ 434 + #define IPATH_R_WRID_VALID 0 435 + 436 + /* 437 + * Bit definitions for r_flags. 438 + */ 439 + #define IPATH_R_REUSE_SGE 0x01 440 + #define IPATH_R_RDMAR_SEQ 0x02 440 441 441 442 /* 442 443 * Bit definitions for s_flags. 
444 + * 445 + * IPATH_S_FENCE_PENDING - waiting for all prior RDMA read or atomic SWQEs 446 + * before processing the next SWQE 447 + * IPATH_S_RDMAR_PENDING - waiting for any RDMA read or atomic SWQEs 448 + * before processing the next SWQE 449 + * IPATH_S_WAITING - waiting for RNR timeout or send buffer available. 450 + * IPATH_S_WAIT_SSN_CREDIT - waiting for RC credits to process next SWQE 451 + * IPATH_S_WAIT_DMA - waiting for send DMA queue to drain before generating 452 + * next send completion entry not via send DMA. 443 453 */ 444 454 #define IPATH_S_SIGNAL_REQ_WR 0x01 445 455 #define IPATH_S_FENCE_PENDING 0x02 446 456 #define IPATH_S_RDMAR_PENDING 0x04 447 457 #define IPATH_S_ACK_PENDING 0x08 458 + #define IPATH_S_BUSY 0x10 459 + #define IPATH_S_WAITING 0x20 460 + #define IPATH_S_WAIT_SSN_CREDIT 0x40 461 + #define IPATH_S_WAIT_DMA 0x80 462 + 463 + #define IPATH_S_ANY_WAIT (IPATH_S_FENCE_PENDING | IPATH_S_RDMAR_PENDING | \ 464 + IPATH_S_WAITING | IPATH_S_WAIT_SSN_CREDIT | IPATH_S_WAIT_DMA) 448 465 449 466 #define IPATH_PSN_CREDIT 512 450 467 ··· 604 573 u32 n_rnr_naks; 605 574 u32 n_other_naks; 606 575 u32 n_timeouts; 607 - u32 n_rc_stalls; 608 576 u32 n_pkt_drops; 609 577 u32 n_vl15_dropped; 610 578 u32 n_wqe_errs; 611 579 u32 n_rdma_dup_busy; 612 580 u32 n_piowait; 613 - u32 n_no_piobuf; 614 581 u32 n_unaligned; 615 582 u32 port_cap_flags; 616 583 u32 pma_sample_start; ··· 686 657 return container_of(ibdev, struct ipath_ibdev, ibdev); 687 658 } 688 659 660 + /* 661 + * This must be called with s_lock held. 662 + */ 663 + static inline void ipath_schedule_send(struct ipath_qp *qp) 664 + { 665 + if (qp->s_flags & IPATH_S_ANY_WAIT) 666 + qp->s_flags &= ~IPATH_S_ANY_WAIT; 667 + if (!(qp->s_flags & IPATH_S_BUSY)) 668 + tasklet_hi_schedule(&qp->s_task); 669 + } 670 + 689 671 int ipath_process_mad(struct ib_device *ibdev, 690 672 int mad_flags, 691 673 u8 port_num, ··· 746 706 int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, 747 707 int attr_mask, struct ib_qp_init_attr *init_attr); 748 708 749 - void ipath_free_all_qps(struct ipath_qp_table *qpt); 709 + unsigned ipath_free_all_qps(struct ipath_qp_table *qpt); 750 710 751 711 int ipath_init_qp_table(struct ipath_ibdev *idev, int size); 752 - 753 - void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc); 754 712 755 713 void ipath_get_credit(struct ipath_qp *qp, u32 aeth); 756 714 ··· 767 729 void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr, 768 730 int has_grh, void *data, u32 tlen, struct ipath_qp *qp); 769 731 770 - void ipath_restart_rc(struct ipath_qp *qp, u32 psn, struct ib_wc *wc); 732 + void ipath_restart_rc(struct ipath_qp *qp, u32 psn); 733 + 734 + void ipath_rc_error(struct ipath_qp *qp, enum ib_wc_status err); 771 735 772 736 int ipath_post_ud_send(struct ipath_qp *qp, struct ib_send_wr *wr); 773 737
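ipath_schedule_send() above is the common wake-up point: it clears the IPATH_S_ANY_WAIT bits and only schedules the tasklet when IPATH_S_BUSY is not set. A hypothetical caller, sketched here, shows the intended usage when an event (for example an SSN credit update) ends a wait:

    /* Hypothetical caller; s_lock protects s_flags and the schedule decision. */
    static void ipath_credit_arrived(struct ipath_qp *qp)
    {
            unsigned long flags;

            spin_lock_irqsave(&qp->s_lock, flags);
            if (qp->s_flags & IPATH_S_WAIT_SSN_CREDIT)
                    ipath_schedule_send(qp);
            spin_unlock_irqrestore(&qp->s_lock, flags);
    }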
-4
drivers/infiniband/hw/nes/nes.c
··· 91 91 module_param_named(debug_level, nes_debug_level, uint, 0644); 92 92 MODULE_PARM_DESC(debug_level, "Enable debug output level"); 93 93 94 - unsigned int nes_lro_max_aggr = NES_LRO_MAX_AGGR; 95 - module_param(nes_lro_max_aggr, int, NES_LRO_MAX_AGGR); 96 - MODULE_PARM_DESC(nes_mro_max_aggr, " nic LRO MAX packet aggregation"); 97 - 98 94 LIST_HEAD(nes_adapter_list); 99 95 static LIST_HEAD(nes_dev_list); 100 96
-1
drivers/infiniband/hw/nes/nes.h
··· 173 173 extern unsigned int send_first; 174 174 extern unsigned int nes_drv_opt; 175 175 extern unsigned int nes_debug_level; 176 - extern unsigned int nes_lro_max_aggr; 177 176 178 177 extern struct list_head nes_adapter_list; 179 178
+5 -1
drivers/infiniband/hw/nes/nes_hw.c
··· 42 42 43 43 #include "nes.h" 44 44 45 + static unsigned int nes_lro_max_aggr = NES_LRO_MAX_AGGR; 46 + module_param(nes_lro_max_aggr, uint, 0444); 47 + MODULE_PARM_DESC(nes_lro_max_aggr, "NIC LRO max packet aggregation"); 48 + 45 49 static u32 crit_err_count; 46 50 u32 int_mod_timer_init; 47 51 u32 int_mod_cq_depth_256; ··· 1742 1738 jumbomode = 1; 1743 1739 nes_nic_init_timer_defaults(nesdev, jumbomode); 1744 1740 } 1745 - nesvnic->lro_mgr.max_aggr = NES_LRO_MAX_AGGR; 1741 + nesvnic->lro_mgr.max_aggr = nes_lro_max_aggr; 1746 1742 nesvnic->lro_mgr.max_desc = NES_MAX_LRO_DESCRIPTORS; 1747 1743 nesvnic->lro_mgr.lro_arr = nesvnic->lro_desc; 1748 1744 nesvnic->lro_mgr.get_skb_header = nes_lro_get_skb_hdr;
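For reference, module_param() takes (name, type, sysfs permissions); the lines removed from nes.c passed NES_LRO_MAX_AGGR, the default aggregation count, where the permission bits belong, and the MODULE_PARM_DESC misspelled the name as nes_mro_max_aggr. The replacement above makes the parameter static to nes_hw.c and registers it the usual way, sketched in isolation (the default value here is illustrative):

    #include <linux/module.h>
    #include <linux/moduleparam.h>

    #define NES_LRO_MAX_AGGR 64             /* illustrative default */

    static unsigned int nes_lro_max_aggr = NES_LRO_MAX_AGGR;
    module_param(nes_lro_max_aggr, uint, 0444);     /* name, type, sysfs mode */
    MODULE_PARM_DESC(nes_lro_max_aggr, "NIC LRO max packet aggregation");

Assuming the driver loads as iw_nes, the value should then be readable from /sys/module/iw_nes/parameters/nes_lro_max_aggr and settable only at module load time.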