Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
mlx4_core: Add helper to move QP to ready-to-send
mlx4_core: Add HW queues allocation helpers
RDMA/nes: Remove volatile qualifier from struct nes_hw_cq.cq_vbase
mlx4_core: CQ resizing should pass a 0 opcode modifier to MODIFY_CQ
mlx4_core: Move kernel doorbell management into core
IB/ehca: Bump version number to 0026
IB/ehca: Make some module parameters bool, update descriptions
IB/ehca: Remove mr_largepage parameter
IB/ehca: Move high-volume debug output to higher debug levels
IB/ehca: Prevent posting of SQ WQEs if QP not in RTS
IPoIB: Handle 4K IB MTU for UD (datagram) mode
RDMA/nes: Fix adapter reset after PXE boot
RDMA/nes: Print IPv4 addresses in a readable format
RDMA/nes: Use print_mac() to format ethernet addresses for printing

+532 -350
+1
drivers/infiniband/hw/ehca/ehca_classes.h
··· 160 160 }; 161 161 u32 qp_type; 162 162 enum ehca_ext_qp_type ext_type; 163 + enum ib_qp_state state; 163 164 struct ipz_queue ipz_squeue; 164 165 struct ipz_queue ipz_rqueue; 165 166 struct h_galpas galpas;
+1 -1
drivers/infiniband/hw/ehca/ehca_irq.c
··· 633 633 unsigned long flags; 634 634 635 635 WARN_ON_ONCE(!in_interrupt()); 636 - if (ehca_debug_level) 636 + if (ehca_debug_level >= 3) 637 637 ehca_dmp(&cpu_online_map, sizeof(cpumask_t), ""); 638 638 639 639 spin_lock_irqsave(&pool->last_cpu_lock, flags);
+32 -43
drivers/infiniband/hw/ehca/ehca_main.c
··· 50 50 #include "ehca_tools.h" 51 51 #include "hcp_if.h" 52 52 53 - #define HCAD_VERSION "0025" 53 + #define HCAD_VERSION "0026" 54 54 55 55 MODULE_LICENSE("Dual BSD/GPL"); 56 56 MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>"); ··· 60 60 static int ehca_open_aqp1 = 0; 61 61 static int ehca_hw_level = 0; 62 62 static int ehca_poll_all_eqs = 1; 63 - static int ehca_mr_largepage = 1; 64 63 65 64 int ehca_debug_level = 0; 66 65 int ehca_nr_ports = 2; ··· 69 70 int ehca_scaling_code = 0; 70 71 int ehca_lock_hcalls = -1; 71 72 72 - module_param_named(open_aqp1, ehca_open_aqp1, int, S_IRUGO); 73 - module_param_named(debug_level, ehca_debug_level, int, S_IRUGO); 74 - module_param_named(hw_level, ehca_hw_level, int, S_IRUGO); 75 - module_param_named(nr_ports, ehca_nr_ports, int, S_IRUGO); 76 - module_param_named(use_hp_mr, ehca_use_hp_mr, int, S_IRUGO); 77 - module_param_named(port_act_time, ehca_port_act_time, int, S_IRUGO); 78 - module_param_named(poll_all_eqs, ehca_poll_all_eqs, int, S_IRUGO); 79 - module_param_named(static_rate, ehca_static_rate, int, S_IRUGO); 80 - module_param_named(scaling_code, ehca_scaling_code, int, S_IRUGO); 81 - module_param_named(mr_largepage, ehca_mr_largepage, int, S_IRUGO); 73 + module_param_named(open_aqp1, ehca_open_aqp1, bool, S_IRUGO); 74 + module_param_named(debug_level, ehca_debug_level, int, S_IRUGO); 75 + module_param_named(hw_level, ehca_hw_level, int, S_IRUGO); 76 + module_param_named(nr_ports, ehca_nr_ports, int, S_IRUGO); 77 + module_param_named(use_hp_mr, ehca_use_hp_mr, bool, S_IRUGO); 78 + module_param_named(port_act_time, ehca_port_act_time, int, S_IRUGO); 79 + module_param_named(poll_all_eqs, ehca_poll_all_eqs, bool, S_IRUGO); 80 + module_param_named(static_rate, ehca_static_rate, int, S_IRUGO); 81 + module_param_named(scaling_code, ehca_scaling_code, bool, S_IRUGO); 82 82 module_param_named(lock_hcalls, ehca_lock_hcalls, bool, S_IRUGO); 83 83 84 84 MODULE_PARM_DESC(open_aqp1, 85 - "AQP1 on startup (0: no (default), 1: yes)"); 85 + "Open AQP1 on startup (default: no)"); 86 86 MODULE_PARM_DESC(debug_level, 87 - "debug level" 88 - " (0: no debug traces (default), 1: with debug traces)"); 87 + "Amount of debug output (0: none (default), 1: traces, " 88 + "2: some dumps, 3: lots)"); 89 89 MODULE_PARM_DESC(hw_level, 90 - "hardware level" 91 - " (0: autosensing (default), 1: v. 0.20, 2: v. 
0.21)"); 90 + "Hardware level (0: autosensing (default), " 91 + "0x10..0x14: eHCA, 0x20..0x23: eHCA2)"); 92 92 MODULE_PARM_DESC(nr_ports, 93 93 "number of connected ports (-1: autodetect, 1: port one only, " 94 94 "2: two ports (default)"); 95 95 MODULE_PARM_DESC(use_hp_mr, 96 - "high performance MRs (0: no (default), 1: yes)"); 96 + "Use high performance MRs (default: no)"); 97 97 MODULE_PARM_DESC(port_act_time, 98 - "time to wait for port activation (default: 30 sec)"); 98 + "Time to wait for port activation (default: 30 sec)"); 99 99 MODULE_PARM_DESC(poll_all_eqs, 100 - "polls all event queues periodically" 101 - " (0: no, 1: yes (default))"); 100 + "Poll all event queues periodically (default: yes)"); 102 101 MODULE_PARM_DESC(static_rate, 103 - "set permanent static rate (default: disabled)"); 102 + "Set permanent static rate (default: no static rate)"); 104 103 MODULE_PARM_DESC(scaling_code, 105 - "set scaling code (0: disabled/default, 1: enabled)"); 106 - MODULE_PARM_DESC(mr_largepage, 107 - "use large page for MR (0: use PAGE_SIZE (default), " 108 - "1: use large page depending on MR size"); 104 + "Enable scaling code (default: no)"); 109 105 MODULE_PARM_DESC(lock_hcalls, 110 - "serialize all hCalls made by the driver " 106 + "Serialize all hCalls made by the driver " 111 107 "(default: autodetect)"); 112 108 113 109 DEFINE_RWLOCK(ehca_qp_idr_lock); ··· 269 275 u64 h_ret; 270 276 struct hipz_query_hca *rblock; 271 277 struct hipz_query_port *port; 278 + const char *loc_code; 272 279 273 280 static const u32 pgsize_map[] = { 274 281 HCA_CAP_MR_PGSIZE_4K, 0x1000, ··· 277 282 HCA_CAP_MR_PGSIZE_1M, 0x100000, 278 283 HCA_CAP_MR_PGSIZE_16M, 0x1000000, 279 284 }; 285 + 286 + ehca_gen_dbg("Probing adapter %s...", 287 + shca->ofdev->node->full_name); 288 + loc_code = of_get_property(shca->ofdev->node, "ibm,loc-code", NULL); 289 + if (loc_code) 290 + ehca_gen_dbg(" ... 
location lode=%s", loc_code); 280 291 281 292 rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL); 282 293 if (!rblock) { ··· 351 350 352 351 /* translate supported MR page sizes; always support 4K */ 353 352 shca->hca_cap_mr_pgsize = EHCA_PAGESIZE; 354 - if (ehca_mr_largepage) { /* support extra sizes only if enabled */ 355 - for (i = 0; i < ARRAY_SIZE(pgsize_map); i += 2) 356 - if (rblock->memory_page_size_supported & pgsize_map[i]) 357 - shca->hca_cap_mr_pgsize |= pgsize_map[i + 1]; 358 - } 353 + for (i = 0; i < ARRAY_SIZE(pgsize_map); i += 2) 354 + if (rblock->memory_page_size_supported & pgsize_map[i]) 355 + shca->hca_cap_mr_pgsize |= pgsize_map[i + 1]; 359 356 360 357 /* query max MTU from first port -- it's the same for all ports */ 361 358 port = (struct hipz_query_port *)rblock; ··· 566 567 567 568 static ssize_t ehca_show_debug_level(struct device_driver *ddp, char *buf) 568 569 { 569 - return snprintf(buf, PAGE_SIZE, "%d\n", 570 - ehca_debug_level); 570 + return snprintf(buf, PAGE_SIZE, "%d\n", ehca_debug_level); 571 571 } 572 572 573 573 static ssize_t ehca_store_debug_level(struct device_driver *ddp, ··· 655 657 } 656 658 static DEVICE_ATTR(adapter_handle, S_IRUGO, ehca_show_adapter_handle, NULL); 657 659 658 - static ssize_t ehca_show_mr_largepage(struct device *dev, 659 - struct device_attribute *attr, 660 - char *buf) 661 - { 662 - return sprintf(buf, "%d\n", ehca_mr_largepage); 663 - } 664 - static DEVICE_ATTR(mr_largepage, S_IRUGO, ehca_show_mr_largepage, NULL); 665 - 666 660 static struct attribute *ehca_dev_attrs[] = { 667 661 &dev_attr_adapter_handle.attr, 668 662 &dev_attr_num_ports.attr, ··· 671 681 &dev_attr_cur_mw.attr, 672 682 &dev_attr_max_pd.attr, 673 683 &dev_attr_max_ah.attr, 674 - &dev_attr_mr_largepage.attr, 675 684 NULL 676 685 }; 677 686
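With this update, debug_level is a threshold rather than an on/off flag: 1 enables traces (and error-CQE reporting), 2 adds control-block and register dumps, 3 adds the high-volume per-packet output. A minimal sketch of the convention the ehca hunks below follow; the surrounding variables (device, qp_num, mqpcb, wqe_p) are placeholders, only the call shapes are taken from the diffs:

	if (ehca_debug_level >= 1)	/* traces, error CQEs */
		ehca_dbg(device, "error CQE on qp_num=%x", qp_num);
	if (ehca_debug_level >= 2)	/* control-block dumps */
		ehca_dmp(mqpcb, 4 * 70, "qp_num=%x", qp_num);
	if (ehca_debug_level >= 3)	/* per-packet, high volume */
		ehca_dmp(wqe_p, 16 * (6 + wqe_p->nr_of_data_seg), "recv wqe");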
+10 -6
drivers/infiniband/hw/ehca/ehca_mrmw.c
··· 1794 1794 int t; 1795 1795 for (t = start_idx; t <= end_idx; t++) { 1796 1796 u64 pgaddr = page_to_pfn(sg_page(&page_list[t])) << PAGE_SHIFT; 1797 - ehca_gen_dbg("chunk_page=%lx value=%016lx", pgaddr, 1798 - *(u64 *)abs_to_virt(phys_to_abs(pgaddr))); 1797 + if (ehca_debug_level >= 3) 1798 + ehca_gen_dbg("chunk_page=%lx value=%016lx", pgaddr, 1799 + *(u64 *)abs_to_virt(phys_to_abs(pgaddr))); 1799 1800 if (pgaddr - PAGE_SIZE != *prev_pgaddr) { 1800 1801 ehca_gen_err("uncontiguous page found pgaddr=%lx " 1801 1802 "prev_pgaddr=%lx page_list_i=%x", ··· 1863 1862 pgaddr & 1864 1863 ~(pginfo->hwpage_size - 1)); 1865 1864 } 1866 - ehca_gen_dbg("kpage=%lx chunk_page=%lx " 1867 - "value=%016lx", *kpage, pgaddr, 1868 - *(u64 *)abs_to_virt( 1869 - phys_to_abs(pgaddr))); 1865 + if (ehca_debug_level >= 3) { 1866 + u64 val = *(u64 *)abs_to_virt( 1867 + phys_to_abs(pgaddr)); 1868 + ehca_gen_dbg("kpage=%lx chunk_page=%lx " 1869 + "value=%016lx", 1870 + *kpage, pgaddr, val); 1871 + } 1870 1872 prev_pgaddr = pgaddr; 1871 1873 i++; 1872 1874 pginfo->kpage_cnt++;
+9 -6
drivers/infiniband/hw/ehca/ehca_qp.c
··· 550 550 spin_lock_init(&my_qp->spinlock_r); 551 551 my_qp->qp_type = qp_type; 552 552 my_qp->ext_type = parms.ext_type; 553 + my_qp->state = IB_QPS_RESET; 553 554 554 555 if (init_attr->recv_cq) 555 556 my_qp->recv_cq = ··· 966 965 qp_num, bad_send_wqe_p); 967 966 /* convert wqe pointer to vadr */ 968 967 bad_send_wqe_v = abs_to_virt((u64)bad_send_wqe_p); 969 - if (ehca_debug_level) 968 + if (ehca_debug_level >= 2) 970 969 ehca_dmp(bad_send_wqe_v, 32, "qp_num=%x bad_wqe", qp_num); 971 970 squeue = &my_qp->ipz_squeue; 972 971 if (ipz_queue_abs_to_offset(squeue, (u64)bad_send_wqe_p, &q_ofs)) { ··· 979 978 wqe = (struct ehca_wqe *)ipz_qeit_calc(squeue, q_ofs); 980 979 *bad_wqe_cnt = 0; 981 980 while (wqe->optype != 0xff && wqe->wqef != 0xff) { 982 - if (ehca_debug_level) 981 + if (ehca_debug_level >= 2) 983 982 ehca_dmp(wqe, 32, "qp_num=%x wqe", qp_num); 984 983 wqe->nr_of_data_seg = 0; /* suppress data access */ 985 984 wqe->wqef = WQEF_PURGE; /* WQE to be purged */ ··· 1451 1450 /* no support for max_send/recv_sge yet */ 1452 1451 } 1453 1452 1454 - if (ehca_debug_level) 1453 + if (ehca_debug_level >= 2) 1455 1454 ehca_dmp(mqpcb, 4*70, "qp_num=%x", ibqp->qp_num); 1456 1455 1457 1456 h_ret = hipz_h_modify_qp(shca->ipz_hca_handle, ··· 1508 1507 1509 1508 if (attr_mask & IB_QP_QKEY) 1510 1509 my_qp->qkey = attr->qkey; 1510 + 1511 + my_qp->state = qp_new_state; 1511 1512 1512 1513 modify_qp_exit2: 1513 1514 if (squeue_locked) { /* this means: sqe -> rts */ ··· 1766 1763 if (qp_init_attr) 1767 1764 *qp_init_attr = my_qp->init_attr; 1768 1765 1769 - if (ehca_debug_level) 1766 + if (ehca_debug_level >= 2) 1770 1767 ehca_dmp(qpcb, 4*70, "qp_num=%x", qp->qp_num); 1771 1768 1772 1769 query_qp_exit1: ··· 1814 1811 goto modify_srq_exit0; 1815 1812 } 1816 1813 1817 - if (ehca_debug_level) 1814 + if (ehca_debug_level >= 2) 1818 1815 ehca_dmp(mqpcb, 4*70, "qp_num=%x", my_qp->real_qp_num); 1819 1816 1820 1817 h_ret = hipz_h_modify_qp(shca->ipz_hca_handle, my_qp->ipz_qp_handle, ··· 1867 1864 srq_attr->srq_limit = EHCA_BMASK_GET( 1868 1865 MQPCB_CURR_SRQ_LIMIT, qpcb->curr_srq_limit); 1869 1866 1870 - if (ehca_debug_level) 1867 + if (ehca_debug_level >= 2) 1871 1868 ehca_dmp(qpcb, 4*70, "qp_num=%x", my_qp->real_qp_num); 1872 1869 1873 1870 query_srq_exit1:
+27 -24
drivers/infiniband/hw/ehca/ehca_reqs.c
··· 81 81 recv_wr->sg_list[cnt_ds].length; 82 82 } 83 83 84 - if (ehca_debug_level) { 84 + if (ehca_debug_level >= 3) { 85 85 ehca_gen_dbg("RECEIVE WQE written into ipz_rqueue=%p", 86 86 ipz_rqueue); 87 87 ehca_dmp(wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "recv wqe"); ··· 281 281 return -EINVAL; 282 282 } 283 283 284 - if (ehca_debug_level) { 284 + if (ehca_debug_level >= 3) { 285 285 ehca_gen_dbg("SEND WQE written into queue qp=%p ", qp); 286 286 ehca_dmp( wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "send wqe"); 287 287 } ··· 421 421 int ret = 0; 422 422 unsigned long flags; 423 423 424 + if (unlikely(my_qp->state != IB_QPS_RTS)) { 425 + ehca_err(qp->device, "QP not in RTS state qpn=%x", qp->qp_num); 426 + return -EINVAL; 427 + } 428 + 424 429 /* LOCK the QUEUE */ 425 430 spin_lock_irqsave(&my_qp->spinlock_s, flags); 426 431 ··· 459 454 goto post_send_exit0; 460 455 } 461 456 wqe_cnt++; 462 - ehca_dbg(qp->device, "ehca_qp=%p qp_num=%x wqe_cnt=%d", 463 - my_qp, qp->qp_num, wqe_cnt); 464 457 } /* eof for cur_send_wr */ 465 458 466 459 post_send_exit0: 467 460 iosync(); /* serialize GAL register access */ 468 461 hipz_update_sqa(my_qp, wqe_cnt); 462 + if (unlikely(ret || ehca_debug_level >= 2)) 463 + ehca_dbg(qp->device, "ehca_qp=%p qp_num=%x wqe_cnt=%d ret=%i", 464 + my_qp, qp->qp_num, wqe_cnt, ret); 469 465 my_qp->message_count += wqe_cnt; 470 466 spin_unlock_irqrestore(&my_qp->spinlock_s, flags); 471 467 return ret; ··· 526 520 goto post_recv_exit0; 527 521 } 528 522 wqe_cnt++; 529 - ehca_dbg(dev, "ehca_qp=%p qp_num=%x wqe_cnt=%d", 530 - my_qp, my_qp->real_qp_num, wqe_cnt); 531 523 } /* eof for cur_recv_wr */ 532 524 533 525 post_recv_exit0: 534 526 iosync(); /* serialize GAL register access */ 535 527 hipz_update_rqa(my_qp, wqe_cnt); 528 + if (unlikely(ret || ehca_debug_level >= 2)) 529 + ehca_dbg(dev, "ehca_qp=%p qp_num=%x wqe_cnt=%d ret=%i", 530 + my_qp, my_qp->real_qp_num, wqe_cnt, ret); 536 531 spin_unlock_irqrestore(&my_qp->spinlock_r, flags); 537 532 return ret; 538 533 } ··· 577 570 struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq); 578 571 struct ehca_cqe *cqe; 579 572 struct ehca_qp *my_qp; 580 - int cqe_count = 0; 573 + int cqe_count = 0, is_error; 581 574 582 575 poll_cq_one_read_cqe: 583 576 cqe = (struct ehca_cqe *) 584 577 ipz_qeit_get_inc_valid(&my_cq->ipz_queue); 585 578 if (!cqe) { 586 579 ret = -EAGAIN; 587 - ehca_dbg(cq->device, "Completion queue is empty ehca_cq=%p " 588 - "cq_num=%x ret=%i", my_cq, my_cq->cq_number, ret); 589 - goto poll_cq_one_exit0; 580 + if (ehca_debug_level >= 3) 581 + ehca_dbg(cq->device, "Completion queue is empty " 582 + "my_cq=%p cq_num=%x", my_cq, my_cq->cq_number); 583 + goto poll_cq_one_exit0; 590 584 } 591 585 592 586 /* prevents loads being reordered across this point */ ··· 617 609 ehca_dbg(cq->device, 618 610 "Got CQE with purged bit qp_num=%x src_qp=%x", 619 611 cqe->local_qp_number, cqe->remote_qp_number); 620 - if (ehca_debug_level) 612 + if (ehca_debug_level >= 2) 621 613 ehca_dmp(cqe, 64, "qp_num=%x src_qp=%x", 622 614 cqe->local_qp_number, 623 615 cqe->remote_qp_number); ··· 630 622 } 631 623 } 632 624 633 - /* tracing cqe */ 634 - if (unlikely(ehca_debug_level)) { 625 + is_error = cqe->status & WC_STATUS_ERROR_BIT; 626 + 627 + /* trace error CQEs if debug_level >= 1, trace all CQEs if >= 3 */ 628 + if (unlikely(ehca_debug_level >= 3 || (ehca_debug_level && is_error))) { 635 629 ehca_dbg(cq->device, 636 - "Received COMPLETION ehca_cq=%p cq_num=%x -----", 637 - my_cq, my_cq->cq_number); 630 + "Received %sCOMPLETION 
ehca_cq=%p cq_num=%x -----", 631 + is_error ? "ERROR " : "", my_cq, my_cq->cq_number); 638 632 ehca_dmp(cqe, 64, "ehca_cq=%p cq_num=%x", 639 633 my_cq, my_cq->cq_number); 640 634 ehca_dbg(cq->device, ··· 659 649 /* update also queue adder to throw away this entry!!! */ 660 650 goto poll_cq_one_exit0; 661 651 } 652 + 662 653 /* eval ib_wc_status */ 663 - if (unlikely(cqe->status & WC_STATUS_ERROR_BIT)) { 654 + if (unlikely(is_error)) { 664 655 /* complete with errors */ 665 656 map_ib_wc_status(cqe->status, &wc->status); 666 657 wc->vendor_err = wc->status; ··· 681 670 wc->wc_flags = cqe->w_completion_flags; 682 671 wc->imm_data = cpu_to_be32(cqe->immediate_data); 683 672 wc->sl = cqe->service_level; 684 - 685 - if (unlikely(wc->status != IB_WC_SUCCESS)) 686 - ehca_dbg(cq->device, 687 - "ehca_cq=%p cq_num=%x WARNING unsuccessful cqe " 688 - "OPType=%x status=%x qp_num=%x src_qp=%x wr_id=%lx " 689 - "cqe=%p", my_cq, my_cq->cq_number, cqe->optype, 690 - cqe->status, cqe->local_qp_number, 691 - cqe->remote_qp_number, cqe->work_request_id, cqe); 692 673 693 674 poll_cq_one_exit0: 694 675 if (cqe_count > 0)
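The new my_qp->state field set in ehca_qp.c above is what lets the send path fail fast here. A hedged sketch of what a kernel ULP now sees from ehca (assuming <rdma/ib_verbs.h>; the QP and work request are passed in, and the RTS transition is abbreviated to the state mask only):

	#include <rdma/ib_verbs.h>

	static int example_send(struct ib_qp *qp, struct ib_send_wr *wr)
	{
		struct ib_qp_attr attr = { .qp_state = IB_QPS_RTS };
		struct ib_send_wr *bad_wr;
		int err;

		/* QP still in RESET/INIT/RTR: ehca now returns -EINVAL up front
		 * instead of handing the WQE to the hardware */
		err = ib_post_send(qp, wr, &bad_wr);
		if (err != -EINVAL)
			return err;

		/* a real RTS transition needs the other attributes required for
		 * the QP type; IB_QP_STATE alone is shown for brevity */
		err = ib_modify_qp(qp, &attr, IB_QP_STATE);
		if (err)
			return err;

		return ib_post_send(qp, wr, &bad_wr);	/* accepted */
	}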
+2 -4
drivers/infiniband/hw/ehca/ehca_uverbs.c
··· 211 211 break; 212 212 213 213 case 1: /* qp rqueue_addr */ 214 - ehca_dbg(qp->ib_qp.device, "qp_num=%x rqueue", 215 - qp->ib_qp.qp_num); 214 + ehca_dbg(qp->ib_qp.device, "qp_num=%x rq", qp->ib_qp.qp_num); 216 215 ret = ehca_mmap_queue(vma, &qp->ipz_rqueue, 217 216 &qp->mm_count_rqueue); 218 217 if (unlikely(ret)) { ··· 223 224 break; 224 225 225 226 case 2: /* qp squeue_addr */ 226 - ehca_dbg(qp->ib_qp.device, "qp_num=%x squeue", 227 - qp->ib_qp.qp_num); 227 + ehca_dbg(qp->ib_qp.device, "qp_num=%x sq", qp->ib_qp.qp_num); 228 228 ret = ehca_mmap_queue(vma, &qp->ipz_squeue, 229 229 &qp->mm_count_squeue); 230 230 if (unlikely(ret)) {
+12 -11
drivers/infiniband/hw/ehca/hcp_if.c
··· 123 123 int i, sleep_msecs; 124 124 unsigned long flags = 0; 125 125 126 - ehca_gen_dbg("opcode=%lx " HCALL7_REGS_FORMAT, 127 - opcode, arg1, arg2, arg3, arg4, arg5, arg6, arg7); 126 + if (unlikely(ehca_debug_level >= 2)) 127 + ehca_gen_dbg("opcode=%lx " HCALL7_REGS_FORMAT, 128 + opcode, arg1, arg2, arg3, arg4, arg5, arg6, arg7); 128 129 129 130 for (i = 0; i < 5; i++) { 130 131 /* serialize hCalls to work around firmware issue */ ··· 149 148 opcode, ret, arg1, arg2, arg3, 150 149 arg4, arg5, arg6, arg7); 151 150 else 152 - ehca_gen_dbg("opcode=%lx ret=%li", opcode, ret); 151 + if (unlikely(ehca_debug_level >= 2)) 152 + ehca_gen_dbg("opcode=%lx ret=%li", opcode, ret); 153 153 154 154 return ret; 155 155 } ··· 174 172 int i, sleep_msecs; 175 173 unsigned long flags = 0; 176 174 177 - ehca_gen_dbg("INPUT -- opcode=%lx " HCALL9_REGS_FORMAT, opcode, 178 - arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9); 175 + if (unlikely(ehca_debug_level >= 2)) 176 + ehca_gen_dbg("INPUT -- opcode=%lx " HCALL9_REGS_FORMAT, opcode, 177 + arg1, arg2, arg3, arg4, arg5, 178 + arg6, arg7, arg8, arg9); 179 179 180 180 for (i = 0; i < 5; i++) { 181 181 /* serialize hCalls to work around firmware issue */ ··· 205 201 ret, outs[0], outs[1], outs[2], outs[3], 206 202 outs[4], outs[5], outs[6], outs[7], 207 203 outs[8]); 208 - } else 204 + } else if (unlikely(ehca_debug_level >= 2)) 209 205 ehca_gen_dbg("OUTPUT -- ret=%li " HCALL9_REGS_FORMAT, 210 206 ret, outs[0], outs[1], outs[2], outs[3], 211 207 outs[4], outs[5], outs[6], outs[7], ··· 385 381 r_cb, /* r6 */ 386 382 0, 0, 0, 0); 387 383 388 - if (ehca_debug_level) 384 + if (ehca_debug_level >= 2) 389 385 ehca_dmp(query_port_response_block, 64, "response_block"); 390 386 391 387 return ret; ··· 735 731 u64 ret; 736 732 u64 outs[PLPAR_HCALL9_BUFSIZE]; 737 733 738 - ehca_gen_dbg("kernel PAGE_SIZE=%x access_ctrl=%016x " 739 - "vaddr=%lx length=%lx", 740 - (u32)PAGE_SIZE, access_ctrl, vaddr, length); 741 734 ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs, 742 735 adapter_handle.handle, /* r4 */ 743 736 5, /* r5 */ ··· 759 758 { 760 759 u64 ret; 761 760 762 - if (unlikely(ehca_debug_level >= 2)) { 761 + if (unlikely(ehca_debug_level >= 3)) { 763 762 if (count > 1) { 764 763 u64 *kpage; 765 764 int i;
+3 -3
drivers/infiniband/hw/mlx4/cq.c
··· 204 204 205 205 uar = &to_mucontext(context)->uar; 206 206 } else { 207 - err = mlx4_ib_db_alloc(dev, &cq->db, 1); 207 + err = mlx4_db_alloc(dev->dev, &cq->db, 1); 208 208 if (err) 209 209 goto err_cq; 210 210 ··· 250 250 251 251 err_db: 252 252 if (!context) 253 - mlx4_ib_db_free(dev, &cq->db); 253 + mlx4_db_free(dev->dev, &cq->db); 254 254 255 255 err_cq: 256 256 kfree(cq); ··· 435 435 ib_umem_release(mcq->umem); 436 436 } else { 437 437 mlx4_ib_free_cq_buf(dev, &mcq->buf, cq->cqe + 1); 438 - mlx4_ib_db_free(dev, &mcq->db); 438 + mlx4_db_free(dev->dev, &mcq->db); 439 439 } 440 440 441 441 kfree(mcq);
+2 -120
drivers/infiniband/hw/mlx4/doorbell.c
··· 34 34 35 35 #include "mlx4_ib.h" 36 36 37 - struct mlx4_ib_db_pgdir { 38 - struct list_head list; 39 - DECLARE_BITMAP(order0, MLX4_IB_DB_PER_PAGE); 40 - DECLARE_BITMAP(order1, MLX4_IB_DB_PER_PAGE / 2); 41 - unsigned long *bits[2]; 42 - __be32 *db_page; 43 - dma_addr_t db_dma; 44 - }; 45 - 46 - static struct mlx4_ib_db_pgdir *mlx4_ib_alloc_db_pgdir(struct mlx4_ib_dev *dev) 47 - { 48 - struct mlx4_ib_db_pgdir *pgdir; 49 - 50 - pgdir = kzalloc(sizeof *pgdir, GFP_KERNEL); 51 - if (!pgdir) 52 - return NULL; 53 - 54 - bitmap_fill(pgdir->order1, MLX4_IB_DB_PER_PAGE / 2); 55 - pgdir->bits[0] = pgdir->order0; 56 - pgdir->bits[1] = pgdir->order1; 57 - pgdir->db_page = dma_alloc_coherent(dev->ib_dev.dma_device, 58 - PAGE_SIZE, &pgdir->db_dma, 59 - GFP_KERNEL); 60 - if (!pgdir->db_page) { 61 - kfree(pgdir); 62 - return NULL; 63 - } 64 - 65 - return pgdir; 66 - } 67 - 68 - static int mlx4_ib_alloc_db_from_pgdir(struct mlx4_ib_db_pgdir *pgdir, 69 - struct mlx4_ib_db *db, int order) 70 - { 71 - int o; 72 - int i; 73 - 74 - for (o = order; o <= 1; ++o) { 75 - i = find_first_bit(pgdir->bits[o], MLX4_IB_DB_PER_PAGE >> o); 76 - if (i < MLX4_IB_DB_PER_PAGE >> o) 77 - goto found; 78 - } 79 - 80 - return -ENOMEM; 81 - 82 - found: 83 - clear_bit(i, pgdir->bits[o]); 84 - 85 - i <<= o; 86 - 87 - if (o > order) 88 - set_bit(i ^ 1, pgdir->bits[order]); 89 - 90 - db->u.pgdir = pgdir; 91 - db->index = i; 92 - db->db = pgdir->db_page + db->index; 93 - db->dma = pgdir->db_dma + db->index * 4; 94 - db->order = order; 95 - 96 - return 0; 97 - } 98 - 99 - int mlx4_ib_db_alloc(struct mlx4_ib_dev *dev, struct mlx4_ib_db *db, int order) 100 - { 101 - struct mlx4_ib_db_pgdir *pgdir; 102 - int ret = 0; 103 - 104 - mutex_lock(&dev->pgdir_mutex); 105 - 106 - list_for_each_entry(pgdir, &dev->pgdir_list, list) 107 - if (!mlx4_ib_alloc_db_from_pgdir(pgdir, db, order)) 108 - goto out; 109 - 110 - pgdir = mlx4_ib_alloc_db_pgdir(dev); 111 - if (!pgdir) { 112 - ret = -ENOMEM; 113 - goto out; 114 - } 115 - 116 - list_add(&pgdir->list, &dev->pgdir_list); 117 - 118 - /* This should never fail -- we just allocated an empty page: */ 119 - WARN_ON(mlx4_ib_alloc_db_from_pgdir(pgdir, db, order)); 120 - 121 - out: 122 - mutex_unlock(&dev->pgdir_mutex); 123 - 124 - return ret; 125 - } 126 - 127 - void mlx4_ib_db_free(struct mlx4_ib_dev *dev, struct mlx4_ib_db *db) 128 - { 129 - int o; 130 - int i; 131 - 132 - mutex_lock(&dev->pgdir_mutex); 133 - 134 - o = db->order; 135 - i = db->index; 136 - 137 - if (db->order == 0 && test_bit(i ^ 1, db->u.pgdir->order0)) { 138 - clear_bit(i ^ 1, db->u.pgdir->order0); 139 - ++o; 140 - } 141 - 142 - i >>= o; 143 - set_bit(i, db->u.pgdir->bits[o]); 144 - 145 - if (bitmap_full(db->u.pgdir->order1, MLX4_IB_DB_PER_PAGE / 2)) { 146 - dma_free_coherent(dev->ib_dev.dma_device, PAGE_SIZE, 147 - db->u.pgdir->db_page, db->u.pgdir->db_dma); 148 - list_del(&db->u.pgdir->list); 149 - kfree(db->u.pgdir); 150 - } 151 - 152 - mutex_unlock(&dev->pgdir_mutex); 153 - } 154 - 155 37 struct mlx4_ib_user_db_page { 156 38 struct list_head list; 157 39 struct ib_umem *umem; ··· 42 160 }; 43 161 44 162 int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context, unsigned long virt, 45 - struct mlx4_ib_db *db) 163 + struct mlx4_db *db) 46 164 { 47 165 struct mlx4_ib_user_db_page *page; 48 166 struct ib_umem_chunk *chunk; ··· 84 202 return err; 85 203 } 86 204 87 - void mlx4_ib_db_unmap_user(struct mlx4_ib_ucontext *context, struct mlx4_ib_db *db) 205 + void mlx4_ib_db_unmap_user(struct mlx4_ib_ucontext *context, struct mlx4_db *db) 88 
206 { 89 207 mutex_lock(&context->db_page_mutex); 90 208
-3
drivers/infiniband/hw/mlx4/main.c
··· 557 557 goto err_uar; 558 558 MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock); 559 559 560 - INIT_LIST_HEAD(&ibdev->pgdir_list); 561 - mutex_init(&ibdev->pgdir_mutex); 562 - 563 560 ibdev->dev = dev; 564 561 565 562 strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX);
+5 -28
drivers/infiniband/hw/mlx4/mlx4_ib.h
··· 43 43 #include <linux/mlx4/device.h> 44 44 #include <linux/mlx4/doorbell.h> 45 45 46 - enum { 47 - MLX4_IB_DB_PER_PAGE = PAGE_SIZE / 4 48 - }; 49 - 50 - struct mlx4_ib_db_pgdir; 51 - struct mlx4_ib_user_db_page; 52 - 53 - struct mlx4_ib_db { 54 - __be32 *db; 55 - union { 56 - struct mlx4_ib_db_pgdir *pgdir; 57 - struct mlx4_ib_user_db_page *user_page; 58 - } u; 59 - dma_addr_t dma; 60 - int index; 61 - int order; 62 - }; 63 - 64 46 struct mlx4_ib_ucontext { 65 47 struct ib_ucontext ibucontext; 66 48 struct mlx4_uar uar; ··· 70 88 struct mlx4_cq mcq; 71 89 struct mlx4_ib_cq_buf buf; 72 90 struct mlx4_ib_cq_resize *resize_buf; 73 - struct mlx4_ib_db db; 91 + struct mlx4_db db; 74 92 spinlock_t lock; 75 93 struct mutex resize_mutex; 76 94 struct ib_umem *umem; ··· 109 127 struct mlx4_qp mqp; 110 128 struct mlx4_buf buf; 111 129 112 - struct mlx4_ib_db db; 130 + struct mlx4_db db; 113 131 struct mlx4_ib_wq rq; 114 132 115 133 u32 doorbell_qpn; ··· 136 154 struct ib_srq ibsrq; 137 155 struct mlx4_srq msrq; 138 156 struct mlx4_buf buf; 139 - struct mlx4_ib_db db; 157 + struct mlx4_db db; 140 158 u64 *wrid; 141 159 spinlock_t lock; 142 160 int head; ··· 156 174 struct ib_device ib_dev; 157 175 struct mlx4_dev *dev; 158 176 void __iomem *uar_map; 159 - 160 - struct list_head pgdir_list; 161 - struct mutex pgdir_mutex; 162 177 163 178 struct mlx4_uar priv_uar; 164 179 u32 priv_pdn; ··· 227 248 return container_of(ibah, struct mlx4_ib_ah, ibah); 228 249 } 229 250 230 - int mlx4_ib_db_alloc(struct mlx4_ib_dev *dev, struct mlx4_ib_db *db, int order); 231 - void mlx4_ib_db_free(struct mlx4_ib_dev *dev, struct mlx4_ib_db *db); 232 251 int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context, unsigned long virt, 233 - struct mlx4_ib_db *db); 234 - void mlx4_ib_db_unmap_user(struct mlx4_ib_ucontext *context, struct mlx4_ib_db *db); 252 + struct mlx4_db *db); 253 + void mlx4_ib_db_unmap_user(struct mlx4_ib_ucontext *context, struct mlx4_db *db); 235 254 236 255 struct ib_mr *mlx4_ib_get_dma_mr(struct ib_pd *pd, int acc); 237 256 int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
+3 -3
drivers/infiniband/hw/mlx4/qp.c
··· 514 514 goto err; 515 515 516 516 if (!init_attr->srq) { 517 - err = mlx4_ib_db_alloc(dev, &qp->db, 0); 517 + err = mlx4_db_alloc(dev->dev, &qp->db, 0); 518 518 if (err) 519 519 goto err; 520 520 ··· 580 580 581 581 err_db: 582 582 if (!pd->uobject && !init_attr->srq) 583 - mlx4_ib_db_free(dev, &qp->db); 583 + mlx4_db_free(dev->dev, &qp->db); 584 584 585 585 err: 586 586 return err; ··· 666 666 kfree(qp->rq.wrid); 667 667 mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf); 668 668 if (!qp->ibqp.srq) 669 - mlx4_ib_db_free(dev, &qp->db); 669 + mlx4_db_free(dev->dev, &qp->db); 670 670 } 671 671 } 672 672
+3 -3
drivers/infiniband/hw/mlx4/srq.c
··· 129 129 if (err) 130 130 goto err_mtt; 131 131 } else { 132 - err = mlx4_ib_db_alloc(dev, &srq->db, 0); 132 + err = mlx4_db_alloc(dev->dev, &srq->db, 0); 133 133 if (err) 134 134 goto err_srq; 135 135 ··· 200 200 201 201 err_db: 202 202 if (!pd->uobject) 203 - mlx4_ib_db_free(dev, &srq->db); 203 + mlx4_db_free(dev->dev, &srq->db); 204 204 205 205 err_srq: 206 206 kfree(srq); ··· 267 267 kfree(msrq->wrid); 268 268 mlx4_buf_free(dev->dev, msrq->msrq.max << msrq->msrq.wqe_shift, 269 269 &msrq->buf); 270 - mlx4_ib_db_free(dev, &msrq->db); 270 + mlx4_db_free(dev->dev, &msrq->db); 271 271 } 272 272 273 273 kfree(msrq);
+7 -8
drivers/infiniband/hw/nes/nes.c
··· 139 139 140 140 addr = ntohl(ifa->ifa_address); 141 141 mask = ntohl(ifa->ifa_mask); 142 - nes_debug(NES_DBG_NETDEV, "nes_inetaddr_event: ip address %08X, netmask %08X.\n", 143 - addr, mask); 142 + nes_debug(NES_DBG_NETDEV, "nes_inetaddr_event: ip address " NIPQUAD_FMT 143 + ", netmask " NIPQUAD_FMT ".\n", 144 + HIPQUAD(addr), HIPQUAD(mask)); 144 145 list_for_each_entry(nesdev, &nes_dev_list, list) { 145 146 nes_debug(NES_DBG_NETDEV, "Nesdev list entry = 0x%p. (%s)\n", 146 147 nesdev, nesdev->netdev[0]->name); ··· 354 353 */ 355 354 static void nes_print_macaddr(struct net_device *netdev) 356 355 { 357 - nes_debug(NES_DBG_INIT, "%s: MAC %02X:%02X:%02X:%02X:%02X:%02X, IRQ %u\n", 358 - netdev->name, 359 - netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2], 360 - netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5], 361 - netdev->irq); 362 - } 356 + DECLARE_MAC_BUF(mac); 363 357 358 + nes_debug(NES_DBG_INIT, "%s: %s, IRQ %u\n", 359 + netdev->name, print_mac(mac, netdev->dev_addr), netdev->irq); 360 + } 364 361 365 362 /** 366 363 * nes_interrupt - handle interrupts
+14 -13
drivers/infiniband/hw/nes/nes_cm.c
··· 852 852 /* get a handle on the hte */ 853 853 hte = &cm_core->connected_nodes; 854 854 855 - nes_debug(NES_DBG_CM, "Searching for an owner node:%x:%x from core %p->%p\n", 856 - loc_addr, loc_port, cm_core, hte); 855 + nes_debug(NES_DBG_CM, "Searching for an owner node: " NIPQUAD_FMT ":%x from core %p->%p\n", 856 + HIPQUAD(loc_addr), loc_port, cm_core, hte); 857 857 858 858 /* walk list and find cm_node associated with this session ID */ 859 859 spin_lock_irqsave(&cm_core->ht_lock, flags); ··· 902 902 } 903 903 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags); 904 904 905 - nes_debug(NES_DBG_CM, "Unable to find listener- %x:%x\n", 906 - dst_addr, dst_port); 905 + nes_debug(NES_DBG_CM, "Unable to find listener for " NIPQUAD_FMT ":%x\n", 906 + HIPQUAD(dst_addr), dst_port); 907 907 908 908 /* no listener */ 909 909 return NULL; ··· 1054 1054 int arpindex = 0; 1055 1055 struct nes_device *nesdev; 1056 1056 struct nes_adapter *nesadapter; 1057 + DECLARE_MAC_BUF(mac); 1057 1058 1058 1059 /* create an hte and cm_node for this instance */ 1059 1060 cm_node = kzalloc(sizeof(*cm_node), GFP_ATOMIC); ··· 1067 1066 cm_node->loc_port = cm_info->loc_port; 1068 1067 cm_node->rem_port = cm_info->rem_port; 1069 1068 cm_node->send_write0 = send_first; 1070 - nes_debug(NES_DBG_CM, "Make node addresses : loc = %x:%x, rem = %x:%x\n", 1071 - cm_node->loc_addr, cm_node->loc_port, cm_node->rem_addr, cm_node->rem_port); 1069 + nes_debug(NES_DBG_CM, "Make node addresses : loc = " NIPQUAD_FMT ":%x, rem = " NIPQUAD_FMT ":%x\n", 1070 + HIPQUAD(cm_node->loc_addr), cm_node->loc_port, 1071 + HIPQUAD(cm_node->rem_addr), cm_node->rem_port); 1072 1072 cm_node->listener = listener; 1073 1073 cm_node->netdev = nesvnic->netdev; 1074 1074 cm_node->cm_id = cm_info->cm_id; ··· 1118 1116 1119 1117 /* copy the mac addr to node context */ 1120 1118 memcpy(cm_node->rem_mac, nesadapter->arp_table[arpindex].mac_addr, ETH_ALEN); 1121 - nes_debug(NES_DBG_CM, "Remote mac addr from arp table:%02x," 1122 - " %02x, %02x, %02x, %02x, %02x\n", 1123 - cm_node->rem_mac[0], cm_node->rem_mac[1], 1124 - cm_node->rem_mac[2], cm_node->rem_mac[3], 1125 - cm_node->rem_mac[4], cm_node->rem_mac[5]); 1119 + nes_debug(NES_DBG_CM, "Remote mac addr from arp table: %s\n", 1120 + print_mac(mac, cm_node->rem_mac)); 1126 1121 1127 1122 add_hte_node(cm_core, cm_node); 1128 1123 atomic_inc(&cm_nodes_created); ··· 1849 1850 nfo.rem_addr = ntohl(iph->saddr); 1850 1851 nfo.rem_port = ntohs(tcph->source); 1851 1852 1852 - nes_debug(NES_DBG_CM, "Received packet: dest=0x%08X:0x%04X src=0x%08X:0x%04X\n", 1853 - iph->daddr, tcph->dest, iph->saddr, tcph->source); 1853 + nes_debug(NES_DBG_CM, "Received packet: dest=" NIPQUAD_FMT 1854 + ":0x%04X src=" NIPQUAD_FMT ":0x%04X\n", 1855 + NIPQUAD(iph->daddr), tcph->dest, 1856 + NIPQUAD(iph->saddr), tcph->source); 1854 1857 1855 1858 /* note: this call is going to increment cm_node ref count */ 1856 1859 cm_node = find_node(cm_core,
+9 -11
drivers/infiniband/hw/nes/nes_hw.c
··· 636 636 nes_debug(NES_DBG_INIT, "Did not see full soft reset done.\n"); 637 637 return 0; 638 638 } 639 + 640 + i = 0; 641 + while ((nes_read_indexed(nesdev, NES_IDX_INT_CPU_STATUS) != 0x80) && i++ < 10000) 642 + mdelay(1); 643 + if (i >= 10000) { 644 + printk(KERN_ERR PFX "Internal CPU not ready, status = %02X\n", 645 + nes_read_indexed(nesdev, NES_IDX_INT_CPU_STATUS)); 646 + return 0; 647 + } 639 648 } 640 649 641 650 /* port reset */ ··· 691 682 nes_debug(NES_DBG_INIT, "Serdes 1 not ready, status=%x\n", u32temp); 692 683 return 0; 693 684 } 694 - } 695 - 696 - 697 - 698 - i = 0; 699 - while ((nes_read_indexed(nesdev, NES_IDX_INT_CPU_STATUS) != 0x80) && i++ < 10000) 700 - mdelay(1); 701 - if (i >= 10000) { 702 - printk(KERN_ERR PFX "Internal CPU not ready, status = %02X\n", 703 - nes_read_indexed(nesdev, NES_IDX_INT_CPU_STATUS)); 704 - return 0; 705 685 } 706 686 707 687 return port_count;
+1 -1
drivers/infiniband/hw/nes/nes_hw.h
··· 905 905 }; 906 906 907 907 struct nes_hw_cq { 908 - struct nes_hw_cqe volatile *cq_vbase; /* PCI memory for host rings */ 908 + struct nes_hw_cqe *cq_vbase; /* PCI memory for host rings */ 909 909 void (*ce_handler)(struct nes_device *nesdev, struct nes_hw_cq *cq); 910 910 dma_addr_t cq_pbase; /* PCI memory for host rings */ 911 911 u16 cq_head;
+8 -10
drivers/infiniband/hw/nes/nes_nic.c
··· 787 787 int i; 788 788 u32 macaddr_low; 789 789 u16 macaddr_high; 790 + DECLARE_MAC_BUF(mac); 790 791 791 792 if (!is_valid_ether_addr(mac_addr->sa_data)) 792 793 return -EADDRNOTAVAIL; 793 794 794 795 memcpy(netdev->dev_addr, mac_addr->sa_data, netdev->addr_len); 795 - printk(PFX "%s: Address length = %d, Address = %02X%02X%02X%02X%02X%02X..\n", 796 - __func__, netdev->addr_len, 797 - mac_addr->sa_data[0], mac_addr->sa_data[1], 798 - mac_addr->sa_data[2], mac_addr->sa_data[3], 799 - mac_addr->sa_data[4], mac_addr->sa_data[5]); 796 + printk(PFX "%s: Address length = %d, Address = %s\n", 797 + __func__, netdev->addr_len, print_mac(mac, mac_addr->sa_data)); 800 798 macaddr_high = ((u16)netdev->dev_addr[0]) << 8; 801 799 macaddr_high += (u16)netdev->dev_addr[1]; 802 800 macaddr_low = ((u32)netdev->dev_addr[2]) << 24; ··· 876 878 if (mc_nic_index < 0) 877 879 mc_nic_index = nesvnic->nic_index; 878 880 if (multicast_addr) { 879 - nes_debug(NES_DBG_NIC_RX, "Assigning MC Address = %02X%02X%02X%02X%02X%02X to register 0x%04X nic_idx=%d\n", 880 - multicast_addr->dmi_addr[0], multicast_addr->dmi_addr[1], 881 - multicast_addr->dmi_addr[2], multicast_addr->dmi_addr[3], 882 - multicast_addr->dmi_addr[4], multicast_addr->dmi_addr[5], 883 - perfect_filter_register_address+(mc_index * 8), mc_nic_index); 881 + DECLARE_MAC_BUF(mac); 882 + nes_debug(NES_DBG_NIC_RX, "Assigning MC Address %s to register 0x%04X nic_idx=%d\n", 883 + print_mac(mac, multicast_addr->dmi_addr), 884 + perfect_filter_register_address+(mc_index * 8), 885 + mc_nic_index); 884 886 macaddr_high = ((u16)multicast_addr->dmi_addr[0]) << 8; 885 887 macaddr_high += (u16)multicast_addr->dmi_addr[1]; 886 888 macaddr_low = ((u32)multicast_addr->dmi_addr[2]) << 24;
+3 -1
drivers/infiniband/hw/nes/nes_utils.c
··· 660 660 661 661 /* DELETE or RESOLVE */ 662 662 if (arp_index == nesadapter->arp_table_size) { 663 - nes_debug(NES_DBG_NETDEV, "mac address not in ARP table - cannot delete or resolve\n"); 663 + nes_debug(NES_DBG_NETDEV, "MAC for " NIPQUAD_FMT " not in ARP table - cannot %s\n", 664 + HIPQUAD(ip_addr), 665 + action == NES_ARP_RESOLVE ? "resolve" : "delete"); 664 666 return -1; 665 667 } 666 668
+7 -1
drivers/infiniband/hw/nes/nes_verbs.c
··· 1976 1976 1977 1977 if (nescq->cq_mem_size) 1978 1978 pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size, 1979 - (void *)nescq->hw_cq.cq_vbase, nescq->hw_cq.cq_pbase); 1979 + nescq->hw_cq.cq_vbase, nescq->hw_cq.cq_pbase); 1980 1980 kfree(nescq); 1981 1981 1982 1982 return ret; ··· 3610 3610 while (cqe_count < num_entries) { 3611 3611 if (le32_to_cpu(nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX]) & 3612 3612 NES_CQE_VALID) { 3613 + /* 3614 + * Make sure we read CQ entry contents *after* 3615 + * we've checked the valid bit. 3616 + */ 3617 + rmb(); 3618 + 3613 3619 cqe = nescq->hw_cq.cq_vbase[head]; 3614 3620 nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX] = 0; 3615 3621 u32temp = le32_to_cpu(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX]);
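Removing the volatile qualifier from nes_hw_cq.cq_vbase (nes_hw.h above) is what makes the explicit rmb() here necessary: the valid bit must be observed before any other field of the CQE is loaded. A generic sketch of the pattern, assuming the nes_hw.h definitions; the head/ring-size handling is simplified and the function name is hypothetical:

	static int example_count_valid(struct nes_hw_cq *cq, u32 head, u32 cq_size)
	{
		int polled = 0;

		while (le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX]) &
		       NES_CQE_VALID) {
			/* order the remaining CQE loads after the valid-bit check;
			 * 'volatile' only kept the compiler from caching the entry,
			 * rmb() also constrains the CPU */
			rmb();
			/* ... read the other cqe_words and build the ib_wc ... */
			head = (head + 1) % cq_size;
			polled++;
		}

		return polled;
	}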
+16 -4
drivers/infiniband/ulp/ipoib/ipoib.h
··· 56 56 /* constants */ 57 57 58 58 enum { 59 - IPOIB_PACKET_SIZE = 2048, 60 - IPOIB_BUF_SIZE = IPOIB_PACKET_SIZE + IB_GRH_BYTES, 61 - 62 59 IPOIB_ENCAP_LEN = 4, 60 + 61 + IPOIB_UD_HEAD_SIZE = IB_GRH_BYTES + IPOIB_ENCAP_LEN, 62 + IPOIB_UD_RX_SG = 2, /* max buffer needed for 4K mtu */ 63 63 64 64 IPOIB_CM_MTU = 0x10000 - 0x10, /* padding to align header to 16 */ 65 65 IPOIB_CM_BUF_SIZE = IPOIB_CM_MTU + IPOIB_ENCAP_LEN, ··· 139 139 140 140 struct ipoib_rx_buf { 141 141 struct sk_buff *skb; 142 - u64 mapping; 142 + u64 mapping[IPOIB_UD_RX_SG]; 143 143 }; 144 144 145 145 struct ipoib_tx_buf { ··· 294 294 295 295 unsigned int admin_mtu; 296 296 unsigned int mcast_mtu; 297 + unsigned int max_ib_mtu; 297 298 298 299 struct ipoib_rx_buf *rx_ring; 299 300 ··· 305 304 struct ib_sge tx_sge[MAX_SKB_FRAGS + 1]; 306 305 struct ib_send_wr tx_wr; 307 306 unsigned tx_outstanding; 307 + 308 + struct ib_recv_wr rx_wr; 309 + struct ib_sge rx_sge[IPOIB_UD_RX_SG]; 308 310 309 311 struct ib_wc ibwc[IPOIB_NUM_WC]; 310 312 ··· 369 365 370 366 struct list_head list; 371 367 }; 368 + 369 + #define IPOIB_UD_MTU(ib_mtu) (ib_mtu - IPOIB_ENCAP_LEN) 370 + #define IPOIB_UD_BUF_SIZE(ib_mtu) (ib_mtu + IB_GRH_BYTES) 371 + 372 + static inline int ipoib_ud_need_sg(unsigned int ib_mtu) 373 + { 374 + return IPOIB_UD_BUF_SIZE(ib_mtu) > PAGE_SIZE; 375 + } 372 376 373 377 /* 374 378 * We stash a pointer to our private neighbour information after our
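Whether the UD receive path needs the second scatter/gather entry follows directly from these macros. Assuming 4 KB pages and the usual 40-byte GRH (IB_GRH_BYTES):

	2K IB MTU: IPOIB_UD_BUF_SIZE(2048) = 2048 + 40 = 2088 <= PAGE_SIZE, so one linear buffer (rx_wr.num_sge == 1)
	4K IB MTU: IPOIB_UD_BUF_SIZE(4096) = 4096 + 40 = 4136 > PAGE_SIZE, so ipoib_ud_need_sg() is true

In the scatter/gather case the skb's linear part holds only IPOIB_UD_HEAD_SIZE = 40 + 4 = 44 bytes (the GRH plus the IPoIB encapsulation header) and the rest of the datagram lands in one attached page; 44 + 4096 = 4140 bytes of buffer covers the 4136-byte worst case.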
+88 -37
drivers/infiniband/ulp/ipoib/ipoib_ib.c
··· 89 89 spin_unlock_irqrestore(&priv->lock, flags); 90 90 } 91 91 92 + static void ipoib_ud_dma_unmap_rx(struct ipoib_dev_priv *priv, 93 + u64 mapping[IPOIB_UD_RX_SG]) 94 + { 95 + if (ipoib_ud_need_sg(priv->max_ib_mtu)) { 96 + ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_UD_HEAD_SIZE, 97 + DMA_FROM_DEVICE); 98 + ib_dma_unmap_page(priv->ca, mapping[1], PAGE_SIZE, 99 + DMA_FROM_DEVICE); 100 + } else 101 + ib_dma_unmap_single(priv->ca, mapping[0], 102 + IPOIB_UD_BUF_SIZE(priv->max_ib_mtu), 103 + DMA_FROM_DEVICE); 104 + } 105 + 106 + static void ipoib_ud_skb_put_frags(struct ipoib_dev_priv *priv, 107 + struct sk_buff *skb, 108 + unsigned int length) 109 + { 110 + if (ipoib_ud_need_sg(priv->max_ib_mtu)) { 111 + skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; 112 + unsigned int size; 113 + /* 114 + * There is only two buffers needed for max_payload = 4K, 115 + * first buf size is IPOIB_UD_HEAD_SIZE 116 + */ 117 + skb->tail += IPOIB_UD_HEAD_SIZE; 118 + skb->len += length; 119 + 120 + size = length - IPOIB_UD_HEAD_SIZE; 121 + 122 + frag->size = size; 123 + skb->data_len += size; 124 + skb->truesize += size; 125 + } else 126 + skb_put(skb, length); 127 + 128 + } 129 + 92 130 static int ipoib_ib_post_receive(struct net_device *dev, int id) 93 131 { 94 132 struct ipoib_dev_priv *priv = netdev_priv(dev); 95 - struct ib_sge list; 96 - struct ib_recv_wr param; 97 133 struct ib_recv_wr *bad_wr; 98 134 int ret; 99 135 100 - list.addr = priv->rx_ring[id].mapping; 101 - list.length = IPOIB_BUF_SIZE; 102 - list.lkey = priv->mr->lkey; 136 + priv->rx_wr.wr_id = id | IPOIB_OP_RECV; 137 + priv->rx_sge[0].addr = priv->rx_ring[id].mapping[0]; 138 + priv->rx_sge[1].addr = priv->rx_ring[id].mapping[1]; 103 139 104 - param.next = NULL; 105 - param.wr_id = id | IPOIB_OP_RECV; 106 - param.sg_list = &list; 107 - param.num_sge = 1; 108 140 109 - ret = ib_post_recv(priv->qp, &param, &bad_wr); 141 + ret = ib_post_recv(priv->qp, &priv->rx_wr, &bad_wr); 110 142 if (unlikely(ret)) { 111 143 ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret); 112 - ib_dma_unmap_single(priv->ca, priv->rx_ring[id].mapping, 113 - IPOIB_BUF_SIZE, DMA_FROM_DEVICE); 144 + ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[id].mapping); 114 145 dev_kfree_skb_any(priv->rx_ring[id].skb); 115 146 priv->rx_ring[id].skb = NULL; 116 147 } ··· 149 118 return ret; 150 119 } 151 120 152 - static int ipoib_alloc_rx_skb(struct net_device *dev, int id) 121 + static struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev, int id) 153 122 { 154 123 struct ipoib_dev_priv *priv = netdev_priv(dev); 155 124 struct sk_buff *skb; 156 - u64 addr; 125 + int buf_size; 126 + u64 *mapping; 157 127 158 - skb = dev_alloc_skb(IPOIB_BUF_SIZE + 4); 159 - if (!skb) 160 - return -ENOMEM; 128 + if (ipoib_ud_need_sg(priv->max_ib_mtu)) 129 + buf_size = IPOIB_UD_HEAD_SIZE; 130 + else 131 + buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu); 132 + 133 + skb = dev_alloc_skb(buf_size + 4); 134 + if (unlikely(!skb)) 135 + return NULL; 161 136 162 137 /* 163 138 * IB will leave a 40 byte gap for a GRH and IPoIB adds a 4 byte ··· 172 135 */ 173 136 skb_reserve(skb, 4); 174 137 175 - addr = ib_dma_map_single(priv->ca, skb->data, IPOIB_BUF_SIZE, 176 - DMA_FROM_DEVICE); 177 - if (unlikely(ib_dma_mapping_error(priv->ca, addr))) { 178 - dev_kfree_skb_any(skb); 179 - return -EIO; 138 + mapping = priv->rx_ring[id].mapping; 139 + mapping[0] = ib_dma_map_single(priv->ca, skb->data, buf_size, 140 + DMA_FROM_DEVICE); 141 + if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0]))) 142 + goto error; 
143 + 144 + if (ipoib_ud_need_sg(priv->max_ib_mtu)) { 145 + struct page *page = alloc_page(GFP_ATOMIC); 146 + if (!page) 147 + goto partial_error; 148 + skb_fill_page_desc(skb, 0, page, 0, PAGE_SIZE); 149 + mapping[1] = 150 + ib_dma_map_page(priv->ca, skb_shinfo(skb)->frags[0].page, 151 + 0, PAGE_SIZE, DMA_FROM_DEVICE); 152 + if (unlikely(ib_dma_mapping_error(priv->ca, mapping[1]))) 153 + goto partial_error; 180 154 } 181 155 182 - priv->rx_ring[id].skb = skb; 183 - priv->rx_ring[id].mapping = addr; 156 + priv->rx_ring[id].skb = skb; 157 + return skb; 184 158 185 - return 0; 159 + partial_error: 160 + ib_dma_unmap_single(priv->ca, mapping[0], buf_size, DMA_FROM_DEVICE); 161 + error: 162 + dev_kfree_skb_any(skb); 163 + return NULL; 186 164 } 187 165 188 166 static int ipoib_ib_post_receives(struct net_device *dev) ··· 206 154 int i; 207 155 208 156 for (i = 0; i < ipoib_recvq_size; ++i) { 209 - if (ipoib_alloc_rx_skb(dev, i)) { 157 + if (!ipoib_alloc_rx_skb(dev, i)) { 210 158 ipoib_warn(priv, "failed to allocate receive buffer %d\n", i); 211 159 return -ENOMEM; 212 160 } ··· 224 172 struct ipoib_dev_priv *priv = netdev_priv(dev); 225 173 unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV; 226 174 struct sk_buff *skb; 227 - u64 addr; 175 + u64 mapping[IPOIB_UD_RX_SG]; 228 176 229 177 ipoib_dbg_data(priv, "recv completion: id %d, status: %d\n", 230 178 wr_id, wc->status); ··· 236 184 } 237 185 238 186 skb = priv->rx_ring[wr_id].skb; 239 - addr = priv->rx_ring[wr_id].mapping; 240 187 241 188 if (unlikely(wc->status != IB_WC_SUCCESS)) { 242 189 if (wc->status != IB_WC_WR_FLUSH_ERR) 243 190 ipoib_warn(priv, "failed recv event " 244 191 "(status=%d, wrid=%d vend_err %x)\n", 245 192 wc->status, wr_id, wc->vendor_err); 246 - ib_dma_unmap_single(priv->ca, addr, 247 - IPOIB_BUF_SIZE, DMA_FROM_DEVICE); 193 + ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[wr_id].mapping); 248 194 dev_kfree_skb_any(skb); 249 195 priv->rx_ring[wr_id].skb = NULL; 250 196 return; ··· 255 205 if (wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num) 256 206 goto repost; 257 207 208 + memcpy(mapping, priv->rx_ring[wr_id].mapping, 209 + IPOIB_UD_RX_SG * sizeof *mapping); 210 + 258 211 /* 259 212 * If we can't allocate a new RX buffer, dump 260 213 * this packet and reuse the old buffer. 261 214 */ 262 - if (unlikely(ipoib_alloc_rx_skb(dev, wr_id))) { 215 + if (unlikely(!ipoib_alloc_rx_skb(dev, wr_id))) { 263 216 ++dev->stats.rx_dropped; 264 217 goto repost; 265 218 } ··· 270 217 ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n", 271 218 wc->byte_len, wc->slid); 272 219 273 - ib_dma_unmap_single(priv->ca, addr, IPOIB_BUF_SIZE, DMA_FROM_DEVICE); 220 + ipoib_ud_dma_unmap_rx(priv, mapping); 221 + ipoib_ud_skb_put_frags(priv, skb, wc->byte_len); 274 222 275 - skb_put(skb, wc->byte_len); 276 223 skb_pull(skb, IB_GRH_BYTES); 277 224 278 225 skb->protocol = ((struct ipoib_header *) skb->data)->proto; ··· 786 733 rx_req = &priv->rx_ring[i]; 787 734 if (!rx_req->skb) 788 735 continue; 789 - ib_dma_unmap_single(priv->ca, 790 - rx_req->mapping, 791 - IPOIB_BUF_SIZE, 792 - DMA_FROM_DEVICE); 736 + ipoib_ud_dma_unmap_rx(priv, 737 + priv->rx_ring[i].mapping); 793 738 dev_kfree_skb_any(rx_req->skb); 794 739 rx_req->skb = NULL; 795 740 }
+14 -5
drivers/infiniband/ulp/ipoib/ipoib_main.c
··· 195 195 return 0; 196 196 } 197 197 198 - if (new_mtu > IPOIB_PACKET_SIZE - IPOIB_ENCAP_LEN) 198 + if (new_mtu > IPOIB_UD_MTU(priv->max_ib_mtu)) 199 199 return -EINVAL; 200 200 201 201 priv->admin_mtu = new_mtu; ··· 971 971 NETIF_F_LLTX | 972 972 NETIF_F_HIGHDMA); 973 973 974 - /* MTU will be reset when mcast join happens */ 975 - dev->mtu = IPOIB_PACKET_SIZE - IPOIB_ENCAP_LEN; 976 - priv->mcast_mtu = priv->admin_mtu = dev->mtu; 977 - 978 974 memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN); 979 975 980 976 netif_carrier_off(dev); ··· 1103 1107 { 1104 1108 struct ipoib_dev_priv *priv; 1105 1109 struct ib_device_attr *device_attr; 1110 + struct ib_port_attr attr; 1106 1111 int result = -ENOMEM; 1107 1112 1108 1113 priv = ipoib_intf_alloc(format); ··· 1111 1114 goto alloc_mem_failed; 1112 1115 1113 1116 SET_NETDEV_DEV(priv->dev, hca->dma_device); 1117 + 1118 + if (!ib_query_port(hca, port, &attr)) 1119 + priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu); 1120 + else { 1121 + printk(KERN_WARNING "%s: ib_query_port %d failed\n", 1122 + hca->name, port); 1123 + goto device_init_failed; 1124 + } 1125 + 1126 + /* MTU will be reset when mcast join happens */ 1127 + priv->dev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu); 1128 + priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu; 1114 1129 1115 1130 result = ib_query_pkey(hca, port, 0, &priv->pkey); 1116 1131 if (result) {
+1 -2
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
··· 567 567 return; 568 568 } 569 569 570 - priv->mcast_mtu = ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu) - 571 - IPOIB_ENCAP_LEN; 570 + priv->mcast_mtu = IPOIB_UD_MTU(ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu)); 572 571 573 572 if (!ipoib_cm_admin_enabled(dev)) 574 573 dev->mtu = min(priv->mcast_mtu, priv->admin_mtu);
+14 -1
drivers/infiniband/ulp/ipoib/ipoib_verbs.c
··· 150 150 .max_send_wr = ipoib_sendq_size, 151 151 .max_recv_wr = ipoib_recvq_size, 152 152 .max_send_sge = 1, 153 - .max_recv_sge = 1 153 + .max_recv_sge = IPOIB_UD_RX_SG 154 154 }, 155 155 .sq_sig_type = IB_SIGNAL_ALL_WR, 156 156 .qp_type = IB_QPT_UD ··· 214 214 priv->tx_wr.opcode = IB_WR_SEND; 215 215 priv->tx_wr.sg_list = priv->tx_sge; 216 216 priv->tx_wr.send_flags = IB_SEND_SIGNALED; 217 + 218 + priv->rx_sge[0].lkey = priv->mr->lkey; 219 + if (ipoib_ud_need_sg(priv->max_ib_mtu)) { 220 + priv->rx_sge[0].length = IPOIB_UD_HEAD_SIZE; 221 + priv->rx_sge[1].length = PAGE_SIZE; 222 + priv->rx_sge[1].lkey = priv->mr->lkey; 223 + priv->rx_wr.num_sge = IPOIB_UD_RX_SG; 224 + } else { 225 + priv->rx_sge[0].length = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu); 226 + priv->rx_wr.num_sge = 1; 227 + } 228 + priv->rx_wr.next = NULL; 229 + priv->rx_wr.sg_list = priv->rx_sge; 217 230 218 231 return 0; 219 232
+1
drivers/infiniband/ulp/ipoib/ipoib_vlan.c
··· 89 89 goto err; 90 90 } 91 91 92 + priv->max_ib_mtu = ppriv->max_ib_mtu; 92 93 set_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags); 93 94 94 95 priv->pkey = pkey;
+157
drivers/net/mlx4/alloc.c
··· 196 196 } 197 197 } 198 198 EXPORT_SYMBOL_GPL(mlx4_buf_free); 199 + 200 + static struct mlx4_db_pgdir *mlx4_alloc_db_pgdir(struct device *dma_device) 201 + { 202 + struct mlx4_db_pgdir *pgdir; 203 + 204 + pgdir = kzalloc(sizeof *pgdir, GFP_KERNEL); 205 + if (!pgdir) 206 + return NULL; 207 + 208 + bitmap_fill(pgdir->order1, MLX4_DB_PER_PAGE / 2); 209 + pgdir->bits[0] = pgdir->order0; 210 + pgdir->bits[1] = pgdir->order1; 211 + pgdir->db_page = dma_alloc_coherent(dma_device, PAGE_SIZE, 212 + &pgdir->db_dma, GFP_KERNEL); 213 + if (!pgdir->db_page) { 214 + kfree(pgdir); 215 + return NULL; 216 + } 217 + 218 + return pgdir; 219 + } 220 + 221 + static int mlx4_alloc_db_from_pgdir(struct mlx4_db_pgdir *pgdir, 222 + struct mlx4_db *db, int order) 223 + { 224 + int o; 225 + int i; 226 + 227 + for (o = order; o <= 1; ++o) { 228 + i = find_first_bit(pgdir->bits[o], MLX4_DB_PER_PAGE >> o); 229 + if (i < MLX4_DB_PER_PAGE >> o) 230 + goto found; 231 + } 232 + 233 + return -ENOMEM; 234 + 235 + found: 236 + clear_bit(i, pgdir->bits[o]); 237 + 238 + i <<= o; 239 + 240 + if (o > order) 241 + set_bit(i ^ 1, pgdir->bits[order]); 242 + 243 + db->u.pgdir = pgdir; 244 + db->index = i; 245 + db->db = pgdir->db_page + db->index; 246 + db->dma = pgdir->db_dma + db->index * 4; 247 + db->order = order; 248 + 249 + return 0; 250 + } 251 + 252 + int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order) 253 + { 254 + struct mlx4_priv *priv = mlx4_priv(dev); 255 + struct mlx4_db_pgdir *pgdir; 256 + int ret = 0; 257 + 258 + mutex_lock(&priv->pgdir_mutex); 259 + 260 + list_for_each_entry(pgdir, &priv->pgdir_list, list) 261 + if (!mlx4_alloc_db_from_pgdir(pgdir, db, order)) 262 + goto out; 263 + 264 + pgdir = mlx4_alloc_db_pgdir(&(dev->pdev->dev)); 265 + if (!pgdir) { 266 + ret = -ENOMEM; 267 + goto out; 268 + } 269 + 270 + list_add(&pgdir->list, &priv->pgdir_list); 271 + 272 + /* This should never fail -- we just allocated an empty page: */ 273 + WARN_ON(mlx4_alloc_db_from_pgdir(pgdir, db, order)); 274 + 275 + out: 276 + mutex_unlock(&priv->pgdir_mutex); 277 + 278 + return ret; 279 + } 280 + EXPORT_SYMBOL_GPL(mlx4_db_alloc); 281 + 282 + void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db) 283 + { 284 + struct mlx4_priv *priv = mlx4_priv(dev); 285 + int o; 286 + int i; 287 + 288 + mutex_lock(&priv->pgdir_mutex); 289 + 290 + o = db->order; 291 + i = db->index; 292 + 293 + if (db->order == 0 && test_bit(i ^ 1, db->u.pgdir->order0)) { 294 + clear_bit(i ^ 1, db->u.pgdir->order0); 295 + ++o; 296 + } 297 + i >>= o; 298 + set_bit(i, db->u.pgdir->bits[o]); 299 + 300 + if (bitmap_full(db->u.pgdir->order1, MLX4_DB_PER_PAGE / 2)) { 301 + dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE, 302 + db->u.pgdir->db_page, db->u.pgdir->db_dma); 303 + list_del(&db->u.pgdir->list); 304 + kfree(db->u.pgdir); 305 + } 306 + 307 + mutex_unlock(&priv->pgdir_mutex); 308 + } 309 + EXPORT_SYMBOL_GPL(mlx4_db_free); 310 + 311 + int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres, 312 + int size, int max_direct) 313 + { 314 + int err; 315 + 316 + err = mlx4_db_alloc(dev, &wqres->db, 1); 317 + if (err) 318 + return err; 319 + 320 + *wqres->db.db = 0; 321 + 322 + err = mlx4_buf_alloc(dev, size, max_direct, &wqres->buf); 323 + if (err) 324 + goto err_db; 325 + 326 + err = mlx4_mtt_init(dev, wqres->buf.npages, wqres->buf.page_shift, 327 + &wqres->mtt); 328 + if (err) 329 + goto err_buf; 330 + 331 + err = mlx4_buf_write_mtt(dev, &wqres->mtt, &wqres->buf); 332 + if (err) 333 + goto err_mtt; 334 + 335 + return 0; 336 
+ 337 + err_mtt: 338 + mlx4_mtt_cleanup(dev, &wqres->mtt); 339 + err_buf: 340 + mlx4_buf_free(dev, size, &wqres->buf); 341 + err_db: 342 + mlx4_db_free(dev, &wqres->db); 343 + 344 + return err; 345 + } 346 + EXPORT_SYMBOL_GPL(mlx4_alloc_hwq_res); 347 + 348 + void mlx4_free_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres, 349 + int size) 350 + { 351 + mlx4_mtt_cleanup(dev, &wqres->mtt); 352 + mlx4_buf_free(dev, size, &wqres->buf); 353 + mlx4_db_free(dev, &wqres->db); 354 + } 355 + EXPORT_SYMBOL_GPL(mlx4_free_hwq_res);
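A hedged sketch of how a consumer might use the new queue-resource helpers; the ring structure, function names, and the 2 * PAGE_SIZE max_direct choice are illustrative, only the mlx4_* types and calls come from this patch:

	#include <linux/mlx4/device.h>

	struct example_ring {
		struct mlx4_hwq_resources wqres;	/* doorbell + buffer + MTT bundle */
		int size;				/* bytes of HW queue memory */
	};

	static int example_ring_create(struct mlx4_dev *dev,
				       struct example_ring *ring, int size)
	{
		int err;

		ring->size = size;
		/* allocates the doorbell record, the queue buffer and its MTT,
		 * and writes the MTT, in one call */
		err = mlx4_alloc_hwq_res(dev, &ring->wqres, size, 2 * PAGE_SIZE);
		if (err)
			return err;

		/* ring->wqres.db.dma and ring->wqres.mtt can now be handed to the
		 * firmware command that creates the queue */
		return 0;
	}

	static void example_ring_destroy(struct mlx4_dev *dev, struct example_ring *ring)
	{
		mlx4_free_hwq_res(dev, &ring->wqres, ring->size);
	}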
+1 -1
drivers/net/mlx4/cq.c
··· 180 180 cq_context->mtt_base_addr_h = mtt_addr >> 32; 181 181 cq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff); 182 182 183 - err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 1); 183 + err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 0); 184 184 185 185 mlx4_free_cmd_mailbox(dev, mailbox); 186 186 return err;
+3
drivers/net/mlx4/main.c
··· 798 798 INIT_LIST_HEAD(&priv->ctx_list); 799 799 spin_lock_init(&priv->ctx_lock); 800 800 801 + INIT_LIST_HEAD(&priv->pgdir_list); 802 + mutex_init(&priv->pgdir_mutex); 803 + 801 804 /* 802 805 * Now reset the HCA before we touch the PCI capabilities or 803 806 * attempt a firmware command, since a boot ROM may have left
+3
drivers/net/mlx4/mlx4.h
··· 257 257 struct list_head ctx_list; 258 258 spinlock_t ctx_lock; 259 259 260 + struct list_head pgdir_list; 261 + struct mutex pgdir_mutex; 262 + 260 263 struct mlx4_fw fw; 261 264 struct mlx4_cmd cmd; 262 265
+31
drivers/net/mlx4/qp.c
··· 299 299 } 300 300 EXPORT_SYMBOL_GPL(mlx4_qp_query); 301 301 302 + int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt, 303 + struct mlx4_qp_context *context, 304 + struct mlx4_qp *qp, enum mlx4_qp_state *qp_state) 305 + { 306 + int err; 307 + int i; 308 + enum mlx4_qp_state states[] = { 309 + MLX4_QP_STATE_RST, 310 + MLX4_QP_STATE_INIT, 311 + MLX4_QP_STATE_RTR, 312 + MLX4_QP_STATE_RTS 313 + }; 314 + 315 + for (i = 0; i < ARRAY_SIZE(states) - 1; i++) { 316 + context->flags &= cpu_to_be32(~(0xf << 28)); 317 + context->flags |= cpu_to_be32(states[i + 1] << 28); 318 + err = mlx4_qp_modify(dev, mtt, states[i], states[i + 1], 319 + context, 0, 0, qp); 320 + if (err) { 321 + mlx4_err(dev, "Failed to bring QP to state: " 322 + "%d with error: %d\n", 323 + states[i + 1], err); 324 + return err; 325 + } 326 + 327 + *qp_state = states[i + 1]; 328 + } 329 + 330 + return 0; 331 + } 332 + EXPORT_SYMBOL_GPL(mlx4_qp_to_ready);
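A hedged sketch of a caller of the new helper; the context setup is deliberately incomplete and the function name is hypothetical, only mlx4_qp_to_ready() and its types come from this patch:

	#include <linux/string.h>
	#include <linux/mlx4/qp.h>

	static int example_qp_activate(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
				       struct mlx4_qp *qp)
	{
		struct mlx4_qp_context context;
		enum mlx4_qp_state state;

		memset(&context, 0, sizeof context);
		/* ... fill in the MTU, CQ numbers, doorbell record address and the
		 * other fields the device requires ... */

		/* walks the QP through RESET -> INIT -> RTR -> RTS, one
		 * mlx4_qp_modify() per step; on success state == MLX4_QP_STATE_RTS */
		return mlx4_qp_to_ready(dev, mtt, &context, qp, &state);
	}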
+40
include/linux/mlx4/device.h
··· 208 208 int page_shift; 209 209 }; 210 210 211 + enum { 212 + MLX4_DB_PER_PAGE = PAGE_SIZE / 4 213 + }; 214 + 215 + struct mlx4_db_pgdir { 216 + struct list_head list; 217 + DECLARE_BITMAP(order0, MLX4_DB_PER_PAGE); 218 + DECLARE_BITMAP(order1, MLX4_DB_PER_PAGE / 2); 219 + unsigned long *bits[2]; 220 + __be32 *db_page; 221 + dma_addr_t db_dma; 222 + }; 223 + 224 + struct mlx4_ib_user_db_page; 225 + 226 + struct mlx4_db { 227 + __be32 *db; 228 + union { 229 + struct mlx4_db_pgdir *pgdir; 230 + struct mlx4_ib_user_db_page *user_page; 231 + } u; 232 + dma_addr_t dma; 233 + int index; 234 + int order; 235 + }; 236 + 237 + struct mlx4_hwq_resources { 238 + struct mlx4_db db; 239 + struct mlx4_mtt mtt; 240 + struct mlx4_buf buf; 241 + }; 242 + 211 243 struct mlx4_mr { 212 244 struct mlx4_mtt mtt; 213 245 u64 iova; ··· 372 340 int start_index, int npages, u64 *page_list); 373 341 int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, 374 342 struct mlx4_buf *buf); 343 + 344 + int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order); 345 + void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db); 346 + 347 + int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres, 348 + int size, int max_direct); 349 + void mlx4_free_hwq_res(struct mlx4_dev *mdev, struct mlx4_hwq_resources *wqres, 350 + int size); 375 351 376 352 int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt, 377 353 struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq);
+4
include/linux/mlx4/qp.h
··· 296 296 int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp, 297 297 struct mlx4_qp_context *context); 298 298 299 + int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt, 300 + struct mlx4_qp_context *context, 301 + struct mlx4_qp *qp, enum mlx4_qp_state *qp_state); 302 + 299 303 static inline struct mlx4_qp *__mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn) 300 304 { 301 305 return radix_tree_lookup(&dev->qp_table_tree, qpn & (dev->caps.num_qps - 1));