Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
IB/ehca: Use consistent types for ehca_plpar_hcall9()
IB/ehca: Fix printk format warnings from u64 type change
IPoIB: Do not print error messages for multicast join retries
IB/mlx4: Fix memory ordering problem when posting LSO sends
mlx4_core: Fix min() warning
IPoIB: Fix deadlock between ipoib_open() and child interface create
IPoIB: Fix hang in napi_disable() if P_Key is never found

+189 -171
+8 -8
drivers/infiniband/hw/ehca/ehca_cq.c
··· 196 197 if (h_ret != H_SUCCESS) { 198 ehca_err(device, "hipz_h_alloc_resource_cq() failed " 199 - "h_ret=%li device=%p", h_ret, device); 200 cq = ERR_PTR(ehca2ib_return_code(h_ret)); 201 goto create_cq_exit2; 202 } ··· 232 233 if (h_ret < H_SUCCESS) { 234 ehca_err(device, "hipz_h_register_rpage_cq() failed " 235 - "ehca_cq=%p cq_num=%x h_ret=%li counter=%i " 236 "act_pages=%i", my_cq, my_cq->cq_number, 237 h_ret, counter, param.act_pages); 238 cq = ERR_PTR(-EINVAL); ··· 244 if ((h_ret != H_SUCCESS) || vpage) { 245 ehca_err(device, "Registration of pages not " 246 "complete ehca_cq=%p cq_num=%x " 247 - "h_ret=%li", my_cq, my_cq->cq_number, 248 h_ret); 249 cq = ERR_PTR(-EAGAIN); 250 goto create_cq_exit4; ··· 252 } else { 253 if (h_ret != H_PAGE_REGISTERED) { 254 ehca_err(device, "Registration of page failed " 255 - "ehca_cq=%p cq_num=%x h_ret=%li " 256 "counter=%i act_pages=%i", 257 my_cq, my_cq->cq_number, 258 h_ret, counter, param.act_pages); ··· 266 267 gal = my_cq->galpas.kernel; 268 cqx_fec = hipz_galpa_load(gal, CQTEMM_OFFSET(cqx_fec)); 269 - ehca_dbg(device, "ehca_cq=%p cq_num=%x CQX_FEC=%lx", 270 my_cq, my_cq->cq_number, cqx_fec); 271 272 my_cq->ib_cq.cqe = my_cq->nr_of_entries = ··· 307 h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 1); 308 if (h_ret != H_SUCCESS) 309 ehca_err(device, "hipz_h_destroy_cq() failed ehca_cq=%p " 310 - "cq_num=%x h_ret=%li", my_cq, my_cq->cq_number, h_ret); 311 312 create_cq_exit2: 313 write_lock_irqsave(&ehca_cq_idr_lock, flags); ··· 355 h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 0); 356 if (h_ret == H_R_STATE) { 357 /* cq in err: read err data and destroy it forcibly */ 358 - ehca_dbg(device, "ehca_cq=%p cq_num=%x ressource=%lx in err " 359 "state. Try to delete it forcibly.", 360 my_cq, cq_num, my_cq->ipz_cq_handle.handle); 361 ehca_error_data(shca, my_cq, my_cq->ipz_cq_handle.handle); ··· 365 cq_num); 366 } 367 if (h_ret != H_SUCCESS) { 368 - ehca_err(device, "hipz_h_destroy_cq() failed h_ret=%li " 369 "ehca_cq=%p cq_num=%x", h_ret, my_cq, cq_num); 370 return ehca2ib_return_code(h_ret); 371 }
··· 196 197 if (h_ret != H_SUCCESS) { 198 ehca_err(device, "hipz_h_alloc_resource_cq() failed " 199 + "h_ret=%lli device=%p", h_ret, device); 200 cq = ERR_PTR(ehca2ib_return_code(h_ret)); 201 goto create_cq_exit2; 202 } ··· 232 233 if (h_ret < H_SUCCESS) { 234 ehca_err(device, "hipz_h_register_rpage_cq() failed " 235 + "ehca_cq=%p cq_num=%x h_ret=%lli counter=%i " 236 "act_pages=%i", my_cq, my_cq->cq_number, 237 h_ret, counter, param.act_pages); 238 cq = ERR_PTR(-EINVAL); ··· 244 if ((h_ret != H_SUCCESS) || vpage) { 245 ehca_err(device, "Registration of pages not " 246 "complete ehca_cq=%p cq_num=%x " 247 + "h_ret=%lli", my_cq, my_cq->cq_number, 248 h_ret); 249 cq = ERR_PTR(-EAGAIN); 250 goto create_cq_exit4; ··· 252 } else { 253 if (h_ret != H_PAGE_REGISTERED) { 254 ehca_err(device, "Registration of page failed " 255 + "ehca_cq=%p cq_num=%x h_ret=%lli " 256 "counter=%i act_pages=%i", 257 my_cq, my_cq->cq_number, 258 h_ret, counter, param.act_pages); ··· 266 267 gal = my_cq->galpas.kernel; 268 cqx_fec = hipz_galpa_load(gal, CQTEMM_OFFSET(cqx_fec)); 269 + ehca_dbg(device, "ehca_cq=%p cq_num=%x CQX_FEC=%llx", 270 my_cq, my_cq->cq_number, cqx_fec); 271 272 my_cq->ib_cq.cqe = my_cq->nr_of_entries = ··· 307 h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 1); 308 if (h_ret != H_SUCCESS) 309 ehca_err(device, "hipz_h_destroy_cq() failed ehca_cq=%p " 310 + "cq_num=%x h_ret=%lli", my_cq, my_cq->cq_number, h_ret); 311 312 create_cq_exit2: 313 write_lock_irqsave(&ehca_cq_idr_lock, flags); ··· 355 h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 0); 356 if (h_ret == H_R_STATE) { 357 /* cq in err: read err data and destroy it forcibly */ 358 + ehca_dbg(device, "ehca_cq=%p cq_num=%x resource=%llx in err " 359 "state. Try to delete it forcibly.", 360 my_cq, cq_num, my_cq->ipz_cq_handle.handle); 361 ehca_error_data(shca, my_cq, my_cq->ipz_cq_handle.handle); ··· 365 cq_num); 366 } 367 if (h_ret != H_SUCCESS) { 368 + ehca_err(device, "hipz_h_destroy_cq() failed h_ret=%lli " 369 "ehca_cq=%p cq_num=%x", h_ret, my_cq, cq_num); 370 return ehca2ib_return_code(h_ret); 371 }
+1 -1
drivers/infiniband/hw/ehca/ehca_hca.c
··· 393 hret = hipz_h_modify_port(shca->ipz_hca_handle, port, 394 cap, props->init_type, port_modify_mask); 395 if (hret != H_SUCCESS) { 396 - ehca_err(&shca->ib_device, "Modify port failed h_ret=%li", 397 hret); 398 ret = -EINVAL; 399 }
··· 393 hret = hipz_h_modify_port(shca->ipz_hca_handle, port, 394 cap, props->init_type, port_modify_mask); 395 if (hret != H_SUCCESS) { 396 + ehca_err(&shca->ib_device, "Modify port failed h_ret=%lli", 397 hret); 398 ret = -EINVAL; 399 }
+9 -9
drivers/infiniband/hw/ehca/ehca_irq.c
··· 99 return; 100 101 ehca_err(&shca->ib_device, 102 - "QP 0x%x (resource=%lx) has errors.", 103 qp->ib_qp.qp_num, resource); 104 break; 105 } ··· 108 struct ehca_cq *cq = (struct ehca_cq *)data; 109 110 ehca_err(&shca->ib_device, 111 - "CQ 0x%x (resource=%lx) has errors.", 112 cq->cq_number, resource); 113 break; 114 } 115 default: 116 ehca_err(&shca->ib_device, 117 - "Unknown error type: %lx on %s.", 118 type, shca->ib_device.name); 119 break; 120 } 121 122 - ehca_err(&shca->ib_device, "Error data is available: %lx.", resource); 123 ehca_err(&shca->ib_device, "EHCA ----- error data begin " 124 "---------------------------------------------------"); 125 - ehca_dmp(rblock, length, "resource=%lx", resource); 126 ehca_err(&shca->ib_device, "EHCA ----- error data end " 127 "----------------------------------------------------"); 128 ··· 152 153 if (ret == H_R_STATE) 154 ehca_err(&shca->ib_device, 155 - "No error data is available: %lx.", resource); 156 else if (ret == H_SUCCESS) { 157 int length; 158 ··· 164 print_error_data(shca, data, rblock, length); 165 } else 166 ehca_err(&shca->ib_device, 167 - "Error data could not be fetched: %lx", resource); 168 169 ehca_free_fw_ctrlblock(rblock); 170 ··· 514 struct ehca_cq *cq; 515 516 eqe_value = eqe->entry; 517 - ehca_dbg(&shca->ib_device, "eqe_value=%lx", eqe_value); 518 if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, eqe_value)) { 519 ehca_dbg(&shca->ib_device, "Got completion event"); 520 token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value); ··· 603 ret = hipz_h_eoi(eq->ist); 604 if (ret != H_SUCCESS) 605 ehca_err(&shca->ib_device, 606 - "bad return code EOI -rc = %ld\n", ret); 607 ehca_dbg(&shca->ib_device, "deadman found %x eqe", eqe_cnt); 608 } 609 if (unlikely(eqe_cnt == EHCA_EQE_CACHE_SIZE))
··· 99 return; 100 101 ehca_err(&shca->ib_device, 102 + "QP 0x%x (resource=%llx) has errors.", 103 qp->ib_qp.qp_num, resource); 104 break; 105 } ··· 108 struct ehca_cq *cq = (struct ehca_cq *)data; 109 110 ehca_err(&shca->ib_device, 111 + "CQ 0x%x (resource=%llx) has errors.", 112 cq->cq_number, resource); 113 break; 114 } 115 default: 116 ehca_err(&shca->ib_device, 117 + "Unknown error type: %llx on %s.", 118 type, shca->ib_device.name); 119 break; 120 } 121 122 + ehca_err(&shca->ib_device, "Error data is available: %llx.", resource); 123 ehca_err(&shca->ib_device, "EHCA ----- error data begin " 124 "---------------------------------------------------"); 125 + ehca_dmp(rblock, length, "resource=%llx", resource); 126 ehca_err(&shca->ib_device, "EHCA ----- error data end " 127 "----------------------------------------------------"); 128 ··· 152 153 if (ret == H_R_STATE) 154 ehca_err(&shca->ib_device, 155 + "No error data is available: %llx.", resource); 156 else if (ret == H_SUCCESS) { 157 int length; 158 ··· 164 print_error_data(shca, data, rblock, length); 165 } else 166 ehca_err(&shca->ib_device, 167 + "Error data could not be fetched: %llx", resource); 168 169 ehca_free_fw_ctrlblock(rblock); 170 ··· 514 struct ehca_cq *cq; 515 516 eqe_value = eqe->entry; 517 + ehca_dbg(&shca->ib_device, "eqe_value=%llx", eqe_value); 518 if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, eqe_value)) { 519 ehca_dbg(&shca->ib_device, "Got completion event"); 520 token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value); ··· 603 ret = hipz_h_eoi(eq->ist); 604 if (ret != H_SUCCESS) 605 ehca_err(&shca->ib_device, 606 + "bad return code EOI -rc = %lld\n", ret); 607 ehca_dbg(&shca->ib_device, "deadman found %x eqe", eqe_cnt); 608 } 609 if (unlikely(eqe_cnt == EHCA_EQE_CACHE_SIZE))
+3 -3
drivers/infiniband/hw/ehca/ehca_main.c
··· 304 305 h_ret = hipz_h_query_hca(shca->ipz_hca_handle, rblock); 306 if (h_ret != H_SUCCESS) { 307 - ehca_gen_err("Cannot query device properties. h_ret=%li", 308 h_ret); 309 ret = -EPERM; 310 goto sense_attributes1; ··· 391 port = (struct hipz_query_port *)rblock; 392 h_ret = hipz_h_query_port(shca->ipz_hca_handle, 1, port); 393 if (h_ret != H_SUCCESS) { 394 - ehca_gen_err("Cannot query port properties. h_ret=%li", 395 h_ret); 396 ret = -EPERM; 397 goto sense_attributes1; ··· 682 { 683 struct ehca_shca *shca = dev->driver_data; 684 685 - return sprintf(buf, "%lx\n", shca->ipz_hca_handle.handle); 686 687 } 688 static DEVICE_ATTR(adapter_handle, S_IRUGO, ehca_show_adapter_handle, NULL);
··· 304 305 h_ret = hipz_h_query_hca(shca->ipz_hca_handle, rblock); 306 if (h_ret != H_SUCCESS) { 307 + ehca_gen_err("Cannot query device properties. h_ret=%lli", 308 h_ret); 309 ret = -EPERM; 310 goto sense_attributes1; ··· 391 port = (struct hipz_query_port *)rblock; 392 h_ret = hipz_h_query_port(shca->ipz_hca_handle, 1, port); 393 if (h_ret != H_SUCCESS) { 394 + ehca_gen_err("Cannot query port properties. h_ret=%lli", 395 h_ret); 396 ret = -EPERM; 397 goto sense_attributes1; ··· 682 { 683 struct ehca_shca *shca = dev->driver_data; 684 685 + return sprintf(buf, "%llx\n", shca->ipz_hca_handle.handle); 686 687 } 688 static DEVICE_ATTR(adapter_handle, S_IRUGO, ehca_show_adapter_handle, NULL);
+2 -2
drivers/infiniband/hw/ehca/ehca_mcast.c
··· 88 if (h_ret != H_SUCCESS) 89 ehca_err(ibqp->device, 90 "ehca_qp=%p qp_num=%x hipz_h_attach_mcqp() failed " 91 - "h_ret=%li", my_qp, ibqp->qp_num, h_ret); 92 93 return ehca2ib_return_code(h_ret); 94 } ··· 125 if (h_ret != H_SUCCESS) 126 ehca_err(ibqp->device, 127 "ehca_qp=%p qp_num=%x hipz_h_detach_mcqp() failed " 128 - "h_ret=%li", my_qp, ibqp->qp_num, h_ret); 129 130 return ehca2ib_return_code(h_ret); 131 }
··· 88 if (h_ret != H_SUCCESS) 89 ehca_err(ibqp->device, 90 "ehca_qp=%p qp_num=%x hipz_h_attach_mcqp() failed " 91 + "h_ret=%lli", my_qp, ibqp->qp_num, h_ret); 92 93 return ehca2ib_return_code(h_ret); 94 } ··· 125 if (h_ret != H_SUCCESS) 126 ehca_err(ibqp->device, 127 "ehca_qp=%p qp_num=%x hipz_h_detach_mcqp() failed " 128 + "h_ret=%lli", my_qp, ibqp->qp_num, h_ret); 129 130 return ehca2ib_return_code(h_ret); 131 }
+72 -72
drivers/infiniband/hw/ehca/ehca_mrmw.c
··· 204 } 205 if ((size == 0) || 206 (((u64)iova_start + size) < (u64)iova_start)) { 207 - ehca_err(pd->device, "bad input values: size=%lx iova_start=%p", 208 size, iova_start); 209 ib_mr = ERR_PTR(-EINVAL); 210 goto reg_phys_mr_exit0; ··· 309 } 310 311 if (length == 0 || virt + length < virt) { 312 - ehca_err(pd->device, "bad input values: length=%lx " 313 - "virt_base=%lx", length, virt); 314 ib_mr = ERR_PTR(-EINVAL); 315 goto reg_user_mr_exit0; 316 } ··· 373 &e_mr->ib.ib_mr.rkey); 374 if (ret == -EINVAL && pginfo.hwpage_size > PAGE_SIZE) { 375 ehca_warn(pd->device, "failed to register mr " 376 - "with hwpage_size=%lx", hwpage_size); 377 ehca_info(pd->device, "try to register mr with " 378 "kpage_size=%lx", PAGE_SIZE); 379 /* ··· 509 goto rereg_phys_mr_exit1; 510 if ((new_size == 0) || 511 (((u64)iova_start + new_size) < (u64)iova_start)) { 512 - ehca_err(mr->device, "bad input values: new_size=%lx " 513 "iova_start=%p", new_size, iova_start); 514 ret = -EINVAL; 515 goto rereg_phys_mr_exit1; ··· 580 581 h_ret = hipz_h_query_mr(shca->ipz_hca_handle, e_mr, &hipzout); 582 if (h_ret != H_SUCCESS) { 583 - ehca_err(mr->device, "hipz_mr_query failed, h_ret=%li mr=%p " 584 - "hca_hndl=%lx mr_hndl=%lx lkey=%x", 585 h_ret, mr, shca->ipz_hca_handle.handle, 586 e_mr->ipz_mr_handle.handle, mr->lkey); 587 ret = ehca2ib_return_code(h_ret); ··· 630 /* TODO: BUSY: MR still has bound window(s) */ 631 h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr); 632 if (h_ret != H_SUCCESS) { 633 - ehca_err(mr->device, "hipz_free_mr failed, h_ret=%li shca=%p " 634 - "e_mr=%p hca_hndl=%lx mr_hndl=%lx mr->lkey=%x", 635 h_ret, shca, e_mr, shca->ipz_hca_handle.handle, 636 e_mr->ipz_mr_handle.handle, mr->lkey); 637 ret = ehca2ib_return_code(h_ret); ··· 671 h_ret = hipz_h_alloc_resource_mw(shca->ipz_hca_handle, e_mw, 672 e_pd->fw_pd, &hipzout); 673 if (h_ret != H_SUCCESS) { 674 - ehca_err(pd->device, "hipz_mw_allocate failed, h_ret=%li " 675 - "shca=%p hca_hndl=%lx mw=%p", 676 h_ret, 
shca, shca->ipz_hca_handle.handle, e_mw); 677 ib_mw = ERR_PTR(ehca2ib_return_code(h_ret)); 678 goto alloc_mw_exit1; ··· 713 714 h_ret = hipz_h_free_resource_mw(shca->ipz_hca_handle, e_mw); 715 if (h_ret != H_SUCCESS) { 716 - ehca_err(mw->device, "hipz_free_mw failed, h_ret=%li shca=%p " 717 - "mw=%p rkey=%x hca_hndl=%lx mw_hndl=%lx", 718 h_ret, shca, mw, mw->rkey, shca->ipz_hca_handle.handle, 719 e_mw->ipz_mw_handle.handle); 720 return ehca2ib_return_code(h_ret); ··· 840 goto map_phys_fmr_exit0; 841 if (iova % e_fmr->fmr_page_size) { 842 /* only whole-numbered pages */ 843 - ehca_err(fmr->device, "bad iova, iova=%lx fmr_page_size=%x", 844 iova, e_fmr->fmr_page_size); 845 ret = -EINVAL; 846 goto map_phys_fmr_exit0; ··· 878 map_phys_fmr_exit0: 879 if (ret) 880 ehca_err(fmr->device, "ret=%i fmr=%p page_list=%p list_len=%x " 881 - "iova=%lx", ret, fmr, page_list, list_len, iova); 882 return ret; 883 } /* end ehca_map_phys_fmr() */ 884 ··· 964 965 h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr); 966 if (h_ret != H_SUCCESS) { 967 - ehca_err(fmr->device, "hipz_free_mr failed, h_ret=%li e_fmr=%p " 968 - "hca_hndl=%lx fmr_hndl=%lx fmr->lkey=%x", 969 h_ret, e_fmr, shca->ipz_hca_handle.handle, 970 e_fmr->ipz_mr_handle.handle, fmr->lkey); 971 ret = ehca2ib_return_code(h_ret); ··· 1007 (u64)iova_start, size, hipz_acl, 1008 e_pd->fw_pd, &hipzout); 1009 if (h_ret != H_SUCCESS) { 1010 - ehca_err(&shca->ib_device, "hipz_alloc_mr failed, h_ret=%li " 1011 - "hca_hndl=%lx", h_ret, shca->ipz_hca_handle.handle); 1012 ret = ehca2ib_return_code(h_ret); 1013 goto ehca_reg_mr_exit0; 1014 } ··· 1033 ehca_reg_mr_exit1: 1034 h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr); 1035 if (h_ret != H_SUCCESS) { 1036 - ehca_err(&shca->ib_device, "h_ret=%li shca=%p e_mr=%p " 1037 - "iova_start=%p size=%lx acl=%x e_pd=%p lkey=%x " 1038 - "pginfo=%p num_kpages=%lx num_hwpages=%lx ret=%i", 1039 h_ret, shca, e_mr, iova_start, size, acl, e_pd, 1040 hipzout.lkey, pginfo, 
pginfo->num_kpages, 1041 pginfo->num_hwpages, ret); ··· 1045 ehca_reg_mr_exit0: 1046 if (ret) 1047 ehca_err(&shca->ib_device, "ret=%i shca=%p e_mr=%p " 1048 - "iova_start=%p size=%lx acl=%x e_pd=%p pginfo=%p " 1049 - "num_kpages=%lx num_hwpages=%lx", 1050 ret, shca, e_mr, iova_start, size, acl, e_pd, pginfo, 1051 pginfo->num_kpages, pginfo->num_hwpages); 1052 return ret; ··· 1116 */ 1117 if (h_ret != H_SUCCESS) { 1118 ehca_err(&shca->ib_device, "last " 1119 - "hipz_reg_rpage_mr failed, h_ret=%li " 1120 - "e_mr=%p i=%x hca_hndl=%lx mr_hndl=%lx" 1121 " lkey=%x", h_ret, e_mr, i, 1122 shca->ipz_hca_handle.handle, 1123 e_mr->ipz_mr_handle.handle, ··· 1128 ret = 0; 1129 } else if (h_ret != H_PAGE_REGISTERED) { 1130 ehca_err(&shca->ib_device, "hipz_reg_rpage_mr failed, " 1131 - "h_ret=%li e_mr=%p i=%x lkey=%x hca_hndl=%lx " 1132 - "mr_hndl=%lx", h_ret, e_mr, i, 1133 e_mr->ib.ib_mr.lkey, 1134 shca->ipz_hca_handle.handle, 1135 e_mr->ipz_mr_handle.handle); ··· 1145 ehca_reg_mr_rpages_exit0: 1146 if (ret) 1147 ehca_err(&shca->ib_device, "ret=%i shca=%p e_mr=%p pginfo=%p " 1148 - "num_kpages=%lx num_hwpages=%lx", ret, shca, e_mr, 1149 pginfo, pginfo->num_kpages, pginfo->num_hwpages); 1150 return ret; 1151 } /* end ehca_reg_mr_rpages() */ ··· 1184 ret = ehca_set_pagebuf(pginfo, pginfo->num_hwpages, kpage); 1185 if (ret) { 1186 ehca_err(&shca->ib_device, "set pagebuf failed, e_mr=%p " 1187 - "pginfo=%p type=%x num_kpages=%lx num_hwpages=%lx " 1188 "kpage=%p", e_mr, pginfo, pginfo->type, 1189 pginfo->num_kpages, pginfo->num_hwpages, kpage); 1190 goto ehca_rereg_mr_rereg1_exit1; ··· 1205 * (MW bound or MR is shared) 1206 */ 1207 ehca_warn(&shca->ib_device, "hipz_h_reregister_pmr failed " 1208 - "(Rereg1), h_ret=%li e_mr=%p", h_ret, e_mr); 1209 *pginfo = pginfo_save; 1210 ret = -EAGAIN; 1211 } else if ((u64 *)hipzout.vaddr != iova_start) { 1212 ehca_err(&shca->ib_device, "PHYP changed iova_start in " 1213 - "rereg_pmr, iova_start=%p iova_start_out=%lx e_mr=%p " 1214 - 
"mr_handle=%lx lkey=%x lkey_out=%x", iova_start, 1215 hipzout.vaddr, e_mr, e_mr->ipz_mr_handle.handle, 1216 e_mr->ib.ib_mr.lkey, hipzout.lkey); 1217 ret = -EFAULT; ··· 1235 ehca_rereg_mr_rereg1_exit0: 1236 if ( ret && (ret != -EAGAIN) ) 1237 ehca_err(&shca->ib_device, "ret=%i lkey=%x rkey=%x " 1238 - "pginfo=%p num_kpages=%lx num_hwpages=%lx", 1239 ret, *lkey, *rkey, pginfo, pginfo->num_kpages, 1240 pginfo->num_hwpages); 1241 return ret; ··· 1263 (e_mr->num_hwpages > MAX_RPAGES) || 1264 (pginfo->num_hwpages > e_mr->num_hwpages)) { 1265 ehca_dbg(&shca->ib_device, "Rereg3 case, " 1266 - "pginfo->num_hwpages=%lx e_mr->num_hwpages=%x", 1267 pginfo->num_hwpages, e_mr->num_hwpages); 1268 rereg_1_hcall = 0; 1269 rereg_3_hcall = 1; ··· 1295 h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr); 1296 if (h_ret != H_SUCCESS) { 1297 ehca_err(&shca->ib_device, "hipz_free_mr failed, " 1298 - "h_ret=%li e_mr=%p hca_hndl=%lx mr_hndl=%lx " 1299 "mr->lkey=%x", 1300 h_ret, e_mr, shca->ipz_hca_handle.handle, 1301 e_mr->ipz_mr_handle.handle, ··· 1328 ehca_rereg_mr_exit0: 1329 if (ret) 1330 ehca_err(&shca->ib_device, "ret=%i shca=%p e_mr=%p " 1331 - "iova_start=%p size=%lx acl=%x e_pd=%p pginfo=%p " 1332 - "num_kpages=%lx lkey=%x rkey=%x rereg_1_hcall=%x " 1333 "rereg_3_hcall=%x", ret, shca, e_mr, iova_start, size, 1334 acl, e_pd, pginfo, pginfo->num_kpages, *lkey, *rkey, 1335 rereg_1_hcall, rereg_3_hcall); ··· 1371 * FMRs are not shared and no MW bound to FMRs 1372 */ 1373 ehca_err(&shca->ib_device, "hipz_reregister_pmr failed " 1374 - "(Rereg1), h_ret=%li e_fmr=%p hca_hndl=%lx " 1375 - "mr_hndl=%lx lkey=%x lkey_out=%x", 1376 h_ret, e_fmr, shca->ipz_hca_handle.handle, 1377 e_fmr->ipz_mr_handle.handle, 1378 e_fmr->ib.ib_fmr.lkey, hipzout.lkey); ··· 1383 h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr); 1384 if (h_ret != H_SUCCESS) { 1385 ehca_err(&shca->ib_device, "hipz_free_mr failed, " 1386 - "h_ret=%li e_fmr=%p hca_hndl=%lx mr_hndl=%lx " 1387 "lkey=%x", 1388 h_ret, 
e_fmr, shca->ipz_hca_handle.handle, 1389 e_fmr->ipz_mr_handle.handle, ··· 1447 (u64)iova_start, hipz_acl, e_pd->fw_pd, 1448 &hipzout); 1449 if (h_ret != H_SUCCESS) { 1450 - ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%li " 1451 "shca=%p e_origmr=%p e_newmr=%p iova_start=%p acl=%x " 1452 - "e_pd=%p hca_hndl=%lx mr_hndl=%lx lkey=%x", 1453 h_ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd, 1454 shca->ipz_hca_handle.handle, 1455 e_origmr->ipz_mr_handle.handle, ··· 1527 &e_mr->ib.ib_mr.rkey); 1528 if (ret) { 1529 ehca_err(&shca->ib_device, "reg of internal max MR failed, " 1530 - "e_mr=%p iova_start=%p size_maxmr=%lx num_kpages=%x " 1531 "num_hwpages=%x", e_mr, iova_start, size_maxmr, 1532 num_kpages, num_hwpages); 1533 goto ehca_reg_internal_maxmr_exit1; ··· 1573 (u64)iova_start, hipz_acl, e_pd->fw_pd, 1574 &hipzout); 1575 if (h_ret != H_SUCCESS) { 1576 - ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%li " 1577 - "e_origmr=%p hca_hndl=%lx mr_hndl=%lx lkey=%x", 1578 h_ret, e_origmr, shca->ipz_hca_handle.handle, 1579 e_origmr->ipz_mr_handle.handle, 1580 e_origmr->ib.ib_mr.lkey); ··· 1651 /* check first buffer */ 1652 if (((u64)iova_start & ~PAGE_MASK) != (pbuf->addr & ~PAGE_MASK)) { 1653 ehca_gen_err("iova_start/addr mismatch, iova_start=%p " 1654 - "pbuf->addr=%lx pbuf->size=%lx", 1655 iova_start, pbuf->addr, pbuf->size); 1656 return -EINVAL; 1657 } 1658 if (((pbuf->addr + pbuf->size) % PAGE_SIZE) && 1659 (num_phys_buf > 1)) { 1660 - ehca_gen_err("addr/size mismatch in 1st buf, pbuf->addr=%lx " 1661 - "pbuf->size=%lx", pbuf->addr, pbuf->size); 1662 return -EINVAL; 1663 } 1664 1665 for (i = 0; i < num_phys_buf; i++) { 1666 if ((i > 0) && (pbuf->addr % PAGE_SIZE)) { 1667 - ehca_gen_err("bad address, i=%x pbuf->addr=%lx " 1668 - "pbuf->size=%lx", 1669 i, pbuf->addr, pbuf->size); 1670 return -EINVAL; 1671 } 1672 if (((i > 0) && /* not 1st */ 1673 (i < (num_phys_buf - 1)) && /* not last */ 1674 (pbuf->size % PAGE_SIZE)) || (pbuf->size == 0)) { 1675 - 
ehca_gen_err("bad size, i=%x pbuf->size=%lx", 1676 i, pbuf->size); 1677 return -EINVAL; 1678 } ··· 1705 page = page_list; 1706 for (i = 0; i < list_len; i++) { 1707 if (*page % e_fmr->fmr_page_size) { 1708 - ehca_gen_err("bad page, i=%x *page=%lx page=%p fmr=%p " 1709 "fmr_page_size=%x", i, *page, page, e_fmr, 1710 e_fmr->fmr_page_size); 1711 return -EINVAL; ··· 1743 (pginfo->next_hwpage * 1744 pginfo->hwpage_size)); 1745 if ( !(*kpage) ) { 1746 - ehca_gen_err("pgaddr=%lx " 1747 - "chunk->page_list[i]=%lx " 1748 - "i=%x next_hwpage=%lx", 1749 pgaddr, (u64)sg_dma_address( 1750 &chunk->page_list[i]), 1751 i, pginfo->next_hwpage); ··· 1795 for (t = start_idx; t <= end_idx; t++) { 1796 u64 pgaddr = page_to_pfn(sg_page(&page_list[t])) << PAGE_SHIFT; 1797 if (ehca_debug_level >= 3) 1798 - ehca_gen_dbg("chunk_page=%lx value=%016lx", pgaddr, 1799 *(u64 *)abs_to_virt(phys_to_abs(pgaddr))); 1800 if (pgaddr - PAGE_SIZE != *prev_pgaddr) { 1801 - ehca_gen_err("uncontiguous page found pgaddr=%lx " 1802 - "prev_pgaddr=%lx page_list_i=%x", 1803 pgaddr, *prev_pgaddr, t); 1804 return -EINVAL; 1805 } ··· 1833 << PAGE_SHIFT ); 1834 *kpage = phys_to_abs(pgaddr); 1835 if ( !(*kpage) ) { 1836 - ehca_gen_err("pgaddr=%lx i=%x", 1837 pgaddr, i); 1838 ret = -EFAULT; 1839 return ret; ··· 1846 if (pginfo->hwpage_cnt) { 1847 ehca_gen_err( 1848 "invalid alignment " 1849 - "pgaddr=%lx i=%x " 1850 - "mr_pgsize=%lx", 1851 pgaddr, i, 1852 pginfo->hwpage_size); 1853 ret = -EFAULT; ··· 1866 if (ehca_debug_level >= 3) { 1867 u64 val = *(u64 *)abs_to_virt( 1868 phys_to_abs(pgaddr)); 1869 - ehca_gen_dbg("kpage=%lx chunk_page=%lx " 1870 - "value=%016lx", 1871 *kpage, pgaddr, val); 1872 } 1873 prev_pgaddr = pgaddr; ··· 1944 if ((pginfo->kpage_cnt >= pginfo->num_kpages) || 1945 (pginfo->hwpage_cnt >= pginfo->num_hwpages)) { 1946 ehca_gen_err("kpage_cnt >= num_kpages, " 1947 - "kpage_cnt=%lx num_kpages=%lx " 1948 - "hwpage_cnt=%lx " 1949 - "num_hwpages=%lx i=%x", 1950 pginfo->kpage_cnt, 1951 
pginfo->num_kpages, 1952 pginfo->hwpage_cnt, ··· 1957 (pbuf->addr & ~(pginfo->hwpage_size - 1)) + 1958 (pginfo->next_hwpage * pginfo->hwpage_size)); 1959 if ( !(*kpage) && pbuf->addr ) { 1960 - ehca_gen_err("pbuf->addr=%lx pbuf->size=%lx " 1961 - "next_hwpage=%lx", pbuf->addr, 1962 pbuf->size, pginfo->next_hwpage); 1963 return -EFAULT; 1964 } ··· 1996 *kpage = phys_to_abs((*fmrlist & ~(pginfo->hwpage_size - 1)) + 1997 pginfo->next_hwpage * pginfo->hwpage_size); 1998 if ( !(*kpage) ) { 1999 - ehca_gen_err("*fmrlist=%lx fmrlist=%p " 2000 - "next_listelem=%lx next_hwpage=%lx", 2001 *fmrlist, fmrlist, 2002 pginfo->u.fmr.next_listelem, 2003 pginfo->next_hwpage); ··· 2025 ~(pginfo->hwpage_size - 1)); 2026 if (prev + pginfo->u.fmr.fmr_pgsize != p) { 2027 ehca_gen_err("uncontiguous fmr pages " 2028 - "found prev=%lx p=%lx " 2029 "idx=%x", prev, p, i + j); 2030 return -EINVAL; 2031 }
··· 204 } 205 if ((size == 0) || 206 (((u64)iova_start + size) < (u64)iova_start)) { 207 + ehca_err(pd->device, "bad input values: size=%llx iova_start=%p", 208 size, iova_start); 209 ib_mr = ERR_PTR(-EINVAL); 210 goto reg_phys_mr_exit0; ··· 309 } 310 311 if (length == 0 || virt + length < virt) { 312 + ehca_err(pd->device, "bad input values: length=%llx " 313 + "virt_base=%llx", length, virt); 314 ib_mr = ERR_PTR(-EINVAL); 315 goto reg_user_mr_exit0; 316 } ··· 373 &e_mr->ib.ib_mr.rkey); 374 if (ret == -EINVAL && pginfo.hwpage_size > PAGE_SIZE) { 375 ehca_warn(pd->device, "failed to register mr " 376 + "with hwpage_size=%llx", hwpage_size); 377 ehca_info(pd->device, "try to register mr with " 378 "kpage_size=%lx", PAGE_SIZE); 379 /* ··· 509 goto rereg_phys_mr_exit1; 510 if ((new_size == 0) || 511 (((u64)iova_start + new_size) < (u64)iova_start)) { 512 + ehca_err(mr->device, "bad input values: new_size=%llx " 513 "iova_start=%p", new_size, iova_start); 514 ret = -EINVAL; 515 goto rereg_phys_mr_exit1; ··· 580 581 h_ret = hipz_h_query_mr(shca->ipz_hca_handle, e_mr, &hipzout); 582 if (h_ret != H_SUCCESS) { 583 + ehca_err(mr->device, "hipz_mr_query failed, h_ret=%lli mr=%p " 584 + "hca_hndl=%llx mr_hndl=%llx lkey=%x", 585 h_ret, mr, shca->ipz_hca_handle.handle, 586 e_mr->ipz_mr_handle.handle, mr->lkey); 587 ret = ehca2ib_return_code(h_ret); ··· 630 /* TODO: BUSY: MR still has bound window(s) */ 631 h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr); 632 if (h_ret != H_SUCCESS) { 633 + ehca_err(mr->device, "hipz_free_mr failed, h_ret=%lli shca=%p " 634 + "e_mr=%p hca_hndl=%llx mr_hndl=%llx mr->lkey=%x", 635 h_ret, shca, e_mr, shca->ipz_hca_handle.handle, 636 e_mr->ipz_mr_handle.handle, mr->lkey); 637 ret = ehca2ib_return_code(h_ret); ··· 671 h_ret = hipz_h_alloc_resource_mw(shca->ipz_hca_handle, e_mw, 672 e_pd->fw_pd, &hipzout); 673 if (h_ret != H_SUCCESS) { 674 + ehca_err(pd->device, "hipz_mw_allocate failed, h_ret=%lli " 675 + "shca=%p hca_hndl=%llx mw=%p", 
676 h_ret, shca, shca->ipz_hca_handle.handle, e_mw); 677 ib_mw = ERR_PTR(ehca2ib_return_code(h_ret)); 678 goto alloc_mw_exit1; ··· 713 714 h_ret = hipz_h_free_resource_mw(shca->ipz_hca_handle, e_mw); 715 if (h_ret != H_SUCCESS) { 716 + ehca_err(mw->device, "hipz_free_mw failed, h_ret=%lli shca=%p " 717 + "mw=%p rkey=%x hca_hndl=%llx mw_hndl=%llx", 718 h_ret, shca, mw, mw->rkey, shca->ipz_hca_handle.handle, 719 e_mw->ipz_mw_handle.handle); 720 return ehca2ib_return_code(h_ret); ··· 840 goto map_phys_fmr_exit0; 841 if (iova % e_fmr->fmr_page_size) { 842 /* only whole-numbered pages */ 843 + ehca_err(fmr->device, "bad iova, iova=%llx fmr_page_size=%x", 844 iova, e_fmr->fmr_page_size); 845 ret = -EINVAL; 846 goto map_phys_fmr_exit0; ··· 878 map_phys_fmr_exit0: 879 if (ret) 880 ehca_err(fmr->device, "ret=%i fmr=%p page_list=%p list_len=%x " 881 + "iova=%llx", ret, fmr, page_list, list_len, iova); 882 return ret; 883 } /* end ehca_map_phys_fmr() */ 884 ··· 964 965 h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr); 966 if (h_ret != H_SUCCESS) { 967 + ehca_err(fmr->device, "hipz_free_mr failed, h_ret=%lli e_fmr=%p " 968 + "hca_hndl=%llx fmr_hndl=%llx fmr->lkey=%x", 969 h_ret, e_fmr, shca->ipz_hca_handle.handle, 970 e_fmr->ipz_mr_handle.handle, fmr->lkey); 971 ret = ehca2ib_return_code(h_ret); ··· 1007 (u64)iova_start, size, hipz_acl, 1008 e_pd->fw_pd, &hipzout); 1009 if (h_ret != H_SUCCESS) { 1010 + ehca_err(&shca->ib_device, "hipz_alloc_mr failed, h_ret=%lli " 1011 + "hca_hndl=%llx", h_ret, shca->ipz_hca_handle.handle); 1012 ret = ehca2ib_return_code(h_ret); 1013 goto ehca_reg_mr_exit0; 1014 } ··· 1033 ehca_reg_mr_exit1: 1034 h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr); 1035 if (h_ret != H_SUCCESS) { 1036 + ehca_err(&shca->ib_device, "h_ret=%lli shca=%p e_mr=%p " 1037 + "iova_start=%p size=%llx acl=%x e_pd=%p lkey=%x " 1038 + "pginfo=%p num_kpages=%llx num_hwpages=%llx ret=%i", 1039 h_ret, shca, e_mr, iova_start, size, acl, e_pd, 1040 
hipzout.lkey, pginfo, pginfo->num_kpages, 1041 pginfo->num_hwpages, ret); ··· 1045 ehca_reg_mr_exit0: 1046 if (ret) 1047 ehca_err(&shca->ib_device, "ret=%i shca=%p e_mr=%p " 1048 + "iova_start=%p size=%llx acl=%x e_pd=%p pginfo=%p " 1049 + "num_kpages=%llx num_hwpages=%llx", 1050 ret, shca, e_mr, iova_start, size, acl, e_pd, pginfo, 1051 pginfo->num_kpages, pginfo->num_hwpages); 1052 return ret; ··· 1116 */ 1117 if (h_ret != H_SUCCESS) { 1118 ehca_err(&shca->ib_device, "last " 1119 + "hipz_reg_rpage_mr failed, h_ret=%lli " 1120 + "e_mr=%p i=%x hca_hndl=%llx mr_hndl=%llx" 1121 " lkey=%x", h_ret, e_mr, i, 1122 shca->ipz_hca_handle.handle, 1123 e_mr->ipz_mr_handle.handle, ··· 1128 ret = 0; 1129 } else if (h_ret != H_PAGE_REGISTERED) { 1130 ehca_err(&shca->ib_device, "hipz_reg_rpage_mr failed, " 1131 + "h_ret=%lli e_mr=%p i=%x lkey=%x hca_hndl=%llx " 1132 + "mr_hndl=%llx", h_ret, e_mr, i, 1133 e_mr->ib.ib_mr.lkey, 1134 shca->ipz_hca_handle.handle, 1135 e_mr->ipz_mr_handle.handle); ··· 1145 ehca_reg_mr_rpages_exit0: 1146 if (ret) 1147 ehca_err(&shca->ib_device, "ret=%i shca=%p e_mr=%p pginfo=%p " 1148 + "num_kpages=%llx num_hwpages=%llx", ret, shca, e_mr, 1149 pginfo, pginfo->num_kpages, pginfo->num_hwpages); 1150 return ret; 1151 } /* end ehca_reg_mr_rpages() */ ··· 1184 ret = ehca_set_pagebuf(pginfo, pginfo->num_hwpages, kpage); 1185 if (ret) { 1186 ehca_err(&shca->ib_device, "set pagebuf failed, e_mr=%p " 1187 + "pginfo=%p type=%x num_kpages=%llx num_hwpages=%llx " 1188 "kpage=%p", e_mr, pginfo, pginfo->type, 1189 pginfo->num_kpages, pginfo->num_hwpages, kpage); 1190 goto ehca_rereg_mr_rereg1_exit1; ··· 1205 * (MW bound or MR is shared) 1206 */ 1207 ehca_warn(&shca->ib_device, "hipz_h_reregister_pmr failed " 1208 + "(Rereg1), h_ret=%lli e_mr=%p", h_ret, e_mr); 1209 *pginfo = pginfo_save; 1210 ret = -EAGAIN; 1211 } else if ((u64 *)hipzout.vaddr != iova_start) { 1212 ehca_err(&shca->ib_device, "PHYP changed iova_start in " 1213 + "rereg_pmr, iova_start=%p 
iova_start_out=%llx e_mr=%p " 1214 + "mr_handle=%llx lkey=%x lkey_out=%x", iova_start, 1215 hipzout.vaddr, e_mr, e_mr->ipz_mr_handle.handle, 1216 e_mr->ib.ib_mr.lkey, hipzout.lkey); 1217 ret = -EFAULT; ··· 1235 ehca_rereg_mr_rereg1_exit0: 1236 if ( ret && (ret != -EAGAIN) ) 1237 ehca_err(&shca->ib_device, "ret=%i lkey=%x rkey=%x " 1238 + "pginfo=%p num_kpages=%llx num_hwpages=%llx", 1239 ret, *lkey, *rkey, pginfo, pginfo->num_kpages, 1240 pginfo->num_hwpages); 1241 return ret; ··· 1263 (e_mr->num_hwpages > MAX_RPAGES) || 1264 (pginfo->num_hwpages > e_mr->num_hwpages)) { 1265 ehca_dbg(&shca->ib_device, "Rereg3 case, " 1266 + "pginfo->num_hwpages=%llx e_mr->num_hwpages=%x", 1267 pginfo->num_hwpages, e_mr->num_hwpages); 1268 rereg_1_hcall = 0; 1269 rereg_3_hcall = 1; ··· 1295 h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr); 1296 if (h_ret != H_SUCCESS) { 1297 ehca_err(&shca->ib_device, "hipz_free_mr failed, " 1298 + "h_ret=%lli e_mr=%p hca_hndl=%llx mr_hndl=%llx " 1299 "mr->lkey=%x", 1300 h_ret, e_mr, shca->ipz_hca_handle.handle, 1301 e_mr->ipz_mr_handle.handle, ··· 1328 ehca_rereg_mr_exit0: 1329 if (ret) 1330 ehca_err(&shca->ib_device, "ret=%i shca=%p e_mr=%p " 1331 + "iova_start=%p size=%llx acl=%x e_pd=%p pginfo=%p " 1332 + "num_kpages=%llx lkey=%x rkey=%x rereg_1_hcall=%x " 1333 "rereg_3_hcall=%x", ret, shca, e_mr, iova_start, size, 1334 acl, e_pd, pginfo, pginfo->num_kpages, *lkey, *rkey, 1335 rereg_1_hcall, rereg_3_hcall); ··· 1371 * FMRs are not shared and no MW bound to FMRs 1372 */ 1373 ehca_err(&shca->ib_device, "hipz_reregister_pmr failed " 1374 + "(Rereg1), h_ret=%lli e_fmr=%p hca_hndl=%llx " 1375 + "mr_hndl=%llx lkey=%x lkey_out=%x", 1376 h_ret, e_fmr, shca->ipz_hca_handle.handle, 1377 e_fmr->ipz_mr_handle.handle, 1378 e_fmr->ib.ib_fmr.lkey, hipzout.lkey); ··· 1383 h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr); 1384 if (h_ret != H_SUCCESS) { 1385 ehca_err(&shca->ib_device, "hipz_free_mr failed, " 1386 + "h_ret=%lli e_fmr=%p 
hca_hndl=%llx mr_hndl=%llx " 1387 "lkey=%x", 1388 h_ret, e_fmr, shca->ipz_hca_handle.handle, 1389 e_fmr->ipz_mr_handle.handle, ··· 1447 (u64)iova_start, hipz_acl, e_pd->fw_pd, 1448 &hipzout); 1449 if (h_ret != H_SUCCESS) { 1450 + ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lli " 1451 "shca=%p e_origmr=%p e_newmr=%p iova_start=%p acl=%x " 1452 + "e_pd=%p hca_hndl=%llx mr_hndl=%llx lkey=%x", 1453 h_ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd, 1454 shca->ipz_hca_handle.handle, 1455 e_origmr->ipz_mr_handle.handle, ··· 1527 &e_mr->ib.ib_mr.rkey); 1528 if (ret) { 1529 ehca_err(&shca->ib_device, "reg of internal max MR failed, " 1530 + "e_mr=%p iova_start=%p size_maxmr=%llx num_kpages=%x " 1531 "num_hwpages=%x", e_mr, iova_start, size_maxmr, 1532 num_kpages, num_hwpages); 1533 goto ehca_reg_internal_maxmr_exit1; ··· 1573 (u64)iova_start, hipz_acl, e_pd->fw_pd, 1574 &hipzout); 1575 if (h_ret != H_SUCCESS) { 1576 + ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lli " 1577 + "e_origmr=%p hca_hndl=%llx mr_hndl=%llx lkey=%x", 1578 h_ret, e_origmr, shca->ipz_hca_handle.handle, 1579 e_origmr->ipz_mr_handle.handle, 1580 e_origmr->ib.ib_mr.lkey); ··· 1651 /* check first buffer */ 1652 if (((u64)iova_start & ~PAGE_MASK) != (pbuf->addr & ~PAGE_MASK)) { 1653 ehca_gen_err("iova_start/addr mismatch, iova_start=%p " 1654 + "pbuf->addr=%llx pbuf->size=%llx", 1655 iova_start, pbuf->addr, pbuf->size); 1656 return -EINVAL; 1657 } 1658 if (((pbuf->addr + pbuf->size) % PAGE_SIZE) && 1659 (num_phys_buf > 1)) { 1660 + ehca_gen_err("addr/size mismatch in 1st buf, pbuf->addr=%llx " 1661 + "pbuf->size=%llx", pbuf->addr, pbuf->size); 1662 return -EINVAL; 1663 } 1664 1665 for (i = 0; i < num_phys_buf; i++) { 1666 if ((i > 0) && (pbuf->addr % PAGE_SIZE)) { 1667 + ehca_gen_err("bad address, i=%x pbuf->addr=%llx " 1668 + "pbuf->size=%llx", 1669 i, pbuf->addr, pbuf->size); 1670 return -EINVAL; 1671 } 1672 if (((i > 0) && /* not 1st */ 1673 (i < (num_phys_buf - 1)) && /* not 
last */ 1674 (pbuf->size % PAGE_SIZE)) || (pbuf->size == 0)) { 1675 + ehca_gen_err("bad size, i=%x pbuf->size=%llx", 1676 i, pbuf->size); 1677 return -EINVAL; 1678 } ··· 1705 page = page_list; 1706 for (i = 0; i < list_len; i++) { 1707 if (*page % e_fmr->fmr_page_size) { 1708 + ehca_gen_err("bad page, i=%x *page=%llx page=%p fmr=%p " 1709 "fmr_page_size=%x", i, *page, page, e_fmr, 1710 e_fmr->fmr_page_size); 1711 return -EINVAL; ··· 1743 (pginfo->next_hwpage * 1744 pginfo->hwpage_size)); 1745 if ( !(*kpage) ) { 1746 + ehca_gen_err("pgaddr=%llx " 1747 + "chunk->page_list[i]=%llx " 1748 + "i=%x next_hwpage=%llx", 1749 pgaddr, (u64)sg_dma_address( 1750 &chunk->page_list[i]), 1751 i, pginfo->next_hwpage); ··· 1795 for (t = start_idx; t <= end_idx; t++) { 1796 u64 pgaddr = page_to_pfn(sg_page(&page_list[t])) << PAGE_SHIFT; 1797 if (ehca_debug_level >= 3) 1798 + ehca_gen_dbg("chunk_page=%llx value=%016llx", pgaddr, 1799 *(u64 *)abs_to_virt(phys_to_abs(pgaddr))); 1800 if (pgaddr - PAGE_SIZE != *prev_pgaddr) { 1801 + ehca_gen_err("uncontiguous page found pgaddr=%llx " 1802 + "prev_pgaddr=%llx page_list_i=%x", 1803 pgaddr, *prev_pgaddr, t); 1804 return -EINVAL; 1805 } ··· 1833 << PAGE_SHIFT ); 1834 *kpage = phys_to_abs(pgaddr); 1835 if ( !(*kpage) ) { 1836 + ehca_gen_err("pgaddr=%llx i=%x", 1837 pgaddr, i); 1838 ret = -EFAULT; 1839 return ret; ··· 1846 if (pginfo->hwpage_cnt) { 1847 ehca_gen_err( 1848 "invalid alignment " 1849 + "pgaddr=%llx i=%x " 1850 + "mr_pgsize=%llx", 1851 pgaddr, i, 1852 pginfo->hwpage_size); 1853 ret = -EFAULT; ··· 1866 if (ehca_debug_level >= 3) { 1867 u64 val = *(u64 *)abs_to_virt( 1868 phys_to_abs(pgaddr)); 1869 + ehca_gen_dbg("kpage=%llx chunk_page=%llx " 1870 + "value=%016llx", 1871 *kpage, pgaddr, val); 1872 } 1873 prev_pgaddr = pgaddr; ··· 1944 if ((pginfo->kpage_cnt >= pginfo->num_kpages) || 1945 (pginfo->hwpage_cnt >= pginfo->num_hwpages)) { 1946 ehca_gen_err("kpage_cnt >= num_kpages, " 1947 + "kpage_cnt=%llx num_kpages=%llx " 1948 + 
"hwpage_cnt=%llx " 1949 + "num_hwpages=%llx i=%x", 1950 pginfo->kpage_cnt, 1951 pginfo->num_kpages, 1952 pginfo->hwpage_cnt, ··· 1957 (pbuf->addr & ~(pginfo->hwpage_size - 1)) + 1958 (pginfo->next_hwpage * pginfo->hwpage_size)); 1959 if ( !(*kpage) && pbuf->addr ) { 1960 + ehca_gen_err("pbuf->addr=%llx pbuf->size=%llx " 1961 + "next_hwpage=%llx", pbuf->addr, 1962 pbuf->size, pginfo->next_hwpage); 1963 return -EFAULT; 1964 } ··· 1996 *kpage = phys_to_abs((*fmrlist & ~(pginfo->hwpage_size - 1)) + 1997 pginfo->next_hwpage * pginfo->hwpage_size); 1998 if ( !(*kpage) ) { 1999 + ehca_gen_err("*fmrlist=%llx fmrlist=%p " 2000 + "next_listelem=%llx next_hwpage=%llx", 2001 *fmrlist, fmrlist, 2002 pginfo->u.fmr.next_listelem, 2003 pginfo->next_hwpage); ··· 2025 ~(pginfo->hwpage_size - 1)); 2026 if (prev + pginfo->u.fmr.fmr_pgsize != p) { 2027 ehca_gen_err("uncontiguous fmr pages " 2028 + "found prev=%llx p=%llx " 2029 "idx=%x", prev, p, i + j); 2030 return -EINVAL; 2031 }
+16 -16
drivers/infiniband/hw/ehca/ehca_qp.c
··· 331 if (cnt == (nr_q_pages - 1)) { /* last page! */ 332 if (h_ret != expected_hret) { 333 ehca_err(ib_dev, "hipz_qp_register_rpage() " 334 - "h_ret=%li", h_ret); 335 ret = ehca2ib_return_code(h_ret); 336 goto init_qp_queue1; 337 } ··· 345 } else { 346 if (h_ret != H_PAGE_REGISTERED) { 347 ehca_err(ib_dev, "hipz_qp_register_rpage() " 348 - "h_ret=%li", h_ret); 349 ret = ehca2ib_return_code(h_ret); 350 goto init_qp_queue1; 351 } ··· 709 710 h_ret = hipz_h_alloc_resource_qp(shca->ipz_hca_handle, &parms); 711 if (h_ret != H_SUCCESS) { 712 - ehca_err(pd->device, "h_alloc_resource_qp() failed h_ret=%li", 713 h_ret); 714 ret = ehca2ib_return_code(h_ret); 715 goto create_qp_exit1; ··· 1010 mqpcb, my_qp->galpas.kernel); 1011 if (hret != H_SUCCESS) { 1012 ehca_err(pd->device, "Could not modify SRQ to INIT " 1013 - "ehca_qp=%p qp_num=%x h_ret=%li", 1014 my_qp, my_qp->real_qp_num, hret); 1015 goto create_srq2; 1016 } ··· 1024 mqpcb, my_qp->galpas.kernel); 1025 if (hret != H_SUCCESS) { 1026 ehca_err(pd->device, "Could not enable SRQ " 1027 - "ehca_qp=%p qp_num=%x h_ret=%li", 1028 my_qp, my_qp->real_qp_num, hret); 1029 goto create_srq2; 1030 } ··· 1038 mqpcb, my_qp->galpas.kernel); 1039 if (hret != H_SUCCESS) { 1040 ehca_err(pd->device, "Could not modify SRQ to RTR " 1041 - "ehca_qp=%p qp_num=%x h_ret=%li", 1042 my_qp, my_qp->real_qp_num, hret); 1043 goto create_srq2; 1044 } ··· 1078 &bad_send_wqe_p, NULL, 2); 1079 if (h_ret != H_SUCCESS) { 1080 ehca_err(&shca->ib_device, "hipz_h_disable_and_get_wqe() failed" 1081 - " ehca_qp=%p qp_num=%x h_ret=%li", 1082 my_qp, qp_num, h_ret); 1083 return ehca2ib_return_code(h_ret); 1084 } ··· 1134 1135 if (ipz_queue_abs_to_offset(ipz_queue, wqe_p, &q_ofs)) { 1136 ehca_gen_err("Invalid offset for calculating left cqes " 1137 - "wqe_p=%#lx wqe_v=%p\n", wqe_p, wqe_v); 1138 return -EFAULT; 1139 } 1140 ··· 1168 &send_wqe_p, &recv_wqe_p, 4); 1169 if (h_ret != H_SUCCESS) { 1170 ehca_err(&shca->ib_device, "disable_and_get_wqe() " 1171 - "failed 
ehca_qp=%p qp_num=%x h_ret=%li", 1172 my_qp, qp_num, h_ret); 1173 return ehca2ib_return_code(h_ret); 1174 } ··· 1261 mqpcb, my_qp->galpas.kernel); 1262 if (h_ret != H_SUCCESS) { 1263 ehca_err(ibqp->device, "hipz_h_query_qp() failed " 1264 - "ehca_qp=%p qp_num=%x h_ret=%li", 1265 my_qp, ibqp->qp_num, h_ret); 1266 ret = ehca2ib_return_code(h_ret); 1267 goto modify_qp_exit1; ··· 1690 1691 if (h_ret != H_SUCCESS) { 1692 ret = ehca2ib_return_code(h_ret); 1693 - ehca_err(ibqp->device, "hipz_h_modify_qp() failed h_ret=%li " 1694 "ehca_qp=%p qp_num=%x", h_ret, my_qp, ibqp->qp_num); 1695 goto modify_qp_exit2; 1696 } ··· 1723 ret = ehca2ib_return_code(h_ret); 1724 ehca_err(ibqp->device, "ENABLE in context of " 1725 "RESET_2_INIT failed! Maybe you didn't get " 1726 - "a LID h_ret=%li ehca_qp=%p qp_num=%x", 1727 h_ret, my_qp, ibqp->qp_num); 1728 goto modify_qp_exit2; 1729 } ··· 1909 if (h_ret != H_SUCCESS) { 1910 ret = ehca2ib_return_code(h_ret); 1911 ehca_err(qp->device, "hipz_h_query_qp() failed " 1912 - "ehca_qp=%p qp_num=%x h_ret=%li", 1913 my_qp, qp->qp_num, h_ret); 1914 goto query_qp_exit1; 1915 } ··· 2074 2075 if (h_ret != H_SUCCESS) { 2076 ret = ehca2ib_return_code(h_ret); 2077 - ehca_err(ibsrq->device, "hipz_h_modify_qp() failed h_ret=%li " 2078 "ehca_qp=%p qp_num=%x", 2079 h_ret, my_qp, my_qp->real_qp_num); 2080 } ··· 2108 if (h_ret != H_SUCCESS) { 2109 ret = ehca2ib_return_code(h_ret); 2110 ehca_err(srq->device, "hipz_h_query_qp() failed " 2111 - "ehca_qp=%p qp_num=%x h_ret=%li", 2112 my_qp, my_qp->real_qp_num, h_ret); 2113 goto query_srq_exit1; 2114 } ··· 2179 2180 h_ret = hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp); 2181 if (h_ret != H_SUCCESS) { 2182 - ehca_err(dev, "hipz_h_destroy_qp() failed h_ret=%li " 2183 "ehca_qp=%p qp_num=%x", h_ret, my_qp, qp_num); 2184 return ehca2ib_return_code(h_ret); 2185 }
··· 331 if (cnt == (nr_q_pages - 1)) { /* last page! */ 332 if (h_ret != expected_hret) { 333 ehca_err(ib_dev, "hipz_qp_register_rpage() " 334 + "h_ret=%lli", h_ret); 335 ret = ehca2ib_return_code(h_ret); 336 goto init_qp_queue1; 337 } ··· 345 } else { 346 if (h_ret != H_PAGE_REGISTERED) { 347 ehca_err(ib_dev, "hipz_qp_register_rpage() " 348 + "h_ret=%lli", h_ret); 349 ret = ehca2ib_return_code(h_ret); 350 goto init_qp_queue1; 351 } ··· 709 710 h_ret = hipz_h_alloc_resource_qp(shca->ipz_hca_handle, &parms); 711 if (h_ret != H_SUCCESS) { 712 + ehca_err(pd->device, "h_alloc_resource_qp() failed h_ret=%lli", 713 h_ret); 714 ret = ehca2ib_return_code(h_ret); 715 goto create_qp_exit1; ··· 1010 mqpcb, my_qp->galpas.kernel); 1011 if (hret != H_SUCCESS) { 1012 ehca_err(pd->device, "Could not modify SRQ to INIT " 1013 + "ehca_qp=%p qp_num=%x h_ret=%lli", 1014 my_qp, my_qp->real_qp_num, hret); 1015 goto create_srq2; 1016 } ··· 1024 mqpcb, my_qp->galpas.kernel); 1025 if (hret != H_SUCCESS) { 1026 ehca_err(pd->device, "Could not enable SRQ " 1027 + "ehca_qp=%p qp_num=%x h_ret=%lli", 1028 my_qp, my_qp->real_qp_num, hret); 1029 goto create_srq2; 1030 } ··· 1038 mqpcb, my_qp->galpas.kernel); 1039 if (hret != H_SUCCESS) { 1040 ehca_err(pd->device, "Could not modify SRQ to RTR " 1041 + "ehca_qp=%p qp_num=%x h_ret=%lli", 1042 my_qp, my_qp->real_qp_num, hret); 1043 goto create_srq2; 1044 } ··· 1078 &bad_send_wqe_p, NULL, 2); 1079 if (h_ret != H_SUCCESS) { 1080 ehca_err(&shca->ib_device, "hipz_h_disable_and_get_wqe() failed" 1081 + " ehca_qp=%p qp_num=%x h_ret=%lli", 1082 my_qp, qp_num, h_ret); 1083 return ehca2ib_return_code(h_ret); 1084 } ··· 1134 1135 if (ipz_queue_abs_to_offset(ipz_queue, wqe_p, &q_ofs)) { 1136 ehca_gen_err("Invalid offset for calculating left cqes " 1137 + "wqe_p=%#llx wqe_v=%p\n", wqe_p, wqe_v); 1138 return -EFAULT; 1139 } 1140 ··· 1168 &send_wqe_p, &recv_wqe_p, 4); 1169 if (h_ret != H_SUCCESS) { 1170 ehca_err(&shca->ib_device, "disable_and_get_wqe() " 1171 + 
"failed ehca_qp=%p qp_num=%x h_ret=%lli", 1172 my_qp, qp_num, h_ret); 1173 return ehca2ib_return_code(h_ret); 1174 } ··· 1261 mqpcb, my_qp->galpas.kernel); 1262 if (h_ret != H_SUCCESS) { 1263 ehca_err(ibqp->device, "hipz_h_query_qp() failed " 1264 + "ehca_qp=%p qp_num=%x h_ret=%lli", 1265 my_qp, ibqp->qp_num, h_ret); 1266 ret = ehca2ib_return_code(h_ret); 1267 goto modify_qp_exit1; ··· 1690 1691 if (h_ret != H_SUCCESS) { 1692 ret = ehca2ib_return_code(h_ret); 1693 + ehca_err(ibqp->device, "hipz_h_modify_qp() failed h_ret=%lli " 1694 "ehca_qp=%p qp_num=%x", h_ret, my_qp, ibqp->qp_num); 1695 goto modify_qp_exit2; 1696 } ··· 1723 ret = ehca2ib_return_code(h_ret); 1724 ehca_err(ibqp->device, "ENABLE in context of " 1725 "RESET_2_INIT failed! Maybe you didn't get " 1726 + "a LID h_ret=%lli ehca_qp=%p qp_num=%x", 1727 h_ret, my_qp, ibqp->qp_num); 1728 goto modify_qp_exit2; 1729 } ··· 1909 if (h_ret != H_SUCCESS) { 1910 ret = ehca2ib_return_code(h_ret); 1911 ehca_err(qp->device, "hipz_h_query_qp() failed " 1912 + "ehca_qp=%p qp_num=%x h_ret=%lli", 1913 my_qp, qp->qp_num, h_ret); 1914 goto query_qp_exit1; 1915 } ··· 2074 2075 if (h_ret != H_SUCCESS) { 2076 ret = ehca2ib_return_code(h_ret); 2077 + ehca_err(ibsrq->device, "hipz_h_modify_qp() failed h_ret=%lli " 2078 "ehca_qp=%p qp_num=%x", 2079 h_ret, my_qp, my_qp->real_qp_num); 2080 } ··· 2108 if (h_ret != H_SUCCESS) { 2109 ret = ehca2ib_return_code(h_ret); 2110 ehca_err(srq->device, "hipz_h_query_qp() failed " 2111 + "ehca_qp=%p qp_num=%x h_ret=%lli", 2112 my_qp, my_qp->real_qp_num, h_ret); 2113 goto query_srq_exit1; 2114 } ··· 2179 2180 h_ret = hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp); 2181 if (h_ret != H_SUCCESS) { 2182 + ehca_err(dev, "hipz_h_destroy_qp() failed h_ret=%lli " 2183 "ehca_qp=%p qp_num=%x", h_ret, my_qp, qp_num); 2184 return ehca2ib_return_code(h_ret); 2185 }
+1 -1
drivers/infiniband/hw/ehca/ehca_reqs.c
··· 822 offset = qmap->next_wqe_idx * ipz_queue->qe_size; 823 wqe = (struct ehca_wqe *)ipz_qeit_calc(ipz_queue, offset); 824 if (!wqe) { 825 - ehca_err(cq->device, "Invalid wqe offset=%#lx on " 826 "qp_num=%#x", offset, my_qp->real_qp_num); 827 return nr; 828 }
··· 822 offset = qmap->next_wqe_idx * ipz_queue->qe_size; 823 wqe = (struct ehca_wqe *)ipz_qeit_calc(ipz_queue, offset); 824 if (!wqe) { 825 + ehca_err(cq->device, "Invalid wqe offset=%#llx on " 826 "qp_num=%#x", offset, my_qp->real_qp_num); 827 return nr; 828 }
+1 -1
drivers/infiniband/hw/ehca/ehca_sqp.c
··· 85 86 if (ret != H_SUCCESS) { 87 ehca_err(&shca->ib_device, 88 - "Can't define AQP1 for port %x. h_ret=%li", 89 port, ret); 90 return ret; 91 }
··· 85 86 if (ret != H_SUCCESS) { 87 ehca_err(&shca->ib_device, 88 + "Can't define AQP1 for port %x. h_ret=%lli", 89 port, ret); 90 return ret; 91 }
+1 -1
drivers/infiniband/hw/ehca/ehca_tools.h
··· 116 unsigned char *deb = (unsigned char *)(adr); \ 117 for (x = 0; x < l; x += 16) { \ 118 printk(KERN_INFO "EHCA_DMP:%s " format \ 119 - " adr=%p ofs=%04x %016lx %016lx\n", \ 120 __func__, ##args, deb, x, \ 121 *((u64 *)&deb[0]), *((u64 *)&deb[8])); \ 122 deb += 16; \
··· 116 unsigned char *deb = (unsigned char *)(adr); \ 117 for (x = 0; x < l; x += 16) { \ 118 printk(KERN_INFO "EHCA_DMP:%s " format \ 119 + " adr=%p ofs=%04x %016llx %016llx\n", \ 120 __func__, ##args, deb, x, \ 121 *((u64 *)&deb[0]), *((u64 *)&deb[8])); \ 122 deb += 16; \
+1 -1
drivers/infiniband/hw/ehca/ehca_uverbs.c
··· 114 115 physical = galpas->user.fw_handle; 116 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 117 - ehca_gen_dbg("vsize=%lx physical=%lx", vsize, physical); 118 /* VM_IO | VM_RESERVED are set by remap_pfn_range() */ 119 ret = remap_4k_pfn(vma, vma->vm_start, physical >> EHCA_PAGESHIFT, 120 vma->vm_page_prot);
··· 114 115 physical = galpas->user.fw_handle; 116 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 117 + ehca_gen_dbg("vsize=%llx physical=%llx", vsize, physical); 118 /* VM_IO | VM_RESERVED are set by remap_pfn_range() */ 119 ret = remap_4k_pfn(vma, vma->vm_start, physical >> EHCA_PAGESHIFT, 120 vma->vm_page_prot);
+28 -28
drivers/infiniband/hw/ehca/hcp_if.c
··· 226 u32 *eq_ist) 227 { 228 u64 ret; 229 - u64 outs[PLPAR_HCALL9_BUFSIZE]; 230 u64 allocate_controls; 231 232 /* resource type */ ··· 249 *eq_ist = (u32)outs[5]; 250 251 if (ret == H_NOT_ENOUGH_RESOURCES) 252 - ehca_gen_err("Not enough resource - ret=%li ", ret); 253 254 return ret; 255 } ··· 270 struct ehca_alloc_cq_parms *param) 271 { 272 u64 ret; 273 - u64 outs[PLPAR_HCALL9_BUFSIZE]; 274 275 ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs, 276 adapter_handle.handle, /* r4 */ ··· 287 hcp_galpas_ctor(&cq->galpas, outs[5], outs[6]); 288 289 if (ret == H_NOT_ENOUGH_RESOURCES) 290 - ehca_gen_err("Not enough resources. ret=%li", ret); 291 292 return ret; 293 } ··· 297 { 298 u64 ret; 299 u64 allocate_controls, max_r10_reg, r11, r12; 300 - u64 outs[PLPAR_HCALL9_BUFSIZE]; 301 302 allocate_controls = 303 EHCA_BMASK_SET(H_ALL_RES_QP_ENHANCED_OPS, parms->ext_type) ··· 362 hcp_galpas_ctor(&parms->galpas, outs[6], outs[6]); 363 364 if (ret == H_NOT_ENOUGH_RESOURCES) 365 - ehca_gen_err("Not enough resources. 
ret=%li", ret); 366 367 return ret; 368 } ··· 454 const u64 count) 455 { 456 if (count != 1) { 457 - ehca_gen_err("Ppage counter=%lx", count); 458 return H_PARAMETER; 459 } 460 return hipz_h_register_rpage(adapter_handle, ··· 489 const struct h_galpa gal) 490 { 491 if (count != 1) { 492 - ehca_gen_err("Page counter=%lx", count); 493 return H_PARAMETER; 494 } 495 ··· 508 const struct h_galpa galpa) 509 { 510 if (count > 1) { 511 - ehca_gen_err("Page counter=%lx", count); 512 return H_PARAMETER; 513 } 514 ··· 525 int dis_and_get_function_code) 526 { 527 u64 ret; 528 - u64 outs[PLPAR_HCALL9_BUFSIZE]; 529 530 ret = ehca_plpar_hcall9(H_DISABLE_AND_GETC, outs, 531 adapter_handle.handle, /* r4 */ ··· 548 struct h_galpa gal) 549 { 550 u64 ret; 551 - u64 outs[PLPAR_HCALL9_BUFSIZE]; 552 ret = ehca_plpar_hcall9(H_MODIFY_QP, outs, 553 adapter_handle.handle, /* r4 */ 554 qp_handle.handle, /* r5 */ ··· 557 0, 0, 0, 0, 0); 558 559 if (ret == H_NOT_ENOUGH_RESOURCES) 560 - ehca_gen_err("Insufficient resources ret=%li", ret); 561 562 return ret; 563 } ··· 579 struct ehca_qp *qp) 580 { 581 u64 ret; 582 - u64 outs[PLPAR_HCALL9_BUFSIZE]; 583 584 ret = hcp_galpas_dtor(&qp->galpas); 585 if (ret) { ··· 593 qp->ipz_qp_handle.handle, /* r6 */ 594 0, 0, 0, 0, 0, 0); 595 if (ret == H_HARDWARE) 596 - ehca_gen_err("HCA not operational. ret=%li", ret); 597 598 ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE, 599 adapter_handle.handle, /* r4 */ ··· 601 0, 0, 0, 0, 0); 602 603 if (ret == H_RESOURCE) 604 - ehca_gen_err("Resource still in use. ret=%li", ret); 605 606 return ret; 607 } ··· 625 u32 * bma_qp_nr) 626 { 627 u64 ret; 628 - u64 outs[PLPAR_HCALL9_BUFSIZE]; 629 630 ret = ehca_plpar_hcall9(H_DEFINE_AQP1, outs, 631 adapter_handle.handle, /* r4 */ ··· 636 *bma_qp_nr = (u32)outs[1]; 637 638 if (ret == H_ALIAS_EXIST) 639 - ehca_gen_err("AQP1 already exists. ret=%li", ret); 640 641 return ret; 642 } ··· 658 0, 0); 659 660 if (ret == H_NOT_ENOUGH_RESOURCES) 661 - ehca_gen_err("Not enough resources. 
ret=%li", ret); 662 663 return ret; 664 } ··· 697 0, 0, 0, 0); 698 699 if (ret == H_RESOURCE) 700 - ehca_gen_err("H_FREE_RESOURCE failed ret=%li ", ret); 701 702 return ret; 703 } ··· 719 0, 0, 0, 0, 0); 720 721 if (ret == H_RESOURCE) 722 - ehca_gen_err("Resource in use. ret=%li ", ret); 723 724 return ret; 725 } ··· 733 struct ehca_mr_hipzout_parms *outparms) 734 { 735 u64 ret; 736 - u64 outs[PLPAR_HCALL9_BUFSIZE]; 737 738 ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs, 739 adapter_handle.handle, /* r4 */ ··· 774 775 if ((count > 1) && (logical_address_of_page & (EHCA_PAGESIZE-1))) { 776 ehca_gen_err("logical_address_of_page not on a 4k boundary " 777 - "adapter_handle=%lx mr=%p mr_handle=%lx " 778 "pagesize=%x queue_type=%x " 779 - "logical_address_of_page=%lx count=%lx", 780 adapter_handle.handle, mr, 781 mr->ipz_mr_handle.handle, pagesize, queue_type, 782 logical_address_of_page, count); ··· 794 struct ehca_mr_hipzout_parms *outparms) 795 { 796 u64 ret; 797 - u64 outs[PLPAR_HCALL9_BUFSIZE]; 798 799 ret = ehca_plpar_hcall9(H_QUERY_MR, outs, 800 adapter_handle.handle, /* r4 */ ··· 828 struct ehca_mr_hipzout_parms *outparms) 829 { 830 u64 ret; 831 - u64 outs[PLPAR_HCALL9_BUFSIZE]; 832 833 ret = ehca_plpar_hcall9(H_REREGISTER_PMR, outs, 834 adapter_handle.handle, /* r4 */ ··· 855 struct ehca_mr_hipzout_parms *outparms) 856 { 857 u64 ret; 858 - u64 outs[PLPAR_HCALL9_BUFSIZE]; 859 860 ret = ehca_plpar_hcall9(H_REGISTER_SMR, outs, 861 adapter_handle.handle, /* r4 */ ··· 877 struct ehca_mw_hipzout_parms *outparms) 878 { 879 u64 ret; 880 - u64 outs[PLPAR_HCALL9_BUFSIZE]; 881 882 ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs, 883 adapter_handle.handle, /* r4 */ ··· 895 struct ehca_mw_hipzout_parms *outparms) 896 { 897 u64 ret; 898 - u64 outs[PLPAR_HCALL9_BUFSIZE]; 899 900 ret = ehca_plpar_hcall9(H_QUERY_MW, outs, 901 adapter_handle.handle, /* r4 */
··· 226 u32 *eq_ist) 227 { 228 u64 ret; 229 + unsigned long outs[PLPAR_HCALL9_BUFSIZE]; 230 u64 allocate_controls; 231 232 /* resource type */ ··· 249 *eq_ist = (u32)outs[5]; 250 251 if (ret == H_NOT_ENOUGH_RESOURCES) 252 + ehca_gen_err("Not enough resource - ret=%lli ", ret); 253 254 return ret; 255 } ··· 270 struct ehca_alloc_cq_parms *param) 271 { 272 u64 ret; 273 + unsigned long outs[PLPAR_HCALL9_BUFSIZE]; 274 275 ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs, 276 adapter_handle.handle, /* r4 */ ··· 287 hcp_galpas_ctor(&cq->galpas, outs[5], outs[6]); 288 289 if (ret == H_NOT_ENOUGH_RESOURCES) 290 + ehca_gen_err("Not enough resources. ret=%lli", ret); 291 292 return ret; 293 } ··· 297 { 298 u64 ret; 299 u64 allocate_controls, max_r10_reg, r11, r12; 300 + unsigned long outs[PLPAR_HCALL9_BUFSIZE]; 301 302 allocate_controls = 303 EHCA_BMASK_SET(H_ALL_RES_QP_ENHANCED_OPS, parms->ext_type) ··· 362 hcp_galpas_ctor(&parms->galpas, outs[6], outs[6]); 363 364 if (ret == H_NOT_ENOUGH_RESOURCES) 365 + ehca_gen_err("Not enough resources. 
ret=%lli", ret); 366 367 return ret; 368 } ··· 454 const u64 count) 455 { 456 if (count != 1) { 457 + ehca_gen_err("Ppage counter=%llx", count); 458 return H_PARAMETER; 459 } 460 return hipz_h_register_rpage(adapter_handle, ··· 489 const struct h_galpa gal) 490 { 491 if (count != 1) { 492 + ehca_gen_err("Page counter=%llx", count); 493 return H_PARAMETER; 494 } 495 ··· 508 const struct h_galpa galpa) 509 { 510 if (count > 1) { 511 + ehca_gen_err("Page counter=%llx", count); 512 return H_PARAMETER; 513 } 514 ··· 525 int dis_and_get_function_code) 526 { 527 u64 ret; 528 + unsigned long outs[PLPAR_HCALL9_BUFSIZE]; 529 530 ret = ehca_plpar_hcall9(H_DISABLE_AND_GETC, outs, 531 adapter_handle.handle, /* r4 */ ··· 548 struct h_galpa gal) 549 { 550 u64 ret; 551 + unsigned long outs[PLPAR_HCALL9_BUFSIZE]; 552 ret = ehca_plpar_hcall9(H_MODIFY_QP, outs, 553 adapter_handle.handle, /* r4 */ 554 qp_handle.handle, /* r5 */ ··· 557 0, 0, 0, 0, 0); 558 559 if (ret == H_NOT_ENOUGH_RESOURCES) 560 + ehca_gen_err("Insufficient resources ret=%lli", ret); 561 562 return ret; 563 } ··· 579 struct ehca_qp *qp) 580 { 581 u64 ret; 582 + unsigned long outs[PLPAR_HCALL9_BUFSIZE]; 583 584 ret = hcp_galpas_dtor(&qp->galpas); 585 if (ret) { ··· 593 qp->ipz_qp_handle.handle, /* r6 */ 594 0, 0, 0, 0, 0, 0); 595 if (ret == H_HARDWARE) 596 + ehca_gen_err("HCA not operational. ret=%lli", ret); 597 598 ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE, 599 adapter_handle.handle, /* r4 */ ··· 601 0, 0, 0, 0, 0); 602 603 if (ret == H_RESOURCE) 604 + ehca_gen_err("Resource still in use. ret=%lli", ret); 605 606 return ret; 607 } ··· 625 u32 * bma_qp_nr) 626 { 627 u64 ret; 628 + unsigned long outs[PLPAR_HCALL9_BUFSIZE]; 629 630 ret = ehca_plpar_hcall9(H_DEFINE_AQP1, outs, 631 adapter_handle.handle, /* r4 */ ··· 636 *bma_qp_nr = (u32)outs[1]; 637 638 if (ret == H_ALIAS_EXIST) 639 + ehca_gen_err("AQP1 already exists. 
ret=%lli", ret); 640 641 return ret; 642 } ··· 658 0, 0); 659 660 if (ret == H_NOT_ENOUGH_RESOURCES) 661 + ehca_gen_err("Not enough resources. ret=%lli", ret); 662 663 return ret; 664 } ··· 697 0, 0, 0, 0); 698 699 if (ret == H_RESOURCE) 700 + ehca_gen_err("H_FREE_RESOURCE failed ret=%lli ", ret); 701 702 return ret; 703 } ··· 719 0, 0, 0, 0, 0); 720 721 if (ret == H_RESOURCE) 722 + ehca_gen_err("Resource in use. ret=%lli ", ret); 723 724 return ret; 725 } ··· 733 struct ehca_mr_hipzout_parms *outparms) 734 { 735 u64 ret; 736 + unsigned long outs[PLPAR_HCALL9_BUFSIZE]; 737 738 ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs, 739 adapter_handle.handle, /* r4 */ ··· 774 775 if ((count > 1) && (logical_address_of_page & (EHCA_PAGESIZE-1))) { 776 ehca_gen_err("logical_address_of_page not on a 4k boundary " 777 + "adapter_handle=%llx mr=%p mr_handle=%llx " 778 "pagesize=%x queue_type=%x " 779 + "logical_address_of_page=%llx count=%llx", 780 adapter_handle.handle, mr, 781 mr->ipz_mr_handle.handle, pagesize, queue_type, 782 logical_address_of_page, count); ··· 794 struct ehca_mr_hipzout_parms *outparms) 795 { 796 u64 ret; 797 + unsigned long outs[PLPAR_HCALL9_BUFSIZE]; 798 799 ret = ehca_plpar_hcall9(H_QUERY_MR, outs, 800 adapter_handle.handle, /* r4 */ ··· 828 struct ehca_mr_hipzout_parms *outparms) 829 { 830 u64 ret; 831 + unsigned long outs[PLPAR_HCALL9_BUFSIZE]; 832 833 ret = ehca_plpar_hcall9(H_REREGISTER_PMR, outs, 834 adapter_handle.handle, /* r4 */ ··· 855 struct ehca_mr_hipzout_parms *outparms) 856 { 857 u64 ret; 858 + unsigned long outs[PLPAR_HCALL9_BUFSIZE]; 859 860 ret = ehca_plpar_hcall9(H_REGISTER_SMR, outs, 861 adapter_handle.handle, /* r4 */ ··· 877 struct ehca_mw_hipzout_parms *outparms) 878 { 879 u64 ret; 880 + unsigned long outs[PLPAR_HCALL9_BUFSIZE]; 881 882 ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs, 883 adapter_handle.handle, /* r4 */ ··· 895 struct ehca_mw_hipzout_parms *outparms) 896 { 897 u64 ret; 898 + unsigned long outs[PLPAR_HCALL9_BUFSIZE]; 
899 900 ret = ehca_plpar_hcall9(H_QUERY_MW, outs, 901 adapter_handle.handle, /* r4 */
+19 -9
drivers/infiniband/hw/mlx4/qp.c
··· 1462 } 1463 1464 static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_send_wr *wr, 1465 - struct mlx4_ib_qp *qp, unsigned *lso_seg_len) 1466 { 1467 unsigned halign = ALIGN(sizeof *wqe + wr->wr.ud.hlen, 16); 1468 ··· 1480 1481 memcpy(wqe->header, wr->wr.ud.header, wr->wr.ud.hlen); 1482 1483 - /* make sure LSO header is written before overwriting stamping */ 1484 - wmb(); 1485 - 1486 - wqe->mss_hdr_size = cpu_to_be32((wr->wr.ud.mss - wr->wr.ud.hlen) << 16 | 1487 - wr->wr.ud.hlen); 1488 - 1489 *lso_seg_len = halign; 1490 return 0; 1491 } ··· 1515 int uninitialized_var(stamp); 1516 int uninitialized_var(size); 1517 unsigned uninitialized_var(seglen); 1518 int i; 1519 1520 spin_lock_irqsave(&qp->sq.lock, flags); ··· 1525 ind = qp->sq_next_wqe; 1526 1527 for (nreq = 0; wr; ++nreq, wr = wr->next) { 1528 if (mlx4_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { 1529 err = -ENOMEM; 1530 *bad_wr = wr; ··· 1608 size += sizeof (struct mlx4_wqe_datagram_seg) / 16; 1609 1610 if (wr->opcode == IB_WR_LSO) { 1611 - err = build_lso_seg(wqe, wr, qp, &seglen); 1612 if (unlikely(err)) { 1613 *bad_wr = wr; 1614 goto out; 1615 } 1616 wqe += seglen; 1617 size += seglen / 16; 1618 } ··· 1655 for (i = wr->num_sge - 1; i >= 0; --i, --dseg) 1656 set_data_seg(dseg, wr->sg_list + i); 1657 1658 ctrl->fence_size = (wr->send_flags & IB_SEND_FENCE ? 1659 MLX4_WQE_CTRL_FENCE : 0) | size; 1660 ··· 1697 stamp_send_wqe(qp, stamp, size * 16); 1698 ind = pad_wraparound(qp, ind); 1699 } 1700 - 1701 } 1702 1703 out:
··· 1462 } 1463 1464 static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_send_wr *wr, 1465 + struct mlx4_ib_qp *qp, unsigned *lso_seg_len, 1466 + __be32 *lso_hdr_sz) 1467 { 1468 unsigned halign = ALIGN(sizeof *wqe + wr->wr.ud.hlen, 16); 1469 ··· 1479 1480 memcpy(wqe->header, wr->wr.ud.header, wr->wr.ud.hlen); 1481 1482 + *lso_hdr_sz = cpu_to_be32((wr->wr.ud.mss - wr->wr.ud.hlen) << 16 | 1483 + wr->wr.ud.hlen); 1484 *lso_seg_len = halign; 1485 return 0; 1486 } ··· 1518 int uninitialized_var(stamp); 1519 int uninitialized_var(size); 1520 unsigned uninitialized_var(seglen); 1521 + __be32 dummy; 1522 + __be32 *lso_wqe; 1523 + __be32 uninitialized_var(lso_hdr_sz); 1524 int i; 1525 1526 spin_lock_irqsave(&qp->sq.lock, flags); ··· 1525 ind = qp->sq_next_wqe; 1526 1527 for (nreq = 0; wr; ++nreq, wr = wr->next) { 1528 + lso_wqe = &dummy; 1529 + 1530 if (mlx4_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { 1531 err = -ENOMEM; 1532 *bad_wr = wr; ··· 1606 size += sizeof (struct mlx4_wqe_datagram_seg) / 16; 1607 1608 if (wr->opcode == IB_WR_LSO) { 1609 + err = build_lso_seg(wqe, wr, qp, &seglen, &lso_hdr_sz); 1610 if (unlikely(err)) { 1611 *bad_wr = wr; 1612 goto out; 1613 } 1614 + lso_wqe = (__be32 *) wqe; 1615 wqe += seglen; 1616 size += seglen / 16; 1617 } ··· 1652 for (i = wr->num_sge - 1; i >= 0; --i, --dseg) 1653 set_data_seg(dseg, wr->sg_list + i); 1654 1655 + /* 1656 + * Possibly overwrite stamping in cacheline with LSO 1657 + * segment only after making sure all data segments 1658 + * are written. 1659 + */ 1660 + wmb(); 1661 + *lso_wqe = lso_hdr_sz; 1662 + 1663 ctrl->fence_size = (wr->send_flags & IB_SEND_FENCE ? 1664 MLX4_WQE_CTRL_FENCE : 0) | size; 1665 ··· 1686 stamp_send_wqe(qp, stamp, size * 16); 1687 ind = pad_wraparound(qp, ind); 1688 } 1689 } 1690 1691 out:
+15 -12
drivers/infiniband/ulp/ipoib/ipoib_main.c
··· 106 107 ipoib_dbg(priv, "bringing up interface\n"); 108 109 - set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags); 110 111 if (ipoib_pkey_dev_delay_open(dev)) 112 return 0; 113 114 - napi_enable(&priv->napi); 115 116 - if (ipoib_ib_dev_open(dev)) { 117 - napi_disable(&priv->napi); 118 - return -EINVAL; 119 - } 120 - 121 - if (ipoib_ib_dev_up(dev)) { 122 - ipoib_ib_dev_stop(dev, 1); 123 - napi_disable(&priv->napi); 124 - return -EINVAL; 125 - } 126 127 if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) { 128 struct ipoib_dev_priv *cpriv; ··· 138 netif_start_queue(dev); 139 140 return 0; 141 } 142 143 static int ipoib_stop(struct net_device *dev)
··· 106 107 ipoib_dbg(priv, "bringing up interface\n"); 108 109 + if (!test_and_set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) 110 + napi_enable(&priv->napi); 111 112 if (ipoib_pkey_dev_delay_open(dev)) 113 return 0; 114 115 + if (ipoib_ib_dev_open(dev)) 116 + goto err_disable; 117 118 + if (ipoib_ib_dev_up(dev)) 119 + goto err_stop; 120 121 if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) { 122 struct ipoib_dev_priv *cpriv; ··· 144 netif_start_queue(dev); 145 146 return 0; 147 + 148 + err_stop: 149 + ipoib_ib_dev_stop(dev, 1); 150 + 151 + err_disable: 152 + napi_disable(&priv->napi); 153 + clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags); 154 + 155 + return -EINVAL; 156 } 157 158 static int ipoib_stop(struct net_device *dev)
+1 -1
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
··· 409 } 410 411 if (mcast->logcount++ < 20) { 412 - if (status == -ETIMEDOUT) { 413 ipoib_dbg_mcast(priv, "multicast join failed for %pI6, status %d\n", 414 mcast->mcmember.mgid.raw, status); 415 } else {
··· 409 } 410 411 if (mcast->logcount++ < 20) { 412 + if (status == -ETIMEDOUT || status == -EAGAIN) { 413 ipoib_dbg_mcast(priv, "multicast join failed for %pI6, status %d\n", 414 mcast->mcmember.mgid.raw, status); 415 } else {
+8 -3
drivers/infiniband/ulp/ipoib/ipoib_vlan.c
··· 61 62 ppriv = netdev_priv(pdev); 63 64 mutex_lock(&ppriv->vlan_mutex); 65 66 /* ··· 112 goto device_init_failed; 113 } 114 115 - result = register_netdev(priv->dev); 116 if (result) { 117 ipoib_warn(priv, "failed to initialize; error %i", result); 118 goto register_failed; ··· 135 list_add_tail(&priv->list, &ppriv->child_intfs); 136 137 mutex_unlock(&ppriv->vlan_mutex); 138 139 return 0; 140 141 sysfs_failed: 142 ipoib_delete_debug_files(priv->dev); 143 - unregister_netdev(priv->dev); 144 145 register_failed: 146 ipoib_dev_cleanup(priv->dev); ··· 151 152 err: 153 mutex_unlock(&ppriv->vlan_mutex); 154 return result; 155 } 156 ··· 165 166 ppriv = netdev_priv(pdev); 167 168 mutex_lock(&ppriv->vlan_mutex); 169 list_for_each_entry_safe(priv, tpriv, &ppriv->child_intfs, list) { 170 if (priv->pkey == pkey) { 171 - unregister_netdev(priv->dev); 172 ipoib_dev_cleanup(priv->dev); 173 list_del(&priv->list); 174 free_netdev(priv->dev); ··· 179 } 180 } 181 mutex_unlock(&ppriv->vlan_mutex); 182 183 return ret; 184 }
··· 61 62 ppriv = netdev_priv(pdev); 63 64 + rtnl_lock(); 65 mutex_lock(&ppriv->vlan_mutex); 66 67 /* ··· 111 goto device_init_failed; 112 } 113 114 + result = register_netdevice(priv->dev); 115 if (result) { 116 ipoib_warn(priv, "failed to initialize; error %i", result); 117 goto register_failed; ··· 134 list_add_tail(&priv->list, &ppriv->child_intfs); 135 136 mutex_unlock(&ppriv->vlan_mutex); 137 + rtnl_unlock(); 138 139 return 0; 140 141 sysfs_failed: 142 ipoib_delete_debug_files(priv->dev); 143 + unregister_netdevice(priv->dev); 144 145 register_failed: 146 ipoib_dev_cleanup(priv->dev); ··· 149 150 err: 151 mutex_unlock(&ppriv->vlan_mutex); 152 + rtnl_unlock(); 153 return result; 154 } 155 ··· 162 163 ppriv = netdev_priv(pdev); 164 165 + rtnl_lock(); 166 mutex_lock(&ppriv->vlan_mutex); 167 list_for_each_entry_safe(priv, tpriv, &ppriv->child_intfs, list) { 168 if (priv->pkey == pkey) { 169 + unregister_netdevice(priv->dev); 170 ipoib_dev_cleanup(priv->dev); 171 list_del(&priv->list); 172 free_netdev(priv->dev); ··· 175 } 176 } 177 mutex_unlock(&ppriv->vlan_mutex); 178 + rtnl_unlock(); 179 180 return ret; 181 }
+3 -3
drivers/net/mlx4/profile.c
··· 107 profile[MLX4_RES_AUXC].num = request->num_qp; 108 profile[MLX4_RES_SRQ].num = request->num_srq; 109 profile[MLX4_RES_CQ].num = request->num_cq; 110 - profile[MLX4_RES_EQ].num = min(dev_cap->max_eqs, 111 - dev_cap->reserved_eqs + 112 - num_possible_cpus() + 1); 113 profile[MLX4_RES_DMPT].num = request->num_mpt; 114 profile[MLX4_RES_CMPT].num = MLX4_NUM_CMPTS; 115 profile[MLX4_RES_MTT].num = request->num_mtt;
··· 107 profile[MLX4_RES_AUXC].num = request->num_qp; 108 profile[MLX4_RES_SRQ].num = request->num_srq; 109 profile[MLX4_RES_CQ].num = request->num_cq; 110 + profile[MLX4_RES_EQ].num = min_t(unsigned, dev_cap->max_eqs, 111 + dev_cap->reserved_eqs + 112 + num_possible_cpus() + 1); 113 profile[MLX4_RES_DMPT].num = request->num_mpt; 114 profile[MLX4_RES_CMPT].num = MLX4_NUM_CMPTS; 115 profile[MLX4_RES_MTT].num = request->num_mtt;