Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
  IB/ehca: Use consistent types for ehca_plpar_hcall9()
  IB/ehca: Fix printk format warnings from u64 type change
  IPoIB: Do not print error messages for multicast join retries
  IB/mlx4: Fix memory ordering problem when posting LSO sends
  mlx4_core: Fix min() warning
  IPoIB: Fix deadlock between ipoib_open() and child interface create
  IPoIB: Fix hang in napi_disable() if P_Key is never found

+189 -171
+8 -8
drivers/infiniband/hw/ehca/ehca_cq.c
··· 196 196 197 197 if (h_ret != H_SUCCESS) { 198 198 ehca_err(device, "hipz_h_alloc_resource_cq() failed " 199 - "h_ret=%li device=%p", h_ret, device); 199 + "h_ret=%lli device=%p", h_ret, device); 200 200 cq = ERR_PTR(ehca2ib_return_code(h_ret)); 201 201 goto create_cq_exit2; 202 202 } ··· 232 232 233 233 if (h_ret < H_SUCCESS) { 234 234 ehca_err(device, "hipz_h_register_rpage_cq() failed " 235 - "ehca_cq=%p cq_num=%x h_ret=%li counter=%i " 235 + "ehca_cq=%p cq_num=%x h_ret=%lli counter=%i " 236 236 "act_pages=%i", my_cq, my_cq->cq_number, 237 237 h_ret, counter, param.act_pages); 238 238 cq = ERR_PTR(-EINVAL); ··· 244 244 if ((h_ret != H_SUCCESS) || vpage) { 245 245 ehca_err(device, "Registration of pages not " 246 246 "complete ehca_cq=%p cq_num=%x " 247 - "h_ret=%li", my_cq, my_cq->cq_number, 247 + "h_ret=%lli", my_cq, my_cq->cq_number, 248 248 h_ret); 249 249 cq = ERR_PTR(-EAGAIN); 250 250 goto create_cq_exit4; ··· 252 252 } else { 253 253 if (h_ret != H_PAGE_REGISTERED) { 254 254 ehca_err(device, "Registration of page failed " 255 - "ehca_cq=%p cq_num=%x h_ret=%li " 255 + "ehca_cq=%p cq_num=%x h_ret=%lli " 256 256 "counter=%i act_pages=%i", 257 257 my_cq, my_cq->cq_number, 258 258 h_ret, counter, param.act_pages); ··· 266 266 267 267 gal = my_cq->galpas.kernel; 268 268 cqx_fec = hipz_galpa_load(gal, CQTEMM_OFFSET(cqx_fec)); 269 - ehca_dbg(device, "ehca_cq=%p cq_num=%x CQX_FEC=%lx", 269 + ehca_dbg(device, "ehca_cq=%p cq_num=%x CQX_FEC=%llx", 270 270 my_cq, my_cq->cq_number, cqx_fec); 271 271 272 272 my_cq->ib_cq.cqe = my_cq->nr_of_entries = ··· 307 307 h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 1); 308 308 if (h_ret != H_SUCCESS) 309 309 ehca_err(device, "hipz_h_destroy_cq() failed ehca_cq=%p " 310 - "cq_num=%x h_ret=%li", my_cq, my_cq->cq_number, h_ret); 310 + "cq_num=%x h_ret=%lli", my_cq, my_cq->cq_number, h_ret); 311 311 312 312 create_cq_exit2: 313 313 write_lock_irqsave(&ehca_cq_idr_lock, flags); ··· 355 355 h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 0); 356 356 if (h_ret == H_R_STATE) { 357 357 /* cq in err: read err data and destroy it forcibly */ 358 - ehca_dbg(device, "ehca_cq=%p cq_num=%x ressource=%lx in err " 358 + ehca_dbg(device, "ehca_cq=%p cq_num=%x resource=%llx in err " 359 359 "state. Try to delete it forcibly.", 360 360 my_cq, cq_num, my_cq->ipz_cq_handle.handle); 361 361 ehca_error_data(shca, my_cq, my_cq->ipz_cq_handle.handle); ··· 365 365 cq_num); 366 366 } 367 367 if (h_ret != H_SUCCESS) { 368 - ehca_err(device, "hipz_h_destroy_cq() failed h_ret=%li " 368 + ehca_err(device, "hipz_h_destroy_cq() failed h_ret=%lli " 369 369 "ehca_cq=%p cq_num=%x", h_ret, my_cq, cq_num); 370 370 return ehca2ib_return_code(h_ret); 371 371 }
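
The %li/%lx to %lli/%llx churn in this and the following ehca hunks all comes from the same kernel-wide change: u64 is now a typedef for unsigned long long on 64-bit powerpc as well, so printing a u64 (such as the hcall return code h_ret) with %li or %lx triggers printk format warnings. A minimal illustration, not taken from the driver:

    #include <linux/kernel.h>
    #include <linux/types.h>

    /* With u64 defined as unsigned long long, %lli/%llx are the matching
     * printk specifiers; %li/%lx only match after casting back down to
     * unsigned long.
     */
    static void show_ret(u64 h_ret)
    {
            printk(KERN_ERR "h_ret=%lli\n", h_ret);                 /* matches u64 */
            printk(KERN_ERR "h_ret=%lx\n", (unsigned long)h_ret);   /* needs a cast */
    }
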
+1 -1
drivers/infiniband/hw/ehca/ehca_hca.c
··· 393 393 hret = hipz_h_modify_port(shca->ipz_hca_handle, port, 394 394 cap, props->init_type, port_modify_mask); 395 395 if (hret != H_SUCCESS) { 396 - ehca_err(&shca->ib_device, "Modify port failed h_ret=%li", 396 + ehca_err(&shca->ib_device, "Modify port failed h_ret=%lli", 397 397 hret); 398 398 ret = -EINVAL; 399 399 }
+9 -9
drivers/infiniband/hw/ehca/ehca_irq.c
··· 99 99 return; 100 100 101 101 ehca_err(&shca->ib_device, 102 - "QP 0x%x (resource=%lx) has errors.", 102 + "QP 0x%x (resource=%llx) has errors.", 103 103 qp->ib_qp.qp_num, resource); 104 104 break; 105 105 } ··· 108 108 struct ehca_cq *cq = (struct ehca_cq *)data; 109 109 110 110 ehca_err(&shca->ib_device, 111 - "CQ 0x%x (resource=%lx) has errors.", 111 + "CQ 0x%x (resource=%llx) has errors.", 112 112 cq->cq_number, resource); 113 113 break; 114 114 } 115 115 default: 116 116 ehca_err(&shca->ib_device, 117 - "Unknown error type: %lx on %s.", 117 + "Unknown error type: %llx on %s.", 118 118 type, shca->ib_device.name); 119 119 break; 120 120 } 121 121 122 - ehca_err(&shca->ib_device, "Error data is available: %lx.", resource); 122 + ehca_err(&shca->ib_device, "Error data is available: %llx.", resource); 123 123 ehca_err(&shca->ib_device, "EHCA ----- error data begin " 124 124 "---------------------------------------------------"); 125 - ehca_dmp(rblock, length, "resource=%lx", resource); 125 + ehca_dmp(rblock, length, "resource=%llx", resource); 126 126 ehca_err(&shca->ib_device, "EHCA ----- error data end " 127 127 "----------------------------------------------------"); 128 128 ··· 152 152 153 153 if (ret == H_R_STATE) 154 154 ehca_err(&shca->ib_device, 155 - "No error data is available: %lx.", resource); 155 + "No error data is available: %llx.", resource); 156 156 else if (ret == H_SUCCESS) { 157 157 int length; 158 158 ··· 164 164 print_error_data(shca, data, rblock, length); 165 165 } else 166 166 ehca_err(&shca->ib_device, 167 - "Error data could not be fetched: %lx", resource); 167 + "Error data could not be fetched: %llx", resource); 168 168 169 169 ehca_free_fw_ctrlblock(rblock); 170 170 ··· 514 514 struct ehca_cq *cq; 515 515 516 516 eqe_value = eqe->entry; 517 - ehca_dbg(&shca->ib_device, "eqe_value=%lx", eqe_value); 517 + ehca_dbg(&shca->ib_device, "eqe_value=%llx", eqe_value); 518 518 if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, eqe_value)) { 519 519 ehca_dbg(&shca->ib_device, "Got completion event"); 520 520 token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value); ··· 603 603 ret = hipz_h_eoi(eq->ist); 604 604 if (ret != H_SUCCESS) 605 605 ehca_err(&shca->ib_device, 606 - "bad return code EOI -rc = %ld\n", ret); 606 + "bad return code EOI -rc = %lld\n", ret); 607 607 ehca_dbg(&shca->ib_device, "deadman found %x eqe", eqe_cnt); 608 608 } 609 609 if (unlikely(eqe_cnt == EHCA_EQE_CACHE_SIZE))
+3 -3
drivers/infiniband/hw/ehca/ehca_main.c
··· 304 304 305 305 h_ret = hipz_h_query_hca(shca->ipz_hca_handle, rblock); 306 306 if (h_ret != H_SUCCESS) { 307 - ehca_gen_err("Cannot query device properties. h_ret=%li", 307 + ehca_gen_err("Cannot query device properties. h_ret=%lli", 308 308 h_ret); 309 309 ret = -EPERM; 310 310 goto sense_attributes1; ··· 391 391 port = (struct hipz_query_port *)rblock; 392 392 h_ret = hipz_h_query_port(shca->ipz_hca_handle, 1, port); 393 393 if (h_ret != H_SUCCESS) { 394 - ehca_gen_err("Cannot query port properties. h_ret=%li", 394 + ehca_gen_err("Cannot query port properties. h_ret=%lli", 395 395 h_ret); 396 396 ret = -EPERM; 397 397 goto sense_attributes1; ··· 682 682 { 683 683 struct ehca_shca *shca = dev->driver_data; 684 684 685 - return sprintf(buf, "%lx\n", shca->ipz_hca_handle.handle); 685 + return sprintf(buf, "%llx\n", shca->ipz_hca_handle.handle); 686 686 687 687 } 688 688 static DEVICE_ATTR(adapter_handle, S_IRUGO, ehca_show_adapter_handle, NULL);
+2 -2
drivers/infiniband/hw/ehca/ehca_mcast.c
··· 88 88 if (h_ret != H_SUCCESS) 89 89 ehca_err(ibqp->device, 90 90 "ehca_qp=%p qp_num=%x hipz_h_attach_mcqp() failed " 91 - "h_ret=%li", my_qp, ibqp->qp_num, h_ret); 91 + "h_ret=%lli", my_qp, ibqp->qp_num, h_ret); 92 92 93 93 return ehca2ib_return_code(h_ret); 94 94 } ··· 125 125 if (h_ret != H_SUCCESS) 126 126 ehca_err(ibqp->device, 127 127 "ehca_qp=%p qp_num=%x hipz_h_detach_mcqp() failed " 128 - "h_ret=%li", my_qp, ibqp->qp_num, h_ret); 128 + "h_ret=%lli", my_qp, ibqp->qp_num, h_ret); 129 129 130 130 return ehca2ib_return_code(h_ret); 131 131 }
+72 -72
drivers/infiniband/hw/ehca/ehca_mrmw.c
··· 204 204 } 205 205 if ((size == 0) || 206 206 (((u64)iova_start + size) < (u64)iova_start)) { 207 - ehca_err(pd->device, "bad input values: size=%lx iova_start=%p", 207 + ehca_err(pd->device, "bad input values: size=%llx iova_start=%p", 208 208 size, iova_start); 209 209 ib_mr = ERR_PTR(-EINVAL); 210 210 goto reg_phys_mr_exit0; ··· 309 309 } 310 310 311 311 if (length == 0 || virt + length < virt) { 312 - ehca_err(pd->device, "bad input values: length=%lx " 313 - "virt_base=%lx", length, virt); 312 + ehca_err(pd->device, "bad input values: length=%llx " 313 + "virt_base=%llx", length, virt); 314 314 ib_mr = ERR_PTR(-EINVAL); 315 315 goto reg_user_mr_exit0; 316 316 } ··· 373 373 &e_mr->ib.ib_mr.rkey); 374 374 if (ret == -EINVAL && pginfo.hwpage_size > PAGE_SIZE) { 375 375 ehca_warn(pd->device, "failed to register mr " 376 - "with hwpage_size=%lx", hwpage_size); 376 + "with hwpage_size=%llx", hwpage_size); 377 377 ehca_info(pd->device, "try to register mr with " 378 378 "kpage_size=%lx", PAGE_SIZE); 379 379 /* ··· 509 509 goto rereg_phys_mr_exit1; 510 510 if ((new_size == 0) || 511 511 (((u64)iova_start + new_size) < (u64)iova_start)) { 512 - ehca_err(mr->device, "bad input values: new_size=%lx " 512 + ehca_err(mr->device, "bad input values: new_size=%llx " 513 513 "iova_start=%p", new_size, iova_start); 514 514 ret = -EINVAL; 515 515 goto rereg_phys_mr_exit1; ··· 580 580 581 581 h_ret = hipz_h_query_mr(shca->ipz_hca_handle, e_mr, &hipzout); 582 582 if (h_ret != H_SUCCESS) { 583 - ehca_err(mr->device, "hipz_mr_query failed, h_ret=%li mr=%p " 584 - "hca_hndl=%lx mr_hndl=%lx lkey=%x", 583 + ehca_err(mr->device, "hipz_mr_query failed, h_ret=%lli mr=%p " 584 + "hca_hndl=%llx mr_hndl=%llx lkey=%x", 585 585 h_ret, mr, shca->ipz_hca_handle.handle, 586 586 e_mr->ipz_mr_handle.handle, mr->lkey); 587 587 ret = ehca2ib_return_code(h_ret); ··· 630 630 /* TODO: BUSY: MR still has bound window(s) */ 631 631 h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr); 632 632 if (h_ret != H_SUCCESS) { 633 - ehca_err(mr->device, "hipz_free_mr failed, h_ret=%li shca=%p " 634 - "e_mr=%p hca_hndl=%lx mr_hndl=%lx mr->lkey=%x", 633 + ehca_err(mr->device, "hipz_free_mr failed, h_ret=%lli shca=%p " 634 + "e_mr=%p hca_hndl=%llx mr_hndl=%llx mr->lkey=%x", 635 635 h_ret, shca, e_mr, shca->ipz_hca_handle.handle, 636 636 e_mr->ipz_mr_handle.handle, mr->lkey); 637 637 ret = ehca2ib_return_code(h_ret); ··· 671 671 h_ret = hipz_h_alloc_resource_mw(shca->ipz_hca_handle, e_mw, 672 672 e_pd->fw_pd, &hipzout); 673 673 if (h_ret != H_SUCCESS) { 674 - ehca_err(pd->device, "hipz_mw_allocate failed, h_ret=%li " 675 - "shca=%p hca_hndl=%lx mw=%p", 674 + ehca_err(pd->device, "hipz_mw_allocate failed, h_ret=%lli " 675 + "shca=%p hca_hndl=%llx mw=%p", 676 676 h_ret, shca, shca->ipz_hca_handle.handle, e_mw); 677 677 ib_mw = ERR_PTR(ehca2ib_return_code(h_ret)); 678 678 goto alloc_mw_exit1; ··· 713 713 714 714 h_ret = hipz_h_free_resource_mw(shca->ipz_hca_handle, e_mw); 715 715 if (h_ret != H_SUCCESS) { 716 - ehca_err(mw->device, "hipz_free_mw failed, h_ret=%li shca=%p " 717 - "mw=%p rkey=%x hca_hndl=%lx mw_hndl=%lx", 716 + ehca_err(mw->device, "hipz_free_mw failed, h_ret=%lli shca=%p " 717 + "mw=%p rkey=%x hca_hndl=%llx mw_hndl=%llx", 718 718 h_ret, shca, mw, mw->rkey, shca->ipz_hca_handle.handle, 719 719 e_mw->ipz_mw_handle.handle); 720 720 return ehca2ib_return_code(h_ret); ··· 840 840 goto map_phys_fmr_exit0; 841 841 if (iova % e_fmr->fmr_page_size) { 842 842 /* only whole-numbered pages */ 843 - ehca_err(fmr->device, "bad iova, iova=%lx fmr_page_size=%x", 843 + ehca_err(fmr->device, "bad iova, iova=%llx fmr_page_size=%x", 844 844 iova, e_fmr->fmr_page_size); 845 845 ret = -EINVAL; 846 846 goto map_phys_fmr_exit0; ··· 878 878 map_phys_fmr_exit0: 879 879 if (ret) 880 880 ehca_err(fmr->device, "ret=%i fmr=%p page_list=%p list_len=%x " 881 - "iova=%lx", ret, fmr, page_list, list_len, iova); 881 + "iova=%llx", ret, fmr, page_list, list_len, iova); 882 882 return ret; 883 883 } /* end ehca_map_phys_fmr() */ 884 884 ··· 964 964 965 965 h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr); 966 966 if (h_ret != H_SUCCESS) { 967 - ehca_err(fmr->device, "hipz_free_mr failed, h_ret=%li e_fmr=%p " 968 - "hca_hndl=%lx fmr_hndl=%lx fmr->lkey=%x", 967 + ehca_err(fmr->device, "hipz_free_mr failed, h_ret=%lli e_fmr=%p " 968 + "hca_hndl=%llx fmr_hndl=%llx fmr->lkey=%x", 969 969 h_ret, e_fmr, shca->ipz_hca_handle.handle, 970 970 e_fmr->ipz_mr_handle.handle, fmr->lkey); 971 971 ret = ehca2ib_return_code(h_ret); ··· 1007 1007 (u64)iova_start, size, hipz_acl, 1008 1008 e_pd->fw_pd, &hipzout); 1009 1009 if (h_ret != H_SUCCESS) { 1010 - ehca_err(&shca->ib_device, "hipz_alloc_mr failed, h_ret=%li " 1011 - "hca_hndl=%lx", h_ret, shca->ipz_hca_handle.handle); 1010 + ehca_err(&shca->ib_device, "hipz_alloc_mr failed, h_ret=%lli " 1011 + "hca_hndl=%llx", h_ret, shca->ipz_hca_handle.handle); 1012 1012 ret = ehca2ib_return_code(h_ret); 1013 1013 goto ehca_reg_mr_exit0; 1014 1014 } ··· 1033 1033 ehca_reg_mr_exit1: 1034 1034 h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr); 1035 1035 if (h_ret != H_SUCCESS) { 1036 - ehca_err(&shca->ib_device, "h_ret=%li shca=%p e_mr=%p " 1037 - "iova_start=%p size=%lx acl=%x e_pd=%p lkey=%x " 1038 - "pginfo=%p num_kpages=%lx num_hwpages=%lx ret=%i", 1036 + ehca_err(&shca->ib_device, "h_ret=%lli shca=%p e_mr=%p " 1037 + "iova_start=%p size=%llx acl=%x e_pd=%p lkey=%x " 1038 + "pginfo=%p num_kpages=%llx num_hwpages=%llx ret=%i", 1039 1039 h_ret, shca, e_mr, iova_start, size, acl, e_pd, 1040 1040 hipzout.lkey, pginfo, pginfo->num_kpages, 1041 1041 pginfo->num_hwpages, ret); ··· 1045 1045 ehca_reg_mr_exit0: 1046 1046 if (ret) 1047 1047 ehca_err(&shca->ib_device, "ret=%i shca=%p e_mr=%p " 1048 - "iova_start=%p size=%lx acl=%x e_pd=%p pginfo=%p " 1049 - "num_kpages=%lx num_hwpages=%lx", 1048 + "iova_start=%p size=%llx acl=%x e_pd=%p pginfo=%p " 1049 + "num_kpages=%llx num_hwpages=%llx", 1050 1050 ret, shca, e_mr, iova_start, size, acl, e_pd, pginfo, 1051 1051 pginfo->num_kpages, pginfo->num_hwpages); 1052 1052 return ret; ··· 1116 1116 */ 1117 1117 if (h_ret != H_SUCCESS) { 1118 1118 ehca_err(&shca->ib_device, "last " 1119 - "hipz_reg_rpage_mr failed, h_ret=%li " 1120 - "e_mr=%p i=%x hca_hndl=%lx mr_hndl=%lx" 1119 + "hipz_reg_rpage_mr failed, h_ret=%lli " 1120 + "e_mr=%p i=%x hca_hndl=%llx mr_hndl=%llx" 1121 1121 " lkey=%x", h_ret, e_mr, i, 1122 1122 shca->ipz_hca_handle.handle, 1123 1123 e_mr->ipz_mr_handle.handle, ··· 1128 1128 ret = 0; 1129 1129 } else if (h_ret != H_PAGE_REGISTERED) { 1130 1130 ehca_err(&shca->ib_device, "hipz_reg_rpage_mr failed, " 1131 - "h_ret=%li e_mr=%p i=%x lkey=%x hca_hndl=%lx " 1132 - "mr_hndl=%lx", h_ret, e_mr, i, 1131 + "h_ret=%lli e_mr=%p i=%x lkey=%x hca_hndl=%llx " 1132 + "mr_hndl=%llx", h_ret, e_mr, i, 1133 1133 e_mr->ib.ib_mr.lkey, 1134 1134 shca->ipz_hca_handle.handle, 1135 1135 e_mr->ipz_mr_handle.handle); ··· 1145 1145 ehca_reg_mr_rpages_exit0: 1146 1146 if (ret) 1147 1147 ehca_err(&shca->ib_device, "ret=%i shca=%p e_mr=%p pginfo=%p " 1148 - "num_kpages=%lx num_hwpages=%lx", ret, shca, e_mr, 1148 + "num_kpages=%llx num_hwpages=%llx", ret, shca, e_mr, 1149 1149 pginfo, pginfo->num_kpages, pginfo->num_hwpages); 1150 1150 return ret; 1151 1151 } /* end ehca_reg_mr_rpages() */ ··· 1184 1184 ret = ehca_set_pagebuf(pginfo, pginfo->num_hwpages, kpage); 1185 1185 if (ret) { 1186 1186 ehca_err(&shca->ib_device, "set pagebuf failed, e_mr=%p " 1187 - "pginfo=%p type=%x num_kpages=%lx num_hwpages=%lx " 1187 + "pginfo=%p type=%x num_kpages=%llx num_hwpages=%llx " 1188 1188 "kpage=%p", e_mr, pginfo, pginfo->type, 1189 1189 pginfo->num_kpages, pginfo->num_hwpages, kpage); 1190 1190 goto ehca_rereg_mr_rereg1_exit1; ··· 1205 1205 * (MW bound or MR is shared) 1206 1206 */ 1207 1207 ehca_warn(&shca->ib_device, "hipz_h_reregister_pmr failed " 1208 - "(Rereg1), h_ret=%li e_mr=%p", h_ret, e_mr); 1208 + "(Rereg1), h_ret=%lli e_mr=%p", h_ret, e_mr); 1209 1209 *pginfo = pginfo_save; 1210 1210 ret = -EAGAIN; 1211 1211 } else if ((u64 *)hipzout.vaddr != iova_start) { 1212 1212 ehca_err(&shca->ib_device, "PHYP changed iova_start in " 1213 - "rereg_pmr, iova_start=%p iova_start_out=%lx e_mr=%p " 1214 - "mr_handle=%lx lkey=%x lkey_out=%x", iova_start, 1213 + "rereg_pmr, iova_start=%p iova_start_out=%llx e_mr=%p " 1214 + "mr_handle=%llx lkey=%x lkey_out=%x", iova_start, 1215 1215 hipzout.vaddr, e_mr, e_mr->ipz_mr_handle.handle, 1216 1216 e_mr->ib.ib_mr.lkey, hipzout.lkey); 1217 1217 ret = -EFAULT; ··· 1235 1235 ehca_rereg_mr_rereg1_exit0: 1236 1236 if ( ret && (ret != -EAGAIN) ) 1237 1237 ehca_err(&shca->ib_device, "ret=%i lkey=%x rkey=%x " 1238 - "pginfo=%p num_kpages=%lx num_hwpages=%lx", 1238 + "pginfo=%p num_kpages=%llx num_hwpages=%llx", 1239 1239 ret, *lkey, *rkey, pginfo, pginfo->num_kpages, 1240 1240 pginfo->num_hwpages); 1241 1241 return ret; ··· 1263 1263 (e_mr->num_hwpages > MAX_RPAGES) || 1264 1264 (pginfo->num_hwpages > e_mr->num_hwpages)) { 1265 1265 ehca_dbg(&shca->ib_device, "Rereg3 case, " 1266 - "pginfo->num_hwpages=%lx e_mr->num_hwpages=%x", 1266 + "pginfo->num_hwpages=%llx e_mr->num_hwpages=%x", 1267 1267 pginfo->num_hwpages, e_mr->num_hwpages); 1268 1268 rereg_1_hcall = 0; 1269 1269 rereg_3_hcall = 1; ··· 1295 1295 h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr); 1296 1296 if (h_ret != H_SUCCESS) { 1297 1297 ehca_err(&shca->ib_device, "hipz_free_mr failed, " 1298 - "h_ret=%li e_mr=%p hca_hndl=%lx mr_hndl=%lx " 1298 + "h_ret=%lli e_mr=%p hca_hndl=%llx mr_hndl=%llx " 1299 1299 "mr->lkey=%x", 1300 1300 h_ret, e_mr, shca->ipz_hca_handle.handle, 1301 1301 e_mr->ipz_mr_handle.handle, ··· 1328 1328 ehca_rereg_mr_exit0: 1329 1329 if (ret) 1330 1330 ehca_err(&shca->ib_device, "ret=%i shca=%p e_mr=%p " 1331 - "iova_start=%p size=%lx acl=%x e_pd=%p pginfo=%p " 1332 - "num_kpages=%lx lkey=%x rkey=%x rereg_1_hcall=%x " 1331 + "iova_start=%p size=%llx acl=%x e_pd=%p pginfo=%p " 1332 + "num_kpages=%llx lkey=%x rkey=%x rereg_1_hcall=%x " 1333 1333 "rereg_3_hcall=%x", ret, shca, e_mr, iova_start, size, 1334 1334 acl, e_pd, pginfo, pginfo->num_kpages, *lkey, *rkey, 1335 1335 rereg_1_hcall, rereg_3_hcall); ··· 1371 1371 * FMRs are not shared and no MW bound to FMRs 1372 1372 */ 1373 1373 ehca_err(&shca->ib_device, "hipz_reregister_pmr failed " 1374 - "(Rereg1), h_ret=%li e_fmr=%p hca_hndl=%lx " 1375 - "mr_hndl=%lx lkey=%x lkey_out=%x", 1374 + "(Rereg1), h_ret=%lli e_fmr=%p hca_hndl=%llx " 1375 + "mr_hndl=%llx lkey=%x lkey_out=%x", 1376 1376 h_ret, e_fmr, shca->ipz_hca_handle.handle, 1377 1377 e_fmr->ipz_mr_handle.handle, 1378 1378 e_fmr->ib.ib_fmr.lkey, hipzout.lkey); ··· 1383 1383 h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr); 1384 1384 if (h_ret != H_SUCCESS) { 1385 1385 ehca_err(&shca->ib_device, "hipz_free_mr failed, " 1386 - "h_ret=%li e_fmr=%p hca_hndl=%lx mr_hndl=%lx " 1386 + "h_ret=%lli e_fmr=%p hca_hndl=%llx mr_hndl=%llx " 1387 1387 "lkey=%x", 1388 1388 h_ret, e_fmr, shca->ipz_hca_handle.handle, 1389 1389 e_fmr->ipz_mr_handle.handle, ··· 1447 1447 (u64)iova_start, hipz_acl, e_pd->fw_pd, 1448 1448 &hipzout); 1449 1449 if (h_ret != H_SUCCESS) { 1450 - ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%li " 1450 + ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lli " 1451 1451 "shca=%p e_origmr=%p e_newmr=%p iova_start=%p acl=%x " 1452 - "e_pd=%p hca_hndl=%lx mr_hndl=%lx lkey=%x", 1452 + "e_pd=%p hca_hndl=%llx mr_hndl=%llx lkey=%x", 1453 1453 h_ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd, 1454 1454 shca->ipz_hca_handle.handle, 1455 1455 e_origmr->ipz_mr_handle.handle, ··· 1527 1527 &e_mr->ib.ib_mr.rkey); 1528 1528 if (ret) { 1529 1529 ehca_err(&shca->ib_device, "reg of internal max MR failed, " 1530 - "e_mr=%p iova_start=%p size_maxmr=%lx num_kpages=%x " 1530 + "e_mr=%p iova_start=%p size_maxmr=%llx num_kpages=%x " 1531 1531 "num_hwpages=%x", e_mr, iova_start, size_maxmr, 1532 1532 num_kpages, num_hwpages); 1533 1533 goto ehca_reg_internal_maxmr_exit1; ··· 1573 1573 (u64)iova_start, hipz_acl, e_pd->fw_pd, 1574 1574 &hipzout); 1575 1575 if (h_ret != H_SUCCESS) { 1576 - ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%li " 1577 - "e_origmr=%p hca_hndl=%lx mr_hndl=%lx lkey=%x", 1576 + ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lli " 1577 + "e_origmr=%p hca_hndl=%llx mr_hndl=%llx lkey=%x", 1578 1578 h_ret, e_origmr, shca->ipz_hca_handle.handle, 1579 1579 e_origmr->ipz_mr_handle.handle, 1580 1580 e_origmr->ib.ib_mr.lkey); ··· 1651 1651 /* check first buffer */ 1652 1652 if (((u64)iova_start & ~PAGE_MASK) != (pbuf->addr & ~PAGE_MASK)) { 1653 1653 ehca_gen_err("iova_start/addr mismatch, iova_start=%p " 1654 - "pbuf->addr=%lx pbuf->size=%lx", 1654 + "pbuf->addr=%llx pbuf->size=%llx", 1655 1655 iova_start, pbuf->addr, pbuf->size); 1656 1656 return -EINVAL; 1657 1657 } 1658 1658 if (((pbuf->addr + pbuf->size) % PAGE_SIZE) && 1659 1659 (num_phys_buf > 1)) { 1660 - ehca_gen_err("addr/size mismatch in 1st buf, pbuf->addr=%lx " 1661 - "pbuf->size=%lx", pbuf->addr, pbuf->size); 1660 + ehca_gen_err("addr/size mismatch in 1st buf, pbuf->addr=%llx " 1661 + "pbuf->size=%llx", pbuf->addr, pbuf->size); 1662 1662 return -EINVAL; 1663 1663 } 1664 1664 1665 1665 for (i = 0; i < num_phys_buf; i++) { 1666 1666 if ((i > 0) && (pbuf->addr % PAGE_SIZE)) { 1667 - ehca_gen_err("bad address, i=%x pbuf->addr=%lx " 1668 - "pbuf->size=%lx", 1667 + ehca_gen_err("bad address, i=%x pbuf->addr=%llx " 1668 + "pbuf->size=%llx", 1669 1669 i, pbuf->addr, pbuf->size); 1670 1670 return -EINVAL; 1671 1671 } 1672 1672 if (((i > 0) && /* not 1st */ 1673 1673 (i < (num_phys_buf - 1)) && /* not last */ 1674 1674 (pbuf->size % PAGE_SIZE)) || (pbuf->size == 0)) { 1675 - ehca_gen_err("bad size, i=%x pbuf->size=%lx", 1675 + ehca_gen_err("bad size, i=%x pbuf->size=%llx", 1676 1676 i, pbuf->size); 1677 1677 return -EINVAL; 1678 1678 } ··· 1705 1705 page = page_list; 1706 1706 for (i = 0; i < list_len; i++) { 1707 1707 if (*page % e_fmr->fmr_page_size) { 1708 - ehca_gen_err("bad page, i=%x *page=%lx page=%p fmr=%p " 1708 + ehca_gen_err("bad page, i=%x *page=%llx page=%p fmr=%p " 1709 1709 "fmr_page_size=%x", i, *page, page, e_fmr, 1710 1710 e_fmr->fmr_page_size); 1711 1711 return -EINVAL; ··· 1743 1743 (pginfo->next_hwpage * 1744 1744 pginfo->hwpage_size)); 1745 1745 if ( !(*kpage) ) { 1746 - ehca_gen_err("pgaddr=%lx " 1747 - "chunk->page_list[i]=%lx " 1748 - "i=%x next_hwpage=%lx", 1746 + ehca_gen_err("pgaddr=%llx " 1747 + "chunk->page_list[i]=%llx " 1748 + "i=%x next_hwpage=%llx", 1749 1749 pgaddr, (u64)sg_dma_address( 1750 1750 &chunk->page_list[i]), 1751 1751 i, pginfo->next_hwpage); ··· 1795 1795 for (t = start_idx; t <= end_idx; t++) { 1796 1796 u64 pgaddr = page_to_pfn(sg_page(&page_list[t])) << PAGE_SHIFT; 1797 1797 if (ehca_debug_level >= 3) 1798 - ehca_gen_dbg("chunk_page=%lx value=%016lx", pgaddr, 1798 + ehca_gen_dbg("chunk_page=%llx value=%016llx", pgaddr, 1799 1799 *(u64 *)abs_to_virt(phys_to_abs(pgaddr))); 1800 1800 if (pgaddr - PAGE_SIZE != *prev_pgaddr) { 1801 - ehca_gen_err("uncontiguous page found pgaddr=%lx " 1802 - "prev_pgaddr=%lx page_list_i=%x", 1801 + ehca_gen_err("uncontiguous page found pgaddr=%llx " 1802 + "prev_pgaddr=%llx page_list_i=%x", 1803 1803 pgaddr, *prev_pgaddr, t); 1804 1804 return -EINVAL; 1805 1805 } ··· 1833 1833 << PAGE_SHIFT ); 1834 1834 *kpage = phys_to_abs(pgaddr); 1835 1835 if ( !(*kpage) ) { 1836 - ehca_gen_err("pgaddr=%lx i=%x", 1836 + ehca_gen_err("pgaddr=%llx i=%x", 1837 1837 pgaddr, i); 1838 1838 ret = -EFAULT; 1839 1839 return ret; ··· 1846 1846 if (pginfo->hwpage_cnt) { 1847 1847 ehca_gen_err( 1848 1848 "invalid alignment " 1849 - "pgaddr=%lx i=%x " 1850 - "mr_pgsize=%lx", 1849 + "pgaddr=%llx i=%x " 1850 + "mr_pgsize=%llx", 1851 1851 pgaddr, i, 1852 1852 pginfo->hwpage_size); 1853 1853 ret = -EFAULT; ··· 1866 1866 if (ehca_debug_level >= 3) { 1867 1867 u64 val = *(u64 *)abs_to_virt( 1868 1868 phys_to_abs(pgaddr)); 1869 - ehca_gen_dbg("kpage=%lx chunk_page=%lx " 1870 - "value=%016lx", 1869 + ehca_gen_dbg("kpage=%llx chunk_page=%llx " 1870 + "value=%016llx", 1871 1871 *kpage, pgaddr, val); 1872 1872 } 1873 1873 prev_pgaddr = pgaddr; ··· 1944 1944 if ((pginfo->kpage_cnt >= pginfo->num_kpages) || 1945 1945 (pginfo->hwpage_cnt >= pginfo->num_hwpages)) { 1946 1946 ehca_gen_err("kpage_cnt >= num_kpages, " 1947 - "kpage_cnt=%lx num_kpages=%lx " 1948 - "hwpage_cnt=%lx " 1949 - "num_hwpages=%lx i=%x", 1947 + "kpage_cnt=%llx num_kpages=%llx " 1948 + "hwpage_cnt=%llx " 1949 + "num_hwpages=%llx i=%x", 1950 1950 pginfo->kpage_cnt, 1951 1951 pginfo->num_kpages, 1952 1952 pginfo->hwpage_cnt, ··· 1957 1957 (pbuf->addr & ~(pginfo->hwpage_size - 1)) + 1958 1958 (pginfo->next_hwpage * pginfo->hwpage_size)); 1959 1959 if ( !(*kpage) && pbuf->addr ) { 1960 - ehca_gen_err("pbuf->addr=%lx pbuf->size=%lx " 1961 - "next_hwpage=%lx", pbuf->addr, 1960 + ehca_gen_err("pbuf->addr=%llx pbuf->size=%llx " 1961 + "next_hwpage=%llx", pbuf->addr, 1962 1962 pbuf->size, pginfo->next_hwpage); 1963 1963 return -EFAULT; 1964 1964 } ··· 1996 1996 *kpage = phys_to_abs((*fmrlist & ~(pginfo->hwpage_size - 1)) + 1997 1997 pginfo->next_hwpage * pginfo->hwpage_size); 1998 1998 if ( !(*kpage) ) { 1999 - ehca_gen_err("*fmrlist=%lx fmrlist=%p " 2000 - "next_listelem=%lx next_hwpage=%lx", 1999 + ehca_gen_err("*fmrlist=%llx fmrlist=%p " 2000 + "next_listelem=%llx next_hwpage=%llx", 2001 2001 *fmrlist, fmrlist, 2002 2002 pginfo->u.fmr.next_listelem, 2003 2003 pginfo->next_hwpage); ··· 2025 2025 ~(pginfo->hwpage_size - 1)); 2026 2026 if (prev + pginfo->u.fmr.fmr_pgsize != p) { 2027 2027 ehca_gen_err("uncontiguous fmr pages " 2028 - "found prev=%lx p=%lx " 2028 + "found prev=%llx p=%llx " 2029 2029 "idx=%x", prev, p, i + j); 2030 2030 return -EINVAL; 2031 2031 }
+16 -16
drivers/infiniband/hw/ehca/ehca_qp.c
··· 331 331 if (cnt == (nr_q_pages - 1)) { /* last page! */ 332 332 if (h_ret != expected_hret) { 333 333 ehca_err(ib_dev, "hipz_qp_register_rpage() " 334 - "h_ret=%li", h_ret); 334 + "h_ret=%lli", h_ret); 335 335 ret = ehca2ib_return_code(h_ret); 336 336 goto init_qp_queue1; 337 337 } ··· 345 345 } else { 346 346 if (h_ret != H_PAGE_REGISTERED) { 347 347 ehca_err(ib_dev, "hipz_qp_register_rpage() " 348 - "h_ret=%li", h_ret); 348 + "h_ret=%lli", h_ret); 349 349 ret = ehca2ib_return_code(h_ret); 350 350 goto init_qp_queue1; 351 351 } ··· 709 709 710 710 h_ret = hipz_h_alloc_resource_qp(shca->ipz_hca_handle, &parms); 711 711 if (h_ret != H_SUCCESS) { 712 - ehca_err(pd->device, "h_alloc_resource_qp() failed h_ret=%li", 712 + ehca_err(pd->device, "h_alloc_resource_qp() failed h_ret=%lli", 713 713 h_ret); 714 714 ret = ehca2ib_return_code(h_ret); 715 715 goto create_qp_exit1; ··· 1010 1010 mqpcb, my_qp->galpas.kernel); 1011 1011 if (hret != H_SUCCESS) { 1012 1012 ehca_err(pd->device, "Could not modify SRQ to INIT " 1013 - "ehca_qp=%p qp_num=%x h_ret=%li", 1013 + "ehca_qp=%p qp_num=%x h_ret=%lli", 1014 1014 my_qp, my_qp->real_qp_num, hret); 1015 1015 goto create_srq2; 1016 1016 } ··· 1024 1024 mqpcb, my_qp->galpas.kernel); 1025 1025 if (hret != H_SUCCESS) { 1026 1026 ehca_err(pd->device, "Could not enable SRQ " 1027 - "ehca_qp=%p qp_num=%x h_ret=%li", 1027 + "ehca_qp=%p qp_num=%x h_ret=%lli", 1028 1028 my_qp, my_qp->real_qp_num, hret); 1029 1029 goto create_srq2; 1030 1030 } ··· 1038 1038 mqpcb, my_qp->galpas.kernel); 1039 1039 if (hret != H_SUCCESS) { 1040 1040 ehca_err(pd->device, "Could not modify SRQ to RTR " 1041 - "ehca_qp=%p qp_num=%x h_ret=%li", 1041 + "ehca_qp=%p qp_num=%x h_ret=%lli", 1042 1042 my_qp, my_qp->real_qp_num, hret); 1043 1043 goto create_srq2; 1044 1044 } ··· 1078 1078 &bad_send_wqe_p, NULL, 2); 1079 1079 if (h_ret != H_SUCCESS) { 1080 1080 ehca_err(&shca->ib_device, "hipz_h_disable_and_get_wqe() failed" 1081 - " ehca_qp=%p qp_num=%x h_ret=%li", 1081 + " ehca_qp=%p qp_num=%x h_ret=%lli", 1082 1082 my_qp, qp_num, h_ret); 1083 1083 return ehca2ib_return_code(h_ret); 1084 1084 } ··· 1134 1134 1135 1135 if (ipz_queue_abs_to_offset(ipz_queue, wqe_p, &q_ofs)) { 1136 1136 ehca_gen_err("Invalid offset for calculating left cqes " 1137 - "wqe_p=%#lx wqe_v=%p\n", wqe_p, wqe_v); 1137 + "wqe_p=%#llx wqe_v=%p\n", wqe_p, wqe_v); 1138 1138 return -EFAULT; 1139 1139 } 1140 1140 ··· 1168 1168 &send_wqe_p, &recv_wqe_p, 4); 1169 1169 if (h_ret != H_SUCCESS) { 1170 1170 ehca_err(&shca->ib_device, "disable_and_get_wqe() " 1171 - "failed ehca_qp=%p qp_num=%x h_ret=%li", 1171 + "failed ehca_qp=%p qp_num=%x h_ret=%lli", 1172 1172 my_qp, qp_num, h_ret); 1173 1173 return ehca2ib_return_code(h_ret); 1174 1174 } ··· 1261 1261 mqpcb, my_qp->galpas.kernel); 1262 1262 if (h_ret != H_SUCCESS) { 1263 1263 ehca_err(ibqp->device, "hipz_h_query_qp() failed " 1264 - "ehca_qp=%p qp_num=%x h_ret=%li", 1264 + "ehca_qp=%p qp_num=%x h_ret=%lli", 1265 1265 my_qp, ibqp->qp_num, h_ret); 1266 1266 ret = ehca2ib_return_code(h_ret); 1267 1267 goto modify_qp_exit1; ··· 1690 1690 1691 1691 if (h_ret != H_SUCCESS) { 1692 1692 ret = ehca2ib_return_code(h_ret); 1693 - ehca_err(ibqp->device, "hipz_h_modify_qp() failed h_ret=%li " 1693 + ehca_err(ibqp->device, "hipz_h_modify_qp() failed h_ret=%lli " 1694 1694 "ehca_qp=%p qp_num=%x", h_ret, my_qp, ibqp->qp_num); 1695 1695 goto modify_qp_exit2; 1696 1696 } ··· 1723 1723 ret = ehca2ib_return_code(h_ret); 1724 1724 ehca_err(ibqp->device, "ENABLE in context of " 1725 1725 "RESET_2_INIT failed! Maybe you didn't get " 1726 - "a LID h_ret=%li ehca_qp=%p qp_num=%x", 1726 + "a LID h_ret=%lli ehca_qp=%p qp_num=%x", 1727 1727 h_ret, my_qp, ibqp->qp_num); 1728 1728 goto modify_qp_exit2; 1729 1729 } ··· 1909 1909 if (h_ret != H_SUCCESS) { 1910 1910 ret = ehca2ib_return_code(h_ret); 1911 1911 ehca_err(qp->device, "hipz_h_query_qp() failed " 1912 - "ehca_qp=%p qp_num=%x h_ret=%li", 1912 + "ehca_qp=%p qp_num=%x h_ret=%lli", 1913 1913 my_qp, qp->qp_num, h_ret); 1914 1914 goto query_qp_exit1; 1915 1915 } ··· 2074 2074 2075 2075 if (h_ret != H_SUCCESS) { 2076 2076 ret = ehca2ib_return_code(h_ret); 2077 - ehca_err(ibsrq->device, "hipz_h_modify_qp() failed h_ret=%li " 2077 + ehca_err(ibsrq->device, "hipz_h_modify_qp() failed h_ret=%lli " 2078 2078 "ehca_qp=%p qp_num=%x", 2079 2079 h_ret, my_qp, my_qp->real_qp_num); 2080 2080 } ··· 2108 2108 if (h_ret != H_SUCCESS) { 2109 2109 ret = ehca2ib_return_code(h_ret); 2110 2110 ehca_err(srq->device, "hipz_h_query_qp() failed " 2111 - "ehca_qp=%p qp_num=%x h_ret=%li", 2111 + "ehca_qp=%p qp_num=%x h_ret=%lli", 2112 2112 my_qp, my_qp->real_qp_num, h_ret); 2113 2113 goto query_srq_exit1; 2114 2114 } ··· 2179 2179 2180 2180 h_ret = hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp); 2181 2181 if (h_ret != H_SUCCESS) { 2182 - ehca_err(dev, "hipz_h_destroy_qp() failed h_ret=%li " 2182 + ehca_err(dev, "hipz_h_destroy_qp() failed h_ret=%lli " 2183 2183 "ehca_qp=%p qp_num=%x", h_ret, my_qp, qp_num); 2184 2184 return ehca2ib_return_code(h_ret); 2185 2185 }
+1 -1
drivers/infiniband/hw/ehca/ehca_reqs.c
··· 822 822 offset = qmap->next_wqe_idx * ipz_queue->qe_size; 823 823 wqe = (struct ehca_wqe *)ipz_qeit_calc(ipz_queue, offset); 824 824 if (!wqe) { 825 - ehca_err(cq->device, "Invalid wqe offset=%#lx on " 825 + ehca_err(cq->device, "Invalid wqe offset=%#llx on " 826 826 "qp_num=%#x", offset, my_qp->real_qp_num); 827 827 return nr; 828 828 }
+1 -1
drivers/infiniband/hw/ehca/ehca_sqp.c
··· 85 85 86 86 if (ret != H_SUCCESS) { 87 87 ehca_err(&shca->ib_device, 88 - "Can't define AQP1 for port %x. h_ret=%li", 88 + "Can't define AQP1 for port %x. h_ret=%lli", 89 89 port, ret); 90 90 return ret; 91 91 }
+1 -1
drivers/infiniband/hw/ehca/ehca_tools.h
··· 116 116 unsigned char *deb = (unsigned char *)(adr); \ 117 117 for (x = 0; x < l; x += 16) { \ 118 118 printk(KERN_INFO "EHCA_DMP:%s " format \ 119 - " adr=%p ofs=%04x %016lx %016lx\n", \ 119 + " adr=%p ofs=%04x %016llx %016llx\n", \ 120 120 __func__, ##args, deb, x, \ 121 121 *((u64 *)&deb[0]), *((u64 *)&deb[8])); \ 122 122 deb += 16; \
+1 -1
drivers/infiniband/hw/ehca/ehca_uverbs.c
··· 114 114 115 115 physical = galpas->user.fw_handle; 116 116 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 117 - ehca_gen_dbg("vsize=%lx physical=%lx", vsize, physical); 117 + ehca_gen_dbg("vsize=%llx physical=%llx", vsize, physical); 118 118 /* VM_IO | VM_RESERVED are set by remap_pfn_range() */ 119 119 ret = remap_4k_pfn(vma, vma->vm_start, physical >> EHCA_PAGESHIFT, 120 120 vma->vm_page_prot);
+28 -28
drivers/infiniband/hw/ehca/hcp_if.c
··· 226 226 u32 *eq_ist) 227 227 { 228 228 u64 ret; 229 - u64 outs[PLPAR_HCALL9_BUFSIZE]; 229 + unsigned long outs[PLPAR_HCALL9_BUFSIZE]; 230 230 u64 allocate_controls; 231 231 232 232 /* resource type */ ··· 249 249 *eq_ist = (u32)outs[5]; 250 250 251 251 if (ret == H_NOT_ENOUGH_RESOURCES) 252 - ehca_gen_err("Not enough resource - ret=%li ", ret); 252 + ehca_gen_err("Not enough resource - ret=%lli ", ret); 253 253 254 254 return ret; 255 255 } ··· 270 270 struct ehca_alloc_cq_parms *param) 271 271 { 272 272 u64 ret; 273 - u64 outs[PLPAR_HCALL9_BUFSIZE]; 273 + unsigned long outs[PLPAR_HCALL9_BUFSIZE]; 274 274 275 275 ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs, 276 276 adapter_handle.handle, /* r4 */ ··· 287 287 hcp_galpas_ctor(&cq->galpas, outs[5], outs[6]); 288 288 289 289 if (ret == H_NOT_ENOUGH_RESOURCES) 290 - ehca_gen_err("Not enough resources. ret=%li", ret); 290 + ehca_gen_err("Not enough resources. ret=%lli", ret); 291 291 292 292 return ret; 293 293 } ··· 297 297 { 298 298 u64 ret; 299 299 u64 allocate_controls, max_r10_reg, r11, r12; 300 - u64 outs[PLPAR_HCALL9_BUFSIZE]; 300 + unsigned long outs[PLPAR_HCALL9_BUFSIZE]; 301 301 302 302 allocate_controls = 303 303 EHCA_BMASK_SET(H_ALL_RES_QP_ENHANCED_OPS, parms->ext_type) ··· 362 362 hcp_galpas_ctor(&parms->galpas, outs[6], outs[6]); 363 363 364 364 if (ret == H_NOT_ENOUGH_RESOURCES) 365 - ehca_gen_err("Not enough resources. ret=%li", ret); 365 + ehca_gen_err("Not enough resources. ret=%lli", ret); 366 366 367 367 return ret; 368 368 } ··· 454 454 const u64 count) 455 455 { 456 456 if (count != 1) { 457 - ehca_gen_err("Ppage counter=%lx", count); 457 + ehca_gen_err("Ppage counter=%llx", count); 458 458 return H_PARAMETER; 459 459 } 460 460 return hipz_h_register_rpage(adapter_handle, ··· 489 489 const struct h_galpa gal) 490 490 { 491 491 if (count != 1) { 492 - ehca_gen_err("Page counter=%lx", count); 492 + ehca_gen_err("Page counter=%llx", count); 493 493 return H_PARAMETER; 494 494 } 495 495 ··· 508 508 const struct h_galpa galpa) 509 509 { 510 510 if (count > 1) { 511 - ehca_gen_err("Page counter=%lx", count); 511 + ehca_gen_err("Page counter=%llx", count); 512 512 return H_PARAMETER; 513 513 } 514 514 ··· 525 525 int dis_and_get_function_code) 526 526 { 527 527 u64 ret; 528 - u64 outs[PLPAR_HCALL9_BUFSIZE]; 528 + unsigned long outs[PLPAR_HCALL9_BUFSIZE]; 529 529 530 530 ret = ehca_plpar_hcall9(H_DISABLE_AND_GETC, outs, 531 531 adapter_handle.handle, /* r4 */ ··· 548 548 struct h_galpa gal) 549 549 { 550 550 u64 ret; 551 - u64 outs[PLPAR_HCALL9_BUFSIZE]; 551 + unsigned long outs[PLPAR_HCALL9_BUFSIZE]; 552 552 ret = ehca_plpar_hcall9(H_MODIFY_QP, outs, 553 553 adapter_handle.handle, /* r4 */ 554 554 qp_handle.handle, /* r5 */ ··· 557 557 0, 0, 0, 0, 0); 558 558 559 559 if (ret == H_NOT_ENOUGH_RESOURCES) 560 - ehca_gen_err("Insufficient resources ret=%li", ret); 560 + ehca_gen_err("Insufficient resources ret=%lli", ret); 561 561 562 562 return ret; 563 563 } ··· 579 579 struct ehca_qp *qp) 580 580 { 581 581 u64 ret; 582 - u64 outs[PLPAR_HCALL9_BUFSIZE]; 582 + unsigned long outs[PLPAR_HCALL9_BUFSIZE]; 583 583 584 584 ret = hcp_galpas_dtor(&qp->galpas); 585 585 if (ret) { ··· 593 593 qp->ipz_qp_handle.handle, /* r6 */ 594 594 0, 0, 0, 0, 0, 0); 595 595 if (ret == H_HARDWARE) 596 - ehca_gen_err("HCA not operational. ret=%li", ret); 596 + ehca_gen_err("HCA not operational. ret=%lli", ret); 597 597 598 598 ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE, 599 599 adapter_handle.handle, /* r4 */ ··· 601 601 0, 0, 0, 0, 0); 602 602 603 603 if (ret == H_RESOURCE) 604 - ehca_gen_err("Resource still in use. ret=%li", ret); 604 + ehca_gen_err("Resource still in use. ret=%lli", ret); 605 605 606 606 return ret; 607 607 } ··· 625 625 u32 * bma_qp_nr) 626 626 { 627 627 u64 ret; 628 - u64 outs[PLPAR_HCALL9_BUFSIZE]; 628 + unsigned long outs[PLPAR_HCALL9_BUFSIZE]; 629 629 630 630 ret = ehca_plpar_hcall9(H_DEFINE_AQP1, outs, 631 631 adapter_handle.handle, /* r4 */ ··· 636 636 *bma_qp_nr = (u32)outs[1]; 637 637 638 638 if (ret == H_ALIAS_EXIST) 639 - ehca_gen_err("AQP1 already exists. ret=%li", ret); 639 + ehca_gen_err("AQP1 already exists. ret=%lli", ret); 640 640 641 641 return ret; 642 642 } ··· 658 658 0, 0); 659 659 660 660 if (ret == H_NOT_ENOUGH_RESOURCES) 661 - ehca_gen_err("Not enough resources. ret=%li", ret); 661 + ehca_gen_err("Not enough resources. ret=%lli", ret); 662 662 663 663 return ret; 664 664 } ··· 697 697 0, 0, 0, 0); 698 698 699 699 if (ret == H_RESOURCE) 700 - ehca_gen_err("H_FREE_RESOURCE failed ret=%li ", ret); 700 + ehca_gen_err("H_FREE_RESOURCE failed ret=%lli ", ret); 701 701 702 702 return ret; 703 703 } ··· 719 719 0, 0, 0, 0, 0); 720 720 721 721 if (ret == H_RESOURCE) 722 - ehca_gen_err("Resource in use. ret=%li ", ret); 722 + ehca_gen_err("Resource in use. ret=%lli ", ret); 723 723 724 724 return ret; 725 725 } ··· 733 733 struct ehca_mr_hipzout_parms *outparms) 734 734 { 735 735 u64 ret; 736 - u64 outs[PLPAR_HCALL9_BUFSIZE]; 736 + unsigned long outs[PLPAR_HCALL9_BUFSIZE]; 737 737 738 738 ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs, 739 739 adapter_handle.handle, /* r4 */ ··· 774 774 775 775 if ((count > 1) && (logical_address_of_page & (EHCA_PAGESIZE-1))) { 776 776 ehca_gen_err("logical_address_of_page not on a 4k boundary " 777 - "adapter_handle=%lx mr=%p mr_handle=%lx " 777 + "adapter_handle=%llx mr=%p mr_handle=%llx " 778 778 "pagesize=%x queue_type=%x " 779 - "logical_address_of_page=%lx count=%lx", 779 + "logical_address_of_page=%llx count=%llx", 780 780 adapter_handle.handle, mr, 781 781 mr->ipz_mr_handle.handle, pagesize, queue_type, 782 782 logical_address_of_page, count); ··· 794 794 struct ehca_mr_hipzout_parms *outparms) 795 795 { 796 796 u64 ret; 797 - u64 outs[PLPAR_HCALL9_BUFSIZE]; 797 + unsigned long outs[PLPAR_HCALL9_BUFSIZE]; 798 798 799 799 ret = ehca_plpar_hcall9(H_QUERY_MR, outs, 800 800 adapter_handle.handle, /* r4 */ ··· 828 828 struct ehca_mr_hipzout_parms *outparms) 829 829 { 830 830 u64 ret; 831 - u64 outs[PLPAR_HCALL9_BUFSIZE]; 831 + unsigned long outs[PLPAR_HCALL9_BUFSIZE]; 832 832 833 833 ret = ehca_plpar_hcall9(H_REREGISTER_PMR, outs, 834 834 adapter_handle.handle, /* r4 */ ··· 855 855 struct ehca_mr_hipzout_parms *outparms) 856 856 { 857 857 u64 ret; 858 - u64 outs[PLPAR_HCALL9_BUFSIZE]; 858 + unsigned long outs[PLPAR_HCALL9_BUFSIZE]; 859 859 860 860 ret = ehca_plpar_hcall9(H_REGISTER_SMR, outs, 861 861 adapter_handle.handle, /* r4 */ ··· 877 877 struct ehca_mw_hipzout_parms *outparms) 878 878 { 879 879 u64 ret; 880 - u64 outs[PLPAR_HCALL9_BUFSIZE]; 880 + unsigned long outs[PLPAR_HCALL9_BUFSIZE]; 881 881 882 882 ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs, 883 883 adapter_handle.handle, /* r4 */ ··· 895 895 struct ehca_mw_hipzout_parms *outparms) 896 896 { 897 897 u64 ret; 898 - u64 outs[PLPAR_HCALL9_BUFSIZE]; 898 + unsigned long outs[PLPAR_HCALL9_BUFSIZE]; 899 899 900 900 ret = ehca_plpar_hcall9(H_QUERY_MW, outs, 901 901 adapter_handle.handle, /* r4 */
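
The u64 to unsigned long switch for the outs[] buffers above is the "consistent types" half of the series: the pseries hcall wrappers that ehca_plpar_hcall9() sits on fill an unsigned long return buffer, so once u64 stopped being unsigned long on powerpc64 the old declarations no longer matched the interface. A rough sketch of the relationship; the prototype and header named here are the usual arch/powerpc ones and should be treated as assumptions:

    #include <asm/hvcall.h>         /* PLPAR_HCALL9_BUFSIZE, plpar_hcall9() */

    /* plpar_hcall9() returns its nine result registers through an
     * unsigned long buffer, so callers declare outs[] with that type
     * rather than u64.
     */
    static long example_hcall9(unsigned long opcode, unsigned long handle)
    {
            unsigned long outs[PLPAR_HCALL9_BUFSIZE];

            return plpar_hcall9(opcode, outs, handle, 0, 0, 0, 0, 0, 0, 0, 0);
    }
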
+19 -9
drivers/infiniband/hw/mlx4/qp.c
··· 1462 1462 } 1463 1463 1464 1464 static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_send_wr *wr, 1465 - struct mlx4_ib_qp *qp, unsigned *lso_seg_len) 1465 + struct mlx4_ib_qp *qp, unsigned *lso_seg_len, 1466 + __be32 *lso_hdr_sz) 1466 1467 { 1467 1468 unsigned halign = ALIGN(sizeof *wqe + wr->wr.ud.hlen, 16); 1468 1469 ··· 1480 1479 1481 1480 memcpy(wqe->header, wr->wr.ud.header, wr->wr.ud.hlen); 1482 1481 1483 - /* make sure LSO header is written before overwriting stamping */ 1484 - wmb(); 1485 - 1486 - wqe->mss_hdr_size = cpu_to_be32((wr->wr.ud.mss - wr->wr.ud.hlen) << 16 | 1487 - wr->wr.ud.hlen); 1488 - 1482 + *lso_hdr_sz = cpu_to_be32((wr->wr.ud.mss - wr->wr.ud.hlen) << 16 | 1483 + wr->wr.ud.hlen); 1489 1484 *lso_seg_len = halign; 1490 1485 return 0; 1491 1486 } ··· 1515 1518 int uninitialized_var(stamp); 1516 1519 int uninitialized_var(size); 1517 1520 unsigned uninitialized_var(seglen); 1521 + __be32 dummy; 1522 + __be32 *lso_wqe; 1523 + __be32 uninitialized_var(lso_hdr_sz); 1518 1524 int i; 1519 1525 1520 1526 spin_lock_irqsave(&qp->sq.lock, flags); ··· 1525 1525 ind = qp->sq_next_wqe; 1526 1526 1527 1527 for (nreq = 0; wr; ++nreq, wr = wr->next) { 1528 + lso_wqe = &dummy; 1529 + 1528 1530 if (mlx4_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { 1529 1531 err = -ENOMEM; 1530 1532 *bad_wr = wr; ··· 1608 1606 size += sizeof (struct mlx4_wqe_datagram_seg) / 16; 1609 1607 1610 1608 if (wr->opcode == IB_WR_LSO) { 1611 - err = build_lso_seg(wqe, wr, qp, &seglen); 1609 + err = build_lso_seg(wqe, wr, qp, &seglen, &lso_hdr_sz); 1612 1610 if (unlikely(err)) { 1613 1611 *bad_wr = wr; 1614 1612 goto out; 1615 1613 } 1614 + lso_wqe = (__be32 *) wqe; 1616 1615 wqe += seglen; 1617 1616 size += seglen / 16; 1618 1617 } ··· 1655 1652 for (i = wr->num_sge - 1; i >= 0; --i, --dseg) 1656 1653 set_data_seg(dseg, wr->sg_list + i); 1657 1654 1655 + /* 1656 + * Possibly overwrite stamping in cacheline with LSO 1657 + * segment only after making sure all data segments 1658 + * are written. 1659 + */ 1660 + wmb(); 1661 + *lso_wqe = lso_hdr_sz; 1662 + 1658 1663 ctrl->fence_size = (wr->send_flags & IB_SEND_FENCE ? 1659 1664 MLX4_WQE_CTRL_FENCE : 0) | size; 1660 1665 ··· 1697 1686 stamp_send_wqe(qp, stamp, size * 16); 1698 1687 ind = pad_wraparound(qp, ind); 1699 1688 } 1700 - 1701 1689 } 1702 1690 1703 1691 out:
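
The mlx4 hunks above close a write-ordering race: the LSO header-size word lands on top of the "stamping" pattern that keeps the HCA from prefetching a half-built WQE, so it must not be written until every data segment is in place. The fix records the target address, writes all data segments, issues wmb(), and only then stores the header size (a dummy target keeps the unconditional store harmless for non-LSO sends). The same publish-after-barrier pattern on a purely hypothetical descriptor layout, not the real mlx4 WQE:

    #include <linux/types.h>
    #include <asm/system.h>         /* wmb() in kernels of this vintage */

    /* Hypothetical descriptor: the valid word shares a cacheline with the
     * payload and must therefore be written last.
     */
    struct fake_desc {
            u64     payload[7];
            __be32  valid_word;
    };

    static void post_fake_desc(struct fake_desc *d, const u64 *data, __be32 valid)
    {
            int i;

            for (i = 0; i < 7; i++)
                    d->payload[i] = data[i];

            wmb();                  /* payload must be visible before valid_word */
            d->valid_word = valid;  /* device may now consume the descriptor */
    }
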
+15 -12
drivers/infiniband/ulp/ipoib/ipoib_main.c
··· 106 106 107 107 ipoib_dbg(priv, "bringing up interface\n"); 108 108 109 - set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags); 109 + if (!test_and_set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) 110 + napi_enable(&priv->napi); 110 111 111 112 if (ipoib_pkey_dev_delay_open(dev)) 112 113 return 0; 113 114 114 - napi_enable(&priv->napi); 115 + if (ipoib_ib_dev_open(dev)) 116 + goto err_disable; 115 117 116 - if (ipoib_ib_dev_open(dev)) { 117 - napi_disable(&priv->napi); 118 - return -EINVAL; 119 - } 120 - 121 - if (ipoib_ib_dev_up(dev)) { 122 - ipoib_ib_dev_stop(dev, 1); 123 - napi_disable(&priv->napi); 124 - return -EINVAL; 125 - } 118 + if (ipoib_ib_dev_up(dev)) 119 + goto err_stop; 126 120 127 121 if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) { 128 122 struct ipoib_dev_priv *cpriv; ··· 138 144 netif_start_queue(dev); 139 145 140 146 return 0; 147 + 148 + err_stop: 149 + ipoib_ib_dev_stop(dev, 1); 150 + 151 + err_disable: 152 + napi_disable(&priv->napi); 153 + clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags); 154 + 155 + return -EINVAL; 141 156 } 142 157 143 158 static int ipoib_stop(struct net_device *dev)
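
The reworked ipoib_open() also changes when NAPI is switched on: napi_enable() now runs on the 0 -> 1 transition of the ADMIN_UP flag, before the P_Key wait can return early, so any later napi_disable() in ipoib_stop() is guaranteed to find NAPI enabled (napi_disable() never returns for a NAPI context that was never enabled, which is the hang named in the commit subject). A stripped-down sketch of that idempotent-enable pattern, with a hypothetical private structure standing in for the driver's:

    #include <linux/netdevice.h>
    #include <linux/bitops.h>

    /* Hypothetical stand-in for the driver's private data. */
    struct demo_priv {
            unsigned long           flags;
            struct napi_struct      napi;
    };

    #define DEMO_FLAG_ADMIN_UP      0

    static void demo_open_napi(struct demo_priv *priv)
    {
            /* Enable NAPI only on the 0 -> 1 transition of the admin-up bit,
             * so a later napi_disable() always pairs with an earlier
             * napi_enable().
             */
            if (!test_and_set_bit(DEMO_FLAG_ADMIN_UP, &priv->flags))
                    napi_enable(&priv->napi);
    }
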
+1 -1
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
··· 409 409 } 410 410 411 411 if (mcast->logcount++ < 20) { 412 - if (status == -ETIMEDOUT) { 412 + if (status == -ETIMEDOUT || status == -EAGAIN) { 413 413 ipoib_dbg_mcast(priv, "multicast join failed for %pI6, status %d\n", 414 414 mcast->mcmember.mgid.raw, status); 415 415 } else {
+8 -3
drivers/infiniband/ulp/ipoib/ipoib_vlan.c
··· 61 61 62 62 ppriv = netdev_priv(pdev); 63 63 64 + rtnl_lock(); 64 65 mutex_lock(&ppriv->vlan_mutex); 65 66 66 67 /* ··· 112 111 goto device_init_failed; 113 112 } 114 113 115 - result = register_netdev(priv->dev); 114 + result = register_netdevice(priv->dev); 116 115 if (result) { 117 116 ipoib_warn(priv, "failed to initialize; error %i", result); 118 117 goto register_failed; ··· 135 134 list_add_tail(&priv->list, &ppriv->child_intfs); 136 135 137 136 mutex_unlock(&ppriv->vlan_mutex); 137 + rtnl_unlock(); 138 138 139 139 return 0; 140 140 141 141 sysfs_failed: 142 142 ipoib_delete_debug_files(priv->dev); 143 - unregister_netdev(priv->dev); 143 + unregister_netdevice(priv->dev); 144 144 145 145 register_failed: 146 146 ipoib_dev_cleanup(priv->dev); ··· 151 149 152 150 err: 153 151 mutex_unlock(&ppriv->vlan_mutex); 152 + rtnl_unlock(); 154 153 return result; 155 154 } 156 155 ··· 165 162 166 163 ppriv = netdev_priv(pdev); 167 164 165 + rtnl_lock(); 168 166 mutex_lock(&ppriv->vlan_mutex); 169 167 list_for_each_entry_safe(priv, tpriv, &ppriv->child_intfs, list) { 170 168 if (priv->pkey == pkey) { 171 - unregister_netdev(priv->dev); 169 + unregister_netdevice(priv->dev); 172 170 ipoib_dev_cleanup(priv->dev); 173 171 list_del(&priv->list); 174 172 free_netdev(priv->dev); ··· 179 175 } 180 176 } 181 177 mutex_unlock(&ppriv->vlan_mutex); 178 + rtnl_unlock(); 182 179 183 180 return ret; 184 181 }
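
The ipoib_vlan.c change untangles a lock-order inversion: ipoib_open() runs under RTNL and then takes the parent's vlan_mutex to bring up child interfaces, while child create/delete used to take vlan_mutex first and then call register_netdev()/unregister_netdev(), which acquire RTNL internally. Taking rtnl_lock() before vlan_mutex and using the register_netdevice()/unregister_netdevice() variants, which expect RTNL to be held already, leaves a single lock order. A condensed sketch of that ordering, with error handling omitted and hypothetical helper names:

    #include <linux/rtnetlink.h>
    #include <linux/netdevice.h>
    #include <linux/mutex.h>

    /* Lock order after the fix: RTNL strictly before the parent's
     * vlan_mutex, with the RTNL-held register variant.
     */
    static int demo_vlan_add(struct mutex *vlan_mutex, struct net_device *child)
    {
            int ret;

            rtnl_lock();
            mutex_lock(vlan_mutex);

            ret = register_netdevice(child);        /* RTNL already held */

            mutex_unlock(vlan_mutex);
            rtnl_unlock();
            return ret;
    }
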
+3 -3
drivers/net/mlx4/profile.c
··· 107 107 profile[MLX4_RES_AUXC].num = request->num_qp; 108 108 profile[MLX4_RES_SRQ].num = request->num_srq; 109 109 profile[MLX4_RES_CQ].num = request->num_cq; 110 - profile[MLX4_RES_EQ].num = min(dev_cap->max_eqs, 111 - dev_cap->reserved_eqs + 112 - num_possible_cpus() + 1); 110 + profile[MLX4_RES_EQ].num = min_t(unsigned, dev_cap->max_eqs, 111 + dev_cap->reserved_eqs + 112 + num_possible_cpus() + 1); 113 113 profile[MLX4_RES_DMPT].num = request->num_mpt; 114 114 profile[MLX4_RES_CMPT].num = MLX4_NUM_CMPTS; 115 115 profile[MLX4_RES_MTT].num = request->num_mtt;
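
The min() to min_t() switch in profile.c silences the kernel's min() type check: min() refuses to compare operands of different types (the familiar "comparison of distinct pointer types lacks a cast" warning), and the two operands here end up with different types once num_possible_cpus() enters the expression. min_t(unsigned, ...) casts both sides to one explicit type first. A reduced example with illustrative names only:

    #include <linux/kernel.h>

    /* min() type-checks its arguments and warns when they differ;
     * min_t() casts both operands to the named type before comparing.
     */
    static unsigned pick_num_eqs(int max_eqs, unsigned wanted_eqs)
    {
            /* min(max_eqs, wanted_eqs) would warn: int vs. unsigned. */
            return min_t(unsigned, max_eqs, wanted_eqs);
    }
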