Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

IB/mthca: Stop returning separate error and status from FW commands

Instead of having firmware command functions return an error and also
a status, leading to code like:

	err = mthca_FW_COMMAND(..., &status);
	if (err)
		goto out;
	if (status) {
		err = -E...;
		goto out;
	}

all over the place, just handle the FW status inside the FW command
handling code (the way mlx4 does it), so we can simply write:

	err = mthca_FW_COMMAND(...);
	if (err)
		goto out;

In addition to simplifying the source code, this also saves a healthy
chunk of text:

add/remove: 0/0 grow/shrink: 10/88 up/down: 510/-3357 (-2847)
function old new delta
static.trans_table 324 584 +260
mthca_cmd_poll 352 477 +125
mthca_cmd_wait 511 567 +56
mthca_table_put 213 240 +27
mthca_cleanup_db_tab 372 387 +15
__mthca_remove_one 314 323 +9
mthca_cleanup_user_db_tab 275 283 +8
__mthca_init_one 1738 1746 +8
mthca_cleanup 20 21 +1
mthca_MAD_IFC 1081 1082 +1
mthca_MGID_HASH 43 40 -3
mthca_MAP_ICM_AUX 23 20 -3
mthca_MAP_ICM 19 16 -3
mthca_MAP_FA 23 20 -3
mthca_READ_MGM 43 38 -5
mthca_QUERY_SRQ 43 38 -5
mthca_QUERY_QP 59 54 -5
mthca_HW2SW_SRQ 43 38 -5
mthca_HW2SW_MPT 60 55 -5
mthca_HW2SW_EQ 43 38 -5
mthca_HW2SW_CQ 43 38 -5
mthca_free_icm_table 120 114 -6
mthca_query_srq 214 206 -8
mthca_free_qp 662 654 -8
mthca_cmd 38 28 -10
mthca_alloc_db 1321 1311 -10
mthca_setup_hca 1067 1055 -12
mthca_WRITE_MTT 35 22 -13
mthca_WRITE_MGM 40 27 -13
mthca_UNMAP_ICM_AUX 36 23 -13
mthca_UNMAP_FA 36 23 -13
mthca_SYS_DIS 36 23 -13
mthca_SYNC_TPT 36 23 -13
mthca_SW2HW_SRQ 35 22 -13
mthca_SW2HW_MPT 35 22 -13
mthca_SW2HW_EQ 35 22 -13
mthca_SW2HW_CQ 35 22 -13
mthca_RUN_FW 36 23 -13
mthca_DISABLE_LAM 36 23 -13
mthca_CLOSE_IB 36 23 -13
mthca_CLOSE_HCA 38 25 -13
mthca_ARM_SRQ 39 26 -13
mthca_free_icms 178 164 -14
mthca_QUERY_DDR 389 375 -14
mthca_resize_cq 1063 1048 -15
mthca_unmap_eq_icm 123 107 -16
mthca_map_eq_icm 396 380 -16
mthca_cmd_box 90 74 -16
mthca_SET_IB 433 417 -16
mthca_RESIZE_CQ 369 353 -16
mthca_MAP_ICM_page 240 224 -16
mthca_MAP_EQ 183 167 -16
mthca_INIT_IB 473 457 -16
mthca_INIT_HCA 745 729 -16
mthca_map_user_db 816 798 -18
mthca_SYS_EN 157 139 -18
mthca_cleanup_qp_table 78 59 -19
mthca_cleanup_eq_table 168 149 -19
mthca_UNMAP_ICM 143 121 -22
mthca_modify_srq 172 149 -23
mthca_unmap_fmr 198 174 -24
mthca_query_qp 814 790 -24
mthca_query_pkey 343 319 -24
mthca_SET_ICM_SIZE 34 10 -24
mthca_QUERY_DEV_LIM 1870 1846 -24
mthca_map_cmd 1130 1105 -25
mthca_ENABLE_LAM 401 375 -26
mthca_modify_port 247 220 -27
mthca_query_device 884 850 -34
mthca_NOP 75 41 -34
mthca_table_get 287 249 -38
mthca_init_qp_table 333 293 -40
mthca_MODIFY_QP 348 308 -40
mthca_close_hca 131 89 -42
mthca_free_eq 435 390 -45
mthca_query_port 755 705 -50
mthca_free_cq 581 528 -53
mthca_alloc_icm_table 578 524 -54
mthca_multicast_attach 1041 986 -55
mthca_init_hca 326 271 -55
mthca_query_gid 487 431 -56
mthca_free_srq 524 468 -56
mthca_free_mr 168 111 -57
mthca_create_eq 1560 1501 -59
mthca_multicast_detach 790 728 -62
mthca_write_mtt 918 854 -64
mthca_register_device 1406 1342 -64
mthca_fmr_alloc 947 883 -64
mthca_mr_alloc 652 582 -70
mthca_process_mad 1242 1164 -78
mthca_dev_lim 910 830 -80
find_mgm 482 400 -82
mthca_modify_qp 3852 3753 -99
mthca_init_cq 1281 1181 -100
mthca_alloc_srq 1719 1610 -109
mthca_init_eq_table 1807 1679 -128
mthca_init_tavor 761 491 -270
mthca_init_arbel 2617 2098 -519

Signed-off-by: Goldwyn Rodrigues <rgoldwyn@suse.de>

authored by

Goldwyn Rodrigues and committed by
Roland Dreier
cdb73db0 620917de

+342 -611
+152 -124
drivers/infiniband/hw/mthca/mthca_cmd.c
··· 301 301 return err; 302 302 } 303 303 304 + 305 + static int mthca_status_to_errno(u8 status) 306 + { 307 + static const int trans_table[] = { 308 + [MTHCA_CMD_STAT_INTERNAL_ERR] = -EIO, 309 + [MTHCA_CMD_STAT_BAD_OP] = -EPERM, 310 + [MTHCA_CMD_STAT_BAD_PARAM] = -EINVAL, 311 + [MTHCA_CMD_STAT_BAD_SYS_STATE] = -ENXIO, 312 + [MTHCA_CMD_STAT_BAD_RESOURCE] = -EBADF, 313 + [MTHCA_CMD_STAT_RESOURCE_BUSY] = -EBUSY, 314 + [MTHCA_CMD_STAT_DDR_MEM_ERR] = -ENOMEM, 315 + [MTHCA_CMD_STAT_EXCEED_LIM] = -ENOMEM, 316 + [MTHCA_CMD_STAT_BAD_RES_STATE] = -EBADF, 317 + [MTHCA_CMD_STAT_BAD_INDEX] = -EBADF, 318 + [MTHCA_CMD_STAT_BAD_NVMEM] = -EFAULT, 319 + [MTHCA_CMD_STAT_BAD_QPEE_STATE] = -EINVAL, 320 + [MTHCA_CMD_STAT_BAD_SEG_PARAM] = -EFAULT, 321 + [MTHCA_CMD_STAT_REG_BOUND] = -EBUSY, 322 + [MTHCA_CMD_STAT_LAM_NOT_PRE] = -EAGAIN, 323 + [MTHCA_CMD_STAT_BAD_PKT] = -EBADMSG, 324 + [MTHCA_CMD_STAT_BAD_SIZE] = -ENOMEM, 325 + }; 326 + 327 + if (status >= ARRAY_SIZE(trans_table) || 328 + (status != MTHCA_CMD_STAT_OK 329 + && trans_table[status] == 0)) 330 + return -EINVAL; 331 + 332 + return trans_table[status]; 333 + } 334 + 335 + 304 336 static int mthca_cmd_poll(struct mthca_dev *dev, 305 337 u64 in_param, 306 338 u64 *out_param, ··· 340 308 u32 in_modifier, 341 309 u8 op_modifier, 342 310 u16 op, 343 - unsigned long timeout, 344 - u8 *status) 311 + unsigned long timeout) 345 312 { 346 313 int err = 0; 347 314 unsigned long end; 315 + u8 status; 348 316 349 317 down(&dev->cmd.poll_sem); 350 318 ··· 373 341 (u64) be32_to_cpu((__force __be32) 374 342 __raw_readl(dev->hcr + HCR_OUT_PARAM_OFFSET + 4)); 375 343 376 - *status = be32_to_cpu((__force __be32) __raw_readl(dev->hcr + HCR_STATUS_OFFSET)) >> 24; 344 + status = be32_to_cpu((__force __be32) __raw_readl(dev->hcr + HCR_STATUS_OFFSET)) >> 24; 345 + if (status) { 346 + mthca_dbg(dev, "Command %02x completed with status %02x\n", 347 + op, status); 348 + err = mthca_status_to_errno(status); 349 + } 377 350 378 351 out: 379 352 
up(&dev->cmd.poll_sem); ··· 411 374 u32 in_modifier, 412 375 u8 op_modifier, 413 376 u16 op, 414 - unsigned long timeout, 415 - u8 *status) 377 + unsigned long timeout) 416 378 { 417 379 int err = 0; 418 380 struct mthca_cmd_context *context; ··· 443 407 if (err) 444 408 goto out; 445 409 446 - *status = context->status; 447 - if (*status) 410 + if (context->status) { 448 411 mthca_dbg(dev, "Command %02x completed with status %02x\n", 449 - op, *status); 412 + op, context->status); 413 + err = mthca_status_to_errno(context->status); 414 + } 450 415 451 416 if (out_is_imm) 452 417 *out_param = context->out_param; ··· 469 432 u32 in_modifier, 470 433 u8 op_modifier, 471 434 u16 op, 472 - unsigned long timeout, 473 - u8 *status) 435 + unsigned long timeout) 474 436 { 475 437 if (dev->cmd.flags & MTHCA_CMD_USE_EVENTS) 476 438 return mthca_cmd_wait(dev, in_param, &out_param, 0, 477 439 in_modifier, op_modifier, op, 478 - timeout, status); 440 + timeout); 479 441 else 480 442 return mthca_cmd_poll(dev, in_param, &out_param, 0, 481 443 in_modifier, op_modifier, op, 482 - timeout, status); 444 + timeout); 483 445 } 484 446 485 447 /* Invoke a command with no output parameter */ ··· 487 451 u32 in_modifier, 488 452 u8 op_modifier, 489 453 u16 op, 490 - unsigned long timeout, 491 - u8 *status) 454 + unsigned long timeout) 492 455 { 493 456 return mthca_cmd_box(dev, in_param, 0, in_modifier, 494 - op_modifier, op, timeout, status); 457 + op_modifier, op, timeout); 495 458 } 496 459 497 460 /* ··· 504 469 u32 in_modifier, 505 470 u8 op_modifier, 506 471 u16 op, 507 - unsigned long timeout, 508 - u8 *status) 472 + unsigned long timeout) 509 473 { 510 474 if (dev->cmd.flags & MTHCA_CMD_USE_EVENTS) 511 475 return mthca_cmd_wait(dev, in_param, out_param, 1, 512 476 in_modifier, op_modifier, op, 513 - timeout, status); 477 + timeout); 514 478 else 515 479 return mthca_cmd_poll(dev, in_param, out_param, 1, 516 480 in_modifier, op_modifier, op, 517 - timeout, status); 481 + timeout); 
518 482 } 519 483 520 484 int mthca_cmd_init(struct mthca_dev *dev) ··· 630 596 kfree(mailbox); 631 597 } 632 598 633 - int mthca_SYS_EN(struct mthca_dev *dev, u8 *status) 599 + int mthca_SYS_EN(struct mthca_dev *dev) 634 600 { 635 601 u64 out; 636 602 int ret; 637 603 638 - ret = mthca_cmd_imm(dev, 0, &out, 0, 0, CMD_SYS_EN, CMD_TIME_CLASS_D, status); 604 + ret = mthca_cmd_imm(dev, 0, &out, 0, 0, CMD_SYS_EN, CMD_TIME_CLASS_D); 639 605 640 - if (*status == MTHCA_CMD_STAT_DDR_MEM_ERR) 606 + if (ret == -ENOMEM) 641 607 mthca_warn(dev, "SYS_EN DDR error: syn=%x, sock=%d, " 642 608 "sladdr=%d, SPD source=%s\n", 643 609 (int) (out >> 6) & 0xf, (int) (out >> 4) & 3, ··· 646 612 return ret; 647 613 } 648 614 649 - int mthca_SYS_DIS(struct mthca_dev *dev, u8 *status) 615 + int mthca_SYS_DIS(struct mthca_dev *dev) 650 616 { 651 - return mthca_cmd(dev, 0, 0, 0, CMD_SYS_DIS, CMD_TIME_CLASS_C, status); 617 + return mthca_cmd(dev, 0, 0, 0, CMD_SYS_DIS, CMD_TIME_CLASS_C); 652 618 } 653 619 654 620 static int mthca_map_cmd(struct mthca_dev *dev, u16 op, struct mthca_icm *icm, 655 - u64 virt, u8 *status) 621 + u64 virt) 656 622 { 657 623 struct mthca_mailbox *mailbox; 658 624 struct mthca_icm_iter iter; ··· 700 666 701 667 if (++nent == MTHCA_MAILBOX_SIZE / 16) { 702 668 err = mthca_cmd(dev, mailbox->dma, nent, 0, op, 703 - CMD_TIME_CLASS_B, status); 704 - if (err || *status) 669 + CMD_TIME_CLASS_B); 670 + if (err) 705 671 goto out; 706 672 nent = 0; 707 673 } ··· 710 676 711 677 if (nent) 712 678 err = mthca_cmd(dev, mailbox->dma, nent, 0, op, 713 - CMD_TIME_CLASS_B, status); 679 + CMD_TIME_CLASS_B); 714 680 715 681 switch (op) { 716 682 case CMD_MAP_FA: ··· 730 696 return err; 731 697 } 732 698 733 - int mthca_MAP_FA(struct mthca_dev *dev, struct mthca_icm *icm, u8 *status) 699 + int mthca_MAP_FA(struct mthca_dev *dev, struct mthca_icm *icm) 734 700 { 735 - return mthca_map_cmd(dev, CMD_MAP_FA, icm, -1, status); 701 + return mthca_map_cmd(dev, CMD_MAP_FA, icm, -1); 736 702 } 737 
703 738 - int mthca_UNMAP_FA(struct mthca_dev *dev, u8 *status) 704 + int mthca_UNMAP_FA(struct mthca_dev *dev) 739 705 { 740 - return mthca_cmd(dev, 0, 0, 0, CMD_UNMAP_FA, CMD_TIME_CLASS_B, status); 706 + return mthca_cmd(dev, 0, 0, 0, CMD_UNMAP_FA, CMD_TIME_CLASS_B); 741 707 } 742 708 743 - int mthca_RUN_FW(struct mthca_dev *dev, u8 *status) 709 + int mthca_RUN_FW(struct mthca_dev *dev) 744 710 { 745 - return mthca_cmd(dev, 0, 0, 0, CMD_RUN_FW, CMD_TIME_CLASS_A, status); 711 + return mthca_cmd(dev, 0, 0, 0, CMD_RUN_FW, CMD_TIME_CLASS_A); 746 712 } 747 713 748 714 static void mthca_setup_cmd_doorbells(struct mthca_dev *dev, u64 base) ··· 771 737 mthca_dbg(dev, "Mapped doorbell page for posting FW commands\n"); 772 738 } 773 739 774 - int mthca_QUERY_FW(struct mthca_dev *dev, u8 *status) 740 + int mthca_QUERY_FW(struct mthca_dev *dev) 775 741 { 776 742 struct mthca_mailbox *mailbox; 777 743 u32 *outbox; ··· 805 771 outbox = mailbox->buf; 806 772 807 773 err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_FW, 808 - CMD_TIME_CLASS_A, status); 774 + CMD_TIME_CLASS_A); 809 775 810 776 if (err) 811 777 goto out; ··· 877 843 return err; 878 844 } 879 845 880 - int mthca_ENABLE_LAM(struct mthca_dev *dev, u8 *status) 846 + int mthca_ENABLE_LAM(struct mthca_dev *dev) 881 847 { 882 848 struct mthca_mailbox *mailbox; 883 849 u8 info; ··· 898 864 outbox = mailbox->buf; 899 865 900 866 err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_ENABLE_LAM, 901 - CMD_TIME_CLASS_C, status); 867 + CMD_TIME_CLASS_C); 902 868 903 869 if (err) 904 - goto out; 905 - 906 - if (*status == MTHCA_CMD_STAT_LAM_NOT_PRE) 907 870 goto out; 908 871 909 872 MTHCA_GET(dev->ddr_start, outbox, ENABLE_LAM_START_OFFSET); ··· 927 896 return err; 928 897 } 929 898 930 - int mthca_DISABLE_LAM(struct mthca_dev *dev, u8 *status) 899 + int mthca_DISABLE_LAM(struct mthca_dev *dev) 931 900 { 932 - return mthca_cmd(dev, 0, 0, 0, CMD_SYS_DIS, CMD_TIME_CLASS_C, status); 901 + return mthca_cmd(dev, 0, 0, 0, 
CMD_SYS_DIS, CMD_TIME_CLASS_C); 933 902 } 934 903 935 - int mthca_QUERY_DDR(struct mthca_dev *dev, u8 *status) 904 + int mthca_QUERY_DDR(struct mthca_dev *dev) 936 905 { 937 906 struct mthca_mailbox *mailbox; 938 907 u8 info; ··· 953 922 outbox = mailbox->buf; 954 923 955 924 err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_DDR, 956 - CMD_TIME_CLASS_A, status); 925 + CMD_TIME_CLASS_A); 957 926 958 927 if (err) 959 928 goto out; ··· 983 952 } 984 953 985 954 int mthca_QUERY_DEV_LIM(struct mthca_dev *dev, 986 - struct mthca_dev_lim *dev_lim, u8 *status) 955 + struct mthca_dev_lim *dev_lim) 987 956 { 988 957 struct mthca_mailbox *mailbox; 989 958 u32 *outbox; ··· 1059 1028 outbox = mailbox->buf; 1060 1029 1061 1030 err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_DEV_LIM, 1062 - CMD_TIME_CLASS_A, status); 1031 + CMD_TIME_CLASS_A); 1063 1032 1064 1033 if (err) 1065 1034 goto out; ··· 1263 1232 } 1264 1233 1265 1234 int mthca_QUERY_ADAPTER(struct mthca_dev *dev, 1266 - struct mthca_adapter *adapter, u8 *status) 1235 + struct mthca_adapter *adapter) 1267 1236 { 1268 1237 struct mthca_mailbox *mailbox; 1269 1238 u32 *outbox; ··· 1282 1251 outbox = mailbox->buf; 1283 1252 1284 1253 err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_ADAPTER, 1285 - CMD_TIME_CLASS_A, status); 1254 + CMD_TIME_CLASS_A); 1286 1255 1287 1256 if (err) 1288 1257 goto out; ··· 1306 1275 } 1307 1276 1308 1277 int mthca_INIT_HCA(struct mthca_dev *dev, 1309 - struct mthca_init_hca_param *param, 1310 - u8 *status) 1278 + struct mthca_init_hca_param *param) 1311 1279 { 1312 1280 struct mthca_mailbox *mailbox; 1313 1281 __be32 *inbox; ··· 1423 1393 MTHCA_PUT(inbox, param->uarc_base, INIT_HCA_UAR_CTX_BASE_OFFSET); 1424 1394 } 1425 1395 1426 - err = mthca_cmd(dev, mailbox->dma, 0, 0, CMD_INIT_HCA, CMD_TIME_CLASS_D, status); 1396 + err = mthca_cmd(dev, mailbox->dma, 0, 0, 1397 + CMD_INIT_HCA, CMD_TIME_CLASS_D); 1427 1398 1428 1399 mthca_free_mailbox(dev, mailbox); 1429 1400 return 
err; ··· 1432 1401 1433 1402 int mthca_INIT_IB(struct mthca_dev *dev, 1434 1403 struct mthca_init_ib_param *param, 1435 - int port, u8 *status) 1404 + int port) 1436 1405 { 1437 1406 struct mthca_mailbox *mailbox; 1438 1407 u32 *inbox; ··· 1476 1445 MTHCA_PUT(inbox, param->si_guid, INIT_IB_SI_GUID_OFFSET); 1477 1446 1478 1447 err = mthca_cmd(dev, mailbox->dma, port, 0, CMD_INIT_IB, 1479 - CMD_TIME_CLASS_A, status); 1448 + CMD_TIME_CLASS_A); 1480 1449 1481 1450 mthca_free_mailbox(dev, mailbox); 1482 1451 return err; 1483 1452 } 1484 1453 1485 - int mthca_CLOSE_IB(struct mthca_dev *dev, int port, u8 *status) 1454 + int mthca_CLOSE_IB(struct mthca_dev *dev, int port) 1486 1455 { 1487 - return mthca_cmd(dev, 0, port, 0, CMD_CLOSE_IB, CMD_TIME_CLASS_A, status); 1456 + return mthca_cmd(dev, 0, port, 0, CMD_CLOSE_IB, CMD_TIME_CLASS_A); 1488 1457 } 1489 1458 1490 - int mthca_CLOSE_HCA(struct mthca_dev *dev, int panic, u8 *status) 1459 + int mthca_CLOSE_HCA(struct mthca_dev *dev, int panic) 1491 1460 { 1492 - return mthca_cmd(dev, 0, 0, panic, CMD_CLOSE_HCA, CMD_TIME_CLASS_C, status); 1461 + return mthca_cmd(dev, 0, 0, panic, CMD_CLOSE_HCA, CMD_TIME_CLASS_C); 1493 1462 } 1494 1463 1495 1464 int mthca_SET_IB(struct mthca_dev *dev, struct mthca_set_ib_param *param, 1496 - int port, u8 *status) 1465 + int port) 1497 1466 { 1498 1467 struct mthca_mailbox *mailbox; 1499 1468 u32 *inbox; ··· 1522 1491 MTHCA_PUT(inbox, param->si_guid, SET_IB_SI_GUID_OFFSET); 1523 1492 1524 1493 err = mthca_cmd(dev, mailbox->dma, port, 0, CMD_SET_IB, 1525 - CMD_TIME_CLASS_B, status); 1494 + CMD_TIME_CLASS_B); 1526 1495 1527 1496 mthca_free_mailbox(dev, mailbox); 1528 1497 return err; 1529 1498 } 1530 1499 1531 - int mthca_MAP_ICM(struct mthca_dev *dev, struct mthca_icm *icm, u64 virt, u8 *status) 1500 + int mthca_MAP_ICM(struct mthca_dev *dev, struct mthca_icm *icm, u64 virt) 1532 1501 { 1533 - return mthca_map_cmd(dev, CMD_MAP_ICM, icm, virt, status); 1502 + return mthca_map_cmd(dev, CMD_MAP_ICM, 
icm, virt); 1534 1503 } 1535 1504 1536 - int mthca_MAP_ICM_page(struct mthca_dev *dev, u64 dma_addr, u64 virt, u8 *status) 1505 + int mthca_MAP_ICM_page(struct mthca_dev *dev, u64 dma_addr, u64 virt) 1537 1506 { 1538 1507 struct mthca_mailbox *mailbox; 1539 1508 __be64 *inbox; ··· 1548 1517 inbox[1] = cpu_to_be64(dma_addr); 1549 1518 1550 1519 err = mthca_cmd(dev, mailbox->dma, 1, 0, CMD_MAP_ICM, 1551 - CMD_TIME_CLASS_B, status); 1520 + CMD_TIME_CLASS_B); 1552 1521 1553 1522 mthca_free_mailbox(dev, mailbox); 1554 1523 ··· 1559 1528 return err; 1560 1529 } 1561 1530 1562 - int mthca_UNMAP_ICM(struct mthca_dev *dev, u64 virt, u32 page_count, u8 *status) 1531 + int mthca_UNMAP_ICM(struct mthca_dev *dev, u64 virt, u32 page_count) 1563 1532 { 1564 1533 mthca_dbg(dev, "Unmapping %d pages at %llx from ICM.\n", 1565 1534 page_count, (unsigned long long) virt); 1566 1535 1567 - return mthca_cmd(dev, virt, page_count, 0, CMD_UNMAP_ICM, CMD_TIME_CLASS_B, status); 1536 + return mthca_cmd(dev, virt, page_count, 0, 1537 + CMD_UNMAP_ICM, CMD_TIME_CLASS_B); 1568 1538 } 1569 1539 1570 - int mthca_MAP_ICM_AUX(struct mthca_dev *dev, struct mthca_icm *icm, u8 *status) 1540 + int mthca_MAP_ICM_AUX(struct mthca_dev *dev, struct mthca_icm *icm) 1571 1541 { 1572 - return mthca_map_cmd(dev, CMD_MAP_ICM_AUX, icm, -1, status); 1542 + return mthca_map_cmd(dev, CMD_MAP_ICM_AUX, icm, -1); 1573 1543 } 1574 1544 1575 - int mthca_UNMAP_ICM_AUX(struct mthca_dev *dev, u8 *status) 1545 + int mthca_UNMAP_ICM_AUX(struct mthca_dev *dev) 1576 1546 { 1577 - return mthca_cmd(dev, 0, 0, 0, CMD_UNMAP_ICM_AUX, CMD_TIME_CLASS_B, status); 1547 + return mthca_cmd(dev, 0, 0, 0, CMD_UNMAP_ICM_AUX, CMD_TIME_CLASS_B); 1578 1548 } 1579 1549 1580 - int mthca_SET_ICM_SIZE(struct mthca_dev *dev, u64 icm_size, u64 *aux_pages, 1581 - u8 *status) 1550 + int mthca_SET_ICM_SIZE(struct mthca_dev *dev, u64 icm_size, u64 *aux_pages) 1582 1551 { 1583 - int ret = mthca_cmd_imm(dev, icm_size, aux_pages, 0, 0, CMD_SET_ICM_SIZE, 
1584 - CMD_TIME_CLASS_A, status); 1552 + int ret = mthca_cmd_imm(dev, icm_size, aux_pages, 0, 1553 + 0, CMD_SET_ICM_SIZE, CMD_TIME_CLASS_A); 1585 1554 1586 - if (ret || status) 1555 + if (ret) 1587 1556 return ret; 1588 1557 1589 1558 /* ··· 1597 1566 } 1598 1567 1599 1568 int mthca_SW2HW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox, 1600 - int mpt_index, u8 *status) 1569 + int mpt_index) 1601 1570 { 1602 1571 return mthca_cmd(dev, mailbox->dma, mpt_index, 0, CMD_SW2HW_MPT, 1603 - CMD_TIME_CLASS_B, status); 1572 + CMD_TIME_CLASS_B); 1604 1573 } 1605 1574 1606 1575 int mthca_HW2SW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox, 1607 - int mpt_index, u8 *status) 1576 + int mpt_index) 1608 1577 { 1609 1578 return mthca_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, mpt_index, 1610 1579 !mailbox, CMD_HW2SW_MPT, 1611 - CMD_TIME_CLASS_B, status); 1580 + CMD_TIME_CLASS_B); 1612 1581 } 1613 1582 1614 1583 int mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox, 1615 - int num_mtt, u8 *status) 1584 + int num_mtt) 1616 1585 { 1617 1586 return mthca_cmd(dev, mailbox->dma, num_mtt, 0, CMD_WRITE_MTT, 1618 - CMD_TIME_CLASS_B, status); 1587 + CMD_TIME_CLASS_B); 1619 1588 } 1620 1589 1621 - int mthca_SYNC_TPT(struct mthca_dev *dev, u8 *status) 1590 + int mthca_SYNC_TPT(struct mthca_dev *dev) 1622 1591 { 1623 - return mthca_cmd(dev, 0, 0, 0, CMD_SYNC_TPT, CMD_TIME_CLASS_B, status); 1592 + return mthca_cmd(dev, 0, 0, 0, CMD_SYNC_TPT, CMD_TIME_CLASS_B); 1624 1593 } 1625 1594 1626 1595 int mthca_MAP_EQ(struct mthca_dev *dev, u64 event_mask, int unmap, 1627 - int eq_num, u8 *status) 1596 + int eq_num) 1628 1597 { 1629 1598 mthca_dbg(dev, "%s mask %016llx for eqn %d\n", 1630 1599 unmap ? 
"Clearing" : "Setting", 1631 1600 (unsigned long long) event_mask, eq_num); 1632 1601 return mthca_cmd(dev, event_mask, (unmap << 31) | eq_num, 1633 - 0, CMD_MAP_EQ, CMD_TIME_CLASS_B, status); 1602 + 0, CMD_MAP_EQ, CMD_TIME_CLASS_B); 1634 1603 } 1635 1604 1636 1605 int mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, 1637 - int eq_num, u8 *status) 1606 + int eq_num) 1638 1607 { 1639 1608 return mthca_cmd(dev, mailbox->dma, eq_num, 0, CMD_SW2HW_EQ, 1640 - CMD_TIME_CLASS_A, status); 1609 + CMD_TIME_CLASS_A); 1641 1610 } 1642 1611 1643 1612 int mthca_HW2SW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, 1644 - int eq_num, u8 *status) 1613 + int eq_num) 1645 1614 { 1646 1615 return mthca_cmd_box(dev, 0, mailbox->dma, eq_num, 0, 1647 1616 CMD_HW2SW_EQ, 1648 - CMD_TIME_CLASS_A, status); 1617 + CMD_TIME_CLASS_A); 1649 1618 } 1650 1619 1651 1620 int mthca_SW2HW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, 1652 - int cq_num, u8 *status) 1621 + int cq_num) 1653 1622 { 1654 1623 return mthca_cmd(dev, mailbox->dma, cq_num, 0, CMD_SW2HW_CQ, 1655 - CMD_TIME_CLASS_A, status); 1624 + CMD_TIME_CLASS_A); 1656 1625 } 1657 1626 1658 1627 int mthca_HW2SW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, 1659 - int cq_num, u8 *status) 1628 + int cq_num) 1660 1629 { 1661 1630 return mthca_cmd_box(dev, 0, mailbox->dma, cq_num, 0, 1662 1631 CMD_HW2SW_CQ, 1663 - CMD_TIME_CLASS_A, status); 1632 + CMD_TIME_CLASS_A); 1664 1633 } 1665 1634 1666 - int mthca_RESIZE_CQ(struct mthca_dev *dev, int cq_num, u32 lkey, u8 log_size, 1667 - u8 *status) 1635 + int mthca_RESIZE_CQ(struct mthca_dev *dev, int cq_num, u32 lkey, u8 log_size) 1668 1636 { 1669 1637 struct mthca_mailbox *mailbox; 1670 1638 __be32 *inbox; ··· 1687 1657 MTHCA_PUT(inbox, lkey, RESIZE_CQ_LKEY_OFFSET); 1688 1658 1689 1659 err = mthca_cmd(dev, mailbox->dma, cq_num, 1, CMD_RESIZE_CQ, 1690 - CMD_TIME_CLASS_B, status); 1660 + CMD_TIME_CLASS_B); 1691 1661 1692 1662 mthca_free_mailbox(dev, 
mailbox); 1693 1663 return err; 1694 1664 } 1695 1665 1696 1666 int mthca_SW2HW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, 1697 - int srq_num, u8 *status) 1667 + int srq_num) 1698 1668 { 1699 1669 return mthca_cmd(dev, mailbox->dma, srq_num, 0, CMD_SW2HW_SRQ, 1700 - CMD_TIME_CLASS_A, status); 1670 + CMD_TIME_CLASS_A); 1701 1671 } 1702 1672 1703 1673 int mthca_HW2SW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, 1704 - int srq_num, u8 *status) 1674 + int srq_num) 1705 1675 { 1706 1676 return mthca_cmd_box(dev, 0, mailbox->dma, srq_num, 0, 1707 1677 CMD_HW2SW_SRQ, 1708 - CMD_TIME_CLASS_A, status); 1678 + CMD_TIME_CLASS_A); 1709 1679 } 1710 1680 1711 1681 int mthca_QUERY_SRQ(struct mthca_dev *dev, u32 num, 1712 - struct mthca_mailbox *mailbox, u8 *status) 1682 + struct mthca_mailbox *mailbox) 1713 1683 { 1714 1684 return mthca_cmd_box(dev, 0, mailbox->dma, num, 0, 1715 - CMD_QUERY_SRQ, CMD_TIME_CLASS_A, status); 1685 + CMD_QUERY_SRQ, CMD_TIME_CLASS_A); 1716 1686 } 1717 1687 1718 - int mthca_ARM_SRQ(struct mthca_dev *dev, int srq_num, int limit, u8 *status) 1688 + int mthca_ARM_SRQ(struct mthca_dev *dev, int srq_num, int limit) 1719 1689 { 1720 1690 return mthca_cmd(dev, limit, srq_num, 0, CMD_ARM_SRQ, 1721 - CMD_TIME_CLASS_B, status); 1691 + CMD_TIME_CLASS_B); 1722 1692 } 1723 1693 1724 1694 int mthca_MODIFY_QP(struct mthca_dev *dev, enum ib_qp_state cur, 1725 1695 enum ib_qp_state next, u32 num, int is_ee, 1726 - struct mthca_mailbox *mailbox, u32 optmask, 1727 - u8 *status) 1696 + struct mthca_mailbox *mailbox, u32 optmask) 1728 1697 { 1729 1698 static const u16 op[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = { 1730 1699 [IB_QPS_RESET] = { ··· 1784 1755 1785 1756 err = mthca_cmd_box(dev, 0, mailbox ? 
mailbox->dma : 0, 1786 1757 (!!is_ee << 24) | num, op_mod, 1787 - op[cur][next], CMD_TIME_CLASS_C, status); 1758 + op[cur][next], CMD_TIME_CLASS_C); 1788 1759 1789 1760 if (0 && mailbox) { 1790 1761 int i; ··· 1818 1789 } 1819 1790 1820 1791 err = mthca_cmd(dev, mailbox->dma, optmask | (!!is_ee << 24) | num, 1821 - op_mod, op[cur][next], CMD_TIME_CLASS_C, status); 1792 + op_mod, op[cur][next], CMD_TIME_CLASS_C); 1822 1793 } 1823 1794 1824 1795 return err; 1825 1796 } 1826 1797 1827 1798 int mthca_QUERY_QP(struct mthca_dev *dev, u32 num, int is_ee, 1828 - struct mthca_mailbox *mailbox, u8 *status) 1799 + struct mthca_mailbox *mailbox) 1829 1800 { 1830 1801 return mthca_cmd_box(dev, 0, mailbox->dma, (!!is_ee << 24) | num, 0, 1831 - CMD_QUERY_QPEE, CMD_TIME_CLASS_A, status); 1802 + CMD_QUERY_QPEE, CMD_TIME_CLASS_A); 1832 1803 } 1833 1804 1834 - int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn, 1835 - u8 *status) 1805 + int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn) 1836 1806 { 1837 1807 u8 op_mod; 1838 1808 ··· 1853 1825 } 1854 1826 1855 1827 return mthca_cmd(dev, 0, qpn, op_mod, CMD_CONF_SPECIAL_QP, 1856 - CMD_TIME_CLASS_B, status); 1828 + CMD_TIME_CLASS_B); 1857 1829 } 1858 1830 1859 1831 int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey, 1860 1832 int port, struct ib_wc *in_wc, struct ib_grh *in_grh, 1861 - void *in_mad, void *response_mad, u8 *status) 1833 + void *in_mad, void *response_mad) 1862 1834 { 1863 1835 struct mthca_mailbox *inmailbox, *outmailbox; 1864 1836 void *inbox; ··· 1925 1897 1926 1898 err = mthca_cmd_box(dev, inmailbox->dma, outmailbox->dma, 1927 1899 in_modifier, op_modifier, 1928 - CMD_MAD_IFC, CMD_TIME_CLASS_C, status); 1900 + CMD_MAD_IFC, CMD_TIME_CLASS_C); 1929 1901 1930 - if (!err && !*status) 1902 + if (!err) 1931 1903 memcpy(response_mad, outmailbox->buf, 256); 1932 1904 1933 1905 mthca_free_mailbox(dev, inmailbox); ··· 1936 1908 } 1937 1909 1938 1910 int 
mthca_READ_MGM(struct mthca_dev *dev, int index, 1939 - struct mthca_mailbox *mailbox, u8 *status) 1911 + struct mthca_mailbox *mailbox) 1940 1912 { 1941 1913 return mthca_cmd_box(dev, 0, mailbox->dma, index, 0, 1942 - CMD_READ_MGM, CMD_TIME_CLASS_A, status); 1914 + CMD_READ_MGM, CMD_TIME_CLASS_A); 1943 1915 } 1944 1916 1945 1917 int mthca_WRITE_MGM(struct mthca_dev *dev, int index, 1946 - struct mthca_mailbox *mailbox, u8 *status) 1918 + struct mthca_mailbox *mailbox) 1947 1919 { 1948 1920 return mthca_cmd(dev, mailbox->dma, index, 0, CMD_WRITE_MGM, 1949 - CMD_TIME_CLASS_A, status); 1921 + CMD_TIME_CLASS_A); 1950 1922 } 1951 1923 1952 1924 int mthca_MGID_HASH(struct mthca_dev *dev, struct mthca_mailbox *mailbox, 1953 - u16 *hash, u8 *status) 1925 + u16 *hash) 1954 1926 { 1955 1927 u64 imm; 1956 1928 int err; 1957 1929 1958 1930 err = mthca_cmd_imm(dev, mailbox->dma, &imm, 0, 0, CMD_MGID_HASH, 1959 - CMD_TIME_CLASS_A, status); 1931 + CMD_TIME_CLASS_A); 1960 1932 1961 1933 *hash = imm; 1962 1934 return err; 1963 1935 } 1964 1936 1965 - int mthca_NOP(struct mthca_dev *dev, u8 *status) 1937 + int mthca_NOP(struct mthca_dev *dev) 1966 1938 { 1967 - return mthca_cmd(dev, 0, 0x1f, 0, CMD_NOP, msecs_to_jiffies(100), status); 1939 + return mthca_cmd(dev, 0, 0x1f, 0, CMD_NOP, msecs_to_jiffies(100)); 1968 1940 }
+44 -49
drivers/infiniband/hw/mthca/mthca_cmd.h
··· 252 252 gfp_t gfp_mask); 253 253 void mthca_free_mailbox(struct mthca_dev *dev, struct mthca_mailbox *mailbox); 254 254 255 - int mthca_SYS_EN(struct mthca_dev *dev, u8 *status); 256 - int mthca_SYS_DIS(struct mthca_dev *dev, u8 *status); 257 - int mthca_MAP_FA(struct mthca_dev *dev, struct mthca_icm *icm, u8 *status); 258 - int mthca_UNMAP_FA(struct mthca_dev *dev, u8 *status); 259 - int mthca_RUN_FW(struct mthca_dev *dev, u8 *status); 260 - int mthca_QUERY_FW(struct mthca_dev *dev, u8 *status); 261 - int mthca_ENABLE_LAM(struct mthca_dev *dev, u8 *status); 262 - int mthca_DISABLE_LAM(struct mthca_dev *dev, u8 *status); 263 - int mthca_QUERY_DDR(struct mthca_dev *dev, u8 *status); 255 + int mthca_SYS_EN(struct mthca_dev *dev); 256 + int mthca_SYS_DIS(struct mthca_dev *dev); 257 + int mthca_MAP_FA(struct mthca_dev *dev, struct mthca_icm *icm); 258 + int mthca_UNMAP_FA(struct mthca_dev *dev); 259 + int mthca_RUN_FW(struct mthca_dev *dev); 260 + int mthca_QUERY_FW(struct mthca_dev *dev); 261 + int mthca_ENABLE_LAM(struct mthca_dev *dev); 262 + int mthca_DISABLE_LAM(struct mthca_dev *dev); 263 + int mthca_QUERY_DDR(struct mthca_dev *dev); 264 264 int mthca_QUERY_DEV_LIM(struct mthca_dev *dev, 265 - struct mthca_dev_lim *dev_lim, u8 *status); 265 + struct mthca_dev_lim *dev_lim); 266 266 int mthca_QUERY_ADAPTER(struct mthca_dev *dev, 267 - struct mthca_adapter *adapter, u8 *status); 267 + struct mthca_adapter *adapter); 268 268 int mthca_INIT_HCA(struct mthca_dev *dev, 269 - struct mthca_init_hca_param *param, 270 - u8 *status); 269 + struct mthca_init_hca_param *param); 271 270 int mthca_INIT_IB(struct mthca_dev *dev, 272 271 struct mthca_init_ib_param *param, 273 - int port, u8 *status); 274 - int mthca_CLOSE_IB(struct mthca_dev *dev, int port, u8 *status); 275 - int mthca_CLOSE_HCA(struct mthca_dev *dev, int panic, u8 *status); 272 + int port); 273 + int mthca_CLOSE_IB(struct mthca_dev *dev, int port); 274 + int mthca_CLOSE_HCA(struct mthca_dev *dev, int panic); 
276 275 int mthca_SET_IB(struct mthca_dev *dev, struct mthca_set_ib_param *param, 277 - int port, u8 *status); 278 - int mthca_MAP_ICM(struct mthca_dev *dev, struct mthca_icm *icm, u64 virt, u8 *status); 279 - int mthca_MAP_ICM_page(struct mthca_dev *dev, u64 dma_addr, u64 virt, u8 *status); 280 - int mthca_UNMAP_ICM(struct mthca_dev *dev, u64 virt, u32 page_count, u8 *status); 281 - int mthca_MAP_ICM_AUX(struct mthca_dev *dev, struct mthca_icm *icm, u8 *status); 282 - int mthca_UNMAP_ICM_AUX(struct mthca_dev *dev, u8 *status); 283 - int mthca_SET_ICM_SIZE(struct mthca_dev *dev, u64 icm_size, u64 *aux_pages, 284 - u8 *status); 276 + int port); 277 + int mthca_MAP_ICM(struct mthca_dev *dev, struct mthca_icm *icm, u64 virt); 278 + int mthca_MAP_ICM_page(struct mthca_dev *dev, u64 dma_addr, u64 virt); 279 + int mthca_UNMAP_ICM(struct mthca_dev *dev, u64 virt, u32 page_count); 280 + int mthca_MAP_ICM_AUX(struct mthca_dev *dev, struct mthca_icm *icm); 281 + int mthca_UNMAP_ICM_AUX(struct mthca_dev *dev); 282 + int mthca_SET_ICM_SIZE(struct mthca_dev *dev, u64 icm_size, u64 *aux_pages); 285 283 int mthca_SW2HW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox, 286 - int mpt_index, u8 *status); 284 + int mpt_index); 287 285 int mthca_HW2SW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox, 288 - int mpt_index, u8 *status); 286 + int mpt_index); 289 287 int mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox, 290 - int num_mtt, u8 *status); 291 - int mthca_SYNC_TPT(struct mthca_dev *dev, u8 *status); 288 + int num_mtt); 289 + int mthca_SYNC_TPT(struct mthca_dev *dev); 292 290 int mthca_MAP_EQ(struct mthca_dev *dev, u64 event_mask, int unmap, 293 - int eq_num, u8 *status); 291 + int eq_num); 294 292 int mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, 295 - int eq_num, u8 *status); 293 + int eq_num); 296 294 int mthca_HW2SW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, 297 - int eq_num, u8 *status); 295 + int 
eq_num); 298 296 int mthca_SW2HW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, 299 - int cq_num, u8 *status); 297 + int cq_num); 300 298 int mthca_HW2SW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, 301 - int cq_num, u8 *status); 302 - int mthca_RESIZE_CQ(struct mthca_dev *dev, int cq_num, u32 lkey, u8 log_size, 303 - u8 *status); 299 + int cq_num); 300 + int mthca_RESIZE_CQ(struct mthca_dev *dev, int cq_num, u32 lkey, u8 log_size); 304 301 int mthca_SW2HW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, 305 - int srq_num, u8 *status); 302 + int srq_num); 306 303 int mthca_HW2SW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, 307 - int srq_num, u8 *status); 304 + int srq_num); 308 305 int mthca_QUERY_SRQ(struct mthca_dev *dev, u32 num, 309 - struct mthca_mailbox *mailbox, u8 *status); 310 - int mthca_ARM_SRQ(struct mthca_dev *dev, int srq_num, int limit, u8 *status); 306 + struct mthca_mailbox *mailbox); 307 + int mthca_ARM_SRQ(struct mthca_dev *dev, int srq_num, int limit); 311 308 int mthca_MODIFY_QP(struct mthca_dev *dev, enum ib_qp_state cur, 312 309 enum ib_qp_state next, u32 num, int is_ee, 313 - struct mthca_mailbox *mailbox, u32 optmask, 314 - u8 *status); 310 + struct mthca_mailbox *mailbox, u32 optmask); 315 311 int mthca_QUERY_QP(struct mthca_dev *dev, u32 num, int is_ee, 316 - struct mthca_mailbox *mailbox, u8 *status); 317 - int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn, 318 - u8 *status); 312 + struct mthca_mailbox *mailbox); 313 + int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn); 319 314 int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey, 320 315 int port, struct ib_wc *in_wc, struct ib_grh *in_grh, 321 - void *in_mad, void *response_mad, u8 *status); 316 + void *in_mad, void *response_mad); 322 317 int mthca_READ_MGM(struct mthca_dev *dev, int index, 323 - struct mthca_mailbox *mailbox, u8 *status); 318 + struct mthca_mailbox *mailbox); 324 319 
int mthca_WRITE_MGM(struct mthca_dev *dev, int index, 325 - struct mthca_mailbox *mailbox, u8 *status); 320 + struct mthca_mailbox *mailbox); 326 321 int mthca_MGID_HASH(struct mthca_dev *dev, struct mthca_mailbox *mailbox, 327 - u16 *hash, u8 *status); 328 - int mthca_NOP(struct mthca_dev *dev, u8 *status); 322 + u16 *hash); 323 + int mthca_NOP(struct mthca_dev *dev); 329 324 330 325 #endif /* MTHCA_CMD_H */
+2 -13
drivers/infiniband/hw/mthca/mthca_cq.c
··· 779 779 struct mthca_mailbox *mailbox; 780 780 struct mthca_cq_context *cq_context; 781 781 int err = -ENOMEM; 782 - u8 status; 783 782 784 783 cq->ibcq.cqe = nent - 1; 785 784 cq->is_kernel = !ctx; ··· 846 847 cq_context->state_db = cpu_to_be32(cq->arm_db_index); 847 848 } 848 849 849 - err = mthca_SW2HW_CQ(dev, mailbox, cq->cqn, &status); 850 + err = mthca_SW2HW_CQ(dev, mailbox, cq->cqn); 850 851 if (err) { 851 852 mthca_warn(dev, "SW2HW_CQ failed (%d)\n", err); 852 - goto err_out_free_mr; 853 - } 854 - 855 - if (status) { 856 - mthca_warn(dev, "SW2HW_CQ returned status 0x%02x\n", 857 - status); 858 - err = -EINVAL; 859 853 goto err_out_free_mr; 860 854 } 861 855 ··· 907 915 { 908 916 struct mthca_mailbox *mailbox; 909 917 int err; 910 - u8 status; 911 918 912 919 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); 913 920 if (IS_ERR(mailbox)) { ··· 914 923 return; 915 924 } 916 925 917 - err = mthca_HW2SW_CQ(dev, mailbox, cq->cqn, &status); 926 + err = mthca_HW2SW_CQ(dev, mailbox, cq->cqn); 918 927 if (err) 919 928 mthca_warn(dev, "HW2SW_CQ failed (%d)\n", err); 920 - else if (status) 921 - mthca_warn(dev, "HW2SW_CQ returned status 0x%02x\n", status); 922 929 923 930 if (0) { 924 931 __be32 *ctx = mailbox->buf;
+10 -33
drivers/infiniband/hw/mthca/mthca_eq.c
··· 474 474 struct mthca_eq_context *eq_context; 475 475 int err = -ENOMEM; 476 476 int i; 477 - u8 status; 478 477 479 478 eq->dev = dev; 480 479 eq->nent = roundup_pow_of_two(max(nent, 2)); ··· 542 543 eq_context->intr = intr; 543 544 eq_context->lkey = cpu_to_be32(eq->mr.ibmr.lkey); 544 545 545 - err = mthca_SW2HW_EQ(dev, mailbox, eq->eqn, &status); 546 + err = mthca_SW2HW_EQ(dev, mailbox, eq->eqn); 546 547 if (err) { 547 - mthca_warn(dev, "SW2HW_EQ failed (%d)\n", err); 548 - goto err_out_free_mr; 549 - } 550 - if (status) { 551 - mthca_warn(dev, "SW2HW_EQ returned status 0x%02x\n", 552 - status); 553 - err = -EINVAL; 548 + mthca_warn(dev, "SW2HW_EQ returned %d\n", err); 554 549 goto err_out_free_mr; 555 550 } 556 551 ··· 590 597 { 591 598 struct mthca_mailbox *mailbox; 592 599 int err; 593 - u8 status; 594 600 int npages = (eq->nent * MTHCA_EQ_ENTRY_SIZE + PAGE_SIZE - 1) / 595 601 PAGE_SIZE; 596 602 int i; ··· 598 606 if (IS_ERR(mailbox)) 599 607 return; 600 608 601 - err = mthca_HW2SW_EQ(dev, mailbox, eq->eqn, &status); 609 + err = mthca_HW2SW_EQ(dev, mailbox, eq->eqn); 602 610 if (err) 603 - mthca_warn(dev, "HW2SW_EQ failed (%d)\n", err); 604 - if (status) 605 - mthca_warn(dev, "HW2SW_EQ returned status 0x%02x\n", status); 611 + mthca_warn(dev, "HW2SW_EQ returned %d\n", err); 606 612 607 613 dev->eq_table.arm_mask &= ~eq->eqn_mask; 608 614 ··· 728 738 int mthca_map_eq_icm(struct mthca_dev *dev, u64 icm_virt) 729 739 { 730 740 int ret; 731 - u8 status; 732 741 733 742 /* 734 743 * We assume that mapping one page is enough for the whole EQ ··· 746 757 return -ENOMEM; 747 758 } 748 759 749 - ret = mthca_MAP_ICM_page(dev, dev->eq_table.icm_dma, icm_virt, &status); 750 - if (!ret && status) 751 - ret = -EINVAL; 760 + ret = mthca_MAP_ICM_page(dev, dev->eq_table.icm_dma, icm_virt); 752 761 if (ret) { 753 762 pci_unmap_page(dev->pdev, dev->eq_table.icm_dma, PAGE_SIZE, 754 763 PCI_DMA_BIDIRECTIONAL); ··· 758 771 759 772 void mthca_unmap_eq_icm(struct mthca_dev *dev) 
760 773 { 761 - u8 status; 762 - 763 - mthca_UNMAP_ICM(dev, dev->eq_table.icm_virt, 1, &status); 774 + mthca_UNMAP_ICM(dev, dev->eq_table.icm_virt, 1); 764 775 pci_unmap_page(dev->pdev, dev->eq_table.icm_dma, PAGE_SIZE, 765 776 PCI_DMA_BIDIRECTIONAL); 766 777 __free_page(dev->eq_table.icm_page); ··· 767 782 int mthca_init_eq_table(struct mthca_dev *dev) 768 783 { 769 784 int err; 770 - u8 status; 771 785 u8 intr; 772 786 int i; 773 787 ··· 848 864 } 849 865 850 866 err = mthca_MAP_EQ(dev, async_mask(dev), 851 - 0, dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, &status); 867 + 0, dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn); 852 868 if (err) 853 869 mthca_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n", 854 870 dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, err); 855 - if (status) 856 - mthca_warn(dev, "MAP_EQ for async EQ %d returned status 0x%02x\n", 857 - dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, status); 858 871 859 872 err = mthca_MAP_EQ(dev, MTHCA_CMD_EVENT_MASK, 860 - 0, dev->eq_table.eq[MTHCA_EQ_CMD].eqn, &status); 873 + 0, dev->eq_table.eq[MTHCA_EQ_CMD].eqn); 861 874 if (err) 862 875 mthca_warn(dev, "MAP_EQ for cmd EQ %d failed (%d)\n", 863 876 dev->eq_table.eq[MTHCA_EQ_CMD].eqn, err); 864 - if (status) 865 - mthca_warn(dev, "MAP_EQ for cmd EQ %d returned status 0x%02x\n", 866 - dev->eq_table.eq[MTHCA_EQ_CMD].eqn, status); 867 877 868 878 for (i = 0; i < MTHCA_NUM_EQ; ++i) 869 879 if (mthca_is_memfree(dev)) ··· 887 909 888 910 void mthca_cleanup_eq_table(struct mthca_dev *dev) 889 911 { 890 - u8 status; 891 912 int i; 892 913 893 914 mthca_free_irqs(dev); 894 915 895 916 mthca_MAP_EQ(dev, async_mask(dev), 896 - 1, dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, &status); 917 + 1, dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn); 897 918 mthca_MAP_EQ(dev, MTHCA_CMD_EVENT_MASK, 898 - 1, dev->eq_table.eq[MTHCA_EQ_CMD].eqn, &status); 919 + 1, dev->eq_table.eq[MTHCA_EQ_CMD].eqn); 899 920 900 921 for (i = 0; i < MTHCA_NUM_EQ; ++i) 901 922 mthca_free_eq(dev, &dev->eq_table.eq[i]);
+4 -11
drivers/infiniband/hw/mthca/mthca_mad.c
··· 201 201 struct ib_mad *out_mad) 202 202 { 203 203 int err; 204 - u8 status; 205 204 u16 slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE); 206 205 u16 prev_lid = 0; 207 206 struct ib_port_attr pattr; ··· 251 252 err = mthca_MAD_IFC(to_mdev(ibdev), 252 253 mad_flags & IB_MAD_IGNORE_MKEY, 253 254 mad_flags & IB_MAD_IGNORE_BKEY, 254 - port_num, in_wc, in_grh, in_mad, out_mad, 255 - &status); 256 - if (err) { 257 - mthca_err(to_mdev(ibdev), "MAD_IFC failed\n"); 258 - return IB_MAD_RESULT_FAILURE; 259 - } 260 - if (status == MTHCA_CMD_STAT_BAD_PKT) 255 + port_num, in_wc, in_grh, in_mad, out_mad); 256 + if (err == -EBADMSG) 261 257 return IB_MAD_RESULT_SUCCESS; 262 - if (status) { 263 - mthca_err(to_mdev(ibdev), "MAD_IFC returned status %02x\n", 264 - status); 258 + else if (err) { 259 + mthca_err(to_mdev(ibdev), "MAD_IFC returned %d\n", err); 265 260 return IB_MAD_RESULT_FAILURE; 266 261 } 267 262
+47 -126
drivers/infiniband/hw/mthca/mthca_main.c
··· 165 165 static int mthca_dev_lim(struct mthca_dev *mdev, struct mthca_dev_lim *dev_lim) 166 166 { 167 167 int err; 168 - u8 status; 169 168 170 169 mdev->limits.mtt_seg_size = (1 << log_mtts_per_seg) * 8; 171 - err = mthca_QUERY_DEV_LIM(mdev, dev_lim, &status); 170 + err = mthca_QUERY_DEV_LIM(mdev, dev_lim); 172 171 if (err) { 173 - mthca_err(mdev, "QUERY_DEV_LIM command failed, aborting.\n"); 172 + mthca_err(mdev, "QUERY_DEV_LIM command returned %d" 173 + ", aborting.\n", err); 174 174 return err; 175 - } 176 - if (status) { 177 - mthca_err(mdev, "QUERY_DEV_LIM returned status 0x%02x, " 178 - "aborting.\n", status); 179 - return -EINVAL; 180 175 } 181 176 if (dev_lim->min_page_sz > PAGE_SIZE) { 182 177 mthca_err(mdev, "HCA minimum page size of %d bigger than " ··· 288 293 static int mthca_init_tavor(struct mthca_dev *mdev) 289 294 { 290 295 s64 size; 291 - u8 status; 292 296 int err; 293 297 struct mthca_dev_lim dev_lim; 294 298 struct mthca_profile profile; 295 299 struct mthca_init_hca_param init_hca; 296 300 297 - err = mthca_SYS_EN(mdev, &status); 301 + err = mthca_SYS_EN(mdev); 298 302 if (err) { 299 - mthca_err(mdev, "SYS_EN command failed, aborting.\n"); 303 + mthca_err(mdev, "SYS_EN command returned %d, aborting.\n", err); 300 304 return err; 301 305 } 302 - if (status) { 303 - mthca_err(mdev, "SYS_EN returned status 0x%02x, " 304 - "aborting.\n", status); 305 - return -EINVAL; 306 - } 307 306 308 - err = mthca_QUERY_FW(mdev, &status); 307 + err = mthca_QUERY_FW(mdev); 309 308 if (err) { 310 - mthca_err(mdev, "QUERY_FW command failed, aborting.\n"); 309 + mthca_err(mdev, "QUERY_FW command returned %d," 310 + " aborting.\n", err); 311 311 goto err_disable; 312 312 } 313 - if (status) { 314 - mthca_err(mdev, "QUERY_FW returned status 0x%02x, " 315 - "aborting.\n", status); 316 - err = -EINVAL; 317 - goto err_disable; 318 - } 319 - err = mthca_QUERY_DDR(mdev, &status); 313 + err = mthca_QUERY_DDR(mdev); 320 314 if (err) { 321 - mthca_err(mdev, "QUERY_DDR 
command failed, aborting.\n"); 322 - goto err_disable; 323 - } 324 - if (status) { 325 - mthca_err(mdev, "QUERY_DDR returned status 0x%02x, " 326 - "aborting.\n", status); 327 - err = -EINVAL; 315 + mthca_err(mdev, "QUERY_DDR command returned %d, aborting.\n", err); 328 316 goto err_disable; 329 317 } 330 318 331 319 err = mthca_dev_lim(mdev, &dev_lim); 332 320 if (err) { 333 - mthca_err(mdev, "QUERY_DEV_LIM command failed, aborting.\n"); 321 + mthca_err(mdev, "QUERY_DEV_LIM command returned %d, aborting.\n", err); 334 322 goto err_disable; 335 323 } 336 324 ··· 329 351 goto err_disable; 330 352 } 331 353 332 - err = mthca_INIT_HCA(mdev, &init_hca, &status); 354 + err = mthca_INIT_HCA(mdev, &init_hca); 333 355 if (err) { 334 - mthca_err(mdev, "INIT_HCA command failed, aborting.\n"); 335 - goto err_disable; 336 - } 337 - if (status) { 338 - mthca_err(mdev, "INIT_HCA returned status 0x%02x, " 339 - "aborting.\n", status); 340 - err = -EINVAL; 356 + mthca_err(mdev, "INIT_HCA command returned %d, aborting.\n", err); 341 357 goto err_disable; 342 358 } 343 359 344 360 return 0; 345 361 346 362 err_disable: 347 - mthca_SYS_DIS(mdev, &status); 363 + mthca_SYS_DIS(mdev); 348 364 349 365 return err; 350 366 } 351 367 352 368 static int mthca_load_fw(struct mthca_dev *mdev) 353 369 { 354 - u8 status; 355 370 int err; 356 371 357 372 /* FIXME: use HCA-attached memory for FW if present */ ··· 357 386 return -ENOMEM; 358 387 } 359 388 360 - err = mthca_MAP_FA(mdev, mdev->fw.arbel.fw_icm, &status); 389 + err = mthca_MAP_FA(mdev, mdev->fw.arbel.fw_icm); 361 390 if (err) { 362 - mthca_err(mdev, "MAP_FA command failed, aborting.\n"); 391 + mthca_err(mdev, "MAP_FA command returned %d, aborting.\n", err); 363 392 goto err_free; 364 393 } 365 - if (status) { 366 - mthca_err(mdev, "MAP_FA returned status 0x%02x, aborting.\n", status); 367 - err = -EINVAL; 368 - goto err_free; 369 - } 370 - err = mthca_RUN_FW(mdev, &status); 394 + err = mthca_RUN_FW(mdev); 371 395 if (err) { 372 - 
mthca_err(mdev, "RUN_FW command failed, aborting.\n"); 373 - goto err_unmap_fa; 374 - } 375 - if (status) { 376 - mthca_err(mdev, "RUN_FW returned status 0x%02x, aborting.\n", status); 377 - err = -EINVAL; 396 + mthca_err(mdev, "RUN_FW command returned %d, aborting.\n", err); 378 397 goto err_unmap_fa; 379 398 } 380 399 381 400 return 0; 382 401 383 402 err_unmap_fa: 384 - mthca_UNMAP_FA(mdev, &status); 403 + mthca_UNMAP_FA(mdev); 385 404 386 405 err_free: 387 406 mthca_free_icm(mdev, mdev->fw.arbel.fw_icm, 0); ··· 384 423 u64 icm_size) 385 424 { 386 425 u64 aux_pages; 387 - u8 status; 388 426 int err; 389 427 390 - err = mthca_SET_ICM_SIZE(mdev, icm_size, &aux_pages, &status); 428 + err = mthca_SET_ICM_SIZE(mdev, icm_size, &aux_pages); 391 429 if (err) { 392 - mthca_err(mdev, "SET_ICM_SIZE command failed, aborting.\n"); 430 + mthca_err(mdev, "SET_ICM_SIZE command returned %d, aborting.\n", err); 393 431 return err; 394 - } 395 - if (status) { 396 - mthca_err(mdev, "SET_ICM_SIZE returned status 0x%02x, " 397 - "aborting.\n", status); 398 - return -EINVAL; 399 432 } 400 433 401 434 mthca_dbg(mdev, "%lld KB of HCA context requires %lld KB aux memory.\n", ··· 403 448 return -ENOMEM; 404 449 } 405 450 406 - err = mthca_MAP_ICM_AUX(mdev, mdev->fw.arbel.aux_icm, &status); 451 + err = mthca_MAP_ICM_AUX(mdev, mdev->fw.arbel.aux_icm); 407 452 if (err) { 408 - mthca_err(mdev, "MAP_ICM_AUX command failed, aborting.\n"); 409 - goto err_free_aux; 410 - } 411 - if (status) { 412 - mthca_err(mdev, "MAP_ICM_AUX returned status 0x%02x, aborting.\n", status); 413 - err = -EINVAL; 453 + mthca_err(mdev, "MAP_ICM_AUX returned %d, aborting.\n", err); 414 454 goto err_free_aux; 415 455 } 416 456 ··· 546 596 mthca_unmap_eq_icm(mdev); 547 597 548 598 err_unmap_aux: 549 - mthca_UNMAP_ICM_AUX(mdev, &status); 599 + mthca_UNMAP_ICM_AUX(mdev); 550 600 551 601 err_free_aux: 552 602 mthca_free_icm(mdev, mdev->fw.arbel.aux_icm, 0); ··· 556 606 557 607 static void mthca_free_icms(struct mthca_dev 
*mdev) 558 608 { 559 - u8 status; 560 609 561 610 mthca_free_icm_table(mdev, mdev->mcg_table.table); 562 611 if (mdev->mthca_flags & MTHCA_FLAG_SRQ) ··· 568 619 mthca_free_icm_table(mdev, mdev->mr_table.mtt_table); 569 620 mthca_unmap_eq_icm(mdev); 570 621 571 - mthca_UNMAP_ICM_AUX(mdev, &status); 622 + mthca_UNMAP_ICM_AUX(mdev); 572 623 mthca_free_icm(mdev, mdev->fw.arbel.aux_icm, 0); 573 624 } 574 625 ··· 578 629 struct mthca_profile profile; 579 630 struct mthca_init_hca_param init_hca; 580 631 s64 icm_size; 581 - u8 status; 582 632 int err; 583 633 584 - err = mthca_QUERY_FW(mdev, &status); 634 + err = mthca_QUERY_FW(mdev); 585 635 if (err) { 586 - mthca_err(mdev, "QUERY_FW command failed, aborting.\n"); 636 + mthca_err(mdev, "QUERY_FW command failed %d, aborting.\n", err); 587 637 return err; 588 - } 589 - if (status) { 590 - mthca_err(mdev, "QUERY_FW returned status 0x%02x, " 591 - "aborting.\n", status); 592 - return -EINVAL; 593 638 } 594 639 595 - err = mthca_ENABLE_LAM(mdev, &status); 596 - if (err) { 597 - mthca_err(mdev, "ENABLE_LAM command failed, aborting.\n"); 598 - return err; 599 - } 600 - if (status == MTHCA_CMD_STAT_LAM_NOT_PRE) { 640 + err = mthca_ENABLE_LAM(mdev); 641 + if (err == -EAGAIN) { 601 642 mthca_dbg(mdev, "No HCA-attached memory (running in MemFree mode)\n"); 602 643 mdev->mthca_flags |= MTHCA_FLAG_NO_LAM; 603 - } else if (status) { 604 - mthca_err(mdev, "ENABLE_LAM returned status 0x%02x, " 605 - "aborting.\n", status); 606 - return -EINVAL; 644 + } else if (err) { 645 + mthca_err(mdev, "ENABLE_LAM returned %d, aborting.\n", err); 646 + return err; 607 647 } 608 648 609 649 err = mthca_load_fw(mdev); 610 650 if (err) { 611 - mthca_err(mdev, "Failed to start FW, aborting.\n"); 651 + mthca_err(mdev, "Loading FW returned %d, aborting.\n", err); 612 652 goto err_disable; 613 653 } 614 654 615 655 err = mthca_dev_lim(mdev, &dev_lim); 616 656 if (err) { 617 - mthca_err(mdev, "QUERY_DEV_LIM command failed, aborting.\n"); 657 + 
mthca_err(mdev, "QUERY_DEV_LIM returned %d, aborting.\n", err); 618 658 goto err_stop_fw; 619 659 } 620 660 ··· 623 685 if (err) 624 686 goto err_stop_fw; 625 687 626 - err = mthca_INIT_HCA(mdev, &init_hca, &status); 688 + err = mthca_INIT_HCA(mdev, &init_hca); 627 689 if (err) { 628 - mthca_err(mdev, "INIT_HCA command failed, aborting.\n"); 629 - goto err_free_icm; 630 - } 631 - if (status) { 632 - mthca_err(mdev, "INIT_HCA returned status 0x%02x, " 633 - "aborting.\n", status); 634 - err = -EINVAL; 690 + mthca_err(mdev, "INIT_HCA command returned %d, aborting.\n", err); 635 691 goto err_free_icm; 636 692 } 637 693 ··· 635 703 mthca_free_icms(mdev); 636 704 637 705 err_stop_fw: 638 - mthca_UNMAP_FA(mdev, &status); 706 + mthca_UNMAP_FA(mdev); 639 707 mthca_free_icm(mdev, mdev->fw.arbel.fw_icm, 0); 640 708 641 709 err_disable: 642 710 if (!(mdev->mthca_flags & MTHCA_FLAG_NO_LAM)) 643 - mthca_DISABLE_LAM(mdev, &status); 711 + mthca_DISABLE_LAM(mdev); 644 712 645 713 return err; 646 714 } 647 715 648 716 static void mthca_close_hca(struct mthca_dev *mdev) 649 717 { 650 - u8 status; 651 - 652 - mthca_CLOSE_HCA(mdev, 0, &status); 718 + mthca_CLOSE_HCA(mdev, 0); 653 719 654 720 if (mthca_is_memfree(mdev)) { 655 721 mthca_free_icms(mdev); 656 722 657 - mthca_UNMAP_FA(mdev, &status); 723 + mthca_UNMAP_FA(mdev); 658 724 mthca_free_icm(mdev, mdev->fw.arbel.fw_icm, 0); 659 725 660 726 if (!(mdev->mthca_flags & MTHCA_FLAG_NO_LAM)) 661 - mthca_DISABLE_LAM(mdev, &status); 727 + mthca_DISABLE_LAM(mdev); 662 728 } else 663 - mthca_SYS_DIS(mdev, &status); 729 + mthca_SYS_DIS(mdev); 664 730 } 665 731 666 732 static int mthca_init_hca(struct mthca_dev *mdev) 667 733 { 668 - u8 status; 669 734 int err; 670 735 struct mthca_adapter adapter; 671 736 ··· 674 745 if (err) 675 746 return err; 676 747 677 - err = mthca_QUERY_ADAPTER(mdev, &adapter, &status); 748 + err = mthca_QUERY_ADAPTER(mdev, &adapter); 678 749 if (err) { 679 - mthca_err(mdev, "QUERY_ADAPTER command failed, 
aborting.\n"); 680 - goto err_close; 681 - } 682 - if (status) { 683 - mthca_err(mdev, "QUERY_ADAPTER returned status 0x%02x, " 684 - "aborting.\n", status); 685 - err = -EINVAL; 750 + mthca_err(mdev, "QUERY_ADAPTER command returned %d, aborting.\n", err); 686 751 goto err_close; 687 752 } 688 753 ··· 695 772 static int mthca_setup_hca(struct mthca_dev *dev) 696 773 { 697 774 int err; 698 - u8 status; 699 775 700 776 MTHCA_INIT_DOORBELL_LOCK(&dev->doorbell_lock); 701 777 ··· 755 833 goto err_eq_table_free; 756 834 } 757 835 758 - err = mthca_NOP(dev, &status); 759 - if (err || status) { 836 + err = mthca_NOP(dev); 837 + if (err) { 760 838 if (dev->mthca_flags & MTHCA_FLAG_MSI_X) { 761 839 mthca_warn(dev, "NOP command failed to generate interrupt " 762 840 "(IRQ %d).\n", ··· 1088 1166 static void __mthca_remove_one(struct pci_dev *pdev) 1089 1167 { 1090 1168 struct mthca_dev *mdev = pci_get_drvdata(pdev); 1091 - u8 status; 1092 1169 int p; 1093 1170 1094 1171 if (mdev) { ··· 1095 1174 mthca_unregister_device(mdev); 1096 1175 1097 1176 for (p = 1; p <= mdev->limits.num_ports; ++p) 1098 - mthca_CLOSE_IB(mdev, p, &status); 1177 + mthca_CLOSE_IB(mdev, p); 1099 1178 1100 1179 mthca_cleanup_mcg_table(mdev); 1101 1180 mthca_cleanup_av_table(mdev);
+32 -69
drivers/infiniband/hw/mthca/mthca_mcg.c
··· 68 68 struct mthca_mgm *mgm = mgm_mailbox->buf; 69 69 u8 *mgid; 70 70 int err; 71 - u8 status; 72 71 73 72 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); 74 73 if (IS_ERR(mailbox)) ··· 76 77 77 78 memcpy(mgid, gid, 16); 78 79 79 - err = mthca_MGID_HASH(dev, mailbox, hash, &status); 80 - if (err) 81 - goto out; 82 - if (status) { 83 - mthca_err(dev, "MGID_HASH returned status %02x\n", status); 84 - err = -EINVAL; 80 + err = mthca_MGID_HASH(dev, mailbox, hash); 81 + if (err) { 82 + mthca_err(dev, "MGID_HASH failed (%d)\n", err); 85 83 goto out; 86 84 } 87 85 ··· 89 93 *prev = -1; 90 94 91 95 do { 92 - err = mthca_READ_MGM(dev, *index, mgm_mailbox, &status); 93 - if (err) 94 - goto out; 95 - if (status) { 96 - mthca_err(dev, "READ_MGM returned status %02x\n", status); 97 - err = -EINVAL; 96 + err = mthca_READ_MGM(dev, *index, mgm_mailbox); 97 + if (err) { 98 + mthca_err(dev, "READ_MGM failed (%d)\n", err); 98 99 goto out; 99 100 } 100 101 ··· 127 134 int link = 0; 128 135 int i; 129 136 int err; 130 - u8 status; 131 137 132 138 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); 133 139 if (IS_ERR(mailbox)) ··· 152 160 goto out; 153 161 } 154 162 155 - err = mthca_READ_MGM(dev, index, mailbox, &status); 156 - if (err) 157 - goto out; 158 - if (status) { 159 - mthca_err(dev, "READ_MGM returned status %02x\n", status); 160 - err = -EINVAL; 163 + err = mthca_READ_MGM(dev, index, mailbox); 164 + if (err) { 165 + mthca_err(dev, "READ_MGM failed (%d)\n", err); 161 166 goto out; 162 167 } 163 168 memset(mgm, 0, sizeof *mgm); ··· 178 189 goto out; 179 190 } 180 191 181 - err = mthca_WRITE_MGM(dev, index, mailbox, &status); 182 - if (err) 183 - goto out; 184 - if (status) { 185 - mthca_err(dev, "WRITE_MGM returned status %02x\n", status); 192 + err = mthca_WRITE_MGM(dev, index, mailbox); 193 + if (err) { 194 + mthca_err(dev, "WRITE_MGM failed %d\n", err); 186 195 err = -EINVAL; 187 196 goto out; 188 197 } ··· 188 201 if (!link) 189 202 goto out; 190 203 191 - err = 
mthca_READ_MGM(dev, prev, mailbox, &status); 192 - if (err) 193 - goto out; 194 - if (status) { 195 - mthca_err(dev, "READ_MGM returned status %02x\n", status); 196 - err = -EINVAL; 204 + err = mthca_READ_MGM(dev, prev, mailbox); 205 + if (err) { 206 + mthca_err(dev, "READ_MGM failed %d\n", err); 197 207 goto out; 198 208 } 199 209 200 210 mgm->next_gid_index = cpu_to_be32(index << 6); 201 211 202 - err = mthca_WRITE_MGM(dev, prev, mailbox, &status); 212 + err = mthca_WRITE_MGM(dev, prev, mailbox); 203 213 if (err) 204 - goto out; 205 - if (status) { 206 - mthca_err(dev, "WRITE_MGM returned status %02x\n", status); 207 - err = -EINVAL; 208 - } 214 + mthca_err(dev, "WRITE_MGM returned %d\n", err); 209 215 210 216 out: 211 217 if (err && link && index != -1) { ··· 220 240 int prev, index; 221 241 int i, loc; 222 242 int err; 223 - u8 status; 224 243 225 244 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); 226 245 if (IS_ERR(mailbox)) ··· 254 275 mgm->qp[loc] = mgm->qp[i - 1]; 255 276 mgm->qp[i - 1] = 0; 256 277 257 - err = mthca_WRITE_MGM(dev, index, mailbox, &status); 258 - if (err) 259 - goto out; 260 - if (status) { 261 - mthca_err(dev, "WRITE_MGM returned status %02x\n", status); 262 - err = -EINVAL; 278 + err = mthca_WRITE_MGM(dev, index, mailbox); 279 + if (err) { 280 + mthca_err(dev, "WRITE_MGM returned %d\n", err); 263 281 goto out; 264 282 } 265 283 ··· 268 292 int amgm_index_to_free = be32_to_cpu(mgm->next_gid_index) >> 6; 269 293 if (amgm_index_to_free) { 270 294 err = mthca_READ_MGM(dev, amgm_index_to_free, 271 - mailbox, &status); 272 - if (err) 273 - goto out; 274 - if (status) { 275 - mthca_err(dev, "READ_MGM returned status %02x\n", 276 - status); 277 - err = -EINVAL; 295 + mailbox); 296 + if (err) { 297 + mthca_err(dev, "READ_MGM returned %d\n", err); 278 298 goto out; 279 299 } 280 300 } else 281 301 memset(mgm->gid, 0, 16); 282 302 283 - err = mthca_WRITE_MGM(dev, index, mailbox, &status); 284 - if (err) 285 - goto out; 286 - if (status) { 287 - 
mthca_err(dev, "WRITE_MGM returned status %02x\n", status); 288 - err = -EINVAL; 303 + err = mthca_WRITE_MGM(dev, index, mailbox); 304 + if (err) { 305 + mthca_err(dev, "WRITE_MGM returned %d\n", err); 289 306 goto out; 290 307 } 291 308 if (amgm_index_to_free) { ··· 288 319 } else { 289 320 /* Remove entry from AMGM */ 290 321 int curr_next_index = be32_to_cpu(mgm->next_gid_index) >> 6; 291 - err = mthca_READ_MGM(dev, prev, mailbox, &status); 292 - if (err) 293 - goto out; 294 - if (status) { 295 - mthca_err(dev, "READ_MGM returned status %02x\n", status); 296 - err = -EINVAL; 322 + err = mthca_READ_MGM(dev, prev, mailbox); 323 + if (err) { 324 + mthca_err(dev, "READ_MGM returned %d\n", err); 297 325 goto out; 298 326 } 299 327 300 328 mgm->next_gid_index = cpu_to_be32(curr_next_index << 6); 301 329 302 - err = mthca_WRITE_MGM(dev, prev, mailbox, &status); 303 - if (err) 304 - goto out; 305 - if (status) { 306 - mthca_err(dev, "WRITE_MGM returned status %02x\n", status); 307 - err = -EINVAL; 330 + err = mthca_WRITE_MGM(dev, prev, mailbox); 331 + if (err) { 332 + mthca_err(dev, "WRITE_MGM returned %d\n", err); 308 333 goto out; 309 334 } 310 335 BUG_ON(index < dev->limits.num_mgms);
+14 -29
drivers/infiniband/hw/mthca/mthca_memfree.c
··· 223 223 { 224 224 int i = (obj & (table->num_obj - 1)) * table->obj_size / MTHCA_TABLE_CHUNK_SIZE; 225 225 int ret = 0; 226 - u8 status; 227 226 228 227 mutex_lock(&table->mutex); 229 228 ··· 239 240 goto out; 240 241 } 241 242 242 - if (mthca_MAP_ICM(dev, table->icm[i], table->virt + i * MTHCA_TABLE_CHUNK_SIZE, 243 - &status) || status) { 243 + if (mthca_MAP_ICM(dev, table->icm[i], 244 + table->virt + i * MTHCA_TABLE_CHUNK_SIZE)) { 244 245 mthca_free_icm(dev, table->icm[i], table->coherent); 245 246 table->icm[i] = NULL; 246 247 ret = -ENOMEM; ··· 257 258 void mthca_table_put(struct mthca_dev *dev, struct mthca_icm_table *table, int obj) 258 259 { 259 260 int i; 260 - u8 status; 261 261 262 262 if (!mthca_is_memfree(dev)) 263 263 return; ··· 267 269 268 270 if (--table->icm[i]->refcount == 0) { 269 271 mthca_UNMAP_ICM(dev, table->virt + i * MTHCA_TABLE_CHUNK_SIZE, 270 - MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE, 271 - &status); 272 + MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE); 272 273 mthca_free_icm(dev, table->icm[i], table->coherent); 273 274 table->icm[i] = NULL; 274 275 } ··· 363 366 int num_icm; 364 367 unsigned chunk_size; 365 368 int i; 366 - u8 status; 367 369 368 370 obj_per_chunk = MTHCA_TABLE_CHUNK_SIZE / obj_size; 369 371 num_icm = DIV_ROUND_UP(nobj, obj_per_chunk); ··· 392 396 __GFP_NOWARN, use_coherent); 393 397 if (!table->icm[i]) 394 398 goto err; 395 - if (mthca_MAP_ICM(dev, table->icm[i], virt + i * MTHCA_TABLE_CHUNK_SIZE, 396 - &status) || status) { 399 + if (mthca_MAP_ICM(dev, table->icm[i], 400 + virt + i * MTHCA_TABLE_CHUNK_SIZE)) { 397 401 mthca_free_icm(dev, table->icm[i], table->coherent); 398 402 table->icm[i] = NULL; 399 403 goto err; ··· 412 416 for (i = 0; i < num_icm; ++i) 413 417 if (table->icm[i]) { 414 418 mthca_UNMAP_ICM(dev, virt + i * MTHCA_TABLE_CHUNK_SIZE, 415 - MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE, 416 - &status); 419 + MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE); 417 420 mthca_free_icm(dev, table->icm[i], 
table->coherent); 418 421 } 419 422 ··· 424 429 void mthca_free_icm_table(struct mthca_dev *dev, struct mthca_icm_table *table) 425 430 { 426 431 int i; 427 - u8 status; 428 432 429 433 for (i = 0; i < table->num_icm; ++i) 430 434 if (table->icm[i]) { 431 - mthca_UNMAP_ICM(dev, table->virt + i * MTHCA_TABLE_CHUNK_SIZE, 432 - MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE, 433 - &status); 435 + mthca_UNMAP_ICM(dev, 436 + table->virt + i * MTHCA_TABLE_CHUNK_SIZE, 437 + MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE); 434 438 mthca_free_icm(dev, table->icm[i], table->coherent); 435 439 } 436 440 ··· 448 454 { 449 455 struct page *pages[1]; 450 456 int ret = 0; 451 - u8 status; 452 457 int i; 453 458 454 459 if (!mthca_is_memfree(dev)) ··· 487 494 } 488 495 489 496 ret = mthca_MAP_ICM_page(dev, sg_dma_address(&db_tab->page[i].mem), 490 - mthca_uarc_virt(dev, uar, i), &status); 491 - if (!ret && status) 492 - ret = -EINVAL; 497 + mthca_uarc_virt(dev, uar, i)); 493 498 if (ret) { 494 499 pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE); 495 500 put_page(sg_page(&db_tab->page[i].mem)); ··· 548 557 struct mthca_user_db_table *db_tab) 549 558 { 550 559 int i; 551 - u8 status; 552 560 553 561 if (!mthca_is_memfree(dev)) 554 562 return; 555 563 556 564 for (i = 0; i < dev->uar_table.uarc_size / MTHCA_ICM_PAGE_SIZE; ++i) { 557 565 if (db_tab->page[i].uvirt) { 558 - mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, uar, i), 1, &status); 566 + mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, uar, i), 1); 559 567 pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE); 560 568 put_page(sg_page(&db_tab->page[i].mem)); 561 569 } ··· 571 581 int i, j; 572 582 struct mthca_db_page *page; 573 583 int ret = 0; 574 - u8 status; 575 584 576 585 mutex_lock(&dev->db_tab->mutex); 577 586 ··· 633 644 memset(page->db_rec, 0, MTHCA_ICM_PAGE_SIZE); 634 645 635 646 ret = mthca_MAP_ICM_page(dev, page->mapping, 636 - mthca_uarc_virt(dev, &dev->driver_uar, i), &status); 637 - if (!ret && 
status) 638 - ret = -EINVAL; 647 + mthca_uarc_virt(dev, &dev->driver_uar, i)); 639 648 if (ret) { 640 649 dma_free_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE, 641 650 page->db_rec, page->mapping); ··· 665 678 { 666 679 int i, j; 667 680 struct mthca_db_page *page; 668 - u8 status; 669 681 670 682 i = db_index / MTHCA_DB_REC_PER_PAGE; 671 683 j = db_index % MTHCA_DB_REC_PER_PAGE; ··· 680 694 681 695 if (bitmap_empty(page->used, MTHCA_DB_REC_PER_PAGE) && 682 696 i >= dev->db_tab->max_group1 - 1) { 683 - mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, &dev->driver_uar, i), 1, &status); 697 + mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, &dev->driver_uar, i), 1); 684 698 685 699 dma_free_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE, 686 700 page->db_rec, page->mapping); ··· 731 745 void mthca_cleanup_db_tab(struct mthca_dev *dev) 732 746 { 733 747 int i; 734 - u8 status; 735 748 736 749 if (!mthca_is_memfree(dev)) 737 750 return; ··· 748 763 if (!bitmap_empty(dev->db_tab->page[i].used, MTHCA_DB_REC_PER_PAGE)) 749 764 mthca_warn(dev, "Kernel UARC page %d not empty\n", i); 750 765 751 - mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, &dev->driver_uar, i), 1, &status); 766 + mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, &dev->driver_uar, i), 1); 752 767 753 768 dma_free_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE, 754 769 dev->db_tab->page[i].db_rec,
+4 -31
drivers/infiniband/hw/mthca/mthca_mr.c
··· 257 257 struct mthca_mailbox *mailbox; 258 258 __be64 *mtt_entry; 259 259 int err = 0; 260 - u8 status; 261 260 int i; 262 261 263 262 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); ··· 280 281 if (i & 1) 281 282 mtt_entry[i + 2] = 0; 282 283 283 - err = mthca_WRITE_MTT(dev, mailbox, (i + 1) & ~1, &status); 284 + err = mthca_WRITE_MTT(dev, mailbox, (i + 1) & ~1); 284 285 if (err) { 285 286 mthca_warn(dev, "WRITE_MTT failed (%d)\n", err); 286 - goto out; 287 - } 288 - if (status) { 289 - mthca_warn(dev, "WRITE_MTT returned status 0x%02x\n", 290 - status); 291 - err = -EINVAL; 292 287 goto out; 293 288 } 294 289 ··· 434 441 u32 key; 435 442 int i; 436 443 int err; 437 - u8 status; 438 444 439 445 WARN_ON(buffer_size_shift >= 32); 440 446 ··· 489 497 } 490 498 491 499 err = mthca_SW2HW_MPT(dev, mailbox, 492 - key & (dev->limits.num_mpts - 1), 493 - &status); 500 + key & (dev->limits.num_mpts - 1)); 494 501 if (err) { 495 502 mthca_warn(dev, "SW2HW_MPT failed (%d)\n", err); 496 - goto err_out_mailbox; 497 - } else if (status) { 498 - mthca_warn(dev, "SW2HW_MPT returned status 0x%02x\n", 499 - status); 500 - err = -EINVAL; 501 503 goto err_out_mailbox; 502 504 } 503 505 ··· 553 567 void mthca_free_mr(struct mthca_dev *dev, struct mthca_mr *mr) 554 568 { 555 569 int err; 556 - u8 status; 557 570 558 571 err = mthca_HW2SW_MPT(dev, NULL, 559 572 key_to_hw_index(dev, mr->ibmr.lkey) & 560 - (dev->limits.num_mpts - 1), 561 - &status); 573 + (dev->limits.num_mpts - 1)); 562 574 if (err) 563 575 mthca_warn(dev, "HW2SW_MPT failed (%d)\n", err); 564 - else if (status) 565 - mthca_warn(dev, "HW2SW_MPT returned status 0x%02x\n", 566 - status); 567 576 568 577 mthca_free_region(dev, mr->ibmr.lkey); 569 578 mthca_free_mtt(dev, mr->mtt); ··· 571 590 struct mthca_mailbox *mailbox; 572 591 u64 mtt_seg; 573 592 u32 key, idx; 574 - u8 status; 575 593 int list_len = mr->attr.max_pages; 576 594 int err = -ENOMEM; 577 595 int i; ··· 652 672 } 653 673 654 674 err = mthca_SW2HW_MPT(dev, 
mailbox, 655 - key & (dev->limits.num_mpts - 1), 656 - &status); 675 + key & (dev->limits.num_mpts - 1)); 657 676 if (err) { 658 677 mthca_warn(dev, "SW2HW_MPT failed (%d)\n", err); 659 - goto err_out_mailbox_free; 660 - } 661 - if (status) { 662 - mthca_warn(dev, "SW2HW_MPT returned status 0x%02x\n", 663 - status); 664 - err = -EINVAL; 665 678 goto err_out_mailbox_free; 666 679 } 667 680
+11 -66
drivers/infiniband/hw/mthca/mthca_provider.c
··· 63 63 int err = -ENOMEM; 64 64 struct mthca_dev *mdev = to_mdev(ibdev); 65 65 66 - u8 status; 67 - 68 66 in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL); 69 67 out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL); 70 68 if (!in_mad || !out_mad) ··· 76 78 in_mad->attr_id = IB_SMP_ATTR_NODE_INFO; 77 79 78 80 err = mthca_MAD_IFC(mdev, 1, 1, 79 - 1, NULL, NULL, in_mad, out_mad, 80 - &status); 81 + 1, NULL, NULL, in_mad, out_mad); 81 82 if (err) 82 83 goto out; 83 - if (status) { 84 - err = -EINVAL; 85 - goto out; 86 - } 87 84 88 85 props->device_cap_flags = mdev->device_cap_flags; 89 86 props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) & ··· 134 141 struct ib_smp *in_mad = NULL; 135 142 struct ib_smp *out_mad = NULL; 136 143 int err = -ENOMEM; 137 - u8 status; 138 144 139 145 in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL); 140 146 out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL); ··· 147 155 in_mad->attr_mod = cpu_to_be32(port); 148 156 149 157 err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1, 150 - port, NULL, NULL, in_mad, out_mad, 151 - &status); 158 + port, NULL, NULL, in_mad, out_mad); 152 159 if (err) 153 160 goto out; 154 - if (status) { 155 - err = -EINVAL; 156 - goto out; 157 - } 158 161 159 162 props->lid = be16_to_cpup((__be16 *) (out_mad->data + 16)); 160 163 props->lmc = out_mad->data[34] & 0x7; ··· 201 214 struct mthca_set_ib_param set_ib; 202 215 struct ib_port_attr attr; 203 216 int err; 204 - u8 status; 205 217 206 218 if (mutex_lock_interruptible(&to_mdev(ibdev)->cap_mask_mutex)) 207 219 return -ERESTARTSYS; ··· 215 229 set_ib.cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) & 216 230 ~props->clr_port_cap_mask; 217 231 218 - err = mthca_SET_IB(to_mdev(ibdev), &set_ib, port, &status); 232 + err = mthca_SET_IB(to_mdev(ibdev), &set_ib, port); 219 233 if (err) 220 234 goto out; 221 - if (status) { 222 - err = -EINVAL; 223 - goto out; 224 - } 225 - 226 235 out: 227 236 mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex); 228 237 return err; ··· 229 248 
struct ib_smp *in_mad = NULL; 230 249 struct ib_smp *out_mad = NULL; 231 250 int err = -ENOMEM; 232 - u8 status; 233 251 234 252 in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL); 235 253 out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL); ··· 240 260 in_mad->attr_mod = cpu_to_be32(index / 32); 241 261 242 262 err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1, 243 - port, NULL, NULL, in_mad, out_mad, 244 - &status); 263 + port, NULL, NULL, in_mad, out_mad); 245 264 if (err) 246 265 goto out; 247 - if (status) { 248 - err = -EINVAL; 249 - goto out; 250 - } 251 266 252 267 *pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]); 253 268 ··· 258 283 struct ib_smp *in_mad = NULL; 259 284 struct ib_smp *out_mad = NULL; 260 285 int err = -ENOMEM; 261 - u8 status; 262 286 263 287 in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL); 264 288 out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL); ··· 269 295 in_mad->attr_mod = cpu_to_be32(port); 270 296 271 297 err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1, 272 - port, NULL, NULL, in_mad, out_mad, 273 - &status); 298 + port, NULL, NULL, in_mad, out_mad); 274 299 if (err) 275 300 goto out; 276 - if (status) { 277 - err = -EINVAL; 278 - goto out; 279 - } 280 301 281 302 memcpy(gid->raw, out_mad->data + 8, 8); 282 303 ··· 280 311 in_mad->attr_mod = cpu_to_be32(index / 8); 281 312 282 313 err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1, 283 - port, NULL, NULL, in_mad, out_mad, 284 - &status); 314 + port, NULL, NULL, in_mad, out_mad); 285 315 if (err) 286 316 goto out; 287 - if (status) { 288 - err = -EINVAL; 289 - goto out; 290 - } 291 317 292 318 memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8); 293 319 ··· 764 800 struct mthca_cq *cq = to_mcq(ibcq); 765 801 struct mthca_resize_cq ucmd; 766 802 u32 lkey; 767 - u8 status; 768 803 int ret; 769 804 770 805 if (entries < 1 || entries > dev->limits.max_cqes) ··· 790 827 lkey = ucmd.lkey; 791 828 } 792 829 793 - ret = mthca_RESIZE_CQ(dev, cq->cqn, lkey, ilog2(entries), &status); 794 - if (status) 795 - ret = 
-EINVAL; 830 + ret = mthca_RESIZE_CQ(dev, cq->cqn, lkey, ilog2(entries)); 796 831 797 832 if (ret) { 798 833 if (cq->resize_buf) { ··· 1122 1161 { 1123 1162 struct ib_fmr *fmr; 1124 1163 int err; 1125 - u8 status; 1126 1164 struct mthca_dev *mdev = NULL; 1127 1165 1128 1166 list_for_each_entry(fmr, fmr_list, list) { ··· 1142 1182 list_for_each_entry(fmr, fmr_list, list) 1143 1183 mthca_tavor_fmr_unmap(mdev, to_mfmr(fmr)); 1144 1184 1145 - err = mthca_SYNC_TPT(mdev, &status); 1146 - if (err) 1147 - return err; 1148 - if (status) 1149 - return -EINVAL; 1150 - return 0; 1185 + err = mthca_SYNC_TPT(mdev); 1186 + return err; 1151 1187 } 1152 1188 1153 1189 static ssize_t show_rev(struct device *device, struct device_attribute *attr, ··· 1209 1253 struct ib_smp *in_mad = NULL; 1210 1254 struct ib_smp *out_mad = NULL; 1211 1255 int err = -ENOMEM; 1212 - u8 status; 1213 1256 1214 1257 in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL); 1215 1258 out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL); ··· 1219 1264 in_mad->attr_id = IB_SMP_ATTR_NODE_DESC; 1220 1265 1221 1266 err = mthca_MAD_IFC(dev, 1, 1, 1222 - 1, NULL, NULL, in_mad, out_mad, 1223 - &status); 1267 + 1, NULL, NULL, in_mad, out_mad); 1224 1268 if (err) 1225 1269 goto out; 1226 - if (status) { 1227 - err = -EINVAL; 1228 - goto out; 1229 - } 1230 1270 1231 1271 memcpy(dev->ib_dev.node_desc, out_mad->data, 64); 1232 1272 1233 1273 in_mad->attr_id = IB_SMP_ATTR_NODE_INFO; 1234 1274 1235 1275 err = mthca_MAD_IFC(dev, 1, 1, 1236 - 1, NULL, NULL, in_mad, out_mad, 1237 - &status); 1276 + 1, NULL, NULL, in_mad, out_mad); 1238 1277 if (err) 1239 1278 goto out; 1240 - if (status) { 1241 - err = -EINVAL; 1242 - goto out; 1243 - } 1244 1279 1245 1280 if (mthca_is_memfree(dev)) 1246 1281 dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32));
+15 -34
drivers/infiniband/hw/mthca/mthca_qp.c
··· 308 308 static void init_port(struct mthca_dev *dev, int port) 309 309 { 310 310 int err; 311 - u8 status; 312 311 struct mthca_init_ib_param param; 313 312 314 313 memset(&param, 0, sizeof param); ··· 318 319 param.gid_cap = dev->limits.gid_table_len; 319 320 param.pkey_cap = dev->limits.pkey_table_len; 320 321 321 - err = mthca_INIT_IB(dev, &param, port, &status); 322 + err = mthca_INIT_IB(dev, &param, port); 322 323 if (err) 323 324 mthca_warn(dev, "INIT_IB failed, return code %d.\n", err); 324 - if (status) 325 - mthca_warn(dev, "INIT_IB returned status %02x.\n", status); 326 325 } 327 326 328 327 static __be32 get_hw_access_flags(struct mthca_qp *qp, const struct ib_qp_attr *attr, ··· 430 433 struct mthca_qp_param *qp_param; 431 434 struct mthca_qp_context *context; 432 435 int mthca_state; 433 - u8 status; 434 436 435 437 mutex_lock(&qp->mutex); 436 438 ··· 444 448 goto out; 445 449 } 446 450 447 - err = mthca_QUERY_QP(dev, qp->qpn, 0, mailbox, &status); 448 - if (err) 449 - goto out_mailbox; 450 - if (status) { 451 - mthca_warn(dev, "QUERY_QP returned status %02x\n", status); 452 - err = -EINVAL; 451 + err = mthca_QUERY_QP(dev, qp->qpn, 0, mailbox); 452 + if (err) { 453 + mthca_warn(dev, "QUERY_QP failed (%d)\n", err); 453 454 goto out_mailbox; 454 455 } 455 456 ··· 548 555 struct mthca_qp_param *qp_param; 549 556 struct mthca_qp_context *qp_context; 550 557 u32 sqd_event = 0; 551 - u8 status; 552 558 int err = -EINVAL; 553 559 554 560 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); ··· 773 781 sqd_event = 1 << 31; 774 782 775 783 err = mthca_MODIFY_QP(dev, cur_state, new_state, qp->qpn, 0, 776 - mailbox, sqd_event, &status); 777 - if (err) 778 - goto out_mailbox; 779 - if (status) { 780 - mthca_warn(dev, "modify QP %d->%d returned status %02x.\n", 781 - cur_state, new_state, status); 782 - err = -EINVAL; 784 + mailbox, sqd_event); 785 + if (err) { 786 + mthca_warn(dev, "modify QP %d->%d returned %d.\n", 787 + cur_state, new_state, err); 783 788 goto 
out_mailbox; 784 789 } 785 790 ··· 806 817 cur_state != IB_QPS_ERR && 807 818 (new_state == IB_QPS_RESET || 808 819 new_state == IB_QPS_ERR)) 809 - mthca_CLOSE_IB(dev, qp->port, &status); 820 + mthca_CLOSE_IB(dev, qp->port); 810 821 } 811 822 812 823 /* ··· 1418 1429 void mthca_free_qp(struct mthca_dev *dev, 1419 1430 struct mthca_qp *qp) 1420 1431 { 1421 - u8 status; 1422 1432 struct mthca_cq *send_cq; 1423 1433 struct mthca_cq *recv_cq; 1424 1434 ··· 1442 1454 1443 1455 if (qp->state != IB_QPS_RESET) 1444 1456 mthca_MODIFY_QP(dev, qp->state, IB_QPS_RESET, qp->qpn, 0, 1445 - NULL, 0, &status); 1457 + NULL, 0); 1446 1458 1447 1459 /* 1448 1460 * If this is a userspace QP, the buffers, MR, CQs and so on ··· 2251 2263 int mthca_init_qp_table(struct mthca_dev *dev) 2252 2264 { 2253 2265 int err; 2254 - u8 status; 2255 2266 int i; 2256 2267 2257 2268 spin_lock_init(&dev->qp_table.lock); ··· 2277 2290 2278 2291 for (i = 0; i < 2; ++i) { 2279 2292 err = mthca_CONF_SPECIAL_QP(dev, i ? IB_QPT_GSI : IB_QPT_SMI, 2280 - dev->qp_table.sqp_start + i * 2, 2281 - &status); 2282 - if (err) 2283 - goto err_out; 2284 - if (status) { 2293 + dev->qp_table.sqp_start + i * 2); 2294 + if (err) { 2285 2295 mthca_warn(dev, "CONF_SPECIAL_QP returned " 2286 - "status %02x, aborting.\n", 2287 - status); 2288 - err = -EINVAL; 2296 + "%d, aborting.\n", err); 2289 2297 goto err_out; 2290 2298 } 2291 2299 } ··· 2288 2306 2289 2307 err_out: 2290 2308 for (i = 0; i < 2; ++i) 2291 - mthca_CONF_SPECIAL_QP(dev, i, 0, &status); 2309 + mthca_CONF_SPECIAL_QP(dev, i, 0); 2292 2310 2293 2311 mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps); 2294 2312 mthca_alloc_cleanup(&dev->qp_table.alloc); ··· 2299 2317 void mthca_cleanup_qp_table(struct mthca_dev *dev) 2300 2318 { 2301 2319 int i; 2302 - u8 status; 2303 2320 2304 2321 for (i = 0; i < 2; ++i) 2305 - mthca_CONF_SPECIAL_QP(dev, i, 0, &status); 2322 + mthca_CONF_SPECIAL_QP(dev, i, 0); 2306 2323 2307 2324 mthca_array_cleanup(&dev->qp_table.qp, 
dev->limits.num_qps); 2308 2325 mthca_alloc_cleanup(&dev->qp_table.alloc);
+7 -26
drivers/infiniband/hw/mthca/mthca_srq.c
··· 200 200 struct ib_srq_attr *attr, struct mthca_srq *srq) 201 201 { 202 202 struct mthca_mailbox *mailbox; 203 - u8 status; 204 203 int ds; 205 204 int err; 206 205 ··· 265 266 else 266 267 mthca_tavor_init_srq_context(dev, pd, srq, mailbox->buf); 267 268 268 - err = mthca_SW2HW_SRQ(dev, mailbox, srq->srqn, &status); 269 + err = mthca_SW2HW_SRQ(dev, mailbox, srq->srqn); 269 270 270 271 if (err) { 271 272 mthca_warn(dev, "SW2HW_SRQ failed (%d)\n", err); 272 - goto err_out_free_buf; 273 - } 274 - if (status) { 275 - mthca_warn(dev, "SW2HW_SRQ returned status 0x%02x\n", 276 - status); 277 - err = -EINVAL; 278 273 goto err_out_free_buf; 279 274 } 280 275 ··· 292 299 return 0; 293 300 294 301 err_out_free_srq: 295 - err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn, &status); 302 + err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn); 296 303 if (err) 297 304 mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err); 298 - else if (status) 299 - mthca_warn(dev, "HW2SW_SRQ returned status 0x%02x\n", status); 300 305 301 306 err_out_free_buf: 302 307 if (!pd->ibpd.uobject) ··· 331 340 { 332 341 struct mthca_mailbox *mailbox; 333 342 int err; 334 - u8 status; 335 343 336 344 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); 337 345 if (IS_ERR(mailbox)) { ··· 338 348 return; 339 349 } 340 350 341 - err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn, &status); 351 + err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn); 342 352 if (err) 343 353 mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err); 344 - else if (status) 345 - mthca_warn(dev, "HW2SW_SRQ returned status 0x%02x\n", status); 346 354 347 355 spin_lock_irq(&dev->srq_table.lock); 348 356 mthca_array_clear(&dev->srq_table.srq, ··· 366 378 { 367 379 struct mthca_dev *dev = to_mdev(ibsrq->device); 368 380 struct mthca_srq *srq = to_msrq(ibsrq); 369 - int ret; 370 - u8 status; 381 + int ret = 0; 371 382 372 383 /* We don't support resizing SRQs (yet?) 
*/ 373 384 if (attr_mask & IB_SRQ_MAX_WR) ··· 378 391 return -EINVAL; 379 392 380 393 mutex_lock(&srq->mutex); 381 - ret = mthca_ARM_SRQ(dev, srq->srqn, attr->srq_limit, &status); 394 + ret = mthca_ARM_SRQ(dev, srq->srqn, attr->srq_limit); 382 395 mutex_unlock(&srq->mutex); 383 - 384 - if (ret) 385 - return ret; 386 - if (status) 387 - return -EINVAL; 388 396 } 389 397 390 - return 0; 398 + return ret; 391 399 } 392 400 393 401 int mthca_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr) ··· 392 410 struct mthca_mailbox *mailbox; 393 411 struct mthca_arbel_srq_context *arbel_ctx; 394 412 struct mthca_tavor_srq_context *tavor_ctx; 395 - u8 status; 396 413 int err; 397 414 398 415 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); 399 416 if (IS_ERR(mailbox)) 400 417 return PTR_ERR(mailbox); 401 418 402 - err = mthca_QUERY_SRQ(dev, srq->srqn, mailbox, &status); 419 + err = mthca_QUERY_SRQ(dev, srq->srqn, mailbox); 403 420 if (err) 404 421 goto out; 405 422