[PATCH] IB/mthca: Align FW command mailboxes to 4K

Future versions of Mellanox HCA firmware will require command mailboxes to be
aligned to 4K. Support this by using a pci_pool to allocate all mailboxes.
This has the added benefit of shrinking the source and text of mthca.

Signed-off-by: Roland Dreier <roland@topspin.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Authored by Roland Dreier; committed by Linus Torvalds.
Commit ed878458, parent 80fd8238.

Total diffstat: +329 -422
This file: +206 -304
drivers/infiniband/hw/mthca/mthca_cmd.c
··· 444 444 return -ENOMEM; 445 445 } 446 446 447 + dev->cmd.pool = pci_pool_create("mthca_cmd", dev->pdev, 448 + MTHCA_MAILBOX_SIZE, 449 + MTHCA_MAILBOX_SIZE, 0); 450 + if (!dev->cmd.pool) { 451 + iounmap(dev->hcr); 452 + return -ENOMEM; 453 + } 454 + 447 455 return 0; 448 456 } 449 457 450 458 void mthca_cmd_cleanup(struct mthca_dev *dev) 451 459 { 460 + pci_pool_destroy(dev->cmd.pool); 452 461 iounmap(dev->hcr); 453 462 } 454 463 ··· 519 510 up(&dev->cmd.poll_sem); 520 511 } 521 512 513 + struct mthca_mailbox *mthca_alloc_mailbox(struct mthca_dev *dev, 514 + unsigned int gfp_mask) 515 + { 516 + struct mthca_mailbox *mailbox; 517 + 518 + mailbox = kmalloc(sizeof *mailbox, gfp_mask); 519 + if (!mailbox) 520 + return ERR_PTR(-ENOMEM); 521 + 522 + mailbox->buf = pci_pool_alloc(dev->cmd.pool, gfp_mask, &mailbox->dma); 523 + if (!mailbox->buf) { 524 + kfree(mailbox); 525 + return ERR_PTR(-ENOMEM); 526 + } 527 + 528 + return mailbox; 529 + } 530 + 531 + void mthca_free_mailbox(struct mthca_dev *dev, struct mthca_mailbox *mailbox) 532 + { 533 + if (!mailbox) 534 + return; 535 + 536 + pci_pool_free(dev->cmd.pool, mailbox->buf, mailbox->dma); 537 + kfree(mailbox); 538 + } 539 + 522 540 int mthca_SYS_EN(struct mthca_dev *dev, u8 *status) 523 541 { 524 542 u64 out; ··· 570 534 static int mthca_map_cmd(struct mthca_dev *dev, u16 op, struct mthca_icm *icm, 571 535 u64 virt, u8 *status) 572 536 { 573 - u32 *inbox; 574 - dma_addr_t indma; 537 + struct mthca_mailbox *mailbox; 575 538 struct mthca_icm_iter iter; 539 + __be64 *pages; 576 540 int lg; 577 541 int nent = 0; 578 542 int i; 579 543 int err = 0; 580 544 int ts = 0, tc = 0; 581 545 582 - inbox = pci_alloc_consistent(dev->pdev, PAGE_SIZE, &indma); 583 - if (!inbox) 584 - return -ENOMEM; 585 - 586 - memset(inbox, 0, PAGE_SIZE); 546 + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); 547 + if (IS_ERR(mailbox)) 548 + return PTR_ERR(mailbox); 549 + memset(mailbox->buf, 0, MTHCA_MAILBOX_SIZE); 550 + pages = mailbox->buf; 587 551 
588 552 for (mthca_icm_first(icm, &iter); 589 553 !mthca_icm_last(&iter); ··· 603 567 } 604 568 for (i = 0; i < mthca_icm_size(&iter) / (1 << lg); ++i, ++nent) { 605 569 if (virt != -1) { 606 - *((__be64 *) (inbox + nent * 4)) = 607 - cpu_to_be64(virt); 570 + pages[nent * 2] = cpu_to_be64(virt); 608 571 virt += 1 << lg; 609 572 } 610 573 611 - *((__be64 *) (inbox + nent * 4 + 2)) = 612 - cpu_to_be64((mthca_icm_addr(&iter) + 613 - (i << lg)) | (lg - 12)); 574 + pages[nent * 2 + 1] = cpu_to_be64((mthca_icm_addr(&iter) + 575 + (i << lg)) | (lg - 12)); 614 576 ts += 1 << (lg - 10); 615 577 ++tc; 616 578 617 - if (nent == PAGE_SIZE / 16) { 618 - err = mthca_cmd(dev, indma, nent, 0, op, 579 + if (nent == MTHCA_MAILBOX_SIZE / 16) { 580 + err = mthca_cmd(dev, mailbox->dma, nent, 0, op, 619 581 CMD_TIME_CLASS_B, status); 620 582 if (err || *status) 621 583 goto out; ··· 623 589 } 624 590 625 591 if (nent) 626 - err = mthca_cmd(dev, indma, nent, 0, op, 592 + err = mthca_cmd(dev, mailbox->dma, nent, 0, op, 627 593 CMD_TIME_CLASS_B, status); 628 594 629 595 switch (op) { ··· 640 606 } 641 607 642 608 out: 643 - pci_free_consistent(dev->pdev, PAGE_SIZE, inbox, indma); 609 + mthca_free_mailbox(dev, mailbox); 644 610 return err; 645 611 } 646 612 ··· 661 627 662 628 int mthca_QUERY_FW(struct mthca_dev *dev, u8 *status) 663 629 { 630 + struct mthca_mailbox *mailbox; 664 631 u32 *outbox; 665 - dma_addr_t outdma; 666 632 int err = 0; 667 633 u8 lg; 668 634 ··· 680 646 #define QUERY_FW_EQ_ARM_BASE_OFFSET 0x40 681 647 #define QUERY_FW_EQ_SET_CI_BASE_OFFSET 0x48 682 648 683 - outbox = pci_alloc_consistent(dev->pdev, QUERY_FW_OUT_SIZE, &outdma); 684 - if (!outbox) { 685 - return -ENOMEM; 686 - } 649 + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); 650 + if (IS_ERR(mailbox)) 651 + return PTR_ERR(mailbox); 652 + outbox = mailbox->buf; 687 653 688 - err = mthca_cmd_box(dev, 0, outdma, 0, 0, CMD_QUERY_FW, 654 + err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_FW, 689 655 
CMD_TIME_CLASS_A, status); 690 656 691 657 if (err) ··· 736 702 } 737 703 738 704 out: 739 - pci_free_consistent(dev->pdev, QUERY_FW_OUT_SIZE, outbox, outdma); 705 + mthca_free_mailbox(dev, mailbox); 740 706 return err; 741 707 } 742 708 743 709 int mthca_ENABLE_LAM(struct mthca_dev *dev, u8 *status) 744 710 { 711 + struct mthca_mailbox *mailbox; 745 712 u8 info; 746 713 u32 *outbox; 747 - dma_addr_t outdma; 748 714 int err = 0; 749 715 750 716 #define ENABLE_LAM_OUT_SIZE 0x100 ··· 755 721 #define ENABLE_LAM_INFO_HIDDEN_FLAG (1 << 4) 756 722 #define ENABLE_LAM_INFO_ECC_MASK 0x3 757 723 758 - outbox = pci_alloc_consistent(dev->pdev, ENABLE_LAM_OUT_SIZE, &outdma); 759 - if (!outbox) 760 - return -ENOMEM; 724 + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); 725 + if (IS_ERR(mailbox)) 726 + return PTR_ERR(mailbox); 727 + outbox = mailbox->buf; 761 728 762 - err = mthca_cmd_box(dev, 0, outdma, 0, 0, CMD_ENABLE_LAM, 729 + err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_ENABLE_LAM, 763 730 CMD_TIME_CLASS_C, status); 764 731 765 732 if (err) ··· 789 754 (unsigned long long) dev->ddr_end); 790 755 791 756 out: 792 - pci_free_consistent(dev->pdev, ENABLE_LAM_OUT_SIZE, outbox, outdma); 757 + mthca_free_mailbox(dev, mailbox); 793 758 return err; 794 759 } 795 760 ··· 800 765 801 766 int mthca_QUERY_DDR(struct mthca_dev *dev, u8 *status) 802 767 { 768 + struct mthca_mailbox *mailbox; 803 769 u8 info; 804 770 u32 *outbox; 805 - dma_addr_t outdma; 806 771 int err = 0; 807 772 808 773 #define QUERY_DDR_OUT_SIZE 0x100 ··· 813 778 #define QUERY_DDR_INFO_HIDDEN_FLAG (1 << 4) 814 779 #define QUERY_DDR_INFO_ECC_MASK 0x3 815 780 816 - outbox = pci_alloc_consistent(dev->pdev, QUERY_DDR_OUT_SIZE, &outdma); 817 - if (!outbox) 818 - return -ENOMEM; 781 + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); 782 + if (IS_ERR(mailbox)) 783 + return PTR_ERR(mailbox); 784 + outbox = mailbox->buf; 819 785 820 - err = mthca_cmd_box(dev, 0, outdma, 0, 0, CMD_QUERY_DDR, 786 + err = 
mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_DDR, 821 787 CMD_TIME_CLASS_A, status); 822 788 823 789 if (err) ··· 844 808 (unsigned long long) dev->ddr_end); 845 809 846 810 out: 847 - pci_free_consistent(dev->pdev, QUERY_DDR_OUT_SIZE, outbox, outdma); 811 + mthca_free_mailbox(dev, mailbox); 848 812 return err; 849 813 } 850 814 851 815 int mthca_QUERY_DEV_LIM(struct mthca_dev *dev, 852 816 struct mthca_dev_lim *dev_lim, u8 *status) 853 817 { 818 + struct mthca_mailbox *mailbox; 854 819 u32 *outbox; 855 - dma_addr_t outdma; 856 820 u8 field; 857 821 u16 size; 858 822 int err; ··· 917 881 #define QUERY_DEV_LIM_LAMR_OFFSET 0x9f 918 882 #define QUERY_DEV_LIM_MAX_ICM_SZ_OFFSET 0xa0 919 883 920 - outbox = pci_alloc_consistent(dev->pdev, QUERY_DEV_LIM_OUT_SIZE, &outdma); 921 - if (!outbox) 922 - return -ENOMEM; 884 + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); 885 + if (IS_ERR(mailbox)) 886 + return PTR_ERR(mailbox); 887 + outbox = mailbox->buf; 923 888 924 - err = mthca_cmd_box(dev, 0, outdma, 0, 0, CMD_QUERY_DEV_LIM, 889 + err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_DEV_LIM, 925 890 CMD_TIME_CLASS_A, status); 926 891 927 892 if (err) ··· 1078 1041 } 1079 1042 1080 1043 out: 1081 - pci_free_consistent(dev->pdev, QUERY_DEV_LIM_OUT_SIZE, outbox, outdma); 1044 + mthca_free_mailbox(dev, mailbox); 1082 1045 return err; 1083 1046 } 1084 1047 1085 1048 int mthca_QUERY_ADAPTER(struct mthca_dev *dev, 1086 1049 struct mthca_adapter *adapter, u8 *status) 1087 1050 { 1051 + struct mthca_mailbox *mailbox; 1088 1052 u32 *outbox; 1089 - dma_addr_t outdma; 1090 1053 int err; 1091 1054 1092 1055 #define QUERY_ADAPTER_OUT_SIZE 0x100 ··· 1095 1058 #define QUERY_ADAPTER_REVISION_ID_OFFSET 0x08 1096 1059 #define QUERY_ADAPTER_INTA_PIN_OFFSET 0x10 1097 1060 1098 - outbox = pci_alloc_consistent(dev->pdev, QUERY_ADAPTER_OUT_SIZE, &outdma); 1099 - if (!outbox) 1100 - return -ENOMEM; 1061 + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); 1062 + if (IS_ERR(mailbox)) 1063 
+ return PTR_ERR(mailbox); 1064 + outbox = mailbox->buf; 1101 1065 1102 - err = mthca_cmd_box(dev, 0, outdma, 0, 0, CMD_QUERY_ADAPTER, 1066 + err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_ADAPTER, 1103 1067 CMD_TIME_CLASS_A, status); 1104 1068 1105 1069 if (err) 1106 1070 goto out; 1107 1071 1108 - MTHCA_GET(adapter->vendor_id, outbox, QUERY_ADAPTER_VENDOR_ID_OFFSET); 1109 - MTHCA_GET(adapter->device_id, outbox, QUERY_ADAPTER_DEVICE_ID_OFFSET); 1072 + MTHCA_GET(adapter->vendor_id, outbox, QUERY_ADAPTER_VENDOR_ID_OFFSET); 1073 + MTHCA_GET(adapter->device_id, outbox, QUERY_ADAPTER_DEVICE_ID_OFFSET); 1110 1074 MTHCA_GET(adapter->revision_id, outbox, QUERY_ADAPTER_REVISION_ID_OFFSET); 1111 - MTHCA_GET(adapter->inta_pin, outbox, QUERY_ADAPTER_INTA_PIN_OFFSET); 1075 + MTHCA_GET(adapter->inta_pin, outbox, QUERY_ADAPTER_INTA_PIN_OFFSET); 1112 1076 1113 1077 out: 1114 - pci_free_consistent(dev->pdev, QUERY_DEV_LIM_OUT_SIZE, outbox, outdma); 1078 + mthca_free_mailbox(dev, mailbox); 1115 1079 return err; 1116 1080 } 1117 1081 ··· 1120 1082 struct mthca_init_hca_param *param, 1121 1083 u8 *status) 1122 1084 { 1085 + struct mthca_mailbox *mailbox; 1123 1086 u32 *inbox; 1124 - dma_addr_t indma; 1125 1087 int err; 1126 1088 1127 1089 #define INIT_HCA_IN_SIZE 0x200 ··· 1161 1123 #define INIT_HCA_UAR_SCATCH_BASE_OFFSET (INIT_HCA_UAR_OFFSET + 0x10) 1162 1124 #define INIT_HCA_UAR_CTX_BASE_OFFSET (INIT_HCA_UAR_OFFSET + 0x18) 1163 1125 1164 - inbox = pci_alloc_consistent(dev->pdev, INIT_HCA_IN_SIZE, &indma); 1165 - if (!inbox) 1166 - return -ENOMEM; 1126 + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); 1127 + if (IS_ERR(mailbox)) 1128 + return PTR_ERR(mailbox); 1129 + inbox = mailbox->buf; 1167 1130 1168 1131 memset(inbox, 0, INIT_HCA_IN_SIZE); 1169 1132 ··· 1227 1188 MTHCA_PUT(inbox, param->uarc_base, INIT_HCA_UAR_CTX_BASE_OFFSET); 1228 1189 } 1229 1190 1230 - err = mthca_cmd(dev, indma, 0, 0, CMD_INIT_HCA, 1231 - HZ, status); 1191 + err = mthca_cmd(dev, mailbox->dma, 0, 
0, CMD_INIT_HCA, HZ, status); 1232 1192 1233 - pci_free_consistent(dev->pdev, INIT_HCA_IN_SIZE, inbox, indma); 1193 + mthca_free_mailbox(dev, mailbox); 1234 1194 return err; 1235 1195 } 1236 1196 ··· 1237 1199 struct mthca_init_ib_param *param, 1238 1200 int port, u8 *status) 1239 1201 { 1202 + struct mthca_mailbox *mailbox; 1240 1203 u32 *inbox; 1241 - dma_addr_t indma; 1242 1204 int err; 1243 1205 u32 flags; 1244 1206 ··· 1258 1220 #define INIT_IB_NODE_GUID_OFFSET 0x18 1259 1221 #define INIT_IB_SI_GUID_OFFSET 0x20 1260 1222 1261 - inbox = pci_alloc_consistent(dev->pdev, INIT_IB_IN_SIZE, &indma); 1262 - if (!inbox) 1263 - return -ENOMEM; 1223 + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); 1224 + if (IS_ERR(mailbox)) 1225 + return PTR_ERR(mailbox); 1226 + inbox = mailbox->buf; 1264 1227 1265 1228 memset(inbox, 0, INIT_IB_IN_SIZE); 1266 1229 ··· 1281 1242 MTHCA_PUT(inbox, param->node_guid, INIT_IB_NODE_GUID_OFFSET); 1282 1243 MTHCA_PUT(inbox, param->si_guid, INIT_IB_SI_GUID_OFFSET); 1283 1244 1284 - err = mthca_cmd(dev, indma, port, 0, CMD_INIT_IB, 1245 + err = mthca_cmd(dev, mailbox->dma, port, 0, CMD_INIT_IB, 1285 1246 CMD_TIME_CLASS_A, status); 1286 1247 1287 - pci_free_consistent(dev->pdev, INIT_HCA_IN_SIZE, inbox, indma); 1248 + mthca_free_mailbox(dev, mailbox); 1288 1249 return err; 1289 1250 } 1290 1251 ··· 1301 1262 int mthca_SET_IB(struct mthca_dev *dev, struct mthca_set_ib_param *param, 1302 1263 int port, u8 *status) 1303 1264 { 1265 + struct mthca_mailbox *mailbox; 1304 1266 u32 *inbox; 1305 - dma_addr_t indma; 1306 1267 int err; 1307 1268 u32 flags = 0; 1308 1269 ··· 1313 1274 #define SET_IB_CAP_MASK_OFFSET 0x04 1314 1275 #define SET_IB_SI_GUID_OFFSET 0x08 1315 1276 1316 - inbox = pci_alloc_consistent(dev->pdev, SET_IB_IN_SIZE, &indma); 1317 - if (!inbox) 1318 - return -ENOMEM; 1277 + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); 1278 + if (IS_ERR(mailbox)) 1279 + return PTR_ERR(mailbox); 1280 + inbox = mailbox->buf; 1319 1281 1320 1282 
memset(inbox, 0, SET_IB_IN_SIZE); 1321 1283 ··· 1327 1287 MTHCA_PUT(inbox, param->cap_mask, SET_IB_CAP_MASK_OFFSET); 1328 1288 MTHCA_PUT(inbox, param->si_guid, SET_IB_SI_GUID_OFFSET); 1329 1289 1330 - err = mthca_cmd(dev, indma, port, 0, CMD_SET_IB, 1290 + err = mthca_cmd(dev, mailbox->dma, port, 0, CMD_SET_IB, 1331 1291 CMD_TIME_CLASS_B, status); 1332 1292 1333 - pci_free_consistent(dev->pdev, INIT_HCA_IN_SIZE, inbox, indma); 1293 + mthca_free_mailbox(dev, mailbox); 1334 1294 return err; 1335 1295 } 1336 1296 ··· 1341 1301 1342 1302 int mthca_MAP_ICM_page(struct mthca_dev *dev, u64 dma_addr, u64 virt, u8 *status) 1343 1303 { 1304 + struct mthca_mailbox *mailbox; 1344 1305 u64 *inbox; 1345 - dma_addr_t indma; 1346 1306 int err; 1347 1307 1348 - inbox = pci_alloc_consistent(dev->pdev, 16, &indma); 1349 - if (!inbox) 1350 - return -ENOMEM; 1308 + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); 1309 + if (IS_ERR(mailbox)) 1310 + return PTR_ERR(mailbox); 1311 + inbox = mailbox->buf; 1351 1312 1352 1313 inbox[0] = cpu_to_be64(virt); 1353 1314 inbox[1] = cpu_to_be64(dma_addr); 1354 1315 1355 - err = mthca_cmd(dev, indma, 1, 0, CMD_MAP_ICM, CMD_TIME_CLASS_B, status); 1316 + err = mthca_cmd(dev, mailbox->dma, 1, 0, CMD_MAP_ICM, 1317 + CMD_TIME_CLASS_B, status); 1356 1318 1357 - pci_free_consistent(dev->pdev, 16, inbox, indma); 1319 + mthca_free_mailbox(dev, mailbox); 1358 1320 1359 1321 if (!err) 1360 1322 mthca_dbg(dev, "Mapped page at %llx to %llx for ICM.\n", ··· 1401 1359 return 0; 1402 1360 } 1403 1361 1404 - int mthca_SW2HW_MPT(struct mthca_dev *dev, void *mpt_entry, 1362 + int mthca_SW2HW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox, 1405 1363 int mpt_index, u8 *status) 1406 1364 { 1407 - dma_addr_t indma; 1408 - int err; 1409 - 1410 - indma = pci_map_single(dev->pdev, mpt_entry, 1411 - MTHCA_MPT_ENTRY_SIZE, 1412 - PCI_DMA_TODEVICE); 1413 - if (pci_dma_mapping_error(indma)) 1414 - return -ENOMEM; 1415 - 1416 - err = mthca_cmd(dev, indma, mpt_index, 0, 
CMD_SW2HW_MPT, 1417 - CMD_TIME_CLASS_B, status); 1418 - 1419 - pci_unmap_single(dev->pdev, indma, 1420 - MTHCA_MPT_ENTRY_SIZE, PCI_DMA_TODEVICE); 1421 - return err; 1365 + return mthca_cmd(dev, mailbox->dma, mpt_index, 0, CMD_SW2HW_MPT, 1366 + CMD_TIME_CLASS_B, status); 1422 1367 } 1423 1368 1424 - int mthca_HW2SW_MPT(struct mthca_dev *dev, void *mpt_entry, 1369 + int mthca_HW2SW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox, 1425 1370 int mpt_index, u8 *status) 1426 1371 { 1427 - dma_addr_t outdma = 0; 1428 - int err; 1429 - 1430 - if (mpt_entry) { 1431 - outdma = pci_map_single(dev->pdev, mpt_entry, 1432 - MTHCA_MPT_ENTRY_SIZE, 1433 - PCI_DMA_FROMDEVICE); 1434 - if (pci_dma_mapping_error(outdma)) 1435 - return -ENOMEM; 1436 - } 1437 - 1438 - err = mthca_cmd_box(dev, 0, outdma, mpt_index, !mpt_entry, 1439 - CMD_HW2SW_MPT, 1440 - CMD_TIME_CLASS_B, status); 1441 - 1442 - if (mpt_entry) 1443 - pci_unmap_single(dev->pdev, outdma, 1444 - MTHCA_MPT_ENTRY_SIZE, 1445 - PCI_DMA_FROMDEVICE); 1446 - return err; 1372 + return mthca_cmd_box(dev, 0, mailbox ? 
mailbox->dma : 0, mpt_index, 1373 + !mailbox, CMD_HW2SW_MPT, 1374 + CMD_TIME_CLASS_B, status); 1447 1375 } 1448 1376 1449 - int mthca_WRITE_MTT(struct mthca_dev *dev, u64 *mtt_entry, 1377 + int mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox, 1450 1378 int num_mtt, u8 *status) 1451 1379 { 1452 - dma_addr_t indma; 1453 - int err; 1454 - 1455 - indma = pci_map_single(dev->pdev, mtt_entry, 1456 - (num_mtt + 2) * 8, 1457 - PCI_DMA_TODEVICE); 1458 - if (pci_dma_mapping_error(indma)) 1459 - return -ENOMEM; 1460 - 1461 - err = mthca_cmd(dev, indma, num_mtt, 0, CMD_WRITE_MTT, 1462 - CMD_TIME_CLASS_B, status); 1463 - 1464 - pci_unmap_single(dev->pdev, indma, 1465 - (num_mtt + 2) * 8, PCI_DMA_TODEVICE); 1466 - return err; 1380 + return mthca_cmd(dev, mailbox->dma, num_mtt, 0, CMD_WRITE_MTT, 1381 + CMD_TIME_CLASS_B, status); 1467 1382 } 1468 1383 1469 1384 int mthca_SYNC_TPT(struct mthca_dev *dev, u8 *status) ··· 1438 1439 0, CMD_MAP_EQ, CMD_TIME_CLASS_B, status); 1439 1440 } 1440 1441 1441 - int mthca_SW2HW_EQ(struct mthca_dev *dev, void *eq_context, 1442 + int mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, 1442 1443 int eq_num, u8 *status) 1443 1444 { 1444 - dma_addr_t indma; 1445 - int err; 1446 - 1447 - indma = pci_map_single(dev->pdev, eq_context, 1448 - MTHCA_EQ_CONTEXT_SIZE, 1449 - PCI_DMA_TODEVICE); 1450 - if (pci_dma_mapping_error(indma)) 1451 - return -ENOMEM; 1452 - 1453 - err = mthca_cmd(dev, indma, eq_num, 0, CMD_SW2HW_EQ, 1454 - CMD_TIME_CLASS_A, status); 1455 - 1456 - pci_unmap_single(dev->pdev, indma, 1457 - MTHCA_EQ_CONTEXT_SIZE, PCI_DMA_TODEVICE); 1458 - return err; 1445 + return mthca_cmd(dev, mailbox->dma, eq_num, 0, CMD_SW2HW_EQ, 1446 + CMD_TIME_CLASS_A, status); 1459 1447 } 1460 1448 1461 - int mthca_HW2SW_EQ(struct mthca_dev *dev, void *eq_context, 1449 + int mthca_HW2SW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, 1462 1450 int eq_num, u8 *status) 1463 1451 { 1464 - dma_addr_t outdma = 0; 1465 - 
int err; 1466 - 1467 - outdma = pci_map_single(dev->pdev, eq_context, 1468 - MTHCA_EQ_CONTEXT_SIZE, 1469 - PCI_DMA_FROMDEVICE); 1470 - if (pci_dma_mapping_error(outdma)) 1471 - return -ENOMEM; 1472 - 1473 - err = mthca_cmd_box(dev, 0, outdma, eq_num, 0, 1474 - CMD_HW2SW_EQ, 1475 - CMD_TIME_CLASS_A, status); 1476 - 1477 - pci_unmap_single(dev->pdev, outdma, 1478 - MTHCA_EQ_CONTEXT_SIZE, 1479 - PCI_DMA_FROMDEVICE); 1480 - return err; 1452 + return mthca_cmd_box(dev, 0, mailbox->dma, eq_num, 0, 1453 + CMD_HW2SW_EQ, 1454 + CMD_TIME_CLASS_A, status); 1481 1455 } 1482 1456 1483 - int mthca_SW2HW_CQ(struct mthca_dev *dev, void *cq_context, 1457 + int mthca_SW2HW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, 1484 1458 int cq_num, u8 *status) 1485 1459 { 1486 - dma_addr_t indma; 1487 - int err; 1488 - 1489 - indma = pci_map_single(dev->pdev, cq_context, 1490 - MTHCA_CQ_CONTEXT_SIZE, 1491 - PCI_DMA_TODEVICE); 1492 - if (pci_dma_mapping_error(indma)) 1493 - return -ENOMEM; 1494 - 1495 - err = mthca_cmd(dev, indma, cq_num, 0, CMD_SW2HW_CQ, 1460 + return mthca_cmd(dev, mailbox->dma, cq_num, 0, CMD_SW2HW_CQ, 1496 1461 CMD_TIME_CLASS_A, status); 1497 - 1498 - pci_unmap_single(dev->pdev, indma, 1499 - MTHCA_CQ_CONTEXT_SIZE, PCI_DMA_TODEVICE); 1500 - return err; 1501 1462 } 1502 1463 1503 - int mthca_HW2SW_CQ(struct mthca_dev *dev, void *cq_context, 1464 + int mthca_HW2SW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, 1504 1465 int cq_num, u8 *status) 1505 1466 { 1506 - dma_addr_t outdma = 0; 1507 - int err; 1508 - 1509 - outdma = pci_map_single(dev->pdev, cq_context, 1510 - MTHCA_CQ_CONTEXT_SIZE, 1511 - PCI_DMA_FROMDEVICE); 1512 - if (pci_dma_mapping_error(outdma)) 1513 - return -ENOMEM; 1514 - 1515 - err = mthca_cmd_box(dev, 0, outdma, cq_num, 0, 1516 - CMD_HW2SW_CQ, 1517 - CMD_TIME_CLASS_A, status); 1518 - 1519 - pci_unmap_single(dev->pdev, outdma, 1520 - MTHCA_CQ_CONTEXT_SIZE, 1521 - PCI_DMA_FROMDEVICE); 1522 - return err; 1467 + return mthca_cmd_box(dev, 
0, mailbox->dma, cq_num, 0, 1468 + CMD_HW2SW_CQ, 1469 + CMD_TIME_CLASS_A, status); 1523 1470 } 1524 1471 1525 1472 int mthca_MODIFY_QP(struct mthca_dev *dev, int trans, u32 num, 1526 - int is_ee, void *qp_context, u32 optmask, 1473 + int is_ee, struct mthca_mailbox *mailbox, u32 optmask, 1527 1474 u8 *status) 1528 1475 { 1529 1476 static const u16 op[] = { ··· 1486 1541 [MTHCA_TRANS_ANY2RST] = CMD_ERR2RST_QPEE 1487 1542 }; 1488 1543 u8 op_mod = 0; 1489 - 1490 - dma_addr_t indma; 1544 + int my_mailbox = 0; 1491 1545 int err; 1492 1546 1493 1547 if (trans < 0 || trans >= ARRAY_SIZE(op)) 1494 1548 return -EINVAL; 1495 1549 1496 1550 if (trans == MTHCA_TRANS_ANY2RST) { 1497 - indma = 0; 1498 1551 op_mod = 3; /* don't write outbox, any->reset */ 1499 1552 1500 1553 /* For debugging */ 1501 - qp_context = pci_alloc_consistent(dev->pdev, MTHCA_QP_CONTEXT_SIZE, 1502 - &indma); 1503 - op_mod = 2; /* write outbox, any->reset */ 1554 + if (!mailbox) { 1555 + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); 1556 + if (!IS_ERR(mailbox)) { 1557 + my_mailbox = 1; 1558 + op_mod = 2; /* write outbox, any->reset */ 1559 + } else 1560 + mailbox = NULL; 1561 + } 1504 1562 } else { 1505 - indma = pci_map_single(dev->pdev, qp_context, 1506 - MTHCA_QP_CONTEXT_SIZE, 1507 - PCI_DMA_TODEVICE); 1508 - if (pci_dma_mapping_error(indma)) 1509 - return -ENOMEM; 1510 - 1511 1563 if (0) { 1512 1564 int i; 1513 1565 mthca_dbg(dev, "Dumping QP context:\n"); 1514 - printk(" opt param mask: %08x\n", be32_to_cpup(qp_context)); 1566 + printk(" opt param mask: %08x\n", be32_to_cpup(mailbox->buf)); 1515 1567 for (i = 0; i < 0x100 / 4; ++i) { 1516 1568 if (i % 8 == 0) 1517 1569 printk(" [%02x] ", i * 4); 1518 - printk(" %08x", be32_to_cpu(((u32 *) qp_context)[i + 2])); 1570 + printk(" %08x", 1571 + be32_to_cpu(((u32 *) mailbox->buf)[i + 2])); 1519 1572 if ((i + 1) % 8 == 0) 1520 1573 printk("\n"); 1521 1574 } ··· 1521 1578 } 1522 1579 1523 1580 if (trans == MTHCA_TRANS_ANY2RST) { 1524 - err = 
mthca_cmd_box(dev, 0, indma, (!!is_ee << 24) | num, 1525 - op_mod, op[trans], CMD_TIME_CLASS_C, status); 1581 + err = mthca_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, 1582 + (!!is_ee << 24) | num, op_mod, 1583 + op[trans], CMD_TIME_CLASS_C, status); 1526 1584 1527 - if (0) { 1585 + if (0 && mailbox) { 1528 1586 int i; 1529 1587 mthca_dbg(dev, "Dumping QP context:\n"); 1530 - printk(" %08x\n", be32_to_cpup(qp_context)); 1588 + printk(" %08x\n", be32_to_cpup(mailbox->buf)); 1531 1589 for (i = 0; i < 0x100 / 4; ++i) { 1532 1590 if (i % 8 == 0) 1533 1591 printk("[%02x] ", i * 4); 1534 - printk(" %08x", be32_to_cpu(((u32 *) qp_context)[i + 2])); 1592 + printk(" %08x", 1593 + be32_to_cpu(((u32 *) mailbox->buf)[i + 2])); 1535 1594 if ((i + 1) % 8 == 0) 1536 1595 printk("\n"); 1537 1596 } 1538 1597 } 1539 1598 1540 1599 } else 1541 - err = mthca_cmd(dev, indma, (!!is_ee << 24) | num, 1600 + err = mthca_cmd(dev, mailbox->dma, (!!is_ee << 24) | num, 1542 1601 op_mod, op[trans], CMD_TIME_CLASS_C, status); 1543 1602 1544 - if (trans != MTHCA_TRANS_ANY2RST) 1545 - pci_unmap_single(dev->pdev, indma, 1546 - MTHCA_QP_CONTEXT_SIZE, PCI_DMA_TODEVICE); 1547 - else 1548 - pci_free_consistent(dev->pdev, MTHCA_QP_CONTEXT_SIZE, 1549 - qp_context, indma); 1603 + if (my_mailbox) 1604 + mthca_free_mailbox(dev, mailbox); 1605 + 1550 1606 return err; 1551 1607 } 1552 1608 1553 1609 int mthca_QUERY_QP(struct mthca_dev *dev, u32 num, int is_ee, 1554 - void *qp_context, u8 *status) 1610 + struct mthca_mailbox *mailbox, u8 *status) 1555 1611 { 1556 - dma_addr_t outdma = 0; 1557 - int err; 1558 - 1559 - outdma = pci_map_single(dev->pdev, qp_context, 1560 - MTHCA_QP_CONTEXT_SIZE, 1561 - PCI_DMA_FROMDEVICE); 1562 - if (pci_dma_mapping_error(outdma)) 1563 - return -ENOMEM; 1564 - 1565 - err = mthca_cmd_box(dev, 0, outdma, (!!is_ee << 24) | num, 0, 1566 - CMD_QUERY_QPEE, 1567 - CMD_TIME_CLASS_A, status); 1568 - 1569 - pci_unmap_single(dev->pdev, outdma, 1570 - MTHCA_QP_CONTEXT_SIZE, 1571 - 
PCI_DMA_FROMDEVICE); 1572 - return err; 1612 + return mthca_cmd_box(dev, 0, mailbox->dma, (!!is_ee << 24) | num, 0, 1613 + CMD_QUERY_QPEE, CMD_TIME_CLASS_A, status); 1573 1614 } 1574 1615 1575 1616 int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn, ··· 1583 1656 } 1584 1657 1585 1658 int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey, 1586 - int port, struct ib_wc* in_wc, struct ib_grh* in_grh, 1659 + int port, struct ib_wc *in_wc, struct ib_grh *in_grh, 1587 1660 void *in_mad, void *response_mad, u8 *status) 1588 1661 { 1589 - void *box; 1590 - dma_addr_t dma; 1662 + struct mthca_mailbox *inmailbox, *outmailbox; 1663 + void *inbox; 1591 1664 int err; 1592 1665 u32 in_modifier = port; 1593 1666 u8 op_modifier = 0; ··· 1601 1674 #define MAD_IFC_PKEY_OFFSET 0x10e 1602 1675 #define MAD_IFC_GRH_OFFSET 0x140 1603 1676 1604 - box = pci_alloc_consistent(dev->pdev, MAD_IFC_BOX_SIZE, &dma); 1605 - if (!box) 1606 - return -ENOMEM; 1677 + inmailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); 1678 + if (IS_ERR(inmailbox)) 1679 + return PTR_ERR(inmailbox); 1680 + inbox = inmailbox->buf; 1607 1681 1608 - memcpy(box, in_mad, 256); 1682 + outmailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); 1683 + if (IS_ERR(outmailbox)) { 1684 + mthca_free_mailbox(dev, inmailbox); 1685 + return PTR_ERR(outmailbox); 1686 + } 1687 + 1688 + memcpy(inbox, in_mad, 256); 1609 1689 1610 1690 /* 1611 1691 * Key check traps can't be generated unless we have in_wc to ··· 1626 1692 if (in_wc) { 1627 1693 u8 val; 1628 1694 1629 - memset(box + 256, 0, 256); 1695 + memset(inbox + 256, 0, 256); 1630 1696 1631 - MTHCA_PUT(box, in_wc->qp_num, MAD_IFC_MY_QPN_OFFSET); 1632 - MTHCA_PUT(box, in_wc->src_qp, MAD_IFC_RQPN_OFFSET); 1697 + MTHCA_PUT(inbox, in_wc->qp_num, MAD_IFC_MY_QPN_OFFSET); 1698 + MTHCA_PUT(inbox, in_wc->src_qp, MAD_IFC_RQPN_OFFSET); 1633 1699 1634 1700 val = in_wc->sl << 4; 1635 - MTHCA_PUT(box, val, MAD_IFC_SL_OFFSET); 1701 + MTHCA_PUT(inbox, val, 
MAD_IFC_SL_OFFSET); 1636 1702 1637 1703 val = in_wc->dlid_path_bits | 1638 1704 (in_wc->wc_flags & IB_WC_GRH ? 0x80 : 0); 1639 - MTHCA_PUT(box, val, MAD_IFC_GRH_OFFSET); 1705 + MTHCA_PUT(inbox, val, MAD_IFC_GRH_OFFSET); 1640 1706 1641 - MTHCA_PUT(box, in_wc->slid, MAD_IFC_RLID_OFFSET); 1642 - MTHCA_PUT(box, in_wc->pkey_index, MAD_IFC_PKEY_OFFSET); 1707 + MTHCA_PUT(inbox, in_wc->slid, MAD_IFC_RLID_OFFSET); 1708 + MTHCA_PUT(inbox, in_wc->pkey_index, MAD_IFC_PKEY_OFFSET); 1643 1709 1644 1710 if (in_grh) 1645 - memcpy((u8 *) box + MAD_IFC_GRH_OFFSET, in_grh, 40); 1711 + memcpy(inbox + MAD_IFC_GRH_OFFSET, in_grh, 40); 1646 1712 1647 1713 op_modifier |= 0x10; 1648 1714 1649 1715 in_modifier |= in_wc->slid << 16; 1650 1716 } 1651 1717 1652 - err = mthca_cmd_box(dev, dma, dma + 512, in_modifier, op_modifier, 1718 + err = mthca_cmd_box(dev, inmailbox->dma, outmailbox->dma, 1719 + in_modifier, op_modifier, 1653 1720 CMD_MAD_IFC, CMD_TIME_CLASS_C, status); 1654 1721 1655 1722 if (!err && !*status) 1656 - memcpy(response_mad, box + 512, 256); 1723 + memcpy(response_mad, outmailbox->buf, 256); 1657 1724 1658 - pci_free_consistent(dev->pdev, MAD_IFC_BOX_SIZE, box, dma); 1725 + mthca_free_mailbox(dev, inmailbox); 1726 + mthca_free_mailbox(dev, outmailbox); 1659 1727 return err; 1660 1728 } 1661 1729 1662 - int mthca_READ_MGM(struct mthca_dev *dev, int index, void *mgm, 1663 - u8 *status) 1730 + int mthca_READ_MGM(struct mthca_dev *dev, int index, 1731 + struct mthca_mailbox *mailbox, u8 *status) 1664 1732 { 1665 - dma_addr_t outdma = 0; 1666 - int err; 1667 - 1668 - outdma = pci_map_single(dev->pdev, mgm, 1669 - MTHCA_MGM_ENTRY_SIZE, 1670 - PCI_DMA_FROMDEVICE); 1671 - if (pci_dma_mapping_error(outdma)) 1672 - return -ENOMEM; 1673 - 1674 - err = mthca_cmd_box(dev, 0, outdma, index, 0, 1675 - CMD_READ_MGM, 1676 - CMD_TIME_CLASS_A, status); 1677 - 1678 - pci_unmap_single(dev->pdev, outdma, 1679 - MTHCA_MGM_ENTRY_SIZE, 1680 - PCI_DMA_FROMDEVICE); 1681 - return err; 1733 + return 
mthca_cmd_box(dev, 0, mailbox->dma, index, 0, 1734 + CMD_READ_MGM, CMD_TIME_CLASS_A, status); 1682 1735 } 1683 1736 1684 - int mthca_WRITE_MGM(struct mthca_dev *dev, int index, void *mgm, 1685 - u8 *status) 1737 + int mthca_WRITE_MGM(struct mthca_dev *dev, int index, 1738 + struct mthca_mailbox *mailbox, u8 *status) 1686 1739 { 1687 - dma_addr_t indma; 1688 - int err; 1689 - 1690 - indma = pci_map_single(dev->pdev, mgm, 1691 - MTHCA_MGM_ENTRY_SIZE, 1692 - PCI_DMA_TODEVICE); 1693 - if (pci_dma_mapping_error(indma)) 1694 - return -ENOMEM; 1695 - 1696 - err = mthca_cmd(dev, indma, index, 0, CMD_WRITE_MGM, 1697 - CMD_TIME_CLASS_A, status); 1698 - 1699 - pci_unmap_single(dev->pdev, indma, 1700 - MTHCA_MGM_ENTRY_SIZE, PCI_DMA_TODEVICE); 1701 - return err; 1740 + return mthca_cmd(dev, mailbox->dma, index, 0, CMD_WRITE_MGM, 1741 + CMD_TIME_CLASS_A, status); 1702 1742 } 1703 1743 1704 - int mthca_MGID_HASH(struct mthca_dev *dev, void *gid, u16 *hash, 1705 - u8 *status) 1744 + int mthca_MGID_HASH(struct mthca_dev *dev, struct mthca_mailbox *mailbox, 1745 + u16 *hash, u8 *status) 1706 1746 { 1707 - dma_addr_t indma; 1708 1747 u64 imm; 1709 1748 int err; 1710 1749 1711 - indma = pci_map_single(dev->pdev, gid, 16, PCI_DMA_TODEVICE); 1712 - if (pci_dma_mapping_error(indma)) 1713 - return -ENOMEM; 1714 - 1715 - err = mthca_cmd_imm(dev, indma, &imm, 0, 0, CMD_MGID_HASH, 1750 + err = mthca_cmd_imm(dev, mailbox->dma, &imm, 0, 0, CMD_MGID_HASH, 1716 1751 CMD_TIME_CLASS_A, status); 1717 - *hash = imm; 1718 1752 1719 - pci_unmap_single(dev->pdev, indma, 16, PCI_DMA_TODEVICE); 1753 + *hash = imm; 1720 1754 return err; 1721 1755 } 1722 1756
+26 -20
drivers/infiniband/hw/mthca/mthca_cmd.h
··· 37 37 38 38 #include <ib_verbs.h> 39 39 40 - #define MTHCA_CMD_MAILBOX_ALIGN 16UL 41 - #define MTHCA_CMD_MAILBOX_EXTRA (MTHCA_CMD_MAILBOX_ALIGN - 1) 40 + #define MTHCA_MAILBOX_SIZE 4096 42 41 43 42 enum { 44 43 /* command completed successfully: */ ··· 109 110 DEV_LIM_FLAG_RAW_MULTI = 1 << 19, 110 111 DEV_LIM_FLAG_UD_AV_PORT_ENFORCE = 1 << 20, 111 112 DEV_LIM_FLAG_UD_MULTI = 1 << 21, 113 + }; 114 + 115 + struct mthca_mailbox { 116 + dma_addr_t dma; 117 + void *buf; 112 118 }; 113 119 114 120 struct mthca_dev_lim { ··· 246 242 void mthca_cmd_event(struct mthca_dev *dev, u16 token, 247 243 u8 status, u64 out_param); 248 244 245 + struct mthca_mailbox *mthca_alloc_mailbox(struct mthca_dev *dev, 246 + unsigned int gfp_mask); 247 + void mthca_free_mailbox(struct mthca_dev *dev, struct mthca_mailbox *mailbox); 248 + 249 249 int mthca_SYS_EN(struct mthca_dev *dev, u8 *status); 250 250 int mthca_SYS_DIS(struct mthca_dev *dev, u8 *status); 251 251 int mthca_MAP_FA(struct mthca_dev *dev, struct mthca_icm *icm, u8 *status); ··· 280 272 int mthca_UNMAP_ICM_AUX(struct mthca_dev *dev, u8 *status); 281 273 int mthca_SET_ICM_SIZE(struct mthca_dev *dev, u64 icm_size, u64 *aux_pages, 282 274 u8 *status); 283 - int mthca_SW2HW_MPT(struct mthca_dev *dev, void *mpt_entry, 275 + int mthca_SW2HW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox, 284 276 int mpt_index, u8 *status); 285 - int mthca_HW2SW_MPT(struct mthca_dev *dev, void *mpt_entry, 277 + int mthca_HW2SW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox, 286 278 int mpt_index, u8 *status); 287 - int mthca_WRITE_MTT(struct mthca_dev *dev, u64 *mtt_entry, 279 + int mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox, 288 280 int num_mtt, u8 *status); 289 281 int mthca_SYNC_TPT(struct mthca_dev *dev, u8 *status); 290 282 int mthca_MAP_EQ(struct mthca_dev *dev, u64 event_mask, int unmap, 291 283 int eq_num, u8 *status); 292 - int mthca_SW2HW_EQ(struct mthca_dev *dev, void *eq_context, 284 + int 
mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, 293 285 int eq_num, u8 *status); 294 - int mthca_HW2SW_EQ(struct mthca_dev *dev, void *eq_context, 286 + int mthca_HW2SW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, 295 287 int eq_num, u8 *status); 296 - int mthca_SW2HW_CQ(struct mthca_dev *dev, void *cq_context, 288 + int mthca_SW2HW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, 297 289 int cq_num, u8 *status); 298 - int mthca_HW2SW_CQ(struct mthca_dev *dev, void *cq_context, 290 + int mthca_HW2SW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, 299 291 int cq_num, u8 *status); 300 292 int mthca_MODIFY_QP(struct mthca_dev *dev, int trans, u32 num, 301 - int is_ee, void *qp_context, u32 optmask, 293 + int is_ee, struct mthca_mailbox *mailbox, u32 optmask, 302 294 u8 *status); 303 295 int mthca_QUERY_QP(struct mthca_dev *dev, u32 num, int is_ee, 304 - void *qp_context, u8 *status); 296 + struct mthca_mailbox *mailbox, u8 *status); 305 297 int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn, 306 298 u8 *status); 307 299 int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey, 308 - int port, struct ib_wc* in_wc, struct ib_grh* in_grh, 300 + int port, struct ib_wc *in_wc, struct ib_grh *in_grh, 309 301 void *in_mad, void *response_mad, u8 *status); 310 - int mthca_READ_MGM(struct mthca_dev *dev, int index, void *mgm, 311 - u8 *status); 312 - int mthca_WRITE_MGM(struct mthca_dev *dev, int index, void *mgm, 313 - u8 *status); 314 - int mthca_MGID_HASH(struct mthca_dev *dev, void *gid, u16 *hash, 315 - u8 *status); 302 + int mthca_READ_MGM(struct mthca_dev *dev, int index, 303 + struct mthca_mailbox *mailbox, u8 *status); 304 + int mthca_WRITE_MGM(struct mthca_dev *dev, int index, 305 + struct mthca_mailbox *mailbox, u8 *status); 306 + int mthca_MGID_HASH(struct mthca_dev *dev, struct mthca_mailbox *mailbox, 307 + u16 *hash, u8 *status); 316 308 int mthca_NOP(struct mthca_dev *dev, u8 
*status); 317 - 318 - #define MAILBOX_ALIGN(x) ((void *) ALIGN((unsigned long) (x), MTHCA_CMD_MAILBOX_ALIGN)) 319 309 320 310 #endif /* MTHCA_CMD_H */
+16 -18
drivers/infiniband/hw/mthca/mthca_cq.c
··· 745 745 struct mthca_cq *cq) 746 746 { 747 747 int size = nent * MTHCA_CQ_ENTRY_SIZE; 748 - void *mailbox = NULL; 748 + struct mthca_mailbox *mailbox; 749 749 struct mthca_cq_context *cq_context; 750 750 int err = -ENOMEM; 751 751 u8 status; ··· 779 779 goto err_out_ci; 780 780 } 781 781 782 - mailbox = kmalloc(sizeof (struct mthca_cq_context) + MTHCA_CMD_MAILBOX_EXTRA, 783 - GFP_KERNEL); 784 - if (!mailbox) 785 - goto err_out_mailbox; 782 + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); 783 + if (IS_ERR(mailbox)) 784 + goto err_out_arm; 786 785 787 - cq_context = MAILBOX_ALIGN(mailbox); 786 + cq_context = mailbox->buf; 788 787 789 788 err = mthca_alloc_cq_buf(dev, size, cq); 790 789 if (err) ··· 814 815 cq_context->state_db = cpu_to_be32(cq->arm_db_index); 815 816 } 816 817 817 - err = mthca_SW2HW_CQ(dev, cq_context, cq->cqn, &status); 818 + err = mthca_SW2HW_CQ(dev, mailbox, cq->cqn, &status); 818 819 if (err) { 819 820 mthca_warn(dev, "SW2HW_CQ failed (%d)\n", err); 820 821 goto err_out_free_mr; ··· 838 839 839 840 cq->cons_index = 0; 840 841 841 - kfree(mailbox); 842 + mthca_free_mailbox(dev, mailbox); 842 843 843 844 return 0; 844 845 ··· 847 848 mthca_free_cq_buf(dev, cq); 848 849 849 850 err_out_mailbox: 850 - kfree(mailbox); 851 + mthca_free_mailbox(dev, mailbox); 851 852 853 + err_out_arm: 852 854 if (mthca_is_memfree(dev)) 853 855 mthca_free_db(dev, MTHCA_DB_TYPE_CQ_ARM, cq->arm_db_index); 854 856 ··· 869 869 void mthca_free_cq(struct mthca_dev *dev, 870 870 struct mthca_cq *cq) 871 871 { 872 - void *mailbox; 872 + struct mthca_mailbox *mailbox; 873 873 int err; 874 874 u8 status; 875 875 876 876 might_sleep(); 877 877 878 - mailbox = kmalloc(sizeof (struct mthca_cq_context) + MTHCA_CMD_MAILBOX_EXTRA, 879 - GFP_KERNEL); 880 - if (!mailbox) { 878 + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); 879 + if (IS_ERR(mailbox)) { 881 880 mthca_warn(dev, "No memory for mailbox to free CQ.\n"); 882 881 return; 883 882 } 884 883 885 - err = mthca_HW2SW_CQ(dev, 
MAILBOX_ALIGN(mailbox), cq->cqn, &status); 884 + err = mthca_HW2SW_CQ(dev, mailbox, cq->cqn, &status); 886 885 if (err) 887 886 mthca_warn(dev, "HW2SW_CQ failed (%d)\n", err); 888 887 else if (status) 889 - mthca_warn(dev, "HW2SW_CQ returned status 0x%02x\n", 890 - status); 888 + mthca_warn(dev, "HW2SW_CQ returned status 0x%02x\n", status); 891 889 892 890 if (0) { 893 - u32 *ctx = MAILBOX_ALIGN(mailbox); 891 + u32 *ctx = mailbox->buf; 894 892 int j; 895 893 896 894 printk(KERN_ERR "context for CQN %x (cons index %x, next sw %d)\n", ··· 920 922 921 923 mthca_table_put(dev, dev->cq_table.table, cq->cqn); 922 924 mthca_free(&dev->cq_table.alloc, cq->cqn); 923 - kfree(mailbox); 925 + mthca_free_mailbox(dev, mailbox); 924 926 } 925 927 926 928 int __devinit mthca_init_cq_table(struct mthca_dev *dev)
+1
drivers/infiniband/hw/mthca/mthca_dev.h
··· 99 99 }; 100 100 101 101 struct mthca_cmd { 102 + struct pci_pool *pool; 102 103 int use_events; 103 104 struct semaphore hcr_sem; 104 105 struct semaphore poll_sem;
+18 -19
drivers/infiniband/hw/mthca/mthca_eq.c
··· 469 469 PAGE_SIZE; 470 470 u64 *dma_list = NULL; 471 471 dma_addr_t t; 472 - void *mailbox = NULL; 472 + struct mthca_mailbox *mailbox; 473 473 struct mthca_eq_context *eq_context; 474 474 int err = -ENOMEM; 475 475 int i; ··· 494 494 if (!dma_list) 495 495 goto err_out_free; 496 496 497 - mailbox = kmalloc(sizeof *eq_context + MTHCA_CMD_MAILBOX_EXTRA, 498 - GFP_KERNEL); 499 - if (!mailbox) 497 + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); 498 + if (IS_ERR(mailbox)) 500 499 goto err_out_free; 501 - eq_context = MAILBOX_ALIGN(mailbox); 500 + eq_context = mailbox->buf; 502 501 503 502 for (i = 0; i < npages; ++i) { 504 503 eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev, 505 504 PAGE_SIZE, &t, GFP_KERNEL); 506 505 if (!eq->page_list[i].buf) 507 - goto err_out_free; 506 + goto err_out_free_pages; 508 507 509 508 dma_list[i] = t; 510 509 pci_unmap_addr_set(&eq->page_list[i], mapping, t); ··· 516 517 517 518 eq->eqn = mthca_alloc(&dev->eq_table.alloc); 518 519 if (eq->eqn == -1) 519 - goto err_out_free; 520 + goto err_out_free_pages; 520 521 521 522 err = mthca_mr_alloc_phys(dev, dev->driver_pd.pd_num, 522 523 dma_list, PAGE_SHIFT, npages, ··· 547 548 eq_context->intr = intr; 548 549 eq_context->lkey = cpu_to_be32(eq->mr.ibmr.lkey); 549 550 550 - err = mthca_SW2HW_EQ(dev, eq_context, eq->eqn, &status); 551 + err = mthca_SW2HW_EQ(dev, mailbox, eq->eqn, &status); 551 552 if (err) { 552 553 mthca_warn(dev, "SW2HW_EQ failed (%d)\n", err); 553 554 goto err_out_free_mr; ··· 560 561 } 561 562 562 563 kfree(dma_list); 563 - kfree(mailbox); 564 + mthca_free_mailbox(dev, mailbox); 564 565 565 566 eq->eqn_mask = swab32(1 << eq->eqn); 566 567 eq->cons_index = 0; ··· 578 579 err_out_free_eq: 579 580 mthca_free(&dev->eq_table.alloc, eq->eqn); 580 581 581 - err_out_free: 582 + err_out_free_pages: 582 583 for (i = 0; i < npages; ++i) 583 584 if (eq->page_list[i].buf) 584 585 dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, ··· 586 587 pci_unmap_addr(&eq->page_list[i], 
587 588 mapping)); 588 589 590 + mthca_free_mailbox(dev, mailbox); 591 + 592 + err_out_free: 589 593 kfree(eq->page_list); 590 594 kfree(dma_list); 591 - kfree(mailbox); 592 595 593 596 err_out: 594 597 return err; ··· 599 598 static void mthca_free_eq(struct mthca_dev *dev, 600 599 struct mthca_eq *eq) 601 600 { 602 - void *mailbox = NULL; 601 + struct mthca_mailbox *mailbox; 603 602 int err; 604 603 u8 status; 605 604 int npages = (eq->nent * MTHCA_EQ_ENTRY_SIZE + PAGE_SIZE - 1) / 606 605 PAGE_SIZE; 607 606 int i; 608 607 609 - mailbox = kmalloc(sizeof (struct mthca_eq_context) + MTHCA_CMD_MAILBOX_EXTRA, 610 - GFP_KERNEL); 611 - if (!mailbox) 608 + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); 609 + if (IS_ERR(mailbox)) 612 610 return; 613 611 614 - err = mthca_HW2SW_EQ(dev, MAILBOX_ALIGN(mailbox), 615 - eq->eqn, &status); 612 + err = mthca_HW2SW_EQ(dev, mailbox, eq->eqn, &status); 616 613 if (err) 617 614 mthca_warn(dev, "HW2SW_EQ failed (%d)\n", err); 618 615 if (status) ··· 623 624 for (i = 0; i < sizeof (struct mthca_eq_context) / 4; ++i) { 624 625 if (i % 4 == 0) 625 626 printk("[%02x] ", i * 4); 626 - printk(" %08x", be32_to_cpup(MAILBOX_ALIGN(mailbox) + i * 4)); 627 + printk(" %08x", be32_to_cpup(mailbox->buf + i * 4)); 627 628 if ((i + 1) % 4 == 0) 628 629 printk("\n"); 629 630 } ··· 636 637 pci_unmap_addr(&eq->page_list[i], mapping)); 637 638 638 639 kfree(eq->page_list); 639 - kfree(mailbox); 640 + mthca_free_mailbox(dev, mailbox); 640 641 } 641 642 642 643 static void mthca_free_irqs(struct mthca_dev *dev)
+32 -31
drivers/infiniband/hw/mthca/mthca_mcg.c
··· 66 66 * entry in hash chain and *mgm holds end of hash chain. 67 67 */ 68 68 static int find_mgm(struct mthca_dev *dev, 69 - u8 *gid, struct mthca_mgm *mgm, 69 + u8 *gid, struct mthca_mailbox *mgm_mailbox, 70 70 u16 *hash, int *prev, int *index) 71 71 { 72 - void *mailbox; 72 + struct mthca_mailbox *mailbox; 73 + struct mthca_mgm *mgm = mgm_mailbox->buf; 73 74 u8 *mgid; 74 75 int err; 75 76 u8 status; 76 77 77 - mailbox = kmalloc(16 + MTHCA_CMD_MAILBOX_EXTRA, GFP_KERNEL); 78 - if (!mailbox) 78 + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); 79 + if (IS_ERR(mailbox)) 79 80 return -ENOMEM; 80 - mgid = MAILBOX_ALIGN(mailbox); 81 + mgid = mailbox->buf; 81 82 82 83 memcpy(mgid, gid, 16); 83 84 84 - err = mthca_MGID_HASH(dev, mgid, hash, &status); 85 + err = mthca_MGID_HASH(dev, mailbox, hash, &status); 85 86 if (err) 86 87 goto out; 87 88 if (status) { ··· 104 103 *prev = -1; 105 104 106 105 do { 107 - err = mthca_READ_MGM(dev, *index, mgm, &status); 106 + err = mthca_READ_MGM(dev, *index, mgm_mailbox, &status); 108 107 if (err) 109 108 goto out; 110 109 if (status) { ··· 130 129 *index = -1; 131 130 132 131 out: 133 - kfree(mailbox); 132 + mthca_free_mailbox(dev, mailbox); 134 133 return err; 135 134 } 136 135 137 136 int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) 138 137 { 139 138 struct mthca_dev *dev = to_mdev(ibqp->device); 140 - void *mailbox; 139 + struct mthca_mailbox *mailbox; 141 140 struct mthca_mgm *mgm; 142 141 u16 hash; 143 142 int index, prev; ··· 146 145 int err; 147 146 u8 status; 148 147 149 - mailbox = kmalloc(sizeof *mgm + MTHCA_CMD_MAILBOX_EXTRA, GFP_KERNEL); 150 - if (!mailbox) 151 - return -ENOMEM; 152 - mgm = MAILBOX_ALIGN(mailbox); 148 + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); 149 + if (IS_ERR(mailbox)) 150 + return PTR_ERR(mailbox); 151 + mgm = mailbox->buf; 153 152 154 153 if (down_interruptible(&dev->mcg_table.sem)) 155 154 return -EINTR; 156 155 157 - err = find_mgm(dev, gid->raw, mgm, &hash, &prev, 
&index); 156 + err = find_mgm(dev, gid->raw, mailbox, &hash, &prev, &index); 158 157 if (err) 159 158 goto out; 160 159 ··· 171 170 goto out; 172 171 } 173 172 174 - err = mthca_READ_MGM(dev, index, mgm, &status); 173 + err = mthca_READ_MGM(dev, index, mailbox, &status); 175 174 if (err) 176 175 goto out; 177 176 if (status) { ··· 196 195 goto out; 197 196 } 198 197 199 - err = mthca_WRITE_MGM(dev, index, mgm, &status); 198 + err = mthca_WRITE_MGM(dev, index, mailbox, &status); 200 199 if (err) 201 200 goto out; 202 201 if (status) { ··· 207 206 if (!link) 208 207 goto out; 209 208 210 - err = mthca_READ_MGM(dev, prev, mgm, &status); 209 + err = mthca_READ_MGM(dev, prev, mailbox, &status); 211 210 if (err) 212 211 goto out; 213 212 if (status) { ··· 218 217 219 218 mgm->next_gid_index = cpu_to_be32(index << 5); 220 219 221 - err = mthca_WRITE_MGM(dev, prev, mgm, &status); 220 + err = mthca_WRITE_MGM(dev, prev, mailbox, &status); 222 221 if (err) 223 222 goto out; 224 223 if (status) { ··· 228 227 229 228 out: 230 229 up(&dev->mcg_table.sem); 231 - kfree(mailbox); 230 + mthca_free_mailbox(dev, mailbox); 232 231 return err; 233 232 } 234 233 235 234 int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) 236 235 { 237 236 struct mthca_dev *dev = to_mdev(ibqp->device); 238 - void *mailbox; 237 + struct mthca_mailbox *mailbox; 239 238 struct mthca_mgm *mgm; 240 239 u16 hash; 241 240 int prev, index; ··· 243 242 int err; 244 243 u8 status; 245 244 246 - mailbox = kmalloc(sizeof *mgm + MTHCA_CMD_MAILBOX_EXTRA, GFP_KERNEL); 247 - if (!mailbox) 248 - return -ENOMEM; 249 - mgm = MAILBOX_ALIGN(mailbox); 245 + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); 246 + if (IS_ERR(mailbox)) 247 + return PTR_ERR(mailbox); 248 + mgm = mailbox->buf; 250 249 251 250 if (down_interruptible(&dev->mcg_table.sem)) 252 251 return -EINTR; 253 252 254 - err = find_mgm(dev, gid->raw, mgm, &hash, &prev, &index); 253 + err = find_mgm(dev, gid->raw, mailbox, &hash, &prev, 
&index); 255 254 if (err) 256 255 goto out; 257 256 ··· 286 285 mgm->qp[loc] = mgm->qp[i - 1]; 287 286 mgm->qp[i - 1] = 0; 288 287 289 - err = mthca_WRITE_MGM(dev, index, mgm, &status); 288 + err = mthca_WRITE_MGM(dev, index, mailbox, &status); 290 289 if (err) 291 290 goto out; 292 291 if (status) { ··· 305 304 if (be32_to_cpu(mgm->next_gid_index) >> 5) { 306 305 err = mthca_READ_MGM(dev, 307 306 be32_to_cpu(mgm->next_gid_index) >> 5, 308 - mgm, &status); 307 + mailbox, &status); 309 308 if (err) 310 309 goto out; 311 310 if (status) { ··· 317 316 } else 318 317 memset(mgm->gid, 0, 16); 319 318 320 - err = mthca_WRITE_MGM(dev, index, mgm, &status); 319 + err = mthca_WRITE_MGM(dev, index, mailbox, &status); 321 320 if (err) 322 321 goto out; 323 322 if (status) { ··· 328 327 } else { 329 328 /* Remove entry from AMGM */ 330 329 index = be32_to_cpu(mgm->next_gid_index) >> 5; 331 - err = mthca_READ_MGM(dev, prev, mgm, &status); 330 + err = mthca_READ_MGM(dev, prev, mailbox, &status); 332 331 if (err) 333 332 goto out; 334 333 if (status) { ··· 339 338 340 339 mgm->next_gid_index = cpu_to_be32(index << 5); 341 340 342 - err = mthca_WRITE_MGM(dev, prev, mgm, &status); 341 + err = mthca_WRITE_MGM(dev, prev, mailbox, &status); 343 342 if (err) 344 343 goto out; 345 344 if (status) { ··· 351 350 352 351 out: 353 352 up(&dev->mcg_table.sem); 354 - kfree(mailbox); 353 + mthca_free_mailbox(dev, mailbox); 355 354 return err; 356 355 } 357 356
+23 -23
drivers/infiniband/hw/mthca/mthca_mr.c
··· 246 246 int mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt, 247 247 int start_index, u64 *buffer_list, int list_len) 248 248 { 249 + struct mthca_mailbox *mailbox; 249 250 u64 *mtt_entry; 250 251 int err = 0; 251 252 u8 status; 252 253 int i; 253 254 254 - mtt_entry = (u64 *) __get_free_page(GFP_KERNEL); 255 - if (!mtt_entry) 256 - return -ENOMEM; 255 + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); 256 + if (IS_ERR(mailbox)) 257 + return PTR_ERR(mailbox); 258 + mtt_entry = mailbox->buf; 257 259 258 260 while (list_len > 0) { 259 261 mtt_entry[0] = cpu_to_be64(dev->mr_table.mtt_base + 260 262 mtt->first_seg * MTHCA_MTT_SEG_SIZE + 261 263 start_index * 8); 262 264 mtt_entry[1] = 0; 263 - for (i = 0; i < list_len && i < PAGE_SIZE / 8 - 2; ++i) 265 + for (i = 0; i < list_len && i < MTHCA_MAILBOX_SIZE / 8 - 2; ++i) 264 266 mtt_entry[i + 2] = cpu_to_be64(buffer_list[i] | 265 267 MTHCA_MTT_FLAG_PRESENT); 266 268 ··· 273 271 if (i & 1) 274 272 mtt_entry[i + 2] = 0; 275 273 276 - err = mthca_WRITE_MTT(dev, mtt_entry, (i + 1) & ~1, &status); 274 + err = mthca_WRITE_MTT(dev, mailbox, (i + 1) & ~1, &status); 277 275 if (err) { 278 276 mthca_warn(dev, "WRITE_MTT failed (%d)\n", err); 279 277 goto out; ··· 291 289 } 292 290 293 291 out: 294 - free_page((unsigned long) mtt_entry); 292 + mthca_free_mailbox(dev, mailbox); 295 293 return err; 296 294 } 297 295 ··· 334 332 int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift, 335 333 u64 iova, u64 total_size, u32 access, struct mthca_mr *mr) 336 334 { 337 - void *mailbox; 335 + struct mthca_mailbox *mailbox; 338 336 struct mthca_mpt_entry *mpt_entry; 339 337 u32 key; 340 338 int i; ··· 356 354 goto err_out_mpt_free; 357 355 } 358 356 359 - mailbox = kmalloc(sizeof *mpt_entry + MTHCA_CMD_MAILBOX_EXTRA, 360 - GFP_KERNEL); 361 - if (!mailbox) { 362 - err = -ENOMEM; 357 + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); 358 + if (IS_ERR(mailbox)) { 359 + err = PTR_ERR(mailbox); 363 360 goto 
err_out_table; 364 361 } 365 - mpt_entry = MAILBOX_ALIGN(mailbox); 362 + mpt_entry = mailbox->buf; 366 363 367 364 mpt_entry->flags = cpu_to_be32(MTHCA_MPT_FLAG_SW_OWNS | 368 365 MTHCA_MPT_FLAG_MIO | ··· 395 394 } 396 395 } 397 396 398 - err = mthca_SW2HW_MPT(dev, mpt_entry, 397 + err = mthca_SW2HW_MPT(dev, mailbox, 399 398 key & (dev->limits.num_mpts - 1), 400 399 &status); 401 400 if (err) { ··· 408 407 goto err_out_mailbox; 409 408 } 410 409 411 - kfree(mailbox); 410 + mthca_free_mailbox(dev, mailbox); 412 411 return err; 413 412 414 413 err_out_mailbox: 415 - kfree(mailbox); 414 + mthca_free_mailbox(dev, mailbox); 416 415 417 416 err_out_table: 418 417 mthca_table_put(dev, dev->mr_table.mpt_table, key); ··· 488 487 u32 access, struct mthca_fmr *mr) 489 488 { 490 489 struct mthca_mpt_entry *mpt_entry; 491 - void *mailbox; 490 + struct mthca_mailbox *mailbox; 492 491 u64 mtt_seg; 493 492 u32 key, idx; 494 493 u8 status; ··· 539 538 } else 540 539 mr->mem.tavor.mtts = dev->mr_table.tavor_fmr.mtt_base + mtt_seg; 541 540 542 - mailbox = kmalloc(sizeof *mpt_entry + MTHCA_CMD_MAILBOX_EXTRA, 543 - GFP_KERNEL); 544 - if (!mailbox) 541 + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); 542 + if (IS_ERR(mailbox)) 545 543 goto err_out_free_mtt; 546 544 547 - mpt_entry = MAILBOX_ALIGN(mailbox); 545 + mpt_entry = mailbox->buf; 548 546 549 547 mpt_entry->flags = cpu_to_be32(MTHCA_MPT_FLAG_SW_OWNS | 550 548 MTHCA_MPT_FLAG_MIO | ··· 568 568 } 569 569 } 570 570 571 - err = mthca_SW2HW_MPT(dev, mpt_entry, 571 + err = mthca_SW2HW_MPT(dev, mailbox, 572 572 key & (dev->limits.num_mpts - 1), 573 573 &status); 574 574 if (err) { ··· 582 582 goto err_out_mailbox_free; 583 583 } 584 584 585 - kfree(mailbox); 585 + mthca_free_mailbox(dev, mailbox); 586 586 return 0; 587 587 588 588 err_out_mailbox_free: 589 - kfree(mailbox); 589 + mthca_free_mailbox(dev, mailbox); 590 590 591 591 err_out_free_mtt: 592 592 mthca_free_mtt(dev, mr->mtt);
+7 -7
drivers/infiniband/hw/mthca/mthca_qp.c
··· 589 589 struct mthca_dev *dev = to_mdev(ibqp->device); 590 590 struct mthca_qp *qp = to_mqp(ibqp); 591 591 enum ib_qp_state cur_state, new_state; 592 - void *mailbox = NULL; 592 + struct mthca_mailbox *mailbox; 593 593 struct mthca_qp_param *qp_param; 594 594 struct mthca_qp_context *qp_context; 595 595 u32 req_param, opt_param; ··· 646 646 return -EINVAL; 647 647 } 648 648 649 - mailbox = kmalloc(sizeof (*qp_param) + MTHCA_CMD_MAILBOX_EXTRA, GFP_KERNEL); 650 - if (!mailbox) 651 - return -ENOMEM; 652 - qp_param = MAILBOX_ALIGN(mailbox); 649 + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); 650 + if (IS_ERR(mailbox)) 651 + return PTR_ERR(mailbox); 652 + qp_param = mailbox->buf; 653 653 qp_context = &qp_param->context; 654 654 memset(qp_param, 0, sizeof *qp_param); 655 655 ··· 872 872 } 873 873 874 874 err = mthca_MODIFY_QP(dev, state_table[cur_state][new_state].trans, 875 - qp->qpn, 0, qp_param, 0, &status); 875 + qp->qpn, 0, mailbox, 0, &status); 876 876 if (status) { 877 877 mthca_warn(dev, "modify QP %d returned status %02x.\n", 878 878 state_table[cur_state][new_state].trans, status); ··· 882 882 if (!err) 883 883 qp->state = new_state; 884 884 885 - kfree(mailbox); 885 + mthca_free_mailbox(dev, mailbox); 886 886 887 887 if (is_sqp(dev, qp)) 888 888 store_attrs(to_msqp(qp), attr, attr_mask);