ieee1394: sbp2: fix bogus dma mapping

The DMA mapping calls need to use the PCI device (host->device.parent), not
the FireWire host device (&host->device). Problem found by Andreas Schwab,
mistake pointed out by Benjamin Herrenschmidt.
http://ozlabs.org/pipermail/linuxppc-dev/2006-December/029595.html

Signed-off-by: Stefan Richter <stefanr@s5r6.in-berlin.de>
Tested-by: Andreas Schwab <schwab@suse.de>
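
In short, every call into the DMA API in sbp2.c switches its device argument
from the FireWire host's embedded struct device to that device's parent, which
is the PCI device of the 1394 controller, i.e. the device that actually
masters the DMA. A minimal before/after sketch of the pattern, taken from the
first hunk below (the comments are annotation, not part of the patch):

	/* Before: mapped against the FireWire host's own struct device.
	 * That device is not on the PCI bus, so on platforms where the
	 * DMA API relies on per-device bus data (e.g. powerpc with an
	 * IOMMU, where the problem was found), the returned bus address
	 * is bogus. */
	cmd->command_orb_dma = dma_map_single(&hi->host->device,
					      &cmd->command_orb,
					      sizeof(struct sbp2_command_orb),
					      DMA_TO_DEVICE);

	/* After: mapped against the parent device, i.e. the PCI device
	 * of the FireWire controller that performs the bus-master DMA. */
	cmd->command_orb_dma = dma_map_single(hi->host->device.parent,
					      &cmd->command_orb,
					      sizeof(struct sbp2_command_orb),
					      DMA_TO_DEVICE);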

+40 -33
drivers/ieee1394/sbp2.c
···
 			spin_unlock_irqrestore(&lu->cmd_orb_lock, flags);
 			return -ENOMEM;
 		}
-		cmd->command_orb_dma = dma_map_single(&hi->host->device,
+		cmd->command_orb_dma = dma_map_single(hi->host->device.parent,
 						      &cmd->command_orb,
 						      sizeof(struct sbp2_command_orb),
 						      DMA_TO_DEVICE);
-		cmd->sge_dma = dma_map_single(&hi->host->device,
+		cmd->sge_dma = dma_map_single(hi->host->device.parent,
 					      &cmd->scatter_gather_element,
 					      sizeof(cmd->scatter_gather_element),
 					      DMA_BIDIRECTIONAL);
···
 	if (!list_empty(&lu->cmd_orb_completed))
 		list_for_each_safe(lh, next, &lu->cmd_orb_completed) {
 			cmd = list_entry(lh, struct sbp2_command_info, list);
-			dma_unmap_single(&host->device, cmd->command_orb_dma,
+			dma_unmap_single(host->device.parent,
+					 cmd->command_orb_dma,
 					 sizeof(struct sbp2_command_orb),
 					 DMA_TO_DEVICE);
-			dma_unmap_single(&host->device, cmd->sge_dma,
+			dma_unmap_single(host->device.parent, cmd->sge_dma,
 					 sizeof(cmd->scatter_gather_element),
 					 DMA_BIDIRECTIONAL);
 			kfree(cmd);
···
 
 	if (cmd->cmd_dma) {
 		if (cmd->dma_type == CMD_DMA_SINGLE)
-			dma_unmap_single(&host->device, cmd->cmd_dma,
+			dma_unmap_single(host->device.parent, cmd->cmd_dma,
 					 cmd->dma_size, cmd->dma_dir);
 		else if (cmd->dma_type == CMD_DMA_PAGE)
-			dma_unmap_page(&host->device, cmd->cmd_dma,
+			dma_unmap_page(host->device.parent, cmd->cmd_dma,
 				       cmd->dma_size, cmd->dma_dir);
 		/* XXX: Check for CMD_DMA_NONE bug */
 		cmd->dma_type = CMD_DMA_NONE;
 		cmd->cmd_dma = 0;
 	}
 	if (cmd->sge_buffer) {
-		dma_unmap_sg(&host->device, cmd->sge_buffer,
+		dma_unmap_sg(host->device.parent, cmd->sge_buffer,
 			     cmd->dma_size, cmd->dma_dir);
 		cmd->sge_buffer = NULL;
 	}
···
 	struct sbp2_fwhost_info *hi = lu->hi;
 	int error;
 
-	lu->login_response = dma_alloc_coherent(&hi->host->device,
+	lu->login_response = dma_alloc_coherent(hi->host->device.parent,
 				     sizeof(struct sbp2_login_response),
 				     &lu->login_response_dma, GFP_KERNEL);
 	if (!lu->login_response)
 		goto alloc_fail;
 
-	lu->query_logins_orb = dma_alloc_coherent(&hi->host->device,
+	lu->query_logins_orb = dma_alloc_coherent(hi->host->device.parent,
 				     sizeof(struct sbp2_query_logins_orb),
 				     &lu->query_logins_orb_dma, GFP_KERNEL);
 	if (!lu->query_logins_orb)
 		goto alloc_fail;
 
-	lu->query_logins_response = dma_alloc_coherent(&hi->host->device,
+	lu->query_logins_response = dma_alloc_coherent(hi->host->device.parent,
 				     sizeof(struct sbp2_query_logins_response),
 				     &lu->query_logins_response_dma, GFP_KERNEL);
 	if (!lu->query_logins_response)
 		goto alloc_fail;
 
-	lu->reconnect_orb = dma_alloc_coherent(&hi->host->device,
+	lu->reconnect_orb = dma_alloc_coherent(hi->host->device.parent,
 				     sizeof(struct sbp2_reconnect_orb),
 				     &lu->reconnect_orb_dma, GFP_KERNEL);
 	if (!lu->reconnect_orb)
 		goto alloc_fail;
 
-	lu->logout_orb = dma_alloc_coherent(&hi->host->device,
+	lu->logout_orb = dma_alloc_coherent(hi->host->device.parent,
 				     sizeof(struct sbp2_logout_orb),
 				     &lu->logout_orb_dma, GFP_KERNEL);
 	if (!lu->logout_orb)
 		goto alloc_fail;
 
-	lu->login_orb = dma_alloc_coherent(&hi->host->device,
+	lu->login_orb = dma_alloc_coherent(hi->host->device.parent,
 				     sizeof(struct sbp2_login_orb),
 				     &lu->login_orb_dma, GFP_KERNEL);
 	if (!lu->login_orb)
···
 	list_del(&lu->lu_list);
 
 	if (lu->login_response)
-		dma_free_coherent(&hi->host->device,
+		dma_free_coherent(hi->host->device.parent,
 				    sizeof(struct sbp2_login_response),
 				    lu->login_response,
 				    lu->login_response_dma);
 	if (lu->login_orb)
-		dma_free_coherent(&hi->host->device,
+		dma_free_coherent(hi->host->device.parent,
 				    sizeof(struct sbp2_login_orb),
 				    lu->login_orb,
 				    lu->login_orb_dma);
 	if (lu->reconnect_orb)
-		dma_free_coherent(&hi->host->device,
+		dma_free_coherent(hi->host->device.parent,
 				    sizeof(struct sbp2_reconnect_orb),
 				    lu->reconnect_orb,
 				    lu->reconnect_orb_dma);
 	if (lu->logout_orb)
-		dma_free_coherent(&hi->host->device,
+		dma_free_coherent(hi->host->device.parent,
 				    sizeof(struct sbp2_logout_orb),
 				    lu->logout_orb,
 				    lu->logout_orb_dma);
 	if (lu->query_logins_orb)
-		dma_free_coherent(&hi->host->device,
+		dma_free_coherent(hi->host->device.parent,
 				    sizeof(struct sbp2_query_logins_orb),
 				    lu->query_logins_orb,
 				    lu->query_logins_orb_dma);
 	if (lu->query_logins_response)
-		dma_free_coherent(&hi->host->device,
+		dma_free_coherent(hi->host->device.parent,
 				    sizeof(struct sbp2_query_logins_response),
 				    lu->query_logins_response,
 				    lu->query_logins_response_dma);
···
 
 	cmd->dma_size = sgpnt[0].length;
 	cmd->dma_type = CMD_DMA_PAGE;
-	cmd->cmd_dma = dma_map_page(&hi->host->device,
+	cmd->cmd_dma = dma_map_page(hi->host->device.parent,
 				    sgpnt[0].page, sgpnt[0].offset,
 				    cmd->dma_size, cmd->dma_dir);
 
···
 				&cmd->scatter_gather_element[0];
 		u32 sg_count, sg_len;
 		dma_addr_t sg_addr;
-		int i, count = dma_map_sg(&hi->host->device, sgpnt, scsi_use_sg,
-					  dma_dir);
+		int i, count = dma_map_sg(hi->host->device.parent, sgpnt,
+					  scsi_use_sg, dma_dir);
 
 		cmd->dma_size = scsi_use_sg;
 		cmd->sge_buffer = sgpnt;
···
 	cmd->dma_dir = dma_dir;
 	cmd->dma_size = scsi_request_bufflen;
 	cmd->dma_type = CMD_DMA_SINGLE;
-	cmd->cmd_dma = dma_map_single(&hi->host->device, scsi_request_buffer,
+	cmd->cmd_dma = dma_map_single(hi->host->device.parent,
+				      scsi_request_buffer,
 				      cmd->dma_size, cmd->dma_dir);
 	orb->data_descriptor_hi = ORB_SET_NODE_ID(hi->host->node_id);
 	orb->misc |= ORB_SET_DIRECTION(orb_direction);
···
 	size_t length;
 	unsigned long flags;
 
-	dma_sync_single_for_device(&hi->host->device, cmd->command_orb_dma,
+	dma_sync_single_for_device(hi->host->device.parent,
+				   cmd->command_orb_dma,
 				   sizeof(struct sbp2_command_orb),
 				   DMA_TO_DEVICE);
-	dma_sync_single_for_device(&hi->host->device, cmd->sge_dma,
+	dma_sync_single_for_device(hi->host->device.parent, cmd->sge_dma,
 				   sizeof(cmd->scatter_gather_element),
 				   DMA_BIDIRECTIONAL);
 
···
 		 * The target's fetch agent may or may not have read this
 		 * previous ORB yet.
 		 */
-		dma_sync_single_for_cpu(&hi->host->device, last_orb_dma,
+		dma_sync_single_for_cpu(hi->host->device.parent, last_orb_dma,
 					sizeof(struct sbp2_command_orb),
 					DMA_TO_DEVICE);
 		last_orb->next_ORB_lo = cpu_to_be32(cmd->command_orb_dma);
 		wmb();
 		/* Tells hardware that this pointer is valid */
 		last_orb->next_ORB_hi = 0;
-		dma_sync_single_for_device(&hi->host->device, last_orb_dma,
+		dma_sync_single_for_device(hi->host->device.parent,
+					   last_orb_dma,
 					   sizeof(struct sbp2_command_orb),
 					   DMA_TO_DEVICE);
 		addr += SBP2_DOORBELL_OFFSET;
···
 	else
 		cmd = sbp2util_find_command_for_orb(lu, sb->ORB_offset_lo);
 	if (cmd) {
-		dma_sync_single_for_cpu(&hi->host->device, cmd->command_orb_dma,
+		dma_sync_single_for_cpu(hi->host->device.parent,
+					cmd->command_orb_dma,
 					sizeof(struct sbp2_command_orb),
 					DMA_TO_DEVICE);
-		dma_sync_single_for_cpu(&hi->host->device, cmd->sge_dma,
+		dma_sync_single_for_cpu(hi->host->device.parent, cmd->sge_dma,
 					sizeof(cmd->scatter_gather_element),
 					DMA_BIDIRECTIONAL);
 		/* Grab SCSI command pointers and check status. */
···
 	while (!list_empty(&lu->cmd_orb_inuse)) {
 		lh = lu->cmd_orb_inuse.next;
 		cmd = list_entry(lh, struct sbp2_command_info, list);
-		dma_sync_single_for_cpu(&hi->host->device, cmd->command_orb_dma,
+		dma_sync_single_for_cpu(hi->host->device.parent,
+					cmd->command_orb_dma,
 					sizeof(struct sbp2_command_orb),
 					DMA_TO_DEVICE);
-		dma_sync_single_for_cpu(&hi->host->device, cmd->sge_dma,
+		dma_sync_single_for_cpu(hi->host->device.parent, cmd->sge_dma,
 					sizeof(cmd->scatter_gather_element),
 					DMA_BIDIRECTIONAL);
 		sbp2util_mark_command_completed(lu, cmd);
···
 	spin_lock_irqsave(&lu->cmd_orb_lock, flags);
 	cmd = sbp2util_find_command_for_SCpnt(lu, SCpnt);
 	if (cmd) {
-		dma_sync_single_for_cpu(&hi->host->device,
+		dma_sync_single_for_cpu(hi->host->device.parent,
 					cmd->command_orb_dma,
 					sizeof(struct sbp2_command_orb),
 					DMA_TO_DEVICE);
-		dma_sync_single_for_cpu(&hi->host->device, cmd->sge_dma,
+		dma_sync_single_for_cpu(hi->host->device.parent,
+					cmd->sge_dma,
 					sizeof(cmd->scatter_gather_element),
 					DMA_BIDIRECTIONAL);
 		sbp2util_mark_command_completed(lu, cmd);
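
Note: the patch spells out hi->host->device.parent at every call site rather
than adding a helper. If one wanted to centralize the lookup, a small accessor
would do; a hypothetical sketch (the helper name is invented and is not part
of this patch or of the ieee1394 API):

	/* Hypothetical convenience helper: returns the device that must
	 * be handed to the DMA API for this host, i.e. the PCI device
	 * that is the parent of the host's embedded struct device. */
	static inline struct device *hpsb_dma_device(struct hpsb_host *host)
	{
		return host->device.parent;
	}

Call sites would then read dma_map_single(hpsb_dma_device(hi->host), ...)
and so on.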