ieee1394: sbp2: fix bogus dma mapping

The DMA mapping calls need to operate on the PCI device of the FireWire
controller (host->device.parent), not on the virtual FireWire host device
(&host->device). Problem found by Andreas Schwab, mistake pointed out by
Benjamin Herrenschmidt.
http://ozlabs.org/pipermail/linuxppc-dev/2006-December/029595.html
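
Background: struct hpsb_host embeds a struct device that exists only for
the driver model; it carries no DMA operations and no dma_mask of its own.
The device that actually masters the DMA is its parent, the PCI device of
the OHCI-1394 controller, which ohci1394 passes to hpsb_alloc_host(). On
platforms that translate bus addresses through an IOMMU (such as the
powerpc machine in the report above), mapping against the virtual host
device yields bogus bus addresses. A minimal sketch of the relationship
follows; the helper function is hypothetical, shown only to illustrate
which struct device must be handed to the DMA API:

	#include <linux/device.h>
	#include <linux/dma-mapping.h>

	struct hpsb_host {		/* abridged */
		struct device device;	/* virtual fw-host device: no DMA
					 * ops, no dma_mask of its own */
		/* ... */
	};

	/* Hypothetical helper, for illustration only: the device to hand
	 * to dma_map_*(), dma_sync_*() and dma_alloc_coherent(). */
	static inline struct device *hpsb_dma_device(struct hpsb_host *host)
	{
		/* device.parent is the controller's PCI device, i.e. the
		 * &pdev->dev that ohci1394 hands to hpsb_alloc_host() */
		return host->device.parent;
	}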

Signed-off-by: Stefan Richter <stefanr@s5r6.in-berlin.de>
Tested-by: Andreas Schwab <schwab@suse.de>

 drivers/ieee1394/sbp2.c | 73 ++++++++++++++++++++-----------------
 1 file changed, 40 insertions(+), 33 deletions(-)
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
--- a/drivers/ieee1394/sbp2.c
+++ b/drivers/ieee1394/sbp2.c
@@ -490,11 +490,11 @@
 			spin_unlock_irqrestore(&lu->cmd_orb_lock, flags);
 			return -ENOMEM;
 		}
-		cmd->command_orb_dma = dma_map_single(&hi->host->device,
+		cmd->command_orb_dma = dma_map_single(hi->host->device.parent,
 						      &cmd->command_orb,
 						      sizeof(struct sbp2_command_orb),
 						      DMA_TO_DEVICE);
-		cmd->sge_dma = dma_map_single(&hi->host->device,
+		cmd->sge_dma = dma_map_single(hi->host->device.parent,
 					      &cmd->scatter_gather_element,
 					      sizeof(cmd->scatter_gather_element),
 					      DMA_BIDIRECTIONAL);
@@ -516,10 +516,11 @@
 	if (!list_empty(&lu->cmd_orb_completed))
 		list_for_each_safe(lh, next, &lu->cmd_orb_completed) {
 			cmd = list_entry(lh, struct sbp2_command_info, list);
-			dma_unmap_single(&host->device, cmd->command_orb_dma,
+			dma_unmap_single(host->device.parent,
+					 cmd->command_orb_dma,
 					 sizeof(struct sbp2_command_orb),
 					 DMA_TO_DEVICE);
-			dma_unmap_single(&host->device, cmd->sge_dma,
+			dma_unmap_single(host->device.parent, cmd->sge_dma,
 					 sizeof(cmd->scatter_gather_element),
 					 DMA_BIDIRECTIONAL);
 			kfree(cmd);
@@ -602,17 +601,17 @@
 
 	if (cmd->cmd_dma) {
 		if (cmd->dma_type == CMD_DMA_SINGLE)
-			dma_unmap_single(&host->device, cmd->cmd_dma,
+			dma_unmap_single(host->device.parent, cmd->cmd_dma,
 					 cmd->dma_size, cmd->dma_dir);
 		else if (cmd->dma_type == CMD_DMA_PAGE)
-			dma_unmap_page(&host->device, cmd->cmd_dma,
+			dma_unmap_page(host->device.parent, cmd->cmd_dma,
 				       cmd->dma_size, cmd->dma_dir);
 		/* XXX: Check for CMD_DMA_NONE bug */
 		cmd->dma_type = CMD_DMA_NONE;
 		cmd->cmd_dma = 0;
 	}
 	if (cmd->sge_buffer) {
-		dma_unmap_sg(&host->device, cmd->sge_buffer,
+		dma_unmap_sg(host->device.parent, cmd->sge_buffer,
 			     cmd->dma_size, cmd->dma_dir);
 		cmd->sge_buffer = NULL;
 	}
@@ -837,37 +836,37 @@
 	struct sbp2_fwhost_info *hi = lu->hi;
 	int error;
 
-	lu->login_response = dma_alloc_coherent(&hi->host->device,
+	lu->login_response = dma_alloc_coherent(hi->host->device.parent,
 				     sizeof(struct sbp2_login_response),
 				     &lu->login_response_dma, GFP_KERNEL);
 	if (!lu->login_response)
 		goto alloc_fail;
 
-	lu->query_logins_orb = dma_alloc_coherent(&hi->host->device,
+	lu->query_logins_orb = dma_alloc_coherent(hi->host->device.parent,
 				     sizeof(struct sbp2_query_logins_orb),
 				     &lu->query_logins_orb_dma, GFP_KERNEL);
 	if (!lu->query_logins_orb)
 		goto alloc_fail;
 
-	lu->query_logins_response = dma_alloc_coherent(&hi->host->device,
+	lu->query_logins_response = dma_alloc_coherent(hi->host->device.parent,
 				     sizeof(struct sbp2_query_logins_response),
 				     &lu->query_logins_response_dma, GFP_KERNEL);
 	if (!lu->query_logins_response)
 		goto alloc_fail;
 
-	lu->reconnect_orb = dma_alloc_coherent(&hi->host->device,
+	lu->reconnect_orb = dma_alloc_coherent(hi->host->device.parent,
 				     sizeof(struct sbp2_reconnect_orb),
 				     &lu->reconnect_orb_dma, GFP_KERNEL);
 	if (!lu->reconnect_orb)
 		goto alloc_fail;
 
-	lu->logout_orb = dma_alloc_coherent(&hi->host->device,
+	lu->logout_orb = dma_alloc_coherent(hi->host->device.parent,
 				     sizeof(struct sbp2_logout_orb),
 				     &lu->logout_orb_dma, GFP_KERNEL);
 	if (!lu->logout_orb)
 		goto alloc_fail;
 
-	lu->login_orb = dma_alloc_coherent(&hi->host->device,
+	lu->login_orb = dma_alloc_coherent(hi->host->device.parent,
 				     sizeof(struct sbp2_login_orb),
 				     &lu->login_orb_dma, GFP_KERNEL);
 	if (!lu->login_orb)
@@ -930,32 +929,32 @@
 	list_del(&lu->lu_list);
 
 	if (lu->login_response)
-		dma_free_coherent(&hi->host->device,
+		dma_free_coherent(hi->host->device.parent,
 				  sizeof(struct sbp2_login_response),
 				  lu->login_response,
 				  lu->login_response_dma);
 	if (lu->login_orb)
-		dma_free_coherent(&hi->host->device,
+		dma_free_coherent(hi->host->device.parent,
 				  sizeof(struct sbp2_login_orb),
 				  lu->login_orb,
 				  lu->login_orb_dma);
 	if (lu->reconnect_orb)
-		dma_free_coherent(&hi->host->device,
+		dma_free_coherent(hi->host->device.parent,
 				  sizeof(struct sbp2_reconnect_orb),
 				  lu->reconnect_orb,
 				  lu->reconnect_orb_dma);
 	if (lu->logout_orb)
-		dma_free_coherent(&hi->host->device,
+		dma_free_coherent(hi->host->device.parent,
 				  sizeof(struct sbp2_logout_orb),
 				  lu->logout_orb,
 				  lu->logout_orb_dma);
 	if (lu->query_logins_orb)
-		dma_free_coherent(&hi->host->device,
+		dma_free_coherent(hi->host->device.parent,
 				  sizeof(struct sbp2_query_logins_orb),
 				  lu->query_logins_orb,
 				  lu->query_logins_orb_dma);
 	if (lu->query_logins_response)
-		dma_free_coherent(&hi->host->device,
+		dma_free_coherent(hi->host->device.parent,
 				  sizeof(struct sbp2_query_logins_response),
 				  lu->query_logins_response,
 				  lu->query_logins_response_dma);
@@ -1446,7 +1445,7 @@
 
 		cmd->dma_size = sgpnt[0].length;
 		cmd->dma_type = CMD_DMA_PAGE;
-		cmd->cmd_dma = dma_map_page(&hi->host->device,
+		cmd->cmd_dma = dma_map_page(hi->host->device.parent,
 					    sgpnt[0].page, sgpnt[0].offset,
 					    cmd->dma_size, cmd->dma_dir);
 
@@ -1458,8 +1457,8 @@
 				&cmd->scatter_gather_element[0];
 		u32 sg_count, sg_len;
 		dma_addr_t sg_addr;
-		int i, count = dma_map_sg(&hi->host->device, sgpnt, scsi_use_sg,
-					  dma_dir);
+		int i, count = dma_map_sg(hi->host->device.parent, sgpnt,
+					  scsi_use_sg, dma_dir);
 
 		cmd->dma_size = scsi_use_sg;
 		cmd->sge_buffer = sgpnt;
@@ -1509,7 +1508,8 @@
 	cmd->dma_dir = dma_dir;
 	cmd->dma_size = scsi_request_bufflen;
 	cmd->dma_type = CMD_DMA_SINGLE;
-	cmd->cmd_dma = dma_map_single(&hi->host->device, scsi_request_buffer,
+	cmd->cmd_dma = dma_map_single(hi->host->device.parent,
+				      scsi_request_buffer,
 				      cmd->dma_size, cmd->dma_dir);
 	orb->data_descriptor_hi = ORB_SET_NODE_ID(hi->host->node_id);
 	orb->misc |= ORB_SET_DIRECTION(orb_direction);
@@ -1628,9 +1626,10 @@
 	size_t length;
 	unsigned long flags;
 
-	dma_sync_single_for_device(&hi->host->device, cmd->command_orb_dma,
+	dma_sync_single_for_device(hi->host->device.parent,
+				   cmd->command_orb_dma,
 				   sizeof(struct sbp2_command_orb),
 				   DMA_TO_DEVICE);
-	dma_sync_single_for_device(&hi->host->device, cmd->sge_dma,
+	dma_sync_single_for_device(hi->host->device.parent, cmd->sge_dma,
 				   sizeof(cmd->scatter_gather_element),
 				   DMA_BIDIRECTIONAL);
@@ -1658,14 +1655,15 @@
 		 * The target's fetch agent may or may not have read this
 		 * previous ORB yet.
 		 */
-		dma_sync_single_for_cpu(&hi->host->device, last_orb_dma,
+		dma_sync_single_for_cpu(hi->host->device.parent, last_orb_dma,
 					sizeof(struct sbp2_command_orb),
 					DMA_TO_DEVICE);
 		last_orb->next_ORB_lo = cpu_to_be32(cmd->command_orb_dma);
 		wmb();
 		/* Tells hardware that this pointer is valid */
 		last_orb->next_ORB_hi = 0;
-		dma_sync_single_for_device(&hi->host->device, last_orb_dma,
+		dma_sync_single_for_device(hi->host->device.parent,
+					   last_orb_dma,
 					   sizeof(struct sbp2_command_orb),
 					   DMA_TO_DEVICE);
 		addr += SBP2_DOORBELL_OFFSET;
@@ -1794,10 +1790,11 @@
 	else
 		cmd = sbp2util_find_command_for_orb(lu, sb->ORB_offset_lo);
 	if (cmd) {
-		dma_sync_single_for_cpu(&hi->host->device, cmd->command_orb_dma,
+		dma_sync_single_for_cpu(hi->host->device.parent,
+					cmd->command_orb_dma,
 					sizeof(struct sbp2_command_orb),
 					DMA_TO_DEVICE);
-		dma_sync_single_for_cpu(&hi->host->device, cmd->sge_dma,
+		dma_sync_single_for_cpu(hi->host->device.parent, cmd->sge_dma,
 					sizeof(cmd->scatter_gather_element),
 					DMA_BIDIRECTIONAL);
 		/* Grab SCSI command pointers and check status. */
@@ -1926,10 +1921,11 @@
 	while (!list_empty(&lu->cmd_orb_inuse)) {
 		lh = lu->cmd_orb_inuse.next;
 		cmd = list_entry(lh, struct sbp2_command_info, list);
-		dma_sync_single_for_cpu(&hi->host->device, cmd->command_orb_dma,
+		dma_sync_single_for_cpu(hi->host->device.parent,
+					cmd->command_orb_dma,
 					sizeof(struct sbp2_command_orb),
 					DMA_TO_DEVICE);
-		dma_sync_single_for_cpu(&hi->host->device, cmd->sge_dma,
+		dma_sync_single_for_cpu(hi->host->device.parent, cmd->sge_dma,
 					sizeof(cmd->scatter_gather_element),
 					DMA_BIDIRECTIONAL);
 		sbp2util_mark_command_completed(lu, cmd);
@@ -2055,11 +2049,12 @@
 		spin_lock_irqsave(&lu->cmd_orb_lock, flags);
 		cmd = sbp2util_find_command_for_SCpnt(lu, SCpnt);
 		if (cmd) {
-			dma_sync_single_for_cpu(&hi->host->device,
+			dma_sync_single_for_cpu(hi->host->device.parent,
 						cmd->command_orb_dma,
 						sizeof(struct sbp2_command_orb),
 						DMA_TO_DEVICE);
-			dma_sync_single_for_cpu(&hi->host->device, cmd->sge_dma,
+			dma_sync_single_for_cpu(hi->host->device.parent,
+						cmd->sge_dma,
 						sizeof(cmd->scatter_gather_element),
 						DMA_BIDIRECTIONAL);
 			sbp2util_mark_command_completed(lu, cmd);
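
Note: the substitution above is mechanical but must be complete. In the
Linux DMA API, the struct device passed to dma_map_*() or
dma_alloc_coherent() must be the same one later passed to the matching
dma_sync_*(), dma_unmap_*() and dma_free_coherent() calls, which is why
every call site in sbp2.c switches to host->device.parent at once. A
sketch of the corrected pairing, with identifiers taken from the patched
file (not a verbatim excerpt; error handling omitted):

	/* map against the PCI device behind the FireWire host ... */
	cmd->command_orb_dma =
		dma_map_single(hi->host->device.parent, &cmd->command_orb,
			       sizeof(struct sbp2_command_orb),
			       DMA_TO_DEVICE);

	/* ... sync with the same device before the target reads it ... */
	dma_sync_single_for_device(hi->host->device.parent,
				   cmd->command_orb_dma,
				   sizeof(struct sbp2_command_orb),
				   DMA_TO_DEVICE);

	/* ... and unmap with the same device when the command is retired */
	dma_unmap_single(hi->host->device.parent, cmd->command_orb_dma,
			 sizeof(struct sbp2_command_orb), DMA_TO_DEVICE);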