
dma-mapping: add the device argument to dma_mapping_error()

Add per-device dma_mapping_ops support for CONFIG_X86_64, as the POWER
architecture does.

This enables us to cleanly fix the Calgary IOMMU issue where some devices
are not behind the IOMMU (http://lkml.org/lkml/2008/5/8/423).

I think that per-device dma_mapping_ops support would also be helpful for
the KVM people to support PCI passthrough, but Andi thinks that this makes
it difficult to support PCI passthrough (see the above thread). So I've
CC'ed the KVM camp. Comments are appreciated.

A pointer to dma_mapping_ops is added to struct dev_archdata. If the
pointer is non-NULL, the DMA operations in asm/dma-mapping.h use it. If
it's NULL, the system-wide dma_ops pointer is used as before.
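
The dispatch itself is small; on x86_64 it boils down to the get_dma_ops()
helper added in asm-x86/dma-mapping.h (see that hunk below):

    static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
    {
        if (unlikely(!dev) || !dev->archdata.dma_ops)
            return dma_ops;                 /* system-wide default */
        else
            return dev->archdata.dma_ops;   /* per-device override */
    }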

If it's useful for the KVM people, I plan to implement a mechanism to
register a hook that is called when a new PCI (or DMA-capable) device is
created (it works with hot plugging). It enables IOMMUs to set up
appropriate dma_mapping_ops per device; a rough sketch follows.
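
As a minimal sketch only (this hook mechanism is not part of this series,
and the my_iommu_* names are made up for illustration), an IOMMU driver
would do something like:

    /* Hypothetical callback, run when a DMA-capable device appears
     * (including hotplug); installs this IOMMU's per-device ops. */
    static int my_iommu_add_device(struct device *dev)
    {
        if (my_iommu_controls(dev))     /* illustrative helper */
            dev->archdata.dma_ops = &my_iommu_dma_ops;
        return 0;
    }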

The major obstacle is that dma_mapping_error, unlike the other DMA
operations, doesn't take a pointer to the device, so x86 can't have
per-device dma_mapping_ops. Note that all the POWER IOMMUs use the same
dma_mapping_error function, so this is not a problem for POWER, but the
x86 IOMMUs use different dma_mapping_error functions.

The first patch adds the device argument to dma_mapping_error. The patch
is trivial but large, since it touches lots of drivers and dma-mapping.h
in all the architectures.
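
The change in each caller is mechanical, e.g. (dev, buf and size stand in
for whatever the driver already passes to dma_map_single):

    addr = dma_map_single(dev, buf, size, DMA_TO_DEVICE);
    if (dma_mapping_error(dev, addr))   /* was: dma_mapping_error(addr) */
        return -ENOMEM;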

This patch:

dma_mapping_error() doesn't take a pointer to the device, unlike the other
DMA operations, so we can't have per-device dma_mapping_ops.

Note that POWER already has per-device dma_mapping_ops, but all the POWER
IOMMUs use the same dma_mapping_error function, whereas the x86 IOMMUs use
different ones and therefore need the device argument.

[akpm@linux-foundation.org: fix sge]
[akpm@linux-foundation.org: fix svc_rdma]
[akpm@linux-foundation.org: build fix]
[akpm@linux-foundation.org: fix bnx2x]
[akpm@linux-foundation.org: fix s2io]
[akpm@linux-foundation.org: fix pasemi_mac]
[akpm@linux-foundation.org: fix sdhci]
[akpm@linux-foundation.org: build fix]
[akpm@linux-foundation.org: fix sparc]
[akpm@linux-foundation.org: fix ibmvscsi]
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: Muli Ben-Yehuda <muli@il.ibm.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Avi Kivity <avi@qumranet.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by FUJITA Tomonori, committed by Linus Torvalds
8d8bb39b c485b465

+256 -210
+2 -2
Documentation/DMA-API.txt
···
 cache width is.

 int
-dma_mapping_error(dma_addr_t dma_addr)
+dma_mapping_error(struct device *dev, dma_addr_t dma_addr)

 int
-pci_dma_mapping_error(dma_addr_t dma_addr)
+pci_dma_mapping_error(struct pci_dev *hwdev, dma_addr_t dma_addr)

 In some circumstances dma_map_single and dma_map_page will fail to create
 a mapping. A driver can check for these errors by testing the returned
+1 -1
arch/arm/common/dmabounce.c
···
     /*
      * Trying to unmap an invalid mapping
      */
-    if (dma_mapping_error(dma_addr)) {
+    if (dma_mapping_error(dev, dma_addr)) {
         dev_err(dev, "Trying to unmap invalid mapping\n");
         return;
     }
+3 -2
arch/ia64/hp/common/hwsw_iommu.c
···
 }

 int
-hwsw_dma_mapping_error (dma_addr_t dma_addr)
+hwsw_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
-    return hwiommu_dma_mapping_error (dma_addr) || swiotlb_dma_mapping_error(dma_addr);
+    return hwiommu_dma_mapping_error(dev, dma_addr) ||
+           swiotlb_dma_mapping_error(dev, dma_addr);
 }

 EXPORT_SYMBOL(hwsw_dma_mapping_error);
+1 -1
arch/ia64/hp/common/sba_iommu.c
···
 }

 int
-sba_dma_mapping_error (dma_addr_t dma_addr)
+sba_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
     return 0;
 }
+1 -1
arch/ia64/sn/pci/pci_dma.c
···
 }
 EXPORT_SYMBOL(sn_dma_sync_sg_for_device);

-int sn_dma_mapping_error(dma_addr_t dma_addr)
+int sn_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
     return 0;
 }
+1 -1
arch/mips/mm/dma-default.c
···

 EXPORT_SYMBOL(dma_sync_sg_for_device);

-int dma_mapping_error(dma_addr_t dma_addr)
+int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
     return 0;
 }
+1 -1
arch/powerpc/platforms/cell/celleb_scc_pciex.c
···

     dummy_page_da = dma_map_single(bus->phb->parent, dummy_page_va,
                                    PAGE_SIZE, DMA_FROM_DEVICE);
-    if (dma_mapping_error(dummy_page_da)) {
+    if (dma_mapping_error(bus->phb->parent, dummy_page_da)) {
         pr_err("PCIEX:Map dummy page failed.\n");
         kfree(dummy_page_va);
         return -1;
+1 -1
arch/powerpc/platforms/cell/spider-pci.c
···

     dummy_page_da = dma_map_single(phb->parent, dummy_page_va,
                                    PAGE_SIZE, DMA_FROM_DEVICE);
-    if (dma_mapping_error(dummy_page_da)) {
+    if (dma_mapping_error(phb->parent, dummy_page_da)) {
         pr_err("SPIDER-IOWA:Map dummy page filed.\n");
         kfree(dummy_page_va);
         return -1;
+1 -1
arch/powerpc/platforms/iseries/mf.c
···
         count = 256 - off;

     dma_addr = iseries_hv_map(page, off + count, DMA_FROM_DEVICE);
-    if (dma_mapping_error(dma_addr))
+    if (dma_mapping_error(NULL, dma_addr))
         return -ENOMEM;
     memset(page, 0, off + count);
     memset(&vsp_cmd, 0, sizeof(vsp_cmd));
+1 -1
arch/x86/kernel/pci-calgary_64.c
···
     return ret;
 }

-static const struct dma_mapping_ops calgary_dma_ops = {
+static struct dma_mapping_ops calgary_dma_ops = {
     .alloc_coherent = calgary_alloc_coherent,
     .map_single = calgary_map_single,
     .unmap_single = calgary_unmap_single,
+16 -11
arch/x86/kernel/pci-dma.c
···

 static int forbid_dac __read_mostly;

-const struct dma_mapping_ops *dma_ops;
+struct dma_mapping_ops *dma_ops;
 EXPORT_SYMBOL(dma_ops);

 static int iommu_sac_force __read_mostly;
···

 int dma_supported(struct device *dev, u64 mask)
 {
+    struct dma_mapping_ops *ops = get_dma_ops(dev);
+
 #ifdef CONFIG_PCI
     if (mask > 0xffffffff && forbid_dac > 0) {
         dev_info(dev, "PCI: Disallowing DAC for device\n");
···
     }
 #endif

-    if (dma_ops->dma_supported)
-        return dma_ops->dma_supported(dev, mask);
+    if (ops->dma_supported)
+        return ops->dma_supported(dev, mask);

     /* Copied from i386. Doesn't make much sense, because it will
        only work for pci_alloc_coherent.
···
 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
                    gfp_t gfp)
 {
+    struct dma_mapping_ops *ops = get_dma_ops(dev);
     void *memory = NULL;
     struct page *page;
     unsigned long dma_mask = 0;
···
         /* Let low level make its own zone decisions */
         gfp &= ~(GFP_DMA32|GFP_DMA);

-        if (dma_ops->alloc_coherent)
-            return dma_ops->alloc_coherent(dev, size,
+        if (ops->alloc_coherent)
+            return ops->alloc_coherent(dev, size,
                                            dma_handle, gfp);
         return NULL;
     }
···
         }
     }

-    if (dma_ops->alloc_coherent) {
+    if (ops->alloc_coherent) {
         free_pages((unsigned long)memory, get_order(size));
         gfp &= ~(GFP_DMA|GFP_DMA32);
-        return dma_ops->alloc_coherent(dev, size, dma_handle, gfp);
+        return ops->alloc_coherent(dev, size, dma_handle, gfp);
     }

-    if (dma_ops->map_simple) {
-        *dma_handle = dma_ops->map_simple(dev, virt_to_phys(memory),
+    if (ops->map_simple) {
+        *dma_handle = ops->map_simple(dev, virt_to_phys(memory),
                                           size,
                                           PCI_DMA_BIDIRECTIONAL);
         if (*dma_handle != bad_dma_address)
···
 void dma_free_coherent(struct device *dev, size_t size,
                        void *vaddr, dma_addr_t bus)
 {
+    struct dma_mapping_ops *ops = get_dma_ops(dev);
+
     int order = get_order(size);
     WARN_ON(irqs_disabled());   /* for portability */
     if (dma_release_coherent(dev, order, vaddr))
         return;
-    if (dma_ops->unmap_single)
-        dma_ops->unmap_single(dev, bus, size, 0);
+    if (ops->unmap_single)
+        ops->unmap_single(dev, bus, size, 0);
     free_pages((unsigned long)vaddr, order);
 }
 EXPORT_SYMBOL(dma_free_coherent);
+1 -2
arch/x86/kernel/pci-gart_64.c
···

 extern int agp_amd64_init(void);

-static const struct dma_mapping_ops gart_dma_ops = {
-    .mapping_error = NULL,
+static struct dma_mapping_ops gart_dma_ops = {
     .map_single = gart_map_single,
     .map_simple = gart_map_simple,
     .unmap_single = gart_unmap_single,
+1 -13
arch/x86/kernel/pci-nommu.c
···
     return nents;
 }

-/* Make sure we keep the same behaviour */
-static int nommu_mapping_error(dma_addr_t dma_addr)
-{
-#ifdef CONFIG_X86_32
-    return 0;
-#else
-    return (dma_addr == bad_dma_address);
-#endif
-}
-
-
-const struct dma_mapping_ops nommu_dma_ops = {
+struct dma_mapping_ops nommu_dma_ops = {
     .map_single = nommu_map_single,
     .map_sg = nommu_map_sg,
-    .mapping_error = nommu_mapping_error,
     .is_phys = 1,
 };

+1 -1
arch/x86/kernel/pci-swiotlb_64.c
···
     return swiotlb_map_single(hwdev, phys_to_virt(paddr), size, direction);
 }

-const struct dma_mapping_ops swiotlb_dma_ops = {
+struct dma_mapping_ops swiotlb_dma_ops = {
     .mapping_error = swiotlb_dma_mapping_error,
     .alloc_coherent = swiotlb_alloc_coherent,
     .free_coherent = swiotlb_free_coherent,
+1 -1
drivers/firewire/fw-iso.c
···

         address = dma_map_page(card->device, buffer->pages[i],
                                0, PAGE_SIZE, direction);
-        if (dma_mapping_error(address)) {
+        if (dma_mapping_error(card->device, address)) {
             __free_page(buffer->pages[i]);
             goto out_pages;
         }
+1 -1
drivers/firewire/fw-ohci.c
···
         payload_bus =
             dma_map_single(ohci->card.device, packet->payload,
                            packet->payload_length, DMA_TO_DEVICE);
-        if (dma_mapping_error(payload_bus)) {
+        if (dma_mapping_error(ohci->card.device, payload_bus)) {
             packet->ack = RCODE_SEND_ERROR;
             return -1;
         }
+4 -4
drivers/firewire/fw-sbp2.c
···
     orb->response_bus =
         dma_map_single(device->card->device, &orb->response,
                        sizeof(orb->response), DMA_FROM_DEVICE);
-    if (dma_mapping_error(orb->response_bus))
+    if (dma_mapping_error(device->card->device, orb->response_bus))
         goto fail_mapping_response;

     orb->request.response.high = 0;
···
     orb->base.request_bus =
         dma_map_single(device->card->device, &orb->request,
                        sizeof(orb->request), DMA_TO_DEVICE);
-    if (dma_mapping_error(orb->base.request_bus))
+    if (dma_mapping_error(device->card->device, orb->base.request_bus))
         goto fail_mapping_request;

     sbp2_send_orb(&orb->base, lu, node_id, generation,
···
     orb->page_table_bus =
         dma_map_single(device->card->device, orb->page_table,
                        sizeof(orb->page_table), DMA_TO_DEVICE);
-    if (dma_mapping_error(orb->page_table_bus))
+    if (dma_mapping_error(device->card->device, orb->page_table_bus))
         goto fail_page_table;

     /*
···
     orb->base.request_bus =
         dma_map_single(device->card->device, &orb->request,
                        sizeof(orb->request), DMA_TO_DEVICE);
-    if (dma_mapping_error(orb->base.request_bus))
+    if (dma_mapping_error(device->card->device, orb->base.request_bus))
         goto out;

     sbp2_send_orb(&orb->base, lu, lu->tgt->node_id, lu->generation,
+1 -1
drivers/infiniband/hw/ipath/ipath_sdma.c
···

     addr = dma_map_single(&dd->pcidev->dev, tx->txreq.map_addr,
                           tx->map_len, DMA_TO_DEVICE);
-    if (dma_mapping_error(addr)) {
+    if (dma_mapping_error(&dd->pcidev->dev, addr)) {
         ret = -EIO;
         goto unlock;
     }
+3 -3
drivers/infiniband/hw/ipath/ipath_user_sdma.c
···

         dma_addr = dma_map_page(&dd->pcidev->dev, page, 0, len,
                                 DMA_TO_DEVICE);
-        if (dma_mapping_error(dma_addr)) {
+        if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
             ret = -ENOMEM;
             goto free_unmap;
         }
···
                 pages[j], 0, flen, DMA_TO_DEVICE);
             unsigned long fofs = addr & ~PAGE_MASK;

-            if (dma_mapping_error(dma_addr)) {
+            if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
                 ret = -ENOMEM;
                 goto done;
             }
···
     if (page) {
         dma_addr = dma_map_page(&dd->pcidev->dev,
                                 page, 0, len, DMA_TO_DEVICE);
-        if (dma_mapping_error(dma_addr)) {
+        if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
             ret = -ENOMEM;
             goto free_pbc;
         }
+1 -1
drivers/infiniband/hw/mthca/mthca_eq.c
···
         return -ENOMEM;
     dev->eq_table.icm_dma = pci_map_page(dev->pdev, dev->eq_table.icm_page, 0,
                                          PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-    if (pci_dma_mapping_error(dev->eq_table.icm_dma)) {
+    if (pci_dma_mapping_error(dev->pdev, dev->eq_table.icm_dma)) {
         __free_page(dev->eq_table.icm_page);
         return -ENOMEM;
     }
+1 -1
drivers/media/dvb/pluto2/pluto2.c
···
     pluto->dma_addr = pci_map_single(pluto->pdev, pluto->dma_buf,
             TS_DMA_BYTES, PCI_DMA_FROMDEVICE);

-    return pci_dma_mapping_error(pluto->dma_addr);
+    return pci_dma_mapping_error(pluto->pdev, pluto->dma_addr);
 }

 static void pluto_dma_unmap(struct pluto *pluto)
+2 -2
drivers/mmc/host/sdhci.c
···

     host->align_addr = dma_map_single(mmc_dev(host->mmc),
         host->align_buffer, 128 * 4, direction);
-    if (dma_mapping_error(host->align_addr))
+    if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr))
         goto fail;
     BUG_ON(host->align_addr & 0x3);

···

     host->adma_addr = dma_map_single(mmc_dev(host->mmc),
         host->adma_desc, (128 * 2 + 1) * 4, DMA_TO_DEVICE);
-    if (dma_mapping_error(host->align_addr))
+    if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr))
         goto unmap_entries;
     BUG_ON(host->adma_addr & 0x3);

+2 -2
drivers/net/arm/ep93xx_eth.c
···
         goto err;

     d = dma_map_single(NULL, page, PAGE_SIZE, DMA_FROM_DEVICE);
-    if (dma_mapping_error(d)) {
+    if (dma_mapping_error(NULL, d)) {
         free_page((unsigned long)page);
         goto err;
     }
···
         goto err;

     d = dma_map_single(NULL, page, PAGE_SIZE, DMA_TO_DEVICE);
-    if (dma_mapping_error(d)) {
+    if (dma_mapping_error(NULL, d)) {
         free_page((unsigned long)page);
         goto err;
     }
+2 -2
drivers/net/bnx2x_main.c
···

     mapping = pci_map_page(bp->pdev, page, 0, BCM_PAGE_SIZE*PAGES_PER_SGE,
                            PCI_DMA_FROMDEVICE);
-    if (unlikely(dma_mapping_error(mapping))) {
+    if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
         __free_pages(page, PAGES_PER_SGE_SHIFT);
         return -ENOMEM;
     }
···

     mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
                              PCI_DMA_FROMDEVICE);
-    if (unlikely(dma_mapping_error(mapping))) {
+    if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
         dev_kfree_skb(skb);
         return -ENOMEM;
     }
+1 -1
drivers/net/cxgb3/sge.c
···
     dma_addr_t mapping;

     mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE);
-    if (unlikely(pci_dma_mapping_error(mapping)))
+    if (unlikely(pci_dma_mapping_error(pdev, mapping)))
         return -ENOMEM;

     pci_unmap_addr_set(sd, dma_addr, mapping);
+1 -1
drivers/net/e100.c
···
     rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
         RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);

-    if (pci_dma_mapping_error(rx->dma_addr)) {
+    if (pci_dma_mapping_error(nic->pdev, rx->dma_addr)) {
         dev_kfree_skb_any(rx->skb);
         rx->skb = NULL;
         rx->dma_addr = 0;
+2 -2
drivers/net/e1000e/ethtool.c
···
         tx_ring->buffer_info[i].dma =
             pci_map_single(pdev, skb->data, skb->len,
                            PCI_DMA_TODEVICE);
-        if (pci_dma_mapping_error(tx_ring->buffer_info[i].dma)) {
+        if (pci_dma_mapping_error(pdev, tx_ring->buffer_info[i].dma)) {
             ret_val = 4;
             goto err_nomem;
         }
···
         rx_ring->buffer_info[i].dma =
             pci_map_single(pdev, skb->data, 2048,
                            PCI_DMA_FROMDEVICE);
-        if (pci_dma_mapping_error(rx_ring->buffer_info[i].dma)) {
+        if (pci_dma_mapping_error(pdev, rx_ring->buffer_info[i].dma)) {
             ret_val = 8;
             goto err_nomem;
         }
+6 -5
drivers/net/e1000e/netdev.c
···
         buffer_info->dma = pci_map_single(pdev, skb->data,
                                           adapter->rx_buffer_len,
                                           PCI_DMA_FROMDEVICE);
-        if (pci_dma_mapping_error(buffer_info->dma)) {
+        if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
             dev_err(&pdev->dev, "RX DMA map failed\n");
             adapter->rx_dma_failed++;
             break;
···
                                            ps_page->page,
                                            0, PAGE_SIZE,
                                            PCI_DMA_FROMDEVICE);
-                if (pci_dma_mapping_error(ps_page->dma)) {
+                if (pci_dma_mapping_error(pdev, ps_page->dma)) {
                     dev_err(&adapter->pdev->dev,
                             "RX DMA page map failed\n");
                     adapter->rx_dma_failed++;
···
         buffer_info->dma = pci_map_single(pdev, skb->data,
                                           adapter->rx_ps_bsize0,
                                           PCI_DMA_FROMDEVICE);
-        if (pci_dma_mapping_error(buffer_info->dma)) {
+        if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
             dev_err(&pdev->dev, "RX DMA map failed\n");
             adapter->rx_dma_failed++;
             /* cleanup skb */
···
                                           skb->data + offset,
                                           size,
                                           PCI_DMA_TODEVICE);
-        if (pci_dma_mapping_error(buffer_info->dma)) {
+        if (pci_dma_mapping_error(adapter->pdev, buffer_info->dma)) {
             dev_err(&adapter->pdev->dev, "TX DMA map failed\n");
             adapter->tx_dma_failed++;
             return -1;
···
                                               offset,
                                               size,
                                               PCI_DMA_TODEVICE);
-            if (pci_dma_mapping_error(buffer_info->dma)) {
+            if (pci_dma_mapping_error(adapter->pdev,
+                                      buffer_info->dma)) {
                 dev_err(&adapter->pdev->dev,
                         "TX DMA page map failed\n");
                 adapter->tx_dma_failed++;
+20 -18
drivers/net/ibmveth.c
···
     dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
             pool->buff_size, DMA_FROM_DEVICE);

-    if (dma_mapping_error(dma_addr))
+    if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
         goto failure;

     pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
···
         pool->consumer_index = pool->size - 1;
     else
         pool->consumer_index--;
-    if (!dma_mapping_error(dma_addr))
+    if (!dma_mapping_error(&adapter->vdev->dev, dma_addr))
         dma_unmap_single(&adapter->vdev->dev,
                 pool->dma_addr[index], pool->buff_size,
                 DMA_FROM_DEVICE);
···
 static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
 {
     int i;
+    struct device *dev = &adapter->vdev->dev;

     if(adapter->buffer_list_addr != NULL) {
-        if(!dma_mapping_error(adapter->buffer_list_dma)) {
-            dma_unmap_single(&adapter->vdev->dev,
-                    adapter->buffer_list_dma, 4096,
+        if (!dma_mapping_error(dev, adapter->buffer_list_dma)) {
+            dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
                     DMA_BIDIRECTIONAL);
             adapter->buffer_list_dma = DMA_ERROR_CODE;
         }
···
     }

     if(adapter->filter_list_addr != NULL) {
-        if(!dma_mapping_error(adapter->filter_list_dma)) {
-            dma_unmap_single(&adapter->vdev->dev,
-                    adapter->filter_list_dma, 4096,
+        if (!dma_mapping_error(dev, adapter->filter_list_dma)) {
+            dma_unmap_single(dev, adapter->filter_list_dma, 4096,
                     DMA_BIDIRECTIONAL);
             adapter->filter_list_dma = DMA_ERROR_CODE;
         }
···
     }

     if(adapter->rx_queue.queue_addr != NULL) {
-        if(!dma_mapping_error(adapter->rx_queue.queue_dma)) {
-            dma_unmap_single(&adapter->vdev->dev,
+        if (!dma_mapping_error(dev, adapter->rx_queue.queue_dma)) {
+            dma_unmap_single(dev,
                     adapter->rx_queue.queue_dma,
                     adapter->rx_queue.queue_len,
                     DMA_BIDIRECTIONAL);
···
     int rc;
     union ibmveth_buf_desc rxq_desc;
     int i;
+    struct device *dev;

     ibmveth_debug_printk("open starting\n");
···
         return -ENOMEM;
     }

-    adapter->buffer_list_dma = dma_map_single(&adapter->vdev->dev,
+    dev = &adapter->vdev->dev;
+
+    adapter->buffer_list_dma = dma_map_single(dev,
             adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
-    adapter->filter_list_dma = dma_map_single(&adapter->vdev->dev,
+    adapter->filter_list_dma = dma_map_single(dev,
             adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);
-    adapter->rx_queue.queue_dma = dma_map_single(&adapter->vdev->dev,
+    adapter->rx_queue.queue_dma = dma_map_single(dev,
             adapter->rx_queue.queue_addr,
             adapter->rx_queue.queue_len, DMA_BIDIRECTIONAL);

-    if((dma_mapping_error(adapter->buffer_list_dma) ) ||
-       (dma_mapping_error(adapter->filter_list_dma)) ||
-       (dma_mapping_error(adapter->rx_queue.queue_dma))) {
+    if ((dma_mapping_error(dev, adapter->buffer_list_dma)) ||
+        (dma_mapping_error(dev, adapter->filter_list_dma)) ||
+        (dma_mapping_error(dev, adapter->rx_queue.queue_dma))) {
         ibmveth_error_printk("unable to map filter or buffer list pages\n");
         ibmveth_cleanup(adapter);
         napi_disable(&adapter->napi);
···
     adapter->bounce_buffer_dma =
         dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer,
             netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL);
-    if (dma_mapping_error(adapter->bounce_buffer_dma)) {
+    if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
         ibmveth_error_printk("unable to map bounce buffer\n");
         ibmveth_cleanup(adapter);
         napi_disable(&adapter->napi);
···
         buf[1] = 0;
     }

-    if (dma_mapping_error(data_dma_addr)) {
+    if (dma_mapping_error(&adapter->vdev->dev, data_dma_addr)) {
         if (!firmware_has_feature(FW_FEATURE_CMO))
             ibmveth_error_printk("tx: unable to map xmit buffer\n");
         skb_copy_from_linear_data(skb, adapter->bounce_buffer,
+2 -2
drivers/net/iseries_veth.c
···
     msg->data.addr[0] = dma_map_single(port->dev, skb->data,
                 skb->len, DMA_TO_DEVICE);

-    if (dma_mapping_error(msg->data.addr[0]))
+    if (dma_mapping_error(port->dev, msg->data.addr[0]))
         goto recycle_and_drop;

     msg->dev = port->dev;
···
     dma_address = msg->data.addr[0];
     dma_length = msg->data.len[0];

-    if (!dma_mapping_error(dma_address))
+    if (!dma_mapping_error(msg->dev, dma_address))
         dma_unmap_single(msg->dev, dma_address, dma_length,
                 DMA_TO_DEVICE);

+1 -1
drivers/net/mlx4/eq.c
···
         return -ENOMEM;
     priv->eq_table.icm_dma = pci_map_page(dev->pdev, priv->eq_table.icm_page, 0,
                                           PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-    if (pci_dma_mapping_error(priv->eq_table.icm_dma)) {
+    if (pci_dma_mapping_error(dev->pdev, priv->eq_table.icm_dma)) {
         __free_page(priv->eq_table.icm_page);
         return -ENOMEM;
     }
+3 -3
drivers/net/pasemi_mac.c
···
                  mac->bufsz - LOCAL_SKB_ALIGN,
                  PCI_DMA_FROMDEVICE);

-        if (unlikely(dma_mapping_error(dma))) {
+        if (unlikely(pci_dma_mapping_error(mac->dma_pdev, dma))) {
             dev_kfree_skb_irq(info->skb);
             break;
         }
···
     map[0] = pci_map_single(mac->dma_pdev, skb->data, skb_headlen(skb),
                 PCI_DMA_TODEVICE);
     map_size[0] = skb_headlen(skb);
-    if (dma_mapping_error(map[0]))
+    if (pci_dma_mapping_error(mac->dma_pdev, map[0]))
         goto out_err_nolock;

     for (i = 0; i < nfrags; i++) {
···
                     frag->page_offset, frag->size,
                     PCI_DMA_TODEVICE);
         map_size[i+1] = frag->size;
-        if (dma_mapping_error(map[i+1])) {
+        if (pci_dma_mapping_error(mac->dma_pdev, map[i+1])) {
             nfrags = i;
             goto out_err_nolock;
         }
+6 -6
drivers/net/qla3xxx.c
···
                      qdev->lrg_buffer_len -
                      QL_HEADER_SPACE,
                      PCI_DMA_FROMDEVICE);
-            err = pci_dma_mapping_error(map);
+            err = pci_dma_mapping_error(qdev->pdev, map);
             if(err) {
                 printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
                        qdev->ndev->name, err);
···
                      QL_HEADER_SPACE,
                      PCI_DMA_FROMDEVICE);

-            err = pci_dma_mapping_error(map);
+            err = pci_dma_mapping_error(qdev->pdev, map);
             if(err) {
                 printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
                        qdev->ndev->name, err);
···
      */
     map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);

-    err = pci_dma_mapping_error(map);
+    err = pci_dma_mapping_error(qdev->pdev, map);
     if(err) {
         printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
                qdev->ndev->name, err);
···
                  sizeof(struct oal),
                  PCI_DMA_TODEVICE);

-        err = pci_dma_mapping_error(map);
+        err = pci_dma_mapping_error(qdev->pdev, map);
         if(err) {

             printk(KERN_ERR "%s: PCI mapping outbound address list with error: %d\n",
···
                  frag->page_offset, frag->size,
                  PCI_DMA_TODEVICE);

-        err = pci_dma_mapping_error(map);
+        err = pci_dma_mapping_error(qdev->pdev, map);
         if(err) {
             printk(KERN_ERR "%s: PCI mapping frags failed with error: %d\n",
                    qdev->ndev->name, err);
···
                      QL_HEADER_SPACE,
                      PCI_DMA_FROMDEVICE);

-            err = pci_dma_mapping_error(map);
+            err = pci_dma_mapping_error(qdev->pdev, map);
             if(err) {
                 printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
                        qdev->ndev->name, err);
+27 -21
drivers/net/s2io.c
···
  * Return Value:
  *  SUCCESS on success or an appropriate -ve value on failure.
  */
-
-static int fill_rx_buffers(struct ring_info *ring, int from_card_up)
+static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring,
+               int from_card_up)
 {
     struct sk_buff *skb;
     struct RxD_t *rxdp;
···
                 rxdp1->Buffer0_ptr = pci_map_single
                     (ring->pdev, skb->data, size - NET_IP_ALIGN,
                     PCI_DMA_FROMDEVICE);
-                if(pci_dma_mapping_error(rxdp1->Buffer0_ptr))
+                if (pci_dma_mapping_error(nic->pdev,
+                        rxdp1->Buffer0_ptr))
                     goto pci_map_failed;

                 rxdp->Control_2 =
···
                 rxdp3->Buffer0_ptr =
                     pci_map_single(ring->pdev, ba->ba_0,
                         BUF0_LEN, PCI_DMA_FROMDEVICE);
-                if (pci_dma_mapping_error(rxdp3->Buffer0_ptr))
+                if (pci_dma_mapping_error(nic->pdev,
+                        rxdp3->Buffer0_ptr))
                     goto pci_map_failed;
             } else
                 pci_dma_sync_single_for_device(ring->pdev,
···
                 (ring->pdev, skb->data, ring->mtu + 4,
                 PCI_DMA_FROMDEVICE);

-            if (pci_dma_mapping_error(rxdp3->Buffer2_ptr))
+            if (pci_dma_mapping_error(nic->pdev,
+                    rxdp3->Buffer2_ptr))
                 goto pci_map_failed;

             if (from_card_up) {
···
                     ba->ba_1, BUF1_LEN,
                     PCI_DMA_FROMDEVICE);

-                if (pci_dma_mapping_error
-                    (rxdp3->Buffer1_ptr)) {
+                if (pci_dma_mapping_error(nic->pdev,
+                        rxdp3->Buffer1_ptr)) {
                     pci_unmap_single
                         (ring->pdev,
                         (dma_addr_t)(unsigned long)
···
     }
 }

-static int s2io_chk_rx_buffers(struct ring_info *ring)
+static int s2io_chk_rx_buffers(struct s2io_nic *nic, struct ring_info *ring)
 {
-    if (fill_rx_buffers(ring, 0) == -ENOMEM) {
+    if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
         DBG_PRINT(INFO_DBG, "%s:Out of memory", ring->dev->name);
         DBG_PRINT(INFO_DBG, " in Rx Intr!!\n");
     }
···
         return 0;

     pkts_processed = rx_intr_handler(ring, budget);
-    s2io_chk_rx_buffers(ring);
+    s2io_chk_rx_buffers(nic, ring);

     if (pkts_processed < budget_org) {
         netif_rx_complete(dev, napi);
···
     for (i = 0; i < config->rx_ring_num; i++) {
         ring = &mac_control->rings[i];
         ring_pkts_processed = rx_intr_handler(ring, budget);
-        s2io_chk_rx_buffers(ring);
+        s2io_chk_rx_buffers(nic, ring);
         pkts_processed += ring_pkts_processed;
         budget -= ring_pkts_processed;
         if (budget <= 0)
···
         rx_intr_handler(&mac_control->rings[i], 0);

     for (i = 0; i < config->rx_ring_num; i++) {
-        if (fill_rx_buffers(&mac_control->rings[i], 0) == -ENOMEM) {
+        if (fill_rx_buffers(nic, &mac_control->rings[i], 0) ==
+                    -ENOMEM) {
             DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
             DBG_PRINT(INFO_DBG, " in Rx Netpoll!!\n");
             break;
···
         txdp->Buffer_Pointer = pci_map_single(sp->pdev,
                     fifo->ufo_in_band_v,
                     sizeof(u64), PCI_DMA_TODEVICE);
-        if (pci_dma_mapping_error(txdp->Buffer_Pointer))
+        if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer))
             goto pci_map_failed;
         txdp++;
     }

     txdp->Buffer_Pointer = pci_map_single
         (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
-    if (pci_dma_mapping_error(txdp->Buffer_Pointer))
+    if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer))
         goto pci_map_failed;

     txdp->Host_Control = (unsigned long) skb;
···
         netif_rx_schedule(dev, &ring->napi);
     } else {
         rx_intr_handler(ring, 0);
-        s2io_chk_rx_buffers(ring);
+        s2io_chk_rx_buffers(sp, ring);
     }

     return IRQ_HANDLED;
···
      */
     if (!config->napi) {
         for (i = 0; i < config->rx_ring_num; i++)
-            s2io_chk_rx_buffers(&mac_control->rings[i]);
+            s2io_chk_rx_buffers(sp, &mac_control->rings[i]);
     }
     writeq(sp->general_int_mask, &bar0->general_int_mask);
     readl(&bar0->general_int_status);
···
             pci_map_single( sp->pdev, (*skb)->data,
                 size - NET_IP_ALIGN,
                 PCI_DMA_FROMDEVICE);
-        if (pci_dma_mapping_error(rxdp1->Buffer0_ptr))
+        if (pci_dma_mapping_error(sp->pdev, rxdp1->Buffer0_ptr))
             goto memalloc_failed;
         rxdp->Host_Control = (unsigned long) (*skb);
     }
···
             pci_map_single(sp->pdev, (*skb)->data,
                 dev->mtu + 4,
                 PCI_DMA_FROMDEVICE);
-        if (pci_dma_mapping_error(rxdp3->Buffer2_ptr))
+        if (pci_dma_mapping_error(sp->pdev, rxdp3->Buffer2_ptr))
             goto memalloc_failed;
         rxdp3->Buffer0_ptr = *temp0 =
             pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN,
                 PCI_DMA_FROMDEVICE);
-        if (pci_dma_mapping_error(rxdp3->Buffer0_ptr)) {
+        if (pci_dma_mapping_error(sp->pdev,
+                rxdp3->Buffer0_ptr)) {
             pci_unmap_single (sp->pdev,
                 (dma_addr_t)rxdp3->Buffer2_ptr,
                 dev->mtu + 4, PCI_DMA_FROMDEVICE);
···
         rxdp3->Buffer1_ptr = *temp1 =
             pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
                 PCI_DMA_FROMDEVICE);
-        if (pci_dma_mapping_error(rxdp3->Buffer1_ptr)) {
+        if (pci_dma_mapping_error(sp->pdev,
+                rxdp3->Buffer1_ptr)) {
             pci_unmap_single (sp->pdev,
                 (dma_addr_t)rxdp3->Buffer0_ptr,
                 BUF0_LEN, PCI_DMA_FROMDEVICE);
···
     for (i = 0; i < config->rx_ring_num; i++) {
         mac_control->rings[i].mtu = dev->mtu;
-        ret = fill_rx_buffers(&mac_control->rings[i], 1);
+        ret = fill_rx_buffers(sp, &mac_control->rings[i], 1);
         if (ret) {
             DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
                       dev->name);
+2 -2
drivers/net/sfc/rx.c
···
                       rx_buf->data, rx_buf->len,
                       PCI_DMA_FROMDEVICE);

-    if (unlikely(pci_dma_mapping_error(rx_buf->dma_addr))) {
+    if (unlikely(pci_dma_mapping_error(efx->pci_dev, rx_buf->dma_addr))) {
         dev_kfree_skb_any(rx_buf->skb);
         rx_buf->skb = NULL;
         return -EIO;
···
                     0, efx_rx_buf_size(efx),
                     PCI_DMA_FROMDEVICE);

-    if (unlikely(pci_dma_mapping_error(dma_addr))) {
+    if (unlikely(pci_dma_mapping_error(efx->pci_dev, dma_addr))) {
         __free_pages(rx_buf->page, efx->rx_buffer_order);
         rx_buf->page = NULL;
         return -EIO;
+4 -3
drivers/net/sfc/tx.c
···

     /* Process all fragments */
     while (1) {
-        if (unlikely(pci_dma_mapping_error(dma_addr)))
+        if (unlikely(pci_dma_mapping_error(pci_dev, dma_addr)))
             goto pci_err;

         /* Store fields for marking in the per-fragment final
···
     tsoh->dma_addr = pci_map_single(tx_queue->efx->pci_dev,
                     TSOH_BUFFER(tsoh), header_len,
                     PCI_DMA_TODEVICE);
-    if (unlikely(pci_dma_mapping_error(tsoh->dma_addr))) {
+    if (unlikely(pci_dma_mapping_error(tx_queue->efx->pci_dev,
+                       tsoh->dma_addr))) {
         kfree(tsoh);
         return NULL;
     }
···

     st->ifc.unmap_addr = pci_map_page(efx->pci_dev, page, page_off,
                       len, PCI_DMA_TODEVICE);
-    if (likely(!pci_dma_mapping_error(st->ifc.unmap_addr))) {
+    if (likely(!pci_dma_mapping_error(efx->pci_dev, st->ifc.unmap_addr))) {
         st->ifc.unmap_len = len;
         st->ifc.len = len;
         st->ifc.dma_addr = st->ifc.unmap_addr;
+2 -2
drivers/net/spider_net.c
···
     /* iommu-map the skb */
     buf = pci_map_single(card->pdev, descr->skb->data,
             SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE);
-    if (pci_dma_mapping_error(buf)) {
+    if (pci_dma_mapping_error(card->pdev, buf)) {
         dev_kfree_skb_any(descr->skb);
         descr->skb = NULL;
         if (netif_msg_rx_err(card) && net_ratelimit())
···
     unsigned long flags;

     buf = pci_map_single(card->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
-    if (pci_dma_mapping_error(buf)) {
+    if (pci_dma_mapping_error(card->pdev, buf)) {
         if (netif_msg_tx_err(card) && net_ratelimit())
             dev_err(&card->netdev->dev, "could not iommu-map packet (%p, %i). "
                 "Dropping packet\n", skb->data, skb->len);
+2 -2
drivers/net/tc35815.c
···
         return NULL;
     *dma_handle = pci_map_single(hwdev, buf, PAGE_SIZE,
                      PCI_DMA_FROMDEVICE);
-    if (pci_dma_mapping_error(*dma_handle)) {
+    if (pci_dma_mapping_error(hwdev, *dma_handle)) {
         free_page((unsigned long)buf);
         return NULL;
     }
···
         return NULL;
     *dma_handle = pci_map_single(hwdev, skb->data, RX_BUF_SIZE,
                      PCI_DMA_FROMDEVICE);
-    if (pci_dma_mapping_error(*dma_handle)) {
+    if (pci_dma_mapping_error(hwdev, *dma_handle)) {
         dev_kfree_skb_any(skb);
         return NULL;
     }
+2 -2
drivers/net/wireless/ath5k/base.c
···
     bf->skb = skb;
     bf->skbaddr = pci_map_single(sc->pdev,
         skb->data, sc->rxbufsize, PCI_DMA_FROMDEVICE);
-    if (unlikely(pci_dma_mapping_error(bf->skbaddr))) {
+    if (unlikely(pci_dma_mapping_error(sc->pdev, bf->skbaddr))) {
         ATH5K_ERR(sc, "%s: DMA mapping failed\n", __func__);
         dev_kfree_skb(skb);
         bf->skb = NULL;
···
     ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, "skb %p [data %p len %u] "
         "skbaddr %llx\n", skb, skb->data, skb->len,
         (unsigned long long)bf->skbaddr);
-    if (pci_dma_mapping_error(bf->skbaddr)) {
+    if (pci_dma_mapping_error(sc->pdev, bf->skbaddr)) {
         ATH5K_ERR(sc, "beacon DMA mapping failed\n");
         return -EIO;
     }
+2 -2
drivers/scsi/ibmvscsi/ibmvfc.c
···
     crq->msg_token = dma_map_single(dev, crq->msgs,
                     PAGE_SIZE, DMA_BIDIRECTIONAL);

-    if (dma_mapping_error(crq->msg_token))
+    if (dma_mapping_error(dev, crq->msg_token))
         goto map_failed;

     retrc = rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
···
                         async_q->size * sizeof(*async_q->msgs),
                         DMA_BIDIRECTIONAL);

-    if (dma_mapping_error(async_q->msg_token)) {
+    if (dma_mapping_error(dev, async_q->msg_token)) {
         dev_err(dev, "Failed to map async queue\n");
         goto free_async_crq;
     }
+2 -2
drivers/scsi/ibmvscsi/ibmvscsi.c
···
                         sizeof(hostdata->madapter_info),
                         DMA_BIDIRECTIONAL);

-    if (dma_mapping_error(req->buffer)) {
+    if (dma_mapping_error(hostdata->dev, req->buffer)) {
         if (!firmware_has_feature(FW_FEATURE_CMO))
             dev_err(hostdata->dev,
                 "Unable to map request_buffer for "
···
                             length,
                             DMA_BIDIRECTIONAL);

-    if (dma_mapping_error(host_config->buffer)) {
+    if (dma_mapping_error(hostdata->dev, host_config->buffer)) {
         if (!firmware_has_feature(FW_FEATURE_CMO))
             dev_err(hostdata->dev,
                 "dma_mapping error getting host config\n");
+1 -1
drivers/scsi/ibmvscsi/ibmvstgt.c
···
                       queue->size * sizeof(*queue->msgs),
                       DMA_BIDIRECTIONAL);

-    if (dma_mapping_error(queue->msg_token))
+    if (dma_mapping_error(target->dev, queue->msg_token))
         goto map_failed;

     err = h_reg_crq(vport->dma_dev->unit_address, queue->msg_token,
+1 -1
drivers/scsi/ibmvscsi/rpa_vscsi.c
···
                       queue->size * sizeof(*queue->msgs),
                       DMA_BIDIRECTIONAL);

-    if (dma_mapping_error(queue->msg_token))
+    if (dma_mapping_error(hostdata->dev, queue->msg_token))
         goto map_failed;

     gather_partition_info();
+2 -2
drivers/spi/atmel_spi.c
···
         xfer->tx_dma = dma_map_single(dev,
                 (void *) xfer->tx_buf, xfer->len,
                 DMA_TO_DEVICE);
-        if (dma_mapping_error(xfer->tx_dma))
+        if (dma_mapping_error(dev, xfer->tx_dma))
             return -ENOMEM;
     }
     if (xfer->rx_buf) {
         xfer->rx_dma = dma_map_single(dev,
                 xfer->rx_buf, xfer->len,
                 DMA_FROM_DEVICE);
-        if (dma_mapping_error(xfer->rx_dma)) {
+        if (dma_mapping_error(dev, xfer->rx_dma)) {
             if (xfer->tx_buf)
                 dma_unmap_single(dev,
                         xfer->tx_dma, xfer->len,
+3 -3
drivers/spi/au1550_spi.c
···
     hw->dma_rx_tmpbuf_size = size;
     hw->dma_rx_tmpbuf_addr = dma_map_single(hw->dev, hw->dma_rx_tmpbuf,
             size, DMA_FROM_DEVICE);
-    if (dma_mapping_error(hw->dma_rx_tmpbuf_addr)) {
+    if (dma_mapping_error(hw->dev, hw->dma_rx_tmpbuf_addr)) {
         kfree(hw->dma_rx_tmpbuf);
         hw->dma_rx_tmpbuf = 0;
         hw->dma_rx_tmpbuf_size = 0;
···
             dma_rx_addr = dma_map_single(hw->dev,
                     (void *)t->rx_buf,
                     t->len, DMA_FROM_DEVICE);
-            if (dma_mapping_error(dma_rx_addr))
+            if (dma_mapping_error(hw->dev, dma_rx_addr))
                 dev_err(hw->dev, "rx dma map error\n");
         }
     } else {
···
             dma_tx_addr = dma_map_single(hw->dev,
                     (void *)t->tx_buf,
                     t->len, DMA_TO_DEVICE);
-            if (dma_mapping_error(dma_tx_addr))
+            if (dma_mapping_error(hw->dev, dma_tx_addr))
                 dev_err(hw->dev, "tx dma map error\n");
         }
     } else {
+2 -2
drivers/spi/omap2_mcspi.c
···
     if (tx_buf != NULL) {
         t->tx_dma = dma_map_single(&spi->dev, (void *) tx_buf,
                 len, DMA_TO_DEVICE);
-        if (dma_mapping_error(t->tx_dma)) {
+        if (dma_mapping_error(&spi->dev, t->tx_dma)) {
             dev_dbg(&spi->dev, "dma %cX %d bytes error\n",
                     'T', len);
             return -EINVAL;
···
     if (rx_buf != NULL) {
         t->rx_dma = dma_map_single(&spi->dev, rx_buf, t->len,
                 DMA_FROM_DEVICE);
-        if (dma_mapping_error(t->rx_dma)) {
+        if (dma_mapping_error(&spi->dev, t->rx_dma)) {
             dev_dbg(&spi->dev, "dma %cX %d bytes error\n",
                     'R', len);
             if (tx_buf != NULL)
+2 -2
drivers/spi/pxa2xx_spi.c
···
     drv_data->rx_dma = dma_map_single(dev, drv_data->rx,
                       drv_data->rx_map_len,
                       DMA_FROM_DEVICE);
-    if (dma_mapping_error(drv_data->rx_dma))
+    if (dma_mapping_error(dev, drv_data->rx_dma))
         return 0;

     /* Stream map the tx buffer */
···
                       drv_data->tx_map_len,
                       DMA_TO_DEVICE);

-    if (dma_mapping_error(drv_data->tx_dma)) {
+    if (dma_mapping_error(dev, drv_data->tx_dma)) {
         dma_unmap_single(dev, drv_data->rx_dma,
                  drv_data->rx_map_len, DMA_FROM_DEVICE);
         return 0;
+3 -3
drivers/spi/spi_imx.c
···
                        buf,
                        drv_data->tx_map_len,
                        DMA_TO_DEVICE);
-        if (dma_mapping_error(drv_data->tx_dma))
+        if (dma_mapping_error(dev, drv_data->tx_dma))
             return -1;

         drv_data->tx_dma_needs_unmap = 1;
···
                        buf,
                        drv_data->len,
                        DMA_FROM_DEVICE);
-        if (dma_mapping_error(drv_data->rx_dma))
+        if (dma_mapping_error(dev, drv_data->rx_dma))
             return -1;
         drv_data->rx_dma_needs_unmap = 1;
     }
···
                        buf,
                        drv_data->tx_map_len,
                        DMA_TO_DEVICE);
-        if (dma_mapping_error(drv_data->tx_dma)) {
+        if (dma_mapping_error(dev, drv_data->tx_dma)) {
             if (drv_data->rx_dma) {
                 dma_unmap_single(dev,
                          drv_data->rx_dma,
+3 -3
include/asm-alpha/dma-mapping.h
···
     pci_unmap_sg(alpha_gendev_to_pci(dev), sg, nents, dir)
 #define dma_supported(dev, mask)            \
     pci_dma_supported(alpha_gendev_to_pci(dev), mask)
-#define dma_mapping_error(addr)             \
-    pci_dma_mapping_error(addr)
+#define dma_mapping_error(dev, addr)            \
+    pci_dma_mapping_error(alpha_gendev_to_pci(dev), addr)

 #else   /* no PCI - no IOMMU. */

···
 #define dma_unmap_page(dev, addr, size, dir)    ((void)0)
 #define dma_unmap_sg(dev, sg, nents, dir)   ((void)0)

-#define dma_mapping_error(addr)  (0)
+#define dma_mapping_error(dev, addr)  (0)

 #endif  /* !CONFIG_PCI */

+1 -1
include/asm-alpha/pci.h
···
 /* Test for pci_map_single or pci_map_page having generated an error. */

 static inline int
-pci_dma_mapping_error(dma_addr_t dma_addr)
+pci_dma_mapping_error(struct pci_dev *pdev, dma_addr_t dma_addr)
 {
     return dma_addr == 0;
 }
+1 -1
include/asm-arm/dma-mapping.h
···
 /*
  * DMA errors are defined by all-bits-set in the DMA address.
  */
-static inline int dma_mapping_error(dma_addr_t dma_addr)
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
     return dma_addr == ~0;
 }
+1 -1
include/asm-avr32/dma-mapping.h
···
 /*
  * dma_map_single can't fail as it is implemented now.
  */
-static inline int dma_mapping_error(dma_addr_t addr)
+static inline int dma_mapping_error(struct device *dev, dma_addr_t addr)
 {
     return 0;
 }
+1 -1
include/asm-cris/dma-mapping.h
···
 }

 static inline int
-dma_mapping_error(dma_addr_t dma_addr)
+dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
     return 0;
 }
+1 -1
include/asm-frv/dma-mapping.h
···
 }

 static inline
-int dma_mapping_error(dma_addr_t dma_addr)
+int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
     return 0;
 }
+1 -1
include/asm-generic/dma-mapping-broken.h
···
 #define dma_sync_sg_for_device dma_sync_sg_for_cpu

 extern int
-dma_mapping_error(dma_addr_t dma_addr);
+dma_mapping_error(struct device *dev, dma_addr_t dma_addr);

 extern int
 dma_supported(struct device *dev, u64 mask);
+2 -2
include/asm-generic/dma-mapping.h
···
 }

 static inline int
-dma_mapping_error(dma_addr_t dma_addr)
+dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
-    return pci_dma_mapping_error(dma_addr);
+    return pci_dma_mapping_error(to_pci_dev(dev), dma_addr);
 }

+2 -2
include/asm-generic/pci-dma-compat.h
···
 }

 static inline int
-pci_dma_mapping_error(dma_addr_t dma_addr)
+pci_dma_mapping_error(struct pci_dev *pdev, dma_addr_t dma_addr)
 {
-    return dma_mapping_error(dma_addr);
+    return dma_mapping_error(&pdev->dev, dma_addr);
 }

 #endif
+1 -1
include/asm-ia64/machvec.h
···
 typedef void ia64_mv_dma_sync_sg_for_cpu (struct device *, struct scatterlist *, int, int);
 typedef void ia64_mv_dma_sync_single_for_device (struct device *, dma_addr_t, size_t, int);
 typedef void ia64_mv_dma_sync_sg_for_device (struct device *, struct scatterlist *, int, int);
-typedef int ia64_mv_dma_mapping_error (dma_addr_t dma_addr);
+typedef int ia64_mv_dma_mapping_error(struct device *, dma_addr_t dma_addr);
 typedef int ia64_mv_dma_supported (struct device *, u64);

 typedef dma_addr_t ia64_mv_dma_map_single_attrs (struct device *, void *, size_t, int, struct dma_attrs *);
+1 -1
include/asm-m68k/dma-mapping.h
···
 {
 }

-static inline int dma_mapping_error(dma_addr_t handle)
+static inline int dma_mapping_error(struct device *dev, dma_addr_t handle)
 {
     return 0;
 }
+1 -1
include/asm-mips/dma-mapping.h
···
     int nelems, enum dma_data_direction direction);
 extern void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
     int nelems, enum dma_data_direction direction);
-extern int dma_mapping_error(dma_addr_t dma_addr);
+extern int dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
 extern int dma_supported(struct device *dev, u64 mask);

 static inline int
+1 -1
include/asm-mn10300/dma-mapping.h
···
 }

 static inline
-int dma_mapping_error(dma_addr_t dma_addr)
+int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
     return 0;
 }
+1 -1
include/asm-parisc/dma-mapping.h
···
 #endif

 /* At the moment, we panic on error for IOMMU resource exaustion */
-#define dma_mapping_error(x)    0
+#define dma_mapping_error(dev, x)   0

 #endif
+1 -1
include/asm-powerpc/dma-mapping.h
···
     __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
 }

-static inline int dma_mapping_error(dma_addr_t dma_addr)
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
 #ifdef CONFIG_PPC64
     return (dma_addr == DMA_ERROR_CODE);
+1 -1
include/asm-sh/dma-mapping.h
···
     return L1_CACHE_BYTES;
 }

-static inline int dma_mapping_error(dma_addr_t dma_addr)
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
     return dma_addr == 0;
 }
+1 -1
include/asm-sparc/dma-mapping_64.h
···
     /* No flushing needed to sync cpu writes to the device. */
 }

-static inline int dma_mapping_error(dma_addr_t dma_addr)
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
     return (dma_addr == DMA_ERROR_CODE);
 }
+2 -1
include/asm-sparc/pci_32.h
···

 #define PCI_DMA_ERROR_CODE      (~(dma_addr_t)0x0)

-static inline int pci_dma_mapping_error(dma_addr_t dma_addr)
+static inline int pci_dma_mapping_error(struct pci_dev *pdev,
+                    dma_addr_t dma_addr)
 {
     return (dma_addr == PCI_DMA_ERROR_CODE);
 }
+3 -2
include/asm-sparc/pci_64.h
···
 #define PCI64_REQUIRED_MASK (~(dma64_addr_t)0)
 #define PCI64_ADDR_BASE     0xfffc000000000000UL

-static inline int pci_dma_mapping_error(dma_addr_t dma_addr)
+static inline int pci_dma_mapping_error(struct pci_dev *pdev,
+                    dma_addr_t dma_addr)
 {
-    return dma_mapping_error(dma_addr);
+    return dma_mapping_error(&pdev->dev, dma_addr);
 }

 #ifdef CONFIG_PCI
+3
include/asm-x86/device.h
···
 #ifdef CONFIG_ACPI
     void    *acpi_handle;
 #endif
+#ifdef CONFIG_X86_64
+    struct dma_mapping_ops *dma_ops;
+#endif
 #ifdef CONFIG_DMAR
     void *iommu; /* hook for IOMMU specific extension */
 #endif
+68 -31
include/asm-x86/dma-mapping.h
···
 extern int force_iommu;

 struct dma_mapping_ops {
-    int             (*mapping_error)(dma_addr_t dma_addr);
+    int             (*mapping_error)(struct device *dev,
+                     dma_addr_t dma_addr);
     void*           (*alloc_coherent)(struct device *dev, size_t size,
                 dma_addr_t *dma_handle, gfp_t gfp);
     void            (*free_coherent)(struct device *dev, size_t size,
···
     int     is_phys;
 };

-extern const struct dma_mapping_ops *dma_ops;
+extern struct dma_mapping_ops *dma_ops;

-static inline int dma_mapping_error(dma_addr_t dma_addr)
+static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
 {
-    if (dma_ops->mapping_error)
-        return dma_ops->mapping_error(dma_addr);
+#ifdef CONFIG_X86_32
+    return dma_ops;
+#else
+    if (unlikely(!dev) || !dev->archdata.dma_ops)
+        return dma_ops;
+    else
+        return dev->archdata.dma_ops;
+#endif
+}
+
+/* Make sure we keep the same behaviour */
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+#ifdef CONFIG_X86_32
+    return 0;
+#else
+    struct dma_mapping_ops *ops = get_dma_ops(dev);
+    if (ops->mapping_error)
+        return ops->mapping_error(dev, dma_addr);

     return (dma_addr == bad_dma_address);
+#endif
 }

 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
···
 dma_map_single(struct device *hwdev, void *ptr, size_t size,
            int direction)
 {
+    struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
     BUG_ON(!valid_dma_direction(direction));
-    return dma_ops->map_single(hwdev, virt_to_phys(ptr), size, direction);
+    return ops->map_single(hwdev, virt_to_phys(ptr), size, direction);
 }

 static inline void
 dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
          int direction)
 {
+    struct dma_mapping_ops *ops = get_dma_ops(dev);
+
     BUG_ON(!valid_dma_direction(direction));
-    if (dma_ops->unmap_single)
-        dma_ops->unmap_single(dev, addr, size, direction);
+    if (ops->unmap_single)
+        ops->unmap_single(dev, addr, size, direction);
 }

 static inline int
 dma_map_sg(struct device *hwdev, struct scatterlist *sg,
        int nents, int direction)
 {
+    struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
     BUG_ON(!valid_dma_direction(direction));
-    return dma_ops->map_sg(hwdev, sg, nents, direction);
+    return ops->map_sg(hwdev, sg, nents, direction);
 }

 static inline void
 dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
          int direction)
 {
+    struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
     BUG_ON(!valid_dma_direction(direction));
-    if (dma_ops->unmap_sg)
-        dma_ops->unmap_sg(hwdev, sg, nents, direction);
+    if (ops->unmap_sg)
+        ops->unmap_sg(hwdev, sg, nents, direction);
 }

 static inline void
 dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
             size_t size, int direction)
 {
+    struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
     BUG_ON(!valid_dma_direction(direction));
-    if (dma_ops->sync_single_for_cpu)
-        dma_ops->sync_single_for_cpu(hwdev, dma_handle, size,
-                         direction);
+    if (ops->sync_single_for_cpu)
+        ops->sync_single_for_cpu(hwdev, dma_handle, size, direction);
     flush_write_buffers();
 }

···
 dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
                size_t size, int direction)
 {
+    struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
     BUG_ON(!valid_dma_direction(direction));
-    if (dma_ops->sync_single_for_device)
-        dma_ops->sync_single_for_device(hwdev, dma_handle, size,
-                        direction);
+    if (ops->sync_single_for_device)
+        ops->sync_single_for_device(hwdev, dma_handle, size, direction);
     flush_write_buffers();
 }

···
 dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
                   unsigned long offset, size_t size, int direction)
 {
-    BUG_ON(!valid_dma_direction(direction));
-    if (dma_ops->sync_single_range_for_cpu)
-        dma_ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
-                           size, direction);
+    struct dma_mapping_ops *ops = get_dma_ops(hwdev);

+    BUG_ON(!valid_dma_direction(direction));
+    if (ops->sync_single_range_for_cpu)
+        ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
+                           size, direction);
     flush_write_buffers();
 }

···
                   unsigned long offset, size_t size,
                   int direction)
 {
-    BUG_ON(!valid_dma_direction(direction));
-    if (dma_ops->sync_single_range_for_device)
-        dma_ops->sync_single_range_for_device(hwdev, dma_handle,
-                              offset, size, direction);
+    struct dma_mapping_ops *ops = get_dma_ops(hwdev);

+    BUG_ON(!valid_dma_direction(direction));
+    if (ops->sync_single_range_for_device)
+        ops->sync_single_range_for_device(hwdev, dma_handle,
+                          offset, size, direction);
     flush_write_buffers();
 }

···
 dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
             int nelems, int direction)
 {
+    struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
     BUG_ON(!valid_dma_direction(direction));
-    if (dma_ops->sync_sg_for_cpu)
-        dma_ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
+    if (ops->sync_sg_for_cpu)
+        ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
     flush_write_buffers();
 }

···
 dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
                int nelems, int direction)
 {
+    struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
     BUG_ON(!valid_dma_direction(direction));
-    if (dma_ops->sync_sg_for_device)
-        dma_ops->sync_sg_for_device(hwdev, sg, nelems, direction);
+    if (ops->sync_sg_for_device)
+        ops->sync_sg_for_device(hwdev, sg, nelems, direction);

     flush_write_buffers();
 }

···
                       size_t offset, size_t size,
                       int direction)
 {
+    struct dma_mapping_ops *ops = get_dma_ops(dev);
+
     BUG_ON(!valid_dma_direction(direction));
-    return dma_ops->map_single(dev, page_to_phys(page)+offset,
-                   size, direction);
+    return ops->map_single(dev, page_to_phys(page) + offset,
+                   size, direction);
 }

 static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
+1 -1
include/asm-x86/swiotlb.h
···
                int nents, int direction);
 extern void swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg,
                  int nents, int direction);
-extern int swiotlb_dma_mapping_error(dma_addr_t dma_addr);
+extern int swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr);
 extern void swiotlb_free_coherent(struct device *hwdev, size_t size,
                   void *vaddr, dma_addr_t dma_handle);
 extern int swiotlb_dma_supported(struct device *hwdev, u64 mask);
+1 -1
include/asm-xtensa/dma-mapping.h
···
         consistent_sync(sg_virt(sg), sg->length, dir);
 }
 static inline int
-dma_mapping_error(dma_addr_t dma_addr)
+dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
     return 0;
 }
+1 -1
include/linux/i2o.h
···
     }

     dma_addr = dma_map_single(&c->pdev->dev, ptr, size, direction);
-    if (!dma_mapping_error(dma_addr)) {
+    if (!dma_mapping_error(&c->pdev->dev, dma_addr)) {
 #ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
         if ((sizeof(dma_addr_t) > 4) && c->pae_support) {
             *mptr++ = cpu_to_le32(0x7C020002);
+2 -2
include/linux/ssb/ssb.h
···
 {
     switch (dev->bus->bustype) {
     case SSB_BUSTYPE_PCI:
-        return pci_dma_mapping_error(addr);
+        return pci_dma_mapping_error(dev->bus->host_pci, addr);
     case SSB_BUSTYPE_SSB:
-        return dma_mapping_error(addr);
+        return dma_mapping_error(dev->dev, addr);
     default:
         __ssb_dma_not_implemented(dev);
     }
+1 -1
include/rdma/ib_verbs.h
···
 {
     if (dev->dma_ops)
         return dev->dma_ops->mapping_error(dev, dma_addr);
-    return dma_mapping_error(dma_addr);
+    return dma_mapping_error(dev->dma_device, dma_addr);
 }

 /**
+2 -2
lib/swiotlb.c
···
      */
     dma_addr_t handle;
     handle = swiotlb_map_single(NULL, NULL, size, DMA_FROM_DEVICE);
-    if (swiotlb_dma_mapping_error(handle))
+    if (swiotlb_dma_mapping_error(hwdev, handle))
         return NULL;

     ret = bus_to_virt(handle);
···
 }

 int
-swiotlb_dma_mapping_error(dma_addr_t dma_addr)
+swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
 {
     return (dma_addr == virt_to_bus(io_tlb_overflow_buffer));
 }
+2 -1
net/sunrpc/xprtrdma/svc_rdma_sendto.c
···
                 (void *)
                 vec->sge[xdr_sge_no].iov_base + sge_off,
                 sge_bytes, DMA_TO_DEVICE);
-        if (dma_mapping_error(sge[sge_no].addr))
+        if (dma_mapping_error(xprt->sc_cm_id->device->dma_device,
+                      sge[sge_no].addr))
             goto err;
         sge_off = 0;
         sge_no++;