Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net: neterion: vxge: switch from 'pci_' to 'dma_' API

The wrappers in include/linux/pci-dma-compat.h should go away.

The patch has been generated with the coccinelle script below. No GFP_
flag needs to be corrected.
It has been compile tested.

@@
@@
- PCI_DMA_BIDIRECTIONAL
+ DMA_BIDIRECTIONAL

@@
@@
- PCI_DMA_TODEVICE
+ DMA_TO_DEVICE

@@
@@
- PCI_DMA_FROMDEVICE
+ DMA_FROM_DEVICE

@@
@@
- PCI_DMA_NONE
+ DMA_NONE

@@
expression e1, e2, e3;
@@
- pci_alloc_consistent(e1, e2, e3)
+ dma_alloc_coherent(&e1->dev, e2, e3, GFP_)

@@
expression e1, e2, e3;
@@
- pci_zalloc_consistent(e1, e2, e3)
+ dma_alloc_coherent(&e1->dev, e2, e3, GFP_)

@@
expression e1, e2, e3, e4;
@@
- pci_free_consistent(e1, e2, e3, e4)
+ dma_free_coherent(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
- pci_map_single(e1, e2, e3, e4)
+ dma_map_single(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
- pci_unmap_single(e1, e2, e3, e4)
+ dma_unmap_single(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4, e5;
@@
- pci_map_page(e1, e2, e3, e4, e5)
+ dma_map_page(&e1->dev, e2, e3, e4, e5)

@@
expression e1, e2, e3, e4;
@@
- pci_unmap_page(e1, e2, e3, e4)
+ dma_unmap_page(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
- pci_map_sg(e1, e2, e3, e4)
+ dma_map_sg(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
- pci_unmap_sg(e1, e2, e3, e4)
+ dma_unmap_sg(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_single_for_cpu(e1, e2, e3, e4)
+ dma_sync_single_for_cpu(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_single_for_device(e1, e2, e3, e4)
+ dma_sync_single_for_device(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_sg_for_cpu(e1, e2, e3, e4)
+ dma_sync_sg_for_cpu(&e1->dev, e2, e3, e4)

@@
expression e1, e2, e3, e4;
@@
- pci_dma_sync_sg_for_device(e1, e2, e3, e4)
+ dma_sync_sg_for_device(&e1->dev, e2, e3, e4)

@@
expression e1, e2;
@@
- pci_dma_mapping_error(e1, e2)
+ dma_mapping_error(&e1->dev, e2)

@@
expression e1, e2;
@@
- pci_set_dma_mask(e1, e2)
+ dma_set_mask(&e1->dev, e2)

@@
expression e1, e2;
@@
- pci_set_consistent_dma_mask(e1, e2)
+ dma_set_coherent_mask(&e1->dev, e2)

Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by Christophe JAILLET and committed by David S. Miller (commits 8331bbe9, fb059b26).

+52 -54
+20 -22
drivers/net/ethernet/neterion/vxge/vxge-config.c
··· 1102 1102 hldev = blockpool->hldev; 1103 1103 1104 1104 list_for_each_safe(p, n, &blockpool->free_block_list) { 1105 - pci_unmap_single(hldev->pdev, 1106 - ((struct __vxge_hw_blockpool_entry *)p)->dma_addr, 1107 - ((struct __vxge_hw_blockpool_entry *)p)->length, 1108 - PCI_DMA_BIDIRECTIONAL); 1105 + dma_unmap_single(&hldev->pdev->dev, 1106 + ((struct __vxge_hw_blockpool_entry *)p)->dma_addr, 1107 + ((struct __vxge_hw_blockpool_entry *)p)->length, 1108 + DMA_BIDIRECTIONAL); 1109 1109 1110 1110 vxge_os_dma_free(hldev->pdev, 1111 1111 ((struct __vxge_hw_blockpool_entry *)p)->memblock, ··· 1178 1178 goto blockpool_create_exit; 1179 1179 } 1180 1180 1181 - dma_addr = pci_map_single(hldev->pdev, memblock, 1182 - VXGE_HW_BLOCK_SIZE, PCI_DMA_BIDIRECTIONAL); 1183 - if (unlikely(pci_dma_mapping_error(hldev->pdev, 1184 - dma_addr))) { 1181 + dma_addr = dma_map_single(&hldev->pdev->dev, memblock, 1182 + VXGE_HW_BLOCK_SIZE, 1183 + DMA_BIDIRECTIONAL); 1184 + if (unlikely(dma_mapping_error(&hldev->pdev->dev, dma_addr))) { 1185 1185 vxge_os_dma_free(hldev->pdev, memblock, &acc_handle); 1186 1186 __vxge_hw_blockpool_destroy(blockpool); 1187 1187 status = VXGE_HW_ERR_OUT_OF_MEMORY; ··· 2264 2264 goto exit; 2265 2265 } 2266 2266 2267 - dma_addr = pci_map_single(devh->pdev, block_addr, length, 2268 - PCI_DMA_BIDIRECTIONAL); 2267 + dma_addr = dma_map_single(&devh->pdev->dev, block_addr, length, 2268 + DMA_BIDIRECTIONAL); 2269 2269 2270 - if (unlikely(pci_dma_mapping_error(devh->pdev, dma_addr))) { 2270 + if (unlikely(dma_mapping_error(&devh->pdev->dev, dma_addr))) { 2271 2271 vxge_os_dma_free(devh->pdev, block_addr, &acc_handle); 2272 2272 blockpool->req_out--; 2273 2273 goto exit; ··· 2359 2359 if (!memblock) 2360 2360 goto exit; 2361 2361 2362 - dma_object->addr = pci_map_single(devh->pdev, memblock, size, 2363 - PCI_DMA_BIDIRECTIONAL); 2362 + dma_object->addr = dma_map_single(&devh->pdev->dev, memblock, 2363 + size, DMA_BIDIRECTIONAL); 2364 2364 2365 - if (unlikely(pci_dma_mapping_error(devh->pdev, 2366 - dma_object->addr))) { 2365 + if (unlikely(dma_mapping_error(&devh->pdev->dev, dma_object->addr))) { 2367 2366 vxge_os_dma_free(devh->pdev, memblock, 2368 2367 &dma_object->acc_handle); 2369 2368 memblock = NULL; ··· 2409 2410 if (blockpool->pool_size < blockpool->pool_max) 2410 2411 break; 2411 2412 2412 - pci_unmap_single( 2413 - (blockpool->hldev)->pdev, 2414 - ((struct __vxge_hw_blockpool_entry *)p)->dma_addr, 2415 - ((struct __vxge_hw_blockpool_entry *)p)->length, 2416 - PCI_DMA_BIDIRECTIONAL); 2413 + dma_unmap_single(&(blockpool->hldev)->pdev->dev, 2414 + ((struct __vxge_hw_blockpool_entry *)p)->dma_addr, 2415 + ((struct __vxge_hw_blockpool_entry *)p)->length, 2416 + DMA_BIDIRECTIONAL); 2417 2417 2418 2418 vxge_os_dma_free( 2419 2419 (blockpool->hldev)->pdev, ··· 2443 2445 blockpool = &devh->block_pool; 2444 2446 2445 2447 if (size != blockpool->block_size) { 2446 - pci_unmap_single(devh->pdev, dma_object->addr, size, 2447 - PCI_DMA_BIDIRECTIONAL); 2448 + dma_unmap_single(&devh->pdev->dev, dma_object->addr, size, 2449 + DMA_BIDIRECTIONAL); 2448 2450 vxge_os_dma_free(devh->pdev, memblock, &dma_object->acc_handle); 2449 2451 } else { 2450 2452
+32 -32
drivers/net/ethernet/neterion/vxge/vxge-main.c
··· 241 241 rx_priv = vxge_hw_ring_rxd_private_get(dtrh); 242 242 243 243 rx_priv->skb_data = rx_priv->skb->data; 244 - dma_addr = pci_map_single(ring->pdev, rx_priv->skb_data, 245 - rx_priv->data_size, PCI_DMA_FROMDEVICE); 244 + dma_addr = dma_map_single(&ring->pdev->dev, rx_priv->skb_data, 245 + rx_priv->data_size, DMA_FROM_DEVICE); 246 246 247 - if (unlikely(pci_dma_mapping_error(ring->pdev, dma_addr))) { 247 + if (unlikely(dma_mapping_error(&ring->pdev->dev, dma_addr))) { 248 248 ring->stats.pci_map_fail++; 249 249 return -EIO; 250 250 } ··· 323 323 static inline void vxge_re_pre_post(void *dtr, struct vxge_ring *ring, 324 324 struct vxge_rx_priv *rx_priv) 325 325 { 326 - pci_dma_sync_single_for_device(ring->pdev, 327 - rx_priv->data_dma, rx_priv->data_size, PCI_DMA_FROMDEVICE); 326 + dma_sync_single_for_device(&ring->pdev->dev, rx_priv->data_dma, 327 + rx_priv->data_size, DMA_FROM_DEVICE); 328 328 329 329 vxge_hw_ring_rxd_1b_set(dtr, rx_priv->data_dma, rx_priv->data_size); 330 330 vxge_hw_ring_rxd_pre_post(ring->handle, dtr); ··· 425 425 if (!vxge_rx_map(dtr, ring)) { 426 426 skb_put(skb, pkt_length); 427 427 428 - pci_unmap_single(ring->pdev, data_dma, 429 - data_size, PCI_DMA_FROMDEVICE); 428 + dma_unmap_single(&ring->pdev->dev, 429 + data_dma, data_size, 430 + DMA_FROM_DEVICE); 430 431 431 432 vxge_hw_ring_rxd_pre_post(ringh, dtr); 432 433 vxge_post(&dtr_cnt, &first_dtr, dtr, ··· 459 458 skb_reserve(skb_up, 460 459 VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN); 461 460 462 - pci_dma_sync_single_for_cpu(ring->pdev, 463 - data_dma, data_size, 464 - PCI_DMA_FROMDEVICE); 461 + dma_sync_single_for_cpu(&ring->pdev->dev, 462 + data_dma, data_size, 463 + DMA_FROM_DEVICE); 465 464 466 465 vxge_debug_mem(VXGE_TRACE, 467 466 "%s: %s:%d skb_up = %p", ··· 586 585 } 587 586 588 587 /* for unfragmented skb */ 589 - pci_unmap_single(fifo->pdev, txd_priv->dma_buffers[i++], 590 - skb_headlen(skb), PCI_DMA_TODEVICE); 588 + dma_unmap_single(&fifo->pdev->dev, txd_priv->dma_buffers[i++], 589 + skb_headlen(skb), DMA_TO_DEVICE); 591 590 592 591 for (j = 0; j < frg_cnt; j++) { 593 - pci_unmap_page(fifo->pdev, 594 - txd_priv->dma_buffers[i++], 595 - skb_frag_size(frag), PCI_DMA_TODEVICE); 592 + dma_unmap_page(&fifo->pdev->dev, 593 + txd_priv->dma_buffers[i++], 594 + skb_frag_size(frag), DMA_TO_DEVICE); 596 595 frag += 1; 597 596 } 598 597 ··· 898 897 899 898 first_frg_len = skb_headlen(skb); 900 899 901 - dma_pointer = pci_map_single(fifo->pdev, skb->data, first_frg_len, 902 - PCI_DMA_TODEVICE); 900 + dma_pointer = dma_map_single(&fifo->pdev->dev, skb->data, 901 + first_frg_len, DMA_TO_DEVICE); 903 902 904 - if (unlikely(pci_dma_mapping_error(fifo->pdev, dma_pointer))) { 903 + if (unlikely(dma_mapping_error(&fifo->pdev->dev, dma_pointer))) { 905 904 vxge_hw_fifo_txdl_free(fifo_hw, dtr); 906 905 fifo->stats.pci_map_fail++; 907 906 goto _exit0; ··· 978 977 j = 0; 979 978 frag = &skb_shinfo(skb)->frags[0]; 980 979 981 - pci_unmap_single(fifo->pdev, txdl_priv->dma_buffers[j++], 982 - skb_headlen(skb), PCI_DMA_TODEVICE); 980 + dma_unmap_single(&fifo->pdev->dev, txdl_priv->dma_buffers[j++], 981 + skb_headlen(skb), DMA_TO_DEVICE); 983 982 984 983 for (; j < i; j++) { 985 - pci_unmap_page(fifo->pdev, txdl_priv->dma_buffers[j], 986 - skb_frag_size(frag), PCI_DMA_TODEVICE); 984 + dma_unmap_page(&fifo->pdev->dev, txdl_priv->dma_buffers[j], 985 + skb_frag_size(frag), DMA_TO_DEVICE); 987 986 frag += 1; 988 987 } 989 988 ··· 1013 1012 if (state != VXGE_HW_RXD_STATE_POSTED) 1014 1013 return; 1015 1014 1016 - pci_unmap_single(ring->pdev, rx_priv->data_dma, 1017 - rx_priv->data_size, PCI_DMA_FROMDEVICE); 1015 + dma_unmap_single(&ring->pdev->dev, rx_priv->data_dma, 1016 + rx_priv->data_size, DMA_FROM_DEVICE); 1018 1017 1019 1018 dev_kfree_skb(rx_priv->skb); 1020 1019 rx_priv->skb_data = NULL; ··· 1049 1048 frag = &skb_shinfo(skb)->frags[0]; 1050 1049 1051 1050 /* for unfragmented skb */ 1052 - pci_unmap_single(fifo->pdev, txd_priv->dma_buffers[i++], 1053 - skb_headlen(skb), PCI_DMA_TODEVICE); 1051 + dma_unmap_single(&fifo->pdev->dev, txd_priv->dma_buffers[i++], 1052 + skb_headlen(skb), DMA_TO_DEVICE); 1054 1053 1055 1054 for (j = 0; j < frg_cnt; j++) { 1056 - pci_unmap_page(fifo->pdev, txd_priv->dma_buffers[i++], 1057 - skb_frag_size(frag), PCI_DMA_TODEVICE); 1055 + dma_unmap_page(&fifo->pdev->dev, txd_priv->dma_buffers[i++], 1056 + skb_frag_size(frag), DMA_TO_DEVICE); 1058 1057 frag += 1; 1059 1058 } 1060 1059 ··· 4388 4387 goto _exit0; 4389 4388 } 4390 4389 4391 - if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { 4390 + if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) { 4392 4391 vxge_debug_ll_config(VXGE_TRACE, 4393 4392 "%s : using 64bit DMA", __func__); 4394 4393 4395 4394 high_dma = 1; 4396 4395 4397 - if (pci_set_consistent_dma_mask(pdev, 4398 - DMA_BIT_MASK(64))) { 4396 + if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) { 4399 4397 vxge_debug_init(VXGE_ERR, 4400 4398 "%s : unable to obtain 64bit DMA for " 4401 4399 "consistent allocations", __func__); 4402 4400 ret = -ENOMEM; 4403 4401 goto _exit1; 4404 4402 } 4405 - } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) { 4403 + } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) { 4406 4404 vxge_debug_ll_config(VXGE_TRACE, 4407 4405 "%s : using 32bit DMA", __func__); 4408 4406 } else {