Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net: velocity: Convert to generic dma functions

Remove the pci_* dma functions and replace with the more generic
versions.

In preparation for adding platform support, a new struct device *dev
is added to struct velocity_info which can be used by both the PCI
and platform code.

Signed-off-by: Tony Prisk <linux@prisktech.co.nz>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by Tony Prisk and committed by David S. Miller.
e2c41f14 a9683c94

+26 -26
+25 -26
drivers/net/ethernet/via/via-velocity.c
··· 46 46 #include <linux/types.h> 47 47 #include <linux/bitops.h> 48 48 #include <linux/init.h> 49 + #include <linux/dma-mapping.h> 49 50 #include <linux/mm.h> 50 51 #include <linux/errno.h> 51 52 #include <linux/ioport.h> ··· 1460 1459 struct velocity_opt *opt = &vptr->options; 1461 1460 const unsigned int rx_ring_size = opt->numrx * sizeof(struct rx_desc); 1462 1461 const unsigned int tx_ring_size = opt->numtx * sizeof(struct tx_desc); 1463 - struct pci_dev *pdev = vptr->pdev; 1464 1462 dma_addr_t pool_dma; 1465 1463 void *pool; 1466 1464 unsigned int i; ··· 1467 1467 /* 1468 1468 * Allocate all RD/TD rings a single pool. 1469 1469 * 1470 - * pci_alloc_consistent() fulfills the requirement for 64 bytes 1470 + * dma_alloc_coherent() fulfills the requirement for 64 bytes 1471 1471 * alignment 1472 1472 */ 1473 - pool = pci_alloc_consistent(pdev, tx_ring_size * vptr->tx.numq + 1474 - rx_ring_size, &pool_dma); 1473 + pool = dma_alloc_coherent(vptr->dev, tx_ring_size * vptr->tx.numq + 1474 + rx_ring_size, &pool_dma, GFP_ATOMIC); 1475 1475 if (!pool) { 1476 - dev_err(&pdev->dev, "%s : DMA memory allocation failed.\n", 1476 + dev_err(vptr->dev, "%s : DMA memory allocation failed.\n", 1477 1477 vptr->netdev->name); 1478 1478 return -ENOMEM; 1479 1479 } ··· 1524 1524 */ 1525 1525 skb_reserve(rd_info->skb, 1526 1526 64 - ((unsigned long) rd_info->skb->data & 63)); 1527 - rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->data, 1528 - vptr->rx.buf_sz, PCI_DMA_FROMDEVICE); 1527 + rd_info->skb_dma = dma_map_single(vptr->dev, rd_info->skb->data, 1528 + vptr->rx.buf_sz, DMA_FROM_DEVICE); 1529 1529 1530 1530 /* 1531 1531 * Fill in the descriptor to match ··· 1588 1588 1589 1589 if (!rd_info->skb) 1590 1590 continue; 1591 - pci_unmap_single(vptr->pdev, rd_info->skb_dma, vptr->rx.buf_sz, 1592 - PCI_DMA_FROMDEVICE); 1591 + dma_unmap_single(vptr->dev, rd_info->skb_dma, vptr->rx.buf_sz, 1592 + DMA_FROM_DEVICE); 1593 1593 rd_info->skb_dma = 0; 1594 1594 1595 1595 
dev_kfree_skb(rd_info->skb); ··· 1670 1670 const int size = vptr->options.numrx * sizeof(struct rx_desc) + 1671 1671 vptr->options.numtx * sizeof(struct tx_desc) * vptr->tx.numq; 1672 1672 1673 - pci_free_consistent(vptr->pdev, size, vptr->rx.ring, vptr->rx.pool_dma); 1673 + dma_free_coherent(vptr->dev, size, vptr->rx.ring, vptr->rx.pool_dma); 1674 1674 } 1675 1675 1676 1676 static int velocity_init_rings(struct velocity_info *vptr, int mtu) ··· 1727 1727 pktlen = max_t(size_t, pktlen, 1728 1728 td->td_buf[i].size & ~TD_QUEUE); 1729 1729 1730 - pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], 1731 - le16_to_cpu(pktlen), PCI_DMA_TODEVICE); 1730 + dma_unmap_single(vptr->dev, tdinfo->skb_dma[i], 1731 + le16_to_cpu(pktlen), DMA_TO_DEVICE); 1732 1732 } 1733 1733 } 1734 1734 dev_kfree_skb_irq(skb); ··· 1750 1750 if (td_info->skb) { 1751 1751 for (i = 0; i < td_info->nskb_dma; i++) { 1752 1752 if (td_info->skb_dma[i]) { 1753 - pci_unmap_single(vptr->pdev, td_info->skb_dma[i], 1754 - td_info->skb->len, PCI_DMA_TODEVICE); 1753 + dma_unmap_single(vptr->dev, td_info->skb_dma[i], 1754 + td_info->skb->len, DMA_TO_DEVICE); 1755 1755 td_info->skb_dma[i] = 0; 1756 1756 } 1757 1757 } ··· 2029 2029 */ 2030 2030 static int velocity_receive_frame(struct velocity_info *vptr, int idx) 2031 2031 { 2032 - void (*pci_action)(struct pci_dev *, dma_addr_t, size_t, int); 2033 2032 struct net_device_stats *stats = &vptr->netdev->stats; 2034 2033 struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]); 2035 2034 struct rx_desc *rd = &(vptr->rx.ring[idx]); ··· 2046 2047 2047 2048 skb = rd_info->skb; 2048 2049 2049 - pci_dma_sync_single_for_cpu(vptr->pdev, rd_info->skb_dma, 2050 - vptr->rx.buf_sz, PCI_DMA_FROMDEVICE); 2050 + dma_sync_single_for_cpu(vptr->dev, rd_info->skb_dma, 2051 + vptr->rx.buf_sz, DMA_FROM_DEVICE); 2051 2052 2052 2053 /* 2053 2054 * Drop frame not meeting IEEE 802.3 ··· 2060 2061 } 2061 2062 } 2062 2063 2063 - pci_action = pci_dma_sync_single_for_device; 2064 - 2065 2064 
velocity_rx_csum(rd, skb); 2066 2065 2067 2066 if (velocity_rx_copy(&skb, pkt_len, vptr) < 0) { 2068 2067 velocity_iph_realign(vptr, skb, pkt_len); 2069 - pci_action = pci_unmap_single; 2070 2068 rd_info->skb = NULL; 2069 + dma_unmap_single(vptr->dev, rd_info->skb_dma, vptr->rx.buf_sz, 2070 + DMA_FROM_DEVICE); 2071 + } else { 2072 + dma_sync_single_for_device(vptr->dev, rd_info->skb_dma, 2073 + vptr->rx.buf_sz, DMA_FROM_DEVICE); 2071 2074 } 2072 - 2073 - pci_action(vptr->pdev, rd_info->skb_dma, vptr->rx.buf_sz, 2074 - PCI_DMA_FROMDEVICE); 2075 2075 2076 2076 skb_put(skb, pkt_len - 4); 2077 2077 skb->protocol = eth_type_trans(skb, vptr->netdev); ··· 2548 2550 * add it to the transmit ring. 2549 2551 */ 2550 2552 tdinfo->skb = skb; 2551 - tdinfo->skb_dma[0] = pci_map_single(vptr->pdev, skb->data, pktlen, PCI_DMA_TODEVICE); 2553 + tdinfo->skb_dma[0] = dma_map_single(vptr->dev, skb->data, pktlen, 2554 + DMA_TO_DEVICE); 2552 2555 td_ptr->tdesc0.len = cpu_to_le16(pktlen); 2553 2556 td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]); 2554 2557 td_ptr->td_buf[0].pa_high = 0; ··· 2559 2560 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2560 2561 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2561 2562 2562 - tdinfo->skb_dma[i + 1] = skb_frag_dma_map(&vptr->pdev->dev, 2563 + tdinfo->skb_dma[i + 1] = skb_frag_dma_map(vptr->dev, 2563 2564 frag, 0, 2564 2565 skb_frag_size(frag), 2565 2566 DMA_TO_DEVICE); ··· 2636 2637 { 2637 2638 memset(vptr, 0, sizeof(struct velocity_info)); 2638 2639 2640 + vptr->dev = &pdev->dev; 2639 2641 vptr->pdev = pdev; 2640 2642 vptr->chip_id = info->chip_id; 2641 2643 vptr->tx.numq = info->txqueue; ··· 2743 2743 2744 2744 SET_NETDEV_DEV(dev, &pdev->dev); 2745 2745 vptr = netdev_priv(dev); 2746 - 2747 2746 2748 2747 if (first) { 2749 2748 printk(KERN_INFO "%s Ver. %s\n",
+1
drivers/net/ethernet/via/via-velocity.h
··· 1434 1434 #define GET_RD_BY_IDX(vptr, idx) (vptr->rd_ring[idx]) 1435 1435 1436 1436 struct velocity_info { 1437 + struct device *dev; 1437 1438 struct pci_dev *pdev; 1438 1439 struct net_device *netdev; 1439 1440