Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

iio: buffer: support getting dma channel from the buffer

Add a new buffer accessor .get_dma_dev() in order to get the
struct device responsible for actually providing the dma channel. We
cannot assume that we can use the parent of the IIO device for mapping
the DMA buffer. This becomes important on systems (like the Xilinx/AMD
zynqMP Ultrascale) where memory (or part of it) is mapped above the
32-bit range. On such systems, and given that a device by default has
a DMA mask of 32 bits, we would then need to rely on bounce buffers (to
swiotlb) for mapping memory above the DMA mask limit.

In the process, add an iio_buffer_get_dma_dev() helper function to get
the proper DMA device.

Cc: stable@vger.kernel.org
Reviewed-by: David Lechner <dlechner@baylibre.com>
Signed-off-by: Nuno Sá <nuno.sa@analog.com>
Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>

authored by

Nuno Sá and committed by
Jonathan Cameron
a514bb10 0bf1bfde

+18 -5
+16 -5
drivers/iio/industrialio-buffer.c
··· 1623 1623 return 0; 1624 1624 } 1625 1625 1626 + static struct device *iio_buffer_get_dma_dev(const struct iio_dev *indio_dev, 1627 + struct iio_buffer *buffer) 1628 + { 1629 + if (buffer->access->get_dma_dev) 1630 + return buffer->access->get_dma_dev(buffer); 1631 + 1632 + return indio_dev->dev.parent; 1633 + } 1634 + 1626 1635 static struct dma_buf_attachment * 1627 1636 iio_buffer_find_attachment(struct iio_dev_buffer_pair *ib, 1628 1637 struct dma_buf *dmabuf, bool nonblock) 1629 1638 { 1630 - struct device *dev = ib->indio_dev->dev.parent; 1631 1639 struct iio_buffer *buffer = ib->buffer; 1640 + struct device *dma_dev = iio_buffer_get_dma_dev(ib->indio_dev, buffer); 1632 1641 struct dma_buf_attachment *attach = NULL; 1633 1642 struct iio_dmabuf_priv *priv; 1634 1643 1635 1644 guard(mutex)(&buffer->dmabufs_mutex); 1636 1645 1637 1646 list_for_each_entry(priv, &buffer->dmabufs, entry) { 1638 - if (priv->attach->dev == dev 1647 + if (priv->attach->dev == dma_dev 1639 1648 && priv->attach->dmabuf == dmabuf) { 1640 1649 attach = priv->attach; 1641 1650 break; ··· 1662 1653 { 1663 1654 struct iio_dev *indio_dev = ib->indio_dev; 1664 1655 struct iio_buffer *buffer = ib->buffer; 1656 + struct device *dma_dev = iio_buffer_get_dma_dev(indio_dev, buffer); 1665 1657 struct dma_buf_attachment *attach; 1666 1658 struct iio_dmabuf_priv *priv, *each; 1667 1659 struct dma_buf *dmabuf; ··· 1689 1679 goto err_free_priv; 1690 1680 } 1691 1681 1692 - attach = dma_buf_attach(dmabuf, indio_dev->dev.parent); 1682 + attach = dma_buf_attach(dmabuf, dma_dev); 1693 1683 if (IS_ERR(attach)) { 1694 1684 err = PTR_ERR(attach); 1695 1685 goto err_dmabuf_put; ··· 1729 1719 * combo. If we do, refuse to attach. 
1730 1720 */ 1731 1721 list_for_each_entry(each, &buffer->dmabufs, entry) { 1732 - if (each->attach->dev == indio_dev->dev.parent 1722 + if (each->attach->dev == dma_dev 1733 1723 && each->attach->dmabuf == dmabuf) { 1734 1724 /* 1735 1725 * We unlocked the reservation object, so going through ··· 1768 1758 { 1769 1759 struct iio_buffer *buffer = ib->buffer; 1770 1760 struct iio_dev *indio_dev = ib->indio_dev; 1761 + struct device *dma_dev = iio_buffer_get_dma_dev(indio_dev, buffer); 1771 1762 struct iio_dmabuf_priv *priv; 1772 1763 struct dma_buf *dmabuf; 1773 1764 int dmabuf_fd, ret = -EPERM; ··· 1783 1772 guard(mutex)(&buffer->dmabufs_mutex); 1784 1773 1785 1774 list_for_each_entry(priv, &buffer->dmabufs, entry) { 1786 - if (priv->attach->dev == indio_dev->dev.parent 1775 + if (priv->attach->dev == dma_dev 1787 1776 && priv->attach->dmabuf == dmabuf) { 1788 1777 list_del(&priv->entry); 1789 1778
+2
include/linux/iio/buffer_impl.h
··· 50 50 * @enqueue_dmabuf: called from userspace via ioctl to queue this DMABUF 51 51 * object to this buffer. Requires a valid DMABUF fd, that 52 52 * was previouly attached to this buffer. 53 + * @get_dma_dev: called to get the DMA channel associated with this buffer. 53 54 * @lock_queue: called when the core needs to lock the buffer queue; 54 55 * it is used when enqueueing DMABUF objects. 55 56 * @unlock_queue: used to unlock a previously locked buffer queue ··· 91 90 struct iio_dma_buffer_block *block, 92 91 struct dma_fence *fence, struct sg_table *sgt, 93 92 size_t size, bool cyclic); 93 + struct device * (*get_dma_dev)(struct iio_buffer *buffer); 94 94 void (*lock_queue)(struct iio_buffer *buffer); 95 95 void (*unlock_queue)(struct iio_buffer *buffer); 96 96