Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

char: xillybus: Eliminate redundant wrappers to DMA related calls

The driver was originally written with the assumption that a different
API must be used for DMA-related functions depending on whether the
device is PCIe based or not. Since Xillybus' driver supports devices on
a PCIe bus (with xillybus_pcie) as well as devices connected directly
to the processor (with xillybus_of), it originally used wrapper
functions to ensure that the appropriate API was used for each.

This patch eliminates these wrapper functions, as all variants now use
the same dma_* API. This is most evident in the code deleted from
xillybus_pcie.c and xillybus_of.c.

It also eliminates the OF driver's check for a "dma-coherent" attribute
in the device's OF entry, since this is taken care of by the kernel's
implementation of dma_sync_single_for_*().

There is, however, still a need for one wrapper function, which is merged
from xillybus_pcie.c and xillybus_of.c into xillybus_core.c: the call to
dma_map_single() is wrapped by a function that uses the Managed Device
(devres) framework, in the absence of a relevant function in the current
kernel's API.

Suggested-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
Suggested-by: Arnd Bergmann <arnd@arndb.de>
Reviewed-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Eli Billauer <eli.billauer@gmail.com>
Link: https://lore.kernel.org/r/20210929094442.46383-1-eli.billauer@gmail.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

authored by

Eli Billauer and committed by
Greg Kroah-Hartman
c31bbc14 847afd7b

+86 -242
+2 -21
drivers/char/xillybus/xillybus.h
··· 88 88 89 89 struct xilly_endpoint { 90 90 struct device *dev; 91 - struct xilly_endpoint_hardware *ephw; 91 + struct module *owner; 92 92 93 93 int dma_using_dac; /* =1 if 64-bit DMA is used, =0 otherwise. */ 94 94 __iomem void *registers; ··· 108 108 unsigned int msg_buf_size; 109 109 }; 110 110 111 - struct xilly_endpoint_hardware { 112 - struct module *owner; 113 - void (*hw_sync_sgl_for_cpu)(struct xilly_endpoint *, 114 - dma_addr_t, 115 - size_t, 116 - int); 117 - void (*hw_sync_sgl_for_device)(struct xilly_endpoint *, 118 - dma_addr_t, 119 - size_t, 120 - int); 121 - int (*map_single)(struct xilly_endpoint *, 122 - void *, 123 - size_t, 124 - int, 125 - dma_addr_t *); 126 - }; 127 - 128 111 struct xilly_mapping { 129 112 struct device *device; 130 113 dma_addr_t dma_addr; ··· 117 134 118 135 irqreturn_t xillybus_isr(int irq, void *data); 119 136 120 - struct xilly_endpoint *xillybus_init_endpoint(struct device *dev, 121 - struct xilly_endpoint_hardware 122 - *ephw); 137 + struct xilly_endpoint *xillybus_init_endpoint(struct device *dev); 123 138 124 139 int xillybus_endpoint_discovery(struct xilly_endpoint *endpoint); 125 140
+78 -51
drivers/char/xillybus/xillybus_core.c
··· 122 122 buf = ep->msgbuf_addr; 123 123 buf_size = ep->msg_buf_size/sizeof(u32); 124 124 125 - ep->ephw->hw_sync_sgl_for_cpu(ep, 126 - ep->msgbuf_dma_addr, 127 - ep->msg_buf_size, 128 - DMA_FROM_DEVICE); 125 + dma_sync_single_for_cpu(ep->dev, ep->msgbuf_dma_addr, 126 + ep->msg_buf_size, DMA_FROM_DEVICE); 129 127 130 128 for (i = 0; i < buf_size; i += 2) { 131 129 if (((buf[i+1] >> 28) & 0xf) != ep->msg_counter) { ··· 138 140 dev_err(ep->dev, 139 141 "Lost sync with interrupt messages. Stopping.\n"); 140 142 } else { 141 - ep->ephw->hw_sync_sgl_for_device( 142 - ep, 143 - ep->msgbuf_dma_addr, 144 - ep->msg_buf_size, 145 - DMA_FROM_DEVICE); 143 + dma_sync_single_for_device(ep->dev, 144 + ep->msgbuf_dma_addr, 145 + ep->msg_buf_size, 146 + DMA_FROM_DEVICE); 146 147 147 148 iowrite32(0x01, /* Message NACK */ 148 149 ep->registers + fpga_msg_ctrl_reg); ··· 272 275 } 273 276 } 274 277 275 - ep->ephw->hw_sync_sgl_for_device(ep, 276 - ep->msgbuf_dma_addr, 277 - ep->msg_buf_size, 278 - DMA_FROM_DEVICE); 278 + dma_sync_single_for_device(ep->dev, ep->msgbuf_dma_addr, 279 + ep->msg_buf_size, DMA_FROM_DEVICE); 279 280 280 281 ep->msg_counter = (ep->msg_counter + 1) & 0xf; 281 282 ep->failed_messages = 0; ··· 298 303 enum dma_data_direction direction; 299 304 u32 regdirection; 300 305 }; 306 + 307 + static void xilly_unmap(void *ptr) 308 + { 309 + struct xilly_mapping *data = ptr; 310 + 311 + dma_unmap_single(data->device, data->dma_addr, 312 + data->size, data->direction); 313 + 314 + kfree(ptr); 315 + } 316 + 317 + static int xilly_map_single(struct xilly_endpoint *ep, 318 + void *ptr, 319 + size_t size, 320 + int direction, 321 + dma_addr_t *ret_dma_handle 322 + ) 323 + { 324 + dma_addr_t addr; 325 + struct xilly_mapping *this; 326 + 327 + this = kzalloc(sizeof(*this), GFP_KERNEL); 328 + if (!this) 329 + return -ENOMEM; 330 + 331 + addr = dma_map_single(ep->dev, ptr, size, direction); 332 + 333 + if (dma_mapping_error(ep->dev, addr)) { 334 + kfree(this); 335 + return 
-ENODEV; 336 + } 337 + 338 + this->device = ep->dev; 339 + this->dma_addr = addr; 340 + this->size = size; 341 + this->direction = direction; 342 + 343 + *ret_dma_handle = addr; 344 + 345 + return devm_add_action_or_reset(ep->dev, xilly_unmap, this); 346 + } 301 347 302 348 static int xilly_get_dma_buffers(struct xilly_endpoint *ep, 303 349 struct xilly_alloc_state *s, ··· 391 355 s->left_of_salami = allocsize; 392 356 } 393 357 394 - rc = ep->ephw->map_single(ep, s->salami, 395 - bytebufsize, s->direction, 396 - &dma_addr); 358 + rc = xilly_map_single(ep, s->salami, 359 + bytebufsize, s->direction, 360 + &dma_addr); 397 361 if (rc) 398 362 return rc; 399 363 ··· 656 620 return -ENODEV; 657 621 } 658 622 659 - endpoint->ephw->hw_sync_sgl_for_cpu( 660 - channel->endpoint, 661 - channel->wr_buffers[0]->dma_addr, 662 - channel->wr_buf_size, 663 - DMA_FROM_DEVICE); 623 + dma_sync_single_for_cpu(channel->endpoint->dev, 624 + channel->wr_buffers[0]->dma_addr, 625 + channel->wr_buf_size, 626 + DMA_FROM_DEVICE); 664 627 665 628 if (channel->wr_buffers[0]->end_offset != endpoint->idtlen) { 666 629 dev_err(endpoint->dev, ··· 770 735 if (!empty) { /* Go on, now without the spinlock */ 771 736 772 737 if (bufpos == 0) /* Position zero means it's virgin */ 773 - channel->endpoint->ephw->hw_sync_sgl_for_cpu( 774 - channel->endpoint, 775 - channel->wr_buffers[bufidx]->dma_addr, 776 - channel->wr_buf_size, 777 - DMA_FROM_DEVICE); 738 + dma_sync_single_for_cpu(channel->endpoint->dev, 739 + channel->wr_buffers[bufidx]->dma_addr, 740 + channel->wr_buf_size, 741 + DMA_FROM_DEVICE); 778 742 779 743 if (copy_to_user( 780 744 userbuf, ··· 785 751 bytes_done += howmany; 786 752 787 753 if (bufferdone) { 788 - channel->endpoint->ephw->hw_sync_sgl_for_device( 789 - channel->endpoint, 790 - channel->wr_buffers[bufidx]->dma_addr, 791 - channel->wr_buf_size, 792 - DMA_FROM_DEVICE); 754 + dma_sync_single_for_device(channel->endpoint->dev, 755 + channel->wr_buffers[bufidx]->dma_addr, 756 + 
channel->wr_buf_size, 757 + DMA_FROM_DEVICE); 793 758 794 759 /* 795 760 * Tell FPGA the buffer is done with. It's an ··· 1088 1055 else 1089 1056 channel->rd_host_buf_idx++; 1090 1057 1091 - channel->endpoint->ephw->hw_sync_sgl_for_device( 1092 - channel->endpoint, 1093 - channel->rd_buffers[bufidx]->dma_addr, 1094 - channel->rd_buf_size, 1095 - DMA_TO_DEVICE); 1058 + dma_sync_single_for_device(channel->endpoint->dev, 1059 + channel->rd_buffers[bufidx]->dma_addr, 1060 + channel->rd_buf_size, 1061 + DMA_TO_DEVICE); 1096 1062 1097 1063 mutex_lock(&channel->endpoint->register_mutex); 1098 1064 ··· 1307 1275 1308 1276 if ((bufpos == 0) || /* Zero means it's virgin */ 1309 1277 (channel->rd_leftovers[3] != 0)) { 1310 - channel->endpoint->ephw->hw_sync_sgl_for_cpu( 1311 - channel->endpoint, 1312 - channel->rd_buffers[bufidx]->dma_addr, 1313 - channel->rd_buf_size, 1314 - DMA_TO_DEVICE); 1278 + dma_sync_single_for_cpu(channel->endpoint->dev, 1279 + channel->rd_buffers[bufidx]->dma_addr, 1280 + channel->rd_buf_size, 1281 + DMA_TO_DEVICE); 1315 1282 1316 1283 /* Virgin, but leftovers are due */ 1317 1284 for (i = 0; i < bufpos; i++) ··· 1328 1297 bytes_done += howmany; 1329 1298 1330 1299 if (bufferdone) { 1331 - channel->endpoint->ephw->hw_sync_sgl_for_device( 1332 - channel->endpoint, 1333 - channel->rd_buffers[bufidx]->dma_addr, 1334 - channel->rd_buf_size, 1335 - DMA_TO_DEVICE); 1300 + dma_sync_single_for_device(channel->endpoint->dev, 1301 + channel->rd_buffers[bufidx]->dma_addr, 1302 + channel->rd_buf_size, 1303 + DMA_TO_DEVICE); 1336 1304 1337 1305 mutex_lock(&channel->endpoint->register_mutex); 1338 1306 ··· 1802 1772 .poll = xillybus_poll, 1803 1773 }; 1804 1774 1805 - struct xilly_endpoint *xillybus_init_endpoint(struct device *dev, 1806 - struct xilly_endpoint_hardware 1807 - *ephw) 1775 + struct xilly_endpoint *xillybus_init_endpoint(struct device *dev) 1808 1776 { 1809 1777 struct xilly_endpoint *endpoint; 1810 1778 ··· 1811 1783 return NULL; 1812 1784 1813 
1785 endpoint->dev = dev; 1814 - endpoint->ephw = ephw; 1815 1786 endpoint->msg_counter = 0x0b; 1816 1787 endpoint->failed_messages = 0; 1817 1788 endpoint->fatal_error = 0; ··· 1937 1910 goto failed_idt; 1938 1911 1939 1912 rc = xillybus_init_chrdev(dev, &xillybus_fops, 1940 - endpoint->ephw->owner, endpoint, 1913 + endpoint->owner, endpoint, 1941 1914 idt_handle.names, 1942 1915 idt_handle.names_len, 1943 1916 endpoint->num_channels,
+3 -83
drivers/char/xillybus/xillybus_of.c
··· 31 31 32 32 MODULE_DEVICE_TABLE(of, xillybus_of_match); 33 33 34 - static void xilly_dma_sync_single_for_cpu_of(struct xilly_endpoint *ep, 35 - dma_addr_t dma_handle, 36 - size_t size, 37 - int direction) 38 - { 39 - dma_sync_single_for_cpu(ep->dev, dma_handle, size, direction); 40 - } 41 - 42 - static void xilly_dma_sync_single_for_device_of(struct xilly_endpoint *ep, 43 - dma_addr_t dma_handle, 44 - size_t size, 45 - int direction) 46 - { 47 - dma_sync_single_for_device(ep->dev, dma_handle, size, direction); 48 - } 49 - 50 - static void xilly_dma_sync_single_nop(struct xilly_endpoint *ep, 51 - dma_addr_t dma_handle, 52 - size_t size, 53 - int direction) 54 - { 55 - } 56 - 57 - static void xilly_of_unmap(void *ptr) 58 - { 59 - struct xilly_mapping *data = ptr; 60 - 61 - dma_unmap_single(data->device, data->dma_addr, 62 - data->size, data->direction); 63 - 64 - kfree(ptr); 65 - } 66 - 67 - static int xilly_map_single_of(struct xilly_endpoint *ep, 68 - void *ptr, 69 - size_t size, 70 - int direction, 71 - dma_addr_t *ret_dma_handle 72 - ) 73 - { 74 - dma_addr_t addr; 75 - struct xilly_mapping *this; 76 - 77 - this = kzalloc(sizeof(*this), GFP_KERNEL); 78 - if (!this) 79 - return -ENOMEM; 80 - 81 - addr = dma_map_single(ep->dev, ptr, size, direction); 82 - 83 - if (dma_mapping_error(ep->dev, addr)) { 84 - kfree(this); 85 - return -ENODEV; 86 - } 87 - 88 - this->device = ep->dev; 89 - this->dma_addr = addr; 90 - this->size = size; 91 - this->direction = direction; 92 - 93 - *ret_dma_handle = addr; 94 - 95 - return devm_add_action_or_reset(ep->dev, xilly_of_unmap, this); 96 - } 97 - 98 - static struct xilly_endpoint_hardware of_hw = { 99 - .owner = THIS_MODULE, 100 - .hw_sync_sgl_for_cpu = xilly_dma_sync_single_for_cpu_of, 101 - .hw_sync_sgl_for_device = xilly_dma_sync_single_for_device_of, 102 - .map_single = xilly_map_single_of, 103 - }; 104 - 105 - static struct xilly_endpoint_hardware of_hw_coherent = { 106 - .owner = THIS_MODULE, 107 - .hw_sync_sgl_for_cpu = 
xilly_dma_sync_single_nop, 108 - .hw_sync_sgl_for_device = xilly_dma_sync_single_nop, 109 - .map_single = xilly_map_single_of, 110 - }; 111 - 112 34 static int xilly_drv_probe(struct platform_device *op) 113 35 { 114 36 struct device *dev = &op->dev; 115 37 struct xilly_endpoint *endpoint; 116 38 int rc; 117 39 int irq; 118 - struct xilly_endpoint_hardware *ephw = &of_hw; 119 40 120 - if (of_property_read_bool(dev->of_node, "dma-coherent")) 121 - ephw = &of_hw_coherent; 122 - 123 - endpoint = xillybus_init_endpoint(dev, ephw); 41 + endpoint = xillybus_init_endpoint(dev); 124 42 125 43 if (!endpoint) 126 44 return -ENOMEM; 127 45 128 46 dev_set_drvdata(dev, endpoint); 47 + 48 + endpoint->owner = THIS_MODULE; 129 49 130 50 endpoint->registers = devm_platform_ioremap_resource(op, 0); 131 51 if (IS_ERR(endpoint->registers))
+3 -87
drivers/char/xillybus/xillybus_pcie.c
··· 32 32 { /* End: all zeroes */ } 33 33 }; 34 34 35 - static int xilly_pci_direction(int direction) 36 - { 37 - switch (direction) { 38 - case DMA_TO_DEVICE: 39 - case DMA_FROM_DEVICE: 40 - return direction; 41 - default: 42 - return DMA_BIDIRECTIONAL; 43 - } 44 - } 45 - 46 - static void xilly_dma_sync_single_for_cpu_pci(struct xilly_endpoint *ep, 47 - dma_addr_t dma_handle, 48 - size_t size, 49 - int direction) 50 - { 51 - dma_sync_single_for_cpu(ep->dev, dma_handle, size, 52 - xilly_pci_direction(direction)); 53 - } 54 - 55 - static void xilly_dma_sync_single_for_device_pci(struct xilly_endpoint *ep, 56 - dma_addr_t dma_handle, 57 - size_t size, 58 - int direction) 59 - { 60 - dma_sync_single_for_device(ep->dev, dma_handle, size, 61 - xilly_pci_direction(direction)); 62 - } 63 - 64 - static void xilly_pci_unmap(void *ptr) 65 - { 66 - struct xilly_mapping *data = ptr; 67 - 68 - dma_unmap_single(data->device, data->dma_addr, data->size, 69 - data->direction); 70 - 71 - kfree(ptr); 72 - } 73 - 74 - /* 75 - * Map either through the PCI DMA mapper or the non_PCI one. Behind the 76 - * scenes exactly the same functions are called with the same parameters, 77 - * but that can change. 
78 - */ 79 - 80 - static int xilly_map_single_pci(struct xilly_endpoint *ep, 81 - void *ptr, 82 - size_t size, 83 - int direction, 84 - dma_addr_t *ret_dma_handle 85 - ) 86 - { 87 - int pci_direction; 88 - dma_addr_t addr; 89 - struct xilly_mapping *this; 90 - 91 - this = kzalloc(sizeof(*this), GFP_KERNEL); 92 - if (!this) 93 - return -ENOMEM; 94 - 95 - pci_direction = xilly_pci_direction(direction); 96 - 97 - addr = dma_map_single(ep->dev, ptr, size, pci_direction); 98 - 99 - if (dma_mapping_error(ep->dev, addr)) { 100 - kfree(this); 101 - return -ENODEV; 102 - } 103 - 104 - this->device = ep->dev; 105 - this->dma_addr = addr; 106 - this->size = size; 107 - this->direction = pci_direction; 108 - 109 - *ret_dma_handle = addr; 110 - 111 - return devm_add_action_or_reset(ep->dev, xilly_pci_unmap, this); 112 - } 113 - 114 - static struct xilly_endpoint_hardware pci_hw = { 115 - .owner = THIS_MODULE, 116 - .hw_sync_sgl_for_cpu = xilly_dma_sync_single_for_cpu_pci, 117 - .hw_sync_sgl_for_device = xilly_dma_sync_single_for_device_pci, 118 - .map_single = xilly_map_single_pci, 119 - }; 120 - 121 35 static int xilly_probe(struct pci_dev *pdev, 122 36 const struct pci_device_id *ent) 123 37 { 124 38 struct xilly_endpoint *endpoint; 125 39 int rc; 126 40 127 - endpoint = xillybus_init_endpoint(&pdev->dev, &pci_hw); 41 + endpoint = xillybus_init_endpoint(&pdev->dev); 128 42 129 43 if (!endpoint) 130 44 return -ENOMEM; 131 45 132 46 pci_set_drvdata(pdev, endpoint); 47 + 48 + endpoint->owner = THIS_MODULE; 133 49 134 50 rc = pcim_enable_device(pdev); 135 51 if (rc) {