Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

dmaengine: core: Introduce new, universal API to request a channel

The two API functions can cover most, if not all, current APIs used to
request a channel. With minimal effort, dmaengine drivers, platforms and
dmaengine user drivers can be converted to use the two functions.

struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask);

To request any channel matching the requested capabilities; can be
used to request a channel for memcpy, memset, xor, etc. where no hardware
synchronization is needed.

struct dma_chan *dma_request_chan(struct device *dev, const char *name);
To request a slave channel. The dma_request_chan() will try to find the
channel via DT, ACPI or, in case the kernel was booted in non DT/ACPI mode,
it will use a filter lookup table and retrieve the needed information from
the dma_slave_map provided by the DMA drivers.
This legacy mode needs changes in platform code, in dmaengine drivers and
finally the dmaengine user drivers can be converted:

For each dmaengine driver an array of DMA device, slave and the parameter
for the filter function needs to be added:

static const struct dma_slave_map da830_edma_map[] = {
{ "davinci-mcasp.0", "rx", EDMA_FILTER_PARAM(0, 0) },
{ "davinci-mcasp.0", "tx", EDMA_FILTER_PARAM(0, 1) },
{ "davinci-mcasp.1", "rx", EDMA_FILTER_PARAM(0, 2) },
{ "davinci-mcasp.1", "tx", EDMA_FILTER_PARAM(0, 3) },
{ "davinci-mcasp.2", "rx", EDMA_FILTER_PARAM(0, 4) },
{ "davinci-mcasp.2", "tx", EDMA_FILTER_PARAM(0, 5) },
{ "spi_davinci.0", "rx", EDMA_FILTER_PARAM(0, 14) },
{ "spi_davinci.0", "tx", EDMA_FILTER_PARAM(0, 15) },
{ "da830-mmc.0", "rx", EDMA_FILTER_PARAM(0, 16) },
{ "da830-mmc.0", "tx", EDMA_FILTER_PARAM(0, 17) },
{ "spi_davinci.1", "rx", EDMA_FILTER_PARAM(0, 18) },
{ "spi_davinci.1", "tx", EDMA_FILTER_PARAM(0, 19) },
};

This information is going to be needed by the dmaengine driver, so
modification to the platform_data is needed, and the driver map should be
added to the pdata of the DMA driver:

da8xx_edma0_pdata.slave_map = da830_edma_map;
da8xx_edma0_pdata.slavecnt = ARRAY_SIZE(da830_edma_map);

The DMA driver then needs to configure the needed device -> filter_fn
mapping before it registers with dma_async_device_register() :

ecc->dma_slave.filter_map.map = info->slave_map;
ecc->dma_slave.filter_map.mapcnt = info->slavecnt;
ecc->dma_slave.filter_map.fn = edma_filter_fn;

When neither DT nor ACPI lookup is available, dma_request_chan() will
try to match the requester's device name with the filter_map's list of
device names; when a match is found it will use the information from the
dma_slave_map to get the channel with the dma_get_channel() internal
function.

Signed-off-by: Peter Ujfalusi <peter.ujfalusi@ti.com>
Reviewed-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>

authored by

Peter Ujfalusi and committed by
Vinod Koul
a8135d0d 7bd903c5

+126 -35
+5 -16
Documentation/dmaengine/client.txt
··· 22 22 Channel allocation is slightly different in the slave DMA context, 23 23 client drivers typically need a channel from a particular DMA 24 24 controller only and even in some cases a specific channel is desired. 25 - To request a channel dma_request_channel() API is used. 25 + To request a channel dma_request_chan() API is used. 26 26 27 27 Interface: 28 - struct dma_chan *dma_request_channel(dma_cap_mask_t mask, 29 - dma_filter_fn filter_fn, 30 - void *filter_param); 31 - where dma_filter_fn is defined as: 32 - typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param); 28 + struct dma_chan *dma_request_chan(struct device *dev, const char *name); 33 29 34 - The 'filter_fn' parameter is optional, but highly recommended for 35 - slave and cyclic channels as they typically need to obtain a specific 36 - DMA channel. 37 - 38 - When the optional 'filter_fn' parameter is NULL, dma_request_channel() 39 - simply returns the first channel that satisfies the capability mask. 40 - 41 - Otherwise, the 'filter_fn' routine will be called once for each free 42 - channel which has a capability in 'mask'. 'filter_fn' is expected to 43 - return 'true' when the desired DMA channel is found. 30 + Which will find and return the 'name' DMA channel associated with the 'dev' 31 + device. The association is done via DT, ACPI or board file based 32 + dma_slave_map matching table. 44 33 45 34 A channel allocated via this interface is exclusive to the caller, 46 35 until dma_release_channel() is called.
+77 -12
drivers/dma/dmaengine.c
··· 43 43 44 44 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 45 45 46 + #include <linux/platform_device.h> 46 47 #include <linux/dma-mapping.h> 47 48 #include <linux/init.h> 48 49 #include <linux/module.h> ··· 666 665 } 667 666 EXPORT_SYMBOL_GPL(__dma_request_channel); 668 667 668 + static const struct dma_slave_map *dma_filter_match(struct dma_device *device, 669 + const char *name, 670 + struct device *dev) 671 + { 672 + int i; 673 + 674 + if (!device->filter.mapcnt) 675 + return NULL; 676 + 677 + for (i = 0; i < device->filter.mapcnt; i++) { 678 + const struct dma_slave_map *map = &device->filter.map[i]; 679 + 680 + if (!strcmp(map->devname, dev_name(dev)) && 681 + !strcmp(map->slave, name)) 682 + return map; 683 + } 684 + 685 + return NULL; 686 + } 687 + 669 688 /** 670 - * dma_request_slave_channel_reason - try to allocate an exclusive slave channel 689 + * dma_request_chan - try to allocate an exclusive slave channel 671 690 * @dev: pointer to client device structure 672 691 * @name: slave channel name 673 692 * 674 693 * Returns pointer to appropriate DMA channel on success or an error pointer. 
675 694 */ 676 - struct dma_chan *dma_request_slave_channel_reason(struct device *dev, 677 - const char *name) 695 + struct dma_chan *dma_request_chan(struct device *dev, const char *name) 678 696 { 697 + struct dma_device *d, *_d; 698 + struct dma_chan *chan = NULL; 699 + 679 700 /* If device-tree is present get slave info from here */ 680 701 if (dev->of_node) 681 - return of_dma_request_slave_channel(dev->of_node, name); 702 + chan = of_dma_request_slave_channel(dev->of_node, name); 682 703 683 704 /* If device was enumerated by ACPI get slave info from here */ 684 - if (ACPI_HANDLE(dev)) 685 - return acpi_dma_request_slave_chan_by_name(dev, name); 705 + if (has_acpi_companion(dev) && !chan) 706 + chan = acpi_dma_request_slave_chan_by_name(dev, name); 686 707 687 - return ERR_PTR(-ENODEV); 708 + if (chan) { 709 + /* Valid channel found or requester need to be deferred */ 710 + if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER) 711 + return chan; 712 + } 713 + 714 + /* Try to find the channel via the DMA filter map(s) */ 715 + mutex_lock(&dma_list_mutex); 716 + list_for_each_entry_safe(d, _d, &dma_device_list, global_node) { 717 + dma_cap_mask_t mask; 718 + const struct dma_slave_map *map = dma_filter_match(d, name, dev); 719 + 720 + if (!map) 721 + continue; 722 + 723 + dma_cap_zero(mask); 724 + dma_cap_set(DMA_SLAVE, mask); 725 + 726 + chan = find_candidate(d, &mask, d->filter.fn, map->param); 727 + if (!IS_ERR(chan)) 728 + break; 729 + } 730 + mutex_unlock(&dma_list_mutex); 731 + 732 + return chan ? 
chan : ERR_PTR(-EPROBE_DEFER); 688 733 } 689 - EXPORT_SYMBOL_GPL(dma_request_slave_channel_reason); 734 + EXPORT_SYMBOL_GPL(dma_request_chan); 690 735 691 736 /** 692 737 * dma_request_slave_channel - try to allocate an exclusive slave channel ··· 744 697 struct dma_chan *dma_request_slave_channel(struct device *dev, 745 698 const char *name) 746 699 { 747 - struct dma_chan *ch = dma_request_slave_channel_reason(dev, name); 700 + struct dma_chan *ch = dma_request_chan(dev, name); 748 701 if (IS_ERR(ch)) 749 702 return NULL; 750 - 751 - dma_cap_set(DMA_PRIVATE, ch->device->cap_mask); 752 - ch->device->privatecnt++; 753 703 754 704 return ch; 755 705 } 756 706 EXPORT_SYMBOL_GPL(dma_request_slave_channel); 707 + 708 + /** 709 + * dma_request_chan_by_mask - allocate a channel satisfying certain capabilities 710 + * @mask: capabilities that the channel must satisfy 711 + * 712 + * Returns pointer to appropriate DMA channel on success or an error pointer. 713 + */ 714 + struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask) 715 + { 716 + struct dma_chan *chan; 717 + 718 + if (!mask) 719 + return ERR_PTR(-ENODEV); 720 + 721 + chan = __dma_request_channel(mask, NULL, NULL); 722 + if (!chan) 723 + chan = ERR_PTR(-ENODEV); 724 + 725 + return chan; 726 + } 727 + EXPORT_SYMBOL_GPL(dma_request_chan_by_mask); 757 728 758 729 void dma_release_channel(struct dma_chan *chan) 759 730 {
+44 -7
include/linux/dmaengine.h
··· 607 607 }; 608 608 609 609 /** 610 + * struct dma_slave_map - associates slave device and it's slave channel with 611 + * parameter to be used by a filter function 612 + * @devname: name of the device 613 + * @slave: slave channel name 614 + * @param: opaque parameter to pass to struct dma_filter.fn 615 + */ 616 + struct dma_slave_map { 617 + const char *devname; 618 + const char *slave; 619 + void *param; 620 + }; 621 + 622 + /** 623 + * struct dma_filter - information for slave device/channel to filter_fn/param 624 + * mapping 625 + * @fn: filter function callback 626 + * @mapcnt: number of slave device/channel in the map 627 + * @map: array of channel to filter mapping data 628 + */ 629 + struct dma_filter { 630 + dma_filter_fn fn; 631 + int mapcnt; 632 + const struct dma_slave_map *map; 633 + }; 634 + 635 + /** 610 636 * struct dma_device - info on the entity supplying DMA services 611 637 * @chancnt: how many DMA channels are supported 612 638 * @privatecnt: how many DMA channels are requested by dma_request_channel 613 639 * @channels: the list of struct dma_chan 614 640 * @global_node: list_head for global dma_device_list 641 + * @filter: information for device/slave to filter function/param mapping 615 642 * @cap_mask: one or more dma_capability flags 616 643 * @max_xor: maximum number of xor sources, 0 if no capability 617 644 * @max_pq: maximum number of PQ sources and PQ-continue capability ··· 693 666 unsigned int privatecnt; 694 667 struct list_head channels; 695 668 struct list_head global_node; 669 + struct dma_filter filter; 696 670 dma_cap_mask_t cap_mask; 697 671 unsigned short max_xor; 698 672 unsigned short max_pq; ··· 1168 1140 void dma_issue_pending_all(void); 1169 1141 struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask, 1170 1142 dma_filter_fn fn, void *fn_param); 1171 - struct dma_chan *dma_request_slave_channel_reason(struct device *dev, 1172 - const char *name); 1173 1143 struct dma_chan *dma_request_slave_channel(struct 
device *dev, const char *name); 1144 + 1145 + struct dma_chan *dma_request_chan(struct device *dev, const char *name); 1146 + struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask); 1147 + 1174 1148 void dma_release_channel(struct dma_chan *chan); 1175 1149 int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps); 1176 1150 #else ··· 1196 1166 { 1197 1167 return NULL; 1198 1168 } 1199 - static inline struct dma_chan *dma_request_slave_channel_reason( 1200 - struct device *dev, const char *name) 1201 - { 1202 - return ERR_PTR(-ENODEV); 1203 - } 1204 1169 static inline struct dma_chan *dma_request_slave_channel(struct device *dev, 1205 1170 const char *name) 1206 1171 { 1207 1172 return NULL; 1173 + } 1174 + static inline struct dma_chan *dma_request_chan(struct device *dev, 1175 + const char *name) 1176 + { 1177 + return ERR_PTR(-ENODEV); 1178 + } 1179 + static inline struct dma_chan *dma_request_chan_by_mask( 1180 + const dma_cap_mask_t *mask) 1181 + { 1182 + return ERR_PTR(-ENODEV); 1208 1183 } 1209 1184 static inline void dma_release_channel(struct dma_chan *chan) 1210 1185 { ··· 1220 1185 return -ENXIO; 1221 1186 } 1222 1187 #endif 1188 + 1189 + #define dma_request_slave_channel_reason(dev, name) dma_request_chan(dev, name) 1223 1190 1224 1191 static inline int dmaengine_desc_set_reuse(struct dma_async_tx_descriptor *tx) 1225 1192 {