Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'for-linus' of git://git.infradead.org/users/vkoul/slave-dma

Pull slave-dmaengine updates from Vinod Koul:
"This time we have dmatest improvements from Andy along with dw_dmac
fixes. He has also added ACPI support for dmaengine.

Also we have a bunch of fixes going in DT support for dmaengine for
various folks. Then Haswell and other ioat changes from Dave and
SUDMAC support from Shimoda."

* 'for-linus' of git://git.infradead.org/users/vkoul/slave-dma: (53 commits)
dma: tegra: implement suspend/resume callbacks
dma:of: Use a mutex to protect the of_dma_list
dma: of: Fix of_node reference leak
dmaengine: sirf: move driver init from module_init to subsys_initcall
sudmac: add support for SUDMAC
dma: sh: add Kconfig
at_hdmac: move to generic DMA binding
ioatdma: ioat3_alloc_sed can be static
ioatdma: Adding write back descriptor error status support for ioatdma 3.3
ioatdma: S1200 platforms ioatdma channel 2 and 3 falsely advertise RAID cap
ioatdma: Adding support for 16 src PQ ops and super extended descriptors
ioatdma: Removing hw bug workaround for CB3.x .2 and earlier
dw_dmac: add ACPI support
dmaengine: call acpi_dma_request_slave_channel as well
dma: acpi-dma: introduce ACPI DMA helpers
dma: of: Remove unnecessary list_empty check
DMA: OF: Check properties value before running be32_to_cpup() on it
DMA: of: Constant names
ioatdma: skip silicon bug workaround for pq_align for cb3.3
ioatdma: Removing PQ val disable for cb3.3
...

+3223 -525
+77
Documentation/acpi/enumeration.txt
··· 66 66 drivers/acpi/acpi_platform.c. This limitation is only for the platform 67 67 devices, SPI and I2C devices are created automatically as described below. 68 68 69 + DMA support 70 + ~~~~~~~~~~~ 71 + DMA controllers enumerated via ACPI should be registered in the system to 72 + provide generic access to their resources. For example, a driver that would 73 + like to be accessible to slave devices via generic API call 74 + dma_request_slave_channel() must register itself at the end of the probe 75 + function like this: 76 + 77 + err = devm_acpi_dma_controller_register(dev, xlate_func, dw); 78 + /* Handle the error if it's not a case of !CONFIG_ACPI */ 79 + 80 + and implement custom xlate function if needed (usually acpi_dma_simple_xlate() 81 + is enough) which converts the FixedDMA resource provided by struct 82 + acpi_dma_spec into the corresponding DMA channel. A piece of code for that case 83 + could look like: 84 + 85 + #ifdef CONFIG_ACPI 86 + struct filter_args { 87 + /* Provide necessary information for the filter_func */ 88 + ... 89 + }; 90 + 91 + static bool filter_func(struct dma_chan *chan, void *param) 92 + { 93 + /* Choose the proper channel */ 94 + ... 95 + } 96 + 97 + static struct dma_chan *xlate_func(struct acpi_dma_spec *dma_spec, 98 + struct acpi_dma *adma) 99 + { 100 + dma_cap_mask_t cap; 101 + struct filter_args args; 102 + 103 + /* Prepare arguments for filter_func */ 104 + ... 105 + return dma_request_channel(cap, filter_func, &args); 106 + } 107 + #else 108 + static struct dma_chan *xlate_func(struct acpi_dma_spec *dma_spec, 109 + struct acpi_dma *adma) 110 + { 111 + return NULL; 112 + } 113 + #endif 114 + 115 + dma_request_slave_channel() will call xlate_func() for each registered DMA 116 + controller. In the xlate function the proper channel must be chosen based on 117 + information in struct acpi_dma_spec and the properties of the controller 118 + provided by struct acpi_dma. 
119 + 120 + Clients must call dma_request_slave_channel() with the string parameter that 121 + corresponds to a specific FixedDMA resource. By default "tx" means the first 122 + entry of the FixedDMA resource array, "rx" means the second entry. The table 123 + below shows a layout: 124 + 125 + Device (I2C0) 126 + { 127 + ... 128 + Method (_CRS, 0, NotSerialized) 129 + { 130 + Name (DBUF, ResourceTemplate () 131 + { 132 + FixedDMA (0x0018, 0x0004, Width32bit, _Y48) 133 + FixedDMA (0x0019, 0x0005, Width32bit, ) 134 + }) 135 + ... 136 + } 137 + } 138 + 139 + So, the FixedDMA with request line 0x0018 is "tx" and next one is "rx" in 140 + this example. 141 + 142 + In robust cases the client unfortunately needs to call 143 + acpi_dma_request_slave_chan_by_index() directly and therefore choose the 144 + specific FixedDMA resource by its index. 145 + 69 146 SPI serial bus support 70 147 ~~~~~~~~~~~~~~~~~~~~~~ 71 148 Slave devices behind SPI bus have SpiSerialBus resource attached to them.
+30 -5
Documentation/devicetree/bindings/dma/atmel-dma.txt
··· 1 1 * Atmel Direct Memory Access Controller (DMA) 2 2 3 3 Required properties: 4 - - compatible: Should be "atmel,<chip>-dma" 5 - - reg: Should contain DMA registers location and length 6 - - interrupts: Should contain DMA interrupt 4 + - compatible: Should be "atmel,<chip>-dma". 5 + - reg: Should contain DMA registers location and length. 6 + - interrupts: Should contain DMA interrupt. 7 + - #dma-cells: Must be <2>, used to represent the number of integer cells in 8 + the dmas property of client devices. 7 9 8 - Examples: 10 + Example: 9 11 10 - dma@ffffec00 { 12 + dma0: dma@ffffec00 { 11 13 compatible = "atmel,at91sam9g45-dma"; 12 14 reg = <0xffffec00 0x200>; 13 15 interrupts = <21>; 16 + #dma-cells = <2>; 17 + }; 18 + 19 + DMA clients connected to the Atmel DMA controller must use the format 20 + described in the dma.txt file, using a three-cell specifier for each channel: 21 + a phandle plus two interger cells. 22 + The three cells in order are: 23 + 24 + 1. A phandle pointing to the DMA controller. 25 + 2. The memory interface (16 most significant bits), the peripheral interface 26 + (16 less significant bits). 27 + 3. The peripheral identifier for the hardware handshaking interface. The 28 + identifier can be different for tx and rx. 29 + 30 + Example: 31 + 32 + i2c0@i2c@f8010000 { 33 + compatible = "atmel,at91sam9x5-i2c"; 34 + reg = <0xf8010000 0x100>; 35 + interrupts = <9 4 6>; 36 + dmas = <&dma0 1 7>, 37 + <&dma0 1 8>; 38 + dma-names = "tx", "rx"; 14 39 };
+81
Documentation/dmatest.txt
··· 1 + DMA Test Guide 2 + ============== 3 + 4 + Andy Shevchenko <andriy.shevchenko@linux.intel.com> 5 + 6 + This small document introduces how to test DMA drivers using dmatest module. 7 + 8 + Part 1 - How to build the test module 9 + 10 + The menuconfig contains an option that could be found by following path: 11 + Device Drivers -> DMA Engine support -> DMA Test client 12 + 13 + In the configuration file the option called CONFIG_DMATEST. The dmatest could 14 + be built as module or inside kernel. Let's consider those cases. 15 + 16 + Part 2 - When dmatest is built as a module... 17 + 18 + After mounting debugfs and loading the module, the /sys/kernel/debug/dmatest 19 + folder with nodes will be created. They are the same as module parameters with 20 + addition of the 'run' node that controls run and stop phases of the test. 21 + 22 + Note that in this case test will not run on load automatically. 23 + 24 + Example of usage: 25 + % echo dma0chan0 > /sys/kernel/debug/dmatest/channel 26 + % echo 2000 > /sys/kernel/debug/dmatest/timeout 27 + % echo 1 > /sys/kernel/debug/dmatest/iterations 28 + % echo 1 > /sys/kernel/debug/dmatest/run 29 + 30 + Hint: available channel list could be extracted by running the following 31 + command: 32 + % ls -1 /sys/class/dma/ 33 + 34 + After a while you will start to get messages about current status or error like 35 + in the original code. 36 + 37 + Note that running a new test will stop any in progress test. 38 + 39 + The following command should return actual state of the test. 40 + % cat /sys/kernel/debug/dmatest/run 41 + 42 + To wait for test done the user may perform a busy loop that checks the state. 43 + 44 + % while [ $(cat /sys/kernel/debug/dmatest/run) = "Y" ] 45 + > do 46 + > echo -n "." 47 + > sleep 1 48 + > done 49 + > echo 50 + 51 + Part 3 - When built-in in the kernel... 52 + 53 + The module parameters that is supplied to the kernel command line will be used 54 + for the first performed test. 
After user gets a control, the test could be 55 + interrupted or re-run with same or different parameters. For the details see 56 + the above section "Part 2 - When dmatest is built as a module..." 57 + 58 + In both cases the module parameters are used as initial values for the test case. 59 + You always could check them at run-time by running 60 + % grep -H . /sys/module/dmatest/parameters/* 61 + 62 + Part 4 - Gathering the test results 63 + 64 + The module provides a storage for the test results in the memory. The gathered 65 + data could be used after test is done. 66 + 67 + The special file 'results' in the debugfs represents gathered data of the in 68 + progress test. The messages collected are printed to the kernel log as well. 69 + 70 + Example of output: 71 + % cat /sys/kernel/debug/dmatest/results 72 + dma0chan0-copy0: #1: No errors with src_off=0x7bf dst_off=0x8ad len=0x3fea (0) 73 + 74 + The message format is unified across the different types of errors. A number in 75 + the parens represents additional information, e.g. error code, error counter, 76 + or status. 77 + 78 + Comparison between buffers is stored to the dedicated structure. 79 + 80 + Note that the verify result is now accessible only via file 'results' in the 81 + debugfs.
+4
arch/arm/mach-omap2/dma.c
··· 28 28 #include <linux/init.h> 29 29 #include <linux/device.h> 30 30 #include <linux/dma-mapping.h> 31 + #include <linux/of.h> 31 32 #include <linux/omap-dma.h> 32 33 33 34 #include "soc.h" ··· 303 302 res = omap_hwmod_for_each_by_class("dma", 304 303 omap2_system_dma_init_dev, NULL); 305 304 if (res) 305 + return res; 306 + 307 + if (of_have_populated_dt()) 306 308 return res; 307 309 308 310 pdev = platform_device_register_full(&omap_dma_dev_info);
+5 -11
drivers/dma/Kconfig
··· 63 63 depends on PCI && X86 64 64 select DMA_ENGINE 65 65 select DCA 66 - select ASYNC_TX_DISABLE_PQ_VAL_DMA 67 - select ASYNC_TX_DISABLE_XOR_VAL_DMA 68 66 help 69 67 Enable support for the Intel(R) I/OAT DMA engine present 70 68 in recent Intel Xeon chipsets. ··· 172 174 This DMA controller transfers data from memory to peripheral fifo 173 175 or vice versa. It does not support memory to memory data transfer. 174 176 175 - 176 - 177 - config SH_DMAE 178 - tristate "Renesas SuperH DMAC support" 179 - depends on (SUPERH && SH_DMA) || (ARM && ARCH_SHMOBILE) 180 - depends on !SH_DMA_API 181 - select DMA_ENGINE 182 - help 183 - Enable support for the Renesas SuperH DMA controllers. 177 + source "drivers/dma/sh/Kconfig" 184 178 185 179 config COH901318 186 180 bool "ST-Ericsson COH901318 DMA support" ··· 317 327 318 328 config DMA_VIRTUAL_CHANNELS 319 329 tristate 330 + 331 + config DMA_ACPI 332 + def_bool y 333 + depends on ACPI 320 334 321 335 config DMA_OF 322 336 def_bool y
+2 -1
drivers/dma/Makefile
··· 3 3 4 4 obj-$(CONFIG_DMA_ENGINE) += dmaengine.o 5 5 obj-$(CONFIG_DMA_VIRTUAL_CHANNELS) += virt-dma.o 6 + obj-$(CONFIG_DMA_ACPI) += acpi-dma.o 6 7 obj-$(CONFIG_DMA_OF) += of-dma.o 7 8 8 9 obj-$(CONFIG_NET_DMA) += iovlock.o ··· 19 18 obj-$(CONFIG_AT_HDMAC) += at_hdmac.o 20 19 obj-$(CONFIG_MX3_IPU) += ipu/ 21 20 obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o 22 - obj-$(CONFIG_SH_DMAE) += sh/ 21 + obj-$(CONFIG_SH_DMAE_BASE) += sh/ 23 22 obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o 24 23 obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/ 25 24 obj-$(CONFIG_IMX_SDMA) += imx-sdma.o
+279
drivers/dma/acpi-dma.c
··· 1 + /* 2 + * ACPI helpers for DMA request / controller 3 + * 4 + * Based on of-dma.c 5 + * 6 + * Copyright (C) 2013, Intel Corporation 7 + * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com> 8 + * 9 + * This program is free software; you can redistribute it and/or modify 10 + * it under the terms of the GNU General Public License version 2 as 11 + * published by the Free Software Foundation. 12 + */ 13 + 14 + #include <linux/device.h> 15 + #include <linux/module.h> 16 + #include <linux/list.h> 17 + #include <linux/mutex.h> 18 + #include <linux/slab.h> 19 + #include <linux/acpi.h> 20 + #include <linux/acpi_dma.h> 21 + 22 + static LIST_HEAD(acpi_dma_list); 23 + static DEFINE_MUTEX(acpi_dma_lock); 24 + 25 + /** 26 + * acpi_dma_controller_register - Register a DMA controller to ACPI DMA helpers 27 + * @dev: struct device of DMA controller 28 + * @acpi_dma_xlate: translation function which converts a dma specifier 29 + * into a dma_chan structure 30 + * @data pointer to controller specific data to be used by 31 + * translation function 32 + * 33 + * Returns 0 on success or appropriate errno value on error. 34 + * 35 + * Allocated memory should be freed with appropriate acpi_dma_controller_free() 36 + * call. 
37 + */ 38 + int acpi_dma_controller_register(struct device *dev, 39 + struct dma_chan *(*acpi_dma_xlate) 40 + (struct acpi_dma_spec *, struct acpi_dma *), 41 + void *data) 42 + { 43 + struct acpi_device *adev; 44 + struct acpi_dma *adma; 45 + 46 + if (!dev || !acpi_dma_xlate) 47 + return -EINVAL; 48 + 49 + /* Check if the device was enumerated by ACPI */ 50 + if (!ACPI_HANDLE(dev)) 51 + return -EINVAL; 52 + 53 + if (acpi_bus_get_device(ACPI_HANDLE(dev), &adev)) 54 + return -EINVAL; 55 + 56 + adma = kzalloc(sizeof(*adma), GFP_KERNEL); 57 + if (!adma) 58 + return -ENOMEM; 59 + 60 + adma->dev = dev; 61 + adma->acpi_dma_xlate = acpi_dma_xlate; 62 + adma->data = data; 63 + 64 + /* Now queue acpi_dma controller structure in list */ 65 + mutex_lock(&acpi_dma_lock); 66 + list_add_tail(&adma->dma_controllers, &acpi_dma_list); 67 + mutex_unlock(&acpi_dma_lock); 68 + 69 + return 0; 70 + } 71 + EXPORT_SYMBOL_GPL(acpi_dma_controller_register); 72 + 73 + /** 74 + * acpi_dma_controller_free - Remove a DMA controller from ACPI DMA helpers list 75 + * @dev: struct device of DMA controller 76 + * 77 + * Memory allocated by acpi_dma_controller_register() is freed here. 
78 + */ 79 + int acpi_dma_controller_free(struct device *dev) 80 + { 81 + struct acpi_dma *adma; 82 + 83 + if (!dev) 84 + return -EINVAL; 85 + 86 + mutex_lock(&acpi_dma_lock); 87 + 88 + list_for_each_entry(adma, &acpi_dma_list, dma_controllers) 89 + if (adma->dev == dev) { 90 + list_del(&adma->dma_controllers); 91 + mutex_unlock(&acpi_dma_lock); 92 + kfree(adma); 93 + return 0; 94 + } 95 + 96 + mutex_unlock(&acpi_dma_lock); 97 + return -ENODEV; 98 + } 99 + EXPORT_SYMBOL_GPL(acpi_dma_controller_free); 100 + 101 + static void devm_acpi_dma_release(struct device *dev, void *res) 102 + { 103 + acpi_dma_controller_free(dev); 104 + } 105 + 106 + /** 107 + * devm_acpi_dma_controller_register - resource managed acpi_dma_controller_register() 108 + * @dev: device that is registering this DMA controller 109 + * @acpi_dma_xlate: translation function 110 + * @data pointer to controller specific data 111 + * 112 + * Managed acpi_dma_controller_register(). DMA controller registered by this 113 + * function are automatically freed on driver detach. See 114 + * acpi_dma_controller_register() for more information. 115 + */ 116 + int devm_acpi_dma_controller_register(struct device *dev, 117 + struct dma_chan *(*acpi_dma_xlate) 118 + (struct acpi_dma_spec *, struct acpi_dma *), 119 + void *data) 120 + { 121 + void *res; 122 + int ret; 123 + 124 + res = devres_alloc(devm_acpi_dma_release, 0, GFP_KERNEL); 125 + if (!res) 126 + return -ENOMEM; 127 + 128 + ret = acpi_dma_controller_register(dev, acpi_dma_xlate, data); 129 + if (ret) { 130 + devres_free(res); 131 + return ret; 132 + } 133 + devres_add(dev, res); 134 + return 0; 135 + } 136 + EXPORT_SYMBOL_GPL(devm_acpi_dma_controller_register); 137 + 138 + /** 139 + * devm_acpi_dma_controller_free - resource managed acpi_dma_controller_free() 140 + * 141 + * Unregister a DMA controller registered with 142 + * devm_acpi_dma_controller_register(). 
Normally this function will not need to 143 + * be called and the resource management code will ensure that the resource is 144 + * freed. 145 + */ 146 + void devm_acpi_dma_controller_free(struct device *dev) 147 + { 148 + WARN_ON(devres_destroy(dev, devm_acpi_dma_release, NULL, NULL)); 149 + } 150 + EXPORT_SYMBOL_GPL(devm_acpi_dma_controller_free); 151 + 152 + struct acpi_dma_parser_data { 153 + struct acpi_dma_spec dma_spec; 154 + size_t index; 155 + size_t n; 156 + }; 157 + 158 + /** 159 + * acpi_dma_parse_fixed_dma - Parse FixedDMA ACPI resources to a DMA specifier 160 + * @res: struct acpi_resource to get FixedDMA resources from 161 + * @data: pointer to a helper struct acpi_dma_parser_data 162 + */ 163 + static int acpi_dma_parse_fixed_dma(struct acpi_resource *res, void *data) 164 + { 165 + struct acpi_dma_parser_data *pdata = data; 166 + 167 + if (res->type == ACPI_RESOURCE_TYPE_FIXED_DMA) { 168 + struct acpi_resource_fixed_dma *dma = &res->data.fixed_dma; 169 + 170 + if (pdata->n++ == pdata->index) { 171 + pdata->dma_spec.chan_id = dma->channels; 172 + pdata->dma_spec.slave_id = dma->request_lines; 173 + } 174 + } 175 + 176 + /* Tell the ACPI core to skip this resource */ 177 + return 1; 178 + } 179 + 180 + /** 181 + * acpi_dma_request_slave_chan_by_index - Get the DMA slave channel 182 + * @dev: struct device to get DMA request from 183 + * @index: index of FixedDMA descriptor for @dev 184 + * 185 + * Returns pointer to appropriate dma channel on success or NULL on error. 
186 + */ 187 + struct dma_chan *acpi_dma_request_slave_chan_by_index(struct device *dev, 188 + size_t index) 189 + { 190 + struct acpi_dma_parser_data pdata; 191 + struct acpi_dma_spec *dma_spec = &pdata.dma_spec; 192 + struct list_head resource_list; 193 + struct acpi_device *adev; 194 + struct acpi_dma *adma; 195 + struct dma_chan *chan = NULL; 196 + 197 + /* Check if the device was enumerated by ACPI */ 198 + if (!dev || !ACPI_HANDLE(dev)) 199 + return NULL; 200 + 201 + if (acpi_bus_get_device(ACPI_HANDLE(dev), &adev)) 202 + return NULL; 203 + 204 + memset(&pdata, 0, sizeof(pdata)); 205 + pdata.index = index; 206 + 207 + /* Initial values for the request line and channel */ 208 + dma_spec->chan_id = -1; 209 + dma_spec->slave_id = -1; 210 + 211 + INIT_LIST_HEAD(&resource_list); 212 + acpi_dev_get_resources(adev, &resource_list, 213 + acpi_dma_parse_fixed_dma, &pdata); 214 + acpi_dev_free_resource_list(&resource_list); 215 + 216 + if (dma_spec->slave_id < 0 || dma_spec->chan_id < 0) 217 + return NULL; 218 + 219 + mutex_lock(&acpi_dma_lock); 220 + 221 + list_for_each_entry(adma, &acpi_dma_list, dma_controllers) { 222 + dma_spec->dev = adma->dev; 223 + chan = adma->acpi_dma_xlate(dma_spec, adma); 224 + if (chan) 225 + break; 226 + } 227 + 228 + mutex_unlock(&acpi_dma_lock); 229 + return chan; 230 + } 231 + EXPORT_SYMBOL_GPL(acpi_dma_request_slave_chan_by_index); 232 + 233 + /** 234 + * acpi_dma_request_slave_chan_by_name - Get the DMA slave channel 235 + * @dev: struct device to get DMA request from 236 + * @name: represents corresponding FixedDMA descriptor for @dev 237 + * 238 + * In order to support both Device Tree and ACPI in a single driver we 239 + * translate the names "tx" and "rx" here based on the most common case where 240 + * the first FixedDMA descriptor is TX and second is RX. 241 + * 242 + * Returns pointer to appropriate dma channel on success or NULL on error. 
243 + */ 244 + struct dma_chan *acpi_dma_request_slave_chan_by_name(struct device *dev, 245 + const char *name) 246 + { 247 + size_t index; 248 + 249 + if (!strcmp(name, "tx")) 250 + index = 0; 251 + else if (!strcmp(name, "rx")) 252 + index = 1; 253 + else 254 + return NULL; 255 + 256 + return acpi_dma_request_slave_chan_by_index(dev, index); 257 + } 258 + EXPORT_SYMBOL_GPL(acpi_dma_request_slave_chan_by_name); 259 + 260 + /** 261 + * acpi_dma_simple_xlate - Simple ACPI DMA engine translation helper 262 + * @dma_spec: pointer to ACPI DMA specifier 263 + * @adma: pointer to ACPI DMA controller data 264 + * 265 + * A simple translation function for ACPI based devices. Passes &struct 266 + * dma_spec to the DMA controller driver provided filter function. Returns 267 + * pointer to the channel if found or %NULL otherwise. 268 + */ 269 + struct dma_chan *acpi_dma_simple_xlate(struct acpi_dma_spec *dma_spec, 270 + struct acpi_dma *adma) 271 + { 272 + struct acpi_dma_filter_info *info = adma->data; 273 + 274 + if (!info || !info->filter_fn) 275 + return NULL; 276 + 277 + return dma_request_channel(info->dma_cap, info->filter_fn, dma_spec); 278 + } 279 + EXPORT_SYMBOL_GPL(acpi_dma_simple_xlate);
+89 -8
drivers/dma/at_hdmac.c
··· 24 24 #include <linux/slab.h> 25 25 #include <linux/of.h> 26 26 #include <linux/of_device.h> 27 + #include <linux/of_dma.h> 27 28 28 29 #include "at_hdmac_regs.h" 29 30 #include "dmaengine.h" ··· 678 677 ctrlb |= ATC_DST_ADDR_MODE_FIXED 679 678 | ATC_SRC_ADDR_MODE_INCR 680 679 | ATC_FC_MEM2PER 681 - | ATC_SIF(AT_DMA_MEM_IF) | ATC_DIF(AT_DMA_PER_IF); 680 + | ATC_SIF(atchan->mem_if) | ATC_DIF(atchan->per_if); 682 681 reg = sconfig->dst_addr; 683 682 for_each_sg(sgl, sg, sg_len, i) { 684 683 struct at_desc *desc; ··· 717 716 ctrlb |= ATC_DST_ADDR_MODE_INCR 718 717 | ATC_SRC_ADDR_MODE_FIXED 719 718 | ATC_FC_PER2MEM 720 - | ATC_SIF(AT_DMA_PER_IF) | ATC_DIF(AT_DMA_MEM_IF); 719 + | ATC_SIF(atchan->per_if) | ATC_DIF(atchan->mem_if); 721 720 722 721 reg = sconfig->src_addr; 723 722 for_each_sg(sgl, sg, sg_len, i) { ··· 823 822 desc->lli.ctrlb = ATC_DST_ADDR_MODE_FIXED 824 823 | ATC_SRC_ADDR_MODE_INCR 825 824 | ATC_FC_MEM2PER 826 - | ATC_SIF(AT_DMA_MEM_IF) 827 - | ATC_DIF(AT_DMA_PER_IF); 825 + | ATC_SIF(atchan->mem_if) 826 + | ATC_DIF(atchan->per_if); 828 827 break; 829 828 830 829 case DMA_DEV_TO_MEM: ··· 834 833 desc->lli.ctrlb = ATC_DST_ADDR_MODE_INCR 835 834 | ATC_SRC_ADDR_MODE_FIXED 836 835 | ATC_FC_PER2MEM 837 - | ATC_SIF(AT_DMA_PER_IF) 838 - | ATC_DIF(AT_DMA_MEM_IF); 836 + | ATC_SIF(atchan->per_if) 837 + | ATC_DIF(atchan->mem_if); 839 838 break; 840 839 841 840 default: ··· 1189 1188 dev_vdbg(chan2dev(chan), "free_chan_resources: done\n"); 1190 1189 } 1191 1190 1191 + #ifdef CONFIG_OF 1192 + static bool at_dma_filter(struct dma_chan *chan, void *slave) 1193 + { 1194 + struct at_dma_slave *atslave = slave; 1195 + 1196 + if (atslave->dma_dev == chan->device->dev) { 1197 + chan->private = atslave; 1198 + return true; 1199 + } else { 1200 + return false; 1201 + } 1202 + } 1203 + 1204 + static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec, 1205 + struct of_dma *of_dma) 1206 + { 1207 + struct dma_chan *chan; 1208 + struct at_dma_chan *atchan; 1209 + 
struct at_dma_slave *atslave; 1210 + dma_cap_mask_t mask; 1211 + unsigned int per_id; 1212 + struct platform_device *dmac_pdev; 1213 + 1214 + if (dma_spec->args_count != 2) 1215 + return NULL; 1216 + 1217 + dmac_pdev = of_find_device_by_node(dma_spec->np); 1218 + 1219 + dma_cap_zero(mask); 1220 + dma_cap_set(DMA_SLAVE, mask); 1221 + 1222 + atslave = devm_kzalloc(&dmac_pdev->dev, sizeof(*atslave), GFP_KERNEL); 1223 + if (!atslave) 1224 + return NULL; 1225 + /* 1226 + * We can fill both SRC_PER and DST_PER, one of these fields will be 1227 + * ignored depending on DMA transfer direction. 1228 + */ 1229 + per_id = dma_spec->args[1]; 1230 + atslave->cfg = ATC_FIFOCFG_HALFFIFO | ATC_DST_H2SEL_HW 1231 + | ATC_SRC_H2SEL_HW | ATC_DST_PER(per_id) 1232 + | ATC_SRC_PER(per_id); 1233 + atslave->dma_dev = &dmac_pdev->dev; 1234 + 1235 + chan = dma_request_channel(mask, at_dma_filter, atslave); 1236 + if (!chan) 1237 + return NULL; 1238 + 1239 + atchan = to_at_dma_chan(chan); 1240 + atchan->per_if = dma_spec->args[0] & 0xff; 1241 + atchan->mem_if = (dma_spec->args[0] >> 16) & 0xff; 1242 + 1243 + return chan; 1244 + } 1245 + #else 1246 + static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec, 1247 + struct of_dma *of_dma) 1248 + { 1249 + return NULL; 1250 + } 1251 + #endif 1192 1252 1193 1253 /*-- Module Management -----------------------------------------------*/ 1194 1254 ··· 1404 1342 for (i = 0; i < plat_dat->nr_channels; i++) { 1405 1343 struct at_dma_chan *atchan = &atdma->chan[i]; 1406 1344 1345 + atchan->mem_if = AT_DMA_MEM_IF; 1346 + atchan->per_if = AT_DMA_PER_IF; 1407 1347 atchan->chan_common.device = &atdma->dma_common; 1408 1348 dma_cookie_init(&atchan->chan_common); 1409 1349 list_add_tail(&atchan->chan_common.device_node, ··· 1452 1388 1453 1389 dma_async_device_register(&atdma->dma_common); 1454 1390 1391 + /* 1392 + * Do not return an error if the dmac node is not present in order to 1393 + * not break the existing way of requesting channel with 
1394 + * dma_request_channel(). 1395 + */ 1396 + if (pdev->dev.of_node) { 1397 + err = of_dma_controller_register(pdev->dev.of_node, 1398 + at_dma_xlate, atdma); 1399 + if (err) { 1400 + dev_err(&pdev->dev, "could not register of_dma_controller\n"); 1401 + goto err_of_dma_controller_register; 1402 + } 1403 + } 1404 + 1455 1405 return 0; 1456 1406 1407 + err_of_dma_controller_register: 1408 + dma_async_device_unregister(&atdma->dma_common); 1409 + dma_pool_destroy(atdma->dma_desc_pool); 1457 1410 err_pool_create: 1458 1411 platform_set_drvdata(pdev, NULL); 1459 1412 free_irq(platform_get_irq(pdev, 0), atdma); ··· 1487 1406 return err; 1488 1407 } 1489 1408 1490 - static int __exit at_dma_remove(struct platform_device *pdev) 1409 + static int at_dma_remove(struct platform_device *pdev) 1491 1410 { 1492 1411 struct at_dma *atdma = platform_get_drvdata(pdev); 1493 1412 struct dma_chan *chan, *_chan; ··· 1645 1564 }; 1646 1565 1647 1566 static struct platform_driver at_dma_driver = { 1648 - .remove = __exit_p(at_dma_remove), 1567 + .remove = at_dma_remove, 1649 1568 .shutdown = at_dma_shutdown, 1650 1569 .id_table = atdma_devtypes, 1651 1570 .driver = {
+4
drivers/dma/at_hdmac_regs.h
··· 220 220 * @device: parent device 221 221 * @ch_regs: memory mapped register base 222 222 * @mask: channel index in a mask 223 + * @per_if: peripheral interface 224 + * @mem_if: memory interface 223 225 * @status: transmit status information from irq/prep* functions 224 226 * to tasklet (use atomic operations) 225 227 * @tasklet: bottom half to finish transaction work ··· 240 238 struct at_dma *device; 241 239 void __iomem *ch_regs; 242 240 u8 mask; 241 + u8 per_if; 242 + u8 mem_if; 243 243 unsigned long status; 244 244 struct tasklet_struct tasklet; 245 245 u32 save_cfg;
+2 -2
drivers/dma/coh901318.c
··· 2748 2748 return err; 2749 2749 } 2750 2750 2751 - static int __exit coh901318_remove(struct platform_device *pdev) 2751 + static int coh901318_remove(struct platform_device *pdev) 2752 2752 { 2753 2753 struct coh901318_base *base = platform_get_drvdata(pdev); 2754 2754 ··· 2760 2760 2761 2761 2762 2762 static struct platform_driver coh901318_driver = { 2763 - .remove = __exit_p(coh901318_remove), 2763 + .remove = coh901318_remove, 2764 2764 .driver = { 2765 2765 .name = "coh901318", 2766 2766 },
+13 -4
drivers/dma/dmaengine.c
··· 62 62 #include <linux/rculist.h> 63 63 #include <linux/idr.h> 64 64 #include <linux/slab.h> 65 + #include <linux/acpi.h> 66 + #include <linux/acpi_dma.h> 65 67 #include <linux/of_dma.h> 66 68 67 69 static DEFINE_MUTEX(dma_list_mutex); ··· 176 174 #define dma_device_satisfies_mask(device, mask) \ 177 175 __dma_device_satisfies_mask((device), &(mask)) 178 176 static int 179 - __dma_device_satisfies_mask(struct dma_device *device, dma_cap_mask_t *want) 177 + __dma_device_satisfies_mask(struct dma_device *device, 178 + const dma_cap_mask_t *want) 180 179 { 181 180 dma_cap_mask_t has; 182 181 ··· 466 463 } 467 464 } 468 465 469 - static struct dma_chan *private_candidate(dma_cap_mask_t *mask, struct dma_device *dev, 466 + static struct dma_chan *private_candidate(const dma_cap_mask_t *mask, 467 + struct dma_device *dev, 470 468 dma_filter_fn fn, void *fn_param) 471 469 { 472 470 struct dma_chan *chan; ··· 509 505 * @fn: optional callback to disposition available channels 510 506 * @fn_param: opaque parameter to pass to dma_filter_fn 511 507 */ 512 - struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param) 508 + struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask, 509 + dma_filter_fn fn, void *fn_param) 513 510 { 514 511 struct dma_device *device, *_d; 515 512 struct dma_chan *chan = NULL; ··· 560 555 * @dev: pointer to client device structure 561 556 * @name: slave channel name 562 557 */ 563 - struct dma_chan *dma_request_slave_channel(struct device *dev, char *name) 558 + struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name) 564 559 { 565 560 /* If device-tree is present get slave info from here */ 566 561 if (dev->of_node) 567 562 return of_dma_request_slave_channel(dev->of_node, name); 563 + 564 + /* If device was enumerated by ACPI get slave info from here */ 565 + if (ACPI_HANDLE(dev)) 566 + return acpi_dma_request_slave_chan_by_name(dev, name); 568 567 569 568 return NULL; 570 569 }
+754 -139
drivers/dma/dmatest.c
··· 2 2 * DMA Engine test module 3 3 * 4 4 * Copyright (C) 2007 Atmel Corporation 5 + * Copyright (C) 2013 Intel Corporation 5 6 * 6 7 * This program is free software; you can redistribute it and/or modify 7 8 * it under the terms of the GNU General Public License version 2 as ··· 19 18 #include <linux/random.h> 20 19 #include <linux/slab.h> 21 20 #include <linux/wait.h> 21 + #include <linux/ctype.h> 22 + #include <linux/debugfs.h> 23 + #include <linux/uaccess.h> 24 + #include <linux/seq_file.h> 22 25 23 26 static unsigned int test_buf_size = 16384; 24 27 module_param(test_buf_size, uint, S_IRUGO); ··· 66 61 MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), " 67 62 "Pass -1 for infinite timeout"); 68 63 64 + /* Maximum amount of mismatched bytes in buffer to print */ 65 + #define MAX_ERROR_COUNT 32 66 + 69 67 /* 70 68 * Initialization patterns. All bytes in the source buffer has bit 7 71 69 * set, all bytes in the destination buffer has bit 7 cleared. ··· 86 78 #define PATTERN_OVERWRITE 0x20 87 79 #define PATTERN_COUNT_MASK 0x1f 88 80 81 + enum dmatest_error_type { 82 + DMATEST_ET_OK, 83 + DMATEST_ET_MAP_SRC, 84 + DMATEST_ET_MAP_DST, 85 + DMATEST_ET_PREP, 86 + DMATEST_ET_SUBMIT, 87 + DMATEST_ET_TIMEOUT, 88 + DMATEST_ET_DMA_ERROR, 89 + DMATEST_ET_DMA_IN_PROGRESS, 90 + DMATEST_ET_VERIFY, 91 + DMATEST_ET_VERIFY_BUF, 92 + }; 93 + 94 + struct dmatest_verify_buffer { 95 + unsigned int index; 96 + u8 expected; 97 + u8 actual; 98 + }; 99 + 100 + struct dmatest_verify_result { 101 + unsigned int error_count; 102 + struct dmatest_verify_buffer data[MAX_ERROR_COUNT]; 103 + u8 pattern; 104 + bool is_srcbuf; 105 + }; 106 + 107 + struct dmatest_thread_result { 108 + struct list_head node; 109 + unsigned int n; 110 + unsigned int src_off; 111 + unsigned int dst_off; 112 + unsigned int len; 113 + enum dmatest_error_type type; 114 + union { 115 + unsigned long data; 116 + dma_cookie_t cookie; 117 + enum dma_status status; 118 + int error; 119 + struct 
dmatest_verify_result *vr; 120 + }; 121 + }; 122 + 123 + struct dmatest_result { 124 + struct list_head node; 125 + char *name; 126 + struct list_head results; 127 + }; 128 + 129 + struct dmatest_info; 130 + 89 131 struct dmatest_thread { 90 132 struct list_head node; 133 + struct dmatest_info *info; 91 134 struct task_struct *task; 92 135 struct dma_chan *chan; 93 136 u8 **srcs; 94 137 u8 **dsts; 95 138 enum dma_transaction_type type; 139 + bool done; 96 140 }; 97 141 98 142 struct dmatest_chan { ··· 153 93 struct list_head threads; 154 94 }; 155 95 156 - /* 157 - * These are protected by dma_list_mutex since they're only used by 158 - * the DMA filter function callback 96 + /** 97 + * struct dmatest_params - test parameters. 98 + * @buf_size: size of the memcpy test buffer 99 + * @channel: bus ID of the channel to test 100 + * @device: bus ID of the DMA Engine to test 101 + * @threads_per_chan: number of threads to start per channel 102 + * @max_channels: maximum number of channels to use 103 + * @iterations: iterations before stopping test 104 + * @xor_sources: number of xor source buffers 105 + * @pq_sources: number of p+q source buffers 106 + * @timeout: transfer timeout in msec, -1 for infinite timeout 159 107 */ 160 - static LIST_HEAD(dmatest_channels); 161 - static unsigned int nr_channels; 108 + struct dmatest_params { 109 + unsigned int buf_size; 110 + char channel[20]; 111 + char device[20]; 112 + unsigned int threads_per_chan; 113 + unsigned int max_channels; 114 + unsigned int iterations; 115 + unsigned int xor_sources; 116 + unsigned int pq_sources; 117 + int timeout; 118 + }; 162 119 163 - static bool dmatest_match_channel(struct dma_chan *chan) 120 + /** 121 + * struct dmatest_info - test information. 
122 + * @params: test parameters 123 + * @lock: access protection to the fields of this structure 124 + */ 125 + struct dmatest_info { 126 + /* Test parameters */ 127 + struct dmatest_params params; 128 + 129 + /* Internal state */ 130 + struct list_head channels; 131 + unsigned int nr_channels; 132 + struct mutex lock; 133 + 134 + /* debugfs related stuff */ 135 + struct dentry *root; 136 + struct dmatest_params dbgfs_params; 137 + 138 + /* Test results */ 139 + struct list_head results; 140 + struct mutex results_lock; 141 + }; 142 + 143 + static struct dmatest_info test_info; 144 + 145 + static bool dmatest_match_channel(struct dmatest_params *params, 146 + struct dma_chan *chan) 164 147 { 165 - if (test_channel[0] == '\0') 148 + if (params->channel[0] == '\0') 166 149 return true; 167 - return strcmp(dma_chan_name(chan), test_channel) == 0; 150 + return strcmp(dma_chan_name(chan), params->channel) == 0; 168 151 } 169 152 170 - static bool dmatest_match_device(struct dma_device *device) 153 + static bool dmatest_match_device(struct dmatest_params *params, 154 + struct dma_device *device) 171 155 { 172 - if (test_device[0] == '\0') 156 + if (params->device[0] == '\0') 173 157 return true; 174 - return strcmp(dev_name(device->dev), test_device) == 0; 158 + return strcmp(dev_name(device->dev), params->device) == 0; 175 159 } 176 160 177 161 static unsigned long dmatest_random(void) ··· 226 122 return buf; 227 123 } 228 124 229 - static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len) 125 + static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len, 126 + unsigned int buf_size) 230 127 { 231 128 unsigned int i; 232 129 u8 *buf; ··· 238 133 for ( ; i < start + len; i++) 239 134 buf[i] = PATTERN_SRC | PATTERN_COPY 240 135 | (~i & PATTERN_COUNT_MASK); 241 - for ( ; i < test_buf_size; i++) 136 + for ( ; i < buf_size; i++) 242 137 buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK); 243 138 buf++; 244 139 } 245 140 } 246 141 247 - 
static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len) 142 + static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len, 143 + unsigned int buf_size) 248 144 { 249 145 unsigned int i; 250 146 u8 *buf; ··· 256 150 for ( ; i < start + len; i++) 257 151 buf[i] = PATTERN_DST | PATTERN_OVERWRITE 258 152 | (~i & PATTERN_COUNT_MASK); 259 - for ( ; i < test_buf_size; i++) 153 + for ( ; i < buf_size; i++) 260 154 buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK); 261 155 } 262 156 } 263 157 264 - static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index, 265 - unsigned int counter, bool is_srcbuf) 266 - { 267 - u8 diff = actual ^ pattern; 268 - u8 expected = pattern | (~counter & PATTERN_COUNT_MASK); 269 - const char *thread_name = current->comm; 270 - 271 - if (is_srcbuf) 272 - pr_warning("%s: srcbuf[0x%x] overwritten!" 273 - " Expected %02x, got %02x\n", 274 - thread_name, index, expected, actual); 275 - else if ((pattern & PATTERN_COPY) 276 - && (diff & (PATTERN_COPY | PATTERN_OVERWRITE))) 277 - pr_warning("%s: dstbuf[0x%x] not copied!" 278 - " Expected %02x, got %02x\n", 279 - thread_name, index, expected, actual); 280 - else if (diff & PATTERN_SRC) 281 - pr_warning("%s: dstbuf[0x%x] was copied!" 282 - " Expected %02x, got %02x\n", 283 - thread_name, index, expected, actual); 284 - else 285 - pr_warning("%s: dstbuf[0x%x] mismatch!" 
286 - " Expected %02x, got %02x\n", 287 - thread_name, index, expected, actual); 288 - } 289 - 290 - static unsigned int dmatest_verify(u8 **bufs, unsigned int start, 291 - unsigned int end, unsigned int counter, u8 pattern, 292 - bool is_srcbuf) 158 + static unsigned int dmatest_verify(struct dmatest_verify_result *vr, u8 **bufs, 159 + unsigned int start, unsigned int end, unsigned int counter, 160 + u8 pattern, bool is_srcbuf) 293 161 { 294 162 unsigned int i; 295 163 unsigned int error_count = 0; ··· 271 191 u8 expected; 272 192 u8 *buf; 273 193 unsigned int counter_orig = counter; 194 + struct dmatest_verify_buffer *vb; 274 195 275 196 for (; (buf = *bufs); bufs++) { 276 197 counter = counter_orig; ··· 279 198 actual = buf[i]; 280 199 expected = pattern | (~counter & PATTERN_COUNT_MASK); 281 200 if (actual != expected) { 282 - if (error_count < 32) 283 - dmatest_mismatch(actual, pattern, i, 284 - counter, is_srcbuf); 201 + if (error_count < MAX_ERROR_COUNT && vr) { 202 + vb = &vr->data[error_count]; 203 + vb->index = i; 204 + vb->expected = expected; 205 + vb->actual = actual; 206 + } 285 207 error_count++; 286 208 } 287 209 counter++; 288 210 } 289 211 } 290 212 291 - if (error_count > 32) 213 + if (error_count > MAX_ERROR_COUNT) 292 214 pr_warning("%s: %u errors suppressed\n", 293 - current->comm, error_count - 32); 215 + current->comm, error_count - MAX_ERROR_COUNT); 294 216 295 217 return error_count; 296 218 } ··· 333 249 return val % 2 ? 
val : val - 1; 334 250 } 335 251 252 + static char *verify_result_get_one(struct dmatest_verify_result *vr, 253 + unsigned int i) 254 + { 255 + struct dmatest_verify_buffer *vb = &vr->data[i]; 256 + u8 diff = vb->actual ^ vr->pattern; 257 + static char buf[512]; 258 + char *msg; 259 + 260 + if (vr->is_srcbuf) 261 + msg = "srcbuf overwritten!"; 262 + else if ((vr->pattern & PATTERN_COPY) 263 + && (diff & (PATTERN_COPY | PATTERN_OVERWRITE))) 264 + msg = "dstbuf not copied!"; 265 + else if (diff & PATTERN_SRC) 266 + msg = "dstbuf was copied!"; 267 + else 268 + msg = "dstbuf mismatch!"; 269 + 270 + snprintf(buf, sizeof(buf) - 1, "%s [0x%x] Expected %02x, got %02x", msg, 271 + vb->index, vb->expected, vb->actual); 272 + 273 + return buf; 274 + } 275 + 276 + static char *thread_result_get(const char *name, 277 + struct dmatest_thread_result *tr) 278 + { 279 + static const char * const messages[] = { 280 + [DMATEST_ET_OK] = "No errors", 281 + [DMATEST_ET_MAP_SRC] = "src mapping error", 282 + [DMATEST_ET_MAP_DST] = "dst mapping error", 283 + [DMATEST_ET_PREP] = "prep error", 284 + [DMATEST_ET_SUBMIT] = "submit error", 285 + [DMATEST_ET_TIMEOUT] = "test timed out", 286 + [DMATEST_ET_DMA_ERROR] = 287 + "got completion callback (DMA_ERROR)", 288 + [DMATEST_ET_DMA_IN_PROGRESS] = 289 + "got completion callback (DMA_IN_PROGRESS)", 290 + [DMATEST_ET_VERIFY] = "errors", 291 + [DMATEST_ET_VERIFY_BUF] = "verify errors", 292 + }; 293 + static char buf[512]; 294 + 295 + snprintf(buf, sizeof(buf) - 1, 296 + "%s: #%u: %s with src_off=0x%x ""dst_off=0x%x len=0x%x (%lu)", 297 + name, tr->n, messages[tr->type], tr->src_off, tr->dst_off, 298 + tr->len, tr->data); 299 + 300 + return buf; 301 + } 302 + 303 + static int thread_result_add(struct dmatest_info *info, 304 + struct dmatest_result *r, enum dmatest_error_type type, 305 + unsigned int n, unsigned int src_off, unsigned int dst_off, 306 + unsigned int len, unsigned long data) 307 + { 308 + struct dmatest_thread_result *tr; 309 + 310 + 
tr = kzalloc(sizeof(*tr), GFP_KERNEL); 311 + if (!tr) 312 + return -ENOMEM; 313 + 314 + tr->type = type; 315 + tr->n = n; 316 + tr->src_off = src_off; 317 + tr->dst_off = dst_off; 318 + tr->len = len; 319 + tr->data = data; 320 + 321 + mutex_lock(&info->results_lock); 322 + list_add_tail(&tr->node, &r->results); 323 + mutex_unlock(&info->results_lock); 324 + 325 + pr_warn("%s\n", thread_result_get(r->name, tr)); 326 + return 0; 327 + } 328 + 329 + static unsigned int verify_result_add(struct dmatest_info *info, 330 + struct dmatest_result *r, unsigned int n, 331 + unsigned int src_off, unsigned int dst_off, unsigned int len, 332 + u8 **bufs, int whence, unsigned int counter, u8 pattern, 333 + bool is_srcbuf) 334 + { 335 + struct dmatest_verify_result *vr; 336 + unsigned int error_count; 337 + unsigned int buf_off = is_srcbuf ? src_off : dst_off; 338 + unsigned int start, end; 339 + 340 + if (whence < 0) { 341 + start = 0; 342 + end = buf_off; 343 + } else if (whence > 0) { 344 + start = buf_off + len; 345 + end = info->params.buf_size; 346 + } else { 347 + start = buf_off; 348 + end = buf_off + len; 349 + } 350 + 351 + vr = kmalloc(sizeof(*vr), GFP_KERNEL); 352 + if (!vr) { 353 + pr_warn("dmatest: No memory to store verify result\n"); 354 + return dmatest_verify(NULL, bufs, start, end, counter, pattern, 355 + is_srcbuf); 356 + } 357 + 358 + vr->pattern = pattern; 359 + vr->is_srcbuf = is_srcbuf; 360 + 361 + error_count = dmatest_verify(vr, bufs, start, end, counter, pattern, 362 + is_srcbuf); 363 + if (error_count) { 364 + vr->error_count = error_count; 365 + thread_result_add(info, r, DMATEST_ET_VERIFY_BUF, n, src_off, 366 + dst_off, len, (unsigned long)vr); 367 + return error_count; 368 + } 369 + 370 + kfree(vr); 371 + return 0; 372 + } 373 + 374 + static void result_free(struct dmatest_info *info, const char *name) 375 + { 376 + struct dmatest_result *r, *_r; 377 + 378 + mutex_lock(&info->results_lock); 379 + list_for_each_entry_safe(r, _r, &info->results, node) 
{ 380 + struct dmatest_thread_result *tr, *_tr; 381 + 382 + if (name && strcmp(r->name, name)) 383 + continue; 384 + 385 + list_for_each_entry_safe(tr, _tr, &r->results, node) { 386 + if (tr->type == DMATEST_ET_VERIFY_BUF) 387 + kfree(tr->vr); 388 + list_del(&tr->node); 389 + kfree(tr); 390 + } 391 + 392 + kfree(r->name); 393 + list_del(&r->node); 394 + kfree(r); 395 + } 396 + 397 + mutex_unlock(&info->results_lock); 398 + } 399 + 400 + static struct dmatest_result *result_init(struct dmatest_info *info, 401 + const char *name) 402 + { 403 + struct dmatest_result *r; 404 + 405 + r = kzalloc(sizeof(*r), GFP_KERNEL); 406 + if (r) { 407 + r->name = kstrdup(name, GFP_KERNEL); 408 + INIT_LIST_HEAD(&r->results); 409 + mutex_lock(&info->results_lock); 410 + list_add_tail(&r->node, &info->results); 411 + mutex_unlock(&info->results_lock); 412 + } 413 + return r; 414 + } 415 + 336 416 /* 337 417 * This function repeatedly tests DMA transfers of various lengths and 338 418 * offsets for a given operation type until it is told to exit by ··· 516 268 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait); 517 269 struct dmatest_thread *thread = data; 518 270 struct dmatest_done done = { .wait = &done_wait }; 271 + struct dmatest_info *info; 272 + struct dmatest_params *params; 519 273 struct dma_chan *chan; 520 274 struct dma_device *dev; 521 275 const char *thread_name; ··· 528 278 dma_cookie_t cookie; 529 279 enum dma_status status; 530 280 enum dma_ctrl_flags flags; 531 - u8 pq_coefs[pq_sources + 1]; 281 + u8 *pq_coefs = NULL; 532 282 int ret; 533 283 int src_cnt; 534 284 int dst_cnt; 535 285 int i; 286 + struct dmatest_result *result; 536 287 537 288 thread_name = current->comm; 538 289 set_freezable(); ··· 541 290 ret = -ENOMEM; 542 291 543 292 smp_rmb(); 293 + info = thread->info; 294 + params = &info->params; 544 295 chan = thread->chan; 545 296 dev = chan->device; 546 297 if (thread->type == DMA_MEMCPY) 547 298 src_cnt = dst_cnt = 1; 548 299 else if (thread->type == DMA_XOR) { 549 
300 /* force odd to ensure dst = src */ 550 - src_cnt = min_odd(xor_sources | 1, dev->max_xor); 301 + src_cnt = min_odd(params->xor_sources | 1, dev->max_xor); 551 302 dst_cnt = 1; 552 303 } else if (thread->type == DMA_PQ) { 553 304 /* force odd to ensure dst = src */ 554 - src_cnt = min_odd(pq_sources | 1, dma_maxpq(dev, 0)); 305 + src_cnt = min_odd(params->pq_sources | 1, dma_maxpq(dev, 0)); 555 306 dst_cnt = 2; 307 + 308 + pq_coefs = kmalloc(params->pq_sources+1, GFP_KERNEL); 309 + if (!pq_coefs) 310 + goto err_thread_type; 311 + 556 312 for (i = 0; i < src_cnt; i++) 557 313 pq_coefs[i] = 1; 558 314 } else 315 + goto err_thread_type; 316 + 317 + result = result_init(info, thread_name); 318 + if (!result) 559 319 goto err_srcs; 560 320 561 321 thread->srcs = kcalloc(src_cnt+1, sizeof(u8 *), GFP_KERNEL); 562 322 if (!thread->srcs) 563 323 goto err_srcs; 564 324 for (i = 0; i < src_cnt; i++) { 565 - thread->srcs[i] = kmalloc(test_buf_size, GFP_KERNEL); 325 + thread->srcs[i] = kmalloc(params->buf_size, GFP_KERNEL); 566 326 if (!thread->srcs[i]) 567 327 goto err_srcbuf; 568 328 } ··· 583 321 if (!thread->dsts) 584 322 goto err_dsts; 585 323 for (i = 0; i < dst_cnt; i++) { 586 - thread->dsts[i] = kmalloc(test_buf_size, GFP_KERNEL); 324 + thread->dsts[i] = kmalloc(params->buf_size, GFP_KERNEL); 587 325 if (!thread->dsts[i]) 588 326 goto err_dstbuf; 589 327 } ··· 599 337 | DMA_COMPL_SKIP_DEST_UNMAP | DMA_COMPL_SRC_UNMAP_SINGLE; 600 338 601 339 while (!kthread_should_stop() 602 - && !(iterations && total_tests >= iterations)) { 340 + && !(params->iterations && total_tests >= params->iterations)) { 603 341 struct dma_async_tx_descriptor *tx = NULL; 604 342 dma_addr_t dma_srcs[src_cnt]; 605 343 dma_addr_t dma_dsts[dst_cnt]; ··· 615 353 else if (thread->type == DMA_PQ) 616 354 align = dev->pq_align; 617 355 618 - if (1 << align > test_buf_size) { 356 + if (1 << align > params->buf_size) { 619 357 pr_err("%u-byte buffer too small for %d-byte alignment\n", 620 - 
test_buf_size, 1 << align); 358 + params->buf_size, 1 << align); 621 359 break; 622 360 } 623 361 624 - len = dmatest_random() % test_buf_size + 1; 362 + len = dmatest_random() % params->buf_size + 1; 625 363 len = (len >> align) << align; 626 364 if (!len) 627 365 len = 1 << align; 628 - src_off = dmatest_random() % (test_buf_size - len + 1); 629 - dst_off = dmatest_random() % (test_buf_size - len + 1); 366 + src_off = dmatest_random() % (params->buf_size - len + 1); 367 + dst_off = dmatest_random() % (params->buf_size - len + 1); 630 368 631 369 src_off = (src_off >> align) << align; 632 370 dst_off = (dst_off >> align) << align; 633 371 634 - dmatest_init_srcs(thread->srcs, src_off, len); 635 - dmatest_init_dsts(thread->dsts, dst_off, len); 372 + dmatest_init_srcs(thread->srcs, src_off, len, params->buf_size); 373 + dmatest_init_dsts(thread->dsts, dst_off, len, params->buf_size); 636 374 637 375 for (i = 0; i < src_cnt; i++) { 638 376 u8 *buf = thread->srcs[i] + src_off; ··· 642 380 ret = dma_mapping_error(dev->dev, dma_srcs[i]); 643 381 if (ret) { 644 382 unmap_src(dev->dev, dma_srcs, len, i); 645 - pr_warn("%s: #%u: mapping error %d with " 646 - "src_off=0x%x len=0x%x\n", 647 - thread_name, total_tests - 1, ret, 648 - src_off, len); 383 + thread_result_add(info, result, 384 + DMATEST_ET_MAP_SRC, 385 + total_tests, src_off, dst_off, 386 + len, ret); 649 387 failed_tests++; 650 388 continue; 651 389 } ··· 653 391 /* map with DMA_BIDIRECTIONAL to force writeback/invalidate */ 654 392 for (i = 0; i < dst_cnt; i++) { 655 393 dma_dsts[i] = dma_map_single(dev->dev, thread->dsts[i], 656 - test_buf_size, 394 + params->buf_size, 657 395 DMA_BIDIRECTIONAL); 658 396 ret = dma_mapping_error(dev->dev, dma_dsts[i]); 659 397 if (ret) { 660 398 unmap_src(dev->dev, dma_srcs, len, src_cnt); 661 - unmap_dst(dev->dev, dma_dsts, test_buf_size, i); 662 - pr_warn("%s: #%u: mapping error %d with " 663 - "dst_off=0x%x len=0x%x\n", 664 - thread_name, total_tests - 1, ret, 665 - dst_off, 
test_buf_size); 399 + unmap_dst(dev->dev, dma_dsts, params->buf_size, 400 + i); 401 + thread_result_add(info, result, 402 + DMATEST_ET_MAP_DST, 403 + total_tests, src_off, dst_off, 404 + len, ret); 666 405 failed_tests++; 667 406 continue; 668 407 } ··· 691 428 692 429 if (!tx) { 693 430 unmap_src(dev->dev, dma_srcs, len, src_cnt); 694 - unmap_dst(dev->dev, dma_dsts, test_buf_size, dst_cnt); 695 - pr_warning("%s: #%u: prep error with src_off=0x%x " 696 - "dst_off=0x%x len=0x%x\n", 697 - thread_name, total_tests - 1, 698 - src_off, dst_off, len); 431 + unmap_dst(dev->dev, dma_dsts, params->buf_size, 432 + dst_cnt); 433 + thread_result_add(info, result, DMATEST_ET_PREP, 434 + total_tests, src_off, dst_off, 435 + len, 0); 699 436 msleep(100); 700 437 failed_tests++; 701 438 continue; ··· 707 444 cookie = tx->tx_submit(tx); 708 445 709 446 if (dma_submit_error(cookie)) { 710 - pr_warning("%s: #%u: submit error %d with src_off=0x%x " 711 - "dst_off=0x%x len=0x%x\n", 712 - thread_name, total_tests - 1, cookie, 713 - src_off, dst_off, len); 447 + thread_result_add(info, result, DMATEST_ET_SUBMIT, 448 + total_tests, src_off, dst_off, 449 + len, cookie); 714 450 msleep(100); 715 451 failed_tests++; 716 452 continue; 717 453 } 718 454 dma_async_issue_pending(chan); 719 455 720 - wait_event_freezable_timeout(done_wait, done.done, 721 - msecs_to_jiffies(timeout)); 456 + wait_event_freezable_timeout(done_wait, 457 + done.done || kthread_should_stop(), 458 + msecs_to_jiffies(params->timeout)); 722 459 723 460 status = dma_async_is_tx_complete(chan, cookie, NULL, NULL); 724 461 ··· 731 468 * free it this time?" dancing. For now, just 732 469 * leave it dangling. 
733 470 */ 734 - pr_warning("%s: #%u: test timed out\n", 735 - thread_name, total_tests - 1); 471 + thread_result_add(info, result, DMATEST_ET_TIMEOUT, 472 + total_tests, src_off, dst_off, 473 + len, 0); 736 474 failed_tests++; 737 475 continue; 738 476 } else if (status != DMA_SUCCESS) { 739 - pr_warning("%s: #%u: got completion callback," 740 - " but status is \'%s\'\n", 741 - thread_name, total_tests - 1, 742 - status == DMA_ERROR ? "error" : "in progress"); 477 + enum dmatest_error_type type = (status == DMA_ERROR) ? 478 + DMATEST_ET_DMA_ERROR : DMATEST_ET_DMA_IN_PROGRESS; 479 + thread_result_add(info, result, type, 480 + total_tests, src_off, dst_off, 481 + len, status); 743 482 failed_tests++; 744 483 continue; 745 484 } 746 485 747 486 /* Unmap by myself (see DMA_COMPL_SKIP_DEST_UNMAP above) */ 748 - unmap_dst(dev->dev, dma_dsts, test_buf_size, dst_cnt); 487 + unmap_dst(dev->dev, dma_dsts, params->buf_size, dst_cnt); 749 488 750 489 error_count = 0; 751 490 752 491 pr_debug("%s: verifying source buffer...\n", thread_name); 753 - error_count += dmatest_verify(thread->srcs, 0, src_off, 492 + error_count += verify_result_add(info, result, total_tests, 493 + src_off, dst_off, len, thread->srcs, -1, 754 494 0, PATTERN_SRC, true); 755 - error_count += dmatest_verify(thread->srcs, src_off, 756 - src_off + len, src_off, 757 - PATTERN_SRC | PATTERN_COPY, true); 758 - error_count += dmatest_verify(thread->srcs, src_off + len, 759 - test_buf_size, src_off + len, 760 - PATTERN_SRC, true); 495 + error_count += verify_result_add(info, result, total_tests, 496 + src_off, dst_off, len, thread->srcs, 0, 497 + src_off, PATTERN_SRC | PATTERN_COPY, true); 498 + error_count += verify_result_add(info, result, total_tests, 499 + src_off, dst_off, len, thread->srcs, 1, 500 + src_off + len, PATTERN_SRC, true); 761 501 762 - pr_debug("%s: verifying dest buffer...\n", 763 - thread->task->comm); 764 - error_count += dmatest_verify(thread->dsts, 0, dst_off, 502 + pr_debug("%s: verifying 
dest buffer...\n", thread_name); 503 + error_count += verify_result_add(info, result, total_tests, 504 + src_off, dst_off, len, thread->dsts, -1, 765 505 0, PATTERN_DST, false); 766 - error_count += dmatest_verify(thread->dsts, dst_off, 767 - dst_off + len, src_off, 768 - PATTERN_SRC | PATTERN_COPY, false); 769 - error_count += dmatest_verify(thread->dsts, dst_off + len, 770 - test_buf_size, dst_off + len, 771 - PATTERN_DST, false); 506 + error_count += verify_result_add(info, result, total_tests, 507 + src_off, dst_off, len, thread->dsts, 0, 508 + src_off, PATTERN_SRC | PATTERN_COPY, false); 509 + error_count += verify_result_add(info, result, total_tests, 510 + src_off, dst_off, len, thread->dsts, 1, 511 + dst_off + len, PATTERN_DST, false); 772 512 773 513 if (error_count) { 774 - pr_warning("%s: #%u: %u errors with " 775 - "src_off=0x%x dst_off=0x%x len=0x%x\n", 776 - thread_name, total_tests - 1, error_count, 777 - src_off, dst_off, len); 514 + thread_result_add(info, result, DMATEST_ET_VERIFY, 515 + total_tests, src_off, dst_off, 516 + len, error_count); 778 517 failed_tests++; 779 518 } else { 780 - pr_debug("%s: #%u: No errors with " 781 - "src_off=0x%x dst_off=0x%x len=0x%x\n", 782 - thread_name, total_tests - 1, 783 - src_off, dst_off, len); 519 + thread_result_add(info, result, DMATEST_ET_OK, 520 + total_tests, src_off, dst_off, 521 + len, 0); 784 522 } 785 523 } 786 524 ··· 796 532 err_srcbuf: 797 533 kfree(thread->srcs); 798 534 err_srcs: 535 + kfree(pq_coefs); 536 + err_thread_type: 799 537 pr_notice("%s: terminating after %u tests, %u failures (status %d)\n", 800 538 thread_name, total_tests, failed_tests, ret); 801 539 ··· 805 539 if (ret) 806 540 dmaengine_terminate_all(chan); 807 541 808 - if (iterations > 0) 542 + thread->done = true; 543 + 544 + if (params->iterations > 0) 809 545 while (!kthread_should_stop()) { 810 546 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit); 811 547 interruptible_sleep_on(&wait_dmatest_exit); ··· 836 568 
kfree(dtc); 837 569 } 838 570 839 - static int dmatest_add_threads(struct dmatest_chan *dtc, enum dma_transaction_type type) 571 + static int dmatest_add_threads(struct dmatest_info *info, 572 + struct dmatest_chan *dtc, enum dma_transaction_type type) 840 573 { 574 + struct dmatest_params *params = &info->params; 841 575 struct dmatest_thread *thread; 842 576 struct dma_chan *chan = dtc->chan; 843 577 char *op; ··· 854 584 else 855 585 return -EINVAL; 856 586 857 - for (i = 0; i < threads_per_chan; i++) { 587 + for (i = 0; i < params->threads_per_chan; i++) { 858 588 thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL); 859 589 if (!thread) { 860 590 pr_warning("dmatest: No memory for %s-%s%u\n", ··· 862 592 863 593 break; 864 594 } 595 + thread->info = info; 865 596 thread->chan = dtc->chan; 866 597 thread->type = type; 867 598 smp_wmb(); ··· 883 612 return i; 884 613 } 885 614 886 - static int dmatest_add_channel(struct dma_chan *chan) 615 + static int dmatest_add_channel(struct dmatest_info *info, 616 + struct dma_chan *chan) 887 617 { 888 618 struct dmatest_chan *dtc; 889 619 struct dma_device *dma_dev = chan->device; ··· 901 629 INIT_LIST_HEAD(&dtc->threads); 902 630 903 631 if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) { 904 - cnt = dmatest_add_threads(dtc, DMA_MEMCPY); 632 + cnt = dmatest_add_threads(info, dtc, DMA_MEMCPY); 905 633 thread_count += cnt > 0 ? cnt : 0; 906 634 } 907 635 if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { 908 - cnt = dmatest_add_threads(dtc, DMA_XOR); 636 + cnt = dmatest_add_threads(info, dtc, DMA_XOR); 909 637 thread_count += cnt > 0 ? cnt : 0; 910 638 } 911 639 if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) { 912 - cnt = dmatest_add_threads(dtc, DMA_PQ); 640 + cnt = dmatest_add_threads(info, dtc, DMA_PQ); 913 641 thread_count += cnt > 0 ? 
cnt : 0; 914 642 } 915 643 916 644 pr_info("dmatest: Started %u threads using %s\n", 917 645 thread_count, dma_chan_name(chan)); 918 646 919 - list_add_tail(&dtc->node, &dmatest_channels); 920 - nr_channels++; 647 + list_add_tail(&dtc->node, &info->channels); 648 + info->nr_channels++; 921 649 922 650 return 0; 923 651 } 924 652 925 653 static bool filter(struct dma_chan *chan, void *param) 926 654 { 927 - if (!dmatest_match_channel(chan) || !dmatest_match_device(chan->device)) 655 + struct dmatest_params *params = param; 656 + 657 + if (!dmatest_match_channel(params, chan) || 658 + !dmatest_match_device(params, chan->device)) 928 659 return false; 929 660 else 930 661 return true; 931 662 } 932 663 933 - static int __init dmatest_init(void) 664 + static int __run_threaded_test(struct dmatest_info *info) 934 665 { 935 666 dma_cap_mask_t mask; 936 667 struct dma_chan *chan; 668 + struct dmatest_params *params = &info->params; 937 669 int err = 0; 938 670 939 671 dma_cap_zero(mask); 940 672 dma_cap_set(DMA_MEMCPY, mask); 941 673 for (;;) { 942 - chan = dma_request_channel(mask, filter, NULL); 674 + chan = dma_request_channel(mask, filter, params); 943 675 if (chan) { 944 - err = dmatest_add_channel(chan); 676 + err = dmatest_add_channel(info, chan); 945 677 if (err) { 946 678 dma_release_channel(chan); 947 679 break; /* add_channel failed, punt */ 948 680 } 949 681 } else 950 682 break; /* no more channels available */ 951 - if (max_channels && nr_channels >= max_channels) 683 + if (params->max_channels && 684 + info->nr_channels >= params->max_channels) 952 685 break; /* we have all we need */ 953 686 } 954 - 955 687 return err; 688 + } 689 + 690 + #ifndef MODULE 691 + static int run_threaded_test(struct dmatest_info *info) 692 + { 693 + int ret; 694 + 695 + mutex_lock(&info->lock); 696 + ret = __run_threaded_test(info); 697 + mutex_unlock(&info->lock); 698 + return ret; 699 + } 700 + #endif 701 + 702 + static void __stop_threaded_test(struct dmatest_info *info) 703 
+ { 704 + struct dmatest_chan *dtc, *_dtc; 705 + struct dma_chan *chan; 706 + 707 + list_for_each_entry_safe(dtc, _dtc, &info->channels, node) { 708 + list_del(&dtc->node); 709 + chan = dtc->chan; 710 + dmatest_cleanup_channel(dtc); 711 + pr_debug("dmatest: dropped channel %s\n", dma_chan_name(chan)); 712 + dma_release_channel(chan); 713 + } 714 + 715 + info->nr_channels = 0; 716 + } 717 + 718 + static void stop_threaded_test(struct dmatest_info *info) 719 + { 720 + mutex_lock(&info->lock); 721 + __stop_threaded_test(info); 722 + mutex_unlock(&info->lock); 723 + } 724 + 725 + static int __restart_threaded_test(struct dmatest_info *info, bool run) 726 + { 727 + struct dmatest_params *params = &info->params; 728 + int ret; 729 + 730 + /* Stop any running test first */ 731 + __stop_threaded_test(info); 732 + 733 + if (run == false) 734 + return 0; 735 + 736 + /* Clear results from previous run */ 737 + result_free(info, NULL); 738 + 739 + /* Copy test parameters */ 740 + memcpy(params, &info->dbgfs_params, sizeof(*params)); 741 + 742 + /* Run test with new parameters */ 743 + ret = __run_threaded_test(info); 744 + if (ret) { 745 + __stop_threaded_test(info); 746 + pr_err("dmatest: Can't run test\n"); 747 + } 748 + 749 + return ret; 750 + } 751 + 752 + static ssize_t dtf_write_string(void *to, size_t available, loff_t *ppos, 753 + const void __user *from, size_t count) 754 + { 755 + char tmp[20]; 756 + ssize_t len; 757 + 758 + len = simple_write_to_buffer(tmp, sizeof(tmp) - 1, ppos, from, count); 759 + if (len >= 0) { 760 + tmp[len] = '\0'; 761 + strlcpy(to, strim(tmp), available); 762 + } 763 + 764 + return len; 765 + } 766 + 767 + static ssize_t dtf_read_channel(struct file *file, char __user *buf, 768 + size_t count, loff_t *ppos) 769 + { 770 + struct dmatest_info *info = file->private_data; 771 + return simple_read_from_buffer(buf, count, ppos, 772 + info->dbgfs_params.channel, 773 + strlen(info->dbgfs_params.channel)); 774 + } 775 + 776 + static ssize_t 
dtf_write_channel(struct file *file, const char __user *buf, 777 + size_t size, loff_t *ppos) 778 + { 779 + struct dmatest_info *info = file->private_data; 780 + return dtf_write_string(info->dbgfs_params.channel, 781 + sizeof(info->dbgfs_params.channel), 782 + ppos, buf, size); 783 + } 784 + 785 + static const struct file_operations dtf_channel_fops = { 786 + .read = dtf_read_channel, 787 + .write = dtf_write_channel, 788 + .open = simple_open, 789 + .llseek = default_llseek, 790 + }; 791 + 792 + static ssize_t dtf_read_device(struct file *file, char __user *buf, 793 + size_t count, loff_t *ppos) 794 + { 795 + struct dmatest_info *info = file->private_data; 796 + return simple_read_from_buffer(buf, count, ppos, 797 + info->dbgfs_params.device, 798 + strlen(info->dbgfs_params.device)); 799 + } 800 + 801 + static ssize_t dtf_write_device(struct file *file, const char __user *buf, 802 + size_t size, loff_t *ppos) 803 + { 804 + struct dmatest_info *info = file->private_data; 805 + return dtf_write_string(info->dbgfs_params.device, 806 + sizeof(info->dbgfs_params.device), 807 + ppos, buf, size); 808 + } 809 + 810 + static const struct file_operations dtf_device_fops = { 811 + .read = dtf_read_device, 812 + .write = dtf_write_device, 813 + .open = simple_open, 814 + .llseek = default_llseek, 815 + }; 816 + 817 + static ssize_t dtf_read_run(struct file *file, char __user *user_buf, 818 + size_t count, loff_t *ppos) 819 + { 820 + struct dmatest_info *info = file->private_data; 821 + char buf[3]; 822 + struct dmatest_chan *dtc; 823 + bool alive = false; 824 + 825 + mutex_lock(&info->lock); 826 + list_for_each_entry(dtc, &info->channels, node) { 827 + struct dmatest_thread *thread; 828 + 829 + list_for_each_entry(thread, &dtc->threads, node) { 830 + if (!thread->done) { 831 + alive = true; 832 + break; 833 + } 834 + } 835 + } 836 + 837 + if (alive) { 838 + buf[0] = 'Y'; 839 + } else { 840 + __stop_threaded_test(info); 841 + buf[0] = 'N'; 842 + } 843 + 844 + 
mutex_unlock(&info->lock); 845 + buf[1] = '\n'; 846 + buf[2] = 0x00; 847 + return simple_read_from_buffer(user_buf, count, ppos, buf, 2); 848 + } 849 + 850 + static ssize_t dtf_write_run(struct file *file, const char __user *user_buf, 851 + size_t count, loff_t *ppos) 852 + { 853 + struct dmatest_info *info = file->private_data; 854 + char buf[16]; 855 + bool bv; 856 + int ret = 0; 857 + 858 + if (copy_from_user(buf, user_buf, min(count, (sizeof(buf) - 1)))) 859 + return -EFAULT; 860 + 861 + if (strtobool(buf, &bv) == 0) { 862 + mutex_lock(&info->lock); 863 + ret = __restart_threaded_test(info, bv); 864 + mutex_unlock(&info->lock); 865 + } 866 + 867 + return ret ? ret : count; 868 + } 869 + 870 + static const struct file_operations dtf_run_fops = { 871 + .read = dtf_read_run, 872 + .write = dtf_write_run, 873 + .open = simple_open, 874 + .llseek = default_llseek, 875 + }; 876 + 877 + static int dtf_results_show(struct seq_file *sf, void *data) 878 + { 879 + struct dmatest_info *info = sf->private; 880 + struct dmatest_result *result; 881 + struct dmatest_thread_result *tr; 882 + unsigned int i; 883 + 884 + mutex_lock(&info->results_lock); 885 + list_for_each_entry(result, &info->results, node) { 886 + list_for_each_entry(tr, &result->results, node) { 887 + seq_printf(sf, "%s\n", 888 + thread_result_get(result->name, tr)); 889 + if (tr->type == DMATEST_ET_VERIFY_BUF) { 890 + for (i = 0; i < tr->vr->error_count; i++) { 891 + seq_printf(sf, "\t%s\n", 892 + verify_result_get_one(tr->vr, i)); 893 + } 894 + } 895 + } 896 + } 897 + 898 + mutex_unlock(&info->results_lock); 899 + return 0; 900 + } 901 + 902 + static int dtf_results_open(struct inode *inode, struct file *file) 903 + { 904 + return single_open(file, dtf_results_show, inode->i_private); 905 + } 906 + 907 + static const struct file_operations dtf_results_fops = { 908 + .open = dtf_results_open, 909 + .read = seq_read, 910 + .llseek = seq_lseek, 911 + .release = single_release, 912 + }; 913 + 914 + static int 
dmatest_register_dbgfs(struct dmatest_info *info) 915 + { 916 + struct dentry *d; 917 + struct dmatest_params *params = &info->dbgfs_params; 918 + int ret = -ENOMEM; 919 + 920 + d = debugfs_create_dir("dmatest", NULL); 921 + if (IS_ERR(d)) 922 + return PTR_ERR(d); 923 + if (!d) 924 + goto err_root; 925 + 926 + info->root = d; 927 + 928 + /* Copy initial values */ 929 + memcpy(params, &info->params, sizeof(*params)); 930 + 931 + /* Test parameters */ 932 + 933 + d = debugfs_create_u32("test_buf_size", S_IWUSR | S_IRUGO, info->root, 934 + (u32 *)&params->buf_size); 935 + if (IS_ERR_OR_NULL(d)) 936 + goto err_node; 937 + 938 + d = debugfs_create_file("channel", S_IRUGO | S_IWUSR, info->root, 939 + info, &dtf_channel_fops); 940 + if (IS_ERR_OR_NULL(d)) 941 + goto err_node; 942 + 943 + d = debugfs_create_file("device", S_IRUGO | S_IWUSR, info->root, 944 + info, &dtf_device_fops); 945 + if (IS_ERR_OR_NULL(d)) 946 + goto err_node; 947 + 948 + d = debugfs_create_u32("threads_per_chan", S_IWUSR | S_IRUGO, info->root, 949 + (u32 *)&params->threads_per_chan); 950 + if (IS_ERR_OR_NULL(d)) 951 + goto err_node; 952 + 953 + d = debugfs_create_u32("max_channels", S_IWUSR | S_IRUGO, info->root, 954 + (u32 *)&params->max_channels); 955 + if (IS_ERR_OR_NULL(d)) 956 + goto err_node; 957 + 958 + d = debugfs_create_u32("iterations", S_IWUSR | S_IRUGO, info->root, 959 + (u32 *)&params->iterations); 960 + if (IS_ERR_OR_NULL(d)) 961 + goto err_node; 962 + 963 + d = debugfs_create_u32("xor_sources", S_IWUSR | S_IRUGO, info->root, 964 + (u32 *)&params->xor_sources); 965 + if (IS_ERR_OR_NULL(d)) 966 + goto err_node; 967 + 968 + d = debugfs_create_u32("pq_sources", S_IWUSR | S_IRUGO, info->root, 969 + (u32 *)&params->pq_sources); 970 + if (IS_ERR_OR_NULL(d)) 971 + goto err_node; 972 + 973 + d = debugfs_create_u32("timeout", S_IWUSR | S_IRUGO, info->root, 974 + (u32 *)&params->timeout); 975 + if (IS_ERR_OR_NULL(d)) 976 + goto err_node; 977 + 978 + /* Run or stop threaded test */ 979 + d = 
debugfs_create_file("run", S_IWUSR | S_IRUGO, info->root, 980 + info, &dtf_run_fops); 981 + if (IS_ERR_OR_NULL(d)) 982 + goto err_node; 983 + 984 + /* Results of test in progress */ 985 + d = debugfs_create_file("results", S_IRUGO, info->root, info, 986 + &dtf_results_fops); 987 + if (IS_ERR_OR_NULL(d)) 988 + goto err_node; 989 + 990 + return 0; 991 + 992 + err_node: 993 + debugfs_remove_recursive(info->root); 994 + err_root: 995 + pr_err("dmatest: Failed to initialize debugfs\n"); 996 + return ret; 997 + } 998 + 999 + static int __init dmatest_init(void) 1000 + { 1001 + struct dmatest_info *info = &test_info; 1002 + struct dmatest_params *params = &info->params; 1003 + int ret; 1004 + 1005 + memset(info, 0, sizeof(*info)); 1006 + 1007 + mutex_init(&info->lock); 1008 + INIT_LIST_HEAD(&info->channels); 1009 + 1010 + mutex_init(&info->results_lock); 1011 + INIT_LIST_HEAD(&info->results); 1012 + 1013 + /* Set default parameters */ 1014 + params->buf_size = test_buf_size; 1015 + strlcpy(params->channel, test_channel, sizeof(params->channel)); 1016 + strlcpy(params->device, test_device, sizeof(params->device)); 1017 + params->threads_per_chan = threads_per_chan; 1018 + params->max_channels = max_channels; 1019 + params->iterations = iterations; 1020 + params->xor_sources = xor_sources; 1021 + params->pq_sources = pq_sources; 1022 + params->timeout = timeout; 1023 + 1024 + ret = dmatest_register_dbgfs(info); 1025 + if (ret) 1026 + return ret; 1027 + 1028 + #ifdef MODULE 1029 + return 0; 1030 + #else 1031 + return run_threaded_test(info); 1032 + #endif 956 1033 } 957 1034 /* when compiled-in wait for drivers to load first */ 958 1035 late_initcall(dmatest_init); 959 1036 960 1037 static void __exit dmatest_exit(void) 961 1038 { 962 - struct dmatest_chan *dtc, *_dtc; 963 - struct dma_chan *chan; 1039 + struct dmatest_info *info = &test_info; 964 1040 965 - list_for_each_entry_safe(dtc, _dtc, &dmatest_channels, node) { 966 - list_del(&dtc->node); 967 - chan = dtc->chan; 968 
- dmatest_cleanup_channel(dtc); 969 - pr_debug("dmatest: dropped channel %s\n", 970 - dma_chan_name(chan)); 971 - dma_release_channel(chan); 972 - } 1041 + debugfs_remove_recursive(info->root); 1042 + stop_threaded_test(info); 1043 + result_free(info, NULL); 973 1044 } 974 1045 module_exit(dmatest_exit); 975 1046
+110 -93
drivers/dma/dw_dmac.c
··· 25 25 #include <linux/module.h> 26 26 #include <linux/platform_device.h> 27 27 #include <linux/slab.h> 28 + #include <linux/acpi.h> 29 + #include <linux/acpi_dma.h> 28 30 29 31 #include "dw_dmac_regs.h" 30 32 #include "dmaengine.h" ··· 51 49 return slave ? slave->src_master : 1; 52 50 } 53 51 54 - #define SRC_MASTER 0 55 - #define DST_MASTER 1 56 - 57 - static inline unsigned int dwc_get_master(struct dma_chan *chan, int master) 52 + static inline void dwc_set_masters(struct dw_dma_chan *dwc) 58 53 { 59 - struct dw_dma *dw = to_dw_dma(chan->device); 60 - struct dw_dma_slave *dws = chan->private; 61 - unsigned int m; 54 + struct dw_dma *dw = to_dw_dma(dwc->chan.device); 55 + struct dw_dma_slave *dws = dwc->chan.private; 56 + unsigned char mmax = dw->nr_masters - 1; 62 57 63 - if (master == SRC_MASTER) 64 - m = dwc_get_sms(dws); 65 - else 66 - m = dwc_get_dms(dws); 67 - 68 - return min_t(unsigned int, dw->nr_masters - 1, m); 58 + if (dwc->request_line == ~0) { 59 + dwc->src_master = min_t(unsigned char, mmax, dwc_get_sms(dws)); 60 + dwc->dst_master = min_t(unsigned char, mmax, dwc_get_dms(dws)); 61 + } 69 62 } 70 63 71 64 #define DWC_DEFAULT_CTLLO(_chan) ({ \ 72 65 struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan); \ 73 66 struct dma_slave_config *_sconfig = &_dwc->dma_sconfig; \ 74 67 bool _is_slave = is_slave_direction(_dwc->direction); \ 75 - int _dms = dwc_get_master(_chan, DST_MASTER); \ 76 - int _sms = dwc_get_master(_chan, SRC_MASTER); \ 77 68 u8 _smsize = _is_slave ? _sconfig->src_maxburst : \ 78 69 DW_DMA_MSIZE_16; \ 79 70 u8 _dmsize = _is_slave ? _sconfig->dst_maxburst : \ ··· 76 81 | DWC_CTLL_SRC_MSIZE(_smsize) \ 77 82 | DWC_CTLL_LLP_D_EN \ 78 83 | DWC_CTLL_LLP_S_EN \ 79 - | DWC_CTLL_DMS(_dms) \ 80 - | DWC_CTLL_SMS(_sms)); \ 84 + | DWC_CTLL_DMS(_dwc->dst_master) \ 85 + | DWC_CTLL_SMS(_dwc->src_master)); \ 81 86 }) 82 87 83 88 /* ··· 86 91 * ones using slave transfers) should be able to give us a hint. 
87 92 */ 88 93 #define NR_DESCS_PER_CHANNEL 64 89 - 90 - static inline unsigned int dwc_get_data_width(struct dma_chan *chan, int master) 91 - { 92 - struct dw_dma *dw = to_dw_dma(chan->device); 93 - 94 - return dw->data_width[dwc_get_master(chan, master)]; 95 - } 96 94 97 95 /*----------------------------------------------------------------------*/ 98 96 ··· 160 172 if (dwc->initialized == true) 161 173 return; 162 174 163 - if (dws && dws->cfg_hi == ~0 && dws->cfg_lo == ~0) { 164 - /* autoconfigure based on request line from DT */ 165 - if (dwc->direction == DMA_MEM_TO_DEV) 166 - cfghi = DWC_CFGH_DST_PER(dwc->request_line); 167 - else if (dwc->direction == DMA_DEV_TO_MEM) 168 - cfghi = DWC_CFGH_SRC_PER(dwc->request_line); 169 - } else if (dws) { 175 + if (dws) { 170 176 /* 171 177 * We need controller-specific data to set up slave 172 178 * transfers. ··· 171 189 cfglo |= dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK; 172 190 } else { 173 191 if (dwc->direction == DMA_MEM_TO_DEV) 174 - cfghi = DWC_CFGH_DST_PER(dwc->dma_sconfig.slave_id); 192 + cfghi = DWC_CFGH_DST_PER(dwc->request_line); 175 193 else if (dwc->direction == DMA_DEV_TO_MEM) 176 - cfghi = DWC_CFGH_SRC_PER(dwc->dma_sconfig.slave_id); 194 + cfghi = DWC_CFGH_SRC_PER(dwc->request_line); 177 195 } 178 196 179 197 channel_writel(dwc, CFG_LO, cfglo); ··· 455 473 (unsigned long long)llp); 456 474 457 475 list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) { 458 - /* initial residue value */ 476 + /* Initial residue value */ 459 477 dwc->residue = desc->total_len; 460 478 461 - /* check first descriptors addr */ 479 + /* Check first descriptors addr */ 462 480 if (desc->txd.phys == llp) { 463 481 spin_unlock_irqrestore(&dwc->lock, flags); 464 482 return; 465 483 } 466 484 467 - /* check first descriptors llp */ 485 + /* Check first descriptors llp */ 468 486 if (desc->lli.llp == llp) { 469 487 /* This one is currently in progress */ 470 488 dwc->residue -= dwc_get_sent(dwc); ··· 570 588 } 571 589 
EXPORT_SYMBOL(dw_dma_get_dst_addr); 572 590 573 - /* called with dwc->lock held and all DMAC interrupts disabled */ 591 + /* Called with dwc->lock held and all DMAC interrupts disabled */ 574 592 static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc, 575 593 u32 status_err, u32 status_xfer) 576 594 { ··· 608 626 609 627 dwc_chan_disable(dw, dwc); 610 628 611 - /* make sure DMA does not restart by loading a new list */ 629 + /* Make sure DMA does not restart by loading a new list */ 612 630 channel_writel(dwc, LLP, 0); 613 631 channel_writel(dwc, CTL_LO, 0); 614 632 channel_writel(dwc, CTL_HI, 0); ··· 727 745 size_t len, unsigned long flags) 728 746 { 729 747 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 748 + struct dw_dma *dw = to_dw_dma(chan->device); 730 749 struct dw_desc *desc; 731 750 struct dw_desc *first; 732 751 struct dw_desc *prev; ··· 750 767 751 768 dwc->direction = DMA_MEM_TO_MEM; 752 769 753 - data_width = min_t(unsigned int, dwc_get_data_width(chan, SRC_MASTER), 754 - dwc_get_data_width(chan, DST_MASTER)); 770 + data_width = min_t(unsigned int, dw->data_width[dwc->src_master], 771 + dw->data_width[dwc->dst_master]); 755 772 756 773 src_width = dst_width = min_t(unsigned int, data_width, 757 774 dwc_fast_fls(src | dest | len)); ··· 809 826 unsigned long flags, void *context) 810 827 { 811 828 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 829 + struct dw_dma *dw = to_dw_dma(chan->device); 812 830 struct dma_slave_config *sconfig = &dwc->dma_sconfig; 813 831 struct dw_desc *prev; 814 832 struct dw_desc *first; ··· 843 859 ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) : 844 860 DWC_CTLL_FC(DW_DMA_FC_D_M2P); 845 861 846 - data_width = dwc_get_data_width(chan, SRC_MASTER); 862 + data_width = dw->data_width[dwc->src_master]; 847 863 848 864 for_each_sg(sgl, sg, sg_len, i) { 849 865 struct dw_desc *desc; ··· 903 919 ctllo |= sconfig->device_fc ? 
DWC_CTLL_FC(DW_DMA_FC_P_P2M) : 904 920 DWC_CTLL_FC(DW_DMA_FC_D_P2M); 905 921 906 - data_width = dwc_get_data_width(chan, DST_MASTER); 922 + data_width = dw->data_width[dwc->dst_master]; 907 923 908 924 for_each_sg(sgl, sg, sg_len, i) { 909 925 struct dw_desc *desc; ··· 985 1001 *maxburst = 0; 986 1002 } 987 1003 988 - static inline void convert_slave_id(struct dw_dma_chan *dwc) 989 - { 990 - struct dw_dma *dw = to_dw_dma(dwc->chan.device); 991 - 992 - dwc->dma_sconfig.slave_id -= dw->request_line_base; 993 - } 994 - 995 1004 static int 996 1005 set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig) 997 1006 { ··· 997 1020 memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig)); 998 1021 dwc->direction = sconfig->direction; 999 1022 1023 + /* Take the request line from slave_id member */ 1024 + if (dwc->request_line == ~0) 1025 + dwc->request_line = sconfig->slave_id; 1026 + 1000 1027 convert_burst(&dwc->dma_sconfig.src_maxburst); 1001 1028 convert_burst(&dwc->dma_sconfig.dst_maxburst); 1002 - convert_slave_id(dwc); 1003 1029 1004 1030 return 0; 1005 1031 } ··· 1010 1030 static inline void dwc_chan_pause(struct dw_dma_chan *dwc) 1011 1031 { 1012 1032 u32 cfglo = channel_readl(dwc, CFG_LO); 1033 + unsigned int count = 20; /* timeout iterations */ 1013 1034 1014 1035 channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP); 1015 - while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY)) 1016 - cpu_relax(); 1036 + while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY) && count--) 1037 + udelay(2); 1017 1038 1018 1039 dwc->paused = true; 1019 1040 } ··· 1150 1169 * doesn't mean what you think it means), and status writeback. 
1151 1170 */ 1152 1171 1172 + dwc_set_masters(dwc); 1173 + 1153 1174 spin_lock_irqsave(&dwc->lock, flags); 1154 1175 i = dwc->descs_allocated; 1155 1176 while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) { ··· 1209 1226 list_splice_init(&dwc->free_list, &list); 1210 1227 dwc->descs_allocated = 0; 1211 1228 dwc->initialized = false; 1229 + dwc->request_line = ~0; 1212 1230 1213 1231 /* Disable interrupts */ 1214 1232 channel_clear_bit(dw, MASK.XFER, dwc->mask); ··· 1225 1241 dev_vdbg(chan2dev(chan), "%s: done\n", __func__); 1226 1242 } 1227 1243 1228 - struct dw_dma_filter_args { 1244 + /*----------------------------------------------------------------------*/ 1245 + 1246 + struct dw_dma_of_filter_args { 1229 1247 struct dw_dma *dw; 1230 1248 unsigned int req; 1231 1249 unsigned int src; 1232 1250 unsigned int dst; 1233 1251 }; 1234 1252 1235 - static bool dw_dma_generic_filter(struct dma_chan *chan, void *param) 1253 + static bool dw_dma_of_filter(struct dma_chan *chan, void *param) 1236 1254 { 1237 1255 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 1238 - struct dw_dma *dw = to_dw_dma(chan->device); 1239 - struct dw_dma_filter_args *fargs = param; 1240 - struct dw_dma_slave *dws = &dwc->slave; 1256 + struct dw_dma_of_filter_args *fargs = param; 1241 1257 1242 - /* ensure the device matches our channel */ 1258 + /* Ensure the device matches our channel */ 1243 1259 if (chan->device != &fargs->dw->dma) 1244 1260 return false; 1245 1261 1246 - dws->dma_dev = dw->dma.dev; 1247 - dws->cfg_hi = ~0; 1248 - dws->cfg_lo = ~0; 1249 - dws->src_master = fargs->src; 1250 - dws->dst_master = fargs->dst; 1251 - 1252 1262 dwc->request_line = fargs->req; 1253 - 1254 - chan->private = dws; 1263 + dwc->src_master = fargs->src; 1264 + dwc->dst_master = fargs->dst; 1255 1265 1256 1266 return true; 1257 1267 } 1258 1268 1259 - static struct dma_chan *dw_dma_xlate(struct of_phandle_args *dma_spec, 1260 - struct of_dma *ofdma) 1269 + static struct dma_chan *dw_dma_of_xlate(struct 
of_phandle_args *dma_spec, 1270 + struct of_dma *ofdma) 1261 1271 { 1262 1272 struct dw_dma *dw = ofdma->of_dma_data; 1263 - struct dw_dma_filter_args fargs = { 1273 + struct dw_dma_of_filter_args fargs = { 1264 1274 .dw = dw, 1265 1275 }; 1266 1276 dma_cap_mask_t cap; ··· 1275 1297 dma_cap_set(DMA_SLAVE, cap); 1276 1298 1277 1299 /* TODO: there should be a simpler way to do this */ 1278 - return dma_request_channel(cap, dw_dma_generic_filter, &fargs); 1300 + return dma_request_channel(cap, dw_dma_of_filter, &fargs); 1279 1301 } 1302 + 1303 + #ifdef CONFIG_ACPI 1304 + static bool dw_dma_acpi_filter(struct dma_chan *chan, void *param) 1305 + { 1306 + struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 1307 + struct acpi_dma_spec *dma_spec = param; 1308 + 1309 + if (chan->device->dev != dma_spec->dev || 1310 + chan->chan_id != dma_spec->chan_id) 1311 + return false; 1312 + 1313 + dwc->request_line = dma_spec->slave_id; 1314 + dwc->src_master = dwc_get_sms(NULL); 1315 + dwc->dst_master = dwc_get_dms(NULL); 1316 + 1317 + return true; 1318 + } 1319 + 1320 + static void dw_dma_acpi_controller_register(struct dw_dma *dw) 1321 + { 1322 + struct device *dev = dw->dma.dev; 1323 + struct acpi_dma_filter_info *info; 1324 + int ret; 1325 + 1326 + info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL); 1327 + if (!info) 1328 + return; 1329 + 1330 + dma_cap_zero(info->dma_cap); 1331 + dma_cap_set(DMA_SLAVE, info->dma_cap); 1332 + info->filter_fn = dw_dma_acpi_filter; 1333 + 1334 + ret = devm_acpi_dma_controller_register(dev, acpi_dma_simple_xlate, 1335 + info); 1336 + if (ret) 1337 + dev_err(dev, "could not register acpi_dma_controller\n"); 1338 + } 1339 + #else /* !CONFIG_ACPI */ 1340 + static inline void dw_dma_acpi_controller_register(struct dw_dma *dw) {} 1341 + #endif /* !CONFIG_ACPI */ 1280 1342 1281 1343 /* --------------------- Cyclic DMA API extensions -------------------- */ 1282 1344 ··· 1340 1322 1341 1323 spin_lock_irqsave(&dwc->lock, flags); 1342 1324 1343 - /* assert 
channel is idle */ 1325 + /* Assert channel is idle */ 1344 1326 if (dma_readl(dw, CH_EN) & dwc->mask) { 1345 1327 dev_err(chan2dev(&dwc->chan), 1346 1328 "BUG: Attempted to start non-idle channel\n"); ··· 1352 1334 dma_writel(dw, CLEAR.ERROR, dwc->mask); 1353 1335 dma_writel(dw, CLEAR.XFER, dwc->mask); 1354 1336 1355 - /* setup DMAC channel registers */ 1337 + /* Setup DMAC channel registers */ 1356 1338 channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys); 1357 1339 channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN); 1358 1340 channel_writel(dwc, CTL_HI, 0); ··· 1519 1501 last = desc; 1520 1502 } 1521 1503 1522 - /* lets make a cyclic list */ 1504 + /* Let's make a cyclic list */ 1523 1505 last->lli.llp = cdesc->desc[0]->txd.phys; 1524 1506 1525 1507 dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%llx len %zu " ··· 1654 1636 1655 1637 static int dw_probe(struct platform_device *pdev) 1656 1638 { 1657 - const struct platform_device_id *match; 1658 1639 struct dw_dma_platform_data *pdata; 1659 1640 struct resource *io; 1660 1641 struct dw_dma *dw; ··· 1723 1706 1724 1707 dw->regs = regs; 1725 1708 1726 - /* get hardware configuration parameters */ 1709 + /* Get hardware configuration parameters */ 1727 1710 if (autocfg) { 1728 1711 max_blk_size = dma_readl(dw, MAX_BLK_SIZE); 1729 1712 ··· 1737 1720 memcpy(dw->data_width, pdata->data_width, 4); 1738 1721 } 1739 1722 1740 - /* Get the base request line if set */ 1741 - match = platform_get_device_id(pdev); 1742 - if (match) 1743 - dw->request_line_base = (unsigned int)match->driver_data; 1744 - 1745 1723 /* Calculate all channel mask before DMA setup */ 1746 1724 dw->all_chan_mask = (1 << nr_channels) - 1; 1747 1725 1748 - /* force dma off, just in case */ 1726 + /* Force dma off, just in case */ 1749 1727 dw_dma_off(dw); 1750 1728 1751 - /* disable BLOCK interrupts as well */ 1729 + /* Disable BLOCK interrupts as well */ 1752 1730 channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask); 1753 
1731 1754 1732 err = devm_request_irq(&pdev->dev, irq, dw_dma_interrupt, 0, ··· 1753 1741 1754 1742 platform_set_drvdata(pdev, dw); 1755 1743 1756 - /* create a pool of consistent memory blocks for hardware descriptors */ 1744 + /* Create a pool of consistent memory blocks for hardware descriptors */ 1757 1745 dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", &pdev->dev, 1758 1746 sizeof(struct dw_desc), 4, 0); 1759 1747 if (!dw->desc_pool) { ··· 1793 1781 channel_clear_bit(dw, CH_EN, dwc->mask); 1794 1782 1795 1783 dwc->direction = DMA_TRANS_NONE; 1784 + dwc->request_line = ~0; 1796 1785 1797 - /* hardware configuration */ 1786 + /* Hardware configuration */ 1798 1787 if (autocfg) { 1799 1788 unsigned int dwc_params; 1800 1789 ··· 1855 1842 1856 1843 if (pdev->dev.of_node) { 1857 1844 err = of_dma_controller_register(pdev->dev.of_node, 1858 - dw_dma_xlate, dw); 1859 - if (err && err != -ENODEV) 1845 + dw_dma_of_xlate, dw); 1846 + if (err) 1860 1847 dev_err(&pdev->dev, 1861 1848 "could not register of_dma_controller\n"); 1862 1849 } 1850 + 1851 + if (ACPI_HANDLE(&pdev->dev)) 1852 + dw_dma_acpi_controller_register(dw); 1863 1853 1864 1854 return 0; 1865 1855 } ··· 1928 1912 }; 1929 1913 1930 1914 #ifdef CONFIG_OF 1931 - static const struct of_device_id dw_dma_id_table[] = { 1915 + static const struct of_device_id dw_dma_of_id_table[] = { 1932 1916 { .compatible = "snps,dma-spear1340" }, 1933 1917 {} 1934 1918 }; 1935 - MODULE_DEVICE_TABLE(of, dw_dma_id_table); 1919 + MODULE_DEVICE_TABLE(of, dw_dma_of_id_table); 1936 1920 #endif 1937 1921 1938 - static const struct platform_device_id dw_dma_ids[] = { 1939 - /* Name, Request Line Base */ 1940 - { "INTL9C60", (kernel_ulong_t)16 }, 1922 + #ifdef CONFIG_ACPI 1923 + static const struct acpi_device_id dw_dma_acpi_id_table[] = { 1924 + { "INTL9C60", 0 }, 1941 1925 { } 1942 1926 }; 1927 + #endif 1943 1928 1944 1929 static struct platform_driver dw_driver = { 1945 1930 .probe = dw_probe, ··· 1949 1932 .driver = { 1950 
1933 .name = "dw_dmac", 1951 1934 .pm = &dw_dev_pm_ops, 1952 - .of_match_table = of_match_ptr(dw_dma_id_table), 1935 + .of_match_table = of_match_ptr(dw_dma_of_id_table), 1936 + .acpi_match_table = ACPI_PTR(dw_dma_acpi_id_table), 1953 1937 }, 1954 - .id_table = dw_dma_ids, 1955 1938 }; 1956 1939 1957 1940 static int __init dw_init(void)
+4 -2
drivers/dma/dw_dmac_regs.h
··· 212 212 /* hardware configuration */ 213 213 unsigned int block_size; 214 214 bool nollp; 215 + 216 + /* custom slave configuration */ 215 217 unsigned int request_line; 216 - struct dw_dma_slave slave; 218 + unsigned char src_master; 219 + unsigned char dst_master; 217 220 218 221 /* configuration passed via DMA_SLAVE_CONFIG */ 219 222 struct dma_slave_config dma_sconfig; ··· 250 247 /* hardware configuration */ 251 248 unsigned char nr_masters; 252 249 unsigned char data_width[4]; 253 - unsigned int request_line_base; 254 250 255 251 struct dw_dma_chan chan[0]; 256 252 };
+3 -4
drivers/dma/imx-dma.c
··· 859 859 860 860 desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node); 861 861 862 - if (imxdmac->sg_list) 863 - kfree(imxdmac->sg_list); 862 + kfree(imxdmac->sg_list); 864 863 865 864 imxdmac->sg_list = kcalloc(periods + 1, 866 865 sizeof(struct scatterlist), GFP_KERNEL); ··· 1144 1145 return ret; 1145 1146 } 1146 1147 1147 - static int __exit imxdma_remove(struct platform_device *pdev) 1148 + static int imxdma_remove(struct platform_device *pdev) 1148 1149 { 1149 1150 struct imxdma_engine *imxdma = platform_get_drvdata(pdev); 1150 1151 ··· 1161 1162 .name = "imx-dma", 1162 1163 }, 1163 1164 .id_table = imx_dma_devtype, 1164 - .remove = __exit_p(imxdma_remove), 1165 + .remove = imxdma_remove, 1165 1166 }; 1166 1167 1167 1168 static int __init imxdma_module_init(void)
+2 -2
drivers/dma/imx-sdma.c
··· 1462 1462 return ret; 1463 1463 } 1464 1464 1465 - static int __exit sdma_remove(struct platform_device *pdev) 1465 + static int sdma_remove(struct platform_device *pdev) 1466 1466 { 1467 1467 return -EBUSY; 1468 1468 } ··· 1473 1473 .of_match_table = sdma_dt_ids, 1474 1474 }, 1475 1475 .id_table = sdma_devtypes, 1476 - .remove = __exit_p(sdma_remove), 1476 + .remove = sdma_remove, 1477 1477 }; 1478 1478 1479 1479 static int __init sdma_module_init(void)
+7 -1
drivers/dma/ioat/dma.c
··· 892 892 * ioat_dma_setup_interrupts - setup interrupt handler 893 893 * @device: ioat device 894 894 */ 895 - static int ioat_dma_setup_interrupts(struct ioatdma_device *device) 895 + int ioat_dma_setup_interrupts(struct ioatdma_device *device) 896 896 { 897 897 struct ioat_chan_common *chan; 898 898 struct pci_dev *pdev = device->pdev; ··· 941 941 } 942 942 } 943 943 intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL; 944 + device->irq_mode = IOAT_MSIX; 944 945 goto done; 945 946 946 947 msix_single_vector: ··· 957 956 pci_disable_msix(pdev); 958 957 goto msi; 959 958 } 959 + device->irq_mode = IOAT_MSIX_SINGLE; 960 960 goto done; 961 961 962 962 msi: ··· 971 969 pci_disable_msi(pdev); 972 970 goto intx; 973 971 } 972 + device->irq_mode = IOAT_MSIX; 974 973 goto done; 975 974 976 975 intx: ··· 980 977 if (err) 981 978 goto err_no_irq; 982 979 980 + device->irq_mode = IOAT_INTX; 983 981 done: 984 982 if (device->intr_quirk) 985 983 device->intr_quirk(device); ··· 991 987 err_no_irq: 992 988 /* Disable all interrupt generation */ 993 989 writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET); 990 + device->irq_mode = IOAT_NOIRQ; 994 991 dev_err(dev, "no usable interrupts\n"); 995 992 return err; 996 993 } 994 + EXPORT_SYMBOL(ioat_dma_setup_interrupts); 997 995 998 996 static void ioat_disable_interrupts(struct ioatdma_device *device) 999 997 {
+51 -2
drivers/dma/ioat/dma.h
··· 39 39 #define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node) 40 40 #define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, txd) 41 41 #define to_dev(ioat_chan) (&(ioat_chan)->device->pdev->dev) 42 + #define to_pdev(ioat_chan) ((ioat_chan)->device->pdev) 42 43 43 44 #define chan_num(ch) ((int)((ch)->reg_base - (ch)->device->reg_base) / 0x80) 44 45 ··· 48 47 * (channel returns error when size is 0) 49 48 */ 50 49 #define NULL_DESC_BUFFER_SIZE 1 50 + 51 + enum ioat_irq_mode { 52 + IOAT_NOIRQ = 0, 53 + IOAT_MSIX, 54 + IOAT_MSIX_SINGLE, 55 + IOAT_MSI, 56 + IOAT_INTX 57 + }; 51 58 52 59 /** 53 60 * struct ioatdma_device - internal representation of a IOAT device ··· 81 72 void __iomem *reg_base; 82 73 struct pci_pool *dma_pool; 83 74 struct pci_pool *completion_pool; 75 + #define MAX_SED_POOLS 5 76 + struct dma_pool *sed_hw_pool[MAX_SED_POOLS]; 77 + struct kmem_cache *sed_pool; 84 78 struct dma_device common; 85 79 u8 version; 86 80 struct msix_entry msix_entries[4]; 87 81 struct ioat_chan_common *idx[4]; 88 82 struct dca_provider *dca; 83 + enum ioat_irq_mode irq_mode; 84 + u32 cap; 89 85 void (*intr_quirk)(struct ioatdma_device *device); 90 86 int (*enumerate_channels)(struct ioatdma_device *device); 91 87 int (*reset_hw)(struct ioat_chan_common *chan); ··· 145 131 u16 active; 146 132 }; 147 133 134 + /** 135 + * struct ioat_sed_ent - wrapper around super extended hardware descriptor 136 + * @hw: hardware SED 137 + * @sed_dma: dma address for the SED 138 + * @list: list member 139 + * @parent: point to the dma descriptor that's the parent 140 + */ 141 + struct ioat_sed_ent { 142 + struct ioat_sed_raw_descriptor *hw; 143 + dma_addr_t dma; 144 + struct ioat_ring_ent *parent; 145 + unsigned int hw_pool; 146 + }; 147 + 148 148 static inline struct ioat_chan_common *to_chan_common(struct dma_chan *c) 149 149 { 150 150 return container_of(c, struct ioat_chan_common, common); ··· 207 179 struct device *dev = to_dev(chan); 208 180 209 181 
dev_dbg(dev, "desc[%d]: (%#llx->%#llx) cookie: %d flags: %#x" 210 - " ctl: %#x (op: %d int_en: %d compl: %d)\n", id, 182 + " ctl: %#10.8x (op: %#x int_en: %d compl: %d)\n", id, 211 183 (unsigned long long) tx->phys, 212 184 (unsigned long long) hw->next, tx->cookie, tx->flags, 213 185 hw->ctl, hw->ctl_f.op, hw->ctl_f.int_en, hw->ctl_f.compl_write); ··· 229 201 return device->idx[index]; 230 202 } 231 203 232 - static inline u64 ioat_chansts(struct ioat_chan_common *chan) 204 + static inline u64 ioat_chansts_32(struct ioat_chan_common *chan) 233 205 { 234 206 u8 ver = chan->device->version; 235 207 u64 status; ··· 245 217 246 218 return status; 247 219 } 220 + 221 + #if BITS_PER_LONG == 64 222 + 223 + static inline u64 ioat_chansts(struct ioat_chan_common *chan) 224 + { 225 + u8 ver = chan->device->version; 226 + u64 status; 227 + 228 + /* With IOAT v3.3 the status register is 64bit. */ 229 + if (ver >= IOAT_VER_3_3) 230 + status = readq(chan->reg_base + IOAT_CHANSTS_OFFSET(ver)); 231 + else 232 + status = ioat_chansts_32(chan); 233 + 234 + return status; 235 + } 236 + 237 + #else 238 + #define ioat_chansts ioat_chansts_32 239 + #endif 248 240 249 241 static inline void ioat_start(struct ioat_chan_common *chan) 250 242 { ··· 369 321 dma_addr_t *phys_complete); 370 322 void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type); 371 323 void ioat_kobject_del(struct ioatdma_device *device); 324 + int ioat_dma_setup_interrupts(struct ioatdma_device *device); 372 325 extern const struct sysfs_ops ioat_sysfs_ops; 373 326 extern struct ioat_sysfs_entry ioat_version_attr; 374 327 extern struct ioat_sysfs_entry ioat_cap_attr;
+2
drivers/dma/ioat/dma_v2.h
··· 137 137 #ifdef DEBUG 138 138 int id; 139 139 #endif 140 + struct ioat_sed_ent *sed; 140 141 }; 141 142 142 143 static inline struct ioat_ring_ent * ··· 158 157 159 158 int ioat2_dma_probe(struct ioatdma_device *dev, int dca); 160 159 int ioat3_dma_probe(struct ioatdma_device *dev, int dca); 160 + void ioat3_dma_remove(struct ioatdma_device *dev); 161 161 struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase); 162 162 struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase); 163 163 int ioat2_check_space_lock(struct ioat2_dma_chan *ioat, int num_descs);
+791 -131
drivers/dma/ioat/dma_v3.c
··· 55 55 /* 56 56 * Support routines for v3+ hardware 57 57 */ 58 - 58 + #include <linux/module.h> 59 59 #include <linux/pci.h> 60 60 #include <linux/gfp.h> 61 61 #include <linux/dmaengine.h> ··· 70 70 /* ioat hardware assumes at least two sources for raid operations */ 71 71 #define src_cnt_to_sw(x) ((x) + 2) 72 72 #define src_cnt_to_hw(x) ((x) - 2) 73 + #define ndest_to_sw(x) ((x) + 1) 74 + #define ndest_to_hw(x) ((x) - 1) 75 + #define src16_cnt_to_sw(x) ((x) + 9) 76 + #define src16_cnt_to_hw(x) ((x) - 9) 73 77 74 78 /* provide a lookup table for setting the source address in the base or 75 79 * extended descriptor of an xor or pq descriptor ··· 81 77 static const u8 xor_idx_to_desc = 0xe0; 82 78 static const u8 xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 }; 83 79 static const u8 pq_idx_to_desc = 0xf8; 80 + static const u8 pq16_idx_to_desc[] = { 0, 0, 1, 1, 1, 1, 1, 1, 1, 81 + 2, 2, 2, 2, 2, 2, 2 }; 84 82 static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 }; 83 + static const u8 pq16_idx_to_field[] = { 1, 4, 1, 2, 3, 4, 5, 6, 7, 84 + 0, 1, 2, 3, 4, 5, 6 }; 85 + 86 + /* 87 + * technically sources 1 and 2 do not require SED, but the op will have 88 + * at least 9 descriptors so that's irrelevant. 
89 + */ 90 + static const u8 pq16_idx_to_sed[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 91 + 1, 1, 1, 1, 1, 1, 1 }; 92 + 93 + static void ioat3_eh(struct ioat2_dma_chan *ioat); 85 94 86 95 static dma_addr_t xor_get_src(struct ioat_raw_descriptor *descs[2], int idx) 87 96 { ··· 118 101 return raw->field[pq_idx_to_field[idx]]; 119 102 } 120 103 104 + static dma_addr_t pq16_get_src(struct ioat_raw_descriptor *desc[3], int idx) 105 + { 106 + struct ioat_raw_descriptor *raw = desc[pq16_idx_to_desc[idx]]; 107 + 108 + return raw->field[pq16_idx_to_field[idx]]; 109 + } 110 + 121 111 static void pq_set_src(struct ioat_raw_descriptor *descs[2], 122 112 dma_addr_t addr, u32 offset, u8 coef, int idx) 123 113 { ··· 133 109 134 110 raw->field[pq_idx_to_field[idx]] = addr + offset; 135 111 pq->coef[idx] = coef; 112 + } 113 + 114 + static int sed_get_pq16_pool_idx(int src_cnt) 115 + { 116 + 117 + return pq16_idx_to_sed[src_cnt]; 118 + } 119 + 120 + static bool is_jf_ioat(struct pci_dev *pdev) 121 + { 122 + switch (pdev->device) { 123 + case PCI_DEVICE_ID_INTEL_IOAT_JSF0: 124 + case PCI_DEVICE_ID_INTEL_IOAT_JSF1: 125 + case PCI_DEVICE_ID_INTEL_IOAT_JSF2: 126 + case PCI_DEVICE_ID_INTEL_IOAT_JSF3: 127 + case PCI_DEVICE_ID_INTEL_IOAT_JSF4: 128 + case PCI_DEVICE_ID_INTEL_IOAT_JSF5: 129 + case PCI_DEVICE_ID_INTEL_IOAT_JSF6: 130 + case PCI_DEVICE_ID_INTEL_IOAT_JSF7: 131 + case PCI_DEVICE_ID_INTEL_IOAT_JSF8: 132 + case PCI_DEVICE_ID_INTEL_IOAT_JSF9: 133 + return true; 134 + default: 135 + return false; 136 + } 137 + } 138 + 139 + static bool is_snb_ioat(struct pci_dev *pdev) 140 + { 141 + switch (pdev->device) { 142 + case PCI_DEVICE_ID_INTEL_IOAT_SNB0: 143 + case PCI_DEVICE_ID_INTEL_IOAT_SNB1: 144 + case PCI_DEVICE_ID_INTEL_IOAT_SNB2: 145 + case PCI_DEVICE_ID_INTEL_IOAT_SNB3: 146 + case PCI_DEVICE_ID_INTEL_IOAT_SNB4: 147 + case PCI_DEVICE_ID_INTEL_IOAT_SNB5: 148 + case PCI_DEVICE_ID_INTEL_IOAT_SNB6: 149 + case PCI_DEVICE_ID_INTEL_IOAT_SNB7: 150 + case PCI_DEVICE_ID_INTEL_IOAT_SNB8: 151 + case 
PCI_DEVICE_ID_INTEL_IOAT_SNB9: 152 + return true; 153 + default: 154 + return false; 155 + } 156 + } 157 + 158 + static bool is_ivb_ioat(struct pci_dev *pdev) 159 + { 160 + switch (pdev->device) { 161 + case PCI_DEVICE_ID_INTEL_IOAT_IVB0: 162 + case PCI_DEVICE_ID_INTEL_IOAT_IVB1: 163 + case PCI_DEVICE_ID_INTEL_IOAT_IVB2: 164 + case PCI_DEVICE_ID_INTEL_IOAT_IVB3: 165 + case PCI_DEVICE_ID_INTEL_IOAT_IVB4: 166 + case PCI_DEVICE_ID_INTEL_IOAT_IVB5: 167 + case PCI_DEVICE_ID_INTEL_IOAT_IVB6: 168 + case PCI_DEVICE_ID_INTEL_IOAT_IVB7: 169 + case PCI_DEVICE_ID_INTEL_IOAT_IVB8: 170 + case PCI_DEVICE_ID_INTEL_IOAT_IVB9: 171 + return true; 172 + default: 173 + return false; 174 + } 175 + 176 + } 177 + 178 + static bool is_hsw_ioat(struct pci_dev *pdev) 179 + { 180 + switch (pdev->device) { 181 + case PCI_DEVICE_ID_INTEL_IOAT_HSW0: 182 + case PCI_DEVICE_ID_INTEL_IOAT_HSW1: 183 + case PCI_DEVICE_ID_INTEL_IOAT_HSW2: 184 + case PCI_DEVICE_ID_INTEL_IOAT_HSW3: 185 + case PCI_DEVICE_ID_INTEL_IOAT_HSW4: 186 + case PCI_DEVICE_ID_INTEL_IOAT_HSW5: 187 + case PCI_DEVICE_ID_INTEL_IOAT_HSW6: 188 + case PCI_DEVICE_ID_INTEL_IOAT_HSW7: 189 + case PCI_DEVICE_ID_INTEL_IOAT_HSW8: 190 + case PCI_DEVICE_ID_INTEL_IOAT_HSW9: 191 + return true; 192 + default: 193 + return false; 194 + } 195 + 196 + } 197 + 198 + static bool is_xeon_cb32(struct pci_dev *pdev) 199 + { 200 + return is_jf_ioat(pdev) || is_snb_ioat(pdev) || is_ivb_ioat(pdev) || 201 + is_hsw_ioat(pdev); 202 + } 203 + 204 + static bool is_bwd_ioat(struct pci_dev *pdev) 205 + { 206 + switch (pdev->device) { 207 + case PCI_DEVICE_ID_INTEL_IOAT_BWD0: 208 + case PCI_DEVICE_ID_INTEL_IOAT_BWD1: 209 + case PCI_DEVICE_ID_INTEL_IOAT_BWD2: 210 + case PCI_DEVICE_ID_INTEL_IOAT_BWD3: 211 + return true; 212 + default: 213 + return false; 214 + } 215 + } 216 + 217 + static bool is_bwd_noraid(struct pci_dev *pdev) 218 + { 219 + switch (pdev->device) { 220 + case PCI_DEVICE_ID_INTEL_IOAT_BWD2: 221 + case PCI_DEVICE_ID_INTEL_IOAT_BWD3: 222 + return true; 223 
+ default: 224 + return false; 225 + } 226 + 227 + } 228 + 229 + static void pq16_set_src(struct ioat_raw_descriptor *desc[3], 230 + dma_addr_t addr, u32 offset, u8 coef, int idx) 231 + { 232 + struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *)desc[0]; 233 + struct ioat_pq16a_descriptor *pq16 = 234 + (struct ioat_pq16a_descriptor *)desc[1]; 235 + struct ioat_raw_descriptor *raw = desc[pq16_idx_to_desc[idx]]; 236 + 237 + raw->field[pq16_idx_to_field[idx]] = addr + offset; 238 + 239 + if (idx < 8) 240 + pq->coef[idx] = coef; 241 + else 242 + pq16->coef[idx - 8] = coef; 243 + } 244 + 245 + static struct ioat_sed_ent * 246 + ioat3_alloc_sed(struct ioatdma_device *device, unsigned int hw_pool) 247 + { 248 + struct ioat_sed_ent *sed; 249 + gfp_t flags = __GFP_ZERO | GFP_ATOMIC; 250 + 251 + sed = kmem_cache_alloc(device->sed_pool, flags); 252 + if (!sed) 253 + return NULL; 254 + 255 + sed->hw_pool = hw_pool; 256 + sed->hw = dma_pool_alloc(device->sed_hw_pool[hw_pool], 257 + flags, &sed->dma); 258 + if (!sed->hw) { 259 + kmem_cache_free(device->sed_pool, sed); 260 + return NULL; 261 + } 262 + 263 + return sed; 264 + } 265 + 266 + static void ioat3_free_sed(struct ioatdma_device *device, struct ioat_sed_ent *sed) 267 + { 268 + if (!sed) 269 + return; 270 + 271 + dma_pool_free(device->sed_hw_pool[sed->hw_pool], sed->hw, sed->dma); 272 + kmem_cache_free(device->sed_pool, sed); 136 273 } 137 274 138 275 static void ioat3_dma_unmap(struct ioat2_dma_chan *ioat, ··· 408 223 } 409 224 break; 410 225 } 226 + case IOAT_OP_PQ_16S: 227 + case IOAT_OP_PQ_VAL_16S: { 228 + struct ioat_pq_descriptor *pq = desc->pq; 229 + int src_cnt = src16_cnt_to_sw(pq->ctl_f.src_cnt); 230 + struct ioat_raw_descriptor *descs[4]; 231 + int i; 232 + 233 + /* in the 'continue' case don't unmap the dests as sources */ 234 + if (dmaf_p_disabled_continue(flags)) 235 + src_cnt--; 236 + else if (dmaf_continue(flags)) 237 + src_cnt -= 3; 238 + 239 + if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) { 240 + 
descs[0] = (struct ioat_raw_descriptor *)pq; 241 + descs[1] = (struct ioat_raw_descriptor *)(desc->sed->hw); 242 + descs[2] = (struct ioat_raw_descriptor *)(&desc->sed->hw->b[0]); 243 + for (i = 0; i < src_cnt; i++) { 244 + dma_addr_t src = pq16_get_src(descs, i); 245 + 246 + ioat_unmap(pdev, src - offset, len, 247 + PCI_DMA_TODEVICE, flags, 0); 248 + } 249 + 250 + /* the dests are sources in pq validate operations */ 251 + if (pq->ctl_f.op == IOAT_OP_XOR_VAL) { 252 + if (!(flags & DMA_PREP_PQ_DISABLE_P)) 253 + ioat_unmap(pdev, pq->p_addr - offset, 254 + len, PCI_DMA_TODEVICE, 255 + flags, 0); 256 + if (!(flags & DMA_PREP_PQ_DISABLE_Q)) 257 + ioat_unmap(pdev, pq->q_addr - offset, 258 + len, PCI_DMA_TODEVICE, 259 + flags, 0); 260 + break; 261 + } 262 + } 263 + 264 + if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) { 265 + if (!(flags & DMA_PREP_PQ_DISABLE_P)) 266 + ioat_unmap(pdev, pq->p_addr - offset, len, 267 + PCI_DMA_BIDIRECTIONAL, flags, 1); 268 + if (!(flags & DMA_PREP_PQ_DISABLE_Q)) 269 + ioat_unmap(pdev, pq->q_addr - offset, len, 270 + PCI_DMA_BIDIRECTIONAL, flags, 1); 271 + } 272 + break; 273 + } 411 274 default: 412 275 dev_err(&pdev->dev, "%s: unknown op type: %#x\n", 413 276 __func__, desc->hw->ctl_f.op); ··· 483 250 return false; 484 251 } 485 252 253 + static u64 ioat3_get_current_completion(struct ioat_chan_common *chan) 254 + { 255 + u64 phys_complete; 256 + u64 completion; 257 + 258 + completion = *chan->completion; 259 + phys_complete = ioat_chansts_to_addr(completion); 260 + 261 + dev_dbg(to_dev(chan), "%s: phys_complete: %#llx\n", __func__, 262 + (unsigned long long) phys_complete); 263 + 264 + return phys_complete; 265 + } 266 + 267 + static bool ioat3_cleanup_preamble(struct ioat_chan_common *chan, 268 + u64 *phys_complete) 269 + { 270 + *phys_complete = ioat3_get_current_completion(chan); 271 + if (*phys_complete == chan->last_completion) 272 + return false; 273 + 274 + clear_bit(IOAT_COMPLETION_ACK, &chan->state); 275 + mod_timer(&chan->timer, 
jiffies + COMPLETION_TIMEOUT); 276 + 277 + return true; 278 + } 279 + 280 + static void 281 + desc_get_errstat(struct ioat2_dma_chan *ioat, struct ioat_ring_ent *desc) 282 + { 283 + struct ioat_dma_descriptor *hw = desc->hw; 284 + 285 + switch (hw->ctl_f.op) { 286 + case IOAT_OP_PQ_VAL: 287 + case IOAT_OP_PQ_VAL_16S: 288 + { 289 + struct ioat_pq_descriptor *pq = desc->pq; 290 + 291 + /* check if there's error written */ 292 + if (!pq->dwbes_f.wbes) 293 + return; 294 + 295 + /* need to set a chanerr var for checking to clear later */ 296 + 297 + if (pq->dwbes_f.p_val_err) 298 + *desc->result |= SUM_CHECK_P_RESULT; 299 + 300 + if (pq->dwbes_f.q_val_err) 301 + *desc->result |= SUM_CHECK_Q_RESULT; 302 + 303 + return; 304 + } 305 + default: 306 + return; 307 + } 308 + } 309 + 486 310 /** 487 311 * __cleanup - reclaim used descriptors 488 312 * @ioat: channel (ring) to clean ··· 550 260 static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete) 551 261 { 552 262 struct ioat_chan_common *chan = &ioat->base; 263 + struct ioatdma_device *device = chan->device; 553 264 struct ioat_ring_ent *desc; 554 265 bool seen_current = false; 555 266 int idx = ioat->tail, i; ··· 558 267 559 268 dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n", 560 269 __func__, ioat->head, ioat->tail, ioat->issued); 270 + 271 + /* 272 + * At restart of the channel, the completion address and the 273 + * channel status will be 0 due to starting a new chain. Since 274 + * it's new chain and the first descriptor "fails", there is 275 + * nothing to clean up. We do not want to reap the entire submitted 276 + * chain due to this 0 address value and then BUG. 
277 + */ 278 + if (!phys_complete) 279 + return; 561 280 562 281 active = ioat2_ring_active(ioat); 563 282 for (i = 0; i < active && !seen_current; i++) { ··· 577 276 prefetch(ioat2_get_ring_ent(ioat, idx + i + 1)); 578 277 desc = ioat2_get_ring_ent(ioat, idx + i); 579 278 dump_desc_dbg(ioat, desc); 279 + 280 + /* set err stat if we are using dwbes */ 281 + if (device->cap & IOAT_CAP_DWBES) 282 + desc_get_errstat(ioat, desc); 283 + 580 284 tx = &desc->txd; 581 285 if (tx->cookie) { 582 286 dma_cookie_complete(tx); ··· 599 293 if (desc_has_ext(desc)) { 600 294 BUG_ON(i + 1 >= active); 601 295 i++; 296 + } 297 + 298 + /* cleanup super extended descriptors */ 299 + if (desc->sed) { 300 + ioat3_free_sed(device, desc->sed); 301 + desc->sed = NULL; 602 302 } 603 303 } 604 304 smp_mb(); /* finish all descriptor reads before incrementing tail */ ··· 626 314 static void ioat3_cleanup(struct ioat2_dma_chan *ioat) 627 315 { 628 316 struct ioat_chan_common *chan = &ioat->base; 629 - dma_addr_t phys_complete; 317 + u64 phys_complete; 630 318 631 319 spin_lock_bh(&chan->cleanup_lock); 632 - if (ioat_cleanup_preamble(chan, &phys_complete)) 320 + 321 + if (ioat3_cleanup_preamble(chan, &phys_complete)) 633 322 __cleanup(ioat, phys_complete); 323 + 324 + if (is_ioat_halted(*chan->completion)) { 325 + u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); 326 + 327 + if (chanerr & IOAT_CHANERR_HANDLE_MASK) { 328 + mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT); 329 + ioat3_eh(ioat); 330 + } 331 + } 332 + 634 333 spin_unlock_bh(&chan->cleanup_lock); 635 334 } 636 335 ··· 656 333 static void ioat3_restart_channel(struct ioat2_dma_chan *ioat) 657 334 { 658 335 struct ioat_chan_common *chan = &ioat->base; 659 - dma_addr_t phys_complete; 336 + u64 phys_complete; 660 337 661 338 ioat2_quiesce(chan, 0); 662 - if (ioat_cleanup_preamble(chan, &phys_complete)) 339 + if (ioat3_cleanup_preamble(chan, &phys_complete)) 663 340 __cleanup(ioat, phys_complete); 664 341 665 342 
__ioat2_restart_chan(ioat); 343 + } 344 + 345 + static void ioat3_eh(struct ioat2_dma_chan *ioat) 346 + { 347 + struct ioat_chan_common *chan = &ioat->base; 348 + struct pci_dev *pdev = to_pdev(chan); 349 + struct ioat_dma_descriptor *hw; 350 + u64 phys_complete; 351 + struct ioat_ring_ent *desc; 352 + u32 err_handled = 0; 353 + u32 chanerr_int; 354 + u32 chanerr; 355 + 356 + /* cleanup so tail points to descriptor that caused the error */ 357 + if (ioat3_cleanup_preamble(chan, &phys_complete)) 358 + __cleanup(ioat, phys_complete); 359 + 360 + chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); 361 + pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr_int); 362 + 363 + dev_dbg(to_dev(chan), "%s: error = %x:%x\n", 364 + __func__, chanerr, chanerr_int); 365 + 366 + desc = ioat2_get_ring_ent(ioat, ioat->tail); 367 + hw = desc->hw; 368 + dump_desc_dbg(ioat, desc); 369 + 370 + switch (hw->ctl_f.op) { 371 + case IOAT_OP_XOR_VAL: 372 + if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) { 373 + *desc->result |= SUM_CHECK_P_RESULT; 374 + err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR; 375 + } 376 + break; 377 + case IOAT_OP_PQ_VAL: 378 + case IOAT_OP_PQ_VAL_16S: 379 + if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) { 380 + *desc->result |= SUM_CHECK_P_RESULT; 381 + err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR; 382 + } 383 + if (chanerr & IOAT_CHANERR_XOR_Q_ERR) { 384 + *desc->result |= SUM_CHECK_Q_RESULT; 385 + err_handled |= IOAT_CHANERR_XOR_Q_ERR; 386 + } 387 + break; 388 + } 389 + 390 + /* fault on unhandled error or spurious halt */ 391 + if (chanerr ^ err_handled || chanerr == 0) { 392 + dev_err(to_dev(chan), "%s: fatal error (%x:%x)\n", 393 + __func__, chanerr, err_handled); 394 + BUG(); 395 + } 396 + 397 + writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET); 398 + pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr_int); 399 + 400 + /* mark faulting descriptor as complete */ 401 + *chan->completion = desc->txd.phys; 402 + 403 + 
spin_lock_bh(&ioat->prep_lock); 404 + ioat3_restart_channel(ioat); 405 + spin_unlock_bh(&ioat->prep_lock); 666 406 } 667 407 668 408 static void check_active(struct ioat2_dma_chan *ioat) ··· 991 605 int i; 992 606 993 607 dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x" 994 - " sz: %#x ctl: %#x (op: %d int: %d compl: %d pq: '%s%s' src_cnt: %d)\n", 608 + " sz: %#10.8x ctl: %#x (op: %#x int: %d compl: %d pq: '%s%s'" 609 + " src_cnt: %d)\n", 995 610 desc_id(desc), (unsigned long long) desc->txd.phys, 996 611 (unsigned long long) (pq_ex ? pq_ex->next : pq->next), 997 612 desc->txd.flags, pq->size, pq->ctl, pq->ctl_f.op, pq->ctl_f.int_en, ··· 1002 615 for (i = 0; i < src_cnt; i++) 1003 616 dev_dbg(dev, "\tsrc[%d]: %#llx coef: %#x\n", i, 1004 617 (unsigned long long) pq_get_src(descs, i), pq->coef[i]); 618 + dev_dbg(dev, "\tP: %#llx\n", pq->p_addr); 619 + dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr); 620 + dev_dbg(dev, "\tNEXT: %#llx\n", pq->next); 621 + } 622 + 623 + static void dump_pq16_desc_dbg(struct ioat2_dma_chan *ioat, 624 + struct ioat_ring_ent *desc) 625 + { 626 + struct device *dev = to_dev(&ioat->base); 627 + struct ioat_pq_descriptor *pq = desc->pq; 628 + struct ioat_raw_descriptor *descs[] = { (void *)pq, 629 + (void *)pq, 630 + (void *)pq }; 631 + int src_cnt = src16_cnt_to_sw(pq->ctl_f.src_cnt); 632 + int i; 633 + 634 + if (desc->sed) { 635 + descs[1] = (void *)desc->sed->hw; 636 + descs[2] = (void *)desc->sed->hw + 64; 637 + } 638 + 639 + dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x" 640 + " sz: %#x ctl: %#x (op: %#x int: %d compl: %d pq: '%s%s'" 641 + " src_cnt: %d)\n", 642 + desc_id(desc), (unsigned long long) desc->txd.phys, 643 + (unsigned long long) pq->next, 644 + desc->txd.flags, pq->size, pq->ctl, 645 + pq->ctl_f.op, pq->ctl_f.int_en, 646 + pq->ctl_f.compl_write, 647 + pq->ctl_f.p_disable ? "" : "p", pq->ctl_f.q_disable ? 
"" : "q", 648 + pq->ctl_f.src_cnt); 649 + for (i = 0; i < src_cnt; i++) { 650 + dev_dbg(dev, "\tsrc[%d]: %#llx coef: %#x\n", i, 651 + (unsigned long long) pq16_get_src(descs, i), 652 + pq->coef[i]); 653 + } 1005 654 dev_dbg(dev, "\tP: %#llx\n", pq->p_addr); 1006 655 dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr); 1007 656 } ··· 1050 627 { 1051 628 struct ioat2_dma_chan *ioat = to_ioat2_chan(c); 1052 629 struct ioat_chan_common *chan = &ioat->base; 630 + struct ioatdma_device *device = chan->device; 1053 631 struct ioat_ring_ent *compl_desc; 1054 632 struct ioat_ring_ent *desc; 1055 633 struct ioat_ring_ent *ext; ··· 1061 637 u32 offset = 0; 1062 638 u8 op = result ? IOAT_OP_PQ_VAL : IOAT_OP_PQ; 1063 639 int i, s, idx, with_ext, num_descs; 640 + int cb32 = (device->version < IOAT_VER_3_3) ? 1 : 0; 1064 641 1065 642 dev_dbg(to_dev(chan), "%s\n", __func__); 1066 643 /* the engine requires at least two sources (we provide ··· 1087 662 * order. 1088 663 */ 1089 664 if (likely(num_descs) && 1090 - ioat2_check_space_lock(ioat, num_descs+1) == 0) 665 + ioat2_check_space_lock(ioat, num_descs + cb32) == 0) 1091 666 idx = ioat->head; 1092 667 else 1093 668 return NULL; ··· 1125 700 pq->q_addr = dst[1] + offset; 1126 701 pq->ctl = 0; 1127 702 pq->ctl_f.op = op; 703 + /* we turn on descriptor write back error status */ 704 + if (device->cap & IOAT_CAP_DWBES) 705 + pq->ctl_f.wb_en = result ? 
1 : 0; 1128 706 pq->ctl_f.src_cnt = src_cnt_to_hw(s); 1129 707 pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P); 1130 708 pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q); ··· 1144 716 pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE); 1145 717 dump_pq_desc_dbg(ioat, desc, ext); 1146 718 1147 - /* completion descriptor carries interrupt bit */ 1148 - compl_desc = ioat2_get_ring_ent(ioat, idx + i); 1149 - compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT; 1150 - hw = compl_desc->hw; 1151 - hw->ctl = 0; 1152 - hw->ctl_f.null = 1; 1153 - hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); 1154 - hw->ctl_f.compl_write = 1; 1155 - hw->size = NULL_DESC_BUFFER_SIZE; 1156 - dump_desc_dbg(ioat, compl_desc); 719 + if (!cb32) { 720 + pq->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); 721 + pq->ctl_f.compl_write = 1; 722 + compl_desc = desc; 723 + } else { 724 + /* completion descriptor carries interrupt bit */ 725 + compl_desc = ioat2_get_ring_ent(ioat, idx + i); 726 + compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT; 727 + hw = compl_desc->hw; 728 + hw->ctl = 0; 729 + hw->ctl_f.null = 1; 730 + hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); 731 + hw->ctl_f.compl_write = 1; 732 + hw->size = NULL_DESC_BUFFER_SIZE; 733 + dump_desc_dbg(ioat, compl_desc); 734 + } 735 + 1157 736 1158 737 /* we leave the channel locked to ensure in order submission */ 1159 738 return &compl_desc->txd; 739 + } 740 + 741 + static struct dma_async_tx_descriptor * 742 + __ioat3_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result, 743 + const dma_addr_t *dst, const dma_addr_t *src, 744 + unsigned int src_cnt, const unsigned char *scf, 745 + size_t len, unsigned long flags) 746 + { 747 + struct ioat2_dma_chan *ioat = to_ioat2_chan(c); 748 + struct ioat_chan_common *chan = &ioat->base; 749 + struct ioatdma_device *device = chan->device; 750 + struct ioat_ring_ent *desc; 751 + size_t total_len = len; 752 + struct ioat_pq_descriptor *pq; 753 + u32 offset = 0; 754 + u8 op; 755 + int 
i, s, idx, num_descs; 756 + 757 + /* this function only handles src_cnt 9 - 16 */ 758 + BUG_ON(src_cnt < 9); 759 + 760 + /* this function is only called with 9-16 sources */ 761 + op = result ? IOAT_OP_PQ_VAL_16S : IOAT_OP_PQ_16S; 762 + 763 + dev_dbg(to_dev(chan), "%s\n", __func__); 764 + 765 + num_descs = ioat2_xferlen_to_descs(ioat, len); 766 + 767 + /* 768 + * 16 source pq is only available on cb3.3 and has no completion 769 + * write hw bug. 770 + */ 771 + if (num_descs && ioat2_check_space_lock(ioat, num_descs) == 0) 772 + idx = ioat->head; 773 + else 774 + return NULL; 775 + 776 + i = 0; 777 + 778 + do { 779 + struct ioat_raw_descriptor *descs[4]; 780 + size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log); 781 + 782 + desc = ioat2_get_ring_ent(ioat, idx + i); 783 + pq = desc->pq; 784 + 785 + descs[0] = (struct ioat_raw_descriptor *) pq; 786 + 787 + desc->sed = ioat3_alloc_sed(device, 788 + sed_get_pq16_pool_idx(src_cnt)); 789 + if (!desc->sed) { 790 + dev_err(to_dev(chan), 791 + "%s: no free sed entries\n", __func__); 792 + return NULL; 793 + } 794 + 795 + pq->sed_addr = desc->sed->dma; 796 + desc->sed->parent = desc; 797 + 798 + descs[1] = (struct ioat_raw_descriptor *)desc->sed->hw; 799 + descs[2] = (void *)descs[1] + 64; 800 + 801 + for (s = 0; s < src_cnt; s++) 802 + pq16_set_src(descs, src[s], offset, scf[s], s); 803 + 804 + /* see the comment for dma_maxpq in include/linux/dmaengine.h */ 805 + if (dmaf_p_disabled_continue(flags)) 806 + pq16_set_src(descs, dst[1], offset, 1, s++); 807 + else if (dmaf_continue(flags)) { 808 + pq16_set_src(descs, dst[0], offset, 0, s++); 809 + pq16_set_src(descs, dst[1], offset, 1, s++); 810 + pq16_set_src(descs, dst[1], offset, 0, s++); 811 + } 812 + 813 + pq->size = xfer_size; 814 + pq->p_addr = dst[0] + offset; 815 + pq->q_addr = dst[1] + offset; 816 + pq->ctl = 0; 817 + pq->ctl_f.op = op; 818 + pq->ctl_f.src_cnt = src16_cnt_to_hw(s); 819 + /* we turn on descriptor write back error status */ 820 + if 
(device->cap & IOAT_CAP_DWBES) 821 + pq->ctl_f.wb_en = result ? 1 : 0; 822 + pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P); 823 + pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q); 824 + 825 + len -= xfer_size; 826 + offset += xfer_size; 827 + } while (++i < num_descs); 828 + 829 + /* last pq descriptor carries the unmap parameters and fence bit */ 830 + desc->txd.flags = flags; 831 + desc->len = total_len; 832 + if (result) 833 + desc->result = result; 834 + pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE); 835 + 836 + /* with cb3.3 we should be able to do completion w/o a null desc */ 837 + pq->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); 838 + pq->ctl_f.compl_write = 1; 839 + 840 + dump_pq16_desc_dbg(ioat, desc); 841 + 842 + /* we leave the channel locked to ensure in order submission */ 843 + return &desc->txd; 1160 844 } 1161 845 1162 846 static struct dma_async_tx_descriptor * ··· 1276 736 unsigned int src_cnt, const unsigned char *scf, size_t len, 1277 737 unsigned long flags) 1278 738 { 739 + struct dma_device *dma = chan->device; 740 + 1279 741 /* specify valid address for disabled result */ 1280 742 if (flags & DMA_PREP_PQ_DISABLE_P) 1281 743 dst[0] = dst[1]; ··· 1297 755 single_source_coef[0] = scf[0]; 1298 756 single_source_coef[1] = 0; 1299 757 1300 - return __ioat3_prep_pq_lock(chan, NULL, dst, single_source, 2, 1301 - single_source_coef, len, flags); 1302 - } else 1303 - return __ioat3_prep_pq_lock(chan, NULL, dst, src, src_cnt, scf, 1304 - len, flags); 758 + return (src_cnt > 8) && (dma->max_pq > 8) ? 759 + __ioat3_prep_pq16_lock(chan, NULL, dst, single_source, 760 + 2, single_source_coef, len, 761 + flags) : 762 + __ioat3_prep_pq_lock(chan, NULL, dst, single_source, 2, 763 + single_source_coef, len, flags); 764 + 765 + } else { 766 + return (src_cnt > 8) && (dma->max_pq > 8) ? 
767 + __ioat3_prep_pq16_lock(chan, NULL, dst, src, src_cnt, 768 + scf, len, flags) : 769 + __ioat3_prep_pq_lock(chan, NULL, dst, src, src_cnt, 770 + scf, len, flags); 771 + } 1305 772 } 1306 773 1307 774 struct dma_async_tx_descriptor * ··· 1318 767 unsigned int src_cnt, const unsigned char *scf, size_t len, 1319 768 enum sum_check_flags *pqres, unsigned long flags) 1320 769 { 770 + struct dma_device *dma = chan->device; 771 + 1321 772 /* specify valid address for disabled result */ 1322 773 if (flags & DMA_PREP_PQ_DISABLE_P) 1323 774 pq[0] = pq[1]; ··· 1331 778 */ 1332 779 *pqres = 0; 1333 780 1334 - return __ioat3_prep_pq_lock(chan, pqres, pq, src, src_cnt, scf, len, 1335 - flags); 781 + return (src_cnt > 8) && (dma->max_pq > 8) ? 782 + __ioat3_prep_pq16_lock(chan, pqres, pq, src, src_cnt, scf, len, 783 + flags) : 784 + __ioat3_prep_pq_lock(chan, pqres, pq, src, src_cnt, scf, len, 785 + flags); 1336 786 } 1337 787 1338 788 static struct dma_async_tx_descriptor * 1339 789 ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src, 1340 790 unsigned int src_cnt, size_t len, unsigned long flags) 1341 791 { 792 + struct dma_device *dma = chan->device; 1342 793 unsigned char scf[src_cnt]; 1343 794 dma_addr_t pq[2]; 1344 795 ··· 1351 794 flags |= DMA_PREP_PQ_DISABLE_Q; 1352 795 pq[1] = dst; /* specify valid address for disabled result */ 1353 796 1354 - return __ioat3_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len, 1355 - flags); 797 + return (src_cnt > 8) && (dma->max_pq > 8) ? 
798 + __ioat3_prep_pq16_lock(chan, NULL, pq, src, src_cnt, scf, len, 799 + flags) : 800 + __ioat3_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len, 801 + flags); 1356 802 } 1357 803 1358 804 struct dma_async_tx_descriptor * ··· 1363 803 unsigned int src_cnt, size_t len, 1364 804 enum sum_check_flags *result, unsigned long flags) 1365 805 { 806 + struct dma_device *dma = chan->device; 1366 807 unsigned char scf[src_cnt]; 1367 808 dma_addr_t pq[2]; 1368 809 ··· 1377 816 flags |= DMA_PREP_PQ_DISABLE_Q; 1378 817 pq[1] = pq[0]; /* specify valid address for disabled result */ 1379 818 1380 - return __ioat3_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1, scf, 1381 - len, flags); 819 + 820 + return (src_cnt > 8) && (dma->max_pq > 8) ? 821 + __ioat3_prep_pq16_lock(chan, result, pq, &src[1], src_cnt - 1, 822 + scf, len, flags) : 823 + __ioat3_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1, 824 + scf, len, flags); 1382 825 } 1383 826 1384 827 static struct dma_async_tx_descriptor * ··· 1732 1167 return 0; 1733 1168 } 1734 1169 1170 + static int ioat3_irq_reinit(struct ioatdma_device *device) 1171 + { 1172 + int msixcnt = device->common.chancnt; 1173 + struct pci_dev *pdev = device->pdev; 1174 + int i; 1175 + struct msix_entry *msix; 1176 + struct ioat_chan_common *chan; 1177 + int err = 0; 1178 + 1179 + switch (device->irq_mode) { 1180 + case IOAT_MSIX: 1181 + 1182 + for (i = 0; i < msixcnt; i++) { 1183 + msix = &device->msix_entries[i]; 1184 + chan = ioat_chan_by_index(device, i); 1185 + devm_free_irq(&pdev->dev, msix->vector, chan); 1186 + } 1187 + 1188 + pci_disable_msix(pdev); 1189 + break; 1190 + 1191 + case IOAT_MSIX_SINGLE: 1192 + msix = &device->msix_entries[0]; 1193 + chan = ioat_chan_by_index(device, 0); 1194 + devm_free_irq(&pdev->dev, msix->vector, chan); 1195 + pci_disable_msix(pdev); 1196 + break; 1197 + 1198 + case IOAT_MSI: 1199 + chan = ioat_chan_by_index(device, 0); 1200 + devm_free_irq(&pdev->dev, pdev->irq, chan); 1201 + pci_disable_msi(pdev); 
1202 + break; 1203 + 1204 + case IOAT_INTX: 1205 + chan = ioat_chan_by_index(device, 0); 1206 + devm_free_irq(&pdev->dev, pdev->irq, chan); 1207 + break; 1208 + 1209 + default: 1210 + return 0; 1211 + } 1212 + 1213 + device->irq_mode = IOAT_NOIRQ; 1214 + 1215 + err = ioat_dma_setup_interrupts(device); 1216 + 1217 + return err; 1218 + } 1219 + 1735 1220 static int ioat3_reset_hw(struct ioat_chan_common *chan) 1736 1221 { 1737 1222 /* throw away whatever the channel was doing and get it ··· 1798 1183 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); 1799 1184 writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET); 1800 1185 1801 - /* clear any pending errors */ 1802 - err = pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr); 1186 + if (device->version < IOAT_VER_3_3) { 1187 + /* clear any pending errors */ 1188 + err = pci_read_config_dword(pdev, 1189 + IOAT_PCI_CHANERR_INT_OFFSET, &chanerr); 1190 + if (err) { 1191 + dev_err(&pdev->dev, 1192 + "channel error register unreachable\n"); 1193 + return err; 1194 + } 1195 + pci_write_config_dword(pdev, 1196 + IOAT_PCI_CHANERR_INT_OFFSET, chanerr); 1197 + 1198 + /* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit 1199 + * (workaround for spurious config parity error after restart) 1200 + */ 1201 + pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id); 1202 + if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) { 1203 + pci_write_config_dword(pdev, 1204 + IOAT_PCI_DMAUNCERRSTS_OFFSET, 1205 + 0x10); 1206 + } 1207 + } 1208 + 1209 + err = ioat2_reset_sync(chan, msecs_to_jiffies(200)); 1803 1210 if (err) { 1804 - dev_err(&pdev->dev, "channel error register unreachable\n"); 1211 + dev_err(&pdev->dev, "Failed to reset!\n"); 1805 1212 return err; 1806 1213 } 1807 - pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr); 1808 1214 1809 - /* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit 1810 - * (workaround for spurious config parity error after restart) 1215 + if (device->irq_mode != 
IOAT_NOIRQ && is_bwd_ioat(pdev)) 1216 + err = ioat3_irq_reinit(device); 1217 + 1218 + return err; 1219 + } 1220 + 1221 + static void ioat3_intr_quirk(struct ioatdma_device *device) 1222 + { 1223 + struct dma_device *dma; 1224 + struct dma_chan *c; 1225 + struct ioat_chan_common *chan; 1226 + u32 errmask; 1227 + 1228 + dma = &device->common; 1229 + 1230 + /* 1231 + * if we have descriptor write back error status, we mask the 1232 + * error interrupts 1811 1233 */ 1812 - pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id); 1813 - if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) 1814 - pci_write_config_dword(pdev, IOAT_PCI_DMAUNCERRSTS_OFFSET, 0x10); 1815 - 1816 - return ioat2_reset_sync(chan, msecs_to_jiffies(200)); 1817 - } 1818 - 1819 - static bool is_jf_ioat(struct pci_dev *pdev) 1820 - { 1821 - switch (pdev->device) { 1822 - case PCI_DEVICE_ID_INTEL_IOAT_JSF0: 1823 - case PCI_DEVICE_ID_INTEL_IOAT_JSF1: 1824 - case PCI_DEVICE_ID_INTEL_IOAT_JSF2: 1825 - case PCI_DEVICE_ID_INTEL_IOAT_JSF3: 1826 - case PCI_DEVICE_ID_INTEL_IOAT_JSF4: 1827 - case PCI_DEVICE_ID_INTEL_IOAT_JSF5: 1828 - case PCI_DEVICE_ID_INTEL_IOAT_JSF6: 1829 - case PCI_DEVICE_ID_INTEL_IOAT_JSF7: 1830 - case PCI_DEVICE_ID_INTEL_IOAT_JSF8: 1831 - case PCI_DEVICE_ID_INTEL_IOAT_JSF9: 1832 - return true; 1833 - default: 1834 - return false; 1234 + if (device->cap & IOAT_CAP_DWBES) { 1235 + list_for_each_entry(c, &dma->channels, device_node) { 1236 + chan = to_chan_common(c); 1237 + errmask = readl(chan->reg_base + 1238 + IOAT_CHANERR_MASK_OFFSET); 1239 + errmask |= IOAT_CHANERR_XOR_P_OR_CRC_ERR | 1240 + IOAT_CHANERR_XOR_Q_ERR; 1241 + writel(errmask, chan->reg_base + 1242 + IOAT_CHANERR_MASK_OFFSET); 1243 + } 1835 1244 } 1836 - } 1837 - 1838 - static bool is_snb_ioat(struct pci_dev *pdev) 1839 - { 1840 - switch (pdev->device) { 1841 - case PCI_DEVICE_ID_INTEL_IOAT_SNB0: 1842 - case PCI_DEVICE_ID_INTEL_IOAT_SNB1: 1843 - case PCI_DEVICE_ID_INTEL_IOAT_SNB2: 1844 - case PCI_DEVICE_ID_INTEL_IOAT_SNB3: 1845 
- case PCI_DEVICE_ID_INTEL_IOAT_SNB4: 1846 - case PCI_DEVICE_ID_INTEL_IOAT_SNB5: 1847 - case PCI_DEVICE_ID_INTEL_IOAT_SNB6: 1848 - case PCI_DEVICE_ID_INTEL_IOAT_SNB7: 1849 - case PCI_DEVICE_ID_INTEL_IOAT_SNB8: 1850 - case PCI_DEVICE_ID_INTEL_IOAT_SNB9: 1851 - return true; 1852 - default: 1853 - return false; 1854 - } 1855 - } 1856 - 1857 - static bool is_ivb_ioat(struct pci_dev *pdev) 1858 - { 1859 - switch (pdev->device) { 1860 - case PCI_DEVICE_ID_INTEL_IOAT_IVB0: 1861 - case PCI_DEVICE_ID_INTEL_IOAT_IVB1: 1862 - case PCI_DEVICE_ID_INTEL_IOAT_IVB2: 1863 - case PCI_DEVICE_ID_INTEL_IOAT_IVB3: 1864 - case PCI_DEVICE_ID_INTEL_IOAT_IVB4: 1865 - case PCI_DEVICE_ID_INTEL_IOAT_IVB5: 1866 - case PCI_DEVICE_ID_INTEL_IOAT_IVB6: 1867 - case PCI_DEVICE_ID_INTEL_IOAT_IVB7: 1868 - case PCI_DEVICE_ID_INTEL_IOAT_IVB8: 1869 - case PCI_DEVICE_ID_INTEL_IOAT_IVB9: 1870 - return true; 1871 - default: 1872 - return false; 1873 - } 1874 - 1875 1245 } 1876 1246 1877 1247 int ioat3_dma_probe(struct ioatdma_device *device, int dca) ··· 1868 1268 struct ioat_chan_common *chan; 1869 1269 bool is_raid_device = false; 1870 1270 int err; 1871 - u32 cap; 1872 1271 1873 1272 device->enumerate_channels = ioat2_enumerate_channels; 1874 1273 device->reset_hw = ioat3_reset_hw; 1875 1274 device->self_test = ioat3_dma_self_test; 1275 + device->intr_quirk = ioat3_intr_quirk; 1876 1276 dma = &device->common; 1877 1277 dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock; 1878 1278 dma->device_issue_pending = ioat2_issue_pending; 1879 1279 dma->device_alloc_chan_resources = ioat2_alloc_chan_resources; 1880 1280 dma->device_free_chan_resources = ioat2_free_chan_resources; 1881 1281 1882 - if (is_jf_ioat(pdev) || is_snb_ioat(pdev) || is_ivb_ioat(pdev)) 1282 + if (is_xeon_cb32(pdev)) 1883 1283 dma->copy_align = 6; 1884 1284 1885 1285 dma_cap_set(DMA_INTERRUPT, dma->cap_mask); 1886 1286 dma->device_prep_dma_interrupt = ioat3_prep_interrupt_lock; 1887 1287 1888 - cap = readl(device->reg_base + 
IOAT_DMA_CAP_OFFSET); 1288 + device->cap = readl(device->reg_base + IOAT_DMA_CAP_OFFSET); 1289 + 1290 + if (is_bwd_noraid(pdev)) 1291 + device->cap &= ~(IOAT_CAP_XOR | IOAT_CAP_PQ | IOAT_CAP_RAID16SS); 1889 1292 1890 1293 /* dca is incompatible with raid operations */ 1891 - if (dca_en && (cap & (IOAT_CAP_XOR|IOAT_CAP_PQ))) 1892 - cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ); 1294 + if (dca_en && (device->cap & (IOAT_CAP_XOR|IOAT_CAP_PQ))) 1295 + device->cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ); 1893 1296 1894 - if (cap & IOAT_CAP_XOR) { 1297 + if (device->cap & IOAT_CAP_XOR) { 1895 1298 is_raid_device = true; 1896 1299 dma->max_xor = 8; 1897 1300 dma->xor_align = 6; ··· 1905 1302 dma_cap_set(DMA_XOR_VAL, dma->cap_mask); 1906 1303 dma->device_prep_dma_xor_val = ioat3_prep_xor_val; 1907 1304 } 1908 - if (cap & IOAT_CAP_PQ) { 1305 + 1306 + if (device->cap & IOAT_CAP_PQ) { 1909 1307 is_raid_device = true; 1910 - dma_set_maxpq(dma, 8, 0); 1911 - dma->pq_align = 6; 1912 1308 1913 - dma_cap_set(DMA_PQ, dma->cap_mask); 1914 1309 dma->device_prep_dma_pq = ioat3_prep_pq; 1915 - 1916 - dma_cap_set(DMA_PQ_VAL, dma->cap_mask); 1917 1310 dma->device_prep_dma_pq_val = ioat3_prep_pq_val; 1311 + dma_cap_set(DMA_PQ, dma->cap_mask); 1312 + dma_cap_set(DMA_PQ_VAL, dma->cap_mask); 1918 1313 1919 - if (!(cap & IOAT_CAP_XOR)) { 1920 - dma->max_xor = 8; 1921 - dma->xor_align = 6; 1314 + if (device->cap & IOAT_CAP_RAID16SS) { 1315 + dma_set_maxpq(dma, 16, 0); 1316 + dma->pq_align = 0; 1317 + } else { 1318 + dma_set_maxpq(dma, 8, 0); 1319 + if (is_xeon_cb32(pdev)) 1320 + dma->pq_align = 6; 1321 + else 1322 + dma->pq_align = 0; 1323 + } 1922 1324 1923 - dma_cap_set(DMA_XOR, dma->cap_mask); 1325 + if (!(device->cap & IOAT_CAP_XOR)) { 1924 1326 dma->device_prep_dma_xor = ioat3_prep_pqxor; 1925 - 1926 - dma_cap_set(DMA_XOR_VAL, dma->cap_mask); 1927 1327 dma->device_prep_dma_xor_val = ioat3_prep_pqxor_val; 1328 + dma_cap_set(DMA_XOR, dma->cap_mask); 1329 + dma_cap_set(DMA_XOR_VAL, dma->cap_mask); 1330 + 1331 + 
if (device->cap & IOAT_CAP_RAID16SS) { 1332 + dma->max_xor = 16; 1333 + dma->xor_align = 0; 1334 + } else { 1335 + dma->max_xor = 8; 1336 + if (is_xeon_cb32(pdev)) 1337 + dma->xor_align = 6; 1338 + else 1339 + dma->xor_align = 0; 1340 + } 1928 1341 } 1929 1342 } 1930 - if (is_raid_device && (cap & IOAT_CAP_FILL_BLOCK)) { 1343 + 1344 + if (is_raid_device && (device->cap & IOAT_CAP_FILL_BLOCK)) { 1931 1345 dma_cap_set(DMA_MEMSET, dma->cap_mask); 1932 1346 dma->device_prep_dma_memset = ioat3_prep_memset_lock; 1933 1347 } 1934 1348 1935 1349 1936 - if (is_raid_device) { 1937 - dma->device_tx_status = ioat3_tx_status; 1938 - device->cleanup_fn = ioat3_cleanup_event; 1939 - device->timer_fn = ioat3_timer_event; 1940 - } else { 1941 - dma->device_tx_status = ioat_dma_tx_status; 1942 - device->cleanup_fn = ioat2_cleanup_event; 1943 - device->timer_fn = ioat2_timer_event; 1350 + dma->device_tx_status = ioat3_tx_status; 1351 + device->cleanup_fn = ioat3_cleanup_event; 1352 + device->timer_fn = ioat3_timer_event; 1353 + 1354 + if (is_xeon_cb32(pdev)) { 1355 + dma_cap_clear(DMA_XOR_VAL, dma->cap_mask); 1356 + dma->device_prep_dma_xor_val = NULL; 1357 + 1358 + dma_cap_clear(DMA_PQ_VAL, dma->cap_mask); 1359 + dma->device_prep_dma_pq_val = NULL; 1944 1360 } 1945 1361 1946 - #ifdef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA 1947 - dma_cap_clear(DMA_PQ_VAL, dma->cap_mask); 1948 - dma->device_prep_dma_pq_val = NULL; 1949 - #endif 1362 + /* starting with CB3.3 super extended descriptors are supported */ 1363 + if (device->cap & IOAT_CAP_RAID16SS) { 1364 + char pool_name[14]; 1365 + int i; 1950 1366 1951 - #ifdef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA 1952 - dma_cap_clear(DMA_XOR_VAL, dma->cap_mask); 1953 - dma->device_prep_dma_xor_val = NULL; 1954 - #endif 1367 + /* allocate sw descriptor pool for SED */ 1368 + device->sed_pool = kmem_cache_create("ioat_sed", 1369 + sizeof(struct ioat_sed_ent), 0, 0, NULL); 1370 + if (!device->sed_pool) 1371 + return -ENOMEM; 1372 + 1373 + for (i = 0; i < 
MAX_SED_POOLS; i++) { 1374 + snprintf(pool_name, 14, "ioat_hw%d_sed", i); 1375 + 1376 + /* allocate SED DMA pool */ 1377 + device->sed_hw_pool[i] = dma_pool_create(pool_name, 1378 + &pdev->dev, 1379 + SED_SIZE * (i + 1), 64, 0); 1380 + if (!device->sed_hw_pool[i]) 1381 + goto sed_pool_cleanup; 1382 + 1383 + } 1384 + } 1955 1385 1956 1386 err = ioat_probe(device); 1957 1387 if (err) ··· 2007 1371 device->dca = ioat3_dca_init(pdev, device->reg_base); 2008 1372 2009 1373 return 0; 1374 + 1375 + sed_pool_cleanup: 1376 + if (device->sed_pool) { 1377 + int i; 1378 + kmem_cache_destroy(device->sed_pool); 1379 + 1380 + for (i = 0; i < MAX_SED_POOLS; i++) 1381 + if (device->sed_hw_pool[i]) 1382 + dma_pool_destroy(device->sed_hw_pool[i]); 1383 + } 1384 + 1385 + return -ENOMEM; 1386 + } 1387 + 1388 + void ioat3_dma_remove(struct ioatdma_device *device) 1389 + { 1390 + if (device->sed_pool) { 1391 + int i; 1392 + kmem_cache_destroy(device->sed_pool); 1393 + 1394 + for (i = 0; i < MAX_SED_POOLS; i++) 1395 + if (device->sed_hw_pool[i]) 1396 + dma_pool_destroy(device->sed_hw_pool[i]); 1397 + } 2010 1398 }
+80 -8
drivers/dma/ioat/hw.h
··· 30 30 #define IOAT_PCI_DID_SCNB 0x65FF 31 31 #define IOAT_PCI_DID_SNB 0x402F 32 32 33 - #define IOAT_VER_1_2 0x12 /* Version 1.2 */ 34 - #define IOAT_VER_2_0 0x20 /* Version 2.0 */ 35 - #define IOAT_VER_3_0 0x30 /* Version 3.0 */ 36 - #define IOAT_VER_3_2 0x32 /* Version 3.2 */ 37 - 38 33 #define PCI_DEVICE_ID_INTEL_IOAT_IVB0 0x0e20 39 34 #define PCI_DEVICE_ID_INTEL_IOAT_IVB1 0x0e21 40 35 #define PCI_DEVICE_ID_INTEL_IOAT_IVB2 0x0e22 ··· 40 45 #define PCI_DEVICE_ID_INTEL_IOAT_IVB7 0x0e27 41 46 #define PCI_DEVICE_ID_INTEL_IOAT_IVB8 0x0e2e 42 47 #define PCI_DEVICE_ID_INTEL_IOAT_IVB9 0x0e2f 48 + 49 + #define PCI_DEVICE_ID_INTEL_IOAT_HSW0 0x2f20 50 + #define PCI_DEVICE_ID_INTEL_IOAT_HSW1 0x2f21 51 + #define PCI_DEVICE_ID_INTEL_IOAT_HSW2 0x2f22 52 + #define PCI_DEVICE_ID_INTEL_IOAT_HSW3 0x2f23 53 + #define PCI_DEVICE_ID_INTEL_IOAT_HSW4 0x2f24 54 + #define PCI_DEVICE_ID_INTEL_IOAT_HSW5 0x2f25 55 + #define PCI_DEVICE_ID_INTEL_IOAT_HSW6 0x2f26 56 + #define PCI_DEVICE_ID_INTEL_IOAT_HSW7 0x2f27 57 + #define PCI_DEVICE_ID_INTEL_IOAT_HSW8 0x2f2e 58 + #define PCI_DEVICE_ID_INTEL_IOAT_HSW9 0x2f2f 59 + 60 + #define PCI_DEVICE_ID_INTEL_IOAT_BWD0 0x0C50 61 + #define PCI_DEVICE_ID_INTEL_IOAT_BWD1 0x0C51 62 + #define PCI_DEVICE_ID_INTEL_IOAT_BWD2 0x0C52 63 + #define PCI_DEVICE_ID_INTEL_IOAT_BWD3 0x0C53 64 + 65 + #define IOAT_VER_1_2 0x12 /* Version 1.2 */ 66 + #define IOAT_VER_2_0 0x20 /* Version 2.0 */ 67 + #define IOAT_VER_3_0 0x30 /* Version 3.0 */ 68 + #define IOAT_VER_3_2 0x32 /* Version 3.2 */ 69 + #define IOAT_VER_3_3 0x33 /* Version 3.3 */ 70 + 43 71 44 72 int system_has_dca_enabled(struct pci_dev *pdev); 45 73 ··· 165 147 }; 166 148 167 149 struct ioat_pq_descriptor { 168 - uint32_t size; 150 + union { 151 + uint32_t size; 152 + uint32_t dwbes; 153 + struct { 154 + unsigned int rsvd:25; 155 + unsigned int p_val_err:1; 156 + unsigned int q_val_err:1; 157 + unsigned int rsvd1:4; 158 + unsigned int wbes:1; 159 + } dwbes_f; 160 + }; 169 161 union { 170 162 uint32_t ctl; 171 
163 struct { ··· 190 162 unsigned int hint:1; 191 163 unsigned int p_disable:1; 192 164 unsigned int q_disable:1; 193 - unsigned int rsvd:11; 165 + unsigned int rsvd2:2; 166 + unsigned int wb_en:1; 167 + unsigned int prl_en:1; 168 + unsigned int rsvd3:7; 194 169 #define IOAT_OP_PQ 0x89 195 170 #define IOAT_OP_PQ_VAL 0x8a 171 + #define IOAT_OP_PQ_16S 0xa0 172 + #define IOAT_OP_PQ_VAL_16S 0xa1 196 173 unsigned int op:8; 197 174 } ctl_f; 198 175 }; ··· 205 172 uint64_t p_addr; 206 173 uint64_t next; 207 174 uint64_t src_addr2; 208 - uint64_t src_addr3; 175 + union { 176 + uint64_t src_addr3; 177 + uint64_t sed_addr; 178 + }; 209 179 uint8_t coef[8]; 210 180 uint64_t q_addr; 211 181 }; ··· 257 221 struct ioat_raw_descriptor { 258 222 uint64_t field[8]; 259 223 }; 224 + 225 + struct ioat_pq16a_descriptor { 226 + uint8_t coef[8]; 227 + uint64_t src_addr3; 228 + uint64_t src_addr4; 229 + uint64_t src_addr5; 230 + uint64_t src_addr6; 231 + uint64_t src_addr7; 232 + uint64_t src_addr8; 233 + uint64_t src_addr9; 234 + }; 235 + 236 + struct ioat_pq16b_descriptor { 237 + uint64_t src_addr10; 238 + uint64_t src_addr11; 239 + uint64_t src_addr12; 240 + uint64_t src_addr13; 241 + uint64_t src_addr14; 242 + uint64_t src_addr15; 243 + uint64_t src_addr16; 244 + uint64_t rsvd; 245 + }; 246 + 247 + union ioat_sed_pq_descriptor { 248 + struct ioat_pq16a_descriptor a; 249 + struct ioat_pq16b_descriptor b; 250 + }; 251 + 252 + #define SED_SIZE 64 253 + 254 + struct ioat_sed_raw_descriptor { 255 + uint64_t a[8]; 256 + uint64_t b[8]; 257 + uint64_t c[8]; 258 + }; 259 + 260 260 #endif
+20
drivers/dma/ioat/pci.c
··· 94 94 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB8) }, 95 95 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB9) }, 96 96 97 + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW0) }, 98 + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW1) }, 99 + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW2) }, 100 + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW3) }, 101 + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW4) }, 102 + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW5) }, 103 + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW6) }, 104 + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW7) }, 105 + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW8) }, 106 + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW9) }, 107 + 108 + /* I/OAT v3.3 platforms */ 109 + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD0) }, 110 + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD1) }, 111 + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD2) }, 112 + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD3) }, 113 + 97 114 { 0, } 98 115 }; 99 116 MODULE_DEVICE_TABLE(pci, ioat_pci_tbl); ··· 206 189 207 190 if (!device) 208 191 return; 192 + 193 + if (device->version >= IOAT_VER_3_0) 194 + ioat3_dma_remove(device); 209 195 210 196 dev_err(&pdev->dev, "Removing dma and dca services\n"); 211 197 if (device->dca) {
+4
drivers/dma/ioat/registers.h
··· 79 79 #define IOAT_CAP_APIC 0x00000080 80 80 #define IOAT_CAP_XOR 0x00000100 81 81 #define IOAT_CAP_PQ 0x00000200 82 + #define IOAT_CAP_DWBES 0x00002000 83 + #define IOAT_CAP_RAID16SS 0x00020000 82 84 83 85 #define IOAT_CHANNEL_MMIO_SIZE 0x80 /* Each Channel MMIO space is this size */ 84 86 ··· 95 93 #define IOAT_CHANCTRL_ERR_COMPLETION_EN 0x0004 96 94 #define IOAT_CHANCTRL_INT_REARM 0x0001 97 95 #define IOAT_CHANCTRL_RUN (IOAT_CHANCTRL_INT_REARM |\ 96 + IOAT_CHANCTRL_ERR_INT_EN |\ 97 + IOAT_CHANCTRL_ERR_COMPLETION_EN |\ 98 98 IOAT_CHANCTRL_ANY_ERR_ABORT_EN) 99 99 100 100 #define IOAT_DMA_COMP_OFFSET 0x02 /* 16-bit DMA channel compatibility */
+3 -3
drivers/dma/ipu/ipu_idmac.c
··· 1642 1642 return dma_async_device_register(&idmac->dma); 1643 1643 } 1644 1644 1645 - static void __exit ipu_idmac_exit(struct ipu *ipu) 1645 + static void ipu_idmac_exit(struct ipu *ipu) 1646 1646 { 1647 1647 int i; 1648 1648 struct idmac *idmac = &ipu->idmac; ··· 1756 1756 return ret; 1757 1757 } 1758 1758 1759 - static int __exit ipu_remove(struct platform_device *pdev) 1759 + static int ipu_remove(struct platform_device *pdev) 1760 1760 { 1761 1761 struct ipu *ipu = platform_get_drvdata(pdev); 1762 1762 ··· 1781 1781 .name = "ipu-core", 1782 1782 .owner = THIS_MODULE, 1783 1783 }, 1784 - .remove = __exit_p(ipu_remove), 1784 + .remove = ipu_remove, 1785 1785 }; 1786 1786 1787 1787 static int __init ipu_init(void)
+30 -66
drivers/dma/of-dma.c
··· 13 13 #include <linux/device.h> 14 14 #include <linux/err.h> 15 15 #include <linux/module.h> 16 - #include <linux/rculist.h> 16 + #include <linux/mutex.h> 17 17 #include <linux/slab.h> 18 18 #include <linux/of.h> 19 19 #include <linux/of_dma.h> 20 20 21 21 static LIST_HEAD(of_dma_list); 22 - static DEFINE_SPINLOCK(of_dma_lock); 22 + static DEFINE_MUTEX(of_dma_lock); 23 23 24 24 /** 25 - * of_dma_get_controller - Get a DMA controller in DT DMA helpers list 25 + * of_dma_find_controller - Get a DMA controller in DT DMA helpers list 26 26 * @dma_spec: pointer to DMA specifier as found in the device tree 27 27 * 28 28 * Finds a DMA controller with matching device node and number for dma cells 29 - * in a list of registered DMA controllers. If a match is found the use_count 30 - * variable is increased and a valid pointer to the DMA data stored is retuned. 31 - * A NULL pointer is returned if no match is found. 29 + * in a list of registered DMA controllers. If a match is found a valid pointer 30 + * to the DMA data stored is retuned. A NULL pointer is returned if no match is 31 + * found. 
32 32 */ 33 - static struct of_dma *of_dma_get_controller(struct of_phandle_args *dma_spec) 33 + static struct of_dma *of_dma_find_controller(struct of_phandle_args *dma_spec) 34 34 { 35 35 struct of_dma *ofdma; 36 36 37 - spin_lock(&of_dma_lock); 38 - 39 - if (list_empty(&of_dma_list)) { 40 - spin_unlock(&of_dma_lock); 41 - return NULL; 42 - } 43 - 44 37 list_for_each_entry(ofdma, &of_dma_list, of_dma_controllers) 45 38 if ((ofdma->of_node == dma_spec->np) && 46 - (ofdma->of_dma_nbcells == dma_spec->args_count)) { 47 - ofdma->use_count++; 48 - spin_unlock(&of_dma_lock); 39 + (ofdma->of_dma_nbcells == dma_spec->args_count)) 49 40 return ofdma; 50 - } 51 - 52 - spin_unlock(&of_dma_lock); 53 41 54 42 pr_debug("%s: can't find DMA controller %s\n", __func__, 55 43 dma_spec->np->full_name); 56 44 57 45 return NULL; 58 - } 59 - 60 - /** 61 - * of_dma_put_controller - Decrement use count for a registered DMA controller 62 - * @of_dma: pointer to DMA controller data 63 - * 64 - * Decrements the use_count variable in the DMA data structure. This function 65 - * should be called only when a valid pointer is returned from 66 - * of_dma_get_controller() and no further accesses to data referenced by that 67 - * pointer are needed. 
68 - */ 69 - static void of_dma_put_controller(struct of_dma *ofdma) 70 - { 71 - spin_lock(&of_dma_lock); 72 - ofdma->use_count--; 73 - spin_unlock(&of_dma_lock); 74 46 } 75 47 76 48 /** ··· 65 93 { 66 94 struct of_dma *ofdma; 67 95 int nbcells; 96 + const __be32 *prop; 68 97 69 98 if (!np || !of_dma_xlate) { 70 99 pr_err("%s: not enough information provided\n", __func__); ··· 76 103 if (!ofdma) 77 104 return -ENOMEM; 78 105 79 - nbcells = be32_to_cpup(of_get_property(np, "#dma-cells", NULL)); 80 - if (!nbcells) { 106 + prop = of_get_property(np, "#dma-cells", NULL); 107 + if (prop) 108 + nbcells = be32_to_cpup(prop); 109 + 110 + if (!prop || !nbcells) { 81 111 pr_err("%s: #dma-cells property is missing or invalid\n", 82 112 __func__); 83 113 kfree(ofdma); ··· 91 115 ofdma->of_dma_nbcells = nbcells; 92 116 ofdma->of_dma_xlate = of_dma_xlate; 93 117 ofdma->of_dma_data = data; 94 - ofdma->use_count = 0; 95 118 96 119 /* Now queue of_dma controller structure in list */ 97 - spin_lock(&of_dma_lock); 120 + mutex_lock(&of_dma_lock); 98 121 list_add_tail(&ofdma->of_dma_controllers, &of_dma_list); 99 - spin_unlock(&of_dma_lock); 122 + mutex_unlock(&of_dma_lock); 100 123 101 124 return 0; 102 125 } ··· 107 132 * 108 133 * Memory allocated by of_dma_controller_register() is freed here. 
109 134 */ 110 - int of_dma_controller_free(struct device_node *np) 135 + void of_dma_controller_free(struct device_node *np) 111 136 { 112 137 struct of_dma *ofdma; 113 138 114 - spin_lock(&of_dma_lock); 115 - 116 - if (list_empty(&of_dma_list)) { 117 - spin_unlock(&of_dma_lock); 118 - return -ENODEV; 119 - } 139 + mutex_lock(&of_dma_lock); 120 140 121 141 list_for_each_entry(ofdma, &of_dma_list, of_dma_controllers) 122 142 if (ofdma->of_node == np) { 123 - if (ofdma->use_count) { 124 - spin_unlock(&of_dma_lock); 125 - return -EBUSY; 126 - } 127 - 128 143 list_del(&ofdma->of_dma_controllers); 129 - spin_unlock(&of_dma_lock); 130 144 kfree(ofdma); 131 - return 0; 145 + break; 132 146 } 133 147 134 - spin_unlock(&of_dma_lock); 135 - return -ENODEV; 148 + mutex_unlock(&of_dma_lock); 136 149 } 137 150 EXPORT_SYMBOL_GPL(of_dma_controller_free); 138 151 ··· 135 172 * specifiers, matches the name provided. Returns 0 if the name matches and 136 173 * a valid pointer to the DMA specifier is found. Otherwise returns -ENODEV. 137 174 */ 138 - static int of_dma_match_channel(struct device_node *np, char *name, int index, 139 - struct of_phandle_args *dma_spec) 175 + static int of_dma_match_channel(struct device_node *np, const char *name, 176 + int index, struct of_phandle_args *dma_spec) 140 177 { 141 178 const char *s; 142 179 ··· 161 198 * Returns pointer to appropriate dma channel on success or NULL on error. 
162 199 */ 163 200 struct dma_chan *of_dma_request_slave_channel(struct device_node *np, 164 - char *name) 201 + const char *name) 165 202 { 166 203 struct of_phandle_args dma_spec; 167 204 struct of_dma *ofdma; ··· 183 220 if (of_dma_match_channel(np, name, i, &dma_spec)) 184 221 continue; 185 222 186 - ofdma = of_dma_get_controller(&dma_spec); 223 + mutex_lock(&of_dma_lock); 224 + ofdma = of_dma_find_controller(&dma_spec); 187 225 188 - if (!ofdma) 189 - continue; 226 + if (ofdma) 227 + chan = ofdma->of_dma_xlate(&dma_spec, ofdma); 228 + else 229 + chan = NULL; 190 230 191 - chan = ofdma->of_dma_xlate(&dma_spec, ofdma); 192 - 193 - of_dma_put_controller(ofdma); 231 + mutex_unlock(&of_dma_lock); 194 232 195 233 of_node_put(dma_spec.np); 196 234
+36 -2
drivers/dma/omap-dma.c
··· 16 16 #include <linux/platform_device.h> 17 17 #include <linux/slab.h> 18 18 #include <linux/spinlock.h> 19 + #include <linux/of_dma.h> 20 + #include <linux/of_device.h> 19 21 20 22 #include "virt-dma.h" 21 23 ··· 67 65 [OMAP_DMA_DATA_TYPE_S8] = 1, 68 66 [OMAP_DMA_DATA_TYPE_S16] = 2, 69 67 [OMAP_DMA_DATA_TYPE_S32] = 4, 68 + }; 69 + 70 + static struct of_dma_filter_info omap_dma_info = { 71 + .filter_fn = omap_dma_filter_fn, 70 72 }; 71 73 72 74 static inline struct omap_dmadev *to_omap_dma_dev(struct dma_device *d) ··· 635 629 pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n", 636 630 rc); 637 631 omap_dma_free(od); 638 - } else { 639 - platform_set_drvdata(pdev, od); 632 + return rc; 633 + } 634 + 635 + platform_set_drvdata(pdev, od); 636 + 637 + if (pdev->dev.of_node) { 638 + omap_dma_info.dma_cap = od->ddev.cap_mask; 639 + 640 + /* Device-tree DMA controller registration */ 641 + rc = of_dma_controller_register(pdev->dev.of_node, 642 + of_dma_simple_xlate, &omap_dma_info); 643 + if (rc) { 644 + pr_warn("OMAP-DMA: failed to register DMA controller\n"); 645 + dma_async_device_unregister(&od->ddev); 646 + omap_dma_free(od); 647 + } 640 648 } 641 649 642 650 dev_info(&pdev->dev, "OMAP DMA engine driver\n"); ··· 662 642 { 663 643 struct omap_dmadev *od = platform_get_drvdata(pdev); 664 644 645 + if (pdev->dev.of_node) 646 + of_dma_controller_free(pdev->dev.of_node); 647 + 665 648 dma_async_device_unregister(&od->ddev); 666 649 omap_dma_free(od); 667 650 668 651 return 0; 669 652 } 653 + 654 + static const struct of_device_id omap_dma_match[] = { 655 + { .compatible = "ti,omap2420-sdma", }, 656 + { .compatible = "ti,omap2430-sdma", }, 657 + { .compatible = "ti,omap3430-sdma", }, 658 + { .compatible = "ti,omap3630-sdma", }, 659 + { .compatible = "ti,omap4430-sdma", }, 660 + {}, 661 + }; 662 + MODULE_DEVICE_TABLE(of, omap_dma_match); 670 663 671 664 static struct platform_driver omap_dma_driver = { 672 665 .probe = omap_dma_probe, ··· 687 654 
.driver = { 688 655 .name = "omap-dma-engine", 689 656 .owner = THIS_MODULE, 657 + .of_match_table = of_match_ptr(omap_dma_match), 690 658 }, 691 659 }; 692 660
+1 -1
drivers/dma/pch_dma.c
··· 476 476 dev_dbg(chan2dev(&pd_chan->chan), "scanned %d descriptors\n", i); 477 477 478 478 if (!ret) { 479 - ret = pdc_alloc_desc(&pd_chan->chan, GFP_NOIO); 479 + ret = pdc_alloc_desc(&pd_chan->chan, GFP_ATOMIC); 480 480 if (ret) { 481 481 spin_lock(&pd_chan->lock); 482 482 pd_chan->descs_allocated++;
+5 -5
drivers/dma/pl330.c
··· 26 26 #include <linux/scatterlist.h> 27 27 #include <linux/of.h> 28 28 #include <linux/of_dma.h> 29 + #include <linux/err.h> 29 30 30 31 #include "dmaengine.h" 31 32 #define PL330_MAX_CHAN 8 ··· 2289 2288 2290 2289 /* If already submitted */ 2291 2290 if (desc->status == BUSY) 2292 - break; 2291 + continue; 2293 2292 2294 2293 ret = pl330_submit_req(pch->pl330_chid, 2295 2294 &desc->req); 2296 2295 if (!ret) { 2297 2296 desc->status = BUSY; 2298 - break; 2299 2297 } else if (ret == -EAGAIN) { 2300 2298 /* QFull or DMAC Dying */ 2301 2299 break; ··· 2904 2904 pi->mcbufsz = pdat ? pdat->mcbuf_sz : 0; 2905 2905 2906 2906 res = &adev->res; 2907 - pi->base = devm_request_and_ioremap(&adev->dev, res); 2908 - if (!pi->base) 2909 - return -ENXIO; 2907 + pi->base = devm_ioremap_resource(&adev->dev, res); 2908 + if (IS_ERR(pi->base)) 2909 + return PTR_ERR(pi->base); 2910 2910 2911 2911 amba_set_drvdata(adev, pdmac); 2912 2912
+24
drivers/dma/sh/Kconfig
··· 1 + # 2 + # DMA engine configuration for sh 3 + # 4 + 5 + config SH_DMAE_BASE 6 + bool "Renesas SuperH DMA Engine support" 7 + depends on (SUPERH && SH_DMA) || (ARM && ARCH_SHMOBILE) 8 + depends on !SH_DMA_API 9 + default y 10 + select DMA_ENGINE 11 + help 12 + Enable support for the Renesas SuperH DMA controllers. 13 + 14 + config SH_DMAE 15 + tristate "Renesas SuperH DMAC support" 16 + depends on SH_DMAE_BASE 17 + help 18 + Enable support for the Renesas SuperH DMA controllers. 19 + 20 + config SUDMAC 21 + tristate "Renesas SUDMAC support" 22 + depends on SH_DMAE_BASE 23 + help 24 + Enable support for the Renesas SUDMAC controllers.
+2 -1
drivers/dma/sh/Makefile
··· 1 - obj-$(CONFIG_SH_DMAE) += shdma-base.o 1 + obj-$(CONFIG_SH_DMAE_BASE) += shdma-base.o 2 2 obj-$(CONFIG_SH_DMAE) += shdma.o 3 + obj-$(CONFIG_SUDMAC) += sudmac.o
+428
drivers/dma/sh/sudmac.c
··· 1 + /* 2 + * Renesas SUDMAC support 3 + * 4 + * Copyright (C) 2013 Renesas Solutions Corp. 5 + * 6 + * based on drivers/dma/sh/shdma.c: 7 + * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de> 8 + * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com> 9 + * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved. 10 + * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved. 11 + * 12 + * This is free software; you can redistribute it and/or modify 13 + * it under the terms of version 2 of the GNU General Public License as 14 + * published by the Free Software Foundation. 15 + */ 16 + 17 + #include <linux/init.h> 18 + #include <linux/module.h> 19 + #include <linux/slab.h> 20 + #include <linux/interrupt.h> 21 + #include <linux/dmaengine.h> 22 + #include <linux/platform_device.h> 23 + #include <linux/sudmac.h> 24 + 25 + struct sudmac_chan { 26 + struct shdma_chan shdma_chan; 27 + void __iomem *base; 28 + char dev_id[16]; /* unique name per DMAC of channel */ 29 + 30 + u32 offset; /* for CFG, BA, BBC, CA, CBC, DEN */ 31 + u32 cfg; 32 + u32 dint_end_bit; 33 + }; 34 + 35 + struct sudmac_device { 36 + struct shdma_dev shdma_dev; 37 + struct sudmac_pdata *pdata; 38 + void __iomem *chan_reg; 39 + }; 40 + 41 + struct sudmac_regs { 42 + u32 base_addr; 43 + u32 base_byte_count; 44 + }; 45 + 46 + struct sudmac_desc { 47 + struct sudmac_regs hw; 48 + struct shdma_desc shdma_desc; 49 + }; 50 + 51 + #define to_chan(schan) container_of(schan, struct sudmac_chan, shdma_chan) 52 + #define to_desc(sdesc) container_of(sdesc, struct sudmac_desc, shdma_desc) 53 + #define to_sdev(sc) container_of(sc->shdma_chan.dma_chan.device, \ 54 + struct sudmac_device, shdma_dev.dma_dev) 55 + 56 + /* SUDMAC register */ 57 + #define SUDMAC_CH0CFG 0x00 58 + #define SUDMAC_CH0BA 0x10 59 + #define SUDMAC_CH0BBC 0x18 60 + #define SUDMAC_CH0CA 0x20 61 + #define SUDMAC_CH0CBC 0x28 62 + #define SUDMAC_CH0DEN 0x30 63 + #define SUDMAC_DSTSCLR 0x38 64 
+ #define SUDMAC_DBUFCTRL 0x3C 65 + #define SUDMAC_DINTCTRL 0x40 66 + #define SUDMAC_DINTSTS 0x44 67 + #define SUDMAC_DINTSTSCLR 0x48 68 + #define SUDMAC_CH0SHCTRL 0x50 69 + 70 + /* Definitions for the sudmac_channel.config */ 71 + #define SUDMAC_SENDBUFM 0x1000 /* b12: Transmit Buffer Mode */ 72 + #define SUDMAC_RCVENDM 0x0100 /* b8: Receive Data Transfer End Mode */ 73 + #define SUDMAC_LBA_WAIT 0x0030 /* b5-4: Local Bus Access Wait */ 74 + 75 + /* Definitions for the sudmac_channel.dint_end_bit */ 76 + #define SUDMAC_CH1ENDE 0x0002 /* b1: Ch1 DMA Transfer End Int Enable */ 77 + #define SUDMAC_CH0ENDE 0x0001 /* b0: Ch0 DMA Transfer End Int Enable */ 78 + 79 + #define SUDMAC_DRV_NAME "sudmac" 80 + 81 + static void sudmac_writel(struct sudmac_chan *sc, u32 data, u32 reg) 82 + { 83 + iowrite32(data, sc->base + reg); 84 + } 85 + 86 + static u32 sudmac_readl(struct sudmac_chan *sc, u32 reg) 87 + { 88 + return ioread32(sc->base + reg); 89 + } 90 + 91 + static bool sudmac_is_busy(struct sudmac_chan *sc) 92 + { 93 + u32 den = sudmac_readl(sc, SUDMAC_CH0DEN + sc->offset); 94 + 95 + if (den) 96 + return true; /* working */ 97 + 98 + return false; /* waiting */ 99 + } 100 + 101 + static void sudmac_set_reg(struct sudmac_chan *sc, struct sudmac_regs *hw, 102 + struct shdma_desc *sdesc) 103 + { 104 + sudmac_writel(sc, sc->cfg, SUDMAC_CH0CFG + sc->offset); 105 + sudmac_writel(sc, hw->base_addr, SUDMAC_CH0BA + sc->offset); 106 + sudmac_writel(sc, hw->base_byte_count, SUDMAC_CH0BBC + sc->offset); 107 + } 108 + 109 + static void sudmac_start(struct sudmac_chan *sc) 110 + { 111 + u32 dintctrl = sudmac_readl(sc, SUDMAC_DINTCTRL); 112 + 113 + sudmac_writel(sc, dintctrl | sc->dint_end_bit, SUDMAC_DINTCTRL); 114 + sudmac_writel(sc, 1, SUDMAC_CH0DEN + sc->offset); 115 + } 116 + 117 + static void sudmac_start_xfer(struct shdma_chan *schan, 118 + struct shdma_desc *sdesc) 119 + { 120 + struct sudmac_chan *sc = to_chan(schan); 121 + struct sudmac_desc *sd = to_desc(sdesc); 122 + 123 + 
sudmac_set_reg(sc, &sd->hw, sdesc); 124 + sudmac_start(sc); 125 + } 126 + 127 + static bool sudmac_channel_busy(struct shdma_chan *schan) 128 + { 129 + struct sudmac_chan *sc = to_chan(schan); 130 + 131 + return sudmac_is_busy(sc); 132 + } 133 + 134 + static void sudmac_setup_xfer(struct shdma_chan *schan, int slave_id) 135 + { 136 + } 137 + 138 + static const struct sudmac_slave_config *sudmac_find_slave( 139 + struct sudmac_chan *sc, int slave_id) 140 + { 141 + struct sudmac_device *sdev = to_sdev(sc); 142 + struct sudmac_pdata *pdata = sdev->pdata; 143 + const struct sudmac_slave_config *cfg; 144 + int i; 145 + 146 + for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++) 147 + if (cfg->slave_id == slave_id) 148 + return cfg; 149 + 150 + return NULL; 151 + } 152 + 153 + static int sudmac_set_slave(struct shdma_chan *schan, int slave_id, bool try) 154 + { 155 + struct sudmac_chan *sc = to_chan(schan); 156 + const struct sudmac_slave_config *cfg = sudmac_find_slave(sc, slave_id); 157 + 158 + if (!cfg) 159 + return -ENODEV; 160 + 161 + return 0; 162 + } 163 + 164 + static inline void sudmac_dma_halt(struct sudmac_chan *sc) 165 + { 166 + u32 dintctrl = sudmac_readl(sc, SUDMAC_DINTCTRL); 167 + 168 + sudmac_writel(sc, 0, SUDMAC_CH0DEN + sc->offset); 169 + sudmac_writel(sc, dintctrl & ~sc->dint_end_bit, SUDMAC_DINTCTRL); 170 + sudmac_writel(sc, sc->dint_end_bit, SUDMAC_DINTSTSCLR); 171 + } 172 + 173 + static int sudmac_desc_setup(struct shdma_chan *schan, 174 + struct shdma_desc *sdesc, 175 + dma_addr_t src, dma_addr_t dst, size_t *len) 176 + { 177 + struct sudmac_chan *sc = to_chan(schan); 178 + struct sudmac_desc *sd = to_desc(sdesc); 179 + 180 + dev_dbg(sc->shdma_chan.dev, "%s: src=%x, dst=%x, len=%d\n", 181 + __func__, src, dst, *len); 182 + 183 + if (*len > schan->max_xfer_len) 184 + *len = schan->max_xfer_len; 185 + 186 + if (dst) 187 + sd->hw.base_addr = dst; 188 + else if (src) 189 + sd->hw.base_addr = src; 190 + sd->hw.base_byte_count = *len; 191 + 
192 + return 0; 193 + } 194 + 195 + static void sudmac_halt(struct shdma_chan *schan) 196 + { 197 + struct sudmac_chan *sc = to_chan(schan); 198 + 199 + sudmac_dma_halt(sc); 200 + } 201 + 202 + static bool sudmac_chan_irq(struct shdma_chan *schan, int irq) 203 + { 204 + struct sudmac_chan *sc = to_chan(schan); 205 + u32 dintsts = sudmac_readl(sc, SUDMAC_DINTSTS); 206 + 207 + if (!(dintsts & sc->dint_end_bit)) 208 + return false; 209 + 210 + /* DMA stop */ 211 + sudmac_dma_halt(sc); 212 + 213 + return true; 214 + } 215 + 216 + static size_t sudmac_get_partial(struct shdma_chan *schan, 217 + struct shdma_desc *sdesc) 218 + { 219 + struct sudmac_chan *sc = to_chan(schan); 220 + struct sudmac_desc *sd = to_desc(sdesc); 221 + u32 current_byte_count = sudmac_readl(sc, SUDMAC_CH0CBC + sc->offset); 222 + 223 + return sd->hw.base_byte_count - current_byte_count; 224 + } 225 + 226 + static bool sudmac_desc_completed(struct shdma_chan *schan, 227 + struct shdma_desc *sdesc) 228 + { 229 + struct sudmac_chan *sc = to_chan(schan); 230 + struct sudmac_desc *sd = to_desc(sdesc); 231 + u32 current_addr = sudmac_readl(sc, SUDMAC_CH0CA + sc->offset); 232 + 233 + return sd->hw.base_addr + sd->hw.base_byte_count == current_addr; 234 + } 235 + 236 + static int sudmac_chan_probe(struct sudmac_device *su_dev, int id, int irq, 237 + unsigned long flags) 238 + { 239 + struct shdma_dev *sdev = &su_dev->shdma_dev; 240 + struct platform_device *pdev = to_platform_device(sdev->dma_dev.dev); 241 + struct sudmac_chan *sc; 242 + struct shdma_chan *schan; 243 + int err; 244 + 245 + sc = devm_kzalloc(&pdev->dev, sizeof(struct sudmac_chan), GFP_KERNEL); 246 + if (!sc) { 247 + dev_err(sdev->dma_dev.dev, 248 + "No free memory for allocating dma channels!\n"); 249 + return -ENOMEM; 250 + } 251 + 252 + schan = &sc->shdma_chan; 253 + schan->max_xfer_len = 64 * 1024 * 1024 - 1; 254 + 255 + shdma_chan_probe(sdev, schan, id); 256 + 257 + sc->base = su_dev->chan_reg; 258 + 259 + /* get platform_data */ 260 + 
sc->offset = su_dev->pdata->channel->offset; 261 + if (su_dev->pdata->channel->config & SUDMAC_TX_BUFFER_MODE) 262 + sc->cfg |= SUDMAC_SENDBUFM; 263 + if (su_dev->pdata->channel->config & SUDMAC_RX_END_MODE) 264 + sc->cfg |= SUDMAC_RCVENDM; 265 + sc->cfg |= (su_dev->pdata->channel->wait << 4) & SUDMAC_LBA_WAIT; 266 + 267 + if (su_dev->pdata->channel->dint_end_bit & SUDMAC_DMA_BIT_CH0) 268 + sc->dint_end_bit |= SUDMAC_CH0ENDE; 269 + if (su_dev->pdata->channel->dint_end_bit & SUDMAC_DMA_BIT_CH1) 270 + sc->dint_end_bit |= SUDMAC_CH1ENDE; 271 + 272 + /* set up channel irq */ 273 + if (pdev->id >= 0) 274 + snprintf(sc->dev_id, sizeof(sc->dev_id), "sudmac%d.%d", 275 + pdev->id, id); 276 + else 277 + snprintf(sc->dev_id, sizeof(sc->dev_id), "sudmac%d", id); 278 + 279 + err = shdma_request_irq(schan, irq, flags, sc->dev_id); 280 + if (err) { 281 + dev_err(sdev->dma_dev.dev, 282 + "DMA channel %d request_irq failed %d\n", id, err); 283 + goto err_no_irq; 284 + } 285 + 286 + return 0; 287 + 288 + err_no_irq: 289 + /* remove from dmaengine device node */ 290 + shdma_chan_remove(schan); 291 + return err; 292 + } 293 + 294 + static void sudmac_chan_remove(struct sudmac_device *su_dev) 295 + { 296 + struct dma_device *dma_dev = &su_dev->shdma_dev.dma_dev; 297 + struct shdma_chan *schan; 298 + int i; 299 + 300 + shdma_for_each_chan(schan, &su_dev->shdma_dev, i) { 301 + struct sudmac_chan *sc = to_chan(schan); 302 + 303 + BUG_ON(!schan); 304 + 305 + shdma_free_irq(&sc->shdma_chan); 306 + shdma_chan_remove(schan); 307 + } 308 + dma_dev->chancnt = 0; 309 + } 310 + 311 + static dma_addr_t sudmac_slave_addr(struct shdma_chan *schan) 312 + { 313 + /* SUDMAC doesn't need the address */ 314 + return 0; 315 + } 316 + 317 + static struct shdma_desc *sudmac_embedded_desc(void *buf, int i) 318 + { 319 + return &((struct sudmac_desc *)buf)[i].shdma_desc; 320 + } 321 + 322 + static const struct shdma_ops sudmac_shdma_ops = { 323 + .desc_completed = sudmac_desc_completed, 324 + .halt_channel = 
sudmac_halt, 325 + .channel_busy = sudmac_channel_busy, 326 + .slave_addr = sudmac_slave_addr, 327 + .desc_setup = sudmac_desc_setup, 328 + .set_slave = sudmac_set_slave, 329 + .setup_xfer = sudmac_setup_xfer, 330 + .start_xfer = sudmac_start_xfer, 331 + .embedded_desc = sudmac_embedded_desc, 332 + .chan_irq = sudmac_chan_irq, 333 + .get_partial = sudmac_get_partial, 334 + }; 335 + 336 + static int sudmac_probe(struct platform_device *pdev) 337 + { 338 + struct sudmac_pdata *pdata = pdev->dev.platform_data; 339 + int err, i; 340 + struct sudmac_device *su_dev; 341 + struct dma_device *dma_dev; 342 + struct resource *chan, *irq_res; 343 + 344 + /* get platform data */ 345 + if (!pdata) 346 + return -ENODEV; 347 + 348 + chan = platform_get_resource(pdev, IORESOURCE_MEM, 0); 349 + irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 350 + if (!chan || !irq_res) 351 + return -ENODEV; 352 + 353 + err = -ENOMEM; 354 + su_dev = devm_kzalloc(&pdev->dev, sizeof(struct sudmac_device), 355 + GFP_KERNEL); 356 + if (!su_dev) { 357 + dev_err(&pdev->dev, "Not enough memory\n"); 358 + return err; 359 + } 360 + 361 + dma_dev = &su_dev->shdma_dev.dma_dev; 362 + 363 + su_dev->chan_reg = devm_request_and_ioremap(&pdev->dev, chan); 364 + if (!su_dev->chan_reg) 365 + return err; 366 + 367 + dma_cap_set(DMA_SLAVE, dma_dev->cap_mask); 368 + 369 + su_dev->shdma_dev.ops = &sudmac_shdma_ops; 370 + su_dev->shdma_dev.desc_size = sizeof(struct sudmac_desc); 371 + err = shdma_init(&pdev->dev, &su_dev->shdma_dev, pdata->channel_num); 372 + if (err < 0) 373 + return err; 374 + 375 + /* platform data */ 376 + su_dev->pdata = pdev->dev.platform_data; 377 + 378 + platform_set_drvdata(pdev, su_dev); 379 + 380 + /* Create DMA Channel */ 381 + for (i = 0; i < pdata->channel_num; i++) { 382 + err = sudmac_chan_probe(su_dev, i, irq_res->start, IRQF_SHARED); 383 + if (err) 384 + goto chan_probe_err; 385 + } 386 + 387 + err = dma_async_device_register(&su_dev->shdma_dev.dma_dev); 388 + if (err < 0) 389 
+ goto chan_probe_err; 390 + 391 + return err; 392 + 393 + chan_probe_err: 394 + sudmac_chan_remove(su_dev); 395 + 396 + platform_set_drvdata(pdev, NULL); 397 + shdma_cleanup(&su_dev->shdma_dev); 398 + 399 + return err; 400 + } 401 + 402 + static int sudmac_remove(struct platform_device *pdev) 403 + { 404 + struct sudmac_device *su_dev = platform_get_drvdata(pdev); 405 + struct dma_device *dma_dev = &su_dev->shdma_dev.dma_dev; 406 + 407 + dma_async_device_unregister(dma_dev); 408 + sudmac_chan_remove(su_dev); 409 + shdma_cleanup(&su_dev->shdma_dev); 410 + platform_set_drvdata(pdev, NULL); 411 + 412 + return 0; 413 + } 414 + 415 + static struct platform_driver sudmac_driver = { 416 + .driver = { 417 + .owner = THIS_MODULE, 418 + .name = SUDMAC_DRV_NAME, 419 + }, 420 + .probe = sudmac_probe, 421 + .remove = sudmac_remove, 422 + }; 423 + module_platform_driver(sudmac_driver); 424 + 425 + MODULE_AUTHOR("Yoshihiro Shimoda"); 426 + MODULE_DESCRIPTION("Renesas SUDMAC driver"); 427 + MODULE_LICENSE("GPL v2"); 428 + MODULE_ALIAS("platform:" SUDMAC_DRV_NAME);
+23 -1
drivers/dma/sirf-dma.c
··· 16 16 #include <linux/of_address.h> 17 17 #include <linux/of_device.h> 18 18 #include <linux/of_platform.h> 19 + #include <linux/clk.h> 19 20 #include <linux/sirfsoc_dma.h> 20 21 21 22 #include "dmaengine.h" ··· 79 78 struct sirfsoc_dma_chan channels[SIRFSOC_DMA_CHANNELS]; 80 79 void __iomem *base; 81 80 int irq; 81 + struct clk *clk; 82 82 bool is_marco; 83 83 }; 84 84 ··· 641 639 return -EINVAL; 642 640 } 643 641 642 + sdma->clk = devm_clk_get(dev, NULL); 643 + if (IS_ERR(sdma->clk)) { 644 + dev_err(dev, "failed to get a clock.\n"); 645 + return PTR_ERR(sdma->clk); 646 + } 647 + 644 648 ret = of_address_to_resource(dn, 0, &res); 645 649 if (ret) { 646 650 dev_err(dev, "Error parsing memory region!\n"); ··· 706 698 707 699 tasklet_init(&sdma->tasklet, sirfsoc_dma_tasklet, (unsigned long)sdma); 708 700 701 + clk_prepare_enable(sdma->clk); 702 + 709 703 /* Register DMA engine */ 710 704 dev_set_drvdata(dev, sdma); 711 705 ret = dma_async_device_register(dma); ··· 730 720 struct device *dev = &op->dev; 731 721 struct sirfsoc_dma *sdma = dev_get_drvdata(dev); 732 722 723 + clk_disable_unprepare(sdma->clk); 733 724 dma_async_device_unregister(&sdma->dma); 734 725 free_irq(sdma->irq, sdma); 735 726 irq_dispose_mapping(sdma->irq); ··· 753 742 }, 754 743 }; 755 744 756 - module_platform_driver(sirfsoc_dma_driver); 745 + static __init int sirfsoc_dma_init(void) 746 + { 747 + return platform_driver_register(&sirfsoc_dma_driver); 748 + } 749 + 750 + static void __exit sirfsoc_dma_exit(void) 751 + { 752 + platform_driver_unregister(&sirfsoc_dma_driver); 753 + } 754 + 755 + subsys_initcall(sirfsoc_dma_init); 756 + module_exit(sirfsoc_dma_exit); 757 757 758 758 MODULE_AUTHOR("Rongjun Ying <rongjun.ying@csr.com>, " 759 759 "Barry Song <baohua.song@csr.com>");
+72 -15
drivers/dma/tegra20-apb-dma.c
··· 30 30 #include <linux/of.h> 31 31 #include <linux/of_device.h> 32 32 #include <linux/platform_device.h> 33 + #include <linux/pm.h> 33 34 #include <linux/pm_runtime.h> 34 35 #include <linux/slab.h> 35 36 #include <linux/clk/tegra.h> ··· 200 199 201 200 /* Channel-slave specific configuration */ 202 201 struct dma_slave_config dma_sconfig; 202 + struct tegra_dma_channel_regs channel_reg; 203 203 }; 204 204 205 205 /* tegra_dma: Tegra DMA specific information */ ··· 1215 1213 .support_channel_pause = false, 1216 1214 }; 1217 1215 1218 - #if defined(CONFIG_OF) 1219 1216 /* Tegra30 specific DMA controller information */ 1220 1217 static const struct tegra_dma_chip_data tegra30_dma_chip_data = { 1221 1218 .nr_channels = 32, ··· 1244 1243 }, 1245 1244 }; 1246 1245 MODULE_DEVICE_TABLE(of, tegra_dma_of_match); 1247 - #endif 1248 1246 1249 1247 static int tegra_dma_probe(struct platform_device *pdev) 1250 1248 { ··· 1252 1252 int ret; 1253 1253 int i; 1254 1254 const struct tegra_dma_chip_data *cdata = NULL; 1255 + const struct of_device_id *match; 1255 1256 1256 - if (pdev->dev.of_node) { 1257 - const struct of_device_id *match; 1258 - match = of_match_device(of_match_ptr(tegra_dma_of_match), 1259 - &pdev->dev); 1260 - if (!match) { 1261 - dev_err(&pdev->dev, "Error: No device match found\n"); 1262 - return -ENODEV; 1263 - } 1264 - cdata = match->data; 1265 - } else { 1266 - /* If no device tree then fallback to tegra20 */ 1267 - cdata = &tegra20_dma_chip_data; 1257 + match = of_match_device(tegra_dma_of_match, &pdev->dev); 1258 + if (!match) { 1259 + dev_err(&pdev->dev, "Error: No device match found\n"); 1260 + return -ENODEV; 1268 1261 } 1262 + cdata = match->data; 1269 1263 1270 1264 tdma = devm_kzalloc(&pdev->dev, sizeof(*tdma) + cdata->nr_channels * 1271 1265 sizeof(struct tegra_dma_channel), GFP_KERNEL); ··· 1442 1448 return 0; 1443 1449 } 1444 1450 1451 + #ifdef CONFIG_PM_SLEEP 1452 + static int tegra_dma_pm_suspend(struct device *dev) 1453 + { 1454 + struct 
tegra_dma *tdma = dev_get_drvdata(dev); 1455 + int i; 1456 + int ret; 1457 + 1458 + /* Enable clock before accessing register */ 1459 + ret = tegra_dma_runtime_resume(dev); 1460 + if (ret < 0) 1461 + return ret; 1462 + 1463 + tdma->reg_gen = tdma_read(tdma, TEGRA_APBDMA_GENERAL); 1464 + for (i = 0; i < tdma->chip_data->nr_channels; i++) { 1465 + struct tegra_dma_channel *tdc = &tdma->channels[i]; 1466 + struct tegra_dma_channel_regs *ch_reg = &tdc->channel_reg; 1467 + 1468 + ch_reg->csr = tdc_read(tdc, TEGRA_APBDMA_CHAN_CSR); 1469 + ch_reg->ahb_ptr = tdc_read(tdc, TEGRA_APBDMA_CHAN_AHBPTR); 1470 + ch_reg->apb_ptr = tdc_read(tdc, TEGRA_APBDMA_CHAN_APBPTR); 1471 + ch_reg->ahb_seq = tdc_read(tdc, TEGRA_APBDMA_CHAN_AHBSEQ); 1472 + ch_reg->apb_seq = tdc_read(tdc, TEGRA_APBDMA_CHAN_APBSEQ); 1473 + } 1474 + 1475 + /* Disable clock */ 1476 + tegra_dma_runtime_suspend(dev); 1477 + return 0; 1478 + } 1479 + 1480 + static int tegra_dma_pm_resume(struct device *dev) 1481 + { 1482 + struct tegra_dma *tdma = dev_get_drvdata(dev); 1483 + int i; 1484 + int ret; 1485 + 1486 + /* Enable clock before accessing register */ 1487 + ret = tegra_dma_runtime_resume(dev); 1488 + if (ret < 0) 1489 + return ret; 1490 + 1491 + tdma_write(tdma, TEGRA_APBDMA_GENERAL, tdma->reg_gen); 1492 + tdma_write(tdma, TEGRA_APBDMA_CONTROL, 0); 1493 + tdma_write(tdma, TEGRA_APBDMA_IRQ_MASK_SET, 0xFFFFFFFFul); 1494 + 1495 + for (i = 0; i < tdma->chip_data->nr_channels; i++) { 1496 + struct tegra_dma_channel *tdc = &tdma->channels[i]; 1497 + struct tegra_dma_channel_regs *ch_reg = &tdc->channel_reg; 1498 + 1499 + tdc_write(tdc, TEGRA_APBDMA_CHAN_APBSEQ, ch_reg->apb_seq); 1500 + tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, ch_reg->apb_ptr); 1501 + tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBSEQ, ch_reg->ahb_seq); 1502 + tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, ch_reg->ahb_ptr); 1503 + tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, 1504 + (ch_reg->csr & ~TEGRA_APBDMA_CSR_ENB)); 1505 + } 1506 + 1507 + /* Disable clock */ 1508 + 
tegra_dma_runtime_suspend(dev); 1509 + return 0; 1510 + } 1511 + #endif 1512 + 1445 1513 static const struct dev_pm_ops tegra_dma_dev_pm_ops = { 1446 1514 #ifdef CONFIG_PM_RUNTIME 1447 1515 .runtime_suspend = tegra_dma_runtime_suspend, 1448 1516 .runtime_resume = tegra_dma_runtime_resume, 1449 1517 #endif 1518 + SET_SYSTEM_SLEEP_PM_OPS(tegra_dma_pm_suspend, tegra_dma_pm_resume) 1450 1519 }; 1451 1520 1452 1521 static struct platform_driver tegra_dmac_driver = { ··· 1517 1460 .name = "tegra-apbdma", 1518 1461 .owner = THIS_MODULE, 1519 1462 .pm = &tegra_dma_dev_pm_ops, 1520 - .of_match_table = of_match_ptr(tegra_dma_of_match), 1463 + .of_match_table = tegra_dma_of_match, 1521 1464 }, 1522 1465 .probe = tegra_dma_probe, 1523 1466 .remove = tegra_dma_remove,
+1 -1
drivers/dma/timb_dma.c
··· 823 823 .owner = THIS_MODULE, 824 824 }, 825 825 .probe = td_probe, 826 - .remove = __exit_p(td_remove), 826 + .remove = td_remove, 827 827 }; 828 828 829 829 module_platform_driver(td_driver);
+4 -4
drivers/dma/txx9dmac.c
··· 1190 1190 return 0; 1191 1191 } 1192 1192 1193 - static int __exit txx9dmac_chan_remove(struct platform_device *pdev) 1193 + static int txx9dmac_chan_remove(struct platform_device *pdev) 1194 1194 { 1195 1195 struct txx9dmac_chan *dc = platform_get_drvdata(pdev); 1196 1196 ··· 1252 1252 return 0; 1253 1253 } 1254 1254 1255 - static int __exit txx9dmac_remove(struct platform_device *pdev) 1255 + static int txx9dmac_remove(struct platform_device *pdev) 1256 1256 { 1257 1257 struct txx9dmac_dev *ddev = platform_get_drvdata(pdev); 1258 1258 ··· 1299 1299 }; 1300 1300 1301 1301 static struct platform_driver txx9dmac_chan_driver = { 1302 - .remove = __exit_p(txx9dmac_chan_remove), 1302 + .remove = txx9dmac_chan_remove, 1303 1303 .driver = { 1304 1304 .name = "txx9dmac-chan", 1305 1305 }, 1306 1306 }; 1307 1307 1308 1308 static struct platform_driver txx9dmac_driver = { 1309 - .remove = __exit_p(txx9dmac_remove), 1309 + .remove = txx9dmac_remove, 1310 1310 .shutdown = txx9dmac_shutdown, 1311 1311 .driver = { 1312 1312 .name = "txx9dmac",
+116
include/linux/acpi_dma.h
··· 1 + /* 2 + * ACPI helpers for DMA request / controller 3 + * 4 + * Based on of_dma.h 5 + * 6 + * Copyright (C) 2013, Intel Corporation 7 + * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com> 8 + * 9 + * This program is free software; you can redistribute it and/or modify 10 + * it under the terms of the GNU General Public License version 2 as 11 + * published by the Free Software Foundation. 12 + */ 13 + 14 + #ifndef __LINUX_ACPI_DMA_H 15 + #define __LINUX_ACPI_DMA_H 16 + 17 + #include <linux/list.h> 18 + #include <linux/device.h> 19 + #include <linux/dmaengine.h> 20 + 21 + /** 22 + * struct acpi_dma_spec - slave device DMA resources 23 + * @chan_id: channel unique id 24 + * @slave_id: request line unique id 25 + * @dev: struct device of the DMA controller to be used in the filter 26 + * function 27 + */ 28 + struct acpi_dma_spec { 29 + int chan_id; 30 + int slave_id; 31 + struct device *dev; 32 + }; 33 + 34 + /** 35 + * struct acpi_dma - representation of the registered DMAC 36 + * @dma_controllers: linked list node 37 + * @dev: struct device of this controller 38 + * @acpi_dma_xlate: callback function to find a suitable channel 39 + * @data: private data used by a callback function 40 + */ 41 + struct acpi_dma { 42 + struct list_head dma_controllers; 43 + struct device *dev; 44 + struct dma_chan *(*acpi_dma_xlate) 45 + (struct acpi_dma_spec *, struct acpi_dma *); 46 + void *data; 47 + }; 48 + 49 + /* Used with acpi_dma_simple_xlate() */ 50 + struct acpi_dma_filter_info { 51 + dma_cap_mask_t dma_cap; 52 + dma_filter_fn filter_fn; 53 + }; 54 + 55 + #ifdef CONFIG_DMA_ACPI 56 + 57 + int acpi_dma_controller_register(struct device *dev, 58 + struct dma_chan *(*acpi_dma_xlate) 59 + (struct acpi_dma_spec *, struct acpi_dma *), 60 + void *data); 61 + int acpi_dma_controller_free(struct device *dev); 62 + int devm_acpi_dma_controller_register(struct device *dev, 63 + struct dma_chan *(*acpi_dma_xlate) 64 + (struct acpi_dma_spec *, struct acpi_dma *), 65 + 
void *data); 66 + void devm_acpi_dma_controller_free(struct device *dev); 67 + 68 + struct dma_chan *acpi_dma_request_slave_chan_by_index(struct device *dev, 69 + size_t index); 70 + struct dma_chan *acpi_dma_request_slave_chan_by_name(struct device *dev, 71 + const char *name); 72 + 73 + struct dma_chan *acpi_dma_simple_xlate(struct acpi_dma_spec *dma_spec, 74 + struct acpi_dma *adma); 75 + #else 76 + 77 + static inline int acpi_dma_controller_register(struct device *dev, 78 + struct dma_chan *(*acpi_dma_xlate) 79 + (struct acpi_dma_spec *, struct acpi_dma *), 80 + void *data) 81 + { 82 + return -ENODEV; 83 + } 84 + static inline int acpi_dma_controller_free(struct device *dev) 85 + { 86 + return -ENODEV; 87 + } 88 + static inline int devm_acpi_dma_controller_register(struct device *dev, 89 + struct dma_chan *(*acpi_dma_xlate) 90 + (struct acpi_dma_spec *, struct acpi_dma *), 91 + void *data) 92 + { 93 + return -ENODEV; 94 + } 95 + static inline void devm_acpi_dma_controller_free(struct device *dev) 96 + { 97 + } 98 + 99 + static inline struct dma_chan *acpi_dma_request_slave_chan_by_index( 100 + struct device *dev, size_t index) 101 + { 102 + return NULL; 103 + } 104 + static inline struct dma_chan *acpi_dma_request_slave_chan_by_name( 105 + struct device *dev, const char *name) 106 + { 107 + return NULL; 108 + } 109 + 110 + #define acpi_dma_simple_xlate NULL 111 + 112 + #endif 113 + 114 + #define acpi_dma_request_slave_channel acpi_dma_request_slave_chan_by_index 115 + 116 + #endif /* __LINUX_ACPI_DMA_H */
+8 -7
include/linux/dmaengine.h
··· 967 967 #ifdef CONFIG_DMA_ENGINE 968 968 enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx); 969 969 void dma_issue_pending_all(void); 970 - struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param); 971 - struct dma_chan *dma_request_slave_channel(struct device *dev, char *name); 970 + struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask, 971 + dma_filter_fn fn, void *fn_param); 972 + struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name); 972 973 void dma_release_channel(struct dma_chan *chan); 973 974 #else 974 975 static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx) ··· 979 978 static inline void dma_issue_pending_all(void) 980 979 { 981 980 } 982 - static inline struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, 981 + static inline struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask, 983 982 dma_filter_fn fn, void *fn_param) 984 983 { 985 984 return NULL; 986 985 } 987 986 static inline struct dma_chan *dma_request_slave_channel(struct device *dev, 988 - char *name) 987 + const char *name) 989 988 { 990 989 return NULL; 991 990 } ··· 1006 1005 __dma_request_slave_channel_compat(&(mask), x, y, dev, name) 1007 1006 1008 1007 static inline struct dma_chan 1009 - *__dma_request_slave_channel_compat(dma_cap_mask_t *mask, dma_filter_fn fn, 1010 - void *fn_param, struct device *dev, 1011 - char *name) 1008 + *__dma_request_slave_channel_compat(const dma_cap_mask_t *mask, 1009 + dma_filter_fn fn, void *fn_param, 1010 + struct device *dev, char *name) 1012 1011 { 1013 1012 struct dma_chan *chan; 1014 1013
+4 -6
include/linux/of_dma.h
··· 25 25 struct dma_chan *(*of_dma_xlate) 26 26 (struct of_phandle_args *, struct of_dma *); 27 27 void *of_dma_data; 28 - int use_count; 29 28 }; 30 29 31 30 struct of_dma_filter_info { ··· 37 38 struct dma_chan *(*of_dma_xlate) 38 39 (struct of_phandle_args *, struct of_dma *), 39 40 void *data); 40 - extern int of_dma_controller_free(struct device_node *np); 41 + extern void of_dma_controller_free(struct device_node *np); 41 42 extern struct dma_chan *of_dma_request_slave_channel(struct device_node *np, 42 - char *name); 43 + const char *name); 43 44 extern struct dma_chan *of_dma_simple_xlate(struct of_phandle_args *dma_spec, 44 45 struct of_dma *ofdma); 45 46 #else ··· 51 52 return -ENODEV; 52 53 } 53 54 54 - static inline int of_dma_controller_free(struct device_node *np) 55 + static inline void of_dma_controller_free(struct device_node *np) 55 56 { 56 - return -ENODEV; 57 57 } 58 58 59 59 static inline struct dma_chan *of_dma_request_slave_channel(struct device_node *np, 60 - char *name) 60 + const char *name) 61 61 { 62 62 return NULL; 63 63 }
+52
include/linux/sudmac.h
··· 1 + /* 2 + * Header for the SUDMAC driver 3 + * 4 + * Copyright (C) 2013 Renesas Solutions Corp. 5 + * 6 + * This is free software; you can redistribute it and/or modify 7 + * it under the terms of version 2 of the GNU General Public License as 8 + * published by the Free Software Foundation. 9 + */ 10 + #ifndef SUDMAC_H 11 + #define SUDMAC_H 12 + 13 + #include <linux/dmaengine.h> 14 + #include <linux/shdma-base.h> 15 + #include <linux/types.h> 16 + 17 + /* Used by slave DMA clients to request DMA to/from a specific peripheral */ 18 + struct sudmac_slave { 19 + struct shdma_slave shdma_slave; /* Set by the platform */ 20 + }; 21 + 22 + /* 23 + * Supplied by platforms to specify, how a DMA channel has to be configured for 24 + * a certain peripheral 25 + */ 26 + struct sudmac_slave_config { 27 + int slave_id; 28 + }; 29 + 30 + struct sudmac_channel { 31 + unsigned long offset; 32 + unsigned long config; 33 + unsigned long wait; /* The configuable range is 0 to 3 */ 34 + unsigned long dint_end_bit; 35 + }; 36 + 37 + struct sudmac_pdata { 38 + const struct sudmac_slave_config *slave; 39 + int slave_num; 40 + const struct sudmac_channel *channel; 41 + int channel_num; 42 + }; 43 + 44 + /* Definitions for the sudmac_channel.config */ 45 + #define SUDMAC_TX_BUFFER_MODE BIT(0) 46 + #define SUDMAC_RX_END_MODE BIT(1) 47 + 48 + /* Definitions for the sudmac_channel.dint_end_bit */ 49 + #define SUDMAC_DMA_BIT_CH0 BIT(0) 50 + #define SUDMAC_DMA_BIT_CH1 BIT(1) 51 + 52 + #endif