Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'dmaengine-4.19-rc1' of git://git.infradead.org/users/vkoul/slave-dma

Pull DMAengine updates from Vinod Koul:
"This round brings a couple of framework changes, a new driver and the
usual driver updates:

- new managed helper for dmaengine framework registration

- split dmaengine pause capability to pause and resume and allow
drivers to report that individually

- update dma_request_chan_by_mask() to handle deferred probing

- move imx-sdma to use virt-dma

- new driver for Actions Semi Owl family S900 controller

- minor updates to intel, renesas, mv_xor, pl330 etc"

* tag 'dmaengine-4.19-rc1' of git://git.infradead.org/users/vkoul/slave-dma: (46 commits)
dmaengine: Add Actions Semi Owl family S900 DMA driver
dt-bindings: dmaengine: Add binding for Actions Semi Owl SoCs
dmaengine: sh: rcar-dmac: Should not stop the DMAC by rcar_dmac_sync_tcr()
dmaengine: mic_x100_dma: use the new helper to simplify the code
dmaengine: add a new helper dmaenginem_async_device_register
dmaengine: imx-sdma: add memcpy interface
dmaengine: imx-sdma: add SDMA_BD_MAX_CNT to replace '0xffff'
dmaengine: dma_request_chan_by_mask() to handle deferred probing
dmaengine: pl330: fix irq race with terminate_all
dmaengine: Revert "dmaengine: mv_xor_v2: enable COMPILE_TEST"
dmaengine: mv_xor_v2: use {lower,upper}_32_bits to configure HW descriptor address
dmaengine: mv_xor_v2: enable COMPILE_TEST
dmaengine: mv_xor_v2: move unmap to before callback
dmaengine: mv_xor_v2: convert callback to helper function
dmaengine: mv_xor_v2: kill the tasklets upon exit
dmaengine: mv_xor_v2: explicitly freeup irq
dmaengine: sh: rcar-dmac: Add dma_pause operation
dmaengine: sh: rcar-dmac: add a new function to clear CHCR.DE with barrier
dmaengine: idma64: Support dmaengine_terminate_sync()
dmaengine: hsu: Support dmaengine_terminate_sync()
...

+1599 -306
+47
Documentation/devicetree/bindings/dma/owl-dma.txt
··· 1 + * Actions Semi Owl SoCs DMA controller 2 + 3 + This binding follows the generic DMA bindings defined in dma.txt. 4 + 5 + Required properties: 6 + - compatible: Should be "actions,s900-dma". 7 + - reg: Should contain DMA registers location and length. 8 + - interrupts: Should contain 4 interrupts shared by all channel. 9 + - #dma-cells: Must be <1>. Used to represent the number of integer 10 + cells in the dmas property of client device. 11 + - dma-channels: Physical channels supported. 12 + - dma-requests: Number of DMA request signals supported by the controller. 13 + Refer to Documentation/devicetree/bindings/dma/dma.txt 14 + - clocks: Phandle and Specifier of the clock feeding the DMA controller. 15 + 16 + Example: 17 + 18 + Controller: 19 + dma: dma-controller@e0260000 { 20 + compatible = "actions,s900-dma"; 21 + reg = <0x0 0xe0260000 0x0 0x1000>; 22 + interrupts = <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>, 23 + <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH>, 24 + <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH>, 25 + <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>; 26 + #dma-cells = <1>; 27 + dma-channels = <12>; 28 + dma-requests = <46>; 29 + clocks = <&clock CLK_DMAC>; 30 + }; 31 + 32 + Client: 33 + 34 + DMA clients connected to the Actions Semi Owl SoCs DMA controller must 35 + use the format described in the dma.txt file, using a two-cell specifier 36 + for each channel. 37 + 38 + The two cells in order are: 39 + 1. A phandle pointing to the DMA controller. 40 + 2. The channel id. 41 + 42 + uart5: serial@e012a000 { 43 + ... 44 + dma-names = "tx", "rx"; 45 + dmas = <&dma 26>, <&dma 27>; 46 + ... 47 + };
+1
Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt
··· 29 29 - "renesas,dmac-r8a77965" (R-Car M3-N) 30 30 - "renesas,dmac-r8a77970" (R-Car V3M) 31 31 - "renesas,dmac-r8a77980" (R-Car V3H) 32 + - "renesas,dmac-r8a77990" (R-Car E3) 32 33 - "renesas,dmac-r8a77995" (R-Car D3) 33 34 34 35 - reg: base address and length of the registers block for the DMAC
+2
Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt
··· 66 66 Optional child node properties for VDMA: 67 67 - xlnx,genlock-mode: Tells Genlock synchronization is 68 68 enabled/disabled in hardware. 69 + - xlnx,enable-vert-flip: Tells vertical flip is 70 + enabled/disabled in hardware(S2MM path). 69 71 Optional child node properties for AXI DMA: 70 72 -dma-channels: Number of dma channels in child node. 71 73
+1
Documentation/driver-model/devres.txt
··· 240 240 devm_of_clk_add_hw_provider() 241 241 242 242 DMA 243 + dmaenginem_async_device_register() 243 244 dmam_alloc_coherent() 244 245 dmam_alloc_attrs() 245 246 dmam_declare_coherent_memory()
+6 -4
crypto/async_tx/async_pq.c
··· 42 42 #define P(b, d) (b[d-2]) 43 43 #define Q(b, d) (b[d-1]) 44 44 45 + #define MAX_DISKS 255 46 + 45 47 /** 46 48 * do_async_gen_syndrome - asynchronously calculate P and/or Q 47 49 */ ··· 186 184 struct dma_device *device = chan ? chan->device : NULL; 187 185 struct dmaengine_unmap_data *unmap = NULL; 188 186 189 - BUG_ON(disks > 255 || !(P(blocks, disks) || Q(blocks, disks))); 187 + BUG_ON(disks > MAX_DISKS || !(P(blocks, disks) || Q(blocks, disks))); 190 188 191 189 if (device) 192 190 unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT); ··· 198 196 is_dma_pq_aligned(device, offset, 0, len)) { 199 197 struct dma_async_tx_descriptor *tx; 200 198 enum dma_ctrl_flags dma_flags = 0; 201 - unsigned char coefs[src_cnt]; 199 + unsigned char coefs[MAX_DISKS]; 202 200 int i, j; 203 201 204 202 /* run the p+q asynchronously */ ··· 301 299 struct dma_chan *chan = pq_val_chan(submit, blocks, disks, len); 302 300 struct dma_device *device = chan ? chan->device : NULL; 303 301 struct dma_async_tx_descriptor *tx; 304 - unsigned char coefs[disks-2]; 302 + unsigned char coefs[MAX_DISKS]; 305 303 enum dma_ctrl_flags dma_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0; 306 304 struct dmaengine_unmap_data *unmap = NULL; 307 305 308 - BUG_ON(disks < 4); 306 + BUG_ON(disks < 4 || disks > MAX_DISKS); 309 307 310 308 if (device) 311 309 unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT);
+3 -1
crypto/async_tx/raid6test.c
··· 81 81 init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv); 82 82 tx = async_gen_syndrome(ptrs, 0, disks, bytes, &submit); 83 83 } else { 84 - struct page *blocks[disks]; 84 + struct page *blocks[NDISKS]; 85 85 struct page *dest; 86 86 int count = 0; 87 87 int i; 88 + 89 + BUG_ON(disks > NDISKS); 88 90 89 91 /* data+Q failure. Reconstruct data from P, 90 92 * then rebuild syndrome
+9
drivers/dma/Kconfig
··· 250 250 tristate "i.MX SDMA support" 251 251 depends on ARCH_MXC 252 252 select DMA_ENGINE 253 + select DMA_VIRTUAL_CHANNELS 253 254 help 254 255 Support the i.MX SDMA engine. This engine is integrated into 255 256 Freescale i.MX25/31/35/51/53/6 chips. ··· 413 412 depends on ARM || COMPILE_TEST 414 413 help 415 414 Support for "Type-AXI" NBPF DMA IPs from Renesas 415 + 416 + config OWL_DMA 417 + tristate "Actions Semi Owl SoCs DMA support" 418 + depends on ARCH_ACTIONS 419 + select DMA_ENGINE 420 + select DMA_VIRTUAL_CHANNELS 421 + help 422 + Enable support for the Actions Semi Owl SoCs DMA controller. 416 423 417 424 config PCH_DMA 418 425 tristate "Intel EG20T PCH / LAPIS Semicon IOH(ML7213/ML7223/ML7831) DMA"
+1
drivers/dma/Makefile
··· 52 52 obj-$(CONFIG_MXS_DMA) += mxs-dma.o 53 53 obj-$(CONFIG_MX3_IPU) += ipu/ 54 54 obj-$(CONFIG_NBPFAXI_DMA) += nbpfaxi.o 55 + obj-$(CONFIG_OWL_DMA) += owl-dma.o 55 56 obj-$(CONFIG_PCH_DMA) += pch_dma.o 56 57 obj-$(CONFIG_PL330_DMA) += pl330.o 57 58 obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/
+45 -8
drivers/dma/dmaengine.c
··· 500 500 caps->max_burst = device->max_burst; 501 501 caps->residue_granularity = device->residue_granularity; 502 502 caps->descriptor_reuse = device->descriptor_reuse; 503 - 504 - /* 505 - * Some devices implement only pause (e.g. to get residuum) but no 506 - * resume. However cmd_pause is advertised as pause AND resume. 507 - */ 508 - caps->cmd_pause = !!(device->device_pause && device->device_resume); 503 + caps->cmd_pause = !!device->device_pause; 504 + caps->cmd_resume = !!device->device_resume; 509 505 caps->cmd_terminate = !!device->device_terminate_all; 510 506 511 507 return 0; ··· 770 774 return ERR_PTR(-ENODEV); 771 775 772 776 chan = __dma_request_channel(mask, NULL, NULL); 773 - if (!chan) 774 - chan = ERR_PTR(-ENODEV); 777 + if (!chan) { 778 + mutex_lock(&dma_list_mutex); 779 + if (list_empty(&dma_device_list)) 780 + chan = ERR_PTR(-EPROBE_DEFER); 781 + else 782 + chan = ERR_PTR(-ENODEV); 783 + mutex_unlock(&dma_list_mutex); 784 + } 775 785 776 786 return chan; 777 787 } ··· 1140 1138 } 1141 1139 } 1142 1140 EXPORT_SYMBOL(dma_async_device_unregister); 1141 + 1142 + static void dmam_device_release(struct device *dev, void *res) 1143 + { 1144 + struct dma_device *device; 1145 + 1146 + device = *(struct dma_device **)res; 1147 + dma_async_device_unregister(device); 1148 + } 1149 + 1150 + /** 1151 + * dmaenginem_async_device_register - registers DMA devices found 1152 + * @device: &dma_device 1153 + * 1154 + * The operation is managed and will be undone on driver detach. 
1155 + */ 1156 + int dmaenginem_async_device_register(struct dma_device *device) 1157 + { 1158 + void *p; 1159 + int ret; 1160 + 1161 + p = devres_alloc(dmam_device_release, sizeof(void *), GFP_KERNEL); 1162 + if (!p) 1163 + return -ENOMEM; 1164 + 1165 + ret = dma_async_device_register(device); 1166 + if (!ret) { 1167 + *(struct dma_device **)p = device; 1168 + devres_add(device->dev, p); 1169 + } else { 1170 + devres_free(p); 1171 + } 1172 + 1173 + return ret; 1174 + } 1175 + EXPORT_SYMBOL(dmaenginem_async_device_register); 1143 1176 1144 1177 struct dmaengine_unmap_pool { 1145 1178 struct kmem_cache *cache;
+8
drivers/dma/hsu/hsu.c
··· 413 413 vchan_free_chan_resources(to_virt_chan(chan)); 414 414 } 415 415 416 + static void hsu_dma_synchronize(struct dma_chan *chan) 417 + { 418 + struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan); 419 + 420 + vchan_synchronize(&hsuc->vchan); 421 + } 422 + 416 423 int hsu_dma_probe(struct hsu_dma_chip *chip) 417 424 { 418 425 struct hsu_dma *hsu; ··· 466 459 hsu->dma.device_pause = hsu_dma_pause; 467 460 hsu->dma.device_resume = hsu_dma_resume; 468 461 hsu->dma.device_terminate_all = hsu_dma_terminate_all; 462 + hsu->dma.device_synchronize = hsu_dma_synchronize; 469 463 470 464 hsu->dma.src_addr_widths = HSU_DMA_BUSWIDTHS; 471 465 hsu->dma.dst_addr_widths = HSU_DMA_BUSWIDTHS;
+8
drivers/dma/idma64.c
··· 496 496 return 0; 497 497 } 498 498 499 + static void idma64_synchronize(struct dma_chan *chan) 500 + { 501 + struct idma64_chan *idma64c = to_idma64_chan(chan); 502 + 503 + vchan_synchronize(&idma64c->vchan); 504 + } 505 + 499 506 static int idma64_alloc_chan_resources(struct dma_chan *chan) 500 507 { 501 508 struct idma64_chan *idma64c = to_idma64_chan(chan); ··· 590 583 idma64->dma.device_pause = idma64_pause; 591 584 idma64->dma.device_resume = idma64_resume; 592 585 idma64->dma.device_terminate_all = idma64_terminate_all; 586 + idma64->dma.device_synchronize = idma64_synchronize; 593 587 594 588 idma64->dma.src_addr_widths = IDMA64_BUSWIDTHS; 595 589 idma64->dma.dst_addr_widths = IDMA64_BUSWIDTHS;
+378 -198
drivers/dma/imx-sdma.c
··· 24 24 #include <linux/spinlock.h> 25 25 #include <linux/device.h> 26 26 #include <linux/dma-mapping.h> 27 + #include <linux/dmapool.h> 27 28 #include <linux/firmware.h> 28 29 #include <linux/slab.h> 29 30 #include <linux/platform_device.h> ··· 42 41 #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h> 43 42 44 43 #include "dmaengine.h" 44 + #include "virt-dma.h" 45 45 46 46 /* SDMA registers */ 47 47 #define SDMA_H_C0PTR 0x000 ··· 185 183 * Mode/Count of data node descriptors - IPCv2 186 184 */ 187 185 struct sdma_mode_count { 186 + #define SDMA_BD_MAX_CNT 0xffff 188 187 u32 count : 16; /* size of the buffer pointed by this BD */ 189 188 u32 status : 8; /* E,R,I,C,W,D status bits stored here */ 190 189 u32 command : 8; /* command mostly used for channel 0 */ ··· 203 200 /** 204 201 * struct sdma_channel_control - Channel control Block 205 202 * 206 - * @current_bd_ptr current buffer descriptor processed 207 - * @base_bd_ptr first element of buffer descriptor array 208 - * @unused padding. The SDMA engine expects an array of 128 byte 203 + * @current_bd_ptr: current buffer descriptor processed 204 + * @base_bd_ptr: first element of buffer descriptor array 205 + * @unused: padding. 
The SDMA engine expects an array of 128 byte 209 206 * control blocks 210 207 */ 211 208 struct sdma_channel_control { ··· 218 215 * struct sdma_state_registers - SDMA context for a channel 219 216 * 220 217 * @pc: program counter 218 + * @unused1: unused 221 219 * @t: test bit: status of arithmetic & test instruction 222 220 * @rpc: return program counter 221 + * @unused0: unused 223 222 * @sf: source fault while loading data 224 223 * @spc: loop start program counter 224 + * @unused2: unused 225 225 * @df: destination fault while storing data 226 226 * @epc: loop end program counter 227 227 * @lm: loop mode ··· 262 256 * @dsa: dedicated core source address register 263 257 * @ds: dedicated core status register 264 258 * @dd: dedicated core data register 259 + * @scratch0: 1st word of dedicated ram for context switch 260 + * @scratch1: 2nd word of dedicated ram for context switch 261 + * @scratch2: 3rd word of dedicated ram for context switch 262 + * @scratch3: 4th word of dedicated ram for context switch 263 + * @scratch4: 5th word of dedicated ram for context switch 264 + * @scratch5: 6th word of dedicated ram for context switch 265 + * @scratch6: 7th word of dedicated ram for context switch 266 + * @scratch7: 8th word of dedicated ram for context switch 265 267 */ 266 268 struct sdma_context_data { 267 269 struct sdma_state_registers channel_state; ··· 298 284 u32 scratch7; 299 285 } __attribute__ ((packed)); 300 286 301 - #define NUM_BD (int)(PAGE_SIZE / sizeof(struct sdma_buffer_descriptor)) 302 287 303 288 struct sdma_engine; 304 289 305 290 /** 291 + * struct sdma_desc - descriptor structor for one transfer 292 + * @vd: descriptor for virt dma 293 + * @num_bd: number of descriptors currently handling 294 + * @bd_phys: physical address of bd 295 + * @buf_tail: ID of the buffer that was processed 296 + * @buf_ptail: ID of the previous buffer that was processed 297 + * @period_len: period length, used in cyclic. 
298 + * @chn_real_count: the real count updated from bd->mode.count 299 + * @chn_count: the transfer count set 300 + * @sdmac: sdma_channel pointer 301 + * @bd: pointer of allocate bd 302 + */ 303 + struct sdma_desc { 304 + struct virt_dma_desc vd; 305 + unsigned int num_bd; 306 + dma_addr_t bd_phys; 307 + unsigned int buf_tail; 308 + unsigned int buf_ptail; 309 + unsigned int period_len; 310 + unsigned int chn_real_count; 311 + unsigned int chn_count; 312 + struct sdma_channel *sdmac; 313 + struct sdma_buffer_descriptor *bd; 314 + }; 315 + 316 + /** 306 317 * struct sdma_channel - housekeeping for a SDMA channel 307 318 * 308 - * @sdma pointer to the SDMA engine for this channel 309 - * @channel the channel number, matches dmaengine chan_id + 1 310 - * @direction transfer type. Needed for setting SDMA script 311 - * @peripheral_type Peripheral type. Needed for setting SDMA script 312 - * @event_id0 aka dma request line 313 - * @event_id1 for channels that use 2 events 314 - * @word_size peripheral access size 315 - * @buf_tail ID of the buffer that was processed 316 - * @buf_ptail ID of the previous buffer that was processed 317 - * @num_bd max NUM_BD. number of descriptors currently handling 319 + * @vc: virt_dma base structure 320 + * @desc: sdma description including vd and other special member 321 + * @sdma: pointer to the SDMA engine for this channel 322 + * @channel: the channel number, matches dmaengine chan_id + 1 323 + * @direction: transfer type. Needed for setting SDMA script 324 + * @peripheral_type: Peripheral type. 
Needed for setting SDMA script 325 + * @event_id0: aka dma request line 326 + * @event_id1: for channels that use 2 events 327 + * @word_size: peripheral access size 328 + * @pc_from_device: script address for those device_2_memory 329 + * @pc_to_device: script address for those memory_2_device 330 + * @device_to_device: script address for those device_2_device 331 + * @pc_to_pc: script address for those memory_2_memory 332 + * @flags: loop mode or not 333 + * @per_address: peripheral source or destination address in common case 334 + * destination address in p_2_p case 335 + * @per_address2: peripheral source address in p_2_p case 336 + * @event_mask: event mask used in p_2_p script 337 + * @watermark_level: value for gReg[7], some script will extend it from 338 + * basic watermark such as p_2_p 339 + * @shp_addr: value for gReg[6] 340 + * @per_addr: value for gReg[2] 341 + * @status: status of dma channel 342 + * @data: specific sdma interface structure 343 + * @bd_pool: dma_pool for bd 318 344 */ 319 345 struct sdma_channel { 346 + struct virt_dma_chan vc; 347 + struct sdma_desc *desc; 320 348 struct sdma_engine *sdma; 321 349 unsigned int channel; 322 350 enum dma_transfer_direction direction; ··· 366 310 unsigned int event_id0; 367 311 unsigned int event_id1; 368 312 enum dma_slave_buswidth word_size; 369 - unsigned int buf_tail; 370 - unsigned int buf_ptail; 371 - unsigned int num_bd; 372 - unsigned int period_len; 373 - struct sdma_buffer_descriptor *bd; 374 - dma_addr_t bd_phys; 375 313 unsigned int pc_from_device, pc_to_device; 376 314 unsigned int device_to_device; 315 + unsigned int pc_to_pc; 377 316 unsigned long flags; 378 317 dma_addr_t per_address, per_address2; 379 318 unsigned long event_mask[2]; 380 319 unsigned long watermark_level; 381 320 u32 shp_addr, per_addr; 382 - struct dma_chan chan; 383 - spinlock_t lock; 384 - struct dma_async_tx_descriptor desc; 385 321 enum dma_status status; 386 - unsigned int chn_count; 387 - unsigned int 
chn_real_count; 388 - struct tasklet_struct tasklet; 389 322 struct imx_dma_data data; 390 - bool enabled; 323 + struct dma_pool *bd_pool; 391 324 }; 392 325 393 326 #define IMX_DMA_SG_LOOP BIT(0) ··· 391 346 /** 392 347 * struct sdma_firmware_header - Layout of the firmware image 393 348 * 394 - * @magic "SDMA" 395 - * @version_major increased whenever layout of struct sdma_script_start_addrs 396 - * changes. 397 - * @version_minor firmware minor version (for binary compatible changes) 398 - * @script_addrs_start offset of struct sdma_script_start_addrs in this image 399 - * @num_script_addrs Number of script addresses in this image 400 - * @ram_code_start offset of SDMA ram image in this firmware image 401 - * @ram_code_size size of SDMA ram image 402 - * @script_addrs Stores the start address of the SDMA scripts 349 + * @magic: "SDMA" 350 + * @version_major: increased whenever layout of struct 351 + * sdma_script_start_addrs changes. 352 + * @version_minor: firmware minor version (for binary compatible changes) 353 + * @script_addrs_start: offset of struct sdma_script_start_addrs in this image 354 + * @num_script_addrs: Number of script addresses in this image 355 + * @ram_code_start: offset of SDMA ram image in this firmware image 356 + * @ram_code_size: size of SDMA ram image 357 + * @script_addrs: Stores the start address of the SDMA scripts 403 358 * (in SDMA memory space) 404 359 */ 405 360 struct sdma_firmware_header { ··· 436 391 u32 spba_start_addr; 437 392 u32 spba_end_addr; 438 393 unsigned int irq; 394 + dma_addr_t bd0_phys; 395 + struct sdma_buffer_descriptor *bd0; 439 396 }; 440 397 441 398 static struct sdma_driver_data sdma_imx31 = { ··· 637 590 638 591 static void sdma_enable_channel(struct sdma_engine *sdma, int channel) 639 592 { 640 - unsigned long flags; 641 - struct sdma_channel *sdmac = &sdma->channel[channel]; 642 - 643 593 writel(BIT(channel), sdma->regs + SDMA_H_START); 644 - 645 - spin_lock_irqsave(&sdmac->lock, flags); 646 - 
sdmac->enabled = true; 647 - spin_unlock_irqrestore(&sdmac->lock, flags); 648 594 } 649 595 650 596 /* ··· 665 625 static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size, 666 626 u32 address) 667 627 { 668 - struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd; 628 + struct sdma_buffer_descriptor *bd0 = sdma->bd0; 669 629 void *buf_virt; 670 630 dma_addr_t buf_phys; 671 631 int ret; ··· 721 681 writel_relaxed(val, sdma->regs + chnenbl); 722 682 } 723 683 684 + static struct sdma_desc *to_sdma_desc(struct dma_async_tx_descriptor *t) 685 + { 686 + return container_of(t, struct sdma_desc, vd.tx); 687 + } 688 + 689 + static void sdma_start_desc(struct sdma_channel *sdmac) 690 + { 691 + struct virt_dma_desc *vd = vchan_next_desc(&sdmac->vc); 692 + struct sdma_desc *desc; 693 + struct sdma_engine *sdma = sdmac->sdma; 694 + int channel = sdmac->channel; 695 + 696 + if (!vd) { 697 + sdmac->desc = NULL; 698 + return; 699 + } 700 + sdmac->desc = desc = to_sdma_desc(&vd->tx); 701 + /* 702 + * Do not delete the node in desc_issued list in cyclic mode, otherwise 703 + * the desc allocated will never be freed in vchan_dma_desc_free_list 704 + */ 705 + if (!(sdmac->flags & IMX_DMA_SG_LOOP)) 706 + list_del(&vd->node); 707 + 708 + sdma->channel_control[channel].base_bd_ptr = desc->bd_phys; 709 + sdma->channel_control[channel].current_bd_ptr = desc->bd_phys; 710 + sdma_enable_channel(sdma, sdmac->channel); 711 + } 712 + 724 713 static void sdma_update_channel_loop(struct sdma_channel *sdmac) 725 714 { 726 715 struct sdma_buffer_descriptor *bd; 727 716 int error = 0; 728 717 enum dma_status old_status = sdmac->status; 729 - unsigned long flags; 730 - 731 - spin_lock_irqsave(&sdmac->lock, flags); 732 - if (!sdmac->enabled) { 733 - spin_unlock_irqrestore(&sdmac->lock, flags); 734 - return; 735 - } 736 - spin_unlock_irqrestore(&sdmac->lock, flags); 737 718 738 719 /* 739 720 * loop mode. Iterate over descriptors, re-setup them and 740 721 * call callback function. 
741 722 */ 742 - while (1) { 743 - bd = &sdmac->bd[sdmac->buf_tail]; 723 + while (sdmac->desc) { 724 + struct sdma_desc *desc = sdmac->desc; 725 + 726 + bd = &desc->bd[desc->buf_tail]; 744 727 745 728 if (bd->mode.status & BD_DONE) 746 729 break; ··· 779 716 * the number of bytes present in the current buffer descriptor. 780 717 */ 781 718 782 - sdmac->chn_real_count = bd->mode.count; 719 + desc->chn_real_count = bd->mode.count; 783 720 bd->mode.status |= BD_DONE; 784 - bd->mode.count = sdmac->period_len; 785 - sdmac->buf_ptail = sdmac->buf_tail; 786 - sdmac->buf_tail = (sdmac->buf_tail + 1) % sdmac->num_bd; 721 + bd->mode.count = desc->period_len; 722 + desc->buf_ptail = desc->buf_tail; 723 + desc->buf_tail = (desc->buf_tail + 1) % desc->num_bd; 787 724 788 725 /* 789 726 * The callback is called from the interrupt context in order ··· 791 728 * SDMA transaction status by the time the client tasklet is 792 729 * executed. 793 730 */ 794 - 795 - dmaengine_desc_get_callback_invoke(&sdmac->desc, NULL); 731 + spin_unlock(&sdmac->vc.lock); 732 + dmaengine_desc_get_callback_invoke(&desc->vd.tx, NULL); 733 + spin_lock(&sdmac->vc.lock); 796 734 797 735 if (error) 798 736 sdmac->status = old_status; 799 737 } 800 738 } 801 739 802 - static void mxc_sdma_handle_channel_normal(unsigned long data) 740 + static void mxc_sdma_handle_channel_normal(struct sdma_channel *data) 803 741 { 804 742 struct sdma_channel *sdmac = (struct sdma_channel *) data; 805 743 struct sdma_buffer_descriptor *bd; 806 744 int i, error = 0; 807 745 808 - sdmac->chn_real_count = 0; 746 + sdmac->desc->chn_real_count = 0; 809 747 /* 810 748 * non loop mode. 
Iterate over all descriptors, collect 811 749 * errors and call callback function 812 750 */ 813 - for (i = 0; i < sdmac->num_bd; i++) { 814 - bd = &sdmac->bd[i]; 751 + for (i = 0; i < sdmac->desc->num_bd; i++) { 752 + bd = &sdmac->desc->bd[i]; 815 753 816 754 if (bd->mode.status & (BD_DONE | BD_RROR)) 817 755 error = -EIO; 818 - sdmac->chn_real_count += bd->mode.count; 756 + sdmac->desc->chn_real_count += bd->mode.count; 819 757 } 820 758 821 759 if (error) 822 760 sdmac->status = DMA_ERROR; 823 761 else 824 762 sdmac->status = DMA_COMPLETE; 825 - 826 - dma_cookie_complete(&sdmac->desc); 827 - 828 - dmaengine_desc_get_callback_invoke(&sdmac->desc, NULL); 829 763 } 830 764 831 765 static irqreturn_t sdma_int_handler(int irq, void *dev_id) ··· 838 778 while (stat) { 839 779 int channel = fls(stat) - 1; 840 780 struct sdma_channel *sdmac = &sdma->channel[channel]; 781 + struct sdma_desc *desc; 841 782 842 - if (sdmac->flags & IMX_DMA_SG_LOOP) 843 - sdma_update_channel_loop(sdmac); 844 - else 845 - tasklet_schedule(&sdmac->tasklet); 783 + spin_lock(&sdmac->vc.lock); 784 + desc = sdmac->desc; 785 + if (desc) { 786 + if (sdmac->flags & IMX_DMA_SG_LOOP) { 787 + sdma_update_channel_loop(sdmac); 788 + } else { 789 + mxc_sdma_handle_channel_normal(sdmac); 790 + vchan_cookie_complete(&desc->vd); 791 + sdma_start_desc(sdmac); 792 + } 793 + } 846 794 795 + spin_unlock(&sdmac->vc.lock); 847 796 __clear_bit(channel, &stat); 848 797 } 849 798 ··· 871 802 * These are needed once we start to support transfers between 872 803 * two peripherals or memory-to-memory transfers 873 804 */ 874 - int per_2_per = 0; 805 + int per_2_per = 0, emi_2_emi = 0; 875 806 876 807 sdmac->pc_from_device = 0; 877 808 sdmac->pc_to_device = 0; 878 809 sdmac->device_to_device = 0; 810 + sdmac->pc_to_pc = 0; 879 811 880 812 switch (peripheral_type) { 881 813 case IMX_DMATYPE_MEMORY: 814 + emi_2_emi = sdma->script_addrs->ap_2_ap_addr; 882 815 break; 883 816 case IMX_DMATYPE_DSP: 884 817 emi_2_per = 
sdma->script_addrs->bp_2_ap_addr; ··· 953 882 sdmac->pc_from_device = per_2_emi; 954 883 sdmac->pc_to_device = emi_2_per; 955 884 sdmac->device_to_device = per_2_per; 885 + sdmac->pc_to_pc = emi_2_emi; 956 886 } 957 887 958 888 static int sdma_load_context(struct sdma_channel *sdmac) ··· 962 890 int channel = sdmac->channel; 963 891 int load_address; 964 892 struct sdma_context_data *context = sdma->context; 965 - struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd; 893 + struct sdma_buffer_descriptor *bd0 = sdma->bd0; 966 894 int ret; 967 895 unsigned long flags; 968 896 ··· 970 898 load_address = sdmac->pc_from_device; 971 899 else if (sdmac->direction == DMA_DEV_TO_DEV) 972 900 load_address = sdmac->device_to_device; 901 + else if (sdmac->direction == DMA_MEM_TO_MEM) 902 + load_address = sdmac->pc_to_pc; 973 903 else 974 904 load_address = sdmac->pc_to_device; 975 905 ··· 1013 939 1014 940 static struct sdma_channel *to_sdma_chan(struct dma_chan *chan) 1015 941 { 1016 - return container_of(chan, struct sdma_channel, chan); 942 + return container_of(chan, struct sdma_channel, vc.chan); 1017 943 } 1018 944 1019 945 static int sdma_disable_channel(struct dma_chan *chan) ··· 1021 947 struct sdma_channel *sdmac = to_sdma_chan(chan); 1022 948 struct sdma_engine *sdma = sdmac->sdma; 1023 949 int channel = sdmac->channel; 1024 - unsigned long flags; 1025 950 1026 951 writel_relaxed(BIT(channel), sdma->regs + SDMA_H_STATSTOP); 1027 952 sdmac->status = DMA_ERROR; 1028 - 1029 - spin_lock_irqsave(&sdmac->lock, flags); 1030 - sdmac->enabled = false; 1031 - spin_unlock_irqrestore(&sdmac->lock, flags); 1032 953 1033 954 return 0; 1034 955 } 1035 956 1036 957 static int sdma_disable_channel_with_delay(struct dma_chan *chan) 1037 958 { 959 + struct sdma_channel *sdmac = to_sdma_chan(chan); 960 + unsigned long flags; 961 + LIST_HEAD(head); 962 + 1038 963 sdma_disable_channel(chan); 964 + spin_lock_irqsave(&sdmac->vc.lock, flags); 965 + vchan_get_all_descriptors(&sdmac->vc, 
&head); 966 + sdmac->desc = NULL; 967 + spin_unlock_irqrestore(&sdmac->vc.lock, flags); 968 + vchan_dma_desc_free_list(&sdmac->vc, &head); 1039 969 1040 970 /* 1041 971 * According to NXP R&D team a delay of one BD SDMA cost time ··· 1168 1090 return 0; 1169 1091 } 1170 1092 1171 - static int sdma_request_channel(struct sdma_channel *sdmac) 1093 + static int sdma_request_channel0(struct sdma_engine *sdma) 1172 1094 { 1173 - struct sdma_engine *sdma = sdmac->sdma; 1174 - int channel = sdmac->channel; 1175 1095 int ret = -EBUSY; 1176 1096 1177 - sdmac->bd = dma_zalloc_coherent(NULL, PAGE_SIZE, &sdmac->bd_phys, 1178 - GFP_KERNEL); 1179 - if (!sdmac->bd) { 1097 + sdma->bd0 = dma_zalloc_coherent(NULL, PAGE_SIZE, &sdma->bd0_phys, 1098 + GFP_NOWAIT); 1099 + if (!sdma->bd0) { 1180 1100 ret = -ENOMEM; 1181 1101 goto out; 1182 1102 } 1183 1103 1184 - sdma->channel_control[channel].base_bd_ptr = sdmac->bd_phys; 1185 - sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys; 1104 + sdma->channel_control[0].base_bd_ptr = sdma->bd0_phys; 1105 + sdma->channel_control[0].current_bd_ptr = sdma->bd0_phys; 1186 1106 1187 - sdma_set_channel_priority(sdmac, MXC_SDMA_DEFAULT_PRIORITY); 1107 + sdma_set_channel_priority(&sdma->channel[0], MXC_SDMA_DEFAULT_PRIORITY); 1188 1108 return 0; 1189 1109 out: 1190 1110 1191 1111 return ret; 1192 1112 } 1193 1113 1194 - static dma_cookie_t sdma_tx_submit(struct dma_async_tx_descriptor *tx) 1114 + 1115 + static int sdma_alloc_bd(struct sdma_desc *desc) 1195 1116 { 1196 - unsigned long flags; 1197 - struct sdma_channel *sdmac = to_sdma_chan(tx->chan); 1198 - dma_cookie_t cookie; 1117 + int ret = 0; 1199 1118 1200 - spin_lock_irqsave(&sdmac->lock, flags); 1119 + desc->bd = dma_pool_alloc(desc->sdmac->bd_pool, GFP_NOWAIT, 1120 + &desc->bd_phys); 1121 + if (!desc->bd) { 1122 + ret = -ENOMEM; 1123 + goto out; 1124 + } 1125 + out: 1126 + return ret; 1127 + } 1201 1128 1202 - cookie = dma_cookie_assign(tx); 1129 + static void sdma_free_bd(struct 
sdma_desc *desc) 1130 + { 1131 + dma_pool_free(desc->sdmac->bd_pool, desc->bd, desc->bd_phys); 1132 + } 1203 1133 1204 - spin_unlock_irqrestore(&sdmac->lock, flags); 1134 + static void sdma_desc_free(struct virt_dma_desc *vd) 1135 + { 1136 + struct sdma_desc *desc = container_of(vd, struct sdma_desc, vd); 1205 1137 1206 - return cookie; 1138 + sdma_free_bd(desc); 1139 + kfree(desc); 1207 1140 } 1208 1141 1209 1142 static int sdma_alloc_chan_resources(struct dma_chan *chan) 1210 1143 { 1211 1144 struct sdma_channel *sdmac = to_sdma_chan(chan); 1212 1145 struct imx_dma_data *data = chan->private; 1146 + struct imx_dma_data mem_data; 1213 1147 int prio, ret; 1214 1148 1215 - if (!data) 1216 - return -EINVAL; 1149 + /* 1150 + * MEMCPY may never setup chan->private by filter function such as 1151 + * dmatest, thus create 'struct imx_dma_data mem_data' for this case. 1152 + * Please note in any other slave case, you have to setup chan->private 1153 + * with 'struct imx_dma_data' in your own filter function if you want to 1154 + * request dma channel by dma_request_channel() rather than 1155 + * dma_request_slave_channel(). Othwise, 'MEMCPY in case?' will appear 1156 + * to warn you to correct your filter function. 
1157 + */ 1158 + if (!data) { 1159 + dev_dbg(sdmac->sdma->dev, "MEMCPY in case?\n"); 1160 + mem_data.priority = 2; 1161 + mem_data.peripheral_type = IMX_DMATYPE_MEMORY; 1162 + mem_data.dma_request = 0; 1163 + mem_data.dma_request2 = 0; 1164 + data = &mem_data; 1165 + 1166 + sdma_get_pc(sdmac, IMX_DMATYPE_MEMORY); 1167 + } 1217 1168 1218 1169 switch (data->priority) { 1219 1170 case DMA_PRIO_HIGH: ··· 1268 1161 if (ret) 1269 1162 goto disable_clk_ipg; 1270 1163 1271 - ret = sdma_request_channel(sdmac); 1272 - if (ret) 1273 - goto disable_clk_ahb; 1274 - 1275 1164 ret = sdma_set_channel_priority(sdmac, prio); 1276 1165 if (ret) 1277 1166 goto disable_clk_ahb; 1278 1167 1279 - dma_async_tx_descriptor_init(&sdmac->desc, chan); 1280 - sdmac->desc.tx_submit = sdma_tx_submit; 1281 - /* txd.flags will be overwritten in prep funcs */ 1282 - sdmac->desc.flags = DMA_CTRL_ACK; 1168 + sdmac->bd_pool = dma_pool_create("bd_pool", chan->device->dev, 1169 + sizeof(struct sdma_buffer_descriptor), 1170 + 32, 0); 1283 1171 1284 1172 return 0; 1285 1173 ··· 1290 1188 struct sdma_channel *sdmac = to_sdma_chan(chan); 1291 1189 struct sdma_engine *sdma = sdmac->sdma; 1292 1190 1293 - sdma_disable_channel(chan); 1191 + sdma_disable_channel_with_delay(chan); 1294 1192 1295 1193 if (sdmac->event_id0) 1296 1194 sdma_event_disable(sdmac, sdmac->event_id0); ··· 1302 1200 1303 1201 sdma_set_channel_priority(sdmac, 0); 1304 1202 1305 - dma_free_coherent(NULL, PAGE_SIZE, sdmac->bd, sdmac->bd_phys); 1306 - 1307 1203 clk_disable(sdma->clk_ipg); 1308 1204 clk_disable(sdma->clk_ahb); 1205 + 1206 + dma_pool_destroy(sdmac->bd_pool); 1207 + sdmac->bd_pool = NULL; 1208 + } 1209 + 1210 + static struct sdma_desc *sdma_transfer_init(struct sdma_channel *sdmac, 1211 + enum dma_transfer_direction direction, u32 bds) 1212 + { 1213 + struct sdma_desc *desc; 1214 + 1215 + desc = kzalloc((sizeof(*desc)), GFP_NOWAIT); 1216 + if (!desc) 1217 + goto err_out; 1218 + 1219 + sdmac->status = DMA_IN_PROGRESS; 1220 + 
sdmac->direction = direction; 1221 + sdmac->flags = 0; 1222 + 1223 + desc->chn_count = 0; 1224 + desc->chn_real_count = 0; 1225 + desc->buf_tail = 0; 1226 + desc->buf_ptail = 0; 1227 + desc->sdmac = sdmac; 1228 + desc->num_bd = bds; 1229 + 1230 + if (sdma_alloc_bd(desc)) 1231 + goto err_desc_out; 1232 + 1233 + /* No slave_config called in MEMCPY case, so do here */ 1234 + if (direction == DMA_MEM_TO_MEM) 1235 + sdma_config_ownership(sdmac, false, true, false); 1236 + 1237 + if (sdma_load_context(sdmac)) 1238 + goto err_desc_out; 1239 + 1240 + return desc; 1241 + 1242 + err_desc_out: 1243 + kfree(desc); 1244 + err_out: 1245 + return NULL; 1246 + } 1247 + 1248 + static struct dma_async_tx_descriptor *sdma_prep_memcpy( 1249 + struct dma_chan *chan, dma_addr_t dma_dst, 1250 + dma_addr_t dma_src, size_t len, unsigned long flags) 1251 + { 1252 + struct sdma_channel *sdmac = to_sdma_chan(chan); 1253 + struct sdma_engine *sdma = sdmac->sdma; 1254 + int channel = sdmac->channel; 1255 + size_t count; 1256 + int i = 0, param; 1257 + struct sdma_buffer_descriptor *bd; 1258 + struct sdma_desc *desc; 1259 + 1260 + if (!chan || !len) 1261 + return NULL; 1262 + 1263 + dev_dbg(sdma->dev, "memcpy: %pad->%pad, len=%zu, channel=%d.\n", 1264 + &dma_src, &dma_dst, len, channel); 1265 + 1266 + desc = sdma_transfer_init(sdmac, DMA_MEM_TO_MEM, 1267 + len / SDMA_BD_MAX_CNT + 1); 1268 + if (!desc) 1269 + return NULL; 1270 + 1271 + do { 1272 + count = min_t(size_t, len, SDMA_BD_MAX_CNT); 1273 + bd = &desc->bd[i]; 1274 + bd->buffer_addr = dma_src; 1275 + bd->ext_buffer_addr = dma_dst; 1276 + bd->mode.count = count; 1277 + desc->chn_count += count; 1278 + bd->mode.command = 0; 1279 + 1280 + dma_src += count; 1281 + dma_dst += count; 1282 + len -= count; 1283 + i++; 1284 + 1285 + param = BD_DONE | BD_EXTD | BD_CONT; 1286 + /* last bd */ 1287 + if (!len) { 1288 + param |= BD_INTR; 1289 + param |= BD_LAST; 1290 + param &= ~BD_CONT; 1291 + } 1292 + 1293 + dev_dbg(sdma->dev, "entry %d: count: %zd 
dma: 0x%x %s%s\n", 1294 + i, count, bd->buffer_addr, 1295 + param & BD_WRAP ? "wrap" : "", 1296 + param & BD_INTR ? " intr" : ""); 1297 + 1298 + bd->mode.status = param; 1299 + } while (len); 1300 + 1301 + return vchan_tx_prep(&sdmac->vc, &desc->vd, flags); 1309 1302 } 1310 1303 1311 1304 static struct dma_async_tx_descriptor *sdma_prep_slave_sg( ··· 1410 1213 { 1411 1214 struct sdma_channel *sdmac = to_sdma_chan(chan); 1412 1215 struct sdma_engine *sdma = sdmac->sdma; 1413 - int ret, i, count; 1216 + int i, count; 1414 1217 int channel = sdmac->channel; 1415 1218 struct scatterlist *sg; 1219 + struct sdma_desc *desc; 1416 1220 1417 - if (sdmac->status == DMA_IN_PROGRESS) 1418 - return NULL; 1419 - sdmac->status = DMA_IN_PROGRESS; 1420 - 1421 - sdmac->flags = 0; 1422 - 1423 - sdmac->buf_tail = 0; 1424 - sdmac->buf_ptail = 0; 1425 - sdmac->chn_real_count = 0; 1221 + desc = sdma_transfer_init(sdmac, direction, sg_len); 1222 + if (!desc) 1223 + goto err_out; 1426 1224 1427 1225 dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n", 1428 1226 sg_len, channel); 1429 1227 1430 - sdmac->direction = direction; 1431 - ret = sdma_load_context(sdmac); 1432 - if (ret) 1433 - goto err_out; 1434 - 1435 - if (sg_len > NUM_BD) { 1436 - dev_err(sdma->dev, "SDMA channel %d: maximum number of sg exceeded: %d > %d\n", 1437 - channel, sg_len, NUM_BD); 1438 - ret = -EINVAL; 1439 - goto err_out; 1440 - } 1441 - 1442 - sdmac->chn_count = 0; 1443 1228 for_each_sg(sgl, sg, sg_len, i) { 1444 - struct sdma_buffer_descriptor *bd = &sdmac->bd[i]; 1229 + struct sdma_buffer_descriptor *bd = &desc->bd[i]; 1445 1230 int param; 1446 1231 1447 1232 bd->buffer_addr = sg->dma_address; 1448 1233 1449 1234 count = sg_dma_len(sg); 1450 1235 1451 - if (count > 0xffff) { 1236 + if (count > SDMA_BD_MAX_CNT) { 1452 1237 dev_err(sdma->dev, "SDMA channel %d: maximum bytes for sg entry exceeded: %d > %d\n", 1453 - channel, count, 0xffff); 1454 - ret = -EINVAL; 1455 - goto err_out; 1238 + channel, count, 
SDMA_BD_MAX_CNT); 1239 + goto err_bd_out; 1456 1240 } 1457 1241 1458 1242 bd->mode.count = count; 1459 - sdmac->chn_count += count; 1243 + desc->chn_count += count; 1460 1244 1461 - if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES) { 1462 - ret = -EINVAL; 1463 - goto err_out; 1464 - } 1245 + if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES) 1246 + goto err_bd_out; 1465 1247 1466 1248 switch (sdmac->word_size) { 1467 1249 case DMA_SLAVE_BUSWIDTH_4_BYTES: 1468 1250 bd->mode.command = 0; 1469 1251 if (count & 3 || sg->dma_address & 3) 1470 - return NULL; 1252 + goto err_bd_out; 1471 1253 break; 1472 1254 case DMA_SLAVE_BUSWIDTH_2_BYTES: 1473 1255 bd->mode.command = 2; 1474 1256 if (count & 1 || sg->dma_address & 1) 1475 - return NULL; 1257 + goto err_bd_out; 1476 1258 break; 1477 1259 case DMA_SLAVE_BUSWIDTH_1_BYTE: 1478 1260 bd->mode.command = 1; 1479 1261 break; 1480 1262 default: 1481 - return NULL; 1263 + goto err_bd_out; 1482 1264 } 1483 1265 1484 1266 param = BD_DONE | BD_EXTD | BD_CONT; ··· 1476 1300 bd->mode.status = param; 1477 1301 } 1478 1302 1479 - sdmac->num_bd = sg_len; 1480 - sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys; 1481 - 1482 - return &sdmac->desc; 1303 + return vchan_tx_prep(&sdmac->vc, &desc->vd, flags); 1304 + err_bd_out: 1305 + sdma_free_bd(desc); 1306 + kfree(desc); 1483 1307 err_out: 1484 1308 sdmac->status = DMA_ERROR; 1485 1309 return NULL; ··· 1494 1318 struct sdma_engine *sdma = sdmac->sdma; 1495 1319 int num_periods = buf_len / period_len; 1496 1320 int channel = sdmac->channel; 1497 - int ret, i = 0, buf = 0; 1321 + int i = 0, buf = 0; 1322 + struct sdma_desc *desc; 1498 1323 1499 1324 dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel); 1500 1325 1501 - if (sdmac->status == DMA_IN_PROGRESS) 1502 - return NULL; 1326 + desc = sdma_transfer_init(sdmac, direction, num_periods); 1327 + if (!desc) 1328 + goto err_out; 1503 1329 1504 - sdmac->status = DMA_IN_PROGRESS; 1505 - 1506 - sdmac->buf_tail = 0; 1507 - 
sdmac->buf_ptail = 0; 1508 - sdmac->chn_real_count = 0; 1509 - sdmac->period_len = period_len; 1330 + desc->period_len = period_len; 1510 1331 1511 1332 sdmac->flags |= IMX_DMA_SG_LOOP; 1512 - sdmac->direction = direction; 1513 - ret = sdma_load_context(sdmac); 1514 - if (ret) 1515 - goto err_out; 1516 1333 1517 - if (num_periods > NUM_BD) { 1518 - dev_err(sdma->dev, "SDMA channel %d: maximum number of sg exceeded: %d > %d\n", 1519 - channel, num_periods, NUM_BD); 1520 - goto err_out; 1521 - } 1522 - 1523 - if (period_len > 0xffff) { 1334 + if (period_len > SDMA_BD_MAX_CNT) { 1524 1335 dev_err(sdma->dev, "SDMA channel %d: maximum period size exceeded: %zu > %d\n", 1525 - channel, period_len, 0xffff); 1526 - goto err_out; 1336 + channel, period_len, SDMA_BD_MAX_CNT); 1337 + goto err_bd_out; 1527 1338 } 1528 1339 1529 1340 while (buf < buf_len) { 1530 - struct sdma_buffer_descriptor *bd = &sdmac->bd[i]; 1341 + struct sdma_buffer_descriptor *bd = &desc->bd[i]; 1531 1342 int param; 1532 1343 1533 1344 bd->buffer_addr = dma_addr; ··· 1522 1359 bd->mode.count = period_len; 1523 1360 1524 1361 if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES) 1525 - goto err_out; 1362 + goto err_bd_out; 1526 1363 if (sdmac->word_size == DMA_SLAVE_BUSWIDTH_4_BYTES) 1527 1364 bd->mode.command = 0; 1528 1365 else ··· 1545 1382 i++; 1546 1383 } 1547 1384 1548 - sdmac->num_bd = num_periods; 1549 - sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys; 1550 - 1551 - return &sdmac->desc; 1385 + return vchan_tx_prep(&sdmac->vc, &desc->vd, flags); 1386 + err_bd_out: 1387 + sdma_free_bd(desc); 1388 + kfree(desc); 1552 1389 err_out: 1553 1390 sdmac->status = DMA_ERROR; 1554 1391 return NULL; ··· 1587 1424 struct dma_tx_state *txstate) 1588 1425 { 1589 1426 struct sdma_channel *sdmac = to_sdma_chan(chan); 1427 + struct sdma_desc *desc; 1590 1428 u32 residue; 1429 + struct virt_dma_desc *vd; 1430 + enum dma_status ret; 1431 + unsigned long flags; 1591 1432 1592 - if (sdmac->flags & 
IMX_DMA_SG_LOOP) 1593 - residue = (sdmac->num_bd - sdmac->buf_ptail) * 1594 - sdmac->period_len - sdmac->chn_real_count; 1595 - else 1596 - residue = sdmac->chn_count - sdmac->chn_real_count; 1433 + ret = dma_cookie_status(chan, cookie, txstate); 1434 + if (ret == DMA_COMPLETE || !txstate) 1435 + return ret; 1436 + 1437 + spin_lock_irqsave(&sdmac->vc.lock, flags); 1438 + vd = vchan_find_desc(&sdmac->vc, cookie); 1439 + if (vd) { 1440 + desc = to_sdma_desc(&vd->tx); 1441 + if (sdmac->flags & IMX_DMA_SG_LOOP) 1442 + residue = (desc->num_bd - desc->buf_ptail) * 1443 + desc->period_len - desc->chn_real_count; 1444 + else 1445 + residue = desc->chn_count - desc->chn_real_count; 1446 + } else if (sdmac->desc && sdmac->desc->vd.tx.cookie == cookie) { 1447 + residue = sdmac->desc->chn_count - sdmac->desc->chn_real_count; 1448 + } else { 1449 + residue = 0; 1450 + } 1451 + spin_unlock_irqrestore(&sdmac->vc.lock, flags); 1597 1452 1598 1453 dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie, 1599 1454 residue); ··· 1622 1441 static void sdma_issue_pending(struct dma_chan *chan) 1623 1442 { 1624 1443 struct sdma_channel *sdmac = to_sdma_chan(chan); 1625 - struct sdma_engine *sdma = sdmac->sdma; 1444 + unsigned long flags; 1626 1445 1627 - if (sdmac->status == DMA_IN_PROGRESS) 1628 - sdma_enable_channel(sdma, sdmac->channel); 1446 + spin_lock_irqsave(&sdmac->vc.lock, flags); 1447 + if (vchan_issue_pending(&sdmac->vc) && !sdmac->desc) 1448 + sdma_start_desc(sdmac); 1449 + spin_unlock_irqrestore(&sdmac->vc.lock, flags); 1629 1450 } 1630 1451 1631 1452 #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1 34 ··· 1833 1650 for (i = 0; i < MAX_DMA_CHANNELS; i++) 1834 1651 writel_relaxed(0, sdma->regs + SDMA_CHNPRI_0 + i * 4); 1835 1652 1836 - ret = sdma_request_channel(&sdma->channel[0]); 1653 + ret = sdma_request_channel0(sdma); 1837 1654 if (ret) 1838 1655 goto err_dma_alloc; 1839 1656 ··· 1988 1805 1989 1806 dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask); 1990 1807 
dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask); 1808 + dma_cap_set(DMA_MEMCPY, sdma->dma_device.cap_mask); 1991 1809 1992 1810 INIT_LIST_HEAD(&sdma->dma_device.channels); 1993 1811 /* Initialize channel parameters */ ··· 1996 1812 struct sdma_channel *sdmac = &sdma->channel[i]; 1997 1813 1998 1814 sdmac->sdma = sdma; 1999 - spin_lock_init(&sdmac->lock); 2000 1815 2001 - sdmac->chan.device = &sdma->dma_device; 2002 - dma_cookie_init(&sdmac->chan); 2003 1816 sdmac->channel = i; 2004 - 2005 - tasklet_init(&sdmac->tasklet, mxc_sdma_handle_channel_normal, 2006 - (unsigned long) sdmac); 1817 + sdmac->vc.desc_free = sdma_desc_free; 2007 1818 /* 2008 1819 * Add the channel to the DMAC list. Do not add channel 0 though 2009 1820 * because we need it internally in the SDMA driver. This also means 2010 1821 * that channel 0 in dmaengine counting matches sdma channel 1. 2011 1822 */ 2012 1823 if (i) 2013 - list_add_tail(&sdmac->chan.device_node, 2014 - &sdma->dma_device.channels); 1824 + vchan_init(&sdmac->vc, &sdma->dma_device); 2015 1825 } 2016 1826 2017 1827 ret = sdma_init(sdma); ··· 2055 1877 sdma->dma_device.dst_addr_widths = SDMA_DMA_BUSWIDTHS; 2056 1878 sdma->dma_device.directions = SDMA_DMA_DIRECTIONS; 2057 1879 sdma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; 1880 + sdma->dma_device.device_prep_dma_memcpy = sdma_prep_memcpy; 2058 1881 sdma->dma_device.device_issue_pending = sdma_issue_pending; 2059 1882 sdma->dma_device.dev->dma_parms = &sdma->dma_parms; 2060 - dma_set_max_seg_size(sdma->dma_device.dev, 65535); 1883 + dma_set_max_seg_size(sdma->dma_device.dev, SDMA_BD_MAX_CNT); 2061 1884 2062 1885 platform_set_drvdata(pdev, sdma); 2063 1886 ··· 2111 1932 for (i = 0; i < MAX_DMA_CHANNELS; i++) { 2112 1933 struct sdma_channel *sdmac = &sdma->channel[i]; 2113 1934 2114 - tasklet_kill(&sdmac->tasklet); 1935 + tasklet_kill(&sdmac->vc.task); 1936 + sdma_free_chan_resources(&sdmac->vc.chan); 2115 1937 } 2116 1938 2117 1939 
platform_set_drvdata(pdev, NULL);
+6
drivers/dma/ioat/dma.c
··· 688 688 { 689 689 u64 phys_complete; 690 690 691 + /* set the completion address register again */ 692 + writel(lower_32_bits(ioat_chan->completion_dma), 693 + ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW); 694 + writel(upper_32_bits(ioat_chan->completion_dma), 695 + ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH); 696 + 691 697 ioat_quiesce(ioat_chan, 0); 692 698 if (ioat_cleanup_preamble(ioat_chan, &phys_complete)) 693 699 __cleanup(ioat_chan, phys_complete);
+1 -7
drivers/dma/mic_x100_dma.c
··· 470 470 mic_dma_chan_mask_intr(ch); 471 471 } 472 472 473 - static void mic_dma_unregister_dma_device(struct mic_dma_device *mic_dma_dev) 474 - { 475 - dma_async_device_unregister(&mic_dma_dev->dma_dev); 476 - } 477 - 478 473 static int mic_dma_setup_irq(struct mic_dma_chan *ch) 479 474 { 480 475 ch->cookie = ··· 625 630 list_add_tail(&mic_dma_dev->mic_ch[i].api_ch.device_node, 626 631 &mic_dma_dev->dma_dev.channels); 627 632 } 628 - return dma_async_device_register(&mic_dma_dev->dma_dev); 633 + return dmaenginem_async_device_register(&mic_dma_dev->dma_dev); 629 634 } 630 635 631 636 /* ··· 673 678 674 679 static void mic_dma_dev_unreg(struct mic_dma_device *mic_dma_dev) 675 680 { 676 - mic_dma_unregister_dma_device(mic_dma_dev); 677 681 mic_dma_uninit(mic_dma_dev); 678 682 kfree(mic_dma_dev); 679 683 }
+10 -6
drivers/dma/mv_xor_v2.c
··· 174 174 int desc_size; 175 175 unsigned int npendings; 176 176 unsigned int hw_queue_idx; 177 + struct msi_desc *msi_desc; 177 178 }; 178 179 179 180 /** ··· 589 588 */ 590 589 dma_cookie_complete(&next_pending_sw_desc->async_tx); 591 590 592 - if (next_pending_sw_desc->async_tx.callback) 593 - next_pending_sw_desc->async_tx.callback( 594 - next_pending_sw_desc->async_tx.callback_param); 595 - 596 591 dma_descriptor_unmap(&next_pending_sw_desc->async_tx); 592 + dmaengine_desc_get_callback_invoke( 593 + &next_pending_sw_desc->async_tx, NULL); 597 594 } 598 595 599 596 dma_run_dependencies(&next_pending_sw_desc->async_tx); ··· 642 643 xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_SIZE_OFF); 643 644 644 645 /* write the DESQ address to the DMA enngine*/ 645 - writel(xor_dev->hw_desq & 0xFFFFFFFF, 646 + writel(lower_32_bits(xor_dev->hw_desq), 646 647 xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_BALR_OFF); 647 - writel((xor_dev->hw_desq & 0xFFFF00000000) >> 32, 648 + writel(upper_32_bits(xor_dev->hw_desq), 648 649 xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_BAHR_OFF); 649 650 650 651 /* ··· 779 780 msi_desc = first_msi_entry(&pdev->dev); 780 781 if (!msi_desc) 781 782 goto free_msi_irqs; 783 + xor_dev->msi_desc = msi_desc; 782 784 783 785 ret = devm_request_irq(&pdev->dev, msi_desc->irq, 784 786 mv_xor_v2_interrupt_handler, 0, ··· 897 897 xor_dev->desc_size * MV_XOR_V2_DESC_NUM, 898 898 xor_dev->hw_desq_virt, xor_dev->hw_desq); 899 899 900 + devm_free_irq(&pdev->dev, xor_dev->msi_desc->irq, xor_dev); 901 + 900 902 platform_msi_domain_free_irqs(&pdev->dev); 903 + 904 + tasklet_kill(&xor_dev->irq_tasklet); 901 905 902 906 clk_disable_unprepare(xor_dev->clk); 903 907
+1
drivers/dma/nbpfaxi.c
··· 479 479 480 480 default: 481 481 pr_warn("%s(): invalid bus width %u\n", __func__, width); 482 + /* fall through */ 482 483 case DMA_SLAVE_BUSWIDTH_1_BYTE: 483 484 size = burst; 484 485 }
+971
drivers/dma/owl-dma.c
··· 1 + // SPDX-License-Identifier: GPL-2.0+ 2 + // 3 + // Actions Semi Owl SoCs DMA driver 4 + // 5 + // Copyright (c) 2014 Actions Semi Inc. 6 + // Author: David Liu <liuwei@actions-semi.com> 7 + // 8 + // Copyright (c) 2018 Linaro Ltd. 9 + // Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org> 10 + 11 + #include <linux/bitops.h> 12 + #include <linux/clk.h> 13 + #include <linux/delay.h> 14 + #include <linux/dmaengine.h> 15 + #include <linux/dma-mapping.h> 16 + #include <linux/dmapool.h> 17 + #include <linux/err.h> 18 + #include <linux/init.h> 19 + #include <linux/interrupt.h> 20 + #include <linux/io.h> 21 + #include <linux/mm.h> 22 + #include <linux/module.h> 23 + #include <linux/of_device.h> 24 + #include <linux/slab.h> 25 + #include "virt-dma.h" 26 + 27 + #define OWL_DMA_FRAME_MAX_LENGTH 0xfffff 28 + 29 + /* Global DMA Controller Registers */ 30 + #define OWL_DMA_IRQ_PD0 0x00 31 + #define OWL_DMA_IRQ_PD1 0x04 32 + #define OWL_DMA_IRQ_PD2 0x08 33 + #define OWL_DMA_IRQ_PD3 0x0C 34 + #define OWL_DMA_IRQ_EN0 0x10 35 + #define OWL_DMA_IRQ_EN1 0x14 36 + #define OWL_DMA_IRQ_EN2 0x18 37 + #define OWL_DMA_IRQ_EN3 0x1C 38 + #define OWL_DMA_SECURE_ACCESS_CTL 0x20 39 + #define OWL_DMA_NIC_QOS 0x24 40 + #define OWL_DMA_DBGSEL 0x28 41 + #define OWL_DMA_IDLE_STAT 0x2C 42 + 43 + /* Channel Registers */ 44 + #define OWL_DMA_CHAN_BASE(i) (0x100 + (i) * 0x100) 45 + #define OWL_DMAX_MODE 0x00 46 + #define OWL_DMAX_SOURCE 0x04 47 + #define OWL_DMAX_DESTINATION 0x08 48 + #define OWL_DMAX_FRAME_LEN 0x0C 49 + #define OWL_DMAX_FRAME_CNT 0x10 50 + #define OWL_DMAX_REMAIN_FRAME_CNT 0x14 51 + #define OWL_DMAX_REMAIN_CNT 0x18 52 + #define OWL_DMAX_SOURCE_STRIDE 0x1C 53 + #define OWL_DMAX_DESTINATION_STRIDE 0x20 54 + #define OWL_DMAX_START 0x24 55 + #define OWL_DMAX_PAUSE 0x28 56 + #define OWL_DMAX_CHAINED_CTL 0x2C 57 + #define OWL_DMAX_CONSTANT 0x30 58 + #define OWL_DMAX_LINKLIST_CTL 0x34 59 + #define OWL_DMAX_NEXT_DESCRIPTOR 0x38 60 + #define OWL_DMAX_CURRENT_DESCRIPTOR_NUM 
0x3C 61 + #define OWL_DMAX_INT_CTL 0x40 62 + #define OWL_DMAX_INT_STATUS 0x44 63 + #define OWL_DMAX_CURRENT_SOURCE_POINTER 0x48 64 + #define OWL_DMAX_CURRENT_DESTINATION_POINTER 0x4C 65 + 66 + /* OWL_DMAX_MODE Bits */ 67 + #define OWL_DMA_MODE_TS(x) (((x) & GENMASK(5, 0)) << 0) 68 + #define OWL_DMA_MODE_ST(x) (((x) & GENMASK(1, 0)) << 8) 69 + #define OWL_DMA_MODE_ST_DEV OWL_DMA_MODE_ST(0) 70 + #define OWL_DMA_MODE_ST_DCU OWL_DMA_MODE_ST(2) 71 + #define OWL_DMA_MODE_ST_SRAM OWL_DMA_MODE_ST(3) 72 + #define OWL_DMA_MODE_DT(x) (((x) & GENMASK(1, 0)) << 10) 73 + #define OWL_DMA_MODE_DT_DEV OWL_DMA_MODE_DT(0) 74 + #define OWL_DMA_MODE_DT_DCU OWL_DMA_MODE_DT(2) 75 + #define OWL_DMA_MODE_DT_SRAM OWL_DMA_MODE_DT(3) 76 + #define OWL_DMA_MODE_SAM(x) (((x) & GENMASK(1, 0)) << 16) 77 + #define OWL_DMA_MODE_SAM_CONST OWL_DMA_MODE_SAM(0) 78 + #define OWL_DMA_MODE_SAM_INC OWL_DMA_MODE_SAM(1) 79 + #define OWL_DMA_MODE_SAM_STRIDE OWL_DMA_MODE_SAM(2) 80 + #define OWL_DMA_MODE_DAM(x) (((x) & GENMASK(1, 0)) << 18) 81 + #define OWL_DMA_MODE_DAM_CONST OWL_DMA_MODE_DAM(0) 82 + #define OWL_DMA_MODE_DAM_INC OWL_DMA_MODE_DAM(1) 83 + #define OWL_DMA_MODE_DAM_STRIDE OWL_DMA_MODE_DAM(2) 84 + #define OWL_DMA_MODE_PW(x) (((x) & GENMASK(2, 0)) << 20) 85 + #define OWL_DMA_MODE_CB BIT(23) 86 + #define OWL_DMA_MODE_NDDBW(x) (((x) & 0x1) << 28) 87 + #define OWL_DMA_MODE_NDDBW_32BIT OWL_DMA_MODE_NDDBW(0) 88 + #define OWL_DMA_MODE_NDDBW_8BIT OWL_DMA_MODE_NDDBW(1) 89 + #define OWL_DMA_MODE_CFE BIT(29) 90 + #define OWL_DMA_MODE_LME BIT(30) 91 + #define OWL_DMA_MODE_CME BIT(31) 92 + 93 + /* OWL_DMAX_LINKLIST_CTL Bits */ 94 + #define OWL_DMA_LLC_SAV(x) (((x) & GENMASK(1, 0)) << 8) 95 + #define OWL_DMA_LLC_SAV_INC OWL_DMA_LLC_SAV(0) 96 + #define OWL_DMA_LLC_SAV_LOAD_NEXT OWL_DMA_LLC_SAV(1) 97 + #define OWL_DMA_LLC_SAV_LOAD_PREV OWL_DMA_LLC_SAV(2) 98 + #define OWL_DMA_LLC_DAV(x) (((x) & GENMASK(1, 0)) << 10) 99 + #define OWL_DMA_LLC_DAV_INC OWL_DMA_LLC_DAV(0) 100 + #define OWL_DMA_LLC_DAV_LOAD_NEXT 
OWL_DMA_LLC_DAV(1) 101 + #define OWL_DMA_LLC_DAV_LOAD_PREV OWL_DMA_LLC_DAV(2) 102 + #define OWL_DMA_LLC_SUSPEND BIT(16) 103 + 104 + /* OWL_DMAX_INT_CTL Bits */ 105 + #define OWL_DMA_INTCTL_BLOCK BIT(0) 106 + #define OWL_DMA_INTCTL_SUPER_BLOCK BIT(1) 107 + #define OWL_DMA_INTCTL_FRAME BIT(2) 108 + #define OWL_DMA_INTCTL_HALF_FRAME BIT(3) 109 + #define OWL_DMA_INTCTL_LAST_FRAME BIT(4) 110 + 111 + /* OWL_DMAX_INT_STATUS Bits */ 112 + #define OWL_DMA_INTSTAT_BLOCK BIT(0) 113 + #define OWL_DMA_INTSTAT_SUPER_BLOCK BIT(1) 114 + #define OWL_DMA_INTSTAT_FRAME BIT(2) 115 + #define OWL_DMA_INTSTAT_HALF_FRAME BIT(3) 116 + #define OWL_DMA_INTSTAT_LAST_FRAME BIT(4) 117 + 118 + /* Pack shift and newshift in a single word */ 119 + #define BIT_FIELD(val, width, shift, newshift) \ 120 + ((((val) >> (shift)) & ((BIT(width)) - 1)) << (newshift)) 121 + 122 + /** 123 + * struct owl_dma_lli_hw - Hardware link list for dma transfer 124 + * @next_lli: physical address of the next link list 125 + * @saddr: source physical address 126 + * @daddr: destination physical address 127 + * @flen: frame length 128 + * @fcnt: frame count 129 + * @src_stride: source stride 130 + * @dst_stride: destination stride 131 + * @ctrla: dma_mode and linklist ctrl config 132 + * @ctrlb: interrupt config 133 + * @const_num: data for constant fill 134 + */ 135 + struct owl_dma_lli_hw { 136 + u32 next_lli; 137 + u32 saddr; 138 + u32 daddr; 139 + u32 flen:20; 140 + u32 fcnt:12; 141 + u32 src_stride; 142 + u32 dst_stride; 143 + u32 ctrla; 144 + u32 ctrlb; 145 + u32 const_num; 146 + }; 147 + 148 + /** 149 + * struct owl_dma_lli - Link list for dma transfer 150 + * @hw: hardware link list 151 + * @phys: physical address of hardware link list 152 + * @node: node for txd's lli_list 153 + */ 154 + struct owl_dma_lli { 155 + struct owl_dma_lli_hw hw; 156 + dma_addr_t phys; 157 + struct list_head node; 158 + }; 159 + 160 + /** 161 + * struct owl_dma_txd - Wrapper for struct dma_async_tx_descriptor 162 + * @vd: virtual DMA 
descriptor 163 + * @lli_list: link list of lli nodes 164 + */ 165 + struct owl_dma_txd { 166 + struct virt_dma_desc vd; 167 + struct list_head lli_list; 168 + }; 169 + 170 + /** 171 + * struct owl_dma_pchan - Holder for the physical channels 172 + * @id: physical index to this channel 173 + * @base: virtual memory base for the dma channel 174 + * @vchan: the virtual channel currently being served by this physical channel 175 + * @lock: a lock to use when altering an instance of this struct 176 + */ 177 + struct owl_dma_pchan { 178 + u32 id; 179 + void __iomem *base; 180 + struct owl_dma_vchan *vchan; 181 + spinlock_t lock; 182 + }; 183 + 184 + /** 185 + * struct owl_dma_pchan - Wrapper for DMA ENGINE channel 186 + * @vc: wrappped virtual channel 187 + * @pchan: the physical channel utilized by this channel 188 + * @txd: active transaction on this channel 189 + */ 190 + struct owl_dma_vchan { 191 + struct virt_dma_chan vc; 192 + struct owl_dma_pchan *pchan; 193 + struct owl_dma_txd *txd; 194 + }; 195 + 196 + /** 197 + * struct owl_dma - Holder for the Owl DMA controller 198 + * @dma: dma engine for this instance 199 + * @base: virtual memory base for the DMA controller 200 + * @clk: clock for the DMA controller 201 + * @lock: a lock to use when change DMA controller global register 202 + * @lli_pool: a pool for the LLI descriptors 203 + * @nr_pchans: the number of physical channels 204 + * @pchans: array of data for the physical channels 205 + * @nr_vchans: the number of physical channels 206 + * @vchans: array of data for the physical channels 207 + */ 208 + struct owl_dma { 209 + struct dma_device dma; 210 + void __iomem *base; 211 + struct clk *clk; 212 + spinlock_t lock; 213 + struct dma_pool *lli_pool; 214 + int irq; 215 + 216 + unsigned int nr_pchans; 217 + struct owl_dma_pchan *pchans; 218 + 219 + unsigned int nr_vchans; 220 + struct owl_dma_vchan *vchans; 221 + }; 222 + 223 + static void pchan_update(struct owl_dma_pchan *pchan, u32 reg, 224 + u32 val, bool 
state) 225 + { 226 + u32 regval; 227 + 228 + regval = readl(pchan->base + reg); 229 + 230 + if (state) 231 + regval |= val; 232 + else 233 + regval &= ~val; 234 + 235 + writel(val, pchan->base + reg); 236 + } 237 + 238 + static void pchan_writel(struct owl_dma_pchan *pchan, u32 reg, u32 data) 239 + { 240 + writel(data, pchan->base + reg); 241 + } 242 + 243 + static u32 pchan_readl(struct owl_dma_pchan *pchan, u32 reg) 244 + { 245 + return readl(pchan->base + reg); 246 + } 247 + 248 + static void dma_update(struct owl_dma *od, u32 reg, u32 val, bool state) 249 + { 250 + u32 regval; 251 + 252 + regval = readl(od->base + reg); 253 + 254 + if (state) 255 + regval |= val; 256 + else 257 + regval &= ~val; 258 + 259 + writel(val, od->base + reg); 260 + } 261 + 262 + static void dma_writel(struct owl_dma *od, u32 reg, u32 data) 263 + { 264 + writel(data, od->base + reg); 265 + } 266 + 267 + static u32 dma_readl(struct owl_dma *od, u32 reg) 268 + { 269 + return readl(od->base + reg); 270 + } 271 + 272 + static inline struct owl_dma *to_owl_dma(struct dma_device *dd) 273 + { 274 + return container_of(dd, struct owl_dma, dma); 275 + } 276 + 277 + static struct device *chan2dev(struct dma_chan *chan) 278 + { 279 + return &chan->dev->device; 280 + } 281 + 282 + static inline struct owl_dma_vchan *to_owl_vchan(struct dma_chan *chan) 283 + { 284 + return container_of(chan, struct owl_dma_vchan, vc.chan); 285 + } 286 + 287 + static inline struct owl_dma_txd *to_owl_txd(struct dma_async_tx_descriptor *tx) 288 + { 289 + return container_of(tx, struct owl_dma_txd, vd.tx); 290 + } 291 + 292 + static inline u32 llc_hw_ctrla(u32 mode, u32 llc_ctl) 293 + { 294 + u32 ctl; 295 + 296 + ctl = BIT_FIELD(mode, 4, 28, 28) | 297 + BIT_FIELD(mode, 8, 16, 20) | 298 + BIT_FIELD(mode, 4, 8, 16) | 299 + BIT_FIELD(mode, 6, 0, 10) | 300 + BIT_FIELD(llc_ctl, 2, 10, 8) | 301 + BIT_FIELD(llc_ctl, 2, 8, 6); 302 + 303 + return ctl; 304 + } 305 + 306 + static inline u32 llc_hw_ctrlb(u32 int_ctl) 307 + { 308 
+ u32 ctl; 309 + 310 + ctl = BIT_FIELD(int_ctl, 7, 0, 18); 311 + 312 + return ctl; 313 + } 314 + 315 + static void owl_dma_free_lli(struct owl_dma *od, 316 + struct owl_dma_lli *lli) 317 + { 318 + list_del(&lli->node); 319 + dma_pool_free(od->lli_pool, lli, lli->phys); 320 + } 321 + 322 + static struct owl_dma_lli *owl_dma_alloc_lli(struct owl_dma *od) 323 + { 324 + struct owl_dma_lli *lli; 325 + dma_addr_t phys; 326 + 327 + lli = dma_pool_alloc(od->lli_pool, GFP_NOWAIT, &phys); 328 + if (!lli) 329 + return NULL; 330 + 331 + INIT_LIST_HEAD(&lli->node); 332 + lli->phys = phys; 333 + 334 + return lli; 335 + } 336 + 337 + static struct owl_dma_lli *owl_dma_add_lli(struct owl_dma_txd *txd, 338 + struct owl_dma_lli *prev, 339 + struct owl_dma_lli *next) 340 + { 341 + list_add_tail(&next->node, &txd->lli_list); 342 + 343 + if (prev) { 344 + prev->hw.next_lli = next->phys; 345 + prev->hw.ctrla |= llc_hw_ctrla(OWL_DMA_MODE_LME, 0); 346 + } 347 + 348 + return next; 349 + } 350 + 351 + static inline int owl_dma_cfg_lli(struct owl_dma_vchan *vchan, 352 + struct owl_dma_lli *lli, 353 + dma_addr_t src, dma_addr_t dst, 354 + u32 len, enum dma_transfer_direction dir) 355 + { 356 + struct owl_dma_lli_hw *hw = &lli->hw; 357 + u32 mode; 358 + 359 + mode = OWL_DMA_MODE_PW(0); 360 + 361 + switch (dir) { 362 + case DMA_MEM_TO_MEM: 363 + mode |= OWL_DMA_MODE_TS(0) | OWL_DMA_MODE_ST_DCU | 364 + OWL_DMA_MODE_DT_DCU | OWL_DMA_MODE_SAM_INC | 365 + OWL_DMA_MODE_DAM_INC; 366 + 367 + break; 368 + default: 369 + return -EINVAL; 370 + } 371 + 372 + hw->next_lli = 0; /* One link list by default */ 373 + hw->saddr = src; 374 + hw->daddr = dst; 375 + 376 + hw->fcnt = 1; /* Frame count fixed as 1 */ 377 + hw->flen = len; /* Max frame length is 1MB */ 378 + hw->src_stride = 0; 379 + hw->dst_stride = 0; 380 + hw->ctrla = llc_hw_ctrla(mode, 381 + OWL_DMA_LLC_SAV_LOAD_NEXT | 382 + OWL_DMA_LLC_DAV_LOAD_NEXT); 383 + 384 + hw->ctrlb = llc_hw_ctrlb(OWL_DMA_INTCTL_SUPER_BLOCK); 385 + 386 + return 0; 387 + } 
388 + 389 + static struct owl_dma_pchan *owl_dma_get_pchan(struct owl_dma *od, 390 + struct owl_dma_vchan *vchan) 391 + { 392 + struct owl_dma_pchan *pchan = NULL; 393 + unsigned long flags; 394 + int i; 395 + 396 + for (i = 0; i < od->nr_pchans; i++) { 397 + pchan = &od->pchans[i]; 398 + 399 + spin_lock_irqsave(&pchan->lock, flags); 400 + if (!pchan->vchan) { 401 + pchan->vchan = vchan; 402 + spin_unlock_irqrestore(&pchan->lock, flags); 403 + break; 404 + } 405 + 406 + spin_unlock_irqrestore(&pchan->lock, flags); 407 + } 408 + 409 + return pchan; 410 + } 411 + 412 + static int owl_dma_pchan_busy(struct owl_dma *od, struct owl_dma_pchan *pchan) 413 + { 414 + unsigned int val; 415 + 416 + val = dma_readl(od, OWL_DMA_IDLE_STAT); 417 + 418 + return !(val & (1 << pchan->id)); 419 + } 420 + 421 + static void owl_dma_terminate_pchan(struct owl_dma *od, 422 + struct owl_dma_pchan *pchan) 423 + { 424 + unsigned long flags; 425 + u32 irq_pd; 426 + 427 + pchan_writel(pchan, OWL_DMAX_START, 0); 428 + pchan_update(pchan, OWL_DMAX_INT_STATUS, 0xff, false); 429 + 430 + spin_lock_irqsave(&od->lock, flags); 431 + dma_update(od, OWL_DMA_IRQ_EN0, (1 << pchan->id), false); 432 + 433 + irq_pd = dma_readl(od, OWL_DMA_IRQ_PD0); 434 + if (irq_pd & (1 << pchan->id)) { 435 + dev_warn(od->dma.dev, 436 + "terminating pchan %d that still has pending irq\n", 437 + pchan->id); 438 + dma_writel(od, OWL_DMA_IRQ_PD0, (1 << pchan->id)); 439 + } 440 + 441 + pchan->vchan = NULL; 442 + 443 + spin_unlock_irqrestore(&od->lock, flags); 444 + } 445 + 446 + static int owl_dma_start_next_txd(struct owl_dma_vchan *vchan) 447 + { 448 + struct owl_dma *od = to_owl_dma(vchan->vc.chan.device); 449 + struct virt_dma_desc *vd = vchan_next_desc(&vchan->vc); 450 + struct owl_dma_pchan *pchan = vchan->pchan; 451 + struct owl_dma_txd *txd = to_owl_txd(&vd->tx); 452 + struct owl_dma_lli *lli; 453 + unsigned long flags; 454 + u32 int_ctl; 455 + 456 + list_del(&vd->node); 457 + 458 + vchan->txd = txd; 459 + 460 + /* Wait 
for channel inactive */ 461 + while (owl_dma_pchan_busy(od, pchan)) 462 + cpu_relax(); 463 + 464 + lli = list_first_entry(&txd->lli_list, 465 + struct owl_dma_lli, node); 466 + 467 + int_ctl = OWL_DMA_INTCTL_SUPER_BLOCK; 468 + 469 + pchan_writel(pchan, OWL_DMAX_MODE, OWL_DMA_MODE_LME); 470 + pchan_writel(pchan, OWL_DMAX_LINKLIST_CTL, 471 + OWL_DMA_LLC_SAV_LOAD_NEXT | OWL_DMA_LLC_DAV_LOAD_NEXT); 472 + pchan_writel(pchan, OWL_DMAX_NEXT_DESCRIPTOR, lli->phys); 473 + pchan_writel(pchan, OWL_DMAX_INT_CTL, int_ctl); 474 + 475 + /* Clear IRQ status for this pchan */ 476 + pchan_update(pchan, OWL_DMAX_INT_STATUS, 0xff, false); 477 + 478 + spin_lock_irqsave(&od->lock, flags); 479 + 480 + dma_update(od, OWL_DMA_IRQ_EN0, (1 << pchan->id), true); 481 + 482 + spin_unlock_irqrestore(&od->lock, flags); 483 + 484 + dev_dbg(chan2dev(&vchan->vc.chan), "starting pchan %d\n", pchan->id); 485 + 486 + /* Start DMA transfer for this pchan */ 487 + pchan_writel(pchan, OWL_DMAX_START, 0x1); 488 + 489 + return 0; 490 + } 491 + 492 + static void owl_dma_phy_free(struct owl_dma *od, struct owl_dma_vchan *vchan) 493 + { 494 + /* Ensure that the physical channel is stopped */ 495 + owl_dma_terminate_pchan(od, vchan->pchan); 496 + 497 + vchan->pchan = NULL; 498 + } 499 + 500 + static irqreturn_t owl_dma_interrupt(int irq, void *dev_id) 501 + { 502 + struct owl_dma *od = dev_id; 503 + struct owl_dma_vchan *vchan; 504 + struct owl_dma_pchan *pchan; 505 + unsigned long pending; 506 + int i; 507 + unsigned int global_irq_pending, chan_irq_pending; 508 + 509 + spin_lock(&od->lock); 510 + 511 + pending = dma_readl(od, OWL_DMA_IRQ_PD0); 512 + 513 + /* Clear IRQ status for each pchan */ 514 + for_each_set_bit(i, &pending, od->nr_pchans) { 515 + pchan = &od->pchans[i]; 516 + pchan_update(pchan, OWL_DMAX_INT_STATUS, 0xff, false); 517 + } 518 + 519 + /* Clear pending IRQ */ 520 + dma_writel(od, OWL_DMA_IRQ_PD0, pending); 521 + 522 + /* Check missed pending IRQ */ 523 + for (i = 0; i < od->nr_pchans; i++) { 
524 + pchan = &od->pchans[i]; 525 + chan_irq_pending = pchan_readl(pchan, OWL_DMAX_INT_CTL) & 526 + pchan_readl(pchan, OWL_DMAX_INT_STATUS); 527 + 528 + /* Dummy read to ensure OWL_DMA_IRQ_PD0 value is updated */ 529 + dma_readl(od, OWL_DMA_IRQ_PD0); 530 + 531 + global_irq_pending = dma_readl(od, OWL_DMA_IRQ_PD0); 532 + 533 + if (chan_irq_pending && !(global_irq_pending & BIT(i))) { 534 + dev_dbg(od->dma.dev, 535 + "global and channel IRQ pending match err\n"); 536 + 537 + /* Clear IRQ status for this pchan */ 538 + pchan_update(pchan, OWL_DMAX_INT_STATUS, 539 + 0xff, false); 540 + 541 + /* Update global IRQ pending */ 542 + pending |= BIT(i); 543 + } 544 + } 545 + 546 + spin_unlock(&od->lock); 547 + 548 + for_each_set_bit(i, &pending, od->nr_pchans) { 549 + struct owl_dma_txd *txd; 550 + 551 + pchan = &od->pchans[i]; 552 + 553 + vchan = pchan->vchan; 554 + if (!vchan) { 555 + dev_warn(od->dma.dev, "no vchan attached on pchan %d\n", 556 + pchan->id); 557 + continue; 558 + } 559 + 560 + spin_lock(&vchan->vc.lock); 561 + 562 + txd = vchan->txd; 563 + if (txd) { 564 + vchan->txd = NULL; 565 + 566 + vchan_cookie_complete(&txd->vd); 567 + 568 + /* 569 + * Start the next descriptor (if any), 570 + * otherwise free this channel. 
571 + */ 572 + if (vchan_next_desc(&vchan->vc)) 573 + owl_dma_start_next_txd(vchan); 574 + else 575 + owl_dma_phy_free(od, vchan); 576 + } 577 + 578 + spin_unlock(&vchan->vc.lock); 579 + } 580 + 581 + return IRQ_HANDLED; 582 + } 583 + 584 + static void owl_dma_free_txd(struct owl_dma *od, struct owl_dma_txd *txd) 585 + { 586 + struct owl_dma_lli *lli, *_lli; 587 + 588 + if (unlikely(!txd)) 589 + return; 590 + 591 + list_for_each_entry_safe(lli, _lli, &txd->lli_list, node) 592 + owl_dma_free_lli(od, lli); 593 + 594 + kfree(txd); 595 + } 596 + 597 + static void owl_dma_desc_free(struct virt_dma_desc *vd) 598 + { 599 + struct owl_dma *od = to_owl_dma(vd->tx.chan->device); 600 + struct owl_dma_txd *txd = to_owl_txd(&vd->tx); 601 + 602 + owl_dma_free_txd(od, txd); 603 + } 604 + 605 + static int owl_dma_terminate_all(struct dma_chan *chan) 606 + { 607 + struct owl_dma *od = to_owl_dma(chan->device); 608 + struct owl_dma_vchan *vchan = to_owl_vchan(chan); 609 + unsigned long flags; 610 + LIST_HEAD(head); 611 + 612 + spin_lock_irqsave(&vchan->vc.lock, flags); 613 + 614 + if (vchan->pchan) 615 + owl_dma_phy_free(od, vchan); 616 + 617 + if (vchan->txd) { 618 + owl_dma_desc_free(&vchan->txd->vd); 619 + vchan->txd = NULL; 620 + } 621 + 622 + vchan_get_all_descriptors(&vchan->vc, &head); 623 + vchan_dma_desc_free_list(&vchan->vc, &head); 624 + 625 + spin_unlock_irqrestore(&vchan->vc.lock, flags); 626 + 627 + return 0; 628 + } 629 + 630 + static u32 owl_dma_getbytes_chan(struct owl_dma_vchan *vchan) 631 + { 632 + struct owl_dma_pchan *pchan; 633 + struct owl_dma_txd *txd; 634 + struct owl_dma_lli *lli; 635 + unsigned int next_lli_phy; 636 + size_t bytes; 637 + 638 + pchan = vchan->pchan; 639 + txd = vchan->txd; 640 + 641 + if (!pchan || !txd) 642 + return 0; 643 + 644 + /* Get remain count of current node in link list */ 645 + bytes = pchan_readl(pchan, OWL_DMAX_REMAIN_CNT); 646 + 647 + /* Loop through the preceding nodes to get total remaining bytes */ 648 + if 
(pchan_readl(pchan, OWL_DMAX_MODE) & OWL_DMA_MODE_LME) { 649 + next_lli_phy = pchan_readl(pchan, OWL_DMAX_NEXT_DESCRIPTOR); 650 + list_for_each_entry(lli, &txd->lli_list, node) { 651 + /* Start from the next active node */ 652 + if (lli->phys == next_lli_phy) { 653 + list_for_each_entry(lli, &txd->lli_list, node) 654 + bytes += lli->hw.flen; 655 + break; 656 + } 657 + } 658 + } 659 + 660 + return bytes; 661 + } 662 + 663 + static enum dma_status owl_dma_tx_status(struct dma_chan *chan, 664 + dma_cookie_t cookie, 665 + struct dma_tx_state *state) 666 + { 667 + struct owl_dma_vchan *vchan = to_owl_vchan(chan); 668 + struct owl_dma_lli *lli; 669 + struct virt_dma_desc *vd; 670 + struct owl_dma_txd *txd; 671 + enum dma_status ret; 672 + unsigned long flags; 673 + size_t bytes = 0; 674 + 675 + ret = dma_cookie_status(chan, cookie, state); 676 + if (ret == DMA_COMPLETE || !state) 677 + return ret; 678 + 679 + spin_lock_irqsave(&vchan->vc.lock, flags); 680 + 681 + vd = vchan_find_desc(&vchan->vc, cookie); 682 + if (vd) { 683 + txd = to_owl_txd(&vd->tx); 684 + list_for_each_entry(lli, &txd->lli_list, node) 685 + bytes += lli->hw.flen; 686 + } else { 687 + bytes = owl_dma_getbytes_chan(vchan); 688 + } 689 + 690 + spin_unlock_irqrestore(&vchan->vc.lock, flags); 691 + 692 + dma_set_residue(state, bytes); 693 + 694 + return ret; 695 + } 696 + 697 + static void owl_dma_phy_alloc_and_start(struct owl_dma_vchan *vchan) 698 + { 699 + struct owl_dma *od = to_owl_dma(vchan->vc.chan.device); 700 + struct owl_dma_pchan *pchan; 701 + 702 + pchan = owl_dma_get_pchan(od, vchan); 703 + if (!pchan) 704 + return; 705 + 706 + dev_dbg(od->dma.dev, "allocated pchan %d\n", pchan->id); 707 + 708 + vchan->pchan = pchan; 709 + owl_dma_start_next_txd(vchan); 710 + } 711 + 712 + static void owl_dma_issue_pending(struct dma_chan *chan) 713 + { 714 + struct owl_dma_vchan *vchan = to_owl_vchan(chan); 715 + unsigned long flags; 716 + 717 + spin_lock_irqsave(&vchan->vc.lock, flags); 718 + if 
(vchan_issue_pending(&vchan->vc)) { 719 + if (!vchan->pchan) 720 + owl_dma_phy_alloc_and_start(vchan); 721 + } 722 + spin_unlock_irqrestore(&vchan->vc.lock, flags); 723 + } 724 + 725 + static struct dma_async_tx_descriptor 726 + *owl_dma_prep_memcpy(struct dma_chan *chan, 727 + dma_addr_t dst, dma_addr_t src, 728 + size_t len, unsigned long flags) 729 + { 730 + struct owl_dma *od = to_owl_dma(chan->device); 731 + struct owl_dma_vchan *vchan = to_owl_vchan(chan); 732 + struct owl_dma_txd *txd; 733 + struct owl_dma_lli *lli, *prev = NULL; 734 + size_t offset, bytes; 735 + int ret; 736 + 737 + if (!len) 738 + return NULL; 739 + 740 + txd = kzalloc(sizeof(*txd), GFP_NOWAIT); 741 + if (!txd) 742 + return NULL; 743 + 744 + INIT_LIST_HEAD(&txd->lli_list); 745 + 746 + /* Process the transfer as frame by frame */ 747 + for (offset = 0; offset < len; offset += bytes) { 748 + lli = owl_dma_alloc_lli(od); 749 + if (!lli) { 750 + dev_warn(chan2dev(chan), "failed to allocate lli\n"); 751 + goto err_txd_free; 752 + } 753 + 754 + bytes = min_t(size_t, (len - offset), OWL_DMA_FRAME_MAX_LENGTH); 755 + 756 + ret = owl_dma_cfg_lli(vchan, lli, src + offset, dst + offset, 757 + bytes, DMA_MEM_TO_MEM); 758 + if (ret) { 759 + dev_warn(chan2dev(chan), "failed to config lli\n"); 760 + goto err_txd_free; 761 + } 762 + 763 + prev = owl_dma_add_lli(txd, prev, lli); 764 + } 765 + 766 + return vchan_tx_prep(&vchan->vc, &txd->vd, flags); 767 + 768 + err_txd_free: 769 + owl_dma_free_txd(od, txd); 770 + return NULL; 771 + } 772 + 773 + static void owl_dma_free_chan_resources(struct dma_chan *chan) 774 + { 775 + struct owl_dma_vchan *vchan = to_owl_vchan(chan); 776 + 777 + /* Ensure all queued descriptors are freed */ 778 + vchan_free_chan_resources(&vchan->vc); 779 + } 780 + 781 + static inline void owl_dma_free(struct owl_dma *od) 782 + { 783 + struct owl_dma_vchan *vchan = NULL; 784 + struct owl_dma_vchan *next; 785 + 786 + list_for_each_entry_safe(vchan, 787 + next, &od->dma.channels, 
vc.chan.device_node) { 788 + list_del(&vchan->vc.chan.device_node); 789 + tasklet_kill(&vchan->vc.task); 790 + } 791 + } 792 + 793 + static int owl_dma_probe(struct platform_device *pdev) 794 + { 795 + struct device_node *np = pdev->dev.of_node; 796 + struct owl_dma *od; 797 + struct resource *res; 798 + int ret, i, nr_channels, nr_requests; 799 + 800 + od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL); 801 + if (!od) 802 + return -ENOMEM; 803 + 804 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 805 + if (!res) 806 + return -EINVAL; 807 + 808 + od->base = devm_ioremap_resource(&pdev->dev, res); 809 + if (IS_ERR(od->base)) 810 + return PTR_ERR(od->base); 811 + 812 + ret = of_property_read_u32(np, "dma-channels", &nr_channels); 813 + if (ret) { 814 + dev_err(&pdev->dev, "can't get dma-channels\n"); 815 + return ret; 816 + } 817 + 818 + ret = of_property_read_u32(np, "dma-requests", &nr_requests); 819 + if (ret) { 820 + dev_err(&pdev->dev, "can't get dma-requests\n"); 821 + return ret; 822 + } 823 + 824 + dev_info(&pdev->dev, "dma-channels %d, dma-requests %d\n", 825 + nr_channels, nr_requests); 826 + 827 + od->nr_pchans = nr_channels; 828 + od->nr_vchans = nr_requests; 829 + 830 + pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); 831 + 832 + platform_set_drvdata(pdev, od); 833 + spin_lock_init(&od->lock); 834 + 835 + dma_cap_set(DMA_MEMCPY, od->dma.cap_mask); 836 + 837 + od->dma.dev = &pdev->dev; 838 + od->dma.device_free_chan_resources = owl_dma_free_chan_resources; 839 + od->dma.device_tx_status = owl_dma_tx_status; 840 + od->dma.device_issue_pending = owl_dma_issue_pending; 841 + od->dma.device_prep_dma_memcpy = owl_dma_prep_memcpy; 842 + od->dma.device_terminate_all = owl_dma_terminate_all; 843 + od->dma.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); 844 + od->dma.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); 845 + od->dma.directions = BIT(DMA_MEM_TO_MEM); 846 + od->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; 847 + 848 + 
INIT_LIST_HEAD(&od->dma.channels); 849 + 850 + od->clk = devm_clk_get(&pdev->dev, NULL); 851 + if (IS_ERR(od->clk)) { 852 + dev_err(&pdev->dev, "unable to get clock\n"); 853 + return PTR_ERR(od->clk); 854 + } 855 + 856 + /* 857 + * Eventhough the DMA controller is capable of generating 4 858 + * IRQ's for DMA priority feature, we only use 1 IRQ for 859 + * simplification. 860 + */ 861 + od->irq = platform_get_irq(pdev, 0); 862 + ret = devm_request_irq(&pdev->dev, od->irq, owl_dma_interrupt, 0, 863 + dev_name(&pdev->dev), od); 864 + if (ret) { 865 + dev_err(&pdev->dev, "unable to request IRQ\n"); 866 + return ret; 867 + } 868 + 869 + /* Init physical channel */ 870 + od->pchans = devm_kcalloc(&pdev->dev, od->nr_pchans, 871 + sizeof(struct owl_dma_pchan), GFP_KERNEL); 872 + if (!od->pchans) 873 + return -ENOMEM; 874 + 875 + for (i = 0; i < od->nr_pchans; i++) { 876 + struct owl_dma_pchan *pchan = &od->pchans[i]; 877 + 878 + pchan->id = i; 879 + pchan->base = od->base + OWL_DMA_CHAN_BASE(i); 880 + } 881 + 882 + /* Init virtual channel */ 883 + od->vchans = devm_kcalloc(&pdev->dev, od->nr_vchans, 884 + sizeof(struct owl_dma_vchan), GFP_KERNEL); 885 + if (!od->vchans) 886 + return -ENOMEM; 887 + 888 + for (i = 0; i < od->nr_vchans; i++) { 889 + struct owl_dma_vchan *vchan = &od->vchans[i]; 890 + 891 + vchan->vc.desc_free = owl_dma_desc_free; 892 + vchan_init(&vchan->vc, &od->dma); 893 + } 894 + 895 + /* Create a pool of consistent memory blocks for hardware descriptors */ 896 + od->lli_pool = dma_pool_create(dev_name(od->dma.dev), od->dma.dev, 897 + sizeof(struct owl_dma_lli), 898 + __alignof__(struct owl_dma_lli), 899 + 0); 900 + if (!od->lli_pool) { 901 + dev_err(&pdev->dev, "unable to allocate DMA descriptor pool\n"); 902 + return -ENOMEM; 903 + } 904 + 905 + clk_prepare_enable(od->clk); 906 + 907 + ret = dma_async_device_register(&od->dma); 908 + if (ret) { 909 + dev_err(&pdev->dev, "failed to register DMA engine device\n"); 910 + goto err_pool_free; 911 + } 912 + 
913 + return 0; 914 + 915 + err_pool_free: 916 + clk_disable_unprepare(od->clk); 917 + dma_pool_destroy(od->lli_pool); 918 + 919 + return ret; 920 + } 921 + 922 + static int owl_dma_remove(struct platform_device *pdev) 923 + { 924 + struct owl_dma *od = platform_get_drvdata(pdev); 925 + 926 + dma_async_device_unregister(&od->dma); 927 + 928 + /* Mask all interrupts for this execution environment */ 929 + dma_writel(od, OWL_DMA_IRQ_EN0, 0x0); 930 + 931 + /* Make sure we won't have any further interrupts */ 932 + devm_free_irq(od->dma.dev, od->irq, od); 933 + 934 + owl_dma_free(od); 935 + 936 + clk_disable_unprepare(od->clk); 937 + 938 + return 0; 939 + } 940 + 941 + static const struct of_device_id owl_dma_match[] = { 942 + { .compatible = "actions,s900-dma", }, 943 + { /* sentinel */ } 944 + }; 945 + MODULE_DEVICE_TABLE(of, owl_dma_match); 946 + 947 + static struct platform_driver owl_dma_driver = { 948 + .probe = owl_dma_probe, 949 + .remove = owl_dma_remove, 950 + .driver = { 951 + .name = "dma-owl", 952 + .of_match_table = of_match_ptr(owl_dma_match), 953 + }, 954 + }; 955 + 956 + static int owl_dma_init(void) 957 + { 958 + return platform_driver_register(&owl_dma_driver); 959 + } 960 + subsys_initcall(owl_dma_init); 961 + 962 + static void __exit owl_dma_exit(void) 963 + { 964 + platform_driver_unregister(&owl_dma_driver); 965 + } 966 + module_exit(owl_dma_exit); 967 + 968 + MODULE_AUTHOR("David Liu <liuwei@actions-semi.com>"); 969 + MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>"); 970 + MODULE_DESCRIPTION("Actions Semi Owl SoCs DMA driver"); 971 + MODULE_LICENSE("GPL");
+6 -6
drivers/dma/pl330.c
··· 1046 1046 1047 1047 if (_state(thrd) == PL330_STATE_KILLING) 1048 1048 UNTIL(thrd, PL330_STATE_STOPPED) 1049 + /* fall through */ 1049 1050 1050 1051 case PL330_STATE_FAULTING: 1051 1052 _stop(thrd); 1053 + /* fall through */ 1052 1054 1053 1055 case PL330_STATE_KILLING: 1054 1056 case PL330_STATE_COMPLETING: 1055 1057 UNTIL(thrd, PL330_STATE_STOPPED) 1058 + /* fall through */ 1056 1059 1057 1060 case PL330_STATE_STOPPED: 1058 1061 return _trigger(thrd); ··· 1782 1779 1783 1780 static void pl330_release_channel(struct pl330_thread *thrd) 1784 1781 { 1785 - struct pl330_dmac *pl330; 1786 - 1787 1782 if (!thrd || thrd->free) 1788 1783 return; 1789 1784 ··· 1789 1788 1790 1789 dma_pl330_rqcb(thrd->req[1 - thrd->lstenq].desc, PL330_ERR_ABORT); 1791 1790 dma_pl330_rqcb(thrd->req[thrd->lstenq].desc, PL330_ERR_ABORT); 1792 - 1793 - pl330 = thrd->dmac; 1794 1791 1795 1792 _free_event(thrd, thrd->ev); 1796 1793 thrd->free = true; ··· 2256 2257 2257 2258 pm_runtime_get_sync(pl330->ddma.dev); 2258 2259 spin_lock_irqsave(&pch->lock, flags); 2260 + 2259 2261 spin_lock(&pl330->lock); 2260 2262 _stop(pch->thread); 2261 - spin_unlock(&pl330->lock); 2262 - 2263 2263 pch->thread->req[0].desc = NULL; 2264 2264 pch->thread->req[1].desc = NULL; 2265 2265 pch->thread->req_running = -1; 2266 + spin_unlock(&pl330->lock); 2267 + 2266 2268 power_down = pch->active; 2267 2269 pch->active = false; 2268 2270
+46 -66
drivers/dma/sh/rcar-dmac.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 1 2 /* 2 3 * Renesas R-Car Gen2 DMA Controller Driver 3 4 * 4 5 * Copyright (C) 2014 Renesas Electronics Inc. 5 6 * 6 7 * Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com> 7 - * 8 - * This is free software; you can redistribute it and/or modify 9 - * it under the terms of version 2 of the GNU General Public License as 10 - * published by the Free Software Foundation. 11 8 */ 12 9 13 10 #include <linux/delay.h> ··· 428 431 chcr |= RCAR_DMACHCR_DPM_DISABLED | RCAR_DMACHCR_IE; 429 432 } 430 433 431 - rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr | RCAR_DMACHCR_DE); 434 + rcar_dmac_chan_write(chan, RCAR_DMACHCR, 435 + chcr | RCAR_DMACHCR_DE | RCAR_DMACHCR_CAIE); 432 436 } 433 437 434 438 static int rcar_dmac_init(struct rcar_dmac *dmac) ··· 759 761 dev_err(chan->chan.device->dev, "CHCR DE check error\n"); 760 762 } 761 763 762 - static void rcar_dmac_sync_tcr(struct rcar_dmac_chan *chan) 764 + static void rcar_dmac_clear_chcr_de(struct rcar_dmac_chan *chan) 763 765 { 764 766 u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR); 765 - 766 - if (!(chcr & RCAR_DMACHCR_DE)) 767 - return; 768 767 769 768 /* set DE=0 and flush remaining data */ 770 769 rcar_dmac_chan_write(chan, RCAR_DMACHCR, (chcr & ~RCAR_DMACHCR_DE)); 771 770 772 771 /* make sure all remaining data was flushed */ 773 772 rcar_dmac_chcr_de_barrier(chan); 774 - 775 - /* back DE */ 776 - rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr); 777 773 } 778 774 779 775 static void rcar_dmac_chan_halt(struct rcar_dmac_chan *chan) ··· 775 783 u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR); 776 784 777 785 chcr &= ~(RCAR_DMACHCR_DSE | RCAR_DMACHCR_DSIE | RCAR_DMACHCR_IE | 778 - RCAR_DMACHCR_TE | RCAR_DMACHCR_DE); 786 + RCAR_DMACHCR_TE | RCAR_DMACHCR_DE | 787 + RCAR_DMACHCR_CAE | RCAR_DMACHCR_CAIE); 779 788 rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr); 780 789 rcar_dmac_chcr_de_barrier(chan); 781 790 } ··· 805 812 } 806 813 } 807 814 808 - static void 
rcar_dmac_stop(struct rcar_dmac *dmac) 809 - { 810 - rcar_dmac_write(dmac, RCAR_DMAOR, 0); 811 - } 812 - 813 - static void rcar_dmac_abort(struct rcar_dmac *dmac) 815 + static void rcar_dmac_stop_all_chan(struct rcar_dmac *dmac) 814 816 { 815 817 unsigned int i; 816 818 ··· 814 826 struct rcar_dmac_chan *chan = &dmac->channels[i]; 815 827 816 828 /* Stop and reinitialize the channel. */ 817 - spin_lock(&chan->lock); 829 + spin_lock_irq(&chan->lock); 818 830 rcar_dmac_chan_halt(chan); 819 - spin_unlock(&chan->lock); 820 - 821 - rcar_dmac_chan_reinit(chan); 831 + spin_unlock_irq(&chan->lock); 822 832 } 833 + } 834 + 835 + static int rcar_dmac_chan_pause(struct dma_chan *chan) 836 + { 837 + unsigned long flags; 838 + struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan); 839 + 840 + spin_lock_irqsave(&rchan->lock, flags); 841 + rcar_dmac_clear_chcr_de(rchan); 842 + spin_unlock_irqrestore(&rchan->lock, flags); 843 + 844 + return 0; 823 845 } 824 846 825 847 /* ----------------------------------------------------------------------------- ··· 1353 1355 residue += chunk->size; 1354 1356 } 1355 1357 1356 - if (desc->direction == DMA_DEV_TO_MEM) 1357 - rcar_dmac_sync_tcr(chan); 1358 - 1359 1358 /* Add the residue for the current chunk. */ 1360 1359 residue += rcar_dmac_chan_read(chan, RCAR_DMATCRB) << desc->xfer_shift; 1361 1360 ··· 1517 1522 u32 mask = RCAR_DMACHCR_DSE | RCAR_DMACHCR_TE; 1518 1523 struct rcar_dmac_chan *chan = dev; 1519 1524 irqreturn_t ret = IRQ_NONE; 1525 + bool reinit = false; 1520 1526 u32 chcr; 1521 1527 1522 1528 spin_lock(&chan->lock); 1523 1529 1524 1530 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR); 1531 + if (chcr & RCAR_DMACHCR_CAE) { 1532 + struct rcar_dmac *dmac = to_rcar_dmac(chan->chan.device); 1533 + 1534 + /* 1535 + * We don't need to call rcar_dmac_chan_halt() 1536 + * because channel is already stopped in error case. 1537 + * We need to clear register and check DE bit as recovery. 
1538 + */ 1539 + rcar_dmac_write(dmac, RCAR_DMACHCLR, 1 << chan->index); 1540 + rcar_dmac_chcr_de_barrier(chan); 1541 + reinit = true; 1542 + goto spin_lock_end; 1543 + } 1544 + 1525 1545 if (chcr & RCAR_DMACHCR_TE) 1526 1546 mask |= RCAR_DMACHCR_DE; 1527 1547 rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr & ~mask); ··· 1549 1539 if (chcr & RCAR_DMACHCR_TE) 1550 1540 ret |= rcar_dmac_isr_transfer_end(chan); 1551 1541 1542 + spin_lock_end: 1552 1543 spin_unlock(&chan->lock); 1544 + 1545 + if (reinit) { 1546 + dev_err(chan->chan.device->dev, "Channel Address Error\n"); 1547 + 1548 + rcar_dmac_chan_reinit(chan); 1549 + ret = IRQ_HANDLED; 1550 + } 1553 1551 1554 1552 return ret; 1555 1553 } ··· 1611 1593 1612 1594 /* Recycle all acked descriptors. */ 1613 1595 rcar_dmac_desc_recycle_acked(chan); 1614 - 1615 - return IRQ_HANDLED; 1616 - } 1617 - 1618 - static irqreturn_t rcar_dmac_isr_error(int irq, void *data) 1619 - { 1620 - struct rcar_dmac *dmac = data; 1621 - 1622 - if (!(rcar_dmac_read(dmac, RCAR_DMAOR) & RCAR_DMAOR_AE)) 1623 - return IRQ_NONE; 1624 - 1625 - /* 1626 - * An unrecoverable error occurred on an unknown channel. Halt the DMAC, 1627 - * abort transfers on all channels, and reinitialize the DMAC. 
1628 - */ 1629 - rcar_dmac_stop(dmac); 1630 - rcar_dmac_abort(dmac); 1631 - rcar_dmac_init(dmac); 1632 1596 1633 1597 return IRQ_HANDLED; 1634 1598 } ··· 1784 1784 struct rcar_dmac *dmac; 1785 1785 struct resource *mem; 1786 1786 unsigned int i; 1787 - char *irqname; 1788 - int irq; 1789 1787 int ret; 1790 1788 1791 1789 dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL); ··· 1822 1824 if (IS_ERR(dmac->iomem)) 1823 1825 return PTR_ERR(dmac->iomem); 1824 1826 1825 - irq = platform_get_irq_byname(pdev, "error"); 1826 - if (irq < 0) { 1827 - dev_err(&pdev->dev, "no error IRQ specified\n"); 1828 - return -ENODEV; 1829 - } 1830 - 1831 - irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:error", 1832 - dev_name(dmac->dev)); 1833 - if (!irqname) 1834 - return -ENOMEM; 1835 - 1836 1827 /* Enable runtime PM and initialize the device. */ 1837 1828 pm_runtime_enable(&pdev->dev); 1838 1829 ret = pm_runtime_get_sync(&pdev->dev); ··· 1858 1871 engine->device_prep_slave_sg = rcar_dmac_prep_slave_sg; 1859 1872 engine->device_prep_dma_cyclic = rcar_dmac_prep_dma_cyclic; 1860 1873 engine->device_config = rcar_dmac_device_config; 1874 + engine->device_pause = rcar_dmac_chan_pause; 1861 1875 engine->device_terminate_all = rcar_dmac_chan_terminate_all; 1862 1876 engine->device_tx_status = rcar_dmac_tx_status; 1863 1877 engine->device_issue_pending = rcar_dmac_issue_pending; ··· 1871 1883 i + channels_offset); 1872 1884 if (ret < 0) 1873 1885 goto error; 1874 - } 1875 - 1876 - ret = devm_request_irq(&pdev->dev, irq, rcar_dmac_isr_error, 0, 1877 - irqname, dmac); 1878 - if (ret) { 1879 - dev_err(&pdev->dev, "failed to request IRQ %u (%d)\n", 1880 - irq, ret); 1881 - return ret; 1882 1886 } 1883 1887 1884 1888 /* Register the DMAC as a DMA provider for DT. 
*/ ··· 1912 1932 { 1913 1933 struct rcar_dmac *dmac = platform_get_drvdata(pdev); 1914 1934 1915 - rcar_dmac_stop(dmac); 1935 + rcar_dmac_stop_all_chan(dmac); 1916 1936 } 1917 1937 1918 1938 static const struct of_device_id rcar_dmac_of_ids[] = {
+13 -2
drivers/dma/ste_dma40.c
··· 555 555 * @reg_val_backup_v4: Backup of registers that only exits on dma40 v3 and 556 556 * later 557 557 * @reg_val_backup_chan: Backup data for standard channel parameter registers. 558 + * @regs_interrupt: Scratch space for registers during interrupt. 558 559 * @gcc_pwr_off_mask: Mask to maintain the channels that can be turned off. 559 560 * @gen_dmac: the struct for generic registers values to represent u8500/8540 560 561 * DMA controller ··· 593 592 u32 reg_val_backup[BACKUP_REGS_SZ]; 594 593 u32 reg_val_backup_v4[BACKUP_REGS_SZ_MAX]; 595 594 u32 *reg_val_backup_chan; 595 + u32 *regs_interrupt; 596 596 u16 gcc_pwr_off_mask; 597 597 struct d40_gen_dmac gen_dmac; 598 598 }; ··· 1639 1637 struct d40_chan *d40c; 1640 1638 unsigned long flags; 1641 1639 struct d40_base *base = data; 1642 - u32 regs[base->gen_dmac.il_size]; 1640 + u32 *regs = base->regs_interrupt; 1643 1641 struct d40_interrupt_lookup *il = base->gen_dmac.il; 1644 1642 u32 il_size = base->gen_dmac.il_size; 1645 1643 ··· 3260 3258 if (!base->lcla_pool.alloc_map) 3261 3259 goto free_backup_chan; 3262 3260 3261 + base->regs_interrupt = kmalloc_array(base->gen_dmac.il_size, 3262 + sizeof(*base->regs_interrupt), 3263 + GFP_KERNEL); 3264 + if (!base->regs_interrupt) 3265 + goto free_map; 3266 + 3263 3267 base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc), 3264 3268 0, SLAB_HWCACHE_ALIGN, 3265 3269 NULL); 3266 3270 if (base->desc_slab == NULL) 3267 - goto free_map; 3271 + goto free_regs; 3272 + 3268 3273 3269 3274 return base; 3275 + free_regs: 3276 + kfree(base->regs_interrupt); 3270 3277 free_map: 3271 3278 kfree(base->lcla_pool.alloc_map); 3272 3279 free_backup_chan:
+2 -2
drivers/dma/stm32-dma.c
··· 594 594 595 595 chan->busy = true; 596 596 597 - dev_dbg(chan2dev(chan), "vchan %p: started\n", &chan->vchan); 597 + dev_dbg(chan2dev(chan), "vchan %pK: started\n", &chan->vchan); 598 598 } 599 599 600 600 static void stm32_dma_configure_next_sg(struct stm32_dma_chan *chan) ··· 693 693 694 694 spin_lock_irqsave(&chan->vchan.lock, flags); 695 695 if (vchan_issue_pending(&chan->vchan) && !chan->desc && !chan->busy) { 696 - dev_dbg(chan2dev(chan), "vchan %p: issued\n", &chan->vchan); 696 + dev_dbg(chan2dev(chan), "vchan %pK: issued\n", &chan->vchan); 697 697 stm32_dma_start_transfer(chan); 698 698 699 699 }
+4 -4
drivers/dma/stm32-mdma.c
··· 1170 1170 1171 1171 chan->busy = true; 1172 1172 1173 - dev_dbg(chan2dev(chan), "vchan %p: started\n", &chan->vchan); 1173 + dev_dbg(chan2dev(chan), "vchan %pK: started\n", &chan->vchan); 1174 1174 } 1175 1175 1176 1176 static void stm32_mdma_issue_pending(struct dma_chan *c) ··· 1183 1183 if (!vchan_issue_pending(&chan->vchan)) 1184 1184 goto end; 1185 1185 1186 - dev_dbg(chan2dev(chan), "vchan %p: issued\n", &chan->vchan); 1186 + dev_dbg(chan2dev(chan), "vchan %pK: issued\n", &chan->vchan); 1187 1187 1188 1188 if (!chan->desc && !chan->busy) 1189 1189 stm32_mdma_start_transfer(chan); ··· 1203 1203 spin_unlock_irqrestore(&chan->vchan.lock, flags); 1204 1204 1205 1205 if (!ret) 1206 - dev_dbg(chan2dev(chan), "vchan %p: pause\n", &chan->vchan); 1206 + dev_dbg(chan2dev(chan), "vchan %pK: pause\n", &chan->vchan); 1207 1207 1208 1208 return ret; 1209 1209 } ··· 1240 1240 1241 1241 spin_unlock_irqrestore(&chan->vchan.lock, flags); 1242 1242 1243 - dev_dbg(chan2dev(chan), "vchan %p: resume\n", &chan->vchan); 1243 + dev_dbg(chan2dev(chan), "vchan %pK: resume\n", &chan->vchan); 1244 1244 1245 1245 return 0; 1246 1246 }
+22
drivers/dma/xilinx/xilinx_dma.c
··· 115 115 #define XILINX_VDMA_REG_START_ADDRESS(n) (0x000c + 4 * (n)) 116 116 #define XILINX_VDMA_REG_START_ADDRESS_64(n) (0x000c + 8 * (n)) 117 117 118 + #define XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP 0x00ec 119 + #define XILINX_VDMA_ENABLE_VERTICAL_FLIP BIT(0) 120 + 118 121 /* HW specific definitions */ 119 122 #define XILINX_DMA_MAX_CHANS_PER_DEVICE 0x20 120 123 ··· 343 340 * @start_transfer: Differentiate b/w DMA IP's transfer 344 341 * @stop_transfer: Differentiate b/w DMA IP's quiesce 345 342 * @tdest: TDEST value for mcdma 343 + * @has_vflip: S2MM vertical flip 346 344 */ 347 345 struct xilinx_dma_chan { 348 346 struct xilinx_dma_device *xdev; ··· 380 376 void (*start_transfer)(struct xilinx_dma_chan *chan); 381 377 int (*stop_transfer)(struct xilinx_dma_chan *chan); 382 378 u16 tdest; 379 + bool has_vflip; 383 380 }; 384 381 385 382 /** ··· 1097 1092 desc->async_tx.phys); 1098 1093 1099 1094 /* Configure the hardware using info in the config structure */ 1095 + if (chan->has_vflip) { 1096 + reg = dma_read(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP); 1097 + reg &= ~XILINX_VDMA_ENABLE_VERTICAL_FLIP; 1098 + reg |= config->vflip_en; 1099 + dma_write(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP, 1100 + reg); 1101 + } 1102 + 1100 1103 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR); 1101 1104 1102 1105 if (config->frm_cnt_en) ··· 2118 2105 } 2119 2106 2120 2107 chan->config.frm_cnt_en = cfg->frm_cnt_en; 2108 + chan->config.vflip_en = cfg->vflip_en; 2109 + 2121 2110 if (cfg->park) 2122 2111 chan->config.park_frm = cfg->park_frm; 2123 2112 else ··· 2443 2428 chan->direction = DMA_DEV_TO_MEM; 2444 2429 chan->id = chan_id; 2445 2430 chan->tdest = chan_id - xdev->nr_channels; 2431 + chan->has_vflip = of_property_read_bool(node, 2432 + "xlnx,enable-vert-flip"); 2433 + if (chan->has_vflip) { 2434 + chan->config.vflip_en = dma_read(chan, 2435 + XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP) & 2436 + XILINX_VDMA_ENABLE_VERTICAL_FLIP; 2437 + } 2446 2438 2447 2439 chan->ctrl_offset = 
XILINX_DMA_S2MM_CTRL_OFFSET; 2448 2440 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
+2
include/linux/dma/xilinx_dma.h
··· 27 27 * @delay: Delay counter 28 28 * @reset: Reset Channel 29 29 * @ext_fsync: External Frame Sync source 30 + * @vflip_en: Vertical Flip enable 30 31 */ 31 32 struct xilinx_vdma_config { 32 33 int frm_dly; ··· 40 39 int delay; 41 40 int reset; 42 41 int ext_fsync; 42 + bool vflip_en; 43 43 }; 44 44 45 45 int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
+5 -1
include/linux/dmaengine.h
··· 415 415 * each type, the dma controller should set BIT(<TYPE>) and same 416 416 * should be checked by controller as well 417 417 * @max_burst: max burst capability per-transfer 418 - * @cmd_pause: true, if pause and thereby resume is supported 418 + * @cmd_pause: true, if pause is supported (i.e. for reading residue or 419 + * for resume later) 420 + * @cmd_resume: true, if resume is supported 419 421 * @cmd_terminate: true, if terminate cmd is supported 420 422 * @residue_granularity: granularity of the reported transfer residue 421 423 * @descriptor_reuse: if a descriptor can be reused by client and ··· 429 427 u32 directions; 430 428 u32 max_burst; 431 429 bool cmd_pause; 430 + bool cmd_resume; 432 431 bool cmd_terminate; 433 432 enum dma_residue_granularity residue_granularity; 434 433 bool descriptor_reuse; ··· 1406 1403 /* --- DMA device --- */ 1407 1404 1408 1405 int dma_async_device_register(struct dma_device *device); 1406 + int dmaenginem_async_device_register(struct dma_device *device); 1409 1407 void dma_async_device_unregister(struct dma_device *device); 1410 1408 void dma_run_dependencies(struct dma_async_tx_descriptor *tx); 1411 1409 struct dma_chan *dma_get_slave_channel(struct dma_chan *chan);
+1 -1
sound/soc/soc-generic-dmaengine-pcm.c
··· 147 147 148 148 ret = dma_get_slave_caps(chan, &dma_caps); 149 149 if (ret == 0) { 150 - if (dma_caps.cmd_pause) 150 + if (dma_caps.cmd_pause && dma_caps.cmd_resume) 151 151 hw.info |= SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME; 152 152 if (dma_caps.residue_granularity <= DMA_RESIDUE_GRANULARITY_SEGMENT) 153 153 hw.info |= SNDRV_PCM_INFO_BATCH;