
dmaengine: xilinx: dpdma: Add the Xilinx DisplayPort DMA engine driver

The ZynqMP DisplayPort subsystem includes a DMA engine called DPDMA with
6 DMA channels (4 for display and 2 for audio). This driver exposes the
DPDMA through the dmaengine API, to be used by audio (ALSA) and display
(DRM) drivers for the DisplayPort subsystem.

Signed-off-by: Hyun Kwon <hyun.kwon@xilinx.com>
Signed-off-by: Tejas Upadhyay <tejasu@xilinx.com>
Signed-off-by: Michal Simek <michal.simek@xilinx.com>
Signed-off-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Link: https://lore.kernel.org/r/20200717013337.24122-4-laurent.pinchart@ideasonboard.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>

Authored by Hyun Kwon, committed by Vinod Koul
7cbb0c63 9c8ebd8b
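
As a rough illustration of the consumer side (not part of this commit): a client such as the DisplayPort display driver names one of the six channels in its DT node and requests it through the standard dmaengine helpers. The channel indices come from include/dt-bindings/dma/xlnx-zynqmp-dpdma.h, and the driver's of_dma_xilinx_xlate() resolves the single DT cell to a channel. The consumer node layout and the "vid0" name below are hypothetical:

#include <linux/dmaengine.h>

/*
 * Hypothetical consumer DT fragment (names are illustrative only):
 *
 *	dmas = <&zynqmp_dpdma ZYNQMP_DPDMA_VIDEO0>;
 *	dma-names = "vid0";
 *
 * The single cell is the channel ID, matched against xdev->chan[] by
 * of_dma_xilinx_xlate() in the driver below.
 */
static struct dma_chan *dpsub_request_video_chan(struct device *dev)
{
	/* Returns an ERR_PTR() on failure, e.g. -EPROBE_DEFER. */
	return dma_request_chan(dev, "vid0");
}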

4 files changed, 1545 insertions(+)

MAINTAINERS (+1):

 L:	dmaengine@vger.kernel.org
 S:	Supported
 F:	Documentation/devicetree/bindings/dma/xilinx/xlnx,zynqmp-dpdma.yaml
+F:	drivers/dma/xilinx/xilinx_dpdma.c
 F:	include/dt-bindings/dma/xlnx-zynqmp-dpdma.h

 XILLYBUS DRIVER
drivers/dma/Kconfig (+10):

 	help
 	  Enable support for Xilinx ZynqMP DMA controller.
 
+config XILINX_ZYNQMP_DPDMA
+	tristate "Xilinx DPDMA Engine"
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  Enable support for Xilinx ZynqMP DisplayPort DMA. Choose this option
+	  if you have a Xilinx ZynqMP SoC with a DisplayPort subsystem. The
+	  driver provides the dmaengine required by the DisplayPort subsystem
+	  display driver.
+
 config ZX_DMA
 	tristate "ZTE ZX DMA support"
 	depends on ARCH_ZX || COMPILE_TEST
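
For reference, a minimal configuration fragment that builds the new driver. The new symbol comes from the hunk above; CONFIG_DMADEVICES is the existing menu guard for drivers/dma/Kconfig. Since the symbol is tristate, =m works equally well:

CONFIG_DMADEVICES=y
CONFIG_XILINX_ZYNQMP_DPDMA=y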
drivers/dma/xilinx/Makefile (+1):

 # SPDX-License-Identifier: GPL-2.0-only
 obj-$(CONFIG_XILINX_DMA) += xilinx_dma.o
 obj-$(CONFIG_XILINX_ZYNQMP_DMA) += zynqmp_dma.o
+obj-$(CONFIG_XILINX_ZYNQMP_DPDMA) += xilinx_dpdma.o
drivers/dma/xilinx/xilinx_dpdma.c (new file, +1533):

// SPDX-License-Identifier: GPL-2.0
/*
 * Xilinx ZynqMP DPDMA Engine driver
 *
 * Copyright (C) 2015 - 2020 Xilinx, Inc.
 *
 * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
 */

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

#include <dt-bindings/dma/xlnx-zynqmp-dpdma.h>

#include "../dmaengine.h"
#include "../virt-dma.h"

/* DPDMA registers */
#define XILINX_DPDMA_ERR_CTRL				0x000
#define XILINX_DPDMA_ISR				0x004
#define XILINX_DPDMA_IMR				0x008
#define XILINX_DPDMA_IEN				0x00c
#define XILINX_DPDMA_IDS				0x010
#define XILINX_DPDMA_INTR_DESC_DONE(n)			BIT((n) + 0)
#define XILINX_DPDMA_INTR_DESC_DONE_MASK		GENMASK(5, 0)
#define XILINX_DPDMA_INTR_NO_OSTAND(n)			BIT((n) + 6)
#define XILINX_DPDMA_INTR_NO_OSTAND_MASK		GENMASK(11, 6)
#define XILINX_DPDMA_INTR_AXI_ERR(n)			BIT((n) + 12)
#define XILINX_DPDMA_INTR_AXI_ERR_MASK			GENMASK(17, 12)
#define XILINX_DPDMA_INTR_DESC_ERR(n)			BIT((n) + 16)
#define XILINX_DPDMA_INTR_DESC_ERR_MASK			GENMASK(23, 18)
#define XILINX_DPDMA_INTR_WR_CMD_FIFO_FULL		BIT(24)
#define XILINX_DPDMA_INTR_WR_DATA_FIFO_FULL		BIT(25)
#define XILINX_DPDMA_INTR_AXI_4K_CROSS			BIT(26)
#define XILINX_DPDMA_INTR_VSYNC				BIT(27)
#define XILINX_DPDMA_INTR_CHAN_ERR_MASK			0x00041000
#define XILINX_DPDMA_INTR_CHAN_ERR			0x00fff000
#define XILINX_DPDMA_INTR_GLOBAL_ERR			0x07000000
#define XILINX_DPDMA_INTR_ERR_ALL			0x07fff000
#define XILINX_DPDMA_INTR_CHAN_MASK			0x00041041
#define XILINX_DPDMA_INTR_GLOBAL_MASK			0x0f000000
#define XILINX_DPDMA_INTR_ALL				0x0fffffff
#define XILINX_DPDMA_EISR				0x014
#define XILINX_DPDMA_EIMR				0x018
#define XILINX_DPDMA_EIEN				0x01c
#define XILINX_DPDMA_EIDS				0x020
#define XILINX_DPDMA_EINTR_INV_APB			BIT(0)
#define XILINX_DPDMA_EINTR_RD_AXI_ERR(n)		BIT((n) + 1)
#define XILINX_DPDMA_EINTR_RD_AXI_ERR_MASK		GENMASK(6, 1)
#define XILINX_DPDMA_EINTR_PRE_ERR(n)			BIT((n) + 7)
#define XILINX_DPDMA_EINTR_PRE_ERR_MASK			GENMASK(12, 7)
#define XILINX_DPDMA_EINTR_CRC_ERR(n)			BIT((n) + 13)
#define XILINX_DPDMA_EINTR_CRC_ERR_MASK			GENMASK(18, 13)
#define XILINX_DPDMA_EINTR_WR_AXI_ERR(n)		BIT((n) + 19)
#define XILINX_DPDMA_EINTR_WR_AXI_ERR_MASK		GENMASK(24, 19)
#define XILINX_DPDMA_EINTR_DESC_DONE_ERR(n)		BIT((n) + 25)
#define XILINX_DPDMA_EINTR_DESC_DONE_ERR_MASK		GENMASK(30, 25)
#define XILINX_DPDMA_EINTR_RD_CMD_FIFO_FULL		BIT(32)
#define XILINX_DPDMA_EINTR_CHAN_ERR_MASK		0x02082082
#define XILINX_DPDMA_EINTR_CHAN_ERR			0x7ffffffe
#define XILINX_DPDMA_EINTR_GLOBAL_ERR			0x80000001
#define XILINX_DPDMA_EINTR_ALL				0xffffffff
#define XILINX_DPDMA_CNTL				0x100
#define XILINX_DPDMA_GBL				0x104
#define XILINX_DPDMA_GBL_TRIG_MASK(n)			((n) << 0)
#define XILINX_DPDMA_GBL_RETRIG_MASK(n)			((n) << 6)
#define XILINX_DPDMA_ALC0_CNTL				0x108
#define XILINX_DPDMA_ALC0_STATUS			0x10c
#define XILINX_DPDMA_ALC0_MAX				0x110
#define XILINX_DPDMA_ALC0_MIN				0x114
#define XILINX_DPDMA_ALC0_ACC				0x118
#define XILINX_DPDMA_ALC0_ACC_TRAN			0x11c
#define XILINX_DPDMA_ALC1_CNTL				0x120
#define XILINX_DPDMA_ALC1_STATUS			0x124
#define XILINX_DPDMA_ALC1_MAX				0x128
#define XILINX_DPDMA_ALC1_MIN				0x12c
#define XILINX_DPDMA_ALC1_ACC				0x130
#define XILINX_DPDMA_ALC1_ACC_TRAN			0x134

/* Channel register */
#define XILINX_DPDMA_CH_BASE				0x200
#define XILINX_DPDMA_CH_OFFSET				0x100
#define XILINX_DPDMA_CH_DESC_START_ADDRE		0x000
#define XILINX_DPDMA_CH_DESC_START_ADDRE_MASK		GENMASK(15, 0)
#define XILINX_DPDMA_CH_DESC_START_ADDR			0x004
#define XILINX_DPDMA_CH_DESC_NEXT_ADDRE			0x008
#define XILINX_DPDMA_CH_DESC_NEXT_ADDR			0x00c
#define XILINX_DPDMA_CH_PYLD_CUR_ADDRE			0x010
#define XILINX_DPDMA_CH_PYLD_CUR_ADDR			0x014
#define XILINX_DPDMA_CH_CNTL				0x018
#define XILINX_DPDMA_CH_CNTL_ENABLE			BIT(0)
#define XILINX_DPDMA_CH_CNTL_PAUSE			BIT(1)
#define XILINX_DPDMA_CH_CNTL_QOS_DSCR_WR_MASK		GENMASK(5, 2)
#define XILINX_DPDMA_CH_CNTL_QOS_DSCR_RD_MASK		GENMASK(9, 6)
#define XILINX_DPDMA_CH_CNTL_QOS_DATA_RD_MASK		GENMASK(13, 10)
#define XILINX_DPDMA_CH_CNTL_QOS_VID_CLASS		11
#define XILINX_DPDMA_CH_STATUS				0x01c
#define XILINX_DPDMA_CH_STATUS_OTRAN_CNT_MASK		GENMASK(24, 21)
#define XILINX_DPDMA_CH_VDO				0x020
#define XILINX_DPDMA_CH_PYLD_SZ				0x024
#define XILINX_DPDMA_CH_DESC_ID				0x028

/* DPDMA descriptor fields */
#define XILINX_DPDMA_DESC_CONTROL_PREEMBLE		0xa5
#define XILINX_DPDMA_DESC_CONTROL_COMPLETE_INTR		BIT(8)
#define XILINX_DPDMA_DESC_CONTROL_DESC_UPDATE		BIT(9)
#define XILINX_DPDMA_DESC_CONTROL_IGNORE_DONE		BIT(10)
#define XILINX_DPDMA_DESC_CONTROL_FRAG_MODE		BIT(18)
#define XILINX_DPDMA_DESC_CONTROL_LAST			BIT(19)
#define XILINX_DPDMA_DESC_CONTROL_ENABLE_CRC		BIT(20)
#define XILINX_DPDMA_DESC_CONTROL_LAST_OF_FRAME		BIT(21)
#define XILINX_DPDMA_DESC_ID_MASK			GENMASK(15, 0)
#define XILINX_DPDMA_DESC_HSIZE_STRIDE_HSIZE_MASK	GENMASK(17, 0)
#define XILINX_DPDMA_DESC_HSIZE_STRIDE_STRIDE_MASK	GENMASK(31, 18)
#define XILINX_DPDMA_DESC_ADDR_EXT_NEXT_ADDR_MASK	GENMASK(15, 0)
#define XILINX_DPDMA_DESC_ADDR_EXT_SRC_ADDR_MASK	GENMASK(31, 16)

#define XILINX_DPDMA_ALIGN_BYTES			256
#define XILINX_DPDMA_LINESIZE_ALIGN_BITS		128

#define XILINX_DPDMA_NUM_CHAN				6

struct xilinx_dpdma_chan;

/**
 * struct xilinx_dpdma_hw_desc - DPDMA hardware descriptor
 * @control: control configuration field
 * @desc_id: descriptor ID
 * @xfer_size: transfer size
 * @hsize_stride: horizontal size and stride
 * @timestamp_lsb: LSB of time stamp
 * @timestamp_msb: MSB of time stamp
 * @addr_ext: upper 16 bits of 48 bit address (next_desc and src_addr)
 * @next_desc: next descriptor 32 bit address
 * @src_addr: payload source address (1st page, 32 LSB)
 * @addr_ext_23: payload source address (2nd and 3rd pages, 16 LSBs)
 * @addr_ext_45: payload source address (4th and 5th pages, 16 LSBs)
 * @src_addr2: payload source address (2nd page, 32 LSB)
 * @src_addr3: payload source address (3rd page, 32 LSB)
 * @src_addr4: payload source address (4th page, 32 LSB)
 * @src_addr5: payload source address (5th page, 32 LSB)
 * @crc: descriptor CRC
 */
struct xilinx_dpdma_hw_desc {
	u32 control;
	u32 desc_id;
	u32 xfer_size;
	u32 hsize_stride;
	u32 timestamp_lsb;
	u32 timestamp_msb;
	u32 addr_ext;
	u32 next_desc;
	u32 src_addr;
	u32 addr_ext_23;
	u32 addr_ext_45;
	u32 src_addr2;
	u32 src_addr3;
	u32 src_addr4;
	u32 src_addr5;
	u32 crc;
} __aligned(XILINX_DPDMA_ALIGN_BYTES);

/**
 * struct xilinx_dpdma_sw_desc - DPDMA software descriptor
 * @hw: DPDMA hardware descriptor
 * @node: list node for software descriptors
 * @dma_addr: DMA address of the software descriptor
 */
struct xilinx_dpdma_sw_desc {
	struct xilinx_dpdma_hw_desc hw;
	struct list_head node;
	dma_addr_t dma_addr;
};

/**
 * struct xilinx_dpdma_tx_desc - DPDMA transaction descriptor
 * @vdesc: virtual DMA descriptor
 * @chan: DMA channel
 * @descriptors: list of software descriptors
 * @error: an error has been detected with this descriptor
 */
struct xilinx_dpdma_tx_desc {
	struct virt_dma_desc vdesc;
	struct xilinx_dpdma_chan *chan;
	struct list_head descriptors;
	bool error;
};

#define to_dpdma_tx_desc(_desc) \
	container_of(_desc, struct xilinx_dpdma_tx_desc, vdesc)

/**
 * struct xilinx_dpdma_chan - DPDMA channel
 * @vchan: virtual DMA channel
 * @reg: register base address
 * @id: channel ID
 * @wait_to_stop: queue to wait for outstanding transactions before stopping
 * @running: true if the channel is running
 * @first_frame: flag for the first frame of stream
 * @video_group: flag if multi-channel operation is needed for video channels
 * @lock: lock to access struct xilinx_dpdma_chan
 * @desc_pool: descriptor allocation pool
 * @err_task: error IRQ bottom half handler
 * @desc.pending: Descriptor scheduled to the hardware, pending execution
 * @desc.active: Descriptor being executed by the hardware
 * @xdev: DPDMA device
 */
struct xilinx_dpdma_chan {
	struct virt_dma_chan vchan;
	void __iomem *reg;
	unsigned int id;

	wait_queue_head_t wait_to_stop;
	bool running;
	bool first_frame;
	bool video_group;

	spinlock_t lock; /* lock to access struct xilinx_dpdma_chan */
	struct dma_pool *desc_pool;
	struct tasklet_struct err_task;

	struct {
		struct xilinx_dpdma_tx_desc *pending;
		struct xilinx_dpdma_tx_desc *active;
	} desc;

	struct xilinx_dpdma_device *xdev;
};

#define to_xilinx_chan(_chan) \
	container_of(_chan, struct xilinx_dpdma_chan, vchan.chan)

/**
 * struct xilinx_dpdma_device - DPDMA device
 * @common: generic dma device structure
 * @reg: register base address
 * @dev: generic device structure
 * @irq: the interrupt number
 * @axi_clk: axi clock
 * @chan: DPDMA channels
 * @ext_addr: flag for 64 bit system (48 bit addressing)
 */
struct xilinx_dpdma_device {
	struct dma_device common;
	void __iomem *reg;
	struct device *dev;
	int irq;

	struct clk *axi_clk;
	struct xilinx_dpdma_chan *chan[XILINX_DPDMA_NUM_CHAN];

	bool ext_addr;
};

/* -----------------------------------------------------------------------------
 * I/O Accessors
 */

static inline u32 dpdma_read(void __iomem *base, u32 offset)
{
	return ioread32(base + offset);
}

static inline void dpdma_write(void __iomem *base, u32 offset, u32 val)
{
	iowrite32(val, base + offset);
}

static inline void dpdma_clr(void __iomem *base, u32 offset, u32 clr)
{
	dpdma_write(base, offset, dpdma_read(base, offset) & ~clr);
}

static inline void dpdma_set(void __iomem *base, u32 offset, u32 set)
{
	dpdma_write(base, offset, dpdma_read(base, offset) | set);
}

/* -----------------------------------------------------------------------------
 * Descriptor Operations
 */

/**
 * xilinx_dpdma_sw_desc_set_dma_addrs - Set DMA addresses in the descriptor
 * @xdev: DPDMA device
 * @sw_desc: The software descriptor in which to set DMA addresses
 * @prev: The previous descriptor
 * @dma_addr: array of dma addresses
 * @num_src_addr: number of addresses in @dma_addr
 *
 * Set all the DMA addresses in the hardware descriptor corresponding to
 * @sw_desc from @dma_addr. If a previous descriptor is specified in @prev, its
 * next descriptor DMA address is set to the DMA address of @sw_desc. @prev may
 * be identical to @sw_desc for cyclic transfers.
 */
static void xilinx_dpdma_sw_desc_set_dma_addrs(struct xilinx_dpdma_device *xdev,
					       struct xilinx_dpdma_sw_desc *sw_desc,
					       struct xilinx_dpdma_sw_desc *prev,
					       dma_addr_t dma_addr[],
					       unsigned int num_src_addr)
{
	struct xilinx_dpdma_hw_desc *hw_desc = &sw_desc->hw;
	unsigned int i;

	hw_desc->src_addr = lower_32_bits(dma_addr[0]);
	if (xdev->ext_addr)
		hw_desc->addr_ext |=
			FIELD_PREP(XILINX_DPDMA_DESC_ADDR_EXT_SRC_ADDR_MASK,
				   upper_32_bits(dma_addr[0]));

	for (i = 1; i < num_src_addr; i++) {
		u32 *addr = &hw_desc->src_addr2;

		addr[i - 1] = lower_32_bits(dma_addr[i]);

		if (xdev->ext_addr) {
			u32 *addr_ext = &hw_desc->addr_ext_23;
			u32 addr_msb;

			addr_msb = upper_32_bits(dma_addr[i]) & GENMASK(15, 0);
			addr_msb <<= 16 * ((i - 1) % 2);
			addr_ext[(i - 1) / 2] |= addr_msb;
		}
	}

	if (!prev)
		return;

	prev->hw.next_desc = lower_32_bits(sw_desc->dma_addr);
	if (xdev->ext_addr)
		prev->hw.addr_ext |=
			FIELD_PREP(XILINX_DPDMA_DESC_ADDR_EXT_NEXT_ADDR_MASK,
				   upper_32_bits(sw_desc->dma_addr));
}

/**
 * xilinx_dpdma_chan_alloc_sw_desc - Allocate a software descriptor
 * @chan: DPDMA channel
 *
 * Allocate a software descriptor from the channel's descriptor pool.
 *
 * Return: a software descriptor or NULL.
 */
static struct xilinx_dpdma_sw_desc *
xilinx_dpdma_chan_alloc_sw_desc(struct xilinx_dpdma_chan *chan)
{
	struct xilinx_dpdma_sw_desc *sw_desc;
	dma_addr_t dma_addr;

	sw_desc = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &dma_addr);
	if (!sw_desc)
		return NULL;

	sw_desc->dma_addr = dma_addr;

	return sw_desc;
}

/**
 * xilinx_dpdma_chan_free_sw_desc - Free a software descriptor
 * @chan: DPDMA channel
 * @sw_desc: software descriptor to free
 *
 * Free a software descriptor from the channel's descriptor pool.
 */
static void
xilinx_dpdma_chan_free_sw_desc(struct xilinx_dpdma_chan *chan,
			       struct xilinx_dpdma_sw_desc *sw_desc)
{
	dma_pool_free(chan->desc_pool, sw_desc, sw_desc->dma_addr);
}

/**
 * xilinx_dpdma_chan_dump_tx_desc - Dump a tx descriptor
 * @chan: DPDMA channel
 * @tx_desc: tx descriptor to dump
 *
 * Dump contents of a tx descriptor
 */
static void xilinx_dpdma_chan_dump_tx_desc(struct xilinx_dpdma_chan *chan,
					   struct xilinx_dpdma_tx_desc *tx_desc)
{
	struct xilinx_dpdma_sw_desc *sw_desc;
	struct device *dev = chan->xdev->dev;
	unsigned int i = 0;

	dev_dbg(dev, "------- TX descriptor dump start -------\n");
	dev_dbg(dev, "------- channel ID = %d -------\n", chan->id);

	list_for_each_entry(sw_desc, &tx_desc->descriptors, node) {
		struct xilinx_dpdma_hw_desc *hw_desc = &sw_desc->hw;

		dev_dbg(dev, "------- HW descriptor %d -------\n", i++);
		dev_dbg(dev, "descriptor DMA addr: %pad\n", &sw_desc->dma_addr);
		dev_dbg(dev, "control: 0x%08x\n", hw_desc->control);
		dev_dbg(dev, "desc_id: 0x%08x\n", hw_desc->desc_id);
		dev_dbg(dev, "xfer_size: 0x%08x\n", hw_desc->xfer_size);
		dev_dbg(dev, "hsize_stride: 0x%08x\n", hw_desc->hsize_stride);
		dev_dbg(dev, "timestamp_lsb: 0x%08x\n", hw_desc->timestamp_lsb);
		dev_dbg(dev, "timestamp_msb: 0x%08x\n", hw_desc->timestamp_msb);
		dev_dbg(dev, "addr_ext: 0x%08x\n", hw_desc->addr_ext);
		dev_dbg(dev, "next_desc: 0x%08x\n", hw_desc->next_desc);
		dev_dbg(dev, "src_addr: 0x%08x\n", hw_desc->src_addr);
		dev_dbg(dev, "addr_ext_23: 0x%08x\n", hw_desc->addr_ext_23);
		dev_dbg(dev, "addr_ext_45: 0x%08x\n", hw_desc->addr_ext_45);
		dev_dbg(dev, "src_addr2: 0x%08x\n", hw_desc->src_addr2);
		dev_dbg(dev, "src_addr3: 0x%08x\n", hw_desc->src_addr3);
		dev_dbg(dev, "src_addr4: 0x%08x\n", hw_desc->src_addr4);
		dev_dbg(dev, "src_addr5: 0x%08x\n", hw_desc->src_addr5);
		dev_dbg(dev, "crc: 0x%08x\n", hw_desc->crc);
	}

	dev_dbg(dev, "------- TX descriptor dump end -------\n");
}

/**
 * xilinx_dpdma_chan_alloc_tx_desc - Allocate a transaction descriptor
 * @chan: DPDMA channel
 *
 * Allocate a tx descriptor.
 *
 * Return: a tx descriptor or NULL.
 */
static struct xilinx_dpdma_tx_desc *
xilinx_dpdma_chan_alloc_tx_desc(struct xilinx_dpdma_chan *chan)
{
	struct xilinx_dpdma_tx_desc *tx_desc;

	tx_desc = kzalloc(sizeof(*tx_desc), GFP_NOWAIT);
	if (!tx_desc)
		return NULL;

	INIT_LIST_HEAD(&tx_desc->descriptors);
	tx_desc->chan = chan;
	tx_desc->error = false;

	return tx_desc;
}

/**
 * xilinx_dpdma_chan_free_tx_desc - Free a virtual DMA descriptor
 * @vdesc: virtual DMA descriptor
 *
 * Free the virtual DMA descriptor @vdesc including its software descriptors.
 */
static void xilinx_dpdma_chan_free_tx_desc(struct virt_dma_desc *vdesc)
{
	struct xilinx_dpdma_sw_desc *sw_desc, *next;
	struct xilinx_dpdma_tx_desc *desc;

	if (!vdesc)
		return;

	desc = to_dpdma_tx_desc(vdesc);

	list_for_each_entry_safe(sw_desc, next, &desc->descriptors, node) {
		list_del(&sw_desc->node);
		xilinx_dpdma_chan_free_sw_desc(desc->chan, sw_desc);
	}

	kfree(desc);
}

/**
 * xilinx_dpdma_chan_prep_interleaved_dma - Prepare an interleaved dma
 *					    descriptor
 * @chan: DPDMA channel
 * @xt: dma interleaved template
 *
 * Prepare a tx descriptor including internal software/hardware descriptors
 * based on @xt.
 *
 * Return: A DPDMA TX descriptor on success, or NULL.
 */
static struct xilinx_dpdma_tx_desc *
xilinx_dpdma_chan_prep_interleaved_dma(struct xilinx_dpdma_chan *chan,
				       struct dma_interleaved_template *xt)
{
	struct xilinx_dpdma_tx_desc *tx_desc;
	struct xilinx_dpdma_sw_desc *sw_desc;
	struct xilinx_dpdma_hw_desc *hw_desc;
	size_t hsize = xt->sgl[0].size;
	size_t stride = hsize + xt->sgl[0].icg;

	if (!IS_ALIGNED(xt->src_start, XILINX_DPDMA_ALIGN_BYTES)) {
		dev_err(chan->xdev->dev, "buffer should be aligned at %d B\n",
			XILINX_DPDMA_ALIGN_BYTES);
		return NULL;
	}

	tx_desc = xilinx_dpdma_chan_alloc_tx_desc(chan);
	if (!tx_desc)
		return NULL;

	sw_desc = xilinx_dpdma_chan_alloc_sw_desc(chan);
	if (!sw_desc) {
		xilinx_dpdma_chan_free_tx_desc(&tx_desc->vdesc);
		return NULL;
	}

	xilinx_dpdma_sw_desc_set_dma_addrs(chan->xdev, sw_desc, sw_desc,
					   &xt->src_start, 1);

	hw_desc = &sw_desc->hw;
	hsize = ALIGN(hsize, XILINX_DPDMA_LINESIZE_ALIGN_BITS / 8);
	hw_desc->xfer_size = hsize * xt->numf;
	hw_desc->hsize_stride =
		FIELD_PREP(XILINX_DPDMA_DESC_HSIZE_STRIDE_HSIZE_MASK, hsize) |
		FIELD_PREP(XILINX_DPDMA_DESC_HSIZE_STRIDE_STRIDE_MASK,
			   stride / 16);
	hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_PREEMBLE;
	hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_COMPLETE_INTR;
	hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_IGNORE_DONE;
	hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_LAST_OF_FRAME;

	list_add_tail(&sw_desc->node, &tx_desc->descriptors);

	return tx_desc;
}

/* -----------------------------------------------------------------------------
 * DPDMA Channel Operations
 */

/**
 * xilinx_dpdma_chan_enable - Enable the channel
 * @chan: DPDMA channel
 *
 * Enable the channel and its interrupts. Set the QoS values for video class.
 */
static void xilinx_dpdma_chan_enable(struct xilinx_dpdma_chan *chan)
{
	u32 reg;

	reg = (XILINX_DPDMA_INTR_CHAN_MASK << chan->id)
	    | XILINX_DPDMA_INTR_GLOBAL_MASK;
	dpdma_write(chan->xdev->reg, XILINX_DPDMA_IEN, reg);
	reg = (XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id)
	    | XILINX_DPDMA_INTR_GLOBAL_ERR;
	dpdma_write(chan->xdev->reg, XILINX_DPDMA_EIEN, reg);

	reg = XILINX_DPDMA_CH_CNTL_ENABLE
	    | FIELD_PREP(XILINX_DPDMA_CH_CNTL_QOS_DSCR_WR_MASK,
			 XILINX_DPDMA_CH_CNTL_QOS_VID_CLASS)
	    | FIELD_PREP(XILINX_DPDMA_CH_CNTL_QOS_DSCR_RD_MASK,
			 XILINX_DPDMA_CH_CNTL_QOS_VID_CLASS)
	    | FIELD_PREP(XILINX_DPDMA_CH_CNTL_QOS_DATA_RD_MASK,
			 XILINX_DPDMA_CH_CNTL_QOS_VID_CLASS);
	dpdma_set(chan->reg, XILINX_DPDMA_CH_CNTL, reg);
}

/**
 * xilinx_dpdma_chan_disable - Disable the channel
 * @chan: DPDMA channel
 *
 * Disable the channel and its interrupts.
 */
static void xilinx_dpdma_chan_disable(struct xilinx_dpdma_chan *chan)
{
	u32 reg;

	reg = XILINX_DPDMA_INTR_CHAN_MASK << chan->id;
	dpdma_write(chan->xdev->reg, XILINX_DPDMA_IEN, reg);
	reg = XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id;
	dpdma_write(chan->xdev->reg, XILINX_DPDMA_EIEN, reg);

	dpdma_clr(chan->reg, XILINX_DPDMA_CH_CNTL, XILINX_DPDMA_CH_CNTL_ENABLE);
}

/**
 * xilinx_dpdma_chan_pause - Pause the channel
 * @chan: DPDMA channel
 *
 * Pause the channel.
 */
static void xilinx_dpdma_chan_pause(struct xilinx_dpdma_chan *chan)
{
	dpdma_set(chan->reg, XILINX_DPDMA_CH_CNTL, XILINX_DPDMA_CH_CNTL_PAUSE);
}

/**
 * xilinx_dpdma_chan_unpause - Unpause the channel
 * @chan: DPDMA channel
 *
 * Unpause the channel.
 */
static void xilinx_dpdma_chan_unpause(struct xilinx_dpdma_chan *chan)
{
	dpdma_clr(chan->reg, XILINX_DPDMA_CH_CNTL, XILINX_DPDMA_CH_CNTL_PAUSE);
}

static u32 xilinx_dpdma_chan_video_group_ready(struct xilinx_dpdma_chan *chan)
{
	struct xilinx_dpdma_device *xdev = chan->xdev;
	u32 channels = 0;
	unsigned int i;

	for (i = ZYNQMP_DPDMA_VIDEO0; i <= ZYNQMP_DPDMA_VIDEO2; i++) {
		if (xdev->chan[i]->video_group && !xdev->chan[i]->running)
			return 0;

		if (xdev->chan[i]->video_group)
			channels |= BIT(i);
	}

	return channels;
}

/**
 * xilinx_dpdma_chan_queue_transfer - Queue the next transfer
 * @chan: DPDMA channel
 *
 * Queue the next descriptor, if any, to the hardware. If the channel is
 * stopped, start it first. Otherwise retrigger it with the next descriptor.
 */
static void xilinx_dpdma_chan_queue_transfer(struct xilinx_dpdma_chan *chan)
{
	struct xilinx_dpdma_device *xdev = chan->xdev;
	struct xilinx_dpdma_sw_desc *sw_desc;
	struct xilinx_dpdma_tx_desc *desc;
	struct virt_dma_desc *vdesc;
	u32 reg, channels;

	lockdep_assert_held(&chan->lock);

	if (chan->desc.pending)
		return;

	if (!chan->running) {
		xilinx_dpdma_chan_unpause(chan);
		xilinx_dpdma_chan_enable(chan);
		chan->first_frame = true;
		chan->running = true;
	}

	if (chan->video_group)
		channels = xilinx_dpdma_chan_video_group_ready(chan);
	else
		channels = BIT(chan->id);

	if (!channels)
		return;

	vdesc = vchan_next_desc(&chan->vchan);
	if (!vdesc)
		return;

	desc = to_dpdma_tx_desc(vdesc);
	chan->desc.pending = desc;
	list_del(&desc->vdesc.node);

	/*
	 * Assign the cookie to descriptors in this transaction. Only 16 bits
	 * will be used, but they should be enough.
	 */
	list_for_each_entry(sw_desc, &desc->descriptors, node)
		sw_desc->hw.desc_id = desc->vdesc.tx.cookie;

	sw_desc = list_first_entry(&desc->descriptors,
				   struct xilinx_dpdma_sw_desc, node);
	dpdma_write(chan->reg, XILINX_DPDMA_CH_DESC_START_ADDR,
		    lower_32_bits(sw_desc->dma_addr));
	if (xdev->ext_addr)
		dpdma_write(chan->reg, XILINX_DPDMA_CH_DESC_START_ADDRE,
			    FIELD_PREP(XILINX_DPDMA_CH_DESC_START_ADDRE_MASK,
				       upper_32_bits(sw_desc->dma_addr)));

	if (chan->first_frame)
		reg = XILINX_DPDMA_GBL_TRIG_MASK(channels);
	else
		reg = XILINX_DPDMA_GBL_RETRIG_MASK(channels);

	chan->first_frame = false;

	dpdma_write(xdev->reg, XILINX_DPDMA_GBL, reg);
}

/**
 * xilinx_dpdma_chan_ostand - Number of outstanding transactions
 * @chan: DPDMA channel
 *
 * Read and return the number of outstanding transactions from register.
 *
 * Return: Number of outstanding transactions from the status register.
 */
static u32 xilinx_dpdma_chan_ostand(struct xilinx_dpdma_chan *chan)
{
	return FIELD_GET(XILINX_DPDMA_CH_STATUS_OTRAN_CNT_MASK,
			 dpdma_read(chan->reg, XILINX_DPDMA_CH_STATUS));
}

/**
 * xilinx_dpdma_chan_notify_no_ostand - Notify no outstanding transaction event
 * @chan: DPDMA channel
 *
 * Notify waiters for no outstanding event, so waiters can stop the channel
 * safely. This function is supposed to be called when the 'no outstanding'
 * interrupt is generated. The 'no outstanding' interrupt is disabled and
 * should be re-enabled when this event is handled. If the channel status
 * register still shows some number of outstanding transactions, the interrupt
 * remains enabled.
 *
 * Return: 0 on success. On failure, -EWOULDBLOCK if there's still outstanding
 * transaction(s).
 */
static int xilinx_dpdma_chan_notify_no_ostand(struct xilinx_dpdma_chan *chan)
{
	u32 cnt;

	cnt = xilinx_dpdma_chan_ostand(chan);
	if (cnt) {
		dev_dbg(chan->xdev->dev, "%d outstanding transactions\n", cnt);
		return -EWOULDBLOCK;
	}

	/* Disable 'no outstanding' interrupt */
	dpdma_write(chan->xdev->reg, XILINX_DPDMA_IDS,
		    XILINX_DPDMA_INTR_NO_OSTAND(chan->id));
	wake_up(&chan->wait_to_stop);

	return 0;
}

/**
 * xilinx_dpdma_chan_wait_no_ostand - Wait for the no outstanding irq
 * @chan: DPDMA channel
 *
 * Wait for the no outstanding transaction interrupt. This function can sleep
 * for up to 50ms.
 *
 * Return: 0 on success. On failure, -ETIMEDOUT on timeout, or the error code
 * from wait_event_interruptible_timeout().
 */
static int xilinx_dpdma_chan_wait_no_ostand(struct xilinx_dpdma_chan *chan)
{
	int ret;

	/* Wait for the no outstanding transaction interrupt for up to 50ms. */
	ret = wait_event_interruptible_timeout(chan->wait_to_stop,
					       !xilinx_dpdma_chan_ostand(chan),
					       msecs_to_jiffies(50));
	if (ret > 0) {
		dpdma_write(chan->xdev->reg, XILINX_DPDMA_IEN,
			    XILINX_DPDMA_INTR_NO_OSTAND(chan->id));
		return 0;
	}

	dev_err(chan->xdev->dev, "not ready to stop: %d trans\n",
		xilinx_dpdma_chan_ostand(chan));

	if (ret == 0)
		return -ETIMEDOUT;

	return ret;
}

/**
 * xilinx_dpdma_chan_poll_no_ostand - Poll the outstanding transaction status
 * @chan: DPDMA channel
 *
 * Poll the outstanding transaction status, and return when there's no
 * outstanding transaction. This function can be used in interrupt context or
 * where atomicity is required. The calling thread may wait for more than 50ms.
 *
 * Return: 0 on success, or -ETIMEDOUT.
 */
static int xilinx_dpdma_chan_poll_no_ostand(struct xilinx_dpdma_chan *chan)
{
	u32 cnt, loop = 50000;

	/* Poll at least for 50ms (20 fps). */
	do {
		cnt = xilinx_dpdma_chan_ostand(chan);
		udelay(1);
	} while (loop-- > 0 && cnt);

	if (loop) {
		dpdma_write(chan->xdev->reg, XILINX_DPDMA_IEN,
			    XILINX_DPDMA_INTR_NO_OSTAND(chan->id));
		return 0;
	}

	dev_err(chan->xdev->dev, "not ready to stop: %d trans\n",
		xilinx_dpdma_chan_ostand(chan));

	return -ETIMEDOUT;
}

/**
 * xilinx_dpdma_chan_stop - Stop the channel
 * @chan: DPDMA channel
 *
 * Stop a previously paused channel by first waiting for completion of all
 * outstanding transactions and then disabling the channel.
 *
 * Return: 0 on success, or -ETIMEDOUT if the channel failed to stop.
 */
static int xilinx_dpdma_chan_stop(struct xilinx_dpdma_chan *chan)
{
	unsigned long flags;
	int ret;

	ret = xilinx_dpdma_chan_wait_no_ostand(chan);
	if (ret)
		return ret;

	spin_lock_irqsave(&chan->lock, flags);
	xilinx_dpdma_chan_disable(chan);
	chan->running = false;
	spin_unlock_irqrestore(&chan->lock, flags);

	return 0;
}

/**
 * xilinx_dpdma_chan_done_irq - Handle hardware descriptor completion
 * @chan: DPDMA channel
 *
 * Handle completion of the currently active descriptor (@chan->desc.active).
 * As we currently support cyclic transfers only, this just invokes the cyclic
 * callback. The descriptor will be completed at the VSYNC interrupt when a new
 * descriptor replaces it.
 */
static void xilinx_dpdma_chan_done_irq(struct xilinx_dpdma_chan *chan)
{
	struct xilinx_dpdma_tx_desc *active = chan->desc.active;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	if (active)
		vchan_cyclic_callback(&active->vdesc);
	else
		dev_warn(chan->xdev->dev,
			 "DONE IRQ with no active descriptor!\n");

	spin_unlock_irqrestore(&chan->lock, flags);
}

/**
 * xilinx_dpdma_chan_vsync_irq - Handle hardware descriptor scheduling
 * @chan: DPDMA channel
 *
 * At VSYNC the active descriptor may have been replaced by the pending
 * descriptor. Detect this through the DESC_ID and perform appropriate
 * bookkeeping.
 */
static void xilinx_dpdma_chan_vsync_irq(struct xilinx_dpdma_chan *chan)
{
	struct xilinx_dpdma_tx_desc *pending;
	struct xilinx_dpdma_sw_desc *sw_desc;
	unsigned long flags;
	u32 desc_id;

	spin_lock_irqsave(&chan->lock, flags);

	pending = chan->desc.pending;
	if (!chan->running || !pending)
		goto out;

	desc_id = dpdma_read(chan->reg, XILINX_DPDMA_CH_DESC_ID);

	/* If the retrigger raced with vsync, retry at the next frame. */
	sw_desc = list_first_entry(&pending->descriptors,
				   struct xilinx_dpdma_sw_desc, node);
	if (sw_desc->hw.desc_id != desc_id)
		goto out;

	/*
	 * Complete the active descriptor, if any, promote the pending
	 * descriptor to active, and queue the next transfer, if any.
	 */
	if (chan->desc.active)
		vchan_cookie_complete(&chan->desc.active->vdesc);
	chan->desc.active = pending;
	chan->desc.pending = NULL;

	xilinx_dpdma_chan_queue_transfer(chan);

out:
	spin_unlock_irqrestore(&chan->lock, flags);
}

/**
 * xilinx_dpdma_chan_err - Detect any channel error
 * @chan: DPDMA channel
 * @isr: masked Interrupt Status Register
 * @eisr: Error Interrupt Status Register
 *
 * Return: true if any channel error occurs, or false otherwise.
 */
static bool
xilinx_dpdma_chan_err(struct xilinx_dpdma_chan *chan, u32 isr, u32 eisr)
{
	if (!chan)
		return false;

	if (chan->running &&
	    ((isr & (XILINX_DPDMA_INTR_CHAN_ERR_MASK << chan->id)) ||
	     (eisr & (XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id))))
		return true;

	return false;
}

/**
 * xilinx_dpdma_chan_handle_err - DPDMA channel error handling
 * @chan: DPDMA channel
 *
 * This function is called when any channel error or any global error occurs.
 * It disables the channel that was paused by the error, and determines whether
 * the currently active descriptor can be rescheduled, depending on the
 * descriptor status.
 */
static void xilinx_dpdma_chan_handle_err(struct xilinx_dpdma_chan *chan)
{
	struct xilinx_dpdma_device *xdev = chan->xdev;
	struct xilinx_dpdma_tx_desc *active;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	dev_dbg(xdev->dev, "cur desc addr = 0x%04x%08x\n",
		dpdma_read(chan->reg, XILINX_DPDMA_CH_DESC_START_ADDRE),
		dpdma_read(chan->reg, XILINX_DPDMA_CH_DESC_START_ADDR));
	dev_dbg(xdev->dev, "cur payload addr = 0x%04x%08x\n",
		dpdma_read(chan->reg, XILINX_DPDMA_CH_PYLD_CUR_ADDRE),
		dpdma_read(chan->reg, XILINX_DPDMA_CH_PYLD_CUR_ADDR));

	xilinx_dpdma_chan_disable(chan);
	chan->running = false;

	if (!chan->desc.active)
		goto out_unlock;

	active = chan->desc.active;
	chan->desc.active = NULL;

	xilinx_dpdma_chan_dump_tx_desc(chan, active);

	if (active->error)
		dev_dbg(xdev->dev, "repeated error on desc\n");

	/* Reschedule if there's no new descriptor */
	if (!chan->desc.pending &&
	    list_empty(&chan->vchan.desc_issued)) {
		active->error = true;
		list_add_tail(&active->vdesc.node,
			      &chan->vchan.desc_issued);
	} else {
		xilinx_dpdma_chan_free_tx_desc(&active->vdesc);
	}

out_unlock:
	spin_unlock_irqrestore(&chan->lock, flags);
}

/* -----------------------------------------------------------------------------
 * DMA Engine Operations
 */

static struct dma_async_tx_descriptor *
xilinx_dpdma_prep_interleaved_dma(struct dma_chan *dchan,
				  struct dma_interleaved_template *xt,
				  unsigned long flags)
{
	struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dpdma_tx_desc *desc;

	if (xt->dir != DMA_MEM_TO_DEV)
		return NULL;

	if (!xt->numf || !xt->sgl[0].size)
		return NULL;

	if (!(flags & DMA_PREP_REPEAT) || !(flags & DMA_PREP_LOAD_EOT))
		return NULL;

	desc = xilinx_dpdma_chan_prep_interleaved_dma(chan, xt);
	if (!desc)
		return NULL;

	vchan_tx_prep(&chan->vchan, &desc->vdesc, flags | DMA_CTRL_ACK);

	return &desc->vdesc.tx;
}

/**
 * xilinx_dpdma_alloc_chan_resources - Allocate resources for the channel
 * @dchan: DMA channel
 *
 * Allocate a descriptor pool for the channel.
 *
 * Return: 0 on success, or -ENOMEM if failed to allocate a pool.
 */
static int xilinx_dpdma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
	size_t align = __alignof__(struct xilinx_dpdma_sw_desc);

	chan->desc_pool = dma_pool_create(dev_name(chan->xdev->dev),
					  chan->xdev->dev,
					  sizeof(struct xilinx_dpdma_sw_desc),
					  align, 0);
	if (!chan->desc_pool) {
		dev_err(chan->xdev->dev,
			"failed to allocate a descriptor pool\n");
		return -ENOMEM;
	}

	return 0;
}

/**
 * xilinx_dpdma_free_chan_resources - Free all resources for the channel
 * @dchan: DMA channel
 *
 * Free resources associated with the virtual DMA channel, and destroy the
 * descriptor pool.
 */
static void xilinx_dpdma_free_chan_resources(struct dma_chan *dchan)
{
	struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);

	vchan_free_chan_resources(&chan->vchan);

	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;
}

static void xilinx_dpdma_issue_pending(struct dma_chan *dchan)
{
	struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->vchan.lock, flags);
	if (vchan_issue_pending(&chan->vchan))
		xilinx_dpdma_chan_queue_transfer(chan);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);
}

static int xilinx_dpdma_config(struct dma_chan *dchan,
			       struct dma_slave_config *config)
{
	struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
	unsigned long flags;

	/*
	 * The destination address doesn't need to be specified as the DPDMA is
	 * hardwired to the destination (the DP controller). The transfer
	 * width, burst size and port window size are thus meaningless, they're
	 * fixed both on the DPDMA side and on the DP controller side.
	 */

	spin_lock_irqsave(&chan->lock, flags);

	/*
	 * Abuse the slave_id to indicate that the channel is part of a video
	 * group.
	 */
	if (chan->id >= ZYNQMP_DPDMA_VIDEO0 && chan->id <= ZYNQMP_DPDMA_VIDEO2)
		chan->video_group = config->slave_id != 0;

	spin_unlock_irqrestore(&chan->lock, flags);

	return 0;
}

static int xilinx_dpdma_pause(struct dma_chan *dchan)
{
	xilinx_dpdma_chan_pause(to_xilinx_chan(dchan));

	return 0;
}

static int xilinx_dpdma_resume(struct dma_chan *dchan)
{
	xilinx_dpdma_chan_unpause(to_xilinx_chan(dchan));

	return 0;
}

/**
 * xilinx_dpdma_terminate_all - Terminate the channel and descriptors
 * @dchan: DMA channel
 *
 * Pause the channel without waiting for ongoing transfers to complete. Waiting
 * for completion is performed by xilinx_dpdma_synchronize() that will disable
 * the channel to complete the stop.
 *
 * All the descriptors associated with the channel that are guaranteed not to
 * be touched by the hardware are freed. The pending and active descriptors are
 * not touched, and will be freed either upon completion, or by
 * xilinx_dpdma_synchronize().
 *
 * Return: 0 on success, or -ETIMEDOUT if the channel failed to stop.
 */
static int xilinx_dpdma_terminate_all(struct dma_chan *dchan)
{
	struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dpdma_device *xdev = chan->xdev;
	LIST_HEAD(descriptors);
	unsigned long flags;
	unsigned int i;

	/* Pause the channel (including the whole video group if applicable). */
	if (chan->video_group) {
		for (i = ZYNQMP_DPDMA_VIDEO0; i <= ZYNQMP_DPDMA_VIDEO2; i++) {
			if (xdev->chan[i]->video_group &&
			    xdev->chan[i]->running) {
				xilinx_dpdma_chan_pause(xdev->chan[i]);
				xdev->chan[i]->video_group = false;
			}
		}
	} else {
		xilinx_dpdma_chan_pause(chan);
	}

	/* Gather all the descriptors we can free and free them. */
	spin_lock_irqsave(&chan->vchan.lock, flags);
	vchan_get_all_descriptors(&chan->vchan, &descriptors);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);

	vchan_dma_desc_free_list(&chan->vchan, &descriptors);

	return 0;
}

/**
 * xilinx_dpdma_synchronize - Synchronize callback execution
 * @dchan: DMA channel
 *
 * Synchronizing callback execution ensures that all previously issued
 * transfers have completed and all associated callbacks have been called and
 * have returned.
 *
 * This function waits for the DMA channel to stop. It assumes it has been
 * paused by a previous call to dmaengine_terminate_async(), and that no new
 * pending descriptors have been issued with dma_async_issue_pending(). The
 * behaviour is undefined otherwise.
 */
static void xilinx_dpdma_synchronize(struct dma_chan *dchan)
{
	struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
	unsigned long flags;

	xilinx_dpdma_chan_stop(chan);

	spin_lock_irqsave(&chan->vchan.lock, flags);
	if (chan->desc.pending) {
		vchan_terminate_vdesc(&chan->desc.pending->vdesc);
		chan->desc.pending = NULL;
	}
	if (chan->desc.active) {
		vchan_terminate_vdesc(&chan->desc.active->vdesc);
		chan->desc.active = NULL;
	}
	spin_unlock_irqrestore(&chan->vchan.lock, flags);

	vchan_synchronize(&chan->vchan);
}

/* -----------------------------------------------------------------------------
 * Interrupt and Tasklet Handling
 */

/**
 * xilinx_dpdma_err - Detect any global error
 * @isr: Interrupt Status Register
 * @eisr: Error Interrupt Status Register
 *
 * Return: True if any global error occurs, or false otherwise.
 */
static bool xilinx_dpdma_err(u32 isr, u32 eisr)
{
	if (isr & XILINX_DPDMA_INTR_GLOBAL_ERR ||
	    eisr & XILINX_DPDMA_EINTR_GLOBAL_ERR)
		return true;

	return false;
}

/**
 * xilinx_dpdma_handle_err_irq - Handle DPDMA error interrupt
 * @xdev: DPDMA device
 * @isr: masked Interrupt Status Register
 * @eisr: Error Interrupt Status Register
 *
 * Handle errors, if any occurred, based on @isr and @eisr. This function
 * disables the corresponding error interrupts; they should be re-enabled once
 * handling is done.
 */
static void xilinx_dpdma_handle_err_irq(struct xilinx_dpdma_device *xdev,
					u32 isr, u32 eisr)
{
	bool err = xilinx_dpdma_err(isr, eisr);
	unsigned int i;

	dev_dbg_ratelimited(xdev->dev,
			    "error irq: isr = 0x%08x, eisr = 0x%08x\n",
			    isr, eisr);

	/* Disable channel error interrupts until errors are handled. */
	dpdma_write(xdev->reg, XILINX_DPDMA_IDS,
		    isr & ~XILINX_DPDMA_INTR_GLOBAL_ERR);
	dpdma_write(xdev->reg, XILINX_DPDMA_EIDS,
		    eisr & ~XILINX_DPDMA_EINTR_GLOBAL_ERR);

	for (i = 0; i < ARRAY_SIZE(xdev->chan); i++)
		if (err || xilinx_dpdma_chan_err(xdev->chan[i], isr, eisr))
			tasklet_schedule(&xdev->chan[i]->err_task);
}

/**
 * xilinx_dpdma_enable_irq - Enable interrupts
 * @xdev: DPDMA device
 *
 * Enable interrupts.
 */
static void xilinx_dpdma_enable_irq(struct xilinx_dpdma_device *xdev)
{
	dpdma_write(xdev->reg, XILINX_DPDMA_IEN, XILINX_DPDMA_INTR_ALL);
	dpdma_write(xdev->reg, XILINX_DPDMA_EIEN, XILINX_DPDMA_EINTR_ALL);
}

/**
 * xilinx_dpdma_disable_irq - Disable interrupts
 * @xdev: DPDMA device
 *
 * Disable interrupts.
 */
static void xilinx_dpdma_disable_irq(struct xilinx_dpdma_device *xdev)
{
	dpdma_write(xdev->reg, XILINX_DPDMA_IDS, XILINX_DPDMA_INTR_ERR_ALL);
	dpdma_write(xdev->reg, XILINX_DPDMA_EIDS, XILINX_DPDMA_EINTR_ALL);
}

/**
 * xilinx_dpdma_chan_err_task - Per channel tasklet for error handling
 * @data: tasklet data to be cast to DPDMA channel structure
 *
 * Per channel error handling tasklet. This function waits for the outstanding
 * transaction to complete and triggers error handling. After error handling,
 * it re-enables the channel error interrupts and restarts the channel if
 * needed.
 */
static void xilinx_dpdma_chan_err_task(unsigned long data)
{
	struct xilinx_dpdma_chan *chan = (struct xilinx_dpdma_chan *)data;
	struct xilinx_dpdma_device *xdev = chan->xdev;
	unsigned long flags;

	/* Proceed with error handling even when polling fails. */
	xilinx_dpdma_chan_poll_no_ostand(chan);

	xilinx_dpdma_chan_handle_err(chan);

	dpdma_write(xdev->reg, XILINX_DPDMA_IEN,
		    XILINX_DPDMA_INTR_CHAN_ERR_MASK << chan->id);
	dpdma_write(xdev->reg, XILINX_DPDMA_EIEN,
		    XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id);

	spin_lock_irqsave(&chan->lock, flags);
	xilinx_dpdma_chan_queue_transfer(chan);
	spin_unlock_irqrestore(&chan->lock, flags);
}

static irqreturn_t xilinx_dpdma_irq_handler(int irq, void *data)
{
	struct xilinx_dpdma_device *xdev = data;
	unsigned long mask;
	unsigned int i;
	u32 status;
	u32 error;

	status = dpdma_read(xdev->reg, XILINX_DPDMA_ISR);
	error = dpdma_read(xdev->reg, XILINX_DPDMA_EISR);
	if (!status && !error)
		return IRQ_NONE;

	dpdma_write(xdev->reg, XILINX_DPDMA_ISR, status);
	dpdma_write(xdev->reg, XILINX_DPDMA_EISR, error);

	if (status & XILINX_DPDMA_INTR_VSYNC) {
		/*
		 * There's a single VSYNC interrupt that needs to be processed
		 * by each running channel to update the active descriptor.
		 */
		for (i = 0; i < ARRAY_SIZE(xdev->chan); i++) {
			struct xilinx_dpdma_chan *chan = xdev->chan[i];

			if (chan)
				xilinx_dpdma_chan_vsync_irq(chan);
		}
	}

	mask = FIELD_GET(XILINX_DPDMA_INTR_DESC_DONE_MASK, status);
	if (mask) {
		for_each_set_bit(i, &mask, ARRAY_SIZE(xdev->chan))
			xilinx_dpdma_chan_done_irq(xdev->chan[i]);
	}

	mask = FIELD_GET(XILINX_DPDMA_INTR_NO_OSTAND_MASK, status);
	if (mask) {
		for_each_set_bit(i, &mask, ARRAY_SIZE(xdev->chan))
			xilinx_dpdma_chan_notify_no_ostand(xdev->chan[i]);
	}

	mask = status & XILINX_DPDMA_INTR_ERR_ALL;
	if (mask || error)
		xilinx_dpdma_handle_err_irq(xdev, mask, error);

	return IRQ_HANDLED;
}

/* -----------------------------------------------------------------------------
 * Initialization & Cleanup
 */

static int xilinx_dpdma_chan_init(struct xilinx_dpdma_device *xdev,
				  unsigned int chan_id)
{
	struct xilinx_dpdma_chan *chan;

	chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return -ENOMEM;

	chan->id = chan_id;
	chan->reg = xdev->reg + XILINX_DPDMA_CH_BASE
		  + XILINX_DPDMA_CH_OFFSET * chan->id;
	chan->running = false;
	chan->xdev = xdev;

	spin_lock_init(&chan->lock);
	init_waitqueue_head(&chan->wait_to_stop);

	tasklet_init(&chan->err_task, xilinx_dpdma_chan_err_task,
		     (unsigned long)chan);

	chan->vchan.desc_free = xilinx_dpdma_chan_free_tx_desc;
	vchan_init(&chan->vchan, &xdev->common);

	xdev->chan[chan->id] = chan;

	return 0;
}

static void xilinx_dpdma_chan_remove(struct xilinx_dpdma_chan *chan)
{
	if (!chan)
		return;

	tasklet_kill(&chan->err_task);
	list_del(&chan->vchan.chan.device_node);
}

static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
					    struct of_dma *ofdma)
{
	struct xilinx_dpdma_device *xdev = ofdma->of_dma_data;
	uint32_t chan_id = dma_spec->args[0];

	if (chan_id >= ARRAY_SIZE(xdev->chan))
		return NULL;

	if (!xdev->chan[chan_id])
		return NULL;

	return dma_get_slave_channel(&xdev->chan[chan_id]->vchan.chan);
}

static int xilinx_dpdma_probe(struct platform_device *pdev)
{
	struct xilinx_dpdma_device *xdev;
	struct dma_device *ddev;
	unsigned int i;
	int ret;

	xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
	if (!xdev)
		return -ENOMEM;

	xdev->dev = &pdev->dev;
	xdev->ext_addr = sizeof(dma_addr_t) > 4;

	INIT_LIST_HEAD(&xdev->common.channels);

	platform_set_drvdata(pdev, xdev);

	xdev->axi_clk = devm_clk_get(xdev->dev, "axi_clk");
	if (IS_ERR(xdev->axi_clk))
		return PTR_ERR(xdev->axi_clk);

	xdev->reg = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(xdev->reg))
		return PTR_ERR(xdev->reg);

	xdev->irq = platform_get_irq(pdev, 0);
	if (xdev->irq < 0) {
		dev_err(xdev->dev, "failed to get platform irq\n");
		return xdev->irq;
	}

	ret = request_irq(xdev->irq, xilinx_dpdma_irq_handler, IRQF_SHARED,
			  dev_name(xdev->dev), xdev);
	if (ret) {
		dev_err(xdev->dev, "failed to request IRQ\n");
		return ret;
	}

	ddev = &xdev->common;
	ddev->dev = &pdev->dev;

	dma_cap_set(DMA_SLAVE, ddev->cap_mask);
	dma_cap_set(DMA_PRIVATE, ddev->cap_mask);
	dma_cap_set(DMA_INTERLEAVE, ddev->cap_mask);
	dma_cap_set(DMA_REPEAT, ddev->cap_mask);
	dma_cap_set(DMA_LOAD_EOT, ddev->cap_mask);
	ddev->copy_align = fls(XILINX_DPDMA_ALIGN_BYTES - 1);

	ddev->device_alloc_chan_resources = xilinx_dpdma_alloc_chan_resources;
	ddev->device_free_chan_resources = xilinx_dpdma_free_chan_resources;
	ddev->device_prep_interleaved_dma = xilinx_dpdma_prep_interleaved_dma;
	/* TODO: Can we achieve better granularity ? */
	ddev->device_tx_status = dma_cookie_status;
	ddev->device_issue_pending = xilinx_dpdma_issue_pending;
	ddev->device_config = xilinx_dpdma_config;
	ddev->device_pause = xilinx_dpdma_pause;
	ddev->device_resume = xilinx_dpdma_resume;
	ddev->device_terminate_all = xilinx_dpdma_terminate_all;
	ddev->device_synchronize = xilinx_dpdma_synchronize;
	ddev->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED);
	ddev->directions = BIT(DMA_MEM_TO_DEV);
	ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;

	for (i = 0; i < ARRAY_SIZE(xdev->chan); ++i) {
		ret = xilinx_dpdma_chan_init(xdev, i);
		if (ret < 0) {
			dev_err(xdev->dev, "failed to initialize channel %u\n",
				i);
			goto error;
		}
	}

	ret = clk_prepare_enable(xdev->axi_clk);
	if (ret) {
		dev_err(xdev->dev, "failed to enable the axi clock\n");
		goto error;
	}

	ret = dma_async_device_register(ddev);
	if (ret) {
		dev_err(xdev->dev, "failed to register the dma device\n");
		goto error_dma_async;
	}

	ret = of_dma_controller_register(xdev->dev->of_node,
					 of_dma_xilinx_xlate, ddev);
	if (ret) {
		dev_err(xdev->dev, "failed to register DMA to DT DMA helper\n");
		goto error_of_dma;
	}

	xilinx_dpdma_enable_irq(xdev);

	dev_info(&pdev->dev, "Xilinx DPDMA engine is probed\n");

	return 0;

error_of_dma:
	dma_async_device_unregister(ddev);
error_dma_async:
	clk_disable_unprepare(xdev->axi_clk);
error:
	for (i = 0; i < ARRAY_SIZE(xdev->chan); i++)
		xilinx_dpdma_chan_remove(xdev->chan[i]);

	free_irq(xdev->irq, xdev);

	return ret;
}

static int xilinx_dpdma_remove(struct platform_device *pdev)
{
	struct xilinx_dpdma_device *xdev = platform_get_drvdata(pdev);
	unsigned int i;

	/* Start by disabling the IRQ to avoid races during cleanup. */
	free_irq(xdev->irq, xdev);

	xilinx_dpdma_disable_irq(xdev);
	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&xdev->common);
	clk_disable_unprepare(xdev->axi_clk);

	for (i = 0; i < ARRAY_SIZE(xdev->chan); i++)
		xilinx_dpdma_chan_remove(xdev->chan[i]);

	return 0;
}

static const struct of_device_id xilinx_dpdma_of_match[] = {
	{ .compatible = "xlnx,zynqmp-dpdma",},
	{ /* end of table */ },
};
MODULE_DEVICE_TABLE(of, xilinx_dpdma_of_match);

static struct platform_driver xilinx_dpdma_driver = {
	.probe			= xilinx_dpdma_probe,
	.remove			= xilinx_dpdma_remove,
	.driver			= {
		.name		= "xilinx-zynqmp-dpdma",
		.of_match_table	= xilinx_dpdma_of_match,
	},
};

module_platform_driver(xilinx_dpdma_driver);

MODULE_AUTHOR("Xilinx, Inc.");
MODULE_DESCRIPTION("Xilinx ZynqMP DPDMA driver");
MODULE_LICENSE("GPL v2");
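
To tie the pieces together, here is a consumer-side sketch (not part of this commit) of submitting one framebuffer as a repeating frame on a video channel. The dpsub_display_frame() name and the geometry parameters are hypothetical; the constraints it honours (256-byte aligned source, DMA_MEM_TO_DEV only, mandatory DMA_PREP_REPEAT | DMA_PREP_LOAD_EOT, and the slave_id overload for video grouping) mirror the checks in xilinx_dpdma_prep_interleaved_dma() and xilinx_dpdma_config() above:

#include <linux/dmaengine.h>
#include <linux/kernel.h>
#include <linux/overflow.h>
#include <linux/slab.h>

static int dpsub_display_frame(struct dma_chan *chan, dma_addr_t fb_addr,
			       unsigned int line_bytes, unsigned int lines,
			       unsigned int stride_bytes)
{
	struct dma_async_tx_descriptor *tx;
	struct dma_interleaved_template *xt;
	struct dma_slave_config cfg = {
		/*
		 * The driver overloads slave_id: non-zero marks a video
		 * channel as part of the VIDEO0..VIDEO2 group (see
		 * xilinx_dpdma_config() above).
		 */
		.slave_id = 1,
	};
	int ret;

	/* The driver rejects buffers not aligned to 256 bytes. */
	if (!IS_ALIGNED(fb_addr, 256))
		return -EINVAL;

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		return ret;

	/* One chunk (scanline) per frame, repeated for every line. */
	xt = kzalloc(struct_size(xt, sgl, 1), GFP_KERNEL);
	if (!xt)
		return -ENOMEM;

	xt->dir = DMA_MEM_TO_DEV;	/* the only direction accepted */
	xt->src_start = fb_addr;
	xt->frame_size = 1;
	xt->numf = lines;
	xt->src_sgl = true;
	xt->sgl[0].size = line_bytes;
	xt->sgl[0].icg = stride_bytes - line_bytes;

	/*
	 * DMA_PREP_REPEAT | DMA_PREP_LOAD_EOT is mandatory: the frame
	 * repeats until a new descriptor replaces it at the end-of-transfer
	 * (VSYNC) point.
	 */
	tx = dmaengine_prep_interleaved_dma(chan, xt,
					    DMA_CTRL_ACK | DMA_PREP_REPEAT |
					    DMA_PREP_LOAD_EOT);
	kfree(xt);
	if (!tx)
		return -ENOMEM;

	dmaengine_submit(tx);
	dma_async_issue_pending(chan);

	return 0;
}

The template can be freed right after preparation because the driver copies the geometry into its own hardware descriptor during prep; only the descriptor returned by dmaengine_prep_interleaved_dma() stays live until completion.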