Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

dmaengine: tegra: Add tegra gpcdma driver

Add a GPC DMA controller driver for Tegra. The driver supports DMA
transfers from memory to memory, from an IO peripheral to memory, and
from memory to an IO peripheral.

Co-developed-by: Pavan Kunapuli <pkunapuli@nvidia.com>
Signed-off-by: Pavan Kunapuli <pkunapuli@nvidia.com>
Co-developed-by: Rajesh Gumasta <rgumasta@nvidia.com>
Signed-off-by: Rajesh Gumasta <rgumasta@nvidia.com>
Signed-off-by: Akhil R <akhilrajeev@nvidia.com>
Reviewed-by: Jon Hunter <jonathanh@nvidia.com>
Reviewed-by: Dmitry Osipenko <digetx@gmail.com>
Acked-by: Thierry Reding <treding@nvidia.com>
Link: https://lore.kernel.org/r/20220225132044.14478-3-akhilrajeev@nvidia.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>

authored by

Akhil R and committed by
Vinod Koul
ee170280 72910626

+1519
+11
drivers/dma/Kconfig
··· 629 629 Support the TXx9 SoC internal DMA controller. This can be 630 630 integrated in chips such as the Toshiba TX4927/38/39. 631 631 632 + config TEGRA186_GPC_DMA 633 + tristate "NVIDIA Tegra GPC DMA support" 634 + depends on (ARCH_TEGRA || COMPILE_TEST) && ARCH_DMA_ADDR_T_64BIT 635 + select DMA_ENGINE 636 + help 637 + Support for the NVIDIA Tegra General Purpose Central DMA controller. 638 + The DMA controller has multiple DMA channels which can be configured 639 + for different peripherals like UART, SPI, etc which are on APB bus. 640 + This DMA controller transfers data from memory to peripheral FIFO 641 + or vice versa. It also supports memory to memory data transfer. 642 + 632 643 config TEGRA20_APB_DMA 633 644 tristate "NVIDIA Tegra20 APB DMA support" 634 645 depends on ARCH_TEGRA || COMPILE_TEST
+1
drivers/dma/Makefile
obj-$(CONFIG_SPRD_DMA) += sprd-dma.o
obj-$(CONFIG_S3C24XX_DMAC) += s3c24xx-dma.o
obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
obj-$(CONFIG_TEGRA186_GPC_DMA) += tegra186-gpc-dma.o
obj-$(CONFIG_TEGRA20_APB_DMA) += tegra20-apb-dma.o
obj-$(CONFIG_TEGRA210_ADMA) += tegra210-adma.o
obj-$(CONFIG_TIMB_DMA) += timb_dma.o
+1507
drivers/dma/tegra186-gpc-dma.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /* 3 + * DMA driver for NVIDIA Tegra GPC DMA controller. 4 + * 5 + * Copyright (c) 2014-2022, NVIDIA CORPORATION. All rights reserved. 6 + */ 7 + 8 + #include <linux/bitfield.h> 9 + #include <linux/dmaengine.h> 10 + #include <linux/dma-mapping.h> 11 + #include <linux/interrupt.h> 12 + #include <linux/iommu.h> 13 + #include <linux/iopoll.h> 14 + #include <linux/minmax.h> 15 + #include <linux/module.h> 16 + #include <linux/of_device.h> 17 + #include <linux/of_dma.h> 18 + #include <linux/platform_device.h> 19 + #include <linux/reset.h> 20 + #include <linux/slab.h> 21 + #include <linux/version.h> 22 + #include <dt-bindings/memory/tegra186-mc.h> 23 + #include "virt-dma.h" 24 + 25 + /* CSR register */ 26 + #define TEGRA_GPCDMA_CHAN_CSR 0x00 27 + #define TEGRA_GPCDMA_CSR_ENB BIT(31) 28 + #define TEGRA_GPCDMA_CSR_IE_EOC BIT(30) 29 + #define TEGRA_GPCDMA_CSR_ONCE BIT(27) 30 + 31 + #define TEGRA_GPCDMA_CSR_FC_MODE GENMASK(25, 24) 32 + #define TEGRA_GPCDMA_CSR_FC_MODE_NO_MMIO \ 33 + FIELD_PREP(TEGRA_GPCDMA_CSR_FC_MODE, 0) 34 + #define TEGRA_GPCDMA_CSR_FC_MODE_ONE_MMIO \ 35 + FIELD_PREP(TEGRA_GPCDMA_CSR_FC_MODE, 1) 36 + #define TEGRA_GPCDMA_CSR_FC_MODE_TWO_MMIO \ 37 + FIELD_PREP(TEGRA_GPCDMA_CSR_FC_MODE, 2) 38 + #define TEGRA_GPCDMA_CSR_FC_MODE_FOUR_MMIO \ 39 + FIELD_PREP(TEGRA_GPCDMA_CSR_FC_MODE, 3) 40 + 41 + #define TEGRA_GPCDMA_CSR_DMA GENMASK(23, 21) 42 + #define TEGRA_GPCDMA_CSR_DMA_IO2MEM_NO_FC \ 43 + FIELD_PREP(TEGRA_GPCDMA_CSR_DMA, 0) 44 + #define TEGRA_GPCDMA_CSR_DMA_IO2MEM_FC \ 45 + FIELD_PREP(TEGRA_GPCDMA_CSR_DMA, 1) 46 + #define TEGRA_GPCDMA_CSR_DMA_MEM2IO_NO_FC \ 47 + FIELD_PREP(TEGRA_GPCDMA_CSR_DMA, 2) 48 + #define TEGRA_GPCDMA_CSR_DMA_MEM2IO_FC \ 49 + FIELD_PREP(TEGRA_GPCDMA_CSR_DMA, 3) 50 + #define TEGRA_GPCDMA_CSR_DMA_MEM2MEM \ 51 + FIELD_PREP(TEGRA_GPCDMA_CSR_DMA, 4) 52 + #define TEGRA_GPCDMA_CSR_DMA_FIXED_PAT \ 53 + FIELD_PREP(TEGRA_GPCDMA_CSR_DMA, 6) 54 + 55 + #define TEGRA_GPCDMA_CSR_REQ_SEL_MASK 
GENMASK(20, 16) 56 + #define TEGRA_GPCDMA_CSR_REQ_SEL_UNUSED \ 57 + FIELD_PREP(TEGRA_GPCDMA_CSR_REQ_SEL_MASK, 4) 58 + #define TEGRA_GPCDMA_CSR_IRQ_MASK BIT(15) 59 + #define TEGRA_GPCDMA_CSR_WEIGHT GENMASK(13, 10) 60 + 61 + /* STATUS register */ 62 + #define TEGRA_GPCDMA_CHAN_STATUS 0x004 63 + #define TEGRA_GPCDMA_STATUS_BUSY BIT(31) 64 + #define TEGRA_GPCDMA_STATUS_ISE_EOC BIT(30) 65 + #define TEGRA_GPCDMA_STATUS_PING_PONG BIT(28) 66 + #define TEGRA_GPCDMA_STATUS_DMA_ACTIVITY BIT(27) 67 + #define TEGRA_GPCDMA_STATUS_CHANNEL_PAUSE BIT(26) 68 + #define TEGRA_GPCDMA_STATUS_CHANNEL_RX BIT(25) 69 + #define TEGRA_GPCDMA_STATUS_CHANNEL_TX BIT(24) 70 + #define TEGRA_GPCDMA_STATUS_IRQ_INTR_STA BIT(23) 71 + #define TEGRA_GPCDMA_STATUS_IRQ_STA BIT(21) 72 + #define TEGRA_GPCDMA_STATUS_IRQ_TRIG_STA BIT(20) 73 + 74 + #define TEGRA_GPCDMA_CHAN_CSRE 0x008 75 + #define TEGRA_GPCDMA_CHAN_CSRE_PAUSE BIT(31) 76 + 77 + /* Source address */ 78 + #define TEGRA_GPCDMA_CHAN_SRC_PTR 0x00C 79 + 80 + /* Destination address */ 81 + #define TEGRA_GPCDMA_CHAN_DST_PTR 0x010 82 + 83 + /* High address pointer */ 84 + #define TEGRA_GPCDMA_CHAN_HIGH_ADDR_PTR 0x014 85 + #define TEGRA_GPCDMA_HIGH_ADDR_SRC_PTR GENMASK(7, 0) 86 + #define TEGRA_GPCDMA_HIGH_ADDR_DST_PTR GENMASK(23, 16) 87 + 88 + /* MC sequence register */ 89 + #define TEGRA_GPCDMA_CHAN_MCSEQ 0x18 90 + #define TEGRA_GPCDMA_MCSEQ_DATA_SWAP BIT(31) 91 + #define TEGRA_GPCDMA_MCSEQ_REQ_COUNT GENMASK(30, 25) 92 + #define TEGRA_GPCDMA_MCSEQ_BURST GENMASK(24, 23) 93 + #define TEGRA_GPCDMA_MCSEQ_BURST_2 \ 94 + FIELD_PREP(TEGRA_GPCDMA_MCSEQ_BURST, 0) 95 + #define TEGRA_GPCDMA_MCSEQ_BURST_16 \ 96 + FIELD_PREP(TEGRA_GPCDMA_MCSEQ_BURST, 3) 97 + #define TEGRA_GPCDMA_MCSEQ_WRAP1 GENMASK(22, 20) 98 + #define TEGRA_GPCDMA_MCSEQ_WRAP0 GENMASK(19, 17) 99 + #define TEGRA_GPCDMA_MCSEQ_WRAP_NONE 0 100 + 101 + #define TEGRA_GPCDMA_MCSEQ_STREAM_ID1_MASK GENMASK(13, 7) 102 + #define TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK GENMASK(6, 0) 103 + 104 + /* MMIO sequence 
register */ 105 + #define TEGRA_GPCDMA_CHAN_MMIOSEQ 0x01c 106 + #define TEGRA_GPCDMA_MMIOSEQ_DBL_BUF BIT(31) 107 + #define TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH GENMASK(30, 28) 108 + #define TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH_8 \ 109 + FIELD_PREP(TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH, 0) 110 + #define TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH_16 \ 111 + FIELD_PREP(TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH, 1) 112 + #define TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH_32 \ 113 + FIELD_PREP(TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH, 2) 114 + #define TEGRA_GPCDMA_MMIOSEQ_DATA_SWAP BIT(27) 115 + #define TEGRA_GPCDMA_MMIOSEQ_BURST_SHIFT 23 116 + #define TEGRA_GPCDMA_MMIOSEQ_BURST_MIN 2U 117 + #define TEGRA_GPCDMA_MMIOSEQ_BURST_MAX 32U 118 + #define TEGRA_GPCDMA_MMIOSEQ_BURST(bs) \ 119 + (GENMASK((fls(bs) - 2), 0) << TEGRA_GPCDMA_MMIOSEQ_BURST_SHIFT) 120 + #define TEGRA_GPCDMA_MMIOSEQ_MASTER_ID GENMASK(22, 19) 121 + #define TEGRA_GPCDMA_MMIOSEQ_WRAP_WORD GENMASK(18, 16) 122 + #define TEGRA_GPCDMA_MMIOSEQ_MMIO_PROT GENMASK(8, 7) 123 + 124 + /* Channel WCOUNT */ 125 + #define TEGRA_GPCDMA_CHAN_WCOUNT 0x20 126 + 127 + /* Transfer count */ 128 + #define TEGRA_GPCDMA_CHAN_XFER_COUNT 0x24 129 + 130 + /* DMA byte count status */ 131 + #define TEGRA_GPCDMA_CHAN_DMA_BYTE_STATUS 0x28 132 + 133 + /* Error Status Register */ 134 + #define TEGRA_GPCDMA_CHAN_ERR_STATUS 0x30 135 + #define TEGRA_GPCDMA_CHAN_ERR_TYPE_SHIFT 8 136 + #define TEGRA_GPCDMA_CHAN_ERR_TYPE_MASK 0xF 137 + #define TEGRA_GPCDMA_CHAN_ERR_TYPE(err) ( \ 138 + ((err) >> TEGRA_GPCDMA_CHAN_ERR_TYPE_SHIFT) & \ 139 + TEGRA_GPCDMA_CHAN_ERR_TYPE_MASK) 140 + #define TEGRA_DMA_BM_FIFO_FULL_ERR 0xF 141 + #define TEGRA_DMA_PERIPH_FIFO_FULL_ERR 0xE 142 + #define TEGRA_DMA_PERIPH_ID_ERR 0xD 143 + #define TEGRA_DMA_STREAM_ID_ERR 0xC 144 + #define TEGRA_DMA_MC_SLAVE_ERR 0xB 145 + #define TEGRA_DMA_MMIO_SLAVE_ERR 0xA 146 + 147 + /* Fixed Pattern */ 148 + #define TEGRA_GPCDMA_CHAN_FIXED_PATTERN 0x34 149 + 150 + #define TEGRA_GPCDMA_CHAN_TZ 0x38 151 + #define TEGRA_GPCDMA_CHAN_TZ_MMIO_PROT_1 BIT(0) 
152 + #define TEGRA_GPCDMA_CHAN_TZ_MC_PROT_1 BIT(1) 153 + 154 + #define TEGRA_GPCDMA_CHAN_SPARE 0x3c 155 + #define TEGRA_GPCDMA_CHAN_SPARE_EN_LEGACY_FC BIT(16) 156 + 157 + /* 158 + * If any burst is in flight and DMA paused then this is the time to complete 159 + * on-flight burst and update DMA status register. 160 + */ 161 + #define TEGRA_GPCDMA_BURST_COMPLETE_TIME 20 162 + #define TEGRA_GPCDMA_BURST_COMPLETION_TIMEOUT 100 163 + 164 + /* Channel base address offset from GPCDMA base address */ 165 + #define TEGRA_GPCDMA_CHANNEL_BASE_ADD_OFFSET 0x20000 166 + 167 + struct tegra_dma; 168 + struct tegra_dma_channel; 169 + 170 + /* 171 + * tegra_dma_chip_data Tegra chip specific DMA data 172 + * @nr_channels: Number of channels available in the controller. 173 + * @channel_reg_size: Channel register size. 174 + * @max_dma_count: Maximum DMA transfer count supported by DMA controller. 175 + * @hw_support_pause: DMA HW engine support pause of the channel. 176 + */ 177 + struct tegra_dma_chip_data { 178 + bool hw_support_pause; 179 + unsigned int nr_channels; 180 + unsigned int channel_reg_size; 181 + unsigned int max_dma_count; 182 + int (*terminate)(struct tegra_dma_channel *tdc); 183 + }; 184 + 185 + /* DMA channel registers */ 186 + struct tegra_dma_channel_regs { 187 + u32 csr; 188 + u32 src_ptr; 189 + u32 dst_ptr; 190 + u32 high_addr_ptr; 191 + u32 mc_seq; 192 + u32 mmio_seq; 193 + u32 wcount; 194 + u32 fixed_pattern; 195 + }; 196 + 197 + /* 198 + * tegra_dma_sg_req: DMA request details to configure hardware. This 199 + * contains the details for one transfer to configure DMA hw. 200 + * The client's request for data transfer can be broken into multiple 201 + * sub-transfer as per requester details and hw support. This sub transfer 202 + * get added as an array in Tegra DMA desc which manages the transfer details. 
203 + */ 204 + struct tegra_dma_sg_req { 205 + unsigned int len; 206 + struct tegra_dma_channel_regs ch_regs; 207 + }; 208 + 209 + /* 210 + * tegra_dma_desc: Tegra DMA descriptors which uses virt_dma_desc to 211 + * manage client request and keep track of transfer status, callbacks 212 + * and request counts etc. 213 + */ 214 + struct tegra_dma_desc { 215 + bool cyclic; 216 + unsigned int bytes_req; 217 + unsigned int bytes_xfer; 218 + unsigned int sg_idx; 219 + unsigned int sg_count; 220 + struct virt_dma_desc vd; 221 + struct tegra_dma_channel *tdc; 222 + struct tegra_dma_sg_req sg_req[]; 223 + }; 224 + 225 + /* 226 + * tegra_dma_channel: Channel specific information 227 + */ 228 + struct tegra_dma_channel { 229 + bool config_init; 230 + char name[30]; 231 + enum dma_transfer_direction sid_dir; 232 + int id; 233 + int irq; 234 + int slave_id; 235 + struct tegra_dma *tdma; 236 + struct virt_dma_chan vc; 237 + struct tegra_dma_desc *dma_desc; 238 + struct dma_slave_config dma_sconfig; 239 + unsigned int stream_id; 240 + unsigned long chan_base_offset; 241 + }; 242 + 243 + /* 244 + * tegra_dma: Tegra DMA specific information 245 + */ 246 + struct tegra_dma { 247 + const struct tegra_dma_chip_data *chip_data; 248 + unsigned long sid_m2d_reserved; 249 + unsigned long sid_d2m_reserved; 250 + void __iomem *base_addr; 251 + struct device *dev; 252 + struct dma_device dma_dev; 253 + struct reset_control *rst; 254 + struct tegra_dma_channel channels[]; 255 + }; 256 + 257 + static inline void tdc_write(struct tegra_dma_channel *tdc, 258 + u32 reg, u32 val) 259 + { 260 + writel_relaxed(val, tdc->tdma->base_addr + tdc->chan_base_offset + reg); 261 + } 262 + 263 + static inline u32 tdc_read(struct tegra_dma_channel *tdc, u32 reg) 264 + { 265 + return readl_relaxed(tdc->tdma->base_addr + tdc->chan_base_offset + reg); 266 + } 267 + 268 + static inline struct tegra_dma_channel *to_tegra_dma_chan(struct dma_chan *dc) 269 + { 270 + return container_of(dc, struct tegra_dma_channel, 
vc.chan); 271 + } 272 + 273 + static inline struct tegra_dma_desc *vd_to_tegra_dma_desc(struct virt_dma_desc *vd) 274 + { 275 + return container_of(vd, struct tegra_dma_desc, vd); 276 + } 277 + 278 + static inline struct device *tdc2dev(struct tegra_dma_channel *tdc) 279 + { 280 + return tdc->vc.chan.device->dev; 281 + } 282 + 283 + static void tegra_dma_dump_chan_regs(struct tegra_dma_channel *tdc) 284 + { 285 + dev_dbg(tdc2dev(tdc), "DMA Channel %d name %s register dump:\n", 286 + tdc->id, tdc->name); 287 + dev_dbg(tdc2dev(tdc), "CSR %x STA %x CSRE %x SRC %x DST %x\n", 288 + tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSR), 289 + tdc_read(tdc, TEGRA_GPCDMA_CHAN_STATUS), 290 + tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSRE), 291 + tdc_read(tdc, TEGRA_GPCDMA_CHAN_SRC_PTR), 292 + tdc_read(tdc, TEGRA_GPCDMA_CHAN_DST_PTR) 293 + ); 294 + dev_dbg(tdc2dev(tdc), "MCSEQ %x IOSEQ %x WCNT %x XFER %x BSTA %x\n", 295 + tdc_read(tdc, TEGRA_GPCDMA_CHAN_MCSEQ), 296 + tdc_read(tdc, TEGRA_GPCDMA_CHAN_MMIOSEQ), 297 + tdc_read(tdc, TEGRA_GPCDMA_CHAN_WCOUNT), 298 + tdc_read(tdc, TEGRA_GPCDMA_CHAN_XFER_COUNT), 299 + tdc_read(tdc, TEGRA_GPCDMA_CHAN_DMA_BYTE_STATUS) 300 + ); 301 + dev_dbg(tdc2dev(tdc), "DMA ERR_STA %x\n", 302 + tdc_read(tdc, TEGRA_GPCDMA_CHAN_ERR_STATUS)); 303 + } 304 + 305 + static int tegra_dma_sid_reserve(struct tegra_dma_channel *tdc, 306 + enum dma_transfer_direction direction) 307 + { 308 + struct tegra_dma *tdma = tdc->tdma; 309 + int sid = tdc->slave_id; 310 + 311 + if (!is_slave_direction(direction)) 312 + return 0; 313 + 314 + switch (direction) { 315 + case DMA_MEM_TO_DEV: 316 + if (test_and_set_bit(sid, &tdma->sid_m2d_reserved)) { 317 + dev_err(tdma->dev, "slave id already in use\n"); 318 + return -EINVAL; 319 + } 320 + break; 321 + case DMA_DEV_TO_MEM: 322 + if (test_and_set_bit(sid, &tdma->sid_d2m_reserved)) { 323 + dev_err(tdma->dev, "slave id already in use\n"); 324 + return -EINVAL; 325 + } 326 + break; 327 + default: 328 + break; 329 + } 330 + 331 + tdc->sid_dir = direction; 
332 + 333 + return 0; 334 + } 335 + 336 + static void tegra_dma_sid_free(struct tegra_dma_channel *tdc) 337 + { 338 + struct tegra_dma *tdma = tdc->tdma; 339 + int sid = tdc->slave_id; 340 + 341 + switch (tdc->sid_dir) { 342 + case DMA_MEM_TO_DEV: 343 + clear_bit(sid, &tdma->sid_m2d_reserved); 344 + break; 345 + case DMA_DEV_TO_MEM: 346 + clear_bit(sid, &tdma->sid_d2m_reserved); 347 + break; 348 + default: 349 + break; 350 + } 351 + 352 + tdc->sid_dir = DMA_TRANS_NONE; 353 + } 354 + 355 + static void tegra_dma_desc_free(struct virt_dma_desc *vd) 356 + { 357 + kfree(container_of(vd, struct tegra_dma_desc, vd)); 358 + } 359 + 360 + static int tegra_dma_slave_config(struct dma_chan *dc, 361 + struct dma_slave_config *sconfig) 362 + { 363 + struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); 364 + 365 + memcpy(&tdc->dma_sconfig, sconfig, sizeof(*sconfig)); 366 + tdc->config_init = true; 367 + 368 + return 0; 369 + } 370 + 371 + static int tegra_dma_pause(struct tegra_dma_channel *tdc) 372 + { 373 + int ret; 374 + u32 val; 375 + 376 + val = tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSRE); 377 + val |= TEGRA_GPCDMA_CHAN_CSRE_PAUSE; 378 + tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSRE, val); 379 + 380 + /* Wait until busy bit is de-asserted */ 381 + ret = readl_relaxed_poll_timeout_atomic(tdc->tdma->base_addr + 382 + tdc->chan_base_offset + TEGRA_GPCDMA_CHAN_STATUS, 383 + val, 384 + !(val & TEGRA_GPCDMA_STATUS_BUSY), 385 + TEGRA_GPCDMA_BURST_COMPLETE_TIME, 386 + TEGRA_GPCDMA_BURST_COMPLETION_TIMEOUT); 387 + 388 + if (ret) { 389 + dev_err(tdc2dev(tdc), "DMA pause timed out\n"); 390 + tegra_dma_dump_chan_regs(tdc); 391 + } 392 + 393 + return ret; 394 + } 395 + 396 + static int tegra_dma_device_pause(struct dma_chan *dc) 397 + { 398 + struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); 399 + unsigned long flags; 400 + int ret; 401 + 402 + if (!tdc->tdma->chip_data->hw_support_pause) 403 + return -ENOSYS; 404 + 405 + spin_lock_irqsave(&tdc->vc.lock, flags); 406 + ret = 
tegra_dma_pause(tdc); 407 + spin_unlock_irqrestore(&tdc->vc.lock, flags); 408 + 409 + return ret; 410 + } 411 + 412 + static void tegra_dma_resume(struct tegra_dma_channel *tdc) 413 + { 414 + u32 val; 415 + 416 + val = tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSRE); 417 + val &= ~TEGRA_GPCDMA_CHAN_CSRE_PAUSE; 418 + tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSRE, val); 419 + } 420 + 421 + static int tegra_dma_device_resume(struct dma_chan *dc) 422 + { 423 + struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); 424 + unsigned long flags; 425 + 426 + if (!tdc->tdma->chip_data->hw_support_pause) 427 + return -ENOSYS; 428 + 429 + spin_lock_irqsave(&tdc->vc.lock, flags); 430 + tegra_dma_resume(tdc); 431 + spin_unlock_irqrestore(&tdc->vc.lock, flags); 432 + 433 + return 0; 434 + } 435 + 436 + static void tegra_dma_disable(struct tegra_dma_channel *tdc) 437 + { 438 + u32 csr, status; 439 + 440 + csr = tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSR); 441 + 442 + /* Disable interrupts */ 443 + csr &= ~TEGRA_GPCDMA_CSR_IE_EOC; 444 + 445 + /* Disable DMA */ 446 + csr &= ~TEGRA_GPCDMA_CSR_ENB; 447 + tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSR, csr); 448 + 449 + /* Clear interrupt status if it is there */ 450 + status = tdc_read(tdc, TEGRA_GPCDMA_CHAN_STATUS); 451 + if (status & TEGRA_GPCDMA_STATUS_ISE_EOC) { 452 + dev_dbg(tdc2dev(tdc), "%s():clearing interrupt\n", __func__); 453 + tdc_write(tdc, TEGRA_GPCDMA_CHAN_STATUS, status); 454 + } 455 + } 456 + 457 + static void tegra_dma_configure_next_sg(struct tegra_dma_channel *tdc) 458 + { 459 + struct tegra_dma_desc *dma_desc = tdc->dma_desc; 460 + struct tegra_dma_channel_regs *ch_regs; 461 + int ret; 462 + u32 val; 463 + 464 + dma_desc->sg_idx++; 465 + 466 + /* Reset the sg index for cyclic transfers */ 467 + if (dma_desc->sg_idx == dma_desc->sg_count) 468 + dma_desc->sg_idx = 0; 469 + 470 + /* Configure next transfer immediately after DMA is busy */ 471 + ret = readl_relaxed_poll_timeout_atomic(tdc->tdma->base_addr + 472 + tdc->chan_base_offset + 
TEGRA_GPCDMA_CHAN_STATUS, 473 + val, 474 + (val & TEGRA_GPCDMA_STATUS_BUSY), 0, 475 + TEGRA_GPCDMA_BURST_COMPLETION_TIMEOUT); 476 + if (ret) 477 + return; 478 + 479 + ch_regs = &dma_desc->sg_req[dma_desc->sg_idx].ch_regs; 480 + 481 + tdc_write(tdc, TEGRA_GPCDMA_CHAN_WCOUNT, ch_regs->wcount); 482 + tdc_write(tdc, TEGRA_GPCDMA_CHAN_SRC_PTR, ch_regs->src_ptr); 483 + tdc_write(tdc, TEGRA_GPCDMA_CHAN_DST_PTR, ch_regs->dst_ptr); 484 + tdc_write(tdc, TEGRA_GPCDMA_CHAN_HIGH_ADDR_PTR, ch_regs->high_addr_ptr); 485 + 486 + /* Start DMA */ 487 + tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSR, 488 + ch_regs->csr | TEGRA_GPCDMA_CSR_ENB); 489 + } 490 + 491 + static void tegra_dma_start(struct tegra_dma_channel *tdc) 492 + { 493 + struct tegra_dma_desc *dma_desc = tdc->dma_desc; 494 + struct tegra_dma_channel_regs *ch_regs; 495 + struct virt_dma_desc *vdesc; 496 + 497 + if (!dma_desc) { 498 + vdesc = vchan_next_desc(&tdc->vc); 499 + if (!vdesc) 500 + return; 501 + 502 + dma_desc = vd_to_tegra_dma_desc(vdesc); 503 + list_del(&vdesc->node); 504 + dma_desc->tdc = tdc; 505 + tdc->dma_desc = dma_desc; 506 + 507 + tegra_dma_resume(tdc); 508 + } 509 + 510 + ch_regs = &dma_desc->sg_req[dma_desc->sg_idx].ch_regs; 511 + 512 + tdc_write(tdc, TEGRA_GPCDMA_CHAN_WCOUNT, ch_regs->wcount); 513 + tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSR, 0); 514 + tdc_write(tdc, TEGRA_GPCDMA_CHAN_SRC_PTR, ch_regs->src_ptr); 515 + tdc_write(tdc, TEGRA_GPCDMA_CHAN_DST_PTR, ch_regs->dst_ptr); 516 + tdc_write(tdc, TEGRA_GPCDMA_CHAN_HIGH_ADDR_PTR, ch_regs->high_addr_ptr); 517 + tdc_write(tdc, TEGRA_GPCDMA_CHAN_FIXED_PATTERN, ch_regs->fixed_pattern); 518 + tdc_write(tdc, TEGRA_GPCDMA_CHAN_MMIOSEQ, ch_regs->mmio_seq); 519 + tdc_write(tdc, TEGRA_GPCDMA_CHAN_MCSEQ, ch_regs->mc_seq); 520 + tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSR, ch_regs->csr); 521 + 522 + /* Start DMA */ 523 + tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSR, 524 + ch_regs->csr | TEGRA_GPCDMA_CSR_ENB); 525 + } 526 + 527 + static void tegra_dma_xfer_complete(struct tegra_dma_channel 
*tdc) 528 + { 529 + vchan_cookie_complete(&tdc->dma_desc->vd); 530 + 531 + tegra_dma_sid_free(tdc); 532 + tdc->dma_desc = NULL; 533 + } 534 + 535 + static void tegra_dma_chan_decode_error(struct tegra_dma_channel *tdc, 536 + unsigned int err_status) 537 + { 538 + switch (TEGRA_GPCDMA_CHAN_ERR_TYPE(err_status)) { 539 + case TEGRA_DMA_BM_FIFO_FULL_ERR: 540 + dev_err(tdc->tdma->dev, 541 + "GPCDMA CH%d bm fifo full\n", tdc->id); 542 + break; 543 + 544 + case TEGRA_DMA_PERIPH_FIFO_FULL_ERR: 545 + dev_err(tdc->tdma->dev, 546 + "GPCDMA CH%d peripheral fifo full\n", tdc->id); 547 + break; 548 + 549 + case TEGRA_DMA_PERIPH_ID_ERR: 550 + dev_err(tdc->tdma->dev, 551 + "GPCDMA CH%d illegal peripheral id\n", tdc->id); 552 + break; 553 + 554 + case TEGRA_DMA_STREAM_ID_ERR: 555 + dev_err(tdc->tdma->dev, 556 + "GPCDMA CH%d illegal stream id\n", tdc->id); 557 + break; 558 + 559 + case TEGRA_DMA_MC_SLAVE_ERR: 560 + dev_err(tdc->tdma->dev, 561 + "GPCDMA CH%d mc slave error\n", tdc->id); 562 + break; 563 + 564 + case TEGRA_DMA_MMIO_SLAVE_ERR: 565 + dev_err(tdc->tdma->dev, 566 + "GPCDMA CH%d mmio slave error\n", tdc->id); 567 + break; 568 + 569 + default: 570 + dev_err(tdc->tdma->dev, 571 + "GPCDMA CH%d security violation %x\n", tdc->id, 572 + err_status); 573 + } 574 + } 575 + 576 + static irqreturn_t tegra_dma_isr(int irq, void *dev_id) 577 + { 578 + struct tegra_dma_channel *tdc = dev_id; 579 + struct tegra_dma_desc *dma_desc = tdc->dma_desc; 580 + struct tegra_dma_sg_req *sg_req; 581 + u32 status; 582 + 583 + /* Check channel error status register */ 584 + status = tdc_read(tdc, TEGRA_GPCDMA_CHAN_ERR_STATUS); 585 + if (status) { 586 + tegra_dma_chan_decode_error(tdc, status); 587 + tegra_dma_dump_chan_regs(tdc); 588 + tdc_write(tdc, TEGRA_GPCDMA_CHAN_ERR_STATUS, 0xFFFFFFFF); 589 + } 590 + 591 + spin_lock(&tdc->vc.lock); 592 + status = tdc_read(tdc, TEGRA_GPCDMA_CHAN_STATUS); 593 + if (!(status & TEGRA_GPCDMA_STATUS_ISE_EOC)) 594 + goto irq_done; 595 + 596 + tdc_write(tdc, 
TEGRA_GPCDMA_CHAN_STATUS, 597 + TEGRA_GPCDMA_STATUS_ISE_EOC); 598 + 599 + if (!dma_desc) 600 + goto irq_done; 601 + 602 + sg_req = dma_desc->sg_req; 603 + dma_desc->bytes_xfer += sg_req[dma_desc->sg_idx].len; 604 + 605 + if (dma_desc->cyclic) { 606 + vchan_cyclic_callback(&dma_desc->vd); 607 + tegra_dma_configure_next_sg(tdc); 608 + } else { 609 + dma_desc->sg_idx++; 610 + if (dma_desc->sg_idx == dma_desc->sg_count) 611 + tegra_dma_xfer_complete(tdc); 612 + else 613 + tegra_dma_start(tdc); 614 + } 615 + 616 + irq_done: 617 + spin_unlock(&tdc->vc.lock); 618 + return IRQ_HANDLED; 619 + } 620 + 621 + static void tegra_dma_issue_pending(struct dma_chan *dc) 622 + { 623 + struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); 624 + unsigned long flags; 625 + 626 + if (tdc->dma_desc) 627 + return; 628 + 629 + spin_lock_irqsave(&tdc->vc.lock, flags); 630 + if (vchan_issue_pending(&tdc->vc)) 631 + tegra_dma_start(tdc); 632 + 633 + /* 634 + * For cyclic DMA transfers, program the second 635 + * transfer parameters as soon as the first DMA 636 + * transfer is started inorder for the DMA 637 + * controller to trigger the second transfer 638 + * with the correct parameters. 
639 + */ 640 + if (tdc->dma_desc && tdc->dma_desc->cyclic) 641 + tegra_dma_configure_next_sg(tdc); 642 + 643 + spin_unlock_irqrestore(&tdc->vc.lock, flags); 644 + } 645 + 646 + static int tegra_dma_stop_client(struct tegra_dma_channel *tdc) 647 + { 648 + int ret; 649 + u32 status, csr; 650 + 651 + /* 652 + * Change the client associated with the DMA channel 653 + * to stop DMA engine from starting any more bursts for 654 + * the given client and wait for in flight bursts to complete 655 + */ 656 + csr = tdc_read(tdc, TEGRA_GPCDMA_CHAN_CSR); 657 + csr &= ~(TEGRA_GPCDMA_CSR_REQ_SEL_MASK); 658 + csr |= TEGRA_GPCDMA_CSR_REQ_SEL_UNUSED; 659 + tdc_write(tdc, TEGRA_GPCDMA_CHAN_CSR, csr); 660 + 661 + /* Wait for in flight data transfer to finish */ 662 + udelay(TEGRA_GPCDMA_BURST_COMPLETE_TIME); 663 + 664 + /* If TX/RX path is still active wait till it becomes 665 + * inactive 666 + */ 667 + 668 + ret = readl_relaxed_poll_timeout_atomic(tdc->tdma->base_addr + 669 + tdc->chan_base_offset + 670 + TEGRA_GPCDMA_CHAN_STATUS, 671 + status, 672 + !(status & (TEGRA_GPCDMA_STATUS_CHANNEL_TX | 673 + TEGRA_GPCDMA_STATUS_CHANNEL_RX)), 674 + 5, 675 + TEGRA_GPCDMA_BURST_COMPLETION_TIMEOUT); 676 + if (ret) { 677 + dev_err(tdc2dev(tdc), "Timeout waiting for DMA burst completion!\n"); 678 + tegra_dma_dump_chan_regs(tdc); 679 + } 680 + 681 + return ret; 682 + } 683 + 684 + static int tegra_dma_terminate_all(struct dma_chan *dc) 685 + { 686 + struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); 687 + unsigned long flags; 688 + LIST_HEAD(head); 689 + int err; 690 + 691 + spin_lock_irqsave(&tdc->vc.lock, flags); 692 + 693 + if (tdc->dma_desc) { 694 + err = tdc->tdma->chip_data->terminate(tdc); 695 + if (err) { 696 + spin_unlock_irqrestore(&tdc->vc.lock, flags); 697 + return err; 698 + } 699 + 700 + tegra_dma_disable(tdc); 701 + tdc->dma_desc = NULL; 702 + } 703 + 704 + tegra_dma_sid_free(tdc); 705 + vchan_get_all_descriptors(&tdc->vc, &head); 706 + spin_unlock_irqrestore(&tdc->vc.lock, 
flags); 707 + 708 + vchan_dma_desc_free_list(&tdc->vc, &head); 709 + 710 + return 0; 711 + } 712 + 713 + static int tegra_dma_get_residual(struct tegra_dma_channel *tdc) 714 + { 715 + struct tegra_dma_desc *dma_desc = tdc->dma_desc; 716 + struct tegra_dma_sg_req *sg_req = dma_desc->sg_req; 717 + unsigned int bytes_xfer, residual; 718 + u32 wcount = 0, status; 719 + 720 + wcount = tdc_read(tdc, TEGRA_GPCDMA_CHAN_XFER_COUNT); 721 + 722 + /* 723 + * Set wcount = 0 if EOC bit is set. The transfer would have 724 + * already completed and the CHAN_XFER_COUNT could have updated 725 + * for the next transfer, specifically in case of cyclic transfers. 726 + */ 727 + status = tdc_read(tdc, TEGRA_GPCDMA_CHAN_STATUS); 728 + if (status & TEGRA_GPCDMA_STATUS_ISE_EOC) 729 + wcount = 0; 730 + 731 + bytes_xfer = dma_desc->bytes_xfer + 732 + sg_req[dma_desc->sg_idx].len - (wcount * 4); 733 + 734 + residual = dma_desc->bytes_req - (bytes_xfer % dma_desc->bytes_req); 735 + 736 + return residual; 737 + } 738 + 739 + static enum dma_status tegra_dma_tx_status(struct dma_chan *dc, 740 + dma_cookie_t cookie, 741 + struct dma_tx_state *txstate) 742 + { 743 + struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); 744 + struct tegra_dma_desc *dma_desc; 745 + struct virt_dma_desc *vd; 746 + unsigned int residual; 747 + unsigned long flags; 748 + enum dma_status ret; 749 + 750 + ret = dma_cookie_status(dc, cookie, txstate); 751 + if (ret == DMA_COMPLETE) 752 + return ret; 753 + 754 + spin_lock_irqsave(&tdc->vc.lock, flags); 755 + vd = vchan_find_desc(&tdc->vc, cookie); 756 + if (vd) { 757 + dma_desc = vd_to_tegra_dma_desc(vd); 758 + residual = dma_desc->bytes_req; 759 + dma_set_residue(txstate, residual); 760 + } else if (tdc->dma_desc && tdc->dma_desc->vd.tx.cookie == cookie) { 761 + residual = tegra_dma_get_residual(tdc); 762 + dma_set_residue(txstate, residual); 763 + } else { 764 + dev_err(tdc2dev(tdc), "cookie %d is not found\n", cookie); 765 + } 766 + 
spin_unlock_irqrestore(&tdc->vc.lock, flags); 767 + 768 + return ret; 769 + } 770 + 771 + static inline int get_bus_width(struct tegra_dma_channel *tdc, 772 + enum dma_slave_buswidth slave_bw) 773 + { 774 + switch (slave_bw) { 775 + case DMA_SLAVE_BUSWIDTH_1_BYTE: 776 + return TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH_8; 777 + case DMA_SLAVE_BUSWIDTH_2_BYTES: 778 + return TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH_16; 779 + case DMA_SLAVE_BUSWIDTH_4_BYTES: 780 + return TEGRA_GPCDMA_MMIOSEQ_BUS_WIDTH_32; 781 + default: 782 + dev_err(tdc2dev(tdc), "given slave bus width is not supported\n"); 783 + return -EINVAL; 784 + } 785 + } 786 + 787 + static unsigned int get_burst_size(struct tegra_dma_channel *tdc, 788 + u32 burst_size, enum dma_slave_buswidth slave_bw, 789 + int len) 790 + { 791 + unsigned int burst_mmio_width, burst_byte; 792 + 793 + /* 794 + * burst_size from client is in terms of the bus_width. 795 + * convert that into words. 796 + * If burst_size is not specified from client, then use 797 + * len to calculate the optimum burst size 798 + */ 799 + burst_byte = burst_size ? 
burst_size * slave_bw : len; 800 + burst_mmio_width = burst_byte / 4; 801 + 802 + if (burst_mmio_width < TEGRA_GPCDMA_MMIOSEQ_BURST_MIN) 803 + return 0; 804 + 805 + burst_mmio_width = min(burst_mmio_width, TEGRA_GPCDMA_MMIOSEQ_BURST_MAX); 806 + 807 + return TEGRA_GPCDMA_MMIOSEQ_BURST(burst_mmio_width); 808 + } 809 + 810 + static int get_transfer_param(struct tegra_dma_channel *tdc, 811 + enum dma_transfer_direction direction, 812 + u32 *apb_addr, 813 + u32 *mmio_seq, 814 + u32 *csr, 815 + unsigned int *burst_size, 816 + enum dma_slave_buswidth *slave_bw) 817 + { 818 + switch (direction) { 819 + case DMA_MEM_TO_DEV: 820 + *apb_addr = tdc->dma_sconfig.dst_addr; 821 + *mmio_seq = get_bus_width(tdc, tdc->dma_sconfig.dst_addr_width); 822 + *burst_size = tdc->dma_sconfig.dst_maxburst; 823 + *slave_bw = tdc->dma_sconfig.dst_addr_width; 824 + *csr = TEGRA_GPCDMA_CSR_DMA_MEM2IO_FC; 825 + return 0; 826 + case DMA_DEV_TO_MEM: 827 + *apb_addr = tdc->dma_sconfig.src_addr; 828 + *mmio_seq = get_bus_width(tdc, tdc->dma_sconfig.src_addr_width); 829 + *burst_size = tdc->dma_sconfig.src_maxburst; 830 + *slave_bw = tdc->dma_sconfig.src_addr_width; 831 + *csr = TEGRA_GPCDMA_CSR_DMA_IO2MEM_FC; 832 + return 0; 833 + case DMA_MEM_TO_MEM: 834 + *burst_size = tdc->dma_sconfig.src_addr_width; 835 + *csr = TEGRA_GPCDMA_CSR_DMA_MEM2MEM; 836 + return 0; 837 + default: 838 + dev_err(tdc2dev(tdc), "DMA direction is not supported\n"); 839 + } 840 + 841 + return -EINVAL; 842 + } 843 + 844 + static struct dma_async_tx_descriptor * 845 + tegra_dma_prep_dma_memset(struct dma_chan *dc, dma_addr_t dest, int value, 846 + size_t len, unsigned long flags) 847 + { 848 + struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); 849 + unsigned int max_dma_count = tdc->tdma->chip_data->max_dma_count; 850 + struct tegra_dma_sg_req *sg_req; 851 + struct tegra_dma_desc *dma_desc; 852 + u32 csr, mc_seq; 853 + 854 + if ((len & 3) || (dest & 3) || len > max_dma_count) { 855 + dev_err(tdc2dev(tdc), 856 + "DMA 
length/memory address is not supported\n"); 857 + return NULL; 858 + } 859 + 860 + /* Set DMA mode to fixed pattern */ 861 + csr = TEGRA_GPCDMA_CSR_DMA_FIXED_PAT; 862 + /* Enable once or continuous mode */ 863 + csr |= TEGRA_GPCDMA_CSR_ONCE; 864 + /* Enable IRQ mask */ 865 + csr |= TEGRA_GPCDMA_CSR_IRQ_MASK; 866 + /* Enable the DMA interrupt */ 867 + if (flags & DMA_PREP_INTERRUPT) 868 + csr |= TEGRA_GPCDMA_CSR_IE_EOC; 869 + /* Configure default priority weight for the channel */ 870 + csr |= FIELD_PREP(TEGRA_GPCDMA_CSR_WEIGHT, 1); 871 + 872 + mc_seq = tdc_read(tdc, TEGRA_GPCDMA_CHAN_MCSEQ); 873 + /* retain stream-id and clean rest */ 874 + mc_seq &= TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK; 875 + 876 + /* Set the address wrapping */ 877 + mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP0, 878 + TEGRA_GPCDMA_MCSEQ_WRAP_NONE); 879 + mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP1, 880 + TEGRA_GPCDMA_MCSEQ_WRAP_NONE); 881 + 882 + /* Program outstanding MC requests */ 883 + mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_REQ_COUNT, 1); 884 + /* Set burst size */ 885 + mc_seq |= TEGRA_GPCDMA_MCSEQ_BURST_16; 886 + 887 + dma_desc = kzalloc(struct_size(dma_desc, sg_req, 1), GFP_NOWAIT); 888 + if (!dma_desc) 889 + return NULL; 890 + 891 + dma_desc->bytes_req = len; 892 + dma_desc->sg_count = 1; 893 + sg_req = dma_desc->sg_req; 894 + 895 + sg_req[0].ch_regs.src_ptr = 0; 896 + sg_req[0].ch_regs.dst_ptr = dest; 897 + sg_req[0].ch_regs.high_addr_ptr = 898 + FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_DST_PTR, (dest >> 32)); 899 + sg_req[0].ch_regs.fixed_pattern = value; 900 + /* Word count reg takes value as (N +1) words */ 901 + sg_req[0].ch_regs.wcount = ((len - 4) >> 2); 902 + sg_req[0].ch_regs.csr = csr; 903 + sg_req[0].ch_regs.mmio_seq = 0; 904 + sg_req[0].ch_regs.mc_seq = mc_seq; 905 + sg_req[0].len = len; 906 + 907 + dma_desc->cyclic = false; 908 + return vchan_tx_prep(&tdc->vc, &dma_desc->vd, flags); 909 + } 910 + 911 + static struct dma_async_tx_descriptor * 912 + tegra_dma_prep_dma_memcpy(struct 
dma_chan *dc, dma_addr_t dest, 913 + dma_addr_t src, size_t len, unsigned long flags) 914 + { 915 + struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); 916 + struct tegra_dma_sg_req *sg_req; 917 + struct tegra_dma_desc *dma_desc; 918 + unsigned int max_dma_count; 919 + u32 csr, mc_seq; 920 + 921 + max_dma_count = tdc->tdma->chip_data->max_dma_count; 922 + if ((len & 3) || (src & 3) || (dest & 3) || len > max_dma_count) { 923 + dev_err(tdc2dev(tdc), 924 + "DMA length/memory address is not supported\n"); 925 + return NULL; 926 + } 927 + 928 + /* Set DMA mode to memory to memory transfer */ 929 + csr = TEGRA_GPCDMA_CSR_DMA_MEM2MEM; 930 + /* Enable once or continuous mode */ 931 + csr |= TEGRA_GPCDMA_CSR_ONCE; 932 + /* Enable IRQ mask */ 933 + csr |= TEGRA_GPCDMA_CSR_IRQ_MASK; 934 + /* Enable the DMA interrupt */ 935 + if (flags & DMA_PREP_INTERRUPT) 936 + csr |= TEGRA_GPCDMA_CSR_IE_EOC; 937 + /* Configure default priority weight for the channel */ 938 + csr |= FIELD_PREP(TEGRA_GPCDMA_CSR_WEIGHT, 1); 939 + 940 + mc_seq = tdc_read(tdc, TEGRA_GPCDMA_CHAN_MCSEQ); 941 + /* retain stream-id and clean rest */ 942 + mc_seq &= (TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK) | 943 + (TEGRA_GPCDMA_MCSEQ_STREAM_ID1_MASK); 944 + 945 + /* Set the address wrapping */ 946 + mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP0, 947 + TEGRA_GPCDMA_MCSEQ_WRAP_NONE); 948 + mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP1, 949 + TEGRA_GPCDMA_MCSEQ_WRAP_NONE); 950 + 951 + /* Program outstanding MC requests */ 952 + mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_REQ_COUNT, 1); 953 + /* Set burst size */ 954 + mc_seq |= TEGRA_GPCDMA_MCSEQ_BURST_16; 955 + 956 + dma_desc = kzalloc(struct_size(dma_desc, sg_req, 1), GFP_NOWAIT); 957 + if (!dma_desc) 958 + return NULL; 959 + 960 + dma_desc->bytes_req = len; 961 + dma_desc->sg_count = 1; 962 + sg_req = dma_desc->sg_req; 963 + 964 + sg_req[0].ch_regs.src_ptr = src; 965 + sg_req[0].ch_regs.dst_ptr = dest; 966 + sg_req[0].ch_regs.high_addr_ptr = 967 + 
FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_SRC_PTR, (src >> 32)); 968 + sg_req[0].ch_regs.high_addr_ptr |= 969 + FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_DST_PTR, (dest >> 32)); 970 + /* Word count reg takes value as (N +1) words */ 971 + sg_req[0].ch_regs.wcount = ((len - 4) >> 2); 972 + sg_req[0].ch_regs.csr = csr; 973 + sg_req[0].ch_regs.mmio_seq = 0; 974 + sg_req[0].ch_regs.mc_seq = mc_seq; 975 + sg_req[0].len = len; 976 + 977 + dma_desc->cyclic = false; 978 + return vchan_tx_prep(&tdc->vc, &dma_desc->vd, flags); 979 + } 980 + 981 + static struct dma_async_tx_descriptor * 982 + tegra_dma_prep_slave_sg(struct dma_chan *dc, struct scatterlist *sgl, 983 + unsigned int sg_len, enum dma_transfer_direction direction, 984 + unsigned long flags, void *context) 985 + { 986 + struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); 987 + unsigned int max_dma_count = tdc->tdma->chip_data->max_dma_count; 988 + u32 csr, mc_seq, apb_ptr = 0, mmio_seq = 0; 989 + enum dma_slave_buswidth slave_bw; 990 + struct tegra_dma_sg_req *sg_req; 991 + struct tegra_dma_desc *dma_desc; 992 + struct scatterlist *sg; 993 + u32 burst_size; 994 + unsigned int i; 995 + int ret; 996 + 997 + if (!tdc->config_init) { 998 + dev_err(tdc2dev(tdc), "DMA channel is not configured\n"); 999 + return NULL; 1000 + } 1001 + if (sg_len < 1) { 1002 + dev_err(tdc2dev(tdc), "Invalid segment length %d\n", sg_len); 1003 + return NULL; 1004 + } 1005 + 1006 + ret = tegra_dma_sid_reserve(tdc, direction); 1007 + if (ret) 1008 + return NULL; 1009 + 1010 + ret = get_transfer_param(tdc, direction, &apb_ptr, &mmio_seq, &csr, 1011 + &burst_size, &slave_bw); 1012 + if (ret < 0) 1013 + return NULL; 1014 + 1015 + /* Enable once or continuous mode */ 1016 + csr |= TEGRA_GPCDMA_CSR_ONCE; 1017 + /* Program the slave id in requestor select */ 1018 + csr |= FIELD_PREP(TEGRA_GPCDMA_CSR_REQ_SEL_MASK, tdc->slave_id); 1019 + /* Enable IRQ mask */ 1020 + csr |= TEGRA_GPCDMA_CSR_IRQ_MASK; 1021 + /* Configure default priority weight for the channel*/ 
1022 + csr |= FIELD_PREP(TEGRA_GPCDMA_CSR_WEIGHT, 1); 1023 + 1024 + /* Enable the DMA interrupt */ 1025 + if (flags & DMA_PREP_INTERRUPT) 1026 + csr |= TEGRA_GPCDMA_CSR_IE_EOC; 1027 + 1028 + mc_seq = tdc_read(tdc, TEGRA_GPCDMA_CHAN_MCSEQ); 1029 + /* retain stream-id and clean rest */ 1030 + mc_seq &= TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK; 1031 + 1032 + /* Set the address wrapping on both MC and MMIO side */ 1033 + 1034 + mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP0, 1035 + TEGRA_GPCDMA_MCSEQ_WRAP_NONE); 1036 + mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP1, 1037 + TEGRA_GPCDMA_MCSEQ_WRAP_NONE); 1038 + mmio_seq |= FIELD_PREP(TEGRA_GPCDMA_MMIOSEQ_WRAP_WORD, 1); 1039 + 1040 + /* Program 2 MC outstanding requests by default. */ 1041 + mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_REQ_COUNT, 1); 1042 + 1043 + /* Setting MC burst size depending on MMIO burst size */ 1044 + if (burst_size == 64) 1045 + mc_seq |= TEGRA_GPCDMA_MCSEQ_BURST_16; 1046 + else 1047 + mc_seq |= TEGRA_GPCDMA_MCSEQ_BURST_2; 1048 + 1049 + dma_desc = kzalloc(struct_size(dma_desc, sg_req, sg_len), GFP_NOWAIT); 1050 + if (!dma_desc) 1051 + return NULL; 1052 + 1053 + dma_desc->sg_count = sg_len; 1054 + sg_req = dma_desc->sg_req; 1055 + 1056 + /* Make transfer requests */ 1057 + for_each_sg(sgl, sg, sg_len, i) { 1058 + u32 len; 1059 + dma_addr_t mem; 1060 + 1061 + mem = sg_dma_address(sg); 1062 + len = sg_dma_len(sg); 1063 + 1064 + if ((len & 3) || (mem & 3) || len > max_dma_count) { 1065 + dev_err(tdc2dev(tdc), 1066 + "DMA length/memory address is not supported\n"); 1067 + kfree(dma_desc); 1068 + return NULL; 1069 + } 1070 + 1071 + mmio_seq |= get_burst_size(tdc, burst_size, slave_bw, len); 1072 + dma_desc->bytes_req += len; 1073 + 1074 + if (direction == DMA_MEM_TO_DEV) { 1075 + sg_req[i].ch_regs.src_ptr = mem; 1076 + sg_req[i].ch_regs.dst_ptr = apb_ptr; 1077 + sg_req[i].ch_regs.high_addr_ptr = 1078 + FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_SRC_PTR, (mem >> 32)); 1079 + } else if (direction == DMA_DEV_TO_MEM) { 1080 + 
sg_req[i].ch_regs.src_ptr = apb_ptr; 1081 + sg_req[i].ch_regs.dst_ptr = mem; 1082 + sg_req[i].ch_regs.high_addr_ptr = 1083 + FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_DST_PTR, (mem >> 32)); 1084 + } 1085 + 1086 + /* 1087 + * Word count register takes input in words. Writing a value 1088 + * of N into word count register means a req of (N+1) words. 1089 + */ 1090 + sg_req[i].ch_regs.wcount = ((len - 4) >> 2); 1091 + sg_req[i].ch_regs.csr = csr; 1092 + sg_req[i].ch_regs.mmio_seq = mmio_seq; 1093 + sg_req[i].ch_regs.mc_seq = mc_seq; 1094 + sg_req[i].len = len; 1095 + } 1096 + 1097 + dma_desc->cyclic = false; 1098 + return vchan_tx_prep(&tdc->vc, &dma_desc->vd, flags); 1099 + } 1100 + 1101 + static struct dma_async_tx_descriptor * 1102 + tegra_dma_prep_dma_cyclic(struct dma_chan *dc, dma_addr_t buf_addr, size_t buf_len, 1103 + size_t period_len, enum dma_transfer_direction direction, 1104 + unsigned long flags) 1105 + { 1106 + struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); 1107 + struct tegra_dma_desc *dma_desc; 1108 + struct tegra_dma_sg_req *sg_req; 1109 + enum dma_slave_buswidth slave_bw; 1110 + u32 csr, mc_seq, apb_ptr = 0, mmio_seq = 0, burst_size; 1111 + unsigned int max_dma_count, len, period_count, i; 1112 + dma_addr_t mem = buf_addr; 1113 + int ret; 1114 + 1115 + if (!buf_len || !period_len) { 1116 + dev_err(tdc2dev(tdc), "Invalid buffer/period len\n"); 1117 + return NULL; 1118 + } 1119 + 1120 + if (!tdc->config_init) { 1121 + dev_err(tdc2dev(tdc), "DMA slave is not configured\n"); 1122 + return NULL; 1123 + } 1124 + 1125 + ret = tegra_dma_sid_reserve(tdc, direction); 1126 + if (ret) 1127 + return NULL; 1128 + 1129 + /* 1130 + * We only support cycle transfer when buf_len is multiple of 1131 + * period_len. 
1132 + */ 1133 + if (buf_len % period_len) { 1134 + dev_err(tdc2dev(tdc), "buf_len is not multiple of period_len\n"); 1135 + return NULL; 1136 + } 1137 + 1138 + len = period_len; 1139 + max_dma_count = tdc->tdma->chip_data->max_dma_count; 1140 + if ((len & 3) || (buf_addr & 3) || len > max_dma_count) { 1141 + dev_err(tdc2dev(tdc), "Req len/mem address is not correct\n"); 1142 + return NULL; 1143 + } 1144 + 1145 + ret = get_transfer_param(tdc, direction, &apb_ptr, &mmio_seq, &csr, 1146 + &burst_size, &slave_bw); 1147 + if (ret < 0) 1148 + return NULL; 1149 + 1150 + /* Enable once or continuous mode */ 1151 + csr &= ~TEGRA_GPCDMA_CSR_ONCE; 1152 + /* Program the slave id in requestor select */ 1153 + csr |= FIELD_PREP(TEGRA_GPCDMA_CSR_REQ_SEL_MASK, tdc->slave_id); 1154 + /* Enable IRQ mask */ 1155 + csr |= TEGRA_GPCDMA_CSR_IRQ_MASK; 1156 + /* Configure default priority weight for the channel*/ 1157 + csr |= FIELD_PREP(TEGRA_GPCDMA_CSR_WEIGHT, 1); 1158 + 1159 + /* Enable the DMA interrupt */ 1160 + if (flags & DMA_PREP_INTERRUPT) 1161 + csr |= TEGRA_GPCDMA_CSR_IE_EOC; 1162 + 1163 + mmio_seq |= FIELD_PREP(TEGRA_GPCDMA_MMIOSEQ_WRAP_WORD, 1); 1164 + 1165 + mc_seq = tdc_read(tdc, TEGRA_GPCDMA_CHAN_MCSEQ); 1166 + /* retain stream-id and clean rest */ 1167 + mc_seq &= TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK; 1168 + 1169 + /* Set the address wrapping on both MC and MMIO side */ 1170 + mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP0, 1171 + TEGRA_GPCDMA_MCSEQ_WRAP_NONE); 1172 + mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_WRAP1, 1173 + TEGRA_GPCDMA_MCSEQ_WRAP_NONE); 1174 + 1175 + /* Program 2 MC outstanding requests by default. 
*/ 1176 + mc_seq |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_REQ_COUNT, 1); 1177 + /* Setting MC burst size depending on MMIO burst size */ 1178 + if (burst_size == 64) 1179 + mc_seq |= TEGRA_GPCDMA_MCSEQ_BURST_16; 1180 + else 1181 + mc_seq |= TEGRA_GPCDMA_MCSEQ_BURST_2; 1182 + 1183 + period_count = buf_len / period_len; 1184 + dma_desc = kzalloc(struct_size(dma_desc, sg_req, period_count), 1185 + GFP_NOWAIT); 1186 + if (!dma_desc) 1187 + return NULL; 1188 + 1189 + dma_desc->bytes_req = buf_len; 1190 + dma_desc->sg_count = period_count; 1191 + sg_req = dma_desc->sg_req; 1192 + 1193 + /* Split transfer equal to period size */ 1194 + for (i = 0; i < period_count; i++) { 1195 + mmio_seq |= get_burst_size(tdc, burst_size, slave_bw, len); 1196 + if (direction == DMA_MEM_TO_DEV) { 1197 + sg_req[i].ch_regs.src_ptr = mem; 1198 + sg_req[i].ch_regs.dst_ptr = apb_ptr; 1199 + sg_req[i].ch_regs.high_addr_ptr = 1200 + FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_SRC_PTR, (mem >> 32)); 1201 + } else if (direction == DMA_DEV_TO_MEM) { 1202 + sg_req[i].ch_regs.src_ptr = apb_ptr; 1203 + sg_req[i].ch_regs.dst_ptr = mem; 1204 + sg_req[i].ch_regs.high_addr_ptr = 1205 + FIELD_PREP(TEGRA_GPCDMA_HIGH_ADDR_DST_PTR, (mem >> 32)); 1206 + } 1207 + /* 1208 + * Word count register takes input in words. Writing a value 1209 + * of N into word count register means a req of (N+1) words. 
1210 + */ 1211 + sg_req[i].ch_regs.wcount = ((len - 4) >> 2); 1212 + sg_req[i].ch_regs.csr = csr; 1213 + sg_req[i].ch_regs.mmio_seq = mmio_seq; 1214 + sg_req[i].ch_regs.mc_seq = mc_seq; 1215 + sg_req[i].len = len; 1216 + 1217 + mem += len; 1218 + } 1219 + 1220 + dma_desc->cyclic = true; 1221 + 1222 + return vchan_tx_prep(&tdc->vc, &dma_desc->vd, flags); 1223 + } 1224 + 1225 + static int tegra_dma_alloc_chan_resources(struct dma_chan *dc) 1226 + { 1227 + struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); 1228 + int ret; 1229 + 1230 + ret = request_irq(tdc->irq, tegra_dma_isr, 0, tdc->name, tdc); 1231 + if (ret) { 1232 + dev_err(tdc2dev(tdc), "request_irq failed for %s\n", tdc->name); 1233 + return ret; 1234 + } 1235 + 1236 + dma_cookie_init(&tdc->vc.chan); 1237 + tdc->config_init = false; 1238 + return 0; 1239 + } 1240 + 1241 + static void tegra_dma_chan_synchronize(struct dma_chan *dc) 1242 + { 1243 + struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); 1244 + 1245 + synchronize_irq(tdc->irq); 1246 + vchan_synchronize(&tdc->vc); 1247 + } 1248 + 1249 + static void tegra_dma_free_chan_resources(struct dma_chan *dc) 1250 + { 1251 + struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); 1252 + 1253 + dev_dbg(tdc2dev(tdc), "Freeing channel %d\n", tdc->id); 1254 + 1255 + tegra_dma_terminate_all(dc); 1256 + synchronize_irq(tdc->irq); 1257 + 1258 + tasklet_kill(&tdc->vc.task); 1259 + tdc->config_init = false; 1260 + tdc->slave_id = -1; 1261 + tdc->sid_dir = DMA_TRANS_NONE; 1262 + free_irq(tdc->irq, tdc); 1263 + 1264 + vchan_free_chan_resources(&tdc->vc); 1265 + } 1266 + 1267 + static struct dma_chan *tegra_dma_of_xlate(struct of_phandle_args *dma_spec, 1268 + struct of_dma *ofdma) 1269 + { 1270 + struct tegra_dma *tdma = ofdma->of_dma_data; 1271 + struct tegra_dma_channel *tdc; 1272 + struct dma_chan *chan; 1273 + 1274 + chan = dma_get_any_slave_channel(&tdma->dma_dev); 1275 + if (!chan) 1276 + return NULL; 1277 + 1278 + tdc = to_tegra_dma_chan(chan); 1279 + 
tdc->slave_id = dma_spec->args[0]; 1280 + 1281 + return chan; 1282 + } 1283 + 1284 + static const struct tegra_dma_chip_data tegra186_dma_chip_data = { 1285 + .nr_channels = 31, 1286 + .channel_reg_size = SZ_64K, 1287 + .max_dma_count = SZ_1G, 1288 + .hw_support_pause = false, 1289 + .terminate = tegra_dma_stop_client, 1290 + }; 1291 + 1292 + static const struct tegra_dma_chip_data tegra194_dma_chip_data = { 1293 + .nr_channels = 31, 1294 + .channel_reg_size = SZ_64K, 1295 + .max_dma_count = SZ_1G, 1296 + .hw_support_pause = true, 1297 + .terminate = tegra_dma_pause, 1298 + }; 1299 + 1300 + static const struct of_device_id tegra_dma_of_match[] = { 1301 + { 1302 + .compatible = "nvidia,tegra186-gpcdma", 1303 + .data = &tegra186_dma_chip_data, 1304 + }, { 1305 + .compatible = "nvidia,tegra194-gpcdma", 1306 + .data = &tegra194_dma_chip_data, 1307 + }, { 1308 + }, 1309 + }; 1310 + MODULE_DEVICE_TABLE(of, tegra_dma_of_match); 1311 + 1312 + static int tegra_dma_program_sid(struct tegra_dma_channel *tdc, int stream_id) 1313 + { 1314 + unsigned int reg_val = tdc_read(tdc, TEGRA_GPCDMA_CHAN_MCSEQ); 1315 + 1316 + reg_val &= ~(TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK); 1317 + reg_val &= ~(TEGRA_GPCDMA_MCSEQ_STREAM_ID1_MASK); 1318 + 1319 + reg_val |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_STREAM_ID0_MASK, stream_id); 1320 + reg_val |= FIELD_PREP(TEGRA_GPCDMA_MCSEQ_STREAM_ID1_MASK, stream_id); 1321 + 1322 + tdc_write(tdc, TEGRA_GPCDMA_CHAN_MCSEQ, reg_val); 1323 + return 0; 1324 + } 1325 + 1326 + static int tegra_dma_probe(struct platform_device *pdev) 1327 + { 1328 + const struct tegra_dma_chip_data *cdata = NULL; 1329 + struct iommu_fwspec *iommu_spec; 1330 + unsigned int stream_id, i; 1331 + struct tegra_dma *tdma; 1332 + struct resource *res; 1333 + int ret; 1334 + 1335 + cdata = of_device_get_match_data(&pdev->dev); 1336 + 1337 + tdma = devm_kzalloc(&pdev->dev, 1338 + struct_size(tdma, channels, cdata->nr_channels), 1339 + GFP_KERNEL); 1340 + if (!tdma) 1341 + return -ENOMEM; 1342 + 1343 
+ tdma->dev = &pdev->dev; 1344 + tdma->chip_data = cdata; 1345 + platform_set_drvdata(pdev, tdma); 1346 + 1347 + tdma->base_addr = devm_platform_ioremap_resource(pdev, 0); 1348 + if (IS_ERR(tdma->base_addr)) 1349 + return PTR_ERR(tdma->base_addr); 1350 + 1351 + tdma->rst = devm_reset_control_get_exclusive(&pdev->dev, "gpcdma"); 1352 + if (IS_ERR(tdma->rst)) { 1353 + return dev_err_probe(&pdev->dev, PTR_ERR(tdma->rst), 1354 + "Missing controller reset\n"); 1355 + } 1356 + reset_control_reset(tdma->rst); 1357 + 1358 + tdma->dma_dev.dev = &pdev->dev; 1359 + 1360 + iommu_spec = dev_iommu_fwspec_get(&pdev->dev); 1361 + if (!iommu_spec) { 1362 + dev_err(&pdev->dev, "Missing iommu stream-id\n"); 1363 + return -EINVAL; 1364 + } 1365 + stream_id = iommu_spec->ids[0] & 0xffff; 1366 + 1367 + INIT_LIST_HEAD(&tdma->dma_dev.channels); 1368 + for (i = 0; i < cdata->nr_channels; i++) { 1369 + struct tegra_dma_channel *tdc = &tdma->channels[i]; 1370 + 1371 + tdc->chan_base_offset = TEGRA_GPCDMA_CHANNEL_BASE_ADD_OFFSET + 1372 + i * cdata->channel_reg_size; 1373 + res = platform_get_resource(pdev, IORESOURCE_IRQ, i); 1374 + if (!res) { 1375 + dev_err(&pdev->dev, "No irq resource for chan %d\n", i); 1376 + return -EINVAL; 1377 + } 1378 + tdc->irq = res->start; 1379 + snprintf(tdc->name, sizeof(tdc->name), "gpcdma.%d", i); 1380 + 1381 + tdc->tdma = tdma; 1382 + tdc->id = i; 1383 + tdc->slave_id = -1; 1384 + 1385 + vchan_init(&tdc->vc, &tdma->dma_dev); 1386 + tdc->vc.desc_free = tegra_dma_desc_free; 1387 + 1388 + /* program stream-id for this channel */ 1389 + tegra_dma_program_sid(tdc, stream_id); 1390 + tdc->stream_id = stream_id; 1391 + } 1392 + 1393 + dma_cap_set(DMA_SLAVE, tdma->dma_dev.cap_mask); 1394 + dma_cap_set(DMA_PRIVATE, tdma->dma_dev.cap_mask); 1395 + dma_cap_set(DMA_MEMCPY, tdma->dma_dev.cap_mask); 1396 + dma_cap_set(DMA_MEMSET, tdma->dma_dev.cap_mask); 1397 + dma_cap_set(DMA_CYCLIC, tdma->dma_dev.cap_mask); 1398 + 1399 + /* 1400 + * Only word aligned transfers are 
supported. Set the copy 1401 + * alignment shift. 1402 + */ 1403 + tdma->dma_dev.copy_align = 2; 1404 + tdma->dma_dev.fill_align = 2; 1405 + tdma->dma_dev.device_alloc_chan_resources = 1406 + tegra_dma_alloc_chan_resources; 1407 + tdma->dma_dev.device_free_chan_resources = 1408 + tegra_dma_free_chan_resources; 1409 + tdma->dma_dev.device_prep_slave_sg = tegra_dma_prep_slave_sg; 1410 + tdma->dma_dev.device_prep_dma_memcpy = tegra_dma_prep_dma_memcpy; 1411 + tdma->dma_dev.device_prep_dma_memset = tegra_dma_prep_dma_memset; 1412 + tdma->dma_dev.device_prep_dma_cyclic = tegra_dma_prep_dma_cyclic; 1413 + tdma->dma_dev.device_config = tegra_dma_slave_config; 1414 + tdma->dma_dev.device_terminate_all = tegra_dma_terminate_all; 1415 + tdma->dma_dev.device_tx_status = tegra_dma_tx_status; 1416 + tdma->dma_dev.device_issue_pending = tegra_dma_issue_pending; 1417 + tdma->dma_dev.device_pause = tegra_dma_device_pause; 1418 + tdma->dma_dev.device_resume = tegra_dma_device_resume; 1419 + tdma->dma_dev.device_synchronize = tegra_dma_chan_synchronize; 1420 + tdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; 1421 + 1422 + ret = dma_async_device_register(&tdma->dma_dev); 1423 + if (ret < 0) { 1424 + dev_err_probe(&pdev->dev, ret, 1425 + "GPC DMA driver registration failed\n"); 1426 + return ret; 1427 + } 1428 + 1429 + ret = of_dma_controller_register(pdev->dev.of_node, 1430 + tegra_dma_of_xlate, tdma); 1431 + if (ret < 0) { 1432 + dev_err_probe(&pdev->dev, ret, 1433 + "GPC DMA OF registration failed\n"); 1434 + 1435 + dma_async_device_unregister(&tdma->dma_dev); 1436 + return ret; 1437 + } 1438 + 1439 + dev_info(&pdev->dev, "GPC DMA driver register %d channels\n", 1440 + cdata->nr_channels); 1441 + 1442 + return 0; 1443 + } 1444 + 1445 + static int tegra_dma_remove(struct platform_device *pdev) 1446 + { 1447 + struct tegra_dma *tdma = platform_get_drvdata(pdev); 1448 + 1449 + of_dma_controller_free(pdev->dev.of_node); 1450 + 
dma_async_device_unregister(&tdma->dma_dev); 1451 + 1452 + return 0; 1453 + } 1454 + 1455 + static int __maybe_unused tegra_dma_pm_suspend(struct device *dev) 1456 + { 1457 + struct tegra_dma *tdma = dev_get_drvdata(dev); 1458 + unsigned int i; 1459 + 1460 + for (i = 0; i < tdma->chip_data->nr_channels; i++) { 1461 + struct tegra_dma_channel *tdc = &tdma->channels[i]; 1462 + 1463 + if (tdc->dma_desc) { 1464 + dev_err(tdma->dev, "channel %u busy\n", i); 1465 + return -EBUSY; 1466 + } 1467 + } 1468 + 1469 + return 0; 1470 + } 1471 + 1472 + static int __maybe_unused tegra_dma_pm_resume(struct device *dev) 1473 + { 1474 + struct tegra_dma *tdma = dev_get_drvdata(dev); 1475 + unsigned int i; 1476 + 1477 + reset_control_reset(tdma->rst); 1478 + 1479 + for (i = 0; i < tdma->chip_data->nr_channels; i++) { 1480 + struct tegra_dma_channel *tdc = &tdma->channels[i]; 1481 + 1482 + tegra_dma_program_sid(tdc, tdc->stream_id); 1483 + } 1484 + 1485 + return 0; 1486 + } 1487 + 1488 + static const struct dev_pm_ops tegra_dma_dev_pm_ops = { 1489 + SET_SYSTEM_SLEEP_PM_OPS(tegra_dma_pm_suspend, tegra_dma_pm_resume) 1490 + }; 1491 + 1492 + static struct platform_driver tegra_dma_driver = { 1493 + .driver = { 1494 + .name = "tegra-gpcdma", 1495 + .pm = &tegra_dma_dev_pm_ops, 1496 + .of_match_table = tegra_dma_of_match, 1497 + }, 1498 + .probe = tegra_dma_probe, 1499 + .remove = tegra_dma_remove, 1500 + }; 1501 + 1502 + module_platform_driver(tegra_dma_driver); 1503 + 1504 + MODULE_DESCRIPTION("NVIDIA Tegra GPC DMA Controller driver"); 1505 + MODULE_AUTHOR("Pavan Kunapuli <pkunapuli@nvidia.com>"); 1506 + MODULE_AUTHOR("Rajesh Gumasta <rgumasta@nvidia.com>"); 1507 + MODULE_LICENSE("GPL");