Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

dmaengine: sh: Add DMAC driver for RZ/G2L SoC

Add DMA Controller driver for RZ/G2L SoC.

Based on the work done by Chris Brandt for RZ/A DMA driver.

Signed-off-by: Biju Das <biju.das.jz@bp.renesas.com>
Reviewed-by: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
Link: https://lore.kernel.org/r/20210806095322.2326-4-biju.das.jz@bp.renesas.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>

Authored by Biju Das and committed by Vinod Koul.
5000d370 ab959c7d

+981
+9
# drivers/dma/sh/Kconfig (added entry; preceding USB_DMAC help text is context)
	help
	  This driver supports the USB-DMA controller found in the Renesas
	  SoCs.

config RZ_DMAC
	tristate "Renesas RZ/G2L DMA Controller"
	depends on ARCH_R9A07G044 || COMPILE_TEST
	select RENESAS_DMA
	select DMA_VIRTUAL_CHANNELS
	help
	  This driver supports the general purpose DMA controller found in the
	  Renesas RZ/G2L SoC variants.
+1
# drivers/dma/sh/Makefile (added rz-dmac object; neighbours are context)
obj-$(CONFIG_RCAR_DMAC) += rcar-dmac.o
obj-$(CONFIG_RENESAS_USB_DMAC) += usb-dmac.o
obj-$(CONFIG_RZ_DMAC) += rz-dmac.o
+971
drivers/dma/sh/rz-dmac.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Renesas RZ/G2L DMA Controller Driver 4 + * 5 + * Based on imx-dma.c 6 + * 7 + * Copyright (C) 2021 Renesas Electronics Corp. 8 + * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de> 9 + * Copyright 2012 Javier Martin, Vista Silicon <javier.martin@vista-silicon.com> 10 + */ 11 + 12 + #include <linux/dma-mapping.h> 13 + #include <linux/dmaengine.h> 14 + #include <linux/interrupt.h> 15 + #include <linux/list.h> 16 + #include <linux/module.h> 17 + #include <linux/of.h> 18 + #include <linux/of_dma.h> 19 + #include <linux/of_platform.h> 20 + #include <linux/platform_device.h> 21 + #include <linux/slab.h> 22 + #include <linux/spinlock.h> 23 + 24 + #include "../dmaengine.h" 25 + #include "../virt-dma.h" 26 + 27 + enum rz_dmac_prep_type { 28 + RZ_DMAC_DESC_MEMCPY, 29 + RZ_DMAC_DESC_SLAVE_SG, 30 + }; 31 + 32 + struct rz_lmdesc { 33 + u32 header; 34 + u32 sa; 35 + u32 da; 36 + u32 tb; 37 + u32 chcfg; 38 + u32 chitvl; 39 + u32 chext; 40 + u32 nxla; 41 + }; 42 + 43 + struct rz_dmac_desc { 44 + struct virt_dma_desc vd; 45 + dma_addr_t src; 46 + dma_addr_t dest; 47 + size_t len; 48 + struct list_head node; 49 + enum dma_transfer_direction direction; 50 + enum rz_dmac_prep_type type; 51 + /* For slave sg */ 52 + struct scatterlist *sg; 53 + unsigned int sgcount; 54 + }; 55 + 56 + #define to_rz_dmac_desc(d) container_of(d, struct rz_dmac_desc, vd) 57 + 58 + struct rz_dmac_chan { 59 + struct virt_dma_chan vc; 60 + void __iomem *ch_base; 61 + void __iomem *ch_cmn_base; 62 + unsigned int index; 63 + int irq; 64 + struct rz_dmac_desc *desc; 65 + int descs_allocated; 66 + 67 + enum dma_slave_buswidth src_word_size; 68 + enum dma_slave_buswidth dst_word_size; 69 + dma_addr_t src_per_address; 70 + dma_addr_t dst_per_address; 71 + 72 + u32 chcfg; 73 + u32 chctrl; 74 + int mid_rid; 75 + 76 + struct list_head ld_free; 77 + struct list_head ld_queue; 78 + struct list_head ld_active; 79 + 80 + struct { 81 + struct 
rz_lmdesc *base; 82 + struct rz_lmdesc *head; 83 + struct rz_lmdesc *tail; 84 + dma_addr_t base_dma; 85 + } lmdesc; 86 + }; 87 + 88 + #define to_rz_dmac_chan(c) container_of(c, struct rz_dmac_chan, vc.chan) 89 + 90 + struct rz_dmac { 91 + struct dma_device engine; 92 + struct device *dev; 93 + void __iomem *base; 94 + void __iomem *ext_base; 95 + 96 + unsigned int n_channels; 97 + struct rz_dmac_chan *channels; 98 + 99 + DECLARE_BITMAP(modules, 1024); 100 + }; 101 + 102 + #define to_rz_dmac(d) container_of(d, struct rz_dmac, engine) 103 + 104 + /* 105 + * ----------------------------------------------------------------------------- 106 + * Registers 107 + */ 108 + 109 + #define CHSTAT 0x0024 110 + #define CHCTRL 0x0028 111 + #define CHCFG 0x002c 112 + #define NXLA 0x0038 113 + 114 + #define DCTRL 0x0000 115 + 116 + #define EACH_CHANNEL_OFFSET 0x0040 117 + #define CHANNEL_0_7_OFFSET 0x0000 118 + #define CHANNEL_0_7_COMMON_BASE 0x0300 119 + #define CHANNEL_8_15_OFFSET 0x0400 120 + #define CHANNEL_8_15_COMMON_BASE 0x0700 121 + 122 + #define CHSTAT_ER BIT(4) 123 + #define CHSTAT_EN BIT(0) 124 + 125 + #define CHCTRL_CLRINTMSK BIT(17) 126 + #define CHCTRL_CLRSUS BIT(9) 127 + #define CHCTRL_CLRTC BIT(6) 128 + #define CHCTRL_CLREND BIT(5) 129 + #define CHCTRL_CLRRQ BIT(4) 130 + #define CHCTRL_SWRST BIT(3) 131 + #define CHCTRL_STG BIT(2) 132 + #define CHCTRL_CLREN BIT(1) 133 + #define CHCTRL_SETEN BIT(0) 134 + #define CHCTRL_DEFAULT (CHCTRL_CLRINTMSK | CHCTRL_CLRSUS | \ 135 + CHCTRL_CLRTC | CHCTRL_CLREND | \ 136 + CHCTRL_CLRRQ | CHCTRL_SWRST | \ 137 + CHCTRL_CLREN) 138 + 139 + #define CHCFG_DMS BIT(31) 140 + #define CHCFG_DEM BIT(24) 141 + #define CHCFG_DAD BIT(21) 142 + #define CHCFG_SAD BIT(20) 143 + #define CHCFG_REQD BIT(3) 144 + #define CHCFG_SEL(bits) ((bits) & 0x07) 145 + #define CHCFG_MEM_COPY (0x80400008) 146 + #define CHCFG_FILL_DDS(a) (((a) << 16) & GENMASK(19, 16)) 147 + #define CHCFG_FILL_SDS(a) (((a) << 12) & GENMASK(15, 12)) 148 + #define CHCFG_FILL_TM(a) 
(((a) & BIT(5)) << 22) 149 + #define CHCFG_FILL_AM(a) (((a) & GENMASK(4, 2)) << 6) 150 + #define CHCFG_FILL_LVL(a) (((a) & BIT(1)) << 5) 151 + #define CHCFG_FILL_HIEN(a) (((a) & BIT(0)) << 5) 152 + 153 + #define MID_RID_MASK GENMASK(9, 0) 154 + #define CHCFG_MASK GENMASK(15, 10) 155 + #define CHCFG_DS_INVALID 0xFF 156 + #define DCTRL_LVINT BIT(1) 157 + #define DCTRL_PR BIT(0) 158 + #define DCTRL_DEFAULT (DCTRL_LVINT | DCTRL_PR) 159 + 160 + /* LINK MODE DESCRIPTOR */ 161 + #define HEADER_LV BIT(0) 162 + 163 + #define RZ_DMAC_MAX_CHAN_DESCRIPTORS 16 164 + #define RZ_DMAC_MAX_CHANNELS 16 165 + #define DMAC_NR_LMDESC 64 166 + 167 + /* 168 + * ----------------------------------------------------------------------------- 169 + * Device access 170 + */ 171 + 172 + static void rz_dmac_writel(struct rz_dmac *dmac, unsigned int val, 173 + unsigned int offset) 174 + { 175 + writel(val, dmac->base + offset); 176 + } 177 + 178 + static void rz_dmac_ext_writel(struct rz_dmac *dmac, unsigned int val, 179 + unsigned int offset) 180 + { 181 + writel(val, dmac->ext_base + offset); 182 + } 183 + 184 + static u32 rz_dmac_ext_readl(struct rz_dmac *dmac, unsigned int offset) 185 + { 186 + return readl(dmac->ext_base + offset); 187 + } 188 + 189 + static void rz_dmac_ch_writel(struct rz_dmac_chan *channel, unsigned int val, 190 + unsigned int offset, int which) 191 + { 192 + if (which) 193 + writel(val, channel->ch_base + offset); 194 + else 195 + writel(val, channel->ch_cmn_base + offset); 196 + } 197 + 198 + static u32 rz_dmac_ch_readl(struct rz_dmac_chan *channel, 199 + unsigned int offset, int which) 200 + { 201 + if (which) 202 + return readl(channel->ch_base + offset); 203 + else 204 + return readl(channel->ch_cmn_base + offset); 205 + } 206 + 207 + /* 208 + * ----------------------------------------------------------------------------- 209 + * Initialization 210 + */ 211 + 212 + static void rz_lmdesc_setup(struct rz_dmac_chan *channel, 213 + struct rz_lmdesc *lmdesc) 214 + { 215 + 
u32 nxla; 216 + 217 + channel->lmdesc.base = lmdesc; 218 + channel->lmdesc.head = lmdesc; 219 + channel->lmdesc.tail = lmdesc; 220 + nxla = channel->lmdesc.base_dma; 221 + while (lmdesc < (channel->lmdesc.base + (DMAC_NR_LMDESC - 1))) { 222 + lmdesc->header = 0; 223 + nxla += sizeof(*lmdesc); 224 + lmdesc->nxla = nxla; 225 + lmdesc++; 226 + } 227 + 228 + lmdesc->header = 0; 229 + lmdesc->nxla = channel->lmdesc.base_dma; 230 + } 231 + 232 + /* 233 + * ----------------------------------------------------------------------------- 234 + * Descriptors preparation 235 + */ 236 + 237 + static void rz_dmac_lmdesc_recycle(struct rz_dmac_chan *channel) 238 + { 239 + struct rz_lmdesc *lmdesc = channel->lmdesc.head; 240 + 241 + while (!(lmdesc->header & HEADER_LV)) { 242 + lmdesc->header = 0; 243 + lmdesc++; 244 + if (lmdesc >= (channel->lmdesc.base + DMAC_NR_LMDESC)) 245 + lmdesc = channel->lmdesc.base; 246 + } 247 + channel->lmdesc.head = lmdesc; 248 + } 249 + 250 + static void rz_dmac_enable_hw(struct rz_dmac_chan *channel) 251 + { 252 + struct dma_chan *chan = &channel->vc.chan; 253 + struct rz_dmac *dmac = to_rz_dmac(chan->device); 254 + unsigned long flags; 255 + u32 nxla; 256 + u32 chctrl; 257 + u32 chstat; 258 + 259 + dev_dbg(dmac->dev, "%s channel %d\n", __func__, channel->index); 260 + 261 + local_irq_save(flags); 262 + 263 + rz_dmac_lmdesc_recycle(channel); 264 + 265 + nxla = channel->lmdesc.base_dma + 266 + (sizeof(struct rz_lmdesc) * (channel->lmdesc.head - 267 + channel->lmdesc.base)); 268 + 269 + chstat = rz_dmac_ch_readl(channel, CHSTAT, 1); 270 + if (!(chstat & CHSTAT_EN)) { 271 + chctrl = (channel->chctrl | CHCTRL_SETEN); 272 + rz_dmac_ch_writel(channel, nxla, NXLA, 1); 273 + rz_dmac_ch_writel(channel, channel->chcfg, CHCFG, 1); 274 + rz_dmac_ch_writel(channel, CHCTRL_SWRST, CHCTRL, 1); 275 + rz_dmac_ch_writel(channel, chctrl, CHCTRL, 1); 276 + } 277 + 278 + local_irq_restore(flags); 279 + } 280 + 281 + static void rz_dmac_disable_hw(struct rz_dmac_chan 
*channel) 282 + { 283 + struct dma_chan *chan = &channel->vc.chan; 284 + struct rz_dmac *dmac = to_rz_dmac(chan->device); 285 + unsigned long flags; 286 + 287 + dev_dbg(dmac->dev, "%s channel %d\n", __func__, channel->index); 288 + 289 + local_irq_save(flags); 290 + rz_dmac_ch_writel(channel, CHCTRL_DEFAULT, CHCTRL, 1); 291 + local_irq_restore(flags); 292 + } 293 + 294 + static void rz_dmac_set_dmars_register(struct rz_dmac *dmac, int nr, u32 dmars) 295 + { 296 + u32 dmars_offset = (nr / 2) * 4; 297 + u32 shift = (nr % 2) * 16; 298 + u32 dmars32; 299 + 300 + dmars32 = rz_dmac_ext_readl(dmac, dmars_offset); 301 + dmars32 &= ~(0xffff << shift); 302 + dmars32 |= dmars << shift; 303 + 304 + rz_dmac_ext_writel(dmac, dmars32, dmars_offset); 305 + } 306 + 307 + static void rz_dmac_prepare_desc_for_memcpy(struct rz_dmac_chan *channel) 308 + { 309 + struct dma_chan *chan = &channel->vc.chan; 310 + struct rz_dmac *dmac = to_rz_dmac(chan->device); 311 + struct rz_lmdesc *lmdesc = channel->lmdesc.base; 312 + struct rz_dmac_desc *d = channel->desc; 313 + u32 chcfg = CHCFG_MEM_COPY; 314 + 315 + lmdesc = channel->lmdesc.tail; 316 + 317 + /* prepare descriptor */ 318 + lmdesc->sa = d->src; 319 + lmdesc->da = d->dest; 320 + lmdesc->tb = d->len; 321 + lmdesc->chcfg = chcfg; 322 + lmdesc->chitvl = 0; 323 + lmdesc->chext = 0; 324 + lmdesc->header = HEADER_LV; 325 + 326 + rz_dmac_set_dmars_register(dmac, channel->index, 0); 327 + 328 + channel->chcfg = chcfg; 329 + channel->chctrl = CHCTRL_STG | CHCTRL_SETEN; 330 + } 331 + 332 + static void rz_dmac_prepare_descs_for_slave_sg(struct rz_dmac_chan *channel) 333 + { 334 + struct dma_chan *chan = &channel->vc.chan; 335 + struct rz_dmac *dmac = to_rz_dmac(chan->device); 336 + struct rz_dmac_desc *d = channel->desc; 337 + struct scatterlist *sg, *sgl = d->sg; 338 + struct rz_lmdesc *lmdesc; 339 + unsigned int i, sg_len = d->sgcount; 340 + 341 + channel->chcfg |= CHCFG_SEL(channel->index) | CHCFG_DEM | CHCFG_DMS; 342 + 343 + if (d->direction 
== DMA_DEV_TO_MEM) { 344 + channel->chcfg |= CHCFG_SAD; 345 + channel->chcfg &= ~CHCFG_REQD; 346 + } else { 347 + channel->chcfg |= CHCFG_DAD | CHCFG_REQD; 348 + } 349 + 350 + lmdesc = channel->lmdesc.tail; 351 + 352 + for (i = 0, sg = sgl; i < sg_len; i++, sg = sg_next(sg)) { 353 + if (d->direction == DMA_DEV_TO_MEM) { 354 + lmdesc->sa = channel->src_per_address; 355 + lmdesc->da = sg_dma_address(sg); 356 + } else { 357 + lmdesc->sa = sg_dma_address(sg); 358 + lmdesc->da = channel->dst_per_address; 359 + } 360 + 361 + lmdesc->tb = sg_dma_len(sg); 362 + lmdesc->chitvl = 0; 363 + lmdesc->chext = 0; 364 + if (i == (sg_len - 1)) { 365 + lmdesc->chcfg = (channel->chcfg & ~CHCFG_DEM); 366 + lmdesc->header = HEADER_LV; 367 + } else { 368 + lmdesc->chcfg = channel->chcfg; 369 + lmdesc->header = HEADER_LV; 370 + } 371 + if (++lmdesc >= (channel->lmdesc.base + DMAC_NR_LMDESC)) 372 + lmdesc = channel->lmdesc.base; 373 + } 374 + 375 + channel->lmdesc.tail = lmdesc; 376 + 377 + rz_dmac_set_dmars_register(dmac, channel->index, channel->mid_rid); 378 + channel->chctrl = CHCTRL_SETEN; 379 + } 380 + 381 + static int rz_dmac_xfer_desc(struct rz_dmac_chan *chan) 382 + { 383 + struct rz_dmac_desc *d = chan->desc; 384 + struct virt_dma_desc *vd; 385 + 386 + vd = vchan_next_desc(&chan->vc); 387 + if (!vd) 388 + return 0; 389 + 390 + list_del(&vd->node); 391 + 392 + switch (d->type) { 393 + case RZ_DMAC_DESC_MEMCPY: 394 + rz_dmac_prepare_desc_for_memcpy(chan); 395 + break; 396 + 397 + case RZ_DMAC_DESC_SLAVE_SG: 398 + rz_dmac_prepare_descs_for_slave_sg(chan); 399 + break; 400 + 401 + default: 402 + return -EINVAL; 403 + } 404 + 405 + rz_dmac_enable_hw(chan); 406 + 407 + return 0; 408 + } 409 + 410 + /* 411 + * ----------------------------------------------------------------------------- 412 + * DMA engine operations 413 + */ 414 + 415 + static int rz_dmac_alloc_chan_resources(struct dma_chan *chan) 416 + { 417 + struct rz_dmac_chan *channel = to_rz_dmac_chan(chan); 418 + 419 + while 
(channel->descs_allocated < RZ_DMAC_MAX_CHAN_DESCRIPTORS) { 420 + struct rz_dmac_desc *desc; 421 + 422 + desc = kzalloc(sizeof(*desc), GFP_KERNEL); 423 + if (!desc) 424 + break; 425 + 426 + list_add_tail(&desc->node, &channel->ld_free); 427 + channel->descs_allocated++; 428 + } 429 + 430 + if (!channel->descs_allocated) 431 + return -ENOMEM; 432 + 433 + return channel->descs_allocated; 434 + } 435 + 436 + static void rz_dmac_free_chan_resources(struct dma_chan *chan) 437 + { 438 + struct rz_dmac_chan *channel = to_rz_dmac_chan(chan); 439 + struct rz_dmac *dmac = to_rz_dmac(chan->device); 440 + struct rz_lmdesc *lmdesc = channel->lmdesc.base; 441 + struct rz_dmac_desc *desc, *_desc; 442 + unsigned long flags; 443 + unsigned int i; 444 + 445 + spin_lock_irqsave(&channel->vc.lock, flags); 446 + 447 + for (i = 0; i < DMAC_NR_LMDESC; i++) 448 + lmdesc[i].header = 0; 449 + 450 + rz_dmac_disable_hw(channel); 451 + list_splice_tail_init(&channel->ld_active, &channel->ld_free); 452 + list_splice_tail_init(&channel->ld_queue, &channel->ld_free); 453 + 454 + if (channel->mid_rid >= 0) { 455 + clear_bit(channel->mid_rid, dmac->modules); 456 + channel->mid_rid = -EINVAL; 457 + } 458 + 459 + spin_unlock_irqrestore(&channel->vc.lock, flags); 460 + 461 + list_for_each_entry_safe(desc, _desc, &channel->ld_free, node) { 462 + kfree(desc); 463 + channel->descs_allocated--; 464 + } 465 + 466 + INIT_LIST_HEAD(&channel->ld_free); 467 + vchan_free_chan_resources(&channel->vc); 468 + } 469 + 470 + static struct dma_async_tx_descriptor * 471 + rz_dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, 472 + size_t len, unsigned long flags) 473 + { 474 + struct rz_dmac_chan *channel = to_rz_dmac_chan(chan); 475 + struct rz_dmac *dmac = to_rz_dmac(chan->device); 476 + struct rz_dmac_desc *desc; 477 + 478 + dev_dbg(dmac->dev, "%s channel: %d src=0x%pad dst=0x%pad len=%zu\n", 479 + __func__, channel->index, &src, &dest, len); 480 + 481 + if (list_empty(&channel->ld_free)) 
482 + return NULL; 483 + 484 + desc = list_first_entry(&channel->ld_free, struct rz_dmac_desc, node); 485 + 486 + desc->type = RZ_DMAC_DESC_MEMCPY; 487 + desc->src = src; 488 + desc->dest = dest; 489 + desc->len = len; 490 + desc->direction = DMA_MEM_TO_MEM; 491 + 492 + list_move_tail(channel->ld_free.next, &channel->ld_queue); 493 + return vchan_tx_prep(&channel->vc, &desc->vd, flags); 494 + } 495 + 496 + static struct dma_async_tx_descriptor * 497 + rz_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, 498 + unsigned int sg_len, 499 + enum dma_transfer_direction direction, 500 + unsigned long flags, void *context) 501 + { 502 + struct rz_dmac_chan *channel = to_rz_dmac_chan(chan); 503 + struct rz_dmac_desc *desc; 504 + struct scatterlist *sg; 505 + int dma_length = 0; 506 + int i = 0; 507 + 508 + if (list_empty(&channel->ld_free)) 509 + return NULL; 510 + 511 + desc = list_first_entry(&channel->ld_free, struct rz_dmac_desc, node); 512 + 513 + for_each_sg(sgl, sg, sg_len, i) { 514 + dma_length += sg_dma_len(sg); 515 + } 516 + 517 + desc->type = RZ_DMAC_DESC_SLAVE_SG; 518 + desc->sg = sgl; 519 + desc->sgcount = sg_len; 520 + desc->len = dma_length; 521 + desc->direction = direction; 522 + 523 + if (direction == DMA_DEV_TO_MEM) 524 + desc->src = channel->src_per_address; 525 + else 526 + desc->dest = channel->dst_per_address; 527 + 528 + list_move_tail(channel->ld_free.next, &channel->ld_queue); 529 + return vchan_tx_prep(&channel->vc, &desc->vd, flags); 530 + } 531 + 532 + static int rz_dmac_terminate_all(struct dma_chan *chan) 533 + { 534 + struct rz_dmac_chan *channel = to_rz_dmac_chan(chan); 535 + unsigned long flags; 536 + LIST_HEAD(head); 537 + 538 + rz_dmac_disable_hw(channel); 539 + spin_lock_irqsave(&channel->vc.lock, flags); 540 + list_splice_tail_init(&channel->ld_active, &channel->ld_free); 541 + list_splice_tail_init(&channel->ld_queue, &channel->ld_free); 542 + spin_unlock_irqrestore(&channel->vc.lock, flags); 543 + 
vchan_get_all_descriptors(&channel->vc, &head); 544 + vchan_dma_desc_free_list(&channel->vc, &head); 545 + 546 + return 0; 547 + } 548 + 549 + static void rz_dmac_issue_pending(struct dma_chan *chan) 550 + { 551 + struct rz_dmac_chan *channel = to_rz_dmac_chan(chan); 552 + struct rz_dmac *dmac = to_rz_dmac(chan->device); 553 + struct rz_dmac_desc *desc; 554 + unsigned long flags; 555 + 556 + spin_lock_irqsave(&channel->vc.lock, flags); 557 + 558 + if (!list_empty(&channel->ld_queue)) { 559 + desc = list_first_entry(&channel->ld_queue, 560 + struct rz_dmac_desc, node); 561 + channel->desc = desc; 562 + if (vchan_issue_pending(&channel->vc)) { 563 + if (rz_dmac_xfer_desc(channel) < 0) 564 + dev_warn(dmac->dev, "ch: %d couldn't issue DMA xfer\n", 565 + channel->index); 566 + else 567 + list_move_tail(channel->ld_queue.next, 568 + &channel->ld_active); 569 + } 570 + } 571 + 572 + spin_unlock_irqrestore(&channel->vc.lock, flags); 573 + } 574 + 575 + static u8 rz_dmac_ds_to_val_mapping(enum dma_slave_buswidth ds) 576 + { 577 + u8 i; 578 + const enum dma_slave_buswidth ds_lut[] = { 579 + DMA_SLAVE_BUSWIDTH_1_BYTE, 580 + DMA_SLAVE_BUSWIDTH_2_BYTES, 581 + DMA_SLAVE_BUSWIDTH_4_BYTES, 582 + DMA_SLAVE_BUSWIDTH_8_BYTES, 583 + DMA_SLAVE_BUSWIDTH_16_BYTES, 584 + DMA_SLAVE_BUSWIDTH_32_BYTES, 585 + DMA_SLAVE_BUSWIDTH_64_BYTES, 586 + DMA_SLAVE_BUSWIDTH_128_BYTES, 587 + }; 588 + 589 + for (i = 0; i < ARRAY_SIZE(ds_lut); i++) { 590 + if (ds_lut[i] == ds) 591 + return i; 592 + } 593 + 594 + return CHCFG_DS_INVALID; 595 + } 596 + 597 + static int rz_dmac_config(struct dma_chan *chan, 598 + struct dma_slave_config *config) 599 + { 600 + struct rz_dmac_chan *channel = to_rz_dmac_chan(chan); 601 + u32 val; 602 + 603 + channel->src_per_address = config->src_addr; 604 + channel->src_word_size = config->src_addr_width; 605 + channel->dst_per_address = config->dst_addr; 606 + channel->dst_word_size = config->dst_addr_width; 607 + 608 + val = rz_dmac_ds_to_val_mapping(config->dst_addr_width); 
609 + if (val == CHCFG_DS_INVALID) 610 + return -EINVAL; 611 + 612 + channel->chcfg |= CHCFG_FILL_DDS(val); 613 + 614 + val = rz_dmac_ds_to_val_mapping(config->src_addr_width); 615 + if (val == CHCFG_DS_INVALID) 616 + return -EINVAL; 617 + 618 + channel->chcfg |= CHCFG_FILL_SDS(val); 619 + 620 + return 0; 621 + } 622 + 623 + static void rz_dmac_virt_desc_free(struct virt_dma_desc *vd) 624 + { 625 + /* 626 + * Place holder 627 + * Descriptor allocation is done during alloc_chan_resources and 628 + * get freed during free_chan_resources. 629 + * list is used to manage the descriptors and avoid any memory 630 + * allocation/free during DMA read/write. 631 + */ 632 + } 633 + 634 + /* 635 + * ----------------------------------------------------------------------------- 636 + * IRQ handling 637 + */ 638 + 639 + static void rz_dmac_irq_handle_channel(struct rz_dmac_chan *channel) 640 + { 641 + struct dma_chan *chan = &channel->vc.chan; 642 + struct rz_dmac *dmac = to_rz_dmac(chan->device); 643 + u32 chstat, chctrl; 644 + 645 + chstat = rz_dmac_ch_readl(channel, CHSTAT, 1); 646 + if (chstat & CHSTAT_ER) { 647 + dev_err(dmac->dev, "DMAC err CHSTAT_%d = %08X\n", 648 + channel->index, chstat); 649 + rz_dmac_ch_writel(channel, CHCTRL_DEFAULT, CHCTRL, 1); 650 + goto done; 651 + } 652 + 653 + chctrl = rz_dmac_ch_readl(channel, CHCTRL, 1); 654 + rz_dmac_ch_writel(channel, chctrl | CHCTRL_CLREND, CHCTRL, 1); 655 + done: 656 + return; 657 + } 658 + 659 + static irqreturn_t rz_dmac_irq_handler(int irq, void *dev_id) 660 + { 661 + struct rz_dmac_chan *channel = dev_id; 662 + 663 + if (channel) { 664 + rz_dmac_irq_handle_channel(channel); 665 + return IRQ_WAKE_THREAD; 666 + } 667 + /* handle DMAERR irq */ 668 + return IRQ_HANDLED; 669 + } 670 + 671 + static irqreturn_t rz_dmac_irq_handler_thread(int irq, void *dev_id) 672 + { 673 + struct rz_dmac_chan *channel = dev_id; 674 + struct rz_dmac_desc *desc = NULL; 675 + unsigned long flags; 676 + 677 + spin_lock_irqsave(&channel->vc.lock, 
flags); 678 + 679 + if (list_empty(&channel->ld_active)) { 680 + /* Someone might have called terminate all */ 681 + goto out; 682 + } 683 + 684 + desc = list_first_entry(&channel->ld_active, struct rz_dmac_desc, node); 685 + vchan_cookie_complete(&desc->vd); 686 + list_move_tail(channel->ld_active.next, &channel->ld_free); 687 + if (!list_empty(&channel->ld_queue)) { 688 + desc = list_first_entry(&channel->ld_queue, struct rz_dmac_desc, 689 + node); 690 + channel->desc = desc; 691 + if (rz_dmac_xfer_desc(channel) == 0) 692 + list_move_tail(channel->ld_queue.next, &channel->ld_active); 693 + } 694 + out: 695 + spin_unlock_irqrestore(&channel->vc.lock, flags); 696 + 697 + return IRQ_HANDLED; 698 + } 699 + 700 + /* 701 + * ----------------------------------------------------------------------------- 702 + * OF xlate and channel filter 703 + */ 704 + 705 + static bool rz_dmac_chan_filter(struct dma_chan *chan, void *arg) 706 + { 707 + struct rz_dmac_chan *channel = to_rz_dmac_chan(chan); 708 + struct rz_dmac *dmac = to_rz_dmac(chan->device); 709 + struct of_phandle_args *dma_spec = arg; 710 + u32 ch_cfg; 711 + 712 + channel->mid_rid = dma_spec->args[0] & MID_RID_MASK; 713 + ch_cfg = (dma_spec->args[0] & CHCFG_MASK) >> 10; 714 + channel->chcfg = CHCFG_FILL_TM(ch_cfg) | CHCFG_FILL_AM(ch_cfg) | 715 + CHCFG_FILL_LVL(ch_cfg) | CHCFG_FILL_HIEN(ch_cfg); 716 + 717 + return !test_and_set_bit(channel->mid_rid, dmac->modules); 718 + } 719 + 720 + static struct dma_chan *rz_dmac_of_xlate(struct of_phandle_args *dma_spec, 721 + struct of_dma *ofdma) 722 + { 723 + dma_cap_mask_t mask; 724 + 725 + if (dma_spec->args_count != 1) 726 + return NULL; 727 + 728 + /* Only slave DMA channels can be allocated via DT */ 729 + dma_cap_zero(mask); 730 + dma_cap_set(DMA_SLAVE, mask); 731 + 732 + return dma_request_channel(mask, rz_dmac_chan_filter, dma_spec); 733 + } 734 + 735 + /* 736 + * ----------------------------------------------------------------------------- 737 + * Probe and remove 738 
+ */ 739 + 740 + static int rz_dmac_chan_probe(struct rz_dmac *dmac, 741 + struct rz_dmac_chan *channel, 742 + unsigned int index) 743 + { 744 + struct platform_device *pdev = to_platform_device(dmac->dev); 745 + struct rz_lmdesc *lmdesc; 746 + char pdev_irqname[5]; 747 + char *irqname; 748 + int ret; 749 + 750 + channel->index = index; 751 + channel->mid_rid = -EINVAL; 752 + 753 + /* Request the channel interrupt. */ 754 + sprintf(pdev_irqname, "ch%u", index); 755 + channel->irq = platform_get_irq_byname(pdev, pdev_irqname); 756 + if (channel->irq < 0) 757 + return channel->irq; 758 + 759 + irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:%u", 760 + dev_name(dmac->dev), index); 761 + if (!irqname) 762 + return -ENOMEM; 763 + 764 + ret = devm_request_threaded_irq(dmac->dev, channel->irq, 765 + rz_dmac_irq_handler, 766 + rz_dmac_irq_handler_thread, 0, 767 + irqname, channel); 768 + if (ret) { 769 + dev_err(dmac->dev, "failed to request IRQ %u (%d)\n", 770 + channel->irq, ret); 771 + return ret; 772 + } 773 + 774 + /* Set io base address for each channel */ 775 + if (index < 8) { 776 + channel->ch_base = dmac->base + CHANNEL_0_7_OFFSET + 777 + EACH_CHANNEL_OFFSET * index; 778 + channel->ch_cmn_base = dmac->base + CHANNEL_0_7_COMMON_BASE; 779 + } else { 780 + channel->ch_base = dmac->base + CHANNEL_8_15_OFFSET + 781 + EACH_CHANNEL_OFFSET * (index - 8); 782 + channel->ch_cmn_base = dmac->base + CHANNEL_8_15_COMMON_BASE; 783 + } 784 + 785 + /* Allocate descriptors */ 786 + lmdesc = dma_alloc_coherent(&pdev->dev, 787 + sizeof(struct rz_lmdesc) * DMAC_NR_LMDESC, 788 + &channel->lmdesc.base_dma, GFP_KERNEL); 789 + if (!lmdesc) { 790 + dev_err(&pdev->dev, "Can't allocate memory (lmdesc)\n"); 791 + return -ENOMEM; 792 + } 793 + rz_lmdesc_setup(channel, lmdesc); 794 + 795 + /* Initialize register for each channel */ 796 + rz_dmac_ch_writel(channel, CHCTRL_DEFAULT, CHCTRL, 1); 797 + 798 + channel->vc.desc_free = rz_dmac_virt_desc_free; 799 + vchan_init(&channel->vc, 
&dmac->engine); 800 + INIT_LIST_HEAD(&channel->ld_queue); 801 + INIT_LIST_HEAD(&channel->ld_free); 802 + INIT_LIST_HEAD(&channel->ld_active); 803 + 804 + return 0; 805 + } 806 + 807 + static int rz_dmac_parse_of(struct device *dev, struct rz_dmac *dmac) 808 + { 809 + struct device_node *np = dev->of_node; 810 + int ret; 811 + 812 + ret = of_property_read_u32(np, "dma-channels", &dmac->n_channels); 813 + if (ret < 0) { 814 + dev_err(dev, "unable to read dma-channels property\n"); 815 + return ret; 816 + } 817 + 818 + if (!dmac->n_channels || dmac->n_channels > RZ_DMAC_MAX_CHANNELS) { 819 + dev_err(dev, "invalid number of channels %u\n", dmac->n_channels); 820 + return -EINVAL; 821 + } 822 + 823 + return 0; 824 + } 825 + 826 + static int rz_dmac_probe(struct platform_device *pdev) 827 + { 828 + const char *irqname = "error"; 829 + struct dma_device *engine; 830 + struct rz_dmac *dmac; 831 + int channel_num; 832 + unsigned int i; 833 + int ret; 834 + int irq; 835 + 836 + dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL); 837 + if (!dmac) 838 + return -ENOMEM; 839 + 840 + dmac->dev = &pdev->dev; 841 + platform_set_drvdata(pdev, dmac); 842 + 843 + ret = rz_dmac_parse_of(&pdev->dev, dmac); 844 + if (ret < 0) 845 + return ret; 846 + 847 + dmac->channels = devm_kcalloc(&pdev->dev, dmac->n_channels, 848 + sizeof(*dmac->channels), GFP_KERNEL); 849 + if (!dmac->channels) 850 + return -ENOMEM; 851 + 852 + /* Request resources */ 853 + dmac->base = devm_platform_ioremap_resource(pdev, 0); 854 + if (IS_ERR(dmac->base)) 855 + return PTR_ERR(dmac->base); 856 + 857 + dmac->ext_base = devm_platform_ioremap_resource(pdev, 1); 858 + if (IS_ERR(dmac->ext_base)) 859 + return PTR_ERR(dmac->ext_base); 860 + 861 + /* Register interrupt handler for error */ 862 + irq = platform_get_irq_byname(pdev, irqname); 863 + if (irq < 0) 864 + return irq; 865 + 866 + ret = devm_request_irq(&pdev->dev, irq, rz_dmac_irq_handler, 0, 867 + irqname, NULL); 868 + if (ret) { 869 + 
dev_err(&pdev->dev, "failed to request IRQ %u (%d)\n", 870 + irq, ret); 871 + return ret; 872 + } 873 + 874 + /* Initialize the channels. */ 875 + INIT_LIST_HEAD(&dmac->engine.channels); 876 + 877 + for (i = 0; i < dmac->n_channels; i++) { 878 + ret = rz_dmac_chan_probe(dmac, &dmac->channels[i], i); 879 + if (ret < 0) 880 + goto err; 881 + } 882 + 883 + /* Register the DMAC as a DMA provider for DT. */ 884 + ret = of_dma_controller_register(pdev->dev.of_node, rz_dmac_of_xlate, 885 + NULL); 886 + if (ret < 0) 887 + goto err; 888 + 889 + /* Register the DMA engine device. */ 890 + engine = &dmac->engine; 891 + dma_cap_set(DMA_SLAVE, engine->cap_mask); 892 + dma_cap_set(DMA_MEMCPY, engine->cap_mask); 893 + rz_dmac_writel(dmac, DCTRL_DEFAULT, CHANNEL_0_7_COMMON_BASE + DCTRL); 894 + rz_dmac_writel(dmac, DCTRL_DEFAULT, CHANNEL_8_15_COMMON_BASE + DCTRL); 895 + 896 + engine->dev = &pdev->dev; 897 + 898 + engine->device_alloc_chan_resources = rz_dmac_alloc_chan_resources; 899 + engine->device_free_chan_resources = rz_dmac_free_chan_resources; 900 + engine->device_tx_status = dma_cookie_status; 901 + engine->device_prep_slave_sg = rz_dmac_prep_slave_sg; 902 + engine->device_prep_dma_memcpy = rz_dmac_prep_dma_memcpy; 903 + engine->device_config = rz_dmac_config; 904 + engine->device_terminate_all = rz_dmac_terminate_all; 905 + engine->device_issue_pending = rz_dmac_issue_pending; 906 + 907 + engine->copy_align = DMAENGINE_ALIGN_1_BYTE; 908 + dma_set_max_seg_size(engine->dev, U32_MAX); 909 + 910 + ret = dma_async_device_register(engine); 911 + if (ret < 0) { 912 + dev_err(&pdev->dev, "unable to register\n"); 913 + goto dma_register_err; 914 + } 915 + return 0; 916 + 917 + dma_register_err: 918 + of_dma_controller_free(pdev->dev.of_node); 919 + err: 920 + channel_num = i ? 
i - 1 : 0; 921 + for (i = 0; i < channel_num; i++) { 922 + struct rz_dmac_chan *channel = &dmac->channels[i]; 923 + 924 + dma_free_coherent(NULL, 925 + sizeof(struct rz_lmdesc) * DMAC_NR_LMDESC, 926 + channel->lmdesc.base, 927 + channel->lmdesc.base_dma); 928 + } 929 + 930 + return ret; 931 + } 932 + 933 + static int rz_dmac_remove(struct platform_device *pdev) 934 + { 935 + struct rz_dmac *dmac = platform_get_drvdata(pdev); 936 + unsigned int i; 937 + 938 + for (i = 0; i < dmac->n_channels; i++) { 939 + struct rz_dmac_chan *channel = &dmac->channels[i]; 940 + 941 + dma_free_coherent(NULL, 942 + sizeof(struct rz_lmdesc) * DMAC_NR_LMDESC, 943 + channel->lmdesc.base, 944 + channel->lmdesc.base_dma); 945 + } 946 + of_dma_controller_free(pdev->dev.of_node); 947 + dma_async_device_unregister(&dmac->engine); 948 + 949 + return 0; 950 + } 951 + 952 + static const struct of_device_id of_rz_dmac_match[] = { 953 + { .compatible = "renesas,rz-dmac", }, 954 + { /* Sentinel */ } 955 + }; 956 + MODULE_DEVICE_TABLE(of, of_rz_dmac_match); 957 + 958 + static struct platform_driver rz_dmac_driver = { 959 + .driver = { 960 + .name = "rz-dmac", 961 + .of_match_table = of_rz_dmac_match, 962 + }, 963 + .probe = rz_dmac_probe, 964 + .remove = rz_dmac_remove, 965 + }; 966 + 967 + module_platform_driver(rz_dmac_driver); 968 + 969 + MODULE_DESCRIPTION("Renesas RZ/G2L DMA Controller Driver"); 970 + MODULE_AUTHOR("Biju Das <biju.das.jz@bp.renesas.com>"); 971 + MODULE_LICENSE("GPL v2");