dmaengine: add a driver for AMBA AXI NBPF DMAC IP cores

This patch adds a driver for NBPF DMAC IP cores from Renesas, designed for
the AMBA AXI bus.

Signed-off-by: Guennadi Liakhovetski <g.liakhovetski+renesas@gmail.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>

Authored by Guennadi Liakhovetski, committed by Vinod Koul (b45b262c, 94c7b6fc)

3 files changed, 1518 insertions(+)
drivers/dma/Kconfig (+6)

@@ -383,5 +383,11 @@
 	help
 	  Support for the DMA engine for Allwinner A31 SoCs.

+config NBPFAXI_DMA
+	tristate "Renesas Type-AXI NBPF DMA support"
+	select DMA_ENGINE
+	help
+	  Support for "Type-AXI" NBPF DMA IPs from Renesas
+
 config DMA_ENGINE
 	bool
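For orientation, a minimal configuration fragment that builds the new driver as a module could look like the sketch below. CONFIG_DMADEVICES is the existing top-level switch for the drivers/dma/ menu and is assumed to be enabled already; DMA_ENGINE is pulled in automatically by the "select" above.

# Sketch of a defconfig fragment (assumed names, not part of this patch):
CONFIG_DMADEVICES=y
CONFIG_NBPFAXI_DMA=m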
drivers/dma/Makefile (+1)

@@ -47,4 +47,5 @@
 obj-$(CONFIG_FSL_EDMA) += fsl-edma.o
 obj-$(CONFIG_QCOM_BAM_DMA) += qcom_bam_dma.o
 obj-y += xilinx/
+obj-$(CONFIG_NBPFAXI_DMA) += nbpfaxi.o
 obj-$(CONFIG_DMA_SUN6I) += sun6i-dma.o
drivers/dma/nbpfaxi.c (new file, +1511)
··· 1 + /* 2 + * Copyright (C) 2013-2014 Renesas Electronics Europe Ltd. 3 + * Author: Guennadi Liakhovetski <g.liakhovetski@gmx.de> 4 + * 5 + * This program is free software; you can redistribute it and/or modify 6 + * it under the terms of version 2 of the GNU General Public License as 7 + * published by the Free Software Foundation. 8 + */ 9 + 10 + #include <linux/bitmap.h> 11 + #include <linux/bitops.h> 12 + #include <linux/clk.h> 13 + #include <linux/dma-mapping.h> 14 + #include <linux/dmaengine.h> 15 + #include <linux/err.h> 16 + #include <linux/interrupt.h> 17 + #include <linux/io.h> 18 + #include <linux/log2.h> 19 + #include <linux/module.h> 20 + #include <linux/of.h> 21 + #include <linux/of_device.h> 22 + #include <linux/of_dma.h> 23 + #include <linux/platform_device.h> 24 + #include <linux/slab.h> 25 + 26 + #include <dt-bindings/dma/nbpfaxi.h> 27 + 28 + #include "dmaengine.h" 29 + 30 + #define NBPF_REG_CHAN_OFFSET 0 31 + #define NBPF_REG_CHAN_SIZE 0x40 32 + 33 + /* Channel Current Transaction Byte register */ 34 + #define NBPF_CHAN_CUR_TR_BYTE 0x20 35 + 36 + /* Channel Status register */ 37 + #define NBPF_CHAN_STAT 0x24 38 + #define NBPF_CHAN_STAT_EN 1 39 + #define NBPF_CHAN_STAT_TACT 4 40 + #define NBPF_CHAN_STAT_ERR 0x10 41 + #define NBPF_CHAN_STAT_END 0x20 42 + #define NBPF_CHAN_STAT_TC 0x40 43 + #define NBPF_CHAN_STAT_DER 0x400 44 + 45 + /* Channel Control register */ 46 + #define NBPF_CHAN_CTRL 0x28 47 + #define NBPF_CHAN_CTRL_SETEN 1 48 + #define NBPF_CHAN_CTRL_CLREN 2 49 + #define NBPF_CHAN_CTRL_STG 4 50 + #define NBPF_CHAN_CTRL_SWRST 8 51 + #define NBPF_CHAN_CTRL_CLRRQ 0x10 52 + #define NBPF_CHAN_CTRL_CLREND 0x20 53 + #define NBPF_CHAN_CTRL_CLRTC 0x40 54 + #define NBPF_CHAN_CTRL_SETSUS 0x100 55 + #define NBPF_CHAN_CTRL_CLRSUS 0x200 56 + 57 + /* Channel Configuration register */ 58 + #define NBPF_CHAN_CFG 0x2c 59 + #define NBPF_CHAN_CFG_SEL 7 /* terminal SELect: 0..7 */ 60 + #define NBPF_CHAN_CFG_REQD 8 /* REQuest Direction: DMAREQ is 0: input, 1: output */ 61 + #define NBPF_CHAN_CFG_LOEN 0x10 /* LOw ENable: low DMA request line is: 0: inactive, 1: active */ 62 + #define NBPF_CHAN_CFG_HIEN 0x20 /* HIgh ENable: high DMA request line is: 0: inactive, 1: active */ 63 + #define NBPF_CHAN_CFG_LVL 0x40 /* LeVeL: DMA request line is sensed as 0: edge, 1: level */ 64 + #define NBPF_CHAN_CFG_AM 0x700 /* ACK Mode: 0: Pulse mode, 1: Level mode, b'1x: Bus Cycle */ 65 + #define NBPF_CHAN_CFG_SDS 0xf000 /* Source Data Size: 0: 8 bits,... 
, 7: 1024 bits */ 66 + #define NBPF_CHAN_CFG_DDS 0xf0000 /* Destination Data Size: as above */ 67 + #define NBPF_CHAN_CFG_SAD 0x100000 /* Source ADdress counting: 0: increment, 1: fixed */ 68 + #define NBPF_CHAN_CFG_DAD 0x200000 /* Destination ADdress counting: 0: increment, 1: fixed */ 69 + #define NBPF_CHAN_CFG_TM 0x400000 /* Transfer Mode: 0: single, 1: block TM */ 70 + #define NBPF_CHAN_CFG_DEM 0x1000000 /* DMAEND interrupt Mask */ 71 + #define NBPF_CHAN_CFG_TCM 0x2000000 /* DMATCO interrupt Mask */ 72 + #define NBPF_CHAN_CFG_SBE 0x8000000 /* Sweep Buffer Enable */ 73 + #define NBPF_CHAN_CFG_RSEL 0x10000000 /* RM: Register Set sELect */ 74 + #define NBPF_CHAN_CFG_RSW 0x20000000 /* RM: Register Select sWitch */ 75 + #define NBPF_CHAN_CFG_REN 0x40000000 /* RM: Register Set Enable */ 76 + #define NBPF_CHAN_CFG_DMS 0x80000000 /* 0: register mode (RM), 1: link mode (LM) */ 77 + 78 + #define NBPF_CHAN_NXLA 0x38 79 + #define NBPF_CHAN_CRLA 0x3c 80 + 81 + /* Link Header field */ 82 + #define NBPF_HEADER_LV 1 83 + #define NBPF_HEADER_LE 2 84 + #define NBPF_HEADER_WBD 4 85 + #define NBPF_HEADER_DIM 8 86 + 87 + #define NBPF_CTRL 0x300 88 + #define NBPF_CTRL_PR 1 /* 0: fixed priority, 1: round robin */ 89 + #define NBPF_CTRL_LVINT 2 /* DMAEND and DMAERR signalling: 0: pulse, 1: level */ 90 + 91 + #define NBPF_DSTAT_ER 0x314 92 + #define NBPF_DSTAT_END 0x318 93 + 94 + #define NBPF_DMA_BUSWIDTHS \ 95 + (BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \ 96 + BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ 97 + BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ 98 + BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \ 99 + BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)) 100 + 101 + struct nbpf_config { 102 + int num_channels; 103 + int buffer_size; 104 + }; 105 + 106 + /* 107 + * We've got 3 types of objects, used to describe DMA transfers: 108 + * 1. high-level descriptor, containing a struct dma_async_tx_descriptor object 109 + * in it, used to communicate with the user 110 + * 2. hardware DMA link descriptors, that we pass to DMAC for DMA transfer 111 + * queuing, these must be DMAable, using either the streaming DMA API or 112 + * allocated from coherent memory - one per SG segment 113 + * 3. one per SG segment descriptors, used to manage HW link descriptors from 114 + * (2). They do not have to be DMAable. They can either be (a) allocated 115 + * together with link descriptors as mixed (DMA / CPU) objects, or (b) 116 + * separately. Even if allocated separately it would be best to link them 117 + * to link descriptors once during channel resource allocation and always 118 + * use them as a single object. 119 + * Therefore for both cases (a) and (b) at run-time objects (2) and (3) shall be 120 + * treated as a single SG segment descriptor. 
121 + */ 122 + 123 + struct nbpf_link_reg { 124 + u32 header; 125 + u32 src_addr; 126 + u32 dst_addr; 127 + u32 transaction_size; 128 + u32 config; 129 + u32 interval; 130 + u32 extension; 131 + u32 next; 132 + } __packed; 133 + 134 + struct nbpf_device; 135 + struct nbpf_channel; 136 + struct nbpf_desc; 137 + 138 + struct nbpf_link_desc { 139 + struct nbpf_link_reg *hwdesc; 140 + dma_addr_t hwdesc_dma_addr; 141 + struct nbpf_desc *desc; 142 + struct list_head node; 143 + }; 144 + 145 + /** 146 + * struct nbpf_desc - DMA transfer descriptor 147 + * @async_tx: dmaengine object 148 + * @user_wait: waiting for a user ack 149 + * @length: total transfer length 150 + * @sg: list of hardware descriptors, represented by struct nbpf_link_desc 151 + * @node: member in channel descriptor lists 152 + */ 153 + struct nbpf_desc { 154 + struct dma_async_tx_descriptor async_tx; 155 + bool user_wait; 156 + size_t length; 157 + struct nbpf_channel *chan; 158 + struct list_head sg; 159 + struct list_head node; 160 + }; 161 + 162 + /* Take a wild guess: allocate 4 segments per descriptor */ 163 + #define NBPF_SEGMENTS_PER_DESC 4 164 + #define NBPF_DESCS_PER_PAGE ((PAGE_SIZE - sizeof(struct list_head)) / \ 165 + (sizeof(struct nbpf_desc) + \ 166 + NBPF_SEGMENTS_PER_DESC * \ 167 + (sizeof(struct nbpf_link_desc) + sizeof(struct nbpf_link_reg)))) 168 + #define NBPF_SEGMENTS_PER_PAGE (NBPF_SEGMENTS_PER_DESC * NBPF_DESCS_PER_PAGE) 169 + 170 + struct nbpf_desc_page { 171 + struct list_head node; 172 + struct nbpf_desc desc[NBPF_DESCS_PER_PAGE]; 173 + struct nbpf_link_desc ldesc[NBPF_SEGMENTS_PER_PAGE]; 174 + struct nbpf_link_reg hwdesc[NBPF_SEGMENTS_PER_PAGE]; 175 + }; 176 + 177 + /** 178 + * struct nbpf_channel - one DMAC channel 179 + * @dma_chan: standard dmaengine channel object 180 + * @base: register address base 181 + * @nbpf: DMAC 182 + * @name: IRQ name 183 + * @irq: IRQ number 184 + * @slave_addr: address for slave DMA 185 + * @slave_width:slave data size in bytes 186 + * @slave_burst:maximum slave burst size in bytes 187 + * @terminal: DMA terminal, assigned to this channel 188 + * @dmarq_cfg: DMA request line configuration - high / low, edge / level for NBPF_CHAN_CFG 189 + * @flags: configuration flags from DT 190 + * @lock: protect descriptor lists 191 + * @free_links: list of free link descriptors 192 + * @free: list of free descriptors 193 + * @queued: list of queued descriptors 194 + * @active: list of descriptors, scheduled for processing 195 + * @done: list of completed descriptors, waiting post-processing 196 + * @desc_page: list of additionally allocated descriptor pages - if any 197 + */ 198 + struct nbpf_channel { 199 + struct dma_chan dma_chan; 200 + void __iomem *base; 201 + struct nbpf_device *nbpf; 202 + char name[16]; 203 + int irq; 204 + dma_addr_t slave_src_addr; 205 + size_t slave_src_width; 206 + size_t slave_src_burst; 207 + dma_addr_t slave_dst_addr; 208 + size_t slave_dst_width; 209 + size_t slave_dst_burst; 210 + unsigned int terminal; 211 + u32 dmarq_cfg; 212 + unsigned long flags; 213 + spinlock_t lock; 214 + struct list_head free_links; 215 + struct list_head free; 216 + struct list_head queued; 217 + struct list_head active; 218 + struct list_head done; 219 + struct list_head desc_page; 220 + struct nbpf_desc *running; 221 + bool paused; 222 + }; 223 + 224 + struct nbpf_device { 225 + struct dma_device dma_dev; 226 + void __iomem *base; 227 + struct clk *clk; 228 + const struct nbpf_config *config; 229 + struct nbpf_channel chan[]; 230 + }; 231 + 232 + enum nbpf_model { 233 + 
NBPF1B4, 234 + NBPF1B8, 235 + NBPF1B16, 236 + NBPF4B4, 237 + NBPF4B8, 238 + NBPF4B16, 239 + NBPF8B4, 240 + NBPF8B8, 241 + NBPF8B16, 242 + }; 243 + 244 + static struct nbpf_config nbpf_cfg[] = { 245 + [NBPF1B4] = { 246 + .num_channels = 1, 247 + .buffer_size = 4, 248 + }, 249 + [NBPF1B8] = { 250 + .num_channels = 1, 251 + .buffer_size = 8, 252 + }, 253 + [NBPF1B16] = { 254 + .num_channels = 1, 255 + .buffer_size = 16, 256 + }, 257 + [NBPF4B4] = { 258 + .num_channels = 4, 259 + .buffer_size = 4, 260 + }, 261 + [NBPF4B8] = { 262 + .num_channels = 4, 263 + .buffer_size = 8, 264 + }, 265 + [NBPF4B16] = { 266 + .num_channels = 4, 267 + .buffer_size = 16, 268 + }, 269 + [NBPF8B4] = { 270 + .num_channels = 8, 271 + .buffer_size = 4, 272 + }, 273 + [NBPF8B8] = { 274 + .num_channels = 8, 275 + .buffer_size = 8, 276 + }, 277 + [NBPF8B16] = { 278 + .num_channels = 8, 279 + .buffer_size = 16, 280 + }, 281 + }; 282 + 283 + #define nbpf_to_chan(d) container_of(d, struct nbpf_channel, dma_chan) 284 + 285 + /* 286 + * dmaengine drivers seem to have a lot in common and instead of sharing more 287 + * code, they reimplement those common algorithms independently. In this driver 288 + * we try to separate the hardware-specific part from the (largely) generic 289 + * part. This improves code readability and makes it possible in the future to 290 + * reuse the generic code in form of a helper library. That generic code should 291 + * be suitable for various DMA controllers, using transfer descriptors in RAM 292 + * and pushing one SG list at a time to the DMA controller. 293 + */ 294 + 295 + /* Hardware-specific part */ 296 + 297 + static inline u32 nbpf_chan_read(struct nbpf_channel *chan, 298 + unsigned int offset) 299 + { 300 + u32 data = ioread32(chan->base + offset); 301 + dev_dbg(chan->dma_chan.device->dev, "%s(0x%p + 0x%x) = 0x%x\n", 302 + __func__, chan->base, offset, data); 303 + return data; 304 + } 305 + 306 + static inline void nbpf_chan_write(struct nbpf_channel *chan, 307 + unsigned int offset, u32 data) 308 + { 309 + iowrite32(data, chan->base + offset); 310 + dev_dbg(chan->dma_chan.device->dev, "%s(0x%p + 0x%x) = 0x%x\n", 311 + __func__, chan->base, offset, data); 312 + } 313 + 314 + static inline u32 nbpf_read(struct nbpf_device *nbpf, 315 + unsigned int offset) 316 + { 317 + u32 data = ioread32(nbpf->base + offset); 318 + dev_dbg(nbpf->dma_dev.dev, "%s(0x%p + 0x%x) = 0x%x\n", 319 + __func__, nbpf->base, offset, data); 320 + return data; 321 + } 322 + 323 + static inline void nbpf_write(struct nbpf_device *nbpf, 324 + unsigned int offset, u32 data) 325 + { 326 + iowrite32(data, nbpf->base + offset); 327 + dev_dbg(nbpf->dma_dev.dev, "%s(0x%p + 0x%x) = 0x%x\n", 328 + __func__, nbpf->base, offset, data); 329 + } 330 + 331 + static void nbpf_chan_halt(struct nbpf_channel *chan) 332 + { 333 + nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_CLREN); 334 + } 335 + 336 + static bool nbpf_status_get(struct nbpf_channel *chan) 337 + { 338 + u32 status = nbpf_read(chan->nbpf, NBPF_DSTAT_END); 339 + 340 + return status & BIT(chan - chan->nbpf->chan); 341 + } 342 + 343 + static void nbpf_status_ack(struct nbpf_channel *chan) 344 + { 345 + nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_CLREND); 346 + } 347 + 348 + static u32 nbpf_error_get(struct nbpf_device *nbpf) 349 + { 350 + return nbpf_read(nbpf, NBPF_DSTAT_ER); 351 + } 352 + 353 + struct nbpf_channel *nbpf_error_get_channel(struct nbpf_device *nbpf, u32 error) 354 + { 355 + return nbpf->chan + __ffs(error); 356 + } 357 + 358 + static void 
nbpf_error_clear(struct nbpf_channel *chan) 359 + { 360 + u32 status; 361 + int i; 362 + 363 + /* Stop the channel, make sure DMA has been aborted */ 364 + nbpf_chan_halt(chan); 365 + 366 + for (i = 1000; i; i--) { 367 + status = nbpf_chan_read(chan, NBPF_CHAN_STAT); 368 + if (!(status & NBPF_CHAN_STAT_TACT)) 369 + break; 370 + cpu_relax(); 371 + } 372 + 373 + if (!i) 374 + dev_err(chan->dma_chan.device->dev, 375 + "%s(): abort timeout, channel status 0x%x\n", __func__, status); 376 + 377 + nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_SWRST); 378 + } 379 + 380 + static int nbpf_start(struct nbpf_desc *desc) 381 + { 382 + struct nbpf_channel *chan = desc->chan; 383 + struct nbpf_link_desc *ldesc = list_first_entry(&desc->sg, struct nbpf_link_desc, node); 384 + 385 + nbpf_chan_write(chan, NBPF_CHAN_NXLA, (u32)ldesc->hwdesc_dma_addr); 386 + nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_SETEN | NBPF_CHAN_CTRL_CLRSUS); 387 + chan->paused = false; 388 + 389 + /* Software trigger MEMCPY - only MEMCPY uses the block mode */ 390 + if (ldesc->hwdesc->config & NBPF_CHAN_CFG_TM) 391 + nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_STG); 392 + 393 + dev_dbg(chan->nbpf->dma_dev.dev, "%s(): next 0x%x, cur 0x%x\n", __func__, 394 + nbpf_chan_read(chan, NBPF_CHAN_NXLA), nbpf_chan_read(chan, NBPF_CHAN_CRLA)); 395 + 396 + return 0; 397 + } 398 + 399 + static void nbpf_chan_prepare(struct nbpf_channel *chan) 400 + { 401 + chan->dmarq_cfg = (chan->flags & NBPF_SLAVE_RQ_HIGH ? NBPF_CHAN_CFG_HIEN : 0) | 402 + (chan->flags & NBPF_SLAVE_RQ_LOW ? NBPF_CHAN_CFG_LOEN : 0) | 403 + (chan->flags & NBPF_SLAVE_RQ_LEVEL ? 404 + NBPF_CHAN_CFG_LVL | (NBPF_CHAN_CFG_AM & 0x200) : 0) | 405 + chan->terminal; 406 + } 407 + 408 + static void nbpf_chan_prepare_default(struct nbpf_channel *chan) 409 + { 410 + /* Don't output DMAACK */ 411 + chan->dmarq_cfg = NBPF_CHAN_CFG_AM & 0x400; 412 + chan->terminal = 0; 413 + chan->flags = 0; 414 + } 415 + 416 + static void nbpf_chan_configure(struct nbpf_channel *chan) 417 + { 418 + /* 419 + * We assume, that only the link mode and DMA request line configuration 420 + * have to be set in the configuration register manually. Dynamic 421 + * per-transfer configuration will be loaded from transfer descriptors. 422 + */ 423 + nbpf_chan_write(chan, NBPF_CHAN_CFG, NBPF_CHAN_CFG_DMS | chan->dmarq_cfg); 424 + } 425 + 426 + static u32 nbpf_xfer_ds(struct nbpf_device *nbpf, size_t size) 427 + { 428 + /* Maximum supported bursts depend on the buffer size */ 429 + return min_t(int, __ffs(size), ilog2(nbpf->config->buffer_size * 8)); 430 + } 431 + 432 + static size_t nbpf_xfer_size(struct nbpf_device *nbpf, 433 + enum dma_slave_buswidth width, u32 burst) 434 + { 435 + size_t size; 436 + 437 + if (!burst) 438 + burst = 1; 439 + 440 + switch (width) { 441 + case DMA_SLAVE_BUSWIDTH_8_BYTES: 442 + size = 8 * burst; 443 + break; 444 + 445 + case DMA_SLAVE_BUSWIDTH_4_BYTES: 446 + size = 4 * burst; 447 + break; 448 + 449 + case DMA_SLAVE_BUSWIDTH_2_BYTES: 450 + size = 2 * burst; 451 + break; 452 + 453 + default: 454 + pr_warn("%s(): invalid bus width %u\n", __func__, width); 455 + case DMA_SLAVE_BUSWIDTH_1_BYTE: 456 + size = burst; 457 + } 458 + 459 + return nbpf_xfer_ds(nbpf, size); 460 + } 461 + 462 + /* 463 + * We need a way to recognise slaves, whose data is sent "raw" over the bus, 464 + * i.e. it isn't known in advance how many bytes will be received. 
Therefore 465 + * the slave driver has to provide a "large enough" buffer and either read the 466 + * buffer, when it is full, or detect, that some data has arrived, then wait for 467 + * a timeout, if no more data arrives - receive what's already there. We want to 468 + * handle such slaves in a special way to allow an optimised mode for other 469 + * users, for whom the amount of data is known in advance. So far there's no way 470 + * to recognise such slaves. We use a data-width check to distinguish between 471 + * the SD host and the PL011 UART. 472 + */ 473 + 474 + static int nbpf_prep_one(struct nbpf_link_desc *ldesc, 475 + enum dma_transfer_direction direction, 476 + dma_addr_t src, dma_addr_t dst, size_t size, bool last) 477 + { 478 + struct nbpf_link_reg *hwdesc = ldesc->hwdesc; 479 + struct nbpf_desc *desc = ldesc->desc; 480 + struct nbpf_channel *chan = desc->chan; 481 + struct device *dev = chan->dma_chan.device->dev; 482 + size_t mem_xfer, slave_xfer; 483 + bool can_burst; 484 + 485 + hwdesc->header = NBPF_HEADER_WBD | NBPF_HEADER_LV | 486 + (last ? NBPF_HEADER_LE : 0); 487 + 488 + hwdesc->src_addr = src; 489 + hwdesc->dst_addr = dst; 490 + hwdesc->transaction_size = size; 491 + 492 + /* 493 + * set config: SAD, DAD, DDS, SDS, etc. 494 + * Note on transfer sizes: the DMAC can perform unaligned DMA transfers, 495 + * but it is important to have transaction size a multiple of both 496 + * receiver and transmitter transfer sizes. It is also possible to use 497 + * different RAM and device transfer sizes, and it does work well with 498 + * some devices, e.g. with V08R07S01E SD host controllers, which can use 499 + * 128 byte transfers. But this doesn't work with other devices, 500 + * especially when the transaction size is unknown. This is the case, 501 + * e.g. with serial drivers like amba-pl011.c. For reception it sets up 502 + * the transaction size of 4K and if fewer bytes are received, it 503 + * pauses DMA and reads out data received via DMA as well as those left 504 + * in the Rx FIFO. For this to work with the RAM side using burst 505 + * transfers we enable the SBE bit and terminate the transfer in our 506 + * DMA_PAUSE handler. 507 + */ 508 + mem_xfer = nbpf_xfer_ds(chan->nbpf, size); 509 + 510 + switch (direction) { 511 + case DMA_DEV_TO_MEM: 512 + can_burst = chan->slave_src_width >= 3; 513 + slave_xfer = min(mem_xfer, can_burst ? 514 + chan->slave_src_burst : chan->slave_src_width); 515 + /* 516 + * Is the slave narrower than 64 bits, i.e. isn't using the full 517 + * bus width and cannot use bursts? 518 + */ 519 + if (mem_xfer > chan->slave_src_burst && !can_burst) 520 + mem_xfer = chan->slave_src_burst; 521 + /* Device-to-RAM DMA is unreliable without REQD set */ 522 + hwdesc->config = NBPF_CHAN_CFG_SAD | (NBPF_CHAN_CFG_DDS & (mem_xfer << 16)) | 523 + (NBPF_CHAN_CFG_SDS & (slave_xfer << 12)) | NBPF_CHAN_CFG_REQD | 524 + NBPF_CHAN_CFG_SBE; 525 + break; 526 + 527 + case DMA_MEM_TO_DEV: 528 + slave_xfer = min(mem_xfer, chan->slave_dst_width >= 3 ? 529 + chan->slave_dst_burst : chan->slave_dst_width); 530 + hwdesc->config = NBPF_CHAN_CFG_DAD | (NBPF_CHAN_CFG_SDS & (mem_xfer << 12)) | 531 + (NBPF_CHAN_CFG_DDS & (slave_xfer << 16)) | NBPF_CHAN_CFG_REQD; 532 + break; 533 + 534 + case DMA_MEM_TO_MEM: 535 + hwdesc->config = NBPF_CHAN_CFG_TCM | NBPF_CHAN_CFG_TM | 536 + (NBPF_CHAN_CFG_SDS & (mem_xfer << 12)) | 537 + (NBPF_CHAN_CFG_DDS & (mem_xfer << 16)); 538 + break; 539 + 540 + default: 541 + return -EINVAL; 542 + } 543 + 544 + hwdesc->config |= chan->dmarq_cfg | (last ? 
0 : NBPF_CHAN_CFG_DEM) | 545 + NBPF_CHAN_CFG_DMS; 546 + 547 + dev_dbg(dev, "%s(): desc @ %pad: hdr 0x%x, cfg 0x%x, %zu @ %pad -> %pad\n", 548 + __func__, &ldesc->hwdesc_dma_addr, hwdesc->header, 549 + hwdesc->config, size, &src, &dst); 550 + 551 + dma_sync_single_for_device(dev, ldesc->hwdesc_dma_addr, sizeof(*hwdesc), 552 + DMA_TO_DEVICE); 553 + 554 + return 0; 555 + } 556 + 557 + static size_t nbpf_bytes_left(struct nbpf_channel *chan) 558 + { 559 + return nbpf_chan_read(chan, NBPF_CHAN_CUR_TR_BYTE); 560 + } 561 + 562 + static void nbpf_configure(struct nbpf_device *nbpf) 563 + { 564 + nbpf_write(nbpf, NBPF_CTRL, NBPF_CTRL_LVINT); 565 + } 566 + 567 + static void nbpf_pause(struct nbpf_channel *chan) 568 + { 569 + nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_SETSUS); 570 + /* See comment in nbpf_prep_one() */ 571 + nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_CLREN); 572 + } 573 + 574 + /* Generic part */ 575 + 576 + /* DMA ENGINE functions */ 577 + static void nbpf_issue_pending(struct dma_chan *dchan) 578 + { 579 + struct nbpf_channel *chan = nbpf_to_chan(dchan); 580 + unsigned long flags; 581 + 582 + dev_dbg(dchan->device->dev, "Entry %s()\n", __func__); 583 + 584 + spin_lock_irqsave(&chan->lock, flags); 585 + if (list_empty(&chan->queued)) 586 + goto unlock; 587 + 588 + list_splice_tail_init(&chan->queued, &chan->active); 589 + 590 + if (!chan->running) { 591 + struct nbpf_desc *desc = list_first_entry(&chan->active, 592 + struct nbpf_desc, node); 593 + if (!nbpf_start(desc)) 594 + chan->running = desc; 595 + } 596 + 597 + unlock: 598 + spin_unlock_irqrestore(&chan->lock, flags); 599 + } 600 + 601 + static enum dma_status nbpf_tx_status(struct dma_chan *dchan, 602 + dma_cookie_t cookie, struct dma_tx_state *state) 603 + { 604 + struct nbpf_channel *chan = nbpf_to_chan(dchan); 605 + enum dma_status status = dma_cookie_status(dchan, cookie, state); 606 + 607 + if (state) { 608 + dma_cookie_t running; 609 + unsigned long flags; 610 + 611 + spin_lock_irqsave(&chan->lock, flags); 612 + running = chan->running ? chan->running->async_tx.cookie : -EINVAL; 613 + 614 + if (cookie == running) { 615 + state->residue = nbpf_bytes_left(chan); 616 + dev_dbg(dchan->device->dev, "%s(): residue %u\n", __func__, 617 + state->residue); 618 + } else if (status == DMA_IN_PROGRESS) { 619 + struct nbpf_desc *desc; 620 + bool found = false; 621 + 622 + list_for_each_entry(desc, &chan->active, node) 623 + if (desc->async_tx.cookie == cookie) { 624 + found = true; 625 + break; 626 + } 627 + 628 + if (!found) 629 + list_for_each_entry(desc, &chan->queued, node) 630 + if (desc->async_tx.cookie == cookie) { 631 + found = true; 632 + break; 633 + 634 + } 635 + 636 + state->residue = found ? 
desc->length : 0; 637 + } 638 + 639 + spin_unlock_irqrestore(&chan->lock, flags); 640 + } 641 + 642 + if (chan->paused) 643 + status = DMA_PAUSED; 644 + 645 + return status; 646 + } 647 + 648 + static dma_cookie_t nbpf_tx_submit(struct dma_async_tx_descriptor *tx) 649 + { 650 + struct nbpf_desc *desc = container_of(tx, struct nbpf_desc, async_tx); 651 + struct nbpf_channel *chan = desc->chan; 652 + unsigned long flags; 653 + dma_cookie_t cookie; 654 + 655 + spin_lock_irqsave(&chan->lock, flags); 656 + cookie = dma_cookie_assign(tx); 657 + list_add_tail(&desc->node, &chan->queued); 658 + spin_unlock_irqrestore(&chan->lock, flags); 659 + 660 + dev_dbg(chan->dma_chan.device->dev, "Entry %s(%d)\n", __func__, cookie); 661 + 662 + return cookie; 663 + } 664 + 665 + static int nbpf_desc_page_alloc(struct nbpf_channel *chan) 666 + { 667 + struct dma_chan *dchan = &chan->dma_chan; 668 + struct nbpf_desc_page *dpage = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); 669 + struct nbpf_link_desc *ldesc; 670 + struct nbpf_link_reg *hwdesc; 671 + struct nbpf_desc *desc; 672 + LIST_HEAD(head); 673 + LIST_HEAD(lhead); 674 + int i; 675 + struct device *dev = dchan->device->dev; 676 + 677 + if (!dpage) 678 + return -ENOMEM; 679 + 680 + dev_dbg(dev, "%s(): alloc %lu descriptors, %lu segments, total alloc %zu\n", 681 + __func__, NBPF_DESCS_PER_PAGE, NBPF_SEGMENTS_PER_PAGE, sizeof(*dpage)); 682 + 683 + for (i = 0, ldesc = dpage->ldesc, hwdesc = dpage->hwdesc; 684 + i < ARRAY_SIZE(dpage->ldesc); 685 + i++, ldesc++, hwdesc++) { 686 + ldesc->hwdesc = hwdesc; 687 + list_add_tail(&ldesc->node, &lhead); 688 + ldesc->hwdesc_dma_addr = dma_map_single(dchan->device->dev, 689 + hwdesc, sizeof(*hwdesc), DMA_TO_DEVICE); 690 + 691 + dev_dbg(dev, "%s(): mapped 0x%p to %pad\n", __func__, 692 + hwdesc, &ldesc->hwdesc_dma_addr); 693 + } 694 + 695 + for (i = 0, desc = dpage->desc; 696 + i < ARRAY_SIZE(dpage->desc); 697 + i++, desc++) { 698 + dma_async_tx_descriptor_init(&desc->async_tx, dchan); 699 + desc->async_tx.tx_submit = nbpf_tx_submit; 700 + desc->chan = chan; 701 + INIT_LIST_HEAD(&desc->sg); 702 + list_add_tail(&desc->node, &head); 703 + } 704 + 705 + /* 706 + * This function cannot be called from interrupt context, so, no need to 707 + * save flags 708 + */ 709 + spin_lock_irq(&chan->lock); 710 + list_splice_tail(&lhead, &chan->free_links); 711 + list_splice_tail(&head, &chan->free); 712 + list_add(&dpage->node, &chan->desc_page); 713 + spin_unlock_irq(&chan->lock); 714 + 715 + return ARRAY_SIZE(dpage->desc); 716 + } 717 + 718 + static void nbpf_desc_put(struct nbpf_desc *desc) 719 + { 720 + struct nbpf_channel *chan = desc->chan; 721 + struct nbpf_link_desc *ldesc, *tmp; 722 + unsigned long flags; 723 + 724 + spin_lock_irqsave(&chan->lock, flags); 725 + list_for_each_entry_safe(ldesc, tmp, &desc->sg, node) 726 + list_move(&ldesc->node, &chan->free_links); 727 + 728 + list_add(&desc->node, &chan->free); 729 + spin_unlock_irqrestore(&chan->lock, flags); 730 + } 731 + 732 + static void nbpf_scan_acked(struct nbpf_channel *chan) 733 + { 734 + struct nbpf_desc *desc, *tmp; 735 + unsigned long flags; 736 + LIST_HEAD(head); 737 + 738 + spin_lock_irqsave(&chan->lock, flags); 739 + list_for_each_entry_safe(desc, tmp, &chan->done, node) 740 + if (async_tx_test_ack(&desc->async_tx) && desc->user_wait) { 741 + list_move(&desc->node, &head); 742 + desc->user_wait = false; 743 + } 744 + spin_unlock_irqrestore(&chan->lock, flags); 745 + 746 + list_for_each_entry_safe(desc, tmp, &head, node) { 747 + list_del(&desc->node); 748 + 
nbpf_desc_put(desc); 749 + } 750 + } 751 + 752 + /* 753 + * We have to allocate descriptors with the channel lock dropped. This means, 754 + * before we re-acquire the lock buffers can be taken already, so we have to 755 + * re-check after re-acquiring the lock and possibly retry, if buffers are gone 756 + * again. 757 + */ 758 + static struct nbpf_desc *nbpf_desc_get(struct nbpf_channel *chan, size_t len) 759 + { 760 + struct nbpf_desc *desc = NULL; 761 + struct nbpf_link_desc *ldesc, *prev = NULL; 762 + 763 + nbpf_scan_acked(chan); 764 + 765 + spin_lock_irq(&chan->lock); 766 + 767 + do { 768 + int i = 0, ret; 769 + 770 + if (list_empty(&chan->free)) { 771 + /* No more free descriptors */ 772 + spin_unlock_irq(&chan->lock); 773 + ret = nbpf_desc_page_alloc(chan); 774 + if (ret < 0) 775 + return NULL; 776 + spin_lock_irq(&chan->lock); 777 + continue; 778 + } 779 + desc = list_first_entry(&chan->free, struct nbpf_desc, node); 780 + list_del(&desc->node); 781 + 782 + do { 783 + if (list_empty(&chan->free_links)) { 784 + /* No more free link descriptors */ 785 + spin_unlock_irq(&chan->lock); 786 + ret = nbpf_desc_page_alloc(chan); 787 + if (ret < 0) { 788 + nbpf_desc_put(desc); 789 + return NULL; 790 + } 791 + spin_lock_irq(&chan->lock); 792 + continue; 793 + } 794 + 795 + ldesc = list_first_entry(&chan->free_links, 796 + struct nbpf_link_desc, node); 797 + ldesc->desc = desc; 798 + if (prev) 799 + prev->hwdesc->next = (u32)ldesc->hwdesc_dma_addr; 800 + 801 + prev = ldesc; 802 + list_move_tail(&ldesc->node, &desc->sg); 803 + 804 + i++; 805 + } while (i < len); 806 + } while (!desc); 807 + 808 + prev->hwdesc->next = 0; 809 + 810 + spin_unlock_irq(&chan->lock); 811 + 812 + return desc; 813 + } 814 + 815 + static void nbpf_chan_idle(struct nbpf_channel *chan) 816 + { 817 + struct nbpf_desc *desc, *tmp; 818 + unsigned long flags; 819 + LIST_HEAD(head); 820 + 821 + spin_lock_irqsave(&chan->lock, flags); 822 + 823 + list_splice_init(&chan->done, &head); 824 + list_splice_init(&chan->active, &head); 825 + list_splice_init(&chan->queued, &head); 826 + 827 + chan->running = NULL; 828 + 829 + spin_unlock_irqrestore(&chan->lock, flags); 830 + 831 + list_for_each_entry_safe(desc, tmp, &head, node) { 832 + dev_dbg(chan->nbpf->dma_dev.dev, "%s(): force-free desc %p cookie %d\n", 833 + __func__, desc, desc->async_tx.cookie); 834 + list_del(&desc->node); 835 + nbpf_desc_put(desc); 836 + } 837 + } 838 + 839 + static int nbpf_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd, 840 + unsigned long arg) 841 + { 842 + struct nbpf_channel *chan = nbpf_to_chan(dchan); 843 + struct dma_slave_config *config; 844 + 845 + dev_dbg(dchan->device->dev, "Entry %s(%d)\n", __func__, cmd); 846 + 847 + switch (cmd) { 848 + case DMA_TERMINATE_ALL: 849 + dev_dbg(dchan->device->dev, "Terminating\n"); 850 + nbpf_chan_halt(chan); 851 + nbpf_chan_idle(chan); 852 + break; 853 + 854 + case DMA_SLAVE_CONFIG: 855 + if (!arg) 856 + return -EINVAL; 857 + config = (struct dma_slave_config *)arg; 858 + 859 + /* 860 + * We could check config->slave_id to match chan->terminal here, 861 + * but with DT they would be coming from the same source, so 862 + * such a check would be superflous 863 + */ 864 + 865 + chan->slave_dst_addr = config->dst_addr; 866 + chan->slave_dst_width = nbpf_xfer_size(chan->nbpf, 867 + config->dst_addr_width, 1); 868 + chan->slave_dst_burst = nbpf_xfer_size(chan->nbpf, 869 + config->dst_addr_width, 870 + config->dst_maxburst); 871 + chan->slave_src_addr = config->src_addr; 872 + chan->slave_src_width = 
nbpf_xfer_size(chan->nbpf, 873 + config->src_addr_width, 1); 874 + chan->slave_src_burst = nbpf_xfer_size(chan->nbpf, 875 + config->src_addr_width, 876 + config->src_maxburst); 877 + break; 878 + 879 + case DMA_PAUSE: 880 + chan->paused = true; 881 + nbpf_pause(chan); 882 + break; 883 + 884 + default: 885 + return -ENXIO; 886 + } 887 + 888 + return 0; 889 + } 890 + 891 + static struct dma_async_tx_descriptor *nbpf_prep_sg(struct nbpf_channel *chan, 892 + struct scatterlist *src_sg, struct scatterlist *dst_sg, 893 + size_t len, enum dma_transfer_direction direction, 894 + unsigned long flags) 895 + { 896 + struct nbpf_link_desc *ldesc; 897 + struct scatterlist *mem_sg; 898 + struct nbpf_desc *desc; 899 + bool inc_src, inc_dst; 900 + size_t data_len = 0; 901 + int i = 0; 902 + 903 + switch (direction) { 904 + case DMA_DEV_TO_MEM: 905 + mem_sg = dst_sg; 906 + inc_src = false; 907 + inc_dst = true; 908 + break; 909 + 910 + case DMA_MEM_TO_DEV: 911 + mem_sg = src_sg; 912 + inc_src = true; 913 + inc_dst = false; 914 + break; 915 + 916 + default: 917 + case DMA_MEM_TO_MEM: 918 + mem_sg = src_sg; 919 + inc_src = true; 920 + inc_dst = true; 921 + } 922 + 923 + desc = nbpf_desc_get(chan, len); 924 + if (!desc) 925 + return NULL; 926 + 927 + desc->async_tx.flags = flags; 928 + desc->async_tx.cookie = -EBUSY; 929 + desc->user_wait = false; 930 + 931 + /* 932 + * This is a private descriptor list, and we own the descriptor. No need 933 + * to lock. 934 + */ 935 + list_for_each_entry(ldesc, &desc->sg, node) { 936 + int ret = nbpf_prep_one(ldesc, direction, 937 + sg_dma_address(src_sg), 938 + sg_dma_address(dst_sg), 939 + sg_dma_len(mem_sg), 940 + i == len - 1); 941 + if (ret < 0) { 942 + nbpf_desc_put(desc); 943 + return NULL; 944 + } 945 + data_len += sg_dma_len(mem_sg); 946 + if (inc_src) 947 + src_sg = sg_next(src_sg); 948 + if (inc_dst) 949 + dst_sg = sg_next(dst_sg); 950 + mem_sg = direction == DMA_DEV_TO_MEM ? 
dst_sg : src_sg; 951 + i++; 952 + } 953 + 954 + desc->length = data_len; 955 + 956 + /* The user has to return the descriptor to us ASAP via .tx_submit() */ 957 + return &desc->async_tx; 958 + } 959 + 960 + static struct dma_async_tx_descriptor *nbpf_prep_memcpy( 961 + struct dma_chan *dchan, dma_addr_t dst, dma_addr_t src, 962 + size_t len, unsigned long flags) 963 + { 964 + struct nbpf_channel *chan = nbpf_to_chan(dchan); 965 + struct scatterlist dst_sg; 966 + struct scatterlist src_sg; 967 + 968 + sg_init_table(&dst_sg, 1); 969 + sg_init_table(&src_sg, 1); 970 + 971 + sg_dma_address(&dst_sg) = dst; 972 + sg_dma_address(&src_sg) = src; 973 + 974 + sg_dma_len(&dst_sg) = len; 975 + sg_dma_len(&src_sg) = len; 976 + 977 + dev_dbg(dchan->device->dev, "%s(): %zu @ %pad -> %pad\n", 978 + __func__, len, &src, &dst); 979 + 980 + return nbpf_prep_sg(chan, &src_sg, &dst_sg, 1, 981 + DMA_MEM_TO_MEM, flags); 982 + } 983 + 984 + static struct dma_async_tx_descriptor *nbpf_prep_memcpy_sg( 985 + struct dma_chan *dchan, 986 + struct scatterlist *dst_sg, unsigned int dst_nents, 987 + struct scatterlist *src_sg, unsigned int src_nents, 988 + unsigned long flags) 989 + { 990 + struct nbpf_channel *chan = nbpf_to_chan(dchan); 991 + 992 + if (dst_nents != src_nents) 993 + return NULL; 994 + 995 + return nbpf_prep_sg(chan, src_sg, dst_sg, src_nents, 996 + DMA_MEM_TO_MEM, flags); 997 + } 998 + 999 + static struct dma_async_tx_descriptor *nbpf_prep_slave_sg( 1000 + struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len, 1001 + enum dma_transfer_direction direction, unsigned long flags, void *context) 1002 + { 1003 + struct nbpf_channel *chan = nbpf_to_chan(dchan); 1004 + struct scatterlist slave_sg; 1005 + 1006 + dev_dbg(dchan->device->dev, "Entry %s()\n", __func__); 1007 + 1008 + sg_init_table(&slave_sg, 1); 1009 + 1010 + switch (direction) { 1011 + case DMA_MEM_TO_DEV: 1012 + sg_dma_address(&slave_sg) = chan->slave_dst_addr; 1013 + return nbpf_prep_sg(chan, sgl, &slave_sg, sg_len, 1014 + direction, flags); 1015 + 1016 + case DMA_DEV_TO_MEM: 1017 + sg_dma_address(&slave_sg) = chan->slave_src_addr; 1018 + return nbpf_prep_sg(chan, &slave_sg, sgl, sg_len, 1019 + direction, flags); 1020 + 1021 + default: 1022 + return NULL; 1023 + } 1024 + } 1025 + 1026 + static int nbpf_alloc_chan_resources(struct dma_chan *dchan) 1027 + { 1028 + struct nbpf_channel *chan = nbpf_to_chan(dchan); 1029 + int ret; 1030 + 1031 + INIT_LIST_HEAD(&chan->free); 1032 + INIT_LIST_HEAD(&chan->free_links); 1033 + INIT_LIST_HEAD(&chan->queued); 1034 + INIT_LIST_HEAD(&chan->active); 1035 + INIT_LIST_HEAD(&chan->done); 1036 + 1037 + ret = nbpf_desc_page_alloc(chan); 1038 + if (ret < 0) 1039 + return ret; 1040 + 1041 + dev_dbg(dchan->device->dev, "Entry %s(): terminal %u\n", __func__, 1042 + chan->terminal); 1043 + 1044 + nbpf_chan_configure(chan); 1045 + 1046 + return ret; 1047 + } 1048 + 1049 + static void nbpf_free_chan_resources(struct dma_chan *dchan) 1050 + { 1051 + struct nbpf_channel *chan = nbpf_to_chan(dchan); 1052 + struct nbpf_desc_page *dpage, *tmp; 1053 + 1054 + dev_dbg(dchan->device->dev, "Entry %s()\n", __func__); 1055 + 1056 + nbpf_chan_halt(chan); 1057 + /* Clean up for if a channel is re-used for MEMCPY after slave DMA */ 1058 + nbpf_chan_prepare_default(chan); 1059 + 1060 + list_for_each_entry_safe(dpage, tmp, &chan->desc_page, node) { 1061 + struct nbpf_link_desc *ldesc; 1062 + int i; 1063 + list_del(&dpage->node); 1064 + for (i = 0, ldesc = dpage->ldesc; 1065 + i < ARRAY_SIZE(dpage->ldesc); 1066 + i++, ldesc++) 
1067 + dma_unmap_single(dchan->device->dev, ldesc->hwdesc_dma_addr, 1068 + sizeof(*ldesc->hwdesc), DMA_TO_DEVICE); 1069 + free_page((unsigned long)dpage); 1070 + } 1071 + } 1072 + 1073 + static int nbpf_slave_caps(struct dma_chan *dchan, 1074 + struct dma_slave_caps *caps) 1075 + { 1076 + caps->src_addr_widths = NBPF_DMA_BUSWIDTHS; 1077 + caps->dstn_addr_widths = NBPF_DMA_BUSWIDTHS; 1078 + caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); 1079 + caps->cmd_pause = false; 1080 + caps->cmd_terminate = true; 1081 + 1082 + return 0; 1083 + } 1084 + 1085 + static struct dma_chan *nbpf_of_xlate(struct of_phandle_args *dma_spec, 1086 + struct of_dma *ofdma) 1087 + { 1088 + struct nbpf_device *nbpf = ofdma->of_dma_data; 1089 + struct dma_chan *dchan; 1090 + struct nbpf_channel *chan; 1091 + 1092 + if (dma_spec->args_count != 2) 1093 + return NULL; 1094 + 1095 + dchan = dma_get_any_slave_channel(&nbpf->dma_dev); 1096 + if (!dchan) 1097 + return NULL; 1098 + 1099 + dev_dbg(dchan->device->dev, "Entry %s(%s)\n", __func__, 1100 + dma_spec->np->name); 1101 + 1102 + chan = nbpf_to_chan(dchan); 1103 + 1104 + chan->terminal = dma_spec->args[0]; 1105 + chan->flags = dma_spec->args[1]; 1106 + 1107 + nbpf_chan_prepare(chan); 1108 + nbpf_chan_configure(chan); 1109 + 1110 + return dchan; 1111 + } 1112 + 1113 + static irqreturn_t nbpf_chan_irqt(int irq, void *dev) 1114 + { 1115 + struct nbpf_channel *chan = dev; 1116 + struct nbpf_desc *desc, *tmp; 1117 + dma_async_tx_callback callback; 1118 + void *param; 1119 + 1120 + while (!list_empty(&chan->done)) { 1121 + bool found = false, must_put, recycling = false; 1122 + 1123 + spin_lock_irq(&chan->lock); 1124 + 1125 + list_for_each_entry_safe(desc, tmp, &chan->done, node) { 1126 + if (!desc->user_wait) { 1127 + /* Newly completed descriptor, have to process */ 1128 + found = true; 1129 + break; 1130 + } else if (async_tx_test_ack(&desc->async_tx)) { 1131 + /* 1132 + * This descriptor was waiting for a user ACK, 1133 + * it can be recycled now. 
1134 + */ 1135 + list_del(&desc->node); 1136 + spin_unlock_irq(&chan->lock); 1137 + nbpf_desc_put(desc); 1138 + recycling = true; 1139 + break; 1140 + } 1141 + } 1142 + 1143 + if (recycling) 1144 + continue; 1145 + 1146 + if (!found) { 1147 + /* This can happen if TERMINATE_ALL has been called */ 1148 + spin_unlock_irq(&chan->lock); 1149 + break; 1150 + } 1151 + 1152 + dma_cookie_complete(&desc->async_tx); 1153 + 1154 + /* 1155 + * With released lock we cannot dereference desc, maybe it's 1156 + * still on the "done" list 1157 + */ 1158 + if (async_tx_test_ack(&desc->async_tx)) { 1159 + list_del(&desc->node); 1160 + must_put = true; 1161 + } else { 1162 + desc->user_wait = true; 1163 + must_put = false; 1164 + } 1165 + 1166 + callback = desc->async_tx.callback; 1167 + param = desc->async_tx.callback_param; 1168 + 1169 + /* ack and callback completed descriptor */ 1170 + spin_unlock_irq(&chan->lock); 1171 + 1172 + if (callback) 1173 + callback(param); 1174 + 1175 + if (must_put) 1176 + nbpf_desc_put(desc); 1177 + } 1178 + 1179 + return IRQ_HANDLED; 1180 + } 1181 + 1182 + static irqreturn_t nbpf_chan_irq(int irq, void *dev) 1183 + { 1184 + struct nbpf_channel *chan = dev; 1185 + bool done = nbpf_status_get(chan); 1186 + struct nbpf_desc *desc; 1187 + irqreturn_t ret; 1188 + 1189 + if (!done) 1190 + return IRQ_NONE; 1191 + 1192 + nbpf_status_ack(chan); 1193 + 1194 + dev_dbg(&chan->dma_chan.dev->device, "%s()\n", __func__); 1195 + 1196 + spin_lock(&chan->lock); 1197 + desc = chan->running; 1198 + if (WARN_ON(!desc)) { 1199 + ret = IRQ_NONE; 1200 + goto unlock; 1201 + } else { 1202 + ret = IRQ_WAKE_THREAD; 1203 + } 1204 + 1205 + list_move_tail(&desc->node, &chan->done); 1206 + chan->running = NULL; 1207 + 1208 + if (!list_empty(&chan->active)) { 1209 + desc = list_first_entry(&chan->active, 1210 + struct nbpf_desc, node); 1211 + if (!nbpf_start(desc)) 1212 + chan->running = desc; 1213 + } 1214 + 1215 + unlock: 1216 + spin_unlock(&chan->lock); 1217 + 1218 + return ret; 1219 + } 1220 + 1221 + static irqreturn_t nbpf_err_irq(int irq, void *dev) 1222 + { 1223 + struct nbpf_device *nbpf = dev; 1224 + u32 error = nbpf_error_get(nbpf); 1225 + 1226 + dev_warn(nbpf->dma_dev.dev, "DMA error IRQ %u\n", irq); 1227 + 1228 + if (!error) 1229 + return IRQ_NONE; 1230 + 1231 + do { 1232 + struct nbpf_channel *chan = nbpf_error_get_channel(nbpf, error); 1233 + /* On error: abort all queued transfers, no callback */ 1234 + nbpf_error_clear(chan); 1235 + nbpf_chan_idle(chan); 1236 + error = nbpf_error_get(nbpf); 1237 + } while (error); 1238 + 1239 + return IRQ_HANDLED; 1240 + } 1241 + 1242 + static int nbpf_chan_probe(struct nbpf_device *nbpf, int n) 1243 + { 1244 + struct dma_device *dma_dev = &nbpf->dma_dev; 1245 + struct nbpf_channel *chan = nbpf->chan + n; 1246 + int ret; 1247 + 1248 + chan->nbpf = nbpf; 1249 + chan->base = nbpf->base + NBPF_REG_CHAN_OFFSET + NBPF_REG_CHAN_SIZE * n; 1250 + INIT_LIST_HEAD(&chan->desc_page); 1251 + spin_lock_init(&chan->lock); 1252 + chan->dma_chan.device = dma_dev; 1253 + dma_cookie_init(&chan->dma_chan); 1254 + nbpf_chan_prepare_default(chan); 1255 + 1256 + dev_dbg(dma_dev->dev, "%s(): channel %d: -> %p\n", __func__, n, chan->base); 1257 + 1258 + snprintf(chan->name, sizeof(chan->name), "nbpf %d", n); 1259 + 1260 + ret = devm_request_threaded_irq(dma_dev->dev, chan->irq, 1261 + nbpf_chan_irq, nbpf_chan_irqt, IRQF_SHARED, 1262 + chan->name, chan); 1263 + if (ret < 0) 1264 + return ret; 1265 + 1266 + /* Add the channel to DMA device channel list */ 1267 + 
list_add_tail(&chan->dma_chan.device_node, 1268 + &dma_dev->channels); 1269 + 1270 + return 0; 1271 + } 1272 + 1273 + static const struct of_device_id nbpf_match[] = { 1274 + {.compatible = "renesas,nbpfaxi64dmac1b4", .data = &nbpf_cfg[NBPF1B4]}, 1275 + {.compatible = "renesas,nbpfaxi64dmac1b8", .data = &nbpf_cfg[NBPF1B8]}, 1276 + {.compatible = "renesas,nbpfaxi64dmac1b16", .data = &nbpf_cfg[NBPF1B16]}, 1277 + {.compatible = "renesas,nbpfaxi64dmac4b4", .data = &nbpf_cfg[NBPF4B4]}, 1278 + {.compatible = "renesas,nbpfaxi64dmac4b8", .data = &nbpf_cfg[NBPF4B8]}, 1279 + {.compatible = "renesas,nbpfaxi64dmac4b16", .data = &nbpf_cfg[NBPF4B16]}, 1280 + {.compatible = "renesas,nbpfaxi64dmac8b4", .data = &nbpf_cfg[NBPF8B4]}, 1281 + {.compatible = "renesas,nbpfaxi64dmac8b8", .data = &nbpf_cfg[NBPF8B8]}, 1282 + {.compatible = "renesas,nbpfaxi64dmac8b16", .data = &nbpf_cfg[NBPF8B16]}, 1283 + {} 1284 + }; 1285 + MODULE_DEVICE_TABLE(of, nbpf_match); 1286 + 1287 + static int nbpf_probe(struct platform_device *pdev) 1288 + { 1289 + struct device *dev = &pdev->dev; 1290 + const struct of_device_id *of_id = of_match_device(nbpf_match, dev); 1291 + struct device_node *np = dev->of_node; 1292 + struct nbpf_device *nbpf; 1293 + struct dma_device *dma_dev; 1294 + struct resource *iomem, *irq_res; 1295 + const struct nbpf_config *cfg; 1296 + int num_channels; 1297 + int ret, irq, eirq, i; 1298 + int irqbuf[9] /* maximum 8 channels + error IRQ */; 1299 + unsigned int irqs = 0; 1300 + 1301 + BUILD_BUG_ON(sizeof(struct nbpf_desc_page) > PAGE_SIZE); 1302 + 1303 + /* DT only */ 1304 + if (!np || !of_id || !of_id->data) 1305 + return -ENODEV; 1306 + 1307 + cfg = of_id->data; 1308 + num_channels = cfg->num_channels; 1309 + 1310 + nbpf = devm_kzalloc(dev, sizeof(*nbpf) + num_channels * 1311 + sizeof(nbpf->chan[0]), GFP_KERNEL); 1312 + if (!nbpf) { 1313 + dev_err(dev, "Memory allocation failed\n"); 1314 + return -ENOMEM; 1315 + } 1316 + dma_dev = &nbpf->dma_dev; 1317 + dma_dev->dev = dev; 1318 + 1319 + iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1320 + nbpf->base = devm_ioremap_resource(dev, iomem); 1321 + if (IS_ERR(nbpf->base)) 1322 + return PTR_ERR(nbpf->base); 1323 + 1324 + nbpf->clk = devm_clk_get(dev, NULL); 1325 + if (IS_ERR(nbpf->clk)) 1326 + return PTR_ERR(nbpf->clk); 1327 + 1328 + nbpf->config = cfg; 1329 + 1330 + for (i = 0; irqs < ARRAY_SIZE(irqbuf); i++) { 1331 + irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, i); 1332 + if (!irq_res) 1333 + break; 1334 + 1335 + for (irq = irq_res->start; irq <= irq_res->end; 1336 + irq++, irqs++) 1337 + irqbuf[irqs] = irq; 1338 + } 1339 + 1340 + /* 1341 + * 3 IRQ resource schemes are supported: 1342 + * 1. 1 shared IRQ for error and all channels 1343 + * 2. 2 IRQs: one for error and one shared for all channels 1344 + * 3. 
1 IRQ for error and an own IRQ for each channel 1345 + */ 1346 + if (irqs != 1 && irqs != 2 && irqs != num_channels + 1) 1347 + return -ENXIO; 1348 + 1349 + if (irqs == 1) { 1350 + eirq = irqbuf[0]; 1351 + 1352 + for (i = 0; i <= num_channels; i++) 1353 + nbpf->chan[i].irq = irqbuf[0]; 1354 + } else { 1355 + eirq = platform_get_irq_byname(pdev, "error"); 1356 + if (eirq < 0) 1357 + return eirq; 1358 + 1359 + if (irqs == num_channels + 1) { 1360 + struct nbpf_channel *chan; 1361 + 1362 + for (i = 0, chan = nbpf->chan; i <= num_channels; 1363 + i++, chan++) { 1364 + /* Skip the error IRQ */ 1365 + if (irqbuf[i] == eirq) 1366 + i++; 1367 + chan->irq = irqbuf[i]; 1368 + } 1369 + 1370 + if (chan != nbpf->chan + num_channels) 1371 + return -EINVAL; 1372 + } else { 1373 + /* 2 IRQs and more than one channel */ 1374 + if (irqbuf[0] == eirq) 1375 + irq = irqbuf[1]; 1376 + else 1377 + irq = irqbuf[0]; 1378 + 1379 + for (i = 0; i <= num_channels; i++) 1380 + nbpf->chan[i].irq = irq; 1381 + } 1382 + } 1383 + 1384 + ret = devm_request_irq(dev, eirq, nbpf_err_irq, 1385 + IRQF_SHARED, "dma error", nbpf); 1386 + if (ret < 0) 1387 + return ret; 1388 + 1389 + INIT_LIST_HEAD(&dma_dev->channels); 1390 + 1391 + /* Create DMA Channel */ 1392 + for (i = 0; i < num_channels; i++) { 1393 + ret = nbpf_chan_probe(nbpf, i); 1394 + if (ret < 0) 1395 + return ret; 1396 + } 1397 + 1398 + dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask); 1399 + dma_cap_set(DMA_SLAVE, dma_dev->cap_mask); 1400 + dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask); 1401 + dma_cap_set(DMA_SG, dma_dev->cap_mask); 1402 + 1403 + /* Common and MEMCPY operations */ 1404 + dma_dev->device_alloc_chan_resources 1405 + = nbpf_alloc_chan_resources; 1406 + dma_dev->device_free_chan_resources = nbpf_free_chan_resources; 1407 + dma_dev->device_prep_dma_sg = nbpf_prep_memcpy_sg; 1408 + dma_dev->device_prep_dma_memcpy = nbpf_prep_memcpy; 1409 + dma_dev->device_tx_status = nbpf_tx_status; 1410 + dma_dev->device_issue_pending = nbpf_issue_pending; 1411 + dma_dev->device_slave_caps = nbpf_slave_caps; 1412 + 1413 + /* 1414 + * If we drop support for unaligned MEMCPY buffer addresses and / or 1415 + * lengths by setting 1416 + * dma_dev->copy_align = 4; 1417 + * then we can set transfer length to 4 bytes in nbpf_prep_one() for 1418 + * DMA_MEM_TO_MEM 1419 + */ 1420 + 1421 + /* Compulsory for DMA_SLAVE fields */ 1422 + dma_dev->device_prep_slave_sg = nbpf_prep_slave_sg; 1423 + dma_dev->device_control = nbpf_control; 1424 + 1425 + platform_set_drvdata(pdev, nbpf); 1426 + 1427 + ret = clk_prepare_enable(nbpf->clk); 1428 + if (ret < 0) 1429 + return ret; 1430 + 1431 + nbpf_configure(nbpf); 1432 + 1433 + ret = dma_async_device_register(dma_dev); 1434 + if (ret < 0) 1435 + goto e_clk_off; 1436 + 1437 + ret = of_dma_controller_register(np, nbpf_of_xlate, nbpf); 1438 + if (ret < 0) 1439 + goto e_dma_dev_unreg; 1440 + 1441 + return 0; 1442 + 1443 + e_dma_dev_unreg: 1444 + dma_async_device_unregister(dma_dev); 1445 + e_clk_off: 1446 + clk_disable_unprepare(nbpf->clk); 1447 + 1448 + return ret; 1449 + } 1450 + 1451 + static int nbpf_remove(struct platform_device *pdev) 1452 + { 1453 + struct nbpf_device *nbpf = platform_get_drvdata(pdev); 1454 + 1455 + of_dma_controller_free(pdev->dev.of_node); 1456 + dma_async_device_unregister(&nbpf->dma_dev); 1457 + clk_disable_unprepare(nbpf->clk); 1458 + 1459 + return 0; 1460 + } 1461 + 1462 + static struct platform_device_id nbpf_ids[] = { 1463 + {"nbpfaxi64dmac1b4", (kernel_ulong_t)&nbpf_cfg[NBPF1B4]}, 1464 + {"nbpfaxi64dmac1b8", 
(kernel_ulong_t)&nbpf_cfg[NBPF1B8]}, 1465 + {"nbpfaxi64dmac1b16", (kernel_ulong_t)&nbpf_cfg[NBPF1B16]}, 1466 + {"nbpfaxi64dmac4b4", (kernel_ulong_t)&nbpf_cfg[NBPF4B4]}, 1467 + {"nbpfaxi64dmac4b8", (kernel_ulong_t)&nbpf_cfg[NBPF4B8]}, 1468 + {"nbpfaxi64dmac4b16", (kernel_ulong_t)&nbpf_cfg[NBPF4B16]}, 1469 + {"nbpfaxi64dmac8b4", (kernel_ulong_t)&nbpf_cfg[NBPF8B4]}, 1470 + {"nbpfaxi64dmac8b8", (kernel_ulong_t)&nbpf_cfg[NBPF8B8]}, 1471 + {"nbpfaxi64dmac8b16", (kernel_ulong_t)&nbpf_cfg[NBPF8B16]}, 1472 + {}, 1473 + }; 1474 + MODULE_DEVICE_TABLE(platform, nbpf_ids); 1475 + 1476 + #ifdef CONFIG_PM_RUNTIME 1477 + static int nbpf_runtime_suspend(struct device *dev) 1478 + { 1479 + struct nbpf_device *nbpf = platform_get_drvdata(to_platform_device(dev)); 1480 + clk_disable_unprepare(nbpf->clk); 1481 + return 0; 1482 + } 1483 + 1484 + static int nbpf_runtime_resume(struct device *dev) 1485 + { 1486 + struct nbpf_device *nbpf = platform_get_drvdata(to_platform_device(dev)); 1487 + return clk_prepare_enable(nbpf->clk); 1488 + } 1489 + #endif 1490 + 1491 + static const struct dev_pm_ops nbpf_pm_ops = { 1492 + SET_RUNTIME_PM_OPS(nbpf_runtime_suspend, nbpf_runtime_resume, NULL) 1493 + }; 1494 + 1495 + static struct platform_driver nbpf_driver = { 1496 + .driver = { 1497 + .owner = THIS_MODULE, 1498 + .name = "dma-nbpf", 1499 + .of_match_table = nbpf_match, 1500 + .pm = &nbpf_pm_ops, 1501 + }, 1502 + .id_table = nbpf_ids, 1503 + .probe = nbpf_probe, 1504 + .remove = nbpf_remove, 1505 + }; 1506 + 1507 + module_platform_driver(nbpf_driver); 1508 + 1509 + MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>"); 1510 + MODULE_DESCRIPTION("dmaengine driver for NBPFAXI64* DMACs"); 1511 + MODULE_LICENSE("GPL v2");
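For context on how this controller is consumed, here is a minimal sketch of a slave (peripheral) driver using it through the generic dmaengine API, which the patch wires up via device_prep_slave_sg, device_control and friends. The function name example_start_rx, the "rx" dma-names entry, the FIFO address, bus width and burst length are all hypothetical; on DT systems the channel itself is described by the two-cell specifier this driver's of_xlate expects (terminal number plus NBPF_SLAVE_RQ_* flags from dt-bindings/dma/nbpfaxi.h).

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/*
 * Hypothetical RX path of a slave driver: request the channel DT routed to
 * this device, configure the device-side parameters, and queue one
 * scatter-gather transfer. The scatterlist is assumed to be already mapped
 * with dma_map_sg(); error handling is trimmed for brevity.
 */
static int example_start_rx(struct device *dev, struct scatterlist *sgl,
			    unsigned int sg_len, dma_addr_t fifo_addr)
{
	struct dma_chan *chan;
	struct dma_slave_config cfg = {
		.direction	= DMA_DEV_TO_MEM,
		.src_addr	= fifo_addr,	/* device FIFO address (assumed) */
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst	= 16,
	};
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	/* "rx" must match a dma-names entry in the client's DT node */
	chan = dma_request_slave_channel(dev, "rx");
	if (!chan)
		return -ENODEV;

	/* Reaches this driver's DMA_SLAVE_CONFIG case in nbpf_control() */
	if (dmaengine_slave_config(chan, &cfg))
		goto err;

	desc = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_DEV_TO_MEM,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		goto err;

	cookie = dmaengine_submit(desc);	/* ends up in nbpf_tx_submit() */
	dma_async_issue_pending(chan);		/* kicks nbpf_issue_pending() */

	return dma_submit_error(cookie) ? -EIO : 0;

err:
	dma_release_channel(chan);
	return -EIO;
}

Memcpy and SG-to-SG users would instead go through the DMA_MEMCPY and DMA_SG capabilities the driver advertises, rather than DMA_SLAVE.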