Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v3.17-rc5, 1127 lines, 30 kB

/*
 * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
/*
 * QCOM BAM DMA engine driver
 *
 * QCOM BAM DMA blocks are distributed amongst a number of the on-chip
 * peripherals on the MSM 8x74. The configuration of the channels is dependent
 * on the way they are hard wired to that specific peripheral. The peripheral
 * device tree entries specify the configuration of each channel.
 *
 * The DMA controller requires the use of external memory for storage of the
 * hardware descriptors for each channel. The descriptor FIFO is accessed as a
 * circular buffer and operations are managed according to the offset within the
 * FIFO. After pipe/channel reset, all of the pipe registers and internal state
 * are back to defaults.
 *
 * During DMA operations, we write descriptors to the FIFO, being careful to
 * handle wrapping and then write the last FIFO offset to that channel's
 * P_EVNT_REG register to kick off the transaction. The P_SW_OFSTS register
 * indicates the current FIFO offset that is being processed, so there is some
 * indication of where the hardware is currently working.
 */

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_dma.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>

#include "dmaengine.h"
#include "virt-dma.h"

struct bam_desc_hw {
	u32 addr;	/* Buffer physical address */
	u16 size;	/* Buffer size in bytes */
	u16 flags;
};

#define DESC_FLAG_INT BIT(15)
#define DESC_FLAG_EOT BIT(14)
#define DESC_FLAG_EOB BIT(13)
#define DESC_FLAG_NWD BIT(12)

struct bam_async_desc {
	struct virt_dma_desc vd;

	u32 num_desc;
	u32 xfer_len;

	/* transaction flags, EOT|EOB|NWD */
	u16 flags;

	struct bam_desc_hw *curr_desc;

	enum dma_transfer_direction dir;
	size_t length;
	struct bam_desc_hw desc[0];
};

#define BAM_CTRL 0x0000
#define BAM_REVISION 0x0004
#define BAM_SW_REVISION 0x0080
#define BAM_NUM_PIPES 0x003C
#define BAM_TIMER 0x0040
#define BAM_TIMER_CTRL 0x0044
#define BAM_DESC_CNT_TRSHLD 0x0008
#define BAM_IRQ_SRCS 0x000C
#define BAM_IRQ_SRCS_MSK 0x0010
#define BAM_IRQ_SRCS_UNMASKED 0x0030
#define BAM_IRQ_STTS 0x0014
#define BAM_IRQ_CLR 0x0018
#define BAM_IRQ_EN 0x001C
#define BAM_CNFG_BITS 0x007C
#define BAM_IRQ_SRCS_EE(ee) (0x0800 + ((ee) * 0x80))
#define BAM_IRQ_SRCS_MSK_EE(ee) (0x0804 + ((ee) * 0x80))
#define BAM_P_CTRL(pipe) (0x1000 + ((pipe) * 0x1000))
#define BAM_P_RST(pipe) (0x1004 + ((pipe) * 0x1000))
#define BAM_P_HALT(pipe) (0x1008 + ((pipe) * 0x1000))
#define BAM_P_IRQ_STTS(pipe) (0x1010 + ((pipe) * 0x1000))
#define BAM_P_IRQ_CLR(pipe) (0x1014 + ((pipe) * 0x1000))
#define BAM_P_IRQ_EN(pipe) (0x1018 + ((pipe) * 0x1000))
#define BAM_P_EVNT_DEST_ADDR(pipe) (0x182C + ((pipe) * 0x1000))
#define BAM_P_EVNT_REG(pipe) (0x1818 + ((pipe) * 0x1000))
#define BAM_P_SW_OFSTS(pipe) (0x1800 + ((pipe) * 0x1000))
#define BAM_P_DATA_FIFO_ADDR(pipe) (0x1824 + ((pipe) * 0x1000))
#define BAM_P_DESC_FIFO_ADDR(pipe) (0x181C + ((pipe) * 0x1000))
#define BAM_P_EVNT_TRSHLD(pipe) (0x1828 + ((pipe) * 0x1000))
#define BAM_P_FIFO_SIZES(pipe) (0x1820 + ((pipe) * 0x1000))

/* BAM CTRL */
#define BAM_SW_RST BIT(0)
#define BAM_EN BIT(1)
#define BAM_EN_ACCUM BIT(4)
#define BAM_TESTBUS_SEL_SHIFT 5
#define BAM_TESTBUS_SEL_MASK 0x3F
#define BAM_DESC_CACHE_SEL_SHIFT 13
#define BAM_DESC_CACHE_SEL_MASK 0x3
#define BAM_CACHED_DESC_STORE BIT(15)
#define IBC_DISABLE BIT(16)

/* BAM REVISION */
#define REVISION_SHIFT 0
#define REVISION_MASK 0xFF
#define NUM_EES_SHIFT 8
#define NUM_EES_MASK 0xF
#define CE_BUFFER_SIZE BIT(13)
#define AXI_ACTIVE BIT(14)
#define USE_VMIDMT BIT(15)
#define SECURED BIT(16)
#define BAM_HAS_NO_BYPASS BIT(17)
#define HIGH_FREQUENCY_BAM BIT(18)
#define INACTIV_TMRS_EXST BIT(19)
#define NUM_INACTIV_TMRS BIT(20)
#define DESC_CACHE_DEPTH_SHIFT 21
#define DESC_CACHE_DEPTH_1 (0 << DESC_CACHE_DEPTH_SHIFT)
#define DESC_CACHE_DEPTH_2 (1 << DESC_CACHE_DEPTH_SHIFT)
#define DESC_CACHE_DEPTH_3 (2 << DESC_CACHE_DEPTH_SHIFT)
#define DESC_CACHE_DEPTH_4 (3 << DESC_CACHE_DEPTH_SHIFT)
#define CMD_DESC_EN BIT(23)
#define INACTIV_TMR_BASE_SHIFT 24
#define INACTIV_TMR_BASE_MASK 0xFF

/* BAM NUM PIPES */
#define BAM_NUM_PIPES_SHIFT 0
#define BAM_NUM_PIPES_MASK 0xFF
#define PERIPH_NON_PIPE_GRP_SHIFT 16
#define PERIPH_NON_PIP_GRP_MASK 0xFF
#define BAM_NON_PIPE_GRP_SHIFT 24
#define BAM_NON_PIPE_GRP_MASK 0xFF

/* BAM CNFG BITS */
#define BAM_PIPE_CNFG BIT(2)
#define BAM_FULL_PIPE BIT(11)
#define BAM_NO_EXT_P_RST BIT(12)
#define BAM_IBC_DISABLE BIT(13)
#define BAM_SB_CLK_REQ BIT(14)
#define BAM_PSM_CSW_REQ BIT(15)
#define BAM_PSM_P_RES BIT(16)
#define BAM_AU_P_RES BIT(17)
#define BAM_SI_P_RES BIT(18)
#define BAM_WB_P_RES BIT(19)
#define BAM_WB_BLK_CSW BIT(20)
#define BAM_WB_CSW_ACK_IDL BIT(21)
#define BAM_WB_RETR_SVPNT BIT(22)
#define BAM_WB_DSC_AVL_P_RST BIT(23)
#define BAM_REG_P_EN BIT(24)
#define BAM_PSM_P_HD_DATA BIT(25)
#define BAM_AU_ACCUMED BIT(26)
#define BAM_CMD_ENABLE BIT(27)

#define BAM_CNFG_BITS_DEFAULT	(BAM_PIPE_CNFG |	\
		BAM_NO_EXT_P_RST |	\
		BAM_IBC_DISABLE |	\
		BAM_SB_CLK_REQ |	\
		BAM_PSM_CSW_REQ |	\
		BAM_PSM_P_RES |		\
		BAM_AU_P_RES |		\
		BAM_SI_P_RES |		\
		BAM_WB_P_RES |		\
		BAM_WB_BLK_CSW |	\
		BAM_WB_CSW_ACK_IDL |	\
		BAM_WB_RETR_SVPNT |	\
		BAM_WB_DSC_AVL_P_RST |	\
		BAM_REG_P_EN |		\
		BAM_PSM_P_HD_DATA |	\
		BAM_AU_ACCUMED |	\
		BAM_CMD_ENABLE)

/* PIPE CTRL */
#define P_EN BIT(1)
#define P_DIRECTION BIT(3)
#define P_SYS_STRM BIT(4)
#define P_SYS_MODE BIT(5)
#define P_AUTO_EOB BIT(6)
#define P_AUTO_EOB_SEL_SHIFT 7
#define P_AUTO_EOB_SEL_512 (0 << P_AUTO_EOB_SEL_SHIFT)
#define P_AUTO_EOB_SEL_256 (1 << P_AUTO_EOB_SEL_SHIFT)
#define P_AUTO_EOB_SEL_128 (2 << P_AUTO_EOB_SEL_SHIFT)
#define P_AUTO_EOB_SEL_64 (3 << P_AUTO_EOB_SEL_SHIFT)
#define P_PREFETCH_LIMIT_SHIFT 9
#define P_PREFETCH_LIMIT_32 (0 << P_PREFETCH_LIMIT_SHIFT)
#define P_PREFETCH_LIMIT_16 (1 << P_PREFETCH_LIMIT_SHIFT)
#define P_PREFETCH_LIMIT_4 (2 << P_PREFETCH_LIMIT_SHIFT)
#define P_WRITE_NWD BIT(11)
#define P_LOCK_GROUP_SHIFT 16
#define P_LOCK_GROUP_MASK 0x1F

/* BAM_DESC_CNT_TRSHLD */
#define CNT_TRSHLD 0xffff
#define DEFAULT_CNT_THRSHLD 0x4

/* BAM_IRQ_SRCS */
#define BAM_IRQ BIT(31)
#define P_IRQ 0x7fffffff

/* BAM_IRQ_SRCS_MSK */
#define BAM_IRQ_MSK BAM_IRQ
#define P_IRQ_MSK P_IRQ

/* BAM_IRQ_STTS */
#define BAM_TIMER_IRQ BIT(4)
#define BAM_EMPTY_IRQ BIT(3)
#define BAM_ERROR_IRQ BIT(2)
#define BAM_HRESP_ERR_IRQ BIT(1)

/* BAM_IRQ_CLR */
#define BAM_TIMER_CLR BIT(4)
#define BAM_EMPTY_CLR BIT(3)
#define BAM_ERROR_CLR BIT(2)
#define BAM_HRESP_ERR_CLR BIT(1)

/* BAM_IRQ_EN */
#define BAM_TIMER_EN BIT(4)
#define BAM_EMPTY_EN BIT(3)
#define BAM_ERROR_EN BIT(2)
#define BAM_HRESP_ERR_EN BIT(1)

/* BAM_P_IRQ_EN */
#define P_PRCSD_DESC_EN BIT(0)
#define P_TIMER_EN BIT(1)
#define P_WAKE_EN BIT(2)
#define P_OUT_OF_DESC_EN BIT(3)
#define P_ERR_EN BIT(4)
#define P_TRNSFR_END_EN BIT(5)
#define P_DEFAULT_IRQS_EN (P_PRCSD_DESC_EN | P_ERR_EN | P_TRNSFR_END_EN)

/* BAM_P_SW_OFSTS */
#define P_SW_OFSTS_MASK 0xffff

#define BAM_DESC_FIFO_SIZE SZ_32K
#define MAX_DESCRIPTORS (BAM_DESC_FIFO_SIZE / sizeof(struct bam_desc_hw) - 1)
#define BAM_MAX_DATA_SIZE (SZ_32K - 8)

struct bam_chan {
	struct virt_dma_chan vc;

	struct bam_device *bdev;

	/* configuration from device tree */
	u32 id;

	struct bam_async_desc *curr_txd;	/* current running dma */

	/* runtime configuration */
	struct dma_slave_config slave;

	/* fifo storage */
	struct bam_desc_hw *fifo_virt;
	dma_addr_t fifo_phys;

	/* fifo markers */
	unsigned short head;		/* start of active descriptor entries */
	unsigned short tail;		/* end of active descriptor entries */

	unsigned int initialized;	/* is the channel hw initialized? */
	unsigned int paused;		/* is the channel paused? */
	unsigned int reconfigure;	/* new slave config? */

	struct list_head node;
};

static inline struct bam_chan *to_bam_chan(struct dma_chan *common)
{
	return container_of(common, struct bam_chan, vc.chan);
}

struct bam_device {
	void __iomem *regs;
	struct device *dev;
	struct dma_device common;
	struct device_dma_parameters dma_parms;
	struct bam_chan *channels;
	u32 num_channels;

	/* execution environment ID, from DT */
	u32 ee;

	struct clk *bamclk;
	int irq;

	/* dma start transaction tasklet */
	struct tasklet_struct task;
};

/**
 * bam_reset_channel - Reset individual BAM DMA channel
 * @bchan: bam channel
 *
 * This function resets a specific BAM channel
 */
static void bam_reset_channel(struct bam_chan *bchan)
{
	struct bam_device *bdev = bchan->bdev;

	lockdep_assert_held(&bchan->vc.lock);

	/* reset channel */
	writel_relaxed(1, bdev->regs + BAM_P_RST(bchan->id));
	writel_relaxed(0, bdev->regs + BAM_P_RST(bchan->id));

	/* don't allow cpu to reorder BAM register accesses done after this */
	wmb();

	/* make sure hw is initialized when channel is used the first time  */
	bchan->initialized = 0;
}

/**
 * bam_chan_init_hw - Initialize channel hardware
 * @bchan: bam channel
 *
 * This function resets and initializes the BAM channel
 */
static void bam_chan_init_hw(struct bam_chan *bchan,
		enum dma_transfer_direction dir)
{
	struct bam_device *bdev = bchan->bdev;
	u32 val;

	/* Reset the channel to clear internal state of the FIFO */
	bam_reset_channel(bchan);

	/*
	 * write out 8 byte aligned address.  We have enough space for this
	 * because we allocated 1 more descriptor (8 bytes) than we can use
	 */
	writel_relaxed(ALIGN(bchan->fifo_phys, sizeof(struct bam_desc_hw)),
			bdev->regs + BAM_P_DESC_FIFO_ADDR(bchan->id));
	writel_relaxed(BAM_DESC_FIFO_SIZE, bdev->regs +
			BAM_P_FIFO_SIZES(bchan->id));

	/* enable the per pipe interrupts, enable EOT, ERR, and INT irqs */
	writel_relaxed(P_DEFAULT_IRQS_EN, bdev->regs + BAM_P_IRQ_EN(bchan->id));

	/* unmask the specific pipe and EE combo */
	val = readl_relaxed(bdev->regs + BAM_IRQ_SRCS_MSK_EE(bdev->ee));
	val |= BIT(bchan->id);
	writel_relaxed(val, bdev->regs + BAM_IRQ_SRCS_MSK_EE(bdev->ee));

	/* don't allow cpu to reorder the channel enable done below */
	wmb();

	/* set fixed direction and mode, then enable channel */
	val = P_EN | P_SYS_MODE;
	if (dir == DMA_DEV_TO_MEM)
		val |= P_DIRECTION;

	writel_relaxed(val, bdev->regs + BAM_P_CTRL(bchan->id));

	bchan->initialized = 1;

	/* init FIFO pointers */
	bchan->head = 0;
	bchan->tail = 0;
}

/**
 * bam_alloc_chan - Allocate channel resources for DMA channel.
 * @chan: specified channel
 *
 * This function allocates the FIFO descriptor memory
 */
static int bam_alloc_chan(struct dma_chan *chan)
{
	struct bam_chan *bchan = to_bam_chan(chan);
	struct bam_device *bdev = bchan->bdev;

	if (bchan->fifo_virt)
		return 0;

	/* allocate FIFO descriptor space, but only if necessary */
	bchan->fifo_virt = dma_alloc_writecombine(bdev->dev, BAM_DESC_FIFO_SIZE,
				&bchan->fifo_phys, GFP_KERNEL);

	if (!bchan->fifo_virt) {
		dev_err(bdev->dev, "Failed to allocate desc fifo\n");
		return -ENOMEM;
	}

	return 0;
}

/**
 * bam_free_chan - Frees dma resources associated with specific channel
 * @chan: specified channel
 *
 * Free the allocated fifo descriptor memory and channel resources
 *
 */
static void bam_free_chan(struct dma_chan *chan)
{
	struct bam_chan *bchan = to_bam_chan(chan);
	struct bam_device *bdev = bchan->bdev;
	u32 val;
	unsigned long flags;

	vchan_free_chan_resources(to_virt_chan(chan));

	if (bchan->curr_txd) {
		dev_err(bchan->bdev->dev, "Cannot free busy channel\n");
		return;
	}

	spin_lock_irqsave(&bchan->vc.lock, flags);
	bam_reset_channel(bchan);
	spin_unlock_irqrestore(&bchan->vc.lock, flags);

	dma_free_writecombine(bdev->dev, BAM_DESC_FIFO_SIZE, bchan->fifo_virt,
				bchan->fifo_phys);
	bchan->fifo_virt = NULL;

	/* mask irq for pipe/channel */
	val = readl_relaxed(bdev->regs + BAM_IRQ_SRCS_MSK_EE(bdev->ee));
	val &= ~BIT(bchan->id);
	writel_relaxed(val, bdev->regs + BAM_IRQ_SRCS_MSK_EE(bdev->ee));

	/* disable irq */
	writel_relaxed(0, bdev->regs + BAM_P_IRQ_EN(bchan->id));
}

/**
 * bam_slave_config - set slave configuration for channel
 * @chan: dma channel
 * @cfg: slave configuration
 *
 * Sets slave configuration for channel
 *
 */
static void bam_slave_config(struct bam_chan *bchan,
		struct dma_slave_config *cfg)
{
	memcpy(&bchan->slave, cfg, sizeof(*cfg));
	bchan->reconfigure = 1;
}

/**
 * bam_prep_slave_sg - Prep slave sg transaction
 *
 * @chan: dma channel
 * @sgl: scatter gather list
 * @sg_len: length of sg
 * @direction: DMA transfer direction
 * @flags: DMA flags
 * @context: transfer context (unused)
 */
static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan,
	struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags,
	void *context)
{
	struct bam_chan *bchan = to_bam_chan(chan);
	struct bam_device *bdev = bchan->bdev;
	struct bam_async_desc *async_desc;
	struct scatterlist *sg;
	u32 i;
	struct bam_desc_hw *desc;
	unsigned int num_alloc = 0;

	if (!is_slave_direction(direction)) {
		dev_err(bdev->dev, "invalid dma direction\n");
		return NULL;
	}

	/* calculate number of required entries */
	for_each_sg(sgl, sg, sg_len, i)
		num_alloc += DIV_ROUND_UP(sg_dma_len(sg), BAM_MAX_DATA_SIZE);

	/* allocate enough room to accommodate the number of entries */
	async_desc = kzalloc(sizeof(*async_desc) +
			(num_alloc * sizeof(struct bam_desc_hw)), GFP_NOWAIT);

	if (!async_desc)
		goto err_out;

	if (flags & DMA_PREP_FENCE)
		async_desc->flags |= DESC_FLAG_NWD;

	if (flags & DMA_PREP_INTERRUPT)
		async_desc->flags |= DESC_FLAG_EOT;
	else
		async_desc->flags |= DESC_FLAG_INT;

	async_desc->num_desc = num_alloc;
	async_desc->curr_desc = async_desc->desc;
	async_desc->dir = direction;

	/* fill in temporary descriptors */
	desc = async_desc->desc;
	for_each_sg(sgl, sg, sg_len, i) {
		unsigned int remainder = sg_dma_len(sg);
		unsigned int curr_offset = 0;

		do {
			desc->addr = sg_dma_address(sg) + curr_offset;

			if (remainder > BAM_MAX_DATA_SIZE) {
				desc->size = BAM_MAX_DATA_SIZE;
				remainder -= BAM_MAX_DATA_SIZE;
				curr_offset += BAM_MAX_DATA_SIZE;
			} else {
				desc->size = remainder;
				remainder = 0;
			}

			async_desc->length += desc->size;
			desc++;
		} while (remainder > 0);
	}

	return vchan_tx_prep(&bchan->vc, &async_desc->vd, flags);

err_out:
	kfree(async_desc);
	return NULL;
}

/**
 * bam_dma_terminate_all - terminate all transactions on a channel
 * @bchan: bam dma channel
 *
 * Dequeues and frees all transactions
 * No callbacks are done
 *
 */
static void bam_dma_terminate_all(struct bam_chan *bchan)
{
	unsigned long flag;
	LIST_HEAD(head);

	/* remove all transactions, including active transaction */
	spin_lock_irqsave(&bchan->vc.lock, flag);
	if (bchan->curr_txd) {
		list_add(&bchan->curr_txd->vd.node, &bchan->vc.desc_issued);
		bchan->curr_txd = NULL;
	}

	vchan_get_all_descriptors(&bchan->vc, &head);
	spin_unlock_irqrestore(&bchan->vc.lock, flag);

	vchan_dma_desc_free_list(&bchan->vc, &head);
}

/**
 * bam_control - DMA device control
 * @chan: dma channel
 * @cmd: control cmd
 * @arg: cmd argument
 *
 * Perform DMA control command
 *
 */
static int bam_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
	unsigned long arg)
{
	struct bam_chan *bchan = to_bam_chan(chan);
	struct bam_device *bdev = bchan->bdev;
	int ret = 0;
	unsigned long flag;

	switch (cmd) {
	case DMA_PAUSE:
		spin_lock_irqsave(&bchan->vc.lock, flag);
		writel_relaxed(1, bdev->regs + BAM_P_HALT(bchan->id));
		bchan->paused = 1;
		spin_unlock_irqrestore(&bchan->vc.lock, flag);
		break;

	case DMA_RESUME:
		spin_lock_irqsave(&bchan->vc.lock, flag);
		writel_relaxed(0, bdev->regs + BAM_P_HALT(bchan->id));
		bchan->paused = 0;
		spin_unlock_irqrestore(&bchan->vc.lock, flag);
		break;

	case DMA_TERMINATE_ALL:
		bam_dma_terminate_all(bchan);
		break;

	case DMA_SLAVE_CONFIG:
		spin_lock_irqsave(&bchan->vc.lock, flag);
		bam_slave_config(bchan, (struct dma_slave_config *)arg);
		spin_unlock_irqrestore(&bchan->vc.lock, flag);
		break;

	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

/**
 * process_channel_irqs - processes the channel interrupts
 * @bdev: bam controller
 *
 * This function processes the channel interrupts
 *
 */
static u32 process_channel_irqs(struct bam_device *bdev)
{
	u32 i, srcs, pipe_stts;
	unsigned long flags;
	struct bam_async_desc *async_desc;

	srcs = readl_relaxed(bdev->regs + BAM_IRQ_SRCS_EE(bdev->ee));

	/* return early if no pipe/channel interrupts are present */
	if (!(srcs & P_IRQ))
		return srcs;

	for (i = 0; i < bdev->num_channels; i++) {
		struct bam_chan *bchan = &bdev->channels[i];

		if (!(srcs & BIT(i)))
			continue;

		/* clear pipe irq */
		pipe_stts = readl_relaxed(bdev->regs +
			BAM_P_IRQ_STTS(i));

		writel_relaxed(pipe_stts, bdev->regs +
				BAM_P_IRQ_CLR(i));

		spin_lock_irqsave(&bchan->vc.lock, flags);
		async_desc = bchan->curr_txd;

		if (async_desc) {
			async_desc->num_desc -= async_desc->xfer_len;
			async_desc->curr_desc += async_desc->xfer_len;
			bchan->curr_txd = NULL;

			/* manage FIFO */
			bchan->head += async_desc->xfer_len;
			bchan->head %= MAX_DESCRIPTORS;

			/*
			 * if complete, process cookie.  Otherwise
			 * push back to front of desc_issued so that
			 * it gets restarted by the tasklet
			 */
			if (!async_desc->num_desc)
				vchan_cookie_complete(&async_desc->vd);
			else
				list_add(&async_desc->vd.node,
					&bchan->vc.desc_issued);
		}

		spin_unlock_irqrestore(&bchan->vc.lock, flags);
	}

	return srcs;
}

/**
 * bam_dma_irq - irq handler for bam controller
 * @irq: IRQ of interrupt
 * @data: callback data
 *
 * IRQ handler for the bam controller
 */
static irqreturn_t bam_dma_irq(int irq, void *data)
{
	struct bam_device *bdev = data;
	u32 clr_mask = 0, srcs = 0;

	srcs |= process_channel_irqs(bdev);

	/* kick off tasklet to start next dma transfer */
	if (srcs & P_IRQ)
		tasklet_schedule(&bdev->task);

	if (srcs & BAM_IRQ)
		clr_mask = readl_relaxed(bdev->regs + BAM_IRQ_STTS);

	/* don't allow reorder of the various accesses to the BAM registers */
	mb();

	writel_relaxed(clr_mask, bdev->regs + BAM_IRQ_CLR);

	return IRQ_HANDLED;
}

/**
 * bam_tx_status - returns status of transaction
 * @chan: dma channel
 * @cookie: transaction cookie
 * @txstate: DMA transaction state
 *
 * Return status of dma transaction
 */
static enum dma_status bam_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
		struct dma_tx_state *txstate)
{
	struct bam_chan *bchan = to_bam_chan(chan);
	struct virt_dma_desc *vd;
	int ret;
	size_t residue = 0;
	unsigned int i;
	unsigned long flags;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	if (!txstate)
		return bchan->paused ? DMA_PAUSED : ret;

	spin_lock_irqsave(&bchan->vc.lock, flags);
	vd = vchan_find_desc(&bchan->vc, cookie);
	if (vd)
		residue = container_of(vd, struct bam_async_desc, vd)->length;
	else if (bchan->curr_txd && bchan->curr_txd->vd.tx.cookie == cookie)
		for (i = 0; i < bchan->curr_txd->num_desc; i++)
			residue += bchan->curr_txd->curr_desc[i].size;

	spin_unlock_irqrestore(&bchan->vc.lock, flags);

	dma_set_residue(txstate, residue);

	if (ret == DMA_IN_PROGRESS && bchan->paused)
		ret = DMA_PAUSED;

	return ret;
}

/**
 * bam_apply_new_config
 * @bchan: bam dma channel
 * @dir: DMA direction
 */
static void bam_apply_new_config(struct bam_chan *bchan,
	enum dma_transfer_direction dir)
{
	struct bam_device *bdev = bchan->bdev;
	u32 maxburst;

	if (dir == DMA_DEV_TO_MEM)
		maxburst = bchan->slave.src_maxburst;
	else
		maxburst = bchan->slave.dst_maxburst;

	writel_relaxed(maxburst, bdev->regs + BAM_DESC_CNT_TRSHLD);

	bchan->reconfigure = 0;
}

/**
 * bam_start_dma - start next transaction
 * @bchan: bam dma channel
 */
static void bam_start_dma(struct bam_chan *bchan)
{
	struct virt_dma_desc *vd = vchan_next_desc(&bchan->vc);
	struct bam_device *bdev = bchan->bdev;
	struct bam_async_desc *async_desc;
	struct bam_desc_hw *desc;
	struct bam_desc_hw *fifo = PTR_ALIGN(bchan->fifo_virt,
					sizeof(struct bam_desc_hw));

	lockdep_assert_held(&bchan->vc.lock);

	if (!vd)
		return;

	list_del(&vd->node);

	async_desc = container_of(vd, struct bam_async_desc, vd);
	bchan->curr_txd = async_desc;

	/* on first use, initialize the channel hardware */
	if (!bchan->initialized)
		bam_chan_init_hw(bchan, async_desc->dir);

	/* apply new slave config changes, if necessary */
	if (bchan->reconfigure)
		bam_apply_new_config(bchan, async_desc->dir);

	desc = bchan->curr_txd->curr_desc;

	if (async_desc->num_desc > MAX_DESCRIPTORS)
		async_desc->xfer_len = MAX_DESCRIPTORS;
	else
		async_desc->xfer_len = async_desc->num_desc;

	/* set any special flags on the last descriptor */
	if (async_desc->num_desc == async_desc->xfer_len)
		desc[async_desc->xfer_len - 1].flags = async_desc->flags;
	else
		desc[async_desc->xfer_len - 1].flags |= DESC_FLAG_INT;

	if (bchan->tail + async_desc->xfer_len > MAX_DESCRIPTORS) {
		u32 partial = MAX_DESCRIPTORS - bchan->tail;

		memcpy(&fifo[bchan->tail], desc,
				partial * sizeof(struct bam_desc_hw));
		memcpy(fifo, &desc[partial], (async_desc->xfer_len - partial) *
				sizeof(struct bam_desc_hw));
	} else {
		memcpy(&fifo[bchan->tail], desc,
			async_desc->xfer_len * sizeof(struct bam_desc_hw));
	}

	bchan->tail += async_desc->xfer_len;
	bchan->tail %= MAX_DESCRIPTORS;

	/* ensure descriptor writes and dma start not reordered */
	wmb();
	writel_relaxed(bchan->tail * sizeof(struct bam_desc_hw),
			bdev->regs + BAM_P_EVNT_REG(bchan->id));
}

/**
 * dma_tasklet - DMA IRQ tasklet
 * @data: tasklet argument (bam controller structure)
 *
 * Sets up next DMA operation and then processes all completed transactions
 */
static void dma_tasklet(unsigned long data)
{
	struct bam_device *bdev = (struct bam_device *)data;
	struct bam_chan *bchan;
	unsigned long flags;
	unsigned int i;

	/* go through the channels and kick off transactions */
	for (i = 0; i < bdev->num_channels; i++) {
		bchan = &bdev->channels[i];
		spin_lock_irqsave(&bchan->vc.lock, flags);

		if (!list_empty(&bchan->vc.desc_issued) && !bchan->curr_txd)
			bam_start_dma(bchan);
		spin_unlock_irqrestore(&bchan->vc.lock, flags);
	}
}

/**
 * bam_issue_pending - starts pending transactions
 * @chan: dma channel
 *
 * Calls tasklet directly which in turn starts any pending transactions
 */
static void bam_issue_pending(struct dma_chan *chan)
{
	struct bam_chan *bchan = to_bam_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&bchan->vc.lock, flags);

	/* if work pending and idle, start a transaction */
	if (vchan_issue_pending(&bchan->vc) && !bchan->curr_txd)
		bam_start_dma(bchan);

	spin_unlock_irqrestore(&bchan->vc.lock, flags);
}

/**
 * bam_dma_free_desc - free descriptor memory
 * @vd: virtual descriptor
 *
 */
static void bam_dma_free_desc(struct virt_dma_desc *vd)
{
	struct bam_async_desc *async_desc = container_of(vd,
			struct bam_async_desc, vd);

	kfree(async_desc);
}

static struct dma_chan *bam_dma_xlate(struct of_phandle_args *dma_spec,
		struct of_dma *of)
{
	struct bam_device *bdev = container_of(of->of_dma_data,
					struct bam_device, common);
	unsigned int request;

	if (dma_spec->args_count != 1)
		return NULL;

	request = dma_spec->args[0];
	if (request >= bdev->num_channels)
		return NULL;

	return dma_get_slave_channel(&(bdev->channels[request].vc.chan));
}

/**
 * bam_init
 * @bdev: bam device
 *
 * Initialization helper for global bam registers
 */
static int bam_init(struct bam_device *bdev)
{
	u32 val;

	/* read revision and configuration information */
	val = readl_relaxed(bdev->regs + BAM_REVISION) >> NUM_EES_SHIFT;
	val &= NUM_EES_MASK;

	/* check that configured EE is within range */
	if (bdev->ee >= val)
		return -EINVAL;

	val = readl_relaxed(bdev->regs + BAM_NUM_PIPES);
	bdev->num_channels = val & BAM_NUM_PIPES_MASK;

	/* s/w reset bam */
	/* after reset all pipes are disabled and idle */
	val = readl_relaxed(bdev->regs + BAM_CTRL);
	val |= BAM_SW_RST;
	writel_relaxed(val, bdev->regs + BAM_CTRL);
	val &= ~BAM_SW_RST;
	writel_relaxed(val, bdev->regs + BAM_CTRL);

	/* make sure previous stores are visible before enabling BAM */
	wmb();

	/* enable bam */
	val |= BAM_EN;
	writel_relaxed(val, bdev->regs + BAM_CTRL);

	/* set descriptor threshold, start with 4 bytes */
	writel_relaxed(DEFAULT_CNT_THRSHLD, bdev->regs + BAM_DESC_CNT_TRSHLD);

	/* Enable default set of h/w workarounds, i.e. all except BAM_FULL_PIPE */
	writel_relaxed(BAM_CNFG_BITS_DEFAULT, bdev->regs + BAM_CNFG_BITS);

	/* enable irqs for errors */
	writel_relaxed(BAM_ERROR_EN | BAM_HRESP_ERR_EN,
			bdev->regs + BAM_IRQ_EN);

	/* unmask global bam interrupt */
	writel_relaxed(BAM_IRQ_MSK, bdev->regs + BAM_IRQ_SRCS_MSK_EE(bdev->ee));

	return 0;
}

static void bam_channel_init(struct bam_device *bdev, struct bam_chan *bchan,
	u32 index)
{
	bchan->id = index;
	bchan->bdev = bdev;

	vchan_init(&bchan->vc, &bdev->common);
	bchan->vc.desc_free = bam_dma_free_desc;
}

static int bam_dma_probe(struct platform_device *pdev)
{
	struct bam_device *bdev;
	struct resource *iores;
	int ret, i;

	bdev = devm_kzalloc(&pdev->dev, sizeof(*bdev), GFP_KERNEL);
	if (!bdev)
		return -ENOMEM;

	bdev->dev = &pdev->dev;

	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	bdev->regs = devm_ioremap_resource(&pdev->dev, iores);
	if (IS_ERR(bdev->regs))
		return PTR_ERR(bdev->regs);

	bdev->irq = platform_get_irq(pdev, 0);
	if (bdev->irq < 0)
		return bdev->irq;

	ret = of_property_read_u32(pdev->dev.of_node, "qcom,ee", &bdev->ee);
	if (ret) {
		dev_err(bdev->dev, "Execution environment unspecified\n");
		return ret;
	}

	bdev->bamclk = devm_clk_get(bdev->dev, "bam_clk");
	if (IS_ERR(bdev->bamclk))
		return PTR_ERR(bdev->bamclk);

	ret = clk_prepare_enable(bdev->bamclk);
	if (ret) {
		dev_err(bdev->dev, "failed to prepare/enable clock\n");
		return ret;
	}

	ret = bam_init(bdev);
	if (ret)
		goto err_disable_clk;

	tasklet_init(&bdev->task, dma_tasklet, (unsigned long)bdev);

	bdev->channels = devm_kcalloc(bdev->dev, bdev->num_channels,
				sizeof(*bdev->channels), GFP_KERNEL);

	if (!bdev->channels) {
		ret = -ENOMEM;
		goto err_disable_clk;
	}

	/* allocate and initialize channels */
	INIT_LIST_HEAD(&bdev->common.channels);

	for (i = 0; i < bdev->num_channels; i++)
		bam_channel_init(bdev, &bdev->channels[i], i);

	ret = devm_request_irq(bdev->dev, bdev->irq, bam_dma_irq,
			IRQF_TRIGGER_HIGH, "bam_dma", bdev);
	if (ret)
		goto err_disable_clk;

	/* set max dma segment size */
	bdev->common.dev = bdev->dev;
	bdev->common.dev->dma_parms = &bdev->dma_parms;
	ret = dma_set_max_seg_size(bdev->common.dev, BAM_MAX_DATA_SIZE);
	if (ret) {
		dev_err(bdev->dev, "cannot set maximum segment size\n");
		goto err_disable_clk;
	}

	platform_set_drvdata(pdev, bdev);

	/* set capabilities */
	dma_cap_zero(bdev->common.cap_mask);
	dma_cap_set(DMA_SLAVE, bdev->common.cap_mask);

	/* initialize dmaengine apis */
	bdev->common.device_alloc_chan_resources = bam_alloc_chan;
	bdev->common.device_free_chan_resources = bam_free_chan;
	bdev->common.device_prep_slave_sg = bam_prep_slave_sg;
	bdev->common.device_control = bam_control;
	bdev->common.device_issue_pending = bam_issue_pending;
	bdev->common.device_tx_status = bam_tx_status;
	bdev->common.dev = bdev->dev;

	ret = dma_async_device_register(&bdev->common);
	if (ret) {
		dev_err(bdev->dev, "failed to register dma async device\n");
		goto err_disable_clk;
	}

	ret = of_dma_controller_register(pdev->dev.of_node, bam_dma_xlate,
					&bdev->common);
	if (ret)
		goto err_unregister_dma;

	return 0;

err_unregister_dma:
	dma_async_device_unregister(&bdev->common);
err_disable_clk:
	clk_disable_unprepare(bdev->bamclk);
	return ret;
}

static int bam_dma_remove(struct platform_device *pdev)
{
	struct bam_device *bdev = platform_get_drvdata(pdev);
	u32 i;

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&bdev->common);

	/* mask all interrupts for this execution environment */
	writel_relaxed(0, bdev->regs + BAM_IRQ_SRCS_MSK_EE(bdev->ee));

	devm_free_irq(bdev->dev, bdev->irq, bdev);

	for (i = 0; i < bdev->num_channels; i++) {
		bam_dma_terminate_all(&bdev->channels[i]);
		tasklet_kill(&bdev->channels[i].vc.task);

		dma_free_writecombine(bdev->dev, BAM_DESC_FIFO_SIZE,
			bdev->channels[i].fifo_virt,
			bdev->channels[i].fifo_phys);
	}

	tasklet_kill(&bdev->task);

	clk_disable_unprepare(bdev->bamclk);

	return 0;
}

static const struct of_device_id bam_of_match[] = {
	{ .compatible = "qcom,bam-v1.4.0", },
	{}
};
MODULE_DEVICE_TABLE(of, bam_of_match);

static struct platform_driver bam_dma_driver = {
	.probe = bam_dma_probe,
	.remove = bam_dma_remove,
	.driver = {
		.name = "bam-dma-engine",
		.owner = THIS_MODULE,
		.of_match_table = bam_of_match,
	},
};

module_platform_driver(bam_dma_driver);

MODULE_AUTHOR("Andy Gross <agross@codeaurora.org>");
MODULE_DESCRIPTION("QCOM BAM DMA engine driver");
MODULE_LICENSE("GPL v2");