Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
File at v3.7-rc4 — 1727 lines, 45 kB
/*
 * Driver for the Synopsys DesignWare DMA Controller (aka DMACA on
 * AVR32 systems.)
 *
 * Copyright (C) 2007-2008 Atmel Corporation
 * Copyright (C) 2010-2011 ST Microelectronics
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "dw_dmac_regs.h"
#include "dmaengine.h"

/*
 * This supports the Synopsys "DesignWare AHB Central DMA Controller",
 * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all
 * of which use ARM any more). See the "Databook" from Synopsys for
 * information beyond what licensees probably provide.
 *
 * The driver has currently been tested only with the Atmel AT32AP7000,
 * which does not support descriptor writeback.
 */

static inline unsigned int dwc_get_dms(struct dw_dma_slave *slave)
{
	return slave ? slave->dst_master : 0;
}

static inline unsigned int dwc_get_sms(struct dw_dma_slave *slave)
{
	return slave ? slave->src_master : 1;
}

#define DWC_DEFAULT_CTLLO(_chan) ({				\
		struct dw_dma_slave *__slave = (_chan->private);	\
		struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan);	\
		struct dma_slave_config *_sconfig = &_dwc->dma_sconfig;	\
		int _dms = dwc_get_dms(__slave);		\
		int _sms = dwc_get_sms(__slave);		\
		u8 _smsize = __slave ? _sconfig->src_maxburst :	\
			DW_DMA_MSIZE_16;			\
		u8 _dmsize = __slave ? _sconfig->dst_maxburst :	\
			DW_DMA_MSIZE_16;			\
								\
		(DWC_CTLL_DST_MSIZE(_dmsize)			\
		 | DWC_CTLL_SRC_MSIZE(_smsize)			\
		 | DWC_CTLL_LLP_D_EN				\
		 | DWC_CTLL_LLP_S_EN				\
		 | DWC_CTLL_DMS(_dms)				\
		 | DWC_CTLL_SMS(_sms));				\
	})

/*
 * Number of descriptors to allocate for each channel. This should be
 * made configurable somehow; preferably, the clients (at least the
 * ones using slave transfers) should be able to give us a hint.
 */
#define NR_DESCS_PER_CHANNEL	64

/*----------------------------------------------------------------------*/

/*
 * Because we're not relying on writeback from the controller (it may not
 * even be configured into the core!) we don't need to use dma_pool. These
 * descriptors -- and associated data -- are cacheable. We do need to make
 * sure their dcache entries are written back before handing them off to
 * the controller, though.
 */

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}
static struct device *chan2parent(struct dma_chan *chan)
{
	return chan->dev->device.parent;
}

static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
{
	return list_entry(dwc->active_list.next, struct dw_desc, desc_node);
}

static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
{
	struct dw_desc *desc, *_desc;
	struct dw_desc *ret = NULL;
	unsigned int i = 0;
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) {
		i++;
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc);
	}
	spin_unlock_irqrestore(&dwc->lock, flags);

	dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i);

	return ret;
}

static void dwc_sync_desc_for_cpu(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	struct dw_desc *child;

	list_for_each_entry(child, &desc->tx_list, desc_node)
		dma_sync_single_for_cpu(chan2parent(&dwc->chan),
				child->txd.phys, sizeof(child->lli),
				DMA_TO_DEVICE);
	dma_sync_single_for_cpu(chan2parent(&dwc->chan),
			desc->txd.phys, sizeof(desc->lli),
			DMA_TO_DEVICE);
}

/*
 * Move a descriptor, including any children, to the free list.
 * `desc' must not be on any lists.
 */
static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	unsigned long flags;

	if (desc) {
		struct dw_desc *child;

		dwc_sync_desc_for_cpu(dwc, desc);

		spin_lock_irqsave(&dwc->lock, flags);
		list_for_each_entry(child, &desc->tx_list, desc_node)
			dev_vdbg(chan2dev(&dwc->chan),
					"moving child desc %p to freelist\n",
					child);
		list_splice_init(&desc->tx_list, &dwc->free_list);
		dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc);
		list_add(&desc->desc_node, &dwc->free_list);
		spin_unlock_irqrestore(&dwc->lock, flags);
	}
}

static void dwc_initialize(struct dw_dma_chan *dwc)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	struct dw_dma_slave *dws = dwc->chan.private;
	u32 cfghi = DWC_CFGH_FIFO_MODE;
	u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);

	if (dwc->initialized == true)
		return;

	if (dws) {
		/*
		 * We need controller-specific data to set up slave
		 * transfers.
		 */
		BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);

		cfghi = dws->cfg_hi;
		cfglo |= dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK;
	} else {
		if (dwc->dma_sconfig.direction == DMA_MEM_TO_DEV)
			cfghi = DWC_CFGH_DST_PER(dwc->dma_sconfig.slave_id);
		else if (dwc->dma_sconfig.direction == DMA_DEV_TO_MEM)
			cfghi = DWC_CFGH_SRC_PER(dwc->dma_sconfig.slave_id);
	}

	channel_writel(dwc, CFG_LO, cfglo);
	channel_writel(dwc, CFG_HI, cfghi);

	/* Enable interrupts */
	channel_set_bit(dw, MASK.XFER, dwc->mask);
	channel_set_bit(dw, MASK.ERROR, dwc->mask);

	dwc->initialized = true;
}

/*----------------------------------------------------------------------*/

static inline unsigned int dwc_fast_fls(unsigned long long v)
{
	/*
	 * We can be a lot more clever here, but this should take care
	 * of the most common optimization.
	 */
	if (!(v & 7))
		return 3;
	else if (!(v & 3))
		return 2;
	else if (!(v & 1))
		return 1;
	return 0;
}

static inline void dwc_dump_chan_regs(struct dw_dma_chan *dwc)
{
	dev_err(chan2dev(&dwc->chan),
		"  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
		channel_readl(dwc, SAR),
		channel_readl(dwc, DAR),
		channel_readl(dwc, LLP),
		channel_readl(dwc, CTL_HI),
		channel_readl(dwc, CTL_LO));
}


static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	channel_clear_bit(dw, CH_EN, dwc->mask);
	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();
}

/*----------------------------------------------------------------------*/

/* Perform single block transfer */
static inline void dwc_do_single_block(struct dw_dma_chan *dwc,
				       struct dw_desc *desc)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	u32 ctllo;

	/* Software emulation of LLP mode relies on interrupts to continue
	 * multi block transfer. */
	ctllo = desc->lli.ctllo | DWC_CTLL_INT_EN;

	channel_writel(dwc, SAR, desc->lli.sar);
	channel_writel(dwc, DAR, desc->lli.dar);
	channel_writel(dwc, CTL_LO, ctllo);
	channel_writel(dwc, CTL_HI, desc->lli.ctlhi);
	channel_set_bit(dw, CH_EN, dwc->mask);
}

/* Called with dwc->lock held and bh disabled */
static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	unsigned long was_soft_llp;

	/* ASSERT: channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: Attempted to start non-idle channel\n");
		dwc_dump_chan_regs(dwc);

		/* The tasklet will hopefully advance the queue... */
		return;
	}

	if (dwc->nollp) {
		was_soft_llp = test_and_set_bit(DW_DMA_IS_SOFT_LLP,
						&dwc->flags);
		if (was_soft_llp) {
			dev_err(chan2dev(&dwc->chan),
				"BUG: Attempted to start new LLP transfer "
				"inside ongoing one\n");
			return;
		}

		dwc_initialize(dwc);

		dwc->tx_list = &first->tx_list;
		dwc->tx_node_active = first->tx_list.next;

		dwc_do_single_block(dwc, first);

		return;
	}

	dwc_initialize(dwc);

	channel_writel(dwc, LLP, first->txd.phys);
	channel_writel(dwc, CTL_LO,
			DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	channel_writel(dwc, CTL_HI, 0);
	channel_set_bit(dw, CH_EN, dwc->mask);
}

/*----------------------------------------------------------------------*/

static void
dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
		bool callback_required)
{
	dma_async_tx_callback callback = NULL;
	void *param = NULL;
	struct dma_async_tx_descriptor *txd = &desc->txd;
	struct dw_desc *child;
	unsigned long flags;

	dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);

	spin_lock_irqsave(&dwc->lock, flags);
	dma_cookie_complete(txd);
	if (callback_required) {
		callback = txd->callback;
		param = txd->callback_param;
	}

	dwc_sync_desc_for_cpu(dwc, desc);

	/* async_tx_ack */
	list_for_each_entry(child, &desc->tx_list, desc_node)
		async_tx_ack(&child->txd);
	async_tx_ack(&desc->txd);

	list_splice_init(&desc->tx_list, &dwc->free_list);
	list_move(&desc->desc_node, &dwc->free_list);

	if (!dwc->chan.private) {
		struct device *parent = chan2parent(&dwc->chan);
		if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
			if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
				dma_unmap_single(parent, desc->lli.dar,
						desc->len, DMA_FROM_DEVICE);
			else
				dma_unmap_page(parent, desc->lli.dar,
						desc->len, DMA_FROM_DEVICE);
		}
		if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
			if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
				dma_unmap_single(parent, desc->lli.sar,
						desc->len, DMA_TO_DEVICE);
			else
				dma_unmap_page(parent, desc->lli.sar,
						desc->len, DMA_TO_DEVICE);
		}
	}

	spin_unlock_irqrestore(&dwc->lock, flags);

	if (callback_required && callback)
		callback(param);
}

static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *desc, *_desc;
	LIST_HEAD(list);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: XFER bit set, but channel not idle!\n");

		/* Try to continue after resetting the channel... */
		dwc_chan_disable(dw, dwc);
	}

	/*
	 * Submit queued descriptors ASAP, i.e. before we go through
	 * the completed ones.
	 */
	list_splice_init(&dwc->active_list, &list);
	if (!list_empty(&dwc->queue)) {
		list_move(dwc->queue.next, &dwc->active_list);
		dwc_dostart(dwc, dwc_first_active(dwc));
	}

	spin_unlock_irqrestore(&dwc->lock, flags);

	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		dwc_descriptor_complete(dwc, desc, true);
}

static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	dma_addr_t llp;
	struct dw_desc *desc, *_desc;
	struct dw_desc *child;
	u32 status_xfer;
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	llp = channel_readl(dwc, LLP);
	status_xfer = dma_readl(dw, RAW.XFER);

	if (status_xfer & dwc->mask) {
		/* Everything we've submitted is done */
		dma_writel(dw, CLEAR.XFER, dwc->mask);
		spin_unlock_irqrestore(&dwc->lock, flags);

		dwc_complete_all(dw, dwc);
		return;
	}

	if (list_empty(&dwc->active_list)) {
		spin_unlock_irqrestore(&dwc->lock, flags);
		return;
	}

	dev_vdbg(chan2dev(&dwc->chan), "%s: llp=0x%llx\n", __func__,
			(unsigned long long)llp);

	list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
		/* check first descriptors addr */
		if (desc->txd.phys == llp) {
			spin_unlock_irqrestore(&dwc->lock, flags);
			return;
		}

		/* check first descriptors llp */
		if (desc->lli.llp == llp) {
			/* This one is currently in progress */
			spin_unlock_irqrestore(&dwc->lock, flags);
			return;
		}

		list_for_each_entry(child, &desc->tx_list, desc_node)
			if (child->lli.llp == llp) {
				/* Currently in progress */
				spin_unlock_irqrestore(&dwc->lock, flags);
				return;
			}

		/*
		 * No descriptors so far seem to be in progress, i.e.
		 * this one must be done.
		 */
		spin_unlock_irqrestore(&dwc->lock, flags);
		dwc_descriptor_complete(dwc, desc, true);
		spin_lock_irqsave(&dwc->lock, flags);
	}

	dev_err(chan2dev(&dwc->chan),
		"BUG: All descriptors done, but channel not idle!\n");

	/* Try to continue after resetting the channel... */
	dwc_chan_disable(dw, dwc);

	if (!list_empty(&dwc->queue)) {
		list_move(dwc->queue.next, &dwc->active_list);
		dwc_dostart(dwc, dwc_first_active(dwc));
	}
	spin_unlock_irqrestore(&dwc->lock, flags);
}

static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
{
	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
			"  desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
			lli->sar, lli->dar, lli->llp, lli->ctlhi, lli->ctllo);
}

static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *bad_desc;
	struct dw_desc *child;
	unsigned long flags;

	dwc_scan_descriptors(dw, dwc);

	spin_lock_irqsave(&dwc->lock, flags);

	/*
	 * The descriptor currently at the head of the active list is
	 * borked. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to carry on.
	 */
	bad_desc = dwc_first_active(dwc);
	list_del_init(&bad_desc->desc_node);
	list_move(dwc->queue.next, dwc->active_list.prev);

	/* Clear the error flag and try to restart the controller */
	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	if (!list_empty(&dwc->active_list))
		dwc_dostart(dwc, dwc_first_active(dwc));

	/*
	 * KERN_CRITICAL may seem harsh, but since this only happens
	 * when someone submits a bad physical address in a
	 * descriptor, we should consider ourselves lucky that the
	 * controller flagged an error instead of scribbling over
	 * random memory locations.
	 */
	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
			"Bad descriptor submitted for DMA!\n");
	dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
			"  cookie: %d\n", bad_desc->txd.cookie);
	dwc_dump_lli(dwc, &bad_desc->lli);
	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
		dwc_dump_lli(dwc, &child->lli);

	spin_unlock_irqrestore(&dwc->lock, flags);

	/* Pretend the descriptor completed successfully */
	dwc_descriptor_complete(dwc, bad_desc, true);
}

/* --------------------- Cyclic DMA API extensions -------------------- */

inline dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	return channel_readl(dwc, SAR);
}
EXPORT_SYMBOL(dw_dma_get_src_addr);

inline dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	return channel_readl(dwc, DAR);
}
EXPORT_SYMBOL(dw_dma_get_dst_addr);

/* called with dwc->lock held and all DMAC interrupts disabled */
static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
		u32 status_err, u32 status_xfer)
{
	unsigned long flags;

	if (dwc->mask) {
		void (*callback)(void *param);
		void *callback_param;

		dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
				channel_readl(dwc, LLP));

		callback = dwc->cdesc->period_callback;
		callback_param = dwc->cdesc->period_callback_param;

		if (callback)
			callback(callback_param);
	}

	/*
	 * Error and transfer complete are highly unlikely, and will most
	 * likely be due to a configuration error by the user.
	 */
	if (unlikely(status_err & dwc->mask) ||
			unlikely(status_xfer & dwc->mask)) {
		int i;

		dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s "
				"interrupt, stopping DMA transfer\n",
				status_xfer ? "xfer" : "error");

		spin_lock_irqsave(&dwc->lock, flags);

		dwc_dump_chan_regs(dwc);

		dwc_chan_disable(dw, dwc);

		/* make sure DMA does not restart by loading a new list */
		channel_writel(dwc, LLP, 0);
		channel_writel(dwc, CTL_LO, 0);
		channel_writel(dwc, CTL_HI, 0);

		dma_writel(dw, CLEAR.ERROR, dwc->mask);
		dma_writel(dw, CLEAR.XFER, dwc->mask);

		for (i = 0; i < dwc->cdesc->periods; i++)
			dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli);

		spin_unlock_irqrestore(&dwc->lock, flags);
	}
}

/* ------------------------------------------------------------------------- */

static void dw_dma_tasklet(unsigned long data)
{
	struct dw_dma *dw = (struct dw_dma *)data;
	struct dw_dma_chan *dwc;
	u32 status_xfer;
	u32 status_err;
	int i;

	status_xfer = dma_readl(dw, RAW.XFER);
	status_err = dma_readl(dw, RAW.ERROR);

	dev_vdbg(dw->dma.dev, "%s: status_err=%x\n", __func__, status_err);

	for (i = 0; i < dw->dma.chancnt; i++) {
		dwc = &dw->chan[i];
		if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
			dwc_handle_cyclic(dw, dwc, status_err, status_xfer);
		else if (status_err & (1 << i))
			dwc_handle_error(dw, dwc);
		else if (status_xfer & (1 << i)) {
			unsigned long flags;

			spin_lock_irqsave(&dwc->lock, flags);
			if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
				if (dwc->tx_node_active != dwc->tx_list) {
					struct dw_desc *desc =
						list_entry(dwc->tx_node_active,
							   struct dw_desc,
							   desc_node);

					dma_writel(dw, CLEAR.XFER, dwc->mask);

					/* move pointer to next descriptor */
					dwc->tx_node_active =
						dwc->tx_node_active->next;

					dwc_do_single_block(dwc, desc);

					spin_unlock_irqrestore(&dwc->lock, flags);
					continue;
				} else {
					/* we are done here */
					clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
				}
			}
			spin_unlock_irqrestore(&dwc->lock, flags);

			dwc_scan_descriptors(dw, dwc);
		}
	}

	/*
	 * Re-enable interrupts.
	 */
	channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
}

static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
{
	struct dw_dma *dw = dev_id;
	u32 status;

	dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__,
			dma_readl(dw, STATUS_INT));

	/*
	 * Just disable the interrupts. We'll turn them back on in the
	 * softirq handler.
	 */
	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	status = dma_readl(dw, STATUS_INT);
	if (status) {
		dev_err(dw->dma.dev,
			"BUG: Unexpected interrupts pending: 0x%x\n",
			status);

		/* Try to recover */
		channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
	}

	tasklet_schedule(&dw->tasklet);

	return IRQ_HANDLED;
}

/*----------------------------------------------------------------------*/

static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dw_desc *desc = txd_to_dw_desc(tx);
	struct dw_dma_chan *dwc = to_dw_dma_chan(tx->chan);
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	cookie = dma_cookie_assign(tx);

	/*
	 * REVISIT: We should attempt to chain as many descriptors as
	 * possible, perhaps even appending to those already submitted
	 * for DMA. But this is hard to do in a race-free manner.
	 */
	if (list_empty(&dwc->active_list)) {
		dev_vdbg(chan2dev(tx->chan), "%s: started %u\n", __func__,
				desc->txd.cookie);
		list_add_tail(&desc->desc_node, &dwc->active_list);
		dwc_dostart(dwc, dwc_first_active(dwc));
	} else {
		dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", __func__,
				desc->txd.cookie);

		list_add_tail(&desc->desc_node, &dwc->queue);
	}

	spin_unlock_irqrestore(&dwc->lock, flags);

	return cookie;
}

static struct dma_async_tx_descriptor *
dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma_slave *dws = chan->private;
	struct dw_desc *desc;
	struct dw_desc *first;
	struct dw_desc *prev;
	size_t xfer_count;
	size_t offset;
	unsigned int src_width;
	unsigned int dst_width;
	unsigned int data_width;
	u32 ctllo;

	dev_vdbg(chan2dev(chan),
			"%s: d0x%llx s0x%llx l0x%zx f0x%lx\n", __func__,
			(unsigned long long)dest, (unsigned long long)src,
			len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
		return NULL;
	}

	data_width = min_t(unsigned int, dwc->dw->data_width[dwc_get_sms(dws)],
			   dwc->dw->data_width[dwc_get_dms(dws)]);

	src_width = dst_width = min_t(unsigned int, data_width,
				      dwc_fast_fls(src | dest | len));

	ctllo = DWC_DEFAULT_CTLLO(chan)
			| DWC_CTLL_DST_WIDTH(dst_width)
			| DWC_CTLL_SRC_WIDTH(src_width)
			| DWC_CTLL_DST_INC
			| DWC_CTLL_SRC_INC
			| DWC_CTLL_FC_M2M;
	prev = first = NULL;

	for (offset = 0; offset < len; offset += xfer_count << src_width) {
		xfer_count = min_t(size_t, (len - offset) >> src_width,
					   dwc->block_size);

		desc = dwc_desc_get(dwc);
		if (!desc)
			goto err_desc_get;

		desc->lli.sar = src + offset;
		desc->lli.dar = dest + offset;
		desc->lli.ctllo = ctllo;
		desc->lli.ctlhi = xfer_count;

		if (!first) {
			first = desc;
		} else {
			prev->lli.llp = desc->txd.phys;
			dma_sync_single_for_device(chan2parent(chan),
					prev->txd.phys, sizeof(prev->lli),
					DMA_TO_DEVICE);
			list_add_tail(&desc->desc_node,
					&first->tx_list);
		}
		prev = desc;
	}


	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last block */
		prev->lli.ctllo |= DWC_CTLL_INT_EN;

	prev->lli.llp = 0;
	dma_sync_single_for_device(chan2parent(chan),
			prev->txd.phys, sizeof(prev->lli),
			DMA_TO_DEVICE);

	first->txd.flags = flags;
	first->len = len;

	return &first->txd;

err_desc_get:
	dwc_desc_put(dwc, first);
	return NULL;
}

static struct dma_async_tx_descriptor *
dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma_slave *dws = chan->private;
	struct dma_slave_config *sconfig = &dwc->dma_sconfig;
	struct dw_desc *prev;
	struct dw_desc *first;
	u32 ctllo;
	dma_addr_t reg;
	unsigned int reg_width;
	unsigned int mem_width;
	unsigned int data_width;
	unsigned int i;
	struct scatterlist *sg;
	size_t total_len = 0;

	dev_vdbg(chan2dev(chan), "%s\n", __func__);

	if (unlikely(!dws || !sg_len))
		return NULL;

	prev = first = NULL;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		reg_width = __fls(sconfig->dst_addr_width);
		reg = sconfig->dst_addr;
		ctllo = (DWC_DEFAULT_CTLLO(chan)
				| DWC_CTLL_DST_WIDTH(reg_width)
				| DWC_CTLL_DST_FIX
				| DWC_CTLL_SRC_INC);

		ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
			DWC_CTLL_FC(DW_DMA_FC_D_M2P);

		data_width = dwc->dw->data_width[dwc_get_sms(dws)];

		for_each_sg(sgl, sg, sg_len, i) {
			struct dw_desc *desc;
			u32 len, dlen, mem;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);

			mem_width = min_t(unsigned int,
					  data_width, dwc_fast_fls(mem | len));

slave_sg_todev_fill_desc:
			desc = dwc_desc_get(dwc);
			if (!desc) {
				dev_err(chan2dev(chan),
					"not enough descriptors available\n");
				goto err_desc_get;
			}

			desc->lli.sar = mem;
			desc->lli.dar = reg;
			desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width);
			if ((len >> mem_width) > dwc->block_size) {
				dlen = dwc->block_size << mem_width;
				mem += dlen;
				len -= dlen;
			} else {
				dlen = len;
				len = 0;
			}

			desc->lli.ctlhi = dlen >> mem_width;

			if (!first) {
				first = desc;
			} else {
				prev->lli.llp = desc->txd.phys;
				dma_sync_single_for_device(chan2parent(chan),
						prev->txd.phys,
						sizeof(prev->lli),
						DMA_TO_DEVICE);
				list_add_tail(&desc->desc_node,
						&first->tx_list);
			}
			prev = desc;
			total_len += dlen;

			if (len)
				goto slave_sg_todev_fill_desc;
		}
		break;
	case DMA_DEV_TO_MEM:
		reg_width = __fls(sconfig->src_addr_width);
		reg = sconfig->src_addr;
		ctllo = (DWC_DEFAULT_CTLLO(chan)
				| DWC_CTLL_SRC_WIDTH(reg_width)
				| DWC_CTLL_DST_INC
				| DWC_CTLL_SRC_FIX);

		ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
			DWC_CTLL_FC(DW_DMA_FC_D_P2M);

		data_width = dwc->dw->data_width[dwc_get_dms(dws)];

		for_each_sg(sgl, sg, sg_len, i) {
			struct dw_desc *desc;
			u32 len, dlen, mem;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);

			mem_width = min_t(unsigned int,
					  data_width, dwc_fast_fls(mem | len));

slave_sg_fromdev_fill_desc:
			desc = dwc_desc_get(dwc);
			if (!desc) {
				dev_err(chan2dev(chan),
						"not enough descriptors available\n");
				goto err_desc_get;
			}

			desc->lli.sar = reg;
			desc->lli.dar = mem;
			desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width);
			if ((len >> reg_width) > dwc->block_size) {
				dlen = dwc->block_size << reg_width;
				mem += dlen;
				len -= dlen;
			} else {
				dlen = len;
				len = 0;
			}
			desc->lli.ctlhi = dlen >> reg_width;

			if (!first) {
				first = desc;
			} else {
				prev->lli.llp = desc->txd.phys;
				dma_sync_single_for_device(chan2parent(chan),
						prev->txd.phys,
						sizeof(prev->lli),
						DMA_TO_DEVICE);
				list_add_tail(&desc->desc_node,
						&first->tx_list);
			}
			prev = desc;
			total_len += dlen;

			if (len)
				goto slave_sg_fromdev_fill_desc;
		}
		break;
	default:
		return NULL;
	}

	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last block */
		prev->lli.ctllo |= DWC_CTLL_INT_EN;

	prev->lli.llp = 0;
	dma_sync_single_for_device(chan2parent(chan),
			prev->txd.phys, sizeof(prev->lli),
			DMA_TO_DEVICE);

	first->len = total_len;

	return &first->txd;

err_desc_get:
	dwc_desc_put(dwc, first);
	return NULL;
}

/*
 * Fix sconfig's burst size according to dw_dmac. We need to convert them as:
 * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
 *
 * NOTE: burst size 2 is not supported by controller.
 *
 * This can be done by finding least significant bit set: n & (n - 1)
 */
static inline void convert_burst(u32 *maxburst)
{
	if (*maxburst > 1)
		*maxburst = fls(*maxburst) - 2;
	else
		*maxburst = 0;
}

static int
set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);

	/* Check if chan is configured for slave transfers */
	if (!chan->private)
		return -EINVAL;

	memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));

	convert_burst(&dwc->dma_sconfig.src_maxburst);
	convert_burst(&dwc->dma_sconfig.dst_maxburst);

	return 0;
}

static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		       unsigned long arg)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(chan->device);
	struct dw_desc *desc, *_desc;
	unsigned long flags;
	u32 cfglo;
	LIST_HEAD(list);

	if (cmd == DMA_PAUSE) {
		spin_lock_irqsave(&dwc->lock, flags);

		cfglo = channel_readl(dwc, CFG_LO);
		channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
		while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY))
			cpu_relax();

		dwc->paused = true;
		spin_unlock_irqrestore(&dwc->lock, flags);
	} else if (cmd == DMA_RESUME) {
		if (!dwc->paused)
			return 0;

		spin_lock_irqsave(&dwc->lock, flags);

		cfglo = channel_readl(dwc, CFG_LO);
		channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);
		dwc->paused = false;

		spin_unlock_irqrestore(&dwc->lock, flags);
	} else if (cmd == DMA_TERMINATE_ALL) {
		spin_lock_irqsave(&dwc->lock, flags);

		clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);

		dwc_chan_disable(dw, dwc);

		dwc->paused = false;

		/* active_list entries will end up before queued entries */
		list_splice_init(&dwc->queue, &list);
		list_splice_init(&dwc->active_list, &list);

		spin_unlock_irqrestore(&dwc->lock, flags);

		/* Flush all pending and queued descriptors */
		list_for_each_entry_safe(desc, _desc, &list, desc_node)
			dwc_descriptor_complete(dwc, desc, false);
	} else if (cmd == DMA_SLAVE_CONFIG) {
		return set_runtime_config(chan, (struct dma_slave_config *)arg);
	} else {
		return -ENXIO;
	}

	return 0;
}

static enum dma_status
dwc_tx_status(struct dma_chan *chan,
	      dma_cookie_t cookie,
	      struct dma_tx_state *txstate)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret != DMA_SUCCESS) {
		dwc_scan_descriptors(to_dw_dma(chan->device), dwc);

		ret = dma_cookie_status(chan, cookie, txstate);
	}

	if (ret != DMA_SUCCESS)
		dma_set_residue(txstate, dwc_first_active(dwc)->len);

	if (dwc->paused)
		return DMA_PAUSED;

	return ret;
}

static void dwc_issue_pending(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);

	if (!list_empty(&dwc->queue))
		dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
}

static int dwc_alloc_chan_resources(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(chan->device);
	struct dw_desc *desc;
	int i;
	unsigned long flags;

	dev_vdbg(chan2dev(chan), "%s\n", __func__);

	/* ASSERT: channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
		return -EIO;
	}

	dma_cookie_init(chan);

	/*
	 * NOTE: some controllers may have additional features that we
	 * need to initialize here, like "scatter-gather" (which
	 * doesn't mean what you think it means), and status writeback.
	 */

	spin_lock_irqsave(&dwc->lock, flags);
	i = dwc->descs_allocated;
	while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) {
		spin_unlock_irqrestore(&dwc->lock, flags);

		desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL);
		if (!desc) {
			dev_info(chan2dev(chan),
				"only allocated %d descriptors\n", i);
			spin_lock_irqsave(&dwc->lock, flags);
			break;
		}

		INIT_LIST_HEAD(&desc->tx_list);
		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.tx_submit = dwc_tx_submit;
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.phys = dma_map_single(chan2parent(chan), &desc->lli,
				sizeof(desc->lli), DMA_TO_DEVICE);
		dwc_desc_put(dwc, desc);

		spin_lock_irqsave(&dwc->lock, flags);
		i = ++dwc->descs_allocated;
	}

	spin_unlock_irqrestore(&dwc->lock, flags);

	dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);

	return i;
}

static void dwc_free_chan_resources(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(chan->device);
	struct dw_desc *desc, *_desc;
	unsigned long flags;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "%s: descs allocated=%u\n", __func__,
			dwc->descs_allocated);

	/* ASSERT: channel is idle */
	BUG_ON(!list_empty(&dwc->active_list));
	BUG_ON(!list_empty(&dwc->queue));
	BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);

	spin_lock_irqsave(&dwc->lock, flags);
	list_splice_init(&dwc->free_list, &list);
	dwc->descs_allocated = 0;
	dwc->initialized = false;

	/* Disable interrupts */
	channel_clear_bit(dw, MASK.XFER, dwc->mask);
	channel_clear_bit(dw, MASK.ERROR, dwc->mask);

	spin_unlock_irqrestore(&dwc->lock, flags);

	list_for_each_entry_safe(desc, _desc, &list, desc_node) {
		dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
		dma_unmap_single(chan2parent(chan), desc->txd.phys,
				sizeof(desc->lli), DMA_TO_DEVICE);
		kfree(desc);
	}

	dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
}

/* --------------------- Cyclic DMA API extensions -------------------- */

/**
 * dw_dma_cyclic_start - start the cyclic DMA transfer
 * @chan: the DMA channel to start
 *
 * Must be called with soft interrupts disabled. Returns zero on success or
 * -errno on failure.
 */
int dw_dma_cyclic_start(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	unsigned long flags;

	if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
		dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n");
		return -ENODEV;
	}

	spin_lock_irqsave(&dwc->lock, flags);

	/* assert channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: Attempted to start non-idle channel\n");
		dwc_dump_chan_regs(dwc);
		spin_unlock_irqrestore(&dwc->lock, flags);
		return -EBUSY;
	}

	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	dma_writel(dw, CLEAR.XFER, dwc->mask);

	/* setup DMAC channel registers */
	channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys);
	channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	channel_writel(dwc, CTL_HI, 0);

	channel_set_bit(dw, CH_EN, dwc->mask);

	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;
}
EXPORT_SYMBOL(dw_dma_cyclic_start);

/**
 * dw_dma_cyclic_stop - stop the cyclic DMA transfer
 * @chan: the DMA channel to stop
 *
 * Must be called with soft interrupts disabled.
 */
void dw_dma_cyclic_stop(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);

	dwc_chan_disable(dw, dwc);

	spin_unlock_irqrestore(&dwc->lock, flags);
}
EXPORT_SYMBOL(dw_dma_cyclic_stop);

/**
 * dw_dma_cyclic_prep - prepare the cyclic DMA transfer
 * @chan: the DMA channel to prepare
 * @buf_addr: physical DMA address where the buffer starts
 * @buf_len: total number of bytes for the entire buffer
 * @period_len: number of bytes for each period
 * @direction: transfer direction, to or from device
 *
 * Must be called before trying to start the transfer. Returns a valid struct
 * dw_cyclic_desc if successful or an ERR_PTR(-errno) if not successful.
 */
struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
		dma_addr_t buf_addr, size_t buf_len, size_t period_len,
		enum dma_transfer_direction direction)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dma_slave_config *sconfig = &dwc->dma_sconfig;
	struct dw_cyclic_desc *cdesc;
	struct dw_cyclic_desc *retval = NULL;
	struct dw_desc *desc;
	struct dw_desc *last = NULL;
	unsigned long was_cyclic;
	unsigned int reg_width;
	unsigned int periods;
	unsigned int i;
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	if (dwc->nollp) {
		spin_unlock_irqrestore(&dwc->lock, flags);
		dev_dbg(chan2dev(&dwc->chan),
				"channel doesn't support LLP transfers\n");
		return ERR_PTR(-EINVAL);
	}

	if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) {
		spin_unlock_irqrestore(&dwc->lock, flags);
		dev_dbg(chan2dev(&dwc->chan),
				"queue and/or active list are not empty\n");
		return ERR_PTR(-EBUSY);
	}

	was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
	spin_unlock_irqrestore(&dwc->lock, flags);
	if (was_cyclic) {
		dev_dbg(chan2dev(&dwc->chan),
				"channel already prepared for cyclic DMA\n");
		return ERR_PTR(-EBUSY);
	}

	retval = ERR_PTR(-EINVAL);

	if (direction == DMA_MEM_TO_DEV)
		reg_width = __ffs(sconfig->dst_addr_width);
	else
		reg_width = __ffs(sconfig->src_addr_width);

	periods = buf_len / period_len;

	/* Check for too big/unaligned periods and unaligned DMA buffer. */
	if (period_len > (dwc->block_size << reg_width))
		goto out_err;
	if (unlikely(period_len & ((1 << reg_width) - 1)))
		goto out_err;
	if (unlikely(buf_addr & ((1 << reg_width) - 1)))
		goto out_err;
	if (unlikely(!(direction & (DMA_MEM_TO_DEV | DMA_DEV_TO_MEM))))
		goto out_err;

	retval = ERR_PTR(-ENOMEM);

	if (periods > NR_DESCS_PER_CHANNEL)
		goto out_err;

	cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL);
	if (!cdesc)
		goto out_err;

	cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL);
	if (!cdesc->desc)
		goto out_err_alloc;

	for (i = 0; i < periods; i++) {
		desc = dwc_desc_get(dwc);
		if (!desc)
			goto out_err_desc_get;

		switch (direction) {
		case DMA_MEM_TO_DEV:
			desc->lli.dar = sconfig->dst_addr;
			desc->lli.sar = buf_addr + (period_len * i);
			desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan)
					| DWC_CTLL_DST_WIDTH(reg_width)
					| DWC_CTLL_SRC_WIDTH(reg_width)
					| DWC_CTLL_DST_FIX
					| DWC_CTLL_SRC_INC
					| DWC_CTLL_INT_EN);

			desc->lli.ctllo |= sconfig->device_fc ?
				DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
				DWC_CTLL_FC(DW_DMA_FC_D_M2P);

			break;
		case DMA_DEV_TO_MEM:
			desc->lli.dar = buf_addr + (period_len * i);
			desc->lli.sar = sconfig->src_addr;
			desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan)
					| DWC_CTLL_SRC_WIDTH(reg_width)
					| DWC_CTLL_DST_WIDTH(reg_width)
					| DWC_CTLL_DST_INC
					| DWC_CTLL_SRC_FIX
					| DWC_CTLL_INT_EN);

			desc->lli.ctllo |= sconfig->device_fc ?
				DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
				DWC_CTLL_FC(DW_DMA_FC_D_P2M);

			break;
		default:
			break;
		}

		desc->lli.ctlhi = (period_len >> reg_width);
		cdesc->desc[i] = desc;

		if (last) {
			last->lli.llp = desc->txd.phys;
			dma_sync_single_for_device(chan2parent(chan),
					last->txd.phys, sizeof(last->lli),
					DMA_TO_DEVICE);
		}

		last = desc;
	}

	/* lets make a cyclic list */
	last->lli.llp = cdesc->desc[0]->txd.phys;
	dma_sync_single_for_device(chan2parent(chan), last->txd.phys,
			sizeof(last->lli), DMA_TO_DEVICE);

	dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%llx len %zu "
			"period %zu periods %d\n", (unsigned long long)buf_addr,
			buf_len, period_len, periods);

	cdesc->periods = periods;
	dwc->cdesc = cdesc;

	return cdesc;

out_err_desc_get:
	while (i--)
		dwc_desc_put(dwc, cdesc->desc[i]);
out_err_alloc:
	kfree(cdesc);
out_err:
	clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
	return (struct dw_cyclic_desc *)retval;
}
EXPORT_SYMBOL(dw_dma_cyclic_prep);

/**
 * dw_dma_cyclic_free - free a prepared cyclic DMA transfer
 * @chan: the DMA channel to free
 */
void dw_dma_cyclic_free(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	struct dw_cyclic_desc *cdesc = dwc->cdesc;
	int i;
	unsigned long flags;

	dev_dbg(chan2dev(&dwc->chan), "%s\n", __func__);

	if (!cdesc)
		return;

	spin_lock_irqsave(&dwc->lock, flags);

	dwc_chan_disable(dw, dwc);

	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	dma_writel(dw, CLEAR.XFER, dwc->mask);

	spin_unlock_irqrestore(&dwc->lock, flags);

	for (i = 0; i < cdesc->periods; i++)
		dwc_desc_put(dwc, cdesc->desc[i]);

	kfree(cdesc->desc);
	kfree(cdesc);

	clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
}
EXPORT_SYMBOL(dw_dma_cyclic_free);

/*----------------------------------------------------------------------*/

static void dw_dma_off(struct dw_dma *dw)
{
	int i;

	dma_writel(dw, CFG, 0);

	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
		cpu_relax();

	for (i = 0; i < dw->dma.chancnt; i++)
		dw->chan[i].initialized = false;
}

static int __devinit dw_probe(struct platform_device *pdev)
{
	struct dw_dma_platform_data *pdata;
	struct resource *io;
	struct dw_dma *dw;
	size_t size;
	void __iomem *regs;
	bool autocfg;
	unsigned int dw_params;
	unsigned int nr_channels;
	unsigned int max_blk_size = 0;
	int irq;
	int err;
	int i;

	pdata = dev_get_platdata(&pdev->dev);
	if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS)
		return -EINVAL;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!io)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	regs = devm_request_and_ioremap(&pdev->dev, io);
	if (!regs)
		return -EBUSY;

	dw_params = dma_read_byaddr(regs, DW_PARAMS);
	autocfg = dw_params >> DW_PARAMS_EN & 0x1;

	if (autocfg)
		nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 0x7) + 1;
	else
		nr_channels = pdata->nr_channels;

	size = sizeof(struct dw_dma) + nr_channels * sizeof(struct dw_dma_chan);
	dw = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
	if (!dw)
		return -ENOMEM;

	dw->clk = devm_clk_get(&pdev->dev, "hclk");
	if (IS_ERR(dw->clk))
		return PTR_ERR(dw->clk);
	clk_prepare_enable(dw->clk);

	dw->regs = regs;

	/* get hardware configuration parameters */
	if (autocfg) {
		max_blk_size = dma_readl(dw, MAX_BLK_SIZE);

		dw->nr_masters = (dw_params >> DW_PARAMS_NR_MASTER & 3) + 1;
		for (i = 0; i < dw->nr_masters; i++) {
			dw->data_width[i] =
				(dw_params >> DW_PARAMS_DATA_WIDTH(i) & 3) + 2;
		}
	} else {
		dw->nr_masters = pdata->nr_masters;
		memcpy(dw->data_width, pdata->data_width, 4);
	}

	/* Calculate all channel mask before DMA setup */
	dw->all_chan_mask = (1 << nr_channels) - 1;

	/* force dma off, just in case */
	dw_dma_off(dw);

	/* disable BLOCK interrupts as well */
	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);

	err = devm_request_irq(&pdev->dev, irq, dw_dma_interrupt, 0,
			       "dw_dmac", dw);
	if (err)
		return err;

	platform_set_drvdata(pdev, dw);

	tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);

	INIT_LIST_HEAD(&dw->dma.channels);
	for (i = 0; i < nr_channels; i++) {
		struct dw_dma_chan *dwc = &dw->chan[i];
		int r = nr_channels - i - 1;

		dwc->chan.device = &dw->dma;
		dma_cookie_init(&dwc->chan);
		if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING)
			list_add_tail(&dwc->chan.device_node,
					&dw->dma.channels);
		else
			list_add(&dwc->chan.device_node, &dw->dma.channels);

		/* 7 is highest priority & 0 is lowest. */
		if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
			dwc->priority = r;
		else
			dwc->priority = i;

		dwc->ch_regs = &__dw_regs(dw)->CHAN[i];
		spin_lock_init(&dwc->lock);
		dwc->mask = 1 << i;

		INIT_LIST_HEAD(&dwc->active_list);
		INIT_LIST_HEAD(&dwc->queue);
		INIT_LIST_HEAD(&dwc->free_list);

		channel_clear_bit(dw, CH_EN, dwc->mask);

		dwc->dw = dw;

		/* hardware configuration */
		if (autocfg) {
			unsigned int dwc_params;

			dwc_params = dma_read_byaddr(regs + r * sizeof(u32),
						     DWC_PARAMS);

			/* Decode maximum block size for given channel. The
			 * stored 4 bit value represents blocks from 0x00 for 3
			 * up to 0x0a for 4095. */
			dwc->block_size =
				(4 << ((max_blk_size >> 4 * i) & 0xf)) - 1;
			dwc->nollp =
				(dwc_params >> DWC_PARAMS_MBLK_EN & 0x1) == 0;
		} else {
			dwc->block_size = pdata->block_size;

			/* Check if channel supports multi block transfer */
			channel_writel(dwc, LLP, 0xfffffffc);
			dwc->nollp =
				(channel_readl(dwc, LLP) & 0xfffffffc) == 0;
			channel_writel(dwc, LLP, 0);
		}
	}

	/* Clear all interrupts on all channels. */
	dma_writel(dw, CLEAR.XFER, dw->all_chan_mask);
	dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask);
	dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask);
	dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask);
	dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask);

	dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
	if (pdata->is_private)
		dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask);
	dw->dma.dev = &pdev->dev;
	dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
	dw->dma.device_free_chan_resources = dwc_free_chan_resources;

	dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;

	dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;
	dw->dma.device_control = dwc_control;

	dw->dma.device_tx_status = dwc_tx_status;
	dw->dma.device_issue_pending = dwc_issue_pending;

	dma_writel(dw, CFG, DW_CFG_DMA_EN);

	printk(KERN_INFO "%s: DesignWare DMA Controller, %d channels\n",
			dev_name(&pdev->dev), nr_channels);

	dma_async_device_register(&dw->dma);

	return 0;
}

static int __devexit dw_remove(struct platform_device *pdev)
{
	struct dw_dma *dw = platform_get_drvdata(pdev);
	struct dw_dma_chan *dwc, *_dwc;

	dw_dma_off(dw);
	dma_async_device_unregister(&dw->dma);

	tasklet_kill(&dw->tasklet);

	list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
			chan.device_node) {
		list_del(&dwc->chan.device_node);
		channel_clear_bit(dw, CH_EN, dwc->mask);
	}

	return 0;
}

static void dw_shutdown(struct platform_device *pdev)
{
	struct dw_dma *dw = platform_get_drvdata(pdev);

	dw_dma_off(platform_get_drvdata(pdev));
	clk_disable_unprepare(dw->clk);
}

static int dw_suspend_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct dw_dma *dw = platform_get_drvdata(pdev);

	dw_dma_off(platform_get_drvdata(pdev));
	clk_disable_unprepare(dw->clk);

	return 0;
}

static int dw_resume_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct dw_dma *dw = platform_get_drvdata(pdev);

	clk_prepare_enable(dw->clk);
	dma_writel(dw, CFG, DW_CFG_DMA_EN);
	return 0;
}

static const struct dev_pm_ops dw_dev_pm_ops = {
	.suspend_noirq = dw_suspend_noirq,
	.resume_noirq = dw_resume_noirq,
	.freeze_noirq = dw_suspend_noirq,
	.thaw_noirq = dw_resume_noirq,
	.restore_noirq = dw_resume_noirq,
	.poweroff_noirq = dw_suspend_noirq,
};

#ifdef CONFIG_OF
static const struct of_device_id dw_dma_id_table[] = {
	{ .compatible = "snps,dma-spear1340" },
	{}
};
MODULE_DEVICE_TABLE(of, dw_dma_id_table);
#endif

static struct platform_driver dw_driver = {
	.remove		= __devexit_p(dw_remove),
	.shutdown	= dw_shutdown,
	.driver = {
		.name	= "dw_dmac",
		.pm	= &dw_dev_pm_ops,
		.of_match_table = of_match_ptr(dw_dma_id_table),
	},
};

static int __init dw_init(void)
{
	return platform_driver_probe(&dw_driver, dw_probe);
}
subsys_initcall(dw_init);

static void __exit dw_exit(void)
{
	platform_driver_unregister(&dw_driver);
}
module_exit(dw_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
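
The cyclic DMA extensions exported above (dw_dma_cyclic_prep/start/stop/free) are only documented through their kerneldoc, so the sketch below shows one way a client might drive them. This is a minimal, hypothetical example, not code from the driver: it assumes the channel was already requested with a struct dw_dma_slave attached as chan->private (this driver requires that for DMA_SLAVE_CONFIG to succeed), and the function name, FIFO address and burst/width values are made-up placeholders.

/* Hypothetical client-side sketch (not part of dw_dmac.c). */
#include <linux/dmaengine.h>
#include <linux/dw_dmac.h>
#include <linux/err.h>

static void my_period_done(void *param)
{
	/* one period of the ring buffer has completed */
}

static int my_start_cyclic_rx(struct dma_chan *chan, dma_addr_t buf,
			      size_t buf_len, size_t period_len,
			      dma_addr_t fifo_addr)
{
	/* describe the peripheral side; values here are placeholders */
	struct dma_slave_config cfg = {
		.direction	= DMA_DEV_TO_MEM,
		.src_addr	= fifo_addr,
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst	= 8,
	};
	struct dw_cyclic_desc *cdesc;
	int ret;

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		return ret;

	/* split buf into buf_len / period_len blocks, linked in a ring */
	cdesc = dw_dma_cyclic_prep(chan, buf, buf_len, period_len,
				   DMA_DEV_TO_MEM);
	if (IS_ERR(cdesc))
		return PTR_ERR(cdesc);

	/* called from the driver's tasklet after each completed period */
	cdesc->period_callback = my_period_done;
	cdesc->period_callback_param = NULL;

	return dw_dma_cyclic_start(chan);
}

Teardown would mirror this: dw_dma_cyclic_stop() followed by dw_dma_cyclic_free() on the same channel.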