Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v2.6.33-rc4 1456 lines 38 kB view raw
1/* 2 * Driver for the Synopsys DesignWare DMA Controller (aka DMACA on 3 * AVR32 systems.) 4 * 5 * Copyright (C) 2007-2008 Atmel Corporation 6 * 7 * This program is free software; you can redistribute it and/or modify 8 * it under the terms of the GNU General Public License version 2 as 9 * published by the Free Software Foundation. 10 */ 11#include <linux/clk.h> 12#include <linux/delay.h> 13#include <linux/dmaengine.h> 14#include <linux/dma-mapping.h> 15#include <linux/init.h> 16#include <linux/interrupt.h> 17#include <linux/io.h> 18#include <linux/mm.h> 19#include <linux/module.h> 20#include <linux/platform_device.h> 21#include <linux/slab.h> 22 23#include "dw_dmac_regs.h" 24 25/* 26 * This supports the Synopsys "DesignWare AHB Central DMA Controller", 27 * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all 28 * of which use ARM any more). See the "Databook" from Synopsys for 29 * information beyond what licensees probably provide. 30 * 31 * The driver has currently been tested only with the Atmel AT32AP7000, 32 * which does not support descriptor writeback. 33 */ 34 35/* NOTE: DMS+SMS is system-specific. We should get this information 36 * from the platform code somehow. 37 */ 38#define DWC_DEFAULT_CTLLO (DWC_CTLL_DST_MSIZE(0) \ 39 | DWC_CTLL_SRC_MSIZE(0) \ 40 | DWC_CTLL_DMS(0) \ 41 | DWC_CTLL_SMS(1) \ 42 | DWC_CTLL_LLP_D_EN \ 43 | DWC_CTLL_LLP_S_EN) 44 45/* 46 * This is configuration-dependent and usually a funny size like 4095. 47 * Let's round it down to the nearest power of two. 48 * 49 * Note that this is a transfer count, i.e. if we transfer 32-bit 50 * words, we can do 8192 bytes per descriptor. 51 * 52 * This parameter is also system-specific. 53 */ 54#define DWC_MAX_COUNT 2048U 55 56/* 57 * Number of descriptors to allocate for each channel. This should be 58 * made configurable somehow; preferably, the clients (at least the 59 * ones using slave transfers) should be able to give us a hint. 60 */ 61#define NR_DESCS_PER_CHANNEL 64 62 63/*----------------------------------------------------------------------*/ 64 65/* 66 * Because we're not relying on writeback from the controller (it may not 67 * even be configured into the core!) we don't need to use dma_pool. These 68 * descriptors -- and associated data -- are cacheable. We do need to make 69 * sure their dcache entries are written back before handing them off to 70 * the controller, though. 71 */ 72 73static struct device *chan2dev(struct dma_chan *chan) 74{ 75 return &chan->dev->device; 76} 77static struct device *chan2parent(struct dma_chan *chan) 78{ 79 return chan->dev->device.parent; 80} 81 82static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc) 83{ 84 return list_entry(dwc->active_list.next, struct dw_desc, desc_node); 85} 86 87static struct dw_desc *dwc_first_queued(struct dw_dma_chan *dwc) 88{ 89 return list_entry(dwc->queue.next, struct dw_desc, desc_node); 90} 91 92static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc) 93{ 94 struct dw_desc *desc, *_desc; 95 struct dw_desc *ret = NULL; 96 unsigned int i = 0; 97 98 spin_lock_bh(&dwc->lock); 99 list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) { 100 if (async_tx_test_ack(&desc->txd)) { 101 list_del(&desc->desc_node); 102 ret = desc; 103 break; 104 } 105 dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc); 106 i++; 107 } 108 spin_unlock_bh(&dwc->lock); 109 110 dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i); 111 112 return ret; 113} 114 115static void dwc_sync_desc_for_cpu(struct dw_dma_chan *dwc, struct dw_desc *desc) 116{ 117 struct dw_desc *child; 118 119 list_for_each_entry(child, &desc->tx_list, desc_node) 120 dma_sync_single_for_cpu(chan2parent(&dwc->chan), 121 child->txd.phys, sizeof(child->lli), 122 DMA_TO_DEVICE); 123 dma_sync_single_for_cpu(chan2parent(&dwc->chan), 124 desc->txd.phys, sizeof(desc->lli), 125 DMA_TO_DEVICE); 126} 127 128/* 129 * Move a descriptor, including any children, to the free list. 130 * `desc' must not be on any lists. 131 */ 132static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc) 133{ 134 if (desc) { 135 struct dw_desc *child; 136 137 dwc_sync_desc_for_cpu(dwc, desc); 138 139 spin_lock_bh(&dwc->lock); 140 list_for_each_entry(child, &desc->tx_list, desc_node) 141 dev_vdbg(chan2dev(&dwc->chan), 142 "moving child desc %p to freelist\n", 143 child); 144 list_splice_init(&desc->tx_list, &dwc->free_list); 145 dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc); 146 list_add(&desc->desc_node, &dwc->free_list); 147 spin_unlock_bh(&dwc->lock); 148 } 149} 150 151/* Called with dwc->lock held and bh disabled */ 152static dma_cookie_t 153dwc_assign_cookie(struct dw_dma_chan *dwc, struct dw_desc *desc) 154{ 155 dma_cookie_t cookie = dwc->chan.cookie; 156 157 if (++cookie < 0) 158 cookie = 1; 159 160 dwc->chan.cookie = cookie; 161 desc->txd.cookie = cookie; 162 163 return cookie; 164} 165 166/*----------------------------------------------------------------------*/ 167 168/* Called with dwc->lock held and bh disabled */ 169static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first) 170{ 171 struct dw_dma *dw = to_dw_dma(dwc->chan.device); 172 173 /* ASSERT: channel is idle */ 174 if (dma_readl(dw, CH_EN) & dwc->mask) { 175 dev_err(chan2dev(&dwc->chan), 176 "BUG: Attempted to start non-idle channel\n"); 177 dev_err(chan2dev(&dwc->chan), 178 " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n", 179 channel_readl(dwc, SAR), 180 channel_readl(dwc, DAR), 181 channel_readl(dwc, LLP), 182 channel_readl(dwc, CTL_HI), 183 channel_readl(dwc, CTL_LO)); 184 185 /* The tasklet will hopefully advance the queue... */ 186 return; 187 } 188 189 channel_writel(dwc, LLP, first->txd.phys); 190 channel_writel(dwc, CTL_LO, 191 DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN); 192 channel_writel(dwc, CTL_HI, 0); 193 channel_set_bit(dw, CH_EN, dwc->mask); 194} 195 196/*----------------------------------------------------------------------*/ 197 198static void 199dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc) 200{ 201 dma_async_tx_callback callback; 202 void *param; 203 struct dma_async_tx_descriptor *txd = &desc->txd; 204 205 dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie); 206 207 dwc->completed = txd->cookie; 208 callback = txd->callback; 209 param = txd->callback_param; 210 211 dwc_sync_desc_for_cpu(dwc, desc); 212 list_splice_init(&desc->tx_list, &dwc->free_list); 213 list_move(&desc->desc_node, &dwc->free_list); 214 215 if (!dwc->chan.private) { 216 struct device *parent = chan2parent(&dwc->chan); 217 if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) { 218 if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE) 219 dma_unmap_single(parent, desc->lli.dar, 220 desc->len, DMA_FROM_DEVICE); 221 else 222 dma_unmap_page(parent, desc->lli.dar, 223 desc->len, DMA_FROM_DEVICE); 224 } 225 if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) { 226 if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE) 227 dma_unmap_single(parent, desc->lli.sar, 228 desc->len, DMA_TO_DEVICE); 229 else 230 dma_unmap_page(parent, desc->lli.sar, 231 desc->len, DMA_TO_DEVICE); 232 } 233 } 234 235 /* 236 * The API requires that no submissions are done from a 237 * callback, so we don't need to drop the lock here 238 */ 239 if (callback) 240 callback(param); 241} 242 243static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc) 244{ 245 struct dw_desc *desc, *_desc; 246 LIST_HEAD(list); 247 248 if (dma_readl(dw, CH_EN) & dwc->mask) { 249 dev_err(chan2dev(&dwc->chan), 250 "BUG: XFER bit set, but channel not idle!\n"); 251 252 /* Try to continue after resetting the channel... */ 253 channel_clear_bit(dw, CH_EN, dwc->mask); 254 while (dma_readl(dw, CH_EN) & dwc->mask) 255 cpu_relax(); 256 } 257 258 /* 259 * Submit queued descriptors ASAP, i.e. before we go through 260 * the completed ones. 261 */ 262 if (!list_empty(&dwc->queue)) 263 dwc_dostart(dwc, dwc_first_queued(dwc)); 264 list_splice_init(&dwc->active_list, &list); 265 list_splice_init(&dwc->queue, &dwc->active_list); 266 267 list_for_each_entry_safe(desc, _desc, &list, desc_node) 268 dwc_descriptor_complete(dwc, desc); 269} 270 271static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) 272{ 273 dma_addr_t llp; 274 struct dw_desc *desc, *_desc; 275 struct dw_desc *child; 276 u32 status_xfer; 277 278 /* 279 * Clear block interrupt flag before scanning so that we don't 280 * miss any, and read LLP before RAW_XFER to ensure it is 281 * valid if we decide to scan the list. 282 */ 283 dma_writel(dw, CLEAR.BLOCK, dwc->mask); 284 llp = channel_readl(dwc, LLP); 285 status_xfer = dma_readl(dw, RAW.XFER); 286 287 if (status_xfer & dwc->mask) { 288 /* Everything we've submitted is done */ 289 dma_writel(dw, CLEAR.XFER, dwc->mask); 290 dwc_complete_all(dw, dwc); 291 return; 292 } 293 294 dev_vdbg(chan2dev(&dwc->chan), "scan_descriptors: llp=0x%x\n", llp); 295 296 list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) { 297 if (desc->lli.llp == llp) 298 /* This one is currently in progress */ 299 return; 300 301 list_for_each_entry(child, &desc->tx_list, desc_node) 302 if (child->lli.llp == llp) 303 /* Currently in progress */ 304 return; 305 306 /* 307 * No descriptors so far seem to be in progress, i.e. 308 * this one must be done. 309 */ 310 dwc_descriptor_complete(dwc, desc); 311 } 312 313 dev_err(chan2dev(&dwc->chan), 314 "BUG: All descriptors done, but channel not idle!\n"); 315 316 /* Try to continue after resetting the channel... */ 317 channel_clear_bit(dw, CH_EN, dwc->mask); 318 while (dma_readl(dw, CH_EN) & dwc->mask) 319 cpu_relax(); 320 321 if (!list_empty(&dwc->queue)) { 322 dwc_dostart(dwc, dwc_first_queued(dwc)); 323 list_splice_init(&dwc->queue, &dwc->active_list); 324 } 325} 326 327static void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli) 328{ 329 dev_printk(KERN_CRIT, chan2dev(&dwc->chan), 330 " desc: s0x%x d0x%x l0x%x c0x%x:%x\n", 331 lli->sar, lli->dar, lli->llp, 332 lli->ctlhi, lli->ctllo); 333} 334 335static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc) 336{ 337 struct dw_desc *bad_desc; 338 struct dw_desc *child; 339 340 dwc_scan_descriptors(dw, dwc); 341 342 /* 343 * The descriptor currently at the head of the active list is 344 * borked. Since we don't have any way to report errors, we'll 345 * just have to scream loudly and try to carry on. 346 */ 347 bad_desc = dwc_first_active(dwc); 348 list_del_init(&bad_desc->desc_node); 349 list_splice_init(&dwc->queue, dwc->active_list.prev); 350 351 /* Clear the error flag and try to restart the controller */ 352 dma_writel(dw, CLEAR.ERROR, dwc->mask); 353 if (!list_empty(&dwc->active_list)) 354 dwc_dostart(dwc, dwc_first_active(dwc)); 355 356 /* 357 * KERN_CRITICAL may seem harsh, but since this only happens 358 * when someone submits a bad physical address in a 359 * descriptor, we should consider ourselves lucky that the 360 * controller flagged an error instead of scribbling over 361 * random memory locations. 362 */ 363 dev_printk(KERN_CRIT, chan2dev(&dwc->chan), 364 "Bad descriptor submitted for DMA!\n"); 365 dev_printk(KERN_CRIT, chan2dev(&dwc->chan), 366 " cookie: %d\n", bad_desc->txd.cookie); 367 dwc_dump_lli(dwc, &bad_desc->lli); 368 list_for_each_entry(child, &bad_desc->tx_list, desc_node) 369 dwc_dump_lli(dwc, &child->lli); 370 371 /* Pretend the descriptor completed successfully */ 372 dwc_descriptor_complete(dwc, bad_desc); 373} 374 375/* --------------------- Cyclic DMA API extensions -------------------- */ 376 377inline dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan) 378{ 379 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 380 return channel_readl(dwc, SAR); 381} 382EXPORT_SYMBOL(dw_dma_get_src_addr); 383 384inline dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan) 385{ 386 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 387 return channel_readl(dwc, DAR); 388} 389EXPORT_SYMBOL(dw_dma_get_dst_addr); 390 391/* called with dwc->lock held and all DMAC interrupts disabled */ 392static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc, 393 u32 status_block, u32 status_err, u32 status_xfer) 394{ 395 if (status_block & dwc->mask) { 396 void (*callback)(void *param); 397 void *callback_param; 398 399 dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n", 400 channel_readl(dwc, LLP)); 401 dma_writel(dw, CLEAR.BLOCK, dwc->mask); 402 403 callback = dwc->cdesc->period_callback; 404 callback_param = dwc->cdesc->period_callback_param; 405 if (callback) { 406 spin_unlock(&dwc->lock); 407 callback(callback_param); 408 spin_lock(&dwc->lock); 409 } 410 } 411 412 /* 413 * Error and transfer complete are highly unlikely, and will most 414 * likely be due to a configuration error by the user. 415 */ 416 if (unlikely(status_err & dwc->mask) || 417 unlikely(status_xfer & dwc->mask)) { 418 int i; 419 420 dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s " 421 "interrupt, stopping DMA transfer\n", 422 status_xfer ? "xfer" : "error"); 423 dev_err(chan2dev(&dwc->chan), 424 " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n", 425 channel_readl(dwc, SAR), 426 channel_readl(dwc, DAR), 427 channel_readl(dwc, LLP), 428 channel_readl(dwc, CTL_HI), 429 channel_readl(dwc, CTL_LO)); 430 431 channel_clear_bit(dw, CH_EN, dwc->mask); 432 while (dma_readl(dw, CH_EN) & dwc->mask) 433 cpu_relax(); 434 435 /* make sure DMA does not restart by loading a new list */ 436 channel_writel(dwc, LLP, 0); 437 channel_writel(dwc, CTL_LO, 0); 438 channel_writel(dwc, CTL_HI, 0); 439 440 dma_writel(dw, CLEAR.BLOCK, dwc->mask); 441 dma_writel(dw, CLEAR.ERROR, dwc->mask); 442 dma_writel(dw, CLEAR.XFER, dwc->mask); 443 444 for (i = 0; i < dwc->cdesc->periods; i++) 445 dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli); 446 } 447} 448 449/* ------------------------------------------------------------------------- */ 450 451static void dw_dma_tasklet(unsigned long data) 452{ 453 struct dw_dma *dw = (struct dw_dma *)data; 454 struct dw_dma_chan *dwc; 455 u32 status_block; 456 u32 status_xfer; 457 u32 status_err; 458 int i; 459 460 status_block = dma_readl(dw, RAW.BLOCK); 461 status_xfer = dma_readl(dw, RAW.XFER); 462 status_err = dma_readl(dw, RAW.ERROR); 463 464 dev_vdbg(dw->dma.dev, "tasklet: status_block=%x status_err=%x\n", 465 status_block, status_err); 466 467 for (i = 0; i < dw->dma.chancnt; i++) { 468 dwc = &dw->chan[i]; 469 spin_lock(&dwc->lock); 470 if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) 471 dwc_handle_cyclic(dw, dwc, status_block, status_err, 472 status_xfer); 473 else if (status_err & (1 << i)) 474 dwc_handle_error(dw, dwc); 475 else if ((status_block | status_xfer) & (1 << i)) 476 dwc_scan_descriptors(dw, dwc); 477 spin_unlock(&dwc->lock); 478 } 479 480 /* 481 * Re-enable interrupts. Block Complete interrupts are only 482 * enabled if the INT_EN bit in the descriptor is set. This 483 * will trigger a scan before the whole list is done. 484 */ 485 channel_set_bit(dw, MASK.XFER, dw->all_chan_mask); 486 channel_set_bit(dw, MASK.BLOCK, dw->all_chan_mask); 487 channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask); 488} 489 490static irqreturn_t dw_dma_interrupt(int irq, void *dev_id) 491{ 492 struct dw_dma *dw = dev_id; 493 u32 status; 494 495 dev_vdbg(dw->dma.dev, "interrupt: status=0x%x\n", 496 dma_readl(dw, STATUS_INT)); 497 498 /* 499 * Just disable the interrupts. We'll turn them back on in the 500 * softirq handler. 501 */ 502 channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask); 503 channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask); 504 channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask); 505 506 status = dma_readl(dw, STATUS_INT); 507 if (status) { 508 dev_err(dw->dma.dev, 509 "BUG: Unexpected interrupts pending: 0x%x\n", 510 status); 511 512 /* Try to recover */ 513 channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1); 514 channel_clear_bit(dw, MASK.BLOCK, (1 << 8) - 1); 515 channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1); 516 channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1); 517 channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1); 518 } 519 520 tasklet_schedule(&dw->tasklet); 521 522 return IRQ_HANDLED; 523} 524 525/*----------------------------------------------------------------------*/ 526 527static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx) 528{ 529 struct dw_desc *desc = txd_to_dw_desc(tx); 530 struct dw_dma_chan *dwc = to_dw_dma_chan(tx->chan); 531 dma_cookie_t cookie; 532 533 spin_lock_bh(&dwc->lock); 534 cookie = dwc_assign_cookie(dwc, desc); 535 536 /* 537 * REVISIT: We should attempt to chain as many descriptors as 538 * possible, perhaps even appending to those already submitted 539 * for DMA. But this is hard to do in a race-free manner. 540 */ 541 if (list_empty(&dwc->active_list)) { 542 dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n", 543 desc->txd.cookie); 544 dwc_dostart(dwc, desc); 545 list_add_tail(&desc->desc_node, &dwc->active_list); 546 } else { 547 dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n", 548 desc->txd.cookie); 549 550 list_add_tail(&desc->desc_node, &dwc->queue); 551 } 552 553 spin_unlock_bh(&dwc->lock); 554 555 return cookie; 556} 557 558static struct dma_async_tx_descriptor * 559dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, 560 size_t len, unsigned long flags) 561{ 562 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 563 struct dw_desc *desc; 564 struct dw_desc *first; 565 struct dw_desc *prev; 566 size_t xfer_count; 567 size_t offset; 568 unsigned int src_width; 569 unsigned int dst_width; 570 u32 ctllo; 571 572 dev_vdbg(chan2dev(chan), "prep_dma_memcpy d0x%x s0x%x l0x%zx f0x%lx\n", 573 dest, src, len, flags); 574 575 if (unlikely(!len)) { 576 dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n"); 577 return NULL; 578 } 579 580 /* 581 * We can be a lot more clever here, but this should take care 582 * of the most common optimization. 583 */ 584 if (!((src | dest | len) & 3)) 585 src_width = dst_width = 2; 586 else if (!((src | dest | len) & 1)) 587 src_width = dst_width = 1; 588 else 589 src_width = dst_width = 0; 590 591 ctllo = DWC_DEFAULT_CTLLO 592 | DWC_CTLL_DST_WIDTH(dst_width) 593 | DWC_CTLL_SRC_WIDTH(src_width) 594 | DWC_CTLL_DST_INC 595 | DWC_CTLL_SRC_INC 596 | DWC_CTLL_FC_M2M; 597 prev = first = NULL; 598 599 for (offset = 0; offset < len; offset += xfer_count << src_width) { 600 xfer_count = min_t(size_t, (len - offset) >> src_width, 601 DWC_MAX_COUNT); 602 603 desc = dwc_desc_get(dwc); 604 if (!desc) 605 goto err_desc_get; 606 607 desc->lli.sar = src + offset; 608 desc->lli.dar = dest + offset; 609 desc->lli.ctllo = ctllo; 610 desc->lli.ctlhi = xfer_count; 611 612 if (!first) { 613 first = desc; 614 } else { 615 prev->lli.llp = desc->txd.phys; 616 dma_sync_single_for_device(chan2parent(chan), 617 prev->txd.phys, sizeof(prev->lli), 618 DMA_TO_DEVICE); 619 list_add_tail(&desc->desc_node, 620 &first->tx_list); 621 } 622 prev = desc; 623 } 624 625 626 if (flags & DMA_PREP_INTERRUPT) 627 /* Trigger interrupt after last block */ 628 prev->lli.ctllo |= DWC_CTLL_INT_EN; 629 630 prev->lli.llp = 0; 631 dma_sync_single_for_device(chan2parent(chan), 632 prev->txd.phys, sizeof(prev->lli), 633 DMA_TO_DEVICE); 634 635 first->txd.flags = flags; 636 first->len = len; 637 638 return &first->txd; 639 640err_desc_get: 641 dwc_desc_put(dwc, first); 642 return NULL; 643} 644 645static struct dma_async_tx_descriptor * 646dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, 647 unsigned int sg_len, enum dma_data_direction direction, 648 unsigned long flags) 649{ 650 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 651 struct dw_dma_slave *dws = chan->private; 652 struct dw_desc *prev; 653 struct dw_desc *first; 654 u32 ctllo; 655 dma_addr_t reg; 656 unsigned int reg_width; 657 unsigned int mem_width; 658 unsigned int i; 659 struct scatterlist *sg; 660 size_t total_len = 0; 661 662 dev_vdbg(chan2dev(chan), "prep_dma_slave\n"); 663 664 if (unlikely(!dws || !sg_len)) 665 return NULL; 666 667 reg_width = dws->reg_width; 668 prev = first = NULL; 669 670 switch (direction) { 671 case DMA_TO_DEVICE: 672 ctllo = (DWC_DEFAULT_CTLLO 673 | DWC_CTLL_DST_WIDTH(reg_width) 674 | DWC_CTLL_DST_FIX 675 | DWC_CTLL_SRC_INC 676 | DWC_CTLL_FC_M2P); 677 reg = dws->tx_reg; 678 for_each_sg(sgl, sg, sg_len, i) { 679 struct dw_desc *desc; 680 u32 len; 681 u32 mem; 682 683 desc = dwc_desc_get(dwc); 684 if (!desc) { 685 dev_err(chan2dev(chan), 686 "not enough descriptors available\n"); 687 goto err_desc_get; 688 } 689 690 mem = sg_phys(sg); 691 len = sg_dma_len(sg); 692 mem_width = 2; 693 if (unlikely(mem & 3 || len & 3)) 694 mem_width = 0; 695 696 desc->lli.sar = mem; 697 desc->lli.dar = reg; 698 desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width); 699 desc->lli.ctlhi = len >> mem_width; 700 701 if (!first) { 702 first = desc; 703 } else { 704 prev->lli.llp = desc->txd.phys; 705 dma_sync_single_for_device(chan2parent(chan), 706 prev->txd.phys, 707 sizeof(prev->lli), 708 DMA_TO_DEVICE); 709 list_add_tail(&desc->desc_node, 710 &first->tx_list); 711 } 712 prev = desc; 713 total_len += len; 714 } 715 break; 716 case DMA_FROM_DEVICE: 717 ctllo = (DWC_DEFAULT_CTLLO 718 | DWC_CTLL_SRC_WIDTH(reg_width) 719 | DWC_CTLL_DST_INC 720 | DWC_CTLL_SRC_FIX 721 | DWC_CTLL_FC_P2M); 722 723 reg = dws->rx_reg; 724 for_each_sg(sgl, sg, sg_len, i) { 725 struct dw_desc *desc; 726 u32 len; 727 u32 mem; 728 729 desc = dwc_desc_get(dwc); 730 if (!desc) { 731 dev_err(chan2dev(chan), 732 "not enough descriptors available\n"); 733 goto err_desc_get; 734 } 735 736 mem = sg_phys(sg); 737 len = sg_dma_len(sg); 738 mem_width = 2; 739 if (unlikely(mem & 3 || len & 3)) 740 mem_width = 0; 741 742 desc->lli.sar = reg; 743 desc->lli.dar = mem; 744 desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width); 745 desc->lli.ctlhi = len >> reg_width; 746 747 if (!first) { 748 first = desc; 749 } else { 750 prev->lli.llp = desc->txd.phys; 751 dma_sync_single_for_device(chan2parent(chan), 752 prev->txd.phys, 753 sizeof(prev->lli), 754 DMA_TO_DEVICE); 755 list_add_tail(&desc->desc_node, 756 &first->tx_list); 757 } 758 prev = desc; 759 total_len += len; 760 } 761 break; 762 default: 763 return NULL; 764 } 765 766 if (flags & DMA_PREP_INTERRUPT) 767 /* Trigger interrupt after last block */ 768 prev->lli.ctllo |= DWC_CTLL_INT_EN; 769 770 prev->lli.llp = 0; 771 dma_sync_single_for_device(chan2parent(chan), 772 prev->txd.phys, sizeof(prev->lli), 773 DMA_TO_DEVICE); 774 775 first->len = total_len; 776 777 return &first->txd; 778 779err_desc_get: 780 dwc_desc_put(dwc, first); 781 return NULL; 782} 783 784static void dwc_terminate_all(struct dma_chan *chan) 785{ 786 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 787 struct dw_dma *dw = to_dw_dma(chan->device); 788 struct dw_desc *desc, *_desc; 789 LIST_HEAD(list); 790 791 /* 792 * This is only called when something went wrong elsewhere, so 793 * we don't really care about the data. Just disable the 794 * channel. We still have to poll the channel enable bit due 795 * to AHB/HSB limitations. 796 */ 797 spin_lock_bh(&dwc->lock); 798 799 channel_clear_bit(dw, CH_EN, dwc->mask); 800 801 while (dma_readl(dw, CH_EN) & dwc->mask) 802 cpu_relax(); 803 804 /* active_list entries will end up before queued entries */ 805 list_splice_init(&dwc->queue, &list); 806 list_splice_init(&dwc->active_list, &list); 807 808 spin_unlock_bh(&dwc->lock); 809 810 /* Flush all pending and queued descriptors */ 811 list_for_each_entry_safe(desc, _desc, &list, desc_node) 812 dwc_descriptor_complete(dwc, desc); 813} 814 815static enum dma_status 816dwc_is_tx_complete(struct dma_chan *chan, 817 dma_cookie_t cookie, 818 dma_cookie_t *done, dma_cookie_t *used) 819{ 820 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 821 dma_cookie_t last_used; 822 dma_cookie_t last_complete; 823 int ret; 824 825 last_complete = dwc->completed; 826 last_used = chan->cookie; 827 828 ret = dma_async_is_complete(cookie, last_complete, last_used); 829 if (ret != DMA_SUCCESS) { 830 dwc_scan_descriptors(to_dw_dma(chan->device), dwc); 831 832 last_complete = dwc->completed; 833 last_used = chan->cookie; 834 835 ret = dma_async_is_complete(cookie, last_complete, last_used); 836 } 837 838 if (done) 839 *done = last_complete; 840 if (used) 841 *used = last_used; 842 843 return ret; 844} 845 846static void dwc_issue_pending(struct dma_chan *chan) 847{ 848 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 849 850 spin_lock_bh(&dwc->lock); 851 if (!list_empty(&dwc->queue)) 852 dwc_scan_descriptors(to_dw_dma(chan->device), dwc); 853 spin_unlock_bh(&dwc->lock); 854} 855 856static int dwc_alloc_chan_resources(struct dma_chan *chan) 857{ 858 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 859 struct dw_dma *dw = to_dw_dma(chan->device); 860 struct dw_desc *desc; 861 struct dw_dma_slave *dws; 862 int i; 863 u32 cfghi; 864 u32 cfglo; 865 866 dev_vdbg(chan2dev(chan), "alloc_chan_resources\n"); 867 868 /* ASSERT: channel is idle */ 869 if (dma_readl(dw, CH_EN) & dwc->mask) { 870 dev_dbg(chan2dev(chan), "DMA channel not idle?\n"); 871 return -EIO; 872 } 873 874 dwc->completed = chan->cookie = 1; 875 876 cfghi = DWC_CFGH_FIFO_MODE; 877 cfglo = 0; 878 879 dws = chan->private; 880 if (dws) { 881 /* 882 * We need controller-specific data to set up slave 883 * transfers. 884 */ 885 BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev); 886 887 cfghi = dws->cfg_hi; 888 cfglo = dws->cfg_lo; 889 } 890 channel_writel(dwc, CFG_LO, cfglo); 891 channel_writel(dwc, CFG_HI, cfghi); 892 893 /* 894 * NOTE: some controllers may have additional features that we 895 * need to initialize here, like "scatter-gather" (which 896 * doesn't mean what you think it means), and status writeback. 897 */ 898 899 spin_lock_bh(&dwc->lock); 900 i = dwc->descs_allocated; 901 while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) { 902 spin_unlock_bh(&dwc->lock); 903 904 desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL); 905 if (!desc) { 906 dev_info(chan2dev(chan), 907 "only allocated %d descriptors\n", i); 908 spin_lock_bh(&dwc->lock); 909 break; 910 } 911 912 INIT_LIST_HEAD(&desc->tx_list); 913 dma_async_tx_descriptor_init(&desc->txd, chan); 914 desc->txd.tx_submit = dwc_tx_submit; 915 desc->txd.flags = DMA_CTRL_ACK; 916 desc->txd.phys = dma_map_single(chan2parent(chan), &desc->lli, 917 sizeof(desc->lli), DMA_TO_DEVICE); 918 dwc_desc_put(dwc, desc); 919 920 spin_lock_bh(&dwc->lock); 921 i = ++dwc->descs_allocated; 922 } 923 924 /* Enable interrupts */ 925 channel_set_bit(dw, MASK.XFER, dwc->mask); 926 channel_set_bit(dw, MASK.BLOCK, dwc->mask); 927 channel_set_bit(dw, MASK.ERROR, dwc->mask); 928 929 spin_unlock_bh(&dwc->lock); 930 931 dev_dbg(chan2dev(chan), 932 "alloc_chan_resources allocated %d descriptors\n", i); 933 934 return i; 935} 936 937static void dwc_free_chan_resources(struct dma_chan *chan) 938{ 939 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 940 struct dw_dma *dw = to_dw_dma(chan->device); 941 struct dw_desc *desc, *_desc; 942 LIST_HEAD(list); 943 944 dev_dbg(chan2dev(chan), "free_chan_resources (descs allocated=%u)\n", 945 dwc->descs_allocated); 946 947 /* ASSERT: channel is idle */ 948 BUG_ON(!list_empty(&dwc->active_list)); 949 BUG_ON(!list_empty(&dwc->queue)); 950 BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask); 951 952 spin_lock_bh(&dwc->lock); 953 list_splice_init(&dwc->free_list, &list); 954 dwc->descs_allocated = 0; 955 956 /* Disable interrupts */ 957 channel_clear_bit(dw, MASK.XFER, dwc->mask); 958 channel_clear_bit(dw, MASK.BLOCK, dwc->mask); 959 channel_clear_bit(dw, MASK.ERROR, dwc->mask); 960 961 spin_unlock_bh(&dwc->lock); 962 963 list_for_each_entry_safe(desc, _desc, &list, desc_node) { 964 dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc); 965 dma_unmap_single(chan2parent(chan), desc->txd.phys, 966 sizeof(desc->lli), DMA_TO_DEVICE); 967 kfree(desc); 968 } 969 970 dev_vdbg(chan2dev(chan), "free_chan_resources done\n"); 971} 972 973/* --------------------- Cyclic DMA API extensions -------------------- */ 974 975/** 976 * dw_dma_cyclic_start - start the cyclic DMA transfer 977 * @chan: the DMA channel to start 978 * 979 * Must be called with soft interrupts disabled. Returns zero on success or 980 * -errno on failure. 981 */ 982int dw_dma_cyclic_start(struct dma_chan *chan) 983{ 984 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 985 struct dw_dma *dw = to_dw_dma(dwc->chan.device); 986 987 if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) { 988 dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n"); 989 return -ENODEV; 990 } 991 992 spin_lock(&dwc->lock); 993 994 /* assert channel is idle */ 995 if (dma_readl(dw, CH_EN) & dwc->mask) { 996 dev_err(chan2dev(&dwc->chan), 997 "BUG: Attempted to start non-idle channel\n"); 998 dev_err(chan2dev(&dwc->chan), 999 " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n", 1000 channel_readl(dwc, SAR), 1001 channel_readl(dwc, DAR), 1002 channel_readl(dwc, LLP), 1003 channel_readl(dwc, CTL_HI), 1004 channel_readl(dwc, CTL_LO)); 1005 spin_unlock(&dwc->lock); 1006 return -EBUSY; 1007 } 1008 1009 dma_writel(dw, CLEAR.BLOCK, dwc->mask); 1010 dma_writel(dw, CLEAR.ERROR, dwc->mask); 1011 dma_writel(dw, CLEAR.XFER, dwc->mask); 1012 1013 /* setup DMAC channel registers */ 1014 channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys); 1015 channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN); 1016 channel_writel(dwc, CTL_HI, 0); 1017 1018 channel_set_bit(dw, CH_EN, dwc->mask); 1019 1020 spin_unlock(&dwc->lock); 1021 1022 return 0; 1023} 1024EXPORT_SYMBOL(dw_dma_cyclic_start); 1025 1026/** 1027 * dw_dma_cyclic_stop - stop the cyclic DMA transfer 1028 * @chan: the DMA channel to stop 1029 * 1030 * Must be called with soft interrupts disabled. 1031 */ 1032void dw_dma_cyclic_stop(struct dma_chan *chan) 1033{ 1034 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 1035 struct dw_dma *dw = to_dw_dma(dwc->chan.device); 1036 1037 spin_lock(&dwc->lock); 1038 1039 channel_clear_bit(dw, CH_EN, dwc->mask); 1040 while (dma_readl(dw, CH_EN) & dwc->mask) 1041 cpu_relax(); 1042 1043 spin_unlock(&dwc->lock); 1044} 1045EXPORT_SYMBOL(dw_dma_cyclic_stop); 1046 1047/** 1048 * dw_dma_cyclic_prep - prepare the cyclic DMA transfer 1049 * @chan: the DMA channel to prepare 1050 * @buf_addr: physical DMA address where the buffer starts 1051 * @buf_len: total number of bytes for the entire buffer 1052 * @period_len: number of bytes for each period 1053 * @direction: transfer direction, to or from device 1054 * 1055 * Must be called before trying to start the transfer. Returns a valid struct 1056 * dw_cyclic_desc if successful or an ERR_PTR(-errno) if not successful. 1057 */ 1058struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, 1059 dma_addr_t buf_addr, size_t buf_len, size_t period_len, 1060 enum dma_data_direction direction) 1061{ 1062 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 1063 struct dw_cyclic_desc *cdesc; 1064 struct dw_cyclic_desc *retval = NULL; 1065 struct dw_desc *desc; 1066 struct dw_desc *last = NULL; 1067 struct dw_dma_slave *dws = chan->private; 1068 unsigned long was_cyclic; 1069 unsigned int reg_width; 1070 unsigned int periods; 1071 unsigned int i; 1072 1073 spin_lock_bh(&dwc->lock); 1074 if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) { 1075 spin_unlock_bh(&dwc->lock); 1076 dev_dbg(chan2dev(&dwc->chan), 1077 "queue and/or active list are not empty\n"); 1078 return ERR_PTR(-EBUSY); 1079 } 1080 1081 was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags); 1082 spin_unlock_bh(&dwc->lock); 1083 if (was_cyclic) { 1084 dev_dbg(chan2dev(&dwc->chan), 1085 "channel already prepared for cyclic DMA\n"); 1086 return ERR_PTR(-EBUSY); 1087 } 1088 1089 retval = ERR_PTR(-EINVAL); 1090 reg_width = dws->reg_width; 1091 periods = buf_len / period_len; 1092 1093 /* Check for too big/unaligned periods and unaligned DMA buffer. */ 1094 if (period_len > (DWC_MAX_COUNT << reg_width)) 1095 goto out_err; 1096 if (unlikely(period_len & ((1 << reg_width) - 1))) 1097 goto out_err; 1098 if (unlikely(buf_addr & ((1 << reg_width) - 1))) 1099 goto out_err; 1100 if (unlikely(!(direction & (DMA_TO_DEVICE | DMA_FROM_DEVICE)))) 1101 goto out_err; 1102 1103 retval = ERR_PTR(-ENOMEM); 1104 1105 if (periods > NR_DESCS_PER_CHANNEL) 1106 goto out_err; 1107 1108 cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL); 1109 if (!cdesc) 1110 goto out_err; 1111 1112 cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL); 1113 if (!cdesc->desc) 1114 goto out_err_alloc; 1115 1116 for (i = 0; i < periods; i++) { 1117 desc = dwc_desc_get(dwc); 1118 if (!desc) 1119 goto out_err_desc_get; 1120 1121 switch (direction) { 1122 case DMA_TO_DEVICE: 1123 desc->lli.dar = dws->tx_reg; 1124 desc->lli.sar = buf_addr + (period_len * i); 1125 desc->lli.ctllo = (DWC_DEFAULT_CTLLO 1126 | DWC_CTLL_DST_WIDTH(reg_width) 1127 | DWC_CTLL_SRC_WIDTH(reg_width) 1128 | DWC_CTLL_DST_FIX 1129 | DWC_CTLL_SRC_INC 1130 | DWC_CTLL_FC_M2P 1131 | DWC_CTLL_INT_EN); 1132 break; 1133 case DMA_FROM_DEVICE: 1134 desc->lli.dar = buf_addr + (period_len * i); 1135 desc->lli.sar = dws->rx_reg; 1136 desc->lli.ctllo = (DWC_DEFAULT_CTLLO 1137 | DWC_CTLL_SRC_WIDTH(reg_width) 1138 | DWC_CTLL_DST_WIDTH(reg_width) 1139 | DWC_CTLL_DST_INC 1140 | DWC_CTLL_SRC_FIX 1141 | DWC_CTLL_FC_P2M 1142 | DWC_CTLL_INT_EN); 1143 break; 1144 default: 1145 break; 1146 } 1147 1148 desc->lli.ctlhi = (period_len >> reg_width); 1149 cdesc->desc[i] = desc; 1150 1151 if (last) { 1152 last->lli.llp = desc->txd.phys; 1153 dma_sync_single_for_device(chan2parent(chan), 1154 last->txd.phys, sizeof(last->lli), 1155 DMA_TO_DEVICE); 1156 } 1157 1158 last = desc; 1159 } 1160 1161 /* lets make a cyclic list */ 1162 last->lli.llp = cdesc->desc[0]->txd.phys; 1163 dma_sync_single_for_device(chan2parent(chan), last->txd.phys, 1164 sizeof(last->lli), DMA_TO_DEVICE); 1165 1166 dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%08x len %zu " 1167 "period %zu periods %d\n", buf_addr, buf_len, 1168 period_len, periods); 1169 1170 cdesc->periods = periods; 1171 dwc->cdesc = cdesc; 1172 1173 return cdesc; 1174 1175out_err_desc_get: 1176 while (i--) 1177 dwc_desc_put(dwc, cdesc->desc[i]); 1178out_err_alloc: 1179 kfree(cdesc); 1180out_err: 1181 clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags); 1182 return (struct dw_cyclic_desc *)retval; 1183} 1184EXPORT_SYMBOL(dw_dma_cyclic_prep); 1185 1186/** 1187 * dw_dma_cyclic_free - free a prepared cyclic DMA transfer 1188 * @chan: the DMA channel to free 1189 */ 1190void dw_dma_cyclic_free(struct dma_chan *chan) 1191{ 1192 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 1193 struct dw_dma *dw = to_dw_dma(dwc->chan.device); 1194 struct dw_cyclic_desc *cdesc = dwc->cdesc; 1195 int i; 1196 1197 dev_dbg(chan2dev(&dwc->chan), "cyclic free\n"); 1198 1199 if (!cdesc) 1200 return; 1201 1202 spin_lock_bh(&dwc->lock); 1203 1204 channel_clear_bit(dw, CH_EN, dwc->mask); 1205 while (dma_readl(dw, CH_EN) & dwc->mask) 1206 cpu_relax(); 1207 1208 dma_writel(dw, CLEAR.BLOCK, dwc->mask); 1209 dma_writel(dw, CLEAR.ERROR, dwc->mask); 1210 dma_writel(dw, CLEAR.XFER, dwc->mask); 1211 1212 spin_unlock_bh(&dwc->lock); 1213 1214 for (i = 0; i < cdesc->periods; i++) 1215 dwc_desc_put(dwc, cdesc->desc[i]); 1216 1217 kfree(cdesc->desc); 1218 kfree(cdesc); 1219 1220 clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags); 1221} 1222EXPORT_SYMBOL(dw_dma_cyclic_free); 1223 1224/*----------------------------------------------------------------------*/ 1225 1226static void dw_dma_off(struct dw_dma *dw) 1227{ 1228 dma_writel(dw, CFG, 0); 1229 1230 channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask); 1231 channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask); 1232 channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask); 1233 channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask); 1234 channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask); 1235 1236 while (dma_readl(dw, CFG) & DW_CFG_DMA_EN) 1237 cpu_relax(); 1238} 1239 1240static int __init dw_probe(struct platform_device *pdev) 1241{ 1242 struct dw_dma_platform_data *pdata; 1243 struct resource *io; 1244 struct dw_dma *dw; 1245 size_t size; 1246 int irq; 1247 int err; 1248 int i; 1249 1250 pdata = pdev->dev.platform_data; 1251 if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) 1252 return -EINVAL; 1253 1254 io = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1255 if (!io) 1256 return -EINVAL; 1257 1258 irq = platform_get_irq(pdev, 0); 1259 if (irq < 0) 1260 return irq; 1261 1262 size = sizeof(struct dw_dma); 1263 size += pdata->nr_channels * sizeof(struct dw_dma_chan); 1264 dw = kzalloc(size, GFP_KERNEL); 1265 if (!dw) 1266 return -ENOMEM; 1267 1268 if (!request_mem_region(io->start, DW_REGLEN, pdev->dev.driver->name)) { 1269 err = -EBUSY; 1270 goto err_kfree; 1271 } 1272 1273 dw->regs = ioremap(io->start, DW_REGLEN); 1274 if (!dw->regs) { 1275 err = -ENOMEM; 1276 goto err_release_r; 1277 } 1278 1279 dw->clk = clk_get(&pdev->dev, "hclk"); 1280 if (IS_ERR(dw->clk)) { 1281 err = PTR_ERR(dw->clk); 1282 goto err_clk; 1283 } 1284 clk_enable(dw->clk); 1285 1286 /* force dma off, just in case */ 1287 dw_dma_off(dw); 1288 1289 err = request_irq(irq, dw_dma_interrupt, 0, "dw_dmac", dw); 1290 if (err) 1291 goto err_irq; 1292 1293 platform_set_drvdata(pdev, dw); 1294 1295 tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw); 1296 1297 dw->all_chan_mask = (1 << pdata->nr_channels) - 1; 1298 1299 INIT_LIST_HEAD(&dw->dma.channels); 1300 for (i = 0; i < pdata->nr_channels; i++, dw->dma.chancnt++) { 1301 struct dw_dma_chan *dwc = &dw->chan[i]; 1302 1303 dwc->chan.device = &dw->dma; 1304 dwc->chan.cookie = dwc->completed = 1; 1305 dwc->chan.chan_id = i; 1306 list_add_tail(&dwc->chan.device_node, &dw->dma.channels); 1307 1308 dwc->ch_regs = &__dw_regs(dw)->CHAN[i]; 1309 spin_lock_init(&dwc->lock); 1310 dwc->mask = 1 << i; 1311 1312 INIT_LIST_HEAD(&dwc->active_list); 1313 INIT_LIST_HEAD(&dwc->queue); 1314 INIT_LIST_HEAD(&dwc->free_list); 1315 1316 channel_clear_bit(dw, CH_EN, dwc->mask); 1317 } 1318 1319 /* Clear/disable all interrupts on all channels. */ 1320 dma_writel(dw, CLEAR.XFER, dw->all_chan_mask); 1321 dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask); 1322 dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask); 1323 dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask); 1324 dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask); 1325 1326 channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask); 1327 channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask); 1328 channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask); 1329 channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask); 1330 channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask); 1331 1332 dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask); 1333 dma_cap_set(DMA_SLAVE, dw->dma.cap_mask); 1334 dw->dma.dev = &pdev->dev; 1335 dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources; 1336 dw->dma.device_free_chan_resources = dwc_free_chan_resources; 1337 1338 dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy; 1339 1340 dw->dma.device_prep_slave_sg = dwc_prep_slave_sg; 1341 dw->dma.device_terminate_all = dwc_terminate_all; 1342 1343 dw->dma.device_is_tx_complete = dwc_is_tx_complete; 1344 dw->dma.device_issue_pending = dwc_issue_pending; 1345 1346 dma_writel(dw, CFG, DW_CFG_DMA_EN); 1347 1348 printk(KERN_INFO "%s: DesignWare DMA Controller, %d channels\n", 1349 dev_name(&pdev->dev), dw->dma.chancnt); 1350 1351 dma_async_device_register(&dw->dma); 1352 1353 return 0; 1354 1355err_irq: 1356 clk_disable(dw->clk); 1357 clk_put(dw->clk); 1358err_clk: 1359 iounmap(dw->regs); 1360 dw->regs = NULL; 1361err_release_r: 1362 release_resource(io); 1363err_kfree: 1364 kfree(dw); 1365 return err; 1366} 1367 1368static int __exit dw_remove(struct platform_device *pdev) 1369{ 1370 struct dw_dma *dw = platform_get_drvdata(pdev); 1371 struct dw_dma_chan *dwc, *_dwc; 1372 struct resource *io; 1373 1374 dw_dma_off(dw); 1375 dma_async_device_unregister(&dw->dma); 1376 1377 free_irq(platform_get_irq(pdev, 0), dw); 1378 tasklet_kill(&dw->tasklet); 1379 1380 list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels, 1381 chan.device_node) { 1382 list_del(&dwc->chan.device_node); 1383 channel_clear_bit(dw, CH_EN, dwc->mask); 1384 } 1385 1386 clk_disable(dw->clk); 1387 clk_put(dw->clk); 1388 1389 iounmap(dw->regs); 1390 dw->regs = NULL; 1391 1392 io = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1393 release_mem_region(io->start, DW_REGLEN); 1394 1395 kfree(dw); 1396 1397 return 0; 1398} 1399 1400static void dw_shutdown(struct platform_device *pdev) 1401{ 1402 struct dw_dma *dw = platform_get_drvdata(pdev); 1403 1404 dw_dma_off(platform_get_drvdata(pdev)); 1405 clk_disable(dw->clk); 1406} 1407 1408static int dw_suspend_noirq(struct device *dev) 1409{ 1410 struct platform_device *pdev = to_platform_device(dev); 1411 struct dw_dma *dw = platform_get_drvdata(pdev); 1412 1413 dw_dma_off(platform_get_drvdata(pdev)); 1414 clk_disable(dw->clk); 1415 return 0; 1416} 1417 1418static int dw_resume_noirq(struct device *dev) 1419{ 1420 struct platform_device *pdev = to_platform_device(dev); 1421 struct dw_dma *dw = platform_get_drvdata(pdev); 1422 1423 clk_enable(dw->clk); 1424 dma_writel(dw, CFG, DW_CFG_DMA_EN); 1425 return 0; 1426} 1427 1428static const struct dev_pm_ops dw_dev_pm_ops = { 1429 .suspend_noirq = dw_suspend_noirq, 1430 .resume_noirq = dw_resume_noirq, 1431}; 1432 1433static struct platform_driver dw_driver = { 1434 .remove = __exit_p(dw_remove), 1435 .shutdown = dw_shutdown, 1436 .driver = { 1437 .name = "dw_dmac", 1438 .pm = &dw_dev_pm_ops, 1439 }, 1440}; 1441 1442static int __init dw_init(void) 1443{ 1444 return platform_driver_probe(&dw_driver, dw_probe); 1445} 1446module_init(dw_init); 1447 1448static void __exit dw_exit(void) 1449{ 1450 platform_driver_unregister(&dw_driver); 1451} 1452module_exit(dw_exit); 1453 1454MODULE_LICENSE("GPL v2"); 1455MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver"); 1456MODULE_AUTHOR("Haavard Skinnemoen <haavard.skinnemoen@atmel.com>");