Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v3.5-rc5, 1524 lines, 39 kB

/*
 * Renesas SuperH DMA Engine support
 *
 * base is drivers/dma/fsldma.c
 *
 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * - DMA of SuperH does not have Hardware DMA chain mode.
 * - MAX DMA size is 16MB.
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/sh_dma.h>
#include <linux/notifier.h>
#include <linux/kdebug.h>
#include <linux/spinlock.h>
#include <linux/rculist.h>

#include "dmaengine.h"
#include "shdma.h"

/* DMA descriptor control */
enum sh_dmae_desc_status {
	DESC_IDLE,
	DESC_PREPARED,
	DESC_SUBMITTED,
	DESC_COMPLETED,	/* completed, have to call callback */
	DESC_WAITING,	/* callback called, waiting for ack / re-submit */
};

#define NR_DESCS_PER_CHANNEL 32
/* Default MEMCPY transfer size = 2^2 = 4 bytes */
#define LOG2_DEFAULT_XFER_SIZE	2

/*
 * Used for write-side mutual exclusion for the global device list,
 * read-side synchronization by way of RCU, and per-controller data.
 */
static DEFINE_SPINLOCK(sh_dmae_lock);
static LIST_HEAD(sh_dmae_devices);

/* A bitmask with bits enough for enum sh_dmae_slave_chan_id */
static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SH_DMA_SLAVE_NUMBER)];

static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all);
static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan);

static void chclr_write(struct sh_dmae_chan *sh_dc, u32 data)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);

	__raw_writel(data, shdev->chan_reg +
		     shdev->pdata->channel[sh_dc->id].chclr_offset);
}

static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
{
	__raw_writel(data, sh_dc->base + reg / sizeof(u32));
}

static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
{
	return __raw_readl(sh_dc->base + reg / sizeof(u32));
}

static u16 dmaor_read(struct sh_dmae_device *shdev)
{
	u32 __iomem *addr = shdev->chan_reg + DMAOR / sizeof(u32);

	if (shdev->pdata->dmaor_is_32bit)
		return __raw_readl(addr);
	else
		return __raw_readw(addr);
}

static void dmaor_write(struct sh_dmae_device *shdev, u16 data)
{
	u32 __iomem *addr = shdev->chan_reg + DMAOR / sizeof(u32);

	if (shdev->pdata->dmaor_is_32bit)
		__raw_writel(data, addr);
	else
		__raw_writew(data, addr);
}

static void chcr_write(struct sh_dmae_chan *sh_dc, u32 data)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);

	__raw_writel(data, sh_dc->base + shdev->chcr_offset / sizeof(u32));
}

static u32 chcr_read(struct sh_dmae_chan *sh_dc)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);

	return __raw_readl(sh_dc->base + shdev->chcr_offset / sizeof(u32));
}

/*
 * Reset DMA controller
 *
 * SH7780 has two DMAOR registers
 */
static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev)
{
	unsigned short dmaor;
	unsigned long flags;

	spin_lock_irqsave(&sh_dmae_lock, flags);

	dmaor = dmaor_read(shdev);
	dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME));

	spin_unlock_irqrestore(&sh_dmae_lock, flags);
}

static int sh_dmae_rst(struct sh_dmae_device *shdev)
{
	unsigned short dmaor;
	unsigned long flags;

	spin_lock_irqsave(&sh_dmae_lock, flags);

	dmaor = dmaor_read(shdev) & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME);

	if (shdev->pdata->chclr_present) {
		int i;
		for (i = 0; i < shdev->pdata->channel_num; i++) {
			struct sh_dmae_chan *sh_chan = shdev->chan[i];
			if (sh_chan)
				chclr_write(sh_chan, 0);
		}
	}

	dmaor_write(shdev, dmaor | shdev->pdata->dmaor_init);

	dmaor = dmaor_read(shdev);

	spin_unlock_irqrestore(&sh_dmae_lock, flags);

	if (dmaor & (DMAOR_AE | DMAOR_NMIF)) {
		dev_warn(shdev->common.dev, "Can't initialize DMAOR.\n");
		return -EIO;
	}
	if (shdev->pdata->dmaor_init & ~dmaor)
		dev_warn(shdev->common.dev,
			 "DMAOR=0x%x hasn't latched the initial value 0x%x.\n",
			 dmaor, shdev->pdata->dmaor_init);
	return 0;
}

static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = chcr_read(sh_chan);

	if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE)
		return true; /* working */

	return false; /* waiting */
}

static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) |
		((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift);

	if (cnt >= pdata->ts_shift_num)
		cnt = 0;

	return pdata->ts_shift[cnt];
}

static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	int i;

	for (i = 0; i < pdata->ts_shift_num; i++)
		if (pdata->ts_shift[i] == l2size)
			break;

	if (i == pdata->ts_shift_num)
		i = 0;

	return ((i << pdata->ts_low_shift) & pdata->ts_low_mask) |
		((i << pdata->ts_high_shift) & pdata->ts_high_mask);
}

static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
{
	sh_dmae_writel(sh_chan, hw->sar, SAR);
	sh_dmae_writel(sh_chan, hw->dar, DAR);
	sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR);
}

static void dmae_start(struct sh_dmae_chan *sh_chan)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	u32 chcr = chcr_read(sh_chan);

	if (shdev->pdata->needs_tend_set)
		sh_dmae_writel(sh_chan, 0xFFFFFFFF, TEND);

	chcr |= CHCR_DE | shdev->chcr_ie_bit;
	chcr_write(sh_chan, chcr & ~CHCR_TE);
}

static void dmae_halt(struct sh_dmae_chan *sh_chan)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	u32 chcr = chcr_read(sh_chan);

	chcr &= ~(CHCR_DE | CHCR_TE | shdev->chcr_ie_bit);
	chcr_write(sh_chan, chcr);
}

static void dmae_init(struct sh_dmae_chan *sh_chan)
{
	/*
	 * Default configuration for dual address memory-memory transfer.
	 * 0x400 represents auto-request.
	 */
	u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan,
						   LOG2_DEFAULT_XFER_SIZE);
	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr);
	chcr_write(sh_chan, chcr);
}

static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
{
	/* If DMA is active, cannot set CHCR. TODO: remove this superfluous check */
	if (dmae_is_busy(sh_chan))
		return -EBUSY;

	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val);
	chcr_write(sh_chan, val);

	return 0;
}

static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id];
	u16 __iomem *addr = shdev->dmars;
	unsigned int shift = chan_pdata->dmars_bit;

	if (dmae_is_busy(sh_chan))
		return -EBUSY;

	if (pdata->no_dmars)
		return 0;

	/* in the case of a missing DMARS resource use first memory window */
	if (!addr)
		addr = (u16 __iomem *)shdev->chan_reg;
	addr += chan_pdata->dmars / sizeof(u16);

	__raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift),
		     addr);

	return 0;
}

static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct sh_desc *desc = tx_to_sh_desc(tx), *chunk, *last = desc, *c;
	struct sh_dmae_chan *sh_chan = to_sh_chan(tx->chan);
	struct sh_dmae_slave *param = tx->chan->private;
	dma_async_tx_callback callback = tx->callback;
	dma_cookie_t cookie;
	bool power_up;

	spin_lock_irq(&sh_chan->desc_lock);

	if (list_empty(&sh_chan->ld_queue))
		power_up = true;
	else
		power_up = false;

	cookie = dma_cookie_assign(tx);

	/* Mark all chunks of this descriptor as submitted, move to the queue */
	list_for_each_entry_safe(chunk, c, desc->node.prev, node) {
		/*
		 * All chunks are on the global ld_free, so, we have to find
		 * the end of the chain ourselves
		 */
		if (chunk != desc && (chunk->mark == DESC_IDLE ||
				      chunk->async_tx.cookie > 0 ||
				      chunk->async_tx.cookie == -EBUSY ||
				      &chunk->node == &sh_chan->ld_free))
			break;
		chunk->mark = DESC_SUBMITTED;
		/* Callback goes to the last chunk */
		chunk->async_tx.callback = NULL;
		chunk->cookie = cookie;
		list_move_tail(&chunk->node, &sh_chan->ld_queue);
		last = chunk;
	}

	last->async_tx.callback = callback;
	last->async_tx.callback_param = tx->callback_param;

	dev_dbg(sh_chan->dev, "submit #%d@%p on %d: %x[%d] -> %x\n",
		tx->cookie, &last->async_tx, sh_chan->id,
		desc->hw.sar, desc->hw.tcr, desc->hw.dar);

	if (power_up) {
		sh_chan->pm_state = DMAE_PM_BUSY;

		pm_runtime_get(sh_chan->dev);

		spin_unlock_irq(&sh_chan->desc_lock);

		pm_runtime_barrier(sh_chan->dev);

		spin_lock_irq(&sh_chan->desc_lock);

		/* Have we been reset, while waiting? */
		if (sh_chan->pm_state != DMAE_PM_ESTABLISHED) {
			dev_dbg(sh_chan->dev, "Bring up channel %d\n",
				sh_chan->id);
			if (param) {
				const struct sh_dmae_slave_config *cfg =
					param->config;

				dmae_set_dmars(sh_chan, cfg->mid_rid);
				dmae_set_chcr(sh_chan, cfg->chcr);
			} else {
				dmae_init(sh_chan);
			}

			if (sh_chan->pm_state == DMAE_PM_PENDING)
				sh_chan_xfer_ld_queue(sh_chan);
			sh_chan->pm_state = DMAE_PM_ESTABLISHED;
		}
	} else {
		sh_chan->pm_state = DMAE_PM_PENDING;
	}

	spin_unlock_irq(&sh_chan->desc_lock);

	return cookie;
}

/* Called with desc_lock held */
static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan)
{
	struct sh_desc *desc;

	list_for_each_entry(desc, &sh_chan->ld_free, node)
		if (desc->mark != DESC_PREPARED) {
			BUG_ON(desc->mark != DESC_IDLE);
			list_del(&desc->node);
			return desc;
		}

	return NULL;
}

static const struct sh_dmae_slave_config *sh_dmae_find_slave(
	struct sh_dmae_chan *sh_chan, struct sh_dmae_slave *param)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	int i;

	if (param->slave_id >= SH_DMA_SLAVE_NUMBER)
		return NULL;

	for (i = 0; i < pdata->slave_num; i++)
		if (pdata->slave[i].slave_id == param->slave_id)
			return pdata->slave + i;

	return NULL;
}

static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	struct sh_desc *desc;
	struct sh_dmae_slave *param = chan->private;
	int ret;

	/*
	 * This relies on the guarantee from dmaengine that alloc_chan_resources
	 * never runs concurrently with itself or free_chan_resources.
	 */
	if (param) {
		const struct sh_dmae_slave_config *cfg;

		cfg = sh_dmae_find_slave(sh_chan, param);
		if (!cfg) {
			ret = -EINVAL;
			goto efindslave;
		}

		if (test_and_set_bit(param->slave_id, sh_dmae_slave_used)) {
			ret = -EBUSY;
			goto etestused;
		}

		param->config = cfg;
	}

	while (sh_chan->descs_allocated < NR_DESCS_PER_CHANNEL) {
		desc = kzalloc(sizeof(struct sh_desc), GFP_KERNEL);
		if (!desc)
			break;
		dma_async_tx_descriptor_init(&desc->async_tx,
					&sh_chan->common);
		desc->async_tx.tx_submit = sh_dmae_tx_submit;
		desc->mark = DESC_IDLE;

		list_add(&desc->node, &sh_chan->ld_free);
		sh_chan->descs_allocated++;
	}

	if (!sh_chan->descs_allocated) {
		ret = -ENOMEM;
		goto edescalloc;
	}

	return sh_chan->descs_allocated;

edescalloc:
	if (param)
		clear_bit(param->slave_id, sh_dmae_slave_used);
etestused:
efindslave:
	chan->private = NULL;
	return ret;
}

/*
 * sh_dmae_free_chan_resources - Free all resources of the channel.
 */
static void sh_dmae_free_chan_resources(struct dma_chan *chan)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	struct sh_desc *desc, *_desc;
	LIST_HEAD(list);

	/* Protect against ISR */
	spin_lock_irq(&sh_chan->desc_lock);
	dmae_halt(sh_chan);
	spin_unlock_irq(&sh_chan->desc_lock);

	/* Now no new interrupts will occur */

	/* Prepared and not submitted descriptors can still be on the queue */
	if (!list_empty(&sh_chan->ld_queue))
		sh_dmae_chan_ld_cleanup(sh_chan, true);

	if (chan->private) {
		/* The caller is holding dma_list_mutex */
		struct sh_dmae_slave *param = chan->private;
		clear_bit(param->slave_id, sh_dmae_slave_used);
		chan->private = NULL;
	}

	spin_lock_irq(&sh_chan->desc_lock);

	list_splice_init(&sh_chan->ld_free, &list);
	sh_chan->descs_allocated = 0;

	spin_unlock_irq(&sh_chan->desc_lock);

	list_for_each_entry_safe(desc, _desc, &list, node)
		kfree(desc);
}

/**
 * sh_dmae_add_desc - get, set up and return one transfer descriptor
 * @sh_chan:	DMA channel
 * @flags:	DMA transfer flags
 * @dest:	destination DMA address, incremented when direction equals
 *		DMA_DEV_TO_MEM
 * @src:	source DMA address, incremented when direction equals
 *		DMA_MEM_TO_DEV
 * @len:	DMA transfer length
 * @first:	if NULL, set to the current descriptor and cookie set to -EBUSY
 * @direction:	needed for slave DMA to decide which address to keep constant,
 *		equals DMA_MEM_TO_MEM for MEMCPY
 * Returns 0 or an error
 * Locks: called with desc_lock held
 */
static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan,
	unsigned long flags, dma_addr_t *dest, dma_addr_t *src, size_t *len,
	struct sh_desc **first, enum dma_transfer_direction direction)
{
	struct sh_desc *new;
	size_t copy_size;

	if (!*len)
		return NULL;

	/* Allocate the link descriptor from the free list */
	new = sh_dmae_get_desc(sh_chan);
	if (!new) {
		dev_err(sh_chan->dev, "No free link descriptor available\n");
		return NULL;
	}

	copy_size = min(*len, (size_t)SH_DMA_TCR_MAX + 1);

	new->hw.sar = *src;
	new->hw.dar = *dest;
	new->hw.tcr = copy_size;

	if (!*first) {
		/* First desc */
		new->async_tx.cookie = -EBUSY;
		*first = new;
	} else {
		/* Other desc - invisible to the user */
		new->async_tx.cookie = -EINVAL;
	}

	dev_dbg(sh_chan->dev,
		"chaining (%u/%u)@%x -> %x with %p, cookie %d, shift %d\n",
		copy_size, *len, *src, *dest, &new->async_tx,
		new->async_tx.cookie, sh_chan->xmit_shift);

	new->mark = DESC_PREPARED;
	new->async_tx.flags = flags;
	new->direction = direction;

	*len -= copy_size;
	if (direction == DMA_MEM_TO_MEM || direction == DMA_MEM_TO_DEV)
		*src += copy_size;
	if (direction == DMA_MEM_TO_MEM || direction == DMA_DEV_TO_MEM)
		*dest += copy_size;

	return new;
}

/*
 * sh_dmae_prep_sg - prepare transfer descriptors from an SG list
 *
 * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
 * converted to scatter-gather to guarantee consistent locking and a correct
 * list manipulation. For slave DMA direction carries the usual meaning, and,
 * logically, the SG list is RAM and the addr variable contains slave address,
 * e.g., the FIFO I/O register. For MEMCPY direction equals DMA_MEM_TO_MEM
 * and the SG list contains only one element and points at the source buffer.
 */
static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_chan,
	struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr,
	enum dma_transfer_direction direction, unsigned long flags)
{
	struct scatterlist *sg;
	struct sh_desc *first = NULL, *new = NULL /* compiler... */;
	LIST_HEAD(tx_list);
	int chunks = 0;
	unsigned long irq_flags;
	int i;

	if (!sg_len)
		return NULL;

	for_each_sg(sgl, sg, sg_len, i)
		chunks += (sg_dma_len(sg) + SH_DMA_TCR_MAX) /
			(SH_DMA_TCR_MAX + 1);

	/* Have to lock the whole loop to protect against concurrent release */
	spin_lock_irqsave(&sh_chan->desc_lock, irq_flags);

	/*
	 * Chaining:
	 * first descriptor is what user is dealing with in all API calls, its
	 *	cookie is at first set to -EBUSY, at tx-submit to a positive
	 *	number
	 * if more than one chunk is needed further chunks have cookie = -EINVAL
	 * the last chunk, if not equal to the first, has cookie = -ENOSPC
	 * all chunks are linked onto the tx_list head with their .node heads
	 *	only during this function, then they are immediately spliced
	 *	back onto the free list in form of a chain
	 */
	for_each_sg(sgl, sg, sg_len, i) {
		dma_addr_t sg_addr = sg_dma_address(sg);
		size_t len = sg_dma_len(sg);

		if (!len)
			goto err_get_desc;

		do {
			dev_dbg(sh_chan->dev, "Add SG #%d@%p[%d], dma %llx\n",
				i, sg, len, (unsigned long long)sg_addr);

			if (direction == DMA_DEV_TO_MEM)
				new = sh_dmae_add_desc(sh_chan, flags,
						&sg_addr, addr, &len, &first,
						direction);
			else
				new = sh_dmae_add_desc(sh_chan, flags,
						addr, &sg_addr, &len, &first,
						direction);
			if (!new)
				goto err_get_desc;

			new->chunks = chunks--;
			list_add_tail(&new->node, &tx_list);
		} while (len);
	}

	if (new != first)
		new->async_tx.cookie = -ENOSPC;

	/* Put them back on the free list, so, they don't get lost */
	list_splice_tail(&tx_list, &sh_chan->ld_free);

	spin_unlock_irqrestore(&sh_chan->desc_lock, irq_flags);

	return &first->async_tx;

err_get_desc:
	list_for_each_entry(new, &tx_list, node)
		new->mark = DESC_IDLE;
	list_splice(&tx_list, &sh_chan->ld_free);

	spin_unlock_irqrestore(&sh_chan->desc_lock, irq_flags);

	return NULL;
}

static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
	struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
	size_t len, unsigned long flags)
{
	struct sh_dmae_chan *sh_chan;
	struct scatterlist sg;

	if (!chan || !len)
		return NULL;

	sh_chan = to_sh_chan(chan);

	sg_init_table(&sg, 1);
	sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len,
		    offset_in_page(dma_src));
	sg_dma_address(&sg) = dma_src;
	sg_dma_len(&sg) = len;

	return sh_dmae_prep_sg(sh_chan, &sg, 1, &dma_dest, DMA_MEM_TO_MEM,
			       flags);
}

static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags,
	void *context)
{
	struct sh_dmae_slave *param;
	struct sh_dmae_chan *sh_chan;
	dma_addr_t slave_addr;

	if (!chan)
		return NULL;

	sh_chan = to_sh_chan(chan);
	param = chan->private;

	/* Someone calling slave DMA on a public channel? */
	if (!param || !sg_len) {
		dev_warn(sh_chan->dev, "%s: bad parameter: %p, %d, %d\n",
			 __func__, param, sg_len, param ? param->slave_id : -1);
		return NULL;
	}

	slave_addr = param->config->addr;

	/*
	 * if (param != NULL), this is a successfully requested slave channel,
	 * therefore param->config != NULL too.
	 */
	return sh_dmae_prep_sg(sh_chan, sgl, sg_len, &slave_addr,
			       direction, flags);
}

static int sh_dmae_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			   unsigned long arg)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	unsigned long flags;

	/* Only supports DMA_TERMINATE_ALL */
	if (cmd != DMA_TERMINATE_ALL)
		return -ENXIO;

	if (!chan)
		return -EINVAL;

	spin_lock_irqsave(&sh_chan->desc_lock, flags);
	dmae_halt(sh_chan);

	if (!list_empty(&sh_chan->ld_queue)) {
		/* Record partial transfer */
		struct sh_desc *desc = list_entry(sh_chan->ld_queue.next,
						  struct sh_desc, node);
		desc->partial = (desc->hw.tcr - sh_dmae_readl(sh_chan, TCR)) <<
			sh_chan->xmit_shift;
	}
	spin_unlock_irqrestore(&sh_chan->desc_lock, flags);

	sh_dmae_chan_ld_cleanup(sh_chan, true);

	return 0;
}

static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
{
	struct sh_desc *desc, *_desc;
	/* Is the "exposed" head of a chain acked? */
	bool head_acked = false;
	dma_cookie_t cookie = 0;
	dma_async_tx_callback callback = NULL;
	void *param = NULL;
	unsigned long flags;

	spin_lock_irqsave(&sh_chan->desc_lock, flags);
	list_for_each_entry_safe(desc, _desc, &sh_chan->ld_queue, node) {
		struct dma_async_tx_descriptor *tx = &desc->async_tx;

		BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie);
		BUG_ON(desc->mark != DESC_SUBMITTED &&
		       desc->mark != DESC_COMPLETED &&
		       desc->mark != DESC_WAITING);

		/*
		 * queue is ordered, and we use this loop to (1) clean up all
		 * completed descriptors, and to (2) update descriptor flags of
		 * any chunks in a (partially) completed chain
		 */
		if (!all && desc->mark == DESC_SUBMITTED &&
		    desc->cookie != cookie)
			break;

		if (tx->cookie > 0)
			cookie = tx->cookie;

		if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {
			if (sh_chan->common.completed_cookie != desc->cookie - 1)
				dev_dbg(sh_chan->dev,
					"Completing cookie %d, expected %d\n",
					desc->cookie,
					sh_chan->common.completed_cookie + 1);
			sh_chan->common.completed_cookie = desc->cookie;
		}

		/* Call callback on the last chunk */
		if (desc->mark == DESC_COMPLETED && tx->callback) {
			desc->mark = DESC_WAITING;
			callback = tx->callback;
			param = tx->callback_param;
			dev_dbg(sh_chan->dev, "descriptor #%d@%p on %d callback\n",
				tx->cookie, tx, sh_chan->id);
			BUG_ON(desc->chunks != 1);
			break;
		}

		if (tx->cookie > 0 || tx->cookie == -EBUSY) {
			if (desc->mark == DESC_COMPLETED) {
				BUG_ON(tx->cookie < 0);
				desc->mark = DESC_WAITING;
			}
			head_acked = async_tx_test_ack(tx);
		} else {
			switch (desc->mark) {
			case DESC_COMPLETED:
				desc->mark = DESC_WAITING;
				/* Fall through */
			case DESC_WAITING:
				if (head_acked)
					async_tx_ack(&desc->async_tx);
			}
		}

		dev_dbg(sh_chan->dev, "descriptor %p #%d completed.\n",
			tx, tx->cookie);

		if (((desc->mark == DESC_COMPLETED ||
		      desc->mark == DESC_WAITING) &&
		     async_tx_test_ack(&desc->async_tx)) || all) {
			/* Remove from ld_queue list */
			desc->mark = DESC_IDLE;

			list_move(&desc->node, &sh_chan->ld_free);

			if (list_empty(&sh_chan->ld_queue)) {
				dev_dbg(sh_chan->dev, "Bring down channel %d\n", sh_chan->id);
				pm_runtime_put(sh_chan->dev);
			}
		}
	}

	if (all && !callback)
		/*
		 * Terminating and the loop completed normally: forgive
		 * uncompleted cookies
		 */
		sh_chan->common.completed_cookie = sh_chan->common.cookie;

	spin_unlock_irqrestore(&sh_chan->desc_lock, flags);

	if (callback)
		callback(param);

	return callback;
}

/*
 * sh_chan_ld_cleanup - Clean up link descriptors
 *
 * This function cleans up the ld_queue of DMA channel.
 */
static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
{
	while (__ld_cleanup(sh_chan, all))
		;
}

/* Called under spin_lock_irq(&sh_chan->desc_lock) */
static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
{
	struct sh_desc *desc;

	/* DMA work check */
	if (dmae_is_busy(sh_chan))
		return;

	/* Find the first not transferred descriptor */
	list_for_each_entry(desc, &sh_chan->ld_queue, node)
		if (desc->mark == DESC_SUBMITTED) {
			dev_dbg(sh_chan->dev, "Queue #%d to %d: %u@%x -> %x\n",
				desc->async_tx.cookie, sh_chan->id,
				desc->hw.tcr, desc->hw.sar, desc->hw.dar);
			/* Get the ld start address from ld_queue */
			dmae_set_reg(sh_chan, &desc->hw);
			dmae_start(sh_chan);
			break;
		}
}

static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);

	spin_lock_irq(&sh_chan->desc_lock);
	if (sh_chan->pm_state == DMAE_PM_ESTABLISHED)
		sh_chan_xfer_ld_queue(sh_chan);
	else
		sh_chan->pm_state = DMAE_PM_PENDING;
	spin_unlock_irq(&sh_chan->desc_lock);
}

static enum dma_status sh_dmae_tx_status(struct dma_chan *chan,
					dma_cookie_t cookie,
					struct dma_tx_state *txstate)
{
	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
	enum dma_status status;
	unsigned long flags;

	sh_dmae_chan_ld_cleanup(sh_chan, false);

	spin_lock_irqsave(&sh_chan->desc_lock, flags);

	status = dma_cookie_status(chan, cookie, txstate);

	/*
	 * If we don't find cookie on the queue, it has been aborted and we have
	 * to report error
	 */
	if (status != DMA_SUCCESS) {
		struct sh_desc *desc;
		status = DMA_ERROR;
		list_for_each_entry(desc, &sh_chan->ld_queue, node)
			if (desc->cookie == cookie) {
				status = DMA_IN_PROGRESS;
				break;
			}
	}

	spin_unlock_irqrestore(&sh_chan->desc_lock, flags);

	return status;
}

static irqreturn_t sh_dmae_interrupt(int irq, void *data)
{
	irqreturn_t ret = IRQ_NONE;
	struct sh_dmae_chan *sh_chan = data;
	u32 chcr;

	spin_lock(&sh_chan->desc_lock);

	chcr = chcr_read(sh_chan);

	if (chcr & CHCR_TE) {
		/* DMA stop */
		dmae_halt(sh_chan);

		ret = IRQ_HANDLED;
		tasklet_schedule(&sh_chan->tasklet);
	}

	spin_unlock(&sh_chan->desc_lock);

	return ret;
}

/* Called from error IRQ or NMI */
static bool sh_dmae_reset(struct sh_dmae_device *shdev)
{
	unsigned int handled = 0;
	int i;

	/* halt the dma controller */
	sh_dmae_ctl_stop(shdev);

	/* We cannot detect which channel caused the error, have to reset all */
	for (i = 0; i < SH_DMAC_MAX_CHANNELS; i++) {
		struct sh_dmae_chan *sh_chan = shdev->chan[i];
		struct sh_desc *desc;
		LIST_HEAD(dl);

		if (!sh_chan)
			continue;

		spin_lock(&sh_chan->desc_lock);

		/* Stop the channel */
		dmae_halt(sh_chan);

		list_splice_init(&sh_chan->ld_queue, &dl);

		if (!list_empty(&dl)) {
			dev_dbg(sh_chan->dev, "Bring down channel %d\n", sh_chan->id);
			pm_runtime_put(sh_chan->dev);
		}
		sh_chan->pm_state = DMAE_PM_ESTABLISHED;

		spin_unlock(&sh_chan->desc_lock);

		/* Complete all */
		list_for_each_entry(desc, &dl, node) {
			struct dma_async_tx_descriptor *tx = &desc->async_tx;
			desc->mark = DESC_IDLE;
			if (tx->callback)
				tx->callback(tx->callback_param);
		}

		spin_lock(&sh_chan->desc_lock);
		list_splice(&dl, &sh_chan->ld_free);
		spin_unlock(&sh_chan->desc_lock);

		handled++;
	}

	sh_dmae_rst(shdev);

	return !!handled;
}

static irqreturn_t sh_dmae_err(int irq, void *data)
{
	struct sh_dmae_device *shdev = data;

	if (!(dmaor_read(shdev) & DMAOR_AE))
		return IRQ_NONE;

	sh_dmae_reset(data);
	return IRQ_HANDLED;
}

static void dmae_do_tasklet(unsigned long data)
{
	struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
	struct sh_desc *desc;
	u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
	u32 dar_buf = sh_dmae_readl(sh_chan, DAR);

	spin_lock_irq(&sh_chan->desc_lock);
	list_for_each_entry(desc, &sh_chan->ld_queue, node) {
		if (desc->mark == DESC_SUBMITTED &&
		    ((desc->direction == DMA_DEV_TO_MEM &&
		      (desc->hw.dar + desc->hw.tcr) == dar_buf) ||
		     (desc->hw.sar + desc->hw.tcr) == sar_buf)) {
			dev_dbg(sh_chan->dev, "done #%d@%p dst %u\n",
				desc->async_tx.cookie, &desc->async_tx,
				desc->hw.dar);
			desc->mark = DESC_COMPLETED;
			break;
		}
	}
	/* Next desc */
	sh_chan_xfer_ld_queue(sh_chan);
	spin_unlock_irq(&sh_chan->desc_lock);

	sh_dmae_chan_ld_cleanup(sh_chan, false);
}

static bool sh_dmae_nmi_notify(struct sh_dmae_device *shdev)
{
	/* Fast path out if NMIF is not asserted for this controller */
	if ((dmaor_read(shdev) & DMAOR_NMIF) == 0)
		return false;

	return sh_dmae_reset(shdev);
}

static int sh_dmae_nmi_handler(struct notifier_block *self,
			       unsigned long cmd, void *data)
{
	struct sh_dmae_device *shdev;
	int ret = NOTIFY_DONE;
	bool triggered;

	/*
	 * Only concern ourselves with NMI events.
	 *
	 * Normally we would check the die chain value, but as this needs
	 * to be architecture independent, check for NMI context instead.
	 */
	if (!in_nmi())
		return NOTIFY_DONE;

	rcu_read_lock();
	list_for_each_entry_rcu(shdev, &sh_dmae_devices, node) {
		/*
		 * Only stop if one of the controllers has NMIF asserted,
		 * we do not want to interfere with regular address error
		 * handling or NMI events that don't concern the DMACs.
		 */
		triggered = sh_dmae_nmi_notify(shdev);
		if (triggered == true)
			ret = NOTIFY_OK;
	}
	rcu_read_unlock();

	return ret;
}

static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
	.notifier_call = sh_dmae_nmi_handler,

	/* Run before NMI debug handler and KGDB */
	.priority = 1,
};

static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
					int irq, unsigned long flags)
{
	int err;
	const struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id];
	struct platform_device *pdev = to_platform_device(shdev->common.dev);
	struct sh_dmae_chan *new_sh_chan;

	/* alloc channel */
	new_sh_chan = kzalloc(sizeof(struct sh_dmae_chan), GFP_KERNEL);
	if (!new_sh_chan) {
		dev_err(shdev->common.dev,
			"No free memory for allocating dma channels!\n");
		return -ENOMEM;
	}

	new_sh_chan->pm_state = DMAE_PM_ESTABLISHED;

	/* reference struct dma_device */
	new_sh_chan->common.device = &shdev->common;
	dma_cookie_init(&new_sh_chan->common);

	new_sh_chan->dev = shdev->common.dev;
	new_sh_chan->id = id;
	new_sh_chan->irq = irq;
	new_sh_chan->base = shdev->chan_reg + chan_pdata->offset / sizeof(u32);

	/* Init DMA tasklet */
	tasklet_init(&new_sh_chan->tasklet, dmae_do_tasklet,
		     (unsigned long)new_sh_chan);

	spin_lock_init(&new_sh_chan->desc_lock);

	/* Init descriptor manage list */
	INIT_LIST_HEAD(&new_sh_chan->ld_queue);
	INIT_LIST_HEAD(&new_sh_chan->ld_free);

	/* Add the channel to DMA device channel list */
	list_add_tail(&new_sh_chan->common.device_node,
		      &shdev->common.channels);
	shdev->common.chancnt++;

	if (pdev->id >= 0)
		snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
			 "sh-dmae%d.%d", pdev->id, new_sh_chan->id);
	else
		snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
			 "sh-dma%d", new_sh_chan->id);

	/* set up channel irq */
	err = request_irq(irq, &sh_dmae_interrupt, flags,
			  new_sh_chan->dev_id, new_sh_chan);
	if (err) {
		dev_err(shdev->common.dev, "DMA channel %d request_irq error "
			"with return %d\n", id, err);
		goto err_no_irq;
	}

	shdev->chan[id] = new_sh_chan;
	return 0;

err_no_irq:
	/* remove from dmaengine device node */
	list_del(&new_sh_chan->common.device_node);
	kfree(new_sh_chan);
	return err;
}

static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
{
	int i;

	for (i = shdev->common.chancnt - 1 ; i >= 0 ; i--) {
		if (shdev->chan[i]) {
			struct sh_dmae_chan *sh_chan = shdev->chan[i];

			free_irq(sh_chan->irq, sh_chan);

			list_del(&sh_chan->common.device_node);
			kfree(sh_chan);
			shdev->chan[i] = NULL;
		}
	}
	shdev->common.chancnt = 0;
}

static int __init sh_dmae_probe(struct platform_device *pdev)
{
	struct sh_dmae_pdata *pdata = pdev->dev.platform_data;
	unsigned long irqflags = IRQF_DISABLED,
		chan_flag[SH_DMAC_MAX_CHANNELS] = {};
	int errirq, chan_irq[SH_DMAC_MAX_CHANNELS];
	int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0;
	struct sh_dmae_device *shdev;
	struct resource *chan, *dmars, *errirq_res, *chanirq_res;

	/* get platform data */
	if (!pdata || !pdata->channel_num)
		return -ENODEV;

	chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	/* DMARS area is optional */
	dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	/*
	 * IRQ resources:
	 * 1. there always must be at least one IRQ IO-resource. On SH4 it is
	 *    the error IRQ, in which case it is the only IRQ in this resource:
	 *    start == end. If it is the only IRQ resource, all channels also
	 *    use the same IRQ.
	 * 2. DMA channel IRQ resources can be specified one per resource or in
	 *    ranges (start != end)
	 * 3. iff all events (channels and, optionally, error) on this
	 *    controller use the same IRQ, only one IRQ resource can be
	 *    specified, otherwise there must be one IRQ per channel, even if
	 *    some of them are equal
	 * 4. if all IRQs on this controller are equal or if some specific IRQs
	 *    specify IORESOURCE_IRQ_SHAREABLE in their resources, they will be
	 *    requested with the IRQF_SHARED flag
	 */
	errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!chan || !errirq_res)
		return -ENODEV;

	if (!request_mem_region(chan->start, resource_size(chan), pdev->name)) {
		dev_err(&pdev->dev, "DMAC register region already claimed\n");
		return -EBUSY;
	}

	if (dmars && !request_mem_region(dmars->start, resource_size(dmars), pdev->name)) {
		dev_err(&pdev->dev, "DMAC DMARS region already claimed\n");
		err = -EBUSY;
		goto ermrdmars;
	}

	err = -ENOMEM;
	shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL);
	if (!shdev) {
		dev_err(&pdev->dev, "Not enough memory\n");
		goto ealloc;
	}

	shdev->chan_reg = ioremap(chan->start, resource_size(chan));
	if (!shdev->chan_reg)
		goto emapchan;
	if (dmars) {
		shdev->dmars = ioremap(dmars->start, resource_size(dmars));
		if (!shdev->dmars)
			goto emapdmars;
	}

	/* platform data */
	shdev->pdata = pdata;

	if (pdata->chcr_offset)
		shdev->chcr_offset = pdata->chcr_offset;
	else
		shdev->chcr_offset = CHCR;

	if (pdata->chcr_ie_bit)
		shdev->chcr_ie_bit = pdata->chcr_ie_bit;
	else
		shdev->chcr_ie_bit = CHCR_IE;

	platform_set_drvdata(pdev, shdev);

	shdev->common.dev = &pdev->dev;

	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	spin_lock_irq(&sh_dmae_lock);
	list_add_tail_rcu(&shdev->node, &sh_dmae_devices);
	spin_unlock_irq(&sh_dmae_lock);

	/* reset dma controller - only needed as a test */
	err = sh_dmae_rst(shdev);
	if (err)
		goto rst_err;

	INIT_LIST_HEAD(&shdev->common.channels);

	if (!pdata->slave_only)
		dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask);
	if (pdata->slave && pdata->slave_num)
		dma_cap_set(DMA_SLAVE, shdev->common.cap_mask);

	shdev->common.device_alloc_chan_resources
		= sh_dmae_alloc_chan_resources;
	shdev->common.device_free_chan_resources = sh_dmae_free_chan_resources;
	shdev->common.device_prep_dma_memcpy = sh_dmae_prep_memcpy;
	shdev->common.device_tx_status = sh_dmae_tx_status;
	shdev->common.device_issue_pending = sh_dmae_memcpy_issue_pending;

	/* Compulsory for DMA_SLAVE fields */
	shdev->common.device_prep_slave_sg = sh_dmae_prep_slave_sg;
	shdev->common.device_control = sh_dmae_control;

	/* Default transfer size of 4 bytes requires 4-byte alignment */
	shdev->common.copy_align = LOG2_DEFAULT_XFER_SIZE;

#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
	chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);

	if (!chanirq_res)
		chanirq_res = errirq_res;
	else
		irqres++;

	if (chanirq_res == errirq_res ||
	    (errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE)
		irqflags = IRQF_SHARED;

	errirq = errirq_res->start;

	err = request_irq(errirq, sh_dmae_err, irqflags,
			  "DMAC Address Error", shdev);
	if (err) {
		dev_err(&pdev->dev,
			"DMA failed requesting irq #%d, error %d\n",
			errirq, err);
		goto eirq_err;
	}

#else
	chanirq_res = errirq_res;
#endif /* CONFIG_CPU_SH4 || CONFIG_ARCH_SHMOBILE */

	if (chanirq_res->start == chanirq_res->end &&
	    !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) {
		/* Special case - all multiplexed */
		for (; irq_cnt < pdata->channel_num; irq_cnt++) {
			if (irq_cnt < SH_DMAC_MAX_CHANNELS) {
				chan_irq[irq_cnt] = chanirq_res->start;
				chan_flag[irq_cnt] = IRQF_SHARED;
			} else {
				irq_cap = 1;
				break;
			}
		}
	} else {
		do {
			for (i = chanirq_res->start; i <= chanirq_res->end; i++) {
				if (irq_cnt >= SH_DMAC_MAX_CHANNELS) {
					irq_cap = 1;
					break;
				}

				if ((errirq_res->flags & IORESOURCE_BITS) ==
				    IORESOURCE_IRQ_SHAREABLE)
					chan_flag[irq_cnt] = IRQF_SHARED;
				else
					chan_flag[irq_cnt] = IRQF_DISABLED;
				dev_dbg(&pdev->dev,
					"Found IRQ %d for channel %d\n",
					i, irq_cnt);
				chan_irq[irq_cnt++] = i;
			}

			if (irq_cnt >= SH_DMAC_MAX_CHANNELS)
				break;

			chanirq_res = platform_get_resource(pdev,
						IORESOURCE_IRQ, ++irqres);
		} while (irq_cnt < pdata->channel_num && chanirq_res);
	}

	/* Create DMA Channel */
	for (i = 0; i < irq_cnt; i++) {
		err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]);
		if (err)
			goto chan_probe_err;
	}

	if (irq_cap)
		dev_notice(&pdev->dev, "Attempting to register %d DMA "
			   "channels when a maximum of %d are supported.\n",
			   pdata->channel_num, SH_DMAC_MAX_CHANNELS);

	pm_runtime_put(&pdev->dev);

	dma_async_device_register(&shdev->common);

	return err;

chan_probe_err:
	sh_dmae_chan_remove(shdev);

#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
	free_irq(errirq, shdev);
eirq_err:
#endif
rst_err:
	spin_lock_irq(&sh_dmae_lock);
	list_del_rcu(&shdev->node);
	spin_unlock_irq(&sh_dmae_lock);

	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	if (dmars)
		iounmap(shdev->dmars);

	platform_set_drvdata(pdev, NULL);
emapdmars:
	iounmap(shdev->chan_reg);
	synchronize_rcu();
emapchan:
	kfree(shdev);
ealloc:
	if (dmars)
		release_mem_region(dmars->start, resource_size(dmars));
ermrdmars:
	release_mem_region(chan->start, resource_size(chan));

	return err;
}

static int __exit sh_dmae_remove(struct platform_device *pdev)
{
	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
	struct resource *res;
	int errirq = platform_get_irq(pdev, 0);

	dma_async_device_unregister(&shdev->common);

	if (errirq > 0)
		free_irq(errirq, shdev);

	spin_lock_irq(&sh_dmae_lock);
	list_del_rcu(&shdev->node);
	spin_unlock_irq(&sh_dmae_lock);

	/* channel data remove */
	sh_dmae_chan_remove(shdev);

	pm_runtime_disable(&pdev->dev);

	if (shdev->dmars)
		iounmap(shdev->dmars);
	iounmap(shdev->chan_reg);

	platform_set_drvdata(pdev, NULL);

	synchronize_rcu();
	kfree(shdev);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res)
		release_mem_region(res->start, resource_size(res));
	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (res)
		release_mem_region(res->start, resource_size(res));

	return 0;
}

static void sh_dmae_shutdown(struct platform_device *pdev)
{
	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
	sh_dmae_ctl_stop(shdev);
}

static int sh_dmae_runtime_suspend(struct device *dev)
{
	return 0;
}

static int sh_dmae_runtime_resume(struct device *dev)
{
	struct sh_dmae_device *shdev = dev_get_drvdata(dev);

	return sh_dmae_rst(shdev);
}

#ifdef CONFIG_PM
static int sh_dmae_suspend(struct device *dev)
{
	return 0;
}

static int sh_dmae_resume(struct device *dev)
{
	struct sh_dmae_device *shdev = dev_get_drvdata(dev);
	int i, ret;

	ret = sh_dmae_rst(shdev);
	if (ret < 0)
		dev_err(dev, "Failed to reset!\n");

	for (i = 0; i < shdev->pdata->channel_num; i++) {
		struct sh_dmae_chan *sh_chan = shdev->chan[i];
		struct sh_dmae_slave *param = sh_chan->common.private;

		if (!sh_chan->descs_allocated)
			continue;

		if (param) {
			const struct sh_dmae_slave_config *cfg = param->config;
			dmae_set_dmars(sh_chan, cfg->mid_rid);
			dmae_set_chcr(sh_chan, cfg->chcr);
		} else {
			dmae_init(sh_chan);
		}
	}

	return 0;
}
#else
#define sh_dmae_suspend NULL
#define sh_dmae_resume NULL
#endif

const struct dev_pm_ops sh_dmae_pm = {
	.suspend = sh_dmae_suspend,
	.resume = sh_dmae_resume,
	.runtime_suspend = sh_dmae_runtime_suspend,
	.runtime_resume = sh_dmae_runtime_resume,
};

static struct platform_driver sh_dmae_driver = {
	.remove = __exit_p(sh_dmae_remove),
	.shutdown = sh_dmae_shutdown,
	.driver = {
		.owner = THIS_MODULE,
		.name = "sh-dma-engine",
		.pm = &sh_dmae_pm,
	},
};

static int __init sh_dmae_init(void)
{
	/* Wire up NMI handling */
	int err = register_die_notifier(&sh_dmae_nmi_notifier);
	if (err)
		return err;

	return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe);
}
module_init(sh_dmae_init);

static void __exit sh_dmae_exit(void)
{
	platform_driver_unregister(&sh_dmae_driver);

	unregister_die_notifier(&sh_dmae_nmi_notifier);
}
module_exit(sh_dmae_exit);

MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>");
MODULE_DESCRIPTION("Renesas SH DMA Engine driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:sh-dma-engine");
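
For context, here is a minimal client-side sketch (not part of shdma.c) of how a peripheral driver of that kernel era might claim one of these channels and queue a single MEM_TO_DEV transfer. The slave ID value SHDMA_SLAVE_ID_TX, the filter function, and example_start_tx() are hypothetical names for illustration; the key point, taken from the driver above, is that the slave descriptor is handed over through chan->private and validated in sh_dmae_alloc_chan_resources().

/*
 * Illustrative dmaengine client sketch, assuming a platform-specific slave
 * ID (SHDMA_SLAVE_ID_TX is a placeholder) and an already DMA-mapped sglist.
 */
#include <linux/dmaengine.h>
#include <linux/sh_dma.h>

static bool example_shdma_filter(struct dma_chan *chan, void *arg)
{
	/*
	 * Hand the slave parameters to the driver via chan->private;
	 * sh_dmae_alloc_chan_resources() looks the slave_id up in the
	 * platform data and reserves it in sh_dmae_slave_used.
	 * (Simplified: a real filter would also check the controller.)
	 */
	chan->private = arg;
	return true;
}

static int example_start_tx(struct scatterlist *sgl, unsigned int sg_len)
{
	/* Must outlive the channel: the driver keeps a pointer to it */
	static struct sh_dmae_slave param = { .slave_id = SHDMA_SLAVE_ID_TX };
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	chan = dma_request_channel(mask, example_shdma_filter, &param);
	if (!chan)
		return -EBUSY;

	desc = dmaengine_prep_slave_sg(chan, sgl, sg_len,
				       DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	if (!desc) {
		dma_release_channel(chan);
		return -EIO;
	}

	dmaengine_submit(desc);		/* ends up in sh_dmae_tx_submit() */
	dma_async_issue_pending(chan);	/* sh_dmae_memcpy_issue_pending() */
	return 0;
}

Note that long buffers need no special handling by the client: sh_dmae_prep_sg() above splits each scatterlist entry into chunks of at most SH_DMA_TCR_MAX + 1 bytes, drawing from the per-channel pool of NR_DESCS_PER_CHANNEL descriptors.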