/*
 * Renesas SuperH DMA Engine support
 *
 * base is drivers/dma/fsldma.c
 *
 * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * - DMA of SuperH does not have Hardware DMA chain mode.
 * - MAX DMA size is 16MB.
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/sh_dma.h>
#include <linux/notifier.h>
#include <linux/kdebug.h>
#include <linux/spinlock.h>
#include <linux/rculist.h>

#include "../dmaengine.h"
#include "shdma.h"

#define SH_DMAE_DRV_NAME "sh-dma-engine"

/* Default MEMCPY transfer size = 2^2 = 4 bytes */
#define LOG2_DEFAULT_XFER_SIZE	2
#define SH_DMA_SLAVE_NUMBER	256
#define SH_DMA_TCR_MAX		(16 * 1024 * 1024 - 1)

/*
 * Used for write-side mutual exclusion for the global device list,
 * read-side synchronization by way of RCU, and per-controller data.
 */
static DEFINE_SPINLOCK(sh_dmae_lock);
static LIST_HEAD(sh_dmae_devices);

static void chclr_write(struct sh_dmae_chan *sh_dc, u32 data)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);

	__raw_writel(data, shdev->chan_reg +
		     shdev->pdata->channel[sh_dc->shdma_chan.id].chclr_offset);
}

static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
{
	__raw_writel(data, sh_dc->base + reg / sizeof(u32));
}

static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
{
	return __raw_readl(sh_dc->base + reg / sizeof(u32));
}

static u16 dmaor_read(struct sh_dmae_device *shdev)
{
	u32 __iomem *addr = shdev->chan_reg + DMAOR / sizeof(u32);

	if (shdev->pdata->dmaor_is_32bit)
		return __raw_readl(addr);
	else
		return __raw_readw(addr);
}

static void dmaor_write(struct sh_dmae_device *shdev, u16 data)
{
	u32 __iomem *addr = shdev->chan_reg + DMAOR / sizeof(u32);

	if (shdev->pdata->dmaor_is_32bit)
		__raw_writel(data, addr);
	else
		__raw_writew(data, addr);
}

static void chcr_write(struct sh_dmae_chan *sh_dc, u32 data)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);

	__raw_writel(data, sh_dc->base + shdev->chcr_offset / sizeof(u32));
}

static u32 chcr_read(struct sh_dmae_chan *sh_dc)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);

	return __raw_readl(sh_dc->base + shdev->chcr_offset / sizeof(u32));
}

/*
 * Reset DMA controller
 *
 * SH7780 has two DMAOR registers
 */
static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev)
{
	unsigned short dmaor;
	unsigned long flags;

	spin_lock_irqsave(&sh_dmae_lock, flags);

	dmaor = dmaor_read(shdev);
	dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME));

	spin_unlock_irqrestore(&sh_dmae_lock, flags);
}

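/*
 * Re-initialize the controller: clear the error/NMI flags, clear every
 * channel via its CHCLR register where the hardware has one, then write
 * the platform-provided initial DMAOR value and verify that it latched.
 */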
static int sh_dmae_rst(struct sh_dmae_device *shdev)
{
	unsigned short dmaor;
	unsigned long flags;

	spin_lock_irqsave(&sh_dmae_lock, flags);

	dmaor = dmaor_read(shdev) & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME);

	if (shdev->pdata->chclr_present) {
		int i;
		for (i = 0; i < shdev->pdata->channel_num; i++) {
			struct sh_dmae_chan *sh_chan = shdev->chan[i];
			if (sh_chan)
				chclr_write(sh_chan, 0);
		}
	}

	dmaor_write(shdev, dmaor | shdev->pdata->dmaor_init);

	dmaor = dmaor_read(shdev);

	spin_unlock_irqrestore(&sh_dmae_lock, flags);

	if (dmaor & (DMAOR_AE | DMAOR_NMIF)) {
		dev_warn(shdev->shdma_dev.dma_dev.dev, "Can't initialize DMAOR.\n");
		return -EIO;
	}
	if (shdev->pdata->dmaor_init & ~dmaor)
		dev_warn(shdev->shdma_dev.dma_dev.dev,
			 "DMAOR=0x%x hasn't latched the initial value 0x%x.\n",
			 dmaor, shdev->pdata->dmaor_init);
	return 0;
}

static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
{
	u32 chcr = chcr_read(sh_chan);

	if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE)
		return true; /* working */

	return false; /* waiting */
}

static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) |
		((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift);

	if (cnt >= pdata->ts_shift_num)
		cnt = 0;

	return pdata->ts_shift[cnt];
}

static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	int i;

	for (i = 0; i < pdata->ts_shift_num; i++)
		if (pdata->ts_shift[i] == l2size)
			break;

	if (i == pdata->ts_shift_num)
		i = 0;

	return ((i << pdata->ts_low_shift) & pdata->ts_low_mask) |
		((i << pdata->ts_high_shift) & pdata->ts_high_mask);
}

static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
{
	sh_dmae_writel(sh_chan, hw->sar, SAR);
	sh_dmae_writel(sh_chan, hw->dar, DAR);
	sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR);
}

static void dmae_start(struct sh_dmae_chan *sh_chan)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	u32 chcr = chcr_read(sh_chan);

	if (shdev->pdata->needs_tend_set)
		sh_dmae_writel(sh_chan, 0xFFFFFFFF, TEND);

	chcr |= CHCR_DE | shdev->chcr_ie_bit;
	chcr_write(sh_chan, chcr & ~CHCR_TE);
}

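/*
 * Note: TCR counts transfer units of the size currently programmed into
 * CHCR, not bytes. Byte lengths are therefore shifted by xmit_shift when
 * TCR is written (dmae_set_reg()) or read back (sh_dmae_get_partial()).
 */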
static void dmae_init(struct sh_dmae_chan *sh_chan)
{
	/*
	 * Default configuration for dual address memory-memory transfer.
	 * 0x400 represents auto-request.
	 */
	u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan,
						   LOG2_DEFAULT_XFER_SIZE);
	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr);
	chcr_write(sh_chan, chcr);
}

static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
{
	/* If DMA is active, cannot set CHCR. TODO: remove this superfluous check */
	if (dmae_is_busy(sh_chan))
		return -EBUSY;

	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val);
	chcr_write(sh_chan, val);

	return 0;
}

static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->shdma_chan.id];
	u16 __iomem *addr = shdev->dmars;
	unsigned int shift = chan_pdata->dmars_bit;

	if (dmae_is_busy(sh_chan))
		return -EBUSY;

	if (pdata->no_dmars)
		return 0;

	/* in the case of a missing DMARS resource use first memory window */
	if (!addr)
		addr = (u16 __iomem *)shdev->chan_reg;
	addr += chan_pdata->dmars / sizeof(u16);

	__raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift),
		     addr);

	return 0;
}

static void sh_dmae_start_xfer(struct shdma_chan *schan,
			       struct shdma_desc *sdesc)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);
	struct sh_dmae_desc *sh_desc = container_of(sdesc,
					struct sh_dmae_desc, shdma_desc);
	dev_dbg(sh_chan->shdma_chan.dev, "Queue #%d to %d: %u@%x -> %x\n",
		sdesc->async_tx.cookie, sh_chan->shdma_chan.id,
		sh_desc->hw.tcr, sh_desc->hw.sar, sh_desc->hw.dar);
	/* Get the ld start address from ld_queue */
	dmae_set_reg(sh_chan, &sh_desc->hw);
	dmae_start(sh_chan);
}

static bool sh_dmae_channel_busy(struct shdma_chan *schan)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);
	return dmae_is_busy(sh_chan);
}

static void sh_dmae_setup_xfer(struct shdma_chan *schan,
			       int slave_id)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);

	if (slave_id >= 0) {
		const struct sh_dmae_slave_config *cfg =
			sh_chan->config;

		dmae_set_dmars(sh_chan, cfg->mid_rid);
		dmae_set_chcr(sh_chan, cfg->chcr);
	} else {
		dmae_init(sh_chan);
	}
}

/*
 * Find a slave channel configuration from the controller list by either a
 * slave ID in the non-DT case, or by a MID/RID value in the DT case
 */
static const struct sh_dmae_slave_config *dmae_find_slave(
	struct sh_dmae_chan *sh_chan, int match)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	struct sh_dmae_pdata *pdata = shdev->pdata;
	const struct sh_dmae_slave_config *cfg;
	int i;

	if (!sh_chan->shdma_chan.dev->of_node) {
		if (match >= SH_DMA_SLAVE_NUMBER)
			return NULL;

		for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
			if (cfg->slave_id == match)
				return cfg;
	} else {
		for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
			if (cfg->mid_rid == match) {
				sh_chan->shdma_chan.slave_id = cfg->slave_id;
				return cfg;
			}
	}

	return NULL;
}

static int sh_dmae_set_slave(struct shdma_chan *schan,
			     int slave_id, bool try)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);
	const struct sh_dmae_slave_config *cfg = dmae_find_slave(sh_chan, slave_id);
	if (!cfg)
		return -ENXIO;

	if (!try)
		sh_chan->config = cfg;

	return 0;
}

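/*
 * For reference, a slave client reaches sh_dmae_set_slave() through the
 * generic dmaengine channel-request path. A rough, illustrative sketch
 * only - the slave ID is made up and error handling is omitted:
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, shdma_chan_filter,
 *				   (void *)(uintptr_t)SHDMA_SLAVE_SCIF0_TX);
 *
 * shdma_chan_filter() is provided by the shdma-base core and probes
 * .set_slave() above in "try" mode before the channel is committed.
 */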
static void dmae_halt(struct sh_dmae_chan *sh_chan)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	u32 chcr = chcr_read(sh_chan);

	chcr &= ~(CHCR_DE | CHCR_TE | shdev->chcr_ie_bit);
	chcr_write(sh_chan, chcr);
}

static int sh_dmae_desc_setup(struct shdma_chan *schan,
			      struct shdma_desc *sdesc,
			      dma_addr_t src, dma_addr_t dst, size_t *len)
{
	struct sh_dmae_desc *sh_desc = container_of(sdesc,
					struct sh_dmae_desc, shdma_desc);

	if (*len > schan->max_xfer_len)
		*len = schan->max_xfer_len;

	sh_desc->hw.sar = src;
	sh_desc->hw.dar = dst;
	sh_desc->hw.tcr = *len;

	return 0;
}

static void sh_dmae_halt(struct shdma_chan *schan)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);
	dmae_halt(sh_chan);
}

static bool sh_dmae_chan_irq(struct shdma_chan *schan, int irq)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);

	if (!(chcr_read(sh_chan) & CHCR_TE))
		return false;

	/* DMA stop */
	dmae_halt(sh_chan);

	return true;
}

static size_t sh_dmae_get_partial(struct shdma_chan *schan,
				  struct shdma_desc *sdesc)
{
	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
						    shdma_chan);
	struct sh_dmae_desc *sh_desc = container_of(sdesc,
					struct sh_dmae_desc, shdma_desc);
	return sh_desc->hw.tcr -
		(sh_dmae_readl(sh_chan, TCR) << sh_chan->xmit_shift);
}

/* Called from error IRQ or NMI */
static bool sh_dmae_reset(struct sh_dmae_device *shdev)
{
	bool ret;

	/* halt the dma controller */
	sh_dmae_ctl_stop(shdev);

	/* We cannot detect which channel caused the error, so reset all */
	ret = shdma_reset(&shdev->shdma_dev);

	sh_dmae_rst(shdev);

	return ret;
}

static irqreturn_t sh_dmae_err(int irq, void *data)
{
	struct sh_dmae_device *shdev = data;

	if (!(dmaor_read(shdev) & DMAOR_AE))
		return IRQ_NONE;

	sh_dmae_reset(shdev);
	return IRQ_HANDLED;
}

static bool sh_dmae_desc_completed(struct shdma_chan *schan,
				   struct shdma_desc *sdesc)
{
	struct sh_dmae_chan *sh_chan = container_of(schan,
					struct sh_dmae_chan, shdma_chan);
	struct sh_dmae_desc *sh_desc = container_of(sdesc,
					struct sh_dmae_desc, shdma_desc);
	u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
	u32 dar_buf = sh_dmae_readl(sh_chan, DAR);

	return (sdesc->direction == DMA_DEV_TO_MEM &&
		(sh_desc->hw.dar + sh_desc->hw.tcr) == dar_buf) ||
		(sdesc->direction != DMA_DEV_TO_MEM &&
		 (sh_desc->hw.sar + sh_desc->hw.tcr) == sar_buf);
}

static bool sh_dmae_nmi_notify(struct sh_dmae_device *shdev)
{
	/* Fast path out if NMIF is not asserted for this controller */
	if ((dmaor_read(shdev) & DMAOR_NMIF) == 0)
		return false;

	return sh_dmae_reset(shdev);
}

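/*
 * The NMI notifier below runs in NMI context, where taking sh_dmae_lock
 * would not be safe; the controller list is therefore walked under RCU
 * only, which is why probe()/remove() publish and unpublish entries with
 * list_add_tail_rcu()/list_del_rcu() and synchronize_rcu().
 */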
static int sh_dmae_nmi_handler(struct notifier_block *self,
			       unsigned long cmd, void *data)
{
	struct sh_dmae_device *shdev;
	int ret = NOTIFY_DONE;
	bool triggered;

	/*
	 * Only concern ourselves with NMI events.
	 *
	 * Normally we would check the die chain value, but as this needs
	 * to be architecture independent, check for NMI context instead.
	 */
	if (!in_nmi())
		return NOTIFY_DONE;

	rcu_read_lock();
	list_for_each_entry_rcu(shdev, &sh_dmae_devices, node) {
		/*
		 * Only stop if one of the controllers has NMIF asserted,
		 * we do not want to interfere with regular address error
		 * handling or NMI events that don't concern the DMACs.
		 */
		triggered = sh_dmae_nmi_notify(shdev);
		if (triggered)
			ret = NOTIFY_OK;
	}
	rcu_read_unlock();

	return ret;
}

static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
	.notifier_call	= sh_dmae_nmi_handler,

	/* Run before NMI debug handler and KGDB */
	.priority	= 1,
};

static int sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
			      int irq, unsigned long flags)
{
	const struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id];
	struct shdma_dev *sdev = &shdev->shdma_dev;
	struct platform_device *pdev = to_platform_device(sdev->dma_dev.dev);
	struct sh_dmae_chan *sh_chan;
	struct shdma_chan *schan;
	int err;

	sh_chan = kzalloc(sizeof(struct sh_dmae_chan), GFP_KERNEL);
	if (!sh_chan) {
		dev_err(sdev->dma_dev.dev,
			"No free memory for allocating dma channels!\n");
		return -ENOMEM;
	}

	schan = &sh_chan->shdma_chan;
	schan->max_xfer_len = SH_DMA_TCR_MAX + 1;

	shdma_chan_probe(sdev, schan, id);

	sh_chan->base = shdev->chan_reg + chan_pdata->offset / sizeof(u32);

	/* set up channel irq */
	if (pdev->id >= 0)
		snprintf(sh_chan->dev_id, sizeof(sh_chan->dev_id),
			 "sh-dmae%d.%d", pdev->id, id);
	else
		snprintf(sh_chan->dev_id, sizeof(sh_chan->dev_id),
			 "sh-dma%d", id);

	err = shdma_request_irq(schan, irq, flags, sh_chan->dev_id);
	if (err) {
		dev_err(sdev->dma_dev.dev,
			"DMA channel %d request_irq error %d\n",
			id, err);
		goto err_no_irq;
	}

	shdev->chan[id] = sh_chan;
	return 0;

err_no_irq:
	/* remove from dmaengine device node */
	shdma_chan_remove(schan);
	kfree(sh_chan);
	return err;
}

static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
{
	struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev;
	struct shdma_chan *schan;
	int i;

	shdma_for_each_chan(schan, &shdev->shdma_dev, i) {
		struct sh_dmae_chan *sh_chan = container_of(schan,
					struct sh_dmae_chan, shdma_chan);
		BUG_ON(!schan);

		shdma_free_irq(&sh_chan->shdma_chan);

		shdma_chan_remove(schan);
		kfree(sh_chan);
	}
	dma_dev->chancnt = 0;
}

static void sh_dmae_shutdown(struct platform_device *pdev)
{
	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
	sh_dmae_ctl_stop(shdev);
}

static int sh_dmae_runtime_suspend(struct device *dev)
{
	return 0;
}

static int sh_dmae_runtime_resume(struct device *dev)
{
	struct sh_dmae_device *shdev = dev_get_drvdata(dev);

	return sh_dmae_rst(shdev);
}

#ifdef CONFIG_PM
static int sh_dmae_suspend(struct device *dev)
{
	return 0;
}

static int sh_dmae_resume(struct device *dev)
{
	struct sh_dmae_device *shdev = dev_get_drvdata(dev);
	int i, ret;

	ret = sh_dmae_rst(shdev);
	if (ret < 0)
		dev_err(dev, "Failed to reset!\n");

	for (i = 0; i < shdev->pdata->channel_num; i++) {
		struct sh_dmae_chan *sh_chan = shdev->chan[i];

		if (!sh_chan->shdma_chan.desc_num)
			continue;

		if (sh_chan->shdma_chan.slave_id >= 0) {
			const struct sh_dmae_slave_config *cfg = sh_chan->config;
			dmae_set_dmars(sh_chan, cfg->mid_rid);
			dmae_set_chcr(sh_chan, cfg->chcr);
		} else {
			dmae_init(sh_chan);
		}
	}

	return 0;
}
#else
#define sh_dmae_suspend NULL
#define sh_dmae_resume NULL
#endif

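/*
 * System resume fully re-initializes the controller and reprograms every
 * channel that still has descriptors allocated; runtime resume only resets
 * the controller itself, since channel registers are reprogrammed anyway
 * when the next transfer is set up (see sh_dmae_setup_xfer()).
 */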
const struct dev_pm_ops sh_dmae_pm = {
	.suspend		= sh_dmae_suspend,
	.resume			= sh_dmae_resume,
	.runtime_suspend	= sh_dmae_runtime_suspend,
	.runtime_resume		= sh_dmae_runtime_resume,
};

static dma_addr_t sh_dmae_slave_addr(struct shdma_chan *schan)
{
	struct sh_dmae_chan *sh_chan = container_of(schan,
					struct sh_dmae_chan, shdma_chan);

	/*
	 * Implicit BUG_ON(!sh_chan->config)
	 * This is an exclusive slave DMA operation; it may only be called
	 * after a successful slave configuration.
	 */
	return sh_chan->config->addr;
}

static struct shdma_desc *sh_dmae_embedded_desc(void *buf, int i)
{
	return &((struct sh_dmae_desc *)buf)[i].shdma_desc;
}

static const struct shdma_ops sh_dmae_shdma_ops = {
	.desc_completed = sh_dmae_desc_completed,
	.halt_channel = sh_dmae_halt,
	.channel_busy = sh_dmae_channel_busy,
	.slave_addr = sh_dmae_slave_addr,
	.desc_setup = sh_dmae_desc_setup,
	.set_slave = sh_dmae_set_slave,
	.setup_xfer = sh_dmae_setup_xfer,
	.start_xfer = sh_dmae_start_xfer,
	.embedded_desc = sh_dmae_embedded_desc,
	.chan_irq = sh_dmae_chan_irq,
	.get_partial = sh_dmae_get_partial,
};

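/*
 * Binding sketch: in the non-DT case the controller is described entirely
 * through struct sh_dmae_pdata, supplied as platform data by board or SoC
 * setup code. Illustrative fragment only - all IDs, MID/RID values and
 * offsets below are invented:
 *
 *	static const struct sh_dmae_slave_config board_slaves[] = {
 *		{ .slave_id = 0, .addr = 0xffe0000c, .chcr = 0x00001000,
 *		  .mid_rid = 0x21 },
 *	};
 *	static const struct sh_dmae_channel board_channels[] = {
 *		{ .offset = 0, .dmars = 0, .dmars_bit = 0 },
 *	};
 *	static struct sh_dmae_pdata board_dma_pdata = {
 *		.slave		= board_slaves,
 *		.slave_num	= ARRAY_SIZE(board_slaves),
 *		.channel	= board_channels,
 *		.channel_num	= ARRAY_SIZE(board_channels),
 *		(plus the SoC-specific DMAOR, CHCR and transfer-size fields)
 *	};
 *
 * sh_dmae_probe() below receives this via pdev->dev.platform_data.
 */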
static int sh_dmae_probe(struct platform_device *pdev)
{
	struct sh_dmae_pdata *pdata = pdev->dev.platform_data;
	unsigned long irqflags = IRQF_DISABLED,
		chan_flag[SH_DMAE_MAX_CHANNELS] = {};
	int errirq, chan_irq[SH_DMAE_MAX_CHANNELS];
	int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0;
	struct sh_dmae_device *shdev;
	struct dma_device *dma_dev;
	struct resource *chan, *dmars, *errirq_res, *chanirq_res;

	/* get platform data */
	if (!pdata || !pdata->channel_num)
		return -ENODEV;

	chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	/* DMARS area is optional */
	dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	/*
	 * IRQ resources:
	 * 1. there always must be at least one IRQ IO-resource. On SH4 it is
	 *    the error IRQ, in which case it is the only IRQ in this resource:
	 *    start == end. If it is the only IRQ resource, all channels also
	 *    use the same IRQ.
	 * 2. DMA channel IRQ resources can be specified one per resource or in
	 *    ranges (start != end)
	 * 3. iff all events (channels and, optionally, error) on this
	 *    controller use the same IRQ, only one IRQ resource can be
	 *    specified, otherwise there must be one IRQ per channel, even if
	 *    some of them are equal
	 * 4. if all IRQs on this controller are equal or if some specific IRQs
	 *    specify IORESOURCE_IRQ_SHAREABLE in their resources, they will be
	 *    requested with the IRQF_SHARED flag
	 */
	errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!chan || !errirq_res)
		return -ENODEV;

	if (!request_mem_region(chan->start, resource_size(chan), pdev->name)) {
		dev_err(&pdev->dev, "DMAC register region already claimed\n");
		return -EBUSY;
	}

	if (dmars && !request_mem_region(dmars->start, resource_size(dmars), pdev->name)) {
		dev_err(&pdev->dev, "DMAC DMARS region already claimed\n");
		err = -EBUSY;
		goto ermrdmars;
	}

	err = -ENOMEM;
	shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL);
	if (!shdev) {
		dev_err(&pdev->dev, "Not enough memory\n");
		goto ealloc;
	}

	dma_dev = &shdev->shdma_dev.dma_dev;

	shdev->chan_reg = ioremap(chan->start, resource_size(chan));
	if (!shdev->chan_reg)
		goto emapchan;
	if (dmars) {
		shdev->dmars = ioremap(dmars->start, resource_size(dmars));
		if (!shdev->dmars)
			goto emapdmars;
	}

	if (!pdata->slave_only)
		dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	if (pdata->slave && pdata->slave_num)
		dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);

	/* Default transfer size of 4 bytes requires 4-byte alignment */
	dma_dev->copy_align = LOG2_DEFAULT_XFER_SIZE;

	shdev->shdma_dev.ops = &sh_dmae_shdma_ops;
	shdev->shdma_dev.desc_size = sizeof(struct sh_dmae_desc);
	err = shdma_init(&pdev->dev, &shdev->shdma_dev,
			 pdata->channel_num);
	if (err < 0)
		goto eshdma;

	/* platform data */
	shdev->pdata = pdata;

	if (pdata->chcr_offset)
		shdev->chcr_offset = pdata->chcr_offset;
	else
		shdev->chcr_offset = CHCR;

	if (pdata->chcr_ie_bit)
		shdev->chcr_ie_bit = pdata->chcr_ie_bit;
	else
		shdev->chcr_ie_bit = CHCR_IE;

	platform_set_drvdata(pdev, shdev);

	pm_runtime_enable(&pdev->dev);
	err = pm_runtime_get_sync(&pdev->dev);
	if (err < 0)
		dev_err(&pdev->dev, "%s(): GET = %d\n", __func__, err);

	spin_lock_irq(&sh_dmae_lock);
	list_add_tail_rcu(&shdev->node, &sh_dmae_devices);
	spin_unlock_irq(&sh_dmae_lock);

	/* reset dma controller - only needed as a test */
	err = sh_dmae_rst(shdev);
	if (err)
		goto rst_err;

#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
	chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);

	if (!chanirq_res)
		chanirq_res = errirq_res;
	else
		irqres++;

	if (chanirq_res == errirq_res ||
	    (errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE)
		irqflags = IRQF_SHARED;

	errirq = errirq_res->start;

	err = request_irq(errirq, sh_dmae_err, irqflags,
			  "DMAC Address Error", shdev);
	if (err) {
		dev_err(&pdev->dev,
			"DMA failed requesting irq #%d, error %d\n",
			errirq, err);
		goto eirq_err;
	}

#else
	chanirq_res = errirq_res;
#endif /* CONFIG_CPU_SH4 || CONFIG_ARCH_SHMOBILE */

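	/*
	 * Map an IRQ to every channel: either a single multiplexed IRQ is
	 * shared by all channels (one IRQ resource with start == end), or
	 * per-channel IRQs are collected from the remaining IRQ resources,
	 * each of which may describe a range of IRQ numbers.
	 */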
	if (chanirq_res->start == chanirq_res->end &&
	    !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) {
		/* Special case - all multiplexed */
		for (; irq_cnt < pdata->channel_num; irq_cnt++) {
			if (irq_cnt < SH_DMAE_MAX_CHANNELS) {
				chan_irq[irq_cnt] = chanirq_res->start;
				chan_flag[irq_cnt] = IRQF_SHARED;
			} else {
				irq_cap = 1;
				break;
			}
		}
	} else {
		do {
			for (i = chanirq_res->start; i <= chanirq_res->end; i++) {
				if (irq_cnt >= SH_DMAE_MAX_CHANNELS) {
					irq_cap = 1;
					break;
				}

				if ((errirq_res->flags & IORESOURCE_BITS) ==
				    IORESOURCE_IRQ_SHAREABLE)
					chan_flag[irq_cnt] = IRQF_SHARED;
				else
					chan_flag[irq_cnt] = IRQF_DISABLED;
				dev_dbg(&pdev->dev,
					"Found IRQ %d for channel %d\n",
					i, irq_cnt);
				chan_irq[irq_cnt++] = i;
			}

			if (irq_cnt >= SH_DMAE_MAX_CHANNELS)
				break;

			chanirq_res = platform_get_resource(pdev,
						IORESOURCE_IRQ, ++irqres);
		} while (irq_cnt < pdata->channel_num && chanirq_res);
	}

	/* Create DMA Channel */
	for (i = 0; i < irq_cnt; i++) {
		err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]);
		if (err)
			goto chan_probe_err;
	}

	if (irq_cap)
		dev_notice(&pdev->dev, "Attempting to register %d DMA "
			   "channels when a maximum of %d are supported.\n",
			   pdata->channel_num, SH_DMAE_MAX_CHANNELS);

	pm_runtime_put(&pdev->dev);

	err = dma_async_device_register(&shdev->shdma_dev.dma_dev);
	if (err < 0)
		goto edmadevreg;

	return err;

edmadevreg:
	pm_runtime_get(&pdev->dev);

chan_probe_err:
	sh_dmae_chan_remove(shdev);

#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
	free_irq(errirq, shdev);
eirq_err:
#endif
rst_err:
	spin_lock_irq(&sh_dmae_lock);
	list_del_rcu(&shdev->node);
	spin_unlock_irq(&sh_dmae_lock);

	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	platform_set_drvdata(pdev, NULL);
	shdma_cleanup(&shdev->shdma_dev);
eshdma:
	if (dmars)
		iounmap(shdev->dmars);
emapdmars:
	iounmap(shdev->chan_reg);
	synchronize_rcu();
emapchan:
	kfree(shdev);
ealloc:
	if (dmars)
		release_mem_region(dmars->start, resource_size(dmars));
ermrdmars:
	release_mem_region(chan->start, resource_size(chan));

	return err;
}

static int sh_dmae_remove(struct platform_device *pdev)
{
	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
	struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev;
	struct resource *res;
	int errirq = platform_get_irq(pdev, 0);

	dma_async_device_unregister(dma_dev);

	if (errirq > 0)
		free_irq(errirq, shdev);

	spin_lock_irq(&sh_dmae_lock);
	list_del_rcu(&shdev->node);
	spin_unlock_irq(&sh_dmae_lock);

	pm_runtime_disable(&pdev->dev);

	sh_dmae_chan_remove(shdev);
	shdma_cleanup(&shdev->shdma_dev);

	if (shdev->dmars)
		iounmap(shdev->dmars);
	iounmap(shdev->chan_reg);

	platform_set_drvdata(pdev, NULL);

	synchronize_rcu();
	kfree(shdev);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res)
		release_mem_region(res->start, resource_size(res));
	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (res)
		release_mem_region(res->start, resource_size(res));

	return 0;
}

static const struct of_device_id sh_dmae_of_match[] = {
	{ .compatible = "renesas,shdma", },
	{ }
};
MODULE_DEVICE_TABLE(of, sh_dmae_of_match);

static struct platform_driver sh_dmae_driver = {
	.driver		= {
		.owner	= THIS_MODULE,
		.pm	= &sh_dmae_pm,
		.name	= SH_DMAE_DRV_NAME,
		.of_match_table = sh_dmae_of_match,
	},
	.remove		= sh_dmae_remove,
	.shutdown	= sh_dmae_shutdown,
};

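/*
 * Init/exit ordering: the NMI notifier is registered before the platform
 * driver and unregistered after it, so NMI handling is in place for the
 * whole lifetime of any probed controller.
 */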
static int __init sh_dmae_init(void)
{
	/* Wire up NMI handling */
	int err = register_die_notifier(&sh_dmae_nmi_notifier);
	if (err)
		return err;

	return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe);
}
module_init(sh_dmae_init);

static void __exit sh_dmae_exit(void)
{
	platform_driver_unregister(&sh_dmae_driver);

	unregister_die_notifier(&sh_dmae_nmi_notifier);
}
module_exit(sh_dmae_exit);

MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>");
MODULE_DESCRIPTION("Renesas SH DMA Engine driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" SH_DMAE_DRV_NAME);