Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
drivers/dma/edma.c at v3.12 (757 lines, 19 kB)

/*
 * TI EDMA DMA engine driver
 *
 * Copyright 2012 Texas Instruments
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/platform_data/edma.h>

#include "dmaengine.h"
#include "virt-dma.h"

/*
 * This will go away when the private EDMA API is folded
 * into this driver and the platform device(s) are
 * instantiated in the arch code. We can only get away
 * with this simplification because DA8XX may not be built
 * in the same kernel image with other DaVinci parts. This
 * avoids having to sprinkle dmaengine driver platform devices
 * and data throughout all the existing board files.
 */
#ifdef CONFIG_ARCH_DAVINCI_DA8XX
#define EDMA_CTLRS	2
#define EDMA_CHANS	32
#else
#define EDMA_CTLRS	1
#define EDMA_CHANS	64
#endif /* CONFIG_ARCH_DAVINCI_DA8XX */

/* Max of 16 segments per channel to conserve PaRAM slots */
#define MAX_NR_SG	16
#define EDMA_MAX_SLOTS	MAX_NR_SG
#define EDMA_DESCRIPTORS	16

struct edma_desc {
	struct virt_dma_desc vdesc;
	struct list_head node;
	int absync;
	int pset_nr;
	int processed;
	struct edmacc_param pset[0];
};

struct edma_cc;

struct edma_chan {
	struct virt_dma_chan vchan;
	struct list_head node;
	struct edma_desc *edesc;
	struct edma_cc *ecc;
	int ch_num;
	bool alloced;
	int slot[EDMA_MAX_SLOTS];
	int missed;
	struct dma_slave_config cfg;
};

struct edma_cc {
	int ctlr;
	struct dma_device dma_slave;
	struct edma_chan slave_chans[EDMA_CHANS];
	int num_slave_chans;
	int dummy_slot;
};

static inline struct edma_cc *to_edma_cc(struct dma_device *d)
{
	return container_of(d, struct edma_cc, dma_slave);
}

static inline struct edma_chan *to_edma_chan(struct dma_chan *c)
{
	return container_of(c, struct edma_chan, vchan.chan);
}

static inline struct edma_desc
*to_edma_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct edma_desc, vdesc.tx);
}

static void edma_desc_free(struct virt_dma_desc *vdesc)
{
	kfree(container_of(vdesc, struct edma_desc, vdesc));
}

/* Dispatch a queued descriptor to the controller (caller holds lock) */
static void edma_execute(struct edma_chan *echan)
{
	struct virt_dma_desc *vdesc;
	struct edma_desc *edesc;
	struct device *dev = echan->vchan.chan.device->dev;
	int i, j, left, nslots;

	/* If either we processed all psets or we're still not started */
	if (!echan->edesc ||
	    echan->edesc->pset_nr == echan->edesc->processed) {
		/* Get next vdesc */
		vdesc = vchan_next_desc(&echan->vchan);
		if (!vdesc) {
			echan->edesc = NULL;
			return;
		}
		list_del(&vdesc->node);
		echan->edesc = to_edma_desc(&vdesc->tx);
	}

	edesc = echan->edesc;

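	/*
	 * Hypothetical walk-through of the chunking below (editor's
	 * illustration, not from the original source): a 40-entry SG
	 * list is programmed in chunks of MAX_NR_SG = 16, so this
	 * function runs three times for one descriptor, writing 16,
	 * 16 and then 8 PaRAM sets.
	 */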
	/* Find out how many left */
	left = edesc->pset_nr - edesc->processed;
	nslots = min(MAX_NR_SG, left);

	/* Write descriptor PaRAM set(s) */
	for (i = 0; i < nslots; i++) {
		j = i + edesc->processed;
		edma_write_slot(echan->slot[i], &edesc->pset[j]);
		dev_dbg(echan->vchan.chan.device->dev,
			"\n pset[%d]:\n"
			" chnum\t%d\n"
			" slot\t%d\n"
			" opt\t%08x\n"
			" src\t%08x\n"
			" dst\t%08x\n"
			" abcnt\t%08x\n"
			" ccnt\t%08x\n"
			" bidx\t%08x\n"
			" cidx\t%08x\n"
			" lkrld\t%08x\n",
			j, echan->ch_num, echan->slot[i],
			edesc->pset[j].opt,
			edesc->pset[j].src,
			edesc->pset[j].dst,
			edesc->pset[j].a_b_cnt,
			edesc->pset[j].ccnt,
			edesc->pset[j].src_dst_bidx,
			edesc->pset[j].src_dst_cidx,
			edesc->pset[j].link_bcntrld);
		/* Link to the previous slot if not the last set */
		if (i != (nslots - 1))
			edma_link(echan->slot[i], echan->slot[i+1]);
	}

	edesc->processed += nslots;

	/*
	 * If this is the last set in a set of SG-list transactions, then
	 * set up a link to the dummy slot; this results in all future
	 * events being absorbed, and that's OK because we're done.
	 */
	if (edesc->processed == edesc->pset_nr)
		edma_link(echan->slot[nslots-1], echan->ecc->dummy_slot);

	edma_resume(echan->ch_num);

	if (edesc->processed <= MAX_NR_SG) {
		dev_dbg(dev, "first transfer starting %d\n", echan->ch_num);
		edma_start(echan->ch_num);
	}

	/*
	 * This happens due to setup times between intermediate transfers
	 * in long SG lists which have to be broken up into transfers of
	 * MAX_NR_SG
	 */
	if (echan->missed) {
		dev_dbg(dev, "missed event in execute detected\n");
		edma_clean_channel(echan->ch_num);
		edma_stop(echan->ch_num);
		edma_start(echan->ch_num);
		edma_trigger_channel(echan->ch_num);
		echan->missed = 0;
	}
}

static int edma_terminate_all(struct edma_chan *echan)
{
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&echan->vchan.lock, flags);

	/*
	 * Stop DMA activity: we assume the callback will not be called
	 * after edma_dma() returns (even if it does, it will see
	 * echan->edesc is NULL and exit.)
	 */
	if (echan->edesc) {
		echan->edesc = NULL;
		edma_stop(echan->ch_num);
	}

	vchan_get_all_descriptors(&echan->vchan, &head);
	spin_unlock_irqrestore(&echan->vchan.lock, flags);
	vchan_dma_desc_free_list(&echan->vchan, &head);

	return 0;
}

static int edma_slave_config(struct edma_chan *echan,
	struct dma_slave_config *cfg)
{
	if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
	    cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
		return -EINVAL;

	memcpy(&echan->cfg, cfg, sizeof(echan->cfg));

	return 0;
}

static int edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			unsigned long arg)
{
	int ret = 0;
	struct dma_slave_config *config;
	struct edma_chan *echan = to_edma_chan(chan);

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		edma_terminate_all(echan);
		break;
	case DMA_SLAVE_CONFIG:
		config = (struct dma_slave_config *)arg;
		ret = edma_slave_config(echan, config);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static struct dma_async_tx_descriptor *edma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl,
	unsigned int sg_len, enum dma_transfer_direction direction,
	unsigned long tx_flags, void *context)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	struct edma_desc *edesc;
	dma_addr_t dev_addr;
	enum dma_slave_buswidth dev_width;
	u32 burst;
	struct scatterlist *sg;
	int acnt, bcnt, ccnt, src, dst, cidx;
	int src_bidx, dst_bidx, src_cidx, dst_cidx;
	int i, nslots;

	if (unlikely(!echan || !sgl || !sg_len))
		return NULL;

	if (direction == DMA_DEV_TO_MEM) {
		dev_addr = echan->cfg.src_addr;
		dev_width = echan->cfg.src_addr_width;
		burst = echan->cfg.src_maxburst;
	} else if (direction == DMA_MEM_TO_DEV) {
		dev_addr = echan->cfg.dst_addr;
		dev_width = echan->cfg.dst_addr_width;
		burst = echan->cfg.dst_maxburst;
	} else {
		dev_err(dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
		dev_err(dev, "Undefined slave buswidth\n");
		return NULL;
	}

	edesc = kzalloc(sizeof(*edesc) + sg_len *
		sizeof(edesc->pset[0]), GFP_ATOMIC);
	if (!edesc) {
		dev_dbg(dev, "Failed to allocate a descriptor\n");
		return NULL;
	}

	edesc->pset_nr = sg_len;

	/* Allocate a PaRAM slot, if needed */
	nslots = min_t(unsigned, MAX_NR_SG, sg_len);

	for (i = 0; i < nslots; i++) {
		if (echan->slot[i] < 0) {
			echan->slot[i] =
				edma_alloc_slot(EDMA_CTLR(echan->ch_num),
						EDMA_SLOT_ANY);
			if (echan->slot[i] < 0) {
				kfree(edesc);
				dev_err(dev, "Failed to allocate slot\n");
				return NULL;
			}
		}
	}

	/* Configure PaRAM sets for each SG */
	for_each_sg(sgl, sg, sg_len, i) {

		acnt = dev_width;

		/*
		 * If the maxburst is equal to the fifo width, use
		 * A-synced transfers. This allows for large contiguous
		 * buffer transfers using only one PaRAM set.
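		 *
		 * Hypothetical example for the two branches below
		 * (editor's illustration, not from the original source):
		 * a 4-byte FIFO with maxburst = 1 takes the A-synced
		 * path, while maxburst = 8 takes the AB-synced path
		 * with acnt = 4, bcnt = 8 and ccnt = sg_dma_len(sg) / 32.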
		 */
		if (burst == 1) {
			edesc->absync = false;
			ccnt = sg_dma_len(sg) / acnt / (SZ_64K - 1);
			bcnt = sg_dma_len(sg) / acnt - ccnt * (SZ_64K - 1);
			if (bcnt)
				ccnt++;
			else
				bcnt = SZ_64K - 1;
			cidx = acnt;
		/*
		 * If maxburst is greater than the fifo address_width,
		 * use AB-synced transfers where A count is the fifo
		 * address_width and B count is the maxburst. In this
		 * case, we are limited to transfers of C count frames
		 * of (address_width * maxburst) where C count is limited
		 * to SZ_64K-1. This places an upper bound on the length
		 * of an SG segment that can be handled.
		 */
		} else {
			edesc->absync = true;
			bcnt = burst;
			ccnt = sg_dma_len(sg) / (acnt * bcnt);
			if (ccnt > (SZ_64K - 1)) {
				dev_err(dev, "Exceeded max SG segment size\n");
				kfree(edesc);
				return NULL;
			}
			cidx = acnt * bcnt;
		}

		if (direction == DMA_MEM_TO_DEV) {
			src = sg_dma_address(sg);
			dst = dev_addr;
			src_bidx = acnt;
			src_cidx = cidx;
			dst_bidx = 0;
			dst_cidx = 0;
		} else {
			src = dev_addr;
			dst = sg_dma_address(sg);
			src_bidx = 0;
			src_cidx = 0;
			dst_bidx = acnt;
			dst_cidx = cidx;
		}

		edesc->pset[i].opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
		/* Configure A or AB synchronized transfers */
		if (edesc->absync)
			edesc->pset[i].opt |= SYNCDIM;

		/* If this is the last in a current SG set of transactions,
		   enable interrupts so that next set is processed */
		if (!((i+1) % MAX_NR_SG))
			edesc->pset[i].opt |= TCINTEN;

		/* If this is the last set, enable completion interrupt flag */
		if (i == sg_len - 1)
			edesc->pset[i].opt |= TCINTEN;

		edesc->pset[i].src = src;
		edesc->pset[i].dst = dst;

		edesc->pset[i].src_dst_bidx = (dst_bidx << 16) | src_bidx;
		edesc->pset[i].src_dst_cidx = (dst_cidx << 16) | src_cidx;

		edesc->pset[i].a_b_cnt = bcnt << 16 | acnt;
		edesc->pset[i].ccnt = ccnt;
		edesc->pset[i].link_bcntrld = 0xffffffff;

	}

	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}

static void edma_callback(unsigned ch_num, u16 ch_status, void *data)
{
	struct edma_chan *echan = data;
	struct device *dev = echan->vchan.chan.device->dev;
	struct edma_desc *edesc;
	unsigned long flags;
	struct edmacc_param p;

	/* Pause the channel */
	edma_pause(echan->ch_num);

	switch (ch_status) {
	case DMA_COMPLETE:
		spin_lock_irqsave(&echan->vchan.lock, flags);

		edesc = echan->edesc;
		if (edesc) {
			if (edesc->processed == edesc->pset_nr) {
				dev_dbg(dev, "Transfer complete, stopping channel %d\n", ch_num);
				edma_stop(echan->ch_num);
				vchan_cookie_complete(&edesc->vdesc);
			} else {
				dev_dbg(dev, "Intermediate transfer complete on channel %d\n", ch_num);
			}

			edma_execute(echan);
		}

		spin_unlock_irqrestore(&echan->vchan.lock, flags);

		break;
	case DMA_CC_ERROR:
		spin_lock_irqsave(&echan->vchan.lock, flags);

		edma_read_slot(EDMA_CHAN_SLOT(echan->slot[0]), &p);

		/*
		 * Issue later based on missed flag which will be sure
		 * to happen as:
		 * (1) we finished transmitting an intermediate slot and
		 *     edma_execute is coming up.
		 * (2) or we finished current transfer and issue will
		 *     call edma_execute.
		 *
		 * Important note: issuing can be dangerous here and
		 * lead to some nasty recursion when we are in a NULL
		 * slot.
		 * So we avoid doing so and set the missed flag.
		 */
		if (p.a_b_cnt == 0 && p.ccnt == 0) {
			dev_dbg(dev, "Error occurred, looks like slot is null, just setting miss\n");
			echan->missed = 1;
		} else {
			/*
			 * The slot is already programmed but the event got
			 * missed, so it's safe to issue it here.
			 */
			dev_dbg(dev, "Error occurred but slot is non-null, TRIGGERING\n");
			edma_clean_channel(echan->ch_num);
			edma_stop(echan->ch_num);
			edma_start(echan->ch_num);
			edma_trigger_channel(echan->ch_num);
		}

		spin_unlock_irqrestore(&echan->vchan.lock, flags);

		break;
	default:
		break;
	}
}

/* Alloc channel resources */
static int edma_alloc_chan_resources(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	int ret;
	int a_ch_num;
	LIST_HEAD(descs);

	a_ch_num = edma_alloc_channel(echan->ch_num, edma_callback,
					chan, EVENTQ_DEFAULT);

	if (a_ch_num < 0) {
		ret = -ENODEV;
		goto err_no_chan;
	}

	if (a_ch_num != echan->ch_num) {
		dev_err(dev, "failed to allocate requested channel %u:%u\n",
			EDMA_CTLR(echan->ch_num),
			EDMA_CHAN_SLOT(echan->ch_num));
		ret = -ENODEV;
		goto err_wrong_chan;
	}

	echan->alloced = true;
	echan->slot[0] = echan->ch_num;

	dev_info(dev, "allocated channel for %u:%u\n",
		 EDMA_CTLR(echan->ch_num), EDMA_CHAN_SLOT(echan->ch_num));

	return 0;

err_wrong_chan:
	edma_free_channel(a_ch_num);
err_no_chan:
	return ret;
}

/* Free channel resources */
static void edma_free_chan_resources(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	int i;

	/* Terminate transfers */
	edma_stop(echan->ch_num);

	vchan_free_chan_resources(&echan->vchan);

	/* Free EDMA PaRAM slots */
	for (i = 1; i < EDMA_MAX_SLOTS; i++) {
		if (echan->slot[i] >= 0) {
			edma_free_slot(echan->slot[i]);
			echan->slot[i] = -1;
		}
	}

	/* Free EDMA channel */
	if (echan->alloced) {
		edma_free_channel(echan->ch_num);
		echan->alloced = false;
	}

	dev_info(dev, "freeing channel for %u\n", echan->ch_num);
}

/* Send pending descriptor to hardware */
static void edma_issue_pending(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&echan->vchan.lock, flags);
	if (vchan_issue_pending(&echan->vchan) && !echan->edesc)
		edma_execute(echan);
	spin_unlock_irqrestore(&echan->vchan.lock, flags);
}

static size_t edma_desc_size(struct edma_desc *edesc)
{
	int i;
	size_t size;

	if (edesc->absync)
		for (size = i = 0; i < edesc->pset_nr; i++)
			size += (edesc->pset[i].a_b_cnt & 0xffff) *
				(edesc->pset[i].a_b_cnt >> 16) *
				edesc->pset[i].ccnt;
	else
		size = (edesc->pset[0].a_b_cnt & 0xffff) *
			(edesc->pset[0].a_b_cnt >> 16) +
			(edesc->pset[0].a_b_cnt & 0xffff) *
			(SZ_64K - 1) * edesc->pset[0].ccnt;

	return size;
}

/* Check request completion status */
static enum dma_status edma_tx_status(struct dma_chan *chan,
				      dma_cookie_t cookie,
				      struct dma_tx_state *txstate)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct virt_dma_desc *vdesc;
	enum dma_status ret;
	unsigned long flags;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_SUCCESS || !txstate)
		return ret;

	spin_lock_irqsave(&echan->vchan.lock, flags);
	vdesc = vchan_find_desc(&echan->vchan, cookie);
	if (vdesc) {
		txstate->residue = edma_desc_size(to_edma_desc(&vdesc->tx));
	} else if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie) {
		struct edma_desc *edesc = echan->edesc;
		txstate->residue = edma_desc_size(edesc);
	}
	spin_unlock_irqrestore(&echan->vchan.lock, flags);

	return ret;
}

static void __init edma_chan_init(struct edma_cc *ecc,
				  struct dma_device *dma,
				  struct edma_chan *echans)
{
	int i, j;

	for (i = 0; i < EDMA_CHANS; i++) {
		struct edma_chan *echan = &echans[i];
		echan->ch_num = EDMA_CTLR_CHAN(ecc->ctlr, i);
		echan->ecc = ecc;
		echan->vchan.desc_free = edma_desc_free;

		vchan_init(&echan->vchan, dma);

		INIT_LIST_HEAD(&echan->node);
		for (j = 0; j < EDMA_MAX_SLOTS; j++)
			echan->slot[j] = -1;
	}
}

static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma,
			  struct device *dev)
{
	dma->device_prep_slave_sg = edma_prep_slave_sg;
	dma->device_alloc_chan_resources = edma_alloc_chan_resources;
	dma->device_free_chan_resources = edma_free_chan_resources;
	dma->device_issue_pending = edma_issue_pending;
	dma->device_tx_status = edma_tx_status;
	dma->device_control = edma_control;
	dma->dev = dev;

	INIT_LIST_HEAD(&dma->channels);
}

static int edma_probe(struct platform_device *pdev)
{
	struct edma_cc *ecc;
	int ret;

	ecc = devm_kzalloc(&pdev->dev, sizeof(*ecc), GFP_KERNEL);
	if (!ecc) {
		dev_err(&pdev->dev, "Can't allocate controller\n");
		return -ENOMEM;
	}

	ecc->ctlr = pdev->id;
	ecc->dummy_slot = edma_alloc_slot(ecc->ctlr, EDMA_SLOT_ANY);
	if (ecc->dummy_slot < 0) {
		dev_err(&pdev->dev, "Can't allocate PaRAM dummy slot\n");
		return -EIO;
	}

	dma_cap_zero(ecc->dma_slave.cap_mask);
	dma_cap_set(DMA_SLAVE, ecc->dma_slave.cap_mask);

	edma_dma_init(ecc, &ecc->dma_slave, &pdev->dev);

	edma_chan_init(ecc, &ecc->dma_slave, ecc->slave_chans);

	ret = dma_async_device_register(&ecc->dma_slave);
	if (ret)
		goto err_reg1;

	platform_set_drvdata(pdev, ecc);

	dev_info(&pdev->dev, "TI EDMA DMA engine driver\n");

	return 0;

err_reg1:
	edma_free_slot(ecc->dummy_slot);
	return ret;
}

static int edma_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct edma_cc *ecc = dev_get_drvdata(dev);

	dma_async_device_unregister(&ecc->dma_slave);
	edma_free_slot(ecc->dummy_slot);

	return 0;
}

static struct platform_driver edma_driver = {
	.probe		= edma_probe,
	.remove		= edma_remove,
	.driver = {
		.name	= "edma-dma-engine",
		.owner	= THIS_MODULE,
	},
};

bool edma_filter_fn(struct dma_chan *chan, void *param)
{
	if (chan->device->dev->driver == &edma_driver.driver) {
		struct edma_chan *echan = to_edma_chan(chan);
		unsigned ch_req = *(unsigned *)param;
		return ch_req == echan->ch_num;
	}
	return false;
}
EXPORT_SYMBOL(edma_filter_fn);

static struct platform_device *pdev0, *pdev1;

static const struct platform_device_info edma_dev_info0 = {
	.name = "edma-dma-engine",
	.id = 0,
};

static const struct platform_device_info edma_dev_info1 = {
	.name = "edma-dma-engine",
	.id = 1,
};

static int edma_init(void)
{
	int ret = platform_driver_register(&edma_driver);

	if (ret == 0) {
		pdev0 = platform_device_register_full(&edma_dev_info0);
		if (IS_ERR(pdev0)) {
			platform_driver_unregister(&edma_driver);
			ret = PTR_ERR(pdev0);
			goto out;
		}
		pdev0->dev.dma_mask = &pdev0->dev.coherent_dma_mask;
		pdev0->dev.coherent_dma_mask = DMA_BIT_MASK(32);
	}

	if (EDMA_CTLRS == 2) {
		pdev1 = platform_device_register_full(&edma_dev_info1);
		if (IS_ERR(pdev1)) {
			platform_driver_unregister(&edma_driver);
			platform_device_unregister(pdev0);
			ret = PTR_ERR(pdev1);
			goto out;
		}
		pdev1->dev.dma_mask = &pdev1->dev.coherent_dma_mask;
		pdev1->dev.coherent_dma_mask = DMA_BIT_MASK(32);
	}

out:
	return ret;
}
subsys_initcall(edma_init);

static void __exit edma_exit(void)
{
	platform_device_unregister(pdev0);
	if (pdev1)
		platform_device_unregister(pdev1);
	platform_driver_unregister(&edma_driver);
}
module_exit(edma_exit);

MODULE_AUTHOR("Matt Porter <matt.porter@linaro.org>");
MODULE_DESCRIPTION("TI EDMA DMA engine driver");
MODULE_LICENSE("GPL v2");
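
Usage sketch (editor-added, not part of the file above): one way a dmaengine client of this era might claim an EDMA channel through the exported edma_filter_fn and start a MEM_TO_DEV slave transfer. The channel number, FIFO address, width and burst are hypothetical placeholders, and the scatterlist is assumed to be already DMA-mapped by the caller.

#include <linux/dmaengine.h>
#include <linux/edma.h>
#include <linux/scatterlist.h>

static int example_edma_tx(struct scatterlist *sgl, unsigned int sg_len)
{
	struct dma_slave_config cfg = {
		.dst_addr	= 0x01d0c000,	/* hypothetical device FIFO */
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 8,		/* > 1, so the AB-synced path */
	};
	struct dma_async_tx_descriptor *txd;
	struct dma_chan *chan;
	dma_cap_mask_t mask;
	unsigned ch_num = 16;			/* hypothetical EDMA event number */

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* edma_filter_fn() matches the channel whose number equals *param */
	chan = dma_request_channel(mask, edma_filter_fn, &ch_num);
	if (!chan)
		return -ENODEV;

	/* Stored by edma_slave_config(); 8-byte bus widths are rejected */
	if (dmaengine_slave_config(chan, &cfg))
		goto err;

	/* Builds one PaRAM set per SG entry via edma_prep_slave_sg() */
	txd = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
				      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!txd)
		goto err;

	dmaengine_submit(txd);
	dma_async_issue_pending(chan);	/* lands in edma_issue_pending() */
	return 0;

err:
	dma_release_channel(chan);
	return -EIO;
}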