Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
drivers/dma/edma.c at v3.11 (672 lines, 16 kB)
/*
 * TI EDMA DMA engine driver
 *
 * Copyright 2012 Texas Instruments
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/platform_data/edma.h>

#include "dmaengine.h"
#include "virt-dma.h"

/*
 * This will go away when the private EDMA API is folded
 * into this driver and the platform device(s) are
 * instantiated in the arch code. We can only get away
 * with this simplification because DA8XX may not be built
 * in the same kernel image with other DaVinci parts. This
 * avoids having to sprinkle dmaengine driver platform devices
 * and data throughout all the existing board files.
 */
#ifdef CONFIG_ARCH_DAVINCI_DA8XX
#define EDMA_CTLRS	2
#define EDMA_CHANS	32
#else
#define EDMA_CTLRS	1
#define EDMA_CHANS	64
#endif /* CONFIG_ARCH_DAVINCI_DA8XX */

/* Max of 16 segments per channel to conserve PaRAM slots */
#define MAX_NR_SG		16
#define EDMA_MAX_SLOTS		MAX_NR_SG
#define EDMA_DESCRIPTORS	16

struct edma_desc {
	struct virt_dma_desc		vdesc;
	struct list_head		node;
	int				absync;
	int				pset_nr;
	struct edmacc_param		pset[0];
};

struct edma_cc;

struct edma_chan {
	struct virt_dma_chan		vchan;
	struct list_head		node;
	struct edma_desc		*edesc;
	struct edma_cc			*ecc;
	int				ch_num;
	bool				alloced;
	int				slot[EDMA_MAX_SLOTS];
	struct dma_slave_config		cfg;
};

struct edma_cc {
	int				ctlr;
	struct dma_device		dma_slave;
	struct edma_chan		slave_chans[EDMA_CHANS];
	int				num_slave_chans;
	int				dummy_slot;
};

static inline struct edma_cc *to_edma_cc(struct dma_device *d)
{
	return container_of(d, struct edma_cc, dma_slave);
}

static inline struct edma_chan *to_edma_chan(struct dma_chan *c)
{
	return container_of(c, struct edma_chan, vchan.chan);
}

static inline struct edma_desc
*to_edma_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct edma_desc, vdesc.tx);
}

static void edma_desc_free(struct virt_dma_desc *vdesc)
{
	kfree(container_of(vdesc, struct edma_desc, vdesc));
}

/* Dispatch a queued descriptor to the controller (caller holds lock) */
static void edma_execute(struct edma_chan *echan)
{
	struct virt_dma_desc *vdesc = vchan_next_desc(&echan->vchan);
	struct edma_desc *edesc;
	int i;

	if (!vdesc) {
		echan->edesc = NULL;
		return;
	}

	list_del(&vdesc->node);

	echan->edesc = edesc = to_edma_desc(&vdesc->tx);

	/* Write descriptor PaRAM set(s) */
	for (i = 0; i < edesc->pset_nr; i++) {
		edma_write_slot(echan->slot[i], &edesc->pset[i]);
		dev_dbg(echan->vchan.chan.device->dev,
			"\n pset[%d]:\n"
			"  chnum\t%d\n"
			"  slot\t%d\n"
			"  opt\t%08x\n"
			"  src\t%08x\n"
			"  dst\t%08x\n"
			"  abcnt\t%08x\n"
			"  ccnt\t%08x\n"
			"  bidx\t%08x\n"
133 " cidx\t%08x\n" 134 " lkrld\t%08x\n", 135 i, echan->ch_num, echan->slot[i], 136 edesc->pset[i].opt, 137 edesc->pset[i].src, 138 edesc->pset[i].dst, 139 edesc->pset[i].a_b_cnt, 140 edesc->pset[i].ccnt, 141 edesc->pset[i].src_dst_bidx, 142 edesc->pset[i].src_dst_cidx, 143 edesc->pset[i].link_bcntrld); 144 /* Link to the previous slot if not the last set */ 145 if (i != (edesc->pset_nr - 1)) 146 edma_link(echan->slot[i], echan->slot[i+1]); 147 /* Final pset links to the dummy pset */ 148 else 149 edma_link(echan->slot[i], echan->ecc->dummy_slot); 150 } 151 152 edma_start(echan->ch_num); 153} 154 155static int edma_terminate_all(struct edma_chan *echan) 156{ 157 unsigned long flags; 158 LIST_HEAD(head); 159 160 spin_lock_irqsave(&echan->vchan.lock, flags); 161 162 /* 163 * Stop DMA activity: we assume the callback will not be called 164 * after edma_dma() returns (even if it does, it will see 165 * echan->edesc is NULL and exit.) 166 */ 167 if (echan->edesc) { 168 echan->edesc = NULL; 169 edma_stop(echan->ch_num); 170 } 171 172 vchan_get_all_descriptors(&echan->vchan, &head); 173 spin_unlock_irqrestore(&echan->vchan.lock, flags); 174 vchan_dma_desc_free_list(&echan->vchan, &head); 175 176 return 0; 177} 178 179static int edma_slave_config(struct edma_chan *echan, 180 struct dma_slave_config *cfg) 181{ 182 if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES || 183 cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES) 184 return -EINVAL; 185 186 memcpy(&echan->cfg, cfg, sizeof(echan->cfg)); 187 188 return 0; 189} 190 191static int edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 192 unsigned long arg) 193{ 194 int ret = 0; 195 struct dma_slave_config *config; 196 struct edma_chan *echan = to_edma_chan(chan); 197 198 switch (cmd) { 199 case DMA_TERMINATE_ALL: 200 edma_terminate_all(echan); 201 break; 202 case DMA_SLAVE_CONFIG: 203 config = (struct dma_slave_config *)arg; 204 ret = edma_slave_config(echan, config); 205 break; 206 default: 207 ret = -ENOSYS; 208 } 209 210 return ret; 211} 212 213static struct dma_async_tx_descriptor *edma_prep_slave_sg( 214 struct dma_chan *chan, struct scatterlist *sgl, 215 unsigned int sg_len, enum dma_transfer_direction direction, 216 unsigned long tx_flags, void *context) 217{ 218 struct edma_chan *echan = to_edma_chan(chan); 219 struct device *dev = chan->device->dev; 220 struct edma_desc *edesc; 221 dma_addr_t dev_addr; 222 enum dma_slave_buswidth dev_width; 223 u32 burst; 224 struct scatterlist *sg; 225 int i; 226 int acnt, bcnt, ccnt, src, dst, cidx; 227 int src_bidx, dst_bidx, src_cidx, dst_cidx; 228 229 if (unlikely(!echan || !sgl || !sg_len)) 230 return NULL; 231 232 if (direction == DMA_DEV_TO_MEM) { 233 dev_addr = echan->cfg.src_addr; 234 dev_width = echan->cfg.src_addr_width; 235 burst = echan->cfg.src_maxburst; 236 } else if (direction == DMA_MEM_TO_DEV) { 237 dev_addr = echan->cfg.dst_addr; 238 dev_width = echan->cfg.dst_addr_width; 239 burst = echan->cfg.dst_maxburst; 240 } else { 241 dev_err(dev, "%s: bad direction?\n", __func__); 242 return NULL; 243 } 244 245 if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) { 246 dev_err(dev, "Undefined slave buswidth\n"); 247 return NULL; 248 } 249 250 if (sg_len > MAX_NR_SG) { 251 dev_err(dev, "Exceeded max SG segments %d > %d\n", 252 sg_len, MAX_NR_SG); 253 return NULL; 254 } 255 256 edesc = kzalloc(sizeof(*edesc) + sg_len * 257 sizeof(edesc->pset[0]), GFP_ATOMIC); 258 if (!edesc) { 259 dev_dbg(dev, "Failed to allocate a descriptor\n"); 260 return NULL; 261 } 262 263 edesc->pset_nr = sg_len; 264 
	for_each_sg(sgl, sg, sg_len, i) {
		/* Allocate a PaRAM slot, if needed */
		if (echan->slot[i] < 0) {
			echan->slot[i] =
				edma_alloc_slot(EDMA_CTLR(echan->ch_num),
						EDMA_SLOT_ANY);
			if (echan->slot[i] < 0) {
				dev_err(dev, "Failed to allocate slot\n");
				kfree(edesc);	/* fix: don't leak the descriptor */
				return NULL;
			}
		}

		acnt = dev_width;

		/*
		 * If the maxburst is equal to the fifo width, use
		 * A-synced transfers. This allows for large contiguous
		 * buffer transfers using only one PaRAM set.
		 */
		if (burst == 1) {
			edesc->absync = false;
			ccnt = sg_dma_len(sg) / acnt / (SZ_64K - 1);
			bcnt = sg_dma_len(sg) / acnt - ccnt * (SZ_64K - 1);
			if (bcnt)
				ccnt++;
			else
				bcnt = SZ_64K - 1;
			cidx = acnt;
		/*
		 * If maxburst is greater than the fifo address_width,
		 * use AB-synced transfers where A count is the fifo
		 * address_width and B count is the maxburst. In this
		 * case, we are limited to transfers of C count frames
		 * of (address_width * maxburst) where C count is limited
		 * to SZ_64K-1. This places an upper bound on the length
		 * of an SG segment that can be handled.
		 */
		} else {
			edesc->absync = true;
			bcnt = burst;
			ccnt = sg_dma_len(sg) / (acnt * bcnt);
			if (ccnt > (SZ_64K - 1)) {
				dev_err(dev, "Exceeded max SG segment size\n");
				kfree(edesc);	/* fix: don't leak the descriptor */
				return NULL;
			}
			cidx = acnt * bcnt;
		}

		if (direction == DMA_MEM_TO_DEV) {
			src = sg_dma_address(sg);
			dst = dev_addr;
			src_bidx = acnt;
			src_cidx = cidx;
			dst_bidx = 0;
			dst_cidx = 0;
		} else {
			src = dev_addr;
			dst = sg_dma_address(sg);
			src_bidx = 0;
			src_cidx = 0;
			dst_bidx = acnt;
			dst_cidx = cidx;
		}

		edesc->pset[i].opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
		/* Configure A or AB synchronized transfers */
		if (edesc->absync)
			edesc->pset[i].opt |= SYNCDIM;
		/* If this is the last set, enable completion interrupt flag */
		if (i == sg_len - 1)
			edesc->pset[i].opt |= TCINTEN;

		edesc->pset[i].src = src;
		edesc->pset[i].dst = dst;

		edesc->pset[i].src_dst_bidx = (dst_bidx << 16) | src_bidx;
		edesc->pset[i].src_dst_cidx = (dst_cidx << 16) | src_cidx;

		edesc->pset[i].a_b_cnt = bcnt << 16 | acnt;
		edesc->pset[i].ccnt = ccnt;
		edesc->pset[i].link_bcntrld = 0xffffffff;
	}

	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}

static void edma_callback(unsigned ch_num, u16 ch_status, void *data)
{
	struct edma_chan *echan = data;
	struct device *dev = echan->vchan.chan.device->dev;
	struct edma_desc *edesc;
	unsigned long flags;

	/* Stop the channel */
	edma_stop(echan->ch_num);

	switch (ch_status) {
	case DMA_COMPLETE:
		dev_dbg(dev, "transfer complete on channel %d\n", ch_num);

		spin_lock_irqsave(&echan->vchan.lock, flags);

		edesc = echan->edesc;
		if (edesc) {
			edma_execute(echan);
			vchan_cookie_complete(&edesc->vdesc);
		}

		spin_unlock_irqrestore(&echan->vchan.lock, flags);

		break;
	case DMA_CC_ERROR:
		dev_dbg(dev, "transfer error on channel %d\n", ch_num);
		break;
	default:
		break;
	}
}

/* Alloc channel resources */
static int edma_alloc_chan_resources(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	int ret;
	int a_ch_num;
	LIST_HEAD(descs);

	a_ch_num = edma_alloc_channel(echan->ch_num, edma_callback,
					chan, EVENTQ_DEFAULT);

	if (a_ch_num < 0) {
		ret = -ENODEV;
		goto err_no_chan;
	}

	if (a_ch_num != echan->ch_num) {
		dev_err(dev, "failed to allocate requested channel %u:%u\n",
			EDMA_CTLR(echan->ch_num),
			EDMA_CHAN_SLOT(echan->ch_num));
		ret = -ENODEV;
		goto err_wrong_chan;
	}

	echan->alloced = true;
	echan->slot[0] = echan->ch_num;

	dev_info(dev, "allocated channel for %u:%u\n",
		 EDMA_CTLR(echan->ch_num), EDMA_CHAN_SLOT(echan->ch_num));

	return 0;

err_wrong_chan:
	edma_free_channel(a_ch_num);
err_no_chan:
	return ret;
}

/* Free channel resources */
static void edma_free_chan_resources(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	int i;

	/* Terminate transfers */
	edma_stop(echan->ch_num);

	vchan_free_chan_resources(&echan->vchan);

	/* Free EDMA PaRAM slots */
	for (i = 1; i < EDMA_MAX_SLOTS; i++) {
		if (echan->slot[i] >= 0) {
			edma_free_slot(echan->slot[i]);
			echan->slot[i] = -1;
		}
	}

	/* Free EDMA channel */
	if (echan->alloced) {
		edma_free_channel(echan->ch_num);
		echan->alloced = false;
	}

	dev_info(dev, "freeing channel for %u\n", echan->ch_num);
}

/* Send pending descriptor to hardware */
static void edma_issue_pending(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&echan->vchan.lock, flags);
	if (vchan_issue_pending(&echan->vchan) && !echan->edesc)
		edma_execute(echan);
	spin_unlock_irqrestore(&echan->vchan.lock, flags);
}

static size_t edma_desc_size(struct edma_desc *edesc)
{
	int i;
	size_t size;

	if (edesc->absync)
		for (size = i = 0; i < edesc->pset_nr; i++)
			size += (edesc->pset[i].a_b_cnt & 0xffff) *
				(edesc->pset[i].a_b_cnt >> 16) *
				edesc->pset[i].ccnt;
	else
		size = (edesc->pset[0].a_b_cnt & 0xffff) *
			(edesc->pset[0].a_b_cnt >> 16) +
			(edesc->pset[0].a_b_cnt & 0xffff) *
			(SZ_64K - 1) * edesc->pset[0].ccnt;

	return size;
}

/* Check request completion status */
static enum dma_status edma_tx_status(struct dma_chan *chan,
				      dma_cookie_t cookie,
				      struct dma_tx_state *txstate)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct virt_dma_desc *vdesc;
	enum dma_status ret;
	unsigned long flags;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_SUCCESS || !txstate)
		return ret;

	spin_lock_irqsave(&echan->vchan.lock, flags);
	vdesc = vchan_find_desc(&echan->vchan, cookie);
	if (vdesc) {
		txstate->residue = edma_desc_size(to_edma_desc(&vdesc->tx));
	} else if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie) {
		struct edma_desc *edesc = echan->edesc;
		txstate->residue = edma_desc_size(edesc);
	} else {
		txstate->residue = 0;
	}
	spin_unlock_irqrestore(&echan->vchan.lock, flags);

	return ret;
}

static void __init edma_chan_init(struct edma_cc *ecc,
				  struct dma_device *dma,
				  struct edma_chan *echans)
{
	int i, j;

	for (i = 0; i < EDMA_CHANS; i++) {
		struct edma_chan *echan = &echans[i];
		echan->ch_num = EDMA_CTLR_CHAN(ecc->ctlr, i);
		echan->ecc = ecc;
		echan->vchan.desc_free = edma_desc_free;

		vchan_init(&echan->vchan, dma);

		INIT_LIST_HEAD(&echan->node);
		for (j = 0; j < EDMA_MAX_SLOTS; j++)
			echan->slot[j] = -1;
	}
}

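/*
 * Note on the numbering used above (per the macro definitions in
 * <linux/platform_data/edma.h>): EDMA_CTLR_CHAN(ctlr, chan) packs the
 * controller index into the upper 16 bits of ch_num and the channel
 * index into the lower 16 bits; EDMA_CTLR() and EDMA_CHAN_SLOT()
 * recover them. So channel 5 on controller 1 becomes ch_num 0x10005.
 */
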
static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma,
			  struct device *dev)
{
	dma->device_prep_slave_sg = edma_prep_slave_sg;
	dma->device_alloc_chan_resources = edma_alloc_chan_resources;
	dma->device_free_chan_resources = edma_free_chan_resources;
	dma->device_issue_pending = edma_issue_pending;
	dma->device_tx_status = edma_tx_status;
	dma->device_control = edma_control;
	dma->dev = dev;

	INIT_LIST_HEAD(&dma->channels);
}

static int edma_probe(struct platform_device *pdev)
{
	struct edma_cc *ecc;
	int ret;

	ecc = devm_kzalloc(&pdev->dev, sizeof(*ecc), GFP_KERNEL);
	if (!ecc) {
		dev_err(&pdev->dev, "Can't allocate controller\n");
		return -ENOMEM;
	}

	ecc->ctlr = pdev->id;
	ecc->dummy_slot = edma_alloc_slot(ecc->ctlr, EDMA_SLOT_ANY);
	if (ecc->dummy_slot < 0) {
		dev_err(&pdev->dev, "Can't allocate PaRAM dummy slot\n");
		return -EIO;
	}

	dma_cap_zero(ecc->dma_slave.cap_mask);
	dma_cap_set(DMA_SLAVE, ecc->dma_slave.cap_mask);

	edma_dma_init(ecc, &ecc->dma_slave, &pdev->dev);

	edma_chan_init(ecc, &ecc->dma_slave, ecc->slave_chans);

	ret = dma_async_device_register(&ecc->dma_slave);
	if (ret)
		goto err_reg1;

	platform_set_drvdata(pdev, ecc);

	dev_info(&pdev->dev, "TI EDMA DMA engine driver\n");

	return 0;

err_reg1:
	edma_free_slot(ecc->dummy_slot);
	return ret;
}

static int edma_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct edma_cc *ecc = dev_get_drvdata(dev);

	dma_async_device_unregister(&ecc->dma_slave);
	edma_free_slot(ecc->dummy_slot);

	return 0;
}

static struct platform_driver edma_driver = {
	.probe		= edma_probe,
	.remove		= edma_remove,
	.driver = {
		.name	= "edma-dma-engine",
		.owner	= THIS_MODULE,
	},
};

bool edma_filter_fn(struct dma_chan *chan, void *param)
{
	if (chan->device->dev->driver == &edma_driver.driver) {
		struct edma_chan *echan = to_edma_chan(chan);
		unsigned ch_req = *(unsigned *)param;
		return ch_req == echan->ch_num;
	}
	return false;
}
EXPORT_SYMBOL(edma_filter_fn);

static struct platform_device *pdev0, *pdev1;

static const struct platform_device_info edma_dev_info0 = {
	.name = "edma-dma-engine",
	.id = 0,
};

static const struct platform_device_info edma_dev_info1 = {
	.name = "edma-dma-engine",
	.id = 1,
};

static int edma_init(void)
{
	int ret = platform_driver_register(&edma_driver);

	if (ret == 0) {
		pdev0 = platform_device_register_full(&edma_dev_info0);
		if (IS_ERR(pdev0)) {
			platform_driver_unregister(&edma_driver);
			ret = PTR_ERR(pdev0);
			goto out;
		}
		pdev0->dev.dma_mask = &pdev0->dev.coherent_dma_mask;
		pdev0->dev.coherent_dma_mask = DMA_BIT_MASK(32);
	}

	if (EDMA_CTLRS == 2) {
		pdev1 = platform_device_register_full(&edma_dev_info1);
		if (IS_ERR(pdev1)) {
			platform_driver_unregister(&edma_driver);
			platform_device_unregister(pdev0);
			ret = PTR_ERR(pdev1);
			goto out;	/* fix: don't dereference an ERR_PTR below */
		}
		pdev1->dev.dma_mask = &pdev1->dev.coherent_dma_mask;
		pdev1->dev.coherent_dma_mask = DMA_BIT_MASK(32);
	}

out:
	return ret;
}
subsys_initcall(edma_init);

static void __exit edma_exit(void)
{
	platform_device_unregister(pdev0);
	if (pdev1)
		platform_device_unregister(pdev1);
	platform_driver_unregister(&edma_driver);
}
module_exit(edma_exit);

MODULE_AUTHOR("Matt Porter <mporter@ti.com>");
MODULE_DESCRIPTION("TI EDMA DMA engine driver");
MODULE_LICENSE("GPL v2");
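
For context, a client driver would typically obtain one of these channels through the exported edma_filter_fn and the standard dmaengine request interface. A minimal sketch follows; the function name and the channel value (channel 12 on controller 0) are illustrative assumptions, not part of the original source:

/* sketch: requesting an EDMA slave channel from a client driver */
#include <linux/dmaengine.h>
#include <linux/platform_data/edma.h>

static struct dma_chan *example_request_edma_chan(void)
{
	dma_cap_mask_t mask;
	/* illustrative: event/channel 12 on controller 0 */
	unsigned int ch_num = EDMA_CTLR_CHAN(0, 12);

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* edma_filter_fn matches only channels owned by this driver */
	return dma_request_channel(mask, edma_filter_fn, &ch_num);
}

The returned channel (or NULL on failure) would then be configured with dmaengine_slave_config() before preparing slave SG transfers.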