Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v3.9-rc4, 677 lines, 16 kB

/*
 * OMAP DMAengine support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/omap-dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "virt-dma.h"

struct omap_dmadev {
	struct dma_device ddev;
	spinlock_t lock;
	struct tasklet_struct task;
	struct list_head pending;
};

struct omap_chan {
	struct virt_dma_chan vc;
	struct list_head node;

	struct dma_slave_config cfg;
	unsigned dma_sig;
	bool cyclic;
	bool paused;

	int dma_ch;
	struct omap_desc *desc;
	unsigned sgidx;
};

struct omap_sg {
	dma_addr_t addr;
	uint32_t en;		/* number of elements (24-bit) */
	uint32_t fn;		/* number of frames (16-bit) */
};

struct omap_desc {
	struct virt_dma_desc vd;
	enum dma_transfer_direction dir;
	dma_addr_t dev_addr;

	int16_t fi;		/* for OMAP_DMA_SYNC_PACKET */
	uint8_t es;		/* OMAP_DMA_DATA_TYPE_xxx */
	uint8_t sync_mode;	/* OMAP_DMA_SYNC_xxx */
	uint8_t sync_type;	/* OMAP_DMA_xxx_SYNC* */
	uint8_t periph_port;	/* Peripheral port */

	unsigned sglen;
	struct omap_sg sg[0];
};

static const unsigned es_bytes[] = {
	[OMAP_DMA_DATA_TYPE_S8] = 1,
	[OMAP_DMA_DATA_TYPE_S16] = 2,
	[OMAP_DMA_DATA_TYPE_S32] = 4,
};

static inline struct omap_dmadev *to_omap_dma_dev(struct dma_device *d)
{
	return container_of(d, struct omap_dmadev, ddev);
}

static inline struct omap_chan *to_omap_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct omap_chan, vc.chan);
}

static inline struct omap_desc *to_omap_dma_desc(struct dma_async_tx_descriptor *t)
{
	return container_of(t, struct omap_desc, vd.tx);
}

static void omap_dma_desc_free(struct virt_dma_desc *vd)
{
	kfree(container_of(vd, struct omap_desc, vd));
}

static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d,
	unsigned idx)
{
	struct omap_sg *sg = d->sg + idx;

	if (d->dir == DMA_DEV_TO_MEM)
		omap_set_dma_dest_params(c->dma_ch, OMAP_DMA_PORT_EMIFF,
			OMAP_DMA_AMODE_POST_INC, sg->addr, 0, 0);
	else
		omap_set_dma_src_params(c->dma_ch, OMAP_DMA_PORT_EMIFF,
			OMAP_DMA_AMODE_POST_INC, sg->addr, 0, 0);

	omap_set_dma_transfer_params(c->dma_ch, d->es, sg->en, sg->fn,
		d->sync_mode, c->dma_sig, d->sync_type);

	omap_start_dma(c->dma_ch);
}

static void omap_dma_start_desc(struct omap_chan *c)
{
	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
	struct omap_desc *d;

	if (!vd) {
		c->desc = NULL;
		return;
	}

	list_del(&vd->node);

	c->desc = d = to_omap_dma_desc(&vd->tx);
	c->sgidx = 0;

	if (d->dir == DMA_DEV_TO_MEM)
		omap_set_dma_src_params(c->dma_ch, d->periph_port,
			OMAP_DMA_AMODE_CONSTANT, d->dev_addr, 0, d->fi);
	else
		omap_set_dma_dest_params(c->dma_ch, d->periph_port,
			OMAP_DMA_AMODE_CONSTANT, d->dev_addr, 0, d->fi);

	omap_dma_start_sg(c, d, 0);
}

static void omap_dma_callback(int ch, u16 status, void *data)
{
	struct omap_chan *c = data;
	struct omap_desc *d;
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	d = c->desc;
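	/*
	 * Completion handling: for a non-cyclic transfer, either kick
	 * off the next sg entry of the current descriptor, or mark it
	 * complete and start the next queued descriptor.  A cyclic
	 * transfer only notifies the client; the hardware keeps looping.
	 */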
	if (d) {
		if (!c->cyclic) {
			if (++c->sgidx < d->sglen) {
				omap_dma_start_sg(c, d, c->sgidx);
			} else {
				omap_dma_start_desc(c);
				vchan_cookie_complete(&d->vd);
			}
		} else {
			vchan_cyclic_callback(&d->vd);
		}
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
}

/*
 * This callback schedules all pending channels.  We could be more
 * clever here by postponing allocation of the real DMA channels to
 * this point, and freeing them when our virtual channel becomes idle.
 *
 * We would then need to deal with 'all channels in-use'
 */
static void omap_dma_sched(unsigned long data)
{
	struct omap_dmadev *d = (struct omap_dmadev *)data;
	LIST_HEAD(head);

	spin_lock_irq(&d->lock);
	list_splice_tail_init(&d->pending, &head);
	spin_unlock_irq(&d->lock);

	while (!list_empty(&head)) {
		struct omap_chan *c = list_first_entry(&head,
			struct omap_chan, node);

		spin_lock_irq(&c->vc.lock);
		list_del_init(&c->node);
		omap_dma_start_desc(c);
		spin_unlock_irq(&c->vc.lock);
	}
}

static int omap_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);

	dev_info(c->vc.chan.device->dev, "allocating channel for %u\n", c->dma_sig);

	return omap_request_dma(c->dma_sig, "DMA engine",
		omap_dma_callback, c, &c->dma_ch);
}

static void omap_dma_free_chan_resources(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);

	vchan_free_chan_resources(&c->vc);
	omap_free_dma(c->dma_ch);

	dev_info(c->vc.chan.device->dev, "freeing channel for %u\n", c->dma_sig);
}

static size_t omap_dma_sg_size(struct omap_sg *sg)
{
	return sg->en * sg->fn;
}

static size_t omap_dma_desc_size(struct omap_desc *d)
{
	unsigned i;
	size_t size;

	for (size = i = 0; i < d->sglen; i++)
		size += omap_dma_sg_size(&d->sg[i]);

	return size * es_bytes[d->es];
}

static size_t omap_dma_desc_size_pos(struct omap_desc *d, dma_addr_t addr)
{
	unsigned i;
	size_t size, es_size = es_bytes[d->es];

	for (size = i = 0; i < d->sglen; i++) {
		size_t this_size = omap_dma_sg_size(&d->sg[i]) * es_size;

		if (size)
			size += this_size;
		else if (addr >= d->sg[i].addr &&
			 addr < d->sg[i].addr + this_size)
			size += d->sg[i].addr + this_size - addr;
	}
	return size;
}

static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	struct virt_dma_desc *vd;
	enum dma_status ret;
	unsigned long flags;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_SUCCESS || !txstate)
		return ret;

	spin_lock_irqsave(&c->vc.lock, flags);
	vd = vchan_find_desc(&c->vc, cookie);
	if (vd) {
		txstate->residue = omap_dma_desc_size(to_omap_dma_desc(&vd->tx));
	} else if (c->desc && c->desc->vd.tx.cookie == cookie) {
		struct omap_desc *d = c->desc;
		dma_addr_t pos;

		if (d->dir == DMA_MEM_TO_DEV)
			pos = omap_get_dma_src_pos(c->dma_ch);
		else if (d->dir == DMA_DEV_TO_MEM)
			pos = omap_get_dma_dst_pos(c->dma_ch);
		else
			pos = 0;

		txstate->residue = omap_dma_desc_size_pos(d, pos);
	} else {
		txstate->residue = 0;
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);

	return ret;
}
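/*
 * Illustrative sketch (not from this file): a client can read back the
 * residue computed above through the generic dmaengine API; "chan" and
 * "cookie" are assumed to come from the client's own setup.
 *
 *	struct dma_tx_state state;
 *	enum dma_status status;
 *
 *	status = dmaengine_tx_status(chan, cookie, &state);
 *	if (status != DMA_SUCCESS)
 *		pr_debug("bytes remaining: %u\n", state.residue);
 */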
static void omap_dma_issue_pending(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	if (vchan_issue_pending(&c->vc) && !c->desc) {
		struct omap_dmadev *d = to_omap_dma_dev(chan->device);
		spin_lock(&d->lock);
		if (list_empty(&c->node))
			list_add_tail(&c->node, &d->pending);
		spin_unlock(&d->lock);
		tasklet_schedule(&d->task);
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
}

static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned sglen,
	enum dma_transfer_direction dir, unsigned long tx_flags, void *context)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	enum dma_slave_buswidth dev_width;
	struct scatterlist *sgent;
	struct omap_desc *d;
	dma_addr_t dev_addr;
	unsigned i, j = 0, es, en, frame_bytes, sync_type;
	u32 burst;

	if (dir == DMA_DEV_TO_MEM) {
		dev_addr = c->cfg.src_addr;
		dev_width = c->cfg.src_addr_width;
		burst = c->cfg.src_maxburst;
		sync_type = OMAP_DMA_SRC_SYNC;
	} else if (dir == DMA_MEM_TO_DEV) {
		dev_addr = c->cfg.dst_addr;
		dev_width = c->cfg.dst_addr_width;
		burst = c->cfg.dst_maxburst;
		sync_type = OMAP_DMA_DST_SYNC;
	} else {
		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	/* Bus width translates to the element size (ES) */
	switch (dev_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		es = OMAP_DMA_DATA_TYPE_S8;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		es = OMAP_DMA_DATA_TYPE_S16;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		es = OMAP_DMA_DATA_TYPE_S32;
		break;
	default: /* not reached */
		return NULL;
	}

	/* Now allocate and setup the descriptor. */
	d = kzalloc(sizeof(*d) + sglen * sizeof(d->sg[0]), GFP_ATOMIC);
	if (!d)
		return NULL;

	d->dir = dir;
	d->dev_addr = dev_addr;
	d->es = es;
	d->sync_mode = OMAP_DMA_SYNC_FRAME;
	d->sync_type = sync_type;
	d->periph_port = OMAP_DMA_PORT_TIPB;

	/*
	 * Build our scatterlist entries: each contains the address,
	 * the number of elements (EN) in each frame, and the number of
	 * frames (FN).  Number of bytes for this entry = ES * EN * FN.
	 *
	 * Burst size translates to number of elements with frame sync.
	 * Note: DMA engine defines burst to be the number of dev-width
	 * transfers.
	 */
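	/*
	 * Worked example (assumed values): with a 4-byte bus width,
	 * ES = 4 bytes, and maxburst = 16, EN = 16 elements per frame,
	 * so frame_bytes = 4 * 16 = 64.  A 4096-byte sg entry is then
	 * programmed as FN = 4096 / 64 = 64 frames.
	 */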
	en = burst;
	frame_bytes = es_bytes[es] * en;
	for_each_sg(sgl, sgent, sglen, i) {
		d->sg[j].addr = sg_dma_address(sgent);
		d->sg[j].en = en;
		d->sg[j].fn = sg_dma_len(sgent) / frame_bytes;
		j++;
	}

	d->sglen = j;

	return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
}

static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction dir, unsigned long flags,
	void *context)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	enum dma_slave_buswidth dev_width;
	struct omap_desc *d;
	dma_addr_t dev_addr;
	unsigned es, sync_type;
	u32 burst;

	if (dir == DMA_DEV_TO_MEM) {
		dev_addr = c->cfg.src_addr;
		dev_width = c->cfg.src_addr_width;
		burst = c->cfg.src_maxburst;
		sync_type = OMAP_DMA_SRC_SYNC;
	} else if (dir == DMA_MEM_TO_DEV) {
		dev_addr = c->cfg.dst_addr;
		dev_width = c->cfg.dst_addr_width;
		burst = c->cfg.dst_maxburst;
		sync_type = OMAP_DMA_DST_SYNC;
	} else {
		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	/* Bus width translates to the element size (ES) */
	switch (dev_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		es = OMAP_DMA_DATA_TYPE_S8;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		es = OMAP_DMA_DATA_TYPE_S16;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		es = OMAP_DMA_DATA_TYPE_S32;
		break;
	default: /* not reached */
		return NULL;
	}

	/* Now allocate and setup the descriptor. */
	d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
	if (!d)
		return NULL;

	d->dir = dir;
	d->dev_addr = dev_addr;
	d->fi = burst;
	d->es = es;
	if (burst)
		d->sync_mode = OMAP_DMA_SYNC_PACKET;
	else
		d->sync_mode = OMAP_DMA_SYNC_ELEMENT;
	d->sync_type = sync_type;
	d->periph_port = OMAP_DMA_PORT_MPUI;
	d->sg[0].addr = buf_addr;
	d->sg[0].en = period_len / es_bytes[es];
	d->sg[0].fn = buf_len / period_len;
	d->sglen = 1;

	if (!c->cyclic) {
		c->cyclic = true;
		omap_dma_link_lch(c->dma_ch, c->dma_ch);

		if (flags & DMA_PREP_INTERRUPT)
			omap_enable_dma_irq(c->dma_ch, OMAP_DMA_FRAME_IRQ);

		omap_disable_dma_irq(c->dma_ch, OMAP_DMA_BLOCK_IRQ);
	}

	if (dma_omap2plus()) {
		omap_set_dma_src_burst_mode(c->dma_ch, OMAP_DMA_DATA_BURST_16);
		omap_set_dma_dest_burst_mode(c->dma_ch, OMAP_DMA_DATA_BURST_16);
	}

	return vchan_tx_prep(&c->vc, &d->vd, flags);
}

static int omap_dma_slave_config(struct omap_chan *c, struct dma_slave_config *cfg)
{
	if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
	    cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
		return -EINVAL;

	memcpy(&c->cfg, cfg, sizeof(c->cfg));

	return 0;
}
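/*
 * Illustrative sketch (not from this file): clients reach the terminate
 * path below through the generic dmaengine helper, which wraps the
 * device_control() hook:
 *
 *	dmaengine_terminate_all(chan);
 */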
static int omap_dma_terminate_all(struct omap_chan *c)
{
	struct omap_dmadev *d = to_omap_dma_dev(c->vc.chan.device);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&c->vc.lock, flags);

	/* Prevent this channel being scheduled */
	spin_lock(&d->lock);
	list_del_init(&c->node);
	spin_unlock(&d->lock);

	/*
	 * Stop DMA activity: we assume the callback will not be called
	 * after omap_stop_dma() returns (even if it does, it will see
	 * c->desc is NULL and exit.)
	 */
	if (c->desc) {
		c->desc = NULL;
		/* Avoid stopping the dma twice */
		if (!c->paused)
			omap_stop_dma(c->dma_ch);
	}

	if (c->cyclic) {
		c->cyclic = false;
		c->paused = false;
		omap_dma_unlink_lch(c->dma_ch, c->dma_ch);
	}

	vchan_get_all_descriptors(&c->vc, &head);
	spin_unlock_irqrestore(&c->vc.lock, flags);
	vchan_dma_desc_free_list(&c->vc, &head);

	return 0;
}

static int omap_dma_pause(struct omap_chan *c)
{
	/* Pause/Resume only allowed with cyclic mode */
	if (!c->cyclic)
		return -EINVAL;

	if (!c->paused) {
		omap_stop_dma(c->dma_ch);
		c->paused = true;
	}

	return 0;
}

static int omap_dma_resume(struct omap_chan *c)
{
	/* Pause/Resume only allowed with cyclic mode */
	if (!c->cyclic)
		return -EINVAL;

	if (c->paused) {
		omap_start_dma(c->dma_ch);
		c->paused = false;
	}

	return 0;
}
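/*
 * Illustrative sketch (not from this file): pause/resume are likewise
 * driven through the generic helpers; both return -EINVAL here unless
 * the channel is running a cyclic transfer.  "dev" is the client's
 * device, shown only for illustration.
 *
 *	if (dmaengine_pause(chan))
 *		dev_warn(dev, "pause only supported for cyclic DMA\n");
 *	...
 *	dmaengine_resume(chan);
 */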
static int omap_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
	unsigned long arg)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	int ret;

	switch (cmd) {
	case DMA_SLAVE_CONFIG:
		ret = omap_dma_slave_config(c, (struct dma_slave_config *)arg);
		break;

	case DMA_TERMINATE_ALL:
		ret = omap_dma_terminate_all(c);
		break;

	case DMA_PAUSE:
		ret = omap_dma_pause(c);
		break;

	case DMA_RESUME:
		ret = omap_dma_resume(c);
		break;

	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int omap_dma_chan_init(struct omap_dmadev *od, int dma_sig)
{
	struct omap_chan *c;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return -ENOMEM;

	c->dma_sig = dma_sig;
	c->vc.desc_free = omap_dma_desc_free;
	vchan_init(&c->vc, &od->ddev);
	INIT_LIST_HEAD(&c->node);

	od->ddev.chancnt++;

	return 0;
}

static void omap_dma_free(struct omap_dmadev *od)
{
	tasklet_kill(&od->task);
	while (!list_empty(&od->ddev.channels)) {
		struct omap_chan *c = list_first_entry(&od->ddev.channels,
			struct omap_chan, vc.chan.device_node);

		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
		kfree(c);
	}
	kfree(od);
}

static int omap_dma_probe(struct platform_device *pdev)
{
	struct omap_dmadev *od;
	int rc, i;

	od = kzalloc(sizeof(*od), GFP_KERNEL);
	if (!od)
		return -ENOMEM;

	dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
	dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
	od->ddev.device_alloc_chan_resources = omap_dma_alloc_chan_resources;
	od->ddev.device_free_chan_resources = omap_dma_free_chan_resources;
	od->ddev.device_tx_status = omap_dma_tx_status;
	od->ddev.device_issue_pending = omap_dma_issue_pending;
	od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg;
	od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic;
	od->ddev.device_control = omap_dma_control;
	od->ddev.dev = &pdev->dev;
	INIT_LIST_HEAD(&od->ddev.channels);
	INIT_LIST_HEAD(&od->pending);
	spin_lock_init(&od->lock);

	tasklet_init(&od->task, omap_dma_sched, (unsigned long)od);

	for (i = 0; i < 127; i++) {
		rc = omap_dma_chan_init(od, i);
		if (rc) {
			omap_dma_free(od);
			return rc;
		}
	}

	rc = dma_async_device_register(&od->ddev);
	if (rc) {
		pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n",
			rc);
		omap_dma_free(od);
	} else {
		platform_set_drvdata(pdev, od);
	}

	dev_info(&pdev->dev, "OMAP DMA engine driver\n");

	return rc;
}

static int omap_dma_remove(struct platform_device *pdev)
{
	struct omap_dmadev *od = platform_get_drvdata(pdev);

	dma_async_device_unregister(&od->ddev);
	omap_dma_free(od);

	return 0;
}

static struct platform_driver omap_dma_driver = {
	.probe	= omap_dma_probe,
	.remove	= omap_dma_remove,
	.driver = {
		.name	= "omap-dma-engine",
		.owner	= THIS_MODULE,
	},
};

bool omap_dma_filter_fn(struct dma_chan *chan, void *param)
{
	if (chan->device->dev->driver == &omap_dma_driver.driver) {
		struct omap_chan *c = to_omap_dma_chan(chan);
		unsigned req = *(unsigned *)param;

		return req == c->dma_sig;
	}
	return false;
}
EXPORT_SYMBOL_GPL(omap_dma_filter_fn);

static int omap_dma_init(void)
{
	return platform_driver_register(&omap_dma_driver);
}
subsys_initcall(omap_dma_init);

static void __exit omap_dma_exit(void)
{
	platform_driver_unregister(&omap_dma_driver);
}
module_exit(omap_dma_exit);

MODULE_AUTHOR("Russell King");
MODULE_LICENSE("GPL");
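/*
 * Illustrative sketch (not from this file): a client driver would
 * typically claim a channel by OMAP DMA request line using the filter
 * above; "req" here is a hypothetical request number that would come
 * from platform data.
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	unsigned req = 42;	// hypothetical DMA request line
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, omap_dma_filter_fn, &req);
 */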