Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
drivers/dma/s3c24xx-dma.c at v3.15 · 1321 lines · 36 kB
/*
 * S3C24XX DMA handling
 *
 * Copyright (c) 2013 Heiko Stuebner <heiko@sntech.de>
 *
 * based on amba-pl08x.c
 *
 * Copyright (c) 2006 ARM Ltd.
 * Copyright (c) 2010 ST-Ericsson SA
 *
 * Author: Peter Pearse <peter.pearse@arm.com>
 * Author: Linus Walleij <linus.walleij@stericsson.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * The DMA controllers in S3C24XX SoCs have a varying number of DMA signals
 * that can be routed to any of the 4 to 8 hardware-channels.
 *
 * Therefore on these DMA controllers the number of channels
 * and the number of incoming DMA signals are two totally different things.
 * It is usually not possible to theoretically handle all physical signals,
 * so a multiplexing scheme with possible denial of use is necessary.
 *
 * Open items:
 * - bursts
 */

#include <linux/platform_device.h>
#include <linux/types.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/platform_data/dma-s3c24xx.h>

#include "dmaengine.h"
#include "virt-dma.h"

#define MAX_DMA_CHANNELS	8

#define S3C24XX_DISRC			0x00
#define S3C24XX_DISRCC			0x04
#define S3C24XX_DISRCC_INC_INCREMENT	0
#define S3C24XX_DISRCC_INC_FIXED	BIT(0)
#define S3C24XX_DISRCC_LOC_AHB		0
#define S3C24XX_DISRCC_LOC_APB		BIT(1)

#define S3C24XX_DIDST			0x08
#define S3C24XX_DIDSTC			0x0c
#define S3C24XX_DIDSTC_INC_INCREMENT	0
#define S3C24XX_DIDSTC_INC_FIXED	BIT(0)
#define S3C24XX_DIDSTC_LOC_AHB		0
#define S3C24XX_DIDSTC_LOC_APB		BIT(1)
#define S3C24XX_DIDSTC_INT_TC0		0
#define S3C24XX_DIDSTC_INT_RELOAD	BIT(2)

#define S3C24XX_DCON			0x10

#define S3C24XX_DCON_TC_MASK		0xfffff
#define S3C24XX_DCON_DSZ_BYTE		(0 << 20)
#define S3C24XX_DCON_DSZ_HALFWORD	(1 << 20)
#define S3C24XX_DCON_DSZ_WORD		(2 << 20)
#define S3C24XX_DCON_DSZ_MASK		(3 << 20)
#define S3C24XX_DCON_DSZ_SHIFT		20
#define S3C24XX_DCON_AUTORELOAD		0
#define S3C24XX_DCON_NORELOAD		BIT(22)
#define S3C24XX_DCON_HWTRIG		BIT(23)
#define S3C24XX_DCON_HWSRC_SHIFT	24
#define S3C24XX_DCON_SERV_SINGLE	0
#define S3C24XX_DCON_SERV_WHOLE		BIT(27)
#define S3C24XX_DCON_TSZ_UNIT		0
#define S3C24XX_DCON_TSZ_BURST4		BIT(28)
#define S3C24XX_DCON_INT		BIT(29)
#define S3C24XX_DCON_SYNC_PCLK		0
#define S3C24XX_DCON_SYNC_HCLK		BIT(30)
#define S3C24XX_DCON_DEMAND		0
#define S3C24XX_DCON_HANDSHAKE		BIT(31)

#define S3C24XX_DSTAT			0x14
#define S3C24XX_DSTAT_STAT_BUSY		BIT(20)
#define S3C24XX_DSTAT_CURRTC_MASK	0xfffff

#define S3C24XX_DMASKTRIG		0x20
#define S3C24XX_DMASKTRIG_SWTRIG	BIT(0)
#define S3C24XX_DMASKTRIG_ON		BIT(1)
#define S3C24XX_DMASKTRIG_STOP		BIT(2)

#define S3C24XX_DMAREQSEL		0x24
#define S3C24XX_DMAREQSEL_HW		BIT(0)

/*
 * S3C2410, S3C2440 and S3C2442 SoCs cannot select any physical channel
 * for a DMA source. Instead only specific channels are valid.
 * All of these SoCs have 4 physical channels and the number of request
 * source bits is 3. Additionally we also need 1 bit to mark the channel
 * as valid.
 * Therefore we separate the chansel element of the channel data into 4
 * parts of 4 bits each, to hold the information if the channel is valid
 * and the hw request source to use.
 *
 * Example:
 * SDI is valid on channels 0, 2 and 3 - with varying hw request sources.
 * For it the chansel field would look like
 *
 * ((BIT(3) | 1) << 3 * 4) | // channel 3, with request source 1
 * ((BIT(3) | 2) << 2 * 4) | // channel 2, with request source 2
 * ((BIT(3) | 2) << 0 * 4)   // channel 0, with request source 2
 */
#define S3C24XX_CHANSEL_WIDTH		4
#define S3C24XX_CHANSEL_VALID		BIT(3)
#define S3C24XX_CHANSEL_REQ_MASK	7

/*
 * struct soc_data - vendor-specific config parameters for individual SoCs
 * @stride: spacing between the registers of each channel
 * @has_reqsel: does the controller use the newer request selection mechanism
 * @has_clocks: are controllable dma-clocks present
 */
struct soc_data {
	int stride;
	bool has_reqsel;
	bool has_clocks;
};

/*
 * enum s3c24xx_dma_chan_state - holds the virtual channel states
 * @S3C24XX_DMA_CHAN_IDLE: the channel is idle
 * @S3C24XX_DMA_CHAN_RUNNING: the channel has allocated a physical transport
 * channel and is running a transfer on it
 * @S3C24XX_DMA_CHAN_WAITING: the channel is waiting for a physical transport
 * channel to become available (only pertains to memcpy channels)
 */
enum s3c24xx_dma_chan_state {
	S3C24XX_DMA_CHAN_IDLE,
	S3C24XX_DMA_CHAN_RUNNING,
	S3C24XX_DMA_CHAN_WAITING,
};

/*
 * struct s3c24xx_sg - structure containing data per sg
 * @src_addr: src address of sg
 * @dst_addr: dst address of sg
 * @len: transfer len in bytes
 * @node: node for txd's dsg_list
 */
struct s3c24xx_sg {
	dma_addr_t src_addr;
	dma_addr_t dst_addr;
	size_t len;
	struct list_head node;
};

/*
 * struct s3c24xx_txd - wrapper for struct dma_async_tx_descriptor
 * @vd: virtual DMA descriptor
 * @dsg_list: list of children sg's
 * @at: sg currently being transferred
 * @width: transfer width
 * @disrcc: value for source control register
 * @didstc: value for destination control register
 * @dcon: base value for dcon register
 */
struct s3c24xx_txd {
	struct virt_dma_desc vd;
	struct list_head dsg_list;
	struct list_head *at;
	u8 width;
	u32 disrcc;
	u32 didstc;
	u32 dcon;
};

struct s3c24xx_dma_chan;

/*
 * struct s3c24xx_dma_phy - holder for the physical channels
 * @id: physical index to this channel
 * @valid: does the channel have all required elements
 * @base: virtual memory base (remapped) for this channel
 * @irq: interrupt for this channel
 * @clk: clock for this channel
 * @lock: a lock to use when altering an instance of this struct
 * @serving: virtual channel currently being served by this physical channel
 * @host: a pointer to the host (internal use)
 */
struct s3c24xx_dma_phy {
	unsigned int id;
	bool valid;
	void __iomem *base;
	int irq;
	struct clk *clk;
	spinlock_t lock;
	struct s3c24xx_dma_chan *serving;
	struct s3c24xx_dma_engine *host;
};

/*
 * struct s3c24xx_dma_chan - this structure wraps a DMA ENGINE channel
 * @id: the id of the channel
 * @name: name of the channel
 * @vc: wrapped virtual channel
 * @phy: the physical channel utilized by this channel, if there is one
 * @runtime_addr: address for RX/TX according to the runtime config
 * @at: active transaction on this channel
 * @lock: a lock for this channel data
 * @host: a pointer to the host (internal use)
 * @state: whether the channel is idle, running etc
 * @slave: whether this channel is a device (slave) or for memcpy
 */
struct s3c24xx_dma_chan {
	int id;
	const char *name;
	struct virt_dma_chan vc;
	struct s3c24xx_dma_phy *phy;
	struct dma_slave_config cfg;
	struct s3c24xx_txd *at;
	struct s3c24xx_dma_engine *host;
	enum s3c24xx_dma_chan_state state;
	bool slave;
};

/*
 * struct s3c24xx_dma_engine - the local state holder for the S3C24XX
 * @pdev: the corresponding platform device
 * @pdata: platform data passed in from the platform/machine
 * @base: virtual memory base (remapped)
 * @slave: slave engine for this instance
 * @memcpy: memcpy engine for this instance
 * @phy_chans: array of data for the physical channels
 */
struct s3c24xx_dma_engine {
	struct platform_device *pdev;
	const struct s3c24xx_dma_platdata *pdata;
	struct soc_data *sdata;
	void __iomem *base;
	struct dma_device slave;
	struct dma_device memcpy;
	struct s3c24xx_dma_phy *phy_chans;
};

/*
 * Physical channel handling
 */

/*
 * Check whether a certain channel is busy or not.
 */
static int s3c24xx_dma_phy_busy(struct s3c24xx_dma_phy *phy)
{
	unsigned int val = readl(phy->base + S3C24XX_DSTAT);
	return val & S3C24XX_DSTAT_STAT_BUSY;
}

static bool s3c24xx_dma_phy_valid(struct s3c24xx_dma_chan *s3cchan,
				  struct s3c24xx_dma_phy *phy)
{
	struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
	const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata;
	struct s3c24xx_dma_channel *cdata = &pdata->channels[s3cchan->id];
	int phyvalid;

	/* every phy is valid for memcopy channels */
	if (!s3cchan->slave)
		return true;

	/* On newer variants all phys can be used for all virtual channels */
	if (s3cdma->sdata->has_reqsel)
		return true;

	phyvalid = (cdata->chansel >> (phy->id * S3C24XX_CHANSEL_WIDTH));
	return (phyvalid & S3C24XX_CHANSEL_VALID) ? true : false;
}

/*
 * Allocate a physical channel for a virtual channel
 *
 * Try to locate a physical channel to be used for this transfer. If all
 * are taken return NULL and the requester will have to cope by using
 * some fallback PIO mode or retrying later.
 */
static
struct s3c24xx_dma_phy *s3c24xx_dma_get_phy(struct s3c24xx_dma_chan *s3cchan)
{
	struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
	const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata;
	struct s3c24xx_dma_channel *cdata;
	struct s3c24xx_dma_phy *phy = NULL;
	unsigned long flags;
	int i;
	int ret;

	if (s3cchan->slave)
		cdata = &pdata->channels[s3cchan->id];

	for (i = 0; i < s3cdma->pdata->num_phy_channels; i++) {
		phy = &s3cdma->phy_chans[i];

		if (!phy->valid)
			continue;

		if (!s3c24xx_dma_phy_valid(s3cchan, phy))
			continue;

		spin_lock_irqsave(&phy->lock, flags);

		if (!phy->serving) {
			phy->serving = s3cchan;
			spin_unlock_irqrestore(&phy->lock, flags);
			break;
		}

		spin_unlock_irqrestore(&phy->lock, flags);
	}

	/* No physical channel available, cope with it */
	if (i == s3cdma->pdata->num_phy_channels) {
		dev_warn(&s3cdma->pdev->dev, "no phy channel available\n");
		return NULL;
	}

	/* start the phy clock */
	if (s3cdma->sdata->has_clocks) {
		ret = clk_enable(phy->clk);
		if (ret) {
			dev_err(&s3cdma->pdev->dev, "could not enable clock for channel %d, err %d\n",
				phy->id, ret);
			phy->serving = NULL;
			return NULL;
		}
	}

	return phy;
}

/*
 * Mark the physical channel as free.
 *
 * This drops the link between the physical and virtual channel.
 */
static inline void s3c24xx_dma_put_phy(struct s3c24xx_dma_phy *phy)
{
	struct s3c24xx_dma_engine *s3cdma = phy->host;

	if (s3cdma->sdata->has_clocks)
		clk_disable(phy->clk);

	phy->serving = NULL;
}

/*
 * Stops the channel by writing the stop bit.
 * This should not be used for an on-going transfer, but as a method of
 * shutting down a channel (eg, when it's no longer used) or terminating a
 * transfer.
 */
static void s3c24xx_dma_terminate_phy(struct s3c24xx_dma_phy *phy)
{
	writel(S3C24XX_DMASKTRIG_STOP, phy->base + S3C24XX_DMASKTRIG);
}

/*
 * Virtual channel handling
 */

static inline
struct s3c24xx_dma_chan *to_s3c24xx_dma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct s3c24xx_dma_chan, vc.chan);
}

static u32 s3c24xx_dma_getbytes_chan(struct s3c24xx_dma_chan *s3cchan)
{
	struct s3c24xx_dma_phy *phy = s3cchan->phy;
	struct s3c24xx_txd *txd = s3cchan->at;
	u32 tc = readl(phy->base + S3C24XX_DSTAT) & S3C24XX_DSTAT_CURRTC_MASK;

	return tc * txd->width;
}

static int s3c24xx_dma_set_runtime_config(struct s3c24xx_dma_chan *s3cchan,
					  struct dma_slave_config *config)
{
	if (!s3cchan->slave)
		return -EINVAL;

	/* Reject definitely invalid configurations */
	if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
	    config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
		return -EINVAL;

	s3cchan->cfg = *config;

	return 0;
}

/*
 * Transfer handling
 */

static inline
struct s3c24xx_txd *to_s3c24xx_txd(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct s3c24xx_txd, vd.tx);
}

static struct s3c24xx_txd *s3c24xx_dma_get_txd(void)
{
	struct s3c24xx_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT);

	if (txd) {
		INIT_LIST_HEAD(&txd->dsg_list);
		txd->dcon = S3C24XX_DCON_INT | S3C24XX_DCON_NORELOAD;
	}

	return txd;
}

static void s3c24xx_dma_free_txd(struct s3c24xx_txd *txd)
{
	struct s3c24xx_sg *dsg, *_dsg;

	list_for_each_entry_safe(dsg, _dsg, &txd->dsg_list, node) {
		list_del(&dsg->node);
		kfree(dsg);
	}

	kfree(txd);
}

static void s3c24xx_dma_start_next_sg(struct s3c24xx_dma_chan *s3cchan,
				      struct s3c24xx_txd *txd)
{
	struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
	struct s3c24xx_dma_phy *phy = s3cchan->phy;
	const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata;
	struct s3c24xx_sg *dsg = list_entry(txd->at, struct s3c24xx_sg, node);
	u32 dcon = txd->dcon;
	u32 val;

	/* transfer-size and -count from len and width */
	switch (txd->width) {
	case 1:
		dcon |= S3C24XX_DCON_DSZ_BYTE | dsg->len;
		break;
	case 2:
		dcon |= S3C24XX_DCON_DSZ_HALFWORD | (dsg->len / 2);
		break;
	case 4:
		dcon |= S3C24XX_DCON_DSZ_WORD | (dsg->len / 4);
		break;
	}

	if (s3cchan->slave) {
		struct s3c24xx_dma_channel *cdata =
			&pdata->channels[s3cchan->id];

		if (s3cdma->sdata->has_reqsel) {
			writel_relaxed((cdata->chansel << 1) |
				       S3C24XX_DMAREQSEL_HW,
				       phy->base + S3C24XX_DMAREQSEL);
		} else {
			int csel = cdata->chansel >> (phy->id *
						      S3C24XX_CHANSEL_WIDTH);

			csel &= S3C24XX_CHANSEL_REQ_MASK;
			dcon |= csel << S3C24XX_DCON_HWSRC_SHIFT;
			dcon |= S3C24XX_DCON_HWTRIG;
		}
	} else {
		if (s3cdma->sdata->has_reqsel)
			writel_relaxed(0, phy->base + S3C24XX_DMAREQSEL);
	}

	writel_relaxed(dsg->src_addr, phy->base + S3C24XX_DISRC);
	writel_relaxed(txd->disrcc, phy->base + S3C24XX_DISRCC);
	writel_relaxed(dsg->dst_addr, phy->base + S3C24XX_DIDST);
	writel_relaxed(txd->didstc, phy->base + S3C24XX_DIDSTC);
	writel_relaxed(dcon, phy->base + S3C24XX_DCON);

	val = readl_relaxed(phy->base + S3C24XX_DMASKTRIG);
	val &= ~S3C24XX_DMASKTRIG_STOP;
	val |= S3C24XX_DMASKTRIG_ON;

	/* trigger the dma operation for memcpy transfers */
	if (!s3cchan->slave)
		val |= S3C24XX_DMASKTRIG_SWTRIG;

	writel(val, phy->base + S3C24XX_DMASKTRIG);
}

/*
 * Set the initial DMA register values and start first sg.
 */
static void s3c24xx_dma_start_next_txd(struct s3c24xx_dma_chan *s3cchan)
{
	struct s3c24xx_dma_phy *phy = s3cchan->phy;
	struct virt_dma_desc *vd = vchan_next_desc(&s3cchan->vc);
	struct s3c24xx_txd *txd = to_s3c24xx_txd(&vd->tx);

	list_del(&txd->vd.node);

	s3cchan->at = txd;

	/* Wait for channel inactive */
	while (s3c24xx_dma_phy_busy(phy))
		cpu_relax();

	/* point to the first element of the sg list */
	txd->at = txd->dsg_list.next;
	s3c24xx_dma_start_next_sg(s3cchan, txd);
}

static void s3c24xx_dma_free_txd_list(struct s3c24xx_dma_engine *s3cdma,
				      struct s3c24xx_dma_chan *s3cchan)
{
	LIST_HEAD(head);

	vchan_get_all_descriptors(&s3cchan->vc, &head);
	vchan_dma_desc_free_list(&s3cchan->vc, &head);
}

/*
 * Try to allocate a physical channel. When successful, assign it to
 * this virtual channel, and initiate the next descriptor. The
 * virtual channel lock must be held at this point.
 */
static void s3c24xx_dma_phy_alloc_and_start(struct s3c24xx_dma_chan *s3cchan)
{
	struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
	struct s3c24xx_dma_phy *phy;

	phy = s3c24xx_dma_get_phy(s3cchan);
	if (!phy) {
		dev_dbg(&s3cdma->pdev->dev, "no physical channel available for xfer on %s\n",
			s3cchan->name);
		s3cchan->state = S3C24XX_DMA_CHAN_WAITING;
		return;
	}

	dev_dbg(&s3cdma->pdev->dev, "allocated physical channel %d for xfer on %s\n",
		phy->id, s3cchan->name);

	s3cchan->phy = phy;
	s3cchan->state = S3C24XX_DMA_CHAN_RUNNING;

	s3c24xx_dma_start_next_txd(s3cchan);
}

static void s3c24xx_dma_phy_reassign_start(struct s3c24xx_dma_phy *phy,
					   struct s3c24xx_dma_chan *s3cchan)
{
	struct s3c24xx_dma_engine *s3cdma = s3cchan->host;

	dev_dbg(&s3cdma->pdev->dev, "reassigned physical channel %d for xfer on %s\n",
		phy->id, s3cchan->name);

	/*
	 * We do this without taking the lock; we're really only concerned
	 * about whether this pointer is NULL or not, and we're guaranteed
	 * that this will only be called when it _already_ is non-NULL.
	 */
	phy->serving = s3cchan;
	s3cchan->phy = phy;
	s3cchan->state = S3C24XX_DMA_CHAN_RUNNING;
	s3c24xx_dma_start_next_txd(s3cchan);
}

/*
 * Free a physical DMA channel, potentially reallocating it to another
 * virtual channel if we have any pending.
 */
static void s3c24xx_dma_phy_free(struct s3c24xx_dma_chan *s3cchan)
{
	struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
	struct s3c24xx_dma_chan *p, *next;

retry:
	next = NULL;

	/* Find a waiting virtual channel for the next transfer. */
	list_for_each_entry(p, &s3cdma->memcpy.channels, vc.chan.device_node)
		if (p->state == S3C24XX_DMA_CHAN_WAITING) {
			next = p;
			break;
		}

	if (!next) {
		list_for_each_entry(p, &s3cdma->slave.channels,
				    vc.chan.device_node)
			if (p->state == S3C24XX_DMA_CHAN_WAITING &&
			    s3c24xx_dma_phy_valid(p, s3cchan->phy)) {
				next = p;
				break;
			}
	}

	/* Ensure that the physical channel is stopped */
	s3c24xx_dma_terminate_phy(s3cchan->phy);

	if (next) {
		bool success;

		/*
		 * Eww. We know this isn't going to deadlock
		 * but lockdep probably doesn't.
		 */
		spin_lock(&next->vc.lock);
		/* Re-check the state now that we have the lock */
		success = next->state == S3C24XX_DMA_CHAN_WAITING;
		if (success)
			s3c24xx_dma_phy_reassign_start(s3cchan->phy, next);
		spin_unlock(&next->vc.lock);

		/* If the state changed, try to find another channel */
		if (!success)
			goto retry;
	} else {
		/* No more jobs, so free up the physical channel */
		s3c24xx_dma_put_phy(s3cchan->phy);
	}

	s3cchan->phy = NULL;
	s3cchan->state = S3C24XX_DMA_CHAN_IDLE;
}

static void s3c24xx_dma_desc_free(struct virt_dma_desc *vd)
{
	struct s3c24xx_txd *txd = to_s3c24xx_txd(&vd->tx);
	struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(vd->tx.chan);

	if (!s3cchan->slave)
		dma_descriptor_unmap(&vd->tx);

	s3c24xx_dma_free_txd(txd);
}

static irqreturn_t s3c24xx_dma_irq(int irq, void *data)
{
	struct s3c24xx_dma_phy *phy = data;
	struct s3c24xx_dma_chan *s3cchan = phy->serving;
	struct s3c24xx_txd *txd;

	dev_dbg(&phy->host->pdev->dev, "interrupt on channel %d\n", phy->id);

	/*
	 * Interrupts happen to notify the completion of a transfer and the
	 * channel should have moved into its stop state already on its own.
	 * Therefore interrupts on channels not bound to a virtual channel
	 * should never happen. Nevertheless send a terminate command to the
	 * channel if the unlikely case happens.
	 */
	if (unlikely(!s3cchan)) {
		dev_err(&phy->host->pdev->dev, "interrupt on unused channel %d\n",
			phy->id);

		s3c24xx_dma_terminate_phy(phy);

		return IRQ_HANDLED;
	}

	spin_lock(&s3cchan->vc.lock);
	txd = s3cchan->at;
	if (txd) {
		/* when more sg's are in this txd, start the next one */
		if (!list_is_last(txd->at, &txd->dsg_list)) {
			txd->at = txd->at->next;
			s3c24xx_dma_start_next_sg(s3cchan, txd);
		} else {
			s3cchan->at = NULL;
			vchan_cookie_complete(&txd->vd);

			/*
			 * And start the next descriptor (if any),
			 * otherwise free this channel.
			 */
			if (vchan_next_desc(&s3cchan->vc))
				s3c24xx_dma_start_next_txd(s3cchan);
			else
				s3c24xx_dma_phy_free(s3cchan);
		}
	}
	spin_unlock(&s3cchan->vc.lock);

	return IRQ_HANDLED;
}

/*
 * The DMA ENGINE API
 */

static int s3c24xx_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			       unsigned long arg)
{
	struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
	struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&s3cchan->vc.lock, flags);

	switch (cmd) {
	case DMA_SLAVE_CONFIG:
		ret = s3c24xx_dma_set_runtime_config(s3cchan,
				(struct dma_slave_config *)arg);
		break;
	case DMA_TERMINATE_ALL:
		if (!s3cchan->phy && !s3cchan->at) {
			dev_err(&s3cdma->pdev->dev, "trying to terminate already stopped channel %d\n",
				s3cchan->id);
			ret = -EINVAL;
			break;
		}

		s3cchan->state = S3C24XX_DMA_CHAN_IDLE;

		/* Mark physical channel as free */
		if (s3cchan->phy)
			s3c24xx_dma_phy_free(s3cchan);

		/* Dequeue current job */
		if (s3cchan->at) {
			s3c24xx_dma_desc_free(&s3cchan->at->vd);
			s3cchan->at = NULL;
		}

		/* Dequeue jobs not yet fired as well */
		s3c24xx_dma_free_txd_list(s3cdma, s3cchan);
		break;
	default:
		/* Unknown command */
		ret = -ENXIO;
		break;
	}

	spin_unlock_irqrestore(&s3cchan->vc.lock, flags);

	return ret;
}

static int s3c24xx_dma_alloc_chan_resources(struct dma_chan *chan)
{
	return 0;
}

static void s3c24xx_dma_free_chan_resources(struct dma_chan *chan)
{
	/* Ensure all queued descriptors are freed */
	vchan_free_chan_resources(to_virt_chan(chan));
}

static enum dma_status s3c24xx_dma_tx_status(struct dma_chan *chan,
		dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
	struct s3c24xx_txd *txd;
	struct s3c24xx_sg *dsg;
	struct virt_dma_desc *vd;
	unsigned long flags;
	enum dma_status ret;
	size_t bytes = 0;

	spin_lock_irqsave(&s3cchan->vc.lock, flags);
	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE) {
		spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
		return ret;
	}

	/*
	 * There's no point calculating the residue if there's
	 * no txstate to store the value.
	 */
	if (!txstate) {
		spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
		return ret;
	}

	vd = vchan_find_desc(&s3cchan->vc, cookie);
	if (vd) {
		/* On the issued list, so hasn't been processed yet */
		txd = to_s3c24xx_txd(&vd->tx);

		list_for_each_entry(dsg, &txd->dsg_list, node)
			bytes += dsg->len;
	} else {
		/*
		 * Currently running, so sum over the pending sg's and
		 * the currently active one.
		 */
		txd = s3cchan->at;

		dsg = list_entry(txd->at, struct s3c24xx_sg, node);
		list_for_each_entry_from(dsg, &txd->dsg_list, node)
			bytes += dsg->len;

		bytes += s3c24xx_dma_getbytes_chan(s3cchan);
	}
	spin_unlock_irqrestore(&s3cchan->vc.lock, flags);

	/*
	 * This cookie not complete yet
	 * Get number of bytes left in the active transactions and queue
	 */
	dma_set_residue(txstate, bytes);

	/* Whether waiting or running, we're in progress */
	return ret;
}

/*
 * Initialize a descriptor to be used by memcpy submit
 */
static struct dma_async_tx_descriptor *s3c24xx_dma_prep_memcpy(
		struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
	struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
	struct s3c24xx_txd *txd;
	struct s3c24xx_sg *dsg;
	int src_mod, dest_mod;

	dev_dbg(&s3cdma->pdev->dev, "prepare memcpy of %zu bytes from %s\n",
		len, s3cchan->name);

	if ((len & S3C24XX_DCON_TC_MASK) != len) {
		dev_err(&s3cdma->pdev->dev, "memcpy size %zu too large\n", len);
		return NULL;
	}

	txd = s3c24xx_dma_get_txd();
	if (!txd)
		return NULL;

	dsg = kzalloc(sizeof(*dsg), GFP_NOWAIT);
	if (!dsg) {
		s3c24xx_dma_free_txd(txd);
		return NULL;
	}
	list_add_tail(&dsg->node, &txd->dsg_list);

	dsg->src_addr = src;
	dsg->dst_addr = dest;
	dsg->len = len;

	/*
	 * Determine a suitable transfer width.
	 * The DMA controller cannot fetch/store information which is not
	 * naturally aligned on the bus, i.e., a 4 byte fetch must start at
	 * an address divisible by 4 - more generally addr % width must be 0.
	 */
	src_mod = src % 4;
	dest_mod = dest % 4;
	switch (len % 4) {
	case 0:
		txd->width = (src_mod == 0 && dest_mod == 0) ? 4 : 1;
		break;
	case 2:
		txd->width = ((src_mod == 2 || src_mod == 0) &&
			      (dest_mod == 2 || dest_mod == 0)) ? 2 : 1;
		break;
	default:
		txd->width = 1;
		break;
	}

	txd->disrcc = S3C24XX_DISRCC_LOC_AHB | S3C24XX_DISRCC_INC_INCREMENT;
	txd->didstc = S3C24XX_DIDSTC_LOC_AHB | S3C24XX_DIDSTC_INC_INCREMENT;
	txd->dcon |= S3C24XX_DCON_DEMAND | S3C24XX_DCON_SYNC_HCLK |
		     S3C24XX_DCON_SERV_WHOLE;

	return vchan_tx_prep(&s3cchan->vc, &txd->vd, flags);
}

static struct dma_async_tx_descriptor *s3c24xx_dma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
	struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
	const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata;
	struct s3c24xx_dma_channel *cdata = &pdata->channels[s3cchan->id];
	struct s3c24xx_txd *txd;
	struct s3c24xx_sg *dsg;
	struct scatterlist *sg;
	dma_addr_t slave_addr;
	u32 hwcfg = 0;
	int tmp;

	dev_dbg(&s3cdma->pdev->dev, "prepare transaction of %d bytes from %s\n",
		sg_dma_len(sgl), s3cchan->name);

	txd = s3c24xx_dma_get_txd();
	if (!txd)
		return NULL;

	if (cdata->handshake)
		txd->dcon |= S3C24XX_DCON_HANDSHAKE;

	switch (cdata->bus) {
	case S3C24XX_DMA_APB:
		txd->dcon |= S3C24XX_DCON_SYNC_PCLK;
		hwcfg |= S3C24XX_DISRCC_LOC_APB;
		break;
	case S3C24XX_DMA_AHB:
		txd->dcon |= S3C24XX_DCON_SYNC_HCLK;
		hwcfg |= S3C24XX_DISRCC_LOC_AHB;
		break;
	}

	/*
	 * Always assume our peripheral destination is a fixed
	 * address in memory.
	 */
	hwcfg |= S3C24XX_DISRCC_INC_FIXED;

	/*
	 * Individual dma operations are requested by the slave,
	 * so serve only single atomic operations (S3C24XX_DCON_SERV_SINGLE).
	 */
	txd->dcon |= S3C24XX_DCON_SERV_SINGLE;

	if (direction == DMA_MEM_TO_DEV) {
		txd->disrcc = S3C24XX_DISRCC_LOC_AHB |
			      S3C24XX_DISRCC_INC_INCREMENT;
		txd->didstc = hwcfg;
		slave_addr = s3cchan->cfg.dst_addr;
		txd->width = s3cchan->cfg.dst_addr_width;
	} else if (direction == DMA_DEV_TO_MEM) {
		txd->disrcc = hwcfg;
		txd->didstc = S3C24XX_DIDSTC_LOC_AHB |
			      S3C24XX_DIDSTC_INC_INCREMENT;
		slave_addr = s3cchan->cfg.src_addr;
		txd->width = s3cchan->cfg.src_addr_width;
	} else {
		s3c24xx_dma_free_txd(txd);
		dev_err(&s3cdma->pdev->dev,
			"direction %d unsupported\n", direction);
		return NULL;
	}

	/* build one dsg per scatterlist entry */
	for_each_sg(sgl, sg, sg_len, tmp) {
		dsg = kzalloc(sizeof(*dsg), GFP_NOWAIT);
		if (!dsg) {
			s3c24xx_dma_free_txd(txd);
			return NULL;
		}
		list_add_tail(&dsg->node, &txd->dsg_list);

		dsg->len = sg_dma_len(sg);
		if (direction == DMA_MEM_TO_DEV) {
			dsg->src_addr = sg_dma_address(sg);
			dsg->dst_addr = slave_addr;
		} else { /* DMA_DEV_TO_MEM */
			dsg->src_addr = slave_addr;
			dsg->dst_addr = sg_dma_address(sg);
		}
	}

	return vchan_tx_prep(&s3cchan->vc, &txd->vd, flags);
}

/*
 * Slave transactions callback to the slave device to allow
 * synchronization of slave DMA signals with the DMAC enable
 */
static void s3c24xx_dma_issue_pending(struct dma_chan *chan)
{
	struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&s3cchan->vc.lock, flags);
	if (vchan_issue_pending(&s3cchan->vc)) {
		if (!s3cchan->phy && s3cchan->state != S3C24XX_DMA_CHAN_WAITING)
			s3c24xx_dma_phy_alloc_and_start(s3cchan);
	}
	spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
}

/*
 * Bringup and teardown
 */

/*
 * Initialise the DMAC memcpy/slave channels.
 * Make a local wrapper to hold required data
 */
static int s3c24xx_dma_init_virtual_channels(struct s3c24xx_dma_engine *s3cdma,
		struct dma_device *dmadev, unsigned int channels, bool slave)
{
	struct s3c24xx_dma_chan *chan;
	int i;

	INIT_LIST_HEAD(&dmadev->channels);

	/*
	 * Register as many memcpy channels as we have physical channels,
	 * we won't always be able to use all but the code will have
	 * to cope with that situation.
	 */
	for (i = 0; i < channels; i++) {
		chan = devm_kzalloc(dmadev->dev, sizeof(*chan), GFP_KERNEL);
		if (!chan) {
			dev_err(dmadev->dev,
				"%s no memory for channel\n", __func__);
			return -ENOMEM;
		}

		chan->id = i;
		chan->host = s3cdma;
		chan->state = S3C24XX_DMA_CHAN_IDLE;

		if (slave) {
			chan->slave = true;
			chan->name = kasprintf(GFP_KERNEL, "slave%d", i);
			if (!chan->name)
				return -ENOMEM;
		} else {
			chan->name = kasprintf(GFP_KERNEL, "memcpy%d", i);
			if (!chan->name)
				return -ENOMEM;
		}
		dev_dbg(dmadev->dev,
			"initialize virtual channel \"%s\"\n",
			chan->name);

		chan->vc.desc_free = s3c24xx_dma_desc_free;
		vchan_init(&chan->vc, dmadev);
	}
	dev_info(dmadev->dev, "initialized %d virtual %s channels\n",
		 i, slave ? "slave" : "memcpy");
	return i;
}

static void s3c24xx_dma_free_virtual_channels(struct dma_device *dmadev)
{
	struct s3c24xx_dma_chan *chan = NULL;
	struct s3c24xx_dma_chan *next;

	list_for_each_entry_safe(chan,
				 next, &dmadev->channels, vc.chan.device_node)
		list_del(&chan->vc.chan.device_node);
}

/* s3c2410, s3c2440 and s3c2442 have a 0x40 stride without separate clocks */
static struct soc_data soc_s3c2410 = {
	.stride = 0x40,
	.has_reqsel = false,
	.has_clocks = false,
};

/* s3c2412 and s3c2413 have a 0x40 stride and dmareqsel mechanism */
static struct soc_data soc_s3c2412 = {
	.stride = 0x40,
	.has_reqsel = true,
	.has_clocks = true,
};

/* s3c2443 and following have a 0x100 stride and dmareqsel mechanism */
static struct soc_data soc_s3c2443 = {
	.stride = 0x100,
	.has_reqsel = true,
	.has_clocks = true,
};

static struct platform_device_id s3c24xx_dma_driver_ids[] = {
	{
		.name		= "s3c2410-dma",
		.driver_data	= (kernel_ulong_t)&soc_s3c2410,
	}, {
		.name		= "s3c2412-dma",
		.driver_data	= (kernel_ulong_t)&soc_s3c2412,
	}, {
		.name		= "s3c2443-dma",
		.driver_data	= (kernel_ulong_t)&soc_s3c2443,
	},
	{ },
};

static struct soc_data *s3c24xx_dma_get_soc_data(struct platform_device *pdev)
{
	return (struct soc_data *)
			platform_get_device_id(pdev)->driver_data;
}

static int s3c24xx_dma_probe(struct platform_device *pdev)
{
	const struct s3c24xx_dma_platdata *pdata = dev_get_platdata(&pdev->dev);
	struct s3c24xx_dma_engine *s3cdma;
	struct soc_data *sdata;
	struct resource *res;
	int ret;
	int i;

	if (!pdata) {
		dev_err(&pdev->dev, "platform data missing\n");
		return -ENODEV;
	}

	/* Basic sanity check */
	if (pdata->num_phy_channels > MAX_DMA_CHANNELS) {
		dev_err(&pdev->dev, "too many dma channels %d, max %d\n",
			pdata->num_phy_channels, MAX_DMA_CHANNELS);
		return -EINVAL;
	}

	sdata = s3c24xx_dma_get_soc_data(pdev);
	if (!sdata)
		return -EINVAL;

	s3cdma = devm_kzalloc(&pdev->dev, sizeof(*s3cdma), GFP_KERNEL);
	if (!s3cdma)
		return -ENOMEM;

	s3cdma->pdev = pdev;
	s3cdma->pdata = pdata;
	s3cdma->sdata = sdata;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	s3cdma->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(s3cdma->base))
		return PTR_ERR(s3cdma->base);

	s3cdma->phy_chans = devm_kzalloc(&pdev->dev,
					 sizeof(struct s3c24xx_dma_phy) *
					 pdata->num_phy_channels,
					 GFP_KERNEL);
	if (!s3cdma->phy_chans)
		return -ENOMEM;

	/* acquire irqs and clocks for all physical channels */
	for (i = 0; i < pdata->num_phy_channels; i++) {
		struct s3c24xx_dma_phy *phy = &s3cdma->phy_chans[i];
		char clk_name[6];

		phy->id = i;
		phy->base = s3cdma->base + (i * sdata->stride);
		phy->host = s3cdma;

		phy->irq = platform_get_irq(pdev, i);
		if (phy->irq < 0) {
			dev_err(&pdev->dev, "failed to get irq %d, err %d\n",
				i, phy->irq);
			continue;
		}

		ret = devm_request_irq(&pdev->dev, phy->irq, s3c24xx_dma_irq,
				       0, pdev->name, phy);
		if (ret) {
			dev_err(&pdev->dev, "Unable to request irq for channel %d, error %d\n",
				i, ret);
			continue;
		}

		if (sdata->has_clocks) {
			sprintf(clk_name, "dma.%d", i);
			phy->clk = devm_clk_get(&pdev->dev, clk_name);
			if (IS_ERR(phy->clk) && sdata->has_clocks) {
				dev_err(&pdev->dev, "unable to acquire clock for channel %d, error %lu",
					i, PTR_ERR(phy->clk));
				continue;
			}

			ret = clk_prepare(phy->clk);
			if (ret) {
				dev_err(&pdev->dev, "clock for phy %d failed, error %d\n",
					i, ret);
				continue;
			}
		}

		spin_lock_init(&phy->lock);
		phy->valid = true;

		dev_dbg(&pdev->dev, "physical channel %d is %s\n",
			i, s3c24xx_dma_phy_busy(phy) ? "BUSY" : "FREE");
	}

	/* Initialize memcpy engine */
	dma_cap_set(DMA_MEMCPY, s3cdma->memcpy.cap_mask);
	dma_cap_set(DMA_PRIVATE, s3cdma->memcpy.cap_mask);
	s3cdma->memcpy.dev = &pdev->dev;
	s3cdma->memcpy.device_alloc_chan_resources =
					s3c24xx_dma_alloc_chan_resources;
	s3cdma->memcpy.device_free_chan_resources =
					s3c24xx_dma_free_chan_resources;
	s3cdma->memcpy.device_prep_dma_memcpy = s3c24xx_dma_prep_memcpy;
	s3cdma->memcpy.device_tx_status = s3c24xx_dma_tx_status;
	s3cdma->memcpy.device_issue_pending = s3c24xx_dma_issue_pending;
	s3cdma->memcpy.device_control = s3c24xx_dma_control;

	/* Initialize slave engine for SoC internal dedicated peripherals */
	dma_cap_set(DMA_SLAVE, s3cdma->slave.cap_mask);
	dma_cap_set(DMA_PRIVATE, s3cdma->slave.cap_mask);
	s3cdma->slave.dev = &pdev->dev;
	s3cdma->slave.device_alloc_chan_resources =
					s3c24xx_dma_alloc_chan_resources;
	s3cdma->slave.device_free_chan_resources =
					s3c24xx_dma_free_chan_resources;
	s3cdma->slave.device_tx_status = s3c24xx_dma_tx_status;
	s3cdma->slave.device_issue_pending = s3c24xx_dma_issue_pending;
	s3cdma->slave.device_prep_slave_sg = s3c24xx_dma_prep_slave_sg;
	s3cdma->slave.device_control = s3c24xx_dma_control;

	/* Register as many memcpy channels as there are physical channels */
	ret = s3c24xx_dma_init_virtual_channels(s3cdma, &s3cdma->memcpy,
						pdata->num_phy_channels, false);
	if (ret <= 0) {
		dev_warn(&pdev->dev,
			 "%s failed to enumerate memcpy channels - %d\n",
			 __func__, ret);
		goto err_memcpy;
	}

	/* Register slave channels */
	ret = s3c24xx_dma_init_virtual_channels(s3cdma, &s3cdma->slave,
						pdata->num_channels, true);
	if (ret <= 0) {
		dev_warn(&pdev->dev,
			 "%s failed to enumerate slave channels - %d\n",
			 __func__, ret);
		goto err_slave;
	}

	ret = dma_async_device_register(&s3cdma->memcpy);
	if (ret) {
		dev_warn(&pdev->dev,
			 "%s failed to register memcpy as an async device - %d\n",
			 __func__, ret);
		goto err_memcpy_reg;
	}

	ret = dma_async_device_register(&s3cdma->slave);
	if (ret) {
		dev_warn(&pdev->dev,
			 "%s failed to register slave as an async device - %d\n",
			 __func__, ret);
		goto err_slave_reg;
	}

	platform_set_drvdata(pdev, s3cdma);
	dev_info(&pdev->dev, "Loaded dma driver with %d physical channels\n",
		 pdata->num_phy_channels);

	return 0;

err_slave_reg:
	dma_async_device_unregister(&s3cdma->memcpy);
err_memcpy_reg:
	s3c24xx_dma_free_virtual_channels(&s3cdma->slave);
err_slave:
	s3c24xx_dma_free_virtual_channels(&s3cdma->memcpy);
err_memcpy:
	if (sdata->has_clocks)
		for (i = 0; i < pdata->num_phy_channels; i++) {
			struct s3c24xx_dma_phy *phy = &s3cdma->phy_chans[i];
			if (phy->valid)
				clk_unprepare(phy->clk);
		}

	return ret;
}

static int s3c24xx_dma_remove(struct platform_device *pdev)
{
	const struct s3c24xx_dma_platdata *pdata = dev_get_platdata(&pdev->dev);
	struct s3c24xx_dma_engine *s3cdma = platform_get_drvdata(pdev);
	struct soc_data *sdata = s3c24xx_dma_get_soc_data(pdev);
	int i;

	dma_async_device_unregister(&s3cdma->slave);
	dma_async_device_unregister(&s3cdma->memcpy);

	s3c24xx_dma_free_virtual_channels(&s3cdma->slave);
	s3c24xx_dma_free_virtual_channels(&s3cdma->memcpy);

	if (sdata->has_clocks)
		for (i = 0; i < pdata->num_phy_channels; i++) {
			struct s3c24xx_dma_phy *phy = &s3cdma->phy_chans[i];
			if (phy->valid)
				clk_unprepare(phy->clk);
		}

	return 0;
}

static struct platform_driver s3c24xx_dma_driver = {
	.driver		= {
		.name	= "s3c24xx-dma",
		.owner	= THIS_MODULE,
	},
	.id_table	= s3c24xx_dma_driver_ids,
	.probe		= s3c24xx_dma_probe,
	.remove		= s3c24xx_dma_remove,
};

module_platform_driver(s3c24xx_dma_driver);

bool s3c24xx_dma_filter(struct dma_chan *chan, void *param)
{
	struct s3c24xx_dma_chan *s3cchan;

	if (chan->device->dev->driver != &s3c24xx_dma_driver.driver)
		return false;

	s3cchan = to_s3c24xx_dma_chan(chan);

	return s3cchan->id == (int)param;
}
EXPORT_SYMBOL(s3c24xx_dma_filter);

MODULE_DESCRIPTION("S3C24XX DMA Driver");
MODULE_AUTHOR("Heiko Stuebner");
MODULE_LICENSE("GPL v2");
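
A hedged usage sketch: a peripheral driver (an SD/MMC host, for example) would typically claim one of the slave channels exported here through the generic dmaengine API, passing s3c24xx_dma_filter() the virtual channel id it was given in the platform data. This is not part of the file above; the helper name and the S3C24XX_DMA_CH_SDI id below are hypothetical placeholders, only the dmaengine calls and s3c24xx_dma_filter() itself come from the kernel.

/* Illustrative only: how a client might request a slave channel. */
#include <linux/dmaengine.h>

extern bool s3c24xx_dma_filter(struct dma_chan *chan, void *param);

#define S3C24XX_DMA_CH_SDI	0	/* hypothetical channel id from platform data */

static struct dma_chan *sdi_request_dma_channel(void)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* s3c24xx_dma_filter() matches on the virtual channel id */
	return dma_request_channel(mask, s3c24xx_dma_filter,
				   (void *)S3C24XX_DMA_CH_SDI);
}

The returned channel would then be configured with dmaengine_slave_config() (which ends up in s3c24xx_dma_set_runtime_config() above) before preparing slave_sg transactions.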