Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v4.3 (1179 lines, 34 kB)
/*
 * DMA controller driver for CSR SiRFprimaII
 *
 * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
 *
 * Licensed under GPLv2 or later.
 */

#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/clk.h>
#include <linux/of_dma.h>
#include <linux/sirfsoc_dma.h>

#include "dmaengine.h"

#define SIRFSOC_DMA_VER_A7V1			1
#define SIRFSOC_DMA_VER_A7V2			2
#define SIRFSOC_DMA_VER_A6			4

#define SIRFSOC_DMA_DESCRIPTORS			16
#define SIRFSOC_DMA_CHANNELS			16
#define SIRFSOC_DMA_TABLE_NUM			256

#define SIRFSOC_DMA_CH_ADDR			0x00
#define SIRFSOC_DMA_CH_XLEN			0x04
#define SIRFSOC_DMA_CH_YLEN			0x08
#define SIRFSOC_DMA_CH_CTRL			0x0C

#define SIRFSOC_DMA_WIDTH_0			0x100
#define SIRFSOC_DMA_CH_VALID			0x140
#define SIRFSOC_DMA_CH_INT			0x144
#define SIRFSOC_DMA_INT_EN			0x148
#define SIRFSOC_DMA_INT_EN_CLR			0x14C
#define SIRFSOC_DMA_CH_LOOP_CTRL		0x150
#define SIRFSOC_DMA_CH_LOOP_CTRL_CLR		0x154
#define SIRFSOC_DMA_WIDTH_ATLAS7		0x10
#define SIRFSOC_DMA_VALID_ATLAS7		0x14
#define SIRFSOC_DMA_INT_ATLAS7			0x18
#define SIRFSOC_DMA_INT_EN_ATLAS7		0x1c
#define SIRFSOC_DMA_LOOP_CTRL_ATLAS7		0x20
#define SIRFSOC_DMA_CUR_DATA_ADDR		0x34
#define SIRFSOC_DMA_MUL_ATLAS7			0x38
#define SIRFSOC_DMA_CH_LOOP_CTRL_ATLAS7		0x158
#define SIRFSOC_DMA_CH_LOOP_CTRL_CLR_ATLAS7	0x15C
#define SIRFSOC_DMA_IOBG_SCMD_EN		0x800
#define SIRFSOC_DMA_EARLY_RESP_SET		0x818
#define SIRFSOC_DMA_EARLY_RESP_CLR		0x81C

#define SIRFSOC_DMA_MODE_CTRL_BIT		4
#define SIRFSOC_DMA_DIR_CTRL_BIT		5
#define SIRFSOC_DMA_MODE_CTRL_BIT_ATLAS7	2
#define SIRFSOC_DMA_CHAIN_CTRL_BIT_ATLAS7	3
#define SIRFSOC_DMA_DIR_CTRL_BIT_ATLAS7		4
#define SIRFSOC_DMA_TAB_NUM_ATLAS7		7
#define SIRFSOC_DMA_CHAIN_INT_BIT_ATLAS7	5
#define SIRFSOC_DMA_CHAIN_FLAG_SHIFT_ATLAS7	25
#define SIRFSOC_DMA_CHAIN_ADDR_SHIFT		32

#define SIRFSOC_DMA_INT_FINI_INT_ATLAS7		BIT(0)
#define SIRFSOC_DMA_INT_CNT_INT_ATLAS7		BIT(1)
#define SIRFSOC_DMA_INT_PAU_INT_ATLAS7		BIT(2)
#define SIRFSOC_DMA_INT_LOOP_INT_ATLAS7		BIT(3)
#define SIRFSOC_DMA_INT_INV_INT_ATLAS7		BIT(4)
#define SIRFSOC_DMA_INT_END_INT_ATLAS7		BIT(5)
#define SIRFSOC_DMA_INT_ALL_ATLAS7		0x3F

/* xlen and dma_width register is in 4 bytes boundary */
#define SIRFSOC_DMA_WORD_LEN			4
#define SIRFSOC_DMA_XLEN_MAX_V1			0x800
#define SIRFSOC_DMA_XLEN_MAX_V2			0x1000

struct sirfsoc_dma_desc {
	struct dma_async_tx_descriptor	desc;
	struct list_head		node;

	/* SiRFprimaII 2D-DMA parameters */

	int		xlen;		/* DMA xlen */
	int		ylen;		/* DMA ylen */
	int		width;		/* DMA width */
	int		dir;
	bool		cyclic;		/* is loop DMA? */
	bool		chain;		/* is chain DMA? */
	u32		addr;		/* DMA buffer address */
	u64		chain_table[SIRFSOC_DMA_TABLE_NUM]; /* chain tbl */
};

struct sirfsoc_dma_chan {
	struct dma_chan			chan;
	struct list_head		free;
	struct list_head		prepared;
	struct list_head		queued;
	struct list_head		active;
	struct list_head		completed;
	unsigned long			happened_cyclic;
	unsigned long			completed_cyclic;

	/* Lock for this structure */
	spinlock_t			lock;

	int				mode;
};

struct sirfsoc_dma_regs {
	u32				ctrl[SIRFSOC_DMA_CHANNELS];
	u32				interrupt_en;
};

struct sirfsoc_dma {
	struct dma_device		dma;
	struct tasklet_struct		tasklet;
	struct sirfsoc_dma_chan		channels[SIRFSOC_DMA_CHANNELS];
	void __iomem			*base;
	int				irq;
	struct clk			*clk;
	int				type;
	void (*exec_desc)(struct sirfsoc_dma_desc *sdesc,
			  int cid, int burst_mode, void __iomem *base);
	struct sirfsoc_dma_regs		regs_save;
};

struct sirfsoc_dmadata {
	void (*exec)(struct sirfsoc_dma_desc *sdesc,
		     int cid, int burst_mode, void __iomem *base);
	int type;
};

enum sirfsoc_dma_chain_flag {
	SIRFSOC_DMA_CHAIN_NORMAL = 0x01,
	SIRFSOC_DMA_CHAIN_PAUSE = 0x02,
	SIRFSOC_DMA_CHAIN_LOOP = 0x03,
	SIRFSOC_DMA_CHAIN_END = 0x04
};

#define DRV_NAME	"sirfsoc_dma"

static int sirfsoc_dma_runtime_suspend(struct device *dev);

/* Convert struct dma_chan to struct sirfsoc_dma_chan */
static inline
struct sirfsoc_dma_chan *dma_chan_to_sirfsoc_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct sirfsoc_dma_chan, chan);
}

/* Convert struct dma_chan to struct sirfsoc_dma */
static inline struct sirfsoc_dma *dma_chan_to_sirfsoc_dma(struct dma_chan *c)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(c);
	return container_of(schan, struct sirfsoc_dma, channels[c->chan_id]);
}

static void sirfsoc_dma_execute_hw_a7v2(struct sirfsoc_dma_desc *sdesc,
				int cid, int burst_mode, void __iomem *base)
{
	if (sdesc->chain) {
		/* DMA v2 HW chain mode */
		writel_relaxed((sdesc->dir << SIRFSOC_DMA_DIR_CTRL_BIT_ATLAS7) |
			       (sdesc->chain <<
				SIRFSOC_DMA_CHAIN_CTRL_BIT_ATLAS7) |
			       (0x8 << SIRFSOC_DMA_TAB_NUM_ATLAS7) | 0x3,
			       base + SIRFSOC_DMA_CH_CTRL);
	} else {
		/* DMA v2 legacy mode */
		writel_relaxed(sdesc->xlen, base + SIRFSOC_DMA_CH_XLEN);
		writel_relaxed(sdesc->ylen, base + SIRFSOC_DMA_CH_YLEN);
		writel_relaxed(sdesc->width, base + SIRFSOC_DMA_WIDTH_ATLAS7);
		writel_relaxed((sdesc->width*((sdesc->ylen+1)>>1)),
				base + SIRFSOC_DMA_MUL_ATLAS7);
		writel_relaxed((sdesc->dir << SIRFSOC_DMA_DIR_CTRL_BIT_ATLAS7) |
			       (sdesc->chain <<
				SIRFSOC_DMA_CHAIN_CTRL_BIT_ATLAS7) |
			       0x3, base + SIRFSOC_DMA_CH_CTRL);
	}
	writel_relaxed(sdesc->chain ? SIRFSOC_DMA_INT_END_INT_ATLAS7 :
		       (SIRFSOC_DMA_INT_FINI_INT_ATLAS7 |
			SIRFSOC_DMA_INT_LOOP_INT_ATLAS7),
		       base + SIRFSOC_DMA_INT_EN_ATLAS7);
	writel(sdesc->addr, base + SIRFSOC_DMA_CH_ADDR);
	if (sdesc->cyclic)
		writel(0x10001, base + SIRFSOC_DMA_LOOP_CTRL_ATLAS7);
}

static void sirfsoc_dma_execute_hw_a7v1(struct sirfsoc_dma_desc *sdesc,
				int cid, int burst_mode, void __iomem *base)
{
	writel_relaxed(1, base + SIRFSOC_DMA_IOBG_SCMD_EN);
	writel_relaxed((1 << cid), base + SIRFSOC_DMA_EARLY_RESP_SET);
	writel_relaxed(sdesc->width, base + SIRFSOC_DMA_WIDTH_0 + cid * 4);
	writel_relaxed(cid | (burst_mode << SIRFSOC_DMA_MODE_CTRL_BIT) |
		       (sdesc->dir << SIRFSOC_DMA_DIR_CTRL_BIT),
		       base + cid * 0x10 + SIRFSOC_DMA_CH_CTRL);
	writel_relaxed(sdesc->xlen, base + cid * 0x10 + SIRFSOC_DMA_CH_XLEN);
	writel_relaxed(sdesc->ylen, base + cid * 0x10 + SIRFSOC_DMA_CH_YLEN);
	writel_relaxed(readl_relaxed(base + SIRFSOC_DMA_INT_EN) |
		       (1 << cid), base + SIRFSOC_DMA_INT_EN);
	writel(sdesc->addr >> 2, base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR);
	if (sdesc->cyclic) {
		writel((1 << cid) | 1 << (cid + 16) |
		       readl_relaxed(base + SIRFSOC_DMA_CH_LOOP_CTRL_ATLAS7),
		       base + SIRFSOC_DMA_CH_LOOP_CTRL_ATLAS7);
	}

}

static void sirfsoc_dma_execute_hw_a6(struct sirfsoc_dma_desc *sdesc,
				int cid, int burst_mode, void __iomem *base)
{
	writel_relaxed(sdesc->width, base + SIRFSOC_DMA_WIDTH_0 + cid * 4);
	writel_relaxed(cid | (burst_mode << SIRFSOC_DMA_MODE_CTRL_BIT) |
		       (sdesc->dir << SIRFSOC_DMA_DIR_CTRL_BIT),
		       base + cid * 0x10 + SIRFSOC_DMA_CH_CTRL);
	writel_relaxed(sdesc->xlen, base + cid * 0x10 + SIRFSOC_DMA_CH_XLEN);
	writel_relaxed(sdesc->ylen, base + cid * 0x10 + SIRFSOC_DMA_CH_YLEN);
	writel_relaxed(readl_relaxed(base + SIRFSOC_DMA_INT_EN) |
		       (1 << cid), base + SIRFSOC_DMA_INT_EN);
	writel(sdesc->addr >> 2, base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR);
	if (sdesc->cyclic) {
		writel((1 << cid) | 1 << (cid + 16) |
		       readl_relaxed(base + SIRFSOC_DMA_CH_LOOP_CTRL),
		       base + SIRFSOC_DMA_CH_LOOP_CTRL);
	}

}

/* Execute all queued DMA descriptors */
static void sirfsoc_dma_execute(struct sirfsoc_dma_chan *schan)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
	int cid = schan->chan.chan_id;
	struct sirfsoc_dma_desc *sdesc = NULL;
	void __iomem *base;

	/*
	 * lock has been held by functions calling this, so we don't hold
	 * lock again
	 */
	base = sdma->base;
	sdesc = list_first_entry(&schan->queued, struct sirfsoc_dma_desc,
				 node);
	/* Move the first queued descriptor to active list */
	list_move_tail(&sdesc->node, &schan->active);

	if (sdma->type == SIRFSOC_DMA_VER_A7V2)
		cid = 0;

	/* Start the DMA transfer */
	sdma->exec_desc(sdesc, cid, schan->mode, base);

	if (sdesc->cyclic)
		schan->happened_cyclic = schan->completed_cyclic = 0;
}

/* Interrupt handler */
static irqreturn_t sirfsoc_dma_irq(int irq, void *data)
{
	struct sirfsoc_dma *sdma = data;
	struct sirfsoc_dma_chan *schan;
	struct sirfsoc_dma_desc *sdesc = NULL;
	u32 is;
	bool chain;
	int ch;
	void __iomem *reg;

	switch (sdma->type) {
	case SIRFSOC_DMA_VER_A6:
	case SIRFSOC_DMA_VER_A7V1:
		is = readl(sdma->base + SIRFSOC_DMA_CH_INT);
		reg = sdma->base + SIRFSOC_DMA_CH_INT;
		while ((ch = fls(is) - 1) >= 0) {
			is &= ~(1 << ch);
			writel_relaxed(1 << ch, reg);
			schan = &sdma->channels[ch];
			spin_lock(&schan->lock);
			sdesc = list_first_entry(&schan->active,
						 struct sirfsoc_dma_desc, node);
			if (!sdesc->cyclic) {
				/* Execute queued descriptors */
				list_splice_tail_init(&schan->active,
						      &schan->completed);
				dma_cookie_complete(&sdesc->desc);
				if (!list_empty(&schan->queued))
					sirfsoc_dma_execute(schan);
			} else
				schan->happened_cyclic++;
			spin_unlock(&schan->lock);
		}
		break;

	case SIRFSOC_DMA_VER_A7V2:
		is = readl(sdma->base + SIRFSOC_DMA_INT_ATLAS7);

		reg = sdma->base + SIRFSOC_DMA_INT_ATLAS7;
		writel_relaxed(SIRFSOC_DMA_INT_ALL_ATLAS7, reg);
		schan = &sdma->channels[0];
		spin_lock(&schan->lock);
		sdesc = list_first_entry(&schan->active,
					 struct sirfsoc_dma_desc, node);
		if (!sdesc->cyclic) {
			chain = sdesc->chain;
			if ((chain && (is & SIRFSOC_DMA_INT_END_INT_ATLAS7)) ||
				(!chain &&
				(is & SIRFSOC_DMA_INT_FINI_INT_ATLAS7))) {
				/* Execute queued descriptors */
				list_splice_tail_init(&schan->active,
						      &schan->completed);
				dma_cookie_complete(&sdesc->desc);
				if (!list_empty(&schan->queued))
					sirfsoc_dma_execute(schan);
			}
		} else if (sdesc->cyclic && (is &
					SIRFSOC_DMA_INT_LOOP_INT_ATLAS7))
			schan->happened_cyclic++;

		spin_unlock(&schan->lock);
		break;

	default:
		break;
	}

	/* Schedule tasklet */
	tasklet_schedule(&sdma->tasklet);

	return IRQ_HANDLED;
}

/* process completed descriptors */
static void sirfsoc_dma_process_completed(struct sirfsoc_dma *sdma)
{
	dma_cookie_t last_cookie = 0;
	struct sirfsoc_dma_chan *schan;
	struct sirfsoc_dma_desc *sdesc;
	struct dma_async_tx_descriptor *desc;
	unsigned long flags;
	unsigned long happened_cyclic;
	LIST_HEAD(list);
	int i;

	for (i = 0; i < sdma->dma.chancnt; i++) {
		schan = &sdma->channels[i];

		/* Get all completed descriptors */
		spin_lock_irqsave(&schan->lock, flags);
		if (!list_empty(&schan->completed)) {
			list_splice_tail_init(&schan->completed, &list);
			spin_unlock_irqrestore(&schan->lock, flags);

			/* Execute callbacks and run dependencies */
			list_for_each_entry(sdesc, &list, node) {
				desc = &sdesc->desc;

				if (desc->callback)
					desc->callback(desc->callback_param);

				last_cookie = desc->cookie;
				dma_run_dependencies(desc);
			}

			/* Free descriptors */
			spin_lock_irqsave(&schan->lock, flags);
			list_splice_tail_init(&list, &schan->free);
			schan->chan.completed_cookie = last_cookie;
			spin_unlock_irqrestore(&schan->lock, flags);
		} else {
			if (list_empty(&schan->active)) {
				spin_unlock_irqrestore(&schan->lock, flags);
				continue;
			}

			/* for cyclic channel, desc is always in active list */
			sdesc = list_first_entry(&schan->active,
				struct sirfsoc_dma_desc, node);

			/* cyclic DMA */
			happened_cyclic = schan->happened_cyclic;
			spin_unlock_irqrestore(&schan->lock, flags);

			desc = &sdesc->desc;
			while (happened_cyclic != schan->completed_cyclic) {
				if (desc->callback)
					desc->callback(desc->callback_param);
				schan->completed_cyclic++;
			}
		}
	}
}

/* DMA Tasklet */
static void sirfsoc_dma_tasklet(unsigned long data)
{
	struct sirfsoc_dma *sdma = (void *)data;

	sirfsoc_dma_process_completed(sdma);
}

/* Submit descriptor to hardware */
static dma_cookie_t sirfsoc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(txd->chan);
	struct sirfsoc_dma_desc *sdesc;
	unsigned long flags;
	dma_cookie_t cookie;

	sdesc = container_of(txd, struct sirfsoc_dma_desc, desc);

	spin_lock_irqsave(&schan->lock, flags);

	/* Move descriptor to queue */
	list_move_tail(&sdesc->node, &schan->queued);

	cookie = dma_cookie_assign(txd);

	spin_unlock_irqrestore(&schan->lock, flags);

	return cookie;
}

static int sirfsoc_dma_slave_config(struct dma_chan *chan,
				    struct dma_slave_config *config)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	unsigned long flags;

	if ((config->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
	    (config->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES))
		return -EINVAL;

	spin_lock_irqsave(&schan->lock, flags);
	schan->mode = (config->src_maxburst == 4 ? 1 : 0);
	spin_unlock_irqrestore(&schan->lock, flags);

	return 0;
}

static int sirfsoc_dma_terminate_all(struct dma_chan *chan)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
	int cid = schan->chan.chan_id;
	unsigned long flags;

	spin_lock_irqsave(&schan->lock, flags);

	switch (sdma->type) {
	case SIRFSOC_DMA_VER_A7V1:
		writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_INT_EN_CLR);
		writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_INT);
		writel_relaxed((1 << cid) | 1 << (cid + 16),
			       sdma->base +
			       SIRFSOC_DMA_CH_LOOP_CTRL_CLR_ATLAS7);
		writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_VALID);
		break;
	case SIRFSOC_DMA_VER_A7V2:
		writel_relaxed(0, sdma->base + SIRFSOC_DMA_INT_EN_ATLAS7);
		writel_relaxed(SIRFSOC_DMA_INT_ALL_ATLAS7,
			       sdma->base + SIRFSOC_DMA_INT_ATLAS7);
		writel_relaxed(0, sdma->base + SIRFSOC_DMA_LOOP_CTRL_ATLAS7);
		writel_relaxed(0, sdma->base + SIRFSOC_DMA_VALID_ATLAS7);
		break;
	case SIRFSOC_DMA_VER_A6:
		writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) &
			       ~(1 << cid), sdma->base + SIRFSOC_DMA_INT_EN);
		writel_relaxed(readl_relaxed(sdma->base +
					     SIRFSOC_DMA_CH_LOOP_CTRL) &
			       ~((1 << cid) | 1 << (cid + 16)),
			       sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
		writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_VALID);
		break;
	default:
		break;
	}

	list_splice_tail_init(&schan->active, &schan->free);
	list_splice_tail_init(&schan->queued, &schan->free);

	spin_unlock_irqrestore(&schan->lock, flags);

	return 0;
}

static int sirfsoc_dma_pause_chan(struct dma_chan *chan)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
	int cid = schan->chan.chan_id;
	unsigned long flags;

	spin_lock_irqsave(&schan->lock, flags);

	switch (sdma->type) {
	case SIRFSOC_DMA_VER_A7V1:
		writel_relaxed((1 << cid) | 1 << (cid + 16),
			       sdma->base +
			       SIRFSOC_DMA_CH_LOOP_CTRL_CLR_ATLAS7);
		break;
	case SIRFSOC_DMA_VER_A7V2:
		writel_relaxed(0, sdma->base + SIRFSOC_DMA_LOOP_CTRL_ATLAS7);
		break;
	case SIRFSOC_DMA_VER_A6:
		writel_relaxed(readl_relaxed(sdma->base +
					     SIRFSOC_DMA_CH_LOOP_CTRL) &
			       ~((1 << cid) | 1 << (cid + 16)),
			       sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
		break;

	default:
		break;
	}

	spin_unlock_irqrestore(&schan->lock, flags);

	return 0;
}

static int sirfsoc_dma_resume_chan(struct dma_chan *chan)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
	int cid = schan->chan.chan_id;
	unsigned long flags;

	spin_lock_irqsave(&schan->lock, flags);
	switch (sdma->type) {
	case SIRFSOC_DMA_VER_A7V1:
		writel_relaxed((1 << cid) | 1 << (cid + 16),
			       sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL_ATLAS7);
		break;
	case SIRFSOC_DMA_VER_A7V2:
		writel_relaxed(0x10001,
			       sdma->base + SIRFSOC_DMA_LOOP_CTRL_ATLAS7);
		break;
	case SIRFSOC_DMA_VER_A6:
		writel_relaxed(readl_relaxed(sdma->base +
					     SIRFSOC_DMA_CH_LOOP_CTRL) |
			       ((1 << cid) | 1 << (cid + 16)),
			       sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
		break;

	default:
		break;
	}

	spin_unlock_irqrestore(&schan->lock, flags);

	return 0;
}

/* Alloc channel resources */
static int sirfsoc_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma_desc *sdesc;
	unsigned long flags;
	LIST_HEAD(descs);
	int i;

	pm_runtime_get_sync(sdma->dma.dev);

	/* Alloc descriptors for this channel */
	for (i = 0; i < SIRFSOC_DMA_DESCRIPTORS; i++) {
		sdesc = kzalloc(sizeof(*sdesc), GFP_KERNEL);
		if (!sdesc) {
			dev_notice(sdma->dma.dev, "Memory allocation error. "
				"Allocated only %u descriptors\n", i);
			break;
		}

		dma_async_tx_descriptor_init(&sdesc->desc, chan);
		sdesc->desc.flags = DMA_CTRL_ACK;
		sdesc->desc.tx_submit = sirfsoc_dma_tx_submit;

		list_add_tail(&sdesc->node, &descs);
	}

	/* Return error only if no descriptors were allocated */
	if (i == 0)
		return -ENOMEM;

	spin_lock_irqsave(&schan->lock, flags);

	list_splice_tail_init(&descs, &schan->free);
	spin_unlock_irqrestore(&schan->lock, flags);

	return i;
}

/* Free channel resources */
static void sirfsoc_dma_free_chan_resources(struct dma_chan *chan)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
	struct sirfsoc_dma_desc *sdesc, *tmp;
	unsigned long flags;
	LIST_HEAD(descs);

	spin_lock_irqsave(&schan->lock, flags);

	/* Channel must be idle */
	BUG_ON(!list_empty(&schan->prepared));
	BUG_ON(!list_empty(&schan->queued));
	BUG_ON(!list_empty(&schan->active));
	BUG_ON(!list_empty(&schan->completed));

	/* Move data */
	list_splice_tail_init(&schan->free, &descs);

	spin_unlock_irqrestore(&schan->lock, flags);

	/* Free descriptors */
	list_for_each_entry_safe(sdesc, tmp, &descs, node)
		kfree(sdesc);

	pm_runtime_put(sdma->dma.dev);
}

/* Send pending descriptor to hardware */
static void sirfsoc_dma_issue_pending(struct dma_chan *chan)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&schan->lock, flags);

	if (list_empty(&schan->active) && !list_empty(&schan->queued))
		sirfsoc_dma_execute(schan);

	spin_unlock_irqrestore(&schan->lock, flags);
}

/* Check request completion status */
static enum dma_status
sirfsoc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
	struct dma_tx_state *txstate)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	unsigned long flags;
	enum dma_status ret;
	struct sirfsoc_dma_desc *sdesc;
	int cid = schan->chan.chan_id;
	unsigned long dma_pos;
	unsigned long dma_request_bytes;
	unsigned long residue;

	spin_lock_irqsave(&schan->lock, flags);

	if (list_empty(&schan->active)) {
		ret = dma_cookie_status(chan, cookie, txstate);
		dma_set_residue(txstate, 0);
		spin_unlock_irqrestore(&schan->lock, flags);
		return ret;
	}
	sdesc = list_first_entry(&schan->active, struct sirfsoc_dma_desc, node);
	if (sdesc->cyclic)
		dma_request_bytes = (sdesc->xlen + 1) * (sdesc->ylen + 1) *
			(sdesc->width * SIRFSOC_DMA_WORD_LEN);
	else
		dma_request_bytes = sdesc->xlen * SIRFSOC_DMA_WORD_LEN;

	ret = dma_cookie_status(chan, cookie, txstate);

	if (sdma->type == SIRFSOC_DMA_VER_A7V2)
		cid = 0;

	if (sdma->type == SIRFSOC_DMA_VER_A7V2) {
		dma_pos = readl_relaxed(sdma->base + SIRFSOC_DMA_CUR_DATA_ADDR);
	} else {
		dma_pos = readl_relaxed(
			sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR) << 2;
	}

	residue = dma_request_bytes - (dma_pos - sdesc->addr);
	dma_set_residue(txstate, residue);

	spin_unlock_irqrestore(&schan->lock, flags);

	return ret;
}

static struct dma_async_tx_descriptor *sirfsoc_dma_prep_interleaved(
	struct dma_chan *chan, struct dma_interleaved_template *xt,
	unsigned long flags)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma_desc *sdesc = NULL;
	unsigned long iflags;
	int ret;

	if ((xt->dir != DMA_MEM_TO_DEV) && (xt->dir != DMA_DEV_TO_MEM)) {
		ret = -EINVAL;
		goto err_dir;
	}

	/* Get free descriptor */
	spin_lock_irqsave(&schan->lock, iflags);
	if (!list_empty(&schan->free)) {
		sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc,
			node);
		list_del(&sdesc->node);
	}
	spin_unlock_irqrestore(&schan->lock, iflags);

	if (!sdesc) {
		/* try to free completed descriptors */
		sirfsoc_dma_process_completed(sdma);
		ret = 0;
		goto no_desc;
	}

	/* Place descriptor in prepared list */
	spin_lock_irqsave(&schan->lock, iflags);

	/*
	 * Number of chunks in a frame can only be 1 for prima2
	 * and ylen (number of frame - 1) must be at least 0
	 */
	if ((xt->frame_size == 1) && (xt->numf > 0)) {
		sdesc->cyclic = 0;
		sdesc->xlen = xt->sgl[0].size / SIRFSOC_DMA_WORD_LEN;
		sdesc->width = (xt->sgl[0].size + xt->sgl[0].icg) /
				SIRFSOC_DMA_WORD_LEN;
		sdesc->ylen = xt->numf - 1;
		if (xt->dir == DMA_MEM_TO_DEV) {
			sdesc->addr = xt->src_start;
			sdesc->dir = 1;
		} else {
			sdesc->addr = xt->dst_start;
			sdesc->dir = 0;
		}

		list_add_tail(&sdesc->node, &schan->prepared);
	} else {
		pr_err("sirfsoc DMA Invalid xfer\n");
		ret = -EINVAL;
		goto err_xfer;
	}
	spin_unlock_irqrestore(&schan->lock, iflags);

	return &sdesc->desc;
err_xfer:
	spin_unlock_irqrestore(&schan->lock, iflags);
no_desc:
err_dir:
	return ERR_PTR(ret);
}

static struct dma_async_tx_descriptor *
sirfsoc_dma_prep_cyclic(struct dma_chan *chan, dma_addr_t addr,
	size_t buf_len, size_t period_len,
	enum dma_transfer_direction direction, unsigned long flags)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma_desc *sdesc = NULL;
	unsigned long iflags;

	/*
	 * we only support cycle transfer with 2 period
	 * If the X-length is set to 0, it would be the loop mode.
	 * The DMA address keeps increasing until reaching the end of a loop
	 * area whose size is defined by (DMA_WIDTH x (Y_LENGTH + 1)). Then
	 * the DMA address goes back to the beginning of this area.
	 * In loop mode, the DMA data region is divided into two parts, BUFA
	 * and BUFB. DMA controller generates interrupts twice in each loop:
	 * when the DMA address reaches the end of BUFA or the end of the
	 * BUFB
	 */
	if (buf_len != 2 * period_len)
		return ERR_PTR(-EINVAL);

	/* Get free descriptor */
	spin_lock_irqsave(&schan->lock, iflags);
	if (!list_empty(&schan->free)) {
		sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc,
			node);
		list_del(&sdesc->node);
	}
	spin_unlock_irqrestore(&schan->lock, iflags);

	if (!sdesc)
		return NULL;

	/* Place descriptor in prepared list */
	spin_lock_irqsave(&schan->lock, iflags);
	sdesc->addr = addr;
	sdesc->cyclic = 1;
	sdesc->xlen = 0;
	sdesc->ylen = buf_len / SIRFSOC_DMA_WORD_LEN - 1;
	sdesc->width = 1;
	list_add_tail(&sdesc->node, &schan->prepared);
	spin_unlock_irqrestore(&schan->lock, iflags);

	return &sdesc->desc;
}

/*
 * The DMA controller consists of 16 independent DMA channels.
 * Each channel is allocated to a different function
 */
bool sirfsoc_dma_filter_id(struct dma_chan *chan, void *chan_id)
{
	unsigned int ch_nr = (unsigned int) chan_id;

	if (ch_nr == chan->chan_id +
		chan->device->dev_id * SIRFSOC_DMA_CHANNELS)
		return true;

	return false;
}
EXPORT_SYMBOL(sirfsoc_dma_filter_id);

#define SIRFSOC_DMA_BUSWIDTHS \
	(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
	BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))

static struct dma_chan *of_dma_sirfsoc_xlate(struct of_phandle_args *dma_spec,
	struct of_dma *ofdma)
{
	struct sirfsoc_dma *sdma = ofdma->of_dma_data;
	unsigned int request = dma_spec->args[0];

	if (request >= SIRFSOC_DMA_CHANNELS)
		return NULL;

	return dma_get_slave_channel(&sdma->channels[request].chan);
}

static int sirfsoc_dma_probe(struct platform_device *op)
{
	struct device_node *dn = op->dev.of_node;
	struct device *dev = &op->dev;
	struct dma_device *dma;
	struct sirfsoc_dma *sdma;
	struct sirfsoc_dma_chan *schan;
	struct sirfsoc_dmadata *data;
	struct resource res;
	ulong regs_start, regs_size;
	u32 id;
	int ret, i;

	sdma = devm_kzalloc(dev, sizeof(*sdma), GFP_KERNEL);
	if (!sdma) {
		dev_err(dev, "Memory exhausted!\n");
		return -ENOMEM;
	}
	data = (struct sirfsoc_dmadata *)
		(of_match_device(op->dev.driver->of_match_table,
				 &op->dev)->data);
	sdma->exec_desc = data->exec;
	sdma->type = data->type;

	if (of_property_read_u32(dn, "cell-index", &id)) {
		dev_err(dev, "Fail to get DMAC index\n");
		return -ENODEV;
	}

	sdma->irq = irq_of_parse_and_map(dn, 0);
	if (sdma->irq == NO_IRQ) {
		dev_err(dev, "Error mapping IRQ!\n");
		return -EINVAL;
	}

	sdma->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(sdma->clk)) {
		dev_err(dev, "failed to get a clock.\n");
		return PTR_ERR(sdma->clk);
	}

	ret = of_address_to_resource(dn, 0, &res);
	if (ret) {
		dev_err(dev, "Error parsing memory region!\n");
		goto irq_dispose;
	}

	regs_start = res.start;
	regs_size = resource_size(&res);

	sdma->base = devm_ioremap(dev, regs_start, regs_size);
	if (!sdma->base) {
		dev_err(dev, "Error mapping memory region!\n");
		ret = -ENOMEM;
		goto irq_dispose;
	}

	ret = request_irq(sdma->irq, &sirfsoc_dma_irq, 0, DRV_NAME, sdma);
	if (ret) {
		dev_err(dev, "Error requesting IRQ!\n");
		ret = -EINVAL;
		goto irq_dispose;
	}

	dma = &sdma->dma;
	dma->dev = dev;

	dma->device_alloc_chan_resources = sirfsoc_dma_alloc_chan_resources;
	dma->device_free_chan_resources = sirfsoc_dma_free_chan_resources;
	dma->device_issue_pending = sirfsoc_dma_issue_pending;
	dma->device_config = sirfsoc_dma_slave_config;
	dma->device_pause = sirfsoc_dma_pause_chan;
	dma->device_resume = sirfsoc_dma_resume_chan;
	dma->device_terminate_all = sirfsoc_dma_terminate_all;
	dma->device_tx_status = sirfsoc_dma_tx_status;
	dma->device_prep_interleaved_dma = sirfsoc_dma_prep_interleaved;
	dma->device_prep_dma_cyclic = sirfsoc_dma_prep_cyclic;
	dma->src_addr_widths = SIRFSOC_DMA_BUSWIDTHS;
	dma->dst_addr_widths = SIRFSOC_DMA_BUSWIDTHS;
	dma->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);

	INIT_LIST_HEAD(&dma->channels);
	dma_cap_set(DMA_SLAVE, dma->cap_mask);
	dma_cap_set(DMA_CYCLIC, dma->cap_mask);
	dma_cap_set(DMA_INTERLEAVE, dma->cap_mask);
	dma_cap_set(DMA_PRIVATE, dma->cap_mask);

	for (i = 0; i < SIRFSOC_DMA_CHANNELS; i++) {
		schan = &sdma->channels[i];

		schan->chan.device = dma;
		dma_cookie_init(&schan->chan);

		INIT_LIST_HEAD(&schan->free);
		INIT_LIST_HEAD(&schan->prepared);
		INIT_LIST_HEAD(&schan->queued);
		INIT_LIST_HEAD(&schan->active);
		INIT_LIST_HEAD(&schan->completed);

		spin_lock_init(&schan->lock);
		list_add_tail(&schan->chan.device_node, &dma->channels);
	}

	tasklet_init(&sdma->tasklet, sirfsoc_dma_tasklet, (unsigned long)sdma);

	/* Register DMA engine */
	dev_set_drvdata(dev, sdma);

	ret = dma_async_device_register(dma);
	if (ret)
		goto free_irq;

	/* Device-tree DMA controller registration */
	ret = of_dma_controller_register(dn, of_dma_sirfsoc_xlate, sdma);
	if (ret) {
		dev_err(dev, "failed to register DMA controller\n");
		goto unreg_dma_dev;
	}

	pm_runtime_enable(&op->dev);
	dev_info(dev, "initialized SIRFSOC DMAC driver\n");

	return 0;

unreg_dma_dev:
	dma_async_device_unregister(dma);
free_irq:
	free_irq(sdma->irq, sdma);
irq_dispose:
	irq_dispose_mapping(sdma->irq);
	return ret;
}

static int sirfsoc_dma_remove(struct platform_device *op)
{
	struct device *dev = &op->dev;
	struct sirfsoc_dma *sdma = dev_get_drvdata(dev);

	of_dma_controller_free(op->dev.of_node);
	dma_async_device_unregister(&sdma->dma);
	free_irq(sdma->irq, sdma);
	irq_dispose_mapping(sdma->irq);
	pm_runtime_disable(&op->dev);
	if (!pm_runtime_status_suspended(&op->dev))
		sirfsoc_dma_runtime_suspend(&op->dev);

	return 0;
}

static int sirfsoc_dma_runtime_suspend(struct device *dev)
{
	struct sirfsoc_dma *sdma = dev_get_drvdata(dev);

	clk_disable_unprepare(sdma->clk);
	return 0;
}

static int sirfsoc_dma_runtime_resume(struct device *dev)
{
	struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(sdma->clk);
	if (ret < 0) {
		dev_err(dev, "clk_enable failed: %d\n", ret);
		return ret;
	}
	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int sirfsoc_dma_pm_suspend(struct device *dev)
{
	struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
	struct sirfsoc_dma_regs *save = &sdma->regs_save;
	struct sirfsoc_dma_desc *sdesc;
	struct sirfsoc_dma_chan *schan;
	int ch;
	int ret;
	int count;
	u32 int_offset;

	/*
	 * if we were runtime-suspended before, resume to enable clock
	 * before accessing register
	 */
	if (pm_runtime_status_suspended(dev)) {
		ret = sirfsoc_dma_runtime_resume(dev);
		if (ret < 0)
			return ret;
	}

	if (sdma->type == SIRFSOC_DMA_VER_A7V2) {
		count = 1;
		int_offset = SIRFSOC_DMA_INT_EN_ATLAS7;
	} else {
		count = SIRFSOC_DMA_CHANNELS;
		int_offset = SIRFSOC_DMA_INT_EN;
	}

	/*
	 * DMA controller will lose all registers while suspending
	 * so we need to save registers for active channels
	 */
	for (ch = 0; ch < count; ch++) {
		schan = &sdma->channels[ch];
		if (list_empty(&schan->active))
			continue;
		sdesc = list_first_entry(&schan->active,
			struct sirfsoc_dma_desc,
			node);
		save->ctrl[ch] = readl_relaxed(sdma->base +
			ch * 0x10 + SIRFSOC_DMA_CH_CTRL);
	}
	save->interrupt_en = readl_relaxed(sdma->base + int_offset);

	/* Disable clock */
	sirfsoc_dma_runtime_suspend(dev);

	return 0;
}

static int sirfsoc_dma_pm_resume(struct device *dev)
{
	struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
	struct sirfsoc_dma_regs *save = &sdma->regs_save;
	struct sirfsoc_dma_desc *sdesc;
	struct sirfsoc_dma_chan *schan;
	int ch;
	int ret;
	int count;
	u32 int_offset;
	u32 width_offset;

	/* Enable clock before accessing register */
	ret = sirfsoc_dma_runtime_resume(dev);
	if (ret < 0)
		return ret;

	if (sdma->type == SIRFSOC_DMA_VER_A7V2) {
		count = 1;
		int_offset = SIRFSOC_DMA_INT_EN_ATLAS7;
		width_offset = SIRFSOC_DMA_WIDTH_ATLAS7;
	} else {
		count = SIRFSOC_DMA_CHANNELS;
		int_offset = SIRFSOC_DMA_INT_EN;
		width_offset = SIRFSOC_DMA_WIDTH_0;
	}

	writel_relaxed(save->interrupt_en, sdma->base + int_offset);
	for (ch = 0; ch < count; ch++) {
		schan = &sdma->channels[ch];
		if (list_empty(&schan->active))
			continue;
		sdesc = list_first_entry(&schan->active,
			struct sirfsoc_dma_desc,
			node);
		writel_relaxed(sdesc->width,
			sdma->base + width_offset + ch * 4);
		writel_relaxed(sdesc->xlen,
			sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_XLEN);
		writel_relaxed(sdesc->ylen,
			sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_YLEN);
		writel_relaxed(save->ctrl[ch],
			sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_CTRL);
		if (sdma->type == SIRFSOC_DMA_VER_A7V2) {
			writel_relaxed(sdesc->addr,
				sdma->base + SIRFSOC_DMA_CH_ADDR);
		} else {
			writel_relaxed(sdesc->addr >> 2,
				sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_ADDR);
		}
	}

	/* if we were runtime-suspended before, suspend again */
	if (pm_runtime_status_suspended(dev))
		sirfsoc_dma_runtime_suspend(dev);

	return 0;
}
#endif

static const struct dev_pm_ops sirfsoc_dma_pm_ops = {
	SET_RUNTIME_PM_OPS(sirfsoc_dma_runtime_suspend,
			   sirfsoc_dma_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(sirfsoc_dma_pm_suspend, sirfsoc_dma_pm_resume)
};

struct sirfsoc_dmadata sirfsoc_dmadata_a6 = {
	.exec = sirfsoc_dma_execute_hw_a6,
	.type = SIRFSOC_DMA_VER_A6,
};

struct sirfsoc_dmadata sirfsoc_dmadata_a7v1 = {
	.exec = sirfsoc_dma_execute_hw_a7v1,
	.type = SIRFSOC_DMA_VER_A7V1,
};

struct sirfsoc_dmadata sirfsoc_dmadata_a7v2 = {
	.exec = sirfsoc_dma_execute_hw_a7v2,
	.type = SIRFSOC_DMA_VER_A7V2,
};

static const struct of_device_id sirfsoc_dma_match[] = {
	{ .compatible = "sirf,prima2-dmac", .data = &sirfsoc_dmadata_a6,},
	{ .compatible = "sirf,atlas7-dmac", .data = &sirfsoc_dmadata_a7v1,},
	{ .compatible = "sirf,atlas7-dmac-v2", .data = &sirfsoc_dmadata_a7v2,},
	{},
};

static struct platform_driver sirfsoc_dma_driver = {
	.probe		= sirfsoc_dma_probe,
	.remove		= sirfsoc_dma_remove,
	.driver = {
		.name = DRV_NAME,
		.pm = &sirfsoc_dma_pm_ops,
		.of_match_table	= sirfsoc_dma_match,
	},
};

static __init int sirfsoc_dma_init(void)
{
	return platform_driver_register(&sirfsoc_dma_driver);
}

static void __exit sirfsoc_dma_exit(void)
{
	platform_driver_unregister(&sirfsoc_dma_driver);
}

subsys_initcall(sirfsoc_dma_init);
module_exit(sirfsoc_dma_exit);

MODULE_AUTHOR("Rongjun Ying <rongjun.ying@csr.com>");
MODULE_AUTHOR("Barry Song <baohua.song@csr.com>");
MODULE_DESCRIPTION("SIRFSOC DMA control driver");
MODULE_LICENSE("GPL v2");
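
For context, a minimal sketch of how a client driver might obtain one of these channels through the exported sirfsoc_dma_filter_id(); this consumer code is not part of the file above, and the helper name sirfsoc_get_chan is hypothetical. The filter matches the global channel number chan_id + dev_id * SIRFSOC_DMA_CHANNELS, so passing 12 requests channel 12 of DMAC 0.

/* Hypothetical consumer sketch (not from this driver). */
#include <linux/dmaengine.h>
#include <linux/sirfsoc_dma.h>

static struct dma_chan *sirfsoc_get_chan(void)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	/*
	 * dma_request_channel() walks the registered DMA devices and calls
	 * sirfsoc_dma_filter_id() on each free channel until one matches
	 * the requested global channel number.
	 */
	return dma_request_channel(mask, sirfsoc_dma_filter_id, (void *)12);
}

Device-tree users would instead go through of_dma_sirfsoc_xlate() via dma_request_slave_channel(), which the driver registers with of_dma_controller_register() in probe.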