Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v3.1-rc9 1467 lines 41 kB view raw
1/* 2 * intel_mid_dma.c - Intel Langwell DMA Drivers 3 * 4 * Copyright (C) 2008-10 Intel Corp 5 * Author: Vinod Koul <vinod.koul@intel.com> 6 * The driver design is based on dw_dmac driver 7 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 8 * 9 * This program is free software; you can redistribute it and/or modify 10 * it under the terms of the GNU General Public License as published by 11 * the Free Software Foundation; version 2 of the License. 12 * 13 * This program is distributed in the hope that it will be useful, but 14 * WITHOUT ANY WARRANTY; without even the implied warranty of 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 16 * General Public License for more details. 17 * 18 * You should have received a copy of the GNU General Public License along 19 * with this program; if not, write to the Free Software Foundation, Inc., 20 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 21 * 22 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 23 * 24 * 25 */ 26#include <linux/pci.h> 27#include <linux/interrupt.h> 28#include <linux/pm_runtime.h> 29#include <linux/intel_mid_dma.h> 30 31#define MAX_CHAN 4 /*max ch across controllers*/ 32#include "intel_mid_dma_regs.h" 33 34#define INTEL_MID_DMAC1_ID 0x0814 35#define INTEL_MID_DMAC2_ID 0x0813 36#define INTEL_MID_GP_DMAC2_ID 0x0827 37#define INTEL_MFLD_DMAC1_ID 0x0830 38#define LNW_PERIPHRAL_MASK_BASE 0xFFAE8008 39#define LNW_PERIPHRAL_MASK_SIZE 0x10 40#define LNW_PERIPHRAL_STATUS 0x0 41#define LNW_PERIPHRAL_MASK 0x8 42 43struct intel_mid_dma_probe_info { 44 u8 max_chan; 45 u8 ch_base; 46 u16 block_size; 47 u32 pimr_mask; 48}; 49 50#define INFO(_max_chan, _ch_base, _block_size, _pimr_mask) \ 51 ((kernel_ulong_t)&(struct intel_mid_dma_probe_info) { \ 52 .max_chan = (_max_chan), \ 53 .ch_base = (_ch_base), \ 54 .block_size = (_block_size), \ 55 .pimr_mask = (_pimr_mask), \ 56 }) 57 58/***************************************************************************** 59Utility Functions*/ 60/** 61 * get_ch_index - convert status to channel 62 * @status: status mask 63 * @base: dma ch base value 64 * 65 * Modify the status mask and return the channel index needing 66 * attention (or -1 if neither) 67 */ 68static int get_ch_index(int *status, unsigned int base) 69{ 70 int i; 71 for (i = 0; i < MAX_CHAN; i++) { 72 if (*status & (1 << (i + base))) { 73 *status = *status & ~(1 << (i + base)); 74 pr_debug("MDMA: index %d New status %x\n", i, *status); 75 return i; 76 } 77 } 78 return -1; 79} 80 81/** 82 * get_block_ts - calculates dma transaction length 83 * @len: dma transfer length 84 * @tx_width: dma transfer src width 85 * @block_size: dma controller max block size 86 * 87 * Based on src width calculate the DMA trsaction length in data items 88 * return data items or FFFF if exceeds max length for block 89 */ 90static int get_block_ts(int len, int tx_width, int block_size) 91{ 92 int byte_width = 0, block_ts = 0; 93 94 switch (tx_width) { 95 case DMA_SLAVE_BUSWIDTH_1_BYTE: 96 byte_width = 1; 97 break; 98 case DMA_SLAVE_BUSWIDTH_2_BYTES: 99 byte_width = 2; 100 break; 101 case DMA_SLAVE_BUSWIDTH_4_BYTES: 102 default: 103 byte_width = 4; 104 break; 105 } 106 107 block_ts = len/byte_width; 108 if (block_ts > block_size) 109 block_ts = 0xFFFF; 110 return block_ts; 111} 112 113/***************************************************************************** 114DMAC1 interrupt Functions*/ 115 116/** 117 * dmac1_mask_periphral_intr - mask the periphral interrupt 118 * @midc: dma channel for which masking is required 119 * 120 * Masks the DMA periphral interrupt 121 * this is valid for DMAC1 family controllers only 122 * This controller should have periphral mask registers already mapped 123 */ 124static void dmac1_mask_periphral_intr(struct intel_mid_dma_chan *midc) 125{ 126 u32 pimr; 127 struct middma_device *mid = to_middma_device(midc->chan.device); 128 129 if (mid->pimr_mask) { 130 pimr = readl(mid->mask_reg + LNW_PERIPHRAL_MASK); 131 pimr |= mid->pimr_mask; 132 writel(pimr, mid->mask_reg + LNW_PERIPHRAL_MASK); 133 } 134 return; 135} 136 137/** 138 * dmac1_unmask_periphral_intr - unmask the periphral interrupt 139 * @midc: dma channel for which masking is required 140 * 141 * UnMasks the DMA periphral interrupt, 142 * this is valid for DMAC1 family controllers only 143 * This controller should have periphral mask registers already mapped 144 */ 145static void dmac1_unmask_periphral_intr(struct intel_mid_dma_chan *midc) 146{ 147 u32 pimr; 148 struct middma_device *mid = to_middma_device(midc->chan.device); 149 150 if (mid->pimr_mask) { 151 pimr = readl(mid->mask_reg + LNW_PERIPHRAL_MASK); 152 pimr &= ~mid->pimr_mask; 153 writel(pimr, mid->mask_reg + LNW_PERIPHRAL_MASK); 154 } 155 return; 156} 157 158/** 159 * enable_dma_interrupt - enable the periphral interrupt 160 * @midc: dma channel for which enable interrupt is required 161 * 162 * Enable the DMA periphral interrupt, 163 * this is valid for DMAC1 family controllers only 164 * This controller should have periphral mask registers already mapped 165 */ 166static void enable_dma_interrupt(struct intel_mid_dma_chan *midc) 167{ 168 dmac1_unmask_periphral_intr(midc); 169 170 /*en ch interrupts*/ 171 iowrite32(UNMASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_TFR); 172 iowrite32(UNMASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_ERR); 173 return; 174} 175 176/** 177 * disable_dma_interrupt - disable the periphral interrupt 178 * @midc: dma channel for which disable interrupt is required 179 * 180 * Disable the DMA periphral interrupt, 181 * this is valid for DMAC1 family controllers only 182 * This controller should have periphral mask registers already mapped 183 */ 184static void disable_dma_interrupt(struct intel_mid_dma_chan *midc) 185{ 186 /*Check LPE PISR, make sure fwd is disabled*/ 187 dmac1_mask_periphral_intr(midc); 188 iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_BLOCK); 189 iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_TFR); 190 iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_ERR); 191 return; 192} 193 194/***************************************************************************** 195DMA channel helper Functions*/ 196/** 197 * mid_desc_get - get a descriptor 198 * @midc: dma channel for which descriptor is required 199 * 200 * Obtain a descriptor for the channel. Returns NULL if none are free. 201 * Once the descriptor is returned it is private until put on another 202 * list or freed 203 */ 204static struct intel_mid_dma_desc *midc_desc_get(struct intel_mid_dma_chan *midc) 205{ 206 struct intel_mid_dma_desc *desc, *_desc; 207 struct intel_mid_dma_desc *ret = NULL; 208 209 spin_lock_bh(&midc->lock); 210 list_for_each_entry_safe(desc, _desc, &midc->free_list, desc_node) { 211 if (async_tx_test_ack(&desc->txd)) { 212 list_del(&desc->desc_node); 213 ret = desc; 214 break; 215 } 216 } 217 spin_unlock_bh(&midc->lock); 218 return ret; 219} 220 221/** 222 * mid_desc_put - put a descriptor 223 * @midc: dma channel for which descriptor is required 224 * @desc: descriptor to put 225 * 226 * Return a descriptor from lwn_desc_get back to the free pool 227 */ 228static void midc_desc_put(struct intel_mid_dma_chan *midc, 229 struct intel_mid_dma_desc *desc) 230{ 231 if (desc) { 232 spin_lock_bh(&midc->lock); 233 list_add_tail(&desc->desc_node, &midc->free_list); 234 spin_unlock_bh(&midc->lock); 235 } 236} 237/** 238 * midc_dostart - begin a DMA transaction 239 * @midc: channel for which txn is to be started 240 * @first: first descriptor of series 241 * 242 * Load a transaction into the engine. This must be called with midc->lock 243 * held and bh disabled. 244 */ 245static void midc_dostart(struct intel_mid_dma_chan *midc, 246 struct intel_mid_dma_desc *first) 247{ 248 struct middma_device *mid = to_middma_device(midc->chan.device); 249 250 /* channel is idle */ 251 if (midc->busy && test_ch_en(midc->dma_base, midc->ch_id)) { 252 /*error*/ 253 pr_err("ERR_MDMA: channel is busy in start\n"); 254 /* The tasklet will hopefully advance the queue... */ 255 return; 256 } 257 midc->busy = true; 258 /*write registers and en*/ 259 iowrite32(first->sar, midc->ch_regs + SAR); 260 iowrite32(first->dar, midc->ch_regs + DAR); 261 iowrite32(first->lli_phys, midc->ch_regs + LLP); 262 iowrite32(first->cfg_hi, midc->ch_regs + CFG_HIGH); 263 iowrite32(first->cfg_lo, midc->ch_regs + CFG_LOW); 264 iowrite32(first->ctl_lo, midc->ch_regs + CTL_LOW); 265 iowrite32(first->ctl_hi, midc->ch_regs + CTL_HIGH); 266 pr_debug("MDMA:TX SAR %x,DAR %x,CFGL %x,CFGH %x,CTLH %x, CTLL %x\n", 267 (int)first->sar, (int)first->dar, first->cfg_hi, 268 first->cfg_lo, first->ctl_hi, first->ctl_lo); 269 first->status = DMA_IN_PROGRESS; 270 271 iowrite32(ENABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN); 272} 273 274/** 275 * midc_descriptor_complete - process completed descriptor 276 * @midc: channel owning the descriptor 277 * @desc: the descriptor itself 278 * 279 * Process a completed descriptor and perform any callbacks upon 280 * the completion. The completion handling drops the lock during the 281 * callbacks but must be called with the lock held. 282 */ 283static void midc_descriptor_complete(struct intel_mid_dma_chan *midc, 284 struct intel_mid_dma_desc *desc) 285{ 286 struct dma_async_tx_descriptor *txd = &desc->txd; 287 dma_async_tx_callback callback_txd = NULL; 288 struct intel_mid_dma_lli *llitem; 289 void *param_txd = NULL; 290 291 midc->completed = txd->cookie; 292 callback_txd = txd->callback; 293 param_txd = txd->callback_param; 294 295 if (desc->lli != NULL) { 296 /*clear the DONE bit of completed LLI in memory*/ 297 llitem = desc->lli + desc->current_lli; 298 llitem->ctl_hi &= CLEAR_DONE; 299 if (desc->current_lli < desc->lli_length-1) 300 (desc->current_lli)++; 301 else 302 desc->current_lli = 0; 303 } 304 spin_unlock_bh(&midc->lock); 305 if (callback_txd) { 306 pr_debug("MDMA: TXD callback set ... calling\n"); 307 callback_txd(param_txd); 308 } 309 if (midc->raw_tfr) { 310 desc->status = DMA_SUCCESS; 311 if (desc->lli != NULL) { 312 pci_pool_free(desc->lli_pool, desc->lli, 313 desc->lli_phys); 314 pci_pool_destroy(desc->lli_pool); 315 } 316 list_move(&desc->desc_node, &midc->free_list); 317 midc->busy = false; 318 } 319 spin_lock_bh(&midc->lock); 320 321} 322/** 323 * midc_scan_descriptors - check the descriptors in channel 324 * mark completed when tx is completete 325 * @mid: device 326 * @midc: channel to scan 327 * 328 * Walk the descriptor chain for the device and process any entries 329 * that are complete. 330 */ 331static void midc_scan_descriptors(struct middma_device *mid, 332 struct intel_mid_dma_chan *midc) 333{ 334 struct intel_mid_dma_desc *desc = NULL, *_desc = NULL; 335 336 /*tx is complete*/ 337 list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) { 338 if (desc->status == DMA_IN_PROGRESS) 339 midc_descriptor_complete(midc, desc); 340 } 341 return; 342 } 343/** 344 * midc_lli_fill_sg - Helper function to convert 345 * SG list to Linked List Items. 346 *@midc: Channel 347 *@desc: DMA descriptor 348 *@sglist: Pointer to SG list 349 *@sglen: SG list length 350 *@flags: DMA transaction flags 351 * 352 * Walk through the SG list and convert the SG list into Linked 353 * List Items (LLI). 354 */ 355static int midc_lli_fill_sg(struct intel_mid_dma_chan *midc, 356 struct intel_mid_dma_desc *desc, 357 struct scatterlist *sglist, 358 unsigned int sglen, 359 unsigned int flags) 360{ 361 struct intel_mid_dma_slave *mids; 362 struct scatterlist *sg; 363 dma_addr_t lli_next, sg_phy_addr; 364 struct intel_mid_dma_lli *lli_bloc_desc; 365 union intel_mid_dma_ctl_lo ctl_lo; 366 union intel_mid_dma_ctl_hi ctl_hi; 367 int i; 368 369 pr_debug("MDMA: Entered midc_lli_fill_sg\n"); 370 mids = midc->mid_slave; 371 372 lli_bloc_desc = desc->lli; 373 lli_next = desc->lli_phys; 374 375 ctl_lo.ctl_lo = desc->ctl_lo; 376 ctl_hi.ctl_hi = desc->ctl_hi; 377 for_each_sg(sglist, sg, sglen, i) { 378 /*Populate CTL_LOW and LLI values*/ 379 if (i != sglen - 1) { 380 lli_next = lli_next + 381 sizeof(struct intel_mid_dma_lli); 382 } else { 383 /*Check for circular list, otherwise terminate LLI to ZERO*/ 384 if (flags & DMA_PREP_CIRCULAR_LIST) { 385 pr_debug("MDMA: LLI is configured in circular mode\n"); 386 lli_next = desc->lli_phys; 387 } else { 388 lli_next = 0; 389 ctl_lo.ctlx.llp_dst_en = 0; 390 ctl_lo.ctlx.llp_src_en = 0; 391 } 392 } 393 /*Populate CTL_HI values*/ 394 ctl_hi.ctlx.block_ts = get_block_ts(sg->length, 395 desc->width, 396 midc->dma->block_size); 397 /*Populate SAR and DAR values*/ 398 sg_phy_addr = sg_phys(sg); 399 if (desc->dirn == DMA_TO_DEVICE) { 400 lli_bloc_desc->sar = sg_phy_addr; 401 lli_bloc_desc->dar = mids->dma_slave.dst_addr; 402 } else if (desc->dirn == DMA_FROM_DEVICE) { 403 lli_bloc_desc->sar = mids->dma_slave.src_addr; 404 lli_bloc_desc->dar = sg_phy_addr; 405 } 406 /*Copy values into block descriptor in system memroy*/ 407 lli_bloc_desc->llp = lli_next; 408 lli_bloc_desc->ctl_lo = ctl_lo.ctl_lo; 409 lli_bloc_desc->ctl_hi = ctl_hi.ctl_hi; 410 411 lli_bloc_desc++; 412 } 413 /*Copy very first LLI values to descriptor*/ 414 desc->ctl_lo = desc->lli->ctl_lo; 415 desc->ctl_hi = desc->lli->ctl_hi; 416 desc->sar = desc->lli->sar; 417 desc->dar = desc->lli->dar; 418 419 return 0; 420} 421/***************************************************************************** 422DMA engine callback Functions*/ 423/** 424 * intel_mid_dma_tx_submit - callback to submit DMA transaction 425 * @tx: dma engine descriptor 426 * 427 * Submit the DMA trasaction for this descriptor, start if ch idle 428 */ 429static dma_cookie_t intel_mid_dma_tx_submit(struct dma_async_tx_descriptor *tx) 430{ 431 struct intel_mid_dma_desc *desc = to_intel_mid_dma_desc(tx); 432 struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(tx->chan); 433 dma_cookie_t cookie; 434 435 spin_lock_bh(&midc->lock); 436 cookie = midc->chan.cookie; 437 438 if (++cookie < 0) 439 cookie = 1; 440 441 midc->chan.cookie = cookie; 442 desc->txd.cookie = cookie; 443 444 445 if (list_empty(&midc->active_list)) 446 list_add_tail(&desc->desc_node, &midc->active_list); 447 else 448 list_add_tail(&desc->desc_node, &midc->queue); 449 450 midc_dostart(midc, desc); 451 spin_unlock_bh(&midc->lock); 452 453 return cookie; 454} 455 456/** 457 * intel_mid_dma_issue_pending - callback to issue pending txn 458 * @chan: chan where pending trascation needs to be checked and submitted 459 * 460 * Call for scan to issue pending descriptors 461 */ 462static void intel_mid_dma_issue_pending(struct dma_chan *chan) 463{ 464 struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); 465 466 spin_lock_bh(&midc->lock); 467 if (!list_empty(&midc->queue)) 468 midc_scan_descriptors(to_middma_device(chan->device), midc); 469 spin_unlock_bh(&midc->lock); 470} 471 472/** 473 * intel_mid_dma_tx_status - Return status of txn 474 * @chan: chan for where status needs to be checked 475 * @cookie: cookie for txn 476 * @txstate: DMA txn state 477 * 478 * Return status of DMA txn 479 */ 480static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan, 481 dma_cookie_t cookie, 482 struct dma_tx_state *txstate) 483{ 484 struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); 485 dma_cookie_t last_used; 486 dma_cookie_t last_complete; 487 int ret; 488 489 last_complete = midc->completed; 490 last_used = chan->cookie; 491 492 ret = dma_async_is_complete(cookie, last_complete, last_used); 493 if (ret != DMA_SUCCESS) { 494 midc_scan_descriptors(to_middma_device(chan->device), midc); 495 496 last_complete = midc->completed; 497 last_used = chan->cookie; 498 499 ret = dma_async_is_complete(cookie, last_complete, last_used); 500 } 501 502 if (txstate) { 503 txstate->last = last_complete; 504 txstate->used = last_used; 505 txstate->residue = 0; 506 } 507 return ret; 508} 509 510static int dma_slave_control(struct dma_chan *chan, unsigned long arg) 511{ 512 struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); 513 struct dma_slave_config *slave = (struct dma_slave_config *)arg; 514 struct intel_mid_dma_slave *mid_slave; 515 516 BUG_ON(!midc); 517 BUG_ON(!slave); 518 pr_debug("MDMA: slave control called\n"); 519 520 mid_slave = to_intel_mid_dma_slave(slave); 521 522 BUG_ON(!mid_slave); 523 524 midc->mid_slave = mid_slave; 525 return 0; 526} 527/** 528 * intel_mid_dma_device_control - DMA device control 529 * @chan: chan for DMA control 530 * @cmd: control cmd 531 * @arg: cmd arg value 532 * 533 * Perform DMA control command 534 */ 535static int intel_mid_dma_device_control(struct dma_chan *chan, 536 enum dma_ctrl_cmd cmd, unsigned long arg) 537{ 538 struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); 539 struct middma_device *mid = to_middma_device(chan->device); 540 struct intel_mid_dma_desc *desc, *_desc; 541 union intel_mid_dma_cfg_lo cfg_lo; 542 543 if (cmd == DMA_SLAVE_CONFIG) 544 return dma_slave_control(chan, arg); 545 546 if (cmd != DMA_TERMINATE_ALL) 547 return -ENXIO; 548 549 spin_lock_bh(&midc->lock); 550 if (midc->busy == false) { 551 spin_unlock_bh(&midc->lock); 552 return 0; 553 } 554 /*Suspend and disable the channel*/ 555 cfg_lo.cfg_lo = ioread32(midc->ch_regs + CFG_LOW); 556 cfg_lo.cfgx.ch_susp = 1; 557 iowrite32(cfg_lo.cfg_lo, midc->ch_regs + CFG_LOW); 558 iowrite32(DISABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN); 559 midc->busy = false; 560 /* Disable interrupts */ 561 disable_dma_interrupt(midc); 562 midc->descs_allocated = 0; 563 564 spin_unlock_bh(&midc->lock); 565 list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) { 566 if (desc->lli != NULL) { 567 pci_pool_free(desc->lli_pool, desc->lli, 568 desc->lli_phys); 569 pci_pool_destroy(desc->lli_pool); 570 } 571 list_move(&desc->desc_node, &midc->free_list); 572 } 573 return 0; 574} 575 576 577/** 578 * intel_mid_dma_prep_memcpy - Prep memcpy txn 579 * @chan: chan for DMA transfer 580 * @dest: destn address 581 * @src: src address 582 * @len: DMA transfer len 583 * @flags: DMA flags 584 * 585 * Perform a DMA memcpy. Note we support slave periphral DMA transfers only 586 * The periphral txn details should be filled in slave structure properly 587 * Returns the descriptor for this txn 588 */ 589static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy( 590 struct dma_chan *chan, dma_addr_t dest, 591 dma_addr_t src, size_t len, unsigned long flags) 592{ 593 struct intel_mid_dma_chan *midc; 594 struct intel_mid_dma_desc *desc = NULL; 595 struct intel_mid_dma_slave *mids; 596 union intel_mid_dma_ctl_lo ctl_lo; 597 union intel_mid_dma_ctl_hi ctl_hi; 598 union intel_mid_dma_cfg_lo cfg_lo; 599 union intel_mid_dma_cfg_hi cfg_hi; 600 enum dma_slave_buswidth width; 601 602 pr_debug("MDMA: Prep for memcpy\n"); 603 BUG_ON(!chan); 604 if (!len) 605 return NULL; 606 607 midc = to_intel_mid_dma_chan(chan); 608 BUG_ON(!midc); 609 610 mids = midc->mid_slave; 611 BUG_ON(!mids); 612 613 pr_debug("MDMA:called for DMA %x CH %d Length %zu\n", 614 midc->dma->pci_id, midc->ch_id, len); 615 pr_debug("MDMA:Cfg passed Mode %x, Dirn %x, HS %x, Width %x\n", 616 mids->cfg_mode, mids->dma_slave.direction, 617 mids->hs_mode, mids->dma_slave.src_addr_width); 618 619 /*calculate CFG_LO*/ 620 if (mids->hs_mode == LNW_DMA_SW_HS) { 621 cfg_lo.cfg_lo = 0; 622 cfg_lo.cfgx.hs_sel_dst = 1; 623 cfg_lo.cfgx.hs_sel_src = 1; 624 } else if (mids->hs_mode == LNW_DMA_HW_HS) 625 cfg_lo.cfg_lo = 0x00000; 626 627 /*calculate CFG_HI*/ 628 if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) { 629 /*SW HS only*/ 630 cfg_hi.cfg_hi = 0; 631 } else { 632 cfg_hi.cfg_hi = 0; 633 if (midc->dma->pimr_mask) { 634 cfg_hi.cfgx.protctl = 0x0; /*default value*/ 635 cfg_hi.cfgx.fifo_mode = 1; 636 if (mids->dma_slave.direction == DMA_TO_DEVICE) { 637 cfg_hi.cfgx.src_per = 0; 638 if (mids->device_instance == 0) 639 cfg_hi.cfgx.dst_per = 3; 640 if (mids->device_instance == 1) 641 cfg_hi.cfgx.dst_per = 1; 642 } else if (mids->dma_slave.direction == DMA_FROM_DEVICE) { 643 if (mids->device_instance == 0) 644 cfg_hi.cfgx.src_per = 2; 645 if (mids->device_instance == 1) 646 cfg_hi.cfgx.src_per = 0; 647 cfg_hi.cfgx.dst_per = 0; 648 } 649 } else { 650 cfg_hi.cfgx.protctl = 0x1; /*default value*/ 651 cfg_hi.cfgx.src_per = cfg_hi.cfgx.dst_per = 652 midc->ch_id - midc->dma->chan_base; 653 } 654 } 655 656 /*calculate CTL_HI*/ 657 ctl_hi.ctlx.reser = 0; 658 ctl_hi.ctlx.done = 0; 659 width = mids->dma_slave.src_addr_width; 660 661 ctl_hi.ctlx.block_ts = get_block_ts(len, width, midc->dma->block_size); 662 pr_debug("MDMA:calc len %d for block size %d\n", 663 ctl_hi.ctlx.block_ts, midc->dma->block_size); 664 /*calculate CTL_LO*/ 665 ctl_lo.ctl_lo = 0; 666 ctl_lo.ctlx.int_en = 1; 667 ctl_lo.ctlx.dst_msize = mids->dma_slave.src_maxburst; 668 ctl_lo.ctlx.src_msize = mids->dma_slave.dst_maxburst; 669 670 /* 671 * Here we need some translation from "enum dma_slave_buswidth" 672 * to the format for our dma controller 673 * standard intel_mid_dmac's format 674 * 1 Byte 0b000 675 * 2 Bytes 0b001 676 * 4 Bytes 0b010 677 */ 678 ctl_lo.ctlx.dst_tr_width = mids->dma_slave.dst_addr_width / 2; 679 ctl_lo.ctlx.src_tr_width = mids->dma_slave.src_addr_width / 2; 680 681 if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) { 682 ctl_lo.ctlx.tt_fc = 0; 683 ctl_lo.ctlx.sinc = 0; 684 ctl_lo.ctlx.dinc = 0; 685 } else { 686 if (mids->dma_slave.direction == DMA_TO_DEVICE) { 687 ctl_lo.ctlx.sinc = 0; 688 ctl_lo.ctlx.dinc = 2; 689 ctl_lo.ctlx.tt_fc = 1; 690 } else if (mids->dma_slave.direction == DMA_FROM_DEVICE) { 691 ctl_lo.ctlx.sinc = 2; 692 ctl_lo.ctlx.dinc = 0; 693 ctl_lo.ctlx.tt_fc = 2; 694 } 695 } 696 697 pr_debug("MDMA:Calc CTL LO %x, CTL HI %x, CFG LO %x, CFG HI %x\n", 698 ctl_lo.ctl_lo, ctl_hi.ctl_hi, cfg_lo.cfg_lo, cfg_hi.cfg_hi); 699 700 enable_dma_interrupt(midc); 701 702 desc = midc_desc_get(midc); 703 if (desc == NULL) 704 goto err_desc_get; 705 desc->sar = src; 706 desc->dar = dest ; 707 desc->len = len; 708 desc->cfg_hi = cfg_hi.cfg_hi; 709 desc->cfg_lo = cfg_lo.cfg_lo; 710 desc->ctl_lo = ctl_lo.ctl_lo; 711 desc->ctl_hi = ctl_hi.ctl_hi; 712 desc->width = width; 713 desc->dirn = mids->dma_slave.direction; 714 desc->lli_phys = 0; 715 desc->lli = NULL; 716 desc->lli_pool = NULL; 717 return &desc->txd; 718 719err_desc_get: 720 pr_err("ERR_MDMA: Failed to get desc\n"); 721 midc_desc_put(midc, desc); 722 return NULL; 723} 724/** 725 * intel_mid_dma_prep_slave_sg - Prep slave sg txn 726 * @chan: chan for DMA transfer 727 * @sgl: scatter gather list 728 * @sg_len: length of sg txn 729 * @direction: DMA transfer dirtn 730 * @flags: DMA flags 731 * 732 * Prepares LLI based periphral transfer 733 */ 734static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg( 735 struct dma_chan *chan, struct scatterlist *sgl, 736 unsigned int sg_len, enum dma_data_direction direction, 737 unsigned long flags) 738{ 739 struct intel_mid_dma_chan *midc = NULL; 740 struct intel_mid_dma_slave *mids = NULL; 741 struct intel_mid_dma_desc *desc = NULL; 742 struct dma_async_tx_descriptor *txd = NULL; 743 union intel_mid_dma_ctl_lo ctl_lo; 744 745 pr_debug("MDMA: Prep for slave SG\n"); 746 747 if (!sg_len) { 748 pr_err("MDMA: Invalid SG length\n"); 749 return NULL; 750 } 751 midc = to_intel_mid_dma_chan(chan); 752 BUG_ON(!midc); 753 754 mids = midc->mid_slave; 755 BUG_ON(!mids); 756 757 if (!midc->dma->pimr_mask) { 758 /* We can still handle sg list with only one item */ 759 if (sg_len == 1) { 760 txd = intel_mid_dma_prep_memcpy(chan, 761 mids->dma_slave.dst_addr, 762 mids->dma_slave.src_addr, 763 sgl->length, 764 flags); 765 return txd; 766 } else { 767 pr_warn("MDMA: SG list is not supported by this controller\n"); 768 return NULL; 769 } 770 } 771 772 pr_debug("MDMA: SG Length = %d, direction = %d, Flags = %#lx\n", 773 sg_len, direction, flags); 774 775 txd = intel_mid_dma_prep_memcpy(chan, 0, 0, sgl->length, flags); 776 if (NULL == txd) { 777 pr_err("MDMA: Prep memcpy failed\n"); 778 return NULL; 779 } 780 781 desc = to_intel_mid_dma_desc(txd); 782 desc->dirn = direction; 783 ctl_lo.ctl_lo = desc->ctl_lo; 784 ctl_lo.ctlx.llp_dst_en = 1; 785 ctl_lo.ctlx.llp_src_en = 1; 786 desc->ctl_lo = ctl_lo.ctl_lo; 787 desc->lli_length = sg_len; 788 desc->current_lli = 0; 789 /* DMA coherent memory pool for LLI descriptors*/ 790 desc->lli_pool = pci_pool_create("intel_mid_dma_lli_pool", 791 midc->dma->pdev, 792 (sizeof(struct intel_mid_dma_lli)*sg_len), 793 32, 0); 794 if (NULL == desc->lli_pool) { 795 pr_err("MID_DMA:LLI pool create failed\n"); 796 return NULL; 797 } 798 799 desc->lli = pci_pool_alloc(desc->lli_pool, GFP_KERNEL, &desc->lli_phys); 800 if (!desc->lli) { 801 pr_err("MID_DMA: LLI alloc failed\n"); 802 pci_pool_destroy(desc->lli_pool); 803 return NULL; 804 } 805 806 midc_lli_fill_sg(midc, desc, sgl, sg_len, flags); 807 if (flags & DMA_PREP_INTERRUPT) { 808 iowrite32(UNMASK_INTR_REG(midc->ch_id), 809 midc->dma_base + MASK_BLOCK); 810 pr_debug("MDMA:Enabled Block interrupt\n"); 811 } 812 return &desc->txd; 813} 814 815/** 816 * intel_mid_dma_free_chan_resources - Frees dma resources 817 * @chan: chan requiring attention 818 * 819 * Frees the allocated resources on this DMA chan 820 */ 821static void intel_mid_dma_free_chan_resources(struct dma_chan *chan) 822{ 823 struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); 824 struct middma_device *mid = to_middma_device(chan->device); 825 struct intel_mid_dma_desc *desc, *_desc; 826 827 if (true == midc->busy) { 828 /*trying to free ch in use!!!!!*/ 829 pr_err("ERR_MDMA: trying to free ch in use\n"); 830 } 831 pm_runtime_put(&mid->pdev->dev); 832 spin_lock_bh(&midc->lock); 833 midc->descs_allocated = 0; 834 list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) { 835 list_del(&desc->desc_node); 836 pci_pool_free(mid->dma_pool, desc, desc->txd.phys); 837 } 838 list_for_each_entry_safe(desc, _desc, &midc->free_list, desc_node) { 839 list_del(&desc->desc_node); 840 pci_pool_free(mid->dma_pool, desc, desc->txd.phys); 841 } 842 list_for_each_entry_safe(desc, _desc, &midc->queue, desc_node) { 843 list_del(&desc->desc_node); 844 pci_pool_free(mid->dma_pool, desc, desc->txd.phys); 845 } 846 spin_unlock_bh(&midc->lock); 847 midc->in_use = false; 848 midc->busy = false; 849 /* Disable CH interrupts */ 850 iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_BLOCK); 851 iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_ERR); 852} 853 854/** 855 * intel_mid_dma_alloc_chan_resources - Allocate dma resources 856 * @chan: chan requiring attention 857 * 858 * Allocates DMA resources on this chan 859 * Return the descriptors allocated 860 */ 861static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan) 862{ 863 struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); 864 struct middma_device *mid = to_middma_device(chan->device); 865 struct intel_mid_dma_desc *desc; 866 dma_addr_t phys; 867 int i = 0; 868 869 pm_runtime_get_sync(&mid->pdev->dev); 870 871 if (mid->state == SUSPENDED) { 872 if (dma_resume(mid->pdev)) { 873 pr_err("ERR_MDMA: resume failed"); 874 return -EFAULT; 875 } 876 } 877 878 /* ASSERT: channel is idle */ 879 if (test_ch_en(mid->dma_base, midc->ch_id)) { 880 /*ch is not idle*/ 881 pr_err("ERR_MDMA: ch not idle\n"); 882 pm_runtime_put(&mid->pdev->dev); 883 return -EIO; 884 } 885 midc->completed = chan->cookie = 1; 886 887 spin_lock_bh(&midc->lock); 888 while (midc->descs_allocated < DESCS_PER_CHANNEL) { 889 spin_unlock_bh(&midc->lock); 890 desc = pci_pool_alloc(mid->dma_pool, GFP_KERNEL, &phys); 891 if (!desc) { 892 pr_err("ERR_MDMA: desc failed\n"); 893 pm_runtime_put(&mid->pdev->dev); 894 return -ENOMEM; 895 /*check*/ 896 } 897 dma_async_tx_descriptor_init(&desc->txd, chan); 898 desc->txd.tx_submit = intel_mid_dma_tx_submit; 899 desc->txd.flags = DMA_CTRL_ACK; 900 desc->txd.phys = phys; 901 spin_lock_bh(&midc->lock); 902 i = ++midc->descs_allocated; 903 list_add_tail(&desc->desc_node, &midc->free_list); 904 } 905 spin_unlock_bh(&midc->lock); 906 midc->in_use = true; 907 midc->busy = false; 908 pr_debug("MID_DMA: Desc alloc done ret: %d desc\n", i); 909 return i; 910} 911 912/** 913 * midc_handle_error - Handle DMA txn error 914 * @mid: controller where error occurred 915 * @midc: chan where error occurred 916 * 917 * Scan the descriptor for error 918 */ 919static void midc_handle_error(struct middma_device *mid, 920 struct intel_mid_dma_chan *midc) 921{ 922 midc_scan_descriptors(mid, midc); 923} 924 925/** 926 * dma_tasklet - DMA interrupt tasklet 927 * @data: tasklet arg (the controller structure) 928 * 929 * Scan the controller for interrupts for completion/error 930 * Clear the interrupt and call for handling completion/error 931 */ 932static void dma_tasklet(unsigned long data) 933{ 934 struct middma_device *mid = NULL; 935 struct intel_mid_dma_chan *midc = NULL; 936 u32 status, raw_tfr, raw_block; 937 int i; 938 939 mid = (struct middma_device *)data; 940 if (mid == NULL) { 941 pr_err("ERR_MDMA: tasklet Null param\n"); 942 return; 943 } 944 pr_debug("MDMA: in tasklet for device %x\n", mid->pci_id); 945 raw_tfr = ioread32(mid->dma_base + RAW_TFR); 946 raw_block = ioread32(mid->dma_base + RAW_BLOCK); 947 status = raw_tfr | raw_block; 948 status &= mid->intr_mask; 949 while (status) { 950 /*txn interrupt*/ 951 i = get_ch_index(&status, mid->chan_base); 952 if (i < 0) { 953 pr_err("ERR_MDMA:Invalid ch index %x\n", i); 954 return; 955 } 956 midc = &mid->ch[i]; 957 if (midc == NULL) { 958 pr_err("ERR_MDMA:Null param midc\n"); 959 return; 960 } 961 pr_debug("MDMA:Tx complete interrupt %x, Ch No %d Index %d\n", 962 status, midc->ch_id, i); 963 midc->raw_tfr = raw_tfr; 964 midc->raw_block = raw_block; 965 spin_lock_bh(&midc->lock); 966 /*clearing this interrupts first*/ 967 iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_TFR); 968 if (raw_block) { 969 iowrite32((1 << midc->ch_id), 970 mid->dma_base + CLEAR_BLOCK); 971 } 972 midc_scan_descriptors(mid, midc); 973 pr_debug("MDMA:Scan of desc... complete, unmasking\n"); 974 iowrite32(UNMASK_INTR_REG(midc->ch_id), 975 mid->dma_base + MASK_TFR); 976 if (raw_block) { 977 iowrite32(UNMASK_INTR_REG(midc->ch_id), 978 mid->dma_base + MASK_BLOCK); 979 } 980 spin_unlock_bh(&midc->lock); 981 } 982 983 status = ioread32(mid->dma_base + RAW_ERR); 984 status &= mid->intr_mask; 985 while (status) { 986 /*err interrupt*/ 987 i = get_ch_index(&status, mid->chan_base); 988 if (i < 0) { 989 pr_err("ERR_MDMA:Invalid ch index %x\n", i); 990 return; 991 } 992 midc = &mid->ch[i]; 993 if (midc == NULL) { 994 pr_err("ERR_MDMA:Null param midc\n"); 995 return; 996 } 997 pr_debug("MDMA:Tx complete interrupt %x, Ch No %d Index %d\n", 998 status, midc->ch_id, i); 999 1000 iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_ERR); 1001 spin_lock_bh(&midc->lock); 1002 midc_handle_error(mid, midc); 1003 iowrite32(UNMASK_INTR_REG(midc->ch_id), 1004 mid->dma_base + MASK_ERR); 1005 spin_unlock_bh(&midc->lock); 1006 } 1007 pr_debug("MDMA:Exiting takslet...\n"); 1008 return; 1009} 1010 1011static void dma_tasklet1(unsigned long data) 1012{ 1013 pr_debug("MDMA:in takslet1...\n"); 1014 return dma_tasklet(data); 1015} 1016 1017static void dma_tasklet2(unsigned long data) 1018{ 1019 pr_debug("MDMA:in takslet2...\n"); 1020 return dma_tasklet(data); 1021} 1022 1023/** 1024 * intel_mid_dma_interrupt - DMA ISR 1025 * @irq: IRQ where interrupt occurred 1026 * @data: ISR cllback data (the controller structure) 1027 * 1028 * See if this is our interrupt if so then schedule the tasklet 1029 * otherwise ignore 1030 */ 1031static irqreturn_t intel_mid_dma_interrupt(int irq, void *data) 1032{ 1033 struct middma_device *mid = data; 1034 u32 tfr_status, err_status; 1035 int call_tasklet = 0; 1036 1037 tfr_status = ioread32(mid->dma_base + RAW_TFR); 1038 err_status = ioread32(mid->dma_base + RAW_ERR); 1039 if (!tfr_status && !err_status) 1040 return IRQ_NONE; 1041 1042 /*DMA Interrupt*/ 1043 pr_debug("MDMA:Got an interrupt on irq %d\n", irq); 1044 pr_debug("MDMA: Status %x, Mask %x\n", tfr_status, mid->intr_mask); 1045 tfr_status &= mid->intr_mask; 1046 if (tfr_status) { 1047 /*need to disable intr*/ 1048 iowrite32((tfr_status << INT_MASK_WE), mid->dma_base + MASK_TFR); 1049 iowrite32((tfr_status << INT_MASK_WE), mid->dma_base + MASK_BLOCK); 1050 pr_debug("MDMA: Calling tasklet %x\n", tfr_status); 1051 call_tasklet = 1; 1052 } 1053 err_status &= mid->intr_mask; 1054 if (err_status) { 1055 iowrite32(MASK_INTR_REG(err_status), mid->dma_base + MASK_ERR); 1056 call_tasklet = 1; 1057 } 1058 if (call_tasklet) 1059 tasklet_schedule(&mid->tasklet); 1060 1061 return IRQ_HANDLED; 1062} 1063 1064static irqreturn_t intel_mid_dma_interrupt1(int irq, void *data) 1065{ 1066 return intel_mid_dma_interrupt(irq, data); 1067} 1068 1069static irqreturn_t intel_mid_dma_interrupt2(int irq, void *data) 1070{ 1071 return intel_mid_dma_interrupt(irq, data); 1072} 1073 1074/** 1075 * mid_setup_dma - Setup the DMA controller 1076 * @pdev: Controller PCI device structure 1077 * 1078 * Initialize the DMA controller, channels, registers with DMA engine, 1079 * ISR. Initialize DMA controller channels. 1080 */ 1081static int mid_setup_dma(struct pci_dev *pdev) 1082{ 1083 struct middma_device *dma = pci_get_drvdata(pdev); 1084 int err, i; 1085 1086 /* DMA coherent memory pool for DMA descriptor allocations */ 1087 dma->dma_pool = pci_pool_create("intel_mid_dma_desc_pool", pdev, 1088 sizeof(struct intel_mid_dma_desc), 1089 32, 0); 1090 if (NULL == dma->dma_pool) { 1091 pr_err("ERR_MDMA:pci_pool_create failed\n"); 1092 err = -ENOMEM; 1093 goto err_dma_pool; 1094 } 1095 1096 INIT_LIST_HEAD(&dma->common.channels); 1097 dma->pci_id = pdev->device; 1098 if (dma->pimr_mask) { 1099 dma->mask_reg = ioremap(LNW_PERIPHRAL_MASK_BASE, 1100 LNW_PERIPHRAL_MASK_SIZE); 1101 if (dma->mask_reg == NULL) { 1102 pr_err("ERR_MDMA:Can't map periphral intr space !!\n"); 1103 return -ENOMEM; 1104 } 1105 } else 1106 dma->mask_reg = NULL; 1107 1108 pr_debug("MDMA:Adding %d channel for this controller\n", dma->max_chan); 1109 /*init CH structures*/ 1110 dma->intr_mask = 0; 1111 dma->state = RUNNING; 1112 for (i = 0; i < dma->max_chan; i++) { 1113 struct intel_mid_dma_chan *midch = &dma->ch[i]; 1114 1115 midch->chan.device = &dma->common; 1116 midch->chan.cookie = 1; 1117 midch->chan.chan_id = i; 1118 midch->ch_id = dma->chan_base + i; 1119 pr_debug("MDMA:Init CH %d, ID %d\n", i, midch->ch_id); 1120 1121 midch->dma_base = dma->dma_base; 1122 midch->ch_regs = dma->dma_base + DMA_CH_SIZE * midch->ch_id; 1123 midch->dma = dma; 1124 dma->intr_mask |= 1 << (dma->chan_base + i); 1125 spin_lock_init(&midch->lock); 1126 1127 INIT_LIST_HEAD(&midch->active_list); 1128 INIT_LIST_HEAD(&midch->queue); 1129 INIT_LIST_HEAD(&midch->free_list); 1130 /*mask interrupts*/ 1131 iowrite32(MASK_INTR_REG(midch->ch_id), 1132 dma->dma_base + MASK_BLOCK); 1133 iowrite32(MASK_INTR_REG(midch->ch_id), 1134 dma->dma_base + MASK_SRC_TRAN); 1135 iowrite32(MASK_INTR_REG(midch->ch_id), 1136 dma->dma_base + MASK_DST_TRAN); 1137 iowrite32(MASK_INTR_REG(midch->ch_id), 1138 dma->dma_base + MASK_ERR); 1139 iowrite32(MASK_INTR_REG(midch->ch_id), 1140 dma->dma_base + MASK_TFR); 1141 1142 disable_dma_interrupt(midch); 1143 list_add_tail(&midch->chan.device_node, &dma->common.channels); 1144 } 1145 pr_debug("MDMA: Calc Mask as %x for this controller\n", dma->intr_mask); 1146 1147 /*init dma structure*/ 1148 dma_cap_zero(dma->common.cap_mask); 1149 dma_cap_set(DMA_MEMCPY, dma->common.cap_mask); 1150 dma_cap_set(DMA_SLAVE, dma->common.cap_mask); 1151 dma_cap_set(DMA_PRIVATE, dma->common.cap_mask); 1152 dma->common.dev = &pdev->dev; 1153 dma->common.chancnt = dma->max_chan; 1154 1155 dma->common.device_alloc_chan_resources = 1156 intel_mid_dma_alloc_chan_resources; 1157 dma->common.device_free_chan_resources = 1158 intel_mid_dma_free_chan_resources; 1159 1160 dma->common.device_tx_status = intel_mid_dma_tx_status; 1161 dma->common.device_prep_dma_memcpy = intel_mid_dma_prep_memcpy; 1162 dma->common.device_issue_pending = intel_mid_dma_issue_pending; 1163 dma->common.device_prep_slave_sg = intel_mid_dma_prep_slave_sg; 1164 dma->common.device_control = intel_mid_dma_device_control; 1165 1166 /*enable dma cntrl*/ 1167 iowrite32(REG_BIT0, dma->dma_base + DMA_CFG); 1168 1169 /*register irq */ 1170 if (dma->pimr_mask) { 1171 pr_debug("MDMA:Requesting irq shared for DMAC1\n"); 1172 err = request_irq(pdev->irq, intel_mid_dma_interrupt1, 1173 IRQF_SHARED, "INTEL_MID_DMAC1", dma); 1174 if (0 != err) 1175 goto err_irq; 1176 } else { 1177 dma->intr_mask = 0x03; 1178 pr_debug("MDMA:Requesting irq for DMAC2\n"); 1179 err = request_irq(pdev->irq, intel_mid_dma_interrupt2, 1180 IRQF_SHARED, "INTEL_MID_DMAC2", dma); 1181 if (0 != err) 1182 goto err_irq; 1183 } 1184 /*register device w/ engine*/ 1185 err = dma_async_device_register(&dma->common); 1186 if (0 != err) { 1187 pr_err("ERR_MDMA:device_register failed: %d\n", err); 1188 goto err_engine; 1189 } 1190 if (dma->pimr_mask) { 1191 pr_debug("setting up tasklet1 for DMAC1\n"); 1192 tasklet_init(&dma->tasklet, dma_tasklet1, (unsigned long)dma); 1193 } else { 1194 pr_debug("setting up tasklet2 for DMAC2\n"); 1195 tasklet_init(&dma->tasklet, dma_tasklet2, (unsigned long)dma); 1196 } 1197 return 0; 1198 1199err_engine: 1200 free_irq(pdev->irq, dma); 1201err_irq: 1202 pci_pool_destroy(dma->dma_pool); 1203err_dma_pool: 1204 pr_err("ERR_MDMA:setup_dma failed: %d\n", err); 1205 return err; 1206 1207} 1208 1209/** 1210 * middma_shutdown - Shutdown the DMA controller 1211 * @pdev: Controller PCI device structure 1212 * 1213 * Called by remove 1214 * Unregister DMa controller, clear all structures and free interrupt 1215 */ 1216static void middma_shutdown(struct pci_dev *pdev) 1217{ 1218 struct middma_device *device = pci_get_drvdata(pdev); 1219 1220 dma_async_device_unregister(&device->common); 1221 pci_pool_destroy(device->dma_pool); 1222 if (device->mask_reg) 1223 iounmap(device->mask_reg); 1224 if (device->dma_base) 1225 iounmap(device->dma_base); 1226 free_irq(pdev->irq, device); 1227 return; 1228} 1229 1230/** 1231 * intel_mid_dma_probe - PCI Probe 1232 * @pdev: Controller PCI device structure 1233 * @id: pci device id structure 1234 * 1235 * Initialize the PCI device, map BARs, query driver data. 1236 * Call setup_dma to complete contoller and chan initilzation 1237 */ 1238static int __devinit intel_mid_dma_probe(struct pci_dev *pdev, 1239 const struct pci_device_id *id) 1240{ 1241 struct middma_device *device; 1242 u32 base_addr, bar_size; 1243 struct intel_mid_dma_probe_info *info; 1244 int err; 1245 1246 pr_debug("MDMA: probe for %x\n", pdev->device); 1247 info = (void *)id->driver_data; 1248 pr_debug("MDMA: CH %d, base %d, block len %d, Periphral mask %x\n", 1249 info->max_chan, info->ch_base, 1250 info->block_size, info->pimr_mask); 1251 1252 err = pci_enable_device(pdev); 1253 if (err) 1254 goto err_enable_device; 1255 1256 err = pci_request_regions(pdev, "intel_mid_dmac"); 1257 if (err) 1258 goto err_request_regions; 1259 1260 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 1261 if (err) 1262 goto err_set_dma_mask; 1263 1264 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 1265 if (err) 1266 goto err_set_dma_mask; 1267 1268 device = kzalloc(sizeof(*device), GFP_KERNEL); 1269 if (!device) { 1270 pr_err("ERR_MDMA:kzalloc failed probe\n"); 1271 err = -ENOMEM; 1272 goto err_kzalloc; 1273 } 1274 device->pdev = pci_dev_get(pdev); 1275 1276 base_addr = pci_resource_start(pdev, 0); 1277 bar_size = pci_resource_len(pdev, 0); 1278 device->dma_base = ioremap_nocache(base_addr, DMA_REG_SIZE); 1279 if (!device->dma_base) { 1280 pr_err("ERR_MDMA:ioremap failed\n"); 1281 err = -ENOMEM; 1282 goto err_ioremap; 1283 } 1284 pci_set_drvdata(pdev, device); 1285 pci_set_master(pdev); 1286 device->max_chan = info->max_chan; 1287 device->chan_base = info->ch_base; 1288 device->block_size = info->block_size; 1289 device->pimr_mask = info->pimr_mask; 1290 1291 err = mid_setup_dma(pdev); 1292 if (err) 1293 goto err_dma; 1294 1295 pm_runtime_put_noidle(&pdev->dev); 1296 pm_runtime_allow(&pdev->dev); 1297 return 0; 1298 1299err_dma: 1300 iounmap(device->dma_base); 1301err_ioremap: 1302 pci_dev_put(pdev); 1303 kfree(device); 1304err_kzalloc: 1305err_set_dma_mask: 1306 pci_release_regions(pdev); 1307 pci_disable_device(pdev); 1308err_request_regions: 1309err_enable_device: 1310 pr_err("ERR_MDMA:Probe failed %d\n", err); 1311 return err; 1312} 1313 1314/** 1315 * intel_mid_dma_remove - PCI remove 1316 * @pdev: Controller PCI device structure 1317 * 1318 * Free up all resources and data 1319 * Call shutdown_dma to complete contoller and chan cleanup 1320 */ 1321static void __devexit intel_mid_dma_remove(struct pci_dev *pdev) 1322{ 1323 struct middma_device *device = pci_get_drvdata(pdev); 1324 1325 pm_runtime_get_noresume(&pdev->dev); 1326 pm_runtime_forbid(&pdev->dev); 1327 middma_shutdown(pdev); 1328 pci_dev_put(pdev); 1329 kfree(device); 1330 pci_release_regions(pdev); 1331 pci_disable_device(pdev); 1332} 1333 1334/* Power Management */ 1335/* 1336* dma_suspend - PCI suspend function 1337* 1338* @pci: PCI device structure 1339* @state: PM message 1340* 1341* This function is called by OS when a power event occurs 1342*/ 1343int dma_suspend(struct pci_dev *pci, pm_message_t state) 1344{ 1345 int i; 1346 struct middma_device *device = pci_get_drvdata(pci); 1347 pr_debug("MDMA: dma_suspend called\n"); 1348 1349 for (i = 0; i < device->max_chan; i++) { 1350 if (device->ch[i].in_use) 1351 return -EAGAIN; 1352 } 1353 device->state = SUSPENDED; 1354 pci_save_state(pci); 1355 pci_disable_device(pci); 1356 pci_set_power_state(pci, PCI_D3hot); 1357 return 0; 1358} 1359 1360/** 1361* dma_resume - PCI resume function 1362* 1363* @pci: PCI device structure 1364* 1365* This function is called by OS when a power event occurs 1366*/ 1367int dma_resume(struct pci_dev *pci) 1368{ 1369 int ret; 1370 struct middma_device *device = pci_get_drvdata(pci); 1371 1372 pr_debug("MDMA: dma_resume called\n"); 1373 pci_set_power_state(pci, PCI_D0); 1374 pci_restore_state(pci); 1375 ret = pci_enable_device(pci); 1376 if (ret) { 1377 pr_err("MDMA: device can't be enabled for %x\n", pci->device); 1378 return ret; 1379 } 1380 device->state = RUNNING; 1381 iowrite32(REG_BIT0, device->dma_base + DMA_CFG); 1382 return 0; 1383} 1384 1385static int dma_runtime_suspend(struct device *dev) 1386{ 1387 struct pci_dev *pci_dev = to_pci_dev(dev); 1388 struct middma_device *device = pci_get_drvdata(pci_dev); 1389 1390 device->state = SUSPENDED; 1391 return 0; 1392} 1393 1394static int dma_runtime_resume(struct device *dev) 1395{ 1396 struct pci_dev *pci_dev = to_pci_dev(dev); 1397 struct middma_device *device = pci_get_drvdata(pci_dev); 1398 1399 device->state = RUNNING; 1400 iowrite32(REG_BIT0, device->dma_base + DMA_CFG); 1401 return 0; 1402} 1403 1404static int dma_runtime_idle(struct device *dev) 1405{ 1406 struct pci_dev *pdev = to_pci_dev(dev); 1407 struct middma_device *device = pci_get_drvdata(pdev); 1408 int i; 1409 1410 for (i = 0; i < device->max_chan; i++) { 1411 if (device->ch[i].in_use) 1412 return -EAGAIN; 1413 } 1414 1415 return pm_schedule_suspend(dev, 0); 1416} 1417 1418/****************************************************************************** 1419* PCI stuff 1420*/ 1421static struct pci_device_id intel_mid_dma_ids[] = { 1422 { PCI_VDEVICE(INTEL, INTEL_MID_DMAC1_ID), INFO(2, 6, 4095, 0x200020)}, 1423 { PCI_VDEVICE(INTEL, INTEL_MID_DMAC2_ID), INFO(2, 0, 2047, 0)}, 1424 { PCI_VDEVICE(INTEL, INTEL_MID_GP_DMAC2_ID), INFO(2, 0, 2047, 0)}, 1425 { PCI_VDEVICE(INTEL, INTEL_MFLD_DMAC1_ID), INFO(4, 0, 4095, 0x400040)}, 1426 { 0, } 1427}; 1428MODULE_DEVICE_TABLE(pci, intel_mid_dma_ids); 1429 1430static const struct dev_pm_ops intel_mid_dma_pm = { 1431 .runtime_suspend = dma_runtime_suspend, 1432 .runtime_resume = dma_runtime_resume, 1433 .runtime_idle = dma_runtime_idle, 1434}; 1435 1436static struct pci_driver intel_mid_dma_pci_driver = { 1437 .name = "Intel MID DMA", 1438 .id_table = intel_mid_dma_ids, 1439 .probe = intel_mid_dma_probe, 1440 .remove = __devexit_p(intel_mid_dma_remove), 1441#ifdef CONFIG_PM 1442 .suspend = dma_suspend, 1443 .resume = dma_resume, 1444 .driver = { 1445 .pm = &intel_mid_dma_pm, 1446 }, 1447#endif 1448}; 1449 1450static int __init intel_mid_dma_init(void) 1451{ 1452 pr_debug("INFO_MDMA: LNW DMA Driver Version %s\n", 1453 INTEL_MID_DMA_DRIVER_VERSION); 1454 return pci_register_driver(&intel_mid_dma_pci_driver); 1455} 1456fs_initcall(intel_mid_dma_init); 1457 1458static void __exit intel_mid_dma_exit(void) 1459{ 1460 pci_unregister_driver(&intel_mid_dma_pci_driver); 1461} 1462module_exit(intel_mid_dma_exit); 1463 1464MODULE_AUTHOR("Vinod Koul <vinod.koul@intel.com>"); 1465MODULE_DESCRIPTION("Intel (R) MID DMAC Driver"); 1466MODULE_LICENSE("GPL v2"); 1467MODULE_VERSION(INTEL_MID_DMA_DRIVER_VERSION);