Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

dmaengine: intel-mid-dma: remove the driver

Since the last and only user of this driver has been converted to use dw_dmac, we
can remove the driver from the tree.

Moreover, besides having been unmaintained for a long time, the driver serves the
DesignWare DMA IP, for which we already have a driver in the tree.

Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Acked-by: Vinod Koul <vinod.koul@intel.com>
Signed-off-by: Mark Brown <broonie@kernel.org>

Authored by Andy Shevchenko and committed by Mark Brown (36111da7 dd114443)
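For former users the replacement path is the generic dmaengine slave API, which dw_dmac implements. As a rough sketch under assumed names (my_dev_start_tx(), the "tx" channel name and the FIFO address are placeholders, not code from the converted user), a dw_dmac client looks roughly like this:

#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

/* Hypothetical client of the generic dmaengine API; dw_dmac sits behind it. */
static int my_dev_start_tx(struct device *dev, dma_addr_t buf, size_t len,
			   dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction      = DMA_MEM_TO_DEV,
		.dst_addr       = fifo_addr,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst   = 16,
	};
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;
	dma_cookie_t cookie;

	/* "tx" must match a channel name known to the platform/firmware */
	chan = dma_request_slave_channel(dev, "tx");
	if (!chan)
		return -ENODEV;

	dmaengine_slave_config(chan, &cfg);

	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dma_release_channel(chan);
		return -ENOMEM;
	}

	cookie = dmaengine_submit(desc);   /* queue the descriptor */
	dma_async_issue_pending(chan);     /* start the transfer */

	/* a real driver keeps the channel from probe and releases it at remove */
	return dma_submit_error(cookie) ? -EIO : 0;
}

Nothing in this sketch is specific to Intel MID hardware, which is the point of the removal: the generic API plus dw_dmac covers what intel_mid_dma provided.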

5 files changed, 1836 deletions(-)
drivers/dma/Kconfig | 13 -

--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -51,19 +51,6 @@
 	  OS and tools for MIC to use with this driver are available from
 	  <http://software.intel.com/en-us/mic-developer>.
 
-config INTEL_MID_DMAC
-	tristate "Intel MID DMA support for Peripheral DMA controllers"
-	depends on PCI && X86
-	select DMA_ENGINE
-	default n
-	help
-	  Enable support for the Intel(R) MID DMA engine present
-	  in Intel MID chipsets.
-
-	  Say Y here if you have such a chipset.
-
-	  If unsure, say N.
-
 config ASYNC_TX_ENABLE_CHANNEL_SWITCH
 	bool
 
drivers/dma/Makefile | 1 -

--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -6,7 +6,6 @@
 obj-$(CONFIG_DMA_ACPI) += acpi-dma.o
 obj-$(CONFIG_DMA_OF) += of-dma.o
 
-obj-$(CONFIG_INTEL_MID_DMAC) += intel_mid_dma.o
 obj-$(CONFIG_DMATEST) += dmatest.o
 obj-$(CONFIG_INTEL_IOATDMA) += ioat/
 obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
drivers/dma/intel_mid_dma.c | 1447 - (entire file deleted; removed contents follow)
··· 1 - /* 2 - * intel_mid_dma.c - Intel Langwell DMA Drivers 3 - * 4 - * Copyright (C) 2008-10 Intel Corp 5 - * Author: Vinod Koul <vinod.koul@intel.com> 6 - * The driver design is based on dw_dmac driver 7 - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 8 - * 9 - * This program is free software; you can redistribute it and/or modify 10 - * it under the terms of the GNU General Public License as published by 11 - * the Free Software Foundation; version 2 of the License. 12 - * 13 - * This program is distributed in the hope that it will be useful, but 14 - * WITHOUT ANY WARRANTY; without even the implied warranty of 15 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 16 - * General Public License for more details. 17 - * 18 - * You should have received a copy of the GNU General Public License along 19 - * with this program; if not, write to the Free Software Foundation, Inc., 20 - * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 21 - * 22 - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 23 - * 24 - * 25 - */ 26 - #include <linux/pci.h> 27 - #include <linux/interrupt.h> 28 - #include <linux/pm_runtime.h> 29 - #include <linux/intel_mid_dma.h> 30 - #include <linux/module.h> 31 - 32 - #include "dmaengine.h" 33 - 34 - #define MAX_CHAN 4 /*max ch across controllers*/ 35 - #include "intel_mid_dma_regs.h" 36 - 37 - #define INTEL_MID_DMAC1_ID 0x0814 38 - #define INTEL_MID_DMAC2_ID 0x0813 39 - #define INTEL_MID_GP_DMAC2_ID 0x0827 40 - #define INTEL_MFLD_DMAC1_ID 0x0830 41 - #define LNW_PERIPHRAL_MASK_BASE 0xFFAE8008 42 - #define LNW_PERIPHRAL_MASK_SIZE 0x10 43 - #define LNW_PERIPHRAL_STATUS 0x0 44 - #define LNW_PERIPHRAL_MASK 0x8 45 - 46 - struct intel_mid_dma_probe_info { 47 - u8 max_chan; 48 - u8 ch_base; 49 - u16 block_size; 50 - u32 pimr_mask; 51 - }; 52 - 53 - #define INFO(_max_chan, _ch_base, _block_size, _pimr_mask) \ 54 - ((kernel_ulong_t)&(struct intel_mid_dma_probe_info) { \ 55 - .max_chan = (_max_chan), \ 56 - .ch_base = (_ch_base), \ 57 - .block_size = (_block_size), \ 58 - .pimr_mask = (_pimr_mask), \ 59 - }) 60 - 61 - /***************************************************************************** 62 - Utility Functions*/ 63 - /** 64 - * get_ch_index - convert status to channel 65 - * @status: status mask 66 - * @base: dma ch base value 67 - * 68 - * Modify the status mask and return the channel index needing 69 - * attention (or -1 if neither) 70 - */ 71 - static int get_ch_index(int *status, unsigned int base) 72 - { 73 - int i; 74 - for (i = 0; i < MAX_CHAN; i++) { 75 - if (*status & (1 << (i + base))) { 76 - *status = *status & ~(1 << (i + base)); 77 - pr_debug("MDMA: index %d New status %x\n", i, *status); 78 - return i; 79 - } 80 - } 81 - return -1; 82 - } 83 - 84 - /** 85 - * get_block_ts - calculates dma transaction length 86 - * @len: dma transfer length 87 - * @tx_width: dma transfer src width 88 - * @block_size: dma controller max block size 89 - * 90 - * Based on src width calculate the DMA trsaction length in data items 91 - * return data items or FFFF if exceeds max length for block 92 - */ 93 - static int get_block_ts(int len, int tx_width, int block_size) 94 - { 95 - int byte_width = 0, block_ts = 0; 96 - 97 - switch (tx_width) { 98 - case DMA_SLAVE_BUSWIDTH_1_BYTE: 99 - byte_width = 1; 100 - break; 101 - case DMA_SLAVE_BUSWIDTH_2_BYTES: 102 - byte_width = 2; 103 - break; 104 - case DMA_SLAVE_BUSWIDTH_4_BYTES: 105 - default: 106 - byte_width = 4; 107 - break; 108 - } 109 - 110 - 
block_ts = len/byte_width; 111 - if (block_ts > block_size) 112 - block_ts = 0xFFFF; 113 - return block_ts; 114 - } 115 - 116 - /***************************************************************************** 117 - DMAC1 interrupt Functions*/ 118 - 119 - /** 120 - * dmac1_mask_periphral_intr - mask the periphral interrupt 121 - * @mid: dma device for which masking is required 122 - * 123 - * Masks the DMA periphral interrupt 124 - * this is valid for DMAC1 family controllers only 125 - * This controller should have periphral mask registers already mapped 126 - */ 127 - static void dmac1_mask_periphral_intr(struct middma_device *mid) 128 - { 129 - u32 pimr; 130 - 131 - if (mid->pimr_mask) { 132 - pimr = readl(mid->mask_reg + LNW_PERIPHRAL_MASK); 133 - pimr |= mid->pimr_mask; 134 - writel(pimr, mid->mask_reg + LNW_PERIPHRAL_MASK); 135 - } 136 - return; 137 - } 138 - 139 - /** 140 - * dmac1_unmask_periphral_intr - unmask the periphral interrupt 141 - * @midc: dma channel for which masking is required 142 - * 143 - * UnMasks the DMA periphral interrupt, 144 - * this is valid for DMAC1 family controllers only 145 - * This controller should have periphral mask registers already mapped 146 - */ 147 - static void dmac1_unmask_periphral_intr(struct intel_mid_dma_chan *midc) 148 - { 149 - u32 pimr; 150 - struct middma_device *mid = to_middma_device(midc->chan.device); 151 - 152 - if (mid->pimr_mask) { 153 - pimr = readl(mid->mask_reg + LNW_PERIPHRAL_MASK); 154 - pimr &= ~mid->pimr_mask; 155 - writel(pimr, mid->mask_reg + LNW_PERIPHRAL_MASK); 156 - } 157 - return; 158 - } 159 - 160 - /** 161 - * enable_dma_interrupt - enable the periphral interrupt 162 - * @midc: dma channel for which enable interrupt is required 163 - * 164 - * Enable the DMA periphral interrupt, 165 - * this is valid for DMAC1 family controllers only 166 - * This controller should have periphral mask registers already mapped 167 - */ 168 - static void enable_dma_interrupt(struct intel_mid_dma_chan *midc) 169 - { 170 - dmac1_unmask_periphral_intr(midc); 171 - 172 - /*en ch interrupts*/ 173 - iowrite32(UNMASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_TFR); 174 - iowrite32(UNMASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_ERR); 175 - return; 176 - } 177 - 178 - /** 179 - * disable_dma_interrupt - disable the periphral interrupt 180 - * @midc: dma channel for which disable interrupt is required 181 - * 182 - * Disable the DMA periphral interrupt, 183 - * this is valid for DMAC1 family controllers only 184 - * This controller should have periphral mask registers already mapped 185 - */ 186 - static void disable_dma_interrupt(struct intel_mid_dma_chan *midc) 187 - { 188 - /*Check LPE PISR, make sure fwd is disabled*/ 189 - iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_BLOCK); 190 - iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_TFR); 191 - iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_ERR); 192 - return; 193 - } 194 - 195 - /***************************************************************************** 196 - DMA channel helper Functions*/ 197 - /** 198 - * mid_desc_get - get a descriptor 199 - * @midc: dma channel for which descriptor is required 200 - * 201 - * Obtain a descriptor for the channel. Returns NULL if none are free. 
202 - * Once the descriptor is returned it is private until put on another 203 - * list or freed 204 - */ 205 - static struct intel_mid_dma_desc *midc_desc_get(struct intel_mid_dma_chan *midc) 206 - { 207 - struct intel_mid_dma_desc *desc, *_desc; 208 - struct intel_mid_dma_desc *ret = NULL; 209 - 210 - spin_lock_bh(&midc->lock); 211 - list_for_each_entry_safe(desc, _desc, &midc->free_list, desc_node) { 212 - if (async_tx_test_ack(&desc->txd)) { 213 - list_del(&desc->desc_node); 214 - ret = desc; 215 - break; 216 - } 217 - } 218 - spin_unlock_bh(&midc->lock); 219 - return ret; 220 - } 221 - 222 - /** 223 - * mid_desc_put - put a descriptor 224 - * @midc: dma channel for which descriptor is required 225 - * @desc: descriptor to put 226 - * 227 - * Return a descriptor from lwn_desc_get back to the free pool 228 - */ 229 - static void midc_desc_put(struct intel_mid_dma_chan *midc, 230 - struct intel_mid_dma_desc *desc) 231 - { 232 - if (desc) { 233 - spin_lock_bh(&midc->lock); 234 - list_add_tail(&desc->desc_node, &midc->free_list); 235 - spin_unlock_bh(&midc->lock); 236 - } 237 - } 238 - /** 239 - * midc_dostart - begin a DMA transaction 240 - * @midc: channel for which txn is to be started 241 - * @first: first descriptor of series 242 - * 243 - * Load a transaction into the engine. This must be called with midc->lock 244 - * held and bh disabled. 245 - */ 246 - static void midc_dostart(struct intel_mid_dma_chan *midc, 247 - struct intel_mid_dma_desc *first) 248 - { 249 - struct middma_device *mid = to_middma_device(midc->chan.device); 250 - 251 - /* channel is idle */ 252 - if (midc->busy && test_ch_en(midc->dma_base, midc->ch_id)) { 253 - /*error*/ 254 - pr_err("ERR_MDMA: channel is busy in start\n"); 255 - /* The tasklet will hopefully advance the queue... */ 256 - return; 257 - } 258 - midc->busy = true; 259 - /*write registers and en*/ 260 - iowrite32(first->sar, midc->ch_regs + SAR); 261 - iowrite32(first->dar, midc->ch_regs + DAR); 262 - iowrite32(first->lli_phys, midc->ch_regs + LLP); 263 - iowrite32(first->cfg_hi, midc->ch_regs + CFG_HIGH); 264 - iowrite32(first->cfg_lo, midc->ch_regs + CFG_LOW); 265 - iowrite32(first->ctl_lo, midc->ch_regs + CTL_LOW); 266 - iowrite32(first->ctl_hi, midc->ch_regs + CTL_HIGH); 267 - pr_debug("MDMA:TX SAR %x,DAR %x,CFGL %x,CFGH %x,CTLH %x, CTLL %x\n", 268 - (int)first->sar, (int)first->dar, first->cfg_hi, 269 - first->cfg_lo, first->ctl_hi, first->ctl_lo); 270 - first->status = DMA_IN_PROGRESS; 271 - 272 - iowrite32(ENABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN); 273 - } 274 - 275 - /** 276 - * midc_descriptor_complete - process completed descriptor 277 - * @midc: channel owning the descriptor 278 - * @desc: the descriptor itself 279 - * 280 - * Process a completed descriptor and perform any callbacks upon 281 - * the completion. The completion handling drops the lock during the 282 - * callbacks but must be called with the lock held. 
283 - */ 284 - static void midc_descriptor_complete(struct intel_mid_dma_chan *midc, 285 - struct intel_mid_dma_desc *desc) 286 - __releases(&midc->lock) __acquires(&midc->lock) 287 - { 288 - struct dma_async_tx_descriptor *txd = &desc->txd; 289 - dma_async_tx_callback callback_txd = NULL; 290 - struct intel_mid_dma_lli *llitem; 291 - void *param_txd = NULL; 292 - 293 - dma_cookie_complete(txd); 294 - callback_txd = txd->callback; 295 - param_txd = txd->callback_param; 296 - 297 - if (desc->lli != NULL) { 298 - /*clear the DONE bit of completed LLI in memory*/ 299 - llitem = desc->lli + desc->current_lli; 300 - llitem->ctl_hi &= CLEAR_DONE; 301 - if (desc->current_lli < desc->lli_length-1) 302 - (desc->current_lli)++; 303 - else 304 - desc->current_lli = 0; 305 - } 306 - spin_unlock_bh(&midc->lock); 307 - if (callback_txd) { 308 - pr_debug("MDMA: TXD callback set ... calling\n"); 309 - callback_txd(param_txd); 310 - } 311 - if (midc->raw_tfr) { 312 - desc->status = DMA_COMPLETE; 313 - if (desc->lli != NULL) { 314 - pci_pool_free(desc->lli_pool, desc->lli, 315 - desc->lli_phys); 316 - pci_pool_destroy(desc->lli_pool); 317 - desc->lli = NULL; 318 - } 319 - list_move(&desc->desc_node, &midc->free_list); 320 - midc->busy = false; 321 - } 322 - spin_lock_bh(&midc->lock); 323 - 324 - } 325 - /** 326 - * midc_scan_descriptors - check the descriptors in channel 327 - * mark completed when tx is completete 328 - * @mid: device 329 - * @midc: channel to scan 330 - * 331 - * Walk the descriptor chain for the device and process any entries 332 - * that are complete. 333 - */ 334 - static void midc_scan_descriptors(struct middma_device *mid, 335 - struct intel_mid_dma_chan *midc) 336 - { 337 - struct intel_mid_dma_desc *desc = NULL, *_desc = NULL; 338 - 339 - /*tx is complete*/ 340 - list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) { 341 - if (desc->status == DMA_IN_PROGRESS) 342 - midc_descriptor_complete(midc, desc); 343 - } 344 - return; 345 - } 346 - /** 347 - * midc_lli_fill_sg - Helper function to convert 348 - * SG list to Linked List Items. 349 - *@midc: Channel 350 - *@desc: DMA descriptor 351 - *@sglist: Pointer to SG list 352 - *@sglen: SG list length 353 - *@flags: DMA transaction flags 354 - * 355 - * Walk through the SG list and convert the SG list into Linked 356 - * List Items (LLI). 
357 - */ 358 - static int midc_lli_fill_sg(struct intel_mid_dma_chan *midc, 359 - struct intel_mid_dma_desc *desc, 360 - struct scatterlist *sglist, 361 - unsigned int sglen, 362 - unsigned int flags) 363 - { 364 - struct intel_mid_dma_slave *mids; 365 - struct scatterlist *sg; 366 - dma_addr_t lli_next, sg_phy_addr; 367 - struct intel_mid_dma_lli *lli_bloc_desc; 368 - union intel_mid_dma_ctl_lo ctl_lo; 369 - union intel_mid_dma_ctl_hi ctl_hi; 370 - int i; 371 - 372 - pr_debug("MDMA: Entered midc_lli_fill_sg\n"); 373 - mids = midc->mid_slave; 374 - 375 - lli_bloc_desc = desc->lli; 376 - lli_next = desc->lli_phys; 377 - 378 - ctl_lo.ctl_lo = desc->ctl_lo; 379 - ctl_hi.ctl_hi = desc->ctl_hi; 380 - for_each_sg(sglist, sg, sglen, i) { 381 - /*Populate CTL_LOW and LLI values*/ 382 - if (i != sglen - 1) { 383 - lli_next = lli_next + 384 - sizeof(struct intel_mid_dma_lli); 385 - } else { 386 - /*Check for circular list, otherwise terminate LLI to ZERO*/ 387 - if (flags & DMA_PREP_CIRCULAR_LIST) { 388 - pr_debug("MDMA: LLI is configured in circular mode\n"); 389 - lli_next = desc->lli_phys; 390 - } else { 391 - lli_next = 0; 392 - ctl_lo.ctlx.llp_dst_en = 0; 393 - ctl_lo.ctlx.llp_src_en = 0; 394 - } 395 - } 396 - /*Populate CTL_HI values*/ 397 - ctl_hi.ctlx.block_ts = get_block_ts(sg_dma_len(sg), 398 - desc->width, 399 - midc->dma->block_size); 400 - /*Populate SAR and DAR values*/ 401 - sg_phy_addr = sg_dma_address(sg); 402 - if (desc->dirn == DMA_MEM_TO_DEV) { 403 - lli_bloc_desc->sar = sg_phy_addr; 404 - lli_bloc_desc->dar = mids->dma_slave.dst_addr; 405 - } else if (desc->dirn == DMA_DEV_TO_MEM) { 406 - lli_bloc_desc->sar = mids->dma_slave.src_addr; 407 - lli_bloc_desc->dar = sg_phy_addr; 408 - } 409 - /*Copy values into block descriptor in system memroy*/ 410 - lli_bloc_desc->llp = lli_next; 411 - lli_bloc_desc->ctl_lo = ctl_lo.ctl_lo; 412 - lli_bloc_desc->ctl_hi = ctl_hi.ctl_hi; 413 - 414 - lli_bloc_desc++; 415 - } 416 - /*Copy very first LLI values to descriptor*/ 417 - desc->ctl_lo = desc->lli->ctl_lo; 418 - desc->ctl_hi = desc->lli->ctl_hi; 419 - desc->sar = desc->lli->sar; 420 - desc->dar = desc->lli->dar; 421 - 422 - return 0; 423 - } 424 - /***************************************************************************** 425 - DMA engine callback Functions*/ 426 - /** 427 - * intel_mid_dma_tx_submit - callback to submit DMA transaction 428 - * @tx: dma engine descriptor 429 - * 430 - * Submit the DMA transaction for this descriptor, start if ch idle 431 - */ 432 - static dma_cookie_t intel_mid_dma_tx_submit(struct dma_async_tx_descriptor *tx) 433 - { 434 - struct intel_mid_dma_desc *desc = to_intel_mid_dma_desc(tx); 435 - struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(tx->chan); 436 - dma_cookie_t cookie; 437 - 438 - spin_lock_bh(&midc->lock); 439 - cookie = dma_cookie_assign(tx); 440 - 441 - if (list_empty(&midc->active_list)) 442 - list_add_tail(&desc->desc_node, &midc->active_list); 443 - else 444 - list_add_tail(&desc->desc_node, &midc->queue); 445 - 446 - midc_dostart(midc, desc); 447 - spin_unlock_bh(&midc->lock); 448 - 449 - return cookie; 450 - } 451 - 452 - /** 453 - * intel_mid_dma_issue_pending - callback to issue pending txn 454 - * @chan: chan where pending trascation needs to be checked and submitted 455 - * 456 - * Call for scan to issue pending descriptors 457 - */ 458 - static void intel_mid_dma_issue_pending(struct dma_chan *chan) 459 - { 460 - struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); 461 - 462 - spin_lock_bh(&midc->lock); 463 - if 
(!list_empty(&midc->queue)) 464 - midc_scan_descriptors(to_middma_device(chan->device), midc); 465 - spin_unlock_bh(&midc->lock); 466 - } 467 - 468 - /** 469 - * intel_mid_dma_tx_status - Return status of txn 470 - * @chan: chan for where status needs to be checked 471 - * @cookie: cookie for txn 472 - * @txstate: DMA txn state 473 - * 474 - * Return status of DMA txn 475 - */ 476 - static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan, 477 - dma_cookie_t cookie, 478 - struct dma_tx_state *txstate) 479 - { 480 - struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); 481 - enum dma_status ret; 482 - 483 - ret = dma_cookie_status(chan, cookie, txstate); 484 - if (ret != DMA_COMPLETE) { 485 - spin_lock_bh(&midc->lock); 486 - midc_scan_descriptors(to_middma_device(chan->device), midc); 487 - spin_unlock_bh(&midc->lock); 488 - 489 - ret = dma_cookie_status(chan, cookie, txstate); 490 - } 491 - 492 - return ret; 493 - } 494 - 495 - static int intel_mid_dma_config(struct dma_chan *chan, 496 - struct dma_slave_config *slave) 497 - { 498 - struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); 499 - struct intel_mid_dma_slave *mid_slave; 500 - 501 - BUG_ON(!midc); 502 - BUG_ON(!slave); 503 - pr_debug("MDMA: slave control called\n"); 504 - 505 - mid_slave = to_intel_mid_dma_slave(slave); 506 - 507 - BUG_ON(!mid_slave); 508 - 509 - midc->mid_slave = mid_slave; 510 - return 0; 511 - } 512 - 513 - static int intel_mid_dma_terminate_all(struct dma_chan *chan) 514 - { 515 - struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); 516 - struct middma_device *mid = to_middma_device(chan->device); 517 - struct intel_mid_dma_desc *desc, *_desc; 518 - union intel_mid_dma_cfg_lo cfg_lo; 519 - 520 - spin_lock_bh(&midc->lock); 521 - if (midc->busy == false) { 522 - spin_unlock_bh(&midc->lock); 523 - return 0; 524 - } 525 - /*Suspend and disable the channel*/ 526 - cfg_lo.cfg_lo = ioread32(midc->ch_regs + CFG_LOW); 527 - cfg_lo.cfgx.ch_susp = 1; 528 - iowrite32(cfg_lo.cfg_lo, midc->ch_regs + CFG_LOW); 529 - iowrite32(DISABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN); 530 - midc->busy = false; 531 - /* Disable interrupts */ 532 - disable_dma_interrupt(midc); 533 - midc->descs_allocated = 0; 534 - 535 - spin_unlock_bh(&midc->lock); 536 - list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) { 537 - if (desc->lli != NULL) { 538 - pci_pool_free(desc->lli_pool, desc->lli, 539 - desc->lli_phys); 540 - pci_pool_destroy(desc->lli_pool); 541 - desc->lli = NULL; 542 - } 543 - list_move(&desc->desc_node, &midc->free_list); 544 - } 545 - return 0; 546 - } 547 - 548 - 549 - /** 550 - * intel_mid_dma_prep_memcpy - Prep memcpy txn 551 - * @chan: chan for DMA transfer 552 - * @dest: destn address 553 - * @src: src address 554 - * @len: DMA transfer len 555 - * @flags: DMA flags 556 - * 557 - * Perform a DMA memcpy. 
Note we support slave periphral DMA transfers only 558 - * The periphral txn details should be filled in slave structure properly 559 - * Returns the descriptor for this txn 560 - */ 561 - static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy( 562 - struct dma_chan *chan, dma_addr_t dest, 563 - dma_addr_t src, size_t len, unsigned long flags) 564 - { 565 - struct intel_mid_dma_chan *midc; 566 - struct intel_mid_dma_desc *desc = NULL; 567 - struct intel_mid_dma_slave *mids; 568 - union intel_mid_dma_ctl_lo ctl_lo; 569 - union intel_mid_dma_ctl_hi ctl_hi; 570 - union intel_mid_dma_cfg_lo cfg_lo; 571 - union intel_mid_dma_cfg_hi cfg_hi; 572 - enum dma_slave_buswidth width; 573 - 574 - pr_debug("MDMA: Prep for memcpy\n"); 575 - BUG_ON(!chan); 576 - if (!len) 577 - return NULL; 578 - 579 - midc = to_intel_mid_dma_chan(chan); 580 - BUG_ON(!midc); 581 - 582 - mids = midc->mid_slave; 583 - BUG_ON(!mids); 584 - 585 - pr_debug("MDMA:called for DMA %x CH %d Length %zu\n", 586 - midc->dma->pci_id, midc->ch_id, len); 587 - pr_debug("MDMA:Cfg passed Mode %x, Dirn %x, HS %x, Width %x\n", 588 - mids->cfg_mode, mids->dma_slave.direction, 589 - mids->hs_mode, mids->dma_slave.src_addr_width); 590 - 591 - /*calculate CFG_LO*/ 592 - if (mids->hs_mode == LNW_DMA_SW_HS) { 593 - cfg_lo.cfg_lo = 0; 594 - cfg_lo.cfgx.hs_sel_dst = 1; 595 - cfg_lo.cfgx.hs_sel_src = 1; 596 - } else if (mids->hs_mode == LNW_DMA_HW_HS) 597 - cfg_lo.cfg_lo = 0x00000; 598 - 599 - /*calculate CFG_HI*/ 600 - if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) { 601 - /*SW HS only*/ 602 - cfg_hi.cfg_hi = 0; 603 - } else { 604 - cfg_hi.cfg_hi = 0; 605 - if (midc->dma->pimr_mask) { 606 - cfg_hi.cfgx.protctl = 0x0; /*default value*/ 607 - cfg_hi.cfgx.fifo_mode = 1; 608 - if (mids->dma_slave.direction == DMA_MEM_TO_DEV) { 609 - cfg_hi.cfgx.src_per = 0; 610 - if (mids->device_instance == 0) 611 - cfg_hi.cfgx.dst_per = 3; 612 - if (mids->device_instance == 1) 613 - cfg_hi.cfgx.dst_per = 1; 614 - } else if (mids->dma_slave.direction == DMA_DEV_TO_MEM) { 615 - if (mids->device_instance == 0) 616 - cfg_hi.cfgx.src_per = 2; 617 - if (mids->device_instance == 1) 618 - cfg_hi.cfgx.src_per = 0; 619 - cfg_hi.cfgx.dst_per = 0; 620 - } 621 - } else { 622 - cfg_hi.cfgx.protctl = 0x1; /*default value*/ 623 - cfg_hi.cfgx.src_per = cfg_hi.cfgx.dst_per = 624 - midc->ch_id - midc->dma->chan_base; 625 - } 626 - } 627 - 628 - /*calculate CTL_HI*/ 629 - ctl_hi.ctlx.reser = 0; 630 - ctl_hi.ctlx.done = 0; 631 - width = mids->dma_slave.src_addr_width; 632 - 633 - ctl_hi.ctlx.block_ts = get_block_ts(len, width, midc->dma->block_size); 634 - pr_debug("MDMA:calc len %d for block size %d\n", 635 - ctl_hi.ctlx.block_ts, midc->dma->block_size); 636 - /*calculate CTL_LO*/ 637 - ctl_lo.ctl_lo = 0; 638 - ctl_lo.ctlx.int_en = 1; 639 - ctl_lo.ctlx.dst_msize = mids->dma_slave.src_maxburst; 640 - ctl_lo.ctlx.src_msize = mids->dma_slave.dst_maxburst; 641 - 642 - /* 643 - * Here we need some translation from "enum dma_slave_buswidth" 644 - * to the format for our dma controller 645 - * standard intel_mid_dmac's format 646 - * 1 Byte 0b000 647 - * 2 Bytes 0b001 648 - * 4 Bytes 0b010 649 - */ 650 - ctl_lo.ctlx.dst_tr_width = mids->dma_slave.dst_addr_width / 2; 651 - ctl_lo.ctlx.src_tr_width = mids->dma_slave.src_addr_width / 2; 652 - 653 - if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) { 654 - ctl_lo.ctlx.tt_fc = 0; 655 - ctl_lo.ctlx.sinc = 0; 656 - ctl_lo.ctlx.dinc = 0; 657 - } else { 658 - if (mids->dma_slave.direction == DMA_MEM_TO_DEV) { 659 - ctl_lo.ctlx.sinc = 0; 660 - 
ctl_lo.ctlx.dinc = 2; 661 - ctl_lo.ctlx.tt_fc = 1; 662 - } else if (mids->dma_slave.direction == DMA_DEV_TO_MEM) { 663 - ctl_lo.ctlx.sinc = 2; 664 - ctl_lo.ctlx.dinc = 0; 665 - ctl_lo.ctlx.tt_fc = 2; 666 - } 667 - } 668 - 669 - pr_debug("MDMA:Calc CTL LO %x, CTL HI %x, CFG LO %x, CFG HI %x\n", 670 - ctl_lo.ctl_lo, ctl_hi.ctl_hi, cfg_lo.cfg_lo, cfg_hi.cfg_hi); 671 - 672 - enable_dma_interrupt(midc); 673 - 674 - desc = midc_desc_get(midc); 675 - if (desc == NULL) 676 - goto err_desc_get; 677 - desc->sar = src; 678 - desc->dar = dest ; 679 - desc->len = len; 680 - desc->cfg_hi = cfg_hi.cfg_hi; 681 - desc->cfg_lo = cfg_lo.cfg_lo; 682 - desc->ctl_lo = ctl_lo.ctl_lo; 683 - desc->ctl_hi = ctl_hi.ctl_hi; 684 - desc->width = width; 685 - desc->dirn = mids->dma_slave.direction; 686 - desc->lli_phys = 0; 687 - desc->lli = NULL; 688 - desc->lli_pool = NULL; 689 - return &desc->txd; 690 - 691 - err_desc_get: 692 - pr_err("ERR_MDMA: Failed to get desc\n"); 693 - midc_desc_put(midc, desc); 694 - return NULL; 695 - } 696 - /** 697 - * intel_mid_dma_prep_slave_sg - Prep slave sg txn 698 - * @chan: chan for DMA transfer 699 - * @sgl: scatter gather list 700 - * @sg_len: length of sg txn 701 - * @direction: DMA transfer dirtn 702 - * @flags: DMA flags 703 - * @context: transfer context (ignored) 704 - * 705 - * Prepares LLI based periphral transfer 706 - */ 707 - static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg( 708 - struct dma_chan *chan, struct scatterlist *sgl, 709 - unsigned int sg_len, enum dma_transfer_direction direction, 710 - unsigned long flags, void *context) 711 - { 712 - struct intel_mid_dma_chan *midc = NULL; 713 - struct intel_mid_dma_slave *mids = NULL; 714 - struct intel_mid_dma_desc *desc = NULL; 715 - struct dma_async_tx_descriptor *txd = NULL; 716 - union intel_mid_dma_ctl_lo ctl_lo; 717 - 718 - pr_debug("MDMA: Prep for slave SG\n"); 719 - 720 - if (!sg_len) { 721 - pr_err("MDMA: Invalid SG length\n"); 722 - return NULL; 723 - } 724 - midc = to_intel_mid_dma_chan(chan); 725 - BUG_ON(!midc); 726 - 727 - mids = midc->mid_slave; 728 - BUG_ON(!mids); 729 - 730 - if (!midc->dma->pimr_mask) { 731 - /* We can still handle sg list with only one item */ 732 - if (sg_len == 1) { 733 - txd = intel_mid_dma_prep_memcpy(chan, 734 - mids->dma_slave.dst_addr, 735 - mids->dma_slave.src_addr, 736 - sg_dma_len(sgl), 737 - flags); 738 - return txd; 739 - } else { 740 - pr_warn("MDMA: SG list is not supported by this controller\n"); 741 - return NULL; 742 - } 743 - } 744 - 745 - pr_debug("MDMA: SG Length = %d, direction = %d, Flags = %#lx\n", 746 - sg_len, direction, flags); 747 - 748 - txd = intel_mid_dma_prep_memcpy(chan, 0, 0, sg_dma_len(sgl), flags); 749 - if (NULL == txd) { 750 - pr_err("MDMA: Prep memcpy failed\n"); 751 - return NULL; 752 - } 753 - 754 - desc = to_intel_mid_dma_desc(txd); 755 - desc->dirn = direction; 756 - ctl_lo.ctl_lo = desc->ctl_lo; 757 - ctl_lo.ctlx.llp_dst_en = 1; 758 - ctl_lo.ctlx.llp_src_en = 1; 759 - desc->ctl_lo = ctl_lo.ctl_lo; 760 - desc->lli_length = sg_len; 761 - desc->current_lli = 0; 762 - /* DMA coherent memory pool for LLI descriptors*/ 763 - desc->lli_pool = pci_pool_create("intel_mid_dma_lli_pool", 764 - midc->dma->pdev, 765 - (sizeof(struct intel_mid_dma_lli)*sg_len), 766 - 32, 0); 767 - if (NULL == desc->lli_pool) { 768 - pr_err("MID_DMA:LLI pool create failed\n"); 769 - return NULL; 770 - } 771 - 772 - desc->lli = pci_pool_alloc(desc->lli_pool, GFP_KERNEL, &desc->lli_phys); 773 - if (!desc->lli) { 774 - pr_err("MID_DMA: LLI alloc failed\n"); 775 
- pci_pool_destroy(desc->lli_pool); 776 - return NULL; 777 - } 778 - 779 - midc_lli_fill_sg(midc, desc, sgl, sg_len, flags); 780 - if (flags & DMA_PREP_INTERRUPT) { 781 - iowrite32(UNMASK_INTR_REG(midc->ch_id), 782 - midc->dma_base + MASK_BLOCK); 783 - pr_debug("MDMA:Enabled Block interrupt\n"); 784 - } 785 - return &desc->txd; 786 - } 787 - 788 - /** 789 - * intel_mid_dma_free_chan_resources - Frees dma resources 790 - * @chan: chan requiring attention 791 - * 792 - * Frees the allocated resources on this DMA chan 793 - */ 794 - static void intel_mid_dma_free_chan_resources(struct dma_chan *chan) 795 - { 796 - struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); 797 - struct middma_device *mid = to_middma_device(chan->device); 798 - struct intel_mid_dma_desc *desc, *_desc; 799 - 800 - if (true == midc->busy) { 801 - /*trying to free ch in use!!!!!*/ 802 - pr_err("ERR_MDMA: trying to free ch in use\n"); 803 - } 804 - spin_lock_bh(&midc->lock); 805 - midc->descs_allocated = 0; 806 - list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) { 807 - list_del(&desc->desc_node); 808 - pci_pool_free(mid->dma_pool, desc, desc->txd.phys); 809 - } 810 - list_for_each_entry_safe(desc, _desc, &midc->free_list, desc_node) { 811 - list_del(&desc->desc_node); 812 - pci_pool_free(mid->dma_pool, desc, desc->txd.phys); 813 - } 814 - list_for_each_entry_safe(desc, _desc, &midc->queue, desc_node) { 815 - list_del(&desc->desc_node); 816 - pci_pool_free(mid->dma_pool, desc, desc->txd.phys); 817 - } 818 - spin_unlock_bh(&midc->lock); 819 - midc->in_use = false; 820 - midc->busy = false; 821 - /* Disable CH interrupts */ 822 - iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_BLOCK); 823 - iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_ERR); 824 - pm_runtime_put(&mid->pdev->dev); 825 - } 826 - 827 - /** 828 - * intel_mid_dma_alloc_chan_resources - Allocate dma resources 829 - * @chan: chan requiring attention 830 - * 831 - * Allocates DMA resources on this chan 832 - * Return the descriptors allocated 833 - */ 834 - static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan) 835 - { 836 - struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); 837 - struct middma_device *mid = to_middma_device(chan->device); 838 - struct intel_mid_dma_desc *desc; 839 - dma_addr_t phys; 840 - int i = 0; 841 - 842 - pm_runtime_get_sync(&mid->pdev->dev); 843 - 844 - if (mid->state == SUSPENDED) { 845 - if (dma_resume(&mid->pdev->dev)) { 846 - pr_err("ERR_MDMA: resume failed"); 847 - return -EFAULT; 848 - } 849 - } 850 - 851 - /* ASSERT: channel is idle */ 852 - if (test_ch_en(mid->dma_base, midc->ch_id)) { 853 - /*ch is not idle*/ 854 - pr_err("ERR_MDMA: ch not idle\n"); 855 - pm_runtime_put(&mid->pdev->dev); 856 - return -EIO; 857 - } 858 - dma_cookie_init(chan); 859 - 860 - spin_lock_bh(&midc->lock); 861 - while (midc->descs_allocated < DESCS_PER_CHANNEL) { 862 - spin_unlock_bh(&midc->lock); 863 - desc = pci_pool_alloc(mid->dma_pool, GFP_KERNEL, &phys); 864 - if (!desc) { 865 - pr_err("ERR_MDMA: desc failed\n"); 866 - pm_runtime_put(&mid->pdev->dev); 867 - return -ENOMEM; 868 - /*check*/ 869 - } 870 - dma_async_tx_descriptor_init(&desc->txd, chan); 871 - desc->txd.tx_submit = intel_mid_dma_tx_submit; 872 - desc->txd.flags = DMA_CTRL_ACK; 873 - desc->txd.phys = phys; 874 - spin_lock_bh(&midc->lock); 875 - i = ++midc->descs_allocated; 876 - list_add_tail(&desc->desc_node, &midc->free_list); 877 - } 878 - spin_unlock_bh(&midc->lock); 879 - midc->in_use = true; 880 - 
midc->busy = false; 881 - pr_debug("MID_DMA: Desc alloc done ret: %d desc\n", i); 882 - return i; 883 - } 884 - 885 - /** 886 - * midc_handle_error - Handle DMA txn error 887 - * @mid: controller where error occurred 888 - * @midc: chan where error occurred 889 - * 890 - * Scan the descriptor for error 891 - */ 892 - static void midc_handle_error(struct middma_device *mid, 893 - struct intel_mid_dma_chan *midc) 894 - { 895 - midc_scan_descriptors(mid, midc); 896 - } 897 - 898 - /** 899 - * dma_tasklet - DMA interrupt tasklet 900 - * @data: tasklet arg (the controller structure) 901 - * 902 - * Scan the controller for interrupts for completion/error 903 - * Clear the interrupt and call for handling completion/error 904 - */ 905 - static void dma_tasklet(unsigned long data) 906 - { 907 - struct middma_device *mid = NULL; 908 - struct intel_mid_dma_chan *midc = NULL; 909 - u32 status, raw_tfr, raw_block; 910 - int i; 911 - 912 - mid = (struct middma_device *)data; 913 - if (mid == NULL) { 914 - pr_err("ERR_MDMA: tasklet Null param\n"); 915 - return; 916 - } 917 - pr_debug("MDMA: in tasklet for device %x\n", mid->pci_id); 918 - raw_tfr = ioread32(mid->dma_base + RAW_TFR); 919 - raw_block = ioread32(mid->dma_base + RAW_BLOCK); 920 - status = raw_tfr | raw_block; 921 - status &= mid->intr_mask; 922 - while (status) { 923 - /*txn interrupt*/ 924 - i = get_ch_index(&status, mid->chan_base); 925 - if (i < 0) { 926 - pr_err("ERR_MDMA:Invalid ch index %x\n", i); 927 - return; 928 - } 929 - midc = &mid->ch[i]; 930 - if (midc == NULL) { 931 - pr_err("ERR_MDMA:Null param midc\n"); 932 - return; 933 - } 934 - pr_debug("MDMA:Tx complete interrupt %x, Ch No %d Index %d\n", 935 - status, midc->ch_id, i); 936 - midc->raw_tfr = raw_tfr; 937 - midc->raw_block = raw_block; 938 - spin_lock_bh(&midc->lock); 939 - /*clearing this interrupts first*/ 940 - iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_TFR); 941 - if (raw_block) { 942 - iowrite32((1 << midc->ch_id), 943 - mid->dma_base + CLEAR_BLOCK); 944 - } 945 - midc_scan_descriptors(mid, midc); 946 - pr_debug("MDMA:Scan of desc... 
complete, unmasking\n"); 947 - iowrite32(UNMASK_INTR_REG(midc->ch_id), 948 - mid->dma_base + MASK_TFR); 949 - if (raw_block) { 950 - iowrite32(UNMASK_INTR_REG(midc->ch_id), 951 - mid->dma_base + MASK_BLOCK); 952 - } 953 - spin_unlock_bh(&midc->lock); 954 - } 955 - 956 - status = ioread32(mid->dma_base + RAW_ERR); 957 - status &= mid->intr_mask; 958 - while (status) { 959 - /*err interrupt*/ 960 - i = get_ch_index(&status, mid->chan_base); 961 - if (i < 0) { 962 - pr_err("ERR_MDMA:Invalid ch index %x\n", i); 963 - return; 964 - } 965 - midc = &mid->ch[i]; 966 - if (midc == NULL) { 967 - pr_err("ERR_MDMA:Null param midc\n"); 968 - return; 969 - } 970 - pr_debug("MDMA:Tx complete interrupt %x, Ch No %d Index %d\n", 971 - status, midc->ch_id, i); 972 - 973 - iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_ERR); 974 - spin_lock_bh(&midc->lock); 975 - midc_handle_error(mid, midc); 976 - iowrite32(UNMASK_INTR_REG(midc->ch_id), 977 - mid->dma_base + MASK_ERR); 978 - spin_unlock_bh(&midc->lock); 979 - } 980 - pr_debug("MDMA:Exiting takslet...\n"); 981 - return; 982 - } 983 - 984 - static void dma_tasklet1(unsigned long data) 985 - { 986 - pr_debug("MDMA:in takslet1...\n"); 987 - return dma_tasklet(data); 988 - } 989 - 990 - static void dma_tasklet2(unsigned long data) 991 - { 992 - pr_debug("MDMA:in takslet2...\n"); 993 - return dma_tasklet(data); 994 - } 995 - 996 - /** 997 - * intel_mid_dma_interrupt - DMA ISR 998 - * @irq: IRQ where interrupt occurred 999 - * @data: ISR cllback data (the controller structure) 1000 - * 1001 - * See if this is our interrupt if so then schedule the tasklet 1002 - * otherwise ignore 1003 - */ 1004 - static irqreturn_t intel_mid_dma_interrupt(int irq, void *data) 1005 - { 1006 - struct middma_device *mid = data; 1007 - u32 tfr_status, err_status; 1008 - int call_tasklet = 0; 1009 - 1010 - tfr_status = ioread32(mid->dma_base + RAW_TFR); 1011 - err_status = ioread32(mid->dma_base + RAW_ERR); 1012 - if (!tfr_status && !err_status) 1013 - return IRQ_NONE; 1014 - 1015 - /*DMA Interrupt*/ 1016 - pr_debug("MDMA:Got an interrupt on irq %d\n", irq); 1017 - pr_debug("MDMA: Status %x, Mask %x\n", tfr_status, mid->intr_mask); 1018 - tfr_status &= mid->intr_mask; 1019 - if (tfr_status) { 1020 - /*need to disable intr*/ 1021 - iowrite32((tfr_status << INT_MASK_WE), mid->dma_base + MASK_TFR); 1022 - iowrite32((tfr_status << INT_MASK_WE), mid->dma_base + MASK_BLOCK); 1023 - pr_debug("MDMA: Calling tasklet %x\n", tfr_status); 1024 - call_tasklet = 1; 1025 - } 1026 - err_status &= mid->intr_mask; 1027 - if (err_status) { 1028 - iowrite32((err_status << INT_MASK_WE), 1029 - mid->dma_base + MASK_ERR); 1030 - call_tasklet = 1; 1031 - } 1032 - if (call_tasklet) 1033 - tasklet_schedule(&mid->tasklet); 1034 - 1035 - return IRQ_HANDLED; 1036 - } 1037 - 1038 - static irqreturn_t intel_mid_dma_interrupt1(int irq, void *data) 1039 - { 1040 - return intel_mid_dma_interrupt(irq, data); 1041 - } 1042 - 1043 - static irqreturn_t intel_mid_dma_interrupt2(int irq, void *data) 1044 - { 1045 - return intel_mid_dma_interrupt(irq, data); 1046 - } 1047 - 1048 - /** 1049 - * mid_setup_dma - Setup the DMA controller 1050 - * @pdev: Controller PCI device structure 1051 - * 1052 - * Initialize the DMA controller, channels, registers with DMA engine, 1053 - * ISR. Initialize DMA controller channels. 
1054 - */ 1055 - static int mid_setup_dma(struct pci_dev *pdev) 1056 - { 1057 - struct middma_device *dma = pci_get_drvdata(pdev); 1058 - int err, i; 1059 - 1060 - /* DMA coherent memory pool for DMA descriptor allocations */ 1061 - dma->dma_pool = pci_pool_create("intel_mid_dma_desc_pool", pdev, 1062 - sizeof(struct intel_mid_dma_desc), 1063 - 32, 0); 1064 - if (NULL == dma->dma_pool) { 1065 - pr_err("ERR_MDMA:pci_pool_create failed\n"); 1066 - err = -ENOMEM; 1067 - goto err_dma_pool; 1068 - } 1069 - 1070 - INIT_LIST_HEAD(&dma->common.channels); 1071 - dma->pci_id = pdev->device; 1072 - if (dma->pimr_mask) { 1073 - dma->mask_reg = ioremap(LNW_PERIPHRAL_MASK_BASE, 1074 - LNW_PERIPHRAL_MASK_SIZE); 1075 - if (dma->mask_reg == NULL) { 1076 - pr_err("ERR_MDMA:Can't map periphral intr space !!\n"); 1077 - err = -ENOMEM; 1078 - goto err_ioremap; 1079 - } 1080 - } else 1081 - dma->mask_reg = NULL; 1082 - 1083 - pr_debug("MDMA:Adding %d channel for this controller\n", dma->max_chan); 1084 - /*init CH structures*/ 1085 - dma->intr_mask = 0; 1086 - dma->state = RUNNING; 1087 - for (i = 0; i < dma->max_chan; i++) { 1088 - struct intel_mid_dma_chan *midch = &dma->ch[i]; 1089 - 1090 - midch->chan.device = &dma->common; 1091 - dma_cookie_init(&midch->chan); 1092 - midch->ch_id = dma->chan_base + i; 1093 - pr_debug("MDMA:Init CH %d, ID %d\n", i, midch->ch_id); 1094 - 1095 - midch->dma_base = dma->dma_base; 1096 - midch->ch_regs = dma->dma_base + DMA_CH_SIZE * midch->ch_id; 1097 - midch->dma = dma; 1098 - dma->intr_mask |= 1 << (dma->chan_base + i); 1099 - spin_lock_init(&midch->lock); 1100 - 1101 - INIT_LIST_HEAD(&midch->active_list); 1102 - INIT_LIST_HEAD(&midch->queue); 1103 - INIT_LIST_HEAD(&midch->free_list); 1104 - /*mask interrupts*/ 1105 - iowrite32(MASK_INTR_REG(midch->ch_id), 1106 - dma->dma_base + MASK_BLOCK); 1107 - iowrite32(MASK_INTR_REG(midch->ch_id), 1108 - dma->dma_base + MASK_SRC_TRAN); 1109 - iowrite32(MASK_INTR_REG(midch->ch_id), 1110 - dma->dma_base + MASK_DST_TRAN); 1111 - iowrite32(MASK_INTR_REG(midch->ch_id), 1112 - dma->dma_base + MASK_ERR); 1113 - iowrite32(MASK_INTR_REG(midch->ch_id), 1114 - dma->dma_base + MASK_TFR); 1115 - 1116 - disable_dma_interrupt(midch); 1117 - list_add_tail(&midch->chan.device_node, &dma->common.channels); 1118 - } 1119 - pr_debug("MDMA: Calc Mask as %x for this controller\n", dma->intr_mask); 1120 - 1121 - /*init dma structure*/ 1122 - dma_cap_zero(dma->common.cap_mask); 1123 - dma_cap_set(DMA_MEMCPY, dma->common.cap_mask); 1124 - dma_cap_set(DMA_SLAVE, dma->common.cap_mask); 1125 - dma_cap_set(DMA_PRIVATE, dma->common.cap_mask); 1126 - dma->common.dev = &pdev->dev; 1127 - 1128 - dma->common.device_alloc_chan_resources = 1129 - intel_mid_dma_alloc_chan_resources; 1130 - dma->common.device_free_chan_resources = 1131 - intel_mid_dma_free_chan_resources; 1132 - 1133 - dma->common.device_tx_status = intel_mid_dma_tx_status; 1134 - dma->common.device_prep_dma_memcpy = intel_mid_dma_prep_memcpy; 1135 - dma->common.device_issue_pending = intel_mid_dma_issue_pending; 1136 - dma->common.device_prep_slave_sg = intel_mid_dma_prep_slave_sg; 1137 - dma->common.device_config = intel_mid_dma_config; 1138 - dma->common.device_terminate_all = intel_mid_dma_terminate_all; 1139 - 1140 - /*enable dma cntrl*/ 1141 - iowrite32(REG_BIT0, dma->dma_base + DMA_CFG); 1142 - 1143 - /*register irq */ 1144 - if (dma->pimr_mask) { 1145 - pr_debug("MDMA:Requesting irq shared for DMAC1\n"); 1146 - err = request_irq(pdev->irq, intel_mid_dma_interrupt1, 1147 - IRQF_SHARED, 
"INTEL_MID_DMAC1", dma); 1148 - if (0 != err) 1149 - goto err_irq; 1150 - } else { 1151 - dma->intr_mask = 0x03; 1152 - pr_debug("MDMA:Requesting irq for DMAC2\n"); 1153 - err = request_irq(pdev->irq, intel_mid_dma_interrupt2, 1154 - IRQF_SHARED, "INTEL_MID_DMAC2", dma); 1155 - if (0 != err) 1156 - goto err_irq; 1157 - } 1158 - /*register device w/ engine*/ 1159 - err = dma_async_device_register(&dma->common); 1160 - if (0 != err) { 1161 - pr_err("ERR_MDMA:device_register failed: %d\n", err); 1162 - goto err_engine; 1163 - } 1164 - if (dma->pimr_mask) { 1165 - pr_debug("setting up tasklet1 for DMAC1\n"); 1166 - tasklet_init(&dma->tasklet, dma_tasklet1, (unsigned long)dma); 1167 - } else { 1168 - pr_debug("setting up tasklet2 for DMAC2\n"); 1169 - tasklet_init(&dma->tasklet, dma_tasklet2, (unsigned long)dma); 1170 - } 1171 - return 0; 1172 - 1173 - err_engine: 1174 - free_irq(pdev->irq, dma); 1175 - err_irq: 1176 - if (dma->mask_reg) 1177 - iounmap(dma->mask_reg); 1178 - err_ioremap: 1179 - pci_pool_destroy(dma->dma_pool); 1180 - err_dma_pool: 1181 - pr_err("ERR_MDMA:setup_dma failed: %d\n", err); 1182 - return err; 1183 - 1184 - } 1185 - 1186 - /** 1187 - * middma_shutdown - Shutdown the DMA controller 1188 - * @pdev: Controller PCI device structure 1189 - * 1190 - * Called by remove 1191 - * Unregister DMa controller, clear all structures and free interrupt 1192 - */ 1193 - static void middma_shutdown(struct pci_dev *pdev) 1194 - { 1195 - struct middma_device *device = pci_get_drvdata(pdev); 1196 - 1197 - dma_async_device_unregister(&device->common); 1198 - pci_pool_destroy(device->dma_pool); 1199 - if (device->mask_reg) 1200 - iounmap(device->mask_reg); 1201 - if (device->dma_base) 1202 - iounmap(device->dma_base); 1203 - free_irq(pdev->irq, device); 1204 - return; 1205 - } 1206 - 1207 - /** 1208 - * intel_mid_dma_probe - PCI Probe 1209 - * @pdev: Controller PCI device structure 1210 - * @id: pci device id structure 1211 - * 1212 - * Initialize the PCI device, map BARs, query driver data. 
1213 - * Call setup_dma to complete contoller and chan initilzation 1214 - */ 1215 - static int intel_mid_dma_probe(struct pci_dev *pdev, 1216 - const struct pci_device_id *id) 1217 - { 1218 - struct middma_device *device; 1219 - u32 base_addr, bar_size; 1220 - struct intel_mid_dma_probe_info *info; 1221 - int err; 1222 - 1223 - pr_debug("MDMA: probe for %x\n", pdev->device); 1224 - info = (void *)id->driver_data; 1225 - pr_debug("MDMA: CH %d, base %d, block len %d, Periphral mask %x\n", 1226 - info->max_chan, info->ch_base, 1227 - info->block_size, info->pimr_mask); 1228 - 1229 - err = pci_enable_device(pdev); 1230 - if (err) 1231 - goto err_enable_device; 1232 - 1233 - err = pci_request_regions(pdev, "intel_mid_dmac"); 1234 - if (err) 1235 - goto err_request_regions; 1236 - 1237 - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 1238 - if (err) 1239 - goto err_set_dma_mask; 1240 - 1241 - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 1242 - if (err) 1243 - goto err_set_dma_mask; 1244 - 1245 - device = kzalloc(sizeof(*device), GFP_KERNEL); 1246 - if (!device) { 1247 - pr_err("ERR_MDMA:kzalloc failed probe\n"); 1248 - err = -ENOMEM; 1249 - goto err_kzalloc; 1250 - } 1251 - device->pdev = pci_dev_get(pdev); 1252 - 1253 - base_addr = pci_resource_start(pdev, 0); 1254 - bar_size = pci_resource_len(pdev, 0); 1255 - device->dma_base = ioremap_nocache(base_addr, DMA_REG_SIZE); 1256 - if (!device->dma_base) { 1257 - pr_err("ERR_MDMA:ioremap failed\n"); 1258 - err = -ENOMEM; 1259 - goto err_ioremap; 1260 - } 1261 - pci_set_drvdata(pdev, device); 1262 - pci_set_master(pdev); 1263 - device->max_chan = info->max_chan; 1264 - device->chan_base = info->ch_base; 1265 - device->block_size = info->block_size; 1266 - device->pimr_mask = info->pimr_mask; 1267 - 1268 - err = mid_setup_dma(pdev); 1269 - if (err) 1270 - goto err_dma; 1271 - 1272 - pm_runtime_put_noidle(&pdev->dev); 1273 - pm_runtime_allow(&pdev->dev); 1274 - return 0; 1275 - 1276 - err_dma: 1277 - iounmap(device->dma_base); 1278 - err_ioremap: 1279 - pci_dev_put(pdev); 1280 - kfree(device); 1281 - err_kzalloc: 1282 - err_set_dma_mask: 1283 - pci_release_regions(pdev); 1284 - pci_disable_device(pdev); 1285 - err_request_regions: 1286 - err_enable_device: 1287 - pr_err("ERR_MDMA:Probe failed %d\n", err); 1288 - return err; 1289 - } 1290 - 1291 - /** 1292 - * intel_mid_dma_remove - PCI remove 1293 - * @pdev: Controller PCI device structure 1294 - * 1295 - * Free up all resources and data 1296 - * Call shutdown_dma to complete contoller and chan cleanup 1297 - */ 1298 - static void intel_mid_dma_remove(struct pci_dev *pdev) 1299 - { 1300 - struct middma_device *device = pci_get_drvdata(pdev); 1301 - 1302 - pm_runtime_get_noresume(&pdev->dev); 1303 - pm_runtime_forbid(&pdev->dev); 1304 - middma_shutdown(pdev); 1305 - pci_dev_put(pdev); 1306 - kfree(device); 1307 - pci_release_regions(pdev); 1308 - pci_disable_device(pdev); 1309 - } 1310 - 1311 - /* Power Management */ 1312 - /* 1313 - * dma_suspend - PCI suspend function 1314 - * 1315 - * @pci: PCI device structure 1316 - * @state: PM message 1317 - * 1318 - * This function is called by OS when a power event occurs 1319 - */ 1320 - static int dma_suspend(struct device *dev) 1321 - { 1322 - struct pci_dev *pci = to_pci_dev(dev); 1323 - int i; 1324 - struct middma_device *device = pci_get_drvdata(pci); 1325 - pr_debug("MDMA: dma_suspend called\n"); 1326 - 1327 - for (i = 0; i < device->max_chan; i++) { 1328 - if (device->ch[i].in_use) 1329 - return -EAGAIN; 1330 - } 1331 - 
dmac1_mask_periphral_intr(device); 1332 - device->state = SUSPENDED; 1333 - pci_save_state(pci); 1334 - pci_disable_device(pci); 1335 - pci_set_power_state(pci, PCI_D3hot); 1336 - return 0; 1337 - } 1338 - 1339 - /** 1340 - * dma_resume - PCI resume function 1341 - * 1342 - * @pci: PCI device structure 1343 - * 1344 - * This function is called by OS when a power event occurs 1345 - */ 1346 - int dma_resume(struct device *dev) 1347 - { 1348 - struct pci_dev *pci = to_pci_dev(dev); 1349 - int ret; 1350 - struct middma_device *device = pci_get_drvdata(pci); 1351 - 1352 - pr_debug("MDMA: dma_resume called\n"); 1353 - pci_set_power_state(pci, PCI_D0); 1354 - pci_restore_state(pci); 1355 - ret = pci_enable_device(pci); 1356 - if (ret) { 1357 - pr_err("MDMA: device can't be enabled for %x\n", pci->device); 1358 - return ret; 1359 - } 1360 - device->state = RUNNING; 1361 - iowrite32(REG_BIT0, device->dma_base + DMA_CFG); 1362 - return 0; 1363 - } 1364 - 1365 - static int dma_runtime_suspend(struct device *dev) 1366 - { 1367 - struct pci_dev *pci_dev = to_pci_dev(dev); 1368 - struct middma_device *device = pci_get_drvdata(pci_dev); 1369 - 1370 - device->state = SUSPENDED; 1371 - return 0; 1372 - } 1373 - 1374 - static int dma_runtime_resume(struct device *dev) 1375 - { 1376 - struct pci_dev *pci_dev = to_pci_dev(dev); 1377 - struct middma_device *device = pci_get_drvdata(pci_dev); 1378 - 1379 - device->state = RUNNING; 1380 - iowrite32(REG_BIT0, device->dma_base + DMA_CFG); 1381 - return 0; 1382 - } 1383 - 1384 - static int dma_runtime_idle(struct device *dev) 1385 - { 1386 - struct pci_dev *pdev = to_pci_dev(dev); 1387 - struct middma_device *device = pci_get_drvdata(pdev); 1388 - int i; 1389 - 1390 - for (i = 0; i < device->max_chan; i++) { 1391 - if (device->ch[i].in_use) 1392 - return -EAGAIN; 1393 - } 1394 - 1395 - return 0; 1396 - } 1397 - 1398 - /****************************************************************************** 1399 - * PCI stuff 1400 - */ 1401 - static struct pci_device_id intel_mid_dma_ids[] = { 1402 - { PCI_VDEVICE(INTEL, INTEL_MID_DMAC1_ID), INFO(2, 6, 4095, 0x200020)}, 1403 - { PCI_VDEVICE(INTEL, INTEL_MID_DMAC2_ID), INFO(2, 0, 2047, 0)}, 1404 - { PCI_VDEVICE(INTEL, INTEL_MID_GP_DMAC2_ID), INFO(2, 0, 2047, 0)}, 1405 - { PCI_VDEVICE(INTEL, INTEL_MFLD_DMAC1_ID), INFO(4, 0, 4095, 0x400040)}, 1406 - { 0, } 1407 - }; 1408 - MODULE_DEVICE_TABLE(pci, intel_mid_dma_ids); 1409 - 1410 - static const struct dev_pm_ops intel_mid_dma_pm = { 1411 - .runtime_suspend = dma_runtime_suspend, 1412 - .runtime_resume = dma_runtime_resume, 1413 - .runtime_idle = dma_runtime_idle, 1414 - .suspend = dma_suspend, 1415 - .resume = dma_resume, 1416 - }; 1417 - 1418 - static struct pci_driver intel_mid_dma_pci_driver = { 1419 - .name = "Intel MID DMA", 1420 - .id_table = intel_mid_dma_ids, 1421 - .probe = intel_mid_dma_probe, 1422 - .remove = intel_mid_dma_remove, 1423 - #ifdef CONFIG_PM 1424 - .driver = { 1425 - .pm = &intel_mid_dma_pm, 1426 - }, 1427 - #endif 1428 - }; 1429 - 1430 - static int __init intel_mid_dma_init(void) 1431 - { 1432 - pr_debug("INFO_MDMA: LNW DMA Driver Version %s\n", 1433 - INTEL_MID_DMA_DRIVER_VERSION); 1434 - return pci_register_driver(&intel_mid_dma_pci_driver); 1435 - } 1436 - fs_initcall(intel_mid_dma_init); 1437 - 1438 - static void __exit intel_mid_dma_exit(void) 1439 - { 1440 - pci_unregister_driver(&intel_mid_dma_pci_driver); 1441 - } 1442 - module_exit(intel_mid_dma_exit); 1443 - 1444 - MODULE_AUTHOR("Vinod Koul <vinod.koul@intel.com>"); 1445 - 
MODULE_DESCRIPTION("Intel (R) MID DMAC Driver"); 1446 - MODULE_LICENSE("GPL v2"); 1447 - MODULE_VERSION(INTEL_MID_DMA_DRIVER_VERSION);
drivers/dma/intel_mid_dma_regs.h | 299 - (entire file deleted; removed contents follow)
··· 1 - /* 2 - * intel_mid_dma_regs.h - Intel MID DMA Drivers 3 - * 4 - * Copyright (C) 2008-10 Intel Corp 5 - * Author: Vinod Koul <vinod.koul@intel.com> 6 - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 7 - * 8 - * This program is free software; you can redistribute it and/or modify 9 - * it under the terms of the GNU General Public License as published by 10 - * the Free Software Foundation; version 2 of the License. 11 - * 12 - * This program is distributed in the hope that it will be useful, but 13 - * WITHOUT ANY WARRANTY; without even the implied warranty of 14 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 15 - * General Public License for more details. 16 - * 17 - * You should have received a copy of the GNU General Public License along 18 - * with this program; if not, write to the Free Software Foundation, Inc., 19 - * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 20 - * 21 - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 22 - * 23 - * 24 - */ 25 - #ifndef __INTEL_MID_DMAC_REGS_H__ 26 - #define __INTEL_MID_DMAC_REGS_H__ 27 - 28 - #include <linux/dmaengine.h> 29 - #include <linux/dmapool.h> 30 - #include <linux/pci_ids.h> 31 - 32 - #define INTEL_MID_DMA_DRIVER_VERSION "1.1.0" 33 - 34 - #define REG_BIT0 0x00000001 35 - #define REG_BIT8 0x00000100 36 - #define INT_MASK_WE 0x8 37 - #define CLEAR_DONE 0xFFFFEFFF 38 - #define UNMASK_INTR_REG(chan_num) \ 39 - ((REG_BIT0 << chan_num) | (REG_BIT8 << chan_num)) 40 - #define MASK_INTR_REG(chan_num) (REG_BIT8 << chan_num) 41 - 42 - #define ENABLE_CHANNEL(chan_num) \ 43 - ((REG_BIT0 << chan_num) | (REG_BIT8 << chan_num)) 44 - 45 - #define DISABLE_CHANNEL(chan_num) \ 46 - (REG_BIT8 << chan_num) 47 - 48 - #define DESCS_PER_CHANNEL 16 49 - /*DMA Registers*/ 50 - /*registers associated with channel programming*/ 51 - #define DMA_REG_SIZE 0x400 52 - #define DMA_CH_SIZE 0x58 53 - 54 - /*CH X REG = (DMA_CH_SIZE)*CH_NO + REG*/ 55 - #define SAR 0x00 /* Source Address Register*/ 56 - #define DAR 0x08 /* Destination Address Register*/ 57 - #define LLP 0x10 /* Linked List Pointer Register*/ 58 - #define CTL_LOW 0x18 /* Control Register*/ 59 - #define CTL_HIGH 0x1C /* Control Register*/ 60 - #define CFG_LOW 0x40 /* Configuration Register Low*/ 61 - #define CFG_HIGH 0x44 /* Configuration Register high*/ 62 - 63 - #define STATUS_TFR 0x2E8 64 - #define STATUS_BLOCK 0x2F0 65 - #define STATUS_ERR 0x308 66 - 67 - #define RAW_TFR 0x2C0 68 - #define RAW_BLOCK 0x2C8 69 - #define RAW_ERR 0x2E0 70 - 71 - #define MASK_TFR 0x310 72 - #define MASK_BLOCK 0x318 73 - #define MASK_SRC_TRAN 0x320 74 - #define MASK_DST_TRAN 0x328 75 - #define MASK_ERR 0x330 76 - 77 - #define CLEAR_TFR 0x338 78 - #define CLEAR_BLOCK 0x340 79 - #define CLEAR_SRC_TRAN 0x348 80 - #define CLEAR_DST_TRAN 0x350 81 - #define CLEAR_ERR 0x358 82 - 83 - #define INTR_STATUS 0x360 84 - #define DMA_CFG 0x398 85 - #define DMA_CHAN_EN 0x3A0 86 - 87 - /*DMA channel control registers*/ 88 - union intel_mid_dma_ctl_lo { 89 - struct { 90 - u32 int_en:1; /*enable or disable interrupts*/ 91 - /*should be 0*/ 92 - u32 dst_tr_width:3; /*destination transfer width*/ 93 - /*usually 32 bits = 010*/ 94 - u32 src_tr_width:3; /*source transfer width*/ 95 - /*usually 32 bits = 010*/ 96 - u32 dinc:2; /*destination address inc/dec*/ 97 - /*For mem:INC=00, Periphral NoINC=11*/ 98 - u32 sinc:2; /*source address inc or dec, as above*/ 99 - u32 dst_msize:3; /*destination burst transaction length*/ 100 - /*always = 16 ie 011*/ 101 - u32 
src_msize:3; /*source burst transaction length*/ 102 - /*always = 16 ie 011*/ 103 - u32 reser1:3; 104 - u32 tt_fc:3; /*transfer type and flow controller*/ 105 - /*M-M = 000 106 - P-M = 010 107 - M-P = 001*/ 108 - u32 dms:2; /*destination master select = 0*/ 109 - u32 sms:2; /*source master select = 0*/ 110 - u32 llp_dst_en:1; /*enable/disable destination LLP = 0*/ 111 - u32 llp_src_en:1; /*enable/disable source LLP = 0*/ 112 - u32 reser2:3; 113 - } ctlx; 114 - u32 ctl_lo; 115 - }; 116 - 117 - union intel_mid_dma_ctl_hi { 118 - struct { 119 - u32 block_ts:12; /*block transfer size*/ 120 - u32 done:1; /*Done - updated by DMAC*/ 121 - u32 reser:19; /*configured by DMAC*/ 122 - } ctlx; 123 - u32 ctl_hi; 124 - 125 - }; 126 - 127 - /*DMA channel configuration registers*/ 128 - union intel_mid_dma_cfg_lo { 129 - struct { 130 - u32 reser1:5; 131 - u32 ch_prior:3; /*channel priority = 0*/ 132 - u32 ch_susp:1; /*channel suspend = 0*/ 133 - u32 fifo_empty:1; /*FIFO empty or not R bit = 0*/ 134 - u32 hs_sel_dst:1; /*select HW/SW destn handshaking*/ 135 - /*HW = 0, SW = 1*/ 136 - u32 hs_sel_src:1; /*select HW/SW src handshaking*/ 137 - u32 reser2:6; 138 - u32 dst_hs_pol:1; /*dest HS interface polarity*/ 139 - u32 src_hs_pol:1; /*src HS interface polarity*/ 140 - u32 max_abrst:10; /*max AMBA burst len = 0 (no sw limit*/ 141 - u32 reload_src:1; /*auto reload src addr =1 if src is P*/ 142 - u32 reload_dst:1; /*AR destn addr =1 if dstn is P*/ 143 - } cfgx; 144 - u32 cfg_lo; 145 - }; 146 - 147 - union intel_mid_dma_cfg_hi { 148 - struct { 149 - u32 fcmode:1; /*flow control mode = 1*/ 150 - u32 fifo_mode:1; /*FIFO mode select = 1*/ 151 - u32 protctl:3; /*protection control = 0*/ 152 - u32 rsvd:2; 153 - u32 src_per:4; /*src hw HS interface*/ 154 - u32 dst_per:4; /*dstn hw HS interface*/ 155 - u32 reser2:17; 156 - } cfgx; 157 - u32 cfg_hi; 158 - }; 159 - 160 - 161 - /** 162 - * struct intel_mid_dma_chan - internal mid representation of a DMA channel 163 - * @chan: dma_chan strcture represetation for mid chan 164 - * @ch_regs: MMIO register space pointer to channel register 165 - * @dma_base: MMIO register space DMA engine base pointer 166 - * @ch_id: DMA channel id 167 - * @lock: channel spinlock 168 - * @active_list: current active descriptors 169 - * @queue: current queued up descriptors 170 - * @free_list: current free descriptors 171 - * @slave: dma slave structure 172 - * @descs_allocated: total number of descriptors allocated 173 - * @dma: dma device structure pointer 174 - * @busy: bool representing if ch is busy (active txn) or not 175 - * @in_use: bool representing if ch is in use or not 176 - * @raw_tfr: raw trf interrupt received 177 - * @raw_block: raw block interrupt received 178 - */ 179 - struct intel_mid_dma_chan { 180 - struct dma_chan chan; 181 - void __iomem *ch_regs; 182 - void __iomem *dma_base; 183 - int ch_id; 184 - spinlock_t lock; 185 - struct list_head active_list; 186 - struct list_head queue; 187 - struct list_head free_list; 188 - unsigned int descs_allocated; 189 - struct middma_device *dma; 190 - bool busy; 191 - bool in_use; 192 - u32 raw_tfr; 193 - u32 raw_block; 194 - struct intel_mid_dma_slave *mid_slave; 195 - }; 196 - 197 - static inline struct intel_mid_dma_chan *to_intel_mid_dma_chan( 198 - struct dma_chan *chan) 199 - { 200 - return container_of(chan, struct intel_mid_dma_chan, chan); 201 - } 202 - 203 - enum intel_mid_dma_state { 204 - RUNNING = 0, 205 - SUSPENDED, 206 - }; 207 - /** 208 - * struct middma_device - internal representation of a DMA device 209 - * @pdev: 
PCI device 210 - * @dma_base: MMIO register space pointer of DMA 211 - * @dma_pool: for allocating DMA descriptors 212 - * @common: embedded struct dma_device 213 - * @tasklet: dma tasklet for processing interrupts 214 - * @ch: per channel data 215 - * @pci_id: DMA device PCI ID 216 - * @intr_mask: Interrupt mask to be used 217 - * @mask_reg: MMIO register for periphral mask 218 - * @chan_base: Base ch index (read from driver data) 219 - * @max_chan: max number of chs supported (from drv_data) 220 - * @block_size: Block size of DMA transfer supported (from drv_data) 221 - * @pimr_mask: MMIO register addr for periphral interrupt (from drv_data) 222 - * @state: dma PM device state 223 - */ 224 - struct middma_device { 225 - struct pci_dev *pdev; 226 - void __iomem *dma_base; 227 - struct pci_pool *dma_pool; 228 - struct dma_device common; 229 - struct tasklet_struct tasklet; 230 - struct intel_mid_dma_chan ch[MAX_CHAN]; 231 - unsigned int pci_id; 232 - unsigned int intr_mask; 233 - void __iomem *mask_reg; 234 - int chan_base; 235 - int max_chan; 236 - int block_size; 237 - unsigned int pimr_mask; 238 - enum intel_mid_dma_state state; 239 - }; 240 - 241 - static inline struct middma_device *to_middma_device(struct dma_device *common) 242 - { 243 - return container_of(common, struct middma_device, common); 244 - } 245 - 246 - struct intel_mid_dma_desc { 247 - void __iomem *block; /*ch ptr*/ 248 - struct list_head desc_node; 249 - struct dma_async_tx_descriptor txd; 250 - size_t len; 251 - dma_addr_t sar; 252 - dma_addr_t dar; 253 - u32 cfg_hi; 254 - u32 cfg_lo; 255 - u32 ctl_lo; 256 - u32 ctl_hi; 257 - struct pci_pool *lli_pool; 258 - struct intel_mid_dma_lli *lli; 259 - dma_addr_t lli_phys; 260 - unsigned int lli_length; 261 - unsigned int current_lli; 262 - dma_addr_t next; 263 - enum dma_transfer_direction dirn; 264 - enum dma_status status; 265 - enum dma_slave_buswidth width; /*width of DMA txn*/ 266 - enum intel_mid_dma_mode cfg_mode; /*mode configuration*/ 267 - 268 - }; 269 - 270 - struct intel_mid_dma_lli { 271 - dma_addr_t sar; 272 - dma_addr_t dar; 273 - dma_addr_t llp; 274 - u32 ctl_lo; 275 - u32 ctl_hi; 276 - } __attribute__ ((packed)); 277 - 278 - static inline int test_ch_en(void __iomem *dma, u32 ch_no) 279 - { 280 - u32 en_reg = ioread32(dma + DMA_CHAN_EN); 281 - return (en_reg >> ch_no) & 0x1; 282 - } 283 - 284 - static inline struct intel_mid_dma_desc *to_intel_mid_dma_desc 285 - (struct dma_async_tx_descriptor *txd) 286 - { 287 - return container_of(txd, struct intel_mid_dma_desc, txd); 288 - } 289 - 290 - static inline struct intel_mid_dma_slave *to_intel_mid_dma_slave 291 - (struct dma_slave_config *slave) 292 - { 293 - return container_of(slave, struct intel_mid_dma_slave, dma_slave); 294 - } 295 - 296 - 297 - int dma_resume(struct device *dev); 298 - 299 - #endif /*__INTEL_MID_DMAC_REGS_H__*/
include/linux/intel_mid_dma.h | 76 -

--- a/include/linux/intel_mid_dma.h
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- *  intel_mid_dma.h - Intel MID DMA Drivers
- *
- *  Copyright (C) 2008-10 Intel Corp
- *  Author: Vinod Koul <vinod.koul@intel.com>
- *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- *  This program is free software; you can redistribute it and/or modify
- *  it under the terms of the GNU General Public License as published by
- *  the Free Software Foundation; version 2 of the License.
- *
- *  This program is distributed in the hope that it will be useful, but
- *  WITHOUT ANY WARRANTY; without even the implied warranty of
- *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- *  General Public License for more details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- *
- */
-#ifndef __INTEL_MID_DMA_H__
-#define __INTEL_MID_DMA_H__
-
-#include <linux/dmaengine.h>
-
-#define DMA_PREP_CIRCULAR_LIST		(1 << 10)
-
-/*DMA mode configurations*/
-enum intel_mid_dma_mode {
-	LNW_DMA_PER_TO_MEM = 0, /*periphral to memory configuration*/
-	LNW_DMA_MEM_TO_PER,	/*memory to periphral configuration*/
-	LNW_DMA_MEM_TO_MEM,	/*mem to mem confg (testing only)*/
-};
-
-/*DMA handshaking*/
-enum intel_mid_dma_hs_mode {
-	LNW_DMA_HW_HS = 0,	/*HW Handshaking only*/
-	LNW_DMA_SW_HS = 1,	/*SW Handshaking not recommended*/
-};
-
-/*Burst size configuration*/
-enum intel_mid_dma_msize {
-	LNW_DMA_MSIZE_1 = 0x0,
-	LNW_DMA_MSIZE_4 = 0x1,
-	LNW_DMA_MSIZE_8 = 0x2,
-	LNW_DMA_MSIZE_16 = 0x3,
-	LNW_DMA_MSIZE_32 = 0x4,
-	LNW_DMA_MSIZE_64 = 0x5,
-};
-
-/**
- * struct intel_mid_dma_slave - DMA slave structure
- *
- * @dirn: DMA trf direction
- * @src_width: tx register width
- * @dst_width: rx register width
- * @hs_mode: HW/SW handshaking mode
- * @cfg_mode: DMA data transfer mode (per-per/mem-per/mem-mem)
- * @src_msize: Source DMA burst size
- * @dst_msize: Dst DMA burst size
- * @per_addr: Periphral address
- * @device_instance: DMA peripheral device instance, we can have multiple
- *		peripheral device connected to single DMAC
- */
-struct intel_mid_dma_slave {
-	enum intel_mid_dma_hs_mode hs_mode;  /*handshaking*/
-	enum intel_mid_dma_mode cfg_mode; /*mode configuration*/
-	unsigned int device_instance; /*0, 1 for periphral instance*/
-	struct dma_slave_config dma_slave;
-};
-
-#endif /*__INTEL_MID_DMA_H__*/
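With this header gone there is no driver-specific slave interface left; the parameters that struct intel_mid_dma_slave and its enums carried are expressed through the generic struct dma_slave_config it already embedded. A minimal sketch of an equivalent configuration, with the peripheral address and burst length chosen purely for illustration:

#include <linux/dmaengine.h>

/* Illustrative values only; a real client takes these from its resources. */
static struct dma_slave_config example_cfg = {
	.direction      = DMA_DEV_TO_MEM,             /* roughly LNW_DMA_PER_TO_MEM */
	.src_addr       = 0xffae5000,                 /* placeholder FIFO address */
	.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
	.src_maxburst   = 16,                         /* roughly LNW_DMA_MSIZE_16 */
	.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
	.dst_maxburst   = 16,
};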