Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v4.16 2562 lines 68 kB view raw
1/* 2 * TI EDMA DMA engine driver 3 * 4 * Copyright 2012 Texas Instruments 5 * 6 * This program is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU General Public License as 8 * published by the Free Software Foundation version 2. 9 * 10 * This program is distributed "as is" WITHOUT ANY WARRANTY of any 11 * kind, whether express or implied; without even the implied warranty 12 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * GNU General Public License for more details. 14 */ 15 16#include <linux/dmaengine.h> 17#include <linux/dma-mapping.h> 18#include <linux/edma.h> 19#include <linux/err.h> 20#include <linux/init.h> 21#include <linux/interrupt.h> 22#include <linux/list.h> 23#include <linux/module.h> 24#include <linux/platform_device.h> 25#include <linux/slab.h> 26#include <linux/spinlock.h> 27#include <linux/of.h> 28#include <linux/of_dma.h> 29#include <linux/of_irq.h> 30#include <linux/of_address.h> 31#include <linux/of_device.h> 32#include <linux/pm_runtime.h> 33 34#include <linux/platform_data/edma.h> 35 36#include "dmaengine.h" 37#include "virt-dma.h" 38 39/* Offsets matching "struct edmacc_param" */ 40#define PARM_OPT 0x00 41#define PARM_SRC 0x04 42#define PARM_A_B_CNT 0x08 43#define PARM_DST 0x0c 44#define PARM_SRC_DST_BIDX 0x10 45#define PARM_LINK_BCNTRLD 0x14 46#define PARM_SRC_DST_CIDX 0x18 47#define PARM_CCNT 0x1c 48 49#define PARM_SIZE 0x20 50 51/* Offsets for EDMA CC global channel registers and their shadows */ 52#define SH_ER 0x00 /* 64 bits */ 53#define SH_ECR 0x08 /* 64 bits */ 54#define SH_ESR 0x10 /* 64 bits */ 55#define SH_CER 0x18 /* 64 bits */ 56#define SH_EER 0x20 /* 64 bits */ 57#define SH_EECR 0x28 /* 64 bits */ 58#define SH_EESR 0x30 /* 64 bits */ 59#define SH_SER 0x38 /* 64 bits */ 60#define SH_SECR 0x40 /* 64 bits */ 61#define SH_IER 0x50 /* 64 bits */ 62#define SH_IECR 0x58 /* 64 bits */ 63#define SH_IESR 0x60 /* 64 bits */ 64#define SH_IPR 0x68 /* 64 bits */ 65#define SH_ICR 0x70 /* 64 bits */ 66#define SH_IEVAL 0x78 67#define SH_QER 0x80 68#define SH_QEER 0x84 69#define SH_QEECR 0x88 70#define SH_QEESR 0x8c 71#define SH_QSER 0x90 72#define SH_QSECR 0x94 73#define SH_SIZE 0x200 74 75/* Offsets for EDMA CC global registers */ 76#define EDMA_REV 0x0000 77#define EDMA_CCCFG 0x0004 78#define EDMA_QCHMAP 0x0200 /* 8 registers */ 79#define EDMA_DMAQNUM 0x0240 /* 8 registers (4 on OMAP-L1xx) */ 80#define EDMA_QDMAQNUM 0x0260 81#define EDMA_QUETCMAP 0x0280 82#define EDMA_QUEPRI 0x0284 83#define EDMA_EMR 0x0300 /* 64 bits */ 84#define EDMA_EMCR 0x0308 /* 64 bits */ 85#define EDMA_QEMR 0x0310 86#define EDMA_QEMCR 0x0314 87#define EDMA_CCERR 0x0318 88#define EDMA_CCERRCLR 0x031c 89#define EDMA_EEVAL 0x0320 90#define EDMA_DRAE 0x0340 /* 4 x 64 bits*/ 91#define EDMA_QRAE 0x0380 /* 4 registers */ 92#define EDMA_QUEEVTENTRY 0x0400 /* 2 x 16 registers */ 93#define EDMA_QSTAT 0x0600 /* 2 registers */ 94#define EDMA_QWMTHRA 0x0620 95#define EDMA_QWMTHRB 0x0624 96#define EDMA_CCSTAT 0x0640 97 98#define EDMA_M 0x1000 /* global channel registers */ 99#define EDMA_ECR 0x1008 100#define EDMA_ECRH 0x100C 101#define EDMA_SHADOW0 0x2000 /* 4 shadow regions */ 102#define EDMA_PARM 0x4000 /* PaRAM entries */ 103 104#define PARM_OFFSET(param_no) (EDMA_PARM + ((param_no) << 5)) 105 106#define EDMA_DCHMAP 0x0100 /* 64 registers */ 107 108/* CCCFG register */ 109#define GET_NUM_DMACH(x) (x & 0x7) /* bits 0-2 */ 110#define GET_NUM_QDMACH(x) ((x & 0x70) >> 4) /* bits 4-6 */ 111#define GET_NUM_PAENTRY(x) ((x & 0x7000) >> 12) /* bits 12-14 */ 112#define GET_NUM_EVQUE(x) ((x & 0x70000) >> 16) /* bits 16-18 */ 113#define GET_NUM_REGN(x) ((x & 0x300000) >> 20) /* bits 20-21 */ 114#define CHMAP_EXIST BIT(24) 115 116/* CCSTAT register */ 117#define EDMA_CCSTAT_ACTV BIT(4) 118 119/* 120 * Max of 20 segments per channel to conserve PaRAM slots 121 * Also note that MAX_NR_SG should be atleast the no.of periods 122 * that are required for ASoC, otherwise DMA prep calls will 123 * fail. Today davinci-pcm is the only user of this driver and 124 * requires atleast 17 slots, so we setup the default to 20. 125 */ 126#define MAX_NR_SG 20 127#define EDMA_MAX_SLOTS MAX_NR_SG 128#define EDMA_DESCRIPTORS 16 129 130#define EDMA_CHANNEL_ANY -1 /* for edma_alloc_channel() */ 131#define EDMA_SLOT_ANY -1 /* for edma_alloc_slot() */ 132#define EDMA_CONT_PARAMS_ANY 1001 133#define EDMA_CONT_PARAMS_FIXED_EXACT 1002 134#define EDMA_CONT_PARAMS_FIXED_NOT_EXACT 1003 135 136/* PaRAM slots are laid out like this */ 137struct edmacc_param { 138 u32 opt; 139 u32 src; 140 u32 a_b_cnt; 141 u32 dst; 142 u32 src_dst_bidx; 143 u32 link_bcntrld; 144 u32 src_dst_cidx; 145 u32 ccnt; 146} __packed; 147 148/* fields in edmacc_param.opt */ 149#define SAM BIT(0) 150#define DAM BIT(1) 151#define SYNCDIM BIT(2) 152#define STATIC BIT(3) 153#define EDMA_FWID (0x07 << 8) 154#define TCCMODE BIT(11) 155#define EDMA_TCC(t) ((t) << 12) 156#define TCINTEN BIT(20) 157#define ITCINTEN BIT(21) 158#define TCCHEN BIT(22) 159#define ITCCHEN BIT(23) 160 161struct edma_pset { 162 u32 len; 163 dma_addr_t addr; 164 struct edmacc_param param; 165}; 166 167struct edma_desc { 168 struct virt_dma_desc vdesc; 169 struct list_head node; 170 enum dma_transfer_direction direction; 171 int cyclic; 172 int absync; 173 int pset_nr; 174 struct edma_chan *echan; 175 int processed; 176 177 /* 178 * The following 4 elements are used for residue accounting. 179 * 180 * - processed_stat: the number of SG elements we have traversed 181 * so far to cover accounting. This is updated directly to processed 182 * during edma_callback and is always <= processed, because processed 183 * refers to the number of pending transfer (programmed to EDMA 184 * controller), where as processed_stat tracks number of transfers 185 * accounted for so far. 186 * 187 * - residue: The amount of bytes we have left to transfer for this desc 188 * 189 * - residue_stat: The residue in bytes of data we have covered 190 * so far for accounting. This is updated directly to residue 191 * during callbacks to keep it current. 192 * 193 * - sg_len: Tracks the length of the current intermediate transfer, 194 * this is required to update the residue during intermediate transfer 195 * completion callback. 196 */ 197 int processed_stat; 198 u32 sg_len; 199 u32 residue; 200 u32 residue_stat; 201 202 struct edma_pset pset[0]; 203}; 204 205struct edma_cc; 206 207struct edma_tc { 208 struct device_node *node; 209 u16 id; 210}; 211 212struct edma_chan { 213 struct virt_dma_chan vchan; 214 struct list_head node; 215 struct edma_desc *edesc; 216 struct edma_cc *ecc; 217 struct edma_tc *tc; 218 int ch_num; 219 bool alloced; 220 bool hw_triggered; 221 int slot[EDMA_MAX_SLOTS]; 222 int missed; 223 struct dma_slave_config cfg; 224}; 225 226struct edma_cc { 227 struct device *dev; 228 struct edma_soc_info *info; 229 void __iomem *base; 230 int id; 231 bool legacy_mode; 232 233 /* eDMA3 resource information */ 234 unsigned num_channels; 235 unsigned num_qchannels; 236 unsigned num_region; 237 unsigned num_slots; 238 unsigned num_tc; 239 bool chmap_exist; 240 enum dma_event_q default_queue; 241 242 unsigned int ccint; 243 unsigned int ccerrint; 244 245 /* 246 * The slot_inuse bit for each PaRAM slot is clear unless the slot is 247 * in use by Linux or if it is allocated to be used by DSP. 248 */ 249 unsigned long *slot_inuse; 250 251 struct dma_device dma_slave; 252 struct dma_device *dma_memcpy; 253 struct edma_chan *slave_chans; 254 struct edma_tc *tc_list; 255 int dummy_slot; 256}; 257 258/* dummy param set used to (re)initialize parameter RAM slots */ 259static const struct edmacc_param dummy_paramset = { 260 .link_bcntrld = 0xffff, 261 .ccnt = 1, 262}; 263 264#define EDMA_BINDING_LEGACY 0 265#define EDMA_BINDING_TPCC 1 266static const u32 edma_binding_type[] = { 267 [EDMA_BINDING_LEGACY] = EDMA_BINDING_LEGACY, 268 [EDMA_BINDING_TPCC] = EDMA_BINDING_TPCC, 269}; 270 271static const struct of_device_id edma_of_ids[] = { 272 { 273 .compatible = "ti,edma3", 274 .data = &edma_binding_type[EDMA_BINDING_LEGACY], 275 }, 276 { 277 .compatible = "ti,edma3-tpcc", 278 .data = &edma_binding_type[EDMA_BINDING_TPCC], 279 }, 280 {} 281}; 282MODULE_DEVICE_TABLE(of, edma_of_ids); 283 284static const struct of_device_id edma_tptc_of_ids[] = { 285 { .compatible = "ti,edma3-tptc", }, 286 {} 287}; 288MODULE_DEVICE_TABLE(of, edma_tptc_of_ids); 289 290static inline unsigned int edma_read(struct edma_cc *ecc, int offset) 291{ 292 return (unsigned int)__raw_readl(ecc->base + offset); 293} 294 295static inline void edma_write(struct edma_cc *ecc, int offset, int val) 296{ 297 __raw_writel(val, ecc->base + offset); 298} 299 300static inline void edma_modify(struct edma_cc *ecc, int offset, unsigned and, 301 unsigned or) 302{ 303 unsigned val = edma_read(ecc, offset); 304 305 val &= and; 306 val |= or; 307 edma_write(ecc, offset, val); 308} 309 310static inline void edma_and(struct edma_cc *ecc, int offset, unsigned and) 311{ 312 unsigned val = edma_read(ecc, offset); 313 314 val &= and; 315 edma_write(ecc, offset, val); 316} 317 318static inline void edma_or(struct edma_cc *ecc, int offset, unsigned or) 319{ 320 unsigned val = edma_read(ecc, offset); 321 322 val |= or; 323 edma_write(ecc, offset, val); 324} 325 326static inline unsigned int edma_read_array(struct edma_cc *ecc, int offset, 327 int i) 328{ 329 return edma_read(ecc, offset + (i << 2)); 330} 331 332static inline void edma_write_array(struct edma_cc *ecc, int offset, int i, 333 unsigned val) 334{ 335 edma_write(ecc, offset + (i << 2), val); 336} 337 338static inline void edma_modify_array(struct edma_cc *ecc, int offset, int i, 339 unsigned and, unsigned or) 340{ 341 edma_modify(ecc, offset + (i << 2), and, or); 342} 343 344static inline void edma_or_array(struct edma_cc *ecc, int offset, int i, 345 unsigned or) 346{ 347 edma_or(ecc, offset + (i << 2), or); 348} 349 350static inline void edma_or_array2(struct edma_cc *ecc, int offset, int i, int j, 351 unsigned or) 352{ 353 edma_or(ecc, offset + ((i * 2 + j) << 2), or); 354} 355 356static inline void edma_write_array2(struct edma_cc *ecc, int offset, int i, 357 int j, unsigned val) 358{ 359 edma_write(ecc, offset + ((i * 2 + j) << 2), val); 360} 361 362static inline unsigned int edma_shadow0_read(struct edma_cc *ecc, int offset) 363{ 364 return edma_read(ecc, EDMA_SHADOW0 + offset); 365} 366 367static inline unsigned int edma_shadow0_read_array(struct edma_cc *ecc, 368 int offset, int i) 369{ 370 return edma_read(ecc, EDMA_SHADOW0 + offset + (i << 2)); 371} 372 373static inline void edma_shadow0_write(struct edma_cc *ecc, int offset, 374 unsigned val) 375{ 376 edma_write(ecc, EDMA_SHADOW0 + offset, val); 377} 378 379static inline void edma_shadow0_write_array(struct edma_cc *ecc, int offset, 380 int i, unsigned val) 381{ 382 edma_write(ecc, EDMA_SHADOW0 + offset + (i << 2), val); 383} 384 385static inline unsigned int edma_param_read(struct edma_cc *ecc, int offset, 386 int param_no) 387{ 388 return edma_read(ecc, EDMA_PARM + offset + (param_no << 5)); 389} 390 391static inline void edma_param_write(struct edma_cc *ecc, int offset, 392 int param_no, unsigned val) 393{ 394 edma_write(ecc, EDMA_PARM + offset + (param_no << 5), val); 395} 396 397static inline void edma_param_modify(struct edma_cc *ecc, int offset, 398 int param_no, unsigned and, unsigned or) 399{ 400 edma_modify(ecc, EDMA_PARM + offset + (param_no << 5), and, or); 401} 402 403static inline void edma_param_and(struct edma_cc *ecc, int offset, int param_no, 404 unsigned and) 405{ 406 edma_and(ecc, EDMA_PARM + offset + (param_no << 5), and); 407} 408 409static inline void edma_param_or(struct edma_cc *ecc, int offset, int param_no, 410 unsigned or) 411{ 412 edma_or(ecc, EDMA_PARM + offset + (param_no << 5), or); 413} 414 415static inline void edma_set_bits(int offset, int len, unsigned long *p) 416{ 417 for (; len > 0; len--) 418 set_bit(offset + (len - 1), p); 419} 420 421static void edma_assign_priority_to_queue(struct edma_cc *ecc, int queue_no, 422 int priority) 423{ 424 int bit = queue_no * 4; 425 426 edma_modify(ecc, EDMA_QUEPRI, ~(0x7 << bit), ((priority & 0x7) << bit)); 427} 428 429static void edma_set_chmap(struct edma_chan *echan, int slot) 430{ 431 struct edma_cc *ecc = echan->ecc; 432 int channel = EDMA_CHAN_SLOT(echan->ch_num); 433 434 if (ecc->chmap_exist) { 435 slot = EDMA_CHAN_SLOT(slot); 436 edma_write_array(ecc, EDMA_DCHMAP, channel, (slot << 5)); 437 } 438} 439 440static void edma_setup_interrupt(struct edma_chan *echan, bool enable) 441{ 442 struct edma_cc *ecc = echan->ecc; 443 int channel = EDMA_CHAN_SLOT(echan->ch_num); 444 445 if (enable) { 446 edma_shadow0_write_array(ecc, SH_ICR, channel >> 5, 447 BIT(channel & 0x1f)); 448 edma_shadow0_write_array(ecc, SH_IESR, channel >> 5, 449 BIT(channel & 0x1f)); 450 } else { 451 edma_shadow0_write_array(ecc, SH_IECR, channel >> 5, 452 BIT(channel & 0x1f)); 453 } 454} 455 456/* 457 * paRAM slot management functions 458 */ 459static void edma_write_slot(struct edma_cc *ecc, unsigned slot, 460 const struct edmacc_param *param) 461{ 462 slot = EDMA_CHAN_SLOT(slot); 463 if (slot >= ecc->num_slots) 464 return; 465 memcpy_toio(ecc->base + PARM_OFFSET(slot), param, PARM_SIZE); 466} 467 468static int edma_read_slot(struct edma_cc *ecc, unsigned slot, 469 struct edmacc_param *param) 470{ 471 slot = EDMA_CHAN_SLOT(slot); 472 if (slot >= ecc->num_slots) 473 return -EINVAL; 474 memcpy_fromio(param, ecc->base + PARM_OFFSET(slot), PARM_SIZE); 475 476 return 0; 477} 478 479/** 480 * edma_alloc_slot - allocate DMA parameter RAM 481 * @ecc: pointer to edma_cc struct 482 * @slot: specific slot to allocate; negative for "any unused slot" 483 * 484 * This allocates a parameter RAM slot, initializing it to hold a 485 * dummy transfer. Slots allocated using this routine have not been 486 * mapped to a hardware DMA channel, and will normally be used by 487 * linking to them from a slot associated with a DMA channel. 488 * 489 * Normal use is to pass EDMA_SLOT_ANY as the @slot, but specific 490 * slots may be allocated on behalf of DSP firmware. 491 * 492 * Returns the number of the slot, else negative errno. 493 */ 494static int edma_alloc_slot(struct edma_cc *ecc, int slot) 495{ 496 if (slot >= 0) { 497 slot = EDMA_CHAN_SLOT(slot); 498 /* Requesting entry paRAM slot for a HW triggered channel. */ 499 if (ecc->chmap_exist && slot < ecc->num_channels) 500 slot = EDMA_SLOT_ANY; 501 } 502 503 if (slot < 0) { 504 if (ecc->chmap_exist) 505 slot = 0; 506 else 507 slot = ecc->num_channels; 508 for (;;) { 509 slot = find_next_zero_bit(ecc->slot_inuse, 510 ecc->num_slots, 511 slot); 512 if (slot == ecc->num_slots) 513 return -ENOMEM; 514 if (!test_and_set_bit(slot, ecc->slot_inuse)) 515 break; 516 } 517 } else if (slot >= ecc->num_slots) { 518 return -EINVAL; 519 } else if (test_and_set_bit(slot, ecc->slot_inuse)) { 520 return -EBUSY; 521 } 522 523 edma_write_slot(ecc, slot, &dummy_paramset); 524 525 return EDMA_CTLR_CHAN(ecc->id, slot); 526} 527 528static void edma_free_slot(struct edma_cc *ecc, unsigned slot) 529{ 530 slot = EDMA_CHAN_SLOT(slot); 531 if (slot >= ecc->num_slots) 532 return; 533 534 edma_write_slot(ecc, slot, &dummy_paramset); 535 clear_bit(slot, ecc->slot_inuse); 536} 537 538/** 539 * edma_link - link one parameter RAM slot to another 540 * @ecc: pointer to edma_cc struct 541 * @from: parameter RAM slot originating the link 542 * @to: parameter RAM slot which is the link target 543 * 544 * The originating slot should not be part of any active DMA transfer. 545 */ 546static void edma_link(struct edma_cc *ecc, unsigned from, unsigned to) 547{ 548 if (unlikely(EDMA_CTLR(from) != EDMA_CTLR(to))) 549 dev_warn(ecc->dev, "Ignoring eDMA instance for linking\n"); 550 551 from = EDMA_CHAN_SLOT(from); 552 to = EDMA_CHAN_SLOT(to); 553 if (from >= ecc->num_slots || to >= ecc->num_slots) 554 return; 555 556 edma_param_modify(ecc, PARM_LINK_BCNTRLD, from, 0xffff0000, 557 PARM_OFFSET(to)); 558} 559 560/** 561 * edma_get_position - returns the current transfer point 562 * @ecc: pointer to edma_cc struct 563 * @slot: parameter RAM slot being examined 564 * @dst: true selects the dest position, false the source 565 * 566 * Returns the position of the current active slot 567 */ 568static dma_addr_t edma_get_position(struct edma_cc *ecc, unsigned slot, 569 bool dst) 570{ 571 u32 offs; 572 573 slot = EDMA_CHAN_SLOT(slot); 574 offs = PARM_OFFSET(slot); 575 offs += dst ? PARM_DST : PARM_SRC; 576 577 return edma_read(ecc, offs); 578} 579 580/* 581 * Channels with event associations will be triggered by their hardware 582 * events, and channels without such associations will be triggered by 583 * software. (At this writing there is no interface for using software 584 * triggers except with channels that don't support hardware triggers.) 585 */ 586static void edma_start(struct edma_chan *echan) 587{ 588 struct edma_cc *ecc = echan->ecc; 589 int channel = EDMA_CHAN_SLOT(echan->ch_num); 590 int j = (channel >> 5); 591 unsigned int mask = BIT(channel & 0x1f); 592 593 if (!echan->hw_triggered) { 594 /* EDMA channels without event association */ 595 dev_dbg(ecc->dev, "ESR%d %08x\n", j, 596 edma_shadow0_read_array(ecc, SH_ESR, j)); 597 edma_shadow0_write_array(ecc, SH_ESR, j, mask); 598 } else { 599 /* EDMA channel with event association */ 600 dev_dbg(ecc->dev, "ER%d %08x\n", j, 601 edma_shadow0_read_array(ecc, SH_ER, j)); 602 /* Clear any pending event or error */ 603 edma_write_array(ecc, EDMA_ECR, j, mask); 604 edma_write_array(ecc, EDMA_EMCR, j, mask); 605 /* Clear any SER */ 606 edma_shadow0_write_array(ecc, SH_SECR, j, mask); 607 edma_shadow0_write_array(ecc, SH_EESR, j, mask); 608 dev_dbg(ecc->dev, "EER%d %08x\n", j, 609 edma_shadow0_read_array(ecc, SH_EER, j)); 610 } 611} 612 613static void edma_stop(struct edma_chan *echan) 614{ 615 struct edma_cc *ecc = echan->ecc; 616 int channel = EDMA_CHAN_SLOT(echan->ch_num); 617 int j = (channel >> 5); 618 unsigned int mask = BIT(channel & 0x1f); 619 620 edma_shadow0_write_array(ecc, SH_EECR, j, mask); 621 edma_shadow0_write_array(ecc, SH_ECR, j, mask); 622 edma_shadow0_write_array(ecc, SH_SECR, j, mask); 623 edma_write_array(ecc, EDMA_EMCR, j, mask); 624 625 /* clear possibly pending completion interrupt */ 626 edma_shadow0_write_array(ecc, SH_ICR, j, mask); 627 628 dev_dbg(ecc->dev, "EER%d %08x\n", j, 629 edma_shadow0_read_array(ecc, SH_EER, j)); 630 631 /* REVISIT: consider guarding against inappropriate event 632 * chaining by overwriting with dummy_paramset. 633 */ 634} 635 636/* 637 * Temporarily disable EDMA hardware events on the specified channel, 638 * preventing them from triggering new transfers 639 */ 640static void edma_pause(struct edma_chan *echan) 641{ 642 int channel = EDMA_CHAN_SLOT(echan->ch_num); 643 unsigned int mask = BIT(channel & 0x1f); 644 645 edma_shadow0_write_array(echan->ecc, SH_EECR, channel >> 5, mask); 646} 647 648/* Re-enable EDMA hardware events on the specified channel. */ 649static void edma_resume(struct edma_chan *echan) 650{ 651 int channel = EDMA_CHAN_SLOT(echan->ch_num); 652 unsigned int mask = BIT(channel & 0x1f); 653 654 edma_shadow0_write_array(echan->ecc, SH_EESR, channel >> 5, mask); 655} 656 657static void edma_trigger_channel(struct edma_chan *echan) 658{ 659 struct edma_cc *ecc = echan->ecc; 660 int channel = EDMA_CHAN_SLOT(echan->ch_num); 661 unsigned int mask = BIT(channel & 0x1f); 662 663 edma_shadow0_write_array(ecc, SH_ESR, (channel >> 5), mask); 664 665 dev_dbg(ecc->dev, "ESR%d %08x\n", (channel >> 5), 666 edma_shadow0_read_array(ecc, SH_ESR, (channel >> 5))); 667} 668 669static void edma_clean_channel(struct edma_chan *echan) 670{ 671 struct edma_cc *ecc = echan->ecc; 672 int channel = EDMA_CHAN_SLOT(echan->ch_num); 673 int j = (channel >> 5); 674 unsigned int mask = BIT(channel & 0x1f); 675 676 dev_dbg(ecc->dev, "EMR%d %08x\n", j, edma_read_array(ecc, EDMA_EMR, j)); 677 edma_shadow0_write_array(ecc, SH_ECR, j, mask); 678 /* Clear the corresponding EMR bits */ 679 edma_write_array(ecc, EDMA_EMCR, j, mask); 680 /* Clear any SER */ 681 edma_shadow0_write_array(ecc, SH_SECR, j, mask); 682 edma_write(ecc, EDMA_CCERRCLR, BIT(16) | BIT(1) | BIT(0)); 683} 684 685/* Move channel to a specific event queue */ 686static void edma_assign_channel_eventq(struct edma_chan *echan, 687 enum dma_event_q eventq_no) 688{ 689 struct edma_cc *ecc = echan->ecc; 690 int channel = EDMA_CHAN_SLOT(echan->ch_num); 691 int bit = (channel & 0x7) * 4; 692 693 /* default to low priority queue */ 694 if (eventq_no == EVENTQ_DEFAULT) 695 eventq_no = ecc->default_queue; 696 if (eventq_no >= ecc->num_tc) 697 return; 698 699 eventq_no &= 7; 700 edma_modify_array(ecc, EDMA_DMAQNUM, (channel >> 3), ~(0x7 << bit), 701 eventq_no << bit); 702} 703 704static int edma_alloc_channel(struct edma_chan *echan, 705 enum dma_event_q eventq_no) 706{ 707 struct edma_cc *ecc = echan->ecc; 708 int channel = EDMA_CHAN_SLOT(echan->ch_num); 709 710 /* ensure access through shadow region 0 */ 711 edma_or_array2(ecc, EDMA_DRAE, 0, channel >> 5, BIT(channel & 0x1f)); 712 713 /* ensure no events are pending */ 714 edma_stop(echan); 715 716 edma_setup_interrupt(echan, true); 717 718 edma_assign_channel_eventq(echan, eventq_no); 719 720 return 0; 721} 722 723static void edma_free_channel(struct edma_chan *echan) 724{ 725 /* ensure no events are pending */ 726 edma_stop(echan); 727 /* REVISIT should probably take out of shadow region 0 */ 728 edma_setup_interrupt(echan, false); 729} 730 731static inline struct edma_cc *to_edma_cc(struct dma_device *d) 732{ 733 return container_of(d, struct edma_cc, dma_slave); 734} 735 736static inline struct edma_chan *to_edma_chan(struct dma_chan *c) 737{ 738 return container_of(c, struct edma_chan, vchan.chan); 739} 740 741static inline struct edma_desc *to_edma_desc(struct dma_async_tx_descriptor *tx) 742{ 743 return container_of(tx, struct edma_desc, vdesc.tx); 744} 745 746static void edma_desc_free(struct virt_dma_desc *vdesc) 747{ 748 kfree(container_of(vdesc, struct edma_desc, vdesc)); 749} 750 751/* Dispatch a queued descriptor to the controller (caller holds lock) */ 752static void edma_execute(struct edma_chan *echan) 753{ 754 struct edma_cc *ecc = echan->ecc; 755 struct virt_dma_desc *vdesc; 756 struct edma_desc *edesc; 757 struct device *dev = echan->vchan.chan.device->dev; 758 int i, j, left, nslots; 759 760 if (!echan->edesc) { 761 /* Setup is needed for the first transfer */ 762 vdesc = vchan_next_desc(&echan->vchan); 763 if (!vdesc) 764 return; 765 list_del(&vdesc->node); 766 echan->edesc = to_edma_desc(&vdesc->tx); 767 } 768 769 edesc = echan->edesc; 770 771 /* Find out how many left */ 772 left = edesc->pset_nr - edesc->processed; 773 nslots = min(MAX_NR_SG, left); 774 edesc->sg_len = 0; 775 776 /* Write descriptor PaRAM set(s) */ 777 for (i = 0; i < nslots; i++) { 778 j = i + edesc->processed; 779 edma_write_slot(ecc, echan->slot[i], &edesc->pset[j].param); 780 edesc->sg_len += edesc->pset[j].len; 781 dev_vdbg(dev, 782 "\n pset[%d]:\n" 783 " chnum\t%d\n" 784 " slot\t%d\n" 785 " opt\t%08x\n" 786 " src\t%08x\n" 787 " dst\t%08x\n" 788 " abcnt\t%08x\n" 789 " ccnt\t%08x\n" 790 " bidx\t%08x\n" 791 " cidx\t%08x\n" 792 " lkrld\t%08x\n", 793 j, echan->ch_num, echan->slot[i], 794 edesc->pset[j].param.opt, 795 edesc->pset[j].param.src, 796 edesc->pset[j].param.dst, 797 edesc->pset[j].param.a_b_cnt, 798 edesc->pset[j].param.ccnt, 799 edesc->pset[j].param.src_dst_bidx, 800 edesc->pset[j].param.src_dst_cidx, 801 edesc->pset[j].param.link_bcntrld); 802 /* Link to the previous slot if not the last set */ 803 if (i != (nslots - 1)) 804 edma_link(ecc, echan->slot[i], echan->slot[i + 1]); 805 } 806 807 edesc->processed += nslots; 808 809 /* 810 * If this is either the last set in a set of SG-list transactions 811 * then setup a link to the dummy slot, this results in all future 812 * events being absorbed and that's OK because we're done 813 */ 814 if (edesc->processed == edesc->pset_nr) { 815 if (edesc->cyclic) 816 edma_link(ecc, echan->slot[nslots - 1], echan->slot[1]); 817 else 818 edma_link(ecc, echan->slot[nslots - 1], 819 echan->ecc->dummy_slot); 820 } 821 822 if (echan->missed) { 823 /* 824 * This happens due to setup times between intermediate 825 * transfers in long SG lists which have to be broken up into 826 * transfers of MAX_NR_SG 827 */ 828 dev_dbg(dev, "missed event on channel %d\n", echan->ch_num); 829 edma_clean_channel(echan); 830 edma_stop(echan); 831 edma_start(echan); 832 edma_trigger_channel(echan); 833 echan->missed = 0; 834 } else if (edesc->processed <= MAX_NR_SG) { 835 dev_dbg(dev, "first transfer starting on channel %d\n", 836 echan->ch_num); 837 edma_start(echan); 838 } else { 839 dev_dbg(dev, "chan: %d: completed %d elements, resuming\n", 840 echan->ch_num, edesc->processed); 841 edma_resume(echan); 842 } 843} 844 845static int edma_terminate_all(struct dma_chan *chan) 846{ 847 struct edma_chan *echan = to_edma_chan(chan); 848 unsigned long flags; 849 LIST_HEAD(head); 850 851 spin_lock_irqsave(&echan->vchan.lock, flags); 852 853 /* 854 * Stop DMA activity: we assume the callback will not be called 855 * after edma_dma() returns (even if it does, it will see 856 * echan->edesc is NULL and exit.) 857 */ 858 if (echan->edesc) { 859 edma_stop(echan); 860 /* Move the cyclic channel back to default queue */ 861 if (!echan->tc && echan->edesc->cyclic) 862 edma_assign_channel_eventq(echan, EVENTQ_DEFAULT); 863 864 vchan_terminate_vdesc(&echan->edesc->vdesc); 865 echan->edesc = NULL; 866 } 867 868 vchan_get_all_descriptors(&echan->vchan, &head); 869 spin_unlock_irqrestore(&echan->vchan.lock, flags); 870 vchan_dma_desc_free_list(&echan->vchan, &head); 871 872 return 0; 873} 874 875static void edma_synchronize(struct dma_chan *chan) 876{ 877 struct edma_chan *echan = to_edma_chan(chan); 878 879 vchan_synchronize(&echan->vchan); 880} 881 882static int edma_slave_config(struct dma_chan *chan, 883 struct dma_slave_config *cfg) 884{ 885 struct edma_chan *echan = to_edma_chan(chan); 886 887 if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES || 888 cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES) 889 return -EINVAL; 890 891 if (cfg->src_maxburst > chan->device->max_burst || 892 cfg->dst_maxburst > chan->device->max_burst) 893 return -EINVAL; 894 895 memcpy(&echan->cfg, cfg, sizeof(echan->cfg)); 896 897 return 0; 898} 899 900static int edma_dma_pause(struct dma_chan *chan) 901{ 902 struct edma_chan *echan = to_edma_chan(chan); 903 904 if (!echan->edesc) 905 return -EINVAL; 906 907 edma_pause(echan); 908 return 0; 909} 910 911static int edma_dma_resume(struct dma_chan *chan) 912{ 913 struct edma_chan *echan = to_edma_chan(chan); 914 915 edma_resume(echan); 916 return 0; 917} 918 919/* 920 * A PaRAM set configuration abstraction used by other modes 921 * @chan: Channel who's PaRAM set we're configuring 922 * @pset: PaRAM set to initialize and setup. 923 * @src_addr: Source address of the DMA 924 * @dst_addr: Destination address of the DMA 925 * @burst: In units of dev_width, how much to send 926 * @dev_width: How much is the dev_width 927 * @dma_length: Total length of the DMA transfer 928 * @direction: Direction of the transfer 929 */ 930static int edma_config_pset(struct dma_chan *chan, struct edma_pset *epset, 931 dma_addr_t src_addr, dma_addr_t dst_addr, u32 burst, 932 unsigned int acnt, unsigned int dma_length, 933 enum dma_transfer_direction direction) 934{ 935 struct edma_chan *echan = to_edma_chan(chan); 936 struct device *dev = chan->device->dev; 937 struct edmacc_param *param = &epset->param; 938 int bcnt, ccnt, cidx; 939 int src_bidx, dst_bidx, src_cidx, dst_cidx; 940 int absync; 941 942 /* src/dst_maxburst == 0 is the same case as src/dst_maxburst == 1 */ 943 if (!burst) 944 burst = 1; 945 /* 946 * If the maxburst is equal to the fifo width, use 947 * A-synced transfers. This allows for large contiguous 948 * buffer transfers using only one PaRAM set. 949 */ 950 if (burst == 1) { 951 /* 952 * For the A-sync case, bcnt and ccnt are the remainder 953 * and quotient respectively of the division of: 954 * (dma_length / acnt) by (SZ_64K -1). This is so 955 * that in case bcnt over flows, we have ccnt to use. 956 * Note: In A-sync tranfer only, bcntrld is used, but it 957 * only applies for sg_dma_len(sg) >= SZ_64K. 958 * In this case, the best way adopted is- bccnt for the 959 * first frame will be the remainder below. Then for 960 * every successive frame, bcnt will be SZ_64K-1. This 961 * is assured as bcntrld = 0xffff in end of function. 962 */ 963 absync = false; 964 ccnt = dma_length / acnt / (SZ_64K - 1); 965 bcnt = dma_length / acnt - ccnt * (SZ_64K - 1); 966 /* 967 * If bcnt is non-zero, we have a remainder and hence an 968 * extra frame to transfer, so increment ccnt. 969 */ 970 if (bcnt) 971 ccnt++; 972 else 973 bcnt = SZ_64K - 1; 974 cidx = acnt; 975 } else { 976 /* 977 * If maxburst is greater than the fifo address_width, 978 * use AB-synced transfers where A count is the fifo 979 * address_width and B count is the maxburst. In this 980 * case, we are limited to transfers of C count frames 981 * of (address_width * maxburst) where C count is limited 982 * to SZ_64K-1. This places an upper bound on the length 983 * of an SG segment that can be handled. 984 */ 985 absync = true; 986 bcnt = burst; 987 ccnt = dma_length / (acnt * bcnt); 988 if (ccnt > (SZ_64K - 1)) { 989 dev_err(dev, "Exceeded max SG segment size\n"); 990 return -EINVAL; 991 } 992 cidx = acnt * bcnt; 993 } 994 995 epset->len = dma_length; 996 997 if (direction == DMA_MEM_TO_DEV) { 998 src_bidx = acnt; 999 src_cidx = cidx; 1000 dst_bidx = 0; 1001 dst_cidx = 0; 1002 epset->addr = src_addr; 1003 } else if (direction == DMA_DEV_TO_MEM) { 1004 src_bidx = 0; 1005 src_cidx = 0; 1006 dst_bidx = acnt; 1007 dst_cidx = cidx; 1008 epset->addr = dst_addr; 1009 } else if (direction == DMA_MEM_TO_MEM) { 1010 src_bidx = acnt; 1011 src_cidx = cidx; 1012 dst_bidx = acnt; 1013 dst_cidx = cidx; 1014 } else { 1015 dev_err(dev, "%s: direction not implemented yet\n", __func__); 1016 return -EINVAL; 1017 } 1018 1019 param->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num)); 1020 /* Configure A or AB synchronized transfers */ 1021 if (absync) 1022 param->opt |= SYNCDIM; 1023 1024 param->src = src_addr; 1025 param->dst = dst_addr; 1026 1027 param->src_dst_bidx = (dst_bidx << 16) | src_bidx; 1028 param->src_dst_cidx = (dst_cidx << 16) | src_cidx; 1029 1030 param->a_b_cnt = bcnt << 16 | acnt; 1031 param->ccnt = ccnt; 1032 /* 1033 * Only time when (bcntrld) auto reload is required is for 1034 * A-sync case, and in this case, a requirement of reload value 1035 * of SZ_64K-1 only is assured. 'link' is initially set to NULL 1036 * and then later will be populated by edma_execute. 1037 */ 1038 param->link_bcntrld = 0xffffffff; 1039 return absync; 1040} 1041 1042static struct dma_async_tx_descriptor *edma_prep_slave_sg( 1043 struct dma_chan *chan, struct scatterlist *sgl, 1044 unsigned int sg_len, enum dma_transfer_direction direction, 1045 unsigned long tx_flags, void *context) 1046{ 1047 struct edma_chan *echan = to_edma_chan(chan); 1048 struct device *dev = chan->device->dev; 1049 struct edma_desc *edesc; 1050 dma_addr_t src_addr = 0, dst_addr = 0; 1051 enum dma_slave_buswidth dev_width; 1052 u32 burst; 1053 struct scatterlist *sg; 1054 int i, nslots, ret; 1055 1056 if (unlikely(!echan || !sgl || !sg_len)) 1057 return NULL; 1058 1059 if (direction == DMA_DEV_TO_MEM) { 1060 src_addr = echan->cfg.src_addr; 1061 dev_width = echan->cfg.src_addr_width; 1062 burst = echan->cfg.src_maxburst; 1063 } else if (direction == DMA_MEM_TO_DEV) { 1064 dst_addr = echan->cfg.dst_addr; 1065 dev_width = echan->cfg.dst_addr_width; 1066 burst = echan->cfg.dst_maxburst; 1067 } else { 1068 dev_err(dev, "%s: bad direction: %d\n", __func__, direction); 1069 return NULL; 1070 } 1071 1072 if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) { 1073 dev_err(dev, "%s: Undefined slave buswidth\n", __func__); 1074 return NULL; 1075 } 1076 1077 edesc = kzalloc(sizeof(*edesc) + sg_len * sizeof(edesc->pset[0]), 1078 GFP_ATOMIC); 1079 if (!edesc) 1080 return NULL; 1081 1082 edesc->pset_nr = sg_len; 1083 edesc->residue = 0; 1084 edesc->direction = direction; 1085 edesc->echan = echan; 1086 1087 /* Allocate a PaRAM slot, if needed */ 1088 nslots = min_t(unsigned, MAX_NR_SG, sg_len); 1089 1090 for (i = 0; i < nslots; i++) { 1091 if (echan->slot[i] < 0) { 1092 echan->slot[i] = 1093 edma_alloc_slot(echan->ecc, EDMA_SLOT_ANY); 1094 if (echan->slot[i] < 0) { 1095 kfree(edesc); 1096 dev_err(dev, "%s: Failed to allocate slot\n", 1097 __func__); 1098 return NULL; 1099 } 1100 } 1101 } 1102 1103 /* Configure PaRAM sets for each SG */ 1104 for_each_sg(sgl, sg, sg_len, i) { 1105 /* Get address for each SG */ 1106 if (direction == DMA_DEV_TO_MEM) 1107 dst_addr = sg_dma_address(sg); 1108 else 1109 src_addr = sg_dma_address(sg); 1110 1111 ret = edma_config_pset(chan, &edesc->pset[i], src_addr, 1112 dst_addr, burst, dev_width, 1113 sg_dma_len(sg), direction); 1114 if (ret < 0) { 1115 kfree(edesc); 1116 return NULL; 1117 } 1118 1119 edesc->absync = ret; 1120 edesc->residue += sg_dma_len(sg); 1121 1122 if (i == sg_len - 1) 1123 /* Enable completion interrupt */ 1124 edesc->pset[i].param.opt |= TCINTEN; 1125 else if (!((i+1) % MAX_NR_SG)) 1126 /* 1127 * Enable early completion interrupt for the 1128 * intermediateset. In this case the driver will be 1129 * notified when the paRAM set is submitted to TC. This 1130 * will allow more time to set up the next set of slots. 1131 */ 1132 edesc->pset[i].param.opt |= (TCINTEN | TCCMODE); 1133 } 1134 edesc->residue_stat = edesc->residue; 1135 1136 return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags); 1137} 1138 1139static struct dma_async_tx_descriptor *edma_prep_dma_memcpy( 1140 struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, 1141 size_t len, unsigned long tx_flags) 1142{ 1143 int ret, nslots; 1144 struct edma_desc *edesc; 1145 struct device *dev = chan->device->dev; 1146 struct edma_chan *echan = to_edma_chan(chan); 1147 unsigned int width, pset_len, array_size; 1148 1149 if (unlikely(!echan || !len)) 1150 return NULL; 1151 1152 /* Align the array size (acnt block) with the transfer properties */ 1153 switch (__ffs((src | dest | len))) { 1154 case 0: 1155 array_size = SZ_32K - 1; 1156 break; 1157 case 1: 1158 array_size = SZ_32K - 2; 1159 break; 1160 default: 1161 array_size = SZ_32K - 4; 1162 break; 1163 } 1164 1165 if (len < SZ_64K) { 1166 /* 1167 * Transfer size less than 64K can be handled with one paRAM 1168 * slot and with one burst. 1169 * ACNT = length 1170 */ 1171 width = len; 1172 pset_len = len; 1173 nslots = 1; 1174 } else { 1175 /* 1176 * Transfer size bigger than 64K will be handled with maximum of 1177 * two paRAM slots. 1178 * slot1: (full_length / 32767) times 32767 bytes bursts. 1179 * ACNT = 32767, length1: (full_length / 32767) * 32767 1180 * slot2: the remaining amount of data after slot1. 1181 * ACNT = full_length - length1, length2 = ACNT 1182 * 1183 * When the full_length is multibple of 32767 one slot can be 1184 * used to complete the transfer. 1185 */ 1186 width = array_size; 1187 pset_len = rounddown(len, width); 1188 /* One slot is enough for lengths multiple of (SZ_32K -1) */ 1189 if (unlikely(pset_len == len)) 1190 nslots = 1; 1191 else 1192 nslots = 2; 1193 } 1194 1195 edesc = kzalloc(sizeof(*edesc) + nslots * sizeof(edesc->pset[0]), 1196 GFP_ATOMIC); 1197 if (!edesc) 1198 return NULL; 1199 1200 edesc->pset_nr = nslots; 1201 edesc->residue = edesc->residue_stat = len; 1202 edesc->direction = DMA_MEM_TO_MEM; 1203 edesc->echan = echan; 1204 1205 ret = edma_config_pset(chan, &edesc->pset[0], src, dest, 1, 1206 width, pset_len, DMA_MEM_TO_MEM); 1207 if (ret < 0) { 1208 kfree(edesc); 1209 return NULL; 1210 } 1211 1212 edesc->absync = ret; 1213 1214 edesc->pset[0].param.opt |= ITCCHEN; 1215 if (nslots == 1) { 1216 /* Enable transfer complete interrupt */ 1217 edesc->pset[0].param.opt |= TCINTEN; 1218 } else { 1219 /* Enable transfer complete chaining for the first slot */ 1220 edesc->pset[0].param.opt |= TCCHEN; 1221 1222 if (echan->slot[1] < 0) { 1223 echan->slot[1] = edma_alloc_slot(echan->ecc, 1224 EDMA_SLOT_ANY); 1225 if (echan->slot[1] < 0) { 1226 kfree(edesc); 1227 dev_err(dev, "%s: Failed to allocate slot\n", 1228 __func__); 1229 return NULL; 1230 } 1231 } 1232 dest += pset_len; 1233 src += pset_len; 1234 pset_len = width = len % array_size; 1235 1236 ret = edma_config_pset(chan, &edesc->pset[1], src, dest, 1, 1237 width, pset_len, DMA_MEM_TO_MEM); 1238 if (ret < 0) { 1239 kfree(edesc); 1240 return NULL; 1241 } 1242 1243 edesc->pset[1].param.opt |= ITCCHEN; 1244 edesc->pset[1].param.opt |= TCINTEN; 1245 } 1246 1247 return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags); 1248} 1249 1250static struct dma_async_tx_descriptor *edma_prep_dma_cyclic( 1251 struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, 1252 size_t period_len, enum dma_transfer_direction direction, 1253 unsigned long tx_flags) 1254{ 1255 struct edma_chan *echan = to_edma_chan(chan); 1256 struct device *dev = chan->device->dev; 1257 struct edma_desc *edesc; 1258 dma_addr_t src_addr, dst_addr; 1259 enum dma_slave_buswidth dev_width; 1260 bool use_intermediate = false; 1261 u32 burst; 1262 int i, ret, nslots; 1263 1264 if (unlikely(!echan || !buf_len || !period_len)) 1265 return NULL; 1266 1267 if (direction == DMA_DEV_TO_MEM) { 1268 src_addr = echan->cfg.src_addr; 1269 dst_addr = buf_addr; 1270 dev_width = echan->cfg.src_addr_width; 1271 burst = echan->cfg.src_maxburst; 1272 } else if (direction == DMA_MEM_TO_DEV) { 1273 src_addr = buf_addr; 1274 dst_addr = echan->cfg.dst_addr; 1275 dev_width = echan->cfg.dst_addr_width; 1276 burst = echan->cfg.dst_maxburst; 1277 } else { 1278 dev_err(dev, "%s: bad direction: %d\n", __func__, direction); 1279 return NULL; 1280 } 1281 1282 if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) { 1283 dev_err(dev, "%s: Undefined slave buswidth\n", __func__); 1284 return NULL; 1285 } 1286 1287 if (unlikely(buf_len % period_len)) { 1288 dev_err(dev, "Period should be multiple of Buffer length\n"); 1289 return NULL; 1290 } 1291 1292 nslots = (buf_len / period_len) + 1; 1293 1294 /* 1295 * Cyclic DMA users such as audio cannot tolerate delays introduced 1296 * by cases where the number of periods is more than the maximum 1297 * number of SGs the EDMA driver can handle at a time. For DMA types 1298 * such as Slave SGs, such delays are tolerable and synchronized, 1299 * but the synchronization is difficult to achieve with Cyclic and 1300 * cannot be guaranteed, so we error out early. 1301 */ 1302 if (nslots > MAX_NR_SG) { 1303 /* 1304 * If the burst and period sizes are the same, we can put 1305 * the full buffer into a single period and activate 1306 * intermediate interrupts. This will produce interrupts 1307 * after each burst, which is also after each desired period. 1308 */ 1309 if (burst == period_len) { 1310 period_len = buf_len; 1311 nslots = 2; 1312 use_intermediate = true; 1313 } else { 1314 return NULL; 1315 } 1316 } 1317 1318 edesc = kzalloc(sizeof(*edesc) + nslots * sizeof(edesc->pset[0]), 1319 GFP_ATOMIC); 1320 if (!edesc) 1321 return NULL; 1322 1323 edesc->cyclic = 1; 1324 edesc->pset_nr = nslots; 1325 edesc->residue = edesc->residue_stat = buf_len; 1326 edesc->direction = direction; 1327 edesc->echan = echan; 1328 1329 dev_dbg(dev, "%s: channel=%d nslots=%d period_len=%zu buf_len=%zu\n", 1330 __func__, echan->ch_num, nslots, period_len, buf_len); 1331 1332 for (i = 0; i < nslots; i++) { 1333 /* Allocate a PaRAM slot, if needed */ 1334 if (echan->slot[i] < 0) { 1335 echan->slot[i] = 1336 edma_alloc_slot(echan->ecc, EDMA_SLOT_ANY); 1337 if (echan->slot[i] < 0) { 1338 kfree(edesc); 1339 dev_err(dev, "%s: Failed to allocate slot\n", 1340 __func__); 1341 return NULL; 1342 } 1343 } 1344 1345 if (i == nslots - 1) { 1346 memcpy(&edesc->pset[i], &edesc->pset[0], 1347 sizeof(edesc->pset[0])); 1348 break; 1349 } 1350 1351 ret = edma_config_pset(chan, &edesc->pset[i], src_addr, 1352 dst_addr, burst, dev_width, period_len, 1353 direction); 1354 if (ret < 0) { 1355 kfree(edesc); 1356 return NULL; 1357 } 1358 1359 if (direction == DMA_DEV_TO_MEM) 1360 dst_addr += period_len; 1361 else 1362 src_addr += period_len; 1363 1364 dev_vdbg(dev, "%s: Configure period %d of buf:\n", __func__, i); 1365 dev_vdbg(dev, 1366 "\n pset[%d]:\n" 1367 " chnum\t%d\n" 1368 " slot\t%d\n" 1369 " opt\t%08x\n" 1370 " src\t%08x\n" 1371 " dst\t%08x\n" 1372 " abcnt\t%08x\n" 1373 " ccnt\t%08x\n" 1374 " bidx\t%08x\n" 1375 " cidx\t%08x\n" 1376 " lkrld\t%08x\n", 1377 i, echan->ch_num, echan->slot[i], 1378 edesc->pset[i].param.opt, 1379 edesc->pset[i].param.src, 1380 edesc->pset[i].param.dst, 1381 edesc->pset[i].param.a_b_cnt, 1382 edesc->pset[i].param.ccnt, 1383 edesc->pset[i].param.src_dst_bidx, 1384 edesc->pset[i].param.src_dst_cidx, 1385 edesc->pset[i].param.link_bcntrld); 1386 1387 edesc->absync = ret; 1388 1389 /* 1390 * Enable period interrupt only if it is requested 1391 */ 1392 if (tx_flags & DMA_PREP_INTERRUPT) { 1393 edesc->pset[i].param.opt |= TCINTEN; 1394 1395 /* Also enable intermediate interrupts if necessary */ 1396 if (use_intermediate) 1397 edesc->pset[i].param.opt |= ITCINTEN; 1398 } 1399 } 1400 1401 /* Place the cyclic channel to highest priority queue */ 1402 if (!echan->tc) 1403 edma_assign_channel_eventq(echan, EVENTQ_0); 1404 1405 return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags); 1406} 1407 1408static void edma_completion_handler(struct edma_chan *echan) 1409{ 1410 struct device *dev = echan->vchan.chan.device->dev; 1411 struct edma_desc *edesc; 1412 1413 spin_lock(&echan->vchan.lock); 1414 edesc = echan->edesc; 1415 if (edesc) { 1416 if (edesc->cyclic) { 1417 vchan_cyclic_callback(&edesc->vdesc); 1418 spin_unlock(&echan->vchan.lock); 1419 return; 1420 } else if (edesc->processed == edesc->pset_nr) { 1421 edesc->residue = 0; 1422 edma_stop(echan); 1423 vchan_cookie_complete(&edesc->vdesc); 1424 echan->edesc = NULL; 1425 1426 dev_dbg(dev, "Transfer completed on channel %d\n", 1427 echan->ch_num); 1428 } else { 1429 dev_dbg(dev, "Sub transfer completed on channel %d\n", 1430 echan->ch_num); 1431 1432 edma_pause(echan); 1433 1434 /* Update statistics for tx_status */ 1435 edesc->residue -= edesc->sg_len; 1436 edesc->residue_stat = edesc->residue; 1437 edesc->processed_stat = edesc->processed; 1438 } 1439 edma_execute(echan); 1440 } 1441 1442 spin_unlock(&echan->vchan.lock); 1443} 1444 1445/* eDMA interrupt handler */ 1446static irqreturn_t dma_irq_handler(int irq, void *data) 1447{ 1448 struct edma_cc *ecc = data; 1449 int ctlr; 1450 u32 sh_ier; 1451 u32 sh_ipr; 1452 u32 bank; 1453 1454 ctlr = ecc->id; 1455 if (ctlr < 0) 1456 return IRQ_NONE; 1457 1458 dev_vdbg(ecc->dev, "dma_irq_handler\n"); 1459 1460 sh_ipr = edma_shadow0_read_array(ecc, SH_IPR, 0); 1461 if (!sh_ipr) { 1462 sh_ipr = edma_shadow0_read_array(ecc, SH_IPR, 1); 1463 if (!sh_ipr) 1464 return IRQ_NONE; 1465 sh_ier = edma_shadow0_read_array(ecc, SH_IER, 1); 1466 bank = 1; 1467 } else { 1468 sh_ier = edma_shadow0_read_array(ecc, SH_IER, 0); 1469 bank = 0; 1470 } 1471 1472 do { 1473 u32 slot; 1474 u32 channel; 1475 1476 slot = __ffs(sh_ipr); 1477 sh_ipr &= ~(BIT(slot)); 1478 1479 if (sh_ier & BIT(slot)) { 1480 channel = (bank << 5) | slot; 1481 /* Clear the corresponding IPR bits */ 1482 edma_shadow0_write_array(ecc, SH_ICR, bank, BIT(slot)); 1483 edma_completion_handler(&ecc->slave_chans[channel]); 1484 } 1485 } while (sh_ipr); 1486 1487 edma_shadow0_write(ecc, SH_IEVAL, 1); 1488 return IRQ_HANDLED; 1489} 1490 1491static void edma_error_handler(struct edma_chan *echan) 1492{ 1493 struct edma_cc *ecc = echan->ecc; 1494 struct device *dev = echan->vchan.chan.device->dev; 1495 struct edmacc_param p; 1496 int err; 1497 1498 if (!echan->edesc) 1499 return; 1500 1501 spin_lock(&echan->vchan.lock); 1502 1503 err = edma_read_slot(ecc, echan->slot[0], &p); 1504 1505 /* 1506 * Issue later based on missed flag which will be sure 1507 * to happen as: 1508 * (1) we finished transmitting an intermediate slot and 1509 * edma_execute is coming up. 1510 * (2) or we finished current transfer and issue will 1511 * call edma_execute. 1512 * 1513 * Important note: issuing can be dangerous here and 1514 * lead to some nasty recursion when we are in a NULL 1515 * slot. So we avoid doing so and set the missed flag. 1516 */ 1517 if (err || (p.a_b_cnt == 0 && p.ccnt == 0)) { 1518 dev_dbg(dev, "Error on null slot, setting miss\n"); 1519 echan->missed = 1; 1520 } else { 1521 /* 1522 * The slot is already programmed but the event got 1523 * missed, so its safe to issue it here. 1524 */ 1525 dev_dbg(dev, "Missed event, TRIGGERING\n"); 1526 edma_clean_channel(echan); 1527 edma_stop(echan); 1528 edma_start(echan); 1529 edma_trigger_channel(echan); 1530 } 1531 spin_unlock(&echan->vchan.lock); 1532} 1533 1534static inline bool edma_error_pending(struct edma_cc *ecc) 1535{ 1536 if (edma_read_array(ecc, EDMA_EMR, 0) || 1537 edma_read_array(ecc, EDMA_EMR, 1) || 1538 edma_read(ecc, EDMA_QEMR) || edma_read(ecc, EDMA_CCERR)) 1539 return true; 1540 1541 return false; 1542} 1543 1544/* eDMA error interrupt handler */ 1545static irqreturn_t dma_ccerr_handler(int irq, void *data) 1546{ 1547 struct edma_cc *ecc = data; 1548 int i, j; 1549 int ctlr; 1550 unsigned int cnt = 0; 1551 unsigned int val; 1552 1553 ctlr = ecc->id; 1554 if (ctlr < 0) 1555 return IRQ_NONE; 1556 1557 dev_vdbg(ecc->dev, "dma_ccerr_handler\n"); 1558 1559 if (!edma_error_pending(ecc)) { 1560 /* 1561 * The registers indicate no pending error event but the irq 1562 * handler has been called. 1563 * Ask eDMA to re-evaluate the error registers. 1564 */ 1565 dev_err(ecc->dev, "%s: Error interrupt without error event!\n", 1566 __func__); 1567 edma_write(ecc, EDMA_EEVAL, 1); 1568 return IRQ_NONE; 1569 } 1570 1571 while (1) { 1572 /* Event missed register(s) */ 1573 for (j = 0; j < 2; j++) { 1574 unsigned long emr; 1575 1576 val = edma_read_array(ecc, EDMA_EMR, j); 1577 if (!val) 1578 continue; 1579 1580 dev_dbg(ecc->dev, "EMR%d 0x%08x\n", j, val); 1581 emr = val; 1582 for (i = find_next_bit(&emr, 32, 0); i < 32; 1583 i = find_next_bit(&emr, 32, i + 1)) { 1584 int k = (j << 5) + i; 1585 1586 /* Clear the corresponding EMR bits */ 1587 edma_write_array(ecc, EDMA_EMCR, j, BIT(i)); 1588 /* Clear any SER */ 1589 edma_shadow0_write_array(ecc, SH_SECR, j, 1590 BIT(i)); 1591 edma_error_handler(&ecc->slave_chans[k]); 1592 } 1593 } 1594 1595 val = edma_read(ecc, EDMA_QEMR); 1596 if (val) { 1597 dev_dbg(ecc->dev, "QEMR 0x%02x\n", val); 1598 /* Not reported, just clear the interrupt reason. */ 1599 edma_write(ecc, EDMA_QEMCR, val); 1600 edma_shadow0_write(ecc, SH_QSECR, val); 1601 } 1602 1603 val = edma_read(ecc, EDMA_CCERR); 1604 if (val) { 1605 dev_warn(ecc->dev, "CCERR 0x%08x\n", val); 1606 /* Not reported, just clear the interrupt reason. */ 1607 edma_write(ecc, EDMA_CCERRCLR, val); 1608 } 1609 1610 if (!edma_error_pending(ecc)) 1611 break; 1612 cnt++; 1613 if (cnt > 10) 1614 break; 1615 } 1616 edma_write(ecc, EDMA_EEVAL, 1); 1617 return IRQ_HANDLED; 1618} 1619 1620/* Alloc channel resources */ 1621static int edma_alloc_chan_resources(struct dma_chan *chan) 1622{ 1623 struct edma_chan *echan = to_edma_chan(chan); 1624 struct edma_cc *ecc = echan->ecc; 1625 struct device *dev = ecc->dev; 1626 enum dma_event_q eventq_no = EVENTQ_DEFAULT; 1627 int ret; 1628 1629 if (echan->tc) { 1630 eventq_no = echan->tc->id; 1631 } else if (ecc->tc_list) { 1632 /* memcpy channel */ 1633 echan->tc = &ecc->tc_list[ecc->info->default_queue]; 1634 eventq_no = echan->tc->id; 1635 } 1636 1637 ret = edma_alloc_channel(echan, eventq_no); 1638 if (ret) 1639 return ret; 1640 1641 echan->slot[0] = edma_alloc_slot(ecc, echan->ch_num); 1642 if (echan->slot[0] < 0) { 1643 dev_err(dev, "Entry slot allocation failed for channel %u\n", 1644 EDMA_CHAN_SLOT(echan->ch_num)); 1645 ret = echan->slot[0]; 1646 goto err_slot; 1647 } 1648 1649 /* Set up channel -> slot mapping for the entry slot */ 1650 edma_set_chmap(echan, echan->slot[0]); 1651 echan->alloced = true; 1652 1653 dev_dbg(dev, "Got eDMA channel %d for virt channel %d (%s trigger)\n", 1654 EDMA_CHAN_SLOT(echan->ch_num), chan->chan_id, 1655 echan->hw_triggered ? "HW" : "SW"); 1656 1657 return 0; 1658 1659err_slot: 1660 edma_free_channel(echan); 1661 return ret; 1662} 1663 1664/* Free channel resources */ 1665static void edma_free_chan_resources(struct dma_chan *chan) 1666{ 1667 struct edma_chan *echan = to_edma_chan(chan); 1668 struct device *dev = echan->ecc->dev; 1669 int i; 1670 1671 /* Terminate transfers */ 1672 edma_stop(echan); 1673 1674 vchan_free_chan_resources(&echan->vchan); 1675 1676 /* Free EDMA PaRAM slots */ 1677 for (i = 0; i < EDMA_MAX_SLOTS; i++) { 1678 if (echan->slot[i] >= 0) { 1679 edma_free_slot(echan->ecc, echan->slot[i]); 1680 echan->slot[i] = -1; 1681 } 1682 } 1683 1684 /* Set entry slot to the dummy slot */ 1685 edma_set_chmap(echan, echan->ecc->dummy_slot); 1686 1687 /* Free EDMA channel */ 1688 if (echan->alloced) { 1689 edma_free_channel(echan); 1690 echan->alloced = false; 1691 } 1692 1693 echan->tc = NULL; 1694 echan->hw_triggered = false; 1695 1696 dev_dbg(dev, "Free eDMA channel %d for virt channel %d\n", 1697 EDMA_CHAN_SLOT(echan->ch_num), chan->chan_id); 1698} 1699 1700/* Send pending descriptor to hardware */ 1701static void edma_issue_pending(struct dma_chan *chan) 1702{ 1703 struct edma_chan *echan = to_edma_chan(chan); 1704 unsigned long flags; 1705 1706 spin_lock_irqsave(&echan->vchan.lock, flags); 1707 if (vchan_issue_pending(&echan->vchan) && !echan->edesc) 1708 edma_execute(echan); 1709 spin_unlock_irqrestore(&echan->vchan.lock, flags); 1710} 1711 1712/* 1713 * This limit exists to avoid a possible infinite loop when waiting for proof 1714 * that a particular transfer is completed. This limit can be hit if there 1715 * are large bursts to/from slow devices or the CPU is never able to catch 1716 * the DMA hardware idle. On an AM335x transfering 48 bytes from the UART 1717 * RX-FIFO, as many as 55 loops have been seen. 1718 */ 1719#define EDMA_MAX_TR_WAIT_LOOPS 1000 1720 1721static u32 edma_residue(struct edma_desc *edesc) 1722{ 1723 bool dst = edesc->direction == DMA_DEV_TO_MEM; 1724 int loop_count = EDMA_MAX_TR_WAIT_LOOPS; 1725 struct edma_chan *echan = edesc->echan; 1726 struct edma_pset *pset = edesc->pset; 1727 dma_addr_t done, pos; 1728 int i; 1729 1730 /* 1731 * We always read the dst/src position from the first RamPar 1732 * pset. That's the one which is active now. 1733 */ 1734 pos = edma_get_position(echan->ecc, echan->slot[0], dst); 1735 1736 /* 1737 * "pos" may represent a transfer request that is still being 1738 * processed by the EDMACC or EDMATC. We will busy wait until 1739 * any one of the situations occurs: 1740 * 1. the DMA hardware is idle 1741 * 2. a new transfer request is setup 1742 * 3. we hit the loop limit 1743 */ 1744 while (edma_read(echan->ecc, EDMA_CCSTAT) & EDMA_CCSTAT_ACTV) { 1745 /* check if a new transfer request is setup */ 1746 if (edma_get_position(echan->ecc, 1747 echan->slot[0], dst) != pos) { 1748 break; 1749 } 1750 1751 if (!--loop_count) { 1752 dev_dbg_ratelimited(echan->vchan.chan.device->dev, 1753 "%s: timeout waiting for PaRAM update\n", 1754 __func__); 1755 break; 1756 } 1757 1758 cpu_relax(); 1759 } 1760 1761 /* 1762 * Cyclic is simple. Just subtract pset[0].addr from pos. 1763 * 1764 * We never update edesc->residue in the cyclic case, so we 1765 * can tell the remaining room to the end of the circular 1766 * buffer. 1767 */ 1768 if (edesc->cyclic) { 1769 done = pos - pset->addr; 1770 edesc->residue_stat = edesc->residue - done; 1771 return edesc->residue_stat; 1772 } 1773 1774 /* 1775 * For SG operation we catch up with the last processed 1776 * status. 1777 */ 1778 pset += edesc->processed_stat; 1779 1780 for (i = edesc->processed_stat; i < edesc->processed; i++, pset++) { 1781 /* 1782 * If we are inside this pset address range, we know 1783 * this is the active one. Get the current delta and 1784 * stop walking the psets. 1785 */ 1786 if (pos >= pset->addr && pos < pset->addr + pset->len) 1787 return edesc->residue_stat - (pos - pset->addr); 1788 1789 /* Otherwise mark it done and update residue_stat. */ 1790 edesc->processed_stat++; 1791 edesc->residue_stat -= pset->len; 1792 } 1793 return edesc->residue_stat; 1794} 1795 1796/* Check request completion status */ 1797static enum dma_status edma_tx_status(struct dma_chan *chan, 1798 dma_cookie_t cookie, 1799 struct dma_tx_state *txstate) 1800{ 1801 struct edma_chan *echan = to_edma_chan(chan); 1802 struct virt_dma_desc *vdesc; 1803 enum dma_status ret; 1804 unsigned long flags; 1805 1806 ret = dma_cookie_status(chan, cookie, txstate); 1807 if (ret == DMA_COMPLETE || !txstate) 1808 return ret; 1809 1810 spin_lock_irqsave(&echan->vchan.lock, flags); 1811 if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie) 1812 txstate->residue = edma_residue(echan->edesc); 1813 else if ((vdesc = vchan_find_desc(&echan->vchan, cookie))) 1814 txstate->residue = to_edma_desc(&vdesc->tx)->residue; 1815 spin_unlock_irqrestore(&echan->vchan.lock, flags); 1816 1817 return ret; 1818} 1819 1820static bool edma_is_memcpy_channel(int ch_num, s32 *memcpy_channels) 1821{ 1822 if (!memcpy_channels) 1823 return false; 1824 while (*memcpy_channels != -1) { 1825 if (*memcpy_channels == ch_num) 1826 return true; 1827 memcpy_channels++; 1828 } 1829 return false; 1830} 1831 1832#define EDMA_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ 1833 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ 1834 BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \ 1835 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)) 1836 1837static void edma_dma_init(struct edma_cc *ecc, bool legacy_mode) 1838{ 1839 struct dma_device *s_ddev = &ecc->dma_slave; 1840 struct dma_device *m_ddev = NULL; 1841 s32 *memcpy_channels = ecc->info->memcpy_channels; 1842 int i, j; 1843 1844 dma_cap_zero(s_ddev->cap_mask); 1845 dma_cap_set(DMA_SLAVE, s_ddev->cap_mask); 1846 dma_cap_set(DMA_CYCLIC, s_ddev->cap_mask); 1847 if (ecc->legacy_mode && !memcpy_channels) { 1848 dev_warn(ecc->dev, 1849 "Legacy memcpy is enabled, things might not work\n"); 1850 1851 dma_cap_set(DMA_MEMCPY, s_ddev->cap_mask); 1852 s_ddev->device_prep_dma_memcpy = edma_prep_dma_memcpy; 1853 s_ddev->directions = BIT(DMA_MEM_TO_MEM); 1854 } 1855 1856 s_ddev->device_prep_slave_sg = edma_prep_slave_sg; 1857 s_ddev->device_prep_dma_cyclic = edma_prep_dma_cyclic; 1858 s_ddev->device_alloc_chan_resources = edma_alloc_chan_resources; 1859 s_ddev->device_free_chan_resources = edma_free_chan_resources; 1860 s_ddev->device_issue_pending = edma_issue_pending; 1861 s_ddev->device_tx_status = edma_tx_status; 1862 s_ddev->device_config = edma_slave_config; 1863 s_ddev->device_pause = edma_dma_pause; 1864 s_ddev->device_resume = edma_dma_resume; 1865 s_ddev->device_terminate_all = edma_terminate_all; 1866 s_ddev->device_synchronize = edma_synchronize; 1867 1868 s_ddev->src_addr_widths = EDMA_DMA_BUSWIDTHS; 1869 s_ddev->dst_addr_widths = EDMA_DMA_BUSWIDTHS; 1870 s_ddev->directions |= (BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV)); 1871 s_ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; 1872 s_ddev->max_burst = SZ_32K - 1; /* CIDX: 16bit signed */ 1873 1874 s_ddev->dev = ecc->dev; 1875 INIT_LIST_HEAD(&s_ddev->channels); 1876 1877 if (memcpy_channels) { 1878 m_ddev = devm_kzalloc(ecc->dev, sizeof(*m_ddev), GFP_KERNEL); 1879 ecc->dma_memcpy = m_ddev; 1880 1881 dma_cap_zero(m_ddev->cap_mask); 1882 dma_cap_set(DMA_MEMCPY, m_ddev->cap_mask); 1883 1884 m_ddev->device_prep_dma_memcpy = edma_prep_dma_memcpy; 1885 m_ddev->device_alloc_chan_resources = edma_alloc_chan_resources; 1886 m_ddev->device_free_chan_resources = edma_free_chan_resources; 1887 m_ddev->device_issue_pending = edma_issue_pending; 1888 m_ddev->device_tx_status = edma_tx_status; 1889 m_ddev->device_config = edma_slave_config; 1890 m_ddev->device_pause = edma_dma_pause; 1891 m_ddev->device_resume = edma_dma_resume; 1892 m_ddev->device_terminate_all = edma_terminate_all; 1893 m_ddev->device_synchronize = edma_synchronize; 1894 1895 m_ddev->src_addr_widths = EDMA_DMA_BUSWIDTHS; 1896 m_ddev->dst_addr_widths = EDMA_DMA_BUSWIDTHS; 1897 m_ddev->directions = BIT(DMA_MEM_TO_MEM); 1898 m_ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; 1899 1900 m_ddev->dev = ecc->dev; 1901 INIT_LIST_HEAD(&m_ddev->channels); 1902 } else if (!ecc->legacy_mode) { 1903 dev_info(ecc->dev, "memcpy is disabled\n"); 1904 } 1905 1906 for (i = 0; i < ecc->num_channels; i++) { 1907 struct edma_chan *echan = &ecc->slave_chans[i]; 1908 echan->ch_num = EDMA_CTLR_CHAN(ecc->id, i); 1909 echan->ecc = ecc; 1910 echan->vchan.desc_free = edma_desc_free; 1911 1912 if (m_ddev && edma_is_memcpy_channel(i, memcpy_channels)) 1913 vchan_init(&echan->vchan, m_ddev); 1914 else 1915 vchan_init(&echan->vchan, s_ddev); 1916 1917 INIT_LIST_HEAD(&echan->node); 1918 for (j = 0; j < EDMA_MAX_SLOTS; j++) 1919 echan->slot[j] = -1; 1920 } 1921} 1922 1923static int edma_setup_from_hw(struct device *dev, struct edma_soc_info *pdata, 1924 struct edma_cc *ecc) 1925{ 1926 int i; 1927 u32 value, cccfg; 1928 s8 (*queue_priority_map)[2]; 1929 1930 /* Decode the eDMA3 configuration from CCCFG register */ 1931 cccfg = edma_read(ecc, EDMA_CCCFG); 1932 1933 value = GET_NUM_REGN(cccfg); 1934 ecc->num_region = BIT(value); 1935 1936 value = GET_NUM_DMACH(cccfg); 1937 ecc->num_channels = BIT(value + 1); 1938 1939 value = GET_NUM_QDMACH(cccfg); 1940 ecc->num_qchannels = value * 2; 1941 1942 value = GET_NUM_PAENTRY(cccfg); 1943 ecc->num_slots = BIT(value + 4); 1944 1945 value = GET_NUM_EVQUE(cccfg); 1946 ecc->num_tc = value + 1; 1947 1948 ecc->chmap_exist = (cccfg & CHMAP_EXIST) ? true : false; 1949 1950 dev_dbg(dev, "eDMA3 CC HW configuration (cccfg: 0x%08x):\n", cccfg); 1951 dev_dbg(dev, "num_region: %u\n", ecc->num_region); 1952 dev_dbg(dev, "num_channels: %u\n", ecc->num_channels); 1953 dev_dbg(dev, "num_qchannels: %u\n", ecc->num_qchannels); 1954 dev_dbg(dev, "num_slots: %u\n", ecc->num_slots); 1955 dev_dbg(dev, "num_tc: %u\n", ecc->num_tc); 1956 dev_dbg(dev, "chmap_exist: %s\n", ecc->chmap_exist ? "yes" : "no"); 1957 1958 /* Nothing need to be done if queue priority is provided */ 1959 if (pdata->queue_priority_mapping) 1960 return 0; 1961 1962 /* 1963 * Configure TC/queue priority as follows: 1964 * Q0 - priority 0 1965 * Q1 - priority 1 1966 * Q2 - priority 2 1967 * ... 1968 * The meaning of priority numbers: 0 highest priority, 7 lowest 1969 * priority. So Q0 is the highest priority queue and the last queue has 1970 * the lowest priority. 1971 */ 1972 queue_priority_map = devm_kcalloc(dev, ecc->num_tc + 1, sizeof(s8), 1973 GFP_KERNEL); 1974 if (!queue_priority_map) 1975 return -ENOMEM; 1976 1977 for (i = 0; i < ecc->num_tc; i++) { 1978 queue_priority_map[i][0] = i; 1979 queue_priority_map[i][1] = i; 1980 } 1981 queue_priority_map[i][0] = -1; 1982 queue_priority_map[i][1] = -1; 1983 1984 pdata->queue_priority_mapping = queue_priority_map; 1985 /* Default queue has the lowest priority */ 1986 pdata->default_queue = i - 1; 1987 1988 return 0; 1989} 1990 1991#if IS_ENABLED(CONFIG_OF) 1992static int edma_xbar_event_map(struct device *dev, struct edma_soc_info *pdata, 1993 size_t sz) 1994{ 1995 const char pname[] = "ti,edma-xbar-event-map"; 1996 struct resource res; 1997 void __iomem *xbar; 1998 s16 (*xbar_chans)[2]; 1999 size_t nelm = sz / sizeof(s16); 2000 u32 shift, offset, mux; 2001 int ret, i; 2002 2003 xbar_chans = devm_kcalloc(dev, nelm + 2, sizeof(s16), GFP_KERNEL); 2004 if (!xbar_chans) 2005 return -ENOMEM; 2006 2007 ret = of_address_to_resource(dev->of_node, 1, &res); 2008 if (ret) 2009 return -ENOMEM; 2010 2011 xbar = devm_ioremap(dev, res.start, resource_size(&res)); 2012 if (!xbar) 2013 return -ENOMEM; 2014 2015 ret = of_property_read_u16_array(dev->of_node, pname, (u16 *)xbar_chans, 2016 nelm); 2017 if (ret) 2018 return -EIO; 2019 2020 /* Invalidate last entry for the other user of this mess */ 2021 nelm >>= 1; 2022 xbar_chans[nelm][0] = -1; 2023 xbar_chans[nelm][1] = -1; 2024 2025 for (i = 0; i < nelm; i++) { 2026 shift = (xbar_chans[i][1] & 0x03) << 3; 2027 offset = xbar_chans[i][1] & 0xfffffffc; 2028 mux = readl(xbar + offset); 2029 mux &= ~(0xff << shift); 2030 mux |= xbar_chans[i][0] << shift; 2031 writel(mux, (xbar + offset)); 2032 } 2033 2034 pdata->xbar_chans = (const s16 (*)[2]) xbar_chans; 2035 return 0; 2036} 2037 2038static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev, 2039 bool legacy_mode) 2040{ 2041 struct edma_soc_info *info; 2042 struct property *prop; 2043 int sz, ret; 2044 2045 info = devm_kzalloc(dev, sizeof(struct edma_soc_info), GFP_KERNEL); 2046 if (!info) 2047 return ERR_PTR(-ENOMEM); 2048 2049 if (legacy_mode) { 2050 prop = of_find_property(dev->of_node, "ti,edma-xbar-event-map", 2051 &sz); 2052 if (prop) { 2053 ret = edma_xbar_event_map(dev, info, sz); 2054 if (ret) 2055 return ERR_PTR(ret); 2056 } 2057 return info; 2058 } 2059 2060 /* Get the list of channels allocated to be used for memcpy */ 2061 prop = of_find_property(dev->of_node, "ti,edma-memcpy-channels", &sz); 2062 if (prop) { 2063 const char pname[] = "ti,edma-memcpy-channels"; 2064 size_t nelm = sz / sizeof(s32); 2065 s32 *memcpy_ch; 2066 2067 memcpy_ch = devm_kcalloc(dev, nelm + 1, sizeof(s32), 2068 GFP_KERNEL); 2069 if (!memcpy_ch) 2070 return ERR_PTR(-ENOMEM); 2071 2072 ret = of_property_read_u32_array(dev->of_node, pname, 2073 (u32 *)memcpy_ch, nelm); 2074 if (ret) 2075 return ERR_PTR(ret); 2076 2077 memcpy_ch[nelm] = -1; 2078 info->memcpy_channels = memcpy_ch; 2079 } 2080 2081 prop = of_find_property(dev->of_node, "ti,edma-reserved-slot-ranges", 2082 &sz); 2083 if (prop) { 2084 const char pname[] = "ti,edma-reserved-slot-ranges"; 2085 u32 (*tmp)[2]; 2086 s16 (*rsv_slots)[2]; 2087 size_t nelm = sz / sizeof(*tmp); 2088 struct edma_rsv_info *rsv_info; 2089 int i; 2090 2091 if (!nelm) 2092 return info; 2093 2094 tmp = kcalloc(nelm, sizeof(*tmp), GFP_KERNEL); 2095 if (!tmp) 2096 return ERR_PTR(-ENOMEM); 2097 2098 rsv_info = devm_kzalloc(dev, sizeof(*rsv_info), GFP_KERNEL); 2099 if (!rsv_info) { 2100 kfree(tmp); 2101 return ERR_PTR(-ENOMEM); 2102 } 2103 2104 rsv_slots = devm_kcalloc(dev, nelm + 1, sizeof(*rsv_slots), 2105 GFP_KERNEL); 2106 if (!rsv_slots) { 2107 kfree(tmp); 2108 return ERR_PTR(-ENOMEM); 2109 } 2110 2111 ret = of_property_read_u32_array(dev->of_node, pname, 2112 (u32 *)tmp, nelm * 2); 2113 if (ret) { 2114 kfree(tmp); 2115 return ERR_PTR(ret); 2116 } 2117 2118 for (i = 0; i < nelm; i++) { 2119 rsv_slots[i][0] = tmp[i][0]; 2120 rsv_slots[i][1] = tmp[i][1]; 2121 } 2122 rsv_slots[nelm][0] = -1; 2123 rsv_slots[nelm][1] = -1; 2124 2125 info->rsv = rsv_info; 2126 info->rsv->rsv_slots = (const s16 (*)[2])rsv_slots; 2127 2128 kfree(tmp); 2129 } 2130 2131 return info; 2132} 2133 2134static struct dma_chan *of_edma_xlate(struct of_phandle_args *dma_spec, 2135 struct of_dma *ofdma) 2136{ 2137 struct edma_cc *ecc = ofdma->of_dma_data; 2138 struct dma_chan *chan = NULL; 2139 struct edma_chan *echan; 2140 int i; 2141 2142 if (!ecc || dma_spec->args_count < 1) 2143 return NULL; 2144 2145 for (i = 0; i < ecc->num_channels; i++) { 2146 echan = &ecc->slave_chans[i]; 2147 if (echan->ch_num == dma_spec->args[0]) { 2148 chan = &echan->vchan.chan; 2149 break; 2150 } 2151 } 2152 2153 if (!chan) 2154 return NULL; 2155 2156 if (echan->ecc->legacy_mode && dma_spec->args_count == 1) 2157 goto out; 2158 2159 if (!echan->ecc->legacy_mode && dma_spec->args_count == 2 && 2160 dma_spec->args[1] < echan->ecc->num_tc) { 2161 echan->tc = &echan->ecc->tc_list[dma_spec->args[1]]; 2162 goto out; 2163 } 2164 2165 return NULL; 2166out: 2167 /* The channel is going to be used as HW synchronized */ 2168 echan->hw_triggered = true; 2169 return dma_get_slave_channel(chan); 2170} 2171#else 2172static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev, 2173 bool legacy_mode) 2174{ 2175 return ERR_PTR(-EINVAL); 2176} 2177 2178static struct dma_chan *of_edma_xlate(struct of_phandle_args *dma_spec, 2179 struct of_dma *ofdma) 2180{ 2181 return NULL; 2182} 2183#endif 2184 2185static int edma_probe(struct platform_device *pdev) 2186{ 2187 struct edma_soc_info *info = pdev->dev.platform_data; 2188 s8 (*queue_priority_mapping)[2]; 2189 int i, off, ln; 2190 const s16 (*rsv_slots)[2]; 2191 const s16 (*xbar_chans)[2]; 2192 int irq; 2193 char *irq_name; 2194 struct resource *mem; 2195 struct device_node *node = pdev->dev.of_node; 2196 struct device *dev = &pdev->dev; 2197 struct edma_cc *ecc; 2198 bool legacy_mode = true; 2199 int ret; 2200 2201 if (node) { 2202 const struct of_device_id *match; 2203 2204 match = of_match_node(edma_of_ids, node); 2205 if (match && (*(u32 *)match->data) == EDMA_BINDING_TPCC) 2206 legacy_mode = false; 2207 2208 info = edma_setup_info_from_dt(dev, legacy_mode); 2209 if (IS_ERR(info)) { 2210 dev_err(dev, "failed to get DT data\n"); 2211 return PTR_ERR(info); 2212 } 2213 } 2214 2215 if (!info) 2216 return -ENODEV; 2217 2218 pm_runtime_enable(dev); 2219 ret = pm_runtime_get_sync(dev); 2220 if (ret < 0) { 2221 dev_err(dev, "pm_runtime_get_sync() failed\n"); 2222 return ret; 2223 } 2224 2225 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); 2226 if (ret) 2227 return ret; 2228 2229 ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL); 2230 if (!ecc) 2231 return -ENOMEM; 2232 2233 ecc->dev = dev; 2234 ecc->id = pdev->id; 2235 ecc->legacy_mode = legacy_mode; 2236 /* When booting with DT the pdev->id is -1 */ 2237 if (ecc->id < 0) 2238 ecc->id = 0; 2239 2240 mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "edma3_cc"); 2241 if (!mem) { 2242 dev_dbg(dev, "mem resource not found, using index 0\n"); 2243 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2244 if (!mem) { 2245 dev_err(dev, "no mem resource?\n"); 2246 return -ENODEV; 2247 } 2248 } 2249 ecc->base = devm_ioremap_resource(dev, mem); 2250 if (IS_ERR(ecc->base)) 2251 return PTR_ERR(ecc->base); 2252 2253 platform_set_drvdata(pdev, ecc); 2254 2255 /* Get eDMA3 configuration from IP */ 2256 ret = edma_setup_from_hw(dev, info, ecc); 2257 if (ret) 2258 return ret; 2259 2260 /* Allocate memory based on the information we got from the IP */ 2261 ecc->slave_chans = devm_kcalloc(dev, ecc->num_channels, 2262 sizeof(*ecc->slave_chans), GFP_KERNEL); 2263 if (!ecc->slave_chans) 2264 return -ENOMEM; 2265 2266 ecc->slot_inuse = devm_kcalloc(dev, BITS_TO_LONGS(ecc->num_slots), 2267 sizeof(unsigned long), GFP_KERNEL); 2268 if (!ecc->slot_inuse) 2269 return -ENOMEM; 2270 2271 ecc->default_queue = info->default_queue; 2272 2273 for (i = 0; i < ecc->num_slots; i++) 2274 edma_write_slot(ecc, i, &dummy_paramset); 2275 2276 if (info->rsv) { 2277 /* Set the reserved slots in inuse list */ 2278 rsv_slots = info->rsv->rsv_slots; 2279 if (rsv_slots) { 2280 for (i = 0; rsv_slots[i][0] != -1; i++) { 2281 off = rsv_slots[i][0]; 2282 ln = rsv_slots[i][1]; 2283 edma_set_bits(off, ln, ecc->slot_inuse); 2284 } 2285 } 2286 } 2287 2288 /* Clear the xbar mapped channels in unused list */ 2289 xbar_chans = info->xbar_chans; 2290 if (xbar_chans) { 2291 for (i = 0; xbar_chans[i][1] != -1; i++) { 2292 off = xbar_chans[i][1]; 2293 } 2294 } 2295 2296 irq = platform_get_irq_byname(pdev, "edma3_ccint"); 2297 if (irq < 0 && node) 2298 irq = irq_of_parse_and_map(node, 0); 2299 2300 if (irq >= 0) { 2301 irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccint", 2302 dev_name(dev)); 2303 ret = devm_request_irq(dev, irq, dma_irq_handler, 0, irq_name, 2304 ecc); 2305 if (ret) { 2306 dev_err(dev, "CCINT (%d) failed --> %d\n", irq, ret); 2307 return ret; 2308 } 2309 ecc->ccint = irq; 2310 } 2311 2312 irq = platform_get_irq_byname(pdev, "edma3_ccerrint"); 2313 if (irq < 0 && node) 2314 irq = irq_of_parse_and_map(node, 2); 2315 2316 if (irq >= 0) { 2317 irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccerrint", 2318 dev_name(dev)); 2319 ret = devm_request_irq(dev, irq, dma_ccerr_handler, 0, irq_name, 2320 ecc); 2321 if (ret) { 2322 dev_err(dev, "CCERRINT (%d) failed --> %d\n", irq, ret); 2323 return ret; 2324 } 2325 ecc->ccerrint = irq; 2326 } 2327 2328 ecc->dummy_slot = edma_alloc_slot(ecc, EDMA_SLOT_ANY); 2329 if (ecc->dummy_slot < 0) { 2330 dev_err(dev, "Can't allocate PaRAM dummy slot\n"); 2331 return ecc->dummy_slot; 2332 } 2333 2334 queue_priority_mapping = info->queue_priority_mapping; 2335 2336 if (!ecc->legacy_mode) { 2337 int lowest_priority = 0; 2338 struct of_phandle_args tc_args; 2339 2340 ecc->tc_list = devm_kcalloc(dev, ecc->num_tc, 2341 sizeof(*ecc->tc_list), GFP_KERNEL); 2342 if (!ecc->tc_list) 2343 return -ENOMEM; 2344 2345 for (i = 0;; i++) { 2346 ret = of_parse_phandle_with_fixed_args(node, "ti,tptcs", 2347 1, i, &tc_args); 2348 if (ret || i == ecc->num_tc) 2349 break; 2350 2351 ecc->tc_list[i].node = tc_args.np; 2352 ecc->tc_list[i].id = i; 2353 queue_priority_mapping[i][1] = tc_args.args[0]; 2354 if (queue_priority_mapping[i][1] > lowest_priority) { 2355 lowest_priority = queue_priority_mapping[i][1]; 2356 info->default_queue = i; 2357 } 2358 } 2359 } 2360 2361 /* Event queue priority mapping */ 2362 for (i = 0; queue_priority_mapping[i][0] != -1; i++) 2363 edma_assign_priority_to_queue(ecc, queue_priority_mapping[i][0], 2364 queue_priority_mapping[i][1]); 2365 2366 for (i = 0; i < ecc->num_region; i++) { 2367 edma_write_array2(ecc, EDMA_DRAE, i, 0, 0x0); 2368 edma_write_array2(ecc, EDMA_DRAE, i, 1, 0x0); 2369 edma_write_array(ecc, EDMA_QRAE, i, 0x0); 2370 } 2371 ecc->info = info; 2372 2373 /* Init the dma device and channels */ 2374 edma_dma_init(ecc, legacy_mode); 2375 2376 for (i = 0; i < ecc->num_channels; i++) { 2377 /* Assign all channels to the default queue */ 2378 edma_assign_channel_eventq(&ecc->slave_chans[i], 2379 info->default_queue); 2380 /* Set entry slot to the dummy slot */ 2381 edma_set_chmap(&ecc->slave_chans[i], ecc->dummy_slot); 2382 } 2383 2384 ecc->dma_slave.filter.map = info->slave_map; 2385 ecc->dma_slave.filter.mapcnt = info->slavecnt; 2386 ecc->dma_slave.filter.fn = edma_filter_fn; 2387 2388 ret = dma_async_device_register(&ecc->dma_slave); 2389 if (ret) { 2390 dev_err(dev, "slave ddev registration failed (%d)\n", ret); 2391 goto err_reg1; 2392 } 2393 2394 if (ecc->dma_memcpy) { 2395 ret = dma_async_device_register(ecc->dma_memcpy); 2396 if (ret) { 2397 dev_err(dev, "memcpy ddev registration failed (%d)\n", 2398 ret); 2399 dma_async_device_unregister(&ecc->dma_slave); 2400 goto err_reg1; 2401 } 2402 } 2403 2404 if (node) 2405 of_dma_controller_register(node, of_edma_xlate, ecc); 2406 2407 dev_info(dev, "TI EDMA DMA engine driver\n"); 2408 2409 return 0; 2410 2411err_reg1: 2412 edma_free_slot(ecc, ecc->dummy_slot); 2413 return ret; 2414} 2415 2416static void edma_cleanupp_vchan(struct dma_device *dmadev) 2417{ 2418 struct edma_chan *echan, *_echan; 2419 2420 list_for_each_entry_safe(echan, _echan, 2421 &dmadev->channels, vchan.chan.device_node) { 2422 list_del(&echan->vchan.chan.device_node); 2423 tasklet_kill(&echan->vchan.task); 2424 } 2425} 2426 2427static int edma_remove(struct platform_device *pdev) 2428{ 2429 struct device *dev = &pdev->dev; 2430 struct edma_cc *ecc = dev_get_drvdata(dev); 2431 2432 devm_free_irq(dev, ecc->ccint, ecc); 2433 devm_free_irq(dev, ecc->ccerrint, ecc); 2434 2435 edma_cleanupp_vchan(&ecc->dma_slave); 2436 2437 if (dev->of_node) 2438 of_dma_controller_free(dev->of_node); 2439 dma_async_device_unregister(&ecc->dma_slave); 2440 if (ecc->dma_memcpy) 2441 dma_async_device_unregister(ecc->dma_memcpy); 2442 edma_free_slot(ecc, ecc->dummy_slot); 2443 2444 return 0; 2445} 2446 2447#ifdef CONFIG_PM_SLEEP 2448static int edma_pm_suspend(struct device *dev) 2449{ 2450 struct edma_cc *ecc = dev_get_drvdata(dev); 2451 struct edma_chan *echan = ecc->slave_chans; 2452 int i; 2453 2454 for (i = 0; i < ecc->num_channels; i++) { 2455 if (echan[i].alloced) 2456 edma_setup_interrupt(&echan[i], false); 2457 } 2458 2459 return 0; 2460} 2461 2462static int edma_pm_resume(struct device *dev) 2463{ 2464 struct edma_cc *ecc = dev_get_drvdata(dev); 2465 struct edma_chan *echan = ecc->slave_chans; 2466 int i; 2467 s8 (*queue_priority_mapping)[2]; 2468 2469 /* re initialize dummy slot to dummy param set */ 2470 edma_write_slot(ecc, ecc->dummy_slot, &dummy_paramset); 2471 2472 queue_priority_mapping = ecc->info->queue_priority_mapping; 2473 2474 /* Event queue priority mapping */ 2475 for (i = 0; queue_priority_mapping[i][0] != -1; i++) 2476 edma_assign_priority_to_queue(ecc, queue_priority_mapping[i][0], 2477 queue_priority_mapping[i][1]); 2478 2479 for (i = 0; i < ecc->num_channels; i++) { 2480 if (echan[i].alloced) { 2481 /* ensure access through shadow region 0 */ 2482 edma_or_array2(ecc, EDMA_DRAE, 0, i >> 5, 2483 BIT(i & 0x1f)); 2484 2485 edma_setup_interrupt(&echan[i], true); 2486 2487 /* Set up channel -> slot mapping for the entry slot */ 2488 edma_set_chmap(&echan[i], echan[i].slot[0]); 2489 } 2490 } 2491 2492 return 0; 2493} 2494#endif 2495 2496static const struct dev_pm_ops edma_pm_ops = { 2497 SET_LATE_SYSTEM_SLEEP_PM_OPS(edma_pm_suspend, edma_pm_resume) 2498}; 2499 2500static struct platform_driver edma_driver = { 2501 .probe = edma_probe, 2502 .remove = edma_remove, 2503 .driver = { 2504 .name = "edma", 2505 .pm = &edma_pm_ops, 2506 .of_match_table = edma_of_ids, 2507 }, 2508}; 2509 2510static int edma_tptc_probe(struct platform_device *pdev) 2511{ 2512 pm_runtime_enable(&pdev->dev); 2513 return pm_runtime_get_sync(&pdev->dev); 2514} 2515 2516static struct platform_driver edma_tptc_driver = { 2517 .probe = edma_tptc_probe, 2518 .driver = { 2519 .name = "edma3-tptc", 2520 .of_match_table = edma_tptc_of_ids, 2521 }, 2522}; 2523 2524bool edma_filter_fn(struct dma_chan *chan, void *param) 2525{ 2526 bool match = false; 2527 2528 if (chan->device->dev->driver == &edma_driver.driver) { 2529 struct edma_chan *echan = to_edma_chan(chan); 2530 unsigned ch_req = *(unsigned *)param; 2531 if (ch_req == echan->ch_num) { 2532 /* The channel is going to be used as HW synchronized */ 2533 echan->hw_triggered = true; 2534 match = true; 2535 } 2536 } 2537 return match; 2538} 2539EXPORT_SYMBOL(edma_filter_fn); 2540 2541static int edma_init(void) 2542{ 2543 int ret; 2544 2545 ret = platform_driver_register(&edma_tptc_driver); 2546 if (ret) 2547 return ret; 2548 2549 return platform_driver_register(&edma_driver); 2550} 2551subsys_initcall(edma_init); 2552 2553static void __exit edma_exit(void) 2554{ 2555 platform_driver_unregister(&edma_driver); 2556 platform_driver_unregister(&edma_tptc_driver); 2557} 2558module_exit(edma_exit); 2559 2560MODULE_AUTHOR("Matt Porter <matt.porter@linaro.org>"); 2561MODULE_DESCRIPTION("TI EDMA DMA engine driver"); 2562MODULE_LICENSE("GPL v2");