Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

dmaengine: sirf: add CSRatlas7 SoC support

Add support for the new CSR atlas7 SoC. atlas7 has both V1 and V2 DMA IP.
atlas7 DMAv1 is basically moved from marco, which was never
delivered to customers; it is renamed in this patch.
atlas7 DMAv2 supports chained DMA via a chain table; this
patch also adds chain DMA support for atlas7.

atlas7 DMAv1 and DMAv2 co-exist in the same chip. There are some HW
configuration differences (register offsets etc.) from the old prima2 chips,
so we use the compatible string to differentiate the old prima2 from the new
atlas7, which results in different HW settings for each.

Signed-off-by: Hao Liu <Hao.Liu@csr.com>
Signed-off-by: Yanchang Li <Yanchang.Li@csr.com>
Signed-off-by: Barry Song <Baohua.Song@csr.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>

authored by

Hao Liu and committed by
Vinod Koul
0a45dcab 6d0767c1

+336 -90
+2 -1
Documentation/devicetree/bindings/dma/sirfsoc-dma.txt
··· 3 3 See dma.txt first 4 4 5 5 Required properties: 6 - - compatible: Should be "sirf,prima2-dmac" or "sirf,marco-dmac" 6 + - compatible: Should be "sirf,prima2-dmac", "sirf,atlas7-dmac" or 7 + "sirf,atlas7-dmac-v2" 7 8 - reg: Should contain DMA registers location and length. 8 9 - interrupts: Should contain one interrupt shared by all channel 9 10 - #dma-cells: must be <1>. used to represent the number of integer
+334 -89
drivers/dma/sirf-dma.c
··· 23 23 24 24 #include "dmaengine.h" 25 25 26 + #define SIRFSOC_DMA_VER_A7V1 1 27 + #define SIRFSOC_DMA_VER_A7V2 2 28 + #define SIRFSOC_DMA_VER_A6 4 29 + 26 30 #define SIRFSOC_DMA_DESCRIPTORS 16 27 31 #define SIRFSOC_DMA_CHANNELS 16 32 + #define SIRFSOC_DMA_TABLE_NUM 256 28 33 29 34 #define SIRFSOC_DMA_CH_ADDR 0x00 30 35 #define SIRFSOC_DMA_CH_XLEN 0x04 ··· 40 35 #define SIRFSOC_DMA_CH_VALID 0x140 41 36 #define SIRFSOC_DMA_CH_INT 0x144 42 37 #define SIRFSOC_DMA_INT_EN 0x148 43 - #define SIRFSOC_DMA_INT_EN_CLR 0x14C 38 + #define SIRFSOC_DMA_INT_EN_CLR 0x14C 44 39 #define SIRFSOC_DMA_CH_LOOP_CTRL 0x150 45 - #define SIRFSOC_DMA_CH_LOOP_CTRL_CLR 0x15C 40 + #define SIRFSOC_DMA_CH_LOOP_CTRL_CLR 0x154 41 + #define SIRFSOC_DMA_WIDTH_ATLAS7 0x10 42 + #define SIRFSOC_DMA_VALID_ATLAS7 0x14 43 + #define SIRFSOC_DMA_INT_ATLAS7 0x18 44 + #define SIRFSOC_DMA_INT_EN_ATLAS7 0x1c 45 + #define SIRFSOC_DMA_LOOP_CTRL_ATLAS7 0x20 46 + #define SIRFSOC_DMA_CUR_DATA_ADDR 0x34 47 + #define SIRFSOC_DMA_MUL_ATLAS7 0x38 48 + #define SIRFSOC_DMA_CH_LOOP_CTRL_ATLAS7 0x158 49 + #define SIRFSOC_DMA_CH_LOOP_CTRL_CLR_ATLAS7 0x15C 50 + #define SIRFSOC_DMA_IOBG_SCMD_EN 0x800 51 + #define SIRFSOC_DMA_EARLY_RESP_SET 0x818 52 + #define SIRFSOC_DMA_EARLY_RESP_CLR 0x81C 46 53 47 54 #define SIRFSOC_DMA_MODE_CTRL_BIT 4 48 55 #define SIRFSOC_DMA_DIR_CTRL_BIT 5 56 + #define SIRFSOC_DMA_MODE_CTRL_BIT_ATLAS7 2 57 + #define SIRFSOC_DMA_CHAIN_CTRL_BIT_ATLAS7 3 58 + #define SIRFSOC_DMA_DIR_CTRL_BIT_ATLAS7 4 59 + #define SIRFSOC_DMA_TAB_NUM_ATLAS7 7 60 + #define SIRFSOC_DMA_CHAIN_INT_BIT_ATLAS7 5 61 + #define SIRFSOC_DMA_CHAIN_FLAG_SHIFT_ATLAS7 25 62 + #define SIRFSOC_DMA_CHAIN_ADDR_SHIFT 32 63 + 64 + #define SIRFSOC_DMA_INT_FINI_INT_ATLAS7 BIT(0) 65 + #define SIRFSOC_DMA_INT_CNT_INT_ATLAS7 BIT(1) 66 + #define SIRFSOC_DMA_INT_PAU_INT_ATLAS7 BIT(2) 67 + #define SIRFSOC_DMA_INT_LOOP_INT_ATLAS7 BIT(3) 68 + #define SIRFSOC_DMA_INT_INV_INT_ATLAS7 BIT(4) 69 + #define SIRFSOC_DMA_INT_END_INT_ATLAS7 BIT(5) 70 + #define 
SIRFSOC_DMA_INT_ALL_ATLAS7 0x3F 49 71 50 72 /* xlen and dma_width register is in 4 bytes boundary */ 51 73 #define SIRFSOC_DMA_WORD_LEN 4 74 + #define SIRFSOC_DMA_XLEN_MAX_V1 0x800 75 + #define SIRFSOC_DMA_XLEN_MAX_V2 0x1000 52 76 53 77 struct sirfsoc_dma_desc { 54 78 struct dma_async_tx_descriptor desc; ··· 90 56 int width; /* DMA width */ 91 57 int dir; 92 58 bool cyclic; /* is loop DMA? */ 59 + bool chain; /* is chain DMA? */ 93 60 u32 addr; /* DMA buffer address */ 61 + u64 chain_table[SIRFSOC_DMA_TABLE_NUM]; /* chain tbl */ 94 62 }; 95 63 96 64 struct sirfsoc_dma_chan { ··· 123 87 void __iomem *base; 124 88 int irq; 125 89 struct clk *clk; 126 - bool is_marco; 90 + int type; 91 + void (*exec_desc)(struct sirfsoc_dma_desc *sdesc, 92 + int cid, int burst_mode, void __iomem *base); 127 93 struct sirfsoc_dma_regs regs_save; 94 + }; 95 + 96 + struct sirfsoc_dmadata { 97 + void (*exec)(struct sirfsoc_dma_desc *sdesc, 98 + int cid, int burst_mode, void __iomem *base); 99 + int type; 100 + }; 101 + 102 + enum sirfsoc_dma_chain_flag { 103 + SIRFSOC_DMA_CHAIN_NORMAL = 0x01, 104 + SIRFSOC_DMA_CHAIN_PAUSE = 0x02, 105 + SIRFSOC_DMA_CHAIN_LOOP = 0x03, 106 + SIRFSOC_DMA_CHAIN_END = 0x04 128 107 }; 129 108 130 109 #define DRV_NAME "sirfsoc_dma" ··· 160 109 return container_of(schan, struct sirfsoc_dma, channels[c->chan_id]); 161 110 } 162 111 112 + static void sirfsoc_dma_execute_hw_a7v2(struct sirfsoc_dma_desc *sdesc, 113 + int cid, int burst_mode, void __iomem *base) 114 + { 115 + if (sdesc->chain) { 116 + /* DMA v2 HW chain mode */ 117 + writel_relaxed((sdesc->dir << SIRFSOC_DMA_DIR_CTRL_BIT_ATLAS7) | 118 + (sdesc->chain << 119 + SIRFSOC_DMA_CHAIN_CTRL_BIT_ATLAS7) | 120 + (0x8 << SIRFSOC_DMA_TAB_NUM_ATLAS7) | 0x3, 121 + base + SIRFSOC_DMA_CH_CTRL); 122 + } else { 123 + /* DMA v2 legacy mode */ 124 + writel_relaxed(sdesc->xlen, base + SIRFSOC_DMA_CH_XLEN); 125 + writel_relaxed(sdesc->ylen, base + SIRFSOC_DMA_CH_YLEN); 126 + writel_relaxed(sdesc->width, base + 
SIRFSOC_DMA_WIDTH_ATLAS7); 127 + writel_relaxed((sdesc->width*((sdesc->ylen+1)>>1)), 128 + base + SIRFSOC_DMA_MUL_ATLAS7); 129 + writel_relaxed((sdesc->dir << SIRFSOC_DMA_DIR_CTRL_BIT_ATLAS7) | 130 + (sdesc->chain << 131 + SIRFSOC_DMA_CHAIN_CTRL_BIT_ATLAS7) | 132 + 0x3, base + SIRFSOC_DMA_CH_CTRL); 133 + } 134 + writel_relaxed(sdesc->chain ? SIRFSOC_DMA_INT_END_INT_ATLAS7 : 135 + (SIRFSOC_DMA_INT_FINI_INT_ATLAS7 | 136 + SIRFSOC_DMA_INT_LOOP_INT_ATLAS7), 137 + base + SIRFSOC_DMA_INT_EN_ATLAS7); 138 + writel(sdesc->addr, base + SIRFSOC_DMA_CH_ADDR); 139 + if (sdesc->cyclic) 140 + writel(0x10001, base + SIRFSOC_DMA_LOOP_CTRL_ATLAS7); 141 + } 142 + 143 + static void sirfsoc_dma_execute_hw_a7v1(struct sirfsoc_dma_desc *sdesc, 144 + int cid, int burst_mode, void __iomem *base) 145 + { 146 + writel_relaxed(1, base + SIRFSOC_DMA_IOBG_SCMD_EN); 147 + writel_relaxed((1 << cid), base + SIRFSOC_DMA_EARLY_RESP_SET); 148 + writel_relaxed(sdesc->width, base + SIRFSOC_DMA_WIDTH_0 + cid * 4); 149 + writel_relaxed(cid | (burst_mode << SIRFSOC_DMA_MODE_CTRL_BIT) | 150 + (sdesc->dir << SIRFSOC_DMA_DIR_CTRL_BIT), 151 + base + cid * 0x10 + SIRFSOC_DMA_CH_CTRL); 152 + writel_relaxed(sdesc->xlen, base + cid * 0x10 + SIRFSOC_DMA_CH_XLEN); 153 + writel_relaxed(sdesc->ylen, base + cid * 0x10 + SIRFSOC_DMA_CH_YLEN); 154 + writel_relaxed(readl_relaxed(base + SIRFSOC_DMA_INT_EN) | 155 + (1 << cid), base + SIRFSOC_DMA_INT_EN); 156 + writel(sdesc->addr >> 2, base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR); 157 + if (sdesc->cyclic) { 158 + writel((1 << cid) | 1 << (cid + 16) | 159 + readl_relaxed(base + SIRFSOC_DMA_CH_LOOP_CTRL_ATLAS7), 160 + base + SIRFSOC_DMA_CH_LOOP_CTRL_ATLAS7); 161 + } 162 + 163 + } 164 + 165 + static void sirfsoc_dma_execute_hw_a6(struct sirfsoc_dma_desc *sdesc, 166 + int cid, int burst_mode, void __iomem *base) 167 + { 168 + writel_relaxed(sdesc->width, base + SIRFSOC_DMA_WIDTH_0 + cid * 4); 169 + writel_relaxed(cid | (burst_mode << SIRFSOC_DMA_MODE_CTRL_BIT) | 170 + (sdesc->dir 
<< SIRFSOC_DMA_DIR_CTRL_BIT), 171 + base + cid * 0x10 + SIRFSOC_DMA_CH_CTRL); 172 + writel_relaxed(sdesc->xlen, base + cid * 0x10 + SIRFSOC_DMA_CH_XLEN); 173 + writel_relaxed(sdesc->ylen, base + cid * 0x10 + SIRFSOC_DMA_CH_YLEN); 174 + writel_relaxed(readl_relaxed(base + SIRFSOC_DMA_INT_EN) | 175 + (1 << cid), base + SIRFSOC_DMA_INT_EN); 176 + writel(sdesc->addr >> 2, base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR); 177 + if (sdesc->cyclic) { 178 + writel((1 << cid) | 1 << (cid + 16) | 179 + readl_relaxed(base + SIRFSOC_DMA_CH_LOOP_CTRL), 180 + base + SIRFSOC_DMA_CH_LOOP_CTRL); 181 + } 182 + 183 + } 184 + 163 185 /* Execute all queued DMA descriptors */ 164 186 static void sirfsoc_dma_execute(struct sirfsoc_dma_chan *schan) 165 187 { 166 188 struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan); 167 189 int cid = schan->chan.chan_id; 168 190 struct sirfsoc_dma_desc *sdesc = NULL; 191 + void __iomem *base; 169 192 170 193 /* 171 194 * lock has been held by functions calling this, so we don't hold 172 195 * lock again 173 196 */ 174 - 197 + base = sdma->base; 175 198 sdesc = list_first_entry(&schan->queued, struct sirfsoc_dma_desc, 176 - node); 199 + node); 177 200 /* Move the first queued descriptor to active list */ 178 201 list_move_tail(&sdesc->node, &schan->active); 179 202 203 + if (sdma->type == SIRFSOC_DMA_VER_A7V2) 204 + cid = 0; 205 + 180 206 /* Start the DMA transfer */ 181 - writel_relaxed(sdesc->width, sdma->base + SIRFSOC_DMA_WIDTH_0 + 182 - cid * 4); 183 - writel_relaxed(cid | (schan->mode << SIRFSOC_DMA_MODE_CTRL_BIT) | 184 - (sdesc->dir << SIRFSOC_DMA_DIR_CTRL_BIT), 185 - sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_CTRL); 186 - writel_relaxed(sdesc->xlen, sdma->base + cid * 0x10 + 187 - SIRFSOC_DMA_CH_XLEN); 188 - writel_relaxed(sdesc->ylen, sdma->base + cid * 0x10 + 189 - SIRFSOC_DMA_CH_YLEN); 190 - writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) | 191 - (1 << cid), sdma->base + SIRFSOC_DMA_INT_EN); 207 + sdma->exec_desc(sdesc, cid, 
schan->mode, base); 192 208 193 - /* 194 - * writel has an implict memory write barrier to make sure data is 195 - * flushed into memory before starting DMA 196 - */ 197 - writel(sdesc->addr >> 2, sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR); 198 - 199 - if (sdesc->cyclic) { 200 - writel((1 << cid) | 1 << (cid + 16) | 201 - readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL), 202 - sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL); 209 + if (sdesc->cyclic) 203 210 schan->happened_cyclic = schan->completed_cyclic = 0; 204 - } 205 211 } 206 212 207 213 /* Interrupt handler */ ··· 268 160 struct sirfsoc_dma_chan *schan; 269 161 struct sirfsoc_dma_desc *sdesc = NULL; 270 162 u32 is; 163 + bool chain; 271 164 int ch; 165 + void __iomem *reg; 272 166 273 - is = readl(sdma->base + SIRFSOC_DMA_CH_INT); 274 - while ((ch = fls(is) - 1) >= 0) { 275 - is &= ~(1 << ch); 276 - writel_relaxed(1 << ch, sdma->base + SIRFSOC_DMA_CH_INT); 277 - schan = &sdma->channels[ch]; 167 + switch (sdma->type) { 168 + case SIRFSOC_DMA_VER_A6: 169 + case SIRFSOC_DMA_VER_A7V1: 170 + is = readl(sdma->base + SIRFSOC_DMA_CH_INT); 171 + reg = sdma->base + SIRFSOC_DMA_CH_INT; 172 + while ((ch = fls(is) - 1) >= 0) { 173 + is &= ~(1 << ch); 174 + writel_relaxed(1 << ch, reg); 175 + schan = &sdma->channels[ch]; 176 + spin_lock(&schan->lock); 177 + sdesc = list_first_entry(&schan->active, 178 + struct sirfsoc_dma_desc, node); 179 + if (!sdesc->cyclic) { 180 + /* Execute queued descriptors */ 181 + list_splice_tail_init(&schan->active, 182 + &schan->completed); 183 + dma_cookie_complete(&sdesc->desc); 184 + if (!list_empty(&schan->queued)) 185 + sirfsoc_dma_execute(schan); 186 + } else 187 + schan->happened_cyclic++; 188 + spin_unlock(&schan->lock); 189 + } 190 + break; 278 191 192 + case SIRFSOC_DMA_VER_A7V2: 193 + is = readl(sdma->base + SIRFSOC_DMA_INT_ATLAS7); 194 + 195 + reg = sdma->base + SIRFSOC_DMA_INT_ATLAS7; 196 + writel_relaxed(SIRFSOC_DMA_INT_ALL_ATLAS7, reg); 197 + schan = &sdma->channels[0]; 279 198 
spin_lock(&schan->lock); 280 - 281 - sdesc = list_first_entry(&schan->active, struct sirfsoc_dma_desc, 282 - node); 199 + sdesc = list_first_entry(&schan->active, 200 + struct sirfsoc_dma_desc, node); 283 201 if (!sdesc->cyclic) { 284 - /* Execute queued descriptors */ 285 - list_splice_tail_init(&schan->active, &schan->completed); 286 - if (!list_empty(&schan->queued)) 287 - sirfsoc_dma_execute(schan); 288 - } else 202 + chain = sdesc->chain; 203 + if ((chain && (is & SIRFSOC_DMA_INT_END_INT_ATLAS7)) || 204 + (!chain && 205 + (is & SIRFSOC_DMA_INT_FINI_INT_ATLAS7))) { 206 + /* Execute queued descriptors */ 207 + list_splice_tail_init(&schan->active, 208 + &schan->completed); 209 + dma_cookie_complete(&sdesc->desc); 210 + if (!list_empty(&schan->queued)) 211 + sirfsoc_dma_execute(schan); 212 + } 213 + } else if (sdesc->cyclic && (is & 214 + SIRFSOC_DMA_INT_LOOP_INT_ATLAS7)) 289 215 schan->happened_cyclic++; 290 216 291 217 spin_unlock(&schan->lock); 218 + break; 219 + 220 + default: 221 + break; 292 222 } 293 223 294 224 /* Schedule tasklet */ ··· 373 227 schan->chan.completed_cookie = last_cookie; 374 228 spin_unlock_irqrestore(&schan->lock, flags); 375 229 } else { 376 - /* for cyclic channel, desc is always in active list */ 377 - sdesc = list_first_entry(&schan->active, struct sirfsoc_dma_desc, 378 - node); 379 - 380 - if (!sdesc || (sdesc && !sdesc->cyclic)) { 381 - /* without active cyclic DMA */ 230 + if (list_empty(&schan->active)) { 382 231 spin_unlock_irqrestore(&schan->lock, flags); 383 232 continue; 384 233 } 234 + 235 + /* for cyclic channel, desc is always in active list */ 236 + sdesc = list_first_entry(&schan->active, 237 + struct sirfsoc_dma_desc, node); 385 238 386 239 /* cyclic DMA */ 387 240 happened_cyclic = schan->happened_cyclic; ··· 452 307 453 308 spin_lock_irqsave(&schan->lock, flags); 454 309 455 - if (!sdma->is_marco) { 456 - writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) & 457 - ~(1 << cid), sdma->base + 
SIRFSOC_DMA_INT_EN); 458 - writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL) 459 - & ~((1 << cid) | 1 << (cid + 16)), 460 - sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL); 461 - } else { 310 + switch (sdma->type) { 311 + case SIRFSOC_DMA_VER_A7V1: 462 312 writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_INT_EN_CLR); 463 313 writel_relaxed((1 << cid) | 1 << (cid + 16), 464 - sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL_CLR); 314 + sdma->base + 315 + SIRFSOC_DMA_CH_LOOP_CTRL_CLR_ATLAS7); 316 + writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_VALID); 317 + break; 318 + case SIRFSOC_DMA_VER_A7V2: 319 + writel_relaxed(0, sdma->base + SIRFSOC_DMA_INT_EN_ATLAS7); 320 + writel_relaxed(0, sdma->base + SIRFSOC_DMA_LOOP_CTRL_ATLAS7); 321 + writel_relaxed(0, sdma->base + SIRFSOC_DMA_VALID_ATLAS7); 322 + break; 323 + case SIRFSOC_DMA_VER_A6: 324 + writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) & 325 + ~(1 << cid), sdma->base + SIRFSOC_DMA_INT_EN); 326 + writel_relaxed(readl_relaxed(sdma->base + 327 + SIRFSOC_DMA_CH_LOOP_CTRL) & 328 + ~((1 << cid) | 1 << (cid + 16)), 329 + sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL); 330 + writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_VALID); 331 + break; 332 + default: 333 + break; 465 334 } 466 - 467 - writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_VALID); 468 335 469 336 list_splice_tail_init(&schan->active, &schan->free); 470 337 list_splice_tail_init(&schan->queued, &schan->free); ··· 495 338 496 339 spin_lock_irqsave(&schan->lock, flags); 497 340 498 - if (!sdma->is_marco) 499 - writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL) 500 - & ~((1 << cid) | 1 << (cid + 16)), 501 - sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL); 502 - else 341 + switch (sdma->type) { 342 + case SIRFSOC_DMA_VER_A7V1: 503 343 writel_relaxed((1 << cid) | 1 << (cid + 16), 504 - sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL_CLR); 344 + sdma->base + 345 + SIRFSOC_DMA_CH_LOOP_CTRL_CLR_ATLAS7); 346 + break; 347 + case SIRFSOC_DMA_VER_A7V2: 
348 + writel_relaxed(0, sdma->base + SIRFSOC_DMA_LOOP_CTRL_ATLAS7); 349 + break; 350 + case SIRFSOC_DMA_VER_A6: 351 + writel_relaxed(readl_relaxed(sdma->base + 352 + SIRFSOC_DMA_CH_LOOP_CTRL) & 353 + ~((1 << cid) | 1 << (cid + 16)), 354 + sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL); 355 + break; 356 + 357 + default: 358 + break; 359 + } 505 360 506 361 spin_unlock_irqrestore(&schan->lock, flags); 507 362 ··· 528 359 unsigned long flags; 529 360 530 361 spin_lock_irqsave(&schan->lock, flags); 531 - 532 - if (!sdma->is_marco) 533 - writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL) 534 - | ((1 << cid) | 1 << (cid + 16)), 535 - sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL); 536 - else 362 + switch (sdma->type) { 363 + case SIRFSOC_DMA_VER_A7V1: 537 364 writel_relaxed((1 << cid) | 1 << (cid + 16), 538 - sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL); 365 + sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL_ATLAS7); 366 + break; 367 + case SIRFSOC_DMA_VER_A7V2: 368 + writel_relaxed(0x10001, 369 + sdma->base + SIRFSOC_DMA_LOOP_CTRL_ATLAS7); 370 + break; 371 + case SIRFSOC_DMA_VER_A6: 372 + writel_relaxed(readl_relaxed(sdma->base + 373 + SIRFSOC_DMA_CH_LOOP_CTRL) | 374 + ((1 << cid) | 1 << (cid + 16)), 375 + sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL); 376 + break; 377 + 378 + default: 379 + break; 380 + } 539 381 540 382 spin_unlock_irqrestore(&schan->lock, flags); 541 383 ··· 653 473 654 474 spin_lock_irqsave(&schan->lock, flags); 655 475 656 - sdesc = list_first_entry(&schan->active, struct sirfsoc_dma_desc, 657 - node); 658 - dma_request_bytes = (sdesc->xlen + 1) * (sdesc->ylen + 1) * 659 - (sdesc->width * SIRFSOC_DMA_WORD_LEN); 476 + if (list_empty(&schan->active)) { 477 + ret = dma_cookie_status(chan, cookie, txstate); 478 + dma_set_residue(txstate, 0); 479 + spin_unlock_irqrestore(&schan->lock, flags); 480 + return ret; 481 + } 482 + sdesc = list_first_entry(&schan->active, struct sirfsoc_dma_desc, node); 483 + if (sdesc->cyclic) 484 + dma_request_bytes = (sdesc->xlen + 1) * 
(sdesc->ylen + 1) * 485 + (sdesc->width * SIRFSOC_DMA_WORD_LEN); 486 + else 487 + dma_request_bytes = sdesc->xlen * SIRFSOC_DMA_WORD_LEN; 660 488 661 489 ret = dma_cookie_status(chan, cookie, txstate); 662 - dma_pos = readl_relaxed(sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR) 663 - << 2; 490 + 491 + if (sdma->type == SIRFSOC_DMA_VER_A7V2) 492 + cid = 0; 493 + 494 + if (sdma->type == SIRFSOC_DMA_VER_A7V2) { 495 + dma_pos = readl_relaxed(sdma->base + SIRFSOC_DMA_CUR_DATA_ADDR); 496 + } else { 497 + dma_pos = readl_relaxed( 498 + sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR) << 2; 499 + } 500 + 664 501 residue = dma_request_bytes - (dma_pos - sdesc->addr); 665 502 dma_set_residue(txstate, residue); 666 503 ··· 844 647 struct dma_device *dma; 845 648 struct sirfsoc_dma *sdma; 846 649 struct sirfsoc_dma_chan *schan; 650 + struct sirfsoc_dmadata *data; 847 651 struct resource res; 848 652 ulong regs_start, regs_size; 849 653 u32 id; ··· 855 657 dev_err(dev, "Memory exhausted!\n"); 856 658 return -ENOMEM; 857 659 } 858 - 859 - if (of_device_is_compatible(dn, "sirf,marco-dmac")) 860 - sdma->is_marco = true; 660 + data = (struct sirfsoc_dmadata *) 661 + (of_match_device(op->dev.driver->of_match_table, 662 + &op->dev)->data); 663 + sdma->exec_desc = data->exec; 664 + sdma->type = data->type; 861 665 862 666 if (of_property_read_u32(dn, "cell-index", &id)) { 863 667 dev_err(dev, "Fail to get DMAC index\n"); ··· 1016 816 struct sirfsoc_dma_chan *schan; 1017 817 int ch; 1018 818 int ret; 819 + int count; 820 + u32 int_offset; 1019 821 1020 822 /* 1021 823 * if we were runtime-suspended before, resume to enable clock ··· 1029 827 return ret; 1030 828 } 1031 829 830 + if (sdma->type == SIRFSOC_DMA_VER_A7V2) { 831 + count = 1; 832 + int_offset = SIRFSOC_DMA_INT_EN_ATLAS7; 833 + } else { 834 + count = SIRFSOC_DMA_CHANNELS; 835 + int_offset = SIRFSOC_DMA_INT_EN; 836 + } 837 + 1032 838 /* 1033 839 * DMA controller will lose all registers while suspending 1034 840 * so we need to 
save registers for active channels 1035 841 */ 1036 - for (ch = 0; ch < SIRFSOC_DMA_CHANNELS; ch++) { 842 + for (ch = 0; ch < count; ch++) { 1037 843 schan = &sdma->channels[ch]; 1038 844 if (list_empty(&schan->active)) 1039 845 continue; ··· 1051 841 save->ctrl[ch] = readl_relaxed(sdma->base + 1052 842 ch * 0x10 + SIRFSOC_DMA_CH_CTRL); 1053 843 } 1054 - save->interrupt_en = readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN); 844 + save->interrupt_en = readl_relaxed(sdma->base + int_offset); 1055 845 1056 846 /* Disable clock */ 1057 847 sirfsoc_dma_runtime_suspend(dev); ··· 1067 857 struct sirfsoc_dma_chan *schan; 1068 858 int ch; 1069 859 int ret; 860 + int count; 861 + u32 int_offset; 862 + u32 width_offset; 1070 863 1071 864 /* Enable clock before accessing register */ 1072 865 ret = sirfsoc_dma_runtime_resume(dev); 1073 866 if (ret < 0) 1074 867 return ret; 1075 868 1076 - writel_relaxed(save->interrupt_en, sdma->base + SIRFSOC_DMA_INT_EN); 1077 - for (ch = 0; ch < SIRFSOC_DMA_CHANNELS; ch++) { 869 + if (sdma->type == SIRFSOC_DMA_VER_A7V2) { 870 + count = 1; 871 + int_offset = SIRFSOC_DMA_INT_EN_ATLAS7; 872 + width_offset = SIRFSOC_DMA_WIDTH_ATLAS7; 873 + } else { 874 + count = SIRFSOC_DMA_CHANNELS; 875 + int_offset = SIRFSOC_DMA_INT_EN; 876 + width_offset = SIRFSOC_DMA_WIDTH_0; 877 + } 878 + 879 + writel_relaxed(save->interrupt_en, sdma->base + int_offset); 880 + for (ch = 0; ch < count; ch++) { 1078 881 schan = &sdma->channels[ch]; 1079 882 if (list_empty(&schan->active)) 1080 883 continue; ··· 1095 872 struct sirfsoc_dma_desc, 1096 873 node); 1097 874 writel_relaxed(sdesc->width, 1098 - sdma->base + SIRFSOC_DMA_WIDTH_0 + ch * 4); 875 + sdma->base + width_offset + ch * 4); 1099 876 writel_relaxed(sdesc->xlen, 1100 877 sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_XLEN); 1101 878 writel_relaxed(sdesc->ylen, 1102 879 sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_YLEN); 1103 880 writel_relaxed(save->ctrl[ch], 1104 881 sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_CTRL); 1105 - 
writel_relaxed(sdesc->addr >> 2, 1106 - sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_ADDR); 882 + if (sdma->type == SIRFSOC_DMA_VER_A7V2) { 883 + writel_relaxed(sdesc->addr, 884 + sdma->base + SIRFSOC_DMA_CH_ADDR); 885 + } else { 886 + writel_relaxed(sdesc->addr >> 2, 887 + sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_ADDR); 888 + 889 + } 1107 890 } 1108 891 1109 892 /* if we were runtime-suspended before, suspend again */ ··· 1125 896 SET_SYSTEM_SLEEP_PM_OPS(sirfsoc_dma_pm_suspend, sirfsoc_dma_pm_resume) 1126 897 }; 1127 898 899 + struct sirfsoc_dmadata sirfsoc_dmadata_a6 = { 900 + .exec = sirfsoc_dma_execute_hw_a6, 901 + .type = SIRFSOC_DMA_VER_A6, 902 + }; 903 + 904 + struct sirfsoc_dmadata sirfsoc_dmadata_a7v1 = { 905 + .exec = sirfsoc_dma_execute_hw_a7v1, 906 + .type = SIRFSOC_DMA_VER_A7V1, 907 + }; 908 + 909 + struct sirfsoc_dmadata sirfsoc_dmadata_a7v2 = { 910 + .exec = sirfsoc_dma_execute_hw_a7v2, 911 + .type = SIRFSOC_DMA_VER_A7V2, 912 + }; 913 + 1128 914 static const struct of_device_id sirfsoc_dma_match[] = { 1129 - { .compatible = "sirf,prima2-dmac", }, 1130 - { .compatible = "sirf,marco-dmac", }, 915 + { .compatible = "sirf,prima2-dmac", .data = &sirfsoc_dmadata_a6,}, 916 + { .compatible = "sirf,atlas7-dmac", .data = &sirfsoc_dmadata_a7v1,}, 917 + { .compatible = "sirf,atlas7-dmac-v2", .data = &sirfsoc_dmadata_a7v2,}, 1131 918 {}, 1132 919 }; 1133 920 ··· 1170 925 subsys_initcall(sirfsoc_dma_init); 1171 926 module_exit(sirfsoc_dma_exit); 1172 927 1173 - MODULE_AUTHOR("Rongjun Ying <rongjun.ying@csr.com>, " 1174 - "Barry Song <baohua.song@csr.com>"); 928 + MODULE_AUTHOR("Rongjun Ying <rongjun.ying@csr.com>"); 929 + MODULE_AUTHOR("Barry Song <baohua.song@csr.com>"); 1175 930 MODULE_DESCRIPTION("SIRFSOC DMA control driver"); 1176 931 MODULE_LICENSE("GPL v2");