Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

dmaengine: dw: Remove AVR32 bits from the driver

AVR32 is gone. Now it's time to clean up the driver by removing
leftovers that were used by the AVR32-related code.

Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Acked-by: Vinod Koul <vinod.koul@intel.com>
Signed-off-by: Takashi Iwai <tiwai@suse.de>

Authored by Andy Shevchenko; committed by Takashi Iwai
14bebd01 020c5260

+14 -396
+1 -6
drivers/dma/dw/Kconfig
···
 	tristate
 	select DMA_ENGINE
 
-config DW_DMAC_BIG_ENDIAN_IO
-	bool
-
 config DW_DMAC
 	tristate "Synopsys DesignWare AHB DMA platform driver"
 	select DW_DMAC_CORE
-	select DW_DMAC_BIG_ENDIAN_IO if AVR32
-	default y if CPU_AT32AP7000
 	help
 	  Support the Synopsys DesignWare AHB DMA controller. This
-	  can be integrated in chips such as the Atmel AT32ap7000.
+	  can be integrated in chips such as the Intel Cherrytrail.
 
 config DW_DMAC_PCI
 	tristate "Synopsys DesignWare AHB DMA PCI driver"
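The two dropped lines were standard Kconfig idioms: "select DW_DMAC_BIG_ENDIAN_IO if AVR32" force-enabled the hidden helper symbol whenever the (now removed) AVR32 architecture was the target, and "default y if CPU_AT32AP7000" switched the driver on by default for that SoC. A minimal, hypothetical sketch of the same pattern, with made-up symbol names that are not in the kernel tree:

# Hypothetical illustration only; DEMO_* symbols do not exist upstream.
config DEMO_QUIRK
	bool	# hidden helper: no prompt, reachable only via 'select'

config DEMO_DRIVER
	tristate "Demo driver"
	select DEMO_QUIRK if DEMO_ARCH	# force the quirk on one architecture
	default y if DEMO_SOC		# default-enable on one SoC

Because DW_DMAC_BIG_ENDIAN_IO had no prompt, deleting its last 'select' leaves the symbol unreachable, which is why the whole config entry can go.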
+2 -330
drivers/dma/dw/core.c
···
 		dwc_descriptor_complete(dwc, bad_desc, true);
 }
 
-/* --------------------- Cyclic DMA API extensions -------------------- */
-
-dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan)
-{
-	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
-	return channel_readl(dwc, SAR);
-}
-EXPORT_SYMBOL(dw_dma_get_src_addr);
-
-dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan)
-{
-	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
-	return channel_readl(dwc, DAR);
-}
-EXPORT_SYMBOL(dw_dma_get_dst_addr);
-
-/* Called with dwc->lock held and all DMAC interrupts disabled */
-static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
-		u32 status_block, u32 status_err, u32 status_xfer)
-{
-	unsigned long flags;
-
-	if (status_block & dwc->mask) {
-		void (*callback)(void *param);
-		void *callback_param;
-
-		dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
-				channel_readl(dwc, LLP));
-		dma_writel(dw, CLEAR.BLOCK, dwc->mask);
-
-		callback = dwc->cdesc->period_callback;
-		callback_param = dwc->cdesc->period_callback_param;
-
-		if (callback)
-			callback(callback_param);
-	}
-
-	/*
-	 * Error and transfer complete are highly unlikely, and will most
-	 * likely be due to a configuration error by the user.
-	 */
-	if (unlikely(status_err & dwc->mask) ||
-	    unlikely(status_xfer & dwc->mask)) {
-		unsigned int i;
-
-		dev_err(chan2dev(&dwc->chan),
-			"cyclic DMA unexpected %s interrupt, stopping DMA transfer\n",
-			status_xfer ? "xfer" : "error");
-
-		spin_lock_irqsave(&dwc->lock, flags);
-
-		dwc_dump_chan_regs(dwc);
-
-		dwc_chan_disable(dw, dwc);
-
-		/* Make sure DMA does not restart by loading a new list */
-		channel_writel(dwc, LLP, 0);
-		channel_writel(dwc, CTL_LO, 0);
-		channel_writel(dwc, CTL_HI, 0);
-
-		dma_writel(dw, CLEAR.BLOCK, dwc->mask);
-		dma_writel(dw, CLEAR.ERROR, dwc->mask);
-		dma_writel(dw, CLEAR.XFER, dwc->mask);
-
-		for (i = 0; i < dwc->cdesc->periods; i++)
-			dwc_dump_lli(dwc, dwc->cdesc->desc[i]);
-
-		spin_unlock_irqrestore(&dwc->lock, flags);
-	}
-
-	/* Re-enable interrupts */
-	channel_set_bit(dw, MASK.BLOCK, dwc->mask);
-}
-
-/* ------------------------------------------------------------------------- */
-
 static void dw_dma_tasklet(unsigned long data)
 {
 	struct dw_dma *dw = (struct dw_dma *)data;
 	struct dw_dma_chan *dwc;
-	u32 status_block;
 	u32 status_xfer;
 	u32 status_err;
 	unsigned int i;
 
-	status_block = dma_readl(dw, RAW.BLOCK);
 	status_xfer = dma_readl(dw, RAW.XFER);
 	status_err = dma_readl(dw, RAW.ERROR);
···
 	for (i = 0; i < dw->dma.chancnt; i++) {
 		dwc = &dw->chan[i];
 		if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
-			dwc_handle_cyclic(dw, dwc, status_block, status_err,
-					status_xfer);
+			dev_vdbg(dw->dma.dev, "Cyclic xfer is not implemented\n");
 		else if (status_err & (1 << i))
 			dwc_handle_error(dw, dwc);
 		else if (status_xfer & (1 << i))
···
 	dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
 }
 
-/* --------------------- Cyclic DMA API extensions -------------------- */
-
-/**
- * dw_dma_cyclic_start - start the cyclic DMA transfer
- * @chan: the DMA channel to start
- *
- * Must be called with soft interrupts disabled. Returns zero on success or
- * -errno on failure.
- */
-int dw_dma_cyclic_start(struct dma_chan *chan)
-{
-	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
-	struct dw_dma *dw = to_dw_dma(chan->device);
-	unsigned long flags;
-
-	if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
-		dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n");
-		return -ENODEV;
-	}
-
-	spin_lock_irqsave(&dwc->lock, flags);
-
-	/* Enable interrupts to perform cyclic transfer */
-	channel_set_bit(dw, MASK.BLOCK, dwc->mask);
-
-	dwc_dostart(dwc, dwc->cdesc->desc[0]);
-
-	spin_unlock_irqrestore(&dwc->lock, flags);
-
-	return 0;
-}
-EXPORT_SYMBOL(dw_dma_cyclic_start);
-
-/**
- * dw_dma_cyclic_stop - stop the cyclic DMA transfer
- * @chan: the DMA channel to stop
- *
- * Must be called with soft interrupts disabled.
- */
-void dw_dma_cyclic_stop(struct dma_chan *chan)
-{
-	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
-	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
-	unsigned long flags;
-
-	spin_lock_irqsave(&dwc->lock, flags);
-
-	dwc_chan_disable(dw, dwc);
-
-	spin_unlock_irqrestore(&dwc->lock, flags);
-}
-EXPORT_SYMBOL(dw_dma_cyclic_stop);
-
-/**
- * dw_dma_cyclic_prep - prepare the cyclic DMA transfer
- * @chan: the DMA channel to prepare
- * @buf_addr: physical DMA address where the buffer starts
- * @buf_len: total number of bytes for the entire buffer
- * @period_len: number of bytes for each period
- * @direction: transfer direction, to or from device
- *
- * Must be called before trying to start the transfer. Returns a valid struct
- * dw_cyclic_desc if successful or an ERR_PTR(-errno) if not successful.
- */
-struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
-		dma_addr_t buf_addr, size_t buf_len, size_t period_len,
-		enum dma_transfer_direction direction)
-{
-	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
-	struct dma_slave_config *sconfig = &dwc->dma_sconfig;
-	struct dw_cyclic_desc *cdesc;
-	struct dw_cyclic_desc *retval = NULL;
-	struct dw_desc *desc;
-	struct dw_desc *last = NULL;
-	u8 lms = DWC_LLP_LMS(dwc->dws.m_master);
-	unsigned long was_cyclic;
-	unsigned int reg_width;
-	unsigned int periods;
-	unsigned int i;
-	unsigned long flags;
-
-	spin_lock_irqsave(&dwc->lock, flags);
-	if (dwc->nollp) {
-		spin_unlock_irqrestore(&dwc->lock, flags);
-		dev_dbg(chan2dev(&dwc->chan),
-				"channel doesn't support LLP transfers\n");
-		return ERR_PTR(-EINVAL);
-	}
-
-	if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) {
-		spin_unlock_irqrestore(&dwc->lock, flags);
-		dev_dbg(chan2dev(&dwc->chan),
-				"queue and/or active list are not empty\n");
-		return ERR_PTR(-EBUSY);
-	}
-
-	was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
-	spin_unlock_irqrestore(&dwc->lock, flags);
-	if (was_cyclic) {
-		dev_dbg(chan2dev(&dwc->chan),
-				"channel already prepared for cyclic DMA\n");
-		return ERR_PTR(-EBUSY);
-	}
-
-	retval = ERR_PTR(-EINVAL);
-
-	if (unlikely(!is_slave_direction(direction)))
-		goto out_err;
-
-	dwc->direction = direction;
-
-	if (direction == DMA_MEM_TO_DEV)
-		reg_width = __ffs(sconfig->dst_addr_width);
-	else
-		reg_width = __ffs(sconfig->src_addr_width);
-
-	periods = buf_len / period_len;
-
-	/* Check for too big/unaligned periods and unaligned DMA buffer. */
-	if (period_len > (dwc->block_size << reg_width))
-		goto out_err;
-	if (unlikely(period_len & ((1 << reg_width) - 1)))
-		goto out_err;
-	if (unlikely(buf_addr & ((1 << reg_width) - 1)))
-		goto out_err;
-
-	retval = ERR_PTR(-ENOMEM);
-
-	cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL);
-	if (!cdesc)
-		goto out_err;
-
-	cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL);
-	if (!cdesc->desc)
-		goto out_err_alloc;
-
-	for (i = 0; i < periods; i++) {
-		desc = dwc_desc_get(dwc);
-		if (!desc)
-			goto out_err_desc_get;
-
-		switch (direction) {
-		case DMA_MEM_TO_DEV:
-			lli_write(desc, dar, sconfig->dst_addr);
-			lli_write(desc, sar, buf_addr + period_len * i);
-			lli_write(desc, ctllo, (DWC_DEFAULT_CTLLO(chan)
-				| DWC_CTLL_DST_WIDTH(reg_width)
-				| DWC_CTLL_SRC_WIDTH(reg_width)
-				| DWC_CTLL_DST_FIX
-				| DWC_CTLL_SRC_INC
-				| DWC_CTLL_INT_EN));
-
-			lli_set(desc, ctllo, sconfig->device_fc ?
-					DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
-					DWC_CTLL_FC(DW_DMA_FC_D_M2P));
-
-			break;
-		case DMA_DEV_TO_MEM:
-			lli_write(desc, dar, buf_addr + period_len * i);
-			lli_write(desc, sar, sconfig->src_addr);
-			lli_write(desc, ctllo, (DWC_DEFAULT_CTLLO(chan)
-				| DWC_CTLL_SRC_WIDTH(reg_width)
-				| DWC_CTLL_DST_WIDTH(reg_width)
-				| DWC_CTLL_DST_INC
-				| DWC_CTLL_SRC_FIX
-				| DWC_CTLL_INT_EN));
-
-			lli_set(desc, ctllo, sconfig->device_fc ?
-					DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
-					DWC_CTLL_FC(DW_DMA_FC_D_P2M));
-
-			break;
-		default:
-			break;
-		}
-
-		lli_write(desc, ctlhi, period_len >> reg_width);
-		cdesc->desc[i] = desc;
-
-		if (last)
-			lli_write(last, llp, desc->txd.phys | lms);
-
-		last = desc;
-	}
-
-	/* Let's make a cyclic list */
-	lli_write(last, llp, cdesc->desc[0]->txd.phys | lms);
-
-	dev_dbg(chan2dev(&dwc->chan),
-			"cyclic prepared buf %pad len %zu period %zu periods %d\n",
-			&buf_addr, buf_len, period_len, periods);
-
-	cdesc->periods = periods;
-	dwc->cdesc = cdesc;
-
-	return cdesc;
-
-out_err_desc_get:
-	while (i--)
-		dwc_desc_put(dwc, cdesc->desc[i]);
-out_err_alloc:
-	kfree(cdesc);
-out_err:
-	clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
-	return (struct dw_cyclic_desc *)retval;
-}
-EXPORT_SYMBOL(dw_dma_cyclic_prep);
-
-/**
- * dw_dma_cyclic_free - free a prepared cyclic DMA transfer
- * @chan: the DMA channel to free
- */
-void dw_dma_cyclic_free(struct dma_chan *chan)
-{
-	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
-	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
-	struct dw_cyclic_desc *cdesc = dwc->cdesc;
-	unsigned int i;
-	unsigned long flags;
-
-	dev_dbg(chan2dev(&dwc->chan), "%s\n", __func__);
-
-	if (!cdesc)
-		return;
-
-	spin_lock_irqsave(&dwc->lock, flags);
-
-	dwc_chan_disable(dw, dwc);
-
-	dma_writel(dw, CLEAR.BLOCK, dwc->mask);
-	dma_writel(dw, CLEAR.ERROR, dwc->mask);
-	dma_writel(dw, CLEAR.XFER, dwc->mask);
-
-	spin_unlock_irqrestore(&dwc->lock, flags);
-
-	for (i = 0; i < cdesc->periods; i++)
-		dwc_desc_put(dwc, cdesc->desc[i]);
-
-	kfree(cdesc->desc);
-	kfree(cdesc);
-
-	dwc->cdesc = NULL;
-
-	clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
-}
-EXPORT_SYMBOL(dw_dma_cyclic_free);
-
-/*----------------------------------------------------------------------*/
-
 int dw_dma_probe(struct dw_dma_chip *chip)
 {
 	struct dw_dma_platform_data *pdata;
···
 	if (autocfg) {
 		unsigned int r = DW_DMA_MAX_NR_CHANNELS - i - 1;
 		void __iomem *addr = &__dw_regs(dw)->DWC_PARAMS[r];
-		unsigned int dwc_params = dma_readl_native(addr);
+		unsigned int dwc_params = readl(addr);
 
 		dev_dbg(chip->dev, "DWC_PARAMS[%d]: 0x%08x\n", i,
 			dwc_params);
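Most of the 330 removed lines are a driver-private cyclic API whose only user was AVR32 platform code. Generic dmaengine clients request cyclic (ring-buffer) transfers through the framework call dmaengine_prep_dma_cyclic() instead; this patch does not wire that up for dw, so the tasklet now simply logs that cyclic transfers are unimplemented. For contrast, a minimal hypothetical sketch of the framework path; the function name and parameters below are illustrative, and on this driver the prep call would return NULL:

#include <linux/dmaengine.h>
#include <linux/errno.h>

/*
 * Illustrative only: the framework-level alternative to the removed
 * driver-private helpers (dw_dma_cyclic_prep() and friends). The dw
 * driver does not implement device_prep_dma_cyclic after this patch,
 * so here the prep call would fail.
 */
static int demo_start_cyclic_rx(struct dma_chan *chan, dma_addr_t buf,
				size_t buf_len, size_t period_len)
{
	struct dma_async_tx_descriptor *desc;

	/* Ask the dmaengine core for a cyclic descriptor over the ring */
	desc = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
					 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOSYS;	/* channel has no cyclic support */

	dmaengine_submit(desc);		/* queue the descriptor ... */
	dma_async_issue_pending(chan);	/* ... and start the engine */
	return 0;
}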
+11 -39
drivers/dma/dw/regs.h
···
 	DW_REG(GLOBAL_CFG);
 };
 
-/*
- * Big endian I/O access when reading and writing to the DMA controller
- * registers. This is needed on some platforms, like the Atmel AVR32
- * architecture.
- */
-
-#ifdef CONFIG_DW_DMAC_BIG_ENDIAN_IO
-#define dma_readl_native ioread32be
-#define dma_writel_native iowrite32be
-#else
-#define dma_readl_native readl
-#define dma_writel_native writel
-#endif
-
 /* Bitfields in DW_PARAMS */
 #define DW_PARAMS_NR_CHAN	8	/* number of channels */
 #define DW_PARAMS_NR_MASTER	11	/* number of AHB masters */
···
 	unsigned long			flags;
 	struct list_head		active_list;
 	struct list_head		queue;
-	struct dw_cyclic_desc		*cdesc;
 
 	unsigned int			descs_allocated;
 
···
 }
 
 #define channel_readl(dwc, name) \
-	dma_readl_native(&(__dwc_regs(dwc)->name))
+	readl(&(__dwc_regs(dwc)->name))
 #define channel_writel(dwc, name, val) \
-	dma_writel_native((val), &(__dwc_regs(dwc)->name))
+	writel((val), &(__dwc_regs(dwc)->name))
 
 static inline struct dw_dma_chan *to_dw_dma_chan(struct dma_chan *chan)
 {
···
 }
 
 #define dma_readl(dw, name) \
-	dma_readl_native(&(__dw_regs(dw)->name))
+	readl(&(__dw_regs(dw)->name))
 #define dma_writel(dw, name, val) \
-	dma_writel_native((val), &(__dw_regs(dw)->name))
+	writel((val), &(__dw_regs(dw)->name))
 
 #define idma32_readq(dw, name) \
 	hi_lo_readq(&(__dw_regs(dw)->name))
···
 	return container_of(ddev, struct dw_dma, dma);
 }
 
-#ifdef CONFIG_DW_DMAC_BIG_ENDIAN_IO
-typedef __be32 __dw32;
-#else
-typedef __le32 __dw32;
-#endif
-
 /* LLI == Linked List Item; a.k.a. DMA block descriptor */
 struct dw_lli {
 	/* values that are not changed by hardware */
-	__dw32		sar;
-	__dw32		dar;
-	__dw32		llp;		/* chain to next lli */
-	__dw32		ctllo;
+	__le32		sar;
+	__le32		dar;
+	__le32		llp;		/* chain to next lli */
+	__le32		ctllo;
 	/* values that may get written back: */
-	__dw32		ctlhi;
+	__le32		ctlhi;
 	/* sstat and dstat can snapshot peripheral register state.
 	 * silicon config may discard either or both...
 	 */
-	__dw32		sstat;
-	__dw32		dstat;
+	__le32		sstat;
+	__le32		dstat;
 };
 
 struct dw_desc {
 	/* FIRST values the hardware uses */
 	struct dw_lli			lli;
 
-#ifdef CONFIG_DW_DMAC_BIG_ENDIAN_IO
-#define lli_set(d, reg, v)	((d)->lli.reg |= cpu_to_be32(v))
-#define lli_clear(d, reg, v)	((d)->lli.reg &= ~cpu_to_be32(v))
-#define lli_read(d, reg)	be32_to_cpu((d)->lli.reg)
-#define lli_write(d, reg, v)	((d)->lli.reg = cpu_to_be32(v))
-#else
 #define lli_set(d, reg, v)	((d)->lli.reg |= cpu_to_le32(v))
 #define lli_clear(d, reg, v)	((d)->lli.reg &= ~cpu_to_le32(v))
 #define lli_read(d, reg)	le32_to_cpu((d)->lli.reg)
 #define lli_write(d, reg, v)	((d)->lli.reg = cpu_to_le32(v))
-#endif
 
 	/* THEN values for driver housekeeping */
 	struct list_head		desc_node;
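Dropping the __dw32 typedef and the dma_readl_native()/dma_writel_native() pair is safe because, with AVR32 gone, every remaining integration maps the controller little-endian. Fixing the LLI fields as __le32 and converting through cpu_to_le32() keeps the in-memory descriptor layout, which the hardware rather than the CPU consumes, identical on any host. A standalone sketch of that fixed-endian store pattern, with hypothetical names:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Hypothetical mini-descriptor mirroring the pattern struct dw_lli now uses */
struct demo_lli {
	__le32 sar;	/* bytes in memory are little-endian on any host */
};

static inline void demo_lli_write_sar(struct demo_lli *lli, u32 addr)
{
	/*
	 * cpu_to_le32() is a no-op on little-endian CPUs and a byte swap
	 * on big-endian ones, so the DMA engine always sees the same layout.
	 */
	lli->sar = cpu_to_le32(addr);
}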
-21
include/linux/dma/dw.h
···
 static inline int dw_dma_remove(struct dw_dma_chip *chip) { return 0; }
 #endif /* CONFIG_DW_DMAC_CORE */
 
-/* DMA API extensions */
-struct dw_desc;
-
-struct dw_cyclic_desc {
-	struct dw_desc	**desc;
-	unsigned long	periods;
-	void		(*period_callback)(void *param);
-	void		*period_callback_param;
-};
-
-struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
-		dma_addr_t buf_addr, size_t buf_len, size_t period_len,
-		enum dma_transfer_direction direction);
-void dw_dma_cyclic_free(struct dma_chan *chan);
-int dw_dma_cyclic_start(struct dma_chan *chan);
-void dw_dma_cyclic_stop(struct dma_chan *chan);
-
-dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan);
-
-dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan);
-
 #endif /* _DMA_DW_H */