Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

dw_dmac: add cyclic API to DW DMA driver

This patch adds a cyclic DMA interface to the DW DMA driver. This is
very useful if you want to use the DMA controller in combination with a
sound device which uses cyclic buffers.

Using a DMA channel for cyclic DMA prevents it from being used as a
normal DMA engine until the user calls the cyclic free function on the
DMA channel. Also, a cyclic DMA list cannot be prepared if the channel
is already active.

Signed-off-by: Hans-Christian Egtvedt <hans-christian.egtvedt@atmel.com>
Acked-by: Haavard Skinnemoen <haavard.skinnemoen@atmel.com>
Acked-by: Maciej Sosnowski <maciej.sosnowski@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>

authored by

Hans-Christian Egtvedt and committed by
Dan Williams
d9de4519 0f571515

+356 -2
+331 -1
drivers/dma/dw_dmac.c
··· 363 363 dwc_descriptor_complete(dwc, bad_desc); 364 364 } 365 365 366 + /* --------------------- Cyclic DMA API extensions -------------------- */ 367 + 368 + inline dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan) 369 + { 370 + struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 371 + return channel_readl(dwc, SAR); 372 + } 373 + EXPORT_SYMBOL(dw_dma_get_src_addr); 374 + 375 + inline dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan) 376 + { 377 + struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 378 + return channel_readl(dwc, DAR); 379 + } 380 + EXPORT_SYMBOL(dw_dma_get_dst_addr); 381 + 382 + /* called with dwc->lock held and all DMAC interrupts disabled */ 383 + static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc, 384 + u32 status_block, u32 status_err, u32 status_xfer) 385 + { 386 + if (status_block & dwc->mask) { 387 + void (*callback)(void *param); 388 + void *callback_param; 389 + 390 + dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n", 391 + channel_readl(dwc, LLP)); 392 + dma_writel(dw, CLEAR.BLOCK, dwc->mask); 393 + 394 + callback = dwc->cdesc->period_callback; 395 + callback_param = dwc->cdesc->period_callback_param; 396 + if (callback) { 397 + spin_unlock(&dwc->lock); 398 + callback(callback_param); 399 + spin_lock(&dwc->lock); 400 + } 401 + } 402 + 403 + /* 404 + * Error and transfer complete are highly unlikely, and will most 405 + * likely be due to a configuration error by the user. 406 + */ 407 + if (unlikely(status_err & dwc->mask) || 408 + unlikely(status_xfer & dwc->mask)) { 409 + int i; 410 + 411 + dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s " 412 + "interrupt, stopping DMA transfer\n", 413 + status_xfer ? 
"xfer" : "error"); 414 + dev_err(chan2dev(&dwc->chan), 415 + " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n", 416 + channel_readl(dwc, SAR), 417 + channel_readl(dwc, DAR), 418 + channel_readl(dwc, LLP), 419 + channel_readl(dwc, CTL_HI), 420 + channel_readl(dwc, CTL_LO)); 421 + 422 + channel_clear_bit(dw, CH_EN, dwc->mask); 423 + while (dma_readl(dw, CH_EN) & dwc->mask) 424 + cpu_relax(); 425 + 426 + /* make sure DMA does not restart by loading a new list */ 427 + channel_writel(dwc, LLP, 0); 428 + channel_writel(dwc, CTL_LO, 0); 429 + channel_writel(dwc, CTL_HI, 0); 430 + 431 + dma_writel(dw, CLEAR.BLOCK, dwc->mask); 432 + dma_writel(dw, CLEAR.ERROR, dwc->mask); 433 + dma_writel(dw, CLEAR.XFER, dwc->mask); 434 + 435 + for (i = 0; i < dwc->cdesc->periods; i++) 436 + dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli); 437 + } 438 + } 439 + 440 + /* ------------------------------------------------------------------------- */ 441 + 366 442 static void dw_dma_tasklet(unsigned long data) 367 443 { 368 444 struct dw_dma *dw = (struct dw_dma *)data; ··· 458 382 for (i = 0; i < dw->dma.chancnt; i++) { 459 383 dwc = &dw->chan[i]; 460 384 spin_lock(&dwc->lock); 461 - if (status_err & (1 << i)) 385 + if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) 386 + dwc_handle_cyclic(dw, dwc, status_block, status_err, 387 + status_xfer); 388 + else if (status_err & (1 << i)) 462 389 dwc_handle_error(dw, dwc); 463 390 else if ((status_block | status_xfer) & (1 << i)) 464 391 dwc_scan_descriptors(dw, dwc); ··· 961 882 962 883 dev_vdbg(chan2dev(chan), "free_chan_resources done\n"); 963 884 } 885 + 886 + /* --------------------- Cyclic DMA API extensions -------------------- */ 887 + 888 + /** 889 + * dw_dma_cyclic_start - start the cyclic DMA transfer 890 + * @chan: the DMA channel to start 891 + * 892 + * Must be called with soft interrupts disabled. Returns zero on success or 893 + * -errno on failure. 
894 + */ 895 + int dw_dma_cyclic_start(struct dma_chan *chan) 896 + { 897 + struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 898 + struct dw_dma *dw = to_dw_dma(dwc->chan.device); 899 + 900 + if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) { 901 + dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n"); 902 + return -ENODEV; 903 + } 904 + 905 + spin_lock(&dwc->lock); 906 + 907 + /* assert channel is idle */ 908 + if (dma_readl(dw, CH_EN) & dwc->mask) { 909 + dev_err(chan2dev(&dwc->chan), 910 + "BUG: Attempted to start non-idle channel\n"); 911 + dev_err(chan2dev(&dwc->chan), 912 + " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n", 913 + channel_readl(dwc, SAR), 914 + channel_readl(dwc, DAR), 915 + channel_readl(dwc, LLP), 916 + channel_readl(dwc, CTL_HI), 917 + channel_readl(dwc, CTL_LO)); 918 + spin_unlock(&dwc->lock); 919 + return -EBUSY; 920 + } 921 + 922 + dma_writel(dw, CLEAR.BLOCK, dwc->mask); 923 + dma_writel(dw, CLEAR.ERROR, dwc->mask); 924 + dma_writel(dw, CLEAR.XFER, dwc->mask); 925 + 926 + /* setup DMAC channel registers */ 927 + channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys); 928 + channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN); 929 + channel_writel(dwc, CTL_HI, 0); 930 + 931 + channel_set_bit(dw, CH_EN, dwc->mask); 932 + 933 + spin_unlock(&dwc->lock); 934 + 935 + return 0; 936 + } 937 + EXPORT_SYMBOL(dw_dma_cyclic_start); 938 + 939 + /** 940 + * dw_dma_cyclic_stop - stop the cyclic DMA transfer 941 + * @chan: the DMA channel to stop 942 + * 943 + * Must be called with soft interrupts disabled. 
944 + */ 945 + void dw_dma_cyclic_stop(struct dma_chan *chan) 946 + { 947 + struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 948 + struct dw_dma *dw = to_dw_dma(dwc->chan.device); 949 + 950 + spin_lock(&dwc->lock); 951 + 952 + channel_clear_bit(dw, CH_EN, dwc->mask); 953 + while (dma_readl(dw, CH_EN) & dwc->mask) 954 + cpu_relax(); 955 + 956 + spin_unlock(&dwc->lock); 957 + } 958 + EXPORT_SYMBOL(dw_dma_cyclic_stop); 959 + 960 + /** 961 + * dw_dma_cyclic_prep - prepare the cyclic DMA transfer 962 + * @chan: the DMA channel to prepare 963 + * @buf_addr: physical DMA address where the buffer starts 964 + * @buf_len: total number of bytes for the entire buffer 965 + * @period_len: number of bytes for each period 966 + * @direction: transfer direction, to or from device 967 + * 968 + * Must be called before trying to start the transfer. Returns a valid struct 969 + * dw_cyclic_desc if successful or an ERR_PTR(-errno) if not successful. 970 + */ 971 + struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, 972 + dma_addr_t buf_addr, size_t buf_len, size_t period_len, 973 + enum dma_data_direction direction) 974 + { 975 + struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 976 + struct dw_cyclic_desc *cdesc; 977 + struct dw_cyclic_desc *retval = NULL; 978 + struct dw_desc *desc; 979 + struct dw_desc *last = NULL; 980 + struct dw_dma_slave *dws = chan->private; 981 + unsigned long was_cyclic; 982 + unsigned int reg_width; 983 + unsigned int periods; 984 + unsigned int i; 985 + 986 + spin_lock_bh(&dwc->lock); 987 + if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) { 988 + spin_unlock_bh(&dwc->lock); 989 + dev_dbg(chan2dev(&dwc->chan), 990 + "queue and/or active list are not empty\n"); 991 + return ERR_PTR(-EBUSY); 992 + } 993 + 994 + was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags); 995 + spin_unlock_bh(&dwc->lock); 996 + if (was_cyclic) { 997 + dev_dbg(chan2dev(&dwc->chan), 998 + "channel already prepared for cyclic DMA\n"); 999 + return 
ERR_PTR(-EBUSY); 1000 + } 1001 + 1002 + retval = ERR_PTR(-EINVAL); 1003 + reg_width = dws->reg_width; 1004 + periods = buf_len / period_len; 1005 + 1006 + /* Check for too big/unaligned periods and unaligned DMA buffer. */ 1007 + if (period_len > (DWC_MAX_COUNT << reg_width)) 1008 + goto out_err; 1009 + if (unlikely(period_len & ((1 << reg_width) - 1))) 1010 + goto out_err; 1011 + if (unlikely(buf_addr & ((1 << reg_width) - 1))) 1012 + goto out_err; 1013 + if (unlikely(!(direction & (DMA_TO_DEVICE | DMA_FROM_DEVICE)))) 1014 + goto out_err; 1015 + 1016 + retval = ERR_PTR(-ENOMEM); 1017 + 1018 + if (periods > NR_DESCS_PER_CHANNEL) 1019 + goto out_err; 1020 + 1021 + cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL); 1022 + if (!cdesc) 1023 + goto out_err; 1024 + 1025 + cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL); 1026 + if (!cdesc->desc) 1027 + goto out_err_alloc; 1028 + 1029 + for (i = 0; i < periods; i++) { 1030 + desc = dwc_desc_get(dwc); 1031 + if (!desc) 1032 + goto out_err_desc_get; 1033 + 1034 + switch (direction) { 1035 + case DMA_TO_DEVICE: 1036 + desc->lli.dar = dws->tx_reg; 1037 + desc->lli.sar = buf_addr + (period_len * i); 1038 + desc->lli.ctllo = (DWC_DEFAULT_CTLLO 1039 + | DWC_CTLL_DST_WIDTH(reg_width) 1040 + | DWC_CTLL_SRC_WIDTH(reg_width) 1041 + | DWC_CTLL_DST_FIX 1042 + | DWC_CTLL_SRC_INC 1043 + | DWC_CTLL_FC_M2P 1044 + | DWC_CTLL_INT_EN); 1045 + break; 1046 + case DMA_FROM_DEVICE: 1047 + desc->lli.dar = buf_addr + (period_len * i); 1048 + desc->lli.sar = dws->rx_reg; 1049 + desc->lli.ctllo = (DWC_DEFAULT_CTLLO 1050 + | DWC_CTLL_SRC_WIDTH(reg_width) 1051 + | DWC_CTLL_DST_WIDTH(reg_width) 1052 + | DWC_CTLL_DST_INC 1053 + | DWC_CTLL_SRC_FIX 1054 + | DWC_CTLL_FC_P2M 1055 + | DWC_CTLL_INT_EN); 1056 + break; 1057 + default: 1058 + break; 1059 + } 1060 + 1061 + desc->lli.ctlhi = (period_len >> reg_width); 1062 + cdesc->desc[i] = desc; 1063 + 1064 + if (last) { 1065 + last->lli.llp = desc->txd.phys; 1066 + 
dma_sync_single_for_device(chan2parent(chan), 1067 + last->txd.phys, sizeof(last->lli), 1068 + DMA_TO_DEVICE); 1069 + } 1070 + 1071 + last = desc; 1072 + } 1073 + 1074 + /* lets make a cyclic list */ 1075 + last->lli.llp = cdesc->desc[0]->txd.phys; 1076 + dma_sync_single_for_device(chan2parent(chan), last->txd.phys, 1077 + sizeof(last->lli), DMA_TO_DEVICE); 1078 + 1079 + dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%08x len %zu " 1080 + "period %zu periods %d\n", buf_addr, buf_len, 1081 + period_len, periods); 1082 + 1083 + cdesc->periods = periods; 1084 + dwc->cdesc = cdesc; 1085 + 1086 + return cdesc; 1087 + 1088 + out_err_desc_get: 1089 + while (i--) 1090 + dwc_desc_put(dwc, cdesc->desc[i]); 1091 + out_err_alloc: 1092 + kfree(cdesc); 1093 + out_err: 1094 + clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags); 1095 + return (struct dw_cyclic_desc *)retval; 1096 + } 1097 + EXPORT_SYMBOL(dw_dma_cyclic_prep); 1098 + 1099 + /** 1100 + * dw_dma_cyclic_free - free a prepared cyclic DMA transfer 1101 + * @chan: the DMA channel to free 1102 + */ 1103 + void dw_dma_cyclic_free(struct dma_chan *chan) 1104 + { 1105 + struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 1106 + struct dw_dma *dw = to_dw_dma(dwc->chan.device); 1107 + struct dw_cyclic_desc *cdesc = dwc->cdesc; 1108 + int i; 1109 + 1110 + dev_dbg(chan2dev(&dwc->chan), "cyclic free\n"); 1111 + 1112 + if (!cdesc) 1113 + return; 1114 + 1115 + spin_lock_bh(&dwc->lock); 1116 + 1117 + channel_clear_bit(dw, CH_EN, dwc->mask); 1118 + while (dma_readl(dw, CH_EN) & dwc->mask) 1119 + cpu_relax(); 1120 + 1121 + dma_writel(dw, CLEAR.BLOCK, dwc->mask); 1122 + dma_writel(dw, CLEAR.ERROR, dwc->mask); 1123 + dma_writel(dw, CLEAR.XFER, dwc->mask); 1124 + 1125 + spin_unlock_bh(&dwc->lock); 1126 + 1127 + for (i = 0; i < cdesc->periods; i++) 1128 + dwc_desc_put(dwc, cdesc->desc[i]); 1129 + 1130 + kfree(cdesc->desc); 1131 + kfree(cdesc); 1132 + 1133 + clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags); 1134 + } 1135 + EXPORT_SYMBOL(dw_dma_cyclic_free); 
964 1136 965 1137 /*----------------------------------------------------------------------*/ 966 1138
+6 -1
drivers/dma/dw_dmac_regs.h
··· 126 126 127 127 #define DW_REGLEN 0x400 128 128 129 + enum dw_dmac_flags { 130 + DW_DMA_IS_CYCLIC = 0, 131 + }; 132 + 129 133 struct dw_dma_chan { 130 134 struct dma_chan chan; 131 135 void __iomem *ch_regs; ··· 138 134 spinlock_t lock; 139 135 140 136 /* these other elements are all protected by lock */ 137 + unsigned long flags; 141 138 dma_cookie_t completed; 142 139 struct list_head active_list; 143 140 struct list_head queue; 144 141 struct list_head free_list; 142 + struct dw_cyclic_desc *cdesc; 145 143 146 144 unsigned int descs_allocated; 147 145 }; ··· 163 157 { 164 158 return container_of(chan, struct dw_dma_chan, chan); 165 159 } 166 - 167 160 168 161 struct dw_dma { 169 162 struct dma_device dma;
+19
include/linux/dw_dmac.h
··· 74 74 #define DWC_CFGL_HS_DST_POL (1 << 18) /* dst handshake active low */ 75 75 #define DWC_CFGL_HS_SRC_POL (1 << 19) /* src handshake active low */ 76 76 77 + /* DMA API extensions */ 78 + struct dw_cyclic_desc { 79 + struct dw_desc **desc; 80 + unsigned long periods; 81 + void (*period_callback)(void *param); 82 + void *period_callback_param; 83 + }; 84 + 85 + struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, 86 + dma_addr_t buf_addr, size_t buf_len, size_t period_len, 87 + enum dma_data_direction direction); 88 + void dw_dma_cyclic_free(struct dma_chan *chan); 89 + int dw_dma_cyclic_start(struct dma_chan *chan); 90 + void dw_dma_cyclic_stop(struct dma_chan *chan); 91 + 92 + dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan); 93 + 94 + dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan); 95 + 77 96 #endif /* DW_DMAC_H */