Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

dmaengine: move drivers to dma_transfer_direction

Fix up usage of DMA direction by introducing dma_transfer_direction;
this patch moves drivers/dma/* to use the new enum.

Cc: Jassi Brar <jaswinder.singh@linaro.org>
Cc: Russell King <rmk+kernel@arm.linux.org.uk>
Cc: Viresh Kumar <viresh.kumar@st.com>
Cc: Linus Walleij <linus.walleij@linaro.org>
Cc: Nicolas Ferre <nicolas.ferre@atmel.com>
Cc: Mika Westerberg <mika.westerberg@iki.fi>
Cc: H Hartley Sweeten <hartleys@visionengravers.com>
Cc: Li Yang <leoli@freescale.com>
Cc: Zhang Wei <zw@zh-kernel.org>
Cc: Sascha Hauer <s.hauer@pengutronix.de>
Cc: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
Cc: Shawn Guo <shawn.guo@freescale.com>
Cc: Yong Wang <yong.y.wang@intel.com>
Cc: Tomoya MORINAGA <tomoya-linux@dsn.lapis-semi.com>
Cc: Boojin Kim <boojin.kim@samsung.com>
Cc: Barry Song <Baohua.Song@csr.com>
Acked-by: Mika Westerberg <mika.westerberg@iki.fi>
Acked-by: Linus Walleij <linus.walleij@linaro.org>
Acked-by: Viresh Kumar <viresh.kumar@st.com>
Acked-by: Nicolas Ferre <nicolas.ferre@atmel.com>
Signed-off-by: Vinod Koul <vinod.koul@linux.intel.com>

+150 -152
+3 -3
arch/arm/mach-ep93xx/include/mach/dma.h
··· 37 37 */ 38 38 struct ep93xx_dma_data { 39 39 int port; 40 - enum dma_data_direction direction; 40 + enum dma_transfer_direction direction; 41 41 const char *name; 42 42 }; 43 43 ··· 80 80 * channel supports given DMA direction. Only M2P channels have such 81 81 * limitation, for M2M channels the direction is configurable. 82 82 */ 83 - static inline enum dma_data_direction 83 + static inline enum dma_transfer_direction 84 84 ep93xx_dma_chan_direction(struct dma_chan *chan) 85 85 { 86 86 if (!ep93xx_dma_chan_is_m2p(chan)) 87 87 return DMA_NONE; 88 88 89 89 /* even channels are for TX, odd for RX */ 90 - return (chan->chan_id % 2 == 0) ? DMA_TO_DEVICE : DMA_FROM_DEVICE; 90 + return (chan->chan_id % 2 == 0) ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM; 91 91 } 92 92 93 93 #endif /* __ASM_ARCH_DMA_H */
+2 -2
arch/arm/plat-nomadik/include/plat/ste_dma40.h
··· 187 187 dma_async_tx_descriptor *stedma40_slave_mem(struct dma_chan *chan, 188 188 dma_addr_t addr, 189 189 unsigned int size, 190 - enum dma_data_direction direction, 190 + enum dma_transfer_direction direction, 191 191 unsigned long flags) 192 192 { 193 193 struct scatterlist sg; ··· 209 209 dma_async_tx_descriptor *stedma40_slave_mem(struct dma_chan *chan, 210 210 dma_addr_t addr, 211 211 unsigned int size, 212 - enum dma_data_direction direction, 212 + enum dma_transfer_direction direction, 213 213 unsigned long flags) 214 214 { 215 215 return NULL;
+12 -12
drivers/dma/amba-pl08x.c
··· 882 882 ch->signal = ret; 883 883 884 884 /* Assign the flow control signal to this channel */ 885 - if (txd->direction == DMA_TO_DEVICE) 885 + if (txd->direction == DMA_MEM_TO_DEV) 886 886 txd->ccfg |= ch->signal << PL080_CONFIG_DST_SEL_SHIFT; 887 - else if (txd->direction == DMA_FROM_DEVICE) 887 + else if (txd->direction == DMA_DEV_TO_MEM) 888 888 txd->ccfg |= ch->signal << PL080_CONFIG_SRC_SEL_SHIFT; 889 889 } 890 890 ··· 1102 1102 1103 1103 /* Transfer direction */ 1104 1104 plchan->runtime_direction = config->direction; 1105 - if (config->direction == DMA_TO_DEVICE) { 1105 + if (config->direction == DMA_MEM_TO_DEV) { 1106 1106 addr_width = config->dst_addr_width; 1107 1107 maxburst = config->dst_maxburst; 1108 - } else if (config->direction == DMA_FROM_DEVICE) { 1108 + } else if (config->direction == DMA_DEV_TO_MEM) { 1109 1109 addr_width = config->src_addr_width; 1110 1110 maxburst = config->src_maxburst; 1111 1111 } else { ··· 1136 1136 cctl |= burst << PL080_CONTROL_SB_SIZE_SHIFT; 1137 1137 cctl |= burst << PL080_CONTROL_DB_SIZE_SHIFT; 1138 1138 1139 - if (plchan->runtime_direction == DMA_FROM_DEVICE) { 1139 + if (plchan->runtime_direction == DMA_DEV_TO_MEM) { 1140 1140 plchan->src_addr = config->src_addr; 1141 1141 plchan->src_cctl = pl08x_cctl(cctl) | PL080_CONTROL_DST_INCR | 1142 1142 pl08x_select_bus(plchan->cd->periph_buses, ··· 1152 1152 "configured channel %s (%s) for %s, data width %d, " 1153 1153 "maxburst %d words, LE, CCTL=0x%08x\n", 1154 1154 dma_chan_name(chan), plchan->name, 1155 - (config->direction == DMA_FROM_DEVICE) ? "RX" : "TX", 1155 + (config->direction == DMA_DEV_TO_MEM) ? 
"RX" : "TX", 1156 1156 addr_width, 1157 1157 maxburst, 1158 1158 cctl); ··· 1322 1322 1323 1323 static struct dma_async_tx_descriptor *pl08x_prep_slave_sg( 1324 1324 struct dma_chan *chan, struct scatterlist *sgl, 1325 - unsigned int sg_len, enum dma_data_direction direction, 1325 + unsigned int sg_len, enum dma_transfer_direction direction, 1326 1326 unsigned long flags) 1327 1327 { 1328 1328 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); ··· 1354 1354 */ 1355 1355 txd->direction = direction; 1356 1356 1357 - if (direction == DMA_TO_DEVICE) { 1357 + if (direction == DMA_MEM_TO_DEV) { 1358 1358 txd->cctl = plchan->dst_cctl; 1359 1359 slave_addr = plchan->dst_addr; 1360 - } else if (direction == DMA_FROM_DEVICE) { 1360 + } else if (direction == DMA_DEV_TO_MEM) { 1361 1361 txd->cctl = plchan->src_cctl; 1362 1362 slave_addr = plchan->src_addr; 1363 1363 } else { ··· 1368 1368 } 1369 1369 1370 1370 if (plchan->cd->device_fc) 1371 - tmp = (direction == DMA_TO_DEVICE) ? PL080_FLOW_MEM2PER_PER : 1371 + tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER_PER : 1372 1372 PL080_FLOW_PER2MEM_PER; 1373 1373 else 1374 - tmp = (direction == DMA_TO_DEVICE) ? PL080_FLOW_MEM2PER : 1374 + tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER : 1375 1375 PL080_FLOW_PER2MEM; 1376 1376 1377 1377 txd->ccfg |= tmp << PL080_CONFIG_FLOW_CONTROL_SHIFT; ··· 1387 1387 list_add_tail(&dsg->node, &txd->dsg_list); 1388 1388 1389 1389 dsg->len = sg_dma_len(sg); 1390 - if (direction == DMA_TO_DEVICE) { 1390 + if (direction == DMA_MEM_TO_DEV) { 1391 1391 dsg->src_addr = sg_phys(sg); 1392 1392 dsg->dst_addr = slave_addr; 1393 1393 } else {
+11 -11
drivers/dma/at_hdmac.c
··· 660 660 */ 661 661 static struct dma_async_tx_descriptor * 662 662 atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, 663 - unsigned int sg_len, enum dma_data_direction direction, 663 + unsigned int sg_len, enum dma_transfer_direction direction, 664 664 unsigned long flags) 665 665 { 666 666 struct at_dma_chan *atchan = to_at_dma_chan(chan); ··· 678 678 679 679 dev_vdbg(chan2dev(chan), "prep_slave_sg (%d): %s f0x%lx\n", 680 680 sg_len, 681 - direction == DMA_TO_DEVICE ? "TO DEVICE" : "FROM DEVICE", 681 + direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE", 682 682 flags); 683 683 684 684 if (unlikely(!atslave || !sg_len)) { ··· 692 692 ctrlb = ATC_IEN; 693 693 694 694 switch (direction) { 695 - case DMA_TO_DEVICE: 695 + case DMA_MEM_TO_DEV: 696 696 ctrla |= ATC_DST_WIDTH(reg_width); 697 697 ctrlb |= ATC_DST_ADDR_MODE_FIXED 698 698 | ATC_SRC_ADDR_MODE_INCR ··· 725 725 total_len += len; 726 726 } 727 727 break; 728 - case DMA_FROM_DEVICE: 728 + case DMA_DEV_TO_MEM: 729 729 ctrla |= ATC_SRC_WIDTH(reg_width); 730 730 ctrlb |= ATC_DST_ADDR_MODE_INCR 731 731 | ATC_SRC_ADDR_MODE_FIXED ··· 787 787 */ 788 788 static int 789 789 atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr, 790 - size_t period_len, enum dma_data_direction direction) 790 + size_t period_len, enum dma_transfer_direction direction) 791 791 { 792 792 if (period_len > (ATC_BTSIZE_MAX << reg_width)) 793 793 goto err_out; ··· 795 795 goto err_out; 796 796 if (unlikely(buf_addr & ((1 << reg_width) - 1))) 797 797 goto err_out; 798 - if (unlikely(!(direction & (DMA_TO_DEVICE | DMA_FROM_DEVICE)))) 798 + if (unlikely(!(direction & (DMA_DEV_TO_MEM | DMA_MEM_TO_DEV)))) 799 799 goto err_out; 800 800 801 801 return 0; ··· 810 810 static int 811 811 atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc, 812 812 unsigned int period_index, dma_addr_t buf_addr, 813 - size_t period_len, enum dma_data_direction direction) 813 + size_t period_len, enum 
dma_transfer_direction direction) 814 814 { 815 815 u32 ctrla; 816 816 unsigned int reg_width = atslave->reg_width; ··· 822 822 | period_len >> reg_width; 823 823 824 824 switch (direction) { 825 - case DMA_TO_DEVICE: 825 + case DMA_MEM_TO_DEV: 826 826 desc->lli.saddr = buf_addr + (period_len * period_index); 827 827 desc->lli.daddr = atslave->tx_reg; 828 828 desc->lli.ctrla = ctrla; ··· 833 833 | ATC_DIF(AT_DMA_PER_IF); 834 834 break; 835 835 836 - case DMA_FROM_DEVICE: 836 + case DMA_DEV_TO_MEM: 837 837 desc->lli.saddr = atslave->rx_reg; 838 838 desc->lli.daddr = buf_addr + (period_len * period_index); 839 839 desc->lli.ctrla = ctrla; ··· 861 861 */ 862 862 static struct dma_async_tx_descriptor * 863 863 atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, 864 - size_t period_len, enum dma_data_direction direction) 864 + size_t period_len, enum dma_transfer_direction direction) 865 865 { 866 866 struct at_dma_chan *atchan = to_at_dma_chan(chan); 867 867 struct at_dma_slave *atslave = chan->private; ··· 872 872 unsigned int i; 873 873 874 874 dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@0x%08x - %d (%d/%d)\n", 875 - direction == DMA_TO_DEVICE ? "TO DEVICE" : "FROM DEVICE", 875 + direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE", 876 876 buf_addr, 877 877 periods, buf_len, period_len); 878 878
+6 -6
drivers/dma/coh901318.c
··· 39 39 struct scatterlist *sg; 40 40 unsigned int sg_len; 41 41 struct coh901318_lli *lli; 42 - enum dma_data_direction dir; 42 + enum dma_transfer_direction dir; 43 43 unsigned long flags; 44 44 u32 head_config; 45 45 u32 head_ctrl; ··· 1034 1034 1035 1035 static struct dma_async_tx_descriptor * 1036 1036 coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, 1037 - unsigned int sg_len, enum dma_data_direction direction, 1037 + unsigned int sg_len, enum dma_transfer_direction direction, 1038 1038 unsigned long flags) 1039 1039 { 1040 1040 struct coh901318_chan *cohc = to_coh901318_chan(chan); ··· 1077 1077 ctrl_last |= cohc->runtime_ctrl; 1078 1078 ctrl |= cohc->runtime_ctrl; 1079 1079 1080 - if (direction == DMA_TO_DEVICE) { 1080 + if (direction == DMA_MEM_TO_DEV) { 1081 1081 u32 tx_flags = COH901318_CX_CTRL_PRDD_SOURCE | 1082 1082 COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE; 1083 1083 ··· 1085 1085 ctrl_chained |= tx_flags; 1086 1086 ctrl_last |= tx_flags; 1087 1087 ctrl |= tx_flags; 1088 - } else if (direction == DMA_FROM_DEVICE) { 1088 + } else if (direction == DMA_DEV_TO_MEM) { 1089 1089 u32 rx_flags = COH901318_CX_CTRL_PRDD_DEST | 1090 1090 COH901318_CX_CTRL_DST_ADDR_INC_ENABLE; 1091 1091 ··· 1274 1274 int i = 0; 1275 1275 1276 1276 /* We only support mem to per or per to mem transfers */ 1277 - if (config->direction == DMA_FROM_DEVICE) { 1277 + if (config->direction == DMA_DEV_TO_MEM) { 1278 1278 addr = config->src_addr; 1279 1279 addr_width = config->src_addr_width; 1280 1280 maxburst = config->src_maxburst; 1281 - } else if (config->direction == DMA_TO_DEVICE) { 1281 + } else if (config->direction == DMA_MEM_TO_DEV) { 1282 1282 addr = config->dst_addr; 1283 1283 addr_width = config->dst_addr_width; 1284 1284 maxburst = config->dst_maxburst;
+11 -12
drivers/dma/coh901318_lli.c
··· 7 7 * Author: Per Friden <per.friden@stericsson.com> 8 8 */ 9 9 10 - #include <linux/dma-mapping.h> 11 10 #include <linux/spinlock.h> 12 - #include <linux/dmapool.h> 13 11 #include <linux/memory.h> 14 12 #include <linux/gfp.h> 13 + #include <linux/dmapool.h> 15 14 #include <mach/coh901318.h> 16 15 17 16 #include "coh901318_lli.h" ··· 176 177 struct coh901318_lli *lli, 177 178 dma_addr_t buf, unsigned int size, 178 179 dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl_eom, 179 - enum dma_data_direction dir) 180 + enum dma_transfer_direction dir) 180 181 { 181 182 int s = size; 182 183 dma_addr_t src; 183 184 dma_addr_t dst; 184 185 185 186 186 - if (dir == DMA_TO_DEVICE) { 187 + if (dir == DMA_MEM_TO_DEV) { 187 188 src = buf; 188 189 dst = dev_addr; 189 190 190 - } else if (dir == DMA_FROM_DEVICE) { 191 + } else if (dir == DMA_DEV_TO_MEM) { 191 192 192 193 src = dev_addr; 193 194 dst = buf; ··· 214 215 215 216 lli = coh901318_lli_next(lli); 216 217 217 - if (dir == DMA_TO_DEVICE) 218 + if (dir == DMA_MEM_TO_DEV) 218 219 src += block_size; 219 - else if (dir == DMA_FROM_DEVICE) 220 + else if (dir == DMA_DEV_TO_MEM) 220 221 dst += block_size; 221 222 } 222 223 ··· 233 234 struct scatterlist *sgl, unsigned int nents, 234 235 dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl, 235 236 u32 ctrl_last, 236 - enum dma_data_direction dir, u32 ctrl_irq_mask) 237 + enum dma_transfer_direction dir, u32 ctrl_irq_mask) 237 238 { 238 239 int i; 239 240 struct scatterlist *sg; ··· 248 249 249 250 spin_lock(&pool->lock); 250 251 251 - if (dir == DMA_TO_DEVICE) 252 + if (dir == DMA_MEM_TO_DEV) 252 253 dst = dev_addr; 253 - else if (dir == DMA_FROM_DEVICE) 254 + else if (dir == DMA_DEV_TO_MEM) 254 255 src = dev_addr; 255 256 else 256 257 goto err; ··· 268 269 ctrl_sg = ctrl ? 
ctrl : ctrl_last; 269 270 270 271 271 - if (dir == DMA_TO_DEVICE) 272 + if (dir == DMA_MEM_TO_DEV) 272 273 /* increment source address */ 273 274 src = sg_phys(sg); 274 275 else ··· 292 293 lli->src_addr = src; 293 294 lli->dst_addr = dst; 294 295 295 - if (dir == DMA_FROM_DEVICE) 296 + if (dir == DMA_DEV_TO_MEM) 296 297 dst += elem_size; 297 298 else 298 299 src += elem_size;
+2 -2
drivers/dma/coh901318_lli.h
··· 97 97 struct coh901318_lli *lli, 98 98 dma_addr_t buf, unsigned int size, 99 99 dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl_last, 100 - enum dma_data_direction dir); 100 + enum dma_transfer_direction dir); 101 101 102 102 /** 103 103 * coh901318_lli_fill_single() - Prepares the lli:s for dma scatter list transfer ··· 119 119 struct scatterlist *sg, unsigned int nents, 120 120 dma_addr_t dev_addr, u32 ctrl_chained, 121 121 u32 ctrl, u32 ctrl_last, 122 - enum dma_data_direction dir, u32 ctrl_irq_mask); 122 + enum dma_transfer_direction dir, u32 ctrl_irq_mask); 123 123 124 124 #endif /* COH901318_LLI_H */
+7 -7
drivers/dma/dw_dmac.c
··· 696 696 697 697 static struct dma_async_tx_descriptor * 698 698 dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, 699 - unsigned int sg_len, enum dma_data_direction direction, 699 + unsigned int sg_len, enum dma_transfer_direction direction, 700 700 unsigned long flags) 701 701 { 702 702 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); ··· 720 720 prev = first = NULL; 721 721 722 722 switch (direction) { 723 - case DMA_TO_DEVICE: 723 + case DMA_MEM_TO_DEV: 724 724 ctllo = (DWC_DEFAULT_CTLLO(chan->private) 725 725 | DWC_CTLL_DST_WIDTH(reg_width) 726 726 | DWC_CTLL_DST_FIX ··· 777 777 goto slave_sg_todev_fill_desc; 778 778 } 779 779 break; 780 - case DMA_FROM_DEVICE: 780 + case DMA_DEV_TO_MEM: 781 781 ctllo = (DWC_DEFAULT_CTLLO(chan->private) 782 782 | DWC_CTLL_SRC_WIDTH(reg_width) 783 783 | DWC_CTLL_DST_INC ··· 1165 1165 */ 1166 1166 struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, 1167 1167 dma_addr_t buf_addr, size_t buf_len, size_t period_len, 1168 - enum dma_data_direction direction) 1168 + enum dma_transfer_direction direction) 1169 1169 { 1170 1170 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 1171 1171 struct dw_cyclic_desc *cdesc; ··· 1206 1206 goto out_err; 1207 1207 if (unlikely(buf_addr & ((1 << reg_width) - 1))) 1208 1208 goto out_err; 1209 - if (unlikely(!(direction & (DMA_TO_DEVICE | DMA_FROM_DEVICE)))) 1209 + if (unlikely(!(direction & (DMA_MEM_TO_DEV | DMA_DEV_TO_MEM)))) 1210 1210 goto out_err; 1211 1211 1212 1212 retval = ERR_PTR(-ENOMEM); ··· 1228 1228 goto out_err_desc_get; 1229 1229 1230 1230 switch (direction) { 1231 - case DMA_TO_DEVICE: 1231 + case DMA_MEM_TO_DEV: 1232 1232 desc->lli.dar = dws->tx_reg; 1233 1233 desc->lli.sar = buf_addr + (period_len * i); 1234 1234 desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan->private) ··· 1239 1239 | DWC_CTLL_FC(dws->fc) 1240 1240 | DWC_CTLL_INT_EN); 1241 1241 break; 1242 - case DMA_FROM_DEVICE: 1242 + case DMA_DEV_TO_MEM: 1243 1243 desc->lli.dar = buf_addr + (period_len * 
i); 1244 1244 desc->lli.sar = dws->rx_reg; 1245 1245 desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan->private)
+11 -11
drivers/dma/ep93xx_dma.c
··· 330 330 struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac); 331 331 u32 bus_addr; 332 332 333 - if (ep93xx_dma_chan_direction(&edmac->chan) == DMA_TO_DEVICE) 333 + if (ep93xx_dma_chan_direction(&edmac->chan) == DMA_MEM_TO_DEV) 334 334 bus_addr = desc->src_addr; 335 335 else 336 336 bus_addr = desc->dst_addr; ··· 443 443 control = (5 << M2M_CONTROL_PWSC_SHIFT); 444 444 control |= M2M_CONTROL_NO_HDSK; 445 445 446 - if (data->direction == DMA_TO_DEVICE) { 446 + if (data->direction == DMA_MEM_TO_DEV) { 447 447 control |= M2M_CONTROL_DAH; 448 448 control |= M2M_CONTROL_TM_TX; 449 449 control |= M2M_CONTROL_RSS_SSPTX; ··· 463 463 control |= M2M_CONTROL_RSS_IDE; 464 464 control |= M2M_CONTROL_PW_16; 465 465 466 - if (data->direction == DMA_TO_DEVICE) { 466 + if (data->direction == DMA_MEM_TO_DEV) { 467 467 /* Worst case from the UG */ 468 468 control = (3 << M2M_CONTROL_PWSC_SHIFT); 469 469 control |= M2M_CONTROL_DAH; ··· 803 803 switch (data->port) { 804 804 case EP93XX_DMA_SSP: 805 805 case EP93XX_DMA_IDE: 806 - if (data->direction != DMA_TO_DEVICE && 807 - data->direction != DMA_FROM_DEVICE) 806 + if (data->direction != DMA_MEM_TO_DEV && 807 + data->direction != DMA_DEV_TO_MEM) 808 808 return -EINVAL; 809 809 break; 810 810 default: ··· 952 952 */ 953 953 static struct dma_async_tx_descriptor * 954 954 ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, 955 - unsigned int sg_len, enum dma_data_direction dir, 955 + unsigned int sg_len, enum dma_transfer_direction dir, 956 956 unsigned long flags) 957 957 { 958 958 struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan); ··· 988 988 goto fail; 989 989 } 990 990 991 - if (dir == DMA_TO_DEVICE) { 991 + if (dir == DMA_MEM_TO_DEV) { 992 992 desc->src_addr = sg_dma_address(sg); 993 993 desc->dst_addr = edmac->runtime_addr; 994 994 } else { ··· 1032 1032 static struct dma_async_tx_descriptor * 1033 1033 ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr, 1034 1034 size_t 
buf_len, size_t period_len, 1035 - enum dma_data_direction dir) 1035 + enum dma_transfer_direction dir) 1036 1036 { 1037 1037 struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan); 1038 1038 struct ep93xx_dma_desc *desc, *first; ··· 1065 1065 goto fail; 1066 1066 } 1067 1067 1068 - if (dir == DMA_TO_DEVICE) { 1068 + if (dir == DMA_MEM_TO_DEV) { 1069 1069 desc->src_addr = dma_addr + offset; 1070 1070 desc->dst_addr = edmac->runtime_addr; 1071 1071 } else { ··· 1133 1133 return -EINVAL; 1134 1134 1135 1135 switch (config->direction) { 1136 - case DMA_FROM_DEVICE: 1136 + case DMA_DEV_TO_MEM: 1137 1137 width = config->src_addr_width; 1138 1138 addr = config->src_addr; 1139 1139 break; 1140 1140 1141 - case DMA_TO_DEVICE: 1141 + case DMA_MEM_TO_DEV: 1142 1142 width = config->dst_addr_width; 1143 1143 addr = config->dst_addr; 1144 1144 break;
+2 -2
drivers/dma/fsldma.c
··· 772 772 */ 773 773 static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg( 774 774 struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len, 775 - enum dma_data_direction direction, unsigned long flags) 775 + enum dma_transfer_direction direction, unsigned long flags) 776 776 { 777 777 /* 778 778 * This operation is not supported on the Freescale DMA controller ··· 819 819 return -ENXIO; 820 820 821 821 /* we set the controller burst size depending on direction */ 822 - if (config->direction == DMA_TO_DEVICE) 822 + if (config->direction == DMA_MEM_TO_DEV) 823 823 size = config->dst_addr_width * config->dst_maxburst; 824 824 else 825 825 size = config->src_addr_width * config->src_maxburst;
+5 -5
drivers/dma/imx-dma.c
··· 106 106 imx_dma_disable(imxdmac->imxdma_channel); 107 107 return 0; 108 108 case DMA_SLAVE_CONFIG: 109 - if (dmaengine_cfg->direction == DMA_FROM_DEVICE) { 109 + if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) { 110 110 imxdmac->per_address = dmaengine_cfg->src_addr; 111 111 imxdmac->watermark_level = dmaengine_cfg->src_maxburst; 112 112 imxdmac->word_size = dmaengine_cfg->src_addr_width; ··· 223 223 224 224 static struct dma_async_tx_descriptor *imxdma_prep_slave_sg( 225 225 struct dma_chan *chan, struct scatterlist *sgl, 226 - unsigned int sg_len, enum dma_data_direction direction, 226 + unsigned int sg_len, enum dma_transfer_direction direction, 227 227 unsigned long flags) 228 228 { 229 229 struct imxdma_channel *imxdmac = to_imxdma_chan(chan); ··· 240 240 dma_length += sg->length; 241 241 } 242 242 243 - if (direction == DMA_FROM_DEVICE) 243 + if (direction == DMA_DEV_TO_MEM) 244 244 dmamode = DMA_MODE_READ; 245 245 else 246 246 dmamode = DMA_MODE_WRITE; ··· 270 270 271 271 static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic( 272 272 struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, 273 - size_t period_len, enum dma_data_direction direction) 273 + size_t period_len, enum dma_transfer_direction direction) 274 274 { 275 275 struct imxdma_channel *imxdmac = to_imxdma_chan(chan); 276 276 struct imxdma_engine *imxdma = imxdmac->imxdma; ··· 316 316 imxdmac->sg_list[periods].page_link = 317 317 ((unsigned long)imxdmac->sg_list | 0x01) & ~0x02; 318 318 319 - if (direction == DMA_FROM_DEVICE) 319 + if (direction == DMA_DEV_TO_MEM) 320 320 dmamode = DMA_MODE_READ; 321 321 else 322 322 dmamode = DMA_MODE_WRITE;
+5 -5
drivers/dma/imx-sdma.c
··· 246 246 struct sdma_channel { 247 247 struct sdma_engine *sdma; 248 248 unsigned int channel; 249 - enum dma_data_direction direction; 249 + enum dma_transfer_direction direction; 250 250 enum sdma_peripheral_type peripheral_type; 251 251 unsigned int event_id0; 252 252 unsigned int event_id1; ··· 649 649 struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd; 650 650 int ret; 651 651 652 - if (sdmac->direction == DMA_FROM_DEVICE) { 652 + if (sdmac->direction == DMA_DEV_TO_MEM) { 653 653 load_address = sdmac->pc_from_device; 654 654 } else { 655 655 load_address = sdmac->pc_to_device; ··· 910 910 911 911 static struct dma_async_tx_descriptor *sdma_prep_slave_sg( 912 912 struct dma_chan *chan, struct scatterlist *sgl, 913 - unsigned int sg_len, enum dma_data_direction direction, 913 + unsigned int sg_len, enum dma_transfer_direction direction, 914 914 unsigned long flags) 915 915 { 916 916 struct sdma_channel *sdmac = to_sdma_chan(chan); ··· 1007 1007 1008 1008 static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic( 1009 1009 struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, 1010 - size_t period_len, enum dma_data_direction direction) 1010 + size_t period_len, enum dma_transfer_direction direction) 1011 1011 { 1012 1012 struct sdma_channel *sdmac = to_sdma_chan(chan); 1013 1013 struct sdma_engine *sdma = sdmac->sdma; ··· 1092 1092 sdma_disable_channel(sdmac); 1093 1093 return 0; 1094 1094 case DMA_SLAVE_CONFIG: 1095 - if (dmaengine_cfg->direction == DMA_FROM_DEVICE) { 1095 + if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) { 1096 1096 sdmac->per_address = dmaengine_cfg->src_addr; 1097 1097 sdmac->watermark_level = dmaengine_cfg->src_maxburst; 1098 1098 sdmac->word_size = dmaengine_cfg->src_addr_width;
+7 -7
drivers/dma/intel_mid_dma.c
··· 394 394 midc->dma->block_size); 395 395 /*Populate SAR and DAR values*/ 396 396 sg_phy_addr = sg_phys(sg); 397 - if (desc->dirn == DMA_TO_DEVICE) { 397 + if (desc->dirn == DMA_MEM_TO_DEV) { 398 398 lli_bloc_desc->sar = sg_phy_addr; 399 399 lli_bloc_desc->dar = mids->dma_slave.dst_addr; 400 - } else if (desc->dirn == DMA_FROM_DEVICE) { 400 + } else if (desc->dirn == DMA_DEV_TO_MEM) { 401 401 lli_bloc_desc->sar = mids->dma_slave.src_addr; 402 402 lli_bloc_desc->dar = sg_phy_addr; 403 403 } ··· 631 631 if (midc->dma->pimr_mask) { 632 632 cfg_hi.cfgx.protctl = 0x0; /*default value*/ 633 633 cfg_hi.cfgx.fifo_mode = 1; 634 - if (mids->dma_slave.direction == DMA_TO_DEVICE) { 634 + if (mids->dma_slave.direction == DMA_MEM_TO_DEV) { 635 635 cfg_hi.cfgx.src_per = 0; 636 636 if (mids->device_instance == 0) 637 637 cfg_hi.cfgx.dst_per = 3; 638 638 if (mids->device_instance == 1) 639 639 cfg_hi.cfgx.dst_per = 1; 640 - } else if (mids->dma_slave.direction == DMA_FROM_DEVICE) { 640 + } else if (mids->dma_slave.direction == DMA_DEV_TO_MEM) { 641 641 if (mids->device_instance == 0) 642 642 cfg_hi.cfgx.src_per = 2; 643 643 if (mids->device_instance == 1) ··· 681 681 ctl_lo.ctlx.sinc = 0; 682 682 ctl_lo.ctlx.dinc = 0; 683 683 } else { 684 - if (mids->dma_slave.direction == DMA_TO_DEVICE) { 684 + if (mids->dma_slave.direction == DMA_MEM_TO_DEV) { 685 685 ctl_lo.ctlx.sinc = 0; 686 686 ctl_lo.ctlx.dinc = 2; 687 687 ctl_lo.ctlx.tt_fc = 1; 688 - } else if (mids->dma_slave.direction == DMA_FROM_DEVICE) { 688 + } else if (mids->dma_slave.direction == DMA_DEV_TO_MEM) { 689 689 ctl_lo.ctlx.sinc = 2; 690 690 ctl_lo.ctlx.dinc = 0; 691 691 ctl_lo.ctlx.tt_fc = 2; ··· 731 731 */ 732 732 static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg( 733 733 struct dma_chan *chan, struct scatterlist *sgl, 734 - unsigned int sg_len, enum dma_data_direction direction, 734 + unsigned int sg_len, enum dma_transfer_direction direction, 735 735 unsigned long flags) 736 736 { 737 737 struct 
intel_mid_dma_chan *midc = NULL;
+1 -1
drivers/dma/intel_mid_dma_regs.h
··· 262 262 unsigned int lli_length; 263 263 unsigned int current_lli; 264 264 dma_addr_t next; 265 - enum dma_data_direction dirn; 265 + enum dma_transfer_direction dirn; 266 266 enum dma_status status; 267 267 enum dma_slave_buswidth width; /*width of DMA txn*/ 268 268 enum intel_mid_dma_mode cfg_mode; /*mode configuration*/
+2 -2
drivers/dma/ipu/ipu_idmac.c
··· 1362 1362 /* Allocate and initialise a transfer descriptor. */ 1363 1363 static struct dma_async_tx_descriptor *idmac_prep_slave_sg(struct dma_chan *chan, 1364 1364 struct scatterlist *sgl, unsigned int sg_len, 1365 - enum dma_data_direction direction, unsigned long tx_flags) 1365 + enum dma_transfer_direction direction, unsigned long tx_flags) 1366 1366 { 1367 1367 struct idmac_channel *ichan = to_idmac_chan(chan); 1368 1368 struct idmac_tx_desc *desc = NULL; ··· 1374 1374 chan->chan_id != IDMAC_IC_7) 1375 1375 return NULL; 1376 1376 1377 - if (direction != DMA_FROM_DEVICE && direction != DMA_TO_DEVICE) { 1377 + if (direction != DMA_DEV_TO_MEM && direction != DMA_MEM_TO_DEV) { 1378 1378 dev_err(chan->device->dev, "Invalid DMA direction %d!\n", direction); 1379 1379 return NULL; 1380 1380 }
+4 -4
drivers/dma/mxs-dma.c
··· 377 377 378 378 static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg( 379 379 struct dma_chan *chan, struct scatterlist *sgl, 380 - unsigned int sg_len, enum dma_data_direction direction, 380 + unsigned int sg_len, enum dma_transfer_direction direction, 381 381 unsigned long append) 382 382 { 383 383 struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); ··· 450 450 ccw->bits |= CCW_CHAIN; 451 451 ccw->bits |= CCW_HALT_ON_TERM; 452 452 ccw->bits |= CCW_TERM_FLUSH; 453 - ccw->bits |= BF_CCW(direction == DMA_FROM_DEVICE ? 453 + ccw->bits |= BF_CCW(direction == DMA_DEV_TO_MEM ? 454 454 MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ, 455 455 COMMAND); 456 456 ··· 472 472 473 473 static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic( 474 474 struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, 475 - size_t period_len, enum dma_data_direction direction) 475 + size_t period_len, enum dma_transfer_direction direction) 476 476 { 477 477 struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); 478 478 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; ··· 515 515 ccw->bits |= CCW_IRQ; 516 516 ccw->bits |= CCW_HALT_ON_TERM; 517 517 ccw->bits |= CCW_TERM_FLUSH; 518 - ccw->bits |= BF_CCW(direction == DMA_FROM_DEVICE ? 518 + ccw->bits |= BF_CCW(direction == DMA_DEV_TO_MEM ? 519 519 MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ, COMMAND); 520 520 521 521 dma_addr += period_len;
+6 -6
drivers/dma/pch_dma.c
··· 99 99 struct pch_dma_chan { 100 100 struct dma_chan chan; 101 101 void __iomem *membase; 102 - enum dma_data_direction dir; 102 + enum dma_transfer_direction dir; 103 103 struct tasklet_struct tasklet; 104 104 unsigned long err_status; 105 105 ··· 224 224 mask_ctl = DMA_MASK_CTL0_MODE & ~(DMA_CTL0_MODE_MASK_BITS << 225 225 (DMA_CTL0_BITS_PER_CH * chan->chan_id)); 226 226 val &= mask_mode; 227 - if (pd_chan->dir == DMA_TO_DEVICE) 227 + if (pd_chan->dir == DMA_MEM_TO_DEV) 228 228 val |= 0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id + 229 229 DMA_CTL0_DIR_SHIFT_BITS); 230 230 else ··· 242 242 mask_ctl = DMA_MASK_CTL2_MODE & ~(DMA_CTL0_MODE_MASK_BITS << 243 243 (DMA_CTL0_BITS_PER_CH * ch)); 244 244 val &= mask_mode; 245 - if (pd_chan->dir == DMA_TO_DEVICE) 245 + if (pd_chan->dir == DMA_MEM_TO_DEV) 246 246 val |= 0x1 << (DMA_CTL0_BITS_PER_CH * ch + 247 247 DMA_CTL0_DIR_SHIFT_BITS); 248 248 else ··· 607 607 608 608 static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan, 609 609 struct scatterlist *sgl, unsigned int sg_len, 610 - enum dma_data_direction direction, unsigned long flags) 610 + enum dma_transfer_direction direction, unsigned long flags) 611 611 { 612 612 struct pch_dma_chan *pd_chan = to_pd_chan(chan); 613 613 struct pch_dma_slave *pd_slave = chan->private; ··· 623 623 return NULL; 624 624 } 625 625 626 - if (direction == DMA_FROM_DEVICE) 626 + if (direction == DMA_DEV_TO_MEM) 627 627 reg = pd_slave->rx_reg; 628 - else if (direction == DMA_TO_DEVICE) 628 + else if (direction == DMA_MEM_TO_DEV) 629 629 reg = pd_slave->tx_reg; 630 630 else 631 631 return NULL;
+9 -9
drivers/dma/pl330.c
··· 320 320 case DMA_SLAVE_CONFIG: 321 321 slave_config = (struct dma_slave_config *)arg; 322 322 323 - if (slave_config->direction == DMA_TO_DEVICE) { 323 + if (slave_config->direction == DMA_MEM_TO_DEV) { 324 324 if (slave_config->dst_addr) 325 325 pch->fifo_addr = slave_config->dst_addr; 326 326 if (slave_config->dst_addr_width) 327 327 pch->burst_sz = __ffs(slave_config->dst_addr_width); 328 328 if (slave_config->dst_maxburst) 329 329 pch->burst_len = slave_config->dst_maxburst; 330 - } else if (slave_config->direction == DMA_FROM_DEVICE) { 330 + } else if (slave_config->direction == DMA_DEV_TO_MEM) { 331 331 if (slave_config->src_addr) 332 332 pch->fifo_addr = slave_config->src_addr; 333 333 if (slave_config->src_addr_width) ··· 597 597 598 598 static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic( 599 599 struct dma_chan *chan, dma_addr_t dma_addr, size_t len, 600 - size_t period_len, enum dma_data_direction direction) 600 + size_t period_len, enum dma_transfer_direction direction) 601 601 { 602 602 struct dma_pl330_desc *desc; 603 603 struct dma_pl330_chan *pch = to_pchan(chan); ··· 612 612 } 613 613 614 614 switch (direction) { 615 - case DMA_TO_DEVICE: 615 + case DMA_MEM_TO_DEV: 616 616 desc->rqcfg.src_inc = 1; 617 617 desc->rqcfg.dst_inc = 0; 618 618 src = dma_addr; 619 619 dst = pch->fifo_addr; 620 620 break; 621 - case DMA_FROM_DEVICE: 621 + case DMA_DEV_TO_MEM: 622 622 desc->rqcfg.src_inc = 0; 623 623 desc->rqcfg.dst_inc = 1; 624 624 src = pch->fifo_addr; ··· 687 687 688 688 static struct dma_async_tx_descriptor * 689 689 pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, 690 - unsigned int sg_len, enum dma_data_direction direction, 690 + unsigned int sg_len, enum dma_transfer_direction direction, 691 691 unsigned long flg) 692 692 { 693 693 struct dma_pl330_desc *first, *desc = NULL; ··· 702 702 return NULL; 703 703 704 704 /* Make sure the direction is consistent */ 705 - if ((direction == DMA_TO_DEVICE && 705 + if ((direction 
== DMA_MEM_TO_DEV && 706 706 peri->rqtype != MEMTODEV) || 707 - (direction == DMA_FROM_DEVICE && 707 + (direction == DMA_DEV_TO_MEM && 708 708 peri->rqtype != DEVTOMEM)) { 709 709 dev_err(pch->dmac->pif.dev, "%s:%d Invalid Direction\n", 710 710 __func__, __LINE__); ··· 747 747 else 748 748 list_add_tail(&desc->node, &first->node); 749 749 750 - if (direction == DMA_TO_DEVICE) { 750 + if (direction == DMA_MEM_TO_DEV) { 751 751 desc->rqcfg.src_inc = 1; 752 752 desc->rqcfg.dst_inc = 0; 753 753 fill_px(&desc->px,
+12 -13
drivers/dma/shdma.c
··· 23 23 #include <linux/interrupt.h> 24 24 #include <linux/dmaengine.h> 25 25 #include <linux/delay.h> 26 - #include <linux/dma-mapping.h> 27 26 #include <linux/platform_device.h> 28 27 #include <linux/pm_runtime.h> 29 28 #include <linux/sh_dma.h> ··· 478 479 * @sh_chan: DMA channel 479 480 * @flags: DMA transfer flags 480 481 * @dest: destination DMA address, incremented when direction equals 481 - * DMA_FROM_DEVICE or DMA_BIDIRECTIONAL 482 + * DMA_DEV_TO_MEM 482 483 * @src: source DMA address, incremented when direction equals 483 - * DMA_TO_DEVICE or DMA_BIDIRECTIONAL 484 + * DMA_MEM_TO_DEV 484 485 * @len: DMA transfer length 485 486 * @first: if NULL, set to the current descriptor and cookie set to -EBUSY 486 487 * @direction: needed for slave DMA to decide which address to keep constant, 487 - * equals DMA_BIDIRECTIONAL for MEMCPY 488 + * equals DMA_MEM_TO_MEM for MEMCPY 488 489 * Returns 0 or an error 489 490 * Locks: called with desc_lock held 490 491 */ 491 492 static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan, 492 493 unsigned long flags, dma_addr_t *dest, dma_addr_t *src, size_t *len, 493 - struct sh_desc **first, enum dma_data_direction direction) 494 + struct sh_desc **first, enum dma_transfer_direction direction) 494 495 { 495 496 struct sh_desc *new; 496 497 size_t copy_size; ··· 530 531 new->direction = direction; 531 532 532 533 *len -= copy_size; 533 - if (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE) 534 + if (direction == DMA_MEM_TO_MEM || direction == DMA_MEM_TO_DEV) 534 535 *src += copy_size; 535 - if (direction == DMA_BIDIRECTIONAL || direction == DMA_FROM_DEVICE) 536 + if (direction == DMA_MEM_TO_MEM || direction == DMA_DEV_TO_MEM) 536 537 *dest += copy_size; 537 538 538 539 return new; ··· 545 546 * converted to scatter-gather to guarantee consistent locking and a correct 546 547 * list manipulation. 
For slave DMA direction carries the usual meaning, and, 547 548 * logically, the SG list is RAM and the addr variable contains slave address, 548 - * e.g., the FIFO I/O register. For MEMCPY direction equals DMA_BIDIRECTIONAL 549 + * e.g., the FIFO I/O register. For MEMCPY direction equals DMA_MEM_TO_MEM 549 550 * and the SG list contains only one element and points at the source buffer. 550 551 */ 551 552 static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_chan, 552 553 struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr, 553 - enum dma_data_direction direction, unsigned long flags) 554 + enum dma_transfer_direction direction, unsigned long flags) 554 555 { 555 556 struct scatterlist *sg; 556 557 struct sh_desc *first = NULL, *new = NULL /* compiler... */; ··· 591 592 dev_dbg(sh_chan->dev, "Add SG #%d@%p[%d], dma %llx\n", 592 593 i, sg, len, (unsigned long long)sg_addr); 593 594 594 - if (direction == DMA_FROM_DEVICE) 595 + if (direction == DMA_DEV_TO_MEM) 595 596 new = sh_dmae_add_desc(sh_chan, flags, 596 597 &sg_addr, addr, &len, &first, 597 598 direction); ··· 645 646 sg_dma_address(&sg) = dma_src; 646 647 sg_dma_len(&sg) = len; 647 648 648 - return sh_dmae_prep_sg(sh_chan, &sg, 1, &dma_dest, DMA_BIDIRECTIONAL, 649 + return sh_dmae_prep_sg(sh_chan, &sg, 1, &dma_dest, DMA_MEM_TO_MEM, 649 650 flags); 650 651 } 651 652 652 653 static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg( 653 654 struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, 654 - enum dma_data_direction direction, unsigned long flags) 655 + enum dma_transfer_direction direction, unsigned long flags) 655 656 { 656 657 struct sh_dmae_slave *param; 657 658 struct sh_dmae_chan *sh_chan; ··· 995 996 spin_lock_irq(&sh_chan->desc_lock); 996 997 list_for_each_entry(desc, &sh_chan->ld_queue, node) { 997 998 if (desc->mark == DESC_SUBMITTED && 998 - ((desc->direction == DMA_FROM_DEVICE && 999 + ((desc->direction == DMA_DEV_TO_MEM && 999 1000 
(desc->hw.dar + desc->hw.tcr) == dar_buf) || 1000 1001 (desc->hw.sar + desc->hw.tcr) == sar_buf)) { 1001 1002 dev_dbg(sh_chan->dev, "done #%d@%p dst %u\n",
+13 -13
drivers/dma/ste_dma40.c
··· 216 216 struct d40_log_lli_full *lcpa; 217 217 /* Runtime reconfiguration */ 218 218 dma_addr_t runtime_addr; 219 - enum dma_data_direction runtime_direction; 219 + enum dma_transfer_direction runtime_direction; 220 220 }; 221 221 222 222 /** ··· 1854 1854 } 1855 1855 1856 1856 static dma_addr_t 1857 - d40_get_dev_addr(struct d40_chan *chan, enum dma_data_direction direction) 1857 + d40_get_dev_addr(struct d40_chan *chan, enum dma_transfer_direction direction) 1858 1858 { 1859 1859 struct stedma40_platform_data *plat = chan->base->plat_data; 1860 1860 struct stedma40_chan_cfg *cfg = &chan->dma_cfg; ··· 1863 1863 if (chan->runtime_addr) 1864 1864 return chan->runtime_addr; 1865 1865 1866 - if (direction == DMA_FROM_DEVICE) 1866 + if (direction == DMA_DEV_TO_MEM) 1867 1867 addr = plat->dev_rx[cfg->src_dev_type]; 1868 - else if (direction == DMA_TO_DEVICE) 1868 + else if (direction == DMA_MEM_TO_DEV) 1869 1869 addr = plat->dev_tx[cfg->dst_dev_type]; 1870 1870 1871 1871 return addr; ··· 1874 1874 static struct dma_async_tx_descriptor * 1875 1875 d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src, 1876 1876 struct scatterlist *sg_dst, unsigned int sg_len, 1877 - enum dma_data_direction direction, unsigned long dma_flags) 1877 + enum dma_transfer_direction direction, unsigned long dma_flags) 1878 1878 { 1879 1879 struct d40_chan *chan = container_of(dchan, struct d40_chan, chan); 1880 1880 dma_addr_t src_dev_addr = 0; ··· 1901 1901 if (direction != DMA_NONE) { 1902 1902 dma_addr_t dev_addr = d40_get_dev_addr(chan, direction); 1903 1903 1904 - if (direction == DMA_FROM_DEVICE) 1904 + if (direction == DMA_DEV_TO_MEM) 1905 1905 src_dev_addr = dev_addr; 1906 - else if (direction == DMA_TO_DEVICE) 1906 + else if (direction == DMA_MEM_TO_DEV) 1907 1907 dst_dev_addr = dev_addr; 1908 1908 } 1909 1909 ··· 2107 2107 static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan, 2108 2108 struct scatterlist *sgl, 2109 2109 unsigned int sg_len, 2110 
- enum dma_data_direction direction, 2110 + enum dma_transfer_direction direction, 2111 2111 unsigned long dma_flags) 2112 2112 { 2113 - if (direction != DMA_FROM_DEVICE && direction != DMA_TO_DEVICE) 2113 + if (direction != DMA_DEV_TO_MEM && direction != DMA_MEM_TO_DEV) 2114 2114 return NULL; 2115 2115 2116 2116 return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags); ··· 2119 2119 static struct dma_async_tx_descriptor * 2120 2120 dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr, 2121 2121 size_t buf_len, size_t period_len, 2122 - enum dma_data_direction direction) 2122 + enum dma_transfer_direction direction) 2123 2123 { 2124 2124 unsigned int periods = buf_len / period_len; 2125 2125 struct dma_async_tx_descriptor *txd; ··· 2268 2268 dst_addr_width = config->dst_addr_width; 2269 2269 dst_maxburst = config->dst_maxburst; 2270 2270 2271 - if (config->direction == DMA_FROM_DEVICE) { 2271 + if (config->direction == DMA_DEV_TO_MEM) { 2272 2272 dma_addr_t dev_addr_rx = 2273 2273 d40c->base->plat_data->dev_rx[cfg->src_dev_type]; 2274 2274 ··· 2291 2291 if (dst_maxburst == 0) 2292 2292 dst_maxburst = src_maxburst; 2293 2293 2294 - } else if (config->direction == DMA_TO_DEVICE) { 2294 + } else if (config->direction == DMA_MEM_TO_DEV) { 2295 2295 dma_addr_t dev_addr_tx = 2296 2296 d40c->base->plat_data->dev_tx[cfg->dst_dev_type]; 2297 2297 ··· 2356 2356 "configured channel %s for %s, data width %d/%d, " 2357 2357 "maxburst %d/%d elements, LE, no flow control\n", 2358 2358 dma_chan_name(chan), 2359 - (config->direction == DMA_FROM_DEVICE) ? "RX" : "TX", 2359 + (config->direction == DMA_DEV_TO_MEM) ? "RX" : "TX", 2360 2360 src_addr_width, dst_addr_width, 2361 2361 src_maxburst, dst_maxburst); 2362 2362
+9 -9
drivers/dma/timb_dma.c
··· 90 90 struct list_head queue; 91 91 struct list_head free_list; 92 92 unsigned int bytes_per_line; 93 - enum dma_data_direction direction; 93 + enum dma_transfer_direction direction; 94 94 unsigned int descs; /* Descriptors to allocate */ 95 95 unsigned int desc_elems; /* number of elems per descriptor */ 96 96 }; ··· 235 235 "td_chan: %p, chan: %d, membase: %p\n", 236 236 td_chan, td_chan->chan.chan_id, td_chan->membase); 237 237 238 - if (td_chan->direction == DMA_FROM_DEVICE) { 238 + if (td_chan->direction == DMA_DEV_TO_MEM) { 239 239 240 240 /* descriptor address */ 241 241 iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_DHAR); ··· 278 278 txd->cookie); 279 279 280 280 /* make sure to stop the transfer */ 281 - if (td_chan->direction == DMA_FROM_DEVICE) 281 + if (td_chan->direction == DMA_DEV_TO_MEM) 282 282 iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_ER); 283 283 /* Currently no support for stopping DMA transfers 284 284 else ··· 398 398 td_desc->txd.flags = DMA_CTRL_ACK; 399 399 400 400 td_desc->txd.phys = dma_map_single(chan2dmadev(chan), 401 - td_desc->desc_list, td_desc->desc_list_len, DMA_TO_DEVICE); 401 + td_desc->desc_list, td_desc->desc_list_len, DMA_MEM_TO_DEV); 402 402 403 403 err = dma_mapping_error(chan2dmadev(chan), td_desc->txd.phys); 404 404 if (err) { ··· 419 419 { 420 420 dev_dbg(chan2dev(td_desc->txd.chan), "Freeing desc: %p\n", td_desc); 421 421 dma_unmap_single(chan2dmadev(td_desc->txd.chan), td_desc->txd.phys, 422 - td_desc->desc_list_len, DMA_TO_DEVICE); 422 + td_desc->desc_list_len, DMA_MEM_TO_DEV); 423 423 424 424 kfree(td_desc->desc_list); 425 425 kfree(td_desc); ··· 558 558 559 559 static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan, 560 560 struct scatterlist *sgl, unsigned int sg_len, 561 - enum dma_data_direction direction, unsigned long flags) 561 + enum dma_transfer_direction direction, unsigned long flags) 562 562 { 563 563 struct timb_dma_chan *td_chan = 564 564 container_of(chan, struct 
timb_dma_chan, chan); ··· 606 606 } 607 607 608 608 dma_sync_single_for_device(chan2dmadev(chan), td_desc->txd.phys, 609 - td_desc->desc_list_len, DMA_TO_DEVICE); 609 + td_desc->desc_list_len, DMA_MEM_TO_DEV); 610 610 611 611 return &td_desc->txd; 612 612 } ··· 775 775 td_chan->descs = pchan->descriptors; 776 776 td_chan->desc_elems = pchan->descriptor_elements; 777 777 td_chan->bytes_per_line = pchan->bytes_per_line; 778 - td_chan->direction = pchan->rx ? DMA_FROM_DEVICE : 779 - DMA_TO_DEVICE; 778 + td_chan->direction = pchan->rx ? DMA_DEV_TO_MEM : 779 + DMA_MEM_TO_DEV; 780 780 781 781 td_chan->membase = td->membase + 782 782 (i / 2) * TIMBDMA_INSTANCE_OFFSET +
+6 -6
drivers/dma/txx9dmac.c
··· 845 845 846 846 static struct dma_async_tx_descriptor * 847 847 txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, 848 - unsigned int sg_len, enum dma_data_direction direction, 848 + unsigned int sg_len, enum dma_transfer_direction direction, 849 849 unsigned long flags) 850 850 { 851 851 struct txx9dmac_chan *dc = to_txx9dmac_chan(chan); ··· 860 860 861 861 BUG_ON(!ds || !ds->reg_width); 862 862 if (ds->tx_reg) 863 - BUG_ON(direction != DMA_TO_DEVICE); 863 + BUG_ON(direction != DMA_MEM_TO_DEV); 864 864 else 865 - BUG_ON(direction != DMA_FROM_DEVICE); 865 + BUG_ON(direction != DMA_DEV_TO_MEM); 866 866 if (unlikely(!sg_len)) 867 867 return NULL; 868 868 ··· 882 882 mem = sg_dma_address(sg); 883 883 884 884 if (__is_dmac64(ddev)) { 885 - if (direction == DMA_TO_DEVICE) { 885 + if (direction == DMA_MEM_TO_DEV) { 886 886 desc->hwdesc.SAR = mem; 887 887 desc->hwdesc.DAR = ds->tx_reg; 888 888 } else { ··· 891 891 } 892 892 desc->hwdesc.CNTR = sg_dma_len(sg); 893 893 } else { 894 - if (direction == DMA_TO_DEVICE) { 894 + if (direction == DMA_MEM_TO_DEV) { 895 895 desc->hwdesc32.SAR = mem; 896 896 desc->hwdesc32.DAR = ds->tx_reg; 897 897 } else { ··· 900 900 } 901 901 desc->hwdesc32.CNTR = sg_dma_len(sg); 902 902 } 903 - if (direction == DMA_TO_DEVICE) { 903 + if (direction == DMA_MEM_TO_DEV) { 904 904 sai = ds->reg_width; 905 905 dai = 0; 906 906 } else {
+2 -2
include/linux/amba/pl08x.h
··· 134 134 struct dma_async_tx_descriptor tx; 135 135 struct list_head node; 136 136 struct list_head dsg_list; 137 - enum dma_data_direction direction; 137 + enum dma_transfer_direction direction; 138 138 dma_addr_t llis_bus; 139 139 struct pl08x_lli *llis_va; 140 140 /* Default cctl value for LLIs */ ··· 197 197 dma_addr_t dst_addr; 198 198 u32 src_cctl; 199 199 u32 dst_cctl; 200 - enum dma_data_direction runtime_direction; 200 + enum dma_transfer_direction runtime_direction; 201 201 dma_cookie_t lc; 202 202 struct list_head pend_list; 203 203 struct pl08x_txd *at;
+1 -1
include/linux/dw_dmac.h
··· 127 127 128 128 struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, 129 129 dma_addr_t buf_addr, size_t buf_len, size_t period_len, 130 - enum dma_data_direction direction); 130 + enum dma_transfer_direction direction); 131 131 void dw_dma_cyclic_free(struct dma_chan *chan); 132 132 int dw_dma_cyclic_start(struct dma_chan *chan); 133 133 void dw_dma_cyclic_stop(struct dma_chan *chan);
+1 -1
include/linux/sh_dma.h
··· 30 30 struct sh_dmae_regs hw; 31 31 struct list_head node; 32 32 struct dma_async_tx_descriptor async_tx; 33 - enum dma_data_direction direction; 33 + enum dma_transfer_direction direction; 34 34 dma_cookie_t cookie; 35 35 size_t partial; 36 36 int chunks;