Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

dmaengine: xdmac: Add memset support

The XDMAC supports memset transfers, both over contiguous areas, and over
discontiguous areas through a LLI.

The hardware currently only supports contiguous memset operations, so add
support for those. Scatter-gathered memset support will come eventually.

Signed-off-by: Maxime Ripard <maxime.ripard@free-electrons.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>

authored by

Maxime Ripard and committed by
Vinod Koul
b206d9a2 f2704052

+89
+89
drivers/dma/at_xdmac.c
··· 1073 1073 return &first->tx_dma_desc; 1074 1074 } 1075 1075 1076 + static struct at_xdmac_desc *at_xdmac_memset_create_desc(struct dma_chan *chan, 1077 + struct at_xdmac_chan *atchan, 1078 + dma_addr_t dst_addr, 1079 + size_t len, 1080 + int value) 1081 + { 1082 + struct at_xdmac_desc *desc; 1083 + unsigned long flags; 1084 + size_t ublen; 1085 + u32 dwidth; 1086 + /* 1087 + * WARNING: The channel configuration is set here since there is no 1088 + * dmaengine_slave_config call in this case. Moreover we don't know the 1089 + * direction, it involves we can't dynamically set the source and dest 1090 + * interface so we have to use the same one. Only interface 0 allows EBI 1091 + * access. Hopefully we can access DDR through both ports (at least on 1092 + * SAMA5D4x), so we can use the same interface for source and dest, 1093 + * that solves the fact we don't know the direction. 1094 + */ 1095 + u32 chan_cc = AT_XDMAC_CC_DAM_INCREMENTED_AM 1096 + | AT_XDMAC_CC_SAM_INCREMENTED_AM 1097 + | AT_XDMAC_CC_DIF(0) 1098 + | AT_XDMAC_CC_SIF(0) 1099 + | AT_XDMAC_CC_MBSIZE_SIXTEEN 1100 + | AT_XDMAC_CC_MEMSET_HW_MODE 1101 + | AT_XDMAC_CC_TYPE_MEM_TRAN; 1102 + 1103 + dwidth = at_xdmac_align_width(chan, dst_addr); 1104 + 1105 + if (len >= (AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth)) { 1106 + dev_err(chan2dev(chan), 1107 + "%s: Transfer too large, aborting...\n", 1108 + __func__); 1109 + return NULL; 1110 + } 1111 + 1112 + spin_lock_irqsave(&atchan->lock, flags); 1113 + desc = at_xdmac_get_desc(atchan); 1114 + spin_unlock_irqrestore(&atchan->lock, flags); 1115 + if (!desc) { 1116 + dev_err(chan2dev(chan), "can't get descriptor\n"); 1117 + return NULL; 1118 + } 1119 + 1120 + chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth); 1121 + 1122 + ublen = len >> dwidth; 1123 + 1124 + desc->lld.mbr_da = dst_addr; 1125 + desc->lld.mbr_ds = value; 1126 + desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV3 1127 + | AT_XDMAC_MBR_UBC_NDEN 1128 + | AT_XDMAC_MBR_UBC_NSEN 1129 + | ublen; 1130 + desc->lld.mbr_cfg = chan_cc; 
1131 + 1132 + dev_dbg(chan2dev(chan), 1133 + "%s: lld: mbr_da=0x%08x, mbr_ds=0x%08x, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n", 1134 + __func__, desc->lld.mbr_da, desc->lld.mbr_ds, desc->lld.mbr_ubc, 1135 + desc->lld.mbr_cfg); 1136 + 1137 + return desc; 1138 + } 1139 + 1140 + struct dma_async_tx_descriptor * 1141 + at_xdmac_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value, 1142 + size_t len, unsigned long flags) 1143 + { 1144 + struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); 1145 + struct at_xdmac_desc *desc; 1146 + 1147 + dev_dbg(chan2dev(chan), "%s: dest=0x%08x, len=%d, pattern=0x%x, flags=0x%lx\n", 1148 + __func__, dest, len, value, flags); 1149 + 1150 + if (unlikely(!len)) 1151 + return NULL; 1152 + 1153 + desc = at_xdmac_memset_create_desc(chan, atchan, dest, len, value); 1154 + list_add_tail(&desc->desc_node, &desc->descs_list); 1155 + 1156 + desc->tx_dma_desc.cookie = -EBUSY; 1157 + desc->tx_dma_desc.flags = flags; 1158 + desc->xfer_size = len; 1159 + 1160 + return &desc->tx_dma_desc; 1161 + } 1162 + 1076 1163 static enum dma_status 1077 1164 at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie, 1078 1165 struct dma_tx_state *txstate) ··· 1686 1599 dma_cap_set(DMA_CYCLIC, atxdmac->dma.cap_mask); 1687 1600 dma_cap_set(DMA_INTERLEAVE, atxdmac->dma.cap_mask); 1688 1601 dma_cap_set(DMA_MEMCPY, atxdmac->dma.cap_mask); 1602 + dma_cap_set(DMA_MEMSET, atxdmac->dma.cap_mask); 1689 1603 dma_cap_set(DMA_SLAVE, atxdmac->dma.cap_mask); 1690 1604 /* 1691 1605 * Without DMA_PRIVATE the driver is not able to allocate more than ··· 1701 1613 atxdmac->dma.device_prep_dma_cyclic = at_xdmac_prep_dma_cyclic; 1702 1614 atxdmac->dma.device_prep_interleaved_dma = at_xdmac_prep_interleaved; 1703 1615 atxdmac->dma.device_prep_dma_memcpy = at_xdmac_prep_dma_memcpy; 1616 + atxdmac->dma.device_prep_dma_memset = at_xdmac_prep_dma_memset; 1704 1617 atxdmac->dma.device_prep_slave_sg = at_xdmac_prep_slave_sg; 1705 1618 atxdmac->dma.device_config = 
at_xdmac_device_config; 1706 1619 atxdmac->dma.device_pause = at_xdmac_device_pause;