Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
tmio_mmc_dma.c at v4.9-rc4 (343 lines, 8.3 kB)
/*
 * linux/drivers/mmc/tmio_mmc_dma.c
 *
 * Copyright (C) 2010-2011 Guennadi Liakhovetski
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * DMA function for TMIO MMC implementations
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
#include <linux/pagemap.h>
#include <linux/scatterlist.h>

#include "tmio_mmc.h"

#define TMIO_MMC_MIN_DMA_LEN 8

void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
{
        if (!host->chan_tx || !host->chan_rx)
                return;

        if (host->dma->enable)
                host->dma->enable(host, enable);
}

void tmio_mmc_abort_dma(struct tmio_mmc_host *host)
{
        tmio_mmc_enable_dma(host, false);

        if (host->chan_rx)
                dmaengine_terminate_all(host->chan_rx);
        if (host->chan_tx)
                dmaengine_terminate_all(host->chan_tx);

        tmio_mmc_enable_dma(host, true);
}

static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
{
        struct scatterlist *sg = host->sg_ptr, *sg_tmp;
        struct dma_async_tx_descriptor *desc = NULL;
        struct dma_chan *chan = host->chan_rx;
        dma_cookie_t cookie;
        int ret, i;
        bool aligned = true, multiple = true;
        unsigned int align = (1 << host->pdata->alignment_shift) - 1;

        for_each_sg(sg, sg_tmp, host->sg_len, i) {
                if (sg_tmp->offset & align)
                        aligned = false;
                if (sg_tmp->length & align) {
                        multiple = false;
                        break;
                }
        }

        if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_SIZE ||
                          (align & PAGE_MASK))) || !multiple) {
                ret = -EINVAL;
                goto pio;
        }

        if (sg->length < TMIO_MMC_MIN_DMA_LEN) {
                host->force_pio = true;
                return;
        }

        tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_RXRDY);

        /* The only sg element can be unaligned, use our bounce buffer then */
        if (!aligned) {
                sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
                host->sg_ptr = &host->bounce_sg;
                sg = host->sg_ptr;
        }

        ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE);
        if (ret > 0)
                desc = dmaengine_prep_slave_sg(chan, sg, ret,
                        DMA_DEV_TO_MEM, DMA_CTRL_ACK);

        if (desc) {
                cookie = dmaengine_submit(desc);
                if (cookie < 0) {
                        desc = NULL;
                        ret = cookie;
                }
        }
pio:
        if (!desc) {
                /* DMA failed, fall back to PIO */
                tmio_mmc_enable_dma(host, false);
                if (ret >= 0)
                        ret = -EIO;
                host->chan_rx = NULL;
                dma_release_channel(chan);
                /* Free the Tx channel too */
                chan = host->chan_tx;
                if (chan) {
                        host->chan_tx = NULL;
                        dma_release_channel(chan);
                }
                dev_warn(&host->pdev->dev,
                         "DMA failed: %d, falling back to PIO\n", ret);
        }
}

static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
{
        struct scatterlist *sg = host->sg_ptr, *sg_tmp;
        struct dma_async_tx_descriptor *desc = NULL;
        struct dma_chan *chan = host->chan_tx;
        dma_cookie_t cookie;
        int ret, i;
        bool aligned = true, multiple = true;
        unsigned int align = (1 << host->pdata->alignment_shift) - 1;

        for_each_sg(sg, sg_tmp, host->sg_len, i) {
                if (sg_tmp->offset & align)
                        aligned = false;
                if (sg_tmp->length & align) {
                        multiple = false;
                        break;
                }
        }

        if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_SIZE ||
                          (align & PAGE_MASK))) || !multiple) {
                ret = -EINVAL;
                goto pio;
        }

        if (sg->length < TMIO_MMC_MIN_DMA_LEN) {
                host->force_pio = true;
                return;
        }

        tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_TXRQ);

        /* The only sg element can be unaligned, use our bounce buffer then */
        if (!aligned) {
                unsigned long flags;
                void *sg_vaddr = tmio_mmc_kmap_atomic(sg, &flags);
                sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
                memcpy(host->bounce_buf, sg_vaddr, host->bounce_sg.length);
                tmio_mmc_kunmap_atomic(sg, &flags, sg_vaddr);
                host->sg_ptr = &host->bounce_sg;
                sg = host->sg_ptr;
        }

        ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE);
        if (ret > 0)
                desc = dmaengine_prep_slave_sg(chan, sg, ret,
                        DMA_MEM_TO_DEV, DMA_CTRL_ACK);

        if (desc) {
                cookie = dmaengine_submit(desc);
                if (cookie < 0) {
                        desc = NULL;
                        ret = cookie;
                }
        }
pio:
        if (!desc) {
                /* DMA failed, fall back to PIO */
                tmio_mmc_enable_dma(host, false);
                if (ret >= 0)
                        ret = -EIO;
                host->chan_tx = NULL;
                dma_release_channel(chan);
                /* Free the Rx channel too */
                chan = host->chan_rx;
                if (chan) {
                        host->chan_rx = NULL;
                        dma_release_channel(chan);
                }
                dev_warn(&host->pdev->dev,
                         "DMA failed: %d, falling back to PIO\n", ret);
        }
}

void tmio_mmc_start_dma(struct tmio_mmc_host *host,
                        struct mmc_data *data)
{
        if (data->flags & MMC_DATA_READ) {
                if (host->chan_rx)
                        tmio_mmc_start_dma_rx(host);
        } else {
                if (host->chan_tx)
                        tmio_mmc_start_dma_tx(host);
        }
}

static void tmio_mmc_issue_tasklet_fn(unsigned long priv)
{
        struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv;
        struct dma_chan *chan = NULL;

        spin_lock_irq(&host->lock);

        if (host && host->data) {
                if (host->data->flags & MMC_DATA_READ)
                        chan = host->chan_rx;
                else
                        chan = host->chan_tx;
        }

        spin_unlock_irq(&host->lock);

        tmio_mmc_enable_mmc_irqs(host, TMIO_STAT_DATAEND);

        if (chan)
                dma_async_issue_pending(chan);
}

static void tmio_mmc_tasklet_fn(unsigned long arg)
{
        struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;

        spin_lock_irq(&host->lock);

        if (!host->data)
                goto out;

        if (host->data->flags & MMC_DATA_READ)
                dma_unmap_sg(host->chan_rx->device->dev,
                             host->sg_ptr, host->sg_len,
                             DMA_FROM_DEVICE);
        else
                dma_unmap_sg(host->chan_tx->device->dev,
                             host->sg_ptr, host->sg_len,
                             DMA_TO_DEVICE);

        tmio_mmc_do_data_irq(host);
out:
        spin_unlock_irq(&host->lock);
}

void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata)
{
        /* We can only either use DMA for both Tx and Rx or not use it at all */
        if (!host->dma || (!host->pdev->dev.of_node &&
                (!pdata->chan_priv_tx || !pdata->chan_priv_rx)))
                return;

        if (!host->chan_tx && !host->chan_rx) {
                struct resource *res = platform_get_resource(host->pdev,
                                                             IORESOURCE_MEM, 0);
                struct dma_slave_config cfg = {};
                dma_cap_mask_t mask;
                int ret;

                if (!res)
                        return;

                dma_cap_zero(mask);
                dma_cap_set(DMA_SLAVE, mask);

                host->chan_tx = dma_request_slave_channel_compat(mask,
                                        host->dma->filter, pdata->chan_priv_tx,
                                        &host->pdev->dev, "tx");
                dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__,
                        host->chan_tx);

                if (!host->chan_tx)
                        return;

                cfg.direction = DMA_MEM_TO_DEV;
                cfg.dst_addr = res->start + (CTL_SD_DATA_PORT << host->bus_shift);
                cfg.dst_addr_width = host->dma->dma_buswidth;
                if (!cfg.dst_addr_width)
                        cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
                cfg.src_addr = 0;
                ret = dmaengine_slave_config(host->chan_tx, &cfg);
                if (ret < 0)
                        goto ecfgtx;

                host->chan_rx = dma_request_slave_channel_compat(mask,
                                        host->dma->filter, pdata->chan_priv_rx,
                                        &host->pdev->dev, "rx");
                dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__,
                        host->chan_rx);

                if (!host->chan_rx)
                        goto ereqrx;

                cfg.direction = DMA_DEV_TO_MEM;
                cfg.src_addr = cfg.dst_addr + host->pdata->dma_rx_offset;
                cfg.src_addr_width = host->dma->dma_buswidth;
                if (!cfg.src_addr_width)
                        cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
                cfg.dst_addr = 0;
                ret = dmaengine_slave_config(host->chan_rx, &cfg);
                if (ret < 0)
                        goto ecfgrx;

                host->bounce_buf = (u8 *)__get_free_page(GFP_KERNEL | GFP_DMA);
                if (!host->bounce_buf)
                        goto ebouncebuf;

                tasklet_init(&host->dma_complete, tmio_mmc_tasklet_fn, (unsigned long)host);
                tasklet_init(&host->dma_issue, tmio_mmc_issue_tasklet_fn, (unsigned long)host);
        }

        tmio_mmc_enable_dma(host, true);

        return;

ebouncebuf:
ecfgrx:
        dma_release_channel(host->chan_rx);
        host->chan_rx = NULL;
ereqrx:
ecfgtx:
        dma_release_channel(host->chan_tx);
        host->chan_tx = NULL;
}

void tmio_mmc_release_dma(struct tmio_mmc_host *host)
{
        if (host->chan_tx) {
                struct dma_chan *chan = host->chan_tx;
                host->chan_tx = NULL;
                dma_release_channel(chan);
        }
        if (host->chan_rx) {
                struct dma_chan *chan = host->chan_rx;
                host->chan_rx = NULL;
                dma_release_channel(chan);
        }
        if (host->bounce_buf) {
                free_pages((unsigned long)host->bounce_buf, 0);
                host->bounce_buf = NULL;
        }
}