Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v5.7-rc2 · 322 lines · 7.6 kB
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Special handling for DW core on Intel MID platform
 *
 * Copyright (c) 2009, 2014 Intel Corporation.
 */

#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/types.h>

#include "spi-dw.h"

#ifdef CONFIG_SPI_DW_MID_DMA
#include <linux/pci.h>
#include <linux/platform_data/dma-dw.h>

#define RX_BUSY		0
#define TX_BUSY		1

static struct dw_dma_slave mid_dma_tx = { .dst_id = 1 };
static struct dw_dma_slave mid_dma_rx = { .src_id = 0 };

static bool mid_spi_dma_chan_filter(struct dma_chan *chan, void *param)
{
	struct dw_dma_slave *s = param;

	if (s->dma_dev != chan->device->dev)
		return false;

	chan->private = s;
	return true;
}

static int mid_spi_dma_init(struct dw_spi *dws)
{
	struct pci_dev *dma_dev;
	struct dw_dma_slave *tx = dws->dma_tx;
	struct dw_dma_slave *rx = dws->dma_rx;
	dma_cap_mask_t mask;

	/*
	 * Get the PCI device for the DMA controller; currently it can only
	 * be the Medfield DMA controller.
	 */
	dma_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0827, NULL);
	if (!dma_dev)
		return -ENODEV;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* 1. Init rx channel */
	rx->dma_dev = &dma_dev->dev;
	dws->rxchan = dma_request_channel(mask, mid_spi_dma_chan_filter, rx);
	if (!dws->rxchan)
		goto err_exit;
	dws->master->dma_rx = dws->rxchan;

	/* 2. Init tx channel */
	tx->dma_dev = &dma_dev->dev;
	dws->txchan = dma_request_channel(mask, mid_spi_dma_chan_filter, tx);
	if (!dws->txchan)
		goto free_rxchan;
	dws->master->dma_tx = dws->txchan;

	dws->dma_inited = 1;
	return 0;

free_rxchan:
	dma_release_channel(dws->rxchan);
err_exit:
	return -EBUSY;
}

static void mid_spi_dma_exit(struct dw_spi *dws)
{
	if (!dws->dma_inited)
		return;

	dmaengine_terminate_sync(dws->txchan);
	dma_release_channel(dws->txchan);

	dmaengine_terminate_sync(dws->rxchan);
	dma_release_channel(dws->rxchan);
}

static irqreturn_t dma_transfer(struct dw_spi *dws)
{
	u16 irq_status = dw_readl(dws, DW_SPI_ISR);

	if (!irq_status)
		return IRQ_NONE;

	dw_readl(dws, DW_SPI_ICR);
	spi_reset_chip(dws);

	dev_err(&dws->master->dev, "%s: FIFO overrun/underrun\n", __func__);
	dws->master->cur_msg->status = -EIO;
	spi_finalize_current_transfer(dws->master);
	return IRQ_HANDLED;
}

static bool mid_spi_can_dma(struct spi_controller *master,
		struct spi_device *spi, struct spi_transfer *xfer)
{
	struct dw_spi *dws = spi_controller_get_devdata(master);

	if (!dws->dma_inited)
		return false;

	/* Only use DMA for transfers that do not fit in the FIFO */
	return xfer->len > dws->fifo_len;
}

static enum dma_slave_buswidth convert_dma_width(u32 dma_width)
{
	if (dma_width == 1)
		return DMA_SLAVE_BUSWIDTH_1_BYTE;
	else if (dma_width == 2)
		return DMA_SLAVE_BUSWIDTH_2_BYTES;

	return DMA_SLAVE_BUSWIDTH_UNDEFINED;
}

/*
 * dws->dma_chan_busy is set before the dma transfer starts, the callback
 * for the tx channel will clear the corresponding bit.
 */
static void dw_spi_dma_tx_done(void *arg)
{
	struct dw_spi *dws = arg;

	clear_bit(TX_BUSY, &dws->dma_chan_busy);
	if (test_bit(RX_BUSY, &dws->dma_chan_busy))
		return;
	spi_finalize_current_transfer(dws->master);
}

static struct dma_async_tx_descriptor *dw_spi_dma_prepare_tx(struct dw_spi *dws,
		struct spi_transfer *xfer)
{
	struct dma_slave_config txconf;
	struct dma_async_tx_descriptor *txdesc;

	if (!xfer->tx_buf)
		return NULL;

	txconf.direction = DMA_MEM_TO_DEV;
	txconf.dst_addr = dws->dma_addr;
	txconf.dst_maxburst = 16;
	txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	txconf.dst_addr_width = convert_dma_width(dws->dma_width);
	txconf.device_fc = false;

	dmaengine_slave_config(dws->txchan, &txconf);

	txdesc = dmaengine_prep_slave_sg(dws->txchan,
				xfer->tx_sg.sgl,
				xfer->tx_sg.nents,
				DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!txdesc)
		return NULL;

	txdesc->callback = dw_spi_dma_tx_done;
	txdesc->callback_param = dws;

	return txdesc;
}

/*
 * dws->dma_chan_busy is set before the dma transfer starts, the callback
 * for the rx channel will clear the corresponding bit.
 */
static void dw_spi_dma_rx_done(void *arg)
{
	struct dw_spi *dws = arg;

	clear_bit(RX_BUSY, &dws->dma_chan_busy);
	if (test_bit(TX_BUSY, &dws->dma_chan_busy))
		return;
	spi_finalize_current_transfer(dws->master);
}

static struct dma_async_tx_descriptor *dw_spi_dma_prepare_rx(struct dw_spi *dws,
		struct spi_transfer *xfer)
{
	struct dma_slave_config rxconf;
	struct dma_async_tx_descriptor *rxdesc;

	if (!xfer->rx_buf)
		return NULL;

	rxconf.direction = DMA_DEV_TO_MEM;
	rxconf.src_addr = dws->dma_addr;
	rxconf.src_maxburst = 16;
	rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	rxconf.src_addr_width = convert_dma_width(dws->dma_width);
	rxconf.device_fc = false;

	dmaengine_slave_config(dws->rxchan, &rxconf);

	rxdesc = dmaengine_prep_slave_sg(dws->rxchan,
				xfer->rx_sg.sgl,
				xfer->rx_sg.nents,
				DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!rxdesc)
		return NULL;

	rxdesc->callback = dw_spi_dma_rx_done;
	rxdesc->callback_param = dws;

	return rxdesc;
}

static int mid_spi_dma_setup(struct dw_spi *dws, struct spi_transfer *xfer)
{
	u16 dma_ctrl = 0;

	/* DMA request thresholds: RX at 16 FIFO entries, TX at 16 free slots */
	dw_writel(dws, DW_SPI_DMARDLR, 0xf);
	dw_writel(dws, DW_SPI_DMATDLR, 0x10);

	if (xfer->tx_buf)
		dma_ctrl |= SPI_DMA_TDMAE;
	if (xfer->rx_buf)
		dma_ctrl |= SPI_DMA_RDMAE;
	dw_writel(dws, DW_SPI_DMACR, dma_ctrl);

	/* Unmask TX overflow and RX underflow/overflow interrupts */
	spi_umask_intr(dws, SPI_INT_TXOI | SPI_INT_RXUI | SPI_INT_RXOI);

	dws->transfer_handler = dma_transfer;

	return 0;
}

static int mid_spi_dma_transfer(struct dw_spi *dws, struct spi_transfer *xfer)
{
	struct dma_async_tx_descriptor *txdesc, *rxdesc;

	/* Prepare the TX dma transfer */
	txdesc = dw_spi_dma_prepare_tx(dws, xfer);

	/* Prepare the RX dma transfer */
	rxdesc = dw_spi_dma_prepare_rx(dws, xfer);

	/*
	 * RX must be started before TX: SPI is full duplex, so data is
	 * clocked into the RX FIFO as soon as TX starts shifting out.
	 */
	if (rxdesc) {
		set_bit(RX_BUSY, &dws->dma_chan_busy);
		dmaengine_submit(rxdesc);
		dma_async_issue_pending(dws->rxchan);
	}

	if (txdesc) {
		set_bit(TX_BUSY, &dws->dma_chan_busy);
		dmaengine_submit(txdesc);
		dma_async_issue_pending(dws->txchan);
	}

	return 0;
}

static void mid_spi_dma_stop(struct dw_spi *dws)
{
	if (test_bit(TX_BUSY, &dws->dma_chan_busy)) {
		dmaengine_terminate_sync(dws->txchan);
		clear_bit(TX_BUSY, &dws->dma_chan_busy);
	}
	if (test_bit(RX_BUSY, &dws->dma_chan_busy)) {
		dmaengine_terminate_sync(dws->rxchan);
		clear_bit(RX_BUSY, &dws->dma_chan_busy);
	}
}

static const struct dw_spi_dma_ops mid_dma_ops = {
	.dma_init	= mid_spi_dma_init,
	.dma_exit	= mid_spi_dma_exit,
	.dma_setup	= mid_spi_dma_setup,
	.can_dma	= mid_spi_can_dma,
	.dma_transfer	= mid_spi_dma_transfer,
	.dma_stop	= mid_spi_dma_stop,
};
#endif

/* Some specific info for SPI0 controller on Intel MID */

/* HW info for MRST Clk Control Unit, one 32-bit reg per controller */
#define MRST_SPI_CLK_BASE	100000000	/* 100 MHz */
#define MRST_CLK_SPI_REG	0xff11d86c
#define CLK_SPI_BDIV_OFFSET	0
#define CLK_SPI_BDIV_MASK	0x00000007
#define CLK_SPI_CDIV_OFFSET	9
#define CLK_SPI_CDIV_MASK	0x00000e00
#define CLK_SPI_DISABLE_OFFSET	8

int dw_spi_mid_init(struct dw_spi *dws)
{
	void __iomem *clk_reg;
	u32 clk_cdiv;

	clk_reg = ioremap(MRST_CLK_SPI_REG, 16);
	if (!clk_reg)
		return -ENOMEM;

	/* Get SPI controller operating freq info */
	clk_cdiv = readl(clk_reg + dws->bus_num * sizeof(u32));
	clk_cdiv &= CLK_SPI_CDIV_MASK;
	clk_cdiv >>= CLK_SPI_CDIV_OFFSET;
	dws->max_freq = MRST_SPI_CLK_BASE / (clk_cdiv + 1);

	iounmap(clk_reg);

#ifdef CONFIG_SPI_DW_MID_DMA
	dws->dma_tx = &mid_dma_tx;
	dws->dma_rx = &mid_dma_rx;
	dws->dma_ops = &mid_dma_ops;
#endif
	return 0;
}
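
The mid_dma_ops table is the only interface the generic DW SPI core (spi-dw.c) sees; everything above it is private to this file. For orientation, here is a minimal sketch of the call order a consumer of struct dw_spi_dma_ops is expected to follow. This is a hypothetical illustration, not the real spi-dw.c call sites, which interleave these steps with PIO handling and message bookkeeping; the function name example_run_dma_xfer and the -EAGAIN fallback convention are invented here.

/*
 * Hypothetical consumer of struct dw_spi_dma_ops, shown only to
 * illustrate the expected call order (illustration, not spi-dw.c).
 */
static int example_run_dma_xfer(struct dw_spi *dws, struct spi_device *spi,
				struct spi_transfer *xfer)
{
	int ret;

	/* Short transfers fit in the FIFO; let the caller fall back to PIO */
	if (!dws->dma_ops->can_dma(dws->master, spi, xfer))
		return -EAGAIN;	/* invented convention: caller runs PIO path */

	/* Program DMARDLR/DMATDLR/DMACR and unmask the error interrupts */
	ret = dws->dma_ops->dma_setup(dws, xfer);
	if (ret)
		return ret;

	/* Submit RX before TX; the descriptor callbacks finalize the xfer */
	return dws->dma_ops->dma_transfer(dws, xfer);
}

On the clock side, dw_spi_mid_init reads the per-controller clock register, extracts the 3-bit CDIV field (bits 11:9, per CLK_SPI_CDIV_MASK and CLK_SPI_CDIV_OFFSET) and derives the maximum SPI frequency from the 100 MHz base. For example, a CDIV value of 3 gives max_freq = 100000000 / (3 + 1) = 25 MHz.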